# generated by datamodel-codegen:
#   filename:  openapi.json

from __future__ import annotations

from dataclasses import dataclass
from typing import Any, Literal, Mapping, Optional, Sequence, Union


@dataclass
class CreateExperimentRequestBody:
    """Request body for creating an experiment against a dataset."""

    name: Optional[str] = None
    description: Optional[str] = None
    metadata: Optional[Mapping[str, Any]] = None
    # Dataset version to pin the experiment to; presumably the latest version
    # is used when omitted — confirm against the server's OpenAPI spec.
    version_id: Optional[str] = None
    # Number of times each example is run; client-side default is 1.
    repetitions: Optional[int] = 1


@dataclass
class Dataset:
    """A dataset record as returned by the API (all fields required)."""

    id: str
    name: str
    description: Optional[str]
    metadata: Mapping[str, Any]
    created_at: str  # timestamp serialized as a string; exact format defined by the API
    updated_at: str


@dataclass
class DatasetExample:
    """A single example (input/output pair plus metadata) within a dataset."""

    id: str
    input: Mapping[str, Any]
    output: Mapping[str, Any]
    metadata: Mapping[str, Any]
    updated_at: str


@dataclass
class DatasetVersion:
    """An immutable version snapshot of a dataset."""

    version_id: str
    description: Optional[str]
    metadata: Mapping[str, Any]
    created_at: str


@dataclass
class DatasetWithExampleCount:
    """A :class:`Dataset` payload augmented with its example count."""

    id: str
    name: str
    description: Optional[str]
    metadata: Mapping[str, Any]
    created_at: str
    updated_at: str
    example_count: int


@dataclass
class Experiment:
    """An experiment run over a specific dataset version."""

    id: str
    dataset_id: str
    dataset_version_id: str
    repetitions: int
    metadata: Mapping[str, Any]
    project_name: Optional[str]
    created_at: str
    updated_at: str


@dataclass
class GetDatasetResponseBody:
    """Response envelope for fetching a single dataset."""

    data: DatasetWithExampleCount


@dataclass
class GetExperimentResponseBody:
    """Response envelope for fetching a single experiment."""

    data: Experiment


@dataclass
class InsertedSpanAnnotation:
    """Identifier of a span annotation created by an annotate request."""

    id: str


@dataclass
class ListDatasetExamplesData:
    """Payload of a list-examples response: the dataset/version scope plus its examples."""

    dataset_id: str
    version_id: str
    examples: Sequence[DatasetExample]


@dataclass
class ListDatasetExamplesResponseBody:
    """Response envelope for listing dataset examples."""

    data: ListDatasetExamplesData


@dataclass
class ListDatasetVersionsResponseBody:
    """Paginated response for listing dataset versions."""

    data: Sequence[DatasetVersion]
    next_cursor: Optional[str]  # pagination cursor; None signals the last page


@dataclass
class ListDatasetsResponseBody:
    """Paginated response for listing datasets."""

    data: Sequence[Dataset]
    next_cursor: Optional[str]  # pagination cursor; None signals the last page


@dataclass
class ListExperimentsResponseBody:
    """Response envelope for listing experiments (not paginated)."""

    data: Sequence[Experiment]


@dataclass
class Prompt:
    """A stored prompt as returned by the API."""

    name: str
    id: str
    description: Optional[str] = None
    # ID of the prompt this one was derived from, when applicable.
    source_prompt_id: Optional[str] = None


@dataclass
class PromptAnthropicInvocationParametersContent:
    """Invocation parameters for Anthropic models; only ``max_tokens`` is required."""

    max_tokens: int
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    stop_sequences: Optional[Sequence[str]] = None


@dataclass
class PromptAzureOpenAIInvocationParametersContent:
    """Invocation parameters for Azure OpenAI models (all optional)."""

    temperature: Optional[float] = None
    max_tokens: Optional[int] = None
    frequency_penalty: Optional[float] = None
    presence_penalty: Optional[float] = None
    top_p: Optional[float] = None
    seed: Optional[int] = None
    reasoning_effort: Optional[Literal["low", "medium", "high"]] = None


@dataclass
class PromptData:
    """Prompt attributes supplied when creating a prompt (no server-assigned ``id``)."""

    name: str
    description: Optional[str] = None
    source_prompt_id: Optional[str] = None


@dataclass
class PromptGoogleInvocationParametersContent:
    """Invocation parameters for Google models (all optional)."""

    temperature: Optional[float] = None
    max_output_tokens: Optional[int] = None
    stop_sequences: Optional[Sequence[str]] = None
    presence_penalty: Optional[float] = None
    frequency_penalty: Optional[float] = None
    top_p: Optional[float] = None
    top_k: Optional[int] = None


@dataclass
class PromptOpenAIInvocationParametersContent:
    """Invocation parameters for OpenAI models (all optional)."""

    temperature: Optional[float] = None
    max_tokens: Optional[int] = None
    frequency_penalty: Optional[float] = None
    presence_penalty: Optional[float] = None
    top_p: Optional[float] = None
    seed: Optional[int] = None
    reasoning_effort: Optional[Literal["low", "medium", "high"]] = None


@dataclass
class PromptResponseFormatJSONSchemaDefinition:
    """A named JSON-schema definition used as a structured response format."""

    name: str
    description: Optional[str] = None
    # Trailing underscore avoids clashing with reserved/shadowed names; the
    # wire field is presumably "schema" — codegen aliasing, confirm in spec.
    schema_: Optional[Mapping[str, Any]] = None
    strict: Optional[bool] = None


@dataclass
class PromptStringTemplate:
    """A plain-string prompt template; ``type`` discriminates the template union."""

    template: str
    type: Literal["string"]


@dataclass
class PromptToolChoiceNone:
    """Tool-choice variant: the model must not call any tool."""

    type: Literal["none"]


@dataclass
class PromptToolChoiceOneOrMore:
    """Tool-choice variant: the model must call at least one tool."""

    type: Literal["one_or_more"]


@dataclass
class PromptToolChoiceSpecificFunctionTool:
    """Tool-choice variant: the model must call the named function."""

    function_name: str
    type: Literal["specific_function"]


@dataclass
class PromptToolChoiceZeroOrMore:
    """Tool-choice variant: the model may call zero or more tools."""

    type: Literal["zero_or_more"]


@dataclass
class PromptToolFunctionDefinition:
    """Definition of a callable function tool (name, description, JSON-schema parameters)."""

    name: str
    description: Optional[str] = None
    parameters: Optional[Mapping[str, Any]] = None
    strict: Optional[bool] = None


@dataclass
class SpanAnnotationResult:
    """The result portion of a span annotation: label, numeric score, and/or explanation."""

    label: Optional[str] = None
    score: Optional[float] = None
    explanation: Optional[str] = None


@dataclass
class TextContentPart:
    """Message content part carrying plain text; ``type`` discriminates the part union."""

    text: str
    type: Literal["text"]


@dataclass
class ToolCallFunction:
    """A function invocation within a tool call; ``arguments`` is a string (presumably JSON-encoded — confirm in spec)."""

    name: str
    arguments: str
    type: Literal["function"]


@dataclass
class ToolResultContentPart:
    """Message content part carrying a tool's result, correlated by ``tool_call_id``."""

    tool_call_id: str
    # Result may be any JSON value (scalar, object, or array) or null.
    tool_result: Optional[Union[bool, int, float, str, Mapping[str, Any], Sequence[Any]]]
    type: Literal["tool_result"]


@dataclass
class UploadDatasetData:
    """Payload of an upload-dataset response: the new dataset's ID."""

    dataset_id: str


@dataclass
class UploadDatasetResponseBody:
    """Response envelope for uploading a dataset."""

    data: UploadDatasetData


@dataclass
class ValidationError:
    """A single validation error in the FastAPI/Pydantic-style error format."""

    # Location path of the invalid field (mix of keys and list indices).
    loc: Sequence[Union[str, int]]
    msg: str
    type: str


@dataclass
class AnnotateSpansResponseBody:
    """Response envelope for annotating spans: IDs of the inserted annotations."""

    data: Sequence[InsertedSpanAnnotation]


@dataclass
class CreateExperimentResponseBody:
    """Response envelope for creating an experiment."""

    data: Experiment


@dataclass
class GetPromptsResponseBody:
    """Response envelope for listing prompts."""

    data: Sequence[Prompt]


@dataclass
class HTTPValidationError:
    """Top-level 422 error body wrapping individual :class:`ValidationError` items."""

    detail: Optional[Sequence[ValidationError]] = None


@dataclass
class PromptAnthropicInvocationParameters:
    """Discriminated-union member wrapping Anthropic invocation parameters."""

    anthropic: PromptAnthropicInvocationParametersContent
    type: Literal["anthropic"]


@dataclass
class PromptAzureOpenAIInvocationParameters:
    """Discriminated-union member wrapping Azure OpenAI invocation parameters."""

    azure_openai: PromptAzureOpenAIInvocationParametersContent
    type: Literal["azure_openai"]


@dataclass
class PromptGoogleInvocationParameters:
    """Discriminated-union member wrapping Google invocation parameters."""

    google: PromptGoogleInvocationParametersContent
    type: Literal["google"]


@dataclass
class PromptOpenAIInvocationParameters:
    """Discriminated-union member wrapping OpenAI invocation parameters."""

    openai: PromptOpenAIInvocationParametersContent
    type: Literal["openai"]


@dataclass
class PromptResponseFormatJSONSchema:
    """Response-format wrapper selecting JSON-schema structured output."""

    json_schema: PromptResponseFormatJSONSchemaDefinition
    # Narrowed from plain ``str`` to ``Literal`` for consistency with every
    # other discriminated-union member in this module (e.g. PromptStringTemplate,
    # PromptToolChoice*). Runtime behavior and the default value are unchanged.
    type: Literal["json_schema"] = "json_schema"


@dataclass
class PromptToolFunction:
    """A function tool attached to a prompt, wrapping its definition."""

    function: PromptToolFunctionDefinition
    # Narrowed from plain ``str`` to ``Literal`` for consistency with the other
    # discriminator fields in this module (e.g. ToolCallFunction's
    # ``type: Literal["function"]``). Runtime behavior and default unchanged.
    type: Literal["function"] = "function"


@dataclass
class PromptTools:
    """Tool configuration attached to a prompt version."""

    tools: Sequence[PromptToolFunction]
    # Narrowed from plain ``str`` to ``Literal`` for consistency with the other
    # discriminator fields in this module. Runtime behavior and default unchanged.
    type: Literal["tools"] = "tools"
    # How the model is allowed to choose among the tools; one of the
    # PromptToolChoice* variants, or None when unspecified.
    tool_choice: Optional[
        Union[
            PromptToolChoiceNone,
            PromptToolChoiceZeroOrMore,
            PromptToolChoiceOneOrMore,
            PromptToolChoiceSpecificFunctionTool,
        ]
    ] = None
    disable_parallel_tool_calls: Optional[bool] = None


@dataclass
class SpanAnnotation:
    """An annotation to attach to a span, authored by an LLM or a human."""

    span_id: str
    name: str
    annotator_kind: Literal["LLM", "HUMAN"]
    result: Optional[SpanAnnotationResult] = None
    metadata: Optional[Mapping[str, Any]] = None


@dataclass
class ToolCallContentPart:
    """Message content part representing a tool call issued by the model."""

    tool_call_id: str
    tool_call: ToolCallFunction
    type: Literal["tool_call"]


@dataclass
class AnnotateSpansRequestBody:
    """Request body for attaching annotations to spans."""

    data: Sequence[SpanAnnotation]


@dataclass
class PromptMessage:
    """One message of a chat template: a role plus string or structured content parts."""

    role: Literal["user", "assistant", "model", "ai", "tool", "system", "developer"]
    content: Union[
        str,
        Sequence[Union[TextContentPart, ToolCallContentPart, ToolResultContentPart]],
    ]


@dataclass
class PromptChatTemplate:
    """A chat-style prompt template; ``type`` discriminates the template union."""

    messages: Sequence[PromptMessage]
    type: Literal["chat"]


@dataclass
class PromptVersion:
    """A stored prompt version as returned by the API (includes server-assigned ``id``)."""

    model_provider: Literal["OPENAI", "AZURE_OPENAI", "ANTHROPIC", "GOOGLE"]
    model_name: str
    template: Union[PromptChatTemplate, PromptStringTemplate]
    template_type: Literal["STR", "CHAT"]
    template_format: Literal["MUSTACHE", "F_STRING", "NONE"]
    # Provider-specific parameter wrapper; should correspond to model_provider,
    # though this module does not enforce that pairing.
    invocation_parameters: Union[
        PromptOpenAIInvocationParameters,
        PromptAzureOpenAIInvocationParameters,
        PromptAnthropicInvocationParameters,
        PromptGoogleInvocationParameters,
    ]
    id: str
    description: Optional[str] = None
    tools: Optional[PromptTools] = None
    response_format: Optional[PromptResponseFormatJSONSchema] = None


@dataclass
class PromptVersionData:
    """Prompt-version attributes supplied when creating a version (no server-assigned ``id``)."""

    model_provider: Literal["OPENAI", "AZURE_OPENAI", "ANTHROPIC", "GOOGLE"]
    model_name: str
    template: Union[PromptChatTemplate, PromptStringTemplate]
    template_type: Literal["STR", "CHAT"]
    template_format: Literal["MUSTACHE", "F_STRING", "NONE"]
    # Provider-specific parameter wrapper; should correspond to model_provider,
    # though this module does not enforce that pairing.
    invocation_parameters: Union[
        PromptOpenAIInvocationParameters,
        PromptAzureOpenAIInvocationParameters,
        PromptAnthropicInvocationParameters,
        PromptGoogleInvocationParameters,
    ]
    description: Optional[str] = None
    tools: Optional[PromptTools] = None
    response_format: Optional[PromptResponseFormatJSONSchema] = None


@dataclass
class CreatePromptRequestBody:
    """Request body for creating a prompt together with its initial version."""

    prompt: PromptData
    version: PromptVersionData


@dataclass
class CreatePromptResponseBody:
    """Response envelope for creating a prompt: the created version."""

    data: PromptVersion


@dataclass
class GetPromptResponseBody:
    """Response envelope for fetching a single prompt version."""

    data: PromptVersion


@dataclass
class GetPromptVersionsResponseBody:
    """Response envelope for listing a prompt's versions."""

    data: Sequence[PromptVersion]
