"""Comparator — Orchestrate schema comparison and self-assessment via OpenAI Structured Outputs."""

import time

from openai import OpenAI, APITimeoutError, RateLimitError, APIConnectionError, APIStatusError

from config import (
    OPENAI_API_KEY, COMPARISON_PROMPT_PATH, SELF_ASSESSMENT_PROMPT_PATH,
    TIMEOUT_SECONDS, MAX_RETRIES,
)
from comparison_models import ComparisonResult, SelfAssessmentResult


_RETRYABLE = (APITimeoutError, RateLimitError, APIConnectionError)


def compare_schemas(
    schema_v1: str,
    schema_v2: str,
    label_v1: str = "V1",
    label_v2: str = "V2",
    model: str = "gpt-4o",
) -> tuple[ComparisonResult, float, int]:
    """Compare two SQL schemas and return a structured scorecard.

    Sends both schemas to the model with the comparison system prompt and
    parses the response into a ``ComparisonResult`` via Structured Outputs.
    Transient failures (timeout, rate limit, connection) are retried up to
    ``MAX_RETRIES`` times with exponential backoff (2s, 4s, 8s, ...).

    Args:
        schema_v1: SQL DDL text of the first schema.
        schema_v2: SQL DDL text of the second schema.
        label_v1: Display label the model should use for the first schema.
        label_v2: Display label the model should use for the second schema.
        model: OpenAI model name.

    Returns:
        (ComparisonResult, elapsed_seconds, total_tokens)

    Raises:
        ValueError: The model refused the request or returned no parsed output.
        RuntimeError: Non-retryable API error, or all retries exhausted.
    """
    system_prompt = COMPARISON_PROMPT_PATH.read_text()

    user_message = (
        f"## Schema: {label_v1}\n\n```sql\n{schema_v1}\n```\n\n"
        f"## Schema: {label_v2}\n\n```sql\n{schema_v2}\n```\n\n"
        f"Compare these two schemas. Use \"{label_v1}\" and \"{label_v2}\" as labels."
    )

    client = OpenAI(api_key=OPENAI_API_KEY, timeout=TIMEOUT_SECONDS)
    last_error = None

    for attempt in range(1 + MAX_RETRIES):
        try:
            # Monotonic clock: immune to wall-clock jumps (NTP, DST), unlike time.time().
            start = time.monotonic()

            completion = client.beta.chat.completions.parse(
                model=model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_message},
                ],
                response_format=ComparisonResult,
                max_tokens=4000,
                temperature=0.3,
            )

            elapsed = round(time.monotonic() - start, 1)

            message = completion.choices[0].message

            # Structured Outputs can refuse instead of answering; surface that explicitly.
            if message.refusal:
                raise ValueError(f"Model refused the request: {message.refusal}")

            if message.parsed is None:
                raise ValueError("Model returned no structured output")

            # usage may be absent on some responses; report 0 rather than crash.
            total_tokens = completion.usage.total_tokens if completion.usage else 0

            return message.parsed, elapsed, total_tokens

        except _RETRYABLE as e:
            last_error = e
            if attempt < MAX_RETRIES:
                # Exponential backoff: 2, 4, 8, ... seconds.
                wait = 2 ** (attempt + 1)
                time.sleep(wait)
            else:
                break

        except APIStatusError as e:
            # Non-transient API failure (4xx/5xx with a status) — do not retry.
            raise RuntimeError(
                f"OpenAI API error ({e.status_code}): {e.message}"
            ) from e

    raise RuntimeError(
        f"Failed after {1 + MAX_RETRIES} attempts. "
        f"Last error: {type(last_error).__name__}: {last_error}"
    )


def assess_schema(
    sql_text: str,
    user_description: str = "",
    model: str = "gpt-4o",
) -> tuple[SelfAssessmentResult, float, int]:
    """Self-assess a single SQL schema against 10 quality criteria.

    Sends the schema (optionally preceded by the original business
    description) with the self-assessment system prompt and parses the
    response into a ``SelfAssessmentResult`` via Structured Outputs.
    Transient failures (timeout, rate limit, connection) are retried up to
    ``MAX_RETRIES`` times with exponential backoff (2s, 4s, 8s, ...).

    Args:
        sql_text: SQL DDL text of the schema to assess.
        user_description: Optional business description providing context.
        model: OpenAI model name.

    Returns:
        (SelfAssessmentResult, elapsed_seconds, total_tokens)

    Raises:
        ValueError: The model refused the request or returned no parsed output.
        RuntimeError: Non-retryable API error, or all retries exhausted.
    """
    system_prompt = SELF_ASSESSMENT_PROMPT_PATH.read_text()

    user_message = f"## Original Business Description\n\n{user_description}\n\n" if user_description else ""
    user_message += f"## SQL Schema\n\n```sql\n{sql_text}\n```\n\nRate this schema against all 10 criteria."

    client = OpenAI(api_key=OPENAI_API_KEY, timeout=TIMEOUT_SECONDS)
    last_error = None

    for attempt in range(1 + MAX_RETRIES):
        try:
            # Monotonic clock: immune to wall-clock jumps (NTP, DST), unlike time.time().
            start = time.monotonic()

            completion = client.beta.chat.completions.parse(
                model=model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_message},
                ],
                response_format=SelfAssessmentResult,
                max_tokens=4000,
                temperature=0.3,
            )

            elapsed = round(time.monotonic() - start, 1)

            message = completion.choices[0].message

            # Structured Outputs can refuse instead of answering; surface that explicitly.
            if message.refusal:
                raise ValueError(f"Model refused the request: {message.refusal}")

            if message.parsed is None:
                raise ValueError("Model returned no structured output")

            # usage may be absent on some responses; report 0 rather than crash.
            total_tokens = completion.usage.total_tokens if completion.usage else 0

            return message.parsed, elapsed, total_tokens

        except _RETRYABLE as e:
            last_error = e
            if attempt < MAX_RETRIES:
                # Exponential backoff: 2, 4, 8, ... seconds.
                wait = 2 ** (attempt + 1)
                time.sleep(wait)
            else:
                break

        except APIStatusError as e:
            # Non-transient API failure (4xx/5xx with a status) — do not retry.
            raise RuntimeError(
                f"OpenAI API error ({e.status_code}): {e.message}"
            ) from e

    raise RuntimeError(
        f"Failed after {1 + MAX_RETRIES} attempts. "
        f"Last error: {type(last_error).__name__}: {last_error}"
    )
