"""Configuration for Mantara."""

import os
from pathlib import Path
from dotenv import load_dotenv

load_dotenv()

# Paths
BASE_DIR = Path(__file__).resolve().parent  # resolve() guards against a relative __file__
PROMPTS_DIR = BASE_DIR / "prompts"
SCHEMAS_DIR = BASE_DIR / "schemas"
OUTPUT_DIR = BASE_DIR / "output"
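
# Illustrative addition (an assumption, not original behavior): creating the
# output directory at import keeps later writers from failing on a first run.
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)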

# MANTARA_PROMPT_VERSION selects the system prompt file:
#   "v8"  → system_prompt_v8.md  (cfg_* lookup tables, no PostgreSQL ENUMs; current production)
#   "v1"  → system_prompt.md     (legacy, used PostgreSQL ENUM types)
# Defaults to v8, matching the v8 validators (25 checks); any value other
# than "v1" falls through to v8.
_PROMPT_VERSION = os.getenv("MANTARA_PROMPT_VERSION", "v8").lower()
if _PROMPT_VERSION == "v1":
    SYSTEM_PROMPT_PATH = PROMPTS_DIR / "system_prompt.md"
else:
    SYSTEM_PROMPT_PATH = PROMPTS_DIR / "system_prompt_v8.md"
COMPARISON_PROMPT_PATH = PROMPTS_DIR / "comparison_prompt.md"
SELF_ASSESSMENT_PROMPT_PATH = PROMPTS_DIR / "self_assessment_prompt.md"
JSON_SCHEMA_PATH = SCHEMAS_DIR / "mantara_schema_v1.json"
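
# Illustrative helper (hypothetical name, not an existing Mantara API): a
# caller could read the selected prompt like this, failing early with a
# clear error when a prompt file is missing.
def read_prompt(path: Path) -> str:
    """Return the contents of a prompt file, e.g. SYSTEM_PROMPT_PATH."""
    if not path.is_file():
        raise FileNotFoundError(f"Prompt file not found: {path}")
    return path.read_text(encoding="utf-8")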

# OpenAI
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")  # may be None; needed only for the openai backend
MODEL = os.getenv("MANTARA_MODEL", "gpt-4o")
MAX_TOKENS = int(os.getenv("MANTARA_MAX_TOKENS", "16000"))
TEMPERATURE = float(os.getenv("MANTARA_TEMPERATURE", "0.2"))
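
# Note: the int()/float() calls above raise ValueError at import time if an
# env var is malformed. A tolerant variant could fall back to the default
# instead; the helper below is a sketch with a hypothetical name, not code
# the rest of the module uses.
def _env_float(name: str, default: float) -> float:
    """Parse a float env var, falling back to `default` on malformed input."""
    try:
        return float(os.getenv(name, str(default)))
    except ValueError:
        return default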

# AWS Bedrock (Claude)
AWS_BEARER_TOKEN_BEDROCK = os.getenv("AWS_BEARER_TOKEN_BEDROCK", "")
AWS_REGION = os.getenv("AWS_REGION", "ap-southeast-7")
BEDROCK_MODEL = os.getenv("BEDROCK_MODEL", "apac.anthropic.claude-sonnet-4-20250514-v1:0")

# Ollama (local LLM); the /v1 path is Ollama's OpenAI-compatible endpoint
OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434/v1")
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "qwen2.5-coder:7b")

# Retry / resilience
TIMEOUT_SECONDS = int(os.getenv("MANTARA_TIMEOUT", "600"))
MAX_RETRIES = int(os.getenv("MANTARA_MAX_RETRIES", "2"))
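
# Hedged sketch: one way these settings could feed an OpenAI client (the
# openai>=1.0 constructor does accept `timeout` and `max_retries`). Mantara's
# actual client wiring lives outside this module, so this function is
# illustrative only.
def make_openai_client():
    from openai import OpenAI  # deferred import keeps this config module light
    return OpenAI(api_key=OPENAI_API_KEY, timeout=TIMEOUT_SECONDS, max_retries=MAX_RETRIES)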

# Backend selection (openai, ollama, llamacpp, bedrock)
BACKEND = os.getenv("MANTARA_BACKEND", "openai")
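
# Illustrative guard (an assumption; the module as written does no validation):
# warn at import time when MANTARA_BACKEND is not a recognized value.
_KNOWN_BACKENDS = {"openai", "ollama", "llamacpp", "bedrock"}
if BACKEND not in _KNOWN_BACKENDS:
    import warnings
    warnings.warn(
        f"Unrecognized MANTARA_BACKEND {BACKEND!r}; "
        f"expected one of {sorted(_KNOWN_BACKENDS)}"
    )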
