#!/usr/bin/env bash
# Step 5 — Mantara schema generation.
#
# To switch LLM backend (OpenAI / Bedrock / Anthropic / Ollama / etc.):
#   1. Copy step-05-schema/.env.example to step-05-schema/.env
#   2. Edit .env — set MANTARA_BACKEND and the matching credentials
#   3. Re-run; no code changes needed
#   See: mantara_v8/BACKENDS.md for full per-backend setup instructions.
#
# Modes:
#   ./run.sh                       preview adapter input on latest step-4 run_dir
#   ./run.sh --run-dir <path>      specific run_dir
#   ./run.sh --preview             dump adapter input text only (no Mantara call)
#   ./run.sh --invoke              call Mantara end-to-end (P2 — pending)
#   ./run.sh --save-input          persist runs/{rid}/schema/step5_input.txt
#   ./run.sh --no-prd              exclude PRD prose (smaller, cheaper input)
#   ./run.sh --test                run pytest

set -euo pipefail
cd "$(dirname "$0")"
HERE=$PWD

# Load local overrides (backend selection, credentials) when a .env file
# exists next to this script; `set -a` exports every variable it defines
# so the Python child process inherits them.
if [[ -f "$HERE/.env" ]]; then
  set -a
  # shellcheck disable=SC1091
  . "$HERE/.env"
  set +a
fi

# Reuse step-2's venv — the Python dependencies are identical, so this
# step does not maintain its own environment.

# TODO: change this hard-coded relative path (preferably migrate to uv).
VENV="$HERE/../step-02-vision-extraction/pipeline/.venv"
if [[ ! -d "$VENV" ]]; then
  # Fail fast with a pointer to the prerequisite setup; diagnostics go to stderr.
  echo "ERROR: step-2 venv not found at $VENV — run step-2 setup first" >&2
  exit 1
fi
# shellcheck disable=SC1091
source "$VENV/bin/activate"

# Dispatch on the first argument. Fix: previously `--test` swallowed every
# argument after the flag; now they are forwarded to pytest, so e.g.
# `./run.sh --test -k foo` works. Plain `./run.sh --test` behaves as before.
MODE="${1:-}"
case "$MODE" in
  --test)
    shift
    exec python3 -m pytest tests/ -v "$@"
    ;;
  *)
    # All other modes (including no arguments) are handled by the runner,
    # which parses --run-dir/--preview/--invoke/--save-input/--no-prd itself.
    exec python3 -m pipeline.runner "$@"
    ;;
esac
