from __future__ import annotations

import argparse
import base64
import os
from typing import Optional

from langchain_openai import ChatOpenAI


def file_to_data_url(path: str) -> str:
    """Read the file at *path* and encode it as a base64 ``data:`` URL.

    The MIME type is inferred from the file extension (case-insensitive);
    unknown extensions fall back to ``application/octet-stream``.
    """
    mime_by_ext = {
        ".jpg": "image/jpeg",
        ".jpeg": "image/jpeg",
        ".png": "image/png",
        ".gif": "image/gif",
        ".webp": "image/webp",
        ".bmp": "image/bmp",
        ".tiff": "image/tiff",
        ".svg": "image/svg+xml",
    }
    _, extension = os.path.splitext(path)
    mime = mime_by_ext.get(extension.lower(), "application/octet-stream")
    with open(path, "rb") as handle:
        raw = handle.read()
    encoded = base64.b64encode(raw).decode("utf-8")
    return f"data:{mime};base64,{encoded}"


def build_multimodal_content(prompt: str, image_url: Optional[str], image_path: Optional[str]):
    """Assemble the multimodal content parts for a single user message.

    Args:
        prompt: Optional text instruction; skipped when empty.
        image_url: Remote image URL. Takes precedence over ``image_path``.
        image_path: Local image file, encoded into a base64 data URL.

    Returns:
        A list of OpenAI Chat Completions content parts (text + image).

    Raises:
        ValueError: If neither ``image_url`` nor ``image_path`` is given.
    """
    if not image_url and not image_path:
        raise ValueError("Provide --url or --path for the image")
    content = []
    if prompt:
        content.append({"type": "text", "text": prompt})
    # Chat Completions endpoints (DeepSeek's /v1 is OpenAI-compatible) expect
    # {"type": "image_url", "image_url": {"url": ..., "detail": ...}}.
    # The previous {"type": "input_image", "image_url": "<str>"} shape belongs
    # to the OpenAI Responses API and is rejected by Chat Completions.
    url = image_url if image_url else file_to_data_url(image_path or "")
    content.append({
        "type": "image_url",
        "image_url": {"url": url, "detail": "high"},
    })
    return content


def main() -> None:
    """CLI entry point: send one image (remote URL or local file) to a
    DeepSeek vision model via its OpenAI-compatible endpoint and print
    the model's text reply.
    """
    parser = argparse.ArgumentParser(description="DeepSeek multimodal OCR via langchain-openai")
    source = parser.add_mutually_exclusive_group(required=True)
    source.add_argument("--url", dest="image_url", help="Image URL")
    source.add_argument("--path", dest="image_path", help="Local image path")
    parser.add_argument("--prompt", default="请识别图片中的文字与违规之处，并给出要点。")
    parser.add_argument("--model", default=os.environ.get("DEEPSEEK_VISION_MODEL", "deepseek-vl"))
    parser.add_argument("--base", default=os.environ.get("DEEPSEEK_API_BASE", "https://api.deepseek.com/v1"))
    parser.add_argument("--api-key", default=os.environ.get("DEEPSEEK_API_KEY"))
    args = parser.parse_args()

    # Fail fast before any network setup if no credential was supplied.
    if not args.api_key:
        raise RuntimeError("Missing API key. Set --api-key or DEEPSEEK_API_KEY env var.")

    # Point ChatOpenAI at the DeepSeek OpenAI-compatible endpoint.
    chat = ChatOpenAI(
        api_key=args.api_key,
        base_url=args.base,
        model=args.model,
        temperature=0.2,
        max_tokens=1024,
    )

    parts = build_multimodal_content(args.prompt, args.image_url, args.image_path)
    # One user message carries all multimodal parts (text + image).
    message = {"role": "user", "content": parts}
    response = chat.invoke([message])

    # AIMessage exposes .content; fall back to str() for anything else.
    print(response.content if hasattr(response, "content") else str(response))


if __name__ == "__main__":
    main()