import os
from typing import Optional

# Load environment variables from a local .env file when python-dotenv is
# installed; the package is an optional convenience, not a requirement.
try:
    from dotenv import load_dotenv
    load_dotenv()
except ImportError:
    pass  # dotenv is not required


class BaseLLM:
    """Minimal interface that every concrete LLM backend implements."""

    def generate(self, prompt: str, max_length: int = 256) -> str:
        """Produce a completion for *prompt*; concrete subclasses override this."""
        raise NotImplementedError()


# LLM selection logic:
# - Keep qwen/Tongyi logic if DASHSCOPE_API_KEY is set and preferred.
# - Support OpenAI if OPENAI_API_KEY is set.
# - Support Hugging Face Inference API if HUGGINGFACE_API_KEY is set.
# - Support local transformers pipeline as fallback if installed.
# Configure preference via env var `LLM_PREFERENCE`: 'qwen', 'openai', 'hf', or 'auto' (default auto).

# Normalized backend preference ('qwen', 'openai', 'hf', or anything else,
# which is treated as 'auto' by _select_llm).
LLM_PREFERENCE: str = os.getenv("LLM_PREFERENCE", "auto").lower()
# API keys are read once at import time; each is None when its env var is unset.
DASHSCOPE_API_KEY: Optional[str] = os.getenv("DASHSCOPE_API_KEY")
OPENAI_API_KEY: Optional[str] = os.getenv("OPENAI_API_KEY")
HUGGINGFACE_API_KEY: Optional[str] = os.getenv("HUGGINGFACE_API_KEY")


# Lightweight LLM client shim.
# Bound by _select_llm() at import time; remains None when no backend can be
# initialized, in which case callers are expected to fall back to built-in
# mock behavior so unit tests can run without an external LLM.
llm: Optional[BaseLLM] = None


# Try to load OpenAI wrapper if available
def _init_openai():
    try:
        import openai
        openai.api_key = OPENAI_API_KEY

        class OpenAIWrapper(BaseLLM):
            def __init__(self):
                self.model = os.getenv("OPENAI_MODEL", "gpt-3.5-turbo")

            def generate(self, prompt: str, max_length: int = 512) -> str:
                # Use chat/completions API
                try:
                    response = openai.ChatCompletion.create(
                        model=self.model,
                        messages=[{"role": "user", "content": prompt}],
                        max_tokens=max_length,
                        temperature=0.7,
                    )
                    return response.choices[0].message["content"]
                except Exception as e:
                    # fallback to completion API (for older models)
                    try:
                        response = openai.Completion.create(
                            model=self.model,
                            prompt=prompt,
                            max_tokens=max_length,
                            temperature=0.7,
                        )
                        return response.choices[0].text
                    except Exception:
                        raise e

        if OPENAI_API_KEY:
            return OpenAIWrapper()
    except Exception:
        return None


# Try to load Tongyi wrapper if available
def _init_tongyi():
    try:
        from langchain_community.llms import Tongyi  # type: ignore

        class TongyiWrapper(BaseLLM):
            def __init__(self, api_key: str):
                self._client = Tongyi(model_name="Qwen-Turbo-2025-04-28", dashscope_api_key=api_key)

            def generate(self, prompt: str, max_length: int = 256) -> str:
                # LangChain Tongyi LLMs typically use .generate or call; provide robust handling
                try:
                    return str(self._client.invoke(prompt))
                except Exception:
                    # attempt other interface
                    try:
                        return str(self._client.generate(prompt))
                    except Exception as e:
                        raise

        if DASHSCOPE_API_KEY:
            return TongyiWrapper(DASHSCOPE_API_KEY)
    except Exception:
        return None


# Try to load Hugging Face Inference API wrapper
def _init_hf_inference():
    try:
        if HUGGINGFACE_API_KEY:
            from huggingface_hub import InferenceClient  # type: ignore

            client = InferenceClient(token=HUGGINGFACE_API_KEY)

            class HFInferenceWrapper(BaseLLM):
                def __init__(self, client: "InferenceClient"):
                    self.client = client

                def generate(self, prompt: str, max_length: int = 512) -> str:
                    # Use text generation default model on HF Inference API (user can set model in env HF_MODEL)
                    model = os.getenv("HUGGINGFACE_MODEL", "google/flan-t5-large")
                    try:
                        out = self.client.text_generation(model=model, inputs=prompt, max_new_tokens=max_length)
                        # InferenceClient returns dict/list; extract text
                        if isinstance(out, dict) and "generated_text" in out:
                            return out["generated_text"]
                        if isinstance(out, list) and len(out) > 0 and "generated_text" in out[0]:
                            return out[0]["generated_text"]
                        return str(out)
                    except Exception:
                        # older API
                        res = self.client(inputs=prompt, parameters={"max_new_tokens": max_length})
                        if isinstance(res, dict) and "generated_text" in res:
                            return res["generated_text"]
                        return str(res)

            return HFInferenceWrapper(client)
    except Exception:
        return None


# Try to load local transformers pipeline
def _init_transformers_pipeline():
    try:
        from transformers import pipeline  # type: ignore

        # default model can be overridden with env TRANSFORMERS_MODEL
        model = os.getenv("TRANSFORMERS_MODEL", "google/flan-t5-large")
        pipe = pipeline("text2text-generation", model=model)

        class TransformersWrapper(BaseLLM):
            def __init__(self, pipe):
                self.pipe = pipe

            def generate(self, prompt: str, max_length: int = 512) -> str:
                outs = self.pipe(prompt, max_length=max_length, truncation=True)
                if isinstance(outs, list) and len(outs) > 0:
                    first = outs[0]
                    if isinstance(first, dict) and "generated_text" in first:
                        return first["generated_text"]
                    if isinstance(first, str):
                        return first
                return str(outs)

        return TransformersWrapper(pipe)
    except Exception:
        return None


# Selection logic
def _select_llm():
    """Bind the module-level `llm` to the first backend that initializes.

    Recognized LLM_PREFERENCE values are 'qwen', 'openai', and 'hf'; any
    other value (including the default 'auto') prefers OpenAI, then the HF
    Inference API, then a local transformers pipeline, then qwen/Tongyi.
    """
    global llm

    fallback_orders = {
        "qwen": (_init_tongyi, _init_openai, _init_hf_inference, _init_transformers_pipeline),
        "openai": (_init_openai, _init_tongyi, _init_hf_inference, _init_transformers_pipeline),
        "hf": (_init_hf_inference, _init_transformers_pipeline, _init_openai, _init_tongyi),
    }
    auto_order = (_init_openai, _init_hf_inference, _init_transformers_pipeline, _init_tongyi)

    llm = None
    for init_backend in fallback_orders.get(LLM_PREFERENCE, auto_order):
        llm = init_backend()
        if llm is not None:
            break


# initialize at import time: importing this module eagerly selects a backend
# and binds it to the module-level `llm` (left as None if none is available)
_select_llm()


