from common.log import logger
import os

from config import base_config
import requests
from langchain_ollama import OllamaLLM, ChatOllama, OllamaEmbeddings


def init_llm(use_online=False):
    """Initialise the LLM stack, preferring a local Ollama server.

    Tries the local Ollama instance first (unless *use_online* is True);
    if the server is unreachable or the configured model/embedding is not
    installed, falls back to the online providers listed in
    ``base_config.AVAILABLE_ONLINE_AI``.

    Args:
        use_online: When True, skip the local Ollama check entirely and go
            straight to the online providers.

    Returns:
        A ``(llm, embeddings, chat_llm)`` triple, or ``(None, None, None)``
        when no provider could be reached.
    """
    try:
        if use_online:
            logger.warning("[LLM INIT] Force to use online AI and raise Exception to skip to this logic.")
            raise Exception("[LLM] Force to use online AI.")
        # A bare GET on the root URL answers "Ollama is running" when healthy.
        # timeout added so a dead host fails fast instead of hanging import.
        is_ollama_running = requests.get(base_config.OLLAMA_URL, timeout=5).text == "Ollama is running"
        model_list = requests.get(base_config.OLLAMA_URL + '/api/tags', timeout=5).json()['models']
        # NOTE(review): /api/tags names usually carry a tag suffix (e.g.
        # "llama3:latest"); the exact match below assumes the configured
        # names include the tag — confirm against base_config.
        installed = {model["name"] for model in model_list}
        is_model_available = base_config.OLLAMA_MODEL in installed
        is_embedding_available = base_config.OLLAMA_EMBEDDING in installed
        if not (is_ollama_running and is_model_available and is_embedding_available):
            # Bug fix: previously this case fell through the `if` and the
            # function implicitly returned None (a single None, not a triple)
            # without ever trying the online fallback. Raise so the except
            # branch below performs the fallback.
            raise Exception(
                f"[LLM] Ollama check failed (running={is_ollama_running}, "
                f"model={is_model_available}, embedding={is_embedding_available}).")
        logger.info(f"[LLM INIT] Ollama is running and model {base_config.OLLAMA_MODEL} is used.")
        return OllamaLLM(model=base_config.OLLAMA_MODEL,
                         temperature=base_config.TEMPERATURE, base_url=base_config.OLLAMA_URL), \
            OllamaEmbeddings(model=base_config.OLLAMA_EMBEDDING, base_url=base_config.OLLAMA_URL), \
            ChatOllama(model=base_config.OLLAMA_MODEL, base_url=base_config.OLLAMA_URL,
                       temperature=base_config.TEMPERATURE)
    except Exception as e:
        logger.error(f"[LLM INIT] Ollama is not running. {e} Trying to use online model. ")
        available_ai = base_config.AVAILABLE_ONLINE_AI
        # Imported lazily so the module loads even when langchain_openai is
        # absent and the local Ollama path succeeds.
        from langchain_openai import OpenAIEmbeddings, ChatOpenAI
        for ai_conf in available_ai:
            for ai_key, ai in ai_conf.items():
                logger.info(f"[LLM INIT] Try to connect: {ai_key}")
                try:
                    # Use ChatOpenAI (not OpenAI) so requests hit the
                    # /chat/completions endpoint.
                    llm = ChatOpenAI(model=ai['MODEL'], temperature=base_config.TEMPERATURE,
                                     base_url=ai['URL'],
                                     api_key=ai['API_KEY'])
                    # OpenAIEmbeddings picks up its endpoint/key from the
                    # environment; point it at the provider's embeddings URL.
                    os.environ['OPENAI_API_KEY'] = ai['API_KEY']
                    os.environ['OPENAI_API_BASE'] = ai['EMBEDDINGS_URL']
                    embeddings = OpenAIEmbeddings(model=ai['EMBEDDINGS'])
                    cllm = ChatOpenAI(model=ai['MODEL'], temperature=base_config.TEMPERATURE,
                                      base_url=ai['URL'],
                                      api_key=ai['API_KEY'])
                    logger.info(
                        f"[LLM INIT] Connected to {ai_key} with {ai['MODEL']} using ChatOpenAI.")
                    return llm, embeddings, cllm
                except Exception as e:
                    logger.error(f"[LLM INIT] Failed to connect: {ai} with {e}")
                    continue
        logger.error("[LLM INIT] No LLM available.")
        return None, None, None


# Module-level singletons initialised at import time.
# NOTE(review): use_online=True hard-skips the local Ollama path even though
# init_llm defaults to False — confirm this is intentional. Any of the three
# may be None when no provider could be reached; importers must handle that.
llm, embeddings, cllm = init_llm(use_online=True)
