import logging
from langchain.docstore.document import Document
import os
from langchain_openai import ChatOpenAI, AzureChatOpenAI
from langchain_google_vertexai import ChatVertexAI
from langchain_groq import ChatGroq
from langchain_google_vertexai import HarmBlockThreshold, HarmCategory
from langchain_experimental.graph_transformers.diffbot import DiffbotGraphTransformer
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from langchain_experimental.graph_transformers import LLMGraphTransformer
from langchain_core.prompts import ChatPromptTemplate
from langchain_anthropic import ChatAnthropic
from langchain_fireworks import ChatFireworks
from langchain_aws import ChatBedrock
from langchain_community.chat_models import ChatOllama
import boto3
import google.auth
from pydantic import BaseModel, Field
from typing import Any, List, Optional
from langchain_core.messages import BaseMessage, AIMessage
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.outputs import ChatResult, ChatGeneration

from src.shared.constants import MODEL_VERSIONS, PROMPT_TO_ALL_LLMs

class CustomChatLLM(BaseChatModel):
    """Minimal chat model for any OpenAI-compatible /v1/chat/completions endpoint."""

    base_url: str = Field(...)        # e.g. "http://host:port", no trailing slash
    api_key: str = Field(...)         # sent as a Bearer token
    model: str = Field(...)           # model identifier understood by the server
    temperature: float = Field(default=0)

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Send the conversation to the remote endpoint and wrap the reply in a ChatResult.

        Raises ``requests.HTTPError`` for non-2xx responses.
        """
        import requests

        # Preserve the conversation structure: previously all messages were
        # space-joined into a single "user" message, which discarded system
        # prompts and assistant turns. Map LangChain message types onto
        # OpenAI-style roles instead.
        role_map = {"human": "user", "ai": "assistant", "system": "system"}
        payload_messages = [
            {"role": role_map.get(m.type, "user"), "content": m.content}
            for m in messages
        ]

        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        data = {
            "model": self.model,
            "messages": payload_messages,
            "temperature": self.temperature,
        }
        if stop:
            # Honour LangChain's stop sequences (previously accepted but ignored).
            data["stop"] = stop
        data.update(kwargs)  # any extra OpenAI-compatible parameters

        response = requests.post(
            f"{self.base_url}/v1/chat/completions",
            headers=headers,
            json=data,
            timeout=120,  # avoid hanging forever on an unresponsive server
        )
        response.raise_for_status()

        # Extract the first choice's message from the response.
        message_data = response.json()["choices"][0]["message"]
        if "function_call" in message_data:
            # Function-call replies carry no text content; surface the call in
            # additional_kwargs the same way ChatOpenAI does.
            message = AIMessage(
                content="",
                additional_kwargs={"function_call": message_data["function_call"]},
            )
        else:
            message = AIMessage(content=message_data["content"])

        generation = ChatGeneration(message=message)
        return ChatResult(generations=[generation])

    @property
    def _llm_type(self) -> str:
        return "custom_chat_llm"

    def _generate_with_functions(
        self,
        messages: List[BaseMessage],
        functions: List[dict[str, Any]],
        function_call: Optional[str] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Convenience wrapper forwarding OpenAI-style function definitions."""
        return self._generate(messages, functions=functions, function_call=function_call, **kwargs)

def _default_llm():
    """Build the hard-coded default local model; returns (llm, model_name).

    NOTE(review): the endpoint and API key are hard-coded here — they should
    move to environment configuration.
    """
    llm = CustomChatLLM(
        base_url="http://1.15.125.13:11434",
        api_key="sk-3033&5004",
        model="qwen2.5:14b",
        temperature=0.85,
    )
    return llm, "qwen2.5:14b"


def get_llm(model: str = "llm"):
    """Retrieve the specified language model based on the model name.

    Returns a ``(llm, model_name)`` tuple. Unknown or unconfigured model names
    fall back to the default local model instead of raising.
    """
    # Normalise the requested name; None/blank means "use the default".
    model = model.strip() if model and model.strip() else "llm"

    # Default model short-circuits env-var lookup entirely.
    if model == "llm":
        return _default_llm()

    # Other models are configured through LLM_MODEL_CONFIG_<name> env vars.
    env_key = "LLM_MODEL_CONFIG_" + model
    env_value = os.environ.get(env_key)
    logging.info("Model: {}".format(env_key))

    if env_value is None:
        logging.warning(f"未找到模型 {model} 的配置,将使用默认llm模型")
        return _default_llm()

    llm = None
    model_name = model
    if "gemini" in model:
        model_name = env_value
        credentials, project_id = google.auth.default()
        # TODO(review): stub call carried over from SOURCE — fill in real
        # ChatVertexAI arguments (model_name, credentials, project, ...).
        llm = ChatVertexAI(...)
    elif "azure" in model:
        model_name, api_endpoint, api_key, api_version = env_value.split(",")
        # TODO(review): stub call carried over from SOURCE — fill in real
        # AzureChatOpenAI arguments (endpoint, key, version, deployment).
        llm = AzureChatOpenAI(...)
    # ... 其他模型的配置保持不变 ...

    if llm is None:
        # Previously this path fell through and raised NameError for any model
        # matching neither branch above; fall back to the default model instead.
        logging.warning(f"Model family for {model} is not handled; using the default llm model")
        return _default_llm()

    return llm, model_name


def get_combined_chunks(chunkId_chunkDoc_list):
    """Merge consecutive chunks into larger Documents before sending them to the LLM.

    The batch size comes from the NUMBER_OF_CHUNKS_TO_COMBINE env var; a
    missing, non-numeric, or non-positive value falls back to 1 (no combining)
    instead of raising TypeError/ValueError as before. Each combined Document
    records its source chunk ids in ``metadata["combined_chunk_ids"]``.
    """
    raw_value = os.environ.get("NUMBER_OF_CHUNKS_TO_COMBINE")
    try:
        # max(1, ...) guards against 0/negative values, which would make the
        # range() step below invalid.
        chunks_to_combine = max(1, int(raw_value))
    except (TypeError, ValueError):
        logging.warning(
            "NUMBER_OF_CHUNKS_TO_COMBINE is missing or not an integer (%r); defaulting to 1",
            raw_value,
        )
        chunks_to_combine = 1
    logging.info(f"Combining {chunks_to_combine} chunks before sending request to LLM")

    combined_chunk_document_list = []
    # Single pass: build each batch's combined text and id list together
    # (previously two parallel comprehensions walked the same ranges twice).
    for start in range(0, len(chunkId_chunkDoc_list), chunks_to_combine):
        batch = chunkId_chunkDoc_list[start : start + chunks_to_combine]
        combined_chunk_document_list.append(
            Document(
                page_content="".join(d["chunk_doc"].page_content for d in batch),
                metadata={"combined_chunk_ids": [d["chunk_id"] for d in batch]},
            )
        )
    return combined_chunk_document_list


async def get_graph_document_list(
    llm, combined_chunk_document_list, allowedNodes, allowedRelationship
):
    """Convert combined chunk Documents into graph documents with the given LLM.

    A DiffbotGraphTransformer is used directly (its API is synchronous); any
    other LLM is wrapped in an LLMGraphTransformer and converted asynchronously.
    """
    if hasattr(llm, "diffbot_api_key"):
        # Diffbot is already a graph transformer; no LLM wrapping needed.
        llm_transformer = llm
    else:
        # Enable "description" property extraction only for models known to
        # support it. The original condition chained `!=` comparisons with
        # `or` (always True for distinct names) and misspelled "ChatOpenAI"
        # as "ChatOenAI", so properties were never enabled — and it could
        # raise AttributeError when `get_name` was absent.
        if hasattr(llm, "get_name") and llm.get_name() in (
            "ChatOpenAI",
            "ChatVertexAI",
            "AzureChatOpenAI",
        ):
            node_properties = ["description"]
            relationship_properties = ["description"]
        else:
            node_properties = False
            relationship_properties = False
        llm_transformer = LLMGraphTransformer(
            llm=llm,
            node_properties=node_properties,
            relationship_properties=relationship_properties,
            allowed_nodes=allowedNodes,
            allowed_relationships=allowedRelationship,
            ignore_tool_usage=True,
            #prompt = ChatPromptTemplate.from_messages(["system",PROMPT_TO_ALL_LLMs])
        )

    if isinstance(llm, DiffbotGraphTransformer):
        # Diffbot's transformer exposes no async conversion API.
        graph_document_list = llm_transformer.convert_to_graph_documents(
            combined_chunk_document_list
        )
    else:
        graph_document_list = await llm_transformer.aconvert_to_graph_documents(
            combined_chunk_document_list
        )
    return graph_document_list


def _split_labels(raw):
    """Turn a comma-separated string into a clean list of labels.

    None/empty input yields []; surrounding whitespace is stripped and empty
    entries (e.g. from trailing commas) are dropped.
    """
    if not raw:
        return []
    return [part.strip() for part in raw.split(",") if part.strip()]


async def get_graph_from_llm(model, chunkId_chunkDoc_list, allowedNodes, allowedRelationship):
    """End-to-end: resolve the LLM, combine chunks, and extract graph documents."""
    llm, model_name = get_llm(model)
    combined_chunk_document_list = get_combined_chunks(chunkId_chunkDoc_list)

    # Whitespace around entries ("Person, Movie") previously survived the raw
    # split and produced labels like " Movie" that never match anything.
    allowedNodes = _split_labels(allowedNodes)
    allowedRelationship = _split_labels(allowedRelationship)

    graph_document_list = await get_graph_document_list(
        llm, combined_chunk_document_list, allowedNodes, allowedRelationship
    )
    return graph_document_list
