import asyncio
import json
import os
from typing import AsyncGenerator, Dict, Any, Callable, Optional, List, Union

from jsonschema import validate
from openai import AsyncOpenAI

import tools1
import tools2  # Import tool2.py

# Signature of a registered tool callback: accepts arbitrary keyword
# arguments and returns a JSON-serializable dict or list.
CallbackFunction = Callable[..., Union[dict, list]]

class VolcEngineClient:
    """Async client for the VolcEngine Ark (OpenAI-compatible) endpoint.

    Implements a three-step tool-calling pipeline:
      1. ``_detect_function_call`` asks the LLM to detect intent and extract
         tool-call arguments.
      2. ``_execute_functions`` validates the arguments against jsonschema
         templates and dispatches to the registered tool callables.
      3. ``_generate_final_response`` feeds the tool results back to the LLM
         to produce a natural-language answer.
    """

    def __init__(self, model_type: str = 'deepseek'):
        """Initialize the client.

        Args:
            model_type: Endpoint alias to use; one of 'doubao' or 'deepseek'.
        """
        self.model_type = model_type
        # NOTE(security): an API key was hardcoded here. Prefer supplying it
        # via the ARK_API_KEY environment variable; the literal is kept only
        # as a backward-compatible fallback and should be rotated and removed.
        self.client = AsyncOpenAI(
            base_url="https://ark.cn-beijing.volces.com/api/v3",
            api_key=os.getenv("ARK_API_KEY", "6c9cd376-a83b-4eaa-97af-4d3bd55d25db")
        )
        self.model_ids = {
            'doubao': 'ep-20250426163802-pxhrq',
            'deepseek': 'ep-20250410231550-wlff7'
        }

        # Shared schema for tools taking a required positionName plus an
        # optional city. jsonschema's "default" is annotation-only (not
        # enforced), and validate() never mutates, so one shared dict is safe.
        position_city_schema = {
            "type": "object",
            "properties": {
                "positionName": {"type": "string"},
                "city": {"type": "string", "default": None}
            },
            "required": ["positionName"]
        }

        # Per-tool argument validation templates.
        self.schemas = {
            "recommend_related_skills": {
                "type": "object",
                "properties": {
                    "tel": {"type": "string"}
                },
                "required": ["tel"]
            },
            "required_skills_for_position": {
                "type": "object",
                "properties": {
                    "positionName": {"type": "string"}
                },
                "required": ["positionName"]
            },
            "suitable_positions_for_skill": {
                "type": "object",
                "properties": {
                    "tech_list": {"type": "array", "items": {"type": "string"}}
                },
                "required": ["tech_list"]
            },
            "query_salary": position_city_schema,
            "query_education_level": position_city_schema,
            "query_position_count": position_city_schema,
            "query_position_count_by_company_category": {
                "type": "object",
                "properties": {
                    "positionName": {"type": "string"},
                    "company_category": {"type": "string"}
                },
                "required": ["positionName", "company_category"]
            },
            "generate_echarts_data": {
                "type": "object",
                "properties": {
                    "data": {"type": "object"},
                    "chart_type": {"type": "string", "enum": ["柱状图", "折线图", "饼图"]},
                    "func_name": {"type": "string"}
                },
                "required": ["data", "chart_type", "func_name"]
            }
        }

        # Tool-name -> callable registry. The lambdas adapt calling
        # conventions: some tools2 functions must not receive chart_type,
        # others take it explicitly, so stray kwargs are filtered or popped.
        self.tool_functions: Dict[str, CallbackFunction] = {
            "recommend_related_skills": tools1.recommend_related_skills,
            "required_skills_for_position": tools1.required_skills_for_position,
            "suitable_positions_for_skill": tools1.suitable_positions_for_skill,
            "query_education_level": lambda **kwargs: tools2.query_education_level(**{k: v for k, v in kwargs.items() if k in ['positionName', 'city']}),
            "query_position_count": lambda **kwargs: tools2.query_position_count(**{k: v for k, v in kwargs.items() if k in ['positionName', 'city']}),
            "query_salary": lambda **kwargs: tools2.query_salary(chart_type=kwargs.pop('chart_type', None), **kwargs),
            "query_position_count_by_company_category": lambda **kwargs: tools2.query_position_count_by_company_category(chart_type=kwargs.pop('chart_type', None), **kwargs),
            "generate_echarts_data": lambda **kwargs: tools2.generate_echarts_data(**{
                k: v for k, v in kwargs.items() if k in ['data', 'chart_type', 'func_name']
            })
        }
        # Tool specs advertised to the LLM (combined from tools1 and tools2).
        self.tools = tools1.tools + tools2.tools

    async def chat_completion(self, prompt: str, chart_type: str) -> dict:
        """Run the full intent-detect -> execute -> summarize pipeline.

        Args:
            prompt: The user's natural-language question.
            chart_type: Preferred chart type, forwarded to chart-capable tools.

        Returns:
            A dict with "status" ("success" | "normal" | "error") and
            "content"; on "success" it also carries "function_results".
        """
        try:
            # Step 1: ask the LLM to detect intent and extract parameters.
            detection = await self._detect_function_call(prompt)
            if detection["status"] == "function_call":
                # Step 2: execute the detected tool calls.
                function_results = await self._execute_functions(detection["function_calls"], chart_type)
                # Step 3: send results + the original question back to the
                # LLM for a natural-language answer.
                return await self._generate_final_response(prompt, function_results, chart_type)
            # No tool call detected: pass the LLM's direct answer (or the
            # detection-stage error message) through as a "normal" reply.
            return {"status": "normal", "content": detection["content"]}
        except Exception as e:
            print(f"调用 chat_completion 时出错: {e}")
            return {"status": "error", "content": "调用LLM时出错，请稍后重试。"}

    async def _detect_function_call(self, prompt: str) -> dict:
        """Ask the LLM whether the prompt maps to registered tools.

        Returns either
        ``{"status": "function_call", "function_calls": [{"name", "parameters"}, ...]}``
        or ``{"status": "normal"/"error", "content": ...}``.
        """
        try:
            response = await self.client.chat.completions.create(
                model=self.model_ids[self.model_type],
                messages=[{"role": "user", "content": prompt}],
                temperature=0.7,
                max_tokens=2000,
                tools=self.tools
            )

            message = response.choices[0].message
            if message.tool_calls:
                return {
                    "status": "function_call",
                    "function_calls": [
                        {
                            "name": tool_call.function.name,
                            # Arguments arrive as a JSON string from the API.
                            "parameters": json.loads(tool_call.function.arguments)
                        }
                        for tool_call in message.tool_calls
                    ]
                }
            return {"status": "normal", "content": message.content}
        except Exception as e:
            print(f"调用 _detect_function_call 时出错: {e}")
            return {"status": "error", "content": "调用LLM时出错，请稍后重试。"}

    async def _execute_functions(self, function_calls: List[Dict[str, Any]], chart_type: str) -> List[Dict[str, Any]]:
        """Validate and execute each detected tool call.

        Each result entry carries "name", "parameters", and either "result"
        on success or "error" on validation/execution failure; one failing
        call never aborts the others.
        """
        results = []
        for call in function_calls:
            func_name = call["name"]
            args = call["parameters"]

            # Strip parameters the target function does not accept.
            if func_name in ["query_education_level", "query_position_count"]:
                filtered_args = {k: v for k, v in args.items() if k in ['positionName', 'city']}
            elif func_name == "query_position_count_by_company_category":
                filtered_args = {k: v for k, v in args.items() if k in ['positionName', 'company_category']}
            else:
                filtered_args = args

            # Schema validation; a failure is recorded and the call skipped.
            if func_name in self.schemas:
                try:
                    validate(instance=filtered_args, schema=self.schemas[func_name])
                except Exception as e:
                    results.append({
                        "name": func_name,
                        "parameters": filtered_args,
                        "error": f"参数校验失败: {str(e)}"
                    })
                    continue

            # Dispatch; tools1 functions take no chart_type, tools2 ones do.
            try:
                func = self.tool_functions[func_name]
                if func_name in ["recommend_related_skills", "required_skills_for_position", "suitable_positions_for_skill"]:
                    result = await self._run_function(func, **filtered_args)
                else:
                    result = await self._run_function(func, chart_type=chart_type, **filtered_args)
                results.append({
                    "name": func_name,
                    "parameters": filtered_args,
                    "result": result
                })
            except Exception as e:
                results.append({
                    "name": func_name,
                    "parameters": filtered_args,
                    "error": str(e)
                })
        return results

    async def _run_function(self, func, **kwargs):
        """Await *func* if it is a coroutine function; otherwise run it in the
        default thread-pool executor so a blocking tool cannot stall the loop.
        """
        if asyncio.iscoroutinefunction(func):
            return await func(**kwargs)
        # get_running_loop() is correct here since this coroutine is always
        # awaited from inside the loop; get_event_loop() is deprecated for
        # this use.
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, lambda: func(**kwargs))

    async def _generate_final_response(self, original_prompt: str, function_results: List[Dict[str, Any]], chart_type: str) -> dict:
        """Summarize tool results into a natural-language answer via the LLM.

        ``chart_type`` is currently unused here; it is kept for interface
        compatibility with chat_completion.
        """
        try:
            # Build one description line per tool outcome (success or failure).
            results_description = []
            for result in function_results:
                if "error" in result:
                    results_description.append(f"函数 {result['name']} 调用失败: {result['error']}")
                else:
                    # Prefer the tool's advertised description; fall back to
                    # the bare function name if no spec matches.
                    func_desc = next(
                        (tool["function"]["description"] for tool in self.tools
                         if tool["function"]["name"] == result["name"]),
                        result["name"]
                    )
                    results_description.append(f"根据您的问题，我们查询了 {func_desc}。结果如下: {json.dumps(result['result'])}")
                    print("调用函数："+result["name"])

            # NOTE(review): this prompt only presents the data; it never
            # explicitly instructs the model to answer the question or to
            # produce a chart — confirm whether that is intended.
            final_prompt = f"""
用户的原始问题是: {original_prompt}

我们已经通过以下操作获取了相关数据:
{chr(10).join(results_description)}
"""

            # Second LLM round-trip to produce the final answer.
            response = await self.client.chat.completions.create(
                model=self.model_ids[self.model_type],
                messages=[{"role": "user", "content": final_prompt}],
                temperature=0.7,
                max_tokens=2000
            )

            return {"status": "success", "content": response.choices[0].message.content, "function_results": function_results}
        except Exception as e:
            print(f"调用 _generate_final_response 时出错: {e}")
            return {"status": "error", "content": "调用LLM时出错，请稍后重试。"}

    async def stream_generate(self, prompt: str) -> AsyncGenerator[str, None]:
        """Yield the LLM's answer incrementally as content deltas arrive."""
        try:
            async for chunk in await self.client.chat.completions.create(
                model=self.model_ids[self.model_type],
                messages=[{"role": "user", "content": prompt}],
                stream=True,
                temperature=0.7,
                max_tokens=2000
            ):
                if chunk.choices[0].delta.content:
                    yield chunk.choices[0].delta.content
        except Exception as e:
            print(f"调用 stream_generate 时出错: {e}")

    async def query_related_skills(self, skill: str, chart_type: str) -> str:
        """Recommend technologies related to *skill* as natural language."""
        try:
            prompt = f"我比较擅长{skill}，请给我推荐一些关联技术"
            response = await self.chat_completion(prompt, chart_type)
            # Every status variant carries a user-presentable "content".
            return response["content"]
        except Exception as e:
            print(f"调用 query_related_skills 时出错: {e}")
            return "调用LLM时出错，请稍后重试。"

    async def query_required_skills(self, position: str, chart_type: str) -> str:
        """Describe the skills required for *position* as natural language."""
        try:
            prompt = f"从事{position}岗位需要学什么技术？"
            response = await self.chat_completion(prompt, chart_type)
            return response["content"]
        except Exception as e:
            print(f"调用 query_required_skills 时出错: {e}")
            return "调用LLM时出错，请稍后重试。"

    async def query_suitable_positions(self, skills: List[str], chart_type: str) -> str:
        """Suggest positions matching the given *skills* as natural language."""
        try:
            skills_text = "、".join(skills)
            prompt = f"我会{skills_text}，适合什么岗位？"
            response = await self.chat_completion(prompt, chart_type)
            return response["content"]
        except Exception as e:
            print(f"调用 query_suitable_positions 时出错: {e}")
            return "调用LLM时出错，请稍后重试。"

# Usage example
async def usellm(type, prompt, chart_type):
    """Run one chat_completion round and normalize the result for callers.

    Args:
        type: Model alias forwarded to VolcEngineClient (shadows the builtin
            ``type``; name kept for backward compatibility with keyword callers).
        prompt: The user's question.
        chart_type: Preferred chart type for chart-capable tools.

    Returns:
        A ``([content], function_results)`` tuple; ``function_results`` is
        empty unless the pipeline reported "success" with tool output.
    """
    try:
        client = VolcEngineClient(model_type=type)
        response = await client.chat_completion(prompt, chart_type)
        # "normal" and "error" responses carry no "function_results" key, so
        # .get() collapses the original three identical-shaped branches.
        return [response["content"]], response.get("function_results", [])
    except Exception as e:
        print(f"调用 usellm 时出错: {e}")
        return ["调用LLM时出错，请稍后重试。"], []