import asyncio
import os
import traceback
import json
import requests
import time
from math import trunc

from pydantic import BaseModel

from config import config

# Endpoint / credential configuration, overridable via environment variables.
DEEPSEEK_URL = os.getenv('DEEPSEEK_URL', "https://api.deepseek.com/v1")
# SECURITY NOTE(review): a real-looking API key is hard-coded as the fallback —
# it should be removed (and rotated) and the env var made mandatory.
DEEPSEEK_API_KEY = os.getenv('DEEPSEEK_API_KEY', "sk-2bb6a669991540a0931a7c9d37e8d2cd")
# Default chat model used by llm_request / http_request / sse_request below.
MODEL_NAME = os.getenv('MODEL_NAME', "deepseek-chat")


class LLMBase(BaseModel):
    """Base configuration for an OpenAI-compatible LLM endpoint: base URL + API key."""
    url: str
    api_key: str


class Deepseek(LLMBase):
    """DeepSeek endpoint config; defaults come from the module-level env lookups."""
    url: str = DEEPSEEK_URL
    api_key: str = DEEPSEEK_API_KEY


class Hunyuan(LLMBase):
    """Tencent Hunyuan (LKE) endpoint config.

    SECURITY NOTE(review): the API key is committed in source — move it to an
    environment variable (like the DeepSeek config) and rotate the exposed key.
    """
    url: str = "https://api.lkeap.cloud.tencent.com/v1"
    api_key: str = "sk-7QWj4U3eYu2lpQCO6MipG4m3GnbKOg44ysLjzZGG6XAlMoa6"


async def llm_request(
        system_prompt: str,
        history: list[dict],
        prompt: str,
        # model: str = "hunyuan-turbo",
        model: str = MODEL_NAME,
        stream: bool = False
):
    """Send a chat-completion request to the configured LLM endpoint.

    Args:
        system_prompt: Content of the leading system message.
        history: Prior conversation turns as OpenAI-style message dicts
            (inserted between the system message and the new prompt).
        prompt: The new user message appended last.
        model: Model identifier sent to the API.
        stream: When True, request SSE streaming and return the raw
            ``requests.Response``; otherwise return the assistant's
            message content as a ``str``.

    Raises:
        Exception: when the HTTP status is not 200, or on any network /
            parsing failure (logged with traceback, then re-raised).
    """
    print(f"🔍 LLM调用开始 - 模型: {model}, 流式: {stream}")
    print(f"🔍 系统提示词长度: {len(system_prompt)}")
    print(f"🔍 历史对话数量: {len(history)}")
    print(f"🔍 用户提示词: {prompt[:100]}...")

    try:
        llm = Deepseek()
        api_key = llm.api_key
        base_url = llm.url

        print(f"🔍 使用LLM配置:")
        print(f"   - URL: {base_url}")
        print(f"   - API Key: {api_key[:10]}...{api_key[-4:] if len(api_key) > 14 else '***'}")
        print(f"   - 模型: {model}")

        print(f"🔍 构建消息列表...")
        messages = [
            {'role': "system", "content": system_prompt},
        ]
        if history:
            messages += history
        messages.append({'role': "user", "content": prompt})

        print(f"🔍 构建消息列表，共 {len(messages)} 条消息")
        print(f"🔍 发送请求到LLM...")

        url = f"{base_url}/chat/completions"
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }

        data = {
            "model": model,
            "messages": messages,
            "temperature": 0.1
        }

        if stream:
            data["stream"] = True

        # requests.post blocks; run it in a worker thread so this coroutine
        # does not stall the event loop for up to the 60 s timeout.
        response = await asyncio.to_thread(
            requests.post,
            url,
            headers=headers,
            json=data,
            timeout=60,  # generous timeout — streaming responses can take a while
            stream=stream,
        )

        # Verify the status before declaring success in the log.
        if response.status_code != 200:
            error_text = response.text
            print(f"❌ LLM请求失败，状态码: {response.status_code}")
            print(f"❌ 错误响应: {error_text}")
            raise Exception(f"LLM请求失败: {response.status_code} - {error_text}")

        print(f"✅ LLM请求成功，状态码: {response.status_code}")

        if not stream:
            result = response.json()
            content = result['choices'][0]['message']['content']
            print(f"🔍 非流式响应长度: {len(content)}")
            return content

        print(f"🔍 返回流式响应对象")
        return response

    except Exception as e:
        print(f"❌ LLM调用失败: {e}")
        print(f"❌ 错误类型: {type(e).__name__}")
        print(f"❌ 详细错误信息:")
        traceback.print_exc()
        raise  # bare raise preserves the original traceback


async def http_request(
        system_prompt: str,
        history: list[dict],
        prompt: str,
        model: str = MODEL_NAME
):
    """Non-streaming convenience wrapper: returns the full answer as a string."""
    print(f"🚀 http_request开始调用")
    try:
        answer = await llm_request(system_prompt, history, prompt, model, stream=False)
        print(f"✅ http_request成功，结果长度: {len(answer)}")
        print(f"🔍 http_request结果: {answer[:200]}...")
    except Exception as e:
        print(f"❌ http_request失败: {e}")
        raise e
    return answer


async def sse_request(
        system_prompt: str,
        history: list[dict],
        prompt: str,
        # model: str = "hunyuan-turbo"
        model: str = MODEL_NAME,
):
    """Async generator: stream the LLM answer, yielding text fragments.

    Parses the OpenAI-style SSE stream from ``llm_request(stream=True)``:
    each ``data:`` line carries a JSON chunk whose delta may hold
    ``content``, ``reasoning_content`` or ``tool_calls``; every non-empty
    piece is yielded as it arrives. On failure an error string is yielded
    instead of raising, so consumers always receive printable text.
    """
    print(f"🚀 sse_request开始调用")
    try:
        response = await llm_request(system_prompt, history, prompt, model, stream=True)

        print(f"🔍 开始处理流式响应...")
        chunk_count = 0
        total_content = ""
        start_time = time.time()

        try:
            # NOTE(review): iter_lines() is a blocking iterator inside an
            # async generator — it can stall the event loop between chunks.
            for line in response.iter_lines():
                if line:
                    line = line.decode('utf-8')

                    # Keep logging light: echo only the first few raw lines.
                    if chunk_count < 3:
                        print(f"🔍 原始行: {line[:100]}...")

                    if line.startswith('data: '):
                        data = line[6:]
                        if data == '[DONE]':
                            end_time = time.time()
                            duration = end_time - start_time
                            print(f"✅ 流式响应完成，共处理 {chunk_count} 个chunk")
                            print(f"🔍 总内容长度: {len(total_content)}")
                            print(f"🔍 总耗时: {duration:.2f}秒")
                            if duration > 0:
                                print(f"🔍 平均速度: {len(total_content)/duration:.0f}字符/秒")
                            break

                        try:
                            chunk = json.loads(data)
                            chunk_count += 1

                            if 'choices' in chunk and len(chunk['choices']) > 0:
                                choice = chunk['choices'][0]

                                # A finish_reason chunk carries no text; skip it.
                                if choice.get('finish_reason') is not None:
                                    if chunk_count <= 3:  # only log early chunks
                                        print(f"🔍 收到finish_reason: {choice['finish_reason']}")
                                    continue

                                # Incremental payload lives in the delta.
                                if 'delta' in choice:
                                    delta = choice['delta']

                                    # Regular answer text.
                                    if 'content' in delta and delta['content']:
                                        content = delta['content']
                                        total_content += content
                                        yield content

                                    # Reasoning text (e.g. deepseek-reasoner).
                                    elif 'reasoning_content' in delta and delta['reasoning_content']:
                                        reasoning = delta['reasoning_content']
                                        total_content += reasoning
                                        yield reasoning

                                    # Tool-call deltas are stringified and streamed too.
                                    elif 'tool_calls' in delta and delta['tool_calls']:
                                        tool_calls = delta['tool_calls']
                                        tool_calls_str = str(tool_calls)
                                        total_content += tool_calls_str
                                        yield tool_calls_str

                        except json.JSONDecodeError as e:
                            print(f"⚠️ JSON解析失败: {e}, 数据: {data[:100]}...")
                            continue
                        except Exception as e:
                            print(f"⚠️ 处理chunk失败: {e}")
                            continue

        except Exception as e:
            print(f"❌ sse_request流式处理失败: {e}")
            # traceback is imported at module level; the former local
            # re-import here was redundant and has been removed.
            traceback.print_exc()
            yield f"发生未知错误: {e}"

    except Exception as e:
        print(f"❌ sse_request失败: {e}")
        traceback.print_exc()
        yield f"发生未知错误: {e}"


if __name__ == "__main__":
    async def test():
        """Smoke-test the streaming path: print chunks as they arrive."""
        stream = sse_request("你的名字叫二蛋", [], "你叫什么名字", model="deepseek-reasoner")
        async for chunk in stream:
            print(chunk, end="")

    # test() yields nothing to return; no point binding the result.
    asyncio.run(test())
