from typing import List, Optional
from datetime import datetime
import uuid
import json
from app.database.database import get_db_with_dict_factory
from app.models.ai_model import AIModelCreate, AIModelUpdate
import asyncio

class AIModelService:
    """Service layer for managing and exercising configured AI models.

    Provides SQLite-backed CRUD over the ``ai_models`` table plus
    connectivity tests and (streaming) text generation against
    OpenAI-compatible, Anthropic, and Google generative APIs.
    """

    def __init__(self):
        # DB connection whose rows support dict-style access
        # (rows are read as model['name'] throughout this service).
        self.db = get_db_with_dict_factory()

    async def get_all_models(self):
        """Return every row from ai_models, ordered alphabetically by name."""
        cur = self.db.cursor()
        cur.execute("SELECT * FROM ai_models ORDER BY name")
        return cur.fetchall()

    async def get_model_by_id(self, model_id):
        """Fetch a single AI model row by primary key; None when absent."""
        cur = self.db.cursor()
        cur.execute("SELECT * FROM ai_models WHERE id = ?", (model_id,))
        return cur.fetchone()

    async def create_model(self, model_data: AIModelCreate):
        """Insert a new AI model record and return the stored row."""
        new_id = str(uuid.uuid4())
        timestamp = datetime.now().isoformat()

        # SQLite has no native boolean type; persist flags as 0/1 integers.
        row = (
            new_id,
            model_data.name,
            model_data.provider,
            model_data.model_id,
            model_data.api_type,
            model_data.description,
            model_data.max_tokens,
            model_data.temperature_default,
            model_data.api_key,
            model_data.api_endpoint,
            model_data.api_version,
            1 if model_data.stream_enabled else 0,
            1 if model_data.is_active else 0,
            timestamp,
            timestamp,
        )

        cursor = self.db.cursor()
        cursor.execute(
            """
            INSERT INTO ai_models (
                id, name, provider, model_id, api_type, description, 
                max_tokens, temperature_default, api_key, api_endpoint, 
                api_version, stream_enabled, is_active, created_at, updated_at
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            """,
            row,
        )
        self.db.commit()

        # Read the row back so the caller gets exactly what was persisted.
        return await self.get_model_by_id(new_id)

    async def update_model(self, model_id: str, model_data: AIModelUpdate):
        """Overwrite an existing AI model row.

        Returns the updated row, or None when no row with the given id
        exists.

        NOTE(review): every column is written from model_data, so fields
        left unset on a partial-update payload would clobber stored
        values — confirm AIModelUpdate always carries the full record.
        """
        cursor = self.db.cursor()

        # Bail out early when the target row does not exist.
        cursor.execute("SELECT * FROM ai_models WHERE id = ?", (model_id,))
        if not cursor.fetchone():
            return None

        now = datetime.now().isoformat()

        cursor.execute(
            """
            UPDATE ai_models SET
                name = ?, provider = ?, model_id = ?, api_type = ?, description = ?,
                max_tokens = ?, temperature_default = ?, api_key = ?, api_endpoint = ?,
                api_version = ?, stream_enabled = ?, is_active = ?, updated_at = ?
            WHERE id = ?
            """,
            (
                model_data.name,
                model_data.provider,
                model_data.model_id,
                model_data.api_type,
                model_data.description,
                model_data.max_tokens,
                model_data.temperature_default,
                model_data.api_key,
                model_data.api_endpoint,
                model_data.api_version,
                1 if model_data.stream_enabled else 0,  # flags stored as 0/1
                1 if model_data.is_active else 0,
                now,
                model_id,
            ),
        )
        self.db.commit()

        return await self.get_model_by_id(model_id)

    async def delete_model(self, model_id: str):
        """Delete the AI model with the given id.

        Returns:
            True if a row was deleted, False if no such model existed.
        """
        cursor = self.db.cursor()
        # A single DELETE is atomic and avoids the check-then-delete race
        # of the previous SELECT+DELETE pair; rowcount reports whether a
        # row was actually removed.
        cursor.execute("DELETE FROM ai_models WHERE id = ?", (model_id,))
        deleted = cursor.rowcount > 0
        self.db.commit()
        return deleted

    async def test_ai_model(self, model_id: str, test_message: str) -> str:
        """Send a short test message through the model identified by
        model_id and return the provider's reply (or an error string).

        Raises:
            ValueError: when no model row matches model_id.
        """
        cursor = self.db.cursor()
        cursor.execute("SELECT * FROM ai_models WHERE id = ?", (model_id,))
        model = cursor.fetchone()

        if not model:
            print(f"错误: 未找到 ID 为 {model_id} 的模型")
            raise ValueError("AI Model not found")

        print(f"测试模型: {model['name']}, 类型: {model['api_type']}, 模型ID: {model['model_id']}")

        api_type = model['api_type']
        try:
            # Route to the provider-specific tester; errors are reported
            # as strings rather than propagated.
            if api_type == 'openai':
                return await self._test_openai(model, test_message)
            if api_type == 'anthropic':
                return await self._test_anthropic(model, test_message)
            if api_type == 'google':
                return await self._test_google(model, test_message)
            if api_type == 'custom':
                return f"自定义 API 类型 ({model['model_id']}) 的测试响应: 这是一个模拟响应"
            return f"暂不支持测试 {api_type} 类型的 API"
        except Exception as e:
            print(f"API 调用失败: {str(e)}")
            import traceback
            print(traceback.format_exc())
            return f"API 调用失败: {str(e)}"

    async def _test_openai(self, model, test_message):
        """Test an OpenAI-compatible API with a single short request.

        Tries the modern chat-completions client first; on AttributeError
        falls back to the legacy completions endpoint; if the SDK call
        fails entirely, retries via a raw HTTP POST with `requests`.

        Args:
            model: dict-like DB row (api_key, api_endpoint, api_version,
                model_id, temperature_default, optionally stream_enabled).
            test_message: user message to send.

        Returns:
            The model's reply text, or an error description string.
        """
        try:
            print(f"配置 OpenAI 兼容 API: 模型={model['model_id']}")
            print(f"API 端点: {model['api_endpoint'] or '默认端点'}")
            
            # Use the new-style (>=1.0) OpenAI SDK.
            from openai import OpenAI
            
            # Build client constructor arguments.
            client_args = {"api_key": model['api_key']}
            
            # Use the custom endpoint when one is configured.
            if model['api_endpoint']:
                client_args["base_url"] = model['api_endpoint']
            
            # Create the client.
            client = OpenAI(**client_args)
            
            # Attach an api-version header when configured (Azure-style APIs).
            headers = {}
            if model['api_version']:
                headers["api-version"] = model['api_version']
                print(f"使用 API 版本: {model['api_version']}")
            
            # Issue the API request.
            print("发送 API 请求...")
            
            # Assemble the request parameters.
            request_params = {
                "model": model['model_id'],
                "messages": [
                    {"role": "system", "content": "你是一个有用的AI助手。"},
                    {"role": "user", "content": test_message}
                ],
                "max_tokens": 100,
                "temperature": model['temperature_default']
            }
            
            # Honour the row's stream_enabled flag when the column exists.
            stream_enabled = False
            if 'stream_enabled' in model:
                stream_enabled = bool(model['stream_enabled'])
                request_params["stream"] = stream_enabled
                print(f"流式输出: {'启用' if stream_enabled else '禁用'}")
            
            # Forward any extra headers.
            if headers:
                request_params["extra_headers"] = headers
            
            print(f"请求参数: {request_params}")
            
            # Send the request.
            try:
                if stream_enabled:
                    # Streaming mode: accumulate delta chunks into one string.
                    print("使用流式响应模式")
                    full_response = ""
                    for chunk in client.chat.completions.create(**request_params):
                        if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
                            content = chunk.choices[0].delta.content
                            if content:
                                print(f"收到流式内容: {content}")
                                full_response += content
                    
                    print(f"完整流式响应: {full_response}")
                    return full_response
                else:
                    # Non-streaming mode: a single response object.
                    print("使用非流式响应模式")
                    response = client.chat.completions.create(**request_params)
                    print(f"API 响应: {response}")
                    return response.choices[0].message.content
            except AttributeError as ae:
                print(f"AttributeError: {ae}")
                # Fall back to the legacy completions endpoint.
                # NOTE(review): `headers` is not forwarded on this legacy
                # path — confirm whether api-version matters here.
                print("尝试使用旧版 API 格式...")
                response = client.completions.create(
                    model=model['model_id'],
                    prompt=test_message,
                    max_tokens=100,
                    temperature=model['temperature_default'],
                    stream=stream_enabled
                )
                
                if stream_enabled:
                    full_response = ""
                    for chunk in response:
                        if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
                            content = chunk.choices[0].text
                            if content:
                                print(f"收到流式内容: {content}")
                                full_response += content
                    
                    print(f"完整流式响应: {full_response}")
                    return full_response
                else:
                    print(f"API 响应: {response}")
                    return response.choices[0].text
            
        except Exception as e:
            print(f"API 调用失败: {str(e)}")
            import traceback
            print(traceback.format_exc())
            
            # Last resort: call the HTTP API directly with requests.
            try:
                print("尝试使用 requests 直接调用 API...")
                import requests
                import json
                
                # Build the API URL.
                api_url = model['api_endpoint'] or "https://api.openai.com/v1"
                if not api_url.endswith("/"):
                    api_url += "/"
                api_url += "chat/completions"
                
                # Build the request headers.
                headers = {
                    "Content-Type": "application/json",
                    "Authorization": f"Bearer {model['api_key']}"
                }
                
                if model['api_version']:
                    headers["api-version"] = model['api_version']
                
                # Build the request body.
                data = {
                    "model": model['model_id'],
                    "messages": [
                        {"role": "system", "content": "你是一个有用的AI助手。"},
                        {"role": "user", "content": test_message}
                    ],
                    "max_tokens": 100,
                    "temperature": model['temperature_default'],
                    "stream": False  # the raw HTTP fallback does not stream
                }
                
                print(f"API URL: {api_url}")
                print(f"请求头: {headers}")
                print(f"请求体: {data}")
                
                # Send the request.
                response = requests.post(
                    api_url,
                    headers=headers,
                    data=json.dumps(data),
                    timeout=30
                )
                
                # Raise on HTTP error status.
                response.raise_for_status()
                result = response.json()
                print(f"API 响应: {result}")
                
                # Extract the reply from either chat or legacy response shape.
                if "choices" in result and len(result["choices"]) > 0:
                    if "message" in result["choices"][0]:
                        return result["choices"][0]["message"]["content"]
                    elif "text" in result["choices"][0]:
                        return result["choices"][0]["text"]
                
                return f"无法解析 API 响应: {result}"
                
            except Exception as req_error:
                print(f"直接 API 调用也失败: {str(req_error)}")
                import traceback
                print(traceback.format_exc())
                return f"API 调用失败: {str(e)}\n直接调用也失败: {str(req_error)}"

    async def _test_anthropic(self, model, test_message):
        """Run a one-shot test request against the Anthropic messages API."""
        try:
            import anthropic

            print(f"配置 Anthropic API: 模型={model['model_id']}")

            # Client configured with the stored key.
            client = anthropic.Anthropic(api_key=model['api_key'])

            print("发送 Anthropic API 请求...")
            reply = client.messages.create(
                model=model['model_id'],
                max_tokens=100,
                temperature=model['temperature_default'],
                system="你是一个有用的AI助手。",
                messages=[{"role": "user", "content": test_message}],
            )

            print(f"Anthropic API 响应: {reply}")
            return reply.content[0].text
        except Exception as e:
            print(f"Anthropic API 调用失败: {str(e)}")
            import traceback
            print(traceback.format_exc())
            return f"Anthropic API 调用失败: {str(e)}"

    async def _test_google(self, model, test_message):
        """Run a one-shot test request against the Google Generative AI API."""
        try:
            import google.generativeai as genai

            print(f"配置 Google API: 模型={model['model_id']}")

            # SDK is configured globally with the stored key.
            genai.configure(api_key=model['api_key'])

            print("发送 Google API 请求...")
            gen_model = genai.GenerativeModel(model['model_id'])
            result = gen_model.generate_content(test_message)

            print(f"Google API 响应: {result}")
            return result.text
        except Exception as e:
            print(f"Google API 调用失败: {str(e)}")
            import traceback
            print(traceback.format_exc())
            return f"Google API 调用失败: {str(e)}"

    async def stream_ai_model(self, model_id: str, test_message: str):
        """Async generator yielding streamed reply chunks from the model.

        Raises:
            ValueError: when no model row matches model_id.
        """
        cursor = self.db.cursor()
        cursor.execute("SELECT * FROM ai_models WHERE id = ?", (model_id,))
        model = cursor.fetchone()

        if not model:
            print(f"错误: 未找到 ID 为 {model_id} 的模型")
            raise ValueError("AI Model not found")

        print(f"流式测试模型: {model['name']}, 类型: {model['api_type']}, 模型ID: {model['model_id']}")

        # Provider dispatch table for streaming testers.
        streamers = {
            'openai': self._stream_openai,
            'anthropic': self._stream_anthropic,
            'google': self._stream_google,
        }
        streamer = streamers.get(model['api_type'])

        if streamer is not None:
            async for chunk in streamer(model, test_message):
                yield chunk
        else:
            # Simulate streaming for unsupported API types.
            response = f"暂不支持 {model['api_type']} 类型的流式 API"
            for char in response:
                yield char
                await asyncio.sleep(0.01)  # typewriter effect

    async def _stream_openai(self, model, test_message):
        """Stream a test completion from an OpenAI-compatible API.

        Yields content chunks as they arrive; on AttributeError falls back
        to the legacy completions endpoint. On failure yields a single
        error string instead of raising.
        """
        try:
            print(f"配置 OpenAI 兼容流式 API: 模型={model['model_id']}")
            
            # Use the new-style (>=1.0) OpenAI SDK.
            from openai import OpenAI
            
            # Build client constructor arguments.
            client_args = {"api_key": model['api_key']}
            
            # Use the custom endpoint when one is configured.
            if model['api_endpoint']:
                client_args["base_url"] = model['api_endpoint']
                print(f"使用自定义端点: {model['api_endpoint']}")
            
            # Create the client.
            client = OpenAI(**client_args)
            
            # Attach an api-version header when configured (Azure-style APIs).
            headers = {}
            if model['api_version']:
                headers["api-version"] = model['api_version']
                print(f"使用 API 版本: {model['api_version']}")
            
            # Assemble the request parameters.
            request_params = {
                "model": model['model_id'],
                "messages": [
                    {"role": "system", "content": "你是一个有用的AI助手。"},
                    {"role": "user", "content": test_message}
                ],
                "max_tokens": model['max_tokens'],
                "temperature": model['temperature_default'],
                "stream": True  # this code path always streams
            }
            
            # Forward any extra headers.
            if headers:
                request_params["extra_headers"] = headers
            
            print(f"流式请求参数: {request_params}")
            
            # Issue the streaming request.
            try:
                stream = client.chat.completions.create(**request_params)
                for chunk in stream:
                    if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
                        content = chunk.choices[0].delta.content
                        if content:
                            print(f"收到流式内容: {content}")
                            yield content
            except AttributeError as ae:
                print(f"流式 AttributeError: {ae}")
                # Fall back to the legacy completions endpoint.
                print("尝试使用旧版 API 流式格式...")
                stream = client.completions.create(
                    model=model['model_id'],
                    prompt=test_message,
                    max_tokens=model['max_tokens'],
                    temperature=model['temperature_default'],
                    stream=True
                )
                
                for chunk in stream:
                    if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
                        content = chunk.choices[0].text
                        if content:
                            print(f"收到流式内容: {content}")
                            yield content
                        
        except Exception as e:
            print(f"流式 API 调用失败: {str(e)}")
            import traceback
            print(traceback.format_exc())
            yield f"流式 API 调用失败: {str(e)}"

    async def _stream_anthropic(self, model, test_message):
        """Yield streamed text chunks from the Anthropic messages API."""
        try:
            import anthropic

            print(f"配置 Anthropic 流式 API: 模型={model['model_id']}")

            client = anthropic.Anthropic(api_key=model['api_key'])

            print("发送 Anthropic 流式 API 请求...")
            stream_ctx = client.messages.stream(
                model=model['model_id'],
                max_tokens=model['max_tokens'],
                temperature=model['temperature_default'],
                system="你是一个有用的AI助手。",
                messages=[{"role": "user", "content": test_message}],
            )
            with stream_ctx as stream:
                for text in stream.text_stream:
                    print(f"收到 Anthropic 流式内容: {text}")
                    yield text

        except Exception as e:
            print(f"Anthropic 流式 API 调用失败: {str(e)}")
            import traceback
            print(traceback.format_exc())
            yield f"Anthropic 流式 API 调用失败: {str(e)}"

    async def _stream_google(self, model, test_message):
        """Yield streamed text chunks from the Google Generative AI API."""
        try:
            import google.generativeai as genai

            print(f"配置 Google 流式 API: 模型={model['model_id']}")

            genai.configure(api_key=model['api_key'])

            print("发送 Google 流式 API 请求...")
            gen_model = genai.GenerativeModel(model['model_id'])
            stream = gen_model.generate_content(test_message, stream=True)

            for part in stream:
                # Skip chunks that carry no text attribute.
                if hasattr(part, 'text'):
                    print(f"收到 Google 流式内容: {part.text}")
                    yield part.text

        except Exception as e:
            print(f"Google 流式 API 调用失败: {str(e)}")
            import traceback
            print(traceback.format_exc())
            yield f"Google 流式 API 调用失败: {str(e)}"

    async def generate_text(self, model_id: str, system_prompt: str, user_prompt: str) -> str:
        """Generate a single completion using the configured model.

        Raises:
            ValueError: for an unknown model_id or unsupported api_type.
        """
        model = await self.get_model_by_id(model_id)
        if not model:
            raise ValueError(f"找不到 ID 为 {model_id} 的模型")

        print(f"使用模型生成文本: {model['name']} (ID: {model_id})")
        print(f"系统提示词: {system_prompt}")
        print(f"用户提示词: {user_prompt}")

        # Provider dispatch table.
        generators = {
            "openai": self._generate_with_openai,
            "anthropic": self._generate_with_anthropic,
            "google": self._generate_with_google,
        }
        generator = generators.get(model["api_type"])
        if generator is None:
            raise ValueError(f"不支持的 API 类型: {model['api_type']}")
        return await generator(model, system_prompt, user_prompt)
        
    async def _generate_with_openai(self, model, system_prompt, user_prompt):
        """Produce a single chat completion via an OpenAI-compatible API."""
        try:
            from openai import OpenAI

            kwargs = {"api_key": model['api_key']}
            # Honour a configured custom endpoint.
            if model['api_endpoint']:
                kwargs["base_url"] = model['api_endpoint']
            client = OpenAI(**kwargs)

            messages = [
                {"role": "system", "content": system_prompt or "你是一个有用的AI助手。"},
                {"role": "user", "content": user_prompt}
            ]
            result = client.chat.completions.create(
                model=model['model_id'],
                messages=messages,
                max_tokens=model['max_tokens'],
                temperature=model['temperature_default'],
            )
            return result.choices[0].message.content
        except Exception as e:
            print(f"OpenAI API 调用失败: {str(e)}")
            import traceback
            print(traceback.format_exc())
            return f"OpenAI API 调用失败: {str(e)}"

    async def _generate_with_anthropic(self, model, system_prompt, user_prompt):
        """Produce a single completion via the Anthropic messages API."""
        try:
            import anthropic

            client = anthropic.Anthropic(api_key=model['api_key'])

            result = client.messages.create(
                model=model['model_id'],
                max_tokens=model['max_tokens'],
                temperature=model['temperature_default'],
                system=system_prompt or "你是一个有用的AI助手。",
                messages=[{"role": "user", "content": user_prompt}],
            )
            return result.content[0].text
        except Exception as e:
            print(f"Anthropic API 调用失败: {str(e)}")
            import traceback
            print(traceback.format_exc())
            return f"Anthropic API 调用失败: {str(e)}"

    async def _generate_with_google(self, model, system_prompt, user_prompt):
        """Produce a single completion via the Google Generative AI API."""
        try:
            import google.generativeai as genai

            genai.configure(api_key=model['api_key'])
            gen_model = genai.GenerativeModel(model['model_id'])

            # This API takes one prompt string, so fold the system prompt
            # in front of the user prompt when one is given.
            prompt = f"{system_prompt}\n\n{user_prompt}" if system_prompt else user_prompt

            result = gen_model.generate_content(prompt)
            return result.text
        except Exception as e:
            print(f"Google API 调用失败: {str(e)}")
            import traceback
            print(traceback.format_exc())
            return f"Google API 调用失败: {str(e)}"

    async def generate_text_stream(self, model_id: str, system_prompt: str, user_prompt: str):
        """Async generator producing Server-Sent-Events chunks (`data: ...`),
        terminated by a `data: [DONE]` sentinel."""
        model = await self.get_model_by_id(model_id)
        if not model:
            yield f"data: 找不到 ID 为 {model_id} 的模型\n\n"
            return

        print(f"使用模型流式生成文本: {model['name']} (ID: {model_id})")
        print(f"系统提示词: {system_prompt}")
        print(f"用户提示词: {user_prompt}")

        # Provider dispatch table.
        streamers = {
            "openai": self._stream_with_openai,
            "anthropic": self._stream_with_anthropic,
            "google": self._stream_with_google,
        }
        streamer = streamers.get(model["api_type"])

        if streamer is None:
            yield f"data: 不支持的 API 类型: {model['api_type']}\n\n"
        else:
            async for chunk in streamer(model, system_prompt, user_prompt):
                yield f"data: {chunk}\n\n"

        # Terminate the SSE stream.
        yield "data: [DONE]\n\n"

    async def _stream_with_openai(self, model, system_prompt, user_prompt):
        """Yield streamed content chunks from an OpenAI-compatible API."""
        try:
            from openai import OpenAI

            kwargs = {"api_key": model['api_key']}
            # Honour a configured custom endpoint.
            if model['api_endpoint']:
                kwargs["base_url"] = model['api_endpoint']
            client = OpenAI(**kwargs)

            stream = client.chat.completions.create(
                model=model['model_id'],
                messages=[
                    {"role": "system", "content": system_prompt or "你是一个有用的AI助手。"},
                    {"role": "user", "content": user_prompt}
                ],
                max_tokens=model['max_tokens'],
                temperature=model['temperature_default'],
                stream=True,
            )

            for chunk in stream:
                choice = chunk.choices[0]
                if hasattr(choice, 'delta') and hasattr(choice.delta, 'content'):
                    piece = choice.delta.content
                    if piece:
                        yield piece
        except Exception as e:
            print(f"OpenAI 流式 API 调用失败: {str(e)}")
            import traceback
            print(traceback.format_exc())
            yield f"OpenAI 流式 API 调用失败: {str(e)}"

    async def _stream_with_anthropic(self, model, system_prompt, user_prompt):
        """Yield streamed text chunks from the Anthropic messages API."""
        try:
            import anthropic

            client = anthropic.Anthropic(api_key=model['api_key'])

            stream_ctx = client.messages.stream(
                model=model['model_id'],
                max_tokens=model['max_tokens'],
                temperature=model['temperature_default'],
                system=system_prompt or "你是一个有用的AI助手。",
                messages=[{"role": "user", "content": user_prompt}],
            )
            with stream_ctx as stream:
                for text in stream.text_stream:
                    yield text
        except Exception as e:
            print(f"Anthropic 流式 API 调用失败: {str(e)}")
            import traceback
            print(traceback.format_exc())
            yield f"Anthropic 流式 API 调用失败: {str(e)}"

    async def _stream_with_google(self, model, system_prompt, user_prompt):
        """Yield streamed text chunks from the Google Generative AI API."""
        try:
            import google.generativeai as genai

            genai.configure(api_key=model['api_key'])
            gen_model = genai.GenerativeModel(model['model_id'])

            # This API takes one prompt string, so fold the system prompt
            # in front of the user prompt when one is given.
            prompt = f"{system_prompt}\n\n{user_prompt}" if system_prompt else user_prompt

            stream = gen_model.generate_content(prompt, stream=True)
            for part in stream:
                # Skip chunks that carry no text attribute.
                if hasattr(part, 'text'):
                    yield part.text
        except Exception as e:
            print(f"Google 流式 API 调用失败: {str(e)}")
            import traceback
            print(traceback.format_exc())
            yield f"Google 流式 API 调用失败: {str(e)}"

    def __del__(self):
        """Best-effort close of the database connection at finalization.

        __del__ may run during interpreter shutdown, when attributes and
        modules can already be torn down — it must never raise, since
        exceptions here are unraisable and only pollute stderr.
        """
        try:
            db = getattr(self, 'db', None)
            if db is not None:
                db.close()
        except Exception:
            pass