'''
Author: WangNing
Date: 2025-04-25 20:28:56
LastEditors: WangNing
LastEditTime: 2025-04-27 09:57:10
FilePath: \linetest\backend\services\ai_service.py
Description: AI服务模块，提供任务拆分、执行和测试功能
Copyright (c) 2025 by VGE, All Rights Reserved. 
'''

import json
import time
import openai
import requests
import re
from datetime import datetime
from flask import g
from config.config import OPENAI_MODEL, DEEPSEEK_MODEL, DEEPSEEK_API_BASE, get_openai_api_key, get_deepseek_api_key
from utils.notifications import log_ai_conversation
from psycopg2.extras import RealDictCursor

# 设置API调用的超时时间（秒）
API_TIMEOUT = 180  # 3分钟的超时时间

# 获取用户AI设置
def get_user_ai_settings(conn, username):
    """Fetch a user's AI settings from the user_settings table.

    Args:
        conn: An open psycopg2 database connection.
        username: The user whose settings row is looked up.

    Returns:
        dict: The user's settings row when one exists; otherwise a
        default dict with 'preferred_ai_provider', 'openai_api_key' and
        'deepseek_api_key' built from the globally configured keys
        (sample placeholder keys are treated as unset).
    """
    cursor = conn.cursor(cursor_factory=RealDictCursor)
    try:
        cursor.execute(
            "SELECT * FROM user_settings WHERE username = %s",
            (username,)
        )
        settings = cursor.fetchone()
    finally:
        # Always release the cursor, even if the query fails.
        cursor.close()

    if settings:
        return settings

    # No per-user row: fall back to the globally configured keys.
    # Call each config getter once and normalize placeholders to None.
    deepseek_key = get_deepseek_api_key()
    if not deepseek_key or deepseek_key == "your_deepseek_api_key_here":
        deepseek_key = None

    openai_key = get_openai_api_key()
    if not openai_key or openai_key == "your_api_key_here":
        openai_key = None

    # Prefer DeepSeek when it has a valid key; otherwise default to
    # OpenAI (even with no key — callers report the missing key later).
    provider = 'deepseek' if deepseek_key else 'openai'

    return {
        'preferred_ai_provider': provider,
        'openai_api_key': openai_key,
        'deepseek_api_key': deepseek_key
    }

def ai_stream_response(task_description, conn=None, username='admin'):
    """Stream an AI answer for *task_description*.

    Generator yielding JSON-encoded string fragments: a "start" notice,
    streamed "content" chunks from the chosen provider, and a final
    payload under the "final_response" key (None on failure).

    Args:
        task_description: The user prompt to send to the AI provider.
        conn: Optional DB connection used to load per-user settings.
        username: User whose settings select the provider and keys.
    """
    try:
        print(f"[DEBUG-STREAM] 开始流式处理，用户名: {username}")
        # Defaults: DeepSeek provider with the globally configured keys.
        provider = 'deepseek'
        openai_key = get_openai_api_key()
        deepseek_key = get_deepseek_api_key()

        if conn:
            print(f"[DEBUG-STREAM] 数据库连接有效，获取用户[{username}]设置")
            user_settings = get_user_ai_settings(conn, username)
            provider = user_settings['preferred_ai_provider']
            print(f"[DEBUG-STREAM] 获取到用户设置: 首选AI={provider}")

            # Per-user keys override the global defaults when present.
            openai_key = user_settings.get('openai_api_key') or openai_key
            deepseek_key = user_settings.get('deepseek_api_key') or deepseek_key

        if provider == 'deepseek' and deepseek_key and deepseek_key != "your_deepseek_api_key_here":
            yield json.dumps({"type": "start", "message": "正在使用DeepSeek引擎..."})
            yield from deepseek_stream(task_description, deepseek_key)
            return

        if openai_key and openai_key != "your_api_key_here":
            yield json.dumps({"type": "start", "message": "正在使用OpenAI引擎..."})
            # Temporarily install the user's key on the openai module,
            # restoring whatever key was there before.
            saved_key = openai.api_key
            try:
                openai.api_key = openai_key
                yield from openai_stream(task_description)
            finally:
                openai.api_key = saved_key
            return

        # Neither provider has a usable key: emit an error event plus a
        # placeholder task tree telling the user to configure a key.
        error_msg = "未配置有效的API密钥"
        yield json.dumps({"type": "error", "message": error_msg})
        error_response = {
            "error": error_msg,
            "tasks": [
                {
                    "title": "配置错误",
                    "description": "请在用户设置中配置有效的API密钥",
                    "estimated_hours": 0,
                    "is_atomic": False,
                    "children": []
                }
            ]
        }
        yield json.dumps({"final_response": json.dumps(error_response)})

    except Exception as e:
        print(f"流式AI处理出错: {str(e)}")
        yield json.dumps({"type": "error", "message": str(e)})
        # On unexpected failure, signal "no final result" to the caller.
        yield json.dumps({"final_response": None})

def deepseek_stream(task_description, api_key):
    """Stream a chat completion from the DeepSeek API.

    Generator yielding JSON-encoded string fragments: simulated
    "thinking" progress events, buffered "content" chunks as they
    arrive, and a final payload under the "final_response" key
    (None on failure).

    Args:
        task_description: The user prompt to send.
        api_key: DeepSeek API key; empty/placeholder keys short-circuit
            into an error response without hitting the network.
    """
    # Reject missing or placeholder keys up front.
    if not api_key or api_key == "your_deepseek_api_key_here":
        error_msg = "DeepSeek API key未设置或无效"
        print(f"错误: {error_msg}")
        yield json.dumps({"type": "error", "message": error_msg})

        # Emit a minimal task tree telling the user to configure a key.
        fallback_response = {
            "error": error_msg,
            "tasks": [
                {
                    "title": "API密钥错误",
                    "description": "请在用户设置中配置有效的DeepSeek API密钥",
                    "estimated_hours": 0,
                    "is_atomic": False,
                    "children": []
                }
            ]
        }
        yield json.dumps({"final_response": json.dumps(fallback_response)})
        return

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }

    data = {
        "model": DEEPSEEK_MODEL,
        "messages": [
            {"role": "system", "content": "你是一个专业的助手，请回答用户提出的问题。请用简洁、清晰的语言回复，避免无关的内容。"},
            {"role": "user", "content": task_description}
        ],
        "stream": True  # enable server-side streaming
    }

    try:
        # Simulated "thinking" progress updates for the frontend.
        yield json.dumps({"type": "thinking", "data": {"message": "正在分析您的问题...", "progress": 10}})
        time.sleep(0.5)
        yield json.dumps({"type": "thinking", "data": {"message": "搜索相关信息...", "progress": 30}})
        time.sleep(0.5)
        yield json.dumps({"type": "thinking", "data": {"message": "连接到DeepSeek AI...", "progress": 50}})

        try:
            # Issue the real streaming request.
            response = requests.post(
                f"{DEEPSEEK_API_BASE}/chat/completions",
                headers=headers,
                json=data,
                timeout=API_TIMEOUT,
                stream=True  # read the body incrementally
            )

            if response.status_code != 200:
                # BUGFIX: the error body is not guaranteed to be JSON
                # (e.g. an HTML page from a gateway/proxy), so fall back
                # to the raw text instead of letting response.json()
                # raise and mask the real error.
                try:
                    error_info = response.json()
                except ValueError:
                    error_info = response.text
                error_msg = f"DeepSeek API返回错误: {error_info}"
                yield json.dumps({"type": "error", "message": error_msg})
                yield json.dumps({"final_response": json.dumps({"error": error_msg})})
                return

            # Full response accumulated across all chunks.
            complete_response = ""
            # Chunk buffer, flushed to the frontend periodically.
            content_buffer = ""

            for line in response.iter_lines():
                if not line:
                    continue

                # SSE frames are prefixed with "data: "; strip it.
                if line.startswith(b"data: "):
                    line = line[6:]

                try:
                    chunk = json.loads(line)
                    if 'choices' in chunk and len(chunk['choices']) > 0:
                        delta = chunk['choices'][0]['delta']
                        if 'content' in delta:
                            content = delta['content']
                            if content:
                                complete_response += content
                                content_buffer += content

                                # Flush once the buffer is long enough or
                                # contains a sentence/line boundary.
                                if len(content_buffer) > 50 or any(c in content_buffer for c in ['.', '!', '?', '\n']):
                                    yield json.dumps({"content": content_buffer})
                                    content_buffer = ""
                except json.JSONDecodeError:
                    # Skip non-JSON frames (keep-alives, "[DONE]" marker).
                    continue

            # Flush whatever is left in the buffer.
            if content_buffer:
                yield json.dumps({"content": content_buffer})

            # Hand the complete answer to the caller.
            yield json.dumps({"final_response": complete_response})

        except requests.exceptions.Timeout:
            # Dedicated handling for connect/read timeouts.
            error_msg = "连接DeepSeek API超时。请检查网络连接或稍后重试。"
            print(error_msg)
            yield json.dumps({"type": "error", "message": error_msg})
            yield json.dumps({"final_response": None})

        except Exception as e:
            # Any other transport/parse failure.
            error_msg = f"DeepSeek API错误: {str(e)}"
            print(error_msg)
            yield json.dumps({"type": "error", "message": error_msg})
            yield json.dumps({"final_response": None})

    except Exception as e:
        print(f"DeepSeek流式处理出错: {str(e)}")
        yield json.dumps({"type": "error", "message": str(e)})
        yield json.dumps({"final_response": None})

def openai_stream(task_description):
    """Stream a chat completion from the OpenAI API.

    Generator yielding JSON-encoded string fragments: simulated
    "thinking" progress events, buffered "content" chunks as they
    arrive, and a final payload under the "final_response" key
    (None on failure).

    Args:
        task_description: The user prompt to send; openai.api_key is
            assumed to be set by the caller.
    """
    # Raise the module-wide request timeout before issuing the call.
    openai.api_requestor.TIMEOUT_SECS = API_TIMEOUT

    try:
        # Simulated "thinking" progress updates for the frontend.
        for message, progress in (("正在分析您的问题...", 10), ("搜索相关信息...", 30)):
            yield json.dumps({"type": "thinking", "data": {"message": message, "progress": progress}})
            time.sleep(0.5)
        yield json.dumps({"type": "thinking", "data": {"message": "连接到OpenAI API...", "progress": 50}})

        try:
            # Issue the real streaming request.
            stream = openai.ChatCompletion.create(
                model=OPENAI_MODEL,
                messages=[
                    {"role": "system", "content": "你是一个专业的助手，请回答用户提出的问题。请用简洁、清晰的语言回复，避免无关的内容。"},
                    {"role": "user", "content": task_description}
                ],
                timeout=API_TIMEOUT,
                request_timeout=API_TIMEOUT,
                stream=True
            )

            full_text = ""  # full response accumulated across all chunks
            pending = ""    # not-yet-forwarded tail, flushed periodically

            for chunk in stream:
                if 'choices' not in chunk or len(chunk['choices']) == 0:
                    continue
                delta = chunk['choices'][0]['delta']
                if 'content' not in delta:
                    continue
                piece = delta['content']
                if not piece:
                    continue

                full_text += piece
                pending += piece

                # Flush once the buffer is long enough or contains a
                # sentence/line boundary.
                if len(pending) > 50 or any(ch in pending for ch in ['.', '!', '?', '\n']):
                    yield json.dumps({"content": pending})
                    pending = ""

            # Flush whatever is left in the buffer.
            if pending:
                yield json.dumps({"content": pending})

            # Hand the complete answer to the caller.
            yield json.dumps({"final_response": full_text})

        except openai.error.Timeout as e:
            # Dedicated handling for connect/read timeouts.
            error_msg = f"连接OpenAI API超时: {str(e)}。请检查网络连接或稍后重试。"
            print(error_msg)
            yield json.dumps({"type": "error", "message": error_msg})
            yield json.dumps({"final_response": None})

        except Exception as e:
            # Any other API failure.
            error_msg = f"OpenAI API错误: {str(e)}"
            print(error_msg)
            yield json.dumps({"type": "error", "message": error_msg})
            yield json.dumps({"final_response": None})

    except Exception as e:
        # Catch-all for anything outside the request itself.
        error_msg = f"OpenAI流式处理出错: {str(e)}"
        print(error_msg)
        yield json.dumps({"type": "error", "message": str(e)})
        yield json.dumps({"final_response": None})

def save_tasks_recursive(tasks, breakdown_version_id, parent_task_id=None, level=1, conn=None, cursor=None, task_breakdown_config=None):
    """Recursively persist a task-breakdown tree into the sub_tasks table.

    Args:
        tasks: List of task dicts with 'title' and optional
            'description', 'estimated_hours', 'is_atomic', 'children'.
        breakdown_version_id: Id of the breakdown version being saved.
        parent_task_id: Parent row id, or None for top-level tasks.
        level: Depth in the tree (1-based).
        conn: Database connection (unused here; kept for backward
            compatibility with existing callers).
        cursor: Database cursor used for the INSERTs; rows must come
            back as mappings so the inserted 'id' can be read.
        task_breakdown_config: Optional dict with MIN_ATOMIC_HOURS /
            MAX_ATOMIC_HOURS bounds applied to atomic-task estimates.

    Returns:
        int: Total number of rows inserted (this level plus all
        descendants).
    """
    total_subtasks = 0

    for sequence, task in enumerate(tasks, start=1):
        estimated_hours = task.get('estimated_hours', 1.0)  # default: 1 hour
        is_atomic = task.get('is_atomic', False)

        # Clamp atomic-task estimates into the configured range.
        if is_atomic and task_breakdown_config:
            min_hours = task_breakdown_config.get('MIN_ATOMIC_HOURS', 0.5)
            max_hours = task_breakdown_config.get('MAX_ATOMIC_HOURS', 4.0)
            estimated_hours = min(max(estimated_hours, min_hours), max_hours)

        # Insert this task and capture its generated id.
        cursor.execute('''
        INSERT INTO sub_tasks 
        (breakdown_version_id, parent_task_id, title, description, level, sequence, is_atomic, estimated_hours)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
        RETURNING id
        ''', (
            breakdown_version_id,
            parent_task_id,
            task['title'],
            task.get('description', ''),
            level,
            sequence,
            is_atomic,
            estimated_hours
        ))
        task_id = cursor.fetchone()['id']
        total_subtasks += 1

        # Recurse into children, counting every inserted descendant.
        if task.get('children'):
            total_subtasks += save_tasks_recursive(
                task['children'],
                breakdown_version_id,
                task_id,
                level + 1,
                conn,
                cursor,
                task_breakdown_config
            )

    return total_subtasks

def ai_breakdown_task_stream(task_description, conn=None, username='admin'):
    """Stream an AI response for a task-breakdown request.

    This function was a line-for-line duplicate of ai_stream_response
    (provider selection, key handling and error fallback included); it
    now delegates to it so the logic lives in one place.

    Args:
        task_description: The breakdown prompt to send to the AI.
        conn: Optional DB connection used to load per-user settings.
        username: User whose settings select the provider and keys.

    Yields:
        The same JSON-encoded string fragments as ai_stream_response.
    """
    print(f"[DEBUG-STREAM] 开始流式任务拆分，用户名: {username}")
    yield from ai_stream_response(task_description, conn, username)

def ai_execute_task(task_info):
    """Execute a task via the OpenAI chat API and log the conversation.

    Args:
        task_info: Mapping with at least 'id', 'title' and 'description'.

    Returns:
        dict: 'result' (AI output or failure text), 'metrics' (JSON
        string), 'tokens', 'duration'. Never raises; failures are
        logged and reported in the returned dict.
    """
    try:
        started = datetime.now()

        # Apply the global timeout to the OpenAI requestor.
        openai.api_requestor.TIMEOUT_SECS = API_TIMEOUT

        response = openai.ChatCompletion.create(
            model=OPENAI_MODEL,
            messages=[
                {"role": "system", "content": "你是一个专业的软件开发执行助手，请执行用户提供的具体任务。"},
                {"role": "user", "content": f"请执行以下任务并提供执行结果：\n\n任务标题：{task_info['title']}\n任务描述：{task_info['description']}\n\n请提供你的执行计划、具体实现步骤和最终结果。"}
            ],
            timeout=API_TIMEOUT,
            request_timeout=API_TIMEOUT
        )

        duration = (datetime.now() - started).total_seconds()
        # usage may be absent on some responses; fall back to 0 tokens.
        tokens = response.usage.total_tokens if hasattr(response, 'usage') else 0
        result = response.choices[0].message['content']

        # Persist the successful conversation for auditing.
        log_ai_conversation(
            conn=g.db,
            related_task_id=task_info['id'],
            conversation_type='task_execution',
            prompt=f"执行任务: {task_info['title']}\n{task_info['description']}",
            response=result,
            tokens=tokens,
            model=OPENAI_MODEL,
            duration=duration,
            success=True
        )

        return {
            'result': result,
            'metrics': json.dumps({
                'tokens': tokens,
                'duration_seconds': duration,
                'model': OPENAI_MODEL
            }),
            'tokens': tokens,
            'duration': duration
        }
    except Exception as e:
        error_msg = str(e)
        print(f"AI任务执行出错: {error_msg}")

        # Record the failed conversation as well.
        log_ai_conversation(
            conn=g.db,
            related_task_id=task_info['id'],
            conversation_type='task_execution',
            prompt=f"执行任务: {task_info['title']}\n{task_info['description']}",
            success=False,
            error_message=error_msg
        )

        return {
            'result': f"任务执行失败: {error_msg}",
            'metrics': json.dumps({'error': error_msg}),
            'tokens': 0,
            'duration': 0
        }

def ai_test_task(task_info, execution_result):
    """Evaluate a task's execution result with the OpenAI chat API.

    Args:
        task_info: Mapping with 'title' and 'description'.
        execution_result: Output produced by ai_execute_task.

    Returns:
        dict: {'test_result': 'pass' | 'fail', 'test_details': str}.
        Never raises; errors are reported as a failed test.
    """
    try:
        # Apply the global timeout to the OpenAI requestor.
        openai.api_requestor.TIMEOUT_SECS = API_TIMEOUT

        response = openai.ChatCompletion.create(
            # CONSISTENCY FIX: use the configured model like every other
            # call in this module (was previously hard-coded to "gpt-4").
            model=OPENAI_MODEL,
            messages=[
                {"role": "system", "content": "你是一个专业的软件开发测试助手，请测试任务执行结果是否符合要求。"},
                {"role": "user", "content": f"请评估以下任务执行结果是否符合要求：\n\n任务标题：{task_info['title']}\n任务描述：{task_info['description']}\n\n执行结果：\n{execution_result}\n\n请评估执行结果是否完成了任务要求，并给出通过或不通过的结论，以及具体的评估理由。"}
            ],
            timeout=API_TIMEOUT,
            request_timeout=API_TIMEOUT
        )
        result = response.choices[0].message['content']
        # Heuristic verdict: treat positive keywords in the reply as a pass.
        is_pass = "通过" in result or "成功" in result or "完成" in result
        return {"test_result": "pass" if is_pass else "fail", "test_details": result}
    except Exception as e:
        print(f"AI任务测试出错: {str(e)}")
        return {"test_result": "fail", "test_details": f"测试过程出错: {str(e)}"}