"""
AI分析相关API路由
"""

import logging
import re
from typing import Optional
from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Form
from sqlalchemy.orm import Session

from ...core.database import get_db
from ...core.security import get_current_user
from ...core.config import settings
from ...models.user import User
from ...models.analysis import AnalysisRecord
from ...services.ai_service import ai_service
from ...services.file_service import file_service
from ...schemas.analysis import AnalysisRequest, AnalysisResponse, TestCaseRequest, TestCaseResponse, SaveAnalysisRequest, DocumentProcessResponse

router = APIRouter()
logger = logging.getLogger(__name__)


def _clean_utf8_content(content: str) -> str:
    """
    清理内容中的4字节UTF-8字符（如emoji），避免MySQL编码错误
    保留基本的中文、英文、数字和常用符号
    """
    if not content:
        return content
    
    try:
        # 移除4字节UTF-8字符（emoji等）
        # MySQL的utf8字符集只支持1-3字节的UTF-8字符
        cleaned = re.sub(r'[^\u0000-\uFFFF]', '', content)
        
        # 替换一些常见的控制字符
        cleaned = cleaned.replace('\x01', ' ')  # 替换控制字符
        cleaned = cleaned.replace('\x00', ' ')  # 替换NULL字符
        
        # 清理多余的空白字符
        cleaned = re.sub(r'\s+', ' ', cleaned)
        cleaned = cleaned.strip()
        
        return cleaned
    except Exception as e:
        logger.warning(f"清理UTF-8内容失败: {e}")
        # 如果清理失败，返回ASCII安全的版本
        return content.encode('ascii', 'ignore').decode('ascii')


def _extract_title_from_content(content: str) -> str:
    """
    从输入内容中提取首行有文字的内容作为标题
    """
    if not content:
        return "文本输入"
    
    try:
        # 按行分割内容，过滤空行
        lines = [line.strip() for line in content.split('\n') if line.strip()]
        
        if not lines:
            return "文本输入"
        
        # 取第一行有内容的文字，限制长度
        first_line = lines[0]
        max_length = 30
        
        if len(first_line) > max_length:
            return first_line[:max_length] + "..."
        
        return first_line
    except Exception as e:
        logger.warning(f"标题提取失败: {e}")
        return "文本输入"


@router.post("/analyze", response_model=AnalysisResponse)
async def analyze_requirement(
    requirement_text: Optional[str] = Form(None),
    requirement_file: Optional[UploadFile] = File(None),
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Analyze a requirement document, identifying ambiguities and risks.

    Accepts either raw text or an uploaded file (the file takes precedence
    when both are supplied). The content is sent to the AI service, the
    outcome is persisted as an AnalysisRecord, and the analysis text plus
    the new record id are returned.

    Raises:
        HTTPException 400: neither text nor file provided, or the file
            failed validation.
        HTTPException 500: file save/extraction failure or any unexpected
            error.
    """
    try:
        logger.info(f"🎯 收到需求分析请求 - 用户: {current_user.username}")
        logger.info(f"📋 请求参数 - 文本长度: {len(requirement_text) if requirement_text else 0}, 文件: {requirement_file.filename if requirement_file else 'None'}")

        content = ""
        file_info = None

        # --- Resolve the input content: uploaded file wins over raw text ---
        if requirement_file:
            logger.info(f"📁 开始处理上传文件: {requirement_file.filename}")

            logger.info(f"🔍 验证文件格式和大小...")
            validation = file_service.validate_file(requirement_file)
            if not validation["valid"]:
                logger.error(f"❌ 文件验证失败: {validation['error']}")
                raise HTTPException(status_code=400, detail=validation["error"])
            logger.info(f"✅ 文件验证通过")

            logger.info(f"💾 保存上传文件...")
            save_result = await file_service.save_file(requirement_file)
            if not save_result["success"]:
                logger.error(f"❌ 文件保存失败: {save_result['error']}")
                raise HTTPException(status_code=500, detail=save_result["error"])
            logger.info(f"✅ 文件保存成功: {save_result['filename']}")

            # try/finally guarantees the temp file is removed even when
            # extraction (or anything after it) raises unexpectedly; the
            # previous code leaked the saved file on unhandled exceptions.
            try:
                logger.info(f"🔍 开始提取文件内容...")
                extract_result = file_service.extract_content(save_result["file_path"])
                if not extract_result["success"]:
                    logger.error(f"❌ 文件内容提取失败: {extract_result['error']}")
                    raise HTTPException(status_code=500, detail=extract_result["error"])

                content = extract_result["content"]
                logger.info(f"✅ 文件内容提取成功，内容长度: {len(content)} 字符")

                file_info = {
                    "filename": requirement_file.filename,
                    "file_type": extract_result["file_type"],
                    "language": extract_result.get("language", "unknown"),
                    "used_ocr": extract_result.get("used_ocr", False),
                    "used_docling": extract_result.get("used_docling", False)
                }
                logger.info(f"📊 文件信息: {file_info}")
            finally:
                logger.info(f"🗑️ 清理临时文件...")
                file_service.cleanup_file(save_result["file_path"])

        elif requirement_text:
            logger.info(f"📝 使用文本输入，长度: {len(requirement_text)} 字符")
            content = requirement_text
        else:
            logger.error(f"❌ 请求参数错误：未提供需求文本或文件")
            raise HTTPException(status_code=400, detail="请提供需求文本或上传文件")

        # --- Run the AI analysis ---
        logger.info(f"🤖 开始调用AI分析服务...")
        analysis_result = await ai_service.analyze_requirement(content)
        logger.info(f"🤖 AI分析完成，成功: {analysis_result['success']}")

        if analysis_result["success"]:
            logger.info(f"✅ 分析结果长度: {len(analysis_result.get('analysis', ''))} 字符")
        else:
            logger.error(f"❌ AI分析失败: {analysis_result.get('error', 'Unknown error')}")

        # --- Persist the history record ---
        logger.info(f"💾 保存分析记录到数据库...")

        # Sanitize before storing so MySQL's utf8 charset accepts the row.
        safe_input = _clean_utf8_content(content[:1000])
        safe_output = _clean_utf8_content(analysis_result.get("analysis", ""))

        # First non-empty line of the input doubles as the record title.
        title = _extract_title_from_content(content)

        analysis_record = AnalysisRecord(
            analysis_type="requirement_analysis",
            input_content=safe_input,  # truncated to 1000 chars, sanitized
            output_content=safe_output,
            file_name=title,
            file_type=file_info["file_type"] if file_info else "text",
            status="success" if analysis_result["success"] else "failed",
            error_message=analysis_result.get("error"),
            ai_model="gpt-3.5-turbo",
            user_id=current_user.id
        )

        db.add(analysis_record)
        db.commit()
        db.refresh(analysis_record)  # populate the autogenerated id
        logger.info(f"✅ 分析记录已保存，ID: {analysis_record.id}")

        response = AnalysisResponse(
            success=analysis_result["success"],
            analysis=analysis_result.get("analysis", ""),
            error=analysis_result.get("error"),
            file_info=file_info,
            record_id=analysis_record.id
        )

        logger.info(f"🎉 需求分析请求处理完成！")
        return response

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"需求分析失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/generate-test-cases", response_model=TestCaseResponse)
async def generate_test_cases(
    requirement_text: Optional[str] = Form(None),
    requirement_file: Optional[UploadFile] = File(None),
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Generate test cases from a requirement (raw text or uploaded file).

    The uploaded file takes precedence when both inputs are supplied.
    The generation outcome is persisted as an AnalysisRecord and the
    generated test cases plus the record id are returned.

    Raises:
        HTTPException 400: neither text nor file provided, or the file
            failed validation.
        HTTPException 500: file save/extraction failure or any unexpected
            error.
    """
    try:
        logger.info(f"🧪 收到测试用例生成请求 - 用户: {current_user.username}")
        logger.info(f"📋 请求参数 - 文本长度: {len(requirement_text) if requirement_text else 0}, 文件: {requirement_file.filename if requirement_file else 'None'}")

        content = ""
        file_info = None

        # --- Resolve the input content: uploaded file wins over raw text ---
        if requirement_file:
            logger.info(f"📁 开始处理上传文件: {requirement_file.filename}")

            logger.info(f"🔍 验证文件格式和大小...")
            validation = file_service.validate_file(requirement_file)
            if not validation["valid"]:
                logger.error(f"❌ 文件验证失败: {validation['error']}")
                raise HTTPException(status_code=400, detail=validation["error"])
            logger.info(f"✅ 文件验证通过")

            logger.info(f"💾 保存上传文件...")
            save_result = await file_service.save_file(requirement_file)
            if not save_result["success"]:
                logger.error(f"❌ 文件保存失败: {save_result['error']}")
                raise HTTPException(status_code=500, detail=save_result["error"])
            logger.info(f"✅ 文件保存成功: {save_result['filename']}")

            # try/finally guarantees the temp file is removed even when
            # extraction (or anything after it) raises unexpectedly; the
            # previous code leaked the saved file on unhandled exceptions.
            try:
                logger.info(f"🔍 开始提取文件内容...")
                extract_result = file_service.extract_content(save_result["file_path"])
                if not extract_result["success"]:
                    logger.error(f"❌ 文件内容提取失败: {extract_result['error']}")
                    raise HTTPException(status_code=500, detail=extract_result["error"])

                content = extract_result["content"]
                logger.info(f"✅ 文件内容提取成功，内容长度: {len(content)} 字符")

                file_info = {
                    "filename": requirement_file.filename,
                    "file_type": extract_result["file_type"],
                    "language": extract_result.get("language", "unknown"),
                    "used_ocr": extract_result.get("used_ocr", False),
                    "used_docling": extract_result.get("used_docling", False)
                }
                logger.info(f"📊 文件信息: {file_info}")
            finally:
                logger.info(f"🗑️ 清理临时文件...")
                file_service.cleanup_file(save_result["file_path"])

        elif requirement_text:
            logger.info(f"📝 使用文本输入，长度: {len(requirement_text)} 字符")
            content = requirement_text
        else:
            logger.error(f"❌ 请求参数错误：未提供需求文本或文件")
            raise HTTPException(status_code=400, detail="请提供需求文本或上传文件")

        # --- Run the AI test-case generation ---
        logger.info(f"🤖 开始调用AI测试用例生成服务...")
        generation_result = await ai_service.generate_test_cases(content)
        logger.info(f"🤖 AI测试用例生成完成，成功: {generation_result['success']}")

        # --- Persist the history record ---
        # Sanitize before storing so MySQL's utf8 charset accepts the row.
        safe_input = _clean_utf8_content(content[:1000])
        safe_output = _clean_utf8_content(generation_result.get("test_cases", ""))

        # First non-empty line of the input doubles as the record title.
        title = _extract_title_from_content(content)

        analysis_record = AnalysisRecord(
            analysis_type="test_case_generation",
            input_content=safe_input,  # truncated to 1000 chars, sanitized
            output_content=safe_output,
            file_name=title,
            file_type=file_info["file_type"] if file_info else "text",
            status="success" if generation_result["success"] else "failed",
            error_message=generation_result.get("error"),
            ai_model="gpt-3.5-turbo",
            user_id=current_user.id
        )

        db.add(analysis_record)
        db.commit()
        db.refresh(analysis_record)  # populate the autogenerated id

        return TestCaseResponse(
            success=generation_result["success"],
            test_cases=generation_result.get("test_cases", ""),
            error=generation_result.get("error"),
            file_info=file_info,
            record_id=analysis_record.id
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"测试用例生成失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/history")
async def get_analysis_history(
    page: int = 1,
    page_size: int = 20,
    analysis_type: Optional[str] = None,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Return the current user's analysis history, newest first, paginated.

    Args:
        page: 1-based page number (values < 1 are clamped to 1).
        page_size: records per page (values < 1 are clamped to 1).
        analysis_type: optional filter on AnalysisRecord.analysis_type.

    Raises:
        HTTPException 500: unexpected failure.
    """
    try:
        # Clamp pagination inputs: page < 1 previously produced a negative
        # OFFSET, and page_size <= 0 raised ZeroDivisionError in total_pages.
        page = max(page, 1)
        page_size = max(page_size, 1)

        query = db.query(AnalysisRecord).filter(AnalysisRecord.user_id == current_user.id)

        if analysis_type:
            query = query.filter(AnalysisRecord.analysis_type == analysis_type)

        # Total count drives the pagination metadata.
        total = query.count()

        offset = (page - 1) * page_size
        records = query.order_by(AnalysisRecord.created_at.desc()).offset(offset).limit(page_size).all()

        return {
            "success": True,
            "records": [
                {
                    "id": record.id,
                    "analysis_type": record.analysis_type,
                    "file_name": record.file_name,
                    "file_type": record.file_type,
                    "status": record.status,
                    "error_message": record.error_message,
                    "created_at": record.created_at,
                    "processing_time": record.processing_time
                }
                for record in records
            ],
            "pagination": {
                "page": page,
                "page_size": page_size,
                "total": total,
                # Ceiling division without importing math.
                "total_pages": (total + page_size - 1) // page_size
            }
        }

    except Exception as e:
        logger.error(f"获取分析历史失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/history/{record_id}")
async def get_analysis_detail(
    record_id: int,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Fetch the full detail of one analysis record.

    The record must belong to the current user; otherwise a 404 is raised
    (ownership is enforced in the query filter, so foreign records are
    indistinguishable from missing ones).
    """
    try:
        record = (
            db.query(AnalysisRecord)
            .filter(
                AnalysisRecord.id == record_id,
                AnalysisRecord.user_id == current_user.id,
            )
            .first()
        )

        if not record:
            raise HTTPException(status_code=404, detail="分析记录不存在")

        # Field order here fixes the key order of the JSON payload.
        detail_fields = (
            "id", "analysis_type", "input_content", "output_content",
            "file_name", "file_type", "file_size", "processing_time",
            "status", "error_message", "ai_model", "tokens_used",
            "created_at",
        )
        return {
            "success": True,
            "record": {field: getattr(record, field) for field in detail_fields},
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取分析详情失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/save")
async def save_analysis_result(
    request: SaveAnalysisRequest,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Persist an edited analysis result.

    With request.record_id: updates that record's output content (scoped
    to the current user). Without it: writes the content to a markdown
    file under UPLOAD_DIR/saved_analysis and creates a new AnalysisRecord.

    Raises:
        HTTPException 404: record_id given but no matching record.
        HTTPException 500: unexpected failure.
    """
    try:
        import os
        from datetime import datetime

        if request.record_id:
            # Update path: only records owned by the caller are visible.
            analysis_record = db.query(AnalysisRecord).filter(
                AnalysisRecord.id == request.record_id,
                AnalysisRecord.user_id == current_user.id
            ).first()

            if not analysis_record:
                raise HTTPException(status_code=404, detail="记录不存在")

            analysis_record.output_content = _clean_utf8_content(request.content)
            analysis_record.updated_at = datetime.now()

            db.commit()
            logger.info(f"✅ 已更新分析记录 ID: {analysis_record.id}")

            return {
                "success": True,
                "message": "保存成功",
                "record_id": analysis_record.id
            }
        else:
            # Create path: write a markdown file and a new history record.
            save_dir = os.path.join(settings.UPLOAD_DIR, "saved_analysis")
            os.makedirs(save_dir, exist_ok=True)

            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            # basename() defends against path traversal in the
            # client-supplied filename (e.g. "../../evil").
            filename = os.path.basename(request.filename or "") or f"analysis_result_{timestamp}.md"
            if not filename.endswith('.md'):
                filename += '.md'

            file_path = os.path.join(save_dir, filename)

            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(request.content)

            analysis_record = AnalysisRecord(
                analysis_type="saved_analysis",
                input_content="",
                # Sanitize like the other endpoints so MySQL utf8 accepts it
                # (the update branch above already did; this branch did not).
                output_content=_clean_utf8_content(request.content[:1000]),
                file_name=filename,
                file_type="markdown",
                status="success",
                ai_model="user_edited",
                user_id=current_user.id
            )

            db.add(analysis_record)
            db.commit()

            return {
                "success": True,
                "message": "保存成功",
                "file_path": file_path,
                "filename": filename
            }

    except HTTPException:
        # Bug fix: without this re-raise the 404 above was swallowed by
        # the generic handler below and converted into a 500.
        raise
    except Exception as e:
        logger.error(f"保存分析结果失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/process-document", response_model=DocumentProcessResponse)
async def process_document(
    document_file: UploadFile = File(...),
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Extract the text content of an uploaded document (no AI analysis).

    Used by the UI to write document text back into the input textbox.
    A lightweight processing record (with a 500-char preview) is stored.

    Raises:
        HTTPException 400: file failed validation.
        HTTPException 500: save/extraction failure or any unexpected error.
    """
    try:
        logger.info(f"📄 收到文档处理请求 - 用户: {current_user.username}")
        logger.info(f"📁 文档信息: {document_file.filename}")

        logger.info(f"🔍 验证文件格式和大小...")
        validation = file_service.validate_file(document_file)
        if not validation["valid"]:
            logger.error(f"❌ 文件验证失败: {validation['error']}")
            raise HTTPException(status_code=400, detail=validation["error"])
        logger.info(f"✅ 文件验证通过")

        logger.info(f"💾 保存上传文件...")
        save_result = await file_service.save_file(document_file)
        if not save_result["success"]:
            logger.error(f"❌ 文件保存失败: {save_result['error']}")
            raise HTTPException(status_code=500, detail=save_result["error"])
        logger.info(f"✅ 文件保存成功: {save_result['filename']}")

        # try/finally guarantees the temp file is removed even when
        # extraction (or anything after it) raises unexpectedly; the
        # previous code leaked the saved file on unhandled exceptions.
        try:
            logger.info(f"🔍 开始提取文件内容...")
            extract_result = file_service.extract_content(save_result["file_path"])
            if not extract_result["success"]:
                logger.error(f"❌ 文件内容提取失败: {extract_result['error']}")
                raise HTTPException(status_code=500, detail=extract_result["error"])

            content = extract_result["content"]
            logger.info(f"✅ 文件内容提取成功，内容长度: {len(content)} 字符")

            file_info = {
                "filename": document_file.filename,
                "file_type": extract_result["file_type"],
                "language": extract_result.get("language", "unknown"),
                "used_ocr": extract_result.get("used_ocr", False),
                "used_docling": extract_result.get("used_docling", False)
            }
            logger.info(f"📊 文件信息: {file_info}")
        finally:
            logger.info(f"🗑️ 清理临时文件...")
            file_service.cleanup_file(save_result["file_path"])

        # Record the processing event (no AI involved).
        logger.info(f"💾 保存文档处理记录...")

        # Sanitize the preview so MySQL's utf8 charset accepts the row.
        safe_content = _clean_utf8_content(content[:500])

        analysis_record = AnalysisRecord(
            analysis_type="document_processing",
            input_content="",  # the full document content is not stored
            output_content=safe_content,  # 500-char sanitized preview only
            file_name=document_file.filename,
            file_type=extract_result["file_type"],
            status="success",
            ai_model="docling",
            user_id=current_user.id
        )

        db.add(analysis_record)
        db.commit()
        logger.info(f"✅ 处理记录已保存")

        response = DocumentProcessResponse(
            success=True,
            content=content,
            file_info=file_info
        )

        logger.info(f"📤 准备返回响应: success={response.success}, content_length={len(response.content) if response.content else 0}")
        logger.info(f"🎉 文档处理请求完成！")
        return response

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"❌ 文档处理失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))