"""
原创性检测API路由
"""
import asyncio
import hashlib
import json
import os
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional
from uuid import uuid4

from fastapi import APIRouter, HTTPException, Depends, BackgroundTasks, Query, UploadFile, File, Request
from fastapi.responses import FileResponse, HTMLResponse, JSONResponse
from motor.motor_asyncio import AsyncIOMotorDatabase
from pydantic import BaseModel, Field, field_validator
from redis import asyncio as aioredis

from src.database import get_mongodb_client, get_redis_client
from src.models.fingerprint import ContentFingerprint, OriginalityReport
from src.models.whitelist import WhitelistType
from src.repositories.fingerprint_repository import FingerprintRepository
from src.repositories.whitelist_repository import WhitelistRepository
from src.detectors.originality_detector import OriginalityDetector
from src.analyzers.citation_analyzer import CitationAnalyzer, CitationFormat
from src.reports.originality_report import OriginalityReportGenerator, ReportData
from src.services.whitelist_service import WhitelistService


router = APIRouter(prefix="/api/v1/originality", tags=["Originality"])


# 依赖函数
async def get_db() -> AsyncIOMotorDatabase:
    """Resolve the MongoDB database handle used by this router."""
    mongo = await get_mongodb_client()
    return mongo.ai_writing


async def get_redis() -> Optional[aioredis.Redis]:
    """Resolve the shared Redis connection, or None when unavailable."""
    redis_conn = await get_redis_client()
    return redis_conn


async def get_current_user() -> dict:
    """Return the authenticated user (stub — the real implementation should
    decode the JWT and look the user up)."""
    # TODO: parse user info from the JWT token instead of this fixture.
    stub_user = {"id": "user_123", "name": "Test User"}
    return stub_user


async def check_rate_limit(
    request: Request,
    redis: Optional[aioredis.Redis] = Depends(get_redis),
    current_user: dict = Depends(get_current_user)
) -> None:
    """
    Enforce a per-user API rate limit.

    - At most 10 requests per user per minute, tracked with a Redis counter
      keyed by user id.
    - Skipped entirely when Redis is unavailable; Redis errors never block
      the request (best-effort limiting).

    Raises:
        HTTPException: 429 when the per-minute quota is exceeded.
    """
    if not redis:
        return  # no Redis -> no rate limiting

    user_id = current_user.get('id', 'anonymous')
    rate_key = f"rate_limit:originality:{user_id}"

    try:
        # INCR first, then attach the TTL on first use.  The previous
        # GET/SETEX/INCR sequence had two races: concurrent requests could
        # each read a low count and slip past the limit, and an INCR issued
        # just after key expiry re-created the counter *without* a TTL,
        # locking the user out permanently.
        count = await redis.incr(rate_key)
        if count == 1:
            await redis.expire(rate_key, 60)  # 60-second window
        if count > 10:  # still: 10 requests allowed, 11th rejected
            raise HTTPException(
                status_code=429,
                detail="请求过于频繁，请稍后重试"
            )
    except HTTPException:
        raise
    except Exception as e:
        # Best effort: a Redis failure must not block the request.
        print(f"Rate limit check error: {e}")


class OriginalityCheckRequest(BaseModel):
    """Request payload for a single originality check."""
    content: str = Field(..., description="待检测文本内容")
    title: Optional[str] = Field(None, description="文档标题")
    content_type: str = Field(default="article", description="内容类型")
    check_citations: bool = Field(default=True, description="是否检查引用")
    exclude_whitelist: bool = Field(default=True, description="是否排除白名单")
    save_fingerprint: bool = Field(default=True, description="是否保存指纹")
    generate_report: bool = Field(default=True, description="是否生成报告")
    report_format: str = Field(default="html", description="报告格式: html/json/pdf")

    @field_validator('content')
    @classmethod
    def validate_content(cls, v: str) -> str:
        # Reject too-short payloads up front (empty strings fall through to
        # the same length check).
        if not v or len(v.strip()) < 10:
            raise ValueError("内容至少需要10个字符")
        # NOTE(review): this counts characters, not bytes — "500KB" is only
        # accurate for ASCII content; confirm whether a byte limit is intended.
        if len(v) > 500000:
            raise ValueError("内容不能超过500KB")
        return v


class BatchCheckRequest(BaseModel):
    """Request payload for a batch originality check (up to 10 documents)."""
    documents: List[Dict[str, str]] = Field(..., description="文档列表")
    check_citations: bool = Field(default=True)
    exclude_whitelist: bool = Field(default=True)

    @field_validator('documents')
    @classmethod
    def validate_documents(cls, v: List[Dict[str, str]]) -> List[Dict[str, str]]:
        # Guard both ends of the batch size: at least one, at most ten.
        if not v:
            raise ValueError("至少需要一个文档")
        if len(v) > 10:
            raise ValueError("批量检测最多支持10个文档")
        return v


class OriginalityCheckResponse(BaseModel):
    """Response body for a single originality check."""
    report_id: str  # UUID of the generated report
    originality_score: float  # overall originality, rounded to 2 decimals
    total_segments: int  # number of text segments analyzed
    similar_segments_count: int  # segments flagged as similar to known content
    citations_needed: int  # count of citation suggestions produced
    summary: str  # human-readable detection summary
    report_url: Optional[str] = None  # set only when report generation was requested
    processing_time: float  # wall-clock seconds spent handling the request
    cached: bool = False  # True when served from the Redis cache


class ReportQueryResponse(BaseModel):
    """Response body for a stored-report lookup."""
    report_id: str  # UUID of the report
    content_id: str  # UUID of the checked content
    originality_score: float  # overall originality score
    created_at: datetime  # when the report was generated
    report_url: str  # URL for retrieving the rendered report
    report_data: Optional[Dict[str, Any]] = None  # full report payload, when included


@router.post("/check", response_model=OriginalityCheckResponse)
async def check_originality(
    request: OriginalityCheckRequest,
    background_tasks: BackgroundTasks,
    db: AsyncIOMotorDatabase = Depends(get_db),
    redis: aioredis.Redis = Depends(get_redis),
    current_user: dict = Depends(get_current_user),
    _: None = Depends(check_rate_limit)
) -> OriginalityCheckResponse:
    """
    单文本原创性检测端点
    
    - 支持缓存优化响应速度
    - 支持白名单排除
    - 支持多种报告格式
    """
    start_time = datetime.now()
    
    # 生成内容hash作为缓存key
    content_hash = hashlib.md5(request.content.encode()).hexdigest()
    cache_key = f"originality:check:{content_hash}"
    
    # 检查缓存
    cached_result = None
    if redis:
        try:
            cached_result = await redis.get(cache_key)
            if cached_result:
                cached_result = json.loads(cached_result)
                cached_result['cached'] = True
                return OriginalityCheckResponse(**cached_result)
        except Exception as e:
            print(f"Redis cache error: {e}")
    
    # 初始化仓库和检测器
    fp_repository = FingerprintRepository(db)
    wl_repository = None
    
    if request.exclude_whitelist:
        wl_repository = WhitelistRepository(db)
    
    detector = OriginalityDetector(
        repository=fp_repository,
        whitelist_repository=wl_repository
    )
    
    # 执行检测
    detection_result = await detector.detect_originality(
        text=request.content,
        content_id=str(uuid4()),
        save_fingerprint=request.save_fingerprint
    )
    
    # 引用分析
    citations_needed = []
    if request.check_citations:
        analyzer = CitationAnalyzer()
        citation_result = analyzer.analyze(request.content)
        citations_needed = citation_result.get('suggestions', [])
    
    report_id = str(uuid4())
    report_url = None
    
    # 生成报告
    if request.generate_report:
        report_data = ReportData(
            report_id=report_id,
            content_id=str(uuid4()),
            title=request.title or "未命名文档",
            originality_score=detection_result.overall_originality,
            total_segments=detection_result.total_segments,
            similar_segments_count=detection_result.similar_segments_count,
            similar_contents=detection_result.similar_contents,
            citations_needed=[
                {
                    'position': c.start_pos,
                    'suggestion': c.suggested_citation,
                    'priority': c.priority
                }
                for c in citations_needed
            ],
            detection_time=datetime.now(),
            word_count=len(request.content),
            summary=detection_result.detection_summary,
            metadata={'user_id': current_user.get('id')}
        )
        
        # 异步生成报告文件
        background_tasks.add_task(
            generate_report_file,
            report_data,
            detection_result,
            request.report_format,
            fp_repository
        )
        
        report_url = f"/api/v1/originality/report/{report_id}"
    
    # 计算处理时间
    processing_time = (datetime.now() - start_time).total_seconds()
    
    # 准备响应
    response_data = {
        'report_id': report_id,
        'originality_score': round(detection_result.overall_originality, 2),
        'total_segments': detection_result.total_segments,
        'similar_segments_count': detection_result.similar_segments_count,
        'citations_needed': len(citations_needed),
        'summary': detection_result.detection_summary,
        'report_url': report_url,
        'processing_time': processing_time,
        'cached': False
    }
    
    # 缓存结果（5分钟）
    if redis and not cached_result:
        try:
            await redis.setex(
                cache_key,
                300,  # 5分钟过期
                json.dumps(response_data, default=str)
            )
        except Exception as e:
            print(f"Redis cache set error: {e}")
    
    return OriginalityCheckResponse(**response_data)


@router.post("/batch-check")
async def batch_check_originality(
    request: BatchCheckRequest,
    background_tasks: BackgroundTasks,
    db: AsyncIOMotorDatabase = Depends(get_db),
    redis: aioredis.Redis = Depends(get_redis),
    current_user: dict = Depends(get_current_user)
) -> Dict[str, Any]:
    """
    批量原创性检测端点
    
    - 支持最多10个文档同时检测
    - 使用异步并发处理
    - 返回批次ID用于查询结果
    """
    batch_id = str(uuid4())
    
    # 初始化仓库和检测器
    fp_repository = FingerprintRepository(db)
    wl_repository = WhitelistRepository(db) if request.exclude_whitelist else None
    detector = OriginalityDetector(
        repository=fp_repository,
        whitelist_repository=wl_repository
    )
    
    # 创建检测任务
    tasks = []
    for doc in request.documents:
        task = detector.detect_originality(
            text=doc.get('content', ''),
            content_id=doc.get('id', str(uuid4())),
            save_fingerprint=True
        )
        tasks.append(task)
    
    # 并发执行检测
    results = await asyncio.gather(*tasks)
    
    # 汇总结果
    batch_results = []
    for i, (doc, result) in enumerate(zip(request.documents, results)):
        batch_results.append({
            'document_id': doc.get('id', f'doc_{i}'),
            'title': doc.get('title', f'文档{i+1}'),
            'originality_score': round(result.overall_originality, 2),
            'summary': result.detection_summary
        })
    
    # 保存批次结果到Redis（1小时过期）
    if redis:
        try:
            await redis.setex(
                f"originality:batch:{batch_id}",
                3600,
                json.dumps(batch_results, default=str)
            )
        except Exception as e:
            print(f"Redis batch save error: {e}")
    
    return {
        'batch_id': batch_id,
        'total_documents': len(request.documents),
        'results': batch_results,
        'status': 'completed'
    }


@router.get("/report/{report_id}")
async def get_report(
    report_id: str,
    format: str = Query(default="html", description="报告格式: html/json/pdf"),
    db: AsyncIOMotorDatabase = Depends(get_db),
    redis: aioredis.Redis = Depends(get_redis),
    current_user: dict = Depends(get_current_user)
):
    """
    获取原创性检测报告
    
    - 支持HTML、JSON、PDF格式
    - 使用Redis缓存报告内容
    """
    # 检查缓存
    cache_key = f"originality:report:{report_id}:{format}"
    
    if redis:
        try:
            cached_report = await redis.get(cache_key)
            if cached_report:
                if format == "json":
                    return json.loads(cached_report)
                elif format == "html":
                    return HTMLResponse(content=cached_report)
                elif format == "pdf":
                    # PDF需要从文件系统读取
                    pdf_path = f"/tmp/reports/{report_id}.pdf"
                    if os.path.exists(pdf_path):
                        return FileResponse(
                            pdf_path,
                            media_type="application/pdf",
                            filename=f"report_{report_id}.pdf"
                        )
        except Exception as e:
            print(f"Redis cache get error: {e}")
    
    # 从数据库获取报告数据
    report_collection = db["originality_reports"]
    report_doc = await report_collection.find_one({"report_id": report_id})
    
    if not report_doc:
        raise HTTPException(status_code=404, detail="报告不存在")
    
    # 检查权限
    if report_doc.get('metadata', {}).get('user_id') != current_user.get('id'):
        raise HTTPException(status_code=403, detail="无权访问此报告")
    
    # 根据格式返回报告
    if format == "json":
        return report_doc
    elif format == "html":
        # 生成HTML报告
        generator = OriginalityReportGenerator()
        html_content = generator.generate_html_report(
            detection_result=None,  # 从数据库恢复
            report_data=ReportData(**report_doc)
        )
        # 缓存HTML（10分钟）
        if redis:
            await redis.setex(cache_key, 600, html_content)
        return HTMLResponse(content=html_content)
    elif format == "pdf":
        # 生成或返回PDF文件
        pdf_path = f"/tmp/reports/{report_id}.pdf"
        if not os.path.exists(pdf_path):
            generator = OriginalityReportGenerator()
            generator.generate_pdf_report(
                detection_result=None,
                report_data=ReportData(**report_doc),
                output_path=pdf_path
            )
        return FileResponse(
            pdf_path,
            media_type="application/pdf",
            filename=f"report_{report_id}.pdf"
        )
    
    raise HTTPException(status_code=400, detail="不支持的报告格式")


@router.get("/batch/{batch_id}")
async def get_batch_results(
    batch_id: str,
    redis: aioredis.Redis = Depends(get_redis),
    current_user: dict = Depends(get_current_user)
) -> Dict[str, Any]:
    """
    获取批量检测结果
    """
    if not redis:
        raise HTTPException(status_code=503, detail="缓存服务不可用")
    
    try:
        results = await redis.get(f"originality:batch:{batch_id}")
        if not results:
            raise HTTPException(status_code=404, detail="批次结果不存在或已过期")
        
        return {
            'batch_id': batch_id,
            'results': json.loads(results),
            'status': 'completed'
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取批次结果失败: {str(e)}")


@router.post("/file-check")
async def check_file_originality(
    file: UploadFile = File(...),
    title: Optional[str] = None,
    check_citations: bool = True,
    exclude_whitelist: bool = True,
    db: AsyncIOMotorDatabase = Depends(get_db),
    redis: aioredis.Redis = Depends(get_redis),
    current_user: dict = Depends(get_current_user)
) -> OriginalityCheckResponse:
    """
    文件上传原创性检测
    
    - 支持TXT、DOC、DOCX、PDF等格式
    - 自动提取文本内容
    """
    # 检查文件类型
    allowed_types = [
        'text/plain',
        'application/pdf',
        'application/msword',
        'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
    ]
    
    if file.content_type not in allowed_types:
        raise HTTPException(status_code=400, detail="不支持的文件格式")
    
    # 检查文件大小 (最大10MB)
    content = await file.read()
    if len(content) > 10 * 1024 * 1024:  # 10MB
        raise HTTPException(status_code=413, detail="文件大小不能超过10MB")
    
    # 根据文件类型提取文本
    text_content = ""
    if file.content_type == 'text/plain':
        try:
            text_content = content.decode('utf-8')
        except UnicodeDecodeError:
            raise HTTPException(status_code=400, detail="文件编码不支持，请使用UTF-8编码")
    else:
        # 这里需要额外的库来处理DOC/PDF等格式
        # 简化处理，暂时只支持纯文本
        raise HTTPException(status_code=501, detail="暂不支持此文件格式的文本提取")
    
    # 创建检测请求
    request = OriginalityCheckRequest(
        content=text_content,
        title=title or file.filename,
        check_citations=check_citations,
        exclude_whitelist=exclude_whitelist
    )
    
    # 调用检测接口
    return await check_originality(
        request=request,
        background_tasks=BackgroundTasks(),
        db=db,
        redis=redis,
        current_user=current_user
    )


@router.get("/stats")
async def get_originality_stats(
    days: int = Query(default=7, description="统计天数"),
    db: AsyncIOMotorDatabase = Depends(get_db),
    current_user: dict = Depends(get_current_user)
) -> Dict[str, Any]:
    """
    获取原创性检测统计信息
    """
    # 计算时间范围
    end_date = datetime.now()
    start_date = end_date - timedelta(days=days)
    
    # 查询统计数据
    report_collection = db["originality_reports"]
    pipeline = [
        {
            "$match": {
                "metadata.user_id": current_user.get('id'),
                "checked_at": {"$gte": start_date, "$lte": end_date}
            }
        },
        {
            "$group": {
                "_id": None,
                "total_checks": {"$sum": 1},
                "avg_originality": {"$avg": "$originality_score"},
                "min_originality": {"$min": "$originality_score"},
                "max_originality": {"$max": "$originality_score"}
            }
        }
    ]
    
    results = await report_collection.aggregate(pipeline).to_list(1)
    
    if not results:
        return {
            'period_days': days,
            'total_checks': 0,
            'avg_originality': 0,
            'min_originality': 0,
            'max_originality': 0
        }
    
    stats = results[0]
    return {
        'period_days': days,
        'total_checks': stats['total_checks'],
        'avg_originality': round(stats['avg_originality'], 2),
        'min_originality': round(stats['min_originality'], 2),
        'max_originality': round(stats['max_originality'], 2)
    }


# 辅助函数
async def generate_report_file(
    report_data: ReportData,
    detection_result,
    format: str,
    repository: FingerprintRepository
):
    """
    Background task: render the report file and persist its record.

    Args:
        report_data: assembled report payload.
        detection_result: raw detector output, passed through to the generator.
        format: one of 'html', 'pdf', 'json'; any other value skips file output.
        repository: fingerprint repository used to persist the report record.
    """
    # The generators write into /tmp/reports; create it up front so the first
    # report after a fresh boot doesn't fail with FileNotFoundError.
    os.makedirs("/tmp/reports", exist_ok=True)

    generator = OriginalityReportGenerator()

    if format == "html":
        output_path = f"/tmp/reports/{report_data.report_id}.html"
        generator.generate_html_report(detection_result, report_data, output_path)
    elif format == "pdf":
        output_path = f"/tmp/reports/{report_data.report_id}.pdf"
        generator.generate_pdf_report(detection_result, report_data, output_path)
    elif format == "json":
        output_path = f"/tmp/reports/{report_data.report_id}.json"
        generator.generate_json_report(detection_result, report_data, output_path)

    # Persist the report record regardless of the rendered format.
    await generator.save_report_to_db(report_data, repository)


# 错误处理
@router.exception_handler(ValueError)
async def value_error_handler(request, exc):
    """Map ValueError to a 400 JSON response.

    NOTE(review): the previous code *returned* an HTTPException instance,
    which is not a valid handler result — handlers must return a Response.
    Also, FastAPI's APIRouter does not support @exception_handler; these
    handlers should be registered on the FastAPI app — confirm the wiring.
    """
    return JSONResponse(status_code=400, content={"detail": str(exc)})


@router.exception_handler(Exception)
async def general_exception_handler(request, exc):
    """Map any unhandled exception to a generic 500 JSON response.

    NOTE(review): returning an HTTPException (previous behavior) does not
    produce an HTTP response; handlers must return a Response.  APIRouter
    also lacks an exception_handler decorator — register on the app instead.
    """
    return JSONResponse(status_code=500, content={"detail": "内部服务器错误"})


