"""爬虫管理路由"""
from fastapi import APIRouter, HTTPException, BackgroundTasks, UploadFile, File, Body
from typing import Dict, Any, Optional, List
from app.scrapers.legal_exam_scraper import legal_exam_scraper
from app.scrapers.zhuma_scraper import zhuma_scraper
from app.db.question_bank import add_questions
import json
import csv
import io

router = APIRouter()

# Mutable module-level scraper state, shared between the HTTP endpoints and
# the background scraper tasks (single-process only; not shared across workers).
scraper_status = {
    "is_running": False,  # True while a scraper background task is active
    "current_year": None,  # year currently being scraped, or None when idle
    "total_scraped": 0,  # running count of questions fetched so far
    "errors": []  # accumulated error messages from failed runs
}


@router.post("/scraper/start")
async def start_scraper(
    years: Optional[list] = None,
    background_tasks: BackgroundTasks = None
):
    """Start the legal-exam scraper as a background task.

    Args:
        years: Years to scrape; defaults to the last five years (including
            the current year) when omitted.
        background_tasks: FastAPI-injected task queue used to run the
            scraper without blocking the request.

    Raises:
        HTTPException: 400 if a scraper run is already in progress,
            500 if no background task queue is available.
    """
    if scraper_status["is_running"]:
        raise HTTPException(status_code=400, detail="爬虫正在运行中")

    if years is None:
        from datetime import datetime
        current_year = datetime.now().year
        years = list(range(current_year - 4, current_year + 1))

    # Bug fix: previously the status was flipped to "running" even when no
    # BackgroundTasks instance was supplied, so no task was ever scheduled
    # and is_running stayed True forever. Fail fast before mutating state.
    if background_tasks is None:
        raise HTTPException(status_code=500, detail="后台任务不可用")

    scraper_status["is_running"] = True
    scraper_status["current_year"] = None
    scraper_status["total_scraped"] = 0
    scraper_status["errors"] = []

    # Run the scraper outside the request/response cycle.
    background_tasks.add_task(run_scraper_task, years)

    return {
        "message": "爬虫已启动",
        "years": years,
        "status": "running"
    }


def run_scraper_task(years: list):
    """Background worker: scrape the given years and store the questions.

    Updates the shared scraper_status dict as it progresses and always
    clears the running flag on exit, even when the scrape fails.
    """
    try:
        scraped = legal_exam_scraper.scrape_recent_years(years)
        scraper_status["total_scraped"] = len(scraped)

        # Persist everything we collected into the question bank.
        add_questions(scraped)

        print(f"✅ 爬虫任务完成，共爬取 {len(scraped)} 道题目")
    except Exception as exc:
        message = f"爬虫任务失败: {str(exc)}"
        scraper_status["errors"].append(message)
        print(f"❌ {message}")
    finally:
        # Reset the running state no matter how the task ended.
        scraper_status["current_year"] = None
        scraper_status["is_running"] = False


@router.get("/scraper/status")
async def get_scraper_status():
    """Return the live scraper run state combined with scraper statistics."""
    return {
        "is_running": scraper_status["is_running"],
        "current_year": scraper_status["current_year"],
        "total_scraped": scraper_status["total_scraped"],
        "errors": scraper_status["errors"],
        "stats": legal_exam_scraper.get_stats(),
    }


@router.get("/scraper/stats")
async def get_scraper_stats():
    """Expose the scraper's own aggregate statistics."""
    return legal_exam_scraper.get_stats()


@router.post("/scraper/login")
async def login_zhuma(
    username: str = Body(...),
    password: Optional[str] = Body(None),
    token: Optional[str] = Body(None),
    cookies: Optional[Dict[str, str]] = Body(None),
    cookies_str: Optional[str] = Body(None)  # raw cookie string copied from the browser
):
    """Log in to Zhuma Fakao.

    Three mutually exclusive credential styles are accepted:
      1. username + password,
      2. token + cookies copied from an authenticated browser session,
      3. a raw cookie string copied from the browser's Network tab.
    """
    # Style 3: derive the token directly from a pasted cookie string.
    if cookies_str:
        zhuma_scraper.username = username
        if not zhuma_scraper.set_token_from_cookies(cookies_str):
            raise HTTPException(status_code=401, detail="无法从cookies中提取token")
        zhuma_scraper._save_session()
        return {
            "success": True,
            "message": "使用cookies登录成功",
            "username": username
        }

    # Style 2: token + cookie dict copied from the browser.
    if token and cookies:
        result = zhuma_scraper.login(username, password or "", token=token, cookies=cookies)
    else:
        # Style 1: classic username/password login.
        if not password:
            raise HTTPException(status_code=400, detail="请提供密码、token+cookies或cookies字符串")
        result = zhuma_scraper.login(username, password)

    if not result.get("success"):
        raise HTTPException(status_code=401, detail=result.get("message", "登录失败"))
    return {
        "success": True,
        "message": "登录成功",
        "username": username
    }


@router.post("/scraper/set-token")
async def set_token(
    token: str = Body(...),
    username: str = Body("default_user")
):
    """Directly install a Zhuma Fakao token (fast login path).

    The same value is assigned to token/stoken/mtoken, which the scraper
    treats as interchangeable.

    Args:
        token: Token value (token, stoken, mtoken are usually identical).
        username: Account name the session is stored under (optional,
            defaults to "default_user").

    Returns:
        A success payload including the verified login state.
    """
    zhuma_scraper.username = username
    zhuma_scraper.token = token
    zhuma_scraper.stoken = token
    zhuma_scraper.mtoken = token
    zhuma_scraper.logged_in = True

    # Propagate the new token into the scraper's HTTP request headers.
    zhuma_scraper._update_headers()

    # Best-effort: persist the session in Redis for 7 days so the token
    # survives process restarts. Failures are logged but never fatal.
    # (The redundant local `import json` was removed — json is already
    # imported at module level.)
    try:
        from app.core.database import RedisClient
        from datetime import datetime

        redis = RedisClient.get_client()
        if redis:
            session_key = f"zhuma:session:{username}"
            session_data = {
                "username": username,
                "cookies": {},
                "token": token,
                "stoken": token,
                "mtoken": token,
                "logged_in_at": datetime.now().isoformat()
            }
            redis.setex(session_key, 7 * 24 * 3600, json.dumps(session_data, ensure_ascii=False))
    except Exception as e:
        print(f"保存token到Redis失败: {e}")

    # Confirm the token actually authenticates before reporting success.
    status = zhuma_scraper.check_login_status()

    return {
        "success": True,
        "message": "Token设置成功",
        "username": username,
        "logged_in": status.get("logged_in", False)
    }


@router.post("/scraper/test-select-catalog")
async def test_select_catalog(
    answer_id: str = Body(...),
    catalog_id: int = Body(...),
    question_type_id: int = Body(705),
    business_type_id: int = Body(104),
    kind_id: int = Body(2)
):
    """Debug helper: exercise the upstream selectByCatalogId endpoint."""
    # Reject the request early if the session is not authenticated.
    if not zhuma_scraper.check_login_status().get("logged_in"):
        raise HTTPException(status_code=401, detail="请先登录")

    outcome = zhuma_scraper.select_by_catalog_id(
        answer_id=answer_id,
        catalog_id=catalog_id,
        question_type_id=question_type_id,
        business_type_id=business_type_id,
        kind_id=kind_id
    )

    return {
        "success": True,
        "answer_id": answer_id,
        "catalog_id": catalog_id,
        "result": outcome,
        "message": "selectByCatalogId 测试完成"
    }


@router.post("/scraper/test-question-list")
async def test_question_list(
    answer_id: str = Body(...),
    is_begin: int = Body(1),
    question_ids: Optional[List[int]] = Body(None),
    batch_size: int = Body(50)
):
    """Debug helper: fetch a question list and parse a small sample of it."""
    # Reject the request early if the session is not authenticated.
    if not zhuma_scraper.check_login_status().get("logged_in"):
        raise HTTPException(status_code=401, detail="请先登录")

    raw_items = zhuma_scraper.get_question_list(answer_id, is_begin, question_ids, batch_size)

    # Parse only the first three items as a representative sample.
    sample = []
    for raw in raw_items[:3]:
        parsed = zhuma_scraper._parse_question_item(raw, None)
        if parsed:
            sample.append(parsed)

    return {
        "success": True,
        "answer_id": answer_id,
        "raw_count": len(raw_items),
        "parsed_count": len(sample),
        # Up to two raw items for inspection.
        "raw_questions": raw_items[:2] if len(raw_items) > 2 else raw_items,
        "parsed_questions": sample,
        "message": f"成功获取 {len(raw_items)} 道题目（显示前2个原始数据，前3个解析后数据）"
    }


@router.get("/scraper/login-status")
async def get_login_status():
    """Report whether the Zhuma Fakao session is currently authenticated."""
    status = zhuma_scraper.check_login_status()
    return status


@router.post("/scraper/import")
async def import_questions(
    file: UploadFile = File(...),
    format: str = "json"  # "json" or "csv"; passed as a query param or form-data
):
    """Manually import questions from an uploaded JSON or CSV file.

    JSON uploads must contain an array of question objects; CSV uploads are
    mapped column-by-column (see _csv_row_to_question). Rows missing either
    a question_id or content are silently skipped.

    Raises:
        HTTPException: 400 for malformed input, 500 for unexpected failures.
    """
    try:
        content = await file.read()
        # Bug fix: decode with utf-8-sig to strip the BOM that Excel prepends
        # to exported files. With plain utf-8 the BOM stays glued to the first
        # CSV header ("\ufeffquestion_id"), so the question_id lookup fails
        # and every row is skipped. utf-8-sig also decodes plain UTF-8.
        text = content.decode('utf-8-sig')

        if format == "json":
            # JSON import: must be a top-level array of question objects.
            try:
                questions = json.loads(text)
                if not isinstance(questions, list):
                    raise HTTPException(status_code=400, detail="JSON格式错误：应为题目数组")
            except json.JSONDecodeError as e:
                raise HTTPException(status_code=400, detail=f"JSON解析失败: {str(e)}")

        elif format == "csv":
            # CSV import: one question per row.
            questions = []
            try:
                reader = csv.DictReader(io.StringIO(text))
                for row in reader:
                    question = _csv_row_to_question(row)
                    # Require both an id and content; drop incomplete rows.
                    if question["question_id"] and question["content"]:
                        questions.append(question)
            except Exception as e:
                raise HTTPException(status_code=400, detail=f"CSV解析失败: {str(e)}")
        else:
            raise HTTPException(status_code=400, detail=f"不支持的格式: {format}")

        if not questions:
            raise HTTPException(status_code=400, detail="未找到有效题目")

        # Add the imported questions to the question bank.
        result = add_questions(questions)

        return {
            "success": True,
            "message": f"成功导入 {result['added']} 道题目",
            "added": result["added"],
            "skipped": result["skipped"],
            "errors": result.get("errors", [])
        }

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"导入失败: {str(e)}")


def _csv_row_to_question(row: Dict[str, str]) -> Dict[str, Any]:
    """Map one CSV row onto the internal question dict format."""
    year = row.get("year")
    difficulty_score = row.get("difficulty_score")
    knowledge_points = row.get("knowledge_points")
    return {
        "question_id": row.get("question_id", ""),
        "content": row.get("content", ""),
        "options": [
            row.get("option_a", ""),
            row.get("option_b", ""),
            row.get("option_c", ""),
            row.get("option_d", "")
        ],
        "correct_answer": row.get("correct_answer", ""),
        "explanation": row.get("explanation", ""),
        "type": row.get("type", "single_choice"),
        "difficulty": row.get("difficulty", "medium"),
        "category": row.get("category", ""),
        "subject": row.get("subject", ""),
        "year": int(year) if year else None,
        "exam_type": row.get("exam_type", "客观题"),
        "source": "imported",
        "knowledge_points": knowledge_points.split(",") if knowledge_points else [],
        "difficulty_score": float(difficulty_score) if difficulty_score else None,
        "wrong_count": 0,
        "correct_count": 0
    }


@router.post("/scraper/start-zhuma")
async def start_zhuma_scraper(
    catalog_info: Optional[List[Dict[str, Any]]] = Body(None),
    answer_ids: Optional[List[str]] = Body(None),  # legacy interface
    years: Optional[List[int]] = Body(None),
    background_tasks: BackgroundTasks = None
):
    """Start the Zhuma Fakao scraper as a background task.

    Args:
        catalog_info: Preferred input; a list of catalog descriptors:
            [{
                "answer_id": "1986985830593683456",
                "catalog_id": 9305,
                "question_type_id": 705,
                "business_type_id": 104,
                "kind_id": 2,
                "batch_size": 50  # optional: questionIds per request
            }]
        answer_ids: Legacy alternative; answer IDs only (needs catalog_id
            to actually scrape, so catalog_info is strongly preferred).
        years: Optional list used to tag scraped questions with a year.

    Raises:
        HTTPException: 400 when already running or no input given,
            401 when not logged in, 500 when no task queue is available.
    """
    if scraper_status["is_running"]:
        raise HTTPException(status_code=400, detail="爬虫正在运行中")

    # Refuse to start without an authenticated upstream session.
    login_status = zhuma_scraper.check_login_status()
    if not login_status.get("logged_in"):
        raise HTTPException(status_code=401, detail="请先登录竹马法考账号")

    # At least one input style is required; catalog_info is preferred.
    if not catalog_info and not answer_ids:
        raise HTTPException(
            status_code=400, 
            detail="请提供catalog_info（推荐）或answer_ids。catalog_info格式：{\"answer_id\": \"...\", \"catalog_id\": 9305, ...}"
        )

    # Default every target to the current year when no years were supplied.
    if not years:
        from datetime import datetime
        current_year = datetime.now().year
        targets = catalog_info if catalog_info else answer_ids
        years = [current_year] * len(targets)

    # Bug fix: previously the running flag was set even when no
    # BackgroundTasks instance was available, so no task ever ran and
    # is_running stayed True forever. Fail fast before mutating state.
    if background_tasks is None:
        raise HTTPException(status_code=500, detail="后台任务不可用")

    scraper_status["is_running"] = True
    scraper_status["current_year"] = None
    scraper_status["total_scraped"] = 0
    scraper_status["errors"] = []

    # Run the scraper outside the request/response cycle.
    background_tasks.add_task(run_zhuma_scraper_task, catalog_info, answer_ids, years)

    return {
        "message": "竹马法考爬虫已启动",
        "catalog_info": catalog_info,
        "answer_ids": answer_ids,
        "years": years,
        "status": "running"
    }


def run_zhuma_scraper_task(catalog_info: Optional[List[Dict[str, Any]]], answer_ids: Optional[List[str]], years: List[int]):
    """Background worker for the Zhuma Fakao scraper.

    Walks either the catalog_info descriptors (preferred) or the legacy
    answer_ids list, accumulates all scraped questions, stores them in the
    question bank, and keeps the shared scraper_status dict up to date.
    The running flag is always cleared on exit.
    """
    try:
        from datetime import datetime
        import time

        collected = []
        fallback_year = datetime.now().year

        if catalog_info:
            for index, info in enumerate(catalog_info):
                answer_id = info.get("answer_id")
                catalog_id = info.get("catalog_id")
                # Both IDs are mandatory for this scraping mode.
                if not answer_id or not catalog_id:
                    continue

                year = years[index] if index < len(years) else fallback_year
                scraper_status["current_year"] = year

                print(f"📥 正在爬取 answerId: {answer_id}, catalogId: {catalog_id}, 年份: {year}")

                batch = zhuma_scraper.scrape_by_catalog(
                    answer_id=answer_id,
                    catalog_id=catalog_id,
                    year=year,
                    question_type_id=info.get("question_type_id", 705),
                    business_type_id=info.get("business_type_id", 104),
                    kind_id=info.get("kind_id", 2),
                    max_questions=info.get("max_questions", 200),
                    batch_size=info.get("batch_size", 50)  # can be raised to 50-100
                )

                collected.extend(batch)
                scraper_status["total_scraped"] = len(collected)
                print(f"✅ 已爬取 {len(collected)} 道题目")

                # Be polite to the upstream service between catalogs.
                time.sleep(2)

        elif answer_ids:
            # Legacy path: without catalog_id we cannot actually scrape,
            # so this only logs guidance for each requested answer_id.
            print("⚠️ 仅提供answer_ids，建议使用catalog_info参数")
            for index, answer_id in enumerate(answer_ids):
                year = years[index] if index < len(years) else fallback_year
                scraper_status["current_year"] = year

                print(f"📥 正在爬取 answerId: {answer_id}, 年份: {year}")
                print("⚠️ 需要catalog_id信息，请使用catalog_info参数")

        # Persist whatever we collected into the question bank.
        if collected:
            result = add_questions(collected)
            print(f"✅ 竹马法考爬虫任务完成，共爬取 {len(collected)} 道题目，成功添加 {result['added']} 道")
        else:
            print("⚠️ 未爬取到任何题目")

    except Exception as e:
        error_msg = f"竹马法考爬虫任务失败: {str(e)}"
        scraper_status["errors"].append(error_msg)
        print(f"❌ {error_msg}")
        import traceback
        traceback.print_exc()
    finally:
        scraper_status["is_running"] = False
        scraper_status["current_year"] = None


@router.post("/scraper/migrate")
async def migrate_questions():
    """Migrate the in-memory question bank into the storage service (Redis)."""
    try:
        from app.db.question_bank import QUESTION_BANK
        from app.services.question_storage_service import question_storage_service

        outcome = question_storage_service.migrate_from_memory(QUESTION_BANK)

        return {
            "success": True,
            "message": f"迁移完成：添加 {outcome['added']} 道，跳过 {outcome['skipped']} 道",
            "added": outcome["added"],
            "skipped": outcome["skipped"],
            "errors": outcome.get("errors", [])
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"迁移失败: {str(e)}")


@router.get("/scraper/question-stats")
async def get_question_stats():
    """Return aggregate question counts from the storage service."""
    try:
        from app.services.question_storage_service import question_storage_service
        return question_storage_service.get_question_count()
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取统计失败: {str(e)}")


@router.get("/scraper/questions")
async def get_questions(
    subject: Optional[str] = None,
    year: Optional[int] = None,
    limit: Optional[int] = None
):
    """Query stored questions, optionally filtered by subject and/or year.

    Args:
        subject: Restrict results to this subject.
        year: Restrict results to this exam year.
        limit: Maximum number of questions to return.
    """
    try:
        from app.services.question_storage_service import question_storage_service

        if subject:
            questions = question_storage_service.get_questions_by_subject(subject)
            if year:
                # Bug fix: previously `year` was silently ignored whenever
                # `subject` was also supplied. Questions are dicts with a
                # "year" field (see the CSV import mapping), so apply the
                # year filter on top of the subject filter.
                questions = [q for q in questions if q.get("year") == year]
        elif year:
            questions = question_storage_service.get_questions_by_year(year)
        else:
            questions = question_storage_service.get_all_questions(limit=limit)

        # Truncate results from the subject/year branches to the limit.
        if limit and len(questions) > limit:
            questions = questions[:limit]

        return {
            "total": len(questions),
            "questions": questions
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"查询失败: {str(e)}")

