import datetime
from typing import List, Optional

from fastapi import BackgroundTasks, Depends, Query, Request
from pydantic import BaseModel, validator
from sqlalchemy.orm import Session

from db.base import SessionLocal
from db.models.data_collect import Article, CrawlTask, DataSource
from db.models.user import User
from db.repository.user import db_check_user_permission
from db.session import get_db
from schemas.response_entity import JsonModel
from service.crawler_service.collect_service import (
    crawl_multiple_sources,
    get_crawl_progress,
    get_enterprise_info,
    get_selected_sources,
    get_update_frequency,
    update_global_config,
)
from service.crawler_service.spider_factory import SpiderFactory



class CrawlTaskCreateReq(BaseModel):
    """Request body for creating a crawl task."""

    token: str
    sources: List[str]  # one or more data-source ids
    time_range: str  # one of '1d' / '3d' / '1w' / '1m'

    # BUG FIX: this was a plain @classmethod, so pydantic never invoked it and
    # invalid time_range values were silently accepted. Registering it as a
    # field validator makes the check actually run on construction (and on
    # assignment, via validate_assignment below).
    @validator("time_range")
    def validate_time_range(cls, v):
        if v not in ['1d', '3d', '1w', '1m']:
            raise ValueError("time_range must be one of '1d', '3d', '1w', '1m'")
        return v

    class Config:
        # Re-validate fields when they are assigned after construction.
        validate_assignment = True


class GlobalConfigUpdateReq(BaseModel):
    """Partial update of the global crawler configuration.

    Fields left as None are not touched by the update endpoint.
    """
    token: str
    # Data-source ids the crawler should use.
    selected_sources: Optional[List[str]] = None
    # Expected to be one of '1d'/'3d'/'1w'/'1m'; validated by the endpoint.
    update_frequency: Optional[str] = None
    # Free-form enterprise description used for relevance matching.
    enterprise_info: Optional[str] = None

    class Config:
        # Re-validate fields when they are assigned after construction.
        validate_assignment = True


def create_crawl_task(request: Request, req: CrawlTaskCreateReq, background_tasks: BackgroundTasks, db: Session = Depends(get_db)):
    """Create a multi-source crawl task and schedule it as a background job.

    Returns 401 when the user lacks the "image" permission, 400 when any
    requested source is missing/disabled or the source list is empty,
    otherwise 200 with the new task id.
    """
    user: User = request.state.user
    if not db_check_user_permission(user, "image"):
        return JsonModel(code=401, data=None, msg="用户无权限").to_response()

    if not req.sources:
        return JsonModel(code=400, data=None, msg="未指定有效的数据源").to_response()

    # Fetch all requested sources in ONE query instead of one query per id
    # (the original issued len(sources) lookups).
    active_ids = {
        row[0]
        for row in db.query(DataSource.source_id).filter(
            DataSource.source_id.in_(req.sources),
            DataSource.is_active == True,
        ).all()
    }
    # Walk the request in order so the FIRST missing/disabled source is the
    # one reported, matching the original per-source behavior.
    valid_sources = []
    for source_id in req.sources:
        if source_id not in active_ids:
            return JsonModel(code=400, data=None, msg=f"数据源 {source_id} 不存在或已禁用").to_response()
        valid_sources.append(source_id)

    # Single timestamp so created_at == updated_at on insert.
    # NOTE(review): naive UTC; utcnow() is deprecated since Python 3.12 —
    # confirm the DB columns are naive before switching to aware datetimes.
    now = datetime.datetime.utcnow()
    task = CrawlTask(
        user_id=user.id,
        source=",".join(valid_sources),  # multiple sources joined by comma
        status=0,
        progress=0.0,
        total_count=0,
        finished_count=0,
        params={
            "sources": valid_sources,
            "time_range": req.time_range,
        },
        created_at=now,
        updated_at=now,
    )
    db.add(task)
    db.commit()
    db.refresh(task)

    # Run the multi-source crawl after the response has been sent.
    background_tasks.add_task(crawl_multiple_sources, task.id)
    return JsonModel(code=200, data={"task_id": task.id}, msg="成功").to_response()


def get_task_progress(task_id: int = Query(...)):
    """Return the progress payload of a crawl task, or 404 if unknown."""
    progress = get_crawl_progress(task_id)
    if progress:
        return JsonModel(code=200, data=progress, msg="成功").to_response()
    return JsonModel(code=404, data=None, msg="任务不存在").to_response()


def _task_sources_of(task):
    """Return the task's data-source id list.

    Prefers params['sources']; falls back to splitting the legacy
    comma-separated `source` column for rows created before params existed.
    """
    if task.params and 'sources' in task.params:
        return task.params['sources']
    return [s.strip() for s in task.source.split(',')] if task.source else []


def _empty_articles_response():
    """Canonical 200 response with an empty article list."""
    return JsonModel(code=200, data={"total": 0, "articles": []}, msg="成功").to_response()


def get_task_result(
    task_id: int = Query(...),
    source: Optional[str] = Query(None, description="数据源ID，单选"),
    category: Optional[str] = Query(None, description="数据源分类/标签"),
    start_time: Optional[str] = Query(None, description="发布时间起始，格式：YYYY-MM-DD"),
    end_time: Optional[str] = Query(None, description="发布时间结束，格式：YYYY-MM-DD"),
    db: Session = Depends(get_db)
):
    """List articles collected by a task, with optional filters.

    Filters: single source id, data-source category, and an inclusive
    publish-time date window. Returns 404 for unknown tasks, 400 for
    malformed dates, otherwise 200 with {"total", "articles"}.
    """
    task = db.query(CrawlTask).filter(CrawlTask.id == task_id).first()
    if not task:
        return JsonModel(code=404, data=None, msg="任务不存在").to_response()

    task_sources = _task_sources_of(task)
    if not task_sources:
        return _empty_articles_response()

    # Base query: articles from the task's sources, created after the task
    # started, with a known publish time.
    query = db.query(Article).filter(
        Article.source.in_(task_sources),
        Article.publish_time != None,  # noqa: E711 — SQLAlchemy "IS NOT NULL"
        Article.created_at >= task.created_at
    )

    # Single-source filter: the source must belong to the task.
    if source:
        if source not in task_sources:
            return _empty_articles_response()
        query = query.filter(Article.source == source)

    # Category filter: intersect the task's sources with the category's
    # active source ids; an empty intersection means no results.
    if category:
        category_rows = db.query(DataSource.source_id).filter(
            DataSource.category == category,
            DataSource.is_active == True
        ).all()
        category_source_ids = [row[0] for row in category_rows]
        valid_sources = list(set(task_sources) & set(category_source_ids)) if category_source_ids else []
        if not valid_sources:
            return _empty_articles_response()
        query = query.filter(Article.source.in_(valid_sources))

    # Publish-time window. The end date is inclusive: extend it to the next
    # midnight and compare with '<'.
    if start_time:
        try:
            start_dt = datetime.datetime.strptime(start_time, '%Y-%m-%d')
        except ValueError:
            return JsonModel(code=400, data=None, msg="开始时间格式错误，应为：YYYY-MM-DD").to_response()
        query = query.filter(Article.publish_time >= start_dt)

    if end_time:
        try:
            end_dt = datetime.datetime.strptime(end_time, '%Y-%m-%d') + datetime.timedelta(days=1)
        except ValueError:
            return JsonModel(code=400, data=None, msg="结束时间格式错误，应为：YYYY-MM-DD").to_response()
        query = query.filter(Article.publish_time < end_dt)

    articles = query.order_by(Article.publish_time.desc()).all()

    result = [
        {
            "id": a.id,
            "title": a.title,
            "summary": a.summary[:100] if a.summary else '',
            "publish_time": a.publish_time.strftime('%Y-%m-%d %H:%M:%S') if a.publish_time else None,
            "source": a.source,
            # Older Article rows may predate these columns; default safely.
            "is_enterprise_related": getattr(a, 'is_enterprise_related', False),
            "relevance_score": getattr(a, 'relevance_score', 0.0),
        }
        for a in articles
    ]
    return JsonModel(code=200, data={"total": len(result), "articles": result}, msg="成功").to_response()


class ImportReq(BaseModel):
    """Request body for importing a crawl task's articles."""
    # Id of the CrawlTask whose articles should be imported.
    task_id: int


def import_articles(req: ImportReq, db: Session = Depends(get_db)):
    """Mark a task's collected articles as imported (crawl_status = 1).

    Returns 404 for unknown tasks, 400 when the task has no sources,
    otherwise 200 with the per-article success/failed id lists.
    """
    task = db.query(CrawlTask).filter(CrawlTask.id == req.task_id).first()
    if not task:
        return JsonModel(code=404, data=None, msg="任务不存在").to_response()

    # Prefer params['sources']; fall back to the legacy comma-separated
    # `source` column for old rows.
    if task.params and 'sources' in task.params:
        sources = task.params['sources']
    else:
        sources = [s.strip() for s in task.source.split(',')] if task.source else []

    if not sources:
        return JsonModel(code=400, data=None, msg="任务数据源为空").to_response()

    articles = db.query(Article).filter(
        Article.source.in_(sources),
        Article.publish_time != None,  # noqa: E711 — SQLAlchemy "IS NOT NULL"
        Article.created_at >= task.created_at
    ).all()

    success, failed = [], []
    for a in articles:
        try:
            a.crawl_status = 1
            db.commit()
            success.append(a.id)
        except Exception:
            # BUG FIX: a failed commit leaves the session in a broken state;
            # without this rollback every subsequent commit (including the
            # final status update below) would also fail.
            db.rollback()
            failed.append(a.id)

    task.status = 3  # 3 = imported/finished
    db.commit()
    return JsonModel(code=200, data={"success": success, "failed": failed}, msg="成功").to_response()


def get_data_sources():
    """Return the list of data sources the spider factory can handle."""
    available = SpiderFactory.get_available_spiders()
    payload = {"sources": available}
    return JsonModel(code=200, data=payload, msg="成功").to_response()


def get_source_latest_info(sources: str = Query(...)):
    """
    Fetch the latest articles for several data sources.

    sources: comma-separated data-source ids, e.g. "mem_bl,mem_tb,mot_gov".
    Each article is flagged "crawled"/"not_crawled" depending on whether its
    URL already exists in the Article table. A failing source is skipped so
    it cannot break the others (best effort). Results are sorted newest
    first by publish time.
    """
    source_list = [s.strip() for s in sources.split(',')]
    all_results = []

    # PERF FIX: open ONE session for the whole request instead of creating
    # and closing a SessionLocal per source inside the loop.
    db = SessionLocal()
    try:
        for source_id in source_list:
            try:
                spider = SpiderFactory.create_spider(source_id)
                if not spider:
                    continue

                articles = spider.get_latest_articles(limit=20)
                # Avoid issuing an `IN ()` query when the spider returns nothing.
                if not articles:
                    continue

                # Look up which of these URLs were already collected.
                urls = [a['url'] for a in articles]
                exist_urls = {
                    row[0]
                    for row in db.query(Article.url).filter(Article.url.in_(urls)).all()
                }

                for article in articles:
                    all_results.append({
                        "title": article['title'],
                        "publish_time": article['date'],
                        "url": article['url'],
                        "status": "crawled" if article['url'] in exist_urls else "not_crawled",
                        "source": source_id,
                    })
            except Exception as e:
                # Best effort: log and move on to the next source.
                print(f"获取数据源 {source_id} 信息失败: {e}")
                continue
    finally:
        db.close()

    # Newest first.
    all_results.sort(key=lambda x: x.get('publish_time', ''), reverse=True)

    return JsonModel(code=200, data={"list": all_results}, msg="成功").to_response()


def get_user_crawl_status(request: Request, db: Session = Depends(get_db)):
    """Report the state of the current user's most recent crawl task."""
    user: User = request.state.user
    if not db_check_user_permission(user, "image"):
        return JsonModel(code=401, data=None, msg=f"用户无权限").to_response()

    latest = (
        db.query(CrawlTask)
        .filter(CrawlTask.user_id == user.id)
        .order_by(CrawlTask.created_at.desc())
        .first()
    )
    if latest is None:
        return JsonModel(code=200, data={"status": "none", "task_id": None}, msg="成功").to_response()

    # Map DB status codes to API status strings; anything else reads as "none".
    status_names = {2: "can_import", 3: "none", -1: "failed", 1: "in_progress"}
    payload = {"status": status_names.get(latest.status, "none"), "task_id": latest.id}
    return JsonModel(code=200, data=payload, msg="成功").to_response()


# Newly added API endpoints (global configuration)

def get_global_config_api(request: Request, db: Session = Depends(get_db)):
    """Return the global crawler configuration for an authorized user."""
    user: User = request.state.user
    if not db_check_user_permission(user, "image"):
        return JsonModel(code=401, data=None, msg="用户无权限").to_response()

    # Collect each config value, then assemble the payload.
    selected = get_selected_sources()
    frequency = get_update_frequency()
    enterprise = get_enterprise_info()
    payload = {
        "selected_sources": selected,
        "update_frequency": frequency,
        "enterprise_info": enterprise,
    }
    return JsonModel(code=200, data=payload, msg="成功").to_response()


def update_global_config_api(request: Request, req: GlobalConfigUpdateReq, db: Session = Depends(get_db)):
    """Apply partial updates to the global crawler configuration.

    Only non-None fields of the request are written. Updates are applied in
    order (sources, frequency, enterprise info); an invalid frequency aborts
    with 400 after any earlier writes, mirroring the original flow.
    """
    user: User = request.state.user
    if not db_check_user_permission(user, "image"):
        return JsonModel(code=401, data=None, msg="用户无权限").to_response()

    all_ok = True

    if req.selected_sources is not None:
        ok = update_global_config("selected_sources", req.selected_sources, "选中的数据源列表")
        all_ok = all_ok and ok

    if req.update_frequency is not None:
        if req.update_frequency not in ('1d', '3d', '1w', '1m'):
            return JsonModel(code=400, data=None, msg="更新频率必须是 1d、3d、1w、1m 之一").to_response()
        ok = update_global_config("update_frequency", req.update_frequency, "数据更新频率")
        all_ok = all_ok and ok

    if req.enterprise_info is not None:
        ok = update_global_config("enterprise_info", req.enterprise_info, "企业信息")
        all_ok = all_ok and ok

    if not all_ok:
        return JsonModel(code=500, data=None, msg="配置更新失败").to_response()
    return JsonModel(code=200, data=None, msg="配置更新成功").to_response()
