import datetime
import traceback
from dateutil.parser import parse as date_parse
from dateutil.relativedelta import relativedelta

import requests
from loguru import logger
from sqlalchemy.orm import Session

from db.base import SessionLocal
from db.models.data_collect import Article, CrawlTask, GlobalConfig
from utils.minio_utils import MinioRepository
from utils.file_utils import generate_md5_filename

from .collect_utils import (
    safe_request, extract_articles, extract_article_details, get_total_pages, LIST_URL, BASE_URL
)
from .spider_factory import SpiderFactory
from .relevance_service import judge_article_relevance


def parse_date(date_str):
    """Parse a date string into a datetime, returning None on failure.

    Chinese-format dates such as "2025年07月25日" are first normalized
    to ISO form ("2025-07-25") before being handed to dateutil's parser.
    Any parsing problem (including a non-string input) yields None.
    """
    try:
        # Normalize Chinese date format: 2025年07月25日 -> 2025-07-25
        if all(marker in date_str for marker in ('年', '月', '日')):
            import re
            m = re.search(r'(\d{4})年(\d{1,2})月(\d{1,2})日', date_str)
            if m:
                y, mo, d = m.groups()
                date_str = f"{y}-{mo.zfill(2)}-{d.zfill(2)}"
        return date_parse(date_str)
    except Exception:
        return None

def in_time_range(article_date, time_range):
    """Return True when article_date falls within the requested window.

    Supported windows: '3d' (3 days), '1w' (1 week), '1m' (1 month).
    A falsy article_date is never in range; an unrecognized window
    accepts every date.
    """
    now = datetime.datetime.now()
    if not article_date:
        return False
    # Lazily construct the delta only for a recognized window so an
    # unknown time_range short-circuits to True.
    window_kwargs = {'3d': {'days': 3}, '1w': {'weeks': 1}, '1m': {'months': 1}}
    kwargs = window_kwargs.get(time_range)
    if kwargs is None:
        return True
    return article_date >= now - relativedelta(**kwargs)

def crawl_task(task_id):
    """Run a single-source crawl for the given CrawlTask id.

    Pipeline: fetch the paginated listing starting at LIST_URL, keep
    articles inside the task's configured time range, drop URLs already
    stored in the Article table, then for each remaining article fetch
    the detail page, upload its attachments to MinIO, insert the Article
    row, and (when enterprise info is configured) score its relevance.
    Progress and status are committed to the task row as work proceeds;
    status codes used here: 1 = running, 2 = done, -1 = failed.
    """
    db: Session = SessionLocal()
    task = db.query(CrawlTask).filter(CrawlTask.id == task_id).first()
    if not task:
        logger.error(f"[crawl_task] 未找到任务: task_id={task_id}")
        return
    try:
        minio = MinioRepository()
        minio_bucket = 'attachments'

        logger.info(f"[crawl_task] 任务开始: task_id={task_id}")
        # 1. Read task parameters
        params = task.params or {}
        time_range = params.get('time_range')  # '3d'/'1w'/'1m'

        # 2. Collect articles from every listing page
        html = safe_request(LIST_URL)
        if not html:
            logger.error(f"[crawl_task] 获取首页失败: {LIST_URL}")
            task.status = -1
            db.commit()
            return
        total_pages = get_total_pages(html)
        all_articles = []
        stop_collect = False
        for page in range(0, total_pages):
            if stop_collect:
                break
            if page == 0:
                # First page was already fetched above; reuse it.
                page_html = html
            else:
                page_url = f"{BASE_URL}list_{page}.html"
                page_html = safe_request(page_url)
                if not page_html:
                    logger.warning(f"[crawl_task] 第{page+1}页请求失败，跳过")
                    continue
            articles = extract_articles(page_html)
            for a in articles:
                article_date = parse_date(a['date'])
                if in_time_range(article_date, time_range):
                    a['publish_time'] = article_date
                    all_articles.append(a)
                else:
                    # Stop paging at the first out-of-range article.
                    # NOTE(review): assumes the listing is ordered
                    # newest-first — confirm against the site.
                    stop_collect = True
                    break
        logger.info(f"[crawl_task] 采集到{len(all_articles)}篇文章（时间范围内）")
        # 3. Deduplicate against the DB; only crawl unseen URLs
        urls = [a['url'] for a in all_articles]
        exist_urls = set(r[0] for r in db.query(Article.url).filter(Article.url.in_(urls)).all())
        filtered_articles = [a for a in all_articles if a['url'] not in exist_urls]
        logger.info(f"[crawl_task] 未采集过的文章数: {len(filtered_articles)}")
        total = len(filtered_articles)
        task.total_count = total
        task.status = 1
        db.commit()
        finished = 0
        for article in filtered_articles:
            try:
                logger.info(f"[crawl_task] 采集文章: {article['title']} {article['url']}")
                detail_html = safe_request(article['url'])
                details, error = extract_article_details(detail_html, article['url'])
                minio_urls = {}
                if details and details['attachments']:
                    for att in details['attachments']:
                        att_url = att['url']
                        att_name = att['name']
                        try:
                            resp = requests.get(att_url, timeout=30)
                            if resp.status_code == 200:
                                file_data = resp.content
                                content_type = resp.headers.get('Content-Type', '')
                                
                                # Derive a unique object name from the
                                # file's MD5 plus its extension.
                                md5_filename = generate_md5_filename(
                                    file_data,
                                    filename=att_name,
                                    url=att_url,
                                    content_type=content_type
                                )
                                
                                minio_path = f"{datetime.datetime.now().strftime('%Y%m%d')}/{md5_filename}"
                                minio.upload_file(minio_bucket, minio_path, file_data, len(file_data))
                                minio_urls[att_name] = f"{minio_bucket}/{minio_path}"
                                logger.info(f"[crawl_task] 附件上传成功: {att_name} -> {md5_filename} -> {minio_bucket}/{minio_path}")
                            else:
                                logger.warning(f"[crawl_task] 附件下载失败: {att_url}")
                        except Exception as e:
                            # One failed attachment must not abort the article.
                            logger.error(f"[crawl_task] 附件上传异常: {att_url} {e}")
                            continue
                if details:
                    summary = details['content'][:100] if details['content'] else ''
                    article_dict = {
                        'title': article['title'],
                        'url': article['url'],
                        'content': details['content'],
                        'summary': summary,
                        'publish_time': article['publish_time'],
                        'source': task.source,
                        'crawl_status': 0,
                        'attachments': details['attachments'],
                        'minio_urls': minio_urls if minio_urls else None,
                        'created_at': datetime.datetime.utcnow()
                    }
                    db_article = Article(**article_dict)
                    db.add(db_article)
                    db.commit()
                    db.refresh(db_article)
                    logger.info(f"[crawl_task] 文章入库成功: {article['title']}")
                    
                    # Judge the article's relevance to the configured enterprise
                    try:
                        enterprise_info = get_enterprise_info()
                        if enterprise_info and enterprise_info.strip():
                            logger.info(f"[crawl_task] 开始判断文章相关性: {article['title']}")
                            is_relevant, score, reason = judge_article_relevance(
                                title=article['title'],
                                content=details['content'],
                                enterprise_info=enterprise_info
                            )
                            # Persist the relevance verdict on the article row
                            db_article.is_enterprise_related = is_relevant
                            db_article.relevance_score = score
                            db.commit()
                            logger.info(f"[crawl_task] 相关性判断完成: is_relevant={is_relevant}, score={score}, reason={reason}")
                        else:
                            logger.info(f"[crawl_task] 企业信息为空，跳过相关性判断: {article['title']}")
                    except Exception as e:
                        # Relevance scoring is best-effort; the article stays stored.
                        logger.error(f"[crawl_task] 相关性判断失败: {article['title']} {e}")
                else:
                    logger.warning(f"[crawl_task] 文章详情采集失败: {article['title']} {error}")
            except Exception as e:
                logger.error(f"[crawl_task] 采集文章异常: {article['title']} {e}")
            # Progress is counted per attempted article, success or not.
            finished += 1
            task.finished_count = finished
            task.progress = finished / total if total else 1.0
            task.updated_at = datetime.datetime.utcnow()
            db.commit()
        task.status = 2  # done
        task.progress = 1.0
        db.commit()
        logger.info(f"[crawl_task] 任务完成: task_id={task_id}")
    except Exception as e:
        logger.error(f"[crawl_task] 任务异常: task_id={task_id} {str(traceback.format_exc())}")
        task.status = -1
        db.commit()
    finally:
        db.close()

# Progress lookup

def get_crawl_progress(task_id):
    """Return a progress snapshot for a crawl task, or None if it doesn't exist."""
    session: Session = SessionLocal()
    try:
        task = session.query(CrawlTask).filter(CrawlTask.id == task_id).first()
        if task is None:
            return None
        fmt = '%Y-%m-%d %H:%M:%S'
        snapshot = {
            'status': task.status,
            'progress': task.progress,
            'total_count': task.total_count,
            'finished_count': task.finished_count,
            'created_at': task.created_at.strftime(fmt) if task.created_at else None,
            'updated_at': task.updated_at.strftime(fmt) if task.updated_at else None
        }
        return snapshot
    finally:
        session.close()

# Save an article

def save_article(article_dict):
    """Persist a single article row and return its primary key.

    Args:
        article_dict: column-name -> value mapping for the Article model.

    Returns:
        The id of the newly inserted Article.

    Raises:
        Re-raises any database error after rolling back the session,
        matching the rollback convention used by update_global_config.
    """
    db: Session = SessionLocal()
    try:
        article = Article(**article_dict)
        db.add(article)
        db.commit()
        return article.id
    except Exception:
        # Leave the session clean on failure; callers still see the error.
        db.rollback()
        raise
    finally:
        db.close()


# Global configuration management
def get_global_config(config_key: str = None):
    """Fetch one config value by key, or every config as a dict when no key is given."""
    session: Session = SessionLocal()
    try:
        if not config_key:
            rows = session.query(GlobalConfig).all()
            return {row.config_key: row.config_value for row in rows}
        row = session.query(GlobalConfig).filter(GlobalConfig.config_key == config_key).first()
        return row.config_value if row else None
    finally:
        session.close()


def update_global_config(config_key: str, config_value, description: str = None):
    """Create or update a global config entry; returns True on success, False on error."""
    session: Session = SessionLocal()
    try:
        existing = session.query(GlobalConfig).filter(GlobalConfig.config_key == config_key).first()
        if existing is None:
            # First write for this key: insert a fresh row.
            session.add(GlobalConfig(
                config_key=config_key,
                config_value=config_value,
                description=description or ""
            ))
        else:
            existing.config_value = config_value
            if description:
                existing.description = description
            existing.updated_at = datetime.datetime.utcnow()
        session.commit()
        return True
    except Exception as e:
        session.rollback()
        logger.error(f"更新全局配置失败: {e}")
        return False
    finally:
        session.close()


def get_selected_sources():
    """Return the configured data-source ids, falling back to the default set."""
    configured = get_global_config("selected_sources")
    if configured:
        return configured
    # Default sources (Ministry of Emergency Management + Ministry of Transport)
    return ["mem_bl", "mem_tb", "mem_yjbgg", "mem_tz", "mot_gov"]


def get_update_frequency():
    """Return the configured update frequency, defaulting to '1d'."""
    configured = get_global_config("update_frequency")
    if configured:
        return configured
    return "1d"


def get_enterprise_info():
    """Return the configured enterprise profile text ('' when unset)."""
    value = get_global_config("enterprise_info")
    return value if value else ""


# Multi-source crawl task
def crawl_multiple_sources(task_id):
    """Run a crawl across the data sources listed in the task's params.

    For each source id a spider is built via SpiderFactory, its articles
    are deduplicated against the Article table, attachments are uploaded
    to MinIO, articles are inserted and (when enterprise info is
    configured) relevance-scored. A JSON-serializable summary grouped by
    source is written back into the task's params under
    'results_by_source'. Status codes used here: 2 = done, -1 = failed.
    """
    db: Session = SessionLocal()
    task = db.query(CrawlTask).filter(CrawlTask.id == task_id).first()
    if not task:
        logger.error(f"[crawl_multiple_sources] 未找到任务: task_id={task_id}")
        return
    
    try:
        minio = MinioRepository()
        minio_bucket = 'attachments'
        
        logger.info(f"[crawl_multiple_sources] 多源采集任务开始: task_id={task_id}")
        
        # Read task parameters
        params = task.params or {}
        sources = params.get('sources', [])  # list of data-source ids to crawl
        time_range = params.get('time_range', '1d')
        
        if not sources:
            logger.error(f"[crawl_multiple_sources] 未指定数据源")
            task.status = -1
            db.commit()
            return
        
        # Crawl results grouped by source id
        results_by_source = {}
        total_articles = 0
        
        for source_id in sources:
            try:
                logger.info(f"[crawl_multiple_sources] 开始采集数据源: {source_id}")
                spider = SpiderFactory.create_spider(source_id)
                if not spider:
                    logger.error(f"[crawl_multiple_sources] 无法创建爬虫: {source_id}")
                    continue
                
                # Fetch the article list for this source
                articles = spider.crawl_articles(time_range)
                logger.info(f"[crawl_multiple_sources] 数据源 {source_id} 采集到 {len(articles)} 篇文章")
                
                # Deduplicate against the DB
                urls = [a['url'] for a in articles]
                exist_urls = set(r[0] for r in db.query(Article.url).filter(Article.url.in_(urls)).all())
                filtered_articles = [a for a in articles if a['url'] not in exist_urls]
                
                logger.info(f"[crawl_multiple_sources] 数据源 {source_id} 未采集过的文章数: {len(filtered_articles)}")
                
                # Process each article
                source_articles = []
                for article in filtered_articles:
                    try:
                        # Handle attachments
                        minio_urls = {}
                        if article.get('attachments'):
                            for att in article['attachments']:
                                try:
                                    att_url = att['url']
                                    att_title = att.get('title', att.get('name', ''))
                                    
                                    resp = requests.get(att_url, timeout=30)
                                    if resp.status_code == 200:
                                        file_data = resp.content
                                        content_type = resp.headers.get('Content-Type', '')
                                        
                                        # Derive a unique object name from the
                                        # file's MD5 plus its extension.
                                        md5_filename = generate_md5_filename(
                                            file_data,
                                            filename=att_title,
                                            url=att_url,
                                            content_type=content_type
                                        )
                                        
                                        minio_path = f"{datetime.datetime.now().strftime('%Y%m%d')}/{md5_filename}"
                                        minio.upload_file(minio_bucket, minio_path, file_data, len(file_data))
                                        minio_urls[att_title] = f"{minio_bucket}/{minio_path}"
                                        logger.info(f"[crawl_multiple_sources] 附件上传成功: {att_title} -> {md5_filename} -> {minio_bucket}/{minio_path}")
                                except Exception as e:
                                    # One failed attachment must not abort the article.
                                    logger.error(f"[crawl_multiple_sources] 附件上传失败: {att.get('url', 'unknown')} {e}")
                        
                        # Persist the article row
                        article_dict = {
                            'title': article['title'],
                            'url': article['url'],
                            'content': article.get('content', ''),
                            'summary': article.get('content', '')[:100] if article.get('content') else '',
                            'publish_time': article.get('publish_time'),
                            'source': source_id,
                            'crawl_status': 0,
                            'attachments': article.get('attachments', []),
                            'minio_urls': minio_urls if minio_urls else None,
                            'created_at': datetime.datetime.utcnow()
                        }
                        
                        db_article = Article(**article_dict)
                        db.add(db_article)
                        db.commit()
                        db.refresh(db_article)
                        
                        # Judge the article's relevance to the configured enterprise
                        try:
                            enterprise_info = get_enterprise_info()
                            if enterprise_info and enterprise_info.strip():
                                logger.info(f"[crawl_multiple_sources] 开始判断文章相关性: {article['title']}")
                                is_relevant, score, reason = judge_article_relevance(
                                    title=article['title'],
                                    content=article.get('content', ''),
                                    enterprise_info=enterprise_info
                                )
                                # Persist the relevance verdict on the article row
                                db_article.is_enterprise_related = is_relevant
                                db_article.relevance_score = score
                                db.commit()
                                logger.info(f"[crawl_multiple_sources] 相关性判断完成: is_relevant={is_relevant}, score={score}, reason={reason}")
                            else:
                                logger.info(f"[crawl_multiple_sources] 企业信息为空，跳过相关性判断: {article['title']}")
                        except Exception as e:
                            # Relevance scoring is best-effort; the article stays stored.
                            logger.error(f"[crawl_multiple_sources] 相关性判断失败: {article['title']} {e}")
                        
                        # Build a JSON-safe copy: datetimes rendered as strings
                        article_dict_for_json = {
                            'title': article['title'],
                            'url': article['url'],
                            'content': article.get('content', ''),
                            'summary': article.get('content', '')[:100] if article.get('content') else '',
                            'publish_time': article.get('publish_time').strftime('%Y-%m-%d %H:%M:%S') if article.get('publish_time') else None,
                            'source': source_id,
                            'attachments': article.get('attachments', []),
                            'created_at': article_dict['created_at'].strftime('%Y-%m-%d %H:%M:%S'),
                            'is_enterprise_related': db_article.is_enterprise_related,
                            'relevance_score': db_article.relevance_score
                        }
                        
                        source_articles.append(article_dict_for_json)
                        total_articles += 1
                        
                    except Exception as e:
                        logger.error(f"[crawl_multiple_sources] 处理文章失败: {article['title']} {e}")
                
                results_by_source[source_id] = source_articles
                
            except Exception as e:
                # A broken source still yields an (empty) entry in the results.
                logger.error(f"[crawl_multiple_sources] 采集数据源 {source_id} 失败: {e}")
                results_by_source[source_id] = []
        
        # Update task status
        task.total_count = total_articles
        task.finished_count = total_articles
        task.progress = 1.0
        task.status = 2  # done
        task.updated_at = datetime.datetime.utcnow()
        
        # Store the crawl results back into the task's params
        task.params = {
            **task.params,
            'results_by_source': results_by_source
        }
        
        db.commit()
        logger.info(f"[crawl_multiple_sources] 多源采集任务完成: task_id={task_id}, 总文章数: {total_articles}")
        
    except Exception as e:
        logger.error(f"[crawl_multiple_sources] 多源采集任务异常: task_id={task_id} {str(traceback.format_exc())}")
        task.status = -1
        db.commit()
    finally:
        db.close()


def get_crawl_results(task_id):
    """Return the per-source crawl results stored on a task, or None if the task is missing."""
    session: Session = SessionLocal()
    try:
        task = session.query(CrawlTask).filter(CrawlTask.id == task_id).first()
        if task is None:
            return None
        return (task.params or {}).get('results_by_source', {})
    finally:
        session.close()
