#!/usr/bin/env python3
"""
政策爬虫主程序 - 使用6个专用爬虫类
直接获取今日日期并爬取相关数据
"""

import os
import sys
import logging
import time
from datetime import datetime, timedelta
import pandas as pd
import re

# 添加项目路径
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

# 导入6个专用爬虫
from spider.xinjiang_energy_info_spider import XinjiangEnergyInfoSpider
from spider.xinjiang_energy_policy_spider import XinjiangEnergyPolicySpider
from spider.bingtuan_policy_spider import BingtuanPolicySpider
from spider.xinxing_policy_spider import XinxingPolicySpider
from spider.nea_policy_spider import NEAPolicySpider
from spider.ndrc_policy_spider import NDRCPolicySpider
# 导入兵团新闻和活动爬虫
from spider.bingtuan_news_spider import BingtuanNewsSpider
from spider.bingtuan_act_spider import BingtuanActivitySpider
from utils.file_utils import save_data, save_data_by_category, download_attachments
# 导入AI分类器
from utils.ai_classifier import KimiAIClassifier

# Logging setup: one log file per day under logs/, mirrored to stdout.
LOG_DIR = 'logs'
os.makedirs(LOG_DIR, exist_ok=True)

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        # Daily file name, e.g. logs/spider_20240101.log (date fixed at import time)
        logging.FileHandler(os.path.join(LOG_DIR, f'spider_{datetime.now().strftime("%Y%m%d")}.log')),
        logging.StreamHandler(sys.stdout)
    ]
)
logger = logging.getLogger(__name__)

# Spider registry: key -> {'name': display name, 'spider': spider instance}.
# NOTE(review): spider instances are constructed eagerly at import time, so
# any work done in their __init__ (config loading, sessions, ...) runs on
# module import — confirm the constructors are cheap and side-effect free.
SPIDERS = {
    'xinjiang_energy_info': {
        'name': '自治区能源信息',
        'spider': XinjiangEnergyInfoSpider()
    },
    'xinjiang_energy_policy': {
        'name': '自治区能源政策',
        'spider': XinjiangEnergyPolicySpider()
    },
    'bingtuan_policy': {
        'name': '兵团政策',
        'spider': BingtuanPolicySpider()
    },
    'xinxing_policy': {
        'name': '新星市政策',
        'spider': XinxingPolicySpider()
    },
    'nea_policy': {
        'name': '国家能源局',
        'spider': NEAPolicySpider()
    },
    'ndrc_policy': {
        'name': '国家发改委',
        'spider': NDRCPolicySpider()
    },
    'bingtuan_news': {
        'name': '兵团新闻',
        'spider': BingtuanNewsSpider()
    },
    'bingtuan_act': {
        'name': '兵团活动',
        'spider': BingtuanActivitySpider()
    }
}


def get_date_range_for_spider(spider_name):
    """Return the (start_date, end_date) crawl window for a given spider.

    Windows are inclusive and always end at today:
    - '兵团新闻' (Bingtuan news): last 10 days.
    - '兵团活动' (Bingtuan activities): last 3 days.
    - everything else: previous day, except on Mondays the window reaches
      back 3 days to cover the weekend.
    """
    today = datetime.now().date()

    # Source-specific lookback windows.
    if spider_name == '兵团新闻':
        return today - timedelta(days=10), today
    if spider_name == '兵团活动':
        return today - timedelta(days=3), today

    # Default window: 1 day back, or 3 on Mondays (weekday() == 0).
    lookback = 3 if today.weekday() == 0 else 1
    return today - timedelta(days=lookback), today


def get_date_range():
    """Return the default (start, today) crawl window and log it.

    The window is yesterday..today, widened to 3 days on Mondays so that
    the weekend is covered. Same logic as the default branch of
    get_date_range_for_spider.
    """
    today = datetime.now().date()
    lookback = 3 if today.weekday() == 0 else 1  # Monday reaches back to Friday
    start = today - timedelta(days=lookback)
    logger.info(f"爬取日期范围：{start} ~ {today}")
    return start, today


def parse_date(date_str, source):
    """Parse a raw date string into a ``datetime.date``.

    Tries a list of explicit strptime formats first, then falls back to
    regular expressions. Year-less inputs (e.g. "05-12" or "05/12") are
    assumed to belong to the current year.

    Args:
        date_str: raw date text from a spider; may be None or empty.
        source: spider display name, used only in the warning log.

    Returns:
        A ``datetime.date``, or ``None`` when nothing could be parsed.
    """
    if not date_str:
        return None
    date_str = str(date_str).strip()
    # Supported explicit formats (year-less ones last).
    fmts = ('%Y-%m-%d', '%Y/%m/%d', '%Y.%m.%d',
            '%Y年%m月%d日', '%m-%d', '%m/%d')
    for fmt in fmts:
        try:
            if fmt in ('%m-%d', '%m/%d'):
                # Prepend the current year and parse with the matching
                # separator. BUGFIX: the original always parsed with
                # '%Y-%m-%d' here, so '%m/%d' inputs could never match via
                # strptime and silently fell through to the regex fallback.
                dt = datetime.strptime(f"{datetime.now().year}-{date_str}", f'%Y-{fmt}')
            else:
                dt = datetime.strptime(date_str, fmt)
            return dt.date()   # normalize to a date (drop the time part)
        except ValueError:
            continue
    # Regex fallback: full date with a 4-digit year, mixed separators.
    m = re.search(r'(\d{4})[年\-/\.](\d{1,2})[月\-/\.](\d{1,2})', date_str)
    if m:
        return datetime(*map(int, m.groups())).date()
    # Regex fallback: month/day only — assume the current year.
    m = re.search(r'(\d{1,2})[月\-/\.](\d{1,2})', date_str)
    if m:
        return datetime(datetime.now().year, *map(int, m.groups())).date()
    logger.warning(f"无法解析日期: {date_str} (来源: {source})")
    return None


def should_process_article(article_date, date_range, source):
    """Return True when *article_date* lies inside the inclusive *date_range*.

    Args:
        article_date: parsed date of the article, or a falsy value.
        date_range: (start_date, end_date) tuple, both inclusive.
        source: spider display name, used only for logging.
    """
    if not article_date:
        logger.warning(f"文章日期为空，跳过处理 (来源: {source})")
        return False

    start, end = date_range

    # Inside the window — keep the article.
    if start <= article_date <= end:
        return True

    # Older than the window: the site probably has no fresh content.
    if article_date < start:
        logger.info(f"文章日期 {article_date.strftime('%Y-%m-%d')} 早于目标范围，网站可能无新内容 (来源: {source})")

    return False


def normalize_article_data(articles):
    """Normalize raw spider records into a uniform Chinese-keyed schema.

    Output keys (in CSV column order): 标题 (title), 链接 (url), 日期 (date),
    正文 (content), 附件名称 (attachment names), 附件下载链接 (attachment
    URLs), 数据来源 (source). Missing input fields become empty strings;
    multiple attachments are joined with '; '.

    Args:
        articles: list of raw article dicts from a spider.

    Returns:
        A new list of normalized dicts (input is not mutated).
    """
    normalized_articles = []
    for article in articles:
        # Joining an empty attachment list yields '' — this replaces the
        # original's redundant elif branch that re-assigned the same
        # defaults when 'attachments' existed but was empty.
        attachments = article.get('attachments') or []
        normalized_articles.append({
            '标题': article.get('title', ''),
            '链接': article.get('url', ''),
            '日期': article.get('date', ''),
            '正文': article.get('content', ''),
            '附件名称': '; '.join(a.get('name', '') for a in attachments),
            '附件下载链接': '; '.join(a.get('url', '') for a in attachments),
            '数据来源': article.get('source', ''),
        })
    return normalized_articles


def run_spider():
    """Run every configured spider end-to-end for today.

    For each entry in SPIDERS: fetch up to a per-source limit of articles,
    keep only those dated inside that spider's window (see
    get_date_range_for_spider), normalize the records, classify them with
    the Kimi AI classifier, write CSVs (per-day and per-category) under
    data/<YYYY-MM-DD>/, and download any attachments. A failure in one
    spider is logged and does not stop the others.
    """
    logger.info("=" * 60)
    logger.info("开始执行政策爬取任务...")

    # Create today's output directory: data/YYYY-MM-DD/
    today_str = datetime.now().strftime('%Y-%m-%d')
    data_dir = os.path.join('data', today_str)
    os.makedirs(data_dir, exist_ok=True)

    # AI classifier applied to every batch of normalized articles.
    classifier = KimiAIClassifier()

    total_articles = 0

    # Walk every registered spider.
    for site_key, site_config in SPIDERS.items():
        try:
            spider_name = site_config['name']
            spider = site_config['spider']

            logger.info(f"🚀 开始爬取 {spider_name}")

            # Per-source fetch limit (news and activity feeds pull more items).
            limit = 10
            if spider_name == '兵团新闻':
                limit = 20  # fetch more news items
            elif spider_name == '兵团活动':
                limit = 15  # fetch more activity items

            # Fetch the raw article list from the spider.
            articles = spider.run(limit=limit)

            if not articles:
                logger.warning(f"⚠️ {spider_name} 未获取到数据")
                continue

            logger.info(f"📊 {spider_name} 获取到 {len(articles)} 条原始数据")

            # Date window specific to this spider.
            date_range = get_date_range_for_spider(spider_name)

            # Keep only articles whose date falls inside the window.
            filtered_articles = []
            for article in articles:
                try:
                    # Different spiders may use different date field names.
                    date_str = article.get('date', '') or article.get('publish_date', '')

                    # Parse the raw date text into a date object (None on failure).
                    article_date = parse_date(date_str, spider_name)

                    # Decide whether this article is inside the window.
                    if should_process_article(article_date, date_range, spider_name):
                        article['source'] = spider_name
                        article['parsed_date'] = article_date.strftime('%Y-%m-%d') if article_date else '未知'
                        filtered_articles.append(article)
                        logger.info(f"✅ 保留文章: {article['title'][:30]}... ({article_date.strftime('%Y-%m-%d') if article_date else '未知'})")

                except Exception as e:
                    # Per-article failures are logged and skipped.
                    logger.error(f"❌ 处理文章时出错: {str(e)}")
                    continue

            if filtered_articles:
                # Normalize records into the uniform Chinese-keyed schema.
                normalized_articles = normalize_article_data(filtered_articles)

                # Classify the batch with the Kimi AI classifier.
                logger.info(f"🧠 开始使用Kimi AI对 {spider_name} 的文章进行分类...")
                classified_articles = classifier.classify_articles_batch(normalized_articles)

                # Save the per-day CSV (utf-8-sig so Excel opens it correctly).
                df = pd.DataFrame(classified_articles)
                file_path = os.path.join(data_dir, f"{spider_name.replace(' ', '_')}_{today_str}.csv")
                df.to_csv(file_path, index=False, encoding='utf-8-sig')

                # Save per-category CSVs as well.
                save_data_by_category(classified_articles, spider_name.replace(' ', '_'), today_str)

                logger.info(f"✅ {spider_name}: 成功爬取 {len(classified_articles)} 条数据")
                logger.info(f"💾 数据保存路径: {file_path}")

                # Download attachments, if any, into a per-spider subdirectory.
                for article in filtered_articles:
                    if article.get('attachments'):
                        attach_dir = os.path.join(data_dir, 'attachments', spider_name)
                        os.makedirs(attach_dir, exist_ok=True)
                        download_attachments(article['attachments'], attach_dir)

                total_articles += len(classified_articles)
            else:
                logger.info(f"📭 {spider_name}: 日期范围内无新数据")

        except Exception as e:
            # NOTE(review): if the exception fires before spider_name is bound
            # (e.g. a missing 'name' key), this log line itself raises NameError.
            logger.error(f"❌ {spider_name} 爬取失败: {str(e)}")
            continue

    logger.info(f"🎯 任务完成！共爬取 {total_articles} 条数据")
    logger.info(f"数据保存路径: {os.path.abspath(data_dir)}")


def main():
    """Program entry point: prepare the output root, then start crawling."""
    print("🔥 政策爬虫系统启动")
    print("=" * 50)

    # Create the top-level data directory on first run.
    data_root = 'data'
    if not os.path.exists(data_root):
        os.makedirs(data_root)

    # Kick off the full crawl immediately (no CLI arguments).
    run_spider()


# Run the crawl only when executed as a script (not on import).
if __name__ == "__main__":
    main()