"""
应急管理部数据采集爬虫模块
工程化封装，支持多个分类的数据采集
"""
import os
import re
import json
import time
import requests
from urllib.parse import urljoin, urlparse
from typing import List, Dict, Optional, Tuple

from loguru import logger
from bs4 import BeautifulSoup


class EmergencySpider:
    """Spider for the Ministry of Emergency Management (mem.gov.cn).

    Crawls the notice/announcement list pages for a fixed set of document
    categories, follows each article to its detail page, and can optionally
    persist the parsed articles as JSON files and download their attachments.
    All network failures are logged and swallowed so a single bad page does
    not abort a crawl.
    """

    def __init__(self):
        # Browser-like User-Agent so the site serves its normal HTML pages.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        # Listing pages live under base_url; relative article links are
        # resolved against detail_base_url.
        self.base_url = "https://www.mem.gov.cn/gk/tzgg/"
        self.detail_base_url = "https://www.mem.gov.cn/gk/zfxxgkpt/fdzdgknr/"

        # URL path segment -> human-readable category name.
        self.categories = {
            "bl": "部令",
            "tb": "通报",
            "yjbgg": "公告",
            "tz": "通知",
            "h": "函",
            "yj": "意见",
            "qt": "其他",
            "yjglbgg": "应急管理部公报"
        }

    def get_page_content(self, url: str, timeout: int = 10) -> Optional[str]:
        """Fetch *url* and return the body decoded as UTF-8, or None on failure.

        Non-2xx HTTP responses are treated as failures (previously an error
        page body was silently returned as if it were valid content).
        """
        try:
            response = requests.get(url, headers=self.headers, timeout=timeout)
            response.raise_for_status()  # turn 404/500/... into a logged failure
            response.encoding = 'utf-8'  # site serves UTF-8; skip chardet guessing
            return response.text
        except Exception as e:
            logger.error(f"获取页面失败 {url}: {e}")
            return None

    def parse_list_page(self, html: str, category: str) -> List[Dict]:
        """Parse a category list page and return article metadata dicts.

        Each dict carries ``title``, absolute ``url``, ``date`` and the
        supplied ``category`` label. Links without an href are skipped.
        """
        soup = BeautifulSoup(html, 'html.parser')
        articles: List[Dict] = []

        # The article list sits inside a <div class="cont"> container.
        content_div = soup.find('div', class_='cont')
        if content_div:
            for link in content_div.find_all('a'):
                href = link.get('href')
                if not href:
                    continue
                # Resolve relative article links against the detail base URL.
                full_url = urljoin(self.detail_base_url, href)
                # Publish date is expected in the <span> following the link —
                # TODO confirm this holds for every category layout.
                span = link.find_next('span')
                articles.append({
                    'title': link.get_text(strip=True),
                    'url': full_url,
                    'date': span.get_text(strip=True) if span else "",
                    'category': category
                })

        return articles

    def parse_detail_page(self, url: str) -> Tuple[Optional[str], List[Dict]]:
        """Fetch an article page; return ``(body_text, attachments)``.

        Returns ``(None, [])`` when the page cannot be fetched. Attachments
        are any links whose href ends in a known document extension, each as
        ``{'title': ..., 'url': absolute_url}``.
        """
        html = self.get_page_content(url)
        if not html:
            return None, []

        soup = BeautifulSoup(html, 'html.parser')

        # Body text: try the CMS editor container first, then generic fallbacks.
        content = ""
        content_div = soup.find('div', class_='TRS_Editor') or soup.find('div', class_='content') or soup.find('div', id='content')
        if content_div:
            # Strip script/style nodes so their text does not leak into content.
            for script in content_div(["script", "style"]):
                script.decompose()
            content = content_div.get_text(strip=True)

        # Attachment links, identified purely by file extension.
        attachments = []
        attachment_links = soup.find_all('a', href=re.compile(r'\.(pdf|doc|docx|xls|xlsx|zip|rar)$'))
        for link in attachment_links:
            att_href = link.get('href')
            if att_href:
                attachments.append({
                    'title': link.get_text(strip=True),
                    'url': urljoin(url, att_href)  # resolve relative to the article page
                })

        return content, attachments

    def download_attachment(self, attachment: Dict, save_dir: str) -> Optional[str]:
        """Download one attachment into *save_dir*; return its path or None.

        The filename is taken from the URL path; if that yields nothing
        usable, the attachment title plus a ``.pdf`` fallback extension is
        used (assumes most attachments are PDFs — TODO confirm).
        """
        try:
            att_response = requests.get(attachment['url'], headers=self.headers, timeout=30)
            if att_response.status_code == 200:
                # Derive a filename from the URL path.
                parsed_url = urlparse(attachment['url'])
                att_filename = os.path.basename(parsed_url.path)
                if not att_filename or '.' not in att_filename:
                    att_filename = attachment['title'] + ".pdf"  # fallback extension

                # Strip characters that are illegal in Windows filenames.
                att_filename = re.sub(r'[<>:"/\\|?*\x00-\x1F]', '_', att_filename)

                file_path = os.path.join(save_dir, att_filename)
                with open(file_path, 'wb') as f:
                    f.write(att_response.content)
                logger.info(f"附件下载成功: {att_filename}")
                return file_path
        except Exception as e:
            logger.error(f"下载附件失败 {attachment['url']}: {e}")
        return None

    def crawl_category(self, category: str, category_name: str, save_dir: Optional[str] = None) -> List[Dict]:
        """Crawl one category: list page, then every article's detail page.

        When *save_dir* is given, each article is written there as
        ``<sanitized-title>.json`` and its attachments are downloaded
        alongside. Returns the list of article dicts (with ``content`` and
        ``attachments`` filled in).
        """
        logger.info(f"正在采集分类: {category_name}")

        if save_dir:
            os.makedirs(save_dir, exist_ok=True)

        category_url = urljoin(self.base_url, category + "/")
        html = self.get_page_content(category_url)
        if not html:
            logger.error(f"无法获取分类页面: {category_url}")
            return []

        articles = self.parse_list_page(html, category_name)
        logger.info(f"找到 {len(articles)} 篇文章")

        for i, article in enumerate(articles):
            logger.info(f"正在采集文章 ({i+1}/{len(articles)}): {article['title']}")

            content, attachments = self.parse_detail_page(article['url'])
            article['content'] = content
            article['attachments'] = attachments

            if save_dir:
                # BUG FIX: the sanitized filename was computed but never used —
                # every article was written to a single hard-coded JSON file,
                # so each one overwrote the last. Name files by title instead.
                filename = re.sub(r'[<>:"/\\|?*\x00-\x1F]', '_', article['title'])[:50]
                with open(os.path.join(save_dir, f"{filename}.json"), 'w', encoding='utf-8') as f:
                    json.dump(article, f, ensure_ascii=False, indent=2)

                for att in attachments:
                    self.download_attachment(att, save_dir)

            # Throttle so we do not hammer the server.
            time.sleep(1)

        return articles

    def crawl_all_categories(self, save_base_dir: Optional[str] = None) -> Dict[str, List[Dict]]:
        """Crawl every configured category; return ``{category_key: articles}``.

        A failure in one category is logged and yields an empty list for that
        key rather than aborting the remaining categories.
        """
        logger.info("开始采集应急管理部通知公告...")
        results: Dict[str, List[Dict]] = {}

        for category, name in self.categories.items():
            try:
                category_dir = os.path.join(save_base_dir, name) if save_base_dir else None
                articles = self.crawl_category(category, name, category_dir)
                results[category] = articles
                time.sleep(2)  # pause between categories
            except Exception as e:
                logger.error(f"采集分类 {name} 时出错: {e}")
                results[category] = []

        logger.info("采集完成!")
        return results

    def get_latest_articles(self, category: Optional[str] = None, limit: int = 20) -> List[Dict]:
        """Return up to *limit* newest articles without fetching detail pages.

        Restricts to one category when *category* is a known key; otherwise
        scans all categories. Sorting assumes dates are ISO-like strings
        (YYYY-MM-DD), so lexicographic descending order is newest-first.
        """
        if category and category in self.categories:
            categories_to_check = {category: self.categories[category]}
        else:
            categories_to_check = self.categories

        all_articles: List[Dict] = []
        for cat, name in categories_to_check.items():
            try:
                category_url = urljoin(self.base_url, cat + "/")
                html = self.get_page_content(category_url)
                if html:
                    all_articles.extend(self.parse_list_page(html, name))
            except Exception as e:
                logger.error(f"获取分类 {name} 最新文章失败: {e}")

        all_articles.sort(key=lambda x: x.get('date', ''), reverse=True)
        return all_articles[:limit]


# 工厂函数，用于创建爬虫实例
def create_emergency_spider() -> EmergencySpider:
    """Factory helper: build and return a fresh EmergencySpider instance."""
    spider = EmergencySpider()
    return spider
