import requests
import re
from datetime import datetime
import logging
import os
import urllib3
from bs4 import BeautifulSoup
import urllib.parse

# Globally silence urllib3's InsecureRequestWarning: every request below is
# made with verify=False, so the warning would fire on each call.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

class NDRCPolicySpider:
    """Spider for policy documents published on the NDRC website.

    Workflow: query the site's JSON search API for the newest articles
    (``parse_list``), then fetch each detail page, extract the body text
    and attachment links, and download the attachments into a local
    ``downloads`` directory (``parse_detail``).
    """

    # Link targets with one of these extensions are treated as attachments.
    # An optional trailing query string ("...file.pdf?v=1") is tolerated.
    _ATTACHMENT_RE = re.compile(r'\.(pdf|doc|docx|xls|xlsx|rar|zip)(\?.*)?$', re.I)

    def __init__(self):
        self.name = "国家发改委"
        self.base_url = "https://www.ndrc.gov.cn"
        # Real JSON endpoint behind the site's document-search page.
        self.api_url = "https://fwfx.ndrc.gov.cn/api/query"
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }
        # NOTE(review): basicConfig configures the root logger as a side
        # effect of instantiation; fine for a standalone script, but move it
        # to the entry point if this class is ever embedded in a larger app.
        logging.basicConfig(
            level=logging.INFO,
            format="%(levelname)s:%(name)s:%(message)s"
        )
        self.logger = logging.getLogger(self.name)

    # ---------- Shared: page fetching ----------
    def fetch_page(self, url):
        """Fetch *url* and return its HTML text, or None on any failure.

        verify=False: the target gov sites ship unreliable certificate
        chains; the matching urllib3 warning is silenced at module import.
        """
        try:
            response = requests.get(url, headers=self.headers, timeout=30, verify=False)
            response.raise_for_status()
            # Force UTF-8: the pages are UTF-8 but don't always declare it.
            response.encoding = 'utf-8'
            return response.text
        except Exception as e:
            self.logger.error(f"获取页面失败: {url}, 错误: {str(e)}")
            return None

    def parse_list(self, limit=3):
        """Query the search API and return up to *limit* article stubs.

        Each stub is {'title', 'url', 'date', 'source'}; an empty list is
        returned when the request or JSON decoding fails.
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
            'Referer': 'https://www.ndrc.gov.cn/xxgk/wjk/'
        }
        params = {
            "qt": "",
            "tab": "all",
            "page": 1,
            "pageSize": limit,
            "siteCode": "bm04000fgk",
            # Static key observed in the site front-end's own requests.
            "key": "CAB549A94CF659904A7D6B0E8FC8A7E9",
            "startDateStr": "",
            "endDateStr": "",
            "timeOption": 0,
            "sort": "dateDesc"
        }
        try:
            resp = requests.get(self.api_url, params=params, headers=headers, timeout=15, verify=False)
            resp.raise_for_status()
            data = resp.json()
        except Exception as e:
            self.logger.error("列表接口请求失败: %s", e)
            return []

        articles = []
        for item in data.get("data", {}).get("resultList", [])[:limit]:
            # "or ''" also guards against explicit null values in the JSON,
            # which plain .get(key, "") would pass through as None.
            title = (item.get("title") or "").strip()
            url = item.get("url") or ""
            if url.startswith("/"):
                # Site-relative links are resolved against the main portal.
                url = self.base_url + url
            # docDate arrives as "YYYY-MM-DD HH:MM:SS"; keep the date part.
            date_str = (item.get("docDate") or "").split(" ")[0]
            articles.append({
                'title': title,
                'url': url,
                'date': date_str,
                'source': self.name
            })
        return articles

    def parse_detail(self, url):
        """Parse a detail page and download its attachments.

        Returns {'content': str, 'attachments': [{'name', 'url'}, ...]} or
        None when the page could not be fetched.
        """
        html = self.fetch_page(url)
        if not html:
            return None
        soup = BeautifulSoup(html, 'lxml')
        content = self._extract_content(soup)
        attachments = self._extract_attachments(soup, url)
        self._download_attachments(attachments)
        return {'content': content, 'attachments': attachments}

    # ---------- Detail-page helpers ----------
    def _extract_content(self, soup):
        """Return the de-noised article body text from a parsed page."""
        # Try the containers used by the various NDRC page templates first.
        content_div = (
            soup.select_one('div.content, div.article-content, div#content, div.TRS_Editor') or
            soup.select_one('#zoom, .Custom_UnionStyle, .article') or
            soup.find('td', class_=re.compile('content', re.I))
        )
        if content_div:
            content = content_div.get_text(separator='\n', strip=True)
        else:
            # Fallback: strip obvious page chrome and use the whole <body>.
            for tag in soup(['script', 'style', 'header', 'footer', 'nav', 'aside']):
                tag.decompose()
            content = soup.body.get_text(separator='\n', strip=True) if soup.body else ''
        # Drop the breadcrumb line ("首页 > 政务公开 > 政策 > ...") if present.
        return re.sub(r'首页\s*>\s*政务公开\s*>\s*政策\s*>\s*[\s\S]*?\n', '', content, flags=re.I)

    def _extract_attachments(self, soup, page_url):
        """Collect attachment links as [{'name', 'url'}] with absolute URLs."""
        attachments = []
        for a in soup.find_all('a', href=self._ATTACHMENT_RE):
            href = urllib.parse.urljoin(page_url, a.get('href', '').strip())
            name = a.get_text(strip=True) or os.path.basename(urllib.parse.urlsplit(href).path)
            attachments.append({'name': name, 'url': href})
        return attachments

    @staticmethod
    def _safe_filename(name, url):
        """Turn link text into a safe local filename.

        Replaces path separators and characters invalid on common
        filesystems (defusing path traversal via crafted link text) and
        appends the URL's file extension when the text lacks one.
        """
        path = urllib.parse.urlsplit(url).path  # basename/ext without the query string
        ext = os.path.splitext(path)[1]
        name = re.sub(r'[\\/:*?"<>|]+', '_', name).strip() or os.path.basename(path)
        if ext and not name.lower().endswith(ext.lower()):
            name += ext
        return name

    def _download_attachments(self, attachments):
        """Best-effort download into ./downloads; existing files are kept.

        Failures are logged and never raised, so one broken attachment does
        not abort the crawl.
        """
        if not attachments:
            return
        save_dir = 'downloads'
        os.makedirs(save_dir, exist_ok=True)  # loop-invariant, hoisted
        for att in attachments:
            file_path = os.path.join(save_dir, self._safe_filename(att['name'], att['url']))
            try:
                if os.path.exists(file_path):
                    self.logger.info(f"文件已存在，跳过：{file_path}")
                    continue
                # Context manager guarantees the streamed connection is
                # released even if a chunk read fails mid-download.
                with requests.get(att['url'], headers=self.headers, timeout=60,
                                  stream=True, verify=False) as resp:
                    resp.raise_for_status()
                    with open(file_path, 'wb') as f:
                        for chunk in resp.iter_content(chunk_size=8192):
                            if chunk:
                                f.write(chunk)
                self.logger.info(f"下载完成：{file_path}")
            except Exception as e:
                self.logger.error(f"下载失败 {att['url']}：{e}")

    # ---------- Entry point ----------
    def run(self, limit=3):
        """Crawl the *limit* newest articles; return stubs merged with details."""
        self.logger.info(f"开始爬取 {self.name}")

        articles = self.parse_list(limit)
        results = []

        for article in articles:
            self.logger.info(f"正在获取详情: {article['title']}")
            detail = self.parse_detail(article['url'])
            if detail:
                article.update(detail)
                results.append(article)

        return results

# # ==================== 测试代码 ====================
# if __name__ == "__main__":
#     import json
#     from pprint import pprint
#
#     spider = NDRCPolicySpider()
#     data = spider.run(limit=3)
#
#     print("\n" + "="*60)
#     print("共抓取到 {} 篇文章".format(len(data)))
#     print("="*60)
#
#     for idx, item in enumerate(data, 1):
#         print(f"\n【第 {idx} 篇】")
#         print("标题 :", item["title"])
#         print("链接 :", item["url"])
#         print("时间 :", item["date"])
#         # 正文可能很长，截断 300 字
#         content_preview = item["content"][:300] + "…" if len(item["content"]) > 300 else item["content"]
#         print("正文 :", content_preview)
#         if item.get("attachments"):
#             print("附件 :")
#             pprint(item["attachments"], indent=2)
#         print("-" * 60)