import json
import logging
import os
import re
import time

import requests
from bs4 import BeautifulSoup
from typing import List, Dict, Any

# Configure root logging once at import time: timestamped, INFO-level
# output shared by every spider logger created in this process.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(name)s: %(message)s'
)


class BingtuanPolicySpider:
    """Spider for policy articles on the Bingtuan (XPCC) government site.

    Fetches a result listing from the so-gov.cn search API, then scrapes
    each article page for its body text and attachment links.
    """

    def __init__(self):
        # Human-readable source tag; also used as the logger name.
        self.name = "兵团政策"
        self.base_url = "https://www.btnsss.gov.cn"
        self.api_url = "https://api.so-gov.cn/query/s"
        # One session for the whole crawl: connection reuse + shared headers.
        self.session = requests.Session()
        self.session.headers.update({
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "X-Requested-With": "XMLHttpRequest"
        })
        self.logger = logging.getLogger(self.name)

    def _absolutize(self, href: str) -> str:
        """Return *href* as an absolute URL, prefixing base_url when relative."""
        return href if href.startswith("http") else self.base_url + href

    # ---------- listing ----------
    def parse_list(self, limit: int = 3) -> List[Dict[str, str]]:
        """Fetch up to *limit* articles from the search API.

        Returns a list of dicts with ``title``/``url``/``date``/``source``
        keys, or an empty list on any request/JSON failure (logged).
        """
        payload = (
            f"siteCode=BT13000003_zck"
            f"&tab=ff0f35c85088806637f38802134f3170"
            f"&page=1&pageSize={limit}"
        )
        try:
            resp = self.session.post(self.api_url, data=payload, timeout=15)
            resp.raise_for_status()
            data = resp.json()
            docs = data.get("data", {}).get("result", {}).get("search", {}).get("docs", [])
        except Exception as e:
            # Best-effort: a listing failure yields an empty crawl, not a crash.
            self.logger.error("列表接口失败: %s", e)
            return []

        articles = []
        for doc in docs:
            url = doc.get("url", "")  # hoisted: was fetched three times on one line
            articles.append({
                "title": doc.get("title", "").strip(),
                "url": self._absolutize(url),
                "date": doc.get("docDate", ""),
                "source": self.name
            })
        return articles

    def parse_detail(self, url: str) -> Dict[str, Any]:
        """Fetch one article page; extract body text and attachment links.

        Retries up to 3 times (15 s timeout, 2 s back-off) on network or
        HTTP errors. Returns ``{"content": "", "attachments": []}`` when
        every attempt fails.
        """
        for _ in range(3):
            try:
                resp = self.session.get(url, timeout=15)
                # FIX: treat 4xx/5xx as failures to retry instead of
                # scraping the server's error page as article content.
                resp.raise_for_status()
                # Force UTF-8 decoding of the response body.
                # NOTE(review): the original comment claimed GB18030/GBK
                # handling, but the code always decoded as UTF-8; behavior
                # is kept — confirm against the live site if text garbles.
                resp.encoding = 'utf-8'
                soup = BeautifulSoup(resp.text, "lxml")
                break
            except requests.exceptions.RequestException:
                time.sleep(2)  # back off before the next attempt
        else:
            # All retries exhausted.
            return {"content": "", "attachments": []}

        # Body text: the site uses several container class names.
        content_div = (soup.select_one("div.content") or
                       soup.select_one("div.article-content") or
                       soup.select_one("div.detail-content"))
        content = content_div.get_text(" ", strip=True) if content_div else ""

        # Strip the breadcrumb/navigation prefix up to and including "正文".
        content = re.sub(r'^.*?正文\s*', '', content, flags=re.S)

        # Attachment links; the pattern now also matches URLs carrying a
        # query string or fragment (e.g. "file.pdf?download=1").
        attachments = [
            {"name": a.get_text(strip=True) or os.path.basename(a["href"]),
             "url": self._absolutize(a["href"])}
            for a in soup.select("a[href]")
            if re.search(r"\.(pdf|docx?|xlsx?|rar|zip)(?:[?#]|$)", a["href"], re.I)
        ]
        return {"content": content, "attachments": attachments}

    # ---------- entry point ----------
    def run(self, limit: int = 3) -> List[Dict[str, Any]]:
        """Crawl the listing and enrich each article with its detail page."""
        self.logger.info("开始爬取 %s", self.name)
        articles = self.parse_list(limit)
        for art in articles:
            # parse_detail always returns a dict (possibly with empty
            # fields), so the old `if detail:` guard was dead code.
            art.update(self.parse_detail(art["url"]))
        self.logger.info("完成，共 %d 条", len(articles))
        return articles


# if __name__ == "__main__":
#     spider = BingtuanPolicySpider()
#     for art in spider.run(3):
#         print("=" * 70)
#         print("标题:", art["title"])
#         print("链接:", art["url"])
#         print("日期:", art["date"])
#         print("正文前 200:", art["content"][:200], "...")
#         if art["attachments"]:
#             print("附件:", art["attachments"])