import json, logging, requests, re, os
from bs4 import BeautifulSoup
from typing import List, Dict, Any

# Configure the root logger at import time (INFO, timestamped format).
# NOTE(review): fine for a standalone script, but if this module is imported
# into a larger app that configures logging first, basicConfig is a no-op —
# consider moving this under the __main__ guard. TODO confirm usage context.
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')

class XinxingPolicySpider:
    """Scraper for policy articles published on the Xinxing municipal site.

    Queries the so-gov.cn search API for a page of article stubs, then
    downloads each article page and extracts the body text plus any
    attachment links (pdf/doc/xls/rar/zip).
    """

    def __init__(self):
        self.name = "新星市政策"
        self.base_url = "https://www.btnsss.gov.cn"
        self.api_url = "https://api.so-gov.cn/query/s"
        self.session = requests.Session()
        # Headers kept identical to the known-good curl request.
        self.session.headers.update({
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Referer": "https://www.btnsss.gov.cn/",
            "X-Requested-With": "XMLHttpRequest"
        })

    def parse_list(self, limit: int = 3) -> List[Dict[str, str]]:
        """Fetch up to *limit* article stubs from the listing API.

        Returns a list of ``{"title", "url", "date", "source"}`` dicts;
        returns an empty list on any network/JSON failure. Entries without
        a URL are skipped rather than raising.
        """
        # Form body kept as a raw string, byte-identical to the curl request
        # (the API is picky about exact urlencoded form fields).
        payload = (
            "siteCode=BT13000003_zck"
            "&tab=8148067192c1ee3b02372005e6f31612"
            f"&page=1&pageSize={limit}"
        )
        try:
            r = self.session.post(self.api_url, data=payload, timeout=10)
            r.raise_for_status()
            data = r.json()
            # Response structure: data -> result -> search -> docs
            docs = data.get("data", {}).get("result", {}).get("search", {}).get("docs", [])
            logging.debug("拿到 %d 条", len(docs))
        except Exception as e:
            # Best-effort: a failed listing yields an empty crawl, not a crash.
            logging.error("列表接口失败: %s", e)
            return []

        articles: List[Dict[str, str]] = []
        for doc in docs:
            # .get() instead of [] — one malformed doc must not raise
            # KeyError and discard the whole page of results.
            url = doc.get("url") or ""
            if not url:
                continue  # no link, nothing to crawl
            if not url.startswith("http"):
                url = self.base_url + url
            articles.append({
                "title": (doc.get("title") or "").strip(),
                "url": url,
                "date": doc.get("docDate", ""),
                "source": self.name,
            })
        return articles

    def parse_detail(self, url: str) -> Dict[str, Any]:
        """Download one article page; return its text and attachment links.

        Raises requests.HTTPError on a non-2xx response (previously error
        pages were silently parsed as article content).
        """
        r = self.session.get(url, timeout=10)
        r.raise_for_status()  # don't scrape a 404/500 page as an article
        soup = BeautifulSoup(r.text, "lxml")

        # 1. Locate the main content container, falling back to <body>.
        content_div = (soup.select_one("div.content") or
                       soup.select_one("div.article-content") or
                       soup.select_one("div.detail-content") or
                       soup.body)
        if content_div is None:
            # Non-HTML/empty response: soup.body can be None — return an
            # empty result instead of raising AttributeError.
            return {"content": "", "attachments": []}

        # 2. Flatten to text, dropping leading/trailing whitespace per node.
        raw = content_div.get_text(" ", strip=True)

        # 3. Strip the breadcrumb prefix up to and including "正文".
        clean = re.sub(r'^.*?正文\s*', '', raw, flags=re.S)

        # 4. Collect attachment links by file extension.
        atts = [
            {"name": a.get_text(strip=True) or os.path.basename(a["href"]),
             "url": a["href"] if a["href"].startswith("http") else self.base_url + a["href"]}
            for a in soup.select("a[href]")
            if re.search(r"\.(pdf|doc|docx|xls|xlsx|rar|zip)$", a["href"], re.I)
        ]
        return {"content": clean, "attachments": atts}

    def run(self, limit: int = 3) -> List[Dict[str, Any]]:
        """Crawl the listing and enrich each article with its detail page.

        A failure on one detail page is logged and skipped so the rest of
        the batch still completes (previously one bad page aborted the run).
        """
        logging.info("开始爬取...")
        articles = self.parse_list(limit)
        for a in articles:
            try:
                a.update(self.parse_detail(a["url"]))
            except Exception as e:
                logging.error("详情页失败 %s: %s", a["url"], e)
        logging.info("完成，共 %d 条", len(articles))
        return articles


# if __name__ == "__main__":
#     spider = XinxingPolicySpider()
#     for art in spider.run(3):
#         print("=" * 60)
#         print("标题:", art["title"])
#         print("链接:", art["url"])
#         print("日期:", art["date"])
#         print("正文前 200:", art["content"][:200], "...")
#         if art["attachments"]:
#             print("附件:", art["attachments"])