import json
import logging
import os
import re
import urllib
import urllib.parse

from playwright.sync_api import sync_playwright

# Configure the root logger once at import time so every spider logger inherits it.
logging.basicConfig(level=logging.INFO, format="%(levelname)s:%(name)s:%(message)s")

class NEAPolicySpider:
    """Scraper for policy documents of the 国家能源局 (National Energy Administration).

    Workflow: ``run()`` drives ``parse_list()`` (Playwright, because the list
    page is rendered client-side) and then ``parse_detail()`` (requests +
    BeautifulSoup) for each article found.
    """

    def __init__(self):
        self.name = "国家能源局"
        self.base_url = "http://www.nea.gov.cn"
        self.list_url = "https://www.nea.gov.cn/policy/zxwj.htm"
        self.logger = logging.getLogger(self.name)

    # ---------- URL helper ----------
    def _resolve_url(self, page_url, href):
        """Resolve *href* against *page_url* into an absolute URL.

        Returns None for a missing/empty href.  ``urljoin`` handles every
        case the old ad-hoc branches covered — already-absolute URLs,
        protocol-relative (``//...``), root-relative (``/...``) and
        page-relative (``../...``) references — in a single call.
        """
        href = (href or "").strip()
        if not href:
            return None
        return urllib.parse.urljoin(page_url, href)

    # ---------- 列表 ----------
    def parse_list(self, limit=3):
        """Scrape up to *limit* article stubs from the JS-rendered list page.

        Returns a list of dicts with keys ``title``/``url``/``date``/``source``,
        or ``[]`` on any scraping failure (logged, never raised).
        """
        with sync_playwright() as p:
            browser = p.chromium.launch(headless=True)
            try:
                page = browser.new_page()
                page.goto(self.list_url, timeout=30_000)
                # 等待 JS 渲染完成 — the list items only exist after rendering.
                page.wait_for_selector("ul.list li", timeout=20_000)
                articles = []
                for li in page.locator("ul.list li").all()[:limit]:
                    link = li.locator("a")
                    # get_attribute may return None; resolve relative hrefs too
                    # (the old code crashed on None and only fixed "/..." paths).
                    url = self._resolve_url(self.list_url, link.get_attribute("href"))
                    if url is None:
                        continue  # defensive: skip anchor-less rows
                    articles.append({
                        "title": link.inner_text().strip(),
                        "url": url,
                        "date": li.locator("span.sj").inner_text().strip(),
                        "source": self.name,
                    })
                return articles
            except Exception as e:
                self.logger.error("Playwright 抓取出错: %s", e)
                return []
            finally:
                # Close exactly once, on success and failure alike (the original
                # closed in two places and leaked the browser if the except
                # handler itself ran after a close-related error).
                browser.close()

    # ---------- 详情 ----------
    def fetch_page(self, url):
        """Download a detail page; return its HTML text, or None on failure."""
        try:
            # Imported lazily so list-only usage works without requests installed;
            # an ImportError is caught below like any other fetch failure.
            import requests
            # SECURITY NOTE: verify=False disables TLS certificate checking.
            # Kept deliberately (the site's certificate chain fails default
            # validation — TODO confirm); acceptable for scraping public pages,
            # never for sending credentials.
            resp = requests.get(
                url,
                headers={"User-Agent": "Mozilla/5.0"},
                timeout=15,
                verify=False,
            )
            resp.raise_for_status()
            # Pages are assumed UTF-8 regardless of response headers — TODO confirm.
            resp.encoding = "utf-8"
            return resp.text
        except Exception as e:
            self.logger.error("获取详情页失败: %s", e)
            return None

    def parse_detail(self, url):
        """Parse a detail page into ``{"content": str, "attachments": [...]}``.

        Each attachment is ``{"name": str, "url": absolute-url}``.
        Returns None if the page cannot be fetched or parsed.
        """
        html = self.fetch_page(url)
        if not html:
            return None
        from bs4 import BeautifulSoup
        try:
            soup = BeautifulSoup(html, "lxml")
            # The site uses several body-container layouts; try each in turn.
            content_div = (
                soup.find("div", class_="article-content")
                or soup.find("div", class_="TRS_Editor")
                or soup.find("div", id="content")
            )
            content = content_div.get_text(" ", strip=True) if content_div else ""
            attachments = []
            for link in soup.find_all(
                "a", href=re.compile(r"\.(pdf|doc|docx|xls|xlsx|rar|zip)$", re.I)
            ):
                # Resolve against the detail page URL so "../" paths work too.
                attach_url = self._resolve_url(url, link.get("href", ""))
                if attach_url is None:
                    continue
                name = link.get_text(strip=True) or os.path.basename(attach_url)
                attachments.append({"name": name, "url": attach_url})
            return {"content": content, "attachments": attachments}
        except Exception as e:
            self.logger.error("解析详情页失败: %s", e)
            return None

    # ---------- 入口 ----------
    def run(self, limit=3):
        """Crawl the newest *limit* articles; return those whose detail parsed."""
        self.logger.info("开始爬取 %s", self.name)
        results = []
        for article in self.parse_list(limit):
            self.logger.info("正在获取详情: %s", article["title"])
            detail = self.parse_detail(article["url"])
            if detail:
                article.update(detail)
                results.append(article)
        return results


# ---------------- Manual smoke test ----------------
if __name__ == "__main__":
    crawler = NEAPolicySpider()
    harvested = crawler.run(limit=3)
    print(json.dumps(harvested, ensure_ascii=False, indent=2))