import logging
import os
import re
from datetime import datetime
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup


class XinjiangEnergyPolicySpider:
    """Scraper for energy-policy articles on xjdrc.xinjiang.gov.cn.

    Workflow: fetch the list page, extract up to ``limit`` article entries
    (title, URL, date), then fetch each article's detail page for the full
    text, publish date and document attachments.
    """

    # Links whose path ends in a document extension are treated as attachments.
    _ATTACH_RE = re.compile(r'\.(pdf|doc|docx|xls|xlsx|rar|zip)$', re.I)

    def __init__(self):
        self.name = "自治区能源政策"
        self.base_url = "https://xjdrc.xinjiang.gov.cn"
        self.list_url = "https://xjdrc.xinjiang.gov.cn/xjfgw/c108385/common_list.shtml"
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }
        self.logger = logging.getLogger(self.name)

    def fetch_page(self, url):
        """Fetch *url* and return the decoded HTML text, or None on failure.

        The encoding is forced to UTF-8 because the server's headers are
        unreliable and requests would otherwise guess (often ISO-8859-1).
        """
        try:
            response = requests.get(url, headers=self.headers, timeout=30)
            response.raise_for_status()
            response.encoding = 'utf-8'
            return response.text
        except requests.RequestException as e:
            # Narrowed from a bare Exception: only network/HTTP errors are
            # expected here; anything else should surface as a bug.
            self.logger.error("获取页面失败: %s, 错误: %s", url, e)
            return None

    def parse_list(self, limit=3):
        """Parse the list page and return up to *limit* article dicts.

        Each dict has keys: title, url, date, source. Returns [] when the
        page cannot be fetched or the expected markup is missing.
        """
        html = self.fetch_page(self.list_url)
        if not html:
            return []

        soup = BeautifulSoup(html, 'lxml')
        articles = []

        # Expected markup on this site: #list-data > ul.list > li
        list_container = soup.find('div', id='list-data')
        if not list_container:
            self.logger.error("未找到#list-data容器")
            return []

        list_ul = list_container.find('ul', class_='list')
        if not list_ul:
            self.logger.error("未找到ul.list")
            return []

        for item in list_ul.find_all('li')[:limit]:
            try:
                # Publication date sits in a plain <span>, e.g. <span>2025-03-12</span>
                date_span = item.find('span')
                date_str = date_span.text.strip() if date_span else ''

                link = item.find('a')
                if not link:
                    continue

                # urljoin handles absolute, root-relative AND page-relative
                # hrefs; the original only prefixed paths starting with '/'.
                href = urljoin(self.base_url, link.get('href', ''))

                articles.append({
                    'title': link.text.strip(),
                    'url': href,
                    'date': date_str,
                    'source': self.name,
                })
            except Exception as e:
                # Best-effort per item: one malformed <li> must not abort
                # the whole list.
                self.logger.warning("解析列表项失败: %s", e)

        return articles

    def parse_detail(self, url):
        """Fetch an article detail page and extract its content.

        Returns a dict with keys full_title, content, publish_date and
        attachments (list of {name, url}), or None when the page could not
        be fetched or parsing fails entirely.
        """
        html = self.fetch_page(url)
        if not html:
            return None

        soup = BeautifulSoup(html, 'lxml')

        try:
            # Article body lives in <div data-article="content">
            content_div = soup.find('div', {'data-article': 'content'})
            content = content_div.get_text(strip=True) if content_div else ''

            published_time = soup.find('div', {'data-article': 'publishedTime'})
            publish_date = published_time.text.strip() if published_time else ''

            # NOTE(review): the attribute value 'data-title' (rather than
            # 'title') mirrors the original scrape — confirm against the
            # live markup if full_title comes back empty.
            data_title = soup.find('div', {'data-article': 'data-title'})
            full_title = data_title.text.strip() if data_title else ''

            attachments = []
            for link in soup.find_all('a', href=self._ATTACH_RE):
                # urljoin resolves relative attachment paths against the
                # site root; absolute URLs pass through unchanged.
                attach_url = urljoin(self.base_url, link.get('href', ''))
                attach_name = link.get_text(strip=True) or os.path.basename(attach_url)
                attachments.append({
                    'name': attach_name,
                    'url': attach_url,
                })

            return {
                'full_title': full_title,
                'content': content,
                'publish_date': publish_date,
                'attachments': attachments,
            }

        except Exception:
            # Keep the original best-effort contract (None on failure),
            # but record the full traceback instead of printing.
            self.logger.exception("解析详情页失败: %s", url)
            return None

    def run(self, limit=3):
        """Crawl up to *limit* articles and return the merged records.

        Progress is printed to stdout (this class doubles as a CLI tool);
        errors go through the logger.
        """
        print(f"🚀 开始爬取 {self.name}")

        results = []
        for article in self.parse_list(limit):
            print(f"📄 正在获取详情: {article['title']}")
            detail = self.parse_detail(article['url'])
            if detail:
                article.update(detail)
                results.append(article)
                print(f"✅ 成功: {article['title'][:50]}...")

        return results


# Manual smoke test: run this module directly to scrape a few articles.
if __name__ == "__main__":
    # Make logger output (errors from the spider) visible on the console.
    logging.basicConfig(level=logging.INFO)

    spider = XinjiangEnergyPolicySpider()
    results = spider.run(limit=3)

    if results:
        import pandas as pd

        out_path = 'test/xinjiang_energy_policy_real.csv'
        # to_csv does not create missing directories — ensure it exists
        # so a fresh checkout doesn't crash with FileNotFoundError.
        os.makedirs(os.path.dirname(out_path), exist_ok=True)

        df = pd.DataFrame(results)
        df.to_csv(out_path, index=False, encoding='utf-8-sig')
        print(f"\n💾 已保存 {len(results)} 条数据到 {out_path}")

        # Show the first record as a sanity check. (The original had a
        # second redundant `if results:` here — we are already inside it.)
        print("\n📋 第一条数据示例:")
        print(f"标题: {results[0]['title']}")
        print(f"日期: {results[0]['date']}")
        print(f"内容: {results[0]['content'][:200]}...")