import requests
from bs4 import BeautifulSoup
import logging
import re
import time
from urllib3.exceptions import InsecureRequestWarning
from urllib.parse import urljoin
import urllib3

# Suppress InsecureRequestWarning: fetch_page() deliberately uses verify=False
urllib3.disable_warnings(InsecureRequestWarning)

class BingtuanActivitySpider:
    """Spider for the activities feed ("兵团要闻-活动") on www.xjbt.gov.cn.

    parse_list() scrapes the listing page into (title, url, date, source)
    records, parse_detail() fetches each article's body text, and run()
    ties the two together.
    """

    def __init__(self):
        # Human-readable source label; doubles as the logger name.
        self.name = "兵团要闻-活动"
        self.base_url = "http://www.xjbt.gov.cn"
        self.list_url = "http://www.xjbt.gov.cn/dt/btyw/zdhd/"
        # Browser-like headers to avoid trivial bot filtering.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Accept-Charset': 'UTF-8',
        }
        self.logger = logging.getLogger(self.name)
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        # Transport-level retry for transient 5xx responses; this is in
        # addition to the application-level retry loop in fetch_page().
        retries = requests.adapters.Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
        adapter = requests.adapters.HTTPAdapter(max_retries=retries)
        self.session.mount('http://', adapter)
        self.session.mount('https://', adapter)

    def fetch_page(self, url, retries=3, delay=3):
        """Fetch *url* and return its text, or None once all attempts fail.

        Args:
            url: Absolute URL to fetch.
            retries: Total number of attempts.
            delay: Base sleep in seconds for the exponential backoff.
        """
        for attempt in range(retries):
            try:
                response = self.session.get(url, timeout=30, verify=False)
                response.raise_for_status()
                # Force UTF-8 decoding instead of requests' header-based guess.
                response.encoding = 'utf-8'
                return response.text
            # Fix: catch only network/HTTP errors rather than a blanket
            # `Exception`, so programming errors are not silently retried.
            except requests.RequestException as e:
                self.logger.error(f"获取页面失败 (尝试 {attempt+1}/{retries}): {url}, 错误: {str(e)}")
                if attempt < retries - 1:
                    # Fix: true exponential backoff; the original slept
                    # linearly (delay * (attempt + 1)) despite its
                    # "exponential backoff" comment.
                    time.sleep(delay * (2 ** attempt))
        # All attempts exhausted (also covers retries=0).
        return None

    def parse_list(self, limit=10):
        """Parse the activity listing page.

        Args:
            limit: Maximum number of list entries to return.

        Returns:
            A list of dicts with keys 'title', 'url', 'date', 'source';
            empty list when the page could not be fetched.
        """
        html = self.fetch_page(self.list_url)
        if not html:
            return []

        soup = BeautifulSoup(html, 'lxml')
        articles = []

        # Each list entry is rendered as <div class="article">.
        article_divs = soup.find_all('div', class_='article')

        for article in article_divs[:limit]:
            try:
                # Title and link live inside <div class="name"><a>.
                name_div = article.find('div', class_='name')
                if not name_div:
                    continue

                link = name_div.find('a')
                if not link:
                    continue

                title = link.text.strip()
                href = link.get('href', '')
                # Fix: resolve hrefs against the LIST page, not the site
                # root.  urljoin() covers every case the old branch chain
                # handled (scheme-relative //, root-relative /, absolute
                # http(s)) and additionally resolves page-relative links
                # like "t2024.html" correctly, which the old code wrongly
                # joined to the site root.
                url = urljoin(self.list_url, href)

                # Publication date sits in <div class="time">, when present.
                time_div = article.find('div', class_='time')
                date_str = time_div.text.strip() if time_div else ''

                articles.append({
                    'title': title,
                    'url': url,
                    'date': date_str,
                    'source': self.name
                })

            except Exception as e:
                # One malformed entry must not abort the whole listing.
                self.logger.error(f"解析列表项失败: {str(e)}")
                continue

        return articles

    def parse_detail(self, url):
        """Fetch an article page and extract its body text.

        Args:
            url: Absolute article URL.

        Returns:
            {'content': <text>} — content is '' when the page cannot be
            fetched or no known content container is found.
        """
        html = self.fetch_page(url)
        if not html:
            return {'content': ''}

        soup = BeautifulSoup(html, 'lxml')

        # Try the known content containers in priority order.  (The original
        # chain ended with soup.find('div', class_='content'), which matches
        # exactly the same nodes as the leading select_one('div.content')
        # and was therefore dead code — removed.)
        content_div = (
            soup.select_one('div.content') or
            soup.select_one('div.article-content') or
            soup.select_one('div.detail-content') or
            soup.select_one('div#content') or
            soup.select_one('div.article') or
            soup.select_one('div.viewbox')
        )

        content = ''
        if content_div:
            # Strip embedded <script>/<style> before extracting text.
            for tag in content_div(["script", "style"]):
                tag.decompose()
            content = content_div.get_text(strip=True)

        return {'content': content}

    def run(self, limit=10):
        """Crawl entry point: fetch the list, then enrich each item
        with its article content.

        Args:
            limit: Maximum number of articles to crawl.

        Returns:
            The list of article dicts, each augmented with 'content'.
        """
        self.logger.info("开始爬取 %s", self.name)
        articles = self.parse_list(limit)

        for article in articles:
            detail = self.parse_detail(article['url'])
            if detail:
                article.update(detail)

        self.logger.info("完成，共 %d 条", len(articles))
        return articles


# if __name__ == "__main__":
#     spider = BingtuanActivitySpider()
#     results = spider.run(5)
#     for item in results:
#         print("=" * 70)
#         print("标题:", item["title"])
#         print("链接:", item["url"])
#         print("日期:", item["date"])
#         print("正文前200字:", item["content"][:200])