import requests
from bs4 import BeautifulSoup
import time
from datetime import datetime
from .base_crawler import BaseCrawler
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.cookie_manager import CookieManager
import logging
from utils.news_parser import NewsParser

class JiuyeNewsCrawler(BaseCrawler):
    """Crawler for the MOHRSS (mohrss.gov.cn) employment-news section.

    Walks the paginated list pages (index.html, index_2.html, ...),
    collects article URLs, and parses each article with the shared
    ``NewsParser`` using the CSS-selector rules configured in ``__init__``.
    """

    # Seconds before a hung request is abandoned. Without an explicit
    # timeout, requests.get() can block forever on a stalled server.
    REQUEST_TIMEOUT = 15

    def __init__(self, source_config):
        """Set up the HTTP session, cookies, and parsing rules.

        Args:
            source_config: dict with at least 'name' (source identifier,
                passed to CookieManager) and 'url' (section base URL).
        """
        super().__init__(source_config)
        # Dedicated session so cookies persist across list/detail requests.
        self.session = requests.Session()
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:137.0) Gecko/20100101 Firefox/137.0',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Accept-Encoding': 'gzip, deflate, br, zstd',
            'Upgrade-Insecure-Requests': '1',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-User': '?1',
            'Priority': 'u=0, i',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache'
        }

        # Obtain fresh cookies; fall back to a minimal default set on failure
        # so the crawler can still attempt a best-effort run.
        try:
            cookie_manager = CookieManager(self.source_config['name'])  # keyed by source name
            self.cookies = cookie_manager.get_fresh_cookies()
            if not self.cookies:
                logging.error("获取cookies失败")
                raise Exception("无法获取有效的cookies")

            # Log only the cookie *names* — logging values would leak
            # session credentials into the log files.
            logging.info(f"成功获取cookies: {sorted(self.cookies)}")
            self.session.cookies.update(self.cookies)
        except Exception as e:
            logging.error(f"初始化Cookie失败: {str(e)}")
            # Default fallback cookies observed to satisfy the site's
            # anti-bot gate — TODO confirm they are still sufficient.
            self.cookies = {
                'ariauseGraymode': 'false',
                'arialoadData': 'false'
            }
            self.session.cookies.update(self.cookies)

        # CSS-selector rules consumed by NewsParser.parse_news_page().
        self.news_config = {
            'title_selectors': ['div.insMainConTitle_b'],
            'time_selectors': ['div.insMainConTitle_c'],
            'content_selectors': ['div.insMainConTxt_c'],
            'time_formats': ['%Y-%m-%d']
        }

    def get_page_content(self, url):
        """Fetch *url* and return its body decoded as UTF-8, or None on failure.

        Failures (network errors, timeouts, HTTP 4xx/5xx) are logged and
        swallowed so callers can simply skip the page.
        """
        try:
            response = self.session.get(
                url, headers=self.headers, timeout=self.REQUEST_TIMEOUT
            )
            # Treat HTTP error statuses as failures instead of parsing
            # an error page as if it were a news page.
            response.raise_for_status()
            # The site serves UTF-8; force it in case the charset header
            # is missing and requests guesses wrong.
            response.encoding = 'utf-8'
            return response.text
        except requests.RequestException as e:
            logging.error(f"获取页面失败 {url}: {str(e)}")
            return None

    def get_urls(self):
        """Collect article URLs from all list pages.

        Returns:
            list[str]: article URLs in discovery order (may contain
            duplicates if the site repeats entries across pages).
        """
        base_url = self.source_config['url']
        urls = []

        try:
            # First page uses a different filename (index.html).
            logging.info("正在获取第1页...")
            first_page = self.get_page_content(f'{base_url}/index.html')
            if first_page:
                first_page_urls = self.parse_list_page(first_page, page=1)
                logging.info(f"第1页获取到 {len(first_page_urls)} 个URL")
                urls.extend(first_page_urls)

            # Remaining pages 2-9 (index_2.html ... index_9.html).
            for page in range(2, 10):
                url = f'{base_url}/index_{page}.html'
                logging.info(f'正在获取第{page}页...')
                page_html = self.get_page_content(url)
                if page_html:
                    page_urls = self.parse_list_page(page_html, page=page)
                    logging.info(f"第{page}页获取到 {len(page_urls)} 个URL")
                    urls.extend(page_urls)
                time.sleep(1)  # throttle to avoid hammering the server

            logging.info(f"总共获取到 {len(urls)} 个URL")
            return urls
        except Exception as e:
            logging.error(f"获取URL列表失败: {str(e)}")
            return []

    def parse_list_page(self, html, page=0):
        """Extract article URLs from one list page's HTML.

        Args:
            html: raw HTML of the list page (may be None/empty).
            page: page number, used only for log messages.

        Returns:
            list[str]: resolved absolute article URLs (empty on failure).
        """
        if not html:
            return []

        soup = BeautifulSoup(html, 'html.parser')
        urls = []
        base_url = self.source_config['url']

        try:
            # Each article entry sits in a div.serviceMainListTabCon block.
            for item in soup.find_all('div', class_='serviceMainListTabCon'):
                link = item.find('a')
                if link:
                    href = link.get('href', '')
                    title = link.get_text().strip()
                    # Resolve relative hrefs via our MOHRSS-aware override.
                    url = self.resolve_url(base_url, href)

                    logging.info(f"第{page}页发现文章: {title}")
                    logging.info(f"原始href: {href}")
                    logging.info(f"解析后URL: {url}")

                    urls.append(url)
            return urls
        except Exception as e:
            logging.error(f"解析列表页失败: {str(e)}")
            return []

    def parse_page(self, url):
        """Fetch and parse one article page.

        Returns:
            dict with the parsed fields plus 'url', or None on failure.
        """
        logging.info(f'正在解析: {url}')
        html = self.get_page_content(url)
        if not html:
            return None

        try:
            # Delegate field extraction to the shared parser rules.
            result = NewsParser.parse_news_page(html, url, self.news_config)
            if result:
                result['url'] = url
                return result
            return None
        except Exception as e:
            logging.error(f"解析详情页失败 {url}: {str(e)}")
            return None

    def resolve_url(self, base_url, href):
        """Resolve MOHRSS's '../'-style relative links to absolute URLs.

        Links that are not '../'-relative are delegated to the base class.
        """
        if not href.startswith('../'):
            return super().resolve_url(base_url, href)

        # Drop empty segments and the '..' parent markers; what remains is
        # the path rooted at the site origin.
        href_segments = [seg for seg in href.split('/') if seg and seg != '..']

        # NOTE(review): host is hard-coded — assumes all '../' links point
        # at www.mohrss.gov.cn; confirm if other sources reuse this class.
        return f"https://www.mohrss.gov.cn/{'/'.join(href_segments)}"