import random
from bs4 import BeautifulSoup
import re
import time
import traceback
from datetime import datetime
from typing import List, Dict, Optional, Any
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from .base_crawler import BaseCrawler

class HuxiuCrawler(BaseCrawler):
    """Crawler for huxiu.com article lists and article detail pages.

    Uses the HTTP session managed by ``BaseCrawler`` (via ``request_utils``)
    for ordinary requests, and falls back to a headless Chrome session to
    obtain the dynamic ``huxiu_hash_code`` required by the paginated
    article-list endpoint.
    """

    def __init__(self):
        super().__init__("config/huxiu_config.yaml")
        self._setup_anti_anti_crawler()

    def _setup_anti_anti_crawler(self):
        """Install extra anti-anti-crawler headers and seed cookies on the session.

        Proxies are deliberately NOT configured here; they are picked per
        request so each call can use a fresh random proxy.
        """
        # Browser-like headers so the AJAX list endpoint accepts our requests.
        self.request_utils.session.headers.update({
            "Referer": "https://www.huxiu.com/",
            "X-Requested-With": "XMLHttpRequest",
            "User-Agent": self.request_utils._get_random_user_agent(),
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Origin": "https://www.huxiu.com",
            "Connection": "keep-alive",
            "Cache-Control": "no-cache",
            "Pragma": "no-cache",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Accept-Encoding": "gzip, deflate, br",
            "Upgrade-Insecure-Requests": "1",
            "DNT": "1",
        })

        # Seed cookies the site expects from a returning visitor.
        self.request_utils.session.cookies.update({
            "huxiu_analyze": "1",
            "huxiu_visit": "1",
            "huxiu_remember": "1",
        })

    def parse_article_list(self, html: str) -> List[Dict[str, str]]:
        """Parse list-page HTML into ``[{"title": ..., "link": ...}, ...]``.

        Items missing a title, an anchor, or an ``href`` are skipped.
        Relative hrefs are resolved against https://www.huxiu.com; hrefs
        that are already absolute are kept as-is.
        """
        soup = BeautifulSoup(html, "html.parser")
        articles: List[Dict[str, str]] = []

        for item in soup.select(".article-item"):
            title = item.select_one(".article-item__title")
            link = item.select_one("a")
            if not title or not link:
                continue

            # .get() avoids a KeyError when the anchor has no href attribute.
            href = link.get("href")
            if not href:
                continue
            if not href.startswith("http"):
                href = "https://www.huxiu.com" + href

            articles.append({
                "title": title.text.strip(),
                "link": href,
            })

        return articles

    def parse_article_detail(self, html: str) -> Dict[str, str]:
        """Parse a detail page into title/content/publish_time, or {} if any part is missing."""
        soup = BeautifulSoup(html, "html.parser")

        title = soup.select_one(".article__title")
        content = soup.select_one(".article__content")
        # Named ``time_el`` so it does not shadow the imported ``time`` module.
        time_el = soup.select_one(".article__time")

        if not all([title, content, time_el]):
            return {}

        return {
            "title": title.text.strip(),
            "content": self._clean_content(content),
            "publish_time": self._parse_time(time_el.text.strip()),
        }

    def _clean_content(self, content) -> str:
        """Strip script/style/iframe tags and return the plain article text."""
        for tag in content.select("script, style, iframe"):
            tag.decompose()
        return content.get_text().strip()

    def _parse_time(self, time_str: str) -> str:
        """Convert ``YYYY-mm-dd HH:MM:SS`` to ISO-8601; return "" on parse failure."""
        try:
            dt = datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S")
            return dt.isoformat()
        except ValueError:
            return ""

    def crawl_page(self, url: str, page: int = 1, retry_count: int = 3) -> Optional[List[Dict[str, Any]]]:
        """Fetch one page of the article list with retries and exponential backoff.

        Args:
            url: Target URL (currently informational; the list endpoint is fixed).
            page: Page number to request.
            retry_count: Maximum number of attempts.

        Returns:
            Parsed article list, or None after all attempts fail.
        """
        for attempt in range(retry_count):
            try:
                print(f"[第{attempt+1}次尝试] 正在获取第{page}页数据...")
                print(f"请求URL: https://www.huxiu.com/v2_action/article_list")
                print(f"请求参数: page={page}")

                # The list endpoint requires a dynamic hash token.
                huxiu_hash = self._get_huxiu_hash()
                print(f"获取到的huxiu_hash: {huxiu_hash}")

                start_time = time.time()
                # Random delay to look less like a bot.
                delay = random.uniform(1, 3)
                print(f"随机延迟: {delay:.2f}秒")
                time.sleep(delay)

                # Fresh random proxy per request (may be None).
                proxy = self.request_utils._get_random_proxy()
                if proxy:
                    print(f"使用代理: {proxy}")

                response = self.request_utils.request(
                    "POST",
                    "https://www.huxiu.com/v2_action/article_list",
                    data={
                        "huxiu_hash_code": huxiu_hash,
                        "page": page,
                        "last_dateline": str(int(time.time())),
                    },
                    timeout=10,  # 10-second timeout
                    allow_redirects=False,
                    proxies=proxy,
                )
                elapsed_time = time.time() - start_time
                print(f"请求耗时: {elapsed_time:.2f}秒")

                # Fall through (instead of ``continue``) on failure so the
                # backoff wait below applies to every retry path.
                if not response:
                    print(f"[警告] 第{page}页请求失败，响应为空")
                elif response.status_code != 200:
                    print(f"[警告] 请求失败，状态码: {response.status_code}")
                else:
                    print(f"成功获取第{page}页数据")
                    print(f"响应长度: {len(response.text)}字节")

                    articles = self.parse_article_list(response.text)
                    print(f"解析到{len(articles)}篇文章")
                    return articles

            except requests.exceptions.Timeout:
                print(f"[超时] 第{page}页请求超时")
            except requests.exceptions.RequestException as e:
                print(f"[网络错误] 获取第{page}页数据时出错: {str(e)}")
            except Exception as e:
                print(f"[异常] 处理第{page}页数据时发生未预期错误: {str(e)}")
                print(traceback.format_exc())

            # Exponential backoff before the next attempt.
            if attempt < retry_count - 1:
                wait_time = 2 ** attempt
                print(f"等待{wait_time}秒后重试...")
                time.sleep(wait_time)

        print(f"[错误] 获取第{page}页数据失败，已达到最大重试次数")
        return None

    def _get_huxiu_hash(self) -> str:
        """Fetch the dynamic pagination hash via a headless Chrome session.

        Returns the hash string, or "" if it cannot be found or Selenium fails.
        """
        print("开始使用Selenium获取huxiu_hash...")
        options = Options()
        options.add_argument("--headless")
        options.add_argument("--disable-gpu")
        options.add_argument("--no-sandbox")
        options.add_argument("--disable-dev-shm-usage")
        options.add_argument(f"user-agent={self.request_utils._get_random_user_agent()}")

        # Route the browser through a random proxy when one is available.
        proxy = self.request_utils._get_random_proxy()
        if proxy:
            options.add_argument(f"--proxy-server={proxy}")

        driver = webdriver.Chrome(options=options)
        try:
            driver.get("https://www.huxiu.com/")

            # Generous wait so the page is fully loaded.
            WebDriverWait(driver, 30).until(
                EC.presence_of_element_located((By.TAG_NAME, "body"))
            )

            # Scroll down and back up to trigger any lazily-loaded scripts.
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(2)
            driver.execute_script("window.scrollTo(0, 0);")
            time.sleep(1)

            # Probe every known location for the hash in one pass (this is
            # the superset of the two extraction scripts the code used to run).
            hash_value = driver.execute_script("""
                return window.huxiu_hash_code || 
                       document.querySelector('meta[name=\"huxiu:hash_code\"]')?.content ||
                       document.querySelector('input[name=\"huxiu_hash_code\"]')?.value ||
                       document.cookie.match(/huxiu_hash_code=([^;]+)/)?.[1] ||
                       document.querySelector('script[data-hash]')?.dataset.hash ||
                       '';
            """)

            if hash_value:
                print(f"成功获取huxiu_hash: {hash_value}")
                return hash_value

            print("[错误] 未找到huxiu_hash_code")
            return ""

        except Exception as e:
            print(f"[异常] 使用Selenium获取huxiu_hash时出错: {str(e)}")
            print(traceback.format_exc())
            return ""
        finally:
            driver.quit()

    def _get_article_list(self) -> List[Dict[str, Any]]:
        """Fetch page 1 of the article list; return [] on failure."""
        print("获取第一页文章列表...")
        articles = self.crawl_page("https://www.huxiu.com/")
        if not articles:
            return []

        print(f"成功获取{len(articles)}篇文章")
        return articles

    def _get_article_detail(self, article: Dict[str, Any]) -> Dict[str, Any]:
        """Fetch and merge detail fields into *article* in place; returns it."""
        print(f"获取文章详情: {article['title']}")
        response = self.request_utils.request("GET", article["link"])
        if not response:
            return {}

        detail = self.parse_article_detail(response.text)
        article.update(detail)
        return article

    def crawl(self):
        """Top-level entry point: fetch the list, then enrich each article with its detail."""
        print("开始爬取文章列表...")
        articles = self._get_article_list()
        print(f"成功获取{len(articles)}篇文章列表")

        print("开始获取文章详情...")
        for i, article in enumerate(articles):
            print(f"正在处理第{i+1}篇文章: {article['title']}")
            self._get_article_detail(article)
            print(f"第{i+1}篇文章处理完成")

        print("所有文章处理完成")
        return articles

if __name__ == "__main__":
    # Manual smoke test: build the crawler and run a full crawl once.
    print("=== 测试爬虫 ===")
    try:
        spider = HuxiuCrawler()
        print("爬虫初始化成功")
        print("开始爬取...")
        results = spider.crawl()
        print(f"爬取完成，共获取{len(results)}篇文章")
    except Exception as err:
        print(f"发生错误: {err}")
        print(traceback.format_exc())