import hashlib
import json
import logging
import os
import random
import re
import time
import urllib.parse
from dataclasses import dataclass
from http.cookies import SimpleCookie
from typing import Any, Dict, List, Optional, Set

import execjs
import requests
import urllib3
from fake_useragent import UserAgent
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# Module-level logger used by RequestHandler for request/response diagnostics.
logger = logging.getLogger('RequestHandler')

@dataclass
class ProxyConfig:
    """
    Connection settings for a single upstream proxy.

    Attributes:
        host: proxy host name or IP address.
        port: proxy port.
        username: optional auth user (used only together with password).
        password: optional auth password.
        protocol: URL scheme, e.g. 'http' or 'socks5'.
    """
    host: str
    port: int
    username: Optional[str] = None
    password: Optional[str] = None
    protocol: str = 'http'

    def to_dict(self) -> Dict[str, str]:
        """Build the mapping expected by requests' ``proxies`` argument."""
        # Embed credentials only when both pieces are present.
        credentials = (
            f"{self.username}:{self.password}@"
            if self.username and self.password
            else ""
        )
        proxy_url = f"{self.protocol}://{credentials}{self.host}:{self.port}"
        # The same proxy URL serves both schemes.
        return {scheme: proxy_url for scheme in ('http', 'https')}

class RequestHandler:
    """
    HTTP request handler for xiaohongshu.com.

    Owns a configured ``requests`` session and provides cookie/proxy
    management, retrying GET/POST helpers, and higher-level fetchers for
    note data and search results.
    """

    def __init__(self):
        """Initialize the session, default headers and helper state."""
        self.session = self._create_session()
        # List of ProxyConfig entries; stays None until set_proxy_file() succeeds.
        self.proxies = None
        # Plain name -> value mirror of the cookies held by the session.
        self.cookies = {}
        self.ua = UserAgent()
        self.headers = {
            'User-Agent': self.ua.random,
            'Accept': '*/*',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Referer': 'https://www.xiaohongshu.com/',
            'Origin': 'https://www.xiaohongshu.com'
        }
        # Default per-request timeout in seconds.  Stored here and passed
        # explicitly on each call, because assigning ``session.timeout``
        # is a no-op in requests (the original code did that and so never
        # applied any timeout).
        self.timeout = 30
        # Cache of generated search parameters to avoid recomputation.
        # NOTE: cached entries keep their original timestamp/sign.
        self.search_params_cache = {}
        # Optional JavaScript runtime used to sign search parameters.
        self.js_env = self._init_js_environment()

    def _create_session(self) -> requests.Session:
        """
        Create and configure the underlying requests session.

        Returns:
            requests.Session: session with a retry/backoff adapter
            mounted for both HTTP and HTTPS.
        """
        session = requests.Session()

        # Retry transient failures (rate limiting and 5xx responses)
        # with exponential backoff on both GET and POST.
        retry_strategy = Retry(
            total=3,
            backoff_factor=1,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=["GET", "POST"]
        )
        adapter = HTTPAdapter(max_retries=retry_strategy)
        session.mount("http://", adapter)
        session.mount("https://", adapter)

        # Requests below are sent with verify=False, so silence the
        # InsecureRequestWarning urllib3 would otherwise emit per call.
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

        return session

    def _init_js_environment(self) -> Optional[execjs.Runtime]:
        """
        Compile the small JavaScript helper used to sign search params.

        Returns:
            execjs runtime on success, or None when no JS engine is
            available (generate_search_params then falls back to the
            Python implementation).
        """
        try:
            ctx = execjs.compile("""
            // 简单的MD5函数，用于生成某些签名
            function md5(str) {
                var crypto = require('crypto');
                return crypto.createHash('md5').update(str).digest('hex');
            }
            
            // 生成搜索参数的签名（简化版）
            function generateSearchSign(params) {
                var sortedKeys = Object.keys(params).sort();
                var signStr = '';
                for (var i = 0; i < sortedKeys.length; i++) {
                    signStr += sortedKeys[i] + '=' + params[sortedKeys[i]] + '&';
                }
                signStr = signStr.slice(0, -1);
                return md5(signStr);
            }
            """)
            return ctx
        except Exception as e:
            logger.warning(f"初始化JavaScript环境失败: {e}")
            return None

    def load_cookies(self, cookies_file: str) -> bool:
        """
        Load cookies from a JSON file into the session.

        Supports three formats: a list of cookie objects (as exported by
        browser extensions), a plain name -> value dict, or a raw
        ``Cookie`` header string.

        Args:
            cookies_file (str): path to the cookies file.

        Returns:
            bool: True on success, False otherwise.
        """
        try:
            with open(cookies_file, 'r', encoding='utf-8') as f:
                cookies_data = json.load(f)

                if isinstance(cookies_data, list):
                    # Browser-extension export: list of cookie objects.
                    for cookie in cookies_data:
                        self.session.cookies.set(cookie['name'], cookie['value'], domain=cookie.get('domain', '.xiaohongshu.com'))
                        self.cookies[cookie['name']] = cookie['value']
                elif isinstance(cookies_data, dict):
                    # Simple name -> value mapping.
                    for name, value in cookies_data.items():
                        self.session.cookies.set(name, value)
                        self.cookies[name] = value
                else:
                    # Fall back to parsing a Cookie header string.
                    cookie = SimpleCookie()
                    cookie.load(str(cookies_data))
                    for key, morsel in cookie.items():
                        self.session.cookies.set(key, morsel.value)
                        self.cookies[key] = morsel.value

                logger.info(f"成功加载cookies: {len(self.cookies)} 个Cookie")
                return True

        except Exception as e:
            logger.error(f"加载cookies失败: {e}")
            return False

    def save_cookies(self, cookies_file: str) -> bool:
        """
        Persist the session's current cookies to a JSON file.

        Args:
            cookies_file (str): path to write the cookies to.

        Returns:
            bool: True on success, False otherwise.
        """
        try:
            # Flatten the cookie jar into a plain name -> value dict.
            cookies_dict = {}
            for cookie in self.session.cookies:
                cookies_dict[cookie.name] = cookie.value

            with open(cookies_file, 'w', encoding='utf-8') as f:
                json.dump(cookies_dict, f, ensure_ascii=False, indent=2)

            logger.info(f"成功保存cookies: {len(cookies_dict)} 个Cookie")
            return True

        except Exception as e:
            logger.error(f"保存cookies失败: {e}")
            return False

    def set_proxy_file(self, proxy_file: str) -> bool:
        """
        Load a list of proxies from a text file (one per line).

        Accepted line formats:
            1. http://host:port
            2. host:port                       (protocol defaults to http)
            3. username:password@host:port
        Blank lines and lines starting with '#' are skipped.

        Args:
            proxy_file (str): path to the proxy list file.

        Returns:
            bool: True when at least one proxy was loaded.
        """
        try:
            with open(proxy_file, 'r', encoding='utf-8') as f:
                proxies = []
                for line in f:
                    line = line.strip()
                    if not line or line.startswith('#'):
                        continue

                    if '://' in line:
                        protocol, rest = line.split('://', 1)
                    else:
                        protocol = 'http'
                        rest = line

                    if '@' in rest:
                        auth, host_port = rest.split('@', 1)
                        username, password = auth.split(':', 1)
                        host, port = host_port.split(':', 1)
                    else:
                        username = None
                        password = None
                        host, port = rest.split(':', 1)

                    proxy = ProxyConfig(
                        host=host,
                        port=int(port),
                        username=username,
                        password=password,
                        protocol=protocol
                    )
                    proxies.append(proxy)

                if proxies:
                    self.proxies = proxies
                    logger.info(f"成功加载 {len(proxies)} 个代理")
                    return True
                else:
                    logger.warning("未从文件中加载到有效代理")
                    return False

        except Exception as e:
            logger.error(f"加载代理文件失败: {e}")
            return False

    def _load_proxies(self, proxy_file: str) -> bool:
        """Internal alias kept for backward compatibility; see set_proxy_file."""
        return self.set_proxy_file(proxy_file)

    def _get_proxy(self) -> Optional[Dict[str, str]]:
        """
        Pick a random proxy from the loaded pool.

        Returns:
            dict or None: requests-style proxy mapping, or None when no
            proxies are configured.
        """
        if self.proxies:
            proxy = random.choice(self.proxies)
            return proxy.to_dict()
        return None

    def get_page(self, url: str, max_retries: int = 3, **kwargs) -> Optional[str]:
        """
        Fetch a page and return its decoded HTML.

        Args:
            url (str): page URL.
            max_retries (int): maximum number of attempts.
            **kwargs: extra arguments forwarded to ``session.get``.

        Returns:
            str or None: page HTML, or None after exhausting retries.
        """
        # Apply the default timeout unless the caller supplied one;
        # without it a stalled connection would hang forever.
        kwargs.setdefault('timeout', self.timeout)

        retry_count = 0
        last_error = None

        while retry_count < max_retries:
            try:
                # Rotate the User-Agent on every attempt.
                headers = self.headers.copy()
                headers['User-Agent'] = self.ua.random

                proxy = self._get_proxy() if self.proxies else None

                logger.debug(f"请求页面: {url} (重试: {retry_count})")

                response = self.session.get(
                    url,
                    headers=headers,
                    proxies=proxy,
                    verify=False,  # certificate validation intentionally disabled
                    **kwargs
                )

                response.raise_for_status()

                # Trust the declared charset when present; otherwise let
                # requests guess the encoding from the body.
                if 'charset' not in response.headers.get('Content-Type', ''):
                    response.encoding = response.apparent_encoding
                return response.text

            except requests.RequestException as e:
                last_error = e
                retry_count += 1
                logger.warning(f"请求页面失败: {e}, 正在进行第 {retry_count} 次重试")

                # Exponential backoff with jitter.
                delay = (2 ** retry_count) + random.uniform(0, 1)
                time.sleep(delay)

        logger.error(f"请求页面失败，已达到最大重试次数: {last_error}")
        return None

    def post_data(self, url: str, data: Dict = None, json_data: Dict = None, max_retries: int = 3, **kwargs) -> Optional[Dict]:
        """
        Send a POST request and return the parsed JSON response.

        Args:
            url (str): request URL.
            data (dict): form-encoded body (sent as-is).
            json_data (dict): JSON body.
            max_retries (int): maximum number of attempts.
            **kwargs: extra arguments forwarded to ``session.post``.

        Returns:
            dict or None: decoded response, or None on failure.
        """
        kwargs.setdefault('timeout', self.timeout)

        retry_count = 0
        last_error = None

        while retry_count < max_retries:
            try:
                headers = self.headers.copy()
                headers['User-Agent'] = self.ua.random
                # Only claim a JSON body when one is actually being sent;
                # a form (`data=`) post must keep its urlencoded type.
                if json_data is not None:
                    headers['Content-Type'] = 'application/json'

                proxy = self._get_proxy() if self.proxies else None

                logger.debug(f"发送POST请求: {url} (重试: {retry_count})")

                response = self.session.post(
                    url,
                    data=data,
                    json=json_data,
                    headers=headers,
                    proxies=proxy,
                    verify=False,
                    **kwargs
                )

                response.raise_for_status()
                return response.json()

            # Checked before RequestException on purpose: requests'
            # JSONDecodeError inherits from both, and a malformed body
            # should fail fast instead of being retried.
            except json.JSONDecodeError as e:
                logger.error(f"解析JSON响应失败: {e}")
                return None
            except requests.RequestException as e:
                last_error = e
                retry_count += 1
                logger.warning(f"POST请求失败: {e}, 正在进行第 {retry_count} 次重试")

                # Exponential backoff with jitter.
                delay = (2 ** retry_count) + random.uniform(0, 1)
                time.sleep(delay)

        logger.error(f"POST请求失败，已达到最大重试次数: {last_error}")
        return None

    def fetch_note_data(self, note_id: str) -> Optional[Dict]:
        """
        Fetch the data of a single note.

        Tries the web API first; on any failure falls back to scraping
        the note's HTML page for its embedded initial state.

        Args:
            note_id (str): note identifier.

        Returns:
            dict or None: note data, or None when both paths fail.
        """
        try:
            api_url = "https://www.xiaohongshu.com/api/sns/web/v1/feed"

            timestamp = int(time.time() * 1000)
            params = {
                'id': note_id,
                'time': timestamp
            }

            headers = self.headers.copy()
            headers['X-Requested-With'] = 'XMLHttpRequest'
            headers['Content-Type'] = 'application/json'

            response = self.session.get(
                api_url,
                params=params,
                headers=headers,
                timeout=self.timeout
            )

            response.raise_for_status()

            data = response.json()

            # code == 0 marks a successful API response.
            if data.get('code') == 0:
                return data.get('data')
            else:
                logger.error(f"获取笔记数据失败: {data.get('msg', '未知错误')}")
                return None

        except Exception as e:
            logger.error(f"获取笔记数据时出错: {e}")

            # API failed: fetch the note page and extract the embedded state.
            note_url = f"https://www.xiaohongshu.com/explore/{note_id}"
            html = self.get_page(note_url)

            if html:
                try:
                    # Dots escaped so only the literal global name matches.
                    json_match = re.search(r'window\.__INITIAL_STATE__=({.*?});', html, re.DOTALL)
                    if json_match:
                        return json.loads(json_match.group(1))
                except Exception as inner_e:
                    logger.error(f"从HTML提取数据失败: {inner_e}")

            return None

    def generate_search_params(self, keyword: str, page: int = 1, sort_by: str = 'general') -> Dict[str, Any]:
        """
        Build (and cache) the parameter dict for a search request.

        Args:
            keyword (str): search keyword.
            page (int): page number.
            sort_by (str): sort mode ('general' or 'popular').

        Returns:
            dict: request parameters including a 'sign' field.
        """
        # Serve repeated queries from the cache (keeps the original
        # timestamp/sign of the first call).
        cache_key = f"{keyword}_{page}_{sort_by}"
        if cache_key in self.search_params_cache:
            return self.search_params_cache[cache_key]

        params = {
            'keyword': keyword,
            'page': page,
            'sort': sort_by,
            'per_page': 30,  # default page size
            'timestamp': int(time.time() * 1000)
        }

        # Sign the request: prefer the JS implementation, fall back to an
        # equivalent Python MD5 over the sorted key=value pairs whenever
        # the JS runtime is missing or fails.  (Previously no signature at
        # all was produced when the JS environment was unavailable.)
        sign = None
        if self.js_env:
            try:
                sign = self.js_env.call('generateSearchSign', params)
            except Exception:
                sign = None
        if sign is None:
            sorted_keys = sorted(params.keys())
            sign_str = '&'.join(f"{k}={params[k]}" for k in sorted_keys)
            sign = hashlib.md5(sign_str.encode()).hexdigest()
        params['sign'] = sign

        self.search_params_cache[cache_key] = params
        return params

    def fetch_search_results(self, keyword: str, page: int = 1, sort_by: str = 'general') -> Optional[Dict]:
        """
        Fetch one page of search results from the web API.

        Falls back to HTML scraping (_fetch_search_results_alt) when the
        API request fails or reports an error code.

        Args:
            keyword (str): search keyword.
            page (int): page number.
            sort_by (str): sort mode ('general' or 'popular').

        Returns:
            dict or None: search result payload.
        """
        try:
            api_url = "https://www.xiaohongshu.com/api/sns/web/v1/search/notes"

            params = self.generate_search_params(keyword, page, sort_by)

            headers = self.headers.copy()
            headers['X-Requested-With'] = 'XMLHttpRequest'
            headers['Content-Type'] = 'application/json'

            proxy = self._get_proxy() if self.proxies else None
            response = self.session.get(
                api_url,
                params=params,
                headers=headers,
                proxies=proxy,
                timeout=self.timeout
            )

            response.raise_for_status()

            data = response.json()

            # code == 0 marks a successful API response.
            if data.get('code') == 0:
                return data.get('data')
            else:
                logger.error(f"搜索失败: {data.get('msg', '未知错误')}")

                # API reported an error: try the scraping fallback.
                return self._fetch_search_results_alt(keyword, page, sort_by)

        except Exception as e:
            logger.error(f"获取搜索结果时出错: {e}")

            # Request itself failed: try the scraping fallback.
            return self._fetch_search_results_alt(keyword, page, sort_by)

    def _fetch_search_results_alt(self, keyword: str, page: int = 1, sort_by: str = 'general') -> Optional[Dict]:
        """
        Fallback search: scrape note IDs out of the search result HTML.

        Args:
            keyword (str): search keyword.
            page (int): page number.
            sort_by (str): sort mode.

        Returns:
            dict or None: {'items': [{'id': ...}], 'has_more': bool}
            mimicking the API shape, or None when nothing was found.
        """
        try:
            encoded_keyword = urllib.parse.quote(keyword)
            sort_param = 'hot' if sort_by == 'popular' else 'general'
            search_url = f"https://www.xiaohongshu.com/search_result/{page}?keyword={encoded_keyword}&sort={sort_param}"

            html = self.get_page(search_url)

            if html:
                # Primary pattern: explicit data attributes.
                note_ids = re.findall(r'data-note-id="(\w+)"', html)

                if note_ids:
                    results = {
                        'items': [{'id': note_id} for note_id in note_ids],
                        'has_more': len(note_ids) >= 20  # heuristic: 20+ hits imply more pages
                    }
                    return results

                # Secondary pattern: explore links in the markup.
                note_matches = re.findall(r'href="/explore/(\w+)"', html)
                if note_matches:
                    results = {
                        'items': [{'id': match} for match in note_matches],
                        'has_more': len(note_matches) >= 20
                    }
                    return results

        except Exception as e:
            logger.error(f"备用搜索方法失败: {e}")

        return None

    def fetch_many_search_results(self, keyword: str, max_pages: int = 100, max_note_ids: int = 1000, sort_by: str = 'general') -> List[str]:
        """
        Collect note IDs across many search result pages.

        Stops when max_pages is reached, max_note_ids IDs have been
        collected, or the results indicate there are no further pages.

        Args:
            keyword (str): search keyword.
            max_pages (int): maximum number of pages to fetch.
            max_note_ids (int): maximum number of note IDs to collect.
            sort_by (str): sort mode ('general' or 'popular').

        Returns:
            list: de-duplicated note IDs in discovery order.
        """
        note_ids = []
        processed_ids = set()  # cross-page de-duplication
        page = 1
        has_more = True
        max_retries_per_page = 3

        logger.info(f"开始获取关键词'{keyword}'的搜索结果")
        logger.info(f"参数: 最大页数={max_pages}, 最大笔记数={max_note_ids}, 排序方式={sort_by}")

        while page <= max_pages and len(note_ids) < max_note_ids and has_more:
            logger.info(f"正在获取第 {page}/{max_pages} 页的搜索结果")

            retry_count = 0

            while retry_count < max_retries_per_page:
                results = self.fetch_search_results(keyword, page, sort_by)

                if results:
                    new_ids = []

                    if isinstance(results, dict):
                        items = results.get('items', [])

                        if isinstance(items, list):
                            # Standard shape: {'items': [{'id': ...}, ...]}
                            for item in items:
                                if isinstance(item, dict) and 'id' in item:
                                    note_id = item['id']
                                    if note_id not in processed_ids:
                                        new_ids.append(note_id)
                                        processed_ids.add(note_id)
                        else:
                            # Unusual shape: scan top-level entries for IDs.
                            for key, value in results.items():
                                if key == 'id' or (isinstance(value, dict) and 'id' in value):
                                    note_id = value['id'] if isinstance(value, dict) else value
                                    if note_id not in processed_ids:
                                        new_ids.append(note_id)
                                        processed_ids.add(note_id)

                    if new_ids:
                        # Cap the total at max_note_ids (guard against a
                        # non-positive remainder, which would mis-slice).
                        remaining = max_note_ids - len(note_ids)
                        added = new_ids[:remaining] if remaining > 0 else []
                        note_ids.extend(added)

                        logger.info(f"从第 {page} 页成功提取 {len(added)} 个笔记ID")

                        # A full page (30 items) suggests more results even
                        # when 'has_more' is absent from the payload.
                        has_more = results.get('has_more', False) or len(new_ids) == 30

                        # Page fetched successfully: leave the retry loop.
                        break
                    else:
                        logger.warning(f"第 {page} 页未找到笔记ID")
                        has_more = False
                        break
                else:
                    retry_count += 1
                    logger.warning(f"获取第 {page} 页搜索结果失败，正在进行第 {retry_count} 次重试")

                    # Linear backoff with jitter between page retries.
                    delay = 2 * retry_count + random.uniform(0, 1)
                    time.sleep(delay)

            # Give up on the whole crawl after a page exhausts its retries.
            if retry_count >= max_retries_per_page:
                logger.error(f"第 {page} 页搜索失败，已达到最大重试次数")
                has_more = False

            # Pause between pages to avoid hammering the server (skipped
            # once a stop condition has already been reached).
            if has_more and page < max_pages and len(note_ids) < max_note_ids:
                delay = random.uniform(3, 8)
                logger.info(f"等待 {delay:.2f} 秒后继续获取下一页")
                time.sleep(delay)

            page += 1

        logger.info(f"搜索完成，共获取 {len(note_ids)} 个笔记ID")
        return note_ids

    def close(self):
        """Close the HTTP session and release its connections."""
        try:
            if hasattr(self, 'session'):
                self.session.close()
                logger.info("会话已关闭")
        except Exception as e:
            logger.error(f"关闭会话时出错: {e}")

# Manual smoke test — run the module directly to exercise the handler.
if __name__ == '__main__':
    handler = RequestHandler()

    # Optional: load a proxy pool before making requests.
    # handler.set_proxy_file('proxies.txt')

    # Basic connectivity check against the site root.
    page = handler.get_page('https://www.xiaohongshu.com')
    if page:
        print(f"成功获取页面，长度: {len(page)}")
    else:
        print("获取页面失败")

    # Search sample (disabled by default):
    # note_ids = handler.fetch_many_search_results('美食', max_pages=3, max_note_ids=20)
    # print(f"搜索结果: {note_ids}")

    handler.close()
