import requests
import time
import random
from typing import Dict, List, Tuple, Optional, Any ,Union,Callable # 这里必须包含 Any
from requests.exceptions import RequestException, Timeout, ConnectionError
from bs4 import BeautifulSoup
import logging

# 配置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


class Spider:
    """Generic crawler wrapper: a requests.Session with default headers,
    optional proxies, per-request random delay, and retry-with-backoff."""

    def __init__(
        self,
        headers: Optional[Dict] = None,
        proxies: Optional[Dict] = None,
        timeout: int = 10,
        retry_times: int = 3,
        retry_delay: int = 2,
        random_delay: Optional[Tuple[float, float]] = (1, 3)  # random delay range (seconds)
    ):
        """
        Initialize the spider.

        :param headers: request-header dict (a default desktop Chrome UA is used if omitted)
        :param proxies: proxy dict, format: {'http': 'http://ip:port', 'https': 'https://ip:port'}
        :param timeout: request timeout in seconds
        :param retry_times: number of retries after a failed request
        :param retry_delay: delay between retries in seconds
        :param random_delay: (min, max) random delay applied before each request;
            pass a falsy value (e.g. None) to disable the delay
        """
        self.session = requests.Session()
        self.headers = headers or {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
        }
        self.session.headers.update(self.headers)
        self.proxies = proxies
        self.timeout = timeout
        self.retry_times = retry_times
        self.retry_delay = retry_delay
        self.random_delay = random_delay

    def set_headers(self, headers: Dict) -> None:
        """Merge *headers* into both the local copy and the live session headers."""
        self.headers.update(headers)
        self.session.headers.update(headers)

    def set_proxies(self, proxies: Dict) -> None:
        """Replace the proxy configuration used for subsequent requests."""
        self.proxies = proxies

    def _random_sleep(self) -> None:
        """Sleep a random duration within self.random_delay to avoid bans.

        No-op when random_delay is falsy (e.g. None).
        """
        if self.random_delay:
            delay = random.uniform(*self.random_delay)
            time.sleep(delay)

    def _request(
        self,
        method: str,
        url: str,
        params: Optional[Dict] = None,
        data: Optional[Union[Dict, str]] = None,
        json: Optional[Dict] = None,
        **kwargs
    ) -> Optional[requests.Response]:
        """
        Internal request helper with the retry mechanism.

        :param method: HTTP method ('get'/'post'/...)
        :param url: target URL
        :param params: URL query parameters
        :param data: POST body (form dict or raw string)
        :param json: JSON body
        :param kwargs: extra keyword arguments forwarded to Session.request
        :return: response object, or None after all retries failed
        """
        self._random_sleep()
        # retry_times retries after the initial attempt, hence + 1 total attempts
        for i in range(self.retry_times + 1):
            try:
                response = self.session.request(
                    method=method,
                    url=url,
                    params=params,
                    data=data,
                    json=json,
                    proxies=self.proxies,
                    timeout=self.timeout,
                    **kwargs
                )
                response.raise_for_status()  # raise on HTTP error status codes
                logger.info(f"请求成功: {url} (状态码: {response.status_code})")
                return response
            except (Timeout, ConnectionError) as e:
                logger.warning(f"请求超时/连接错误: {url} (错误: {str(e)})")
            except RequestException as e:
                # e.response may be None (e.g. no response received); getattr covers that
                logger.warning(f"请求失败: {url} (错误: {str(e)}, 状态码: {getattr(e.response, 'status_code', '未知')})")

            if i < self.retry_times:
                logger.info(f"第 {i+1} 次重试... (剩余: {self.retry_times - i})")
                time.sleep(self.retry_delay)

        logger.error(f"多次重试后仍失败: {url}")
        return None

    def get(
        self,
        url: str,
        params: Optional[Dict] = None,
        **kwargs
    ) -> Optional[requests.Response]:
        """Issue a GET request; returns the response or None on failure."""
        return self._request('get', url, params=params, **kwargs)

    def post(
        self,
        url: str,
        data: Optional[Union[Dict, str]] = None,
        json: Optional[Dict] = None,
        **kwargs
    ) -> Optional[requests.Response]:
        """Issue a POST request; returns the response or None on failure."""
        return self._request('post', url, data=data, json=json, **kwargs)

    def get_html(self, url: str, encoding: Optional[str] = None, **kwargs) -> Optional[str]:
        """Fetch a page and return its HTML text, or None on failure.

        :param url: target URL
        :param encoding: optional response encoding override (e.g. 'gbk')
        :param kwargs: extra keyword arguments forwarded to :meth:`get`
            (previously crawl() forwarded kwargs here but they were rejected)
        """
        response = self.get(url, **kwargs)
        if response:
            if encoding:
                response.encoding = encoding
            return response.text
        return None

    def get_json(self, url: str, **kwargs) -> Optional[Dict]:
        """Fetch a URL and return its parsed JSON body, or None on failure.

        :param kwargs: extra keyword arguments forwarded to :meth:`get`
        """
        response = self.get(url, **kwargs)
        if response:
            try:
                return response.json()
            except ValueError:
                logger.error(f"解析JSON失败: {url}")
        return None

    def parse_html(self, html: str, parser: str = 'html.parser') -> BeautifulSoup:
        """Parse an HTML string into a BeautifulSoup object."""
        return BeautifulSoup(html, parser)

    def crawl(
        self,
        url: str,
        parse_func: Callable[[str], Any],
        **kwargs
    ) -> Any:
        """
        Full crawl pipeline: request -> parse.

        :param url: target URL
        :param parse_func: parser that receives the HTML string and returns the result
        :param kwargs: extra keyword arguments forwarded to :meth:`get_html`
            (e.g. ``encoding=...`` or request kwargs like ``params=...``)
        :return: the parser's result, or None when the fetch failed
        """
        html = self.get_html(url, **kwargs)
        if html:
            return parse_func(html)
        return None


# # 使用示例
##pip install requests beautifulsoup4
# if __name__ == "__main__":
#     # 初始化爬虫
#     spider = Spider(
#         retry_times=2,
#         random_delay=(1, 2)  # 每次请求前随机延迟1-2秒
#     )

#     url = "http://music.2t58.com/"

    # 示例1: 爬取网页HTML
    # html = spider.get_html(url)
    # if html:
    #     print(f"获取到HTML长度: {len(html)}")

#     # 示例2: 解析网页标题
    # def parse_title(html: str) -> Optional[str]:
    #     soup = spider.parse_html(html)
    #     title_tag = soup.title
    #     return title_tag.text if title_tag else None

    # title = spider.crawl(url, parse_title)
    # print(f"网页标题: {title}")

    # def parse_list(html: str) -> Optional[str]:
    #     soup = spider.parse_html(html)
    #     result = []
    #     li_tag = soup.select('.ilingkuplay_list > ul li div.name a')
    #    # 2. 遍历每个 li，只提取 class="name" 的 div 下的 a 标签
    #     for a in li_tag:
    #         href = a.get('href')
    #         title = a.get('title')
    #         if href and title:  # 过滤掉 href 或 title 为空的无效数据
    #             result.append({
    #                 'href': href,
    #                 'title': title
    #             })
    #         else:
    #             print(f"警告：发现无效 a 标签，href={href}, title={title}")
    #             # 打印结果
    #     for item in result:
    #         print(f"href: {item['href']}, title: {item['title']}")

    # spider.crawl(url, parse_list)
    # title = spider.crawl(url, parse_list)
    # print(f"网页标题: {title}")

    # 示例3: 使用代理(如需)
    # proxy = {
    #     'http': 'http://127.0.0.1:7890',
    #     'https': 'http://127.0.0.1:7890'
    # }
    # spider.set_proxies(proxy)
    # response = spider.get("https://httpbin.org/ip")
    # print(response.json())