import random
import time
import logging
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from typing import Optional, Dict, Any
from pathlib import Path
import yaml

class RequestUtils:
    """HTTP helper wrapping a requests.Session with automatic retries,
    rotating User-Agent headers, and a self-pruning proxy pool.

    Configuration is read from ``config/user_agents.yaml`` and
    ``config/proxies.yaml``; activity is logged to ``logs/requests.log``.
    """

    def __init__(self):
        self.session = requests.Session()
        self._setup_session()
        self._setup_logging()
        self.user_agents = self._load_user_agents()
        self.proxy_pool = self._load_proxy_pool()

    def _setup_session(self):
        """Mount an HTTPAdapter with a shared retry strategy on both schemes.

        NOTE(review): allowed_methods includes non-idempotent verbs (POST);
        retrying those can duplicate side effects on the server — confirm
        this is intentional for the target sites.
        """
        retry_strategy = Retry(
            total=3,
            backoff_factor=1,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=["HEAD", "GET", "POST", "PUT", "DELETE", "OPTIONS", "TRACE"]
        )
        adapter = HTTPAdapter(max_retries=retry_strategy)
        self.session.mount("http://", adapter)
        self.session.mount("https://", adapter)

    def _setup_logging(self):
        """Configure file logging under logs/ (directory created if absent).

        NOTE(review): basicConfig mutates the process-wide root logger; a
        module-level ``logging.getLogger(__name__)`` would be less invasive
        if this class is embedded in a larger application.
        """
        Path("logs").mkdir(exist_ok=True)
        logging.basicConfig(
            filename="logs/requests.log",
            level=logging.INFO,
            format="%(asctime)s - %(levelname)s - %(message)s"
        )

    def _load_yaml_list(self, path: str, key: str) -> list:
        """Return the list stored under *key* in the YAML file at *path*."""
        with open(path) as f:
            return yaml.safe_load(f)[key]

    def _load_user_agents(self) -> list:
        """Load the User-Agent strings from config/user_agents.yaml."""
        return self._load_yaml_list("config/user_agents.yaml", "user_agents")

    def _load_proxy_pool(self) -> list:
        """Load the proxy ``host:port`` strings from config/proxies.yaml."""
        return self._load_yaml_list("config/proxies.yaml", "proxies")

    def _get_random_user_agent(self) -> str:
        """Pick one User-Agent string at random from the loaded pool."""
        return random.choice(self.user_agents)

    def _check_proxy(self, proxy: str) -> bool:
        """Return True if *proxy* can fetch http://httpbin.org/ip within 5s.

        Only network/HTTP errors count as failure; any other exception
        (e.g. a programming error) propagates to the caller.
        """
        try:
            test_url = "http://httpbin.org/ip"
            proxies = {
                "http": f"http://{proxy}",
                "https": f"http://{proxy}"
            }
            response = requests.get(test_url, proxies=proxies, timeout=5)
            return response.status_code == 200
        except requests.RequestException:
            # Narrowed from a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit and hid real bugs.
            return False

    def _get_random_proxy(self) -> Optional[Dict[str, str]]:
        """Return a working requests proxy mapping, or None.

        Tries up to 3 randomly chosen proxies; dead ones are pruned from
        the pool so later calls do not retry them.
        """
        # Try at most 3 candidate proxies.
        for _ in range(3):
            # Re-check inside the loop: pruning below can empty the pool
            # mid-iteration (the original one-shot check could then make
            # random.choice raise IndexError on an empty list).
            if not self.proxy_pool:
                return None
            proxy = random.choice(self.proxy_pool)
            if self._check_proxy(proxy):
                return {
                    "http": f"http://{proxy}",
                    "https": f"http://{proxy}"
                }
            # Remove the invalid proxy from the pool.
            self.proxy_pool.remove(proxy)

        return None

    def _handle_anti_scraping(self, response: requests.Response) -> bool:
        """Return False when the response looks like an anti-bot block."""
        if response.status_code == 403:
            logging.warning("Detected 403 Forbidden")
            return False
        if "captcha" in response.text.lower():
            logging.warning("Detected CAPTCHA")
            return False
        return True

    def request(self, method: str, url: str, **kwargs) -> Optional[requests.Response]:
        """Perform *method* on *url* with UA rotation, optional proxying,
        and up to 3 attempts with exponential backoff.

        Extra **kwargs are forwarded to ``Session.request``. Returns the
        Response on HTTP 200, or None once all attempts are exhausted.
        """
        # Copy so we never mutate the caller's headers dict in place.
        headers = dict(kwargs.get("headers") or {})
        headers["User-Agent"] = self._get_random_user_agent()
        kwargs["headers"] = headers

        for attempt in range(3):
            try:
                # Domestic (Chinese) sites are fetched directly, without a proxy.
                if not url.startswith("https://www.huxiu.com"):
                    proxy = self._get_random_proxy()
                    if proxy:
                        kwargs["proxies"] = proxy
                        logging.info(f"Using proxy: {proxy}")
                response = self.session.request(method, url, timeout=10, **kwargs)

                if not self._handle_anti_scraping(response):
                    # Back off before retrying a blocked response (the
                    # original retried instantly here, unlike other paths).
                    time.sleep(2 ** attempt)
                    continue

                if response.status_code == 200:
                    logging.info(f"Success: {method} {url}")
                    return response

                logging.warning(f"Attempt {attempt + 1}: {response.status_code} {url}")
                time.sleep(2 ** attempt)

            except requests.RequestException as e:
                # Narrowed from ``Exception`` so programming errors surface
                # instead of being logged and retried.
                logging.error(f"Request failed: {str(e)}")
                time.sleep(2 ** attempt)

        logging.error(f"Max retries exceeded for {url}")
        return None
