import os
import time
import random
import re
import concurrent.futures
import threading
import pandas as pd
import requests
import logging
from urllib.parse import urlparse
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# Configure logging: mirror every message to crawl_log.txt (next to this
# script) and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'crawl_log.txt')),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Global crawler configuration
CONFIG = {
    'max_retries': 3,
    'timeout': 15,
    'thread_count': 5,  # number of worker threads (user setting)
    'request_delay': (1, 3),  # random delay range between requests, seconds
    'proxy_pool_threshold': 10,  # top up the proxy pool when it drops below this size
    'user_agents': [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/89.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'
    ],
    'login_retry_limit': 2,  # max re-fetch attempts when a login page is detected
    'empty_proxy_wait_time': 5  # seconds to wait when the proxy pool is empty
}

# Proxy API configuration
PROXY_CONFIG = {
    'main_api': 'http://dddip.top/api/get?token=426e97524922b118745249567bd332&number=1000&type=https&format=1',
    'backup_apis': [
        # backup proxy APIs can be added here
    ]
}

# Global counters shared across worker threads
processed_count = 0
failed_count = 0
progress_lock = threading.Lock()  # guards progress output and the counters above

# Paths resolved relative to the script's own directory
script_dir = os.path.dirname(os.path.abspath(__file__))
excel_file = os.path.join(script_dir, '店铺.xlsx')
output_dir = os.path.join(script_dir, 'url')
progress_file = os.path.join(script_dir, 'processed_urls.txt')

class ProxyManager:
    """Thread-safe proxy pool: fetches proxies from an API, hands them out
    round-robin, and evicts/refills on failure.

    Pool entries are requests-style dicts ({'http': ..., 'https': ...}) or
    None, meaning "connect directly without a proxy".
    """
    def __init__(self):
        self.proxies_pool = []
        self.current_proxy_index = 0
        # BUGFIX: was threading.Lock(). mark_proxy_failed() holds the lock
        # and may call update_proxies(), which acquires it again on the
        # same thread — a non-reentrant Lock deadlocks there.
        self.lock = threading.RLock()

    def get_proxies_from_api(self, api_url=None):
        """Fetch a proxy list from the API and format it for `requests`.

        Returns a (possibly empty) list of proxy dicts; never raises.
        Note: performs a network call — callers holding self.lock block
        other threads for its duration.
        """
        if api_url is None:
            api_url = PROXY_CONFIG['main_api']

        try:
            logger.info("正在从API获取代理IP...")
            response = requests.get(api_url, timeout=10)
            response.raise_for_status()

            # BUGFIX: splitlines() accepts \r\n, \n and \r uniformly; the
            # old split('\r\n') silently broke if the API used bare \n.
            proxies_list = response.text.strip().splitlines()

            # De-duplicate, reporting how many entries were dropped.
            original_count = len(proxies_list)
            proxies_list = list(set(proxies_list))
            unique_count = len(proxies_list)
            if original_count > unique_count:
                logger.info(f"已去除{original_count - unique_count}个重复代理IP")

            # HTTPS traffic is tunneled through the same HTTP proxy endpoint.
            formatted_proxies = []
            for proxy in proxies_list:
                proxy = proxy.strip()
                if proxy:
                    formatted_proxies.append({
                        'http': f'http://{proxy}',
                        'https': f'http://{proxy}'
                    })

            logger.info(f"成功获取{len(formatted_proxies)}个代理IP")
            return formatted_proxies
        except Exception as e:
            logger.error(f"获取代理IP失败: {str(e)}")
            logger.info("使用备用代理获取方法...")
            # PROXY_CONFIG['backup_apis'] could be tried here.
            return []

    def update_proxies(self):
        """Replace the pool with a fresh batch from the API (thread-safe)."""
        with self.lock:
            self.proxies_pool = self.get_proxies_from_api()
            # Fall back to a single None entry (direct connection) so
            # get_next_proxy() always has something to hand out.
            if not self.proxies_pool:
                self.proxies_pool = [None]
            self.current_proxy_index = 0

    def get_next_proxy(self):
        """Return the next pool entry round-robin, or None when the pool is empty."""
        with self.lock:
            if not self.proxies_pool:
                return None
            # BUGFIX: clamp the index — the pool may have shrunk via
            # mark_proxy_failed() since the index was last advanced,
            # which previously allowed an IndexError here.
            self.current_proxy_index %= len(self.proxies_pool)
            proxy = self.proxies_pool[self.current_proxy_index]
            self.current_proxy_index = (self.current_proxy_index + 1) % len(self.proxies_pool)
            return proxy

    def mark_proxy_failed(self, proxy, reason=None):
        """Evict a failed proxy and top the pool up when it runs low (thread-safe)."""
        with self.lock:
            if proxy and proxy in self.proxies_pool:
                self.proxies_pool.remove(proxy)
                if reason:
                    logger.warning(f"从代理池中移除不可用代理: {proxy.get('http', '未知代理')} (原因: {reason})\n代理池剩余数量: {len(self.proxies_pool)}")
                else:
                    logger.warning(f"从代理池中移除不可用代理: {proxy.get('http', '未知代理')}\n代理池剩余数量: {len(self.proxies_pool)}")

            # Below the threshold: fetch replacements.
            if self.proxies_pool and len(self.proxies_pool) < CONFIG['proxy_pool_threshold']:
                logger.info("代理池数量不足，开始补充代理...")
                new_proxies = self.get_proxies_from_api()
                if new_proxies:
                    # BUGFIX: the fetched proxies were previously logged
                    # but discarded — actually add them (skipping dupes).
                    for candidate in new_proxies:
                        if candidate not in self.proxies_pool:
                            self.proxies_pool.append(candidate)
                    logger.info(f"成功补充 {len(new_proxies)} 个代理")
                else:
                    logger.warning("补充代理失败，继续使用现有代理")
                    if not self.proxies_pool:
                        self.proxies_pool = [None]

            # Pool fully drained: rebuild it from scratch.
            elif not self.proxies_pool:
                logger.warning("代理池为空，尝试重新获取代理...")
                self.update_proxies()  # safe: self.lock is re-entrant
                if not self.proxies_pool:
                    # NOTE(review): sleeping while holding the lock stalls
                    # every other worker — consider moving the wait out.
                    logger.warning(f"获取代理失败，等待{CONFIG['empty_proxy_wait_time']}秒后重试...")
                    time.sleep(CONFIG['empty_proxy_wait_time'])

# Single shared ProxyManager instance used by all worker threads
proxy_manager = ProxyManager()

# Make sure the output directory exists
def ensure_output_dir():
    """Create `output_dir` if it does not exist yet.

    Uses exist_ok to avoid the check-then-create race (the old
    os.path.exists()/makedirs pair could raise FileExistsError if two
    processes started simultaneously).
    """
    os.makedirs(output_dir, exist_ok=True)

# Load URLs that were already processed in a previous run
def load_progress():
    """Return the set of URLs recorded in the progress file.

    Missing file or read errors yield whatever was read so far (possibly
    an empty set); errors are logged, never raised.
    """
    done = set()
    if os.path.exists(progress_file):
        try:
            with open(progress_file, 'r', encoding='utf-8') as handle:
                for raw_line in handle:
                    entry = raw_line.strip()
                    if entry:
                        done.add(entry)
        except Exception as e:
            logger.error(f"加载进度文件失败: {str(e)}")
    return done

# Record a processed URL so later runs can skip it
def save_progress(url):
    """Append `url` to the progress file, one URL per line.

    Write failures are logged and swallowed — losing one progress entry
    must not abort the crawl.
    """
    try:
        with open(progress_file, 'a', encoding='utf-8') as handle:
            handle.write(f"{url}\n")
    except Exception as e:
        logger.error(f"保存进度文件失败: {str(e)}")

# Build a Session with automatic retries
def create_session():
    """Return a requests.Session whose HTTP and HTTPS adapters retry
    transient server errors (500/502/503/504) on idempotent methods."""
    retry_strategy = Retry(
        total=CONFIG['max_retries'],
        backoff_factor=0.3,
        status_forcelist=[500, 502, 503, 504],
        allowed_methods=["HEAD", "GET", "OPTIONS"],
    )
    adapter = HTTPAdapter(max_retries=retry_strategy)

    session = requests.Session()
    # The same adapter instance serves both schemes.
    for scheme in ("http://", "https://"):
        session.mount(scheme, adapter)
    return session

# Request helper with retries, backoff and proxy rotation
def robust_request(url, headers, session, max_retries=None):
    """GET `url`, rotating proxies and retrying with exponential backoff.

    Returns the first response with status 200. On the final attempt a
    RequestException propagates to the caller; if every attempt merely
    returned a non-200 status, a generic Exception is raised instead.
    `max_retries` defaults to CONFIG['max_retries'].
    """
    if max_retries is None:
        max_retries = CONFIG['max_retries']

    for attempt in range(max_retries):
        proxy = proxy_manager.get_next_proxy()

        try:
            # Exponential backoff with jitter before each retry.
            if attempt > 0:
                delay = 2 ** attempt + random.uniform(0, 1)
                logger.info(f"第{attempt+1}次重试，等待{delay:.2f}秒")
                time.sleep(delay)

            # Shrink the timeout on later attempts to fail faster.
            # CONSISTENCY: the first attempt now honors CONFIG['timeout']
            # (same value, 15s, that was previously hard-coded here).
            timeout = CONFIG['timeout'] if attempt == 0 else 12 if attempt == 1 else 10

            if proxy:
                logger.debug(f"尝试使用代理: {proxy.get('http', '未知代理')}")
                response = session.get(url, headers=headers, proxies=proxy, timeout=timeout)
            else:
                logger.debug("无可用代理，使用直接连接")
                response = session.get(url, headers=headers, timeout=timeout)

            if response.status_code == 200:
                return response
            logger.warning(f"请求返回非200状态码: {response.status_code}")

        except requests.exceptions.RequestException as e:
            error_type = type(e).__name__
            logger.warning(f"请求失败 ({error_type}): {str(e)}")

            if proxy:
                # Hard failures evict the proxy from the pool; softer
                # errors keep it around for another chance.
                if error_type in ['ProxyError', 'ReadTimeout']:
                    proxy_manager.mark_proxy_failed(proxy, f"{error_type}: {str(e)}")
                else:
                    logger.info(f"代理可能暂时不可用，将在下次使用时重试")

            # Last attempt: let the caller see the underlying cause.
            if attempt == max_retries - 1:
                raise

    # Reached only when every attempt returned a non-200 status.
    raise Exception(f"所有{max_retries}次重试均失败")

# Core worker: fetch and save the page for one spreadsheet row
def process_row(row_data, total_rows, processed_urls, session):
    """Fetch one shop's page and save its HTML under output_dir.

    `row_data` is an (index, Series) pair from DataFrame.iterrows():
    column 0 is the shop name (becomes the file name), column 1 the URL.
    Returns True when a page was fetched and saved, False when the row
    was skipped or failed. Never raises; failures are counted.
    """
    global processed_count, failed_count

    index, row = row_data
    # BUGFIX: compute the 1-based progress label before the try-block —
    # the except-handler below references it, and an early exception
    # (e.g. in row.iloc) previously raised NameError inside the handler.
    current_index = index + 1

    try:
        # Column 0: shop name; column 1: URL.
        shop_name = str(row.iloc[0]).strip()
        url = str(row.iloc[1]).strip()

        # Skip rows with missing data ('nan' is pandas' stringified NaN).
        if not shop_name or not url or url.lower() == 'nan':
            logger.warning(f"跳过行：无效的店铺名或网址")
            return False

        # Skip URLs already handled in a previous run.
        if url in processed_urls:
            logger.info(f"URL已处理过，跳过: {url}")
            return False

        logger.info(f"正在处理第{current_index}/{total_rows}条：{shop_name} - {url}")

        # Sanitize the shop name into a filesystem-safe file name.
        valid_filename = re.sub(r'[^\w\-_\. ]', '_', shop_name)
        valid_filename = valid_filename.replace(' ', '_')

        # Zero-pad digits to 5 places so file names sort numerically.
        # NOTE(review): ''.join concatenates ALL digit runs, so replace()
        # only matches when the name has a single contiguous run of
        # digits — confirm that holds for the spreadsheet data.
        digits = ''.join(filter(str.isdigit, valid_filename))
        if digits:
            padded_digits = digits.zfill(5)
            valid_filename = valid_filename.replace(digits, padded_digits)

        file_path = os.path.join(output_dir, f"{valid_filename}.txt")

        # Resume support: skip pages already saved to disk.
        if os.path.exists(file_path):
            logger.info(f"{current_index}:文件已存在，跳过读取：{file_path}")
            save_progress(url)
            return False

        # Normalize the URL: drop stray characters, force a scheme.
        url = re.sub(r'[^a-zA-Z0-9:/._-]', '', url)
        if not url.startswith(('http://', 'https://')):
            url = 'http://' + url

        # Rotate the User-Agent per request.
        headers = {
            'User-Agent': random.choice(CONFIG['user_agents']),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Connection': 'keep-alive'
        }

        response = robust_request(url, headers, session)
        html_content = response.text

        # The site serves a login page (marker "义乌购-登录") when it has
        # flagged the client; retry with other proxies a limited number
        # of times.
        login_text_count = 0
        max_login_retries = CONFIG['login_retry_limit']

        while "义乌购-登录" in html_content and login_text_count < max_login_retries:
            login_text_count += 1
            proxy = proxy_manager.get_next_proxy()
            logger.warning(f"{current_index}:检测到登录页面 (第{login_text_count}次)，尝试使用新代理重新获取")

            # NOTE(review): this evicts the *next* proxy from the pool,
            # not necessarily the one that served the login page (that
            # one was chosen inside robust_request) — confirm this churn
            # is intended.
            if proxy:
                proxy_manager.mark_proxy_failed(proxy, "检测到登录页面")

            # Top the pool up when it runs low.
            if proxy_manager.proxies_pool and len(proxy_manager.proxies_pool) < CONFIG['proxy_pool_threshold']:
                logger.info("代理池数量不足，开始补充代理...")
                new_proxies = proxy_manager.get_proxies_from_api()
                if new_proxies:
                    # BUGFIX: the fetched proxies were previously
                    # discarded — add them to the shared pool under its
                    # lock (skipping duplicates).
                    with proxy_manager.lock:
                        for candidate in new_proxies:
                            if candidate not in proxy_manager.proxies_pool:
                                proxy_manager.proxies_pool.append(candidate)
                    logger.info(f"成功补充 {len(new_proxies)} 个代理")

            proxy = proxy_manager.get_next_proxy()

            # Re-fetch through the new proxy.
            try:
                # Back off a little so retries are not rapid-fire.
                time.sleep(random.uniform(2, 4))

                timeout = 15  # longer timeout for login-page retries
                if proxy:
                    logger.info(f"{current_index}:使用新代理: {proxy.get('http', '未知代理')} 重新请求")
                    response = requests.get(url, headers=headers, proxies=proxy, timeout=timeout)
                else:
                    logger.info(f"{current_index}:无可用代理，使用直接请求重新尝试")
                    response = requests.get(url, headers=headers, timeout=timeout)
                response.raise_for_status()
                html_content = response.text
                logger.info(f"{current_index}:使用新代理重新请求成功")
            except Exception as e:
                logger.error(f"{current_index}:使用新代理重新请求失败: {str(e)}")
                # Out of retries: keep whatever content we have and let
                # the check below decide whether to save it.
                if login_text_count >= max_login_retries:
                    break

        # Persist real pages only — never the login page.
        if "义乌购-登录" not in html_content:
            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(html_content)

            save_progress(url)

            with progress_lock:
                logger.info(f"{current_index}:成功保存：{file_path}")
                processed_count += 1
        else:
            with progress_lock:
                logger.warning(f"{current_index}:多次尝试后仍检测到登录页面，跳过保存")
                failed_count += 1
            return False

        # Random delay so request timing looks less mechanical.
        time.sleep(random.uniform(*CONFIG['request_delay']))

        return True

    except Exception as e:
        with progress_lock:
            logger.error(f"{current_index}:处理失败: {str(e)}")
            failed_count += 1
        return False

# Entry point
def main():
    """Drive the crawl: read the spreadsheet, then fetch rows with a thread pool."""
    ensure_output_dir()

    # Seed the proxy pool before any worker starts.
    logger.info("初始化代理池...")
    proxy_manager.update_proxies()

    # One shared session reused by all workers.
    session = create_session()

    try:
        df = pd.read_excel(excel_file)
        # Limit to the first 20 rows for testing (user setting).
        df = df.head(20)

        # Preview the data to confirm column order (name, URL).
        logger.info("Excel文件内容预览:")
        logger.info(df.head())

        total_rows = len(df)
        logger.info(f"共发现{total_rows}条记录")

        # URLs finished in previous runs are skipped below.
        processed_urls = load_progress()
        logger.info(f"已加载{len(processed_urls)}条已处理记录")

        # Process rows in original (forward) order.
        data_to_process = list(df.iterrows())

        thread_count = CONFIG['thread_count']
        logger.info(f"使用{thread_count}个线程进行多线程爬取...")
        start_time = time.time()

        # Reset the shared counters for this run.
        global processed_count, failed_count
        processed_count = 0
        failed_count = 0

        with concurrent.futures.ThreadPoolExecutor(max_workers=thread_count) as executor:
            # Submit only rows whose URL (column 1) is not already done.
            future_to_row = {
                executor.submit(process_row, row_data, total_rows, processed_urls, session): row_data
                for row_data in data_to_process
                if str(row_data[1].iloc[1]).strip() not in processed_urls
            }

            for future in concurrent.futures.as_completed(future_to_row):
                row_data = future_to_row[future]
                try:
                    future.result()
                except Exception as e:
                    index, row = row_data
                    shop_name = str(row.iloc[0]).strip() if len(row) > 0 else "未知店铺"
                    url = str(row.iloc[1]).strip() if len(row) > 1 else "未知URL"
                    logger.error(f"任务执行异常 ({shop_name} - {url})：{str(e)}")
                    # BUGFIX: take the lock — worker threads update this
                    # counter concurrently under the same lock.
                    with progress_lock:
                        failed_count += 1

        end_time = time.time()
        elapsed_time = end_time - start_time

        logger.info(f"\n所有店铺网址爬取完成！")
        logger.info(f"总耗时: {elapsed_time:.2f}秒")
        logger.info(f"成功处理: {processed_count}条")
        logger.info(f"处理失败: {failed_count}条")

        # Success rate over the rows actually attempted.
        if processed_count + failed_count > 0:
            success_rate = processed_count / (processed_count + failed_count) * 100
            logger.info(f"成功率: {success_rate:.2f}%")

    except Exception as e:
        logger.error(f"读取Excel文件失败：{str(e)}")
        logger.error("请检查文件是否存在，以及是否安装了pandas和openpyxl库。")
        logger.error("安装命令：pip install pandas openpyxl requests")
        # Report whatever counts accumulated before the failure.
        logger.info(f"成功处理: {processed_count}条")
        logger.info(f"处理失败: {failed_count}条")

# Script entry point: run the crawl only when executed directly.
if __name__ == "__main__":
    main()