import requests
import os
import time
import random
import re
import concurrent.futures
import threading
import pandas as pd
import logging
from urllib.parse import urlparse
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# Logging configuration: mirror everything to a UTF-8 log file and stdout
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(threadName)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("yiwu_crawler.log", encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Global crawler configuration
CONFIG = {
    'max_retries': 3,              # attempts per URL in robust_request (also Retry total)
    'timeout': 15,                 # per-request timeout, seconds
    'thread_count': 5,             # worker threads in the pool
    'request_delay': (1, 3),       # random inter-request delay range, seconds
    'max_proxy_retries': 2,        # NOTE(review): not referenced in this file — confirm if dead config
    'proxy_update_threshold': 10,  # NOTE(review): not referenced in this file — confirm if dead config
    'user_agents': [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:90.0) Gecko/20100101 Firefox/90.0'
    ]
}

# Proxy API configuration
PROXY_API_URL = 'http://dddip.top/api/get?token=426e97524922b118745249567bd332&number=1000&type=https&format=1'
BACKUP_PROXY_API = 'http://另一个代理API地址'  # backup proxy API — NOTE(review): placeholder URL, never filled in

# Shared counters, guarded by progress_lock
processed_count = 0
failed_count = 0
progress_lock = threading.Lock()

# Paths relative to the directory containing this script
script_dir = os.path.dirname(os.path.abspath(__file__))
excel_file = os.path.join(script_dir, '店铺.xlsx')
output_dir = os.path.join(script_dir, 'url')
progress_file = os.path.join(script_dir, 'progress.txt')

class ProxyManager:
    """Thread-safe proxy pool.

    Fetches proxy addresses from an HTTP API, validates them, and hands
    them out round-robin while tracking proxies that have been marked
    as failed.
    """

    def __init__(self):
        self.proxies = []            # requests-style proxy dicts
        self.current_index = 0       # round-robin cursor into self.proxies
        self.lock = threading.Lock() # guards proxies / cursor / failed set
        self.failed_proxies = set()  # hashable keys of proxies marked failed

    def get_proxies_from_api(self, api_url):
        """Fetch a proxy list from *api_url*.

        Returns a list of requests-compatible proxy dicts, or [] on any
        error (callers treat an empty list as "try the backup API").
        """
        try:
            logger.info(f"正在从API获取代理IP: {api_url}")
            response = requests.get(api_url, timeout=10)
            response.raise_for_status()

            # splitlines() handles both \r\n and \n endings; the old
            # split('\r\n') returned one giant entry for \n-only replies.
            proxies_list = response.text.strip().splitlines()

            # De-duplicate, reporting how many entries were dropped
            original_count = len(proxies_list)
            proxies_list = list(set(proxies_list))
            unique_count = len(proxies_list)

            if original_count > unique_count:
                logger.info(f"已去除{original_count - unique_count}个重复代理IP")

            # Format for requests; HTTPS traffic also tunnels through the
            # HTTP proxy endpoint.
            formatted_proxies = []
            for entry in proxies_list:
                entry = entry.strip()
                if entry:
                    formatted_proxies.append({
                        'http': f'http://{entry}',
                        'https': f'http://{entry}'
                    })

            logger.info(f"成功获取{len(formatted_proxies)}个代理IP")
            return formatted_proxies
        except Exception as e:
            logger.error(f"获取代理IP失败: {str(e)}")
            return []

    def test_proxy(self, proxy):
        """Return True if *proxy* can fetch the target site's front page."""
        try:
            test_url = "https://www.yiwugo.com"
            response = requests.get(test_url, proxies=proxy, timeout=8)
            return response.status_code == 200
        # Narrowed from a bare except: only network/HTTP errors mean
        # "proxy unusable"; anything else should surface.
        except requests.RequestException:
            return False

    def update_proxies(self):
        """Refresh the pool: primary API, then backup, then validate."""
        # Try the primary API first
        new_proxies = self.get_proxies_from_api(PROXY_API_URL)

        # Fall back to the backup API if the primary returned nothing
        if not new_proxies:
            new_proxies = self.get_proxies_from_api(BACKUP_PROXY_API)

        # Keep only proxies that pass a live connectivity check
        if new_proxies:
            valid_proxies = []
            for proxy in new_proxies:
                if self.test_proxy(proxy):
                    logger.info(f"代理 {proxy.get('http', '未知代理')} 测试通过")
                    valid_proxies.append(proxy)
                else:
                    logger.warning(f"代理 {proxy.get('http', '未知代理')} 测试失败")

            with self.lock:
                self.proxies = valid_proxies
                self.current_index = 0
                logger.info(f"代理池更新完成，有效代理数量: {len(self.proxies)}")
        else:
            logger.warning("无法获取任何代理，将使用直接连接")

    def get_next_proxy(self):
        """Return the next usable proxy (round-robin), or None.

        Skips proxies marked as failed, checking each slot at most once.
        The previous implementation recursed while holding the
        non-reentrant lock, which deadlocked as soon as a failed proxy
        was encountered with more than one proxy in the pool.
        """
        with self.lock:
            if not self.proxies:
                return None

            for _ in range(len(self.proxies)):
                proxy = self.proxies[self.current_index]
                self.current_index = (self.current_index + 1) % len(self.proxies)

                proxy_key = tuple(sorted(proxy.items()))
                if proxy_key not in self.failed_proxies:
                    return proxy

            # Every proxy in the pool has been marked failed
            return None

    def mark_proxy_failed(self, proxy):
        """Record *proxy* as failed so get_next_proxy skips it."""
        if proxy:
            proxy_key = tuple(sorted(proxy.items()))
            with self.lock:
                self.failed_proxies.add(proxy_key)
                logger.debug(f"标记代理为失败: {proxy.get('http', '未知代理')}")

# Module-level proxy manager shared by all worker threads
proxy_manager = ProxyManager()

def ensure_output_dir():
    """Create the output directory if it does not already exist.

    Uses exist_ok=True so a directory created between the check and the
    mkdir (the original check-then-create race) cannot raise.
    """
    os.makedirs(output_dir, exist_ok=True)

def load_progress():
    """Load the progress file and return the set of processed URLs.

    Blank lines are skipped so they no longer pollute the set with ''.
    Returns an empty set when the file is missing or unreadable.
    """
    processed_urls = set()
    if os.path.exists(progress_file):
        try:
            with open(progress_file, 'r', encoding='utf-8') as f:
                for line in f:
                    url = line.strip()
                    if url:  # ignore empty lines
                        processed_urls.add(url)
        except Exception as e:
            logger.error(f"读取进度文件失败: {e}")
    return processed_urls

def save_progress(url):
    """Append one processed URL to the progress file (best effort)."""
    try:
        with open(progress_file, 'a', encoding='utf-8') as f:
            f.write(f"{url}\n")
    except Exception as e:
        logger.error(f"保存进度失败: {e}")

def create_session():
    """Build a requests Session whose HTTP and HTTPS adapters retry
    transient failures (429/5xx on GET) with exponential backoff."""
    retry_policy = Retry(
        total=CONFIG['max_retries'],
        backoff_factor=1,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
    )

    session = requests.Session()
    # Mount the retry-enabled adapter for both URL schemes
    for scheme in ("http://", "https://"):
        session.mount(scheme, HTTPAdapter(max_retries=retry_policy))
    return session

def robust_request(url, headers, session, max_retries=CONFIG['max_retries']):
    """GET *url* with up to *max_retries* attempts, rotating proxies.

    Falls back to a direct connection when no proxy is available, and
    reports dead proxies to the proxy manager. Re-raises the last
    requests exception, or raises a generic Exception when every
    attempt ended with a non-200 status.
    """
    for attempt in range(max_retries):
        proxy = proxy_manager.get_next_proxy()

        try:
            # Exponential backoff with jitter before every retry
            if attempt:
                wait = 2 ** attempt + random.uniform(0, 1)
                logger.info(f"第{attempt+1}次重试，等待{wait:.2f}秒")
                time.sleep(wait)

            # Build the request, attaching a proxy only when one exists
            request_kwargs = {'headers': headers, 'timeout': CONFIG['timeout']}
            if proxy:
                logger.debug(f"尝试使用代理: {proxy.get('http', '未知代理')}")
                request_kwargs['proxies'] = proxy
            else:
                logger.debug("无可用代理，使用直接连接")
            response = session.get(url, **request_kwargs)

            if response.status_code == 200:
                return response
            logger.warning(f"请求返回非200状态码: {response.status_code}")

        except requests.exceptions.RequestException as e:
            error_type = type(e).__name__
            logger.warning(f"请求失败 ({error_type}): {str(e)}")

            # The proxy is considered dead for the rest of the run
            if proxy:
                proxy_manager.mark_proxy_failed(proxy)

            # Out of attempts: surface the original exception
            if attempt == max_retries - 1:
                raise

    # Every attempt returned a non-200 status
    raise Exception(f"所有{max_retries}次重试均失败")

def process_row(row_data, total_rows, processed_urls, session):
    """Fetch and save the page for one spreadsheet row.

    row_data: (index, pandas Series) pair as produced by
    DataFrame.iterrows(); column 0 is the shop name (used as the output
    filename) and column 1 is the shop URL. Returns True when the page
    was fetched and saved, False when the row was skipped or failed.
    """
    global processed_count, failed_count

    index, row = row_data
    # Assigned before the try block so the except handler can always
    # reference it (previously a NameError masked the real error when
    # an exception fired before line assigning it inside the try).
    current_index = index + 1  # 1-based position for progress messages

    try:
        # Column 0: shop name (filename); column 1: URL to fetch
        shop_name = str(row.iloc[0]).strip()
        url = str(row.iloc[1]).strip()

        # Skip rows with missing data (pandas renders missing cells as 'nan')
        if not shop_name or not url or url.lower() == 'nan':
            logger.warning(f"跳过行：无效的店铺名或网址")
            return False

        # Skip URLs already recorded in the progress file
        if url in processed_urls:
            logger.info(f"URL已处理过，跳过: {url}")
            return False

        logger.info(f"正在处理第{current_index}/{total_rows}条：{shop_name} - {url}")

        # Sanitize the shop name into a filesystem-safe filename
        valid_filename = re.sub(r'[^\w\-_\. ]', '_', shop_name)
        valid_filename = valid_filename.replace(' ', '_')

        # Zero-pad the digits in the name to 5 characters so files sort
        # numerically. NOTE(review): only effective when all digits are
        # contiguous; otherwise replace() is a silent no-op (original
        # behavior preserved).
        digits = ''.join(filter(str.isdigit, valid_filename))
        if digits:
            valid_filename = valid_filename.replace(digits, digits.zfill(5))

        file_path = os.path.join(output_dir, f"{valid_filename}.txt")

        # Skip pages already on disk, but still record progress
        if os.path.exists(file_path):
            logger.info(f"{current_index}:文件已存在，跳过读取：{file_path}")
            save_progress(url)
            return False

        # Normalize the URL: strip stray characters, ensure a scheme
        url = re.sub(r'[^a-zA-Z0-9:/._-]', '', url)
        if not url.startswith(('http://', 'https://')):
            url = 'http://' + url

        # Randomize the User-Agent to look less like a bot
        headers = {
            'User-Agent': random.choice(CONFIG['user_agents']),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Connection': 'keep-alive'
        }

        # Fetch the page (retries + proxy rotation inside)
        response = robust_request(url, headers, session)
        html_content = response.text

        # A login page in the body means the anti-bot system blocked us
        if "义乌购-登录" in html_content:
            logger.warning(f"{current_index}:检测到登录页面，可能被反爬虫")
            # Counter updates must be serialized across worker threads
            # (this increment was previously done without the lock).
            with progress_lock:
                failed_count += 1
            return False

        # Save the raw HTML
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(html_content)

        # Record the URL as done
        save_progress(url)

        with progress_lock:
            logger.info(f"{current_index}:成功保存：{file_path}")
            processed_count += 1

        # Random delay so requests are not perfectly regular
        time.sleep(random.uniform(*CONFIG['request_delay']))

        return True

    except Exception as e:
        with progress_lock:
            logger.error(f"{current_index}:处理失败: {str(e)}")
            failed_count += 1
        return False

def main():
    """Entry point: load the shop spreadsheet, refresh the proxy pool,
    and crawl every shop URL with a thread pool, persisting progress."""
    # Make sure the output directory exists
    ensure_output_dir()

    # Prime the proxy pool before any requests go out
    logger.info("初始化代理池...")
    proxy_manager.update_proxies()

    # One shared Session (connection pooling + retry adapter)
    session = create_session()

    # Read the spreadsheet of shops
    try:
        # Read the Excel file with pandas (requires openpyxl)
        df = pd.read_excel(excel_file)
        # Limit to the first 20 rows for testing
        # (original comment said 30 rows, but head(20) takes 20)
        df = df.head(20)

        # Preview the first rows to confirm the column order
        logger.info("Excel文件内容预览:")
        logger.info(df.head())

        # Total number of rows to process
        total_rows = len(df)
        logger.info(f"共发现{total_rows}条记录")

        # URLs already handled in a previous run
        processed_urls = load_progress()
        logger.info(f"已加载{len(processed_urls)}条已处理记录")

        # Rows to process, in spreadsheet order
        data_to_process = list(df.iterrows())

        # Thread-pool size
        thread_count = CONFIG['thread_count']
        logger.info(f"使用{thread_count}个线程进行多线程爬取...")
        start_time = time.time()

        # Reset the shared counters for this run
        global processed_count, failed_count
        processed_count = 0
        failed_count = 0

        # Fan the rows out over the thread pool
        with concurrent.futures.ThreadPoolExecutor(max_workers=thread_count) as executor:
            # Map each future back to its source row for error reporting.
            # Already-processed URLs are filtered out up front; process_row
            # re-checks this, so the filter is just an optimization.
            future_to_row = {
                executor.submit(process_row, row_data, total_rows, processed_urls, session): row_data 
                for row_data in data_to_process
                if str(row_data[1].iloc[1]).strip() not in processed_urls  # skip already-processed URLs
            }
            
            # Wait for all tasks to finish
            for future in concurrent.futures.as_completed(future_to_row):
                row_data = future_to_row[future]
                try:
                    future.result()
                except Exception as e:
                    index, row = row_data
                    shop_name = str(row.iloc[0]).strip() if len(row) > 0 else "未知店铺"
                    url = str(row.iloc[1]).strip() if len(row) > 1 else "未知URL"
                    logger.error(f"任务执行异常 ({shop_name} - {url})：{str(e)}")
                    # NOTE(review): incremented without progress_lock; only
                    # safe because this loop runs on the main thread alone
                    failed_count += 1
        
        end_time = time.time()
        elapsed_time = end_time - start_time
        
        logger.info(f"\n所有店铺网址爬取完成！")
        logger.info(f"总耗时: {elapsed_time:.2f}秒")
        logger.info(f"成功处理: {processed_count}条")
        logger.info(f"处理失败: {failed_count}条")
        
        # Success rate over rows that were actually attempted
        if processed_count + failed_count > 0:
            success_rate = processed_count / (processed_count + failed_count) * 100
            logger.info(f"成功率: {success_rate:.2f}%")
            
    except Exception as e:
        logger.error(f"读取Excel文件失败：{str(e)}")
        logger.error("请检查文件是否存在，以及是否安装了pandas和openpyxl库。")
        logger.error("安装命令：pip install pandas openpyxl requests")
        # Report the counters even when the run aborts early
        logger.info(f"成功处理: {processed_count}条")
        logger.info(f"处理失败: {failed_count}条")

if __name__ == "__main__":
    main()