import pandas as pd
import random
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import threading
from queue import Queue
from concurrent.futures import ThreadPoolExecutor
from selenium.webdriver.chrome.service import Service
import shutil
import requests

# Pool of User-Agent strings; a random one is applied to each new driver.
USER_AGENTS = [
    # Desktop browser UAs
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    # Search-engine crawler UAs
    'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
    'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
]

def create_driver(proxy=None,
                  driver_path=r'C:\Program Files\Google\Chrome\Application\chromedriver.exe'):
    """Build a Chrome WebDriver tuned for scraping.

    The driver is configured with a random User-Agent, images/JS/CSS/plugins
    disabled for speed, an optional proxy, and a CDP script that masks the
    usual Selenium fingerprints (navigator.webdriver etc.).

    Args:
        proxy: optional proxy server address passed via --proxy-server.
        driver_path: path to chromedriver.exe (generalized from the former
            hard-coded constant; default preserves the original behavior).

    Returns:
        A configured selenium.webdriver.Chrome instance with a 10 s page
        load timeout.
    """
    service = Service(executable_path=driver_path)

    chrome_options = Options()
    # chrome_options.add_argument('--headless')  # enable for headless runs
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument('--disable-software-rasterizer')
    chrome_options.add_argument('--blink-settings=imagesEnabled=false')
    chrome_options.add_argument('--disable-extensions')
    chrome_options.add_argument('--silent')

    # Random User-Agent per driver to vary the request fingerprint.
    chrome_options.add_argument(f'user-agent={random.choice(USER_AGENTS)}')

    # Disable heavy content types (images, JS, CSS, plugins) via prefs;
    # 2 == "block" in Chrome's content-setting values.
    prefs = {
        'profile.default_content_setting_values': {
            'images': 2,
            'javascript': 2,
            'css': 2,
            'plugins': 2,
        },
        'profile.managed_default_content_settings': {
            'javascript': 2
        },
        'profile.managed_default_content_settings.images': 2,
        'profile.managed_default_content_settings.stylesheets': 2,
    }
    chrome_options.add_experimental_option('prefs', prefs)

    if proxy:
        chrome_options.add_argument(f'--proxy-server={proxy}')

    driver = webdriver.Chrome(service=service, options=chrome_options)

    # Inject anti-detection JS before every document loads: hide
    # navigator.webdriver, fake a plugins list, and stub window.chrome.
    driver.execute_cdp_cmd('Page.addScriptToEvaluateOnNewDocument', {
        'source': '''
            Object.defineProperty(navigator, 'webdriver', {
                get: () => undefined
            });
            Object.defineProperty(navigator, 'plugins', {
                get: () => [1, 2, 3, 4, 5]
            });
            window.chrome = {
                runtime: {}
            };
        '''
    })

    # Cap page load time so a hanging page cannot stall the worker.
    driver.set_page_load_timeout(10)

    return driver

def get_shop_name(driver, url):
    """Load *url* and return the shop name, or the sentinel "无数据".

    Side effect: sets ``driver.detected_redirect`` so the caller can tell
    an anti-bot redirect (IP likely flagged; rotate proxy) apart from an
    ordinary miss.

    Args:
        driver: a WebDriver from create_driver().
        url: product/shop page URL to load.

    Returns:
        The shop name string, or "无数据" on redirect, missing element,
        timeout, or any other error (never raises).
    """
    try:
        original_url = url

        driver.get(url)

        # A redirect away from the requested URL is treated as anti-bot
        # interception. NOTE(review): exact string comparison may also flag
        # benign URL normalization (e.g. an added trailing slash) — confirm
        # against real traffic.
        current_url = driver.current_url
        if current_url != original_url:
            print(f"检测到跳转: {original_url} -> {current_url}")
            driver.detected_redirect = True  # caller uses this to rotate IP
            return "无数据"

        # Wait (up to 10 s) for the shop-name anchor and read its title.
        shop_div = WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "div.name a"))
        )
        shop_name = shop_div.get_attribute('title')
        driver.detected_redirect = False  # clean load: clear the flag
        return shop_name if shop_name else "无数据"
    except Exception as e:
        print(f"Error processing {url}: {str(e)}")
        # Bug fix: also clear the flag here so a stale True from a previous
        # call can never leak out of an exception path.
        driver.detected_redirect = False
        return "无数据"

def backup_csv(src='tbmb.csv'):
    """Create a timestamped backup copy of *src* in the working directory.

    The backup is named ``tbmb_backup_<YYYYmmdd_HHMMSS>.csv``. Best-effort:
    failures are printed but never raised, so a backup problem cannot
    interrupt the scraping loop.

    Args:
        src: path of the CSV to back up (generalized from the former
            hard-coded 'tbmb.csv'; default preserves original behavior).
    """
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    backup_file = f'tbmb_backup_{timestamp}.csv'
    try:
        shutil.copy2(src, backup_file)  # copy2 preserves file metadata
        print(f"已创建备份文件: {backup_file}")
    except Exception as e:
        print(f"备份文件创建失败: {str(e)}")

def get_proxy_ip(api_url='https://xxxxxxx'):
    """Request a fresh proxy address from the rotation API.

    Args:
        api_url: endpoint whose response body is a single proxy address
            (generalized from the former hard-coded placeholder URL;
            default preserves original behavior).

    Returns:
        The stripped proxy string on HTTP 200, otherwise None. Errors are
        printed, never raised.
    """
    try:
        response = requests.get(api_url, timeout=10)
        if response.status_code == 200:
            proxy = response.text.strip()
            print(f"获取到新代理IP: {proxy}")
            return proxy
        print(f"获取代理IP失败: {response.status_code}")
        return None
    except Exception as e:
        print(f"获取代理IP出错: {str(e)}")
        return None

def worker(task_queue, proxy=None):
    """Worker thread body: drain (index, url, df) tasks and scrape each URL.

    A failed row is retried once on the current IP, then once more on a
    fresh proxy from the API; rows that still fail are skipped. Successful
    names are written into *df* and flushed to tbmb.csv after every row,
    with a backup at most every 60 seconds.

    Args:
        task_queue: queue.Queue of (row index, url, shared DataFrame).
        proxy: optional proxy for the initial driver.
    """
    # Local import: the module top only imports the Queue class, and the
    # Empty exception lives on the queue module, not on the class.
    from queue import Empty

    driver = create_driver(proxy)
    last_backup_time = time.time()

    try:
        while True:
            try:
                index, url, df = task_queue.get_nowait()
            except Empty:
                # Bug fix: the original caught `Queue.Empty`, but the Queue
                # class has no Empty attribute, so draining the queue raised
                # AttributeError instead of exiting the loop.
                break

            try:
                print(f"\n正在处理第 {index + 1} 行...")
                shop_name = get_shop_name(driver, url)

                # First attempt failed (no data, or an anti-bot redirect was
                # flagged on the driver): retry once on the current IP.
                if shop_name == "无数据" or getattr(driver, 'detected_redirect', False):
                    print(f"第 {index + 1} 行首次尝试失败，使用当前IP重试: {url}")
                    time.sleep(random.uniform(1, 2))
                    shop_name = get_shop_name(driver, url)

                    # Still failing: rotate to a fresh proxy.
                    if shop_name == "无数据" or getattr(driver, 'detected_redirect', False):
                        # Fetch the replacement proxy BEFORE quitting the old
                        # driver so we know whether a new one can be built.
                        new_proxy = get_proxy_ip()
                        driver.quit()

                        if new_proxy:
                            print(f"第 {index + 1} 行重试失败，正在切换到新代理: {new_proxy}")
                            driver = create_driver(new_proxy)
                            shop_name = get_shop_name(driver, url)
                            if shop_name == "无数据" or getattr(driver, 'detected_redirect', False):
                                print(f"第 {index + 1} 行使用新代理仍然失败，跳过此行")
                                continue  # skip writing; move on to next row
                        else:
                            print(f"第 {index + 1} 行处理失败：无法获取新代理IP，跳过此行")
                            # Bug fix: the original continued with the driver
                            # already quit, crashing on the next row. Rebuild
                            # one with the initial proxy so work can continue.
                            driver = create_driver(proxy)
                            continue  # skip writing; move on to next row

                # Persist only confirmed successes.
                if shop_name != "无数据" and not getattr(driver, 'detected_redirect', False):
                    df.at[index, '店铺名称'] = shop_name
                    df.to_csv('tbmb.csv', index=False, encoding='gbk')
                    print(f"第 {index + 1} 行已获取并保存 {url} 的店铺名称为: {shop_name}")

                # Backup the CSV at most once per 60 seconds.
                current_time = time.time()
                if current_time - last_backup_time >= 60:
                    backup_csv()
                    last_backup_time = current_time

                time.sleep(random.uniform(0.5, 1.5))
            finally:
                # Bug fix: originally skipped on the `continue` paths, which
                # would hang any future task_queue.join() caller.
                task_queue.task_done()
    finally:
        driver.quit()

def main(proxy=None, max_workers=1):
    """Entry point: load tbmb.csv, queue every row still missing a shop
    name, and process the queue with a pool of worker threads.

    Args:
        proxy: optional proxy address handed to each worker.
        max_workers: number of worker threads to spawn.
    """
    try:
        df = pd.read_csv('tbmb.csv', encoding='gbk')
    except Exception as e:
        print(f"无法读取CSV文件: {str(e)}")
        return

    task_queue = Queue()

    # Enqueue only rows whose shop-name cell is still empty and whose
    # link cell holds a usable string.
    for index, row in df.iterrows():
        if not pd.isna(row.get('店铺名称', '')):
            continue
        url = row.get('链接', '')
        if url and isinstance(url, str):
            task_queue.put((index, url, df))

    # Spin up the worker pool, then wait for every thread to finish.
    pool = [
        threading.Thread(target=worker, args=(task_queue, proxy))
        for _ in range(max_workers)
    ]
    for t in pool:
        t.start()
    for t in pool:
        t.join()

if __name__ == "__main__":
    # Run with a placeholder proxy and a single worker.
    # NOTE(review): the original comment here claimed "no proxy, 3 threads",
    # which contradicts the actual call below — confirm intended settings.
    main(proxy="xxxxxxxxxx:xxxx",max_workers=1)
    # Example: real proxy with 3 workers
    # main(proxy="http://your-proxy-address:port", max_workers=3)
