import time
import random
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import requests

def setup_stealth_driver():
    """Build a Chrome WebDriver configured to mask common automation fingerprints.

    Returns:
        A ready WebDriver instance, or None if Chrome fails to start.
    """
    chrome_options = Options()

    # Flags that hide automation tells and slim the browser down.
    stealth_flags = (
        '--disable-blink-features=AutomationControlled',
        '--no-sandbox',
        '--disable-dev-shm-usage',
        '--disable-gpu',
        '--disable-extensions',
        '--disable-plugins',
        '--disable-images',
    )
    for flag in stealth_flags:
        chrome_options.add_argument(flag)
    chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
    chrome_options.add_experimental_option('useAutomationExtension', False)

    # Pick one realistic desktop user agent at random per session.
    user_agents = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
    ]
    chrome_options.add_argument(f'--user-agent={random.choice(user_agents)}')

    # Fixed, common desktop resolution.
    chrome_options.add_argument('--window-size=1920,1080')

    try:
        driver = webdriver.Chrome(options=chrome_options)
        # Strip navigator.webdriver so in-page JS checks see a normal browser.
        driver.execute_script(
            "Object.defineProperty(navigator, 'webdriver', {get: () => undefined})"
        )
    except Exception as e:
        print(f"Chrome驱动初始化失败: {e}")
        return None
    return driver

def human_like_behavior(driver):
    """Scroll and pause at random intervals to mimic a human visitor."""
    # Candidate scroll targets: 30%, 70%, and the bottom of the page.
    scroll_scripts = [
        "window.scrollTo(0, document.body.scrollHeight * 0.3);",
        "window.scrollTo(0, document.body.scrollHeight * 0.7);",
        "window.scrollTo(0, document.body.scrollHeight);",
    ]
    how_many = random.randint(1, 2)
    for snippet in random.sample(scroll_scripts, how_many):
        driver.execute_script(snippet)
        time.sleep(random.uniform(1, 3))

    # Final idle pause standing in for simulated mouse movement.
    time.sleep(random.uniform(2, 5))

def selenium_douban_crawl(url):
    """Fetch *url* from Douban through a stealth browser, warming up first.

    Visits a referer site and the Douban homepage before the target page,
    pausing and scrolling like a human between steps.

    Args:
        url: The Douban page to retrieve.

    Returns:
        The final page HTML on success, otherwise None.
    """
    print("🚀 启动Selenium真实浏览器方案...")

    driver = setup_stealth_driver()
    if not driver:
        print("❌ 浏览器启动失败")
        return None

    try:
        # Step 1: land on an unrelated search engine to build a referer trail.
        print("步骤1: 访问跳转页面...")
        referer_sites = [
            'https://www.baidu.com',
            'https://www.google.com',
            'https://www.bing.com'
        ]
        driver.get(random.choice(referer_sites))
        time.sleep(random.uniform(3, 7))

        # Step 2: warm up on the Douban homepage.
        print("步骤2: 访问豆瓣首页...")
        driver.get('https://www.douban.com')
        time.sleep(random.uniform(5, 10))

        human_like_behavior(driver)

        # Bail out early if a captcha / block page was served.
        page_source = driver.page_source
        if "验证码" in page_source or "禁止访问" in page_source:
            print("❌ 被豆瓣拦截，需要验证码")
            return None

        # Step 3: now hit the real target page.
        print("步骤3: 访问目标页面...")
        driver.get(url)
        time.sleep(random.uniform(8, 15))

        human_like_behavior(driver)

        page_source = driver.page_source

        # Success requires either recognizable page text or the movie domain.
        if "豆瓣电影" not in page_source and "movie.douban.com" not in driver.current_url:
            print("❌ 可能被重定向到验证页面")
            return None
        print("✅ Selenium方案成功!")
        return page_source

    except Exception as e:
        print(f"Selenium请求失败: {e}")
        return None
    finally:
        # Always release the browser, even on the early-return paths above.
        driver.quit()

def use_web_scraping_api():
    """Print a menu of commercial scraping-API services; always returns None."""
    print("🌐 尝试第三方API方案...")

    # Hosted scraping services (all require signing up for an API key).
    apis = {
        'ScrapingBee': '需要注册获取API Key',
        'ScraperAPI': '需要注册获取API Key',
        'ZenRows': '需要注册获取API Key',
        'Crawlera': '需要注册获取API Key'
    }

    print("可用的第三方API服务:")
    for service, note in apis.items():
        print(f"  - {service}: {note}")

    print("\n推荐使用 ScrapingBee，有免费额度")
    return None

def mobile_api_approach():
    """Try Douban's mobile (frodo) API endpoints in turn.

    Returns:
        The parsed JSON body of the first endpoint answering 200, or None
        when every endpoint fails or errors out.
    """
    print("📱 尝试移动端API方案...")

    # Mobile API endpoints (may have changed server-side).
    mobile_apis = [
        'https://frodo.douban.com/api/v2/movie/1292052',  # The Shawshank Redemption
        'https://frodo.douban.com/api/v2/movie/30166972', # popular movie
        'https://frodo.douban.com/api/v2/tv/30181478',    # popular TV series
    ]

    headers = {
        'User-Agent': 'api-client/1 com.douban.frodo/7.30.0(230) Android/25 product/santoni vendor/Xiaomi model/Redmi 4X  rom/miui6  network/wifi  platform/mobile nd/1',
        'Authorization': 'Bearer YOUR_ACCESS_TOKEN',  # a real token is required here
        'X-API-Key': '0dad551ec0d84ed02907ff5c42e8ec70',  # Douban's public API key
    }

    for endpoint in mobile_apis:
        try:
            response = requests.get(endpoint, headers=headers, timeout=10)
            if response.status_code == 200:
                print(f"✅ 移动端API成功: {endpoint}")
                return response.json()
        except Exception as e:
            # Best-effort: report the failure and move on to the next endpoint.
            print(f"❌ API请求失败 {endpoint}: {e}")

    return None

def emergency_solutions():
    """Print immediate, long-term, and technical remedies for an IP ban; returns None."""
    print("🆘 启动紧急解决方案...")
    print("=" * 50)

    # Advice grouped as: immediate fixes, long-term fixes, technical fixes.
    advice = (
        "由于你的IP已被豆瓣彻底封禁，建议:",
        "\n🔥 立即解决方案:",
        "1. 使用手机热点更换IP",
        "2. 使用VPN或代理服务器",
        "3. 前往网吧或使用公共WiFi",
        "4. 重启路由器获取新IP",
        "\n💼 长期解决方案:",
        "1. 使用Selenium + 真实浏览器（见上面代码）",
        "2. 注册第三方爬虫API服务",
        "3. 使用云服务器（AWS、GCP、Azure等）",
        "4. 搭建分布式爬虫系统",
        "\n🛠️ 技术解决方案:",
        "1. IP代理池轮换",
        "2. 浏览器指纹伪装",
        "3. 请求频率控制",
        "4. 验证码识别服务",
    )
    for line in advice:
        print(line)

    return None

def test_network_environment():
    """Probe a few well-known sites and report which are reachable."""
    print("🔍 网络环境测试...")

    targets = {
        "百度": "https://www.baidu.com",
        "谷歌": "https://www.google.com",
        "豆瓣首页": "https://www.douban.com",
        "豆瓣电影": "https://movie.douban.com"
    }

    for site, address in targets.items():
        try:
            # verify=False: tolerate broken certificate chains behind proxies.
            response = requests.get(address, timeout=10, verify=False)
        except Exception as e:
            print(f"  {site}: ❌ {e}")
        else:
            print(f"  {site}: ✅ 状态码 {response.status_code}")

if __name__ == "__main__":
    print("🆘 豆瓣爬虫终极解决方案")
    print("=" * 60)

    # Probe the current network before offering options.
    test_network_environment()

    print("\n" + "=" * 60)
    print("选择解决方案:")
    print("1. Selenium真实浏览器（推荐）")
    print("2. 第三方API服务")
    print("3. 移动端API")
    print("4. 紧急建议")

    #choice = input("请输入选择 (1-4): ").strip()
    # BUG FIX: the hard-coded choice was the int 1, which never matched the
    # string comparisons below, so every run fell through to "无效选择".
    # input().strip() yields a string, so the hard-coded value must be "1".
    choice = "1"
    if choice == "1":
        result = selenium_douban_crawl("https://movie.douban.com/")
        if result:
            print(f"✅ 成功获取 {len(result)} 字符数据")
        else:
            print("❌ Selenium方案失败")

    elif choice == "2":
        use_web_scraping_api()

    elif choice == "3":
        result = mobile_api_approach()
        if result:
            print(f"✅ API返回数据: {result}")
        else:
            print("❌ 移动端API失败")

    elif choice == "4":
        emergency_solutions()
    else:
        print("无效选择")

    print("\n" + "=" * 60)
    print("💡 重要提醒:")
    print("- 你的IP已被豆瓣标记，短期内无法直接访问")
    print("- 建议更换网络环境或使用上述技术方案")
    print("- 尊重网站规则，合理控制爬取频率")
    print("=" * 60)
