import functools
import random
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import contextmanager
from urllib.parse import parse_qs, unquote, urlencode, urlparse, urlunparse

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

import dowoload_picture as download

# Search keyword used to build the Toutiao search URL ("包臀裙" ≈ "bodycon skirt").
keyword = '包臀裙'

# Thread-local storage: each worker thread gets (and reuses) its own browser instance.
thread_local = threading.local()


def init_driver():
    """Create and return a headless Chrome WebDriver configured for scraping.

    Applies anti-bot-detection flags, a randomly chosen desktop user agent,
    and a CDP script that hides the ``navigator.webdriver`` property before
    any page script runs.
    """
    driver_binary = "/usr/local/bin/chromedriver"

    # Desktop user agents to rotate between runs.
    agent_pool = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    ]

    chrome_flags = [
        # Anti-detection flags.
        "--disable-blink-features=AutomationControlled",
        "--disable-web-security",
        "--allow-running-insecure-content",
        "--disable-features=VizDisplayCompositor",
        # Stability flags.
        "--headless",
        "--disable-gpu",
        "--no-sandbox",
        "--disable-dev-shm-usage",
        "--disable-software-rasterizer",
        # Spoofed user agent.
        f"--user-agent={random.choice(agent_pool)}",
        # Network / memory tuning.
        "--aggressive-cache-discard",
        "--memory-pressure-off",
    ]

    options = Options()
    for flag in chrome_flags:
        options.add_argument(flag)

    # No Windows-specific creation flags: this runs on a Unix-like host.
    service = Service(executable_path=driver_binary)
    driver = webdriver.Chrome(service=service, options=options)

    # Mask the automation marker on every new document.
    driver.execute_cdp_cmd('Page.addScriptToEvaluateOnNewDocument', {
        'source': '''
            Object.defineProperty(navigator, 'webdriver', {
                get: () => undefined,
            });
        '''
    })

    return driver

def get_thread_driver():
    """Return this thread's cached WebDriver, creating it on first use."""
    driver = getattr(thread_local, 'driver', None)
    if driver is None:
        driver = init_driver()
        thread_local.driver = driver
    return driver

@contextmanager
def browser_context():
    """Context manager yielding the thread-local WebDriver.

    On any exception the cached driver is discarded (quit and removed from
    thread-local storage) so the next get_thread_driver() call builds a
    fresh one; the exception is then re-raised to the caller.
    """
    driver = get_thread_driver()
    try:
        yield driver
    except Exception as e:
        print(f"Browser error: {e}")
        # Rebuild the driver on next use.  try/finally ensures the stale
        # reference is removed even when quit() itself raises (a crashed
        # browser often does), which previously left a dead driver cached.
        if hasattr(thread_local, 'driver'):
            try:
                thread_local.driver.quit()
            finally:
                delattr(thread_local, 'driver')
        raise

def wait_for_images_load(driver, timeout=3):
    """Best-effort wait until document.readyState reports 'complete'.

    Args:
        driver: the WebDriver whose current page should be polled.
        timeout: maximum seconds to wait before giving up.

    Never raises: loading is treated as optional, so on timeout (or any
    WebDriver error) the caller simply proceeds.
    """
    try:
        WebDriverWait(driver, timeout).until(
            lambda d: d.execute_script(
                "return document.readyState === 'complete'"
            )
        )
    # Narrowed from a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit and make the scraper hard to stop.
    except Exception:
        pass

def retry_on_failure(max_retries=2, delay=2):
    """Decorator that retries the wrapped call when it raises.

    Args:
        max_retries: number of retries after the first failed attempt
            (so the function runs at most ``max_retries + 1`` times).
        delay: base seconds to wait between attempts; a random 1-3 s
            jitter is added so concurrent workers don't retry in lockstep.

    Raises:
        The last exception, once every attempt has failed.
    """
    def decorator(func):
        # functools.wraps preserves __name__/__doc__ of the wrapped function,
        # which the original wrapper discarded.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(max_retries + 1):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    if attempt < max_retries:
                        wait_time = delay + random.uniform(1, 3)  # randomized back-off
                        print(f"Attempt {attempt + 1} failed: {e}. Retrying in {wait_time:.1f} seconds...")
                        time.sleep(wait_time)
                    else:
                        print(f"All {max_retries + 1} attempts failed: {e}")
                        raise
        return wrapper
    return decorator
@retry_on_failure(max_retries=2, delay=2)
def get_news_info(page=0):
    """Fetch one Toutiao search-result page for `keyword` and process every article on it.

    Args:
        page: page number inserted into the search URL's ``page_num`` parameter.

    Retried up to 2 extra times by the decorator if the whole page fails.
    """
    # Random delay so successive page requests are not fired in a burst.
    time.sleep(random.uniform(1, 3))
    
    with browser_context() as driver:
        search_url = ("https://so.toutiao.com/search?dvpf=pc&source=input&keyword=" + 
                     keyword + "&pd=synthesis&action_type=pagination&page_num=" + 
                     str(page) + "&from=search_tab&cur_tab_title=search_tab")
        print(f'Processing page {page}: {search_url}')
        
        try:
            driver.get(search_url)
            # Let dynamically injected content settle after navigation.
            time.sleep(random.uniform(2, 4))
            
            # Wait until the search-result container is present in the DOM.
            s_result_list_element = WebDriverWait(driver, 15).until(
                EC.presence_of_element_located((By.CSS_SELECTOR, 'div.s-result-list'))
            )
            
            result_content_elements = s_result_list_element.find_elements(By.CLASS_NAME, 'result-content')
            print(f'Found {len(result_content_elements)} articles on page {page}')
            
            # Visit each article; a failure in one article must not abort the page.
            for i, tout in enumerate(result_content_elements):
                try:
                    # Small random pause between articles to look less bot-like.
                    time.sleep(random.uniform(0.5, 1.5))
                    process_article(driver, tout, page, i)
                except Exception as e:
                    print(f"Error processing article {i} on page {page}: {e}")
                    continue
                    
        except Exception as e:
            print(f"Error processing page {page}: {e}")

@retry_on_failure(max_retries=1, delay=0.5)
def process_article(driver, tout, page_num, article_idx):
    """Open one search-result article in a new tab and download its images.

    Args:
        driver: the thread's WebDriver, currently on the search-result page.
        tout: the 'result-content' element for this article.
        page_num: search-result page number (callers use it for logging).
        article_idx: index of the article on the page (logging only).

    Retried once by the decorator on failure.
    """
    # The first <a> inside the result card carries the redirect ("jump") link.
    tag = WebDriverWait(tout, 5).until(
        EC.presence_of_element_located((By.TAG_NAME, 'a'))
    )
    
    # NOTE(review): until() raises on timeout and never returns a falsy
    # value, so this guard is effectively dead code.
    if not tag:
        return
        
    href = tag.get_attribute('href')
    # NOTE(review): find_element raises NoSuchElementException when <em> is
    # absent, so the `if em:` check below never sees a falsy value.
    em = tag.find_element(By.TAG_NAME, 'em')
    
    # Only follow Toutiao redirect links of the expected shape.
    if not (href.find('jump') != -1 and href.find('channel') != -1 and href.find('toutiao') != -1):
        return
        
    if em:
        print(f'Processing: {em.text}')
    
    # Decode the redirect URL and extract the numeric article id from it.
    url = paser_img_url(unquote(href))
    print(f'Article ID: {url}')
    
    # Open the article in a fresh tab so the search-results tab stays loaded.
    driver.execute_script("window.open('');")
    driver.switch_to.window(driver.window_handles[-1])
    
    try:
        visit_url = f'https://www.toutiao.com/article/{url}/?channel=&source=search_tab'
        driver.get(visit_url)
        # Give the article page a moment to render.
        time.sleep(random.uniform(1, 2))
        
        # Wait for the article body container.
        wait = WebDriverWait(driver, 8)
        article_content = wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'article-content')))
        
        # Title, publish time and author.  The absolute XPaths are tied to
        # Toutiao's current page layout and will break when it changes.
        # NOTE(review): article_name is collected but never used below.
        article_title = article_content.find_element(By.TAG_NAME, 'h1').text
        article_time = article_content.find_element(By.XPATH,
                                                  '/html/body/div[1]/div[2]/div[2]/div[1]/div/div/div/div/div[1]/span[1]').text
        article_name = article_content.find_element(By.XPATH,
                                                  '//*[@id="root"]/div[2]/div[2]/div[1]/div/div/div/div/div/span[3]').text
        
        # All <img> elements inside the article body.
        pgc_img_divs = article_content.find_elements(By.TAG_NAME, 'img')
        
        # Scroll each image into view to trigger lazy loading.
        for div in pgc_img_divs:
            driver.execute_script("arguments[0].scrollIntoView({behavior: 'smooth', block: 'center'});", div)
            time.sleep(0.1)  # brief pause per image keeps total time bounded
        
        # Best-effort wait for the images to finish loading.
        wait_for_images_load(driver, timeout=2)
        
        # Collect real image URLs, skipping inline data-URI placeholders.
        img_elements = article_content.find_elements(By.TAG_NAME, 'img')
        img_list = []
        for tmp in img_elements:
            src_ = tmp.get_attribute("src")
            if src_ and not src_.startswith('data:image'):
                img_list.append(src_)
        
        if img_list:
            folder_path = f"{article_title}_{article_time}"
            download.saved_images(folder_path, img_list)
            print(f'Downloaded {len(img_list)} images for: {article_title}')
        
    finally:
        # Always close the article tab and return to the search-results tab.
        driver.close()
        driver.switch_to.window(driver.window_handles[0])


def paser_img_url(url):
    """Extract the numeric Toutiao article id from a search-result "jump" URL.

    Expects a URL of the form ``...jump?url=<target>`` where ``<target>``
    contains ``/a<id>/?channel=``.

    Args:
        url: the (already percent-decoded) href of a search-result link.

    Returns:
        The article id as a string, or None when the expected markers are
        missing.  (The original used a module-level ``global article_id``,
        which raised NameError on the first failed parse and silently
        returned a *stale* id from a previous call on later failures.)
    """
    # Portion after the redirect wrapper.
    try:
        jump_url = url.split('jump?url=')[1]
    except IndexError:
        print("Jump marker 'jump?url=' not found in the URL")
        return None

    # The id sits between '/a' and '/?channel='.
    start_marker = '/a'
    end_marker = '/?channel='

    start_index = jump_url.find(start_marker)
    if start_index == -1:
        print("Start marker '/a' not found in the URL")
        return None

    start_index += len(start_marker)
    end_index = jump_url.find(end_marker, start_index)
    if end_index == -1:
        print("End marker '/?channel=' not found in the URL")
        return None

    return jump_url[start_index:end_index]


def process_pages_concurrently(max_pages=3, max_workers=1):
    """Scrape pages 1..max_pages: serially when max_workers == 1, else via a thread pool.

    The serial path is deliberately slower (long pauses between pages) to
    reduce the chance of being rate-limited.
    """
    print(f"Starting processing of {max_pages} pages with {max_workers} workers")

    pages = range(1, max_pages + 1)

    if max_workers == 1:
        # Serial path: one page at a time, with a long pause in between.
        for page in pages:
            try:
                print(f"Processing page {page}...")
                get_news_info(page)
                print(f"Successfully completed page {page}")
                time.sleep(random.uniform(3, 6))
            except Exception as e:
                print(f"Page {page} processing failed: {e}")
        return

    # Concurrent path: one task per page, results handled as they finish.
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        task_for_page = {pool.submit(get_news_info, p): p for p in pages}
        for done in as_completed(task_for_page):
            page = task_for_page[done]
            try:
                done.result()
                print(f"Successfully completed page {page}")
            except Exception as e:
                print(f"Page {page} processing failed: {e}")

def cleanup_thread_drivers():
    """Quit and discard the current thread's cached WebDriver, if any.

    Best-effort shutdown: WebDriver errors from quit() are swallowed
    (narrowed from the original bare ``except:``, which also ate
    KeyboardInterrupt), but the stale thread-local reference is always
    removed so a later get_thread_driver() starts fresh.
    """
    driver = getattr(thread_local, 'driver', None)
    if driver is None:
        return
    try:
        driver.quit()
    except Exception:
        # A crashed/already-closed browser may raise here; shutdown
        # must not fail because of it.
        pass
    finally:
        delattr(thread_local, 'driver')


if __name__ == '__main__':
    try:
        # Single worker: serial processing is less likely to trip rate limits.
        process_pages_concurrently(max_pages=3, max_workers=1)
    finally:
        # Always release the browser owned by the main thread.
        cleanup_thread_drivers()
        print("All processing completed.")
