"""
百度指数数据获取 - 多线程版本
此脚本完成：
1. 清洗关键词
2. 多线程并发请求，提高爬取效率
3. 线程安全的文件操作和断点续传
4. 智能cookie分配和错误重试
"""
import threading
import time
import traceback
from concurrent.futures import ThreadPoolExecutor, as_completed
from queue import Queue
from typing import Dict, List, Tuple
import pandas as pd
import os
from threading import Lock

from utils import get_search_index, get_clear_keywords_list
from qdata.baidu_index.common import split_keywords
from static.city import *
from static.cookie_pool import cookies
from threading_config import THREAD_CONFIG, FILE_CONFIG, CRAWL_CONFIG, DEBUG_CONFIG


# Load tuning knobs from the shared threading configuration module.
THREAD_COUNT = THREAD_CONFIG["thread_count"]    # worker threads per crawl pool
REQUEST_DELAY = THREAD_CONFIG["request_delay"]  # pause after each successful batch (seconds)
ERROR_DELAY = THREAD_CONFIG["error_delay"]      # pause after a failed batch (seconds)
MAX_RETRIES = THREAD_CONFIG["max_retries"]      # serial retry rounds for failed batches
RETRY_DELAY = THREAD_CONFIG["retry_delay"]      # pause between retry rounds (seconds)

# Thread-safety locks: file_lock serializes CSV read-modify-write cycles,
# console_lock keeps printed lines from interleaving across threads.
file_lock = Lock()
console_lock = Lock()

# One city-code dict per crawl pool; pool_id indexes into this list.
city_code_list = [CITY_CODE_1, CITY_CODE_2, CITY_CODE_3, CITY_CODE_4]


def thread_safe_print(*args, **kwargs):
    """Print under the console lock so concurrent thread output never interleaves."""
    worker = threading.current_thread().name
    with console_lock:
        print(f"[线程{worker}]", *args, **kwargs)


def load_keywords():
    """
    Return the working keyword list.

    Prefers a previously cleaned keyword file; when that file is missing or
    empty, runs the cleaning step against the raw keyword file instead.
    """
    cleaned_path = FILE_CONFIG["cleared_keywords_file"]
    raw_path = FILE_CONFIG["keywords_file"]

    if os.path.exists(cleaned_path):
        print(f"发现清洗过的关键词文件: {cleaned_path}")
        with open(cleaned_path, 'r', encoding='utf-8') as fh:
            text = fh.read().strip()
        if not text:
            print("清洗过的关键词文件为空，重新执行清洗")
        else:
            parsed = [token.strip() for token in text.split(',') if token.strip()]
            print(f"加载了 {len(parsed)} 个已清洗的关键词")
            return parsed

    print("未找到清洗过的关键词文件，开始执行关键词清洗...")
    return get_clear_keywords_list(raw_path, cleaned_path)


def safe_save_dataframe(df: pd.DataFrame, file_path: str):
    """Persist *df* to *file_path* as CSV while holding the global file lock."""
    file_lock.acquire()
    try:
        df.to_csv(file_path, index=False)
    finally:
        file_lock.release()


def get_incomplete_tasks(df: pd.DataFrame, keywords_list: List[List[str]]) -> List[Tuple[int, str, int, List[List[str]]]]:
    """
    Collect every city/year row that still has missing index data.

    A row with no null cells is considered done and skipped; otherwise each
    keyword whose three metric columns are not all filled is queued again.

    Returns:
        [(row index, city, year, pending keyword list), ...]
    """
    pending = []

    for idx, row in df.iterrows():
        # Fully populated rows need no further crawling.
        if not row.isnull().any():
            continue

        # A keyword is pending when any of its three metric cells is empty.
        todo = [
            kw for kw in keywords_list
            if any(pd.isna(row[f"({metric})-{kw[0]}"]) for metric in ("pc+wise", "pc", "wise"))
        ]

        if todo:
            pending.append((idx, row['City'], row['Year'], todo))

    return pending


def process_keyword_batch(task_info: Tuple[int, str, int, List[List[str]], str, int, str]) -> bool:
    """
    Crawl one batch of keywords for a single (city, year) and persist results.

    Args:
        task_info: (row index, city, year, keyword batch, cookie, area code, csv path)

    Returns:
        True when the whole batch was fetched and saved, False on any exception.
    """
    row_idx, city, year, keyword_batch, cookie, area_code, csv_path = task_info
    # NOTE(review): thread_name is never used below; thread_safe_print
    # resolves the current thread's name itself.
    thread_name = threading.current_thread().name

    try:
        thread_safe_print(f"开始处理: {year}==={city}===关键词: {[kw[0] for kw in keyword_batch]}")

        # Request a full calendar year of Baidu index data for this batch.
        for index in get_search_index(
            keywords_list=keyword_batch,
            start_date=str(year) + '-01-01',
            end_date=str(year) + '-12-31',
            cookies=cookie,
            area=area_code,
        ):
            # index["keyword"] arrives as a list; join it to build the column
            # suffix. Assumes the joined name matches a column created at file
            # init (true for single-keyword batches) — TODO confirm behavior
            # for multi-keyword batches produced by split_keywords.
            index["keyword"] = ",".join(index["keyword"])

            # Read-modify-write the shared CSV under the file lock so
            # concurrent workers never clobber each other's cells.
            with file_lock:
                # Re-read to pick up cells other threads saved since our last view.
                df = pd.read_csv(csv_path)

                # Fill the three metric cells for this keyword.
                df.at[row_idx, "(pc+wise)-"+index["keyword"]] = index['all']
                df.at[row_idx, "(pc)-"+index["keyword"]] = index['pc']
                df.at[row_idx, "(wise)-"+index["keyword"]] = index['wise']

                # Persist immediately so progress survives interruption.
                df.to_csv(csv_path, index=False)

            thread_safe_print(f"已保存关键词数据: {index['keyword']}")

        thread_safe_print(f"完成处理: {year}==={city}===关键词: {[kw[0] for kw in keyword_batch]}")
        time.sleep(REQUEST_DELAY)  # throttle between successful batches
        return True

    except Exception as e:
        thread_safe_print(f"处理失败: {year}==={city}===错误: {e}")
        traceback.print_exc()
        time.sleep(ERROR_DELAY)  # back off longer after a failure
        return False


def create_task_queue(pool_id: int) -> Tuple[Queue, str, pd.DataFrame]:
    """
    Build the work queue for one pool and (re-)initialise its CSV data file.

    Args:
        pool_id: index into ``city_code_list``; also the output file suffix.

    Returns:
        (queue of batch task tuples, csv file path, the loaded DataFrame)

    Raises:
        ValueError: when keyword cleaning yields no usable keywords.
    """
    # Load the cleaned keyword list (cleaning runs on demand).
    clear_keywords = load_keywords()
    if not clear_keywords:
        raise ValueError("没有有效的关键词")

    keywords_list = [[keyword] for keyword in clear_keywords]

    # Make sure the result directory exists before any file I/O.
    result_dir = FILE_CONFIG["result_dir"]
    os.makedirs(result_dir, exist_ok=True)

    # Resolve this pool's CSV path, city-code table and target years.
    file_suffix = FILE_CONFIG["file_suffix"]
    csv_path = f"{result_dir}/index_{pool_id}{file_suffix}.csv"
    CITY_CODE = city_code_list[pool_id]
    years = CRAWL_CONFIG["years"]

    if os.path.exists(csv_path):
        # Resume from an existing checkpoint file.
        print(f"发现已存在的数据文件: {csv_path}")
        datas = pd.read_csv(csv_path)
    else:
        print(f"创建新的数据文件: {csv_path}")
        datas = pd.DataFrame()
        datas["City"] = None
        datas["Year"] = None

        # One (pc+wise)/(pc)/(wise) column triple per keyword.
        for kw in keywords_list:
            keyword_name = kw[0]
            datas[["(pc+wise)-"+keyword_name, "(pc)-"+keyword_name, "(wise)-"+keyword_name]] = [None, None, None]

        # One row per (city, year) combination; metric cells start empty.
        # NOTE(review): the Series only carries City/Year — this relies on
        # pandas aligning the remaining columns to NaN on .loc assignment;
        # verify against the pandas version in use.
        for city in CITY_CODE.keys():
            for year in years:
                index_row = pd.Series([city, year], index=['City', 'Year'])
                datas.loc[len(datas)] = index_row

        datas.to_csv(csv_path, index=False)
        print(f"初始化数据文件完成，包含 {len(datas)} 行数据")

    # Work out which (city, year, keyword) cells still need crawling.
    incomplete_tasks = get_incomplete_tasks(datas, keywords_list)
    print(f"发现 {len(incomplete_tasks)} 个未完成的城市-年份组合")

    # Turn each incomplete row into per-batch crawl tasks.
    task_queue = Queue()
    total_batches = 0

    for row_idx, city, year, incomplete_keywords in incomplete_tasks:
        # split_keywords groups keywords into request-sized batches.
        for keyword_batch in split_keywords(incomplete_keywords):
            # Round-robin cookies across batches to spread account load.
            cookie_idx = total_batches % len(cookies)
            cookie = cookies[cookie_idx]
            area_code = CITY_CODE[city]

            task_info = (row_idx, city, year, keyword_batch, cookie, area_code, csv_path)
            task_queue.put(task_info)
            total_batches += 1

    print(f"创建了 {total_batches} 个批次任务，将使用 {min(THREAD_COUNT, total_batches)} 个线程")
    return task_queue, csv_path, datas


def multithreaded_spider(pool_id: int):
    """
    Entry point for one crawl pool: build the task queue, fan the batches out
    across a thread pool, then serially retry any batches that failed.

    Args:
        pool_id: index into ``city_code_list`` selecting which city set to crawl.
    """
    print(f"=== 开始多线程爬虫 (Pool ID: {pool_id}) ===")

    try:
        # Build the work queue from whatever is still missing in the CSV.
        # The initial DataFrame is unused here (workers re-read the CSV), so
        # discard it instead of binding a dead local.
        task_queue, csv_path, _ = create_task_queue(pool_id)

        if task_queue.empty():
            print("所有任务已完成，无需爬取")
            return

        successful_tasks = 0
        failed_tasks = []

        # First pass: run every batch concurrently in the thread pool.
        with ThreadPoolExecutor(max_workers=THREAD_COUNT, thread_name_prefix=f"Spider-{pool_id}") as executor:
            # Submit everything up front; futures map back to their task tuples.
            future_to_task = {}
            while not task_queue.empty():
                task_info = task_queue.get()
                future = executor.submit(process_keyword_batch, task_info)
                future_to_task[future] = task_info

            # Collect results as they finish, recording failures for retry.
            for future in as_completed(future_to_task):
                task_info = future_to_task[future]
                row_idx, city, year, keyword_batch, _, _, _ = task_info

                try:
                    success = future.result()
                    if success:
                        successful_tasks += 1
                        thread_safe_print(f"✅ 任务完成: {year}==={city}===关键词批次大小: {len(keyword_batch)}")
                    else:
                        failed_tasks.append(task_info)
                        thread_safe_print(f"❌ 任务失败: {year}==={city}===关键词批次大小: {len(keyword_batch)}")
                except Exception as e:
                    failed_tasks.append(task_info)
                    thread_safe_print(f"❌ 任务异常: {year}==={city}===错误: {e}")

        # Second pass: retry failed batches serially, up to MAX_RETRIES rounds.
        if failed_tasks:
            print(f"\n=== 重试 {len(failed_tasks)} 个失败的任务 ===")
            retry_count = 0

            while failed_tasks and retry_count < MAX_RETRIES:
                retry_count += 1
                print(f"第 {retry_count} 次重试，剩余任务: {len(failed_tasks)}")

                retry_failed = []
                for task_info in failed_tasks:
                    if process_keyword_batch(task_info):
                        successful_tasks += 1
                    else:
                        retry_failed.append(task_info)

                failed_tasks = retry_failed
                # Only pause when another retry round is actually coming; the
                # previous version slept even after the final round finished.
                if failed_tasks and retry_count < MAX_RETRIES:
                    time.sleep(RETRY_DELAY)

        print(f"\n=== 爬取完成 (Pool ID: {pool_id}) ===")
        print(f"成功任务: {successful_tasks}")
        print(f"失败任务: {len(failed_tasks)}")
        print(f"数据文件: {csv_path}")

        if failed_tasks:
            print("失败的任务:")
            for task_info in failed_tasks:
                row_idx, city, year, keyword_batch, _, _, _ = task_info
                print(f"  - {year}==={city}===关键词数: {len(keyword_batch)}")

    except Exception as e:
        print(f"多线程爬虫出错: {e}")
        traceback.print_exc()


if __name__ == "__main__":
    # Banner: show the effective crawl configuration before starting.
    print("🕷️  多线程百度指数爬虫")
    print(f"线程数: {THREAD_COUNT}")
    print(f"Cookie数量: {len(cookies)}")
    print(f"城市代码池数量: {len(city_code_list)}")
    print(f"爬取年份: {CRAWL_CONFIG['years']}")
    print(f"请求间隔: {REQUEST_DELAY}秒")
    print(f"最大重试次数: {MAX_RETRIES}")

    # Resolve the pool range; a falsy end_pool means "through the last pool".
    start_pool = CRAWL_CONFIG["start_pool"]
    end_pool = CRAWL_CONFIG["end_pool"] or len(city_code_list)
    last_pool = min(end_pool, len(city_code_list))

    print(f"将处理池 {start_pool} 到 {end_pool-1}")

    # Crawl each pool in sequence.
    for pool_id in range(start_pool, last_pool):
        print(f"\n{'='*50}")
        print(f"开始处理池 {pool_id}")
        print(f"{'='*50}")
        multithreaded_spider(pool_id)

    print("\n🎉 所有任务完成!")

    # Optional per-pool completion statistics.
    if DEBUG_CONFIG["show_statistics"]:
        print("\n📊 统计信息:")
        for pool_id in range(start_pool, last_pool):
            csv_path = f"{FILE_CONFIG['result_dir']}/index_{pool_id}{FILE_CONFIG['file_suffix']}.csv"
            if os.path.exists(csv_path):
                frame = pd.read_csv(csv_path)
                data_cols = len(frame.columns) - 2  # City and Year are metadata, not data cells
                total_cells = len(frame) * data_cols
                completed_cells = total_cells - frame.isnull().sum().sum()
                if total_cells > 0:
                    completion_rate = (completed_cells / total_cells) * 100
                else:
                    completion_rate = 0
                print(f"  池 {pool_id}: {completed_cells}/{total_cells} 单元格完成 ({completion_rate:.1f}%)")