"""
百度指数数据获取最佳实践
此脚本完成
1. 清洗关键词
2. 发更少请求获取更多的数据
3. 请求容错
4. 容错后保留当前已经请求过的数据，并 print 已请求过的 keywords
"""
from queue import Queue
from typing import Dict, List
import traceback
import time
from multiprocessing import Pool
import pandas as pd
from utils import get_search_index, get_clear_keywords_list
from qdata.baidu_index.common import split_keywords
from static.city import *
import os
from static.cookie_pool import cookies

# from qdata.baidu_index.config import PROVINCE_CODE, CITY_CODE
# Four disjoint city-code groups (imported from static.city via the star import
# above). main() iterates each group's keys as city names and passes the values
# as the `area` parameter to get_search_index, so each CITY_CODE_N is presumably
# a {city_name: area_code} mapping — verify against static/city.py.
# One worker (pool_id) handles one group, paired with cookies[pool_id].
city_code_list = [CITY_CODE_1, CITY_CODE_2, CITY_CODE_3, CITY_CODE_4]

def load_keywords():
    """
    Load the keyword list, preferring a previously cleaned keyword file.

    If ``static/keywords_cleared.txt`` exists and is non-empty, its
    comma-separated keywords are parsed and returned directly. Otherwise
    (missing or empty file) the cleaning step is run against the original
    keyword file and its result is returned.

    Returns:
        list[str]: the cleaned keywords.
    """
    cleared_keywords_path = "static/keywords_cleared.txt"
    original_keywords_path = "static/keywords.txt"

    if os.path.exists(cleared_keywords_path):
        print(f"发现清洗过的关键词文件: {cleared_keywords_path}")
        with open(cleared_keywords_path, 'r', encoding='utf-8') as f:
            content = f.read().strip()
        if content:
            # Split on commas, dropping surrounding whitespace and empty entries.
            keywords = []
            for raw_keyword in content.split(','):
                stripped = raw_keyword.strip()
                if stripped:
                    keywords.append(stripped)
            print(f"加载了 {len(keywords)} 个已清洗的关键词")
            return keywords
        print("清洗过的关键词文件为空，重新执行清洗")

    print("未找到清洗过的关键词文件，开始执行关键词清洗...")
    return get_clear_keywords_list(original_keywords_path, cleared_keywords_path)


def main(pool_id: int) -> None:
    """
    Fetch Baidu Index data for one city-code group and persist it to a CSV.

    1. Load the cleaned keyword list (keywords not indexed by Baidu already removed).
    2. Batch the keywords via split_keywords and request them normally.
    3. Store the results into ./res/index_{pool_id}.csv, saving after every
       keyword so an interrupted run can resume where it left off.

    Args:
        pool_id: index into city_code_list / cookies selecting which city
            group and which cookie this worker uses.
    """
    # Load the cleaned keywords
    clear_keywords = load_keywords()
    if not clear_keywords:
        print("没有有效的关键词，程序退出")
        return
    
    # Convert keywords into the required shape: [[keyword1], [keyword2], ...]
    keywords_list = [[keyword] for keyword in clear_keywords]
    
    requested_keywords = []  # every keyword successfully requested so far
    q = Queue(-1)  # unbounded work queue of keyword batches
    print("开始请求百度指数")

    CITY_CODE = city_code_list[pool_id]
    years = [2023, 2024]
    # Make sure the results directory exists
    os.makedirs("./res", exist_ok=True)
    
    # Check whether a previous run already produced a data file (resume support)
    csv_path = f"./res/index_{pool_id}.csv"
    if os.path.exists(csv_path):
        print(f"发现已存在的数据文件: {csv_path}")
        datas = pd.read_csv(csv_path)
    else:
        print(f"创建新的数据文件: {csv_path}")
        datas = pd.DataFrame()
        datas["City"] = None
        datas["Year"] = None
        
        # Create the three metric columns (pc+wise / pc / wise) for every keyword
        for kw in keywords_list:
            keyword_name = kw[0]  # each sub-list holds exactly one keyword
            datas[["(pc+wise)-"+keyword_name, "(pc)-"+keyword_name, "(wise)-"+keyword_name]] = [None, None, None]

        # Create one row per (city, year) pair
        for city in CITY_CODE.keys():
            for year in years:
                index_row = pd.Series([city, year], index=['City', 'Year'])
                datas.loc[len(datas)] = index_row
        
        datas.to_csv(csv_path, index=False)
        print(f"初始化数据文件完成，包含 {len(datas)} 行数据")

    # NOTE: `col` is actually the row label from iterrows(), not a column;
    # it is used below as the row key for datas.at[col, column_name].
    for col, row in datas.iterrows():
        city, year = row['City'], row['Year']
        
        # Skip rows whose every cell is already filled in
        if not row.isnull().any():
            print(f"跳过已完成的行: {year}==={city}")
            continue
        
        print(f"开始处理: {year}==={city}")
        
        # Collect only the keywords whose metrics are still missing for this row
        incomplete_keywords = []
        for kw in keywords_list:
            keyword_name = kw[0]
            # Column names of this keyword's three metrics
            pc_wise_col = f"(pc+wise)-{keyword_name}"
            pc_col = f"(pc)-{keyword_name}"
            wise_col = f"(wise)-{keyword_name}"
            
            # A missing column means the keyword was never processed at all
            try:
                if (pd.isna(row[pc_wise_col]) or pd.isna(row[pc_col]) or pd.isna(row[wise_col])):
                    incomplete_keywords.append(kw)
            except KeyError:
                # Column absent from the loaded CSV: keyword not requested yet
                print(f"列不存在，需要处理关键词: {keyword_name}")
                incomplete_keywords.append(kw)
        
        if not incomplete_keywords:
            print(f"该行所有关键词已完成: {year}==={city}")
            continue
        
        print(f"发现 {len(incomplete_keywords)} 个未完成的关键词: {[kw[0] for kw in incomplete_keywords]}")
        
        # Batch the incomplete keywords and queue the batches for this row
        for splited_keywords_list in split_keywords(incomplete_keywords):
            q.put(splited_keywords_list)

        while not q.empty():
            cur_keywords_list = q.get()
            try:
                print(f"{year}==={city}===开始请求: {[kw[0] for kw in cur_keywords_list]}")
                # Request the index for the whole queued batch at once
                for index in get_search_index(
                        keywords_list=cur_keywords_list,
                        start_date=str(year) + '-01-01',
                        end_date=str(year) + '-12-31',
                        cookies=cookies[pool_id],
                        area=CITY_CODE[city],
                ):
                    index["keyword"] = ",".join(index["keyword"])
                    # Write the cell values immediately
                    try:
                        datas.at[col, "(pc+wise)-"+index["keyword"]] = index['all']
                        datas.at[col, "(pc)-"+index["keyword"]] = index['pc']
                        datas.at[col, "(wise)-"+index["keyword"]] = index['wise']
                    except KeyError:
                        # Columns missing (e.g. joined multi-keyword name): create them first
                        print(f"创建缺失的列: {index['keyword']}")
                        datas[f"(pc+wise)-{index['keyword']}"] = None
                        datas[f"(pc)-{index['keyword']}"] = None
                        datas[f"(wise)-{index['keyword']}"] = None
                        # ...then write the values
                        datas.at[col, "(pc+wise)-"+index["keyword"]] = index['all']
                        datas.at[col, "(pc)-"+index["keyword"]] = index['pc']
                        datas.at[col, "(wise)-"+index["keyword"]] = index['wise']
                    
                    # Save after every keyword (cell-level resume checkpoint)
                    datas.to_csv(csv_path, index=False)
                    print(f"已保存关键词数据: {index['keyword']}")

                requested_keywords.extend([kw[0] for kw in cur_keywords_list])
                print(f"请求完成: {[kw[0] for kw in cur_keywords_list]}")
                time.sleep(15)
            except Exception as e:
                print(f"请求出错: {e}")
                print(f"已请求的关键词: {requested_keywords}")
                traceback.print_exc()
                # Persist current progress before backing off
                datas.to_csv(csv_path, index=False)
                # Re-queue the failed batch so it gets retried
                q.put(cur_keywords_list)
                time.sleep(120)
        
        print(f'{year}==={city}===该行爬取完成')
        # Final save for this row
        datas.to_csv(csv_path, index=False)


if __name__ == "__main__":
    # 根据城市代码列表的数量来确定进程数
    for i in range(len(city_code_list)):
        main(i)