import yaml
from crawler.factory import CrawlerFactory
from processor.pipeline import DataPipeline
from storage.factory import StorageFactory
from web.app import create_app
import os, sys
from pathlib import Path
import pandas as pd
import time
import random
from tqdm import tqdm  # 导入 tqdm 库
from utils.pd_show_utils import df_table

# Resolve the project root (two levels above this file) and make it
# importable.  NOTE(review): this append runs *after* the project-local
# imports at the top of the file, so it can only help imports performed
# later at runtime (e.g. inside the crawler modules) — confirm the
# intended ordering.
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_root)

CONFIG_PATH = Path(project_root) / 'config' / 'sources.yaml'

# Pre-load the previously crawled list of active convertible bonds, if any.
# Bond/stock codes are read as strings to preserve leading zeros.
temp_file_path = Path(project_root) / 'csv' / '可转债信息列表.csv'
pd_bond_aive_list = []
if temp_file_path.exists():
    pd_bond_aive_list = pd.read_csv(temp_file_path, dtype={'转债代码': str, '正股代码': str})[['转债代码', '正股代码', '交易市场', '发行日期']]
    # Drop bonds on the 'sb' market and rows with no issue date.
    pd_bond_aive_list = pd_bond_aive_list[pd_bond_aive_list['交易市场'] != 'sb']
    pd_bond_aive_list = pd_bond_aive_list[pd_bond_aive_list['发行日期'].notna()]

# Named groups of crawlers available for a run.  Only the entries placed
# in ``selected_crawlers`` below are actually executed by main().
selected_crawlers_part1 = ["可转债信息列表"]

selected_crawlers_part2 = [
    "可转债整体行情",
    "可转债强赎信息列表",
    "可转债下修信息列表",
    "可转债已退市信息列表",
    "待发可转债信息列表",
    "可转债持有人信息列表",
]

selected_crawlers_part3 = [
    "jsl_bond_concept",
    "jsl_bond_his",
    "qs_bond_his",
    "qs_stock_his",
]

selected_crawlers_part4 = [
    "qs_stock_zcfz",
    "qs_stock_xjll",
    "qs_stock_lr",
]

# The active selection for this run.
selected_crawlers = ["jsl_bond_his"]

def main():
    """Run the configured crawler tasks end to end.

    Loads crawler definitions from ``CONFIG_PATH``, instantiates them via
    ``CrawlerFactory``, and executes every crawler whose name appears in
    ``selected_crawlers``, persisting results to CSV via ``StorageFactory``.
    """
    # Load the YAML source configuration.
    with open(CONFIG_PATH, encoding='utf-8') as f:
        config = yaml.safe_load(f)

    # Build crawler instances from the config.
    crawlers = CrawlerFactory.create_from_config(config)

    # Resume bookmark.  NOTE(review): with the default {0, 0} the skip
    # conditions below can never fire; if resumption is ever enabled, the
    # per-item check likely wants "index < crawler_index or (index ==
    # crawler_index and j < item_index)" semantics — confirm before use.
    progress = {'crawler_index': 0, 'item_index': 0}

    def batch_process_data(index, crawler, pd_bond_aive_list, progress, data_process_type):
        """Fetch and parse every bond row with *crawler*, persisting results
        according to *data_process_type* ("append", "save", or default)."""
        # Loop-invariant: compute the source path once, not per item.
        src_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        # tqdm wraps the iterator to render a per-crawler progress bar.
        for j, item in tqdm(pd_bond_aive_list.iterrows(), total=len(pd_bond_aive_list), desc=f"{crawler.name} 批量处理进度"):
            if index < progress['crawler_index'] and j < progress['item_index']:
                continue  # skip work recorded as already completed
            item_name, raw_data = crawler.fetch_one(item)

            storage = StorageFactory.create('csv', item_name + '_' + crawler.name)
            time.sleep(0.1)  # throttle requests to the upstream site
            parsed_data = crawler.parse(raw_data)

            if data_process_type == "append":
                crawler.append(src_path, item_name, parsed_data)
            elif data_process_type == "save":
                # makedirs(exist_ok=True) is race-free and also creates the
                # 'csv' parent if missing (os.mkdir would raise either way).
                os.makedirs(os.path.join('csv', crawler.name), exist_ok=True)
                storage.filename = os.path.join('csv', crawler.name, f'{item_name}.csv')
                storage.save(parsed_data)
            else:
                storage.save(parsed_data)

    print("开始爬虫任务")
    # The outer tqdm bar tracks overall progress across all crawlers.
    for i, crawler in tqdm(enumerate(crawlers), total=len(crawlers), desc="整体爬虫进度"):
        if crawler.name not in selected_crawlers:
            continue  # only run explicitly selected crawlers
        if crawler.data_process_type == "single_save":
            # One fetch/parse/save cycle for the whole crawler.
            print(crawler.name + "任务开始")
            if i < progress['crawler_index']:
                continue  # skip crawlers recorded as already completed
            storage = StorageFactory.create('csv', crawler.name)
            raw_data = crawler.fetch()
            time.sleep(0.5)  # throttle requests
            parsed_data = crawler.parse(raw_data)
            storage.save(parsed_data)
        elif crawler.data_process_type == "merge":
            # elif is behavior-identical to the original's second bare `if`:
            # a crawler has exactly one data_process_type value.
            print(crawler.name + "任务开始")
            batch_process_data(i, crawler, pd_bond_aive_list, progress, "merge")
            # Combine the per-bond CSVs into one file, deleting each part.
            # NOTE(review): this assumes fetch_one's item_name equals the
            # bond's 转债代码 — confirm against the crawler implementation.
            all_parsed_data = []
            storage = StorageFactory.create('csv', crawler.name)
            for j, item in pd_bond_aive_list.iterrows():
                target_file = os.path.join('csv', item['转债代码'] + '_' + crawler.name + '.csv')
                parsed_data = pd.read_csv(target_file)
                os.remove(target_file)
                all_parsed_data.append(parsed_data)
            combined_df = pd.concat(all_parsed_data, ignore_index=True)
            storage.save(combined_df)
        elif crawler.data_process_type == "save":
            print(crawler.name + "任务开始")
            batch_process_data(i, crawler, pd_bond_aive_list, progress, "save")
        elif crawler.data_process_type == "append":
            print(crawler.name + "任务开始")
            batch_process_data(i, crawler, pd_bond_aive_list, progress, "append")

    # Web service startup intentionally disabled.
    # app = create_app(storage)
    # app.run(host='0.0.0.0', port=5000)

# Script entry point; removed the commented-out akshare debug snippet
# that was dead code.
if __name__ == '__main__':
    main()