import akshare as ak
import pandas as pd
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from threading import Lock
from get_fund_data import process_fund_data
from mysql import save_fund_data_to_db


# Thread lock used to serialize console output across worker threads,
# so concurrent progress messages don't interleave mid-line.
print_lock = Lock()


def process_single_fund(idx, total, fund_code, fund_name):
    """
    Process a single fund's data and persist it to the MySQL database.

    Args:
        idx: zero-based position of this fund within the batch (display only)
        total: total number of funds in the batch (display only)
        fund_code: fund code
        fund_name: fund short name

    Returns:
        pd.DataFrame or None: the processed fund data, or None when no data
        came back or an unexpected error occurred. Note: a DataFrame is still
        returned even if the database save fails (the failure is only logged).
    """
    try:
        with print_lock:
            print(f"\n正在处理第 {idx + 1}/{total} 只基金: {fund_code} - {fund_name}")

        frame = process_fund_data(fund_code, fund_name)

        # Guard: nothing came back — report and bail out early.
        if frame is None or frame.empty:
            with print_lock:
                print(f"⚠ 警告: 基金 {fund_code} 没有返回数据")
            return None

        # Persist to MySQL; a failed save is logged but does not discard the data.
        try:
            if save_fund_data_to_db(frame):
                with print_lock:
                    print(f"✓ 成功处理基金 {fund_code}，数据行数: {len(frame)}，已存储到数据库")
            else:
                with print_lock:
                    print(f"⚠ 警告: 基金 {fund_code} 数据处理成功但存储到数据库失败")
        except Exception as db_error:
            with print_lock:
                print(f"⚠ 警告: 基金 {fund_code} 存储到数据库时发生异常: {db_error}")

        return frame

    except Exception as e:
        # Top-level boundary for this worker: log and signal failure to the caller.
        with print_lock:
            print(f"✗ 错误: 处理基金 {fund_code} 时发生异常: {e}")
        return None


def get_all_funds_data(max_workers=10):
    """
    Fetch and process all funds concurrently; results are written straight to MySQL.

    Args:
        max_workers: maximum number of worker threads, default 10

    Returns:
        tuple: (success count, failure count, total count)
    """
    # Fetch the basic info for every open-ended fund.
    print("正在获取所有基金列表...")
    fund_open_fund_rank_em_df = ak.fund_open_fund_rank_em(symbol="全部")

    # Keep only the fund code and short-name columns.
    fund_info_df = fund_open_fund_rank_em_df[['基金代码', '基金简称']]
    total_funds = len(fund_info_df)
    print(f"共获取到 {total_funds} 只基金")
    print(f"使用 {max_workers} 个线程并发处理...\n")

    # Tally counters.
    success_count = 0
    fail_count = 0

    # Materialize (code, name) pairs once. enumerate() guarantees a sequential
    # 0-based position regardless of the DataFrame's index: iterrows() yields
    # index *labels*, which break the "idx + 1 / total" progress display (and
    # may not even be integers) whenever the index is not a clean RangeIndex.
    # zip over the columns is also far cheaper than per-row Series creation.
    fund_pairs = list(zip(fund_info_df['基金代码'], fund_info_df['基金简称']))

    # Fan the work out over a thread pool (I/O-bound: network + DB).
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_fund = {
            executor.submit(
                process_single_fund,
                idx,
                total_funds,
                code,
                name,
            ): (code, name)
            for idx, (code, name) in enumerate(fund_pairs)
        }

        # Tally each task as it finishes, in completion order.
        for future in as_completed(future_to_fund):
            fund_code, fund_name = future_to_fund[future]
            try:
                if future.result() is not None:
                    success_count += 1
                else:
                    fail_count += 1
            except Exception as e:
                with print_lock:
                    print(f"✗ 获取基金 {fund_code} 结果时发生异常: {e}")
                fail_count += 1

    # Final summary.
    print(f"\n{'='*60}")
    print(f"处理完成！成功: {success_count} 只，失败: {fail_count} 只")
    print(f"{'='*60}")

    return success_count, fail_count, total_funds


if __name__ == "__main__":
    # Worker-thread count: tune to machine/API tolerance (5-50 is a sane range).
    MAX_WORKERS = 50

    start_time = time.time()

    # Run the full concurrent fetch-and-store pipeline.
    success_count, fail_count, total_funds = get_all_funds_data(max_workers=MAX_WORKERS)

    elapsed_time = time.time() - start_time
    print(f"\n总耗时: {elapsed_time:.2f} 秒")
    print(f"平均每只基金耗时: {elapsed_time / total_funds if total_funds > 0 else 0:.2f} 秒")