import pandas as pd
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import time
import logging
from datetime import datetime,timedelta
import os
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor, as_completed
import threading
import queue
from typing import Dict, List, Tuple, Optional, Any
from config.data_config import FUND_CACHE_FILE_PATH
import random,json,re
import argparse


# Fund type codes used in the East Money cache file names
# (gp/zs/hh/zq/qdii/fof -- presumably equity/index/hybrid/bond/QDII/FOF; TODO confirm).
FUND_TYPES = ['gp', 'zs', 'hh', 'zq', 'qdii', 'fof']
#FUND_TYPES = ['qdii']  # debug override: sync a single type only

class FundDataSync:
    """Synchronise East Money open-fund data for one fund type into a CSV file.

    Worker threads fetch per-fund records over HTTP and push them onto
    ``write_queue``; a single daemon writer thread drains the queue and
    performs every CSV write (writes are additionally guarded by ``file_lock``).
    """

    def __init__(self, fund_type: str, list_file: str, data_file: str,
                 max_workers: int = 10, info_mode: bool = False):
        """Initialise the synchroniser.

        Args:
            fund_type: fund category code (one of FUND_TYPES).
            list_file: path of the CSV listing the fund codes to process.
            data_file: path of the CSV the fetched records are written to.
            max_workers: maximum number of parallel fetch threads.
            info_mode: if True, scrape fund information (scale / allocation /
                subscription data) instead of historical net values.
        """
        self.fund_type = fund_type
        self.list_file = list_file
        self.data_file = data_file
        self.max_workers = max_workers
        self.info_mode = info_mode  # info-mode flag
        self.session_local = threading.local()  # per-thread HTTP session storage
        self.write_queue = queue.Queue()  # records waiting to be written
        self.write_thread = None  # single writer thread (started below)
        self.file_lock = threading.Lock()  # serialises file operations
        self._init_write_thread()
        # Request-header template.  It is shared between threads, so the fetch
        # methods must take a copy before customising Referer/Host (the original
        # code mutated this dict in place from several threads at once).
        self.headers = {
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
                    "Accept" : "*/*",
                    "Accept-Language" : "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
                    "Connection" : "keep-alive",
                    "Referer" : 'https://fund.eastmoney.com/',
                    "Host" : 'api.fund.eastmoney.com',
                    "Cookie" : "ASP.NET_SessionId=fn33e1v1horkjpvmjdsdtrnu",
                    "Accept-Encoding":"gzip, deflate",
                }

    def _init_write_thread(self) -> None:
        """Start the daemon thread that drains the write queue."""
        self.write_thread = threading.Thread(target=self._process_write_queue, daemon=True)
        self.write_thread.start()

    def _get_session(self) -> "requests.Session":
        """Return this thread's session, creating it (with retries) on first use."""
        if not hasattr(self.session_local, 'session'):
            session = requests.Session()
            # Retry transient server / rate-limit errors with exponential backoff.
            retry_strategy = Retry(
                total=3,
                backoff_factor=1,
                status_forcelist=[429, 500, 502, 503, 504]
            )
            adapter = HTTPAdapter(max_retries=retry_strategy)
            session.mount("http://", adapter)
            session.mount("https://", adapter)
            self.session_local.session = session
        return self.session_local.session

    def _process_write_queue(self) -> None:
        """Writer-thread loop: collect queued records and flush them in batches."""
        batch = []
        batch_size = 100  # flush threshold

        while True:
            try:
                item = self.write_queue.get(timeout=1)

                if item is None:  # shutdown signal sent by run()
                    if batch:
                        self._write_batch(batch)
                    break

                batch.append(item)

                if len(batch) >= batch_size:
                    self._write_batch(batch)
                    batch = []

                self.write_queue.task_done()

            except queue.Empty:
                # Queue idle for 1s: flush whatever is pending and keep waiting.
                if batch:
                    self._write_batch(batch)
                    batch = []
                continue

    def _write_batch(self, batch: List[Dict[str, Any]]) -> None:
        """Write one batch of records to ``data_file``.

        Non-info mode appends; info mode re-reads the existing file, replaces
        rows with the same (fund code, date) key and rewrites the whole file.
        """
        if not batch:
            return

        try:
            # Convert the new records to a DataFrame.
            new_df = pd.DataFrame(batch)

            # Info mode must overwrite rows that share the unique key.
            if self.info_mode:
                # Columns forming the unique identity of a row.
                unique_keys = ['基金代码', '日期']

                # If the file already exists, load it so old rows can be replaced.
                if os.path.exists(self.data_file):
                    existing_df = pd.read_csv(self.data_file, dtype={'基金代码': str})

                    # Drop existing rows whose key also appears in the new data,
                    # then concatenate -- new data wins.
                    mask = ~existing_df.set_index(unique_keys).index.isin(new_df.set_index(unique_keys).index)
                    filtered_existing = existing_df[mask]

                    combined_df = pd.concat([filtered_existing, new_df], ignore_index=True)
                else:
                    # No file yet: the new data is the whole content.
                    combined_df = new_df

                # Belt-and-braces dedupe, keeping the last (i.e. new) record per key.
                combined_df.drop_duplicates(subset=unique_keys, keep='last', inplace=True)
            else:
                # Plain mode: just append the new rows.
                combined_df = new_df

            with self.file_lock:
                # Info mode rewrites the file; plain mode appends.
                mode = 'w' if self.info_mode else 'a'
                file_exists = os.path.exists(self.data_file)

                combined_df.to_csv(
                    self.data_file,
                    mode=mode,
                    header=not file_exists or self.info_mode,  # info mode always writes a header (overwrite)
                    index=False,
                    encoding='utf-8'
                )

            mode_label = "[信息模式]" if self.info_mode else ""
            action = "覆盖更新" if self.info_mode else "追加"
            logging.info(f"{mode_label}[{self.fund_type}] 成功{action}{len(batch)}条数据，合并后总记录数: {len(combined_df)}")
        except Exception as e:
            logging.error(f"[{self.fund_type}] 写入数据失败: {str(e)}")

    def load_fund_codes(self) -> List[str]:
        """Load the fund codes to process from ``list_file``.

        Returns:
            De-duplicated list of fund code strings.

        Raises:
            FileNotFoundError: if ``list_file`` does not exist.
            ValueError: if the required '基金代码' column is missing.
        """
        try:
            if not os.path.exists(self.list_file):
                raise FileNotFoundError(f"基金列表文件不存在: {self.list_file}")

            df = pd.read_csv(self.list_file, dtype={'基金代码': str})
            if '基金代码' not in df.columns:
                raise ValueError("基金列表文件中未找到'基金代码'列")

            # De-duplicate and normalise to strings.
            fund_codes = df['基金代码'].astype(str).drop_duplicates().tolist()
            logging.info(f"[{self.fund_type}] 成功加载{len(fund_codes)}个基金代码")
            return fund_codes
        except Exception as e:
            logging.error(f"[{self.fund_type}] 加载基金代码失败: {str(e)}")
            raise

    def load_existing_data(self) -> Dict[str, datetime]:
        """Load existing data, returning fund code -> latest stored date.

        Info mode always starts from scratch and returns an empty mapping.
        """
        if self.info_mode:
            return {}

        fund_max_date = {}

        if not os.path.exists(self.data_file):
            logging.error(f'没有找到数据文件:{self.data_file}')
            return fund_max_date

        try:
            df = pd.read_csv(self.data_file, dtype={'基金代码': str})
            if '基金代码' not in df.columns or '日期' not in df.columns:
                logging.warning(f"[{self.fund_type}] 数据文件缺少'基金代码'或'日期'列")
                return fund_max_date

            # Parse the date column; unparseable values become NaT.
            df['日期'] = pd.to_datetime(df['日期'], errors='coerce')

            # Latest date per fund code.
            fund_groups = df.groupby('基金代码')
            for fund_code, group in fund_groups:
                max_date = group['日期'].max()
                if pd.notna(max_date):
                    fund_max_date[fund_code] = max_date

            logging.info(f"[{self.fund_type}] 已加载{len(fund_max_date)}个基金的历史数据")
            return fund_max_date
        except Exception as e:
            logging.error(f"[{self.fund_type}] 加载已有数据失败: {str(e)}")
            raise

    def fetch_api_data(self, fund_code: str, max_date: datetime) -> List[Dict[str, Any]]:
        """Fetch paged net-value data for one fund from the JSONP API.

        Pages are requested from ``max_date`` forward; returns whatever was
        collected so far if a page fails.
        """
        all_data = []
        page = 1
        session = self._get_session()

        while True:
            try:
                # Build the JSONP URL: millisecond timestamp plus a random
                # callback name, mimicking the browser request.
                timestamp_seconds_float = time.time()
                timestamp_milliseconds = int(timestamp_seconds_float * 1000)
                timestamp_ms_str = str(timestamp_milliseconds)
                random_str = ''.join(random.choices('0123456789', k=17))
                callback_str = 'jQuery1830' + random_str + '_' + timestamp_ms_str

                start_date_str = max_date.strftime("%Y-%m-%d")

                url = f'https://api.fund.eastmoney.com/f10/lsjz' \
                    f'?fundCode={fund_code}' \
                    f'&pageIndex={page}' \
                    f'&pageSize=20' \
                    f'&startDate={start_date_str}' \
                    f'&endDate=' \
                    f'&callback={callback_str}' \
                    f'&_={timestamp_ms_str}'

                # Copy the shared header template before customising it --
                # mutating self.headers directly would race with other threads.
                headers = dict(self.headers)
                headers["Referer"] = 'https://fund.eastmoney.com/'
                headers["Host"] = 'api.fund.eastmoney.com'

                response = session.get(url, timeout=60, headers=headers)
                response.raise_for_status()

                # Strip the JSONP wrapper to get plain JSON.
                json_str = re.sub(r'^jQuery.*?\(', '', response.text)
                json_str = re.sub(r'\)$', '', json_str)

                data = json.loads(json_str)

                # Stop when the page carries no rows.
                if not data.get("Data") or not data["Data"].get("LSJZList"):
                    logging.info(f"基金 {fund_code} 第{page}页没有更多数据")
                    break

                current_page_data = data["Data"]["LSJZList"]

                # Keep only rows on or after the start date.
                filtered_data = [item for item in current_page_data if item["FSRQ"] >= start_date_str]

                # Reduce each row to the fields we persist, renaming FSRQ -> 日期.
                slim_rows = [
                    {
                        "日期": item["FSRQ"],
                        "DWJZ": item["DWJZ"],
                        "LJJZ": item["LJJZ"]
                    }
                    for item in filtered_data
                ]
                # Tag every row with its fund code.
                for item in slim_rows:
                    item['基金代码'] = fund_code

                all_data.extend(slim_rows)

                # Rows older than start_date appeared on this page -> done.
                if len(filtered_data) < len(current_page_data):
                    break

                # All rows fetched according to the reported total.
                total_count = data.get("TotalCount", 0)
                if len(all_data) >= total_count:
                    logging.info(f"基金 {fund_code} 已获取全部数据，共{len(all_data)}条")
                    break

                # Next page.
                page += 1

                # Throttle to avoid hammering the API.
                time.sleep(0.5)

            except Exception as e:
                logging.error(f"[{self.fund_type}] 基金{fund_code}第{page}页获取失败: {str(e)}")
                return all_data

        return all_data

    def fetch_all_fund_data(self, fund_code: str) -> List[Dict[str, Any]]:
        """Fetch the full net-value history (last 365 days) for one fund.

        Parses the pingzhongdata JS file and merges the unit-value and
        accumulated-value series by timestamp.  Returns [] on any failure.
        """
        try:
            session = self._get_session()

            # Randomised cache-busting parameter: a timestamp from the last
            # 3 hours formatted as YYYYmmddHHMMSS.
            now = time.time()
            window_start = now - 3 * 60 * 60
            random_timestamp = random.uniform(window_start, now)
            dt = datetime.fromtimestamp(random_timestamp)
            v_param = dt.strftime("%Y%m%d%H%M%S")

            url = f"https://fund.eastmoney.com/pingzhongdata/{fund_code}.js?v={v_param}"

            # Copy before mutating -- self.headers is shared across threads.
            headers = dict(self.headers)
            headers["Referer"] = f'https://fund.eastmoney.com/{fund_code}.html'
            headers["Host"] = 'fund.eastmoney.com'

            response = session.get(url, timeout=60, headers=headers)
            response.raise_for_status()

            # Extract the two JS variables holding the value series.
            var_name = 'Data_netWorthTrend'
            pattern = re.compile(f"var {var_name} =(.*?);", re.DOTALL)
            match = pattern.search(response.text)
            if not match:
                # Previously fell through to match.group(1) and crashed with
                # AttributeError; bail out cleanly instead.
                logging.error(f"未找到变量 {var_name}")
                return []
            var_a = match.group(1)

            var_name = 'Data_ACWorthTrend'
            pattern = re.compile(f"var {var_name} =(.*?);", re.DOTALL)
            match = pattern.search(response.text)
            if not match:
                logging.error(f"未找到变量 {var_name}")
                return []
            var_b = match.group(1)

            net_worth_trend = json.loads(var_a)
            ac_worth_trend = json.loads(var_b)

            if len(net_worth_trend) != len(ac_worth_trend):
                logging.info('数据未对齐，需要特殊处理')

            # Cut-off (midnight, 365 days ago) -- hoisted out of the loop;
            # the original recomputed it for every row.
            cutoff = datetime.strptime(
                (datetime.now() - timedelta(days=365)).strftime('%Y-%m-%d'), '%Y-%m-%d')

            # Merge the two series into persisted row dicts.
            result = []
            for net_item, ac_item in zip(net_worth_trend, ac_worth_trend):
                timestamp_a = net_item['x']
                timestamp_b = ac_item[0]

                # Only merge entries whose timestamps line up exactly.
                if timestamp_a == timestamp_b:
                    # Millisecond timestamp -> date string.
                    timestamp = net_item["x"] / 1000
                    fsrq = datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d")

                    # Skip rows older than the cut-off.
                    if datetime.strptime(fsrq, "%Y-%m-%d") < cutoff:
                        continue

                    # Unit net value and accumulated net value.
                    dwjz = str(net_item["y"])
                    ljjz = str(ac_item[1])  # second element of ACWorthTrend is the accumulated value

                    result.append({
                        "日期": fsrq,
                        "DWJZ": dwjz,
                        "LJJZ": ljjz,
                        "基金代码": fund_code
                    })
                else:
                    logging.info(f"跳过不匹配的时间戳: a={timestamp_a}, b={timestamp_b}")

            return result

        except Exception as e:
            logging.error(f"[{self.fund_type}] 基金{fund_code}全部数据获取失败: {str(e)}")
            return []

    def filter_new_data(self, data: List[Dict[str, Any]], max_date: datetime) -> List[Dict[str, Any]]:
        """Return the rows of *data* strictly newer than *max_date*.

        Assumes *data* is sorted newest-first, so iteration stops at the
        first row that is not newer.  Rows with missing/invalid '日期' are
        logged and skipped.
        """
        new_data = []
        for item in data:
            try:
                item_date = datetime.strptime(item['日期'], '%Y-%m-%d')
                if item_date > max_date:
                    new_data.append(item)
                else:
                    break  # data assumed to be in descending date order
            except (KeyError, ValueError) as e:
                logging.warning(f"[{self.fund_type}] 日期解析失败: {str(e)}, 数据: {item}")
                continue

        return new_data

    def fetch_fund_info(self, fund_code: str) -> List[Dict[str, Any]]:
        """Info mode: scrape fund information from the pingzhongdata JS file.

        Extracts the scale-fluctuation, asset-allocation and
        subscription/redemption blocks via regex and joins them by date.
        Returns [] on any failure.
        """
        try:
            session = self._get_session()

            # Randomised cache-busting parameter: a timestamp from the last
            # 3 hours formatted as YYYYmmddHHMMSS.
            now = time.time()
            window_start = now - 3 * 60 * 60
            random_timestamp = random.uniform(window_start, now)
            dt = datetime.fromtimestamp(random_timestamp)
            v_param = dt.strftime("%Y%m%d%H%M%S")

            url = f"https://fund.eastmoney.com/pingzhongdata/{fund_code}.js?v={v_param}"

            # Copy before mutating -- self.headers is shared across threads.
            headers = dict(self.headers)
            headers["Referer"] = f'https://fund.eastmoney.com/{fund_code}.html'
            headers["Host"] = 'fund.eastmoney.com'

            response = session.get(url, timeout=60, headers=headers)
            response.raise_for_status()

            html_content = response.text

            # --------------------------
            # 1. Extract the three data blocks via regex
            # --------------------------
            # Scale fluctuation block.
            fluct_pattern = re.compile(r'var Data_fluctuationScale =(.*?);', re.DOTALL)
            fluct_match = fluct_pattern.search(html_content)
            if not fluct_match:
                logging.warning(f"基金{fund_code}未找到规模变动数据块")
                return []
            fluct_data = json.loads(fluct_match.group(1))

            # Asset allocation block.
            asset_pattern = re.compile(r'var Data_assetAllocation =(.*?);', re.DOTALL)
            asset_match = asset_pattern.search(html_content)
            if not asset_match:
                logging.warning(f"基金{fund_code}未找到资产配置数据块")
                return []
            asset_data = json.loads(asset_match.group(1))

            # Subscription/redemption block.
            buyredem_pattern = re.compile(r'var Data_buySedemption =(.*?);', re.DOTALL)
            buyredem_match = buyredem_pattern.search(html_content)
            if not buyredem_match:
                logging.warning(f"基金{fund_code}未找到申购赎回数据块")
                return []
            buyredem_data = json.loads(buyredem_match.group(1))

            # --------------------------
            # 2. Map block structures to fields
            # --------------------------
            # Use the asset-allocation dates as the alignment baseline.
            base_dates = asset_data.get("categories", [])
            if not base_dates:
                logging.warning(f"基金{fund_code}资产配置无日期数据")
                return []

            # Asset allocation: stock/bond/cash ratios and net assets.
            asset_series = {item["name"]: item["data"] for item in asset_data["series"]}
            stock_ratios = asset_series.get("股票占净比", [None]*len(base_dates))
            bond_ratios = asset_series.get("债券占净比", [None]*len(base_dates))
            cash_ratios = asset_series.get("现金占净比", [None]*len(base_dates))
            net_assets = asset_series.get("净资产", [None]*len(base_dates))

            # Subscription/redemption: purchases, redemptions, total shares.
            buyredem_series = {item["name"]: item["data"] for item in buyredem_data["series"]}
            subscriptions = buyredem_series.get("期间申购", [None]*len(base_dates))
            redemptions = buyredem_series.get("期间赎回", [None]*len(base_dates))
            total_shares = buyredem_series.get("总份额", [None]*len(base_dates))

            # Scale fluctuation: scale value and period-over-period change.
            fluct_dates = fluct_data.get("categories", [])
            fluct_series = fluct_data.get("series", [])
            # date -> {scale, change%} mapping.
            fluct_map = {}
            for i, date in enumerate(fluct_dates):
                if i < len(fluct_series):
                    mom = fluct_series[i].get("mom")
                    fluct_map[date] = {
                        "规模": fluct_series[i].get("y"),
                        # mom may be absent/None -- the original .replace call
                        # would then raise and drop the whole fund.
                        "规模变动": mom.replace('%', '') if isinstance(mom, str) else mom
                    }

            # --------------------------
            # 3. Join the blocks by date
            # --------------------------
            fund_info_list = []
            for i, date in enumerate(base_dates):
                # Scale data for this date, if present.
                fluct_item = fluct_map.get(date, {})

                # One record per (fund code, date).
                info = {
                    "基金代码": fund_code,
                    "日期": date,
                    "股票占净比%": f"{stock_ratios[i]}" if stock_ratios[i] is not None else None,
                    "债券占净比%": f"{bond_ratios[i]}" if bond_ratios[i] is not None else None,
                    "现金占净比%": f"{cash_ratios[i]}" if cash_ratios[i] is not None else None,
                    "净资产(亿)": f"{net_assets[i]}" if net_assets[i] is not None else None,
                    "期间申购(亿)": f"{subscriptions[i]}" if subscriptions[i] is not None else None,
                    "期间赎回(亿)": f"{redemptions[i]}" if redemptions[i] is not None else None,
                    "总份额(亿份)": f"{total_shares[i]}" if total_shares[i] is not None else None,
                    "规模(亿)": f"{fluct_item.get('规模')}" if fluct_item.get("规模") is not None else None,
                    "规模变动%": fluct_item.get("规模变动") if fluct_item.get("规模变动") is not None else None
                }
                fund_info_list.append(info)

            # Throttle to avoid hammering the site.
            time.sleep(0.5)

            logging.info(f"基金{fund_code}提取到{len(fund_info_list)}条信息（按日期）")
            return fund_info_list

        except Exception as e:
            logging.error(f"基金{fund_code}信息提取失败: {str(e)}", exc_info=True)
            return []

    def process_single_fund(self, fund_code: str, fund_max_date: Dict[str, datetime]) -> Tuple[str, bool, int]:
        """Sync one fund; returns (fund_code, success, records_queued)."""
        try:
            # Info mode: scrape fund info instead of net values.
            if self.info_mode:
                fund_info_list = self.fetch_fund_info(fund_code)

                if fund_info_list:
                    for item in fund_info_list:
                        self.write_queue.put(item)
                    return (fund_code, True, len(fund_info_list))
                else:
                    return (fund_code, False, 0)

            if fund_code in fund_max_date:
                # Incremental update from the last stored date.
                max_date = fund_max_date[fund_code]
                all_data = self.fetch_api_data(fund_code, max_date)
                new_data = self.filter_new_data(all_data, max_date)
                if new_data:
                    for item in new_data:
                        self.write_queue.put(item)
                    return (fund_code, True, len(new_data))
                else:
                    return (fund_code, True, 0)
            else:
                # Unknown fund: fetch the full (365-day) history.
                all_data = self.fetch_all_fund_data(fund_code)

                if all_data:
                    for item in all_data:
                        self.write_queue.put(item)
                    return (fund_code, True, len(all_data))
                else:
                    return (fund_code, False, 0)

        except Exception as e:
            logging.error(f"[{self.fund_type}] 处理基金{fund_code}错误: {str(e)}")
            return (fund_code, False, 0)

    def run(self) -> Dict[str, Any]:
        """Run the full sync; returns a stats dict (never raises)."""
        result = {
            'type': self.fund_type,
            'success': False,
            'total_processed': 0,
            'total_success': 0,
            'total_data_added': 0,
            'time_spent': 0,
            'error': None
        }

        # Defined before the try block: the except handler below uses it,
        # and the original could hit NameError if the try failed early.
        mode_label = "[信息模式]" if self.info_mode else ""

        try:
            start_time = time.time()
            logging.info(f"{mode_label}[{self.fund_type}] 开始基金数据同步,目标文件: {os.path.basename(self.data_file)}")

            # Load the fund codes to process.
            fund_codes = self.load_fund_codes()
            if not fund_codes:
                logging.warning(f"{mode_label}[{self.fund_type}] 没有基金代码需要处理")
                result['success'] = True
                return result

            # Latest stored date per fund (empty in info mode).
            fund_max_date = self.load_existing_data()

            # Fan out over the worker pool.
            results = []
            with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
                futures = {
                    executor.submit(self.process_single_fund, fund_code, fund_max_date):
                    fund_code for fund_code in fund_codes
                }

                for future in tqdm(as_completed(futures), total=len(fund_codes),
                                  desc=f"{mode_label}[{self.fund_type}] 处理进度"):
                    try:
                        res = future.result()
                        results.append(res)
                    except Exception as e:
                        fund_code = futures[future]
                        logging.error(f"{mode_label}[{self.fund_type}] 获取基金{fund_code}结果错误: {str(e)}")

            # Signal the writer thread to flush and exit, then wait for it.
            self.write_queue.put(None)
            self.write_thread.join()

            # Aggregate statistics.
            result['total_processed'] = len(results)
            result['total_success'] = sum(1 for r in results if r[1])
            result['total_data_added'] = sum(r[2] for r in results)
            result['time_spent'] = time.time() - start_time
            result['success'] = True

            logging.info(f"{mode_label}[{self.fund_type}] 同步完成，耗时{result['time_spent']:.2f}秒")

        except Exception as e:
            result['error'] = str(e)
            logging.error(f"{mode_label}[{self.fund_type}] 同步失败: {str(e)}", exc_info=True)
        finally:
            # NOTE(review): only the calling thread's session is closed here;
            # worker-thread sessions are left to garbage collection.
            if hasattr(self.session_local, 'session'):
                self.session_local.session.close()

        return result


def setup_logging() -> logging.Logger:
    """Create (or fetch) the 'fund_sync' logger with file and console output.

    Idempotent: a second call returns the already-configured logger without
    attaching duplicate handlers.
    """
    log = logging.getLogger('fund_sync')
    log.setLevel(logging.INFO)

    # Already configured on a previous call -- reuse as-is.
    if log.handlers:
        return log

    fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    # File handler first, then console, matching the original attach order.
    for handler in (logging.FileHandler('fund_sync.log'), logging.StreamHandler()):
        handler.setFormatter(fmt)
        log.addHandler(handler)

    return log

def get_previous_trading_day(date=None):
    """Return the previous stock-market trading day before *date*.

    Args:
        date: reference datetime.date; defaults to today.

    Returns:
        datetime.date of the closest earlier trading day (a workday per
        the chinese_calendar package).
    """
    from chinese_calendar import is_workday  # third-party; imported lazily

    if date is None:
        # BUG FIX: this module does `from datetime import datetime`, so the
        # original `datetime.date.today()` raised AttributeError.
        date = datetime.now().date()

    # BUG FIX: `datetime.timedelta` was likewise an AttributeError;
    # `timedelta` is imported directly at the top of the file.
    one_day = timedelta(days=1)
    previous_day = date - one_day

    # Walk backwards until we hit a workday (not a holiday or weekend).
    while not is_workday(previous_day):
        previous_day -= one_day
    return previous_day


def get_file_paths(fund_type: str, info_mode: bool) -> Dict[str, str]:
    """Build the list-file and data-file paths for one fund type.

    The list file is the same in both modes; the data file name depends on
    the mode.  Note: info mode still writes one file *per type* (the
    original comment claiming a single shared info file was inaccurate).
    """
    if info_mode:
        data_file = f"{FUND_CACHE_FILE_PATH}开放基金信息-{fund_type}.csv"
    else:
        data_file = f"{FUND_CACHE_FILE_PATH}历史净值数据-{fund_type}.csv"

    return {
        'list_file': f"{FUND_CACHE_FILE_PATH}开放基金数据{fund_type}.csv",
        'data_file': data_file,
    }


def run_type_sync(fund_type: str, max_workers: int = 10, info_mode: bool = False) -> Dict[str, Any]:
    """Build a FundDataSync for *fund_type* and run it, returning its stats dict."""
    paths = get_file_paths(fund_type, info_mode)
    syncer = FundDataSync(
        fund_type=fund_type,
        list_file=paths['list_file'],
        data_file=paths['data_file'],
        max_workers=max_workers,
        info_mode=info_mode,
    )
    return syncer.run()


def main(get_info: bool = False):
    """Entry point: sync every fund type in FUND_TYPES and log a summary.

    Args:
        get_info: unused legacy flag kept for backward compatibility;
            info mode is controlled by the ``-info`` CLI switch.
            # NOTE(review): confirm no caller relies on it before removing.
    """
    # Parse command-line arguments.
    parser = argparse.ArgumentParser(description='基金数据同步程序')
    parser.add_argument('-info', action='store_true', help='信息模式：直接获取所有基金的全部信息并写入文件')
    parser.add_argument('-p', action='store_true', help='是否开始并行模式')
    args = parser.parse_args()

    logger = setup_logging()
    logger.info("====== 开始所有类型基金数据同步 ======")

    mode_label = "[信息模式]" if args.info else ""

    logger.info(f"{mode_label}====== 开始所有类型基金数据同步 ======")

    # Per-type cap on fetch worker threads.
    type_workers = {
        'hh': 15,
        'zs': 10,
        'gp': 15,
        'zq': 10,
        'qdii': 8,
        'fof': 5
    }

    # Whether the fund types themselves run concurrently.
    parallel_types = args.p

    results = []

    if parallel_types:
        # Process the types in parallel (at most 3 at a time).
        with ThreadPoolExecutor(max_workers=3) as executor:
            futures = {
                executor.submit(run_type_sync, fund_type, type_workers.get(fund_type, 10), args.info):
                fund_type for fund_type in FUND_TYPES
            }

            for future in futures:
                fund_type = futures[future]
                try:
                    result = future.result()
                    results.append(result)
                except Exception as e:
                    logger.error(f"{mode_label}处理{fund_type}类型基金时发生错误: {str(e)}")
    else:
        # Process the types one after another.
        for fund_type in FUND_TYPES:
            try:
                result = run_type_sync(fund_type, type_workers.get(fund_type, 10), args.info)
                results.append(result)
            except Exception as e:
                logger.error(f"{mode_label}处理{fund_type}类型基金时发生错误: {str(e)}")

    # Summary.  BUG FIX: this banner (and the closing one below) was missing
    # its f-prefix, so the literal text "{mode_label}" was logged.
    logger.info(f"\n{mode_label}====== 所有类型基金同步汇总 ======")
    total_funds = 0
    total_success = 0
    total_data = 0
    total_time = 0

    for result in results:
        if not result['success']:
            logger.error(f"{result['type']}: 同步失败 - {result.get('error', '未知错误')}")
            continue

        logger.info(
            f"{mode_label}{result['type']}: "
            f"处理基金{result['total_processed']}只, "
            f"成功{result['total_success']}只, "
            f"新增数据{result['total_data_added']}条, "
            f"耗时{result['time_spent']:.2f}秒"
        )

        total_funds += result['total_processed']
        total_success += result['total_success']
        total_data += result['total_data_added']
        total_time += result['time_spent']

    logger.info(
        f"\n{mode_label}总计: "
        f"处理基金{total_funds}只, "
        f"成功{total_success}只, "
        f"新增数据{total_data}条, "
        f"总耗时{total_time:.2f}秒"
    )
    logger.info(f"{mode_label}====== 所有类型基金数据同步结束 ======")


if __name__ == "__main__":    
    main()
