#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
TDX 板块数据处理脚本
读取多个板块文件，按照指定格式存储为 pkl 和 csv 文件
支持增量更新模式：
1. 如果tdx_blocks.pkl不存在，则处理所有txt文件并生成新的pkl文件
2. 如果tdx_blocks.pkl存在，则只添加新增的数据，并更新设立日期为当天
"""

import pandas as pd
import pickle
from datetime import datetime
from pathlib import Path
import os
import json
import re
import sys
import io

# On Windows, re-wrap stdout/stderr as UTF-8 so Chinese text prints without mojibake.
if sys.platform == 'win32':
    try:
        sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace', line_buffering=True)
        sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8', errors='replace', line_buffering=True)
    except Exception:
        pass  # Best effort: keep running with the default streams on failure.

# High-frequency threshold: stocks appearing in more than this many blocks are filtered out.
# NOTE(review): an older comment said 10 but the value is 12 — confirm the intended threshold.
HIGHFREQ = 12
# Low-frequency threshold: stocks appearing in fewer than this many blocks are filtered out.
# NOTE(review): the name looks like a typo for LOWFREQ; kept as-is since it is referenced below.
LOWFREWQ = 5
# Block coverage threshold: a block whose stocks are covered by other blocks above this
# ratio is deemed a weak-feature block and dropped entirely.
# NOTE(review): main() passes a literal 0.8 to check_block_coverage instead of this — confirm.
COVERTHRESHOLD = 0.5
# Minimum stocks per block: blocks with fewer distinct stocks are filtered out.
MIN_STOCK_COUNT = 10
# Manually excluded blocks (any block named here is removed completely).
MANUAL_EXCLUDE_BLOCKS = [
    # DeepSeek concept excluded by default; add further block names as needed.
    "DeepSeek概念",
    "新股与次新股",
    "注册制次新股"
]




def update_stock_count(df):
    """
    Refresh the per-block stock total after filtering.

    Recounts the number of distinct stock codes in each block based on the
    rows actually present in ``df``.

    Args:
        df: DataFrame that has already been filtered.

    Returns:
        The same DataFrame with its '股票个数' column updated in place.
    """
    if df.empty:
        return df

    # Distinct stock codes per block, broadcast back onto every row.
    counts = df.groupby('板块名称')['成分股代码'].transform('nunique')
    df['股票个数'] = counts.astype(int)
    return df


def filter_by_stock_count(df, min_count=MIN_STOCK_COUNT):
    """
    Drop blocks whose distinct stock count falls below ``min_count``.

    Args:
        df: DataFrame holding every block's membership rows.
        min_count: minimum number of distinct stocks a block must contain
            (default MIN_STOCK_COUNT).

    Returns:
        Tuple of (filtered DataFrame, number of rows removed).
    """
    if df.empty:
        return df, 0

    # Distinct stocks per block.
    per_block = df.groupby('板块名称')['成分股代码'].nunique().reset_index()
    per_block.columns = ['板块名称', '股票数']

    # Blocks falling short of the threshold.
    small_blocks = per_block[per_block['股票数'] < min_count]

    total_before = len(df)
    kept = df[~df['板块名称'].isin(small_blocks['板块名称'])].copy()
    removed = total_before - len(kept)

    if removed > 0:
        print(f"\n【股票数过滤】过滤掉 {len(small_blocks)} 个股票数 < {min_count} 的板块...")
        for _, row in small_blocks.iterrows():
            print(f"  × {row['板块名称']} ({row['股票数']} 只股票)")
        print(f"  - 过滤前记录数: {total_before}")
        print(f"  - 过滤后记录数: {len(kept)}")
        print(f"  - 已过滤记录数: {removed}")

    return kept, removed


def filter_by_manual_exclude(df, exclude_blocks):
    """
    Remove blocks that appear in the manual exclusion list.

    Args:
        df: DataFrame holding every block's membership rows.
        exclude_blocks: iterable of block names to drop entirely.

    Returns:
        Tuple of (filtered DataFrame, number of rows removed).
    """
    if not exclude_blocks:
        return df, 0

    total_before = len(df)
    remaining = df[~df['板块名称'].isin(exclude_blocks)].copy()
    dropped = total_before - len(remaining)

    if dropped > 0:
        print(f"\n【手动排除板块】过滤掉 {len(exclude_blocks)} 个板块...")
        # Report each excluded block with the row count it contributed.
        for block_name in sorted(exclude_blocks):
            block_count = len(df[df['板块名称'] == block_name])
            print(f"  × {block_name} ({block_count} 条记录)")
        print(f"  - 过滤前记录数: {total_before}")
        print(f"  - 过滤后记录数: {len(remaining)}")
        print(f"  - 已排除记录数: {dropped}")

    return remaining, dropped


def load_block_file(filepath):
    """
    Read one block-membership file, trying multiple encodings.

    Line format: block_code,block_name,stock_code,stock_name

    GBK-family encodings are tried first (TDX exports GBK natively); UTF-8
    variants cover already-converted files.  "风格板块" (style-block) files
    are restricted to the GBK family, with latin-1 as a last resort followed
    by a latin-1 -> GBK re-decode to repair the Chinese text.

    Args:
        filepath: path to the txt file to parse.

    Returns:
        List of dicts keyed 板块代码/板块名称/成分股代码/成分股名称.
        Rows whose stock name contains 'etf' (any case) or '债' are skipped.

    Raises:
        RuntimeError: if every candidate encoding fails.
    """
    if '风格板块' in str(filepath):
        # Style-block files must not be silently read as latin-1 (mojibake);
        # latin-1 is kept only as a last resort and repaired afterwards.
        encodings = ['gbk', 'gb18030', 'gb2312', 'latin-1']
        print(f"  - 检测到风格板块文件，强制使用GBK系列编码")
    else:
        # GBK first: that is what TDX natively exports.
        encodings = ['gbk', 'gb2312', 'gb18030', 'utf-8', 'utf-8-sig', 'latin-1']

    for encoding in encodings:
        try:
            data = []
            with open(filepath, 'r', encoding=encoding) as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    # Strip an optional "line-number→" prefix, if present.
                    if '→' in line:
                        parts = line.split('→')
                        if len(parts) > 1:
                            line = parts[1].strip()

                    parts = line.split(',')
                    if len(parts) < 4:
                        continue
                    block_code = parts[0].strip()
                    block_name = parts[1].strip()
                    stock_code = parts[2].strip()
                    stock_name = parts[3].strip()

                    # Skip ETFs and bonds.
                    if 'etf' in stock_name.lower() or '债' in stock_name:
                        continue

                    data.append({
                        '板块代码': block_code,
                        '板块名称': block_name,
                        '成分股代码': stock_code,
                        '成分股名称': stock_name
                    })
            print(f"  - 使用 {encoding} 编码成功读取文件")

            # If a style-block file fell through to latin-1, re-encode each text
            # field back to bytes and decode as GBK to repair the Chinese text.
            if encoding == 'latin-1' and '风格板块' in str(filepath):
                print(f"  - 警告：风格板块文件使用latin-1读取，尝试修复中文编码")
                fixed_count = 0
                for record in data:
                    changed = False
                    for field in ('板块名称', '成分股名称'):
                        try:
                            repaired = record[field].encode('latin-1').decode('gbk', errors='ignore')
                        except UnicodeError:
                            # Field not representable in latin-1; leave it alone.
                            continue
                        if repaired != record[field]:
                            record[field] = repaired
                            changed = True
                    # Bug fix: count only records whose text actually changed
                    # (previously every round-trippable record was counted and
                    # stock-name repairs were never counted at all).
                    if changed:
                        fixed_count += 1
                print(f"  - 已修复 {fixed_count} 条记录的编码")

            return data
        except UnicodeDecodeError as e:
            print(f"  - {encoding} 编码解码失败: {str(e)[:80]}")
            continue
        except Exception as e:
            # Non-decoding errors (e.g. I/O): report and try the next encoding.
            print(f"  - 使用 {encoding} 编码读取文件时出错: {e}")
            continue

    # Every encoding failed.
    raise RuntimeError(f"无法使用任何编码读取文件: {filepath}")


def process_all_files(base_path, use_today_date=True):
    """
    Read every block file from ``base_path/txt_archive`` into one DataFrame.

    Args:
        base_path: base data directory; must contain a ``txt_archive`` subdir
            (created beforehand by archive_txt_files.py).
        use_today_date: when True, stamp rows with today's date (YYYYMMDD);
            otherwise use the fixed placeholder '20250101'.

    Returns:
        DataFrame with columns 板块名称/成分股代码/成分股名称/股票代码/
        股票个数/设立日期, or False when no data could be read.
    """
    # The archive directory is mandatory; it holds dated historical backups.
    source_dir = base_path / 'txt_archive'
    if not source_dir.exists():
        print(f"\n❌ 致命错误: txt_archive目录不存在")
        print(f"路径: {source_dir}")
        print(f"请先运行 archive_txt_files.py 进行文件备份")
        return False
    else:
        print(f"从归档目录读取数据:")

    # Base file names (without the date suffix).
    files_base = [
        '行业板块',
        '概念板块',
        '地区板块',
        # '指数板块',
        '风格板块'
    ]

    # Find the most recent date among the archived, date-suffixed txt files.
    date_pattern = r'_(\d{4}-\d{2}-\d{2})\.txt$'
    latest_date = None

    for file_path in source_dir.glob('*.txt'):
        match = re.search(date_pattern, file_path.name)
        if match:
            file_date = match.group(1)
            # ISO dates compare correctly as strings.
            if latest_date is None or file_date > latest_date:
                latest_date = file_date

    # Build the concrete file list, dated when possible.
    if latest_date:
        print(f"检测到最新日期: {latest_date}")
        files = [f'{base}_{latest_date}.txt' for base in files_base]
    else:
        print(f"⚠ txt_archive目录中未找到带日期的txt文件，尝试使用原始文件名")
        files = [f'{base}.txt' for base in files_base]

    # Read every available file.
    all_data = []
    for filename in files:
        filepath = source_dir / filename
        # Bug fix: print the actual path (previously a literal placeholder).
        print(f'读取文件: {filepath}')

        if not filepath.exists():
            print(f'  ⚠ 文件不存在，跳过: {filepath}')
            continue

        file_data = load_block_file(filepath)
        all_data.extend(file_data)
        print(f'  - 读取 {len(file_data)} 条记录')

    # Robustness fix: bail out when nothing was read, otherwise the
    # drop_duplicates below would raise KeyError on a column-less DataFrame.
    if not all_data:
        print(f"\n❌ 致命错误: 未能从任何文件读取到数据")
        return False

    df = pd.DataFrame(all_data)

    # De-duplicate on the (block, stock) pair.
    df = df.drop_duplicates(subset=['板块名称', '成分股代码'], keep='first').copy()
    print(f'去重后记录数: {len(df)}')

    # Stamp the establishment date.
    if use_today_date:
        today = datetime.now().strftime('%Y%m%d')
    else:
        today = '20250101'
    df['设立日期'] = today
    # After de-duplication, 'count' per block equals the distinct stock count.
    df['股票个数'] = df.groupby(['板块名称'])['成分股代码'].transform('count').astype(int)

    # Derive exchange-prefixed ('sh600000') and exchange-suffixed ('600000.SH') codes.
    df['股票代码'] = df['成分股代码'].apply(lambda x: 'sh' + x if x.startswith('6') else 'sz' + x if x.startswith('00') or x.startswith('3') else 'bj' + x if x.startswith('8') or x.startswith('920') else x)
    df['成分股代码'] = df['成分股代码'].apply(lambda x: x + '.SH' if x.startswith('6') else x + '.SZ' if x.startswith('00') or x.startswith('3') else x + '.BJ' if x.startswith('8') or x.startswith('920') else x)
    # Canonical column order.
    df = df[['板块名称', '成分股代码', '成分股名称', '股票代码', '股票个数', '设立日期']]

    # Group rows of the same block together, ordered by date within the block.
    df = df.sort_values(by=['板块名称', '设立日期'], ignore_index=True)

    return df


def filter_by_block_distribution(df, high_freq_threshold=HIGHFREQ, low_freq_threshold=LOWFREWQ, PRINTFlag = False):
    """Filter stocks by how many distinct blocks they appear in.

    Rules:
    - stocks in more than ``high_freq_threshold`` blocks are dropped (too hot)
    - stocks in fewer than ``low_freq_threshold`` blocks are dropped (too cold)

    Args:
        df: DataFrame holding every block's membership rows.
        high_freq_threshold: upper bound; strictly above it means filtered.
        low_freq_threshold: lower bound; strictly below it means filtered.
        PRINTFlag: when True, print each filtered stock individually.

    Returns:
        Tuple of (filtered DataFrame, {stock_code: block_count} of the
        stocks that were removed).
    """
    if df.empty:
        return df, {}

    # Distinct block count per stock.
    dist = df.groupby('成分股代码')['板块名称'].nunique().reset_index()
    dist.columns = ['成分股代码', '板块分布数']

    # Stocks on either side of the allowed band.
    too_hot = dist[dist['板块分布数'] > high_freq_threshold]
    too_cold = dist[dist['板块分布数'] < low_freq_threshold]

    # Union of both groups, keyed by stock code.
    flagged = pd.concat([too_hot, too_cold]).drop_duplicates(subset=['成分股代码'])
    filtered_stocks_dict = dict(zip(flagged['成分股代码'], flagged['板块分布数']))

    if len(too_hot) > 0:
        print(f"\n  【高频股票过滤】检测到 {len(too_hot)} 个高频股票（在超过{high_freq_threshold}个板块分布）:")
        if PRINTFlag:
            for _, row in too_hot.iterrows():
                print(f"    × {row['成分股代码']} ({row['板块分布数']} 个板块)")

    if len(too_cold) > 0:
        print(f"\n  【低频股票过滤】检测到 {len(too_cold)} 个低频股票（在少于{low_freq_threshold}个板块分布）:")
        if PRINTFlag:
            for _, row in too_cold.iterrows():
                print(f"    × {row['成分股代码']} ({row['板块分布数']} 个板块)")

    # Drop every flagged stock from the data.
    kept = df[~df['成分股代码'].isin(filtered_stocks_dict.keys())].copy()

    return kept, filtered_stocks_dict


def load_filter_info(filter_info_file):
    """Load the per-stock filter-info JSON file.

    Args:
        filter_info_file: Path to the JSON file, or None/falsy to skip.

    Returns:
        Dict of the form {
            '成分股代码': {
                '名称': stock name,
                '板块分布数': int,
                '是否被过滤': bool
            }
        }, or {} when the file is missing or unreadable.
    """
    if filter_info_file and filter_info_file.exists():
        try:
            with open(filter_info_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        # Bug fix: was a bare `except:` that also swallowed KeyboardInterrupt
        # and SystemExit; narrow to file/parse errors only.
        except (OSError, ValueError):
            return {}
    return {}


def save_filter_info(filter_info, filter_info_file):
    """Persist the per-stock filter info as pretty-printed UTF-8 JSON.

    Does nothing when ``filter_info_file`` is falsy.
    """
    if not filter_info_file:
        return
    with open(filter_info_file, 'w', encoding='utf-8') as f:
        json.dump(filter_info, f, ensure_ascii=False, indent=2)


def update_filter_info(df_combined, filtered_stocks_dict, old_filter_info=None):
    """Rebuild the filter-info mapping from the current block distribution.

    Args:
        df_combined: merged DataFrame of all block memberships.
        filtered_stocks_dict: stocks currently filtered {code: block count}.
        old_filter_info: previous filter info (accepted for interface
            compatibility; not consulted when rebuilding).

    Returns:
        Dict {code: {'名称', '板块分布数', '是否被过滤'}} covering every
        stock present in ``df_combined``.
    """
    if old_filter_info is None:
        old_filter_info = {}

    # Distinct block count per stock code.
    distribution = df_combined.groupby('成分股代码')['板块名称'].nunique()

    # Last-seen display name per stock code.
    stock_names = dict(zip(df_combined['成分股代码'], df_combined['成分股名称']))

    # One entry per stock, flagged when it appears in the filtered set.
    return {
        code: {
            '名称': stock_names.get(code, ''),
            '板块分布数': int(count),
            '是否被过滤': code in filtered_stocks_dict,
        }
        for code, count in distribution.items()
    }


def check_and_restore_with_filter_info(previously_filtered_dict, current_filtered_dict, df_combined, PRINTFlag = False):
    """Compare the previous filter state with the current one and report changes.

    For stocks previously marked filtered: report whether they stay filtered,
    qualify for restoration, or vanished from the txt data.  For stocks
    previously unfiltered: report those that now need filtering.  All detail
    is printed; only summary counts are returned.

    Args:
        previously_filtered_dict: old info {code: {'名称', '板块分布数', '是否被过滤'}}.
        current_filtered_dict: currently filtered stocks {code: block count}.
        df_combined: merged dataset, used to detect stocks removed from txt.
        PRINTFlag: when True, also print per-stock detail for unchanged cases.

    Returns:
        (restored_count, kept_filtered_count) tuple.
    """
    restored_count = 0
    kept_filtered_count = 0
    removed_count = 0
    newly_filtered_count = 0
    
    # Part 1: stocks that were previously filtered ('是否被过滤': True).
    for stock_code, info in previously_filtered_dict.items():
        # Only consider stocks the previous run had filtered.
        if not info.get('是否被过滤', False):
            continue  # Skip stocks that were not filtered before.
        
        # Stock no longer present in the txt data at all.
        if stock_code not in df_combined['成分股代码'].values:
            print(f"  - {stock_code} ({info['名称']}) 已从txt中删除")
            removed_count += 1
            continue
        
        # Still filtered this run, or eligible for restoration?
        if stock_code in current_filtered_dict:
            old_dist = info.get('板块分布数', 0)
            new_dist = current_filtered_dict[stock_code]
            if PRINTFlag:
                print(f"  - {stock_code} ({info['名称']}) 仍然需过滤 旧分布:{old_dist}个 新分布:{new_dist}个")
            kept_filtered_count += 1
        else:
            # Distribution now satisfies the thresholds: restore the stock.
            print(f"  ✓ {stock_code} ({info['名称']}) 已恢复（不再按过滤条件）")
            restored_count += 1
    
    # Part 2: previously unfiltered stocks that now need filtering.
    print(f"\n【检查未被过滤股票是否现在需要过滤】:")
    unfiltered_stocks = []
    for stock_code, info in previously_filtered_dict.items():
        # Only consider stocks the previous run had NOT filtered.
        if info.get('是否被过滤', False):
            continue  # Skip stocks that were already filtered.
        
        # Now present in the current filter set -> newly filtered.
        if stock_code in current_filtered_dict:
            old_dist = info.get('板块分布数', 0)
            new_dist = current_filtered_dict[stock_code]
            print(f"  ✗ {stock_code} ({info['名称']}) 现在需要过滤 旧分布:{old_dist}个 新分布:{new_dist}个")
            newly_filtered_count += 1
            unfiltered_stocks.append(stock_code)
    
    # Summary of state transitions.
    print(f"\n【状态变化汇总】:")
    if restored_count > 0:
        print(f"  - 共恢复股票: {restored_count} 个")
    if kept_filtered_count > 0:
        print(f"  - 维持过滤股票: {kept_filtered_count} 个")
    if newly_filtered_count > 0:
        print(f"  - 新增过滤股票: {newly_filtered_count} 个")
    else:
        print(f"  - 新增过滤股票: 0 个")
    if removed_count > 0:
        print(f"  - 已删除股票: {removed_count} 个")
    else:
        # When nothing was removed, report how many stocks stayed unfiltered
        # (previously unfiltered and not newly flagged this run).
        print(f"  - 维持未过滤股票: {len([k for k,v in previously_filtered_dict.items() if not v.get('是否被过滤', False) and k not in unfiltered_stocks]) if previously_filtered_dict else 0} 个")
    
    return restored_count, kept_filtered_count




def load_filtered_stocks(filtered_stocks_file):
    """Load previously filtered stock codes as a {code: 0} dict.

    Returns an empty dict when the file is falsy or does not exist.
    """
    if not (filtered_stocks_file and filtered_stocks_file.exists()):
        return {}
    with open(filtered_stocks_file, 'r', encoding='utf-8') as f:
        codes = [ln.strip() for ln in f if ln.strip()]
    # Distribution counts are unknown at load time, so default each to 0.
    return dict.fromkeys(codes, 0)


def save_filtered_stocks(filtered_stocks_dict, filtered_stocks_file):
    """Write the filtered stock codes, one per line, in sorted order.

    Does nothing when either argument is falsy (no file, or nothing to save).
    """
    if not (filtered_stocks_file and filtered_stocks_dict):
        return
    with open(filtered_stocks_file, 'w', encoding='utf-8') as f:
        f.writelines(code + '\n' for code in sorted(filtered_stocks_dict))


def merge_with_existing(df_new, df_existing, PRINTFlag=False, filtered_stocks_to_exclude=None, suppress_output=False):
    """Merge freshly-read block data into the existing dataset.

    Adds new (block, stock) pairs stamped with today's date, removes pairs
    that vanished from the new data, recomputes per-block stock counts, and
    re-sorts the result.  Existing rows win on duplicates, preserving their
    original 设立日期.

    Args:
        df_new: newly read data (not yet filtered); not modified.
        df_existing: existing (already filtered) data; not modified.
        PRINTFlag: verbosity flag kept for interface compatibility (unused here).
        filtered_stocks_to_exclude: stock codes to drop from df_new first so
            the old/new comparison is fair (df_existing already lacks them).
        suppress_output: suppress the added/removed detail listing (used when
            pkl and txt contents are not comparable, to avoid bogus diffs).

    Returns:
        The merged, de-duplicated, re-counted, re-sorted DataFrame.
    """
    # Exclude known-filtered stocks from the new data up front: df_existing
    # no longer contains them, so the diff would otherwise be skewed.
    if filtered_stocks_to_exclude:
        print(f"  - 正从新数据中排除 {len(filtered_stocks_to_exclude)} 个被过滤股票的记录...")
        df_new_filtered = df_new[~df_new['成分股代码'].isin(filtered_stocks_to_exclude)].copy()
        print(f"  - 排除后新数据记录数: {len(df_new_filtered)}")
    else:
        df_new_filtered = df_new.copy()

    # Bug fix: work on a copy so the caller's df_existing is not mutated
    # (previously the temporary '_key' column leaked back to the caller,
    # because the later drop() only rebound the local name).
    df_existing = df_existing.copy()

    # Temporary key identifying a unique (block, stock) pair.
    df_new_filtered['_key'] = df_new_filtered['板块名称'] + '|' + df_new_filtered['成分股代码']
    df_existing['_key'] = df_existing['板块名称'] + '|' + df_existing['成分股代码']

    # Diff the two key sets.
    existing_keys = set(df_existing['_key'].values)
    new_keys = set(df_new_filtered['_key'].values)
    added_keys = new_keys - existing_keys
    deleted_keys = existing_keys - new_keys

    if not suppress_output:
        print(f"  - 识别到 {len(added_keys)} 条新增记录")
        print(f"  - 识别到 {len(deleted_keys)} 条删除记录")

        # List added rows, grouped by block.
        if len(added_keys) > 0:
            print("\n【新增数据明细】:")
            added_data = df_new_filtered[df_new_filtered['_key'].isin(added_keys)][['板块名称', '成分股代码', '成分股名称']]
            for block_name in sorted(added_data['板块名称'].unique()):
                block_added = added_data[added_data['板块名称'] == block_name]
                print(f"  {block_name} ({len(block_added)} 条):")
                for idx, row in block_added.iterrows():
                    print(f"    + {row['成分股代码']:10} {row['成分股名称']}")

        # List removed rows, grouped by block.
        if len(deleted_keys) > 0:
            print("\n【删除数据明细】:")
            deleted_data = df_existing[df_existing['_key'].isin(deleted_keys)][['板块名称', '成分股代码', '成分股名称']]
            for block_name in sorted(deleted_data['板块名称'].unique()):
                block_deleted = deleted_data[deleted_data['板块名称'] == block_name]
                print(f"  {block_name} ({len(block_deleted)} 条):")
                for idx, row in block_deleted.iterrows():
                    print(f"    - {row['成分股代码']:10} {row['成分股名称']}")

    # Newly added pairs are stamped with today's date.
    today = datetime.now().strftime('%Y%m%d')
    df_new_filtered.loc[df_new_filtered['_key'].isin(added_keys), '设立日期'] = today

    # Discard the temporary key columns.
    df_new_filtered = df_new_filtered.drop('_key', axis=1)
    df_existing = df_existing.drop('_key', axis=1)

    # Concatenate, letting existing rows win on duplicates (keep='first')
    # so their original 设立日期 is preserved.
    df_combined = pd.concat([df_existing, df_new_filtered], ignore_index=True)
    df_deduped = df_combined.drop_duplicates(subset=['板块名称', '成分股代码'], keep='first').copy()

    # Synchronize deletions: drop pairs absent from the new data.
    if len(deleted_keys) > 0:
        df_deduped['_temp_key'] = df_deduped['板块名称'] + '|' + df_deduped['成分股代码']
        df_deduped = df_deduped[~df_deduped['_temp_key'].isin(deleted_keys)]
        df_deduped = df_deduped.drop('_temp_key', axis=1)

    # Recompute the per-block stock counts.
    stock_counts = df_deduped.groupby(['板块名称'])['成分股代码'].transform('count').astype(int)
    df_deduped.loc[:, '股票个数'] = stock_counts

    # Canonical column order.
    df_deduped = df_deduped[['板块名称', '成分股代码', '成分股名称', '股票代码', '股票个数', '设立日期']]

    # Group rows of the same block together, ordered by date within the block.
    df_deduped = df_deduped.sort_values(by=['板块名称', '设立日期'], ignore_index=True)

    return df_deduped


def check_block_coverage(df, coverage_threshold=0.8, exclude_blocks=None):
    """
    Check whether each block's stocks are widely covered by other blocks.

    For every block, count how many *other* blocks share at least one stock
    with it.  Coverage ratio = (number of other blocks sharing a stock) /
    (total number of blocks).  A block whose ratio exceeds
    ``coverage_threshold`` lacks a distinctive feature and is flagged.

    NOTE(review): an earlier description divided by (total blocks - 1), but
    the code divides by the total block count — confirm the intended
    denominator before changing either.

    Args:
        df: DataFrame holding every block's membership rows.
        coverage_threshold: ratio above which a block is flagged (default 0.8).
        exclude_blocks: block names to skip during the check.

    Returns:
        (weak_blocks, block_coverage_info): list of flagged block names and a
        per-block dict of 总股票数/涵盖板块数/覆盖率/是否弱特征.
    """
    if len(df) == 0:
        return [], {}
    
    if exclude_blocks is None:
        exclude_blocks = []
    
    # All blocks, minus any explicitly excluded from the check.
    blocks = df['板块名称'].unique()
    blocks_to_check = [b for b in blocks if b not in exclude_blocks]
    total_blocks = len(blocks)  # Denominator of the coverage ratio.
    
    block_coverage_info = {}
    weak_blocks = []
    
    # Precompute: block name -> set of its stock codes (avoids re-filtering
    # the DataFrame inside the pairwise loop below).
    block_to_stocks = {}
    for block_name in blocks:
        block_to_stocks[block_name] = set(df[df['板块名称'] == block_name]['成分股代码'].unique())
    
    print(f"\n【板块覆盖率分析】")
    print(f"检查 {len(blocks_to_check)} 个板块中的股票是否被其他板块涵盖")
    if exclude_blocks:
        print(f"已排除 {len(exclude_blocks)} 个板块")
    print(f"总板块数: {total_blocks}")
    print(f"覆盖率阈值: {coverage_threshold*100:.1f}% - 超过此阈值的板块将被标记为弱特征板块\n")
    
    for block_name in sorted(blocks_to_check):
        # All stocks belonging to the block under inspection.
        block_stocks = set(df[df['板块名称'] == block_name]['成分股代码'].unique())
        block_stock_count = len(block_stocks)
        
        if block_stock_count == 0:
            continue
        
        # Count other blocks sharing at least one stock with this block.
        covering_blocks_count = 0
        for other_block_name, other_stocks in block_to_stocks.items():
            if other_block_name == block_name:
                continue
            # Any non-empty intersection counts as coverage.
            if len(block_stocks & other_stocks) > 0:
                covering_blocks_count += 1
        
        # Coverage ratio = covering blocks / total block count.
        coverage_ratio = covering_blocks_count / total_blocks
        
        block_coverage_info[block_name] = {
            '总股票数': block_stock_count,
            '涵盖板块数': covering_blocks_count,
            '覆盖率': coverage_ratio,
            '是否弱特征': coverage_ratio > coverage_threshold
        }
        
        # Above the threshold: flag as a weak-feature block.
        if coverage_ratio > coverage_threshold:
            weak_blocks.append(block_name)
            status = "❌ 弱特征板块（涵盖率过高）"
        else:
            status = "✓ 保留"
        
        print(f"  {block_name:20} 总股票: {block_stock_count:4} | 涵盖板块: {covering_blocks_count:4} | 覆盖率: {coverage_ratio*100:5.1f}% | {status}")
    
    if weak_blocks:
        print(f"\n【弱特征板块过滤】检测到 {len(weak_blocks)} 个覆盖率 > {coverage_threshold*100:.1f}% 的弱特征板块:")
        for block in weak_blocks:
            coverage = block_coverage_info[block]['覆盖率']
            print(f"  × {block} (覆盖率: {coverage*100:.1f}%)")
    else:
        print(f"\n【弱特征板块过滤】未发现覆盖率 > {coverage_threshold*100:.1f}% 的弱特征板块")
    
    return weak_blocks, block_coverage_info


def check_and_restore_stocks(pkl_df, previously_filtered, current_filtered):
    """Report previously filtered stocks that no longer need filtering.

    Args:
        pkl_df: current (unfiltered) pkl data.
        previously_filtered: stocks filtered last run {code: block count}.
        current_filtered: stocks filtered this run {code: block count}.

    Returns:
        List of stock codes that have been restored.
    """
    restored_list = []
    known_codes = pkl_df['成分股代码'].values
    for stock, distribution in previously_filtered.items():
        # Restorable = no longer filtered AND still present in the data.
        if stock in current_filtered or stock not in known_codes:
            continue
        print(f"  ✓ {stock} 已恢复（原分布: {distribution} 个板块）")
        restored_list.append(stock)
    return restored_list


def main():
    """Run the full TDX block pipeline: read, merge, filter, and save pkl/csv.

    Reads archived block txt files, optionally merges with an existing
    date-stamped pkl, applies the distribution / coverage / size / manual
    filters, and writes the result to dated pkl and csv files.

    Returns:
        True on success, False when a required directory cannot be created.
    """
    # Base data directory (hard-coded Windows path).
    base_path = Path('D:\\Quant\\quantdata\\tdxexport\\')
    # Create the directory if missing; skip when it already exists.
    # NOTE(review): mkdir without parents=True fails when ancestors are missing — confirm.
    if not base_path.exists():
        try:
            base_path.mkdir(exist_ok=True)
            print(f"✓ 创建目录: {base_path}")
        except Exception as e:
            print(f"❌ 创建目录失败: {str(e)}")
            return False
    else:
        print(f"✓ 目录已存在: {base_path}")
    
    # Today's date (YYYY-MM-DD), used for the banner and output file names.
    today_date = datetime.now().strftime('%Y-%m-%d')
    print(f"\n【当前日期】{today_date}")
    print(f"="*60)
    print("\n")
    
    # Output directory for the date-stamped pkl/csv files.
    alldata_path = base_path / 'alldata'
    try:
        alldata_path.mkdir(exist_ok=True)
    except Exception as e:
        print(f"❌ 创建目录失败: {str(e)}")
        return False
    
    # Date-stamped output file names.
    pkl_filename = f'tdx_blocks_{today_date}.pkl'
    csv_filename = f'tdx_blocks_{today_date}.csv'
    pkl_path = alldata_path / pkl_filename
    csv_path = alldata_path / csv_filename
    
    print(f"\n输出文件：")
    print(f"  PKL: {pkl_filename}")
    print(f"  CSV: {csv_filename}")
    print(f"="*60)
    print("\n")
    
    # Scratch directory for the filter-state files.
    tempdata = base_path / 'tempdata'
    try:
        tempdata.mkdir(exist_ok=True)
    except Exception as e:
        print(f"❌ 创建目录失败: {str(e)}")
        return False

    filtered_stocks_path = tempdata / 'filtered_stocks.txt'
    filter_info_path = tempdata / 'filterInfo.json'  # Per-stock filter-state file.
    
    # Load the previous run's filter info, if any.
    old_filter_info = load_filter_info(filter_info_path)
    if len(old_filter_info) > 0:
        print(f"缓存中找到 {len(old_filter_info)} 个之前过滤的股票信息")
    
    # Incremental update when today's pkl already exists; fresh build otherwise.
    if os.path.exists(pkl_path):
        print(f"检测到现有的 {pkl_filename} 文件，正在进行增量更新...")
        # Read the txt files (from txt_archive, which keeps dated backups).
        df_new = process_all_files(base_path, use_today_date=False)
        
        # Load the existing pkl data.
        with open(pkl_path, 'rb') as f:
            df_existing = pickle.load(f)
        
        print(f"  - 现有数据记录数: {len(df_existing)}")
        print(f"  - 新数据记录数: {len(df_new)}")
        
        # Merge first (dedup, no filtering yet).
        # suppress_output=True because the pkl holds filtered data while df_new
        # is unfiltered, so the add/delete listing would show bogus diffs.
        df_combined = merge_with_existing(df_new, df_existing, suppress_output=True)

        print(f"  - 合并后数据记录数: {len(df_combined)}")
        
        # Filter the merged data by block distribution.
        print(f"\n【根据板块分布过滤】:")
        df_final, current_filtered = filter_by_block_distribution(df_combined)
        
        # Report restore/keep/newly-filtered transitions versus the last run.
        if len(old_filter_info) > 0:
            print(f"\n【检查之前过滤股票的状态变化】:")
            check_and_restore_with_filter_info(old_filter_info, current_filtered, df_combined)
    else:
        print(f"未检测到现有的 {pkl_filename} 文件，将创建新的数据文件...")
        # No pkl yet: build the initial dataset straight from txt_archive.
        df_combined = process_all_files(base_path, use_today_date=False)
        
        # Filter the initial data by block distribution.
        print(f"\n【根据板块分布过滤】:")
        df_final, current_filtered = filter_by_block_distribution(df_combined)
    
    # Persist the refreshed filter info for the next run.
    print(f"\n【更新过滤信息】:")
    new_filter_info = update_filter_info(df_combined, current_filtered, old_filter_info)
    save_filter_info(new_filter_info, filter_info_path)
    print(f"  - 已保存 {len(new_filter_info)} 个股票过滤信息至 {filter_info_path}")
    
    # Coverage analysis runs on the raw merged data (df_combined) so the
    # ratios are not distorted by the high/low-frequency filtering above.
    print(f"\n【板块覆盖率分析（基于原始数据）】:")
    weak_blocks, block_coverage_info = check_block_coverage(df_combined, coverage_threshold=0.8, exclude_blocks=None)
    
    # Drop entire weak-feature blocks from the already-filtered result.
    if weak_blocks:
        print(f"\n【弱特征板块过滤】过滤掉 {len(weak_blocks)} 个弱特征板块...")
        before_count = len(df_final)
        df_final = df_final[~df_final['板块名称'].isin(weak_blocks)].copy()
        after_count = len(df_final)
        filtered_count = before_count - after_count
        print(f"  - 过滤前记录数: {before_count}")
        print(f"  - 过滤后记录数: {after_count}")
        print(f"  - 已过滤记录数: {filtered_count}")
    
    # Drop undersized blocks (after coverage filtering, before manual excludes).
    df_final, stock_count_excluded = filter_by_stock_count(df_final, min_count=MIN_STOCK_COUNT)
    
    # Apply the manual exclusion list (after coverage filtering).
    if MANUAL_EXCLUDE_BLOCKS:
        print(f"\n【手动排除板块】检测到 {len(MANUAL_EXCLUDE_BLOCKS)} 个需要排除的板块")
        df_final, _ = filter_by_manual_exclude(df_final, MANUAL_EXCLUDE_BLOCKS)
    
    # Recount per-block stock totals on the fully filtered data.
    df_final = update_stock_count(df_final)
    
    # Final statistics.
    print(f'\n【最终统计信息】:')
    print(f'  - 总记录数: {len(df_final)}')
    print(f'  - 独特板块数: {df_final["板块名称"].nunique()}')
    print(f'  - 独特股票数: {df_final["股票代码"].nunique()}')
    print(f'  - 已过滤股票: {len(current_filtered)}')
    
    # Save the pkl file.
    with open(pkl_path, 'wb') as f:
        pickle.dump(df_final, f)
    print(f'\n✅ 已保存 PKL 文件: {pkl_path}')
    
    # Save the csv file (utf-8-sig so Excel opens it correctly).
    df_final.to_csv(csv_path, index=False, encoding='utf-8-sig')
    print(f'✅ 已保存 CSV 文件: {csv_path}')
    
    # Show a data sample.
    print('\n样本数据（前10行）:')
    print(df_final.head(10))
    
    print(f"\n{'='*60}")
    print(f"【处理完成】")
    print(f"日期: {today_date}")
    print(f"输出文件: {pkl_filename}, {csv_filename}")
    print(f"输出位置: {alldata_path}")
    print(f"{'='*60}")
    
    return True


# Script entry point: run the full pipeline when executed directly.
if __name__ == '__main__':
    main()