import itertools
import time
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
import pandas as pd
from colorama import Fore, Style, init
from tqdm import tqdm

# Initialize colorama so ANSI color codes reset automatically after every print.
init(autoreset=True)


def list_to_dataframe(datalist: List[List[Any]]) -> pd.DataFrame:
    """
    Convert a list of rows into a DataFrame.

    Args:
        datalist: list of rows, each in [query1, query2, label] format.

    Returns:
        pandas DataFrame with three columns: query1, query2, label.
        Rows of the wrong length are dropped; labels that cannot be
        converted to int fall back to 0 and are counted as invalid.
    """
    if not datalist:
        print(Fore.YELLOW + "⚠️ 警告: 输入数据为空")
        return pd.DataFrame({'query1': [], 'query2': [], 'label': []})

    question1: List[Any] = []
    question2: List[Any] = []
    label: List[int] = []
    invalid_rows = 0

    print(Fore.CYAN + "🔄 转换列表到DataFrame...")

    # Progress bar over the raw rows.
    with tqdm(total=len(datalist),
              bar_format=f"{Fore.GREEN}{{l_bar}}{Fore.BLUE}{{bar}}{Fore.RESET}{{r_bar}}",
              desc=f"{Fore.YELLOW}🔧 数据处理进度") as pbar:

        for row in datalist:
            # Shape validation: anything but a 3-item row is skipped.
            if len(row) != 3:
                print(Fore.YELLOW + f"⚠️ 警告: 格式不正确的行: {row}")
                invalid_rows += 1
                pbar.update(1)
                continue

            question1.append(row[0])
            question2.append(row[1])

            # Label conversion: also catch TypeError (e.g. label is None or
            # a list), which the original ValueError-only handler let crash.
            try:
                label.append(int(row[2]))
            except (ValueError, TypeError):
                print(Fore.YELLOW + f"⚠️ 警告: 标签转换失败: {row}")
                label.append(0)
                invalid_rows += 1

            pbar.update(1)

    # Assemble the three parallel lists into a frame.
    df = pd.DataFrame({'query1': question1, 'query2': question2, 'label': label})

    print(Fore.GREEN + f"✅ 转换完成! 有效行: {len(df)} | 无效行: {invalid_rows}")
    return df


def dataframe_to_list(dataframe: pd.DataFrame) -> List[List[Any]]:
    """
    Convert a DataFrame back into a list of rows.

    Args:
        dataframe: DataFrame with query1, query2, label columns.

    Returns:
        list of [query1, query2, label] rows with label coerced to int;
        rows whose label cannot be converted are skipped with a warning.
    """
    if dataframe.empty:
        print(Fore.YELLOW + "⚠️ 警告: DataFrame为空")
        return []

    data_list: List[List[Any]] = []
    print(Fore.CYAN + "🔄 转换DataFrame到列表...")

    with tqdm(total=len(dataframe),
              bar_format=f"{Fore.GREEN}{{l_bar}}{Fore.BLUE}{{bar}}{Fore.RESET}{{r_bar}}",
              desc=f"{Fore.YELLOW}🔧 数据处理进度") as pbar:

        # Iterate the columns directly instead of DataFrame.iterrows():
        # iterrows builds a Series per row (slow) and upcasts mixed dtypes
        # across the row, whereas zipping the columns keeps each value's
        # original column dtype.
        for idx, q1, q2, lab in zip(dataframe.index,
                                    dataframe['query1'],
                                    dataframe['query2'],
                                    dataframe['label']):
            try:
                data_list.append([q1, q2, int(lab)])
            except Exception as e:
                # Best-effort: report the bad row and keep going.
                print(Fore.YELLOW + f"⚠️ 转换错误: 行 {idx} - {e}")

            pbar.update(1)

    print(Fore.GREEN + f"✅ 转换完成! 转换行数: {len(data_list)}")
    return data_list


def sentence_set_pair(train_examples: List[List[Any]],
                      file_name: Optional[str] = None,
                      random_state: int = 20) -> List[List[Any]]:
    """
    Generate augmented question pairs from per-query sentence sets.

    Augmentation strategies (applied per unique query1):
    1. Cross-class negatives: "same" x "different" -> label 0
    2. Similar pairs:         "same" x "similar"   -> label 1
    3. In-class positives:    unordered pairs within "same" -> label 2

    Args:
        train_examples: original training rows as [query1, query2, label].
        file_name: optional CSV path where the augmented data is saved.
        random_state: seed used by the per-label sampling step.

    Returns:
        augmented training rows as [query1, query2, label]; empty list on
        empty/invalid input or when no samples could be generated.
    """
    start_time = time.time()

    print(Fore.CYAN + "=" * 70)
    print(Fore.YELLOW + "🚀 开始数据增强处理")
    print(Fore.CYAN + "=" * 70)

    # Guard against empty input early.
    if not train_examples:
        print(Fore.RED + "❌ 错误: 输入数据为空")
        return []

    print(Fore.BLUE + f"📊 原始数据量: {len(train_examples)} 条")

    df_train = list_to_dataframe(train_examples)

    if 'label' not in df_train.columns:
        print(Fore.RED + "❌ 错误: 数据中缺少'label'列")
        return []

    # Keep only rows whose label is one of the expected classes {0, 1, 2}.
    valid_labels = {0, 1, 2}
    unique_labels = set(df_train['label'].unique())
    invalid_labels = unique_labels - valid_labels

    if invalid_labels:
        print(Fore.YELLOW + f"⚠️ 警告: 发现无效标签: {invalid_labels}")
        df_train = df_train[df_train['label'].isin(valid_labels)]
        print(Fore.BLUE + f"  清理后数据量: {len(df_train)} 条")

    # Label distribution of the cleaned input.
    label_counts = df_train['label'].value_counts()
    print(Fore.MAGENTA + "\n📊 原始数据标签分布:")
    print(Fore.CYAN + f"  标签0(不同): {label_counts.get(0, 0)} 条")
    print(Fore.CYAN + f"  标签1(相似): {label_counts.get(1, 0)} 条")
    print(Fore.CYAN + f"  标签2(相同): {label_counts.get(2, 0)} 条")

    # Group once by query1 instead of re-filtering the whole frame for every
    # query (the original per-query boolean mask was O(rows * queries)).
    # groupby sorts keys by default, matching np.unique's sorted order.
    grouped = df_train.groupby('query1', sort=True)
    print(Fore.BLUE + f"\n🔍 唯一query1数量: {grouped.ngroups}")

    questions1: List[Any] = []
    questions2: List[Any] = []
    labels: List[int] = []

    # Per-strategy counters for the summary report.
    strategy_counts = {
        "类别间负样本": 0,
        "相似样本": 0,
        "类别内正样本": 0
    }

    progress_bar = tqdm(
        total=grouped.ngroups,
        bar_format=f"{Fore.GREEN}{{l_bar}}{Fore.BLUE}{{bar}}{Fore.RESET}{{r_bar}}",
        desc=f"{Fore.YELLOW}🚀 数据增强进度",
        unit="query"
    )

    for query_tag, df_query in grouped:
        # Split this query's partner sentences by label.
        query_same_set = df_query[df_query['label'] == 2]['query2'].tolist()
        query_sim_set = df_query[df_query['label'] == 1]['query2'].tolist()
        query_diff_set = df_query[df_query['label'] == 0]['query2'].tolist()

        # Strategy 1: cross-class negatives ("same" x "different" -> 0).
        # The loops are naturally no-ops when either set is empty.
        for query_1 in query_same_set:
            for query_2 in query_diff_set:
                questions1.append(query_1)
                questions2.append(query_2)
                labels.append(0)
                strategy_counts["类别间负样本"] += 1

        # Strategy 2: similar pairs ("same" x "similar" -> 1).
        for query_1 in query_same_set:
            for query_2 in query_sim_set:
                questions1.append(query_1)
                questions2.append(query_2)
                labels.append(1)
                strategy_counts["相似样本"] += 1

        # Strategy 3: in-class positives (unordered pairs within "same" -> 2).
        for query_1, query_2 in itertools.combinations(query_same_set, 2):
            questions1.append(query_1)
            questions2.append(query_2)
            labels.append(2)
            strategy_counts["类别内正样本"] += 1

        progress_bar.set_postfix({
            "新样本": f"{len(labels)}",
            "相同集": f"{len(query_same_set)}",
            "相似集": f"{len(query_sim_set)}",
            "不同集": f"{len(query_diff_set)}"
        })
        progress_bar.update(1)

    progress_bar.close()

    new_df = pd.DataFrame({'query1': questions1, 'query2': questions2, 'label': labels})

    if new_df.empty:
        print(Fore.RED + "❌ 错误: 未能生成任何增强样本")
        return []

    print(Fore.MAGENTA + "\n📊 增强策略统计:")
    for strategy, count in strategy_counts.items():
        print(Fore.CYAN + f"  {strategy}: {count} 条")

    print(Fore.BLUE + f"  生成样本总数: {len(new_df)} 条")

    print(Fore.MAGENTA + "\n⚖️ 开始样本采样...")

    # Per-label sampling fractions to rebalance the generated pairs.
    sample_fracs = {
        2: 0.3,  # keep 30% of positives
        0: 0.1,  # keep 10% of negatives
        1: 0.15  # keep 15% of similar pairs
    }

    print(Fore.CYAN + f"  正样本采样比例: {sample_fracs[2] * 100}%")
    print(Fore.CYAN + f"  负样本采样比例: {sample_fracs[0] * 100}%")
    print(Fore.CYAN + f"  相似样本采样比例: {sample_fracs[1] * 100}%")

    # Sample each label group with the configured fraction and a fixed seed.
    sampled_dfs = []
    for label_value, frac in sample_fracs.items():
        df_group = new_df[new_df['label'] == label_value]
        if not df_group.empty:
            sampled = df_group.sample(frac=frac, random_state=random_state)
            sampled_dfs.append(sampled)
            print(Fore.BLUE + f"  标签{label_value}: 原始 {len(df_group)} → 采样 {len(sampled)}")

    new_df_sampled = pd.concat(sampled_dfs, ignore_index=True) if sampled_dfs else pd.DataFrame()

    if new_df_sampled.empty:
        print(Fore.RED + "❌ 错误: 采样后无有效数据")
        return []

    sampled_counts = new_df_sampled['label'].value_counts()
    print(Fore.MAGENTA + "\n📊 采样后标签分布:")
    print(Fore.CYAN + f"  标签0(不同): {sampled_counts.get(0, 0)} 条")
    print(Fore.CYAN + f"  标签1(相似): {sampled_counts.get(1, 0)} 条")
    print(Fore.CYAN + f"  标签2(相同): {sampled_counts.get(2, 0)} 条")

    # Optionally persist the sampled pairs to CSV (best-effort: failures are
    # reported but do not abort the augmentation).
    if file_name:
        try:
            print(Fore.MAGENTA + f"\n💾 保存增强数据到: {file_name}")
            new_df_sampled.to_csv(file_name, index=False)
            print(Fore.GREEN + f"✅ 文件保存成功! 样本数: {len(new_df_sampled)}")
        except Exception as e:
            print(Fore.RED + f"❌ 保存文件失败: {e}")

    result_list = dataframe_to_list(new_df_sampled)

    # Timing / throughput summary.
    end_time = time.time()
    duration = end_time - start_time
    samples_per_sec = len(result_list) / duration if duration > 0 else 0

    print(Fore.CYAN + "\n⏱️ 性能统计:")
    print(Fore.BLUE + f"  总耗时: {duration:.2f} 秒")
    print(Fore.BLUE + f"  处理速度: {samples_per_sec:.2f} 样本/秒")
    print(Fore.BLUE + f"  原始样本数: {len(train_examples)}")
    print(Fore.BLUE + f"  增强样本数: {len(result_list)}")
    print(Fore.BLUE + f"  增强比例: {len(result_list) / len(train_examples) * 100:.2f}%")

    print(Fore.CYAN + "=" * 70)
    print(Fore.YELLOW + f"✨ 数据增强完成! 生成 {len(result_list)} 条新样本")
    print(Fore.CYAN + "=" * 70)

    return result_list


if __name__ == '__main__':
    try:
        # read_csv must be provided by the project-local utils module.
        from utils import read_csv

        data_path = '../data/KUAKE/KUAKE-QQR_train.json'
        augment_save_path = '../data/KUAKE/KUAKE-QQR_augment.csv'

        print(Fore.CYAN + "=" * 70)
        print(Fore.YELLOW + f"📂 加载数据: {data_path}")

        train_examples = read_csv(data_path)

        if train_examples:
            print(Fore.GREEN + f"✅ 数据加载成功! 样本数: {len(train_examples)}")
            result = sentence_set_pair(train_examples, augment_save_path)
            if result:
                print(Fore.GREEN + f"🎉 增强数据生成完成! 新样本数: {len(result)}")
            else:
                print(Fore.RED + "❌ 增强数据生成失败")
        else:
            print(Fore.RED + "❌ 错误: 加载的数据为空")
    except Exception as e:
        # Top-level boundary: report the failure with a full traceback.
        print(Fore.RED + f"❌ 程序运行出错: {e}")
        import traceback

        traceback.print_exc()