# Import required libraries
import json
import math
import time
from collections import Counter

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from colorama import Fore, Style, init
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from tqdm import tqdm
from xgboost import plot_importance
from xgboost.sklearn import XGBClassifier

# 初始化颜色输出
init(autoreset=True)

# 设置中文字体支持
plt.rcParams['font.sans-serif'] = ['SimHei']  # 用来正常显示中文标签
plt.rcParams['axes.unicode_minus'] = False  # 用来正常显示负号


# 定义分词器（字符级分词）
def tokenizer(x):
    """将字符串拆分为单个字符"""
    return [y for y in x]


# 计算余弦相似度
def cos_dist(a, b):
    """计算两个向量的余弦相似度"""
    if len(a) != len(b):  # 确保向量长度相同
        return 0.0

    part_up = 0.0  # 分子部分（点积）
    a_sq = 0.0  # 向量a的平方和
    b_sq = 0.0  # 向量b的平方和

    # 遍历向量元素计算点积和平方和
    for a1, b1 in zip(a, b):
        part_up += a1 * b1
        a_sq += a1 ** 2
        b_sq += b1 ** 2

    part_down = math.sqrt(a_sq * b_sq)  # 分母部分（模的乘积）
    if part_down == 0.0:  # 避免除以零
        return 0.0
    else:
        return part_up / part_down  # 返回余弦相似度


# 统计词频
def count_word(word, token):
    """统计单词在token列表中出现的次数"""
    return sum(1 for i in token if i == word)


# 构建临时词汇表
def temp_vocab(query1, query2):
    """为两个查询构建联合词汇表并计算词频"""
    token1 = tokenizer(query1)  # 查询1分词
    token2 = tokenizer(query2)  # 查询2分词
    token1set = set(token1)  # 转为集合去重
    token2set = set(token2)
    token = token1set.union(token2set)  # 取并集作为词汇表

    # 初始化词频字典
    vocab_dic1 = {}  # 查询1的词频
    vocab_dic2 = {}  # 查询2的词频

    # 计算每个词在查询1中的频率
    for word in token:
        vocab_dic1[word] = count_word(word, token1)

    # 计算每个词在查询2中的频率
    for word in token:
        vocab_dic2[word] = count_word(word, token2)

    return vocab_dic1, vocab_dic2


# 计算Jaccard相似度
def get_jaccard(a, b):
    """计算两个token列表的Jaccard相似度"""
    set_a = set(a)
    set_b = set(b)

    # 计算交集和并集
    intersection = set_a & set_b
    union = set_a | set_b

    # 返回交集大小和Jaccard相似度
    return len(intersection), len(intersection) / len(union) if union else 0.0


# 加载数据集并提取特征
def load_dataset(path, dataset_type="训练集"):
    """从JSON文件加载数据并提取特征"""
    print(Fore.CYAN + f"📂 加载{dataset_type}数据: {path}")
    start_time = time.time()

    contents = []
    with open(path, 'r', encoding='UTF-8') as input_data:
        try:
            json_content = json.load(input_data)  # 加载JSON数据
        except json.JSONDecodeError as e:
            print(Fore.RED + f"❌ JSON解析错误: {e}")
            return np.array([]), np.array([]) if dataset_type != "测试集" else np.array([])

        # 初始化存储列表
        labels = []  # 标签
        jacs = []  # Jaccard相似度
        coss = []  # 余弦相似度
        counts = []  # 共同字符数（交集大小）
        query_lengths = []  # 查询长度特征

        # 进度条配置
        progress_bar = tqdm(
            total=len(json_content),
            bar_format=f"{Fore.GREEN}{{l_bar}}{Fore.BLUE}{{bar}}{Fore.RESET}{{r_bar}}",
            desc=f"{Fore.YELLOW}🔍 提取{dataset_type}特征",
            unit="样本"
        )

        # 处理每个数据块
        for i, block in enumerate(json_content):
            query1 = block.get('query1', '')  # 第一个查询，使用get避免KeyError
            query2 = block.get('query2', '')  # 第二个查询

            # 获取标签并处理异常
            label = block.get('label', None)
            if label is not None:
                try:
                    # 处理可能的字符串标签
                    label_val = int(label) if isinstance(label, (int, float, str)) and str(label).isdigit() else None
                    if label_val is None:
                        print(Fore.YELLOW + f"⚠️ 警告: 样本 {i} 有无效标签 '{label}', 使用默认值0")
                        label_val = 0
                except (ValueError, TypeError):
                    print(Fore.YELLOW + f"⚠️ 警告: 样本 {i} 有无效标签 '{label}', 使用默认值0")
                    label_val = 0

            # 构建词汇表并获取词频
            vocab1, vocab2 = temp_vocab(query1, query2)

            # 将词频字典转换为向量
            words_line1 = list(vocab1.values())
            words_line2 = list(vocab2.values())

            # 分词并计算Jaccard
            token1 = tokenizer(query1)
            token2 = tokenizer(query2)
            count, jac = get_jaccard(token1, token2)  # 获取交集大小和Jaccard

            # 计算余弦相似度
            cos = cos_dist(words_line1, words_line2)

            # 添加查询长度特征
            query_lengths.append((len(query1) + len(query2)) / 2)  # 平均长度

            # 存储特征
            coss.append(cos)
            counts.append(count)
            jacs.append(jac)

            # 如果是训练集或验证集，存储标签
            if dataset_type != "测试集" and label is not None:
                labels.append(label_val)

            # 更新进度条
            progress_bar.set_postfix({
                "特征": f"余弦:{cos:.2f} Jaccard:{jac:.2f} 共同字符:{count}"
            })
            progress_bar.update(1)

        progress_bar.close()

    # 将特征组合为数组并转置（每行一个样本）
    features = np.array((coss, counts, jacs, query_lengths)).T

    # 性能统计
    end_time = time.time()
    duration = end_time - start_time
    samples_per_sec = len(json_content) / duration if duration > 0 else 0

    print(Fore.GREEN + f"✅ {dataset_type}特征提取完成! 样本数: {len(json_content)}")
    print(Fore.BLUE + f"  耗时: {duration:.2f}秒 | 速度: {samples_per_sec:.2f}样本/秒")

    if dataset_type != "测试集":
        return features, np.array(labels)
    return features


# 可视化特征分布
def visualize_features(features, labels, title="特征分布"):
    """绘制特征分布图"""
    print(Fore.CYAN + f"\n📊 可视化特征分布: {title}")

    # 创建子图
    plt.figure(figsize=(15, 10))

    # 特征名称
    feature_names = ["余弦相似度", "共同字符数", "Jaccard相似度", "平均查询长度"]

    # 绘制每个特征的分布
    for i, name in enumerate(feature_names):
        plt.subplot(2, 2, i + 1)
        for label in np.unique(labels):
            # 确保有足够的数据点
            if np.sum(labels == label) > 1:
                sns.kdeplot(features[labels == label, i], label=f"标签 {label}", fill=True)
        plt.title(f"{name}分布")
        plt.xlabel(name)
        plt.ylabel("密度")
        plt.legend()

    plt.tight_layout()
    try:
        plt.savefig(f"feature_distribution_{title}.png")
        print(Fore.GREEN + f"✅ 特征分布图已保存为: feature_distribution_{title}.png")
    except Exception as e:
        print(Fore.RED + f"❌ 保存特征分布图失败: {e}")


# 训练XGBoost模型
def train_xgboost(features, labels):
    """训练XGBoost模型并返回训练好的分类器"""
    print(Fore.CYAN + "\n" + "=" * 70)
    print(Fore.YELLOW + "🧠 开始训练XGBoost模型")
    print(Fore.CYAN + "=" * 70)

    start_time = time.time()

    # 定义并训练XGBoost模型
    clf = XGBClassifier(
        colsample_bytree=0.8,  # 每棵树使用的特征比例
        learning_rate=0.1,  # 学习率
        max_depth=5,  # 树的最大深度
        subsample=1,  # 样本采样比例
        n_estimators=100,  # 树的数量
        eval_metric='mlogloss',  # 评估指标
        use_label_encoder=False,
        random_state=42  # 随机种子
    )

    print(Fore.BLUE + "🔄 正在训练模型...")
    clf.fit(features, labels)  # 训练模型

    # 训练时间统计
    end_time = time.time()
    duration = end_time - start_time

    print(Fore.GREEN + f"✅ 模型训练完成! 耗时: {duration:.2f}秒")
    print(Fore.BLUE + f"  树的数量: {clf.n_estimators}")
    print(Fore.BLUE + f"  最大深度: {clf.max_depth}")

    # 特征重要性可视化
    plt.figure(figsize=(10, 6))
    try:
        plot_importance(clf, max_num_features=10, importance_type='weight')
        plt.title("特征重要性")
        plt.tight_layout()
        plt.savefig("feature_importance.png")
        print(Fore.GREEN + f"✅ 特征重要性图已保存为: feature_importance.png")
    except Exception as e:
        print(Fore.RED + f"❌ 特征重要性可视化失败: {e}")

    return clf


# 评估模型性能
def evaluate_model(clf, features, labels, dataset_name="验证集"):
    """评估模型性能并生成报告"""
    print(Fore.CYAN + f"\n📊 评估模型在{dataset_name}上的性能")

    start_time = time.time()

    # 预测
    predict = clf.predict(features)

    # 计算准确率
    acc = accuracy_score(labels, predict)

    # 生成分类报告
    report = classification_report(labels, predict, target_names=['标签0', '标签1', '标签2'], zero_division=0)

    # 生成混淆矩阵
    cm = confusion_matrix(labels, predict)

    # 评估时间统计
    end_time = time.time()
    duration = end_time - start_time

    print(Fore.GREEN + f"✅ {dataset_name}评估完成! 耗时: {duration:.2f}秒")
    print(Fore.BLUE + f"  {dataset_name}准确率: {acc:.4f}")
    print(Fore.CYAN + "\n分类报告:")
    print(report)

    # 可视化混淆矩阵
    plt.figure(figsize=(8, 6))
    try:
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                    xticklabels=['预测0', '预测1', '预测2'],
                    yticklabels=['真实0', '真实1', '真实2'])
        plt.title(f"{dataset_name}混淆矩阵")
        plt.ylabel('真实标签')
        plt.xlabel('预测标签')
        plt.tight_layout()
        plt.savefig(f"confusion_matrix_{dataset_name}.png")
        print(Fore.GREEN + f"✅ 混淆矩阵已保存为: confusion_matrix_{dataset_name}.png")
    except Exception as e:
        print(Fore.RED + f"❌ 混淆矩阵可视化失败: {e}")

    return acc


# 主函数
def main():
    print(Fore.CYAN + "=" * 70)
    print(Fore.YELLOW + "🌟 基于字符特征的XGBoost查询相似度分类器")
    print(Fore.CYAN + "=" * 70)

    # 文件路径
    train_path = '../data/KUAKE/KUAKE-QQR_train.json'
    dev_path = '../data/KUAKE/KUAKE-QQR_dev.json'
    test_path = '../data/KUAKE/KUAKE-QQR_test.json'
    output_path = '../prediction_result/KUAKE-QQR_test_pred_jac.json'

    # 1. 加载并处理训练数据
    train_features, train_labels = load_dataset(train_path, "训练集")

    # 检查训练数据是否有效
    if len(train_features) == 0 or len(train_labels) == 0:
        print(Fore.RED + "❌ 错误: 训练数据加载失败，程序终止")
        return

    # 可视化训练集特征分布
    visualize_features(train_features, train_labels, "训练集")

    # 2. 训练XGBoost模型
    clf = train_xgboost(train_features, train_labels)

    # 3. 加载并处理验证数据
    dev_features, dev_labels = load_dataset(dev_path, "验证集")

    # 检查验证数据是否有效
    if len(dev_features) > 0 and len(dev_labels) > 0:
        # 评估验证集性能
        dev_acc = evaluate_model(clf, dev_features, dev_labels, "验证集")
    else:
        print(Fore.YELLOW + "⚠️ 警告: 验证数据加载失败，跳过验证评估")
        dev_acc = 0.0

    # 4. 处理测试集并输出预测结果
    print(Fore.CYAN + "\n" + "=" * 70)
    print(Fore.YELLOW + "🔮 开始测试集预测")
    print(Fore.CYAN + "=" * 70)

    # 加载测试数据
    test_features = load_dataset(test_path, "测试集")

    # 检查测试数据是否有效
    if len(test_features) == 0:
        print(Fore.RED + "❌ 错误: 测试数据加载失败，程序终止")
        return

    # 预测测试集标签
    test_predictions = clf.predict(test_features)

    # 保存预测结果
    print(Fore.MAGENTA + f"\n💾 保存预测结果到: {output_path}")

    try:
        with open(test_path, 'r', encoding='UTF-8') as input_data, \
                open(output_path, 'w', encoding='UTF-8') as output_data:

            json_content = json.load(input_data)  # 加载测试数据

            # 进度条配置
            progress_bar = tqdm(
                total=len(json_content),
                bar_format=f"{Fore.GREEN}{{l_bar}}{Fore.BLUE}{{bar}}{Fore.RESET}{{r_bar}}",
                desc=f"{Fore.YELLOW}📝 写入预测结果",
                unit="样本"
            )

            # 更新每个样本的标签
            for i, block in enumerate(json_content):
                # 确保索引在预测结果范围内
                if i < len(test_predictions):
                    block['label'] = str(test_predictions[i])
                else:
                    block['label'] = "0"  # 默认值
                progress_bar.update(1)

            progress_bar.close()

            # 保存JSON文件
            json.dump(json_content, output_data, indent=2, ensure_ascii=False)
            print(Fore.GREEN + f"✅ 预测结果保存成功! 样本数: {len(json_content)}")
    except Exception as e:
        print(Fore.RED + f"❌ 保存预测结果失败: {e}")

    print(Fore.CYAN + "=" * 70)
    print(Fore.YELLOW + f"✨ 所有操作完成!")
    print(Fore.CYAN + "=" * 70)


if __name__ == "__main__":
    main()