import os
from collections import Counter
from datetime import datetime

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
from tqdm import tqdm
from transformers import BertTokenizer, BertModel
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix, mean_absolute_error, matthews_corrcoef
from sklearn.model_selection import KFold, StratifiedKFold
from torch.utils.tensorboard import SummaryWriter
import seaborn as sns
import matplotlib.pyplot as plt

# Initialize a module-level TensorBoard writer.
# NOTE(review): train_and_log / the K-fold helpers each create their own
# timestamped writers; this default one appears unused — confirm before removal.
writer = SummaryWriter(log_dir="runs/experiment_1")


def load_bert_model(bert_model_path, tokenizer_name='bert-base-uncased'):
    """
    Load a pretrained BERT model and its tokenizer.

    :param bert_model_path: str, path (or hub id) of the BERT model weights
    :param tokenizer_name: str, tokenizer to load; defaults to
        'bert-base-uncased' (previously hard-coded — kept as the default for
        backward compatibility, but now overridable when the fine-tuned
        weights use a different vocabulary)
    :return: (tokenizer, bert_model), the loaded BERT tokenizer and model
    """
    tokenizer = BertTokenizer.from_pretrained(tokenizer_name)
    bert_model = BertModel.from_pretrained(bert_model_path)
    return tokenizer, bert_model

def load_and_process_data(text_csv_path, video_features_dir, tokenizer, bert_model, cache_path="data/cmu_mosei/tensor", max_video_feature_dim=1463, name='name'):
    """
    Load and align text data with video features, returning processed tensors
    (progress-bar version).

    :param text_csv_path: str, path to the text CSV containing 'name',
        'english' and 'label' columns
    :param video_features_dir: str, directory of per-sample video feature CSV
        files named "<name>_features.csv"
    :param tokenizer: BERT tokenizer used to encode the text
    :param bert_model: loaded BERT model used to extract text features
    :param cache_path: str, directory where processed tensors are cached
    :param max_video_feature_dim: int, target video feature width (default
        1463 — the previous docstring wrongly said 1464); wider rows are
        truncated, narrower ones zero-padded
    :param name: str, tag appended to the text/label cache file names
    :return: (video_features, text_features, labels) tensors
    """
    # Load the text data
    df_text = pd.read_csv(text_csv_path)
    aligned_features = []

    # Fixed cache file locations.
    # NOTE(review): the video cache is NOT keyed by `name`, so two datasets
    # sharing `cache_path` would collide on it — confirm this is intended.
    cache_files = {
        'video': f'{cache_path}/video_cache.pt',
        'text': f'{cache_path}/text_cache_{name}.pt',
        'labels': f'{cache_path}/labels_cache_{name}.pt'
    }
    print(cache_files['video'])

    # If every cache file exists, load and return directly
    if all(os.path.exists(f) for f in cache_files.values()):
        print("直接加载缓存数据")
        return (
            torch.load(cache_files['video']),
            torch.load(cache_files['text']),
            torch.load(cache_files['labels'])
        )

    # Progress bar over all samples
    with tqdm(total=len(df_text),
              desc="🚀 处理多模态数据",
              unit="样本",
              bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]") as pbar:

        for _, row in df_text.iterrows():
            sample_name = row['name']
            english_text = row['english']
            label = row['label']

            # Basic progress info
            pbar.set_postfix_str(f"处理中: {sample_name[:12]}...", refresh=False)

            # Skip rows with empty text
            if pd.isna(english_text):
                pbar.set_postfix_str(f"跳过空文本: {sample_name[:12]}...", refresh=True)
                pbar.update(1)
                continue

            # Path of the matching video feature file
            video_feature_file = os.path.join(video_features_dir, f"{sample_name}_features.csv")
            if not os.path.exists(video_feature_file):
                pbar.set_postfix_str(f"缺失视频文件: {sample_name[:12]}...", refresh=True)
                pbar.update(1)
                continue

            try:
                # ================= Video feature processing =================
                # Keep numeric columns only; NaNs become 0
                video_feature = pd.read_csv(video_feature_file).select_dtypes(include=['float', 'int']).fillna(0)
                video_feature_tensor = torch.tensor(video_feature.values, dtype=torch.float32)

                # Show the dimension change in the progress bar
                dim_info = f"视频维度: {video_feature_tensor.shape} → ({video_feature_tensor.shape[0]}, {max_video_feature_dim})"
                pbar.set_postfix_str(f"{dim_info} | 样本: {sample_name[:12]}...", refresh=False)

                # Align the feature width: zero-pad or truncate
                if video_feature_tensor.shape[1] < max_video_feature_dim:
                    padding = max_video_feature_dim - video_feature_tensor.shape[1]
                    video_feature_tensor = torch.nn.functional.pad(video_feature_tensor, (0, padding))
                elif video_feature_tensor.shape[1] > max_video_feature_dim:
                    video_feature_tensor = video_feature_tensor[:, :max_video_feature_dim]

                # ================= Text feature processing =================
                inputs = tokenizer(english_text,
                                   return_tensors='pt',
                                   padding='max_length',
                                   truncation=True,
                                   max_length=512)
                with torch.no_grad():
                    outputs = bert_model(**inputs)
                text_embedding = outputs.last_hidden_state[:, 0, :]  # [CLS] token embedding

                # ================= Alignment =================
                aligned_features.append((
                    video_feature_tensor.mean(dim=0),  # temporal mean over video frames
                    text_embedding.squeeze(0),  # drop the batch dimension
                    label
                ))

                # Report success
                pbar.set_postfix_str(f"已处理: {sample_name[:12]}... ✅", refresh=True)

            except Exception as e:
                # Best-effort: report the failure and continue with the next sample
                pbar.set_postfix_str(f"处理失败: {sample_name[:12]}... ❌ ({str(e)})", refresh=True)
            finally:
                pbar.update(1)

    # Validate that at least one sample was produced
    if not aligned_features:
        raise ValueError("💥 严重错误：未找到任何有效数据！请检查：\n"
                         "1. 文件路径是否正确\n"
                         "2. 特征文件命名是否规范\n"
                         "3. 数据内容是否完整")

    # Stack everything into batch tensors
    video_features = torch.stack([f[0] for f in aligned_features])
    text_features = torch.stack([f[1] for f in aligned_features])
    labels = torch.tensor([f[2] for f in aligned_features])

    # Save the results.
    # Bug fix: create the cache directory first — torch.save raises if the
    # target directory does not exist.
    print("保存处理结果")
    os.makedirs(cache_path, exist_ok=True)
    torch.save(video_features, cache_files['video'])
    torch.save(text_features, cache_files['text'])
    torch.save(labels, cache_files['labels'])

    # Use tqdm.write so the progress bar is not garbled
    tqdm.write("\n" + "=" * 50)
    tqdm.write(f"📊 最终数据集统计：")
    tqdm.write(f"  视频特征维度：{video_features.shape}")
    tqdm.write(f"  文本特征维度：{text_features.shape}")
    tqdm.write(f"  标签数量：{len(labels)}")
    tqdm.write("=" * 50)

    return video_features, text_features, labels


def build_model(visual_dim, text_dim, num_classes):
    """
    Build the multimodal emotion-analysis model.

    :param visual_dim: int, dimensionality of the video features
    :param text_dim: int, dimensionality of the text features
    :param num_classes: int, number of target classes
    :return: the constructed model (nn.Module)
    """

    class MultiModalEmotionModel(nn.Module):
        """Two-branch late-fusion classifier over visual and text features."""

        def __init__(self, visual_dim, text_dim, num_classes):
            super(MultiModalEmotionModel, self).__init__()
            # Each modality is projected to a shared 128-d space.
            self.visual_branch = nn.Sequential(
                nn.Linear(visual_dim, 128),
                nn.ReLU(),
                nn.Dropout(0.3),
            )
            self.text_branch = nn.Sequential(
                nn.Linear(text_dim, 128),
                nn.ReLU(),
                nn.Dropout(0.3),
            )
            # Classification head over the concatenated 256-d representation.
            self.fusion = nn.Sequential(
                nn.Linear(128 * 2, 64),
                nn.ReLU(),
                nn.Dropout(0.3),
                nn.Linear(64, num_classes),
            )

        def forward(self, visual_input, text_input):
            joint = torch.cat(
                (self.visual_branch(visual_input), self.text_branch(text_input)),
                dim=1,
            )
            return self.fusion(joint)

    return MultiModalEmotionModel(visual_dim, text_dim, num_classes)


def train_and_log(model, video_features, text_features, labels, num_classes=6, lr=0.001, epochs=300, val_data=None):
    """
    Train the model full-batch and log the training loss to TensorBoard.

    :param model: nn.Module, model to train
    :param video_features: torch.Tensor, video feature tensor
    :param text_features: torch.Tensor, text feature tensor
    :param labels: torch.Tensor, label tensor
    :param num_classes: int, number of classes (currently unused; kept for
        interface compatibility)
    :param lr: float, learning rate
    :param epochs: int, number of training epochs
    :param val_data: tuple, optional validation data
        (val_video_features, val_text_features, val_labels).
        NOTE(review): currently unused — no validation pass is performed.
    :return: the trained model
    """
    # Timestamped log directory, e.g. runs/20231010_143000
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    log_dir = f"runs/{timestamp}"
    writer = SummaryWriter(log_dir=log_dir)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.AdamW(model.parameters(), lr=lr)

    try:
        for epoch in range(epochs):
            model.train()
            optimizer.zero_grad()
            outputs = model(video_features, text_features)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Log the training loss
            writer.add_scalar('Loss/train', loss.item(), epoch)

            # Print progress every 10 epochs
            if epoch % 10 == 0:
                print(f"Epoch {epoch}, Loss: {loss.item():.4f}")
    finally:
        # Bug fix: the writer was never closed — flush and release the event
        # file even if training raises.
        writer.close()

    return model


def evaluate_model(model, video_features, text_features, labels):
    """
    Evaluate the model: accuracy, F1 scores, confusion matrix and validation loss.

    :param model: nn.Module, trained model
    :param video_features: torch.Tensor, video feature tensor
    :param text_features: torch.Tensor, text feature tensor
    :param labels: torch.Tensor, label tensor
    :return: None; results are printed and the confusion matrix is displayed
        via matplotlib (plt.show() blocks under interactive backends)
    """
    model.eval()

    val_loss = 0.0
    with torch.no_grad():
        # Forward pass (single full batch)
        outputs = model(video_features, text_features)

        # Cross-entropy loss on the whole evaluation set
        criterion = nn.CrossEntropyLoss()
        loss = criterion(outputs, labels)
        val_loss += loss.item()

        # Hard predictions (argmax over class logits)
        _, predicted = torch.max(outputs, 1)
        true_labels = labels.cpu().numpy()
        preds = predicted.cpu().numpy()

        # Classification metrics
        accuracy = accuracy_score(true_labels, preds)
        f1_macro = f1_score(true_labels, preds, average='macro')
        f1_weighted = f1_score(true_labels, preds, average='weighted')
        cm = confusion_matrix(true_labels, preds)

        # Print the evaluation results
        print(f"Validation Loss: {val_loss:.4f}")
        print(f"Accuracy: {accuracy:.2f}")
        print(f"F1 Score (Macro): {f1_macro:.2f}")
        print(f"F1 Score (Weighted): {f1_weighted:.2f}")
        print("Confusion Matrix:")
        print(cm)

        # Plot the confusion matrix
        plt.figure(figsize=(10, 8), dpi=300)
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', cbar=False)
        plt.xlabel('Predicted Labels', fontsize=16)
        plt.ylabel('True Labels', fontsize=16)
        plt.title('Confusion Matrix', fontsize=18)
        plt.show()


def k_fold_cross_validation(video_features, text_features, labels, num_classes=6, k=5, lr=0.0001, epochs=100):
    """
    K-fold cross-validation over the multimodal features.

    :param video_features: torch.Tensor, (N, visual_dim) video features
    :param text_features: torch.Tensor, (N, text_dim) text features
    :param labels: torch.Tensor, (N,) integer class labels
    :param num_classes: int, number of target classes
    :param k: int, number of folds
    :param lr: float, learning rate
    :param epochs: int, epochs per fold
    :return: None; metrics are logged to TensorBoard and printed
    """
    kf = KFold(n_splits=k, shuffle=True, random_state=42)
    fold = 1

    for train_idx, val_idx in kf.split(video_features, labels):
        # Per-fold TensorBoard writer
        fold_writer = SummaryWriter(log_dir=f"runs/mer/fold_{fold}")

        # Split the data
        train_video = video_features[train_idx]
        train_text = text_features[train_idx]
        train_labels = labels[train_idx]

        val_video = video_features[val_idx]
        val_text = text_features[val_idx]
        val_labels = labels[val_idx]

        # Mini-batch loader for training
        train_dataset = TensorDataset(train_video, train_text, train_labels)
        train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)

        # Fresh model/optimizer per fold.
        # Bug fix: infer the input widths from the data instead of
        # hard-coding 1463/768, so other feature sets work too.
        model = build_model(visual_dim=video_features.shape[1],
                            text_dim=text_features.shape[1],
                            num_classes=num_classes)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.AdamW(model.parameters(), lr=lr)

        # Training loop
        print(f"\n=== Fold {fold}/{k} 训练开始 ===")
        for epoch in range(epochs):
            # Training phase
            model.train()
            epoch_loss = 0.0
            for batch_v, batch_t, batch_l in train_loader:
                optimizer.zero_grad()
                outputs = model(batch_v, batch_t)
                loss = criterion(outputs, batch_l)
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()

            # Log the mean training loss
            avg_train_loss = epoch_loss / len(train_loader)
            fold_writer.add_scalar('Loss/train', avg_train_loss, epoch)

            # Validation phase (full batch)
            model.eval()
            with torch.no_grad():
                val_outputs = model(val_video, val_text)
                val_loss = criterion(val_outputs, val_labels)
                _, predicted = torch.max(val_outputs, 1)

                # Metrics
                acc = accuracy_score(val_labels.numpy(), predicted.numpy())
                f1 = f1_score(val_labels.numpy(), predicted.numpy(), average='macro')

                # Log validation metrics
                fold_writer.add_scalar('Loss/val', val_loss.item(), epoch)
                fold_writer.add_scalar('Accuracy/val', acc, epoch)
                fold_writer.add_scalar('F1/val', f1, epoch)

            # Periodic progress report (a stray debug print of the train-set
            # size was removed here)
            if (epoch + 1) % 10 == 0:
                print(f"Epoch {epoch + 1}/{epochs}")
                print(f"Train Loss: {avg_train_loss:.4f} | Val Loss: {val_loss.item():.4f}")
                print(f"Val Acc: {acc:.4f} | Val F1: {f1:.4f}\n")

        fold_writer.close()
        fold += 1


def k_fold_cross_validation_config(video_features, text_features, labels, num_classes=6, k=5, lr=0.0001, epochs=100,
                            model_name="default"):
    """
    K-fold cross-validation with extended metrics (MAE, Matthews correlation)
    and per-fold confusion-matrix figures.

    :param video_features: torch.Tensor, (N, visual_dim) video features
    :param text_features: torch.Tensor, (N, text_dim) text features
    :param labels: torch.Tensor, (N,) integer class labels
    :param num_classes: int, number of target classes
    :param k: int, number of folds
    :param lr: float, learning rate
    :param epochs: int, epochs per fold
    :param model_name: str, tag used in log/result directory names
    :return: None; metrics go to TensorBoard, figures to results/
    """
    kf = KFold(n_splits=k, shuffle=True, random_state=42)
    fold = 1
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    for train_idx, val_idx in kf.split(video_features, labels):

        # Per-run, per-fold TensorBoard writer
        fold_writer = SummaryWriter(log_dir=f"runs/{model_name}/{timestamp}/fold_{fold}")

        # Split the data
        train_video = video_features[train_idx]
        train_text = text_features[train_idx]
        train_labels = labels[train_idx]
        val_video = video_features[val_idx]
        val_text = text_features[val_idx]
        val_labels = labels[val_idx]

        # Mini-batch loader for training
        train_dataset = TensorDataset(train_video, train_text, train_labels)
        train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)

        # Fresh model/optimizer per fold.
        # Bug fix: infer input widths from the data instead of hard-coding 1463/768.
        model = build_model(visual_dim=video_features.shape[1],
                            text_dim=text_features.shape[1],
                            num_classes=num_classes)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.AdamW(model.parameters(), lr=lr)

        # Training loop
        print(f"\n=== Fold {fold}/{k} 训练开始 ===")
        for epoch in range(epochs):
            # Training phase
            model.train()
            epoch_loss = 0.0
            for batch_v, batch_t, batch_l in train_loader:
                optimizer.zero_grad()
                outputs = model(batch_v, batch_t)
                loss = criterion(outputs, batch_l)
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()

            # Validation phase (full batch)
            model.eval()
            with torch.no_grad():
                val_outputs = model(val_video, val_text)
                val_loss = criterion(val_outputs, val_labels)
                _, predicted = torch.max(val_outputs, 1)

                y_true = val_labels.numpy()
                y_pred = predicted.numpy()

                # MAE over class indices and Matthews correlation.
                # Bug fix: the previous softmax+argmax recomputation is
                # identical to `predicted`, so y_pred is used directly.
                mae = mean_absolute_error(y_true, y_pred)
                corr = matthews_corrcoef(y_true, y_pred)

                # Standard metrics
                acc = accuracy_score(y_true, y_pred)
                f1 = f1_score(y_true, y_pred, average='macro')

            # Log validation metrics
            fold_writer.add_scalar('Loss/val', val_loss.item(), epoch)
            fold_writer.add_scalar('Accuracy/val', acc, epoch)
            fold_writer.add_scalar('F1/val', f1, epoch)
            fold_writer.add_scalar('MAE/val', mae, epoch)
            fold_writer.add_scalar('Correlation/val', corr, epoch)

            # Periodic progress report
            if (epoch + 1) % 10 == 0:
                print(f"Epoch {epoch + 1}/{epochs}")
                print(f"Train Loss: {epoch_loss / len(train_loader):.4f} | Val Loss: {val_loss.item():.4f}")
                print(f"Val Acc: {acc:.4f} | Val F1: {f1:.4f}")
                print(f"Val MAE: {mae:.4f} | Val Corr: {corr:.4f}\n")

            # Save confusion-matrix figures on the final epoch only
            if epoch == epochs - 1:
                save_dir = f"results/{model_name}/{timestamp}"
                os.makedirs(save_dir, exist_ok=True)

                # Bug fix: compute the matrix once instead of three times
                cm = confusion_matrix(y_true, y_pred)

                # Raw-count matrix
                plt.figure(figsize=(10, 8))
                sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
                plt.savefig(f"{save_dir}/fold_{fold}_cm_count.png")
                plt.close()

                # Row-normalized matrix (per true class).
                # Bug fix: guard against division by zero when a class has
                # no samples in the validation fold.
                row_sums = cm.sum(axis=1)[:, np.newaxis]
                cm_norm = cm.astype('float') / np.where(row_sums == 0, 1, row_sums)
                plt.figure(figsize=(10, 8))
                sns.heatmap(cm_norm, annot=True, fmt='.2%', cmap='Blues')
                plt.savefig(f"{save_dir}/fold_{fold}_cm_ratio.png")
                plt.close()

        fold_writer.close()
        fold += 1


def evaluate_libreface_emotion_recognition_from_folder(folder_path, label_mapping, label_dict,
                                                       predicted_label_column='facial_expression'):
    """
    Evaluate libreface emotion predictions stored as per-sample CSV files.

    Each CSV holds per-frame predictions; the majority emotion across frames
    becomes the sample-level prediction, which is mapped to an integer label
    and scored against the ground truth (accuracy, weighted F1, confusion
    matrix).

    :param folder_path: str, folder containing the per-sample CSV files
    :param label_mapping: dict, maps libreface emotion names to integer labels
    :param label_dict: dict, ground truth keyed by file base name (extension
        and any '_features' suffix removed)
    :param predicted_label_column: str, column holding the per-frame predicted
        emotion, defaults to 'facial_expression'
    :return: None; results are printed
    """
    all_preds = []
    all_labels = []

    # Walk every CSV file in the folder
    for filename in os.listdir(folder_path):
        if filename.endswith('.csv'):
            file_path = os.path.join(folder_path, filename)
            df = pd.read_csv(file_path)

            # File base name without extension and without '_features' suffix
            file_base_name = os.path.splitext(filename)[0]
            file_base_name = file_base_name.replace('_features', '')

            # Look up the ground-truth label for this sample
            true_label = label_dict.get(file_base_name, None)
            if true_label is None:
                # Bug fix: the message previously printed a literal
                # "(unknown)" instead of the actual file name
                print(f"Warning: 文件 {file_base_name} 不在 label_dict 中，跳过。")
                continue

            # Per-frame predicted emotions
            predicted_emotions = df[predicted_label_column].tolist()

            # Majority vote across frames
            emotion_count = Counter(predicted_emotions)
            most_common_emotion, _ = emotion_count.most_common(1)[0]

            # Map the emotion name to its integer label (-1 = unknown emotion)
            predicted_label = label_mapping.get(most_common_emotion, -1)

            # Accumulate predictions and ground truth
            all_preds.append(predicted_label)
            all_labels.append(true_label)

    # Compute the evaluation metrics
    if all_labels and all_preds:
        accuracy = accuracy_score(all_labels, all_preds)
        f1 = f1_score(all_labels, all_preds, average='weighted')
        cm = confusion_matrix(all_labels, all_preds)

        print("Libreface Emotion Recognition Evaluation:")
        print(f"Accuracy: {accuracy:.4f}")
        print(f"F1 Score: {f1:.4f}")
        print(f"Confusion Matrix:\n{cm}")
    else:
        print("没有足够的数据进行评估，请检查文件夹内容和标签字典。")


# 3. 主函数
def test_single_model(true_labels,name):
    # 读取数据

    if name == "mer":
        folder_path = './data/mer/out_feature'  # 假设libreface输出文件夹
        # mer
        label_mapping = {
            'Happiness': 1,
            'Sadness': 5,
            'Anger': 3,
            'Surprise': 4,
            'Fear': 0,
            'Neutral': 2
        }

    if name == "mosei":
        folder_path = './data/out_feature_mosei'  # 假设libreface输出文件夹
        label_mapping = {
            'Happiness':6 ,
            'Sadness': 2,
            'Anger': 0,
            'Surprise': 5,
            'Disgust': 1,
            'Fear':3,
            'Neutral':4
        }


    labels = true_labels
    evaluate_libreface_emotion_recognition_from_folder(folder_path, label_mapping, labels)


def load_labels(csv_path):
    """
    Load ground-truth labels from a CSV with 'name' and 'label' columns.

    Keys are taken verbatim from the 'name' column (no '_features' stripping
    is performed here — the evaluation helper strips that suffix from file
    names instead).

    :param csv_path: str, path to the CSV file
    :return: dict mapping sample name -> label
    """
    df = pd.read_csv(csv_path)
    # Bug fix: dropped the unused `label_list`; the misleading comments about
    # stripping a '_features' suffix (never done) are gone too.
    labels_dict = {row['name']: row['label'] for _, row in df.iterrows()}
    print(len(labels_dict))
    return labels_dict

if __name__ == "__main__":
    # NOTE(review): the commented-out section below is the full training
    # pipeline (BERT load -> feature extraction -> K-fold CV -> final
    # train/evaluate/save); only the libreface baseline evaluation runs now.
    #
    # bert_model_path = './bert/mer_lr_3.00e-05'
    # model_path = "models/mer_3e-5.pth"
    #
    #
    # # Load the BERT model and tokenizer
    # tokenizer, bert_model = load_bert_model(bert_model_path)
    # #
    # # Load and process the data
    # video_features, text_features, labels = load_and_process_data("data/mer/text/merged_file_with_labels.csv",
    #                                                               "./data/mer/out_feature", tokenizer, bert_model)
    #
    #
    # # Train and evaluate with K-fold cross-validation
    # k_fold_cross_validation(video_features=video_features, text_features=text_features, labels=labels, k=5,
    #                         num_classes=6, epochs=100, lr=0.0001)
    #
    # # Final model training
    # model = build_model(visual_dim=1463, text_dim=768, num_classes=6)
    #
    # # Hold out a validation split
    # val_size = int(0.2 * len(video_features))
    # indices = torch.randperm(len(video_features))
    #
    # # Train on the remaining samples
    # model = train_and_log(
    #     model,
    #     video_features[indices[val_size:]],
    #     text_features[indices[val_size:]],
    #     labels[indices[val_size:]],lr=0.001,epochs=10,num_classes=6
    # )
    #
    # # Evaluate the final model on the held-out split
    # evaluate_model(model, video_features[indices[:val_size]],
    #                text_features[indices[:val_size]],
    #                labels[indices[:val_size]])
    #
    #
    # torch.save(model.state_dict(), model_path)


    # Ground-truth labels for the CMU-MOSEI subset
    dict_label = load_labels('./data/cmu_mosei/metadata/merged_data_clean_small.csv')

    # dict_label = load_labels('./data/mer/text/merged_file_with_labels.csv')
    labels = dict_label
    #
    test_single_model(labels,"mosei")