import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, random_split
import pandas as pd
import os
import numpy as np
from collections import defaultdict
import matplotlib.pyplot as plt
from datetime import datetime
import csv

# 为了能从当前目录导入其他模块，设置路径
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from multimodal_vectorizer import MultimodalVectorizer
from scoring_model import ComprehensiveScoringModel

class CsvInterviewDataset(Dataset):
    """Dataset that loads interview turns from a CSV file, grouped by interview.

    Each CSV row is one dialogue turn; rows sharing an ``interview_id`` form
    one sample. Vectorization is deferred to ``__getitem__`` so only the
    requested interview is converted to tensors.
    """

    # Columns forwarded verbatim to the vectorizer for every turn.
    _FEATURE_COLUMNS = (
        "text", "audio_emotion", "stress", "expression_seq",
        "eyeBlink", "mouthPuff", "eyeUpLeft", "eyeUpRight",
        "earTouch", "noseTouch", "speech_rate",
    )

    def __init__(self, csv_path, vectorizer):
        """
        Args:
            csv_path: Path to the dataset CSV file.
            vectorizer: Object exposing ``vectorize(raw) -> {"feature_vector": ...}``.

        Raises:
            FileNotFoundError: If ``csv_path`` does not exist.
        """
        self.vectorizer = vectorizer
        if not os.path.exists(csv_path):
            raise FileNotFoundError(f"数据集文件未找到: {csv_path}")

        # Skip malformed rows. Try the modern (pandas >= 1.3) ``on_bad_lines``
        # keyword first; fall back to the legacy ``error_bad_lines`` spelling
        # for old pandas versions that do not know the new one. (The original
        # order relied on new pandas raising TypeError on every load.)
        try:
            df = pd.read_csv(csv_path, on_bad_lines='skip')
        except TypeError:
            df = pd.read_csv(csv_path, error_bad_lines=False, warn_bad_lines=True)

        print(f"成功加载数据集，共 {len(df)} 行")

        self.interviews = defaultdict(list)
        print("正在从CSV加载并向量化数据...")
        # tqdm is optional (pip install tqdm); without it we iterate silently.
        try:
            from tqdm import tqdm
            iterator = tqdm(df.iterrows(), total=df.shape[0], desc="向量化数据")
        except ImportError:
            iterator = df.iterrows()

        # Group turns by interview id; CSV row order is preserved per group.
        for _, row in iterator:
            self.interviews[row['interview_id']].append(row)

        self.interview_ids = list(self.interviews.keys())
        print(f"数据加载完成，共 {len(self.interview_ids)} 场面试。")

    def __len__(self):
        """Number of distinct interviews (not CSV rows)."""
        return len(self.interview_ids)

    def __getitem__(self, idx):
        """Return ``(sequence_tensor, score)`` for the idx-th interview.

        ``sequence_tensor`` has shape (num_turns, feature_dim). ``score`` is a
        single-element float tensor taken from the first turn's
        ``ground_truth_score`` (assumed identical across turns — TODO confirm).
        """
        interview_id = self.interview_ids[idx]
        turns = self.interviews[interview_id]

        sequence_vectors = []
        for turn in turns:
            # Flat field extraction (no nested structure) for the vectorizer.
            raw_data = {col: turn[col] for col in self._FEATURE_COLUMNS}
            vec = self.vectorizer.vectorize(raw_data)["feature_vector"]
            sequence_vectors.append(torch.tensor(vec, dtype=torch.float32))

        sequence_tensor = torch.stack(sequence_vectors)
        score = torch.tensor([turns[0]['ground_truth_score']], dtype=torch.float32)

        return sequence_tensor, score

def pad_collate_fn(batch):
    """Collate variable-length interview sequences into one padded batch.

    Args:
        batch: List of ``(sequence_tensor, score)`` pairs from the dataset.

    Returns:
        Tuple ``(padded_sequences, lengths, scores)``: sequences zero-padded
        along the time axis (batch-first), each sample's original turn count,
        and the stacked score tensors.
    """
    sequences = [sample[0] for sample in batch]
    score_list = [sample[1] for sample in batch]
    lengths = torch.tensor([seq.size(0) for seq in sequences])
    padded = nn.utils.rnn.pad_sequence(sequences, batch_first=True, padding_value=0)
    return padded, lengths, torch.stack(score_list, 0)

class EarlyStopping:
    """Early stopping: halt training when validation loss stops improving.

    Tracks the best (lowest) validation loss seen so far. Every call that does
    not improve on it by more than ``min_delta`` increments a counter; once the
    counter reaches ``patience``, the ``early_stop`` flag is set. Each
    improvement also checkpoints the model to the given path.
    """

    def __init__(self, patience=7, min_delta=0, verbose=False):
        """
        Args:
            patience: Number of non-improving calls tolerated before stopping.
            min_delta: Minimum improvement (in loss units) to reset the counter.
            verbose: If True, print counter/checkpoint progress messages.
        """
        self.patience = patience
        self.min_delta = min_delta
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # BUGFIX: use np.inf — the np.Inf alias was removed in NumPy 2.0.
        self.val_loss_min = np.inf

    def __call__(self, val_loss, model, path):
        """Record one validation result; checkpoint the model on improvement."""
        # Negate so that "higher score is better" throughout the comparisons.
        score = -val_loss

        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, model, path)
        elif score < self.best_score + self.min_delta:
            self.counter += 1
            if self.verbose:
                print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model, path)
            self.counter = 0

    def save_checkpoint(self, val_loss, model, path):
        """Persist the model's state_dict and remember the new best loss."""
        if self.verbose:
            print(f'验证损失减少 ({self.val_loss_min:.6f} --> {val_loss:.6f})。保存模型...')
        torch.save(model.state_dict(), path)
        self.val_loss_min = val_loss

def train(project_root, epochs=50, batch_size=16, learning_rate=0.001, patience=10, val_ratio=0.2):
    """Train the interview scoring model.

    Args:
        project_root: Project root directory (must contain interviews_dataset.csv).
        epochs: Maximum number of training epochs.
        batch_size: Mini-batch size.
        learning_rate: Initial AdamW learning rate.
        patience: Early-stopping patience (epochs without val-loss improvement).
        val_ratio: Fraction of interviews held out for validation.
    """
    csv_path = os.path.join(project_root, "interviews_dataset.csv")

    # Select the compute device.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    print("--- 1. 初始化向量化工具和数据集 ---")
    vectorizer = MultimodalVectorizer(modality_dim=128)
    full_dataset = CsvInterviewDataset(csv_path=csv_path, vectorizer=vectorizer)

    # Split into train/validation sets at the interview level.
    dataset_size = len(full_dataset)
    val_size = int(val_ratio * dataset_size)
    train_size = dataset_size - val_size
    train_dataset, val_dataset = random_split(full_dataset, [train_size, val_size])

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=pad_collate_fn)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, collate_fn=pad_collate_fn)

    print(f"训练集大小: {train_size} 场面试, 验证集大小: {val_size} 场面试")

    print("--- 2. 初始化模型、损失函数和优化器 ---")
    # NOTE(review): input_dim=384 presumably corresponds to 3 modalities x
    # modality_dim=128 from the vectorizer — confirm against MultimodalVectorizer.
    model = ComprehensiveScoringModel(input_dim=384, hidden_dim=128, n_layers=2, dropout=0.3)
    model = model.to(device)

    criterion = nn.MSELoss()
    optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=1e-4)

    # Halve the LR after 5 epochs without validation-loss improvement.
    # (The ``verbose=True`` kwarg was dropped: deprecated in PyTorch >= 2.2.)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.5, patience=5
    )

    # Early stopping; it checkpoints the best model to model_path on each
    # validation-loss improvement.
    model_dir = os.path.join(project_root, "model")
    os.makedirs(model_dir, exist_ok=True)
    model_path = os.path.join(model_dir, "scoring_model.pth")
    early_stopping = EarlyStopping(patience=patience, verbose=True)

    # Per-epoch loss history for the plot below.
    train_losses = []
    val_losses = []

    print("--- 3. 开始训练 ---")
    for epoch in range(epochs):
        # ---- Training phase ----
        model.train()
        train_loss = 0.0

        for sequences, lengths, scores in train_loader:
            sequences = sequences.to(device)
            # NOTE(review): if the model internally uses pack_padded_sequence,
            # lengths must remain a CPU tensor — confirm against the model.
            lengths = lengths.to(device)
            scores = scores.to(device)

            optimizer.zero_grad()
            predictions = model(sequences, lengths)
            loss = criterion(predictions, scores)
            loss.backward()

            # Clip gradients to guard against exploding gradients.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

            optimizer.step()
            train_loss += loss.item()

        avg_train_loss = train_loss / len(train_loader)
        train_losses.append(avg_train_loss)

        # ---- Validation phase ----
        model.eval()
        val_loss = 0.0

        with torch.no_grad():
            for sequences, lengths, scores in val_loader:
                sequences = sequences.to(device)
                lengths = lengths.to(device)
                scores = scores.to(device)

                predictions = model(sequences, lengths)
                loss = criterion(predictions, scores)
                val_loss += loss.item()

        avg_val_loss = val_loss / len(val_loader)
        val_losses.append(avg_val_loss)

        # Adapt the learning rate based on validation loss.
        scheduler.step(avg_val_loss)

        print(f"Epoch [{epoch+1:02d}/{epochs}], "
              f"Train Loss: {avg_train_loss:.4f}, Train RMSE: {np.sqrt(avg_train_loss):.4f}, "
              f"Val Loss: {avg_val_loss:.4f}, Val RMSE: {np.sqrt(avg_val_loss):.4f}")

        # Early-stopping check (saves the best checkpoint on improvement).
        early_stopping(avg_val_loss, model, model_path)
        if early_stopping.early_stop:
            print(f"早停触发，在第 {epoch+1} 轮停止训练")
            break

    # BUGFIX: the original unconditionally re-saved the final-epoch weights
    # here when early stopping did not trigger, overwriting the *best*
    # checkpoint that EarlyStopping had already written to model_path with
    # possibly worse weights. The best checkpoint is already on disk.

    print("--- 4. 训练完成，保存模型 ---")
    print(f"模型已成功保存至: {model_path}")

    # Plot and save the loss curves.
    plt.figure(figsize=(10, 6))
    plt.plot(train_losses, label='训练损失')
    plt.plot(val_losses, label='验证损失')
    plt.xlabel('Epoch')
    plt.ylabel('Loss (MSE)')
    plt.title('训练和验证损失曲线')
    plt.legend()

    # Timestamped filename so successive runs don't overwrite each other.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    plot_path = os.path.join(model_dir, f"loss_curve_{timestamp}.png")
    plt.savefig(plot_path)
    plt.close()
    print(f"损失曲线已保存至: {plot_path}")

if __name__ == "__main__":
    # Resolve the project root: three directory levels above this file.
    project_root_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', '..', '..')
    )

    dataset_file = os.path.join(project_root_dir, "interviews_dataset.csv")
    if not os.path.exists(dataset_file):
        # Nothing to train on — tell the user where the dataset is expected.
        print("未找到数据集，请确保interviews_dataset.csv文件存在于项目根目录。")
    else:
        # Hyperparameters are configurable from the command line.
        import argparse
        parser = argparse.ArgumentParser(description="训练面试评分模型")
        parser.add_argument("--epochs", type=int, default=50, help="训练轮数")
        parser.add_argument("--batch_size", type=int, default=16, help="批次大小")
        parser.add_argument("--lr", type=float, default=0.001, help="学习率")
        parser.add_argument("--patience", type=int, default=10, help="早停耐心值")
        parser.add_argument("--val_ratio", type=float, default=0.2, help="验证集比例")
        cli_args = parser.parse_args()

        train(
            project_root=project_root_dir,
            epochs=cli_args.epochs,
            batch_size=cli_args.batch_size,
            learning_rate=cli_args.lr,
            patience=cli_args.patience,
            val_ratio=cli_args.val_ratio,
        )