import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForCausalLM
from sklearn.model_selection import train_test_split
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
import os

# Select the compute device: GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Load tokenizer and causal-LM from a local ModelScope cache path.
# (The commented line keeps the 7B variant as an alternative checkpoint.)
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-7B-Instruct"
model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-3B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True
).to(device)

# ====================== 解析CSV数据集 ======================
def load_csv_dataset(file_path):
    """Load the emotion-classification dataset from a CSV file.

    Tries GBK first; on a decode error falls back to UTF-8-with-BOM.
    Any other failure (missing file, parse error) is reported and
    yields None.

    Args:
        file_path: Path to the CSV file.

    Returns:
        pandas.DataFrame on success, or None on failure.
    """
    # Keep each try body minimal (just the parse) and report success in
    # exactly one place instead of duplicating the prints per encoding.
    try:
        df = pd.read_csv(file_path, encoding='gbk')
    except UnicodeDecodeError:
        # GBK decode failed -- retry with UTF-8 + BOM.
        try:
            df = pd.read_csv(file_path, encoding='utf-8-sig')
        except Exception as e:
            print(f"尝试utf-8-sig编码失败: {e}")
            return None
    except Exception as e:
        print(f"加载数据失败: {e}")
        return None
    print(f"成功加载 {len(df)} 条数据")
    print(f"数据集列名: {list(df.columns)}")
    return df

# Local path to the Simplified-Chinese multi-emotion dialogue dataset.
dataset_path = "/home/ZJQ/pypro/data/Simplified_Chinese_Multi-Emotion_Dialogue_Dataset.csv"
dataset = load_csv_dataset(dataset_path)
if dataset is None:
    exit(1)

# Distinct emotion labels present in the 'label' column.
emotions = dataset['label'].unique().tolist()
print(f"情感类别: {emotions}")
# Map each label to the FIRST token id of its encoding; used later as the
# loss target. NOTE(review): multi-character Chinese labels likely tokenize
# to several tokens, so keeping only [0] may let different labels collide
# on a shared first token -- verify against the printed mapping.
emotion_ids = {emo: tokenizer.encode(emo)[0] for emo in emotions}
print("情感Token ID映射:", emotion_ids)

# ====================== 可视化工具函数 ======================
def visualize_prompt_evolution(prompt_history, initial_prompt, output_dir="prompt_visualizations"):
    """Plot, per epoch, which prompt tokens still match the initial prompt.

    Each epoch gets a subplot with one bar per token of that epoch's prompt:
    1.0 (green) when the token at that position equals the initial prompt's
    token, otherwise 0.0 (red). The figure is saved as a PNG.

    Args:
        prompt_history: Mapping of epoch -> prompt string.
        initial_prompt: The prompt text before optimization.
        output_dir: Directory for the PNG (created if missing).
    """
    os.makedirs(output_dir, exist_ok=True)
    initial_tokens = tokenizer.tokenize(initial_prompt)

    plt.figure(figsize=(12, 8))
    for i, (epoch, prompt) in enumerate(prompt_history.items()):
        tokens = tokenizer.tokenize(prompt)
        # 1.0 where the token matches the initial prompt at the same position;
        # positions beyond the initial prompt's length count as mismatches.
        similarity_scores = [
            1.0 if j < len(initial_tokens) and t == initial_tokens[j] else 0.0
            for j, t in enumerate(tokens)
        ]

        plt.subplot(len(prompt_history), 1, i + 1)
        # Scores are binary, so color each bar directly. (The previous
        # colormap pass was dead code -- every bar color was immediately
        # overwritten -- and its 'orange' branch was unreachable.)
        bar_colors = ['green' if s > 0.8 else 'red' for s in similarity_scores]
        plt.bar(tokens, similarity_scores, color=bar_colors)
        plt.ylim(0, 1.1)
        plt.title(f"Epoch {epoch}: {prompt}")
        plt.ylabel("Token相似度")
        plt.xticks(rotation=45, ha="right")

    plt.tight_layout()
    plt.savefig(f"{output_dir}/prompt_evolution.png", dpi=300)
    plt.close()
    print("提示词演变图已保存")

def visualize_embedding_changes(embedding_history, output_dir="prompt_visualizations"):
    """Plot how far each epoch's prompt embedding drifted from the first one.

    Computes the mean cosine similarity between the earliest recorded
    embedding and every recorded embedding, then saves the curve as a PNG.

    Args:
        embedding_history: Mapping of epoch -> prompt embedding tensor.
        output_dir: Directory for the PNG (created if missing).
    """
    os.makedirs(output_dir, exist_ok=True)

    epochs = list(embedding_history.keys())
    baseline = embedding_history[epochs[0]]

    similarities = []
    for emb in embedding_history.values():
        mean_sim = F.cosine_similarity(baseline, emb, dim=1).mean().item()
        similarities.append(mean_sim)

    plt.figure(figsize=(10, 6))
    plt.plot(epochs, similarities, marker='o', color='purple')
    plt.title("提示词嵌入向量相似度变化")
    plt.xlabel("Epoch")
    plt.ylabel("余弦相似度")
    plt.grid(True)
    plt.savefig(f"{output_dir}/embedding_similarity.png", dpi=300)
    plt.close()
    print("嵌入向量变化图已保存")

# ====================== Training setup ======================
# Train/test split. NOTE(review): test_size=0.98 leaves only 2% of rows for
# training -- presumably intentional to keep the per-sample loop cheap; verify.
train_data, test_data = train_test_split(dataset, test_size=0.98, random_state=42)

# Natural-language seed prompt for the classification task.
initial_prompt = """请判断以下文本表达的主要情感或情绪，仅从以下类别中选择：[伤心、生气、关心、惊讶、开心、平静、厌恶、疑问].要求：直接返回情感类别，无需解释，若包含多种情感，选择最强烈的一种，不能使用类别外的词汇"""
initial_inputs = tokenizer(initial_prompt, return_tensors="pt").to(device)
with torch.no_grad():
    initial_embedding = model.get_input_embeddings()(initial_inputs.input_ids)  # [1, seq_len, hidden_dim]

# The prompt embedding is the ONLY trainable tensor; the Adam optimizer
# updates it while the model weights are left untouched by the optimizer.
prompt_embedding = initial_embedding.clone().requires_grad_(True).to(device)
optimizer = torch.optim.Adam([prompt_embedding], lr=0.0001)  # tuned learning rate

# Bookkeeping: readable prompts and embedding snapshots per epoch.
prompt_history = {0: initial_prompt}
embedding_history = {0: prompt_embedding.detach().cpu()}
max_epochs = 10
segment_size = 1  # optimize one token position at a time
num_segments = initial_inputs.input_ids.shape[1]  # number of prompt tokens

# Main optimization loop: per-sample gradient steps on the prompt embedding.
for epoch in range(1, max_epochs+1):
    total_loss = 0.0
    correct = 0
    total = len(train_data)
    
    for idx, sample in train_data.iterrows():
        text = sample['text']
        target_emotion = sample['label']
        target_id = emotion_ids[target_emotion]
        
        # Project each prompt-embedding position back to a discrete token
        # (nearest neighbour in the vocabulary embedding matrix) to obtain
        # a human-readable version of the soft prompt.
        best_tokens = []
        for seg_idx in range(num_segments):
            # Embedding vector of the current prompt position.
            seg_emb = prompt_embedding[:, seg_idx:seg_idx+1, :]  # [1, 1, hidden_dim]
            seg_vector = seg_emb.squeeze(1)  # [1, hidden_dim]
            
            # Cosine similarity against every row of the embedding matrix.
            embedding_matrix = model.get_input_embeddings().weight
            similarities = F.cosine_similarity(seg_vector, embedding_matrix, dim=1)
            top_indices = torch.topk(similarities, k=5, largest=True).indices  # top-5 most similar tokens
            
            # Keep only tokens that are non-empty after stripping whitespace.
            valid_tokens = []
            for tid in top_indices:
                token_text = tokenizer.decode([tid.item()])
                if len(token_text.strip()) > 0 and not token_text.isspace():
                    valid_tokens.append((tid, token_text))
            
            if valid_tokens:
                best_tid, best_text = valid_tokens[0]  # most similar valid token
            else:
                best_tid, best_text = top_indices[0], tokenizer.decode([top_indices[0].item()])
            
            best_tokens.append(best_text)
        
        # Readable prompt for this step; the last sample's version is what
        # gets recorded for the epoch.
        optimized_prompt = "".join(best_tokens)
        prompt_history[epoch] = optimized_prompt
        
        # Build the full model input text (prompt + sample + answer format).
        input_text = f"{optimized_prompt}{text} 仅回复：[伤心、生气、关心、惊讶、开心、平静、厌恶、疑问]中的一种."
        inputs = tokenizer(input_text, return_tensors="pt", truncation=True, ).to(device)
        
        # Overwrite the leading prompt positions with the trainable embedding.
        # NOTE(review): assumes the re-tokenized input has at least prompt_len
        # tokens before the sample text -- otherwise part of the sample's
        # embedding is clobbered; verify for short optimized prompts.
        prompt_len = initial_inputs.input_ids.shape[1]
        input_embeds = model.get_input_embeddings()(inputs.input_ids)
        input_embeds[:, :prompt_len] = prompt_embedding  # substitute trainable embedding
        
        # Forward pass; loss is the negative log-probability of the target
        # emotion's (first) token id at the final sequence position.
        outputs = model(inputs_embeds=input_embeds)
        logits = outputs.logits

        loss = -F.log_softmax(logits[:, -1, :], dim=1)[0, target_id]
        total_loss += loss.item()
        
        # Greedy generation (no grad) to track training accuracy.
        with torch.no_grad():
            generated_ids = model.generate(
                inputs_embeds=input_embeds,
                max_new_tokens=3
            )
            generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
            prediction = next((emo for emo in emotions if emo in generated_text), None)
            if prediction == target_emotion:
                correct += 1
        
        # Per-sample step: only prompt_embedding is in the optimizer, so only
        # it is updated and zeroed. NOTE(review): the model's own parameters
        # also accumulate gradients here and are never cleared -- consider
        # freezing the model (requires_grad_(False)) to save memory; verify.
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
    
    # Snapshot this epoch's prompt embedding (on CPU) for later analysis.
    embedding_history[epoch] = prompt_embedding.detach().cpu()
    # print(f"embedding_history:{embedding_history}")
    
    # Epoch-level progress report.
    avg_loss = total_loss / total
    accuracy = correct / total
    print(f"Epoch {epoch}/{max_epochs} | 损失: {avg_loss:.4f} | 准确率: {accuracy:.4f} | 提示词: {optimized_prompt}")
    
    # Early stopping once training accuracy exceeds 99%.
    if accuracy > 0.99:
        print("达到早停条件，提前终止训练")
        break

# ====================== 结果可视化 ======================
# visualize_prompt_evolution(prompt_history, initial_prompt)
# visualize_embedding_changes(embedding_history)

# ====================== 模型测试 ======================
test_correct = 0
test_total = len(test_data)

# The final readable prompt is loop-invariant: hoist it out of the test loop
# instead of re-fetching per sample. NOTE(review): relies on `epoch` leaking
# from the training loop, i.e. training ran at least one epoch -- verify.
final_prompt = prompt_history.get(epoch, initial_prompt)

for idx, sample in test_data.iterrows():
    text = sample['text']
    target_emotion = sample['label']

    with torch.no_grad():
        # (The previous version tokenized the same text twice per sample and
        # discarded the first result; tokenize once.)
        test_inputs = tokenizer(
            f"{final_prompt}:{text} 仅回复：[伤心、生气、关心、惊讶、开心、平静、厌恶、疑问]中的一种.",
            return_tensors="pt"
        ).to(model.device)
        test_inputs["attention_mask"] = torch.ones_like(test_inputs["input_ids"])

        # Splice the trained prompt embedding over the leading prompt tokens.
        input_embeds = model.get_input_embeddings()(test_inputs.input_ids)
        num_prompt_tokens = min(initial_inputs.input_ids.shape[1], input_embeds.shape[1])
        input_embeds[:, :num_prompt_tokens] = prompt_embedding[:, :num_prompt_tokens, :]

        # Greedy decode; a nested no_grad context here was redundant.
        generated_ids = model.generate(
            inputs_embeds=input_embeds,
            max_new_tokens=3
        )
        generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        # First known emotion label found in the generated text, else a marker.
        prediction = next((emo for emo in emotions if emo in generated_text), "未识别")

        print(f"输入: {text}")
        print(f"预测: {prediction} | 真实标签: {target_emotion} | 结果: {'✅' if target_emotion in prediction else '❌'}")
        print("-"*22)

        if prediction == target_emotion:
            test_correct += 1

test_accuracy = test_correct / test_total
print(f"\n测试集准确率: {test_accuracy:.4f}")
print(f"最终优化提示词: {prompt_history.get(epoch, initial_prompt)}")