import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import pandas as pd
from sklearn.model_selection import train_test_split
import os
# Restrict this process to physical GPU 4.
# NOTE(review): set after `import torch` — works because CUDA is initialized
# lazily, but conventionally this belongs before any torch import.
os.environ["CUDA_VISIBLE_DEVICES"] = "4" 

# Local path of the fine-tuned RoBERTa checkpoint
model_name = "/home/ZJQ/.cache/modelscope/hub/models/mota0user/roberta_rl_llama_model"

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForSequenceClassification.from_pretrained(
    model_name,
    trust_remote_code=True,
    device_map="auto",  # automatic placement; only GPU 4 is visible above
    num_labels=2,       # binary classification task
    problem_type="single_label_classification"
)

# Maximum input sequence length; 512 is the usual RoBERTa limit
max_seq_length = 512

# Dataset loading
def load_excel_dataset(file_path):
    """Read the sentiment-classification dataset from an XLSX file.

    Returns the loaded DataFrame, or None when the file cannot be read
    (the error is printed, not raised, so the caller can decide what to do).
    """
    try:
        df = pd.read_excel(file_path)
    except Exception as e:
        print(f"加载XLSX数据失败: {e}")
        return None
    print(f"成功加载 {len(df)} 条数据")
    print(f"数据集列名: {list(df.columns)}")
    return df

dataset_path = "/home/ZJQ/pypro/data/data5000.xlsx"
dataset = load_excel_dataset(dataset_path)

# Build sentiment-label <-> integer-id mappings
# NOTE(review): 'lable' (sic) is used as the column name throughout — it is
# presumably the actual header in the spreadsheet; confirm before renaming.
if dataset is not None and 'lable' in dataset.columns:
    emotions = dataset['lable'].unique().tolist()
    emotion_to_id = {emo: i for i, emo in enumerate(emotions)}
    id_to_emotion = {i: emo for i, emo in enumerate(emotions)}
    
    # Add a numeric label column used as the training target
    dataset['label_id'] = dataset['lable'].map(emotion_to_id)
    
    # Train/test split — only 1% is held out for evaluation
    train_data, test_data = train_test_split(dataset, test_size=0.01, random_state=42)
else:
    # Missing file or missing label column: nothing to train on.
    # NOTE(review): `exit(1)` works in a script but `sys.exit(1)` is preferred.
    print("数据集加载失败或缺少必要列，程序退出")
    exit(1)


# Initial prompt: "P" role template combined with a chain-of-thought suffix.
# This text seeds the trainable soft-prompt embedding.
initial_prompt = """P -Role: Sentiment Analyst.
P - Background: Users need a quick sentiment assessment of movie reviews to understand if they are Positive or Negative.
P - Profile: You are a sentiment analyst with expertise in evaluating emotional tones in text.
P - Skills: You can swiftly analyze text to determine if it is Positive or Negative.
P - Goals: Provide a concise sentiment assessment of movie reviews.
P - Constraints: Only output "Positive" or "Negative."
P - Let's think step by step:
1. Analyze the review's emotional tone.
2. Determine if it is Positive or Negative.
3. Output the result."""

# Convert the prompt text into a trainable embedding
def initialize_prompt_embedding(tokenizer, model, prompt_text, max_seq_length):
    """Encode `prompt_text` and return a trainable embedding for it.

    Args:
        tokenizer: tokenizer matching `model`.
        model: model exposing `get_input_embeddings()` and `.device`.
        prompt_text: the initial hard prompt.
        max_seq_length: hard cap on the number of prompt tokens.

    Returns:
        (prompt_embedding, num_tokens) where `prompt_embedding` is a leaf
        tensor with requires_grad=True of shape (1, num_tokens, hidden).

    Raises:
        ValueError: if the tokenizer output lacks `input_ids`.
    """
    # Encode the prompt; `max_length` honors the caller-supplied cap
    # (previously the parameter was accepted but ignored).
    prompt_encoded = tokenizer(
        prompt_text,
        return_tensors="pt",
        truncation=True,
        max_length=max_seq_length
    )
    prompt_inputs = {k: v.to(model.device) for k, v in prompt_encoded.items()}

    if 'input_ids' not in prompt_inputs:
        raise ValueError("编码结果中缺少input_ids")

    # Look up the token embeddings without tracking gradients...
    with torch.no_grad():
        prompt_embedding = model.get_input_embeddings()(prompt_inputs['input_ids'])

    # ...then detach/clone so the prompt becomes an independent leaf tensor
    # that an optimizer can update without touching the model weights.
    prompt_embedding = prompt_embedding.detach().clone().requires_grad_(True)
    return prompt_embedding, prompt_inputs['input_ids'].shape[1]

# Train the soft prompt
def train_prompt(model, tokenizer, train_data, initial_prompt, emotion_to_id, epochs=15, lr=0.001, max_seq_length=512):
    """Optimize a trainable prompt embedding for the (frozen) classifier.

    Only the prompt embedding is optimized.  Model parameters are frozen up
    front: the Adam optimizer below only owns the prompt, so without the
    freeze every `loss.backward()` would silently accumulate (never-cleared,
    never-used) gradients on the whole backbone.

    Args:
        model/tokenizer: classifier and matching tokenizer.
        train_data: DataFrame with 'text' and 'label_id' columns.
        initial_prompt: hard prompt text used to seed the embedding.
        emotion_to_id: label mapping (kept for interface compatibility).
        epochs, lr, max_seq_length: training hyperparameters.

    Returns:
        The hard prompt text obtained by projecting the best-scoring
        embedding back to the nearest vocabulary tokens, or
        `initial_prompt` if no epoch produced a usable result.
    """
    # Trainable prompt embedding plus its token count
    prompt_embedding, num_segments = initialize_prompt_embedding(
        tokenizer, model, initial_prompt, max_seq_length
    )

    # Freeze the backbone so gradients only flow into the prompt embedding.
    for param in model.parameters():
        param.requires_grad_(False)

    optimizer = torch.optim.Adam([prompt_embedding], lr=lr)

    # Track the best training accuracy and its projected prompt tokens
    best_accuracy = 0.0
    best_prompt_tokens = None

    device = next(model.parameters()).device

    for epoch in range(epochs):
        model.train()
        total_loss = 0.0
        total_correct = 0
        total_samples = 0

        for _, sample in train_data.iterrows():
            text = sample['text']
            true_label = int(sample['label_id'])

            # Build the input text; the crude character-level cut just
            # bounds tokenizer cost before the real token-level truncation.
            full_text = f"{initial_prompt}{text}"
            if len(full_text) > 2000:
                full_text = full_text[:2000]

            inputs = tokenizer(
                full_text,
                return_tensors="pt",
                truncation=True,
                max_length=max_seq_length,
                padding="max_length"
            ).to(device)

            # Look up embeddings, then splice the trainable prompt over the
            # leading positions (autograd tracks the in-place assignment, so
            # gradients reach `prompt_embedding`).
            input_embeds = model.get_input_embeddings()(inputs['input_ids'])
            min_len = min(input_embeds.shape[1], prompt_embedding.shape[1])
            input_embeds[:, :min_len] = prompt_embedding[:, :min_len, :]

            # token_type_ids: pad to the embedding length, or synthesize
            # zeros when the model expects them but the tokenizer omitted them
            token_type_ids = inputs.get("token_type_ids", None)
            if token_type_ids is not None:
                if token_type_ids.shape[1] < input_embeds.shape[1]:
                    token_type_ids = F.pad(token_type_ids, (0, input_embeds.shape[1] - token_type_ids.shape[1]))
            else:
                if hasattr(model.config, "use_token_type_ids") and model.config.use_token_type_ids:
                    token_type_ids = torch.zeros_like(inputs['input_ids'])
                else:
                    token_type_ids = None

            # attention_mask: pad to the embedding length if needed
            attention_mask = inputs.get("attention_mask", None)
            if attention_mask is not None and attention_mask.shape[1] < input_embeds.shape[1]:
                attention_mask = F.pad(attention_mask, (0, input_embeds.shape[1] - attention_mask.shape[1]))

            # Forward pass; on failure, report and skip the sample
            try:
                outputs = model(
                    inputs_embeds=input_embeds,
                    token_type_ids=token_type_ids,
                    attention_mask=attention_mask
                )
                logits = outputs.logits
            except Exception as e:
                print(f"模型推理错误: {e}")
                print(f"input_embeds.shape: {input_embeds.shape}")
                print(f"token_type_ids.shape: {token_type_ids.shape if token_type_ids is not None else 'None'}")
                print(f"attention_mask.shape: {attention_mask.shape if attention_mask is not None else 'None'}")
                continue

            # Per-sample (batch size 1) cross-entropy loss and update
            loss = F.cross_entropy(logits, torch.tensor([true_label], device=device))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Running statistics
            total_loss += loss.item()
            predictions = torch.argmax(logits, dim=1)
            total_correct += (predictions == true_label).sum().item()
            total_samples += 1

        # Epoch summary and best-prompt bookkeeping
        if total_samples > 0:
            accuracy = total_correct / total_samples
            avg_loss = total_loss / total_samples
            print(f"Epoch {epoch+1}/{epochs} | Loss: {avg_loss:.4f} | Accuracy: {accuracy:.4f}")

            if accuracy > best_accuracy:
                best_accuracy = accuracy

                # Project every prompt position onto its nearest vocabulary
                # embedding.  (The original iterated `range(min_len)`, a
                # loop-local leaked from the last training sample.)
                best_prompt_tokens = []
                embedding_matrix = model.get_input_embeddings().weight
                with torch.no_grad():
                    for seg_idx in range(prompt_embedding.shape[1]):
                        seg_emb = prompt_embedding[:, seg_idx, :].squeeze(0)
                        similarities = F.cosine_similarity(seg_emb, embedding_matrix, dim=1)
                        best_prompt_tokens.append(torch.argmax(similarities).item())
        else:
            print(f"Epoch {epoch+1}/{epochs} | 没有有效的训练样本")

    # Decode the best projected prompt; fall back to the initial text
    best_prompt_text = tokenizer.decode(best_prompt_tokens) if best_prompt_tokens else initial_prompt
    return best_prompt_text

# Evaluation helper - walks the test rows one at a time
def evaluate_model(model, tokenizer, test_data, prompt, max_seq_length=512):
    """Return the accuracy of `model` on `test_data` with `prompt` prepended.

    Rows whose forward pass raises are reported and excluded from the count;
    returns 0 when no row could be evaluated.
    """
    model.eval()
    device = next(model.parameters()).device
    correct = 0
    seen = 0

    for _, row in test_data.iterrows():
        true_label = row['label_id']

        # Prepend the prompt and apply the same crude character cut as training
        full_text = f"{prompt}{row['text']}"[:2000]

        encoded = tokenizer(
            full_text,
            return_tensors="pt",
            truncation=True,
            max_length=max_seq_length,
            padding="max_length"
        ).to(device)

        # Inference only — no gradients needed
        with torch.no_grad():
            try:
                logits = model(**encoded).logits
                prediction = torch.argmax(logits, dim=1).item()
            except Exception as e:
                print(f"评估时模型推理错误: {e}")
                continue

        seen += 1
        correct += int(prediction == true_label)

    return correct / seen if seen > 0 else 0

# Script entry point: train the prompt, then compare original vs. optimized
if __name__ == "__main__":
    print("开始训练提示词...")
    best_prompt = train_prompt(
        model, tokenizer, train_data, initial_prompt, emotion_to_id, 
        epochs=5, max_seq_length=max_seq_length
    )

    print("\n训练后的提示词:")
    print(best_prompt)

    # Baseline: evaluate with the original hand-written prompt
    print("\n使用原始提示词评估:")
    original_accuracy = evaluate_model(
        model, tokenizer, test_data, initial_prompt, max_seq_length=max_seq_length
    )
    print(f"测试集准确率: {original_accuracy:.4f}")

    # Evaluate with the trained (token-projected) prompt
    print("\n使用优化后的提示词评估:")
    optimized_accuracy = evaluate_model(
        model, tokenizer, test_data, best_prompt, max_seq_length=max_seq_length
    )
    print(f"测试集准确率: {optimized_accuracy:.4f}")