import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from sklearn.model_selection import train_test_split
import pandas as pd
import os

# Model path and loading (local ModelScope cache path)
model_name = "/home/ZJQ/.cache/modelscope/hub/models/mota0user/roberta_rl_llama_model"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForSequenceClassification.from_pretrained(
    model_name,
    trust_remote_code=True,
    device_map="cuda:2",  # pin the model to the third GPU
    num_labels=2,       # binary classification head
    problem_type="single_label_classification"
)

# Dataset loading helper
def load_excel_dataset(file_path):
    """Load a sentiment-classification dataset from an XLSX file.

    Returns the DataFrame on success, or None when reading fails
    (the error is printed rather than raised — best-effort loading).
    """
    try:
        frame = pd.read_excel(file_path)
    except Exception as e:
        print(f"加载XLSX数据失败: {e}")
        return None
    print(f"成功加载 {len(frame)} 条数据")
    print(f"数据集列名: {list(frame.columns)}")
    return frame

dataset_path = "/home/ZJQ/pypro/data/data1.xlsx"
dataset = load_excel_dataset(dataset_path)

# Build emotion-label <-> integer-id mappings.
# NOTE(review): 'lable' (sic) is the actual column name in the spreadsheet;
# also `dataset` is None if loading failed, in which case the next line raises.
emotions = dataset['lable'].unique().tolist()
emotion_to_id = {emo: i for i, emo in enumerate(emotions)}
id_to_emotion = {i: emo for i, emo in enumerate(emotions)}

# Add a numeric label column used for training
dataset['label_id'] = dataset['lable'].map(emotion_to_id)

# 80/20 train/test split with a fixed seed for reproducibility
train_data, test_data = train_test_split(dataset, test_size=0.2, random_state=42)

# Training hyperparameters
max_epochs = 27
batch_size = 1  # one example per step

# Initial (hard) prompt text used to seed the trainable soft prompt.
# NOTE(review): the closing quote after the second "Negative" in the
# Constraints line appears to be missing — confirm the prompt reads as intended.
initial_prompt = """
P - Background: Users need a quick sentiment assessment of movie reviews to understand if they are Positive or Negative.
P - Profile: You are a sentiment analyst with expertise in evaluating emotional tones in text.
P - Skills: You can swiftly analyze text to determine if it is Positive or Negative.
P - Goals: Provide a concise sentiment assessment of movie reviews.
P - Constraints: Only output "Positive" or "Negative. """

# Tokenize the initial prompt on the model's device
initial_inputs = tokenizer(initial_prompt, return_tensors="pt").to(model.device)
prompt_length = initial_inputs.input_ids.shape[1]  # number of prompt token positions

# Seed a trainable soft-prompt embedding from the hard prompt's token embeddings
with torch.no_grad():
    initial_embedding = model.get_input_embeddings()(initial_inputs.input_ids)

prompt_embedding = initial_embedding.clone().requires_grad_(True)
# Only the prompt embedding is optimized — the model weights are not in the optimizer
optimizer = torch.optim.Adam([prompt_embedding], lr=0.00001)

# DataLoader construction
def create_data_loader(df, tokenizer, max_len=512, batch_size=1, prompt_len=None, shuffle=True):
    """Build a DataLoader yielding (input_ids, attention_mask, label) batches.

    Args:
        df: DataFrame with 'text' and 'label_id' columns.
        tokenizer: HF-style tokenizer callable returning a mapping with
            'input_ids' and 'attention_mask' tensors.
        max_len: total sequence budget *including* the soft-prompt positions.
        batch_size: batch size for the DataLoader.
        prompt_len: number of token positions reserved for the soft prompt;
            defaults to the module-level ``prompt_length`` for backward
            compatibility with the original implicit-global behavior.
        shuffle: whether to shuffle batches (default True matches the original;
            consider passing False for the evaluation loader).

    Returns:
        torch.utils.data.DataLoader over the tokenized dataset.
    """
    if prompt_len is None:
        prompt_len = prompt_length  # fall back to the module-level prompt length

    # Column access is much faster than an iterrows() pass and avoids
    # shadowing the module-level `dataset` name.
    texts = df['text'].tolist()
    labels = df['label_id'].tolist()

    # Reserve room so soft-prompt embeddings + text fit within max_len
    encodings = tokenizer(texts, truncation=True, padding='max_length',
                          max_length=max_len - prompt_len, return_tensors='pt')
    tensor_ds = torch.utils.data.TensorDataset(
        encodings['input_ids'],
        encodings['attention_mask'],
        torch.tensor(labels)
    )

    return torch.utils.data.DataLoader(tensor_ds, batch_size=batch_size, shuffle=shuffle)

train_loader = create_data_loader(train_data, tokenizer, batch_size=batch_size)
test_loader = create_data_loader(test_data, tokenizer, batch_size=batch_size)

# Training loop: prompt tuning — the optimizer holds only `prompt_embedding`,
# so model weights are never stepped even though model.train() is set.
device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
model.train()

for epoch in range(max_epochs):
    total_loss = 0
    correct_predictions = 0
    total_predictions = 0
    
    for batch in train_loader:
        input_ids, attention_mask, labels = [t.to(device) for t in batch]
        
        # Broadcast the single soft-prompt embedding across the batch dimension
        batch_prompt_embedding = prompt_embedding.expand(input_ids.shape[0], -1, -1)
        
        # Embed the input token ids
        text_embeddings = model.get_input_embeddings()(input_ids)
        
        # Prepend the soft prompt to the text embeddings along the sequence axis
        combined_embeddings = torch.cat([batch_prompt_embedding, text_embeddings], dim=1)
        
        # Prompt positions are always attended (mask of ones)
        prompt_attention_mask = torch.ones(input_ids.shape[0], prompt_length, dtype=torch.long, device=device)
        combined_attention_mask = torch.cat([prompt_attention_mask, attention_mask], dim=1)
        
        optimizer.zero_grad()
        
        # Feed embeddings directly instead of token ids
        outputs = model(inputs_embeds=combined_embeddings, 
                       attention_mask=combined_attention_mask, 
                       labels=labels)
        loss = outputs.loss
        
        logits = outputs.logits
        
        total_loss += loss.item()
        
        # Track running training accuracy
        predictions = torch.argmax(logits, dim=1)
        correct_predictions += (predictions == labels).sum().item()
        total_predictions += labels.size(0)

        loss.backward()
        optimizer.step()
    
    avg_loss = total_loss / len(train_loader)
    accuracy = correct_predictions / total_predictions
    print(f"Epoch {epoch+1}/{max_epochs} | Loss: {avg_loss:.4f} | Acc: {accuracy:.4f}")

# Snapshot the tuned soft prompt, detached from the autograd graph
optimized_prompt_embedding = prompt_embedding.detach()
print("优化后的提示词嵌入形状:", optimized_prompt_embedding.shape)

# Evaluate on the held-out test set
model.eval()
test_correct = 0
test_total = 0

with torch.no_grad():
    for batch in test_loader:
        input_ids, attention_mask, labels = [t.to(device) for t in batch]
        
        # Reuse the tuned prompt embedding, broadcast over the batch dimension
        batch_prompt_embedding = optimized_prompt_embedding.expand(input_ids.shape[0], -1, -1)
        
        # Embed the input token ids
        text_embeddings = model.get_input_embeddings()(input_ids)
        
        # Prepend the soft prompt along the sequence axis
        combined_embeddings = torch.cat([batch_prompt_embedding, text_embeddings], dim=1)
        
        # Prompt positions are always attended (mask of ones)
        prompt_attention_mask = torch.ones(input_ids.shape[0], prompt_length, dtype=torch.long, device=device)
        combined_attention_mask = torch.cat([prompt_attention_mask, attention_mask], dim=1)
        
        # Feed embeddings directly instead of token ids (no labels: inference only)
        outputs = model(inputs_embeds=combined_embeddings, 
                       attention_mask=combined_attention_mask)
        logits = outputs.logits
        predictions = torch.argmax(logits, dim=1)
        
        test_correct += (predictions == labels).sum().item()
        test_total += labels.size(0)



print(f"\n测试集准确率: {test_correct / test_total:.4f}")