import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForCausalLM
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np

# Model path selection and loading.
# Alternative checkpoints tried during experiments are kept commented out.
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-7B-Instruct"

model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-14B-Instruct"

# model_name = "/home/ZJQ/.cache/modelscope/hub/models/LLM-Research/Meta-Llama-3___1-8B-Instruct"

# model_name = "/home/ZJQ/.cache/modelscope/hub/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    device_map="auto"  # shard the model automatically across available GPUs
)

# Load the dataset
# Read the XLSX dataset (new/modified section)
def load_excel_dataset(file_path):
    """Load the sentiment-classification dataset from an Excel workbook.

    Args:
        file_path: Path to a ``.xlsx``/``.xls`` file readable by pandas.

    Returns:
        A ``pandas.DataFrame`` on success, or ``None`` when the file cannot
        be read (missing file, unsupported format, ...); the failure is
        reported on stdout rather than raised.
    """
    try:
        frame = pd.read_excel(file_path)
    except Exception as err:
        print(f"加载XLSX数据失败: {err}")
        return None
    print(f"成功加载 {len(frame)} 条数据")
    print(f"数据集列名: {list(frame.columns)}")
    return frame

dataset_path = "/home/ZJQ/pypro/data/data1.xlsx"
dataset = load_excel_dataset(dataset_path)

# NOTE(review): `load_excel_dataset` returns None on failure, but the lines
# below assume a DataFrame — a failed load crashes here with AttributeError.
# 'lable' (sic) is the label column name exactly as spelled in the spreadsheet.
emotions = dataset['lable'].unique().tolist()
# Map each label string to the id of its FIRST token; assumes the first token
# alone distinguishes the labels for this tokenizer — TODO confirm.
emotion_ids = {emo: tokenizer.encode(emo)[0] for emo in emotions}

# 50/50 split, seeded for reproducibility.
train_data, test_data = train_test_split(dataset, test_size=0.5, random_state=42)

# 初始化提示词
# Initial prompt: the hand-written seed whose token embeddings become the
# trainable soft prompt.
initial_prompt = """- Role: Sentiment Analyst
- Background: Users need a quick sentiment assessment of movie reviews to understand if they are Positive or Negative.
- Profile: You are a sentiment analyst with expertise in evaluating emotional tones in text.
- Skills: You can swiftly analyze text to determine if it is Positive or Negative.
- Goals: Provide a concise sentiment assessment of movie reviews.
- Constraints: Only output "Positive" or "Negative."
- Workflow:
1. Analyze the review's emotional tone.
2. Determine if it is Positive or Negative.
3. Output the result. """
initial_inputs = tokenizer(initial_prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    # Embedding lookup of the seed prompt (no graph needed for the snapshot).
    initial_embedding = model.get_input_embeddings()(initial_inputs.input_ids)

# The soft prompt is a leaf tensor optimized directly with Adam; the model
# weights themselves are never updated by this optimizer.
prompt_embedding = initial_embedding.clone().requires_grad_(True)
optimizer = torch.optim.Adam([prompt_embedding], lr=0.0001)

prompt_history = {0: initial_prompt}  # epoch -> human-readable prompt text
embedding_history = {0: prompt_embedding.detach().cpu()}  # epoch -> soft-prompt snapshot
max_epochs = 10
num_segments = initial_inputs.input_ids.shape[1]  # number of soft-prompt positions

# ---- Training loop: optimize the soft prompt by gradient descent ----
# Each epoch iterates the training rows one at a time (batch size 1). For each
# sample we (1) project the current soft prompt back to the nearest readable
# tokens, (2) build the model input from that projected text, (3) splice the
# *continuous* prompt embedding over the leading positions, and (4) step the
# optimizer on the negative log-likelihood of the target label's first token.
for epoch in range(1, max_epochs+1):
    total_loss, correct = 0.0, 0

    for _, sample in train_data.iterrows():
        text, target_emotion = sample['text'], sample['lable']
        target_id = emotion_ids[target_emotion]

        # Project every soft-prompt position to its nearest vocabulary token
        # (cosine similarity) to obtain a human-readable prompt for logging
        # and for building the concrete input text.
        best_tokens = []
        embedding_matrix = model.get_input_embeddings().weight  # hoisted: loop-invariant
        for seg_idx in range(num_segments):
            seg_emb = prompt_embedding[:, seg_idx:seg_idx+1, :].squeeze(1)
            similarities = F.cosine_similarity(seg_emb, embedding_matrix, dim=1)
            top_indices = torch.topk(similarities, k=5).indices

            # Keep the first of the top-5 candidates that is not pure whitespace.
            for tid in top_indices:
                token_text = tokenizer.decode([tid.item()])
                if token_text.strip():
                    best_tokens.append(token_text)
                    break

        optimized_prompt = "".join(best_tokens)
        prompt_history[epoch] = optimized_prompt  # last sample's prompt wins for the epoch

        input_text = f"{optimized_prompt}{text} Only output [Positive] or [Negative]."
        inputs = tokenizer(input_text, return_tensors="pt", truncation=True).to(model.device)

        # Overwrite the leading positions with the trainable soft prompt.
        # BUG FIX: `optimized_prompt` may tokenize to a different length than
        # the original seed prompt, so the previous unguarded
        # `input_embeds[:, :prompt_len] = prompt_embedding` could overrun the
        # prompt region (or the whole truncated sequence). Clamp exactly as
        # the test-set loop below already does.
        input_embeds = model.get_input_embeddings()(inputs.input_ids)
        prompt_len = min(initial_inputs.input_ids.shape[1], input_embeds.shape[1])
        input_embeds[:, :prompt_len] = prompt_embedding[:, :prompt_len, :]

        outputs = model(inputs_embeds=input_embeds)
        logits = outputs.logits
        # NLL of the target label's first token at the final position.
        loss = -F.log_softmax(logits[:, -1, :], dim=1)[0, target_id]
        total_loss += loss.item()

        # Greedy-decode a short continuation purely to track training accuracy.
        with torch.no_grad():
            generated_ids = model.generate(inputs_embeds=input_embeds, max_new_tokens=3)
            generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
            prediction = next((emo for emo in emotions if emo in generated_text), None)
            if prediction == target_emotion:
                correct += 1

        # Per-sample update of the soft prompt only.
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

    embedding_history[epoch] = prompt_embedding.detach().cpu()
    avg_loss = total_loss / len(train_data)
    accuracy = correct / len(train_data)
    print(f"Epoch {epoch}/{max_epochs} | Loss: {avg_loss:.4f} | Acc: {accuracy:.4f} | Prompt: {optimized_prompt}")
    # Early stop once training accuracy is (near-)perfect.
    if accuracy > 0.99:
        break

# Test-set evaluation: reuse the last epoch's soft prompt and projected text.
test_correct = 0
for _, sample in test_data.iterrows():
    text, target_emotion = sample['text'], sample['lable']
    # NOTE(review): `epoch` leaks out of the training loop above — this
    # relies on that loop having executed at least once.
    optimized_prompt = prompt_history.get(epoch, initial_prompt)
    input_text = f"{optimized_prompt}:{text} Only output [Positive] or [Negative]."

    test_inputs = tokenizer(input_text, return_tensors="pt", truncation=True).to(model.device)
    test_inputs["attention_mask"] = torch.ones_like(test_inputs["input_ids"])

    # Splice the trained soft prompt over the leading positions, clamped so a
    # short (truncated) input cannot be overrun.
    input_embeds = model.get_input_embeddings()(test_inputs.input_ids)
    num_prompt_tokens = min(initial_inputs.input_ids.shape[1], input_embeds.shape[1])
    input_embeds[:, :num_prompt_tokens] = prompt_embedding[:, :num_prompt_tokens, :]

    with torch.no_grad():
        generated_ids = model.generate(inputs_embeds=input_embeds, max_new_tokens=3)
    generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    # First label string found in the generation wins; "未识别" means "unrecognized".
    prediction = next((emo for emo in emotions if emo in generated_text), "未识别")

    # print(f"输入: {text}\n预测: {prediction} | 标签: {target_emotion} | {'✅' if prediction == target_emotion else '❌'}\n")
    if prediction == target_emotion:
        test_correct += 1

print(f"\n测试集准确率: {test_correct / len(test_data):.4f}")
print(f"最终提示词: {prompt_history.get(epoch, initial_prompt)}")
