import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForCausalLM
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import os
# --- Model path selection and loading ---
# Alternative checkpoints kept for experiment switching:
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/iic/nlp_bert_document-segmentation_chinese-base"
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-7B-Instruct"
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-14B-Instruct"
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/LLM-Research/Meta-Llama-3___1-8B-Instruct"
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-3B-Instruct"

# NOTE: torch is already imported, but CUDA is initialized lazily, so setting
# the visible devices here (before any CUDA call) still takes effect.
os.environ["CUDA_VISIBLE_DEVICES"] = "1,2"
# os.environ["CUDA_VISIBLE_DEVICES"] = "3,4"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    device_map="auto",  # enable automatic multi-GPU model parallelism
)
# BUGFIX: `do_sample` is a generation option, not a loading option — it does
# not belong in `from_pretrained()`. Set it on the generation config so every
# `model.generate(...)` call below decodes greedily.
model.generation_config.do_sample = False

# Freeze all model weights: only the soft-prompt embedding is optimized.
for param in model.parameters():
    param.requires_grad = False

print(f"embedding_matrix:{model.get_input_embeddings().weight}")
# print(f"model:{model}")

print(f"lm_head: {model.lm_head.weight}")

# 加载数据集
# 读取XLSX数据集（新增/修改部分）
def load_excel_dataset(file_path):
    """Load the sentiment-classification dataset from an Excel workbook.

    Returns the loaded DataFrame, or None if the file cannot be read.
    """
    try:
        # Supports both .xlsx and .xls via pandas' Excel reader.
        frame = pd.read_excel(file_path)
        print(f"成功加载 {len(frame)} 条数据")
        print(f"数据集列名: {list(frame.columns)}")
    except Exception as err:
        print(f"加载XLSX数据失败: {err}")
        return None
    return frame

dataset_path = "/home/ZJQ/pypro/data/data5000.xlsx"
dataset = load_excel_dataset(dataset_path)
# Fail fast with a clear message: load_excel_dataset returns None on failure,
# and the subscript below would otherwise raise an opaque TypeError.
if dataset is None:
    raise SystemExit(f"Failed to load dataset: {dataset_path}")

# NOTE(review): the column really is spelled 'lable' — it matches the
# spreadsheet header; do not "fix" it without renaming the column in the data.
emotions = dataset['lable'].unique().tolist()
# First token id of each label string; used as the training target below.
emotion_ids = {emo: tokenizer.encode(emo)[0] for emo in emotions}

# Deliberately tiny train split (0.2% of ~5000 rows): few-shot prompt tuning;
# the remaining 99.8% is held out for evaluation.
train_data, test_data = train_test_split(dataset, test_size=0.998, random_state=42)

# 初始化提示词: 原始
# initial_prompt = """- Role: Sentiment Analyst with Reflection Mechanism.
# - Background: Users need a quick sentiment assessment of movie reviews to understand if they are Positive or Negative.
# - Profile: You are a sentiment analyst with expertise in evaluating emotional tones in text. You possess the ability to reflect on historical assessment errors, identify potential biases in language contexts, and optimize analysis logic iteratively.  
# - Skills: You can swiftly analyze text to determine if it is Positive or Negative.
# - Goals: Provide a concise sentiment assessment of movie reviews.
# - Constraints: Only output "Positive" or "Negative."""


# 初始化提示词:Cot prompt
# initial_prompt = """-Role: Sentiment Analyst
# - Background: Users need a quick sentiment assessment of movie reviews to understand if they are Positive or Negative.
# - Profile: You are a sentiment analyst with expertise in evaluating emotional tones in text.
# - Skills: You can swiftly analyze text to determine if it is Positive or Negative.
# - Goals: Provide a concise sentiment assessment of movie reviews.
# - Constraints: Only output "Positive" or "Negative."
# - Let's think step by step:
# 1. Analyze the review's emotional tone.
# 2. Determine if it is Positive or Negative.
# 3. Output the result."""

# 初始化提示词:p prompt
# initial_prompt = """P - Role: Sentiment Analyst with Reflection Mechanism.
# P - Background: Users need a quick sentiment assessment of movie reviews to understand if they are Positive or Negative.
# P - Profile: You are a sentiment analyst with expertise in evaluating emotional tones in text. You possess the ability to reflect on historical assessment errors, identify potential biases in language contexts, and optimize analysis logic iteratively.  
# P - Skills: You can swiftly analyze text to determine if it is Positive or Negative.
# P - Goals: Provide a concise sentiment assessment of movie reviews.
# P - Constraints: Only output "Positive" or "Negative."""


# 初始化提示词:P Cot Prompt 反思
# initial_prompt = """P -Role: Sentiment Analyst.
# P - Background: Users need a quick sentiment assessment of movie reviews to understand if they are Positive or Negative.
# P - Profile: You are a sentiment analyst with expertise in evaluating emotional tones in text.
# P - Skills: You can swiftly analyze text to determine if it is Positive or Negative.
# P - Goals: Provide a concise sentiment assessment of movie reviews.
# P - Constraints: Only output "Positive" or "Negative."
# P - Let's think step by step:
# 1. Analyze the review's emotional tone.
# 2. Determine if it is Positive or Negative.
# 3. Output the result.
# P - Reflection mechanism: Please systematically reflect on whether the current conclusions are logically sound and factually supported, and avoid subjective speculation. If the argument is sufficient, output the results; otherwise, rethink the reasoning and then output the final results."""


# Initial prompt: "P + CoT" variant (P-prefixed role prompt with
# chain-of-thought steps). The string is runtime data fed to the tokenizer.
initial_prompt = """P - Role: Sentiment Analyst with Reflection Mechanism.
P - Background: Users need a quick sentiment assessment of movie reviews to understand if they are Positive or Negative.
P - Profile: You are a sentiment analyst with expertise in evaluating emotional tones in text. You possess the ability to reflect on historical assessment errors, identify potential biases in language contexts, and optimize analysis logic iteratively.  
P - Skills: You can swiftly analyze text to determine if it is Positive or Negative.
P - Goals: Provide a concise sentiment assessment of movie reviews.
P - Constraints: Only output "Positive" or "Negative."
P - Let's think step by step:
1. Analyze the review's emotional tone.
2. Determine if it is Positive or Negative.
3. Output the result."""




# Embed the initial prompt once; this embedding seeds the trainable soft prompt.
initial_inputs = tokenizer(initial_prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    initial_embedding = model.get_input_embeddings()(initial_inputs.input_ids)

# Detached copy that becomes the ONLY trainable tensor (soft-prompt tuning:
# the model itself is frozen above).
prompt_embedding = initial_embedding.clone().requires_grad_(True)
optimizer = torch.optim.Adam([prompt_embedding], lr=0.0001)

# Per-epoch snapshots: decoded prompt text and raw embedding tensors.
prompt_history = {0: initial_prompt}
embedding_history = {0: prompt_embedding.detach().cpu()}
max_epochs = 10
# One "segment" per prompt token; used when projecting embeddings back to text.
num_segments = initial_inputs.input_ids.shape[1]

for epoch in range(1, max_epochs+1):
    total_loss, correct = 0.0, 0

    for _, sample in train_data.iterrows():
        text, target_emotion = sample['text'], sample['lable']
        target_id = emotion_ids[target_emotion]

        # Project the current soft prompt back to discrete tokens: for each
        # position, pick the nearest-by-cosine vocabulary embedding whose
        # decoded text is non-empty (top-5 candidates as fallback).
        best_tokens = []
        for seg_idx in range(num_segments):
            seg_emb = prompt_embedding[:, seg_idx:seg_idx+1, :].squeeze(1)
            embedding_matrix = model.get_input_embeddings().weight
            similarities = F.cosine_similarity(seg_emb, embedding_matrix, dim=1)
            top_indices = torch.topk(similarities, k=5).indices

            for tid in top_indices:
                token_text = tokenizer.decode([tid.item()])
                if token_text.strip():
                    best_tokens.append(token_text)
                    break

        optimized_prompt = "".join(best_tokens)
        # NOTE(review): overwritten on every sample — only the last sample's
        # decoded prompt survives as this epoch's snapshot.
        prompt_history[epoch] = optimized_prompt

        input_text = f"{optimized_prompt}{text} Only output [Positive] or [Negative]."
        inputs = tokenizer(input_text, return_tensors="pt", truncation=True).to(model.device)

        # Splice the trainable soft prompt over the first `prompt_len` token
        # embeddings. Assumes the re-tokenized input has at least prompt_len
        # tokens — TODO confirm (a shorter sequence would raise / misalign).
        prompt_len = initial_inputs.input_ids.shape[1]
        input_embeds = model.get_input_embeddings()(inputs.input_ids)
        input_embeds[:, :prompt_len] = prompt_embedding

        # Loss: negative log-probability of the label's first token id at the
        # final position (next-token prediction of the class label).
        outputs = model(inputs_embeds=input_embeds)
        logits = outputs.logits
        loss = -F.log_softmax(logits[:, -1, :], dim=1)[0, target_id]
        total_loss += loss.item()

        # Training-set accuracy via free generation (no gradients needed).
        with torch.no_grad():
            generated_ids = model.generate(inputs_embeds=input_embeds, max_new_tokens=3)
            generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
            prediction = next((emo for emo in emotions if emo in generated_text), None)
            if prediction == target_emotion:
                correct += 1

        # One optimizer step per sample (batch size 1, SGD-style updates).
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

    embedding_history[epoch] = prompt_embedding.detach().cpu()
    avg_loss = total_loss / len(train_data)
    accuracy = correct / len(train_data)
    print(f"Epoch {epoch}/{max_epochs} | Loss: {avg_loss:.4f} | Acc: {accuracy:.4f} | Prompt: {optimized_prompt}")
    # Early stop once training accuracy exceeds 99%.
    if accuracy > 0.99:
        break

# --- Test-set evaluation ---
# Hoisted out of the loop: the decoded prompt of the last trained epoch is the
# same for every test sample (falls back to the initial prompt if missing).
# Relies on `epoch` still being bound from the training loop above.
final_prompt = prompt_history.get(epoch, initial_prompt)

test_correct = 0
for _, sample in test_data.iterrows():
    text, target_emotion = sample['text'], sample['lable']
    input_text = f"{final_prompt}:{text} Only output [Positive] or [Negative]."

    test_inputs = tokenizer(input_text, return_tensors="pt", truncation=True).to(model.device)
    test_inputs["attention_mask"] = torch.ones_like(test_inputs["input_ids"])

    # No gradients are needed at evaluation time. Wrapping the embedding
    # splice in no_grad as well avoids building a throwaway autograd graph
    # through the trainable prompt_embedding for every test sample.
    with torch.no_grad():
        input_embeds = model.get_input_embeddings()(test_inputs.input_ids)
        # Guard against the soft prompt being longer than the tokenized input.
        num_prompt_tokens = min(initial_inputs.input_ids.shape[1], input_embeds.shape[1])
        input_embeds[:, :num_prompt_tokens] = prompt_embedding[:, :num_prompt_tokens, :]
        generated_ids = model.generate(inputs_embeds=input_embeds, max_new_tokens=3)
    generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    prediction = next((emo for emo in emotions if emo in generated_text), "未识别")

    # print(f"输入: {text}\n预测: {prediction} | 标签: {target_emotion} | {'✅' if prediction == target_emotion else '❌'}\n")
    if prediction == target_emotion:
        test_correct += 1

print(f"\n测试集准确率: {test_correct / len(test_data):.4f}")
print(f"最终提示词: {final_prompt}")
