import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import pandas as pd
import time
from tqdm import tqdm
import os
import re

# GPU selection: expose only these physical GPUs to this process.
# NOTE: must be set before CUDA is initialized to take effect.
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
os.environ["CUDA_VISIBLE_DEVICES"] = "2,3,4"
# 
# Device setup: shard the model across all visible GPUs via accelerate's
# "auto" device map, or fall back to CPU when no GPU is visible.
if torch.cuda.device_count() > 0:
    print(f"发现 {torch.cuda.device_count()} 个GPU设备")
    device_map = "auto"
else:
    print("未发现GPU，将使用CPU运行")
    device_map = None

# Load model and tokenizer (local ModelScope snapshot of
# Meta-Llama-3.1-8B-Instruct; fp16 weights to fit on the GPUs).


model_name = "/home/ZJQ/.cache/modelscope/hub/models/LLM-Research/Meta-Llama-3___1-8B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map=device_map,
    torch_dtype=torch.float16,
    trust_remote_code=True
)

# Ensure pad_token and eos_token both exist (Llama tokenizers ship without a
# pad token; generate() needs one for batched padding).
# NOTE(review): if BOTH are None these assignments still leave them None —
# assumed not to happen for this model.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
if tokenizer.eos_token is None:
    tokenizer.eos_token = tokenizer.pad_token

# Print how model layers were distributed across the GPUs.
if device_map == "auto":
    print("模型分布情况:")
    for device, param in model.hf_device_map.items():
        print(f"  {device}: {param}")

def load_excel_dataset(file_path, required_cols=('text', 'lable')):
    """Load an Excel dataset and validate that the required columns exist.

    Args:
        file_path: path to the .xlsx file.
        required_cols: column names that must be present. Defaults to
            ('text', 'lable') — the columns the inference loop reads.
            ('lable' matches the dataset's actual, misspelled column name.)

    Returns:
        A pandas DataFrame on success, or None if loading or validation
        fails (best-effort loader: the caller checks for None and exits).
    """
    try:
        df = pd.read_excel(file_path)
        print(f"成功加载 {len(df)} 条数据")
        print(f"数据集列名: {list(df.columns)}")
        # Validate the core columns up front so inference doesn't KeyError later;
        # report exactly which ones are absent.
        missing = [col for col in required_cols if col not in df.columns]
        if missing:
            raise ValueError(f"数据集缺少核心列！需包含 {list(required_cols)}, 缺少 {missing}")
        return df
    except Exception as e:
        # Broad catch is deliberate: any read/validation failure is reported
        # and converted to None rather than crashing the script.
        print(f"加载XLSX数据失败: {e}")
        return None

# Batch inference: classify each review's sentiment and count correct predictions.
def batch_inference(data, batch_size=5):
    """Run batched sentiment classification and compute hit counts.

    Args:
        data: DataFrame with a 'text' column (review) and a 'lable' column
            (gold label, e.g. "positive"/"negative").
        batch_size: number of prompts per generate() call.

    Returns:
        (correct_count, total_count) — a prediction counts as correct when the
        gold label appears (case-insensitively) in the newly generated text.
    """
    correct_count = 0
    total_count = len(data)
    batches = [data[i:i + batch_size] for i in range(0, len(data), batch_size)]

    sentiment_prompt_template = "Determine the sentiment of this movie review as positive or negative. Review: {text}.  Just answer positive or negative"
    # sentiment_prompt_template = "As a Film Review Sentiment Analyzer, you are an expert in quickly and accurately identifying the emotional tone of film reviews. Users need to determine if a review is positive, negative, or neutral to understand public opinion or reflect on the movie. Your skills include swiftly analyzing text to assess sentiment based on word choice and tone. Your goal is to provide a quick and accurate assessment of the sentiment expressed in a film review. You will read the review, analyze its sentiment, and output the category as either positive, negative, or neutral, without any additional commentary.{text}. Only respond Positive or Negative."

    # Left-pad the batch: decoder-only models must not generate after pad
    # tokens, and left padding also makes every prompt end at the same column,
    # so one slice offset works for the whole batch.
    tokenizer.padding_side = "left"

    start_time = time.time()

    for batch in tqdm(batches, desc="处理批次"):
        prompts = []
        actual_labels = []
        for _, row in batch.iterrows():
            prompts.append(sentiment_prompt_template.format(text=row['text'].strip()))
            actual_labels.append(row['lable'].strip())

        # Tokenize the whole batch at once (adds BOS and padding).
        inputs = tokenizer(
            prompts,
            return_tensors="pt",
            padding=True,
            truncation=True
        )
        input_ids = inputs.input_ids.to(model.device)
        attention_mask = inputs.attention_mask.to(model.device)

        with torch.no_grad():
            outputs = model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_new_tokens=4,  # the label is short; cap new tokens
                pad_token_id=tokenizer.pad_token_id,
                eos_token_id=tokenizer.eos_token_id,  # stop at eos
            )

        # BUG FIX: slice at the *padded* prompt width. The old code sliced at
        # len(tokenizer.encode(prompt, add_special_tokens=False)), which
        # ignored the BOS token the batched tokenizer prepends (and any
        # padding), so the "generated" text still contained prompt tokens.
        prompt_len = input_ids.shape[1]
        for i, (output, actual_label) in enumerate(zip(outputs, actual_labels)):
            generated_text = tokenizer.decode(
                output[prompt_len:], skip_special_tokens=True
            ).strip()

            # Correct when the gold label is a substring of the generation.
            hit = actual_label.lower() in generated_text.lower()
            if hit:
                correct_count += 1

            # Debug: print only the first 2 samples of each batch.
            if i < 2:
                print(f"\n文本: {prompts[i]}")
                print(f"生成结果: {generated_text}")
                print(f"实际标签: {actual_label}")
                print(f"判断: {'正确' if hit else '错误'}")

    end_time = time.time()
    print(f"\n推理完成，耗时: {end_time - start_time:.2f}秒")
    return correct_count, total_count

# Main program: load the dataset, run inference, report accuracy.
if __name__ == "__main__":
    # Dataset paths (alternatives kept for quick switching between sizes).
    # dataset_path = "/home/ZJQ/pypro/data/data.xlsx"
    dataset_path = "/home/ZJQ/pypro/data/data5000.xlsx"
    # dataset_path = "/home/ZJQ/pypro/data/data100.xlsx"

    dataset = load_excel_dataset(dataset_path)
    
    if dataset is None or len(dataset) == 0:
        print("没有数据可处理，程序退出")
        exit()
    
    # NOTE(review): emotion_ids maps each label to its FIRST token id; it is
    # printed for inspection but never used by batch_inference below —
    # candidate for removal or for constrained decoding.
    emotions = dataset['lable'].unique().tolist()
    emotion_ids = {emo: tokenizer.encode(emo)[0] for emo in emotions}
    print(f"情感类别: {emotions}")
    print(f"情感标签-Token映射: {emotion_ids}")
    
    # Run inference.
    try:
        correct_count, total_count = batch_inference(
            dataset,
            batch_size=1  # tune to available GPU memory
        )

        # Compute and report accuracy over the whole test set.
        accuracy = correct_count / total_count if total_count > 0 else 0
        print(f"\n【情感分类测试集准确率】: {accuracy:.4f}")
        print(f"正确预测数: {correct_count}, 测试集总样本数: {total_count}")
    except Exception as e:
        print(f"推理过程中发生错误: {e}")
        exit()
    