from transformers import OPTForSequenceClassification, AutoTokenizer
from datasets import load_dataset
import torch

# Path to the locally fine-tuned OPT sequence-classification checkpoint.
model_name = r"/root/autodl-tmp/DeepSpeedExamples/applications/fcrlhf/DeepSpeed-Chat/tests/opt_imdb_model"

# 1. Load the IMDb dataset from a local copy.
dataset = load_dataset("/root/autodl-tmp/DeepSpeedExamples/applications/fcrlhf/DeepSpeed-Chat/datasets/IMDb_movie_reviews")

# 2. Take the first 8 training reviews as a small probe batch.
sentences = dataset["train"]["text"][:8]

# 3. Load the fine-tuned OPT model and its tokenizer.
model = OPTForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model.eval()  # disable dropout so the inspected activations are deterministic

# Print model architecture and tokenizer configuration for inspection.
print(model)
print(tokenizer)

# 4. Tokenize the batch. `padding=True` pads to the longest sequence in the
# batch; `truncation=True` cuts to the tokenizer's model_max_length.
inputs = tokenizer(sentences, padding=True, truncation=True, return_tensors="pt")

score_layer = model.score  # the final linear classification head (hidden_size -> num_labels)

# 5. Run only the transformer backbone (model.model), bypassing the
# classification head, to get the raw hidden states.
with torch.no_grad():
    outputs = model.model(**inputs)

# 6. Per-token hidden states from the last transformer layer.
hidden_states = outputs.last_hidden_state
print("Hidden states shape:", hidden_states.shape)  # [batch_size, sequence_length, hidden_size]

# Keep at most the first 512 positions before applying the score head.
# NOTE(review): this 512 cap is arbitrary relative to the tokenizer's own
# truncation limit — confirm it matches the fine-tuning setup.
input_truncate = hidden_states[:, :512, :]
print("truncate_input_shape", input_truncate.shape)

# Apply the classification head to every (kept) token position.
# no_grad avoids building an autograd graph over the head's weights.
with torch.no_grad():
    output_logits = score_layer(input_truncate)
print("output_logits_shape", output_logits.shape)

# BUG FIX: indexing position -1 selects the last *padded* slot. With
# right-side padding (the OPT tokenizer default), shorter sequences would get
# the logits of a <pad> token. Use the attention mask to locate each
# sequence's last real token instead, clamped to the 512-position window —
# this mirrors how OPTForSequenceClassification pools via pad_token_id.
window = input_truncate.size(1)
last_indices = (inputs["attention_mask"][:, :window].sum(dim=1) - 1).clamp(min=0)
batch_idx = torch.arange(output_logits.size(0))

last_sentence_last_token_output = output_logits[-1, last_indices[-1], :]
print("last_sentence_last_token_output", last_sentence_last_token_output)

last_token_output = output_logits[batch_idx, last_indices, :]
print("last_token_output", last_token_output)

# Sentiment score = logit(label 1) - logit(label 0) at the last real token
# of the last sentence.
last_sentence_last_token_score = output_logits[-1, last_indices[-1], 1] - output_logits[-1, last_indices[-1], 0]
print("last_sentence_last_token_score", last_sentence_last_token_score)

# Same score computed at every token position of every sentence.
all_token_score = output_logits[:, :, 1] - output_logits[:, :, 0]
print("all_token_score shape", all_token_score.shape)

# 7. Hidden state of each sequence's last real token (the score layer's
# effective input), again selected via the attention mask rather than -1.
full_last_indices = (inputs["attention_mask"].sum(dim=1) - 1).clamp(min=0)
last_token_hidden_states = hidden_states[torch.arange(hidden_states.size(0)), full_last_indices, :]
print("Last token hidden states shape:", last_token_hidden_states.shape)  # [batch_size, hidden_size]