import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import time
import json

# Wall-clock timer for the whole run (reported at the end of the script).
start = time.time()

# Local snapshot of the DeepSeek-MoE-16B chat model in the HF cache.
model_dir = (
    "/home/xuyd/.cache/huggingface/hub/"
    "models--deepseek-ai--deepseek-moe-16b-chat/snapshots/"
    "eefd8ac7e8dc90e095129fe1a537d5e236b2e57c"
)

# trust_remote_code is required: this model ships custom modeling code.
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_dir,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
model.eval()
model.to("cuda:0")

# Read prompts from a text file: one prompt per line, blank lines skipped.
prompt_file = "./economics.txt"
# Explicit UTF-8 — the prompts may contain non-ASCII text and the platform
# default encoding is not guaranteed to be UTF-8.
with open(prompt_file, "r", encoding="utf-8") as f:
    # Iterate the file handle directly; readlines() would materialize the
    # whole file as a list for no benefit.
    prompts = [line.strip() for line in f if line.strip()]

# One result dict per prompt, accumulated for the final JSON dump.
all_generated_sentences = []

# Process each prompt independently: generate a continuation and record the
# decoded text after every generation step.
for prompt in prompts:
    print(f"Processing prompt: {prompt}")

    # Tokenize the prompt and move it to the same device as the model.
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to("cuda:0")
    prompt_len = input_ids.shape[1]

    # Greedy generation (default do_sample=False). return_dict_in_generate
    # gives us output.sequences, which already contains prompt + generated
    # token IDs — no need for output_scores=True, which would retain a
    # per-step logits tensor on the GPU that we never use.
    output = model.generate(
        input_ids,
        max_length=500,  # NOTE: counts prompt tokens too, not just new ones
        return_dict_in_generate=True,
    )

    # Take the newly generated tokens directly from the returned sequence.
    # (The previous approach re-derived them via argmax over the scores,
    # which only matches generate's own choices under pure greedy decoding.)
    generated_token_ids = input_ids[0].tolist()
    new_token_ids = output.sequences[0][prompt_len:].tolist()

    # Re-decode the full prefix after each step so every intermediate
    # "sentence so far" is captured.
    generated_sentences = []
    for new_token_id in new_token_ids:
        generated_token_ids.append(new_token_id)
        current_sentence = tokenizer.decode(
            generated_token_ids, skip_special_tokens=True
        )
        generated_sentences.append(current_sentence)
        # Print only the current sentence (previously the entire growing
        # list was printed on every step).
        print(f"Generated Sentence: {current_sentence}")

    # Final decoded text: prompt plus the full continuation.
    generated_text = tokenizer.decode(generated_token_ids, skip_special_tokens=True)
    print("Final Generated Text:", generated_text)

    # Store this prompt's results. The last step-by-step entry equals the
    # final text, so it is dropped to avoid duplication.
    all_generated_sentences.append({
        "prompt": prompt,
        "generated_text": generated_text,
        "step_by_step": generated_sentences[:-1],
    })

# Persist all per-prompt results to a JSON file.
output_file = "./generated_sentences_new.json"
# ensure_ascii=False keeps non-ASCII (e.g. Chinese) text human-readable in
# the output instead of \uXXXX escapes; the file encoding must then be
# explicitly UTF-8.
with open(output_file, "w", encoding="utf-8") as f:
    json.dump(all_generated_sentences, f, indent=4, ensure_ascii=False)

print(f"Results saved to {output_file}")

# Report total wall-clock time for the run.
end = time.time()
print("Time taken:", end - start)