from transformers import AutoTokenizer, AutoModel

import argparse
# Assumes `tokenizer` and `model` are defined elsewhere; their definitions are not shown here.

# CLI definition: a single required --prompt argument carrying the input text.
parser = argparse.ArgumentParser(description='Generate text from a prompt.')
parser.add_argument(
    '--prompt',
    type=str,
    required=True,
    help='The prompt to generate text from.',
)

# Read the command-line arguments into a namespace.
args = parser.parse_args()



# Select which GPU the transformers model should see.
model_path="/home/ubuntu/model_test/THUDM/codegeex2-6b"
import os 
# Example: expose the first and third GPUs instead.
# os.environ["CUDA_VISIBLE_DEVICES"] = "0, 2"
# NOTE(review): CUDA_VISIBLE_DEVICES only takes effect if set before CUDA is
# initialized — presumably no CUDA call has happened yet at this point; verify.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# Load the tokenizer and model from the local checkpoint directory.
# trust_remote_code=True is required because CodeGeeX2 ships custom model code.
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True, device='cuda')

# Alternative: load directly from the Hugging Face hub instead of a local path.
# tokenizer = AutoTokenizer.from_pretrained("THUDM/codegeex2-6b", trust_remote_code=True)
# model = AutoModel.from_pretrained("THUDM/codegeex2-6b", trust_remote_code=True, device='cuda')
# Switch to inference mode (disables dropout etc.).
model = model.eval()

# remember adding a language tag for better performance
# prompt = "# language: Python\n# write a bubble sort function\n"
# prompt = "# language: Java\n# write a bubble sort function\n"
# prompt = "# language: js\n# write a bubble sort function\n"
# prompt = "use java language  to  write a bubble sort function\n"

# prompt = "// language: C++\n // write a bubble sort function\n"

# prompt = "// language: Java\n // 写一个类，有名字，id，年龄，地址，公司的字段\n"
# prompt = "// language: Java\n //Write a class with fields for name, ID, age, address, and company\n"
# prompt = "// language: Java\n // Write a class with fields for name, ID, age, address, and company\n"
# prompt="""
# from top.starp.util import file_util

# file_util.re"""
# prompt="""# 提取文章关键词"""


# Use the prompt supplied on the command line (--prompt is required).
prompt = args.prompt

# BUG FIX(review): the original code unconditionally overwrote the CLI value
# with a hard-coded example here, which made the required --prompt argument
# useless. The example is kept below for reference only:
# prompt = """
# # 提取文章关键词
#
# from gensim.summarization import keywords"""

# Extract article keywords
# prompt = " // Write a class with fields for name, ID, age, address, and company\n"
# prompt = " #递归读取所有文件的路径\n"
# prompt = " #写一个django登录\n"
# prompt = "#递归读取指定文件夹下所有的.mp3文件，收集所有文件的文件名、完整路径，生成json字符串，然后写入到allmp3.json文件中\n"

# prompt = " #递归读取指定文件夹下所有的.mp3文件，收集所有文件的文件名、完点路径，生成son字符串，然后写入到allmp3.json文件中\n"
 
# Tokenize the prompt and move the input ids to the same device as the model.
inputs = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
# Generate up to 256 tokens; top_k=1 always picks the single most likely
# next token, i.e. effectively greedy decoding.
outputs = model.generate(inputs, max_length=256, top_k=1)
# Decode the full generated sequence (prompt + completion) back to text.
response = tokenizer.decode(outputs[0])

# Print a literal label line followed by the generated text.
print("response")
print(response)