
import csv
from transformers import LlamaTokenizer, LlamaForCausalLM
from transformers import AutoTokenizer,AutoModelForCausalLM
import argparse
import torch
import numpy as np
import os
import datetime
import json
from tqdm import tqdm

from graph import ChineseGraph
from ner import MatchExtract
from text import Text

# Command-line interface: model location, optional LoRA weights, data/output
# directories, generation length, and feature toggles (8-bit loading,
# confidence output, chain-of-thought prompting).
_ARG_SPECS = [
    ("--model_name_or_path", dict(type=str, default="/home/llm_user/Baichuan2-7B-Chat")),
    ("--lora_weights", dict(type=str, default="")),
    ("--data_dir", dict(type=str, default="../data")),
    ("--save_dir", dict(type=str, default="../results/not_specified")),
    ("--max_length", dict(type=int, default=2048)),
    ("--load_in_8bit", dict(action="store_true")),
    ("--with_conf", dict(action="store_true")),
    ("--cot", dict(action="store_true")),
]

parser = argparse.ArgumentParser()
for _flag, _opts in _ARG_SPECS:
    parser.add_argument(_flag, **_opts)
args = parser.parse_args()

# Route to the Llama-specific classes when the checkpoint path mentions
# "llama"; otherwise use the Auto* classes with remote code enabled (needed
# for models like Baichuan that ship custom modeling code).
# Lower-cased so paths such as ".../Llama-2-7b-hf" are also detected; the
# original check was case-sensitive and would miss them.
_is_llama = "llama" in args.model_name_or_path.lower()
tokenizer_class = LlamaTokenizer if _is_llama else AutoTokenizer
model_class = LlamaForCausalLM if _is_llama else AutoModelForCausalLM

tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, trust_remote_code=True)
# device_map="auto" lets accelerate shard/place the model across available
# devices; load_in_8bit is only honored when bitsandbytes is installed.
# NOTE(review): args.lora_weights is parsed but never applied here — confirm
# whether LoRA merging was intended elsewhere in this file.
model = model_class.from_pretrained(
    args.model_name_or_path,
    trust_remote_code=True,
    load_in_8bit=args.load_in_8bit,
    device_map="auto",
)

# Smoke test: send a short greeting ("你好") through the model and print the
# reply to confirm the checkpoint loaded and can generate.
# Use model.device rather than a hard-coded "cuda": with device_map="auto"
# the model may live on CPU (or a specific GPU), and .to("cuda") would crash
# on machines without CUDA.
inputs = tokenizer('你好', return_tensors="pt").to(model.device)
prompt_len = inputs['input_ids'].shape[1]
# Slice off the prompt tokens so only the newly generated continuation remains.
output = model.generate(inputs['input_ids'], max_new_tokens=8)[0, prompt_len:]
sentence = tokenizer.decode(output)
print(sentence)