# Earlier experiment kept for reference (BERT-base sequence classifier):
# model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
import sys
from typing import Optional

from datasets import load_dataset

from llama import Llama
import os

# Minimal single-process rendezvous configuration so torch.distributed
# can initialize with exactly one local worker.
os.environ.update(
    {
        "RANK": "0",
        "WORLD_SIZE": "1",
        "MASTER_ADDR": "localhost",
        "MASTER_PORT": "5678",
    }
)


# NCCL is not available on Windows, so fall back to the gloo backend there.
if sys.platform == "win32":
    os.environ["PL_TORCH_DISTRIBUTED_BACKEND"] = "gloo"

# Download/cache the GLUE MRPC paraphrase-detection dataset (needs network
# or a local HF cache).
# NOTE(review): raw_datasets is never used below in this file — confirm it
# is needed, otherwise this download can be removed.
raw_datasets = load_dataset("glue", "mrpc")
# Generation / model-loading hyperparameters.
# BUG FIX: these lines previously ended with trailing commas, which Python
# parses as one-element tuples (e.g. max_seq_len == (512,)), silently
# breaking Llama.build and any later arithmetic on these values.
temperature: float = 0.6  # sampling temperature (not used in the visible code yet)
top_p: float = 0.9  # nucleus-sampling cutoff (not used in the visible code yet)
max_seq_len: int = 512  # maximum sequence length passed to Llama.build
max_batch_size: int = 8  # maximum batch size passed to Llama.build
max_gen_len: Optional[int] = None  # None lets the generator pick a default length
# Build the 13B Llama-2 generator; Llama.build also initializes the
# distributed process group using the environment variables set above.
# NOTE(review): tokenizer_path points at the checkpoint directory rather
# than a tokenizer model file — confirm this is what Llama.build expects.
_build_kwargs = {
    "ckpt_dir": "/model_path/llama2/llama-2-13b",
    "tokenizer_path": "/model_path/llama2/llama-2-13b",
    "max_seq_len": max_seq_len,
    "max_batch_size": max_batch_size,
}
generator = Llama.build(**_build_kwargs)

# Expose the tokenizer and the underlying model from the generator wrapper,
# then dump the model structure for a quick sanity check.
tokenizer, model = generator.tokenizer, generator.model
print(model)
