import os
import sys

import torch
from dotenv import find_dotenv, load_dotenv
from llamafactory.chat import ChatModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Resolve the .env file (falling back to .env.example) and make the project
# root importable before pulling in the local toolkit.
found_dotenv = find_dotenv(".env")

if len(found_dotenv) == 0:
    found_dotenv = find_dotenv(".env.example")
print(f"loading env vars from: {found_dotenv}")
load_dotenv(found_dotenv, override=False)

path = os.path.dirname(found_dotenv)
print(f"Adding {path} to sys.path")
sys.path.append(path)

from llm_toolkit.translation_utils import *

model_name = os.getenv("MODEL_NAME")
adapter_name_or_path = os.getenv("ADAPTER_NAME_OR_PATH")
load_in_4bit = os.getenv("LOAD_IN_4BIT") == "true"
data_path = os.getenv("DATA_PATH")
results_path = os.getenv("RESULTS_PATH")

print(model_name, adapter_name_or_path, load_in_4bit, data_path, results_path)


def load_model(
    model_name,
    max_seq_length=2048,
    dtype=torch.bfloat16,
    load_in_4bit=False,
    adapter_name_or_path=None,
):
    # Load either a LoRA-adapted model via LLaMA-Factory's ChatModel, or a
    # plain (optionally 4-bit NF4-quantized) model via transformers.
    print(f"loading model: {model_name}")

    if adapter_name_or_path:
        template = "llama3" if "llama-3" in model_name.lower() else "chatml"

        args = dict(
            model_name_or_path=model_name,
            adapter_name_or_path=adapter_name_or_path,  # load the saved LoRA adapters
            template=template,  # same as the one used in training
            finetuning_type="lora",  # same as the one used in training
            quantization_bit=4 if load_in_4bit else None,  # load a 4-bit quantized model
        )
        chat_model = ChatModel(args)
        return chat_model.engine.model, chat_model.engine.tokenizer

    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=load_in_4bit,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=False,
        bnb_4bit_compute_dtype=dtype,
    )
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        quantization_config=bnb_config,
        torch_dtype=dtype,
        trust_remote_code=True,
        device_map="auto",
    )
    return model, tokenizer


# GPU memory checkpoint (1): before loading the model.
gpu_stats = torch.cuda.get_device_properties(0)
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
print(f"(1) GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"{start_gpu_memory} GB of memory reserved.")

model, tokenizer = load_model(
    model_name, load_in_4bit=load_in_4bit, adapter_name_or_path=adapter_name_or_path
)

# GPU memory checkpoint (2): after loading the model.
gpu_stats = torch.cuda.get_device_properties(0)
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
print(f"(2) GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"{start_gpu_memory} GB of memory reserved.")

datasets = load_translation_dataset(data_path, tokenizer)

print("Evaluating model: " + model_name)
predictions = eval_model(model, tokenizer, datasets["test"])

# GPU memory checkpoint (3): after evaluation.
gpu_stats = torch.cuda.get_device_properties(0)
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
print(f"(3) GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"{start_gpu_memory} GB of memory reserved.")

# Suffix the model name with the adapter checkpoint so results are
# distinguishable from the base model's.
if adapter_name_or_path is not None:
    model_name += "_" + adapter_name_or_path.split("/")[-1]

save_results(
    model_name,
    results_path,
    datasets["test"],
    predictions,
    debug=True,
)

metrics = calc_metrics(datasets["test"]["english"], predictions, debug=True)

print(metrics)
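
# ---------------------------------------------------------------------------
# Usage sketch. The script is configured entirely through environment
# variables (or a .env file); the values below are illustrative assumptions,
# not paths taken from this repo. A typical run might look like:
#
#   MODEL_NAME=Qwen/Qwen2-7B-Instruct \
#   ADAPTER_NAME_OR_PATH=saves/qwen2-7b/lora/sft/checkpoint-100 \
#   LOAD_IN_4BIT=true \
#   DATA_PATH=data/translation.tsv \
#   RESULTS_PATH=results/metrics.csv \
#   python eval_model.py
#
# When ADAPTER_NAME_OR_PATH is set, load_model() routes through LLaMA-Factory's
# ChatModel so the saved LoRA adapters are applied on top of the base model;
# otherwise it falls back to a plain transformers load, optionally quantized
# to 4-bit NF4 via BitsAndBytesConfig.
# ---------------------------------------------------------------------------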