import argparse

import torch
from datasets import load_dataset
from peft import LoraConfig
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForCausalLM, BitsAndBytesConfig, default_data_collator, AutoTokenizer
from sklearn.model_selection import train_test_split
from trl import SFTTrainer
from peft import get_peft_config, PeftModel, PeftConfig, get_peft_model, LoraConfig, TaskType
import evaluate
import torch
import numpy as np

# Command-line configuration for LoRA fine-tuning and inference.
parser = argparse.ArgumentParser(add_help=True, description='LoRA fine-tuning of a causal LM')
parser.add_argument('--model_name_or_path', default="meta-llama/Llama-2-7b-hf", type=str, help='HF hub id or local path of the base model')
parser.add_argument('--batch_size', default=4, type=int, help='per-device batch size for train and eval')
parser.add_argument('--num_epochs', default=100, type=int, help='number of training epochs')
parser.add_argument('--save_interval', default=100, type=int, help='checkpoint every N steps')
parser.add_argument('--save_dir', default="./save_model/", type=str, help='directory for saved checkpoints')
parser.add_argument('--local_rank', default=0, type=int, help='local rank for distributed training')
parser.add_argument('--text_column', default="Tweet text", type=str, help='name of the input text column')
parser.add_argument('--label_column', default="text_label", type=str, help='name of the label column')
parser.add_argument('--max_length', default=64, type=int, help='max tokenized sequence length')
parser.add_argument('--lora_alpha', default=16, type=int, help='LoRA scaling factor alpha')
# BUG FIX: dropout is a probability in [0, 1]; it was declared type=int, so
# any value passed on the command line (e.g. "0.1") crashed in int().
parser.add_argument('--lora_dropout', default=0.1, type=float, help='LoRA dropout probability')

args = parser.parse_args()


def return_prompt_and_responses(samples):
    """Map a columnar batch of QA rows to DPO-style fields.

    ``samples`` holds parallel lists under "question", "response_j"
    (the preferred answer) and "response_k" (the dispreferred one).
    Returns a dict with "prompt", "chosen" and "rejected" lists.
    """
    prompts = [f"Question: {q}\n\nAnswer: " for q in samples["question"]]
    return {
        "prompt": prompts,
        "chosen": samples["response_j"],    # rated better than k
        "rejected": samples["response_k"],  # rated worse than j
    }


# Load the Stack Exchange paired fine-tuning split from local parquet files.
rawdataset = load_dataset(
    "parquet",
    split="train",
    data_dir="/datasets/lvwerra/stack-exchange-paired/data/finetune",
)

# BUG FIX: Dataset.map returns a NEW dataset; the original call discarded the
# result, so the prompt/chosen/rejected columns were never actually created.
original_columns = rawdataset.column_names
rawdataset = rawdataset.map(
    return_prompt_and_responses,
    batched=True,
    remove_columns=original_columns,
)

# Hold out 0.5% of the rows for evaluation; seed fixed for reproducibility.
rawdataset = rawdataset.train_test_split(test_size=0.005, seed=0)





# Wrap both splits in PyTorch DataLoaders. Only the training loader shuffles;
# both pin host memory to speed up host-to-GPU transfers.
train_dataloader = DataLoader(
    rawdataset["train"],
    shuffle=True,
    collate_fn=default_data_collator,
    batch_size=args.batch_size,
    pin_memory=True,
)
eval_dataloader = DataLoader(
    rawdataset["test"],
    collate_fn=default_data_collator,
    batch_size=args.batch_size,
    pin_memory=True,
)






# LoRA adapter configuration.
# BUG FIX: the original defined peft_config twice (the first definition was
# dead code) and passed the model *path string* as the LoRA rank `r`; the
# rank must be a small integer.
peft_config = LoraConfig(
    r=16,  # LoRA rank (was erroneously args.model_name_or_path)
    task_type="CAUSAL_LM",
    lora_alpha=args.lora_alpha,
    lora_dropout=args.lora_dropout,
    target_modules=["key", "query", "value"],
    bias="none",
)

# Load the base causal LM. BUG FIX: the original silently overwrote the
# CLI-supplied path with a hard-coded local path right before loading,
# making --model_name_or_path a no-op.
base_model = AutoModelForCausalLM.from_pretrained(
    args.model_name_or_path,
)

# Attach the trainable LoRA layers on top of the (frozen) base model.
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()

# The KV cache is only useful for generation; disable it for training.
base_model.config.use_cache = False


# Training loop.
# BUG FIX: the original loop accumulated the loss but never created an
# optimizer and left backward()/step() commented out, so no parameter was
# ever updated. AdamW over the (LoRA-only) trainable parameters fixes this.
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)

for epoch in range(args.num_epochs):
    model.train()
    total_loss = 0.0
    for step, batch in enumerate(tqdm(train_dataloader)):
        # Move every tensor in the batch onto the model's device.
        batch = {k: v.to(model.device) for k, v in batch.items()}

        outputs = model(**batch)
        loss = outputs.loss
        # detach() keeps the running total out of the autograd graph.
        total_loss += loss.detach().float()

        # Backpropagate and update the LoRA parameters.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print(f"epoch {epoch}: mean train loss = {total_loss / max(len(train_dataloader), 1):.4f}")

# Persist the trained adapter weights (placeholder path kept as-is so the
# inference section below loads from the same location).
model.save_pretrained("output_dir****")
"""
以下做推理
"""

peft_model_id = "output_dir****"
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

model = model.to(args.device)
model.eval()
inputs = tokenizer("Tweet text : @HondaCustSvc Your customer service has been horrible during the recall process. I will never purchase a Honda again. Label :", return_tensors="pt")

with torch.no_grad():
  outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=10)
  print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0])
'complaint'
