import argparse
from dataclasses import dataclass
from typing import Union, Optional, List, Dict, Any

from datasets import load_dataset
from torch.optim import AdamW

from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForCausalLM, BitsAndBytesConfig, default_data_collator, AutoTokenizer, \
    PreTrainedTokenizerBase

from peft import PeftModel, PeftConfig, get_peft_model, LoraConfig, TaskType
import evaluate
import torch
import numpy as np
from transformers.utils import PaddingStrategy

# ---------------------------------------------------------------------------
# Command-line configuration for LoRA supervised fine-tuning of Llama-2.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(add_help=True, description='LoRA supervised fine-tuning for Llama-2')
parser.add_argument('--model_name_or_path', default="/model_path/llama2/Llama-2-7b-hf", type=str,
                    help='path or hub id of the base causal-LM checkpoint')
parser.add_argument('--dataset_name_or_path', default="/datasets_path/lvwerra/stack-exchange-paired/data/finetune", type=str,
                    help='directory containing the parquet fine-tuning split')
parser.add_argument('--batch_size', default=4, type=int, help='per-device batch size')
parser.add_argument('--num_epochs', default=100, type=int, help='number of training epochs')
parser.add_argument('--save_interval', default=100, type=int, help='checkpoint interval (currently unused below)')
parser.add_argument('--save_dir', default="./save_model/", type=str, help='checkpoint output directory (currently unused below)')
parser.add_argument('--local_rank', default=0, type=int, help='local rank for distributed launchers')
parser.add_argument('--text_column', default="Tweet text", type=str, help='input text column name')
parser.add_argument('--label_column', default="text_label", type=str, help='label column name')
parser.add_argument('--device', default="cuda" if torch.cuda.is_available() else "cpu", type=str,
                    help='torch device to train/infer on')
parser.add_argument('--max_length', default=64, type=int, help='max tokenized sequence length (unused; collator uses 512)')
parser.add_argument('--lora_alpha', default=16, type=int, help='LoRA alpha scaling factor')
# Fixed: was type=int with a float default — passing e.g. "--lora_dropout 0.1"
# on the command line raised ValueError because int("0.1") fails.
parser.add_argument('--lora_dropout', default=0.1, type=float, help='LoRA dropout probability')

args = parser.parse_args()


# NOTE(review): these hard-coded overrides clobber whatever was passed on the
# command line — they look like debug leftovers for a local Windows machine.
# Remove them (or guard behind an environment check) before running elsewhere.
args.model_name_or_path = "D:/model_path/Llama-2-7b-hf"
args.dataset_name_or_path="D:\\datasets_path\\lvwerra\\stack-exchange-paired\\data\\finetune"
def return_etl_data(samples):
    """Map a batch of raw rows onto the two fields used for fine-tuning.

    Expects a batched sample with ``question`` and ``response_j`` columns
    and renames them to ``questions`` / ``responses``; all other columns
    are dropped by the caller via ``remove_columns``.
    """
    questions = samples['question']
    responses = samples['response_j']
    return {"questions": questions, "responses": responses}


# Load the parquet fine-tuning split, then keep only the two text fields the
# collator consumes; every original column is dropped after the map.
rawdataset = load_dataset(
    "parquet",
    split="train",
    data_dir=args.dataset_name_or_path,
)

rawdataset = rawdataset.map(
    return_etl_data,
    batched=True,
    remove_columns=rawdataset.column_names,
)


# Collator that turns each {"questions", "responses"} record into a single
# "Question: ...\n\nAnswer: ..." prompt and tokenizes the whole batch.
@dataclass
class RewardDataCollatorWithPadding:
    # tokenizer: the tokenizer used to encode the assembled prompts
    # padding: padding strategy forwarded to batch_encode_plus
    # max_length: hard cap on tokenized sequence length
    # pad_to_multiple_of: optional alignment for padded length
    # return_tensors: framework of the returned tensors ("pt" = PyTorch)
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = 512
    pad_to_multiple_of: Optional[int] = None
    return_tensors: str = "pt"

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Assemble and tokenize one batch; returns input_ids/attention_mask."""
        # Llama has no pad token, so reuse EOS for padding.
        # Fixed: the original mutated the module-global `tokenizer` instead of
        # the instance's own tokenizer, silently coupling the collator to a
        # global and breaking it when constructed with a different tokenizer.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
        # Assemble one prompt string per example.
        texts = [
            f"Question: {fdata['questions']}\n\nAnswer: {fdata['responses']}"
            for fdata in features
        ]
        # Fixed: truncation=True is required for max_length to actually cap the
        # sequence length; without it over-long examples passed through intact.
        batch = self.tokenizer.batch_encode_plus(
            batch_text_or_text_pairs=texts,
            padding=self.padding,
            truncation=True,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors=self.return_tensors,
        )
        return batch


# Hold out 0.5% of the data as an eval split (deterministic via seed=0).
rawdataset = rawdataset.train_test_split(test_size=0.005, seed=0)




# Load the base causal LM.
# NOTE(review): BitsAndBytesConfig is imported but never used, so the base
# model is loaded in full precision — it is NOT quantized; confirm intent.
base_model = AutoModelForCausalLM.from_pretrained(
    args.model_name_or_path,
)
print(base_model)
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)

# LoRA adapter configuration layered on top of the (unquantized) base model.
peft_config = LoraConfig(
    r=16,  # LoRA rank; hard-coded, unlike alpha/dropout which come from args
    task_type="CAUSAL_LM",
    lora_alpha=args.lora_alpha,
    lora_dropout=args.lora_dropout,

    bias="none",  # bias terms are not trained
)


# Batched loaders over the mapped dataset; the collator builds the
# "Question/Answer" prompts and tokenizes them per batch.
train_dataloader = DataLoader(
    rawdataset["train"], shuffle=True, collate_fn=RewardDataCollatorWithPadding(tokenizer=tokenizer ),
    batch_size=args.batch_size, pin_memory=True
)
# NOTE(review): eval_dataloader is constructed but never consumed anywhere
# below — either evaluate with it or drop it.
eval_dataloader = DataLoader(rawdataset["test"], collate_fn=RewardDataCollatorWithPadding(tokenizer ),
                             batch_size=args.batch_size,
                             pin_memory=True)

# Wrap the base model so only the LoRA adapter parameters are trainable.
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()
# NOTE(review): AdamW falls back to its default learning rate (1e-3), which is
# unusually high for LLM fine-tuning — confirm this is intentional.
optimizer = AdamW(model.parameters())
# Disable the KV cache during training; caching is only useful for generation.
base_model.config.use_cache = False
model.to(args.device)
# ---------------------------------------------------------------------------
# Training loop: plain causal-LM fine-tuning driven directly by the optimizer.
# ---------------------------------------------------------------------------
for epoch in range(args.num_epochs):
    model.train()
    total_loss = 0.0
    for step, batch in enumerate(tqdm(train_dataloader)):

        batch = {k: v.to(model.device) for k, v in batch.items()}

        # Fixed: the collator emits only input_ids/attention_mask, so calling
        # model(**batch) alone leaves outputs.loss as None and the script
        # crashes below. For causal-LM training the labels are the input ids
        # themselves (the model shifts them internally).
        outputs = model(**batch, labels=batch["input_ids"])
        loss = outputs.loss
        total_loss += loss.detach().float()
        # Clear gradients from the previous step.
        optimizer.zero_grad()
        # Fixed: `loss.backward(loss)` passed the loss as the gradient seed,
        # scaling every gradient by the loss value. A scalar loss needs no
        # argument to backward().
        loss.backward()
        # Apply the update.
        optimizer.step()
    # Report mean training loss per epoch (guard against an empty loader).
    print(f"epoch {epoch}: mean train loss = {total_loss / max(len(train_dataloader), 1):.4f}")

# Persist the fine-tuned weights; for a PEFT-wrapped model this saves the
# LoRA adapter (reloaded below via PeftModel.from_pretrained).
model.save_pretrained("output/stfllama2")
"""
以下做推理
"""

peft_model_id = "output/stfllama2"
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

model = model.to(args.device)
model.eval()
inputs = tokenizer(
    "Tweet text : @HondaCustSvc Your customer service has been horrible during the recall process. I will never purchase a Honda again. Label :",
    return_tensors="pt")

with torch.no_grad():
    outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=10)
    print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0])
'complaint'


# python model/rlhf/STFTrain.py