import argparse
import os

import deepspeed
import torch
from datasets import load_dataset, load_from_disk
from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM, default_data_collator
from peft import PeftModel, PeftConfig, PromptTuningConfig, TaskType, PromptTuningInit, get_peft_model

# Command-line interface.  The help strings ('lijing'/'lujing') are
# placeholders left by the author; they are runtime strings and kept as-is.
parser = argparse.ArgumentParser(add_help=True, description='lijing')
parser.add_argument('--model_name_or_path', default="meta-llama/Llama-2-7b-hf", type=str, help='lujing')
parser.add_argument('--batch_size', default=4, type=int, help='lujing')
parser.add_argument('--num_epochs', default=100, type=int, help='lujing')
parser.add_argument('--save_interval', default=100, type=int, help='lujing')
parser.add_argument('--save_dir', default="./save_model/", type=str, help='lujing')
# --local_rank is set by the deepspeed launcher for each worker process.
parser.add_argument('--local_rank', default=0, type=int, help='lujing')
parser.add_argument('--text_column', default="Tweet text", type=str, help='lujing')
parser.add_argument('--label_column', default="text_label", type=str, help='lujing')
parser.add_argument('--max_length', default=64, type=int, help='lujing')
# parser.add_argument('--deepspeed', default="", type=str, help='lujing')
# parser.add_argument('--deepspeed_config', default="", type=str, help='lujing')


# Injects the --deepspeed / --deepspeed_config flags expected by the launcher
# (this is why the two commented-out add_argument calls above are unnecessary).
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args()

# Load the base tokenizer and causal-LM weights.  NOTE(review): the model is
# loaded with default settings (fp32, no device_map); the commented kwargs are
# previously-tried alternatives (CPU/auto placement, fp16, 8-bit, auth token).
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path,
                                             # device_map='cpu',
                                             # device_map='auto',
                                             # torch_dtype=torch.float16,
                                             # load_in_8bit=True,
                                             # use_auth_token=True,
                                             )

# Prompt tuning: prepend 3 trainable virtual tokens to every input, initialized
# from the embeddings of the classification instruction text below.  Only these
# virtual-token embeddings are trained; the base model stays frozen.
peft_config = PromptTuningConfig(
    task_type=TaskType.CAUSAL_LM,
    prompt_tuning_init=PromptTuningInit.TEXT,
    num_virtual_tokens=3,
    prompt_tuning_init_text="Classify if the tweet is a complaint or not:",
    tokenizer_name_or_path=args.model_name_or_path,
)

# Load the twitter_complaints dataset previously saved to local disk.
dataset = load_from_disk("./testbert/twitter_complaints")
# (Removed two dead no-op statements pasted from documentation output: a bare
# `dataset["train"][0]` expression and a bare dict literal — neither had any
# effect.)

# Map the integer "Label" feature onto a human-readable "text_label" column,
# using the feature's class names with underscores replaced by spaces.
classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names]
dataset = dataset.map(
    lambda x: {"text_label": [classes[label] for label in x["Label"]]},
    batched=True,
    num_proc=1,
)


def preprocess_function(examples):
    """Build left-padded (input_ids, attention_mask, labels) for causal-LM prompt tuning.

    Each example becomes "<text_column> : <tweet> Label : <label><pad>".
    Label positions keep their token ids while prompt and pad positions are set
    to -100 so the LM loss is computed on the label tokens only.  Every
    sequence is left-padded / truncated to exactly ``args.max_length`` and
    returned as ``torch.tensor`` rows.
    """
    batch_size = len(examples[args.text_column])
    inputs = [f"{args.text_column} : {x} Label : " for x in examples[args.text_column]]
    targets = [str(x) for x in examples[args.label_column]]
    model_inputs = tokenizer(inputs)
    labels = tokenizer(targets)
    # FIX: the original used tokenizer.pad_token_type_id (a token *type* id,
    # normally 0, which is <unk> for LLaMA) where the actual padding token id
    # was intended.  LLaMA tokenizers ship without a pad token, so fall back
    # to eos_token_id when pad_token_id is unset.
    pad_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
    for i in range(batch_size):
        sample_input_ids = model_inputs["input_ids"][i]
        # Terminate each label with the pad token so generation learns to stop.
        label_input_ids = labels["input_ids"][i] + [pad_id]
        model_inputs["input_ids"][i] = sample_input_ids + label_input_ids
        # -100 masks the prompt tokens out of the loss.
        labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids
        model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i])
    for i in range(batch_size):
        sample_input_ids = model_inputs["input_ids"][i]
        label_input_ids = labels["input_ids"][i]
        pad_len = args.max_length - len(sample_input_ids)  # <= 0 means no padding
        # Left-pad so the real tokens sit at the end of the context window.
        model_inputs["input_ids"][i] = [pad_id] * pad_len + sample_input_ids
        model_inputs["attention_mask"][i] = [0] * pad_len + model_inputs[
            "attention_mask"
        ][i]
        labels["input_ids"][i] = [-100] * pad_len + label_input_ids
        # Truncate from the left-padded front down to exactly max_length.
        model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:args.max_length])
        model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:args.max_length])
        labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:args.max_length])
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs


# Tokenize both splits, dropping the raw columns so batches contain only
# input_ids / attention_mask / labels.
processed_datasets = dataset.map(
    preprocess_function,
    batched=True,
    num_proc=1,
    remove_columns=dataset["train"].column_names,
    load_from_cache_file=False,
    desc="Running tokenizer on dataset",
)
# if tokenizer.pad_token_id is None:
#     tokenizer.pad_token_id = tokenizer.eos_token_id
# target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes])
#

train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["test"]

# default_data_collator simply stacks the pre-padded tensors into batches.
train_dataloader = DataLoader(
    train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.batch_size, pin_memory=True
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=args.batch_size,
                             pin_memory=True)

# Wrap the frozen base model with the trainable prompt-tuning adapter and
# report how few parameters are actually trainable.
model = get_peft_model(model, peft_config)
print(model.print_trainable_parameters())
print("--------------------------------------使用deepspeed加载------------------------------------------------")
# Print an estimate of ZeRO stage-3 memory needs for 1 node x 3 GPUs.
estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=3, num_nodes=1)

# Wrap the model in a DeepSpeed engine; optimizer/scheduler come from the
# --deepspeed_config JSON passed on the command line.  The returned `model`
# is the engine (it exposes .backward() / .step() / .device).
model, optimizer, _, _ = deepspeed.initialize(args=args, model=model,
                                              model_parameters=model.parameters())

# inputs = tokenizer(
#     "I have no water and the bill is current and paid. Can you do something about this",
#     return_tensors="pt",
# )
# outputs = model.generate(
#     input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=10, eos_token_id=3
# )


print("-----------------------------------------初始化结束-------------------------------------------------")

print(model)
# deepspeed --include="localhost:0,1,2" testbert/deepspeedllamahf.py --deepspeed_config testbert/ds_config3.json
# Train / eval loop.  `model` is a DeepSpeed engine, so the engine owns the
# backward pass and the optimizer step.
for epoch in range(args.num_epochs):
    model.train()
    total_loss = 0
    for step, batch in enumerate(tqdm(train_dataloader)):
        batch = {k: v.to(model.device) for k, v in batch.items()}

        outputs = model(**batch)
        loss = outputs.loss
        total_loss += loss.detach().float()

        model.backward(loss)
        # FIX: the original called optimizer.step() directly, bypassing the
        # DeepSpeed engine's bookkeeping — engine.step() performs the optimizer
        # step plus gradient clipping, LR scheduling, gradient accumulation and
        # zero_grad (the original never zeroed gradients at all).
        model.step()

    model.eval()
    eval_loss = 0
    eval_preds = []
    for step, batch in enumerate(tqdm(eval_dataloader)):
        batch = {k: v.to(model.device) for k, v in batch.items()}
        with torch.no_grad():
            outputs = model(**batch)
        loss = outputs.loss
        eval_loss += loss.detach().float()
        # Greedy (argmax) decode of the per-position logits, for inspection.
        eval_preds.extend(
            tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True)
        )

    # Mean losses over batches; perplexity = exp(mean loss).
    eval_epoch_loss = eval_loss / len(eval_dataloader)
    eval_ppl = torch.exp(eval_epoch_loss)
    train_epoch_loss = total_loss / len(train_dataloader)
    train_ppl = torch.exp(train_epoch_loss)
    print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")

# deepspeed --include="localhost:0,1,2"  testbert/deepspeedllamahfpeft.py --model_name_or_path /root/autodl-tmp/model_path/llama2/Llama-2-7b-hf/  --deepspeed_config testbert/ds_config2.json
