import argparse
import os
from dataclasses import dataclass
from typing import Union, Optional, List, Dict, Any
import sys




o_path = os.getcwd()  # current working directory
sys.path.append(o_path)  # add it to the module search path so local packages (e.g. baseconf) resolve
from datasets import load_dataset
from torch.optim import AdamW

from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForCausalLM, BitsAndBytesConfig, default_data_collator, AutoTokenizer, \
    PreTrainedTokenizerBase, TrainingArguments, is_bitsandbytes_available, LlamaTokenizer

from peft import PeftModel, PeftConfig, get_peft_model, LoraConfig, TaskType
import evaluate
import torch
import numpy as np
from transformers.utils import PaddingStrategy, import_utils
from trl import SFTTrainer
from trl.trainer import ConstantLengthDataset

from baseconf import BASE_DISK

parser = argparse.ArgumentParser(add_help=True, description='lijing')
# Pick path defaults by platform: "/etc" exists on Linux, so fall back to a
# Windows drive path otherwise.
parser.add_argument('--model_name_or_path', type=str, help='lujing',
                    default="/model_path/llama2/Llama-2-7B-bf16-sharded" if os.path.exists(
                        "/etc") else BASE_DISK + ":/model_path/Llama-2-7b-hf", )
parser.add_argument('--dataset_name_or_path', type=str, help='lujing',
                    # raw string: the old literal relied on invalid escape
                    # sequences ("\d", "\w", "\q") that only happened to keep
                    # their backslashes; the value itself is unchanged
                    default="/datasets_path/wudaoqa/qa_final" if os.path.exists(
                        "/etc") else BASE_DISK + r":\datasets_path\wudaoqa\qa_final",
                    )
parser.add_argument('--batch_size', default=4, type=int, help='lujing')
parser.add_argument('--num_epochs', default=100, type=int, help='lujing')
parser.add_argument("--max_steps", type=int, default=100)  # also used as num_epochs
parser.add_argument('--save_interval', default=100, type=int, help='lujing')
parser.add_argument('--save_dir', default="./save_model/", type=str, help='lujing')
parser.add_argument('--local_rank', default=0, type=int, help='lujing')
parser.add_argument('--text_column', default="Tweet text", type=str, help='lujing')
parser.add_argument('--label_column', default="text_label", type=str, help='lujing')
parser.add_argument('--device', default="cuda" if torch.cuda.is_available() else "cpu", type=str, help='lujing')
parser.add_argument('--max_length', default=2048, type=int, help='lujing')
parser.add_argument('--lora_alpha', default=16, type=int, help='lujing')
# was type=int: "--lora_dropout 0.1" raised "invalid int value"
parser.add_argument('--lora_dropout', default=0.1, type=float, help='lujing')


parser.add_argument("--output_dir", type=str, default="output/stfllama2")

parser.add_argument("--eval_freq", default=1000, type=int)
parser.add_argument("--save_freq", default=1000, type=int)
parser.add_argument("--log_freq", default=1, type=int)  # log once per step/epoch
parser.add_argument("--learning_rate", type=float, default=1e-4)
parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
parser.add_argument("--num_warmup_steps", type=int, default=100)
parser.add_argument("--weight_decay", type=float, default=0.05)
parser.add_argument("--optim", type=str, default="paged_adamw_32bit")

# was action="store_false" with default=False: passing the flag could never
# change the stored value. With store_true, "--no_fp16" now actually disables
# fp16 downstream (the defaults are unchanged).
parser.add_argument("--no_fp16", action="store_true", default=False)
parser.add_argument("--bf16", action="store_true", default=False)
parser.add_argument("--no_gradient_checkpointing", action="store_true", default=False)
# was action="store_false": this is a step count, not a boolean flag
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)

args = parser.parse_args()

# Probe (and warm the cache of) optional-dependency availability; the return
# values are intentionally unused here.
import_utils.is_accelerate_available()
import_utils.is_bitsandbytes_available()


def return_etl_data(samples):
    """Format one raw QA record into a single prompt string.

    Expects ``samples["turns"]`` to hold at least two turns, the first from
    the user (question) and the second being the answer; asserts on the
    first turn's role. Returns a dict with a single "text" field suitable
    for SFT training.
    """
    turns = samples['turns']
    assert turns[0]['role'] == "user"

    question, answer = turns[0]['text'], turns[1]['text']
    return {"text": f"Question: {question}\n\nAnswer: {answer}"}


# Load the raw JSON dataset from disk (train split only).
rawdataset = load_dataset("json",
                          split="train",
                          data_dir=args.dataset_name_or_path)

# NOTE(review): debug-sized subsample — only the first 10 rows are used.
rawdataset = rawdataset.select(range(10))

original_columns = rawdataset.column_names

# Rewrite every record into a single "text" field and drop the raw columns.
rawdataset = rawdataset.map(
    return_etl_data,
    # batched=True,
    remove_columns=original_columns
)

# Split off a held-out evaluation set.
# NOTE(review): test_size=0.005 of only 10 selected rows may not yield a
# non-empty test split — confirm this still produces at least one example.
rawdataset = rawdataset.train_test_split(test_size=0.005, seed=0)


# Diagnostic only: the computed ratio is informational and not used for training.
def chars_token_ratio(dataset, tokenizer, nb_examples=400):
    """
    Estimate the average number of characters per token in the dataset.
    """
    total_characters = 0
    total_tokens = 0
    examples = zip(range(nb_examples), iter(dataset))
    for _, example in tqdm(examples, total=nb_examples):
        text = example['text']
        total_characters += len(text)
        # Fast tokenizers expose token lists on the encoding; slow ones via tokenize().
        # Subword splitting generally yields more tokens than words.
        if tokenizer.is_fast:
            tokens = tokenizer(text).tokens()
        else:
            tokens = tokenizer.tokenize(text)
        total_tokens += len(tokens)
    return total_characters / total_tokens


# Load the base model 4-bit quantized (QLoRA-style NF4) to reduce GPU memory.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

model = AutoModelForCausalLM.from_pretrained(
    pretrained_model_name_or_path=args.model_name_or_path,
    quantization_config=bnb_config,
    torch_dtype=torch.half,

)
# Disable the generation KV cache for training (commonly required when
# gradient checkpointing is enabled, as it is below via TrainingArguments).
model.config.use_cache = False

# add LoRA layers on top of the quantized base model
peft_config = LoraConfig(
    r=16,  # LoRA rank: capacity of the low-rank update matrices
    task_type=TaskType.CAUSAL_LM,  # use the imported enum rather than a bare string
    lora_alpha=args.lora_alpha,
    lora_dropout=args.lora_dropout,

    bias="none",
)




tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, )
# The tokenizer ships without a pad token here; reuse EOS for padding.
tokenizer.pad_token = tokenizer.eos_token
# Diagnostic only (historically ~3.6); surface it instead of discarding it.
chars_per_token = chars_token_ratio(rawdataset["train"], tokenizer)
print(f"The character to token ratio of the dataset is: {chars_per_token:.2f}")
training_args = TrainingArguments(
    output_dir=args.output_dir,
    dataloader_drop_last=True,
    evaluation_strategy="steps",
    # NOTE(review): step budget comes from --num_epochs while a separate
    # --max_steps flag (same default) goes unused — consolidate the two flags.
    max_steps=args.num_epochs,
    eval_steps=args.eval_freq,
    save_steps=args.save_freq,
    logging_steps=args.log_freq,
    per_device_train_batch_size=args.batch_size,
    per_device_eval_batch_size=args.batch_size,
    learning_rate=args.learning_rate,
    lr_scheduler_type=args.lr_scheduler_type,
    warmup_steps=args.num_warmup_steps,
    optim=args.optim,  # was a hard-coded local that silently ignored --optim
    gradient_accumulation_steps=args.gradient_accumulation_steps,
    gradient_checkpointing=not args.no_gradient_checkpointing,
    fp16=not args.no_fp16,
    # bf16=args.bf16,
    weight_decay=args.weight_decay,
    run_name="llama-7b-finetuned",
    report_to="tensorboard",
    ddp_find_unused_parameters=False,

)


# SFTTrainer injects the LoRA adapters (peft_config) and handles tokenizing
# the "text" field of the datasets.
trainer = SFTTrainer(
    tokenizer=tokenizer,
    model=model,
    args=training_args,
    train_dataset=rawdataset["train"],
    eval_dataset=rawdataset["test"],
    dataset_text_field="text",
    max_seq_length=args.max_length,  # NOTE(review): default is 2048, not 256 as the old comment claimed
    peft_config=peft_config,
    packing=True,  # packs samples into ConstantLengthDataset chunks; uses more GPU memory
)


def print_trainable_parameters(model):
    """
    Prints the number of trainable parameters in the model.
    """
    all_param = sum(p.numel() for _, p in model.named_parameters())
    trainable_params = sum(
        p.numel() for _, p in model.named_parameters() if p.requires_grad
    )
    pct = 100 * trainable_params / all_param
    print(
        f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {pct}"
    )


print_trainable_parameters(trainer.model)

print("Training...")
trainer.train()

print("Saving last checkpoint of the model")
# Unwrap the .module attribute if the model was wrapped for distributed training.
model_to_save = trainer.model.module if hasattr(trainer.model, 'module') else trainer.model
model_to_save.save_pretrained(os.path.join(args.output_dir, "final_checkpoint/"))

# Usage (to load the saved adapter later):
# lora_config = LoraConfig.from_pretrained('outputs')
# model = get_peft_model(model, lora_config)






# Alternatively, merge the adapter weights into the base model and save a
# standalone checkpoint that loads as a plain LLaMA model afterwards.
model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path)
tokenizer = LlamaTokenizer.from_pretrained(args.model_name_or_path)


# NOTE(review): `config` is loaded but never used below — confirm it can go.
config = PeftConfig.from_pretrained(os.path.join(args.output_dir, "final_checkpoint/"))
lora_model = PeftModel.from_pretrained(model, os.path.join(args.output_dir, "final_checkpoint/"))


# Fold the LoRA weights into the base weights; the result has no adapters.
model = lora_model.merge_and_unload()



final_output_path=os.path.join(args.output_dir, "final_outputs")
print(f"Saving the target model to {final_output_path}")
model.save_pretrained(final_output_path)
tokenizer.save_pretrained(final_output_path)


# Usage:
# CUDA_VISIBLE_DEVICES="0,1" python rlhfqlora/STFHGTrain.py