import sys
import gradio as gr
from transformers.integrations import TensorBoardCallback
from torch.utils.tensorboard import SummaryWriter
from transformers import TrainingArguments
from transformers import Trainer, HfArgumentParser
from transformers import AutoTokenizer, AutoModel
import torch
import torch.nn as nn
from peft import get_peft_model, LoraConfig, TaskType
from dataclasses import dataclass, field
import datasets
import os
from transformers.trainer_callback import  TrainerCallback ,TrainerState ,TrainerControl
import gc

#tokenizer = AutoTokenizer.from_pretrained("/root/autodl-tmp/ChatGML-ALL/models/chatglm-6b", trust_remote_code=True)
# Lazily-initialized globals: execute() loads the tokenizer/model on first use
# and reuses them across calls as long as the requested model name matches.
tokenizer = None
model = None
global_model_name = None  # name/path of the currently loaded base model (cache key)
class Config:
    """User-configurable fine-tuning settings collected from the UI.

    Mirrors the CLI flags that ``fine`` forwards to ``HfArgumentParser``
    via ``sys.argv``.
    """

    def __init__(self, model_name, dataset_path, lora_rank, per_device_train_batch_size,
                 gradient_accumulation_steps, max_steps, save_steps, save_total_limit,
                 learning_rate, fp16, remove_unused_columns, logging_steps, output_dir,
                 ):
        self.model_name = model_name  # base model name/path for AutoModel/AutoTokenizer
        self.dataset_path = dataset_path  # directory for datasets.load_from_disk
        self.lora_rank = lora_rank  # LoRA rank "r"
        self.per_device_train_batch_size = per_device_train_batch_size
        self.gradient_accumulation_steps = gradient_accumulation_steps
        self.max_steps = max_steps
        self.save_steps = save_steps
        self.save_total_limit = save_total_limit
        self.learning_rate = learning_rate
        self.fp16 = fp16  # bool: enable mixed-precision training
        self.remove_unused_columns = remove_unused_columns  # bool flag for TrainingArguments
        self.logging_steps = logging_steps
        self.output_dir = output_dir  # where checkpoints/adapter are written

    def toJson(self):
        """Return this config serialized as a valid JSON string.

        Fix: the original did ``str(self.__dict__).replace("'", '"')``, which
        produces invalid JSON for booleans/None (``True`` instead of ``true``)
        and breaks whenever a value contains a quote character.
        """
        import json  # local import so the module's import block is untouched
        return json.dumps(self.__dict__)





@dataclass
class FinetuneArguments:
    # Extra CLI arguments parsed from sys.argv by HfArgumentParser in execute().
    dataset_path: str = field(default="data/alpaca")  # datasets.load_from_disk directory
    model_path: str = field(default="output")  # NOTE(review): parsed but never read in this file — confirm it is still needed
    lora_rank: int = field(default=8)  # LoRA rank "r" passed to LoraConfig


class CastOutputToFloat(nn.Sequential):
    """Sequential wrapper that up-casts its output to float32.

    Wrapped around the LM head so logits stay in fp32 for a numerically
    stable loss even when the base model runs in half precision.
    """

    def forward(self, inputs):
        result = super().forward(inputs)
        return result.to(torch.float32)


def data_collator(features: list) -> dict:
    """Pad a batch of tokenized examples and build the label tensors.

    Labels mask the prompt (first ``seq_len - 1`` tokens) and the right
    padding with -100 so the loss ignores them. Uses the module-global
    ``tokenizer`` for the padding id.
    """
    lengths = [len(f["input_ids"]) for f in features]
    max_len = max(lengths)
    batch_ids = []
    batch_labels = []
    # Iterate longest-first, as the original implementation did.
    for length, feature in sorted(zip(lengths, features), key=lambda pair: -pair[0]):
        ids = feature["input_ids"]
        seq_len = feature["seq_len"]
        pad_count = max_len - length
        label_row = [-100] * (seq_len - 1) + ids[seq_len - 1:] + [-100] * pad_count
        batch_labels.append(torch.LongTensor(label_row))
        batch_ids.append(torch.LongTensor(ids + [tokenizer.pad_token_id] * pad_count))
    return {
        "input_ids": torch.stack(batch_ids),
        "labels": torch.stack(batch_labels),
    }


class ModifiedTrainer(Trainer):
    """Trainer that uses the model's own causal-LM loss and saves only the
    trainable (LoRA/adapter) parameters instead of the full model."""

    def compute_loss(self, model, inputs, return_outputs=False):
        """Forward the batch and return the model-computed LM loss."""
        return model(
            input_ids=inputs["input_ids"],
            labels=inputs["labels"],
        ).loss

    def save_model(self, output_dir=None, _internal_call=False):
        """Save training args plus only the requires_grad parameters.

        Fix: fall back to ``self.args.output_dir`` when ``output_dir`` is
        None — matching the base ``Trainer.save_model`` contract — instead
        of crashing inside ``os.makedirs(None)``.
        """
        from transformers.trainer import TRAINING_ARGS_NAME

        if output_dir is None:
            output_dir = self.args.output_dir
        os.makedirs(output_dir, exist_ok=True)
        torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
        # After get_peft_model only the adapter weights require grad, so this
        # captures just the LoRA parameters, keeping checkpoints small.
        saved_params = {
            k: v.to("cpu") for k, v in self.model.named_parameters() if v.requires_grad
        }
        torch.save(saved_params, os.path.join(output_dir, "adapter_model.bin"))



def fine(config, progress: gr.Progress = None):
    """Translate a ``Config`` into CLI-style flags and run ``execute``.

    ``execute`` fills its dataclasses from ``sys.argv`` via HfArgumentParser,
    so the settings are temporarily appended to ``sys.argv`` here.

    Fix: restore ``sys.argv`` in a ``finally`` block so a failure inside
    ``execute`` no longer leaves the argument list polluted for later calls.
    """
    saved_argv = list(sys.argv)

    extra_args = [
        "--dataset_path", config.dataset_path,
        "--lora_rank", str(config.lora_rank),
        "--per_device_train_batch_size", str(config.per_device_train_batch_size),
        "--gradient_accumulation_steps", str(config.gradient_accumulation_steps),
        "--max_steps", str(config.max_steps),
        "--save_steps", str(config.save_steps),
        "--save_total_limit", str(config.save_total_limit),
        "--learning_rate", str(config.learning_rate),
        "--logging_steps", str(config.logging_steps),
        "--output_dir", config.output_dir,
    ]
    if config.fp16:
        extra_args.append("--fp16")
    # HfArgumentParser accepts explicit "true"/"false" values for bool fields.
    extra_args.append("--remove_unused_columns")
    extra_args.append("true" if config.remove_unused_columns else "false")

    sys.argv.extend(extra_args)
    try:
        execute(config, progress)
    finally:
        sys.argv = saved_argv
class MyCallback(TrainerCallback):
    """Trainer callback that forwards training progress to a gradio bar."""

    def __init__(self, progress=None):
        # May be None when running headless; every hook guards on this.
        self.progress = progress

    def on_train_begin(self, args, state: TrainerState, control, **kwargs):
        if self.progress is None:
            return
        self.progress(0, "训练开始")

    def on_train_end(self, args, state: TrainerState, control, **kwargs):
        if self.progress is None:
            return
        self.progress(1, "训练结束")

    def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        current = state.global_step * 1.0
        total = state.max_steps
        if self.progress is None:
            return
        self.progress(current / total, "训练进度: " + str(state.global_step) + "/" + str(total))



def unload_model():
    """Drop the global model reference and reclaim GPU/host memory."""
    global model
    model = None
    gc.collect()
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
    print("cuda cache cleared")

def execute(config: Config,progress: gr.Progress=None):
    """Run one LoRA fine-tuning pass and save the adapter weights.

    Expects ``sys.argv`` to already carry the HF training flags (``fine``
    sets that up). Reuses the cached global model/tokenizer when the
    requested model name matches the one already loaded; otherwise reloads.
    Saves the PEFT adapter to ``training_args.output_dir`` when done.
    """
    writer = SummaryWriter()
    # Both argument dataclasses are populated from sys.argv.
    finetune_args, training_args = HfArgumentParser(
        (FinetuneArguments, TrainingArguments)
    ).parse_args_into_dataclasses()


    if progress is not None:
        progress(0,"加载基础模型开始")
    global global_model_name
    global tokenizer
    global model

    # Reload only when nothing is cached or a different base model is requested.
    if model is None or config.model_name != global_model_name :
        # init model
        unload_model()  # free the previously loaded model first
        tokenizer = AutoTokenizer.from_pretrained(config.model_name, trust_remote_code=True)
        model = AutoModel.from_pretrained(
            config.model_name,  trust_remote_code=True, device_map="auto"
        )
        global_model_name = config.model_name

    if progress is not None:
        progress(1, "加载基础模型结束")

    # Memory savings during training, plus gradient flow into the inputs of
    # the frozen base model (needed for LoRA with checkpointing).
    model.gradient_checkpointing_enable()
    model.enable_input_require_grads()
    model.is_parallelizable = True
    model.model_parallel = True
    # Keep LM-head logits in fp32 for a numerically stable loss.
    model.lm_head = CastOutputToFloat(model.lm_head)

    model.config.use_cache = (
        False  # silence the warnings. Please re-enable for inference!
    )

    # setup peft
    peft_config = LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        inference_mode=False,
        r=finetune_args.lora_rank,
        lora_alpha=32,
        lora_dropout=0.1,
    )
    model = get_peft_model(model, peft_config)

    # load dataset
    dataset = datasets.load_from_disk(finetune_args.dataset_path)
    print(f"\n{len(dataset)=}\n")

    # start train
    myCallback = MyCallback(progress)  # forwards per-step progress to the UI
    trainer = ModifiedTrainer(
        model=model,
        train_dataset=dataset,
        args=training_args,
        callbacks=[TensorBoardCallback(writer),myCallback],
        data_collator=data_collator,
    )
    trainer.train()
    writer.close()
    # save model
    model.save_pretrained(training_args.output_dir)


if __name__ == "__main__":
    # Manual smoke-test entry point. The commented-out Config below documents
    # a working example invocation of fine(); nothing is executed by default.
    pr = gr.Progress()

    # config = Config(
    #     model_name="/root/autodl-tmp/ChatGML-ALL/models/chatglm-6b",
    #     dataset_path="data/alpaca_data",
    #     lora_rank=8,
    #     per_device_train_batch_size=1,
    #     gradient_accumulation_steps=1,
    #     max_steps=10,
    #     save_steps=5,
    #     save_total_limit=2,
    #     learning_rate=1e-5,
    #     fp16=True,
    #     remove_unused_columns=False,
    #     logging_steps=50,
    #     output_dir="lora/test-lora",
    # )
    # fine(config=config)