# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

# Enable CUDA managed (unified) memory in Paddle.
# NOTE(review): set via os.environ before `import paddle` below — presumably
# the flag must be in the environment before the framework initializes; confirm.
os.environ["FLAGS_use_cuda_managed_memory"] = "true"
from dataclasses import dataclass, field
from functools import partial
from typing import Dict, List, Union
import json
from PIL import Image
import numpy as np
import paddle
from sklearn.metrics import accuracy_score
from paddlenlp.trainer import Trainer
from paddlenlp.data import DataCollatorWithPadding
from paddlenlp.datasets import load_dataset
from paddlenlp.metrics import BLEU, Rouge1, Rouge2, RougeL
from paddlenlp.trainer import PdArgumentParser, TrainingArguments, get_last_checkpoint
from paddlenlp.transformers import ChatGLMTokenizer
from visualglm import VisualGLM, VisualGLMConfig
from visualglm import BlipImageEvalProcessor
from paddlenlp.utils.log import logger


@dataclass
class DataArgument:
    """Dataset location and sequence-length / generation settings.

    NOTE(review): ``src_length`` and ``tgt_length`` were each declared twice
    in the original dataclass; the later declarations (256 / 512) silently
    overrode the earlier ones (128 / 180). The duplicates are removed here,
    keeping the effective defaults and the original positional field order
    (annotation order kept the first occurrence's position).
    """

    task_name_or_path: str = field(default="./fewshot-data/", metadata={"help": "Path to data"})
    src_length: int = field(default=256, metadata={"help": "Source length for generation."})
    tgt_length: int = field(default=512, metadata={"help": "Target length for generation."})
    num_beams: int = field(default=5, metadata={"help": "The number of beams."})
    generate_num: int = field(default=100, metadata={"help": "Save first k examples generation result in dev dataset"})


@dataclass
class ModelArgument:
    """Model selection and fine-tuning-strategy switches."""

    # Pretrained checkpoint to load (built-in name or a local path).
    model_name_or_path: str = field(
        default="./visualglm",
        metadata={"help": "Build-in pretrained model name or the path to local model."},
    )
    # Parameter-efficient fine-tuning toggles.
    prefix_tuning: bool = field(
        default=False,
        metadata={"help": "Whether to use Prefix Tuning technique"},
    )
    lora: bool = field(
        default=False,
        metadata={"help": "Whether to use LoRA technique"},
    )
    # When True, evaluation decodes with model.generate() instead of logits.
    do_generation: bool = field(
        default=False,
        metadata={"help": "Whether to do generation for evaluation"},
    )


def init_args(model_args:ModelArgument, data_args:DataArgument, training_args:TrainingArguments):
    """Overwrite the parsed CLI arguments in place with a fixed debug preset.

    NOTE: this clobbers whatever the user passed on the command line; it is
    a hard-coded configuration for this training run.
    """
    model_overrides = {
        "model_name_or_path": "THUDM/chatglm-6b",
        "do_generation": True,
    }
    data_overrides = {
        "task_name_or_path": "./visualglm_sample_2w",
        "src_length": 128,
        "tgt_length": 128,
    }
    training_overrides = {
        "max_steps": 3,
        "learning_rate": 3e-5,
        "warmup_steps": 20,
        "eval_steps": 1,
        "logging_steps": 20,
        "save_steps": 30,
        "save_total_limit": 1,
        "output_dir": "./checkpoints/visualglm",
        "per_device_eval_batch_size": 4,
        "per_device_train_batch_size": 4,
        "gradient_accumulation_steps": 32,
        "fp16": True,
        "fp16_opt_level": "O2",
        "recompute": False,
        "do_train": True,
        "do_eval": True,
    }
    for target, overrides in (
        (model_args, model_overrides),
        (data_args, data_overrides),
        (training_args, training_overrides),
    ):
        for attr, value in overrides.items():
            setattr(target, attr, value)

def read_local_dataset(path):
    """Yield one JSON-decoded record per line of the jsonl file at *path*.

    Each line must contain a single JSON object; surrounding whitespace
    (including the trailing newline) is stripped before decoding.
    """
    with open(path, "r", encoding="utf-8") as handle:
        for raw_line in handle:
            yield json.loads(raw_line.strip())


def convert_pretrain_example(example, processor:BlipImageEvalProcessor, image_length:int, tokenizer:ChatGLMTokenizer, data_args, is_test=True):
    """Convert one raw {'img', 'prompt', 'label'} record into model features.

    The source token sequence is laid out as::

        <img> [image_length pad tokens] </img>问：{prompt}\\n答：

    The run of pad tokens is a placeholder for the visual features;
    ``pre_image_length`` tells the model how many tokens precede that run.

    Args:
        example: dict with keys 'img' (image file path), 'prompt', 'label'.
        processor: image preprocessor producing the model's pixel input.
        image_length: number of placeholder tokens reserved for the image.
        tokenizer: ChatGLM tokenizer.
        data_args: supplies src_length / tgt_length truncation limits.
        is_test: when True, build generation-style features (label kept as a
            padded token-id sequence); when False, build teacher-forcing
            features (labels masked with -100 outside the answer span).

    Returns:
        dict with 'images', 'input_ids', 'labels', 'pre_image_length'.
    """
    image = Image.open(example['img']).convert('RGB')
    image = processor(image)
    # Three source segments: opening tag, image placeholder run, closing tag + prompt.
    input0 = tokenizer.encode("<img>", add_special_tokens=False)['input_ids']
    input1 = [tokenizer.pad_token_id] * image_length
    input2 = tokenizer.encode("</img>问："+example['prompt']+"\n答：", add_special_tokens=False)['input_ids']
    a_ids = sum([input0, input1, input2], [])
    if is_test:
        # Reserve 2 positions for the special tokens added next.
        if len(a_ids) > data_args.src_length - 2:
            a_ids = a_ids[: data_args.src_length - 2]
        a_ids = tokenizer.build_inputs_with_special_tokens(a_ids)
        # Right-pad the source to exactly src_length.
        a_ids += [tokenizer.pad_token_id] * (data_args.src_length - len(a_ids))
        # Generation-style reference: plain padded token ids (no -100 masking).
        labels = tokenizer(example['label'], max_length=data_args.tgt_length, truncation=True, padding="max_length")["input_ids"]
        pre_image = len(input0)  # tokens preceding the image placeholder run
        return {
            "images": image,
            "input_ids": a_ids,
            "labels": labels,
            "pre_image_length": pre_image
        }
    else:
        b_ids = tokenizer.encode(text=example['label'], add_special_tokens=False)['input_ids']
        # Reserve 1 source / 2 target positions for special tokens.
        if len(a_ids) > data_args.src_length - 1:
            a_ids = a_ids[: data_args.src_length - 1]
        if len(b_ids) > data_args.tgt_length - 2:
            b_ids = b_ids[: data_args.tgt_length - 2]
        pre_image = len(input0)
        input_ids = tokenizer.build_inputs_with_special_tokens(a_ids, b_ids)

        # Positions before the BOS token are prompt context: mask them with
        # -100 so they are excluded from the loss (mask_position + 1 equals
        # context_length, so labels align 1:1 with input_ids).
        context_length = input_ids.index(tokenizer.bos_token_id)
        mask_position = context_length - 1
        labels = [-100] * context_length + input_ids[mask_position+1:]
        max_seq_length = data_args.src_length + data_args.tgt_length
        pad_len = max_seq_length - len(input_ids)
        input_ids = input_ids + [tokenizer.pad_token_id] * pad_len
        labels = labels + [tokenizer.pad_token_id] * pad_len
        # Padding positions are likewise excluded from the loss.
        labels = [(l if l != tokenizer.pad_token_id else -100) for l in labels]
        return {
                "images": image,
                "input_ids": input_ids,
                "labels": labels,
                "pre_image_length": pre_image
        }


class ChatGLMTrainer(Trainer):
    """Trainer adding generation-based evaluation and perplexity logging.

    When ``do_generation`` is True, ``prediction_step`` decodes with
    ``model.generate`` and returns generated token ids; otherwise it compares
    argmax LM logits against the shifted labels for token-level metrics.
    """

    def __init__(self, data_args, do_generation: bool, **kwargs):
        # data_args supplies tgt_length / num_beams for building generation kwargs.
        super().__init__(**kwargs)
        self.data_args = data_args
        self.do_generation = do_generation

    def evaluate(self, eval_dataset=None, ignore_keys=None, metric_key_prefix: str = "eval", **gen_kwargs):
        """Stash generation kwargs for prediction_step, then run the base evaluate."""
        gen_kwargs = gen_kwargs.copy()
        # Prefer an explicit generation_max_length on data_args; otherwise
        # fall back to the target-side length limit.
        gen_kwargs["max_length"] = (
            self.data_args.generation_max_length
            if hasattr(self.data_args, "generation_max_length")
            else self.data_args.tgt_length
        )
        gen_kwargs["num_beams"] = self.data_args.num_beams
        self._gen_kwargs = gen_kwargs

        return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)

    def predict(self, test_dataset, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        """Same generation-kwarg handling as evaluate(), for Trainer.predict."""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            self.data_args.generation_max_length
            if hasattr(self.data_args, "generation_max_length")
            else self.data_args.tgt_length
        )
        gen_kwargs["num_beams"] = self.data_args.num_beams
        self._gen_kwargs = gen_kwargs

        return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)

    def prediction_step(
        self,
        model,
        inputs,
        prediction_loss_only: bool,
        ignore_keys=None,
    ):
        """Run one evaluation step in one of three modes.

        * loss only     -> defer entirely to the base class;
        * no generation -> argmax the LM logits, keep positions whose label
                           is not -100;
        * generation    -> decode and return generated token ids, right-padded
                           with -100 so they stack into one tensor.
        """
        if prediction_loss_only:
            return super().prediction_step(model, inputs, prediction_loss_only, ignore_keys)
        elif not self.do_generation:
            loss, logits, labels = super().prediction_step(model, inputs, prediction_loss_only, ignore_keys)
            # assumes the LM logits are the first element of the logits tuple — TODO confirm
            lm_logits = logits[0]
            all_preds = []
            all_labels = []
            # Shift by one: logits at position i predict the token at i+1.
            for p, l in zip(lm_logits[..., :-1, :].argmax(axis=-1), labels[..., 1:]):
                all_preds.append(p[l != -100])
                all_labels.append(l[l != -100])

            return (loss, all_preds, all_labels)

        loss = None

        n_token_id = self.tokenizer.convert_tokens_to_ids("<n>")
        model.eval()
        with paddle.no_grad():
            # "sampling" with top_k=1 is effectively greedy decoding.
            generated_tokens = model.generate(
                **inputs,
                **self._gen_kwargs.copy(),
                decode_strategy="sampling",
                top_k=1,
                bos_token_id=self.tokenizer.bos_token_id,
                eos_token_id=self.tokenizer.end_token_id,
                pad_token_id=self.tokenizer.pad_token_id,
                use_cache=True,
            )[0]
            all_preds = []
            for pred_tokens in generated_tokens:
                if len(pred_tokens) == 0:
                    all_preds.append([])
                    continue
                # Strip padding and the "<n>" newline token from the output.
                pred_tokens = pred_tokens[pred_tokens != self.tokenizer.pad_token_id]
                if len(pred_tokens) == 0:
                    all_preds.append([])
                    continue
                pred_tokens = pred_tokens[pred_tokens != n_token_id].tolist()
                all_preds.append(pred_tokens)
            # Right-pad every prediction with -100 so they form a rectangle.
            max_pred_length = max([len(x) for x in all_preds])
            for index, preds in enumerate(all_preds):
                all_preds[index] = preds + [-100] * (max_pred_length - len(preds))
            all_preds = paddle.to_tensor(all_preds)

            if "labels" in inputs:
                all_labels = []
                for index, label in enumerate(inputs["labels"]):
                    # Truncate labels to the prediction length and mirror the
                    # -100 padding mask onto them.
                    # NOTE(review): the in-place assignment below may mutate
                    # inputs["labels"] if slicing returns a view — confirm.
                    label = label[:max_pred_length]
                    if len(all_preds[index]) != 0:
                        label[all_preds[index] == -100] = -100
                    all_labels.append(label)
                all_labels = paddle.to_tensor(all_labels)
            else:
                all_labels = None

        return (loss, all_preds, all_labels)

    def log(self, logs: Dict[str, float], **kwargs) -> None:
        """Log as usual, adding perplexity (exp of the loss) where available."""
        if "loss" in logs:
            logs["ppl"] = np.exp(logs["loss"])
        if "eval_loss" in logs:
            logs["eval_ppl"] = np.exp(logs["eval_loss"])

        super(ChatGLMTrainer, self).log(logs, **kwargs)
        

def compute_metrics_do_generation(eval_preds):
    """Score generated token ids against references with ROUGE-1/2/L and BLEU-4.

    Padding positions marked -100 are removed from both predictions and
    labels before scoring.
    """
    valid_predictions = [pred[pred != -100] for pred in eval_preds.predictions]
    valid_references = [ref[ref != -100] for ref in eval_preds.label_ids]

    rouge1_metric = Rouge1()
    rouge2_metric = Rouge2()
    rougel_metric = RougeL()
    bleu4_metric = BLEU(n_size=4)

    # ROUGE-1/2 score the whole corpus in one call; ROUGE-L and BLEU-4
    # accumulate instance by instance.
    for prediction, reference in zip(valid_predictions, valid_references):
        rougel_metric.add_inst(prediction, [reference])
        bleu4_metric.add_inst(prediction, [reference])

    return {
        "rouge1": rouge1_metric.score(valid_predictions, valid_references),
        "rouge2": rouge2_metric.score(valid_predictions, valid_references),
        "rougel": rougel_metric.score(),
        "bleu4": bleu4_metric.score(),
    }


def compute_metrics(eval_preds):
    """Token-level accuracy over non-ignored positions.

    Args:
        eval_preds: object with ``predictions`` and ``label_ids``, each a
            sequence of per-example token-id arrays in which -100 marks
            positions to ignore.

    Returns:
        dict with a single key ``"accuracy"`` (float in [0, 1]; 0.0 when
        there are no valid tokens).
    """
    predictions = [x[x != -100] for x in eval_preds.predictions]
    references = [x[x != -100] for x in eval_preds.label_ids]
    # Per-example arrays can have different lengths after masking, so they
    # must be concatenated; np.array(...).flatten() on ragged input builds an
    # object array and breaks on modern NumPy.
    y_pred = np.concatenate(predictions) if predictions else np.array([])
    y_true = np.concatenate(references) if references else np.array([])
    # Mean of element-wise matches — equivalent to sklearn's accuracy_score,
    # with an explicit guard for empty input.
    accuracy = float((y_true == y_pred).mean()) if y_true.size else 0.0
    return {
        "accuracy": accuracy,
    }

 
def main():
    """Entry point: parse arguments, load VisualGLM, then train and evaluate."""
    # paddle.set_default_dtype('float16')
    path = os.path.dirname(os.path.abspath(__file__))
    parser = PdArgumentParser((ModelArgument, DataArgument, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # NOTE(review): init_args overwrites many CLI-supplied values with a
    # hard-coded debug preset; remove the call to honor command-line flags.
    init_args(model_args, data_args, training_args)

    training_args.print_config(model_args, "Model")
    training_args.print_config(data_args, "Data")

    paddle.set_device(training_args.device)

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, world_size: {training_args.world_size}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16 or training_args.bf16}"
    )

    # Look for a resumable checkpoint unless overwriting is requested.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 1:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Load weights as float16 only under pure-fp16 (O2) training.
    dtype = paddle.get_default_dtype()
    if training_args.fp16_opt_level == "O2":
        if training_args.fp16:
            dtype = "float16"

    # Load the pretrained language model.
    # NOTE(review): checkpoint path is hard-coded to "./visualglm";
    # model_args.model_name_or_path is only used for the tokenizer below —
    # confirm this split is intended.
    model:VisualGLM = VisualGLM.from_pretrained(
        "./visualglm",
        load_state_as_np=True,
        dtype=dtype,
        tensor_parallel_degree=training_args.tensor_parallel_degree,
        tensor_parallel_rank=training_args.tensor_parallel_rank,
        )
    # config = VisualGLMConfig.from_json_file(os.path.join(path, './visualglm/config.json'))
    # model = VisualGLM(config)

    # Freeze everything except the language-projection parameters.
    model.mark_only_language_projection_as_trainable()
    model.print_trainable_parameters()

    tokenizer = ChatGLMTokenizer.from_pretrained(model_args.model_name_or_path)

    # Load the dataset.
    # NOTE(review): train and dev both read the same dataset.jsonl file —
    # there is no held-out split here.
    train_ds = load_dataset(
        read_local_dataset, path=os.path.join(data_args.task_name_or_path, "dataset.jsonl"), lazy=False
    )
    dev_ds = load_dataset(
        read_local_dataset, path=os.path.join(data_args.task_name_or_path, "dataset.jsonl"), lazy=False
    )
    processor = BlipImageEvalProcessor(224)
    trans_func = partial(convert_pretrain_example, processor=processor, image_length=model.image_length, tokenizer=tokenizer, data_args=data_args)

    # is_test=False produces -100-masked teacher-forcing labels; the default
    # (is_test=True) produces generation-style padded labels.
    if model_args.do_generation:
        train_ds = train_ds.map(partial(trans_func, is_test=False))
        test_ds = dev_ds.map(trans_func)
    else:
        train_ds = train_ds.map(partial(trans_func, is_test=False))
        test_ds = dev_ds.map(partial(trans_func, is_test=False))

    collate_fn = DataCollatorWithPadding(
        tokenizer=tokenizer, max_length=data_args.src_length + data_args.tgt_length, padding=True
    )

    # NOTE(review): eval_dataset is dev_ds while evaluate() below receives
    # test_ds — presumably .map mutates dev_ds in place so they are the same
    # object; verify against the dataset API.
    trainer = ChatGLMTrainer(
        model=model,
        args=training_args,
        train_dataset=train_ds,
        eval_dataset=dev_ds,
        tokenizer=tokenizer,
        compute_metrics=compute_metrics_do_generation if model_args.do_generation else compute_metrics,
        data_collator=collate_fn,
        data_args=data_args,
        do_generation=model_args.do_generation,
    )
    # if training_args.fp16_opt_level == "O2":
    #     trainer.disable_autocast_context_manager()

    if training_args.do_train:
        train_result = trainer.train(resume_from_checkpoint=last_checkpoint)
        # trainer.save_model(merge_tensor_parallel=training_args.tensor_parallel_degree > 1)
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    if training_args.do_eval:
        eval_result = trainer.evaluate(test_ds)
        trainer.log_metrics("test", eval_result)


if __name__ == "__main__":
    # Disable global AMP auto-casting; mixed precision is configured through
    # training_args (fp16 / fp16_opt_level set in init_args) instead.
    with paddle.amp.auto_cast(enable=False):
        main()
