# Copyright 2024 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""QwenVL task script"""
import argparse
import io
import os
import sys
import time
import json
import logging

import mindspore as ms
import numpy as np
import PIL
import webdataset as wds
import ftfy

sys.path.append(os.getcwd())
sys.path.append(os.path.join(os.getcwd(), ".."))
sys.path.append(os.path.join(os.getcwd(), "../../"))

from mindspore.communication.management import get_group_size, get_rank
from mindspore.parallel._auto_parallel_context import auto_parallel_context

from mindformers import MindFormerConfig, CLIPVisionConfig, MindFormerRegister, MindFormerModuleType, \
    BatchNormalize, BatchToTensor, build_profile_cb
from mindformers.models import build_processor
from mindformers.core.context import build_context
from mindformers.tools.utils import str2bool

from qwen.qwen_config import QwenConfig
from qwenvl import QwenVL
from qwenvl_config import QwenVLConfig
from qwenvl_dataloader import QwenVLDataLoader
from qwenvl_dataset import QwenVLDataset
from qwenvl_processor import QwenVLImageProcessor, BatchResizeV2
from qwenvl_processor import QwenVLProcessor
from qwenvl_tokenizer import QwenVLTokenizer
from qwenvl_transform import QwenVLTransform
from qwenvl_chat import chat_with_batch_query_wds


class SynchronizeOp(ms.nn.Cell):
    """Minimal cell wrapping a SUM AllReduce, used as a cross-device barrier."""

    def __init__(self):
        super().__init__()
        self.allreduce_sum = ms.ops.AllReduce(ms.ops.ReduceOp.SUM)

    def construct(self, x):
        # The reduced value itself is unused by callers; the collective is the point.
        reduced = self.allreduce_sum(x)
        return reduced


class Synchronizer:
    """Barrier across devices built on a one-element AllReduce.

    Temporarily switches the auto-parallel context to DATA_PARALLEL with
    full_batch=False, runs the collective, then restores the previous settings.
    """

    def __init__(self, rank_size=1) -> None:
        self.synchronize = SynchronizeOp()
        self.rank_size = rank_size

    def __call__(self):
        print(f"Start sync...")
        ctx = auto_parallel_context()
        saved_mode = ctx.get_parallel_mode()
        saved_full_batch = ctx.get_full_batch()
        ctx.set_parallel_mode(ms.context.ParallelMode.DATA_PARALLEL)
        ctx.set_full_batch(False)
        self.synchronize(ms.Tensor(np.ones([1,]).astype(np.int32)))
        # Restore whatever parallel configuration was active before the sync.
        ctx.set_parallel_mode(saved_mode)
        ctx.set_full_batch(saved_full_batch)
        print(f"Finish sync...")


class WdsImageProcessor(QwenVLImageProcessor):
    """Image processor for webdataset inference where images arrive already
    resized/normalized by PreProcess — it only batches and wraps in a Tensor."""

    def preprocess(self, images: ms.Tensor, **kwargs):
        r"""
        Preprocess Required By Base Processor.

        Args:
            images (ms.Tensor, PIL.Image, numpy.array, List[PIL.Image]): A batch of images.

        Return:
            A 4-rank tensor for a batch of images.
        """

        kwargs.pop("other", None)
        if isinstance(images, list):
            # np.row_stack is deprecated (NumPy >= 1.25); np.stack batches the
            # per-image arrays along a new leading axis in one call.
            return ms.Tensor(np.stack(images, axis=0))
        if len(images.shape) == 4:
            # Already batched (N, C, H, W) — assumed; wrap as-is.
            return ms.Tensor(images)
        # Single unbatched image: add the batch dimension.
        return ms.Tensor(np.expand_dims(images, axis=0))


class WdsProcessor(QwenVLProcessor):
    """Processor variant for webdataset inference: text prompts are tokenized
    per item, while images (already preprocessed upstream) pass straight
    through the image processor."""

    def __call__(self, image_input=None, text_input=None, task="caption"):
        """Return {'text': token ids per prompt, 'image': batched images,
        'img_pos': image placeholder positions per prompt}."""
        if isinstance(text_input, str):
            text_input = [text_input]

        text_output = []
        img_pos_output = []
        for item in text_input:
            padded_text, _img_path = self.processor_text(item)
            input_ids, img_pos = self.text_transform({"task": task, task: padded_text}, template={task: "{}"})
            text_output.append(input_ids)
            img_pos_output.append(img_pos)

        return {
            "text": text_output,
            'image': self.image_processor(image_input),
            'img_pos': img_pos_output,
        }


def register_modules():
    """Register every QwenVL component class with the MindFormers registry."""
    registrations = (
        (QwenVLConfig, MindFormerModuleType.CONFIG),
        (QwenVLDataset, MindFormerModuleType.DATASET),
        (QwenVLDataLoader, MindFormerModuleType.DATASET_LOADER),
        (QwenVLTokenizer, MindFormerModuleType.TOKENIZER),
        (QwenVLTransform, MindFormerModuleType.TRANSFORMS),
        (QwenVLProcessor, MindFormerModuleType.PROCESSOR),
        (QwenVLImageProcessor, MindFormerModuleType.PROCESSOR),
        (WdsProcessor, MindFormerModuleType.PROCESSOR),
        (WdsImageProcessor, MindFormerModuleType.PROCESSOR),
        (BatchNormalize, MindFormerModuleType.TRANSFORMS),
        (BatchToTensor, MindFormerModuleType.TRANSFORMS),
    )
    for cls, module_type in registrations:
        MindFormerRegister.register_cls(cls, module_type)


# Flipped to True by main() when --use_parallel is set.
DISTRIBUTED = False


def get_dist_info():
    """Return (rank, world_size); (0, 1) when not running distributed."""
    if not DISTRIBUTED:
        return 0, 1
    return get_rank(), get_group_size()


def print_log(msg):
    """Print msg (flushed) on rank 0 only.

    Note: the parameter was renamed from ``str`` to avoid shadowing the
    builtin; all call sites in this file pass it positionally.
    """
    rank, _ = get_dist_info()
    if rank == 0:
        print(msg, flush=True)


# Get files of current device, specified by rank
def get_tar_files(tar_file_root, node_rank, world_size, node_distribute):
    """Return the shard URLs ("file:<path>") assigned to the current rank.

    Shards are sorted and split evenly across ranks by index.

    NOTE(review): ``world_size`` is shadowed below by the value from
    get_dist_info(); it and ``node_rank``/``node_distribute`` are only used
    for logging — kept for interface compatibility.
    """
    # Was `".tar" in name`, which also matched names like "x.tar.bak";
    # match the suffix only.
    tar_files = sorted(
        f"file:{tar_file_root}/{name}"
        for name in os.listdir(tar_file_root)
        if name.endswith(".tar")
    )
    tar_cnt = len(tar_files)
    rank, world_size = get_dist_info()
    # Even partition of shard indices; this rank takes its own slice.
    split_list = np.array_split(list(range(tar_cnt)), world_size)
    rank_files = [tar_files[i] for i in split_list[rank]]
    print_log(f"rank_length: {len(rank_files)}, all cnt: {tar_cnt}, "
              f"rank: {rank}, node_rank: {node_rank}, "
              f"world_size:{world_size}, node_distribute:{node_distribute}")
    return rank_files


def load_success_file(savefile):
    """Load previously produced captions from a JSON-lines file.

    Returns a {key: caption} dict; empty when the file does not exist yet.
    """
    success_map = {}
    if not os.path.isfile(savefile):
        return success_map
    with open(savefile, 'r', encoding='utf-8') as f:
        for line in f:
            record = json.loads(line.strip())
            success_map[record['key']] = record['caption']
    return success_map


def expand2square(pil_img, background_color=(255, 255, 255)):
    """Pad a PIL image to a centered square filled with background_color."""
    width, height = pil_img.size
    if width == height:
        return pil_img
    side = max(width, height)
    canvas = PIL.Image.new(pil_img.mode, (side, side), background_color)
    if width > height:
        # Wide image: center vertically.
        canvas.paste(pil_img, (0, (side - height) // 2))
    else:
        # Tall image: center horizontally.
        canvas.paste(pil_img, ((side - width) // 2, 0))
    return canvas


class PreProcess:
    """Map one webdataset sample to (image_tensor, query, key).

    __call__ returns None for keys already present in success_map so a
    downstream wds.select can drop them.
    """

    def __init__(self, success_map, tokenizer, mode="cn"):
        """
        Args:
            success_map: {img_key: caption} of samples captioned in prior runs.
            tokenizer: tokenizer exposing from_list_format for query building.
            mode: prompt language, "cn" or "en".

        Raises:
            ValueError: if mode is neither "cn" nor "en".
        """
        self.success_map = success_map
        rank, _ = get_dist_info()
        print(f"rank: {rank}, len success_map: {len(success_map)}")
        # CLIP-style normalization constants.
        mean = (0.48145466, 0.4578275, 0.40821073)
        std = (0.26862954, 0.26130258, 0.27577711)
        image_size = 448
        self.resize = BatchResizeV2(image_size, interpolation="bicubic")
        self.to_tensor = BatchToTensor()
        self.normalize = BatchNormalize(mean, std, is_hwc=False)
        self.tokenizer = tokenizer
        if mode == "cn":
            prompt = '详细描述图片的内容。'
        elif mode == "en":
            # Fixed typo in the prompt text: "detial" -> "detail".
            prompt = 'Please describe this image in detail.'
        else:
            # Was `raise InterruptedError`: wrong exception type for bad input.
            raise ValueError(f"unsupported mode: {mode!r}, expected 'cn' or 'en'")
        input_data = dict(prompt=prompt, img_path="")
        # The query is identical for every sample, so build it once up front.
        self.query = self.tokenize_to_input_ids(input_data)

    def preprocess_img(self, img):
        """Resize -> to-tensor -> normalize a single image."""
        img = self.resize(img)
        img = self.to_tensor(img)
        img = self.normalize(img)
        return img

    def tokenize_to_input_ids(self, data):
        """Build the chat query from an image placeholder plus the prompt."""
        query = self.tokenizer.from_list_format([
            {
                'image': data['img_path']
            },  # Either a local path or an url
            {
                'text': data['prompt']
            },
        ])

        return query

    def __call__(self, sample):
        """sample: (img_buffer, json_info, img_key) as produced by wds.to_tuple."""
        img_buffer, json_info, img_key = sample
        if img_key in self.success_map:
            # Filter: this key was captioned in a previous run.
            return None
        with io.BytesIO(img_buffer) as stream:
            img = PIL.Image.open(stream)
            img.load()  # force a full decode while the stream is still open
            img = img.convert("RGB")
        img = expand2square(img)
        img_tensor = self.preprocess_img(img)
        return (img_tensor, self.query, img_key)


def filter_no_caption_or_no_image(sample):
    """Keep only webdataset samples that carry both a json field and an image field."""
    image_keys = ('png', 'jpg', 'jpeg', 'webp')
    return 'json' in sample and any(key in sample for key in image_keys)


def filter_success_img(sample):
    """Drop samples that PreProcess mapped to None (already-captioned keys)."""
    return sample is not None


def log_and_continue(exn):
    """webdataset error handler: log the exception and skip the bad sample."""
    message = f'Handling webdataset error ({repr(exn)}). Ignoring.'
    logging.exception(message)
    # True tells webdataset to continue iterating.
    return True


def ms_collation_fn(samples, combine_tensors=True, combine_scalars=True):
    """Take a collection of samples (tuples/lists) and create a batch.

    Columns of scalars become numpy arrays, columns of mindspore Tensors are
    stacked, columns of ndarrays become a single ndarray, and anything else
    is returned as a plain list.

    :param samples: list of per-sample tuples/lists
    :param combine_tensors: stack Tensor/ndarray columns into one object
    :param combine_scalars: turn int/float columns into numpy arrays
    :returns: list of batched columns
    """
    assert isinstance(samples[0], (list, tuple)), type(samples[0])
    batched = list(zip(*samples))
    result = []
    for b in batched:
        if isinstance(b[0], (int, float)):
            if combine_scalars:
                b = np.array(list(b))
        elif isinstance(b[0], ms.Tensor):
            if combine_tensors:
                # Use the module-level `ms` alias; the original re-imported
                # mindspore inside this loop on every Tensor column.
                b = ms.stack(list(b))
        elif isinstance(b[0], np.ndarray):
            if combine_tensors:
                b = np.array(list(b))
        else:
            b = list(b)
        result.append(b)
    return result


def to_ms(sample):
    """Convert a batched sample's numpy image array into a mindspore Tensor.

    sample is (imgs, queries, img_keys) where imgs is a numpy.ndarray.
    """
    imgs, queries, img_keys = sample
    return ms.Tensor.from_numpy(imgs), queries, img_keys


def build_dataloader(input_shards, batch_size, workers, success_map, tokenizer, mode="cn"):
    """Build a webdataset pipeline + loader over the given tar shards.

    Args:
        input_shards: list of shard URLs ("file:<path>" strings).
        batch_size: samples per batch; trailing partial batches are dropped.
        workers: number of loader workers.
        success_map: {key: caption} of already-processed samples to skip.
        tokenizer: tokenizer used by PreProcess to build the text query.
        mode: prompt language, "cn" or "en".

    Returns:
        A wds.WebLoader yielding (image_tensor, queries, img_keys) batches.
    """

    preprocess_ins = PreProcess(success_map, tokenizer, mode=mode)
    pipeline = [
        wds.SimpleShardList(input_shards),
        wds.split_by_worker,    # TODO: replace with mindspore split by worker
        # at this point, we have an iterator over the shards assigned to each worker
        wds.tarfile_to_samples(handler=log_and_continue),
        wds.select(filter_no_caption_or_no_image),
        # NOTE(review): only the 'jpg' field is extracted here, yet the filter
        # above also admits png/jpeg/webp samples — confirm those never occur.
        wds.to_tuple("jpg", "json", "__key__"),
        wds.map(preprocess_ins, log_and_continue),
        # PreProcess returns None for already-captioned keys; drop them here.
        wds.select(filter_success_img),
        wds.batched(batch_size, collation_fn=ms_collation_fn, partial=False),
        wds.map(to_ms, log_and_continue)
    ]

    dataset = wds.DataPipeline(*pipeline)
    dataloader = wds.WebLoader(
        dataset,
        batch_size=None,    # batching is already done inside the pipeline
        shuffle=False,
        num_workers=workers,
        persistent_workers=workers > 0)
    return dataloader


def run_single_tar(model, processor, tar_file, batch_size, workers, savefile, mode="cn", max_length=512, max_new_tokens=200):
    """Caption every unprocessed image in one tar shard, appending JSONL records to savefile.

    Returns:
        (model_time, data_time, success_cnt): accumulated model seconds,
        data-loading seconds, and number of successfully captioned images.
    """
    input_shards = [tar_file]
    success_map = load_success_file(savefile)
    dataloader = build_dataloader(input_shards, batch_size, workers, success_map, processor.tokenizer, mode=mode)
    d_start = time.time()
    model_time = 0
    data_time = 0
    success_cnt = 0
    # Append mode + flush-per-record so a crash loses at most the in-flight
    # record; `with` guarantees the handle is closed even if an error escapes.
    with open(savefile, 'a', encoding='utf-8') as fw:
        for _i, batch in enumerate(dataloader):
            # Was `if _i % 10:`, which logged every batch EXCEPT multiples of 10;
            # the intent is a progress line every 10th batch.
            if _i % 10 == 0:
                print_log(f"Process batch: {_i}")
            data_time += (time.time() - d_start)
            images, querys, img_keys = batch
            m_start = time.time()
            try:
                results = chat_with_batch_query_wds(
                    model=model,
                    processor=processor,
                    query=querys,
                    history=None,
                    images=images,
                    max_length=max_length,
                    max_new_tokens=max_new_tokens
                )

                success_cnt += images.shape[0]
                print(results)
            except Exception as e:
                # Best-effort: skip a failing batch rather than abort the shard.
                print(f"error: e:{e}, index: {_i}")
                continue

            model_time += (time.time() - m_start)
            if len(results) != len(querys):
                print_log(f"error file, results: {len(results)}, "
                        f"querys:{len(querys)}, name: {tar_file}")
                continue

            for (caption, img_key) in zip(results, img_keys):
                # Flatten whitespace and repair mojibake before saving.
                caption = caption.strip()
                caption = caption.replace("\n", "").replace("\t", "")
                caption = ftfy.fix_text(caption)
                info = dict(key=img_key, caption=caption)
                ann_str = json.dumps(info, ensure_ascii=False)
                if img_key in success_map:
                    print_log(f"Warning! key is duplicate, key: {img_key}")
                fw.write(f"{ann_str}\n")
                fw.flush()
            d_start = time.time()
    return (model_time, data_time, success_cnt)


def sync_result(success_save_root, world_size):
    """Block until every rank has written its <rank>.success marker file."""
    while True:
        pending = [
            rank for rank in range(world_size)
            if not os.path.isfile(os.path.join(success_save_root, f"{rank}.success"))
        ]
        if not pending:
            break
        print_log(f"Waiting: run_rank_list: {pending}")
        # Poll once a minute; the marker files appear as ranks finish.
        time.sleep(60)
    print_log("Done !")


def main(args):
    """Build the QwenVL model/processor from YAML config plus CLI overrides,
    then caption every tar shard assigned to this rank."""
    # Raw CLI values; None generally means "keep the YAML value".
    config = args.config
    use_parallel = args.use_parallel
    use_past = args.use_past
    ckpt = args.load_checkpoint
    vocab_file = args.vocab_file
    image_size = args.image_size
    seq_length = args.seq_length
    device_id = args.device_id
    batch_size = args.batch_size
    max_new_tokens = args.max_new_tokens

    yaml_path = os.path.expanduser(config)
    assert os.path.exists(yaml_path)

    config = MindFormerConfig(os.path.realpath(yaml_path))
    if vocab_file:
        assert os.path.exists(vocab_file)
        # Same vocab for both the inference processor and the dataset config.
        config.processor.tokenizer.vocab_file = vocab_file
        config.train_dataset.tokenizer.vocab_file = vocab_file
    if use_parallel is not None:
        config.use_parallel = use_parallel
    else:
        use_parallel = config.use_parallel
    if device_id is not None:
        config.context.device_id = device_id

    # init context
    build_context(config)

    synchronizer = None
    if use_parallel:
        # Enable real rank/world-size queries in get_dist_info().
        global DISTRIBUTED
        DISTRIBUTED = True
        node_rank, world_size = get_dist_info()
        synchronizer = Synchronizer(rank_size=world_size)
    else:
        node_rank, world_size = get_dist_info()
    print(f"Node rank: {node_rank}")
    print(f"World size: {world_size}")

    if config.profile:
        config.profile_cb = build_profile_cb(config)

    # use_past (incremental decoding) must be mirrored into the text sub-config.
    if use_past is not None:
        config.model.model_config.use_past = use_past
        config.model.model_config.text_config.use_past = use_past

    if seq_length is not None:
        config.model.model_config.text_config.seq_length = seq_length
        config.processor.tokenizer.max_length = seq_length

    if ckpt is not None:
        config.model.model_config.checkpoint_name_or_path = ckpt

    if image_size is None:
        image_size = config.model.model_config.vision_config.image_size

    if batch_size is not None:
        config.runner_config.batch_size = batch_size
        config.model.model_config.text_config.batch_size = batch_size
    else:
        batch_size = config.runner_config.batch_size

    # Keep model and processor image sizes in sync.
    config.model.model_config.vision_config.image_size = image_size
    config.processor.image_processor.image_size = image_size

    # Materialize typed config objects and instantiate the model + processor.
    model_config = config.model.model_config
    model_config.text_config = QwenConfig(**model_config.text_config)
    model_config.vision_config = CLIPVisionConfig(**model_config.vision_config)
    model_config = QwenVLConfig(**model_config)
    model = QwenVL(model_config)
    model.load_checkpoint(model_config)
    processor = build_processor(config.processor)

    save_root = args.save_root
    tar_file_root = args.tar_file_root

    node_distribute = args.node_distribute
    workers = args.workers
    mode = args.mode
    if node_distribute:
        # Per-rank marker file; rank 0 waits until every rank has written one.
        success_save_root = os.path.join(save_root, "SUCCESS")
        os.makedirs(success_save_root, exist_ok=True)
        success_file = os.path.join(success_save_root, f"{node_rank}.success")
    os.makedirs(save_root, exist_ok=True)

    tar_files = get_tar_files(tar_file_root, node_rank, world_size, node_distribute)

    if len(tar_files) < 1:
        print_log("tar files is empty")
        if node_distribute:
            # Still write the marker so other ranks are not blocked waiting on us.
            fw = open(success_file, "w")
            fw.close()
            if node_rank == 0:
                sync_result(success_save_root, world_size)
        return

    for i, tar_file in enumerate(tar_files):
        print_log(f"process: {tar_file}, {i} / {len(tar_files)}")
        start = time.time()
        _, fullname = os.path.split(tar_file)
        # Results for shard x.tar are appended to <save_root>/x.json (JSONL).
        savefile = os.path.join(save_root, fullname.replace(".tar", '.json'))
        (model_time, data_time, cnt) = run_single_tar(model, processor, tar_file, batch_size, workers, savefile, mode,
                                                      max_length=seq_length, max_new_tokens=max_new_tokens)
        cnt += 1e-8  # avoid division by zero when the shard produced no captions
        total_time = time.time() - start
        print_log(f"Avg Time: model: {model_time/(cnt)}, data: {data_time/cnt}, "
                  f"cnt: {cnt}, total: {total_time}")

    if use_parallel:
        # Device-level barrier before writing node-level completion markers.
        assert synchronizer is not None
        synchronizer()

    if node_distribute:
        fw = open(success_file, "w")
        fw.close()
        if node_rank == 0:
            sync_result(success_save_root, world_size)


if __name__ == "__main__":
    # Command-line interface; None defaults mean "defer to the YAML config".
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='qwenvl/run_qwenvl_stage1_910b.yaml', type=str,
                        help='config file path.')
    parser.add_argument('--mode', default='cn', type=str, help='infer prompt language mode')
    parser.add_argument('--load_checkpoint', default=None, type=str, help='checkpoint name or dir to load.')
    parser.add_argument('--vocab_file', default=None, type=str, help='tokenizer model')
    parser.add_argument('--tar_file_root', default='', type=str, help='path to data')
    parser.add_argument('--save_root', default='./save_result', type=str, help='save_root')
    parser.add_argument('--image_size', default=448, type=int, help='image size')
    parser.add_argument('--seq_length', default=512, type=int, help='seq_length')
    parser.add_argument('--max_new_tokens', default=512, type=int, help='max length for predict output.')
    parser.add_argument('--use_parallel', default=False, type=str2bool, help='open parallel for model.')
    parser.add_argument('--device_id', default=-1, type=int,
                        help='ID of the target device, the value must be in [0, device_num_per_host-1]')
    parser.add_argument('--batch_size', default=32, type=int, help='batch size')
    parser.add_argument('--use_past', default=None, type=str2bool, help='use_past')
    parser.add_argument('--node_distribute', action='store_true', help='single node distribute')
    parser.add_argument('--workers', default=4, type=int)

    args = parser.parse_args()

    # -1 sentinel: fall back to the DEVICE_ID environment variable (default 0).
    if args.device_id == -1:
        args.device_id = int(os.getenv("DEVICE_ID", "0"))

    # Model/dataset/processor classes must be registered before config parsing.
    register_modules()

    main(args)
