import argparse
import ast
import copy
import json
import logging
import os
import random
import time
from collections import OrderedDict
from typing import Optional, Union, List, Tuple

from mindspore.dataset import GeneratorDataset
from PIL import Image

from mindformers.tools.register import MindFormerRegister, MindFormerModuleType
from mindformers.models.blip2.video_chat2_processor import VideoChat2VideoProcessor

# ms.set_context(device_target='CPU')
video_format_list = ('.mp4', '.avi', '.flv', '.mpeg', '.f4v', '.mkv')
logger = logging.getLogger(__name__)


class MultiImgCapDataLoader:
    """Multiple Image-Caption Dataloader.

    Resolves annotation files and image directories against ``dataset_dir``,
    validates that they exist, builds the matching dataset object and wraps
    it in a ``GeneratorDataset``.
    """
    _default_column_names = ["image", "text"]

    def __new__(cls,
                dataset_dir: str,
                annotation_files: List[str],
                image_dirs: List[str],
                column_names: Optional[Union[List[str], Tuple[str]]] = None,
                stage: Optional[str] = "train",
                repeat_images: Optional[bool] = False,
                shuffle: Optional[bool] = True,
                Vid: Optional[bool] = False,
                **kwargs):
        r"""
        MultiImgCapDataLoader Dataloader API.

        Args:
            dataset_dir (str): The directory which is the parent dir of these datasets.
            annotation_files (list[str]): annotation files, given relative to
                ``dataset_dir``.
            image_dirs (list[str]): image directories, given relative to
                ``dataset_dir`` (one-to-one matching to ``annotation_files``).
            column_names (Optional[Union[List[str], Tuple[str]]]): The output column names,
                a tuple or a list of string with length 2.
            stage (Optional[str]): The supported key words are in ["train", "eval"].
            repeat_images (Optional[bool]): whether repeat image when it has multiple
                corresponding captions.
            shuffle (Optional[bool]): whether to shuffle the dataset.
            Vid (Optional[bool]): if True, build a video dataset
                (MultiVideoCapDataSet) whose frames are decoded by
                VideoChat2VideoProcessor; ``num_frames`` may be passed via
                kwargs (default 1).

        Return:
            A GeneratorDataset for loading multiple image-caption datasets.

        Raises:
            ValueError: Error input for dataset_dir, annotation_files,
                image_dirs or column_names.
        """
        if len(image_dirs) != len(annotation_files):
            raise ValueError(
                "the number of image_dirs should be equal to annotation_files!"
            )

        # Resolve paths into NEW lists instead of mutating the caller's
        # arguments in place (the original clobbered the input lists).
        annotation_files = [os.path.join(dataset_dir, f) for f in annotation_files]
        for annotation_file in annotation_files:
            if not os.path.isfile(annotation_file):
                raise ValueError(f"{annotation_file} does not exist.")

        image_dirs = [os.path.join(dataset_dir, d) for d in image_dirs]
        for image_dir in image_dirs:
            if not os.path.isdir(image_dir):
                raise ValueError(f"{image_dir} does not exist.")

        if column_names is None:
            column_names = cls._default_column_names

        if Vid:
            num_frames = kwargs.pop("num_frames", 1)
            processor = VideoChat2VideoProcessor(num_frames=num_frames)
            multicap_dataset = MultiVideoCapDataSet(image_dirs, annotation_files,
                                                    stage, repeat_images, random_shuffle=shuffle, processor=processor)
        else:
            multicap_dataset = MultiImgCapDataSet(image_dirs, annotation_files,
                                                  stage, repeat_images, random_shuffle=shuffle)

        return GeneratorDataset(multicap_dataset,
                                column_names,
                                shuffle=shuffle,
                                **kwargs)


class MultiImgCapDataSet:
    """MultiImgCapDataSet API.

    Loads one or more JSON annotation files, resolves each sample's media
    path against the matching image directory, and renders a sample's "QA"
    entries into a "###Role: ..." conversation string.

        Args:
            image_dirs (list[str]): directories containing the media files,
                one per annotation file.
            annotation_files (list[str]): JSON files, each holding a list of
                samples; each sample stores the media path (keyed by
                ``media_type``) and a "QA" list.
            stage (Optional[str]): The supported key words are in ["train", "eval"].
            repeat_images (Optional[bool]): eval stage only — duplicate a sample
                once per caption so every caption becomes its own sample.
            media_type (str): annotation key that stores the media path.
            system (str): optional system prompt; must end with " " so the
                "###" separator is tokenized into one token.
            role (tuple): (questioner, answerer) role names.
            start_token (str): opening marker for the media slot.
            end_token (str): closing marker for the media slot.
            random_shuffle (bool): if True, shuffle multi-turn QA lists.
            processor: optional media processor, used by subclasses.

        Return:
            A Dataset for loading multiple image-caption datasets.

        Raises:
            ValueError: unsupported ``stage``, or a non-empty ``system`` that
                does not end with " ".
    """

    def __init__(self,
                 image_dirs,
                 annotation_files,
                 stage="train",
                 repeat_images=False,
                 media_type="image",
                 system="", role=("Human", "Assistant"),
                 start_token="<Image>", end_token="</Image>",
                 random_shuffle=True,  # if True, shuffle the QA list
                 processor=None
                 ):
        if stage not in ("train", "eval"):
            raise ValueError("unsupported stage.")
        if system and system[-1] != " ":
            # Explicit raise instead of assert: asserts are stripped under -O.
            raise ValueError("' ' should be add in the end of system, "
                             "thus '###' will be tokenized into one token.")

        self.annotation = []
        self.media_type = media_type
        self.begin_signal = "###"
        self.end_signal = " "
        self.start_token = start_token
        self.end_token = end_token
        self.system = system
        self.role = role
        self.random_shuffle = random_shuffle
        self.processor = processor
        # Lazy %-style args: the message is only formatted if emitted.
        logger.info("Random shuffle: %s", self.random_shuffle)

        for i, annotation_file in enumerate(annotation_files):
            with open(annotation_file, 'r', encoding='utf-8') as file:
                # NOTE(review): caps each file at 1024 samples — looks like a
                # debugging limit; confirm before production use.
                new_annotation = json.load(file)[:1024]
            for new_ann in new_annotation:
                new_ann[self.media_type] = os.path.join(image_dirs[i],
                                                        new_ann[self.media_type])
            self.annotation.extend(new_annotation)

        if stage == "eval":
            self._build_eval_index(repeat_images)

    def _build_eval_index(self, repeat_images):
        """Build txt<->img id lookup tables; optionally flatten captions.

        With ``repeat_images`` the annotation list is rebuilt so that each
        caption of a sample becomes its own (deep-copied) sample.
        """
        self.txt2img = {}
        self.img2txt = {}
        flattened = []

        txt_id = 0
        for img_id, ann in enumerate(self.annotation):
            self.img2txt[img_id] = []
            for caption in ann["caption"]:
                self.img2txt[img_id].append(txt_id)
                self.txt2img[txt_id] = img_id
                if repeat_images:
                    temp = copy.deepcopy(ann)
                    temp.update({"caption": caption})
                    flattened.append(temp)
                txt_id += 1
        if repeat_images:
            self.annotation = flattened

    def process_qa(self, qa, msg=""):
        """Render a QA list into a role-tagged conversation string.

        Args:
            qa: list of {"q", "a", optional "i"(instruction)} dicts, or the
                string repr of such a list (see MultiVideoCapDataSet).
            msg (str): extra text appended right after the media tokens.

        Returns:
            tuple(str, str): (conversation, instruction); instruction is ""
            when the first QA entry carries no "i" field.
        """
        # QA may be stored as a string repr; parse it with literal_eval
        # instead of eval so annotation data cannot execute code.
        if isinstance(qa, str):
            qa = ast.literal_eval(qa)
        # randomly shuffle qa for conversation
        if self.random_shuffle and len(qa) > 1:
            random.shuffle(qa)

        cur_instruction = ""
        if qa[0].get("i", "") != "":
            cur_instruction = qa[0]["i"] + self.end_signal

        # Instruction (if any) acts as the system message.
        conversation = self.system
        if cur_instruction:
            conversation += cur_instruction

        # rstrip() for the extra " " in msg
        conversation += (
                self.begin_signal + self.role[0] + ": " +
                self.start_token + self.end_token + msg.rstrip() + self.end_signal
        )

        for sentence in qa:
            q = sentence["q"]
            a = sentence["a"]
            # An empty question is common in caption datasets — skip the
            # Human turn and emit only the Assistant turn.
            if q != "":
                conversation += (self.begin_signal + self.role[0] + ": " + q + self.end_signal)
            conversation += (self.begin_signal + self.role[1] + ": " + a + self.end_signal)
        conversation += self.begin_signal

        if cur_instruction:
            cur_instruction += qa[0]["q"]
        return conversation, cur_instruction.strip()

    def __getitem__(self, index):
        """Return (RGB PIL image, {"conversation", "instruction"})."""
        ann = self.annotation[index]
        image = Image.open(ann["image"]).convert("RGB")
        conversation, instruction = self.process_qa(ann["QA"])
        return image, {"conversation": conversation, "instruction": instruction}

    def __len__(self):
        return len(self.annotation)

    def display_item(self, index):
        """display item

        Args:
            index (int): index

        Returns:
            out (OrderedDict): item info
        """
        sample, ann = self[index], self.annotation[index]

        return OrderedDict({
            "file": ann[self.media_type],
            "caption": ann["QA"],
            "image": sample[0],
        })


@MindFormerRegister.register(MindFormerModuleType.DATASET_LOADER)
class MultiVideoCapDataLoader(MultiImgCapDataLoader):
    """Multiple Video-Caption Dataloader.

    Thin wrapper around MultiImgCapDataLoader that forces ``Vid=True`` so
    the underlying dataset is MultiVideoCapDataSet (video frames decoded by
    VideoChat2VideoProcessor) instead of still images.
    """

    def __new__(cls,
                dataset_dir: str,
                annotation_files: List[str],
                image_dirs: List[str],
                column_names: Optional[Union[List[str], Tuple[str]]] = None,
                stage: Optional[str] = "train",
                repeat_images: Optional[bool] = False,
                shuffle: Optional[bool] = True,
                **kwargs):
        """Build a GeneratorDataset of video-caption samples.

        Parameters mirror MultiImgCapDataLoader; ``image_dirs`` holds the
        video directories and ``num_frames`` may be passed via kwargs.
        """
        return super().__new__(cls,
                               dataset_dir,
                               annotation_files,
                               image_dirs,
                               column_names=column_names,
                               stage=stage,
                               repeat_images=repeat_images,
                               shuffle=shuffle,
                               Vid=True,
                               **kwargs)


class MultiVideoCapDataSet(MultiImgCapDataSet):
    """Video variant of MultiImgCapDataSet.

    Reuses the parent's annotation loading and conversation rendering, but
    keys media on "video", wraps the media slot in <Video></Video> tokens,
    and optionally runs a frame-extraction ``processor`` on the video path.
    """

    def __init__(self,
                 image_dirs,
                 annotation_files,
                 stage="train",
                 repeat_images=False,
                 media_type="video",
                 system="", role=("Human", "Assistant"),
                 start_token="<Video>", end_token="</Video>",
                 random_shuffle=True,
                 processor=None
                 ):
        # Same signature as the parent; only the media defaults differ
        # (media_type="video", <Video> tokens).
        super().__init__(
            image_dirs, annotation_files, stage, repeat_images=repeat_images, media_type=media_type,
            system=system, role=role,
            start_token=start_token, end_token=end_token,
            random_shuffle=random_shuffle, processor=processor
        )

    def __getitem__(self, index):
        """Return (video or processed frames, {"conversation", "instruction"})."""
        ann = self.annotation[index]
        conversation, instruction = self.process_qa(ann["QA"])
        # NOTE(review): overwrites the cached annotation in place, so "QA"
        # becomes its string repr after the first access; process_qa parses
        # strings back, but confirm this persistent mutation is intentional.
        ann["QA"] = str(ann["QA"])
        if self.processor is None:
            # Without a processor, return the raw video path.
            return ann["video"], {"conversation": conversation, "instruction": instruction}
        return self.processor(ann["video"]), {"conversation": conversation, "instruction": instruction}


if __name__ == '__main__':
    # Smoke test: build the video dataloader from CLI paths and pull the
    # first few samples through the pipeline.
    start_time = time.time()
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--image_dirs', nargs='+', default=['/home/zhangyouwen/work/data/mobile_video_data/video_train'], type=str,
                            help='set video path.')
    arg_parser.add_argument('--dataset_dir', default='/home/zhangyouwen/work/data/mobile_video_data', type=str,
                            help='set dataset_dir')
    arg_parser.add_argument('--annotation_files', nargs='+', default=['/home/zhangyouwen/work/data/mobile_video_data/train.json'], type=str,
                            help='set annotation_files')
    arg_parser.add_argument('--stage', default='train', type=str,
                            help='set stage')
    cli_args = arg_parser.parse_args()

    loader = MultiVideoCapDataLoader(cli_args.dataset_dir, cli_args.annotation_files, cli_args.image_dirs, ["image", "text", "conversation", "instruction"], shuffle=False)
    video_paths = []
    captions = []
    # Inspect only the first 10 samples.
    for idx, sample in enumerate(loader.create_dict_iterator()):
        if idx >= 10:
            break
        video_paths.append(str(sample["image"]))
        # HACK: eval reconstructs the text column from its string form;
        # kept as-is to preserve behavior.
        captions.append(eval(str(sample["text"])))
