# @Time   : 2020/11/27
# @Author : Xiaolei Wang
# @Email  : wxl1999@foxmail.com

# UPDATE:
# @Time   : 2020/12/2
# @Author : Xiaolei Wang
# @Email  : wxl1999@foxmail.com

import torch
from tqdm import tqdm
import os
import json
import random


from crslab.config import DATASET_PATH
from crslab.data.dataloader.base import BaseDataLoader
from crslab.data.dataloader.utils import add_start_end_token_idx, padded_tensor, truncate, merge_utt


class KBRDDataLoader(BaseDataLoader):
    """Dataloader for model KBRD.

    Notes:
        You can set the following parameters in config:

        - ``'context_truncate'``: the maximum length of context.
        - ``'response_truncate'``: the maximum length of response.
        - ``'entity_truncate'``: the maximum length of mentioned entities in context.

        The following values must be specified in ``vocab``:

        - ``'pad'``
        - ``'start'``
        - ``'end'``
        - ``'pad_entity'``

        the above values specify the id of needed special token.

    """

    def __init__(self, opt, dataset, vocab):
        """

        Args:
            opt (Config or dict): config for dataloader or the whole system.
            dataset: data for model.
            vocab (dict): all kinds of useful size, idx and map between token and idx.

        """
        super().__init__(opt, dataset)
        self.pad_token_idx = vocab['pad']
        self.start_token_idx = vocab['start']
        self.end_token_idx = vocab['end']
        self.pad_entity_idx = vocab['pad_entity']
        self.context_truncate = opt.get('context_truncate', None)
        self.response_truncate = opt.get('response_truncate', None)
        self.entity_truncate = opt.get('entity_truncate', None)

        # Optional prompt-tuning setup: only activated when the config carries
        # a 'prompt' section. NOTE(review): reaches into ``opt.opt`` directly,
        # so ``opt`` is assumed to be a Config wrapper here — confirm.
        if 'prompt' in opt.opt:
            tokenize = opt['tokenize']
            prompt = opt['prompt']['raw_prompt']
            self.dpath = os.path.join(DATASET_PATH, "redial", tokenize)
            self._load_vocab()
            self.items_set = set()
            self._create_items_set()
            with open(os.path.join(self.dpath, 'movieID2labelID.json'), 'r', encoding='utf-8') as fr:
                self.idx2label = json.load(fr)
            self.idx_prompt, self.unk_pos = self._create_meta_prompt(prompt)

    def _load_vocab(self):
        """Load token<->id maps from ``token2id.json`` under ``self.dpath``."""
        # Use a context manager so the file handle is closed deterministically
        # (the original ``json.load(open(...))`` leaked the handle).
        with open(os.path.join(self.dpath, 'token2id.json'), 'r', encoding='utf-8') as fr:
            self.tok2ind = json.load(fr)
        self.ind2tok = {idx: word for word, idx in self.tok2ind.items()}

    def _create_items_set(self):
        """Collect the token ids of item mentions (tokens containing '@') into ``self.items_set``."""
        # Iterate items() once instead of keys() + an extra lookup per token.
        for token, idx in self.tok2ind.items():
            # Item mentions contain '@' (e.g. '@12345'); the bare '@' token is excluded.
            if '@' in token and token != '@':
                self.items_set.add(idx)

    def _create_meta_prompt(self, raw_prompt):
        """Convert a whitespace-separated prompt template into token ids.

        Args:
            raw_prompt (str): prompt text containing exactly one ``__unk__`` placeholder.

        Returns:
            tuple: (list of token ids, position of the ``__unk__`` slot).

        Raises:
            ValueError: if ``__unk__`` is absent from ``raw_prompt``.
            KeyError: if a prompt word is missing from the vocabulary.
        """
        prompt_lst = raw_prompt.split(' ')
        unk_pos = prompt_lst.index('__unk__')
        idx_prompt = [self.tok2ind[word] for word in prompt_lst]
        return idx_prompt, unk_pos

    def _create_prompt(self, response):
        """Fill the meta prompt with the first item mentioned in ``response``.

        Args:
            response (list of int): token ids of a response.

        Returns:
            tuple: (filled prompt as a list of token ids, label id); label is 0
            when the response mentions no movie with a known label.
        """
        prompt = list(self.idx_prompt)

        # For now, each response only cares about its first mentioned item.
        # TODO: make use of all mentioned items / labels.
        for idx in response:
            if idx in self.items_set:
                prompt[self.unk_pos] = idx
                label = self.idx2label[str(idx)]  # list of label ids for this movie idx
                if len(label) > 0:
                    return prompt, label[0]
        # No movie mentioned: fall back to label 0.
        return prompt, 0

    def rec_process_fn(self):
        """Augment the dataset for recommendation: one sample per recommended item."""
        augment_dataset = []
        for conv_dict in tqdm(self.dataset):
            if conv_dict['role'] == 'Recommender':
                for movie in conv_dict['items']:
                    augment_conv_dict = {'context_entities': conv_dict['context_entities'], 'item': movie}
                    augment_dataset.append(augment_conv_dict)
        return augment_dataset

    def rec_batchify(self, batch):
        """Collate recommendation samples into a batch dict.

        Args:
            batch (list of dict): each with 'context_entities' and 'item'.

        Returns:
            dict: 'context_entities' (list of lists) and 'item' (LongTensor).
        """
        batch_context_entities = []
        batch_movies = []
        for conv_dict in batch:
            batch_context_entities.append(conv_dict['context_entities'])
            batch_movies.append(conv_dict['item'])

        return {
            "context_entities": batch_context_entities,
            "item": torch.tensor(batch_movies, dtype=torch.long)
        }

    def conv_process_fn(self, *args, **kwargs):
        """Keep only samples whose target role is the recommender."""
        return self.retain_recommender_target()

    def conv_batchify(self, batch):
        """Collate conversation samples into a batch dict.

        Args:
            batch (list of dict): each with 'context_tokens', 'context_entities'
                and 'response'.

        Returns:
            dict: padded 'context_tokens', raw 'context_entities' and padded
            'response' (with start/end tokens added).
        """
        batch_context_tokens = []
        batch_context_entities = []
        batch_response = []
        for conv_dict in batch:
            # Context is truncated from the head so the most recent turns survive.
            batch_context_tokens.append(
                truncate(merge_utt(conv_dict['context_tokens']), self.context_truncate, truncate_tail=False))
            batch_context_entities.append(conv_dict['context_entities'])
            # Reserve 2 positions for the start/end tokens added below.
            batch_response.append(
                add_start_end_token_idx(truncate(conv_dict['response'], self.response_truncate - 2),
                                        start_token_idx=self.start_token_idx,
                                        end_token_idx=self.end_token_idx))

        return {
            "context_tokens": padded_tensor(batch_context_tokens, self.pad_token_idx, pad_tail=False),
            "context_entities": batch_context_entities,
            "response": padded_tensor(batch_response, self.pad_token_idx)
        }

    def get_prompt_data(self, batch_size, shuffle=True):
        """Build the prompt-tuning data iterator."""
        return self.get_data(self.prompt_batchify, batch_size, shuffle, self.prompt_process_fn)

    def prompt_process_fn(self, *args, **kwargs):
        """Keep only samples whose target role is the recommender."""
        return self.retain_recommender_target()

    def prompt_batchify(self, batch):
        """Collate prompt-tuning samples into a batch dict.

        Args:
            batch (list of dict): each with 'context_tokens', 'context_entities'
                and 'response'.

        Returns:
            dict: padded 'context_tokens', 'context_entities', padded
            'response', padded 'prompts' and their 'labels' (LongTensor).
        """
        batch_context_tokens = []
        batch_context_entities = []
        batch_response = []
        batch_prompts = []
        batch_prompt_labels = []
        for conv_dict in batch:
            batch_context_tokens.append(
                truncate(merge_utt(conv_dict['context_tokens']), self.context_truncate, truncate_tail=False))
            batch_context_entities.append(conv_dict['context_entities'])
            # Reserve 2 positions for the start/end tokens added below.
            batch_response.append(
                add_start_end_token_idx(truncate(conv_dict['response'], self.response_truncate - 2),
                                        start_token_idx=self.start_token_idx,
                                        end_token_idx=self.end_token_idx))
            # The prompt is derived from the (already truncated) response.
            prompt_, label_ = self._create_prompt(batch_response[-1])
            # TODO: make use of all labels instead of only the first.
            batch_prompts.append(prompt_)
            batch_prompt_labels.append(label_)

        return {
            "context_tokens": padded_tensor(batch_context_tokens, self.pad_token_idx, pad_tail=False),
            "context_entities": batch_context_entities,
            "response": padded_tensor(batch_response, self.pad_token_idx),
            "prompts": padded_tensor(batch_prompts, self.pad_token_idx, pad_tail=False),
            "labels": torch.tensor(batch_prompt_labels, dtype=torch.long),
        }

    def policy_batchify(self, *args, **kwargs):
        """Policy task is not supported by KBRD; intentionally a no-op."""
        pass