# -*- coding:utf8 -*-
# ==============================================================================
# Copyright 2017 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This module implements data process strategies.
"""

import torch
import os
import json
import logging
import numpy as np
from collections import Counter
from torch.utils.data import Dataset
import tqdm
import pickle
import logging
import itertools
import numpy as np
import copy
logger = logging.getLogger('brc')


def display(message):
    """Emit *message* at INFO level through the module-wide 'brc' logger."""
    logger.info(message)

class BRCDataset(Dataset):
    """
    PyTorch Dataset for the Baidu reading comprehension data.

    Each entry of ``self.dataset`` is one flat (question, document) pair
    built from the raw JSON-lines files. Input files are sharded across
    distributed workers by ``rank``/``world_size`` and each processed
    shard is cached on disk as a pickle under ``cache/``.

    NOTE: :meth:`convert_to_ids` must be called before indexing, since
    :meth:`__getitem__` reads the ``*_token_ids`` fields it creates.
    """
    def __init__(self, max_p_num, max_p_len, max_q_len, is_train,
                 files, rank=0, world_size=1, single_batch=True):
        """
        Args:
            max_p_num: max number of passages kept per document.
            max_p_len: max passage length in tokens (also used to filter
                out answer spans that fall beyond it at training time).
            max_q_len: max question length in tokens.
            is_train: apply training-only filtering of bad samples.
            files: iterable of JSON-lines data files to load.
            rank: shard index of this worker.
            world_size: total number of workers/shards.
            single_batch: when False, every item's passage list is padded
                with empty passages up to ``max_p_num``.
        """
        self.rank = rank
        self.world_size = world_size
        self.is_train = is_train
        self.single_batch = single_batch
        self.max_p_num = max_p_num
        self.max_p_len = max_p_len
        self.max_q_len = max_q_len

        self.dataset = []
        for file in files:
            logger.info('loading data from {}'.format(file))
            subset = self._load_dataset(file, train=self.is_train)
            logger.info('data size of {}:{}'.format(file, len(subset)))
            self.dataset += subset
        logger.info('dataset size: {} questions.'.format(len(self.dataset)))

    def _load_dataset(self, data_path, train=False):
        """
        Load one data file, keeping only the lines of this rank's shard.

        The processed shard is cached as a pickle keyed by file path,
        rank and world size. NOTE(review): the cache key does not include
        the ``train`` flag, so re-loading the same file with a different
        ``train`` value returns the stale cached shard — confirm each
        file is only ever used in one mode.

        Args:
            data_path: the JSON-lines data file to load.
            train: apply training-only sample filtering.
        Returns:
            A list of per-(question, document) sample dicts.
        Raises:
            FileNotFoundError: if ``data_path`` does not exist.
        """
        cache_path = os.path.join(
            'cache',
            data_path.replace('/', '_') + '.{}.{}.pkl'.format(self.rank, self.world_size))
        os.makedirs(os.path.dirname(cache_path), exist_ok=True)
        if os.path.exists(cache_path):
            display('load cached from %s' % cache_path)
            with open(cache_path, 'rb') as cache:
                return pickle.load(cache)
        if not os.path.exists(data_path):
            # was a bare assert; raise so the check survives `python -O`
            raise FileNotFoundError('data file not found: %s' % data_path)
        total, valid = 0, 0
        # the source data is UTF-8 Chinese text; be explicit so decoding
        # does not depend on the platform's default locale encoding
        with open(data_path, encoding='utf-8') as fin:
            data_set = []
            if self.rank == 0:
                fin = tqdm.tqdm(fin)
            for lidx, line in enumerate(fin):
                if self.rank == 0:
                    fin.set_description('processing %d ' % (lidx))
                # round-robin sharding: keep only this rank's lines
                if lidx % self.world_size != self.rank:
                    continue
                total += 1

                sample = json.loads(line.strip())
                if train:
                    # drop unanswerable / out-of-range / low-quality samples
                    if len(sample['answer_spans']) == 0:
                        continue
                    if sample['answer_spans'][0][1] >= self.max_p_len:
                        continue
                    if len(sample['match_scores']) == 0 or sample['match_scores'][0] < 0.1:
                        continue
                valid += 1

                # normalize the field name across data versions
                if 'answer_docs' in sample:
                    sample['answer_passages'] = sample['answer_docs']

                sample['question_tokens'] = sample['segmented_question']

                # emit one flat sample per (question, document) pair
                for d_idx, doc in enumerate(sample['documents']):
                    most_related_para = doc.get('most_related_para', -1)
                    if self.is_train:
                        if not doc.get('is_selected', False):
                            continue
                        if len(doc['segmented_paragraphs']) <= 1:
                            continue
                        if most_related_para >= self.max_p_num:
                            continue
                    new_sample = {'question_id': sample['question_id'],
                                  'doc_idx': d_idx,
                                  'question_tokens': sample['segmented_question'],
                                  'passage_tokens': doc['segmented_paragraphs'],
                                  'most_related_para': most_related_para}
                    data_set.append(new_sample)
        with open(cache_path, 'wb') as cache:
            logger.info('dump cache into %s' % cache_path)
            pickle.dump(data_set, cache)
        logger.info('total:{} valid:{} bad:{}'.format(total, valid, total - valid))

        return data_set

    def __len__(self):
        """Number of (question, document) samples in the dataset."""
        return len(self.dataset)

    def __getitem__(self, idx):
        """
        Return one model-ready item for sample ``idx``.

        Requires :meth:`convert_to_ids` to have been called first.
        Lengths are clipped to ``max_q_len`` / ``max_p_len``; at most
        ``max_p_num`` passages are kept, and when ``single_batch`` is
        False the passage list is padded with empty passages up to
        exactly ``max_p_num``.
        """
        sample = self.dataset[idx]

        one_piece = {'question_id': sample['question_id'],
                     'doc_idx': sample['doc_idx'],
                     'question_token_ids': sample['question_token_ids'],
                     'question_length': min(self.max_q_len, len(sample['question_token_ids'])),
                     'passage_token_ids': [],
                     'passage_length': [],
                     'most_related_para': sample['most_related_para']}
        # keep at most max_p_num passages, clipping each reported length
        for para_idx in range(min(len(sample['passage_tokens']), self.max_p_num)):
            para_token_ids = sample['passage_token_ids'][para_idx]
            one_piece['passage_token_ids'].append(para_token_ids)
            one_piece['passage_length'].append(min(len(para_token_ids), self.max_p_len))
        if not self.single_batch:
            # pad to a fixed passage count so items batch uniformly
            while len(one_piece['passage_token_ids']) < self.max_p_num:
                one_piece['passage_token_ids'].append([])
                one_piece['passage_length'].append(0)
            assert len(one_piece['passage_token_ids']) == self.max_p_num, '{} vs {}'.format(len(one_piece['passage_token_ids']), self.max_p_num)
        else:
            assert len(one_piece['passage_token_ids']) <= self.max_p_num, '{} vs {}'.format(len(one_piece['passage_token_ids']), self.max_p_num)
        # guarantee at least one (possibly empty) passage per item
        while len(one_piece['passage_token_ids']) < 1:
            one_piece['passage_token_ids'].append([])
            one_piece['passage_length'].append(0)
        assert len(one_piece['passage_length']) == len(one_piece['passage_token_ids']), '{} vs {}'.format(len(one_piece['passage_length']), len(one_piece['passage_token_ids']))
        return one_piece

    def word_iter(self, set_name=None):
        """
        Iterate over all question and passage tokens in the dataset.

        Args:
            set_name: unused; kept for backward compatibility (the data
                split is fixed at construction time).
        Returns:
            a generator over tokens
        """
        if self.dataset is not None:
            for sample in self.dataset:
                for token in sample['question_tokens']:
                    yield token
                for paragraph in sample['passage_tokens']:
                    for token in paragraph:
                        yield token

    def convert_to_ids(self, vocab):
        """
        Convert every sample's question and passage tokens to id lists
        in place, stored as ``question_token_ids``/``passage_token_ids``.

        Args:
            vocab: vocabulary object providing ``convert_to_ids(tokens)``.
        """
        for sample_idx, sample in enumerate(self.dataset):
            if sample_idx % 1000 == 0:
                logger.info('convert to ids: sample {}'.format(sample_idx))
            sample['question_token_ids'] = vocab.convert_to_ids(sample['question_tokens'])
            sample['passage_token_ids'] = [vocab.convert_to_ids(paragraph)
                                           for paragraph in sample['passage_tokens']]

