# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import os
import time
import pickle
import argparse
import mindspore
import numpy as np
from tqdm import tqdm
import mindspore.ops as P
import mindspore.dataset as ds
import mindspore.common.dtype as mstype
from mindspore.mindrecord import FileWriter


# NOTE: not fully finished — the loading flow differs somewhat from the
# original, but it has been confirmed there are no fatal problems.
# Written so as to modify the original code as little as possible.
def unison_shuffle(data, seed=None):
    """Shuffle the y/c/r sequences of a split with one shared permutation.

    Args:
        data: dict with byte keys b'y' (labels), b'c' (contexts) and
            b'r' (responses); the three sequences must have equal length.
        seed: optional int for a reproducible permutation.

    Returns:
        A new dict with the same keys, each value converted to an
        np.ndarray and reordered by the same random permutation.

    Raises:
        ValueError: if y, c and r do not all have the same length.
    """
    if seed is not None:
        np.random.seed(seed)

    y = np.array(data[b'y'])
    c = np.array(data[b'c'])
    r = np.array(data[b'r'])

    # Explicit check instead of `assert`: asserts vanish under `python -O`.
    if not len(y) == len(c) == len(r):
        raise ValueError("y, c and r must have the same length")
    p = np.random.permutation(len(y))
    return {b'y': y[p], b'c': c[p], b'r': r[p]}


def split_c(c, split_id):
    """Split a flat token-id list into turns at every separator token.

    Args:
        c: list of token ids (an example context).
        split_id: separator token id (conf[_EOS_]).

    Returns:
        Nested list of turns. A trailing empty turn is dropped unless it
        is the only turn (so the result is never an empty list).
    """
    turns = [[]]
    for token in c:
        if token == split_id:
            # separator: start collecting the next turn
            turns.append([])
        else:
            turns[-1].append(token)
    # drop a trailing empty turn produced by a separator at the end
    if len(turns) > 1 and not turns[-1]:
        del turns[-1]
    return turns


def normalize_length(_list, length, cut_type='tail'):
    """Pad or truncate a (possibly nested) list to exactly `length` items.

    Args:
        _list: a flat list (e.g. a response or a single turn) or a nested
            list (e.g. the list of turns). Not modified by this call.
        length: target length.
        cut_type: 'head' or 'tail' — which side to KEEP when truncating
            ('tail' keeps the last `length` items).

    Returns:
        (normalized_list, real_length) where real_length is
        min(len(_list), length); an empty input yields ([0]*length, 0).

    Raises:
        ValueError: if truncation is required and cut_type is invalid
            (the original silently returned None in that case).
    """
    real_length = len(_list)
    if real_length == 0:
        return [0] * length, 0

    if real_length <= length:
        if isinstance(_list[0], list):
            # Fresh empty lists: `[[]] * k` would alias a single shared
            # list object, a mutation hazard for downstream in-place work.
            padding = [[] for _ in range(length - real_length)]
        else:
            padding = [0] * (length - real_length)
        # Return a new list instead of extending the caller's list in place.
        return list(_list) + padding, real_length

    if cut_type == 'head':
        return _list[:length], length
    if cut_type == 'tail':
        return _list[-length:], length
    raise ValueError("cut_type must be 'head' or 'tail', got %r" % cut_type)


class DAM_dataset():
    """In-memory dataset for the DAM (Deep Attention Matching) model.

    Loads a pickled (train, val, test) tuple and eagerly preprocesses
    every full batch of raw samples into fixed-shape lists:
      - turns: per sample a (max_turn_num, max_turn_len) token-id matrix
      - every_turn_len / tt_turns_len: real lengths before padding
      - response / response_len: padded response and its real length
      - label: the relevance label
    Samples beyond the last multiple of batch_size are dropped.
    """

    def __init__(self, data_path, max_turn_num=10, max_turn_len=50, EOS=28270, mode="train"):
        """
        Args:
            data_path: pickle file holding a (train, val, test) tuple;
                each split is a dict with byte keys b'y', b'c', b'r'.
            max_turn_num: maximum number of context turns kept per sample.
            max_turn_len: maximum token length per turn and per response.
            EOS: token id that separates turns inside a flat context.
            mode: "train" or "val"; any other value selects the test split.
        """
        super(DAM_dataset, self).__init__()
        # Unused in this class; kept so the public attribute set is unchanged.
        self.cast = P.Cast()
        self.max_turn_num = max_turn_num
        self.max_turn_len = max_turn_len
        self.EOS = EOS
        self.batch_size = 256
        # NOTE(review): pickle.load is unsafe on untrusted input — data_path
        # must come from a trusted source.
        with open(data_path, "rb") as f:
            train, val, test = pickle.load(f, encoding="bytes")
        if mode == "train":
            self.data = train
        elif mode == "val":
            self.data = val
        else:
            self.data = test

        self.turns = []
        self.tt_turns_len = []
        self.every_turn_len = []

        self.response = []
        self.response_len = []

        self.label = []
        # Floor division: any trailing partial batch is intentionally dropped.
        num_batches = len(self.data[b'y']) // self.batch_size
        for batch_index in tqdm(range(num_batches)):
            (_turns, _tt_turns_len, _every_turn_len,
             _response, _response_len, _label) = self.build_one_batch(
                 self.data, batch_index, turn_cut_type='tail', term_cut_type='tail')

            self.turns.extend(_turns)
            self.tt_turns_len.extend(_tt_turns_len)
            self.every_turn_len.extend(_every_turn_len)

            self.response.extend(_response)
            self.response_len.extend(_response_len)

            self.label.extend(_label)
        print("read over")

    def produce_one_sample(self, data, index, split_id, max_turn_num, max_turn_len,
                           turn_cut_type='tail', term_cut_type='tail'):
        """Convert one raw (context, response, label) triple to padded form.

        Args:
            data: split dict with byte keys b'y', b'c', b'r'.
            index: sample index inside the split.
            split_id: token id separating turns (conf[_EOS_]).
            max_turn_num: turns kept per context (e.g. 10).
            max_turn_len: tokens kept per turn / response (e.g. 50).
            turn_cut_type, term_cut_type: 'head' or 'tail' truncation side.

        Returns:
            (y, nor_turns_nor_c, nor_r, turn_len, term_len, r_len)
        """
        c = data[b'c'][index]
        # Copy r: normalize_length may work on its argument in place.
        r = data[b'r'][index][:]
        y = data[b'y'][index]

        turns = split_c(c, split_id)
        # Normalize the turn list itself to max_turn_num entries.
        nor_turns, turn_len = normalize_length(turns, max_turn_num, turn_cut_type)

        nor_turns_nor_c = []
        term_len = []
        # Normalize every single turn to max_turn_len tokens.
        # (Renamed from `c` — the original loop variable shadowed the context.)
        for turn in nor_turns:
            nor_c, nor_c_len = normalize_length(turn, max_turn_len, term_cut_type)
            nor_turns_nor_c.append(nor_c)
            term_len.append(nor_c_len)

        nor_r, r_len = normalize_length(r, max_turn_len, term_cut_type)

        return y, nor_turns_nor_c, nor_r, turn_len, term_len, r_len

    def build_one_batch(self, data, batch_index, turn_cut_type='tail', term_cut_type='tail'):
        """Preprocess batch_size consecutive samples starting at batch_index.

        Returns:
            Six parallel lists:
            (turns, tt_turns_len, every_turn_len, response, response_len, label).
        """
        _turns = []
        _tt_turns_len = []
        _every_turn_len = []

        _response = []
        _response_len = []

        _label = []

        for offset in range(self.batch_size):
            index = batch_index * self.batch_size + offset
            y, nor_turns_nor_c, nor_r, turn_len, term_len, r_len = self.produce_one_sample(
                data, index, self.EOS, self.max_turn_num, self.max_turn_len,
                turn_cut_type, term_cut_type)

            _label.append(y)
            _turns.append(nor_turns_nor_c)
            _response.append(nor_r)
            _every_turn_len.append(term_len)
            _tt_turns_len.append(turn_len)
            _response_len.append(r_len)

        return _turns, _tt_turns_len, _every_turn_len, _response, _response_len, _label

    def __len__(self):
        """Number of preprocessed samples (always a multiple of batch_size)."""
        return len(self.turns)

    def __getitem__(self, index):
        """Return (turns, turn_len, response, response_len, label) for one sample.

        Note: tt_turns_len is computed in __init__ but is not part of the
        returned columns (matches the GeneratorDataset column layout).
        """
        turns = self.turns[index]
        turn_len = self.every_turn_len[index]
        response = self.response[index]
        response_len = self.response_len[index]
        label = self.label[index]

        return turns, turn_len, response, response_len, label


def get_dataset(data_path, mode, repeat, batch_size, shard_num=None, shard_id=None, num_workers=1):
    """Build a batched, repeated MindSpore GeneratorDataset over DAM samples.

    Args:
        data_path: pickle file consumed by DAM_dataset.
        mode: "train" (shuffled, optionally sharded) or any other split
            name (deterministic order, no sharding).
        repeat: number of epochs to repeat the dataset.
        batch_size: batch size; incomplete batches are dropped.
        shard_num, shard_id: distributed sharding, used only in train mode.
        num_workers: accepted for API compatibility; currently unused.

    Returns:
        (data_set, dataset_size): the batched/repeated dataset and the
        number of individual samples before batching.
    """
    def _log(msg):
        # Progress message followed by a wall-clock timestamp.
        print(msg)
        print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

    _log('init DAM_dataset...')
    dataset_DAM = DAM_dataset(data_path=data_path, mode=mode)
    _log('finish init DAM_dataset...')
    dataset_size = len(dataset_DAM)  # prefer len() over calling __len__ directly

    columns = ["turn_data", "turn_len", "res", "res_len", "label"]
    if mode == "train":
        data_set = ds.GeneratorDataset(source=dataset_DAM, column_names=columns,
                                       shuffle=True, num_shards=shard_num, shard_id=shard_id)
    else:
        data_set = ds.GeneratorDataset(source=dataset_DAM, column_names=columns,
                                       shuffle=False)
    _log('finish generate dataset')
    data_set = data_set.batch(batch_size, drop_remainder=True)
    data_set = data_set.repeat(repeat)
    return data_set, dataset_size


def data2mindrecord(data_path, mindrecord_file, mode, shard_num=1):
    """Convert a DAM pickle split into a MindRecord file.

    Args:
        data_path: pickle file consumed by DAM_dataset.
        mindrecord_file: output path prefix; the final file is
            "<prefix>_<mode>.mindrecord".
        mode: which split to convert ("train" / "val" / anything else = test).
        shard_num: number of MindRecord shards to write.
    """
    mindrecord_path = mindrecord_file + '_' + mode + '.mindrecord'
    # Remove stale outputs. Check the .db index independently so it cannot
    # be orphaned when only the main file was deleted beforehand.
    if os.path.exists(mindrecord_path):
        os.remove(mindrecord_path)
    if os.path.exists(mindrecord_path + '.db'):
        os.remove(mindrecord_path + '.db')

    writer = FileWriter(file_name=mindrecord_path, shard_num=shard_num)
    schema = {"turns": {"type": "int32", "shape": [10, 50]},
              "turn_len": {"type": "int32", "shape": [-1]},
              "response": {"type": "int32", "shape": [-1]},
              "response_len": {"type": "int32", "shape": [-1]},
              "labels": {"type": "int32", "shape": [-1]}}
    writer.add_schema(schema, "test dataset")

    dataset = DAM_dataset(data_path=data_path, mode=mode)
    buffer = []
    for i in tqdm(range(len(dataset))):
        turns, turn_len, response, response_len, label = dataset[i]
        buffer.append({"turns": np.array(turns),
                       "turn_len": np.array(turn_len),
                       "response": np.array(response),
                       "response_len": np.array(response_len),
                       "labels": np.array(label)})
        count = i + 1
        # Flush every 100 samples to bound memory usage.
        if count % 100 == 0:
            writer.write_raw_data(buffer)
            buffer.clear()
        if count % 100000 == 0:
            print('Have handle {}w lines.'.format(count // 10000))
    # Flush any remaining samples of an incomplete final chunk.
    if buffer:
        writer.write_raw_data(buffer)
    writer.commit()


if __name__ == '__main__':
    # Convert the Ubuntu corpus test split to MindRecord; switch to the
    # commented line below to convert the Douban corpus instead.
    # data2mindrecord("../data/douban/data.pkl", "../data/douban/data", mode='test', shard_num=1)
    data2mindrecord("../data/ubuntu/data_small.pkl", "../data/ubuntu/data", mode='test', shard_num=1)
