# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import time
import pickle
import mindspore
import numpy as np
from tqdm import tqdm
from mindspore import context
import mindspore.dataset as ds
import mindspore.ops as P
from mindspore.common.tensor import Tensor
from mindspore.mindrecord import FileWriter
from mindspore.communication.management import get_rank, get_group_size

import config as config


def unison_shuffle(data, seed=None):
    """Shuffle the b'y', b'c' and b'r' arrays of `data` with one shared permutation.

    Args:
        data: dict with keys b'y', b'c', b'r'; values are equal-length sequences.
        seed: optional int; seeds numpy's global RNG for reproducibility.

    Returns:
        New dict with the same keys, each value a numpy array permuted
        identically so corresponding entries stay aligned.
    """
    if seed is not None:
        np.random.seed(seed)

    y = np.array(data[b'y'])
    c = np.array(data[b'c'])
    r = np.array(data[b'r'])

    # All three arrays must stay row-aligned, hence a single permutation.
    assert len(y) == len(c) == len(r)
    p = np.random.permutation(len(y))
    # (Removed leftover debug print of the full permutation array —
    # it flooded stdout for large corpora.)
    shuffle_data = {b'y': y[p], b'c': c[p], b'r': r[p]}
    return shuffle_data


def split_c(c, split_id):
    """Split the flat context token list `c` into per-turn sub-lists.

    `split_id` (conf[_EOS_]) marks the end of one turn. Returns a nested
    list of turns; a trailing empty turn produced by a terminal separator
    is dropped as long as at least one turn remains.
    """
    turns = [[]]
    for token in c:
        if token == split_id:
            turns.append([])
        else:
            turns[-1].append(token)
    # Drop the empty tail left behind when c ends with split_id.
    if len(turns) > 1 and not turns[-1]:
        turns.pop()
    return turns


def normalize_length(_list, length, cut_type='tail'):
    """Pad or cut `_list` to exactly `length` items.

    Args:
        _list: flat list (e.g. r / a single turn of c) or nested list (turns).
        length: target length.
        cut_type: 'head' keeps the first `length` items, 'tail' keeps the
            last `length` items; only consulted when `_list` is too long.

    Returns:
        (normalized_list, real_length) where real_length is
        min(len(_list), length). Flat lists are padded with 0, nested lists
        with independent empty lists. The input list is never mutated
        (previously it was extended in place, forcing callers to pass copies).

    Raises:
        ValueError: if `_list` is too long and `cut_type` is unknown
            (previously this fell through and silently returned None).
    """
    real_length = len(_list)
    if real_length == 0:
        return [0] * length, 0

    if real_length <= length:
        if isinstance(_list[0], list):
            # One fresh list per slot: `[[]] * n` aliases a single shared
            # list, so mutating one padded turn would leak into all of them.
            padding = [[] for _ in range(length - real_length)]
        else:
            padding = [0] * (length - real_length)
        return list(_list) + padding, real_length

    if cut_type == 'head':
        return _list[:length], length
    if cut_type == 'tail':
        return _list[-length:], length
    raise ValueError("cut_type must be 'head' or 'tail', got: {}".format(cut_type))


def produce_one_sample(data, index, split_id, max_turn_num, max_turn_len, turn_cut_type='tail',
                       term_cut_type='tail'):
    '''Build one normalized (label, turns, response) sample from `data`.

    e.g. max_turn_num=10, max_turn_len=50.
    Returns (y, nor_turns_nor_c, nor_r, turn_len, term_len, r_len).
    '''
    context = data[b'c'][index]
    response = data[b'r'][index][:]
    y = data[b'y'][index]

    # Cut the flat context into turns, then clamp the turn count
    # to max_turn_num.
    turns = split_c(context, split_id)
    nor_turns, turn_len = normalize_length(turns, max_turn_num, turn_cut_type)

    # Normalize every turn to max_turn_len, remembering each real length.
    nor_turns_nor_c = []
    term_len = []
    for turn in nor_turns:
        nor_turn, real_len = normalize_length(turn, max_turn_len, term_cut_type)
        nor_turns_nor_c.append(nor_turn)
        term_len.append(real_len)

    nor_r, r_len = normalize_length(response, max_turn_len, term_cut_type)

    return y, nor_turns_nor_c, nor_r, turn_len, term_len, r_len


def data2mindrecord(file_path, data_path, mode, config):
    """Convert one split of the pickled corpus into a MindRecord file.

    Args:
        file_path: output .mindrecord path (any existing file is removed).
        data_path: pickle file holding a (train, val, test) tuple of dicts
            keyed by b'y' / b'c' / b'r'.
        mode: "train", "val", or anything else (falls back to the test split).
        config: parsed args; must provide _EOS_, max_turn_num, max_turn_len.
    """
    print('config._EOS_: ', config._EOS_)
    MINDRECORD_FILE = file_path
    # Remove any previous conversion so FileWriter can create a fresh file.
    if os.path.exists(MINDRECORD_FILE):
        os.remove(MINDRECORD_FILE)
        if os.path.exists(MINDRECORD_FILE + '.db'):
            os.remove(MINDRECORD_FILE + '.db')

    writer = FileWriter(file_name=MINDRECORD_FILE, shard_num=1)
    schema = {"turns": {"type": "int32", "shape": [config.max_turn_num, config.max_turn_len]},
              "turn_len": {"type": "int32", "shape": [-1]},
              "response": {"type": "int32", "shape": [-1]},
              "response_len": {"type": "int32", "shape": [-1]},
              "label": {"type": "int32", "shape": [-1]}, }
    writer.add_schema(schema, mode + "dataset")

    # NOTE(review): pickle.load is unsafe on untrusted files; data.pkl is
    # assumed to be a locally produced corpus.
    with open(data_path, "rb") as f:
        train, val, test = pickle.load(f, encoding="bytes")
        print('train_data.len: ', len(train[b'y']))
        print('eval_data.len: ', len(val[b'y']))
        print('test_data.len: ', len(test[b'y']))
    if mode == "train":
        data = train
    elif mode == "val":
        data = val
    else:
        data = test

    # Take the limits from config so produced samples always match the
    # schema shape declared above (previously hard-coded to 9 / 50, which
    # diverges as soon as config.max_turn_num / config.max_turn_len differ).
    max_turn_num = config.max_turn_num
    max_turn_len = config.max_turn_len
    EOS = config._EOS_
    print('_EOS_: ', EOS)

    data_len = int(len(data[b'y']))
    print('data_len: ', data_len)
    data_list = []
    count = 0
    for index in tqdm(range(data_len)):
        count += 1
        y, nor_turns_nor_c, nor_r, turn_len, term_len, r_len = produce_one_sample(
            data, index, EOS, max_turn_num, max_turn_len,
            turn_cut_type='tail', term_cut_type='tail')
        sample = {"turns": np.array(nor_turns_nor_c),
                  "turn_len": np.array(term_len),
                  "response": np.array(nor_r),
                  "response_len": np.array(r_len),
                  "label": np.array(y)}
        data_list.append(sample)
        # Flush in batches of 100 samples to bound memory usage.
        if count % 100 == 0:
            writer.write_raw_data(data_list)
            data_list.clear()
        if count % 100000 == 0:
            print('Have handle {}w lines.'.format((count / 100000) * 10))
    if data_list:
        writer.write_raw_data(data_list)
    print('total {} lines.'.format(count))
    writer.commit()
    print("read over")


if __name__ == '__main__':
    # Run eagerly on a specific Ascend device while converting / inspecting.
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend", device_id=7)

    # Convert the Ubuntu corpus
    # config = config.ubuntu_parse_args()
    # data_path = "../data/ubuntu/data.pkl"
    # mode = "val"
    # file_path = "../data/ubuntu/data_small_" + mode + ".mindrecord"

    # Convert the Douban corpus
    config = config.douban_parse_args()
    data_path = "../data/douban/data.pkl"
    mode = "val"
    file_path = "../data/douban/data_" + mode + ".mindrecord"

    print('preprocess: ', data_path)
    print('mode: ', mode)
    data2mindrecord(file_path=file_path, data_path=data_path, mode=mode, config=config)
    # Read the freshly written file back to sanity-check the conversion.
    dataset = ds.MindDataset(file_path, columns_list=["turns", "turn_len", "response", "response_len", "label"],
                             shuffle=False)
    print(dataset.get_dataset_size())
    dataset = dataset.batch(256, drop_remainder=True)
    dataset = dataset.repeat(1)
    print('dataset_len: ', 256 * dataset.get_dataset_size())
    print('dataset_size: ', dataset.get_dataset_size())
    i = 1
    for data in dataset.create_dict_iterator():
        print('------------------------------')
        # turns = data["turns"]
        # every_turn_len = data["turn_len"]
        # response = data["response"]
        # NOTE(review): the variable is named response_len but reads the
        # "response" column — probably meant data["response_len"]; confirm.
        response_len = data["response"]
        print(i, response_len)
        i += 1
        # label = data["label"]
        # print("turn_data: {}\n{}".format(turns.shape, turns))  # (256, 9, 50)
        # # print("one utterance in turns: {}\n{}".format(turns[:, 1].shape, turns[0, 0]))  # (256, 50)
        # print("turn_len: {}\n{}".format(every_turn_len.shape, every_turn_len))  # (256, 9)
        # print("res: {}\n{}".format(response.shape, response))  # (256, 50)
        # print("res_len: {}\n{}".format(response_len.shape, response_len))  # (256, 1)
        # print("label: {}\n{}".format(label.shape, label))  # (256, 1)
        # turn = P.Unstack(1)(turns)
        # print(turn)
        print('------------------------------')
        # break
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
