#!/usr/bin/python3
# -*- coding: utf-8 -*-
# File  : data.py
# Author: anyongjin
# Date  : 2020/8/18
import numpy as np
import pickle
import os
from DistillBert.utils import logger


def pickle_read(fdata):
    """Deserialize and return the next pickled object from the open binary file *fdata*."""
    obj = pickle.load(fdata)
    return obj


def build_generator_from_cache(path, batch_size, shuffle_size=0,
                               epoch_num=0, epoch_dic=None, read_fn=None):
    '''
    Yield batches of rows read from a pickled cache file.

    :param path: cache file path (a sequence of pickled row lists)
    :param batch_size: number of rows per yielded batch
    :param shuffle_size: size of the shuffle window (0 disables shuffling);
        must not be smaller than batch_size when non-zero
    :param epoch_num: passes over the file; 0 repeats forever
    :param epoch_dic: shared dict recording the current epoch per file name
    :param read_fn: callable(file) -> list of rows, raising EOFError at end;
        defaults to pickle_read
    :return: generator of row lists; the last batch may be shorter
    '''
    from random import shuffle
    if batch_size > shuffle_size > 0:
        raise Exception('shuffle_size cant be less than batch_size!')
    if read_fn is None:
        read_fn = pickle_read
    file_name = os.path.basename(path)
    if epoch_dic is None:
        epoch_dic = {}
    epoch_dic[file_name] = 1
    shuffle_cache = []
    # `with` guarantees the handle is released even when the consumer
    # abandons the generator early (the original leaked `fin` in that case).
    with open(path, 'rb') as fin:
        while True:
            # top up the cache to the shuffle window (or one batch if larger)
            read_size = max(batch_size, shuffle_size) - len(shuffle_cache)
            cur_read_size = 0
            while cur_read_size < read_size:
                try:
                    read_rows = read_fn(fin)
                except EOFError:
                    # reached end of file; rewind for another epoch if allowed
                    if epoch_num > epoch_dic[file_name] or epoch_num == 0:
                        fin.seek(0)
                        epoch_dic[file_name] += 1
                        continue
                    break
                shuffle_cache.extend(read_rows)
                cur_read_size += len(read_rows)
            if shuffle_size > 0:
                shuffle(shuffle_cache)
            batch_rows = shuffle_cache[:batch_size]
            shuffle_cache = shuffle_cache[batch_size:]
            if not batch_rows:
                break
            yield batch_rows


def build_data_cache(path, batch_size, col_processers):
    '''
    Convert a csv data file into a pickle cache at `path + '.pkl'`.

    The csv is read with two columns (label, text) and no header. Rows are
    serialized in batches: each `pickle.dump` call writes one list of row
    tuples, so the cache can be consumed incrementally with `pickle.load`.

    :param path: csv file path
    :param batch_size: number of rows serialized per pickle.dump call
    :param col_processers: list of dicts, each with 'name' (csv column) and
        an optional 'processer' callable applied to every cell of the column
    :raises Exception: when the csv file holds no rows
    '''
    import pandas as pd
    frame = pd.read_csv(path, names=['label', 'text'], dtype=str)
    if len(frame) == 0:
        raise Exception(f'file {path} is empty!')
    # `with` ensures the cache file is flushed and closed — the original
    # never closed it, risking truncated caches on interpreter exit.
    with open(path + '.pkl', 'wb') as fcache:
        csv_off = 0
        while True:
            batch_data = frame[csv_off:csv_off + batch_size]
            csv_off += len(batch_data)
            if len(batch_data) == 0:
                break
            batch_cols = []
            for item in col_processers:
                col_data = list(batch_data[item['name']])
                processer = item.get('processer')
                if processer:
                    col_data = [processer(line) for line in col_data]
                batch_cols.append(col_data)
            batch_rows = list(zip(*batch_cols))
            pickle.dump(batch_rows, fcache)


def build_generator(path, batch_size, col_processers=None, shuffle_size=0,
                    epoch_num=0, epoch_dic=None, read_fn=None):
    '''
    Return a batch generator for one data file, building the pickle cache
    for a csv file on first use.

    :param path: data file path (csv or any format readable by read_fn)
    :param batch_size: rows per batch
    :param col_processers: csv column processers, required for csv input
    :param shuffle_size: shuffle window size, 0 disables shuffling
    :param epoch_num: epochs over the file, 0 repeats forever
    :param epoch_dic: shared dict recording epoch index per file
    :param read_fn: batch read function for non-csv files
    :return: generator yielding row lists
    '''
    if batch_size > shuffle_size > 0:
        raise Exception('shuffle_size cant be less than batch_size!')
    cache_path = path + '.pkl'
    cache_exists = os.path.isfile(cache_path)
    if not cache_exists and path.endswith('.csv'):
        assert col_processers is not None, 'col_processers is required for csv data file'
        logger.warning(f'no cache found, building:{path}')
        build_data_cache(path, batch_size, col_processers)
        logger.info(f'cache build complete:{path}')
        cache_exists = os.path.isfile(cache_path)
    data_path = cache_path if cache_exists else path
    return build_generator_from_cache(data_path, batch_size, shuffle_size, epoch_num,
                                      epoch_dic=epoch_dic, read_fn=read_fn)


def build_generators(data_dir, batch_size, col_processers=None, shuffle_size=0,
                     epoch_num=0, epoch_dic=None, read_fn=None):
    '''
    Build one batch generator per data file under `data_dir`.

    A file name may carry a sampling weight in its second dot-separated
    part ('vName.10.csv' -> weight 10, 'kName.csv' -> weight 1); each file
    contributes rows to a batch in proportion to its weight.

    :param data_dir: data directory, or a single data file path
    :param batch_size: total batch size summed over all file generators
    :param col_processers: csv column processers, see build_data_cache
    :param shuffle_size: overall shuffle window size (0 disables shuffling)
    :param epoch_num: epochs per file, 0 repeats forever
    :param epoch_dic: shared dict recording the epoch index of each file
    :param read_fn: batch read function for non-csv files
    :return: list of generators, one per data file
    '''
    assert shuffle_size == 0 or shuffle_size >= batch_size, 'shuffle_size cant not be less than batch_size'
    import math
    if os.path.isfile(data_dir):
        names = [os.path.basename(data_dir)]
        data_dir = os.path.dirname(data_dir)
    else:
        names = os.listdir(data_dir)
        # .pkl files are caches built from the raw files, not inputs
        names = [n for n in names if not n.endswith('.pkl')]
    if not names:
        # fail with a clear message instead of ZeroDivisionError below
        raise Exception(f'no data files found in {data_dir}!')
    if epoch_dic is None:
        epoch_dic = {}
    generators = []
    factor_map = {}
    sum_factor = 0
    for name in names:
        # 'vName.10.csv' -> factor 10, 'kName.csv' -> factor 1
        arr = name.split('.')
        try:
            factor = max(1, int(arr[1]))
        except (IndexError, ValueError):
            # narrowed from bare `except:` — only "no second part" or
            # "second part is not a number" mean "no weight given"
            factor = 1
        factor_map[name] = factor
        sum_factor += factor
    base_fetch_num = max(1, round(batch_size / sum_factor))
    shuffle_factor = shuffle_size / batch_size
    from io import StringIO
    msg_builder = StringIO()
    msg_builder.write('Fetch Num for every File in Batch:\n')
    for name in names:
        cur_fetch_num = base_fetch_num * factor_map[name]
        cur_shuffle_size = math.ceil(shuffle_factor * cur_fetch_num)
        msg_builder.write(f'{name:>10}:{cur_fetch_num}\t')
        path = os.path.join(data_dir, name)
        generators.append(build_generator(path, cur_fetch_num,
                                          col_processers=col_processers,
                                          shuffle_size=cur_shuffle_size,
                                          epoch_num=epoch_num,
                                          epoch_dic=epoch_dic,
                                          read_fn=read_fn))
    logger.info(msg_builder.getvalue())
    return generators


def build_data_generator(data_dir, batch_size, col_processers=None,
                         shuffle_fac=0, epoch_num=1, epoch_dic=None, read_fn=None):
    '''
    Read from one or more files simultaneously and yield batched train/test data.

    :param data_dir: train(test) data dir or path
    :param batch_size: batch size in total
    :param col_processers: csv field processer(tokenize and label to label_id)
    :param shuffle_fac: 0 for no shuffle, or the multiple of batch_size to shuffle before batching
    :param epoch_num: 0 for repeat forever.
    :param epoch_dic: used to log epoch index for each file
    :param read_fn: read data file in batch for Non-CSV file and return a list(do not return all data)
    :return: generator yielding one tuple of numpy arrays (one array per column) per batch
    '''
    generators = build_generators(data_dir, batch_size,
                                  col_processers=col_processers,
                                  shuffle_size=shuffle_fac * batch_size,
                                  epoch_num=epoch_num,
                                  epoch_dic=epoch_dic,
                                  read_fn=read_fn)
    while True:
        try:
            # one fetch from every file generator; stop as soon as any
            # file generator is exhausted (partial rounds are dropped)
            batch_rows_all = [next(gen) for gen in generators]
        except StopIteration:
            break
        all_rows = [row for group in batch_rows_all for row in group]
        batch_cols = zip(*all_rows)
        yield tuple(np.array(col) for col in batch_cols)


def build_cols_from_iter(data_iter):
    '''
    Collect batched column data from an iterator into whole columns.

    :param data_iter: iterable yielding tuples/lists of array-like columns;
        should cover exactly one epoch
    :return: tuple of numpy arrays, one concatenated array per column
    '''
    columns = []
    for batch in data_iter:
        # grow the column list lazily to the widest batch seen so far
        missing = len(batch) - len(columns)
        if missing > 0:
            columns.extend([] for _ in range(missing))
        for parts, col in zip(columns, batch):
            parts.append(col)
    return tuple(np.concatenate(parts) for parts in columns)


def build_data_list(data_dir, batch_size, col_processers):
    '''Load the whole data set (one epoch, no shuffle) as full column arrays.'''
    batch_iter = build_data_generator(data_dir, batch_size, col_processers,
                                      shuffle_fac=0, epoch_num=1)
    return build_cols_from_iter(batch_iter)


class DataLoader:
    '''
    Data loading helper.

    csv data files are handled through the `col_processers` argument;
    any other format is read via a user supplied `read_fn`.
    '''
    def __init__(self, data_dir, col_processers=None, batch_size=1024,
                 shuffle_fac=0, epoch_num=1, read_fn=None):
        '''
        :param data_dir: data file path or directory
        :param col_processers: processers for csv format
        :param batch_size: rows per yielded batch
        :param shuffle_fac: shuffle window as a multiple of batch_size
        :param epoch_num: passes over the data (0 repeats forever)
        :param read_fn: file read fn for other data format
        '''
        self.data_dir = data_dir
        self.col_processers = col_processers
        self.batch_size = batch_size
        self.shuffle_fac = shuffle_fac
        self.epoch_num = epoch_num
        # maps file name -> current epoch index, filled while iterating
        self.epoch_dic = {}
        self.read_fn = read_fn
        # lazily created batch iterator / cached column arrays
        self.data_iter = None
        self.data_cols = None

    def build(self, rebuild=False):
        '''
        Create the batch iterator from the configured data dir.
        :param rebuild: force a fresh iterator even when one already exists
        :return: the batch data iterator
        '''
        if rebuild or not self.data_iter:
            self.epoch_dic = {}
            self.data_iter = build_data_generator(self.data_dir, self.batch_size,
                                                  col_processers=self.col_processers,
                                                  shuffle_fac=self.shuffle_fac,
                                                  epoch_num=self.epoch_num,
                                                  epoch_dic=self.epoch_dic,
                                                  read_fn=self.read_fn)
        return self.data_iter

    def get_data_cols(self):
        '''
        Return the whole data set as column arrays, building the iterator
        first when necessary. The result is cached after the first call.
        :return: tuple of numpy column arrays
        '''
        if not self.data_cols:
            if self.data_iter is None:
                self.build()
            self.data_cols = build_cols_from_iter(self.data_iter)
        return self.data_cols

    def get_min_epoch(self):
        '''Smallest epoch index across all tracked data files.'''
        return min(self.epoch_dic.values())
