# encoding = utf8
import os
import pickle
import re
import math
import codecs
import random

import numpy as np
import jieba
import pandas as pd
from tqdm import tqdm

jieba.initialize()

def get_data_with_windows(name='train'):
    """Convert the prepared CSVs under data/prepare/<name> into id sequences.

    Loads the feature -> (id2item, item2id) mapping from data/prepare/dict.pkl,
    splits each CSV into sentences at rows whose 'word' column is the literal
    'sep', converts every feature column to id lists, augments the data with
    2- and 3-sentence windows, and pickles everything to data/prepare/<name>.pkl.

    Args:
        name: dataset split, i.e. the sub-directory under data/prepare and the
            stem of the output pickle ('train' or 'test').
    """
    with open('data/prepare/dict.pkl', 'rb') as f:
        map_dict = pickle.load(f)

    def item2id(data, w2i):
        # Map each token to its id, falling back to the 'UNK' id in one lookup.
        unk = w2i['UNK']
        return [w2i.get(x, unk) for x in data]

    results = []
    root = os.path.join('data/prepare', name)
    files = os.listdir(root)
    print(files)

    for file in tqdm(files):
        result = []
        path = os.path.join(root, file)
        samples = pd.read_csv(path, sep=',')
        num_samples = len(samples)
        # Sentence boundaries: virtual -1/num_samples ends plus every 'sep' row.
        sep_index = [-1] + samples[samples['word'] == 'sep'].index.tolist() + [num_samples]

        # ---- Convert each sentence into ids, one id-list per feature column ----
        # NOTE(review): BatchManager.pad_data assumes the column order is
        # char, bound, flag, target, radical, pinyin — confirm the CSVs match.
        for i in range(len(sep_index) - 1):
            start = sep_index[i] + 1
            end = sep_index[i + 1]
            data = [item2id(list(samples[feature])[start:end], map_dict[feature][1])
                    for feature in samples.columns]
            result.append(data)

        # ---- Data augmentation: windows of 2 and 3 adjacent sentences ----
        two = []
        for i in range(len(result) - 1):
            first, second = result[i], result[i + 1]
            two.append([first[k] + second[k] for k in range(len(first))])

        three = []
        for i in range(len(result) - 2):
            first, second, third = result[i], result[i + 1], result[i + 2]
            three.append([first[k] + second[k] + third[k] for k in range(len(first))])

        results.extend(result + two + three)

    with open(os.path.join('data/prepare', name + '.pkl'), 'wb') as f:
        pickle.dump(results, f)

def get_dict(path):
    """Load and return the pickled mapping stored at *path*.

    Args:
        path: filesystem path to a pickle file (e.g. data/prepare/dict.pkl).

    Returns:
        The unpickled object (the feature dictionary).
    """
    # Renamed the local: the original shadowed the `dict` builtin.
    with open(path, 'rb') as f:
        mapping = pickle.load(f)
    return mapping

class BatchManager(object):
    """Loads a pickled dataset split and serves it as padded, length-sorted batches."""

    def __init__(self, batch_size, name='train'):
        """Read data/prepare/<name>.pkl and pre-build all padded batches.

        Args:
            batch_size: number of sentences per batch.
            name: dataset split to load ('train' or 'test').
        """
        with open(os.path.join('data/prepare', name + '.pkl'), 'rb') as f:
            data = pickle.load(f)
        self.batch_data = self.sort_and_pad(data, batch_size)
        self.len_data = len(self.batch_data)

    def sort_and_pad(self, data, batch_size):
        """Sort sentences by length and slice them into zero-padded batches.

        Sorting by length first keeps the per-batch padding to a minimum.

        Args:
            data: list of sentences, each a list of per-feature id lists.
            batch_size: number of sentences per batch.

        Returns:
            List of padded batches (see pad_data for the batch layout).
        """
        num_batch = int(math.ceil(len(data) / batch_size))  # total batch count
        # Removed a leftover debug print of len(data[0][0]) and the redundant
        # int() casts around batch_size from the original implementation.
        sorted_data = sorted(data, key=lambda x: len(x[0]))
        return [self.pad_data(sorted_data[i * batch_size:(i + 1) * batch_size])
                for i in range(num_batch)]

    @staticmethod
    def pad_data(data):
        """Zero-pad every sentence in *data* to the batch's maximum length.

        Each sentence is expected to be [char, bound, flag, target, radical,
        pinyin] (the order produced upstream — TODO confirm against the CSV
        column order); the returned batch is reordered to
        [chars, bounds, flags, radicals, pinyins, targets].
        """
        chars, bounds, flags, radicals, pinyins, targets = [], [], [], [], [], []
        max_length = max(len(sentence[0]) for sentence in data)
        for line in data:
            char, bound, flag, target, radical, pinyin = line
            padding = [0] * (max_length - len(char))
            chars.append(char + padding)
            bounds.append(bound + padding)
            flags.append(flag + padding)
            targets.append(target + padding)
            radicals.append(radical + padding)
            pinyins.append(pinyin + padding)
        return [chars, bounds, flags, radicals, pinyins, targets]

    def iter_batch(self, shuffle=False):
        """Yield the pre-built batches; optionally shuffle batch order in place."""
        if shuffle:
            random.shuffle(self.batch_data)
        for batch in self.batch_data:
            yield batch

if __name__ == '__main__':
    # Step 1: run these to build the pickled id datasets under data/prepare/.
    get_data_with_windows('train')
    # get_data_with_windows('test')

    # train_data = BatchManager(10,'train')
    # print(train_data)

    # with open(f'data/prepare/dict.pkl', 'rb') as f:
    #     data = pickle.load(f)
    #     print()