#!/usr/bin/env python
# coding: utf-8
import random
import numpy as np
import re

# THUCNews category names.  The position of each name in this list defines its
# integer class id, so the order below must never change.
labels = ['体育', '娱乐', '家居', '彩票', '房产', '教育', '时尚', '时政', '星座', '游戏', '社会', '科技', '股票', '财经', '军事']
# Reverse lookup: category name -> class id (0..len(labels)-1).
labels_dic = dict(zip(labels, range(len(labels))))

class THUReader:
    """Batched reader for THUCNews-style text-classification data.

    Each line of ``data_file`` is expected to be ``<label>\\t<text>``, where
    ``<label>`` is one of the module-level ``labels`` strings.  The dataset
    is truncated to a whole number of batches of size ``batchsize``.
    """

    def __init__(self, data_file, batchsize, max_seq_length, tokenizer=None):
        """Load the file and pre-compute label ids and raw sentences.

        Args:
            data_file: path to a UTF-8, tab-separated ``label\\ttext`` file.
            batchsize: examples per batch; trailing examples that do not
                fill a whole batch are dropped.
            max_seq_length: cap on tokenized length, including the
                [CLS]/[SEP] special tokens.
            tokenizer: BERT-style tokenizer exposing ``cls_token``,
                ``sep_token`` and ``convert_tokens_to_ids``.
        """
        # Fix: decode explicitly as UTF-8 (Chinese text breaks on platforms
        # whose default encoding differs), drop the trailing newline that the
        # original kept inside every sentence, and skip blank lines instead
        # of crashing on ``labels_dic['']``.
        with open(data_file, "r", encoding="utf-8") as fr:
            data = [line.rstrip("\n").split("\t") for line in fr if line.strip()]
        # (sentence, label_id) pairs for the full file, kept for callers.
        self.dataset = [(item[1], labels_dic[item[0]]) for item in data]
        self.batchsize = batchsize
        # Usable length: largest multiple of batchsize that fits.
        self.length = (len(self.dataset) // batchsize) * batchsize
        self.max_seq_length = max_seq_length
        self.tokenizer = tokenizer
        self.label_ids = np.zeros([self.length], dtype=np.int64)
        sents = []
        for idx, (sent, label_id) in enumerate(self.dataset[:self.length]):
            self.label_ids[idx] = label_id
            sents.append(sent)
        self.sents = np.array(sents)
        # Matches any run of common CJK unified ideographs.
        self.zh_pattern = re.compile(u'[\u4e00-\u9fa5]+')

    def __len__(self):
        """Number of usable examples (a multiple of the batch size)."""
        return len(self.sents)

    def contains_CN_Char(self, string):
        """Return a truthy match object if *string* contains a Chinese char."""
        return self.zh_pattern.search(string)

    def Pad_Sequence(self, ipt_ids, mlm_labels=None, pad_id=102):
        """Right-pad a batch of id sequences to a common length.

        Args:
            ipt_ids: list of 1-D int sequences of token ids.
            mlm_labels: optional parallel list of per-token labels; padded
                label positions are filled with -1 (the "ignore" index).
            pad_id: id written into padding positions.  Defaults to 102,
                the value the original code hard-coded (BERT's [SEP] id) —
                NOTE(review): confirm this is intended rather than [PAD]=0.

        Returns:
            ``(input_ids, attention_masks)`` or, when ``mlm_labels`` is
            given, ``(input_ids, attention_masks, labels)`` — all int64
            arrays of shape ``(len(ipt_ids), max_len)``.
        """
        # ``default=0`` keeps an empty batch from raising ValueError.
        max_sent_len = max((len(ids) for ids in ipt_ids), default=0)
        ipt_tensors = np.full([len(ipt_ids), max_sent_len], pad_id, dtype=np.int64)
        attn_masks = np.ones([len(ipt_ids), max_sent_len], dtype=np.int64)
        if mlm_labels is not None:
            labels = np.full([len(ipt_ids), max_sent_len], -1, dtype=np.int64)

        for i, ids in enumerate(ipt_ids):
            ipt_tensors[i, :len(ids)] = ids
            if mlm_labels is not None:
                labels[i, :len(ids)] = mlm_labels[i]
            attn_masks[i, len(ids):] = 0

        if mlm_labels is None:
            return ipt_tensors, attn_masks
        return ipt_tensors, attn_masks, labels

    def sent_to_tokens(self, sent):
        """Tokenize one space-separated sentence into BERT-style tokens.

        Words containing Chinese characters are split into individual
        characters; everything else stays a whole-word token.  The result is
        capped at ``max_seq_length`` tokens including [CLS] and [SEP].
        """
        tokens = [self.tokenizer.cls_token]
        for word in sent.split(" "):
            if self.contains_CN_Char(word):
                tokens.extend(list(word))
            else:
                tokens.append(word)
            # +1 reserves room for the trailing [SEP].
            if len(tokens) + 1 > self.max_seq_length:
                break
        return tokens[:self.max_seq_length - 1] + [self.tokenizer.sep_token]

    def sents2ids(self, sents):
        """Convert raw sentences to padded id tensors + attention masks."""
        sent_tokens = [self.sent_to_tokens(sent) for sent in sents]
        text_inputs = [np.array(self.tokenizer.convert_tokens_to_ids(tokens))
                       for tokens in sent_tokens]
        return self.Pad_Sequence(text_inputs)

    def reset_batchsize(self, new_batch_size):
        """Change the batch size used by sample() and iter()."""
        self.batchsize = new_batch_size

    def sample(self):
        """Draw one random batch: (input_ids, attn_masks, label_ids)."""
        batch_idx = random.sample(range(len(self.sents)), self.batchsize)
        return self.sents2ids(self.sents[batch_idx]) + (self.label_ids[batch_idx],)

    def iter(self):
        """Yield every batch once in random order (one shuffled epoch)."""
        idxs = random.sample(range(len(self.sents)), len(self.sents))
        for i in range(0, len(idxs), self.batchsize):
            batch = idxs[i:i + self.batchsize]
            yield self.sents2ids(self.sents[batch]) + (self.label_ids[batch],)


# test_file = './THUCnews/cnews.test.txt'
# val_file = './THUCnews/cnews.val.txt'
# train_file = './THUCnews/cnews.train.txt'
# tokenizer = BertTokenizer.from_pretrained('./publish/')
# val_reader = THUReader('./THUCnews/cnews.val.txt', batchsize=20, max_seq_length=512, tokenizer=tokenizer)
