import jieba
import jieba.posseg
import os
import numpy as np
import pandas as pd
import re
import pdb
from tqdm import tqdm
import torch
import pickle
from itertools import zip_longest

# Register entity-placeholder tokens with jieba so the segmenter keeps each
# one intact as a single word, tagged with the given POS flag.
_PLACEHOLDER_TAGS = [
    ("#LOC#", "ns"),   # location
    ("#PER#", "nr"),   # person name
    ("#TIME#", "t"),   # time expression
    ("#ORG#", "nt"),   # organization
    ("#ID#", "n"),
    ("#CID#", "n"),
    ("#NUM#", "n"),
]
for _word, _tag in _PLACEHOLDER_TAGS:
    jieba.add_word(_word, tag=_tag)

class Example(object):
    """Lightweight record type: stores every keyword argument as an attribute."""

    def __init__(self, **fields):
        for name, value in fields.items():
            setattr(self, name, value)

class DatasetIterater(object):
    """Mini-batch iterator over a list of Example-like objects.

    Each yielded batch is ``(nids, inputMat, labelMat, pois)`` where
    ``inputMat`` is a LongTensor of token ids zero-padded to the longest
    sentence in the batch and sorted by descending sentence length (the
    layout expected by ``torch.nn.utils.rnn.pack_padded_sequence``), and
    ``labelMat`` is the matching LongTensor of labels.
    """

    def __init__(self, dataset, batch_size, device=None):
        self.batch_size = batch_size
        self.dataset = dataset
        self.n_batches = len(dataset) // batch_size
        # True when a final, smaller-than-batch_size batch remains.
        # BUGFIX: the original computed ``len(dataset) % self.n_batches``,
        # which used the wrong divisor (silently dropping the tail batch,
        # e.g. 6 items with batch_size 4) and raised ZeroDivisionError
        # whenever len(dataset) < batch_size.
        self.residue = len(dataset) % batch_size != 0
        self.index = 0
        self.device = device if device else torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def _to_tensor(self, batch):
        """Convert one batch of examples into padded, length-sorted tensors.

        Returns ``(nids, inputMat, labelMat, pois)``.

        NOTE(review): ``nids`` and ``pois`` are returned in the ORIGINAL
        batch order while ``inputMat``/``labelMat`` are reordered by
        descending length — confirm callers realign them before pairing.
        """
        nids = []
        sentids = []
        labels = []
        pois = []
        for ex in batch:
            nids.append(ex.nid)
            sentids.append(ex.ids)
            labels.append(ex.label)
            pois.append(ex.poi)

        # Indices of the batch sorted by sentence length, longest first
        # (stable sort, so equal lengths keep their original relative order).
        sentLength = [len(sent) for sent in sentids]
        orderlist = [it for it in zip(range(len(batch)), sentLength)]
        orderlist.sort(key=lambda x: x[1], reverse=True)
        orderIds = [it[0] for it in orderlist]

        # zip_longest transposes the ragged sentence list, padding short
        # sentences with 0 (the <PAD> index); transpose back to (batch, seq).
        inputMat = torch.Tensor([it for it in zip_longest(*sentids, fillvalue=0)]).transpose(1, 0)[orderIds].long()
        labelMat = torch.Tensor(labels)[orderIds].long()

        return nids, inputMat, labelMat, pois

    def __next__(self):
        if self.residue and self.index == self.n_batches:
            # Final partial batch: everything past the last full batch.
            batches = self.dataset[self.index * self.batch_size: len(self.dataset)]
            self.index += 1
            batches = self._to_tensor(batches)
            return batches

        elif self.index >= self.n_batches:
            # Exhausted: reset so the iterator can be walked again.
            self.index = 0
            raise StopIteration
        else:
            batches = self.dataset[self.index * self.batch_size: (self.index + 1) * self.batch_size]
            self.index += 1
            batches = self._to_tensor(batches)
            return batches

    def __iter__(self):
        return self

    def __len__(self):
        # Number of batches per epoch, counting the partial tail batch.
        if self.residue:
            return self.n_batches + 1
        else:
            return self.n_batches


class VocabCLS(object):
    """Word vocabulary with index and frequency bookkeeping.

    Attributes:
        itos: index -> word list; slots 0 and 1 are fixed to <PAD> and <UNK>.
        stoi: word -> index mapping (includes the special tokens).
        stoc: word -> occurrence count (special tokens excluded).
        n_words: current vocabulary size, i.e. ``len(itos)``.
        istrim: whether ``trim()`` has been applied.
    """

    def __init__(self):
        self.itos = ["<PAD>", "<UNK>"]
        self.stoi = {"<PAD>": 0, "<UNK>": 1}
        self.stoc = {}
        self.n_words = 2
        self.istrim = False

    def word2idx(self, word):
        """Return the index of ``word``, falling back to the <UNK> index."""
        if word in self.stoi.keys():
            return self.stoi[word]
        else:
            return self.stoi["<UNK>"]

    def _addWord(self, word):
        """Register one occurrence of ``word`` and return its index.

        BUGFIX: the original returned ``self.n_words`` (the vocabulary size
        after insertion), so addSentence() produced a list of identical size
        counters instead of the token indices its callers expect.
        """
        if word not in self.stoi:
            self.stoi[word] = self.n_words
            self.stoc[word] = 1
            self.itos.append(word)
            self.n_words += 1
        else:
            self.stoc[word] += 1
        return self.stoi[word]

    def addSentence(self, sentence):
        """Add every space-separated token of ``sentence``; return their indices."""
        return [self._addWord(word) for word in sentence.split(' ')]

    def trim(self, min_count=0):
        """Drop words seen fewer than ``min_count`` times and re-index.

        Surviving words are ordered lexicographically descending after the
        fixed <PAD>/<UNK> slots (preserves the original ordering behavior).
        Previous indices are invalidated.
        """
        freq = [(k, v) for k, v in self.stoc.items() if v >= min_count]
        freq.sort(key=lambda x: x[0], reverse=True)
        self.stoc = dict(freq)
        self.itos = ["<PAD>", "<UNK>"] + [it[0] for it in freq]
        self.n_words = len(self.itos)
        self.stoi = {word: idx for idx, word in enumerate(self.itos)}
        self.istrim = True

    def sentence2idx(self, sentence):
        """Convert a single space-separated sentence into a list of indices."""
        assert isinstance(sentence, str)
        return [self.word2idx(word) for word in sentence.split(" ")]


def textClean(text):
    """Strip markup/layout characters from ``text`` and segment it with jieba.

    Returns the segmented words joined by single spaces, dropping tokens
    jieba tags as punctuation/symbols ("x") or numerals ("m").
    """
    # BUGFIX: the original replaced "\u30003" — Python's \u escape consumes
    # exactly 4 hex digits, so that literal is the full-width space U+3000
    # followed by a literal digit '3', and only that 2-char sequence was
    # stripped. The intent is clearly to strip the full-width space itself.
    text = re.sub(r"<.+?>", "", text)
    text = text.replace("&nbsp;", "").replace("\u3000", "").replace("\t", "")
    rs = jieba.posseg.cut(text)
    sentence = " ".join([it.word for it in rs if it.flag not in ["x", "m"]])
    return sentence



if __name__ == "__main__":
    # Smoke test: load the cached training examples and walk every batch to
    # make sure the iterator runs without raising.
    # NOTE(review): pickle.load is only acceptable here because train.cache
    # is produced locally by this project — never unpickle untrusted files.
    with open(os.path.join("data/processedData", "train.cache"), "rb") as f:
        examples = pickle.load(f)
    data_iter = DatasetIterater(examples, 20)  # renamed: ``iter`` shadowed the builtin
    for batch in data_iter:
        pass
    pdb.set_trace()  # debug hook: inspect the last batch interactively