import re
import json
import math
import numpy as np
import pandas as pd
import pdb
import datetime
import time
import os
import sys
from tqdm import tqdm, trange
import jieba
import random
import pickle
import itertools
from collections import Counter
import jieba
import matplotlib.pyplot as plt
# import networkx as nx
from joblib import Parallel, delayed


class Lang(object):
    '''Vocabulary: maps space-tokenised words to integer ids and back.

    Id 0 is reserved for the padding token and id 1 for unknown
    (out-of-vocabulary) words; real words are assigned ids from 2 up.
    '''
    def __init__(self, name=None):
        self.name = name
        self.word2index = {}   # word -> id (ids start at 2)
        self.word2count = {}   # word -> number of occurrences seen
        self.index2word = {0: "PAD_token",1:"Unknown"}
        self.n_words = 2  # next free id == current vocabulary size

    def addSentence(self, sentence):
        '''Add every word of a space-separated sentence to the vocabulary
        and return the list of their ids.'''
        ids = []
        for word in sentence.split(' '):
            self.addWord(word)
            ids.append(self.word2index[word])
        return ids

    def word2idx(self, word):
        '''Return the id of `word`, or 1 (Unknown) when out of vocabulary.'''
        # dict.get avoids the double lookup of `in ... .keys()` + indexing.
        return self.word2index.get(word, 1)

    def addWord(self, word):
        '''Register one occurrence of `word`, assigning a fresh id on first sight.'''
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
        else:
            self.word2count[word] += 1

    def fit_transform(self, sentences):
        '''Add all words of `sentences` (iterable of space-separated strings)
        to the vocabulary and return the same structure as lists of ids.'''
        word_ids = []
        for line in sentences:
            assert isinstance(line, str)
            word_ids.append(self.addSentence(line))
        return word_ids

    def fit(self, sentences):
        '''Add all words of `sentences` to the vocabulary.
        Non-string entries are silently skipped.'''
        for line in sentences:
            if isinstance(line, str):
                for word in line.split(' '):
                    self.addWord(word)

    def transform(self, sentences):
        '''Convert a list of space-separated sentences to lists of word ids.

        Non-string entries are skipped, so the output may be shorter than
        the input. Unknown words map to id 1.
        '''
        word_ids = []
        for line in sentences:
            if isinstance(line, str):
                word_ids.append([self.word2idx(word) for word in line.split(' ')])
        return word_ids

    def sentence2idx(self, sentence):
        '''Convert a single space-separated sentence to a list of word ids.'''
        assert isinstance(sentence, str)
        return [self.word2idx(word) for word in sentence.split(" ")]

    def trim(self, min_count):
        '''Drop words seen fewer than `min_count` times and re-index the rest.

        BUGFIX: the id counter used by addWord is `n_words`; the original
        reset a nonexistent `num_words`, so new ids kept growing from the
        old pre-trim counter instead of restarting at 2.
        '''
        keep_words = [w for w, c in self.word2count.items() if c >= min_count]

        # max(..., 1) keeps an empty vocabulary from dividing by zero.
        print('keep_words {} / {} = {:.4f}'.format(
            len(keep_words), len(self.word2index),
            len(keep_words) / max(len(self.word2index), 1)
        ))

        # Remember the old counts, then rebuild every mapping from scratch.
        old_counts = self.word2count
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "PAD_token",1:"Unknown"}
        self.n_words = 2  # reset the real id counter (was: self.num_words)

        for word in keep_words:
            self.addWord(word)
            self.word2count[word] = old_counts[word]  # restore the true count

    def save(self, filename, path):
        '''Pickle this Lang to `filename`.

        `path` is unused; kept for interface compatibility.
        '''
        savepath = os.path.split(filename)[0]
        # BUGFIX: os.makedirs('') raises when filename has no directory part.
        if savepath:
            os.makedirs(savepath, exist_ok=True)
        with open(filename, "wb") as f:
            pickle.dump(self, f)

    @staticmethod
    def load(filename, path):
        '''Unpickle and return a Lang from `filename`; call as
        Lang.load(filename, path). `path` is unused.

        BUGFIX: marked @staticmethod — without it, calling on an instance
        silently bound `self` as `filename`.
        '''
        with open(filename, "rb") as f:
            data = pickle.load(f)
        return data
        
        
def zeroPadding(seqs, fillvalue=0, max_seq_length=None):
    '''Transpose a batch of id sequences into a (max_len, batch) array,
    padding shorter sequences with `fillvalue`.

    When `max_seq_length` is given (truthy), every sequence is first
    truncated to at most that many elements.
    '''
    if max_seq_length:
        assert max_seq_length > 0
        # Slicing clamps automatically, so no explicit min() is needed.
        seqs = [s[:max_seq_length] for s in seqs]
    transposed = itertools.zip_longest(*seqs, fillvalue=fillvalue)
    return np.array(list(transposed))
