import torch
from torch.utils import data
from collections import Counter, defaultdict
import os, pickle, codecs, csv, time
from tqdm import tqdm
import panlp.segment as segment     # Employ panlp (HanLP) instead of jieba
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
import matplotlib

# Standard tokenizer vs. customized tokenizer (the customized one merges
# longer spans such as organization names); only the standard one is active.
tokenizer = segment.StandardTokenizer()
# tokenizer = segment.CalibratedTokenizer()

# Sentinel tokens: PAD fills sequences up to max_len, UNK replaces
# out-of-vocabulary words during word-to-id conversion.
PAD = '<PAD>'
UNK = '<UNK>'

class NanningDataset(data.Dataset):
    """Text-classification dataset for the Nanning corpus.

    Each sample is a fixed-length sequence of word ids plus a label id.
    On disk the dataset is a header-less tab-separated file (label id in
    column 0, word ids after it) alongside pickled vocabulary /
    word-frequency / label artifacts.  Use :meth:`proc_corpus` to build
    those files from a raw corpus CSV.
    """

    def __init__(self, dataset_fn="nanning.trainset", data_dir=".", max_len=160):
        """Load the id file and pickled artifacts if they all exist.

        When any required file is missing the instance stays empty
        (``len(self) == 0``) and :meth:`proc_corpus` must be called first.

        Args:
            dataset_fn: name of the tab-separated id file.
            data_dir: directory containing the dataset and pickle files.
            max_len: maximum sequence length yielded by ``__getitem__``.
        """
        self.dataset_fn = dataset_fn
        self.data_dir = data_dir
        self.max_len = max_len
        self.dataset_tensor = torch.LongTensor()

        required = (dataset_fn, "vocab.pkl", "word_freq.pkl", "labels.pkl")
        if all(os.path.isfile(os.path.join(data_dir, fn)) for fn in required):
            # Header-less file: column 0 is the label id, the rest word ids.
            df = pd.read_csv(os.path.join(data_dir, dataset_fn), sep="\t",
                             header=None, quoting=csv.QUOTE_NONE)
            self.dataset_tensor = torch.LongTensor(df.values)

            # Pickles are binary; plain open() is the right tool here.
            with open(os.path.join(data_dir, "vocab.pkl"), "rb") as f:
                self.vocab = pickle.load(f)
            with open(os.path.join(data_dir, "word_freq.pkl"), "rb") as f:
                self.word_freq = pickle.load(f)
            with open(os.path.join(data_dir, "labels.pkl"), "rb") as f:
                self.labels = pickle.load(f)
            self.word2id, self.id2word, self.label2id, self.id2label = \
                self.init_id_mapping(self.vocab, self.labels)
            del df
        else:
            print("Dataset instance initialized and %s could not be loaded" % dataset_fn)

    def __getitem__(self, index):
        """Return ``(word_ids[:max_len], label_id)`` for one sample."""
        return self.dataset_tensor[index, 1:self.max_len + 1], self.dataset_tensor[index, 0]

    def __len__(self):
        """Number of samples currently loaded (0 when nothing was loaded)."""
        return len(self.dataset_tensor)

    def seg_sent(self, doc, use_char=False):
        """Normalize a sentence and split it into tokens.

        Strips CR/LF/TAB/space characters, then either word-segments the
        text with the module-level tokenizer or splits it into single
        characters.  (NOTE(review): the original comment also promised
        English->Chinese punctuation replacement, but that was never
        implemented.)

        Args:
            doc: raw sentence text; may be None or empty.
            use_char: True to split into characters instead of words.

        Returns:
            List of tokens; empty list for falsy input.
        """
        if not doc:
            return []
        doc = doc.replace("\n", "").replace("\r", "").replace("\t", "").replace(" ", "").strip()
        if use_char:
            return list(doc)
        return [segment.get_word(token) for token in tokenizer.seg(doc)]

    def build_vocab(self, all_words, min_freq=1):
        """Build the vocabulary (with PAD/UNK) and the word-frequency table.

        Args:
            all_words: flat list of every token in the corpus.
            min_freq: tokens rarer than this are excluded.

        Returns:
            ``(vocab, word_freq)``: vocab is ``[PAD, UNK, word, ...]``
            ordered by descending frequency; word_freq maps word -> count.
        """
        counter = Counter(all_words)
        # Cap at 500k distinct words; raise this as the corpus grows.
        word_freq = {word: cnt for word, cnt in counter.most_common(500000)
                     if cnt >= min_freq}
        # PAD and UNK must be in the vocabulary so every token maps to an id.
        vocab = [PAD, UNK] + list(word_freq.keys())
        return vocab, word_freq

    def init_id_mapping(self, vocab, labels):
        """Build word<->id and label<->id lookup tables.

        The vocabulary keeps its list order so word ids stay stable.
        BUG FIX: labels is a set, whose iteration order varies across
        interpreter runs (str hash randomization), so label ids were not
        reproducible; sorting first makes them deterministic.
        """
        print("Vocabulary length=%d, Labels length=%d" % (len(vocab), len(labels)))

        word2id = {word: i for i, word in enumerate(vocab)}
        id2word = {i: word for word, i in word2id.items()}
        label2id = {label: i for i, label in enumerate(sorted(labels))}
        id2label = {i: label for label, i in label2id.items()}
        return word2id, id2word, label2id, id2label

    def save_dataset(self, dataset_fn, x_, y_, max_len):
        """Write one sample per line: ``<label_id>\\t<word_id>\\t...``.

        Each description (a token list) is truncated/padded to exactly
        ``max_len`` tokens; out-of-vocabulary tokens map to the UNK id.
        Rows that fail to convert (non-list descriptions, unknown labels)
        are logged to ``dataset_fn + ".log"`` and skipped.

        Args:
            dataset_fn: output file path.
            x_: DataFrame whose first column holds token lists.
            y_: Series of label names aligned with ``x_``.
            max_len: fixed sequence length per sample.
        """
        unk_id = self.word2id[UNK]
        with codecs.open(dataset_fn, mode="w", encoding="utf-8") as fo, \
                codecs.open(dataset_fn + ".log", mode="w", encoding="utf-8") as flog:

            lst_desc = x_.iloc[:, 0].tolist()
            lst_cat = y_.tolist()

            for idx, desc in enumerate(lst_desc):
                try:
                    tokens = desc[:max_len]
                    tokens.extend([PAD] * (max_len - len(tokens)))
                    # dict.get avoids a per-token try/except for OOV words.
                    desc_ids = [str(self.word2id.get(word, unk_id)) for word in tokens]
                    category_id = str(self.label2id[lst_cat[idx]])
                    fo.write(category_id + "\t" + "\t".join(desc_ids) + "\n")
                except (KeyError, AttributeError, TypeError) as e:
                    # Bad rows are logged and skipped instead of aborting.
                    flog.write("Line #%d error: %s\tx=%s\ty=%s\n"
                               % (idx, e, desc, lst_cat[idx]))
            # No explicit close(): the with-statement handles both files.

    def proc_corpus(self, corpus_fn, trainset_fn, testset_fn, data_dir,
                    use_char=False, max_len=160, min_freq=1):
        """Build vocab/label artifacts and train/test id files from a corpus.

        Reads a tab-separated CSV with (at least) 'Category' and 'Descript'
        columns, segments every description, builds the vocabulary and id
        mappings, persists the artifacts, then writes an 80/20 train/test
        split via :meth:`save_dataset`.

        Args:
            corpus_fn: corpus CSV file name inside ``data_dir``.
            trainset_fn: output train id-file name.
            testset_fn: output test id-file name.
            data_dir: working directory for inputs and outputs.
            use_char: character-level instead of word-level tokens.
            max_len: fixed sequence length written per sample.
            min_freq: minimum word frequency kept in the vocabulary.
        """
        self.vocab, self.word_freq, self.labels = [], {}, set()
        self.word2id, self.id2word, self.label2id, self.id2label = {}, {}, {}, {}

        print("Process corpus csv file %s ..." % corpus_fn)
        df = pd.read_csv(os.path.join(data_dir, corpus_fn), sep='\t', quoting=csv.QUOTE_NONE)

        # Keep only the relevant columns and drop incomplete rows.
        selected = ['Category', 'Descript']
        not_selected = list(set(df.columns) - set(selected))
        # BUG FIX: drop() returns a new frame; the original discarded it,
        # so the irrelevant columns were never removed.
        df = df.drop(not_selected, axis=1)
        df = df.dropna(axis=0, how='any', subset=selected)

        # Segment every description, collecting vocabulary/label material.
        # BUG FIX: iterrows() yields copies, so writing into `row` never
        # reached the frame; collect tokens and assign the column once.
        all_words, seg_docs = [], []
        print("Segment sentences ...")
        for _, row in tqdm(df.iterrows()):
            self.labels.add(row["Category"])
            tokens = self.seg_sent(row["Descript"], use_char)
            seg_docs.append(tokens)
            all_words.extend(tokens)
        df["Descript"] = seg_docs

        print("Build vocabulary and totally has %d words ..." % len(all_words))
        if all_words:
            self.vocab, self.word_freq = self.build_vocab(all_words, min_freq)

        # Persist the artifacts needed to reload the dataset later.
        with open(os.path.join(data_dir, "vocab.pkl"), "wb") as f:
            pickle.dump(self.vocab, f)
        with open(os.path.join(data_dir, "word_freq.pkl"), "wb") as f:
            pickle.dump(self.word_freq, f)
        with open(os.path.join(data_dir, "labels.pkl"), "wb") as f:
            pickle.dump(self.labels, f)

        self.word2id, self.id2word, self.label2id, self.id2label = \
            self.init_id_mapping(self.vocab, self.labels)

        # Select columns by name rather than position so the split is
        # correct regardless of the CSV's original column order.
        x_train, x_test, y_train, y_test = train_test_split(
            df[["Descript"]], df["Category"], test_size=0.2)

        print("Write to train set %s ..." % trainset_fn)
        self.save_dataset(os.path.join(data_dir, trainset_fn), x_train, y_train, max_len)

        print("Write to test set %s ..." % testset_fn)
        self.save_dataset(os.path.join(data_dir, testset_fn), x_test, y_test, max_len)

        # Free the large intermediates explicitly before returning.
        del df, x_train, x_test, y_train, y_test
        print("Done")

if __name__ == "__main__":
    # Alternative corpora:
    #   10K samples:    "nanning_train_samples_10k.csv"
    #   GTE100 samples: "train_nanning_2cols_xlbs3-20171120.csv"
    #                   "train_nanning_2cols_xlbs3-10k.csv"
    corpus_fn, trainset_fn, testset_fn, data_dir = \
        "nanning_train_samples_10k.csv", "nanning.trainset", "nanning.testset", "."

    # If the train/test id files do not exist yet but the corpus does,
    # generate them; otherwise report word-frequency statistics.
    dataset_nn = NanningDataset(trainset_fn, data_dir, max_len=120)
    if not os.path.isfile(os.path.join(data_dir, trainset_fn)) and \
            not os.path.isfile(os.path.join(data_dir, testset_fn)):

        if os.path.isfile(os.path.join(data_dir, corpus_fn)):
            dataset_nn.proc_corpus(corpus_fn, trainset_fn, testset_fn, data_dir,
                                   use_char=False, max_len=120, min_freq=2)
    elif hasattr(dataset_nn, "word_freq"):
        # BUG FIX: word_freq is only set when every artifact file loaded;
        # the guard avoids an AttributeError when e.g. vocab.pkl is missing
        # while the trainset file exists.
        a = np.array(list(dataset_nn.word_freq.values()))
        # Histogram of how many words fall into each frequency bucket < 100.
        bins = np.arange(0, 100, 1)
        hist, bins = np.histogram(a, bins)
        print("Word freq stats:\nbins=%s\nhistogram=%s\n" % (bins, hist))
        # plt.show()

    # Example of consuming the dataset with a DataLoader:
    '''
    training_iter = data.DataLoader(dataset = training_set,
                                    batch_size = 3,
                                    num_workers = 2)
    for data, label in training_iter:
        print(data)
        print(label)
    '''
