# coding: utf-8
import codecs
import logging
import os
import pickle
from collections import Counter

import nltk
import torch


class Dictionary(object):
    """Bidirectional word <-> integer-id vocabulary.

    The vocabulary is built from word frequencies; when ``max_vocab_size``
    is given, only the most frequent words are kept (``None`` means
    unbounded). The token ``'_unk'`` always occupies one slot and stands
    in for any out-of-vocabulary word.
    """

    def __init__(self, words, max_vocab_size=None):
        # Counter.most_common(None) returns all items, so max_vocab_size=None
        # means "keep everything". (nltk.FreqDist is a Counter subclass; the
        # stdlib Counter behaves identically here and drops a dependency.)
        freq_dist = Counter(words)
        words_in_dict = [w for w, _ in freq_dist.most_common(max_vocab_size)]
        if '_unk' not in words_in_dict:
            if words_in_dict:
                # '_unk' must always be present: it evicts the least
                # frequent surviving word so the size budget is respected.
                words_in_dict[-1] = '_unk'
            else:
                # Empty input previously crashed on words_in_dict[-1];
                # fall back to a vocabulary of just the unknown token.
                words_in_dict = ['_unk']
        self.idx2word = []
        self.word2idx = {}
        for idx, word in enumerate(words_in_dict):
            self.idx2word.append(word)
            self.word2idx[word] = idx

    def __len__(self):
        """Vocabulary size, including the '_unk' slot."""
        return len(self.idx2word)

    def to_idx(self, word):
        """Return the id of *word*, falling back to the id of '_unk'."""
        return self.word2idx.get(word, self.word2idx['_unk'])

    def to_word(self, idx):
        """Return the word for *idx*.

        Raises IndexError for out-of-range indices. Negative indices are
        rejected too (previously they silently aliased the end of the list).
        """
        if 0 <= idx < len(self):
            return self.idx2word[idx]
        raise IndexError('word index out of range: {}'.format(idx))

    def save(self, save_path):
        """Pickle this dictionary to *save_path*."""
        with open(save_path, 'wb') as f:
            pickle.dump(self, f)


class Corpus(object):
    """Token-id corpus built from plain-text files, one sentence per line.

    Each non-empty line is tokenized with nltk and wrapped in '_bos' /
    '_eos' markers; the literal '<unk>' in the raw text is normalized to
    '_unk' so it matches the Dictionary's unknown token.
    """

    def __init__(self, list_of_files, lower_all=True, max_vocab_size=None, dictionary=None):
        self.lower_all = lower_all
        self.list_of_files = list_of_files
        logging.info('Loading corpus..')
        logging.warning('Option lower_all: {}'.format(lower_all))
        if dictionary is None:
            # No dictionary supplied: build one over all files' words.
            logging.warning('Making dictionary...')
            words = []
            for filename in list_of_files:
                words.extend(self.__filename2words(filename))
            self.dictionary = Dictionary(words, max_vocab_size=max_vocab_size)
        else:
            # Reuse a caller-provided dictionary (e.g. for eval-only corpora).
            logging.warning('Skip making dictionary')
            self.dictionary = dictionary
        logging.info('Loading corpus finished. Use dict vocab_size: {}'.format(len(self.dictionary)))

    def __filename2words(self, filename):
        """Tokenize *filename* into a flat word list with _bos/_eos markers."""
        words = []
        with codecs.open(filename, encoding='utf-8') as f:  # yields unicode lines
            for line in f:
                if self.lower_all:
                    line = line.lower()
                line = line.replace('<unk>', '_unk')  # unify the unk spelling
                word_tokens = nltk.tokenize.word_tokenize(line)
                # BUG FIX: the original tested len(line) > 0, which is true
                # even for blank lines (they still contain '\n'), so empty
                # lines wrongly contributed ['_bos', '_eos']. Testing the
                # token list matches the stated intent of skipping empties.
                if word_tokens:
                    words.extend(['_bos'] + word_tokens + ['_eos'])
        return words

    def get_ids(self, filename_list=None):
        """Return a 1-D torch.LongTensor of token ids for the given files.

        filename_list=None means "all of self.list_of_files".
        """
        if filename_list is None:
            filename_list = self.list_of_files
        ids = []
        for filename in filename_list:
            words = self.__filename2words(filename)
            if words:  # guard: the original divided by zero on empty files
                unk_rate = float(sum(1 for word in words if word == '_unk')) / len(words)
                logging.info('filename: {} unk_rate {:.2f}'.format(filename, unk_rate))
            ids.extend(self.dictionary.to_idx(word) for word in words)
        return torch.LongTensor(ids)


class CorpusTVT(Corpus):
    """Corpus with fixed train/valid/test splits.

    The directory at *path* must contain exactly train.txt, valid.txt
    and test.txt; all three are used to build the dictionary.
    """

    def __init__(self, path='./data/wikitext-2', **kwargs):
        # Resolve the three canonical split files under ``path``.
        self.train_file, self.valid_file, self.test_file = (
            os.path.join(path, split)
            for split in ('train.txt', 'valid.txt', 'test.txt')
        )
        super(CorpusTVT, self).__init__(
            [self.train_file, self.valid_file, self.test_file], **kwargs)

    @property
    def train(self):
        """Token ids of the training split."""
        return self.get_ids([self.train_file])

    @property
    def valid(self):
        """Token ids of the validation split."""
        return self.get_ids([self.valid_file])

    @property
    def test(self):
        """Token ids of the test split."""
        return self.get_ids([self.test_file])
