# -*- coding: utf-8 -*-

import pickle
import tqdm
import os

from collections import Counter
from .. io import saveVocabPkl


class TorchVocab(object):
    """Vocabulary object used to numericalize a text field.

    Attributes:
        freqs: collections.Counter holding the frequency of each token found
            in the data used to build the vocab.
        stoi: dict mapping token strings to their numeric identifiers.
        itos: list of token strings indexed by their numeric identifiers.
    """

    def __init__(self,
                 counter,
                 max_size=None,
                 min_freq=1,
                 specials=('<pad>', '<oov>'),  # tuple: a mutable list default would be shared across calls
                 vectors=None,
                 unk_init=None,
                 vectors_cache=None):
        """Create a Vocab object from a collections.Counter.

        Arguments:
            counter: collections.Counter with the frequency of each value found
                in the data.
            max_size: maximum size of the vocabulary, or None for no maximum.
                Default: None.
            min_freq: minimum frequency needed to include a token in the
                vocabulary; values below 1 are treated as 1. Default: 1.
            specials: special tokens (e.g. padding or eos) prepended to the
                vocabulary, in order. Default: ('<pad>', '<oov>').
            vectors: one of the available pretrained vectors, or custom
                pretrained vectors, or a list of the above.
            unk_init (callback): by default, out-of-vocabulary word vectors are
                initialized to zero vectors; can be any function taking a
                tensor and returning a tensor of the same size.
            vectors_cache: directory for cached vectors.
                Default: '.vector_cache'.
        """
        self.freqs = counter
        self.max_size = max_size
        self.min_freq = min_freq
        self.itos = list(specials)  # specials occupy the first ids
        self.stoi = {}
        self.specials = list(specials)  # private copy; callers may pass any iterable
        self.vectors = None
        self.__genItos(counter)

        # Word vectors must be loaded if requested.
        if vectors is not None:
            # NOTE(review): load_vectors is not defined on this class; unless a
            # subclass provides it, this path raises AttributeError — confirm.
            self.load_vectors(vectors, unk_init=unk_init, cache=vectors_cache)
        else:
            assert unk_init is None and vectors_cache is None

    def __genItos(self, counter):
        """Populate itos (specials first, then tokens by frequency) and stoi."""
        counter = counter.copy()  # don't mutate the caller's Counter
        min_freq = max(self.min_freq, 1)

        # Special tokens never compete on frequency; drop them from the counts.
        # (Counter.__delitem__ is documented not to raise on missing keys.)
        for tok in self.specials:
            del counter[tok]

        # Effective cap is max_size plus the specials already placed in itos.
        max_size = None if self.max_size is None else self.max_size + len(self.itos)

        # Sort by frequency descending; the first alphabetical sort plus the
        # stability of list.sort breaks frequency ties alphabetically.
        words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])
        words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)
        for word, freq in words_and_frequencies:
            if freq < min_freq or len(self.itos) == max_size:
                break
            self.itos.append(word)
        self.stoi = {tok: i for i, tok in enumerate(self.itos)}  # stoi is just the inverse of itos

    def __eq__(self, other):
        # Guard against foreign types: the original raised AttributeError here;
        # NotImplemented lets Python fall back to identity comparison.
        if not isinstance(other, TorchVocab):
            return NotImplemented
        if self.freqs != other.freqs:
            return False
        if self.stoi != other.stoi:
            return False
        if self.itos != other.itos:
            return False
        if self.vectors != other.vectors:
            return False
        return True

    def __len__(self):
        return len(self.itos)

    def vocabRerank(self):
        """Rebuild stoi from the current itos ordering."""
        self.stoi = {word: i for i, word in enumerate(self.itos)}

    def extend(self, v, sort=False):
        """Append tokens from vocab *v* that are not already present.

        Arguments:
            v: another TorchVocab whose itos supplies candidate tokens.
            sort: if True, candidates are appended in sorted order.
        """
        words = sorted(v.itos) if sort else v.itos
        for w in words:
            if w not in self.stoi:
                self.itos.append(w)
                self.stoi[w] = len(self.itos) - 1

class Vocab(TorchVocab):
    """Vocabulary with the fixed special-token layout used by the models."""

    def __init__(self, counter, max_size=None, min_freq=1):
        # Fixed ids of the special tokens; to_seq/from_seq implementations
        # rely on these positions matching the specials list below.
        self.pad_index = 0
        self.unk_index = 1
        self.eos_index = 2
        self.sos_index = 3
        self.mask_index = 4
        super(Vocab, self).__init__(counter,
                                    specials=["<pad>", "<unk>", "<eos>", "<sos>", "<mask>"],
                                    max_size=max_size,
                                    min_freq=min_freq)

    def to_seq(self, sentence, seq_len, with_eos=False, with_sos=False) -> list:
        """Convert a sentence to a sequence of token ids (subclass hook)."""
        raise NotImplementedError

    def from_seq(self, seq, join=False, with_pad=False):
        """Convert a sequence of token ids back to tokens (subclass hook)."""
        raise NotImplementedError

    @staticmethod
    def load_vocab(vocab_path: str) -> 'Vocab':
        """Load the pickled vocab next to *vocab_path*, building it if absent.

        The cache file shares the stem of vocab_path with a ".pkl" extension.
        (The previous str.replace on basename.split(".")[1] crashed on
        extension-less names and could corrupt directory components that
        happened to contain the extension string.)
        """
        pkl_file = os.path.splitext(vocab_path)[0] + ".pkl"
        if not os.path.exists(pkl_file):
            print("new build .pkl vocab  ", pkl_file)
            saveVocabPkl(vocab_path, pkl_file)
        else:
            print("load existed .pkl vocab  ", pkl_file)
        with open(pkl_file, "rb") as f:
            return pickle.load(f)

    def save_vocab(self, vocab_path):
        """Serialize this instance to *vocab_path* with pickle."""
        with open(vocab_path, "wb") as f:
            pickle.dump(self, f)

class WordVocab(Vocab):
    """Build a Vocab from an iterable of text lines (or pre-tokenized lists)."""

    def __init__(self, texts, max_size=None, min_freq=1):
        """Count word frequencies over *texts* and build the vocabulary.

        Arguments:
            texts: iterable of strings, or of already-tokenized word lists.
            max_size / min_freq: forwarded to Vocab.
        """
        counter = Counter()
        for line in tqdm.tqdm(texts):
            if isinstance(line, list):
                words = line
            else:
                # Keep the replace chain: deleting "\n"/"\t" before splitting
                # fuses words separated only by those characters ("a\tb" -> "ab"),
                # which bare split() would keep as two tokens.
                words = line.replace("\n", "").replace("\t", "").split()
            counter.update(words)

        super().__init__(counter, max_size=max_size, min_freq=min_freq)

    def to_seq(self, sentence, seq_len=None, with_eos=False, with_sos=False, with_len=False):
        """Map *sentence* to a list of token ids.

        Arguments:
            sentence: string (whitespace-tokenized) or list of words.
            seq_len: if given, pad with pad_index / truncate to this length.
            with_eos: append eos_index before padding.
            with_sos: prepend sos_index before padding.
            with_len: also return the length before padding/truncation.

        Returns:
            list of ids, or (list, original_length) if with_len.
        """
        if isinstance(sentence, str):
            sentence = sentence.split()

        seq = [self.stoi.get(word, self.unk_index) for word in sentence]

        if with_eos:
            seq += [self.eos_index]
        if with_sos:
            seq = [self.sos_index] + seq

        origin_seq_len = len(seq)

        # Pad or truncate to seq_len; None keeps the natural length.
        if seq_len is not None:
            if len(seq) <= seq_len:
                seq += [self.pad_index for _ in range(seq_len - len(seq))]
            else:
                seq = seq[:seq_len]

        return (seq, origin_seq_len) if with_len else seq

    def from_seq(self, seq, join=False, with_pad=False):
        """Map token ids back to words.

        Ids outside the vocab render as "<id>"; with_pad=True drops pad_index.
        Returns a space-joined string if join, else a list of words.
        """
        words = [self.itos[idx]
                 if idx < len(self.itos)
                 else "<%d>" % idx
                 for idx in seq
                 if not with_pad or idx != self.pad_index]
        return " ".join(words) if join else words


def build():
    """CLI entry point: build a WordVocab from a corpus file and pickle it.

    Flags: -c/--corpus_path (required), -o/--output_path (required),
    -s/--vocab_size, -e/--encoding (default utf-8), -m/--min_freq (default 1).
    """
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument("-c", "--corpus_path", required=True, type=str)
    cli.add_argument("-o", "--output_path", required=True, type=str)
    cli.add_argument("-s", "--vocab_size", type=int, default=None)
    cli.add_argument("-e", "--encoding", type=str, default="utf-8")
    cli.add_argument("-m", "--min_freq", type=int, default=1)
    opts = cli.parse_args()

    # Stream the corpus line by line into the vocab builder.
    with open(opts.corpus_path, "r", encoding=opts.encoding) as corpus:
        vocab = WordVocab(corpus, max_size=opts.vocab_size, min_freq=opts.min_freq)

    print("VOCAB SIZE:", len(vocab))
    vocab.save_vocab(opts.output_path)
