# -*- coding: utf-8 -*-

from collections import Counter
import argparse
import json
import torch
from typing import List
from cpg.utils import read_corpus, pad_sents

DATAPATH = '/data/pangchaoxu/poetry_generation/Data/ccpc'

def parse_args(in_args=None):
    """Parse command-line options for vocabulary construction.

    @param in_args (list[str] | None): argument list to parse; None means
        sys.argv[1:] (the argparse default).
    @returns argparse.Namespace with attributes data_dir, vocab_file,
        vocab_size, sent_freq_cutoff (dashes are converted to underscores
        by argparse).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--data_dir', type=str, default=DATAPATH + '/juejv.json',
                            help='Data directory')
    # Fix: help text was a copy-paste of --data_dir's ("Data directory").
    arg_parser.add_argument('--vocab_file', type=str, default=DATAPATH + '/vocab.json',
                            help='Output path of the vocabulary JSON file')
    # Fix: typo "maximun" -> "maximum".
    arg_parser.add_argument('--vocab-size', type=int, default=50000,
                            help='The maximum number of the vocab')
    arg_parser.add_argument('--sent-freq-cutoff', type=int, default=10,
                            help='Frequency cutoff')
    arg_info = arg_parser.parse_args(args=in_args)
    return arg_info

class VocabEntry(object):
    """Bidirectional mapping between words and integer ids.

    Reserves four special tokens (<pad>=0, <s>=1, </s>=2, <unk>=3) when
    built from scratch; keeps word2id and id2word in sync via add().
    """

    def __init__(self, word2id=None):
        """Init VocabEntry.

        @param word2id (dict | None): optional pre-built word -> id mapping
            (must contain '<pad>' and '<unk>'); when falsy, a fresh mapping
            with the four special tokens is created.
        """
        if word2id:
            self.word2id = word2id
        else:
            self.word2id = dict()
            self.word2id['<pad>'] = 0   # Pad Token
            self.word2id['<s>'] = 1     # Start Token
            self.word2id['</s>'] = 2    # End Token
            self.word2id['<unk>'] = 3   # Unknown Token

        self.unk_id = self.word2id['<unk>']
        self.pad_id = self.word2id['<pad>']
        # Reverse mapping; kept in sync by add().
        self.id2word = {v: k for k, v in self.word2id.items()}

    def __setitem__(self, key, value):
        """Direct item assignment is forbidden; use add() instead."""
        raise ValueError('vocabulary is readonly')

    def __len__(self):
        """Return the number of words in the vocabulary."""
        return len(self.word2id)

    def __getitem__(self, word):
        """Return the id of `word`, or None when out-of-vocabulary.

        NOTE: unlike words2indices(), this does NOT fall back to unk_id;
        the None return is kept so callers can use it as a membership test.
        """
        return self.word2id.get(word, None)

    def __repr__(self):
        return 'Vocabulary[size=%d]' % len(self)

    def get_id2word(self, wid):
        """Return the word for id `wid`; raises KeyError if unknown."""
        return self.id2word[wid]

    def get_word2id(self):
        """Return the underlying word -> id dict (not a copy)."""
        return self.word2id

    def add(self, word):
        """
        Add word to VocabEntry.

        @returns (int): the id assigned to `word` (existing id if already
            present; ids grow densely from the current size).
        """
        if word not in self.word2id:
            wid = self.word2id[word] = len(self)
            self.id2word[wid] = word
            return wid
        else:
            return self.word2id[word]

    def words2indices(self, sents):
        """
        Convert a list of words, or a list of sentences (lists of words),
        into the corresponding (nested) list of ids; OOV words map to unk_id.
        """
        # Fix: the original indexed sents[0] unconditionally and raised
        # IndexError on empty input; an empty conversion is simply [].
        if not sents:
            return []
        # Fix: isinstance instead of type(...) == list (idiomatic, accepts
        # list subclasses).
        if isinstance(sents[0], list):
            return [[self.word2id.get(w, self.unk_id) for w in s] for s in sents]
        return [self.word2id.get(w, self.unk_id) for w in sents]

    def indices2words(self, word_ids):
        """Map a list of ids back to words; raises KeyError on unknown ids."""
        return [self.id2word[w_id] for w_id in word_ids]

    def to_input_tensor(self, sents: List[List[str]], device: torch.device) -> torch.Tensor:
        """
        Convert a batch of sentences into tensors with necessary paddings.
        @return : tensor of (max_sent_length, batch_size)
        """
        word_ids = self.words2indices(sents)
        sents_t = pad_sents(word_ids, self.word2id['<pad>'])
        sents_var = torch.tensor(sents_t, dtype=torch.long, device=device)
        return torch.t(sents_var)

    @staticmethod
    def from_corpus(corpus, size, freq_cutoff):
        """
        Given a corpus construct a VocabEntry
        @param corpus (list[list[str]]): corpus
        @param size (int): # of words in vocabulary
        @param freq_cutoff (int): if a word occurs n < freq_cutoff times, drop the word
        @returns vocab_entry (VocabEntry): VocabEntry instance produced from provided corpus
        """
        vocab_entry = VocabEntry()
        # Idiom: feed Counter a generator instead of a throwaway list.
        word_freq = Counter(w for sent in corpus for w in sent)
        valid_words = [w for w, v in word_freq.items() if v >= freq_cutoff]
        print('number of word types: {}, number of word types w/ frequency >= {}: {}'
              .format(len(word_freq), freq_cutoff, len(valid_words)))

        # Keep at most `size` surviving words, most frequent first.
        top_k_words = sorted(valid_words, key=lambda w: word_freq[w], reverse=True)[:size]
        for word in top_k_words:
            vocab_entry.add(word)

        return vocab_entry


class Vocab(object):
    """Thin container holding the sentence-level VocabEntry."""

    def __init__(self, sent_vocab: VocabEntry):
        self.sent = sent_vocab

    @staticmethod
    def build(sent_sents, vocab_size, freq_cutoff) -> 'Vocab':
        """Construct a Vocab from a tokenized corpus.

        @param sent_sents (list[list[str]]): tokenized sentences
        @param vocab_size (int): maximum number of words to keep
        @param freq_cutoff (int): drop words occurring fewer times than this
        """
        print('initialize sentence vocabulary ..')
        sent = VocabEntry.from_corpus(sent_sents, vocab_size, freq_cutoff)
        return Vocab(sent)

    def save(self, file_path):
        """Persist the word2id mapping as JSON to `file_path`.

        Fix: the original passed open() directly to json.dump and leaked the
        file handle; a context manager guarantees it is closed (and flushed).
        UTF-8 is pinned so the output does not depend on the platform locale.
        """
        with open(file_path, 'w', encoding='utf-8') as f:
            json.dump(dict(sent_word2id=self.sent.word2id), f, indent=2)

    @staticmethod
    def load(file_path):
        """Load a Vocab previously written by save().

        Fix: same file-handle leak as save(); closed via context manager.
        """
        with open(file_path, 'r', encoding='utf-8') as f:
            entry = json.load(f)
        sent_word2id = entry['sent_word2id']
        return Vocab(VocabEntry(sent_word2id))

    def __repr__(self):
        return 'Vocab (sentence %d words in total.)' % (len(self.sent))


if __name__ == '__main__':
    # Build and persist the vocabulary according to the CLI options.
    opts = parse_args()
    args = vars(opts)  # namespace -> plain dict for key-based access
    print(args)
    print('read in data file: %s' % args['data_dir'])
    corpus = read_corpus(args['data_dir'])

    vocab = Vocab.build(corpus, args['vocab_size'], args['sent_freq_cutoff'])
    print('generated vocabulary, sentence %d words in total.' % len(vocab.sent))

    vocab.save(args['vocab_file'])
    print('vocabulary saved to %s' % args['vocab_file'])
