# -*- coding: utf-8 -*-


import numpy as np
from typing import List, Tuple, Dict, Set, Union
import torch
import torch.nn as nn
import torch.nn.utils
import torch.nn.functional as F
from torch.autograd import Variable
import cpg.utils as utils
from .transformer import make_transformer_decoder, PositionalEncoding, subsequent_mask

class BasicModel(nn.Module):
    """Shared base class for the poetry language models.

    Subclasses (LSTMModel / TransformerModel) are expected to provide
    ``self.vocab``, ``self.device``, ``self.embedding``, ``self.trainable_pe``
    and a ``forward`` that maps a batch of token lists to vocabulary logits.
    """

    def __init__(self):
        super(BasicModel, self).__init__()

    def get_embeddings(self, input, start=0):
        """Embed a batch of token sequences, optionally adding the trainable
        rhythm / sentence-position embeddings.

        @param input: List[List[str]] batch of token sequences; every sequence
            is assumed to begin with the same start token ('<s>' or '<f>').
        @param start: position offset forwarded to the Transformer positional
            encoding.
        @return: Tensor (b, s_len, emb) for batch-first subclasses,
            (s_len, b, emb) for the LSTM.
        """
        # The LSTM consumes time-major input; every other subclass is batch-first.
        batch_first = False if self.__class__.__name__ == 'LSTMModel' else True

        sent_padded = self.vocab.to_input_tensor(input, device=self.device).T  # Tensor: (b, s_len)
        input_emb = self.embedding(sent_padded)     # Tensor: (b, s_len, emb)

        if self.__class__.__name__ == 'TransformerModel':
            input_emb = self.position_embedding(input_emb, start)   # Tensor: (b, s_len, emb)

        if self.trainable_pe:
            # 33 slots appear to cover a seven-char quatrain: start token plus
            # 4 * (7 chars + '|'); five-char verse uses the first 24 slots.
            # TODO confirm this layout against the training data format.
            rhythm_emb = torch.zeros(size=(33, input_emb.size(2))).to(self.device)  # (33, emb)
            sent_emb = torch.zeros(size=(33, input_emb.size(2))).to(self.device)    # (33, emb)
            if input[0][0] == '<s>':
                # seven-char verse: special rhythm embedding at slots 15 and 31
                rhythm_emb += self.rhythm_embedding_2.expand(rhythm_emb.size())
                rhythm_emb[15] = self.rhythm_embedding_1
                rhythm_emb[31] = self.rhythm_embedding_1

                sent_emb[1:8] += self.seven_sentence_embedding
                sent_emb[9:16] += self.seven_sentence_embedding
                sent_emb[17:24] += self.seven_sentence_embedding
                sent_emb[25:32] += self.seven_sentence_embedding

            elif input[0][0] == '<f>':
                # five-char verse: special rhythm embedding at slots 11 and 23
                rhythm_emb += self.rhythm_embedding_2.expand(rhythm_emb.size())
                rhythm_emb[11] = self.rhythm_embedding_1
                rhythm_emb[23] = self.rhythm_embedding_1

                sent_emb[1:6] += self.five_sentence_embedding
                sent_emb[7:12] += self.five_sentence_embedding
                sent_emb[13:18] += self.five_sentence_embedding
                sent_emb[19:24] += self.five_sentence_embedding
            else:
                # typo fixed: "unkown" -> "unknown"
                raise Exception("unknown start token {}".format(input[0][0]))

            input_emb += rhythm_emb[:input_emb.size(1)].unsqueeze(0).expand(input_emb.size())
            input_emb += sent_emb[:input_emb.size(1)].unsqueeze(0).expand(input_emb.size())
        return input_emb if batch_first else torch.transpose(input_emb, 0, 1)

    def select_idxes(self, output, beam_size):
        """Randomly select sqrt(beam_size) candidate token ids among the
        top-beam_size entries of ``output``, weighted by their probability.

        @param output: 1-D tensor of softmaxed probabilities over the vocabulary
            (modified in place: the '<unk>' entry is zeroed).
        @param beam_size: number of top entries considered.
        @return: numpy array of token ids, length int(sqrt(beam_size)).
        """
        output[self.vocab['<unk>']] = 0  # never sample the unknown token
        char_idxes = torch.topk(output, beam_size).indices.cpu().numpy()
        # renormalize the top-k probabilities so they sum to one
        prob = (output[char_idxes] / torch.sum(output[char_idxes])).cpu().numpy()
        return np.random.choice(char_idxes, size=int(np.sqrt(beam_size)), replace=False, p=prob)

    def generate_poetry(self, batch_first, mode='prefix', prefix='好', head='我好想你', beam_size=1, start_token='<s>', seed=0):
        """Generate a four-sentence poem with stochastic beam search.

        @param batch_first: whether ``forward`` returns (b, s_len, V) outputs.
        @param mode: 'prefix' continues ``prefix``; 'head' builds an acrostic
            whose sentences start with the characters of ``head``.
        @param beam_size: beam width; each beam samples sqrt(beam_size) successors.
        @param start_token: '<s>' (seven-char) or '<f>' (five-char verse).
        @param seed: numpy RNG seed, for reproducible sampling.
        @return: the poem as a single string ('|' separates sentences).
        @raise ValueError: if ``mode`` is not one of the supported options.
        """
        np.random.seed(seed)
        end_token = start_token[0] + '/' + start_token[1:]  # '<s>' -> '</s>'
        mode_options = ['prefix', 'head']
        candidates = []
        if mode == 'prefix':
            prefix_sents = [[start_token] + list(prefix)]
        elif mode == 'head':
            prefix_sents = [[start_token] + [head[0]]]
        else:
            raise ValueError(f'Invalid mode: {mode}. Options: {mode_options}')

        candidates.append({'wordlist': [*prefix_sents[0]],
                           'sent_num': 0,
                           'prob': 0})
        # NOTE(review): the flag ends the search as soon as ANY beam is complete
        # (4 sentences or end token reached); the other beams are frozen in
        # their partial state. Confirm this early stop is intended.
        update = True
        while update:
            new_candidates = []
            update = True
            for candi in candidates:
                if candi['sent_num'] == 4 or candi['wordlist'][-1] == end_token:
                    new_candidates.append(candi)
                    update = False
                    continue
                output, _ = self.forward([candi['wordlist']])
                last_char = output[0, -1, :] if batch_first else output[-1, 0, :]  # logits at the last position
                last_char_prob = torch.softmax(last_char, dim=-1).detach()
                char_idxes = self.select_idxes(last_char_prob, beam_size)
                for char_idx in char_idxes:
                    new_char = self.vocab.get_id2word(char_idx.item())
                    # acrostic mode: the first char after a sentence break is forced
                    if mode == 'head' and candi['wordlist'][-1] == '|':
                        new_char = head[candi['sent_num']]
                    new_candidates.append({'wordlist': [*list(candi['wordlist']), new_char],
                                           'sent_num': candi['sent_num'] + int(new_char == '|'),
                                           'prob': candi['prob'] + np.log(last_char_prob[char_idx.item()].item())})

            # keep the beam_size candidates with the highest log-probability
            new_candidates = sorted(new_candidates, key=lambda x: -x['prob'])
            candidates = new_candidates[:beam_size]
        chars = candidates[0]['wordlist']

        return ''.join(chars)

    def Jaccard_similarity(self, poem1, poem2, n):
        """Return the n-gram Jaccard similarity of two poems.

        Sentences are delimited by '|'; n-grams never cross a sentence
        boundary. Returns 0.0 when neither poem contains any n-gram
        (the original raised ZeroDivisionError in that case).
        """
        grams_1 = set([sent[i:i+n] for sent in poem1.split('|') for i in range(len(sent)-n+1)])
        grams_2 = set([sent[i:i+n] for sent in poem2.split('|') for i in range(len(sent)-n+1)])
        union = grams_1.union(grams_2)
        if not union:
            return 0.0
        return len(grams_1.intersection(grams_2)) / len(union)

    def evalute_diversity(self):
        """Generate poems for fixed prefix/head topics and score their diversity.

        (Method name keeps its original spelling for caller compatibility.)

        @return: dict with mean inter-topic and intra-topic 2-gram Jaccard
            similarities plus all generated poems.
        """
        batch_first = False if self.__class__.__name__ == 'LSTMModel' else True
        prefixes = ['春', '夏', '秋', '冬', '东', '西', '南', '北', '大', '小', '日', '月']
        heads = ['春夏秋冬', '日月精华', '东西南北', '海纳百川', '天气之子', '仁者无敌', '一日千里']
        generate_num = 2  # poems generated per topic, per start token
        prefix_poems = [[] for i in range(len(prefixes))]
        head_poems = [[] for i in range(len(heads))]
        for i, prefix in enumerate(prefixes):
            for j in range(generate_num):
                prefix_poems[i].append(self.generate_poetry(batch_first=batch_first, mode='prefix', prefix=prefix, beam_size=3, start_token='<s>', seed=2021+j))
            for j in range(generate_num):
                prefix_poems[i].append(self.generate_poetry(batch_first=batch_first, mode='prefix', prefix=prefix, beam_size=3, start_token='<f>', seed=2021+j))
        for i, head in enumerate(heads):
            for j in range(generate_num):
                head_poems[i].append(self.generate_poetry(batch_first=batch_first, mode='head', head=head, beam_size=3, start_token='<s>', seed=2021+j))
            for j in range(generate_num):
                head_poems[i].append(self.generate_poetry(batch_first=batch_first, mode='head', head=head, beam_size=3, start_token='<f>', seed=2021+j))

        # NOTE(review): each topic holds 2*generate_num poems, but the loops
        # below only index the first generate_num (the '<s>' ones) — confirm.

        # inter-topic similarity: average over all cross-topic poem pairs
        prefix_inter_js = 0
        for i1 in range(len(prefixes)):
            for i2 in range(i1 + 1, len(prefixes)):
                for j1 in range(generate_num):
                    for j2 in range(generate_num):
                        prefix_inter_js += self.Jaccard_similarity(prefix_poems[i1][j1], prefix_poems[i2][j2], 2)
        prefix_inter_js /= (len(prefixes) * (len(prefixes) - 1) * generate_num * generate_num // 2)

        head_inter_js = 0
        for i1 in range(len(heads)):
            # start at i1 + 1: excludes i1 == i2 self-pairs, matching both the
            # prefix computation above and the pair-count divisor below.
            # (The original started at i1 and so included same-topic pairs
            # while still dividing by the cross-topic pair count.)
            for i2 in range(i1 + 1, len(heads)):
                for j1 in range(generate_num):
                    for j2 in range(generate_num):
                        head_inter_js += self.Jaccard_similarity(head_poems[i1][j1], head_poems[i2][j2], 2)
        head_inter_js /= (len(heads) * (len(heads) - 1) * generate_num * generate_num // 2)

        # intra-topic similarity: average over poem pairs within each topic
        prefix_intra_js = 0
        for i in range(len(prefixes)):
            for j1 in range(generate_num):
                for j2 in range(j1+1, generate_num):
                    prefix_intra_js += self.Jaccard_similarity(prefix_poems[i][j1], prefix_poems[i][j2], 2)
        prefix_intra_js /= (generate_num * (generate_num - 1) * len(prefixes) // 2)

        head_intra_js = 0
        for i in range(len(heads)):
            for j1 in range(generate_num):
                for j2 in range(j1+1, generate_num):
                    head_intra_js += self.Jaccard_similarity(head_poems[i][j1], head_poems[i][j2], 2)
        head_intra_js /= (generate_num * (generate_num - 1) * len(heads) // 2)

        return {"prefix_inter_js": prefix_inter_js,
                "prefix_intra_js": prefix_intra_js,
                "head_inter_js": head_inter_js,
                "head_intra_js": head_intra_js,
                "prefix_poems": [poem for poems in prefix_poems for poem in poems],
                "head_poems": [poem for poems in head_poems for poem in poems]}

class LSTMModel(BasicModel):
    """
    Simple language model:
    - single-layer LSTM
    - linear projection to vocabulary logits
    """

    def __init__(self, setting, vocab, device):
        super(LSTMModel, self).__init__()

        self.batch_size = setting.train_batch_size
        self.embed_size = setting.embed_size
        self.hidden_size = setting.hidden_size
        self.layer_num = 1  # NOTE(review): hard-coded to 1; setting.layer_num is ignored
        self.dropout_rate = setting.dropout
        self.trainable_pe = setting.trainable_pe
        self.vocab = vocab
        self.device = device

        if setting.trainable_pe:
            # trainable rhythm / sentence-position embeddings consumed by get_embeddings
            self.rhythm_embedding_1 = nn.Parameter(torch.randn(1, self.embed_size), requires_grad=True)
            self.rhythm_embedding_2 = nn.Parameter(torch.randn(1, self.embed_size), requires_grad=True)
            self.seven_sentence_embedding = nn.Parameter(torch.randn(7, self.embed_size), requires_grad=True)
            self.five_sentence_embedding = nn.Parameter(torch.randn(5, self.embed_size), requires_grad=True)

        # initial hidden state: learnable Parameter or fixed zero Variable
        if setting.init_state_param:
            self.init_h = nn.Parameter(torch.randn(self.layer_num, self.batch_size, self.hidden_size).to(self.device))
            self.init_c = nn.Parameter(torch.randn(self.layer_num, self.batch_size, self.hidden_size).to(self.device))
        else:
            self.init_h = Variable(torch.zeros(self.layer_num, self.batch_size, self.hidden_size).to(self.device))
            self.init_c = Variable(torch.zeros(self.layer_num, self.batch_size, self.hidden_size).to(self.device))

        self.embedding = nn.Embedding(len(self.vocab), self.embed_size, padding_idx=vocab['<pad>'])
        self.lstm = nn.LSTM(input_size=self.embed_size,
                            hidden_size=self.hidden_size,
                            num_layers=self.layer_num)

        self.linear = nn.Linear(self.hidden_size, len(self.vocab))
        self.dropout = nn.Dropout(self.dropout_rate)

    def get_init_hidden(self, batch_size):
        """Slice the stored initial (h, c) state down to ``batch_size``."""
        assert batch_size <= self.batch_size
        init_h = self.init_h[:, :batch_size, :].contiguous()
        init_c = self.init_c[:, :batch_size, :].contiguous()
        return (init_h, init_c)

    def forward(self, input: List[List[str]], init_hidden=None, **args) -> torch.Tensor:
        """
        Take a mini-batch of input sentences, compute vocabulary logits.

        @param input: List[List[str]], batch of token sequences.
        @param init_hidden: optional (h, c) pair; defaults to the stored state
            sliced to the batch size.
        @return: (outputs, hidden), outputs of shape (s_len, b, vocab_size).
        """
        # explicit None check (consistent with TransformerModel.forward; the
        # original used `or`, relying on tuple truthiness)
        init_hidden = init_hidden if init_hidden is not None else self.get_init_hidden(len(input))

        sent_emb = self.get_embeddings(input)   # (s_len, b, emb): time-major for nn.LSTM

        outputs, hidden = self.lstm(sent_emb, init_hidden)  # outputs (s_len, b, h)
        outputs = self.dropout(self.linear(outputs))        # outputs (s_len, b, vocab_size)
        return outputs, hidden

    def get_loss_on_batch(self, sent_batch: List[List[str]], **kwargs_dict1) -> torch.Tensor:
        """
        For training.
        Take a batch of (input, target) token-list pairs and compute the mean
        cross entropy loss, ignoring padding positions.
        """
        input = [b[0] for b in sent_batch]
        target = [b[1] for b in sent_batch]

        outputs, _ = self(input)
        target_padded = self.vocab.to_input_tensor(target, device=self.device)   # (s_len, b)

        preds = outputs.reshape(outputs.size(0)*outputs.size(1), -1)
        targets = target_padded.reshape(-1)

        criterion = nn.CrossEntropyLoss(reduction='mean', ignore_index=self.vocab.pad_id)

        loss = criterion(preds, targets)
        return loss

    def get_eval_on_batch(self, sent_batch: List[List[str]], **func_kwargs) -> Dict:
        """
        For evaluation: return the mean perplexity of the batch.
        """
        loss = self.get_loss_on_batch(sent_batch).item()
        # CrossEntropyLoss is measured in nats, so perplexity is exp(loss);
        # the original `2**loss` is only correct for a base-2 loss.
        batch_info = dict(perplexity=float(np.exp(loss)))
        return batch_info

class TransformerModel(BasicModel):
    """
    Transformer-based language model:
    - transformer decoder stack
    - linear projection to vocabulary logits
    """

    def __init__(self, setting, vocab, device):
        super(TransformerModel, self).__init__()
        self.batch_size = setting.train_batch_size
        self.embed_size = setting.embed_size
        self.hidden_size = setting.hidden_size
        self.layer_num = setting.layer_num
        self.dropout_rate = setting.dropout
        self.max_seq_len = setting.max_sent_len
        self.ff_size = setting.ff_size
        self.trainable_pe = setting.trainable_pe
        self.vocab = vocab
        self.device = device
        if setting.trainable_pe:
            # trainable rhythm / sentence-position embeddings consumed by get_embeddings
            self.rhythm_embedding_1 = nn.Parameter(torch.randn(1, self.embed_size), requires_grad=True)
            self.rhythm_embedding_2 = nn.Parameter(torch.randn(1, self.embed_size), requires_grad=True)
            self.seven_sentence_embedding = nn.Parameter(torch.randn(7, self.embed_size), requires_grad=True)
            self.five_sentence_embedding = nn.Parameter(torch.randn(5, self.embed_size), requires_grad=True)

        # decoder "memory": learnable Parameter or fixed zero Variable
        if setting.init_state_param:
            self.init_memory = nn.Parameter(torch.zeros(self.batch_size, self.max_seq_len, self.hidden_size).to(self.device))
        else:
            self.init_memory = Variable(torch.zeros(self.batch_size, self.max_seq_len, self.hidden_size).to(self.device))

        self.embedding = nn.Embedding(len(self.vocab), self.embed_size, padding_idx=vocab['<pad>'])
        self.position_embedding = PositionalEncoding(self.embed_size, dropout=self.dropout_rate, max_len=self.max_seq_len)
        self.transformer = make_transformer_decoder(self.layer_num, self.hidden_size, ff_size=self.ff_size, dropout=self.dropout_rate)

        self.linear = nn.Linear(self.hidden_size, len(self.vocab))

    def get_init_memory(self, batch_size, seq_len):
        """Slice the stored memory tensor down to (batch_size, seq_len, h)."""
        assert batch_size <= self.batch_size
        memory = self.init_memory[:batch_size, :seq_len, :].contiguous()
        return memory

    def forward(self, input: List[List[str]], init_memory=None, start=0) -> torch.Tensor:
        """
        Take a mini-batch of input sentences, compute vocabulary logits.

        @param input: List[List[str]], batch of token sequences.
        @param init_memory: optional decoder memory; defaults to the stored
            memory sliced to the batch/sequence size.
        @param start: position offset for the positional encoding.
        @return: (outputs, new_memory), outputs of shape (b, s_len, vocab_size).
        """
        init_memory = init_memory if init_memory is not None else self.get_init_memory(len(input), len(input[0]))
        sent_emb = self.get_embeddings(input, start)      # Tensor: (b, s_len, emb)

        # causal mask: each position may only attend to earlier positions
        tgt_mask = subsequent_mask(sent_emb.size(1)).to(self.device)
        outputs = self.transformer(sent_emb, init_memory, src_mask=None, tgt_mask=tgt_mask)
        new_memory = outputs             # new_memory (b, s_len, h)
        outputs = self.linear(outputs)   # outputs (b, s_len, vocab_size)
        return outputs, new_memory

    def get_loss_on_batch(self, sent_batch: List[List[str]], **kwargs_dict1) -> torch.Tensor:
        """
        For training.
        Take a batch of (input, target) token-list pairs and compute the mean
        cross entropy loss, ignoring padding positions.
        """
        input = [b[0] for b in sent_batch]
        target = [b[1] for b in sent_batch]

        outputs, _ = self(input)
        target_padded = self.vocab.to_input_tensor(target, device=self.device).T   # (b, s_len)

        preds = outputs.reshape(outputs.size(0)*outputs.size(1), -1)
        targets = target_padded.reshape(-1)

        criterion = nn.CrossEntropyLoss(reduction='mean', ignore_index=self.vocab.pad_id)

        loss = criterion(preds, targets)
        return loss

    def get_eval_on_batch(self, sent_batch: List[List[str]], **func_kwargs) -> Dict:
        """
        For evaluation: return the mean perplexity of the batch.
        """
        loss = self.get_loss_on_batch(sent_batch).item()
        # CrossEntropyLoss is measured in nats, so perplexity is exp(loss);
        # the original `2**loss` is only correct for a base-2 loss.
        batch_info = dict(perplexity=float(np.exp(loss)))
        return batch_info

     



if __name__ == '__main__':
    # Library module: defines the models only, no standalone entry point.
    pass
