import math
import random

import torch
import torch.nn as nn
import torch.nn.functional as F

from torch.autograd import Variable


class Encoder(nn.Module):
    """CNN + bidirectional-GRU encoder.

    Input features are optionally offset by a per-sequence token embedding,
    down-sampled by a 1-D convolution, then encoded by a bidirectional GRU
    whose two directions are summed so the output stays ``hidden_size`` wide.
    """

    def __init__(self, channel_in, channel_out, hidden_size, kernel_size, strides, num_tokens, n_layers=1, dropout=0.5):
        super().__init__()

        self.hidden_size = hidden_size
        self.dropout = dropout

        # Always define both attributes so forward() (and external callers)
        # can probe them safely. The original only set them when num_tokens
        # was truthy, so passing tokens to an encoder built with
        # num_tokens=0 crashed with AttributeError.
        self.token_in_count = bool(num_tokens)
        self.token_embedding = nn.Embedding(num_tokens, channel_in) if num_tokens else None

        self.cnn = nn.Sequential(
            nn.Conv1d(channel_in, channel_out, kernel_size, strides),
            nn.PReLU()
        )
        # nn.GRU warns when dropout is non-zero with a single layer.
        if n_layers == 1:
            dropout = 0

        self.gru = nn.GRU(channel_out, self.hidden_size, n_layers, dropout=dropout,
                          bidirectional=True, batch_first=True)
        self.init_params()

    def init_params(self):
        """Re-initialise the layer-0 GRU bias slice [H, 2H) (both directions)
        with U(0, 1); the remaining parameters keep PyTorch's defaults."""
        nn.init.uniform_(self.gru.bias_ih_l0[self.hidden_size:2 * self.hidden_size])
        nn.init.uniform_(self.gru.bias_hh_l0[self.hidden_size:2 * self.hidden_size])
        nn.init.uniform_(self.gru.bias_ih_l0_reverse[self.hidden_size:2 * self.hidden_size])
        nn.init.uniform_(self.gru.bias_hh_l0_reverse[self.hidden_size:2 * self.hidden_size])

    def forward(self, x, tokens, hidden=None):
        """Encode a batch.

        Args:
            x: (batch, seq_in, channel_in) feature tensor.
            tokens: optional (batch,) LongTensor of token ids whose embedding
                is added to every timestep, or None.
            hidden: optional initial GRU state.

        Returns:
            outputs: (batch, seq_out, hidden_size) with the two GRU
                directions summed.
            hidden: final GRU state, (2 * n_layers, batch, hidden_size).

        Raises:
            ValueError: if tokens are given but the encoder has no embedding
                table, or if the token batch size does not match x.
        """
        b, t, k = x.size()
        if tokens is not None:
            if self.token_embedding is None:
                raise ValueError("tokens were given but the encoder was built with num_tokens=0")
            # Explicit raise instead of assert: asserts vanish under -O.
            if tokens.size(0) != b:
                raise ValueError("tokens batch size must match x")
            tok_emb = self.token_embedding(tokens)          # (b, k)
            tok_emb = tok_emb.unsqueeze(1).expand(b, t, k)  # broadcast over time
            x = tok_emb + x

        x = x.permute(0, 2, 1)  # batch_size * feature_size * seq_in
        x = self.cnn(x)

        x = x.permute(0, 2, 1).contiguous()  # batch_size * seq_out * feature_size
        outputs, hidden = self.gru(x, hidden)

        # Sum forward and backward directions instead of concatenating,
        # keeping the feature width at hidden_size.
        outputs = (outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:])
        return outputs, hidden
    

class Attention(nn.Module):
    """Additive (Bahdanau-style) attention over encoder outputs."""

    def __init__(self, attn_dim):
        super().__init__()
        self.attn_dim = attn_dim
        self.attn = nn.Sequential(
            nn.Linear(attn_dim * 2, attn_dim),
            nn.ReLU()
        )
        # Learned scoring vector applied to the projected energies.
        self.v = nn.Parameter(torch.rand(attn_dim))
        self.init_params()

    def init_params(self):
        """Initialise v with U(-1/sqrt(d), 1/sqrt(d))."""
        stdv = 1. / math.sqrt(self.v.size(0))
        self.v.data.uniform_(-stdv, stdv)

    def forward(self, hidden, encoder_outputs):
        """Compute attention weights.

        Args:
            hidden: (batch, attn_dim) decoder state.
            encoder_outputs: (batch, seq, attn_dim).

        Returns:
            (batch, 1, seq) softmax weights, ready for bmm with
            encoder_outputs.
        """
        timestep = encoder_outputs.size(1)
        # BUGFIX: the decoder state must be tiled along a NEW time axis.
        # The old `hidden.repeat(1, timestep, 1)` on a 2-D tensor produced a
        # (1, batch*timestep, attn_dim) tensor, so the concatenation in
        # score() only worked for batch size 1.
        h = hidden.unsqueeze(1).repeat(1, timestep, 1)
        attn_energy = self.score(h, encoder_outputs)
        return F.softmax(attn_energy, dim=1).unsqueeze(1)

    def score(self, hidden, encoder_outputs):
        """Additive score: v . relu(W [hidden; encoder_outputs]) per step."""
        energy = self.attn(torch.cat([hidden, encoder_outputs], 2))  # (b, t, d)
        v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1)   # (b, 1, d)
        energy = energy.permute(0, 2, 1)                             # (b, d, t)
        energy = torch.bmm(v, energy)                                # (b, 1, t)
        return energy.squeeze(1)
    
    
class Decoder(nn.Module):
    """Attention GRU decoder that produces one output frame per call."""

    def __init__(self, hidden_size, output_size, n_layers=1, dropout=0.2):
        super().__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        # nn.GRU warns when dropout is non-zero with a single layer.
        if self.n_layers == 1:
            dropout = 0

        self.attention = Attention(hidden_size)
        # GRU input is the previous frame concatenated with the context.
        self.gru = nn.GRU(hidden_size + output_size, hidden_size, n_layers,
                          dropout=dropout, batch_first=True)
        self.out = nn.Linear(hidden_size * 2, output_size)
        self.dropout = dropout

    def forward(self, input, last_hidden, encoder_outputs):
        """One decoding step.

        Args:
            input: (batch, output_size) previous output frame.
            last_hidden: (n_layers, batch, hidden_size) previous GRU state.
            encoder_outputs: (batch, seq, hidden_size).

        Returns:
            output: (batch, output_size) next frame.
            hidden: updated GRU state.
        """
        # BUGFIX: the GRU is batch_first, so the length-1 time axis belongs
        # at dim 1. The old unsqueeze(0) produced (1, batch, output_size)
        # and only worked for batch size 1.
        embedded = input.unsqueeze(1)                                    # (b, 1, out)

        attn_weights = self.attention(last_hidden[-1], encoder_outputs)  # (b, 1, t)
        context = attn_weights.bmm(encoder_outputs)                      # (b, 1, h)

        rnn_input = torch.cat([embedded, context], dim=2)
        output, hidden = self.gru(rnn_input, last_hidden)
        output = output.squeeze(1)
        context = context.squeeze(1)
        # Project [gru_output; context] down to the output frame.
        output = self.out(torch.cat([output, context], 1))
        return output, hidden
        

class Seq2Seq(nn.Module):
    """Encoder-decoder wrapper with per-step scheduled teacher forcing."""

    def __init__(self, encoder, decoder, teacher_forcing_ratio=0.5):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.teacher_forcing_ratio = teacher_forcing_ratio

    def forward(self, src, target, tokens, teacher_forcing_ratio=None):
        """Run the full sequence-to-sequence pass.

        Args:
            src: (batch, src_len, channel_in) encoder input.
            target: (batch, tgt_len, output_size) ground-truth frames.
            tokens: optional (batch,) token ids forwarded to the encoder.
            teacher_forcing_ratio: per-call override; falls back to the
                value given at construction when None.

        Returns:
            (batch, tgt_len, output_size) predictions; slot 0 is left zero
            (decoding starts at t=1).
        """
        batch_size = src.size(0)
        max_len = target.size(1)
        vocab_size = self.decoder.output_size

        # Allocate on the input's device. The old Variable(torch.zeros(...))
        # wrapper is deprecated and always lived on the CPU, which broke
        # GPU training.
        outputs = torch.zeros(batch_size, max_len, vocab_size, device=src.device)

        encoder_output, hidden = self.encoder(src, tokens)

        # Keep only the decoder's share of the (possibly bidirectional) state.
        hidden = hidden[:self.decoder.n_layers]
        if teacher_forcing_ratio is None:
            teacher_forcing_ratio = self.teacher_forcing_ratio
        is_teacher = random.random() < teacher_forcing_ratio
        # detach() reproduces the old `target.data` semantics: teacher-forced
        # frames do not backpropagate into the target tensor.
        output = target[:, 0, :].detach() if is_teacher else outputs[:, 0, :]
        for t in range(1, max_len):
            output, hidden = self.decoder(output, hidden, encoder_output)
            outputs[:, t, :] = output
            # Re-flip the teacher-forcing coin every step.
            is_teacher = random.random() < teacher_forcing_ratio
            output = target[:, t, :].detach() if is_teacher else output

        return outputs
        
