import torch
import torch.nn as nn
import torch.nn.functional as F




class SelfAttentionConv(nn.Module):
    """Multi-head self-attention whose queries and keys come from causal
    1-D convolutions over the time axis (values use a pointwise conv).

    Input and output shape: (batch, time, k).  With ``mask_next`` the
    attention is causal: position i may only attend to positions <= i
    (strictly < i when ``mask_diag`` is also True).
    """

    def __init__(self, k, headers=8, kernel_size=5, mask_next=True, mask_diag=False):
        super().__init__()

        self.k, self.headers, self.kernel_size = k, headers, kernel_size
        self.mask_next = mask_next
        self.mask_diag = mask_diag

        h = headers

        # Left-only padding keeps the convolution causal and the output
        # length equal to the input length t.
        padding = kernel_size - 1
        self.padding_operator = nn.ConstantPad1d((padding, 0), 0)

        self.toqueries = nn.Conv1d(k, k * h, kernel_size, padding=0, bias=True)
        self.tokeys = nn.Conv1d(k, k * h, kernel_size, padding=0, bias=True)
        self.tovalues = nn.Conv1d(k, k * h, kernel_size=1, padding=0, bias=False)

        self.unifyheads = nn.Linear(k * h, k)

    def forward(self, x):
        b, t, k = x.size()
        h = self.headers
        assert self.k == k, (
            'Number of time series ' + str(k) + " didn't match the number of k "
            + str(self.k) + ' in the initialization of the attention layer.'
        )

        # Conv1d expects (batch, channels, time).
        x = x.transpose(1, 2)
        x_padded = self.padding_operator(x)

        # Each projection yields (b, k*h, t); split channels into (k, h) heads.
        queries = self.toqueries(x_padded).view(b, k, h, t)
        keys = self.tokeys(x_padded).view(b, k, h, t)
        values = self.tovalues(x).view(b, k, h, t)

        # Rearrange to (b, h, t, k) and fold heads into the batch dimension
        # so a single bmm processes every head at once.
        # BUG FIX: the previous code transposed to (b, t, h, k) *before* the
        # view(b*h, t, k), which scrambled the (batch, head) grouping and
        # made attention mix rows across unrelated batch elements.
        queries = queries.permute(0, 2, 3, 1).contiguous().view(b * h, t, k)
        keys = keys.permute(0, 2, 3, 1).contiguous().view(b * h, t, k)
        values = values.permute(0, 2, 3, 1).contiguous().view(b * h, t, k)

        # Scale q and k by k**0.25 each so the dot product is scaled by sqrt(k).
        mod = k ** 0.25
        queries = queries / mod
        keys = keys / mod

        weights = torch.bmm(queries, keys.transpose(1, 2))  # (b*h, t, t)

        # Causal mask: forbid attention to the future (and optionally to the
        # current position).  Indices are created on the weights' device so
        # the layer also works on GPU (previously they were CPU-only).
        if self.mask_next:
            offset = 0 if self.mask_diag else 1
            indices = torch.triu_indices(t, t, offset=offset, device=weights.device)
            weights[:, indices[0], indices[1]] = float("-inf")

        weights = F.softmax(weights, dim=2)

        output = torch.bmm(weights, values)          # (b*h, t, k)
        output = output.view(b, h, t, k)
        output = output.transpose(1, 2).contiguous().view(b, t, k * h)
        return self.unifyheads(output)               # shape (b, t, k)
    
    
class ConvTransformerBlock(nn.Module):
    """One transformer block: convolutional self-attention followed by a
    position-wise feed-forward network, each sublayer wrapped with a
    residual connection and (post-)layer normalisation.
    """

    def __init__(self, k, headers, kernel_size=5, mask_next=True, mask_diag=False, dropout_proba=0.2):
        super().__init__()

        self.attention = SelfAttentionConv(k, headers, kernel_size, mask_next, mask_diag)

        # One LayerNorm after each sublayer's residual sum.
        self.norm1 = nn.LayerNorm(k)
        self.norm2 = nn.LayerNorm(k)

        # Standard 4x-expansion feed-forward network.
        self.feedforward = nn.Sequential(
            nn.Linear(k, 4 * k),
            nn.ReLU(),
            nn.Linear(4 * k, k),
        )
        self.dropout = nn.Dropout(p=dropout_proba)
        self.activation = nn.ReLU()

    def forward(self, x, train=False):
        """Transform x of shape (b, t, k); dropout is applied after the
        attention residual only when the caller passes train=True."""
        attended = self.attention(x) + x
        if train:
            attended = self.dropout(attended)
        attended = self.norm1(attended)
        fed = self.feedforward(attended) + attended
        return self.norm2(fed)
    
    
class TransformerBlock(nn.Module):
    """Thin container around a standard ``nn.MultiheadAttention`` layer.

    NOTE(review): no forward() is defined and the class is not used by the
    rest of this file — it looks like an unfinished alternative block.
    """

    def __init__(self, hidden_size, headers, dropout_prob):
        super().__init__()
        self.attn = nn.MultiheadAttention(hidden_size, headers, dropout=dropout_prob)
        

class ForcastConvTransformer(nn.Module):
    """Forecasting model: linear projection to model dim k plus learned
    positional (and optional token) embeddings, a stack of
    ConvTransformerBlocks, then a 2-layer bidirectional GRU whose last
    time step is mapped to a single scalar prediction per batch element.
    """

    def __init__(self, input_size, k, headers, depth, seq_length, hidden_size, kernel_size=5,
                 mask_next=True, mask_diag=False, dropout_prob=0.2,
                 num_tokens=None):
        super().__init__()

        # BUG FIX: token_embedding was referenced in forward() but its
        # creation was commented out, so the tokens path always crashed.
        # Re-enabled, guarded by num_tokens (default None keeps old behavior).
        self.token_in_count = False
        if num_tokens:
            self.token_in_count = True
            self.token_embedding = nn.Embedding(num_tokens, k)

        self.position_embedding = nn.Embedding(seq_length, k)
        self.k = k
        self.seq_length = seq_length

        # Projects each time step from input_size features to model dim k.
        self.linear = nn.Sequential(
            nn.Linear(input_size, k),
            nn.ReLU())

        # BUG FIX: the blocks were previously held in a plain Python list,
        # so their parameters were invisible to .parameters(), the
        # optimizer, and .to(device).  nn.ModuleList registers them.
        self.transformer_blocks = nn.ModuleList(
            [ConvTransformerBlock(self.k, headers, kernel_size, mask_next,
                                  mask_diag, dropout_prob)
             for _ in range(depth)])

        self.hidden_size = hidden_size
        self.gru = nn.GRU(self.k, hidden_size, 2, dropout=dropout_prob,
                          bidirectional=True, batch_first=True)
        self.out = nn.Linear(hidden_size, 1)

    def forward(self, x, tokens=None, train=False):
        """Predict one scalar per batch element.

        Args:
            x: input of shape (batch, time, input_size).
            tokens: optional per-batch token ids; required iff the model
                was built with num_tokens.
            train: when True, dropout is applied inside the blocks.
        Returns:
            Tensor of shape (batch, 1).
        """
        b, t, h = x.size()
        k = self.k

        # nn.Linear acts on the last dimension; no flatten/reshape needed.
        x = self.linear(x)  # (b, t, k)

        # Positional embeddings, created on the input's device (GPU-safe;
        # previously torch.arange was always on CPU).  Kept local instead
        # of being stored on self during forward.
        pos = torch.arange(t, device=x.device)
        pos_emb = self.position_embedding(pos).expand(b, t, k)

        # Tokens must be supplied exactly when token embeddings exist.
        assert self.token_in_count == (tokens is not None)

        if tokens is not None:
            assert tokens.size(0) == b
            tok_emb = self.token_embedding(tokens)
            tok_emb = tok_emb.expand(t, b, k).transpose(0, 1)
            x = pos_emb + tok_emb + x
        else:
            x = pos_emb + x

        for block in self.transformer_blocks:
            x = block(x, train)

        outputs, _ = self.gru(x)
        last = outputs[:, -1, :]
        # Sum the forward and backward halves of the bidirectional GRU output.
        last = last[:, :self.hidden_size] + last[:, self.hidden_size:]
        return self.out(last)
        
        
