import torch.nn.functional as F
import copy
import torch
import math
import torch.nn as nn
from .input import *


def attenttion(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention.

    Computes softmax(Q K^T / sqrt(d_k)) V. Positions where ``mask == 0``
    are suppressed with a large negative logit before the softmax.

    Args:
        query, key, value: tensors whose last two dims are (seq, d_k);
            any leading batch/head dims must be broadcast-compatible.
        mask: optional tensor broadcastable to the score shape; zeros mark
            positions to ignore.
        dropout: optional dropout *module* applied to the attention weights.

    Returns:
        (context, weights): the attended values and the attention weights.
    """
    dim = query.size(-1)
    # Scale by sqrt(d_k) to keep logits in a range where softmax is well-behaved.
    logits = query.matmul(key.transpose(-2, -1)) / math.sqrt(dim)
    if mask is not None:
        logits = logits.masked_fill(mask == 0, -1e9)
    weights = torch.softmax(logits, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    context = weights.matmul(value)
    return context, weights


def clones(module_obj, obj_num):
    """Return a ``ModuleList`` of ``obj_num`` independent deep copies of ``module_obj``."""
    return nn.ModuleList(copy.deepcopy(module_obj) for _ in range(obj_num))


class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention (Vaswani et al., 2017).

    Projects query/key/value into ``head`` subspaces of ``d_model // head``
    features each, runs scaled dot-product attention in every head in
    parallel, then concatenates the heads and applies a final linear
    projection back to ``d_model``.
    """

    def __init__(self, d_model, head, dropout_p=0.1):
        assert d_model % head == 0
        super().__init__()
        self.d_k = d_model // head  # per-head feature size
        self.head = head
        self.dropout = nn.Dropout(dropout_p)
        # Four projections: query, key, value, and the final output layer.
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None  # attention weights from the last forward, for inspection

    def forward(self, query, key, value, mask=None):
        if mask is not None:
            # BUGFIX: insert the new axis at dim 1 (the head axis), so a
            # (batch, seq_q, seq_k) mask broadcasts across heads as
            # (batch, 1, seq_q, seq_k). The previous unsqueeze(0) produced
            # (1, batch, seq_q, seq_k), which misaligned the batch dim with
            # the head dim of the (batch, head, seq_q, seq_k) scores for
            # any batch_size > 1.
            mask = mask.unsqueeze(1)
        batch_size = query.size(0)
        # Project, then split features into heads:
        # (batch, seq, d_model) -> (batch, head, seq, d_k).
        new_query, new_key, new_value = [
            model(x).reshape(batch_size, -1, self.head, self.d_k).transpose(1, 2)
            for model, x in zip(self.linears, (query, key, value))
        ]
        data, self.attn = attenttion(new_query, new_key, new_value, mask, self.dropout)
        # Merge heads back: (batch, head, seq, d_k) -> (batch, seq, d_model).
        data = data.transpose(1, 2).reshape(batch_size, -1, self.head * self.d_k)
        return self.linears[-1](data)


class FeedForward(nn.Module):
    """Position-wise feed-forward block: Linear -> ReLU -> Dropout -> Linear.

    Expands ``d_model`` features to ``d_ff``, applies ReLU and dropout,
    then projects back down to ``d_model``.
    """

    def __init__(self, d_model, d_ff, dropout_p=0.1):
        super().__init__()
        self.linear1 = nn.Linear(in_features=d_model, out_features=d_ff)
        self.linear2 = nn.Linear(in_features=d_ff, out_features=d_model)
        self.dropout = nn.Dropout(dropout_p)

    def forward(self, data):
        hidden = F.relu(self.linear1(data))
        return self.linear2(self.dropout(hidden))


class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with learned scale and shift.

    NOTE(review): eps is added to the standard deviation (not the variance),
    and ``Tensor.std`` uses its default correction — this matches the
    Annotated Transformer recipe rather than ``torch.nn.LayerNorm`` exactly.
    """

    def __init__(self, d_model):
        super().__init__()
        self.k = nn.Parameter(torch.ones(d_model))   # learned per-feature scale
        self.b = nn.Parameter(torch.zeros(d_model))  # learned per-feature shift
        self.eps = 1e-6  # guards against division by zero

    def forward(self, data):
        mu = data.mean(dim=-1, keepdim=True)
        sigma = data.std(dim=-1, keepdim=True)
        normalized = (data - mu) / (sigma + self.eps)
        return normalized * self.k + self.b
