# -*- coding: utf-8 -*-


import torch
import torch.nn as nn
import numpy as np
from typing import List


class CNNLayer(nn.Module):
    """One convolution branch of a TextCNN.

    Applies a 2D convolution whose kernel spans the full embedding width,
    max-pools over all remaining time steps, and finishes with a ReLU.
    Input is assumed to be (batch, in_channel, max_len, emb_dim) — TODO
    confirm against the caller.
    """

    def __init__(self, in_channel, out_channel, kernel_size, emb_dim, stride, max_len):
        super(CNNLayer, self).__init__()
        # Kernel width equals emb_dim, so the width axis collapses to size 1.
        self.conv = nn.Conv2d(in_channel, out_channel, (kernel_size, emb_dim), (stride, stride))
        # A single pooling window covering every valid convolution position.
        self.pool = nn.MaxPool1d(max_len - kernel_size + 1, 1)
        self.relu = nn.ReLU()

    def forward(self, x):
        # (batch, out_channel, T, 1) -> (batch, out_channel, T)
        features = self.conv(x).squeeze(3)
        # Pool over the whole time axis, then drop it: -> (batch, out_channel)
        pooled = self.pool(features).squeeze(2)
        return self.relu(pooled)


class TextCNN(nn.Module):
    """TextCNN classifier: embed -> parallel CNN branches -> concat -> linear.

    Each branch (one per filter size) produces `filter_nums` pooled features;
    the concatenation is dropout-regularized and projected to `tgt_size` logits.
    """

    def __init__(self,
                 embedding_module: nn.Module,
                 tgt_size: int,
                 emb_dim: int,
                 filter_sizes: List[int],
                 filter_nums: int,
                 max_len: int = 32,
                 pad_idx: int = 0,
                 keep_prob: float = 0.5,
                 use_cuda: bool = True):
        """
        Args:
            embedding_module: module called as (entities, tokens, masks) -> (batch, max_len, emb_dim) embeddings.
            tgt_size: number of output classes.
            emb_dim: embedding dimension (must be even for the sin/cos interleave).
            filter_sizes: convolution kernel heights, one CNN branch per entry.
            filter_nums: number of output channels per branch.
            max_len: maximum sequence length.
            pad_idx: padding token index (currently unused here).
            keep_prob: value passed to nn.Dropout.
                NOTE(review): nn.Dropout's argument is the *drop* probability,
                so despite the name this is p(drop) — confirm intended value.
            use_cuda: place the model on cuda:0 when available.
        """
        super(TextCNN, self).__init__()
        self.device = torch.device("cuda:0" if use_cuda and torch.cuda.is_available() else "cpu:0")
        self.filter_sizes = filter_sizes
        self.filter_nums = filter_nums
        self.embeddings = embedding_module
        # NOTE(review): self.pe is built but never added to the embeddings in
        # forward() — confirm whether positional encoding should be applied.
        self.pe = self.positional_encoding(max_len, emb_dim, self.device)
        self.layerNorm = nn.LayerNorm(emb_dim)
        self.convs = nn.ModuleList([CNNLayer(1, filter_nums, kernel_size, emb_dim, 1, max_len) for kernel_size in filter_sizes])
        self.linear = nn.Linear(filter_nums * len(filter_sizes), tgt_size)
        self.dropout = nn.Dropout(keep_prob)
        self.to(self.device)

    @staticmethod
    def positional_encoding(max_len, emb_dim, device):
        """Build the sinusoidal positional-encoding table.

        Returns:
            Float tensor of shape (max_len, emb_dim) on `device`; even columns
            hold sin terms, odd columns cos terms.
        """
        # BUG FIX: this was torch.LongTensor(max_len, emb_dim) — an
        # uninitialized integer tensor that truncated the sin/cos values to
        # integers (or raised a dtype-cast error on newer torch versions).
        pe = torch.zeros(max_len, emb_dim)
        pos = torch.arange(0, max_len, 1.0).unsqueeze(1)
        # Inverse frequencies 10000^(-2i/emb_dim) for each even dimension i.
        k = torch.exp(-np.log(10000) * torch.arange(0, emb_dim, 2.) / emb_dim)
        pe[:, 0::2] = torch.sin(pos * k)
        pe[:, 1::2] = torch.cos(pos * k)  # assumes emb_dim is even
        return pe.to(device)

    def forward(self, entities: torch.LongTensor, tokens: torch.LongTensor, masks: torch.ByteTensor = None):
        """Classify a batch; returns (batch, tgt_size) logits."""
        emb = self.embeddings(entities, tokens, masks)
        # Non-inplace unsqueeze: the previous unsqueeze_(1) mutated the
        # embedding module's output tensor in place.
        expand_emb = emb.unsqueeze(1)  # add channel dim for Conv2d

        pooled_output = [conv(expand_emb) for conv in self.convs]
        out = torch.cat(pooled_output, dim=1).view(-1, self.filter_nums * len(self.filter_sizes))
        out = self.dropout(out)
        out = self.linear(out)
        return out


