import sys
import numpy as np
import math
import torch
import torch.nn as nn
from nets.Decoder import Decoder
from nets.Encoder import Encoder
from nets.ActivationFunction import Squareplus, Softplus
from sseHp import Hyperparameter

hp_sse = Hyperparameter()  # Hyperparameter is a class, so an instance must be created


# Baseline comparison experiment (original comment "笑容对比实验" — likely a typo for "效果对比", i.e. performance comparison)
# Baselines: LSTM, GRU, CNN, Linear

class myLSTM(nn.Module):
    """Single-layer LSTM scorer: maps 4-dim features to one sigmoid score per step."""

    def __init__(self):
        super().__init__()
        # One LSTM layer: input size 4, hidden (output) size 1.
        self.net = nn.LSTM(4, 1)
        self.activation = nn.Sigmoid()

    def forward(self, input):
        # Run the sequence through the LSTM; the final hidden/cell state is unused.
        out, _state = self.net(input)
        scores = self.activation(out)
        # Rank positions along dim 0 by score; detach so sorting stays out of autograd.
        ranking = torch.argsort(scores.detach().clone(), dim=0)
        return scores, ranking


class myGRU(nn.Module):
    """Single-layer GRU scorer: maps 4-dim features to one sigmoid score per step."""

    def __init__(self):
        super().__init__()
        # One GRU layer: input size 4, hidden (output) size 1.
        self.net = nn.GRU(4, 1)
        self.activation = nn.Sigmoid()

    def forward(self, input):
        # Run the sequence through the GRU; the final hidden state is unused.
        out, _state = self.net(input)
        scores = self.activation(out)
        # Rank positions along dim 0 by score; detach so sorting stays out of autograd.
        ranking = torch.argsort(scores.detach().clone(), dim=0)
        return scores, ranking


class myBiLSTM(nn.Module):
    """Two-layer LSTM scorer with a sigmoid output.

    NOTE(review): despite the name, ``nn.LSTM(4, 1, 2)`` builds a *stacked*
    (2-layer) unidirectional LSTM — the third positional argument is
    ``num_layers``, not ``bidirectional``. A true BiLSTM would need
    ``bidirectional=True`` (and would double the output width). Kept
    as-is to preserve behavior.
    """

    def __init__(self):
        super().__init__()
        self.net = nn.LSTM(4, 1, 2)  # input 4, hidden 1, num_layers 2
        self.activation = nn.Sigmoid()

    def forward(self, input):
        out, _state = self.net(input)
        scores = self.activation(out)
        # Rank positions along dim 0 by score; detached so it stays out of autograd.
        ranking = torch.argsort(scores.detach().clone(), dim=0)
        return scores, ranking


class myBiGRU(nn.Module):
    """Two-layer GRU scorer with a sigmoid output.

    NOTE(review): despite the name, ``nn.GRU(4, 1, 2)`` builds a *stacked*
    (2-layer) unidirectional GRU — the third positional argument is
    ``num_layers``, not ``bidirectional``. A true BiGRU would need
    ``bidirectional=True``. Kept as-is to preserve behavior.
    """

    def __init__(self):
        super().__init__()
        self.net = nn.GRU(4, 1, 2)  # input 4, hidden 1, num_layers 2
        self.activation = nn.Sigmoid()

    def forward(self, input):
        out, _state = self.net(input)
        scores = self.activation(out)
        # Rank positions along dim 0 by score; detached so it stays out of autograd.
        ranking = torch.argsort(scores.detach().clone(), dim=0)
        return scores, ranking


class myBiGRU_softmax(nn.Module):
    """Two-layer GRU scorer normalized with softmax along the sequence axis.

    NOTE(review): despite the "Bi" in the name, ``nn.GRU(4, 1, 2)`` is a
    stacked (2-layer) unidirectional GRU; bidirectional would require
    ``bidirectional=True``. Kept as-is to preserve the output shape.
    """

    def __init__(self):
        super(myBiGRU_softmax, self).__init__()
        self.net = nn.GRU(4, 1, 2)  # input 4, hidden 1, num_layers 2
        # Fix: nn.Softmax() without `dim` is deprecated and guesses the axis
        # from the input rank — for a 2-D (seq, 1) GRU output it would softmax
        # over the size-1 feature dim, producing all ones. Normalize over the
        # sequence axis (dim 0), which matches the argsort below and is the
        # same axis the implicit rule picks for 3-D (batched) input.
        self.activation = nn.Softmax(dim=0)

    def forward(self, input):
        out, _state = self.net(input)
        pro = self.activation(out)  # probabilities summing to 1 along dim 0
        ind = torch.argsort(pro.detach().clone(), dim=0)
        return pro, ind


class myBiGRU_softplus(nn.Module):
    """Two-layer GRU scorer with the project's Softplus activation.

    NOTE(review): despite the name, ``nn.GRU(4, 1, 2)`` is a stacked
    (2-layer) unidirectional GRU, not bidirectional (that would need
    ``bidirectional=True``).
    """

    def __init__(self):
        super().__init__()
        self.net = nn.GRU(4, 1, 2)  # input 4, hidden 1, num_layers 2
        self.activation = Softplus()  # project-defined activation (nets.ActivationFunction)

    def forward(self, input):
        out, _state = self.net(input)
        scores = self.activation(out)
        # Rank positions along dim 0 by score; detached so it stays out of autograd.
        ranking = torch.argsort(scores.detach().clone(), dim=0)
        return scores, ranking


class myCNN(nn.Module):
    """CNN scorer: embeds each input row with a 1x1 Conv1d into a
    (d_model x features) map, reduces it with three conv stages, and maps
    the flattened result to a single sigmoid score.

    NOTE(review): the conv/flatten chain only lines up with linear1's
    fixed input size of 8 for particular shapes (e.g. d_model=64 with
    4 input features) — confirm against the caller.
    """

    def __init__(self, d_model):
        super(myCNN, self).__init__()
        # Per-row embedding: (batch, 1, F) -> (batch, d_model, F).
        self.src_emb = nn.Conv1d(1, d_model, 1)
        self.conv1 = nn.Conv2d(1, 8, 3)
        self.conv2 = nn.Conv2d(8, 4, 2, 2)  # kernel 2, stride 2
        self.conv3 = nn.Conv2d(4, 1, 1, 4)  # kernel 1, stride 4
        self.flatten = nn.Flatten()
        self.linear1 = nn.Linear(8, 1)
        self.activation = nn.Sigmoid()

    def forward(self, input):
        # (batch, F) -> (batch, 1, F) -> (batch, d_model, F)
        emb = self.activation(self.src_emb(input.unsqueeze(1)))
        # Treat the embedding map as a 1-channel image: (batch, 1, d_model, F).
        x = emb.unsqueeze(1)
        x = self.activation(self.conv1(x))
        x = self.activation(self.conv2(x))
        x = self.activation(self.conv3(x))
        x = self.flatten(x)
        # Fix: the original applied sigmoid twice in a row after linear1
        # (a leftover from the removed intermediate linear layers), which
        # squashed all scores into (0.5, 0.73). Apply it once.
        pro = self.activation(self.linear1(x))
        ind = torch.argsort(pro.detach().clone(), dim=0)
        return pro, ind


class myLinear(nn.Module):
    """Five-layer MLP scorer (4 -> 16 -> 64 -> 16 -> 4 -> 1) with a sigmoid
    after every layer, including the output."""

    def __init__(self):
        super().__init__()
        self.net1 = nn.Linear(4, 16)
        self.net2 = nn.Linear(16, 64)
        self.net3 = nn.Linear(64, 16)
        self.net4 = nn.Linear(16, 4)
        self.net5 = nn.Linear(4, 1)
        self.activation = nn.Sigmoid()

    def forward(self, input):
        # Same computation as the original deeply-nested one-liner,
        # unrolled into a layer loop for readability.
        x = input
        for layer in (self.net1, self.net2, self.net3, self.net4, self.net5):
            x = self.activation(layer(x))
        pro = x
        # Rank positions along dim 0 by score; detached so it stays out of autograd.
        ind = torch.argsort(pro.detach().clone(), dim=0)
        return pro, ind
