import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init

class EmotionRecognitionModel(nn.Module):
    """1-D conv + linear classifier that outputs one probability vector.

    The forward pass treats every scalar element of the input as an
    independent sample: per-sample conv features are classified, the
    logits are averaged over all samples, and the mean is softmaxed, so
    the return value is a single distribution over ``num_classes``.
    """

    def __init__(self, input_size, num_classes):
        super(EmotionRecognitionModel, self).__init__()
        # Pointwise 1-D convolution: lifts each scalar sample to 64 features.
        # NOTE(review): `input_size` is accepted for interface compatibility
        # but unused — forward() always reshapes the input to (-1, 1, 1).
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=64, kernel_size=1)

        # Fully connected classification head.
        self.fc1 = nn.Linear(64, num_classes)

    def forward(self, x):
        # Empty input: fall back to a fixed one-hot prediction for class 0.
        # Fix: build the fallback on the input's device (the original
        # hard-coded .cuda(), crashing on CPU-only hosts) and size it from
        # the classifier head instead of hard-coding 3 classes.
        if min(x.shape) == 0:
            fallback = torch.zeros(self.fc1.out_features, dtype=torch.long,
                                   device=x.device)
            fallback[0] = 1
            return fallback
        # Reshape to (batch, channels=1, length=1): each element is a sample.
        x = x.view(-1, 1, 1)
        x = self.conv1(x)
        # Standardize activations; the eps guards against a zero std
        # (constant activations), which would otherwise produce NaNs.
        x = (x - torch.mean(x)) / (torch.std(x) + 1e-8)
        # Flatten conv features and classify each sample.
        x = x.reshape(-1, 64)
        x = self.fc1(x)
        # Average logits over samples, then softmax into a distribution.
        x = torch.mean(x, dim=0)
        x = F.softmax(x, dim=0)
        return x

class AttentionModule(nn.Module):
    """Scores each scalar input element and returns the standardized,
    score-weighted sum of its features, shape ``(hidden_size,)``."""

    def __init__(self, input_size, hidden_size):
        super(AttentionModule, self).__init__()
        # Feature projection and a one-unit scoring head.
        self.fc = nn.Linear(input_size, hidden_size)
        self.attention = nn.Linear(hidden_size, 1)

    def forward(self, x):
        # Treat every element of x as one sample with a single feature.
        features = self.fc(x.view(-1, 1))
        # One unnormalized attention score per sample, broadcast over
        # the feature dimension when weighting.
        scores = self.attention(features)
        weighted = scores * features
        # Standardize the weighted features before pooling.
        standardized = (weighted - torch.mean(weighted)) / torch.std(weighted)
        # Sum-pool over the sample dimension.
        return torch.sum(standardized, dim=0)

class FeedForwardNN(nn.Module):
    """Two-layer MLP with a PReLU activation and hidden-layer standardization.

    NOTE(review): `input_size` is accepted for interface compatibility but
    unused — forward() always reshapes its input to (-1, 2), so the first
    layer's input width is fixed at 2.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(FeedForwardNN, self).__init__()
        self.fc1 = nn.Linear(2, hidden_size)
        self.fc2 = nn.Linear(hidden_size, output_size)
        # Fix: the original assigned this attribute via exec() inside a
        # single-iteration loop; a plain assignment is equivalent (same
        # "p0" state-dict key), safer, and readable.
        self.p0 = nn.PReLU()

    def forward(self, x):
        # Pair up input elements: (N, 2) rows feed the first layer.
        x = x.view(-1, 2)
        x = self.p0(self.fc1(x))
        # Standardize hidden activations; the eps avoids a divide-by-zero
        # NaN when the activations are constant.
        x = (x - torch.mean(x)) / (torch.std(x) + 1e-8)
        x = self.fc2(x)
        return x
class fowardNN(nn.Module):
    """Attention pooling followed by a feed-forward classifier; returns one
    probability distribution over ``output_size`` classes.

    NOTE(review): `self.mp` is registered as a trainable parameter but is
    never used in forward() — kept for checkpoint/optimizer compatibility.
    """

    def __init__(self, hidden_size, output_size, attention_size):
        super(fowardNN, self).__init__()
        # Sub-modules: attention pooling over scalar inputs, then an MLP.
        self.attention_module = AttentionModule(1, attention_size)
        self.ff_nn = FeedForwardNN(attention_size, hidden_size, output_size)
        self.mp = torch.nn.Parameter(torch.tensor([0.3, 0.3, 0.3]), requires_grad=True)

    def forward(self, input_data):
        # Empty input: fall back to a fixed one-hot prediction for class 0.
        # Fix: build the fallback on the input's device (the original
        # hard-coded .cuda(), failing on CPU-only hosts) and size it from
        # the classifier head instead of hard-coding 3 classes.
        if min(input_data.shape) == 0:
            fallback = torch.zeros(self.ff_nn.fc2.out_features, dtype=torch.long,
                                   device=input_data.device)
            fallback[0] = 1
            return fallback
        # Pool the raw input into an (attention_size,) context vector.
        context = self.attention_module(input_data)
        # Classify; the global input mean is added as a scalar residual.
        output = self.ff_nn(context) + torch.mean(input_data)
        # Average over rows, then convert logits to probabilities.
        output = torch.mean(output, dim=0)
        output = F.softmax(output, dim=0)
        return output