
import torch.nn as nn 
import torch.nn.functional as F  
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch
from typing import List, Tuple, Dict

class LstmModel(nn.Module):
    """Bidirectional LSTM classifier over per-timestep feature vectors.

    A linear layer projects each timestep's raw features into an embedding,
    a bidirectional LSTM encodes the sequence, and the concatenation of the
    final hidden states of both directions is mapped to 3 class logits.
    """

    def __init__(self, embed_size: int, hidden_size: int, feature_size: int, device: torch.device):
        super(LstmModel, self).__init__()
        self.device = device
        self.embed_size = embed_size
        self.hidden_size = hidden_size

        # Feature extraction: one linear projection applied per timestep.
        self.embeddings = nn.Linear(feature_size, embed_size)
        self.bn = nn.BatchNorm1d(25)  # unused in forward; kept for checkpoint compatibility
        self.encoder = nn.LSTM(embed_size, hidden_size, bidirectional=True, bias=True)
        # Fuses forward/backward final hidden states into one vector.
        self.middle_layer = nn.Linear(2 * hidden_size, hidden_size, bias=True)
        self.out_layer = nn.Linear(hidden_size, 3, bias=True)
        self.drop_out = nn.Dropout(0.3)  # unused in forward; kept for checkpoint compatibility

    def forward(self, source, target=None):
        """Return logits, or (logits, summed cross-entropy loss) when target is given.

        source: (batch, seq_len, feature_size) tensor.
        target: optional (batch,) class indices in {0, 1, 2}.
        """
        source = source.to(self.device)

        embedded = F.relu(self.embeddings(source))
        # nn.LSTM expects (seq_len, batch, embed_size) by default.
        encoder_in = embedded.permute(1, 0, 2)
        _, (final_hidden, _) = self.encoder(encoder_in)

        # final_hidden: (2, batch, hidden) -> concatenate both directions
        # into a (batch, 2 * hidden) sequence representation.
        seq_repr = torch.cat((final_hidden[1], final_hidden[0]), dim=1)
        fused = F.relu(self.middle_layer(seq_repr))
        # logits: (batch, 3)
        logits = self.out_layer(fused)

        if target is None:
            return logits
        target = target.to(self.device).view(-1)
        loss_func = nn.CrossEntropyLoss(reduction="sum")
        return logits, loss_func(logits, target)

class SimpleClassifier(nn.Module):
    """Two-layer MLP classifier: feature -> embed -> hidden -> 2 logits."""

    def __init__(self, embed_size: int, hidden_size: int, feature_size: int, device: torch.device):
        super(SimpleClassifier, self).__init__()
        self.device = device

        self.embed_size = embed_size
        self.hidden_size = hidden_size

        # Feature extraction through a single linear projection.
        self.embeddings_1 = nn.Linear(feature_size, embed_size)
        self.drop_out = nn.Dropout(0.5)  # unused in forward; kept for checkpoint compatibility
        self.embeddings_2 = nn.Linear(embed_size, hidden_size)
        self.out_layer = nn.Linear(hidden_size, 2, bias=True)
        self.relu = nn.ReLU()

    def forward(self, source, target=None):
        """Return summed cross-entropy loss when target is given, else logits.

        source: (batch, feature_size) tensor.
        target: optional (batch,) class indices in {0, 1}.
        """
        # Bug fix: inputs were never moved to self.device, which fails as
        # soon as the model parameters live on a GPU. Every sibling model
        # in this file (LstmModel, CLSTM, CNN, ...) performs this move.
        source = source.to(self.device)

        hidden = self.relu(self.embeddings_1(source))
        hidden = self.relu(self.embeddings_2(hidden))
        out = self.out_layer(hidden)

        if target is not None:
            # Bug fix: target also moved to the model's device (was CPU-only).
            target = target.to(self.device).view(-1)
            loss_func = nn.CrossEntropyLoss(reduction="sum")
            return loss_func(out, target)
        return out
    
   
class CLSTM(nn.Module):
    """CNN feature extractor followed by a unidirectional LSTM classifier.

    Two (1 x k) convolutions slide along the time axis of each channel;
    the pooled feature maps are flattened per time step and fed to an
    LSTM whose final hidden state is mapped to 2 class logits.
    """

    def __init__(self, hidden_size: int, device: torch.device):
        super(CLSTM, self).__init__()
        self.device = device
        self.hidden_size = hidden_size
        self.conv_1 = nn.Conv2d(in_channels=1, out_channels=40, kernel_size=(1, 5))
        self.max_pool_1 = nn.MaxPool2d(kernel_size=(1, 2))

        self.conv_2 = nn.Conv2d(in_channels=40, out_channels=40, kernel_size=(1, 50))

        self.max_pool_2 = nn.MaxPool2d(kernel_size=(1, 20))
        feature_size = 40 * 59  # 40 conv kernels x 59 input channels
        self.encoder = nn.LSTM(feature_size, hidden_size, bidirectional=False, bias=True)
        self.middle_layer = nn.Linear(hidden_size, hidden_size, bias=True)  # feature fusion
        self.out_layer = nn.Linear(hidden_size, 2, bias=True)

    def forward(self, source, target=None):
        """Return summed cross-entropy loss when target is given, else logits.

        source: (batch, time, channel) tensor; channel must be 59 (to match
        the hard-coded LSTM input size) and time long enough for the
        conv/pool stack to leave at least one output column.
        """
        source = source.to(self.device)
        batch_size = source.shape[0]

        # Bug fix: the original used view() to swap the time and channel
        # axes. view() only reinterprets contiguous memory and does NOT
        # transpose, so the data was scrambled; permute() is the real
        # transpose.
        source = source.permute(0, 2, 1)  # (batch, channel, time)
        source = source.unsqueeze(1)      # (batch, 1, channel, time)

        conv_1_out = self.conv_1(source)
        max_pool_1_out = self.max_pool_1(conv_1_out)
        conv_2_out = self.conv_2(max_pool_1_out)
        max_pool_2_out = self.max_pool_2(conv_2_out)
        # Merge the 40 conv kernels and the channel axis into one feature
        # dimension: (batch, 40 * channel, time').
        features_seq = max_pool_2_out.view((batch_size, -1, max_pool_2_out.shape[-1]))

        # Bug fix: same view()-as-transpose problem. nn.LSTM wants
        # (seq_len, batch, input_size), so permute instead of view.
        features_seq = features_seq.permute(2, 0, 1).contiguous()

        enc_hiddens, (last_hidden, last_cell) = self.encoder(features_seq)
        # last_hidden: (1, batch, hidden) -> features: (batch, hidden)
        features = last_hidden[0]
        middle_out = F.relu(self.middle_layer(features))
        # out: (batch, 2)
        out = self.out_layer(middle_out)

        if target is not None:
            target = target.to(self.device)
            target = target.view(-1)
            loss_func = nn.CrossEntropyLoss(reduction="sum")
            return loss_func(out, target)
        else:
            return out
       

class CNN(nn.Module):
    """Seven stacked conv/pool stages over a (channel x time) plane,
    followed by a 3-layer fully-connected head producing 2 logits."""

    def __init__(self, device="cpu"):
        super(CNN, self).__init__()
        self.device = device
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=(5, 5)),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )

        self.layer2 = nn.Sequential(
            nn.MaxPool2d(kernel_size=(2, 2)),
            nn.Dropout(0.5)
        )

        self.layer3 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=(3, 3)),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True)
        )

        self.layer4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=(2, 2))
        )

        self.layer5 = nn.Sequential(
            nn.Conv2d(128, 64, kernel_size=(3, 3)),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )

        self.layer6 = nn.Sequential(
            nn.MaxPool2d(kernel_size=(2, 2))
        )

        self.layer7 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=(3, 3)),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2)),
            nn.Dropout(0.5)
        )

        self.fc = nn.Sequential(
            nn.Linear(128*1*21, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 64),
            nn.ReLU(inplace=True),
            nn.Linear(64, 2)
        )

    def forward(self, x, labels=None):
        """Return logits, or (logits, summed cross-entropy loss) when labels given.

        x: (batch, time, channel) tensor sized so the conv stack flattens
        to 128*1*21 features (the hard-coded fc input).
        """
        out = x.to(self.device)
        out = out.permute(0, 2, 1)  # (batch, channel, time)
        out = out.unsqueeze(1)      # single input plane for Conv2d
        for block in (self.layer1, self.layer2, self.layer3, self.layer4,
                      self.layer5, self.layer6, self.layer7):
            out = block(out)
        out = out.view(out.shape[0], -1)  # flatten for the fc head
        out = self.fc(out)

        if labels is None:
            return out
        labels = labels.to(self.device).view(-1)
        loss_func = nn.CrossEntropyLoss(reduction="sum")
        return out, loss_func(out, labels)

class Cnn2D(nn.Module):
    """Four conv/pool stages over a (channel x time) plane, then a
    2-layer fully-connected head producing 3 logits."""

    def __init__(self, device="cpu"):
        super(Cnn2D, self).__init__()
        self.device = device
        self.conv1 = nn.Conv2d(1, 64, kernel_size=5)
        self.relu = nn.ReLU(inplace=True)
        self.batch_norm1 = nn.BatchNorm2d(64)
        self.dropout = nn.Dropout(0.5)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=3)
        self.conv3 = nn.Conv2d(128, 64, kernel_size=3)
        self.conv4 = nn.Conv2d(64, 128, kernel_size=3)
        # Unused in forward (self.maxpool is reused); kept so the class
        # keeps the same attributes as before.
        self.maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(128*18, 128)
        self.fc2 = nn.Linear(128, 3)

    def forward(self, x, labels=None):
        """Return logits, or (logits, summed cross-entropy loss) when labels given.

        x: (batch, time, channel) tensor sized so the conv stack flattens
        to 128*18 features (the hard-coded fc1 input).
        """
        out = x.to(self.device).permute(0, 2, 1).unsqueeze(1)

        # Stage 1 is the only stage with batch normalization.
        out = self.maxpool(self.batch_norm1(self.relu(self.conv1(out))))
        # Stages 2-4 all share the same pooling module.
        for conv in (self.conv2, self.conv3, self.conv4):
            out = self.maxpool(self.relu(conv(out)))

        out = out.view(out.size()[0], -1)
        out = self.fc2(self.dropout(self.relu(self.fc1(out))))

        if labels is None:
            return out
        labels = labels.to(self.device).view(-1)
        loss_func = nn.CrossEntropyLoss(reduction="sum")
        return out, loss_func(out, labels)

class Cnn2DDouble(nn.Module):
    """Two-branch CNN: each input stream runs through its own conv backbone
    and fc head; the two 32-dim embeddings are concatenated and mapped to
    3 logits."""

    def __init__(self, device="cpu"):
        super(Cnn2DDouble, self).__init__()
        self.device = device
        self.cnn_model1 = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=5),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=(2, 3)),
            nn.Conv2d(64, 128, kernel_size=3),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=(2, 2)),
            nn.Conv2d(128, 64, kernel_size=3),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=(1, 2)),
            nn.Conv2d(64, 128, kernel_size=3),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=(1, 1)),
            nn.Conv2d(128, 64, kernel_size=3),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=1),
        )
        self.fc1 = nn.Sequential(
            nn.Linear(64 * 4 * 20, 128),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(128, 32),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5)
        )

        self.cnn_model2 = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=5),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=(2, 3)),
            nn.Conv2d(64, 128, kernel_size=3),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=(2, 2)),
            nn.Conv2d(128, 64, kernel_size=3),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=(1, 2)),
            nn.Conv2d(64, 128, kernel_size=3),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=(1, 1)),
            nn.Conv2d(128, 64, kernel_size=3),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=1),
        )
        self.fc2 = nn.Sequential(
            nn.Linear(64 * 4 * 20, 128),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(128, 32),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
        )

        self.fc3 = nn.Linear(64, 3)

    def forward(self, x, labels=None):
        """Return logits, or (logits, summed cross-entropy loss) when labels given.

        x: pair [x1, x2] of (batch, time, channel) tensors, each sized so
        its backbone flattens to 64*4*20 features.
        """
        branch_embeddings = []
        for inp, backbone, head in ((x[0], self.cnn_model1, self.fc1),
                                    (x[1], self.cnn_model2, self.fc2)):
            t = inp.to(self.device).permute(0, 2, 1).unsqueeze(1)
            t = backbone(t)
            t = t.view(t.shape[0], -1)
            branch_embeddings.append(head(t))

        # Concatenate the two 32-dim branch embeddings into 64 dims.
        merged = torch.cat(branch_embeddings, dim=-1)
        logits = self.fc3(merged)

        if labels is None:
            return logits
        labels = labels.to(self.device).view(-1)
        loss_func = nn.CrossEntropyLoss(reduction="sum")
        return logits, loss_func(logits, labels)


class CNN3D(nn.Module):
    """3D-CNN classifier: the time axis is cut into 10 segments that form the
    depth dimension of a Conv3d stack, followed by a 2-layer fc head (3 logits).

    NOTE(review): the fc1 input size (128*4*24*43) hard-codes a specific
    input geometry; only inputs producing exactly that flattened size work.
    """

    def __init__(self, device="cpu"):
        super(CNN3D, self).__init__()
        # Device the inputs/labels are moved to in forward().
        self.device = device
        self.conv1 = nn.Conv3d(1, 64, kernel_size=(1, 5, 5))
        self.relu = nn.ReLU(inplace=True)
        # self.batch_norm1 = nn.BatchNorm2d(64)
        self.dropout = nn.Dropout(0.5)
        # stride 1 pools: shrink each spatial dim by 1, no downsampling.
        self.maxpool = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 1, 1))
        # conv2 is the only kernel spanning 2 depth slices.
        self.conv2 = nn.Conv3d(64, 128, kernel_size=(2, 3, 3))
        self.maxpool2 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 1, 1))
        # self.batch_norm2 = nn.BatchNorm2d(128)
        # self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3 = nn.Conv3d(128, 64, kernel_size=(1, 3, 3))
        self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(1, 1, 1))
        # self.batch_norm3 = nn.BatchNorm2d(64)
        # self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv4 = nn.Conv3d(64, 128, kernel_size=(1, 3, 3))
        # self.batch_norm4 = nn.BatchNorm2d(128)
        # self.dropout = nn.Dropout(0.5)
        # Final pool is the only one that downsamples (stride 2).
        self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=2)
        self.fc1 = nn.Linear(128*4*24*43,128)
        self.fc2 = nn.Linear(128,3)
    
    def forward(self, x, labels=None):
        """Return logits, or (logits, summed cross-entropy loss) when labels given.

        x: (batch, time, channel) tensor; time must be divisible by 10.
        labels: optional (batch,) class indices in {0, 1, 2}.
        """
        ## x : (batch_size, time, channel)
        batch_size, time, channel = x.shape
        x = x.to(self.device)
        # x = x.permute(0, 2, 1)
        # NOTE(review): view() reinterprets memory and does not transpose;
        # reshaping (batch, time, channel) straight to
        # (batch, 10, channel, time/10) mixes the time and channel axes.
        # If each depth slice is meant to hold (channel x time-segment)
        # planes, this likely needs view(batch, 10, time//10, channel)
        # followed by a permute — confirm the intended layout.
        x = x.view((batch_size, 10, channel, int(time / 10)))
        # print(x.shape)
        # Add the single Conv3d input-channel dimension.
        x = x.unsqueeze(1)
        x = self.conv1(x)
        x = self.relu(x)
        # x = self.batch_norm1(x)
        # x = self.dropout(x)
        x = self.maxpool(x)
        x = self.conv2(x)
        x = self.relu(x)
        # x = self.batch_norm2(x)
        x = self.maxpool2(x)

        x = self.conv3(x)
        x = self.relu(x)
        # x = self.batch_norm3(x)
        x = self.maxpool3(x)
        x = self.conv4(x)
        x = self.relu(x)
        
        # x = self.batch_norm4(x)
        x = self.maxpool4(x)      
        # print(x.shape)
        # Flatten all conv features for the fc head.
        x=x.view(x.size()[0],-1)
        
        x = self.fc1(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)

        if labels is not None:
            labels = labels.to(self.device)
            labels = labels.view(-1)
            # Summed (not averaged) cross-entropy, matching the other models.
            loss_func = nn.CrossEntropyLoss(reduction="sum")
            return x, loss_func(x, labels)

        return x

class Cnn2DLarge(nn.Module):
    """Single large conv backbone over a (channel x time) plane, followed
    by a 2-layer fully-connected head producing 3 logits."""

    def __init__(self, device="cpu"):
        super(Cnn2DLarge, self).__init__()
        self.device = device
        self.cnn_model1 = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=(5, 5)),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=(2, 2)),

            nn.Conv2d(64, 128, kernel_size=(2, 3)),
            # nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=(2, 2)),

            nn.Conv2d(128, 64, kernel_size=(2, 3)),
            # nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=(2, 2)),

            nn.Conv2d(64, 128, kernel_size=(2, 3)),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=(2, 2)),
        )
        self.fc1 = nn.Sequential(
            nn.Linear(128 * 2 * 18, 128),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(128, 3),
        )

    def forward(self, x, labels=None):
        """Return logits, or (logits, summed cross-entropy loss) when labels given.

        x: (batch, time, channel) tensor sized so the backbone flattens to
        128*2*18 features (the hard-coded fc1 input).
        """
        feats = x.to(self.device)
        feats = feats.permute(0, 2, 1)  # (batch, channel, time)
        feats = feats.unsqueeze(1)      # single input plane for Conv2d
        feats = self.cnn_model1(feats)
        feats = feats.view(feats.shape[0], -1)
        logits = self.fc1(feats)

        if labels is None:
            return logits
        labels = labels.to(self.device).view(-1)
        loss_func = nn.CrossEntropyLoss(reduction="sum")
        return logits, loss_func(logits, labels)