import torch
from torch import nn
from utils.timeloader import getLoader
from utils.Save import SavingMethod
from params.CNN_ATTENTION_params import *
class MHAttention(nn.Module):
    """Multi-head self-attention with learned Q/K/V projections.

    Projects the input into query/key/value spaces of width ``num_hiddens``,
    runs ``nn.MultiheadAttention`` (batch_first), and maps the result back to
    ``input_dim`` so the module can serve as a drop-in residual branch.

    NOTE(review): the ``bias`` argument is accepted but never used — confirm
    whether it was meant to be forwarded to the linear layers.
    """

    def __init__(self, input_dim, num_hiddens, num_head, dropout=0.3, bias=False):
        super(MHAttention, self).__init__()
        # Separate projections for query, key and value.
        self.W_q = nn.Linear(input_dim, num_hiddens)
        self.W_k = nn.Linear(input_dim, num_hiddens)
        self.W_v = nn.Linear(input_dim, num_hiddens)
        self.attn = nn.MultiheadAttention(
            embed_dim=num_hiddens,
            num_heads=num_head,
            dropout=dropout,
            batch_first=True,
        )
        # Map the attention output back to the input width.
        self.W_o = nn.Linear(num_hiddens, input_dim)

    def forward(self, x):
        """Self-attend over ``x`` of shape (batch, seq, input_dim); same shape out."""
        query = self.W_q(x)
        key = self.W_k(x)
        value = self.W_v(x)
        context, _ = self.attn(query, key, value)
        return self.W_o(context)


class AttentionBlock(nn.Module):
    """Transformer-style block: multi-head self-attention followed by a
    position-wise feed-forward network, each wrapped in a residual
    connection and LayerNorm (post-norm arrangement)."""

    def __init__(self, input_dim, num_hiddens, num_head, dropout=0.3):
        super(AttentionBlock, self).__init__()
        self.attn = MHAttention(input_dim, num_hiddens, num_head, dropout)
        self.norm1 = nn.LayerNorm(input_dim)
        self.norm2 = nn.LayerNorm(input_dim)
        # Two-layer MLP with a 2x hidden expansion.
        self.fc = nn.Sequential(
            nn.Linear(input_dim, input_dim * 2),
            nn.ReLU(),
            nn.Linear(input_dim * 2, input_dim),
        )

    def forward(self, x):
        # Attention sub-layer: residual add, then normalize.
        attended = self.norm1(x + self.attn(x))
        # Feed-forward sub-layer: residual add, then normalize.
        return self.norm2(attended + self.fc(attended))


class SENET(nn.Module):
    """Squeeze-and-Excitation gate for 1-D feature maps.

    Squeezes each channel to a scalar by global average pooling, pushes the
    channel vector through a bottleneck MLP (reduction factor ``reduction``)
    ending in a sigmoid, and rescales the input channels by the resulting
    (0, 1) weights.

    NOTE(review): the classic SE block has a ReLU between the two linear
    layers; this variant omits it — confirm that is intentional.
    """

    def __init__(self, channel, reduction=4):
        super(SENET, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        # x: (batch, channel, length)
        batch, channels = x.size(0), x.size(1)
        squeezed = self.avg_pool(x).view(batch, channels)
        gate = self.fc(squeezed).view(batch, channels, 1)
        return x * gate.expand_as(x)

class CNN(nn.Module):
    """Per-window 1-D convolutional encoder.

    Treats each of the ``windows`` slices of a (batch, windows, samples)
    input as an independent single-channel signal, compresses it with
    strided convolutions, SE gating and max-pooling, and returns
    (batch, windows, features). For 2048-sample windows the feature
    length works out to 64.
    """

    def __init__(self):
        super(CNN, self).__init__()
        self.cnn = nn.Sequential(
            nn.Conv1d(in_channels=1, out_channels=20, stride=4, kernel_size=17, padding=8),
            SENET(channel=20),
            nn.MaxPool1d(kernel_size=5, stride=2, padding=2),
            nn.ReLU(),
            nn.Conv1d(in_channels=20, out_channels=40, stride=2, kernel_size=5, padding=2),
            SENET(40),
            nn.MaxPool1d(kernel_size=5, stride=2, padding=2),
            nn.ReLU(),
            nn.Conv1d(in_channels=40, out_channels=1, stride=1, kernel_size=3, padding=1),
        )

    def forward(self, x):
        # Fold the window axis into the batch so each window is convolved
        # independently as a single-channel signal.
        batch, windows = x.shape[0], x.shape[1]
        flat = x.view(batch * windows, 1, -1)
        features = self.cnn(flat)
        # Restore the (batch, windows, features) layout.
        return features.view(batch, windows, -1)


class MODEL(nn.Module):
    """CNN + self-attention regressor over windowed signals.

    Each sample is (batch, num_windows, input_size): the CNN encodes every
    window into a feature vector, two attention blocks mix information
    across windows, a per-window head reduces each feature vector to a
    scalar, and a final linear layer maps the window scores to one output
    per sample, shape (batch, 1).

    Args:
        input_size: samples per window (e.g. 2048).
        num_windows: windows per sample; defaults to the previously
            hard-coded 30, so existing callers are unaffected.
    """

    def __init__(self, input_size, num_windows=30):
        super(MODEL, self).__init__()
        self.cnn = CNN()
        # Fix: ``input_size`` used to be accepted but ignored, with the CNN
        # feature length hard-coded to 64 (correct only for 2048-sample
        # windows). Derive it from a dummy forward pass instead.
        with torch.no_grad():
            feat_len = self.cnn(torch.zeros(1, 1, input_size)).shape[-1]
        self.attn = nn.Sequential(
            AttentionBlock(feat_len, 128, 2),
            AttentionBlock(feat_len, 128, 2),
        )
        self.fc = nn.Linear(feat_len, 1)
        self.out = nn.Linear(num_windows, 1)

    def forward(self, x):
        """(batch, num_windows, input_size) -> (batch, 1)."""
        x = self.cnn(x)             # (batch, num_windows, feat_len)
        x = self.attn(x)            # attention across windows
        x = self.fc(x).squeeze(2)   # (batch, num_windows)
        return self.out(x)

def main():
    """Train MODEL on bearings 1 and 2 and snapshot test predictions each epoch."""
    loader1 = getLoader('./learn_files', batch_size=batch_size, bearing_condition=bearing_condition,
                        bearing_label=1, window=window, is_shuffle=False)
    loader2 = getLoader('./learn_files', batch_size=batch_size, bearing_condition=bearing_condition,
                        bearing_label=2, window=window, is_shuffle=False)
    test_loader = getLoader('./test_files', batch_size=batch_size, bearing_condition=bearing_condition,
                            bearing_label=5, window=window)

    model = MODEL(input_dim).to('cuda')

    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=0.001)
    loss_fun = torch.nn.L1Loss().to('cuda')

    Saving = SavingMethod(model_name=['./CONVAttn.py', './params/Attention_CNN_FC_params.py'])

    for epoch in range(epochs):
        total_loss = 0.0
        for bearing_loader in (loader1, loader2):
            for x, y, l in bearing_loader:
                x, l = x.to('cuda'), l.to('cuda')
                l_pre = model(x)
                loss = loss_fun(l_pre, l)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # Fix: accumulate a Python float, not the loss tensor —
                # summing tensors kept every iteration's autograd graph
                # alive and grew memory over the epoch.
                total_loss += loss.item()
        # Fix: the log previously reported bearing 5 (the *test* bearing);
        # this loop actually trains on bearings 1 and 2.
        print('epoch:{},bearing:{},loss:{}'.format(epoch, '1,2', total_loss))
        Saving.savetrain_loss([str(epoch), '1', '12', '', '', str(total_loss)])
        Saving.savefig(model, test_loader, epoch)
        # NOTE(review): this also pauses at epoch 0 (0 % 10 == 0) — confirm
        # the pause after the very first epoch is intentional.
        if epoch % 10 == 0:
            input('please input')

if __name__ == '__main__':
    # Removed commented-out shape-check debug code; the expected I/O shapes
    # are documented on MODEL instead.
    main()