import torch
from torch import nn
from params.Attention_CNN_FC_params import *
from utils.timeloader import getLoader
import csv
class SENET(nn.Module):
    """Squeeze-and-Excitation gate applied over the time axis.

    Expects input of shape (batch, channel, features); each of the `channel`
    time steps is rescaled by a learned sigmoid gate in (0, 1).
    """

    def __init__(self, channel, reduction=4):
        super(SENET, self).__init__()
        # "Squeeze": average each time step's features down to one scalar.
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        # "Excite": bottleneck MLP producing one gate per time step.
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, steps = x.shape[0], x.shape[1]
        squeezed = self.avg_pool(x).reshape(batch, steps)
        gate = self.fc(squeezed).reshape(batch, steps, 1)
        # Broadcast the per-step gate across the feature dimension.
        return x * gate.expand_as(x)


class MHAttention(nn.Module):
    """Multi-head self-attention: learned q/k/v projections, attention,
    then a projection back to `input_dim`.

    NOTE: `bias` is accepted but never used; it is kept only so the
    constructor signature stays compatible with existing callers.
    """

    def __init__(self, input_dim, num_hiddens, num_head, dropout=0.3, bias=False):
        super(MHAttention, self).__init__()
        self.W_q = nn.Linear(input_dim, num_hiddens)
        self.W_k = nn.Linear(input_dim, num_hiddens)
        self.W_v = nn.Linear(input_dim, num_hiddens)
        self.attn = nn.MultiheadAttention(
            embed_dim=num_hiddens,
            num_heads=num_head,
            dropout=dropout,
            batch_first=True,
        )
        self.W_o = nn.Linear(num_hiddens, input_dim)

    def forward(self, x):
        q = self.W_q(x)
        k = self.W_k(x)
        v = self.W_v(x)
        out, _ = self.attn(q, k, v)
        return self.W_o(out)


class AttentionBlock(nn.Module):
    """Transformer-style block: self-attention and a position-wise FFN,
    each followed by a residual connection and LayerNorm."""

    def __init__(self, input_dim, num_hiddens, num_head, dropout=0.3):
        super(AttentionBlock, self).__init__()
        self.attn = MHAttention(input_dim, num_hiddens, num_head, dropout)
        self.norm1 = nn.LayerNorm(input_dim)
        self.norm2 = nn.LayerNorm(input_dim)
        # Feed-forward with a 2x hidden expansion.
        self.fc = nn.Sequential(
            nn.Linear(input_dim, input_dim * 2),
            nn.ReLU(),
            nn.Linear(input_dim * 2, input_dim),
        )

    def forward(self, x):
        # Residual around attention, then residual around the FFN.
        attended = self.norm1(x + self.attn(x))
        return self.norm2(attended + self.fc(attended))


class MODEL(nn.Module):
    """Attention + SE feature extractor, a 1-D CNN encoder, and an FC head.

    Input:  (batch, window, input_dim) tensor.
    Output: (batch, 1) regression value — presumably a remaining-life label,
            given the bearing loaders in __main__; confirm with the caller.

    Fixes vs. the original:
    - `dropout` is now forwarded to the attention block (it was accepted but
      silently ignored).
    - The FC input size is computed from `input_dim` instead of being
      hard-coded to 768, which was only valid for input_dim == 2048.
    """

    def __init__(self, input_dim, num_hiddens, num_head, window, dropout=0.3):
        super(MODEL, self).__init__()
        # Channel attention over the `window` time steps.
        self.se = SENET(channel=window)
        self.attn = nn.Sequential(
            AttentionBlock(input_dim, num_hiddens, num_head, dropout)
        )
        self.cnn = nn.Sequential(
            nn.Conv1d(in_channels=window, out_channels=window * 2, kernel_size=7, stride=2, padding=3),
            nn.ReLU(),
            nn.BatchNorm1d(num_features=window * 2),
            nn.MaxPool1d(kernel_size=3, stride=1, padding=1),
            nn.Conv1d(in_channels=window * 2, out_channels=window, kernel_size=5, stride=2, padding=2),
            nn.ReLU(),
            nn.BatchNorm1d(num_features=window),
            nn.MaxPool1d(kernel_size=3, stride=1, padding=1),
            nn.Conv1d(in_channels=window, out_channels=3, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
            nn.BatchNorm1d(3),
        )
        # Each stride-2 conv maps length L -> ceil(L / 2); the stride-1,
        # padding-1 max-pools leave the length unchanged.  The last conv
        # emits 3 channels, so the flattened size is 3 * final length.
        # (3 * ceil³(2048 / 2) == 768, matching the original constant.)
        feat_len = input_dim
        for _ in range(3):
            feat_len = (feat_len + 1) // 2
        flat_features = 3 * feat_len
        self.fc = nn.Sequential(
            nn.Linear(flat_features, 256),
            nn.LeakyReLU(),
            nn.Linear(256, 64),
            nn.LeakyReLU(),
            nn.Linear(64, 1),
        )

    def forward(self, x):
        # Fuse attention features with the SE-gated input, then encode.
        x = self.attn(x) + self.se(x)
        x = self.cnn(x)
        x = x.contiguous().view(x.shape[0], -1)
        return self.fc(x)

def save(los, path):
    """Write `los` (an iterable of row lists) to `path` as a CSV file.

    The file is opened with newline='' as the csv module requires, and with
    an explicit UTF-8 encoding so output does not depend on the platform's
    locale default.
    """
    with open(path, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerows(los)

def test(model, test_loss, test_loader):
    """Evaluate `model` on `test_loader` and append the summed L1 loss
    (a plain float) to `test_loss`.

    Relies on the module-level `loss_fun` defined in the __main__ block,
    and on CUDA being available (inputs are moved to 'cuda').

    Fixes vs. the original: runs under torch.no_grad() so no autograd graph
    is built, switches the model to eval mode (BatchNorm then uses running
    stats) and restores the previous mode, and accumulates `.item()` floats
    instead of CUDA tensors so the saved CSV contains numbers rather than
    tensor reprs.
    """
    was_training = model.training
    model.eval()
    total = 0.0
    with torch.no_grad():
        for x, y, l in test_loader:
            x, l = x.to('cuda'), l.to('cuda')
            l_pre = model(x)
            total += loss_fun(l_pre, l).item()
    if was_training:
        model.train()
    test_loss.append([total])

if __name__ == '__main__':
    # Bearings 1 and 2 train the model; bearing 3 is held out for testing.
    loader1 = getLoader('./learn_files', batch_size=batch_size, bearing_condition=bearing_condition, bearing_label=1,
                        window=window)
    loader2 = getLoader('./learn_files', batch_size=batch_size, bearing_condition=bearing_condition, bearing_label=2,
                        window=window)
    test_loader = getLoader('./test_files', batch_size=batch_size, bearing_condition=bearing_condition, bearing_label=3,
                            window=window)

    model = MODEL(input_dim, num_hiddens, num_head, window).to('cuda')
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fun = torch.nn.L1Loss().to('cuda')

    train_loss = []
    test_loss = []
    for epoch in range(100):
        if epoch % 10 == 0:
            # Checkpoint the loss curves and pause for manual confirmation
            # before each run of ten epochs.
            save(test_loss, 'D://img/testloss.csv')
            save(train_loss, 'D://img/trainloss.csv')
            input('请输入以开启下面十个epoch')  # press Enter to start the next ten epochs
            print('start')
        model.train()
        total_loss = 0.0
        for bearing in (loader1, loader2):
            for x, y, l in bearing:
                x, l = x.to('cuda'), l.to('cuda')
                l_pre = model(x)
                loss = loss_fun(l_pre, l)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # .item() detaches the scalar, so accumulating it no longer
                # keeps every batch's autograd graph alive — the original's
                # del + torch.cuda.empty_cache() workaround is not needed.
                total_loss += loss.item()
        # The original printed a hard-coded "bearing:5", which was misleading;
        # the loss here covers both training loaders for this epoch.
        print('epoch:{},loss:{}'.format(epoch, total_loss))
        train_loss.append([total_loss])
        test(model, test_loss, test_loader)
    save(test_loss, 'D://img/testloss.csv')
    save(train_loss, 'D://img/trainloss.csv')