import torch
from torch import nn
from utils.loader import getLoader
from utils.Save import SavingMethod


class WDCNN(nn.Module):
    """WDCNN-style 1-D convolutional network for raw vibration signals.

    The input is a batch of raw signals of shape ``(batch, length)``; a
    channel dimension is inserted inside :meth:`forward`.  Five
    Conv1d -> BatchNorm1d -> ReLU stages (each followed by a 2x max-pool)
    reduce the signal to a ``64 x 19`` feature map, which a three-layer MLP
    maps to ``out_channel`` outputs.

    NOTE(review): the first fully-connected layer is hard-wired to
    1216 (= 64 * 19) input features, which only matches a signal length of
    2560 — confirm against the data loader before changing input sizes.
    """

    def __init__(self, in_channel=1, out_channel=10):
        super(WDCNN, self).__init__()

        def _stage(c_in, c_out, kernel, stride=1, padding=0, pool=True):
            # One Conv1d -> BatchNorm1d -> ReLU stage, optionally capped
            # with a stride-2 max-pool inside the same Sequential.
            mods = [
                nn.Conv1d(c_in, c_out, kernel_size=kernel,
                          stride=stride, padding=padding),
                nn.BatchNorm1d(c_out),
                nn.ReLU(inplace=True),
            ]
            if pool:
                mods.append(nn.MaxPool1d(kernel_size=2, stride=2))
            return nn.Sequential(*mods)

        # Wide-kernel first stage; its pool is a separate module (`self.pol`)
        # so the submodule layout (and hence state_dict keys) matches the
        # original checkpoint format.
        self.layer1 = _stage(in_channel, 16, kernel=8, stride=4, padding=4,
                             pool=False)
        self.pol = nn.MaxPool1d(kernel_size=2, stride=2)

        self.layer2 = _stage(16, 32, kernel=3, padding=1)
        self.layer3 = _stage(32, 64, kernel=3, padding=1)
        self.layer4 = _stage(64, 64, kernel=3, padding=1)
        self.layer5 = _stage(64, 64, kernel=3)  # no padding here

        # Classifier head: 64 channels * 19 time steps = 1216 features.
        self.fc = nn.Sequential(
            nn.Flatten(),
            nn.Linear(1216, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, 100),
            nn.ReLU(inplace=True),
            nn.Linear(100, out_channel),
        )

    def forward(self, x):
        """Map raw signals ``(batch, length)`` to ``(batch, out_channel)``."""
        out = x.unsqueeze(1)  # (batch, length) -> (batch, 1, length)
        out = self.pol(self.layer1(out))
        for stage in (self.layer2, self.layer3, self.layer4, self.layer5):
            out = stage(out)
        return self.fc(out)

def main():
    """Train WDCNN as a single-output regressor on two bearing loaders.

    Uses project-local helpers: ``getLoader`` to build the data loaders and
    ``SavingMethod`` to log the training loss and save evaluation figures
    after every epoch.  Requires CUDA.
    """
    bearing_condition = 1
    batch_size = 64
    epochs = 300
    lr = 1e-3

    # NOTE(review): data paths point into the LSTM baseline's folders —
    # confirm this script is meant to share that data layout.
    loader1 = getLoader('../baseline/LSTM/learn_files', batch_size=batch_size,
                        bearing_condition=bearing_condition, bearing_label=1)
    loader2 = getLoader('../baseline/LSTM/learn_files', batch_size=batch_size,
                        bearing_condition=bearing_condition, bearing_label=2)
    test_loader = getLoader('../baseline/LSTM/test_files', batch_size=batch_size,
                            bearing_condition=bearing_condition, bearing_label=7)

    # Single regression output trained with L1 loss against the `l` target.
    model = WDCNN(out_channel=1).to('cuda')
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fun = torch.nn.L1Loss().to('cuda')

    Saving = SavingMethod(
        model_name=['./WDCNN.py'])

    for epoch in range(epochs):
        total_loss = 0.0
        for bearing in (loader1, loader2):
            for x, _y, l in bearing:
                x, l = x.to('cuda'), l.to('cuda')
                l_pre = model(x)
                loss = loss_fun(l_pre, l)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # .item() detaches the scalar from the graph; accumulating
                # the tensor itself kept every batch's autograd graph alive
                # for the whole epoch (unbounded GPU memory growth).
                total_loss += loss.item()
        # NOTE(review): the logged bearing id `3` and the '12' field below are
        # hard-coded while training actually uses bearings 1 and 2 — confirm.
        print('epoch:{},bearing:{},loss:{}'.format(epoch, 3, total_loss))
        Saving.savetrain_loss([str(epoch), '1', '12', '', '', str(total_loss)])
        Saving.savefig(model, test_loader, epoch)
        if epoch > 50:
            # NOTE(review): blocks on stdin after every epoch past 50 —
            # presumably a manual pause for inspection; confirm intent.
            input()


if __name__ == '__main__':
    main()
