import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
import time
from sklearn.metrics import mean_squared_error
from config import *

# from data_regression import get_dataset


class oneD_Net(nn.Module):
    """1D-convolutional regression network with a single residual shortcut.

    Each input sample is a flat feature vector of length ``input_channel``;
    ``forward`` reshapes it to (batch, input_channel, 1), treating every
    feature as a channel of a length-1 signal.  Three conv stages expand the
    channels input_channel -> 40 -> 80 -> 160, a shortcut branch projects the
    raw input directly to 160 channels, the two are summed, and a 3-layer MLP
    maps 160 -> 64 -> 16 -> out_channel.

    NOTE(review): BatchNorm1d layers require batch size > 1 in training mode.
    """

    def __init__(self, input_channel, out_channel):
        super(oneD_Net, self).__init__()
        self.input_channel = input_channel
        self.out_channel = out_channel
        self.conv1 = nn.Sequential(
            nn.Conv1d(
                in_channels=self.input_channel,
                out_channels=40,
                kernel_size=3,
                stride=2,
                padding=3,
                bias=False,
            ),
            nn.BatchNorm1d(40),
            nn.LeakyReLU(inplace=True),
            nn.MaxPool1d(kernel_size=3, stride=2),
        )
        self.conv2 = nn.Sequential(
            nn.Conv1d(40, 80, kernel_size=3, stride=2, padding=3, bias=False),
            nn.BatchNorm1d(80),
            nn.LeakyReLU(inplace=True),
            nn.MaxPool1d(kernel_size=3, stride=2),
        )
        self.conv3 = nn.Sequential(
            nn.Conv1d(80, 160, kernel_size=3, stride=2, padding=3, bias=False),
            nn.BatchNorm1d(160),
        )
        self.avg_pool = nn.AdaptiveAvgPool1d(1)

        # Projects the raw input to the same (batch, 160, 1) shape as the
        # main branch so the two can be summed.
        self.shortcut = nn.Sequential(
            nn.Conv1d(
                self.input_channel, 160, kernel_size=3, stride=2, padding=3, bias=False
            ),
            nn.BatchNorm1d(160),
            nn.MaxPool1d(kernel_size=3, stride=2),
        )
        self.liner1 = nn.Linear(160, 64)
        self.liner2 = nn.Linear(64, 16)
        self.liner3 = nn.Linear(16, self.out_channel)
        # Single shared parameter-free activation; the original built a new
        # nn.LeakyReLU object on every forward call.
        self.act = nn.LeakyReLU()

    def forward(self, x0):
        """Run the network.

        Parameters
        ----------
        x0 : Tensor of shape (batch, input_channel)

        Returns
        -------
        Tensor of shape (batch, out_channel)
        """
        # (batch, features) -> (batch, channels, length=1)
        x0 = x0.unsqueeze(-1)
        x = self.conv1(x0)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.avg_pool(x)
        # Residual connection; out-of-place add instead of `x +=` avoids
        # in-place-modification errors autograd can raise.
        x = x + self.shortcut(x0)

        x = x.view(-1, 160)
        x = self.act(self.liner1(x))
        x = self.act(self.liner2(x))
        return self.liner3(x)


def oneD_Net_train(model, x_train, y_train, batch_size, epochs, lr):
    """Train ``model`` in place with Adam on an MSE objective.

    Parameters
    ----------
    model : nn.Module
        Maps a float batch of shape (batch, n_in) to (batch, n_out).
    x_train, y_train : array-like
        Anything ``torch.Tensor`` accepts (e.g. numpy float arrays).
    batch_size : int
        Incomplete final batches are dropped (``drop_last=True``), so at
        least ``batch_size`` samples are required per epoch.
    epochs : int
        Number of full passes over the data.
    lr : float
        Initial Adam learning rate; decayed by gamma=0.8 every 50 epochs.

    Returns
    -------
    None — the model's parameters are updated in place.
    """
    model.train()

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.8)
    criterion = nn.MSELoss()

    train_dataset = TensorDataset(torch.Tensor(x_train), torch.Tensor(y_train))
    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        pin_memory=True,
        drop_last=True,
    )

    for epoch in range(epochs):
        for x_batch, y_batch in train_loader:
            optimizer.zero_grad()
            loss = criterion(model(x_batch), y_batch)
            loss.backward()
            optimizer.step()
        # Step the LR schedule once per epoch, not per batch.
        scheduler.step()


def oneD_Net_predict(model, x_test, y_test):
    """Run ``model`` on a test set and report per-output mean squared error.

    Parameters
    ----------
    model : nn.Module
        Maps (n_samples, n_in) to (n_samples, n_out).
    x_test : array-like, shape (n_samples, n_in)
    y_test : array-like, shape (n_samples, n_out)
        Ground-truth targets; one MSE is computed per output column.

    Returns
    -------
    (accuracy, predict)
        accuracy : np.ndarray, shape (n_out,) — per-column MSE
        predict : np.ndarray, shape (n_samples, n_out) — model outputs
    """
    model.eval()

    # Inference only: skip building the autograd graph.
    with torch.no_grad():
        predict = model(torch.Tensor(x_test)).numpy()

    # Per-column mean squared error, vectorized over the output columns
    # (equivalent to looping sklearn.mean_squared_error per column).
    accuracy = np.mean((np.asarray(y_test) - predict) ** 2, axis=0)

    print(f"Test Error: {accuracy}")
    return accuracy, predict


if __name__ == "__main__":
    # The top-level `from data_regression import get_dataset` was commented
    # out, which left `get_dataset` undefined and made this script crash with
    # a NameError.  Import it locally so the module can still be imported
    # elsewhere without pulling in the dataset-splitting code.
    from data_regression import get_dataset

    # `feature_data_file_path` and the feature-name lists come from `config`
    # via the star import at the top of the file.
    feature_data = np.load(feature_data_file_path)
    x_train, x_test, y_train, y_test = get_dataset(feature_data)

    net = oneD_Net(
        input_channel=len(in_feature_name_list), out_channel=len(out_feature_name_list)
    )
    oneD_Net_train(net, x_train, y_train, 64, 500, 0.05)
    DM_ResNet_acc, DM_ResNet_pred = oneD_Net_predict(net, x_test, y_test)
