import numpy as np
import torch
import torch.nn as nn
import torch.optim as opt
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

from config import *


class DM_NN:
    """Fully-connected regression network (MLP) wrapper around PyTorch.

    Holds a train/test split, builds a 2-hidden-layer sigmoid MLP, and
    exposes train / evaluate / predict / save_model helpers.
    """

    def __init__(
        self,
        x_train,
        x_test,
        y_train,
        y_test,
        in_features=None,
        out_features=None,
    ):
        """Build the MLP and store the data split.

        Args:
            x_train, x_test: 2-D arrays of input features (rows = samples).
            y_train, y_test: 2-D arrays of target values (rows = samples).
            in_features: input width; defaults to len(in_feature_name_list)
                from config for backward compatibility.
            out_features: output width; defaults to
                len(out_feature_name_list) from config.
        """
        # Fall back to the config globals only when not supplied, so the
        # class is usable (and testable) without the config module.
        if in_features is None:
            in_features = len(in_feature_name_list)
        if out_features is None:
            out_features = len(out_feature_name_list)

        class Model(nn.Module):
            # Two hidden layers (128, 256) with sigmoid activations and a
            # linear output head — no final activation (regression).
            def __init__(self, in_features, out_features):
                super().__init__()
                self.hidden = [128, 256]
                self.predict = nn.Sequential(
                    nn.Linear(
                        in_features=in_features, out_features=self.hidden[0], bias=True
                    ),
                    nn.Sigmoid(),
                    nn.Linear(
                        in_features=self.hidden[0],
                        out_features=self.hidden[1],
                        bias=True,
                    ),
                    nn.Sigmoid(),
                    nn.Linear(
                        in_features=self.hidden[1], out_features=out_features, bias=True
                    ),
                )

            def forward(self, input):
                return self.predict(input)

        self.model = Model(in_features=in_features, out_features=out_features)
        self.x_train, self.x_test = x_train, x_test
        self.y_train, self.y_test = y_train, y_test

    def train(self, batch_size, epochs, lr):
        """Train the model with Adam on MSE loss.

        Args:
            batch_size: number of samples per gradient step (data is taken
                in order; no shuffling between epochs).
            epochs: number of full passes over x_train.
            lr: Adam learning rate.
        """
        criterion = nn.MSELoss()
        optimizer = opt.Adam(self.model.parameters(), lr=lr)
        self.model.train()
        for _epoch in range(epochs):
            for start in range(0, len(self.x_train), batch_size):
                # torch.Tensor(...) also casts numpy float64 input to the
                # float32 expected by the model's weights.
                inputs = torch.Tensor(self.x_train[start : start + batch_size])
                targets = torch.Tensor(self.y_train[start : start + batch_size])
                optimizer.zero_grad()
                pred = self.model(inputs)
                loss = criterion(pred, targets)
                loss.backward()
                optimizer.step()

    def evaluate(self):
        """Evaluate on the held-out test split.

        Returns:
            (accuracy, pred): `accuracy` is a 1-D array with the MSE of each
            output column (despite the name it is an error — lower is
            better); `pred` is the (n_samples, n_outputs) prediction array.
        """
        self.model.eval()
        inputs = torch.Tensor(self.x_test)
        outputs = self.y_test
        # no_grad: inference only — skip autograd graph construction.
        with torch.no_grad():
            pred = self.model(inputs).numpy()

        # Per-column mean squared error (uniform-weight mean, identical to
        # sklearn.metrics.mean_squared_error's default).
        accuracy = np.zeros(self.y_test.shape[1])
        for i in range(self.y_test.shape[1]):
            accuracy[i] = np.mean((outputs[:, i] - pred[:, i]) ** 2)

        print(f"Test Error: {accuracy}")
        return accuracy, pred

    def predict(self, inputs):
        """Run the model on `inputs` and return predictions as a numpy array."""
        self.model.eval()  # ensure inference mode after a train() call
        with torch.no_grad():
            return self.model(torch.Tensor(inputs)).numpy()

    def save_model(self, save_path):
        """Serialize the model weights (state_dict) to `save_path`."""
        torch.save(self.model.state_dict(), save_path)
