import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
import numpy as np

# --- Synthetic linear dataset: y = 2*x0 - 3.4*x1 + 4.2 (no noise) ---
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
# Standard-normal features, exact linear labels.
features = np.random.normal(0, 1, (num_examples, num_inputs))
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
# Fixed-seed 75/25 split so runs are comparable.
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.25, random_state=0)
# Training matrix carries the target as its LAST column — the model fits a
# plane through (x0, x1, y) space, so the label is treated as a coordinate.
train_data = torch.from_numpy(np.column_stack((X_train, y_train)))
n_feature = train_data.size(1)


class Regression(nn.Module):
    """Plane-fitting module: learns a unit-norm normal vector ``w`` and bias
    ``b`` so that input points lie near the hyperplane ``w . x + b = 0``.

    ``forward`` returns a hinge-like loss: the summed magnitude of signed
    distances that fall OUTSIDE the ``[-eps, eps]`` dead zone, so minimizing
    it pushes every point to within ``eps`` of the plane.
    """

    def __init__(self, eps, in_features=None):
        """
        Args:
            eps: half-width of the tolerance band; residuals inside
                ``(-eps, eps)`` contribute nothing to the loss.
            in_features: number of input columns. Defaults to the
                module-level ``n_feature`` (features + label column) for
                backward compatibility with existing callers.
        """
        super(Regression, self).__init__()
        if in_features is None:
            in_features = n_feature  # module-level global, as before
        # Small random init; np.random.normal yields float64, matching the
        # float64 data tensors built via torch.from_numpy.
        self.w = nn.Parameter(torch.tensor(np.random.normal(0, 0.01, (in_features, 1))))
        # Bias in float64 too: the original used float32 zeros and silently
        # relied on dtype promotion against the float64 weights/data.
        self.b = nn.Parameter(torch.zeros(1, dtype=torch.float64))
        self.eps = eps

    def forward(self, x):
        # Normalize w so matmul gives true (scaled) signed distances to the
        # plane rather than distances scaled by ||w||.
        w = self.w / self.w.norm()
        d = torch.matmul(x, w) + self.b
        # Sum of |d| over points outside the eps band (empty selections sum to 0).
        return d[d > self.eps].sum() - d[d < -self.eps].sum()


def perdict(train_data, test_data, epochs=500, lr=0.01, eps=1):
    # NOTE(review): "perdict" is a typo of "predict", kept because callers
    # (e.g. the driver at the bottom of this file) use this name.
    """Fit a Regression plane to ``train_data`` and predict labels for
    ``test_data`` by solving the plane equation for the label coordinate.

    Args:
        train_data: (n, d+1) tensor whose LAST column is the target.
        test_data: (m, d) tensor of features only.
        epochs: number of full-batch SGD steps (default 500, as before).
        lr: SGD learning rate (default 0.01, as before).
        eps: dead-zone half-width passed to Regression (default 1, as before).

    Returns:
        (m, 1) tensor of predicted labels (gradient-detached data).
    """
    model = Regression(eps)
    opt = optim.SGD(model.parameters(), lr=lr)
    for _ in range(epochs):
        # Full-batch loss: distance mass outside the eps band around the plane.
        loss = model(train_data)
        opt.zero_grad()
        loss.backward()
        opt.step()
    # Plane: w[:-1].x + w[-1]*y + b = 0  =>  y = -(w[:-1].x + b) / w[-1]
    yy = -(torch.matmul(test_data, model.w[:-1]) + model.b) / model.w[-1]
    return yy.data

def evaluation(y, predictions):
    """Return regression metrics ``(mae, mse, rmse, r_squared)`` comparing
    ``predictions`` against ground truth ``y`` (array-likes of equal length).
    """
    mae = mean_absolute_error(y, predictions)
    # Compute MSE once and reuse it for RMSE (original called
    # mean_squared_error twice).
    mse = mean_squared_error(y, predictions)
    rmse = np.sqrt(mse)
    r_squared = r2_score(y, predictions)
    return mae, mse, rmse, r_squared


# Train on the (features, label) matrix, predict the held-out labels, and
# report (MAE, MSE, RMSE, R^2) on the test split.
y_perdict = perdict(train_data, torch.from_numpy(X_test))
metrics = evaluation(y_test, y_perdict)
print(metrics)