import argparse
import os
import pdb
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf


# Silence TensorFlow's C++ INFO/WARNING logs; must be set before TF is used.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "2"

# Command-line hyper-parameters: (flag, type, default).
_ARG_SPECS = [
    ('--dim_x', int, 2),           # number of feature columns
    ('--dim_y', int, 1),           # number of target columns
    ('--lr', float, 1e-4),         # gradient-descent step size
    ('--scale', int, 0),           # nonzero -> min-max scale the features
    ('--test_split', float, 0.2),  # fraction of rows held out for testing
    ('--epoch', int, 10),          # number of training iterations
    ('--test_per', int, 5),        # NOTE(review): unused in the visible code
]
parser = argparse.ArgumentParser()
for _flag, _type, _default in _ARG_SPECS:
    parser.add_argument(_flag, type=_type, default=_default)
args = parser.parse_args()


@tf.function
def MSE(pred, true):
    """Summed squared error: sum((pred - true)^2).

    Both operands are flattened before subtraction so that a rank-2
    prediction ([n, 1]) compared against a rank-1 target ([n]) is
    matched element-wise instead of broadcasting into an [n, n]
    matrix of pairwise differences (which silently inflates the loss).
    For already-matching shapes the result is unchanged.

    NOTE(review): despite the name, this returns the *sum*, not the
    mean, of the squared errors.
    """
    flat_pred = tf.reshape(pred, [-1])
    flat_true = tf.reshape(true, [-1])
    return tf.math.reduce_sum(tf.square(flat_pred - flat_true))


def grad(X, Y, W, b):
    """Analytic gradient of the mean squared error of z = XW + b.

    X: [n, dim_x]
    Y: [n, dim_y] (a rank-1 [n] target is reshaped to match the prediction)
    W: [dim_x, dim_y]
    b: [1, dim_y]

    Returns (dW, db), each averaged over the n samples.

    Vectorized closed form — with residual R = XW + b - Y:
        dL/dW = (2/n) * X^T R        # [dim_x, dim_y]
        dL/db = (2/n) * sum_i R_i    # [1, dim_y]
    This replaces the original O(n) Python-level loop of 1-row matmuls
    with two whole-batch tensor ops; the math is identical.
    """
    pred = tf.matmul(X, W) + b                        # [n, dim_y]
    # Reshape Y to the prediction's shape so a 1-D target does not
    # broadcast [n, dim_y] - [n] into an [n, n] residual matrix.
    residual = pred - tf.reshape(Y, tf.shape(pred))   # [n, dim_y]
    n = tf.cast(tf.shape(X)[0], X.dtype)
    dw = 2.0 * tf.matmul(tf.transpose(X), residual)
    db = 2.0 * tf.reduce_sum(residual, axis=0, keepdims=True)
    return dw / n, db / n


df = pd.read_csv("house.csv", encoding="gbk")
# First dim_x columns are the features, the next dim_y column(s) the target.
X = df.iloc[:, :args.dim_x].values.astype(np.float32)
# Keep Y two-dimensional ([n, dim_y]): a rank-1 target makes `pred - Y`
# broadcast [n, 1] - [n] into an [n, n] matrix downstream, silently
# corrupting both the reported loss and the gradients.
Y = df.iloc[:, args.dim_x:args.dim_x + args.dim_y].values.astype(np.float32)
print(X.shape, Y.shape)

# shuffle rows (features and targets with the same permutation)
idx = np.random.permutation(X.shape[0])
X, Y = X[idx], Y[idx]

# split: first n_test rows become the test set, the rest train
n_test = int(X.shape[0] * args.test_split)
X_test, Y_test = X[:n_test], Y[:n_test]
X_train, Y_train = X[n_test:], Y[n_test:]

# scale: MinMaxScaler already normalizes each column independently, so a
# single fit on the training matrix is equivalent to the original
# per-column loop; the test set is transformed with train-set statistics
# only (no leakage).
if args.scale:
    scaler = MinMaxScaler()
    X_train = scaler.fit_transform(X_train).astype(np.float32)
    X_test = scaler.transform(X_test).astype(np.float32)

X_test, Y_test = tf.constant(X_test), tf.constant(Y_test)
X_train, Y_train = tf.constant(X_train), tf.constant(Y_train)

# model: z = xW + b
W = tf.Variable(tf.random.normal([args.dim_x, args.dim_y]))
b = tf.Variable(tf.zeros([1, args.dim_y]))


for epoch in range(args.epoch):
    pred = tf.matmul(X_train, W) + b
    loss = MSE(pred, Y_train)
    dW, db = grad(X_train, Y_train, W, b)
    W.assign_sub(args.lr * dW)
    b.assign_sub(args.lr * db)

    print("---", epoch, "---")
    print("train MSE:", loss.numpy())
    pred = tf.matmul(X_test, W) + b
    loss = MSE(pred, Y_test)
    print("test MSE:", loss.numpy())
