from python_ai.common.xcommon import sep
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

tf.compat.v1.set_random_seed(777)
# This file builds a TF1-style static graph; under TF2 the placeholders below
# raise unless eager execution is switched off first.
tf.compat.v1.disable_eager_execution()

sep('Load data')
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2 — pin scikit-learn < 1.2 or migrate to fetch_california_housing.
x_data, y_data = load_boston(return_X_y=True)
y_data = y_data.reshape(-1, 1)  # column vector, to match the (m, 1) hypothesis
# Use separate scalers for features and targets: the original reused one
# scaler, so fitting on y discarded the x statistics and made it impossible
# to inverse-transform predictions back to house prices. The transformed
# values are numerically identical to the original code's.
x_scaler = StandardScaler()
y_scaler = StandardScaler()
x_data = x_scaler.fit_transform(x_data)
y_data = y_scaler.fit_transform(y_data)
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, train_size=0.7, random_state=777)
_, n = x_train.shape  # n = number of input features

with tf.name_scope('model'):
    sep('Placeholder')
    # Feed points: a batch of feature rows (m, n) and matching targets (m, 1).
    x = tf.compat.v1.placeholder(tf.float32, shape=[None, n], name='placeholder_x')
    y = tf.compat.v1.placeholder(tf.float32, shape=[None, 1], name='placeholder_y')

    sep('w and b')
    # Trainable parameters, zero-initialized (fine here: the MSE objective of a
    # linear model is convex, so no symmetry-breaking is needed).
    w = tf.Variable(np.zeros([n, 1]), dtype=tf.float32, name='weight')
    b = tf.Variable(0., name='bias')

    sep('model')
    # Linear hypothesis h = X·w + b, published under a stable tensor name.
    h = tf.identity(tf.matmul(x, w) + b, name='h')

with tf.name_scope('cost'):
    sep('Cost')
    # Half mean-squared-error; the 1/2 is conventional so the gradient has no
    # stray factor of 2.
    residual = h - y
    cost = tf.identity(tf.reduce_mean(residual ** 2) / 2, name='cost')

with tf.name_scope('gradient_descent'):
    sep('Train')
    # One full-batch gradient-descent step per run; fixed learning rate 0.01.
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.01)
    train = optimizer.minimize(cost)

with tf.name_scope('score'):
    # R^2 = 1 - SS_res / SS_tot. Means are used instead of sums: the 1/m
    # factors cancel in the ratio, so the value is unchanged.
    ss_res = tf.reduce_mean((h - y) ** 2)
    ss_tot = tf.reduce_mean((y - tf.reduce_mean(y)) ** 2)
    r2 = 1 - ss_res / ss_tot

sep('Session')
with tf.compat.v1.Session() as sess:
    # Dump the graph for TensorBoard; the writer is only needed for sess.graph.
    with tf.compat.v1.summary.FileWriter('./log/multi_feature_lin_regr', sess.graph) as fw:
        pass
    sess.run(tf.compat.v1.global_variables_initializer())
    iters = 2000
    LIM = 3  # how many weight components to show in progress lines
    group = iters // 10  # print progress every `group` iterations
    j_his = np.zeros(iters)  # cost history, plotted later
    r2_his = np.zeros(iters)  # R2 history, plotted later
    for i in range(iters):
        # w/b/cost/r2 are fetched in the same run as the train op, so the
        # logged values may reflect the pre-update state of this step.
        w_v, b_v, cost_v, _, r2_v = sess.run([w, b, cost, train, r2], feed_dict={x: x_train, y: y_train})
        j_his[i] = cost_v
        r2_his[i] = r2_v
        if i % group == 0:
            print(f'#{i + 1} cost = {cost_v}, r2 = {r2_v}, w[:{LIM}] = {np.transpose(w_v[:LIM])}, b = {b_v}')
    # Report the final iteration if the periodic print above did not cover it
    # (r2 added for consistency with the in-loop progress line).
    if i % group != 0:
        print(f'#{i + 1} cost = {cost_v}, r2 = {r2_v}, w[:{LIM}] = {np.transpose(w_v[:LIM])}, b = {b_v}')
    h_train = sess.run(h, feed_dict={x: x_train})
    print(f'R2 score of training set: {r2_score(y_train, h_train)}')
    print(f'R2 score of training set: {sess.run(r2, feed_dict={x: x_train, y: y_train})} by tf')
    h_test = sess.run(h, feed_dict={x: x_test})
    print(f'R2 score of testing set: {r2_score(y_test, h_test)}')
    # BUG FIX: this line evaluates on the *testing* set but was labelled
    # "training set" in the original.
    print(f'R2 score of testing set: {sess.run(r2, feed_dict={x: x_test, y: y_test})} by tf')

sep('figure')
plt.figure(figsize=[8, 8])
spr = 2  # subplot grid rows
spc = 2  # subplot grid columns
spn = 0  # running subplot index

sep('history of cost function value')
spn += 1
plt.subplot(spr, spc, spn)
plt.plot(j_his, label='Cost function')
plt.legend()

sep('history of r2 value')
spn += 1
plt.subplot(spr, spc, spn)
plt.plot(r2_his, label='R2')
plt.legend()

sep('train set visualization')
spn += 1
plt.subplot(spr, spc, spn)
plt.title('Training set')
# target-vs-target gives the y = x reference line; predictions scatter around it.
plt.scatter(y_train, y_train, s=1, color='b', label='target')
plt.scatter(y_train, h_train, s=1, color='y', label='hypothesis')
plt.legend()

sep('test set visualization')
spn += 1
plt.subplot(spr, spc, spn)
plt.title('Testing set')
plt.scatter(y_test, y_test, s=1, color='b', label='target')
plt.scatter(y_test, h_test, s=1, color='y', label='hypothesis')
plt.legend()

# BUG FIX: the original never displayed the figure — when run as a script the
# process exits before anything is shown.
plt.tight_layout()
plt.show()
