import tensorflow.compat.v1 as tf
import numpy as np
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import os
import sys
from python_ai.common.xcommon import *
import python_ai.ML.lin_regression.xlib as xlib

# Hyperparameters for the manual gradient-descent loop below.
ALPHA = 1e-2  # learning rate for the w/b update steps in the 'bp' scope
ITERS = 1000  # number of gradient-descent iterations
FILE_NAME = os.path.basename(__file__)  # used as the TensorBoard log subdirectory under '_log'

# Fix the graph-level seed so the random weight initialization is reproducible.
tf.random.set_random_seed(777)

# --- Data loading and standardization --------------------------------------
# Loads a header-less numeric CSV; the last column is the target score and
# the preceding columns are features (the graph below expects 3 features).
path = '../data-01-test-score.csv'
data = np.loadtxt(path, delimiter=',')
data_ori = data.copy()  # keep an unscaled copy for the xlib cross-check later

# Standardize every column (features AND target) to zero mean / unit variance.
# mu and sigma are kept so the weights learned on scaled data can be mapped
# back to the original scale at the end of the session.
print(data.mean(axis=0), data.std(axis=0))
scaler = StandardScaler()
data = scaler.fit_transform(data)
mu = scaler.mean_
sigma = scaler.scale_
print(mu, sigma)

x_data = data[:, :-1]  # all columns but the last: features
y_data = data[:, -1:]  # last column, kept 2-D ([m, 1]) to match placeholder y

with tf.variable_scope('placeholders'):
    # Feature count taken from the loaded data instead of a hard-coded 3,
    # so the graph adapts if the CSV gains or loses feature columns.
    n_features = x_data.shape[1]
    x = tf.placeholder(tf.float32, shape=[None, n_features], name='ph_x')
    y = tf.placeholder(tf.float32, shape=[None, 1], name='ph_y')

with tf.variable_scope('hypothesis'):
    # Linear model h = x @ w + b.  The weight shape follows the data's
    # feature count instead of a hard-coded 3, matching placeholder x.
    w = tf.Variable(tf.random.normal([x_data.shape[1], 1]), dtype=tf.float32, name='w')
    b = tf.Variable(tf.random.normal([1, 1]), dtype=tf.float32, name='b')

    h = tf.add(tf.matmul(x, w), b, name='h')

with tf.variable_scope('cost_func'):
    # Half mean-squared error: J = e'e / (2m).
    # BUG FIX: the original divided e'e by 2 only (half *sum* of squared
    # errors), which is inconsistent with the gradients in the 'bp' scope
    # (dw = X'e / m, db = mean(e)) and with the R^2 metric, both of which
    # normalize by the sample count m.  Normalizing here as well makes the
    # logged cost the objective those gradients actually descend, and makes
    # it independent of dataset size.
    e = tf.subtract(h, y, name='e')  # residuals, shape [m, 1]
    m = tf.cast(tf.shape(x)[0], dtype=tf.float32)  # number of samples
    j = tf.divide(
        # e'e is a [1, 1] matrix; reduce_mean collapses it to a scalar.
        tf.reduce_mean(
            tf.matmul(
                tf.transpose(e), e
            )
        ),
        2. * m,
        name='j'
    )
    tf.summary.scalar('cost_scalar', j)

with tf.variable_scope('metric'):
    # Coefficient of determination: R^2 = 1 - MSE / Var(y).
    # Built from named intermediates; the result stays a [1, 1] matrix
    # (matmul output) so downstream code can index r2[0][0].
    sse = tf.matmul(tf.transpose(e), e)                   # sum of squared residuals, [1, 1]
    m = tf.cast(tf.shape(x)[0], dtype=tf.float32)         # number of samples
    var_y = tf.reduce_mean((y - tf.reduce_mean(y)) ** 2)  # variance of the targets
    r2 = tf.subtract(1., sse / m / var_y, name='r2')
    tf.summary.scalar('r2_scalar', r2[0][0])

with tf.variable_scope('bp'):
    # Hand-rolled gradient-descent step (no tf optimizer): gradients of the
    # half-MSE objective w.r.t. w and b, then in-place assignments.
    m = tf.cast(tf.shape(x)[0], dtype=tf.float32)
    xt_e = tf.matmul(tf.transpose(x), e)       # X'e, shape [n_features, 1]
    dw = tf.divide(xt_e, m, name='dw')
    db = tf.reduce_mean(e, name='db')
    # Running `update` in sess.run applies one descent step to both variables.
    update = [
        tf.assign(w, w - ALPHA * dw, name='update_w'),
        tf.assign(b, b - ALPHA * db, name='update_b'),
    ]

summary = tf.summary.merge_all()  # single op evaluating every scalar summary defined above

with tf.Session() as sess:

    # FileWriter as a context manager so the event file is flushed and closed.
    with tf.summary.FileWriter(os.path.join('_log', FILE_NAME)) as fw:
        fw.add_graph(sess.graph)

        sess.run(tf.global_variables_initializer())

        j_history = np.zeros(ITERS)  # per-iteration cost, for the plot below
        GROUP = ITERS // 20          # print progress roughly 20 times over the run
        for i in range(ITERS):
            # One training step: evaluating `update` applies the w/b
            # assignments; cost, r2 and the merged summary are computed in
            # the same run call on the full (scaled) dataset.
            cost, _, r2_value, sum_value = sess.run([j, update, r2, summary], feed_dict={x: x_data, y: y_data})
            j_history[i] = cost
            fw.add_summary(sum_value, i)
            if i % GROUP == 0:
                print(f'cost = {cost}, r2 = {r2_value}')
        # Also print the final iteration if the loop didn't just print it
        # (relies on Python keeping the loop variable `i` alive after the loop).
        if i % GROUP != 0:
            print(f'cost = {cost}, r2 = {r2_value}')

        # NOTE(review): no plt.show()/savefig follows — the cost curve is only
        # visible if the figure is displayed elsewhere (e.g. interactive mode).
        plt.plot(j_history)

        # Map the weights learned on standardized data back to the original
        # feature/target scale and write them into the graph variables so h
        # can be fed raw (unscaled) scores directly.
        # inverse_transform_weights comes from the star import of
        # python_ai.common.xcommon — presumably returns (w, b) in original
        # units; verify against that module.
        w_now = w.eval()
        b_now = b.eval()
        print('now:', b_now, w_now)
        w_ori, b_ori = inverse_transform_weights(w_now, b_now, mu, sigma)
        print('inv:', b_ori, w_ori)
        sess.run([tf.assign(w, w_ori.reshape(3, 1)), tf.assign(b, b_ori.reshape(1, 1))])

        print("Your score will be ", sess.run(h, feed_dict={x: [[100, 70, 101]]}))

        print("Other scores will be ", sess.run(h,
                                                feed_dict={x: [[60, 70, 110], [90, 100, 80]]}))

        # Cross-check: recover the original-scale parameters a second way via
        # xlib.scale_theta_back (theta packed as [b; w]), assign them, and
        # repeat the same predictions — the outputs should agree with above.
        theta_ori = xlib.scale_theta_back(data_ori[:, :-1], data_ori[:, -1], np.concatenate([b_now, w_now], axis=0))
        b_ori = theta_ori[0][0]
        w_ori = theta_ori[1:].ravel()
        print('inv by xlib:', b_ori, w_ori)
        sess.run([tf.assign(w, w_ori.reshape(3, 1)), tf.assign(b, b_ori.reshape(1, 1))])

        print("Your score will be ", sess.run(h, feed_dict={x: [[100, 70, 101]]}))

        print("Other scores will be ", sess.run(h,
                                                feed_dict={x: [[60, 70, 110], [90, 100, 80]]}))