from python_ai.common.xcommon import *
import tensorflow.compat.v1 as tf
import tensorflow as tsf
import numpy as np
import os
import matplotlib.pyplot as plt

# Reproducibility: fix both the NumPy and the TF graph-level seeds.
np.random.seed(777)
tf.set_random_seed(777)
file_name = os.path.basename(__file__)  # used to tag checkpoint and TensorBoard paths

ver = 'v1.5'              # checkpoint version tag
batch_size = 100          # training mini-batch size
n_epoch = 100             # maximum number of passes over the training set
learning_rate = 0.001     # Adam step size
n_fc1_hidden = 1024       # width of the first fully-connected layer
n_digits = 10             # classes per captcha character (digits 0-9)
n_vcode_len = 4           # characters per captcha image
n_onehot = n_digits * n_vcode_len  # flattened one-hot label length (4 chars x 10 classes)
save_path = './_save/' + file_name + '_' + ver

# NOTE(review): data directory lives outside the repo tree — confirm it exists before running.
base_path = r'../../../../../large_data/DL1/vcode_data/'
train_path = base_path + 'train'
test_path = base_path + 'test'

def laod_data(path, type):
    """Load captcha images and their one-hot labels from *path*.

    Each file in *path* is an image whose basename is the captcha text
    (e.g. ``1234.png``); the first ``n_vcode_len`` characters of the
    basename are taken as the digit labels.

    Args:
        path: directory containing the image files.
        type: human-readable tag used only in progress messages
            (e.g. ``'train'`` / ``'test'``).

    Returns:
        ``(data, y)`` where ``data`` is a float32 array of images scaled
        to ``[0, 1]`` and ``y`` has shape ``(n_samples, n_onehot)``: the
        per-character one-hot rows (shape ``(n_vcode_len, n_digits)``)
        flattened row-major.
    """
    batch_for_print = 200  # progress message every this many files
    data = []
    labels = []
    one_hot_seed = np.eye(n_digits)

    n = 0
    for filename in os.listdir(path):
        n += 1
        vcode_str = os.path.splitext(filename)[0]
        vcode_int_arr = [int(ch) for ch in vcode_str][:n_vcode_len]
        # one_hot_seed[vcode_int_arr] -> shape (n_vcode_len, n_digits)
        labels.append(one_hot_seed[vcode_int_arr])

        # BUG FIX: read from the directory we were asked to load, not
        # unconditionally from train_path — the old code loaded train
        # images even when asked for the test set.
        data.append(plt.imread(path + '/' + filename))
        if n % batch_for_print == 0:
            print(f'{n} {type} samples loaded.')
    if n % batch_for_print != 0:
        print(f'{n} {type} samples loaded.')
    print(f'{type} LOADED!')

    data = np.array(data, dtype=np.float32) / 255.0
    y = np.array(labels)
    y = y.reshape([-1, n_onehot])
    return data, y

# Skip the (slow) training-set load entirely when a checkpoint already
# exists — in that case we only evaluate, so empty arrays suffice.
if os.path.exists(save_path + '.meta'):
    x_train, y_train = np.array([]), np.array([])

else:
    x_train, y_train = laod_data(train_path, 'train')

x_test, y_test = laod_data(test_path, 'test')

# check_shape comes from python_ai.common.xcommon (star import above);
# presumably it just reports the array's shape — TODO confirm.
check_shape(x_train, 'x_train')
check_shape(y_train, 'y_train')
check_shape(x_test, 'x_test')
check_shape(y_test, 'y_test')

# shuffle
m_train = len(x_train)
# Image geometry is taken from the test set so it is available even when
# the training-set load was skipped above.
m_test, pic_h, pic_w, pic_ch = x_test.shape
a = np.random.permutation(m_train)
x_train = x_train[a]
y_train = y_train[a]

# mini-batch
# Cursor into the shuffled training set; advanced by next_batch() and
# reset to 0 at the start of every epoch in the training loop.
g_batch_i = 0


def next_batch(batch_size):
    """Return the next (x, y) training slice and advance the global cursor."""
    global g_batch_i
    start, stop = g_batch_i, g_batch_i + batch_size
    g_batch_i = stop
    return x_train[start:stop], y_train[start:stop]


# placeholder
with tf.variable_scope('Input'):
    # ph_x: batch of captcha images, shape (batch, pic_h, pic_w, pic_ch)
    # taken from the test set; ph_y: flattened one-hot labels (batch, n_onehot).
    ph_x = tf.placeholder(tf.float32, [None, pic_h, pic_w, pic_ch], 'ph_x')
    ph_y = tf.placeholder(tf.float32, [None, n_onehot], 'ph_y')

# 3. First convolution layer + ReLU: 16*16*1 ==> 16*16*32
with tf.variable_scope('C1'):
    # 3x3 filters over pic_ch input channels -> 32 feature maps.
    filter1 = tf.Variable(tf.random.normal([3, 3, pic_ch, 32]), dtype=tf.float32, name='filter1')
    conv1 = tf.nn.conv2d(ph_x, filter1, strides=[1, 1, 1, 1], padding='SAME', name='conv1')
    relu1 = tf.nn.relu(conv1, 'relu1')
    # 4. 2x2 max pooling, stride 2: ==> 8*8*32
    pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')

# 5. Second convolution layer + ReLU: 8*8*32 ==> 8*8*64
with tf.variable_scope('C2'):
    # 3x3 filters over the 32 maps from pool1 -> 64 feature maps.
    filter2 = tf.Variable(tf.random.normal([3, 3, 32, 64]), dtype=tf.float32, name='filter2')
    conv2 = tf.nn.conv2d(pool1, filter2, strides=[1, 1, 1, 1], padding='SAME', name='conv2')
    # BUG FIX: op name was 'relu1' (copy-pasted from layer C1); renamed to
    # 'relu2' so the TensorBoard graph labels this layer correctly.
    relu2 = tf.nn.relu(conv2, 'relu2')
    # 6. 2x2 max pooling, stride 2: ==> 4*4*64
    pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')

# 7. Fully-connected head: 4*4*64 ==> n_onehot logits
with tf.variable_scope('FC'):
    # Flatten the pooled feature maps; static shapes here, so the
    # Dimension product is concrete.
    fc_dim = pool2.shape[1] * pool2.shape[2] * pool2.shape[3]
    fc_in = tf.reshape(pool2, [-1, fc_dim], name='fc_in')
    # NOTE(review): tf.contrib exists only in TF 1.x (hence the plain
    # `tensorflow as tsf` import alongside compat.v1) — confirm the runtime
    # TF version supports it.
    fc1 = tsf.contrib.layers.fully_connected(fc_in, n_fc1_hidden, activation_fn=tf.nn.relu)
    logits = tsf.contrib.layers.fully_connected(fc1, n_onehot, activation_fn=None)

# 8. Cost: independent sigmoid cross-entropy over the n_onehot label bits.
with tf.variable_scope('Cost_acc_summary'):
    cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=ph_y))
    # BUG FIX: labels were built as one_hot_seed[vcode_int_arr] with shape
    # (n_vcode_len, n_digits) before flattening (see laod_data), so the
    # inverse reshape must be [-1, n_vcode_len, n_digits] — the old code
    # used [-1, n_digits, n_vcode_len] and argmax over axis 1, which
    # compared scrambled views of logits and labels, so 'acc' was not
    # per-character accuracy.  Also removed a leftover debug print.
    logits_separated = tf.reshape(logits, [-1, n_vcode_len, n_digits])
    y_separated = tf.reshape(ph_y, [-1, n_vcode_len, n_digits])
    # predict: (batch, n_vcode_len) — predicted digit id per character slot.
    predict = tf.argmax(logits_separated, axis=2)
    acc = tf.reduce_mean(
        tf.cast(
            tf.equal(
                predict,
                tf.argmax(y_separated, axis=2)
            ),
            dtype=tf.float32
        )
    )
    tf.summary.scalar('cost', cost)
    tf.summary.scalar('acc', acc)
    summary = tf.summary.merge_all()

# 9. Adam optimizer minimizing the sigmoid cross-entropy cost.
with tf.variable_scope('Train'):
    train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    saver = tf.train.Saver()
    if os.path.exists(save_path + '.meta'):
        # A checkpoint exists: restore the trained weights instead of retraining.
        saver.restore(sess, save_path)
        print('SESSION LOADED!')
    else:
        with tf.summary.FileWriter('./_log/' + file_name, sess.graph) as fw:
            print('TRAINING STARTED!')
            g_step = -1  # global step across all epochs, for TensorBoard
            # 10. Train for up to n_epoch epochs over the shuffled training set.
            for epoch in range(n_epoch):
                total_batch = int(np.ceil(m_train / batch_size))
                g_batch_i = 0  # rewind the mini-batch cursor for this epoch
                group = int(np.ceil(total_batch / 10))  # ~10 progress prints per epoch
                for i in range(total_batch):
                    g_step += 1
                    # 11. Train on mini-batches of `batch_size` samples each.
                    bx, by = next_batch(batch_size)
                    _, costv, accv, sv = sess.run([train, cost, acc, summary], feed_dict={ph_x: bx, ph_y: by})
                    fw.add_summary(sv, g_step)
                    if i % group == 0:
                        print(f'g_step#{g_step+1}: epoch#{epoch+1}: batch#{i+1}: cost = {costv}, acc= {accv}')
                        fw.flush()
                    if np.isclose(1.0, accv):
                        break
                if i % group != 0:
                    # Make sure the last batch of the epoch is also reported.
                    print(f'g_step#{g_step + 1}: epoch#{epoch + 1}: batch#{i + 1}: cost = {costv}, acc= {accv}')
                    fw.flush()
                if np.isclose(1.0, accv):
                    print('TRAINING CONVERGED!')
                    break
            print('TRAINING OVER!')
            saver.save(sess, save_path)
            print('SESSION SAVED!')

    # 12. Evaluate accuracy on the full test set.
    accv = sess.run(acc, feed_dict={ph_x: x_test, ph_y: y_test})
    print('测试集，计算准确率')
    print(accv)

    # 13. Spot-check one random test sample.
    print('抽一个测试样本进行验证')
    # BUG FIX: np.random.randint's upper bound is exclusive, so the old
    # `randint(0, m_test - 1)` could never select the last test sample.
    rand_idx = np.random.randint(0, m_test)
    x_verif = x_test[rand_idx:rand_idx + 1]
    y_verif = y_test[rand_idx:rand_idx + 1]
    # BUG FIX: decode the label the same way laod_data encoded it — rows
    # are character positions, columns are digit classes — so reshape to
    # (n_vcode_len, n_digits) and argmax over the class axis (axis=2).
    y_verif = y_verif.reshape([-1, n_vcode_len, n_digits])
    print(f'Target: {np.argmax(y_verif, axis=2)}')
    h_verif = sess.run(predict, feed_dict={ph_x: x_verif})
    print(f'Hypothesis: {h_verif}')
