import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers, activations, losses, optimizers, metrics, callbacks
import numpy as np
import matplotlib.pyplot as plt
import os
from python_ai.common.xcommon import *
import pickle
import datetime
import re
import pandas as pd

print('Started')
# Fix the seeds so runs are reproducible.
tf.random.set_seed(1)
np.random.seed(1)

VER = 'v5.0'
ALPHA = 0.0001  # learning rate for Adam
BATCH_SIZE = 64
FILE_NAME = os.path.basename(__file__)
# Checkpoints and logs are grouped per script name and version.
SAVE_DIR = os.path.join('_save', FILE_NAME, VER)
os.makedirs(SAVE_DIR, exist_ok=True)
WEIGHTS_TAG = 'weights'
PICKLE_TAG = 'pickle'
# FIX: build these with os.path.join like SAVE_DIR/LOG_DIR above -- the
# originals concatenated with '/' inconsistently.
SAVE_PREFIX = os.path.join(SAVE_DIR, WEIGHTS_TAG)
PICKLE_PREFIX = os.path.join(SAVE_DIR, PICKLE_TAG)
LOG_DIR = os.path.join('_log', FILE_NAME, VER)
print(f'ALPHA = {ALPHA}, BATCH_SIZE = {BATCH_SIZE}')


def load_img(path):
    """Load one captcha JPEG and derive its 4-digit label from the file name.

    The label is taken from the 4 characters immediately preceding the
    4-character '.jpg' suffix (i.e. characters [-8:-4] of the lower-cased
    path).

    Returns (image, digits): image is float32 in [0, 1] with 3 channels;
    digits is an int32 tensor of shape (4,).
    """
    path = tf.strings.lower(path)
    path_len = tf.strings.length(path)
    # The digits sit right before the '.jpg' extension.
    digit_str = tf.strings.substr(path, path_len - 8, 4)
    digit_chars = tf.strings.unicode_split(digit_str, 'UTF-8')
    digit_tensor = tf.strings.to_number(digit_chars, out_type=tf.int32)

    raw = tf.io.read_file(path)
    img = tf.image.decode_jpeg(raw, channels=3)
    img = tf.cast(img, tf.float32) / 255.

    return img, digit_tensor


# Glob patterns for the captcha image sets; file names are expected to end
# with the 4-digit label followed by '.jpg' (see load_img).
vcode_dir_train = '../../../../large_data/DL1/_many_files/vcode_data/train/*.jpg'
vcode_dir_test = '../../../../large_data/DL1/_many_files/vcode_data/test/*.jpg'


def ds_from_dir(pattern):
    """Build a shuffled, batched, prefetched dataset of (image, label) pairs.

    `pattern` is a glob of *.jpg files understood by
    tf.data.Dataset.list_files.  (Renamed from `dir`, which shadowed the
    builtin; all in-file calls are positional.)
    """
    # FIX: shuffle the cheap file-path strings BEFORE decoding.  The original
    # shuffled after map, which buffered up to 2000 decoded float32 images in
    # memory.  Decoding is also parallelized.
    return tf.data.Dataset.list_files(pattern) \
        .shuffle(2000) \
        .map(load_img, num_parallel_calls=tf.data.experimental.AUTOTUNE) \
        .batch(BATCH_SIZE) \
        .prefetch(tf.data.experimental.AUTOTUNE)


# The test set doubles as validation data during fit() below.
print('Loading test set ...')
ds_test = ds_from_dir(vcode_dir_test)
print('Loaded.')

# LeNet-style network: four Conv-BN-ReLU-MaxPool stages, then dense layers
# producing 4 independent softmax distributions over the 10 digit classes.
_conv_stages = [(6, (5, 5)), (16, (3, 3)), (32, (3, 3)), (64, (3, 3))]
_feature_layers = []
for _filters, _kernel in _conv_stages:
    _feature_layers += [
        layers.Conv2D(_filters, _kernel),
        layers.BatchNormalization(),
        layers.ReLU(),
        layers.MaxPool2D(strides=(2, 2), padding='same'),
    ]

model = keras.Sequential(_feature_layers + [
    layers.Flatten(),
    layers.Dense(120, activation=activations.relu),
    layers.Dense(84, activation=activations.relu),
    layers.Dense(40, activation=None),
    layers.Reshape((4, 10)),  # (batch, 4 digit positions, 10 classes)
    layers.Softmax(),
])
model.build(input_shape=(None, 60, 160, 3))
model.summary(line_length=120)


def my_acc(y_true, y_pred):
    """Whole-code accuracy: a sample counts only when all 4 digits match."""
    labels = tf.cast(y_true, dtype=tf.int32)
    predicted = tf.argmax(y_pred, axis=2, output_type=tf.int32)
    # Reduce over the digit axis: every position must agree.
    all_match = tf.reduce_all(tf.equal(labels, predicted), axis=1)
    return tf.reduce_mean(tf.cast(all_match, dtype=tf.float32))


def my_acc_digit(y_true, y_pred):
    """Per-digit accuracy: fraction of individual digit positions predicted right."""
    labels = tf.cast(y_true, dtype=tf.int32)
    predicted = tf.argmax(y_pred, axis=2, output_type=tf.int32)
    return tf.reduce_mean(tf.cast(tf.equal(labels, predicted), dtype=tf.float32))


# FIX: `lr` was deprecated (and later removed) in Keras optimizers; the
# supported keyword is `learning_rate`.
model.compile(loss=losses.sparse_categorical_crossentropy,
              optimizer=optimizers.Adam(learning_rate=ALPHA),
              metrics=['accuracy', metrics.sparse_categorical_accuracy, my_acc, my_acc_digit],
              )

EPOCH_BASE = 0

# Scan SAVE_DIR for previously saved checkpoints.  Each pickle file is named
# '<PICKLE_TAG>-<epoch>-<timestamp>.pickle' and holds the logs dict saved at
# the end of that epoch.  (FIX: dropped unused `base`/`ext` locals and only
# build `path` for files that actually match.)
files = os.listdir(SAVE_DIR)
weights_regexp = re.compile(f'^{WEIGHTS_TAG}-(\\d+)-([^\\-\\.]+)\\.')  # NOTE(review): compiled but never used below
pickle_regexp = re.compile(f'^{PICKLE_TAG}-(\\d+)-([^\\-\\.]+)\\.')
epoch_infos = []  # rows of [epoch_number:int, timestamp:str, logs:dict]
for file_name in files:
    matcher = pickle_regexp.match(file_name)
    if matcher is None:
        continue
    epoch_str, stamp = matcher.groups()
    path = os.path.join(SAVE_DIR, file_name)
    # Trusted input: these pickles were written by this very script.
    with open(path, 'br') as f:
        info = pickle.load(f)
    epoch_infos.append([int(epoch_str), stamp, info])

# Sort checkpoints by (epoch, timestamp).  FIX: the original built a pandas
# DataFrame, sorted it, then iterated `df.index.sort_values()` through
# positional `iloc` to read the rows back -- a fragile roundtrip that a plain
# in-place sort replaces exactly.
epoch_infos.sort(key=lambda row: (row[0], row[1]))


def show_epoch_infos():
    """Print one line per saved checkpoint: epoch number, timestamp, logs."""
    for epoch_no, stamp, logs in epoch_infos:
        print(f'epoch#{epoch_no}, dt={stamp}, {logs}')


# Interactive selection of the checkpoint to resume from.  Accepts either an
# '<epoch>-<timestamp>' label, 0 (fresh start), or -1 (latest checkpoint).
# On success, `groups` holds the chosen (epoch_str, timestamp) pair that the
# weight-loading step below consumes.
xin_regexp = re.compile(r'^(\d+)\-([^\-]+)$')
while True:
    show_epoch_infos()
    print('Which epoch as base? (Input 0 for training from scratch, -1 for follow the latest.)')
    xin = input()
    matcher = xin_regexp.match(xin)
    if matcher is None:
        try:
            EPOCH_BASE = int(xin)
            if 0 == EPOCH_BASE:
                break
            elif -1 == EPOCH_BASE:
                # BUG FIX: `groups` is later concatenated into a path, so both
                # elements must be strings.  The original assigned the raw
                # info row here, whose first element is an int -> TypeError
                # when loading the weights.
                latest = epoch_infos[-1]
                groups = (str(latest[0]), latest[1])
                EPOCH_BASE = latest[0]
                break
        except (ValueError, IndexError):
            # ValueError: input not an integer; IndexError: -1 with no
            # checkpoints available.  (Was a bare except.)
            pass
        print("Input's format is wrong! Try again!")
        continue
    groups = matcher.groups()
    file_name = PICKLE_TAG + '-' + groups[0] + '-' + groups[1] + '.pickle'
    path = os.path.join(SAVE_DIR, file_name)
    if not os.path.exists(path):
        print('Saved model with this label does not exist! Try again!')
        continue
    EPOCH_BASE = int(groups[0])
    break
print(f'EPOCH_BASE: {EPOCH_BASE}')

# Restore the weights of the chosen base epoch.  NOTE(review): `groups` leaks
# out of the selection loop above; it holds the (epoch, timestamp) strings of
# the chosen checkpoint and is only guaranteed defined when EPOCH_BASE > 0.
if EPOCH_BASE > 0:
    print('Loading ...')
    model.load_weights(SAVE_PREFIX + '-' + groups[0] + '-' + groups[1])
    print('Loaded.')

# Ask how many epochs to run.  Zero is only allowed when pre-trained weights
# were loaded above.
while True:
    print('How many epochs to train? Input 0 for directly using trained weights.')
    xin = input()
    try:
        n_epoch = int(xin)
    except ValueError:
        print('Input format wrong, try again!')
        continue
    if n_epoch < 0:
        print('Epoch should be not negative! Try again')
    elif n_epoch == 0 and EPOCH_BASE == 0:
        print('There is no trained weights loaded! Try to specify a positive epoch number!')
    else:
        EPOCHS = n_epoch
        break

if EPOCHS > 0:
    print('Loading train set ...')
    ds_train = ds_from_dir(vcode_dir_train)
    print('Loaded.')


    class MyCallback(keras.callbacks.Callback):
        """Saves the model weights and the epoch's logs dict after every epoch."""

        def on_epoch_end(self, epoch, logs=None):
            real_epoch = EPOCH_BASE + epoch + 1
            sep(f'epoch#{epoch + 1}, real epoch#{real_epoch}')

            # File tag: '<real_epoch>-<timestamp>', shared by both artifacts.
            tag = str(real_epoch) + '-' + rand_name_on_now()
            weights_path = SAVE_PREFIX + '-' + tag
            logs_path = PICKLE_PREFIX + '-' + tag + '.pickle'

            print('Saving ...')
            self.model.save_weights(weights_path)
            # Record the hyper-parameters alongside the epoch metrics.
            logs['alpha'] = ALPHA
            logs['batch_size'] = BATCH_SIZE
            with open(logs_path, 'bw') as f:
                pickle.dump(logs, f)
            print('Saved.')


    history = model.fit(
        ds_train, epochs=EPOCHS,
        validation_data=ds_test,
        callbacks=[
            keras.callbacks.TensorBoard(log_dir=LOG_DIR, update_freq='batch', profile_batch=0),
            MyCallback(),
        ],
    )

scores = model.evaluate(ds_test)
print(scores)

# Pull a single batch of test samples for the visualisation below.
x_test, y_test = next(iter(ds_test))
pred = model.predict(x_test)


def target2label(vec):
    """Concatenate a sequence of digits into a label string, e.g. [1, 2] -> '12'."""
    return ''.join(map(str, vec))


# Plot a 5x5 grid of test captchas titled 'true: predicted (V|X)'.
spr = 5
spc = 5
plt.figure(figsize=(14, 7))
for i in range(spr * spc):
    # subplot numbers are 1-based; `spn` counter from the original was
    # redundant with the loop index.
    plt.subplot(spr, spc, i + 1)
    plt.axis('off')
    plt.imshow(x_test[i], cmap='gray')
    y_true = target2label(y_test[i].numpy())
    # pred[i] is the per-digit class distribution; argmax picks each digit.
    y_pred = target2label(pred[i].argmax(axis=1))
    marker = 'V' if y_true == y_pred else 'X'
    # FIX: the original wrapped this f-string in target2label(), which is a
    # no-op on a string and only obscured the intent.
    plt.title(f'{y_true}: {y_pred} ({marker})')

plt.show()
