import math

from toolz import curry

import numpy as np
import scipy.io as sio

import tensorflow as tf
from tensorflow.keras import Model, Input
from tensorflow.keras.metrics import Mean

import tensorflow_addons as tfa

from hanser.tpu import setup
from hanser.datasets import prepare
from hanser.train.lr_schedule import CosineLR
from hanser.train.cls import SuperLearner

from modelDesign import Encoder, Decoder
from transform import cutmix_batch
from metric import NMSE, nmse

# Load the raw 4T4R channel data and reshape each sample to a (24, 16, 2)
# real-valued "image"; the last 60k samples are held out for evaluation.
mat = sio.loadmat("../raw_data/H_4T4R.mat")
data = np.reshape(mat["H_4T4R"].astype('float32'), (-1, 24, 16, 2))
x_train, x_test = data[:-60000], data[-60000:]

# data = np.load("../user_data/H_4T4R.npy")
# x_train, x_test = data[:200], data[600:800]

@curry
def transform(input, training):
    """Per-sample transform: the autoencoder's target is the input itself.

    `training` is accepted for API symmetry with `prepare` but is unused.
    """
    return input, input


def batch_transform(input, target):
    """Batch-level CutMix augmentation.

    NOTE(review): `target` is dropped here — presumably `cutmix_batch` emits a
    fresh (input, target) pair and target == input for the autoencoder; confirm
    against `cutmix_batch`'s definition.
    """
    mixed = cutmix_batch(input, alpha=0.2, hard=True)
    return mixed

mul = 1  # data-parallel multiplier; 8 on TPU
batch_size = 64 * mul
eval_batch_size = 2 * batch_size

num_train_examples = len(x_train)
num_test_examples = len(x_test)
steps_per_epoch = num_train_examples // batch_size
test_steps = math.ceil(num_test_examples / eval_batch_size)

# Build tf.data pipelines; `prepare` applies the per-sample/batch transforms,
# shuffling (train only) and batching, then `setup` distributes them.
ds_train = prepare(tf.data.Dataset.from_tensor_slices((x_train,)), batch_size,
                   transform(training=True), batch_transform=batch_transform,
                   training=True, buffer_size=1000000)
ds_test = prepare(tf.data.Dataset.from_tensor_slices((x_test,)), eval_batch_size,
                  transform(training=False), training=False)
ds_train, ds_test = setup([ds_train, ds_test], fp16=False)

feedback_bits = 375
B = 3
input_shape = (24, 16, 2)

# Encoder: (24, 16, 2) channel sample -> `feedback_bits` code (plus aux output rx1).
enc_in = Input(shape=input_shape, name="encoder_input")
enc_out, rx1 = Encoder(enc_in, 128, 2, rezero=True, feedback_bits=feedback_bits, B=B)
encoder = Model(inputs=enc_in, outputs=[enc_out, rx1], name='encoder')

# Decoder: feedback code -> reconstructed channel sample (plus aux output rx2).
dec_in = Input(shape=(feedback_bits,), name='decoder_input')
dec_out, rx2 = Decoder(dec_in, 128, 27, rezero=True, feedback_bits=feedback_bits, B=B)
decoder = Model(inputs=dec_in, outputs=[dec_out, rx2], name="decoder")

# End-to-end autoencoder exposing the reconstruction and both aux outputs,
# so the loss can tie the encoder/decoder auxiliary branches together.
ae_in = Input(shape=input_shape, name="original_img")
code, rx1 = encoder(ae_in)
recon, rx2 = decoder(code)
model = Model(inputs=ae_in, outputs=[recon, rx1, rx2], name='autoencoder')
model.build((None, *input_shape))
model.summary()

def criterion(y_true, preds):
    """Total loss: reconstruction NMSE plus MSE between the two auxiliary
    model outputs (rx1 from the encoder, rx2 from the decoder)."""
    y_pred, rx1, rx2 = preds
    recon_loss = nmse(y_true, y_pred)
    aux_loss = tf.keras.losses.mean_squared_error(rx1, rx2)
    return recon_loss + aux_loss

# Optimizer: LAMB with decoupled weight decay, driven by a cosine LR schedule
# with a 5-epoch linear warmup. Base LR is scaled by the replica multiplier.
# Fix: variable was misspelled `lr_shcedule`.
base_lr = 1e-3
epochs = 75
# epochs = 5
lr_schedule = CosineLR(base_lr * mul, steps_per_epoch, epochs=epochs,
                       min_lr=0, warmup_epoch=5, warmup_min_lr=0)
optimizer = tfa.optimizers.LAMB(lr_schedule, weight_decay_rate=1e-3)

# Training tracks the mean loss; evaluation reports NMSE.
train_metrics = {'loss': Mean()}
eval_metrics = {'nmse': NMSE()}

learner = SuperLearner(model, criterion, optimizer,
                       train_metrics=train_metrics,
                       eval_metrics=eval_metrics,
                       work_dir="../user_data/CIF",
                       multiple_steps=True)

# Validate every epoch; the train iterator is reused across epochs.
learner.fit(ds_train, epochs, ds_test, val_freq=1,
            steps_per_epoch=steps_per_epoch, val_steps=test_steps,
            reuse_train_iterator=True)

# Persist the two halves separately so the encoder and decoder can be
# loaded independently at inference/submission time.
encoder.save_weights("../user_data/encoder_m.h5")
decoder.save_weights("../user_data/decoder_m.h5")