import argparse
import os
import tensorflow as tf
from tensorflow import keras as K
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from alexnet import AlexNet


# Silence TensorFlow's C++ INFO/WARNING log messages (2 = errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "2"
parser = argparse.ArgumentParser(description='homework 5')
parser.add_argument('--dataset', type=str, default="mnist",
                    help="{mnist, cifar10}")  # anything not "mnist" selects cifar10
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--n_class', type=int, default=10)  # both datasets have 10 classes
parser.add_argument('--epoch', type=int, default=5)
args = parser.parse_args()
# os.system("clear")


# Select the dataset by name; any value other than "mnist" falls back
# to CIFAR-10.
dataset = K.datasets.mnist if args.dataset == "mnist" else K.datasets.cifar10
(x_train, y_train), (x_test, y_test) = dataset.load_data()
print(x_train.max(), x_test.min())  # raw pixel range is 255 / 0
# Scale pixels from [0, 255] into [0, 1].
x_train, x_test = x_train / 255.0, x_test / 255.0
print("data:", type(x_train), x_train.shape, y_test.shape)


# tf.data pipelines: shuffle the training split (buffer of 10k samples),
# batch both splits with the CLI batch size.
train_ds = (tf.data.Dataset
            .from_tensor_slices((x_train, y_train))
            .shuffle(10000)
            .batch(args.batch_size))
test_ds = (tf.data.Dataset
           .from_tensor_slices((x_test, y_test))
           .batch(args.batch_size))


class Net(K.Model):
    """AlexNet backbone followed by a linear classification head.

    ``call`` returns *both* the penultimate feature vector (used later for
    the t-SNE plot) and the raw class logits (the loss is configured with
    ``from_logits=True``, so no softmax is applied here).
    """

    def __init__(self):
        super().__init__()  # modern zero-arg super (Py3)
        self.base_net = AlexNet()
        self.clf = K.layers.Dense(args.n_class)  # raw logits, no activation

    def call(self, x, training=False):
        """Forward pass.

        Args:
            x: image batch, resized to AlexNet's expected input upstream.
            training: forwarded to the backbone (affects dropout/BN if any).

        Returns:
            (features, logits) tuple.
        """
        # Pass `training` as a keyword: Keras layers expect it by name,
        # which is robust regardless of the backbone's positional signature.
        fea = self.base_net(x, training=training)
        logit = self.clf(fea)
        return fea, logit


model = Net()

# `Sparse` variant: labels are integer class ids, NOT one-hot vectors.
# `from_logits=True`: the model's head emits raw scores without softmax.
criterion = K.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = K.optimizers.Adam()

# Running metrics; reset at the start of every epoch in the training loop.
train_loss = K.metrics.Mean(name='train_loss')
train_accuracy = K.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = K.metrics.Mean(name='test_loss')
test_accuracy = K.metrics.SparseCategoricalAccuracy(name='test_accuracy')


@tf.function
def train_step(images, labels):
    """Run one optimization step and update the training metrics.

    Returns the scalar batch loss (so the caller can record a loss curve).
    """
    with tf.GradientTape() as tape:
        _, logits = model(images, True)
        batch_loss = criterion(labels, logits)
    grads = tape.gradient(batch_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))

    train_loss(batch_loss)
    train_accuracy(labels, logits)
    return batch_loss


@tf.function
def test_step(images, labels):
    """Evaluate one batch: update the test metrics and count correct hits.

    Returns:
        float32 scalar tensor — the number of correct predictions in the batch.
    """
    _, pred = model(images)
    t_loss = criterion(labels, pred)

    test_loss(t_loss)
    test_accuracy(labels, pred)

    pred = tf.argmax(pred, axis=1)  # [batch]
    # BUG FIX: CIFAR-10 labels come out of load_data() with shape
    # [batch, 1]; comparing them to the [batch]-shaped predictions would
    # broadcast to [batch, batch] and wildly inflate the count. Flatten
    # the labels first (a no-op for MNIST's already-flat labels).
    labels = tf.reshape(tf.cast(labels, "int64"), [-1])
    n_correct = tf.reduce_sum(tf.cast(pred == labels, "float32"))
    return n_correct


def _to_alexnet_input(images):
    """Adapt a batch to AlexNet's expected input.

    MNIST batches are [n, 28, 28] grayscale: add a channel axis and tile
    it to 3 channels so they look like RGB. All batches are then resized
    to AlexNet's 227x227 input resolution.
    """
    if args.dataset == "mnist":
        images = tf.expand_dims(images, 3)                   # [n, 28, 28, 1]
        images = tf.tile(images, tf.constant([1, 1, 1, 3]))  # [n, 28, 28, 3]
    return tf.image.resize(images, [227, 227])


loss_list, acc_list = [], []
for epoch in range(args.epoch):
    # Reset the running metrics at the start of each epoch.
    train_loss.reset_states()
    train_accuracy.reset_states()
    test_loss.reset_states()
    test_accuracy.reset_states()

    # Train: record the per-batch loss for the loss curve.
    for images, labels in train_ds:
        batch_loss = train_step(_to_alexnet_input(images), labels)
        loss_list.append(batch_loss.numpy())

    # Evaluate: accumulate correct predictions for the per-epoch accuracy.
    n_corr = 0
    for images, labels in test_ds:
        n_corr += test_step(_to_alexnet_input(images), labels).numpy()
    acc_list.append(n_corr / y_test.shape[0])

    template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
    print(template.format(epoch+1,
                          train_loss.result(),
                          train_accuracy.result()*100,
                          test_loss.result(),
                          test_accuracy.result()*100))


def _save_curve(values, title, path):
    """Plot *values* against their index and write the figure to *path*."""
    fig = plt.figure()
    plt.title(title)
    plt.plot(np.arange(len(values)), values)
    fig.savefig(path)


# Per-batch training loss curve and per-epoch test accuracy curve.
_save_curve(loss_list,
            "loss-{}".format(args.dataset),
            "loss.{}.png".format(args.dataset))
_save_curve(acc_list,
            "accuracy-{}".format(args.dataset),
            "accuracy.{}.png".format(args.dataset))

# t-SNE: embed penultimate-layer features from the first 7 test batches
# (indices 0..6) into 2-D and save a scatter plot colored by label.
feature_batches, label_batches = [], []
for batch_idx, (images, labels) in enumerate(test_ds):
    if args.dataset == "mnist":
        images = tf.expand_dims(images, 3)
        images = tf.tile(images, tf.constant([1, 1, 1, 3]))
    images = tf.image.resize(images, [227, 227])
    features, _ = model(images)
    feature_batches.append(features.numpy())
    label_batches.append(labels)
    if batch_idx > 5:
        break

F = np.vstack(feature_batches)
Y = np.concatenate(label_batches).flatten()
print("Y:", Y.shape)
tsne = TSNE(n_components=2, init="pca", random_state=0)
F = tsne.fit_transform(F)
# Min-max normalize each embedding axis to [0, 1] for a tidy plot.
axis_min, axis_max = np.min(F, 0), np.max(F, 0)
F = (F - axis_min) / (axis_max - axis_min)
fig = plt.figure()
plt.title("T-SNE-{}".format(args.dataset))
plt.scatter(F[:, 0], F[:, 1], c=Y, cmap="coolwarm")
fig.savefig("tsne.{}.png".format(args.dataset))

