# from model import BaseModel

import tensorflow as tf
from tensorflow import keras
# from tensorflow.python.client import device_lib
import tempfile
import numpy as np
import json
import base64
import argparse
import zipfile
import os
import io
from PIL import Image
import csv
import logging
import time
import msgpack

class InvalidDatasetFormatException(Exception):
    """Raised when a dataset zip lacks a readable images.csv manifest."""


# Module-level logger shared by the model class below.
logger = logging.getLogger('Vgg16')
class Vgg16Model(object):
    """VGG16 image classifier backed by a private TF1 graph and session.

    The model trains on a zipped dataset (image files plus an ``images.csv``
    manifest), serializes itself to a single msgpack blob (base64-encoded
    Keras h5 model + pre-processing parameters), and answers single-image
    prediction queries with a string-encoded probability vector.
    """

    def __init__(self, **knobs):
        """Create an untrained model.

        knobs:
            image_size (int): square side images are resized to (default 32).
        """
        # Knobs are consumed here; do NOT forward them to object.__init__,
        # which rejects keyword arguments (TypeError for any non-empty knobs).
        super().__init__()
        config = tf.ConfigProto()
        # config.gpu_options.allow_growth = True
        # A private graph + session lets several instances coexist in one
        # process without clobbering the TF default graph.
        self._graph = tf.Graph()
        self._sess = tf.Session(graph=self._graph, config=config)
        self._image_size = int(knobs.get('image_size', 32))

    def train(self, dataset_path: str, **train_args):
        """Train a fresh VGG16 on the zipped dataset at ``dataset_path``.

        train_args:
            image_size (int): resize target (defaults to constructor value).
            batch_size (int): minibatch size (default 16).
            max_epochs (int): maximum training epochs (default 1).
            learning_rate (float): Adam learning rate (default 0.01).
        """
        # Fall back to the constructor's image size instead of crashing:
        # int(None) would raise TypeError when the caller omits image_size.
        self._image_size = int(train_args.get('image_size', self._image_size))
        image_size = self._image_size
        batch_size = train_args.get('batch_size', 16)
        max_epochs = train_args.get('max_epochs', 1)
        learning_rate = train_args.get('learning_rate', 0.01)

        logger.info('Begin loading train dataset')
        pil_images, image_classes, _num_samples, num_classes = \
            self._load_dataset(dataset_path, 'RGB')
        images, classes = self._to_arrays(pil_images, image_classes, image_size)
        logger.info('Finish loading train dataset')

        with self._graph.as_default():
            with self._sess.as_default():
                self._model = self._build_model(num_classes, image_size, learning_rate)
                logger.info("Start model training")
                self._model.fit(
                    images,
                    classes,
                    epochs=max_epochs,
                    validation_split=0.05,
                    batch_size=batch_size,
                    callbacks=[
                        # Stop early once validation loss stalls for 2 epochs.
                        tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2),
                    ]
                )
                logger.info("Model training Finished")
                # Compute train accuracy
                (loss, accuracy) = self._model.evaluate(images, classes)
                logger.info('Train loss: {}'.format(loss))
                logger.info('Train accuracy: {}'.format(accuracy))

    def evaluate(self, dataset_path: str) -> float:
        """Evaluate the trained model on a zipped dataset; returns accuracy."""
        logger.info('Begin loading evaluate dataset')
        pil_images, image_classes, _num_samples, _num_classes = \
            self._load_dataset(dataset_path, 'RGB')
        images, classes = self._to_arrays(pil_images, image_classes, self._image_size)
        logger.info('Finish loading evaluate dataset')
        with self._graph.as_default():
            with self._sess.as_default():
                (loss, accuracy) = self._model.evaluate(images, classes)
                return accuracy

    def predict(self, query):
        """Classify one raw image.

        query: bytes of an encoded image file (e.g. PNG/JPEG).
        Returns the class-probability vector serialized as '[p0,p1,...]'.
        """
        pil_image = Image.open(io.BytesIO(query)).convert('RGB')
        images = np.asarray(
            [np.asarray(pil_image.resize([self._image_size, self._image_size]))])
        with self._graph.as_default():
            with self._sess.as_default():
                probs = self._model.predict(images)
        # Serialize the first (only) row as a bracketed comma-joined string.
        return '[' + ','.join(str(p) for p in probs[0].tolist()) + ']'

    def save(self, path):
        """Serialize model weights + pre-processing params to ``path``.

        Format: msgpack dict with 'model_base64' (base64 of the Keras h5
        file) and 'image_size'. Mirrors load().
        """
        params = {}
        with tempfile.NamedTemporaryFile() as tmp:
            # Save the whole model to the temp h5 file...
            with self._graph.as_default():
                with self._sess.as_default():
                    self._model.save(tmp.name)
            # ...then read it back and base64-encode so it fits in msgpack.
            with open(tmp.name, 'rb') as model_file:
                h5_model_bytes = model_file.read()
            params['model_base64'] = base64.b64encode(h5_model_bytes).decode('utf-8')
        # Pre-processing params needed to reproduce inference-time resizing.
        params['image_size'] = self._image_size
        params_bytes = msgpack.packb(params, use_bin_type=True)
        with open(path, 'wb') as out_file:
            out_file.write(params_bytes)

    def load(self, path):
        """Restore model + pre-processing params previously written by save()."""
        with open(path, 'rb') as in_file:
            params = msgpack.unpackb(in_file.read(), raw=False)
        with tempfile.NamedTemporaryFile() as tmp:
            # Decode base64 back into h5 bytes and round-trip through a temp
            # file, since keras.models.load_model wants a filesystem path.
            model_bytes = base64.b64decode(params['model_base64'].encode('utf-8'))
            with open(tmp.name, 'wb') as model_file:
                model_file.write(model_bytes)
            with self._graph.as_default():
                with self._sess.as_default():
                    self._model = keras.models.load_model(tmp.name)
        self._image_size = int(params['image_size'])

    def load_checkpoint(self, ckpt):
        """Checkpointing is not supported; intentionally a no-op."""
        pass

    def save_checkpoint(self, ckpt):
        """Checkpointing is not supported; intentionally a no-op."""
        pass

    def destroy(self):
        """No external resources to release; intentionally a no-op."""
        pass

    def _to_arrays(self, pil_images, image_classes, image_size):
        """Resize PIL images and convert images/labels to numpy arrays.

        Returns (images, classes): uint8 image batch and one-hot labels.
        """
        resized = [x.resize([image_size, image_size]) for x in pil_images]
        images = np.asarray([np.asarray(x) for x in resized])
        classes = np.asarray(keras.utils.to_categorical(image_classes))
        return images, classes

    def _build_model(self, num_classes, image_size, lr):
        """Build and compile an untrained VGG16 classifier.

        weights=None: train from scratch rather than ImageNet weights.
        """
        model = keras.applications.VGG16(
            include_top=True,
            input_shape=(image_size, image_size, 3),
            weights=None,
            classes=num_classes
        )
        model.compile(
            optimizer=keras.optimizers.Adam(lr=lr),
            loss='categorical_crossentropy',
            metrics=['accuracy']
        )
        return model

    def _load_dataset(self, dataset_path, mode):
        """Extract a zipped dataset and load its images and integer labels.

        Expects an ``images.csv`` manifest at the zip root with ``path`` and
        ``class`` columns. Returns (pil_images, image_classes, num_samples,
        num_classes). Raises InvalidDatasetFormatException if the manifest
        is missing or malformed.
        """
        # Unzip into a temp directory that is cleaned up automatically.
        with tempfile.TemporaryDirectory() as d:
            with zipfile.ZipFile(dataset_path, 'r') as dataset_zipfile:
                dataset_zipfile.extractall(path=d)

            images_csv_path = os.path.join(d, 'images.csv')
            try:
                with open(images_csv_path, mode='r') as manifest:
                    reader = csv.DictReader(manifest)
                    (image_paths, image_classes) = zip(
                        *[(row['path'], int(row['class'])) for row in reader])
            except Exception as e:
                # Chain the cause so the original parsing error is not lost.
                raise InvalidDatasetFormatException() from e

            # Images must be fully loaded before the temp dir disappears.
            full_image_paths = [os.path.join(d, x) for x in image_paths]
            pil_images = _load_pil_images(full_image_paths, mode=mode)

        num_classes = len(set(image_classes))
        num_samples = len(image_paths)

        return (pil_images, image_classes, num_samples, num_classes)

def _load_pil_images(image_paths, mode='RGB'):
    """Decode each image file on disk into a PIL image in the given mode."""
    loaded = []
    for path in image_paths:
        # Read the whole file into memory first so PIL decodes from a
        # buffer and the file handle can be closed immediately.
        with open(path, 'rb') as image_file:
            buffer = io.BytesIO(image_file.read())
            loaded.append(Image.open(buffer).convert(mode))
    return loaded

if __name__ == "__main__":
    # Smoke test: restore a previously saved model and classify one image.
    model = Vgg16Model()
    # model.train('/home/luqin2/fashion_mnist_for_image_classification_train.zip',**{'image_size':32, 'batch_size':32, 'max_epochs': 1, 'learning_rate': 0.1})
    # model.save("/home/luqin2/param/test2")
    model.load("/home/luqin2/param/test2")
    with open('/home/luqin2/0-1.png', 'rb') as query_file:
        prediction = model.predict(query_file.read())
        print(prediction)