package com.mindspore.styletransferdemo;

import android.annotation.SuppressLint;
import android.content.Context;
import android.content.res.AssetFileDescriptor;
import android.graphics.Bitmap;
import android.os.SystemClock;
import android.util.Log;

import org.tensorflow.lite.Delegate;
import org.tensorflow.lite.Interpreter;
import org.tensorflow.lite.gpu.GpuDelegate;

import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.util.HashMap;

/**
 * Runs the two-stage TFLite artistic style transfer pipeline:
 * a "predict" model that distills a style image into a bottleneck vector,
 * and a "transfer" model that applies that bottleneck to a content image.
 *
 * <p>Model selection: float16 models when the GPU delegate is enabled,
 * int8-quantized models on CPU. Call {@link #close()} when done to release
 * the native interpreter and delegate resources.
 */
public class StyleTransferModelExecutor {

    private static final String TAG = "StyleTransferModelExecutor";
    private static final int STYLE_IMAGE_SIZE = 256;
    private static final int CONTENT_IMAGE_SIZE = 384;
    private static final int BOTTLENECK_SIZE = 100;
    private static final String STYLE_PREDICT_INT8_MODEL = "style_predict_quantized_256.tflite";
    private static final String STYLE_TRANSFER_INT8_MODEL = "style_transfer_quantized_384.tflite";
    private static final String STYLE_PREDICT_FLOAT16_MODEL = "style_predict_f16_256.tflite";
    private static final String STYLE_TRANSFER_FLOAT16_MODEL = "style_transfer_f16_384.tflite";

    private Context context;
    // Single shared GPU delegate; created lazily in getInterpreter and reused by
    // both interpreters so it can be released exactly once in close().
    private GpuDelegate gpuDelegate;
    private int numberThreads;
    private Interpreter interpreterPredict;
    private Interpreter interpreterTransform;
    private long fullExecutionTime;
    private long preProcessTime;
    private long stylePredictTime;
    private long styleTransferTime;
    private long postProcessTime;
    private boolean useGPU;

    /**
     * Loads both models from assets and builds their interpreters.
     *
     * @param context application context used to open asset files
     * @param useGPU  true to run the float16 models on the GPU delegate;
     *                false to run the int8 models on 4 CPU threads
     * @throws IllegalStateException if a model asset cannot be read
     */
    public StyleTransferModelExecutor(Context context, boolean useGPU) {
        this.context = context;
        this.useGPU = useGPU;
        this.numberThreads = 4;
        if (useGPU) {
            interpreterPredict = getInterpreter(context, STYLE_PREDICT_FLOAT16_MODEL, useGPU);
            interpreterTransform = getInterpreter(context, STYLE_TRANSFER_FLOAT16_MODEL, useGPU);
        } else {
            interpreterPredict = getInterpreter(context, STYLE_PREDICT_INT8_MODEL, useGPU);
            interpreterTransform = getInterpreter(context, STYLE_TRANSFER_INT8_MODEL, useGPU);
        }
    }

    /** Builds an interpreter for the given asset model, optionally attaching the GPU delegate. */
    private Interpreter getInterpreter(Context context, String modelName, boolean useGpu) {
        Interpreter.Options tfliteOptions = new Interpreter.Options();
        tfliteOptions.setNumThreads(this.numberThreads);
        if (useGpu) {
            // Reuse one delegate for both interpreters; creating a fresh one per
            // interpreter would leak the previous native delegate.
            if (this.gpuDelegate == null) {
                this.gpuDelegate = new GpuDelegate();
            }
            tfliteOptions.addDelegate((Delegate) this.gpuDelegate);
        }
        return new Interpreter(loadModelFile(context, modelName), tfliteOptions);
    }

    /**
     * Memory-maps a model file from the APK assets.
     *
     * @param modelFile asset file name of the .tflite model
     * @return a read-only mapping of the model bytes
     * @throws IllegalStateException if the asset cannot be opened or mapped
     */
    private MappedByteBuffer loadModelFile(Context context, String modelFile) {
        // try-with-resources: the descriptor, stream and channel must all be
        // closed; the mapping itself stays valid after the channel is closed.
        try (AssetFileDescriptor fileDescriptor = context.getAssets().openFd(modelFile);
             FileInputStream inputStream = new FileInputStream(fileDescriptor.getFileDescriptor());
             FileChannel fileChannel = inputStream.getChannel()) {
            long startOffset = fileDescriptor.getStartOffset();
            long declaredLength = fileDescriptor.getDeclaredLength();
            return fileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, declaredLength);
        } catch (IOException e) {
            // Fail fast with context instead of returning null (which previously
            // caused an opaque NPE inside the Interpreter constructor).
            Log.e(TAG, "Failed to load model file: " + modelFile, e);
            throw new IllegalStateException("Could not load model " + modelFile, e);
        }
    }

    /**
     * Applies the style of {@code styleBitmap} to {@code contentImage}.
     *
     * @param contentImage bitmap to be re-styled (resized to 384x384 internally)
     * @param styleBitmap  bitmap providing the style (resized to 256x256 internally)
     * @return the styled bitmap plus per-stage timing information
     */
    @SuppressLint("LongLogTag")
    public ModelExecutionResult execute(Bitmap contentImage, Bitmap styleBitmap) {
        Log.i(TAG, "running models");

        fullExecutionTime = SystemClock.uptimeMillis();
        preProcessTime = SystemClock.uptimeMillis();

        ByteBuffer contentArray =
                ImageUtils.bitmapToByteBuffer(contentImage, CONTENT_IMAGE_SIZE, CONTENT_IMAGE_SIZE, 0, 255);
        ByteBuffer input = ImageUtils.bitmapToByteBuffer(styleBitmap, STYLE_IMAGE_SIZE, STYLE_IMAGE_SIZE, 0, 255);

        Object[] inputsForPredict = new Object[]{input};
        HashMap<Integer, Object> outputsForPredict = new HashMap<>();

        // Output shape of the predict model: [1, 1, 1, BOTTLENECK_SIZE].
        float[][][][] styleBottleneck = new float[1][1][1][BOTTLENECK_SIZE];
        outputsForPredict.put(0, styleBottleneck);

        preProcessTime = SystemClock.uptimeMillis() - preProcessTime;
        stylePredictTime = SystemClock.uptimeMillis();

        // The results of this inference could be reused given the style does not change.
        // That would be a good practice in case this was applied to a video stream.
        interpreterPredict.runForMultipleInputsOutputs(inputsForPredict, outputsForPredict);
        stylePredictTime = SystemClock.uptimeMillis() - stylePredictTime;
        Log.d(TAG, "Style Predict Time to run: " + stylePredictTime);

        Object[] inputsForStyleTransfer = new Object[]{contentArray, styleBottleneck};
        HashMap<Integer, Object> outputsForStyleTransfer = new HashMap<>();
        // Output shape of the transfer model: [1, 384, 384, 3] (RGB image).
        float[][][][] outputImage = new float[1][CONTENT_IMAGE_SIZE][CONTENT_IMAGE_SIZE][3];
        outputsForStyleTransfer.put(0, outputImage);

        styleTransferTime = SystemClock.uptimeMillis();
        interpreterTransform.runForMultipleInputsOutputs(inputsForStyleTransfer, outputsForStyleTransfer);
        styleTransferTime = SystemClock.uptimeMillis() - styleTransferTime;
        Log.d(TAG, "Style apply Time to run: " + styleTransferTime);

        postProcessTime = SystemClock.uptimeMillis();
        Bitmap styledImage =
                ImageUtils.convertArrayToBitmap(outputImage, CONTENT_IMAGE_SIZE, CONTENT_IMAGE_SIZE);
        postProcessTime = SystemClock.uptimeMillis() - postProcessTime;

        fullExecutionTime = SystemClock.uptimeMillis() - fullExecutionTime;
        Log.d(TAG, "Time to run everything: " + fullExecutionTime);

        return new ModelExecutionResult(styledImage,
                preProcessTime,
                stylePredictTime,
                styleTransferTime,
                postProcessTime,
                fullExecutionTime,
                formatExecutionLog());
    }

    /**
     * Releases the native resources held by the interpreters and the GPU delegate.
     * Safe to call more than once; the executor must not be used afterwards.
     */
    public void close() {
        if (interpreterPredict != null) {
            interpreterPredict.close();
            interpreterPredict = null;
        }
        if (interpreterTransform != null) {
            interpreterTransform.close();
            interpreterTransform = null;
        }
        if (gpuDelegate != null) {
            gpuDelegate.close();
            gpuDelegate = null;
        }
    }

    /** Builds a human-readable summary of the last execution's configuration and timings. */
    private String formatExecutionLog() {
        StringBuilder sb = new StringBuilder();
        sb.append("Input Image Size:").append(CONTENT_IMAGE_SIZE * CONTENT_IMAGE_SIZE)
                .append("\nGPU enabled:").append(useGPU)
                .append("\nNumber of threads: ").append(numberThreads)
                .append("\nPre-process execution time: ").append(preProcessTime).append(" ms")
                .append("\nPredicting style execution time: ").append(stylePredictTime).append(" ms")
                .append("\nTransferring style execution time: ").append(styleTransferTime).append(" ms")
                .append("\nPost-process execution time: ").append(postProcessTime).append(" ms")
                .append("\nFull execution time: ").append(fullExecutionTime).append(" ms");
        return sb.toString();
    }

}
