{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model loaded successfully!\n",
      "Model: \"sequential_14\"\n",
      "_________________________________________________________________\n",
      " Layer (type)                Output Shape              Param #   \n",
      "=================================================================\n",
      " layer1 (Dense)              (None, 20)                120       \n",
      "                                                                 \n",
      " layer3 (Dense)              (None, 7)                 140       \n",
      "                                                                 \n",
      "=================================================================\n",
      "Total params: 260\n",
      "Trainable params: 260\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "\n",
    "# Path to the saved full Keras model (architecture + weights + optimizer state)\n",
    "h5file = 'savemodel/lhs_3_size50000_noembed_full.h5'\n",
    "\n",
    "try:\n",
    "    # Load without the stored training configuration, then attach a fresh\n",
    "    # optimizer/loss so the model is ready for evaluation.\n",
    "    model = tf.keras.models.load_model(h5file, compile=False)\n",
    "    model.compile(\n",
    "        optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),\n",
    "        loss='mse',\n",
    "        metrics=['mae']\n",
    "    )\n",
    "\n",
    "    print(\"Model loaded successfully!\")\n",
    "    model.summary()\n",
    "\n",
    "except FileNotFoundError:\n",
    "    print(f\"Error: File {h5file} not found\")\n",
    "except Exception as e:\n",
    "    print(f\"Error loading model: {str(e)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:absl:Found untraced functions such as _update_step_xla while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: C:\\Users\\forla\\AppData\\Local\\Temp\\tmp0qgk2ybx\\assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: C:\\Users\\forla\\AppData\\Local\\Temp\\tmp0qgk2ybx\\assets\n",
      "C:\\Users\\forla\\AppData\\Roaming\\Python\\Python39\\site-packages\\tensorflow\\lite\\python\\convert.py:789: UserWarning: Statistics for quantized inputs were expected, but not specified; continuing anyway.\n",
      "  warnings.warn(\"Statistics for quantized inputs were expected, but not \"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model quantized and saved successfully!\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "\n",
    "# Representative dataset generator for post-training int8 quantization.\n",
    "# The converter feeds these samples through the model to calibrate the\n",
    "# activation ranges used to pick per-tensor scale/zero-point values.\n",
    "def representative_dataset():\n",
    "    import pandas as pd\n",
    "    # Only the first 100 rows are needed for calibration; nrows=100 avoids\n",
    "    # loading the full 50,000-row CSV just to discard 49,900 rows.\n",
    "    dataset = pd.read_csv('data/lhs_3_size50000.csv', nrows=100).values.astype('float32')\n",
    "    for i in range(100):\n",
    "        # First 6 columns are the model inputs; yield one [1, 6] sample at a time\n",
    "        yield [dataset[i:i+1, :6]]\n",
    "\n",
    "# Configure full-integer (int8) quantization: int8 ops only, int8 I/O\n",
    "converter = tf.lite.TFLiteConverter.from_keras_model(model)\n",
    "converter.optimizations = [tf.lite.Optimize.DEFAULT]\n",
    "converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n",
    "converter.inference_input_type = tf.int8\n",
    "converter.inference_output_type = tf.int8\n",
    "converter.representative_dataset = representative_dataset\n",
    "\n",
    "# Convert and write the quantized model to disk\n",
    "quantized_tflite_model = converter.convert()\n",
    "with open('savemodel/quantized_model_int8.tflite', 'wb') as f:\n",
    "    f.write(quantized_tflite_model)\n",
    "\n",
    "print(\"Model quantized and saved successfully!\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Performance Comparison:\n",
      "Original Model MAE: 3.368353\n",
      "Quantized Model MAE: 3.368718\n",
      "Original Model Inference Time: 0.73s\n",
      "Quantized Model Inference Time: 0.80s\n",
      "Original Model Size: 26.06KB\n",
      "Quantized Model Size: 1.90KB\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "import time\n",
    "import os\n",
    "\n",
    "# Paths reused for both loading and the size comparison at the end.\n",
    "# (Fixes a bug where the size report read 'lhs_2_...' instead of the\n",
    "# 'lhs_3_...' model actually being benchmarked.)\n",
    "original_model_path = 'savemodel/lhs_3_size50000_noembed_full.h5'\n",
    "quantized_model_path = 'savemodel/quantized_model_int8.tflite'\n",
    "\n",
    "# Load data: first 6 columns are model inputs, the remaining columns are targets\n",
    "data = pd.read_csv('data/lhs_3_size50000.csv')\n",
    "X = data.iloc[:, :6].values.astype('float32')\n",
    "y = data.iloc[:, 6:].values.astype('float32')\n",
    "\n",
    "# Load original (float32) Keras model\n",
    "original_model = tf.keras.models.load_model(original_model_path)\n",
    "\n",
    "# Load quantized TFLite model\n",
    "interpreter = tf.lite.Interpreter(model_path=quantized_model_path)\n",
    "interpreter.allocate_tensors()\n",
    "\n",
    "# Get input and output tensor metadata (dtype, shape, quantization params)\n",
    "input_details = interpreter.get_input_details()\n",
    "output_details = interpreter.get_output_details()\n",
    "\n",
    "def test_original_model(X):\n",
    "    \"\"\"Run the float32 model on X; return (predictions, elapsed seconds).\"\"\"\n",
    "    start_time = time.time()\n",
    "    predictions = original_model.predict(X, verbose=0)\n",
    "    end_time = time.time()\n",
    "    return predictions, end_time - start_time\n",
    "\n",
    "def test_quantized_model(X):\n",
    "    \"\"\"Run the int8 TFLite model row by row on X; return (predictions, elapsed seconds).\n",
    "\n",
    "    Inputs are quantized float32 -> int8 (q = round(x/scale + zero_point))\n",
    "    and outputs dequantized int8 -> float32 using the model's stored params.\n",
    "    \"\"\"\n",
    "    start_time = time.time()\n",
    "    predictions = []\n",
    "\n",
    "    # Quantization parameters of the (single) input and output tensors\n",
    "    input_scale = input_details[0]['quantization_parameters']['scales'][0]\n",
    "    input_zero_point = input_details[0]['quantization_parameters']['zero_points'][0]\n",
    "    output_scale = output_details[0]['quantization_parameters']['scales'][0]\n",
    "    output_zero_point = output_details[0]['quantization_parameters']['zero_points'][0]\n",
    "    \n",
    "    for i in range(len(X)):\n",
    "        # Quantize the input from float32 to int8, clipping to the int8 range\n",
    "        input_data = X[i:i+1]\n",
    "        input_data_quantized = np.round(input_data / input_scale + input_zero_point)\n",
    "        input_data_quantized = np.clip(input_data_quantized, -128, 127).astype(np.int8)\n",
    "        \n",
    "        # Run inference on a single sample (model input shape is [1, 6])\n",
    "        interpreter.set_tensor(input_details[0]['index'], input_data_quantized)\n",
    "        interpreter.invoke()\n",
    "        \n",
    "        # Get output tensor and dequantize from int8 back to float32\n",
    "        output_data = interpreter.get_tensor(output_details[0]['index'])\n",
    "        output_data_float = (output_data.astype(np.float32) - output_zero_point) * output_scale\n",
    "        predictions.append(output_data_float)\n",
    "    \n",
    "    predictions = np.vstack(predictions)\n",
    "    end_time = time.time()\n",
    "    return predictions, end_time - start_time\n",
    "\n",
    "# Run both models over the full dataset\n",
    "orig_pred, orig_time = test_original_model(X)\n",
    "quant_pred, quant_time = test_quantized_model(X)\n",
    "\n",
    "# Mean absolute error against the ground-truth targets\n",
    "orig_mae = np.mean(np.abs(y - orig_pred))\n",
    "quant_mae = np.mean(np.abs(y - quant_pred))\n",
    "\n",
    "# Print results\n",
    "print(\"\\nPerformance Comparison:\")\n",
    "print(f\"Original Model MAE: {orig_mae:.6f}\")\n",
    "print(f\"Quantized Model MAE: {quant_mae:.6f}\")\n",
    "print(f\"Original Model Inference Time: {orig_time:.2f}s\")\n",
    "print(f\"Quantized Model Inference Time: {quant_time:.2f}s\")\n",
    "print(f\"Original Model Size: {os.path.getsize(original_model_path)/1024:.2f}KB\")\n",
    "print(f\"Quantized Model Size: {os.path.getsize(quantized_model_path)/1024:.2f}KB\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Quantization Parameters:\n",
      "Input Scale: [0.01167953]\n",
      "Input Zero Point: [0]\n",
      "\n",
      "Output Scale: [0.00390625]\n",
      "Output Zero Point: [-128]\n"
     ]
    }
   ],
   "source": [
    "# Reload the quantized model and report its input/output quantization\n",
    "# parameters (scale and zero-point used for int8 <-> float conversion)\n",
    "interpreter = tf.lite.Interpreter(model_path='savemodel/quantized_model_int8.tflite')\n",
    "interpreter.allocate_tensors()\n",
    "\n",
    "input_details = interpreter.get_input_details()\n",
    "output_details = interpreter.get_output_details()\n",
    "\n",
    "# Pull out the quantization dicts once instead of indexing repeatedly\n",
    "input_qp = input_details[0]['quantization_parameters']\n",
    "output_qp = output_details[0]['quantization_parameters']\n",
    "\n",
    "print(\"\\nQuantization Parameters:\")\n",
    "print(\"Input Scale:\", input_qp['scales'])\n",
    "print(\"Input Zero Point:\", input_qp['zero_points'])\n",
    "print(\"\\nOutput Scale:\", output_qp['scales'])\n",
    "print(\"Output Zero Point:\", output_qp['zero_points'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[{'name': 'serving_default_layer1_input:0',\n",
       "  'index': 0,\n",
       "  'shape': array([1, 6]),\n",
       "  'shape_signature': array([-1,  6]),\n",
       "  'dtype': numpy.int8,\n",
       "  'quantization': (0.011679529212415218, 0),\n",
       "  'quantization_parameters': {'scales': array([0.01167953], dtype=float32),\n",
       "   'zero_points': array([0]),\n",
       "   'quantized_dimension': 0},\n",
       "  'sparsity_parameters': {}},\n",
       " {'name': 'sequential_14/layer3/MatMul',\n",
       "  'index': 1,\n",
       "  'shape': array([ 7, 20]),\n",
       "  'shape_signature': array([ 7, 20]),\n",
       "  'dtype': numpy.int8,\n",
       "  'quantization': (0.02093064971268177, 0),\n",
       "  'quantization_parameters': {'scales': array([0.02093065], dtype=float32),\n",
       "   'zero_points': array([0]),\n",
       "   'quantized_dimension': 0},\n",
       "  'sparsity_parameters': {}},\n",
       " {'name': 'sequential_14/layer1/MatMul',\n",
       "  'index': 2,\n",
       "  'shape': array([20,  6]),\n",
       "  'shape_signature': array([20,  6]),\n",
       "  'dtype': numpy.int8,\n",
       "  'quantization': (0.01701778545975685, 0),\n",
       "  'quantization_parameters': {'scales': array([0.01701779], dtype=float32),\n",
       "   'zero_points': array([0]),\n",
       "   'quantized_dimension': 0},\n",
       "  'sparsity_parameters': {}},\n",
       " {'name': 'sequential_14/layer1/MatMul;sequential_14/layer1/Relu',\n",
       "  'index': 3,\n",
       "  'shape': array([ 1, 20]),\n",
       "  'shape_signature': array([-1, 20]),\n",
       "  'dtype': numpy.int8,\n",
       "  'quantization': (0.027647482231259346, -128),\n",
       "  'quantization_parameters': {'scales': array([0.02764748], dtype=float32),\n",
       "   'zero_points': array([-128]),\n",
       "   'quantized_dimension': 0},\n",
       "  'sparsity_parameters': {}},\n",
       " {'name': 'sequential_14/layer3/MatMul1',\n",
       "  'index': 4,\n",
       "  'shape': array([1, 7]),\n",
       "  'shape_signature': array([-1,  7]),\n",
       "  'dtype': numpy.int8,\n",
       "  'quantization': (0.3314700424671173, 2),\n",
       "  'quantization_parameters': {'scales': array([0.33147004], dtype=float32),\n",
       "   'zero_points': array([2]),\n",
       "   'quantized_dimension': 0},\n",
       "  'sparsity_parameters': {}},\n",
       " {'name': 'StatefulPartitionedCall:0',\n",
       "  'index': 5,\n",
       "  'shape': array([1, 7]),\n",
       "  'shape_signature': array([-1,  7]),\n",
       "  'dtype': numpy.int8,\n",
       "  'quantization': (0.00390625, -128),\n",
       "  'quantization_parameters': {'scales': array([0.00390625], dtype=float32),\n",
       "   'zero_points': array([-128]),\n",
       "   'quantized_dimension': 0},\n",
       "  'sparsity_parameters': {}}]"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Dump the full tensor table of the quantized model (weights, activations,\n",
    "# and I/O tensors) with each tensor's dtype, shape, and scale/zero-point,\n",
    "# for manual inspection of how each layer was quantized.\n",
    "tensor_details = interpreter.get_tensor_details()\n",
    "tensor_details"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tf2",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
