import math
import numpy as np
import re
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.quantization import fake_quant_with_min_max_vars

# Encoded layer configuration. Alternate configs are kept as comments; only
# the last uncommented assignment is active.
# attr_str = "inputNHWC_1x224x224x3_outputNHWC_1x112x112x32_kernelHW_3x3_strideHW_2x2_padTopBottomLeftRight_0x1x0x1_dilationHW_1x1"
attr_str = "inputNHWC_1x240x240x16_outputNHWC_1x240x240x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1"

# Pull every number out of the attr string, in order of appearance.
nums = [int(s) for s in re.findall(r"\d+\.?\d*", attr_str)]
if len(nums) != 18:
    raise ValueError("expected 18 numbers in attr_str, got %d" % len(nums))
N, IH, IW, CI, _, OH, OW, CO, KH, KW, strideH, strideW, padTop, padBottom, padLeft, padRight, dilationH, dilationW = nums

# Sanity-check that the declared output spatial size is consistent with the
# conv arithmetic for the parsed pads, since the conv below uses
# padding='SAME' and would otherwise silently ignore a mismatching config.
effKH = (KH - 1) * dilationH + 1  # effective kernel extent under dilation
effKW = (KW - 1) * dilationW + 1
if OH != (IH + padTop + padBottom - effKH) // strideH + 1:
    raise ValueError("output height inconsistent with parsed attributes")
if OW != (IW + padLeft + padRight - effKW) // strideW + 1:
    raise ValueError("output width inconsistent with parsed attributes")
# Graph input: a single NHWC tensor fed at inference time, named "input"
# so the exporters below can reference it.
input = tf.placeholder(name="input", dtype=tf.float32, shape=(N, IH, IW, CI))

# Trainable conv kernel, laid out (KH, KW, in_channels, out_channels).
weights = tf.get_variable("w0", dtype=tf.float32, shape=(KH, KW, CI, CO))

# NOTE(review): 'SAME' padding is assumed to reproduce the parsed
# padTop/Bottom/Left/Right values, which are otherwise unused here —
# confirm when introducing a new attr_str.
output = tf.nn.conv2d(input,
                      weights,
                      strides=[1, strideH, strideW, 1],
                      padding='SAME',
                      use_cudnn_on_gpu=False,
                      data_format="NHWC",
                      dilations=[1, dilationH, dilationW, 1])

# Name the final graph node "output" for freezing/conversion below.
output = tf.identity(output, name="output")

# input = tf.placeholder(name="input", dtype=tf.float32, shape=(1, 8, 128, 250))
# output = tf.identity(input)
# output = tf.pad(output, paddings=((0, 0), (1, 1), (1, 1), (0, 0)))
# output = tf.squeeze(output, 0)
# output = tf.reshape(output, [1] + output.shape.as_list())
# output = tf.nn.depthwise_conv2d_native(output,
#                                        tf.get_variable("w1", dtype=tf.float32, shape=(1, 1, 16, 1)),
#                                        strides=[1, 1, 1, 1],
#                                        padding='SAME',
#                                        data_format="NHWC",
#                                        dilations=[1, 1, 1, 1])
# output = tf.nn.max_pool2d(output,
#                           ksize=[1, 2, 2, 1],
#                           strides=[1, 1, 1, 1],
#                           padding='SAME',
#                           data_format="NHWC")
# output = fake_quant_with_min_max_vars(output, tf.constant(-1, tf.float32), tf.constant(1, tf.float32))
# output = tf.identity(output, name="output")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Export frozen graph (.pb): fold the variables into constants so the
    # graph file is self-contained.
    # Reference: "Saving TensorFlow models as PB files",
    # https://zhuanlan.zhihu.com/p/32887066
    constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ['output'])
    with tf.gfile.FastGFile('../models/pb/fp32.pb', mode='wb') as f:
        f.write(constant_graph.SerializeToString())

    # Export float32 tflite. Use a context manager so the file handle is
    # closed/flushed deterministically (the original open(...).write(...)
    # leaked the handle).
    converter = tf.lite.TFLiteConverter.from_session(sess, [input], [output])
    with open("../models/tflite/fp32.tflite", "wb") as f:
        f.write(converter.convert())

    # Export quantized (uint8) tflite. quantized_input_stats maps the real
    # input to uint8 as (mean, std): real = (quantized - mean) / std.
    # default_ranges_stats supplies a fallback (min, max) for activations
    # that carry no fake-quant range information.
    converter = tf.lite.TFLiteConverter.from_session(sess, [input], [output])
    converter.inference_type = tf.lite.constants.QUANTIZED_UINT8
    converter.quantized_input_stats = {"input": (128, 128)}
    converter.default_ranges_stats = (0, 1)
    with open("../models/tflite/uint8.tflite", "wb") as f:
        f.write(converter.convert())
