import os 
import re 
import json 
import numpy as np 

from collections import OrderedDict

# the flexbuffer's function
def _indirect(buf, offset, parentWidth = 1):
    """Resolve a flexbuffer indirect reference.

    The slot at `offset` holds an unsigned integer (of `parentWidth`
    bytes) that is the backwards distance from the slot to the actual
    payload; return the payload's absolute offset.
    """
    back_jump = uint(buf, offset, parentWidth)
    return offset - back_jump

def uint(buf, offset, size=1):
    """Read a native-endian unsigned integer of `size` bytes (1/2/4/8)
    from `buf` starting at `offset`.

    Returns a numpy scalar; raises Exception for any other width.
    """
    dtypes = {1: 'uint8', 2: 'uint16', 4: 'uint32', 8: 'uint64'}
    if size not in dtypes:
        raise Exception("Illegal byteWidth!")
    return np.frombuffer(buf[offset:offset + size], dtype=dtypes[size])[0]

def _int(buf, offset, size=1):
    value_buf = buf[offset:offset+size]
    if(size == 1):
        return np.frombuffer(value_buf, dtype='int8')[0]
    elif(size == 2):
        return np.frombuffer(value_buf, dtype='int16')[0]
    elif(size == 4):
        return np.frombuffer(value_buf, dtype='int32')[0]
    elif(size == 8):
        return np.frombuffer(value_buf, dtype='int64')[0]
    else:
        raise Exception("Illegal byteWidth!")

def _float(buf, offset, size):
    value_buf = buf[offset:offset+size]
    if(size == 4):
        return np.frombuffer(value_buf, dtype=np.float32)[0]
    if(size == 8):
        return np.frombuffer(value_buf, dtype=np.float64)[0]
    else:
        raise Exception("Illegal byteWidth!")   

def string(buf, offset):
    """Decode the NUL-terminated string that starts at `offset` in `buf`.

    Scans forward for the first zero byte; raises IndexError if the
    buffer ends before a terminator is found.
    """
    end = offset
    while buf[end] != 0:
        end += 1
    return buf[offset:end].decode()

def set_uint(buf, offset, size=1, value=None):
    """Return a copy of `buf` with `value` written at `offset` as a
    native-endian unsigned integer of `size` bytes (1/2/4/8).

    Raises Exception for any other width.
    """
    dtypes = {1: np.uint8, 2: np.uint16, 4: np.uint32, 8: np.uint64}
    if size not in dtypes:
        raise Exception("Illegal byteWidth!")
    patched = bytearray(buf)
    patched[offset:offset + size] = np.asarray(value, dtype=dtypes[size]).tobytes()
    return bytes(patched)

def set_int(buf, offset, size=1, value=None):
    """Return a copy of `buf` with `value` written at `offset` as a
    native-endian signed integer of `size` bytes (1/2/4/8).

    Raises Exception for any other width.
    """
    dtypes = {1: np.int8, 2: np.int16, 4: np.int32, 8: np.int64}
    if size not in dtypes:
        raise Exception("Illegal byteWidth!")
    patched = bytearray(buf)
    patched[offset:offset + size] = np.asarray(value, dtype=dtypes[size]).tobytes()
    return bytes(patched)

def set_float(buf, offset, size, value=None):
    """Return a copy of `buf` with `value` written at `offset` as a
    native-endian float of `size` bytes (4 or 8).

    Raises Exception for any other width.
    """
    dtypes = {4: np.float32, 8: np.float64}
    if size not in dtypes:
        raise Exception("Illegal byteWidth!")
    patched = bytearray(buf)
    patched[offset:offset + size] = np.asarray(value, dtype=dtypes[size]).tobytes()
    return bytes(patched)

def reference_read(buf, offset, parentWidth, width, type):
    """Decode one flexbuffer reference slot according to its type code.

    Returns the decoded Python/numpy value, or None for type codes this
    tool does not handle.
    """
    if type == 0x01:      # signed int
        return _int(buf, offset, parentWidth)
    elif type == 0x03:    # float
        return _float(buf, offset, parentWidth)
    elif type == 0x04:    # string, stored indirectly
        return string(buf, _indirect(buf, offset, parentWidth))
    elif type == 0x1a:    # read as unsigned -- presumably a bool slot; TODO confirm
        return uint(buf, offset, parentWidth)
    return None

def reference_write(buf, offset, parentWidth, width, type, value):
    """Write `value` over one flexbuffer reference slot (the inverse of
    reference_read).

    Returns a new bytes object, or None for unhandled type codes.
    Strings are not supported and raise.
    """
    if type == 0x01:      # signed int
        return set_int(buf, offset, parentWidth, value)
    elif type == 0x03:    # float
        return set_float(buf, offset, parentWidth, value)
    elif type == 0x04:    # string -- resizing indirect data is unsupported
        raise Exception("Do not support string now!")
    elif type == 0x1a:    # written as unsigned -- presumably a bool slot; TODO confirm
        return set_uint(buf, offset, parentWidth, value)
    return None

def _typedVector(buf, offset, byteWidth, type, size=None):
    """Decode a typed flexbuffer vector (all elements share one type).

    When `size` is None the element count is read from the length slot
    that sits `byteWidth` bytes before the payload.
    """
    if size is None:
        size = uint(buf, offset - byteWidth, byteWidth)
    return [reference_read(buf, offset + i * byteWidth, byteWidth, 1, type)
            for i in range(size)]

def _vector(buf, offset, byteWidth):
    """Decode a heterogeneous flexbuffer vector.

    Layout: a length slot before `offset`, then `size` value slots of
    `byteWidth` bytes each, then one packed-type byte per element.
    """
    size = uint(buf, offset - byteWidth, byteWidth)
    types_start = int(offset + (size * byteWidth))
    elements = []
    for i in range(size):
        packed = uint(buf, types_start + i, 1)
        elements.append(reference_read(
            buf, offset + i * byteWidth, byteWidth,
            1 << (packed & 3), packed >> 2))
    return elements

def set_vector(buf, offset, byteWidth, vector_keys, custom_options):
    """Overwrite every element of a heterogeneous flexbuffer vector.

    For element i, the new value is custom_options[vector_keys[i]]; the
    slot keeps its original packed type and width. Returns the patched
    buffer (a new bytes object each iteration).
    """
    count = uint(buf, offset - byteWidth, byteWidth)
    types_start = offset + (count * byteWidth)
    for idx in range(count):
        packed = uint(buf, types_start + idx, 1)
        new_value = custom_options[vector_keys[idx]]
        buf = reference_write(buf, offset + idx * byteWidth, byteWidth,
                              1 << (packed & 3), packed >> 2, new_value)
    return buf

# the custom options uses the flexbuffer to store, need to decode
def parse_custom_options(flex_buffer):
    """Decode a flexbuffer blob whose root is a map into a plain dict.

    The last two bytes of the buffer give the root width and packed
    type; the map payload is a keys vector plus a values vector found
    through the root's indirect offset.
    """
    end = len(flex_buffer)

    parentWidth = flex_buffer[end - 1]
    packedType = flex_buffer[end - 2]

    root_slot = end - 2 - parentWidth
    byte_width = 1 << (packedType & 3)
    # root type (packedType >> 2) is assumed to be MAP and is not checked

    # NOTE: the root slot is followed with the default 1-byte width
    values_offset = _indirect(flex_buffer, root_slot)
    keys_slot = values_offset - byte_width * 3
    # int() casts keep python ints; otherwise int32 - uint64 promotes to float
    keys_vector_offset = int(keys_slot - uint(flex_buffer, keys_slot, byte_width))
    keys_byte_width = uint(flex_buffer, keys_slot + byte_width, byte_width)

    keys = _typedVector(flex_buffer, keys_vector_offset, int(keys_byte_width), 0x04)
    values = _vector(flex_buffer, int(values_offset), int(byte_width))

    return {keys[i]: values[i] for i in range(len(keys))}

def set_custiom_options(flex_buffer, custom_options):
    """Patch the values of a flexbuffer map in place, keyed by its own
    key vector, and return the new buffer bytes.

    (Name typo is kept for compatibility with existing callers.)
    """
    end = len(flex_buffer)

    parentWidth = flex_buffer[end - 1]
    packedType = flex_buffer[end - 2]

    root_slot = end - 2 - parentWidth
    byte_width = 1 << (packedType & 3)
    # root type (packedType >> 2) is assumed to be MAP and is not checked

    # NOTE: the root slot is followed with the default 1-byte width
    values_offset = _indirect(flex_buffer, root_slot)
    keys_slot = values_offset - byte_width * 3
    # int() cast avoids int - uint64 promoting to float
    keys_vector_offset = int(keys_slot - uint(flex_buffer, keys_slot, byte_width))
    keys_byte_width = uint(flex_buffer, keys_slot + byte_width, byte_width)

    keys = _typedVector(flex_buffer, keys_vector_offset, keys_byte_width, 0x04)
    return set_vector(flex_buffer, values_offset, byte_width, keys, custom_options)

def add_post_processing_nodes(json_ctx, tpl_file, custom_options, op_inputs):
    """Append a detection post-processing custom op to the model JSON.

    json_ctx: flatc-dumped model JSON (dict); mutated and returned.
    tpl_file: path to a template holding blank operator_codes / tensors /
        operators entries for the new node (5 tensors: 4 outputs + anchor).
    custom_options: dict of parameters patched into the op's flexbuffer
        custom_options blob (keys must match the blob's own key vector).
    op_inputs: tensor indices feeding the new op; the anchor tensor index
        is appended automatically.
    """
    tflite_operator_codes = json_ctx['operator_codes']
    tflite_operator_codes_len = len(tflite_operator_codes)

    tflite_tensors = json_ctx['subgraphs'][0]["tensors"]
    tflite_tensors_len = len(tflite_tensors)

    tflite_operators = json_ctx['subgraphs'][0]["operators"]

    outputs = json_ctx['subgraphs'][0]["outputs"]

    buffers = json_ctx['buffers']
    buffers_len = len(buffers)
    # tensors: output(0-3), anchor_input
    # NOTE(review): eval() on the template file is only safe for a trusted
    # local template; ast.literal_eval would be safer if the content allows.
    with open(tpl_file, "r") as f:
        blank_nodes = eval(f.read())[0]
        operator_codes = blank_nodes['operator_codes']
        tensors = blank_nodes['tensors']
        operators = blank_nodes['operators']
    
    # update the operator_codes
    tflite_operator_codes += [operator_codes]

    # allocate the buffer for the tensors
    # shapes of the 4 outputs -- presumably boxes [1,N,4], classes [1,N],
    # scores [1,N], count [1]; verify against the template
    tensor_shape = [[1, custom_options['max_detections'] * custom_options['max_classes_per_detection'], 4], 
                    [1, custom_options['max_detections'] * custom_options['max_classes_per_detection']], 
                    [1, custom_options['max_detections'] * custom_options['max_classes_per_detection']], 
                    [1]]
    tensors_idx = []
    [output1, output2, output3, output4, anchor] = tensors
    # give each output tensor a fresh empty buffer and register it
    for tensor, shape in zip(tensors[:4], tensor_shape):
        tensor['buffer'] = buffers_len 
        tensor['shape'] += shape
        buffers += [{}]
        buffers_len += 1

        tflite_tensors += [tensor]
        tensors_idx += [tflite_tensors_len]
        tflite_tensors_len += 1

    # calcute and allocate the anchor
    # NOTE(review): hard-coded relative path -- only resolves when the
    # script runs from the expected working directory; TODO confirm
    anchor['buffer'] = buffers_len
    anchor_buffers = np.load('../prior_boxes.npy').astype("float32")
    anchor['shape'] = list(anchor_buffers.shape)
    buffers += [{"data":list(anchor_buffers.tobytes())}]

    tflite_tensors += [anchor]

    # update the new outputs to the tflite post processing
    outputs = tensors_idx

    # custom_operations inside the operators
    custom_options_raw = bytes(operators['custom_options'])
    ubyte_custom_options = set_custiom_options(custom_options_raw, custom_options)
    operators['custom_options'] = list(ubyte_custom_options)

    # add the inputs/outptus for the operators
    # tflite_tensors_len is now the index of the anchor tensor appended above
    operators["inputs"] = op_inputs + [tflite_tensors_len]
    operators["outputs"] = tensors_idx

    # the opcode_index
    operators['opcode_index'] = tflite_operator_codes_len
    tflite_operators += [operators]


    # update json with these new elements
    json_ctx['operator_codes'] = tflite_operator_codes
    json_ctx['subgraphs'][0]["tensors"] = tflite_tensors
    json_ctx['subgraphs'][0]["operators"] = tflite_operators
    json_ctx['subgraphs'][0]["outputs"] = outputs
    json_ctx['buffers'] = buffers

    return json_ctx

def del_concat_node_at_last(json_ctx):
    """Find the last two-input Concatenation operator, drop it (and any
    operators after it) from subgraph 0, and return the context together
    with the concat's two input tensor indices."""
    operators = json_ctx['subgraphs'][0]['operators']
    total = len(operators)

    # walk backwards; pos stays a negative index into `operators`
    pos = -1
    for op in reversed(operators):
        if (op.get('builtin_options_type') == "ConcatenationOptions"
                and len(op['inputs']) == 2):
            break
        pos -= 1

    input1_idx, input2_idx = operators[pos]['inputs']
    # truncate at the concat op: it and everything after it is removed
    json_ctx['subgraphs'][0]['operators'] = operators[:total + pos]

    return json_ctx, input1_idx, input2_idx

def insert_dequantize_node(json_ctx, concat_output):
    """Insert a DEQUANTIZE op in front of each of the two given tensors.

    json_ctx: flatc-dumped model JSON (dict); mutated and returned.
    concat_output: pair of tensor indices (the former concat inputs).

    Returns (json_ctx, o1, o2) where o1/o2 are the tensor indices to use
    downstream: the new float output tensors when the inputs were
    quantized, or the original indices unchanged otherwise.
    """
    def add_dequantize(json_ctx, input_tensor_idx):
        # Append one DEQUANTIZE operator for a single tensor; returns the
        # updated context and the index of its float output tensor.
        tensors = json_ctx['subgraphs'][0]['tensors']
        operators = json_ctx['subgraphs'][0]['operators']
        operator_codes = json_ctx['operator_codes']
        datas = json_ctx['buffers']

        input_tensor = tensors[input_tensor_idx]

        # only tensors carrying both 'quantization' and 'type' get a node
        if ('quantization' in input_tensor) and ('type' in input_tensor):
            # reuse an existing DEQUANTIZE opcode entry if present
            # (DEQUANTIZE's deprecated_builtin_code is 6)
            builtin_code_idx = -1
            for idx, code in enumerate(operator_codes):
                if code['deprecated_builtin_code'] == 6:
                    builtin_code_idx = idx
                    break
            if builtin_code_idx == -1:
                builtin_code_idx = len(operator_codes)
                operator_codes.append({
                    "deprecated_builtin_code": 6,
                    "version": 2
                })

            # new float output tensor backed by a fresh empty buffer
            tensors.append({
                "shape": input_tensor['shape'],
                "name": "Dequantize_%s" % input_tensor['name'],
                "buffer": len(datas)
            })
            datas.append({})

            # the DEQUANTIZE operator itself
            operators.append({
                "opcode_index": builtin_code_idx,
                "inputs": [input_tensor_idx],
                "outputs": [len(tensors) - 1]
            })

            json_ctx['operator_codes'] = operator_codes
            json_ctx['subgraphs'][0]['tensors'] = tensors
            json_ctx['subgraphs'][0]['operators'] = operators
            json_ctx['buffers'] = datas

            input_tensor_idx = len(tensors) - 1

        return json_ctx, input_tensor_idx

    # cleanup: the original bound new_json_ctx/tensors/tensor_o1/tensor_o2
    # and never used them; behavior is unchanged without them
    o1, o2 = concat_output
    json_ctx, o1 = add_dequantize(json_ctx, o1)
    json_ctx, o2 = add_dequantize(json_ctx, o2)

    return json_ctx, o1, o2

def OrderJson(json_ctx):
    """Convert every operator dict of subgraph 0 to an OrderedDict so the
    serialized key order is stable."""
    subgraph = json_ctx['subgraphs'][0]
    subgraph['operators'] = [OrderedDict(op) for op in subgraph['operators']]
    return json_ctx

def pad_json_datas(json_ctx):
    """Pad the top-level 'buffers' list with empty buffer dicts.

    Some tensors carry no 'buffer' attribute; downstream code assumes the
    buffers list is at least as long as the largest such tensor index, so
    append empty buffers until it is. Mutates and returns json_ctx.
    """
    tensors = json_ctx['subgraphs'][0]['tensors']
    datas = json_ctx['buffers']

    tensor_without_data_idxs = [0]
    for idx, tensor in enumerate(tensors):
        if 'buffer' not in tensor:
            tensor_without_data_idxs.append(idx)
    max_idx_placehold = max(tensor_without_data_idxs)

    # bug fix: `[{}] * n` repeats ONE shared dict, so mutating any padded
    # buffer later would mutate all of them; build distinct dicts instead.
    # (also: plain max() replaces np.maximum on two python ints)
    padding = max(max_idx_placehold - len(datas), 0)
    datas += [{} for _ in range(padding)]

    json_ctx['buffers'] = datas

    return json_ctx

def make_sure_float(json_ctx, tensor_idx):
    """Rewrite the quantization 'scale'/'zero_point' lists of the given
    tensors as strings; every scale entry is forced to str(1/256)."""
    tensors = json_ctx['subgraphs'][0]['tensors']
    fixed_scale = str(1.0 / 256)

    for idx in tensor_idx:
        quant = tensors[idx]['quantization']
        quant['scale'] = [fixed_scale] * len(quant['scale'])
        quant['zero_point'] = [str(zp) for zp in quant['zero_point']]

    return json_ctx

if __name__ == "__main__":

    # flexbuffer custom options for the post-processing op; these keys
    # must match the key vector baked into the template's options blob
    custom_options = {
        "x_scale":10.0,
        "y_scale":10.0,
        "w_scale":5.0,
        "h_scale":5.0,
        "detections_per_class":100,
        "max_classes_per_detection":1,
        "max_detections":5,
        "nms_iou_threshold":0.45,
        "nms_score_threshold":0.3,
        "num_classes":1,
        "use_regular_nms":0,
        "_output_quantized":1,
        "_support_output_type_float_in_quantized_op":1
    }

    # which tensor's quantization you want to format
    format_string_tensor = [139]
    model_name = "weights.46-2.68_quant"
    # dump the .tflite to JSON via flatc (Windows binary; needs schema.fbs)
    os.system("flatc.exe -t ./schema.fbs -- %s.tflite"%model_name)
    json_file = "%s.json"%(model_name)
    json_pad_file = "%s_pad.json"%(model_name)
    json_saved_file = "%s_resaved.json"%(model_name)
    if not os.path.exists(json_pad_file):
        # flatc emits unquoted keys; quote the first "key:" token on each
        # line so json.load can parse the file
        f = open(json_file, 'r+')
        with open(json_pad_file, "w") as j:
            for idx, i in enumerate(f.readlines()):
                _word = re.search(".*?:", i)
                if(_word):
                    s = _word.start() 
                    e = _word.end()
                    word = i[s:e-1].strip()
                    i = i.replace(word, '"' + word + '"' )
                j.write(i)
            j.close()
        f.close()

    json_ctx = json.load(open(json_pad_file), object_pairs_hook=OrderedDict)
    # new_json_ctx = OrderJson(json_ctx)
    # extent the data, some tensor has no buffer attr, will occupy the idx of its pos in the tensors
    # and will always bigger than the len(buffer), we pad it first
    new_json_ctx = pad_json_datas(json_ctx)
    # make sure quantization format to string
    new_json_ctx = make_sure_float(new_json_ctx, format_string_tensor)
    # NOTE(review): passes json_ctx rather than new_json_ctx -- harmless
    # only because the earlier helpers mutate and return the same object
    new_json_ctx, concat_input1_idx, concat_input2_idx = del_concat_node_at_last(json_ctx)
    # the input of the post quantization is float, need to insert a dequantize node
    new_json_ctx, concat_input1_idx, concat_input2_idx = insert_dequantize_node(new_json_ctx, [concat_input1_idx, concat_input2_idx])
    new_json_ctx = add_post_processing_nodes(new_json_ctx, "./post_processing_tensor.txt", custom_options, [concat_input1_idx, concat_input2_idx])

    json_saved_file = model_name + "_with_post_processing.json"

    datas = json.dumps(new_json_ctx, ensure_ascii=False,indent=4)
    with open(json_saved_file,'w+') as f: 
        f.write(datas)
        f.close()

    # rebuild the .tflite from the patched JSON; remove intermediate
    # files only if flatc succeeds (exit status 0)
    if(0 == os.system("flatc.exe -b ./schema.fbs %s"%json_saved_file)):
        os.remove(model_name + ".json")
        os.remove(model_name + "_pad.json")
        os.remove(model_name + "_with_post_processing.json")