#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import shutil

import numpy as np
import tensorflow as tf

from lanenet_model.lanenet import LaneNet
from local_utils.config_utils import parse_config_utils

def convert_to_tflite():
    """
    Convert the LaneNet model to TFLite format with size optimization.

    Builds the TF1-style inference graph, restores the pretrained TuSimple
    checkpoint, exports a SavedModel, converts it to a float32 TFLite model,
    and finally loads the result with the TFLite interpreter as a sanity
    check.

    Raises:
        Exception: if the pretrained checkpoint cannot be restored.
    """
    # 1. Load model configuration.
    CFG = parse_config_utils.lanenet_cfg

    # 2. Create the input placeholder (fixed batch of one 256x512 RGB image).
    input_tensor = tf.placeholder(
        dtype=tf.float32,
        shape=[1, 256, 512, 3],
        name='input_tensor'
    )

    # 3. Instantiate LaneNet in test (inference) mode.
    lanenet = LaneNet(phase='test', cfg=CFG)

    # 4. Build the inference graph.
    binary_seg_pred, instance_seg_pred = lanenet.inference(
        input_tensor=input_tensor,
        name='LaneNet'
    )

    # 5. Create a session and restore the pretrained weights.
    # A context manager guarantees the session is closed even if the
    # SavedModel export below raises (the original code leaked it there).
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver()
        weights_path = '../lanenet_ts/tusimple_lanenet.ckpt'

        try:
            saver.restore(sess=sess, save_path=weights_path)
            print('成功加载预训练权重')
        except Exception as exc:
            # Chain the original error instead of discarding it — the
            # previous bare `except:` hid the real failure cause.
            raise Exception('加载预训练权重失败') from exc

        # 6. Export the graph as a SavedModel.
        export_dir = './saved_model'
        if os.path.exists(export_dir):
            # shutil.rmtree is portable and avoids shelling out via os.system.
            shutil.rmtree(export_dir)
        os.makedirs(export_dir)

        builder = tf.saved_model.builder.SavedModelBuilder(export_dir)

        # Tensor info for the serving signature.
        tensor_info_input = tf.saved_model.utils.build_tensor_info(input_tensor)
        tensor_info_binary_seg = tf.saved_model.utils.build_tensor_info(binary_seg_pred)
        tensor_info_instance_seg = tf.saved_model.utils.build_tensor_info(instance_seg_pred)

        # Prediction signature mapping the placeholder to both seg outputs.
        prediction_signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs={'input_tensor': tensor_info_input},
            outputs={
                'binary_seg_pred': tensor_info_binary_seg,
                'instance_seg_pred': tensor_info_instance_seg
            },
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
        )

        builder.add_meta_graph_and_variables(
            sess,
            [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'serving_default': prediction_signature
            }
        )

        builder.save()
        print(f"SavedModel已保存到: {export_dir}")

        try:
            # 7. Convert the SavedModel to TFLite (float32, size-optimized).
            converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)

            converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
            converter.allow_custom_ops = True

            # NOTE(review): OPTIMIZE_FOR_SIZE is a deprecated alias of
            # Optimize.DEFAULT in recent TF releases — behavior is unchanged.
            converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]

            # Keep inference in float32 (no integer quantization).
            converter.inference_type = tf.float32

            tflite_model = converter.convert()

            # 8. Write the TFLite flatbuffer to disk.
            tflite_path = './lanenet_float32_optimized.tflite'
            out_dir = os.path.dirname(tflite_path)
            if out_dir:  # guard: os.makedirs('') would raise FileNotFoundError
                os.makedirs(out_dir, exist_ok=True)
            with open(tflite_path, 'wb') as f:
                f.write(tflite_model)
            print(f"TFLite模型已成功保存到: {tflite_path}")

            # 9. Sanity check: load the converted model and report its I/O specs.
            interpreter = tf.lite.Interpreter(model_path=tflite_path)
            interpreter.allocate_tensors()

            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()

            print("\nTFLite模型信息:")
            print("输入详情:", input_details)
            print("输出详情:", output_details)

        except Exception as e:
            # Best-effort reporting: conversion failures are printed, not
            # re-raised, matching the original behavior.
            print(f"转换过程中出现错误: {str(e)}")

if __name__ == '__main__':
    # Run CPU-only: hide all CUDA devices so TensorFlow never tries to
    # initialize a GPU (avoids CUDA errors on machines without one).
    os.environ.update(CUDA_VISIBLE_DEVICES="-1")
    convert_to_tflite()
