# uamcf/adapters/tflite.py
import os
import shutil
import time
import tracemalloc
from typing import Dict, Any, Optional, List, Tuple, Union

import numpy as np

from .base import ModelAdapter
from ..utils.logger import get_logger


class TFLiteAdapter(ModelAdapter):
    """
    TensorFlow Lite model adapter implementing the TFLite-specific interface.

    A "model" throughout this adapter is a dict with keys:
        "interpreter":    the ``tf.lite.Interpreter`` instance,
        "input_details":  result of ``interpreter.get_input_details()``,
        "output_details": result of ``interpreter.get_output_details()``,
        "path":           the source ``.tflite`` file path.

    TFLite interpreters are inference-only, so layer replacement and
    fine-tuning are unsupported and report an error instead.
    """

    def __init__(self):
        """Initialize the TFLite adapter and bind the TensorFlow module."""
        super().__init__()
        try:
            import tensorflow as tf
            self.tf = tf
            self.logger.info("TFLite adapter initialized")
        except ImportError:
            # Keep a sentinel so later use fails predictably instead of
            # raising AttributeError on a missing attribute.
            self.tf = None
            self.logger.error("TensorFlow is not installed. Please install with 'pip install tensorflow'")

    # ------------------------------------------------------------------ #
    # Internal helpers
    # ------------------------------------------------------------------ #

    @staticmethod
    def _ensure_parent_dir(path: str) -> None:
        """Create the parent directory of *path* if it has one.

        Guards against ``os.path.dirname`` returning "" for bare file
        names, which would make ``os.makedirs("")`` raise.
        """
        parent = os.path.dirname(path)
        if parent:
            os.makedirs(parent, exist_ok=True)

    @staticmethod
    def _shape_list(detail: Dict) -> List:
        """Return a tensor detail's shape as a plain Python list.

        Tensor details normally carry a numpy array, but ``np.asarray``
        also handles a missing shape (the ``[]`` default), which has no
        ``.tolist()`` method of its own.
        """
        return np.asarray(detail.get("shape", [])).tolist()

    def _unpack(self, model: Any) -> Optional[Tuple[Any, List, List]]:
        """Extract (interpreter, input_details, output_details) from a model dict.

        Returns None (after logging) when *model* is not the expected dict.
        """
        if isinstance(model, dict):
            return (
                model.get("interpreter"),
                model.get("input_details", []),
                model.get("output_details", []),
            )
        self.logger.error("Model is not in the expected format")
        return None

    @staticmethod
    def _run_once(interpreter: Any, input_details: List, output_details: List,
                  input_data: Any) -> None:
        """Set input tensor(s), invoke the interpreter and read every output.

        Accepts either a single array or a list of arrays (one per input).
        """
        if isinstance(input_data, list):
            for i, data in enumerate(input_data):
                if i < len(input_details):
                    interpreter.set_tensor(input_details[i]["index"], data)
        else:
            interpreter.set_tensor(input_details[0]["index"], input_data)

        interpreter.invoke()

        # Reading the outputs makes sure the full inference path is exercised.
        for output_detail in output_details:
            _ = interpreter.get_tensor(output_detail["index"])

    def _copy_model_file(self, model: Any, dest_path: str) -> str:
        """Copy the model's backing .tflite file to *dest_path*.

        Returns *dest_path* on success, "" when the model dict carries no
        path information.
        """
        if isinstance(model, dict) and "path" in model:
            shutil.copyfile(model["path"], dest_path)
            return dest_path
        self.logger.error("Model does not contain path information")
        return ""

    # ------------------------------------------------------------------ #
    # Public interface
    # ------------------------------------------------------------------ #

    def load_model(self, model_path: str) -> Any:
        """
        Load a TFLite model.

        Args:
            model_path: Path to the .tflite model file.

        Returns:
            A model-info dict (see class docstring), or None on failure.
        """
        try:
            interpreter = self.tf.lite.Interpreter(model_path=model_path)
            interpreter.allocate_tensors()

            model_info = {
                "interpreter": interpreter,
                "input_details": interpreter.get_input_details(),
                "output_details": interpreter.get_output_details(),
                "path": model_path,
            }

            self.logger.info(f"Model loaded from {model_path}")
            return model_info

        except Exception as e:
            self.logger.error(f"Failed to load model: {str(e)}")
            return None

    def save_model(self, model: Any, save_path: str) -> str:
        """
        Save a TFLite model by copying its backing file.

        Args:
            model: Model dict to save.
            save_path: Destination path.

        Returns:
            The actual saved path, or "" on failure.
        """
        try:
            self._ensure_parent_dir(save_path)
            saved = self._copy_model_file(model, save_path)
            if saved:
                self.logger.info(f"Model saved to {save_path}")
            return saved

        except Exception as e:
            self.logger.error(f"Failed to save model: {str(e)}")
            return ""

    def get_model_info(self, model: Any) -> Dict:
        """
        Get basic information about a TFLite model.

        Args:
            model: Model dict.

        Returns:
            Dict with type, inputs/outputs (name + shape), tensor count,
            size in bytes and an estimated parameter count. Empty on error.
        """
        info = {}

        try:
            unpacked = self._unpack(model)
            if unpacked is None:
                return info
            interpreter, input_details, output_details = unpacked

            info["type"] = "TFLite"

            info["inputs"] = [
                {"name": detail.get("name", ""), "shape": self._shape_list(detail)}
                for detail in input_details
            ]
            info["outputs"] = [
                {"name": detail.get("name", ""), "shape": self._shape_list(detail)}
                for detail in output_details
            ]

            if interpreter:
                info["tensor_count"] = len(interpreter.get_tensor_details())

            info["size_bytes"] = self.get_model_size(model)
            info["estimated_parameters"] = self.get_parameter_count(model)

        except Exception as e:
            self.logger.error(f"Failed to get model info: {str(e)}")

        return info

    def get_layers_info(self, model: Any) -> Dict[str, Dict]:
        """
        Get per-layer information for a TFLite model.

        TFLite does not expose layers directly, so each tensor is treated
        as a layer approximation.

        Args:
            model: Model dict.

        Returns:
            Dict keyed by tensor name with index/shape/dtype/quantization
            (and a parameter estimate for weight tensors). Empty on error.
        """
        layers_info = {}

        try:
            if isinstance(model, dict) and "interpreter" in model:
                interpreter = model["interpreter"]
            else:
                self.logger.error("Model does not contain a valid interpreter")
                return {}

            for i, detail in enumerate(interpreter.get_tensor_details()):
                tensor_name = detail.get("name", f"tensor_{i}")
                shape = self._shape_list(detail)

                layer_info = {
                    "index": i,
                    "shape": shape,
                    "dtype": str(detail.get("dtype", "")),
                    "quantization": str(detail.get("quantization", "")),
                }

                # Weight-like tensors get a parameter-count estimate.
                lowered = detail.get("name", "").lower()
                if ("weight" in lowered or "kernel" in lowered) and shape:
                    # int() avoids leaking a numpy scalar into the result.
                    layer_info["parameters"] = int(np.prod(shape))

                layers_info[tensor_name] = layer_info

        except Exception as e:
            self.logger.error(f"Failed to get layers info: {str(e)}")

        return layers_info

    def get_layer(self, model: Any, layer_name: str) -> Any:
        """
        Get the layer (tensor detail) with the given name.

        Args:
            model: Model dict.
            layer_name: Tensor name to look up.

        Returns:
            The tensor-detail dict, or None if not found.
        """
        try:
            if isinstance(model, dict) and "interpreter" in model:
                interpreter = model["interpreter"]
            else:
                self.logger.error("Model does not contain a valid interpreter")
                return None

            for detail in interpreter.get_tensor_details():
                if detail.get("name") == layer_name:
                    return detail

            self.logger.warning(f"Layer not found: {layer_name}")
            return None

        except Exception as e:
            self.logger.error(f"Failed to get layer: {str(e)}")
            return None

    def replace_layer(self, model: Any, layer_name: str, new_layer: Any) -> bool:
        """
        Replace a layer in the model.

        Unsupported for TFLite: interpreters are inference-only.

        Args:
            model: Model dict.
            layer_name: Layer name.
            new_layer: Replacement layer.

        Returns:
            Always False.
        """
        self.logger.error("TFLite models do not support direct layer replacement. Convert to TensorFlow model first.")
        return False

    def generate_sample_input(self, model: Any) -> Any:
        """
        Generate random sample inputs matching the model's input specs.

        Args:
            model: Model dict.

        Returns:
            A list of numpy arrays (one per input tensor), or None on failure.
        """
        try:
            if isinstance(model, dict) and "input_details" in model:
                input_details = model["input_details"]
            else:
                self.logger.error("Model does not contain input details")
                return None

            inputs = []
            for detail in input_details:
                shape = detail.get("shape", [])
                dtype = detail.get("dtype", np.float32)

                if dtype == np.int32 or dtype == np.int64:
                    # Integer inputs (e.g. token ids): small non-negative range.
                    input_data = np.random.randint(0, 10, size=shape).astype(dtype)
                else:
                    # float32 and any other/unknown dtype fall back to
                    # standard-normal float32 data.
                    input_data = np.random.randn(*shape).astype(np.float32)

                inputs.append(input_data)

            return inputs

        except Exception as e:
            self.logger.error(f"Failed to generate sample input: {str(e)}")
            return None

    def measure_inference_time(self, model: Any, input_data: Any, num_runs: int = 10) -> float:
        """
        Measure average inference time of the model.

        Args:
            model: Model dict.
            input_data: Input array or list of arrays; generated if None.
            num_runs: Number of timed runs.

        Returns:
            Average inference time in milliseconds (0.0 on failure).
        """
        try:
            unpacked = self._unpack(model)
            if unpacked is None:
                return 0.0
            interpreter, input_details, output_details = unpacked

            if input_data is None:
                input_data = self.generate_sample_input(model)
                if input_data is None:
                    self.logger.error("Failed to generate sample input")
                    return 0.0

            # Warm-up runs (excluded from timing).
            for _ in range(3):
                self._run_once(interpreter, input_details, output_details, input_data)

            # perf_counter is monotonic and high-resolution, unlike time().
            start_time = time.perf_counter()
            for _ in range(num_runs):
                self._run_once(interpreter, input_details, output_details, input_data)
            end_time = time.perf_counter()

            return (end_time - start_time) / num_runs * 1000

        except Exception as e:
            self.logger.error(f"Failed to measure inference time: {str(e)}")
            return 0.0

    def measure_memory_usage(self, model: Any, input_data: Any = None) -> float:
        """
        Measure peak Python-heap memory used by one inference.

        NOTE(review): tracemalloc only tracks Python allocations; native
        TFLite buffers are not included — confirm this is acceptable.

        Args:
            model: Model dict.
            input_data: Input array or list of arrays; generated if None.

        Returns:
            Peak traced memory in MB (0.0 on failure).
        """
        try:
            unpacked = self._unpack(model)
            if unpacked is None:
                return 0.0
            interpreter, input_details, output_details = unpacked

            if input_data is None:
                input_data = self.generate_sample_input(model)
                if input_data is None:
                    self.logger.error("Failed to generate sample input")
                    return 0.0

            tracemalloc.start()
            try:
                self._run_once(interpreter, input_details, output_details, input_data)
                _, peak = tracemalloc.get_traced_memory()
            finally:
                # Always stop tracing, even if inference raised, so the
                # tracemalloc hook is not left installed process-wide.
                tracemalloc.stop()

            return peak / (1024 * 1024)

        except Exception as e:
            self.logger.error(f"Failed to measure memory usage: {str(e)}")
            return 0.0

    def evaluate_accuracy(self, model: Any, eval_data: Any) -> float:
        """
        Evaluate classification accuracy of the model.

        Args:
            model: Model dict.
            eval_data: Tuple of (inputs, labels). ``inputs`` is either a
                list of per-sample arrays or a single batched array.

        Returns:
            Accuracy in [0, 1] as a Python float (0.0 on failure).
        """
        try:
            unpacked = self._unpack(model)
            if unpacked is None:
                return 0.0
            interpreter, input_details, output_details = unpacked

            if not isinstance(eval_data, tuple) or len(eval_data) != 2:
                self.logger.error("eval_data should be a tuple of (inputs, labels)")
                return 0.0

            inputs, labels = eval_data
            correct = 0
            total = 0

            if isinstance(inputs, list) and len(inputs) > 0:
                # Per-sample evaluation: one interpreter invocation each.
                for input_sample, label in zip(inputs, labels):
                    interpreter.set_tensor(input_details[0]["index"], input_sample)
                    interpreter.invoke()
                    output = interpreter.get_tensor(output_details[0]["index"])

                    if np.argmax(output) == label:
                        correct += 1
                    total += 1
            else:
                # Single batched evaluation.
                interpreter.set_tensor(input_details[0]["index"], inputs)
                interpreter.invoke()
                output = interpreter.get_tensor(output_details[0]["index"])

                predictions = np.argmax(output, axis=1)
                correct = np.sum(predictions == labels)
                total = len(labels)

            # float() keeps numpy scalars out of the return value.
            return float(correct / total) if total > 0 else 0.0

        except Exception as e:
            self.logger.error(f"Failed to evaluate accuracy: {str(e)}")
            return 0.0

    def fine_tune(self, model: Any, train_data: Any, iterations: int = 5,
                  early_stopping: bool = True) -> Any:
        """
        Fine-tune the model.

        Unsupported for TFLite: interpreters are inference-only.

        Args:
            model: Model dict.
            train_data: Training data.
            iterations: Number of training iterations.
            early_stopping: Whether to use early stopping.

        Returns:
            The model, unchanged.
        """
        self.logger.error("TFLite models do not support direct fine-tuning. Convert to TensorFlow model first.")
        return model

    def get_model_size(self, model: Any) -> int:
        """
        Get the model's on-disk size in bytes.

        Args:
            model: Model dict.

        Returns:
            File size in bytes, or 0 when the path is unavailable.
        """
        try:
            if isinstance(model, dict) and "path" in model:
                model_path = model["path"]
                if os.path.exists(model_path):
                    return os.path.getsize(model_path)

            self.logger.warning("Model path not available, size estimation not possible")
            return 0

        except Exception as e:
            self.logger.error(f"Failed to get model size: {str(e)}")
            return 0

    def get_parameter_count(self, model: Any) -> int:
        """
        Estimate the model's parameter count.

        Counts elements of tensors whose name contains "weight", "kernel"
        or "filter" — an approximation, since TFLite does not expose a
        layer graph.

        Args:
            model: Model dict.

        Returns:
            Estimated parameter count (0 on failure).
        """
        try:
            if isinstance(model, dict) and "interpreter" in model:
                interpreter = model["interpreter"]
            else:
                self.logger.error("Model does not contain a valid interpreter")
                return 0

            param_count = 0
            for detail in interpreter.get_tensor_details():
                tensor_name = detail.get("name", "").lower()
                if any(key in tensor_name for key in ("weight", "kernel", "filter")):
                    shape = detail.get("shape", [])
                    if len(shape):
                        param_count += int(np.prod(shape))

            return param_count

        except Exception as e:
            self.logger.error(f"Failed to get parameter count: {str(e)}")
            return 0

    def measure_layer_latency(self, model: Any, sample_input: Any) -> Dict[str, float]:
        """
        Measure per-layer latency.

        Unsupported: TFLite does not expose per-op timing through this API.

        Args:
            model: Model dict.
            sample_input: Sample input.

        Returns:
            Empty dict.
        """
        self.logger.warning("Layer-wise latency measurement is not supported in TFLite")
        return {}

    def clone_model(self, model: Any) -> Any:
        """
        Clone a TFLite model by reloading it from its file.

        Args:
            model: Original model dict.

        Returns:
            A freshly loaded model dict, or the original model when no
            valid path is available.
        """
        try:
            if isinstance(model, dict):
                path = model.get("path")
            else:
                self.logger.error("Model is not in the expected format")
                return model

            # Interpreters cannot be deep-copied; reload from disk instead.
            if path and os.path.exists(path):
                return self.load_model(path)

            self.logger.error("Cannot clone model without valid path")
            return model

        except Exception as e:
            self.logger.error(f"Failed to clone model: {str(e)}")
            return model

    def export_model(self, model: Any, output_path: str, format: str = "native") -> str:
        """
        Export the model to the given format.

        Args:
            model: Model dict.
            output_path: Destination path.
            format: Export format ('native' or 'tflite').

        Returns:
            The exported file path, or "" on failure.
        """
        try:
            self._ensure_parent_dir(output_path)

            if format not in ("native", "tflite"):
                self.logger.error(f"Unsupported export format for TFLite: {format}")
                return ""

            exported = self._copy_model_file(model, output_path)
            if exported:
                self.logger.info(f"Model exported to {output_path}")
            return exported

        except Exception as e:
            self.logger.error(f"Failed to export model: {str(e)}")
            return ""