# uamcf/adapters/tensorflow.py
import os
import time
import copy
import numpy as np
import tracemalloc
from typing import Dict, Any, Optional, List, Tuple, Union

from .base import ModelAdapter
from ..utils.logger import get_logger


class TensorFlowAdapter(ModelAdapter):
    """
    TensorFlow model adapter implementing the TensorFlow-specific interface.
    """

    def __init__(self):
        """Initialize the TensorFlow adapter.

        Imports TensorFlow lazily and enables memory growth on every visible
        GPU so the process does not grab all device memory up front. When
        TensorFlow is missing, an error is logged and the adapter is left
        without a ``tf`` attribute (matching the original best-effort design).
        """
        super().__init__()
        try:
            import tensorflow as tf
        except ImportError:
            self.logger.error("TensorFlow is not installed. Please install with 'pip install tensorflow'")
            return

        self.tf = tf
        self.logger.info("TensorFlow adapter initialized")

        gpus = tf.config.experimental.list_physical_devices('GPU')
        if not gpus:
            return
        try:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            self.logger.info(f"Found {len(gpus)} GPU(s), memory growth enabled")
        except RuntimeError as e:
            # Memory growth must be set before GPUs are initialized; if it is
            # too late, warn and continue.
            self.logger.warning(f"GPU memory growth setting failed: {str(e)}")

    def load_model(self, model_path: str) -> Any:
        """Load a TensorFlow model from disk.

        Args:
            model_path: Path to a SavedModel directory or an HDF5 file.

        Returns:
            The loaded TensorFlow model, or None for unsupported formats or
            on failure.
        """
        try:
            # SavedModel directories and HDF5 files both go through the same
            # Keras loader; the directory check comes first so a directory
            # with an odd name is still treated as a SavedModel.
            if os.path.isdir(model_path) or model_path.endswith(('.h5', '.hdf5')):
                model = self.tf.keras.models.load_model(model_path)
                self.logger.info(f"Model loaded from {model_path}")
                return model

            if model_path.endswith('.pb'):
                # Frozen graphs cannot be loaded generically.
                self.logger.warning("Direct loading of .pb files requires custom handling")
                return None

            self.logger.error(f"Unsupported model format: {model_path}")
            return None

        except Exception as e:
            self.logger.error(f"Failed to load model: {str(e)}")
            return None

    def save_model(self, model: Any, save_path: str) -> str:
        """Save a TensorFlow model.

        Args:
            model: Model to save (any object exposing a Keras-style ``save``).
            save_path: Destination path; a ``.h5``/``.hdf5`` suffix selects the
                HDF5 format, anything else the SavedModel format (Keras infers
                the format from the path).

        Returns:
            The path actually written, or "" on failure.
        """
        try:
            # BUG FIX: os.makedirs("") raises FileNotFoundError when save_path
            # has no directory component — only create a directory if present.
            directory = os.path.dirname(save_path)
            if directory:
                os.makedirs(directory, exist_ok=True)

            # The original branched on the extension but both branches called
            # model.save(save_path) identically; Keras already selects the
            # format from the extension, so a single call suffices.
            model.save(save_path)

            self.logger.info(f"Model saved to {save_path}")
            return save_path

        except Exception as e:
            self.logger.error(f"Failed to save model: {str(e)}")
            return ""

    def get_model_info(self, model: Any) -> Dict:
        """Collect basic information about a TensorFlow model.

        Args:
            model: TensorFlow model.

        Returns:
            Dict with model type, parameter count, weight size in bytes and,
            for layered models, layer count and whether batch normalization
            is present. Keys may be missing if inspection fails part-way.
        """
        info: Dict[str, Any] = {}

        try:
            info["type"] = type(model).__name__
            info["parameters"] = self.get_parameter_count(model)
            info["size_bytes"] = self.get_model_size(model)

            if hasattr(model, 'layers'):
                info["layer_count"] = len(model.layers)
                # Detect batch-normalization layers by class name.
                info["has_batch_norm"] = any(
                    'batch_normalization' in type(layer).__name__.lower()
                    for layer in model.layers
                )

        except Exception as e:
            self.logger.error(f"Failed to get model info: {str(e)}")

        return info

    def get_layers_info(self, model: Any) -> Dict[str, Dict]:
        """Describe every layer of a TensorFlow model.

        Args:
            model: TensorFlow model.

        Returns:
            Mapping of layer name to a dict of attributes: type, index,
            trainability, parameter count, (batch-less) input/output shapes,
            convolution settings where applicable, and a rough memory
            estimate. Empty on failure.
        """
        result: Dict[str, Dict] = {}

        try:
            if not hasattr(model, 'layers'):
                self.logger.error("Model does not have layers attribute")
                return {}

            for idx, layer in enumerate(model.layers):
                key = layer.name or f"layer_{idx}"
                param_count = layer.count_params()

                entry: Dict[str, Any] = {
                    "type": type(layer).__name__,
                    "index": idx,
                    "is_trainable": layer.trainable,
                    "parameters": param_count,
                }

                # Strip the leading batch dimension from the shapes.
                # NOTE(review): assumes single-input/-output layers — for
                # multi-IO layers these attributes hold lists of tuples;
                # confirm against actual models used.
                if hasattr(layer, 'input_shape'):
                    entry["input_shape"] = list(layer.input_shape[1:])
                if hasattr(layer, 'output_shape'):
                    entry["output_shape"] = list(layer.output_shape[1:])

                # Convolution-specific attributes.
                if 'conv' in type(layer).__name__.lower():
                    if hasattr(layer, 'kernel_size'):
                        entry["kernel_size"] = list(layer.kernel_size)
                    if hasattr(layer, 'filters'):
                        entry["filters"] = layer.filters
                    if hasattr(layer, 'strides'):
                        entry["strides"] = list(layer.strides)
                    if hasattr(layer, 'padding'):
                        entry["padding"] = layer.padding

                # Rough memory estimate: float32 weights, 4 bytes each.
                entry["memory_size"] = param_count * 4

                result[key] = entry

        except Exception as e:
            self.logger.error(f"Failed to get layers info: {str(e)}")

        return result

    def get_layer(self, model: Any, layer_name: str) -> Any:
        """Fetch a layer from the model by name.

        Args:
            model: TensorFlow model.
            layer_name: Name of the layer to look up.

        Returns:
            The layer object, or None when the model lacks ``get_layer`` or
            the lookup fails.
        """
        if not hasattr(model, 'get_layer'):
            self.logger.error("Model does not support get_layer method")
            return None

        try:
            return model.get_layer(layer_name)
        except Exception as e:
            self.logger.error(f"Failed to get layer: {str(e)}")
            return None

    def replace_layer(self, model: Any, layer_name: str, new_layer: Any) -> bool:
        """Rebuild the model with ``layer_name`` replaced by ``new_layer``.

        Keras does not support in-place layer replacement, so the graph is
        re-traced layer by layer. The rebuilt model is only constructed (which
        validates the wiring) and then discarded — the caller's reference to
        ``model`` is NOT updated. This is a known design limitation of this
        adapter.

        Args:
            model: TensorFlow model. NOTE(review): the re-trace assumes a
                linear (Sequential-like) topology; branching graphs are not
                re-wired correctly — confirm before using on functional models.
            layer_name: Name of the layer to replace.
            new_layer: Replacement layer (callable on the running tensor).

        Returns:
            True if a rebuilt model containing the replacement could be
            constructed, False otherwise.
        """
        try:
            self.logger.warning("Direct layer replacement in TensorFlow requires model reconstruction")

            # Bail out early if the target layer does not exist.
            if self.get_layer(model, layer_name) is None:
                return False

            inputs = model.inputs
            x = inputs
            outputs = []
            replaced = False

            # Names of the model's output tensors (op suffix stripped), used
            # to detect which layer outputs feed the model outputs. Hoisted
            # out of the loop — it is loop-invariant.
            output_names = [output.name.split('/')[0] for output in model.outputs]

            for layer in model.layers:
                if layer.name == layer_name:
                    # BUG FIX: the original special-cased list inputs here but
                    # both branches were byte-identical — one call suffices.
                    x = new_layer(x)
                    replaced = True
                else:
                    x = layer(x)

                if layer.name in output_names:
                    outputs.append(x)

            if not replaced or not outputs:
                self.logger.error("Failed to rebuild model with replaced layer")
                return False

            # Constructing the Model validates the rebuilt graph; any wiring
            # problem raises and is reported below. The instance itself is
            # intentionally not kept (see docstring).
            self.tf.keras.Model(inputs=inputs, outputs=outputs)

            self.logger.warning("Model rebuilt with replaced layer, but original model reference unchanged")

            return True

        except Exception as e:
            self.logger.error(f"Failed to replace layer: {str(e)}")
            return False

    def generate_sample_input(self, model: Any) -> Any:
        """Generate a random sample input matching the model's input shape.

        Args:
            model: TensorFlow model.

        Returns:
            A random normal tensor (or a list of tensors for multi-input
            models). When the input shape cannot be determined or generation
            fails, a default (1, 224, 224, 3) tensor is returned.
        """
        def _concrete(shape):
            # tf.random.normal rejects None dimensions; substitute 1 for the
            # (typically unknown) batch dimension and any other None dims.
            return tuple(1 if dim is None else dim for dim in shape)

        try:
            if not hasattr(model, 'input_shape'):
                self.logger.warning("Could not determine model input shape, using default")
                return self.tf.random.normal((1, 224, 224, 3))

            input_shape = model.input_shape

            # Multi-input models expose a list of shapes. BUG FIX: the
            # original passed shapes with a None batch dimension straight to
            # tf.random.normal, which raises — normalize them first.
            if isinstance(input_shape, list):
                return [self.tf.random.normal(_concrete(shape)) for shape in input_shape]

            return self.tf.random.normal(_concrete(input_shape))

        except Exception as e:
            self.logger.error(f"Failed to generate sample input: {str(e)}")
            # Fall back to a default image-like input.
            return self.tf.random.normal((1, 224, 224, 3))

    def measure_inference_time(self, model: Any, input_data: Any, num_runs: int = 10) -> float:
        """Measure the average inference latency of a TensorFlow model.

        Args:
            model: TensorFlow model (callable).
            input_data: Input tensor(s); generated automatically when None.
            num_runs: Number of timed runs to average over.

        Returns:
            Average inference time in milliseconds, or 0.0 on failure.
        """
        try:
            if input_data is None:
                input_data = self.generate_sample_input(model)

            # Warm-up runs so tracing/compilation and lazy allocations do not
            # pollute the measurement.
            for _ in range(3):
                _ = model(input_data)

            # BUG FIX: time.time() is not monotonic (NTP/clock adjustments can
            # skew intervals); perf_counter() is the correct interval clock.
            start = time.perf_counter()
            for _ in range(num_runs):
                _ = model(input_data)
            elapsed = time.perf_counter() - start

            # Average in milliseconds.
            return elapsed / num_runs * 1000

        except Exception as e:
            self.logger.error(f"Failed to measure inference time: {str(e)}")
            return 0.0

    def measure_memory_usage(self, model: Any, input_data: Any = None) -> float:
        """Measure peak Python-level memory allocation during one inference.

        Note: tracemalloc only traces allocations made through the Python
        allocator; memory allocated by TensorFlow's native runtime (or on a
        GPU) is not captured.

        Args:
            model: TensorFlow model (callable).
            input_data: Input tensor(s); generated automatically when None.

        Returns:
            Peak traced memory in MB, or 0.0 on failure.
        """
        tracemalloc.start()
        try:
            if input_data is None:
                input_data = self.generate_sample_input(model)

            # Run one inference pass under tracing.
            _ = model(input_data)

            _, peak = tracemalloc.get_traced_memory()
            return peak / (1024 * 1024)

        except Exception as e:
            self.logger.error(f"Failed to measure memory usage: {str(e)}")
            return 0.0
        finally:
            # BUG FIX: the original skipped tracemalloc.stop() when inference
            # raised, leaking an active tracing session that slows the whole
            # process; always stop tracing.
            tracemalloc.stop()

    def evaluate_accuracy(self, model: Any, eval_data: Any) -> float:
        """Evaluate model accuracy on a tf.data.Dataset.

        Args:
            model: Compiled TensorFlow model.
            eval_data: Evaluation data; must be a tf.data.Dataset.

        Returns:
            The second value reported by ``model.evaluate`` (assumed to be
            the accuracy metric), or 0.0 when only the loss is available, the
            data has the wrong type, or evaluation fails.
        """
        try:
            if not isinstance(eval_data, self.tf.data.Dataset):
                self.logger.error("Evaluation data should be a TensorFlow Dataset")
                return 0.0

            results = model.evaluate(eval_data)

            # evaluate() returns a scalar loss or [loss, metric, ...];
            # position 1 is assumed to hold the accuracy.
            if isinstance(results, list) and len(results) > 1:
                return results[1]
            return 0.0

        except Exception as e:
            self.logger.error(f"Failed to evaluate accuracy: {str(e)}")
            return 0.0

    def fine_tune(self, model: Any, train_data: Any, iterations: int = 5,
                  early_stopping: bool = True) -> Any:
        """Fine-tune a TensorFlow model on the given dataset.

        Args:
            model: TensorFlow model. Uncompiled models are compiled with a
                low-learning-rate Adam and sparse categorical cross-entropy.
            train_data: Training data; must be a tf.data.Dataset.
            iterations: Number of training epochs.
            early_stopping: Whether to stop early when the loss plateaus.

        Returns:
            The fine-tuned model (the unmodified input model on failure).
        """
        try:
            if not isinstance(train_data, self.tf.data.Dataset):
                self.logger.error("Training data should be a TensorFlow Dataset")
                return model

            callbacks = []
            if early_stopping:
                # BUG FIX: the original monitored 'val_loss', but fit() below
                # receives no validation data, so early stopping could never
                # trigger (Keras warns every epoch). Monitor training loss.
                callbacks.append(self.tf.keras.callbacks.EarlyStopping(
                    monitor='loss',
                    patience=2,
                    restore_best_weights=True
                ))

            # Compile only when needed, with a low learning rate suited to
            # fine-tuning. getattr guards models without an optimizer attr.
            if not getattr(model, 'optimizer', None):
                model.compile(
                    optimizer=self.tf.keras.optimizers.Adam(learning_rate=0.0001),
                    loss='sparse_categorical_crossentropy',
                    metrics=['accuracy']
                )

            # History object is not used; train in place.
            model.fit(
                train_data,
                epochs=iterations,
                callbacks=callbacks,
                verbose=1
            )

            self.logger.info("Fine-tuning completed")
            return model

        except Exception as e:
            self.logger.error(f"Failed to fine-tune model: {str(e)}")
            return model

    def get_model_size(self, model: Any) -> int:
        """Compute the in-memory size of the model's weights in bytes.

        Args:
            model: TensorFlow model.

        Returns:
            Total weight size in bytes (element count times dtype byte
            width), or 0 on failure.
        """
        try:
            # dtype.size is the per-element byte width (e.g. 4 for float32).
            return sum(
                self.tf.size(weight).numpy() * weight.dtype.size
                for weight in model.weights
            )

        except Exception as e:
            self.logger.error(f"Failed to get model size: {str(e)}")
            return 0

    def get_parameter_count(self, model: Any) -> int:
        """Count the model's parameters.

        Args:
            model: TensorFlow model.

        Returns:
            Parameter count via ``count_params`` when available, otherwise
            summed over the weight tensors; 0 on failure.
        """
        try:
            counter = getattr(model, 'count_params', None)
            if counter is not None:
                return counter()

            # Fall back to summing element counts over all weight tensors.
            return sum(self.tf.size(weight).numpy() for weight in model.weights)

        except Exception as e:
            self.logger.error(f"Failed to get parameter count: {str(e)}")
            return 0

    def measure_layer_latency(self, model: Any, sample_input: Any) -> Dict[str, float]:
        """Estimate per-layer inference latency.

        For each layer i, a prefix sub-model covering layers 0..i is timed;
        a layer's latency is the difference between consecutive cumulative
        timings.

        NOTE(review): re-calling layers on the model inputs assumes a linear
        (Sequential-like) topology; branching graphs are not re-wired
        correctly — confirm before using on functional models. Timing noise
        can also make individual deltas slightly negative.

        Args:
            model: TensorFlow model.
            sample_input: Sample input; generated automatically when None.

        Returns:
            Mapping of layer name to estimated latency in milliseconds
            (empty on failure).
        """
        try:
            if sample_input is None:
                sample_input = self.generate_sample_input(model)

            latency_results: Dict[str, float] = {}

            inputs = model.inputs
            x = inputs
            prev_cumulative = 0.0

            for i, layer in enumerate(model.layers):
                # Skip an explicit input layer; it performs no computation.
                if i == 0 and isinstance(layer, self.tf.keras.layers.InputLayer):
                    continue

                # Extend the running graph by one layer and time the whole
                # prefix sub-model. (The original rebuilt the prefix from
                # scratch for every layer — O(n^2) layer calls.)
                x = layer(x)
                sub_model = self.tf.keras.Model(inputs=inputs, outputs=x)
                cumulative = self.measure_inference_time(sub_model, sample_input, num_runs=5)

                # BUG FIX: the original subtracted the previous layer's
                # *delta* from the cumulative time instead of the previous
                # *cumulative* time, inflating every latency after the first.
                latency_results[layer.name] = cumulative - prev_cumulative
                prev_cumulative = cumulative

            return latency_results

        except Exception as e:
            self.logger.error(f"Failed to measure layer latency: {str(e)}")
            return {}

    def clone_model(self, model: Any) -> Any:
        """Create a copy of a TensorFlow model, including its weights.

        Args:
            model: Original model.

        Returns:
            A cloned model with identical weights, or the original model on
            failure.
        """
        try:
            clone = self.tf.keras.models.clone_model(model)
            # BUG FIX: clone_model copies only the architecture and
            # re-initializes the weights randomly — copy the weights over so
            # the result is a true clone.
            clone.set_weights(model.get_weights())
            return clone

        except Exception as e:
            self.logger.error(f"Failed to clone model: {str(e)}")
            return model

    def export_model(self, model: Any, output_path: str, format: str = "native") -> str:
        """Export a TensorFlow model to the requested format.

        Args:
            model: TensorFlow model.
            output_path: Destination path.
            format: Export format: 'native', 'saved_model', 'h5' or 'tflite'.
                (Parameter name shadows the builtin but is kept for
                backward compatibility with existing callers.)

        Returns:
            The exported file path, or "" on failure / unsupported format.
        """
        try:
            # BUG FIX: os.makedirs("") raises FileNotFoundError when
            # output_path has no directory component — only create a
            # directory if one is present.
            directory = os.path.dirname(output_path)
            if directory:
                os.makedirs(directory, exist_ok=True)

            if format in ("native", "saved_model"):
                # SavedModel format (Keras default for non-.h5 paths).
                model.save(output_path)
                return output_path

            if format == "h5":
                # HDF5 format.
                model.save(output_path, save_format='h5')
                return output_path

            if format == "tflite":
                # Convert to TFLite and write the flatbuffer to disk.
                converter = self.tf.lite.TFLiteConverter.from_keras_model(model)
                tflite_model = converter.convert()

                with open(output_path, 'wb') as f:
                    f.write(tflite_model)

                return output_path

            self.logger.error(f"Unsupported export format: {format}")
            return ""

        except Exception as e:
            self.logger.error(f"Failed to export model: {str(e)}")
            return ""