from typing import Dict, List, Optional, Tuple, Any
import torch
import torch.nn as nn
import numpy as np
from .data_storage import DataStorage

class ComputationManager:
    """Manages computation tasks on worker nodes.

    This class handles the execution of CNN computations (convolution,
    pooling, ReLU and batch normalization), and stores intermediate
    results through the data storage manager so they can be retrieved
    by id later.
    """

    def __init__(self, data_storage: DataStorage, device: str = 'cuda:0'):
        """Initialize the computation manager.

        Args:
            data_storage: Data storage manager for caching results
            device: PyTorch device to use for computation; silently falls
                back to CPU when CUDA is unavailable.
        """
        self.data_storage = data_storage
        self.device = torch.device(device if torch.cuda.is_available() else 'cpu')
        self.current_model: Optional[nn.Module] = None
        # NOTE(review): never populated by any method in this class —
        # confirm external consumers before removing.
        self.layer_outputs: Dict[str, torch.Tensor] = {}

    def load_model(self, model: nn.Module) -> None:
        """Load a PyTorch model for computation.

        The model is moved to this manager's device and switched to
        evaluation mode (inference-only usage).

        Args:
            model: PyTorch model to load
        """
        self.current_model = model.to(self.device)
        self.current_model.eval()  # Set to evaluation mode

    def execute_layer(self, layer_id: str, input_ids: List[str],
                      layer_config: Dict[str, Any]) -> Optional[str]:
        """Execute computation for a specific layer.

        Loads the input tensors from storage, runs the layer described by
        ``layer_config``, and stores the result under ``f"{layer_id}_output"``.

        Args:
            layer_id: Identifier for the layer
            input_ids: List of input tensor identifiers; multiple inputs are
                concatenated along ``layer_config['concat_dim']`` (default 1,
                presumably the channel axis of NCHW tensors — TODO confirm)
            layer_config: Configuration for the layer execution

        Returns:
            Optional[str]: Identifier of the output tensor if successful,
            None on any failure (missing input, unsupported layer type,
            storage error).
        """
        try:
            # Load and device-transfer all inputs up front so a missing
            # tensor fails before any computation happens.
            inputs = []
            for input_id in input_ids:
                tensor = self.data_storage.load_tensor(input_id)
                if tensor is None:
                    raise ValueError(f"Input tensor {input_id} not found")
                inputs.append(torch.from_numpy(tensor).to(self.device))

            if not inputs:
                # Explicit guard: torch.cat([]) would raise a far less
                # helpful error. Caller still receives None, as before.
                raise ValueError("No input tensors provided")

            # Execute layer
            with torch.no_grad():
                if len(inputs) == 1:
                    input_tensor = inputs[0]
                else:
                    input_tensor = torch.cat(inputs, dim=layer_config.get('concat_dim', 1))

                output = self._execute_layer_forward(input_tensor, layer_config)

                # Store result. detach() keeps this safe even if a caller
                # ever invokes us outside the no_grad context.
                output_id = f"{layer_id}_output"
                output_np = output.detach().cpu().numpy()

                if self.data_storage.store_tensor(
                    output_id,
                    output_np,
                    metadata={
                        'layer_id': layer_id,
                        'shape': list(output_np.shape),
                        'config': layer_config
                    }
                ):
                    return output_id
                raise RuntimeError("Failed to store output tensor")

        except Exception as e:
            # Broad catch is deliberate: a single bad layer must report
            # failure via None rather than crash the worker.
            print(f"Error executing layer {layer_id}: {str(e)}")
            return None

    def _execute_layer_forward(self, input_tensor: torch.Tensor,
                              layer_config: Dict[str, Any]) -> torch.Tensor:
        """Execute forward computation for a single layer.

        Args:
            input_tensor: Input tensor (4-D NCHW expected for conv/pool/
                batch-norm — TODO confirm with callers)
            layer_config: Layer configuration; ``'type'`` selects one of
                'conv2d', 'pool2d', 'relu', 'batch_norm' and the remaining
                keys parameterize the layer

        Returns:
            torch.Tensor: Output tensor

        Raises:
            ValueError: If ``layer_config['type']`` is not supported.
            KeyError: If a required configuration key is missing.
        """
        layer_type = layer_config['type']

        if layer_type == 'conv2d':
            layer = nn.Conv2d(
                in_channels=layer_config['in_channels'],
                out_channels=layer_config['out_channels'],
                kernel_size=layer_config['kernel_size'],
                stride=layer_config.get('stride', 1),
                padding=layer_config.get('padding', 0)
            ).to(self.device)

            # Load weights if provided, casting to the layer parameter's
            # dtype: config values typically arrive as Python lists, which
            # would otherwise become float64 and fail against float32 inputs.
            if 'weights' in layer_config:
                weights = torch.as_tensor(layer_config['weights'],
                                          dtype=layer.weight.dtype)
                layer.weight.data = weights.to(self.device)
            if 'bias' in layer_config:
                bias = torch.as_tensor(layer_config['bias'],
                                       dtype=layer.bias.dtype)
                layer.bias.data = bias.to(self.device)
            # NOTE(review): when 'weights' is given without 'bias', the layer
            # keeps its randomly initialized bias — confirm configs always
            # ship both or switch to bias=False in that case.

            return layer(input_tensor)

        elif layer_type == 'pool2d':
            # stride=None makes PyTorch default the stride to kernel_size.
            if layer_config.get('pool_type', 'max') == 'max':
                layer = nn.MaxPool2d(
                    kernel_size=layer_config['kernel_size'],
                    stride=layer_config.get('stride', None),
                    padding=layer_config.get('padding', 0)
                )
            else:
                layer = nn.AvgPool2d(
                    kernel_size=layer_config['kernel_size'],
                    stride=layer_config.get('stride', None),
                    padding=layer_config.get('padding', 0)
                )
            return layer(input_tensor)

        elif layer_type == 'relu':
            return torch.relu(input_tensor)

        elif layer_type == 'batch_norm':
            layer = nn.BatchNorm2d(
                num_features=layer_config['num_features'],
                eps=layer_config.get('eps', 1e-5),
                momentum=layer_config.get('momentum', 0.1)
            ).to(self.device)

            # Cast to the buffer's dtype for the same float64-vs-float32
            # reason as the conv weights above.
            if 'running_mean' in layer_config:
                layer.running_mean = torch.as_tensor(
                    layer_config['running_mean'],
                    dtype=layer.running_mean.dtype
                ).to(self.device)
            if 'running_var' in layer_config:
                layer.running_var = torch.as_tensor(
                    layer_config['running_var'],
                    dtype=layer.running_var.dtype
                ).to(self.device)

            # BUG FIX: a freshly constructed BatchNorm2d is in training mode,
            # so it would normalize with the current batch's statistics and
            # overwrite the running stats loaded above. eval() makes it use
            # the provided running_mean/running_var for inference.
            layer.eval()
            return layer(input_tensor)

        else:
            raise ValueError(f"Unsupported layer type: {layer_type}")

    def get_output_shape(self, layer_id: str) -> Optional[Tuple[int, ...]]:
        """Get the shape of a layer's output tensor.

        Args:
            layer_id: Layer identifier

        Returns:
            Optional[Tuple[int, ...]]: Shape of the output tensor, or None
            if the layer's output (or its shape metadata) is not stored.
        """
        output_id = f"{layer_id}_output"
        metadata = self.data_storage.get_metadata(output_id)
        if metadata and 'shape' in metadata:
            return tuple(metadata['shape'])
        return None

    def clear_layer_cache(self, layer_id: str) -> None:
        """Clear cached data for a specific layer.

        Args:
            layer_id: Layer identifier
        """
        output_id = f"{layer_id}_output"
        self.data_storage.remove_tensor(output_id)

    def get_memory_usage(self) -> Dict[str, float]:
        """Get current GPU memory usage statistics.

        Returns:
            Dict[str, float]: 'allocated' and 'cached' memory in MB;
            both 0.0 when CUDA is unavailable.
        """
        if torch.cuda.is_available():
            return {
                'allocated': torch.cuda.memory_allocated(self.device) / 1024 / 1024,
                'cached': torch.cuda.memory_reserved(self.device) / 1024 / 1024
            }
        return {'allocated': 0.0, 'cached': 0.0}