"""
CUDA/HIP analyzer for GPU programming support.

Provides analysis of CUDA kernels, device functions, memory operations,
and HIP portability layer code.
"""

import re
from typing import Dict, List, Any, Optional, Set, Tuple
from pathlib import Path
import logging

from ...core.error_handler import AnalysisError, log_info, log_debug


class CudaHipAnalyzer:
    """Analyzer for CUDA and HIP GPU programming constructs."""
    
    def __init__(self):
        """Initialize pattern tables and empty per-run result containers."""
        # Per-run result accumulators.
        self.kernel_launches = []
        self.device_functions = []
        self.memory_operations = []
        self.synchronization_points = []
        self.stream_operations = []
        self.gpu_types = set()

        # Execution-space / qualifier keywords recognized in function signatures.
        self.cuda_keywords = {
            '__global__', '__device__', '__host__', '__constant__', '__shared__',
            '__restrict__', '__noinline__', '__forceinline__'
        }
        # HIP shares the core qualifiers but not the CUDA-only inlining hints.
        self.hip_keywords = self.cuda_keywords - {
            '__restrict__', '__noinline__', '__forceinline__'
        }

        # Memory-management API names. CUDA and HIP mirror each other except
        # for the pinned-host allocation pair (cudaHostAlloc/cudaFreeHost vs
        # hipHostMalloc/hipHostFree).
        shared_suffixes = (
            'Malloc', 'Free', 'Memcpy', 'MemcpyAsync',
            'Memset', 'MemsetAsync', 'MallocManaged', 'MemPrefetchAsync'
        )
        self.cuda_memory_functions = {'cuda' + s for s in shared_suffixes}
        self.cuda_memory_functions |= {'cudaHostAlloc', 'cudaFreeHost'}
        self.hip_memory_functions = {'hip' + s for s in shared_suffixes}
        self.hip_memory_functions |= {'hipHostMalloc', 'hipHostFree'}

        # Regex patterns for GPU-specific types (launch config, streams,
        # events, error codes, and short vector types like float4).
        self.gpu_type_patterns = {
            r'dim3\b', r'cudaStream_t\b', r'cudaEvent_t\b', r'cudaError_t\b',
            r'hipStream_t\b', r'hipEvent_t\b', r'hipError_t\b',
            r'float[2-4]\b', r'int[2-4]\b', r'uint[2-4]\b', r'double[2-4]\b'
        }
    
    def analyze_gpu_code(self, ast_data: Dict[str, Any], source_code: Optional[str] = None) -> Dict[str, Any]:
        """
        Analyze GPU-specific constructs in the code.

        Args:
            ast_data: AST data from the C++ parser. Missing keys
                ('functions', 'function_calls', 'variables', 'includes')
                are treated as empty lists instead of raising KeyError.
            source_code: Raw source code for pattern matching (optional).

        Returns:
            Dictionary containing GPU analysis results.
        """
        # Normalize the AST payload once so a partially populated parser
        # result (e.g. no variables collected) cannot crash the analysis.
        functions = ast_data.get('functions', [])
        function_calls = ast_data.get('function_calls', [])
        variables = ast_data.get('variables', [])
        includes = ast_data.get('includes', [])
        normalized = {
            'functions': functions,
            'function_calls': function_calls,
            'variables': variables,
            'includes': includes,
        }

        gpu_analysis = {
            'kernel_launches': self._analyze_kernel_launches(function_calls, source_code),
            'device_functions': self._analyze_device_functions(functions),
            'memory_operations': self._analyze_memory_operations(function_calls),
            'synchronization_points': self._analyze_synchronization(function_calls),
            'stream_operations': self._analyze_stream_operations(function_calls),
            'gpu_types_used': self._analyze_gpu_types(variables, functions),
            'cuda_includes': [],
            'hip_includes': [],
            'programming_model': self._detect_programming_model(normalized, source_code),
            'performance_hints': []
        }

        # Includes split into CUDA vs HIP families.
        gpu_analysis['cuda_includes'], gpu_analysis['hip_includes'] = self._analyze_gpu_includes(includes)

        # Hints are derived last because they read the aggregated results.
        gpu_analysis['performance_hints'] = self._generate_performance_hints(gpu_analysis)

        return gpu_analysis
    
    def _detect_programming_model(self, ast_data: Dict[str, Any], source_code: Optional[str] = None) -> str:
        """Classify the code as 'CUDA', 'HIP', 'Mixed CUDA/HIP', or 'Standard C++'.

        Scores weighted evidence: includes count 2, API-call name prefixes
        count 1, and launch syntax found in the raw source counts 3. The
        larger total wins; equal non-zero totals report mixed code.
        """
        cuda_score = 0
        hip_score = 0

        # Includes are strong evidence of the toolchain in use.
        for include in ast_data.get('includes', []):
            header = include['file'].lower()
            if any(tag in header for tag in ('cuda', 'cublas', 'cufft', 'curand')):
                cuda_score += 2
            elif any(tag in header for tag in ('hip/', 'rocblas', 'rocfft', 'rocrand')):
                hip_score += 2

        # API-name prefixes. 'roc' covers the whole ROCm family (rocblas,
        # rocfft, ...), so a separate 'rocblas' test would be redundant.
        for call in ast_data.get('function_calls', []):
            name = call['function_name']
            if name.startswith('cuda'):
                cuda_score += 1
            elif name.startswith(('hip', 'roc')):
                hip_score += 1

        # Launch syntax in the raw source is the strongest single indicator.
        if source_code:
            if 'hipLaunchKernelGGL' in source_code:
                hip_score += 3
            if re.search(r'<<<.*>>>', source_code):
                cuda_score += 3

        if cuda_score > hip_score:
            return 'CUDA'
        if hip_score > cuda_score:
            return 'HIP'
        if cuda_score or hip_score:
            return 'Mixed CUDA/HIP'
        return 'Standard C++'
    
    def _analyze_device_functions(self, functions: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Collect functions carrying GPU execution-space attributes.

        A function qualifies if the parser reported a global/device/host
        attribute on it, or its textual signature contains a CUDA/HIP
        keyword such as ``__global__``.
        """
        device_functions = []

        # hip_keywords is a subset of cuda_keywords, so the union equals
        # cuda_keywords; sorting it makes gpu_attributes order deterministic
        # (iterating a raw set produced an arbitrary, run-dependent order).
        gpu_keywords = sorted(self.cuda_keywords | self.hip_keywords)

        for func in functions:
            gpu_attrs = []

            # Attributes reported by the parser.
            for attr in func.get('attributes', []):
                attr_name = attr.get('name', '').lower()
                if any(word in attr_name for word in ('global', 'device', 'host')):
                    gpu_attrs.append(attr_name)

            # Raw qualifier keywords in the textual signature.
            func_signature = func.get('signature', '')
            for keyword in gpu_keywords:
                if keyword in func_signature:
                    gpu_attrs.append(keyword)

            if not gpu_attrs:
                continue

            device_func = {
                'name': func['name'],
                'qualified_name': func['qualified_name'],
                'location': func['location'],
                'gpu_attributes': gpu_attrs,
                'is_kernel': '__global__' in gpu_attrs,
                'is_device_function': '__device__' in gpu_attrs,
                'is_host_device': '__host__' in gpu_attrs and '__device__' in gpu_attrs,
                'parameters': func.get('parameters', []),
                'return_type': func.get('return_type', {}),
                'template_info': func.get('template_info', {}) if func.get('is_template') else None
            }

            # Placeholder analyses until function-body data is available.
            device_func['shared_memory_usage'] = self._analyze_shared_memory_usage(func)
            device_func['uses_syncthreads'] = self._uses_syncthreads(func)

            device_functions.append(device_func)

        return device_functions
    
    def _analyze_kernel_launches(self, function_calls: List[Dict[str, Any]], source_code: Optional[str] = None) -> List[Dict[str, Any]]:
        """Find kernel launches in both CUDA chevron and HIP GGL styles.

        CUDA ``kernel<<<grid, block[, shmem[, stream]]>>>(args)`` launches are
        matched textually in *source_code*; ``hipLaunchKernelGGL`` launches
        come from the parsed call list.
        """
        kernel_launches = []

        # CUDA-style chevron launches. NOTE: templated kernel names
        # (kernel<T><<<...>>>) are not matched by this pattern.
        if source_code:
            launch_re = re.compile(r'(\w+)\s*<<<\s*([^>]+)\s*>>>\s*\([^)]*\)')
            for match in launch_re.finditer(source_code):
                dimensions = [dim.strip() for dim in match.group(2).split(',')]
                kernel_launches.append({
                    'kernel_name': match.group(1),
                    'launch_style': 'cuda_chevron',
                    'grid_config': self._parse_grid_config(dimensions),
                    # 1-based line of the match within source_code.
                    'location': {'line': source_code[:match.start()].count('\n') + 1},
                    'source_pattern': match.group(0)
                })

        # HIP-style launches: hipLaunchKernelGGL(kernel, grid, block, shmem,
        # stream, kernel_args...). The first four arguments are mandatory;
        # shorter calls are malformed and skipped. (The original code also
        # re-checked len(args) > 1/2/3 inside the >= 4 branch — dead guards.)
        for call in function_calls:
            if call['function_name'] != 'hipLaunchKernelGGL':
                continue
            args = call.get('arguments', [])
            if len(args) < 4:
                continue
            kernel_launches.append({
                'kernel_name': args[0].get('value', 'unknown'),
                'launch_style': 'hip_ggl',
                'grid_config': {
                    'grid_dim': args[1].get('value', 'unknown'),
                    'block_dim': args[2].get('value', 'unknown'),
                    'shared_memory': args[3].get('value', '0'),
                    'stream': args[4].get('value', 'default') if len(args) > 4 else 'default'
                },
                'location': call['location'],
                'arguments': args[5:]
            })

        return kernel_launches
    
    def _parse_grid_config(self, dimensions: List[str]) -> Dict[str, str]:
        """Map the comma-separated chevron arguments onto named launch fields.

        The four positional fields are grid, block, shared-memory bytes, and
        stream; trailing fields that were not supplied keep their defaults.
        """
        config = {
            'grid_dim': 'unknown',
            'block_dim': 'unknown',
            'shared_memory': '0',
            'stream': 'default'
        }
        field_order = ('grid_dim', 'block_dim', 'shared_memory', 'stream')
        # zip stops at whichever is shorter, so extra dimensions are ignored
        # and missing ones keep the defaults above.
        for field, raw_value in zip(field_order, dimensions):
            config[field] = raw_value.strip()
        return config
    
    def _analyze_memory_operations(self, function_calls: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Extract CUDA/HIP memory-API calls and tag each with metadata."""
        recognized = self.cuda_memory_functions | self.hip_memory_functions
        gpu_mem_calls = []

        for call in function_calls:
            name = call['function_name']
            if name not in recognized:
                continue
            gpu_mem_calls.append({
                'function': name,
                'location': call['location'],
                'arguments': call.get('arguments', []),
                'operation_type': self._classify_memory_operation(name),
                # Every recognized name starts with either 'cuda' or 'hip'.
                'api_type': 'CUDA' if name.startswith('cuda') else 'HIP',
                'is_async': 'Async' in name,
                'is_managed': 'Managed' in name
            })

        return gpu_mem_calls
    
    def _classify_memory_operation(self, func_name: str) -> str:
        """Map a memory-API function name onto a coarse operation category."""
        # Ordered rules: earlier tokens take precedence, mirroring the
        # if/elif precedence (e.g. 'cudaMallocManaged' is an allocation,
        # not a managed-memory special case).
        rules = (
            (('Malloc', 'Alloc'), 'allocation'),
            (('Free',), 'deallocation'),
            (('Memcpy',), 'transfer'),
            (('Memset',), 'initialization'),
            (('Prefetch',), 'prefetch'),
        )
        for tokens, category in rules:
            if any(token in func_name for token in tokens):
                return category
        return 'unknown'
    
    def _analyze_synchronization(self, function_calls: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Collect host-side and device-side synchronization calls."""
        recognized = {
            'cudaDeviceSynchronize', 'cudaStreamSynchronize', 'cudaEventSynchronize',
            '__syncthreads', '__syncwarp', 'cudaThreadSynchronize',
            'hipDeviceSynchronize', 'hipStreamSynchronize', 'hipEventSynchronize'
        }
        found = []

        for call in function_calls:
            name = call['function_name']
            if name not in recognized:
                continue
            # Intrinsics like __syncthreads belong to neither host API.
            if name.startswith('cuda'):
                api = 'CUDA'
            elif name.startswith('hip'):
                api = 'HIP'
            else:
                api = 'Device'
            found.append({
                'function': name,
                'location': call['location'],
                'sync_scope': self._get_sync_scope(name),
                'api_type': api
            })

        return found
    
    def _get_sync_scope(self, func_name: str) -> str:
        """Infer the synchronization scope from the function's name."""
        # Ordered rules preserve the original if/elif precedence:
        # Device/Stream/Event host APIs first, then device intrinsics.
        scope_rules = (
            ('Device', 'device'),
            ('Stream', 'stream'),
            ('Event', 'event'),
            ('syncthreads', 'block'),
            ('syncwarp', 'warp'),
        )
        for token, scope in scope_rules:
            if token in func_name:
                return scope
        return 'unknown'
    
    def _analyze_stream_operations(self, function_calls: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Collect stream/event lifecycle calls (create, destroy, record, wait)."""
        recognized = {
            'cudaStreamCreate', 'cudaStreamDestroy', 'cudaStreamWaitEvent',
            'cudaEventCreate', 'cudaEventDestroy', 'cudaEventRecord',
            'hipStreamCreate', 'hipStreamDestroy', 'hipStreamWaitEvent',
            'hipEventCreate', 'hipEventDestroy', 'hipEventRecord'
        }
        found = []

        for call in function_calls:
            name = call['function_name']
            if name not in recognized:
                continue
            found.append({
                'function': name,
                'location': call['location'],
                'operation_type': self._classify_stream_operation(name),
                # Every recognized name starts with 'cuda' or 'hip'.
                'api_type': 'CUDA' if name.startswith('cuda') else 'HIP'
            })

        return found
    
    def _classify_stream_operation(self, func_name: str) -> str:
        """Map a stream/event API name onto a lifecycle category."""
        # Ordered rules mirror the original if/elif precedence; e.g.
        # 'cudaStreamWaitEvent' classifies by 'Wait', not by 'Event'.
        rules = (
            ('Create', 'creation'),
            ('Destroy', 'destruction'),
            ('Wait', 'synchronization'),
            ('Record', 'event_recording'),
        )
        for token, category in rules:
            if token in func_name:
                return category
        return 'unknown'
    
    def _analyze_gpu_types(self, variables: List[Dict[str, Any]], functions: List[Dict[str, Any]]) -> List[str]:
        """Collect GPU-specific type names used by variables and functions.

        Scans variable types, function return types, and parameter types
        against ``self.gpu_type_patterns``. Returns a sorted list so the
        output is deterministic (the original ``list(set)`` order was
        arbitrary and varied between runs).
        """
        gpu_types = set()

        def record(type_name: str) -> None:
            # A type counts as GPU-specific if any known pattern matches.
            if any(re.search(pattern, type_name) for pattern in self.gpu_type_patterns):
                gpu_types.add(type_name)

        # Variable declarations.
        for var in variables:
            record(var.get('type', {}).get('spelling', ''))

        # Function return and parameter types.
        for func in functions:
            record(func.get('return_type', {}).get('spelling', ''))
            for param in func.get('parameters', []):
                record(param.get('type', {}).get('spelling', ''))

        return sorted(gpu_types)
    
    def _analyze_gpu_includes(self, includes: List[Dict[str, Any]]) -> Tuple[List[str], List[str]]:
        """Split the include list into CUDA-family and HIP-family headers.

        Matching is a case-insensitive substring test; an include may land
        in both lists if it matches both families. Returns
        ``(cuda_includes, hip_includes)``.
        """
        # Hoisted out of the loop: these tables are invariant, and the
        # original rebuilt both lists on every iteration.
        cuda_headers = (
            'cuda', 'cuda_runtime', 'cublas', 'cufft', 'curand', 'cusparse',
            'cudnn', 'npp', 'thrust/', 'cub/', 'cooperative_groups'
        )
        hip_headers = (
            'hip/', 'rocblas', 'rocfft', 'rocrand', 'rocsparse',
            'miopen', 'rccl'
        )

        cuda_includes = []
        hip_includes = []

        for include in includes:
            file_name = include['file'].lower()
            if any(header in file_name for header in cuda_headers):
                cuda_includes.append(include['file'])
            if any(header in file_name for header in hip_headers):
                hip_includes.append(include['file'])

        return cuda_includes, hip_includes
    
    def _analyze_shared_memory_usage(self, func: Dict[str, Any]) -> Dict[str, Any]:
        """Return a shared-memory usage report for *func*.

        Real analysis needs function-body AST data that the parser does not
        yet provide, so every function currently reports zero usage; callers
        get a stable schema they can rely on once the analysis is filled in.
        """
        empty_report = {
            'static_shared_memory': 0,  # bytes (always 0 until implemented)
            'dynamic_shared_memory': False,
            'shared_memory_variables': []
        }
        return empty_report
    
    def _uses_syncthreads(self, func: Dict[str, Any]) -> bool:
        """Report whether *func* synchronizes its thread block.

        Detecting __syncthreads() calls requires function-body call data
        that the parser does not yet expose, so this conservatively
        reports False for every function.
        """
        # TODO: inspect the function body once call-site data is available.
        return False
    
    def _generate_performance_hints(self, gpu_analysis: Dict[str, Any]) -> List[Dict[str, str]]:
        """Derive optimization hints from the aggregated GPU analysis."""
        hints = []

        # Precompute the signals the thresholds below are based on.
        memory_ops = gpu_analysis['memory_operations']
        sync_count = len(gpu_analysis['synchronization_points'])
        blocking_copy_count = sum(1 for op in memory_ops if not op['is_async'])
        uses_managed_memory = any(op['is_managed'] for op in memory_ops)

        # Heavy synchronization limits host/device overlap.
        if sync_count > 5:
            hints.append({
                'category': 'synchronization',
                'severity': 'warning',
                'message': 'Frequent synchronization detected. Consider batching operations or using streams.',
                'suggestion': 'Use asynchronous operations and CUDA streams to overlap computation and memory transfer.'
            })

        # Blocking copies serialize the pipeline.
        if blocking_copy_count > 3:
            hints.append({
                'category': 'memory',
                'severity': 'info',
                'message': 'Multiple synchronous memory operations detected.',
                'suggestion': 'Consider using asynchronous memory operations to improve performance.'
            })

        # Managed memory can page-fault on first touch.
        if uses_managed_memory:
            hints.append({
                'category': 'memory',
                'severity': 'info',
                'message': 'Unified/managed memory detected.',
                'suggestion': 'Monitor memory access patterns and consider explicit prefetching for better performance.'
            })

        return hints