"""
C++ type system analyzer.

Provides analysis of C++ type system including auto/decltype deduction,
template type parameters, move semantics, const-correctness, and GPU types.
"""

from typing import Dict, List, Any, Optional, Set, Tuple
from collections import defaultdict
import re

from ...core.error_handler import log_debug, log_info


class TypeAnalyzer:
    """Analyzes C++ type system and type-related patterns.

    Operates on parser-produced AST data in plain dict/list form (see
    ``analyze_types`` for the expected keys) and reports on type usage,
    auto/decltype deduction, template instantiations, type aliases,
    const-correctness, move semantics, references, and GPU-specific
    (CUDA/HIP) types.
    """

    def __init__(self):
        """Initialize the type analyzer with built-in type tables."""
        # Caches; type_aliases is populated by _analyze_type_aliases.
        self.type_cache = {}
        self.type_aliases = {}
        self.template_instantiations = {}

        # C++ fundamental (built-in) types, including multi-word forms.
        self.fundamental_types = {
            'void', 'bool', 'char', 'signed char', 'unsigned char',
            'short', 'unsigned short', 'int', 'unsigned int',
            'long', 'unsigned long', 'long long', 'unsigned long long',
            'float', 'double', 'long double',
            'char16_t', 'char32_t', 'wchar_t'
        }

        # C++11/14/17 type traits (unqualified names).
        self.type_traits = {
            'is_same', 'is_base_of', 'is_convertible', 'is_constructible',
            'is_trivial', 'is_pod', 'is_polymorphic', 'is_abstract',
            'enable_if', 'conditional', 'decay', 'remove_cv', 'add_const'
        }

        # GPU-specific types keyed by platform.
        self.gpu_types = {
            'cuda': {
                'dim3', 'cudaStream_t', 'cudaEvent_t', 'cudaError_t',
                'float2', 'float3', 'float4', 'int2', 'int3', 'int4',
                'uint2', 'uint3', 'uint4', 'double2', 'double3', 'double4'
            },
            'hip': {
                'hipStream_t', 'hipEvent_t', 'hipError_t',
                'float2', 'float3', 'float4', 'int2', 'int3', 'int4'
            }
        }

    def analyze_types(self, ast_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Comprehensive type analysis of C++ code.

        Args:
            ast_data: AST data from the parser. Recognized keys include
                'variables', 'functions', 'typedefs', 'function_calls'
                and 'template_instantiations'; all are optional.

        Returns:
            Type analysis results keyed by analysis category.
        """
        analysis_result = {
            'type_usage': self._analyze_type_usage(ast_data),
            'auto_deductions': self._analyze_auto_deductions(ast_data),
            'template_instantiations': self._analyze_template_instantiations(ast_data),
            'type_aliases': self._analyze_type_aliases(ast_data),
            'const_correctness': self._analyze_const_correctness(ast_data),
            'move_semantics': self._analyze_move_semantics(ast_data),
            'reference_analysis': self._analyze_reference_types(ast_data),
            'gpu_types': self._analyze_gpu_types(ast_data),
            'type_safety_issues': [],
            'recommendations': []
        }

        # Issue detection and recommendations consume the results above.
        analysis_result['type_safety_issues'] = self._identify_type_safety_issues(analysis_result)
        analysis_result['recommendations'] = self._generate_type_recommendations(analysis_result)

        return analysis_result

    def _analyze_type_usage(self, ast_data: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze overall type usage patterns across variables and functions."""
        type_usage = {
            'fundamental_types': defaultdict(int),
            'user_defined_types': defaultdict(int),
            'template_types': defaultdict(int),
            'pointer_types': defaultdict(int),
            'reference_types': defaultdict(int),
            'const_types': defaultdict(int),
            'most_used_types': [],
            'type_complexity_score': 0.0
        }

        # Variable declarations (skip entries with no type spelling).
        for var in ast_data.get('variables', []):
            type_spelling = var.get('type', {}).get('spelling', '')
            if type_spelling:
                self._categorize_type(type_spelling, type_usage)

        # Function return and parameter types.
        for func in ast_data.get('functions', []):
            return_spelling = func.get('return_type', {}).get('spelling', '')
            if return_spelling:
                self._categorize_type(return_spelling, type_usage)

            for param in func.get('parameters', []):
                param_spelling = param.get('type', {}).get('spelling', '')
                if param_spelling:
                    self._categorize_type(param_spelling, type_usage)

        # Top 10 most frequently used type names across all buckets.
        all_types = {
            **type_usage['fundamental_types'],
            **type_usage['user_defined_types'],
            **type_usage['template_types']
        }
        type_usage['most_used_types'] = sorted(
            all_types.items(), key=lambda item: item[1], reverse=True
        )[:10]

        type_usage['type_complexity_score'] = self._calculate_type_complexity_score(type_usage)

        return type_usage

    def _categorize_type(self, type_spelling: str, type_usage: Dict[str, Any]):
        """Categorize a single type spelling and update usage counters.

        The full spelling (with qualifiers/sigils) feeds the pointer /
        reference / const counters; a normalized base type feeds the
        fundamental / user-defined buckets.
        """
        clean_type = type_spelling.strip()

        # Normalize to the base type: drop pointer/reference sigils and
        # cv-qualifiers, then collapse whitespace runs so multi-word types
        # like 'unsigned int' still match the fundamental-type table.
        # (The previous regex used doubled backslashes inside a raw string,
        # so it stripped literal 's' characters, e.g. 'short' -> 'hort'.)
        base_type = re.sub(r'[*&]', '', clean_type)
        base_type = re.sub(r'\b(?:const|volatile)\b', '', base_type)
        base_type = re.sub(r'\s+', ' ', base_type).strip()

        if base_type in self.fundamental_types:
            type_usage['fundamental_types'][base_type] += 1
        elif '<' in clean_type and '>' in clean_type:
            # Template instantiation: count under the template's name.
            template_name = clean_type.split('<')[0].strip()
            type_usage['template_types'][template_name] += 1
        else:
            type_usage['user_defined_types'][base_type] += 1

        # Count modifier usage on the full (unnormalized) spelling.
        if '*' in clean_type:
            type_usage['pointer_types'][clean_type] += 1
        if '&' in clean_type:
            type_usage['reference_types'][clean_type] += 1
        # Whole-word match so names merely containing 'const' don't count.
        if re.search(r'\bconst\b', clean_type):
            type_usage['const_types'][clean_type] += 1

    def _analyze_auto_deductions(self, ast_data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Analyze auto and decltype usage in variables and function returns."""
        auto_usages = []
        # Whole-word match so types merely containing these substrings
        # (e.g. 'automatic_t') are not misclassified.
        auto_pattern = re.compile(r'\b(?:auto|decltype)\b')

        for var in ast_data.get('variables', []):
            type_spelling = var.get('type', {}).get('spelling', '')
            if auto_pattern.search(type_spelling):
                auto_usages.append({
                    'variable_name': var['name'],
                    'declared_type': type_spelling,
                    'deduced_type': self._deduce_auto_type(var, ast_data),
                    'location': var.get('location', {}),
                    'deduction_context': self._get_deduction_context(var),
                    'complexity': self._calculate_deduction_complexity(type_spelling)
                })

        # Function return types may also use auto/decltype.
        for func in ast_data.get('functions', []):
            return_spelling = func.get('return_type', {}).get('spelling', '')
            if auto_pattern.search(return_spelling):
                auto_usages.append({
                    'function_name': func['name'],
                    'declared_type': return_spelling,
                    'deduced_type': 'unknown',  # would need flow analysis
                    'location': func.get('location', {}),
                    'deduction_context': 'function_return',
                    'complexity': self._calculate_deduction_complexity(return_spelling)
                })

        return auto_usages

    def _analyze_template_instantiations(self, ast_data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Analyze template instantiations reported by the parser."""
        instantiations = []

        for instantiation in ast_data.get('template_instantiations', []):
            template_args = instantiation.get('template_arguments', [])
            instantiations.append({
                'template_name': instantiation.get('template_name'),
                'template_args': template_args,
                'instantiation_location': instantiation.get('location', {}),
                'specialization_level': len(template_args),
                'arg_types': [
                    {
                        'arg_value': arg.get('value', ''),
                        'arg_type': arg.get('type', 'unknown'),
                        'is_type_argument': arg.get('kind') == 'type',
                        'is_value_argument': arg.get('kind') == 'value'
                    }
                    for arg in template_args
                ]
            })

        return instantiations

    def _analyze_type_aliases(self, ast_data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Analyze typedef and using declarations."""
        aliases = []

        for typedef in ast_data.get('typedefs', []):
            underlying = typedef.get('underlying_type', {})
            aliases.append({
                'alias_name': typedef['name'],
                'underlying_type': underlying,
                'location': typedef.get('location', {}),
                'is_template_alias': typedef.get('is_template', False),
                'complexity_reduction': self._calculate_alias_benefit(typedef)
            })
            # Remember the alias for later lookups.
            self.type_aliases[typedef['name']] = underlying

        return aliases

    def _analyze_const_correctness(self, ast_data: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze const-correctness patterns across variables and functions."""
        const_analysis = {
            'const_variables': 0,
            'mutable_variables': 0,
            'const_methods': 0,
            'const_parameters': 0,
            'const_return_types': 0,
            'const_violations': [],
            'const_correctness_score': 0.0
        }
        # Word-boundary match so e.g. 'constexpr' spellings don't count.
        const_re = re.compile(r'\bconst\b')

        for var in ast_data.get('variables', []):
            type_spelling = var.get('type', {}).get('spelling', '')
            if const_re.search(type_spelling):
                const_analysis['const_variables'] += 1
            else:
                const_analysis['mutable_variables'] += 1

        for func in ast_data.get('functions', []):
            # Const-qualified member function.
            if func.get('is_const'):
                const_analysis['const_methods'] += 1

            return_type = func.get('return_type', {}).get('spelling', '')
            if const_re.search(return_type):
                const_analysis['const_return_types'] += 1

            for param in func.get('parameters', []):
                param_type = param.get('type', {}).get('spelling', '')
                if const_re.search(param_type):
                    const_analysis['const_parameters'] += 1

        # Score = const items / (const + potentially-const items).
        total_items = (const_analysis['const_variables'] + const_analysis['mutable_variables'] +
                       const_analysis['const_methods'] + const_analysis['const_parameters'])
        const_items = (const_analysis['const_variables'] + const_analysis['const_methods'] +
                       const_analysis['const_parameters'])

        if total_items > 0:
            const_analysis['const_correctness_score'] = const_items / total_items

        return const_analysis

    def _analyze_move_semantics(self, ast_data: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze move semantics usage (rvalue refs, move ctors, std::move)."""
        move_analysis = {
            'rvalue_references': [],
            'move_constructors': [],
            'move_assignments': [],
            'perfect_forwarding': [],
            'std_move_usage': [],
            'move_semantics_score': 0.0
        }

        # Rvalue-reference variables.
        for var in ast_data.get('variables', []):
            type_spelling = var.get('type', {}).get('spelling', '')
            if '&&' in type_spelling:
                move_analysis['rvalue_references'].append({
                    'variable_name': var['name'],
                    'type': type_spelling,
                    'location': var.get('location', {})
                })

        for func in ast_data.get('functions', []):
            # A function named after its class is a constructor; an rvalue
            # reference parameter there marks a move constructor.
            if func['name'] == func.get('class_name'):
                for param in func.get('parameters', []):
                    param_type = param.get('type', {}).get('spelling', '')
                    if '&&' in param_type:
                        move_analysis['move_constructors'].append({
                            'class_name': func.get('class_name'),
                            'location': func.get('location', {})
                        })

            # Perfect forwarding: template function with a T&&-style param
            # (heuristic: a template parameter whose name contains 'T').
            if func.get('is_template'):
                template_params = func.get('template_info', {}).get('parameters', [])
                for param in func.get('parameters', []):
                    param_type = param.get('type', {}).get('spelling', '')
                    if '&&' in param_type and any('T' in tp.get('name', '') for tp in template_params):
                        move_analysis['perfect_forwarding'].append({
                            'function_name': func['name'],
                            'parameter': param['name'],
                            'location': func.get('location', {})
                        })

        # std::move call sites.
        for call in ast_data.get('function_calls', []):
            if call.get('function_name') == 'std::move':
                # Guard against an explicitly empty argument list, which
                # previously raised IndexError on [0].
                arguments = call.get('arguments') or [{}]
                move_analysis['std_move_usage'].append({
                    'location': call.get('location', {}),
                    'caller': call.get('caller_function'),
                    'argument': arguments[0].get('expression', 'unknown')
                })

        # Adoption score: fraction of classes defining a move constructor.
        total_classes = len(set(
            func.get('class_name')
            for func in ast_data.get('functions', [])
            if func.get('class_name')
        ))
        move_enabled_classes = len(move_analysis['move_constructors'])

        if total_classes > 0:
            move_analysis['move_semantics_score'] = move_enabled_classes / total_classes

        return move_analysis

    def _analyze_reference_types(self, ast_data: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze lvalue and rvalue reference usage."""
        ref_analysis = {
            'lvalue_references': 0,
            'rvalue_references': 0,
            'reference_parameters': [],
            'reference_returns': [],
            'dangling_reference_risks': []
        }

        # Count reference variables by kind ('&&' checked first, so the
        # elif can only be a plain lvalue reference).
        for var in ast_data.get('variables', []):
            type_spelling = var.get('type', {}).get('spelling', '')
            if '&&' in type_spelling:
                ref_analysis['rvalue_references'] += 1
            elif '&' in type_spelling:
                ref_analysis['lvalue_references'] += 1

        # Reference-returning functions and reference parameters.
        for func in ast_data.get('functions', []):
            return_type = func.get('return_type', {}).get('spelling', '')
            if '&' in return_type:
                ref_analysis['reference_returns'].append({
                    'function_name': func['name'],
                    'return_type': return_type,
                    'is_rvalue_ref': '&&' in return_type,
                    'location': func.get('location', {})
                })

            for param in func.get('parameters', []):
                param_type = param.get('type', {}).get('spelling', '')
                if '&' in param_type:
                    ref_analysis['reference_parameters'].append({
                        'function_name': func['name'],
                        'parameter_name': param['name'],
                        'parameter_type': param_type,
                        'is_rvalue_ref': '&&' in param_type,
                        'location': func.get('location', {})
                    })

        return ref_analysis

    def _analyze_gpu_types(self, ast_data: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze GPU-specific (CUDA/HIP) type usage."""
        gpu_analysis = {
            'cuda_types': [],
            'hip_types': [],
            'vector_types': [],
            'device_types': [],
            'stream_types': []
        }

        # Collect every type spelling with its usage context.
        all_types = []
        for var in ast_data.get('variables', []):
            type_spelling = var.get('type', {}).get('spelling', '')
            if type_spelling:
                all_types.append({
                    'type': type_spelling,
                    'context': 'variable',
                    'name': var['name'],
                    'location': var.get('location', {})
                })

        for func in ast_data.get('functions', []):
            return_type = func.get('return_type', {}).get('spelling', '')
            if return_type:
                all_types.append({
                    'type': return_type,
                    'context': 'return_type',
                    'name': func['name'],
                    'location': func.get('location', {})
                })

        # Vector types such as float2/int4. (The previous pattern's '\\b'
        # in a raw string matched a literal backslash and never fired.)
        vector_re = re.compile(r'\b(?:float|int|uint|double)[2-4]\b')

        for type_info in all_types:
            type_spelling = type_info['type']

            # Whole-word matching so e.g. 'uint2' does not also count as
            # 'int2' (the previous substring test produced false positives).
            for platform, bucket in (('cuda', 'cuda_types'), ('hip', 'hip_types')):
                for gpu_type in self.gpu_types[platform]:
                    if re.search(rf'\b{re.escape(gpu_type)}\b', type_spelling):
                        gpu_analysis[bucket].append({
                            'type_name': gpu_type,
                            'full_type': type_spelling,
                            'usage_context': type_info['context'],
                            'location': type_info['location']
                        })

            if vector_re.search(type_spelling):
                gpu_analysis['vector_types'].append(type_info)

        return gpu_analysis

    def _identify_type_safety_issues(self, analysis_result: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Identify potential type safety issues from the collected analysis."""
        issues = []

        # Low const-correctness.
        const_score = analysis_result['const_correctness']['const_correctness_score']
        if const_score < 0.5:
            issues.append({
                'category': 'const_correctness',
                'severity': 'warning',
                'message': f'Low const-correctness score ({const_score:.2f})',
                'recommendation': 'Consider adding more const qualifiers to improve code safety'
            })

        # Low move-semantics adoption with no rvalue references at all.
        move_score = analysis_result['move_semantics']['move_semantics_score']
        if move_score < 0.3 and len(analysis_result['move_semantics']['rvalue_references']) == 0:
            issues.append({
                'category': 'move_semantics',
                'severity': 'info',
                'message': 'Limited use of move semantics',
                'recommendation': 'Consider implementing move constructors and assignments for better performance'
            })

        # Hard-to-read auto deductions (complexity heuristic > 3).
        complex_autos = [auto for auto in analysis_result['auto_deductions'] if auto['complexity'] > 3]
        if complex_autos:
            issues.append({
                'category': 'auto_complexity',
                'severity': 'warning',
                'message': f'{len(complex_autos)} complex auto deductions found',
                'recommendation': 'Consider explicit type declarations for better readability'
            })

        return issues

    def _generate_type_recommendations(self, analysis_result: Dict[str, Any]) -> List[str]:
        """Generate human-readable type-related recommendations."""
        recommendations = []

        if analysis_result['const_correctness']['const_correctness_score'] < 0.7:
            recommendations.append(
                "Improve const-correctness by adding const qualifiers to variables and methods that don't modify state"
            )

        auto_count = len(analysis_result['auto_deductions'])
        if auto_count == 0:
            recommendations.append(
                "Consider using 'auto' for complex template types to improve code maintainability"
            )
        elif auto_count > 20:
            recommendations.append(
                "Review excessive 'auto' usage - explicit types may improve code readability"
            )

        if analysis_result['move_semantics']['move_semantics_score'] < 0.5:
            recommendations.append(
                "Implement move constructors and move assignment operators for resource-owning classes"
            )

        gpu_types = analysis_result['gpu_types']
        if gpu_types['cuda_types'] and gpu_types['hip_types']:
            recommendations.append(
                "Mixed CUDA and HIP types detected - ensure proper abstraction for portability"
            )

        return recommendations

    def _deduce_auto_type(self, var: Dict[str, Any], ast_data: Dict[str, Any]) -> str:
        """Attempt to deduce the actual type for an auto variable.

        Real deduction would require initializer/flow analysis; a
        placeholder is returned until that exists.
        """
        return "deduced_type_unknown"

    def _get_deduction_context(self, var: Dict[str, Any]) -> str:
        """Classify where an auto deduction happens for this variable."""
        if var.get('initializer'):
            return 'initializer'
        elif var.get('is_parameter'):
            return 'parameter'
        else:
            return 'declaration'

    def _calculate_deduction_complexity(self, type_spelling: str) -> int:
        """Heuristic complexity score for a deduced type spelling."""
        complexity = 0

        if 'decltype' in type_spelling:
            complexity += 2
        if 'auto' in type_spelling:
            complexity += 1
        if '&&' in type_spelling:
            complexity += 2
        if '<' in type_spelling and '>' in type_spelling:
            complexity += 1

        return complexity

    def _calculate_alias_benefit(self, typedef: Dict[str, Any]) -> float:
        """Heuristic benefit of an alias: longer underlying type, higher benefit."""
        underlying_type = typedef.get('underlying_type', {}).get('spelling', '')
        alias_name = typedef['name']

        if len(underlying_type) > len(alias_name) * 2:
            return 1.0  # high benefit
        elif len(underlying_type) > len(alias_name):
            return 0.5  # medium benefit
        else:
            return 0.1  # low benefit

    def _calculate_type_complexity_score(self, type_usage: Dict[str, Any]) -> float:
        """Overall type complexity score in [0.0, 10.0].

        Ratio of 'complex' usages (templates weighted 2x, pointers 1.5x)
        to all categorized type usages. The previous version divided by
        fundamental-type usages only, mislabeling template-heavy code with
        no fundamental types as zero complexity.
        """
        total_types = (
            sum(type_usage['fundamental_types'].values())
            + sum(type_usage['user_defined_types'].values())
            + sum(type_usage['template_types'].values())
        )
        template_count = sum(type_usage['template_types'].values())
        pointer_count = sum(type_usage['pointer_types'].values())

        if total_types == 0:
            return 0.0

        complexity = (template_count * 2 + pointer_count * 1.5) / total_types
        return min(complexity, 10.0)  # cap at 10.0