"""
索引映射分析器

分析ES索引结构、字段类型检测、映射兼容性检查等功能。
"""

from typing import Dict, List, Optional, Any, Set, Tuple
from .logger import get_module_logger
from .exceptions import ESIndexNotFoundError, create_error_from_exception
from .time_utils import TimeFieldDetector

logger = get_module_logger(__name__)


class MappingAnalyzer:
    """Index mapping analyzer.

    Analyzes the mapping structure of Elasticsearch indices and provides
    field-type detection, cross-index compatibility checks and mapping
    optimization suggestions.
    """

    def __init__(self, es_client):
        """Initialize the analyzer.

        Args:
            es_client: Elasticsearch client wrapper; must expose
                ``get_mapping(index)`` and ``index_exists(index)``.
        """
        self.es_client = es_client
        self.time_detector = TimeFieldDetector()

    def analyze_index_mapping(self, index: str) -> Dict[str, Any]:
        """Analyze the mapping of an index (or index pattern).

        Args:
            index: Index name; may be a wildcard pattern that matches
                several concrete indices.

        Returns:
            The analysis result of the single matched index, or an
            aggregated cross-index summary when the pattern matched more
            than one index.

        Raises:
            ESIndexNotFoundError: If no index matches ``index``.
        """
        logger.info(f"开始分析索引映射: {index}")

        try:
            mapping_response = self.es_client.get_mapping(index)

            # The response is keyed by concrete index name; a wildcard
            # pattern may therefore yield several entries.
            index_names = list(mapping_response.keys())
            if not index_names:
                raise ESIndexNotFoundError(index)

            # Analyze every matched index individually.
            analysis_results = {
                idx_name: self._analyze_single_mapping(idx_name, mapping_response[idx_name])
                for idx_name in index_names
            }

            # Exactly one match: return its analysis directly.
            if len(analysis_results) == 1:
                return next(iter(analysis_results.values()))

            # Multiple matches: return an aggregated summary.
            return self._aggregate_mapping_analysis(analysis_results)

        except Exception as e:
            logger.error(f"分析索引映射失败: {index}, {e}")
            raise create_error_from_exception(e, f"分析索引映射失败: {index}")

    def _analyze_single_mapping(self, index_name: str, mapping: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze the mapping of a single concrete index.

        Args:
            index_name: Concrete index name.
            mapping: Raw mapping document for this index
                (``{'mappings': {'properties': {...}}}``).

        Returns:
            Analysis result with flattened fields, type statistics,
            detected time fields, depth stats and special-field groups.
        """
        properties = mapping.get('mappings', {}).get('properties', {})

        # Flatten the nested field tree into dotted paths.
        flattened_fields = self._flatten_properties(properties)

        # Field type distribution.
        field_types = self._analyze_field_types(flattened_fields)

        # Time field detection (delegated to TimeFieldDetector).
        time_fields = self.time_detector.detect_time_fields(mapping)

        # Nesting depth statistics.
        field_depth_stats = self._analyze_field_depth(flattened_fields)

        # Classification of notable field kinds.
        special_fields = self._detect_special_fields(flattened_fields)

        return {
            'index_name': index_name,
            'total_fields': len(flattened_fields),
            'field_types': field_types,
            'time_fields': [{'path': path, 'score': score} for path, score in time_fields],
            # time_fields is assumed ordered best-first; take the top hit.
            'suggested_time_field': time_fields[0][0] if time_fields else None,
            'field_depth_stats': field_depth_stats,
            'special_fields': special_fields,
            'flattened_fields': flattened_fields,
            'raw_mapping': mapping
        }

    def _flatten_properties(self, properties: Dict[str, Any], prefix: str = "") -> Dict[str, Dict[str, Any]]:
        """Flatten a nested ``properties`` tree into dotted field paths.

        Args:
            properties: A ``properties`` dict from an ES mapping.
            prefix: Dotted path prefix accumulated during recursion.

        Returns:
            Mapping of dotted field path -> field definition. Both a
            container field with a ``type`` and its children may appear
            (e.g. ``name`` and ``name.keyword`` for multi-fields).
        """
        flattened = {}

        for field_name, field_def in properties.items():
            current_path = f"{prefix}.{field_name}" if prefix else field_name

            if isinstance(field_def, dict):
                if 'type' in field_def:
                    # A concrete field definition.
                    flattened[current_path] = field_def

                # Recurse into nested object properties.
                if 'properties' in field_def:
                    nested_fields = self._flatten_properties(field_def['properties'], current_path)
                    flattened.update(nested_fields)

                # Multi-field mappings ("fields") become sub-paths too.
                if 'fields' in field_def:
                    for sub_field, sub_def in field_def['fields'].items():
                        sub_path = f"{current_path}.{sub_field}"
                        flattened[sub_path] = sub_def

        return flattened

    def _analyze_field_types(self, fields: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
        """Compute the field-type distribution.

        Args:
            fields: Flattened field dict (path -> definition).

        Returns:
            Dict with ``type_distribution`` (type -> count),
            ``type_examples`` (type -> up to 3 sample paths) and
            ``most_common_type``.
        """
        type_counts = {}
        type_examples = {}

        for field_path, field_def in fields.items():
            field_type = field_def.get('type', 'unknown')

            # Count occurrences per type.
            type_counts[field_type] = type_counts.get(field_type, 0) + 1

            # Keep at most 3 example paths per type.
            if field_type not in type_examples:
                type_examples[field_type] = []
            if len(type_examples[field_type]) < 3:
                type_examples[field_type].append(field_path)

        return {
            'type_distribution': type_counts,
            'type_examples': type_examples,
            'most_common_type': max(type_counts.items(), key=lambda x: x[1])[0] if type_counts else None
        }

    def _analyze_field_depth(self, fields: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
        """Compute nesting-depth statistics for flattened fields.

        Depth is the number of dots in the dotted path (top-level = 0).

        Args:
            fields: Flattened field dict (path -> definition).

        Returns:
            Dict with ``max_depth``, ``avg_depth`` and
            ``depth_distribution`` (depth -> count).
        """
        depths = [field_path.count('.') for field_path in fields.keys()]

        if not depths:
            return {'max_depth': 0, 'avg_depth': 0, 'depth_distribution': {}}

        depth_distribution = {}
        for depth in depths:
            depth_distribution[depth] = depth_distribution.get(depth, 0) + 1

        return {
            'max_depth': max(depths),
            'avg_depth': sum(depths) / len(depths),
            'depth_distribution': depth_distribution
        }

    def _detect_special_fields(self, fields: Dict[str, Dict[str, Any]]) -> Dict[str, List[str]]:
        """Group fields by notable kinds (text, keyword, numeric, ...).

        Args:
            fields: Flattened field dict (path -> definition).

        Returns:
            Dict of category name -> list of field paths.
        """
        special_fields = {
            'text_fields': [],
            'keyword_fields': [],
            'numeric_fields': [],
            'date_fields': [],
            'boolean_fields': [],
            'geo_fields': [],
            'nested_fields': [],
            'object_fields': [],
            'analyzed_fields': [],
            'not_analyzed_fields': []
        }

        numeric_types = {'integer', 'long', 'short', 'byte', 'double', 'float', 'half_float', 'scaled_float'}
        geo_types = {'geo_point', 'geo_shape'}

        for field_path, field_def in fields.items():
            field_type = field_def.get('type', '')

            if field_type == 'text':
                special_fields['text_fields'].append(field_path)
                # Only text fields with an explicit analyzer count as analyzed.
                if field_def.get('analyzer'):
                    special_fields['analyzed_fields'].append(field_path)
            elif field_type == 'keyword':
                special_fields['keyword_fields'].append(field_path)
                special_fields['not_analyzed_fields'].append(field_path)
            elif field_type in numeric_types:
                special_fields['numeric_fields'].append(field_path)
            elif field_type in ['date', 'date_nanos']:
                special_fields['date_fields'].append(field_path)
            elif field_type == 'boolean':
                special_fields['boolean_fields'].append(field_path)
            elif field_type in geo_types:
                special_fields['geo_fields'].append(field_path)
            elif field_type == 'nested':
                special_fields['nested_fields'].append(field_path)
            elif field_type == 'object':
                special_fields['object_fields'].append(field_path)

        return special_fields

    def _aggregate_mapping_analysis(self, analysis_results: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
        """Aggregate per-index analysis results into a cross-index summary.

        Args:
            analysis_results: Index name -> single-index analysis result.

        Returns:
            Summary with common/unique fields, type conflicts and
            time-field suggestions.
        """
        aggregated = {
            'indices_analyzed': list(analysis_results.keys()),
            'total_indices': len(analysis_results),
            'field_consistency': {},
            'common_fields': set(),
            'unique_fields': {},
            'type_conflicts': [],
            'time_field_suggestions': []
        }

        # Collect every field path and which index defines it with what.
        all_fields = {}
        for idx_name, result in analysis_results.items():
            for field_path, field_def in result['flattened_fields'].items():
                if field_path not in all_fields:
                    all_fields[field_path] = {}
                all_fields[field_path][idx_name] = field_def

        # Check cross-index field consistency.
        for field_path, index_defs in all_fields.items():
            indices_with_field = set(index_defs.keys())

            if len(indices_with_field) == len(analysis_results):
                # Every index defines this field.
                aggregated['common_fields'].add(field_path)

                # Flag the field when the declared types disagree.
                types = {field_def.get('type') for field_def in index_defs.values()}
                if len(types) > 1:
                    aggregated['type_conflicts'].append({
                        'field': field_path,
                        # Map index name -> declared type so the conflict is
                        # explicit (previously the whole field definition was
                        # stored here, contradicting the key name).
                        'types': {idx: fd.get('type') for idx, fd in index_defs.items()}
                    })
            else:
                # Only a subset of the indices has this field.
                for idx in indices_with_field:
                    if idx not in aggregated['unique_fields']:
                        aggregated['unique_fields'][idx] = []
                    aggregated['unique_fields'][idx].append(field_path)

        # Collect per-index time-field suggestions.
        for idx_name, result in analysis_results.items():
            if result['suggested_time_field']:
                aggregated['time_field_suggestions'].append({
                    'index': idx_name,
                    'field': result['suggested_time_field']
                })

        return aggregated

    def check_mapping_compatibility(self, source_index: str, target_index: str) -> Dict[str, Any]:
        """Check whether the source index mapping fits the target index.

        Args:
            source_index: Source index name.
            target_index: Target index name.

        Returns:
            Compatibility report; when the target does not exist it
            contains a recommended mapping to create it with.
        """
        logger.info(f"检查映射兼容性: {source_index} -> {target_index}")

        try:
            source_analysis = self.analyze_index_mapping(source_index)

            if self.es_client.index_exists(target_index):
                target_analysis = self.analyze_index_mapping(target_index)
                return self._compare_mappings(source_analysis, target_analysis)
            else:
                # Target is missing: propose a mapping derived from the source.
                return {
                    'compatible': True,
                    'target_exists': False,
                    'recommended_mapping': self._generate_recommended_mapping(source_analysis),
                    'issues': [],
                    'warnings': []
                }

        except Exception as e:
            logger.error(f"检查映射兼容性失败: {e}")
            raise create_error_from_exception(e, "映射兼容性检查失败")

    def _compare_mappings(self, source: Dict[str, Any], target: Dict[str, Any]) -> Dict[str, Any]:
        """Compare two single-index mapping analyses field by field.

        Args:
            source: Source index analysis result.
            target: Target index analysis result.

        Returns:
            Dict with ``compatible`` flag, ``issues``, ``warnings`` and a
            numeric ``compatibility_score`` (0-100).
        """
        issues = []
        warnings = []

        source_fields = source['flattened_fields']
        target_fields = target['flattened_fields']

        # Check each source field against the target.
        for field_path, source_def in source_fields.items():
            if field_path in target_fields:
                target_def = target_fields[field_path]

                source_type = source_def.get('type')
                target_type = target_def.get('type')

                if source_type != target_type:
                    if self._are_types_compatible(source_type, target_type):
                        warnings.append({
                            'type': 'type_difference',
                            'field': field_path,
                            'source_type': source_type,
                            'target_type': target_type,
                            'severity': 'medium'
                        })
                    else:
                        issues.append({
                            'type': 'type_incompatible',
                            'field': field_path,
                            'source_type': source_type,
                            'target_type': target_type,
                            'severity': 'high'
                        })
            else:
                # Target index lacks this field.
                warnings.append({
                    'type': 'missing_field',
                    'field': field_path,
                    'severity': 'low'
                })

        # Fields present only in the target.
        extra_fields = set(target_fields.keys()) - set(source_fields.keys())
        if extra_fields:
            warnings.append({
                'type': 'extra_fields',
                'fields': list(extra_fields),
                'severity': 'low'
            })

        return {
            'compatible': len(issues) == 0,
            'target_exists': True,
            'issues': issues,
            'warnings': warnings,
            'compatibility_score': self._calculate_compatibility_score(issues, warnings)
        }

    def _are_types_compatible(self, source_type: str, target_type: str) -> bool:
        """Return True when two ES field types are interchangeable.

        Args:
            source_type: Source field type.
            target_type: Target field type.

        Returns:
            True when the types belong to the same compatibility group or
            are identical.
        """
        # Groups whose members coerce cleanly into one another.
        compatible_groups = [
            {'text', 'keyword'},  # string types
            {'integer', 'long', 'short', 'byte'},  # integer types
            {'double', 'float', 'half_float'},  # floating-point types
            {'date', 'date_nanos'},  # date types
        ]

        for group in compatible_groups:
            if source_type in group and target_type in group:
                return True

        return source_type == target_type

    def _calculate_compatibility_score(self, issues: List[Dict], warnings: List[Dict]) -> float:
        """Compute a 0-100 compatibility score from issues and warnings.

        Args:
            issues: Blocking incompatibilities (heavy penalty).
            warnings: Non-blocking differences (light penalty).

        Returns:
            Score clamped to [0, 100].
        """
        base_score = 100.0

        # Issues are penalized by severity.
        for issue in issues:
            severity = issue.get('severity', 'medium')
            if severity == 'high':
                base_score -= 20.0
            elif severity == 'medium':
                base_score -= 10.0
            else:
                base_score -= 5.0

        # Warnings cost less.
        for warning in warnings:
            severity = warning.get('severity', 'low')
            if severity == 'medium':
                base_score -= 3.0
            else:
                base_score -= 1.0

        return max(0.0, base_score)

    def _generate_recommended_mapping(self, source_analysis: Dict[str, Any]) -> Dict[str, Any]:
        """Build a mapping document from a source-index analysis.

        Args:
            source_analysis: Single-index analysis result (must contain
                ``flattened_fields``).

        Returns:
            A ``{'mappings': {'properties': ...}}`` document reconstructed
            from the flattened field paths.
        """
        properties = {}

        for field_path, field_def in source_analysis['flattened_fields'].items():
            # Rebuild the nested structure from the dotted path.
            path_parts = field_path.split('.')
            current = properties

            for part in path_parts[:-1]:
                node = current.setdefault(part, {'type': 'object', 'properties': {}})
                # A parent may already exist as a leaf definition (e.g.
                # multi-field "name" + "name.keyword"); setdefault ensures it
                # can hold children instead of raising KeyError.
                current = node.setdefault('properties', {})

            # Place the leaf field definition.
            final_field = path_parts[-1]
            current[final_field] = field_def.copy()

        return {
            'mappings': {
                'properties': properties
            }
        }

    def suggest_mapping_optimizations(self, index: str) -> List[Dict[str, Any]]:
        """Suggest mapping optimizations for an index.

        Args:
            index: Index name.

        Returns:
            List of suggestion dicts (type, description, impact, details).
        """
        analysis = self.analyze_index_mapping(index)
        suggestions = []

        # Many text fields usually means wasted analysis overhead.
        text_fields = analysis['special_fields']['text_fields']
        if len(text_fields) > 10:
            suggestions.append({
                'type': 'too_many_text_fields',
                'description': f'发现 {len(text_fields)} 个text字段，考虑将不需要全文搜索的字段改为keyword',
                'impact': 'performance',
                'fields': text_fields[:5]  # show only the first 5
            })

        # Deep nesting hurts query performance.
        max_depth = analysis['field_depth_stats']['max_depth']
        if max_depth > 5:
            suggestions.append({
                'type': 'deep_nesting',
                'description': f'字段嵌套深度达到 {max_depth} 层，可能影响查询性能',
                'impact': 'performance',
                'max_depth': max_depth
            })

        # Very wide mappings strain the cluster state.
        total_fields = analysis['total_fields']
        if total_fields > 1000:
            suggestions.append({
                'type': 'too_many_fields',
                'description': f'索引包含 {total_fields} 个字段，可能影响性能',
                'impact': 'performance',
                'total_fields': total_fields
            })

        return suggestions
