import json
import re
from typing import List, Dict, Set, Tuple
from sqlalchemy.orm import sessionmaker
from myapp import db
from myapp.models.model_train_model import Training_Model
from myapp.models.model_dependency import Model_Dependency
from myapp.models.model_dataset import Dataset
from myapp.models.model_job import Pipeline, Task
from myapp.models.model_version import Model_Version

class ModelDependencyAnalyzer:
    """Automatic analyzer of dependency relationships between models.

    Derives dependencies for a model from five sources — parent/child
    inheritance, pipeline task flow, shared training datasets, manually
    configured dependencies, and version lineage — and can persist the
    combined result through :meth:`sync_dependencies_to_db`.

    Every analyzer returns a list of dicts with the keys:
    ``source_model_id``, ``target_model_id``, ``dependency_type``,
    ``dependency_strength``, ``description``, ``auto_generated``.
    """

    # Dependency types produced by the automatic analyzers below.  Used by
    # sync_dependencies_to_db to identify and clear stale auto-generated rows;
    # manually configured dependencies use other types and are preserved.
    AUTO_DEPENDENCY_TYPES: Tuple[str, ...] = (
        'model_inheritance',
        'pipeline_flow',
        'shared_dataset',
        'version_evolution',
    )

    def __init__(self):
        # Share the application-wide SQLAlchemy session.
        self.session = db.session

    def analyze_all_dependencies(self, model_id: int) -> List[Dict]:
        """Run every analyzer for *model_id* and return the combined list."""
        dependencies: List[Dict] = []

        # 1. Parent/child model inheritance.
        dependencies.extend(self._analyze_parent_child_dependencies(model_id))

        # 2. Pipeline task-flow references.
        dependencies.extend(self._analyze_pipeline_dependencies(model_id))

        # 3. Shared training datasets.
        dependencies.extend(self._analyze_dataset_dependencies(model_id))

        # 4. Manually configured dependencies.
        dependencies.extend(self._analyze_manual_dependencies(model_id))

        # 5. Version-evolution lineage.
        dependencies.extend(self._analyze_version_dependencies(model_id))

        return dependencies

    def _analyze_parent_child_dependencies(self, model_id: int) -> List[Dict]:
        """Return an inheritance dependency when the model has a parent."""
        dependencies: List[Dict] = []
        model = self.session.query(Training_Model).get(model_id)

        if model and model.parent_model_id:
            dependencies.append({
                'source_model_id': model_id,
                'target_model_id': model.parent_model_id,
                'dependency_type': 'model_inheritance',
                'dependency_strength': 1.0,
                'description': f'模型 {model.name} 继承自父模型',
                'auto_generated': True
            })

        return dependencies

    def _analyze_pipeline_dependencies(self, model_id: int) -> List[Dict]:
        """Extract dependencies from the model's pipeline tasks and DAG.

        Scans each task's ``args`` text for model-id references and, when the
        pipeline's ``dag_json`` has an ``edges`` section, delegates edge
        analysis to :meth:`_analyze_dag_edges`.  Best-effort: any parsing
        failure is reported and an empty/partial result is returned.
        """
        dependencies: List[Dict] = []
        model = self.session.query(Training_Model).get(model_id)

        if not model or not model.pipeline_id:
            return dependencies

        pipeline = self.session.query(Pipeline).get(model.pipeline_id)
        if not pipeline:
            return dependencies

        try:
            # Parse the pipeline's DAG structure (may legitimately be empty).
            dag_json = json.loads(pipeline.dag_json) if pipeline.dag_json else {}

            # Inspect every task of the pipeline for model references.
            tasks = self.session.query(Task).filter_by(pipeline_id=model.pipeline_id).all()

            for task in tasks:
                if task.args:
                    model_refs = self._extract_model_references(task.args)
                    for ref_model_id in model_refs:
                        # Skip self-references.
                        if ref_model_id != model_id:
                            dependencies.append({
                                'source_model_id': model_id,
                                'target_model_id': ref_model_id,
                                'dependency_type': 'pipeline_flow',
                                'dependency_strength': 0.8,
                                'description': f'Pipeline任务流依赖: {task.name}',
                                'auto_generated': True
                            })

            # Analyze explicit edge relationships declared in the DAG.
            if 'edges' in dag_json:
                dependencies.extend(self._analyze_dag_edges(dag_json['edges'], model_id))

        except Exception as e:
            # Best-effort analysis: report and return what we have so far.
            print(f"分析Pipeline依赖时出错: {e}")

        return dependencies

    def _analyze_dataset_dependencies(self, model_id: int) -> List[Dict]:
        """Link the model to other models that train on the same dataset(s)."""
        dependencies: List[Dict] = []
        model = self.session.query(Training_Model).get(model_id)

        if not model or not model.training_dataset:
            return dependencies

        # The training_dataset field may list several datasets.
        dataset_names = self._parse_dataset_names(model.training_dataset)

        for dataset_name in dataset_names:
            # Find other models whose training_dataset mentions the same name.
            # NOTE(review): a dataset name containing '%' or '_' would act as
            # a LIKE wildcard here — acceptable for typical names, but worth
            # confirming against real data.
            related_models = self.session.query(Training_Model).filter(
                Training_Model.id != model_id,
                Training_Model.training_dataset.like(f'%{dataset_name}%')
            ).all()

            for related_model in related_models:
                dependencies.append({
                    'source_model_id': model_id,
                    'target_model_id': related_model.id,
                    'dependency_type': 'shared_dataset',
                    'dependency_strength': 0.6,
                    'description': f'共享数据集: {dataset_name}',
                    'auto_generated': True
                })

        return dependencies

    def _analyze_manual_dependencies(self, model_id: int) -> List[Dict]:
        """Parse the model's ``dependencies`` field (JSON or free text).

        Accepts a JSON list of dicts ({model_id, type, strength, description}),
        a JSON string, or a plain non-JSON string from which numeric model ids
        are extracted.  Entries produced here are marked auto_generated=False.
        """
        dependencies: List[Dict] = []
        model = self.session.query(Training_Model).get(model_id)

        if not model or not model.dependencies:
            return dependencies

        try:
            try:
                deps_data = json.loads(model.dependencies)
            except ValueError:
                # Not valid JSON: treat the raw field as one free-form entry
                # instead of failing the whole analysis.
                deps_data = [model.dependencies]
            if isinstance(deps_data, str):
                deps_data = [deps_data]

            for dep in deps_data:
                if isinstance(dep, dict):
                    target_id = dep.get('model_id')
                    if target_id:
                        dependencies.append({
                            'source_model_id': model_id,
                            'target_model_id': target_id,
                            'dependency_type': dep.get('type', 'manual'),
                            'dependency_strength': dep.get('strength', 1.0),
                            'description': dep.get('description', '手动配置依赖'),
                            'auto_generated': False
                        })
                elif isinstance(dep, str):
                    # Free-form text: pull out any numeric model ids.
                    model_ids = self._extract_model_ids_from_string(dep)
                    for target_id in model_ids:
                        dependencies.append({
                            'source_model_id': model_id,
                            'target_model_id': target_id,
                            'dependency_type': 'manual',
                            'dependency_strength': 1.0,
                            'description': f'手动配置依赖: {dep}',
                            'auto_generated': False
                        })
        except Exception as e:
            # Best-effort: malformed manual config must not break analysis.
            print(f"分析手动依赖时出错: {e}")

        return dependencies

    def _analyze_version_dependencies(self, model_id: int) -> List[Dict]:
        """Link this model to older versions sharing the same name."""
        dependencies: List[Dict] = []

        model = self.session.query(Training_Model).get(model_id)
        if not model:
            return dependencies

        # Other rows with the same name are treated as other versions.
        other_versions = self.session.query(Training_Model).filter(
            Training_Model.name == model.name,
            Training_Model.id != model_id
        ).all()

        for other_model in other_versions:
            # Only point from the newer version back to the older one.
            if self._is_newer_version(model.version, other_model.version):
                dependencies.append({
                    'source_model_id': model_id,
                    'target_model_id': other_model.id,
                    'dependency_type': 'version_evolution',
                    'dependency_strength': 0.9,
                    'description': f'版本演进: {other_model.version} -> {model.version}',
                    'auto_generated': True
                })

        return dependencies

    def sync_dependencies_to_db(self, model_id: int):
        """Re-analyze *model_id* and persist the result.

        Previously auto-generated rows are removed first so they do not go
        stale; manually configured rows (any non-auto dependency_type) are
        left untouched.  Commits the session at the end.
        """
        # BUGFIX: the old code filtered on description LIKE '%auto_generated%',
        # but no description written by this class ever contains that token,
        # so stale auto-generated rows were never deleted.  Identify them by
        # their dependency_type instead.  synchronize_session=False because
        # this is a bulk delete before a fresh re-insert.
        self.session.query(Model_Dependency).filter(
            Model_Dependency.source_model_id == model_id,
            Model_Dependency.dependency_type.in_(self.AUTO_DEPENDENCY_TYPES)
        ).delete(synchronize_session=False)

        # Re-run the analyzers.
        dependencies = self.analyze_all_dependencies(model_id)

        # Insert rows that are not already present, de-duplicating within the
        # batch as well (the analyzers can emit the same triple twice).
        seen: Set[Tuple[int, int, str]] = set()
        for dep in dependencies:
            key = (dep['source_model_id'], dep['target_model_id'], dep['dependency_type'])
            if key in seen:
                continue
            seen.add(key)

            existing = self.session.query(Model_Dependency).filter(
                Model_Dependency.source_model_id == dep['source_model_id'],
                Model_Dependency.target_model_id == dep['target_model_id'],
                Model_Dependency.dependency_type == dep['dependency_type']
            ).first()

            if not existing:
                new_dep = Model_Dependency(
                    source_model_id=dep['source_model_id'],
                    target_model_id=dep['target_model_id'],
                    dependency_type=dep['dependency_type'],
                    dependency_strength=dep['dependency_strength'],
                    description=dep['description']
                )
                self.session.add(new_dep)

        self.session.commit()

    # --- helpers -----------------------------------------------------------

    def _extract_model_references(self, args_text: str) -> List[int]:
        """Return distinct model ids referenced in a task-args string.

        Recognizes ``model_id=N``/``model_id: N``, ``model=N``/``model: N``,
        ``/model/N/`` URL fragments and ``model_N`` tokens (case-insensitive).
        """
        model_ids: List[int] = []
        patterns = [
            r'model_id[=:]\s*(\d+)',
            r'model[=:]\s*(\d+)',
            r'/model/(\d+)/',
            r'model_(\d+)'
        ]

        for pattern in patterns:
            matches = re.findall(pattern, args_text, re.IGNORECASE)
            model_ids.extend(int(match) for match in matches)

        # De-duplicate; order is not significant to callers.
        return list(set(model_ids))

    def _parse_dataset_names(self, dataset_text: str) -> List[str]:
        """Split a dataset field on ',', ';', newline or '|' and strip blanks."""
        if not dataset_text:
            return []

        separators = [',', ';', '\n', '|']
        names = [dataset_text]

        # Repeatedly split on each separator so mixed separators work.
        for sep in separators:
            new_names: List[str] = []
            for name in names:
                new_names.extend(name.split(sep))
            names = new_names

        return [name.strip() for name in names if name.strip()]

    def _extract_model_ids_from_string(self, text: str) -> List[int]:
        """Return every standalone integer found in *text*, in order."""
        return [int(id_str) for id_str in re.findall(r'\b\d+\b', text)]

    def _is_newer_version(self, version1: str, version2: str) -> bool:
        """Return True if *version1* is strictly newer than *version2*.

        Compares dotted numeric components (missing parts count as 0); falls
        back to plain string comparison when the values are not dotted
        numbers.  Returns False when either value is missing.
        """
        if not version1 or not version2:
            return False

        try:
            v1_parts = [int(x) for x in version1.split('.') if x.isdigit()]
            v2_parts = [int(x) for x in version2.split('.') if x.isdigit()]

            # Pad the shorter list with zeros so '1.2' vs '1.2.0' compare equal.
            max_len = max(len(v1_parts), len(v2_parts))
            v1_parts.extend([0] * (max_len - len(v1_parts)))
            v2_parts.extend([0] * (max_len - len(v2_parts)))

            return v1_parts > v2_parts
        except (ValueError, AttributeError, TypeError):
            # Non-numeric versions: lexicographic comparison as a last resort.
            return version1 > version2

    def _analyze_dag_edges(self, edges: List[Dict], model_id: int) -> List[Dict]:
        """Analyze DAG edge relationships.

        Placeholder: must be implemented against the actual DAG JSON edge
        format used by the pipelines; currently returns no dependencies.
        """
        return []