"""
问题识别系统服务实现

提供研究质量问题的自动检测功能，包括：
- 问题自动检测
- 问题分类和优先级评估
- 问题报告生成
- 问题解决建议
"""
import logging
import re
from collections import Counter
from dataclasses import asdict
from datetime import datetime
from typing import List, Dict, Any, Optional, Tuple

from ..models.analysis_models import QualityIssue, QualityAssessment


class IssueDetectionService:
    """问题识别系统服务"""
    
    def __init__(self):
        """Initialize detection-rule configuration and severity weights."""
        # Detection rule configuration: for each issue category, every named
        # rule carries a severity, a human-readable description, and an
        # optional numeric threshold.
        # NOTE(review): several detector methods hardcode their thresholds and
        # severities instead of reading them from this table — confirm which
        # source of truth is intended.
        self.detection_rules = {
            'methodology': {
                'sample_size_too_small': {
                    'threshold': 30,
                    'severity': 'high',
                    'description': '样本量过小，可能影响结果的统计显著性'
                },
                'no_control_group': {
                    'severity': 'medium',
                    'description': '缺少对照组，实验设计不够严谨'
                },
                'no_randomization': {
                    'severity': 'medium',
                    'description': '未进行随机化处理，可能存在选择偏差'
                },
                'insufficient_variables': {
                    'threshold': 1,
                    'severity': 'medium',
                    'description': '控制变量不足，可能存在混杂因素'
                }
            },
            'data_quality': {
                'high_missing_rate': {
                    'threshold': 0.2,
                    'severity': 'high',
                    'description': '数据缺失率过高，可能影响分析结果'
                },
                'low_data_completeness': {
                    'threshold': 0.7,
                    'severity': 'medium',
                    'description': '数据完整性较低，需要改进数据收集'
                },
                'unreliable_source': {
                    'severity': 'high',
                    'description': '数据来源不可靠，影响研究可信度'
                }
            },
            'analysis': {
                'inappropriate_method': {
                    'severity': 'high',
                    'description': '分析方法不适合数据类型或研究目标'
                },
                'no_statistical_test': {
                    'severity': 'medium',
                    'description': '缺少统计显著性检验'
                },
                'multiple_testing_issue': {
                    'severity': 'medium',
                    'description': '多重比较问题，需要调整显著性水平'
                }
            },
            'reproducibility': {
                'no_code_available': {
                    'severity': 'high',
                    'description': '代码不可获得，影响研究可重现性'
                },
                'no_data_available': {
                    'severity': 'high',
                    'description': '数据不可获得，无法验证研究结果'
                },
                'poor_documentation': {
                    'threshold': 0.6,
                    'severity': 'medium',
                    'description': '文档质量较差，影响研究理解和重现'
                }
            },
            'novelty': {
                'limited_literature_review': {
                    'threshold': 0.5,
                    'severity': 'medium',
                    'description': '文献综述覆盖面不足'
                },
                'no_research_gaps': {
                    'severity': 'high',
                    'description': '未识别明确的研究空白'
                },
                'limited_innovation': {
                    'threshold': 1,
                    'severity': 'medium',
                    'description': '创新点不足，研究贡献有限'
                }
            },
            'impact': {
                'no_theoretical_contribution': {
                    'severity': 'medium',
                    'description': '缺少理论贡献'
                },
                'no_practical_application': {
                    'severity': 'medium',
                    'description': '缺少实践应用价值'
                },
                'limited_societal_impact': {
                    'severity': 'low',
                    'description': '社会影响有限'
                }
            },
            'presentation': {
                'poor_writing_clarity': {
                    'threshold': 0.6,
                    'severity': 'medium',
                    'description': '写作清晰度不足，影响理解'
                },
                'poor_organization': {
                    'threshold': 0.6,
                    'severity': 'medium',
                    'description': '文章结构组织不佳'
                },
                'incomplete_content': {
                    'threshold': 0.7,
                    'severity': 'medium',
                    'description': '内容不完整，缺少重要信息'
                }
            },
            'ethics': {
                'no_ethics_approval': {
                    'severity': 'critical',
                    'description': '缺少必要的伦理审批'
                },
                'bias_not_considered': {
                    'severity': 'high',
                    'description': '未考虑潜在偏见问题'
                },
                'bias_not_mitigated': {
                    'severity': 'medium',
                    'description': '已识别偏见但未采取缓解措施'
                }
            }
        }
        
        # Severity-to-weight mapping used by get_issue_statistics to compute
        # the aggregate severity score of a set of issues.
        self.severity_weights = {
            'critical': 1.0,
            'high': 0.8,
            'medium': 0.6,
            'low': 0.4
        }
    
    async def detect_quality_issues(self, research_data: Dict[str, Any],
                                   assessment: Optional[QualityAssessment] = None) -> List[QualityIssue]:
        """Detect research-quality issues across all supported categories.

        Args:
            research_data: Research metadata dict read by the per-category
                detectors (experimental design, data info, methods, etc.).
            assessment: Optional prior quality assessment; when provided,
                additional score-based issues are derived from its dimension
                and overall scores.

        Returns:
            A deduplicated list of issues sorted most-severe first. Returns
            an empty list if detection fails (best-effort contract).
        """
        # One detector per issue category; run them all over the same input.
        detectors = (
            self._detect_methodology_issues,
            self._detect_data_quality_issues,
            self._detect_analysis_issues,
            self._detect_reproducibility_issues,
            self._detect_novelty_issues,
            self._detect_impact_issues,
            self._detect_presentation_issues,
            self._detect_ethics_issues,
        )
        try:
            all_issues: List[QualityIssue] = []
            for detector in detectors:
                all_issues.extend(await detector(research_data))

            # Score-derived issues only when an assessment is available.
            if assessment:
                all_issues.extend(await self._detect_score_based_issues(assessment))

            return await self._prioritize_and_deduplicate_issues(all_issues)

        except Exception:
            # Log with traceback instead of print() so failures are visible in
            # logs; keep the original best-effort contract of returning [].
            logging.getLogger(__name__).exception("问题检测失败")
            return []
    
    async def _detect_methodology_issues(self, research_data: Dict[str, Any]) -> List[QualityIssue]:
        """Check the experimental design for methodological weaknesses."""
        found: List[QualityIssue] = []
        design = research_data.get('experimental_design', {})

        # Sample size below the configured minimum.
        n_samples = design.get('sample_size', 0)
        min_samples = self.detection_rules['methodology']['sample_size_too_small']['threshold']
        if n_samples < min_samples:
            found.append(QualityIssue(
                issue_type='methodology',
                severity='high',
                description=f"样本量过小 ({n_samples}个)，建议至少30个样本",
                location='experimental_design.sample_size',
                suggested_fix='增加样本量至少到30个以上，确保统计检验的有效性',
                impact_assessment='可能导致统计检验功效不足，结果不可靠'
            ))

        # Neither a control group nor control variables were specified.
        has_controls = design.get('control_group') or design.get('control_variables')
        if not has_controls:
            found.append(QualityIssue(
                issue_type='methodology',
                severity='medium',
                description='缺少对照组或控制变量',
                location='experimental_design',
                suggested_fix='设置适当的对照组或明确控制变量',
                impact_assessment='可能无法排除混杂因素的影响'
            ))

        # Missing randomization suggests possible selection bias.
        if not design.get('randomization'):
            found.append(QualityIssue(
                issue_type='methodology',
                severity='medium',
                description='未进行随机化处理',
                location='experimental_design.randomization',
                suggested_fix='采用随机化方法分配实验对象',
                impact_assessment='可能存在选择偏差，影响结果的内部效度'
            ))

        # Fewer than two control variables leaves confounders uncontrolled.
        controls = design.get('control_variables', [])
        if len(controls) < 2:
            found.append(QualityIssue(
                issue_type='methodology',
                severity='medium',
                description=f'控制变量不足 (仅{len(controls)}个)',
                location='experimental_design.control_variables',
                suggested_fix='增加更多相关的控制变量',
                impact_assessment='可能存在未控制的混杂因素'
            ))

        return found
    
    async def _detect_data_quality_issues(self, research_data: Dict[str, Any]) -> List[QualityIssue]:
        """Inspect the data description for completeness, source and volume problems."""
        found: List[QualityIssue] = []
        data_info = research_data.get('data_info', {})

        # Completeness below 70% is flagged; below 50% it escalates to high.
        completeness = data_info.get('completeness', 0.5)
        if completeness < 0.7:
            if completeness < 0.5:
                level = 'high'
            else:
                level = 'medium'
            found.append(QualityIssue(
                issue_type='data_quality',
                severity=level,
                description=f'数据完整性较低 ({completeness:.1%})',
                location='data_info.completeness',
                suggested_fix='改进数据收集方法，减少缺失值',
                impact_assessment='数据缺失可能导致分析结果偏差'
            ))

        # An untrusted (or unspecified) source undermines credibility.
        if not data_info.get('reliable_source'):
            found.append(QualityIssue(
                issue_type='data_quality',
                severity='high',
                description='数据来源可靠性存疑',
                location='data_info.reliable_source',
                suggested_fix='使用权威、可信的数据源',
                impact_assessment='不可靠的数据源会严重影响研究可信度'
            ))

        # Fewer than 100 records is treated as a small dataset.
        record_count = data_info.get('data_size', 0)
        if record_count < 100:
            found.append(QualityIssue(
                issue_type='data_quality',
                severity='medium',
                description=f'数据量较小 ({record_count}条记录)',
                location='data_info.data_size',
                suggested_fix='收集更多数据以提高分析的可靠性',
                impact_assessment='数据量不足可能影响统计分析的有效性'
            ))

        return found
    
    async def _detect_analysis_issues(self, research_data: Dict[str, Any]) -> List[QualityIssue]:
        """Check that analysis methods exist and include statistical testing."""
        found: List[QualityIssue] = []
        methods = research_data.get('analysis_methods', [])

        # No methods listed at all.
        if not methods:
            found.append(QualityIssue(
                issue_type='analysis',
                severity='high',
                description='未指定分析方法',
                location='analysis_methods',
                suggested_fix='选择适合研究目标和数据类型的分析方法',
                impact_assessment='缺少分析方法无法得出有效结论'
            ))

        # A method mentioning "statistical" or "test" counts as a statistical test.
        def _mentions_test(method: Any) -> bool:
            text = str(method).lower()
            return 'statistical' in text or 'test' in text

        if methods and not any(_mentions_test(m) for m in methods):
            found.append(QualityIssue(
                issue_type='analysis',
                severity='medium',
                description='缺少统计显著性检验',
                location='analysis_methods',
                suggested_fix='添加适当的统计检验方法',
                impact_assessment='无法确定结果的统计显著性'
            ))

        # A single method labelled "basic" is considered too simplistic.
        if len(methods) == 1 and 'basic' in str(methods[0]).lower():
            found.append(QualityIssue(
                issue_type='analysis',
                severity='medium',
                description='分析方法过于简单',
                location='analysis_methods',
                suggested_fix='考虑使用更复杂的分析方法',
                impact_assessment='简单的分析方法可能无法充分挖掘数据价值'
            ))

        return found
    
    async def _detect_reproducibility_issues(self, research_data: Dict[str, Any]) -> List[QualityIssue]:
        """Check code/data availability and documentation quality for reproducibility."""
        found: List[QualityIssue] = []

        # Code: missing entirely is worse than available-but-undocumented.
        if not research_data.get('code_available'):
            found.append(QualityIssue(
                issue_type='reproducibility',
                severity='high',
                description='代码不可获得',
                location='code_available',
                suggested_fix='提供完整的实现代码',
                impact_assessment='无法验证和重现研究结果'
            ))
        elif not research_data.get('code_documented'):
            found.append(QualityIssue(
                issue_type='reproducibility',
                severity='medium',
                description='代码缺少文档',
                location='code_documented',
                suggested_fix='为代码添加详细的注释和说明',
                impact_assessment='难以理解和使用代码'
            ))

        # Data: same two-level availability/documentation check as for code.
        if not research_data.get('data_available'):
            found.append(QualityIssue(
                issue_type='reproducibility',
                severity='high',
                description='数据不可获得',
                location='data_available',
                suggested_fix='在符合隐私和伦理要求的前提下公开数据',
                impact_assessment='无法验证分析结果的正确性'
            ))
        elif not research_data.get('data_documented'):
            found.append(QualityIssue(
                issue_type='reproducibility',
                severity='medium',
                description='数据缺少说明文档',
                location='data_documented',
                suggested_fix='提供数据字典和收集方法说明',
                impact_assessment='难以正确理解和使用数据'
            ))

        # Documentation scores below 60% on either axis are flagged.
        docs = research_data.get('documentation', {})
        if isinstance(docs, dict):
            doc_checks = (
                ('completeness', docs.get('completeness', 0.5),
                 '文档完整性不足', '补充缺失的文档内容',
                 '不完整的文档影响研究的可理解性'),
                ('clarity', docs.get('clarity', 0.5),
                 '文档清晰度不足', '改进文档的表达清晰度',
                 '不清晰的文档影响研究的可重现性'),
            )
            for field, value, label, fix, impact in doc_checks:
                if value < 0.6:
                    found.append(QualityIssue(
                        issue_type='reproducibility',
                        severity='medium',
                        description=f'{label} ({value:.1%})',
                        location=f'documentation.{field}',
                        suggested_fix=fix,
                        impact_assessment=impact
                    ))

        return found
    
    async def _detect_novelty_issues(self, research_data: Dict[str, Any]) -> List[QualityIssue]:
        """Check literature-review coverage, research gaps and innovation claims."""
        found: List[QualityIssue] = []

        # Literature review: coverage and identified research gaps.
        review = research_data.get('literature_review', {})
        if isinstance(review, dict):
            coverage = review.get('coverage_score', 0.5)
            if coverage < 0.6:
                found.append(QualityIssue(
                    issue_type='novelty',
                    severity='medium',
                    description=f'文献综述覆盖面不足 ({coverage:.1%})',
                    location='literature_review.coverage_score',
                    suggested_fix='扩大文献搜索范围，包含更多相关研究',
                    impact_assessment='可能遗漏重要的相关工作'
                ))
            if not review.get('research_gaps', []):
                found.append(QualityIssue(
                    issue_type='novelty',
                    severity='high',
                    description='未识别明确的研究空白',
                    location='literature_review.research_gaps',
                    suggested_fix='深入分析现有研究，识别具体的研究空白',
                    impact_assessment='缺少明确的研究动机和贡献定位'
                ))

        # Innovation: none at all is high severity, a single aspect is medium.
        aspects = research_data.get('innovation_aspects', [])
        if not aspects:
            found.append(QualityIssue(
                issue_type='novelty',
                severity='high',
                description='缺少明确的创新点',
                location='innovation_aspects',
                suggested_fix='明确阐述研究的创新之处',
                impact_assessment='研究价值和贡献不明确'
            ))
        elif len(aspects) < 2:
            found.append(QualityIssue(
                issue_type='novelty',
                severity='medium',
                description='创新点较少',
                location='innovation_aspects',
                suggested_fix='挖掘更多的创新点和贡献',
                impact_assessment='研究的新颖性可能不足'
            ))

        return found
    
    async def _detect_impact_issues(self, research_data: Dict[str, Any]) -> List[QualityIssue]:
        """Check theoretical, practical and societal impact claims."""
        found: List[QualityIssue] = []

        # Each check: (input key, severity, description, suggested fix, impact).
        impact_checks = (
            ('theoretical_contributions', 'medium', '缺少理论贡献',
             '明确阐述研究的理论价值和贡献', '研究的学术价值可能有限'),
            ('practical_applications', 'medium', '缺少实践应用价值',
             '探讨研究结果的实际应用场景', '研究的实用价值可能不足'),
            ('societal_benefits', 'low', '社会影响有限',
             '考虑研究对社会的潜在积极影响', '研究的社会价值可能不明显'),
        )

        for key, severity, desc, fix, impact in impact_checks:
            if not research_data.get(key, []):
                found.append(QualityIssue(
                    issue_type='impact',
                    severity=severity,
                    description=desc,
                    location=key,
                    suggested_fix=fix,
                    impact_assessment=impact
                ))

        return found
    
    async def _detect_presentation_issues(self, research_data: Dict[str, Any]) -> List[QualityIssue]:
        """Check writing clarity, structural organization and content completeness."""
        found: List[QualityIssue] = []

        # Writing clarity below 60%.
        writing = research_data.get('writing_quality', {})
        if isinstance(writing, dict):
            clarity = writing.get('clarity_score', 0.5)
            if clarity < 0.6:
                found.append(QualityIssue(
                    issue_type='presentation',
                    severity='medium',
                    description=f'写作清晰度不足 ({clarity:.1%})',
                    location='writing_quality.clarity_score',
                    suggested_fix='改进语言表达，提高文章可读性',
                    impact_assessment='影响读者理解和研究传播'
                ))

        # Structural organization below 60%.
        structure = research_data.get('structure_quality', {})
        if isinstance(structure, dict):
            organization = structure.get('organization_score', 0.5)
            if organization < 0.6:
                found.append(QualityIssue(
                    issue_type='presentation',
                    severity='medium',
                    description=f'文章结构组织不佳 ({organization:.1%})',
                    location='structure_quality.organization_score',
                    suggested_fix='重新组织文章结构，提高逻辑性',
                    impact_assessment='影响文章的逻辑流畅性'
                ))

        # Overall content completeness below 70%.
        completeness = research_data.get('content_completeness', 0.5)
        if completeness < 0.7:
            found.append(QualityIssue(
                issue_type='presentation',
                severity='medium',
                description=f'内容不完整 ({completeness:.1%})',
                location='content_completeness',
                suggested_fix='补充缺失的重要内容',
                impact_assessment='不完整的内容影响研究的完整性'
            ))

        return found
    
    async def _detect_ethics_issues(self, research_data: Dict[str, Any]) -> List[QualityIssue]:
        """Check ethics approval and bias identification/mitigation."""
        found: List[QualityIssue] = []

        # Neither formal approval nor documented ethical considerations present.
        has_ethics = research_data.get('ethics_approval') or research_data.get('ethics_considered')
        if not has_ethics:
            found.append(QualityIssue(
                issue_type='ethics',
                severity='critical',
                description='缺少伦理审批或伦理考虑',
                location='ethics_approval',
                suggested_fix='获得必要的伦理审批或说明伦理考虑',
                impact_assessment='可能违反研究伦理规范'
            ))

        # Bias: unidentified bias is worse than identified-but-unmitigated.
        bias = research_data.get('bias_analysis', {})
        if isinstance(bias, dict):
            if not bias.get('bias_identified'):
                found.append(QualityIssue(
                    issue_type='ethics',
                    severity='high',
                    description='未考虑潜在偏见问题',
                    location='bias_analysis.bias_identified',
                    suggested_fix='分析研究中可能存在的偏见',
                    impact_assessment='可能存在未识别的偏见影响结果'
                ))
            elif not bias.get('bias_mitigated'):
                found.append(QualityIssue(
                    issue_type='ethics',
                    severity='medium',
                    description='已识别偏见但未采取缓解措施',
                    location='bias_analysis.bias_mitigated',
                    suggested_fix='采取适当措施缓解识别出的偏见',
                    impact_assessment='偏见可能影响研究结果的公正性'
                ))

        return found
    
    async def _detect_score_based_issues(self, assessment: QualityAssessment) -> List[QualityIssue]:
        """Derive issues from a prior assessment's dimension and overall scores."""
        found: List[QualityIssue] = []

        # Per-dimension: < 0.4 is a severe problem, < 0.6 leaves room to improve.
        for dim, value in assessment.dimension_scores.items():
            if value >= 0.6:
                continue
            if value < 0.4:
                found.append(QualityIssue(
                    issue_type=dim,
                    severity='high',
                    description=f'{dim}维度评分过低 ({value:.2f})',
                    location=f'assessment.{dim}',
                    suggested_fix=f'重点改进{dim}相关问题',
                    impact_assessment=f'{dim}问题严重影响研究质量'
                ))
            else:
                found.append(QualityIssue(
                    issue_type=dim,
                    severity='medium',
                    description=f'{dim}维度有改进空间 ({value:.2f})',
                    location=f'assessment.{dim}',
                    suggested_fix=f'适当改进{dim}相关方面',
                    impact_assessment=f'{dim}问题影响整体研究质量'
                ))

        # Overall score below 0.5 is treated as a critical, study-wide problem.
        if assessment.overall_score < 0.5:
            found.append(QualityIssue(
                issue_type='overall',
                severity='critical',
                description=f'研究整体质量不达标 ({assessment.overall_score:.2f})',
                location='assessment.overall_score',
                suggested_fix='全面改进研究的各个方面',
                impact_assessment='整体质量问题严重影响研究价值'
            ))

        return found
    
    async def _prioritize_and_deduplicate_issues(self, issues: List[QualityIssue]) -> List[QualityIssue]:
        """Drop duplicate issues and order the rest most-severe first."""
        # Dedup key is (type, description); dict keeps first occurrence and
        # preserves insertion order.
        unique: Dict[Tuple[str, str], QualityIssue] = {}
        for item in issues:
            unique.setdefault((item.issue_type, item.description), item)

        # Stable sort by severity rank; unknown severities sink to the end.
        rank = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3}
        return sorted(unique.values(), key=lambda item: rank.get(item.severity, 4))
    
    async def classify_issues_by_category(self, issues: List[QualityIssue]) -> Dict[str, List[QualityIssue]]:
        """Group issues by their `issue_type`.

        Args:
            issues: Issues to group.

        Returns:
            Mapping of issue type to the issues of that type, in input order.
        """
        categories: Dict[str, List[QualityIssue]] = {}
        for issue in issues:
            # setdefault replaces the manual membership-check-then-append.
            categories.setdefault(issue.issue_type, []).append(issue)
        return categories
    
    async def classify_issues_by_severity(self, issues: List[QualityIssue]) -> Dict[str, List[QualityIssue]]:
        """Bucket issues by severity; unknown severities are silently dropped."""
        buckets: Dict[str, List[QualityIssue]] = {
            level: [] for level in ('critical', 'high', 'medium', 'low')
        }
        for issue in issues:
            bucket = buckets.get(issue.severity)
            if bucket is not None:
                bucket.append(issue)
        return buckets
    
    async def generate_issue_report(self, issues: List[QualityIssue]) -> str:
        """Render a Markdown quality-issue report.

        Args:
            issues: Detected issues to report on.

        Returns:
            A Markdown document with severity distribution, a detailed issue
            list grouped by severity, and (if critical/high issues exist) a
            short prioritized to-fix list. Returns a fixed message when there
            are no issues.
        """
        if not issues:
            return "未发现质量问题。"

        # Loop-invariant label tables, hoisted out of the loops below
        # (originally re-created on every iteration).
        severity_names = {
            'critical': '严重',
            'high': '高',
            'medium': '中等',
            'low': '低'
        }
        severity_headings = {
            'critical': '🚨 严重问题',
            'high': '⚠️ 高优先级问题',
            'medium': '📋 中等优先级问题',
            'low': '💡 低优先级问题'
        }

        report_lines = [
            "# 研究质量问题报告",
            f"检测时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            f"发现问题总数: {len(issues)}",
            ""
        ]

        severity_classification = await self.classify_issues_by_severity(issues)

        # Severity distribution summary.
        report_lines.append("## 问题严重程度分布")
        for severity, issue_list in severity_classification.items():
            if issue_list:
                report_lines.append(f"- {severity_names[severity]}: {len(issue_list)}个")
        report_lines.append("")

        # Detailed issue list, most severe first. (The original also computed
        # a by-category classification here but never used it — removed.)
        report_lines.append("## 详细问题列表")
        for severity in ('critical', 'high', 'medium', 'low'):
            severity_issues = severity_classification[severity]
            if not severity_issues:
                continue
            report_lines.append(f"\n### {severity_headings[severity]}")
            for i, issue in enumerate(severity_issues, 1):
                report_lines.extend([
                    f"\n**{i}. {issue.description}**",
                    f"- 类型: {issue.issue_type}",
                    f"- 位置: {issue.location}",
                    f"- 建议修复: {issue.suggested_fix}",
                    f"- 影响评估: {issue.impact_assessment}"
                ])

        # Improvement priorities: up to five critical/high issues.
        critical_and_high = severity_classification['critical'] + severity_classification['high']
        if critical_and_high:
            report_lines.extend([
                "",
                "## 改进优先级建议",
                "建议优先解决以下问题:",
            ])
            for i, issue in enumerate(critical_and_high[:5], 1):
                report_lines.append(f"{i}. {issue.description}")

        return "\n".join(report_lines)
    
    async def get_issue_statistics(self, issues: List[QualityIssue]) -> Dict[str, Any]:
        """Summarize an issue list.

        Args:
            issues: Issues to summarize.

        Returns:
            Dict with: 'total' count, 'by_severity' counts for the four known
            levels, 'by_category' counts per issue type, 'severity_score'
            (mean severity weight, unknown severities weighted 0.5),
            'most_common_category' (None when empty), and 'most_severe_level'
            ('none' when no known level is present).
        """
        if not issues:
            return {'total': 0, 'by_severity': {}, 'by_category': {}}

        # Single pass over the issues instead of one scan per severity level.
        severity_stats = {level: 0 for level in ('critical', 'high', 'medium', 'low')}
        category_stats: Counter = Counter()
        for issue in issues:
            if issue.severity in severity_stats:
                severity_stats[issue.severity] += 1
            category_stats[issue.issue_type] += 1

        # Mean severity weight; `issues` is non-empty here (early return above
        # made the original trailing `if issues else 0` dead code).
        severity_score = sum(
            self.severity_weights.get(issue.severity, 0.5) for issue in issues
        ) / len(issues)

        return {
            'total': len(issues),
            'by_severity': severity_stats,
            'by_category': dict(category_stats),
            'severity_score': severity_score,
            'most_common_category': category_stats.most_common(1)[0][0] if category_stats else None,
            'most_severe_level': next((sev for sev in ('critical', 'high', 'medium', 'low')
                                       if severity_stats[sev] > 0), 'none')
        }