import json
import logging
from typing import Dict, List, Optional
from datetime import datetime
from .llm_service import LLMService
from .prompt_templates import ANSWER_SCORING_TEMPLATES

class AnswerScorer:
    """LLM-backed scoring service for student answers.

    Supports four question types -- code, multiple choice, fill-in-the-blank
    and short answer -- by rendering a type-specific prompt, asking the LLM
    for a structured JSON verdict, and sanity-checking the returned payload.
    Every public method returns an envelope dict:
    ``{'success': True, 'scoring_result': ..., 'validation': ..., 'scored_at': ...}``
    on success, or ``{'success': False, 'error': ...}`` on failure.
    """

    # JSON skeletons shown to the LLM so it returns a parseable structure,
    # keyed by answer type. Sent verbatim to the model -- do not reformat.
    _EXPECTED_FORMATS: Dict[str, str] = {
        'code': """{
    "score": 85,
    "max_score": 100,
    "feedback": "详细的评价反馈",
    "syntax_check": {
        "valid": true,
        "errors": []
    },
    "logic_check": {
        "correct": true,
        "issues": []
    },
    "code_quality": {
        "readability": "良好",
        "efficiency": "良好",
        "suggestions": []
    },
    "detailed_analysis": "详细的分析说明"
}""",
        'choice': """{
    "score": 5,
    "max_score": 5,
    "correct": true,
    "feedback": "答案正确/错误"
}""",
        'blank': """{
    "score": 4,
    "max_score": 5,
    "correct_count": 4,
    "total_count": 5,
    "feedback": "详细反馈",
    "detailed_scores": [
        {
            "blank_index": 1,
            "correct": true,
            "score": 1
        }
    ]
}""",
        'short': """{
    "score": 8,
    "max_score": 10,
    "feedback": "总体评价",
    "detailed_scores": [
        {
            "criterion": "评分要点1",
            "score": 3,
            "max_score": 3,
            "feedback": "该要点评价"
        }
    ],
    "content_analysis": {
        "relevance": "答案相关性分析",
        "completeness": "完整性分析",
        "accuracy": "准确性分析"
    },
    "improvement_suggestions": [
        "改进建议1",
        "改进建议2"
    ]
}""",
    }

    # Human-readable labels used to compose runtime log messages,
    # keyed by answer type. These feed user-visible logs; keep as-is.
    _TYPE_LABELS: Dict[str, str] = {
        'code': '代码',
        'choice': '选择题',
        'blank': '填空题',
        'short': '简答题',
    }

    def __init__(self, llm_service: Optional["LLMService"] = None):
        """Create a scorer; defaults to a DeepSeek-backed LLM service."""
        self.llm_service = llm_service or LLMService(provider='deepseek')
        self.logger = logging.getLogger(__name__)

    def _score(self, answer_type: str, **prompt_fields) -> Dict:
        """Run the shared scoring pipeline for *answer_type*.

        Renders the prompt template with *prompt_fields*, queries the LLM for
        structured data, validates the payload, and wraps everything in the
        standard envelope. Any exception (template rendering, LLM call,
        validation) is caught and converted to a failure envelope so callers
        always receive a dict.
        """
        label = self._TYPE_LABELS[answer_type]
        try:
            prompt = ANSWER_SCORING_TEMPLATES[answer_type].format(**prompt_fields)
            result = self.llm_service.generate_structured_data(
                prompt, self._EXPECTED_FORMATS[answer_type]
            )

            validation = self._validate_scoring_result(result, answer_type)
            if not validation['valid']:
                self.logger.warning(f"{label}评分验证失败: {validation['errors']}")

            return {
                'success': True,
                'scoring_result': result,
                'validation': validation,
                'scored_at': datetime.now().isoformat()
            }

        except Exception as e:
            self.logger.error(f"{label}评分失败: {str(e)}")
            return {
                'success': False,
                'error': str(e)
            }

    def score_code_answer(self, problem_description: str, standard_answer: str,
                          student_answer: str, language: str = 'python',
                          test_cases: Optional[List[Dict]] = None) -> Dict:
        """Score a programming answer against the standard solution.

        *test_cases* (optional) is serialized to JSON for the prompt;
        ``None`` or an empty list becomes the literal string ``"[]"``.
        """
        return self._score(
            'code',
            problem_description=problem_description,
            standard_answer=standard_answer,
            student_answer=student_answer,
            language=language,
            test_cases=json.dumps(test_cases, ensure_ascii=False) if test_cases else "[]"
        )

    def score_choice_answer(self, question: str, correct_answer: str,
                            student_answer: str) -> Dict:
        """Score a multiple-choice answer against the correct option."""
        return self._score(
            'choice',
            question=question,
            correct_answer=correct_answer,
            student_answer=student_answer
        )

    def score_blank_answer(self, question: str, standard_answer: str,
                           student_answer: str) -> Dict:
        """Score a fill-in-the-blank answer against the standard answer."""
        return self._score(
            'blank',
            question=question,
            standard_answer=standard_answer,
            student_answer=student_answer
        )

    def score_short_answer(self, question: str, reference_answer: str,
                           student_answer: str,
                           scoring_criteria: Optional[List[Dict]] = None) -> Dict:
        """Score a short/essay answer against a reference answer.

        *scoring_criteria* (optional) is serialized to JSON for the prompt;
        ``None`` or an empty list becomes the literal string ``"[]"``.
        """
        return self._score(
            'short',
            question=question,
            reference_answer=reference_answer,
            scoring_criteria=json.dumps(scoring_criteria, ensure_ascii=False) if scoring_criteria else "[]",
            student_answer=student_answer
        )

    def _score_submission(self, submission: Dict) -> Dict:
        """Dispatch a single submission dict to the matching scorer.

        ``problem_type`` defaults to ``'code'``; an unrecognized type yields
        a failure envelope instead of raising.
        """
        problem_type = submission.get('problem_type', 'code')

        if problem_type == 'code':
            return self.score_code_answer(
                submission['problem_description'],
                submission['standard_answer'],
                submission['student_answer'],
                submission.get('language', 'python'),
                submission.get('test_cases', [])
            )
        if problem_type == 'choice':
            return self.score_choice_answer(
                submission['question'],
                submission['correct_answer'],
                submission['student_answer']
            )
        if problem_type == 'blank':
            return self.score_blank_answer(
                submission['question'],
                submission['standard_answer'],
                submission['student_answer']
            )
        if problem_type == 'short':
            return self.score_short_answer(
                submission['question'],
                submission['reference_answer'],
                submission['student_answer'],
                submission.get('scoring_criteria', [])
            )
        return {
            'success': False,
            'error': f"不支持的题目类型: {problem_type}"
        }

    def batch_score_answers(self, submissions: List[Dict]) -> Dict:
        """Score every submission and return per-item results plus a summary.

        Each result entry carries the submission's ``id`` (``None`` if
        absent) and the scorer's envelope. ``success_rate`` is 0 for an
        empty batch.
        """
        try:
            results = []
            total_count = len(submissions)
            success_count = 0

            for submission in submissions:
                try:
                    result = self._score_submission(submission)
                except Exception as e:
                    # A single malformed submission must not abort the batch.
                    self.logger.error(f"批量评分单个答案失败: {str(e)}")
                    result = {
                        'success': False,
                        'error': str(e)
                    }

                results.append({
                    'submission_id': submission.get('id'),
                    'result': result
                })
                if result['success']:
                    success_count += 1

            return {
                'success': True,
                'results': results,
                'summary': {
                    'total_count': total_count,
                    'success_count': success_count,
                    'success_rate': success_count / total_count if total_count > 0 else 0
                },
                'scored_at': datetime.now().isoformat()
            }

        except Exception as e:
            self.logger.error(f"批量评分失败: {str(e)}")
            return {
                'success': False,
                'error': str(e)
            }

    def _validate_scoring_result(self, result: Dict, answer_type: str) -> Dict:
        """Sanity-check an LLM scoring payload.

        Hard errors (missing required fields, non-numeric scores, a
        non-positive max score) flip ``valid`` to ``False``; everything else
        is recorded as a warning so a slightly malformed result can still
        be used by the caller.
        """
        validation = {
            'valid': True,
            'errors': [],
            'warnings': []
        }

        # Required top-level fields for every answer type.
        for field in ('score', 'max_score', 'feedback'):
            if field not in result:
                validation['valid'] = False
                validation['errors'].append(f"缺少必需字段: {field}")

        # Plausibility checks on the numeric score values.
        if 'score' in result and 'max_score' in result:
            score = result['score']
            max_score = result['max_score']

            if isinstance(score, (int, float)) and isinstance(max_score, (int, float)):
                if score < 0:
                    validation['warnings'].append("分数不能为负数")
                if score > max_score:
                    validation['warnings'].append("分数不能超过满分")
                if max_score <= 0:
                    # An unusable scale is a hard error, not just a note.
                    validation['valid'] = False
                    validation['errors'].append("满分必须大于0")
            else:
                # LLMs occasionally emit scores as strings; report a
                # validation error instead of letting the comparison raise.
                validation['valid'] = False
                validation['errors'].append("分数字段必须为数值")

        # Type-specific payload expectations (missing ones are warnings only).
        if answer_type == 'code':
            if 'syntax_check' not in result:
                validation['warnings'].append("代码评分缺少语法检查")
            if 'logic_check' not in result:
                validation['warnings'].append("代码评分缺少逻辑检查")

        elif answer_type == 'choice':
            if 'correct' not in result:
                validation['warnings'].append("选择题评分缺少正确性判断")

        elif answer_type == 'blank':
            if 'correct_count' not in result or 'total_count' not in result:
                validation['warnings'].append("填空题评分缺少正确数量统计")

        elif answer_type == 'short':
            if 'detailed_scores' not in result:
                validation['warnings'].append("简答题评分缺少详细分数")
            if 'content_analysis' not in result:
                validation['warnings'].append("简答题评分缺少内容分析")

        return validation

    def get_scoring_stats(self) -> Dict:
        """Return static capability info plus the active provider's details."""
        return {
            'provider': self.llm_service.get_provider_info(),
            'supported_types': ['code', 'choice', 'blank', 'short'],
            'scoring_features': [
                '语法检查',
                '逻辑验证',
                '代码质量评估',
                '内容相关性分析',
                '完整性检查',
                '改进建议生成'
            ]
        }
