import re
import json
import statistics
from collections import defaultdict
from vocabulary_estimator import VocabularyEstimator

class BatchProcessor:
    """Batch vocabulary-test processor.

    Parses word-list text of the form ``词A，认识；词B，不认识；...``,
    estimates vocabulary size per batch with a :class:`VocabularyEstimator`
    (same algorithm as the corpus analysis), and produces reports and
    exports of the results.
    """

    # Item / field separators accept both full-width (Chinese) and
    # half-width (ASCII) punctuation.  Compiled once at class load instead
    # of on every parse call.
    _ITEM_SEP = re.compile(r'[；;]')
    _FIELD_SEP = re.compile(r'[，,]')

    # Status tokens that mark a word as "known".
    _KNOWN_TOKENS = frozenset(['认识', '知道', 'yes', 'true', '1', '是'])

    def __init__(self):
        # Delegate that implements the corpus-analysis scoring primitives.
        self.estimator = VocabularyEstimator()

    def parse_word_list(self, word_list_text):
        """Parse word-list text into test records.

        Expected format: ``词A，认识；词B，认识；词C，不认识；...``
        (semicolon-separated items, each item being ``word, status``).

        Args:
            word_list_text: Raw text to parse.

        Returns:
            list[dict]: records with keys ``word`` (str) and ``known``
            (bool).  Items with no status part, or with an empty word,
            are skipped silently.
        """
        test_data = []

        for item in self._ITEM_SEP.split(word_list_text.strip()):
            item = item.strip()
            if not item:
                continue

            parts = self._FIELD_SEP.split(item)
            if len(parts) < 2:
                # No status field present -> not a valid record; skip.
                continue

            word = parts[0].strip()
            status = parts[1].strip()

            if word:  # ensure the word is non-empty
                test_data.append({
                    'word': word,
                    'known': status in self._KNOWN_TOKENS
                })

        return test_data

    def estimate_batch_with_corpus_algorithm(self, test_data):
        """Estimate vocabulary size using the corpus-analysis algorithm.

        Args:
            test_data: list of ``{'word': str, 'known': bool}`` records.

        Returns:
            dict with keys ``vocabulary_size``, ``confidence``, ``level``
            and ``analysis``, or ``None`` when *test_data* is empty.
        """
        # Split into known / unknown words.
        known_words = [item['word'] for item in test_data if item.get('known', False)]
        unknown_words = [item['word'] for item in test_data if not item.get('known', False)]

        if not known_words and not unknown_words:
            return None

        all_words = known_words + unknown_words
        total_words = len(all_words)

        # 1. Word-complexity features (length statistics).
        #    Lengths computed once and reused for mean and variance.
        word_lengths = [len(word) for word in all_words]
        avg_word_length = statistics.mean(word_lengths)
        word_length_variance = statistics.variance(word_lengths) if total_words > 1 else 0

        # 2. Frequency features.
        frequency_ranks = [self.estimator.get_word_frequency_rank(word) for word in all_words]
        avg_frequency_rank = statistics.mean(frequency_ranks)
        rare_words_ratio = len([r for r in frequency_ranks if r > 10000]) / len(frequency_ranks)

        # 3. Vocabulary-level distribution.
        level_counts = defaultdict(int)
        for word in all_words:
            level_counts[self.estimator.get_word_level(word)] += 1
        level_ratios = {level: count / total_words for level, count in level_counts.items()}

        # 4. Academic-vocabulary features.
        academic_words, weighted_academic_score = self.estimator.get_academic_words(all_words)
        academic_ratio = len(academic_words) / total_words
        weighted_academic_ratio = weighted_academic_score / total_words

        # 5. Composite complexity score.  Sentence-complexity inputs are 0
        #    because a word test has no sentences.
        complexity_score = self.estimator.calculate_corpus_complexity(
            avg_word_length, word_length_variance, avg_frequency_rank,
            rare_words_ratio, level_ratios, weighted_academic_ratio,
            0, 0
        )

        # 6. Map complexity to a vocabulary-size estimate.
        vocabulary_estimate = self.estimator.map_complexity_to_vocabulary(complexity_score)

        # 7. Confidence of the estimate.
        confidence = self.estimator.calculate_corpus_confidence(
            total_words, avg_word_length, avg_frequency_rank,
            level_ratios, academic_ratio, 0
        )

        # 8. Level metadata for display.
        level_name, level_display, level_color = self.estimator.get_vocabulary_level(vocabulary_estimate)
        level_info = self.estimator.get_level_description(level_name)

        return {
            'vocabulary_size': int(vocabulary_estimate),
            'confidence': confidence,
            'level': {
                'name': level_name,
                'display': level_display,
                'color': level_color,
                'info': level_info
            },
            'analysis': {
                'total_words': total_words,
                'known_words': len(known_words),
                'unknown_words': len(unknown_words),
                'accuracy': len(known_words) / total_words if total_words > 0 else 0,
                'avg_word_length': round(avg_word_length, 2),
                'avg_frequency_rank': int(avg_frequency_rank),
                'rare_words_ratio': round(rare_words_ratio, 3),
                'academic_ratio': round(academic_ratio, 3),
                'complexity_score': round(complexity_score, 3),
                'level_distribution': level_ratios
            }
        }

    def process_batch(self, word_list_text):
        """Run the batch vocabulary test over raw word-list text.

        Splits the parsed words into batches of 50 and estimates each
        batch with the corpus-analysis algorithm.

        Args:
            word_list_text: Raw word-list text (see :meth:`parse_word_list`).

        Returns:
            list[dict]: one result per batch (id, words, estimate,
            confidence, level, analysis, counts, accuracy).

        Raises:
            ValueError: when no valid records could be parsed.
        """
        test_data = self.parse_word_list(word_list_text)

        if not test_data:
            raise ValueError("无法解析词汇列表，请检查格式")

        results = []

        # Process in batches of 50 words each.
        batch_size = 50
        for i in range(0, len(test_data), batch_size):
            batch = test_data[i:i + batch_size]

            result = self.estimate_batch_with_corpus_algorithm(batch)
            if result is None:
                continue

            # Per-batch known/unknown statistics.
            known_count = sum(1 for item in batch if item['known'])
            unknown_count = len(batch) - known_count

            results.append({
                'batch_id': i // batch_size + 1,
                'words': batch,
                'estimate': result['vocabulary_size'],
                'confidence': result['confidence'],
                'level': result['level'],
                'analysis': result['analysis'],
                'known_count': known_count,
                'unknown_count': unknown_count,
                'accuracy': known_count / len(batch) if batch else 0
            })

        return results

    def process_multiple_batches(self, batch_list):
        """Process several raw word-list texts, collecting all results.

        Failures in one text are reported to stdout and do not abort the
        remaining texts (best-effort processing).

        Args:
            batch_list: iterable of raw word-list texts.

        Returns:
            list[dict]: concatenated batch results from all texts.
        """
        all_results = []

        for i, batch_text in enumerate(batch_list):
            try:
                all_results.extend(self.process_batch(batch_text))
            except Exception as e:
                # Best-effort: report and continue with the next text.
                print(f"处理第{i+1}批数据时出错: {e}")

        return all_results

    def generate_test_report(self, results):
        """Build an aggregate report over per-batch results.

        Args:
            results: list of batch results from :meth:`process_batch`.

        Returns:
            dict with ``summary``, ``batch_details`` and
            ``recommendations`` keys, or the string ``"无测试结果"`` when
            *results* is empty (callers rely on this sentinel).
        """
        if not results:
            return "无测试结果"

        # Aggregate statistics across batches.
        total_estimate = sum(r['estimate'] for r in results)
        avg_estimate = total_estimate / len(results)
        avg_confidence = sum(r['confidence'] for r in results) / len(results)

        total_known = sum(r['known_count'] for r in results)
        total_unknown = sum(r['unknown_count'] for r in results)
        total_words = total_known + total_unknown

        return {
            'summary': {
                'total_batches': len(results),
                'total_words': total_words,
                'total_known': total_known,
                'total_unknown': total_unknown,
                'overall_accuracy': total_known / total_words if total_words > 0 else 0,
                'average_estimate': int(avg_estimate),
                'average_confidence': round(avg_confidence, 3)
            },
            'batch_details': results,
            'recommendations': self.generate_recommendations(avg_estimate, avg_confidence)
        }

    def generate_recommendations(self, estimate, confidence):
        """Produce study advice from an estimate and its confidence.

        Args:
            estimate: estimated vocabulary size.
            confidence: confidence score in [0, 1].

        Returns:
            list[str]: one size-based and one confidence-based message.
        """
        recommendations = []

        # Size-based advice, bucketed by vocabulary estimate.
        if estimate < 3000:
            recommendations.append("建议从基础词汇开始学习，重点关注小学和初中词汇")
        elif estimate < 6000:
            recommendations.append("词汇量处于中等水平，建议加强高中词汇学习")
        elif estimate < 12000:
            recommendations.append("词汇量良好，建议准备四级考试")
        elif estimate < 18000:
            recommendations.append("词汇量优秀，建议准备六级考试")
        else:
            recommendations.append("词汇量非常优秀，可以考虑准备更高级别的考试")

        # Confidence-based advice.
        if confidence < 0.5:
            recommendations.append("测试置信度较低，建议增加测试样本数量")
        elif confidence < 0.7:
            recommendations.append("测试置信度中等，建议进行更多测试以提高准确性")
        else:
            recommendations.append("测试置信度较高，结果较为可靠")

        return recommendations

    def validate_batch_format(self, word_list_text):
        """Validate that word-list text parses into usable records.

        Args:
            word_list_text: Raw word-list text.

        Returns:
            tuple[bool, str]: (ok, message).  Words shorter than two
            characters are rejected as invalid.
        """
        try:
            test_data = self.parse_word_list(word_list_text)
            if not test_data:
                return False, "未找到有效的词汇数据"

            # Reject empty or single-character words.
            for item in test_data:
                if not item['word'] or len(item['word']) < 2:
                    return False, f"发现无效单词: {item['word']}"

            return True, f"格式正确，共解析出 {len(test_data)} 个单词"

        except Exception as e:
            return False, f"格式验证失败: {str(e)}"

    def export_results(self, results, format='json'):
        """Serialize batch results as JSON or CSV text.

        Args:
            results: list of batch results from :meth:`process_batch`.
            format: ``'json'`` or ``'csv'``.

        Returns:
            str: the serialized results.

        Raises:
            ValueError: on an unsupported *format*.
        """
        if format == 'json':
            return json.dumps(results, ensure_ascii=False, indent=2)
        elif format == 'csv':
            # Flat CSV: one row per batch, summary columns only.
            csv_lines = ['batch_id,estimate,confidence,known_count,unknown_count,accuracy']
            for result in results:
                csv_lines.append(f"{result['batch_id']},{result['estimate']},{result['confidence']},"
                               f"{result['known_count']},{result['unknown_count']},{result['accuracy']}")
            return '\n'.join(csv_lines)
        else:
            raise ValueError("不支持的导出格式")