import re
import json
import random
import statistics
from collections import Counter, defaultdict
import os

class VocabularyEstimator:
    def __init__(self):
        """Build the estimator and eagerly load all word-list data from disk."""
        # word -> COCA frequency rank (int); filled by load_coca_frequency()
        self.word_frequency_data = {}
        # level name ('primary', 'middle', 'high', 'cet4', 'cet6') -> set of lowercase words
        self.vocabulary_levels = {}
        # NOTE: performs file I/O at construction time; missing files are skipped silently
        self.load_vocabulary_data()
        
    def load_vocabulary_data(self):
        """加载词汇数据"""
        # 加载不同级别的词汇表
        vocabulary_files = {
            'primary': 'data/小学英语大纲词汇.txt',
            'middle': 'data/中考英语词汇表.txt',
            'high': 'data/Highschool_edited.txt',
            'cet4': 'data/CET4_edited.txt',
            'cet6': 'data/CET6_edited.txt'
        }
        
        for level, file_path in vocabulary_files.items():
            if os.path.exists(file_path):
                words = self.load_words_from_file(file_path)
                self.vocabulary_levels[level] = set(words)
                
        # 加载COCA词频数据
        coca_file = 'english-wordlists/COCA_20000.txt'
        if os.path.exists(coca_file):
            self.load_coca_frequency(coca_file)
            
    def load_words_from_file(self, file_path):
        """从文件加载词汇"""
        words = []
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if line and not line.startswith('#'):
                        # 提取英文单词
                        word_match = re.search(r'[a-zA-Z]+', line)
                        if word_match:
                            word = word_match.group().lower()
                            words.append(word)
        except Exception as e:
            print(f"加载文件 {file_path} 时出错: {e}")
        return words
    
    def load_coca_frequency(self, file_path):
        """加载COCA词频数据"""
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                for line in f:
                    parts = line.strip().split()
                    if len(parts) >= 2:
                        word = parts[0].lower()
                        try:
                            frequency = int(parts[1])
                            self.word_frequency_data[word] = frequency
                        except ValueError:
                            continue
        except Exception as e:
            print(f"加载COCA数据时出错: {e}")
    
    def get_word_level(self, word):
        """获取单词的难度级别"""
        word = word.lower()
        
        if word in self.vocabulary_levels.get('primary', set()):
            return 'primary'
        elif word in self.vocabulary_levels.get('middle', set()):
            return 'middle'
        elif word in self.vocabulary_levels.get('high', set()):
            return 'high'
        elif word in self.vocabulary_levels.get('cet4', set()):
            return 'cet4'
        elif word in self.vocabulary_levels.get('cet6', set()):
            return 'cet6'
        else:
            return 'advanced'
    
    def get_word_frequency_rank(self, word):
        """获取单词的词频排名"""
        word = word.lower()
        if word in self.word_frequency_data:
            return self.word_frequency_data[word]
        return 20000  # 默认排名
    
    def calculate_level_scores(self, known_words, unknown_words):
        """计算各级别的得分"""
        level_scores = defaultdict(lambda: {'known': 0, 'total': 0})
        
        # 统计已知单词的级别分布
        for word in known_words:
            level = self.get_word_level(word)
            level_scores[level]['known'] += 1
            level_scores[level]['total'] += 1
        
        # 统计未知单词的级别分布
        for word in unknown_words:
            level = self.get_word_level(word)
            level_scores[level]['total'] += 1
        
        return level_scores
    
    def estimate_vocabulary(self, test_data):
        """
        Estimate vocabulary size from word-test answers (optimized version).

        test_data: list of dicts, each with a word and whether the user knows
        it, e.g. [{'word': 'apple', 'known': True}, ...].  Entries missing the
        'known' key are treated as unknown.

        Returns (estimate, confidence): estimate is an int clamped to
        [500, 50000]; confidence is a float capped at 1.0.
        """
        known_words = [item['word'] for item in test_data if item.get('known', False)]
        unknown_words = [item['word'] for item in test_data if not item.get('known', False)]
        
        if not known_words and not unknown_words:
            return 0, 0.0
        
        # Per-level known/total tallies for the tested words.
        level_scores = self.calculate_level_scores(known_words, unknown_words)
        
        # Accumulators for the level-by-level estimate.
        total_estimate = 0
        confidence_factors = []
        
        # Per-level vocabulary-size anchors (midpoint of each level's band).
        level_estimates = {
            'primary': 1500,      # primary school band: 1000-2000
            'middle': 4500,       # middle school band: 3000-6000
            'high': 9000,         # high school band: 6000-12000
            'cet4': 15000,        # CET-4 band: 12000-18000
            'cet6': 21500,        # CET-6 band: 18000-25000
            'advanced': 30000     # advanced band: 25000-35000+
        }
        
        # Level weights - higher levels count for more in the running total.
        level_weights = {
            'primary': 1.0,
            'middle': 1.2,
            'high': 1.5,
            'cet4': 1.8,
            'cet6': 2.0,
            'advanced': 2.5
        }
        
        # Progressive accumulation - models continuity between adjacent levels.
        cumulative_estimate = 0
        previous_level_estimate = 0
        mastered_levels = []  # levels considered mastered (accuracy >= 0.8)
        
        # Process levels in ascending difficulty order.
        level_order = ['primary', 'middle', 'high', 'cet4', 'cet6', 'advanced']
        
        for level in level_order:
            if level in level_scores and level_scores[level]['total'] > 0:
                scores = level_scores[level]
                accuracy = scores['known'] / scores['total']
                level_vocab = level_estimates[level]
                level_weight = level_weights[level]
                
                # Accuracy bands map mastery onto a fraction of the level's
                # anchor size plus a per-level confidence factor.
                if accuracy >= 0.9:
                    # Excellent mastery: level treated as fully acquired.
                    level_estimate = level_vocab
                    confidence_factors.append(accuracy * 1.3)  # high confidence
                    mastered_levels.append(level)
                elif accuracy >= 0.8:
                    # Good mastery: mostly acquired.
                    level_estimate = level_vocab * (0.85 + 0.15 * accuracy)
                    confidence_factors.append(accuracy * 1.1)
                    mastered_levels.append(level)
                elif accuracy >= 0.65:
                    # Moderate mastery: partially acquired.
                    level_estimate = level_vocab * (0.6 + 0.25 * accuracy)
                    confidence_factors.append(accuracy * 0.9)
                elif accuracy >= 0.45:
                    # Basic mastery: limited acquisition.
                    level_estimate = level_vocab * (0.3 + 0.3 * accuracy)
                    confidence_factors.append(accuracy * 0.7)
                elif accuracy >= 0.25:
                    # Rudimentary mastery: very little acquired.
                    level_estimate = level_vocab * (0.1 + 0.2 * accuracy)
                    confidence_factors.append(accuracy * 0.5)
                else:
                    # Poor mastery: level probably not reached yet.
                    level_estimate = level_vocab * accuracy * 0.3
                    confidence_factors.append(accuracy * 0.3)
                
                # Apply the level weight.
                weighted_estimate = level_estimate * level_weight
                
                # Continuity bonus: a well-handled previous level slightly
                # boosts the current level's estimate (capped at +15%).
                if previous_level_estimate > 0:
                    if accuracy >= 0.6:
                        continuity_bonus = min(0.15, previous_level_estimate / level_vocab * 0.08)
                        weighted_estimate *= (1 + continuity_bonus)
                
                cumulative_estimate += weighted_estimate
                previous_level_estimate = weighted_estimate
                
                # Weak foundations invalidate extrapolation to higher levels.
                if accuracy < 0.25 and level in ['primary', 'middle']:
                    break
        
        total_estimate = cumulative_estimate
        
        # Confidence: per-level factors blended with sample size and coverage.
        if confidence_factors:
            base_confidence = statistics.mean(confidence_factors)
            
            # Sample-size adjustment (80 answers treated as a full sample).
            total_samples = len(known_words) + len(unknown_words)
            sample_factor = min(1.0, total_samples / 80)
            
            # Level-coverage adjustment (4 covered levels = full coverage).
            covered_levels = len([level for level in level_scores if level_scores[level]['total'] > 0])
            coverage_factor = min(1.0, covered_levels / 4)
            
            # Mastered-level adjustment (3 mastered levels = full credit).
            mastery_factor = min(1.0, len(mastered_levels) / 3)
            
            # Combined confidence.
            confidence = base_confidence * (0.6 + 0.2 * sample_factor + 0.1 * coverage_factor + 0.1 * mastery_factor)
        else:
            confidence = 0.0
        
        # Frequency-rank correction of the estimate.
        if known_words:
            frequency_ranks = [self.get_word_frequency_rank(word) for word in known_words]
            avg_frequency_rank = statistics.mean(frequency_ranks)
            
            # Mostly high-frequency known words suggest the level-based figure
            # underestimates; mostly low-frequency words suggest it overestimates.
            if avg_frequency_rank < 3000:
                frequency_factor = 1.15 + (3000 - avg_frequency_rank) / 3000 * 0.25
            elif avg_frequency_rank < 8000:
                frequency_factor = 1.05 + (8000 - avg_frequency_rank) / 5000 * 0.1
            elif avg_frequency_rank > 18000:
                frequency_factor = 0.85 - (avg_frequency_rank - 18000) / 7000 * 0.25
            else:
                frequency_factor = 1.0 + (12000 - avg_frequency_rank) / 12000 * 0.05
            
            # Apply the correction, clamped to [0.75, 1.4].
            total_estimate *= max(0.75, min(1.4, frequency_factor))
        
        # Fine-tune by the level mix of the tested words themselves.
        if level_scores:
            test_level_distribution = {}
            total_test_words = sum(scores['total'] for scores in level_scores.values())
            
            for level, scores in level_scores.items():
                if scores['total'] > 0:
                    test_level_distribution[level] = scores['total'] / total_test_words
            
            # Many advanced test words -> likely underestimated; a test
            # dominated by primary words -> likely overestimated.
            if 'advanced' in test_level_distribution and test_level_distribution['advanced'] > 0.1:
                if total_estimate < 25000:
                    total_estimate *= 1.1
            elif 'primary' in test_level_distribution and test_level_distribution['primary'] > 0.4:
                if total_estimate > 8000:
                    total_estimate *= 0.9
        
        # Final clamp to a sane range.
        total_estimate = max(500, min(50000, total_estimate))
        
        return int(total_estimate), min(1.0, confidence)
    
    def estimate_from_corpus(self, text):
        """从语料文本估算词汇量 - 优化版本"""
        # 提取英文单词
        words = re.findall(r'\b[a-zA-Z]+\b', text.lower())
        unique_words = list(set(words))
        
        # 过滤掉太短的单词
        unique_words = [word for word in unique_words if len(word) > 2]
        
        if not unique_words:
            return 0, 0.0
        
        # 1. 词汇复杂度分析
        avg_word_length = statistics.mean([len(word) for word in unique_words])
        word_length_variance = statistics.variance([len(word) for word in unique_words]) if len(unique_words) > 1 else 0
        
        # 2. 词频分析
        frequency_ranks = [self.get_word_frequency_rank(word) for word in unique_words]
        avg_frequency_rank = statistics.mean(frequency_ranks)
        rare_words_ratio = len([r for r in frequency_ranks if r > 10000]) / len(frequency_ranks)
        
        # 3. 词汇级别分析
        level_counts = defaultdict(int)
        for word in unique_words:
            level = self.get_word_level(word)
            level_counts[level] += 1
        
        total_words = len(unique_words)
        level_ratios = {level: count/total_words for level, count in level_counts.items()}
        
        # 4. 学术词汇识别 - 使用新的加权分数
        academic_words, weighted_academic_score = self.get_academic_words(unique_words)
        academic_ratio = len(academic_words) / total_words
        weighted_academic_ratio = weighted_academic_score / total_words  # 加权学术词汇比例
        
        # 5. 句型复杂度分析
        sentences = re.split(r'[.!?]+', text)
        sentences = [s.strip() for s in sentences if s.strip()]
        avg_sentence_length = statistics.mean([len(s.split()) for s in sentences]) if sentences else 0
        
        # 6. 复合句分析
        complex_sentence_ratio = len([s for s in sentences if len(s.split()) > 20]) / len(sentences) if sentences else 0
        
        # 7. 计算综合复杂度得分 - 使用加权学术词汇分数
        complexity_score = self.calculate_corpus_complexity(
            avg_word_length, word_length_variance, avg_frequency_rank, 
            rare_words_ratio, level_ratios, weighted_academic_ratio, 
            avg_sentence_length, complex_sentence_ratio
        )
        
        # 8. 基于复杂度估算词汇量
        vocabulary_estimate = self.map_complexity_to_vocabulary(complexity_score)
        
        # 9. 计算置信度
        confidence = self.calculate_corpus_confidence(
            total_words, avg_word_length, avg_frequency_rank, 
            level_ratios, academic_ratio, avg_sentence_length
        )
        
        return int(vocabulary_estimate), min(1.0, confidence)
    
    def get_academic_words(self, words):
        """识别学术词汇 - 扩充版本"""
        # 基础学术词汇（初中-高中水平）
        basic_academic_words = {
            'research', 'analysis', 'study', 'data', 'information', 'knowledge',
            'theory', 'method', 'process', 'system', 'structure', 'function',
            'development', 'change', 'growth', 'increase', 'decrease', 'effect',
            'result', 'conclusion', 'evidence', 'example', 'fact', 'opinion',
            'problem', 'solution', 'question', 'answer', 'explanation', 'description',
            'comparison', 'difference', 'similarity', 'relationship', 'connection',
            'importance', 'significance', 'value', 'quality', 'quantity', 'amount',
            'level', 'degree', 'extent', 'range', 'variety', 'type', 'kind',
            'group', 'category', 'class', 'section', 'part', 'element', 'component'
        }
        
        # 中级学术词汇（高中-四级水平）
        intermediate_academic_words = {
            'methodology', 'framework', 'approach', 'strategy', 'technique',
            'procedure', 'protocol', 'standard', 'criterion', 'parameter',
            'variable', 'factor', 'aspect', 'dimension', 'perspective', 'viewpoint',
            'concept', 'principle', 'hypothesis', 'assumption', 'premise',
            'argument', 'reasoning', 'logic', 'rationale', 'justification',
            'evaluation', 'assessment', 'measurement', 'calculation', 'estimation',
            'prediction', 'forecast', 'projection', 'trend', 'pattern', 'tendency',
            'correlation', 'association', 'interaction', 'influence', 'impact',
            'outcome', 'consequence', 'implication', 'application', 'implementation',
            'integration', 'coordination', 'collaboration', 'cooperation', 'partnership'
        }
        
        # 高级学术词汇（四级-六级水平）
        advanced_academic_words = {
            'theoretical', 'empirical', 'analytical', 'systematic', 'comprehensive',
            'sophisticated', 'complex', 'intricate', 'elaborate', 'detailed',
            'thorough', 'extensive', 'profound', 'substantial', 'significant',
            'crucial', 'essential', 'fundamental', 'critical', 'vital',
            'innovative', 'creative', 'original', 'novel', 'unique',
            'distinctive', 'characteristic', 'representative', 'typical', 'conventional',
            'traditional', 'contemporary', 'modern', 'current', 'recent',
            'historical', 'evolutionary', 'developmental', 'progressive', 'gradual'
        }
        
        # 专业学术词汇（六级-高级水平）
        professional_academic_words = {
            'paradigm', 'phenomenon', 'mechanism', 'intervention', 'manipulation',
            'articulation', 'elaboration', 'conceptualization', 'operationalization',
            'quantification', 'qualification', 'dichotomy', 'continuum', 'spectrum',
            'trajectory', 'dynamics', 'ecology', 'infrastructure', 'architecture',
            'standardization', 'regulation', 'governance', 'stakeholder', 'constituency',
            'demographic', 'socioeconomic', 'psychometric', 'epidemiological',
            'pharmacological', 'neurological', 'anthropological', 'sociological',
            'psychological', 'philosophical', 'methodological', 'theoretical',
            'empirical', 'analytical', 'systematic', 'comprehensive', 'sophisticated'
        }
        
        # 超高级学术词汇（专业水平）
        super_advanced_academic_words = {
            'epistemological', 'ontological', 'metaphysical', 'hermeneutic',
            'phenomenological', 'existentialist', 'structuralist', 'poststructuralist',
            'deconstructionist', 'postmodernist', 'postcolonial', 'feminist',
            'intersectional', 'transdisciplinary', 'interdisciplinary', 'multidisciplinary',
            'cross-cultural', 'transnational', 'globalization', 'internationalization',
            'institutionalization', 'bureaucratization', 'rationalization', 'modernization',
            'industrialization', 'urbanization', 'digitalization', 'virtualization',
            'commodification', 'privatization', 'deregulation', 'liberalization',
            'democratization', 'pluralization', 'diversification', 'specialization'
        }
        
        # 按级别统计学术词汇
        basic_count = len([word for word in words if word in basic_academic_words])
        intermediate_count = len([word for word in words if word in intermediate_academic_words])
        advanced_count = len([word for word in words if word in advanced_academic_words])
        professional_count = len([word for word in words if word in professional_academic_words])
        super_advanced_count = len([word for word in words if word in super_advanced_academic_words])
        
        # 返回所有学术词汇（用于计算总数）
        all_academic_words = (basic_academic_words | intermediate_academic_words | 
                             advanced_academic_words | professional_academic_words | 
                             super_advanced_academic_words)
        
        academic_words = [word for word in words if word in all_academic_words]
        
        # 计算加权学术词汇分数（用于复杂度计算）
        weighted_academic_score = (
            basic_count * 1.0 +
            intermediate_count * 2.0 +
            advanced_count * 3.0 +
            professional_count * 4.0 +
            super_advanced_count * 5.0
        )
        
        return academic_words, weighted_academic_score
    
    def calculate_corpus_complexity(self, avg_word_length, word_length_variance, 
                                  avg_frequency_rank, rare_words_ratio, level_ratios,
                                  weighted_academic_ratio, avg_sentence_length, complex_sentence_ratio):
        """计算语料综合复杂度得分 - 优化版本"""
        
        # 词汇复杂度 (0-1) - 调整权重
        vocab_complexity = min(1.0, (avg_word_length - 4) / 4)  # 4-8字符范围
        vocab_complexity = max(0, vocab_complexity)
        
        # 词频复杂度 (0-1) - 增加权重
        freq_complexity = min(1.0, avg_frequency_rank / 20000)
        
        # 稀有词汇复杂度 (0-1) - 增加权重
        rare_complexity = rare_words_ratio
        
        # 级别分布复杂度 (0-1) - 重新设计权重分配
        level_complexity = (
            level_ratios.get('primary', 0) * 0.05 +    # 降低小学级权重
            level_ratios.get('middle', 0) * 0.25 +     # 提高初中级权重
            level_ratios.get('high', 0) * 0.45 +       # 提高高中级权重
            level_ratios.get('cet4', 0) * 0.65 +       # 提高四级权重
            level_ratios.get('cet6', 0) * 0.85 +       # 提高六级权重
            level_ratios.get('advanced', 0) * 1.0      # 保持高级权重
        )
        
        # 学术词汇复杂度 (0-1) - 使用加权学术词汇比例
        academic_complexity = min(1.0, weighted_academic_ratio * 8)  # 调整放大系数
        
        # 句型复杂度 (0-1) - 增加权重
        sentence_complexity = min(1.0, (avg_sentence_length - 10) / 20)  # 10-30词范围
        sentence_complexity = max(0, sentence_complexity)
        
        # 复合句复杂度 (0-1) - 增加权重
        compound_complexity = complex_sentence_ratio
        
        # 综合复杂度得分 - 重新分配权重
        complexity_score = (
            vocab_complexity * 0.10 +      # 降低词汇复杂度权重
            freq_complexity * 0.15 +       # 降低词频复杂度权重
            rare_complexity * 0.15 +       # 保持稀有词汇权重
            level_complexity * 0.30 +      # 提高级别分布权重
            academic_complexity * 0.20 +   # 大幅提高学术词汇权重
            sentence_complexity * 0.05 +   # 保持句型复杂度权重
            compound_complexity * 0.05     # 保持复合句复杂度权重
        )
        
        return max(0, min(1, complexity_score))
    
    def map_complexity_to_vocabulary(self, complexity_score):
        """将复杂度得分映射到词汇量 - 进一步优化版本"""
        # 重新设计映射函数，确保更好的区分度
        if complexity_score < 0.25:
            # 小学水平: 1000-4000词
            return 1000 + complexity_score * 12000
        elif complexity_score < 0.45:
            # 初中水平: 4000-8000词
            return 4000 + (complexity_score - 0.25) * 20000
        elif complexity_score < 0.65:
            # 高中水平: 8000-15000词
            return 8000 + (complexity_score - 0.45) * 35000
        elif complexity_score < 0.80:
            # 大学水平: 15000-25000词
            return 15000 + (complexity_score - 0.65) * 66667
        else:
            # 研究生/专业水平: 25000-40000词
            return 25000 + (complexity_score - 0.80) * 75000
    
    def calculate_corpus_confidence(self, total_words, avg_word_length, avg_frequency_rank,
                                  level_ratios, academic_ratio, avg_sentence_length):
        """计算语料分析置信度"""
        
        # 样本数量因子
        sample_factor = min(1.0, total_words / 100)
        
        # 词汇多样性因子
        diversity_factor = min(1.0, avg_word_length / 6)
        
        # 词频分布因子
        frequency_factor = min(1.0, avg_frequency_rank / 15000)
        
        # 级别分布因子
        level_factor = sum(level_ratios.values())  # 覆盖率
        
        # 学术词汇因子
        academic_factor = min(1.0, academic_ratio * 5)
        
        # 句型复杂度因子
        sentence_factor = min(1.0, avg_sentence_length / 25)
        
        # 综合置信度
        confidence = (
            sample_factor * 0.25 +
            diversity_factor * 0.20 +
            frequency_factor * 0.20 +
            level_factor * 0.15 +
            academic_factor * 0.10 +
            sentence_factor * 0.10
        )
        
        return max(0.3, min(1.0, confidence))  # 最低置信度0.3
    
    def validate_algorithm(self):
        """验证算法有效性"""
        validation_results = {
            'stability_test': self.stability_test(),
            'accuracy_test': self.accuracy_test(),
            'correlation_test': self.correlation_test()
        }
        return validation_results
    
    def stability_test(self):
        """稳定性测试"""
        test_words = list(self.word_frequency_data.keys())[:1000]
        results = []
        
        # 不同比例测试
        ratios = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        lengths = [100, 200, 300]
        
        for ratio in ratios:
            for length in lengths:
                estimates = []
                for _ in range(10):  # 每个组合测试10次
                    sample_words = random.sample(test_words, min(length, len(test_words)))
                    known_count = int(length * ratio)
                    known_words = sample_words[:known_count]
                    unknown_words = sample_words[known_count:]
                    
                    test_data = ([{'word': w, 'known': True} for w in known_words] + 
                               [{'word': w, 'known': False} for w in unknown_words])
                    
                    estimate, _ = self.estimate_vocabulary(test_data)
                    estimates.append(estimate)
                
                results.append({
                    'ratio': ratio,
                    'length': length,
                    'mean': statistics.mean(estimates),
                    'std': statistics.stdev(estimates) if len(estimates) > 1 else 0,
                    'cv': statistics.stdev(estimates) / statistics.mean(estimates) if len(estimates) > 1 and statistics.mean(estimates) > 0 else 0
                })
        
        return results
    
    def accuracy_test(self):
        """准确性测试"""
        # 模拟不同词汇量水平的用户
        target_levels = [2000, 5000, 8000, 12000, 15000, 20000]
        results = []
        
        for target in target_levels:
            # 生成模拟测试数据
            test_data = self.generate_simulated_test_data(target)
            estimate, confidence = self.estimate_vocabulary(test_data)
            
            error = abs(estimate - target) / target
            results.append({
                'target': target,
                'estimate': estimate,
                'error': error,
                'confidence': confidence
            })
        
        return results
    
    def correlation_test(self):
        """相关性测试"""
        # 模拟四六级成绩与词汇量的相关性
        simulated_data = []
        
        for _ in range(100):
            # 随机生成词汇量
            vocab_size = random.randint(3000, 20000)
            
            # 基于词汇量生成四六级成绩
            cet4_score = min(710, max(200, int(vocab_size / 30 + random.gauss(0, 50))))
            cet6_score = min(710, max(200, int(vocab_size / 35 + random.gauss(0, 60))))
            
            simulated_data.append({
                'vocab_size': vocab_size,
                'cet4_score': cet4_score,
                'cet6_score': cet6_score
            })
        
        # 计算相关系数
        vocab_sizes = [d['vocab_size'] for d in simulated_data]
        cet4_scores = [d['cet4_score'] for d in simulated_data]
        cet6_scores = [d['cet6_score'] for d in simulated_data]
        
        cet4_corr = self.calculate_correlation(vocab_sizes, cet4_scores)
        cet6_corr = self.calculate_correlation(vocab_sizes, cet6_scores)
        
        return {
            'cet4_correlation': cet4_corr,
            'cet6_correlation': cet6_corr,
            'data': simulated_data[:10]  # 只返回前10条数据用于显示
        }
    
    def calculate_correlation(self, x, y):
        """计算相关系数"""
        if len(x) != len(y):
            return 0
        
        n = len(x)
        if n == 0:
            return 0
        
        sum_x = sum(x)
        sum_y = sum(y)
        sum_xy = sum(x[i] * y[i] for i in range(n))
        sum_x2 = sum(x[i] ** 2 for i in range(n))
        sum_y2 = sum(y[i] ** 2 for i in range(n))
        
        numerator = n * sum_xy - sum_x * sum_y
        denominator = ((n * sum_x2 - sum_x ** 2) * (n * sum_y2 - sum_y ** 2)) ** 0.5
        
        if denominator == 0:
            return 0
        
        return numerator / denominator
    
    def generate_simulated_test_data(self, target_vocab_size):
        """生成模拟测试数据"""
        test_data = []
        
        # 根据目标词汇量确定测试单词的难度分布
        if target_vocab_size < 3000:
            level_distribution = {'primary': 0.8, 'middle': 0.2}
        elif target_vocab_size < 6000:
            level_distribution = {'primary': 0.5, 'middle': 0.4, 'high': 0.1}
        elif target_vocab_size < 12000:
            level_distribution = {'primary': 0.3, 'middle': 0.4, 'high': 0.3}
        elif target_vocab_size < 18000:
            level_distribution = {'primary': 0.2, 'middle': 0.3, 'high': 0.3, 'cet4': 0.2}
        else:
            level_distribution = {'primary': 0.1, 'middle': 0.2, 'high': 0.3, 'cet4': 0.3, 'cet6': 0.1}
        
        # 生成测试单词
        all_test_words = []
        for level, ratio in level_distribution.items():
            level_words = list(self.vocabulary_levels.get(level, set()))
            if level_words:
                num_words = int(100 * ratio)
                sample_words = random.sample(level_words, min(num_words, len(level_words)))
                all_test_words.extend(sample_words)
        
        # 随机选择100个单词进行测试
        test_words = random.sample(all_test_words, min(100, len(all_test_words)))
        
        # 根据目标词汇量确定认识比例
        if target_vocab_size < 3000:
            known_ratio = 0.3
        elif target_vocab_size < 6000:
            known_ratio = 0.5
        elif target_vocab_size < 12000:
            known_ratio = 0.7
        elif target_vocab_size < 18000:
            known_ratio = 0.8
        else:
            known_ratio = 0.9
        
        known_count = int(len(test_words) * known_ratio)
        
        for i, word in enumerate(test_words):
            known = i < known_count
            test_data.append({'word': word, 'known': known})
        
        return test_data
    
    def generate_random_test_words(self, count=50, level_distribution=None):
        """
        生成随机测试词汇 - 优化版本
        count: 测试词汇数量
        level_distribution: 各级别词汇比例，如 {'primary': 0.2, 'middle': 0.3, 'high': 0.3, 'cet4': 0.2}
        """
        if level_distribution is None:
            # 优化默认分布：更均衡地覆盖各级别
            level_distribution = {
                'primary': 0.20,    # 小学级词汇 20%
                'middle': 0.25,     # 初中级词汇 25%
                'high': 0.25,       # 高中级词汇 25%
                'cet4': 0.20,       # 四级词汇 20%
                'cet6': 0.08,       # 六级词汇 8%
                'advanced': 0.02    # 高级词汇 2%
            }
        
        test_words = []
        level_word_counts = {}  # 记录每个级别实际选取的词汇数量
        
        # 从各级别词汇中按比例选取
        for level, ratio in level_distribution.items():
            if level in self.vocabulary_levels:
                level_words = list(self.vocabulary_levels[level])
                if level_words:
                    # 计算该级别应选取的词汇数量
                    level_count = max(1, int(count * ratio))
                    # 确保不超过该级别可用词汇数量
                    actual_count = min(level_count, len(level_words))
                    
                    # 随机选取词汇
                    selected_words = random.sample(level_words, actual_count)
                    test_words.extend(selected_words)
                    level_word_counts[level] = actual_count
                    
                    print(f"从{level}级别选取了{actual_count}个词汇")
        
        # 如果某些级别词汇不足，重新分配剩余配额
        if len(test_words) < count:
            remaining_count = count - len(test_words)
            print(f"词汇不足，需要补充{remaining_count}个词汇")
            
            # 按优先级补充：优先补充基础级别
            supplement_priority = ['primary', 'middle', 'high', 'cet4', 'cet6', 'advanced']
            
            for level in supplement_priority:
                if remaining_count <= 0:
                    break
                    
                if level in self.vocabulary_levels:
                    level_words = list(self.vocabulary_levels[level])
                    already_used = level_word_counts.get(level, 0)
                    available_words = [w for w in level_words if w not in test_words]
                    
                    if available_words:
                        supplement_count = min(remaining_count, len(available_words))
                        additional_words = random.sample(available_words, supplement_count)
                        test_words.extend(additional_words)
                        remaining_count -= supplement_count
                        print(f"从{level}级别补充了{supplement_count}个词汇")
        
        # 如果仍然不足，从COCA词频数据中补充
        if len(test_words) < count:
            remaining_count = count - len(test_words)
            coca_words = list(self.word_frequency_data.keys())
            if coca_words:
                # 从COCA词汇中随机选取补充
                available_coca_words = [w for w in coca_words if w not in test_words]
                additional_words = random.sample(available_coca_words, min(remaining_count, len(available_coca_words)))
                test_words.extend(additional_words)
                print(f"从COCA词频数据补充了{len(additional_words)}个词汇")
        
        # 随机打乱顺序
        random.shuffle(test_words)
        
        # 确保返回指定数量的词汇
        final_words = test_words[:count]
        
        # 统计最终分布
        final_distribution = {}
        for word in final_words:
            level = self.get_word_level(word)
            final_distribution[level] = final_distribution.get(level, 0) + 1
        
        print(f"最终词汇分布: {final_distribution}")
        
        return final_words
    
    def generate_adaptive_test_words(self, user_level=None, count=50):
        """
        Generate an adaptive test word set tuned to the user's estimated level.

        user_level: one of 'beginner', 'intermediate', 'advanced', 'expert';
                    any other value (including None) falls back to the
                    default balanced mix.
        count: number of test words to return.
        """
        # Per-level weightings: each maps a vocabulary tier to the fraction
        # of `count` that should be drawn from that tier (fractions sum to 1).
        distributions = {
            # Beginner: mostly primary-school words, a little middle/high.
            'beginner': {
                'primary': 0.50,
                'middle': 0.35,
                'high': 0.15,
                'cet4': 0.0,
                'cet6': 0.0,
                'advanced': 0.0,
            },
            # Intermediate: balanced, centred on middle school through CET-4.
            'intermediate': {
                'primary': 0.15,
                'middle': 0.30,
                'high': 0.30,
                'cet4': 0.20,
                'cet6': 0.05,
                'advanced': 0.0,
            },
            # Advanced: emphasis on CET-4 through CET-6.
            'advanced': {
                'primary': 0.05,
                'middle': 0.15,
                'high': 0.25,
                'cet4': 0.35,
                'cet6': 0.15,
                'advanced': 0.05,
            },
            # Expert: emphasis on CET-6 and advanced vocabulary.
            'expert': {
                'primary': 0.02,
                'middle': 0.08,
                'high': 0.15,
                'cet4': 0.25,
                'cet6': 0.30,
                'advanced': 0.20,
            },
        }
        # Unrecognised levels yield None, which selects the balanced default
        # inside generate_random_test_words.
        level_distribution = distributions.get(user_level)
        return self.generate_random_test_words(count, level_distribution)
    
    def generate_adaptive_test_words_with_scores(self, cet4_score=None, cet6_score=None, count=50):
        """
        Generate adaptive test words from CET-4 / CET-6 exam scores.

        cet4_score: CET-4 score (0-710), or None if unavailable.
        cet6_score: CET-6 score (0-710), or None if unavailable.
        count: number of test words to return.
        """
        # Derive the tier weighting directly from the scores.  (A previous
        # version also called _determine_level_from_scores here, but that
        # result was never used — the dead call has been removed; the helper
        # is pure, so behavior is unchanged.)
        level_distribution = self._get_level_distribution_from_scores(cet4_score, cet6_score)

        return self.generate_random_test_words(count, level_distribution)
    
    def _determine_level_from_scores(self, cet4_score, cet6_score):
        """
        根据四六级成绩确定用户水平
        """
        if cet6_score is not None:
            if cet6_score >= 600:
                return 'expert'
            elif cet6_score >= 500:
                return 'advanced'
            elif cet6_score >= 425:
                return 'intermediate'
            else:
                return 'beginner'
        elif cet4_score is not None:
            if cet4_score >= 600:
                return 'advanced'
            elif cet4_score >= 500:
                return 'intermediate'
            elif cet4_score >= 425:
                return 'beginner'
            else:
                return 'beginner'
        else:
            return None
    
    def _get_level_distribution_from_scores(self, cet4_score, cet6_score):
        """
        根据四六级成绩生成词汇分布
        """
        if cet6_score is not None:
            # 有六级成绩，重点测试六级及以上词汇
            if cet6_score >= 600:
                # 优秀六级水平
                return {
                    'primary': 0.02,    # 小学词汇 2%
                    'middle': 0.05,     # 初中词汇 5%
                    'high': 0.10,       # 高中词汇 10%
                    'cet4': 0.15,       # 四级词汇 15%
                    'cet6': 0.40,       # 六级词汇 40%
                    'advanced': 0.28    # 高级词汇 28%
                }
            elif cet6_score >= 500:
                # 良好六级水平
                return {
                    'primary': 0.05,    # 小学词汇 5%
                    'middle': 0.10,     # 初中词汇 10%
                    'high': 0.15,       # 高中词汇 15%
                    'cet4': 0.25,       # 四级词汇 25%
                    'cet6': 0.35,       # 六级词汇 35%
                    'advanced': 0.10    # 高级词汇 10%
                }
            elif cet6_score >= 425:
                # 及格六级水平
                return {
                    'primary': 0.10,    # 小学词汇 10%
                    'middle': 0.15,     # 初中词汇 15%
                    'high': 0.20,       # 高中词汇 20%
                    'cet4': 0.30,       # 四级词汇 30%
                    'cet6': 0.20,       # 六级词汇 20%
                    'advanced': 0.05    # 高级词汇 5%
                }
            else:
                # 六级未通过，按四级水平处理
                return self._get_level_distribution_from_scores(cet4_score, None)
        
        elif cet4_score is not None:
            # 只有四级成绩
            if cet4_score >= 600:
                # 优秀四级水平
                return {
                    'primary': 0.05,    # 小学词汇 5%
                    'middle': 0.10,     # 初中词汇 10%
                    'high': 0.20,       # 高中词汇 20%
                    'cet4': 0.45,       # 四级词汇 45%
                    'cet6': 0.15,       # 六级词汇 15%
                    'advanced': 0.05    # 高级词汇 5%
                }
            elif cet4_score >= 500:
                # 良好四级水平
                return {
                    'primary': 0.10,    # 小学词汇 10%
                    'middle': 0.15,     # 初中词汇 15%
                    'high': 0.25,       # 高中词汇 25%
                    'cet4': 0.35,       # 四级词汇 35%
                    'cet6': 0.10,       # 六级词汇 10%
                    'advanced': 0.05    # 高级词汇 5%
                }
            elif cet4_score >= 425:
                # 及格四级水平
                return {
                    'primary': 0.15,    # 小学词汇 15%
                    'middle': 0.20,     # 初中词汇 20%
                    'high': 0.30,       # 高中词汇 30%
                    'cet4': 0.25,       # 四级词汇 25%
                    'cet6': 0.08,       # 六级词汇 8%
                    'advanced': 0.02    # 高级词汇 2%
                }
            else:
                # 四级未通过
                return {
                    'primary': 0.25,    # 小学词汇 25%
                    'middle': 0.30,     # 初中词汇 30%
                    'high': 0.30,       # 高中词汇 30%
                    'cet4': 0.12,       # 四级词汇 12%
                    'cet6': 0.03,       # 六级词汇 3%
                    'advanced': 0.0     # 高级词汇 0%
                }
        else:
            # 没有成绩信息，使用平衡分布
            return {
                'primary': 0.20,    # 小学级词汇 20%
                'middle': 0.25,     # 初中级词汇 25%
                'high': 0.25,       # 高中级词汇 25%
                'cet4': 0.20,       # 四级词汇 20%
                'cet6': 0.08,       # 六级词汇 8%
                'advanced': 0.02    # 高级词汇 2%
            }
    
    def generate_balanced_test_words(self, count=50):
        """
        Generate a balanced test word set in which every tier is represented.

        count: number of test words to return.
        """
        # Fixed weighting: every vocabulary tier gets at least a small share.
        distribution = {
            'primary': 0.18,
            'middle': 0.22,
            'high': 0.22,
            'cet4': 0.22,
            'cet6': 0.12,
            'advanced': 0.04,
        }
        return self.generate_random_test_words(count, distribution)
    
    def generate_progressive_test_words(self, count=50, focus_levels=None):
        """
        Generate progressive test words, boosting selected focus tiers.

        count: number of test words to return.
        focus_levels: list of tier names to emphasise, e.g. ['middle', 'high'];
                      defaults to ['middle', 'high'].

        Fix: the original raised ZeroDivisionError when focus_levels was an
        empty list (division by len(focus_levels)) or when it covered every
        tier (division by len(non_focus_levels) == 0).  In both cases the
        base distribution is now used unchanged; all other inputs produce
        exactly the same weights as before.
        """
        if focus_levels is None:
            focus_levels = ['middle', 'high']

        # Base weighting before any emphasis is applied.
        base_distribution = {
            'primary': 0.15,
            'middle': 0.20,
            'high': 0.20,
            'cet4': 0.20,
            'cet6': 0.15,
            'advanced': 0.10
        }

        adjusted_distribution = base_distribution.copy()
        non_focus_levels = [level for level in base_distribution.keys() if level not in focus_levels]

        # Only shift weight when there is at least one focus tier AND at
        # least one non-focus tier to take the weight from.
        if focus_levels and non_focus_levels:
            total_focus_weight = 0.4  # total extra weight given to focus tiers
            focus_weight_per_level = total_focus_weight / len(focus_levels)

            # Add the extra weight to each (known) focus tier.
            for level in focus_levels:
                if level in adjusted_distribution:
                    adjusted_distribution[level] += focus_weight_per_level

            # Remove the same total from the non-focus tiers, but never let
            # any tier drop below a 0.05 floor.
            weight_to_reduce = total_focus_weight / len(non_focus_levels)
            for level in non_focus_levels:
                adjusted_distribution[level] = max(0.05, adjusted_distribution[level] - weight_to_reduce)

        # Normalise so the weights sum to 1 (the 0.05 floor above can push
        # the raw total away from 1).
        total_weight = sum(adjusted_distribution.values())
        normalized_distribution = {level: weight/total_weight for level, weight in adjusted_distribution.items()}

        return self.generate_random_test_words(count, normalized_distribution)
    
    def get_vocabulary_level(self, vocabulary_size):
        """
        Map an estimated vocabulary size to a proficiency level.

        vocabulary_size: estimated number of known words.
        Returns a (level name, display label, display colour) tuple.
        """
        # Exclusive upper bound of each band, ordered lowest to highest;
        # the first band whose bound exceeds the size wins.
        bands = (
            (800, ('pre-primary', '学前水平', 'secondary')),
            (2000, ('primary', '小学水平', 'success')),
            (4000, ('middle-low', '初中低年级水平', 'info')),
            (6000, ('middle', '初中水平', 'info')),
            (9000, ('high-low', '高中低年级水平', 'warning')),
            (12000, ('high', '高中水平', 'warning')),
            (15000, ('cet4-low', '四级低分水平', 'primary')),
            (18000, ('cet4', '大学四级水平', 'primary')),
            (21000, ('cet6-low', '六级低分水平', 'danger')),
            (25000, ('cet6', '大学六级水平', 'danger')),
            (30000, ('cet8', '专业八级水平', 'dark')),
            (40000, ('advanced', '高级水平', 'purple')),
        )
        for upper_bound, level in bands:
            if vocabulary_size < upper_bound:
                return level
        # 40,000 words and above: top band.
        return ('expert', '专家水平', 'purple')
    
    def get_level_description(self, level_name):
        """
        Return detailed metadata for a vocabulary level name.

        level_name: a level key as produced by get_vocabulary_level
                    (e.g. 'primary', 'cet4', 'expert').
        Returns a dict with 'name', 'range', 'description' and 'features';
        unknown level names fall back to the 'primary' description.
        """
        def record(name, word_range, description, features):
            # Assemble one level-description entry.
            return {
                'name': name,
                'range': word_range,
                'description': description,
                'features': features,
            }

        descriptions = {
            'pre-primary': record(
                '学前水平', '0-800词',
                '基础词汇，适合英语初学者',
                ['认识基本日常词汇', '能进行简单对话', '词汇量有限', '适合英语启蒙']),
            'primary': record(
                '小学水平', '800-2,000词',
                '小学英语词汇量，掌握基础词汇',
                ['掌握基础日常词汇', '能进行简单交流', '词汇量适中', '适合小学英语学习']),
            'middle-low': record(
                '初中低年级水平', '2,000-4,000词',
                '初中低年级英语词汇量，具备基本交流能力',
                ['掌握日常交流词汇', '能理解简单文章', '词汇量良好', '适合初中低年级']),
            'middle': record(
                '初中水平', '4,000-6,000词',
                '初中英语词汇量，具备基本交流能力',
                ['掌握日常交流词汇', '能理解简单文章', '词汇量良好', '适合初中毕业水平']),
            'high-low': record(
                '高中低年级水平', '6,000-9,000词',
                '高中低年级英语词汇量，具备较好英语能力',
                ['掌握学术基础词汇', '能理解复杂文章', '词汇量丰富', '适合高中低年级']),
            'high': record(
                '高中水平', '9,000-12,000词',
                '高中英语词汇量，具备较好英语能力',
                ['掌握学术基础词汇', '能理解复杂文章', '词汇量丰富', '适合高中毕业水平']),
            'cet4-low': record(
                '四级低分水平', '12,000-15,000词',
                '大学英语四级低分词汇量，具备良好英语能力',
                ['掌握大学英语词汇', '能进行学术交流', '词汇量优秀', '四级考试及格水平']),
            'cet4': record(
                '大学四级水平', '15,000-18,000词',
                '大学英语四级词汇量，具备良好英语能力',
                ['掌握大学英语词汇', '能进行学术交流', '词汇量优秀', '四级考试良好水平']),
            'cet6-low': record(
                '六级低分水平', '18,000-21,000词',
                '大学英语六级低分词汇量，具备优秀英语能力',
                ['掌握高级英语词汇', '能进行专业交流', '词汇量卓越', '六级考试及格水平']),
            'cet6': record(
                '大学六级水平', '21,000-25,000词',
                '大学英语六级词汇量，具备优秀英语能力',
                ['掌握高级英语词汇', '能进行专业交流', '词汇量卓越', '六级考试良好水平']),
            'cet8': record(
                '专业八级水平', '25,000-30,000词',
                '英语专业八级词汇量，具备专业英语能力',
                ['掌握专业英语词汇', '能进行学术研究', '词汇量专业', '英语专业毕业水平']),
            'advanced': record(
                '高级水平', '30,000-40,000词',
                '高级英语词汇量，具备母语水平英语能力',
                ['掌握母语水平词汇', '能进行深度交流', '词汇量精通', '接近母语者水平']),
            'expert': record(
                '专家水平', '40,000+词',
                '专家级英语词汇量，具备母语者水平英语能力',
                ['掌握专家级词汇', '能进行专业研究', '词汇量精通', '达到母语者水平']),
        }
        return descriptions.get(level_name, descriptions['primary'])
    
    def estimate_vocabulary_with_level(self, test_data):
        """
        Estimate vocabulary size from test data and attach level metadata.

        test_data: the test results accepted by estimate_vocabulary.
        Returns a dict with the estimated size, the confidence, and a nested
        'level' dict (name, display label, colour, and full description).
        """
        size, confidence = self.estimate_vocabulary(test_data)
        name, display, color = self.get_vocabulary_level(size)
        return {
            'vocabulary_size': size,
            'confidence': confidence,
            'level': {
                'name': name,
                'display': display,
                'color': color,
                'info': self.get_level_description(name),
            },
        }
    
    def estimate_from_corpus_with_level(self, text):
        """
        Estimate vocabulary size from a text corpus and attach level metadata.

        text: the corpus text accepted by estimate_from_corpus.
        Returns a dict with the estimated size, the confidence, and a nested
        'level' dict (name, display label, colour, and full description).
        """
        size, confidence = self.estimate_from_corpus(text)
        name, display, color = self.get_vocabulary_level(size)
        return {
            'vocabulary_size': size,
            'confidence': confidence,
            'level': {
                'name': name,
                'display': display,
                'color': color,
                'info': self.get_level_description(name),
            },
        }