#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
单词处理模块 - 处理爬取到的单词数据
"""

import json
import logging
import os
import re
from typing import Any, Dict, List, Optional
from urllib.parse import quote

import requests

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class WordProcessor:
    """单词处理器"""
    
    def __init__(self, input_dir="output", output_dir="processed_words"):
        self.input_dir = input_dir
        self.output_dir = output_dir
        
        # 确保输出目录存在
        os.makedirs(self.output_dir, exist_ok=True)
    
    def clean_word(self, word: str) -> str:
        """清理单词"""
        if not word:
            return ""
        
        # 移除多余的空格和特殊字符
        word = word.strip()
        word = re.sub(r'\s+', ' ', word)
        
        # 移除HTML标签
        word = re.sub(r'<[^>]+>', '', word)
        
        return word
    
    def clean_definition(self, definition: str) -> str:
        """清理释义"""
        if not definition:
            return ""
        
        # 移除多余的空格
        definition = definition.strip()
        definition = re.sub(r'\s+', ' ', definition)
        
        # 移除HTML标签
        definition = re.sub(r'<[^>]+>', '', definition)
        
        # 移除多余的标点符号
        definition = re.sub(r'[;；]+$', '', definition)
        
        return definition
    
    def validate_word_entry(self, entry: Dict[str, Any]) -> bool:
        """验证单词条目"""
        if not isinstance(entry, dict):
            return False
        
        # 检查必要字段
        required_fields = ['word', 'definition']
        for field in required_fields:
            if field not in entry or not entry[field]:
                return False
        
        # 检查单词是否为空或只包含空格
        word = self.clean_word(entry['word'])
        if not word:
            return False
        
        return True
    
    def process_word_entry(self, entry: Dict[str, Any]) -> Dict[str, Any]:
        """处理单个单词条目"""
        if not self.validate_word_entry(entry):
            return None
        
        processed_entry = {
            'word': self.clean_word(entry['word']),
            'definition': self.clean_definition(entry['definition']),
            'pronunciation': entry.get('pronunciation', ''),
            'audio_url': entry.get('audio_url', ''),
            'example': entry.get('example', ''),
            'category': entry.get('category', ''),
            'difficulty': entry.get('difficulty', ''),
            'frequency': entry.get('frequency', 0)
        }
        
        # 清理其他字段
        for key, value in processed_entry.items():
            if isinstance(value, str):
                processed_entry[key] = value.strip()
        
        return processed_entry
    
    def process_course_data(self, course_data: Dict[str, Any]) -> Dict[str, Any]:
        """处理课程数据"""
        if not isinstance(course_data, dict):
            logger.error("课程数据格式错误")
            return None
        
        processed_data = {
            'course_name': course_data.get('course_name', ''),
            'total_words': 0,
            'directories': []
        }
        
        directories = course_data.get('directories', [])
        if not isinstance(directories, list):
            logger.error("目录数据格式错误")
            return None
        
        total_words = 0
        
        for directory in directories:
            if not isinstance(directory, dict):
                continue
            
            processed_dir = {
                'name': directory.get('name', ''),
                'words': []
            }
            
            words = directory.get('words', [])
            if not isinstance(words, list):
                continue
            
            for word_entry in words:
                processed_entry = self.process_word_entry(word_entry)
                if processed_entry:
                    processed_dir['words'].append(processed_entry)
                    total_words += 1
            
            if processed_dir['words']:  # 只添加有单词的目录
                processed_data['directories'].append(processed_dir)
        
        processed_data['total_words'] = total_words
        
        return processed_data
    
    def remove_duplicates(self, course_data: Dict[str, Any]) -> Dict[str, Any]:
        """移除重复单词"""
        if not course_data or 'directories' not in course_data:
            return course_data
        
        seen_words = set()
        
        for directory in course_data['directories']:
            if 'words' not in directory:
                continue
            
            unique_words = []
            for word_entry in directory['words']:
                word = word_entry.get('word', '').lower()
                if word and word not in seen_words:
                    seen_words.add(word)
                    unique_words.append(word_entry)
            
            directory['words'] = unique_words
        
        # 重新计算总单词数
        total_words = sum(len(dir_data['words']) for dir_data in course_data['directories'])
        course_data['total_words'] = total_words
        
        return course_data
    
    def process_file(self, input_file: str, output_file: str = None) -> bool:
        """处理单个文件"""
        input_path = os.path.join(self.input_dir, input_file)
        
        if not os.path.exists(input_path):
            logger.error(f"输入文件不存在: {input_path}")
            return False
        
        if not output_file:
            output_file = input_file
        
        output_path = os.path.join(self.output_dir, output_file)
        
        try:
            # 读取原始数据
            with open(input_path, 'r', encoding='utf-8') as f:
                raw_data = json.load(f)
            
            logger.info(f"开始处理文件: {input_file}")
            
            # 处理数据
            processed_data = self.process_course_data(raw_data)
            if not processed_data:
                logger.error(f"处理文件失败: {input_file}")
                return False
            
            # 移除重复单词
            processed_data = self.remove_duplicates(processed_data)
            
            # 保存处理后的数据
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(processed_data, f, ensure_ascii=False, indent=2)
            
            logger.info(f"文件处理完成: {output_file}")
            logger.info(f"总单词数: {processed_data['total_words']}")
            
            return True
            
        except Exception as e:
            logger.error(f"处理文件时发生错误: {str(e)}")
            return False
    
    def process_all_files(self):
        """处理所有文件"""
        if not os.path.exists(self.input_dir):
            logger.error(f"输入目录不存在: {self.input_dir}")
            return
        
        json_files = [f for f in os.listdir(self.input_dir) if f.endswith('.json')]
        
        if not json_files:
            logger.info("没有找到需要处理的JSON文件")
            return
        
        logger.info(f"找到 {len(json_files)} 个文件需要处理")
        
        success_count = 0
        for json_file in json_files:
            if self.process_file(json_file):
                success_count += 1
        
        logger.info(f"处理完成，成功处理 {success_count}/{len(json_files)} 个文件")
    
    def generate_statistics(self):
        """生成统计信息"""
        if not os.path.exists(self.output_dir):
            logger.error(f"输出目录不存在: {self.output_dir}")
            return
        
        json_files = [f for f in os.listdir(self.output_dir) if f.endswith('.json')]
        
        if not json_files:
            logger.info("没有找到处理后的文件")
            return
        
        total_courses = len(json_files)
        total_words = 0
        course_stats = []
        
        for json_file in json_files:
            file_path = os.path.join(self.output_dir, json_file)
            
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                
                course_name = data.get('course_name', json_file.replace('.json', ''))
                word_count = data.get('total_words', 0)
                
                course_stats.append({
                    'course_name': course_name,
                    'word_count': word_count,
                    'file_name': json_file
                })
                
                total_words += word_count
                
            except Exception as e:
                logger.error(f"读取文件 {json_file} 时发生错误: {str(e)}")
        
        # 按单词数排序
        course_stats.sort(key=lambda x: x['word_count'], reverse=True)
        
        # 生成统计报告
        stats_report = {
            'total_courses': total_courses,
            'total_words': total_words,
            'average_words_per_course': total_words / total_courses if total_courses > 0 else 0,
            'courses': course_stats
        }
        
        # 保存统计报告
        stats_file = os.path.join(self.output_dir, 'statistics.json')
        with open(stats_file, 'w', encoding='utf-8') as f:
            json.dump(stats_report, f, ensure_ascii=False, indent=2)
        
        logger.info(f"统计报告已生成: {stats_file}")
        logger.info(f"总课程数: {total_courses}")
        logger.info(f"总单词数: {total_words}")
        logger.info(f"平均每课程单词数: {stats_report['average_words_per_course']:.1f}")

def main():
    """Entry point: process every crawled file, then emit statistics."""
    word_processor = WordProcessor()
    word_processor.process_all_files()
    word_processor.generate_statistics()


if __name__ == "__main__":
    main()
