import jieba
from collections import Counter
import os
import re
import platform

class NovelWordAnalyzer:
    """Count the most frequent 2-, 3- and 4-character Chinese words in a novel."""

    # Words consisting solely of CJK unified ideographs (U+4E00..U+9FA5).
    # Compiled once at class level so filter_words() does not rebuild it per call.
    _CHINESE_WORD_RE = re.compile(r'^[\u4e00-\u9fa5]+$')

    def __init__(self):
        # Load the stop-word list first, then configure the tokenizer.
        self.stop_words = self._load_stop_words()
        self._setup_jieba()

    def _setup_jieba(self):
        """Configure jieba: parallel mode where available, then eager init."""
        # jieba's parallel mode relies on os.fork, which Windows lacks.
        if platform.system() != 'Windows':
            try:
                jieba.enable_parallel(4)
            except Exception as e:
                print(f"并行分词不可用: {e}")
        # Build the dictionary up front instead of lazily on the first cut.
        jieba.initialize()

    def _load_stop_words(self):
        """Load the stop-word set from '停用词库.txt' next to this script.

        Returns:
            set[str]: one entry per non-blank line of the file.

        Raises:
            FileNotFoundError: if the stop-word file does not exist.
        """
        stop_words_file = os.path.join(os.path.dirname(__file__), '停用词库.txt')
        if not os.path.exists(stop_words_file):
            raise FileNotFoundError(f"停用词文件不存在: {stop_words_file}")

        with open(stop_words_file, 'r', encoding='utf-8') as f:
            # Skip blank lines so the empty string never enters the set.
            return {line.strip() for line in f if line.strip()}

    def load_novel(self, file_path):
        """Read the novel at *file_path*, trying common Chinese encodings.

        Returns:
            str: the decoded file content.

        Raises:
            FileNotFoundError: if the path does not exist.
            ValueError: if none of the candidate encodings can decode the file.
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"文件不存在: {file_path}")

        # gb18030 is a superset of gbk/gb2312 but is tried last so the
        # reported encoding stays as specific as possible.
        for encoding in ('utf-8', 'gbk', 'gb2312', 'gb18030'):
            try:
                with open(file_path, 'r', encoding=encoding) as f:
                    content = f.read()
            except UnicodeDecodeError:
                continue
            print(f"成功使用编码: {encoding}")
            return content
        raise ValueError("无法解码文件，尝试了多种编码格式")

    def preprocess_text(self, text):
        """Strip everything except Chinese characters, common Chinese
        punctuation and whitespace, then collapse whitespace runs.

        NOTE: newlines are collapsed to single spaces as well; downstream
        segmentation does not depend on line structure.
        """
        text = re.sub(r'[^\u4e00-\u9fa5，。！？；：“”‘’（）《》\s]', '', text)
        text = re.sub(r'\s+', ' ', text)
        return text.strip()

    def segment_text(self, text):
        """Tokenize *text* with jieba's accurate mode; returns a list of words."""
        return jieba.lcut(text)

    def filter_words(self, words):
        """Keep only pure-Chinese words of length >= 2 that are not stop words."""
        pattern = self._CHINESE_WORD_RE
        return [
            word for word in words
            if len(word) >= 2 and pattern.match(word) and word not in self.stop_words
        ]

    def categorize_words(self, words):
        """Split *words* into three lists by length (exactly 2, 3 and 4 chars).

        Words of any other length are discarded.

        Returns:
            tuple[list[str], list[str], list[str]]
        """
        buckets = {2: [], 3: [], 4: []}
        for word in words:
            bucket = buckets.get(len(word))
            if bucket is not None:
                bucket.append(word)
        return buckets[2], buckets[3], buckets[4]

    def get_top_words(self, words, top_n=20):
        """Return up to *top_n* most frequent words as (word, count) pairs.

        Ties are broken lexicographically.  Any frequency shared by more than
        20 distinct words is dropped entirely: such oversized tie groups are
        usually segmentation noise rather than meaningful vocabulary.
        """
        if not words:
            return []
        word_count = Counter(words)
        # Descending count first, then lexicographic order for stable output.
        sorted_words = sorted(word_count.items(), key=lambda item: (-item[1], item[0]))

        # Group words by frequency so oversized tie groups can be excluded.
        freq_distribution = {}
        for word, count in sorted_words:
            freq_distribution.setdefault(count, []).append(word)

        filtered = [
            pair for pair in sorted_words
            if len(freq_distribution[pair[1]]) <= 20
        ]
        return filtered[:top_n]

    def analyze_novel(self, file_path):
        """Run the full pipeline on *file_path*.

        Returns:
            dict: keys 'two_char'/'three_char'/'four_char', each mapping to a
            list of up to 20 (word, count) pairs.
        """
        content = self.load_novel(file_path)
        words = self.segment_text(self.preprocess_text(content))
        filtered_words = self.filter_words(words)
        two_char, three_char, four_char = self.categorize_words(filtered_words)
        return {
            'two_char': self.get_top_words(two_char, 20),
            'three_char': self.get_top_words(three_char, 20),
            'four_char': self.get_top_words(four_char, 20),
        }

    def print_results(self, results):
        """Print the per-length top-word tables to stdout."""
        print("\n" + "=" * 50)
        print("小说词组统计结果")
        print("=" * 50)
        # One stanza per word length instead of three copy-pasted blocks.
        for length, key in (('2', 'two_char'), ('3', 'three_char'), ('4', 'four_char')):
            print(f"\n【{length}字词】频率前20:")
            print("-" * 30)
            self._print_words(results[key])

    def _print_words(self, words):
        """Print numbered (word, count) rows, or a placeholder when empty."""
        if not words:
            print("无结果")
            return
        for rank, (word, count) in enumerate(words, 1):
            print(f"{rank:2d}. {word}: {count}次")

def main():
    """Interactive entry point: repeatedly prompt for a novel path and report."""
    analyzer = NovelWordAnalyzer()

    print("中文小说词组统计工具")
    print("支持格式: TXT文件")
    print("=" * 40)

    while True:
        try:
            file_path = input("\n请输入小说文件路径(输入 'quit' 退出): ").strip()

            # Guard clauses: explicit exit, then empty input.
            if file_path.lower() == 'quit':
                print("感谢使用！")
                break
            if not file_path:
                print("请输入有效的文件路径")
                continue

            # Analyze the novel and display the frequency tables.
            analyzer.print_results(analyzer.analyze_novel(file_path))

        except (FileNotFoundError, ValueError) as known_error:
            print(f"错误: {known_error}")
        except Exception as unexpected:
            print(f"发生未知错误: {str(unexpected)}")

if __name__ == "__main__":
    main()
