import os
import re
import jieba
import numpy as np
import pandas as pd
from collections import Counter
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
from typing import Dict, List, Tuple, Optional
import logging
import glob

# Configure module-wide logging: timestamped INFO-level messages to stderr.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class NovelDatasetLoader:
    """Loads a folder-per-category corpus of novel text files.

    ``base_path`` is expected to contain one subdirectory per category,
    each holding UTF-8 ``.txt`` files (searched recursively).
    """

    def __init__(self, base_path: str):
        self.base_path = base_path        # root directory of the corpus
        self.categories: List[str] = []   # category names found by discover_categories()
        self.data: List[str] = []         # loaded (possibly sampled) document texts
        self.labels: List[str] = []       # category label parallel to self.data

    def discover_categories(self) -> List[str]:
        """Discover category subdirectories under ``base_path``.

        Returns the names sorted alphabetically: ``os.listdir`` order is
        platform-dependent, and a stable order keeps downstream label
        indices reproducible across runs.
        """
        categories = sorted(
            item for item in os.listdir(self.base_path)
            if os.path.isdir(os.path.join(self.base_path, item))
        )
        self.categories = categories
        logger.info(f"发现 {len(categories)} 个分类: {categories}")
        return categories

    def load_novel_files(self, max_files_per_category: int = 100, sample_size: int = 50000) -> Tuple[List[str], List[str]]:
        """Load up to ``max_files_per_category`` text files per category.

        Args:
            max_files_per_category: cap on files read per category.
            sample_size: maximum number of characters kept per file; larger
                files are sampled starting one third of the way in, to avoid
                training only on openings/front matter.

        Returns:
            ``(texts, labels)`` parallel lists (also stored on the instance).
        """
        self.data = []
        self.labels = []

        for category in self.categories:
            category_path = os.path.join(self.base_path, category)
            logger.info(f"加载分类 '{category}' 的文件...")

            # glob order is filesystem-dependent; sort so the truncation
            # below selects a reproducible subset of files.
            txt_files = sorted(glob.glob(os.path.join(category_path, "**/*.txt"), recursive=True))
            logger.info(f"在 '{category}' 中找到 {len(txt_files)} 个txt文件")

            for file_path in txt_files[:max_files_per_category]:
                # Narrow try: only the read can realistically fail
                # (bad encoding, permissions, vanished file).
                try:
                    with open(file_path, 'r', encoding='utf-8') as f:
                        content = f.read()
                except Exception as e:
                    logger.warning(f"读取文件 {file_path} 时出错: {e}")
                    continue

                # Sample a window from one third in when the file is large.
                if len(content) > sample_size:
                    start_pos = max(0, len(content) // 3)
                    content = content[start_pos:start_pos + sample_size]

                self.data.append(content)
                self.labels.append(category)

        logger.info(f"总共加载 {len(self.data)} 个样本")
        return self.data, self.labels

    def get_category_distribution(self) -> Dict[str, int]:
        """Return a {category: sample_count} mapping for the loaded labels."""
        return dict(Counter(self.labels))

    def save_dataset_info(self, output_file: str = "dataset_info.csv"):
        """Dump the loaded texts/labels to a CSV and log the class balance."""
        df = pd.DataFrame({
            'text': self.data,
            'label': self.labels
        })
        df.to_csv(output_file, index=False, encoding='utf-8')
        logger.info(f"数据集信息已保存到 {output_file}")

        dist = self.get_category_distribution()
        logger.info("类别分布:")
        for category, count in dist.items():
            logger.info(f"  {category}: {count} 个样本")

class EnhancedNovelClassifier:
    """TF-IDF based Chinese novel classifier.

    Trains several scikit-learn models (naive Bayes, random forest, linear
    SVM), keeps whichever scores best on a held-out split, and exposes
    probability predictions plus pickle-based persistence.
    """

    def __init__(self):
        # preprocess_text() checks every token against the stop words; cache
        # them once as a frozenset instead of rebuilding the list (and doing
        # an O(n) list scan) for every single word.
        self._stop_word_set = frozenset(self.get_stop_words())
        self.vectorizer = TfidfVectorizer(
            max_features=2000,
            stop_words=self.get_stop_words(),
            ngram_range=(1, 2)  # unigrams and bigrams
        )
        self.classifiers = {
            'naive_bayes': MultinomialNB(),
            'random_forest': RandomForestClassifier(n_estimators=100, random_state=42),
            'svm': SVC(kernel='linear', probability=True, random_state=42)
        }
        self.selected_classifier = 'naive_bayes'  # name of the best model after train()
        self.is_trained = False
        self.category_mapping: Dict[str, int] = {}          # label -> class index
        self.reverse_category_mapping: Dict[int, str] = {}  # class index -> label

    def get_stop_words(self) -> List[str]:
        """Return the built-in Chinese stop-word list."""
        return [
            '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个',
            '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好',
            '自己', '这', '那', '他', '她', '它', '我们', '你们', '他们', '这个', '那个'
        ]

    def preprocess_text(self, text: str) -> str:
        """Keep only CJK characters, segment with jieba, and drop stop words
        and single-character tokens; returns space-joined tokens for TF-IDF."""
        # Everything outside the CJK unified ideograph range becomes a space.
        text = re.sub(r'[^\u4e00-\u9fa5]', ' ', text)
        words = jieba.cut(text)
        filtered_words = [w for w in words
                          if len(w) > 1 and w not in self._stop_word_set]
        return ' '.join(filtered_words)

    def set_categories(self, categories: List[str]):
        """Build the label<->index mappings; list order defines the indices."""
        self.category_mapping = {cat: idx for idx, cat in enumerate(categories)}
        self.reverse_category_mapping = {idx: cat for cat, idx in self.category_mapping.items()}

    def train(self, texts: List[str], labels: List[str], test_size: float = 0.2) -> Dict:
        """Train all candidate classifiers and keep the best one.

        Args:
            texts: raw document strings.
            labels: category label per document.
            test_size: held-out fraction used to pick the best model.

        Returns:
            Report dict with keys ``best_classifier``, ``best_score``,
            ``classifier_scores`` (per-model held-out accuracy),
            ``cv_scores``, ``feature_count``, ``sample_count``,
            ``category_count``.
        """
        logger.info("开始训练分类器...")

        # Sorted so the label->index mapping is deterministic across runs
        # (set iteration order is not).
        unique_labels = sorted(set(labels))
        self.set_categories(unique_labels)

        logger.info("文本预处理中...")
        processed_texts = [self.preprocess_text(text) for text in texts]

        logger.info("特征提取中...")
        X = self.vectorizer.fit_transform(processed_texts)
        y = np.array([self.category_mapping[label] for label in labels])

        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=test_size, random_state=42, stratify=y)

        # Start below any possible accuracy so a winner is always picked,
        # even in the degenerate case where every model scores 0.0.
        best_score = -1.0
        best_classifier = None
        classifier_scores: Dict[str, float] = {}

        logger.info("评估不同分类器...")
        for name, clf in self.classifiers.items():
            logger.info(f"训练 {name}...")
            clf.fit(X_train, y_train)
            score = clf.score(X_test, y_test)
            classifier_scores[name] = score
            logger.info(f"{name} 准确率: {score:.4f}")

            if score > best_score:
                best_score = score
                best_classifier = name
                self.selected_classifier = name

        logger.info(f"选择最佳分类器: {best_classifier} (准确率: {best_score:.4f})")

        # 5-fold cross-validation of the winner on the full dataset.
        final_clf = self.classifiers[best_classifier]
        cv_scores = cross_val_score(final_clf, X, y, cv=5)
        logger.info(f"交叉验证平均准确率: {cv_scores.mean():.4f} (+/- {cv_scores.std() * 2:.4f})")

        # Refit the winner on everything before deployment.
        final_clf.fit(X, y)
        self.is_trained = True

        report = {
            'best_classifier': best_classifier,
            'best_score': best_score,
            'classifier_scores': classifier_scores,  # consumed by plot_training_results
            'cv_scores': cv_scores,
            'feature_count': X.shape[1],
            'sample_count': len(texts),
            'category_count': len(unique_labels)
        }

        logger.info("分类器训练完成")
        return report

    def predict(self, text: str) -> Tuple[str, Dict[str, float]]:
        """Predict the category of ``text``.

        Returns:
            ``(predicted_category, {category: probability})``.

        Raises:
            ValueError: if the model has not been trained or loaded.
        """
        if not self.is_trained:
            raise ValueError("分类器尚未训练")

        processed_text = self.preprocess_text(text)
        X = self.vectorizer.transform([processed_text])

        clf = self.classifiers[self.selected_classifier]
        probabilities = clf.predict_proba(X)[0]

        # Plain floats (not np.float64) so the result formats/serializes cleanly.
        category_probs = {
            self.reverse_category_mapping[idx]: float(prob)
            for idx, prob in enumerate(probabilities)
        }

        predicted_idx = int(np.argmax(probabilities))
        return self.reverse_category_mapping[predicted_idx], category_probs

    def evaluate(self, texts: List[str], labels: List[str]) -> Dict:
        """Evaluate the trained model on labeled texts.

        All ``labels`` must be categories the model was trained on.

        Returns:
            Dict with ``accuracy``, a text ``report`` and the
            ``confusion_matrix``.

        Raises:
            ValueError: if the model has not been trained or loaded.
        """
        if not self.is_trained:
            raise ValueError("分类器尚未训练")

        y_true = [self.category_mapping[label] for label in labels]
        y_pred = []

        for text in texts:
            predicted_category, _ = self.predict(text)
            y_pred.append(self.category_mapping[predicted_category])

        accuracy = accuracy_score(y_true, y_pred)
        # category_mapping is built from enumerate(), so key order matches indices.
        report = classification_report(y_true, y_pred, target_names=list(self.category_mapping.keys()))

        return {
            'accuracy': accuracy,
            'report': report,
            'confusion_matrix': confusion_matrix(y_true, y_pred)
        }

    def save_model(self, model_path: str) -> None:
        """Pickle the vectorizer, classifiers and label mappings to ``model_path``."""
        model_data = {
            'vectorizer': self.vectorizer,
            'classifiers': self.classifiers,
            'selected_classifier': self.selected_classifier,
            'is_trained': self.is_trained,
            'category_mapping': self.category_mapping,
            'reverse_category_mapping': self.reverse_category_mapping
        }

        with open(model_path, 'wb') as f:
            pickle.dump(model_data, f)
        logger.info(f"模型已保存到 {model_path}")

    def load_model(self, model_path: str) -> None:
        """Restore state previously written by :meth:`save_model`.

        NOTE(review): ``pickle.load`` executes arbitrary code; only load
        model files from trusted sources.
        """
        with open(model_path, 'rb') as f:
            model_data = pickle.load(f)

        self.vectorizer = model_data['vectorizer']
        self.classifiers = model_data['classifiers']
        self.selected_classifier = model_data['selected_classifier']
        self.is_trained = model_data['is_trained']
        self.category_mapping = model_data['category_mapping']
        self.reverse_category_mapping = model_data['reverse_category_mapping']
        logger.info(f"模型已从 {model_path} 加载")

def plot_training_results(report: Dict, output_file: str = "training_results.png"):
    """Plot per-classifier accuracy bars and the cross-validation distribution.

    Args:
        report: training report dict. Uses ``classifier_scores`` when present;
            otherwise falls back to the single ``best_classifier`` /
            ``best_score`` pair so reports that only record the winner still
            produce a (one-bar) chart instead of an empty plot.
        output_file: path the figure is saved to before being shown.
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))

    scores_by_name = report.get('classifier_scores', {})
    if not scores_by_name and 'best_classifier' in report:
        # Older reports only carry the winning model's score.
        scores_by_name = {report['best_classifier']: report.get('best_score', 0)}

    names = list(scores_by_name.keys())
    scores = list(scores_by_name.values())

    ax1.bar(names, scores)
    ax1.set_title('不同分类器准确率比较')
    ax1.set_ylabel('准确率')
    ax1.set_ylim(0, 1)

    # Annotate each bar with its exact accuracy.
    for i, v in enumerate(scores):
        ax1.text(i, v + 0.01, f'{v:.4f}', ha='center')

    cv_scores = report.get('cv_scores', [])
    if len(cv_scores):  # guard: boxplot of no data is meaningless
        ax2.boxplot(cv_scores)
    ax2.set_title('交叉验证准确率分布')
    ax2.set_ylabel('准确率')

    plt.tight_layout()
    plt.savefig(output_file)
    plt.show()

def train_from_folders():
    """Interactively train a model from a folder-per-category corpus.

    Prompts for the corpus root, loads the data, trains an
    ``EnhancedNovelClassifier``, saves it to disk, and prints a summary.

    Returns:
        ``(classifier, training_report)`` on success, or ``(None, None)``
        when aborted early (bad path, no categories, no data). Returning a
        2-tuple in every path keeps ``classifier, report = train_from_folders()``
        in the caller from raising on unpack.
    """

    BASE_PATH = input("请输入分类小说文件夹的根路径: ").strip()
    if not os.path.exists(BASE_PATH):
        print("路径不存在，请检查输入")
        return None, None

    loader = NovelDatasetLoader(BASE_PATH)
    categories = loader.discover_categories()

    if not categories:
        print("未找到任何分类文件夹")
        return None, None

    texts, labels = loader.load_novel_files(
        max_files_per_category=100,  # tune as needed
        sample_size=50000            # characters sampled per file
    )

    if not texts:
        print("未找到可用的训练数据")
        return None, None

    # Show the class balance before training.
    distribution = loader.get_category_distribution()
    print("\n数据分布:")
    for category, count in distribution.items():
        print(f"  {category}: {count}")

    loader.save_dataset_info("novel_dataset.csv")

    classifier = EnhancedNovelClassifier()
    training_report = classifier.train(texts, labels)

    print(f"\n训练完成!")
    print(f"最佳分类器: {training_report['best_classifier']}")
    print(f"测试准确率: {training_report['best_score']:.4f}")
    print(f"交叉验证平均准确率: {training_report['cv_scores'].mean():.4f}")

    model_path = "trained_novel_classifier.pkl"
    classifier.save_model(model_path)

    print("\n模型评估:")
    # NOTE(review): this evaluates on a slice of the *training* data, so the
    # number is optimistic — it sanity-checks the pipeline, not generalization.
    eval_texts = texts[:min(50, len(texts))]
    eval_labels = labels[:min(50, len(labels))]
    evaluation = classifier.evaluate(eval_texts, eval_labels)
    print(f"评估准确率: {evaluation['accuracy']:.4f}")
    print("\n分类报告:")
    print(evaluation['report'])

    return classifier, training_report

def predict_single_novel(classifier: EnhancedNovelClassifier):
    """Prompt for a novel file, classify it with ``classifier``, and print
    the predicted category plus all class probabilities (descending)."""
    file_path = input("请输入要分类的小说文件路径: ").strip()

    if not os.path.exists(file_path):
        print("文件不存在")
        return

    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()

        # Only the first 50k characters are needed for a prediction.
        sample_text = content[:min(50000, len(content))]

        category, probs = classifier.predict(sample_text)

        print(f"\n分类结果: {category}")
        print("\n所有类别概率:")
        ranked = sorted(probs.items(), key=lambda item: item[1], reverse=True)
        for name, p in ranked:
            print(f"  {name}: {p:.4f}")

    except Exception as e:
        print(f"处理文件时出错: {e}")

def main():
    """Interactive menu loop: train a model, load one, classify, or quit."""
    classifier = None

    while True:
        print("\n" + "="*50)
        print("小说分类模型训练系统")
        print("="*50)
        print("1. 从文件夹训练新模型")
        print("2. 加载现有模型")
        print("3. 分类单个小说")
        print("4. 退出")

        choice = input("请选择操作 (1-4): ").strip()

        if choice == '1':
            # train_from_folders may bail out early (bad path, no data) and
            # return None; unpacking that directly would raise TypeError.
            result = train_from_folders()
            if result is not None:
                classifier, _report = result

        elif choice == '2':
            model_path = input("请输入模型文件路径 (默认: trained_novel_classifier.pkl): ").strip()
            if not model_path:
                model_path = "trained_novel_classifier.pkl"

            if os.path.exists(model_path):
                classifier = EnhancedNovelClassifier()
                classifier.load_model(model_path)
                print("模型加载成功!")
            else:
                print("模型文件不存在")

        elif choice == '3':
            if classifier is None:
                print("请先训练或加载模型")
            else:
                predict_single_novel(classifier)

        elif choice == '4':
            print("再见!")
            break

        else:
            print("无效选择")

if __name__ == "__main__":
    # Dependency check before launching the menu.
    # NOTE(review): the module-level imports at the top of this file (jieba,
    # sklearn, pandas, matplotlib, seaborn) have already executed by the time
    # this guard runs, so a missing package would raise ImportError before
    # reaching this try — when the file is executed directly, this check can
    # never actually fire. It also omits matplotlib/seaborn, which the top of
    # the file does import.
    try:
        import jieba
        import sklearn
        import pandas
    except ImportError as e:
        print(f"缺少必要的库: {e}")
        print("请运行: pip install jieba scikit-learn pandas matplotlib seaborn")
        exit(1)
    
    main()