
import os
import jieba
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, accuracy_score
from sklearn.preprocessing import LabelEncoder
import joblib
import re
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import TruncatedSVD

# Text preprocessing class
class AdvancedTextPreprocessor:
    """Chinese text preprocessor: cleaning, stopword filtering, tokenization.

    Pipeline: ``clean_text`` strips non-Chinese characters, ``advanced_cut``
    segments with jieba and filters stopwords, ``extract_features`` joins
    the tokens into a space-separated string suitable for TfidfVectorizer.
    """

    # Precompiled patterns, hoisted so they compile once instead of on
    # every clean_text() call.
    # Matches any character outside the CJK Unified Ideographs range.
    _NON_CHINESE_RE = re.compile(r'[^\u4e00-\u9fa5]')
    # Matches runs of whitespace, collapsed to a single space.
    _WHITESPACE_RE = re.compile(r'\s+')

    def __init__(self, custom_dict_path=None):
        """Initialize the preprocessor.

        Args:
            custom_dict_path: optional path to a jieba user dictionary;
                loaded only when the file actually exists.
        """
        # Load a custom jieba dictionary if one was provided.
        if custom_dict_path and os.path.exists(custom_dict_path):
            jieba.load_userdict(custom_dict_path)

        # Chinese stopword set consumed by advanced_cut().
        self.stopwords = self.load_stopwords()

    def load_stopwords(self):
        """Return the stopword set: built-in basics plus optional file entries."""
        stopwords = set()
        # Built-in baseline of high-frequency Chinese function words.
        base_stopwords = {'的', '了', '在', '是', '我', '有', '和', '就',
                          '不', '人', '都', '一', '一个', '上', '也', '很',
                          '到', '说', '要', '去', '你', '会', '着', '没有',
                          '看', '好', '自己', '这', '那', '他', '她', '它'}
        stopwords.update(base_stopwords)

        # Optionally extend from a file. A missing/unreadable file is a
        # deliberate best-effort miss, not an error. The original bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit; narrowed
        # to the exceptions open()/iteration can actually raise here.
        try:
            with open('chinese_stopwords.txt', 'r', encoding='utf-8') as f:
                stopwords.update(line.strip() for line in f)
        except (OSError, UnicodeError):
            pass

        return stopwords

    def clean_text(self, text):
        """Strip everything except Chinese characters and normalize spaces.

        Digits, punctuation, and Latin letters are replaced by spaces,
        then consecutive whitespace is collapsed and the ends trimmed.
        """
        text = self._NON_CHINESE_RE.sub(' ', text)
        text = self._WHITESPACE_RE.sub(' ', text)
        return text.strip()

    def advanced_cut(self, text, use_pos=False):
        """Segment text with jieba, dropping stopwords and 1-char tokens.

        Args:
            text: raw Chinese text.
            use_pos: accepted for interface compatibility; POS tagging is
                not implemented and the flag is currently ignored.

        Returns:
            List of tokens longer than one character and not in the
            stopword set.
        """
        words = jieba.lcut(text)  # jieba precise (default) mode
        return [w for w in words if len(w) > 1 and w not in self.stopwords]

    def extract_features(self, text):
        """Return the filtered tokens of *text* as a space-joined string."""
        return ' '.join(self.advanced_cut(text))