import pickle
from pathlib import Path

import pandas as pd
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, classification_report, f1_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from tqdm import tqdm  # progress-bar utility

from rf01_config import Config

pd.set_option('display.expand_frame_repr', False)  # avoid wrapping wide DataFrames onto multiple lines
pd.set_option('display.max_columns', None)  # ensure all columns are shown when printing
def model_train(n_rows=20000):
    """Train a TF-IDF + RandomForest text classifier and persist both artifacts.

    Reads the preprocessed training CSV (expects ``words`` and ``label``
    columns), vectorizes the text with TF-IDF, trains a
    ``RandomForestClassifier`` on an 80/20 split, prints evaluation metrics,
    and pickles the fitted model and vectorizer to ``conf.rf_model_save_path``.

    Args:
        n_rows: Number of rows to use for a quick training run (default 20000,
            matching the previous hard-coded cap). Pass ``None`` to train on
            the full dataset.
    """
    conf = Config()
    # Step 1: load the training data.
    df = pd.read_csv(conf.process_train_datapath)
    if n_rows is not None:
        df = df[:n_rows]
    words = df["words"]
    labels = df["label"]

    # Step 2: turn the text into numeric TF-IDF features.
    # Use a context manager so the stop-word file handle is always closed
    # (the previous bare open() leaked it).
    with open(conf.stop_words_path, encoding="utf-8") as f:
        stop_words = f.read().split()
    tfidf = TfidfVectorizer(stop_words=stop_words)
    # `features` is a sparse matrix in "(row, column) value" form.
    features = tfidf.fit_transform(words)

    # Step 3: split the data, train the model, and evaluate it.
    x_train, x_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, random_state=22
    )

    model = RandomForestClassifier()
    print("训练模型...")
    # fit() is a single blocking call; the former tqdm(range(1)) wrapper only
    # drew a fake one-step progress bar, so it was removed.
    model.fit(x_train, y_train)

    # Predict and report metrics on the held-out split.
    print("模型预测评估...")
    y_pred = model.predict(x_test)
    print("预测结果:", y_pred)
    print("准确率:", accuracy_score(y_test, y_pred))
    print("精确率 (micro):", precision_score(y_test, y_pred, average='micro'))
    print("召回率 (micro):", recall_score(y_test, y_pred, average='micro'))
    print("F1分数 (micro):", f1_score(y_test, y_pred, average='micro'))
    report = classification_report(y_test, y_pred)
    print(report)

    # Step 4: persist the model together with the fitted vectorizer so that
    # inference reuses the exact same vocabulary.
    print("保存模型和向量化器...")
    save_dir = Path(conf.rf_model_save_path)
    with open(save_dir / 'rf_model_.pkl', 'wb') as f:
        pickle.dump(model, f)
    with open(save_dir / 'tfidf_vectorizer_.pkl', 'wb') as f:
        pickle.dump(tfidf, f)

    print("模型和向量化器保存成功！")

if __name__ == '__main__':
    # Script entry point: run the full train / evaluate / save pipeline.
    model_train()