# import re
#
# # 示例文本
# text = "这里有一百五十六个苹果，三千三百二十三亿八千六百四十五万六千七百八十九个橘子，以及abc123和XYZ。"
#
# # 正则表达式匹配数字和字母
# pattern = re.compile(r'[a-zA-Z0-9]+|零|一|二|三|四|五|六|七|八|九|十|百|千|万|亿')
#
# # 使用正则表达式替换文本中的数字和字母
# cleaned_text = re.sub(pattern, '', text)
#
# # 打印处理后的文本
# print(cleaned_text)

# import os
# import random
# import shutil

# path='../THUCNews/'
#
# cate_list = os.listdir(path)  # 获取未分词语料库中所有类别
#
# for mydir in cate_list:
#     train_dir = './train_corpus/' + mydir + "/"  # 拼出训练集存贮的对应目录路径如：train_corpus/体育/
#     if not os.path.exists(train_dir):  # 是否存在分词目录，如果没有则创建该目录
#         os.makedirs(train_dir)
#
#     test_dir = './test_corpus/' + mydir+'/' # 拼出测试存贮的对应目录路径如：test_corpus/体育/
#     if not os.path.exists(test_dir):
#         os.makedirs(test_dir)
#
#     class_path = path+mydir+'/' # 生成当前类别的目录 如 data/体育/
#
#     file_list = os.listdir(class_path) # 生成分类下文本名称列表
#     length = len(file_list)
#     print(mydir,length)

# from Tools import readbunchobj
# from joblib import dump
# from sklearn.neighbors import KNeighborsClassifier
# from sklearn.preprocessing import StandardScaler
# import numpy as np
# from sklearn import metrics
#
# # 导入训练集
# trainpath = "train_word_bag/tf_idf_space.dat"
# train_set = readbunchobj(trainpath)
#
# # 导入测试集
# testpath = "test_word_bag/test_tf_idf_space.dat"
# test_set = readbunchobj(testpath)
#
# # 对数据进行标准化或归一化
# scaler = StandardScaler(with_mean=False)
# train_set.tdm = scaler.fit_transform(train_set.tdm)
# test_set.tdm = scaler.transform(test_set.tdm)
#
# # KNN K个最近邻居
# k = 5  # 设定初始K值为5
# print("开始训练KNN模型...")
# KNN_clf = KNeighborsClassifier(n_neighbors=k, n_jobs=-1).fit(train_set.tdm, train_set.label)  # kNN模型
# print("KNN模型训练完成。")
#
# dump(KNN_clf, 'KNN_model.joblib')
#
# print("USE: KNN模型 预测分类结果")
# KNN_predicted = KNN_clf.predict(test_set.tdm)
# KNN_total = len(KNN_predicted)
#
# # 计算错误率、分类精度
# def metrics_result(actual, predict, total):
#     error_count = np.sum(predict != actual)
#     error_rate = float(error_count) * 100 / float(total)
#     print("模型错误率:", error_rate, "%")
#
#     print('精度:{0:.3f}'.format(metrics.precision_score(actual, predict, average='weighted')))
#     print('召回:{0:.3f}'.format(metrics.recall_score(actual, predict, average='weighted')))
#     print('f1-score:{0:.3f}'.format(metrics.f1_score(actual, predict, average='weighted')))
#
# print("\nKNN模型：")
# metrics_result(test_set.label, KNN_predicted, KNN_total)
#
# import os
# import jieba
# import re
# from Tools import savefile, readfile
# import random
#
# # 假设有一个停用词列表文件stopwords.txt，其中每行包含一个停用词
# def load_stopwords(stopwords_file):
#     with open(stopwords_file, 'r', encoding='utf-8') as f:
#         stopwords = set([line.strip() for line in f])
#     return stopwords
#
# def corpus_segment(corpus_path, seg_path, stopwords_file):
#     stopwords = load_stopwords(stopwords_file)  # 加载停用词列表
#     catelist = os.listdir(corpus_path)  # 获取corpus_path下的所有子目录
#
#     for mydir in catelist:
#         class_path = corpus_path + mydir + "/"  # 拼出分类子目录的路径如：train_corpus/体育/
#         seg_dir = seg_path + mydir + "/"  # 拼出分类分词目录的路径如：train_seg/体育/
#         if not os.path.exists(seg_dir):  # 是否存在分词目录
#             os.makedirs(seg_dir)  # 如果没有则创建该目录
#
#         file_list = os.listdir(class_path)  # 获取未分词语料库中某一类别中的所有文本
#         random.shuffle(file_list)  # 将文件列表随机打乱
#         selected_files = file_list[:100]  # 选取前100个文件进行处理
#
#         for file_path in selected_files:  # 遍历类别目录下的所有文件
#             fullname = class_path + file_path  # 拼出文件名全路径如：train_corpus/art/21.txt
#             content = readfile(fullname)  # 读取文件内容
#
#             # 将字节字符串转换为普通字符串
#             content = content.decode('utf-8')
#
#             # 使用正则表达式去除空白字符
#             content = re.sub(r'\s+', '', content)
#
#             # 删除数字和英文字符
#             filter_pattern = re.compile(r'[a-zA-Z0-9]+')
#             content = re.sub(filter_pattern, '', content)
#
#             # 进行分词
#             content_seg = jieba.cut(content)
#
#             # 去停用词
#             content_seg = [word for word in content_seg if word not in stopwords]
#
#             # 将分词结果转换为字符串
#             content_seg_str = ' '.join(content_seg)
#
#             # 将分词结果保存到文件
#             savefile(seg_dir + file_path, content_seg_str.encode('utf-8'))
#
# if __name__ == "__main__":
#     corpus_path = "./train_corpus/"  # 需要分词的语料库路径
#     seg_path = "./train_corpus_seg_test/"  # 分词之后的语料库储存路径
#     stopwords_file = "./stopwords.txt"  # 停用词文件路径
#     corpus_segment(corpus_path, seg_path, stopwords_file)
#     print("训练语料分词结束！！！")
#
#     corpus_path = "./test_corpus/"  # 需要分词的语料库路径
#     seg_path = "./test_corpus_seg_test/"  # 分词之后的语料库储存路径
#     corpus_segment(corpus_path, seg_path, stopwords_file)
#     print("测试语料分词结束！！！")
#
# import joblib
# from sklearn.feature_extraction.text import TfidfVectorizer
# import jieba
# from thop.fx_profile import null_print
#
# # 加载保存的模型和TfidfVectorizer
# # log_clf = joblib.load('LogisticRegression_model.joblib')
# bayes_clf = joblib.load('Bayes_model.joblib')
# # # rf_clf = joblib.load('RandomForest_model.joblib')
# # lgb_clf = joblib.load('LightGBM_model.joblib')
# # el_clf = joblib.load('EL_model_cv.joblib')
#
# vectorizer = joblib.load('vectorizer.joblib')
#
# def bayes_predict_text(text):
#     # 使用保存的vectorizer转换新文本
#     tfidf = vectorizer.transform([text])
#     # 使用加载的模型进行预测
#     predicted = bayes_clf.predict(tfidf)
#     return predicted[0]
#
# # 从命令行读取用户输入的文本
# if __name__ == '__main__':
#
#     with open('stopwords.txt', encoding='utf-8') as f:
#         con = f.readlines()
#         stop_words = set()
#         for i in con:
#             i = i.replace("\n", "")
#             stop_words.add(i)
#
#     # print(stop_words)
#     # print(len(stop_words))
#
#     while True:
#
#         print("\n请输入新闻（输入'exit'退出）:")
#         user_input = input()
#
#         if user_input.lower() == 'exit':
#             break
#         elif user_input == '':  # 检查是否为空字符串
#             print("请重新输入")
#             continue
#
#         # 输入处理
#         user_input = user_input.replace('\r\n', '').strip()
#         user_input = user_input.replace(' ', '').strip()
#
#         result = []
#         for word in jieba.lcut(user_input):
#             if word not in stop_words:
#                 result.append(word)
#
#         # 将分词结果列表转换为单个字符串
#         text = ' '.join(result)
#
#         # print(text)
#
#         bayes_predicted_label = bayes_predict_text(text)
#         # log_predicted_label = log_predict_text(text)
#         # # rf_predicted_label = log_predict_text(text)
#         # lgb_predicted_label = log_predict_text(text)
#         # el_predicted_label = log_predict_text(text)
#
#         # 输出预测结果
#         print(f"Bayes模型         预测的类别标签：{bayes_predicted_label}")
#         # print(f"Log模型           预测的类别标签：{log_predicted_label}")
#         # # print(f"RandomForest模型  预测的类别标签：{rf_predicted_label}")
#         # print(f"LightGBM模型      预测的类别标签：{lgb_predicted_label}")
#         # print(f"EL模型            预测的类别标签：{el_predicted_label}")


import numpy as np
from joblib import dump
# Import LGBMClassifier (and the early-stopping callback) at the top of the file
from lightgbm import LGBMClassifier
from lightgbm import early_stopping
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

from Tools import metrics_result
from Tools import readbunchobj

# Load the training set (Bunch with .tdm TF-IDF matrix and .label targets)
trainpath = "train_word_bag/tf_idf_space.dat"
train_set = readbunchobj(trainpath)

# Load the test set
testpath = "test_word_bag/test_tf_idf_space.dat"
test_set = readbunchobj(testpath)

# Report the term-document matrix shapes before scaling
print("标准化/归一化前的矩阵形状:")
print(np.shape(train_set.tdm))
print(np.shape(test_set.tdm))

# Standardize features. with_mean=False is required here: centering a sparse
# TF-IDF matrix would densify it and likely exhaust memory.
scaler = StandardScaler(with_mean=False)
train_set.tdm = scaler.fit_transform(train_set.tdm)
test_set.tdm = scaler.transform(test_set.tdm)


# LightGBM
print("\n开始训练LightGBM模型...")
# BUG FIX: the original passed early_stopping_rounds=50 to the constructor but
# called fit() without any eval_set. LightGBM's early-stopping callback then
# raises "ValueError: For early stopping, at least one dataset and eval metric
# is required for evaluation", so training crashed. Early stopping needs a
# validation set, so hold one out of the TRAINING data (never the test set,
# to avoid leaking test information into model selection).
X_train, X_val, y_train, y_val = train_test_split(
    train_set.tdm, train_set.label,
    test_size=0.1, random_state=42, stratify=train_set.label,
)
lgbm_clf = LGBMClassifier(verbosity=-1, n_estimators=1000, learning_rate=0.1, n_jobs=-1)
lgbm_clf.fit(
    X_train, y_train,
    eval_set=[(X_val, y_val)],
    # Stop adding trees once the validation metric fails to improve for 50 rounds.
    callbacks=[early_stopping(stopping_rounds=50, verbose=False)],
)
print("LightGBM模型训练完成。")

# Persist the trained model for later inference scripts
dump(lgbm_clf, 'LightGBM_model.joblib')

print("USE: LightGBM模型 预测分类结果")
lgbm_predicted = lgbm_clf.predict(test_set.tdm)
lgbm_total = len(lgbm_predicted)
print("结束")

# Print error rate / precision / recall / F1 (weighted) via the shared helper
print("\nLightGBM模型：")
metrics_result(test_set.label, lgbm_predicted, lgbm_total)
