import os

from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report, \
    confusion_matrix
import pickle
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from config import conf
from common import get_id2label
import pandas as pd
import jieba.posseg as psg
import jieba
from common import ensure_pdir_exist

# Pandas display options for the debug prints below:
pd.set_option('display.expand_frame_repr', False)  # avoid wrapping wide tables across lines
pd.set_option('display.max_columns', None)  # make sure all columns are visible


def train_validate_rf():
    """Train a TF-IDF + RandomForest classifier on the preprocessed train set.

    Fits the vectorizer and model, prints validation metrics on a held-out
    20% split, and persists both artifacts to the paths in ``conf.rf_config``.
    """
    # Use context managers so the file handles are closed deterministically
    # instead of being leaked until GC.
    with open(conf.stop_words_path, 'r', encoding='utf-8') as f:
        stop_words = f.read().split()
    transformer = TfidfVectorizer(stop_words=stop_words)
    data = pd.read_csv(conf.rf_config.preprocessed_train_path, sep='\t')
    # Rows whose cut text is empty are read back from the TSV as NaN and would
    # crash fit_transform; treat them as empty documents instead.
    data['text_cut'] = data['text_cut'].fillna('')
    x = transformer.fit_transform(data['text_cut'])
    print(f'TF-IDF matrix shape: {x.shape}, vocabulary size: {len(transformer.vocabulary_)}')
    ensure_pdir_exist(conf.rf_config.tfidf_path)
    with open(conf.rf_config.tfidf_path, 'wb') as f:
        pickle.dump(transformer, f)

    y = data['label']
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
    model = RandomForestClassifier(n_jobs=-1)
    model.fit(x_train, y_train)
    validate(model, x_test, y_test)
    ensure_pdir_exist(conf.rf_config.rf_model_path)
    with open(conf.rf_config.rf_model_path, 'wb') as f:
        pickle.dump(model, f)


def preprocess(path, target_path):
    """Tokenize a raw ``text<TAB>label`` file and write the result as TSV.

    :param path: source TSV with two unnamed columns (text, label)
    :param target_path: destination TSV; parent directory is created if needed
    """
    data = pd.read_csv(path, sep='\t', names=['text', 'label'])
    data['text_cut'] = data['text'].apply(cut)
    ensure_pdir_exist(target_path)
    # index=False keeps the spurious unnamed index column out of the output file
    data.to_csv(target_path, sep='\t', index=False)


def validate(model, x_test, y_test):
    """Evaluate *model* on a held-out split and print the metrics.

    Prints accuracy, micro-averaged precision/recall/F1, the full
    classification report, and the confusion matrix.
    """
    predicted = model.predict(x_test)

    metrics = [
        ('准确率', accuracy_score(y_test, predicted)),
        ('精确率', precision_score(y_test, predicted, average='micro')),
        ('召回率', recall_score(y_test, predicted, average='micro')),
        ('F1-score', f1_score(y_test, predicted, average='micro')),
        ('分类报告', classification_report(y_test, predicted)),
    ]
    for label, value in metrics:
        print(f'{label}：{value}')
    print(f'混淆矩阵：\n{confusion_matrix(y_test, predicted)}')


# Lazily-loaded inference singletons, populated on first use by
# get_transformer() / get_model() below.
_transformer = None  # cached TF-IDF vectorizer unpickled from disk
_model = None  # cached RandomForest model unpickled from disk


def get_transformer():
    """Return the fitted TF-IDF vectorizer, loading it from disk on first use."""
    global _transformer
    if _transformer is None:
        # Context manager closes the file instead of leaking the handle.
        with open(conf.rf_config.tfidf_path, 'rb') as f:
            _transformer = pickle.load(f)
    return _transformer


def get_model():
    """Return the trained RandomForest model, loading it from disk on first use."""
    global _model
    if _model is None:
        # Context manager closes the file instead of leaking the handle.
        with open(conf.rf_config.rf_model_path, 'rb') as f:
            _model = pickle.load(f)
    return _model


def validate_after_train():
    """Re-evaluate the persisted model on the training data's held-out split.

    Uses the same test_size and random_state as train_validate_rf(), so
    train_test_split reproduces the exact split used during training.
    The triple-quoted string at the end records the metrics from a previous
    run for reference; it is a no-op expression at runtime.
    """
    data = pd.read_csv(conf.rf_config.preprocessed_train_path, sep='\t')
    x = get_transformer().transform(data['text_cut'])
    y = data['label']
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
    model = get_model()
    validate(model, x_test, y_test)
    """
准确率：0.8275833333333333
精确率：0.8275833333333333
召回率：0.8275833333333333
F1-score：0.8275833333333333
分类报告：              precision    recall  f1-score   support

           0       0.87      0.83      0.85      3639
           1       0.89      0.86      0.88      3533
           2       0.75      0.79      0.77      3581
           3       0.89      0.90      0.90      3583
           4       0.84      0.75      0.79      3565
           5       0.82      0.78      0.80      3668
           6       0.79      0.82      0.81      3651
           7       0.92      0.87      0.90      3634
           8       0.88      0.83      0.85      3586
           9       0.68      0.84      0.75      3560

    accuracy                           0.83     36000
   macro avg       0.83      0.83      0.83     36000
weighted avg       0.83      0.83      0.83     36000

混淆矩阵：
[[3008   44  323   18   29   35   58   15   22   87]
 [  47 3056  118   27   29   46   40   20   35  115]
 [ 243   85 2815   27   89   19  148   19   32  104]
 [  11   18   24 3239   24   80   64   16   23   84]
 [  55   35  225   52 2679   78  102   32  140  167]
 [  25   78   29  114   66 2873  141   19   23  300]
 [  40   40  109   56   58  154 2987   30   32  145]
 [  10   17   20   17   15   56   72 3175   29  223]
 [  18   23   52   33  172   27   66   53 2965  177]
 [  20   35   28   40   39  150   91   77   84 2996]]
    """


def predict(text):
    """Classify a single raw text and return its human-readable label."""
    features = get_transformer().transform([cut(text)])
    predicted_id = get_model().predict(features)[0]
    return get_id2label()[predicted_id]


def cut(text, filter_by_pos=False):
    """Tokenize *text* with jieba.

    :param text: raw text to segment
    :param filter_by_pos: when True, keep only tokens whose part-of-speech tag
        is a noun (n/nr/ns/nt), verb (v) or adjective (a)
    :return: tokens joined by single spaces
    """
    if filter_by_pos:
        # frozenset gives O(1) membership tests instead of scanning a list
        allowed_pos = frozenset(('n', 'nr', 'ns', 'nt', 'v', 'a'))
        words = [word for word, pos in psg.cut(text) if pos in allowed_pos]
    else:
        words = jieba.lcut(text)
    return ' '.join(words)


def _test_predict():
    """Smoke-test predict() on a few sample headlines."""
    samples = (
        '名师详解考研复试英语听力备考策略',
        '四六级考前阅读冲刺：如何发挥正常水平',
        '广东雷州男子嗜水如命日饮百斤',
    )
    for sample in samples:
        print(f'text={sample} result={predict(sample)}')


if __name__ == '__main__':
    # Tokenize all three raw splits once, then train, smoke-test inference,
    # and re-validate the persisted artifacts.
    preprocess(conf.train_path, conf.rf_config.preprocessed_train_path)
    preprocess(conf.test_path, conf.rf_config.preprocessed_test_path)
    preprocess(conf.dev_path, conf.rf_config.preprocessed_dev_path)
    train_validate_rf()
    _test_predict()
    validate_after_train()
