# coding=utf-8

import ast

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split

from bot.openai_bot import OpenAIBot
from conf.config import logger


def get_embedding(text_list):
    """Return embedding vectors for every text in *text_list*.

    Delegates to the project's OpenAI bot wrapper; one vector is
    produced per input string.
    """
    bot = OpenAIBot()
    return bot.embeddings(text_list=text_list)


def main(
    csv_path=r"C:\Users\Administrator\Desktop\baike_embedded2.csv",
    test_size=0.2,
    n_estimators=500,
):
    """Train and evaluate a random-forest classifier on pre-embedded titles.

    Loads a CSV whose ``title_vector`` column holds stringified embedding
    lists (produced earlier by batch-calling :func:`get_embedding` on the raw
    titles), parses the vectors back into numpy arrays, splits the data into
    train/test sets, fits a ``RandomForestClassifier`` against the ``type_id``
    labels, and logs a classification report.

    :param csv_path: path to the CSV containing ``title_vector`` and
        ``type_id`` columns (defaults to the original hard-coded path).
    :param test_size: fraction of rows held out for evaluation.
    :param n_estimators: number of trees in the forest.
    """
    # Step 2: load the vectorized data and train.
    df = pd.read_csv(csv_path)
    # ast.literal_eval instead of eval(): the column is text loaded from
    # disk, and literal_eval only accepts Python literals, so malformed or
    # malicious cell contents cannot execute arbitrary code.
    df['title_vector'] = df.title_vector.apply(ast.literal_eval).apply(np.array)

    # Split into train and test sets; the fixed random_state keeps the
    # split reproducible across runs.
    x_train, x_test, y_train, y_test = train_test_split(
        list(df.title_vector.values), df.type_id, test_size=test_size, random_state=42
    )

    # Train the random-forest classifier and evaluate on the held-out set.
    clf = RandomForestClassifier(n_estimators=n_estimators)
    clf.fit(x_train, y_train)
    y_predict = clf.predict(x_test)

    report = classification_report(y_test, y_predict)
    logger.info(f"\n{report}")


if __name__ == '__main__':
    # Entry point intentionally disabled; invoke main() here to run training.
    pass
