import nltk
# nltk.download('stopwords')  # already downloaded (requires VPN to fetch)
import pymysql
import os
import copy

os.environ["TOKENIZERS_PARALLELISM"] = "false"
from bertopic import BERTopic


def add_stopwords(stopwordList=None):
    """Append extra stop words to the module-level ``stopwords`` list.

    Args:
        stopwordList: optional iterable of words to add; ``None`` adds nothing.
    """
    # NOTE(review): relies on the global `stopwords` created under
    # __main__ — confirm this module is only used as a script.
    stopwords.extend(stopwordList or [])


def get_batch_data(date):
    """Fetch (documentID, document) rows for *date* and split them into parallel lists.

    Args:
        date: date string used to filter the `user2doc` table.

    Returns:
        tuple[list, list]: (doc_ids, doc_ctns); both empty when the query fails.
    """
    # get_id_doc_1 returns None on a query error — guard so we don't
    # raise TypeError iterating None; degrade to empty batches instead.
    instances = get_id_doc_1(date) or []
    doc_ids = [row[0] for row in instances]
    doc_ctns = [row[1] for row in instances]
    return doc_ids, doc_ctns


def get_id_doc_1(date):
    """Query all (documentID, document, date) rows for *date* from `user2doc`.

    Args:
        date: date string matched against the `date` column.

    Returns:
        list[tuple] | None: result rows, or ``None`` when the query fails.
    """
    # Open database connection
    db = pymysql.connect(
        host='118.89.122.209',  # remote host (default would be 127.0.0.1)
        user='select_user',
        passwd='1234',
        port=3306,
        db='poav_db',
        charset='utf8',
    )
    # Parameterized query — the original interpolated `date` with an
    # f-string, which is an SQL-injection vector.
    sql = """SELECT documentID,document,`date`
                        FROM user2doc
                        WHERE date = %s;"""
    try:
        with db.cursor() as cursor:
            cursor.execute(sql, (date,))
            return list(cursor.fetchall())
    except Exception:
        # rollback is a no-op for a SELECT; kept for parity with the
        # original error path. Callers treat None as "query failed".
        db.rollback()
        return None
    finally:
        db.close()  # original leaked the connection on every call


def date_topic_(stopword_list=None, date=None):
    """Incrementally fit the saved topic model on the documents of *date*.

    Loads the persisted BERTopic model, partial-fits it on the day's
    documents, saves it back, then reduces a deep copy to 9 topics to get
    the coarse ("aft") assignments.

    Args:
        stopword_list: optional extra stop words passed to add_stopwords().
        date: date string selecting the document batch.

    Returns:
        list[dict]: one record per document with pre- and post-reduction
        topic id/name.
    """
    topic_secondL = BERTopic.load("./static/resource/models/topic_9")
    doc_ids, doc_ctns = get_batch_data(date)
    add_stopwords(stopword_list)
    topic_secondL.partial_fit(doc_ctns)
    # topic_pre rows expose [Document]/[Topic]/[Name]
    topic_pre = topic_secondL.get_document_info(doc_ctns)
    topic_secondL.save("./static/resource/models/topic_9")
    # Reduce a deep copy so the saved incremental model keeps all topics.
    topic_topL = BERTopic.reduce_topics(copy.deepcopy(topic_secondL), doc_ctns, 9)
    # BUG FIX: post-reduction info must come from the reduced model
    # (topic_topL); the original reused topic_secondL, so "aft" values
    # duplicated the "pre" values and topic_topL was never used.
    topic_aft = topic_topL.get_document_info(doc_ctns)
    results = []  # renamed from `list` — don't shadow the builtin
    for i in range(len(doc_ids)):
        results.append({
            "id": i,
            # NOTE(review): trailing space in "docID " kept for output
            # compatibility — looks like a typo, confirm with consumers.
            "docID ": doc_ids[i],
            "topic_id_pre": topic_pre['Topic'][i],
            "topic_name_pre": topic_pre['Name'][i],
            "topic_id_aft": topic_aft['Topic'][i],
            "topic_name_aft": topic_aft['Name'][i],
        })
    return results


if __name__ == "__main__":
    # Load English stop words; add_stopwords() mutates this global.
    stopwords = nltk.corpus.stopwords.words('english')
    # topic_secondL, topic_topL, doc_ids, doc_ctns = train()

    # Renamed from `list` — don't shadow the builtin.
    results = date_topic_(date="2023-04-01")
    print(results)
