from mylib_db import *
from mylib_utils import *
import re
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
import jieba
from jieba import analyse

sp_db_table = "biz_article"

# # Update header info (commented-out legacy snippet, kept for reference)
# update_str = 'BatchId="%s", ValueUnit="%s", ProjectName="%s"' % \
#                  (TASK_ID, value_unit, project_name)
# print("更新文件头Head信息： %s" % (update_str))
# update_db(conf['DbSaveHeadTable'], update_str, query_conditon)


def getArticelText(data_key):
    """Look up the newest ``biz_article`` row for *data_key*.

    Returns the first row (ordered by ``create_time DESC``) converted to a
    JSON-style dict, or ``""`` when no row matches — callers test ``!= ""``
    to detect a miss.
    """
    # NOTE(review): the condition is built by %-interpolation; data_key is
    # presumably a trusted md5 digest — confirm select_db sanitizes input.
    query_conditon = 'data_key="%s" ORDER BY create_time DESC' % data_key
    ret = select_db(sp_db_table, query_conditon)
    # Fix: the original miss-branch was tab-indented while the rest of the
    # function uses 4 spaces (TabError on Python 3); normalized here.
    if not ret:
        return ""
    # Convert raw DB rows into dicts keyed by column name; newest first.
    ret = dbQueryResult2JsonObj(ret, sp_db_table)
    return ret[0]


def remove_html_tags(text):
    """Strip HTML tags from *text* and collapse runs of whitespace.

    Each tag is replaced by a single space (so adjacent words don't fuse),
    then consecutive whitespace is squeezed to one space. Leading/trailing
    spaces introduced by boundary tags are preserved, matching prior
    behavior.
    """
    # Raw strings avoid invalid-escape warnings ('\s') on modern Python.
    clean_text = re.sub(r'<.*?>', ' ', text)
    clean_text = re.sub(r'\s+', ' ', clean_text)
    return clean_text


def remove_provided_words(text, provided_words):
    """Replace every occurrence of each phrase in *provided_words* with a
    single space.

    Used to strip boilerplate (e.g. attribution lines) before topic
    modelling. Returns the cleaned text.
    """
    # Fix: original body was tab-indented while the file uses 4 spaces;
    # normalized to avoid mixed-indentation errors on Python 3.
    for word in provided_words:
        text = text.replace(word, ' ')
    return text


def preprocess_text(text, stopwords, meaningless_words):
    """Drop stop words and user-specified meaningless words from *text*.

    *text* is assumed to be whitespace-tokenizable (e.g. the space-joined
    output of jieba segmentation). Returns the surviving tokens re-joined
    with single spaces.
    """
    # Merge both exclusion lists into one set for a single membership test.
    blocked = set(stopwords) | set(meaningless_words)
    kept = []
    for token in text.split():
        if token not in blocked:
            kept.append(token)
    return ' '.join(kept)


def prreprocess_stage(html_text):
    """Preprocess raw HTML into a space-joined token string for vectorizing.

    Pipeline: strip HTML tags -> drop boilerplate phrases -> jieba word
    segmentation -> stop-word / meaningless-word filtering.
    (The misspelled name "prreprocess" is kept — callers depend on it.)
    """
    # Strip HTML tags and collapse whitespace, then remove known boilerplate.
    clean_text = remove_html_tags(html_text)
    provided_words = ['内容资源由项目单位提供']  # boilerplate phrase to drop
    clean_text = remove_provided_words(clean_text, provided_words)
    print(clean_text)

    # Load the Chinese stop-word list for jieba.
    # NOTE(review): set_stop_words affects jieba.analyse keyword extraction,
    # not jieba.cut below — confirm this call is actually needed here.
    stopwords_file = 'stopwords-zh-all.txt'  # stop-word list file path
    analyse.set_stop_words(stopwords_file)

    # Segment Chinese text with jieba and re-join with spaces so that
    # CountVectorizer can tokenize on whitespace downstream.
    seg_list = jieba.cut(clean_text)
    processed_text = ' '.join(seg_list)
    print(processed_text)

    # Stop-word and meaningless-word lists for the final filtering pass.
    stopwords = set(ENGLISH_STOP_WORDS)  # sklearn's built-in English stop words
    meaningless_words = ['示例']  # user-specified meaningless words

    # Final token filtering pass.
    preprocessed_html_text = preprocess_text(processed_text, stopwords, meaningless_words)

    return preprocessed_html_text


def lda_algorithm(html_text, n_topics=1):
    """Fit an LDA model on a single document and print, per topic, the 5
    highest-weight words with their weights.

    Parameters:
        html_text: raw HTML/text of one article.
        n_topics:  number of LDA topics (default 1 — a single document
                   cannot meaningfully support more).
    """
    # Preprocess (HTML strip + jieba segmentation + stop-word filtering).
    preprocessed_html_text = prreprocess_stage(html_text)
    print("输入文本：")
    print(preprocessed_html_text)

    #############################
    # Build a bag-of-words model over the single-document corpus.
    vectorizer = CountVectorizer()
    corpus_vectorized = vectorizer.fit_transform([preprocessed_html_text])

    # Densify the sparse counts (small corpus, so memory cost is negligible).
    bow_corpus = corpus_vectorized.toarray()

    # Train the LDA model; fixed seed for reproducible topics.
    lda_model = LatentDirichletAllocation(n_components=n_topics, random_state=42)
    lda_model.fit(bow_corpus)

    # Fix: get_feature_names() was removed in scikit-learn 1.2 in favor of
    # get_feature_names_out(); support both old and new versions.
    if hasattr(vectorizer, "get_feature_names_out"):
        feature_names = vectorizer.get_feature_names_out()
    else:
        feature_names = vectorizer.get_feature_names()

    # Print each topic's word distribution (top-5 words by weight).
    for idx, topic in enumerate(lda_model.components_):
        print(f'Topic {idx + 1}:')
        top_indices = topic.argsort()[:-6:-1]  # indices of the 5 largest weights
        topic_words = [feature_names[i] for i in top_indices]
        topic_probabilities = [topic[i] for i in top_indices]
        for word, prob in zip(topic_words, topic_probabilities):
            print(f'{word}: {prob}')
        print()


def lda_algorithm2(corpus_list, html_text, topic_num=2):
    """Train LDA on *corpus_list*, print each topic's top words, then infer
    the topic distribution of *html_text*.

    Parameters:
        corpus_list: raw training documents (HTML or plain text).
        html_text:   document to classify; "" aborts early with a message.
        topic_num:   number of LDA topics to learn.
    """
    # Guard: nothing to do without a corpus or a prediction target.
    # (Fix: these lines were tab-indented in a 4-space file — TabError risk.)
    if len(corpus_list) == 0 or html_text == "":
        print("语料或预测文本为空")
        return

    # Preprocess every training document into a space-joined token string.
    corpus = [prreprocess_stage(text) for text in corpus_list]

    # Build the bag-of-words model over the training corpus.
    vectorizer = CountVectorizer()
    corpus_vectorized = vectorizer.fit_transform(corpus)

    # Densify the sparse counts.
    bow_corpus = corpus_vectorized.toarray()

    # Train the LDA model; fixed seed for reproducible topics.
    lda_model = LatentDirichletAllocation(n_components=topic_num, random_state=42)
    lda_model.fit(bow_corpus)

    # Fix: get_feature_names() was removed in scikit-learn 1.2 in favor of
    # get_feature_names_out(); support both old and new versions.
    if hasattr(vectorizer, "get_feature_names_out"):
        feature_names = vectorizer.get_feature_names_out()
    else:
        feature_names = vectorizer.get_feature_names()

    # Print each topic's top-4 words by weight (kept at 4 as before).
    for idx, topic in enumerate(lda_model.components_):
        print(f'Topic {idx + 1}:')
        top_words_indices = topic.argsort()[:-5:-1]
        top_words = [feature_names[i] for i in top_words_indices]
        print(top_words)
        print()

    ###########################
    # Infer the topic distribution of the new (held-out) document using the
    # vocabulary learned from the training corpus.
    new_doc = prreprocess_stage(html_text)
    new_doc_vectorized = vectorizer.transform([new_doc])
    new_doc_bow = new_doc_vectorized.toarray()
    topic_probabilities = lda_model.transform(new_doc_bow)
    print(f'Topic probabilities for the new document: {topic_probabilities}')


# Test: extract article topic words directly with LDA
def test_lda_result():
    """Fetch one article by data_key and run single-document LDA on it."""
    #data_key = "44466c87684e6193f83c1fdea9da5d0d"
    data_key = "8e27f16ad5e9d28eabdb7d06298ecef6"
    article = getArticelText(data_key)
    # Guard clause: bail out early when the article is missing.
    if article == "":
        print("Not found!")
        return
    #print(article)
    print(article["contents_org"])
    print()
    # Concatenate title, summary and body into one document.
    full_text = " ".join([article["title_org"], article["summary_org"], article["contents_org"]])
    lda_algorithm(full_text)


# Train LDA on a corpus first, then classify the article under test
def test_lda_result_with_training():
    """Train LDA on a small corpus (articles + seed keywords), then infer
    topic probabilities for one held-out article.

    Fixes: if the held-out article lookup failed, the original left
    ``html_text`` holding the *last training article's* text (stale loop
    variable) — or unbound — so the wrong document got classified.
    """
    # Articles used for training (data_key digests in biz_article).
    corpus_1_article_list = ["9ecd9b751632474c4366f69bad0b48cd",
                            "e73daee28e4e413c6495e01ad07a5525",
                            "a3726f739b7c77dc831c142c81b1ef0d",
                            "efb247c38820d726ffaf28d2281ef87d",
                            "8e27f16ad5e9d28eabdb7d06298ecef6"]
    # corpus_1_article_list = []
    # Seed keywords used for training.
    corpus_1_keywords = ["航空", "航天", "航海", "航空航天探索", "航空技术", "航天技术", "航空航天医学", "航空航天发展", "深海探索", "航海技术"]
    # corpus_2_keywords = ["环境科学", "环境保护与检测", "污染与防治", "生态文明建设", "生态保护", "节能减排", "碳达峰", "碳中和"]
    # corpus_1_keywords.extend(corpus_2_keywords)

    topic_num = 2

    # Build the training corpus: article texts plus the raw seed keywords.
    corpus_list = []
    for data_key in corpus_1_article_list:
        textObj = getArticelText(data_key)
        if textObj != "":
            #print(textObj)
            #print(textObj["contents_org"])
            article_text = textObj["title_org"] + " " + textObj["summary_org"] + " " + textObj["contents_org"]
            corpus_list.append(article_text)
    corpus_list.extend(corpus_1_keywords)

    # Article whose topic distribution we want to infer. Initialize to ""
    # so lda_algorithm2 bails out cleanly when the lookup misses.
    html_text = ""
    data_key = "44466c87684e6193f83c1fdea9da5d0d"
    textObj = getArticelText(data_key)
    if textObj != "":
        html_text = textObj["contents_org"]

    lda_algorithm2(corpus_list, html_text, topic_num)


# KNOWN ISSUE: topic-word classification comes out chaotic/unreliable!
def test_lda_result_with_training2():
    """Train LDA with 21 keyword-only topic groups as the corpus, then
    classify one article.

    Fixes: ``html_text`` is initialized to "" so a failed article lookup no
    longer raises NameError (the original only assigned it inside the
    ``if``); the assignment was also tab-indented in a 4-space file.
    """
    # One keyword group per intended topic (21 groups total).
    corpus_list = ["航空", "航天", "航海", "航空航天探索", "航空技术", "航天技术", "航空航天医学", "航空航天发展", "深海探索", "航海技术"] \
              + ["环境科学", "环境保护与检测", "污染与防治", "生态文明建设", "生态保护", "节能减排", "碳达峰", "碳中和"] \
              + ["天文地理", "天文", "大气科学", "海洋", "地理", "地质"] \
              + ["历史文明", "中国科技史", "世界科技史", "文物考古", "中国历史文明发展", "世界历史文明发展", "中外人物传记", "民俗文化及历史文明"] \
              + ["工业技术", "矿业工程", "冶金工程", "机电工业", "轻工业", "化学工业等", "矿石"] \
              + ["数学", "古典数学", "数学基础", "代数", "几何", "数学分析", "运筹学", "计算数学", "应用数学"] \
              + ["物理", "物理学基础", "理论物理", "力学", "声光电磁", "热学", "半导体物理", "应用物理", "分子与原子物理", "原子核", "高能物理"] \
              + ["化学", "化学基础", "无机化学", "有机化学", "高分子化学", "物理化学", "分析化学", "应用化学", "晶体学"] \
              + ["能源科技", "化石能源", "太阳能", "水能", "风能", "地热能", "生物能", "氢能", "核能", "热力工程", "新兴能源"] \
              + ["军事科技", "军事理论", "中国军事", "世界军事", "常规武器", "尖端武器", "军事信息化", "军事发展", "军事科技"] \
              + ["建筑水利", "建筑科学", "水利工程"] \
              + ["交通运输", "综合运输", "铁路运输", "公路运输", "水路运输", "航空运输", "无人驾驶", "物联网交通"] \
              + ["农林牧渔", "农业", "林业", "畜牧业", "渔业", "农业技术", "农副加工", "农林牧渔生态运用"] \
              + ["健康管理", "食品安全", "养生保健", "营养科学", "体育科学", "生理卫生知识", "减肥健身", "美容护肤", "心理健康", "老龄健康", "儿童健康", "两性健康"] \
              + ["临床知识", "科学用药", "疾病诊断", "病伤残治疗与健康", "亚健康鉴别诊断", "日常医学救援", "中医中药"] \
              + ["安全科学", "安全管理", "安全工程", "灾难及其预防", "自然灾害安全", "公共卫生安全", "事故灾难应急安全"] \
              + ["信息技术", "电子技术", "通信技术", "自动化技术", "计算机技术", "网络技术", "多媒体技术", "遥控遥感技术", "信息安全", "新兴领域技术", "人工智能技术"] \
              + ["生命科学", "生物演化与分布", "普通生物学", "遗传学", "生理学", "生物化学", "生物物理学", "分子生物学", "生物工程学", "古生物学", "微生物学", "植物学", "动物学", "生物多样性"] \
              + ["材料科学", "材料学理论", "金属材料", "非金属材料", "复合材料", "特种材料"] \
              + ["科学家"] \
              + ["科学科幻", "科普动态", "科幻探索", "科幻文创"]

    topic_num = 21

    # Article to classify; "" makes lda_algorithm2 bail out cleanly when
    # the lookup misses.
    html_text = ""
    data_key = "44466c87684e6193f83c1fdea9da5d0d"
    textObj = getArticelText(data_key)
    if textObj != "":
        html_text = textObj["contents_org"]

    lda_algorithm2(corpus_list, html_text, topic_num)


# Script entry point: run the single-article LDA demo by default; the
# training-based variants are kept available but disabled.
if __name__ == '__main__':
    test_lda_result()
    # test_lda_result_with_training()
    # test_lda_result_with_training2()
