import re

import nltk
from nltk import PorterStemmer, TrigramAssocMeasures
from nltk.collocations import BigramCollocationFinder, TrigramCollocationFinder
from nltk.corpus import stopwords
from nltk.metrics import BigramAssocMeasures
from elasticsearch import Elasticsearch

from conf import es_url

# Connect to Elasticsearch using the URL from the project config.
es = Elasticsearch(hosts=es_url)  # change to your Elasticsearch host and port


def fetch_text_data_from_es():
    """Fetch documents matching "like" from Elasticsearch and return their text contents.

    Returns:
        list[str]: the ``content`` field of up to 10,000 matching documents.
    """
    index_name = "conversation"  # change to your index name
    query = {
        "query": {
            "match": {
                "content": "like"
            }
        },
        "size": 10000  # adjust the number of documents as needed
    }

    # NOTE(review): `body=` is deprecated in elasticsearch-py 8.x — confirm
    # the installed client version before migrating to keyword arguments.
    response = es.search(index=index_name, body=query)

    # change "content" to your text field name
    return [hit["_source"]["content"] for hit in response["hits"]["hits"]]


def preprocess_text(text):
    """Normalize *text* for collocation analysis.

    Strips non-letters, lowercases, tokenizes, removes English stopwords,
    and Porter-stems the remaining tokens.

    Args:
        text: raw input string.

    Returns:
        str: space-joined preprocessed tokens.
    """
    # Replace anything that is not an ASCII letter with a space, then
    # normalize case so stopword matching and stemming behave uniformly.
    cleaned = re.sub(r'[^a-zA-Z]', ' ', text).lower()

    tokens = nltk.word_tokenize(cleaned)

    stop_words = set(stopwords.words('english'))
    stemmer = PorterStemmer()

    # Drop stopwords first, then stem the survivors (Porter Stemmer).
    stemmed = [stemmer.stem(tok) for tok in tokens if tok not in stop_words]

    return " ".join(stemmed)


# Find common bigram collocations containing a target word
def find_collocations(texts, target_word="like", top_n=20):
    """Print the top bigram collocations that contain *target_word*.

    Args:
        texts: iterable of raw text strings (e.g. fetched from Elasticsearch).
        target_word: word to look for inside each bigram. The corpus is
            stemmed by ``preprocess_text``, so pass a stemmed/lowercase form
            for reliable matches.
        top_n: maximum number of collocations to print.
    """
    # Merge all documents into one token stream so collocations are
    # scored over the whole corpus.
    text = " ".join(texts)

    # Tokenize and preprocess with NLTK.
    preprocessed_text = preprocess_text(text)
    words = nltk.word_tokenize(preprocessed_text)

    bigram_collocation_finder = BigramCollocationFinder.from_words(words)

    # Score bigrams by likelihood ratio (other association measures are
    # available on BigramAssocMeasures). score_ngrams returns pairs sorted
    # by descending score.
    bigram_scored = bigram_collocation_finder.score_ngrams(BigramAssocMeasures.likelihood_ratio)

    # Keep only bigrams containing the target word; order is preserved,
    # so the strongest collocations come first.
    matching_bigrams = [bigram for bigram, _ in bigram_scored if target_word in bigram]

    for bigram in matching_bigrams[:top_n]:
        print(" ".join(bigram))


# Find common trigram collocations containing a target word
def find_trigrams(texts, target_word="like", top_n=20):
    """Print the top trigram collocations that contain *target_word*.

    Args:
        texts: iterable of raw text strings (e.g. fetched from Elasticsearch).
        target_word: word to look for inside each trigram. The corpus is
            stemmed by ``preprocess_text``, so pass a stemmed/lowercase form
            for reliable matches.
        top_n: maximum number of collocations to print.
    """
    # Merge all documents into one token stream so collocations are
    # scored over the whole corpus.
    text = " ".join(texts)

    # Tokenize and preprocess with NLTK.
    preprocessed_text = preprocess_text(text)
    words = nltk.word_tokenize(preprocessed_text)

    trigram_collocation_finder = TrigramCollocationFinder.from_words(words)

    # Score trigrams by likelihood ratio (other association measures are
    # available on TrigramAssocMeasures). score_ngrams returns pairs sorted
    # by descending score.
    trigram_scored = trigram_collocation_finder.score_ngrams(TrigramAssocMeasures.likelihood_ratio)

    # Keep only trigrams containing the target word; order is preserved,
    # so the strongest collocations come first.
    matching_trigrams = [trigram for trigram, _ in trigram_scored if target_word in trigram]

    for trigram in matching_trigrams[:top_n]:
        print(" ".join(trigram))


# Entry point
def main():
    """Fetch texts from Elasticsearch and report bigram/trigram collocations."""
    documents = fetch_text_data_from_es()
    for report in (find_collocations, find_trigrams):
        report(documents)


if __name__ == "__main__":
    main()