import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.probability import FreqDist
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from nltk.collocations import BigramCollocationFinder, TrigramCollocationFinder
from nltk.metrics import BigramAssocMeasures, TrigramAssocMeasures

# 下载必要的nltk数据
# nltk.download('punkt')
# nltk.download('stopwords')

def extract_collocations(text):
    """Extract candidate collocations from *text*.

    Returns a heterogeneous list: the top-10 PMI-ranked bigram tuples
    followed by the 10 most frequent words (plain strings).

    NOTE(review): this definition is shadowed by a later function of the
    same name in this file, so it is effectively dead code.
    """
    # Lowercase and tokenize.
    words = word_tokenize(text.lower())
    # Keep alphabetic tokens that are not English stopwords.
    stops = set(stopwords.words('english'))
    words = [w for w in words if w.isalpha() and w not in stops]

    # Rank bigram phrases by pointwise mutual information (PMI).
    finder = BigramCollocationFinder.from_words(words)
    top_bigrams = finder.nbest(BigramAssocMeasures.pmi, 10)

    # The 10 most frequent individual words.
    freq = FreqDist(words)
    top_words = [w for w, _ in freq.most_common(10)]

    # Combine bigram tuples and high-frequency words into one list.
    return list(top_bigrams) + top_words

# 测试
# text = "buttons out of order,chalk on his hands and sleeves, the first to look out the window during lessons, and the last to leave the classroom for the day."
# result = extract_collocations(text)
# print(result)

def extract_collocations(sentence):
    """Return the top bigram and trigram collocations of *sentence*.

    Tokens are lowercased; non-alphabetic tokens and English stopwords
    are discarded.  Bigrams and trigrams are each ranked by pointwise
    mutual information (PMI); the top 10 of each are returned as a
    single list of tuples (bigrams first, then trigrams).
    """
    stop_words = set(stopwords.words('english'))
    # Lowercase, tokenize, and keep only alphabetic non-stopword tokens.
    cleaned = [
        tok for tok in word_tokenize(sentence.lower())
        if tok.isalpha() and tok not in stop_words
    ]

    # Top-10 two-word collocations by PMI.
    pairs = BigramCollocationFinder.from_words(cleaned).nbest(
        BigramAssocMeasures.pmi, 10)

    # Top-10 three-word collocations by PMI.
    triples = TrigramCollocationFinder.from_words(cleaned).nbest(
        TrigramAssocMeasures.pmi, 10)

    return pairs + triples


# Demo: run the NLTK-based extractor on an example sentence and print
# each extracted collocation on its own line.
sentence = "I have a great deal of respect for my parents. They always give me a piece of advice."
collocations = extract_collocations(sentence)
print("提取到的固定搭配:")
for ngram in collocations:
    print(" ".join(ngram))


import spacy
spacy.prefer_gpu()  # use the GPU if one is available; silently falls back to CPU
# Load the small English pipeline (requires `python -m spacy download en_core_web_sm`).
nlp = spacy.load("en_core_web_sm")

def extract_collocations_spacy(sentence):
    """Extract adjective-noun collocations from *sentence* with spaCy.

    Returns a list of ``(adjective, noun)`` tuples for every token whose
    dependency relation is ``amod`` (adjectival modifier) and whose head
    is tagged as a noun.

    As a debugging side effect, if a ``Doc._.phrases`` extension (e.g.
    registered by pytextrank) is present on the pipeline, the ranked
    phrases are printed; on a plain ``en_core_web_sm`` pipeline this
    step is skipped instead of crashing.
    """
    doc = nlp(sentence)

    # BUG FIX: `doc._.phrases` is a custom extension registered only by
    # components such as pytextrank, which this file never adds to the
    # pipeline.  Unguarded access raised AttributeError on the stock
    # pipeline loaded above, so check for the extension first.
    if doc.has_extension("phrases"):
        for phrase in doc._.phrases:
            print(phrase.text)

    collocations = []
    for token in doc:
        # Adjective modifying a noun, e.g. ("beautiful", "garden").
        if token.dep_ == "amod" and token.head.pos_ == "NOUN":
            collocations.append((token.text, token.head.text))
    return collocations

# sentence = "The beautiful garden attracts many visitors."
# result = extract_collocations_spacy(sentence)
# print(result)