from transformers import AutoTokenizer, AutoModelForQuestionAnswering

# Load a pretrained extractive-QA model and its tokenizer.
# NOTE(review): the original checkpoint name "bert-base-uncased-finetuned-squad-v1"
# is not a published Hugging Face Hub repository and from_pretrained() raises a
# repo-not-found error for it; use a published SQuAD-finetuned checkpoint instead.
model_name = "distilbert-base-uncased-distilled-squad"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)


# Example question and context passage for extractive question answering.
question = "What is the capital of France?"
context = "France is a country in Western Europe. Its capital is Paris."

# Tokenize question + context as one paired sequence; truncate to the model's
# maximum length and pad so the tensors are rectangular.
inputs = tokenizer(question, context, return_tensors="pt", truncation=True, padding=True)

# Encoded tensors that would be fed to the model's forward pass.
input_ids = inputs["input_ids"]
attention_mask = inputs["attention_mask"]


# Example: named-entity recognition over the context passage with spaCy.
import spacy

nlp = spacy.load("en_core_web_sm")
parsed = nlp(context)

# Collect a (surface text, entity label) pair for every detected entity.
entities = []
for ent in parsed.ents:
    entities.append((ent.text, ent.label_))
print(entities)


from nltk.corpus import wordnet as wn

def get_synonyms(word):
    """Return the WordNet synonyms of *word* as a sorted list of strings.

    WordNet lemma names use underscores for multi-word lemmas
    (e.g. ``"working_capital"``); those are converted to spaces so the
    values can be substituted directly into natural-language text.
    The result is sorted because sets have no stable iteration order,
    and callers index into the returned list.
    """
    synonyms = set()
    for syn in wn.synsets(word):
        for lemma in syn.lemmas():
            synonyms.add(lemma.name().replace("_", " "))
    return sorted(synonyms)

# Example: data augmentation by replacing a word in the context with a synonym.
word_to_replace = "capital"
synonyms = get_synonyms(word_to_replace)

# Pick the first synonym that is not just the original word itself: the word
# is usually one of its own WordNet lemmas, so blindly taking synonyms[0]
# (from an unordered set) was nondeterministic and often a no-op replacement.
# Sort for a deterministic choice; fall back to the original word when
# WordNet offers no real alternative.
replacement = next(
    (s for s in sorted(synonyms) if s.lower() != word_to_replace.lower()),
    word_to_replace,
)
augmented_context = context.replace(word_to_replace, replacement)
print(augmented_context)

