---
inference: false
license: cc-by-4.0
language:
- bg
metrics:
- accuracy
---
# 🇧🇬 KeyBERT-BG - Bulgarian Keyword Extraction
KeyBERT-BG is a model trained for keyword extraction in Bulgarian.
It was trained on [this](https://www.kaggle.com/datasets/auhide/bulgarian-articles-with-keywords) custom dataset, which I've uploaded to Kaggle.
## Usage
Import the libraries:
```python
import re
from typing import Dict
from pprint import pprint
from transformers import AutoTokenizer, AutoModelForTokenClassification
```
First, you'll have to define the following function, since the text preprocessing is custom and the standard `pipeline` approach won't suffice:
```python
def extract_keywords(
    text: str,
    model_id: str = "auhide/keybert-bg",
    max_len: int = 300,
    id2group: Dict[int, str] = {
        # Indicates that this is not a keyword.
        0: "O",
        # Beginning of a keyword.
        1: "B-KWD",
        # Continuation of a keyword (might also indicate the end of a keyword sequence).
        # You can merge these with the beginning keyword `B-KWD`
        # (see the merging sketch below).
        2: "I-KWD",
    },
    # Probability threshold based on which the keywords are accepted.
    # If their probability is less than `threshold`, they won't be added
    # to the list of keywords.
    threshold: float = 0.50
):
    # Initialize the tokenizer and model.
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    keybert = AutoModelForTokenClassification.from_pretrained(model_id)

    # Preprocess the text.
    # Surround punctuation with whitespace and collapse multiple whitespaces
    # into single ones.
    text = re.sub(r"([,\.?!;:\'\"\(\)\[\]„”])", r" \1 ", text)
    text = re.sub(r"\s+", r" ", text)
    words = text.split()

    # Tokenize the processed `text` (this includes padding or truncation).
    tokens_data = tokenizer(
        text.strip(),
        padding="max_length",
        max_length=max_len,
        truncation=True,
        return_tensors="pt"
    )
    input_ids = tokens_data.input_ids
    attention_mask = tokens_data.attention_mask

    # Predict the keywords.
    out = keybert(input_ids, attention_mask=attention_mask).logits
    # Softmax the last dimension so that the probabilities add up to 1.0.
    out = out.softmax(-1)
    # Based on the probabilities, generate the most probable keywords.
    out_argmax = out.argmax(-1)
    prediction = out_argmax.squeeze(0).tolist()
    probabilities = out.squeeze(0)

    return [
        {
            # Since the list of words does not have a [CLS] token, the index `i`
            # is one step ahead, which means that if we want to access the
            # corresponding word we should use the index `i - 1`.
            "entity": words[i - 1],
            "entity_group": id2group[idx],
            "score": float(probabilities[i, idx])
        }
        for i, idx in enumerate(prediction)
        if (idx == 1 or idx == 2) and float(probabilities[i, idx]) > threshold
    ]
```
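As a quick sanity check, you can call the function directly on a short inline string. The sentence below is just an illustrative example of mine (scores will vary):
```python
sample_text = "Илон Мъск е изпълнителен директор на Туитър."
# Each result is a dict with "entity", "entity_group", and "score" keys.
pprint(extract_keywords(sample_text, threshold=0.5))
```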
Now choose a longer text to use the model on. For example, I've chosen [this](https://novini.bg/biznes/biznes_tehnologii/781108) article.
Then, you can call `extract_keywords` on it and extract its keywords:
```python
# Read the text from a file, since the article text is long.
with open("input_text.txt", "r", encoding="utf-8") as f:
text = f.read()
# You can change the threshold based on your needs.
keywords = extract_keywords(text, threshold=0.5)
print("Keywords:")
pprint(keywords)
```
```sh
Keywords:
[{'entity': 'Туитър', 'entity_group': 'B-KWD', 'score': 0.9278278946876526},
{'entity': 'Илон', 'entity_group': 'B-KWD', 'score': 0.5862686634063721},
{'entity': 'Мъск', 'entity_group': 'B-KWD', 'score': 0.5289096832275391},
{'entity': 'изпълнителен',
'entity_group': 'B-KWD',
'score': 0.679943323135376},
{'entity': 'директор', 'entity_group': 'I-KWD', 'score': 0.6161141991615295}]
```
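As mentioned in the comments of `extract_keywords`, `I-KWD` entities can be merged with the preceding `B-KWD` entity to form multi-word keywords (e.g. "изпълнителен директор"). Here is a minimal sketch of such a merge, assuming the list format returned above; `merge_keywords` is a hypothetical helper, not part of the model card:
```python
from typing import Dict, List


def merge_keywords(entities: List[Dict]) -> List[str]:
    # Join each `B-KWD` entity with the `I-KWD` entities that follow it.
    phrases, current = [], []
    for entity in entities:
        if entity["entity_group"] == "B-KWD":
            # A new keyword starts; flush the previous phrase, if any.
            if current:
                phrases.append(" ".join(current))
            current = [entity["entity"]]
        elif entity["entity_group"] == "I-KWD" and current:
            current.append(entity["entity"])
    if current:
        phrases.append(" ".join(current))
    return phrases


print(merge_keywords(keywords))
# e.g. ['Туитър', 'Илон', 'Мъск', 'изпълнителен директор']
```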
Please note that you can also use the `pipeline` function from `transformers`, but the results will be worse, since it skips the custom preprocessing above.
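For reference, a minimal sketch of that `pipeline`-based alternative (the `aggregation_strategy` choice is mine, and keyword quality is expected to be lower):
```python
from pprint import pprint
from transformers import pipeline

keyword_pipe = pipeline(
    "token-classification",
    model="auhide/keybert-bg",
    # Group subword tokens back into whole words.
    aggregation_strategy="simple",
)
pprint(keyword_pipe(text))
```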