Update README.md
README.md CHANGED
@@ -31,7 +31,7 @@ sentences = ["Ibukota Perancis adalah Paris",
              "Pizza adalah makanan khas Italia",
              "Saya kuliah di Carneige Melon University"]
 
-model = SentenceTransformer('firqaaa/indo-sbert-finetuned-
+model = SentenceTransformer('firqaaa/indo-sbert-finetuned-anli')
 embeddings = model.encode(sentences)
 print(embeddings)
 ```
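With the truncated model id completed, the sentence-transformers snippet in the README becomes runnable end to end. A minimal sketch of the fixed usage; the import and the `pip install -U sentence-transformers` prerequisite follow the standard sentence-transformers model-card template and are not shown in this hunk:

```python
# Requires: pip install -U sentence-transformers
from sentence_transformers import SentenceTransformer

sentences = ["Ibukota Perancis adalah Paris",
             "Pizza adalah makanan khas Italia",
             "Saya kuliah di Carneige Melon University"]

# The commit completes the previously truncated model id.
model = SentenceTransformer('firqaaa/indo-sbert-finetuned-anli')
embeddings = model.encode(sentences)  # one vector per input sentence
print(embeddings)
```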
@@ -61,8 +61,8 @@ sentences = ["Ibukota Perancis adalah Paris",
 
 
 # Load model from HuggingFace Hub
-tokenizer = AutoTokenizer.from_pretrained('firqaaa/indo-sbert-finetuned-
-model = AutoModel.from_pretrained('firqaaa/indo-sbert-finetuned-
+tokenizer = AutoTokenizer.from_pretrained('firqaaa/indo-sbert-finetuned-anli')
+model = AutoModel.from_pretrained('firqaaa/indo-sbert-finetuned-anli')
 
 # Tokenize sentences
 encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
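The second hunk's context cuts off at tokenization. In the standard sentence-transformers model-card template, the Transformers-only path continues with attention-masked mean pooling over the token embeddings; a sketch of that continuation, assuming the template's usual `mean_pooling` helper (not visible in this diff):

```python
import torch
from transformers import AutoTokenizer, AutoModel

# Mean-pool token embeddings, ignoring padding via the attention mask
# (the helper the standard model-card template defines; assumed here).
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # first element: all token embeddings
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * mask, 1) / torch.clamp(mask.sum(1), min=1e-9)

sentences = ["Ibukota Perancis adalah Paris",
             "Pizza adalah makanan khas Italia",
             "Saya kuliah di Carneige Melon University"]

# Load model from HuggingFace Hub (the ids this commit completes)
tokenizer = AutoTokenizer.from_pretrained('firqaaa/indo-sbert-finetuned-anli')
model = AutoModel.from_pretrained('firqaaa/indo-sbert-finetuned-anli')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings, then pool to one vector per sentence
with torch.no_grad():
    model_output = model(**encoded_input)
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print(sentence_embeddings.shape)
```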