guilfb committed
Commit 9cb0324 (1 parent: d7b5864)

Update README.md

Files changed (1)
  1. README.md: +3 −3
README.md CHANGED
@@ -46,7 +46,7 @@ Then you can use the model like this:
 from sentence_transformers import SentenceTransformer
 sentences = ["Apprendre le python", "Devenir expert en comptabilité"]

-model = SentenceTransformer('inokufu/bertheo')
+model = SentenceTransformer('inokufu/flaubert-base-uncased-xnli-sts-finetuned-education')
 embeddings = model.encode(sentences)
 print(embeddings)
 ```
@@ -72,8 +72,8 @@ def mean_pooling(model_output, attention_mask):
 sentences = ["Apprendre le python", "Devenir expert en comptabilité"]

 # Load model from HuggingFace Hub
-tokenizer = AutoTokenizer.from_pretrained('inokufu/bertheo')
-model = AutoModel.from_pretrained('inokufu/bertheo')
+tokenizer = AutoTokenizer.from_pretrained('inokufu/flaubert-base-uncased-xnli-sts-finetuned-education')
+model = AutoModel.from_pretrained('inokufu/flaubert-base-uncased-xnli-sts-finetuned-education')

 # Tokenize sentences
 encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
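
For context, the two updated `from_pretrained` calls sit inside the usual sentence-transformers "Usage (HuggingFace Transformers)" snippet, which continues past the tokenization shown here with a forward pass and mean pooling. The sketch below puts the renamed model id into that flow end to end. Only the model id and the lines visible in this diff come from the source; the body of `mean_pooling` and the steps after tokenization follow the standard sentence-transformers README template and are an assumption, not the exact README content.

```python
import torch
from transformers import AutoTokenizer, AutoModel

def mean_pooling(model_output, attention_mask):
    # Standard template (assumed): average token embeddings, weighted by the attention mask
    token_embeddings = model_output[0]  # first element holds the token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

sentences = ["Apprendre le python", "Devenir expert en comptabilité"]

# Load model from HuggingFace Hub (new id introduced by this commit)
tokenizer = AutoTokenizer.from_pretrained('inokufu/flaubert-base-uncased-xnli-sts-finetuned-education')
model = AutoModel.from_pretrained('inokufu/flaubert-base-uncased-xnli-sts-finetuned-education')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings (no gradients needed for inference)
with torch.no_grad():
    model_output = model(**encoded_input)

# Pool token embeddings into one vector per sentence
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print(sentence_embeddings)
```

The `SentenceTransformer('inokufu/flaubert-base-uncased-xnli-sts-finetuned-education')` path in the first hunk should yield equivalent embeddings with less code, since the library applies the model's configured pooling for you.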