nickprock committed
Commit 0ca686e
Parent: 91fd2d0

Update README.md

Files changed (1)
  1. README.md +3 -3
README.md CHANGED
@@ -34,7 +34,7 @@ Then you can use the model like this:
 from sentence_transformers import SentenceTransformer
 sentences = ["Una ragazza si acconcia i capelli.", "Una ragazza si sta spazzolando i capelli."]
 
-model = SentenceTransformer('nickprock/sentence-bert-base-italian-xxl-cased')
+model = SentenceTransformer('nickprock/sentence-bert-base-italian-xxl-uncased')
 embeddings = model.encode(sentences)
 print(embeddings)
 ```
@@ -60,8 +60,8 @@ def mean_pooling(model_output, attention_mask):
 sentences = ['Una ragazza si acconcia i capelli.', 'Una ragazza si sta spazzolando i capelli.']
 
 # Load model from HuggingFace Hub
-tokenizer = AutoTokenizer.from_pretrained('nickprock/sentence-bert-base-italian-xxl-cased')
-model = AutoModel.from_pretrained('nickprock/sentence-bert-base-italian-xxl-cased')
+tokenizer = AutoTokenizer.from_pretrained('nickprock/sentence-bert-base-italian-xxl-uncased')
+model = AutoModel.from_pretrained('nickprock/sentence-bert-base-italian-xxl-uncased')
 
 # Tokenize sentences
 encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
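
For context, below is a minimal end-to-end sketch of the Transformers usage that the second hunk edits, with the checkpoint name after this commit. The body of `mean_pooling` is not shown in this diff; the version here follows the common sentence-transformers README pattern and is an assumption, as is the final `torch.no_grad()` / pooling step.

```python
import torch
from transformers import AutoTokenizer, AutoModel


def mean_pooling(model_output, attention_mask):
    # Assumed implementation: average token embeddings, ignoring padding via the attention mask.
    token_embeddings = model_output[0]  # first element holds all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
        input_mask_expanded.sum(1), min=1e-9
    )


sentences = ['Una ragazza si acconcia i capelli.', 'Una ragazza si sta spazzolando i capelli.']

# Load the renamed checkpoint from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained('nickprock/sentence-bert-base-italian-xxl-uncased')
model = AutoModel.from_pretrained('nickprock/sentence-bert-base-italian-xxl-uncased')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings without gradient tracking
with torch.no_grad():
    model_output = model(**encoded_input)

# Pool token embeddings into one fixed-size vector per sentence
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print(sentence_embeddings)
```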