manu committed
Commit 6508e04
1 Parent(s): 3008398

Upload README.md with huggingface_hub

Files changed (1)
README.md +5 -5
README.md CHANGED
````diff
@@ -1,3 +1,4 @@
+
 ---
 license: mit
 datasets:
@@ -53,10 +54,9 @@ model_name = "croissantllm/base_185k"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
 
-print(f"Number of parameters: {model.num_parameters()}")
-print(f"Number of non-embedding parameters: {model.num_parameters(exclude_embeddings=True)}")
-
-inputs = tokenizer("I am so tired I could sleep right now. -> Je suis si fatigué que je pourrais m'endormir maintenant.\nHe is heading to the market. -> Il va au marché.\nWe are running on the beach. ->", return_tensors="pt").to(model.device)
+inputs = tokenizer("I am so tired I could sleep right now. -> Je suis si fatigué que je pourrais m'endormir maintenant.
+He is heading to the market. -> Il va au marché.
+We are running on the beach. ->", return_tensors="pt").to(model.device)
 tokens = model.generate(**inputs, max_length=100, do_sample=True, top_p=0.95, top_k=60, temperature=0.5)
 print(tokenizer.decode(tokens[0]))
 
@@ -64,4 +64,4 @@ print(tokenizer.decode(tokens[0]))
 inputs = tokenizer("Capitales: France -> Paris, Italie -> Rome, Allemagne -> Berlin, Espagne ->", return_tensors="pt", add_special_tokens=True).to(model.device)
 tokens = model.generate(**inputs, max_length=100, do_sample=True, top_p=0.95, top_k=60)
 print(tokenizer.decode(tokens[0]))
-```
+```
````
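
One thing to flag in the new version: the rewritten `tokenizer(...)` call breaks a plain string literal across physical lines, which is a syntax error in Python, whereas the deleted line encoded the same line breaks as `\n` escapes inside a single-line literal. Below is a minimal runnable sketch of the post-commit snippet with the literal repaired as a triple-quoted string; the `torch` and `transformers` imports are assumed from the unchanged part of the README.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "croissantllm/base_185k"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")

# Few-shot English -> French translation prompt; the triple-quoted string keeps
# the multi-line layout from the commit while remaining valid Python.
inputs = tokenizer("""I am so tired I could sleep right now. -> Je suis si fatigué que je pourrais m'endormir maintenant.
He is heading to the market. -> Il va au marché.
We are running on the beach. ->""", return_tensors="pt").to(model.device)
tokens = model.generate(**inputs, max_length=100, do_sample=True, top_p=0.95, top_k=60, temperature=0.5)
print(tokenizer.decode(tokens[0]))

# Few-shot capital-city completion prompt ("Capitales: ..."), unchanged by the commit.
inputs = tokenizer("Capitales: France -> Paris, Italie -> Rome, Allemagne -> Berlin, Espagne ->", return_tensors="pt", add_special_tokens=True).to(model.device)
tokens = model.generate(**inputs, max_length=100, do_sample=True, top_p=0.95, top_k=60)
print(tokenizer.decode(tokens[0]))
```

The triple-quoted literal produces the same newline-separated few-shot prompt that the old `\n` escapes did, so generation behavior should be unchanged.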