ariG23498 HF Staff committed on
Commit
d541133
·
verified ·
1 Parent(s): c966fcd

Upload google_gemma-3-1b-it_0.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. google_gemma-3-1b-it_0.py +28 -0
google_gemma-3-1b-it_0.py CHANGED
@@ -14,6 +14,34 @@
14
  try:
15
  from huggingface_hub import login
16
  login(new_session=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  with open('google_gemma-3-1b-it_0.txt', 'w') as f:
18
  f.write('Everything was good in google_gemma-3-1b-it_0.txt')
19
  except Exception as e:
 
14
  try:
15
  from huggingface_hub import login
16
  login(new_session=False)
17
+
18
+ # Use a pipeline as a high-level helper
19
+ from transformers import pipeline
20
+
21
+ pipe = pipeline("text-generation", model="google/gemma-3-1b-it")
22
+ messages = [
23
+ {"role": "user", "content": "Who are you?"},
24
+ ]
25
+ pipe(messages)
26
+
27
+ # Load model directly
28
+ from transformers import AutoTokenizer, AutoModelForCausalLM
29
+
30
+ tokenizer = AutoTokenizer.from_pretrained("google/gemma-3-1b-it")
31
+ model = AutoModelForCausalLM.from_pretrained("google/gemma-3-1b-it")
32
+ messages = [
33
+ {"role": "user", "content": "Who are you?"},
34
+ ]
35
+ inputs = tokenizer.apply_chat_template(
36
+ messages,
37
+ add_generation_prompt=True,
38
+ tokenize=True,
39
+ return_dict=True,
40
+ return_tensors="pt",
41
+ ).to(model.device)
42
+
43
+ outputs = model.generate(**inputs, max_new_tokens=40)
44
+ print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
45
  with open('google_gemma-3-1b-it_0.txt', 'w') as f:
46
  f.write('Everything was good in google_gemma-3-1b-it_0.txt')
47
  except Exception as e: