vince62s committed on
Commit
32c49d3
1 Parent(s): af3b6da

Upload README.md

Browse files
Files changed (1) hide show
  1. README.md +12 -19
README.md CHANGED
@@ -53,24 +53,17 @@ dtype: bfloat16
53
  ## 💻 Usage
54
 
55
  ```python
56
- !pip install -qU transformers accelerate
57
-
58
- from transformers import AutoTokenizer
59
- import transformers
60
  import torch
61
-
62
- model = "vince62s/Phi-2-psy"
63
- messages = [{"role": "user", "content": "What is a large language model?"}]
64
-
65
- tokenizer = AutoTokenizer.from_pretrained(model)
66
- prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
67
- pipeline = transformers.pipeline(
68
- "text-generation",
69
- model=model,
70
- torch_dtype=torch.float16,
71
- device_map="auto",
72
- )
73
-
74
- outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
75
- print(outputs[0]["generated_text"])
76
  ```
 
 
53
  ## 💻 Usage
54
 
55
  ```python
 
 
 
 
56
  import torch
57
+ from transformers import AutoModelForCausalLM, AutoTokenizer
58
+ torch.set_default_device("cuda")
59
+ model = AutoModelForCausalLM.from_pretrained("vince62s/phi-2-psy", torch_dtype="auto", trust_remote_code=True)
60
+ tokenizer = AutoTokenizer.from_pretrained("vince62s/phi-2-psy", trust_remote_code=True)
61
+ inputs = tokenizer('''def print_prime(n):
62
+ """
63
+ Print all primes between 1 and n
64
+ """''', return_tensors="pt", return_attention_mask=False)
65
+ outputs = model.generate(**inputs, max_length=200)
66
+ text = tokenizer.batch_decode(outputs)[0]
67
+ print(text)
 
 
 
 
68
  ```
69
+