edbeeching (HF staff) committed
Commit f668db3
Parent: f49478d

Update README.md

Files changed (1)
  1. README.md +20 -19
README.md CHANGED
@@ -55,32 +55,33 @@ This model is a fine-tuned version of [deepseek-ai/deepseek-math-7b-base](https:
 Here's how you can run the model using the `pipeline()` function from 🤗 Transformers:
 
 ```python
-# Install transformers from source - only needed for versions <= v4.34
-# pip install git+https://github.com/huggingface/transformers.git
-# pip install accelerate
-
+import re
 import torch
 from transformers import pipeline
 
-pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", torch_dtype=torch.bfloat16, device_map="auto")
+pipe = pipeline("text-generation", model="AI-MO/Numina-Math-7B", torch_dtype=torch.bfloat16, device_map="auto")
 
-# We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
 messages = [
-    {
-        "role": "system",
-        "content": "You are a friendly chatbot who always responds in the style of a pirate",
-    },
-    {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
+    {"role": "user", "content": "For how many values of the constant $k$ will the polynomial $x^{2}+kx+36$ have two distinct integer roots?"},
 ]
 prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
-print(outputs[0]["generated_text"])
-# <|system|>
-# You are a friendly chatbot who always responds in the style of a pirate.</s>
-# <|user|>
-# How many helicopters can a human eat in one sitting?</s>
-# <|assistant|>
-# Ah, me hearty matey! But yer question be a puzzler! A human cannot eat a helicopter in one sitting, as helicopters are not edible. They be made of metal, plastic, and other materials, not food!
+
+gen_config = {
+    "max_new_tokens": 1024,
+    "do_sample": False,
+    "stop_strings": ["```output"],
+    "tokenizer": pipe.tokenizer,
+}
+
+outputs = pipe(prompt, **gen_config)
+text = outputs[0]["generated_text"]
+
+print(text)
+python_code = re.findall(r"```python(.*?)```", text, re.DOTALL)[0]
+# WARNING: This executes the Python code contained in the string. We show this for educational purposes only.
+# Please refer to our full pipeline for a safer way to execute code.
+exec(python_code)
+
 ```
 ## Bias, Risks, and Limitations
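A note on the added snippet: `stop_strings=["```output"]` halts generation exactly where the model expects an execution result, and the bare `exec(python_code)` then runs that model-written code in the current process, which the commit itself flags as unsafe. As a hedged sketch only (the helper below is hypothetical and not part of this commit or the model's full pipeline), the extracted code can instead be run in a separate interpreter process with a timeout:

```python
import subprocess
import sys

def run_untrusted(python_code: str, timeout: int = 30) -> str:
    """Hypothetical helper: run model-generated code out of process.

    A subprocess with a timeout limits runaway loops and keeps the
    generated code out of the caller's interpreter, but it is NOT a
    sandbox; genuinely untrusted code belongs in an isolated
    container or VM.
    """
    result = subprocess.run(
        [sys.executable, "-c", python_code],  # fresh interpreter
        capture_output=True,
        text=True,
        timeout=timeout,  # raises subprocess.TimeoutExpired on a hang
    )
    return result.stdout if result.returncode == 0 else result.stderr

# Usage with the README snippet above:
#   result = run_untrusted(python_code)
# In a tool-integrated loop, that result would be appended after the
# generated "```output" marker before resuming generation.
```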