mrm8488 committed on
Commit
e5e4e5b
β€’
1 Parent(s): fc6a1e0

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +9 -7
README.md CHANGED
@@ -3,7 +3,7 @@ tags:
3
  - generated_from_trainer
4
  - code
5
  - coding
6
- - llama
7
  model-index:
8
  - name: gemma-2b-coder
9
  results: []
@@ -79,21 +79,23 @@ WIP
79
 
80
 
81
  ### Example of usage πŸ‘©β€πŸ’»
 
 
 
 
 
 
 
82
  ```py
83
  import torch
84
  from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
85
 
86
- model_id = "mrm8488/llama-2-coder-7b"
87
 
88
  tokenizer = AutoTokenizer.from_pretrained(model_id)
89
 
90
  model = AutoModelForCausalLM.from_pretrained(model_id).to("cuda")
91
 
92
- def create_prompt(instruction):
93
- system = "You are a coding assistant that will help the user to resolve the following instruction:"
94
- instruction = "### Instruction: " + instruction
95
- return system + "\n" + instruction + "\n\n" + "### Solution:" + "\n"
96
-
97
  def generate(
98
  instruction,
99
  max_new_tokens=256,
 
3
  - generated_from_trainer
4
  - code
5
  - coding
6
+ - gemma
7
  model-index:
8
  - name: gemma-2b-coder
9
  results: []
 
79
 
80
 
81
  ### Example of usage πŸ‘©β€πŸ’»
82
+
83
+ I recommend installing the following version of `torch`:
84
+
85
+ ```sh
86
+ pip install "torch>=2.1.1" -U
87
+ ```
88
+
89
  ```py
90
  import torch
91
  from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
92
 
93
+ model_id = "MAISAAI/gemma-2b-coder"
94
 
95
  tokenizer = AutoTokenizer.from_pretrained(model_id)
96
 
97
  model = AutoModelForCausalLM.from_pretrained(model_id).to("cuda")
98
 
 
 
 
 
 
99
  def generate(
100
  instruction,
101
  max_new_tokens=256,