dahara1 committed
Commit 40f8719
1 Parent(s): 3849685

Update README.md

Files changed (1)
  1. README.md +6 -6
README.md CHANGED
@@ -8,12 +8,12 @@ Benchmark results are in progress.
  I will upload it at a later date.


- sample code
- '''
+ ### sample code
+ ```
  pip install auto-gptq
- '''
+ ```

- '''
+ ```
  from transformers import AutoTokenizer
  from auto_gptq import AutoGPTQForCausalLM

@@ -34,7 +34,7 @@ prompt_template = f"### Instruction: {prompt}\n### Response:"
  tokens = tokenizer(prompt_template, return_tensors="pt").to("cuda:0").input_ids
  output = model.generate(input_ids=tokens, max_new_tokens=100, do_sample=True, temperature=0.8)
  print(tokenizer.decode(output[0]))
- '''
+ ```

- See Also
+ ### See Also
  https://github.com/PanQiWei/AutoGPTQ/blob/main/docs/tutorial/01-Quick-Start.md
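
Note: the diff elides README lines 20-33, which would load the quantized model and define the prompt. For context, below is a minimal end-to-end sketch of the AutoGPTQ quick-start pattern the excerpt follows. The repository id, the prompt text, and the `from_quantized` arguments are placeholders/assumptions, not taken from this README.

```
from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM

# Placeholder repo id (assumption) -- replace with the actual GPTQ model repository.
quantized_model_dir = "your-org/your-gptq-model"

# Load the tokenizer and the GPTQ-quantized weights onto the first GPU.
tokenizer = AutoTokenizer.from_pretrained(quantized_model_dir)
model = AutoGPTQForCausalLM.from_quantized(
    quantized_model_dir,
    device="cuda:0",
    use_safetensors=True,  # assumption: the repo stores safetensors weights
)

# Build an instruction-style prompt matching the template shown in the diff.
prompt = "Tell me about quantization."  # placeholder prompt (assumption)
prompt_template = f"### Instruction: {prompt}\n### Response:"

# Tokenize, generate, and decode, using the same parameters as the README excerpt.
tokens = tokenizer(prompt_template, return_tensors="pt").to("cuda:0").input_ids
output = model.generate(input_ids=tokens, max_new_tokens=100, do_sample=True, temperature=0.8)
print(tokenizer.decode(output[0]))
```

The AutoGPTQ quick-start tutorial linked under "See Also" covers the same loading and generation flow in more detail.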