yamete4 committed
Commit f7d94c1
Parent: 3908efa

Update README.md

Files changed (1)
  1. README.md +31 -28
README.md CHANGED
@@ -4,6 +4,9 @@ base_model: shpotes/codegen-350M-mono
 datasets:
 - flytech/python-codes-25k
 pipeline_tag: text-generation
+tags:
+- code
+- text-generation-inference
 ---

 # Model Card for Model ID
@@ -72,34 +75,34 @@ Users (both direct and downstream) should be made aware of the risks, biases and

 ## How to Get Started with the Model

-import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
-
-tokenizer = AutoTokenizer.from_pretrained("shpotes/codegen-350M-mono")
-model = AutoModelForCausalLM.from_pretrained("shpotes/codegen-350M-mono", trust_remote_code=True)
-
-input_ids = tokenizer(
-    context,
-    truncation=True,
-    padding=True,
-    return_tensors='pt',
-    pad_token_id=pad_token_id,
-).input_ids
-
-input_ids_len = input_ids.shape[1]
-
-with torch.no_grad():
-    input_ids = input_ids
-    tokens = model.generate(
-        input_ids,
-        do_sample=True,
-        num_return_sequences=num_return_sequences,
-        temperature=temp,
-        max_length=input_ids_len + max_length_sample,
-        top_p=top_p,
-        use_cache=True,
-    )
-    text = tokenizer.batch_decode(tokens[:, input_ids_len:, ...])
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+tokenizer = AutoTokenizer.from_pretrained("shpotes/codegen-350M-mono")
+model = AutoModelForCausalLM.from_pretrained("shpotes/codegen-350M-mono", trust_remote_code=True)
+
+input_ids = tokenizer(
+    context,
+    truncation=True,
+    padding=True,
+    return_tensors='pt',
+    pad_token_id=pad_token_id,
+).input_ids
+
+input_ids_len = input_ids.shape[1]
+
+with torch.no_grad():
+    input_ids = input_ids
+    tokens = model.generate(
+        input_ids,
+        do_sample=True,
+        num_return_sequences=num_return_sequences,
+        temperature=temp,
+        max_length=input_ids_len + max_length_sample,
+        top_p=top_p,
+        use_cache=True,
+    )
+    text = tokenizer.batch_decode(tokens[:, input_ids_len:, ...])


 [More Information Needed]
 
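As committed, the snippet is not runnable on its own: `context`, `pad_token_id`, `num_return_sequences`, `temp`, `max_length_sample`, and `top_p` are never defined, `input_ids = input_ids` is a no-op, and `pad_token_id` is passed to the tokenizer call rather than to `generate`, where it belongs. Below is a minimal runnable sketch of the same usage; the placeholder values for the undefined variables are assumptions, not part of the commit, and the EOS token is reused as the pad token since the GPT-2-style CodeGen tokenizer typically has none set.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("shpotes/codegen-350M-mono")
model = AutoModelForCausalLM.from_pretrained("shpotes/codegen-350M-mono", trust_remote_code=True)

# Placeholder values -- assumptions, not part of the commit:
context = "def fibonacci(n):"  # prompt to complete
num_return_sequences = 1       # completions to sample
temp = 0.2                     # sampling temperature
top_p = 0.95                   # nucleus-sampling cutoff
max_length_sample = 128        # new tokens to generate

# The tokenizer ships without a pad token; reuse EOS so padding works.
if tokenizer.pad_token_id is None:
    tokenizer.pad_token = tokenizer.eos_token

input_ids = tokenizer(
    context,
    truncation=True,
    padding=True,
    return_tensors='pt',
).input_ids

input_ids_len = input_ids.shape[1]

with torch.no_grad():
    tokens = model.generate(
        input_ids,
        do_sample=True,
        num_return_sequences=num_return_sequences,
        temperature=temp,
        max_length=input_ids_len + max_length_sample,
        top_p=top_p,
        use_cache=True,
        pad_token_id=tokenizer.pad_token_id,  # belongs here, not in the tokenizer call
    )

# Slice off the prompt tokens so only the sampled completion is decoded.
text = tokenizer.batch_decode(tokens[:, input_ids_len:], skip_special_tokens=True)
print(text[0])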