Yin Fang committed
Commit a90edb6 · verified · 1 parent: 7d14422

Update README.md

Files changed (1)
  1. README.md +2 -1
README.md CHANGED
@@ -110,13 +110,14 @@ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 tokenizer = AutoTokenizer.from_pretrained("zjunlp/chatcell-base")
 model = AutoModelForSeq2SeqLM.from_pretrained("zjunlp/chatcell-base")
 input_text="Detail the 100 starting genes for a Mix, ranked by expression level: "
+
 # Encode the input text and generate a response with specified generation parameters
 input_ids = tokenizer(input_text,return_tensors="pt").input_ids
 output_ids = model.generate(input_ids, max_length=512, num_return_sequences=1, no_repeat_ngram_size=2, top_k=50, top_p=0.95, do_sample=True)
+
 # Decode and print the generated output text
 output_text = tokenizer.decode(output_ids[0],skip_special_tokens=True)
 print(output_text)
-
 ```
 
 
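For reference, this is how the README example reads after the commit, reassembled from the hunk above into a self-contained, runnable snippet; nothing is added beyond the `transformers` import line that appears as context in the hunk header.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the ChatCell tokenizer and model from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained("zjunlp/chatcell-base")
model = AutoModelForSeq2SeqLM.from_pretrained("zjunlp/chatcell-base")
input_text = "Detail the 100 starting genes for a Mix, ranked by expression level: "

# Encode the input text and generate a response with specified generation parameters
input_ids = tokenizer(input_text, return_tensors="pt").input_ids
output_ids = model.generate(
    input_ids,
    max_length=512,
    num_return_sequences=1,
    no_repeat_ngram_size=2,
    top_k=50,
    top_p=0.95,
    do_sample=True,
)

# Decode and print the generated output text
output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
print(output_text)
```

The commit itself only inserts blank lines before the two comments (and drops a trailing blank line before the closing fence); the code behavior is unchanged.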