Hellisotherpeople committed
Commit 979a861 • 1 Parent(s): e381da2

Update Text-Generation.py

Files changed (1)
  1. Text-Generation.py +2 -2
Text-Generation.py CHANGED
@@ -49,7 +49,7 @@ st.caption("The inspiration for this space: https://en.wikipedia.org/wiki/Gadsby
 form = st.sidebar.form("choose_settings")
 form.header("Model Settings")
 
-model_name = form.text_area("Enter the name of the pre-trained model from transformers that we are using for Text Generation", value = "eachadea/vicuna-7b-1.1")
+model_name = form.text_area("Enter the name of the pre-trained model from transformers that we are using for Text Generation", value = "facebook/opt-1.3b")
 form.caption("This will download a new model, so it may take awhile or even break if the model is too large")
 percision = form.selectbox("What percision are we loading the model with?", ["8bit", "16bit", "32bit"], )
 form.caption("The lower the percision, the less ram the model takes and the faster it runs, but the quality is reduced")
@@ -88,7 +88,7 @@ else:
 factors = []
 chars_to_modify = {}
 
-generate_args = st.text_input('model.generate() arguments (in python dictionary format) ', '{"max_length": 50, "min_length": 50, "temperature": 2.0, "num_return_sequences": 1, "do_sample": False, "num_beams": 2, "repetition_penalty": 3.0}')
+generate_args = st.text_input('model.generate() arguments (in python dictionary format) ', '{"max_new_tokens": 50, "min_new_tokens": 50, "temperature": 2.0, "num_return_sequences": 1, "do_sample": False, "num_beams": 2, "repetition_penalty": 3.0}')
 st.caption("For more details on what these settings mean and a complete list of all settings, see here: https://huggingface.co/blog/how-to-generate and https://huggingface.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig and https://huggingface.co/docs/transformers/v4.29.1/en/main_classes/text_generation#transformers.GenerationMixin.generate")
 
 
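Context on the two changed lines (not part of the diff): the commit swaps the default checkpoint to facebook/opt-1.3b and switches the generation defaults from max_length/min_length to max_new_tokens/min_new_tokens. In transformers, max_length counts the prompt tokens plus the generated tokens, while max_new_tokens counts only the newly generated ones, so the new defaults always produce 50 fresh tokens regardless of prompt length. Below is a minimal sketch of how these defaults would typically be consumed; the loading and parsing code sits outside this diff, so everything other than the two default strings (including the ast.literal_eval step and the sample prompt) is an assumption, not the app's actual code.

```python
import ast

from transformers import AutoModelForCausalLM, AutoTokenizer

# New default model name introduced by this commit.
model_name = "facebook/opt-1.3b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# The sidebar default is a Python-style dict string (it contains a capitalized
# False), so ast.literal_eval is used here rather than json.loads; how the real
# app parses the text input is not visible in this diff.
generate_args = ast.literal_eval(
    '{"max_new_tokens": 50, "min_new_tokens": 50, "temperature": 2.0, '
    '"num_return_sequences": 1, "do_sample": False, "num_beams": 2, '
    '"repetition_penalty": 3.0}'
)

inputs = tokenizer("Writing a story without that fifth symbol is", return_tensors="pt")
# max_new_tokens / min_new_tokens count only generated tokens; the old
# max_length / min_length defaults also counted the prompt tokens.
outputs = model.generate(**inputs, **generate_args)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```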