yonikremer committed
Commit: d73a8e9
Parent: e67f273

improved error handling

Files changed (2)
  1. app.py +6 -3
  2. hanlde_form_submit.py +13 -0
app.py CHANGED
@@ -5,6 +5,7 @@ In the demo, the user can write a prompt
 """
 
 import streamlit as st
+from torch.cuda import CudaError
 
 from hanlde_form_submit import on_form_submit
 from on_server_start import main as on_server_start_main
@@ -36,7 +37,7 @@ with st.form("request_form"):
     submitted_prompt: str = st.text_area(
         label="Input for the model, It is highly recommended to write an English prompt.",
         help="Enter the prompt for the model. The model will generate a response based on this prompt.",
-        max_chars=16384,
+        max_chars=2048,
     )
 
     submitted: bool = st.form_submit_button(
@@ -48,6 +49,8 @@ with st.form("request_form"):
 if submitted:
     try:
         output = on_form_submit(selected_model_name, output_length, submitted_prompt)
-        st.write(f"Generated text: {output}")
-    except ValueError as e:
+    except CudaError as e:
+        st.error("Out of memory. Please try a smaller model, shorter prompt, or a smaller output length.")
+    except (ValueError, TypeError, RuntimeError) as e:
         st.error(e)
+    st.write(f"Generated text: {output}")
hanlde_form_submit.py CHANGED
@@ -56,11 +56,24 @@ def on_form_submit(model_name: str, output_length: int, prompt: str) -> str:
     :param output_length: The size of the groups to use.
     :param prompt: The prompt to use.
     :return: The output of the model.
+    :raises ValueError: If the model name is not supported, the output length is <= 0,
+        the prompt is empty or longer than 16384 characters,
+        or the output length is not an integer.
+        TypeError: If the output length is not an integer or the prompt is not a string.
+        RuntimeError: If the model is not found.
     """
     if model_name not in SUPPORTED_MODEL_NAMES:
         raise ValueError(f"The selected model {model_name} is not supported."
                          f"Supported models are all the models in:"
                          f" https://huggingface.co/models?pipeline_tag=text-generation&library=pytorch")
+    if output_length <= 0:
+        raise ValueError(f"The output length {output_length} must be > 0.")
+    if len(prompt) == 0:
+        raise ValueError(f"The prompt must not be empty.")
+    if not isinstance(prompt, str):
+        raise ValueError(f"The prompt must be a string.")
+    if not isinstance(output_length, int):
+        raise ValueError(f"The output length must be an integer.")
     pipeline = create_pipeline(
         model_name=model_name,
         group_size=output_length,
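A quick way to exercise the new guards, sketched under the assumption that hanlde_form_submit imports cleanly in the current environment (it pulls in the app's model-serving dependencies); the model name below is deliberately not a supported one:

    from hanlde_form_submit import on_form_submit

    try:
        # The unsupported-model check fires first, before any pipeline is created.
        on_form_submit("definitely-not-a-supported-model", 10, "Hello")
    except ValueError as error:
        print(error)  # "The selected model definitely-not-a-supported-model is not supported. ..."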