Abbeite committed
Commit 61f9719
Parent(s): 70af133

Update app.py

Files changed (1)
  1. app.py +27 -38
app.py CHANGED
@@ -5,49 +5,38 @@ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 st.title('Chest and Physical Limitations LLM Query')
 
 # Initialize variables
-model_name = "Abbeite/chest_and_physical_limitations2"
+model_name = "Abbeite/chest_and_physical_limitations"
+generator = None
 
-# Use st.cache to cache the model loading function
-@st.cache(allow_output_mutation=True, show_spinner=True)
+# Function to check model existence and load it
 def load_model(model_name):
     try:
         tokenizer = AutoTokenizer.from_pretrained(model_name)
         model = AutoModelForCausalLM.from_pretrained(model_name)
         generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
-        return generator, None  # Return None for the error message on success
+        return generator
     except Exception as e:
-        return None, str(e)  # Return None for the generator and the exception message on failure
-    return generator, None  # Return the generator and None for the error
-
-# Load the model and check for errors
-# Attempt to load the model and receive an error message if loading fails
-generator, error_message = load_model(model_name)
-
-# If an error message was returned, display it using st.error
-if error_message:
-    st.error(f"Failed to load model {model_name}: {error_message}")
+        st.error(f"Failed to load model {model_name}: {str(e)}")
+        return None
+
+# Load the model
+generator = load_model(model_name)
+
+# User prompt input
+if generator:  # Proceed only if the model is successfully loaded
+    user_prompt = st.text_area("Enter your prompt here:")
+
+    # Button to generate text
+    if st.button('Generate'):
+        if user_prompt:
+            # Generate response
+            try:
+                response = generator(user_prompt, max_length=50, clean_up_tokenization_spaces=True)
+                # Display the generated text
+                st.text_area("Response:", value=response[0]['generated_text'], height=250, disabled=True)
+            except Exception as e:
+                st.error(f"Error generating response: {str(e)}")
+        else:
+            st.warning("Please enter a prompt.")
 else:
-    # Proceed with the rest of your Streamlit app logic if the model was loaded successfully
-    user_input_section(generator)
-
-# User prompt input section wrapped in a function to control reruns
-def user_input_section(generator):
-    if generator:  # Check moved inside the function
-        user_prompt = st.text_area("Enter your prompt here:")
-
-        # Button to generate text
-        if st.button('Generate'):
-            if user_prompt:
-                # Generate response
-                try:
-                    response = generator(user_prompt, max_length=100, clean_up_tokenization_spaces=True)
-                    # Display the generated text
-                    st.text_area("Response:", value=response[0]['generated_text'], height=250, disabled=True)
-                except Exception as e:
-                    st.error(f"Error generating response: {str(e)}")
-            else:
-                st.warning("Please enter a prompt.")
-    else:
-        st.error("Model could not be loaded. Please ensure the model name is correct and try again.")
-
-user_input_section(generator)
+    st.error("Model could not be loaded. Please ensure the model name is correct and try again.")