Techdude01 committed (verified)
Commit: 7195f7c · Parent: 95e9cc3

Update app.py

Files changed (1):
  1. app.py +12 -10
app.py CHANGED
@@ -9,27 +9,29 @@ accelerator = Accelerator()
 # Print a description of the current configuration
 print("Accelerator State:", accelerator.state)
 
-# Define the path to your custom model
+# Define the path to your custom model or use a pre-trained Hugging Face model
 model_path = (
     "../Models/models--sshleifer--distilbart-cnn-12-6/snapshots"
     "/a4f8f3ea906ed274767e9906dbaede7531d660ff"
 )
 
-# Initialize the text summarization pipeline
+# Initialize the text summarization pipeline for CPU usage
 try:
     text_summary = pipeline(
         "summarization",
-        model=model_path,
-        torch_dtype=torch.bfloat16,  # Use bfloat16 for better performance on supported hardware
-        device=0 if torch.cuda.is_available() else -1  # Use GPU if available
+        model=model_path,  # Use the custom model path
+        device=-1  # Force usage of CPU
     )
 except Exception as e:
     print(f"Error initializing the summarization pipeline: {e}")
-    raise
+    print("Switching to the default model from Hugging Face.")
+    # Fallback to default model from Hugging Face if the custom model fails
+    text_summary = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6", device=-1)
 
 # Define the Gradio interface function
 def summary(input_text):
     try:
+        # Generate summary using the text_summary pipeline
         output = text_summary(input_text)
         return output[0]['summary_text']
     except Exception as e:
@@ -41,11 +43,11 @@ gr.close_all()
 # Define the Gradio interface
 demo = gr.Interface(
     fn=summary,
-    inputs=[gr.Textbox(label="Input text to summarize", lines=6)],
-    outputs=[gr.Textbox(label="Summarized text", lines=4)],
+    inputs=gr.Textbox(label="Input Text to Summarize", lines=6, placeholder="Enter text here..."),
+    outputs=gr.Textbox(label="Summarized Text", lines=4),
     title="GenAIProject01: Text Summarizer",
-    description="THIS APPLICATION SUMMARIZE INPUT TEXT USING A PRE-TRAINED MODEL."
+    description="THIS APPLICATION WILL BE USED TO SUMMARIZE INPUT TEXT USING A PRE-TRAINED MODEL."
 )
 
 # Launch the Gradio app
-demo.launch()
+demo.launch(share=True)
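
For a quick sanity check of this change, a minimal sketch (not part of the commit, assuming transformers and torch are installed) that exercises the CPU-only initialization and the fallback path; the local snapshot path is copied from app.py and may not exist on your machine, which is exactly when the except branch should take over:

# Minimal sketch: CPU-only summarization pipeline with Hub fallback
from transformers import pipeline

model_path = (
    "../Models/models--sshleifer--distilbart-cnn-12-6/snapshots"
    "/a4f8f3ea906ed274767e9906dbaede7531d660ff"
)

try:
    # Same initialization as the committed code: local snapshot, CPU only (device=-1)
    text_summary = pipeline("summarization", model=model_path, device=-1)
except Exception as e:
    print(f"Error initializing the summarization pipeline: {e}")
    # Fallback mirrors the commit: load the same model from the Hugging Face Hub
    text_summary = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6", device=-1)

sample = (
    "Hugging Face pipelines bundle tokenization, model inference and "
    "post-processing behind a single call, which keeps app.py short."
)
print(text_summary(sample, max_length=40, min_length=10)[0]["summary_text"])

Here device=-1 keeps the pipeline on the CPU, and in the Gradio part of the diff demo.launch(share=True) additionally exposes the app through a temporary public share link instead of only on localhost.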