Abbeite committed
Commit a071ebf
Parent: 9428921

Update app.py

Files changed (1):
  1. app.py (+5 -5)
app.py CHANGED
@@ -2,13 +2,13 @@ import streamlit as st
 import logging
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
-# Set logging verbosity to CRITICAL to reduce noise
-logging.set_verbosity(logging.CRITICAL)
+# Set the logger to display only CRITICAL messages
+logging.basicConfig(level=logging.CRITICAL)
 
 # Cache the model and tokenizer to avoid reloading it every time
 @st.cache(allow_output_mutation=True, suppress_st_warning=True)
 def load_model():
-    model_name = "Abbeite/chest_and_physical_limitations2"  # Replace with your model name
+    model_name = "your-model-name"  # Replace with your actual model name
     tokenizer = AutoTokenizer.from_pretrained(model_name)
     model = AutoModelForCausalLM.from_pretrained(model_name)
     return model, tokenizer
@@ -17,8 +17,8 @@ model, tokenizer = load_model()
 
 # Function to generate text with the model
 def generate_text(prompt):
-    formatted_prompt = f"[INST] {prompt} [/INST]"  # Format the prompt
-    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=500)
+    formatted_prompt = f"[INST] {prompt} [/INST]"  # Format the prompt according to your specification
+    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=500)
     result = pipe(formatted_prompt)
     return result[0]['generated_text']
 
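
Note that logging.basicConfig(level=logging.CRITICAL) only configures Python's root logger, and the line it replaces, logging.set_verbosity, does not exist in the standard library (that name belongs to transformers' own logging helpers). If the intent is also to silence the transformers library's log output, a minimal sketch under that assumption (an aside, not part of this commit):

    from transformers import logging as hf_logging

    # Silence transformers' own loggers; only errors are shown
    hf_logging.set_verbosity_error()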
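
The diff stops at line 24, so the Streamlit UI that presumably calls generate_text further down app.py is not shown. A minimal sketch of how it might be wired up, with assumed widget labels (hypothetical, not part of this commit):

    # Hypothetical UI wiring; the title and label are illustrative assumptions
    st.title("Text generation demo")
    user_prompt = st.text_input("Enter your prompt:")
    if user_prompt:
        st.write(generate_text(user_prompt))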