fffiloni committed
Commit da49ebc
1 Parent(s): 4a21b10

Update app.py

Files changed (1)
  1. app.py +5 -4
app.py CHANGED
@@ -74,7 +74,7 @@ from transformers import pipeline
 pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", torch_dtype=torch.bfloat16, device_map="auto")
 
 @spaces.GPU(enable_queue=True)
-def get_llm_idea(prompt):
+def get_llm_idea(user_prompt):
     agent_maker_sys = f"""
 You are an AI whose job is to help users create their own chatbot whose personality will reflect the character or scene from an image described by users.
 In particular, you need to respond succintly in a friendly tone, write a system prompt for an LLM, a catchy title for the chatbot, and a very short example user input. Make sure each part is included.
@@ -100,6 +100,8 @@ Example input: Can you suggest a good cigar brand for a man who enjoys smoking w
 <|user|>
 """
 
+    prompt = f"{instruction.strip()}\n{user_prompt}</s>"
+    #print(f"PROMPT: {prompt}")
     outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
     return outputs
 
@@ -108,11 +110,10 @@ def infer(image_in):
     gr.Info("Getting image description...")
     user_prompt = get_caption_from_MD(image_in)
 
-    prompt = f"{instruction.strip()}\n{user_prompt}</s>"
-    #print(f"PROMPT: {prompt}")
+
 
     gr.Info("Building a system according to the image caption ...")
-    outputs = get_llm_idea(prompt)
+    outputs = get_llm_idea(user_prompt)
 
 
     pattern = r'\<\|system\|\>(.*?)\<\|assistant\|\>'
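For orientation, below is a minimal, self-contained Python sketch of the call flow after this commit. It is an assumption-laden illustration, not the app's exact code: `get_caption_from_MD`, `pipe`, and the `instruction` template are stubbed, since the diff elides their definitions, and the regex handling at the end is only a plausible reading of the `pattern` line kept as context.

import re

def get_caption_from_MD(image_in):
    # Stub: the real app calls a captioning model to describe the image.
    return "a weathered sailor smoking a pipe at dusk"

def pipe(prompt, **gen_kwargs):
    # Stub mimicking the transformers text-generation pipeline's return
    # shape: by default it echoes the prompt, then the continuation.
    reply = "\n<|assistant|>\nSystem prompt: ... Title: ... Example input: ..."
    return [{"generated_text": prompt + reply}]

def get_llm_idea(user_prompt):
    # `instruction` wraps agent_maker_sys in Zephyr's chat format; its real
    # construction sits in lines the diff elides between the hunks.
    instruction = "<|system|>\n...agent_maker_sys...</s>\n<|user|>\n"
    # Prompt assembly now happens here (the point of this commit), so
    # callers pass only the raw image caption.
    prompt = f"{instruction.strip()}\n{user_prompt}</s>"
    return pipe(prompt, max_new_tokens=256, do_sample=True,
                temperature=0.7, top_k=50, top_p=0.95)

def infer(image_in):
    user_prompt = get_caption_from_MD(image_in)   # image -> caption
    outputs = get_llm_idea(user_prompt)           # caption -> chatbot spec
    # Plausible use of the `pattern` line kept as context: strip everything
    # from the echoed <|system|> block through the <|assistant|> tag,
    # leaving only the model's reply (re.DOTALL lets ".*?" span newlines).
    pattern = r'\<\|system\|\>(.*?)\<\|assistant\|\>'
    reply = re.sub(pattern, '', outputs[0]["generated_text"], flags=re.DOTALL)
    return reply.strip()

print(infer(None))

The design point of the commit shows up in the sketch: prompt assembly moves inside get_llm_idea, so infer passes only the raw caption and the Zephyr prompt formatting lives in one place.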