or4cl3ai committed on
Commit
6152c1c
1 Parent(s): ab60a70

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -4
app.py CHANGED
@@ -1,17 +1,34 @@
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM
 
 
 
3
 
4
  # Load the SquanchNastyAI model from Hugging Face Spaces
5
  model = AutoModelForCausalLM.from_pretrained("or4cl3ai/SquanchNastyAI")
 
 
6
 
7
  # Define a function to generate a text response to a prompt
8
  def generate_response(prompt):
9
- inputs = model.prepare_inputs_for_generation(prompt, max_length=1024)
10
- outputs = model.generate(**inputs)
11
- return outputs[0]
 
 
 
12
 
13
  # Create a Gradio interface for the SquanchNastyAI model
14
- interface = gr.Interface(fn=generate_response, inputs="text", outputs="text")
 
 
 
 
 
 
 
 
 
15
 
16
  # Launch the Gradio interface
17
  interface.launch()
 
import gradio as gr
from transformers import AutoModelForCausalLM, pipeline
from PIL import Image

# Load the SquanchNastyAI model from Hugging Face Spaces
model = AutoModelForCausalLM.from_pretrained("or4cl3ai/SquanchNastyAI")

# Initialize the pipeline for image generation.
# FIX: `pipeline` lives in `transformers`, not `huggingface_hub` — the
# original `from huggingface_hub import pipeline` raises ImportError.
# NOTE(review): "image-generation" is not a recognized transformers pipeline
# task, and google/vit-base-patch16-384 is an image-*classification* model,
# not a text-to-image one. A working text-to-image setup would need e.g.
# diffusers' StableDiffusionPipeline — TODO confirm intended task/model.
image_pipeline = pipeline("image-generation", model="google/vit-base-patch16-384")
# Define a function to generate a text response to a prompt
def generate_response(prompt):
    """Generate a text reply for *prompt* with the SquanchNastyAI model.

    The prompt is tokenized, fed to ``model.generate``, and the resulting
    token ids are decoded back into a string for Gradio's text output.
    """
    # FIX: model.generate expects token ids, not a raw string, and its
    # output is a tensor of ids — the original returned that tensor
    # directly instead of readable text.
    from transformers import AutoTokenizer  # local import; file already depends on transformers
    tokenizer = AutoTokenizer.from_pretrained("or4cl3ai/SquanchNastyAI")
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(**inputs, max_length=1024)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
# Define a function to generate an image from a prompt
def generate_image(prompt):
    """Return the image produced for *prompt* by the module-level pipeline."""
    return image_pipeline(prompt)
# Create a Gradio interface for the SquanchNastyAI model.
# FIX: gr.Interface accepts neither `components=` nor `layout=` keyword
# arguments, and a single-return fn cannot feed outputs=["text", "image"] —
# the original call raised a TypeError before the app could start. A
# TabbedInterface exposes both the text and the image generators instead.
text_tab = gr.Interface(fn=generate_response, inputs="text", outputs="text")
image_tab = gr.Interface(fn=generate_image, inputs="text", outputs="image")
interface = gr.TabbedInterface([text_tab, image_tab], tab_names=["Text", "Image"])

# Launch the Gradio interface
interface.launch()