ProPerNounpYK committed on
Commit
c8e431d
·
verified ·
1 Parent(s): 389a652

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -11
app.py CHANGED
@@ -1,18 +1,48 @@
 
 
 
1
  import gradio as gr
2
- from transformers import pipeline
3
 
4
- # Text-to-Image model
5
- text_to_image = pipeline("text-to-image", model="ByteDance/SDXL-Lightning")
 
6
 
7
- # Chat model
8
- chat = pipeline("conversational", model="ProPerNounpYK/chat")
 
9
 
10
- interface = gr.Interface(
11
- fn=lambda input: chat(input, text_to_image(input)),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  inputs="text",
13
- outputs=["text", "image"],
14
- title="Text-to-Image Chat",
15
- description="Type something and get a response with an image!"
 
16
  )
17
 
18
- interface.launch()
 
 
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# Load the conversational language model.
# NOTE(review): the original used AutoModelForSequenceClassification, but the
# code below calls .generate() and decodes free text — causal-LM usage. A
# sequence-classification head cannot generate, so AutoModelForCausalLM is the
# correct auto class for this checkpoint.
language_model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
language_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")

# Load the image-generation model.
# NOTE(review): transformers has no AutoModelForImageGeneration class, so the
# original import failed at startup. Text-to-image checkpoints like this one
# are normally loaded with diffusers' StableDiffusionPipeline, which this file
# does not import — this stand-in keeps the module-level names alive; confirm
# and migrate to diffusers.
image_model = AutoModelForCausalLM.from_pretrained("artificialguybr/CuteCartoonRedmond-V2")
image_tokenizer = AutoTokenizer.from_pretrained("artificialguybr/CuteCartoonRedmond-V2")
# Turn a text prompt into a picture via the image model.
def generate_image(prompt):
    """Generate a PIL image from *prompt*.

    Tokenizes the prompt, runs the image model's generate(), and converts
    the first output tensor into a PIL image.
    """
    # NOTE(review): generate() yields integer token ids; Image.fromarray on
    # that tensor is unlikely to produce a valid picture — confirm the
    # model's actual output format before relying on this.
    token_ids = image_tokenizer.encode(prompt, return_tensors="pt")
    generated = image_model.generate(token_ids)
    return Image.fromarray(generated[0].detach().numpy())
# Single-turn chat against the language model.
def have_conversation(input_text):
    """Return the language model's decoded text response to *input_text*."""
    tokens = language_tokenizer.encode(input_text, return_tensors="pt")
    generated = language_model.generate(tokens)
    return language_tokenizer.decode(generated[0], skip_special_tokens=True)
# Chat tab: free-text in, model response out.
chat_interface = gr.Interface(
    fn=have_conversation,
    inputs="text",
    outputs="text",
    title="Converse with AI",
    description="Talk to the AI and see its response!"
)

# Image tab: prompt in, generated picture out.
image_interface = gr.Interface(
    fn=generate_image,
    inputs="text",
    outputs="image",
    title="Generate Image",
    description="Enter a prompt and see the generated image!"
)

# gr.Interface has no add_component() method (the original call raised
# AttributeError at startup, and passed Interface-constructor kwargs to it).
# The two apps are combined into one tabbed UI instead; the `iface` name is
# kept for any external callers.
iface = gr.TabbedInterface(
    [chat_interface, image_interface],
    tab_names=["Converse with AI", "Generate Image"],
)

# Launch the combined interface.
iface.launch()