symlink committed on
Commit 4b979a9
1 Parent(s): 25692b8
Files changed (1)
app.py +166 -20
app.py CHANGED
@@ -1,28 +1,174 @@
-from transformers import pipeline, Conversation
 import gradio as gr
-
-chatbot = pipeline(model="llms-lab/LLaVA-NeXT-Video-32BQwen")
-
-message_list = []
-response_list = []
-
-def vanilla_chatbot(message, history, file=None):
-    if file is not None:
-        # Handle file processing here
-        file_content = file.read()
-        message += f"\n\nFile content:\n{file_content.decode('utf-8')}"
-
-    conversation = Conversation(text=message, past_user_inputs=message_list, generated_responses=response_list)
-    conversation = chatbot(conversation)
-
-    return conversation.generated_responses[-1]
-
-chatbot_interface = gr.Interface(
-    fn=vanilla_chatbot,
-    inputs=[gr.inputs.Textbox(lines=2, placeholder="Enter your message here..."), gr.inputs.File(optional=True)],
-    outputs="text",
-    title="Vanilla Chatbot",
-    description="Enter text to start chatting or upload a file."
-)
-
-chatbot_interface.launch()
+#from transformers import pipeline, Conversation
+#import gradio as gr
+
+#chatbot = pipeline(model="llava-hf/LLaVA-NeXT-Video-7B-hf")
+#message_list = []
+#response_list = []
+
+#def vanilla_chatbot(message, history, file=None):
+#    if file is not None:
+#        # Handle file processing here
+#        file_content = file.read()
+#        message += f"\n\nFile content:\n{file_content.decode('utf-8')}"
+
+#    conversation = Conversation(text=message, past_user_inputs=message_list, generated_responses=response_list)
+#    conversation = chatbot(conversation)
+
+#    return conversation.generated_responses[-1]
+
+#chatbot_interface = gr.Interface(
+#    fn=vanilla_chatbot,
+#    inputs=[gr.inputs.Textbox(lines=2, placeholder="Enter your message here..."), gr.inputs.File(optional=True)],
+#    outputs="text",
+#    title="Vanilla Chatbot",
+#    description="Enter text to start chatting or upload a file."
+#)
+
+#chatbot_interface.launch()
+
 import gradio as gr
+import numpy as np
+import random
+from diffusers import DiffusionPipeline
+import torch
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
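+
+# Load SDXL-Turbo once at startup: fp16 weights with xformers attention on GPU, default fp32 on CPU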
+if torch.cuda.is_available():
+    torch.cuda.max_memory_allocated(device=device)
+    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
+    pipe.enable_xformers_memory_efficient_attention()
+    pipe = pipe.to(device)
+else:
+    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
+    pipe = pipe.to(device)
+
+MAX_SEED = np.iinfo(np.int32).max
+MAX_IMAGE_SIZE = 1024
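+
+# Generate one image per call; seeding the torch.Generator makes a given seed reproducible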
+def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
+
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+
+    generator = torch.Generator().manual_seed(seed)
+
+    image = pipe(
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps,
+        width=width,
+        height=height,
+        generator=generator
+    ).images[0]
+
+    return image
+
+examples = [
+    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+    "A dog named Kendrick riding a green horse",
+    "A delicious ceviche cheesecake slice",
+]
+
+css = """
+#col-container {
+    margin: 0 auto;
+    max-width: 520px;
+}
+"""
+
+if torch.cuda.is_available():
+    power_device = "GPU"
+else:
+    power_device = "CPU"
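+
+# Build the Gradio UI: a prompt row, the result image, and an accordion of advanced settings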
+with gr.Blocks(css=css) as demo:
+
+    with gr.Column(elem_id="col-container"):
+        gr.Markdown(f"""
+        # Text-to-Image Gradio Template
+        Currently running on {power_device}.
+        """)
+
+        with gr.Row():
+
+            prompt = gr.Text(
+                label="Prompt",
+                show_label=False,
+                max_lines=1,
+                placeholder="Enter your prompt",
+                container=False,
+            )
+
+            run_button = gr.Button("Run", scale=0)
+
+        result = gr.Image(label="Result", show_label=False)
+
+        with gr.Accordion("Advanced Settings", open=False):
+
+            negative_prompt = gr.Text(
+                label="Negative prompt",
+                max_lines=1,
+                placeholder="Enter a negative prompt",
+                visible=False,
+            )
+
+            seed = gr.Slider(
+                label="Seed",
+                minimum=0,
+                maximum=MAX_SEED,
+                step=1,
+                value=0,
+            )
+
+            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+            with gr.Row():
+
+                width = gr.Slider(
+                    label="Width",
+                    minimum=256,
+                    maximum=MAX_IMAGE_SIZE,
+                    step=32,
+                    value=512,
+                )
+
+                height = gr.Slider(
+                    label="Height",
+                    minimum=256,
+                    maximum=MAX_IMAGE_SIZE,
+                    step=32,
+                    value=512,
+                )
+
+            with gr.Row():
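+
+                # SDXL-Turbo is distilled to sample with guidance_scale=0.0 and very few steps, hence the low defaults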
+                guidance_scale = gr.Slider(
+                    label="Guidance scale",
+                    minimum=0.0,
+                    maximum=10.0,
+                    step=0.1,
+                    value=0.0,
+                )
+
+                num_inference_steps = gr.Slider(
+                    label="Number of inference steps",
+                    minimum=1,
+                    maximum=12,
+                    step=1,
+                    value=2,
+                )
+
+        gr.Examples(
+            examples=examples,
+            inputs=[prompt]
+        )
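+
+    # Wire the Run button: a click calls infer() with the control values and shows the returned image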
+    run_button.click(
+        fn=infer,
+        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+        outputs=[result]
+    )
+
+demo.queue().launch()