HuanjinYao committed on
Commit
5886f69
1 Parent(s): ad04834

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -11
app.py CHANGED
@@ -18,6 +18,12 @@ from PIL import Image
18
  from dc.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
19
 
20
 
 
 
 
 
 
 
21
 
22
 
23
  tokenizer = AutoTokenizer.from_pretrained('HuanjinYao/DenseConnector-v1.5-8B', use_fast=False)
@@ -63,7 +69,6 @@ def bot_streaming(message, history):
63
  # Handle the case where 'image' is not defined at all
64
  gr.Error("You need to upload an image for LLaVA to work.")
65
 
66
- print('process end')
67
  print('history', history)
68
 
69
 
@@ -94,8 +99,6 @@ def bot_streaming(message, history):
94
  image_tensor = image_tensor.to(dtype=torch.float16, device='cuda', non_blocking=True)
95
  inputs = inputs.to(device='cuda', non_blocking=True)
96
 
97
- print('image', image_tensor.shape)
98
- print('inputs', inputs.shape)
99
 
100
 
101
  streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
@@ -104,12 +107,9 @@ def bot_streaming(message, history):
104
 
105
 
106
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
107
-
108
- print('here?')
109
-
110
  thread.start()
111
 
112
- print('start')
113
 
114
  buffer = ""
115
  # time.sleep(0.5)
@@ -130,15 +130,28 @@ with gr.Blocks(fill_height=True, ) as demo:
130
  gr.ChatInterface(
131
  fn=bot_streaming,
132
  title="DenseConnector-v1.5-8B",
133
- # examples=[{"text": "What is on the flower?", "files": ["./bee.jpg"]},
134
- # {"text": "How to make this pastry?", "files": ["./baklava.png"]}],
135
- description="Try [DenseConnector-v1.5-8B](https://huggingface.co/HuanjinYao/DenseConnector-v1.5-8B). Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
136
  stop_btn="Stop Generation",
137
  multimodal=True,
138
  textbox=chat_input,
139
  chatbot=chatbot,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
140
  )
141
- gr.Image(label="Upload an image to start")
142
 
143
 
144
  if __name__ == "__main__":
 
18
  from dc.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
19
 
20
 
21
+ PLACEHOLDER = """
22
+ <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
23
+ <p style="font-size: 20px; margin-bottom: 2px; opacity: 0.65;">Upload an image to start the conversation.</p>
24
+ <p style="font-size: 20px; margin-bottom: 2px; opacity: 0.65;">Ask me anything...</p>
25
+ </div>
26
+ """
27
 
28
 
29
  tokenizer = AutoTokenizer.from_pretrained('HuanjinYao/DenseConnector-v1.5-8B', use_fast=False)
 
69
  # Handle the case where 'image' is not defined at all
70
  gr.Error("You need to upload an image for LLaVA to work.")
71
 
 
72
  print('history', history)
73
 
74
 
 
99
  image_tensor = image_tensor.to(dtype=torch.float16, device='cuda', non_blocking=True)
100
  inputs = inputs.to(device='cuda', non_blocking=True)
101
 
 
 
102
 
103
 
104
  streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
 
107
 
108
 
109
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
110
+
 
 
111
  thread.start()
112
 
 
113
 
114
  buffer = ""
115
  # time.sleep(0.5)
 
130
  gr.ChatInterface(
131
  fn=bot_streaming,
132
  title="DenseConnector-v1.5-8B",
133
+ examples=[{"text": "Describe this movie.", "files": ["./Interstellar.jpg"]}],
134
+ description="Try [DenseConnector-v1.5-8B](https://huggingface.co/HuanjinYao/DenseConnector-v1.5-8B). Upload an image and start chatting about it. If you don't upload an image, you will receive an error.",
 
135
  stop_btn="Stop Generation",
136
  multimodal=True,
137
  textbox=chat_input,
138
  chatbot=chatbot,
139
+ additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
140
+ additional_inputs=[
141
+ gr.Slider(minimum=0,
142
+ maximum=1,
143
+ step=0.1,
144
+ value=0.95,
145
+ label="Temperature",
146
+ render=False),
147
+ gr.Slider(minimum=128,
148
+ maximum=4096,
149
+ step=1,
150
+ value=512,
151
+ label="Max new tokens",
152
+ render=False ),
153
+ ],
154
  )
 
155
 
156
 
157
  if __name__ == "__main__":