rohanshaw committed on
Commit
32af491
1 Parent(s): ee68d89

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -0
app.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import pipeline, BitsAndBytesConfig
import torch

# Quantize the model to 4-bit with bfloat16 compute so the 7B LLaVA
# checkpoint fits on a single consumer GPU.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16
)

# 13B version can be loaded with llava-hf/llava-1.5-13b-hf
model_id = "llava-hf/llava-1.5-7b-hf"

# Shared image-to-text pipeline used by update_conversation() below.
pipe = pipeline("image-to-text", model=model_id, model_kwargs={"quantization_config": quantization_config})

# BUG FIX: removed the module-level smoke test
#   outputs = pipe(image, prompt=prompt, generate_kwargs={"max_new_tokens": 200})
# `image` is only defined later, inside the Gradio Blocks context, so the
# call raised NameError on startup and the app never launched.
19
+
20
def update_conversation(new_message, history, image):
    """Answer the user's latest message about the uploaded image with LLaVA.

    Rebuilds the full "USER: ... ASSISTANT: ..." prompt from the chat
    history on every turn (the pipeline is stateless), runs the shared
    `pipe`, and returns only the newly generated reply.

    Parameters
    ----------
    new_message : str
        The user's latest chat message.
    history : list
        Gradio chat history as [user, assistant] string pairs.
    image : PIL.Image.Image | None
        Image from the upload widget; the model cannot answer without one.

    Returns
    -------
    str
        The assistant's reply, or an instruction to upload an image first.
    """
    if image is None:
        return "Please upload an image first using the widget on the left"

    # Drop earlier "Please upload ..." error turns so they are not replayed
    # into the model prompt.
    conversation_starting_from_image = [
        [user, assistant]
        for user, assistant in history
        if not assistant.startswith('Please')
    ]

    prompt = "USER: <image>\n"

    # BUG FIX: the original built the filtered list above but then looped
    # over the raw `history`, re-injecting the error messages.
    for user_turn, assistant_turn in conversation_starting_from_image:
        prompt += user_turn + '\nASSISTANT: ' + assistant_turn + "\nUSER: "

    prompt = prompt + new_message + '\nASSISTANT: '

    outputs = pipe(image, prompt=prompt, generate_kwargs={"max_new_tokens": 200
                                                          #, "do_sample" : True,
                                                          #"temperature" : 0.7
                                                          })[0]['generated_text']

    # The pipeline echoes the prompt before the reply; strip it. The echoed
    # text renders "<image>" (7 chars) as a single token, hence len(prompt)-6.
    # NOTE(review): offset assumes exactly one <image> placeholder — confirm.
    return outputs[len(prompt)-6:]
40
+
41
import gradio as gr

# Assemble the UI: an image uploader above a chat panel. The uploaded image
# is forwarded to update_conversation as an extra input on every turn.
demo = gr.Blocks()
with demo:
    with gr.Row():
        image = gr.Image(type='pil', interactive=True)

    gr.ChatInterface(
        update_conversation, additional_inputs=[image]
    )

# Blocking launch; debug=True surfaces tracebacks in the console.
demo.launch(debug=True)