Gurnam-AI committed on
Commit
fef2faa
1 Parent(s): b5e8fa6

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +144 -0
  2. requirements.txt +1 -0
app.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import vertexai
3
+ from vertexai.generative_models import GenerativeModel, Part
4
+ import vertexai.preview.generative_models as generative_models
5
+ import os
6
+ import mimetypes
7
+ import gradio as gr
8
+ import mimetypes
9
+ import tempfile
10
+
11
+
12
def get_credentials():
    """Materialise the service-account JSON from the environment as a file.

    Reads GOOGLE_APPLICATION_CREDENTIALS_JSON and writes its contents to a
    temporary ``.json`` file, returning that file's path so it can be handed
    to the Google SDK via GOOGLE_APPLICATION_CREDENTIALS.

    Returns:
        str: path of the temporary credentials file.

    Raises:
        ValueError: if GOOGLE_APPLICATION_CREDENTIALS_JSON is not set.
    """
    raw_json = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS_JSON")
    if raw_json is None:
        raise ValueError("GOOGLE_APPLICATION_CREDENTIALS_JSON not found in environment")

    # delete=False keeps the file on disk after close: the SDK reads it later.
    handle = tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".json")
    try:
        handle.write(raw_json)
    finally:
        handle.close()

    return handle.name
23
+
24
+ os.environ["GOOGLE_APPLICATION_CREDENTIALS"]= get_credentials()
25
+
26
# MIME types accepted by the Gemini multimodal endpoint. Hoisted to module
# level as a frozenset so membership tests are O(1) and the collection is
# built once instead of on every call.
_SUPPORTED_MIME_TYPES = frozenset({
    "image/png",
    "image/jpeg",
    "audio/aac",
    "audio/flac",
    "audio/mp3",
    "audio/m4a",
    "audio/mpeg",
    "audio/mpga",
    "audio/mp4",
    "audio/opus",
    "audio/pcm",
    "audio/wav",
    "audio/webm",
    "video/x-flv",
    "video/mov",
    "video/mpeg",
    "video/mpegps",
    "video/mpg",
    "video/mp4",
    "video/webm",
    "video/wmv",
    "video/3gpp",
    "application/pdf",
})


def get_matching_format(file_path):
    """Return the file's guessed MIME type if Gemini supports it, else None.

    Args:
        file_path: path (or filename) whose extension determines the type.

    Returns:
        str | None: the MIME type when it is in the supported set, else None.
    """
    mime_type, _ = mimetypes.guess_type(file_path)
    return mime_type if mime_type in _SUPPORTED_MIME_TYPES else None
59
+
60
def encode_file(file_path):
    """Base64-encode a file whose MIME type Gemini supports.

    Args:
        file_path: path to the file to encode.

    Returns:
        str | None: the base64 payload as a UTF-8 string, or None when the
        file's MIME type is not supported.
    """
    if not get_matching_format(file_path):
        return None

    with open(file_path, "rb") as handle:
        raw_bytes = handle.read()

    return base64.b64encode(raw_bytes).decode("utf-8")
72
+
73
def multiturn_generate_content(file_path, user_query):
    """Send an uploaded file plus a text query to Gemini and return the reply.

    Args:
        file_path: path to the uploaded media file.
        user_query: the user's question about the file.

    Returns:
        str: the model's text response, or the literal "Model Error" when the
        file type is unsupported (or the file is empty).
    """
    # FIX: the original base64-encoded the file via encode_file() and then
    # immediately b64decode'd the result before handing it to Part.from_data.
    # Reading the raw bytes once is equivalent and avoids a full extra copy
    # of a potentially large media file.
    mime_type = get_matching_format(file_path)
    file_bytes = None
    if mime_type:
        with open(file_path, "rb") as handle:
            file_bytes = handle.read()

    # Falsy payload (unsupported type, or an empty file) → same error string
    # the original returned.
    if not file_bytes:
        return "Model Error"

    # NOTE(review): project id is hard-coded — consider moving to an env var.
    vertexai.init(project="imgcp-ff81e7053b072ce5", location="us-central1")

    model = GenerativeModel(
        "gemini-1.5-flash-001",
    )
    chat = model.start_chat()

    doc = Part.from_data(
        mime_type=mime_type,
        data=file_bytes,
    )
    response = chat.send_message(
        [doc, user_query],
        generation_config={
            "max_output_tokens": 8192,
            "temperature": 1,
            "top_p": 0.95,
        },
    )
    return response.text
98
+
99
+
100
+
101
+
102
# FIX: the original created a *second*, unused gr.Blocks(theme="base") inside
# the `with demo:` context, so the "base" theme was never applied. The theme
# now goes to the top-level Blocks that is actually launched. Locals are also
# renamed for clarity (the small textbox is the query input, the large one
# shows the model's reply); wiring and component configuration are unchanged.
demo = gr.Blocks(theme="base")


with demo:
    with gr.Tabs():
        with gr.TabItem("Use Cases"):
            gr.Markdown("""<h1>Gemini Multimodal</h1>""")
            gr.Markdown("""<b>This Model performs well at a variety of multimodal tasks such as visual understanding, classification, summarization, and creating content from image, audio and video. It's adept at processing visual and text inputs such as photographs, documents, infographics, and screenshots.</b>""")

            gr.Markdown("""<ul>
<li><b>Visual Information Seeking:</b> Use external knowledge combined with information extracted from the input image or video to answer questions.</li>
<li><b>Object Recognition:</b> Answer questions related to fine-grained identification of the objects in images and videos.</li>
<li><b>Digital Content Understanding:</b> Answer questions and extract information from visual content like infographics, charts, figures, tables, and web pages.</li>
<li><b>Structured Content Generation:</b> Generate responses based on multimodal inputs in formats like HTML and JSON.</li>
<li><b>Captioning and Description:</b> Generate descriptions of images and videos with varying levels of detail.</li>
<li><b>Reasoning:</b> Compositionally infer new information without memorization or retrieval.</li>
<li><b>Audio:</b> Analyze speech files for summarization, transcription, and Q&A.</li>
<li><b>Multimodal Processing:</b> Process multiple types of input media at the same time, such as video and audio input.</li>
</ul>""")

        with gr.TabItem("Upload"):
            gr.Markdown("""<b>Note: Please upload the file and submit your query in the next tab.</b>""")
            with gr.Row():
                filepath = gr.File(type='filepath')

        with gr.TabItem("Chat"):
            with gr.Column():
                response_box = gr.Textbox(lines=15, show_label=False, container=True)
                query_box = gr.Textbox(show_label=False, min_width=120)
                submit_btn = gr.Button("Submit")

    # The uploaded file and the user's query feed the model; the reply lands
    # in the large textbox above the query field.
    submit_btn.click(multiturn_generate_content, inputs=[filepath, query_box], outputs=response_box)

demo.launch(debug=True)
141
+
142
+
143
+
144
+
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ google-cloud-aiplatform