Docfile committed on
Commit
7d9bce7
1 Parent(s): 6d9d7d5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -9
app.py CHANGED
@@ -1,28 +1,56 @@
1
  import gradio as gr
2
- import google.generativeai as genai
3
 
4
  import os
5
  token=os.environ.get("TOKEN")
6
- genai.configure(api_key=token)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
  # Chargez l'image
9
- model = genai.GenerativeModel(model_name="gemini-pro-vision")
10
- model_simple = genai.GenerativeModel(model_name="gemini-pro")
 
 
11
 
12
  e =""
13
  # Fonction pour générer le contenu
14
- def generate_content(pro,image):
15
  global e
16
 
17
  if not image:
18
- response = model_simple.generate_content(pro)
 
19
  e = response.text
20
  print(e)
21
 
22
  else:
23
- response = model.generate_content([pro, image])
24
- print(response.text)
25
- e = response.text
 
26
  return e
27
 
28
 
 
1
  import gradio as gr
2
+
3
 
4
  import os
5
  token=os.environ.get("TOKEN")
6
+ os.environ["GOOGLE_API_KEY"] = token
7
+
8
+ safe = [
9
+ {
10
+ "category": "HARM_CATEGORY_HARASSMENT",
11
+ "threshold": "BLOCK_NONE",
12
+ },
13
+ {
14
+ "category": "HARM_CATEGORY_HATE_SPEECH",
15
+ "threshold": "BLOCK_NONE",
16
+ },
17
+ {
18
+ "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
19
+ "threshold": "BLOCK_NONE",
20
+ },
21
+ {
22
+ "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
23
+ "threshold": "BLOCK_NONE",
24
+ },
25
+ ]
26
+ from llama_index.llms.gemini import Gemini
27
+ from llama_index.multi_modal_llms.gemini import GeminiMultiModal
28
+
29
+ from llama_index.core.multi_modal_llms.generic_utils import load_image_urls
30
+
31
 
32
  # Chargez l'image
33
+
34
+ gemini_pro = GeminiMultiModal(model_name="models/gemini-pro-vision")
35
+
36
+ llm = Gemini(model="models/gemini-pro")
37
 
38
  e =""
39
  # Fonction pour générer le contenu
40
+ async def generate_content(pro,image):
41
  global e
42
 
43
  if not image:
44
+ response = await llm.acomplete(pro,safety_settings=safe)
45
+ print(response)
46
  e = response.text
47
  print(e)
48
 
49
  else:
50
+ #response = model.generate_content([pro, image])
51
+ response_acomplete = await gemini_pro.acomplete(prompt=pro, image_documents=image,)
52
+ print(response_acomplete)
53
+ e = response_acomplete
54
  return e
55
 
56