SaiLochana committed on
Commit
d548309
1 Parent(s): ee0ebbc

Upload 2 files

Browse files
Files changed (2) hide show
  1. color_palette.py +36 -0
  2. image.py +88 -0
color_palette.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import vertexai
from vertexai.generative_models import GenerativeModel, Image

# Vertex AI project configuration for the color-palette demo.
PROJECT_ID = "agileai-poc"
REGION = "us-central1"
vertexai.init(project=PROJECT_ID, location=REGION)

# NOTE(review): hard-coded local path — works only on the author's machine.
# Replace with a CLI argument or environment variable before sharing.
IMAGE_FILE = "/home/lenovo/Pictures/palette_samples/3.jpg"
image = Image.load_from_file(IMAGE_FILE)

generative_multimodal_model = GenerativeModel("gemini-1.0-pro-vision")

# Prompt asking for object -> RGB tuple pairs.
# (The original literals opened with `""""`, injecting a stray leading `"` and
# an unmatched closing `"` into the text sent to the model; cleaned up here.)
prompt = """Describe the desired theme, or brand, mood, and palette; detect objects and generate the appealing color palettes on the image with the respective object and RGB code.
output format should be "object name :rgb code"

example:
tree leaf: (0,255,0)
pillow :(12,0,230)
"""

# Prompt asking for object -> color name + hex code.
prompt2 = """Describe the desired theme, or brand, mood, and palette; detect objects and generate the appealing color palettes on the image with the respective object, color name and hexacode.
output format should be "object name : color :hexacode"

example:
object_name :color :(hexacode)
"""

print("\n hexacode ************************************** \n")
response2 = generative_multimodal_model.generate_content([prompt2, image])
print(response2.text)
image.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import vertexai
from vertexai.generative_models import GenerativeModel, Image
import streamlit as st

# Vertex AI project configuration for the image-sentiment demo.
PROJECT_ID = "agileai-poc"
REGION = "us-central1"
vertexai.init(project=PROJECT_ID, location=REGION)

generative_multimodal_model = GenerativeModel("gemini-1.0-pro-vision")

# BUG FIX: with accept_multiple_files=True, st.file_uploader returns a LIST of
# UploadedFile objects (empty until the user uploads something). The original
# passed that list straight into generate_content, which expects image parts,
# so the script crashed on startup and could never process an upload.
uploaded_files = st.file_uploader(
    "upload file", type=["png", "jpg", "jpeg"], accept_multiple_files=True
)

# Classification prompt. The original embedded `{response.text}` in a plain
# (non-f) string, so the braces were sent to the model literally; the
# description is passed as a separate content part instead.
prompt = """ Analyse the provided description and understand its content,
whether the content is sarcastic, positive or negative etc., and
declare the output as "positive" or "negative"
"""

# Process every uploaded image: describe it, then classify the description.
for uploaded in uploaded_files:
    # Convert the raw uploaded bytes into a Vertex AI Image part.
    image = Image.from_bytes(uploaded.read())
    response = generative_multimodal_model.generate_content(
        ["Describe the image", image]
    )
    print(response.text)
    result = generative_multimodal_model.generate_content([prompt, response.text])
    print(result.text)