import vertexai
from vertexai.generative_models import GenerativeModel, Image
import streamlit as st

# Optionally raise the Streamlit upload size limit before running:
# export STREAMLIT_SERVER_MAX_UPLOAD_SIZE=200

PROJECT_ID = "agileai-poc"
REGION = "us-central1"
vertexai.init(project=PROJECT_ID, location=REGION)

# st.file_uploader with accept_multiple_files=True returns a list of
# UploadedFile objects; each one has to be converted to a vertexai Image
# before it can be passed to the model.
uploaded_files = st.file_uploader(
    "Upload an image", type=["png", "jpg", "jpeg"], accept_multiple_files=True
)

generative_multimodal_model = GenerativeModel("gemini-1.0-pro-vision")

response = None
if uploaded_files:
    # Describe the first uploaded image.
    image = Image.from_bytes(uploaded_files[0].getvalue())
    response = generative_multimodal_model.generate_content(
        ["Describe the image", image]
    )
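# Optional display sketch (an addition, not part of the original script): show the
# uploaded image and the generated description in the Streamlit page instead of
# only printing to the console. It reuses the uploaded_files / response names
# defined above.
if uploaded_files and response is not None:
    st.image(uploaded_files[0].getvalue(), caption="Uploaded image")
    st.write(response.text)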
# Earlier moderation-style prompt, kept for reference:
# prompt = """Here are a few of the things that aren't allowed on this chat:
# 1. Nudity or other sexually suggestive content
# 2. Hate speech, credible threats or direct attacks on an individual or group
# 3. Content that contains self-harm or excessive violence
# 4. Fake or impostor profiles
# 5. Spam
# The following behaviour isn't allowed on this chat:
# 1. Posting things that don't follow the Community Standards (e.g. threats, hate speech, graphic violence).
# 2. Using Community to bully, impersonate or harass anyone.
# User-1: Hold onto your hearts. "Tiger 3" will make you laugh, cry, and feel everything in between. In cinemas soon.
# #love #heartbreak #feelgoodmovie
# User-2: Loved your look in "Tiger 3"! You always look so hot and sexy.
# User-1: Instead of politicians, let the monkeys govern the countries; at least they will steal only the bananas!
# User-2: User-2's comment is not allowed on this chat because it contains sexually suggestive content.
# User-1: Loved your look in "Tiger 3"! You always look so hot and sexy.
# User-2: The response was blocked because the input or response may contain descriptions of violence, sexual themes, or otherwise derogatory content. Please try rephrasing your prompt.
# User-1: Be courageous. Challenge orthodoxy. Stand up for what you believe in.
# When you are in your rocking chair talking to your grandchildren many years from now, be sure you have a good story to tell
# User-2:
# Strictly follow the prompt, analyse {response.text} and declare the output as "positive" or "negative"
# """

# Classify the sentiment of the generated description. The description is passed
# as a separate content part, so the prompt only needs to refer to it.
prompt = """
Analyse the following text, including whether it is sarcastic, and decide whether
its content is positive or negative. Declare the output as "positive" or "negative".
"""

if response is not None:
    print(response.text)
    result = generative_multimodal_model.generate_content([prompt, response.text])
    print(result.text)

# Reference example: describing landmark images loaded from URLs with a
# multimodal few-shot prompt.
# import http.client
# import typing
# import urllib.request
# from vertexai.generative_models import GenerativeModel, Image
#
# # Helper function: fetch an image over HTTP and wrap it as a vertexai Image.
# def load_image_from_url(image_url: str) -> Image:
#     with urllib.request.urlopen(image_url) as response:
#         response = typing.cast(http.client.HTTPResponse, response)
#         image_bytes = response.read()
#         return Image.from_bytes(image_bytes)
#
# # Load images from Cloud Storage URIs.
# landmark1 = load_image_from_url(
#     "https://storage.googleapis.com/cloud-samples-data/vertex-ai/llm/prompts/landmark1.png"
# )
# landmark2 = load_image_from_url(
#     "https://storage.googleapis.com/cloud-samples-data/vertex-ai/llm/prompts/landmark2.png"
# )
# landmark3 = load_image_from_url(
#     "https://storage.googleapis.com/cloud-samples-data/vertex-ai/llm/prompts/landmark3.png"
# )
#
# # Pass the multimodal few-shot prompt.
# model = GenerativeModel("gemini-1.0-pro-vision")
# response = model.generate_content(
#     [
#         landmark1,
#         "city: Rome, Landmark: the Colosseum",
#         landmark2,
#         "city: Beijing, Landmark: Forbidden City",
#         landmark3,
#     ]
# )
# print(response)
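# Sketch (an addition, with assumptions): because the uploader accepts multiple
# files, the same describe-then-classify flow could be applied to every upload
# instead of only the first one. Left commented out so it does not duplicate the
# API calls made above; it reuses uploaded_files, generative_multimodal_model and
# prompt from earlier in the script.
# for uploaded in uploaded_files:
#     img = Image.from_bytes(uploaded.getvalue())
#     description = generative_multimodal_model.generate_content(["Describe the image", img])
#     sentiment = generative_multimodal_model.generate_content([prompt, description.text])
#     st.write(uploaded.name, sentiment.text)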