Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -17,7 +17,6 @@ from llama_index.core import SimpleDirectoryReader
|
|
17 |
from llama_index.core.indices.multi_modal.base import MultiModalVectorStoreIndex
|
18 |
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
|
19 |
|
20 |
-
os.environ["OPENAI_API_KEY"] = "sk-REDACTED"  # NOTE(review): a live-looking OpenAI secret key was committed here and is now public in the repo history — redacted in this document; the key must be revoked/rotated, not merely removed from the file
|
21 |
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
|
22 |
chat_llm = ChatOpenAI(temperature = 0.5, model = 'gpt-4-turbo')
|
23 |
|
@@ -47,9 +46,10 @@ qa_prompt = ChatPromptTemplate.from_messages(
|
|
47 |
)
|
48 |
question_answer_chain = create_stuff_documents_chain(chat_llm, qa_prompt)
|
49 |
|
50 |
-
|
51 |
-
|
52 |
-
|
|
|
53 |
image_store = QdrantVectorStore(client=qd_client, collection_name="image_collection")
|
54 |
storage_context = StorageContext.from_defaults(image_store=image_store)
|
55 |
openai_mm_llm = OpenAIMultiModal(model="gpt-4o", max_new_tokens=1500)
|
@@ -96,8 +96,8 @@ with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
|
|
96 |
doc_label = gr.Dropdown(["LLaVA", "Interior"], label="Select a document:")
|
97 |
chatbot = gr.ChatInterface(fn=response, additional_inputs=[doc_label], fill_height=True)
|
98 |
with gr.Column(scale=1):
|
99 |
-
sample_1 = "https://i.
|
100 |
-
sample_2 = "https://
|
101 |
sample_3 = "https://blog.kakaocdn.net/dn/nqcUB/btrzYjTgjWl/jFFlIBrdkoKv4jbSyZbiEk/img.jpg"
|
102 |
gallery = gr.Gallery(label="Retrieved images",
|
103 |
show_label=True, preview=True,
|
|
|
17 |
from llama_index.core.indices.multi_modal.base import MultiModalVectorStoreIndex
|
18 |
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
|
19 |
|
|
|
20 |
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
|
21 |
chat_llm = ChatOpenAI(temperature = 0.5, model = 'gpt-4-turbo')
|
22 |
|
|
|
46 |
)
|
47 |
question_answer_chain = create_stuff_documents_chain(chat_llm, qa_prompt)
|
48 |
|
49 |
+
pg_password = os.getenv("PG_PASSWORD")
|
50 |
+
aws_ec2_ip = os.getenv("AWS_EC2_IP")
|
51 |
+
pg_connection = f"postgresql+psycopg://postgres:{pg_password}@{aws_ec2_ip}:5432/postgres"
|
52 |
+
qd_client = qdrant_client.QdrantClient(path="qdrant_db")
|
53 |
image_store = QdrantVectorStore(client=qd_client, collection_name="image_collection")
|
54 |
storage_context = StorageContext.from_defaults(image_store=image_store)
|
55 |
openai_mm_llm = OpenAIMultiModal(model="gpt-4o", max_new_tokens=1500)
|
|
|
96 |
doc_label = gr.Dropdown(["LLaVA", "Interior"], label="Select a document:")
|
97 |
chatbot = gr.ChatInterface(fn=response, additional_inputs=[doc_label], fill_height=True)
|
98 |
with gr.Column(scale=1):
|
99 |
+
sample_1 = "https://i.pinimg.com/originals/e3/44/d7/e344d7631cd515edd36cc6930deaedec.jpg"
|
100 |
+
sample_2 = "https://live.staticflickr.com/5307/5765340890_e386f42a99_b.jpg"
|
101 |
sample_3 = "https://blog.kakaocdn.net/dn/nqcUB/btrzYjTgjWl/jFFlIBrdkoKv4jbSyZbiEk/img.jpg"
|
102 |
gallery = gr.Gallery(label="Retrieved images",
|
103 |
show_label=True, preview=True,
|