import sys
import os

# this is needed to import classes from the API. it will be removed when the worker is refactored
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))

import time
import pika
import json
import pinecone
import logging
import weaviate
import redis
import lancedb
import pymongo
import pyarrow as pa
import numpy as np
import worker.config as config
import services.database.batch_service as batch_service
import services.database.job_service as job_service
from services.database.database import get_db, safe_db_operation
from shared.job_status import JobStatus
from shared.batch_status import BatchStatus
from qdrant_client import QdrantClient
from qdrant_client.models import PointStruct
from pymilvus import Collection, connections
from shared.embeddings_type import EmbeddingsType
from shared.vector_db_type import VectorDBType
from shared.utils import generate_uuid_from_tuple
from urllib.parse import quote_plus
from services.rabbitmq.rabbit_service import create_connection_params
from pika.exceptions import AMQPConnectionError

# logging.basicConfig is a no-op after the first call, so the error log is
# attached as an extra handler instead of calling basicConfig twice
logging.basicConfig(filename='./vdb-log.txt', level=logging.INFO)
error_handler = logging.FileHandler('./vdb-errors.txt')
error_handler.setLevel(logging.ERROR)
logging.getLogger().addHandler(error_handler)


def upload_batch(batch_id, chunks_with_embeddings):
    batch = safe_db_operation(batch_service.get_batch, batch_id)
    if batch.batch_status == BatchStatus.FAILED:
        safe_db_operation(batch_service.update_batch_retry_count, batch.id, batch.retries + 1)
        logging.info(f"Retrying vector db upload of batch {batch.id}")

    batch = safe_db_operation(batch_service.get_batch, batch_id)
    vectors_uploaded = write_embeddings_to_vector_db(chunks_with_embeddings, batch.vector_db_metadata, batch.id, batch.job_id)

    if vectors_uploaded:
        status = safe_db_operation(batch_service.update_batch_status_with_successful_minibatch, batch.id)
        update_batch_and_job_status(batch.job_id, status, batch.id)
    else:
        update_batch_and_job_status(batch.job_id, BatchStatus.FAILED, batch.id)


def write_embeddings_to_vector_db(chunks, vector_db_metadata, batch_id, job_id):
    # NOTE: the legacy code expects a list of tuples, (text_chunk, embedding) of form (str, list[float])
    text_embeddings_list = [(chunk['text'], chunk['vector']) for chunk in chunks]
    job = safe_db_operation(job_service.get_job, job_id)
    source_filename = job.source_filename

    if vector_db_metadata.vector_db_type == VectorDBType.PINECONE:
        upsert_list = create_pinecone_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename)
        return write_embeddings_to_pinecone(upsert_list, vector_db_metadata)
    elif vector_db_metadata.vector_db_type == VectorDBType.QDRANT:
        upsert_list = create_qdrant_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename)
        return write_embeddings_to_qdrant(upsert_list, vector_db_metadata)
    elif vector_db_metadata.vector_db_type == VectorDBType.WEAVIATE:
        return write_embeddings_to_weaviate(text_embeddings_list, vector_db_metadata, batch_id, job_id, source_filename)
    elif vector_db_metadata.vector_db_type == VectorDBType.MILVUS:
        upsert_list = create_milvus_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename)
        return write_embeddings_to_milvus(upsert_list, vector_db_metadata)
    elif vector_db_metadata.vector_db_type == VectorDBType.REDIS:
        upsert_list = create_redis_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename)
        return write_embeddings_to_redis(upsert_list, vector_db_metadata)
    elif vector_db_metadata.vector_db_type == VectorDBType.LANCEDB:
        upsert_list = create_lancedb_source_chunks(text_embeddings_list, batch_id, job_id, source_filename)
        return write_embeddings_to_lancedb(upsert_list, batch_id)
    elif vector_db_metadata.vector_db_type == VectorDBType.MONGODB:
        upsert_list = create_mongodb_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename)
        return write_embeddings_to_mongodb(upsert_list, vector_db_metadata)
    else:
        logging.error('Unsupported vector DB type: %s', vector_db_metadata.vector_db_type.value)


def create_mongodb_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename):
    upsert_list = []
    for i, (source_text, embedding) in enumerate(text_embeddings_list):
        upsert_list.append(
            {"_id": generate_uuid_from_tuple((job_id, batch_id, i)),
             "values": embedding,
             "source_text": source_text,
             "source_document": source_filename})
    return upsert_list


def write_embeddings_to_mongodb(upsert_list, vector_db_metadata):
    mongo_conn_uri = vector_db_metadata.environment
    mongo_password = quote_plus(os.getenv('VECTOR_DB_KEY'))
    mongo_conn_uri = mongo_conn_uri.replace("<password>", mongo_password)

    mongo_client = pymongo.MongoClient(mongo_conn_uri)
    db_name, collection = vector_db_metadata.index_name.split(".")
    db = mongo_client[db_name]

    try:
        db.command("ping")
    except Exception as e:
        logging.error(f"Error connecting to MongoDB via python client: {e}")
        return None

    if collection not in db.list_collection_names():
        logging.error(f"Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}")
        return None

    index = db.get_collection(collection)

    logging.info(f"Starting MongoDB upsert for {len(upsert_list)} vectors")

    batch_size = config.PINECONE_BATCH_SIZE
    vectors_uploaded = 0

    for i in range(0, len(upsert_list), batch_size):
        try:
            upsert_batch = upsert_list[i:i + batch_size]
            upsert_response = index.insert_many(upsert_batch)
            vectors_uploaded += len(upsert_batch)
        except Exception as e:
            logging.error('Error writing embeddings to Mongo: %s', e)
            return None

    logging.info(f"Successfully uploaded {vectors_uploaded} vectors to MongoDB")
    return vectors_uploaded


def create_pinecone_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename):
    upsert_list = []
    for i, (source_text, embedding) in enumerate(text_embeddings_list):
        upsert_list.append(
            {"id": generate_uuid_from_tuple((job_id, batch_id, i)),
             "values": embedding,
             "metadata": {"source_text": source_text, "source_document": source_filename}})
    return upsert_list


def write_embeddings_to_pinecone(upsert_list, vector_db_metadata):
    pinecone_api_key = os.getenv('VECTOR_DB_KEY')
    pinecone.init(api_key=pinecone_api_key, environment=vector_db_metadata.environment)
    index = pinecone.GRPCIndex(vector_db_metadata.index_name)
    if not index:
        logging.error(f"Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}")
        return None

    logging.info(f"Starting pinecone upsert for {len(upsert_list)} vectors")

    batch_size = config.PINECONE_BATCH_SIZE
    vectors_uploaded = 0

    for i in range(0, len(upsert_list), batch_size):
        try:
            upsert_response = index.upsert(vectors=upsert_list[i:i + batch_size])
            vectors_uploaded += upsert_response.upserted_count
        except Exception as e:
            logging.error('Error writing embeddings to pinecone: %s', e)
            return None

    logging.info(f"Successfully uploaded {vectors_uploaded} vectors to pinecone")
    return vectors_uploaded


def create_redis_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename):
    ids = []
    source_texts = []
    source_documents = []
    embeddings = []
    for i, (source_text, embedding) in enumerate(text_embeddings_list):
        ids.append(generate_uuid_from_tuple((job_id, batch_id, i)))
        source_texts.append(source_text)
        embeddings.append(embedding)
        source_documents.append(source_filename)

    return [ids, source_texts, embeddings, source_documents]


def write_embeddings_to_redis(upsert_list, vector_db_metadata):
    redis_client = redis.from_url(url=vector_db_metadata.environment, password=os.getenv('VECTOR_DB_KEY'), decode_responses=True)

    try:
        redis_client.ft(vector_db_metadata.index_name).info()
    except redis.exceptions.ResponseError as e:
        if "Unknown Index name" in str(e):
            logging.error(f"Index {vector_db_metadata.index_name} does not exist at redis URL {vector_db_metadata.environment}")
            return None

    logging.info(f"Starting redis upsert for {len(upsert_list)} vectors")

    redis_pipeline = redis_client.pipeline()

    for i in range(0, len(upsert_list[0])):
        key = f'{vector_db_metadata.collection}:{upsert_list[0][i]}'
        obj = {"source_data": upsert_list[1][i],
               "embeddings": np.array(upsert_list[2][i]).tobytes(),
               "source_document": upsert_list[3][i]}
        redis_pipeline.hset(key, mapping=obj)

    res = redis_pipeline.execute()

    logging.info(f"Successfully uploaded {len(res)} vectors to redis")
    return len(res)


def create_qdrant_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename):
    upsert_list = []
    for i, (source_text, embedding) in enumerate(text_embeddings_list):
        upsert_list.append(
            PointStruct(
                id=generate_uuid_from_tuple((job_id, batch_id, i)),
                vector=embedding,
                payload={"source_text": source_text, "source_document": source_filename}
            )
        )
    return upsert_list


def write_embeddings_to_qdrant(upsert_list, vector_db_metadata):
    qdrant_client = QdrantClient(
        url=vector_db_metadata.environment,
        api_key=os.getenv('VECTOR_DB_KEY'),
        grpc_port=6334,
        prefer_grpc=True,
        timeout=5
    ) if vector_db_metadata.environment != os.getenv('LOCAL_VECTOR_DB') else QdrantClient(
        os.getenv('LOCAL_VECTOR_DB'), port=6333)

    index = qdrant_client.get_collection(collection_name=vector_db_metadata.index_name)
    if not index:
        logging.error(f"Collection {vector_db_metadata.index_name} does not exist at cluster URL {vector_db_metadata.environment}")
        return None

    logging.info(f"Starting qdrant upsert for {len(upsert_list)} vectors")

    batch_size = config.PINECONE_BATCH_SIZE

    for i in range(0, len(upsert_list), batch_size):
        try:
            qdrant_client.upsert(
                collection_name=vector_db_metadata.index_name,
                points=upsert_list[i:i + batch_size]
            )
        except Exception as e:
            logging.error('Error writing embeddings to qdrant: %s', e)
            return None

    logging.info(f"Successfully uploaded {len(upsert_list)} vectors to qdrant")
    return len(upsert_list)


def write_embeddings_to_weaviate(text_embeddings_list, vector_db_metadata, batch_id, job_id, source_filename):
    client = weaviate.Client(
        url=vector_db_metadata.environment,
        auth_client_secret=weaviate.AuthApiKey(api_key=os.getenv('VECTOR_DB_KEY')),
    ) if vector_db_metadata.environment != os.getenv('LOCAL_VECTOR_DB') else weaviate.Client(
        url=vector_db_metadata.environment)

    index = client.schema.get()
    class_list = [class_dict["class"] for class_dict in index["classes"]]
    if not index or vector_db_metadata.index_name not in class_list:
        logging.error(f"Collection {vector_db_metadata.index_name} does not exist at cluster URL {vector_db_metadata.environment}")
        return None

    logging.info(f"Starting Weaviate upsert for {len(text_embeddings_list)} vectors")

    try:
        with client.batch(batch_size=config.PINECONE_BATCH_SIZE, dynamic=True, num_workers=2) as batch:
            for i, (text, vector) in enumerate(text_embeddings_list):
                properties = {
"source_data": text, "vectoflow_id": generate_uuid_from_tuple((job_id, batch_id, i)), "source_document": source_filename } client.batch.add_data_object( properties, vector_db_metadata.index_name, vector=vector ) except Exception as e: logging.error('Error writing embeddings to weaviate: %s', e) return None logging.info(f"Successfully uploaded {len(text_embeddings_list)} vectors to Weaviate") return len(text_embeddings_list) def create_milvus_source_chunk_dict(text_embeddings_list, batch_id, job_id, source_filename): ids = [] source_texts = [] embeddings = [] source_filenames = [] for i, (source_text, embedding) in enumerate(text_embeddings_list): ids.append(generate_uuid_from_tuple((job_id, batch_id, i))) source_texts.append(source_text) embeddings.append(embedding) source_filenames.append(source_filename) return [ids, source_texts, embeddings, source_filenames] def write_embeddings_to_milvus(upsert_list, vector_db_metadata): if vector_db_metadata.environment != os.getenv('LOCAL_VECTOR_DB'): connections.connect("default", uri = vector_db_metadata.environment, token = os.getenv('VECTOR_DB_KEY') ) else: connections.connect("default", host = vector_db_metadata.environment ) collection = Collection(vector_db_metadata.index_name) if not collection: logging.error(f"Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}") return None logging.info(f"Starting Milvus insert for {len(upsert_list)} vectors") batch_size = config.PINECONE_BATCH_SIZE vectors_uploaded = 0 for i in range(0,len(upsert_list), batch_size): try: insert_response = collection.insert(upsert_list[i:i+batch_size]) vectors_uploaded += insert_response.insert_count except Exception as e: logging.error('Error writing embeddings to milvus: %s', e) return None logging.info(f"Successfully uploaded {vectors_uploaded} vectors to milvus") return vectors_uploaded def create_lancedb_source_chunks(text_embeddings_list, batch_id, job_id, source_filename): upsert_list = [] for i, (source_text, embedding) in enumerate(text_embeddings_list): upsert_list.append( { "id": generate_uuid_from_tuple((job_id, batch_id, i)), "vector": embedding, "source_text": source_text, "source_document": source_filename } ) return upsert_list def write_embeddings_to_lancedb(upsert_list, batch_id): # right now only local connection, since its serverless and their cloud is in beta batch = safe_db_operation(batch_service.get_batch, batch_id) db = lancedb.connect(batch.vector_db_metadata.environment) try: table = db.open_table(batch.vector_db_metadata.index_name) except FileNotFoundError as e: logging.info(f"Table {batch.vector_db_metadata.index_name} does not exist in environment {batch.vector_db_metadata.environment}.") if batch.embeddings_metadata.embeddings_type == EmbeddingsType.OPEN_AI: schema = pa.schema( [ pa.field("id", pa.string()), pa.field("vector", pa.list_(pa.float32(), 1536)), pa.field("source_text", pa.string()), pa.field("source_document", pa.string()), ]) table = db.create_table(batch.vector_db_metadata.index_name, schema=schema) logging.info(f"Created table {batch.vector_db_metadata.index_name} in environment {batch.vector_db_metadata.environment}.") else: logging.error(f"Embeddings type {batch.embeddings_metadata.embeddings_type} not supported for LanceDB. 
Only Open AI") return None logging.info(f"Starting LanceDB upsert for {len(upsert_list)} vectors") batch_size = config.PINECONE_BATCH_SIZE vectors_uploaded = 0 for i in range(0,len(upsert_list), batch_size): try: table.add(data=upsert_list[i:i+batch_size]) vectors_uploaded += batch_size except Exception as e: logging.error('Error writing embeddings to lance db:', e) return None logging.info(f"Successfully uploaded {vectors_uploaded} vectors to lance db") return vectors_uploaded # TODO: refactor into utils def update_batch_and_job_status(job_id, batch_status, batch_id): try: if not job_id and batch_id: job = safe_db_operation(batch_service.get_batch, batch_id) job_id = job.job_id updated_batch_status = safe_db_operation(batch_service.update_batch_status, batch_id, batch_status) job = safe_db_operation(job_service.update_job_with_batch, job_id, updated_batch_status) if job.job_status == JobStatus.COMPLETED: logging.info(f"Job {job_id} completed successfully") elif job.job_status == JobStatus.PARTIALLY_COMPLETED: logging.info(f"Job {job_id} partially completed. {job.batches_succeeded} out of {job.total_batches} batches succeeded") except Exception as e: logging.error('Error updating job and batch status: %s', e) safe_db_operation(job_service.update_job_status, job_id, JobStatus.FAILED) def callback(ch, method, properties, body): # do these outside the try-catch so it can update the batch status if there's an error # if this parsing logic fails, the batch shouldn't be marked as failed data = json.loads(body) batch_id, chunks_with_embeddings, vector_db_key = data if vector_db_key: os.environ["VECTOR_DB_KEY"] = vector_db_key else: logging.info("No vector DB key provided") try: logging.info("Batch retrieved successfully") upload_batch(batch_id, chunks_with_embeddings) logging.info("Batch processed successfully") except Exception as e: logging.error('Error processing batch: %s', e) update_batch_and_job_status(None, batch_id, BatchStatus.FAILED) ch.basic_ack(delivery_tag=method.delivery_tag) def start_connection(max_retries=5, retry_delay=5): for attempt in range(max_retries): try: connection_params = create_connection_params() connection = pika.BlockingConnection(connection_params) channel = connection.channel() queue_name = os.getenv('VDB_UPLOAD_QUEUE') channel.queue_declare(queue=queue_name) channel.basic_consume(queue=queue_name, on_message_callback=callback) logging.info('Waiting for messages.') channel.start_consuming() return # If successful, exit the function except AMQPConnectionError as e: logging.error('AMQP Connection Error: %s', e) except Exception as e: logging.error('Unexpected error: %s', e) finally: if connection and not connection.is_closed: connection.close() logging.info('Retrying to connect in %s seconds (Attempt %s/%s)', retry_delay, attempt + 1, max_retries) time.sleep(retry_delay) if __name__ == "__main__": while True: try: start_connection() except Exception as e: logging.error('Error in start_connection: %s', e) logging.info('Restarting start_connection after encountering an error.') time.sleep(config.PIKA_RETRY_INTERVAL)
[ "lancedb.connect" ]
[((1090, 1155), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""./vdb-log.txt"""', 'level': 'logging.INFO'}), "(filename='./vdb-log.txt', level=logging.INFO)\n", (1109, 1155), False, 'import logging\n'), ((1156, 1225), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""./vdb-errors.txt"""', 'level': 'logging.ERROR'}), "(filename='./vdb-errors.txt', level=logging.ERROR)\n", (1175, 1225), False, 'import logging\n'), ((1291, 1343), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.get_batch', 'batch_id'], {}), '(batch_service.get_batch, batch_id)\n', (1308, 1343), False, 'from services.database.database import get_db, safe_db_operation\n'), ((1570, 1622), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.get_batch', 'batch_id'], {}), '(batch_service.get_batch, batch_id)\n', (1587, 1622), False, 'from services.database.database import get_db, safe_db_operation\n'), ((2324, 2370), 'services.database.database.safe_db_operation', 'safe_db_operation', (['job_service.get_job', 'job_id'], {}), '(job_service.get_job, job_id)\n', (2341, 2370), False, 'from services.database.database import get_db, safe_db_operation\n'), ((4945, 4980), 'pymongo.MongoClient', 'pymongo.MongoClient', (['mongo_conn_uri'], {}), '(mongo_conn_uri)\n', (4964, 4980), False, 'import pymongo\n'), ((6003, 6079), 'logging.info', 'logging.info', (['f"""Successfully uploaded {vectors_uploaded} vectors to MongoDB"""'], {}), "(f'Successfully uploaded {vectors_uploaded} vectors to MongoDB')\n", (6015, 6079), False, 'import logging\n'), ((6634, 6660), 'os.getenv', 'os.getenv', (['"""VECTOR_DB_KEY"""'], {}), "('VECTOR_DB_KEY')\n", (6643, 6660), False, 'import os\n'), ((6665, 6753), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'pinecone_api_key', 'environment': 'vector_db_metadata.environment'}), '(api_key=pinecone_api_key, environment=vector_db_metadata.\n environment)\n', (6678, 6753), False, 'import pinecone\n'), ((6761, 6810), 'pinecone.GRPCIndex', 'pinecone.GRPCIndex', (['vector_db_metadata.index_name'], {}), '(vector_db_metadata.index_name)\n', (6779, 6810), False, 'import pinecone\n'), ((7471, 7548), 'logging.info', 'logging.info', (['f"""Successfully uploaded {vectors_uploaded} vectors to pinecone"""'], {}), "(f'Successfully uploaded {vectors_uploaded} vectors to pinecone')\n", (7483, 7548), False, 'import logging\n'), ((13446, 13487), 'pymilvus.Collection', 'Collection', (['vector_db_metadata.index_name'], {}), '(vector_db_metadata.index_name)\n', (13456, 13487), False, 'from pymilvus import Collection, connections\n'), ((14146, 14221), 'logging.info', 'logging.info', (['f"""Successfully uploaded {vectors_uploaded} vectors to milvus"""'], {}), "(f'Successfully uploaded {vectors_uploaded} vectors to milvus')\n", (14158, 14221), False, 'import logging\n'), ((14884, 14936), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.get_batch', 'batch_id'], {}), '(batch_service.get_batch, batch_id)\n', (14901, 14936), False, 'from services.database.database import get_db, safe_db_operation\n'), ((14946, 14999), 'lancedb.connect', 'lancedb.connect', (['batch.vector_db_metadata.environment'], {}), '(batch.vector_db_metadata.environment)\n', (14961, 14999), False, 'import lancedb\n'), ((16477, 16554), 'logging.info', 'logging.info', (['f"""Successfully uploaded {vectors_uploaded} vectors to lance db"""'], {}), "(f'Successfully uploaded {vectors_uploaded} vectors to lance db')\n", (16489, 16554), 
False, 'import logging\n'), ((17756, 17772), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (17766, 17772), False, 'import json\n'), ((1401, 1492), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.update_batch_retry_count', 'batch.id', '(batch.retries + 1)'], {}), '(batch_service.update_batch_retry_count, batch.id, batch.\n retries + 1)\n', (1418, 1492), False, 'from services.database.database import get_db, safe_db_operation\n'), ((1494, 1556), 'logging.info', 'logging.info', (['f"""Retrying vector db upload of batch {batch.id}"""'], {}), "(f'Retrying vector db upload of batch {batch.id}')\n", (1506, 1556), False, 'import logging\n'), ((1793, 1886), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.update_batch_status_with_successful_minibatch', 'batch.id'], {}), '(batch_service.\n update_batch_status_with_successful_minibatch, batch.id)\n', (1810, 1886), False, 'from services.database.database import get_db, safe_db_operation\n'), ((4823, 4849), 'os.getenv', 'os.getenv', (['"""VECTOR_DB_KEY"""'], {}), "('VECTOR_DB_KEY')\n", (4832, 4849), False, 'import os\n'), ((5302, 5430), 'logging.error', 'logging.error', (['f"""Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}"""'], {}), "(\n f'Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}'\n )\n", (5315, 5430), False, 'import logging\n'), ((6837, 6965), 'logging.error', 'logging.error', (['f"""Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}"""'], {}), "(\n f'Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}'\n )\n", (6850, 6965), False, 'import logging\n'), ((10133, 10266), 'logging.error', 'logging.error', (['f"""Collection {vector_db_metadata.index_name} does not exist at cluster URL {vector_db_metadata.environment}"""'], {}), "(\n f'Collection {vector_db_metadata.index_name} does not exist at cluster URL {vector_db_metadata.environment}'\n )\n", (10146, 10266), False, 'import logging\n'), ((11215, 11266), 'weaviate.Client', 'weaviate.Client', ([], {'url': 'vector_db_metadata.environment'}), '(url=vector_db_metadata.environment)\n', (11230, 11266), False, 'import weaviate\n'), ((11451, 11584), 'logging.error', 'logging.error', (['f"""Collection {vector_db_metadata.index_name} does not exist at cluster URL {vector_db_metadata.environment}"""'], {}), "(\n f'Collection {vector_db_metadata.index_name} does not exist at cluster URL {vector_db_metadata.environment}'\n )\n", (11464, 11584), False, 'import logging\n'), ((13141, 13169), 'os.getenv', 'os.getenv', (['"""LOCAL_VECTOR_DB"""'], {}), "('LOCAL_VECTOR_DB')\n", (13150, 13169), False, 'import os\n'), ((13336, 13403), 'pymilvus.connections.connect', 'connections.connect', (['"""default"""'], {'host': 'vector_db_metadata.environment'}), "('default', host=vector_db_metadata.environment)\n", (13355, 13403), False, 'from pymilvus import Collection, connections\n'), ((13519, 13647), 'logging.error', 'logging.error', (['f"""Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}"""'], {}), "(\n f'Index {vector_db_metadata.index_name} does not exist in environment {vector_db_metadata.environment}'\n )\n", (13532, 13647), False, 'import logging\n'), ((16856, 16932), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.update_batch_status', 
'batch_id', 'batch_status'], {}), '(batch_service.update_batch_status, batch_id, batch_status)\n', (16873, 16932), False, 'from services.database.database import get_db, safe_db_operation\n'), ((16947, 17033), 'services.database.database.safe_db_operation', 'safe_db_operation', (['job_service.update_job_with_batch', 'job_id', 'updated_batch_status'], {}), '(job_service.update_job_with_batch, job_id,\n updated_batch_status)\n', (16964, 17033), False, 'from services.database.database import get_db, safe_db_operation\n'), ((17925, 17966), 'logging.info', 'logging.info', (['"""No vector DB key provided"""'], {}), "('No vector DB key provided')\n", (17937, 17966), False, 'import logging\n'), ((17989, 18033), 'logging.info', 'logging.info', (['"""Batch retrieved successfully"""'], {}), "('Batch retrieved successfully')\n", (18001, 18033), False, 'import logging\n'), ((18097, 18141), 'logging.info', 'logging.info', (['"""Batch processed successfully"""'], {}), "('Batch processed successfully')\n", (18109, 18141), False, 'import logging\n'), ((19271, 19379), 'logging.info', 'logging.info', (['"""Retrying to connect in %s seconds (Attempt %s/%s)"""', 'retry_delay', '(attempt + 1)', 'max_retries'], {}), "('Retrying to connect in %s seconds (Attempt %s/%s)',\n retry_delay, attempt + 1, max_retries)\n", (19283, 19379), False, 'import logging\n'), ((19384, 19407), 'time.sleep', 'time.sleep', (['retry_delay'], {}), '(retry_delay)\n', (19394, 19407), False, 'import time\n'), ((168, 193), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (183, 193), False, 'import os\n'), ((5151, 5219), 'logging.error', 'logging.error', (['f"""Error connecting to MongoDB via python client: {e}"""'], {}), "(f'Error connecting to MongoDB via python client: {e}')\n", (5164, 5219), False, 'import logging\n'), ((7844, 7891), 'shared.utils.generate_uuid_from_tuple', 'generate_uuid_from_tuple', (['(job_id, batch_id, i)'], {}), '((job_id, batch_id, i))\n', (7868, 7891), False, 'from shared.utils import generate_uuid_from_tuple\n'), ((8226, 8252), 'os.getenv', 'os.getenv', (['"""VECTOR_DB_KEY"""'], {}), "('VECTOR_DB_KEY')\n", (8235, 8252), False, 'import os\n'), ((9930, 9958), 'os.getenv', 'os.getenv', (['"""LOCAL_VECTOR_DB"""'], {}), "('LOCAL_VECTOR_DB')\n", (9939, 9958), False, 'import os\n'), ((9977, 10005), 'os.getenv', 'os.getenv', (['"""LOCAL_VECTOR_DB"""'], {}), "('LOCAL_VECTOR_DB')\n", (9986, 10005), False, 'import os\n'), ((11181, 11209), 'os.getenv', 'os.getenv', (['"""LOCAL_VECTOR_DB"""'], {}), "('LOCAL_VECTOR_DB')\n", (11190, 11209), False, 'import os\n'), ((12316, 12376), 'logging.error', 'logging.error', (['"""Error writing embeddings to weaviate: %s"""', 'e'], {}), "('Error writing embeddings to weaviate: %s', e)\n", (12329, 12376), False, 'import logging\n'), ((12797, 12844), 'shared.utils.generate_uuid_from_tuple', 'generate_uuid_from_tuple', (['(job_id, batch_id, i)'], {}), '((job_id, batch_id, i))\n', (12821, 12844), False, 'from shared.utils import generate_uuid_from_tuple\n'), ((15119, 15259), 'logging.info', 'logging.info', (['f"""Table {batch.vector_db_metadata.index_name} does not exist in environment {batch.vector_db_metadata.environment}."""'], {}), "(\n f'Table {batch.vector_db_metadata.index_name} does not exist in environment {batch.vector_db_metadata.environment}.'\n )\n", (15131, 15259), False, 'import logging\n'), ((16740, 16792), 'services.database.database.safe_db_operation', 'safe_db_operation', (['batch_service.get_batch', 'batch_id'], {}), '(batch_service.get_batch, 
batch_id)\n', (16757, 16792), False, 'from services.database.database import get_db, safe_db_operation\n'), ((17092, 17144), 'logging.info', 'logging.info', (['f"""Job {job_id} completed successfully"""'], {}), "(f'Job {job_id} completed successfully')\n", (17104, 17144), False, 'import logging\n'), ((17391, 17450), 'logging.error', 'logging.error', (['"""Error updating job and batch status: %s"""', 'e'], {}), "('Error updating job and batch status: %s', e)\n", (17404, 17450), False, 'import logging\n'), ((17459, 17533), 'services.database.database.safe_db_operation', 'safe_db_operation', (['job_service.update_job_status', 'job_id', 'JobStatus.FAILED'], {}), '(job_service.update_job_status, job_id, JobStatus.FAILED)\n', (17476, 17533), False, 'from services.database.database import get_db, safe_db_operation\n'), ((18177, 18223), 'logging.error', 'logging.error', (['"""Error processing batch: %s"""', 'e'], {}), "('Error processing batch: %s', e)\n", (18190, 18223), False, 'import logging\n'), ((18485, 18511), 'services.rabbitmq.rabbit_service.create_connection_params', 'create_connection_params', ([], {}), '()\n', (18509, 18511), False, 'from services.rabbitmq.rabbit_service import create_connection_params\n'), ((18537, 18579), 'pika.BlockingConnection', 'pika.BlockingConnection', (['connection_params'], {}), '(connection_params)\n', (18560, 18579), False, 'import pika\n'), ((18649, 18678), 'os.getenv', 'os.getenv', (['"""VDB_UPLOAD_QUEUE"""'], {}), "('VDB_UPLOAD_QUEUE')\n", (18658, 18678), False, 'import os\n'), ((18827, 18864), 'logging.info', 'logging.info', (['"""Waiting for messages."""'], {}), "('Waiting for messages.')\n", (18839, 18864), False, 'import logging\n'), ((4463, 4510), 'shared.utils.generate_uuid_from_tuple', 'generate_uuid_from_tuple', (['(job_id, batch_id, i)'], {}), '((job_id, batch_id, i))\n', (4487, 4510), False, 'from shared.utils import generate_uuid_from_tuple\n'), ((5915, 5969), 'logging.error', 'logging.error', (['"""Error writing embeddings to Mongo:"""', 'e'], {}), "('Error writing embeddings to Mongo:', e)\n", (5928, 5969), False, 'import logging\n'), ((6345, 6392), 'shared.utils.generate_uuid_from_tuple', 'generate_uuid_from_tuple', (['(job_id, batch_id, i)'], {}), '((job_id, batch_id, i))\n', (6369, 6392), False, 'from shared.utils import generate_uuid_from_tuple\n'), ((7380, 7437), 'logging.error', 'logging.error', (['"""Error writing embeddings to pinecone:"""', 'e'], {}), "('Error writing embeddings to pinecone:', e)\n", (7393, 7437), False, 'import logging\n'), ((8456, 8582), 'logging.error', 'logging.error', (['f"""Index {vector_db_metadata.index_name} does not exist at redis URL {vector_db_metadata.environment}"""'], {}), "(\n f'Index {vector_db_metadata.index_name} does not exist at redis URL {vector_db_metadata.environment}'\n )\n", (8469, 8582), False, 'import logging\n'), ((9790, 9816), 'os.getenv', 'os.getenv', (['"""VECTOR_DB_KEY"""'], {}), "('VECTOR_DB_KEY')\n", (9799, 9816), False, 'import os\n'), ((10674, 10729), 'logging.error', 'logging.error', (['"""Error writing embeddings to qdrant:"""', 'e'], {}), "('Error writing embeddings to qdrant:', e)\n", (10687, 10729), False, 'import logging\n'), ((13281, 13307), 'os.getenv', 'os.getenv', (['"""VECTOR_DB_KEY"""'], {}), "('VECTOR_DB_KEY')\n", (13290, 13307), False, 'import os\n'), ((14054, 14112), 'logging.error', 'logging.error', (['"""Error writing embeddings to milvus: %s"""', 'e'], {}), "('Error writing embeddings to milvus: %s', e)\n", (14067, 14112), False, 'import logging\n'), ((14499, 
14546), 'shared.utils.generate_uuid_from_tuple', 'generate_uuid_from_tuple', (['(job_id, batch_id, i)'], {}), '((job_id, batch_id, i))\n', (14523, 14546), False, 'from shared.utils import generate_uuid_from_tuple\n'), ((15739, 15872), 'logging.info', 'logging.info', (['f"""Created table {batch.vector_db_metadata.index_name} in environment {batch.vector_db_metadata.environment}."""'], {}), "(\n f'Created table {batch.vector_db_metadata.index_name} in environment {batch.vector_db_metadata.environment}.'\n )\n", (15751, 15872), False, 'import logging\n'), ((15889, 16016), 'logging.error', 'logging.error', (['f"""Embeddings type {batch.embeddings_metadata.embeddings_type} not supported for LanceDB. Only Open AI"""'], {}), "(\n f'Embeddings type {batch.embeddings_metadata.embeddings_type} not supported for LanceDB. Only Open AI'\n )\n", (15902, 16016), False, 'import logging\n'), ((16386, 16443), 'logging.error', 'logging.error', (['"""Error writing embeddings to lance db:"""', 'e'], {}), "('Error writing embeddings to lance db:', e)\n", (16399, 16443), False, 'import logging\n'), ((17219, 17348), 'logging.info', 'logging.info', (['f"""Job {job_id} partially completed. {job.batches_succeeded} out of {job.total_batches} batches succeeded"""'], {}), "(\n f'Job {job_id} partially completed. {job.batches_succeeded} out of {job.total_batches} batches succeeded'\n )\n", (17231, 17348), False, 'import logging\n'), ((19024, 19069), 'logging.error', 'logging.error', (['"""AMQP Connection Error: %s"""', 'e'], {}), "('AMQP Connection Error: %s', e)\n", (19037, 19069), False, 'import logging\n'), ((19113, 19153), 'logging.error', 'logging.error', (['"""Unexpected error: %s"""', 'e'], {}), "('Unexpected error: %s', e)\n", (19126, 19153), False, 'import logging\n'), ((19539, 19588), 'logging.error', 'logging.error', (['"""Error in start_connection: %s"""', 'e'], {}), "('Error in start_connection: %s', e)\n", (19552, 19588), False, 'import logging\n'), ((19601, 19673), 'logging.info', 'logging.info', (['"""Restarting start_connection after encountering an error."""'], {}), "('Restarting start_connection after encountering an error.')\n", (19613, 19673), False, 'import logging\n'), ((19686, 19724), 'time.sleep', 'time.sleep', (['config.PIKA_RETRY_INTERVAL'], {}), '(config.PIKA_RETRY_INTERVAL)\n', (19696, 19724), False, 'import time\n'), ((8898, 8925), 'numpy.array', 'np.array', (['upsert_list[2][i]'], {}), '(upsert_list[2][i])\n', (8906, 8925), True, 'import numpy as np\n'), ((9410, 9457), 'shared.utils.generate_uuid_from_tuple', 'generate_uuid_from_tuple', (['(job_id, batch_id, i)'], {}), '((job_id, batch_id, i))\n', (9434, 9457), False, 'from shared.utils import generate_uuid_from_tuple\n'), ((11977, 12024), 'shared.utils.generate_uuid_from_tuple', 'generate_uuid_from_tuple', (['(job_id, batch_id, i)'], {}), '((job_id, batch_id, i))\n', (12001, 12024), False, 'from shared.utils import generate_uuid_from_tuple\n'), ((11109, 11135), 'os.getenv', 'os.getenv', (['"""VECTOR_DB_KEY"""'], {}), "('VECTOR_DB_KEY')\n", (11118, 11135), False, 'import os\n'), ((15416, 15427), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (15425, 15427), True, 'import pyarrow as pa\n'), ((15544, 15555), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (15553, 15555), True, 'import pyarrow as pa\n'), ((15606, 15617), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (15615, 15617), True, 'import pyarrow as pa\n'), ((4137, 4230), 'logging.error', 'logging.error', (['"""Unsupported vector DB type: %s"""', 
'vector_db_metadata.vector_db_type.value'], {}), "('Unsupported vector DB type: %s', vector_db_metadata.\n vector_db_type.value)\n", (4150, 4230), False, 'import logging\n'), ((15478, 15490), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (15488, 15490), True, 'import pyarrow as pa\n')]
import streamlit as st
import sqlite3
import streamlit_antd_components as sac
import pandas as pd
import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import LanceDB
import lancedb
import pickle
import configparser
import ast


class ConfigHandler:
    def __init__(self):
        self.config = configparser.ConfigParser()
        self.config.read('config.ini')

    def get_config_values(self, section, key):
        value = self.config.get(section, key)
        try:
            # Try converting the string value to a Python data structure
            return ast.literal_eval(value)
        except (SyntaxError, ValueError):
            # If not a data structure, return the plain string
            return value


config_handler = ConfigHandler()
PM = config_handler.get_config_values('constants', 'PM')
USERS = config_handler.get_config_values('constants', 'USERS')
SA = config_handler.get_config_values('constants', 'SA')
AD = config_handler.get_config_values('constants', 'AD')

# Create or check for the 'database' directory in the current working directory
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")

if not os.path.exists(WORKING_DIRECTORY):
    os.makedirs(WORKING_DIRECTORY)

if st.secrets["sql_ext_path"] == "None":
    WORKING_DATABASE = os.path.join(WORKING_DIRECTORY, st.secrets["default_db"])
else:
    WORKING_DATABASE = st.secrets["sql_ext_path"]


def fetch_vectorstores_with_usernames():
    conn = sqlite3.connect(WORKING_DATABASE)
    cursor = conn.cursor()

    query = '''
    SELECT
        Vector_Stores.vs_id,
        Subject.subject_name,
        Topic.topic_name,
        Vector_Stores.vectorstore_name,
        Users.username,
        Vector_Stores.sharing_enabled
    FROM Vector_Stores
    JOIN Users ON Vector_Stores.user_id = Users.user_id
    LEFT JOIN Subject ON Vector_Stores.subject = Subject.id
    LEFT JOIN Topic ON Vector_Stores.topic = Topic.id;
    '''

    cursor.execute(query)
    data = cursor.fetchall()
    conn.close()
    return data


def display_vectorstores():
    data = fetch_vectorstores_with_usernames()
    df = pd.DataFrame(data, columns=["KB ID", "Function Name", "Process Name", "KB Name", "Username", "sharing_enabled"])

    # Convert the 'sharing_enabled' values
    df["sharing_enabled"] = df["sharing_enabled"].apply(lambda x: '✔' if x == 1 else '')

    st.dataframe(
        df,
        use_container_width=True,
        column_order=["KB ID", "Function Name", "Process Name", "KB Name", "Username", "sharing_enabled"]
    )


def fetch_all_files():
    """
    Fetch all files either shared or based on user type
    """
    conn = sqlite3.connect(WORKING_DATABASE)
    cursor = conn.cursor()

    # Construct the SQL query with JOINs for Subject, Topic, and Users tables
    if st.session_state.user['profile_id'] == 'SA':
        cursor.execute('''
            SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username
            FROM Files
            JOIN Subject ON Files.subject = Subject.id
            JOIN Topic ON Files.topic = Topic.id
            JOIN Users ON Files.user_id = Users.user_id
        ''')
    else:
        cursor.execute('''
            SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username
            FROM Files
            JOIN Subject ON Files.subject = Subject.id
            JOIN Topic ON Files.topic = Topic.id
            JOIN Users ON Files.user_id = Users.user_id
            WHERE Files.sharing_enabled = 1
        ''')

    files = cursor.fetchall()
    formatted_files = [f"({file[0]}) {file[1]} ({file[4]})" for file in files]
    conn.close()
    return formatted_files


def fetch_file_data(file_id):
    """
    Fetch file data given a file id
    """
    conn = sqlite3.connect(WORKING_DATABASE)
    cursor = conn.cursor()
    cursor.execute("SELECT data, metadata FROM Files WHERE file_id = ?", (file_id,))
file_id = ?", (file_id,)) data = cursor.fetchone() conn.close() if data: return data[0], data[1] else: return None, None def fetch_subjects_by_org(org_id): conn = sqlite3.connect(WORKING_DATABASE) cursor = conn.cursor() # Check if the user is a super_admin (org_id is 0) if org_id == 0: cursor.execute('SELECT * FROM Subject;') else: cursor.execute('SELECT * FROM Subject WHERE org_id = ?;', (org_id,)) subjects = cursor.fetchall() conn.close() return subjects def fetch_topics_by_org(org_id): conn = sqlite3.connect(WORKING_DATABASE) cursor = conn.cursor() # Check if the user is a super_admin (org_id is 0) if org_id == 0: cursor.execute('SELECT * FROM Topic;') else: cursor.execute('SELECT * FROM Topic WHERE org_id = ?;', (org_id,)) topics = cursor.fetchall() conn.close() return topics def split_docs(file_path,meta): #def split_meta_docs(file, source, tch_code): loader = UnstructuredFileLoader(file_path) documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) metadata = {"source": meta} for doc in docs: doc.metadata.update(metadata) return docs def create_lancedb_table(embeddings, meta, table_name): lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb") # LanceDB connection db = lancedb.connect(lancedb_path) table = db.create_table( f"{table_name}", data=[ { "vector": embeddings.embed_query("Query Unsuccessful"), "text": "Query Unsuccessful", "id": "1", "source": f"{meta}" } ], mode="overwrite", ) return table def save_to_vectorstores(vs, vstore_input_name, subject, topic, username, share_resource=False): conn = sqlite3.connect(WORKING_DATABASE) cursor = conn.cursor() # Fetch the user's details cursor.execute('SELECT user_id FROM Users WHERE username = ?', (username,)) user_details = cursor.fetchone() if not user_details: st.error("Error: User not found.") return user_id = user_details[0] # If Vector_Store instance exists in session state, then serialize and save if vs: serialized_db = pickle.dumps(vs) # Check if the entry already exists cursor.execute('SELECT 1 FROM Vector_Stores WHERE vectorstore_name LIKE ? AND user_id = ?', (f"%{vstore_input_name}%", user_id)) exists = cursor.fetchone() if exists: st.error("Error: An entry with the same vectorstore_name and user_id already exists.") return if subject is None: st.error("Error: Function is missing.") return if topic is None: st.error("Error: Process is missing.") return # Get the subject and topic IDs cursor.execute('SELECT id FROM Subject WHERE subject_name = ?', (subject,)) subject_id = cursor.fetchone()[0] cursor.execute('SELECT id FROM Topic WHERE topic_name = ?', (topic,)) topic_id = cursor.fetchone()[0] # Insert the new row cursor.execute(''' INSERT INTO Vector_Stores (vectorstore_name, data, user_id, subject, topic, sharing_enabled) VALUES (?, ?, ?, ?, ?, ?) 
        conn.commit()
    conn.close()


def create_vectorstore():
    full_docs = []
    st.subheader("Enter the process and functions for your knowledge base")
    embeddings = OpenAIEmbeddings()

    # Fetch all available subjects
    subjects = fetch_subjects_by_org(st.session_state.user["org_id"])
    subject_names = [sub[2] for sub in subjects]  # Assuming index 2 holds the subject_name

    selected_subject = st.selectbox("Select an existing function or type a new one:", options=subject_names + ['New Function'])
    if selected_subject == 'New Function':
        subject = st.text_input("Please enter the new function name:", max_chars=30)
    else:
        subject = selected_subject

    # Fetch all available topics
    topics = fetch_topics_by_org(st.session_state.user["org_id"])
    topic_names = [topic[2] for topic in topics]  # Assuming index 2 holds the topic_name

    selected_topic = st.selectbox("Select an existing process or type a new one:", options=topic_names + ['New Process'])
    if selected_topic == 'New Process':
        topic = st.text_input("Please enter the new process name:", max_chars=30)
    else:
        topic = selected_topic

    vectorstore_input = st.text_input("Please type in a name for your knowledge base:", max_chars=20)
    vs_name = vectorstore_input + f"_({st.session_state.user['username']})"
    share_resource = st.checkbox("Share this resource", value=True)  # <-- Added this line

    # Show the current build of files for the latest database
    st.subheader("Select one or more files to build your knowledge base")
    files = fetch_all_files()

    if files:
        selected_files = sac.transfer(items=files, label=None, index=None,
                                     titles=['Uploaded files', 'Select files for KB'],
                                     format_func='title', width='100%', height=None, search=True,
                                     pagination=False, oneway=False, reload=True, disabled=False,
                                     return_index=False)

        # Alert to confirm the creation of knowledge base
        st.warning("Building your knowledge base will take some time. Please be patient.")
Please be patient.") build = sac.buttons([ dict(label='Build VectorStore', icon='check-circle-fill', color = 'green'), dict(label='Cancel', icon='x-circle-fill', color='red'), ], label=None, index=1, format_func='title', align='center', position='top', size='default', direction='horizontal', shape='round', type='default', compact=False, return_index=False) if build == 'Build VectorStore' and selected_files: for s_file in selected_files: file_id = int(s_file.split("(", 1)[1].split(")", 1)[0]) file_data, meta = fetch_file_data(file_id) docs = split_docs(file_data, meta) full_docs.extend(docs) db = LanceDB.from_documents(full_docs, OpenAIEmbeddings(), connection=create_lancedb_table(embeddings, meta, vs_name)) save_to_vectorstores(db, vs_name, subject, topic, st.session_state.user["username"], share_resource) # Passing the share_resource to the function st.success("Knowledge Base loaded") else: st.write("No files found in the database.") def delete_lancedb_table(table_name): lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb") # LanceDB connection db = lancedb.connect(lancedb_path) db.drop_table(f"{table_name}") def fetch_vectorstores_by_user_id(user_id): conn = sqlite3.connect(WORKING_DATABASE) cursor = conn.cursor() # Fetch vectorstores based on user_id cursor.execute('SELECT vectorstore_name FROM Vector_Stores WHERE user_id = ?;', (user_id,)) vectorstores = cursor.fetchall() conn.close() return vectorstores def delete_vectorstores(): st.subheader("Delete Knowledge Base in Database:") user_vectorstores = fetch_vectorstores_by_user_id(st.session_state.user["id"]) if user_vectorstores: vectorstore_names = [vs[0] for vs in user_vectorstores] selected_vectorstores = st.multiselect("Select knowledge bases to delete:", options=vectorstore_names) confirm_delete = st.checkbox("I understand that this action cannot be undone.", value=False) if st.button("Delete VectorStore"): if confirm_delete and selected_vectorstores: delete_vectorstores_from_db(selected_vectorstores, st.session_state.user["id"], st.session_state.user["profile_id"]) st.success(f"Deleted {len(selected_vectorstores)} vectorstores.") else: st.warning("Please confirm the deletion action.") else: st.write("No knowledge base found in the database.") def delete_vectorstores_from_db(vectorstore_names, user_id, profile): conn = sqlite3.connect(WORKING_DATABASE) cursor = conn.cursor() for vectorstore_name in vectorstore_names: if profile in ['SA', 'AD']: # Delete the corresponding LanceDB table delete_lancedb_table(vectorstore_name) # Delete vectorstore irrespective of the user_id associated with them cursor.execute('DELETE FROM Vector_Stores WHERE vectorstore_name=?;', (vectorstore_name,)) else: # Delete the corresponding LanceDB table delete_lancedb_table(vectorstore_name) # Delete only if the user_id matches cursor.execute('DELETE FROM Vector_Stores WHERE vectorstore_name=? AND user_id=?;', (vectorstore_name, user_id)) # Check if the row was affected if cursor.rowcount == 0: st.error(f"Unable to delete knowledge base '{vectorstore_name}' that is not owned by you.") conn.commit() # Commit the changes conn.close() # Close the connection
[ "lancedb.connect" ]
[((1234, 1245), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1243, 1245), False, 'import os\n'), ((1266, 1295), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (1278, 1295), False, 'import os\n'), ((1304, 1337), 'os.path.exists', 'os.path.exists', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1318, 1337), False, 'import os\n'), ((1340, 1370), 'os.makedirs', 'os.makedirs', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1351, 1370), False, 'import os\n'), ((1432, 1489), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', "st.secrets['default_db']"], {}), "(WORKING_DIRECTORY, st.secrets['default_db'])\n", (1444, 1489), False, 'import os\n'), ((1596, 1629), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (1611, 1629), False, 'import sqlite3\n'), ((2261, 2377), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['KB ID', 'Function Name', 'Process Name', 'KB Name', 'Username',\n 'sharing_enabled']"}), "(data, columns=['KB ID', 'Function Name', 'Process Name',\n 'KB Name', 'Username', 'sharing_enabled'])\n", (2273, 2377), True, 'import pandas as pd\n'), ((2512, 2657), 'streamlit.dataframe', 'st.dataframe', (['df'], {'use_container_width': '(True)', 'column_order': "['KB ID', 'Function Name', 'Process Name', 'KB Name', 'Username',\n 'sharing_enabled']"}), "(df, use_container_width=True, column_order=['KB ID',\n 'Function Name', 'Process Name', 'KB Name', 'Username', 'sharing_enabled'])\n", (2524, 2657), True, 'import streamlit as st\n'), ((2792, 2825), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (2807, 2825), False, 'import sqlite3\n'), ((3967, 4000), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (3982, 4000), False, 'import sqlite3\n'), ((4293, 4326), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (4308, 4326), False, 'import sqlite3\n'), ((4687, 4720), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (4702, 4720), False, 'import sqlite3\n'), ((5116, 5149), 'langchain.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['file_path'], {}), '(file_path)\n', (5138, 5149), False, 'from langchain.document_loaders import UnstructuredFileLoader\n'), ((5194, 5249), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (5215, 5249), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((5464, 5506), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), "(WORKING_DIRECTORY, 'lancedb')\n", (5476, 5506), False, 'import os\n'), ((5535, 5564), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (5550, 5564), False, 'import lancedb\n'), ((5914, 5947), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (5929, 5947), False, 'import sqlite3\n'), ((7603, 7674), 'streamlit.subheader', 'st.subheader', (['"""Enter the process and functions for your knowledge base"""'], {}), "('Enter the process and functions for your knowledge base')\n", (7615, 7674), True, 'import streamlit as st\n'), ((7692, 7710), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (7708, 7710), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((7932, 8041), 
'streamlit.selectbox', 'st.selectbox', (['"""Select an existing function or type a new one:"""'], {'options': "(subject_names + ['New Function'])"}), "('Select an existing function or type a new one:', options=\n subject_names + ['New Function'])\n", (7944, 8041), True, 'import streamlit as st\n'), ((8426, 8531), 'streamlit.selectbox', 'st.selectbox', (['"""Select an existing process or type a new one:"""'], {'options': "(topic_names + ['New Process'])"}), "('Select an existing process or type a new one:', options=\n topic_names + ['New Process'])\n", (8438, 8531), True, 'import streamlit as st\n'), ((8724, 8801), 'streamlit.text_input', 'st.text_input', (['"""Please type in a name for your knowledge base:"""'], {'max_chars': '(20)'}), "('Please type in a name for your knowledge base:', max_chars=20)\n", (8737, 8801), True, 'import streamlit as st\n'), ((8899, 8945), 'streamlit.checkbox', 'st.checkbox', (['"""Share this resource"""'], {'value': '(True)'}), "('Share this resource', value=True)\n", (8910, 8945), True, 'import streamlit as st\n'), ((9036, 9105), 'streamlit.subheader', 'st.subheader', (['"""Select one or more files to build your knowledge base"""'], {}), "('Select one or more files to build your knowledge base')\n", (9048, 9105), True, 'import streamlit as st\n'), ((10774, 10816), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""lancedb"""'], {}), "(WORKING_DIRECTORY, 'lancedb')\n", (10786, 10816), False, 'import os\n'), ((10845, 10874), 'lancedb.connect', 'lancedb.connect', (['lancedb_path'], {}), '(lancedb_path)\n', (10860, 10874), False, 'import lancedb\n'), ((10963, 10996), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (10978, 10996), False, 'import sqlite3\n'), ((11282, 11332), 'streamlit.subheader', 'st.subheader', (['"""Delete Knowledge Base in Database:"""'], {}), "('Delete Knowledge Base in Database:')\n", (11294, 11332), True, 'import streamlit as st\n'), ((12285, 12318), 'sqlite3.connect', 'sqlite3.connect', (['WORKING_DATABASE'], {}), '(WORKING_DATABASE)\n', (12300, 12318), False, 'import sqlite3\n'), ((456, 483), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (481, 483), False, 'import configparser\n'), ((6158, 6192), 'streamlit.error', 'st.error', (['"""Error: User not found."""'], {}), "('Error: User not found.')\n", (6166, 6192), True, 'import streamlit as st\n'), ((6355, 6371), 'pickle.dumps', 'pickle.dumps', (['vs'], {}), '(vs)\n', (6367, 6371), False, 'import pickle\n'), ((8103, 8169), 'streamlit.text_input', 'st.text_input', (['"""Please enter the new function name:"""'], {'max_chars': '(30)'}), "('Please enter the new function name:', max_chars=30)\n", (8116, 8169), True, 'import streamlit as st\n'), ((8588, 8653), 'streamlit.text_input', 'st.text_input', (['"""Please enter the new process name:"""'], {'max_chars': '(30)'}), "('Please enter the new process name:', max_chars=30)\n", (8601, 8653), True, 'import streamlit as st\n'), ((9175, 9429), 'streamlit_antd_components.transfer', 'sac.transfer', ([], {'items': 'files', 'label': 'None', 'index': 'None', 'titles': "['Uploaded files', 'Select files for KB']", 'format_func': '"""title"""', 'width': '"""100%"""', 'height': 'None', 'search': '(True)', 'pagination': '(False)', 'oneway': '(False)', 'reload': '(True)', 'disabled': '(False)', 'return_index': '(False)'}), "(items=files, label=None, index=None, titles=['Uploaded files',\n 'Select files for KB'], format_func='title', width='100%', height=None,\n search=True, 
pagination=False, oneway=False, reload=True, disabled=\n False, return_index=False)\n", (9187, 9429), True, 'import streamlit_antd_components as sac\n'), ((9492, 9579), 'streamlit.warning', 'st.warning', (['"""Building your knowledge base will take some time. Please be patient."""'], {}), "(\n 'Building your knowledge base will take some time. Please be patient.')\n", (9502, 9579), True, 'import streamlit as st\n'), ((10674, 10717), 'streamlit.write', 'st.write', (['"""No files found in the database."""'], {}), "('No files found in the database.')\n", (10682, 10717), True, 'import streamlit as st\n'), ((11543, 11621), 'streamlit.multiselect', 'st.multiselect', (['"""Select knowledge bases to delete:"""'], {'options': 'vectorstore_names'}), "('Select knowledge bases to delete:', options=vectorstore_names)\n", (11557, 11621), True, 'import streamlit as st\n'), ((11647, 11722), 'streamlit.checkbox', 'st.checkbox', (['"""I understand that this action cannot be undone."""'], {'value': '(False)'}), "('I understand that this action cannot be undone.', value=False)\n", (11658, 11722), True, 'import streamlit as st\n'), ((11743, 11774), 'streamlit.button', 'st.button', (['"""Delete VectorStore"""'], {}), "('Delete VectorStore')\n", (11752, 11774), True, 'import streamlit as st\n'), ((12150, 12202), 'streamlit.write', 'st.write', (['"""No knowledge base found in the database."""'], {}), "('No knowledge base found in the database.')\n", (12158, 12202), True, 'import streamlit as st\n'), ((722, 745), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (738, 745), False, 'import ast\n'), ((6621, 6717), 'streamlit.error', 'st.error', (['"""Error: An entry with the same vectorstore_name and user_id already exists."""'], {}), "(\n 'Error: An entry with the same vectorstore_name and user_id already exists.'\n )\n", (6629, 6717), True, 'import streamlit as st\n'), ((6776, 6815), 'streamlit.error', 'st.error', (['"""Error: Function is missing."""'], {}), "('Error: Function is missing.')\n", (6784, 6815), True, 'import streamlit as st\n'), ((6874, 6912), 'streamlit.error', 'st.error', (['"""Error: Process is missing."""'], {}), "('Error: Process is missing.')\n", (6882, 6912), True, 'import streamlit as st\n'), ((10619, 10654), 'streamlit.success', 'st.success', (['"""Knowledge Base loaded"""'], {}), "('Knowledge Base loaded')\n", (10629, 10654), True, 'import streamlit as st\n'), ((10367, 10385), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (10383, 10385), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((12082, 12131), 'streamlit.warning', 'st.warning', (['"""Please confirm the deletion action."""'], {}), "('Please confirm the deletion action.')\n", (12092, 12131), True, 'import streamlit as st\n'), ((13147, 13248), 'streamlit.error', 'st.error', (['f"""Unable to delete knowledge base \'{vectorstore_name}\' that is not owned by you."""'], {}), '(\n f"Unable to delete knowledge base \'{vectorstore_name}\' that is not owned by you."\n )\n', (13155, 13248), True, 'import streamlit as st\n')]
# Retrieval from PDF
import os
import sys

from pydantic import BaseModel
from langchain.chat_models import AzureChatOpenAI
from langchain.chains import LLMChain, HypotheticalDocumentEmbedder
from langchain.prompts import PromptTemplate
from langchain.embeddings import HuggingFaceEmbeddings

# Get the directory of the current script
current_script_dir = os.path.dirname(os.path.abspath(__file__))

# Get the parent directory (applied_rag) by navigating up one level from the current script
parent_dir = os.path.dirname(current_script_dir)

# Add the parent directory to sys.path
sys.path.append(parent_dir)

import settings
from langchain.vectorstores import LanceDB
import lancedb
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import PyPDFLoader


# Configuration data class
class AppConfig(BaseModel):
    openai_api_key: str
    openai_api_base: str
    openai_api_type: str
    deployment_name: str
    model_name: str
    openai_api_version: str


# Load configuration from settings
config = AppConfig(
    openai_api_key=settings.openai_api_key,
    openai_api_base=settings.openai_api_base,
    openai_api_type=settings.openai_api_type,
    deployment_name=settings.deployment_name,
    model_name=settings.model_name,
    openai_api_version=settings.openai_api_version
)

# Initialize Azure LLM
llm = AzureChatOpenAI(
    deployment_name=config.deployment_name,
    openai_api_key=config.openai_api_key,
    openai_api_base=config.openai_api_base,
    openai_api_type=config.openai_api_type,
    openai_api_version=config.openai_api_version,
    model_name=config.model_name,
    temperature=0.0
)

base_embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")

prompt_template = """
As a knowledgeable and helpful research assistant, your task is to provide informative answers based on the given context.
Use your extensive knowledge base to offer clear, concise, and accurate responses to the user's inquiries.
If the question is not related to the documents, simply say you don't know.

Question: {question}

Answer:
"""

prompt = PromptTemplate(input_variables=["question"], template=prompt_template)
llm_chain = LLMChain(llm=llm, prompt=prompt)

# HyDE: embed an LLM-generated hypothetical answer (produced by the custom
# prompt above) instead of the raw question
embeddings = HypotheticalDocumentEmbedder(
    llm_chain=llm_chain,
    base_embeddings=base_embeddings
)

# Loading data from pdf
pdf_folder_path = '/workspaces/rag/02_Hyde/pamphlet.pdf'
loader = PyPDFLoader(pdf_folder_path)
docs = loader.load_and_split()

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=128,
    chunk_overlap=50,
)
documents = text_splitter.split_documents(docs)

# lancedb as vectorstore
db = lancedb.connect('/tmp/lancedb')
table = db.create_table("documentsai", data=[
    {"vector": embeddings.embed_query("アジャイル"), "text": "アジャイル", "id": "1"}  # "アジャイル" = "Agile"
], mode="overwrite")
vector_store = LanceDB.from_documents(documents, embeddings, connection=table)

# passing in the string query to get some reference
# query = "which factors appear to be the major nutritional limitations of fast-food meals"
query = "Ridgelinezアジャイルサービスとその優位性を説明してください。"  # "Explain the Ridgelinez Agile service and its advantages."
# result = vector_store.similarity_search(query)
# print(result)

answer = llm_chain.run(query)
print(answer)
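# A short follow-on sketch, reusing llm_chain and embeddings from the script
# above (the sample question is hypothetical), to make the HyDE step visible:
# embed_query() first generates a hypothetical answer with the LLM chain, then
# embeds that text with the all-MiniLM-L6-v2 base model.
sample_question = "What is agile development?"

hypothetical_answer = llm_chain.run(sample_question)
print(hypothetical_answer)  # the text that actually gets embedded

query_vector = embeddings.embed_query(sample_question)
print(len(query_vector))  # 384 for all-MiniLM-L6-v2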
[ "lancedb.connect" ]
[((596, 631), 'os.path.dirname', 'os.path.dirname', (['current_script_dir'], {}), '(current_script_dir)\n', (611, 631), False, 'import os\n'), ((672, 699), 'sys.path.append', 'sys.path.append', (['parent_dir'], {}), '(parent_dir)\n', (687, 699), False, 'import sys\n'), ((1453, 1733), 'langchain.chat_models.AzureChatOpenAI', 'AzureChatOpenAI', ([], {'deployment_name': 'config.deployment_name', 'openai_api_key': 'config.openai_api_key', 'openai_api_base': 'config.openai_api_base', 'openai_api_type': 'config.openai_api_type', 'openai_api_version': 'config.openai_api_version', 'model_name': 'config.model_name', 'temperature': '(0.0)'}), '(deployment_name=config.deployment_name, openai_api_key=\n config.openai_api_key, openai_api_base=config.openai_api_base,\n openai_api_type=config.openai_api_type, openai_api_version=config.\n openai_api_version, model_name=config.model_name, temperature=0.0)\n', (1468, 1733), False, 'from langchain.chat_models import AzureChatOpenAI\n'), ((1765, 1817), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""all-MiniLM-L6-v2"""'}), "(model_name='all-MiniLM-L6-v2')\n", (1786, 1817), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((1832, 1901), 'langchain.chains.HypotheticalDocumentEmbedder.from_llm', 'HypotheticalDocumentEmbedder.from_llm', (['llm', 'emebeddings', '"""web_search"""'], {}), "(llm, emebeddings, 'web_search')\n", (1869, 1901), False, 'from langchain.chains import LLMChain, HypotheticalDocumentEmbedder\n'), ((2264, 2334), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['question']", 'template': 'prompt_template'}), "(input_variables=['question'], template=prompt_template)\n", (2278, 2334), False, 'from langchain.prompts import PromptTemplate\n'), ((2348, 2380), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (2356, 2380), False, 'from langchain.chains import LLMChain, HypotheticalDocumentEmbedder\n'), ((2394, 2471), 'langchain.chains.HypotheticalDocumentEmbedder', 'HypotheticalDocumentEmbedder', ([], {'llm_chain': 'llm_chain', 'base_embeddings': 'embeddings'}), '(llm_chain=llm_chain, base_embeddings=embeddings)\n', (2422, 2471), False, 'from langchain.chains import LLMChain, HypotheticalDocumentEmbedder\n'), ((2574, 2602), 'langchain.document_loaders.PyPDFLoader', 'PyPDFLoader', (['pdf_folder_path'], {}), '(pdf_folder_path)\n', (2585, 2602), False, 'from langchain.document_loaders import PyPDFLoader\n'), ((2651, 2715), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(128)', 'chunk_overlap': '(50)'}), '(chunk_size=128, chunk_overlap=50)\n', (2681, 2715), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2808, 2839), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (2823, 2839), False, 'import lancedb\n'), ((2998, 3061), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'table'}), '(documents, embeddings, connection=table)\n', (3020, 3061), False, 'from langchain.vectorstores import LanceDB\n'), ((463, 488), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (478, 488), False, 'import os\n')]
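The HyDE sample above (and several rows below) uses the same LanceDB bootstrap: connect to a local path, then create a table from a single embedded seed row so the vector/text/id schema is fixed up front. A minimal, self-contained sketch of that pattern; the path, table name, and seed text are placeholders, and embed() stands in for a real embedding model:

import lancedb

def embed(text: str) -> list[float]:
    # placeholder for a real embedding call such as OpenAIEmbeddings().embed_query
    return [float(ord(c) % 7) for c in text.ljust(8)[:8]]

db = lancedb.connect("/tmp/lancedb-demo")  # creates the directory if needed
table = db.create_table(
    "documents",
    data=[{"vector": embed("seed"), "text": "seed", "id": "1"}],
    mode="overwrite",  # replace any existing table with the same name
)
print(table.search(embed("seed")).limit(1).to_df()["text"].tolist())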
from langchain.document_loaders import TextLoader from langchain.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter from langchain.vectorstores import FAISS from langchain.document_loaders import UnstructuredMarkdownLoader from langchain.document_loaders import DirectoryLoader from langchain.chat_models.fireworks import ChatFireworks from langchain.vectorstores import LanceDB from langchain.chat_models import ChatOpenAI from langchain.callbacks import StdOutCallbackHandler import lancedb handler = StdOutCallbackHandler() from langchain.agents.agent_toolkits import ( create_vectorstore_agent, VectorStoreToolkit, VectorStoreInfo, ) # llm = ChatFireworks(model="accounts/fireworks/models/mistral-7b", temperature=0) llm = ChatOpenAI(temperature=0) db = lancedb.connect(".lance-data") table = db.open_table("journal") db = LanceDB(table, OpenAIEmbeddings()) vectorstore_info = VectorStoreInfo( name="flancian's journal", description="collection of markdown files containing flancian's daily journal", vectorstore=db, ) toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info) def run_engine(prompt): agent_executor = create_vectorstore_agent( llm=llm, toolkit=toolkit, verbose=True, prefix="always use sources" ) resp = agent_executor.run( input=prompt, ) return resp
[ "lancedb.connect" ]
[((554, 577), 'langchain.callbacks.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (575, 577), False, 'from langchain.callbacks import StdOutCallbackHandler\n'), ((793, 818), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (803, 818), False, 'from langchain.chat_models import ChatOpenAI\n'), ((824, 854), 'lancedb.connect', 'lancedb.connect', (['""".lance-data"""'], {}), "('.lance-data')\n", (839, 854), False, 'import lancedb\n'), ((949, 1096), 'langchain.agents.agent_toolkits.VectorStoreInfo', 'VectorStoreInfo', ([], {'name': '"""flancian\'s journal"""', 'description': '"""collection of markdown files containing flancian\'s daily journal"""', 'vectorstore': 'db'}), '(name="flancian\'s journal", description=\n "collection of markdown files containing flancian\'s daily journal",\n vectorstore=db)\n', (964, 1096), False, 'from langchain.agents.agent_toolkits import create_vectorstore_agent, VectorStoreToolkit, VectorStoreInfo\n'), ((1114, 1167), 'langchain.agents.agent_toolkits.VectorStoreToolkit', 'VectorStoreToolkit', ([], {'vectorstore_info': 'vectorstore_info'}), '(vectorstore_info=vectorstore_info)\n', (1132, 1167), False, 'from langchain.agents.agent_toolkits import create_vectorstore_agent, VectorStoreToolkit, VectorStoreInfo\n'), ((908, 926), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (924, 926), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1215, 1313), 'langchain.agents.agent_toolkits.create_vectorstore_agent', 'create_vectorstore_agent', ([], {'llm': 'llm', 'toolkit': 'toolkit', 'verbose': '(True)', 'prefix': '"""always use sources"""'}), "(llm=llm, toolkit=toolkit, verbose=True, prefix=\n 'always use sources')\n", (1239, 1313), False, 'from langchain.agents.agent_toolkits import create_vectorstore_agent, VectorStoreToolkit, VectorStoreInfo\n')]
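In contrast to rows that rebuild their table on every run, this sample reuses an existing table via open_table and only then wraps it for LangChain. A sketch of that reuse pattern, assuming the .lance-data directory and its journal table already exist (names taken from the sample; the query string is a placeholder):

import lancedb
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import LanceDB

conn = lancedb.connect(".lance-data")    # open the existing database directory
journal = conn.open_table("journal")   # raises if the table has not been created yet
vectorstore = LanceDB(journal, OpenAIEmbeddings())  # wrap the table for retrieval
hits = vectorstore.similarity_search("what did I journal about agents?", k=4)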
"""LanceDB vector store.""" from typing import Any, List, Optional from llama_index.data_structs.node import DocumentRelationship, Node from llama_index.vector_stores.types import ( NodeWithEmbedding, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) class LanceDBVectorStore(VectorStore): """The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. """ stores_text = True def __init__( self, uri: str, table_name: str = "vectors", nprobes: int = 20, refine_factor: Optional[int] = None, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb # noqa: F401 except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.nprobes = nprobes self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return None def add( self, embedding_results: List[NodeWithEmbedding], ) -> List[str]: data = [] ids = [] for result in embedding_results: data.append( { "id": result.id, "doc_id": result.ref_doc_id, "vector": result.embedding, "text": result.node.get_text(), } ) ids.append(result.id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. """ raise NotImplementedError("Delete not yet implemented for LanceDB.") def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: raise ValueError("Metadata filters not implemented for LanceDB yet.") table = self.connection.open_table(self.table_name) lance_query = ( table.search(query.query_embedding) .limit(query.similarity_top_k) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_df() nodes = [] for _, item in results.iterrows(): node = Node( doc_id=item.id, text=item.text, relationships={ DocumentRelationship.SOURCE: item.doc_id, }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=results["score"].tolist(), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((1711, 1731), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1726, 1731), False, 'import lancedb\n'), ((3712, 3811), 'llama_index.data_structs.node.Node', 'Node', ([], {'doc_id': 'item.id', 'text': 'item.text', 'relationships': '{DocumentRelationship.SOURCE: item.doc_id}'}), '(doc_id=item.id, text=item.text, relationships={DocumentRelationship.\n SOURCE: item.doc_id})\n', (3716, 3811), False, 'from llama_index.data_structs.node import DocumentRelationship, Node\n')]
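The query method above boils down to a fluent LanceDB search chain. The same chain against a raw table, outside llama_index; the path and vector dimension below are placeholders, while the table name and the column names (text, id, doc_id, score) match what add() writes and to_df() returns:

import lancedb

db = lancedb.connect("/tmp/lance-demo")
tbl = db.open_table("vectors")       # the class's default table name

query_vector = [0.0] * 1536          # stand-in for a real query embedding
q = tbl.search(query_vector).limit(10).nprobes(20)
q = q.refine_factor(10)              # optional: re-read extra candidates and re-rank in memory
df = q.to_df()                       # DataFrame with text, id, doc_id and score columns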
import streamlit as st import docx2txt import fitz import os from langchain.text_splitter import CharacterTextSplitter from langchain_openai import OpenAIEmbeddings from dotenv import load_dotenv from pathlib import Path from langchain_community.vectorstores import LanceDB import lancedb import os load_dotenv() env_path = Path('.')/'.env' load_dotenv(dotenv_path=env_path) OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") def connect_table(database): try: if database == 'lancedb': db = lancedb.connect("~/.lancedb") return db except Exception as e: return st.error(f"Error during LanceDB operations: {e}") def create_table(db): table = db.create_table( "recipes", data=[ { "vector": OpenAIEmbeddings().embed_query("Recipes"), "text": "Recipes", } ], mode="overwrite", ) return table def store_embeddings(content, document_title): try: db = connect_table("lancedb") table = create_table(db) documents = character_text_splitter(content, document_title) docsearch = LanceDB.from_documents(documents, OpenAIEmbeddings(), connection=table) if "sharedsearch" not in st.session_state: st.session_state["sharedsearch"] = docsearch query = "Get METHOD for Aloo Palak" # ->>>> similarity_search docs = docsearch.similarity_search(query, k=1, score=True) st.write(docs) # ->>>> similarity_search_by_vector # embedding_vector = OpenAIEmbeddings().embed_query(query) # docs = docsearch.max_marginal_relevance_search(embedding_vector, k=2, fetch_k=10) # found_docs = docs.amax_marginal_relevance_search(query, k=2, fetch_k=10) # for i, doc in enumerate(found_docs): # print(f"{i + 1}.", doc.page_content, "\n") return docs except FileNotFoundError as e: st.error(f"Error while Storing Embeddings: {e}") def read_docx(file_path): text = docx2txt.process(file_path) return text def read_pdf(file_path): if not os.path.exists(file_path): raise FileNotFoundError(f"PDF file not found at: {file_path}") pdf_document = fitz.open(file_path) text = "" for page_num in range(len(pdf_document)): page = pdf_document[page_num] text += page.get_text() return text def read_file(file): content = "" if file.type == "application/pdf": try: content = read_pdf(file.name) except FileNotFoundError as e: st.error(f"Error reading PDF file: {e}") elif file.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document": content = read_docx(file) formatted_text = content.upper() return formatted_text def character_text_splitter(content, document_title): text_splitter = CharacterTextSplitter( separator="\n\n\t\t", chunk_size=1000, chunk_overlap=200, length_function=len, is_separator_regex=False, keep_separator=True ) metadatas = [{"title": document_title}] documents = text_splitter.create_documents([content], metadatas=metadatas) return documents def main(): st.markdown("# File Upload") st.sidebar.markdown("# File Upload") document_title = st.text_input("Enter the document title:") uploaded_file = st.file_uploader("Upload a document (doc, pdf) or provide a link", type=["docx", "pdf"]) if uploaded_file is not None: try: content = read_file(uploaded_file) documents = store_embeddings(content, document_title) st.write("### Content:") st.write(documents) except FileNotFoundError as e: st.error(f"Error reading content from the uploaded file: {e}") if __name__ == "__main__": main()
[ "lancedb.connect" ]
[((302, 315), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (313, 315), False, 'from dotenv import load_dotenv\n'), ((344, 377), 'dotenv.load_dotenv', 'load_dotenv', ([], {'dotenv_path': 'env_path'}), '(dotenv_path=env_path)\n', (355, 377), False, 'from dotenv import load_dotenv\n'), ((396, 423), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (405, 423), False, 'import os\n'), ((327, 336), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (331, 336), False, 'from pathlib import Path\n'), ((2044, 2071), 'docx2txt.process', 'docx2txt.process', (['file_path'], {}), '(file_path)\n', (2060, 2071), False, 'import docx2txt\n'), ((2243, 2263), 'fitz.open', 'fitz.open', (['file_path'], {}), '(file_path)\n', (2252, 2263), False, 'import fitz\n'), ((2910, 3062), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n\n\t\t"""', 'chunk_size': '(1000)', 'chunk_overlap': '(200)', 'length_function': 'len', 'is_separator_regex': '(False)', 'keep_separator': '(True)'}), "(separator='\\n\\n\\t\\t', chunk_size=1000, chunk_overlap=\n 200, length_function=len, is_separator_regex=False, keep_separator=True)\n", (2931, 3062), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((3275, 3303), 'streamlit.markdown', 'st.markdown', (['"""# File Upload"""'], {}), "('# File Upload')\n", (3286, 3303), True, 'import streamlit as st\n'), ((3308, 3344), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""# File Upload"""'], {}), "('# File Upload')\n", (3327, 3344), True, 'import streamlit as st\n'), ((3367, 3409), 'streamlit.text_input', 'st.text_input', (['"""Enter the document title:"""'], {}), "('Enter the document title:')\n", (3380, 3409), True, 'import streamlit as st\n'), ((3430, 3523), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload a document (doc, pdf) or provide a link"""'], {'type': "['docx', 'pdf']"}), "('Upload a document (doc, pdf) or provide a link', type=[\n 'docx', 'pdf'])\n", (3446, 3523), True, 'import streamlit as st\n'), ((1487, 1501), 'streamlit.write', 'st.write', (['docs'], {}), '(docs)\n', (1495, 1501), True, 'import streamlit as st\n'), ((2125, 2150), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (2139, 2150), False, 'import os\n'), ((514, 543), 'lancedb.connect', 'lancedb.connect', (['"""~/.lancedb"""'], {}), "('~/.lancedb')\n", (529, 543), False, 'import lancedb\n'), ((604, 653), 'streamlit.error', 'st.error', (['f"""Error during LanceDB operations: {e}"""'], {}), "(f'Error during LanceDB operations: {e}')\n", (612, 653), True, 'import streamlit as st\n'), ((1185, 1203), 'langchain_openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1201, 1203), False, 'from langchain_openai import OpenAIEmbeddings\n'), ((1957, 2005), 'streamlit.error', 'st.error', (['f"""Error while Storing Embeddings: {e}"""'], {}), "(f'Error while Storing Embeddings: {e}')\n", (1965, 2005), True, 'import streamlit as st\n'), ((3692, 3716), 'streamlit.write', 'st.write', (['"""### Content:"""'], {}), "('### Content:')\n", (3700, 3716), True, 'import streamlit as st\n'), ((3729, 3748), 'streamlit.write', 'st.write', (['documents'], {}), '(documents)\n', (3737, 3748), True, 'import streamlit as st\n'), ((2595, 2635), 'streamlit.error', 'st.error', (['f"""Error reading PDF file: {e}"""'], {}), "(f'Error reading PDF file: {e}')\n", (2603, 2635), True, 'import streamlit as st\n'), ((3800, 3862), 'streamlit.error', 'st.error', (['f"""Error reading content from the uploaded file: {e}"""'], {}), "(f'Error reading content from the uploaded file: {e}')\n", (3808, 3862), True, 'import streamlit as st\n'), ((781, 799), 'langchain_openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (797, 799), False, 'from langchain_openai import OpenAIEmbeddings\n')]
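The splitter configuration in this row does the real work before anything touches LanceDB: every chunk carries the document title as metadata, so search hits can be traced back to their source document. A condensed sketch of that step (the separator and field name are taken from the sample; the inputs are placeholders):

from langchain.text_splitter import CharacterTextSplitter

def split_with_title(content: str, title: str):
    splitter = CharacterTextSplitter(
        separator="\n\n\t\t",  # the sample's custom separator
        chunk_size=1000,
        chunk_overlap=200,
    )
    # every chunk produced from `content` inherits the same metadata dict
    return splitter.create_documents([content], metadatas=[{"title": title}])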
## Abandoned approach by Sasha
from typing import List

import lancedb

from data_assessment_agent.config.config import cfg
from data_assessment_agent.model.vector_db_model import Questions


def connect_to_lance_questions():
    db_file = cfg.lance_db_questions
    db = lancedb.connect(db_file)
    return db


lance_db_questions_db = connect_to_lance_questions()


def query_for_topic(topic: str, question: str, limit=5) -> List[str]:
    table = lance_db_questions_db.open_table(topic)
    results = table.search(question).limit(limit).to_pydantic(Questions)
    return [r.question for r in results]


async def rank_questions(
    topic: str, question_answers: str, ranking_questions: List[str]
) -> List[str]:
    # fetch a few extra candidates, then keep only those present in ranking_questions
    res = query_for_topic(topic, question_answers, limit=len(ranking_questions) + 5)
    return [r for r in res if r in ranking_questions]


if __name__ == "__main__":
    from data_assessment_agent.test.provider.question_provider import (
        create_question_answers,
    )

    question_answers = create_question_answers()
    assert isinstance(question_answers, str)
[ "lancedb.connect" ]
[((272, 296), 'lancedb.connect', 'lancedb.connect', (['db_file'], {}), '(db_file)\n', (287, 296), False, 'import lancedb\n'), ((1019, 1044), 'data_assessment_agent.test.provider.question_provider.create_question_answers', 'create_question_answers', ([], {}), '()\n', (1042, 1044), False, 'from data_assessment_agent.test.provider.question_provider import create_question_answers\n')]
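The notable call in this row is .to_pydantic(Questions), which maps each hit onto a pydantic model instead of a DataFrame. The imported Questions schema is not shown in the row; the sketch below assumes a plausible shape for it using LanceDB's pydantic integration (the 384-dim vector and field name are assumptions):

from lancedb.pydantic import LanceModel, Vector

class Questions(LanceModel):
    # assumed schema: the question text plus its embedding column
    question: str
    vector: Vector(384)

# usage, mirroring query_for_topic above:
# results = table.search("How is backup data encrypted?").limit(5).to_pydantic(Questions)
# questions = [r.question for r in results]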
import os import shutil import tempfile import lancedb import pandas as pd import numpy as np import duckdb from typing import Optional, List from .data_load import load_batches, load_df from .model import BaseEmbeddingModel from .db import LabelsDB from .settings import DEFAULT_TABLE_NAME, DB_BATCH_LOAD, DB_BATCH_SIZE duckdb.sql( """ INSTALL sqlite; LOAD sqlite; """ ) class VectorDB: def __init__( self, db_path: str, db: lancedb.DBConnection, table: lancedb.table.Table, model: BaseEmbeddingModel, data_path: str, ) -> None: self.db = db self.model = model self.tbl = table self.data_path = data_path self.labelsdb_path = os.path.join(db_path, "labels.db") self.labelsdb = LabelsDB(self.labelsdb_path) @staticmethod def from_data_path( data_path: str, db_path: str, model: BaseEmbeddingModel, delete_existing=True, batch_load: bool = DB_BATCH_LOAD, batch_size: int = DB_BATCH_SIZE, ): db = lancedb.connect(db_path) table_name = DEFAULT_TABLE_NAME if delete_existing and table_name in db.table_names(): print(f"Dropping existing database {table_name}...") db.drop_table(table_name) print("done.") if table_name in db.table_names(): print(f'Opening existing table "{table_name}"...') tbl = db.open_table(table_name) return VectorDB(db_path, db, tbl, model, data_path) else: print(f'Creating table "{table_name}"...') if batch_load: tbl = load_batches(db, table_name, data_path, model, batch_size) else: tbl = load_df(db, table_name, data_path, model) return VectorDB(db_path, db, tbl, model, data_path) def count_rows(self) -> int: return len(self.tbl) def get(self, image_path: str) -> pd.DataFrame: lance_tbl = self.tbl.to_lance() return duckdb.sql( f"SELECT * FROM lance_tbl WHERE image_path='{image_path}';" ).to_df() def add_label(self, image_path: str, label: str): self.labelsdb.add(image_path=image_path, label=label) def remove_label(self, image_path: str, label: str): self.labelsdb.remove(image_path=image_path, label=label) def get_labels(self, image_path: Optional[str] = None) -> List[str]: return self.labelsdb.get(image_path=image_path) def get_label_counts(self) -> dict: return self.labelsdb.counts() def random_search(self, limit: int) -> pd.DataFrame: lance_tbl = self.tbl.to_lance() df_hits = duckdb.sql( f""" SELECT lance_tbl.*, grouped_labels.labels FROM lance_tbl LEFT OUTER JOIN ( SELECT image_path, list(label) AS labels FROM sqlite_scan('{self.labelsdb_path}', 'labels') GROUP BY image_path ) AS grouped_labels ON (lance_tbl.image_path = grouped_labels.image_path) USING SAMPLE {limit} ROWS; """ ).to_df() df_hits["labels"] = df_hits["labels"].fillna("").apply(list) df_hits.drop(columns=["vector"], inplace=True) return df_hits def search_by_image_path( self, image_path: str, limit: int, exclude_labeled: bool ) -> pd.DataFrame: full_image_path = os.path.join(self.data_path, image_path) image_embedding = self.model.embed_image_path(full_image_path) df_hits = self.__vector_embedding_search( image_embedding, limit, exclude_image_path=image_path, exclude_labeled=exclude_labeled, ) return df_hits def search_by_text( self, query_string: str, limit: int, exclude_labeled: bool ) -> pd.DataFrame: query_str_embedding = self.model.embed_text(query=query_string) df_hits = self.__vector_embedding_search( query_str_embedding, limit, exclude_labeled ) return df_hits def __vector_embedding_search( self, embedding: np.ndarray, limit: int, exclude_labeled: bool, exclude_image_path: str = None, ) -> pd.DataFrame: if exclude_labeled: exclude_image_paths = set(self.labelsdb.get_image_paths()) else: exclude_image_paths = set() 
if exclude_image_path is not None: exclude_image_paths.add(exclude_image_path) if len(exclude_image_paths) == 0: df_hits = self.tbl.search(embedding).limit(limit).to_df() else: exclude_image_paths_str = ",".join( [f"'{image_path}'" for image_path in exclude_image_paths] ) df_hits = ( self.tbl.search(embedding) .where(f"image_path NOT IN ({exclude_image_paths_str})") .limit(limit + len(exclude_image_paths)) .to_df() ) df_hits = df_hits[0:limit] df_hits.drop(columns=["vector"], inplace=True) df_hits = self.__join_labels(left_table=df_hits) return df_hits def __join_labels(self, left_table: pd.DataFrame) -> pd.DataFrame: df_join = duckdb.sql( f""" SELECT left_table.*, grouped_labels.labels FROM left_table LEFT OUTER JOIN ( SELECT image_path, list(label) AS labels FROM sqlite_scan('{self.labelsdb_path}', 'labels') GROUP BY image_path ) AS grouped_labels ON (left_table.image_path = grouped_labels.image_path) ORDER BY left_table._distance ASC; """ ).to_df() df_join["labels"] = df_join["labels"].fillna("").apply(list) return df_join def create_zip_labeled_binary_data(self, output_dir: str, filename: str) -> str: os.makedirs(output_dir, exist_ok=True) lance_tbl = self.tbl.to_lance() df = duckdb.sql( f""" SELECT lance_tbl.*, grouped_labels.labels FROM lance_tbl INNER JOIN ( SELECT image_path, label AS labels FROM sqlite_scan('{self.labelsdb_path}', 'labels') WHERE (label='relevant' OR label='irrelevant') ) AS grouped_labels ON (lance_tbl.image_path = grouped_labels.image_path); """ ).to_df() df.drop(columns=["vector"], inplace=True) # Save df_hits to a CSV file in a temporary folder with tempfile.TemporaryDirectory() as tmpdir: csv_path = os.path.join(tmpdir, "data.csv") df.to_csv(csv_path, index=False) # Copy all image paths to the same temporary folder for image_path in df["image_path"]: src_path = os.path.join(self.data_path, image_path) dst_path = os.path.join(tmpdir, image_path) print("Copying", src_path, "to", dst_path) os.makedirs(os.path.dirname(dst_path), exist_ok=True) shutil.copy(src_path, dst_path) # Create a zip file containing all files from the temporary folder if output_dir is None: output_dir = tempfile.gettempdir() if filename.endswith(".zip"): zip_path = os.path.join(output_dir, filename) else: zip_path = os.path.join(output_dir, filename + ".zip") shutil.make_archive(zip_path[:-4], "zip", tmpdir) return zip_path
[ "lancedb.connect" ]
[((323, 383), 'duckdb.sql', 'duckdb.sql', (['"""\n    INSTALL sqlite;\n    LOAD sqlite;\n    """'], {}), '("""\n    INSTALL sqlite;\n    LOAD sqlite;\n    """)\n', (333, 383), False, 'import duckdb\n'), ((743, 777), 'os.path.join', 'os.path.join', (['db_path', '"""labels.db"""'], {}), "(db_path, 'labels.db')\n", (755, 777), False, 'import os\n'), ((1088, 1112), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (1103, 1112), False, 'import lancedb\n'), ((3438, 3478), 'os.path.join', 'os.path.join', (['self.data_path', 'image_path'], {}), '(self.data_path, image_path)\n', (3450, 3478), False, 'import os\n'), ((5927, 5965), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (5938, 5965), False, 'import os\n'), ((6543, 6572), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (6570, 6572), False, 'import tempfile\n'), ((6607, 6639), 'os.path.join', 'os.path.join', (['tmpdir', '"""data.csv"""'], {}), "(tmpdir, 'data.csv')\n", (6619, 6639), False, 'import os\n'), ((7474, 7523), 'shutil.make_archive', 'shutil.make_archive', (['zip_path[:-4]', '"""zip"""', 'tmpdir'], {}), "(zip_path[:-4], 'zip', tmpdir)\n", (7493, 7523), False, 'import shutil\n'), ((2056, 2127), 'duckdb.sql', 'duckdb.sql', (['f"SELECT * FROM lance_tbl WHERE image_path=\'{image_path}\';"'], {}), '(f"SELECT * FROM lance_tbl WHERE image_path=\'{image_path}\';")\n', (2066, 2127), False, 'import duckdb\n'), ((2723, 3125), 'duckdb.sql', 'duckdb.sql', (['f"""\n            SELECT lance_tbl.*, grouped_labels.labels FROM lance_tbl\n            LEFT OUTER JOIN (\n                SELECT image_path, list(label) AS labels FROM sqlite_scan(\'{self.labelsdb_path}\', \'labels\') GROUP BY image_path\n            ) AS grouped_labels\n            ON (lance_tbl.image_path = grouped_labels.image_path)\n            USING SAMPLE {limit} ROWS;\n            """'], {}), '(\n    f"""\n            SELECT lance_tbl.*, grouped_labels.labels FROM lance_tbl\n            LEFT OUTER JOIN (\n                SELECT image_path, list(label) AS labels FROM sqlite_scan(\'{self.labelsdb_path}\', \'labels\') GROUP BY image_path\n            ) AS grouped_labels\n            ON (lance_tbl.image_path = grouped_labels.image_path)\n            USING SAMPLE {limit} ROWS;\n            """\n    )\n', (2733, 3125), False, 'import duckdb\n'), ((5307, 5720), 'duckdb.sql', 'duckdb.sql', (['f"""\n            SELECT left_table.*, grouped_labels.labels FROM left_table\n            LEFT OUTER JOIN (\n                SELECT image_path, list(label) AS labels FROM sqlite_scan(\'{self.labelsdb_path}\', \'labels\') GROUP BY image_path\n            ) AS grouped_labels\n            ON (left_table.image_path = grouped_labels.image_path)\n            ORDER BY left_table._distance ASC;\n            """'], {}), '(\n    f"""\n            SELECT left_table.*, grouped_labels.labels FROM left_table\n            LEFT OUTER JOIN (\n                SELECT image_path, list(label) AS labels FROM sqlite_scan(\'{self.labelsdb_path}\', \'labels\') GROUP BY image_path\n            ) AS grouped_labels\n            ON (left_table.image_path = grouped_labels.image_path)\n            ORDER BY left_table._distance ASC;\n            """\n    )\n', (5317, 5720), False, 'import duckdb\n'), ((6019, 6399), 'duckdb.sql', 'duckdb.sql', (['f"""\n            SELECT lance_tbl.*, grouped_labels.labels FROM lance_tbl\n            INNER JOIN (\n                SELECT image_path, label AS labels FROM sqlite_scan(\'{self.labelsdb_path}\', \'labels\') WHERE (label=\'relevant\' OR label=\'irrelevant\')\n            ) AS grouped_labels\n            ON (lance_tbl.image_path = grouped_labels.image_path);\n            """'], {}), '(\n    f"""\n            SELECT lance_tbl.*, grouped_labels.labels FROM lance_tbl\n            INNER JOIN (\n                SELECT image_path, label AS labels FROM sqlite_scan(\'{self.labelsdb_path}\', \'labels\') WHERE (label=\'relevant\' OR label=\'irrelevant\')\n            ) AS grouped_labels\n            ON (lance_tbl.image_path = grouped_labels.image_path);\n            """\n    )\n', (6029, 6399), False, 'import duckdb\n'), ((6825, 6865), 'os.path.join', 'os.path.join', (['self.data_path', 'image_path'], {}), '(self.data_path, image_path)\n', (6837, 6865), False, 'import os\n'), ((6893, 6925), 'os.path.join', 'os.path.join', (['tmpdir', 'image_path'], {}), '(tmpdir, image_path)\n', (6905, 6925), False, 'import os\n'), ((7071, 7102), 'shutil.copy', 'shutil.copy', (['src_path', 'dst_path'], {}), '(src_path, dst_path)\n', (7082, 7102), False, 'import shutil\n'), ((7247, 7268), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (7266, 7268), False, 'import tempfile\n'), ((7338, 7372), 'os.path.join', 'os.path.join', (['output_dir', 'filename'], {}), "(output_dir, filename)\n", (7350, 7372), False, 'import os\n'), ((7418, 7461), 'os.path.join', 'os.path.join', (['output_dir', "(filename + '.zip')"], {}), "(output_dir, filename + '.zip')\n", (7430, 7461), False, 'import os\n'), ((7013, 7038), 'os.path.dirname', 'os.path.dirname', (['dst_path'], {}), '(dst_path)\n', (7028, 7038), False, 'import os\n')]
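The pivotal move in this row is tbl.to_lance(): it exposes the table as a Lance dataset that DuckDB can scan by local variable name, which is what lets the vector data be joined against the SQLite labels in plain SQL. A minimal sketch of that hand-off; the path and column name are placeholders:

import duckdb
import lancedb

db = lancedb.connect("/tmp/lance-demo")
tbl = db.open_table("images")

# DuckDB's replacement scan resolves `lance_tbl` from the local namespace,
# so the Lance dataset can be queried like any SQL table.
lance_tbl = tbl.to_lance()
df = duckdb.sql("SELECT image_path FROM lance_tbl LIMIT 5;").to_df()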
import os
import datetime
import sys
import json
import time

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import LanceDB
from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import CharacterTextSplitter
import lancedb
import openai
from flask import Flask, render_template, request, jsonify
from langchain.text_splitter import RecursiveCharacterTextSplitter
from werkzeug.utils import secure_filename
from deep_motion import DeepMotionHandler
from sketchfab import SketchfabHandler

# Read the OpenAI API key from the environment; never hard-code secrets in source.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")

loader = DirectoryLoader("static/langchain_documents/")
loaded_documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=100)
documents = text_splitter.split_documents(loaded_documents)
#documents = TextSplitter().split_documents(documents)
print(type(documents))
embeddings = OpenAIEmbeddings()

db = lancedb.connect("static/lanceDB")
table = db.open_table("my_table")
# table = db.create_table(
#     "my_table",
#     data=[
#         {
#             "vector": embeddings.embed_query("Hello World"),
#             "text": "Hello World",
#             "id": "1",
#         }
#     ],
#     mode="overwrite",
# )
docsearch = LanceDB.from_documents(documents, embeddings, connection=table)

UPLOAD_FOLDER = '/uploaded_videos'
ALLOWED_EXTENSIONS = {'mp4', 'mov'}

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

current_convert_config = {"trainee_url": "", "coach_url": ""}


@app.route("/")
def home():
    return render_template("home.html")


@app.route("/shottracer")
def shottracer():
    print("here", flush=True)
    return render_template("shottracer.html")


@app.route("/about")
def about():
    return render_template("about.html")


@app.route("/upload", methods=["POST"])
def upload():
    print("Entered upload", flush=True)
    try:
        uploaded_file = request.files['file']
        if uploaded_file and uploaded_file.filename.endswith('.mp4'):
            print("uploaded file and is mp4", flush=True)
            uploaded_file.save('uploaded_video.mp4')
            return {'message': 'Video uploaded and saved successfully'}
        else:
            print("upload failed: not an .mp4 file", flush=True)
            return {'message': 'Invalid video file'}
    except Exception as e:
        return {'message': 'Error uploading video: ' + str(e)}


@app.route('/process', methods=['POST'])
def process():
    print("in process", flush=True)
    print(request.data, flush=True)
    data = json.loads(request.data)
    result = data['value']
    current_convert_config["trainee_url"] = "static/videos/" + result
    print("selected trainee: " + current_convert_config["trainee_url"], flush=True)
    return jsonify(result=result)


@app.route('/coach_select', methods=["POST"])
def coach_select():
    data = json.loads(request.data)
    result = data['value']
    current_convert_config["coach_url"] = "static/videos/" + result
    print("selected coach: " + current_convert_config["coach_url"], flush=True)
    return jsonify(result=result)


@app.route('/convert', methods=["POST"])
def convert():
    print("converting", flush=True)
    if current_convert_config["trainee_url"] == "" or current_convert_config["coach_url"] == "":
        print(current_convert_config)
        return current_convert_config
    deep_motion = DeepMotionHandler()
    trainee_resp, trainee_input = deep_motion.new_job(current_convert_config["trainee_url"], download=False)
    coach_resp, coach_input = deep_motion.new_job(current_convert_config["coach_url"], download=False)
    trainee_fbx = deep_motion.download_job(trainee_resp, trainee_input)
    coach_fbx = deep_motion.download_job(coach_resp, coach_input)
    print(trainee_fbx, flush=True)
    print(coach_fbx, flush=True)
    sketchfab_handler = SketchfabHandler()
    trainee_url = sketchfab_handler.upload(trainee_fbx)
    coach_url = sketchfab_handler.upload(coach_fbx)
    print(trainee_url, flush=True)
    print(coach_url, flush=True)
    while not sketchfab_handler.poll_processing_status(trainee_url) or not sketchfab_handler.poll_processing_status(coach_url):
        time.sleep(1)
    return jsonify('{"trainee": "' + trainee_url.rsplit('/', 1)[-1] + '", "coach": "' + coach_url.rsplit('/', 1)[-1] + '"}')
    #return jsonify('{"trainee": "791e22a2678e4e05b56df1107dd1f8e8", "coach": "af8a9327edf3432f981000210da42022"}')
    #return (trainee_url.rsplit('/', 1)[-1], coach_url.rsplit('/', 1)[-1])


@app.route("/search", methods=["POST"])
def docSearch():
    print("in search", flush=True)
    print(request.data, flush=True)
    data = json.loads(request.data)
    question = data['question']
    docs = docsearch.similarity_search(question)
    prompt = f"""Given the question: {question}
    and this context: {docs}
    Answer the question as best as possible.
    Use any relevant information from the context to enrich your answer.
    Final Answer: """
    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        max_tokens=300
    )
    generated_text = response.choices[0].text
    return jsonify(result=generated_text)


if __name__ == "__main__":
    app.run(host="127.0.0.1", port=8080, debug=True)
[ "lancedb.connect" ]
[((730, 776), 'langchain.document_loaders.DirectoryLoader', 'DirectoryLoader', (['"""static/langchain_documents/"""'], {}), "('static/langchain_documents/')\n", (745, 776), False, 'from langchain.document_loaders import DirectoryLoader\n'), ((827, 883), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(300)', 'chunk_overlap': '(100)'}), '(chunk_size=300, chunk_overlap=100)\n', (848, 883), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1035, 1053), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1051, 1053), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1060, 1093), 'lancedb.connect', 'lancedb.connect', (['"""static/lanceDB"""'], {}), "('static/lanceDB')\n", (1075, 1093), False, 'import lancedb\n'), ((1389, 1452), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'table'}), '(documents, embeddings, connection=table)\n', (1411, 1452), False, 'from langchain.vectorstores import LanceDB\n'), ((1532, 1547), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1537, 1547), False, 'from flask import Flask, render_template, request, jsonify\n'), ((1695, 1723), 'flask.render_template', 'render_template', (['"""home.html"""'], {}), "('home.html')\n", (1710, 1723), False, 'from flask import Flask, render_template, request, jsonify\n'), ((1810, 1844), 'flask.render_template', 'render_template', (['"""shottracer.html"""'], {}), "('shottracer.html')\n", (1825, 1844), False, 'from flask import Flask, render_template, request, jsonify\n'), ((1891, 1920), 'flask.render_template', 'render_template', (['"""about.html"""'], {}), "('about.html')\n", (1906, 1920), False, 'from flask import Flask, render_template, request, jsonify\n'), ((2666, 2690), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (2676, 2690), False, 'import json\n'), ((2892, 2914), 'flask.jsonify', 'jsonify', ([], {'result': 'result'}), '(result=result)\n', (2899, 2914), False, 'from flask import Flask, render_template, request, jsonify\n'), ((2994, 3018), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (3004, 3018), False, 'import json\n'), ((3208, 3230), 'flask.jsonify', 'jsonify', ([], {'result': 'result'}), '(result=result)\n', (3215, 3230), False, 'from flask import Flask, render_template, request, jsonify\n'), ((3516, 3535), 'deep_motion.DeepMotionHandler', 'DeepMotionHandler', ([], {}), '()\n', (3533, 3535), False, 'from deep_motion import DeepMotionHandler\n'), ((3982, 4000), 'sketchfab.SketchfabHandler', 'SketchfabHandler', ([], {}), '()\n', (3998, 4000), False, 'from sketchfab import SketchfabHandler\n'), ((4787, 4811), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (4797, 4811), False, 'import json\n'), ((5127, 5213), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-003"""', 'prompt': 'prompt', 'max_tokens': '(300)'}), "(engine='text-davinci-003', prompt=prompt,\n max_tokens=300)\n", (5151, 5213), False, 'import openai\n'), ((5299, 5329), 'flask.jsonify', 'jsonify', ([], {'result': 'generated_text'}), '(result=generated_text)\n', (5306, 5329), False, 'from flask import Flask, render_template, request, jsonify\n'), ((4315, 4328), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4325, 4328), False, 'import time\n')]
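The /search route above is a bare-bones retrieval-augmented generation loop: fetch similar chunks, splice them into a prompt, complete. The same flow as a plain function, using the legacy openai.Completion API that the sample itself uses (docsearch is assumed to be the LanceDB store built earlier in the row):

import openai

def rag_answer(question: str, docsearch) -> str:
    # 1) retrieve the chunks most similar to the question
    docs = docsearch.similarity_search(question)
    # 2) splice the retrieved context into the prompt
    prompt = (
        f"Given the question: {question} and this context: {docs} "
        "Answer the question as best as possible. Final Answer: "
    )
    # 3) complete with the legacy Completion endpoint, as in the sample
    response = openai.Completion.create(
        engine="text-davinci-003", prompt=prompt, max_tokens=300
    )
    return response.choices[0].text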
from PIL import Image import streamlit as st import openai #exercise 11 from langchain.llms import OpenAI from langchain.prompts import PromptTemplate from langchain.chains import LLMChain #exercise 12 from langchain.memory import ConversationBufferWindowMemory #exercise 13 from langchain.document_loaders import TextLoader,PyPDFLoader from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import LanceDB import lancedb import os import tempfile #exercise 15 import sqlite3 import pandas as pd from datetime import datetime #exercise 16 from langchain.agents import ConversationalChatAgent, AgentExecutor from langchain.callbacks import StreamlitCallbackHandler from langchain.chat_models import ChatOpenAI from langchain.memory import ConversationBufferMemory from langchain.memory.chat_message_histories import StreamlitChatMessageHistory from langchain.tools import DuckDuckGoSearchRun #Exercise 17 from langchain.agents import tool import json #Exercise 18 from pandasai import SmartDataframe from pandasai.llm.openai import OpenAI import matplotlib.pyplot as plt os.environ["OPENAI_API_KEY"] = st.secrets["openapi_key"] openai.api_key = st.secrets["openapi_key"] #Global ex 13 cwd = os.getcwd() WORKING_DIRECTORY = os.path.join(cwd, "database") if not os.path.exists(WORKING_DIRECTORY): os.makedirs(WORKING_DIRECTORY) #ex15 DB_NAME = os.path.join(WORKING_DIRECTORY, "default_db") def template(): st.subheader("Template") st.write("Instruction lines.") st.markdown("**:blue[Code]**") st.code(''' #exercise code here ''') st.markdown("**:blue[Code]**") with st.expander("Reveal Code"): st.code(''' #challenge code here ''') st.markdown("**:red[Code Output]**") # Actual code here def class1_prep(): # st.subheader("Course Slides") # st.markdown("https://go.gov.sg/itdchatbotprototype") st.divider() st.subheader("Pre-workshop Setup") # st.divider() st.markdown("""1. Visual Studio (VS Code): this is the Integrated Development Environment (IDE) of choice by many coders and will make it easier for us to code our app.""") st.markdown("""2. Python (3.10 release or later): this is the coding language of choice for many data science related apps.""") st.write(""" 3. Once you have installed the above, we will need to set up a virtual environment and install the libraries in that environment.\n Create a folder named “chatbot” anywhere, e.g. in your Desktop.\n Open VS Code and navigate to the folder in a new terminal window.\n Create a virtual environment and activate it by entering the following commands in the terminal: """) st.markdown(" **a) Mac**") st.code(''' python3 -m venv venv source venv/bin/activate ''') st.markdown("(You should see a (venv) appear in your terminal window prompt)") st.markdown("#") st.markdown(" **b) Windows**") st.code(''' python -m venv venv cd venv\Scripts activate ''') st.markdown("4. While in your virtual environment, install the libraries using pip which should already be installed together with Python:") st.code(''' pip install streamlit openai ''') st.markdown(" To test if Streamlit is installed properly, run this command:") st.code(''' streamlit hello ''') st.markdown(" You should see a Streamlit application running at http://localhost:8501") st.markdown(" Type Ctrl + C in VS Code terminal to stop the Streamlit app") pass def class1_hw1(): st.subheader("My first Hello World app") st.divider() st.markdown("""1. Create a new file called 'main.py'.""") st.markdown("""2. 
Copy the code below and paste it in the newly created helloworld.py file.""") st.markdown("**:blue[Code]**") st.code(''' import streamlit as st #my first Hello World app st.write("Hello World") ''') st.markdown("Install the watchdog module by running the command below in the terminal.") st.code("pip install watchdog") st.markdown("Now you don't have to keep restarting the app to see the changes you make to the code. Just refresh the browser.") st.write("Save your file and run the app by typing the following command in the terminal:") st.code(''' streamlit run main.py ''') st.markdown("""3. You should see a Streamlit application running at http://localhost:8501""") st.markdown("""4. Type Ctrl + C in VS Code terminal to stop the Streamlit app""") st.markdown("**:red[Code Output]**") st.write("Hello World") pass def objectives(): st.subheader("Objectives") st.markdown("1. Learn how to use Python and Streamlit library to create an interactive web app.") st.markdown("2. Learn how to integrate and use OpenAI's API in their streamlit application to create a simple chatbot.") st.markdown("3. Learn how to apply basic prompt engineering to enhance the interaction with the chatbot.") def workshop_outline(): st.subheader("Outline") st.markdown("Part 0: Workshop introduction and rules") st.markdown("Part 1: Introduction to Python and Streamlit") st.markdown("Part 2: Creating a rule-based chatbot") st.markdown("Part 3: Creating a chatbot using OpenAI's API") st.markdown("Part 4: Modifying your chatbot with prompt engineering") def team_introduction(): st.write("Do introduce yourself to your teammates:\n", "1) name\n", "2) division\n", "3) role") st.write("Please also share your favourite Star Wars character and why!") image = Image.open('team_introductions.jpeg') st.image(image, caption='Hello there!') def workshop_rules(): st.subheader("Workshop Rules") st.write("1. Ask if you have questions.") st.write("2. Be open to different ways to solve the problem.") st.write("3. Try. Fail. Learn. Repeat.") st.write("4. Seek help from other team members.") st.write("5. Collaborate, if possible, for the challenges.") st.write("6. Approach facilitators if your team cannot solve the problem.") st.write("7. Toilet break is own-time-own-target.") st.write("8. Have fun!") def vscode_ui(): st.subheader("Navigating the VS Code interface") image = Image.open('VSCode_interface.png') st.image(image, caption='VS Code UI layout') st.markdown("**A: Activity Bar: this is where you can see the different activities you can do in VS Code.**") st.markdown(" Explorer: this is where you can see all the files and folders in your project.") st.markdown(" Source Control: this is where you can see the changes you have made to your project.") st.markdown(" Extensions: this is where you can install extensions to VS Code.") st.markdown(" Run and Debug: this is where you can debug your code.") st.markdown("**B: Side Bar: this is where you can see the different views of your project.**") st.markdown("**C: Editor: this is where you can see the code you have written in your project.**") st.markdown("**D: Panel: this is where you can see the different panels you have opened in your project.**") st.markdown(" Terminal: this is where you can run commands in your project.") st.markdown(" Output: this is where you can see the output of your code.") st.markdown(" Problems: this is where you can see the errors in your code.") st.markdown("**E. 
Status Bar: this is where you can see the status of your project.**") def command_palette_indent(): st.markdown("Python is very particular about indentation.\nUse the command palette to automatically indent your code.\n\nWindows: Ctrl-Shift-P \nMac: Command-Shift-P\n\nSelect the option to *Convert Indentation to Tabs*") image = Image.open('command_palette_command.png') st.image(image, caption='Command Palette auto-indent command') def final_product(): st.write("This is what we will working towards and building by the end of the workshop today.") st.write("Do try out the chatbot below!") st.subheader("**:green[Feel the force! Yoda Chatbot]**") image = Image.open('yoda.jpg') st.image(image, caption='Master Yoda at your service') st.divider() openai.api_key = st.secrets["openapi_key"] prompt_template = """ "Speak like Yoda from Star Wars for every question that was asked, do not give a direct answer but ask more questions in the style of wise Yoda from Star Wars" """ if "openai_model" not in st.session_state: st.session_state["openai_model"] = "gpt-3.5-turbo" if "msg_bot" not in st.session_state: st.session_state.msg_bot = [] for message in st.session_state.msg_bot: with st.chat_message(message["role"]): st.markdown(message["content"]) try: if prompt := st.chat_input("What is up?"): st.session_state.msg_bot.append({"role": "user", "content": prompt}) with st.chat_message("user"): st.markdown(prompt) with st.chat_message("assistant"): message_placeholder = st.empty() full_response = "" for response in openai.ChatCompletion.create( model=st.session_state["openai_model"], messages=[ {"role": "system", "content": prompt_template}, {"role": "user", "content": prompt}, ], stream=True, ): full_response += response.choices[0].delta.get("content", "") message_placeholder.markdown(full_response + "▌") message_placeholder.markdown(full_response) st.session_state.msg_bot.append({"role": "assistant", "content": full_response}) except Exception as e: st.error(e) pass def resources(): st.subheader("Useful resources and references") st.markdown("1. [Streamlit documentation](https://docs.streamlit.io/en/stable/)") st.markdown("2. [OpenAI API documentation](https://beta.openai.com/docs/introduction)") st.markdown("3. [VS Code documentation](https://code.visualstudio.com/docs)") st.markdown("4. [Python documentation](https://docs.python.org/3/)") st.markdown("5. [Python cheatsheet](https://www.pythoncheatsheet.org/)") st.markdown("6. [Python for beginners](https://www.python.org/about/gettingstarted/)") st.markdown("7. 
[ChatGPT](https://chat.openai.com/) - you can ask ChatGPT to generate code for you!") st.markdown("**Notes for this workshop course:** \n- you may do a single click to copy all the code \n- challenge code is hidden, click reveal to see the code") st.markdown("Python is very particular about indentation.\nUse the command palette to automatically indent your code.\n\nWindows: Ctrl-Shift-P \nMac: Command-Shift-P\n\nSelect the option to *Convert Indentation to Tabs*") image = Image.open('command_palette_command.png') st.image(image, caption='Command Palette auto-indent command') def part1_intro1(): st.subheader("Streamlit") st.markdown(""" * an open-source Python library * used extensively for machine learning and data science * helps to create interactive web apps in just a few lines of code * highly flexible and supports complex interactive apps with highly customisable UI * Some real world examples: * CherGPT in String * Metacog for CotF MOE * AILC prototype for MOE """) def ex1(): # Exercise 1 : Functions st.write("Hello World") # only prints the Hello {name} if input box is not empty name = st.text_input("Enter your name") if name: st.write("Hello " + name) def class1_ex1(): st.subheader("Exercise 1: Functions") st.markdown("In the ***main.py*** file, the code below is already in ***ex1()*** in the ***part1.py*** file.") st.write("The code for *helloworld* is inside what you call a Python function.") st.write("The ***def main()*** function and ***if _ _ name _ _ == '_ _ main _ _'*** statement are coding conventions for any Python programme.") st.write("You need to include an import statement in ***main.py*** to import the ***ex1()*** function from the other file.") st.code(''' from part1 import ex1 ''') st.write("You will need to do the same for all the other exercises and challenges for the rest of the workshop. 
The code exercises are already in the respective files: ***part1.py, part2.py, part3.py, part4.py***.") st.markdown("**:blue[Code]**") st.code(''' import streamlit as st from part1 import ex1 #Exercise 1: Functions def ex1(): st.write("Hello World") name = st.text_input("Enter your name") if name: st.write("Hello " + name) def main(): ex1() if __name__ == "__main__": main() ''') st.markdown("Run the code by typing the following into the terminal:") st.code("streamlit run main.py") st.markdown("You should see the following behaviour in your browser window:") st.markdown("**:red[Code Output]**") #actual code here ex1() def ch1(): name = st.text_input("Enter your name") gender = st.selectbox("State your gender", ["Male", "Female"]) age = st.text_input("State your age", 18) if name and gender and age: st.text(f"Hello {name}, you are {gender} and this year you are {age} years old") def class1_ch1(): st.subheader("Challenge 1: Input, Output and Variables") st.write("Create a new function called ***ch1()*** in ***part1.py*** and call it in the main function.") st.write("Create three variables *name*, *age* and *gender*, and obtain these from the user.") st.write("Once the user filled up the input boxes, display back the information to the user.") st.write("Code hint: the following piece of code checks if *name* has been filled, and if so, displays it back to the user.") st.code(''' name = st.text_input("Enter your name") if name: st.text(f"Hello {name}") ''') st.markdown("**:blue[Code]**") with st.expander("Reveal Code"): st.code(''' def ch1(): name = st.text_input("Enter your name") gender = st.selectbox("State your gender", ["Male", "Female"]) age = st.text_input("State your age", 18) if name and gender and age: st.text(f"Hello {name}, you are {gender} and this year you are {age} years old") ''') st.markdown("**:red[Code Output]**") # Actual code here ch1() def ex2(): gender = st.selectbox("State your gender", ["Male", "Female"]) age = int(st.text_input("State your age", 18)) photo = st.camera_input("Smile! take a picture here.") # conditional logic to run different statements if age >= 21 and gender == "Male": st.write("You are a male adult") elif age < 21 and gender == "Male": st.write("You are a young boy") elif age >= 21 and gender == "Female": st.write("You are a female adult") elif age < 21 and gender == "Female": st.write("You are a young girl") if photo: st.write("Here is your photo: ") st.image(photo) else: st.write("No photo taken") def class1_ex2(): st.subheader("Exercise 2: If-else logical conditionals") st.markdown("If-else statements help control the flow and logic of our application.") st.markdown("**:blue[Code]**") st.code(''' def ex2(): gender = st.selectbox("State your gender", ["Male", "Female"]) age = int(st.text_input("State your age", 18)) photo = st.camera_input("Smile! 
take a picture here.") # conditional logic to run different statements if age >= 21 and gender == "Male": st.write("You are a male adult") elif age < 21 and gender == "Male": st.write("You are a young boy") elif age >= 21 and gender == "Female": st.write("You are a female adult") elif age < 21 and gender == "Female": st.write("You are a young girl") if photo: st.write("Here is your photo: ") st.image(photo) else: st.write("No photo taken") ''') st.markdown("**:red[Code Output]**") #actual code here ex2() def ex3(): # Data list fruits = ["apple", "banana", "orange"] # For loop to show list for fruit in fruits: st.write(fruit) # Dictionary person = {"name": "John", "age": 30, "gender": "Male", "city": "New York"} # Print out the items in the dictionary st.write("Here is your *person* dictionary: ") st.write(person) # for loop to show dictionary list st.write("You can also show individual items in the dictionary like this: ") for key, value in person.items(): st.write(key + ": " + str(value)) # get user input to update the dictionary name = st.text_input("Enter your name", "John") age = st.text_input("State your age", 30) gender = st.selectbox("State your gender", ["Male", "Female"]) city = st.text_input("State your city", "New York") person["name"] = name person["age"] = age person["gender"] = gender person["city"] = city st.write("Here is your updated *person* dictionary: ") st.write(person) def class1_ex3(): st.subheader("Exercise 3: Data and Loops ") st.write("We can store data in a list or dictionary and display the data using a for loop.") st.write("Append the following code to the ***main.py*** file. Refresh the browser to see the changes.") st.write("You should see output similar to the *Code Output* below.") st.markdown("**:blue[Code]**") st.code(''' #Data and Loops def ex3(): # Data list fruits = ["apple", "banana", "orange"] # For loop to show list for fruit in fruits: st.write(fruit) # Dictionary person = {"name": "John", "age": 30, "city": "New York"} # Print out the items in the dictionary st.write("Here is your *person* dictionary: ") st.write(person) # for loop to show dictionary list st.write("You can also show individual items in the dictionary like this: ") for key, value in person.items(): st.write(key + ": " + str(value)) # get user input to update the dictionary name = st.text_input("Enter your name", "John") age = st.text_input("State your age", 30) gender = st.selectbox("State your gender", ["Male", "Female"]) city = st.text_input("State your city", "New York") person["name"] = name person["age"] = age person["gender"] = gender person["city"] = city st.write("Here is your updated *person* dictionary: ") st.write(person) ''') st.markdown("**:red[Code Output]**") #actual code here ex3() def ex4a(): #Exercise 4a: Creating Session Data if "session_data" not in st.session_state: st.session_state.session_data = ["alpha", "omega"] if "name" not in st.session_state: st.session_state.name = "" if "age" not in st.session_state: st.session_state.age = "" if "gender" not in st.session_state: st.session_state.gender = "" # For loop to show list for data in st.session_state.session_data: st.write("session_data: ", data) st.write("name: ", st.session_state.name) st.write("age: ", st.session_state.age) st.write("gender: ", st.session_state.gender) def class1_ex4a(): st.subheader("Exercise 4a: Session Data") st.write("We can create variables to store data in a user session. 
Session data persist within a user session.") st.markdown("**:blue[Code]**") st.code(''' # Exercise 4: Session State def ex4a(): st.subheader("Session Data:") if "session_data" not in st.session_state: st.session_state.session_data = ["alpha", "omega"] if "name" not in st.session_state: st.session_state.name = "" if "age" not in st.session_state: st.session_state.age = "" if "gender" not in st.session_state: st.session_state.gender = "" # For loop to show list for data in st.session_state.session_data: st.write("session_data: ", data) st.write("name: ", st.session_state.name) st.write("age: ", st.session_state.age) st.write("gender: ", st.session_state.gender) ''') st.markdown("**:red[Code Output]**") ex4a() def ex4b(): #Exercise 4b: Session Data with User Input user_name = st.text_input("Enter your name") user_age = st.text_input("State your age") user_gender = st.selectbox("State your gender", ["", "Male", "Female"]) if user_name: st.session_state.name = user_name st.write("name: ", st.session_state.name) if user_age: st.session_state.age = int(user_age) st.write("age: ", st.session_state.age) if user_gender: st.session_state.gender = user_gender st.write("gender: ", st.session_state.gender) def class1_ex4b(): st.subheader("Exercise 4b: Session Data with User Input") st.write("Lets now get input from the user and store it in the session data.") st.write("Now run *ex4a()* again to check the session data. Note that it persists.") st.markdown("**:blue[Code]**") st.code(''' def ex4b(): st.subheader("Session Data:") userName = st.text_input("Enter your name") userAge = st.text_input("State your age") userGender = st.selectbox("State your gender", ["", "Male", "Female"]) if userName: st.session_state.name = userName st.write("name: ", st.session_state.name) if userAge: st.session_state.age = int(userAge) st.write("age: ", st.session_state.age) if userGender: st.session_state.gender = userGender st.write("gender: ", st.session_state.gender) ''') st.markdown("**:red[Code Output]**") ex4b() def ch4(): if "name" not in st.session_state: st.session_state.name = "Yoda" if "age" not in st.session_state: st.session_state.age = 999 if "gender" not in st.session_state: st.session_state.gender = "male" if "prompt_template" not in st.session_state: st.session_state.prompt_template = "Speak like Yoda from Star Wars for every question that was asked, do not give a direct answer but ask more questions in the style of wise Yoda from Star Wars" st.write("session_state.name: ", st.session_state.name) st.write("session_state.age: ", st.session_state.age) st.write("session_state.gender: ", st.session_state.gender) st.write("session_state.prompt_template: ", st.session_state.prompt_template) def class1_ch4(): st.subheader("Challenge 4: Session Data") st.markdown(""" Add a new function called ***ch4()*** to the ***part1.py*** file and call it in the main function.\n In *ch4()*, modify the code in Exercise 4b to include the following: * Create session data for ***name***, ***age*** and ***gender*** * Create session data for ***prompt_template*** with the following value: "Speak like Yoda from Star Wars for every question that was asked, do not give a direct answer but ask more questions in the style of wise Yoda from Star Wars" * Include this code in ***main()*** as well, because we need the session data for later exercises. Omit the ***st.write*** functions, since we do not want to see this output every time we run ***main()***. 
\n Hint: * In ***ch4()***, to check that the session data is created, you can print out the session data using ***st.write()***: """) st.markdown("**:blue[Code]**") with st.expander("Reveal Code"): st.code(''' def ch4(): if "name" not in st.session_state: st.session_state.name = "Yoda" if "age" not in st.session_state: st.session_state.age = 999 if "gender" not in st.session_state: st.session_state.gender = "male" if "prompt_template" not in st.session_state: st.session_state.prompt_template = "Speak like Yoda from Star Wars for every question that was asked, do not give a direct answer but ask more questions in the style of wise Yoda from Star Wars" st.write("session_state.name: ", st.session_state.name) st.write("session_state.age: ", st.session_state.age) st.write("session_state.gender: ", st.session_state.gender) st.write("session_state.prompt_template: ", st.session_state.prompt_template) def main(): # initialize session state, from ch4 if "name" not in st.session_state: st.session_state.name = "Yoda" if "age" not in st.session_state: st.session_state.age = 999 if "gender" not in st.session_state: st.session_state.gender = "male" if "prompt_template" not in st.session_state: st.session_state.prompt_template = "Speak like Yoda from Star Wars for every question that was asked, do not give a direct answer but ask more questions in the style of wise Yoda from Star Wars" #the rest of your code is below ''') st.markdown("**:red[Code Output]**") #actual code here ch4() def ex5(): st.title("My first chatbot") if "store_msg" not in st.session_state: st.session_state.store_msg = [] prompt = st.chat_input("Say something") if prompt: st.write(f"User has sent the following prompt: {prompt}") st.session_state.store_msg.append(prompt) for message in st.session_state.store_msg: with st.chat_message("user"): st.write(message) with st.chat_message("assistant"): st.write("Hello human, what can I do for you?") def class1_ex5(): st.subheader("Exercise 5: Elements of a chatbot") st.write("We will start creating a user interface for our first chatbot.") st.write("Call the following code from ***part1.py*** in your **main()**.") st.write("You should see the output below when you run your programme.") st.markdown("**:blue[Code]**") st.code(''' #Exercise 5 : Chatbot UI def ex5(): st.title("My first chatbot") if "store_msg" not in st.session_state: st.session_state.store_msg = [] prompt = st.chat_input("Say something") if prompt: st.write(f"User has sent the following prompt: {prompt}") st.session_state.store_msg.append(prompt) for message in st.session_state.store_msg: with st.chat_message("user"): st.write(message) with st.chat_message("assistant"): st.write("Hello human, what can I do for you?") ''') st.markdown("**:red[Code Output]**") #actual code here ex5() def ex6(): st.markdown("**Echo Bot**") # Initialize chat history if "messages" not in st.session_state: st.session_state.messages = [] # Display chat messages from history on app rerun for message in st.session_state.messages: with st.chat_message(message["role"]): st.markdown(message["content"]) # React to user input if prompt := st.chat_input("What is up?"): # Display user message in chat message container st.chat_message("user").markdown(prompt) # Add user message to chat history st.session_state.messages.append({"role": "user", "content": prompt}) response = f"Echo: {prompt}" # Display assistant response in chat message container with st.chat_message("assistant"): st.markdown(response) # Add assistant response to chat history 
st.session_state.messages.append({"role": "assistant", "content": response}) def class1_ex6(): st.subheader("Exercise 6: Building a simple echo chatbot") st.write("We will now build a simple echo chatbot.") st.write("Call the following code from **part1.py** in your ***main()***.") st.write("You should see the output below when you run your programme.") st.markdown("**:blue[Code]**") st.code(''' #Exercise 6 : Rule-based Echo Chatbot def ex6(): st.title("Echo Bot") # Initialize chat history if "messages" not in st.session_state: st.session_state.messages = [] # Display chat messages from history on app rerun for message in st.session_state.messages: with st.chat_message(message["role"]): st.markdown(message["content"]) # React to user input if prompt := st.chat_input("What is up?"): # Display user message in chat message container st.chat_message("user").markdown(prompt) # Add user message to chat history st.session_state.messages.append({"role": "user", "content": prompt}) response = f"Echo: {prompt}" # Display assistant response in chat message container with st.chat_message("assistant"): st.markdown(response) # Add assistant response to chat history st.session_state.messages.append({"role": "assistant", "content": response}) ''') st.markdown("**:red[Code Output]**") #actual code here ex6() def ch6(): st.markdown("**Rule Based Bot**") # Initialize chat history if "messages" not in st.session_state: st.session_state.messages = [] # # Display chat messages from history on app rerun for message in st.session_state.messages: with st.chat_message(message["role"]): st.markdown(message["content"]) # React to user input if prompt := st.chat_input("Enter your query"): if prompt == "Hello": reply = "Hi there what can I do for you" elif prompt == "What is your name?": reply = "My name is EAI , an electronic artificial being" elif prompt == "How old are you?": reply = "Today is my birthday!" 
else: reply = "I am sorry, I am unable to help you with your query" with st.chat_message("user"): st.write(prompt) st.session_state.messages.append({"role": "user", "content": prompt}) with st.chat_message("assistant"): st.write(reply) st.session_state.messages.append({"role": "assistant", "content": reply}) def class1_ch6(): st.subheader("Challenge 6: Rule based chatbot ") st.markdown(""" Create a new function called ***ch6()*** in **part1.py** and modify the ***ex6()*** function to create the following rule based chatbot:\n * Human : “Hello”, Assistant: “Hi there what can I do for you”\n * Human : “What is your name?”, Assistant: “My name is EAI , an electronic artificial being”\n * Human : “How old are you?”, Assistant: “Today is my birthday!”\n For other queries, it will reply “I am sorry, I am unable to help you with your query”\n Use *if / elif / else* statements to create the chatbot behaviour logic.\n You should see the output below when you run your programme.\n """) st.markdown("**:blue[Code]**") with st.expander("Reveal Code"): st.code(''' #Challenge 6 : Rule-based If-Else Chatbot def ch6(): st.markdown("**Rule Based Bot**") # Initialize chat history if "messages" not in st.session_state: st.session_state.messages = [] # # Display chat messages from history on app rerun for message in st.session_state.messages: with st.chat_message(message["role"]): st.markdown(message["content"]) # React to user input if prompt := st.chat_input("Enter your query"): if prompt == "Hello": reply = "Hi there what can I do for you" elif prompt == "What is your name?": reply = "My name is EAI , an electronic artificial being" elif prompt == "How old are you?": reply = "Today is my birthday!" else: reply = "I am sorry, I am unable to help you with your query" with st.chat_message("user"): st.write(prompt) st.session_state.messages.append({"role": "user", "content": prompt}) with st.chat_message("assistant"): st.write(reply) st.session_state.messages.append({"role": "assistant", "content": reply}) ''') st.markdown("**:red[Code Output]**") #actual code here ch6() def class1_ex7(): st.subheader("Exercise 7: Secrets- Shhh ") st.write("In this exercise, we will learn how to hide your API key") st.markdown(""" In your working directory (chatbot), create a directory called **.streamlit**\n Note the *dot* in front of the directory\n In this folder, create a file called **secrets.toml**\n Get an API key from your OpenAI account and type the following in **secrets.toml**: """) st.markdown("**:blue[Code]**") st.code(''' openapi_key = "xxxxxx" ''') st.write("Include the following global variables in your ***main.py*** file under the import statements:") st.code(''' os.environ["OPENAI_API_KEY"] = st.secrets["openapi_key"] openai.api_key = st.secrets["openapi_key"] ''') st.write("Create a .gitignore file and add .streamlit into it") #st.markdown("**:red[Code Output]**") pass def class1_ch7(): pass def ex8(): st.title("Api Call") MODEL = "gpt-3.5-turbo" response = openai.ChatCompletion.create( model=MODEL, messages=[ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Tell me about Singapore in the 1970s in 50 words."}, ], temperature=0, ) st.markdown("**This is the raw response:**") st.write(response) st.markdown("**This is the extracted response:**") st.write(response["choices"][0]["message"]["content"].strip()) s = str(response["usage"]["total_tokens"]) st.markdown("**Total tokens used:**") st.write(s) def class1_ex8(): st.subheader("Exercise 8: Calling the OpenAI LLM API") 
st.write("In this exercise, we will learn how to call the OpenAI LLM API") st.write("Note that there is a new import statement **import openai**") st.markdown(""" Call the following code in your **main()** and run it.\n You should see the output as shown below.\n """) st.markdown("**:blue[Code]**") st.code(''' import openai #Exercise 8 : Using the OpenAI API def ex8(): st.title("Api Call") MODEL = "gpt-3.5-turbo" response = openai.ChatCompletion.create( model=MODEL, messages=[ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Tell me about Singapore in the 1970s in 50 words."}, ], temperature=0, ) st.markdown("**This is the raw response:**") st.write(response) st.markdown("**This is the extracted response:**") st.write(response["choices"][0]["message"]["content"].strip()) s = str(response["usage"]["total_tokens"]) st.markdown("**Total tokens used:**") st.write(s) ''') st.markdown("**:red[Code Output]**") #actual code here ex8() def chat_completion(prompt): MODEL = "gpt-3.5-turbo" response = openai.ChatCompletion.create( model=MODEL, messages=[ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": prompt}, ], temperature=0, ) return response["choices"][0]["message"]["content"].strip() def class1_ch8(): st.subheader("Challenge 8: Incorporate your LLM API call into your chatbot") st.write("In this challenge, we will incorporate the LLM API call into our previous rule-based *Echo* chatbot") st.markdown(""" **Step1**\n Create a new function **ch8()** in ***part1.py*** and copy the code from **ex6()** into it. Recall that **ex6()** shows the chat history and gets a chat input from the user, and echoes the user input back to the user. \n **Step 2**\n Next, copy the code from **ex8** into a new function named **chat_completion()**. Recall that **ex8()** is about making an API call.\n Now, instead of echoing the user's input in **ex6()**, we will call the LLM API to generate a response. 
In particular, we are replacing this line of code with the response from the API call:\n """) st.code(''' response = f"Echo: {prompt}" ''') st.markdown(""" **Step 3**\n In **chat_completion()**, we will replace the hard-coded *"Tell me about Singapore in the 1970s ..."* prompt from **ex8()** with the current user's input.\n In order to do so, in **ch8()**, use the following code to call **chat_completion()**.\n What we are doing now is to pass the prompt from the user to the API call instead of hard-coding the prompt as in **ex8()**.\n""") st.code(''' if prompt := st.chat_input("What's up?"): # display user message in chat message container reply = chat_completion(prompt) st.chat_message("user").markdown(prompt) ''') st.write("You should see the code output as shown below.") st.markdown("**:blue[Code]**") with st.expander("Reveal Code"): st.code(''' #Challenge 8: Incorporating the API into your chatbot def chat_completion(prompt): MODEL = "gpt-3.5-turbo" response = openai.ChatCompletion.create( model=MODEL, messages=[ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": prompt}, ], temperature=0, ) return response["choices"][0]["message"]["content"].strip() def ch8(): st.title("My first LLM Chatbot") # Initialize chat history if "chat_msg" not in st.session_state: st.session_state.chat_msg = [] # Display chat chat_msg from history on app rerun for message in st.session_state.chat_msg: with st.chat_message(message["role"]): st.markdown(message["content"]) # React to user input if prompt := st.chat_input("What's up?"): # Display user message in chat message container reply = chat_completion(prompt) st.chat_message("user").markdown(prompt) # Add user message to chat history st.session_state.chat_msg.append({"role": "user", "content": prompt}) # Display assistant response in chat message container with st.chat_message("assistant"): st.markdown(reply) # Add assistant response to chat history st.session_state.chat_msg.append({"role": "assistant", "content": reply}) ''') st.markdown("**:red[Code Output]**") st.title("My LLM Chatbot") # Initialize chat history if "chat_msg" not in st.session_state: st.session_state.chat_msg = [] # Display chat chat_msg from history on app rerun for message in st.session_state.chat_msg: with st.chat_message(message["role"]): st.markdown(message["content"]) # React to user input if prompt := st.chat_input("What is up?"): # Display user message in chat message container reply = chat_completion(prompt) st.chat_message("user").markdown(prompt) # Add user message to chat history st.session_state.chat_msg.append({"role": "user", "content": prompt}) # Display assistant response in chat message container with st.chat_message("assistant"): st.markdown(reply) # Add assistant response to chat history st.session_state.chat_msg.append({"role": "assistant", "content": reply}) def chat_completion_stream(prompt): MODEL = "gpt-3.5-turbo" response = openai.ChatCompletion.create( model=MODEL, messages=[ {"role": "system", "content": "You are a helpful assistant"}, {"role": "user", "content": prompt}, ], temperature=0, # temperature stream=True, # stream option ) return response def ex9_basebot(): # Initialize chat history if "chat_msg" not in st.session_state: st.session_state.chat_msg = [] # Showing Chat history for message in st.session_state.chat_msg: with st.chat_message(message["role"]): st.markdown(message["content"]) try: # if prompt := st.chat_input("What is up?"): # set user prompt in chat history st.session_state.chat_msg.append({"role": 
"user", "content": prompt}) with st.chat_message("user"): st.markdown(prompt) with st.chat_message("assistant"): message_placeholder = st.empty() full_response = "" # streaming function for response in chat_completion_stream(prompt): full_response += response.choices[0].delta.get("content", "") message_placeholder.markdown(full_response + "▌") message_placeholder.markdown(full_response) st.session_state.chat_msg.append( {"role": "assistant", "content": full_response} ) except Exception as e: st.error(e) def class1_ex9(): st.subheader("Exercise 9: Building a ChatGPT-like clone with streaming responses") st.write("Now, we will incorporate a streaming response from the LLM API into our chatbot to mimic the behaviour of ChatGPT.") st.write("Copy and run the code below to see the streaming responses.") st.markdown("**:blue[Code]**") st.code(''' # Exercise 9 : Using the OpenAI API with streaming option def chat_completion_stream(prompt): openai.api_key = st.secrets["openapi_key"] MODEL = "gpt-3.5-turbo" response = openai.ChatCompletion.create( model=MODEL, messages=[ {"role": "system", "content": "You are a helpful assistant"}, {"role": "user", "content": prompt}, ], temperature=0, # temperature stream=True, # stream option ) return response # integration API call into streamlit chat components def ex9_basebot(): # Initialize chat history if "chat_msg" not in st.session_state: st.session_state.chat_msg = [] # Showing Chat history for message in st.session_state.chat_msg: with st.chat_message(message["role"]): st.markdown(message["content"]) try: # if prompt := st.chat_input("What is up?"): # set user prompt in chat history st.session_state.chat_msg.append({"role": "user", "content": prompt}) with st.chat_message("user"): st.markdown(prompt) with st.chat_message("assistant"): message_placeholder = st.empty() full_response = "" # streaming function for response in chat_completion_stream(prompt): full_response += response.choices[0].delta.get("content", "") message_placeholder.markdown(full_response + "▌") message_placeholder.markdown(full_response) st.session_state.chat_msg.append( {"role": "assistant", "content": full_response} ) except Exception as e: st.error(e) ''') st.markdown("**:red[Code Output]**") ex9_basebot() def class1_ch9(): pass def ex10(): # prompt_template in session state already set in main() MODEL = "gpt-3.5-turbo" response = openai.ChatCompletion.create( model=MODEL, messages=[ {"role": "system", "content": st.session_state.prompt_template}, { "role": "user", "content": "Tell me about Singapore in the 1970s in 50 words", }, ], temperature=0, ) st.markdown("**LLM Response:**") st.write(response["choices"][0]["message"]["content"].strip()) st.markdown("**Total tokens:**") st.write(str(response["usage"]["total_tokens"])) def class1_ex10(): st.subheader("Exercise 10: Basic Prompt Engineering") st.markdown(""" Now, we are going to create a chatbot with a personality by using a default prompt for our chatbot. \n This is the default prompt that will be used for every conversation.\n Let's make it a chatbot that speaks like Yoda from Star Wars.\n We will use the ***prompt_template*** that is already in our ***main()*** for this. """) st.code(''' if "prompt_template" not in st.session_state: st.session_state.prompt_template = "Speak like Yoda from Star Wars for every question that was asked, do not give a direct answer but ask more questions in the style of wise Yoda from Star Wars" ''') st.markdown(""" Run the code below. 
You should get the same chatbot behaviour as the code output below.\n Try varying the temperature setting (0.0 to 1.0) to see how it affects the chatbot's response.\n """) st.markdown("**:blue[Code]**") st.code(''' # Exercise 10: Basic prompt engineering def ex10_basebot(): #prompt_template in session state already set in main() MODEL = "gpt-3.5-turbo" response = openai.ChatCompletion.create( model=MODEL, messages=[ {"role": "system", "content": st.session_state.prompt_template}, { "role": "user", "content": "Tell me about Singapore in the 1970s in 50 words", }, ], temperature=0, ) st.markdown("**LLM Response:**") st.write(response["choices"][0]["message"]["content"].strip()) st.markdown("**Total tokens:**") st.write(str(response["usage"]["total_tokens"])) ''') st.markdown("**:red[Code Output]**") #actual code here ex10() #Challenge 10 #mod chat complete stream function by replacing system content to session_state prompt template def chat_completion_stream_prompt(prompt): MODEL = "gpt-3.5-turbo" #consider changing this to session_state response = openai.ChatCompletion.create( model=MODEL, messages=[ {"role": "system", "content": st.session_state.prompt_template}, {"role": "user", "content": prompt}, ], temperature= 0, # temperature stream=True #stream option ) return response # Challenge 10: Make the bot speak like someone you know def ch10(): #call the function in your base bot #Initialize chat history if "msg" not in st.session_state: st.session_state.msg = [] #Showing Chat history for message in st.session_state.msg: with st.chat_message(message["role"]): st.markdown(message["content"]) try: # if prompt := st.chat_input("What is up?"): #set user prompt in chat history st.session_state.msg.append({"role": "user", "content": prompt}) with st.chat_message("user"): st.markdown(prompt) with st.chat_message("assistant"): message_placeholder = st.empty() full_response = "" #streaming function for response in chat_completion_stream_prompt(prompt): full_response += response.choices[0].delta.get("content", "") message_placeholder.markdown(full_response + "▌") message_placeholder.markdown(full_response) st.session_state.msg.append({"role": "assistant", "content": full_response}) except Exception as e: st.error(e) def class1_ch10(): st.subheader("Challenge 10: Make your bot like someone you know!") st.write("Modify the ***prompt_template*** in your ***main()*** to your own liking.") st.write("Be imaginative!") st.write("Now, in new function called **chat_completion_stream_prompt()**, we are going to modify the earlier **streaming chat_completion** function to accept a user prompt input.") st.write("You will need to pass in a new input variable called **prompt** and replace the user content with the new **prompt** variable.") st.write("Replace the system prompt with **st.session_state.prompt_template**.") st.write("Before calling **chat_completion_stream_prompt()**, get a new prompt from the user like this to update the **st.session_state.prompt_template**:") st.code(''' if my_prompt_template := st.text_input("Enter a system prompt template. E.g. Speak like Yoda from Star Wars."): st.session_state.prompt_template = my_prompt_template st.write("new prompt template set! 
", st.session_state.prompt_template) ''') st.markdown("**:blue[Code]**") with st.expander("Reveal Code"): st.code(''' #Challenge 10 #mod chat complete stream function by replacing system content to session_state prompt template def chat_completion_stream_prompt(prompt): MODEL = "gpt-3.5-turbo" #consider changing this to session_state response = openai.ChatCompletion.create( model=MODEL, messages=[ {"role": "system", "content": st.session_state.prompt_template}, {"role": "user", "content": prompt}, ], temperature= 0, # temperature stream=True #stream option ) return response # Challenge 10: Make the bot speak like someone you know def ch10_basebot(): # call the function in your base bot # Initialize chat history if "msg" not in st.session_state: st.session_state.msg = [] # Showing Chat history for message in st.session_state.msg: with st.chat_message(message["role"]): st.markdown(message["content"]) try: # if prompt := st.chat_input("What is up?"): #set user prompt in chat history st.session_state.msg.append({"role": "user", "content": prompt}) with st.chat_message("user"): st.markdown(prompt) with st.chat_message("assistant"): message_placeholder = st.empty() full_response = "" #streaming function for response in chat_completion_stream_prompt(prompt): full_response += response.choices[0].delta.get("content", "") message_placeholder.markdown(full_response + "▌") message_placeholder.markdown(full_response) st.session_state.msg.append({"role": "assistant", "content": full_response}) except Exception as e: st.error(e) ''') st.markdown("**:red[Code Output]**") st.title("ChatGPT-like clone with Prompt Engineering") ch10() #https://python.langchain.com/docs/modules/chains/ def ex11a(): # change in ex11a # langchain prompt template prompt = PromptTemplate( input_variables=["subject", "topic"], template="""Design a lesson plan on {subject} on the topic of {topic} for primary 1 students""", ) # openai_api_key = st.secrets["openapi_key"] llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.9) # creating a LLM chain with the langchain call and prompt template chain = LLMChain(llm=llm, prompt=prompt) if st.button("Run my chain"): input_prompt = prompt.format(subject="English", topic="Verbs") # Showing what is sent to LLM Chain st.write("Input prompt: ", input_prompt) # Showing the output from LLM Chain st.write(chain.run({"subject": "English", "topic": "Verbs"})) def class1_ex11a(): st.subheader("Exercise 11a: Prompt Template with LangChain") st.write("LangChain helps you to create a more complext prompt template for your chatbot.") st.markdown("**:blue[Code]**") st.code(''' #https://python.langchain.com/docs/modules/chains/ def ex11a(): # change in ex11a # langchain prompt template prompt = PromptTemplate( input_variables=["subject", "topic"], template="""Design a lesson plan on {subject} on the topic of {topic} for primary 1 students""", ) # openai_api_key = st.secrets["openapi_key"] llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.9) # creating a LLM chain with the langchain call and prompt template chain = LLMChain(llm=llm, prompt=prompt) if st.button("Run my chain"): input_prompt = prompt.format(subject="English", topic="Verbs") # Showing what is sent to LLM Chain st.write("Input prompt: ", input_prompt) # Showing the output from LLM Chain st.write(chain.run({"subject": "English", "topic": "Verbs"})) ''') st.markdown("**:red[Code Output]**") #actual code here ex11a() def prompt_inputs_form(): #Using st.form, create the starting prompt to your prompt template, this is an expert on a 
topic that is talking to a user of a certain age #langchain prompt template with st.form("Prompt Template"): occupation = st.text_input("Enter the occupation:") topic = st.text_input("Enter the topic:") age = st.text_input("Enter the age:") # Every form must have a submit button. submitted = st.form_submit_button("Submit") #return a dictionary of the values if submitted: return { 'occupation': occupation, 'topic': topic, 'age': age } def ex11b(): # create your template prompt_template = PromptTemplate( input_variables=["occupation", "topic", "age"], template="""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""", ) # create a langchain function call to openai llm = ChatOpenAI( model_name="gpt-3.5-turbo", temperature=0.9, ) # create a LLM chain with the langchain call and prompt template chain = LLMChain(llm=llm, prompt=prompt_template) # call the prompt_inputs_form() dict_inputs = prompt_inputs_form() if dict_inputs: st.write(chain.run(dict_inputs)) def class1_ex11b(): st.subheader("Exercise 11b") st.write("Now, we will create a chatbot with a prompt template that is more complex.") st.write("We will use the ***prompt_inputs_form()*** function to get the user's input for the prompt template.") st.write("Run the code below to see the chatbot in action.") st.markdown("**:blue[Code]**") st.code(''' def prompt_inputs_form(): # Using st.form, create the starting prompt to your prompt template, this is an expert on a topic that is talking to a user of a certain age # langchain prompt template with st.form("Prompt Template"): occupation = st.text_input("Enter the occupation:") topic = st.text_input("Enter the topic:") age = st.text_input("Enter the age:") # Every form must have a submit button. 
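# note: widgets inside st.form are batched, so the script only reruns (and this function only returns values) once the submit button is pressed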
submitted = st.form_submit_button("Submit") # return a dictionary of the values if submitted: return {"occupation": occupation, "topic": topic, "age": age} def ex11b(): # create your template prompt_template = PromptTemplate( input_variables=["occupation", "topic", "age"], template="""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""", ) # create a langchain function call to openai llm = ChatOpenAI( model_name="gpt-3.5-turbo", temperature=0.9, ) # create a LLM chain with the langchain call and prompt template chain = LLMChain(llm=llm, prompt=prompt_template) # call the prompt_inputs_form() dict_inputs = prompt_inputs_form() if dict_inputs: st.write(chain.run(dict_inputs)) ''') st.markdown("**:red[Code Output]**") # Actual code here ex11b() def ch11(): # instead of running of the langchain, we are going to use the prompt template and run it the chatbot using format prompt_template = PromptTemplate( input_variables=["occupation", "topic", "age"], template="""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""", ) dict_inputs = prompt_inputs_form() if dict_inputs: input_prompt = prompt_template.format( occupation=dict_inputs["occupation"], topic=dict_inputs["topic"], age=dict_inputs["age"], ) # set session_state.prompt_template st.session_state.prompt_template = input_prompt st.write("New session_state.prompt_template: ", input_prompt) # call ch10() with the new session_state.prompt_template ch10() def class1_ch11(): st.subheader("Challenge 11: Prompt Template with LangChain") st.write("Now, let's incorporate the prompt template into our chatbot from the previous exercise.") st.write("We will use the ***prompt_inputs_form()*** function to get the user's input for the prompt template.") st.write("You can use the ***ch10*** function from the previous exercise to do the llm api call with the updated session_state.prompt_template.") st.write("Ignore the text input field that asks for a system prompt template from ch10(), since we will be using the prompt template from the user's input.") st.write("As you interact with the chatbot, observe that the prompt template is updated with the latest user input as seen from the code output.") st.markdown("**:blue[Code]**") with st.expander("Reveal Code"): st.code(''' def ch11(): # instead of running of the langchain, we are going to use the prompt template and run it the chatbot using format prompt_template = PromptTemplate( input_variables=["occupation", "topic", "age"], template="""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""", ) dict_inputs = prompt_inputs_form() if dict_inputs: input_prompt = prompt_template.format( occupation=dict_inputs["occupation"], topic=dict_inputs["topic"], age=dict_inputs["age"], ) # set session_state.prompt_template st.session_state.prompt_template = input_prompt st.write("New session_state.prompt_template: ", input_prompt) # call the ch10() basebot with the new session_state.prompt_template ch10() ''') 
st.markdown("**:red[Code Output]**") # actual code here ch11() def ex12(): memory = ConversationBufferWindowMemory(k=3) memory.save_context({"input": "hi"}, {"output": "whats up?"}) memory.save_context({"input": "not much"}, {"output": "what can I help you with?"}) st.write(memory.load_memory_variables({})) memory = ConversationBufferWindowMemory( k=3, return_messages=True) memory.save_context({"input": "hi"}, {"output": "whats up?"}) memory.save_context({"input": "not much"}, {"output": "what can I help you with?"}) st.write(memory.load_memory_variables({})) def class1_ex12(): st.subheader("Exercise 12: Chatbot with memory") st.write("Now, we will create a chatbot with memory.") st.write("You can determine the number of previous messages to remember by setting the ***k*** parameter.") st.markdown("**:blue[Code]**") st.code(''' def ex12(): memory = ConversationBufferWindowMemory(k=3) memory.save_context({"input": "hi"}, {"output": "whats up?"}) memory.save_context({"input": "not much"}, {"output": "what can I help you with?"}) st.write(memory.load_memory_variables({})) memory = ConversationBufferWindowMemory( k=3, return_messages=True) memory.save_context({"input": "hi"}, {"output": "whats up?"}) memory.save_context({"input": "not much"}, {"output": "what can I help you with?"}) st.write(memory.load_memory_variables({})) ''') st.markdown("**:red[Code Output]**") #actual code here ex12() def ch12(): # Prompt_template form from ex11 prompt_template = PromptTemplate( input_variables=["occupation", "topic", "age"], template="""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""", ) dict_inputs = prompt_inputs_form() if dict_inputs: input_prompt = prompt_template.format( occupation=dict_inputs["occupation"], topic=dict_inputs["topic"], age=dict_inputs["age"], ) else: input_prompt = "You are a helpful assistant. 
" st.write("input prompt: ", input_prompt) if "memory" not in st.session_state: st.session_state.memory = ConversationBufferWindowMemory(k=3) # step 1 save the memory from your chatbot # step 2 integrate the memory in the prompt_template (st.session_state.prompt_template) show a hint memory_data = st.session_state.memory.load_memory_variables({}) st.write("Memory Data: ", memory_data) st.session_state.prompt_template = f""" {input_prompt} Below is the conversation history between the AI and Users so far {memory_data} """ st.write("New prompt template:", st.session_state.prompt_template) # call the function in your base bot # Initialize chat history if "msg" not in st.session_state: st.session_state.msg = [] # Showing Chat history for message in st.session_state.msg: with st.chat_message(message["role"]): st.markdown(message["content"]) try: # if prompt := st.chat_input("What is up?"): # set user prompt in chat history st.session_state.msg.append({"role": "user", "content": prompt}) with st.chat_message("user"): st.markdown(prompt) with st.chat_message("assistant"): message_placeholder = st.empty() full_response = "" # streaming function for response in chat_completion_stream_prompt(prompt): full_response += response.choices[0].delta.get("content", "") message_placeholder.markdown(full_response + "▌") message_placeholder.markdown(full_response) st.session_state.msg.append({"role": "assistant", "content": full_response}) st.session_state.memory.save_context( {"input": prompt}, {"output": full_response} ) except Exception as e: st.error(e) def class1_ch12(): st.subheader("Challenge 12: Chatbot with memory") st.write("Now, let's incorporate the memory into the session state prompt template.") st.write("The chatbot should remember the previous user input and use it as the prompt template for the next conversation.") st.write("Start with the following code and modify ex12() to create a chatbot with memory.") st.write("Get the *{input_prompt}* using *prompt_inputs_form()*.") st.write("As you interact with the chatbot, observe that the memory is updated with the latest k number of user input and output as seen from the code output.") st.markdown("**:blue[Code]**") st.code(''' if "memory" not in st.session_state: st.session_state.memory = ConversationBufferWindowMemory(k=5) #step 1 save the memory from your chatbot #step 2 integrate the memory in the prompt_template (st.session_state.prompt_template) memory_data = st.session_state.memory.load_memory_variables({}) st.write(memory_data) st.session_state.prompt_template = f"""{input_prompt}\n\nBelow is the conversation history between the AI and Users so far\n\n{memory_data}""" ''') st.markdown("**:blue[Code]**") with st.expander("Reveal Code"): st.code(''' def ch12(): # Prompt_template form from ex11 prompt_template = PromptTemplate( input_variables=["occupation", "topic", "age"], template="""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""", ) dict_inputs = prompt_inputs_form() if dict_inputs: input_prompt = prompt_template.format( occupation=dict_inputs["occupation"], topic=dict_inputs["topic"], age=dict_inputs["age"], ) else: input_prompt = "You are a helpful assistant. 
" st.write("input prompt: ", input_prompt) if "memory" not in st.session_state: st.session_state.memory = ConversationBufferWindowMemory(k=3) # step 1 save the memory from your chatbot # step 2 integrate the memory in the prompt_template (st.session_state.prompt_template) show a hint memory_data = st.session_state.memory.load_memory_variables({}) st.write("Memory Data: ", memory_data) st.session_state.prompt_template = f""" {input_prompt} Below is the conversation history between the AI and Users so far {memory_data} """ st.write("New prompt template: ", st.session_state.prompt_template) # call the function in your base bot # Initialize chat history if "msg" not in st.session_state: st.session_state.msg = [] # Showing Chat history for message in st.session_state.msg: with st.chat_message(message["role"]): st.markdown(message["content"]) try: # if prompt := st.chat_input("What is up?"): # set user prompt in chat history st.session_state.msg.append({"role": "user", "content": prompt}) with st.chat_message("user"): st.markdown(prompt) with st.chat_message("assistant"): message_placeholder = st.empty() full_response = "" # streaming function for response in chat_completion_stream_prompt(prompt): full_response += response.choices[0].delta.get("content", "") message_placeholder.markdown(full_response + "▌") message_placeholder.markdown(full_response) st.session_state.msg.append({"role": "assistant", "content": full_response}) st.session_state.memory.save_context( {"input": prompt}, {"output": full_response} ) except Exception as e: st.error(e) ''') st.markdown("**:red[Code Output]**") #actual code here ch12() #exercise 13 - loading def upload_file_streamlit(): def get_file_extension(file_name): return os.path.splitext(file_name)[1] st.subheader("Upload your docs") # Streamlit file uploader to accept file input uploaded_file = st.file_uploader("Choose a file", type=["docx", "txt", "pdf"]) if uploaded_file: # Reading file content file_content = uploaded_file.read() # Determine the suffix based on uploaded file's name file_suffix = get_file_extension(uploaded_file.name) # Saving the uploaded file temporarily to process it with tempfile.NamedTemporaryFile(delete=False, suffix=file_suffix) as temp_file: temp_file.write(file_content) temp_file.flush() # Ensure the data is written to the file temp_file_path = temp_file.name return temp_file_path #exercise 13 - split and chunk, embeddings and storing in vectorstores for reference def vectorstore_creator(): # WORKING_DIRECTORY set above in the main.py # Process the temporary file using UnstructuredFileLoader (or any other method you need) embeddings = OpenAIEmbeddings() db = lancedb.connect(WORKING_DIRECTORY) table = db.create_table( "my_table", data=[ { "vector": embeddings.embed_query("Query unsuccessful"), "text": "Query unsuccessful", "id": "1", } ], mode="overwrite", ) # st.write(temp_file_path) temp_file_path = upload_file_streamlit() if temp_file_path: loader = PyPDFLoader(temp_file_path) documents = loader.load_and_split() db = LanceDB.from_documents(documents, embeddings, connection=table) return db def ex13_vectorstore_creator(): if "vectorstore" not in st.session_state: st.session_state.vectorstore = False db = vectorstore_creator() st.session_state.vectorstore = db if st.session_state.vectorstore: query = st.text_input("Enter a query") if query: st.session_state.vectorstore = db docs = db.similarity_search(query) st.write(docs[0].page_content) def class1_ex13(): st.subheader("Exercise 13: Create a vector store") st.write("Now, we will create 
a vector store to store the user's document.") st.write("This process uses OpenAI to generate embeddings and LanceDB for storing these embeddings.") st.write("For now, this only works for pdf files.") st.write("You may need to run the following commands in terminal to install new libaries:") st.code(''' pip install tiktoken ''') st.markdown("**:blue[Code]**") st.code(''' #exercise 13 - loading def upload_file_streamlit(): def get_file_extension(file_name): return os.path.splitext(file_name)[1] st.subheader("Upload your docs") # Streamlit file uploader to accept file input uploaded_file = st.file_uploader("Choose a file", type=["docx", "txt", "pdf"]) if uploaded_file: # Reading file content file_content = uploaded_file.read() # Determine the suffix based on uploaded file's name file_suffix = get_file_extension(uploaded_file.name) # Saving the uploaded file temporarily to process it with tempfile.NamedTemporaryFile(delete=False, suffix=file_suffix) as temp_file: temp_file.write(file_content) temp_file.flush() # Ensure the data is written to the file temp_file_path = temp_file.name return temp_file_path #exercise 13 - split and chunk, embeddings and storing in vectorstores for reference def vectorstore_creator(): # WORKING_DIRECTORY set above in the main.py # Process the temporary file using UnstructuredFileLoader (or any other method you need) embeddings = OpenAIEmbeddings() db = lancedb.connect(WORKING_DIRECTORY) table = db.create_table( "my_table", data=[ { "vector": embeddings.embed_query("Query unsuccessful"), "text": "Query unsuccessful", "id": "1", } ], mode="overwrite", ) # st.write(temp_file_path) temp_file_path = upload_file_streamlit() if temp_file_path: loader = PyPDFLoader(temp_file_path) documents = loader.load_and_split() db = LanceDB.from_documents(documents, embeddings, connection=table) return db def ex13_vectorstore_creator(): if "vectorstore" not in st.session_state: st.session_state.vectorstore = False db = vectorstore_creator() st.session_state.vectorstore = db if st.session_state.vectorstore: query = st.text_input("Enter a query") if query: st.session_state.vectorstore = db docs = db.similarity_search(query) st.write(docs[0].page_content) ''') st.markdown("**:red[Code Output]**") ex13_vectorstore_creator() # save the vectorstore in st.session_state # add semantic search prompt into memory prompt # integrate back into your chatbot def ex14(): # Prompt_template form from ex11 prompt_template = PromptTemplate( input_variables=["occupation", "topic", "age"], template="""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""", ) dict_inputs = prompt_inputs_form() if dict_inputs: input_prompt = prompt_template.format( occupation=dict_inputs["occupation"], topic=dict_inputs["topic"], age=dict_inputs["age"], ) st.session_state.input_prompt = input_prompt if "input_prompt" not in st.session_state: st.session_state.input_prompt = "Speak like Yoda from Star Wars" if "memory" not in st.session_state: st.session_state.memory = ConversationBufferWindowMemory(k=5) # step 1 save the memory from your chatbot # step 2 integrate the memory in the prompt_template (st.session_state.prompt_template) show a hint memory_data = st.session_state.memory.load_memory_variables({}) st.write(memory_data) st.session_state.prompt_template = f""" st.session_state.input_prompt: 
{st.session_state.input_prompt} This is the last conversation history {memory_data} """ st.write("new prompt template: ", st.session_state.prompt_template) st.session_state.vectorstore = vectorstore_creator() # Initialize chat history if "msg" not in st.session_state: st.session_state.msg = [] # Showing Chat history for message in st.session_state.msg: with st.chat_message(message["role"]): st.markdown(message["content"]) try: # if prompt := st.chat_input("What is up?"): # query information if st.session_state.vectorstore: docs = st.session_state.vectorstore.similarity_search(prompt) docs = docs[0].page_content # add your query prompt vs_prompt = f"""You should reference this search result to help your answer, {docs} if the search result does not anwer the query, please say you are unable to answer, do not make up an answer""" else: vs_prompt = "" # add query prompt to your memory prompt and send it to LLM st.session_state.prompt_template = ( st.session_state.prompt_template + vs_prompt ) # set user prompt in chat history st.session_state.msg.append({"role": "user", "content": prompt}) with st.chat_message("user"): st.markdown(prompt) with st.chat_message("assistant"): message_placeholder = st.empty() full_response = "" # streaming function for response in chat_completion_stream_prompt(prompt): full_response += response.choices[0].delta.get("content", "") message_placeholder.markdown(full_response + "▌") message_placeholder.markdown(full_response) st.session_state.msg.append({"role": "assistant", "content": full_response}) st.session_state.memory.save_context( {"input": prompt}, {"output": full_response} ) except Exception as e: st.error(e) def class1_ex14(): st.subheader("Exercise 14: Semantic search") st.write("In this exercise. we will do a semantic search on the vector store in our chatbot.") st.write("At the same time, the chatbot is able to remember its conversation history to some extent.") st.write("This code integrates advanced features like semantic search and context-aware prompts to provide a more engaging and helpful conversational experience.") st.write("Copy and run the code below to see the chatbot in action.") st.markdown("**:blue[Code]**") st.code(''' # save the vectorstore in st.session_state # add semantic search prompt into memory prompt # integrate back into your chatbot def ex14_basebot(): # Prompt_template form from ex11 prompt_template = PromptTemplate( input_variables=["occupation", "topic", "age"], template="""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""", ) dict_inputs = prompt_inputs_form() if dict_inputs: input_prompt = prompt_template.format( occupation=dict_inputs["occupation"], topic=dict_inputs["topic"], age=dict_inputs["age"], ) st.session_state.input_prompt = input_prompt if "input_prompt" not in st.session_state: st.session_state.input_prompt = "Speak like Yoda from Star Wars" if "memory" not in st.session_state: st.session_state.memory = ConversationBufferWindowMemory(k=5) # step 1 save the memory from your chatbot # step 2 integrate the memory in the prompt_template (st.session_state.prompt_template) show a hint memory_data = st.session_state.memory.load_memory_variables({}) st.write(memory_data) st.session_state.prompt_template = f""" {st.session_state.input_prompt} This is the last conversation history: {memory_data}""" st.write("new prompt 
template: ", st.session_state.prompt_template) st.session_state.vectorstore = vectorstore_creator() # Initialize chat history if "msg" not in st.session_state: st.session_state.msg = [] # Showing Chat history for message in st.session_state.msg: with st.chat_message(message["role"]): st.markdown(message["content"]) try: # if prompt := st.chat_input("What is up?"): # query information if st.session_state.vectorstore: docs = st.session_state.vectorstore.similarity_search(prompt) docs = docs[0].page_content # add your query prompt vs_prompt = f""" You should reference this search result to help your answer, {docs} if the search result does not anwer the query, please say you are unable to answer, do not make up an answer""" else: vs_prompt = "" # add query prompt to your memory prompt and send it to LLM st.session_state.prompt_template = ( st.session_state.prompt_template + vs_prompt ) # set user prompt in chat history st.session_state.msg.append({"role": "user", "content": prompt}) with st.chat_message("user"): st.markdown(prompt) with st.chat_message("assistant"): message_placeholder = st.empty() full_response = "" # streaming function for response in chat_completion_stream_prompt(prompt): full_response += response.choices[0].delta.get("content", "") message_placeholder.markdown(full_response + "▌") message_placeholder.markdown(full_response) st.session_state.msg.append({"role": "assistant", "content": full_response}) st.session_state.memory.save_context( {"input": prompt}, {"output": full_response} ) except Exception as e: st.error(e) ''') st.markdown("**:red[Code Output]**") #actual code here ex14() def ex15_initialise(): # Create or check for the 'database' directory in the current working directory # Set DB_NAME to be within the 'database' directory at the top of main.py # Connect to the SQLite database conn = sqlite3.connect(DB_NAME) cursor = conn.cursor() # Conversation data table cursor.execute( """ CREATE TABLE IF NOT EXISTS data_table ( id INTEGER PRIMARY KEY, date TEXT NOT NULL UNIQUE, username TEXT NOT NULL, chatbot_ans TEXT NOT NULL, user_prompt TEXT NOT NULL, tokens TEXT ) """ ) conn.commit() conn.close() def ex15_collect(username, chatbot_response, prompt): # collect data from bot conn = sqlite3.connect(DB_NAME) cursor = conn.cursor() now = datetime.now() # Using ISO format for date tokens = len(chatbot_response) * 1.3 cursor.execute( """ INSERT INTO data_table (date, username,chatbot_ans, user_prompt, tokens) VALUES (?, ?, ?, ?, ?) """, (now, username, chatbot_response, prompt, tokens), ) conn.commit() conn.close() # implementing data collection and displaying def ex15(): # initialise database first ex15_initialise() # collect some data ex15_collect("yoda", "I am Yoda. 
The Force is strong with you", "Who are you?") # display data # Connect to the specified database conn = sqlite3.connect(DB_NAME) cursor = conn.cursor() # Fetch all data from data_table cursor.execute("SELECT * FROM data_table") rows = cursor.fetchall() column_names = [description[0] for description in cursor.description] df = pd.DataFrame(rows, columns=column_names) st.dataframe(df) conn.close() def class1_ex15(): st.subheader("Exercise 15: Using a database") st.write("In this exercise, we will demonstrate how to create a database, as well as how to store and retrieve data from it.") st.markdown("**:blue[Code]**") st.code(''' def ex15_initialise(): # Create or check for the 'database' directory in the current working directory # Set DB_NAME to be within the 'database' directory at the top of main.py # Connect to the SQLite database conn = sqlite3.connect(DB_NAME) cursor = conn.cursor() # Conversation data table cursor.execute( """ CREATE TABLE IF NOT EXISTS data_table ( id INTEGER PRIMARY KEY, date TEXT NOT NULL UNIQUE, username TEXT NOT NULL, chatbot_ans TEXT NOT NULL, user_prompt TEXT NOT NULL, tokens TEXT ) """ ) conn.commit() conn.close() def ex15_collect(username, chatbot_response, prompt): # collect data from bot conn = sqlite3.connect(DB_NAME) cursor = conn.cursor() now = datetime.now() # Using ISO format for date tokens = len(chatbot_response) * 1.3 cursor.execute( """ INSERT INTO data_table (date, username,chatbot_ans, user_prompt, tokens) VALUES (?, ?, ?, ?, ?) """, (now, username, chatbot_response, prompt, tokens), ) conn.commit() conn.close() # implementing data collection and displaying def ex15(): # initialise database first ex15_initialise() # collect some data ex15_collect("yoda", "I am Yoda. The Force is strong with you", "Who are you?") # display data # Connect to the specified database conn = sqlite3.connect(DB_NAME) cursor = conn.cursor() # Fetch all data from data_table cursor.execute("SELECT * FROM data_table") rows = cursor.fetchall() column_names = [description[0] for description in cursor.description] df = pd.DataFrame(rows, columns=column_names) st.dataframe(df) conn.close() ''') st.markdown("**:red[Code Output]**") # Actual code here ex15() def ch15_chatbot(): #display ex15 table ex15() # Prompt_template form from ex11 prompt_template = PromptTemplate( input_variables=["occupation", "topic", "age"], template="""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""", ) dict_inputs = prompt_inputs_form() if dict_inputs: input_prompt = prompt_template.format( occupation=dict_inputs["occupation"], topic=dict_inputs["topic"], age=dict_inputs["age"], ) st.session_state.input_prompt = input_prompt if "input_prompt" not in st.session_state: st.session_state.input_prompt = "Speak like Yoda from Star Wars" if "memory" not in st.session_state: st.session_state.memory = ConversationBufferWindowMemory(k=5) # step 1 save the memory from your chatbot # step 2 integrate the memory in the prompt_template (st.session_state.prompt_template) show a hint memory_data = st.session_state.memory.load_memory_variables({}) st.write(memory_data) st.session_state.prompt_template = f"""{st.session_state.input_prompt} This is the last conversation history {memory_data} """ st.write("new prompt template: ", st.session_state.prompt_template) st.session_state.vectorstore = vectorstore_creator() # 
    # Initialize chat history
    if "msg" not in st.session_state:
        st.session_state.msg = []

    # Showing Chat history
    for message in st.session_state.msg:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    try:
        #
        if prompt := st.chat_input("What is up?"):
            # query information
            if st.session_state.vectorstore:
                docs = st.session_state.vectorstore.similarity_search(prompt)
                docs = docs[0].page_content
                # add your query prompt
                vs_prompt = f"""You should reference this search result to help your answer,
{docs}
if the search result does not answer the query, please say you are unable to answer, do not make up an answer"""
            else:
                vs_prompt = ""
            # add query prompt to your memory prompt and send it to LLM
            st.session_state.prompt_template = (
                st.session_state.prompt_template + vs_prompt
            )
            # set user prompt in chat history
            st.session_state.msg.append({"role": "user", "content": prompt})
            with st.chat_message("user"):
                st.markdown(prompt)
            with st.chat_message("assistant"):
                message_placeholder = st.empty()
                full_response = ""
                # streaming function
                for response in chat_completion_stream_prompt(prompt):
                    full_response += response.choices[0].delta.get("content", "")
                    message_placeholder.markdown(full_response + "▌")
                message_placeholder.markdown(full_response)
            st.session_state.msg.append({"role": "assistant", "content": full_response})
            st.session_state.memory.save_context(
                {"input": prompt}, {"output": full_response}
            )
            ex15_collect(st.session_state.name, full_response, prompt)
    except Exception as e:
        st.error(e)


def class1_ch15():
    st.subheader("Challenge 15: Using a database")
    st.write("For this challenge, we will incorporate using a database from our previous exercise.")
    st.write("Copy the code from ***ex14()*** and use the ***ex15()*** to collect and display the data.")
    st.markdown("**:blue[Code]**")
    with st.expander("Reveal Code"):
        st.code('''
def ch15_chatbot():
    # display ex15 table
    ex15()
    # Prompt_template form from ex11
    prompt_template = PromptTemplate(
        input_variables=["occupation", "topic", "age"],
        template="""Imagine you are a {occupation} who is an expert on the topic of {topic}, you are going to help, teach and provide information to the person who is {age} years old, if you do not know the answer, you must tell the person, do not make any answer up""",
    )
    dict_inputs = prompt_inputs_form()
    if dict_inputs:
        input_prompt = prompt_template.format(
            occupation=dict_inputs["occupation"],
            topic=dict_inputs["topic"],
            age=dict_inputs["age"],
        )
        st.session_state.input_prompt = input_prompt

    if "input_prompt" not in st.session_state:
        st.session_state.input_prompt = "Speak like Yoda from Star Wars"

    if "memory" not in st.session_state:
        st.session_state.memory = ConversationBufferWindowMemory(k=5)

    # step 1: save the memory from your chatbot
    # step 2: integrate the memory in the prompt_template (st.session_state.prompt_template) - show a hint
    memory_data = st.session_state.memory.load_memory_variables({})
    st.write(memory_data)
    st.session_state.prompt_template = f"""{st.session_state.input_prompt}

This is the last conversation history
{memory_data}
"""
    st.write("new prompt template: ", st.session_state.prompt_template)

    st.session_state.vectorstore = vectorstore_creator()

    # Initialize chat history
    if "msg" not in st.session_state:
        st.session_state.msg = []

    # Showing Chat history
    for message in st.session_state.msg:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    try:
        #
        if prompt := st.chat_input("What is up?"):
            # query information
            if st.session_state.vectorstore:
                docs = st.session_state.vectorstore.similarity_search(prompt)
                docs = docs[0].page_content
                # add your query prompt
                vs_prompt = f"""You should reference this search result to help your answer,
{docs}
if the search result does not answer the query, please say you are unable to answer, do not make up an answer"""
            else:
                vs_prompt = ""
            # add query prompt to your memory prompt and send it to LLM
            st.session_state.prompt_template = (
                st.session_state.prompt_template + vs_prompt
            )
            # set user prompt in chat history
            st.session_state.msg.append({"role": "user", "content": prompt})
            with st.chat_message("user"):
                st.markdown(prompt)
            with st.chat_message("assistant"):
                message_placeholder = st.empty()
                full_response = ""
                # streaming function
                for response in chat_completion_stream_prompt(prompt):
                    full_response += response.choices[0].delta.get("content", "")
                    message_placeholder.markdown(full_response + "▌")
                message_placeholder.markdown(full_response)
            st.session_state.msg.append({"role": "assistant", "content": full_response})
            st.session_state.memory.save_context(
                {"input": prompt}, {"output": full_response}
            )
            ex15_collect(st.session_state.name, full_response, prompt)
    except Exception as e:
        st.error(e)
''')
    st.markdown("**:red[Code Output]**")
    # Actual code here
    ch15_chatbot()


# smart agents accessing the internet for free
# https://github.com/langchain-ai/streamlit-agent/blob/main/streamlit_agent/search_and_chat.py
def ex16():
    st.title("🦜 LangChain: Chat with internet search")

    msgs = StreamlitChatMessageHistory()
    memory = ConversationBufferMemory(
        chat_memory=msgs,
        return_messages=True,
        memory_key="chat_history",
        output_key="output",
    )
    if len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"):
        msgs.clear()
        msgs.add_ai_message("How can I help you?")
        st.session_state.steps = {}

    avatars = {"human": "user", "ai": "assistant"}
    for idx, msg in enumerate(msgs.messages):
        with st.chat_message(avatars[msg.type]):
            # Render intermediate steps if any were saved
            for step in st.session_state.steps.get(str(idx), []):
                if step[0].tool == "_Exception":
                    continue
                with st.status(
                    f"**{step[0].tool}**: {step[0].tool_input}", state="complete"
                ):
                    st.write(step[0].log)
                    st.write(step[1])
            st.write(msg.content)

    if prompt := st.chat_input(placeholder="Enter a query on the Internet"):
        st.chat_message("user").write(prompt)

        llm = ChatOpenAI(
            model_name="gpt-3.5-turbo", openai_api_key=openai.api_key, streaming=True
        )
        tools = [DuckDuckGoSearchRun(name="Search")]
        chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools)
        executor = AgentExecutor.from_agent_and_tools(
            agent=chat_agent,
            tools=tools,
            memory=memory,
            return_intermediate_steps=True,
            handle_parsing_errors=True,
        )
        with st.chat_message("assistant"):
            st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
            response = executor(prompt, callbacks=[st_cb])
            st.write(response["output"])
            st.session_state.steps[str(len(msgs.messages) - 1)] = response[
                "intermediate_steps"
            ]


def class1_ex16():
    st.subheader("Exercise 16: Smart agent")
    st.write("In this exercise, we will configure a chatbot with an internet search tool that shows all intermediate steps and tool logs.")
    st.write("This overcomes the limitation of the training data that is only up to a certain point in time, by being able to access the current internet to search for answers.")
    st.write("You may need to run the following commands in the terminal to install new libraries:")
    st.code('''
pip install duckduckgo-search
''')
    st.markdown("**:blue[Code]**")
    st.code('''
# smart agents accessing the internet for free
# https://github.com/langchain-ai/streamlit-agent/blob/main/streamlit_agent/search_and_chat.py
def ex16_agent_bot():
    st.title("🦜 LangChain: Chat with internet search")

    msgs = StreamlitChatMessageHistory()
    memory = ConversationBufferMemory(
        chat_memory=msgs,
        return_messages=True,
        memory_key="chat_history",
        output_key="output",
    )
    if len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"):
        msgs.clear()
        msgs.add_ai_message("How can I help you?")
        st.session_state.steps = {}

    avatars = {"human": "user", "ai": "assistant"}
    for idx, msg in enumerate(msgs.messages):
        with st.chat_message(avatars[msg.type]):
            # Render intermediate steps if any were saved
            for step in st.session_state.steps.get(str(idx), []):
                if step[0].tool == "_Exception":
                    continue
                with st.status(
                    f"**{step[0].tool}**: {step[0].tool_input}", state="complete"
                ):
                    st.write(step[0].log)
                    st.write(step[1])
            st.write(msg.content)

    if prompt := st.chat_input(placeholder="Enter a query on the Internet"):
        st.chat_message("user").write(prompt)

        llm = ChatOpenAI(
            model_name="gpt-3.5-turbo", openai_api_key=openai.api_key, streaming=True
        )
        tools = [DuckDuckGoSearchRun(name="Search")]
        chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools)
        executor = AgentExecutor.from_agent_and_tools(
            agent=chat_agent,
            tools=tools,
            memory=memory,
            return_intermediate_steps=True,
            handle_parsing_errors=True,
        )
        with st.chat_message("assistant"):
            st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
            response = executor(prompt, callbacks=[st_cb])
            st.write(response["output"])
            st.session_state.steps[str(len(msgs.messages) - 1)] = response[
                "intermediate_steps"
            ]
''')
    st.markdown("**:red[Code Output]**")
    # Actual code here
    ex16()


# agents, vectorstores, wiki
# https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval
# note tool
@tool("Document search")
def document_search(query: str) -> str:
    # this is the prompt to the tool itself
    "Use this function first to search for documents pertaining to the query before going into the internet"
    docs = st.session_state.vectorstore.similarity_search(query)
    docs = docs[0].page_content
    json_string = json.dumps(docs, ensure_ascii=False, indent=4)
    return json_string


# combine vector store and internet search
def ex17():
    st.title("🦜 LangChain: Chat with internet search")
    st.session_state.vectorstore = vectorstore_creator()

    msgs = StreamlitChatMessageHistory()
    memory = ConversationBufferMemory(
        chat_memory=msgs,
        return_messages=True,
        memory_key="chat_history",
        output_key="output",
    )
    if len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"):
        msgs.clear()
        msgs.add_ai_message("How can I help you?")
        st.session_state.steps = {}

    avatars = {"human": "user", "ai": "assistant"}
    for idx, msg in enumerate(msgs.messages):
        with st.chat_message(avatars[msg.type]):
            # Render intermediate steps if any were saved
            for step in st.session_state.steps.get(str(idx), []):
                if step[0].tool == "_Exception":
                    continue
                with st.status(
                    f"**{step[0].tool}**: {step[0].tool_input}", state="complete"
                ):
                    st.write(step[0].log)
                    st.write(step[1])
            st.write(msg.content)

    if prompt := st.chat_input(placeholder="Enter a query on the Internet"):
        st.chat_message("user").write(prompt)

        llm = ChatOpenAI(
            model_name="gpt-3.5-turbo", openai_api_key=openai.api_key, streaming=True
        )
        tools = [document_search, DuckDuckGoSearchRun(name="Internet Search")]
        chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools)
        executor = AgentExecutor.from_agent_and_tools(
            agent=chat_agent,
            tools=tools,
            memory=memory,
            return_intermediate_steps=True,
            handle_parsing_errors=True,
        )
        with st.chat_message("assistant"):
            st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
            response = executor(prompt, callbacks=[st_cb])
            st.write(response["output"])
            st.session_state.steps[str(len(msgs.messages) - 1)] = response[
                "intermediate_steps"
            ]


def class1_ex17():
    st.subheader("Exercise 17: Smart agent with vector store")
    st.write("In this exercise, we will combine the vector store with the smart agent.")
    st.write("This allows the chatbot to search for answers from the vector store and the internet.")
    st.write("The @tool(\"Document search\") function is an enhancement to the chatbot. It allows for an initial internal document search based on the user query before resorting to external internet searches.")
    st.write("Copy and run the code below to see the chatbot in action.")
    st.markdown("**:blue[Code]**")
    st.code('''
# agents, vectorstores, wiki
# https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval
# note tool
@tool("Document search")
def document_search(query: str) -> str:
    # this is the prompt to the tool itself
    "Use this function first to search for documents pertaining to the query before going into the internet"
    docs = st.session_state.vectorstore.similarity_search(query)
    docs = docs[0].page_content
    json_string = json.dumps(docs, ensure_ascii=False, indent=4)
    return json_string

# combine vector store and internet search
def ex17_agent_bot():
    st.title("🦜 LangChain: Chat with internet search")
    st.session_state.vectorstore = vectorstore_creator()

    msgs = StreamlitChatMessageHistory()
    memory = ConversationBufferMemory(
        chat_memory=msgs,
        return_messages=True,
        memory_key="chat_history",
        output_key="output",
    )
    if len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"):
        msgs.clear()
        msgs.add_ai_message("How can I help you?")
        st.session_state.steps = {}

    avatars = {"human": "user", "ai": "assistant"}
    for idx, msg in enumerate(msgs.messages):
        with st.chat_message(avatars[msg.type]):
            # Render intermediate steps if any were saved
            for step in st.session_state.steps.get(str(idx), []):
                if step[0].tool == "_Exception":
                    continue
                with st.status(
                    f"**{step[0].tool}**: {step[0].tool_input}", state="complete"
                ):
                    st.write(step[0].log)
                    st.write(step[1])
            st.write(msg.content)

    if prompt := st.chat_input(placeholder="Enter a query on the Internet"):
        st.chat_message("user").write(prompt)

        llm = ChatOpenAI(
            model_name="gpt-3.5-turbo", openai_api_key=openai.api_key, streaming=True
        )
        tools = [document_search, DuckDuckGoSearchRun(name="Internet Search")]
        chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools)
        executor = AgentExecutor.from_agent_and_tools(
            agent=chat_agent,
            tools=tools,
            memory=memory,
            return_intermediate_steps=True,
            handle_parsing_errors=True,
        )
        with st.chat_message("assistant"):
            st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
            response = executor(prompt, callbacks=[st_cb])
            st.write(response["output"])
            st.session_state.steps[str(len(msgs.messages) - 1)] = response[
                "intermediate_steps"
            ]
''')
    st.markdown("**:red[Code Output]**")
    # Actual code here
    ex17()


# PandasAI - A smart agent that can do visual analytics
def ex18():
    st.title("pandas-ai streamlit interface")

    # Upload CSV file using st.file_uploader
    uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
    if "openai_key" not in st.session_state:
        st.session_state.openai_key = st.secrets["openapi_key"]
        st.session_state.prompt_history = []
        st.session_state.df = None

    if uploaded_file is not None:
        try:
            df = pd.read_csv(uploaded_file)
            st.session_state.df = df
        except Exception as e:
            st.write("There was an error processing the CSV file.")
            st.write(e)
    else:
        st.session_state.df = pd.DataFrame(
            {
                "country": [
                    "United States",
                    "United Kingdom",
                    "France",
                    "Germany",
                    "Italy",
                    "Spain",
                    "Canada",
                    "Australia",
                    "Japan",
                    "China",
                ],
                "gdp": [
                    19294482071552,
                    2891615567872,
                    2411255037952,
                    3435817336832,
                    1745433788416,
                    1181205135360,
                    1607402389504,
                    1490967855104,
                    4380756541440,
                    14631844184064,
                ],
                "happiness_index": [
                    6.94,
                    7.16,
                    6.66,
                    7.07,
                    6.38,
                    6.4,
                    7.23,
                    7.22,
                    5.87,
                    5.12,
                ],
            }
        )

    chart_path = os.path.join("exports/charts")
    with st.form("Question"):
        question = st.text_input("Question", value="", type="default")
        submitted = st.form_submit_button("Submit")
        if submitted:
            with st.spinner():
                llm = OpenAI(api_token=st.session_state.openai_key)
                df = SmartDataframe(
                    st.session_state.df,
                    config={
                        "llm": llm,
                        "save_charts_path": chart_path,
                        "save_charts": True,
                        "verbose": True,
                    },
                )
                response = df.chat(
                    question
                )  # Using 'chat' method based on your context

                # Display the textual response (if any):
                if response:
                    st.write(response)
                chart_path = os.path.join("exports/charts", "temp_chart.png")
                if os.path.exists(chart_path):
                    st.image(
                        chart_path, caption="Generated Chart", use_column_width=True
                    )
                # Append the question to the history:
                st.session_state.prompt_history.append(question)

    if st.session_state.df is not None:
        st.subheader("Current dataframe:")
        st.write(st.session_state.df)

    st.subheader("Prompt history:")
    st.write(st.session_state.prompt_history)

    if st.button("Clear"):
        st.session_state.prompt_history = []
        st.session_state.df = None


def class1_ex18():
    st.subheader("Exercise 18: Data Analytics")
    st.write("In this exercise, we will use the Pandas AI library to perform data analytics.")
    st.write("The Pandas AI library is a smart agent that can perform data analytics on a dataframe.")
    st.write("You may need to install the following library:")
    st.code('''pip install bs4''')
    st.write("Copy and run the code below to see the chatbot in action.")
    st.markdown("**:blue[Code]**")
    st.code('''
# PandasAI - A smart agent that can do visual analytics
def ex18_pandas_AI():
    st.title("pandas-ai streamlit interface")

    # Upload CSV file using st.file_uploader
    uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
    if "openai_key" not in st.session_state:
        st.session_state.openai_key = st.secrets["openapi_key"]
        st.session_state.prompt_history = []
        st.session_state.df = None

    if uploaded_file is not None:
        try:
            df = pd.read_csv(uploaded_file)
            st.session_state.df = df
        except Exception as e:
            st.write("There was an error processing the CSV file.")
            st.write(e)
    else:
        st.session_state.df = pd.DataFrame(
            {
                "country": [
                    "United States",
                    "United Kingdom",
                    "France",
                    "Germany",
                    "Italy",
                    "Spain",
                    "Canada",
                    "Australia",
                    "Japan",
                    "China",
                ],
                "gdp": [
                    19294482071552,
                    2891615567872,
                    2411255037952,
                    3435817336832,
                    1745433788416,
                    1181205135360,
                    1607402389504,
                    1490967855104,
                    4380756541440,
                    14631844184064,
                ],
                "happiness_index": [
                    6.94,
                    7.16,
                    6.66,
                    7.07,
                    6.38,
                    6.4,
                    7.23,
                    7.22,
                    5.87,
                    5.12,
                ],
            }
        )

    chart_path = os.path.join("exports/charts")
    with st.form("Question"):
        question = st.text_input("Question", value="", type="default")
        submitted = st.form_submit_button("Submit")
        if submitted:
            with st.spinner():
                llm = OpenAI(api_token=st.session_state.openai_key)
                df = SmartDataframe(
                    st.session_state.df,
                    config={
                        "llm": llm,
                        "save_charts_path": chart_path,
                        "save_charts": True,
                        "verbose": True,
                    },
                )
                response = df.chat(
                    question
                )  # Using 'chat' method based on your context

                # Display the textual response (if any):
                if response:
                    st.write(response)
                chart_path = os.path.join("exports/charts", "temp_chart.png")
                if os.path.exists(chart_path):
                    st.image(
                        chart_path, caption="Generated Chart", use_column_width=True
                    )
                # Append the question to the history:
                st.session_state.prompt_history.append(question)

    if st.session_state.df is not None:
        st.subheader("Current dataframe:")
        st.write(st.session_state.df)

    st.subheader("Prompt history:")
    st.write(st.session_state.prompt_history)

    if st.button("Clear"):
        st.session_state.prompt_history = []
        st.session_state.df = None
''')
    st.markdown("**:red[Code Output]**")
    ex18()
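# ---------------------------------------------------------------------------
# Note: the exercises above call a vectorstore_creator() helper that is
# defined earlier in this source file but not shown in this excerpt; the
# `apis` annotation below records lancedb.connect as the call of interest for
# this row. What follows is a minimal, hedged sketch of what such a helper
# can look like, assuming LangChain's LanceDB wrapper and OpenAI embeddings.
# The table name, seed row, and placeholder corpus are hypothetical and are
# not taken from the source file.
# ---------------------------------------------------------------------------
import os
import lancedb
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
from langchain.vectorstores import LanceDB

WORKING_DIRECTORY = os.path.join(os.getcwd(), "database")

def vectorstore_creator_sketch():
    # lancedb.connect opens (or creates) an embedded database on disk;
    # this is the call recorded in the `apis` field below.
    db = lancedb.connect(os.path.join(WORKING_DIRECTORY, "default_db"))
    embeddings = OpenAIEmbeddings()  # requires OPENAI_API_KEY in the environment
    # A LanceDB table is created with one seed row so its schema is fixed.
    table = db.create_table(
        "my_table",  # hypothetical table name
        data=[{"vector": embeddings.embed_query("seed"), "text": "seed", "id": "1"}],
        mode="overwrite",
    )
    docs = [Document(page_content="Hello World")]  # placeholder corpus
    return LanceDB.from_documents(docs, embeddings, connection=table)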
[ "lancedb.connect" ]
[((1222, 1233), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1231, 1233), False, 'import os\n'), ((1254, 1283), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (1266, 1283), False, 'import os\n'), ((1375, 1420), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""default_db"""'], {}), "(WORKING_DIRECTORY, 'default_db')\n", (1387, 1420), False, 'import os\n'), ((86756, 86779), 'langchain.agents.tool', 'tool', (['"""Document search"""'], {}), "('Document search')\n", (86760, 86779), False, 'from langchain.agents import tool\n'), ((1292, 1325), 'os.path.exists', 'os.path.exists', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1306, 1325), False, 'import os\n'), ((1328, 1358), 'os.makedirs', 'os.makedirs', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1339, 1358), False, 'import os\n'), ((1439, 1463), 'streamlit.subheader', 'st.subheader', (['"""Template"""'], {}), "('Template')\n", (1451, 1463), True, 'import streamlit as st\n'), ((1465, 1495), 'streamlit.write', 'st.write', (['"""Instruction lines."""'], {}), "('Instruction lines.')\n", (1473, 1495), True, 'import streamlit as st\n'), ((1498, 1528), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (1509, 1528), True, 'import streamlit as st\n'), ((1530, 1566), 'streamlit.code', 'st.code', (['"""\n#exercise code here\n"""'], {}), '("""\n#exercise code here\n""")\n', (1537, 1566), True, 'import streamlit as st\n'), ((1568, 1598), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (1579, 1598), True, 'import streamlit as st\n'), ((1677, 1713), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (1688, 1713), True, 'import streamlit as st\n'), ((1844, 1856), 'streamlit.divider', 'st.divider', ([], {}), '()\n', (1854, 1856), True, 'import streamlit as st\n'), ((1858, 1892), 'streamlit.subheader', 'st.subheader', (['"""Pre-workshop Setup"""'], {}), "('Pre-workshop Setup')\n", (1870, 1892), True, 'import streamlit as st\n'), ((1910, 2088), 'streamlit.markdown', 'st.markdown', (['"""1. Visual Studio (VS Code): this is the Integrated Development Environment (IDE) of choice by many coders and will make it easier for us to code our app."""'], {}), "(\n '1. Visual Studio (VS Code): this is the Integrated Development Environment (IDE) of choice by many coders and will make it easier for us to code our app.'\n )\n", (1921, 2088), True, 'import streamlit as st\n'), ((2084, 2217), 'streamlit.markdown', 'st.markdown', (['"""2. Python (3.10 release or later): this is the coding language of choice for many data science related apps."""'], {}), "(\n '2. Python (3.10 release or later): this is the coding language of choice for many data science related apps.'\n )\n", (2095, 2217), True, 'import streamlit as st\n'), ((2213, 2621), 'streamlit.write', 'st.write', (['"""\n\t\t\t3. Once you have installed the above, we will need to set up a virtual environment and install the libraries in that environment.\n\n\t\t\t\tCreate a folder named “chatbot” anywhere, e.g. in your Desktop.\n \n\t\t\t\tOpen VS Code and navigate to the folder in a new terminal window.\n \n\t\t\t\tCreate a virtual environment and activate it by entering the following commands in the terminal:\n\t\t\t """'], {}), '(\n """\n\t\t\t3. 
Once you have installed the above, we will need to set up a virtual environment and install the libraries in that environment.\n\n\t\t\t\tCreate a folder named “chatbot” anywhere, e.g. in your Desktop.\n \n\t\t\t\tOpen VS Code and navigate to the folder in a new terminal window.\n \n\t\t\t\tCreate a virtual environment and activate it by entering the following commands in the terminal:\n\t\t\t """\n )\n', (2221, 2621), True, 'import streamlit as st\n'), ((2616, 2644), 'streamlit.markdown', 'st.markdown', (['""" **a) Mac**"""'], {}), "(' **a) Mac**')\n", (2627, 2644), True, 'import streamlit as st\n'), ((2646, 2716), 'streamlit.code', 'st.code', (['"""\n\t\tpython3 -m venv venv\n\t\tsource venv/bin/activate \n\t\t\t"""'], {}), '("""\n\t\tpython3 -m venv venv\n\t\tsource venv/bin/activate \n\t\t\t""")\n', (2653, 2716), True, 'import streamlit as st\n'), ((2718, 2796), 'streamlit.markdown', 'st.markdown', (['"""(You should see a (venv) appear in your terminal window prompt)"""'], {}), "('(You should see a (venv) appear in your terminal window prompt)')\n", (2729, 2796), True, 'import streamlit as st\n'), ((2798, 2814), 'streamlit.markdown', 'st.markdown', (['"""#"""'], {}), "('#')\n", (2809, 2814), True, 'import streamlit as st\n'), ((2816, 2848), 'streamlit.markdown', 'st.markdown', (['""" **b) Windows**"""'], {}), "(' **b) Windows**')\n", (2827, 2848), True, 'import streamlit as st\n'), ((2850, 2921), 'streamlit.code', 'st.code', (['"""\n\t\tpython -m venv venv\n\t\tcd venv\\\\Scripts\n\t\tactivate\n\t\t\t"""'], {}), '("""\n\t\tpython -m venv venv\n\t\tcd venv\\\\Scripts\n\t\tactivate\n\t\t\t""")\n', (2857, 2921), True, 'import streamlit as st\n'), ((2922, 3072), 'streamlit.markdown', 'st.markdown', (['"""4. While in your virtual environment, install the libraries using pip which should already be installed together with Python:"""'], {}), "(\n '4. 
While in your virtual environment, install the libraries using pip which should already be installed together with Python:'\n )\n", (2933, 3072), True, 'import streamlit as st\n'), ((3064, 3114), 'streamlit.code', 'st.code', (['"""\n\t\tpip install streamlit openai\n\t\t\t"""'], {}), '("""\n\t\tpip install streamlit openai\n\t\t\t""")\n', (3071, 3114), True, 'import streamlit as st\n'), ((3116, 3193), 'streamlit.markdown', 'st.markdown', (['""" To test if Streamlit is installed properly, run this command:"""'], {}), "(' To test if Streamlit is installed properly, run this command:')\n", (3127, 3193), True, 'import streamlit as st\n'), ((3195, 3231), 'streamlit.code', 'st.code', (['"""\n\tstreamlit hello\n\t\t\t"""'], {}), '("""\n\tstreamlit hello\n\t\t\t""")\n', (3202, 3231), True, 'import streamlit as st\n'), ((3233, 3325), 'streamlit.markdown', 'st.markdown', (['""" You should see a Streamlit application running at http://localhost:8501"""'], {}), "(\n ' You should see a Streamlit application running at http://localhost:8501')\n", (3244, 3325), True, 'import streamlit as st\n'), ((3322, 3397), 'streamlit.markdown', 'st.markdown', (['""" Type Ctrl + C in VS Code terminal to stop the Streamlit app"""'], {}), "(' Type Ctrl + C in VS Code terminal to stop the Streamlit app')\n", (3333, 3397), True, 'import streamlit as st\n'), ((3424, 3464), 'streamlit.subheader', 'st.subheader', (['"""My first Hello World app"""'], {}), "('My first Hello World app')\n", (3436, 3464), True, 'import streamlit as st\n'), ((3466, 3478), 'streamlit.divider', 'st.divider', ([], {}), '()\n', (3476, 3478), True, 'import streamlit as st\n'), ((3480, 3533), 'streamlit.markdown', 'st.markdown', (['"""1. Create a new file called \'main.py\'."""'], {}), '("1. Create a new file called \'main.py\'.")\n', (3491, 3533), True, 'import streamlit as st\n'), ((3539, 3640), 'streamlit.markdown', 'st.markdown', (['"""2. Copy the code below and paste it in the newly created helloworld.py file."""'], {}), "(\n '2. Copy the code below and paste it in the newly created helloworld.py file.'\n )\n", (3550, 3640), True, 'import streamlit as st\n'), ((3636, 3666), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (3647, 3666), True, 'import streamlit as st\n'), ((3668, 3778), 'streamlit.code', 'st.code', (['"""\n\t\t\timport streamlit as st\n\t\t\t#my first Hello World app\n\t\t\tst.write("Hello World")\n\t\t"""'], {}), '(\n """\n\t\t\timport streamlit as st\n\t\t\t#my first Hello World app\n\t\t\tst.write("Hello World")\n\t\t"""\n )\n', (3675, 3778), True, 'import streamlit as st\n'), ((3770, 3868), 'streamlit.markdown', 'st.markdown', (['"""Install the watchdog module by running the command below in the terminal."""'], {}), "(\n 'Install the watchdog module by running the command below in the terminal.'\n )\n", (3781, 3868), True, 'import streamlit as st\n'), ((3860, 3891), 'streamlit.code', 'st.code', (['"""pip install watchdog"""'], {}), "('pip install watchdog')\n", (3867, 3891), True, 'import streamlit as st\n'), ((3893, 4030), 'streamlit.markdown', 'st.markdown', (['"""Now you don\'t have to keep restarting the app to see the changes you make to the code. Just refresh the browser."""'], {}), '(\n "Now you don\'t have to keep restarting the app to see the changes you make to the code. 
Just refresh the browser."\n )\n', (3904, 4030), True, 'import streamlit as st\n'), ((4022, 4123), 'streamlit.write', 'st.write', (['"""Save your file and run the app by typing the following command in the terminal:"""'], {}), "(\n 'Save your file and run the app by typing the following command in the terminal:'\n )\n", (4030, 4123), True, 'import streamlit as st\n'), ((4115, 4158), 'streamlit.code', 'st.code', (['"""\n\t\t\tstreamlit run main.py\n\t\t"""'], {}), '("""\n\t\t\tstreamlit run main.py\n\t\t""")\n', (4122, 4158), True, 'import streamlit as st\n'), ((4160, 4259), 'streamlit.markdown', 'st.markdown', (['"""3. You should see a Streamlit application running at http://localhost:8501"""'], {}), "(\n '3. You should see a Streamlit application running at http://localhost:8501'\n )\n", (4171, 4259), True, 'import streamlit as st\n'), ((4255, 4332), 'streamlit.markdown', 'st.markdown', (['"""4. Type Ctrl + C in VS Code terminal to stop the Streamlit app"""'], {}), "('4. Type Ctrl + C in VS Code terminal to stop the Streamlit app')\n", (4266, 4332), True, 'import streamlit as st\n'), ((4338, 4374), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (4349, 4374), True, 'import streamlit as st\n'), ((4376, 4399), 'streamlit.write', 'st.write', (['"""Hello World"""'], {}), "('Hello World')\n", (4384, 4399), True, 'import streamlit as st\n'), ((4426, 4452), 'streamlit.subheader', 'st.subheader', (['"""Objectives"""'], {}), "('Objectives')\n", (4438, 4452), True, 'import streamlit as st\n'), ((4454, 4561), 'streamlit.markdown', 'st.markdown', (['"""1. Learn how to use Python and Streamlit library to create an interactive web app."""'], {}), "(\n '1. Learn how to use Python and Streamlit library to create an interactive web app.'\n )\n", (4465, 4561), True, 'import streamlit as st\n'), ((4553, 4683), 'streamlit.markdown', 'st.markdown', (['"""2. Learn how to integrate and use OpenAI\'s API in their streamlit application to create a simple chatbot."""'], {}), '(\n "2. Learn how to integrate and use OpenAI\'s API in their streamlit application to create a simple chatbot."\n )\n', (4564, 4683), True, 'import streamlit as st\n'), ((4675, 4791), 'streamlit.markdown', 'st.markdown', (['"""3. Learn how to apply basic prompt engineering to enhance the interaction with the chatbot."""'], {}), "(\n '3. 
Learn how to apply basic prompt engineering to enhance the interaction with the chatbot.'\n )\n", (4686, 4791), True, 'import streamlit as st\n'), ((4808, 4831), 'streamlit.subheader', 'st.subheader', (['"""Outline"""'], {}), "('Outline')\n", (4820, 4831), True, 'import streamlit as st\n'), ((4833, 4887), 'streamlit.markdown', 'st.markdown', (['"""Part 0: Workshop introduction and rules"""'], {}), "('Part 0: Workshop introduction and rules')\n", (4844, 4887), True, 'import streamlit as st\n'), ((4889, 4948), 'streamlit.markdown', 'st.markdown', (['"""Part 1: Introduction to Python and Streamlit"""'], {}), "('Part 1: Introduction to Python and Streamlit')\n", (4900, 4948), True, 'import streamlit as st\n'), ((4950, 5002), 'streamlit.markdown', 'st.markdown', (['"""Part 2: Creating a rule-based chatbot"""'], {}), "('Part 2: Creating a rule-based chatbot')\n", (4961, 5002), True, 'import streamlit as st\n'), ((5004, 5064), 'streamlit.markdown', 'st.markdown', (['"""Part 3: Creating a chatbot using OpenAI\'s API"""'], {}), '("Part 3: Creating a chatbot using OpenAI\'s API")\n', (5015, 5064), True, 'import streamlit as st\n'), ((5066, 5135), 'streamlit.markdown', 'st.markdown', (['"""Part 4: Modifying your chatbot with prompt engineering"""'], {}), "('Part 4: Modifying your chatbot with prompt engineering')\n", (5077, 5135), True, 'import streamlit as st\n'), ((5163, 5262), 'streamlit.write', 'st.write', (['"""Do introduce yourself to your teammates:\n"""', '"""1) name\n"""', '"""2) division\n"""', '"""3) role"""'], {}), "('Do introduce yourself to your teammates:\\n', '1) name\\n',\n '2) division\\n', '3) role')\n", (5171, 5262), True, 'import streamlit as st\n'), ((5260, 5333), 'streamlit.write', 'st.write', (['"""Please also share your favourite Star Wars character and why!"""'], {}), "('Please also share your favourite Star Wars character and why!')\n", (5268, 5333), True, 'import streamlit as st\n'), ((5343, 5380), 'PIL.Image.open', 'Image.open', (['"""team_introductions.jpeg"""'], {}), "('team_introductions.jpeg')\n", (5353, 5380), False, 'from PIL import Image\n'), ((5382, 5421), 'streamlit.image', 'st.image', (['image'], {'caption': '"""Hello there!"""'}), "(image, caption='Hello there!')\n", (5390, 5421), True, 'import streamlit as st\n'), ((5446, 5476), 'streamlit.subheader', 'st.subheader', (['"""Workshop Rules"""'], {}), "('Workshop Rules')\n", (5458, 5476), True, 'import streamlit as st\n'), ((5478, 5519), 'streamlit.write', 'st.write', (['"""1. Ask if you have questions."""'], {}), "('1. Ask if you have questions.')\n", (5486, 5519), True, 'import streamlit as st\n'), ((5521, 5583), 'streamlit.write', 'st.write', (['"""2. Be open to different ways to solve the problem."""'], {}), "('2. Be open to different ways to solve the problem.')\n", (5529, 5583), True, 'import streamlit as st\n'), ((5585, 5625), 'streamlit.write', 'st.write', (['"""3. Try. Fail. Learn. Repeat."""'], {}), "('3. Try. Fail. Learn. Repeat.')\n", (5593, 5625), True, 'import streamlit as st\n'), ((5627, 5676), 'streamlit.write', 'st.write', (['"""4. Seek help from other team members."""'], {}), "('4. Seek help from other team members.')\n", (5635, 5676), True, 'import streamlit as st\n'), ((5678, 5738), 'streamlit.write', 'st.write', (['"""5. Collaborate, if possible, for the challenges."""'], {}), "('5. Collaborate, if possible, for the challenges.')\n", (5686, 5738), True, 'import streamlit as st\n'), ((5740, 5815), 'streamlit.write', 'st.write', (['"""6. 
Approach facilitators if your team cannot solve the problem."""'], {}), "('6. Approach facilitators if your team cannot solve the problem.')\n", (5748, 5815), True, 'import streamlit as st\n'), ((5817, 5868), 'streamlit.write', 'st.write', (['"""7. Toilet break is own-time-own-target."""'], {}), "('7. Toilet break is own-time-own-target.')\n", (5825, 5868), True, 'import streamlit as st\n'), ((5870, 5894), 'streamlit.write', 'st.write', (['"""8. Have fun!"""'], {}), "('8. Have fun!')\n", (5878, 5894), True, 'import streamlit as st\n'), ((5914, 5962), 'streamlit.subheader', 'st.subheader', (['"""Navigating the VS Code interface"""'], {}), "('Navigating the VS Code interface')\n", (5926, 5962), True, 'import streamlit as st\n'), ((5972, 6006), 'PIL.Image.open', 'Image.open', (['"""VSCode_interface.png"""'], {}), "('VSCode_interface.png')\n", (5982, 6006), False, 'from PIL import Image\n'), ((6008, 6052), 'streamlit.image', 'st.image', (['image'], {'caption': '"""VS Code UI layout"""'}), "(image, caption='VS Code UI layout')\n", (6016, 6052), True, 'import streamlit as st\n'), ((6054, 6173), 'streamlit.markdown', 'st.markdown', (['"""**A: Activity Bar: this is where you can see the different activities you can do in VS Code.**"""'], {}), "(\n '**A: Activity Bar: this is where you can see the different activities you can do in VS Code.**'\n )\n", (6065, 6173), True, 'import streamlit as st\n'), ((6165, 6270), 'streamlit.markdown', 'st.markdown', (['"""\tExplorer: this is where you can see all the files and folders in your project."""'], {}), "(\n '\\tExplorer: this is where you can see all the files and folders in your project.'\n )\n", (6176, 6270), True, 'import streamlit as st\n'), ((6261, 6372), 'streamlit.markdown', 'st.markdown', (['"""\tSource Control: this is where you can see the changes you have made to your project."""'], {}), "(\n '\\tSource Control: this is where you can see the changes you have made to your project.'\n )\n", (6272, 6372), True, 'import streamlit as st\n'), ((6363, 6449), 'streamlit.markdown', 'st.markdown', (['"""\tExtensions: this is where you can install extensions to VS Code."""'], {}), "(\n '\\tExtensions: this is where you can install extensions to VS Code.')\n", (6374, 6449), True, 'import streamlit as st\n'), ((6445, 6515), 'streamlit.markdown', 'st.markdown', (['"""\tRun and Debug: this is where you can debug your code."""'], {}), "('\\tRun and Debug: this is where you can debug your code.')\n", (6456, 6515), True, 'import streamlit as st\n'), ((6516, 6620), 'streamlit.markdown', 'st.markdown', (['"""**B: Side Bar: this is where you can see the different views of your project.**"""'], {}), "(\n '**B: Side Bar: this is where you can see the different views of your project.**'\n )\n", (6527, 6620), True, 'import streamlit as st\n'), ((6612, 6720), 'streamlit.markdown', 'st.markdown', (['"""**C: Editor: this is where you can see the code you have written in your project.**"""'], {}), "(\n '**C: Editor: this is where you can see the code you have written in your project.**'\n )\n", (6623, 6720), True, 'import streamlit as st\n'), ((6712, 6830), 'streamlit.markdown', 'st.markdown', (['"""**D: Panel: this is where you can see the different panels you have opened in your project.**"""'], {}), "(\n '**D: Panel: this is where you can see the different panels you have opened in your project.**'\n )\n", (6723, 6830), True, 'import streamlit as st\n'), ((6822, 6900), 'streamlit.markdown', 'st.markdown', (['"""\tTerminal: this is where you can run commands in your 
project."""'], {}), "('\\tTerminal: this is where you can run commands in your project.')\n", (6833, 6900), True, 'import streamlit as st\n'), ((6901, 6976), 'streamlit.markdown', 'st.markdown', (['"""\tOutput: this is where you can see the output of your code."""'], {}), "('\\tOutput: this is where you can see the output of your code.')\n", (6912, 6976), True, 'import streamlit as st\n'), ((6977, 7054), 'streamlit.markdown', 'st.markdown', (['"""\tProblems: this is where you can see the errors in your code."""'], {}), "('\\tProblems: this is where you can see the errors in your code.')\n", (6988, 7054), True, 'import streamlit as st\n'), ((7055, 7147), 'streamlit.markdown', 'st.markdown', (['"""**E. Status Bar: this is where you can see the status of your project.**"""'], {}), "(\n '**E. Status Bar: this is where you can see the status of your project.**')\n", (7066, 7147), True, 'import streamlit as st\n'), ((7175, 7405), 'streamlit.markdown', 'st.markdown', (['"""Python is very particular about indentation.\nUse the command palette to automatically indent your code.\n\nWindows: Ctrl-Shift-P \nMac: Command-Shift-P\n\nSelect the option to *Convert Indentation to Tabs*"""'], {}), '(\n """Python is very particular about indentation.\nUse the command palette to automatically indent your code.\n\nWindows: Ctrl-Shift-P \nMac: Command-Shift-P\n\nSelect the option to *Convert Indentation to Tabs*"""\n )\n', (7186, 7405), True, 'import streamlit as st\n'), ((7407, 7448), 'PIL.Image.open', 'Image.open', (['"""command_palette_command.png"""'], {}), "('command_palette_command.png')\n", (7417, 7448), False, 'from PIL import Image\n'), ((7450, 7512), 'streamlit.image', 'st.image', (['image'], {'caption': '"""Command Palette auto-indent command"""'}), "(image, caption='Command Palette auto-indent command')\n", (7458, 7512), True, 'import streamlit as st\n'), ((7536, 7641), 'streamlit.write', 'st.write', (['"""This is what we will working towards and building by the end of the workshop today."""'], {}), "(\n 'This is what we will working towards and building by the end of the workshop today.'\n )\n", (7544, 7641), True, 'import streamlit as st\n'), ((7633, 7674), 'streamlit.write', 'st.write', (['"""Do try out the chatbot below!"""'], {}), "('Do try out the chatbot below!')\n", (7641, 7674), True, 'import streamlit as st\n'), ((7676, 7732), 'streamlit.subheader', 'st.subheader', (['"""**:green[Feel the force! Yoda Chatbot]**"""'], {}), "('**:green[Feel the force! Yoda Chatbot]**')\n", (7688, 7732), True, 'import streamlit as st\n'), ((7742, 7764), 'PIL.Image.open', 'Image.open', (['"""yoda.jpg"""'], {}), "('yoda.jpg')\n", (7752, 7764), False, 'from PIL import Image\n'), ((7766, 7820), 'streamlit.image', 'st.image', (['image'], {'caption': '"""Master Yoda at your service"""'}), "(image, caption='Master Yoda at your service')\n", (7774, 7820), True, 'import streamlit as st\n'), ((7822, 7834), 'streamlit.divider', 'st.divider', ([], {}), '()\n', (7832, 7834), True, 'import streamlit as st\n'), ((9208, 9255), 'streamlit.subheader', 'st.subheader', (['"""Useful resources and references"""'], {}), "('Useful resources and references')\n", (9220, 9255), True, 'import streamlit as st\n'), ((9257, 9343), 'streamlit.markdown', 'st.markdown', (['"""1. [Streamlit documentation](https://docs.streamlit.io/en/stable/)"""'], {}), "(\n '1. [Streamlit documentation](https://docs.streamlit.io/en/stable/)')\n", (9268, 9343), True, 'import streamlit as st\n'), ((9340, 9432), 'streamlit.markdown', 'st.markdown', (['"""2. 
[OpenAI API documentation](https://beta.openai.com/docs/introduction)"""'], {}), "(\n '2. [OpenAI API documentation](https://beta.openai.com/docs/introduction)')\n", (9351, 9432), True, 'import streamlit as st\n'), ((9429, 9506), 'streamlit.markdown', 'st.markdown', (['"""3. [VS Code documentation](https://code.visualstudio.com/docs)"""'], {}), "('3. [VS Code documentation](https://code.visualstudio.com/docs)')\n", (9440, 9506), True, 'import streamlit as st\n'), ((9508, 9576), 'streamlit.markdown', 'st.markdown', (['"""4. [Python documentation](https://docs.python.org/3/)"""'], {}), "('4. [Python documentation](https://docs.python.org/3/)')\n", (9519, 9576), True, 'import streamlit as st\n'), ((9578, 9650), 'streamlit.markdown', 'st.markdown', (['"""5. [Python cheatsheet](https://www.pythoncheatsheet.org/)"""'], {}), "('5. [Python cheatsheet](https://www.pythoncheatsheet.org/)')\n", (9589, 9650), True, 'import streamlit as st\n'), ((9652, 9743), 'streamlit.markdown', 'st.markdown', (['"""6. [Python for beginners](https://www.python.org/about/gettingstarted/)"""'], {}), "(\n '6. [Python for beginners](https://www.python.org/about/gettingstarted/)')\n", (9663, 9743), True, 'import streamlit as st\n'), ((9740, 9851), 'streamlit.markdown', 'st.markdown', (['"""7. [ChatGPT](https://chat.openai.com/) - you can ask ChatGPT to generate code for you!"""'], {}), "(\n '7. [ChatGPT](https://chat.openai.com/) - you can ask ChatGPT to generate code for you!'\n )\n", (9751, 9851), True, 'import streamlit as st\n'), ((9843, 10017), 'streamlit.markdown', 'st.markdown', (['"""**Notes for this workshop course:** \n- you may do a single click to copy all the code \n- challenge code is hidden, click reveal to see the code"""'], {}), '(\n """**Notes for this workshop course:** \n- you may do a single click to copy all the code \n- challenge code is hidden, click reveal to see the code"""\n )\n', (9854, 10017), True, 'import streamlit as st\n'), ((10007, 10237), 'streamlit.markdown', 'st.markdown', (['"""Python is very particular about indentation.\nUse the command palette to automatically indent your code.\n\nWindows: Ctrl-Shift-P \nMac: Command-Shift-P\n\nSelect the option to *Convert Indentation to Tabs*"""'], {}), '(\n """Python is very particular about indentation.\nUse the command palette to automatically indent your code.\n\nWindows: Ctrl-Shift-P \nMac: Command-Shift-P\n\nSelect the option to *Convert Indentation to Tabs*"""\n )\n', (10018, 10237), True, 'import streamlit as st\n'), ((10239, 10280), 'PIL.Image.open', 'Image.open', (['"""command_palette_command.png"""'], {}), "('command_palette_command.png')\n", (10249, 10280), False, 'from PIL import Image\n'), ((10282, 10344), 'streamlit.image', 'st.image', (['image'], {'caption': '"""Command Palette auto-indent command"""'}), "(image, caption='Command Palette auto-indent command')\n", (10290, 10344), True, 'import streamlit as st\n'), ((10367, 10392), 'streamlit.subheader', 'st.subheader', (['"""Streamlit"""'], {}), "('Streamlit')\n", (10379, 10392), True, 'import streamlit as st\n'), ((10394, 10790), 'streamlit.markdown', 'st.markdown', (['"""\n\t\t * an open-source Python library\n\t\t * used extensively for machine learning and data science\n\t\t * helps to create interactive web apps in just a few lines of code\n\t\t * highly flexible and supports complex interactive apps with highly customisable UI\n\t\t * Some real world examples:\n\t\t \t* CherGPT in String\n\t\t \t* Metacog for CotF MOE\n\t\t \t* AILC prototype for MOE\n\t\t """'], {}), '(\n 
"""\n\t\t * an open-source Python library\n\t\t * used extensively for machine learning and data science\n\t\t * helps to create interactive web apps in just a few lines of code\n\t\t * highly flexible and supports complex interactive apps with highly customisable UI\n\t\t * Some real world examples:\n\t\t \t* CherGPT in String\n\t\t \t* Metacog for CotF MOE\n\t\t \t* AILC prototype for MOE\n\t\t """\n )\n', (10405, 10790), True, 'import streamlit as st\n'), ((10820, 10843), 'streamlit.write', 'st.write', (['"""Hello World"""'], {}), "('Hello World')\n", (10828, 10843), True, 'import streamlit as st\n'), ((10910, 10942), 'streamlit.text_input', 'st.text_input', (['"""Enter your name"""'], {}), "('Enter your name')\n", (10923, 10942), True, 'import streamlit as st\n'), ((11001, 11038), 'streamlit.subheader', 'st.subheader', (['"""Exercise 1: Functions"""'], {}), "('Exercise 1: Functions')\n", (11013, 11038), True, 'import streamlit as st\n'), ((11040, 11160), 'streamlit.markdown', 'st.markdown', (['"""In the ***main.py*** file, the code below is already in ***ex1()*** in the ***part1.py*** file."""'], {}), "(\n 'In the ***main.py*** file, the code below is already in ***ex1()*** in the ***part1.py*** file.'\n )\n", (11051, 11160), True, 'import streamlit as st\n'), ((11152, 11237), 'streamlit.write', 'st.write', (['"""The code for *helloworld* is inside what you call a Python function."""'], {}), "('The code for *helloworld* is inside what you call a Python function.'\n )\n", (11160, 11237), True, 'import streamlit as st\n'), ((11234, 11388), 'streamlit.write', 'st.write', (['"""The ***def main()*** function and ***if _ _ name _ _ == \'_ _ main _ _\'*** statement are coding conventions for any Python programme."""'], {}), '(\n "The ***def main()*** function and ***if _ _ name _ _ == \'_ _ main _ _\'*** statement are coding conventions for any Python programme."\n )\n', (11242, 11388), True, 'import streamlit as st\n'), ((11380, 11514), 'streamlit.write', 'st.write', (['"""You need to include an import statement in ***main.py*** to import the ***ex1()*** function from the other file."""'], {}), "(\n 'You need to include an import statement in ***main.py*** to import the ***ex1()*** function from the other file.'\n )\n", (11388, 11514), True, 'import streamlit as st\n'), ((11506, 11544), 'streamlit.code', 'st.code', (['"""\nfrom part1 import ex1\n"""'], {}), '("""\nfrom part1 import ex1\n""")\n', (11513, 11544), True, 'import streamlit as st\n'), ((11546, 11771), 'streamlit.write', 'st.write', (['"""You will need to do the same for all the other exercises and challenges for the rest of the workshop. The code exercises are already in the respective files: ***part1.py, part2.py, part3.py, part4.py***."""'], {}), "(\n 'You will need to do the same for all the other exercises and challenges for the rest of the workshop. 
The code exercises are already in the respective files: ***part1.py, part2.py, part3.py, part4.py***.'\n )\n", (11554, 11771), True, 'import streamlit as st\n'), ((11763, 11793), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (11774, 11793), True, 'import streamlit as st\n'), ((11795, 12075), 'streamlit.code', 'st.code', (['"""\nimport streamlit as st\n\nfrom part1 import ex1\n\t\t \t \n#Exercise 1: Functions\ndef ex1():\n\tst.write("Hello World")\n\tname = st.text_input("Enter your name")\n\tif name:\n\t\tst.write("Hello " + name)\n\t \ndef main():\n\tex1()\n\t \nif __name__ == "__main__":\n\tmain()\t\t\n\t """'], {}), '(\n """\nimport streamlit as st\n\nfrom part1 import ex1\n\t\t \t \n#Exercise 1: Functions\ndef ex1():\n\tst.write("Hello World")\n\tname = st.text_input("Enter your name")\n\tif name:\n\t\tst.write("Hello " + name)\n\t \ndef main():\n\tex1()\n\t \nif __name__ == "__main__":\n\tmain()\t\t\n\t """\n )\n', (11802, 12075), True, 'import streamlit as st\n'), ((12067, 12137), 'streamlit.markdown', 'st.markdown', (['"""Run the code by typing the following into the terminal:"""'], {}), "('Run the code by typing the following into the terminal:')\n", (12078, 12137), True, 'import streamlit as st\n'), ((12139, 12171), 'streamlit.code', 'st.code', (['"""streamlit run main.py"""'], {}), "('streamlit run main.py')\n", (12146, 12171), True, 'import streamlit as st\n'), ((12173, 12250), 'streamlit.markdown', 'st.markdown', (['"""You should see the following behaviour in your browser window:"""'], {}), "('You should see the following behaviour in your browser window:')\n", (12184, 12250), True, 'import streamlit as st\n'), ((12252, 12288), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (12263, 12288), True, 'import streamlit as st\n'), ((12335, 12367), 'streamlit.text_input', 'st.text_input', (['"""Enter your name"""'], {}), "('Enter your name')\n", (12348, 12367), True, 'import streamlit as st\n'), ((12378, 12431), 'streamlit.selectbox', 'st.selectbox', (['"""State your gender"""', "['Male', 'Female']"], {}), "('State your gender', ['Male', 'Female'])\n", (12390, 12431), True, 'import streamlit as st\n'), ((12439, 12474), 'streamlit.text_input', 'st.text_input', (['"""State your age"""', '(18)'], {}), "('State your age', 18)\n", (12452, 12474), True, 'import streamlit as st\n'), ((12608, 12664), 'streamlit.subheader', 'st.subheader', (['"""Challenge 1: Input, Output and Variables"""'], {}), "('Challenge 1: Input, Output and Variables')\n", (12620, 12664), True, 'import streamlit as st\n'), ((12666, 12780), 'streamlit.write', 'st.write', (['"""Create a new function called ***ch1()*** in ***part1.py*** and call it in the main function."""'], {}), "(\n 'Create a new function called ***ch1()*** in ***part1.py*** and call it in the main function.'\n )\n", (12674, 12780), True, 'import streamlit as st\n'), ((12772, 12876), 'streamlit.write', 'st.write', (['"""Create three variables *name*, *age* and *gender*, and obtain these from the user."""'], {}), "(\n 'Create three variables *name*, *age* and *gender*, and obtain these from the user.'\n )\n", (12780, 12876), True, 'import streamlit as st\n'), ((12868, 12972), 'streamlit.write', 'st.write', (['"""Once the user filled up the input boxes, display back the information to the user."""'], {}), "(\n 'Once the user filled up the input boxes, display back the information to the user.'\n )\n", (12876, 12972), True, 'import streamlit as 
st\n'), ((12964, 13099), 'streamlit.write', 'st.write', (['"""Code hint: the following piece of code checks if *name* has been filled, and if so, displays it back to the user."""'], {}), "(\n 'Code hint: the following piece of code checks if *name* has been filled, and if so, displays it back to the user.'\n )\n", (12972, 13099), True, 'import streamlit as st\n'), ((13091, 13192), 'streamlit.code', 'st.code', (['"""\nname = st.text_input("Enter your name")\nif name:\n\tst.text(f"Hello {name}")\n"""'], {}), '(\n """\nname = st.text_input("Enter your name")\nif name:\n\tst.text(f"Hello {name}")\n"""\n )\n', (13098, 13192), True, 'import streamlit as st\n'), ((13184, 13214), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (13195, 13214), True, 'import streamlit as st\n'), ((13582, 13618), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (13593, 13618), True, 'import streamlit as st\n'), ((13671, 13724), 'streamlit.selectbox', 'st.selectbox', (['"""State your gender"""', "['Male', 'Female']"], {}), "('State your gender', ['Male', 'Female'])\n", (13683, 13724), True, 'import streamlit as st\n'), ((13788, 13834), 'streamlit.camera_input', 'st.camera_input', (['"""Smile! take a picture here."""'], {}), "('Smile! take a picture here.')\n", (13803, 13834), True, 'import streamlit as st\n'), ((14362, 14418), 'streamlit.subheader', 'st.subheader', (['"""Exercise 2: If-else logical conditionals"""'], {}), "('Exercise 2: If-else logical conditionals')\n", (14374, 14418), True, 'import streamlit as st\n'), ((14420, 14510), 'streamlit.markdown', 'st.markdown', (['"""If-else statements help control the flow and logic of our application."""'], {}), "(\n 'If-else statements help control the flow and logic of our application.')\n", (14431, 14510), True, 'import streamlit as st\n'), ((14507, 14537), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (14518, 14537), True, 'import streamlit as st\n'), ((14539, 15260), 'streamlit.code', 'st.code', (['"""\ndef ex2():\n gender = st.selectbox("State your gender", ["Male", "Female"])\n age = int(st.text_input("State your age", 18))\n photo = st.camera_input("Smile! take a picture here.")\n\n # conditional logic to run different statements\n if age >= 21 and gender == "Male":\n st.write("You are a male adult")\n elif age < 21 and gender == "Male":\n st.write("You are a young boy")\n elif age >= 21 and gender == "Female":\n st.write("You are a female adult")\n elif age < 21 and gender == "Female":\n st.write("You are a young girl")\n\n if photo:\n st.write("Here is your photo: ")\n st.image(photo)\n else:\n st.write("No photo taken")\n"""'], {}), '(\n """\ndef ex2():\n gender = st.selectbox("State your gender", ["Male", "Female"])\n age = int(st.text_input("State your age", 18))\n photo = st.camera_input("Smile! 
take a picture here.")\n\n # conditional logic to run different statements\n if age >= 21 and gender == "Male":\n st.write("You are a male adult")\n elif age < 21 and gender == "Male":\n st.write("You are a young boy")\n elif age >= 21 and gender == "Female":\n st.write("You are a female adult")\n elif age < 21 and gender == "Female":\n st.write("You are a young girl")\n\n if photo:\n st.write("Here is your photo: ")\n st.image(photo)\n else:\n st.write("No photo taken")\n"""\n )\n', (14546, 15260), True, 'import streamlit as st\n'), ((15252, 15288), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (15263, 15288), True, 'import streamlit as st\n'), ((15580, 15626), 'streamlit.write', 'st.write', (['"""Here is your *person* dictionary: """'], {}), "('Here is your *person* dictionary: ')\n", (15588, 15626), True, 'import streamlit as st\n'), ((15628, 15644), 'streamlit.write', 'st.write', (['person'], {}), '(person)\n', (15636, 15644), True, 'import streamlit as st\n'), ((15683, 15759), 'streamlit.write', 'st.write', (['"""You can also show individual items in the dictionary like this: """'], {}), "('You can also show individual items in the dictionary like this: ')\n", (15691, 15759), True, 'import streamlit as st\n'), ((15883, 15923), 'streamlit.text_input', 'st.text_input', (['"""Enter your name"""', '"""John"""'], {}), "('Enter your name', 'John')\n", (15896, 15923), True, 'import streamlit as st\n'), ((15931, 15966), 'streamlit.text_input', 'st.text_input', (['"""State your age"""', '(30)'], {}), "('State your age', 30)\n", (15944, 15966), True, 'import streamlit as st\n'), ((15977, 16030), 'streamlit.selectbox', 'st.selectbox', (['"""State your gender"""', "['Male', 'Female']"], {}), "('State your gender', ['Male', 'Female'])\n", (15989, 16030), True, 'import streamlit as st\n'), ((16039, 16083), 'streamlit.text_input', 'st.text_input', (['"""State your city"""', '"""New York"""'], {}), "('State your city', 'New York')\n", (16052, 16083), True, 'import streamlit as st\n'), ((16181, 16235), 'streamlit.write', 'st.write', (['"""Here is your updated *person* dictionary: """'], {}), "('Here is your updated *person* dictionary: ')\n", (16189, 16235), True, 'import streamlit as st\n'), ((16237, 16253), 'streamlit.write', 'st.write', (['person'], {}), '(person)\n', (16245, 16253), True, 'import streamlit as st\n'), ((16274, 16317), 'streamlit.subheader', 'st.subheader', (['"""Exercise 3: Data and Loops """'], {}), "('Exercise 3: Data and Loops ')\n", (16286, 16317), True, 'import streamlit as st\n'), ((16319, 16421), 'streamlit.write', 'st.write', (['"""We can store data in a list or dictionary and display the data using a for loop."""'], {}), "(\n 'We can store data in a list or dictionary and display the data using a for loop.'\n )\n", (16327, 16421), True, 'import streamlit as st\n'), ((16413, 16527), 'streamlit.write', 'st.write', (['"""Append the following code to the ***main.py*** file. Refresh the browser to see the changes."""'], {}), "(\n 'Append the following code to the ***main.py*** file. 
Refresh the browser to see the changes.'\n )\n", (16421, 16527), True, 'import streamlit as st\n'), ((16519, 16588), 'streamlit.write', 'st.write', (['"""You should see output similar to the *Code Output* below."""'], {}), "('You should see output similar to the *Code Output* below.')\n", (16527, 16588), True, 'import streamlit as st\n'), ((16590, 16620), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (16601, 16620), True, 'import streamlit as st\n'), ((16622, 17584), 'streamlit.code', 'st.code', (['"""\n#Data and Loops\ndef ex3():\n\t# Data list\n\tfruits = ["apple", "banana", "orange"]\n\n\t# For loop to show list\n\tfor fruit in fruits:\n\t\tst.write(fruit)\n\n\t# Dictionary\n\tperson = {"name": "John", "age": 30, "city": "New York"}\n\n\t# Print out the items in the dictionary\n\tst.write("Here is your *person* dictionary: ")\n\tst.write(person)\n\n\t# for loop to show dictionary list\n\tst.write("You can also show individual items in the dictionary like this: ")\n\tfor key, value in person.items():\n\t\tst.write(key + ": " + str(value))\n\n\t# get user input to update the dictionary\n\tname = st.text_input("Enter your name", "John")\n\tage = st.text_input("State your age", 30)\n\tgender = st.selectbox("State your gender", ["Male", "Female"])\n\tcity = st.text_input("State your city", "New York")\n\tperson["name"] = name\n\tperson["age"] = age\n\tperson["gender"] = gender\n\tperson["city"] = city\n\t\n\tst.write("Here is your updated *person* dictionary: ")\n\tst.write(person)\n"""'], {}), '(\n """\n#Data and Loops\ndef ex3():\n\t# Data list\n\tfruits = ["apple", "banana", "orange"]\n\n\t# For loop to show list\n\tfor fruit in fruits:\n\t\tst.write(fruit)\n\n\t# Dictionary\n\tperson = {"name": "John", "age": 30, "city": "New York"}\n\n\t# Print out the items in the dictionary\n\tst.write("Here is your *person* dictionary: ")\n\tst.write(person)\n\n\t# for loop to show dictionary list\n\tst.write("You can also show individual items in the dictionary like this: ")\n\tfor key, value in person.items():\n\t\tst.write(key + ": " + str(value))\n\n\t# get user input to update the dictionary\n\tname = st.text_input("Enter your name", "John")\n\tage = st.text_input("State your age", 30)\n\tgender = st.selectbox("State your gender", ["Male", "Female"])\n\tcity = st.text_input("State your city", "New York")\n\tperson["name"] = name\n\tperson["age"] = age\n\tperson["gender"] = gender\n\tperson["city"] = city\n\t\n\tst.write("Here is your updated *person* dictionary: ")\n\tst.write(person)\n"""\n )\n', (16629, 17584), True, 'import streamlit as st\n'), ((17576, 17612), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (17587, 17612), True, 'import streamlit as st\n'), ((18096, 18137), 'streamlit.write', 'st.write', (['"""name: """', 'st.session_state.name'], {}), "('name: ', st.session_state.name)\n", (18104, 18137), True, 'import streamlit as st\n'), ((18139, 18178), 'streamlit.write', 'st.write', (['"""age: """', 'st.session_state.age'], {}), "('age: ', st.session_state.age)\n", (18147, 18178), True, 'import streamlit as st\n'), ((18180, 18225), 'streamlit.write', 'st.write', (['"""gender: """', 'st.session_state.gender'], {}), "('gender: ', st.session_state.gender)\n", (18188, 18225), True, 'import streamlit as st\n'), ((18247, 18288), 'streamlit.subheader', 'st.subheader', (['"""Exercise 4a: Session Data"""'], {}), "('Exercise 4a: Session Data')\n", (18259, 18288), True, 'import streamlit as 
st\n'), ((18290, 18412), 'streamlit.write', 'st.write', (['"""We can create variables to store data in a user session. Session data persist within a user session."""'], {}), "(\n 'We can create variables to store data in a user session. Session data persist within a user session.'\n )\n", (18298, 18412), True, 'import streamlit as st\n'), ((18405, 18435), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (18416, 18435), True, 'import streamlit as st\n'), ((18437, 19071), 'streamlit.code', 'st.code', (['"""\n# Exercise 4: Session State\ndef ex4a():\n\tst.subheader("Session Data:")\n\tif "session_data" not in st.session_state:\n\t\tst.session_state.session_data = ["alpha", "omega"]\n\t\n\tif "name" not in st.session_state:\n\t\tst.session_state.name = ""\n\t\n\tif "age" not in st.session_state:\n\t\tst.session_state.age = ""\n\n\tif "gender" not in st.session_state:\n\t\tst.session_state.gender = ""\n\t\n\t# For loop to show list\n\tfor data in st.session_state.session_data:\n\t\tst.write("session_data: ", data)\n\n\tst.write("name: ", st.session_state.name)\n\tst.write("age: ", st.session_state.age)\n\tst.write("gender: ", st.session_state.gender)\n"""'], {}), '(\n """\n# Exercise 4: Session State\ndef ex4a():\n\tst.subheader("Session Data:")\n\tif "session_data" not in st.session_state:\n\t\tst.session_state.session_data = ["alpha", "omega"]\n\t\n\tif "name" not in st.session_state:\n\t\tst.session_state.name = ""\n\t\n\tif "age" not in st.session_state:\n\t\tst.session_state.age = ""\n\n\tif "gender" not in st.session_state:\n\t\tst.session_state.gender = ""\n\t\n\t# For loop to show list\n\tfor data in st.session_state.session_data:\n\t\tst.write("session_data: ", data)\n\n\tst.write("name: ", st.session_state.name)\n\tst.write("age: ", st.session_state.age)\n\tst.write("gender: ", st.session_state.gender)\n"""\n )\n', (18444, 19071), True, 'import streamlit as st\n'), ((19067, 19103), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (19078, 19103), True, 'import streamlit as st\n'), ((19183, 19215), 'streamlit.text_input', 'st.text_input', (['"""Enter your name"""'], {}), "('Enter your name')\n", (19196, 19215), True, 'import streamlit as st\n'), ((19228, 19259), 'streamlit.text_input', 'st.text_input', (['"""State your age"""'], {}), "('State your age')\n", (19241, 19259), True, 'import streamlit as st\n'), ((19275, 19332), 'streamlit.selectbox', 'st.selectbox', (['"""State your gender"""', "['', 'Male', 'Female']"], {}), "('State your gender', ['', 'Male', 'Female'])\n", (19287, 19332), True, 'import streamlit as st\n'), ((19650, 19707), 'streamlit.subheader', 'st.subheader', (['"""Exercise 4b: Session Data with User Input"""'], {}), "('Exercise 4b: Session Data with User Input')\n", (19662, 19707), True, 'import streamlit as st\n'), ((19709, 19787), 'streamlit.write', 'st.write', (['"""Lets now get input from the user and store it in the session data."""'], {}), "('Lets now get input from the user and store it in the session data.')\n", (19717, 19787), True, 'import streamlit as st\n'), ((19789, 19878), 'streamlit.write', 'st.write', (['"""Now run *ex4a()* again to check the session data. Note that it persists."""'], {}), "(\n 'Now run *ex4a()* again to check the session data. 
Note that it persists.')\n", (19797, 19878), True, 'import streamlit as st\n'), ((19876, 19906), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (19887, 19906), True, 'import streamlit as st\n'), ((19908, 20427), 'streamlit.code', 'st.code', (['"""\ndef ex4b():\n\tst.subheader("Session Data:")\n\tuserName = st.text_input("Enter your name")\n\tuserAge = st.text_input("State your age")\n\tuserGender = st.selectbox("State your gender", ["", "Male", "Female"])\n\n\tif userName:\n\t\tst.session_state.name = userName\n\t\tst.write("name: ", st.session_state.name)\n\tif userAge:\n\t\tst.session_state.age = int(userAge)\n\t\tst.write("age: ", st.session_state.age)\n\tif userGender:\n\t\tst.session_state.gender = userGender\n\t\tst.write("gender: ", st.session_state.gender)\n"""'], {}), '(\n """\ndef ex4b():\n\tst.subheader("Session Data:")\n\tuserName = st.text_input("Enter your name")\n\tuserAge = st.text_input("State your age")\n\tuserGender = st.selectbox("State your gender", ["", "Male", "Female"])\n\n\tif userName:\n\t\tst.session_state.name = userName\n\t\tst.write("name: ", st.session_state.name)\n\tif userAge:\n\t\tst.session_state.age = int(userAge)\n\t\tst.write("age: ", st.session_state.age)\n\tif userGender:\n\t\tst.session_state.gender = userGender\n\t\tst.write("gender: ", st.session_state.gender)\n"""\n )\n', (19915, 20427), True, 'import streamlit as st\n'), ((20419, 20455), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (20430, 20455), True, 'import streamlit as st\n'), ((20931, 20986), 'streamlit.write', 'st.write', (['"""session_state.name: """', 'st.session_state.name'], {}), "('session_state.name: ', st.session_state.name)\n", (20939, 20986), True, 'import streamlit as st\n'), ((20988, 21041), 'streamlit.write', 'st.write', (['"""session_state.age: """', 'st.session_state.age'], {}), "('session_state.age: ', st.session_state.age)\n", (20996, 21041), True, 'import streamlit as st\n'), ((21043, 21102), 'streamlit.write', 'st.write', (['"""session_state.gender: """', 'st.session_state.gender'], {}), "('session_state.gender: ', st.session_state.gender)\n", (21051, 21102), True, 'import streamlit as st\n'), ((21104, 21181), 'streamlit.write', 'st.write', (['"""session_state.prompt_template: """', 'st.session_state.prompt_template'], {}), "('session_state.prompt_template: ', st.session_state.prompt_template)\n", (21112, 21181), True, 'import streamlit as st\n'), ((21202, 21243), 'streamlit.subheader', 'st.subheader', (['"""Challenge 4: Session Data"""'], {}), "('Challenge 4: Session Data')\n", (21214, 21243), True, 'import streamlit as st\n'), ((21245, 22105), 'streamlit.markdown', 'st.markdown', (['"""\n\t\t Add a new function called ***ch4()*** to the ***part1.py*** file and call it in the main function.\n\n\t\t In *ch4()*, modify the code in Exercise 4b to include the following:\n\t\t * Create session data for ***name***, ***age*** and ***gender***\n\t\t * Create session data for ***prompt_template*** with the following value:\n\t\t\t "Speak like Yoda from Star Wars for every question that was asked, do not give a direct answer but ask more questions in the style of wise Yoda from Star Wars"\n\t\t * Include this code in ***main()*** as well, because we need the session data for later exercises. Omit the ***st.write*** functions, since we do not want to see this output every time we run ***main()***. 
### Exercise 5: Elements of a chatbot

We will start creating a user interface for our first chatbot.

Call the following code from ***part1.py*** in your **main()**.

You should see the output below when you run your programme.

**Code**

```python
# Exercise 5 : Chatbot UI
def ex5():
    st.title("My first chatbot")

    if "store_msg" not in st.session_state:
        st.session_state.store_msg = []

    prompt = st.chat_input("Say something")
    if prompt:
        st.write(f"User has sent the following prompt: {prompt}")
        st.session_state.store_msg.append(prompt)
        for message in st.session_state.store_msg:
            with st.chat_message("user"):
                st.write(message)
            with st.chat_message("assistant"):
                st.write("Hello human, what can I do for you?")
```
"store_msg" not in st.session_state:\n\t\tst.session_state.store_msg = []\n\n\tprompt = st.chat_input("Say something")\n\tif prompt:\n\t\tst.write(f"User has sent the following prompt: {prompt}")\n\t\tst.session_state.store_msg.append(prompt)\n\t\tfor message in st.session_state.store_msg:\n\t\t\twith st.chat_message("user"):\n\t\t\t\tst.write(message)\n\t\t\twith st.chat_message("assistant"):\n\t\t\t\tst.write("Hello human, what can I do for you?")\n"""\n )\n', (24314, 24823), True, 'import streamlit as st\n'), ((24815, 24851), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (24826, 24851), True, 'import streamlit as st\n'), ((24891, 24918), 'streamlit.markdown', 'st.markdown', (['"""**Echo Bot**"""'], {}), "('**Echo Bot**')\n", (24902, 24918), True, 'import streamlit as st\n'), ((25757, 25815), 'streamlit.subheader', 'st.subheader', (['"""Exercise 6: Building a simple echo chatbot"""'], {}), "('Exercise 6: Building a simple echo chatbot')\n", (25769, 25815), True, 'import streamlit as st\n'), ((25817, 25869), 'streamlit.write', 'st.write', (['"""We will now build a simple echo chatbot."""'], {}), "('We will now build a simple echo chatbot.')\n", (25825, 25869), True, 'import streamlit as st\n'), ((25871, 25946), 'streamlit.write', 'st.write', (['"""Call the following code from **part1.py** in your ***main()***."""'], {}), "('Call the following code from **part1.py** in your ***main()***.')\n", (25879, 25946), True, 'import streamlit as st\n'), ((25948, 26020), 'streamlit.write', 'st.write', (['"""You should see the output below when you run your programme."""'], {}), "('You should see the output below when you run your programme.')\n", (25956, 26020), True, 'import streamlit as st\n'), ((26022, 26052), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (26033, 26052), True, 'import streamlit as st\n'), ((26054, 26968), 'streamlit.code', 'st.code', (['"""\n#Exercise 6 : Rule-based Echo Chatbot \ndef ex6():\n\tst.title("Echo Bot")\n\n\t# Initialize chat history\n\tif "messages" not in st.session_state:\n\t\tst.session_state.messages = []\n\n\t# Display chat messages from history on app rerun\n\tfor message in st.session_state.messages:\n\t\twith st.chat_message(message["role"]):\n\t\t\tst.markdown(message["content"])\n\n\t# React to user input\n\tif prompt := st.chat_input("What is up?"):\n\t\t# Display user message in chat message container\n\t\tst.chat_message("user").markdown(prompt)\n\t\t# Add user message to chat history\n\t\tst.session_state.messages.append({"role": "user", "content": prompt})\n\n\t\tresponse = f"Echo: {prompt}"\n\t\t# Display assistant response in chat message container\n\t\twith st.chat_message("assistant"):\n\t\t\tst.markdown(response)\n\t\t# Add assistant response to chat history\n\t\tst.session_state.messages.append({"role": "assistant", "content": response})\n"""'], {}), '(\n """\n#Exercise 6 : Rule-based Echo Chatbot \ndef ex6():\n\tst.title("Echo Bot")\n\n\t# Initialize chat history\n\tif "messages" not in st.session_state:\n\t\tst.session_state.messages = []\n\n\t# Display chat messages from history on app rerun\n\tfor message in st.session_state.messages:\n\t\twith st.chat_message(message["role"]):\n\t\t\tst.markdown(message["content"])\n\n\t# React to user input\n\tif prompt := st.chat_input("What is up?"):\n\t\t# Display user message in chat message container\n\t\tst.chat_message("user").markdown(prompt)\n\t\t# Add user message to chat 
**Code Output**

**Rule Based Bot**

### Challenge 6: Rule based chatbot

Create a new function called ***ch6()*** in **part1.py** and modify the ***ex6()*** function to create the following rule based chatbot:

* Human: “Hello”, Assistant: “Hi there what can I do for you”
* Human: “What is your name?”, Assistant: “My name is EAI, an electronic artificial being”
* Human: “How old are you?”, Assistant: “Today is my birthday!”

For other queries, it will reply “I am sorry, I am unable to help you with your query”.

Use *if / elif / else* statements to create the chatbot behaviour logic.

You should see the output below when you run your programme.
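The solution block is not captured in this extract. A minimal sketch of the rule logic, assuming the same chat-history scaffolding as ***ex6()*** (the helper name is illustrative):

```python
# Hypothetical sketch for ch6() — replace the echo line in ex6() with rule logic.
def rule_based_reply(prompt: str) -> str:
    if prompt == "Hello":
        return "Hi there what can I do for you"
    elif prompt == "What is your name?":
        return "My name is EAI, an electronic artificial being"
    elif prompt == "How old are you?":
        return "Today is my birthday!"
    else:
        return "I am sorry, I am unable to help you with your query"
```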
### Exercise 7: Secrets - Shhh

In this exercise, we will learn how to hide your API key.

In your working directory (chatbot), create a directory called **.streamlit**.

Note the *dot* in front of the directory.

In this folder, create a file called **secrets.toml**.

Get an API key from your OpenAI account and type the following in **secrets.toml**:

**Code**

```toml
openapi_key = "xxxxxx"
```

Include the following global variables in your ***main.py*** file under the import statements:

```python
os.environ["OPENAI_API_KEY"] = st.secrets["openapi_key"]
openai.api_key = st.secrets["openapi_key"]
```

Create a .gitignore file and add .streamlit into it.

### Exercise 8: Calling the OpenAI LLM API

In this exercise, we will learn how to call the OpenAI LLM API.

Note that there is a new import statement **import openai**.
Call the following code in your **main()** and run it.

You should see the output as shown below.

**Code**

```python
import openai

# Exercise 8 : Using the OpenAI API
def ex8():
    st.title("Api Call")
    MODEL = "gpt-3.5-turbo"

    response = openai.ChatCompletion.create(
        model=MODEL,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Tell me about Singapore in the 1970s in 50 words."},
        ],
        temperature=0,
    )

    st.markdown("**This is the raw response:**")
    st.write(response)
    st.markdown("**This is the extracted response:**")
    st.write(response["choices"][0]["message"]["content"].strip())
    s = str(response["usage"]["total_tokens"])
    st.markdown("**Total tokens used:**")
    st.write(s)
```

**Code Output**

### Challenge 8: Incorporate your LLM API call into your chatbot

In this challenge, we will incorporate the LLM API call into our previous rule-based *Echo* chatbot.

**Step 1**

Create a new function **ch8()** in ***part1.py*** and copy the code from **ex6()** into it. Recall that **ex6()** shows the chat history, gets a chat input from the user, and echoes the user input back to the user.

**Step 2**

Next, copy the code from **ex8()** into a new function named **chat_completion()**. Recall that **ex8()** is about making an API call.

Now, instead of echoing the user's input in **ex6()**, we will call the LLM API to generate a response. In particular, we are replacing this line of code with the response from the API call:
(['"""\n**Step1**\n\nCreate a new function **ch8()** in ***part1.py*** and copy the code from **ex6()** into it. Recall that **ex6()** shows the chat history and gets a chat input from the user, and echoes the user input back to the user. \n\n**Step 2**\n\nNext, copy the code from **ex8** into a new function named **chat_completion()**. Recall that **ex8()** is about making an API call.\n\nNow, instead of echoing the user\'s input in **ex6()**, we will call the LLM API to generate a response. In particular, we are replacing this line of code with the response from the API call:\n\n"""'], {}), '(\n """\n**Step1**\n\nCreate a new function **ch8()** in ***part1.py*** and copy the code from **ex6()** into it. Recall that **ex6()** shows the chat history and gets a chat input from the user, and echoes the user input back to the user. \n\n**Step 2**\n\nNext, copy the code from **ex8** into a new function named **chat_completion()**. Recall that **ex8()** is about making an API call.\n\nNow, instead of echoing the user\'s input in **ex6()**, we will call the LLM API to generate a response. In particular, we are replacing this line of code with the response from the API call:\n\n"""\n )\n', (32957, 33545), True, 'import streamlit as st\n'), ((33542, 33588), 'streamlit.code', 'st.code', (['"""\nresponse = f"Echo: {prompt}\\"\n"""'], {}), '("""\nresponse = f"Echo: {prompt}\\"\n""")\n', (33549, 33588), True, 'import streamlit as st\n'), ((33589, 33994), 'streamlit.markdown', 'st.markdown', (['"""\n**Step 3**\n\nIn **chat_completion()**, what we will do is to replace the previous *Tell me the history ..."* prompt from **ex8()** with the current user\'s input.\n\nIn order to do so, in **ch8()**, use the following code to call **chat_completion()**.\n\nWhat we are doing now is to pass the prompt from the user to the API call instead of hard-coding the prompt as in **ex8()**.\n"""'], {}), '(\n """\n**Step 3**\n\nIn **chat_completion()**, what we will do is to replace the previous *Tell me the history ..."* prompt from **ex8()** with the current user\'s input.\n\nIn order to do so, in **ch8()**, use the following code to call **chat_completion()**.\n\nWhat we are doing now is to pass the prompt from the user to the API call instead of hard-coding the prompt as in **ex8()**.\n"""\n )\n', (33600, 33994), True, 'import streamlit as st\n'), ((33990, 34193), 'streamlit.code', 'st.code', (['"""\n\t if prompt := st.chat.input("What\'s up?"):\n\t \t#display user messgae in chat message container\n\t \treply = chat_completion(prompt) \n\t \tst.chat_message("user").markdown(prompt)\n\t\t"""'], {}), '(\n """\n\t if prompt := st.chat.input("What\'s up?"):\n\t \t#display user messgae in chat message container\n\t \treply = chat_completion(prompt) \n\t \tst.chat_message("user").markdown(prompt)\n\t\t"""\n )\n', (33997, 34193), True, 'import streamlit as st\n'), ((34185, 34243), 'streamlit.write', 'st.write', (['"""You should see the code output as shown below."""'], {}), "('You should see the code output as shown below.')\n", (34193, 34243), True, 'import streamlit as st\n'), ((34245, 34275), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (34256, 34275), True, 'import streamlit as st\n'), ((35559, 35595), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (35570, 35595), True, 'import streamlit as st\n'), ((35597, 35623), 'streamlit.title', 'st.title', (['"""My LLM Chatbot"""'], {}), "('My LLM Chatbot')\n", (35605, 
*(The Challenge 8 solution runs live in the lesson under the title "My LLM Chatbot".)*

### Exercise 9: Building a ChatGPT-like clone with streaming responses

Now, we will incorporate a streaming response from the LLM API into our chatbot to mimic the behaviour of ChatGPT.

Copy and run the code below to see the streaming responses.

**Code**

```python
# Exercise 9 : Using the OpenAI API with streaming option
def chat_completion_stream(prompt):
    openai.api_key = st.secrets["openapi_key"]
    MODEL = "gpt-3.5-turbo"
    response = openai.ChatCompletion.create(
        model=MODEL,
        messages=[
            {"role": "system", "content": "You are a helpful assistant"},
            {"role": "user", "content": prompt},
        ],
        temperature=0,  # temperature
        stream=True,  # stream option
    )
    return response

# integration of the API call into streamlit chat components
def ex9_basebot():
    # Initialize chat history
    if "chat_msg" not in st.session_state:
        st.session_state.chat_msg = []

    # Showing chat history
    for message in st.session_state.chat_msg:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    try:
        if prompt := st.chat_input("What is up?"):
            # set user prompt in chat history
            st.session_state.chat_msg.append({"role": "user", "content": prompt})
            with st.chat_message("user"):
                st.markdown(prompt)

            with st.chat_message("assistant"):
                message_placeholder = st.empty()
                full_response = ""
                # streaming function
                for response in chat_completion_stream(prompt):
                    full_response += response.choices[0].delta.get("content", "")
                    message_placeholder.markdown(full_response + "▌")
                message_placeholder.markdown(full_response)
            st.session_state.chat_msg.append(
                {"role": "assistant", "content": full_response}
            )

    except Exception as e:
        st.error(e)
```
"content": prompt},\n\t\t],\n\t\ttemperature=0, # temperature\n\t\tstream=True, # stream option\n\t)\n\treturn response\n\n# integration API call into streamlit chat components\ndef ex9_basebot():\n\t# Initialize chat history\n\tif "chat_msg" not in st.session_state:\n\t\tst.session_state.chat_msg = []\n\n\t# Showing Chat history\n\tfor message in st.session_state.chat_msg:\n\t\twith st.chat_message(message["role"]):\n\t\t\tst.markdown(message["content"])\n\ttry:\n\t\t#\n\t\tif prompt := st.chat_input("What is up?"):\n\t\t\t# set user prompt in chat history\n\t\t\tst.session_state.chat_msg.append({"role": "user", "content": prompt})\n\t\t\twith st.chat_message("user"):\n\t\t\t\tst.markdown(prompt)\n\n\t\t\twith st.chat_message("assistant"):\n\t\t\t\tmessage_placeholder = st.empty()\n\t\t\t\tfull_response = ""\n\t\t\t\t# streaming function\n\t\t\t\tfor response in chat_completion_stream(prompt):\n\t\t\t\t\tfull_response += response.choices[0].delta.get("content", "")\n\t\t\t\t\tmessage_placeholder.markdown(full_response + "▌")\n\t\t\t\tmessage_placeholder.markdown(full_response)\n\t\t\tst.session_state.chat_msg.append(\n\t\t\t\t{"role": "assistant", "content": full_response}\n\t\t\t)\n\n\texcept Exception as e:\n\t\tst.error(e)\n"""\n )\n', (38070, 39535), True, 'import streamlit as st\n'), ((39527, 39563), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (39538, 39563), True, 'import streamlit as st\n'), ((39712, 39937), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': 'MODEL', 'messages': "[{'role': 'system', 'content': st.session_state.prompt_template}, {'role':\n 'user', 'content': 'Tell me about Singapore in the 1970s in 50 words'}]", 'temperature': '(0)'}), "(model=MODEL, messages=[{'role': 'system',\n 'content': st.session_state.prompt_template}, {'role': 'user',\n 'content': 'Tell me about Singapore in the 1970s in 50 words'}],\n temperature=0)\n", (39740, 39937), False, 'import openai\n'), ((39962, 39994), 'streamlit.markdown', 'st.markdown', (['"""**LLM Response:**"""'], {}), "('**LLM Response:**')\n", (39973, 39994), True, 'import streamlit as st\n'), ((40060, 40092), 'streamlit.markdown', 'st.markdown', (['"""**Total tokens:**"""'], {}), "('**Total tokens:**')\n", (40071, 40092), True, 'import streamlit as st\n'), ((40164, 40217), 'streamlit.subheader', 'st.subheader', (['"""Exercise 10: Basic Prompt Engineering"""'], {}), "('Exercise 10: Basic Prompt Engineering')\n", (40176, 40217), True, 'import streamlit as st\n'), ((40219, 40583), 'streamlit.markdown', 'st.markdown', (['"""\n\t\t Now, we are going to create a chatbot with a personality by using a default prompt for our chatbot. \n\n\t\t This is the default prompt that will be used for every conversation.\n\n\t\t Let\'s make it a chatbot that speaks like Yoda from Star Wars.\n\n\t\t We will use the ***prompt_template*** that is already in our ***main()*** for this.\n\t\t """'], {}), '(\n """\n\t\t Now, we are going to create a chatbot with a personality by using a default prompt for our chatbot. 
```python
if "prompt_template" not in st.session_state:
    st.session_state.prompt_template = "Speak like Yoda from Star Wars for every question that was asked, do not give a direct answer but ask more questions in the style of wise Yoda from Star Wars"
```

Run the code below. You should get the same chatbot behaviour as the code output below.

Try varying the temperature setting (0.0 to 1.0) to see how it affects the chatbot's response.

**Code**

```python
# Exercise 10: Basic prompt engineering
def ex10_basebot():
    # prompt_template in session state already set in main()
    MODEL = "gpt-3.5-turbo"
    response = openai.ChatCompletion.create(
        model=MODEL,
        messages=[
            {"role": "system", "content": st.session_state.prompt_template},
            {
                "role": "user",
                "content": "Tell me about Singapore in the 1970s in 50 words",
            },
        ],
        temperature=0,
    )
    st.markdown("**LLM Response:**")
    st.write(response["choices"][0]["message"]["content"].strip())
    st.markdown("**Total tokens:**")
    st.write(str(response["usage"]["total_tokens"]))
```

**Code Output**

The streaming call that the next challenge asks you to build makes the same request with the session prompt template and the user's prompt:

```python
response = openai.ChatCompletion.create(
    model=MODEL,
    messages=[
        {"role": "system", "content": st.session_state.prompt_template},
        {"role": "user", "content": prompt},
    ],
    temperature=0,
    stream=True,
)
```
### Challenge 10: Make your bot like someone you know!

Modify the ***prompt_template*** in your ***main()*** to your own liking.

Be imaginative!

Now, in a new function called **chat_completion_stream_prompt()**, we are going to modify the earlier **streaming chat_completion** function to accept a user prompt input.

You will need to pass in a new input variable called **prompt** and replace the user content with the new **prompt** variable.

Replace the system prompt with **st.session_state.prompt_template**.

Before calling **chat_completion_stream_prompt()**, get a new prompt from the user like this to update the **st.session_state.prompt_template**:

```python
if my_prompt_template := st.text_input("Enter a system prompt template. E.g. Speak like Yoda from Star Wars."):
    st.session_state.prompt_template = my_prompt_template
    st.write("new prompt template set! ", st.session_state.prompt_template)
```
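The solution block is not captured in this extract. A minimal sketch of the pieces, combining the streaming call shown above with the prompt-template text input (the wrapper and loop reuse are assumptions):

```python
# Hypothetical sketch for Challenge 10 — not the captured solution.
def chat_completion_stream_prompt(prompt):
    MODEL = "gpt-3.5-turbo"
    return openai.ChatCompletion.create(
        model=MODEL,
        messages=[
            {"role": "system", "content": st.session_state.prompt_template},
            {"role": "user", "content": prompt},
        ],
        temperature=0,
        stream=True,
    )

def ch10():
    st.title("ChatGPT-like clone with Prompt Engineering")
    if my_prompt_template := st.text_input(
        "Enter a system prompt template. E.g. Speak like Yoda from Star Wars."
    ):
        st.session_state.prompt_template = my_prompt_template
        st.write("new prompt template set! ", st.session_state.prompt_template)
    # ... then reuse the streaming chat loop from ex9_basebot(),
    # calling chat_completion_stream_prompt(prompt) in its place.
```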
", st.session_state.prompt_template)\n"""\n )\n', (44036, 44295), True, 'import streamlit as st\n'), ((44287, 44317), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (44298, 44317), True, 'import streamlit as st\n'), ((45887, 45923), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (45898, 45923), True, 'import streamlit as st\n'), ((45925, 45979), 'streamlit.title', 'st.title', (['"""ChatGPT-like clone with Prompt Engineering"""'], {}), "('ChatGPT-like clone with Prompt Engineering')\n", (45933, 45979), True, 'import streamlit as st\n'), ((46111, 46266), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['subject', 'topic']", 'template': '"""Design a lesson plan on {subject} on the topic of {topic} for primary 1 students"""'}), "(input_variables=['subject', 'topic'], template=\n 'Design a lesson plan on {subject} on the topic of {topic} for primary 1 students'\n )\n", (46125, 46266), False, 'from langchain.prompts import PromptTemplate\n'), ((46323, 46378), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.9)'}), "(model_name='gpt-3.5-turbo', temperature=0.9)\n", (46333, 46378), False, 'from langchain.chat_models import ChatOpenAI\n'), ((46457, 46489), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (46465, 46489), False, 'from langchain.chains import LLMChain\n'), ((46494, 46519), 'streamlit.button', 'st.button', (['"""Run my chain"""'], {}), "('Run my chain')\n", (46503, 46519), True, 'import streamlit as st\n'), ((46793, 46853), 'streamlit.subheader', 'st.subheader', (['"""Exercise 11a: Prompt Template with LangChain"""'], {}), "('Exercise 11a: Prompt Template with LangChain')\n", (46805, 46853), True, 'import streamlit as st\n'), ((46855, 46956), 'streamlit.write', 'st.write', (['"""LangChain helps you to create a more complext prompt template for your chatbot."""'], {}), "(\n 'LangChain helps you to create a more complext prompt template for your chatbot.'\n )\n", (46863, 46956), True, 'import streamlit as st\n'), ((46949, 46979), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (46960, 46979), True, 'import streamlit as st\n'), ((46981, 47789), 'streamlit.code', 'st.code', (['"""\n#https://python.langchain.com/docs/modules/chains/\ndef ex11a(): # change in ex11a\n\t# langchain prompt template\n\tprompt = PromptTemplate(\n\t\tinput_variables=["subject", "topic"],\n\t\ttemplate=""\\"Design a lesson plan on {subject} on the topic of {topic} for primary 1 students""\\",\n\t)\n\n\t# openai_api_key = st.secrets["openapi_key"]\n\tllm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.9)\n\n\t# creating a LLM chain with the langchain call and prompt template\n\tchain = LLMChain(llm=llm, prompt=prompt)\n\tif st.button("Run my chain"):\n\t\tinput_prompt = prompt.format(subject="English", topic="Verbs")\n\t\t# Showing what is sent to LLM Chain\n\t\tst.write("Input prompt: ", input_prompt)\n\t\t# Showing the output from LLM Chain\n\t\tst.write(chain.run({"subject": "English", "topic": "Verbs"}))\n"""'], {}), '(\n """\n#https://python.langchain.com/docs/modules/chains/\ndef ex11a(): # change in ex11a\n\t# langchain prompt template\n\tprompt = PromptTemplate(\n\t\tinput_variables=["subject", "topic"],\n\t\ttemplate=""\\"Design a lesson plan on {subject} on the topic of {topic} for primary 1 
students""\\",\n\t)\n\n\t# openai_api_key = st.secrets["openapi_key"]\n\tllm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.9)\n\n\t# creating a LLM chain with the langchain call and prompt template\n\tchain = LLMChain(llm=llm, prompt=prompt)\n\tif st.button("Run my chain"):\n\t\tinput_prompt = prompt.format(subject="English", topic="Verbs")\n\t\t# Showing what is sent to LLM Chain\n\t\tst.write("Input prompt: ", input_prompt)\n\t\t# Showing the output from LLM Chain\n\t\tst.write(chain.run({"subject": "English", "topic": "Verbs"}))\n"""\n )\n', (46988, 47789), True, 'import streamlit as st\n'), ((47779, 47815), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (47790, 47815), True, 'import streamlit as st\n'), ((48484, 48823), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['occupation', 'topic', 'age']", 'template': '"""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up"""'}), "(input_variables=['occupation', 'topic', 'age'], template=\n 'Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up'\n )\n", (48498, 48823), False, 'from langchain.prompts import PromptTemplate\n'), ((48879, 48934), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.9)'}), "(model_name='gpt-3.5-turbo', temperature=0.9)\n", (48889, 48934), False, 'from langchain.chat_models import ChatOpenAI\n'), ((49018, 49059), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template'}), '(llm=llm, prompt=prompt_template)\n', (49026, 49059), False, 'from langchain.chains import LLMChain\n'), ((49203, 49231), 'streamlit.subheader', 'st.subheader', (['"""Exercise 11b"""'], {}), "('Exercise 11b')\n", (49215, 49231), True, 'import streamlit as st\n'), ((49233, 49329), 'streamlit.write', 'st.write', (['"""Now, we will create a chatbot with a prompt template that is more complex."""'], {}), "(\n 'Now, we will create a chatbot with a prompt template that is more complex.'\n )\n", (49241, 49329), True, 'import streamlit as st\n'), ((49321, 49443), 'streamlit.write', 'st.write', (['"""We will use the ***prompt_inputs_form()*** function to get the user\'s input for the prompt template."""'], {}), '(\n "We will use the ***prompt_inputs_form()*** function to get the user\'s input for the prompt template."\n )\n', (49329, 49443), True, 'import streamlit as st\n'), ((49435, 49495), 'streamlit.write', 'st.write', (['"""Run the code below to see the chatbot in action."""'], {}), "('Run the code below to see the chatbot in action.')\n", (49443, 49495), True, 'import streamlit as st\n'), ((49498, 49528), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (49509, 49528), True, 'import streamlit as st\n'), ((49530, 50886), 'streamlit.code', 'st.code', (['"""\ndef prompt_inputs_form(): # Using st.form, create the starting prompt to your prompt template, this is an expert on a topic that is talking to a user of a certain age\n\t# langchain prompt template\n\twith st.form("Prompt Template"):\n\t\toccupation = 
st.text_input("Enter the occupation:")\n\t\ttopic = st.text_input("Enter the topic:")\n\t\tage = st.text_input("Enter the age:")\n\n\t\t# Every form must have a submit button.\n\t\tsubmitted = st.form_submit_button("Submit")\n\t# return a dictionary of the values\n\tif submitted:\n\t\treturn {"occupation": occupation, "topic": topic, "age": age}\n\ndef ex11b():\n\t# create your template\n\tprompt_template = PromptTemplate(\n\t\tinput_variables=["occupation", "topic", "age"],\n\t\ttemplate=""\\"Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""\\",\n\t)\n\t# create a langchain function call to openai\n\tllm = ChatOpenAI(\n\t\tmodel_name="gpt-3.5-turbo",\n\t\ttemperature=0.9,\n\t)\n\t# create a LLM chain with the langchain call and prompt template\n\tchain = LLMChain(llm=llm, prompt=prompt_template)\n\t# call the prompt_inputs_form()\n\tdict_inputs = prompt_inputs_form()\n\tif dict_inputs:\n\t\tst.write(chain.run(dict_inputs))\n"""'], {}), '(\n """\ndef prompt_inputs_form(): # Using st.form, create the starting prompt to your prompt template, this is an expert on a topic that is talking to a user of a certain age\n\t# langchain prompt template\n\twith st.form("Prompt Template"):\n\t\toccupation = st.text_input("Enter the occupation:")\n\t\ttopic = st.text_input("Enter the topic:")\n\t\tage = st.text_input("Enter the age:")\n\n\t\t# Every form must have a submit button.\n\t\tsubmitted = st.form_submit_button("Submit")\n\t# return a dictionary of the values\n\tif submitted:\n\t\treturn {"occupation": occupation, "topic": topic, "age": age}\n\ndef ex11b():\n\t# create your template\n\tprompt_template = PromptTemplate(\n\t\tinput_variables=["occupation", "topic", "age"],\n\t\ttemplate=""\\"Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""\\",\n\t)\n\t# create a langchain function call to openai\n\tllm = ChatOpenAI(\n\t\tmodel_name="gpt-3.5-turbo",\n\t\ttemperature=0.9,\n\t)\n\t# create a LLM chain with the langchain call and prompt template\n\tchain = LLMChain(llm=llm, prompt=prompt_template)\n\t# call the prompt_inputs_form()\n\tdict_inputs = prompt_inputs_form()\n\tif dict_inputs:\n\t\tst.write(chain.run(dict_inputs))\n"""\n )\n', (49537, 50886), True, 'import streamlit as st\n'), ((50878, 50914), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (50889, 50914), True, 'import streamlit as st\n'), ((51092, 51431), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['occupation', 'topic', 'age']", 'template': '"""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up"""'}), "(input_variables=['occupation', 'topic', 'age'], template=\n 'Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up'\n )\n", (51106, 51431), 
### Challenge 11: Prompt Template with LangChain

Now, let's incorporate the prompt template into our chatbot from the previous exercise.

We will use the ***prompt_inputs_form()*** function to get the user's input for the prompt template.

You can use the ***ch10*** function from the previous exercise to do the llm api call with the updated session_state.prompt_template.

Ignore the text input field that asks for a system prompt template from ch10(), since we will be using the prompt template from the user's input.

As you interact with the chatbot, observe that the prompt template is updated with the latest user input as seen from the code output.
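The solution block is not captured in this extract. A minimal sketch of how ***ch11()*** could set the session prompt template from the form inputs and hand off to the chat loop (the wiring beyond what the prose describes is an assumption):

```python
# Hypothetical sketch of ch11() — not the captured solution.
def ch11():
    prompt_template = PromptTemplate(
        input_variables=["occupation", "topic", "age"],
        template="""Imagine you are a {occupation} who is an expert on the topic of {topic}, you are going to help, teach and provide information to the person who is {age} years old; if you do not know the answer, you must tell the person, do not make any answer up""",
    )
    dict_inputs = prompt_inputs_form()
    if dict_inputs:
        st.session_state.prompt_template = prompt_template.format(**dict_inputs)
    st.write("New prompt template:", st.session_state.prompt_template)
    ch10()  # reuse the streaming chat loop from Challenge 10
```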
"(\n 'You can determine the number of previous messages to remember by setting the ***k*** parameter.'\n )\n", (54321, 54430), True, 'import streamlit as st\n'), ((54423, 54453), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (54434, 54453), True, 'import streamlit as st\n'), ((54455, 54998), 'streamlit.code', 'st.code', (['"""\ndef ex12():\n\tmemory = ConversationBufferWindowMemory(k=3)\n\tmemory.save_context({"input": "hi"}, {"output": "whats up?"})\n\tmemory.save_context({"input": "not much"}, {"output": "what can I help you with?"})\n\n\tst.write(memory.load_memory_variables({}))\n \n\tmemory = ConversationBufferWindowMemory( k=3, return_messages=True)\n\tmemory.save_context({"input": "hi"}, {"output": "whats up?"})\n\tmemory.save_context({"input": "not much"}, {"output": "what can I help you with?"})\n\n\tst.write(memory.load_memory_variables({}))\n"""'], {}), '(\n """\ndef ex12():\n\tmemory = ConversationBufferWindowMemory(k=3)\n\tmemory.save_context({"input": "hi"}, {"output": "whats up?"})\n\tmemory.save_context({"input": "not much"}, {"output": "what can I help you with?"})\n\n\tst.write(memory.load_memory_variables({}))\n \n\tmemory = ConversationBufferWindowMemory( k=3, return_messages=True)\n\tmemory.save_context({"input": "hi"}, {"output": "whats up?"})\n\tmemory.save_context({"input": "not much"}, {"output": "what can I help you with?"})\n\n\tst.write(memory.load_memory_variables({}))\n"""\n )\n', (54462, 54998), True, 'import streamlit as st\n'), ((54992, 55028), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (55003, 55028), True, 'import streamlit as st\n'), ((55122, 55471), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['occupation', 'topic', 'age']", 'template': '"""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information\n\t\t\t\t\t\tto the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up"""'}), '(input_variables=[\'occupation\', \'topic\', \'age\'], template=\n """Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information\n\t\t\t\t\t\tto the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up"""\n )\n', (55136, 55471), False, 'from langchain.prompts import PromptTemplate\n'), ((55725, 55765), 'streamlit.write', 'st.write', (['"""input prompt: """', 'input_prompt'], {}), "('input prompt: ', input_prompt)\n", (55733, 55765), True, 'import streamlit as st\n'), ((56030, 56079), 'streamlit.session_state.memory.load_memory_variables', 'st.session_state.memory.load_memory_variables', (['{}'], {}), '({})\n', (56075, 56079), True, 'import streamlit as st\n'), ((56081, 56119), 'streamlit.write', 'st.write', (['"""Memory Data: """', 'memory_data'], {}), "('Memory Data: ', memory_data)\n", (56089, 56119), True, 'import streamlit as st\n'), ((56284, 56350), 'streamlit.write', 'st.write', (['"""New prompt template:"""', 'st.session_state.prompt_template'], {}), "('New prompt template:', st.session_state.prompt_template)\n", (56292, 56350), True, 'import streamlit as st\n'), ((57422, 57471), 'streamlit.subheader', 'st.subheader', (['"""Challenge 12: Chatbot with memory"""'], {}), "('Challenge 12: Chatbot with memory')\n", (57434, 57471), True, 'import streamlit as st\n'), 
### Challenge 12: Chatbot with memory

Now, let's incorporate the memory into the session state prompt template.

The chatbot should remember the previous user input and use it as the prompt template for the next conversation.

Start with the following code and modify ex12() to create a chatbot with memory.

Get the *{input_prompt}* using *prompt_inputs_form()*.

As you interact with the chatbot, observe that the memory is updated with the latest k number of user input and output as seen from the code output.

**Code**

```python
if "memory" not in st.session_state:
    st.session_state.memory = ConversationBufferWindowMemory(k=5)

# step 1: save the memory from your chatbot
# step 2: integrate the memory in the prompt_template (st.session_state.prompt_template)
memory_data = st.session_state.memory.load_memory_variables({})
st.write(memory_data)
st.session_state.prompt_template = f"""{input_prompt}

Below is the conversation history between the AI and Users so far

{memory_data}"""
```

Calls from the live solution that are visible in this extract show the same flow:

```python
st.write("input prompt: ", input_prompt)
memory_data = st.session_state.memory.load_memory_variables({})
st.write("Memory Data: ", memory_data)
st.write("New prompt template:", st.session_state.prompt_template)
```
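A minimal sketch of a complete ***ch12()*** built from those pieces, assuming `prompt_template` is the `PromptTemplate` from Challenge 11 and that each exchange is saved back into memory (this wiring is an assumption, not the captured solution):

```python
# Hypothetical sketch of ch12() — prompt template plus windowed memory.
def ch12():
    dict_inputs = prompt_inputs_form()
    if dict_inputs:
        input_prompt = prompt_template.format(**dict_inputs)
    else:
        input_prompt = "You are a helpful assistant"
    st.write("input prompt: ", input_prompt)

    if "memory" not in st.session_state:
        st.session_state.memory = ConversationBufferWindowMemory(k=5)

    memory_data = st.session_state.memory.load_memory_variables({})
    st.write("Memory Data: ", memory_data)
    st.session_state.prompt_template = f"""{input_prompt}

Below is the conversation history between the AI and Users so far

{memory_data}"""
    st.write("New prompt template:", st.session_state.prompt_template)

    # chat loop: stream a reply, then save the exchange into memory
    if prompt := st.chat_input("What is up?"):
        reply = "".join(
            r.choices[0].delta.get("content", "")
            for r in chat_completion_stream_prompt(prompt)
        )
        st.chat_message("user").markdown(prompt)
        st.chat_message("assistant").markdown(reply)
        st.session_state.memory.save_context({"input": prompt}, {"output": reply})
```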
*(In the lesson, the document uploader renders live here: `st.subheader("Upload your docs")` with `st.file_uploader("Choose a file", type=["docx", "txt", "pdf"])`.)*

### Exercise 13: Create a vector store

Now, we will create a vector store to store the user's document.

This process uses OpenAI to generate embeddings and LanceDB for storing these embeddings.

For now, this only works for pdf files.

You may need to run the following commands in terminal to install new libraries:

```
pip install tiktoken
```

**Code**

```python
# exercise 13 - loading
def upload_file_streamlit():
    def get_file_extension(file_name):
        return os.path.splitext(file_name)[1]

    st.subheader("Upload your docs")

    # Streamlit file uploader to accept file input
    uploaded_file = st.file_uploader("Choose a file", type=["docx", "txt", "pdf"])

    if uploaded_file:
        # Reading file content
        file_content = uploaded_file.read()

        # Determine the suffix based on uploaded file's name
        file_suffix = get_file_extension(uploaded_file.name)

        # Saving the uploaded file temporarily to process it
        with tempfile.NamedTemporaryFile(delete=False, suffix=file_suffix) as temp_file:
            temp_file.write(file_content)
            temp_file.flush()  # Ensure the data is written to the file
            temp_file_path = temp_file.name
        return temp_file_path

# exercise 13 - split and chunk, embeddings and storing in vectorstores for reference
def vectorstore_creator():
    # WORKING_DIRECTORY set above in the main.py
    # Process the temporary file using UnstructuredFileLoader (or any other method you need)
    embeddings = OpenAIEmbeddings()
    db = lancedb.connect(WORKING_DIRECTORY)
    table = db.create_table(
        "my_table",
        data=[
            {
                "vector": embeddings.embed_query("Query unsuccessful"),
                "text": "Query unsuccessful",
                "id": "1",
            }
        ],
        mode="overwrite",
    )
    # st.write(temp_file_path)
    temp_file_path = upload_file_streamlit()
    if temp_file_path:
        loader = PyPDFLoader(temp_file_path)
        documents = loader.load_and_split()
        db = LanceDB.from_documents(documents, embeddings, connection=table)
        return db

def ex13_vectorstore_creator():
    if "vectorstore" not in st.session_state:
        st.session_state.vectorstore = False
    db = vectorstore_creator()
    st.session_state.vectorstore = db
    if st.session_state.vectorstore:
        query = st.text_input("Enter a query")
        if query:
            st.session_state.vectorstore = db
            docs = db.similarity_search(query)
            st.write(docs[0].page_content)
```
temp_file_path:\n\t\tloader = PyPDFLoader(temp_file_path)\n\t\tdocuments = loader.load_and_split()\n\t\tdb = LanceDB.from_documents(documents, embeddings, connection=table)\n\t\treturn db\n\ndef ex13_vectorstore_creator():\n\tif "vectorstore" not in st.session_state:\n\t\tst.session_state.vectorstore = False\n\tdb = vectorstore_creator()\n\tst.session_state.vectorstore = db\n\tif st.session_state.vectorstore:\n\t\tquery = st.text_input("Enter a query")\n\t\tif query:\n\t\t\tst.session_state.vectorstore = db\n\t\t\tdocs = db.similarity_search(query)\n\t\t\tst.write(docs[0].page_content)\n"""'], {}), '(\n """\n#exercise 13 - loading\ndef upload_file_streamlit():\n\tdef get_file_extension(file_name):\n\t\treturn os.path.splitext(file_name)[1]\n\n\tst.subheader("Upload your docs")\n\n\t# Streamlit file uploader to accept file input\n\tuploaded_file = st.file_uploader("Choose a file", type=["docx", "txt", "pdf"])\n\n\tif uploaded_file:\n\t\t# Reading file content\n\t\tfile_content = uploaded_file.read()\n\n\t\t# Determine the suffix based on uploaded file\'s name\n\t\tfile_suffix = get_file_extension(uploaded_file.name)\n\n\t\t# Saving the uploaded file temporarily to process it\n\t\twith tempfile.NamedTemporaryFile(delete=False, suffix=file_suffix) as temp_file:\n\t\t\ttemp_file.write(file_content)\n\t\t\ttemp_file.flush() # Ensure the data is written to the file\n\t\t\ttemp_file_path = temp_file.name\n\t\treturn temp_file_path\n\t\n#exercise 13 - split and chunk, embeddings and storing in vectorstores for reference\ndef vectorstore_creator():\n\t# WORKING_DIRECTORY set above in the main.py\n\t# Process the temporary file using UnstructuredFileLoader (or any other method you need)\n\tembeddings = OpenAIEmbeddings()\n\tdb = lancedb.connect(WORKING_DIRECTORY)\n\ttable = db.create_table(\n\t\t"my_table",\n\t\tdata=[\n\t\t\t{\n\t\t\t\t"vector": embeddings.embed_query("Query unsuccessful"),\n\t\t\t\t"text": "Query unsuccessful",\n\t\t\t\t"id": "1",\n\t\t\t}\n\t\t],\n\t\tmode="overwrite",\n\t)\n\t# st.write(temp_file_path)\n\ttemp_file_path = upload_file_streamlit()\n\tif temp_file_path:\n\t\tloader = PyPDFLoader(temp_file_path)\n\t\tdocuments = loader.load_and_split()\n\t\tdb = LanceDB.from_documents(documents, embeddings, connection=table)\n\t\treturn db\n\ndef ex13_vectorstore_creator():\n\tif "vectorstore" not in st.session_state:\n\t\tst.session_state.vectorstore = False\n\tdb = vectorstore_creator()\n\tst.session_state.vectorstore = db\n\tif st.session_state.vectorstore:\n\t\tquery = st.text_input("Enter a query")\n\t\tif query:\n\t\t\tst.session_state.vectorstore = db\n\t\t\tdocs = db.similarity_search(query)\n\t\t\tst.write(docs[0].page_content)\n"""\n )\n', (63424, 65367), True, 'import streamlit as st\n'), ((65364, 65400), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (65375, 65400), True, 'import streamlit as st\n'), ((65621, 65970), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['occupation', 'topic', 'age']", 'template': '"""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information\n\t\t\t\t\t\tto the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up"""'}), '(input_variables=[\'occupation\', \'topic\', \'age\'], template=\n """Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide 
information\n\t\t\t\t\t\tto the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up"""\n )\n', (65635, 65970), False, 'from langchain.prompts import PromptTemplate\n'), ((66589, 66638), 'streamlit.session_state.memory.load_memory_variables', 'st.session_state.memory.load_memory_variables', (['{}'], {}), '({})\n', (66634, 66638), True, 'import streamlit as st\n'), ((66640, 66661), 'streamlit.write', 'st.write', (['memory_data'], {}), '(memory_data)\n', (66648, 66661), True, 'import streamlit as st\n'), ((66825, 66892), 'streamlit.write', 'st.write', (['"""new prompt template: """', 'st.session_state.prompt_template'], {}), "('new prompt template: ', st.session_state.prompt_template)\n", (66833, 66892), True, 'import streamlit as st\n'), ((68569, 68613), 'streamlit.subheader', 'st.subheader', (['"""Exercise 14: Semantic search"""'], {}), "('Exercise 14: Semantic search')\n", (68581, 68613), True, 'import streamlit as st\n'), ((68615, 68719), 'streamlit.write', 'st.write', (['"""In this exercise. we will do a semantic search on the vector store in our chatbot."""'], {}), "(\n 'In this exercise. we will do a semantic search on the vector store in our chatbot.'\n )\n", (68623, 68719), True, 'import streamlit as st\n'), ((68711, 68823), 'streamlit.write', 'st.write', (['"""At the same time, the chatbot is able to remember its conversation history to some extent."""'], {}), "(\n 'At the same time, the chatbot is able to remember its conversation history to some extent.'\n )\n", (68719, 68823), True, 'import streamlit as st\n'), ((68815, 68988), 'streamlit.write', 'st.write', (['"""This code integrates advanced features like semantic search and context-aware prompts to provide a more engaging and helpful conversational experience."""'], {}), "(\n 'This code integrates advanced features like semantic search and context-aware prompts to provide a more engaging and helpful conversational experience.'\n )\n", (68823, 68988), True, 'import streamlit as st\n'), ((68980, 69049), 'streamlit.write', 'st.write', (['"""Copy and run the code below to see the chatbot in action."""'], {}), "('Copy and run the code below to see the chatbot in action.')\n", (68988, 69049), True, 'import streamlit as st\n'), ((69052, 69082), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (69063, 69082), True, 'import streamlit as st\n'), ((69084, 72216), 'streamlit.code', 'st.code', (['"""\n# save the vectorstore in st.session_state\n# add semantic search prompt into memory prompt\n# integrate back into your chatbot\ndef ex14_basebot():\n\t# Prompt_template form from ex11\n\tprompt_template = PromptTemplate(\n\t\tinput_variables=["occupation", "topic", "age"],\n\t\ttemplate=""\\"Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information\n\t\t\t\t\t\tto the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""\\",\n\t)\n\tdict_inputs = prompt_inputs_form()\n\tif dict_inputs:\n\t\tinput_prompt = prompt_template.format(\n\t\t\toccupation=dict_inputs["occupation"],\n\t\t\ttopic=dict_inputs["topic"],\n\t\t\tage=dict_inputs["age"],\n\t\t)\n\t\tst.session_state.input_prompt = input_prompt\n\n\tif "input_prompt" not in st.session_state:\n\t\tst.session_state.input_prompt = "Speak like Yoda from Star Wars"\n\n\tif "memory" not in st.session_state:\n\t\tst.session_state.memory = 
ConversationBufferWindowMemory(k=5)\n\n\t# step 1 save the memory from your chatbot\n\t# step 2 integrate the memory in the prompt_template (st.session_state.prompt_template) show a hint\n\tmemory_data = st.session_state.memory.load_memory_variables({})\n\tst.write(memory_data)\n\tst.session_state.prompt_template = f""\\"\n\t\t{st.session_state.input_prompt}\n\n\t\tThis is the last conversation history:\n\t\t{memory_data}""\\"\n\tst.write("new prompt template: ", st.session_state.prompt_template)\n\n\tst.session_state.vectorstore = vectorstore_creator()\n\n\t# Initialize chat history\n\tif "msg" not in st.session_state:\n\t\tst.session_state.msg = []\n\n\t# Showing Chat history\n\tfor message in st.session_state.msg:\n\t\twith st.chat_message(message["role"]):\n\t\t\tst.markdown(message["content"])\n\ttry:\n\t\t#\n\t\tif prompt := st.chat_input("What is up?"):\n\t\t\t# query information\n\t\t\tif st.session_state.vectorstore:\n\t\t\t\tdocs = st.session_state.vectorstore.similarity_search(prompt)\n\t\t\t\tdocs = docs[0].page_content\n\t\t\t\t# add your query prompt\n\t\t\t\tvs_prompt = f""\\"\n\t\t\t\t\tYou should reference this search result to help your answer,\n\t\t\t\t\t{docs}\n\t\t\t\t\tif the search result does not anwer the query, please say you are unable to answer, do not make up an answer""\\"\n\t\t\telse:\n\t\t\t\tvs_prompt = ""\n\t\t\t# add query prompt to your memory prompt and send it to LLM\n\t\t\tst.session_state.prompt_template = (\n\t\t\t\tst.session_state.prompt_template + vs_prompt\n\t\t\t)\n\t\t\t# set user prompt in chat history\n\t\t\tst.session_state.msg.append({"role": "user", "content": prompt})\n\t\t\twith st.chat_message("user"):\n\t\t\t\tst.markdown(prompt)\n\n\t\t\twith st.chat_message("assistant"):\n\t\t\t\tmessage_placeholder = st.empty()\n\t\t\t\tfull_response = ""\n\t\t\t\t# streaming function\n\t\t\t\tfor response in chat_completion_stream_prompt(prompt):\n\t\t\t\t\tfull_response += response.choices[0].delta.get("content", "")\n\t\t\t\t\tmessage_placeholder.markdown(full_response + "▌")\n\t\t\t\tmessage_placeholder.markdown(full_response)\n\t\t\tst.session_state.msg.append({"role": "assistant", "content": full_response})\n\t\t\tst.session_state.memory.save_context(\n\t\t\t\t{"input": prompt}, {"output": full_response}\n\t\t\t)\n\n\texcept Exception as e:\n\t\tst.error(e)\n"""'], {}), '(\n """\n# save the vectorstore in st.session_state\n# add semantic search prompt into memory prompt\n# integrate back into your chatbot\ndef ex14_basebot():\n\t# Prompt_template form from ex11\n\tprompt_template = PromptTemplate(\n\t\tinput_variables=["occupation", "topic", "age"],\n\t\ttemplate=""\\"Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information\n\t\t\t\t\t\tto the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""\\",\n\t)\n\tdict_inputs = prompt_inputs_form()\n\tif dict_inputs:\n\t\tinput_prompt = prompt_template.format(\n\t\t\toccupation=dict_inputs["occupation"],\n\t\t\ttopic=dict_inputs["topic"],\n\t\t\tage=dict_inputs["age"],\n\t\t)\n\t\tst.session_state.input_prompt = input_prompt\n\n\tif "input_prompt" not in st.session_state:\n\t\tst.session_state.input_prompt = "Speak like Yoda from Star Wars"\n\n\tif "memory" not in st.session_state:\n\t\tst.session_state.memory = ConversationBufferWindowMemory(k=5)\n\n\t# step 1 save the memory from your chatbot\n\t# step 2 integrate the memory in the prompt_template 
(st.session_state.prompt_template) show a hint\n\tmemory_data = st.session_state.memory.load_memory_variables({})\n\tst.write(memory_data)\n\tst.session_state.prompt_template = f""\\"\n\t\t{st.session_state.input_prompt}\n\n\t\tThis is the last conversation history:\n\t\t{memory_data}""\\"\n\tst.write("new prompt template: ", st.session_state.prompt_template)\n\n\tst.session_state.vectorstore = vectorstore_creator()\n\n\t# Initialize chat history\n\tif "msg" not in st.session_state:\n\t\tst.session_state.msg = []\n\n\t# Showing Chat history\n\tfor message in st.session_state.msg:\n\t\twith st.chat_message(message["role"]):\n\t\t\tst.markdown(message["content"])\n\ttry:\n\t\t#\n\t\tif prompt := st.chat_input("What is up?"):\n\t\t\t# query information\n\t\t\tif st.session_state.vectorstore:\n\t\t\t\tdocs = st.session_state.vectorstore.similarity_search(prompt)\n\t\t\t\tdocs = docs[0].page_content\n\t\t\t\t# add your query prompt\n\t\t\t\tvs_prompt = f""\\"\n\t\t\t\t\tYou should reference this search result to help your answer,\n\t\t\t\t\t{docs}\n\t\t\t\t\tif the search result does not anwer the query, please say you are unable to answer, do not make up an answer""\\"\n\t\t\telse:\n\t\t\t\tvs_prompt = ""\n\t\t\t# add query prompt to your memory prompt and send it to LLM\n\t\t\tst.session_state.prompt_template = (\n\t\t\t\tst.session_state.prompt_template + vs_prompt\n\t\t\t)\n\t\t\t# set user prompt in chat history\n\t\t\tst.session_state.msg.append({"role": "user", "content": prompt})\n\t\t\twith st.chat_message("user"):\n\t\t\t\tst.markdown(prompt)\n\n\t\t\twith st.chat_message("assistant"):\n\t\t\t\tmessage_placeholder = st.empty()\n\t\t\t\tfull_response = ""\n\t\t\t\t# streaming function\n\t\t\t\tfor response in chat_completion_stream_prompt(prompt):\n\t\t\t\t\tfull_response += response.choices[0].delta.get("content", "")\n\t\t\t\t\tmessage_placeholder.markdown(full_response + "▌")\n\t\t\t\tmessage_placeholder.markdown(full_response)\n\t\t\tst.session_state.msg.append({"role": "assistant", "content": full_response})\n\t\t\tst.session_state.memory.save_context(\n\t\t\t\t{"input": prompt}, {"output": full_response}\n\t\t\t)\n\n\texcept Exception as e:\n\t\tst.error(e)\n"""\n )\n', (69091, 72216), True, 'import streamlit as st\n'), ((72203, 72239), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (72214, 72239), True, 'import streamlit as st\n'), ((72489, 72513), 'sqlite3.connect', 'sqlite3.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (72504, 72513), False, 'import sqlite3\n'), ((72919, 72943), 'sqlite3.connect', 'sqlite3.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (72934, 72943), False, 'import sqlite3\n'), ((72975, 72989), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (72987, 72989), False, 'from datetime import datetime\n'), ((73541, 73565), 'sqlite3.connect', 'sqlite3.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (73556, 73565), False, 'import sqlite3\n'), ((73772, 73812), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {'columns': 'column_names'}), '(rows, columns=column_names)\n', (73784, 73812), True, 'import pandas as pd\n'), ((73814, 73830), 'streamlit.dataframe', 'st.dataframe', (['df'], {}), '(df)\n', (73826, 73830), True, 'import streamlit as st\n'), ((73866, 73911), 'streamlit.subheader', 'st.subheader', (['"""Exercise 15: Using a database"""'], {}), "('Exercise 15: Using a database')\n", (73878, 73911), True, 'import streamlit as st\n'), ((73913, 74049), 'streamlit.write', 'st.write', (['"""In this exercise, we 
will demonstrate how to create a database, as well as how to store and retrieve data from it."""'], {}), "(\n 'In this exercise, we will demonstrate how to create a database, as well as how to store and retrieve data from it.'\n )\n", (73921, 74049), True, 'import streamlit as st\n'), ((74041, 74071), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (74052, 74071), True, 'import streamlit as st\n'), ((74073, 75680), 'streamlit.code', 'st.code', (['"""\ndef ex15_initialise():\n\t# Create or check for the \'database\' directory in the current working directory\n\t# Set DB_NAME to be within the \'database\' directory at the top of main.py\n\t# Connect to the SQLite database\n\tconn = sqlite3.connect(DB_NAME)\n\tcursor = conn.cursor()\n\n\t# Conversation data table\n\tcursor.execute(\n\t\t""\\"\n\t\tCREATE TABLE IF NOT EXISTS data_table (\n\t\t\tid INTEGER PRIMARY KEY,\n\t\t\tdate TEXT NOT NULL UNIQUE,\n\t\t\tusername TEXT NOT NULL,\n\t\t\tchatbot_ans TEXT NOT NULL,\n\t\t\tuser_prompt TEXT NOT NULL,\n\t\t\ttokens TEXT\n\t\t)\n\t""\\"\n\t)\n\tconn.commit()\n\tconn.close()\n\ndef ex15_collect(username, chatbot_response, prompt):\n\t# collect data from bot\n\tconn = sqlite3.connect(DB_NAME)\n\tcursor = conn.cursor()\n\tnow = datetime.now() # Using ISO format for date\n\ttokens = len(chatbot_response) * 1.3\n\tcursor.execute(\n\t\t""\\"\n\t\tINSERT INTO data_table (date, username,chatbot_ans, user_prompt, tokens)\n\t\tVALUES (?, ?, ?, ?, ?)\n\t""\\",\n\t\t(now, username, chatbot_response, prompt, tokens),\n\t)\n\tconn.commit()\n\tconn.close()\n\n# implementing data collection and displaying\ndef ex15():\n\t# initialise database first\n\tex15_initialise()\n\t# collect some data\n\tex15_collect("yoda", "I am Yoda. The Force is strong with you", "Who are you?")\n\t# display data\n\t# Connect to the specified database\n\tconn = sqlite3.connect(DB_NAME)\n\tcursor = conn.cursor()\n\n\t# Fetch all data from data_table\n\tcursor.execute("SELECT * FROM data_table")\n\trows = cursor.fetchall()\n\tcolumn_names = [description[0] for description in cursor.description]\n\tdf = pd.DataFrame(rows, columns=column_names)\n\tst.dataframe(df)\n\tconn.close()\n"""'], {}), '(\n """\ndef ex15_initialise():\n\t# Create or check for the \'database\' directory in the current working directory\n\t# Set DB_NAME to be within the \'database\' directory at the top of main.py\n\t# Connect to the SQLite database\n\tconn = sqlite3.connect(DB_NAME)\n\tcursor = conn.cursor()\n\n\t# Conversation data table\n\tcursor.execute(\n\t\t""\\"\n\t\tCREATE TABLE IF NOT EXISTS data_table (\n\t\t\tid INTEGER PRIMARY KEY,\n\t\t\tdate TEXT NOT NULL UNIQUE,\n\t\t\tusername TEXT NOT NULL,\n\t\t\tchatbot_ans TEXT NOT NULL,\n\t\t\tuser_prompt TEXT NOT NULL,\n\t\t\ttokens TEXT\n\t\t)\n\t""\\"\n\t)\n\tconn.commit()\n\tconn.close()\n\ndef ex15_collect(username, chatbot_response, prompt):\n\t# collect data from bot\n\tconn = sqlite3.connect(DB_NAME)\n\tcursor = conn.cursor()\n\tnow = datetime.now() # Using ISO format for date\n\ttokens = len(chatbot_response) * 1.3\n\tcursor.execute(\n\t\t""\\"\n\t\tINSERT INTO data_table (date, username,chatbot_ans, user_prompt, tokens)\n\t\tVALUES (?, ?, ?, ?, ?)\n\t""\\",\n\t\t(now, username, chatbot_response, prompt, tokens),\n\t)\n\tconn.commit()\n\tconn.close()\n\n# implementing data collection and displaying\ndef ex15():\n\t# initialise database first\n\tex15_initialise()\n\t# collect some data\n\tex15_collect("yoda", "I am Yoda. 
The Force is strong with you", "Who are you?")\n\t# display data\n\t# Connect to the specified database\n\tconn = sqlite3.connect(DB_NAME)\n\tcursor = conn.cursor()\n\n\t# Fetch all data from data_table\n\tcursor.execute("SELECT * FROM data_table")\n\trows = cursor.fetchall()\n\tcolumn_names = [description[0] for description in cursor.description]\n\tdf = pd.DataFrame(rows, columns=column_names)\n\tst.dataframe(df)\n\tconn.close()\n"""\n )\n', (74080, 75680), True, 'import streamlit as st\n'), ((75668, 75704), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (75679, 75704), True, 'import streamlit as st\n'), ((75836, 76185), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['occupation', 'topic', 'age']", 'template': '"""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information\n\t\t\t\t\t\tto the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up"""'}), '(input_variables=[\'occupation\', \'topic\', \'age\'], template=\n """Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information\n\t\t\t\t\t\tto the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up"""\n )\n', (75850, 76185), False, 'from langchain.prompts import PromptTemplate\n'), ((76804, 76853), 'streamlit.session_state.memory.load_memory_variables', 'st.session_state.memory.load_memory_variables', (['{}'], {}), '({})\n', (76849, 76853), True, 'import streamlit as st\n'), ((76855, 76876), 'streamlit.write', 'st.write', (['memory_data'], {}), '(memory_data)\n', (76863, 76876), True, 'import streamlit as st\n'), ((77036, 77103), 'streamlit.write', 'st.write', (['"""new prompt template: """', 'st.session_state.prompt_template'], {}), "('new prompt template: ', st.session_state.prompt_template)\n", (77044, 77103), True, 'import streamlit as st\n'), ((78843, 78889), 'streamlit.subheader', 'st.subheader', (['"""Challenge 15: Using a database"""'], {}), "('Challenge 15: Using a database')\n", (78855, 78889), True, 'import streamlit as st\n'), ((78891, 78997), 'streamlit.write', 'st.write', (['"""For this challenge, we will incorporate using a database from our previous exercise."""'], {}), "(\n 'For this challenge, we will incorporate using a database from our previous exercise.'\n )\n", (78899, 78997), True, 'import streamlit as st\n'), ((78989, 79100), 'streamlit.write', 'st.write', (['"""Copy the code from ***ex14()*** and use the ***ex15()*** to collect and display the data."""'], {}), "(\n 'Copy the code from ***ex14()*** and use the ***ex15()*** to collect and display the data.'\n )\n", (78997, 79100), True, 'import streamlit as st\n'), ((79093, 79123), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (79104, 79123), True, 'import streamlit as st\n'), ((82271, 82307), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (82282, 82307), True, 'import streamlit as st\n'), ((82500, 82550), 'streamlit.title', 'st.title', (['"""🦜 LangChain: Chat with internet search"""'], {}), "('🦜 LangChain: Chat with internet search')\n", (82508, 82550), True, 'import streamlit as st\n'), ((82560, 82589), 'langchain.memory.chat_message_histories.StreamlitChatMessageHistory', 
'StreamlitChatMessageHistory', ([], {}), '()\n', (82587, 82589), False, 'from langchain.memory.chat_message_histories import StreamlitChatMessageHistory\n'), ((82600, 82717), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'chat_memory': 'msgs', 'return_messages': '(True)', 'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(chat_memory=msgs, return_messages=True, memory_key\n ='chat_history', output_key='output')\n", (82624, 82717), False, 'from langchain.memory import ConversationBufferMemory\n'), ((84182, 84222), 'streamlit.subheader', 'st.subheader', (['"""Exercise 16: Smart agent"""'], {}), "('Exercise 16: Smart agent')\n", (84194, 84222), True, 'import streamlit as st\n'), ((84224, 84369), 'streamlit.write', 'st.write', (['"""In this exercise, we will configure a chatbot with an internet search tool that shows all intermediate steps and tool logs."""'], {}), "(\n 'In this exercise, we will configure a chatbot with an internet search tool that shows all intermediate steps and tool logs.'\n )\n", (84232, 84369), True, 'import streamlit as st\n'), ((84361, 84545), 'streamlit.write', 'st.write', (['"""This overcomes the limitation of the training data that is only up to a certain point in time, by being able to access the current internet to search for answers."""'], {}), "(\n 'This overcomes the limitation of the training data that is only up to a certain point in time, by being able to access the current internet to search for answers.'\n )\n", (84369, 84545), True, 'import streamlit as st\n'), ((84538, 84639), 'streamlit.write', 'st.write', (['"""You may need to run the following commands in terminal to install new libaries:"""'], {}), "(\n 'You may need to run the following commands in terminal to install new libaries:'\n )\n", (84546, 84639), True, 'import streamlit as st\n'), ((84631, 84677), 'streamlit.code', 'st.code', (['"""\npip install duckduckgo-search\n"""'], {}), '("""\npip install duckduckgo-search\n""")\n', (84638, 84677), True, 'import streamlit as st\n'), ((84681, 84711), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (84692, 84711), True, 'import streamlit as st\n'), ((84713, 86565), 'streamlit.code', 'st.code', (['"""\n# smart agents accessing the internet for free\n# https://github.com/langchain-ai/streamlit-agent/blob/main/streamlit_agent/search_and_chat.py\ndef ex16_agent_bot():\n\tst.title("🦜 LangChain: Chat with internet search")\n\n\tmsgs = StreamlitChatMessageHistory()\n\tmemory = ConversationBufferMemory(\n\t\tchat_memory=msgs,\n\t\treturn_messages=True,\n\t\tmemory_key="chat_history",\n\t\toutput_key="output",\n\t)\n\tif len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"):\n\t\tmsgs.clear()\n\t\tmsgs.add_ai_message("How can I help you?")\n\t\tst.session_state.steps = {}\n\n\tavatars = {"human": "user", "ai": "assistant"}\n\tfor idx, msg in enumerate(msgs.messages):\n\t\twith st.chat_message(avatars[msg.type]):\n\t\t\t# Render intermediate steps if any were saved\n\t\t\tfor step in st.session_state.steps.get(str(idx), []):\n\t\t\t\tif step[0].tool == "_Exception":\n\t\t\t\t\tcontinue\n\t\t\t\twith st.status(\n\t\t\t\t\tf"**{step[0].tool}**: {step[0].tool_input}", state="complete"\n\t\t\t\t):\n\t\t\t\t\tst.write(step[0].log)\n\t\t\t\t\tst.write(step[1])\n\t\t\tst.write(msg.content)\n\n\tif prompt := st.chat_input(placeholder="Enter a query on the Internet"):\n\t\tst.chat_message("user").write(prompt)\n\n\t\tllm = 
ChatOpenAI(\n\t\t\tmodel_name="gpt-3.5-turbo", openai_api_key=openai_api_key, streaming=True\n\t\t)\n\t\ttools = [DuckDuckGoSearchRun(name="Search")]\n\t\tchat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools)\n\t\texecutor = AgentExecutor.from_agent_and_tools(\n\t\t\tagent=chat_agent,\n\t\t\ttools=tools,\n\t\t\tmemory=memory,\n\t\t\treturn_intermediate_steps=True,\n\t\t\thandle_parsing_errors=True,\n\t\t)\n\t\twith st.chat_message("assistant"):\n\t\t\tst_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)\n\t\t\tresponse = executor(prompt, callbacks=[st_cb])\n\t\t\tst.write(response["output"])\n\t\t\tst.session_state.steps[str(len(msgs.messages) - 1)] = response[\n\t\t\t\t"intermediate_steps"\n\t\t\t]\n"""'], {}), '(\n """\n# smart agents accessing the internet for free\n# https://github.com/langchain-ai/streamlit-agent/blob/main/streamlit_agent/search_and_chat.py\ndef ex16_agent_bot():\n\tst.title("🦜 LangChain: Chat with internet search")\n\n\tmsgs = StreamlitChatMessageHistory()\n\tmemory = ConversationBufferMemory(\n\t\tchat_memory=msgs,\n\t\treturn_messages=True,\n\t\tmemory_key="chat_history",\n\t\toutput_key="output",\n\t)\n\tif len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"):\n\t\tmsgs.clear()\n\t\tmsgs.add_ai_message("How can I help you?")\n\t\tst.session_state.steps = {}\n\n\tavatars = {"human": "user", "ai": "assistant"}\n\tfor idx, msg in enumerate(msgs.messages):\n\t\twith st.chat_message(avatars[msg.type]):\n\t\t\t# Render intermediate steps if any were saved\n\t\t\tfor step in st.session_state.steps.get(str(idx), []):\n\t\t\t\tif step[0].tool == "_Exception":\n\t\t\t\t\tcontinue\n\t\t\t\twith st.status(\n\t\t\t\t\tf"**{step[0].tool}**: {step[0].tool_input}", state="complete"\n\t\t\t\t):\n\t\t\t\t\tst.write(step[0].log)\n\t\t\t\t\tst.write(step[1])\n\t\t\tst.write(msg.content)\n\n\tif prompt := st.chat_input(placeholder="Enter a query on the Internet"):\n\t\tst.chat_message("user").write(prompt)\n\n\t\tllm = ChatOpenAI(\n\t\t\tmodel_name="gpt-3.5-turbo", openai_api_key=openai_api_key, streaming=True\n\t\t)\n\t\ttools = [DuckDuckGoSearchRun(name="Search")]\n\t\tchat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools)\n\t\texecutor = AgentExecutor.from_agent_and_tools(\n\t\t\tagent=chat_agent,\n\t\t\ttools=tools,\n\t\t\tmemory=memory,\n\t\t\treturn_intermediate_steps=True,\n\t\t\thandle_parsing_errors=True,\n\t\t)\n\t\twith st.chat_message("assistant"):\n\t\t\tst_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)\n\t\t\tresponse = executor(prompt, callbacks=[st_cb])\n\t\t\tst.write(response["output"])\n\t\t\tst.session_state.steps[str(len(msgs.messages) - 1)] = response[\n\t\t\t\t"intermediate_steps"\n\t\t\t]\n"""\n )\n', (84720, 86565), True, 'import streamlit as st\n'), ((86557, 86593), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (86568, 86593), True, 'import streamlit as st\n'), ((86975, 87028), 'streamlit.session_state.vectorstore.similarity_search', 'st.session_state.vectorstore.similarity_search', (['query'], {}), '(query)\n', (87021, 87028), True, 'import streamlit as st\n'), ((87073, 87119), 'json.dumps', 'json.dumps', (['docs'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(docs, ensure_ascii=False, indent=4)\n', (87083, 87119), False, 'import json\n'), ((87197, 87247), 'streamlit.title', 'st.title', (['"""🦜 LangChain: Chat with internet search"""'], {}), "('🦜 LangChain: Chat with internet 
search')\n", (87205, 87247), True, 'import streamlit as st\n'), ((87312, 87341), 'langchain.memory.chat_message_histories.StreamlitChatMessageHistory', 'StreamlitChatMessageHistory', ([], {}), '()\n', (87339, 87341), False, 'from langchain.memory.chat_message_histories import StreamlitChatMessageHistory\n'), ((87352, 87469), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'chat_memory': 'msgs', 'return_messages': '(True)', 'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(chat_memory=msgs, return_messages=True, memory_key\n ='chat_history', output_key='output')\n", (87376, 87469), False, 'from langchain.memory import ConversationBufferMemory\n'), ((88960, 89018), 'streamlit.subheader', 'st.subheader', (['"""Exercise 17: Smart agent with vector store"""'], {}), "('Exercise 17: Smart agent with vector store')\n", (88972, 89018), True, 'import streamlit as st\n'), ((89020, 89109), 'streamlit.write', 'st.write', (['"""In this exercise, we will combine the vector store with the smart agent."""'], {}), "(\n 'In this exercise, we will combine the vector store with the smart agent.')\n", (89028, 89109), True, 'import streamlit as st\n'), ((89106, 89213), 'streamlit.write', 'st.write', (['"""This allows the chatbot to search for answers from the vector store and the internet."""'], {}), "(\n 'This allows the chatbot to search for answers from the vector store and the internet.'\n )\n", (89114, 89213), True, 'import streamlit as st\n'), ((89205, 89421), 'streamlit.write', 'st.write', (['"""The @tool("Document search") function is an enhancement to the chatbot. It allows for an initial internal document search based on the user query before resorting to external internet searches. """'], {}), '(\n \'The @tool("Document search") function is an enhancement to the chatbot. It allows for an initial internal document search based on the user query before resorting to external internet searches. 
\'\n )\n', (89213, 89421), True, 'import streamlit as st\n'), ((89415, 89484), 'streamlit.write', 'st.write', (['"""Copy and run the code below to see the chatbot in action."""'], {}), "('Copy and run the code below to see the chatbot in action.')\n", (89423, 89484), True, 'import streamlit as st\n'), ((89487, 89517), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (89498, 89517), True, 'import streamlit as st\n'), ((89519, 91871), 'streamlit.code', 'st.code', (['"""\n# agents ,vectorstores, wiki\n# https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval\n# note tool\n@tool("Document search")\ndef document_search(query: str) -> str:\n\t# this is the prompt to the tool itself\n\t"Use this function first to search for documents pertaining to the query before going into the internet"\n\tdocs = st.session_state.vectorstore.similarity_search(query)\n\tdocs = docs[0].page_content\n\tjson_string = json.dumps(docs, ensure_ascii=False, indent=4)\n\treturn json_string\n\n# combine vector store and internet search\ndef ex17_agent_bot():\n\tst.title("🦜 LangChain: Chat with internet search")\n\n\tst.session_state.vectorstore = vectorstore_creator()\n\n\tmsgs = StreamlitChatMessageHistory()\n\tmemory = ConversationBufferMemory(\n\t\tchat_memory=msgs,\n\t\treturn_messages=True,\n\t\tmemory_key="chat_history",\n\t\toutput_key="output",\n\t)\n\tif len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"):\n\t\tmsgs.clear()\n\t\tmsgs.add_ai_message("How can I help you?")\n\t\tst.session_state.steps = {}\n\n\tavatars = {"human": "user", "ai": "assistant"}\n\tfor idx, msg in enumerate(msgs.messages):\n\t\twith st.chat_message(avatars[msg.type]):\n\t\t\t# Render intermediate steps if any were saved\n\t\t\tfor step in st.session_state.steps.get(str(idx), []):\n\t\t\t\tif step[0].tool == "_Exception":\n\t\t\t\t\tcontinue\n\t\t\t\twith st.status(\n\t\t\t\t\tf"**{step[0].tool}**: {step[0].tool_input}", state="complete"\n\t\t\t\t):\n\t\t\t\t\tst.write(step[0].log)\n\t\t\t\t\tst.write(step[1])\n\t\t\tst.write(msg.content)\n\n\tif prompt := st.chat_input(placeholder="Enter a query on the Internet"):\n\t\tst.chat_message("user").write(prompt)\n\n\t\tllm = ChatOpenAI(\n\t\t\tmodel_name="gpt-3.5-turbo", openai_api_key=openai_api_key, streaming=True\n\t\t)\n\t\ttools = [document_search, DuckDuckGoSearchRun(name="Internet Search")]\n\t\tchat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools)\n\t\texecutor = AgentExecutor.from_agent_and_tools(\n\t\t\tagent=chat_agent,\n\t\t\ttools=tools,\n\t\t\tmemory=memory,\n\t\t\treturn_intermediate_steps=True,\n\t\t\thandle_parsing_errors=True,\n\t\t)\n\t\twith st.chat_message("assistant"):\n\t\t\tst_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)\n\t\t\tresponse = executor(prompt, callbacks=[st_cb])\n\t\t\tst.write(response["output"])\n\t\t\tst.session_state.steps[str(len(msgs.messages) - 1)] = response[\n\t\t\t\t"intermediate_steps"\n\t\t\t]\n"""'], {}), '(\n """\n# agents ,vectorstores, wiki\n# https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval\n# note tool\n@tool("Document search")\ndef document_search(query: str) -> str:\n\t# this is the prompt to the tool itself\n\t"Use this function first to search for documents pertaining to the query before going into the internet"\n\tdocs = st.session_state.vectorstore.similarity_search(query)\n\tdocs = docs[0].page_content\n\tjson_string = json.dumps(docs, 
ensure_ascii=False, indent=4)\n\treturn json_string\n\n# combine vector store and internet search\ndef ex17_agent_bot():\n\tst.title("🦜 LangChain: Chat with internet search")\n\n\tst.session_state.vectorstore = vectorstore_creator()\n\n\tmsgs = StreamlitChatMessageHistory()\n\tmemory = ConversationBufferMemory(\n\t\tchat_memory=msgs,\n\t\treturn_messages=True,\n\t\tmemory_key="chat_history",\n\t\toutput_key="output",\n\t)\n\tif len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"):\n\t\tmsgs.clear()\n\t\tmsgs.add_ai_message("How can I help you?")\n\t\tst.session_state.steps = {}\n\n\tavatars = {"human": "user", "ai": "assistant"}\n\tfor idx, msg in enumerate(msgs.messages):\n\t\twith st.chat_message(avatars[msg.type]):\n\t\t\t# Render intermediate steps if any were saved\n\t\t\tfor step in st.session_state.steps.get(str(idx), []):\n\t\t\t\tif step[0].tool == "_Exception":\n\t\t\t\t\tcontinue\n\t\t\t\twith st.status(\n\t\t\t\t\tf"**{step[0].tool}**: {step[0].tool_input}", state="complete"\n\t\t\t\t):\n\t\t\t\t\tst.write(step[0].log)\n\t\t\t\t\tst.write(step[1])\n\t\t\tst.write(msg.content)\n\n\tif prompt := st.chat_input(placeholder="Enter a query on the Internet"):\n\t\tst.chat_message("user").write(prompt)\n\n\t\tllm = ChatOpenAI(\n\t\t\tmodel_name="gpt-3.5-turbo", openai_api_key=openai_api_key, streaming=True\n\t\t)\n\t\ttools = [document_search, DuckDuckGoSearchRun(name="Internet Search")]\n\t\tchat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools)\n\t\texecutor = AgentExecutor.from_agent_and_tools(\n\t\t\tagent=chat_agent,\n\t\t\ttools=tools,\n\t\t\tmemory=memory,\n\t\t\treturn_intermediate_steps=True,\n\t\t\thandle_parsing_errors=True,\n\t\t)\n\t\twith st.chat_message("assistant"):\n\t\t\tst_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)\n\t\t\tresponse = executor(prompt, callbacks=[st_cb])\n\t\t\tst.write(response["output"])\n\t\t\tst.session_state.steps[str(len(msgs.messages) - 1)] = response[\n\t\t\t\t"intermediate_steps"\n\t\t\t]\n"""\n )\n', (89526, 91871), True, 'import streamlit as st\n'), ((91863, 91899), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (91874, 91899), True, 'import streamlit as st\n'), ((91997, 92038), 'streamlit.title', 'st.title', (['"""pandas-ai streamlit interface"""'], {}), "('pandas-ai streamlit interface')\n", (92005, 92038), True, 'import streamlit as st\n'), ((92099, 92148), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose a CSV file"""'], {'type': '"""csv"""'}), "('Choose a CSV file', type='csv')\n", (92115, 92148), True, 'import streamlit as st\n'), ((93144, 93174), 'os.path.join', 'os.path.join', (['"""exports/charts"""'], {}), "('exports/charts')\n", (93156, 93174), False, 'import os\n'), ((94162, 94193), 'streamlit.subheader', 'st.subheader', (['"""Prompt history:"""'], {}), "('Prompt history:')\n", (94174, 94193), True, 'import streamlit as st\n'), ((94195, 94236), 'streamlit.write', 'st.write', (['st.session_state.prompt_history'], {}), '(st.session_state.prompt_history)\n', (94203, 94236), True, 'import streamlit as st\n'), ((94242, 94260), 'streamlit.button', 'st.button', (['"""Clear"""'], {}), "('Clear')\n", (94251, 94260), True, 'import streamlit as st\n'), ((94351, 94394), 'streamlit.subheader', 'st.subheader', (['"""Exercise 18: Data Analytics"""'], {}), "('Exercise 18: Data Analytics')\n", (94363, 94394), True, 'import streamlit as st\n'), ((94396, 94496), 'streamlit.write', 'st.write', (['"""In this 
exercise, we will use the Pandas AI library to perform data analytics."""'], {}), "(\n 'In this exercise, we will use the Pandas AI library to perform data analytics.'\n )\n", (94404, 94496), True, 'import streamlit as st\n'), ((94488, 94596), 'streamlit.write', 'st.write', (['"""The Pandas AI library is a smart agent that can perform data analytics on a dataframe."""'], {}), "(\n 'The Pandas AI library is a smart agent that can perform data analytics on a dataframe.'\n )\n", (94496, 94596), True, 'import streamlit as st\n'), ((94588, 94646), 'streamlit.write', 'st.write', (['"""You may need to install the following library:"""'], {}), "('You may need to install the following library:')\n", (94596, 94646), True, 'import streamlit as st\n'), ((94648, 94674), 'streamlit.code', 'st.code', (['"""pip install bs4"""'], {}), "('pip install bs4')\n", (94655, 94674), True, 'import streamlit as st\n'), ((94680, 94749), 'streamlit.write', 'st.write', (['"""Copy and run the code below to see the chatbot in action."""'], {}), "('Copy and run the code below to see the chatbot in action.')\n", (94688, 94749), True, 'import streamlit as st\n'), ((94752, 94782), 'streamlit.markdown', 'st.markdown', (['"""**:blue[Code]**"""'], {}), "('**:blue[Code]**')\n", (94763, 94782), True, 'import streamlit as st\n'), ((94784, 97221), 'streamlit.code', 'st.code', (['"""\n# PandasAI- A smart agent that can do visual analytics\ndef ex18_pandas_AI():\n\tst.title("pandas-ai streamlit interface")\n\n\t# Upload CSV file using st.file_uploader\n\tuploaded_file = st.file_uploader("Choose a CSV file", type="csv")\n\tif "openai_key" not in st.session_state:\n\t\tst.session_state.openai_key = st.secrets["openapi_key"]\n\t\tst.session_state.prompt_history = []\n\t\tst.session_state.df = None\n\n\tif uploaded_file is not None:\n\t\ttry:\n\t\t\tdf = pd.read_csv(uploaded_file)\n\t\t\tst.session_state.df = df\n\t\texcept Exception as e:\n\t\t\tst.write("There was an error processing the CSV file.")\n\t\t\tst.write(e)\n\n\telse:\n\t\tst.session_state.df = pd.DataFrame(\n\t\t\t{\n\t\t\t\t"country": [\n\t\t\t\t\t"United States",\n\t\t\t\t\t"United Kingdom",\n\t\t\t\t\t"France",\n\t\t\t\t\t"Germany",\n\t\t\t\t\t"Italy",\n\t\t\t\t\t"Spain",\n\t\t\t\t\t"Canada",\n\t\t\t\t\t"Australia",\n\t\t\t\t\t"Japan",\n\t\t\t\t\t"China",\n\t\t\t\t],\n\t\t\t\t"gdp": [\n\t\t\t\t\t19294482071552,\n\t\t\t\t\t2891615567872,\n\t\t\t\t\t2411255037952,\n\t\t\t\t\t3435817336832,\n\t\t\t\t\t1745433788416,\n\t\t\t\t\t1181205135360,\n\t\t\t\t\t1607402389504,\n\t\t\t\t\t1490967855104,\n\t\t\t\t\t4380756541440,\n\t\t\t\t\t14631844184064,\n\t\t\t\t],\n\t\t\t\t"happiness_index": [\n\t\t\t\t\t6.94,\n\t\t\t\t\t7.16,\n\t\t\t\t\t6.66,\n\t\t\t\t\t7.07,\n\t\t\t\t\t6.38,\n\t\t\t\t\t6.4,\n\t\t\t\t\t7.23,\n\t\t\t\t\t7.22,\n\t\t\t\t\t5.87,\n\t\t\t\t\t5.12,\n\t\t\t\t],\n\t\t\t}\n\t\t)\n\tchart_path = os.path.join("exports/charts")\n\twith st.form("Question"):\n\t\tquestion = st.text_input("Question", value="", type="default")\n\t\tsubmitted = st.form_submit_button("Submit")\n\t\tif submitted:\n\t\t\twith st.spinner():\n\t\t\t\tllm = OpenAI(api_token=st.session_state.openai_key)\n\t\t\t\tdf = SmartDataframe(\n\t\t\t\t\tst.session_state.df,\n\t\t\t\t\tconfig={\n\t\t\t\t\t\t"llm": llm,\n\t\t\t\t\t\t"save_charts_path": chart_path,\n\t\t\t\t\t\t"save_charts": True,\n\t\t\t\t\t\t"verbose": True,\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t\tresponse = df.chat(\n\t\t\t\t\tquestion\n\t\t\t\t) # Using \'chat\' method based on your context\n\n\t\t\t\t# Display the textual response (if 
any):\n\t\t\t\tif response:\n\t\t\t\t\tst.write(response)\n\t\t\t\tchart_path = os.path.join("exports/charts", "temp_chart.png")\n\t\t\t\tif os.path.exists(chart_path):\n\t\t\t\t\tst.image(\n\t\t\t\t\t\tchart_path, caption="Generated Chart", use_column_width=True\n\t\t\t\t\t)\n\t\t\t\t# Append the question to the history:\n\t\t\t\tst.session_state.prompt_history.append(question)\n\n\tif st.session_state.df is not None:\n\t\tst.subheader("Current dataframe:")\n\t\tst.write(st.session_state.df)\n\n\tst.subheader("Prompt history:")\n\tst.write(st.session_state.prompt_history)\n\n\tif st.button("Clear"):\n\t\tst.session_state.prompt_history = []\n\t\tst.session_state.df = None\n"""'], {}), '(\n """\n# PandasAI- A smart agent that can do visual analytics\ndef ex18_pandas_AI():\n\tst.title("pandas-ai streamlit interface")\n\n\t# Upload CSV file using st.file_uploader\n\tuploaded_file = st.file_uploader("Choose a CSV file", type="csv")\n\tif "openai_key" not in st.session_state:\n\t\tst.session_state.openai_key = st.secrets["openapi_key"]\n\t\tst.session_state.prompt_history = []\n\t\tst.session_state.df = None\n\n\tif uploaded_file is not None:\n\t\ttry:\n\t\t\tdf = pd.read_csv(uploaded_file)\n\t\t\tst.session_state.df = df\n\t\texcept Exception as e:\n\t\t\tst.write("There was an error processing the CSV file.")\n\t\t\tst.write(e)\n\n\telse:\n\t\tst.session_state.df = pd.DataFrame(\n\t\t\t{\n\t\t\t\t"country": [\n\t\t\t\t\t"United States",\n\t\t\t\t\t"United Kingdom",\n\t\t\t\t\t"France",\n\t\t\t\t\t"Germany",\n\t\t\t\t\t"Italy",\n\t\t\t\t\t"Spain",\n\t\t\t\t\t"Canada",\n\t\t\t\t\t"Australia",\n\t\t\t\t\t"Japan",\n\t\t\t\t\t"China",\n\t\t\t\t],\n\t\t\t\t"gdp": [\n\t\t\t\t\t19294482071552,\n\t\t\t\t\t2891615567872,\n\t\t\t\t\t2411255037952,\n\t\t\t\t\t3435817336832,\n\t\t\t\t\t1745433788416,\n\t\t\t\t\t1181205135360,\n\t\t\t\t\t1607402389504,\n\t\t\t\t\t1490967855104,\n\t\t\t\t\t4380756541440,\n\t\t\t\t\t14631844184064,\n\t\t\t\t],\n\t\t\t\t"happiness_index": [\n\t\t\t\t\t6.94,\n\t\t\t\t\t7.16,\n\t\t\t\t\t6.66,\n\t\t\t\t\t7.07,\n\t\t\t\t\t6.38,\n\t\t\t\t\t6.4,\n\t\t\t\t\t7.23,\n\t\t\t\t\t7.22,\n\t\t\t\t\t5.87,\n\t\t\t\t\t5.12,\n\t\t\t\t],\n\t\t\t}\n\t\t)\n\tchart_path = os.path.join("exports/charts")\n\twith st.form("Question"):\n\t\tquestion = st.text_input("Question", value="", type="default")\n\t\tsubmitted = st.form_submit_button("Submit")\n\t\tif submitted:\n\t\t\twith st.spinner():\n\t\t\t\tllm = OpenAI(api_token=st.session_state.openai_key)\n\t\t\t\tdf = SmartDataframe(\n\t\t\t\t\tst.session_state.df,\n\t\t\t\t\tconfig={\n\t\t\t\t\t\t"llm": llm,\n\t\t\t\t\t\t"save_charts_path": chart_path,\n\t\t\t\t\t\t"save_charts": True,\n\t\t\t\t\t\t"verbose": True,\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t\tresponse = df.chat(\n\t\t\t\t\tquestion\n\t\t\t\t) # Using \'chat\' method based on your context\n\n\t\t\t\t# Display the textual response (if any):\n\t\t\t\tif response:\n\t\t\t\t\tst.write(response)\n\t\t\t\tchart_path = os.path.join("exports/charts", "temp_chart.png")\n\t\t\t\tif os.path.exists(chart_path):\n\t\t\t\t\tst.image(\n\t\t\t\t\t\tchart_path, caption="Generated Chart", use_column_width=True\n\t\t\t\t\t)\n\t\t\t\t# Append the question to the history:\n\t\t\t\tst.session_state.prompt_history.append(question)\n\n\tif st.session_state.df is not None:\n\t\tst.subheader("Current dataframe:")\n\t\tst.write(st.session_state.df)\n\n\tst.subheader("Prompt history:")\n\tst.write(st.session_state.prompt_history)\n\n\tif st.button("Clear"):\n\t\tst.session_state.prompt_history = []\n\t\tst.session_state.df 
= None\n"""\n )\n', (94791, 97221), True, 'import streamlit as st\n'), ((97213, 97249), 'streamlit.markdown', 'st.markdown', (['"""**:red[Code Output]**"""'], {}), "('**:red[Code Output]**')\n", (97224, 97249), True, 'import streamlit as st\n'), ((1605, 1631), 'streamlit.expander', 'st.expander', (['"""Reveal Code"""'], {}), "('Reveal Code')\n", (1616, 1631), True, 'import streamlit as st\n'), ((1635, 1672), 'streamlit.code', 'st.code', (['"""\n#challenge code here\n"""'], {}), '("""\n#challenge code here\n""")\n', (1642, 1672), True, 'import streamlit as st\n'), ((10955, 10980), 'streamlit.write', 'st.write', (["('Hello ' + name)"], {}), "('Hello ' + name)\n", (10963, 10980), True, 'import streamlit as st\n'), ((12507, 12592), 'streamlit.text', 'st.text', (['f"""Hello {name}, you are {gender} and this year you are {age} years old"""'], {}), "(f'Hello {name}, you are {gender} and this year you are {age} years old'\n )\n", (12514, 12592), True, 'import streamlit as st\n'), ((13221, 13247), 'streamlit.expander', 'st.expander', (['"""Reveal Code"""'], {}), "('Reveal Code')\n", (13232, 13247), True, 'import streamlit as st\n'), ((13251, 13587), 'streamlit.code', 'st.code', (['"""\ndef ch1():\n name = st.text_input("Enter your name")\n gender = st.selectbox("State your gender", ["Male", "Female"])\n age = st.text_input("State your age", 18)\n\n if name and gender and age:\n st.text(f"Hello {name}, you are {gender} and this year you are {age} years old")\n"""'], {}), '(\n """\ndef ch1():\n name = st.text_input("Enter your name")\n gender = st.selectbox("State your gender", ["Male", "Female"])\n age = st.text_input("State your age", 18)\n\n if name and gender and age:\n st.text(f"Hello {name}, you are {gender} and this year you are {age} years old")\n"""\n )\n', (13258, 13587), True, 'import streamlit as st\n'), ((13739, 13774), 'streamlit.text_input', 'st.text_input', (['"""State your age"""', '(18)'], {}), "('State your age', 18)\n", (13752, 13774), True, 'import streamlit as st\n'), ((13935, 13967), 'streamlit.write', 'st.write', (['"""You are a male adult"""'], {}), "('You are a male adult')\n", (13943, 13967), True, 'import streamlit as st\n'), ((14240, 14272), 'streamlit.write', 'st.write', (['"""Here is your photo: """'], {}), "('Here is your photo: ')\n", (14248, 14272), True, 'import streamlit as st\n'), ((14281, 14296), 'streamlit.image', 'st.image', (['photo'], {}), '(photo)\n', (14289, 14296), True, 'import streamlit as st\n'), ((14315, 14341), 'streamlit.write', 'st.write', (['"""No photo taken"""'], {}), "('No photo taken')\n", (14323, 14341), True, 'import streamlit as st\n'), ((15430, 15445), 'streamlit.write', 'st.write', (['fruit'], {}), '(fruit)\n', (15438, 15445), True, 'import streamlit as st\n'), ((18061, 18093), 'streamlit.write', 'st.write', (['"""session_data: """', 'data'], {}), "('session_data: ', data)\n", (18069, 18093), True, 'import streamlit as st\n'), ((19387, 19428), 'streamlit.write', 'st.write', (['"""name: """', 'st.session_state.name'], {}), "('name: ', st.session_state.name)\n", (19395, 19428), True, 'import streamlit as st\n'), ((19484, 19523), 'streamlit.write', 'st.write', (['"""age: """', 'st.session_state.age'], {}), "('age: ', st.session_state.age)\n", (19492, 19523), True, 'import streamlit as st\n'), ((19583, 19628), 'streamlit.write', 'st.write', (['"""gender: """', 'st.session_state.gender'], {}), "('gender: ', st.session_state.gender)\n", (19591, 19628), True, 'import streamlit as st\n'), ((22136, 22162), 'streamlit.expander', 'st.expander', 
(['"""Reveal Code"""'], {}), "('Reveal Code')\n", (22147, 22162), True, 'import streamlit as st\n'), ((22166, 23451), 'streamlit.code', 'st.code', (['"""\ndef ch4():\n\tif "name" not in st.session_state:\n\t\tst.session_state.name = "Yoda"\n\n\tif "age" not in st.session_state:\n\t\tst.session_state.age = 999\n\n\tif "gender" not in st.session_state:\n\t\tst.session_state.gender = "male"\n\n\tif "prompt_template" not in st.session_state:\n\t\tst.session_state.prompt_template = "Speak like Yoda from Star Wars for every question that was asked, do not give a direct answer but ask more questions in the style of wise Yoda from Star Wars"\n\n\tst.write("session_state.name: ", st.session_state.name)\n\tst.write("session_state.age: ", st.session_state.age)\n\tst.write("session_state.gender: ", st.session_state.gender)\n\tst.write("session_state.prompt_template: ", st.session_state.prompt_template)\n\ndef main():\n\t# initialize session state, from ch4\n\tif "name" not in st.session_state:\n\t\tst.session_state.name = "Yoda"\n\n\tif "age" not in st.session_state:\n\t\tst.session_state.age = 999\n\n\tif "gender" not in st.session_state:\n\t\tst.session_state.gender = "male"\n\n\tif "prompt_template" not in st.session_state:\n\t\tst.session_state.prompt_template = "Speak like Yoda from Star Wars for every question that was asked, do not give a direct answer but ask more questions in the style of wise Yoda from Star Wars"\n\t\t \n\t#the rest of your code is below\n"""'], {}), '(\n """\ndef ch4():\n\tif "name" not in st.session_state:\n\t\tst.session_state.name = "Yoda"\n\n\tif "age" not in st.session_state:\n\t\tst.session_state.age = 999\n\n\tif "gender" not in st.session_state:\n\t\tst.session_state.gender = "male"\n\n\tif "prompt_template" not in st.session_state:\n\t\tst.session_state.prompt_template = "Speak like Yoda from Star Wars for every question that was asked, do not give a direct answer but ask more questions in the style of wise Yoda from Star Wars"\n\n\tst.write("session_state.name: ", st.session_state.name)\n\tst.write("session_state.age: ", st.session_state.age)\n\tst.write("session_state.gender: ", st.session_state.gender)\n\tst.write("session_state.prompt_template: ", st.session_state.prompt_template)\n\ndef main():\n\t# initialize session state, from ch4\n\tif "name" not in st.session_state:\n\t\tst.session_state.name = "Yoda"\n\n\tif "age" not in st.session_state:\n\t\tst.session_state.age = 999\n\n\tif "gender" not in st.session_state:\n\t\tst.session_state.gender = "male"\n\n\tif "prompt_template" not in st.session_state:\n\t\tst.session_state.prompt_template = "Speak like Yoda from Star Wars for every question that was asked, do not give a direct answer but ask more questions in the style of wise Yoda from Star Wars"\n\t\t \n\t#the rest of your code is below\n"""\n )\n', (22173, 23451), True, 'import streamlit as st\n'), ((23685, 23742), 'streamlit.write', 'st.write', (['f"""User has sent the following prompt: {prompt}"""'], {}), "(f'User has sent the following prompt: {prompt}')\n", (23693, 23742), True, 'import streamlit as st\n'), ((23745, 23786), 'streamlit.session_state.store_msg.append', 'st.session_state.store_msg.append', (['prompt'], {}), '(prompt)\n', (23778, 23786), True, 'import streamlit as st\n'), ((25229, 25257), 'streamlit.chat_input', 'st.chat_input', (['"""What is up?"""'], {}), "('What is up?')\n", (25242, 25257), True, 'import streamlit as st\n'), ((25392, 25461), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 
'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (25424, 25461), True, 'import streamlit as st\n'), ((25658, 25734), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': response}"], {}), "({'role': 'assistant', 'content': response})\n", (25690, 25734), True, 'import streamlit as st\n'), ((27382, 27415), 'streamlit.chat_input', 'st.chat_input', (['"""Enter your query"""'], {}), "('Enter your query')\n", (27395, 27415), True, 'import streamlit as st\n'), ((28745, 28771), 'streamlit.expander', 'st.expander', (['"""Reveal Code"""'], {}), "('Reveal Code')\n", (28756, 28771), True, 'import streamlit as st\n'), ((28775, 29813), 'streamlit.code', 'st.code', (['"""\n#Challenge 6 : Rule-based If-Else Chatbot\ndef ch6():\n\tst.markdown("**Rule Based Bot**")\n\n\t# Initialize chat history\n\tif "messages" not in st.session_state:\n\t\tst.session_state.messages = []\n\n\t# # Display chat messages from history on app rerun\n\tfor message in st.session_state.messages:\n\t\twith st.chat_message(message["role"]):\n\t\t\tst.markdown(message["content"])\n\n\t# React to user input\n\tif prompt := st.chat_input("Enter your query"):\n\t\tif prompt == "Hello":\n\t\t\treply = "Hi there what can I do for you"\n\n\t\telif prompt == "What is your name?":\n\t\t\treply = "My name is EAI , an electronic artificial being"\n\n\t\telif prompt == "How old are you?":\n\t\t\treply = "Today is my birthday!"\n\t\t\n\t\telse:\n\t\t\treply = "I am sorry, I am unable to help you with your query"\n\n\t\twith st.chat_message("user"):\n\t\t\tst.write(prompt)\n\t\t\tst.session_state.messages.append({"role": "user", "content": prompt})\n\t\twith st.chat_message("assistant"):\n\t\t\tst.write(reply)\n\t\t\tst.session_state.messages.append({"role": "assistant", "content": reply})\n"""'], {}), '(\n """\n#Challenge 6 : Rule-based If-Else Chatbot\ndef ch6():\n\tst.markdown("**Rule Based Bot**")\n\n\t# Initialize chat history\n\tif "messages" not in st.session_state:\n\t\tst.session_state.messages = []\n\n\t# # Display chat messages from history on app rerun\n\tfor message in st.session_state.messages:\n\t\twith st.chat_message(message["role"]):\n\t\t\tst.markdown(message["content"])\n\n\t# React to user input\n\tif prompt := st.chat_input("Enter your query"):\n\t\tif prompt == "Hello":\n\t\t\treply = "Hi there what can I do for you"\n\n\t\telif prompt == "What is your name?":\n\t\t\treply = "My name is EAI , an electronic artificial being"\n\n\t\telif prompt == "How old are you?":\n\t\t\treply = "Today is my birthday!"\n\t\t\n\t\telse:\n\t\t\treply = "I am sorry, I am unable to help you with your query"\n\n\t\twith st.chat_message("user"):\n\t\t\tst.write(prompt)\n\t\t\tst.session_state.messages.append({"role": "user", "content": prompt})\n\t\twith st.chat_message("assistant"):\n\t\t\tst.write(reply)\n\t\t\tst.session_state.messages.append({"role": "assistant", "content": reply})\n"""\n )\n', (28782, 29813), True, 'import streamlit as st\n'), ((34282, 34308), 'streamlit.expander', 'st.expander', (['"""Reveal Code"""'], {}), "('Reveal Code')\n", (34293, 34308), True, 'import streamlit as st\n'), ((34312, 35567), 'streamlit.code', 'st.code', (['"""\t\n#Challenge 8: Incorporating the API into your chatbot\ndef chat_completion(prompt):\n\tMODEL = "gpt-3.5-turbo"\n\tresponse = openai.ChatCompletion.create(\n\t\tmodel=MODEL,\n\t\tmessages=[\n\t\t\t{"role": "system", "content": "You are a helpful assistant."},\n\t\t\t{"role": "user", "content": 
prompt},\n\t\t],\n\t\ttemperature=0,\n\t)\n\treturn response["choices"][0]["message"]["content"].strip()\n\t\ndef ch8():\n\tst.title("My first LLM Chatbot")\n\n\t# Initialize chat history\n\tif "chat_msg" not in st.session_state:\n\t\tst.session_state.chat_msg = []\n\n\t# Display chat chat_msg from history on app rerun\n\tfor message in st.session_state.chat_msg:\n\t\twith st.chat_message(message["role"]):\n\t\t\tst.markdown(message["content"])\n\n\t# React to user input\n\tif prompt := st.chat_input("What\'s up?"):\n\t\t# Display user message in chat message container\n\t\treply = chat_completion(prompt)\n\t\tst.chat_message("user").markdown(prompt)\n\t\t# Add user message to chat history\n\t\tst.session_state.chat_msg.append({"role": "user", "content": prompt})\n\t\t# Display assistant response in chat message container\n\t\twith st.chat_message("assistant"):\n\t\t\tst.markdown(reply)\n\t\t# Add assistant response to chat history\n\t\tst.session_state.chat_msg.append({"role": "assistant", "content": reply})\n"""'], {}), '(\n """\t\n#Challenge 8: Incorporating the API into your chatbot\ndef chat_completion(prompt):\n\tMODEL = "gpt-3.5-turbo"\n\tresponse = openai.ChatCompletion.create(\n\t\tmodel=MODEL,\n\t\tmessages=[\n\t\t\t{"role": "system", "content": "You are a helpful assistant."},\n\t\t\t{"role": "user", "content": prompt},\n\t\t],\n\t\ttemperature=0,\n\t)\n\treturn response["choices"][0]["message"]["content"].strip()\n\t\ndef ch8():\n\tst.title("My first LLM Chatbot")\n\n\t# Initialize chat history\n\tif "chat_msg" not in st.session_state:\n\t\tst.session_state.chat_msg = []\n\n\t# Display chat chat_msg from history on app rerun\n\tfor message in st.session_state.chat_msg:\n\t\twith st.chat_message(message["role"]):\n\t\t\tst.markdown(message["content"])\n\n\t# React to user input\n\tif prompt := st.chat_input("What\'s up?"):\n\t\t# Display user message in chat message container\n\t\treply = chat_completion(prompt)\n\t\tst.chat_message("user").markdown(prompt)\n\t\t# Add user message to chat history\n\t\tst.session_state.chat_msg.append({"role": "user", "content": prompt})\n\t\t# Display assistant response in chat message container\n\t\twith st.chat_message("assistant"):\n\t\t\tst.markdown(reply)\n\t\t# Add assistant response to chat history\n\t\tst.session_state.chat_msg.append({"role": "assistant", "content": reply})\n"""\n )\n', (34319, 35567), True, 'import streamlit as st\n'), ((35934, 35962), 'streamlit.chat_input', 'st.chat_input', (['"""What is up?"""'], {}), "('What is up?')\n", (35947, 35962), True, 'import streamlit as st\n'), ((36131, 36200), 'streamlit.session_state.chat_msg.append', 'st.session_state.chat_msg.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (36163, 36200), True, 'import streamlit as st\n'), ((36362, 36435), 'streamlit.session_state.chat_msg.append', 'st.session_state.chat_msg.append', (["{'role': 'assistant', 'content': reply}"], {}), "({'role': 'assistant', 'content': reply})\n", (36394, 36435), True, 'import streamlit as st\n'), ((44324, 44350), 'streamlit.expander', 'st.expander', (['"""Reveal Code"""'], {}), "('Reveal Code')\n", (44335, 44350), True, 'import streamlit as st\n'), ((44354, 45895), 'streamlit.code', 'st.code', (['"""\n#Challenge 10\n#mod chat complete stream function by replacing system content to session_state prompt template\ndef chat_completion_stream_prompt(prompt):\n\tMODEL = "gpt-3.5-turbo" #consider changing this to session_state\n\tresponse = 
openai.ChatCompletion.create(\n\t\tmodel=MODEL,\n\t\tmessages=[\n\t\t\t{"role": "system", "content": st.session_state.prompt_template},\n\t\t\t{"role": "user", "content": prompt},\n\t\t],\n\t\ttemperature= 0, # temperature\n\t\tstream=True #stream option\n\t)\n\treturn response\n\n# Challenge 10: Make the bot speak like someone you know\ndef ch10_basebot():\n\t# call the function in your base bot\n\t# Initialize chat history\n\tif "msg" not in st.session_state:\n\t\tst.session_state.msg = []\n\n\t# Showing Chat history\n\tfor message in st.session_state.msg:\n\t\twith st.chat_message(message["role"]):\n\t\t\tst.markdown(message["content"])\n\ttry:\n\t\t#\n\t\tif prompt := st.chat_input("What is up?"):\n\t\t\t#set user prompt in chat history\n\t\t\tst.session_state.msg.append({"role": "user", "content": prompt})\n\t\t\twith st.chat_message("user"):\n\t\t\t\tst.markdown(prompt)\n\n\t\t\twith st.chat_message("assistant"):\n\t\t\t\tmessage_placeholder = st.empty()\n\t\t\t\tfull_response = ""\n\t\t\t\t#streaming function\n\t\t\t\tfor response in chat_completion_stream_prompt(prompt):\n\t\t\t\t\tfull_response += response.choices[0].delta.get("content", "")\n\t\t\t\t\tmessage_placeholder.markdown(full_response + "▌")\n\t\t\t\tmessage_placeholder.markdown(full_response)\n\t\t\tst.session_state.msg.append({"role": "assistant", "content": full_response})\n\n\texcept Exception as e:\n\t\tst.error(e)\n"""'], {}), '(\n """\n#Challenge 10\n#mod chat complete stream function by replacing system content to session_state prompt template\ndef chat_completion_stream_prompt(prompt):\n\tMODEL = "gpt-3.5-turbo" #consider changing this to session_state\n\tresponse = openai.ChatCompletion.create(\n\t\tmodel=MODEL,\n\t\tmessages=[\n\t\t\t{"role": "system", "content": st.session_state.prompt_template},\n\t\t\t{"role": "user", "content": prompt},\n\t\t],\n\t\ttemperature= 0, # temperature\n\t\tstream=True #stream option\n\t)\n\treturn response\n\n# Challenge 10: Make the bot speak like someone you know\ndef ch10_basebot():\n\t# call the function in your base bot\n\t# Initialize chat history\n\tif "msg" not in st.session_state:\n\t\tst.session_state.msg = []\n\n\t# Showing Chat history\n\tfor message in st.session_state.msg:\n\t\twith st.chat_message(message["role"]):\n\t\t\tst.markdown(message["content"])\n\ttry:\n\t\t#\n\t\tif prompt := st.chat_input("What is up?"):\n\t\t\t#set user prompt in chat history\n\t\t\tst.session_state.msg.append({"role": "user", "content": prompt})\n\t\t\twith st.chat_message("user"):\n\t\t\t\tst.markdown(prompt)\n\n\t\t\twith st.chat_message("assistant"):\n\t\t\t\tmessage_placeholder = st.empty()\n\t\t\t\tfull_response = ""\n\t\t\t\t#streaming function\n\t\t\t\tfor response in chat_completion_stream_prompt(prompt):\n\t\t\t\t\tfull_response += response.choices[0].delta.get("content", "")\n\t\t\t\t\tmessage_placeholder.markdown(full_response + "▌")\n\t\t\t\tmessage_placeholder.markdown(full_response)\n\t\t\tst.session_state.msg.append({"role": "assistant", "content": full_response})\n\n\texcept Exception as e:\n\t\tst.error(e)\n"""\n )\n', (44361, 45895), True, 'import streamlit as st\n'), ((46626, 46666), 'streamlit.write', 'st.write', (['"""Input prompt: """', 'input_prompt'], {}), "('Input prompt: ', input_prompt)\n", (46634, 46666), True, 'import streamlit as st\n'), ((48045, 48071), 'streamlit.form', 'st.form', (['"""Prompt Template"""'], {}), "('Prompt Template')\n", (48052, 48071), True, 'import streamlit as st\n'), ((48088, 48126), 'streamlit.text_input', 'st.text_input', 
(['"""Enter the occupation:"""'], {}), "('Enter the occupation:')\n", (48101, 48126), True, 'import streamlit as st\n'), ((48137, 48170), 'streamlit.text_input', 'st.text_input', (['"""Enter the topic:"""'], {}), "('Enter the topic:')\n", (48150, 48170), True, 'import streamlit as st\n'), ((48179, 48210), 'streamlit.text_input', 'st.text_input', (['"""Enter the age:"""'], {}), "('Enter the age:')\n", (48192, 48210), True, 'import streamlit as st\n'), ((48267, 48298), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Submit"""'], {}), "('Submit')\n", (48288, 48298), True, 'import streamlit as st\n'), ((51721, 51782), 'streamlit.write', 'st.write', (['"""New session_state.prompt_template: """', 'input_prompt'], {}), "('New session_state.prompt_template: ', input_prompt)\n", (51729, 51782), True, 'import streamlit as st\n'), ((52639, 52665), 'streamlit.expander', 'st.expander', (['"""Reveal Code"""'], {}), "('Reveal Code')\n", (52650, 52665), True, 'import streamlit as st\n'), ((52669, 53613), 'streamlit.code', 'st.code', (['"""\ndef ch11():\n\t# instead of running of the langchain, we are going to use the prompt template and run it the chatbot using format\n\tprompt_template = PromptTemplate(\n\t\tinput_variables=["occupation", "topic", "age"],\n\t\ttemplate=""\\"Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""\\",\n\t)\n\tdict_inputs = prompt_inputs_form()\n\tif dict_inputs:\n\t\tinput_prompt = prompt_template.format(\n\t\t\toccupation=dict_inputs["occupation"],\n\t\t\ttopic=dict_inputs["topic"],\n\t\t\tage=dict_inputs["age"],\n\t\t)\n\t\t# set session_state.prompt_template\n\t\tst.session_state.prompt_template = input_prompt\n\t\tst.write("New session_state.prompt_template: ", input_prompt)\n\t# call the ch10() basebot with the new session_state.prompt_template\n\tch10()\n"""'], {}), '(\n """\ndef ch11():\n\t# instead of running of the langchain, we are going to use the prompt template and run it the chatbot using format\n\tprompt_template = PromptTemplate(\n\t\tinput_variables=["occupation", "topic", "age"],\n\t\ttemplate=""\\"Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""\\",\n\t)\n\tdict_inputs = prompt_inputs_form()\n\tif dict_inputs:\n\t\tinput_prompt = prompt_template.format(\n\t\t\toccupation=dict_inputs["occupation"],\n\t\t\ttopic=dict_inputs["topic"],\n\t\t\tage=dict_inputs["age"],\n\t\t)\n\t\t# set session_state.prompt_template\n\t\tst.session_state.prompt_template = input_prompt\n\t\tst.write("New session_state.prompt_template: ", input_prompt)\n\t# call the ch10() basebot with the new session_state.prompt_template\n\tch10()\n"""\n )\n', (52676, 53613), True, 'import streamlit as st\n'), ((55833, 55868), 'langchain.memory.ConversationBufferWindowMemory', 'ConversationBufferWindowMemory', ([], {'k': '(3)'}), '(k=3)\n', (55863, 55868), False, 'from langchain.memory import ConversationBufferWindowMemory\n'), ((58563, 58589), 'streamlit.expander', 'st.expander', (['"""Reveal Code"""'], {}), "('Reveal Code')\n", (58574, 58589), True, 'import streamlit as st\n'), ((58593, 60969), 'streamlit.code', 'st.code', (['"""\ndef ch12():\n\t# Prompt_template form from 
ex11\n\tprompt_template = PromptTemplate(\n\t\tinput_variables=["occupation", "topic", "age"],\n\t\ttemplate=""\\"Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information\n\t\t\t\t\t\tto the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""\\",\n\t)\n\tdict_inputs = prompt_inputs_form()\n\tif dict_inputs:\n\t\tinput_prompt = prompt_template.format(\n\t\t\toccupation=dict_inputs["occupation"],\n\t\t\ttopic=dict_inputs["topic"],\n\t\t\tage=dict_inputs["age"],\n\t\t)\n\telse:\n\t\tinput_prompt = "You are a helpful assistant. "\n\n\tst.write("input prompt: ", input_prompt)\n\n\tif "memory" not in st.session_state:\n\t\tst.session_state.memory = ConversationBufferWindowMemory(k=3)\n\n\t# step 1 save the memory from your chatbot\n\t# step 2 integrate the memory in the prompt_template (st.session_state.prompt_template) show a hint\n\tmemory_data = st.session_state.memory.load_memory_variables({})\n\tst.write("Memory Data: ", memory_data)\n\tst.session_state.prompt_template = f""\\"\n{input_prompt}\t\t\t\t\t\t\t\t\t\t\n\nBelow is the conversation history between the AI and Users so far\n\t\t\t\t\t\t\t\t\t\t\n{memory_data}\n\n""\\"\n\n\tst.write("New prompt template: ", st.session_state.prompt_template)\n\t# call the function in your base bot\n\t# Initialize chat history\n\tif "msg" not in st.session_state:\n\t\tst.session_state.msg = []\n\n\t# Showing Chat history\n\tfor message in st.session_state.msg:\n\t\twith st.chat_message(message["role"]):\n\t\t\tst.markdown(message["content"])\n\ttry:\n\t\t#\n\t\tif prompt := st.chat_input("What is up?"):\n\t\t\t# set user prompt in chat history\n\t\t\tst.session_state.msg.append({"role": "user", "content": prompt})\n\t\t\twith st.chat_message("user"):\n\t\t\t\tst.markdown(prompt)\n\n\t\t\twith st.chat_message("assistant"):\n\t\t\t\tmessage_placeholder = st.empty()\n\t\t\t\tfull_response = ""\n\t\t\t\t# streaming function\n\t\t\t\tfor response in chat_completion_stream_prompt(prompt):\n\t\t\t\t\tfull_response += response.choices[0].delta.get("content", "")\n\t\t\t\t\tmessage_placeholder.markdown(full_response + "▌")\n\t\t\t\tmessage_placeholder.markdown(full_response)\n\t\t\tst.session_state.msg.append({"role": "assistant", "content": full_response})\n\t\t\tst.session_state.memory.save_context(\n\t\t\t\t{"input": prompt}, {"output": full_response}\n\t\t\t)\n\texcept Exception as e:\n\t\tst.error(e)\n"""'], {}), '(\n """\ndef ch12():\n\t# Prompt_template form from ex11\n\tprompt_template = PromptTemplate(\n\t\tinput_variables=["occupation", "topic", "age"],\n\t\ttemplate=""\\"Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information\n\t\t\t\t\t\tto the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""\\",\n\t)\n\tdict_inputs = prompt_inputs_form()\n\tif dict_inputs:\n\t\tinput_prompt = prompt_template.format(\n\t\t\toccupation=dict_inputs["occupation"],\n\t\t\ttopic=dict_inputs["topic"],\n\t\t\tage=dict_inputs["age"],\n\t\t)\n\telse:\n\t\tinput_prompt = "You are a helpful assistant. 
"\n\n\tst.write("input prompt: ", input_prompt)\n\n\tif "memory" not in st.session_state:\n\t\tst.session_state.memory = ConversationBufferWindowMemory(k=3)\n\n\t# step 1 save the memory from your chatbot\n\t# step 2 integrate the memory in the prompt_template (st.session_state.prompt_template) show a hint\n\tmemory_data = st.session_state.memory.load_memory_variables({})\n\tst.write("Memory Data: ", memory_data)\n\tst.session_state.prompt_template = f""\\"\n{input_prompt}\t\t\t\t\t\t\t\t\t\t\n\nBelow is the conversation history between the AI and Users so far\n\t\t\t\t\t\t\t\t\t\t\n{memory_data}\n\n""\\"\n\n\tst.write("New prompt template: ", st.session_state.prompt_template)\n\t# call the function in your base bot\n\t# Initialize chat history\n\tif "msg" not in st.session_state:\n\t\tst.session_state.msg = []\n\n\t# Showing Chat history\n\tfor message in st.session_state.msg:\n\t\twith st.chat_message(message["role"]):\n\t\t\tst.markdown(message["content"])\n\ttry:\n\t\t#\n\t\tif prompt := st.chat_input("What is up?"):\n\t\t\t# set user prompt in chat history\n\t\t\tst.session_state.msg.append({"role": "user", "content": prompt})\n\t\t\twith st.chat_message("user"):\n\t\t\t\tst.markdown(prompt)\n\n\t\t\twith st.chat_message("assistant"):\n\t\t\t\tmessage_placeholder = st.empty()\n\t\t\t\tfull_response = ""\n\t\t\t\t# streaming function\n\t\t\t\tfor response in chat_completion_stream_prompt(prompt):\n\t\t\t\t\tfull_response += response.choices[0].delta.get("content", "")\n\t\t\t\t\tmessage_placeholder.markdown(full_response + "▌")\n\t\t\t\tmessage_placeholder.markdown(full_response)\n\t\t\tst.session_state.msg.append({"role": "assistant", "content": full_response})\n\t\t\tst.session_state.memory.save_context(\n\t\t\t\t{"input": prompt}, {"output": full_response}\n\t\t\t)\n\texcept Exception as e:\n\t\tst.error(e)\n"""\n )\n', (58600, 60969), True, 'import streamlit as st\n'), ((62423, 62450), 'langchain.document_loaders.PyPDFLoader', 'PyPDFLoader', (['temp_file_path'], {}), '(temp_file_path)\n', (62434, 62450), False, 'from langchain.document_loaders import TextLoader, PyPDFLoader\n'), ((62496, 62559), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'table'}), '(documents, embeddings, connection=table)\n', (62518, 62559), False, 'from langchain.vectorstores import LanceDB\n'), ((62794, 62824), 'streamlit.text_input', 'st.text_input', (['"""Enter a query"""'], {}), "('Enter a query')\n", (62807, 62824), True, 'import streamlit as st\n'), ((66392, 66427), 'langchain.memory.ConversationBufferWindowMemory', 'ConversationBufferWindowMemory', ([], {'k': '(5)'}), '(k=5)\n', (66422, 66427), False, 'from langchain.memory import ConversationBufferWindowMemory\n'), ((76607, 76642), 'langchain.memory.ConversationBufferWindowMemory', 'ConversationBufferWindowMemory', ([], {'k': '(5)'}), '(k=5)\n', (76637, 76642), False, 'from langchain.memory import ConversationBufferWindowMemory\n'), ((79130, 79156), 'streamlit.expander', 'st.expander', (['"""Reveal Code"""'], {}), "('Reveal Code')\n", (79141, 79156), True, 'import streamlit as st\n'), ((79160, 82280), 'streamlit.code', 'st.code', (['"""\ndef ch15_chatbot():\n\t#display ex15 table\n\tex15()\n\t# Prompt_template form from ex11\n\tprompt_template = PromptTemplate(\n\t\tinput_variables=["occupation", "topic", "age"],\n\t\ttemplate=""\\"Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information\n\t\t\t\t\t\tto the 
person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up""\\",\n\t)\n\tdict_inputs = prompt_inputs_form()\n\tif dict_inputs:\n\t\tinput_prompt = prompt_template.format(\n\t\t\toccupation=dict_inputs["occupation"],\n\t\t\ttopic=dict_inputs["topic"],\n\t\t\tage=dict_inputs["age"],\n\t\t)\n\t\tst.session_state.input_prompt = input_prompt\n\n\tif "input_prompt" not in st.session_state:\n\t\tst.session_state.input_prompt = "Speak like Yoda from Star Wars"\n\n\tif "memory" not in st.session_state:\n\t\tst.session_state.memory = ConversationBufferWindowMemory(k=5)\n\n\t# step 1 save the memory from your chatbot\n\t# step 2 integrate the memory in the prompt_template (st.session_state.prompt_template) show a hint\n\tmemory_data = st.session_state.memory.load_memory_variables({})\n\tst.write(memory_data)\n\tst.session_state.prompt_template = f""\\"{st.session_state.input_prompt}\n\t\t\t\t\t\t\t\t\t\tThis is the last conversation history\n\t\t\t\t\t\t\t\t\t\t{memory_data}\n\t\t\t\t\t\t\t\t\t\t""\\"\n\tst.write("new prompt template: ", st.session_state.prompt_template)\n\n\tst.session_state.vectorstore = vectorstore_creator()\n\n\t# Initialize chat history\n\tif "msg" not in st.session_state:\n\t\tst.session_state.msg = []\n\n\t# Showing Chat history\n\tfor message in st.session_state.msg:\n\t\twith st.chat_message(message["role"]):\n\t\t\tst.markdown(message["content"])\n\ttry:\n\t\t#\n\t\tif prompt := st.chat_input("What is up?"):\n\t\t\t# query information\n\t\t\tif st.session_state.vectorstore:\n\t\t\t\tdocs = st.session_state.vectorstore.similarity_search(prompt)\n\t\t\t\tdocs = docs[0].page_content\n\t\t\t\t# add your query prompt\n\t\t\t\tvs_prompt = f""\\"You should reference this search result to help your answer,\n\t\t\t\t\t\t\t\t{docs}\n\t\t\t\t\t\t\t\tif the search result does not anwer the query, please say you are unable to answer, do not make up an answer""\\"\n\t\t\telse:\n\t\t\t\tvs_prompt = ""\n\t\t\t# add query prompt to your memory prompt and send it to LLM\n\t\t\tst.session_state.prompt_template = (\n\t\t\t\tst.session_state.prompt_template + vs_prompt\n\t\t\t)\n\t\t\t# set user prompt in chat history\n\t\t\tst.session_state.msg.append({"role": "user", "content": prompt})\n\t\t\twith st.chat_message("user"):\n\t\t\t\tst.markdown(prompt)\n\n\t\t\twith st.chat_message("assistant"):\n\t\t\t\tmessage_placeholder = st.empty()\n\t\t\t\tfull_response = ""\n\t\t\t\t# streaming function\n\t\t\t\tfor response in chat_completion_stream_prompt(prompt):\n\t\t\t\t\tfull_response += response.choices[0].delta.get("content", "")\n\t\t\t\t\tmessage_placeholder.markdown(full_response + "▌")\n\t\t\t\tmessage_placeholder.markdown(full_response)\n\t\t\tst.session_state.msg.append({"role": "assistant", "content": full_response})\n\t\t\tst.session_state.memory.save_context(\n\t\t\t\t{"input": prompt}, {"output": full_response}\n\t\t\t)\n\n\t\t\tex15_collect(st.session_state.name, full_response, prompt)\n\n\texcept Exception as e:\n\t\tst.error(e)\n"""'], {}), '(\n """\ndef ch15_chatbot():\n\t#display ex15 table\n\tex15()\n\t# Prompt_template form from ex11\n\tprompt_template = PromptTemplate(\n\t\tinput_variables=["occupation", "topic", "age"],\n\t\ttemplate=""\\"Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information\n\t\t\t\t\t\tto the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer 
up""\\",\n\t)\n\tdict_inputs = prompt_inputs_form()\n\tif dict_inputs:\n\t\tinput_prompt = prompt_template.format(\n\t\t\toccupation=dict_inputs["occupation"],\n\t\t\ttopic=dict_inputs["topic"],\n\t\t\tage=dict_inputs["age"],\n\t\t)\n\t\tst.session_state.input_prompt = input_prompt\n\n\tif "input_prompt" not in st.session_state:\n\t\tst.session_state.input_prompt = "Speak like Yoda from Star Wars"\n\n\tif "memory" not in st.session_state:\n\t\tst.session_state.memory = ConversationBufferWindowMemory(k=5)\n\n\t# step 1 save the memory from your chatbot\n\t# step 2 integrate the memory in the prompt_template (st.session_state.prompt_template) show a hint\n\tmemory_data = st.session_state.memory.load_memory_variables({})\n\tst.write(memory_data)\n\tst.session_state.prompt_template = f""\\"{st.session_state.input_prompt}\n\t\t\t\t\t\t\t\t\t\tThis is the last conversation history\n\t\t\t\t\t\t\t\t\t\t{memory_data}\n\t\t\t\t\t\t\t\t\t\t""\\"\n\tst.write("new prompt template: ", st.session_state.prompt_template)\n\n\tst.session_state.vectorstore = vectorstore_creator()\n\n\t# Initialize chat history\n\tif "msg" not in st.session_state:\n\t\tst.session_state.msg = []\n\n\t# Showing Chat history\n\tfor message in st.session_state.msg:\n\t\twith st.chat_message(message["role"]):\n\t\t\tst.markdown(message["content"])\n\ttry:\n\t\t#\n\t\tif prompt := st.chat_input("What is up?"):\n\t\t\t# query information\n\t\t\tif st.session_state.vectorstore:\n\t\t\t\tdocs = st.session_state.vectorstore.similarity_search(prompt)\n\t\t\t\tdocs = docs[0].page_content\n\t\t\t\t# add your query prompt\n\t\t\t\tvs_prompt = f""\\"You should reference this search result to help your answer,\n\t\t\t\t\t\t\t\t{docs}\n\t\t\t\t\t\t\t\tif the search result does not anwer the query, please say you are unable to answer, do not make up an answer""\\"\n\t\t\telse:\n\t\t\t\tvs_prompt = ""\n\t\t\t# add query prompt to your memory prompt and send it to LLM\n\t\t\tst.session_state.prompt_template = (\n\t\t\t\tst.session_state.prompt_template + vs_prompt\n\t\t\t)\n\t\t\t# set user prompt in chat history\n\t\t\tst.session_state.msg.append({"role": "user", "content": prompt})\n\t\t\twith st.chat_message("user"):\n\t\t\t\tst.markdown(prompt)\n\n\t\t\twith st.chat_message("assistant"):\n\t\t\t\tmessage_placeholder = st.empty()\n\t\t\t\tfull_response = ""\n\t\t\t\t# streaming function\n\t\t\t\tfor response in chat_completion_stream_prompt(prompt):\n\t\t\t\t\tfull_response += response.choices[0].delta.get("content", "")\n\t\t\t\t\tmessage_placeholder.markdown(full_response + "▌")\n\t\t\t\tmessage_placeholder.markdown(full_response)\n\t\t\tst.session_state.msg.append({"role": "assistant", "content": full_response})\n\t\t\tst.session_state.memory.save_context(\n\t\t\t\t{"input": prompt}, {"output": full_response}\n\t\t\t)\n\n\t\t\tex15_collect(st.session_state.name, full_response, prompt)\n\n\texcept Exception as e:\n\t\tst.error(e)\n"""\n )\n', (79167, 82280), True, 'import streamlit as st\n'), ((82756, 82795), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Reset chat history"""'], {}), "('Reset chat history')\n", (82773, 82795), True, 'import streamlit as st\n'), ((83363, 83421), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""Enter a query on the Internet"""'}), "(placeholder='Enter a query on the Internet')\n", (83376, 83421), True, 'import streamlit as st\n'), ((83472, 83561), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'openai_api_key': 'openai.api_key', 
'streaming': '(True)'}), "(model_name='gpt-3.5-turbo', openai_api_key=openai.api_key,\n streaming=True)\n", (83482, 83561), False, 'from langchain.chat_models import ChatOpenAI\n'), ((83627, 83691), 'langchain.agents.ConversationalChatAgent.from_llm_and_tools', 'ConversationalChatAgent.from_llm_and_tools', ([], {'llm': 'llm', 'tools': 'tools'}), '(llm=llm, tools=tools)\n', (83669, 83691), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((83705, 83850), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'chat_agent', 'tools': 'tools', 'memory': 'memory', 'return_intermediate_steps': '(True)', 'handle_parsing_errors': '(True)'}), '(agent=chat_agent, tools=tools, memory=\n memory, return_intermediate_steps=True, handle_parsing_errors=True)\n', (83739, 83850), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((87508, 87547), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Reset chat history"""'], {}), "('Reset chat history')\n", (87525, 87547), True, 'import streamlit as st\n'), ((88115, 88173), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""Enter a query on the Internet"""'}), "(placeholder='Enter a query on the Internet')\n", (88128, 88173), True, 'import streamlit as st\n'), ((88224, 88313), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'openai_api_key': 'openai.api_key', 'streaming': '(True)'}), "(model_name='gpt-3.5-turbo', openai_api_key=openai.api_key,\n streaming=True)\n", (88234, 88313), False, 'from langchain.chat_models import ChatOpenAI\n'), ((88405, 88469), 'langchain.agents.ConversationalChatAgent.from_llm_and_tools', 'ConversationalChatAgent.from_llm_and_tools', ([], {'llm': 'llm', 'tools': 'tools'}), '(llm=llm, tools=tools)\n', (88447, 88469), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((88483, 88628), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'chat_agent', 'tools': 'tools', 'memory': 'memory', 'return_intermediate_steps': '(True)', 'handle_parsing_errors': '(True)'}), '(agent=chat_agent, tools=tools, memory=\n memory, return_intermediate_steps=True, handle_parsing_errors=True)\n', (88517, 88628), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((92550, 92956), 'pandas.DataFrame', 'pd.DataFrame', (["{'country': ['United States', 'United Kingdom', 'France', 'Germany',\n 'Italy', 'Spain', 'Canada', 'Australia', 'Japan', 'China'], 'gdp': [\n 19294482071552, 2891615567872, 2411255037952, 3435817336832, \n 1745433788416, 1181205135360, 1607402389504, 1490967855104, \n 4380756541440, 14631844184064], 'happiness_index': [6.94, 7.16, 6.66, \n 7.07, 6.38, 6.4, 7.23, 7.22, 5.87, 5.12]}"], {}), "({'country': ['United States', 'United Kingdom', 'France',\n 'Germany', 'Italy', 'Spain', 'Canada', 'Australia', 'Japan', 'China'],\n 'gdp': [19294482071552, 2891615567872, 2411255037952, 3435817336832, \n 1745433788416, 1181205135360, 1607402389504, 1490967855104, \n 4380756541440, 14631844184064], 'happiness_index': [6.94, 7.16, 6.66, \n 7.07, 6.38, 6.4, 7.23, 7.22, 5.87, 5.12]})\n", (92562, 92956), True, 'import pandas as pd\n'), ((93181, 93200), 'streamlit.form', 'st.form', (['"""Question"""'], {}), "('Question')\n", (93188, 93200), True, 'import streamlit as st\n'), ((93215, 93266), 'streamlit.text_input', 'st.text_input', (['"""Question"""'], {'value': '""""""', 
'type': '"""default"""'}), "('Question', value='', type='default')\n", (93228, 93266), True, 'import streamlit as st\n'), ((93281, 93312), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Submit"""'], {}), "('Submit')\n", (93302, 93312), True, 'import streamlit as st\n'), ((94093, 94127), 'streamlit.subheader', 'st.subheader', (['"""Current dataframe:"""'], {}), "('Current dataframe:')\n", (94105, 94127), True, 'import streamlit as st\n'), ((94130, 94159), 'streamlit.write', 'st.write', (['st.session_state.df'], {}), '(st.session_state.df)\n', (94138, 94159), True, 'import streamlit as st\n'), ((8292, 8324), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (8307, 8324), True, 'import streamlit as st\n'), ((8329, 8360), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (8340, 8360), True, 'import streamlit as st\n'), ((8385, 8413), 'streamlit.chat_input', 'st.chat_input', (['"""What is up?"""'], {}), "('What is up?')\n", (8398, 8413), True, 'import streamlit as st\n'), ((8418, 8486), 'streamlit.session_state.msg_bot.append', 'st.session_state.msg_bot.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (8449, 8486), True, 'import streamlit as st\n'), ((9063, 9148), 'streamlit.session_state.msg_bot.append', 'st.session_state.msg_bot.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content': full_response}\n )\n", (9094, 9148), True, 'import streamlit as st\n'), ((9171, 9182), 'streamlit.error', 'st.error', (['e'], {}), '(e)\n', (9179, 9182), True, 'import streamlit as st\n'), ((14016, 14047), 'streamlit.write', 'st.write', (['"""You are a young boy"""'], {}), "('You are a young boy')\n", (14024, 14047), True, 'import streamlit as st\n'), ((25122, 25154), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (25137, 25154), True, 'import streamlit as st\n'), ((25159, 25190), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (25170, 25190), True, 'import streamlit as st\n'), ((25558, 25586), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (25573, 25586), True, 'import streamlit as st\n'), ((25591, 25612), 'streamlit.markdown', 'st.markdown', (['response'], {}), '(response)\n', (25602, 25612), True, 'import streamlit as st\n'), ((27275, 27307), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (27290, 27307), True, 'import streamlit as st\n'), ((27312, 27343), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (27323, 27343), True, 'import streamlit as st\n'), ((27743, 27766), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (27758, 27766), True, 'import streamlit as st\n'), ((27771, 27787), 'streamlit.write', 'st.write', (['prompt'], {}), '(prompt)\n', (27779, 27787), True, 'import streamlit as st\n'), ((27791, 27860), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (27823, 27860), True, 'import streamlit as st\n'), ((27868, 27896), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (27883, 27896), True, 'import streamlit as st\n'), ((27901, 27916), 'streamlit.write', 'st.write', (['reply'], {}), 
'(reply)\n', (27909, 27916), True, 'import streamlit as st\n'), ((27920, 27993), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': reply}"], {}), "({'role': 'assistant', 'content': reply})\n", (27952, 27993), True, 'import streamlit as st\n'), ((35827, 35859), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (35842, 35859), True, 'import streamlit as st\n'), ((35864, 35895), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (35875, 35895), True, 'import streamlit as st\n'), ((36265, 36293), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (36280, 36293), True, 'import streamlit as st\n'), ((36298, 36316), 'streamlit.markdown', 'st.markdown', (['reply'], {}), '(reply)\n', (36309, 36316), True, 'import streamlit as st\n'), ((36957, 36989), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (36972, 36989), True, 'import streamlit as st\n'), ((36994, 37025), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (37005, 37025), True, 'import streamlit as st\n'), ((37051, 37079), 'streamlit.chat_input', 'st.chat_input', (['"""What is up?"""'], {}), "('What is up?')\n", (37064, 37079), True, 'import streamlit as st\n'), ((37121, 37190), 'streamlit.session_state.chat_msg.append', 'st.session_state.chat_msg.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (37153, 37190), True, 'import streamlit as st\n'), ((37597, 37682), 'streamlit.session_state.chat_msg.append', 'st.session_state.chat_msg.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (37629, 37682), True, 'import streamlit as st\n'), ((37714, 37725), 'streamlit.error', 'st.error', (['e'], {}), '(e)\n', (37722, 37725), True, 'import streamlit as st\n'), ((42505, 42537), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (42520, 42537), True, 'import streamlit as st\n'), ((42542, 42573), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (42553, 42573), True, 'import streamlit as st\n'), ((42599, 42627), 'streamlit.chat_input', 'st.chat_input', (['"""What is up?"""'], {}), "('What is up?')\n", (42612, 42627), True, 'import streamlit as st\n'), ((42668, 42732), 'streamlit.session_state.msg.append', 'st.session_state.msg.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (42695, 42732), True, 'import streamlit as st\n'), ((43145, 43221), 'streamlit.session_state.msg.append', 'st.session_state.msg.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content': full_response})\n", (43172, 43221), True, 'import streamlit as st\n'), ((43249, 43260), 'streamlit.error', 'st.error', (['e'], {}), '(e)\n', (43257, 43260), True, 'import streamlit as st\n'), ((56549, 56581), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (56564, 56581), True, 'import streamlit as st\n'), ((56586, 56617), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (56597, 56617), True, 'import streamlit as st\n'), ((56643, 56671), 'streamlit.chat_input', 'st.chat_input', (['"""What is up?"""'], {}), "('What is up?')\n", (56656, 
56671), True, 'import streamlit as st\n'), ((56713, 56777), 'streamlit.session_state.msg.append', 'st.session_state.msg.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (56740, 56777), True, 'import streamlit as st\n'), ((57191, 57267), 'streamlit.session_state.msg.append', 'st.session_state.msg.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content': full_response})\n", (57218, 57267), True, 'import streamlit as st\n'), ((57271, 57357), 'streamlit.session_state.memory.save_context', 'st.session_state.memory.save_context', (["{'input': prompt}", "{'output': full_response}"], {}), "({'input': prompt}, {'output':\n full_response})\n", (57307, 57357), True, 'import streamlit as st\n'), ((57389, 57400), 'streamlit.error', 'st.error', (['e'], {}), '(e)\n', (57397, 57400), True, 'import streamlit as st\n'), ((61119, 61146), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (61135, 61146), False, 'import os\n'), ((61571, 61632), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'suffix': 'file_suffix'}), '(delete=False, suffix=file_suffix)\n', (61598, 61632), False, 'import tempfile\n'), ((62915, 62945), 'streamlit.write', 'st.write', (['docs[0].page_content'], {}), '(docs[0].page_content)\n', (62923, 62945), True, 'import streamlit as st\n'), ((67109, 67141), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (67124, 67141), True, 'import streamlit as st\n'), ((67146, 67177), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (67157, 67177), True, 'import streamlit as st\n'), ((67203, 67231), 'streamlit.chat_input', 'st.chat_input', (['"""What is up?"""'], {}), "('What is up?')\n", (67216, 67231), True, 'import streamlit as st\n'), ((67859, 67923), 'streamlit.session_state.msg.append', 'st.session_state.msg.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (67886, 67923), True, 'import streamlit as st\n'), ((68337, 68413), 'streamlit.session_state.msg.append', 'st.session_state.msg.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content': full_response})\n", (68364, 68413), True, 'import streamlit as st\n'), ((68417, 68503), 'streamlit.session_state.memory.save_context', 'st.session_state.memory.save_context', (["{'input': prompt}", "{'output': full_response}"], {}), "({'input': prompt}, {'output':\n full_response})\n", (68453, 68503), True, 'import streamlit as st\n'), ((68536, 68547), 'streamlit.error', 'st.error', (['e'], {}), '(e)\n', (68544, 68547), True, 'import streamlit as st\n'), ((77320, 77352), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (77335, 77352), True, 'import streamlit as st\n'), ((77357, 77388), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (77368, 77388), True, 'import streamlit as st\n'), ((77414, 77442), 'streamlit.chat_input', 'st.chat_input', (['"""What is up?"""'], {}), "('What is up?')\n", (77427, 77442), True, 'import streamlit as st\n'), ((78070, 78134), 'streamlit.session_state.msg.append', 'st.session_state.msg.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (78097, 78134), True, 'import streamlit as st\n'), ((78548, 78624), 'streamlit.session_state.msg.append', 
'st.session_state.msg.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content': full_response})\n", (78575, 78624), True, 'import streamlit as st\n'), ((78628, 78714), 'streamlit.session_state.memory.save_context', 'st.session_state.memory.save_context', (["{'input': prompt}", "{'output': full_response}"], {}), "({'input': prompt}, {'output':\n full_response})\n", (78664, 78714), True, 'import streamlit as st\n'), ((78810, 78821), 'streamlit.error', 'st.error', (['e'], {}), '(e)\n', (78818, 78821), True, 'import streamlit as st\n'), ((82986, 83020), 'streamlit.chat_message', 'st.chat_message', (['avatars[msg.type]'], {}), '(avatars[msg.type])\n', (83001, 83020), True, 'import streamlit as st\n'), ((83326, 83347), 'streamlit.write', 'st.write', (['msg.content'], {}), '(msg.content)\n', (83334, 83347), True, 'import streamlit as st\n'), ((83576, 83610), 'langchain.tools.DuckDuckGoSearchRun', 'DuckDuckGoSearchRun', ([], {'name': '"""Search"""'}), "(name='Search')\n", (83595, 83610), False, 'from langchain.tools import DuckDuckGoSearchRun\n'), ((83873, 83901), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (83888, 83901), True, 'import streamlit as st\n'), ((84035, 84063), 'streamlit.write', 'st.write', (["response['output']"], {}), "(response['output'])\n", (84043, 84063), True, 'import streamlit as st\n'), ((87738, 87772), 'streamlit.chat_message', 'st.chat_message', (['avatars[msg.type]'], {}), '(avatars[msg.type])\n', (87753, 87772), True, 'import streamlit as st\n'), ((88078, 88099), 'streamlit.write', 'st.write', (['msg.content'], {}), '(msg.content)\n', (88086, 88099), True, 'import streamlit as st\n'), ((88345, 88388), 'langchain.tools.DuckDuckGoSearchRun', 'DuckDuckGoSearchRun', ([], {'name': '"""Internet Search"""'}), "(name='Internet Search')\n", (88364, 88388), False, 'from langchain.tools import DuckDuckGoSearchRun\n'), ((88651, 88679), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (88666, 88679), True, 'import streamlit as st\n'), ((88813, 88841), 'streamlit.write', 'st.write', (["response['output']"], {}), "(response['output'])\n", (88821, 88841), True, 'import streamlit as st\n'), ((92364, 92390), 'pandas.read_csv', 'pd.read_csv', (['uploaded_file'], {}), '(uploaded_file)\n', (92375, 92390), True, 'import pandas as pd\n'), ((8495, 8518), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (8510, 8518), True, 'import streamlit as st\n'), ((8524, 8543), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (8535, 8543), True, 'import streamlit as st\n'), ((8553, 8581), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (8568, 8581), True, 'import streamlit as st\n'), ((8609, 8619), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (8617, 8619), True, 'import streamlit as st\n'), ((8663, 8848), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': "st.session_state['openai_model']", 'messages': "[{'role': 'system', 'content': prompt_template}, {'role': 'user', 'content':\n prompt}]", 'stream': '(True)'}), "(model=st.session_state['openai_model'],\n messages=[{'role': 'system', 'content': prompt_template}, {'role':\n 'user', 'content': prompt}], stream=True)\n", (8691, 8848), False, 'import openai\n'), ((14099, 14133), 'streamlit.write', 'st.write', (['"""You are a female adult"""'], {}), "('You are a female adult')\n", (14107, 14133), True, 
'import streamlit as st\n'), ((23840, 23863), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (23855, 23863), True, 'import streamlit as st\n'), ((23869, 23886), 'streamlit.write', 'st.write', (['message'], {}), '(message)\n', (23877, 23886), True, 'import streamlit as st\n'), ((23895, 23923), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (23910, 23923), True, 'import streamlit as st\n'), ((23929, 23976), 'streamlit.write', 'st.write', (['"""Hello human, what can I do for you?"""'], {}), "('Hello human, what can I do for you?')\n", (23937, 23976), True, 'import streamlit as st\n'), ((25312, 25335), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (25327, 25335), True, 'import streamlit as st\n'), ((36051, 36074), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (36066, 36074), True, 'import streamlit as st\n'), ((37199, 37222), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (37214, 37222), True, 'import streamlit as st\n'), ((37228, 37247), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (37239, 37247), True, 'import streamlit as st\n'), ((37257, 37285), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (37272, 37285), True, 'import streamlit as st\n'), ((37313, 37323), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (37321, 37323), True, 'import streamlit as st\n'), ((42741, 42764), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (42756, 42764), True, 'import streamlit as st\n'), ((42770, 42789), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (42781, 42789), True, 'import streamlit as st\n'), ((42799, 42827), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (42814, 42827), True, 'import streamlit as st\n'), ((42855, 42865), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (42863, 42865), True, 'import streamlit as st\n'), ((56786, 56809), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (56801, 56809), True, 'import streamlit as st\n'), ((56815, 56834), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (56826, 56834), True, 'import streamlit as st\n'), ((56844, 56872), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (56859, 56872), True, 'import streamlit as st\n'), ((56900, 56910), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (56908, 56910), True, 'import streamlit as st\n'), ((67303, 67357), 'streamlit.session_state.vectorstore.similarity_search', 'st.session_state.vectorstore.similarity_search', (['prompt'], {}), '(prompt)\n', (67349, 67357), True, 'import streamlit as st\n'), ((67932, 67955), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (67947, 67955), True, 'import streamlit as st\n'), ((67961, 67980), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (67972, 67980), True, 'import streamlit as st\n'), ((67990, 68018), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (68005, 68018), True, 'import streamlit as st\n'), ((68046, 68056), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (68054, 68056), True, 'import streamlit as st\n'), ((77514, 77568), 'streamlit.session_state.vectorstore.similarity_search', 'st.session_state.vectorstore.similarity_search', 
(['prompt'], {}), '(prompt)\n', (77560, 77568), True, 'import streamlit as st\n'), ((78143, 78166), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (78158, 78166), True, 'import streamlit as st\n'), ((78172, 78191), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (78183, 78191), True, 'import streamlit as st\n'), ((78201, 78229), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (78216, 78229), True, 'import streamlit as st\n'), ((78257, 78267), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (78265, 78267), True, 'import streamlit as st\n'), ((83425, 83448), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (83440, 83448), True, 'import streamlit as st\n'), ((83939, 83953), 'streamlit.container', 'st.container', ([], {}), '()\n', (83951, 83953), True, 'import streamlit as st\n'), ((88177, 88200), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (88192, 88200), True, 'import streamlit as st\n'), ((88717, 88731), 'streamlit.container', 'st.container', ([], {}), '()\n', (88729, 88731), True, 'import streamlit as st\n'), ((92447, 92502), 'streamlit.write', 'st.write', (['"""There was an error processing the CSV file."""'], {}), "('There was an error processing the CSV file.')\n", (92455, 92502), True, 'import streamlit as st\n'), ((92506, 92517), 'streamlit.write', 'st.write', (['e'], {}), '(e)\n', (92514, 92517), True, 'import streamlit as st\n'), ((93337, 93349), 'streamlit.spinner', 'st.spinner', ([], {}), '()\n', (93347, 93349), True, 'import streamlit as st\n'), ((93361, 93406), 'pandasai.llm.openai.OpenAI', 'OpenAI', ([], {'api_token': 'st.session_state.openai_key'}), '(api_token=st.session_state.openai_key)\n', (93367, 93406), False, 'from pandasai.llm.openai import OpenAI\n'), ((93416, 93546), 'pandasai.SmartDataframe', 'SmartDataframe', (['st.session_state.df'], {'config': "{'llm': llm, 'save_charts_path': chart_path, 'save_charts': True, 'verbose':\n True}"}), "(st.session_state.df, config={'llm': llm, 'save_charts_path':\n chart_path, 'save_charts': True, 'verbose': True})\n", (93430, 93546), False, 'from pandasai import SmartDataframe\n'), ((93785, 93833), 'os.path.join', 'os.path.join', (['"""exports/charts"""', '"""temp_chart.png"""'], {}), "('exports/charts', 'temp_chart.png')\n", (93797, 93833), False, 'import os\n'), ((93841, 93867), 'os.path.exists', 'os.path.exists', (['chart_path'], {}), '(chart_path)\n', (93855, 93867), False, 'import os\n'), ((94004, 94052), 'streamlit.session_state.prompt_history.append', 'st.session_state.prompt_history.append', (['question'], {}), '(question)\n', (94042, 94052), True, 'import streamlit as st\n'), ((14184, 14216), 'streamlit.write', 'st.write', (['"""You are a young girl"""'], {}), "('You are a young girl')\n", (14192, 14216), True, 'import streamlit as st\n'), ((83188, 83260), 'streamlit.status', 'st.status', (['f"""**{step[0].tool}**: {step[0].tool_input}"""'], {'state': '"""complete"""'}), "(f'**{step[0].tool}**: {step[0].tool_input}', state='complete')\n", (83197, 83260), True, 'import streamlit as st\n'), ((83278, 83299), 'streamlit.write', 'st.write', (['step[0].log'], {}), '(step[0].log)\n', (83286, 83299), True, 'import streamlit as st\n'), ((83305, 83322), 'streamlit.write', 'st.write', (['step[1]'], {}), '(step[1])\n', (83313, 83322), True, 'import streamlit as st\n'), ((87940, 88012), 'streamlit.status', 'st.status', (['f"""**{step[0].tool}**: 
{step[0].tool_input}"""'], {'state': '"""complete"""'}), "(f'**{step[0].tool}**: {step[0].tool_input}', state='complete')\n", (87949, 88012), True, 'import streamlit as st\n'), ((88030, 88051), 'streamlit.write', 'st.write', (['step[0].log'], {}), '(step[0].log)\n', (88038, 88051), True, 'import streamlit as st\n'), ((88057, 88074), 'streamlit.write', 'st.write', (['step[1]'], {}), '(step[1])\n', (88065, 88074), True, 'import streamlit as st\n'), ((93749, 93767), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (93757, 93767), True, 'import streamlit as st\n'), ((93874, 93944), 'streamlit.image', 'st.image', (['chart_path'], {'caption': '"""Generated Chart"""', 'use_column_width': '(True)'}), "(chart_path, caption='Generated Chart', use_column_width=True)\n", (93882, 93944), True, 'import streamlit as st\n')]
import argparse

import duckdb
import lancedb
from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast

import gradio as gr

MODEL_ID = None
MODEL = None
TOKENIZER = None
PROCESSOR = None


def get_table():
    db = lancedb.connect("data/video-lancedb")
    return db.open_table("multimodal_video")


def setup_clip_model(model_id):
    global MODEL_ID, MODEL, TOKENIZER, PROCESSOR
    MODEL_ID = model_id
    TOKENIZER = CLIPTokenizerFast.from_pretrained(MODEL_ID)
    MODEL = CLIPModel.from_pretrained(MODEL_ID)
    PROCESSOR = CLIPProcessor.from_pretrained(MODEL_ID)


def embed_func(query):
    inputs = TOKENIZER([query], padding=True, return_tensors="pt")
    text_features = MODEL.get_text_features(**inputs)
    return text_features.detach().numpy()[0]


def find_video_vectors(query):
    # `tbl` is the module-level table handle opened in the __main__ block.
    emb = embed_func(query)
    code = (
        "import lancedb\n"
        "db = lancedb.connect('data/video-lancedb')\n"
        "tbl = db.open_table('multimodal_video')\n\n"
        f"embedding = embed_func('{query}')\n"
        "tbl.search(embedding).limit(9).to_df()"
    )
    return (_extract(tbl.search(emb).limit(9).to_df()), code)


def find_video_keywords(query):
    # String queries run LanceDB full-text search, which assumes an FTS
    # index was built on the table at ingestion time.
    code = (
        "import lancedb\n"
        "db = lancedb.connect('data/video-lancedb')\n"
        "tbl = db.open_table('multimodal_video')\n\n"
        f"tbl.search('{query}').limit(9).to_df()"
    )
    return (_extract(tbl.search(query).limit(9).to_df()), code)


def find_video_sql(query):
    code = (
        "import lancedb\n"
        "import duckdb\n"
        "db = lancedb.connect('data/video-lancedb')\n"
        "tbl = db.open_table('multimodal_video')\n\n"
        "videos = tbl.to_lance()\n"
        f"duckdb.sql('{query}').to_df()"
    )
    # DuckDB discovers the `videos` Lance dataset in the local scope via its
    # replacement scan, so the SQL can reference it by that name.
    videos = tbl.to_lance()
    return (_extract(duckdb.sql(query).to_df()), code)


def _extract(df):
    # Build a 3-column HTML grid of embedded YouTube players, one cell per
    # search result, starting each video at the matched frame's timestamp.
    video_id_col = "video_id"
    start_time_col = "start_time"

    grid_html = '<div style="display: grid; grid-template-columns: repeat(3, 1fr); grid-gap: 20px;">'

    for _, row in df.iterrows():
        iframe_code = f'<iframe width="100%" height="315" src="https://www.youtube.com/embed/{row[video_id_col]}?start={str(row[start_time_col])}" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>'
        grid_html += f'<div style="width: 100%;">{iframe_code}</div>'
    grid_html += "</div>"
    return grid_html


def create_gradio_dash():
    with gr.Blocks() as demo:
        gr.Markdown(
            """
            # Multimodal Video Search with LanceDB
            We used LanceDB to store frames every thirty seconds and the title of 13000+ videos, 5 random videos from each top category of the YouTube 8M dataset.
            Then, we used the CLIP model to embed frames and titles together. With LanceDB, we can perform embedding, keyword, and SQL search on these videos.
""" ) with gr.Row(): with gr.Tab("Embeddings"): vector_query = gr.Textbox(value="retro gaming", show_label=False) b1 = gr.Button("Submit") with gr.Tab("Keywords"): keyword_query = gr.Textbox(value="ninja turtles", show_label=False) b2 = gr.Button("Submit") with gr.Tab("SQL"): sql_query = gr.Textbox( value="SELECT DISTINCT video_id, * from videos WHERE start_time > 0 LIMIT 9", show_label=False, ) b3 = gr.Button("Submit") with gr.Row(): code = gr.Code(label="Code", language="python") with gr.Row(): gallery = gr.HTML() b1.click(find_video_vectors, inputs=vector_query, outputs=[gallery, code]) b2.click(find_video_keywords, inputs=keyword_query, outputs=[gallery, code]) b3.click(find_video_sql, inputs=sql_query, outputs=[gallery, code]) demo.launch() def args_parse(): parser = argparse.ArgumentParser() parser.add_argument("--model_id", type=str, default="openai/clip-vit-base-patch32") return parser.parse_args() if __name__ == "__main__": args = args_parse() setup_clip_model(args.model_id) tbl = get_table() create_gradio_dash()
[ "lancedb.connect" ]
[((255, 292), 'lancedb.connect', 'lancedb.connect', (['"""data/video-lancedb"""'], {}), "('data/video-lancedb')\n", (270, 292), False, 'import lancedb\n'), ((461, 504), 'transformers.CLIPTokenizerFast.from_pretrained', 'CLIPTokenizerFast.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (494, 504), False, 'from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n'), ((517, 552), 'transformers.CLIPModel.from_pretrained', 'CLIPModel.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (542, 552), False, 'from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n'), ((569, 608), 'transformers.CLIPProcessor.from_pretrained', 'CLIPProcessor.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (598, 608), False, 'from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n'), ((4123, 4148), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4146, 4148), False, 'import argparse\n'), ((2589, 2600), 'gradio.Blocks', 'gr.Blocks', ([], {}), '()\n', (2598, 2600), True, 'import gradio as gr\n'), ((2618, 3061), 'gradio.Markdown', 'gr.Markdown', (['"""\n # Multimodal Video Search with LanceDB\n We used LanceDB to store frames every thirty seconds and the title of 13000+ videos, 5 random from each top category from the Youtube 8M dataset. \n Then, we used the CLIP model to embed frames and titles together. With LanceDB, we can perform embedding, keyword, and SQL search on these videos.\n """'], {}), '(\n """\n # Multimodal Video Search with LanceDB\n We used LanceDB to store frames every thirty seconds and the title of 13000+ videos, 5 random from each top category from the Youtube 8M dataset. \n Then, we used the CLIP model to embed frames and titles together. With LanceDB, we can perform embedding, keyword, and SQL search on these videos.\n """\n )\n', (2629, 3061), True, 'import gradio as gr\n'), ((3087, 3095), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (3093, 3095), True, 'import gradio as gr\n'), ((3701, 3709), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (3707, 3709), True, 'import gradio as gr\n'), ((3730, 3770), 'gradio.Code', 'gr.Code', ([], {'label': '"""Code"""', 'language': '"""python"""'}), "(label='Code', language='python')\n", (3737, 3770), True, 'import gradio as gr\n'), ((3784, 3792), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (3790, 3792), True, 'import gradio as gr\n'), ((3816, 3825), 'gradio.HTML', 'gr.HTML', ([], {}), '()\n', (3823, 3825), True, 'import gradio as gr\n'), ((3114, 3134), 'gradio.Tab', 'gr.Tab', (['"""Embeddings"""'], {}), "('Embeddings')\n", (3120, 3134), True, 'import gradio as gr\n'), ((3167, 3217), 'gradio.Textbox', 'gr.Textbox', ([], {'value': '"""retro gaming"""', 'show_label': '(False)'}), "(value='retro gaming', show_label=False)\n", (3177, 3217), True, 'import gradio as gr\n'), ((3239, 3258), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {}), "('Submit')\n", (3248, 3258), True, 'import gradio as gr\n'), ((3276, 3294), 'gradio.Tab', 'gr.Tab', (['"""Keywords"""'], {}), "('Keywords')\n", (3282, 3294), True, 'import gradio as gr\n'), ((3328, 3379), 'gradio.Textbox', 'gr.Textbox', ([], {'value': '"""ninja turtles"""', 'show_label': '(False)'}), "(value='ninja turtles', show_label=False)\n", (3338, 3379), True, 'import gradio as gr\n'), ((3401, 3420), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {}), "('Submit')\n", (3410, 3420), True, 'import gradio as gr\n'), ((3438, 3451), 'gradio.Tab', 'gr.Tab', (['"""SQL"""'], {}), "('SQL')\n", (3444, 3451), True, 'import gradio as gr\n'), ((3481, 3596), 
'gradio.Textbox', 'gr.Textbox', ([], {'value': '"""SELECT DISTINCT video_id, * from videos WHERE start_time > 0 LIMIT 9"""', 'show_label': '(False)'}), "(value=\n 'SELECT DISTINCT video_id, * from videos WHERE start_time > 0 LIMIT 9',\n show_label=False)\n", (3491, 3596), True, 'import gradio as gr\n'), ((3668, 3687), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {}), "('Submit')\n", (3677, 3687), True, 'import gradio as gr\n'), ((1813, 1830), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (1823, 1830), False, 'import duckdb\n')]
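The search functions above assume a populated `multimodal_video` table. A minimal sketch of what creating such a table could look like; the rows are made up, the random vectors stand in for real CLIP frame embeddings, the video id is hypothetical, and the schema (vector, text, video_id, start_time) is inferred from `_extract()` and the search calls:

# Hedged sketch: builds a tiny "multimodal_video" table so the Gradio app
# above has something to search against.
import numpy as np
import lancedb

db = lancedb.connect("data/video-lancedb")
rows = [
    {
        "vector": np.random.rand(512).tolist(),  # CLIP ViT-B/32 embeddings are 512-d
        "text": f"retro gaming clip {i}",         # title/caption used for keyword search
        "video_id": "dQw4w9WgXcQ",                # hypothetical YouTube id
        "start_time": 30 * i,
    }
    for i in range(9)
]
tbl = db.create_table("multimodal_video", data=rows, mode="overwrite")
tbl.create_fts_index("text")  # required before tbl.search("<keywords>"); needs the tantivy dependency
print(tbl.search(np.random.rand(512)).limit(9).to_df())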
# %% [markdown]
# # Code documentation Q&A bot example with LangChain
#
# This Q&A bot will allow you to query your own documentation easily using questions. We'll also demonstrate the use of LangChain and LanceDB using the OpenAI API.
#
# In this example we'll use the Pandas 2.0 documentation, but this could be replaced with your own docs as well

import os
import openai
import argparse
import lancedb
import re
import pickle
import requests
import zipfile
from pathlib import Path

from langchain.document_loaders import BSHTMLLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import LanceDB
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA


def get_document_title(document):
    m = str(document.metadata["source"])
    title = re.findall(r"pandas\.documentation(.*)\.html", m)
    # guard against pages whose path does not match the pattern
    if title:
        return title[0]
    return ""


def arg_parse():
    default_query = "What are the major differences in pandas 2.0?"

    parser = argparse.ArgumentParser(description="Code Documentation QA Bot")
    parser.add_argument(
        "--query", type=str, default=default_query, help="query to search"
    )
    parser.add_argument("--openai-key", type=str, help="OpenAI API Key")
    args = parser.parse_args()

    if not args.openai_key:
        if "OPENAI_API_KEY" not in os.environ:
            raise ValueError(
                "OPENAI_API_KEY environment variable not set. Please set it or pass --openai-key"
            )
    else:
        openai.api_key = args.openai_key

    return args


if __name__ == "__main__":
    args = arg_parse()

    docs_path = Path("docs.pkl")
    docs = []

    if not docs_path.exists():
        # Download and unpack the Pandas documentation only when there is no
        # cached pickle from a previous run.
        pandas_docs = requests.get(
            "https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip"
        )
        with open("/tmp/pandas.documentation.zip", "wb") as f:
            f.write(pandas_docs.content)

        with zipfile.ZipFile("/tmp/pandas.documentation.zip") as zf:
            zf.extractall(path="/tmp/pandas_docs")

        for p in Path("/tmp/pandas_docs/pandas.documentation").rglob("*.html"):
            print(p)
            if p.is_dir():
                continue
            loader = BSHTMLLoader(p, open_encoding="utf8")
            raw_document = loader.load()

            m = {}
            m["title"] = get_document_title(raw_document[0])
            m["version"] = "2.0rc0"
            raw_document[0].metadata = raw_document[0].metadata | m
            raw_document[0].metadata["source"] = str(raw_document[0].metadata["source"])
            docs = docs + raw_document

        with docs_path.open("wb") as fh:
            pickle.dump(docs, fh)
    else:
        with docs_path.open("rb") as fh:
            docs = pickle.load(fh)

    print("Loaded {} documents".format(len(docs)))

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
    )
    documents = text_splitter.split_documents(docs)
    embeddings = OpenAIEmbeddings()

    db = lancedb.connect("/tmp/lancedb")
    # Seed the table with one embedded row so LanceDB can infer the schema;
    # LanceDB.from_documents then fills it with the real chunks.
    table = db.create_table(
        "pandas_docs",
        data=[
            {
                "vector": embeddings.embed_query("Hello World"),
                "text": "Hello World",
                "id": "1",
            }
        ],
        mode="overwrite",
    )
    docsearch = LanceDB.from_documents(documents, embeddings, connection=table)

    # "stuff" packs all retrieved chunks directly into a single LLM prompt.
    qa = RetrievalQA.from_chain_type(
        llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever()
    )
    result = qa.run(args.query)
    print(result)
[ "lancedb.connect" ]
[((859, 905), 're.findall', 're.findall', (['"""pandas.documentation(.*).html"""', 'm'], {}), "('pandas.documentation(.*).html', m)\n", (869, 905), False, 'import re\n'), ((1074, 1138), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Code Documentation QA Bot"""'}), "(description='Code Documentation QA Bot')\n", (1097, 1138), False, 'import argparse\n'), ((1704, 1720), 'pathlib.Path', 'Path', (['"""docs.pkl"""'], {}), "('docs.pkl')\n", (1708, 1720), False, 'from pathlib import Path\n'), ((1754, 1871), 'requests.get', 'requests.get', (['"""https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip"""'], {}), "(\n 'https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip'\n )\n", (1766, 1871), False, 'import requests\n'), ((1984, 2032), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""/tmp/pandas.documentation.zip"""'], {}), "('/tmp/pandas.documentation.zip')\n", (1999, 2032), False, 'import zipfile\n'), ((2909, 2975), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(200)'}), '(chunk_size=1000, chunk_overlap=200)\n', (2939, 2975), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((3068, 3086), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (3084, 3086), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((3097, 3128), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (3112, 3128), False, 'import lancedb\n'), ((3414, 3477), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'table'}), '(documents, embeddings, connection=table)\n', (3436, 3477), False, 'from langchain.vectorstores import LanceDB\n'), ((2284, 2321), 'langchain.document_loaders.BSHTMLLoader', 'BSHTMLLoader', (['p'], {'open_encoding': '"""utf8"""'}), "(p, open_encoding='utf8')\n", (2296, 2321), False, 'from langchain.document_loaders import BSHTMLLoader\n'), ((2730, 2751), 'pickle.dump', 'pickle.dump', (['docs', 'fh'], {}), '(docs, fh)\n', (2741, 2751), False, 'import pickle\n'), ((2822, 2837), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (2833, 2837), False, 'import pickle\n'), ((3529, 3537), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (3535, 3537), False, 'from langchain.llms import OpenAI\n'), ((2127, 2172), 'pathlib.Path', 'Path', (['"""/tmp/pandas_docs/pandas.documentation"""'], {}), "('/tmp/pandas_docs/pandas.documentation')\n", (2131, 2172), False, 'from pathlib import Path\n')]
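The sample above seeds the LanceDB table with a single dummy row before handing it to LangChain; the seed row exists only to fix the table schema (vector width and column names), since LanceDB infers its Arrow schema from the first data it sees. A minimal sketch of that bootstrap pattern in isolation; the table name and path here are illustrative, not taken from the sample:

import lancedb
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import LanceDB

embeddings = OpenAIEmbeddings()
db = lancedb.connect("/tmp/lancedb")  # directory-backed local database
# The seed row fixes the schema; mode="overwrite" discards any previous table of the same name.
table = db.create_table(
    "docs_demo",  # illustrative table name
    data=[{"vector": embeddings.embed_query("Hello World"), "text": "Hello World", "id": "1"}],
    mode="overwrite",
)
# LangChain then writes the real documents into the same table:
# docsearch = LanceDB.from_documents(documents, embeddings, connection=table)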
from langchain.vectorstores.lancedb import LanceDB
import lancedb

from .embedding.base import BaseEmbedding
from .base import VectorStore


class LanceDBVectorStore(VectorStore):
    def __init__(self, index_name: str, db_path: str, embeddings: BaseEmbedding, api_key: str | None = None):
        self.db = lancedb.connect(db_path, api_key=api_key)
        if index_name not in self.db.table_names():
            self.table = self.db.create_table(
                name=index_name,
                data=[{
                    "id": "1",
                    "text": "Hello World",
                    "vector": embeddings.client.embed_query("Hello World"),
                }],
                mode="overwrite",
            )
        else:
            self.table = self.db.open_table(index_name)
        self.embeddings = embeddings
        self.client = LanceDB(connection=self.table, embedding=embeddings.client)
[ "lancedb.connect" ]
[((309, 350), 'lancedb.connect', 'lancedb.connect', (['db_path'], {'api_key': 'api_key'}), '(db_path, api_key=api_key)\n', (324, 350), False, 'import lancedb\n'), ((854, 913), 'langchain.vectorstores.lancedb.LanceDB', 'LanceDB', ([], {'connection': 'self.table', 'embedding': 'embeddings.client'}), '(connection=self.table, embedding=embeddings.client)\n', (861, 913), False, 'from langchain.vectorstores.lancedb import LanceDB\n')]
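The wrapper above uses the create-or-open idiom so that constructing it twice reuses the existing index instead of clobbering it. A hedged standalone sketch of that idiom with plain lancedb, independent of the repo-internal BaseEmbedding type; function and argument names here are illustrative:

import lancedb

def get_or_create_table(db_path: str, index_name: str, seed_vector: list[float]):
    # Create-or-open: create the table once with a schema-fixing seed row, open it thereafter.
    db = lancedb.connect(db_path)
    if index_name not in db.table_names():
        return db.create_table(
            name=index_name,
            data=[{"id": "1", "text": "Hello World", "vector": seed_vector}],
            mode="overwrite",
        )
    return db.open_table(index_name)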
import os
import glob
import tqdm
import pathtrees as pt
import numpy as np
import pandas as pd
import lancedb
import matplotlib.pyplot as plt
from .step_annotations import load_object_annotations_from_csvs, get_obj_ann
from IPython import embed
from ..config import get_cfg

# def load_object_annotations(meta_csv, states_csv):
#     meta_df = pd.read_csv(meta_csv).set_index('video_name').groupby(level=0).last()
#     object_names = []
#     for c in meta_df.columns:
#         if c.startswith('#'):
#             meta_df[c[1:]] = meta_df.pop(c).fillna('').apply(lambda s: [int(float(x)) for x in str(s).split('+') if x != ''])
#             object_names.append(c[1:])
#     states_df = pd.read_csv(states_csv)
#     states_df = states_df[states_df.time.fillna('') != '']
#     # print(states_df.shape)
#     print(set(states_df.video_name.unique()) - set(meta_df.index.unique()))
#     states_df = states_df[states_df.video_name.isin(meta_df.index.unique())]
#     states_df['time'] = pd.to_timedelta(states_df.time.apply(lambda x: f'00:{x}'))
#     states_df['start_frame'] = (states_df.time.dt.total_seconds() * meta_df.fps.loc[states_df.video_name].values).astype(int)
#     # print(states_df.shape)
#
#     # creating a dict of {video_id: {track_id: df}}
#     dfs = {}
#     for vid, row in meta_df.iterrows():
#         objs = {}
#         sdf = states_df[states_df.video_name == vid]
#         for c in object_names:
#             if c not in sdf.columns:
#                 continue
#             odf = sdf[[c, 'start_frame']].copy().rename(columns={c: "state"})
#             odf = odf[odf.state.fillna('') != '']
#             odf = odf.drop_duplicates(subset=['start_frame'], keep='last')
#             odf['stop_frame'] = odf['start_frame'].shift(-1)
#             odf['object'] = c
#             if not len(odf):
#                 continue
#             track_id=None
#             for track_id in row[c]:
#                 objs[track_id] = odf
#             print(vid, track_id, odf.shape)
#         dfs[vid] = objs
#     return dfs


def get_obj_anns(dfs, frame_idx):
    idxs = []
    for i in frame_idx:
        ds = {
            k: df[(df.start_frame <= i) & (pd.isna(df.stop_frame) | (df.stop_frame > i))]
            for k, df in dfs.items()
        }
        obj = list(set(d.object.iloc[0] for d in ds.values() if len(d)))
        assert len(obj) < 2, f"Something is wrong.. disagreeing labels ({obj}) assigned to object track"
        idxs.append({
            **{f'{k}_state': d.state.iloc[-1] for k, d in ds.items() if len(d)},
            'object': obj[0],
        } if obj else {})
    return pd.DataFrame(idxs)


def load_data(cfg, data_file_pattern, use_aug=True, data_slice=None):
    '''Load npz files (one per video) with embedding and label keys and concatenate
    '''
    embeddings_list, df_list = [], []
    dfs = {
        k: load_object_annotations_from_csvs(cfg.DATASET.META_CSV, f)
        for k, f in cfg.DATASET.STATES_CSVS.items()
    }
    # embed()
    fs = glob.glob(data_file_pattern) if not isinstance(data_file_pattern, list) else data_file_pattern
    print(f"Found {len(fs)} files", fs[:1])
    for f in tqdm.tqdm(fs, desc='loading data...'):
        # print(f)
        # if 'pinwheels' not in f and 'quesadilla' not in f: continue
        # if 'plain' not in f:
        #     print(f)
        #     continue
        data = np.load(f)
        z = data['z'].astype(np.float32)
        z = z / np.linalg.norm(z, axis=-1, keepdims=True)
        frame_idx = data['frame_index']

        # maybe filter out augmentations
        augmented = data.get('augmented')
        if augmented is None:
            augmented = np.zeros(len(z), dtype=bool)
        if not use_aug:
            z = z[~augmented]
            frame_idx = frame_idx[~augmented]

        # get video ID and track ID
        video_id = data.get('video_name')
        if video_id is None:
            video_id = f.split('/')[-3]
        else:
            video_id = video_id.item()
            video_id = os.path.splitext(video_id)[0]
        track_id = data.get('track_id')
        if track_id is None:
            track_id = f.split('/')[-1].split('.')[0]
        else:
            track_id = track_id.item()
        track_id = int(track_id)

        dfsi = {k: df[video_id][track_id] for k, df in dfs.items() if video_id in df and track_id in df[video_id]}
        for k in dfsi:
            tqdm.tqdm.write(f"{k} {set(dfs[k])&{video_id}}")
            if video_id in dfs[k]:
                tqdm.tqdm.write(f"{k} {set(dfs[k][video_id])&{track_id}}")
        if not dfsi:
            tqdm.tqdm.write(f"Skipping: {video_id}: {track_id}")
            continue
        # tqdm.tqdm.write(f"Using: {video_id}: {track_id}")

        # get object state annotations
        ann = get_obj_anns(dfsi, frame_idx)
        if not all(ann.shape):
            tqdm.tqdm.write(f"no data for {video_id}.{track_id} {ann.shape} {z.shape}")
            for k in dfs:
                tqdm.tqdm.write(f"{k} {set(dfs[k])&{video_id}}")
            continue
        tqdm.tqdm.write(f"using {video_id}.{track_id} {ann.shape} {set(dfsi)} {z.shape}")

        embeddings_list.append(z)
        df_list.append(pd.DataFrame({
            'index': frame_idx,
            'object': ann.object,
            **{k: ann[k] for k in ann.columns if k.endswith('_state')},
            'track_id': track_id,
            'video_id': video_id,
            'augmented': augmented,
        }))
        # break
    df = pd.concat(df_list)
    df['vector'] = [x for xs in embeddings_list for x in xs]
    return df


def cluster_reduce(data, y, n_clusters=200, n_pts_per_cluster=20, n_clusters_per_class=10):
    from sklearn.preprocessing import StandardScaler
    from sklearn.decomposition import PCA
    from sklearn.cluster import KMeans
    embed()
    data = data[y=='peanut-butter']
    y=y[y=='peanut-butter']

    # Step 0: Normalization
    scaler = StandardScaler()
    data = scaler.fit_transform(data)

    # Step 1: PCA
    print(data.shape)
    pca = PCA(n_components=30)  # Choose the number of components based on your data
    data_pca = pca.fit_transform(data)
    print(data_pca.shape)

    plt.figure(figsize=(15, 15))
    for yi in np.unique(y):
        plt.scatter(data_pca[y==yi, 0], data_pca[y==yi, 1], label=yi)
    plt.legend()
    plt.savefig("PCA.png")
    plt.close()

    # Perform k-means clustering
    _, inv, counts = np.unique(y, return_inverse=True, return_counts=True)
    p = 1 / counts[inv]
    p = p / p.sum()
    n = n_clusters * n_pts_per_cluster * 10
    if n < len(y):
        idxs = np.random.choice(len(y), n, p=p, replace=False)
        np.sort(idxs)
        data = data[idxs]
    kmeans = KMeans(n_clusters=n_clusters, random_state=42)
    kmeans.fit(data_pca)
    clusters = kmeans.labels_
    # unique, count = np.unique(labels, return_counts=True)
    # for c, u in sorted(zip(count, unique)):
    #     print(u, c)

    # Sample a fixed number of points from each cluster
    sampled_points = []
    cluster_indices = {}
    for i in np.unique(clusters):
        indices = np.where(clusters == i)[0]
        y_counts = pd.Series(y[indices]).value_counts()
        y_top = y_counts.index[0]
        cluster_indices.setdefault(y_top, []).append((i, y_counts, indices))
    sampled_points = []
    for y_top, xs in cluster_indices.items():
        print(y_top, len(xs))
        xs = sorted(xs, key=lambda x: (x[1]/x[1].sum()).iloc[0], reverse=True)
        for (i, c, indices) in xs[:n_clusters_per_class]:
            print(i, c)
            sampled_points.extend(indices)
            # print(i, len(indices), pd.Series(y[indices]).value_counts().iloc[:2].to_dict())
        # if len(indices) > n_pts_per_cluster:
        #     indices = np.random.choice(indices, n_pts_per_cluster, replace=False)
        # sampled_points.extend(indices)
    return np.array(sorted(sampled_points))


def dump_db(db_fname, df):
    db = lancedb.connect(db_fname)
    table_names = db.table_names()

    # ---------------------- Write out table for each object --------------------- #
    for object_name, odf in tqdm.tqdm(df.groupby('object')):
        # if object_name != 'tortilla': continue
        if object_name in table_names:
            # if not overwrite:
            #     print("table", object_name, 'exists')
            #     return
            db.drop_table(object_name)
        print(object_name, len(odf))
        print(odf.describe())
        # idx = cluster_reduce(np.array(list(odf.vector.values)), odf.super_simple_state.values)
        # odf = odf.iloc[idx]
        # print(odf.describe())
        tbl = db.create_table(object_name, data=odf)#.iloc[idx]


def fix_vocab(sdf):
    RENAME = {
        '[partial]': '',
        '[full]': '',
        'floss-underneath': 'ends-cut',
        'floss-crossed': 'ends-cut',
        'raisins[cooked]': 'raisins',
        'oatmeal[cooked]+raisins': 'oatmeal+raisins',
        'teabag': 'tea-bag',
        '+stirrer': '',
        '[stirred]': '',
        'water+honey': 'water',
        'with-quesadilla': 'with-food',
        'with-pinwheels': 'with-food',
    }
    sdf['mod_state'] = sdf.full_state.copy()
    for old, new in RENAME.items():
        sdf['mod_state'] = sdf.mod_state.str.replace(old, new)
    sdf = sdf[~sdf.mod_state.isin(['folding', 'rolling', 'oatmeal+raisins+cinnamon', 'oatmeal+raisins+cinnamon+honey', 'on-plate'])]
    sdf.groupby('object').mod_state.value_counts()
    sdf['super_simple_state'] = sdf['mod_state']
    return sdf


import ipdb
@ipdb.iex
def build(config_name, embeddings_dir='embeddings', overwrite=False):
    cfg = get_cfg(config_name)
    tree = pt.tree(cfg.DATASET.ROOT, {
        '{embeddings_dir}/{field_name}/{video_id}/{emb_type}/{track_id}.npz': 'emb_file',
        '{emb_type}.lancedb': 'db_fname',
    }).specify(embeddings_dir=embeddings_dir)
    # emb_dir = os.path.join(cfg.DATASET.ROOT, 'embeddings1', cfg.EVAL.DETECTION_NAME)
    # emb_types = cfg.EVAL.EMBEDDING_TYPES

    for emb_type in ['clip']:  #emb_types:
        # ---------------------- Load the embeddings and states ---------------------- #
        data_file_pattern = tree.emb_file.specify(emb_type=emb_type).glob_format()  #f'{emb_dir}/*/{emb_type}/*.npz'
        df = load_data(cfg, data_file_pattern)
        df = fix_vocab(df)

        # ----------------------------- Open the database ---------------------------- #
        # db_fname = os.path.join(cfg.DATASET.ROOT, f'{cfg.EVAL.DETECTION_NAME}_{emb_type}.lancedb')
        db_fname = tree.db_fname.format(emb_type=emb_type)
        dump_db(db_fname, df)


def vis(db_fname):
    db = lancedb.connect(db_fname)
    for name in db.table_names():
        print(name)
        df = db[name].to_pandas()
        print(df.describe(include='all'))
    embed()


if __name__ == '__main__':
    import fire
    fire.Fire()
[ "lancedb.connect" ]
[((2658, 2676), 'pandas.DataFrame', 'pd.DataFrame', (['idxs'], {}), '(idxs)\n', (2670, 2676), True, 'import pandas as pd\n'), ((3211, 3248), 'tqdm.tqdm', 'tqdm.tqdm', (['fs'], {'desc': '"""loading data..."""'}), "(fs, desc='loading data...')\n", (3220, 3248), False, 'import tqdm\n'), ((5542, 5560), 'pandas.concat', 'pd.concat', (['df_list'], {}), '(df_list)\n', (5551, 5560), True, 'import pandas as pd\n'), ((5869, 5876), 'IPython.embed', 'embed', ([], {}), '()\n', (5874, 5876), False, 'from IPython import embed\n'), ((5982, 5998), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5996, 5998), False, 'from sklearn.preprocessing import StandardScaler\n'), ((6088, 6108), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(30)'}), '(n_components=30)\n', (6091, 6108), False, 'from sklearn.decomposition import PCA\n'), ((6233, 6261), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (6243, 6261), True, 'import matplotlib.pyplot as plt\n'), ((6276, 6288), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (6285, 6288), True, 'import numpy as np\n'), ((6364, 6376), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6374, 6376), True, 'import matplotlib.pyplot as plt\n'), ((6381, 6403), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""PCA.png"""'], {}), "('PCA.png')\n", (6392, 6403), True, 'import matplotlib.pyplot as plt\n'), ((6408, 6419), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6417, 6419), True, 'import matplotlib.pyplot as plt\n'), ((6475, 6528), 'numpy.unique', 'np.unique', (['y'], {'return_inverse': '(True)', 'return_counts': '(True)'}), '(y, return_inverse=True, return_counts=True)\n', (6484, 6528), True, 'import numpy as np\n'), ((6760, 6806), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': '(42)'}), '(n_clusters=n_clusters, random_state=42)\n', (6766, 6806), False, 'from sklearn.cluster import KMeans\n'), ((7109, 7128), 'numpy.unique', 'np.unique', (['clusters'], {}), '(clusters)\n', (7118, 7128), True, 'import numpy as np\n'), ((7991, 8016), 'lancedb.connect', 'lancedb.connect', (['db_fname'], {}), '(db_fname)\n', (8006, 8016), False, 'import lancedb\n'), ((10675, 10700), 'lancedb.connect', 'lancedb.connect', (['db_fname'], {}), '(db_fname)\n', (10690, 10700), False, 'import lancedb\n'), ((10835, 10842), 'IPython.embed', 'embed', ([], {}), '()\n', (10840, 10842), False, 'from IPython import embed\n'), ((10891, 10902), 'fire.Fire', 'fire.Fire', ([], {}), '()\n', (10900, 10902), False, 'import fire\n'), ((3059, 3087), 'glob.glob', 'glob.glob', (['data_file_pattern'], {}), '(data_file_pattern)\n', (3068, 3087), False, 'import glob\n'), ((3432, 3442), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (3439, 3442), True, 'import numpy as np\n'), ((6298, 6363), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data_pca[y == yi, 0]', 'data_pca[y == yi, 1]'], {'label': 'yi'}), '(data_pca[y == yi, 0], data_pca[y == yi, 1], label=yi)\n', (6309, 6363), True, 'import matplotlib.pyplot as plt\n'), ((6707, 6720), 'numpy.sort', 'np.sort', (['idxs'], {}), '(idxs)\n', (6714, 6720), True, 'import numpy as np\n'), ((3500, 3541), 'numpy.linalg.norm', 'np.linalg.norm', (['z'], {'axis': '(-1)', 'keepdims': '(True)'}), '(z, axis=-1, keepdims=True)\n', (3514, 3541), True, 'import numpy as np\n'), ((4069, 4095), 'os.path.splitext', 'os.path.splitext', (['video_id'], {}), '(video_id)\n', (4085, 4095), False, 'import os\n'), ((4651, 4703), 'tqdm.tqdm.write', 
'tqdm.tqdm.write', (['f"""Skipping: {video_id}: {track_id}"""'], {}), "(f'Skipping: {video_id}: {track_id}')\n", (4666, 4703), False, 'import tqdm\n'), ((4912, 4987), 'tqdm.tqdm.write', 'tqdm.tqdm.write', (['f"""no data for {video_id}.{track_id} {ann.shape} {z.shape}"""'], {}), "(f'no data for {video_id}.{track_id} {ann.shape} {z.shape}')\n", (4927, 4987), False, 'import tqdm\n'), ((7148, 7171), 'numpy.where', 'np.where', (['(clusters == i)'], {}), '(clusters == i)\n', (7156, 7171), True, 'import numpy as np\n'), ((9707, 9859), 'pathtrees.tree', 'pt.tree', (['cfg.DATASET.ROOT', "{'{embeddings_dir}/{field_name}/{video_id}/{emb_type}/{track_id}.npz':\n 'emb_file', '{emb_type}.lancedb': 'db_fname'}"], {}), "(cfg.DATASET.ROOT, {\n '{embeddings_dir}/{field_name}/{video_id}/{emb_type}/{track_id}.npz':\n 'emb_file', '{emb_type}.lancedb': 'db_fname'})\n", (9714, 9859), True, 'import pathtrees as pt\n'), ((7194, 7215), 'pandas.Series', 'pd.Series', (['y[indices]'], {}), '(y[indices])\n', (7203, 7215), True, 'import pandas as pd\n'), ((2203, 2225), 'pandas.isna', 'pd.isna', (['df.stop_frame'], {}), '(df.stop_frame)\n', (2210, 2225), True, 'import pandas as pd\n')]
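dump_db in the sample above relies on LanceDB inferring the Arrow schema directly from a pandas DataFrame whose 'vector' column holds the embeddings. A minimal sketch of that load path; the table name, column names, and random embeddings here are illustrative:

import lancedb
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "index": [0, 1],
    "object": ["tortilla", "tortilla"],
    "vector": [np.random.rand(512).tolist(), np.random.rand(512).tolist()],  # stand-in embeddings
})
db = lancedb.connect("/tmp/demo.lancedb")
if "tortilla" in db.table_names():
    db.drop_table("tortilla")  # same drop-then-recreate overwrite strategy as dump_db
tbl = db.create_table("tortilla", data=df)  # schema inferred from the DataFrame
print(tbl.to_pandas().head())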
## Imports:
import os
from langchain.chat_models import ChatOpenAI
from langchain.tools import WikipediaQueryRun
from langchain.utilities import WikipediaAPIWrapper
from langchain.tools import DuckDuckGoSearchRun
from langchain.agents.agent_toolkits import create_python_agent
from langchain.tools.python.tool import PythonREPLTool
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.document_loaders import WebBaseLoader
from langchain.agents import Tool, initialize_agent, AgentType
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import LanceDB
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate, MessagesPlaceholder
from langchain.memory import ConversationBufferWindowMemory
from app.task import Task, TaskDescription, TaskList
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import lancedb

## Set Env Variables
if None == os.environ.get('OPENAI_API_KEY'):
    raise ValueError("Env OPENAI_API_KEY not set")
else:
    OAI_TOKEN = os.environ.get('OPENAI_API_KEY')
if None == os.environ.get('OPENAI_CHAT_MODEL'):
    OPENAI_CHAT_MODEL = "gpt-3.5-turbo-0613"
else:
    OPENAI_CHAT_MODEL = os.environ.get('OPENAI_CHAT_MODEL')
if None == os.environ.get('OPENAI_CHAT_TEMPERATURE'):
    OPENAI_CHAT_TEMPERATURE = 0.3
else:
    OPENAI_CHAT_TEMPERATURE = os.environ.get('OPENAI_CHAT_TEMPERATURE')
if None == os.environ.get('OPENAI_MAX_CHAT_TOKENS'):
    OPENAI_MAX_CHAT_TOKENS = 200
else:
    OPENAI_MAX_CHAT_TOKENS = os.environ.get('OPENAI_MAX_CHAT_TOKENS')
if None == os.environ.get('OPENAI_RESEARCH_MODEL'):
    OPENAI_RESEARCH_MODEL = "gpt-3.5-turbo-16k-0613"
else:
    OPENAI_RESEARCH_MODEL = os.environ.get('OPENAI_RESEARCH_MODEL')
if None == os.environ.get('OPENAI_RESEARCH_TEMPERATURE'):
    OPENAI_RESEARCH_TEMPERATURE = 0.1
else:
    OPENAI_RESEARCH_TEMPERATURE = os.environ.get('OPENAI_RESEARCH_TEMPERATURE')
if None == os.environ.get('OPENAI_MAX_RESEARCH_TOKENS'):
    OPENAI_MAX_RESEARCH_TOKENS = 500
else:
    OPENAI_MAX_RESEARCH_TOKENS = os.environ.get('OPENAI_MAX_RESEARCH_TOKENS')
if None == os.environ.get('HELIOS_URL'):
    HELIOS_URL = "helios.latrobe.group"
else:
    HELIOS_URL = os.environ.get('HELIOS_URL')

## Set up OpenAI
VERBOSE = False
chat_model = ChatOpenAI(
    temperature=OPENAI_CHAT_TEMPERATURE,
    max_tokens=OPENAI_MAX_CHAT_TOKENS,
    model=OPENAI_CHAT_MODEL,
)
research_model = ChatOpenAI(
    temperature=OPENAI_RESEARCH_TEMPERATURE,
    max_tokens=OPENAI_MAX_RESEARCH_TOKENS,
    model=OPENAI_RESEARCH_MODEL,
)
embeddings = OpenAIEmbeddings()

## Set up FastAPI
helios_app = FastAPI()
origins = [
    f"http://{HELIOS_URL}",
    f"https://{HELIOS_URL}",
    "http://localhost",
    "http://localhost:8000",
]
helios_app.add_middleware(
    CORSMiddleware,
    allow_credentials=True,
    allow_origins=origins,
    allow_methods=["*"],
    allow_headers=["*"],
)

## Set up Task Queue
TASKS = TaskList()

## Set up knowledge VectorStore and retriever chain
db_name = "./helios_kb.db"
table_name = "helios_kb"
db = lancedb.connect(db_name)
if table_name not in db.table_names():
    table = db.create_table(
        "helios_kb",
        data=[
            {
                "vector": embeddings.embed_query("You are Helios, an AI chatbot that can perform background research tasks."),
                "text": "You are Helios, an AI chatbot that can perform background research tasks with access to the internet.",
                "id": "1",
            }
        ],
        mode="create",
    )
else:
    table = db.open_table(table_name)
vectorstore = LanceDB(connection=table, embedding=embeddings)
kb_retriever = vectorstore.as_retriever()


def check_search_result(query: str, result: str) -> bool:
    '''Checks if the result of a search is a well informed answer to the query.'''
    prompt = PromptTemplate(
        input_variables=["search_query", "search_response"],
        template="Answer only 'Yes' or 'No' only - did the following response actually answer the question or include the right information to help the user with the query - yes or no:\n#####\nQuery: {search_query}\n#####\nResponse:{search_response}",
    )
    chain = LLMChain(llm=chat_model, prompt=prompt)
    check_response = chain.run(
        {
            "search_query": query,
            "search_response": result,
        }
    )
    if "YES" in check_response.upper():
        pass
    else:
        add_new_task(description=f"Use all of your tools to research this query: {query}")


def search_kb(query: str) -> str:
    compressor = LLMChainExtractor.from_llm(research_model)
    compression_retriever = ContextualCompressionRetriever(base_compressor=compressor, base_retriever=kb_retriever)
    compressed_docs = compression_retriever.get_relevant_documents(query)
    return compressed_docs


## Define Helper Functions
def add_new_task(description: str) -> Task:
    '''Adds a new task to the task queue.'''
    task_id = len(TASKS)
    task = Task(task_id=task_id, description=description)
    print(f"Adding new task: {description}")
    task.pending()
    TASKS.append(task)
    return task


def do_tasks():
    '''Runs the task queue.'''
    pending_tasks = [t for t in TASKS if t.get_status() == 'pending']
    for TASK in pending_tasks:
        run_task(task=TASK)


def run_task(task: Task):
    '''Runs a task.'''
    task.running()
    response = research_agent.run("Use your tools to create a knowledge graph on this topic: {TASK}".format(TASK=task.description))
    vectorstore.add_texts(texts=[response], metadatas=[{"id": task.task_id, "task": task.description}])
    task.done(result=response)
    return task


def load_web_page(url: str) -> str:
    '''Loads a web page and returns the text.'''
    loader = WebBaseLoader(url)
    loader.requests_kwargs = {'verify':False}
    data = loader.load()
    return f"Text from {url}:\n{data}"


## Set up LangChain Agents
python_agent = create_python_agent(
    llm=research_model,
    tool=PythonREPLTool(),
    agent_type=AgentType.OPENAI_FUNCTIONS,
    agent_executor_kwargs={"handle_parsing_errors": True},
)
chat_tools = [
    Tool(
        name="knowledgebase-search",
        func=search_kb,
        description="Searches the knowledge base for information. Input must be a list of key words or search terms.",
    ),
    Tool(
        name="add-research-task",
        func=add_new_task,
        description="If the knowledgebase doesn't have an answer, use this tool to start a background research task. Input must be a description of new research or a task to be done.",
    )
]
wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
search = DuckDuckGoSearchRun()
research_tools = [
    Tool(
        name="knowledgebase-search",
        func=search_kb,
        description="Searches the knowledge base for information. Input must be a list of key words or search terms.",
    ),
    Tool(
        name="wikipedia-search",
        func=wikipedia.run,
        description="Searches Wikipedia information about people, places and historical facts. Input must be a list of key words or search terms.",
    ),
    Tool(
        name="web-search",
        func=search.run,
        description="Searches the web using DuckDuckGo for information from web pages. Input must be a list of key words or search terms.",
    ),
    Tool(
        name="run-python-code",
        func=python_agent.run,
        description="Sends a task to another agent that will write and run custom python code to achieve a task. Input must be a task or goal that a python programmer could achieve.",
    ),
    Tool(
        name="add-research-task",
        func=add_new_task,
        description="Only use this tool if the user asks you to add a new task. Input must be a description of new research or a task to be done.",
    )
]
agent_kwargs = {
    "extra_prompt_messages": [MessagesPlaceholder(variable_name="memory")],
}
memory = ConversationBufferWindowMemory(memory_key="memory", return_messages=True, k=4)

## Init Agents
chat_agent = initialize_agent(
    chat_tools,
    chat_model,
    agent=AgentType.OPENAI_MULTI_FUNCTIONS,
    agent_kwargs=agent_kwargs,
    memory=memory,
    handle_parsing_errors=True,
    verbose=VERBOSE)
research_agent = initialize_agent(
    research_tools,
    research_model,
    agent=AgentType.OPENAI_MULTI_FUNCTIONS,
    handle_parsing_errors=True,
    verbose=VERBOSE)

## Add FastAPI Routes:
@helios_app.get("/")
async def root(background_tasks: BackgroundTasks):
    '''Returns the status of the bot.'''
    background_tasks.add_task(do_tasks)
    return {"status": "ok", "version": "helios v{VERSION}".format(VERSION=os.environ.get('VERSION'))}


@helios_app.get("/tasks")
async def get_tasks(background_tasks: BackgroundTasks):
    '''Returns a list of all tasks and their status.'''
    background_tasks.add_task(do_tasks)
    return TASKS.model_dump_json(indent = 2)


@helios_app.get("/tasks/{task_id}")
async def get_task(task_id: int, background_tasks: BackgroundTasks):
    '''Returns the status of a specific task.'''
    background_tasks.add_task(do_tasks)
    return TASKS[task_id].model_dump_json(indent = 2)


@helios_app.post("/tasks/")
async def create_task(task: TaskDescription, background_tasks: BackgroundTasks):
    '''Creates a new task.'''
    new_task = add_new_task(task.description)
    background_tasks.add_task(do_tasks)
    if isinstance(new_task, Task):
        background_tasks.add_task(do_tasks)
        return new_task.model_dump_json(indent = 2)
    else:
        raise HTTPException(status_code=500, detail="Task creation failed")


class SearchQuery(BaseModel):
    q: str


@helios_app.post("/search/")
async def search(q: SearchQuery, background_tasks: BackgroundTasks):
    '''Searches the knowledgebase for an answer to a question.'''
    response = search_kb(q.q)
    background_tasks.add_task(check_search_result, q.q, response)
    background_tasks.add_task(do_tasks)
    return response


class ChatMessage(BaseModel):
    message: str


@helios_app.post("/chat/")
async def chat(message: ChatMessage, background_tasks: BackgroundTasks):
    '''Chats with the bot.'''
    agent_prompt = f"Use your tools to give a detailed answer to this message - if you can't find the answer, say I don't know: {message.message}"
    response = chat_agent.run(agent_prompt)
    response = response.replace("\n", "<br />").replace("\r", "").replace("\"", "'")
    background_tasks.add_task(check_search_result, message.message, response)
    background_tasks.add_task(do_tasks)
    return response
[ "lancedb.connect" ]
[((2468, 2580), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'OPENAI_CHAT_TEMPERATURE', 'max_tokens': 'OPENAI_MAX_CHAT_TOKENS', 'model': 'OPENAI_CHAT_MODEL'}), '(temperature=OPENAI_CHAT_TEMPERATURE, max_tokens=\n OPENAI_MAX_CHAT_TOKENS, model=OPENAI_CHAT_MODEL)\n', (2478, 2580), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2608, 2732), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'OPENAI_RESEARCH_TEMPERATURE', 'max_tokens': 'OPENAI_MAX_RESEARCH_TOKENS', 'model': 'OPENAI_RESEARCH_MODEL'}), '(temperature=OPENAI_RESEARCH_TEMPERATURE, max_tokens=\n OPENAI_MAX_RESEARCH_TOKENS, model=OPENAI_RESEARCH_MODEL)\n', (2618, 2732), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2756, 2774), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (2772, 2774), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2807, 2816), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (2814, 2816), False, 'from fastapi import FastAPI, HTTPException, BackgroundTasks\n'), ((3125, 3135), 'app.task.TaskList', 'TaskList', ([], {}), '()\n', (3133, 3135), False, 'from app.task import Task, TaskDescription, TaskList\n'), ((3246, 3270), 'lancedb.connect', 'lancedb.connect', (['db_name'], {}), '(db_name)\n', (3261, 3270), False, 'import lancedb\n'), ((3784, 3831), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (3791, 3831), False, 'from langchain.vectorstores import LanceDB\n'), ((6844, 6865), 'langchain.tools.DuckDuckGoSearchRun', 'DuckDuckGoSearchRun', ([], {}), '()\n', (6863, 6865), False, 'from langchain.tools import DuckDuckGoSearchRun\n'), ((8113, 8191), 'langchain.memory.ConversationBufferWindowMemory', 'ConversationBufferWindowMemory', ([], {'memory_key': '"""memory"""', 'return_messages': '(True)', 'k': '(4)'}), "(memory_key='memory', return_messages=True, k=4)\n", (8143, 8191), False, 'from langchain.memory import ConversationBufferWindowMemory\n'), ((8221, 8397), 'langchain.agents.initialize_agent', 'initialize_agent', (['chat_tools', 'chat_model'], {'agent': 'AgentType.OPENAI_MULTI_FUNCTIONS', 'agent_kwargs': 'agent_kwargs', 'memory': 'memory', 'handle_parsing_errors': '(True)', 'verbose': 'VERBOSE'}), '(chat_tools, chat_model, agent=AgentType.\n OPENAI_MULTI_FUNCTIONS, agent_kwargs=agent_kwargs, memory=memory,\n handle_parsing_errors=True, verbose=VERBOSE)\n', (8237, 8397), False, 'from langchain.agents import Tool, initialize_agent, AgentType\n'), ((8438, 8576), 'langchain.agents.initialize_agent', 'initialize_agent', (['research_tools', 'research_model'], {'agent': 'AgentType.OPENAI_MULTI_FUNCTIONS', 'handle_parsing_errors': '(True)', 'verbose': 'VERBOSE'}), '(research_tools, research_model, agent=AgentType.\n OPENAI_MULTI_FUNCTIONS, handle_parsing_errors=True, verbose=VERBOSE)\n', (8454, 8576), False, 'from langchain.agents import Tool, initialize_agent, AgentType\n'), ((1115, 1147), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1129, 1147), False, 'import os\n'), ((1222, 1254), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1236, 1254), False, 'import os\n'), ((1267, 1302), 'os.environ.get', 'os.environ.get', (['"""OPENAI_CHAT_MODEL"""'], {}), "('OPENAI_CHAT_MODEL')\n", (1281, 1302), False, 'import os\n'), ((1379, 1414), 'os.environ.get', 'os.environ.get', (['"""OPENAI_CHAT_MODEL"""'], {}), 
"('OPENAI_CHAT_MODEL')\n", (1393, 1414), False, 'import os\n'), ((1427, 1468), 'os.environ.get', 'os.environ.get', (['"""OPENAI_CHAT_TEMPERATURE"""'], {}), "('OPENAI_CHAT_TEMPERATURE')\n", (1441, 1468), False, 'import os\n'), ((1540, 1581), 'os.environ.get', 'os.environ.get', (['"""OPENAI_CHAT_TEMPERATURE"""'], {}), "('OPENAI_CHAT_TEMPERATURE')\n", (1554, 1581), False, 'import os\n'), ((1594, 1634), 'os.environ.get', 'os.environ.get', (['"""OPENAI_MAX_CHAT_TOKENS"""'], {}), "('OPENAI_MAX_CHAT_TOKENS')\n", (1608, 1634), False, 'import os\n'), ((1704, 1744), 'os.environ.get', 'os.environ.get', (['"""OPENAI_MAX_CHAT_TOKENS"""'], {}), "('OPENAI_MAX_CHAT_TOKENS')\n", (1718, 1744), False, 'import os\n'), ((1757, 1796), 'os.environ.get', 'os.environ.get', (['"""OPENAI_RESEARCH_MODEL"""'], {}), "('OPENAI_RESEARCH_MODEL')\n", (1771, 1796), False, 'import os\n'), ((1885, 1924), 'os.environ.get', 'os.environ.get', (['"""OPENAI_RESEARCH_MODEL"""'], {}), "('OPENAI_RESEARCH_MODEL')\n", (1899, 1924), False, 'import os\n'), ((1937, 1982), 'os.environ.get', 'os.environ.get', (['"""OPENAI_RESEARCH_TEMPERATURE"""'], {}), "('OPENAI_RESEARCH_TEMPERATURE')\n", (1951, 1982), False, 'import os\n'), ((2062, 2107), 'os.environ.get', 'os.environ.get', (['"""OPENAI_RESEARCH_TEMPERATURE"""'], {}), "('OPENAI_RESEARCH_TEMPERATURE')\n", (2076, 2107), False, 'import os\n'), ((2120, 2164), 'os.environ.get', 'os.environ.get', (['"""OPENAI_MAX_RESEARCH_TOKENS"""'], {}), "('OPENAI_MAX_RESEARCH_TOKENS')\n", (2134, 2164), False, 'import os\n'), ((2242, 2286), 'os.environ.get', 'os.environ.get', (['"""OPENAI_MAX_RESEARCH_TOKENS"""'], {}), "('OPENAI_MAX_RESEARCH_TOKENS')\n", (2256, 2286), False, 'import os\n'), ((2299, 2327), 'os.environ.get', 'os.environ.get', (['"""HELIOS_URL"""'], {}), "('HELIOS_URL')\n", (2313, 2327), False, 'import os\n'), ((2392, 2420), 'os.environ.get', 'os.environ.get', (['"""HELIOS_URL"""'], {}), "('HELIOS_URL')\n", (2406, 2420), False, 'import os\n'), ((4029, 4354), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['search_query', 'search_response']", 'template': '"""Answer only \'Yes\' or \'No\' only - did the following response actually answer the question or include the right information to help the user with the query - yes or no:\n#####\nQuery: {search_query}\n#####\nResponse:{search_response}"""'}), '(input_variables=[\'search_query\', \'search_response\'],\n template=\n """Answer only \'Yes\' or \'No\' only - did the following response actually answer the question or include the right information to help the user with the query - yes or no:\n#####\nQuery: {search_query}\n#####\nResponse:{search_response}"""\n )\n', (4043, 4354), False, 'from langchain.prompts import PromptTemplate, MessagesPlaceholder\n'), ((4376, 4415), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'chat_model', 'prompt': 'prompt'}), '(llm=chat_model, prompt=prompt)\n', (4384, 4415), False, 'from langchain.chains import LLMChain\n'), ((4754, 4796), 'langchain.retrievers.document_compressors.LLMChainExtractor.from_llm', 'LLMChainExtractor.from_llm', (['research_model'], {}), '(research_model)\n', (4780, 4796), False, 'from langchain.retrievers.document_compressors import LLMChainExtractor\n'), ((4825, 4917), 'langchain.retrievers.ContextualCompressionRetriever', 'ContextualCompressionRetriever', ([], {'base_compressor': 'compressor', 'base_retriever': 'kb_retriever'}), '(base_compressor=compressor, base_retriever=\n kb_retriever)\n', (4855, 4917), False, 'from langchain.retrievers import 
ContextualCompressionRetriever\n'), ((5167, 5213), 'app.task.Task', 'Task', ([], {'task_id': 'task_id', 'description': 'description'}), '(task_id=task_id, description=description)\n', (5171, 5213), False, 'from app.task import Task, TaskDescription, TaskList\n'), ((5945, 5963), 'langchain.document_loaders.WebBaseLoader', 'WebBaseLoader', (['url'], {}), '(url)\n', (5958, 5963), False, 'from langchain.document_loaders import WebBaseLoader\n'), ((6312, 6482), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""knowledgebase-search"""', 'func': 'search_kb', 'description': '"""Searches the knowledge base for information. Input must be a list of key words or search terms."""'}), "(name='knowledgebase-search', func=search_kb, description=\n 'Searches the knowledge base for information. Input must be a list of key words or search terms.'\n )\n", (6316, 6482), False, 'from langchain.agents import Tool, initialize_agent, AgentType\n'), ((6509, 6745), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""add-research-task"""', 'func': 'add_new_task', 'description': '"""If the knowledgebase doesn\'t have an answer, use this tool to start a background research task. Input must be a description of new research or a task to be done."""'}), '(name=\'add-research-task\', func=add_new_task, description=\n "If the knowledgebase doesn\'t have an answer, use this tool to start a background research task. Input must be a description of new research or a task to be done."\n )\n', (6513, 6745), False, 'from langchain.agents import Tool, initialize_agent, AgentType\n'), ((6889, 7059), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""knowledgebase-search"""', 'func': 'search_kb', 'description': '"""Searches the knowledge base for information. Input must be a list of key words or search terms."""'}), "(name='knowledgebase-search', func=search_kb, description=\n 'Searches the knowledge base for information. Input must be a list of key words or search terms.'\n )\n", (6893, 7059), False, 'from langchain.agents import Tool, initialize_agent, AgentType\n'), ((7086, 7285), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""wikipedia-search"""', 'func': 'wikipedia.run', 'description': '"""Searches Wikipedia information about people, places and historical facts. Input must be a list of key words or search terms."""'}), "(name='wikipedia-search', func=wikipedia.run, description=\n 'Searches Wikipedia information about people, places and historical facts. Input must be a list of key words or search terms.'\n )\n", (7090, 7285), False, 'from langchain.agents import Tool, initialize_agent, AgentType\n'), ((7312, 7494), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""web-search"""', 'func': 'search.run', 'description': '"""Searches the web using DuckDuckGo for information from web pages. Input must be a list of key words or search terms."""'}), "(name='web-search', func=search.run, description=\n 'Searches the web using DuckDuckGo for information from web pages. Input must be a list of key words or search terms.'\n )\n", (7316, 7494), False, 'from langchain.agents import Tool, initialize_agent, AgentType\n'), ((7521, 7758), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""run-python-code"""', 'func': 'python_agent.run', 'description': '"""Sends a task to another agent that will write and run custom python code to achieve a task. 
Input must be a task or goal that a python programmer could achieve."""'}), "(name='run-python-code', func=python_agent.run, description=\n 'Sends a task to another agent that will write and run custom python code to achieve a task. Input must be a task or goal that a python programmer could achieve.'\n )\n", (7525, 7758), False, 'from langchain.agents import Tool, initialize_agent, AgentType\n'), ((7785, 7984), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""add-research-task"""', 'func': 'add_new_task', 'description': '"""Only use this tool if the user asks you to add a new task. Input must be a description of new research or a task to be done."""'}), "(name='add-research-task', func=add_new_task, description=\n 'Only use this tool if the user asks you to add a new task. Input must be a description of new research or a task to be done.'\n )\n", (7789, 7984), False, 'from langchain.agents import Tool, initialize_agent, AgentType\n'), ((6171, 6187), 'langchain.tools.python.tool.PythonREPLTool', 'PythonREPLTool', ([], {}), '()\n', (6185, 6187), False, 'from langchain.tools.python.tool import PythonREPLTool\n'), ((6812, 6833), 'langchain.utilities.WikipediaAPIWrapper', 'WikipediaAPIWrapper', ([], {}), '()\n', (6831, 6833), False, 'from langchain.utilities import WikipediaAPIWrapper\n'), ((8056, 8099), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""memory"""'}), "(variable_name='memory')\n", (8075, 8099), False, 'from langchain.prompts import PromptTemplate, MessagesPlaceholder\n'), ((9741, 9802), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(500)', 'detail': '"""Task creation failed"""'}), "(status_code=500, detail='Task creation failed')\n", (9754, 9802), False, 'from fastapi import FastAPI, HTTPException, BackgroundTasks\n'), ((8859, 8884), 'os.environ.get', 'os.environ.get', (['"""VERSION"""'], {}), "('VERSION')\n", (8873, 8884), False, 'import os\n')]
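The Helios sample wraps an existing LanceDB table as a LangChain vectorstore, then layers an LLM-based contextual compressor over the plain retriever so only query-relevant passages come back. A condensed sketch of that retrieval stack, assuming `table`, `embeddings`, and `research_model` are already set up as in the sample; the query string is illustrative:

from langchain.vectorstores import LanceDB
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor

vectorstore = LanceDB(connection=table, embedding=embeddings)  # table from lancedb.connect(...)
compressor = LLMChainExtractor.from_llm(research_model)       # LLM trims retrieved chunks to the relevant parts
retriever = ContextualCompressionRetriever(
    base_compressor=compressor,
    base_retriever=vectorstore.as_retriever(),
)
docs = retriever.get_relevant_documents("What can Helios do?")  # illustrative query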
""" Chatbot for talking to Github Codespaces using Langchain, Qwen and LanceDB """ import os import shutil import lancedb from langchain.memory import ConversationSummaryMemory from langchain_community.document_loaders import GitLoader from langchain.vectorstores import LanceDB from langchain.embeddings import HuggingFaceEmbeddings from langchain.text_splitter import CharacterTextSplitter from langchain.chat_models import ChatOllama from langchain.chains import ConversationalRetrievalChain def lanceDBConnection(embed): db = lancedb.connect("/tmp/lancedb") table = db.create_table( "github_repo", data=[{"vector": embed.embed_query("Hello World"), "text": "Hello World"}], mode="overwrite", ) return table def vectorStoreSetup(query_path): temp_repo_dir = "./example_data/test_repo1/" if os.path.exists(temp_repo_dir): shutil.rmtree(temp_repo_dir) docs = GitLoader( clone_url=query_path, repo_path=temp_repo_dir, file_filter=lambda file_path: file_path.endswith(".py") or file_path.endswith(".md") or file_path.endswith(".js"), ) docs = docs.load() # chunking text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=0) all_splits = text_splitter.split_documents(docs) # Huggingface embeddings embed = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2") # LanceDB as vector store table = lanceDBConnection(embed) vectorstore = LanceDB.from_documents( documents=all_splits, embedding=HuggingFaceEmbeddings( model_name="sentence-transformers/all-MiniLM-L6-v2" ), connection=table, ) return vectorstore def retrieverSetup(text): vectorstore = vectorStoreSetup(text) # define ChatOllama: using Qwen model for LLM llm = ChatOllama(model="qwen") memory = ConversationSummaryMemory( llm=llm, memory_key="chat_history", return_messages=True ) retriever = vectorstore.as_retriever() # define Retrieval Chain for retriver qa = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, memory=memory) return qa def chat(qa, question): # chat query r = qa.run({"question": question}) return r
[ "lancedb.connect" ]
[((538, 569), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (553, 569), False, 'import lancedb\n'), ((848, 877), 'os.path.exists', 'os.path.exists', (['temp_repo_dir'], {}), '(temp_repo_dir)\n', (862, 877), False, 'import os\n'), ((1205, 1259), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(100)', 'chunk_overlap': '(0)'}), '(chunk_size=100, chunk_overlap=0)\n', (1226, 1259), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1355, 1429), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L6-v2"""'}), "(model_name='sentence-transformers/all-MiniLM-L6-v2')\n", (1376, 1429), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((1871, 1895), 'langchain.chat_models.ChatOllama', 'ChatOllama', ([], {'model': '"""qwen"""'}), "(model='qwen')\n", (1881, 1895), False, 'from langchain.chat_models import ChatOllama\n'), ((1909, 1996), 'langchain.memory.ConversationSummaryMemory', 'ConversationSummaryMemory', ([], {'llm': 'llm', 'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(llm=llm, memory_key='chat_history',\n return_messages=True)\n", (1934, 1996), False, 'from langchain.memory import ConversationSummaryMemory\n'), ((2102, 2180), 'langchain.chains.ConversationalRetrievalChain.from_llm', 'ConversationalRetrievalChain.from_llm', (['llm'], {'retriever': 'retriever', 'memory': 'memory'}), '(llm, retriever=retriever, memory=memory)\n', (2139, 2180), False, 'from langchain.chains import ConversationalRetrievalChain\n'), ((887, 915), 'shutil.rmtree', 'shutil.rmtree', (['temp_repo_dir'], {}), '(temp_repo_dir)\n', (900, 915), False, 'import shutil\n'), ((1588, 1662), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L6-v2"""'}), "(model_name='sentence-transformers/all-MiniLM-L6-v2')\n", (1609, 1662), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n')]
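The record above follows the standard load, split, embed, store, chain pipeline and exposes it through two helpers. A hedged usage sketch of those helpers, assuming an Ollama server is serving the qwen model locally; the repository URL is a placeholder:

qa = retrieverSetup("https://github.com/lancedb/lancedb")  # placeholder repo URL
print(chat(qa, "What does this repository do?"))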
# Ultralytics YOLO 🚀, AGPL-3.0 license

from io import BytesIO
from pathlib import Path
from typing import Any, List, Tuple, Union

import cv2
import numpy as np
import torch
from PIL import Image
from matplotlib import pyplot as plt
from pandas import DataFrame
from tqdm import tqdm

from ultralytics.data.augment import Format
from ultralytics.data.dataset import YOLODataset
from ultralytics.data.utils import check_det_dataset
from ultralytics.models.yolo.model import YOLO
from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks

from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch


class ExplorerDataset(YOLODataset):
    def __init__(self, *args, data: dict = None, **kwargs) -> None:
        super().__init__(*args, data=data, **kwargs)

    def load_image(self, i: int) -> Union[Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]], Tuple[None, None, None]]:
        """Loads 1 image from dataset index 'i' without any resize ops."""
        im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
        if im is None:  # not cached in RAM
            if fn.exists():  # load npy
                im = np.load(fn)
            else:  # read image
                im = cv2.imread(f)  # BGR
                if im is None:
                    raise FileNotFoundError(f"Image Not Found {f}")
            h0, w0 = im.shape[:2]  # orig hw
            return im, (h0, w0), im.shape[:2]

        return self.ims[i], self.im_hw0[i], self.im_hw[i]

    def build_transforms(self, hyp: IterableSimpleNamespace = None):
        """Creates transforms for dataset images without resizing."""
        return Format(
            bbox_format="xyxy",
            normalize=False,
            return_mask=self.use_segments,
            return_keypoint=self.use_keypoints,
            batch_idx=True,
            mask_ratio=hyp.mask_ratio,
            mask_overlap=hyp.overlap_mask,
        )


class Explorer:
    def __init__(
        self, data: Union[str, Path] = "coco128.yaml", model: str = "yolov8n.pt", uri: str = "~/ultralytics/explorer"
    ) -> None:
        checks.check_requirements(["lancedb>=0.4.3", "duckdb"])
        import lancedb

        self.connection = lancedb.connect(uri)
        self.table_name = Path(data).name.lower() + "_" + model.lower()
        self.sim_idx_base_name = (
            f"{self.table_name}_sim_idx".lower()
        )  # Use this name and append thres and top_k to reuse the table
        self.model = YOLO(model)
        self.data = data  # None
        self.choice_set = None

        self.table = None
        self.progress = 0

    def create_embeddings_table(self, force: bool = False, split: str = "train") -> None:
        """
        Create LanceDB table containing the embeddings of the images in the dataset. The table will be reused if it
        already exists. Pass force=True to overwrite the existing table.

        Args:
            force (bool): Whether to overwrite the existing table or not. Defaults to False.
            split (str): Split of the dataset to use. Defaults to 'train'.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            ```
        """
        if self.table is not None and not force:
            LOGGER.info("Table already exists. Reusing it. Pass force=True to overwrite it.")
            return
        if self.table_name in self.connection.table_names() and not force:
            LOGGER.info(f"Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.")
            self.table = self.connection.open_table(self.table_name)
            self.progress = 1
            return
        if self.data is None:
            raise ValueError("Data must be provided to create embeddings table")

        data_info = check_det_dataset(self.data)
        if split not in data_info:
            raise ValueError(
                f"Split {split} is not found in the dataset. Available keys in the dataset are {list(data_info.keys())}"
            )

        choice_set = data_info[split]
        choice_set = choice_set if isinstance(choice_set, list) else [choice_set]
        self.choice_set = choice_set
        dataset = ExplorerDataset(img_path=choice_set, data=data_info, augment=False, cache=False, task=self.model.task)

        # Create the table schema
        batch = dataset[0]
        vector_size = self.model.embed(batch["im_file"], verbose=False)[0].shape[0]
        table = self.connection.create_table(self.table_name, schema=get_table_schema(vector_size), mode="overwrite")
        table.add(
            self._yield_batches(
                dataset,
                data_info,
                self.model,
                exclude_keys=["img", "ratio_pad", "resized_shape", "ori_shape", "batch_idx"],
            )
        )

        self.table = table

    def _yield_batches(self, dataset: ExplorerDataset, data_info: dict, model: YOLO, exclude_keys: List[str]):
        """Generates batches of data for embedding, excluding specified keys."""
        for i in tqdm(range(len(dataset))):
            self.progress = float(i + 1) / len(dataset)
            batch = dataset[i]
            for k in exclude_keys:
                batch.pop(k, None)
            batch = sanitize_batch(batch, data_info)
            batch["vector"] = model.embed(batch["im_file"], verbose=False)[0].detach().tolist()
            yield [batch]

    def query(
        self, imgs: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, limit: int = 25
    ) -> Any:  # pyarrow.Table
        """
        Query the table for similar images. Accepts a single image or a list of images.

        Args:
            imgs (str or list): Path to the image or a list of paths to the images.
            limit (int): Number of results to return.

        Returns:
            (pyarrow.Table): An arrow table containing the results. Supports converting to:
                - pandas dataframe: `result.to_pandas()`
                - dict of lists: `result.to_pydict()`

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.query(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        if self.table is None:
            raise ValueError("Table is not created. Please create the table first.")
        if isinstance(imgs, str):
            imgs = [imgs]
        assert isinstance(imgs, list), f"img must be a string or a list of strings. Got {type(imgs)}"
        embeds = self.model.embed(imgs)
        # Get avg if multiple images are passed (len > 1)
        embeds = torch.mean(torch.stack(embeds), 0).cpu().numpy() if len(embeds) > 1 else embeds[0].cpu().numpy()
        return self.table.search(embeds).limit(limit).to_arrow()

    def sql_query(
        self, query: str, return_type: str = "pandas"
    ) -> Union[DataFrame, Any, None]:  # pandas.dataframe or pyarrow.Table
        """
        Run a SQL-Like query on the table. Utilizes LanceDB predicate pushdown.

        Args:
            query (str): SQL query to run.
            return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.

        Returns:
            (pyarrow.Table): An arrow table containing the results.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
            result = exp.sql_query(query)
            ```
        """
        assert return_type in {
            "pandas",
            "arrow",
        }, f"Return type should be either `pandas` or `arrow`, but got {return_type}"
        import duckdb

        if self.table is None:
            raise ValueError("Table is not created. Please create the table first.")

        # Note: using filter pushdown would be a better long term solution. Temporarily using duckdb for this.
        table = self.table.to_arrow()  # noqa NOTE: Don't comment this. This line is used by DuckDB
        if not query.startswith("SELECT") and not query.startswith("WHERE"):
            raise ValueError(
                f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. found {query}"
            )
        if query.startswith("WHERE"):
            query = f"SELECT * FROM 'table' {query}"
        LOGGER.info(f"Running query: {query}")

        rs = duckdb.sql(query)
        if return_type == "arrow":
            return rs.arrow()
        elif return_type == "pandas":
            return rs.df()

    def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image:
        """
        Plot the results of a SQL-Like query on the table.

        Args:
            query (str): SQL query to run.
            labels (bool): Whether to plot the labels or not.

        Returns:
            (PIL.Image): Image containing the plot.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
            result = exp.plot_sql_query(query)
            ```
        """
        result = self.sql_query(query, return_type="arrow")
        if len(result) == 0:
            LOGGER.info("No results found.")
            return None
        img = plot_query_result(result, plot_labels=labels)
        return Image.fromarray(img)

    def get_similar(
        self,
        img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
        idx: Union[int, List[int]] = None,
        limit: int = 25,
        return_type: str = "pandas",
    ) -> Union[DataFrame, Any]:  # pandas.dataframe or pyarrow.Table
        """
        Query the table for similar images. Accepts a single image or a list of images.

        Args:
            img (str or list): Path to the image or a list of paths to the images.
            idx (int or list): Index of the image in the table or a list of indexes.
            limit (int): Number of results to return. Defaults to 25.
            return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.

        Returns:
            (pandas.DataFrame): A dataframe containing the results.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.get_similar(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        assert return_type in {
            "pandas",
            "arrow",
        }, f"Return type should be either `pandas` or `arrow`, but got {return_type}"
        img = self._check_imgs_or_idxs(img, idx)
        similar = self.query(img, limit=limit)

        if return_type == "arrow":
            return similar
        elif return_type == "pandas":
            return similar.to_pandas()

    def plot_similar(
        self,
        img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
        idx: Union[int, List[int]] = None,
        limit: int = 25,
        labels: bool = True,
    ) -> Image.Image:
        """
        Plot the similar images. Accepts images or indexes.

        Args:
            img (str or list): Path to the image or a list of paths to the images.
            idx (int or list): Index of the image in the table or a list of indexes.
            labels (bool): Whether to plot the labels or not.
            limit (int): Number of results to return. Defaults to 25.

        Returns:
            (PIL.Image): Image containing the plot.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.plot_similar(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        similar = self.get_similar(img, idx, limit, return_type="arrow")
        if len(similar) == 0:
            LOGGER.info("No results found.")
            return None
        img = plot_query_result(similar, plot_labels=labels)
        return Image.fromarray(img)

    def similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> DataFrame:
        """
        Calculate the similarity index of all the images in the table. Here, the index will contain the data points that
        are max_dist or closer to the image in the embedding space at a given index.

        Args:
            max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
            top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit when
                running vector search. Defaults: None.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to True.

        Returns:
            (pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image, and
                columns include indices of similar images and their respective distances.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            sim_idx = exp.similarity_index()
            ```
        """
        if self.table is None:
            raise ValueError("Table is not created. Please create the table first.")
        sim_idx_table_name = f"{self.sim_idx_base_name}_thres_{max_dist}_top_{top_k}".lower()
        if sim_idx_table_name in self.connection.table_names() and not force:
            LOGGER.info("Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.")
            return self.connection.open_table(sim_idx_table_name).to_pandas()

        if top_k and not (1.0 >= top_k >= 0.0):
            raise ValueError(f"top_k must be between 0.0 and 1.0. Got {top_k}")
        if max_dist < 0.0:
            raise ValueError(f"max_dist must be greater than 0. Got {max_dist}")

        top_k = int(top_k * len(self.table)) if top_k else len(self.table)
        top_k = max(top_k, 1)
        features = self.table.to_lance().to_table(columns=["vector", "im_file"]).to_pydict()
        im_files = features["im_file"]
        embeddings = features["vector"]

        sim_table = self.connection.create_table(sim_idx_table_name, schema=get_sim_index_schema(), mode="overwrite")

        def _yield_sim_idx():
            """Generates a dataframe with similarity indices and distances for images."""
            for i in tqdm(range(len(embeddings))):
                sim_idx = self.table.search(embeddings[i]).limit(top_k).to_pandas().query(f"_distance <= {max_dist}")
                yield [
                    {
                        "idx": i,
                        "im_file": im_files[i],
                        "count": len(sim_idx),
                        "sim_im_files": sim_idx["im_file"].tolist(),
                    }
                ]

        sim_table.add(_yield_sim_idx())
        self.sim_index = sim_table
        return sim_table.to_pandas()

    def plot_similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> Image:
        """
        Plot the similarity index of all the images in the table. Here, the index will contain the data points that are
        max_dist or closer to the image in the embedding space at a given index.

        Args:
            max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
            top_k (float): Percentage of closest data points to consider when counting. Used to apply limit when
                running vector search. Defaults to 0.01.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to True.

        Returns:
            (PIL.Image): Image containing the plot.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()

            similarity_idx_plot = exp.plot_similarity_index()
            similarity_idx_plot.show()  # view image preview
            similarity_idx_plot.save('path/to/save/similarity_index_plot.png')  # save contents to file
            ```
        """
        sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force)
        sim_count = sim_idx["count"].tolist()
        sim_count = np.array(sim_count)

        indices = np.arange(len(sim_count))

        # Create the bar plot
        plt.bar(indices, sim_count)

        # Customize the plot (optional)
        plt.xlabel("data idx")
        plt.ylabel("Count")
        plt.title("Similarity Count")
        buffer = BytesIO()
        plt.savefig(buffer, format="png")
        buffer.seek(0)

        # Use Pillow to open the image from the buffer
        return Image.fromarray(np.array(Image.open(buffer)))

    def _check_imgs_or_idxs(
        self, img: Union[str, np.ndarray, List[str], List[np.ndarray], None], idx: Union[None, int, List[int]]
    ) -> List[np.ndarray]:
        if img is None and idx is None:
            raise ValueError("Either img or idx must be provided.")
        if img is not None and idx is not None:
            raise ValueError("Only one of img or idx must be provided.")
        if idx is not None:
            idx = idx if isinstance(idx, list) else [idx]
            img = self.table.to_lance().take(idx, columns=["im_file"]).to_pydict()["im_file"]

        return img if isinstance(img, list) else [img]

    def ask_ai(self, query):
        """
        Ask AI a question.

        Args:
            query (str): Question to ask.

        Returns:
            (pandas.DataFrame): A dataframe containing filtered results to the SQL query.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            answer = exp.ask_ai('Show images with 1 person and 2 dogs')
            ```
        """
        result = prompt_sql_query(query)
        try:
            df = self.sql_query(result)
        except Exception as e:
            LOGGER.error("AI generated query is not valid. Please try again with a different prompt")
            LOGGER.error(e)
            return None
        return df

    def visualize(self, result):
        """
        Visualize the results of a query. TODO.

        Args:
            result (pyarrow.Table): Table containing the results of a query.
        """
        pass

    def generate_report(self, result):
        """
        Generate a report of the dataset. TODO
        """
        pass
[ "lancedb.connect" ]
[((1680, 1873), 'ultralytics.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normalize=False, return_mask=self.use_segments,\n return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.\n mask_ratio, mask_overlap=hyp.overlap_mask)\n", (1686, 1873), False, 'from ultralytics.data.augment import Format\n'), ((2137, 2192), 'ultralytics.utils.checks.check_requirements', 'checks.check_requirements', (["['lancedb>=0.4.3', 'duckdb']"], {}), "(['lancedb>=0.4.3', 'duckdb'])\n", (2162, 2192), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((2243, 2263), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2258, 2263), False, 'import lancedb\n'), ((2514, 2525), 'ultralytics.models.yolo.model.YOLO', 'YOLO', (['model'], {}), '(model)\n', (2518, 2525), False, 'from ultralytics.models.yolo.model import YOLO\n'), ((3857, 3885), 'ultralytics.data.utils.check_det_dataset', 'check_det_dataset', (['self.data'], {}), '(self.data)\n', (3874, 3885), False, 'from ultralytics.data.utils import check_det_dataset\n'), ((8492, 8530), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Running query: {query}"""'], {}), "(f'Running query: {query}')\n", (8503, 8530), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((8545, 8562), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (8555, 8562), False, 'import duckdb\n'), ((9524, 9544), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (9539, 9544), False, 'from PIL import Image\n'), ((12169, 12189), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (12184, 12189), False, 'from PIL import Image\n'), ((16441, 16460), 'numpy.array', 'np.array', (['sim_count'], {}), '(sim_count)\n', (16449, 16460), True, 'import numpy as np\n'), ((16545, 16572), 'matplotlib.pyplot.bar', 'plt.bar', (['indices', 'sim_count'], {}), '(indices, sim_count)\n', (16552, 16572), True, 'from matplotlib import pyplot as plt\n'), ((16622, 16644), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data idx"""'], {}), "('data idx')\n", (16632, 16644), True, 'from matplotlib import pyplot as plt\n'), ((16653, 16672), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (16663, 16672), True, 'from matplotlib import pyplot as plt\n'), ((16681, 16710), 'matplotlib.pyplot.title', 'plt.title', (['"""Similarity Count"""'], {}), "('Similarity Count')\n", (16690, 16710), True, 'from matplotlib import pyplot as plt\n'), ((16728, 16737), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (16735, 16737), False, 'from io import BytesIO\n'), ((16746, 16779), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {'format': '"""png"""'}), "(buffer, format='png')\n", (16757, 16779), True, 'from matplotlib import pyplot as plt\n'), ((3318, 3404), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Table already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Table already exists. Reusing it. Pass force=True to overwrite it.')\n", (3329, 3404), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((3506, 3616), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Table {self.table_name} already exists. Reusing it. 
Pass force=True to overwrite it."""'], {}), "(\n f'Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (3517, 3616), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((9392, 9424), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (9403, 9424), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((12036, 12068), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (12047, 12068), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((13646, 13749), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Similarity matrix already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (13657, 13749), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((1190, 1201), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1197, 1201), True, 'import numpy as np\n'), ((1255, 1268), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (1265, 1268), False, 'import cv2\n'), ((16899, 16917), 'PIL.Image.open', 'Image.open', (['buffer'], {}), '(buffer)\n', (16909, 16917), False, 'from PIL import Image\n'), ((18135, 18234), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""AI generated query is not valid. Please try again with a different prompt"""'], {}), "(\n 'AI generated query is not valid. Please try again with a different prompt'\n )\n", (18147, 18234), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((18237, 18252), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['e'], {}), '(e)\n', (18249, 18252), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((2290, 2300), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2294, 2300), False, 'from pathlib import Path\n'), ((6717, 6736), 'torch.stack', 'torch.stack', (['embeds'], {}), '(embeds)\n', (6728, 6736), False, 'import torch\n')]
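A minimal usage sketch for the similarity-index API defined above, assuming the class is importable as `from ultralytics import Explorer` (the exact import path is an assumption and may differ by ultralytics version); the dataset and model names are the file's own defaults:

# Hedged usage sketch; not part of the original module.
from ultralytics import Explorer  # assumed import path

exp = Explorer(data='coco128.yaml', model='yolov8n.pt')
exp.create_embeddings_table()

# Count, for each image, the neighbours within an L2 distance of 0.2,
# searching only the closest 1% of rows per query.
sim_idx = exp.similarity_index(max_dist=0.2, top_k=0.01)
print(sim_idx.head())

plot = exp.plot_similarity_index(max_dist=0.2, top_k=0.01)
plot.save('similarity_index_plot.png')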
""" Chatbot for talking to Podcast using Langchain, Ollama and LanceDB """ from langchain.document_loaders import DataFrameLoader import pandas as pd from langchain.memory import ConversationSummaryMemory import lancedb from langchain.vectorstores import LanceDB from langchain.embeddings import OpenAIEmbeddings from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.chat_models import ChatOllama from langchain.chains import ConversationalRetrievalChain def lanceDBConnection(dataset): db = lancedb.connect("/tmp/lancedb") table = db.create_table("tb", data=dataset, mode="overwrite") return table def vectorStoreSetup(text, OPENAI_KEY): # OpenAI embeddings embedding = OpenAIEmbeddings(openai_api_key=OPENAI_KEY) emb = embedding.embed_query(text) dataset = [{"vector": emb, "text": text}] # LanceDB as vector store table = lanceDBConnection(dataset) df = pd.DataFrame(dataset) loader = DataFrameLoader(df) data = loader.load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) all_splits = text_splitter.split_documents(data) vectorstore = LanceDB.from_documents( documents=all_splits, embedding=OpenAIEmbeddings(openai_api_key=OPENAI_KEY), connection=table, ) return vectorstore def retrieverSetup(text, OPENAI_KEY): vectorstore = vectorStoreSetup(text, OPENAI_KEY) # define ChatOllama: by default takes llama2-4bit quantized model llm = ChatOllama() memory = ConversationSummaryMemory( llm=llm, memory_key="chat_history", return_messages=True ) retriever = vectorstore.as_retriever() # define Retrieval Chain for retriver qa = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, memory=memory) return qa def chat(qa, question): # chat query r = qa.run({"question": question}) return r
[ "lancedb.connect" ]
[((527, 558), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (542, 558), False, 'import lancedb\n'), ((724, 767), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_KEY'}), '(openai_api_key=OPENAI_KEY)\n', (740, 767), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((932, 953), 'pandas.DataFrame', 'pd.DataFrame', (['dataset'], {}), '(dataset)\n', (944, 953), True, 'import pandas as pd\n'), ((967, 986), 'langchain.document_loaders.DataFrameLoader', 'DataFrameLoader', (['df'], {}), '(df)\n', (982, 986), False, 'from langchain.document_loaders import DataFrameLoader\n'), ((1033, 1096), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(0)'}), '(chunk_size=500, chunk_overlap=0)\n', (1063, 1096), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1514, 1526), 'langchain.chat_models.ChatOllama', 'ChatOllama', ([], {}), '()\n', (1524, 1526), False, 'from langchain.chat_models import ChatOllama\n'), ((1540, 1627), 'langchain.memory.ConversationSummaryMemory', 'ConversationSummaryMemory', ([], {'llm': 'llm', 'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(llm=llm, memory_key='chat_history',\n return_messages=True)\n", (1565, 1627), False, 'from langchain.memory import ConversationSummaryMemory\n'), ((1733, 1811), 'langchain.chains.ConversationalRetrievalChain.from_llm', 'ConversationalRetrievalChain.from_llm', (['llm'], {'retriever': 'retriever', 'memory': 'memory'}), '(llm, retriever=retriever, memory=memory)\n', (1770, 1811), False, 'from langchain.chains import ConversationalRetrievalChain\n'), ((1241, 1284), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_KEY'}), '(openai_api_key=OPENAI_KEY)\n', (1257, 1284), False, 'from langchain.embeddings import OpenAIEmbeddings\n')]
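A hedged end-to-end sketch of the chatbot helpers above; the transcript string is a placeholder, and reading the key from the OPENAI_API_KEY environment variable is an assumption about how the key is supplied:

# Hedged usage sketch; not part of the original module.
import os

transcript = "Full text of a podcast episode..."  # placeholder input
qa = retrieverSetup(transcript, os.environ["OPENAI_API_KEY"])
print(chat(qa, "What was this episode about?"))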
import lancedb
import openai
import os
from flask import Flask, render_template, request, jsonify
from flask import Response
from flask_cors import CORS
from langchain.vectorstores import LanceDB
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter

app = Flask(__name__)
# cors = CORS(app, resources={r"/*": {"origins": "*"}})

# from dotenv import dotenv_values
# env_vars = dotenv_values('.env')
env_vars = {'OPENAI_API_KEY': 'sk-1234'}

uri = "~/.lancedb"
db = lancedb.connect(uri)

OPENAI_API_KEY = env_vars['OPENAI_API_KEY']

# Route for "/" for a web-based interface to this micro-service:
@app.route('/')
def index():
    return "Hello, World"

@app.after_request
def add_cors_headers(response):
    response.headers.add('Access-Control-Allow-Origin', '*')
    response.headers.add('Access-Control-Allow-Headers', 'Content-Type')
    response.headers.add('Access-Control-Allow-Methods', 'POST, OPTIONS')
    return response

@app.route('/test', methods=['POST'])
def test():
    # Process the request
    data = request.get_json()
    print(data)
    print("TESTING")
    # Generate the response
    response_data = {'message': 'Hello, World'}
    response = jsonify(response_data)
    return response

# Handle OPTIONS request for CORS preflight
@app.route('/test', methods=['OPTIONS'])
def handle_options():
    response = jsonify({'message': 'Preflight request received'})
    response.headers.add('Access-Control-Allow-Origin', '*')
    response.headers.add('Access-Control-Allow-Headers', 'Content-Type')
    response.headers.add('Access-Control-Allow-Methods', 'POST')
    print('Testing')
    return response

@app.route('/store', methods=['POST'])
def store_embedding():
    json_data = request.get_json()
    text = json_data['raw_text']
    metadata = json_data['metadata']
    document = Document(page_content=text, metadata=metadata)
    chunks = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents([document])
    embeddings = OpenAIEmbeddings()
    table = db.create_table("text", data=[
        {"vector": embeddings.embed_query("Hello World"), "text": "Hello World", "id": "1"}
    ], mode="overwrite")
    docsearch = LanceDB.from_documents(chunks, embeddings, connection=table)
    return "Loaded Document Into Table"

@app.route('/retrieve', methods=['POST'])
def retrieve_embedding():
    json_data = request.get_json()
    query = json_data['query']
    assert len(openai.Model.list()["data"]) > 0

    def embed_func(query):
        rs = openai.Embedding.create(input=query, engine="text-embedding-ada-002")
        return [record["embedding"] for record in rs["data"]]

    query_vector = embed_func([query])[0]
    table = db.open_table("text")
    docs = table.search(query_vector).limit(10).to_df()
    print(docs)
    # A pandas DataFrame is not a valid Flask response; serialize it first
    return docs.to_json(orient="records")

if __name__ == '__main__':
    app.run(debug=True)
[ "lancedb.connect" ]
[((448, 463), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (453, 463), False, 'from flask import Flask, render_template, request, jsonify\n'), ((658, 678), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (673, 678), False, 'import lancedb\n'), ((685, 700), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (690, 700), False, 'from flask import Flask, render_template, request, jsonify\n'), ((1234, 1252), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1250, 1252), False, 'from flask import Flask, render_template, request, jsonify\n'), ((1382, 1404), 'flask.jsonify', 'jsonify', (['response_data'], {}), '(response_data)\n', (1389, 1404), False, 'from flask import Flask, render_template, request, jsonify\n'), ((1549, 1599), 'flask.jsonify', 'jsonify', (["{'message': 'Preflight request received'}"], {}), "({'message': 'Preflight request received'})\n", (1556, 1599), False, 'from flask import Flask, render_template, request, jsonify\n'), ((1921, 1939), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1937, 1939), False, 'from flask import Flask, render_template, request, jsonify\n'), ((2025, 2071), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (2033, 2071), False, 'from langchain.docstore.document import Document\n'), ((2186, 2204), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (2202, 2204), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2383, 2443), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['chunks', 'embeddings'], {'connection': 'table'}), '(chunks, embeddings, connection=table)\n', (2405, 2443), False, 'from langchain.vectorstores import LanceDB\n'), ((2576, 2594), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2592, 2594), False, 'from flask import Flask, render_template, request, jsonify\n'), ((2716, 2785), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': 'query', 'engine': '"""text-embedding-ada-002"""'}), "(input=query, engine='text-embedding-ada-002')\n", (2739, 2785), False, 'import openai\n'), ((2085, 2140), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (2106, 2140), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((2640, 2659), 'openai.Model.list', 'openai.Model.list', ([], {}), '()\n', (2657, 2659), False, 'import openai\n')]
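A hedged client-side sketch for exercising the /store and /retrieve routes above, assuming the app runs locally on Flask's default port 5000:

# Hedged usage sketch; payload shapes are taken from the handlers above.
import requests

base = "http://127.0.0.1:5000"
requests.post(f"{base}/store", json={"raw_text": "Hello World", "metadata": {"source": "demo"}})
resp = requests.post(f"{base}/retrieve", json={"query": "greeting"})
print(resp.text)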
import os
import sys
import shutil
import platform
import argparse
import lancedb
import time
import uuid
import toml
import pyarrow as pa
import pyarrow.parquet as pq
from dotenv import load_dotenv, find_dotenv
from pymilvus import (
    utility,
    connections,
    FieldSchema,
    CollectionSchema,
    DataType,
    Collection,
)
from qdrant_client import QdrantClient, models
from qdrant_client.http.models import Distance, VectorParams, PointStruct

# Constants
DEFAULT_CONFIG_FILE = "configs/default.toml"
MILVUS_MAX_BATCH_SIZE = 10000
QDRANT_MAX_BATCH_SIZE = 1000

# Import ChromaDB properly
if platform.system() == "Linux":
    __import__('pysqlite3')
    sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
import chromadb

# Load the environment variables
load_dotenv(find_dotenv())


def read_toml_file(file_path):
    with open(file_path, "r") as f:
        return toml.load(f)


def read_parquet_file(file_path):
    table = pq.read_table(file_path)
    df = table.to_pandas()
    lst = df.values.tolist()
    return lst


def create_batches(l, n):
    # looping till length l
    for i in range(0, len(l), n):
        yield l[i:i + n]


def init_db_collection(config):
    if config["database"] == "chroma":
        if os.path.exists("./chroma_db"):
            shutil.rmtree("./chroma_db")
        db = chromadb.PersistentClient(path="./chroma_db")
        collection = db.get_or_create_collection(config["table"])
    elif config["database"] == "lance":
        if os.path.exists("./lance_db"):
            shutil.rmtree("./lance_db")
        db = lancedb.connect("./lance_db")
        schema = pa.schema(
            [
                pa.field("embedding", pa.list_(pa.float32(), list_size=config["dimension"])),
                pa.field("token", pa.string()),
                pa.field("id", pa.int64()),
            ])
        collection = db.create_table(config["table"], schema=schema)
    elif config["database"] == "milvus":
        connections.connect("default", host="localhost", port="19530")
        if utility.has_collection(config["table"]):
            utility.drop_collection(config["table"])
        fields = [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="token", dtype=DataType.VARCHAR, max_length=16384),
            FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=config["dimension"])
        ]
        schema = CollectionSchema(fields, config["table"])
        collection = Collection(config["table"], schema)
    elif config["database"] == "qdrant":
        collection = QdrantClient("localhost", port=6333)
        collection.delete_collection(collection_name=config["table"])
        collection.create_collection(
            collection_name=config["table"],
            vectors_config=VectorParams(size=config["dimension"], distance=Distance.DOT),
            optimizers_config=models.OptimizersConfigDiff(
                indexing_threshold=0,
            ),
        )
    return collection


def init_client(config):
    if config["database"] == "qdrant":
        return QdrantClient("localhost", port=6333)


def run_query(config, client, vector):
    if config["database"] == "qdrant":
        results = client.search(
            collection_name=config["table"],
            query_vector=vector,
            with_vectors=True,
            with_payload=True,
            limit=config["top_k"],
        )
        for idx, point in enumerate(results):
            print(f"[INFO] Score #{idx}: {point.score}")


def insert_into_collection_bulk(collection, batch, config):
    if config["database"] == "milvus":
        mini_batches = list(create_batches(batch, MILVUS_MAX_BATCH_SIZE))
        for b in mini_batches:
            s = time.time()
            # NOTE: the "id" field has auto_id=True, so Milvus generates the
            # primary keys itself; only token and embedding columns are inserted.
            collection.insert([
                [row[config["payload_idx"]] for row in b],
                [list(row[config["embedding_idx"]]) for row in b],
            ])
            print(f"[INFO] Inserted batch of size {len(b)} in {time.time() - s} seconds")
    elif config["database"] == "chroma":
        collection.add(
            ids=[str(idx) for idx, _ in enumerate(batch)],
            documents=[row[config["payload_idx"]] for row in batch],
            embeddings=[list(row[config["embedding_idx"]]) for row in batch],
        )
    elif config["database"] == "lance":
        collection.add([
            {
                "id": idx,
                "token": row[config["payload_idx"]],
                "embedding": list(row[config["embedding_idx"]]),
            } for idx, row in enumerate(batch)])
    elif config["database"] == "qdrant":
        mini_batches = list(create_batches(batch, QDRANT_MAX_BATCH_SIZE))
        for b in mini_batches:
            s = time.time()
            collection.upsert(
                collection_name=config["table"],
                wait=True,
                points=[
                    PointStruct(
                        id=str(uuid.uuid4()),
                        payload={"token": row[config["payload_idx"]]},
                        vector=list(row[config["embedding_idx"]]),
                    ) for row in b
                ],
            )
            print(f"[INFO] Inserted batch of size {len(b)} in {time.time() - s} seconds")


def get_collection_info(collection, config):
    if config["database"] == "milvus":
        collection.flush()
        print(collection.name)
        print(collection.schema)
        print(collection.num_entities)
    elif config["database"] == "chroma":
        print(collection.count())
    elif config["database"] == "lance":
        print(collection.schema)
        print(collection.count_rows())
    elif config["database"] == "qdrant":
        print("Collection info")
        print(collection.get_collection(collection_name=config["table"]))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--bench", type=str, default="qdrant-1M-1536", help="The benchmark setup to use")
    parser.add_argument("--debug", action="store_true", help="Whether to run the script in debug mode")
    parser.add_argument("--query", action="store_true", help="Whether to run a query on the collection")
    parser.add_argument("--ingest", action="store_true", help="Whether to ingest the embeddings into the collection")
    args = parser.parse_args()

    # Read the default config file
    config = read_toml_file(DEFAULT_CONFIG_FILE)
    config = {**config[args.bench]}
    print("[INFO] Running with config: ")
    print(config)

    # Ingest the dataset
    if args.ingest:
        total_rows_written = 0
        collection = init_db_collection(config)

        # Read the parquet files
        if args.debug:
            file_list = os.listdir(config["dataset"])[:2]
        else:
            file_list = os.listdir(config["dataset"])[config["train_start_idx"]:config["train_stop_idx"]]
        print(f"[INFO] Inserting {len(file_list)} files into collection {config['table']}")

        # Insert the embeddings into the collection
        for file in file_list:
            batch = read_parquet_file(os.path.join(config["dataset"], file))
            insert_into_collection_bulk(collection, batch, config)
            total_rows_written += len(batch)
            print(f"[INFO] Total rows written: {total_rows_written}")

        # Print out collection stats after insertion
        get_collection_info(collection, config)

    # Query the dataset
    if args.query:
        queries_ran = 0
        client = init_client(config)
        file_list = os.listdir(config["dataset"])[config["query_start_idx"]:config["query_stop_idx"]]
        print(f"[INFO] Running queries from {len(file_list)} files")
        for file in file_list:
            if queries_ran >= config["queries_to_run"]:
                break
            for row in read_parquet_file(os.path.join(config["dataset"], file)):
                vector = row[config["embedding_idx"]]
                print(f"[INFO] Running query #{queries_ran} for vector: [{vector[0]}, {vector[1]}, {vector[2]}, ...]")
                run_query(config, client, vector)
                queries_ran += 1
                if queries_ran >= config["queries_to_run"]:
                    break

    print("[INFO] Done!")
[ "lancedb.connect" ]
[((605, 622), 'platform.system', 'platform.system', ([], {}), '()\n', (620, 622), False, 'import platform\n'), ((707, 735), 'sys.modules.pop', 'sys.modules.pop', (['"""pysqlite3"""'], {}), "('pysqlite3')\n", (722, 735), False, 'import sys\n'), ((798, 811), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (809, 811), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((958, 982), 'pyarrow.parquet.read_table', 'pq.read_table', (['file_path'], {}), '(file_path)\n', (971, 982), True, 'import pyarrow.parquet as pq\n'), ((6025, 6050), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6048, 6050), False, 'import argparse\n'), ((897, 909), 'toml.load', 'toml.load', (['f'], {}), '(f)\n', (906, 909), False, 'import toml\n'), ((1257, 1286), 'os.path.exists', 'os.path.exists', (['"""./chroma_db"""'], {}), "('./chroma_db')\n", (1271, 1286), False, 'import os\n'), ((1342, 1387), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (1367, 1387), False, 'import chromadb\n'), ((3125, 3161), 'qdrant_client.QdrantClient', 'QdrantClient', (['"""localhost"""'], {'port': '(6333)'}), "('localhost', port=6333)\n", (3137, 3161), False, 'from qdrant_client import QdrantClient, models\n'), ((1300, 1328), 'shutil.rmtree', 'shutil.rmtree', (['"""./chroma_db"""'], {}), "('./chroma_db')\n", (1313, 1328), False, 'import shutil\n'), ((1505, 1533), 'os.path.exists', 'os.path.exists', (['"""./lance_db"""'], {}), "('./lance_db')\n", (1519, 1533), False, 'import os\n'), ((1588, 1617), 'lancedb.connect', 'lancedb.connect', (['"""./lance_db"""'], {}), "('./lance_db')\n", (1603, 1617), False, 'import lancedb\n'), ((3785, 3796), 'time.time', 'time.time', ([], {}), '()\n', (3794, 3796), False, 'import time\n'), ((7733, 7762), 'os.listdir', 'os.listdir', (["config['dataset']"], {}), "(config['dataset'])\n", (7743, 7762), False, 'import os\n'), ((1547, 1574), 'shutil.rmtree', 'shutil.rmtree', (['"""./lance_db"""'], {}), "('./lance_db')\n", (1560, 1574), False, 'import shutil\n'), ((1979, 2041), 'pymilvus.connections.connect', 'connections.connect', (['"""default"""'], {'host': '"""localhost"""', 'port': '"""19530"""'}), "('default', host='localhost', port='19530')\n", (1998, 2041), False, 'from pymilvus import utility, connections, FieldSchema, CollectionSchema, DataType, Collection\n'), ((2053, 2092), 'pymilvus.utility.has_collection', 'utility.has_collection', (["config['table']"], {}), "(config['table'])\n", (2075, 2092), False, 'from pymilvus import utility, connections, FieldSchema, CollectionSchema, DataType, Collection\n'), ((2459, 2500), 'pymilvus.CollectionSchema', 'CollectionSchema', (['fields', "config['table']"], {}), "(fields, config['table'])\n", (2475, 2500), False, 'from pymilvus import utility, connections, FieldSchema, CollectionSchema, DataType, Collection\n'), ((2522, 2557), 'pymilvus.Collection', 'Collection', (["config['table']", 'schema'], {}), "(config['table'], schema)\n", (2532, 2557), False, 'from pymilvus import utility, connections, FieldSchema, CollectionSchema, DataType, Collection\n'), ((6903, 6932), 'os.listdir', 'os.listdir', (["config['dataset']"], {}), "(config['dataset'])\n", (6913, 6932), False, 'import os\n'), ((6975, 7004), 'os.listdir', 'os.listdir', (["config['dataset']"], {}), "(config['dataset'])\n", (6985, 7004), False, 'import os\n'), ((7272, 7309), 'os.path.join', 'os.path.join', (["config['dataset']", 'file'], {}), "(config['dataset'], file)\n", (7284, 7309), False, 'import os\n'), 
((7957, 7994), 'os.path.join', 'os.path.join', (["config['dataset']", 'file'], {}), "(config['dataset'], file)\n", (7969, 7994), False, 'import os\n'), ((2106, 2146), 'pymilvus.utility.drop_collection', 'utility.drop_collection', (["config['table']"], {}), "(config['table'])\n", (2129, 2146), False, 'from pymilvus import utility, connections, FieldSchema, CollectionSchema, DataType, Collection\n'), ((2178, 2253), 'pymilvus.FieldSchema', 'FieldSchema', ([], {'name': '"""id"""', 'dtype': 'DataType.INT64', 'is_primary': '(True)', 'auto_id': '(True)'}), "(name='id', dtype=DataType.INT64, is_primary=True, auto_id=True)\n", (2189, 2253), False, 'from pymilvus import utility, connections, FieldSchema, CollectionSchema, DataType, Collection\n'), ((2267, 2334), 'pymilvus.FieldSchema', 'FieldSchema', ([], {'name': '"""token"""', 'dtype': 'DataType.VARCHAR', 'max_length': '(16384)'}), "(name='token', dtype=DataType.VARCHAR, max_length=16384)\n", (2278, 2334), False, 'from pymilvus import utility, connections, FieldSchema, CollectionSchema, DataType, Collection\n'), ((2348, 2436), 'pymilvus.FieldSchema', 'FieldSchema', ([], {'name': '"""embedding"""', 'dtype': 'DataType.FLOAT_VECTOR', 'dim': "config['dimension']"}), "(name='embedding', dtype=DataType.FLOAT_VECTOR, dim=config[\n 'dimension'])\n", (2359, 2436), False, 'from pymilvus import utility, connections, FieldSchema, CollectionSchema, DataType, Collection\n'), ((2620, 2656), 'qdrant_client.QdrantClient', 'QdrantClient', (['"""localhost"""'], {'port': '(6333)'}), "('localhost', port=6333)\n", (2632, 2656), False, 'from qdrant_client import QdrantClient, models\n'), ((1788, 1799), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (1797, 1799), True, 'import pyarrow as pa\n'), ((1833, 1843), 'pyarrow.int64', 'pa.int64', ([], {}), '()\n', (1841, 1843), True, 'import pyarrow as pa\n'), ((4911, 4922), 'time.time', 'time.time', ([], {}), '()\n', (4920, 4922), False, 'import time\n'), ((1707, 1719), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (1717, 1719), True, 'import pyarrow as pa\n'), ((2837, 2898), 'qdrant_client.http.models.VectorParams', 'VectorParams', ([], {'size': "config['dimension']", 'distance': 'Distance.DOT'}), "(size=config['dimension'], distance=Distance.DOT)\n", (2849, 2898), False, 'from qdrant_client.http.models import Distance, VectorParams, PointStruct\n'), ((2930, 2979), 'qdrant_client.models.OptimizersConfigDiff', 'models.OptimizersConfigDiff', ([], {'indexing_threshold': '(0)'}), '(indexing_threshold=0)\n', (2957, 2979), False, 'from qdrant_client import QdrantClient, models\n'), ((3934, 3946), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3944, 3946), False, 'import uuid\n'), ((4165, 4176), 'time.time', 'time.time', ([], {}), '()\n', (4174, 4176), False, 'import time\n'), ((5404, 5415), 'time.time', 'time.time', ([], {}), '()\n', (5413, 5415), False, 'import time\n'), ((5119, 5131), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5129, 5131), False, 'import uuid\n')]
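The script above reads its benchmark setups from configs/default.toml keyed by the --bench name. A hedged sketch of writing a plausible entry, with keys inferred from what the script reads; the entry name and every value here are assumptions:

# Hedged sketch; "lance-demo" and all values are hypothetical.
import toml

config = {
    "lance-demo": {
        "database": "lance",
        "table": "embeddings",
        "dimension": 1536,
        "dataset": "data/embeddings_parquet",  # directory of parquet files
        "payload_idx": 0,       # column index of the token text
        "embedding_idx": 1,     # column index of the vector
        "train_start_idx": 0,
        "train_stop_idx": 100,
        "query_start_idx": 100,
        "query_stop_idx": 110,
        "top_k": 10,
        "queries_to_run": 100,
    }
}
with open("configs/default.toml", "w") as f:
    toml.dump(config, f)
# Then, e.g.: python benchmark.py --bench lance-demo --ingest  (script name assumed)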
import pandas as pd

from omnivector.abstraction import AbstractDB


class LanceDB(AbstractDB):
    """
    LanceDB is a vector database that uses Lance to store and search vectors.
    """

    def __init__(self):
        super().__init__()

    def create_index(self):
        # not sure how to do this in Lance; LanceDB does expose
        # Table.create_index() for ANN indexes, but brute-force search
        # works without one, so this is left as a no-op.
        pass

    def delete(self, ids):
        import lancedb

        db = lancedb.connect(self.config["lancedb"]["DB_PATH"])
        tbl = db.open_table("my_table")
        ids = ", ".join(str(v) for v in ids)
        tbl.delete(f"id IN ({ids})")

    def add(self, ids, vectors, metadata=None):
        import lancedb

        data = pd.DataFrame({"id": ids})
        db = lancedb.connect(self.config["lancedb"]["DB_PATH"])
        if metadata is not None:
            meta_df = pd.DataFrame.from_records(metadata)
            data = pd.concat([data, meta_df], axis=1)
        data["vector"] = vectors.tolist()
        try:
            tbl = db.open_table("my_table")
            tbl.add(data)
        except Exception:
            # Table does not exist yet; create it from the first batch
            db.create_table("my_table", data)

    def vector_search(self, vector, k=3):
        import lancedb

        db = lancedb.connect(self.config["lancedb"]["DB_PATH"])
        tbl = db.open_table("my_table")
        return tbl.search(vector).limit(k).to_df()
[ "lancedb.connect" ]
[((388, 438), 'lancedb.connect', 'lancedb.connect', (["self.config['lancedb']['DB_PATH']"], {}), "(self.config['lancedb']['DB_PATH'])\n", (403, 438), False, 'import lancedb\n'), ((650, 675), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': ids}"], {}), "({'id': ids})\n", (662, 675), True, 'import pandas as pd\n'), ((690, 740), 'lancedb.connect', 'lancedb.connect', (["self.config['lancedb']['DB_PATH']"], {}), "(self.config['lancedb']['DB_PATH'])\n", (705, 740), False, 'import lancedb\n'), ((1157, 1207), 'lancedb.connect', 'lancedb.connect', (["self.config['lancedb']['DB_PATH']"], {}), "(self.config['lancedb']['DB_PATH'])\n", (1172, 1207), False, 'import lancedb\n'), ((797, 832), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['metadata'], {}), '(metadata)\n', (822, 832), True, 'import pandas as pd\n'), ((852, 886), 'pandas.concat', 'pd.concat', (['[data, meta_df]'], {'axis': '(1)'}), '([data, meta_df], axis=1)\n', (861, 886), True, 'import pandas as pd\n')]
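A hedged usage sketch for the wrapper above; how AbstractDB receives its config is not shown in this file, so assigning `config` directly is an assumption made purely for illustration:

# Hedged usage sketch; the config shape mirrors self.config["lancedb"]["DB_PATH"] above.
import numpy as np

db = LanceDB()
db.config = {"lancedb": {"DB_PATH": "/tmp/omni_lance"}}  # assumption: config is a plain dict
db.add(ids=[1, 2, 3], vectors=np.random.rand(3, 4), metadata=[{"tag": "a"}, {"tag": "b"}, {"tag": "c"}])
print(db.vector_search(np.random.rand(4), k=2))
db.delete([2])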
from typing import Union, List, Optional
import pandas as pd
from fastapi import FastAPI, BackgroundTasks, Query, WebSocket, WebSocketDisconnect
from fastapi.responses import FileResponse
import time
import os, json, random, urllib.parse
import lancedb
from pydantic import BaseModel
from . import backend
from .utils import get_full_book_name, get_book_abbreviation, embed_batch
from .types import Message, RequestModel, TranslationTriplet
import requests
import logging

logger = logging.getLogger('uvicorn')

app = FastAPI()

bsb_bible_df, macula_df = backend.get_dataframes()  # Store these in global state

@app.get("/api/python")
def read_root():
    return {"Hello": "World"}

# get bsb verse by ref
@app.get("/api/bsb_verses/{full_verse_ref}")
def read_item(full_verse_ref: str):
    """
    Get verse from bsb_bible_df (Berean Standard Bible)
    e.g., http://localhost:3000/api/bsb_verses/GEN%202:19
    """
    logger.info('debug: %s', bsb_bible_df.head())
    verse = bsb_bible_df[bsb_bible_df['vref'] == full_verse_ref]
    entry_number_of_verse = verse.index[0]
    verse_output = {
        'verse_number': int(entry_number_of_verse),
        'vref': verse['vref'][entry_number_of_verse],
        'content': verse['content'][entry_number_of_verse]
    }
    return verse_output

# get macula verse by ref
@app.get("/api/macula_verses/{full_verse_ref}")
def read_macula_verse_item(full_verse_ref: str):
    """
    Get verse from macula_greek_df and macula_hebrew_df
    e.g., http://localhost:3000/api/macula_verses/GEN%202:19
    or NT: http://localhost:3000/api/macula_verses/ROM%202:19
    """
    logger.info('full_verse_ref: %s', full_verse_ref)
    verse = macula_df[macula_df['vref'] == full_verse_ref]
    entry_number_of_verse = verse.index[0]
    verse_output = {
        'verse_number': int(entry_number_of_verse),
        'vref': verse['vref'][entry_number_of_verse],
        'content': verse['content'][entry_number_of_verse]
    }
    return verse_output

# get target language data by language code
# @app.get("/api/target_vref_data/{language_code}")
# def read_target_language_item(language_code: str, drop_empty_verses: bool = False):
#     """
#     Get target language data by language code
#     e.g., http://localhost:3000/api/target_vref_data/aai
#     """
#     target_vref_data = get_target_vref_df(language_code, drop_empty_verses=drop_empty_verses)
#     return target_vref_data

@app.get("/api/download_triplets")
async def download_triplets(target_language_code: str, file_suffix: Optional[str] = None, force: bool = False):
    print(f'target_language_code: {target_language_code}')
    filename = f"{target_language_code}_triplets.json"

    verse_triplets = {}
    book_list = backend.get_vref_list()
    for book in book_list:
        vref_list = backend.get_vref_list(book)
        # print(f'vref_list: {vref_list[:10]}')
        for vref in vref_list:
            verse_triplet = backend.get_verse_triplet(vref, target_language_code, bsb_bible_df, macula_df)
            # print('verse_triplet', verse_triplet)
            if verse_triplet is not None:
                verse_triplets[vref] = verse_triplet

    print(len(verse_triplets), 'verse triplets')
    json_str = json.dumps(verse_triplets)

    # Write the json_str to a file
    with open(filename, 'w') as f:
        f.write(json_str)

    # return FileResponse(
    #     filename,
    #     media_type="application/json",
    #     headers={
    #         "Content-Disposition": f"attachment; filename={filename}"
    #     }
    # )
    return {"status": "Download started. Check back later for results."}

# get a single verse with source text and gloss, bsb english, and target language
@app.get("/api/verse/{full_verse_ref}&{language_code}")
def get_verse(full_verse_ref: str, language_code: str):
    return backend.get_verse_triplet(full_verse_ref, language_code, bsb_bible_df, macula_df)

@app.get("/api/bible")
async def get_bible(language_code: str, file_suffix: Optional[str], force: Optional[bool], background_tasks: BackgroundTasks):
    """
    Get the entire Bible from bsb_bible_df, AND macula_df (greek and hebrew) AND target_vref_data (target language)
    e.g., http://localhost:3000/api/bible/aai
    """
    filename = f'data/bible/{language_code}{file_suffix}.json'

    # If the file exists and we're not forcing a reprocess, return it
    if os.path.exists(filename) and not force:
        return FileResponse(
            filename,
            media_type="application/json",
            headers={
                "Content-Disposition": f"attachment; filename={language_code}.json"
            }
        )

    # Otherwise, start the long-running process in the background
    background_tasks.add_task(process_bible, language_code, file_suffix)
    return {"status": "Processing started. Check back later for results."}

async def process_bible(language_code: str, file_suffix: Optional[str] = None):
    target_vref_df = backend.get_target_vref_df(language_code, file_suffix=file_suffix)

    output = []
    for vref in bsb_bible_df['vref']:
        bsb_row = bsb_bible_df[bsb_bible_df['vref'] == vref]
        macula_row = macula_df[macula_df['vref'] == vref]
        target_row = target_vref_df[target_vref_df['vref'] == vref]

        output.append({
            'vref': vref,
            'bsb': {
                'vref': bsb_row['vref'].values[0] if not bsb_row.empty else '',
                'content': bsb_row['content'].values[0] if not bsb_row.empty else ''
            },
            'macula': {
                'vref': macula_row['vref'].values[0] if not macula_row.empty else '',
                'content': macula_row['content'].values[0] if not macula_row.empty else ''
            },
            'target': {
                'vref': target_row['vref'].values[0] if not target_row.empty else '',
                'content': target_row['content'].values[0] if not target_row.empty else ''
            }
        })

    # Save output to disk as `data/bible/{language_code}.json`
    if not os.path.exists('data/bible'):
        os.mkdir('data/bible')
    with open(f'data/bible/{language_code}{file_suffix}.json', 'w') as f:
        json.dump(output, f, ensure_ascii=False, indent=4)

    # Return the file as a download
    return FileResponse(
        f'data/bible/{language_code}{file_suffix}.json',
        media_type="application/json",
        headers={
            "Content-Disposition": f"attachment; filename={language_code}.json"
        }
    )

# endpoint to get table info
@app.get("/api/db_info")
def get_db_info():
    output = []
    db = lancedb.connect("./lancedb")
    table = db.open_table('verses').to_pandas()
    # Get unique languages in the table
    languages = table['language'].unique()
    output.append({
        'name': 'verses',
        'columns': list(table.columns),
        'num_rows': len(table),
        'languages': languages.tolist()
    })
    return output

@app.get("/api/populate_db")
def populate_db(target_language_code: str, file_suffix: str, background_tasks: BackgroundTasks):
    """
    Populate database based on language code (3-char ISO 639-3 code).
    Pulls data from bsb_bible_df, macula_df, and a target language scraped from the ebible corpus.
    """
    # Check if db exists
    if os.path.exists('./lancedb'):
        db = lancedb.connect("./lancedb")
        try:
            table = db.open_table('verses').to_pandas()
            if target_language_code in table['language'].unique():
                return {"status": "Language already exists in the database. Please delete the language data and try again."}
        except Exception:
            if target_language_code.startswith('init'):  # To initialize databases
                logger.info('Initializing Greek/Hebrew and English vectorstores...')
                background_tasks.add_task(backend.create_lancedb_table_from_df, bsb_bible_df, 'verses')  # load_database loads up the macula and bsb tables by default if they don't exist... Probably should make this less magical in the future
                return {"status": f"Database initialization started for {target_language_code + file_suffix}... takes about 45 seconds for 10 lines of text and ~300 seconds for the whole Bible, so be patient!"}

    logger.info('Populating database...')
    background_tasks.add_task(backend.load_database, target_language_code, file_suffix)
    return {"status": "Database population started... takes about 45 seconds for 10 lines of text and ~300 seconds for the whole Bible, so be patient!"}

@app.get("/api/query/{language_code}/{query}&limit={limit}")
def call_query_endpoint(language_code: str, query: str, limit: str = '10'):
    return backend.query_lancedb_table(language_code, query, limit=limit)

# User should be able to submit vref + source language + target language to a /api/translation-prompt-builder/ endpoint
@app.get("/api/translation-prompt-builder")
def get_translation_prompt(vref: str, target_language_code: str, source_language_code: str = '', bsb_bible_df=None, macula_df=None, number_of_examples: int = 3):
    """Get a forward-translation few-shot prompt for a given vref, source, and target language code."""
    # Decode URI vref
    vref = urllib.parse.unquote(vref)
    print(f'vref: {vref}')
    return backend.build_translation_prompt(vref, target_language_code, source_language_code=source_language_code, bsb_bible_df=bsb_bible_df, macula_df=macula_df, number_of_examples=number_of_examples)

@app.get("/api/vrefs/{book}")
def get_vrefs(book: str):
    """Get a list of vrefs from the ebible corpus for a given book."""
    return backend.get_vref_list(book)

@app.get("/api/vrefs")
def get_all_vrefs():
    """Get a list of vrefs from the ebible corpus."""
    return backend.get_vref_list()

@app.get("/api/unique_tokens")
def get_unique_tokens(language_code: str):
    """Get a list of unique tokens from the ebible corpus texts by language code."""
    print(f'language_code: {language_code}')
    return backend.get_unique_tokens_for_language(language_code)

'''
example post body:
{
    "language_code": "aai",
    "size": 3,
    "n": 5,
    "string_filter": ["Ayu", "Paul", "Jesu", "Keriso", "ana", "akir", "wairafin", "tur", "abarin", "isan", "rubinu", "naatu", "Tur", "Gewasin", "binan", "isan", "God", "eafu", "atit"]
}
'''
class NgramRequest(BaseModel):
    language_code: str
    size: int = 2
    n: int = 100
    string_filter: List[str] = []

@app.post("/api/ngrams")
def get_ngrams(request: NgramRequest):
    return backend.get_ngrams(request.language_code, size=request.size, n=request.n, string_filter=request.string_filter)

class EvaluateTranslationRequest(BaseModel):
    verse_triplets: dict[str, TranslationTriplet]
    hypothesis_vref: str

@app.post("/api/evaluate")
def evaluate_translation_prompt(request: EvaluateTranslationRequest):
    verse_triplets = request.verse_triplets
    hypothesis_vref = request.hypothesis_vref
    valid_vrefs = backend.get_vref_list(hypothesis_vref.split(' ')[0])
    print(f'valid_vrefs: {valid_vrefs[:10]}')
    if hypothesis_vref not in valid_vrefs:
        return {"status": f"You submitted vref {hypothesis_vref}, but this vref is not in the ebible corpus. See https://raw.githubusercontent.com/BibleNLP/ebible/main/metadata/vref.txt for valid vrefs."}

    prediction = backend.execute_discriminator_evaluation(verse_triplets, hypothesis_vref=hypothesis_vref)
    return {"input_received": verse_triplets, "hypothesis_vref": hypothesis_vref, "prediction": prediction}

@app.get("/api/evaluate_test")
def evaluate_translation_prompt_test():
    verse_triplets = {"ACT 13:47":{"Greek/Hebrew Source":"οὕτως γὰρ ἐντέταλται ἡμῖν ὁ Κύριος Τέθεικά σε εἰς φῶς ἐθνῶν τοῦ εἶναί σε εἰς σωτηρίαν ἕως ἐσχάτου τῆς γῆς.","English Reference":"For this is what the Lord has commanded us: ‘I have made you a light for the Gentiles, to bring salvation to the ends of the earth.’”","Target":"Anayabin Regah ana obaiyunen tur biti iti na’atube eo, ‘Ayu kwa ayasairi Ufun Sabuw hai marakaw isan, saise kwa i boro yawas kwanab kwanatit kwanan tafaram yomanin kwanatit.’"},"ACT 3:20":{"Greek/Hebrew Source":"ὅπως ἂν ἔλθωσιν καιροὶ ἀναψύξεως ἀπὸ προσώπου τοῦ Κυρίου καὶ ἀποστείλῃ τὸν προκεχειρισμένον ὑμῖν Χριστὸν Ἰησοῦν,","English Reference":"that times of refreshing may come from the presence of the Lord, and that He may send Jesus, the Christ, who has been appointed for you.","Target":"Nati namamatar ana veya, imaibo ayub ana fair bain baiboubun isan boro Regah wanawananamaim nan biya natit. Jesu, i ana Roubinineyan orot marasika kwa isa rurubin boro niyafar."},"LAM 2:13":{"Greek/Hebrew Source":"מָ֣ה אֲדַמֶּה־ לָּ֗ךְ הַבַּת֙ יְר֣וּשָׁלִַ֔ם מָ֤ה אַשְׁוֶה־ לָּךְ֙ וַאֲנַֽחֲמֵ֔ךְ בְּתוּלַ֖ת בַּת־ צִיּ֑וֹן כִּֽי־ גָד֥וֹל כַּיָּ֛ם שִׁבְרֵ֖ךְ מִ֥י יִרְפָּא־ לָֽךְ׃ס","English Reference":"What can I say for you? To what can I compare you, O Daughter of Jerusalem? To what can I liken you, that I may console you, O Virgin Daughter of Zion? For your wound is as deep as the sea. Who can ever heal you?","Target":""},"ROM 1:8":{"Greek/Hebrew Source":"Πρῶτον μὲν εὐχαριστῶ τῷ Θεῷ μου διὰ Ἰησοῦ Χριστοῦ περὶ πάντων ὑμῶν, ὅτι ἡ πίστις ὑμῶν καταγγέλλεται ἐν ὅλῳ τῷ κόσμῳ.","English Reference":"First, I thank my God through Jesus Christ for all of you, because your faith is being proclaimed all over the world.","Target":"This is the hypothesized verse translation."}}

    verse_triplets: dict[str, TranslationTriplet] = {
        k: TranslationTriplet(**v) for k, v in verse_triplets.items()
    }

    # return {"status": "Evaluation prompted", "input_received": verse_triplets, "hypothesis_vref": None, "hypothesis_key": None}
    return backend.execute_discriminator_evaluation(verse_triplets, hypothesis_vref='ROM 1:8')

# @app.websocket("/api/test_feedback_loop")
# async def test_feedback_loop(websocket: WebSocket, vref: str = Query(...), target_language_code: str = Query(...), source_language_code: str = Query(None)):
#     await websocket.accept()
#     feedback_loop = backend.AILoop(
#         iterations=10,
#         function_a=lambda: backend.execute_fewshot_translation(vref, target_language_code, source_language_code),
#         function_b=backend.execute_discriminator_evaluation,
#     )
#     for result in feedback_loop:
#         await websocket.send_json(result)
#     await websocket.close()

@app.get('/api/translate')
def forward_translation_request(vref: str, target_language_code: str):
    translation = backend.Translation(vref, target_language_code=target_language_code)
    return str({'hypothesis': translation.get_hypothesis(), 'feedback': translation.get_feedback()})

@app.get('/api/get_alignment')
def get_available_alignment(language_code=None, n=10):
    # return ../data/alignments/test_spanish.jsonl
    code = language_code if language_code else 'test_spanish'
    with open(f'data/alignments/{code}.jsonl', 'r') as f:
        # note that the data is jsonl, so we need to read it line by line
        data = f.readlines()

    # Check if n is greater than the length of data
    n = min(int(n), len(data))

    # Randomly sample n lines from the data
    random_indexes = random.sample(range(len(data)), n)
    data = [data[i] for i in random_indexes]

    # Map over each line and restructure it to match the interface expected by the frontend
    restructured_data = []

    for raw_line in data:
        line = json.loads(raw_line)
        restructured_alignments = []
        for alignment in line['alignments']:
            keys = list(alignment.keys())
            source_key = next((key for key in keys if 'Greek' in key or 'Hebrew' in key), None)
            bridge_key = next((key for key in keys if 'English' in key), None)
            target_key = next((key for key in keys if key != source_key and key != bridge_key), None)

            if source_key and bridge_key and target_key:
                restructured_alignments.append({
                    'source': alignment[source_key],
                    'bridge': alignment[bridge_key],
                    'target': alignment[target_key]
                })

        restructured_data.append({
            'vref': line['vref'],
            'alignments': restructured_alignments
        })

    return restructured_data

@app.get('/api/all_alignment_files')
def get_all_alignment_files():
    alignment_files = []

    def find_jsonl_files(directory):
        for root, dirs, files in os.walk(directory):
            for file in files:
                if file.endswith('.jsonl'):
                    alignment_files.append(os.path.join(root, file))

    find_jsonl_files('data/alignments')
    return alignment_files
[ "lancedb.connect" ]
[((468, 496), 'logging.getLogger', 'logging.getLogger', (['"""uvicorn"""'], {}), "('uvicorn')\n", (485, 496), False, 'import logging\n'), ((504, 513), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (511, 513), False, 'from fastapi import FastAPI, BackgroundTasks, Query, WebSocket, WebSocketDisconnect\n'), ((3201, 3227), 'json.dumps', 'json.dumps', (['verse_triplets'], {}), '(verse_triplets)\n', (3211, 3227), False, 'import os, json, urllib\n'), ((6321, 6501), 'fastapi.responses.FileResponse', 'FileResponse', (['f"""data/bible/{language_code}{file_suffix}.json"""'], {'media_type': '"""application/json"""', 'headers': "{'Content-Disposition': f'attachment; filename={language_code}.json'}"}), "(f'data/bible/{language_code}{file_suffix}.json', media_type=\n 'application/json', headers={'Content-Disposition':\n f'attachment; filename={language_code}.json'})\n", (6333, 6501), False, 'from fastapi.responses import FileResponse\n'), ((6649, 6677), 'lancedb.connect', 'lancedb.connect', (['"""./lancedb"""'], {}), "('./lancedb')\n", (6664, 6677), False, 'import lancedb\n'), ((7356, 7383), 'os.path.exists', 'os.path.exists', (['"""./lancedb"""'], {}), "('./lancedb')\n", (7370, 7383), False, 'import os, json, urllib\n'), ((9292, 9318), 'urllib.parse.unquote', 'urllib.parse.unquote', (['vref'], {}), '(vref)\n', (9312, 9318), False, 'import os, json, urllib\n'), ((4386, 4410), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (4400, 4410), False, 'import os, json, urllib\n'), ((4441, 4578), 'fastapi.responses.FileResponse', 'FileResponse', (['filename'], {'media_type': '"""application/json"""', 'headers': "{'Content-Disposition': f'attachment; filename={language_code}.json'}"}), "(filename, media_type='application/json', headers={\n 'Content-Disposition': f'attachment; filename={language_code}.json'})\n", (4453, 4578), False, 'from fastapi.responses import FileResponse\n'), ((6066, 6094), 'os.path.exists', 'os.path.exists', (['"""data/bible"""'], {}), "('data/bible')\n", (6080, 6094), False, 'import os, json, urllib\n'), ((6104, 6126), 'os.mkdir', 'os.mkdir', (['"""data/bible"""'], {}), "('data/bible')\n", (6112, 6126), False, 'import os, json, urllib\n'), ((6218, 6268), 'json.dump', 'json.dump', (['output', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(output, f, ensure_ascii=False, indent=4)\n', (6227, 6268), False, 'import os, json, urllib\n'), ((7398, 7426), 'lancedb.connect', 'lancedb.connect', (['"""./lancedb"""'], {}), "('./lancedb')\n", (7413, 7426), False, 'import lancedb\n'), ((16777, 16795), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (16784, 16795), False, 'import os, json, urllib\n'), ((15664, 15684), 'json.loads', 'json.loads', (['raw_line'], {}), '(raw_line)\n', (15674, 15684), False, 'import os, json, urllib\n'), ((16915, 16939), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (16927, 16939), False, 'import os, json, urllib\n')]
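A hedged client sketch for a few of the routes above, assuming the app is served on localhost:3000 as the docstring URLs suggest:

# Hedged usage sketch; URLs mirror the route definitions and docstrings above.
import requests

base = "http://localhost:3000"
print(requests.get(f"{base}/api/bsb_verses/GEN 2:19").json())
print(requests.get(f"{base}/api/translation-prompt-builder",
             params={"vref": "GEN 2:19", "target_language_code": "aai"}).json())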
from io import BytesIO
from pathlib import Path
from typing import Any, List, Tuple, Union

import cv2
import numpy as np
import torch
from matplotlib import pyplot as plt
from pandas import DataFrame
from PIL import Image
from tqdm import tqdm

from ultralytics.data.augment import Format
from ultralytics.data.dataset import YOLODataset
from ultralytics.data.utils import check_det_dataset
from ultralytics.models.yolo.model import YOLO
from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks

from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch


class ExplorerDataset(YOLODataset):

    def __init__(self, *args, data: dict = None, **kwargs) -> None:
        super().__init__(*args, data=data, **kwargs)

    # NOTE: Load the image directly without any resize operations.
    def load_image(self, i: int) -> Union[Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]], Tuple[None, None, None]]:
        """Loads 1 image from dataset index 'i', returns (im, resized hw)."""
        im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
        if im is None:  # not cached in RAM
            if fn.exists():  # load npy
                im = np.load(fn)
            else:  # read image
                im = cv2.imread(f)  # BGR
                if im is None:
                    raise FileNotFoundError(f'Image Not Found {f}')
            h0, w0 = im.shape[:2]  # orig hw
            return im, (h0, w0), im.shape[:2]

        return self.ims[i], self.im_hw0[i], self.im_hw[i]

    def build_transforms(self, hyp: IterableSimpleNamespace = None):
        """Build transforms that format the batch without resizing or augmenting the images."""
        return Format(
            bbox_format='xyxy',
            normalize=False,
            return_mask=self.use_segments,
            return_keypoint=self.use_keypoints,
            batch_idx=True,
            mask_ratio=hyp.mask_ratio,
            mask_overlap=hyp.overlap_mask,
        )


class Explorer:

    def __init__(self, data: Union[str, Path] = 'coco128.yaml', model: str = 'yolov8n.pt',
                 uri: str = '~/ultralytics/explorer') -> None:
        checks.check_requirements(['lancedb>=0.4.3', 'duckdb'])
        import lancedb

        self.connection = lancedb.connect(uri)
        self.table_name = Path(data).name.lower() + '_' + model.lower()
        self.sim_idx_base_name = f'{self.table_name}_sim_idx'.lower()  # Use this name and append thres and top_k to reuse the table
        self.model = YOLO(model)
        self.data = data  # None
        self.choice_set = None

        self.table = None
        self.progress = 0

    def create_embeddings_table(self, force: bool = False, split: str = 'train') -> None:
        """
        Create LanceDB table containing the embeddings of the images in the dataset. The table will be reused if it
        already exists. Pass force=True to overwrite the existing table.

        Args:
            force (bool): Whether to overwrite the existing table or not. Defaults to False.
            split (str): Split of the dataset to use. Defaults to 'train'.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            ```
        """
        if self.table is not None and not force:
            LOGGER.info('Table already exists. Reusing it. Pass force=True to overwrite it.')
            return
        if self.table_name in self.connection.table_names() and not force:
            LOGGER.info(f'Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.')
            self.table = self.connection.open_table(self.table_name)
            self.progress = 1
            return
        if self.data is None:
            raise ValueError('Data must be provided to create embeddings table')

        data_info = check_det_dataset(self.data)
        if split not in data_info:
            raise ValueError(
                f'Split {split} is not found in the dataset. Available keys in the dataset are {list(data_info.keys())}')

        choice_set = data_info[split]
        choice_set = choice_set if isinstance(choice_set, list) else [choice_set]
        self.choice_set = choice_set
        dataset = ExplorerDataset(img_path=choice_set, data=data_info, augment=False, cache=False, task=self.model.task)

        # Create the table schema
        batch = dataset[0]
        vector_size = self.model.embed(batch['im_file'], verbose=False)[0].shape[0]
        table = self.connection.create_table(self.table_name, schema=get_table_schema(vector_size), mode='overwrite')
        table.add(
            self._yield_batches(dataset,
                                data_info,
                                self.model,
                                exclude_keys=['img', 'ratio_pad', 'resized_shape', 'ori_shape', 'batch_idx']))

        self.table = table

    def _yield_batches(self, dataset: ExplorerDataset, data_info: dict, model: YOLO, exclude_keys: List[str]):
        # Implement Batching
        for i in tqdm(range(len(dataset))):
            self.progress = float(i + 1) / len(dataset)
            batch = dataset[i]
            for k in exclude_keys:
                batch.pop(k, None)
            batch = sanitize_batch(batch, data_info)
            batch['vector'] = model.embed(batch['im_file'], verbose=False)[0].detach().tolist()
            yield [batch]

    def query(self, imgs: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, limit: int = 25) -> Any:  # pyarrow.Table
        """
        Query the table for similar images. Accepts a single image or a list of images.

        Args:
            imgs (str or list): Path to the image or a list of paths to the images.
            limit (int): Number of results to return.

        Returns:
            An arrow table containing the results. Supports converting to:
                - pandas dataframe: `result.to_pandas()`
                - dict of lists: `result.to_pydict()`

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.query(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        if self.table is None:
            raise ValueError('Table is not created. Please create the table first.')
        if isinstance(imgs, str):
            imgs = [imgs]
        assert isinstance(imgs, list), f'img must be a string or a list of strings. Got {type(imgs)}'
        embeds = self.model.embed(imgs)
        # Get avg if multiple images are passed (len > 1)
        embeds = torch.mean(torch.stack(embeds), 0).cpu().numpy() if len(embeds) > 1 else embeds[0].cpu().numpy()
        return self.table.search(embeds).limit(limit).to_arrow()

    def sql_query(self, query: str, return_type: str = 'pandas') -> Union[DataFrame, Any, None]:  # pandas.dataframe or pyarrow.Table
        """
        Run a SQL-Like query on the table. Utilizes LanceDB predicate pushdown.

        Args:
            query (str): SQL query to run.
            return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.

        Returns:
            An arrow table containing the results.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
            result = exp.sql_query(query)
            ```
        """
        assert return_type in ['pandas', 'arrow'], f'Return type should be either `pandas` or `arrow`, but got {return_type}'
        import duckdb

        if self.table is None:
            raise ValueError('Table is not created. Please create the table first.')

        # Note: using filter pushdown would be a better long term solution. Temporarily using duckdb for this.
        table = self.table.to_arrow()  # noqa NOTE: Don't comment this. This line is used by DuckDB
        if not query.startswith('SELECT') and not query.startswith('WHERE'):
            raise ValueError(
                f'Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. found {query}')
        if query.startswith('WHERE'):
            query = f"SELECT * FROM 'table' {query}"
        LOGGER.info(f'Running query: {query}')

        rs = duckdb.sql(query)
        if return_type == 'pandas':
            return rs.df()
        elif return_type == 'arrow':
            return rs.arrow()

    def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image:
        """
        Plot the results of a SQL-Like query on the table.

        Args:
            query (str): SQL query to run.
            labels (bool): Whether to plot the labels or not.

        Returns:
            PIL Image containing the plot.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
            result = exp.plot_sql_query(query)
            ```
        """
        result = self.sql_query(query, return_type='arrow')
        if len(result) == 0:
            LOGGER.info('No results found.')
            return None
        img = plot_query_result(result, plot_labels=labels)
        return Image.fromarray(img)

    def get_similar(self,
                    img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
                    idx: Union[int, List[int]] = None,
                    limit: int = 25,
                    return_type: str = 'pandas') -> Union[DataFrame, Any]:  # pandas.dataframe or pyarrow.Table
        """
        Query the table for similar images. Accepts a single image or a list of images.

        Args:
            img (str or list): Path to the image or a list of paths to the images.
            idx (int or list): Index of the image in the table or a list of indexes.
            limit (int): Number of results to return. Defaults to 25.
            return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.

        Returns:
            A table or pandas dataframe containing the results.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.get_similar(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        assert return_type in ['pandas', 'arrow'], f'Return type should be either `pandas` or `arrow`, but got {return_type}'
        img = self._check_imgs_or_idxs(img, idx)
        similar = self.query(img, limit=limit)

        if return_type == 'pandas':
            return similar.to_pandas()
        elif return_type == 'arrow':
            return similar

    def plot_similar(self,
                     img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
                     idx: Union[int, List[int]] = None,
                     limit: int = 25,
                     labels: bool = True) -> Image.Image:
        """
        Plot the similar images. Accepts images or indexes.

        Args:
            img (str or list): Path to the image or a list of paths to the images.
            idx (int or list): Index of the image in the table or a list of indexes.
            labels (bool): Whether to plot the labels or not.
            limit (int): Number of results to return. Defaults to 25.

        Returns:
            PIL Image containing the plot.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.plot_similar(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        similar = self.get_similar(img, idx, limit, return_type='arrow')
        if len(similar) == 0:
            LOGGER.info('No results found.')
            return None
        img = plot_query_result(similar, plot_labels=labels)
        return Image.fromarray(img)

    def similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> DataFrame:
        """
        Calculate the similarity index of all the images in the table. Here, the index will contain the data points
        that are max_dist or closer to the image in the embedding space at a given index.

        Args:
            max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
            top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit when
                running vector search. Defaults to None.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to False.

        Returns:
            A pandas dataframe containing the similarity index.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            sim_idx = exp.similarity_index()
            ```
        """
        if self.table is None:
            raise ValueError('Table is not created. Please create the table first.')
        sim_idx_table_name = f'{self.sim_idx_base_name}_thres_{max_dist}_top_{top_k}'.lower()
        if sim_idx_table_name in self.connection.table_names() and not force:
            LOGGER.info('Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.')
            return self.connection.open_table(sim_idx_table_name).to_pandas()

        if top_k and not (1.0 >= top_k >= 0.0):
            raise ValueError(f'top_k must be between 0.0 and 1.0. Got {top_k}')
        if max_dist < 0.0:
            raise ValueError(f'max_dist must be greater than or equal to 0. Got {max_dist}')

        top_k = int(top_k * len(self.table)) if top_k else len(self.table)
        top_k = max(top_k, 1)
        features = self.table.to_lance().to_table(columns=['vector', 'im_file']).to_pydict()
        im_files = features['im_file']
        embeddings = features['vector']

        sim_table = self.connection.create_table(sim_idx_table_name, schema=get_sim_index_schema(), mode='overwrite')

        def _yield_sim_idx():
            for i in tqdm(range(len(embeddings))):
                sim_idx = self.table.search(embeddings[i]).limit(top_k).to_pandas().query(f'_distance <= {max_dist}')
                yield [{
                    'idx': i,
                    'im_file': im_files[i],
                    'count': len(sim_idx),
                    'sim_im_files': sim_idx['im_file'].tolist()}]

        sim_table.add(_yield_sim_idx())
        self.sim_index = sim_table
        return sim_table.to_pandas()

    def plot_similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> Image.Image:
        """
        Plot the similarity index of all the images in the table. Here, the index will contain the data points that
        are max_dist or closer to the image in the embedding space at a given index.

        Args:
            max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
            top_k (float): Percentage of closest data points to consider when counting. Used to apply limit when
                running vector search. Defaults to 0.01.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to False.

        Returns:
            PIL.PngImagePlugin.PngImageFile containing the plot.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()

            similarity_idx_plot = exp.plot_similarity_index()
            similarity_idx_plot.show()  # view image preview
            similarity_idx_plot.save('path/to/save/similarity_index_plot.png')  # save contents to file
            ```
        """
        sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force)
        sim_count = sim_idx['count'].tolist()
        sim_count = np.array(sim_count)

        indices = np.arange(len(sim_count))

        # Create the bar plot
        plt.bar(indices, sim_count)

        # Customize the plot (optional)
        plt.xlabel('data idx')
        plt.ylabel('Count')
        plt.title('Similarity Count')
        buffer = BytesIO()
        plt.savefig(buffer, format='png')
        buffer.seek(0)

        # Use Pillow to open the image from the buffer
        return Image.fromarray(np.array(Image.open(buffer)))

    def _check_imgs_or_idxs(self, img: Union[str, np.ndarray, List[str], List[np.ndarray], None],
                            idx: Union[None, int, List[int]]) -> List[np.ndarray]:
        """Validate that exactly one of img/idx is provided and resolve indexes to image paths."""
        if img is None and idx is None:
            raise ValueError('Either img or idx must be provided.')
        if img is not None and idx is not None:
            raise ValueError('Only one of img or idx must be provided.')
        if idx is not None:
            idx = idx if isinstance(idx, list) else [idx]
            img = self.table.to_lance().take(idx, columns=['im_file']).to_pydict()['im_file']

        return img if isinstance(img, list) else [img]

    def ask_ai(self, query):
        """
        Ask AI a question.

        Args:
            query (str): Question to ask.

        Returns:
            Answer from AI.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            answer = exp.ask_ai('Show images with 1 person and 2 dogs')
            ```
        """
        result = prompt_sql_query(query)
        try:
            df = self.sql_query(result)
        except Exception as e:
            LOGGER.error('AI generated query is not valid. Please try again with a different prompt')
            LOGGER.error(e)
            return None
        return df

    def visualize(self, result):
        """
        Visualize the results of a query. TODO.

        Args:
            result (pyarrow.Table): Table containing the results of a query.
        """
        pass

    def generate_report(self, result):
        """
        Generate a report of the dataset. TODO
        """
        pass
[ "lancedb.connect" ]
[((1642, 1835), 'ultralytics.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normalize=False, return_mask=self.use_segments,\n return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.\n mask_ratio, mask_overlap=hyp.overlap_mask)\n", (1648, 1835), False, 'from ultralytics.data.augment import Format\n'), ((2137, 2192), 'ultralytics.utils.checks.check_requirements', 'checks.check_requirements', (["['lancedb>=0.4.3', 'duckdb']"], {}), "(['lancedb>=0.4.3', 'duckdb'])\n", (2162, 2192), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((2243, 2263), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2258, 2263), False, 'import lancedb\n'), ((2499, 2510), 'ultralytics.models.yolo.model.YOLO', 'YOLO', (['model'], {}), '(model)\n', (2503, 2510), False, 'from ultralytics.models.yolo.model import YOLO\n'), ((3842, 3870), 'ultralytics.data.utils.check_det_dataset', 'check_det_dataset', (['self.data'], {}), '(self.data)\n', (3859, 3870), False, 'from ultralytics.data.utils import check_det_dataset\n'), ((8431, 8469), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Running query: {query}"""'], {}), "(f'Running query: {query}')\n", (8442, 8469), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((8484, 8501), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (8494, 8501), False, 'import duckdb\n'), ((9454, 9474), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (9469, 9474), False, 'from PIL import Image\n'), ((12152, 12172), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (12167, 12172), False, 'from PIL import Image\n'), ((16115, 16134), 'numpy.array', 'np.array', (['sim_count'], {}), '(sim_count)\n', (16123, 16134), True, 'import numpy as np\n'), ((16219, 16246), 'matplotlib.pyplot.bar', 'plt.bar', (['indices', 'sim_count'], {}), '(indices, sim_count)\n', (16226, 16246), True, 'from matplotlib import pyplot as plt\n'), ((16296, 16318), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data idx"""'], {}), "('data idx')\n", (16306, 16318), True, 'from matplotlib import pyplot as plt\n'), ((16327, 16346), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (16337, 16346), True, 'from matplotlib import pyplot as plt\n'), ((16355, 16384), 'matplotlib.pyplot.title', 'plt.title', (['"""Similarity Count"""'], {}), "('Similarity Count')\n", (16364, 16384), True, 'from matplotlib import pyplot as plt\n'), ((16402, 16411), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (16409, 16411), False, 'from io import BytesIO\n'), ((16420, 16453), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {'format': '"""png"""'}), "(buffer, format='png')\n", (16431, 16453), True, 'from matplotlib import pyplot as plt\n'), ((3303, 3389), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Table already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Table already exists. Reusing it. Pass force=True to overwrite it.')\n", (3314, 3389), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((3491, 3601), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Table {self.table_name} already exists. Reusing it. 
Pass force=True to overwrite it."""'], {}), "(\n f'Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (3502, 3601), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((9322, 9354), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (9333, 9354), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((12019, 12051), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (12030, 12051), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((13473, 13576), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Similarity matrix already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (13484, 13576), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((1222, 1233), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1229, 1233), True, 'import numpy as np\n'), ((1287, 1300), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (1297, 1300), False, 'import cv2\n'), ((16573, 16591), 'PIL.Image.open', 'Image.open', (['buffer'], {}), '(buffer)\n', (16583, 16591), False, 'from PIL import Image\n'), ((17761, 17860), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""AI generated query is not valid. Please try again with a different prompt"""'], {}), "(\n 'AI generated query is not valid. Please try again with a different prompt'\n )\n", (17773, 17860), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((17863, 17878), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['e'], {}), '(e)\n', (17875, 17878), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((2290, 2300), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2294, 2300), False, 'from pathlib import Path\n'), ((6655, 6674), 'torch.stack', 'torch.stack', (['embeds'], {}), '(embeds)\n', (6666, 6674), False, 'import torch\n')]
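The row above exercises a small LanceDB surface: lancedb.connect, create_table(..., mode='overwrite'), and the fluent search().limit().to_arrow() chain. A minimal, self-contained sketch of that surface, assuming only that lancedb is installed; the path, table name, vectors, and file names below are illustrative, not taken from the row:

```python
import lancedb

db = lancedb.connect("/tmp/lancedb_demo")  # directory is created on first use; path is illustrative

data = [
    {"vector": [0.0, 1.0], "im_file": "a.jpg"},
    {"vector": [1.0, 0.0], "im_file": "b.jpg"},
]

# mode="overwrite" mirrors create_embeddings_table(force=True) in the row above.
tbl = db.create_table("demo_vectors", data=data, mode="overwrite")

# Fluent nearest-neighbour search, materialized as a pyarrow.Table.
hits = tbl.search([0.9, 0.1]).limit(1).to_arrow()
print(hits.to_pydict())
```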
"""LanceDB vector store.""" from typing import Any, List, Optional import numpy as np from pandas import DataFrame from llama_index.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.vector_stores.types import ( MetadataFilters, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from llama_index.vector_stores.utils import node_to_metadata_dict def _to_lance_filter(standard_filters: MetadataFilters) -> Any: """Translate standard metadata filters to Lance specific spec.""" filters = [] for filter in standard_filters.legacy_filters(): if isinstance(filter.value, str): filters.append(filter.key + ' = "' + filter.value + '"') else: filters.append(filter.key + " = " + str(filter.value)) return " AND ".join(filters) def _to_llama_similarities(results: DataFrame) -> List[float]: keys = results.keys() normalized_similarities: np.ndarray if "score" in keys: normalized_similarities = np.exp(results["score"] - np.max(results["score"])) elif "_distance" in keys: normalized_similarities = np.exp(-results["_distance"]) else: normalized_similarities = np.linspace(1, 0, len(results)) return normalized_similarities.tolist() class LanceDBVectorStore(VectorStore): """The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. """ stores_text = True flat_metadata: bool = True def __init__( self, uri: str, table_name: str = "vectors", nprobes: int = 20, refine_factor: Optional[int] = None, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.nprobes = nprobes self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return def add( self, nodes: List[BaseNode], **add_kwargs: Any, ) -> List[str]: data = [] ids = [] for node in nodes: metadata = node_to_metadata_dict( node, remove_text=True, flat_metadata=self.flat_metadata ) append_data = { "id": node.node_id, "doc_id": node.ref_doc_id, "vector": node.get_embedding(), "text": node.get_content(metadata_mode=MetadataMode.NONE), } append_data.update(metadata) data.append(append_data) ids.append(node.node_id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. 
""" table = self.connection.open_table(self.table_name) table.delete('document_id = "' + ref_doc_id + '"') def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface." ) where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) table = self.connection.open_table(self.table_name) lance_query = ( table.search(query.query_embedding) .limit(query.similarity_top_k) .where(where) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_df() nodes = [] for _, item in results.iterrows(): node = TextNode( text=item.text or "", # ensure text is a string id_=item.id, relationships={ NodeRelationship.SOURCE: RelatedNodeInfo(node_id=item.doc_id), }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((2782, 2802), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2797, 2802), False, 'import lancedb\n'), ((1179, 1208), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1185, 1208), True, 'import numpy as np\n'), ((3214, 3293), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (3235, 3293), False, 'from llama_index.vector_stores.utils import node_to_metadata_dict\n'), ((1089, 1113), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1095, 1113), True, 'import numpy as np\n'), ((5548, 5584), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item.doc_id'}), '(node_id=item.doc_id)\n', (5563, 5584), False, 'from llama_index.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')]
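The vector store above depends on two LanceDB behaviours worth making explicit: metadata filters are pushed down into the ANN search via .where() (built by _to_lance_filter), and deletion is predicate-based, so the filter must name a column the add() path actually wrote (doc_id). A minimal sketch under those assumptions; the table and field names are illustrative, and standard single-quoted SQL string literals are used here where the store above builds double-quoted ones:

```python
import lancedb

db = lancedb.connect("/tmp/lancedb_demo")  # path is illustrative
tbl = db.create_table(
    "docs",
    data=[{"vector": [0.1, 0.2], "id": "n1", "doc_id": "d1", "text": "hello"}],
    mode="overwrite",
)

# Predicate pushdown: the SQL-like filter is applied inside the vector search,
# which is what query() above does via .where(_to_lance_filter(...)).
res = (
    tbl.search([0.1, 0.2])
    .limit(5)
    .where("doc_id = 'd1'")
    .nprobes(20)  # accuracy/latency knob the store exposes as `nprobes`
    .to_df()
)
print(res[["id", "text"]])

# Predicate-based delete, as in delete(); the column must exist in the table.
tbl.delete("doc_id = 'd1'")
```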
# Ultralytics YOLO 🚀, AGPL-3.0 license from io import BytesIO from pathlib import Path from typing import Any, List, Tuple, Union import cv2 import numpy as np import torch from PIL import Image from matplotlib import pyplot as plt from pandas import DataFrame from tqdm import tqdm from ultralytics.data.augment import Format from ultralytics.data.dataset import YOLODataset from ultralytics.data.utils import check_det_dataset from ultralytics.models.yolo.model import YOLO from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch class ExplorerDataset(YOLODataset): def __init__(self, *args, data: dict = None, **kwargs) -> None: super().__init__(*args, data=data, **kwargs) def load_image(self, i: int) -> Union[Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]], Tuple[None, None, None]]: """Loads 1 image from dataset index 'i' without any resize ops.""" im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i] if im is None: # not cached in RAM if fn.exists(): # load npy im = np.load(fn) else: # read image im = cv2.imread(f) # BGR if im is None: raise FileNotFoundError(f"Image Not Found {f}") h0, w0 = im.shape[:2] # orig hw return im, (h0, w0), im.shape[:2] return self.ims[i], self.im_hw0[i], self.im_hw[i] def build_transforms(self, hyp: IterableSimpleNamespace = None): """Creates transforms for dataset images without resizing.""" return Format( bbox_format="xyxy", normalize=False, return_mask=self.use_segments, return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.mask_ratio, mask_overlap=hyp.overlap_mask, ) class Explorer: def __init__( self, data: Union[str, Path] = "coco128.yaml", model: str = "yolov8n.pt", uri: str = USER_CONFIG_DIR / "explorer", ) -> None: # Note duckdb==0.10.0 bug https://github.com/ultralytics/ultralytics/pull/8181 checks.check_requirements(["lancedb>=0.4.3", "duckdb<=0.9.2"]) import lancedb self.connection = lancedb.connect(uri) self.table_name = Path(data).name.lower() + "_" + model.lower() self.sim_idx_base_name = ( f"{self.table_name}_sim_idx".lower() ) # Use this name and append thres and top_k to reuse the table self.model = YOLO(model) self.data = data # None self.choice_set = None self.table = None self.progress = 0 def create_embeddings_table(self, force: bool = False, split: str = "train") -> None: """ Create LanceDB table containing the embeddings of the images in the dataset. The table will be reused if it already exists. Pass force=True to overwrite the existing table. Args: force (bool): Whether to overwrite the existing table or not. Defaults to False. split (str): Split of the dataset to use. Defaults to 'train'. Example: ```python exp = Explorer() exp.create_embeddings_table() ``` """ if self.table is not None and not force: LOGGER.info("Table already exists. Reusing it. Pass force=True to overwrite it.") return if self.table_name in self.connection.table_names() and not force: LOGGER.info(f"Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.") self.table = self.connection.open_table(self.table_name) self.progress = 1 return if self.data is None: raise ValueError("Data must be provided to create embeddings table") data_info = check_det_dataset(self.data) if split not in data_info: raise ValueError( f"Split {split} is not found in the dataset. 
Available keys in the dataset are {list(data_info.keys())}" ) choice_set = data_info[split] choice_set = choice_set if isinstance(choice_set, list) else [choice_set] self.choice_set = choice_set dataset = ExplorerDataset(img_path=choice_set, data=data_info, augment=False, cache=False, task=self.model.task) # Create the table schema batch = dataset[0] vector_size = self.model.embed(batch["im_file"], verbose=False)[0].shape[0] table = self.connection.create_table(self.table_name, schema=get_table_schema(vector_size), mode="overwrite") table.add( self._yield_batches( dataset, data_info, self.model, exclude_keys=["img", "ratio_pad", "resized_shape", "ori_shape", "batch_idx"], ) ) self.table = table def _yield_batches(self, dataset: ExplorerDataset, data_info: dict, model: YOLO, exclude_keys: List[str]): """Generates batches of data for embedding, excluding specified keys.""" for i in tqdm(range(len(dataset))): self.progress = float(i + 1) / len(dataset) batch = dataset[i] for k in exclude_keys: batch.pop(k, None) batch = sanitize_batch(batch, data_info) batch["vector"] = model.embed(batch["im_file"], verbose=False)[0].detach().tolist() yield [batch] def query( self, imgs: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, limit: int = 25 ) -> Any: # pyarrow.Table """ Query the table for similar images. Accepts a single image or a list of images. Args: imgs (str or list): Path to the image or a list of paths to the images. limit (int): Number of results to return. Returns: (pyarrow.Table): An arrow table containing the results. Supports converting to: - pandas dataframe: `result.to_pandas()` - dict of lists: `result.to_pydict()` Example: ```python exp = Explorer() exp.create_embeddings_table() similar = exp.query(img='https://ultralytics.com/images/zidane.jpg') ``` """ if self.table is None: raise ValueError("Table is not created. Please create the table first.") if isinstance(imgs, str): imgs = [imgs] assert isinstance(imgs, list), f"img must be a string or a list of strings. Got {type(imgs)}" embeds = self.model.embed(imgs) # Get avg if multiple images are passed (len > 1) embeds = torch.mean(torch.stack(embeds), 0).cpu().numpy() if len(embeds) > 1 else embeds[0].cpu().numpy() return self.table.search(embeds).limit(limit).to_arrow() def sql_query( self, query: str, return_type: str = "pandas" ) -> Union[DataFrame, Any, None]: # pandas.dataframe or pyarrow.Table """ Run a SQL-Like query on the table. Utilizes LanceDB predicate pushdown. Args: query (str): SQL query to run. return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'. Returns: (pyarrow.Table): An arrow table containing the results. Example: ```python exp = Explorer() exp.create_embeddings_table() query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'" result = exp.sql_query(query) ``` """ assert return_type in { "pandas", "arrow", }, f"Return type should be either `pandas` or `arrow`, but got {return_type}" import duckdb if self.table is None: raise ValueError("Table is not created. Please create the table first.") # Note: using filter pushdown would be a better long term solution. Temporarily using duckdb for this. table = self.table.to_arrow() # noqa NOTE: Don't comment this. This line is used by DuckDB if not query.startswith("SELECT") and not query.startswith("WHERE"): raise ValueError( f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. 
found {query}" ) if query.startswith("WHERE"): query = f"SELECT * FROM 'table' {query}" LOGGER.info(f"Running query: {query}") rs = duckdb.sql(query) if return_type == "arrow": return rs.arrow() elif return_type == "pandas": return rs.df() def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image: """ Plot the results of a SQL-Like query on the table. Args: query (str): SQL query to run. labels (bool): Whether to plot the labels or not. Returns: (PIL.Image): Image containing the plot. Example: ```python exp = Explorer() exp.create_embeddings_table() query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'" result = exp.plot_sql_query(query) ``` """ result = self.sql_query(query, return_type="arrow") if len(result) == 0: LOGGER.info("No results found.") return None img = plot_query_result(result, plot_labels=labels) return Image.fromarray(img) def get_similar( self, img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, idx: Union[int, List[int]] = None, limit: int = 25, return_type: str = "pandas", ) -> Union[DataFrame, Any]: # pandas.dataframe or pyarrow.Table """ Query the table for similar images. Accepts a single image or a list of images. Args: img (str or list): Path to the image or a list of paths to the images. idx (int or list): Index of the image in the table or a list of indexes. limit (int): Number of results to return. Defaults to 25. return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'. Returns: (pandas.DataFrame): A dataframe containing the results. Example: ```python exp = Explorer() exp.create_embeddings_table() similar = exp.get_similar(img='https://ultralytics.com/images/zidane.jpg') ``` """ assert return_type in { "pandas", "arrow", }, f"Return type should be either `pandas` or `arrow`, but got {return_type}" img = self._check_imgs_or_idxs(img, idx) similar = self.query(img, limit=limit) if return_type == "arrow": return similar elif return_type == "pandas": return similar.to_pandas() def plot_similar( self, img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, idx: Union[int, List[int]] = None, limit: int = 25, labels: bool = True, ) -> Image.Image: """ Plot the similar images. Accepts images or indexes. Args: img (str or list): Path to the image or a list of paths to the images. idx (int or list): Index of the image in the table or a list of indexes. labels (bool): Whether to plot the labels or not. limit (int): Number of results to return. Defaults to 25. Returns: (PIL.Image): Image containing the plot. Example: ```python exp = Explorer() exp.create_embeddings_table() similar = exp.plot_similar(img='https://ultralytics.com/images/zidane.jpg') ``` """ similar = self.get_similar(img, idx, limit, return_type="arrow") if len(similar) == 0: LOGGER.info("No results found.") return None img = plot_query_result(similar, plot_labels=labels) return Image.fromarray(img) def similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> DataFrame: """ Calculate the similarity index of all the images in the table. Here, the index will contain the data points that are max_dist or closer to the image in the embedding space at a given index. Args: max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2. top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit when running vector search. Defaults: None. force (bool): Whether to overwrite the existing similarity index or not. 
Defaults to True. Returns: (pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image, and columns include indices of similar images and their respective distances. Example: ```python exp = Explorer() exp.create_embeddings_table() sim_idx = exp.similarity_index() ``` """ if self.table is None: raise ValueError("Table is not created. Please create the table first.") sim_idx_table_name = f"{self.sim_idx_base_name}_thres_{max_dist}_top_{top_k}".lower() if sim_idx_table_name in self.connection.table_names() and not force: LOGGER.info("Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.") return self.connection.open_table(sim_idx_table_name).to_pandas() if top_k and not (1.0 >= top_k >= 0.0): raise ValueError(f"top_k must be between 0.0 and 1.0. Got {top_k}") if max_dist < 0.0: raise ValueError(f"max_dist must be greater than 0. Got {max_dist}") top_k = int(top_k * len(self.table)) if top_k else len(self.table) top_k = max(top_k, 1) features = self.table.to_lance().to_table(columns=["vector", "im_file"]).to_pydict() im_files = features["im_file"] embeddings = features["vector"] sim_table = self.connection.create_table(sim_idx_table_name, schema=get_sim_index_schema(), mode="overwrite") def _yield_sim_idx(): """Generates a dataframe with similarity indices and distances for images.""" for i in tqdm(range(len(embeddings))): sim_idx = self.table.search(embeddings[i]).limit(top_k).to_pandas().query(f"_distance <= {max_dist}") yield [ { "idx": i, "im_file": im_files[i], "count": len(sim_idx), "sim_im_files": sim_idx["im_file"].tolist(), } ] sim_table.add(_yield_sim_idx()) self.sim_index = sim_table return sim_table.to_pandas() def plot_similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> Image: """ Plot the similarity index of all the images in the table. Here, the index will contain the data points that are max_dist or closer to the image in the embedding space at a given index. Args: max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2. top_k (float): Percentage of closest data points to consider when counting. Used to apply limit when running vector search. Defaults to 0.01. force (bool): Whether to overwrite the existing similarity index or not. Defaults to True. Returns: (PIL.Image): Image containing the plot. 
Example: ```python exp = Explorer() exp.create_embeddings_table() similarity_idx_plot = exp.plot_similarity_index() similarity_idx_plot.show() # view image preview similarity_idx_plot.save('path/to/save/similarity_index_plot.png') # save contents to file ``` """ sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force) sim_count = sim_idx["count"].tolist() sim_count = np.array(sim_count) indices = np.arange(len(sim_count)) # Create the bar plot plt.bar(indices, sim_count) # Customize the plot (optional) plt.xlabel("data idx") plt.ylabel("Count") plt.title("Similarity Count") buffer = BytesIO() plt.savefig(buffer, format="png") buffer.seek(0) # Use Pillow to open the image from the buffer return Image.fromarray(np.array(Image.open(buffer))) def _check_imgs_or_idxs( self, img: Union[str, np.ndarray, List[str], List[np.ndarray], None], idx: Union[None, int, List[int]] ) -> List[np.ndarray]: if img is None and idx is None: raise ValueError("Either img or idx must be provided.") if img is not None and idx is not None: raise ValueError("Only one of img or idx must be provided.") if idx is not None: idx = idx if isinstance(idx, list) else [idx] img = self.table.to_lance().take(idx, columns=["im_file"]).to_pydict()["im_file"] return img if isinstance(img, list) else [img] def ask_ai(self, query): """ Ask AI a question. Args: query (str): Question to ask. Returns: (pandas.DataFrame): A dataframe containing filtered results to the SQL query. Example: ```python exp = Explorer() exp.create_embeddings_table() answer = exp.ask_ai('Show images with 1 person and 2 dogs') ``` """ result = prompt_sql_query(query) try: df = self.sql_query(result) except Exception as e: LOGGER.error("AI generated query is not valid. Please try again with a different prompt") LOGGER.error(e) return None return df def visualize(self, result): """ Visualize the results of a query. TODO. Args: result (pyarrow.Table): Table containing the results of a query. """ pass def generate_report(self, result): """ Generate a report of the dataset. TODO """ pass
[ "lancedb.connect" ]
[((1697, 1890), 'ultralytics.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normalize=False, return_mask=self.use_segments,\n return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.\n mask_ratio, mask_overlap=hyp.overlap_mask)\n", (1703, 1890), False, 'from ultralytics.data.augment import Format\n'), ((2270, 2332), 'ultralytics.utils.checks.check_requirements', 'checks.check_requirements', (["['lancedb>=0.4.3', 'duckdb<=0.9.2']"], {}), "(['lancedb>=0.4.3', 'duckdb<=0.9.2'])\n", (2295, 2332), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((2383, 2403), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2398, 2403), False, 'import lancedb\n'), ((2654, 2665), 'ultralytics.models.yolo.model.YOLO', 'YOLO', (['model'], {}), '(model)\n', (2658, 2665), False, 'from ultralytics.models.yolo.model import YOLO\n'), ((3997, 4025), 'ultralytics.data.utils.check_det_dataset', 'check_det_dataset', (['self.data'], {}), '(self.data)\n', (4014, 4025), False, 'from ultralytics.data.utils import check_det_dataset\n'), ((8632, 8670), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Running query: {query}"""'], {}), "(f'Running query: {query}')\n", (8643, 8670), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((8685, 8702), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (8695, 8702), False, 'import duckdb\n'), ((9664, 9684), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (9679, 9684), False, 'from PIL import Image\n'), ((12309, 12329), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (12324, 12329), False, 'from PIL import Image\n'), ((16581, 16600), 'numpy.array', 'np.array', (['sim_count'], {}), '(sim_count)\n', (16589, 16600), True, 'import numpy as np\n'), ((16685, 16712), 'matplotlib.pyplot.bar', 'plt.bar', (['indices', 'sim_count'], {}), '(indices, sim_count)\n', (16692, 16712), True, 'from matplotlib import pyplot as plt\n'), ((16762, 16784), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data idx"""'], {}), "('data idx')\n", (16772, 16784), True, 'from matplotlib import pyplot as plt\n'), ((16793, 16812), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (16803, 16812), True, 'from matplotlib import pyplot as plt\n'), ((16821, 16850), 'matplotlib.pyplot.title', 'plt.title', (['"""Similarity Count"""'], {}), "('Similarity Count')\n", (16830, 16850), True, 'from matplotlib import pyplot as plt\n'), ((16868, 16877), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (16875, 16877), False, 'from io import BytesIO\n'), ((16886, 16919), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {'format': '"""png"""'}), "(buffer, format='png')\n", (16897, 16919), True, 'from matplotlib import pyplot as plt\n'), ((3458, 3544), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Table already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Table already exists. Reusing it. Pass force=True to overwrite it.')\n", (3469, 3544), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((3646, 3756), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Table {self.table_name} already exists. Reusing it. 
Pass force=True to overwrite it."""'], {}), "(\n f'Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (3657, 3756), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((9532, 9564), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (9543, 9564), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((12176, 12208), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (12187, 12208), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((13786, 13889), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Similarity matrix already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (13797, 13889), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((1207, 1218), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1214, 1218), True, 'import numpy as np\n'), ((1272, 1285), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (1282, 1285), False, 'import cv2\n'), ((17039, 17057), 'PIL.Image.open', 'Image.open', (['buffer'], {}), '(buffer)\n', (17049, 17057), False, 'from PIL import Image\n'), ((18275, 18374), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""AI generated query is not valid. Please try again with a different prompt"""'], {}), "(\n 'AI generated query is not valid. Please try again with a different prompt'\n )\n", (18287, 18374), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((18377, 18392), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['e'], {}), '(e)\n', (18389, 18392), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((2430, 2440), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2434, 2440), False, 'from pathlib import Path\n'), ((6857, 6876), 'torch.stack', 'torch.stack', (['embeds'], {}), '(embeds)\n', (6868, 6876), False, 'import torch\n')]
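sql_query() in the row above relies on a DuckDB detail that is easy to miss: duckdb.sql() resolves table names against Python variables in the calling frame (a replacement scan), which is why the method materializes the LanceDB table as a local pyarrow.Table named table before running SELECT * FROM 'table'. A standalone sketch of the same pattern, with illustrative columns:

```python
import duckdb
import pyarrow as pa

# DuckDB resolves 'table' against this local variable (replacement scan), the
# same trick sql_query() uses after self.table.to_arrow(); don't remove it.
table = pa.table({"im_file": ["a.jpg", "b.jpg"], "labels": ["person", "dog"]})

rs = duckdb.sql("SELECT * FROM 'table' WHERE labels LIKE '%person%'")
print(rs.df())
```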
"""LanceDB vector store.""" import logging from typing import Any, List, Optional import numpy as np from llama_index.core.bridge.pydantic import PrivateAttr from llama_index.core.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.core.vector_stores.types import ( MetadataFilters, BasePydanticVectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from llama_index.core.vector_stores.utils import ( DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict, ) from pandas import DataFrame import lancedb _logger = logging.getLogger(__name__) def _to_lance_filter(standard_filters: MetadataFilters) -> Any: """Translate standard metadata filters to Lance specific spec.""" filters = [] for filter in standard_filters.legacy_filters(): if isinstance(filter.value, str): filters.append(filter.key + ' = "' + filter.value + '"') else: filters.append(filter.key + " = " + str(filter.value)) return " AND ".join(filters) def _to_llama_similarities(results: DataFrame) -> List[float]: keys = results.keys() normalized_similarities: np.ndarray if "score" in keys: normalized_similarities = np.exp(results["score"] - np.max(results["score"])) elif "_distance" in keys: normalized_similarities = np.exp(-results["_distance"]) else: normalized_similarities = np.linspace(1, 0, len(results)) return normalized_similarities.tolist() class LanceDBVectorStore(BasePydanticVectorStore): """ The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". vector_column_name (str, optional): The vector column name in the table if different from default. Defaults to "vector", in keeping with lancedb convention. nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. 
""" stores_text = True flat_metadata: bool = True _connection: Any = PrivateAttr() uri: Optional[str] table_name: Optional[str] vector_column_name: Optional[str] nprobes: Optional[int] refine_factor: Optional[int] text_key: Optional[str] doc_id_key: Optional[str] def __init__( self, uri: Optional[str], table_name: str = "vectors", vector_column_name: str = "vector", nprobes: int = 20, refine_factor: Optional[int] = None, text_key: str = DEFAULT_TEXT_KEY, doc_id_key: str = DEFAULT_DOC_ID_KEY, **kwargs: Any, ) -> None: """Init params.""" self._connection = lancedb.connect(uri) super().__init__( uri=uri, table_name=table_name, vector_column_name=vector_column_name, nprobes=nprobes, refine_factor=refine_factor, text_key=text_key, doc_id_key=doc_id_key, **kwargs, ) @property def client(self) -> None: """Get client.""" return self._connection @classmethod def from_params( cls, uri: Optional[str], table_name: str = "vectors", vector_column_name: str = "vector", nprobes: int = 20, refine_factor: Optional[int] = None, text_key: str = DEFAULT_TEXT_KEY, doc_id_key: str = DEFAULT_DOC_ID_KEY, **kwargs: Any, ) -> "LanceDBVectorStore": """Create instance from params.""" _connection_ = cls._connection return cls( _connection=_connection_, uri=uri, table_name=table_name, vector_column_name=vector_column_name, nprobes=nprobes, refine_factor=refine_factor, text_key=text_key, doc_id_key=doc_id_key, **kwargs, ) def add( self, nodes: List[BaseNode], **add_kwargs: Any, ) -> List[str]: if not nodes: _logger.debug("No nodes to add. Skipping the database operation.") return [] data = [] ids = [] for node in nodes: metadata = node_to_metadata_dict( node, remove_text=False, flat_metadata=self.flat_metadata ) append_data = { "id": node.node_id, "doc_id": node.ref_doc_id, "vector": node.get_embedding(), "text": node.get_content(metadata_mode=MetadataMode.NONE), "metadata": metadata, } data.append(append_data) ids.append(node.node_id) if self.table_name in self._connection.table_names(): tbl = self._connection.open_table(self.table_name) tbl.add(data) else: self._connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. """ table = self._connection.open_table(self.table_name) table.delete('doc_id = "' + ref_doc_id + '"') def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface." ) where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) table = self._connection.open_table(self.table_name) lance_query = ( table.search( query=query.query_embedding, vector_column_name=self.vector_column_name, ) .limit(query.similarity_top_k) .where(where) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_pandas() nodes = [] for _, item in results.iterrows(): try: node = metadata_dict_to_node(item.metadata) node.embedding = list(item[self.vector_column_name]) except Exception: # deprecated legacy logic for backward compatibility _logger.debug( "Failed to parse Node metadata, fallback to legacy logic." 
) if "metadata" in item: metadata, node_info, _relation = legacy_metadata_dict_to_node( item.metadata, text_key=self.text_key ) else: metadata, node_info = {}, {} node = TextNode( text=item[self.text_key] or "", id_=item.id, metadata=metadata, start_char_idx=node_info.get("start", None), end_char_idx=node_info.get("end", None), relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id=item[self.doc_id_key] ), }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((685, 712), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (702, 712), False, 'import logging\n'), ((2814, 2827), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2825, 2827), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((3431, 3451), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3446, 3451), False, 'import lancedb\n'), ((1449, 1478), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1455, 1478), True, 'import numpy as np\n'), ((4960, 5045), 'llama_index.core.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(False)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=False, flat_metadata=self.flat_metadata\n )\n', (4981, 5045), False, 'from llama_index.core.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((1359, 1383), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1365, 1383), True, 'import numpy as np\n'), ((7233, 7269), 'llama_index.core.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['item.metadata'], {}), '(item.metadata)\n', (7254, 7269), False, 'from llama_index.core.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((7658, 7725), 'llama_index.core.vector_stores.utils.legacy_metadata_dict_to_node', 'legacy_metadata_dict_to_node', (['item.metadata'], {'text_key': 'self.text_key'}), '(item.metadata, text_key=self.text_key)\n', (7686, 7725), False, 'from llama_index.core.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((8211, 8257), 'llama_index.core.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item[self.doc_id_key]'}), '(node_id=item[self.doc_id_key])\n', (8226, 8257), False, 'from llama_index.core.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')]
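A brief usage sketch for the vector store defined in this record, assuming llama-index's standard indexing entry points and a configured embedding backend (by default an OpenAI key); the /tmp path, table name, and sample text are placeholders:

    from llama_index.core import Document, StorageContext, VectorStoreIndex

    vector_store = LanceDBVectorStore(uri="/tmp/lancedb", table_name="vectors")
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    index = VectorStoreIndex.from_documents(
        [Document(text="LanceDB persists vectors on local disk.")],
        storage_context=storage_context,
    )
    print(index.as_query_engine().query("Where does LanceDB persist vectors?"))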
from openai import OpenAI
import streamlit as st
import os
from trubrics import Trubrics
import lancedb
from langchain_community.vectorstores import LanceDB
from langchain_openai import OpenAIEmbeddings
from langchain_openai import ChatOpenAI
from langchain_community.callbacks import TrubricsCallbackHandler
import time

os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
os.environ["TRUBRICS_EMAIL"] = st.secrets["TRUBRICS_EMAIL"]
os.environ["TRUBRICS_PASSWORD"] = st.secrets["TRUBRICS_PASSWORD"]

# Open the LanceDB table holding the arXiv chunks and wrap it as a retriever.
db = lancedb.connect("/mnt/d/LLM-Project/my-app/lancedb_meta_data")
table = db.open_table("EIC_archive")
embeddings = OpenAIEmbeddings()
vectorstore = LanceDB(connection=table, embedding=embeddings)
retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 100})

with st.sidebar:
    with st.form("User Name"):
        st.info(
            "By providing your name, you agree that all the prompts and responses "
            "will be recorded and will be used to further improve RAG methods"
        )
        name = st.text_input("What's your name?")
        submitted = st.form_submit_button("Submit and start")
        if submitted:
            for key in st.session_state:
                del st.session_state[key]
            st.session_state["user_name"] = name

if "user_name" not in st.session_state:
    st.stop()

llm = ChatOpenAI(
    model_name="gpt-3.5-turbo-1106",
    temperature=0,
    callbacks=[
        TrubricsCallbackHandler(
            project="EIC-RAG-TestRun",
            tags=["EIC-RAG-TestRun"],
            user_id=st.session_state["user_name"],
        )
    ],
    max_tokens=4096,
)

from langchain.schema import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough


def format_docs(docs):
    # Number each retrieved chunk and append its arXiv id so the model can cite it.
    return "\n\n".join(
        f"{i+1}. " + doc.page_content.strip("\n") + f"<ARXIV_ID> {doc.metadata['arxiv_id']} <ARXIV_ID/>"
        for i, doc in enumerate(docs)
    )


from langchain.prompts import PromptTemplate

response = """\
You are an expert in providing up to date information about the Electron Ion Collider (EIC), \
tasked with answering any question about EIC based only on the provided context. You greet people when greeted. \
You shall strictly not answer questions about anything other than EIC-related topics. \
Refrain from any other topics by saying you will not answer questions about them, and exit right away here. DO NOT PROCEED. \
You are not allowed to use any other sources other than the provided search results. \

Generate a comprehensive and informative answer strictly within 200 words or less for the \
given question based solely on the provided search results (URL and content). You must \
only use information from the provided search results. Use an unbiased and \
journalistic tone. Combine search results together into a coherent answer. Do not \
repeat text. You should use bullet points in your answer for readability. Make sure to break down your answer into bullet points. \
You should not hallucinate nor build up any references. Use only the `context` html block below and do not use any text within <ARXIV_ID> and </ARXIV_ID> except when citing at the end. \
Make sure not to repeat the same context. Be specific to the exact question asked. \

Here is the response template:

---
# Response template

- Start with a greeting and a summary of the user's query
- Use bullet points to list the main points or facts that answer the query using the information within the tags <context> and <context/>.
- After answering, analyze the respective source links provided within <ARXIV_ID> and </ARXIV_ID> and keep only the unique links for the next step. Try to minimize the total number of unique links, with no more than 10 unique links for the answer.
- You will strictly use no more than 10 unique links for the answer.
- Use a bulleted list of superscript numbers within square brackets to cite the sources for each point or fact. The numbers should correspond to the order of the sources, which will be provided at the end of this response. Note that for every source, you must provide a URL.
- End with a closing remark and a list of sources with their respective URLs as a bullet list, explicitly with full links, which are enclosed in the tags <ARXIV_ID> and </ARXIV_ID> respectively.
---

Here is what a response would look like. Reproduce the same format for your response:

---
# Example response

Hello, thank you for your question about Retrieval Augmented Generation. Here are some key points about RAG:

- Retrieval Augmented Generation is a technique that combines the strengths of pre-trained language models and information retrieval systems to generate responses or content by leveraging external knowledge[^1^] [^2^]
- RAG can be useful when the pre-trained language model alone may not have the necessary information to generate accurate or sufficiently detailed responses, since standard language models like GPT-4 are not capable of accessing real-time or post-training external information directly[^1^] [^3^]
- RAG uses a vector database such as Milvus to index and retrieve relevant documents or text snippets from a knowledge source, and provides them as additional context for the language model[^4^] [^5^]
- RAG can benefit from adding citations to the generated outputs, as it can improve their factual correctness, verifiability, and trustworthiness[^6^] [^7^]

I hope this helps you understand more about RAG.

## Sources
* [^1^][1]: http://arxiv.org/abs/2308.03393v1
* [^2^][2]: http://arxiv.org/abs/2308.03393v1
* [^3^][3]: http://arxiv.org/abs/2307.08593v1
* [^4^][4]: http://arxiv.org/abs/2202.05981v2
* [^5^][5]: http://arxiv.org/abs/2210.09287v1
* [^6^][6]: http://arxiv.org/abs/2242.05981v2
* [^7^][7]: http://arxiv.org/abs/2348.05293v1
---

Where each of the references is taken from the corresponding <ARXIV_ID> in the context. Strictly do not provide titles for the references. \
Strictly do not repeat the same links. Use the numbers to cite the sources. \

If there is nothing in the context relevant to the question at hand, just say "Hmm, \
I'm not sure." or greet back. Don't try to make up an answer. Write the answer in the form of markdown bullet points. \
Make sure to highlight the most important key words in bold font. Do not repeat any context or points in the answer. \

Anything between the following `context` html blocks is retrieved from a knowledge \
bank, not part of the conversation with the user. The contexts are numbered by their retrieval order, in increasing order of cosine similarity index; \
make sure to consider the order in which they appear. \
The contents are formatted in latex; you need to remove any special characters and latex formatting before coercing the points to build your answer. \
Write your answer in the form of markdown bullet points. You can use latex commands if necessary. You will strictly cite no more than 10 unique citations at maximum from the context below. \
Make sure these citations are relevant, and strictly do not repeat the context in the answer.

<context>
{context}
<context/>

REMEMBER: If there is no relevant information within the context, just say "Hmm, I'm \
not sure." or greet back. Don't try to make up an answer. Anything between the preceding 'context' \
html blocks is retrieved from a knowledge bank, not part of the conversation with the \
user.

Question: {question}
"""

rag_prompt_custom = PromptTemplate.from_template(response)

from operator import itemgetter
from langchain.schema.runnable import RunnableMap

# LCEL pipeline: retrieved documents are formatted into the prompt's {context},
# the question is passed through, and the LLM output is parsed to a string.
rag_chain_from_docs = (
    {
        "context": lambda input: format_docs(input["documents"]),
        "question": itemgetter("question"),
    }
    | rag_prompt_custom
    | llm
    | StrOutputParser()
)
rag_chain_with_source = RunnableMap(
    {"documents": retriever, "question": RunnablePassthrough()}
) | {
    "answer": rag_chain_from_docs,
}

st.warning("This project is being continuously developed. Please report any feedback to ai4eic@gmail.com")

col1, col2 = st.columns(2)
with col1:
    st.image("https://indico.bnl.gov/event/19560/logo-410523303.png")
with col2:
    st.title("AI4EIC Agent")

st.sidebar.title("Data Collection")

if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-3.5-turbo"

if "messages" not in st.session_state:
    st.session_state.messages = []

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("What is up?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        full_response = ""
        allchunks = None
        with st.spinner("Gathering info from Knowledge Bank and writing response..."):
            allchunks = rag_chain_with_source.stream(prompt)
            message_placeholder = st.empty()
            for chunk in allchunks:
                full_response += (chunk.get("answer") or "")
                message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)
    st.session_state.messages.append({"role": "assistant", "content": full_response})
[ "lancedb.connect" ]
[((526, 588), 'lancedb.connect', 'lancedb.connect', (['"""/mnt/d/LLM-Project/my-app/lancedb_meta_data"""'], {}), "('/mnt/d/LLM-Project/my-app/lancedb_meta_data')\n", (541, 588), False, 'import lancedb\n'), ((639, 657), 'langchain_openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (655, 657), False, 'from langchain_openai import OpenAIEmbeddings\n'), ((672, 719), 'langchain_community.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (679, 719), False, 'from langchain_community.vectorstores import LanceDB\n'), ((7605, 7643), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['response'], {}), '(response)\n', (7633, 7643), False, 'from langchain.prompts import PromptTemplate\n'), ((8080, 8196), 'streamlit.warning', 'st.warning', (['"""This project is being continuously developed. Please report any feedback to ai4eic@gmail.com"""'], {}), "(\n 'This project is being continuously developed. Please report any feedback to ai4eic@gmail.com'\n )\n", (8090, 8196), True, 'import streamlit as st\n'), ((8201, 8214), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (8211, 8214), True, 'import streamlit as st\n'), ((8337, 8372), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Data Collection"""'], {}), "('Data Collection')\n", (8353, 8372), True, 'import streamlit as st\n'), ((1329, 1338), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (1336, 1338), True, 'import streamlit as st\n'), ((7915, 7932), 'langchain.schema.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (7930, 7932), False, 'from langchain.schema import StrOutputParser\n'), ((8230, 8295), 'streamlit.image', 'st.image', (['"""https://indico.bnl.gov/event/19560/logo-410523303.png"""'], {}), "('https://indico.bnl.gov/event/19560/logo-410523303.png')\n", (8238, 8295), True, 'import streamlit as st\n'), ((8311, 8335), 'streamlit.title', 'st.title', (['"""AI4EIC Agent"""'], {}), "('AI4EIC Agent')\n", (8319, 8335), True, 'import streamlit as st\n'), ((8687, 8715), 'streamlit.chat_input', 'st.chat_input', (['"""What is up?"""'], {}), "('What is up?')\n", (8700, 8715), True, 'import streamlit as st\n'), ((8721, 8790), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (8753, 8790), True, 'import streamlit as st\n'), ((9361, 9446), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (9393, 9446), True, 'import streamlit as st\n'), ((844, 864), 'streamlit.form', 'st.form', (['"""User Name"""'], {}), "('User Name')\n", (851, 864), True, 'import streamlit as st\n'), ((874, 1027), 'streamlit.info', 'st.info', (['"""By providing you name, you agree that all the prompts and responses will be recorded and will be used to further improve RAG methods"""'], {}), "(\n 'By providing you name, you agree that all the prompts and responses will be recorded and will be used to further improve RAG methods'\n )\n", (881, 1027), True, 'import streamlit as st\n'), ((1033, 1067), 'streamlit.text_input', 'st.text_input', (['"""What\'s your name?"""'], {}), '("What\'s your name?")\n', (1046, 1067), True, 'import streamlit as st\n'), ((1088, 1129), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Submit and start"""'], {}), "('Submit and start')\n", (1109, 
1129), True, 'import streamlit as st\n'), ((8599, 8631), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (8614, 8631), True, 'import streamlit as st\n'), ((8641, 8672), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (8652, 8672), True, 'import streamlit as st\n'), ((8800, 8823), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (8815, 8823), True, 'import streamlit as st\n'), ((8833, 8852), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (8844, 8852), True, 'import streamlit as st\n'), ((8863, 8891), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (8878, 8891), True, 'import streamlit as st\n'), ((1455, 1574), 'langchain_community.callbacks.TrubricsCallbackHandler', 'TrubricsCallbackHandler', ([], {'project': '"""EIC-RAG-TestRun"""', 'tags': "['EIC-RAG-TestRun']", 'user_id': "st.session_state['user_name']"}), "(project='EIC-RAG-TestRun', tags=['EIC-RAG-TestRun'],\n user_id=st.session_state['user_name'])\n", (1478, 1574), False, 'from langchain_community.callbacks import TrubricsCallbackHandler\n'), ((8013, 8034), 'langchain.schema.runnable.RunnablePassthrough', 'RunnablePassthrough', ([], {}), '()\n', (8032, 8034), False, 'from langchain.schema.runnable import RunnablePassthrough\n'), ((8958, 9030), 'streamlit.spinner', 'st.spinner', (['"""Gathering info from Knowledge Bank and writing response..."""'], {}), "('Gathering info from Knowledge Bank and writing response...')\n", (8968, 9030), True, 'import streamlit as st\n'), ((9127, 9137), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (9135, 9137), True, 'import streamlit as st\n'), ((7845, 7867), 'operator.itemgetter', 'itemgetter', (['"""question"""'], {}), "('question')\n", (7855, 7867), False, 'from operator import itemgetter\n')]
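The tuples above trace a Streamlit chat front end over a LanceDB-backed LangChain vector store (lancedb.connect, OpenAIEmbeddings, LanceDB(connection=..., embedding=...), plus the st.chat_* loop and session-state history). A minimal sketch of that wiring — table and path names are hypothetical, and an OPENAI_API_KEY is assumed to be set — not the app's actual implementation:

```python
import lancedb
import streamlit as st
from langchain_community.vectorstores import LanceDB
from langchain_openai import OpenAIEmbeddings

# Hypothetical local path and table name; the extract records a similar path.
db = lancedb.connect("/mnt/d/LLM-Project/my-app/lancedb_meta_data")
table = db.open_table("arxiv_papers")  # assumed to be pre-populated
vectorstore = LanceDB(connection=table, embedding=OpenAIEmbeddings())

st.title("AI4EIC Agent")
if "messages" not in st.session_state:
    st.session_state.messages = []
for message in st.session_state.messages:  # replay chat history
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
if prompt := st.chat_input("What is up?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    docs = vectorstore.similarity_search(prompt, k=4)  # retrieve context
    with st.chat_message("assistant"):
        st.markdown(docs[0].page_content if docs else "No context found.")
```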
# Ultralytics YOLO 🚀, AGPL-3.0 license

from io import BytesIO
from pathlib import Path
from typing import Any, List, Tuple, Union

import cv2
import numpy as np
import torch
from PIL import Image
from matplotlib import pyplot as plt
from pandas import DataFrame
from tqdm import tqdm

from ultralytics.data.augment import Format
from ultralytics.data.dataset import YOLODataset
from ultralytics.data.utils import check_det_dataset
from ultralytics.models.yolo.model import YOLO
from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR

from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch


class ExplorerDataset(YOLODataset):
    def __init__(self, *args, data: dict = None, **kwargs) -> None:
        super().__init__(*args, data=data, **kwargs)

    def load_image(self, i: int) -> Union[Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]], Tuple[None, None, None]]:
        """Loads 1 image from dataset index 'i' without any resize ops."""
        im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
        if im is None:  # not cached in RAM
            if fn.exists():  # load npy
                im = np.load(fn)
            else:  # read image
                im = cv2.imread(f)  # BGR
                if im is None:
                    raise FileNotFoundError(f"Image Not Found {f}")
            h0, w0 = im.shape[:2]  # orig hw
            return im, (h0, w0), im.shape[:2]
        return self.ims[i], self.im_hw0[i], self.im_hw[i]

    def build_transforms(self, hyp: IterableSimpleNamespace = None):
        """Creates transforms for dataset images without resizing."""
        return Format(
            bbox_format="xyxy",
            normalize=False,
            return_mask=self.use_segments,
            return_keypoint=self.use_keypoints,
            batch_idx=True,
            mask_ratio=hyp.mask_ratio,
            mask_overlap=hyp.overlap_mask,
        )


class Explorer:
    def __init__(
        self,
        data: Union[str, Path] = "coco128.yaml",
        model: str = "yolov8n.pt",
        uri: str = USER_CONFIG_DIR / "explorer",
    ) -> None:
        checks.check_requirements(["lancedb>=0.4.3", "duckdb"])
        import lancedb

        self.connection = lancedb.connect(uri)
        self.table_name = Path(data).name.lower() + "_" + model.lower()
        self.sim_idx_base_name = (
            f"{self.table_name}_sim_idx".lower()
        )  # Use this name and append thres and top_k to reuse the table
        self.model = YOLO(model)
        self.data = data  # None
        self.choice_set = None

        self.table = None
        self.progress = 0

    def create_embeddings_table(self, force: bool = False, split: str = "train") -> None:
        """
        Create LanceDB table containing the embeddings of the images in the dataset. The table will be reused if it
        already exists. Pass force=True to overwrite the existing table.

        Args:
            force (bool): Whether to overwrite the existing table or not. Defaults to False.
            split (str): Split of the dataset to use. Defaults to 'train'.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            ```
        """
        if self.table is not None and not force:
            LOGGER.info("Table already exists. Reusing it. Pass force=True to overwrite it.")
            return
        if self.table_name in self.connection.table_names() and not force:
            LOGGER.info(f"Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.")
            self.table = self.connection.open_table(self.table_name)
            self.progress = 1
            return
        if self.data is None:
            raise ValueError("Data must be provided to create embeddings table")

        data_info = check_det_dataset(self.data)
        if split not in data_info:
            raise ValueError(
                f"Split {split} is not found in the dataset. Available keys in the dataset are {list(data_info.keys())}"
            )

        choice_set = data_info[split]
        choice_set = choice_set if isinstance(choice_set, list) else [choice_set]
        self.choice_set = choice_set
        dataset = ExplorerDataset(img_path=choice_set, data=data_info, augment=False, cache=False, task=self.model.task)

        # Create the table schema
        batch = dataset[0]
        vector_size = self.model.embed(batch["im_file"], verbose=False)[0].shape[0]
        table = self.connection.create_table(self.table_name, schema=get_table_schema(vector_size), mode="overwrite")
        table.add(
            self._yield_batches(
                dataset,
                data_info,
                self.model,
                exclude_keys=["img", "ratio_pad", "resized_shape", "ori_shape", "batch_idx"],
            )
        )

        self.table = table

    def _yield_batches(self, dataset: ExplorerDataset, data_info: dict, model: YOLO, exclude_keys: List[str]):
        """Generates batches of data for embedding, excluding specified keys."""
        for i in tqdm(range(len(dataset))):
            self.progress = float(i + 1) / len(dataset)
            batch = dataset[i]
            for k in exclude_keys:
                batch.pop(k, None)
            batch = sanitize_batch(batch, data_info)
            batch["vector"] = model.embed(batch["im_file"], verbose=False)[0].detach().tolist()
            yield [batch]

    def query(
        self, imgs: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, limit: int = 25
    ) -> Any:  # pyarrow.Table
        """
        Query the table for similar images. Accepts a single image or a list of images.

        Args:
            imgs (str or list): Path to the image or a list of paths to the images.
            limit (int): Number of results to return.

        Returns:
            (pyarrow.Table): An arrow table containing the results. Supports converting to:
                - pandas dataframe: `result.to_pandas()`
                - dict of lists: `result.to_pydict()`

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.query(imgs='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        if self.table is None:
            raise ValueError("Table is not created. Please create the table first.")
        if isinstance(imgs, str):
            imgs = [imgs]
        assert isinstance(imgs, list), f"imgs must be a string or a list of strings. Got {type(imgs)}"
        embeds = self.model.embed(imgs)
        # Get avg if multiple images are passed (len > 1)
        embeds = torch.mean(torch.stack(embeds), 0).cpu().numpy() if len(embeds) > 1 else embeds[0].cpu().numpy()
        return self.table.search(embeds).limit(limit).to_arrow()

    def sql_query(
        self, query: str, return_type: str = "pandas"
    ) -> Union[DataFrame, Any, None]:  # pandas.dataframe or pyarrow.Table
        """
        Run a SQL-Like query on the table. Utilizes LanceDB predicate pushdown.

        Args:
            query (str): SQL query to run.
            return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.

        Returns:
            (pyarrow.Table): An arrow table containing the results.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
            result = exp.sql_query(query)
            ```
        """
        assert return_type in {
            "pandas",
            "arrow",
        }, f"Return type should be either `pandas` or `arrow`, but got {return_type}"
        import duckdb

        if self.table is None:
            raise ValueError("Table is not created. Please create the table first.")

        # Note: using filter pushdown would be a better long term solution. Temporarily using duckdb for this.
        table = self.table.to_arrow()  # noqa NOTE: Don't comment this. This line is used by DuckDB
        if not query.startswith("SELECT") and not query.startswith("WHERE"):
            raise ValueError(
                f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. Found {query}"
            )
        if query.startswith("WHERE"):
            query = f"SELECT * FROM 'table' {query}"
        LOGGER.info(f"Running query: {query}")

        rs = duckdb.sql(query)
        if return_type == "arrow":
            return rs.arrow()
        elif return_type == "pandas":
            return rs.df()

    def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image:
        """
        Plot the results of a SQL-Like query on the table.

        Args:
            query (str): SQL query to run.
            labels (bool): Whether to plot the labels or not.

        Returns:
            (PIL.Image): Image containing the plot.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
            result = exp.plot_sql_query(query)
            ```
        """
        result = self.sql_query(query, return_type="arrow")
        if len(result) == 0:
            LOGGER.info("No results found.")
            return None
        img = plot_query_result(result, plot_labels=labels)
        return Image.fromarray(img)

    def get_similar(
        self,
        img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
        idx: Union[int, List[int]] = None,
        limit: int = 25,
        return_type: str = "pandas",
    ) -> Union[DataFrame, Any]:  # pandas.dataframe or pyarrow.Table
        """
        Query the table for similar images. Accepts a single image or a list of images.

        Args:
            img (str or list): Path to the image or a list of paths to the images.
            idx (int or list): Index of the image in the table or a list of indexes.
            limit (int): Number of results to return. Defaults to 25.
            return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.

        Returns:
            (pandas.DataFrame): A dataframe containing the results.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.get_similar(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        assert return_type in {
            "pandas",
            "arrow",
        }, f"Return type should be either `pandas` or `arrow`, but got {return_type}"
        img = self._check_imgs_or_idxs(img, idx)
        similar = self.query(img, limit=limit)

        if return_type == "arrow":
            return similar
        elif return_type == "pandas":
            return similar.to_pandas()

    def plot_similar(
        self,
        img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
        idx: Union[int, List[int]] = None,
        limit: int = 25,
        labels: bool = True,
    ) -> Image.Image:
        """
        Plot the similar images. Accepts images or indexes.

        Args:
            img (str or list): Path to the image or a list of paths to the images.
            idx (int or list): Index of the image in the table or a list of indexes.
            labels (bool): Whether to plot the labels or not.
            limit (int): Number of results to return. Defaults to 25.

        Returns:
            (PIL.Image): Image containing the plot.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.plot_similar(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        similar = self.get_similar(img, idx, limit, return_type="arrow")
        if len(similar) == 0:
            LOGGER.info("No results found.")
            return None
        img = plot_query_result(similar, plot_labels=labels)
        return Image.fromarray(img)

    def similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> DataFrame:
        """
        Calculate the similarity index of all the images in the table. Here, the index will contain the data points
        that are max_dist or closer to the image in the embedding space at a given index.

        Args:
            max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
            top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit
                           when running vector search. Defaults to None.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to False.

        Returns:
            (pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image,
                and columns include indices of similar images and their respective distances.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            sim_idx = exp.similarity_index()
            ```
        """
        if self.table is None:
            raise ValueError("Table is not created. Please create the table first.")
        sim_idx_table_name = f"{self.sim_idx_base_name}_thres_{max_dist}_top_{top_k}".lower()
        if sim_idx_table_name in self.connection.table_names() and not force:
            LOGGER.info("Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.")
            return self.connection.open_table(sim_idx_table_name).to_pandas()

        if top_k and not (1.0 >= top_k >= 0.0):
            raise ValueError(f"top_k must be between 0.0 and 1.0. Got {top_k}")
        if max_dist < 0.0:
            raise ValueError(f"max_dist must be greater than or equal to 0. Got {max_dist}")

        top_k = int(top_k * len(self.table)) if top_k else len(self.table)
        top_k = max(top_k, 1)
        features = self.table.to_lance().to_table(columns=["vector", "im_file"]).to_pydict()
        im_files = features["im_file"]
        embeddings = features["vector"]

        sim_table = self.connection.create_table(sim_idx_table_name, schema=get_sim_index_schema(), mode="overwrite")

        def _yield_sim_idx():
            """Generates a dataframe with similarity indices and distances for images."""
            for i in tqdm(range(len(embeddings))):
                sim_idx = self.table.search(embeddings[i]).limit(top_k).to_pandas().query(f"_distance <= {max_dist}")
                yield [
                    {
                        "idx": i,
                        "im_file": im_files[i],
                        "count": len(sim_idx),
                        "sim_im_files": sim_idx["im_file"].tolist(),
                    }
                ]

        sim_table.add(_yield_sim_idx())
        self.sim_index = sim_table
        return sim_table.to_pandas()

    def plot_similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> Image.Image:
        """
        Plot the similarity index of all the images in the table. Here, the index will contain the data points
        that are max_dist or closer to the image in the embedding space at a given index.

        Args:
            max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
            top_k (float): Percentage of closest data points to consider when counting. Used to apply limit when
                running vector search. Defaults to None.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to False.

        Returns:
            (PIL.Image): Image containing the plot.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()

            similarity_idx_plot = exp.plot_similarity_index()
            similarity_idx_plot.show()  # view image preview
            similarity_idx_plot.save('path/to/save/similarity_index_plot.png')  # save contents to file
            ```
        """
        sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force)
        sim_count = sim_idx["count"].tolist()
        sim_count = np.array(sim_count)

        indices = np.arange(len(sim_count))

        # Create the bar plot
        plt.bar(indices, sim_count)

        # Customize the plot (optional)
        plt.xlabel("data idx")
        plt.ylabel("Count")
        plt.title("Similarity Count")
        buffer = BytesIO()
        plt.savefig(buffer, format="png")
        buffer.seek(0)

        # Use Pillow to open the image from the buffer
        return Image.fromarray(np.array(Image.open(buffer)))

    def _check_imgs_or_idxs(
        self, img: Union[str, np.ndarray, List[str], List[np.ndarray], None], idx: Union[None, int, List[int]]
    ) -> List[np.ndarray]:
        if img is None and idx is None:
            raise ValueError("Either img or idx must be provided.")
        if img is not None and idx is not None:
            raise ValueError("Only one of img or idx must be provided.")
        if idx is not None:
            idx = idx if isinstance(idx, list) else [idx]
            img = self.table.to_lance().take(idx, columns=["im_file"]).to_pydict()["im_file"]

        return img if isinstance(img, list) else [img]

    def ask_ai(self, query):
        """
        Ask AI a question.

        Args:
            query (str): Question to ask.

        Returns:
            (pandas.DataFrame): A dataframe containing filtered results to the SQL query.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            answer = exp.ask_ai('Show images with 1 person and 2 dogs')
            ```
        """
        result = prompt_sql_query(query)
        try:
            df = self.sql_query(result)
        except Exception as e:
            LOGGER.error("AI generated query is not valid. Please try again with a different prompt")
            LOGGER.error(e)
            return None
        return df

    def visualize(self, result):
        """
        Visualize the results of a query. TODO.

        Args:
            result (pyarrow.Table): Table containing the results of a query.
        """
        pass

    def generate_report(self, result):
        """
        Generate a report of the dataset. TODO
        """
        pass
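For reference, a typical end-to-end session with the class above, mirroring its own docstring examples (this assumes Explorer is exported at the package level, as in recent Ultralytics releases):

```python
from ultralytics import Explorer

# Build (or reuse) the LanceDB embeddings table for the dataset
exp = Explorer(data="coco128.yaml", model="yolov8n.pt")
exp.create_embeddings_table()

# Semantic search with an image, then a SQL-style filter over the same table
similar = exp.get_similar(img="https://ultralytics.com/images/zidane.jpg", limit=10)
people = exp.sql_query("SELECT * FROM 'table' WHERE labels LIKE '%person%'")

# Per-image similarity statistics across the whole table
sim_idx = exp.similarity_index(max_dist=0.2)
```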
[ "lancedb.connect" ]
[((1697, 1890), 'ultralytics.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normalize=False, return_mask=self.use_segments,\n return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.\n mask_ratio, mask_overlap=hyp.overlap_mask)\n", (1703, 1890), False, 'from ultralytics.data.augment import Format\n'), ((2183, 2238), 'ultralytics.utils.checks.check_requirements', 'checks.check_requirements', (["['lancedb>=0.4.3', 'duckdb']"], {}), "(['lancedb>=0.4.3', 'duckdb'])\n", (2208, 2238), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((2289, 2309), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2304, 2309), False, 'import lancedb\n'), ((2560, 2571), 'ultralytics.models.yolo.model.YOLO', 'YOLO', (['model'], {}), '(model)\n', (2564, 2571), False, 'from ultralytics.models.yolo.model import YOLO\n'), ((3903, 3931), 'ultralytics.data.utils.check_det_dataset', 'check_det_dataset', (['self.data'], {}), '(self.data)\n', (3920, 3931), False, 'from ultralytics.data.utils import check_det_dataset\n'), ((8538, 8576), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Running query: {query}"""'], {}), "(f'Running query: {query}')\n", (8549, 8576), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((8591, 8608), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (8601, 8608), False, 'import duckdb\n'), ((9570, 9590), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (9585, 9590), False, 'from PIL import Image\n'), ((12215, 12235), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (12230, 12235), False, 'from PIL import Image\n'), ((16487, 16506), 'numpy.array', 'np.array', (['sim_count'], {}), '(sim_count)\n', (16495, 16506), True, 'import numpy as np\n'), ((16591, 16618), 'matplotlib.pyplot.bar', 'plt.bar', (['indices', 'sim_count'], {}), '(indices, sim_count)\n', (16598, 16618), True, 'from matplotlib import pyplot as plt\n'), ((16668, 16690), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data idx"""'], {}), "('data idx')\n", (16678, 16690), True, 'from matplotlib import pyplot as plt\n'), ((16699, 16718), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (16709, 16718), True, 'from matplotlib import pyplot as plt\n'), ((16727, 16756), 'matplotlib.pyplot.title', 'plt.title', (['"""Similarity Count"""'], {}), "('Similarity Count')\n", (16736, 16756), True, 'from matplotlib import pyplot as plt\n'), ((16774, 16783), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (16781, 16783), False, 'from io import BytesIO\n'), ((16792, 16825), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {'format': '"""png"""'}), "(buffer, format='png')\n", (16803, 16825), True, 'from matplotlib import pyplot as plt\n'), ((3364, 3450), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Table already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Table already exists. Reusing it. Pass force=True to overwrite it.')\n", (3375, 3450), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((3552, 3662), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Table {self.table_name} already exists. Reusing it. 
Pass force=True to overwrite it."""'], {}), "(\n f'Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (3563, 3662), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((9438, 9470), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (9449, 9470), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((12082, 12114), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (12093, 12114), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((13692, 13795), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Similarity matrix already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (13703, 13795), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((1207, 1218), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1214, 1218), True, 'import numpy as np\n'), ((1272, 1285), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (1282, 1285), False, 'import cv2\n'), ((16945, 16963), 'PIL.Image.open', 'Image.open', (['buffer'], {}), '(buffer)\n', (16955, 16963), False, 'from PIL import Image\n'), ((18181, 18280), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""AI generated query is not valid. Please try again with a different prompt"""'], {}), "(\n 'AI generated query is not valid. Please try again with a different prompt'\n )\n", (18193, 18280), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((18283, 18298), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['e'], {}), '(e)\n', (18295, 18298), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((2336, 2346), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2340, 2346), False, 'from pathlib import Path\n'), ((6763, 6782), 'torch.stack', 'torch.stack', (['embeds'], {}), '(embeds)\n', (6774, 6782), False, 'import torch\n')]
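The offset/call/import tuples above are the kind of record a static pass over the source can produce. A rough, simplified sketch using the standard-library ast module — the actual extractor behind this corpus is not shown, and it records flat character offsets rather than the line/column pairs used here:

```python
import ast

def extract_calls(source: str):
    """Yield (lineno, col_offset, call_text) for every call expression in `source`."""
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            # get_source_segment recovers the exact call text (Python 3.8+)
            yield (node.lineno, node.col_offset, ast.get_source_segment(source, node))

sample = 'import lancedb\nconn = lancedb.connect("my_uri")\n'
for record in extract_calls(sample):
    print(record)
```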
Pass force=True to overwrite it."""'], {}), "(\n f'Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (3563, 3662), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((9438, 9470), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (9449, 9470), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((12082, 12114), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (12093, 12114), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((13692, 13795), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Similarity matrix already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (13703, 13795), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((1207, 1218), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1214, 1218), True, 'import numpy as np\n'), ((1272, 1285), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (1282, 1285), False, 'import cv2\n'), ((16945, 16963), 'PIL.Image.open', 'Image.open', (['buffer'], {}), '(buffer)\n', (16955, 16963), False, 'from PIL import Image\n'), ((18181, 18280), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""AI generated query is not valid. Please try again with a different prompt"""'], {}), "(\n 'AI generated query is not valid. Please try again with a different prompt'\n )\n", (18193, 18280), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((18283, 18298), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['e'], {}), '(e)\n', (18295, 18298), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((2336, 2346), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2340, 2346), False, 'from pathlib import Path\n'), ((6763, 6782), 'torch.stack', 'torch.stack', (['embeds'], {}), '(embeds)\n', (6774, 6782), False, 'import torch\n')]
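The `similarity_index` / `plot_similarity_index` pair in the row above counts, for each image, how many other images sit within `max_dist` of it in embedding space, then renders those counts as a bar chart. A minimal usage sketch under stated assumptions: it presumes an `ultralytics` release that exports `Explorer` at the top level and a locally resolvable `coco128.yaml`; the output path is illustrative.

```python
# Hedged usage sketch for the Explorer similarity-index API shown above.
from ultralytics import Explorer  # assumption: top-level export; some builds nest it under ultralytics.data.explorer

exp = Explorer(data="coco128.yaml", model="yolov8n.pt")
exp.create_embeddings_table()

# DataFrame with one row per image: idx, im_file, count, sim_im_files.
sim_idx = exp.similarity_index(max_dist=0.2, top_k=0.01)
print(sim_idx[["im_file", "count"]].head())

# Same computation rendered as a PIL image of per-image neighbour counts.
plot = exp.plot_similarity_index(max_dist=0.2, top_k=0.01)
plot.save("similarity_index_plot.png")  # illustrative output path
```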
"""LanceDB vector store.""" from typing import Any, List, Optional from llama_index.data_structs.node import DocumentRelationship, Node from llama_index.vector_stores.types import ( NodeWithEmbedding, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) class LanceDBVectorStore(VectorStore): """The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. """ stores_text = True def __init__( self, uri: str, table_name: str = "vectors", nprobes: int = 20, refine_factor: Optional[int] = None, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb # noqa: F401 except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.nprobes = nprobes self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return None def add( self, embedding_results: List[NodeWithEmbedding], ) -> List[str]: data = [] ids = [] for result in embedding_results: data.append( { "id": result.id, "doc_id": result.ref_doc_id, "vector": result.embedding, "text": result.node.get_text(), } ) ids.append(result.id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. """ raise NotImplementedError("Delete not yet implemented for LanceDB.") def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: raise ValueError("Metadata filters not implemented for LanceDB yet.") table = self.connection.open_table(self.table_name) lance_query = ( table.search(query.query_embedding) .limit(query.similarity_top_k) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_df() nodes = [] for _, item in results.iterrows(): node = Node( doc_id=item.id, text=item.text, relationships={ DocumentRelationship.SOURCE: item.doc_id, }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=results["score"].tolist(), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((1711, 1731), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1726, 1731), False, 'import lancedb\n'), ((3712, 3811), 'llama_index.data_structs.node.Node', 'Node', ([], {'doc_id': 'item.id', 'text': 'item.text', 'relationships': '{DocumentRelationship.SOURCE: item.doc_id}'}), '(doc_id=item.id, text=item.text, relationships={DocumentRelationship.\n SOURCE: item.doc_id})\n', (3716, 3811), False, 'from llama_index.data_structs.node import DocumentRelationship, Node\n')]
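For orientation, here is a minimal query-side sketch of the `LanceDBVectorStore` above. It assumes a legacy `llama_index` install where the imports in the row resolve as written, that embeddings were previously persisted via `add()`, and that the three-dimensional query vector is a placeholder for whatever dimensionality the stored embeddings actually use.

```python
# Hedged sketch: querying the legacy llama_index LanceDB store defined above.
from llama_index.vector_stores.types import VectorStoreQuery

store = LanceDBVectorStore(uri="/tmp/lancedb", table_name="vectors")  # illustrative URI
query = VectorStoreQuery(query_embedding=[0.1, 0.2, 0.3], similarity_top_k=2)
result = store.query(query)  # VectorStoreQueryResult(nodes=..., similarities=..., ids=...)
for node, score in zip(result.nodes, result.similarities):
    print(node.get_text(), score)
```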
"""LanceDB vector store.""" from typing import Any, List, Optional from llama_index.data_structs.node import DocumentRelationship, Node from llama_index.vector_stores.types import ( NodeWithEmbedding, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) class LanceDBVectorStore(VectorStore): """The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. """ stores_text = True def __init__( self, uri: str, table_name: str = "vectors", nprobes: int = 20, refine_factor: Optional[int] = None, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb # noqa: F401 except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.nprobes = nprobes self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return None def add( self, embedding_results: List[NodeWithEmbedding], ) -> List[str]: data = [] ids = [] for result in embedding_results: data.append( { "id": result.id, "doc_id": result.ref_doc_id, "vector": result.embedding, "text": result.node.get_text(), } ) ids.append(result.id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. """ raise NotImplementedError("Delete not yet implemented for LanceDB.") def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: raise ValueError("Metadata filters not implemented for LanceDB yet.") table = self.connection.open_table(self.table_name) lance_query = ( table.search(query.query_embedding) .limit(query.similarity_top_k) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_df() nodes = [] for _, item in results.iterrows(): node = Node( doc_id=item.id, text=item.text, relationships={ DocumentRelationship.SOURCE: item.doc_id, }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=results["score"].tolist(), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((1711, 1731), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1726, 1731), False, 'import lancedb\n'), ((3712, 3811), 'llama_index.data_structs.node.Node', 'Node', ([], {'doc_id': 'item.id', 'text': 'item.text', 'relationships': '{DocumentRelationship.SOURCE: item.doc_id}'}), '(doc_id=item.id, text=item.text, relationships={DocumentRelationship.\n SOURCE: item.doc_id})\n', (3716, 3811), False, 'from llama_index.data_structs.node import DocumentRelationship, Node\n')]
# Ultralytics YOLO 🚀, AGPL-3.0 license from io import BytesIO from pathlib import Path from typing import Any, List, Tuple, Union import cv2 import numpy as np import torch from PIL import Image from matplotlib import pyplot as plt from pandas import DataFrame from tqdm import tqdm from ultralytics.data.augment import Format from ultralytics.data.dataset import YOLODataset from ultralytics.data.utils import check_det_dataset from ultralytics.models.yolo.model import YOLO from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch class ExplorerDataset(YOLODataset): def __init__(self, *args, data: dict = None, **kwargs) -> None: super().__init__(*args, data=data, **kwargs) def load_image(self, i: int) -> Union[Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]], Tuple[None, None, None]]: """Loads 1 image from dataset index 'i' without any resize ops.""" im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i] if im is None: # not cached in RAM if fn.exists(): # load npy im = np.load(fn) else: # read image im = cv2.imread(f) # BGR if im is None: raise FileNotFoundError(f"Image Not Found {f}") h0, w0 = im.shape[:2] # orig hw return im, (h0, w0), im.shape[:2] return self.ims[i], self.im_hw0[i], self.im_hw[i] def build_transforms(self, hyp: IterableSimpleNamespace = None): """Creates transforms for dataset images without resizing.""" return Format( bbox_format="xyxy", normalize=False, return_mask=self.use_segments, return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.mask_ratio, mask_overlap=hyp.overlap_mask, ) class Explorer: def __init__( self, data: Union[str, Path] = "coco128.yaml", model: str = "yolov8n.pt", uri: str = "~/ultralytics/explorer" ) -> None: checks.check_requirements(["lancedb>=0.4.3", "duckdb"]) import lancedb self.connection = lancedb.connect(uri) self.table_name = Path(data).name.lower() + "_" + model.lower() self.sim_idx_base_name = ( f"{self.table_name}_sim_idx".lower() ) # Use this name and append thres and top_k to reuse the table self.model = YOLO(model) self.data = data # None self.choice_set = None self.table = None self.progress = 0 def create_embeddings_table(self, force: bool = False, split: str = "train") -> None: """ Create LanceDB table containing the embeddings of the images in the dataset. The table will be reused if it already exists. Pass force=True to overwrite the existing table. Args: force (bool): Whether to overwrite the existing table or not. Defaults to False. split (str): Split of the dataset to use. Defaults to 'train'. Example: ```python exp = Explorer() exp.create_embeddings_table() ``` """ if self.table is not None and not force: LOGGER.info("Table already exists. Reusing it. Pass force=True to overwrite it.") return if self.table_name in self.connection.table_names() and not force: LOGGER.info(f"Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.") self.table = self.connection.open_table(self.table_name) self.progress = 1 return if self.data is None: raise ValueError("Data must be provided to create embeddings table") data_info = check_det_dataset(self.data) if split not in data_info: raise ValueError( f"Split {split} is not found in the dataset. 
Available keys in the dataset are {list(data_info.keys())}" ) choice_set = data_info[split] choice_set = choice_set if isinstance(choice_set, list) else [choice_set] self.choice_set = choice_set dataset = ExplorerDataset(img_path=choice_set, data=data_info, augment=False, cache=False, task=self.model.task) # Create the table schema batch = dataset[0] vector_size = self.model.embed(batch["im_file"], verbose=False)[0].shape[0] table = self.connection.create_table(self.table_name, schema=get_table_schema(vector_size), mode="overwrite") table.add( self._yield_batches( dataset, data_info, self.model, exclude_keys=["img", "ratio_pad", "resized_shape", "ori_shape", "batch_idx"], ) ) self.table = table def _yield_batches(self, dataset: ExplorerDataset, data_info: dict, model: YOLO, exclude_keys: List[str]): """Generates batches of data for embedding, excluding specified keys.""" for i in tqdm(range(len(dataset))): self.progress = float(i + 1) / len(dataset) batch = dataset[i] for k in exclude_keys: batch.pop(k, None) batch = sanitize_batch(batch, data_info) batch["vector"] = model.embed(batch["im_file"], verbose=False)[0].detach().tolist() yield [batch] def query( self, imgs: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, limit: int = 25 ) -> Any: # pyarrow.Table """ Query the table for similar images. Accepts a single image or a list of images. Args: imgs (str or list): Path to the image or a list of paths to the images. limit (int): Number of results to return. Returns: (pyarrow.Table): An arrow table containing the results. Supports converting to: - pandas dataframe: `result.to_pandas()` - dict of lists: `result.to_pydict()` Example: ```python exp = Explorer() exp.create_embeddings_table() similar = exp.query(imgs='https://ultralytics.com/images/zidane.jpg') ``` """ if self.table is None: raise ValueError("Table is not created. Please create the table first.") if isinstance(imgs, str): imgs = [imgs] assert isinstance(imgs, list), f"imgs must be a string or a list of strings. Got {type(imgs)}" embeds = self.model.embed(imgs) # Get avg if multiple images are passed (len > 1) embeds = torch.mean(torch.stack(embeds), 0).cpu().numpy() if len(embeds) > 1 else embeds[0].cpu().numpy() return self.table.search(embeds).limit(limit).to_arrow() def sql_query( self, query: str, return_type: str = "pandas" ) -> Union[DataFrame, Any, None]: # pandas.dataframe or pyarrow.Table """ Run a SQL-like query on the table. Utilizes LanceDB predicate pushdown. Args: query (str): SQL query to run. return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'. Returns: (pyarrow.Table): An arrow table containing the results. Example: ```python exp = Explorer() exp.create_embeddings_table() query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'" result = exp.sql_query(query) ``` """ assert return_type in { "pandas", "arrow", }, f"Return type should be either `pandas` or `arrow`, but got {return_type}" import duckdb if self.table is None: raise ValueError("Table is not created. Please create the table first.") # Note: using filter pushdown would be a better long term solution. Temporarily using duckdb for this. table = self.table.to_arrow() # noqa NOTE: Don't comment this. This line is used by DuckDB if not query.startswith("SELECT") and not query.startswith("WHERE"): raise ValueError( f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. 
found {query}" ) if query.startswith("WHERE"): query = f"SELECT * FROM 'table' {query}" LOGGER.info(f"Running query: {query}") rs = duckdb.sql(query) if return_type == "arrow": return rs.arrow() elif return_type == "pandas": return rs.df() def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image: """ Plot the results of a SQL-like query on the table. Args: query (str): SQL query to run. labels (bool): Whether to plot the labels or not. Returns: (PIL.Image): Image containing the plot. Example: ```python exp = Explorer() exp.create_embeddings_table() query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'" result = exp.plot_sql_query(query) ``` """ result = self.sql_query(query, return_type="arrow") if len(result) == 0: LOGGER.info("No results found.") return None img = plot_query_result(result, plot_labels=labels) return Image.fromarray(img) def get_similar( self, img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, idx: Union[int, List[int]] = None, limit: int = 25, return_type: str = "pandas", ) -> Union[DataFrame, Any]: # pandas.dataframe or pyarrow.Table """ Query the table for similar images. Accepts a single image or a list of images. Args: img (str or list): Path to the image or a list of paths to the images. idx (int or list): Index of the image in the table or a list of indexes. limit (int): Number of results to return. Defaults to 25. return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'. Returns: (pandas.DataFrame): A dataframe containing the results. Example: ```python exp = Explorer() exp.create_embeddings_table() similar = exp.get_similar(img='https://ultralytics.com/images/zidane.jpg') ``` """ assert return_type in { "pandas", "arrow", }, f"Return type should be either `pandas` or `arrow`, but got {return_type}" img = self._check_imgs_or_idxs(img, idx) similar = self.query(img, limit=limit) if return_type == "arrow": return similar elif return_type == "pandas": return similar.to_pandas() def plot_similar( self, img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, idx: Union[int, List[int]] = None, limit: int = 25, labels: bool = True, ) -> Image.Image: """ Plot the similar images. Accepts images or indexes. Args: img (str or list): Path to the image or a list of paths to the images. idx (int or list): Index of the image in the table or a list of indexes. labels (bool): Whether to plot the labels or not. limit (int): Number of results to return. Defaults to 25. Returns: (PIL.Image): Image containing the plot. Example: ```python exp = Explorer() exp.create_embeddings_table() similar = exp.plot_similar(img='https://ultralytics.com/images/zidane.jpg') ``` """ similar = self.get_similar(img, idx, limit, return_type="arrow") if len(similar) == 0: LOGGER.info("No results found.") return None img = plot_query_result(similar, plot_labels=labels) return Image.fromarray(img) def similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> DataFrame: """ Calculate the similarity index of all the images in the table. Here, the index will contain the data points that are max_dist or closer to the image in the embedding space at a given index. Args: max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2. top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit when running vector search. Defaults to None. force (bool): Whether to overwrite the existing similarity index or not. 
Defaults to False. Returns: (pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image, and columns include indices of similar images and their respective distances. Example: ```python exp = Explorer() exp.create_embeddings_table() sim_idx = exp.similarity_index() ``` """ if self.table is None: raise ValueError("Table is not created. Please create the table first.") sim_idx_table_name = f"{self.sim_idx_base_name}_thres_{max_dist}_top_{top_k}".lower() if sim_idx_table_name in self.connection.table_names() and not force: LOGGER.info("Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.") return self.connection.open_table(sim_idx_table_name).to_pandas() if top_k and not (1.0 >= top_k >= 0.0): raise ValueError(f"top_k must be between 0.0 and 1.0. Got {top_k}") if max_dist < 0.0: raise ValueError(f"max_dist must be greater than or equal to 0. Got {max_dist}") top_k = int(top_k * len(self.table)) if top_k else len(self.table) top_k = max(top_k, 1) features = self.table.to_lance().to_table(columns=["vector", "im_file"]).to_pydict() im_files = features["im_file"] embeddings = features["vector"] sim_table = self.connection.create_table(sim_idx_table_name, schema=get_sim_index_schema(), mode="overwrite") def _yield_sim_idx(): """Generates a dataframe with similarity indices and distances for images.""" for i in tqdm(range(len(embeddings))): sim_idx = self.table.search(embeddings[i]).limit(top_k).to_pandas().query(f"_distance <= {max_dist}") yield [ { "idx": i, "im_file": im_files[i], "count": len(sim_idx), "sim_im_files": sim_idx["im_file"].tolist(), } ] sim_table.add(_yield_sim_idx()) self.sim_index = sim_table return sim_table.to_pandas() def plot_similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> Image.Image: """ Plot the similarity index of all the images in the table. Here, the index will contain the data points that are max_dist or closer to the image in the embedding space at a given index. Args: max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2. top_k (float): Percentage of closest data points to consider when counting. Used to apply limit when running vector search. Defaults to None. force (bool): Whether to overwrite the existing similarity index or not. Defaults to False. Returns: (PIL.Image): Image containing the plot. 
Example: ```python exp = Explorer() exp.create_embeddings_table() similarity_idx_plot = exp.plot_similarity_index() similarity_idx_plot.show() # view image preview similarity_idx_plot.save('path/to/save/similarity_index_plot.png') # save contents to file ``` """ sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force) sim_count = sim_idx["count"].tolist() sim_count = np.array(sim_count) indices = np.arange(len(sim_count)) # Create the bar plot plt.bar(indices, sim_count) # Customize the plot (optional) plt.xlabel("data idx") plt.ylabel("Count") plt.title("Similarity Count") buffer = BytesIO() plt.savefig(buffer, format="png") buffer.seek(0) # Use Pillow to open the image from the buffer return Image.fromarray(np.array(Image.open(buffer))) def _check_imgs_or_idxs( self, img: Union[str, np.ndarray, List[str], List[np.ndarray], None], idx: Union[None, int, List[int]] ) -> List[np.ndarray]: if img is None and idx is None: raise ValueError("Either img or idx must be provided.") if img is not None and idx is not None: raise ValueError("Only one of img or idx must be provided.") if idx is not None: idx = idx if isinstance(idx, list) else [idx] img = self.table.to_lance().take(idx, columns=["im_file"]).to_pydict()["im_file"] return img if isinstance(img, list) else [img] def ask_ai(self, query): """ Ask AI a question. Args: query (str): Question to ask. Returns: (pandas.DataFrame): A dataframe containing filtered results to the SQL query. Example: ```python exp = Explorer() exp.create_embeddings_table() answer = exp.ask_ai('Show images with 1 person and 2 dogs') ``` """ result = prompt_sql_query(query) try: df = self.sql_query(result) except Exception as e: LOGGER.error("AI generated query is not valid. Please try again with a different prompt") LOGGER.error(e) return None return df def visualize(self, result): """ Visualize the results of a query. TODO. Args: result (pyarrow.Table): Table containing the results of a query. """ pass def generate_report(self, result): """ Generate a report of the dataset. TODO """ pass
[ "lancedb.connect" ]
[((1680, 1873), 'ultralytics.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normalize=False, return_mask=self.use_segments,\n return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.\n mask_ratio, mask_overlap=hyp.overlap_mask)\n", (1686, 1873), False, 'from ultralytics.data.augment import Format\n'), ((2137, 2192), 'ultralytics.utils.checks.check_requirements', 'checks.check_requirements', (["['lancedb>=0.4.3', 'duckdb']"], {}), "(['lancedb>=0.4.3', 'duckdb'])\n", (2162, 2192), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((2243, 2263), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2258, 2263), False, 'import lancedb\n'), ((2514, 2525), 'ultralytics.models.yolo.model.YOLO', 'YOLO', (['model'], {}), '(model)\n', (2518, 2525), False, 'from ultralytics.models.yolo.model import YOLO\n'), ((3857, 3885), 'ultralytics.data.utils.check_det_dataset', 'check_det_dataset', (['self.data'], {}), '(self.data)\n', (3874, 3885), False, 'from ultralytics.data.utils import check_det_dataset\n'), ((8492, 8530), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Running query: {query}"""'], {}), "(f'Running query: {query}')\n", (8503, 8530), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((8545, 8562), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (8555, 8562), False, 'import duckdb\n'), ((9524, 9544), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (9539, 9544), False, 'from PIL import Image\n'), ((12169, 12189), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (12184, 12189), False, 'from PIL import Image\n'), ((16441, 16460), 'numpy.array', 'np.array', (['sim_count'], {}), '(sim_count)\n', (16449, 16460), True, 'import numpy as np\n'), ((16545, 16572), 'matplotlib.pyplot.bar', 'plt.bar', (['indices', 'sim_count'], {}), '(indices, sim_count)\n', (16552, 16572), True, 'from matplotlib import pyplot as plt\n'), ((16622, 16644), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data idx"""'], {}), "('data idx')\n", (16632, 16644), True, 'from matplotlib import pyplot as plt\n'), ((16653, 16672), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (16663, 16672), True, 'from matplotlib import pyplot as plt\n'), ((16681, 16710), 'matplotlib.pyplot.title', 'plt.title', (['"""Similarity Count"""'], {}), "('Similarity Count')\n", (16690, 16710), True, 'from matplotlib import pyplot as plt\n'), ((16728, 16737), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (16735, 16737), False, 'from io import BytesIO\n'), ((16746, 16779), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {'format': '"""png"""'}), "(buffer, format='png')\n", (16757, 16779), True, 'from matplotlib import pyplot as plt\n'), ((3318, 3404), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Table already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Table already exists. Reusing it. Pass force=True to overwrite it.')\n", (3329, 3404), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((3506, 3616), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Table {self.table_name} already exists. Reusing it. 
Pass force=True to overwrite it."""'], {}), "(\n f'Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (3517, 3616), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((9392, 9424), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (9403, 9424), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((12036, 12068), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (12047, 12068), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((13646, 13749), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Similarity matrix already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (13657, 13749), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((1190, 1201), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1197, 1201), True, 'import numpy as np\n'), ((1255, 1268), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (1265, 1268), False, 'import cv2\n'), ((16899, 16917), 'PIL.Image.open', 'Image.open', (['buffer'], {}), '(buffer)\n', (16909, 16917), False, 'from PIL import Image\n'), ((18135, 18234), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""AI generated query is not valid. Please try again with a different prompt"""'], {}), "(\n 'AI generated query is not valid. Please try again with a different prompt'\n )\n", (18147, 18234), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((18237, 18252), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['e'], {}), '(e)\n', (18249, 18252), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((2290, 2300), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2294, 2300), False, 'from pathlib import Path\n'), ((6717, 6736), 'torch.stack', 'torch.stack', (['embeds'], {}), '(embeds)\n', (6728, 6736), False, 'import torch\n')]
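Each row above ultimately reduces to the single `lancedb.connect` call flagged in its `apis` field: open (or create) a database at a URI, create or open a table, and run a vector search against it. A standalone sketch of that pattern, with made-up table, column, and file names:

```python
# Hedged sketch of the bare lancedb.connect pattern the extractions point at.
import lancedb

db = lancedb.connect("~/ultralytics/explorer")  # directory is created on demand
tbl = db.create_table(
    "demo",  # hypothetical table name
    data=[
        {"vector": [0.0, 1.0], "im_file": "a.jpg"},
        {"vector": [1.0, 0.0], "im_file": "b.jpg"},
    ],
    mode="overwrite",
)
hits = tbl.search([0.1, 0.9]).limit(1).to_pandas()  # nearest rows, with a _distance column
print(hits[["im_file", "_distance"]])
```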
"""LanceDB vector store.""" from typing import Any, List, Optional import numpy as np from pandas import DataFrame from llama_index.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.vector_stores.types import ( MetadataFilters, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from llama_index.vector_stores.utils import node_to_metadata_dict def _to_lance_filter(standard_filters: MetadataFilters) -> Any: """Translate standard metadata filters to Lance specific spec.""" filters = [] for filter in standard_filters.legacy_filters(): if isinstance(filter.value, str): filters.append(filter.key + ' = "' + filter.value + '"') else: filters.append(filter.key + " = " + str(filter.value)) return " AND ".join(filters) def _to_llama_similarities(results: DataFrame) -> List[float]: keys = results.keys() normalized_similarities: np.ndarray if "score" in keys: normalized_similarities = np.exp(results["score"] - np.max(results["score"])) elif "_distance" in keys: normalized_similarities = np.exp(-results["_distance"]) else: normalized_similarities = np.linspace(1, 0, len(results)) return normalized_similarities.tolist() class LanceDBVectorStore(VectorStore): """The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. """ stores_text = True flat_metadata: bool = True def __init__( self, uri: str, table_name: str = "vectors", nprobes: int = 20, refine_factor: Optional[int] = None, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.nprobes = nprobes self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return def add( self, nodes: List[BaseNode], **add_kwargs: Any, ) -> List[str]: data = [] ids = [] for node in nodes: metadata = node_to_metadata_dict( node, remove_text=True, flat_metadata=self.flat_metadata ) append_data = { "id": node.node_id, "doc_id": node.ref_doc_id, "vector": node.get_embedding(), "text": node.get_content(metadata_mode=MetadataMode.NONE), } append_data.update(metadata) data.append(append_data) ids.append(node.node_id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. 
""" table = self.connection.open_table(self.table_name) table.delete('document_id = "' + ref_doc_id + '"') def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface." ) where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) table = self.connection.open_table(self.table_name) lance_query = ( table.search(query.query_embedding) .limit(query.similarity_top_k) .where(where) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_df() nodes = [] for _, item in results.iterrows(): node = TextNode( text=item.text or "", # ensure text is a string id_=item.id, relationships={ NodeRelationship.SOURCE: RelatedNodeInfo(node_id=item.doc_id), }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((2782, 2802), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2797, 2802), False, 'import lancedb\n'), ((1179, 1208), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1185, 1208), True, 'import numpy as np\n'), ((3214, 3293), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (3235, 3293), False, 'from llama_index.vector_stores.utils import node_to_metadata_dict\n'), ((1089, 1113), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1095, 1113), True, 'import numpy as np\n'), ((5548, 5584), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item.doc_id'}), '(node_id=item.doc_id)\n', (5563, 5584), False, 'from llama_index.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')]
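For context, a hedged usage sketch of the store this record defines, reusing the module's own imports; the node wiring is simplified and the 4-dim embedding is a dummy (in real use an embedding model fills it in):

store = LanceDBVectorStore(uri="/tmp/lancedb", table_name="vectors")

node = TextNode(text="hello world", id_="node-1")
node.relationships[NodeRelationship.SOURCE] = RelatedNodeInfo(node_id="doc-1")
node.embedding = [0.1, 0.2, 0.3, 0.4]  # dummy embedding
store.add([node])

result = store.query(VectorStoreQuery(query_embedding=[0.1, 0.2, 0.3, 0.4], similarity_top_k=1))
store.delete("doc-1")  # removes the rows whose doc_id matches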
"""LanceDB vector store.""" from typing import Any, List, Optional import numpy as np from pandas import DataFrame from llama_index.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.vector_stores.types import ( MetadataFilters, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from llama_index.vector_stores.utils import node_to_metadata_dict def _to_lance_filter(standard_filters: MetadataFilters) -> Any: """Translate standard metadata filters to Lance specific spec.""" filters = [] for filter in standard_filters.legacy_filters(): if isinstance(filter.value, str): filters.append(filter.key + ' = "' + filter.value + '"') else: filters.append(filter.key + " = " + str(filter.value)) return " AND ".join(filters) def _to_llama_similarities(results: DataFrame) -> List[float]: keys = results.keys() normalized_similarities: np.ndarray if "score" in keys: normalized_similarities = np.exp(results["score"] - np.max(results["score"])) elif "_distance" in keys: normalized_similarities = np.exp(-results["_distance"]) else: normalized_similarities = np.linspace(1, 0, len(results)) return normalized_similarities.tolist() class LanceDBVectorStore(VectorStore): """The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. """ stores_text = True flat_metadata: bool = True def __init__( self, uri: str, table_name: str = "vectors", nprobes: int = 20, refine_factor: Optional[int] = None, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.nprobes = nprobes self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return def add( self, nodes: List[BaseNode], **add_kwargs: Any, ) -> List[str]: data = [] ids = [] for node in nodes: metadata = node_to_metadata_dict( node, remove_text=True, flat_metadata=self.flat_metadata ) append_data = { "id": node.node_id, "doc_id": node.ref_doc_id, "vector": node.get_embedding(), "text": node.get_content(metadata_mode=MetadataMode.NONE), } append_data.update(metadata) data.append(append_data) ids.append(node.node_id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. 
""" table = self.connection.open_table(self.table_name) table.delete('document_id = "' + ref_doc_id + '"') def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface." ) where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) table = self.connection.open_table(self.table_name) lance_query = ( table.search(query.query_embedding) .limit(query.similarity_top_k) .where(where) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_df() nodes = [] for _, item in results.iterrows(): node = TextNode( text=item.text or "", # ensure text is a string id_=item.id, relationships={ NodeRelationship.SOURCE: RelatedNodeInfo(node_id=item.doc_id), }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((2782, 2802), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2797, 2802), False, 'import lancedb\n'), ((1179, 1208), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1185, 1208), True, 'import numpy as np\n'), ((3214, 3293), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (3235, 3293), False, 'from llama_index.vector_stores.utils import node_to_metadata_dict\n'), ((1089, 1113), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1095, 1113), True, 'import numpy as np\n'), ((5548, 5584), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item.doc_id'}), '(node_id=item.doc_id)\n', (5563, 5584), False, 'from llama_index.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')]
"""LanceDB vector store.""" from typing import Any, List, Optional import numpy as np from pandas import DataFrame from llama_index.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.vector_stores.types import ( MetadataFilters, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from llama_index.vector_stores.utils import node_to_metadata_dict def _to_lance_filter(standard_filters: MetadataFilters) -> Any: """Translate standard metadata filters to Lance specific spec.""" filters = [] for filter in standard_filters.legacy_filters(): if isinstance(filter.value, str): filters.append(filter.key + ' = "' + filter.value + '"') else: filters.append(filter.key + " = " + str(filter.value)) return " AND ".join(filters) def _to_llama_similarities(results: DataFrame) -> List[float]: keys = results.keys() normalized_similarities: np.ndarray if "score" in keys: normalized_similarities = np.exp(results["score"] - np.max(results["score"])) elif "_distance" in keys: normalized_similarities = np.exp(-results["_distance"]) else: normalized_similarities = np.linspace(1, 0, len(results)) return normalized_similarities.tolist() class LanceDBVectorStore(VectorStore): """The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. """ stores_text = True flat_metadata: bool = True def __init__( self, uri: str, table_name: str = "vectors", nprobes: int = 20, refine_factor: Optional[int] = None, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.nprobes = nprobes self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return def add( self, nodes: List[BaseNode], **add_kwargs: Any, ) -> List[str]: data = [] ids = [] for node in nodes: metadata = node_to_metadata_dict( node, remove_text=True, flat_metadata=self.flat_metadata ) append_data = { "id": node.node_id, "doc_id": node.ref_doc_id, "vector": node.get_embedding(), "text": node.get_content(metadata_mode=MetadataMode.NONE), } append_data.update(metadata) data.append(append_data) ids.append(node.node_id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. 
""" table = self.connection.open_table(self.table_name) table.delete('document_id = "' + ref_doc_id + '"') def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface." ) where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) table = self.connection.open_table(self.table_name) lance_query = ( table.search(query.query_embedding) .limit(query.similarity_top_k) .where(where) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_df() nodes = [] for _, item in results.iterrows(): node = TextNode( text=item.text or "", # ensure text is a string id_=item.id, relationships={ NodeRelationship.SOURCE: RelatedNodeInfo(node_id=item.doc_id), }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((2782, 2802), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2797, 2802), False, 'import lancedb\n'), ((1179, 1208), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1185, 1208), True, 'import numpy as np\n'), ((3214, 3293), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (3235, 3293), False, 'from llama_index.vector_stores.utils import node_to_metadata_dict\n'), ((1089, 1113), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1095, 1113), True, 'import numpy as np\n'), ((5548, 5584), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item.doc_id'}), '(node_id=item.doc_id)\n', (5563, 5584), False, 'from llama_index.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')]
"""LanceDB vector store.""" from typing import Any, List, Optional import numpy as np from pandas import DataFrame from llama_index.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.vector_stores.types import ( MetadataFilters, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from llama_index.vector_stores.utils import node_to_metadata_dict def _to_lance_filter(standard_filters: MetadataFilters) -> Any: """Translate standard metadata filters to Lance specific spec.""" filters = [] for filter in standard_filters.legacy_filters(): if isinstance(filter.value, str): filters.append(filter.key + ' = "' + filter.value + '"') else: filters.append(filter.key + " = " + str(filter.value)) return " AND ".join(filters) def _to_llama_similarities(results: DataFrame) -> List[float]: keys = results.keys() normalized_similarities: np.ndarray if "score" in keys: normalized_similarities = np.exp(results["score"] - np.max(results["score"])) elif "_distance" in keys: normalized_similarities = np.exp(-results["_distance"]) else: normalized_similarities = np.linspace(1, 0, len(results)) return normalized_similarities.tolist() class LanceDBVectorStore(VectorStore): """The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. """ stores_text = True flat_metadata: bool = True def __init__( self, uri: str, table_name: str = "vectors", nprobes: int = 20, refine_factor: Optional[int] = None, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.nprobes = nprobes self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return def add( self, nodes: List[BaseNode], **add_kwargs: Any, ) -> List[str]: data = [] ids = [] for node in nodes: metadata = node_to_metadata_dict( node, remove_text=True, flat_metadata=self.flat_metadata ) append_data = { "id": node.node_id, "doc_id": node.ref_doc_id, "vector": node.get_embedding(), "text": node.get_content(metadata_mode=MetadataMode.NONE), } append_data.update(metadata) data.append(append_data) ids.append(node.node_id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. 
""" table = self.connection.open_table(self.table_name) table.delete('document_id = "' + ref_doc_id + '"') def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface." ) where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) table = self.connection.open_table(self.table_name) lance_query = ( table.search(query.query_embedding) .limit(query.similarity_top_k) .where(where) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_df() nodes = [] for _, item in results.iterrows(): node = TextNode( text=item.text or "", # ensure text is a string id_=item.id, relationships={ NodeRelationship.SOURCE: RelatedNodeInfo(node_id=item.doc_id), }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((2782, 2802), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2797, 2802), False, 'import lancedb\n'), ((1179, 1208), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1185, 1208), True, 'import numpy as np\n'), ((3214, 3293), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (3235, 3293), False, 'from llama_index.vector_stores.utils import node_to_metadata_dict\n'), ((1089, 1113), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1095, 1113), True, 'import numpy as np\n'), ((5548, 5584), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item.doc_id'}), '(node_id=item.doc_id)\n', (5563, 5584), False, 'from llama_index.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')]
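A minimal, hypothetical usage sketch for the LanceDBVectorStore defined above. The uri, node id, text, and toy 4-dimensional embedding are illustrative assumptions, and the exact node/query constructors depend on the llama_index version in use:

from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores.types import VectorStoreQuery

store = LanceDBVectorStore(uri="/tmp/demo-lancedb", table_name="vectors")  # hypothetical uri
node = TextNode(
    text="hello lancedb",
    id_="node-1",
    embedding=[0.1, 0.2, 0.3, 0.4],  # toy embedding; any fixed dimension works
    relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="doc-1")},
)
store.add([node])

result = store.query(
    VectorStoreQuery(query_embedding=[0.1, 0.2, 0.3, 0.4], similarity_top_k=1)
)
print(result.ids)  # expected: ['node-1']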
import lancedb
import numpy as np
import pandas as pd
import pyarrow as pa


def client_vector_db(vector_db_config: dict) -> lancedb.LanceDBConnection:
    """Connect to a lancedb instance"""
    return lancedb.connect(**vector_db_config)


def initialize_vector_db_indices(
    client_vector_db: lancedb.LanceDBConnection,
    class_name: str,
    embedding_dimension: int,
) -> bool:
    """Initialize the LanceDB table.

    NOTE: this pattern currently doesn't work due to a bug in lancedb.
    """
    schema = pa.schema(
        [
            ("squad_id", pa.string()),
            ("title", pa.string()),
            ("context", pa.string()),
            ("embedding_service", pa.string()),
            ("model_name", pa.string()),
            pa.field("vector", type=pa.list_(pa.float32(), list_size=embedding_dimension)),
        ]
    )
    client_vector_db.create_table(name=class_name, schema=schema, mode="create")
    return True


def reset_vector_db(client_vector_db: lancedb.LanceDBConnection) -> bool:
    """Delete all tables from the database"""
    for table_name in client_vector_db.table_names():
        client_vector_db.drop_table(table_name)
    return True


def data_objects(
    ids: list[str],
    titles: list[str],
    text_contents: list[str],
    embeddings: list[np.ndarray],
    metadata: dict,
) -> list[dict]:
    """Create valid LanceDB objects"""
    assert len(ids) == len(titles) == len(text_contents) == len(embeddings)
    return [
        dict(squad_id=id_, title=title, context=context, vector=embedding, **metadata)
        for id_, title, context, embedding in zip(ids, titles, text_contents, embeddings)
    ]


def push_to_vector_db(
    client_vector_db: lancedb.LanceDBConnection,
    class_name: str,
    data_objects: list[dict],
    embedding_metric: str = "cosine",
) -> int:
    """Push dataframe of objects to LanceDB.

    Return number of objects.
    """
    df = pd.DataFrame.from_records(data_objects)
    table = client_vector_db.create_table(name=class_name, data=df, mode="overwrite")
    return table.to_pandas().shape[0]
[ "lancedb.connect" ]
[((203, 238), 'lancedb.connect', 'lancedb.connect', ([], {}), '(**vector_db_config)\n', (218, 238), False, 'import lancedb\n'), ((1932, 1971), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['data_objects'], {}), '(data_objects)\n', (1957, 1971), True, 'import pandas as pd\n'), ((568, 579), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (577, 579), True, 'import pyarrow as pa\n'), ((604, 615), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (613, 615), True, 'import pyarrow as pa\n'), ((642, 653), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (651, 653), True, 'import pyarrow as pa\n'), ((690, 701), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (699, 701), True, 'import pyarrow as pa\n'), ((731, 742), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (740, 742), True, 'import pyarrow as pa\n'), ((790, 802), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (800, 802), True, 'import pyarrow as pa\n')]
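A hypothetical end-to-end sketch tying the helpers above together; the uri, class name, and toy data are assumptions for illustration. It skips the schema-based initialize_vector_db_indices (flagged as broken in its own docstring) and lets push_to_vector_db create the table from data:

import numpy as np

client = client_vector_db({"uri": "/tmp/demo-lancedb"})  # hypothetical location
objects = data_objects(
    ids=["q1"],
    titles=["Example"],
    text_contents=["Some context paragraph"],
    embeddings=[np.random.rand(8)],  # toy 8-dim embedding
    metadata={"embedding_service": "demo", "model_name": "toy-model"},
)
print(push_to_vector_db(client, class_name="squad_demo", data_objects=objects))  # -> 1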
from datasets import load_dataset from panns_inference import AudioTagging from tqdm import tqdm from IPython.display import Audio, display import numpy as np import lancedb def create_audio_embedding(audio_data): return at.inference(audio_data) def insert_audio(): batches = [batch["audio"] for batch in dataset.iter(100)] meta_batches = [batch["category"] for batch in dataset.iter(100)] audio_data = [np.array([audio["array"] for audio in batch]) for batch in batches] meta_data = [np.array([meta for meta in batch]) for batch in meta_batches] for i in tqdm(range(len(audio_data))): (_, embedding) = create_audio_embedding(audio_data[i]) data = [ { "audio": x[0]["array"], "vector": x[1], "sampling_rate": x[0]["sampling_rate"], "category": x[2], } for x in zip(batches[i], embedding, meta_data[i]) ] if table_name not in db.table_names(): tbl = db.create_table(table_name, data) else: tbl = db.open_table(table_name) tbl.add(data) def search_audio(id): tbl = db.open_table(table_name) audio = dataset[id]["audio"]["array"] category = dataset[id]["category"] display(Audio(audio, rate=dataset[id]["audio"]["sampling_rate"])) print("Category:", category) (_, embedding) = create_audio_embedding(audio[None, :]) result = tbl.search(embedding[0]).limit(5).to_df() print(result) for i in range(len(result)): display(Audio(result["audio"][i], rate=result["sampling_rate"][i])) print("Category:", result["category"][i]) if __name__ == "__main__": global dataset, at, db, table_name dataset = load_dataset("ashraq/esc50", split="train") at = AudioTagging(checkpoint_path=None, device="cuda") db = lancedb.connect("data/audio-lancedb") table_name = "audio-search" # This function will take a while to run # Run if you don't have the LanceDB table yet, but skip if you already have it insert_audio() # The audio won't display in command line, but it will display in Jupyter Notebook search_audio(500)
[ "lancedb.connect" ]
[((1759, 1802), 'datasets.load_dataset', 'load_dataset', (['"""ashraq/esc50"""'], {'split': '"""train"""'}), "('ashraq/esc50', split='train')\n", (1771, 1802), False, 'from datasets import load_dataset\n'), ((1812, 1861), 'panns_inference.AudioTagging', 'AudioTagging', ([], {'checkpoint_path': 'None', 'device': '"""cuda"""'}), "(checkpoint_path=None, device='cuda')\n", (1824, 1861), False, 'from panns_inference import AudioTagging\n'), ((1872, 1909), 'lancedb.connect', 'lancedb.connect', (['"""data/audio-lancedb"""'], {}), "('data/audio-lancedb')\n", (1887, 1909), False, 'import lancedb\n'), ((424, 469), 'numpy.array', 'np.array', (["[audio['array'] for audio in batch]"], {}), "([audio['array'] for audio in batch])\n", (432, 469), True, 'import numpy as np\n'), ((509, 543), 'numpy.array', 'np.array', (['[meta for meta in batch]'], {}), '([meta for meta in batch])\n', (517, 543), True, 'import numpy as np\n'), ((1292, 1348), 'IPython.display.Audio', 'Audio', (['audio'], {'rate': "dataset[id]['audio']['sampling_rate']"}), "(audio, rate=dataset[id]['audio']['sampling_rate'])\n", (1297, 1348), False, 'from IPython.display import Audio, display\n'), ((1566, 1624), 'IPython.display.Audio', 'Audio', (["result['audio'][i]"], {'rate': "result['sampling_rate'][i]"}), "(result['audio'][i], rate=result['sampling_rate'][i])\n", (1571, 1624), False, 'from IPython.display import Audio, display\n')]
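For reference, a model-free sketch of the same insert-then-search pattern used above, substituting random vectors for the PANNs embeddings so it runs without a GPU or a model download; all names and sizes are illustrative:

import lancedb
import numpy as np

db = lancedb.connect("/tmp/demo-audio-db")  # hypothetical path
data = [
    {"vector": np.random.rand(32).tolist(), "category": f"class-{i % 3}"}
    for i in range(9)
]
tbl = db.create_table("audio-demo", data, mode="overwrite")

query = np.random.rand(32)
print(tbl.search(query).limit(5).to_df()[["category", "_distance"]])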
import os from datetime import datetime from pathlib import Path from uuid import uuid4 import lancedb import pyarrow as pa import tantivy from pydantic import computed_field from slugify import slugify from sqlalchemy import TIMESTAMP, Engine, text from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select from tracecat import auth from tracecat.auth import decrypt_key, encrypt_key from tracecat.labels.mitre import get_mitre_tactics_techniques STORAGE_PATH = Path(os.path.expanduser("~/.tracecat/storage")) EMBEDDINGS_SIZE = os.environ.get("TRACECAT__EMBEDDINGS_SIZE", 512) DEFAULT_CASE_ACTIONS = [ "Active compromise", "Ignore", "Informational", "Investigate", "Quarantined", "Sinkholed", ] class User(SQLModel, table=True): # The id is also the JWT 'sub' claim id: str | None = Field(default_factory=lambda: uuid4().hex, primary_key=True) tier: str = "free" # "free" or "premium" settings: str | None = None # JSON-serialized String of settings owned_workflows: list["Workflow"] = Relationship( back_populates="owner", sa_relationship_kwargs={"cascade": "delete"}, ) case_actions: list["CaseAction"] = Relationship(back_populates="user") case_contexts: list["CaseContext"] = Relationship(back_populates="user") secrets: list["Secret"] = Relationship( back_populates="owner", sa_relationship_kwargs={"cascade": "delete"}, ) class Resource(SQLModel): """Base class for all resources in the system.""" owner_id: str created_at: datetime = Field( sa_type=TIMESTAMP(), # UTC Timestamp sa_column_kwargs={ "server_default": text("CURRENT_TIMESTAMP"), "nullable": False, }, ) updated_at: datetime = Field( sa_type=TIMESTAMP(), # UTC Timestamp sa_column_kwargs={ "server_default": text("CURRENT_TIMESTAMP"), "server_onupdate": text("CURRENT_TIMESTAMP"), "nullable": False, }, ) class Secret(Resource, table=True): id: str | None = Field(default_factory=lambda: uuid4().hex, primary_key=True) name: str | None = Field(default=None, max_length=255, index=True, nullable=True) encrypted_api_key: bytes | None = Field(default=None, nullable=True) owner_id: str = Field(foreign_key="user.id") owner: User | None = Relationship(back_populates="secrets") @property def key(self) -> str | None: if not self.encrypted_api_key: return None return decrypt_key(self.encrypted_api_key) @key.setter def key(self, value: str) -> None: self.encrypted_api_key = encrypt_key(value) class Editor(SQLModel, table=True): user_id: str | None = Field(default=None, foreign_key="user.id", primary_key=True) workflow_id: str | None = Field( default=None, foreign_key="workflow.id", primary_key=True ) class CaseAction(Resource, table=True): id: str | None = Field(default_factory=lambda: uuid4().hex, primary_key=True) tag: str value: str user_id: str | None = Field(foreign_key="user.id") user: User | None = Relationship(back_populates="case_actions") class CaseContext(Resource, table=True): id: str | None = Field(default_factory=lambda: uuid4().hex, primary_key=True) tag: str value: str user_id: str | None = Field(foreign_key="user.id") user: User | None = Relationship(back_populates="case_contexts") class Workflow(Resource, table=True): id: str | None = Field(default_factory=lambda: uuid4().hex, primary_key=True) title: str description: str status: str = "offline" # "online" or "offline" object: str | None = None # JSON-serialized String of react flow object # Owner owner_id: str = Field(foreign_key="user.id") owner: User | None = Relationship(back_populates="owned_workflows") runs: list["WorkflowRun"] | None = 
Relationship(back_populates="workflow") actions: list["Action"] | None = Relationship( back_populates="workflow", sa_relationship_kwargs={"cascade": "delete"}, ) webhooks: list["Webhook"] | None = Relationship( back_populates="workflow", sa_relationship_kwargs={"cascade": "delete"}, ) @computed_field @property def key(self) -> str: slug = slugify(self.title, separator="_") return f"{self.id}.{slug}" class WorkflowRun(Resource, table=True): id: str | None = Field(default_factory=lambda: uuid4().hex, primary_key=True) status: str = "pending" # "online" or "offline" workflow_id: str = Field(foreign_key="workflow.id") workflow: Workflow | None = Relationship(back_populates="runs") class Action(Resource, table=True): id: str | None = Field(default_factory=lambda: uuid4().hex, primary_key=True) type: str title: str description: str status: str = "offline" # "online" or "offline" inputs: str | None = None # JSON-serialized String of inputs workflow_id: str | None = Field(foreign_key="workflow.id") workflow: Workflow | None = Relationship(back_populates="actions") @computed_field @property def key(self) -> str: slug = slugify(self.title, separator="_") return f"{self.id}.{slug}" class Webhook(Resource, table=True): """Webhook is a URL that can be called to trigger a workflow. Notes ----- - We need this because we need a way to trigger a workflow from an external source. - External sources only have access to the path """ id: str | None = Field( default_factory=lambda: uuid4().hex, primary_key=True, description="Webhook path", ) action_id: str | None = Field(foreign_key="action.id") workflow_id: str | None = Field(foreign_key="workflow.id") workflow: Workflow | None = Relationship(back_populates="webhooks") @computed_field @property def secret(self) -> str: return auth.compute_hash(self.id) def create_db_engine() -> Engine: STORAGE_PATH.mkdir(parents=True, exist_ok=True) sqlite_uri = f"sqlite:////{STORAGE_PATH}/database.db" engine = create_engine( sqlite_uri, echo=True, connect_args={"check_same_thread": False} ) return engine def build_events_index(): index_path = STORAGE_PATH / "event_index" index_path.mkdir(parents=True, exist_ok=True) event_schema = ( tantivy.SchemaBuilder() .add_date_field("published_at", fast=True, stored=True) .add_text_field("action_id", stored=True) .add_text_field("action_run_id", stored=True) .add_text_field("action_title", stored=True) .add_text_field("action_type", stored=True) .add_text_field("workflow_id", stored=True) .add_text_field("workflow_title", stored=True) .add_text_field("workflow_run_id", stored=True) .add_json_field("data", stored=True) .build() ) tantivy.Index(event_schema, path=str(index_path)) def create_events_index() -> tantivy.Index: index_path = STORAGE_PATH / "event_index" return tantivy.Index.open(str(index_path)) def create_vdb_conn() -> lancedb.DBConnection: db = lancedb.connect(STORAGE_PATH / "vector.db") return db CaseSchema = pa.schema( [ pa.field("id", pa.string(), nullable=False), pa.field("owner_id", pa.string(), nullable=False), pa.field("workflow_id", pa.string(), nullable=False), pa.field("title", pa.string(), nullable=False), pa.field("payload", pa.string(), nullable=False), # JSON-serialized pa.field("context", pa.string(), nullable=True), # JSON-serialized pa.field("malice", pa.string(), nullable=False), pa.field("status", pa.string(), nullable=False), pa.field("priority", pa.string(), nullable=False), pa.field("action", pa.string(), nullable=True), pa.field("suppression", pa.string(), nullable=True), # JSON-serialized # 
pa.field("_action_vector", pa.list_(pa.float32(), list_size=EMBEDDINGS_SIZE)), # pa.field("_payload_vector", pa.list_(pa.float32(), list_size=EMBEDDINGS_SIZE)), # pa.field("_context_vector", pa.list_(pa.float32(), list_size=EMBEDDINGS_SIZE)), ] ) def initialize_db() -> Engine: # Relational table engine = create_db_engine() SQLModel.metadata.create_all(engine) # VectorDB db = create_vdb_conn() db.create_table("cases", schema=CaseSchema, exist_ok=True) # Search build_events_index() # Add TTPs to context table only if context table is empty with Session(engine) as session: case_contexts_count = session.exec(select(CaseContext)).all() if len(case_contexts_count) == 0: mitre_labels = get_mitre_tactics_techniques() mitre_contexts = [ CaseContext(owner_id="tracecat", tag="mitre", value=label) for label in mitre_labels ] session.add_all(mitre_contexts) session.commit() case_actions_count = session.exec(select(CaseAction)).all() if len(case_actions_count) == 0: default_actions = [ CaseAction(owner_id="tracecat", tag="case_action", value=case_action) for case_action in DEFAULT_CASE_ACTIONS ] session.add_all(default_actions) session.commit() return engine
[ "lancedb.connect" ]
[((557, 605), 'os.environ.get', 'os.environ.get', (['"""TRACECAT__EMBEDDINGS_SIZE"""', '(512)'], {}), "('TRACECAT__EMBEDDINGS_SIZE', 512)\n", (571, 605), False, 'import os\n'), ((496, 537), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.tracecat/storage"""'], {}), "('~/.tracecat/storage')\n", (514, 537), False, 'import os\n'), ((1064, 1150), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""owner"""', 'sa_relationship_kwargs': "{'cascade': 'delete'}"}), "(back_populates='owner', sa_relationship_kwargs={'cascade':\n 'delete'})\n", (1076, 1150), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((1209, 1244), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""user"""'}), "(back_populates='user')\n", (1221, 1244), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((1286, 1321), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""user"""'}), "(back_populates='user')\n", (1298, 1321), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((1352, 1438), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""owner"""', 'sa_relationship_kwargs': "{'cascade': 'delete'}"}), "(back_populates='owner', sa_relationship_kwargs={'cascade':\n 'delete'})\n", (1364, 1438), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((2184, 2246), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'max_length': '(255)', 'index': '(True)', 'nullable': '(True)'}), '(default=None, max_length=255, index=True, nullable=True)\n', (2189, 2246), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((2285, 2319), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'nullable': '(True)'}), '(default=None, nullable=True)\n', (2290, 2319), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((2340, 2368), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""user.id"""'}), "(foreign_key='user.id')\n", (2345, 2368), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((2394, 2432), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""secrets"""'}), "(back_populates='secrets')\n", (2406, 2432), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((2767, 2827), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""user.id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='user.id', primary_key=True)\n", (2772, 2827), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((2858, 2922), 'sqlmodel.Field', 'Field', ([], {'default': 'None', 'foreign_key': '"""workflow.id"""', 'primary_key': '(True)'}), "(default=None, foreign_key='workflow.id', primary_key=True)\n", (2863, 2922), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((3115, 3143), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""user.id"""'}), "(foreign_key='user.id')\n", (3120, 3143), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((3168, 3211), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""case_actions"""'}), "(back_populates='case_actions')\n", (3180, 3211), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), 
((3391, 3419), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""user.id"""'}), "(foreign_key='user.id')\n", (3396, 3419), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((3444, 3488), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""case_contexts"""'}), "(back_populates='case_contexts')\n", (3456, 3488), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((3809, 3837), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""user.id"""'}), "(foreign_key='user.id')\n", (3814, 3837), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((3863, 3909), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""owned_workflows"""'}), "(back_populates='owned_workflows')\n", (3875, 3909), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((3949, 3988), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""workflow"""'}), "(back_populates='workflow')\n", (3961, 3988), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((4026, 4115), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""workflow"""', 'sa_relationship_kwargs': "{'cascade': 'delete'}"}), "(back_populates='workflow', sa_relationship_kwargs={'cascade':\n 'delete'})\n", (4038, 4115), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((4174, 4263), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""workflow"""', 'sa_relationship_kwargs': "{'cascade': 'delete'}"}), "(back_populates='workflow', sa_relationship_kwargs={'cascade':\n 'delete'})\n", (4186, 4263), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((4630, 4662), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""workflow.id"""'}), "(foreign_key='workflow.id')\n", (4635, 4662), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((4695, 4730), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""runs"""'}), "(back_populates='runs')\n", (4707, 4730), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((5050, 5082), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""workflow.id"""'}), "(foreign_key='workflow.id')\n", (5055, 5082), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((5115, 5153), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""actions"""'}), "(back_populates='actions')\n", (5127, 5153), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((5744, 5774), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""action.id"""'}), "(foreign_key='action.id')\n", (5749, 5774), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((5805, 5837), 'sqlmodel.Field', 'Field', ([], {'foreign_key': '"""workflow.id"""'}), "(foreign_key='workflow.id')\n", (5810, 5837), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((5870, 5909), 'sqlmodel.Relationship', 'Relationship', ([], {'back_populates': '"""webhooks"""'}), "(back_populates='webhooks')\n", (5882, 5909), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((6175, 6254), 'sqlmodel.create_engine', 
'create_engine', (['sqlite_uri'], {'echo': '(True)', 'connect_args': "{'check_same_thread': False}"}), "(sqlite_uri, echo=True, connect_args={'check_same_thread': False})\n", (6188, 6254), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((7219, 7262), 'lancedb.connect', 'lancedb.connect', (["(STORAGE_PATH / 'vector.db')"], {}), "(STORAGE_PATH / 'vector.db')\n", (7234, 7262), False, 'import lancedb\n'), ((8370, 8406), 'sqlmodel.SQLModel.metadata.create_all', 'SQLModel.metadata.create_all', (['engine'], {}), '(engine)\n', (8398, 8406), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((2559, 2594), 'tracecat.auth.decrypt_key', 'decrypt_key', (['self.encrypted_api_key'], {}), '(self.encrypted_api_key)\n', (2570, 2594), False, 'from tracecat.auth import decrypt_key, encrypt_key\n'), ((2684, 2702), 'tracecat.auth.encrypt_key', 'encrypt_key', (['value'], {}), '(value)\n', (2695, 2702), False, 'from tracecat.auth import decrypt_key, encrypt_key\n'), ((4359, 4393), 'slugify.slugify', 'slugify', (['self.title'], {'separator': '"""_"""'}), "(self.title, separator='_')\n", (4366, 4393), False, 'from slugify import slugify\n'), ((5230, 5264), 'slugify.slugify', 'slugify', (['self.title'], {'separator': '"""_"""'}), "(self.title, separator='_')\n", (5237, 5264), False, 'from slugify import slugify\n'), ((5989, 6015), 'tracecat.auth.compute_hash', 'auth.compute_hash', (['self.id'], {}), '(self.id)\n', (6006, 6015), False, 'from tracecat import auth\n'), ((8625, 8640), 'sqlmodel.Session', 'Session', (['engine'], {}), '(engine)\n', (8632, 8640), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((1609, 1620), 'sqlalchemy.TIMESTAMP', 'TIMESTAMP', ([], {}), '()\n', (1618, 1620), False, 'from sqlalchemy import TIMESTAMP, Engine, text\n'), ((1821, 1832), 'sqlalchemy.TIMESTAMP', 'TIMESTAMP', ([], {}), '()\n', (1830, 1832), False, 'from sqlalchemy import TIMESTAMP, Engine, text\n'), ((7332, 7343), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (7341, 7343), True, 'import pyarrow as pa\n'), ((7391, 7402), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (7400, 7402), True, 'import pyarrow as pa\n'), ((7453, 7464), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (7462, 7464), True, 'import pyarrow as pa\n'), ((7509, 7520), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (7518, 7520), True, 'import pyarrow as pa\n'), ((7567, 7578), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (7576, 7578), True, 'import pyarrow as pa\n'), ((7644, 7655), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (7653, 7655), True, 'import pyarrow as pa\n'), ((7719, 7730), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (7728, 7730), True, 'import pyarrow as pa\n'), ((7776, 7787), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (7785, 7787), True, 'import pyarrow as pa\n'), ((7835, 7846), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (7844, 7846), True, 'import pyarrow as pa\n'), ((7892, 7903), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (7901, 7903), True, 'import pyarrow as pa\n'), ((7953, 7964), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (7962, 7964), True, 'import pyarrow as pa\n'), ((8792, 8822), 'tracecat.labels.mitre.get_mitre_tactics_techniques', 'get_mitre_tactics_techniques', ([], {}), '()\n', (8820, 8822), False, 'from tracecat.labels.mitre import get_mitre_tactics_techniques\n'), ((1696, 1721), 'sqlalchemy.text', 'text', (['"""CURRENT_TIMESTAMP"""'], {}), 
"('CURRENT_TIMESTAMP')\n", (1700, 1721), False, 'from sqlalchemy import TIMESTAMP, Engine, text\n'), ((1908, 1933), 'sqlalchemy.text', 'text', (['"""CURRENT_TIMESTAMP"""'], {}), "('CURRENT_TIMESTAMP')\n", (1912, 1933), False, 'from sqlalchemy import TIMESTAMP, Engine, text\n'), ((1966, 1991), 'sqlalchemy.text', 'text', (['"""CURRENT_TIMESTAMP"""'], {}), "('CURRENT_TIMESTAMP')\n", (1970, 1991), False, 'from sqlalchemy import TIMESTAMP, Engine, text\n'), ((877, 884), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (882, 884), False, 'from uuid import uuid4\n'), ((2130, 2137), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2135, 2137), False, 'from uuid import uuid4\n'), ((3030, 3037), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (3035, 3037), False, 'from uuid import uuid4\n'), ((3306, 3313), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (3311, 3313), False, 'from uuid import uuid4\n'), ((3580, 3587), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (3585, 3587), False, 'from uuid import uuid4\n'), ((4523, 4530), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (4528, 4530), False, 'from uuid import uuid4\n'), ((4820, 4827), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (4825, 4827), False, 'from uuid import uuid4\n'), ((5635, 5642), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (5640, 5642), False, 'from uuid import uuid4\n'), ((8696, 8715), 'sqlmodel.select', 'select', (['CaseContext'], {}), '(CaseContext)\n', (8702, 8715), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((9101, 9119), 'sqlmodel.select', 'select', (['CaseAction'], {}), '(CaseAction)\n', (9107, 9119), False, 'from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select\n'), ((6440, 6463), 'tantivy.SchemaBuilder', 'tantivy.SchemaBuilder', ([], {}), '()\n', (6461, 6463), False, 'import tantivy\n')]
from dotenv import load_dotenv
from typing import List

import lancedb
import openai
import os

from gpt_pdf_bot.shared import embed_text

load_dotenv()


class ChatBot:
    def __init__(self, table_name: str):
        self.db = lancedb.connect(uri=".lancedb")
        self.table = self.db[table_name]

    def run(self):
        while True:
            query = input("\nAsk me a question: ")
            if query == "exit":
                break

            print(f"Searching for {query}", end="\n\n")
            context = self.retrieve_context(query=query)
            print(f"Found {len(context)} results", end="\n\n")

            texts = self.retrieve_text_from_context(context=context)
            annotations = self.create_annotations_from_context(context=context)

            prompt = self.generate_prompt(query=query, context=texts)
            response = self.ask_gpt(prompt, annotations=annotations)
            print(response, end="\n\n")

    def retrieve_context(self, query: str, limit: int = 5) -> List[str]:
        emb = embed_text([query])[0]

        # TODO: return and pass metadata so we can display the source and page number
        context = self.table.search(emb).limit(limit).to_df()

        return context

    def retrieve_text_from_context(self, context: List[str]) -> List[str]:
        """Extracts only the text from the context, so it can be used in the prompt"""
        # curly braces will mess up prompt templating, so remove them
        texts = context["text"].str.replace("{", "").str.replace("}", "").tolist()
        return texts

    def create_annotations_from_context(self, context: List[str]) -> List[str]:
        """Extracts the document metadata from the context, so it can be displayed with the LLM's response"""
        metadata = context["metadata"].tolist()
        annotations = []
        for meta in metadata:
            source = meta["source"]
            page_num = meta["page_num"]
            annotations.append(f"- file name: {source}, page number: {page_num}")
        return annotations

    def generate_prompt(self, query: str, context: List[str]) -> str:
        # TODO: this is a naive implementation that doesn't handle the case where the context is too long
        prepared_context = "\n\n---\n\n".join(context)
        # NOTE: this must be a plain string, not an f-string, so that the
        # {context} and {query} placeholders survive until .format() below.
        prompt_tmpl = """Answer the question based on the context below.

        {context}

        Question: {query}
        Answer:"""
        return prompt_tmpl.format(query=query, context=prepared_context)

    def prepare_messages_for_chat(self, text: str) -> list:
        messages = [
            {"role": "user", "content": text},
        ]
        return messages

    def ask_gpt(self, prompt: str, annotations: List[str]) -> str:
        res = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=self.prepare_messages_for_chat(prompt),
            temperature=0.7,
            api_key=os.environ["OPENAI_API_KEY"],
        )
        response = res["choices"][0]["message"]["content"].strip()
        annotations = "\n".join(annotations).strip()
        answer = f"Answer: \n\n{response}\n\n---\n\n"
        answer += f"Here are the sources I used:\n{annotations}"
        return answer
[ "lancedb.connect" ]
[((139, 152), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (150, 152), False, 'from dotenv import load_dotenv\n'), ((229, 260), 'lancedb.connect', 'lancedb.connect', ([], {'uri': '""".lancedb"""'}), "(uri='.lancedb')\n", (244, 260), False, 'import lancedb\n'), ((1044, 1063), 'gpt_pdf_bot.shared.embed_text', 'embed_text', (['[query]'], {}), '([query])\n', (1054, 1063), False, 'from gpt_pdf_bot.shared import embed_text\n')]
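A quick check of the templating behavior after the f-string fix above: because the template is now a plain string, both placeholders survive until .format() fills them in (the values are illustrative):

tmpl = """Answer the question based on the context below.

{context}

Question: {query}
Answer:"""
print(tmpl.format(query="What is LanceDB?",
                  context="LanceDB is an embedded vector database."))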
import os from pathlib import Path import streamlit as st from langchain.prompts import PromptTemplate from langchain.schema import StrOutputParser from langchain_community.embeddings import HuggingFaceEmbeddings from langchain_community.llms import HuggingFaceHub from langchain_community.vectorstores import LanceDB from langchain_core.runnables import RunnableParallel, RunnablePassthrough import lancedb # HUGGINGFACEHUB_API_TOKEN = st.secrets["HUGGINGFACEHUB_API_TOKEN"] HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"] @st.cache_resource def load_chain(): emb_repo = "BAAI/bge-small-en-v1.5" embeddings = HuggingFaceEmbeddings(model_name=emb_repo) llm_repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1" # llm_repo_id = "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT" llm = HuggingFaceHub( repo_id=llm_repo_id, model_kwargs={"temperature": 0.1, "max_length": 180} ) db_path = Path("lancedb") db = lancedb.connect(db_path) table = db.open_table("dharma_qa") docsearch = LanceDB(table, embeddings) retriever = docsearch.as_retriever(search_kwargs={"k": 4}) # Create system prompt template = """ You are a respected spiritual teacher, Rob Burbea. Try to distill the following pieces of context to answer the question at the end. Question is asked by a student. If you don't know the answer, just say that you don't know. Don't try to make up an answer. Use five sentences maximum and keep the answer as concise as possible. Avoid answering questions that are not related to the dharma. If the question is not about the dharma, politely inform them that you are tuned to only answer questions about the dharma. {context} Question: {question} Helpful Answer:""" # Add system prompt to chain def format_docs(docs): return "\n\n".join(doc.page_content for doc in docs) prompt = PromptTemplate(template=template, input_variables=["context", "question"]) rag_chain = ( RunnablePassthrough.assign(context=(lambda x: format_docs(x["context"]))) # {"context": retriever | format_docs, "question": RunnablePassthrough()} | prompt | llm | StrOutputParser() ) rag_chain_with_source = RunnableParallel( {"context": retriever, "question": RunnablePassthrough()} ).assign(answer=rag_chain) return rag_chain_with_source, emb_repo, llm_repo_id
[ "lancedb.connect" ]
[((641, 683), 'langchain_community.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'emb_repo'}), '(model_name=emb_repo)\n', (662, 683), False, 'from langchain_community.embeddings import HuggingFaceEmbeddings\n'), ((818, 911), 'langchain_community.llms.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'llm_repo_id', 'model_kwargs': "{'temperature': 0.1, 'max_length': 180}"}), "(repo_id=llm_repo_id, model_kwargs={'temperature': 0.1,\n 'max_length': 180})\n", (832, 911), False, 'from langchain_community.llms import HuggingFaceHub\n'), ((936, 951), 'pathlib.Path', 'Path', (['"""lancedb"""'], {}), "('lancedb')\n", (940, 951), False, 'from pathlib import Path\n'), ((961, 985), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (976, 985), False, 'import lancedb\n'), ((1041, 1067), 'langchain_community.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (1048, 1067), False, 'from langchain_community.vectorstores import LanceDB\n'), ((1931, 2005), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['context', 'question']"}), "(template=template, input_variables=['context', 'question'])\n", (1945, 2005), False, 'from langchain.prompts import PromptTemplate\n'), ((2229, 2246), 'langchain.schema.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (2244, 2246), False, 'from langchain.schema import StrOutputParser\n'), ((2343, 2364), 'langchain_core.runnables.RunnablePassthrough', 'RunnablePassthrough', ([], {}), '()\n', (2362, 2364), False, 'from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n')]
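A hypothetical invocation of the chain built above; it assumes a populated "dharma_qa" LanceDB table on disk and a valid HUGGINGFACEHUB_API_TOKEN in the environment, with output keys following the RunnableParallel/assign structure:

chain, emb_repo, llm_repo = load_chain()
out = chain.invoke("What is the role of samadhi in practice?")  # hypothetical question
print(out["answer"])
print(len(out["context"]))  # number of retrieved documents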
from flask import Flask, request, jsonify, Response
from flask_cors import CORS

app = Flask(__name__)
CORS(app)

import argparse
import io

import PIL
import duckdb
import lancedb
import lance
import pyarrow.compute as pc
from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast

import gradio as gr


@app.route("/api/python", methods=['POST'])
def get_image_blob():
    data = request.get_json()  # Parse incoming JSON data
    prompt = data.get('prompt', '')  # Get the 'prompt' property from the JSON data
    tbl = get_table()
    print(data)
    emb = embed_func(prompt)
    print(emb)
    result_df = tbl.search(emb).limit(1).to_df()

    if not result_df.empty:
        image, prompt = _extract(result_df)

        # Convert PIL.Image to bytes
        image_bytes = io.BytesIO()
        image.save(image_bytes, format='PNG')
        image_blob = image_bytes.getvalue()

        return Response(image_blob, content_type='image/png')

    return jsonify({"error": "No matching data found"})


def _extract(df):
    image_col = "image"
    return PIL.Image.open(io.BytesIO(df.iloc[0][image_col])), df.iloc[0]["prompt"]


MODEL_ID = None
MODEL = None
TOKENIZER = None
PROCESSOR = None


def get_table():
    db = lancedb.connect("data/lancedb")

    if "diffusiondb" in db.table_names():
        tbl = db.open_table("diffusiondb")
    else:
        # First data processing and full-text-search index
        data = lance.dataset("rawdata.lance").to_table()
        # remove null prompts
        tbl = db.create_table("diffusiondb", data.filter(~pc.field("prompt").is_null()), mode="overwrite")
        # create_fts_index returns None, so don't reassign tbl here
        tbl.create_fts_index(["prompt"])
    return tbl


def setup_clip_model():
    global MODEL_ID, MODEL, TOKENIZER, PROCESSOR
    MODEL_ID = "openai/clip-vit-base-patch32"
    TOKENIZER = CLIPTokenizerFast.from_pretrained(MODEL_ID)
    MODEL = CLIPModel.from_pretrained(MODEL_ID)
    PROCESSOR = CLIPProcessor.from_pretrained(MODEL_ID)


def embed_func(query):
    inputs = TOKENIZER([query], padding=True, return_tensors="pt")
    text_features = MODEL.get_text_features(**inputs)
    return text_features.detach().numpy()[0]


setup_clip_model()
[ "lancedb.connect" ]
[((86, 101), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (91, 101), False, 'from flask import Flask, request, jsonify, Response\n'), ((102, 111), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (106, 111), False, 'from flask_cors import CORS\n'), ((468, 486), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (484, 486), False, 'from flask import Flask, request, jsonify, Response\n'), ((1077, 1121), 'flask.jsonify', 'jsonify', (["{'error': 'No matching data found'}"], {}), "({'error': 'No matching data found'})\n", (1084, 1121), False, 'from flask import Flask, request, jsonify, Response\n'), ((1369, 1400), 'lancedb.connect', 'lancedb.connect', (['"""data/lancedb"""'], {}), "('data/lancedb')\n", (1384, 1400), False, 'import lancedb\n'), ((1945, 1988), 'transformers.CLIPTokenizerFast.from_pretrained', 'CLIPTokenizerFast.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (1978, 1988), False, 'from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n'), ((2001, 2036), 'transformers.CLIPModel.from_pretrained', 'CLIPModel.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (2026, 2036), False, 'from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n'), ((2053, 2092), 'transformers.CLIPProcessor.from_pretrained', 'CLIPProcessor.from_pretrained', (['MODEL_ID'], {}), '(MODEL_ID)\n', (2082, 2092), False, 'from transformers import CLIPModel, CLIPProcessor, CLIPTokenizerFast\n'), ((899, 911), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (909, 911), False, 'import io\n'), ((1018, 1064), 'flask.Response', 'Response', (['image_blob'], {'content_type': '"""image/png"""'}), "(image_blob, content_type='image/png')\n", (1026, 1064), False, 'from flask import Flask, request, jsonify, Response\n'), ((1191, 1224), 'io.BytesIO', 'io.BytesIO', (['df.iloc[0][image_col]'], {}), '(df.iloc[0][image_col])\n', (1201, 1224), False, 'import io\n'), ((1569, 1599), 'lance.dataset', 'lance.dataset', (['"""rawdata.lance"""'], {}), "('rawdata.lance')\n", (1582, 1599), False, 'import lance\n'), ((1699, 1717), 'pyarrow.compute.field', 'pc.field', (['"""prompt"""'], {}), "('prompt')\n", (1707, 1717), True, 'import pyarrow.compute as pc\n')]
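A client-side sketch for exercising the /api/python endpoint above, assuming the Flask app is served locally on port 5000; the prompt text is illustrative:

import requests

resp = requests.post(
    "http://localhost:5000/api/python",
    json={"prompt": "a cat in space"},  # hypothetical prompt
)
if resp.headers.get("Content-Type") == "image/png":
    with open("result.png", "wb") as f:
        f.write(resp.content)
else:
    print(resp.json())  # e.g. {"error": "No matching data found"}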
from PIL import Image
from transformers import CLIPProcessor, CLIPModel
from openai import OpenAI
import lancedb
import os
import pandas as pd
from dotenv import load_dotenv

load_dotenv()

uri = "./sample-lancedb"
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
model_name = "openai/clip-vit-base-patch32"


def create_clip_image_embeddings(image_path, model_name):
    # Load the image
    image = Image.open(image_path)

    # Load the CLIP model and processor
    model = CLIPModel.from_pretrained(model_name)
    processor = CLIPProcessor.from_pretrained(model_name)

    # Generate the image embedding
    inputs = processor(images=image, return_tensors="pt")
    image_embedding = model.get_image_features(**inputs)

    # Return the image embedding
    return image_embedding.detach().numpy()[0]


def get_embedding(text, model="text-embedding-3-small"):
    text = text.replace("\n", " ")
    return client.embeddings.create(input=[text], model=model).data[0].embedding


if __name__ == '__main__':
    df = pd.read_csv('listings.csv')

    # Create the full description column combining all the text and data columns,
    # which will be used to generate the embeddings
    df["full_description"] = "Neighbourhood: " + df["Neighborhood"] + \
        " Price: " + df["Price"].astype(str) + " Bedrooms: " + df["Bedrooms"].astype(str) + \
        " Bathrooms: " + df["Bathrooms"].astype(str) + " House Size: " + df["House Size"].astype(str) + \
        " Description: " + df["Description"] + " Neighborhood Description: " + df["Neighborhood Description"]

    df['ada_embedding'] = df["full_description"].apply(lambda x: get_embedding(x, model='text-embedding-3-small'))
    df['image_embedding'] = df["images"].apply(lambda x: create_clip_image_embeddings(x, model_name))
    df.to_csv('listings_with_embeddings.csv', index=False)

    db = lancedb.connect(uri)

    df_text = df[["Neighborhood", "Price", "Bedrooms", "Bathrooms", "images", "ada_embedding"]].copy()
    df_text = df_text.rename(columns={"ada_embedding": "vector"})
    df_images = df[["Neighborhood", "Price", "Bedrooms", "Bathrooms", "images", "image_embedding"]].copy()
    df_images = df_images.rename(columns={"image_embedding": "vector"})

    tbl_text = db.create_table("table_from_df_text", data=df_text, exist_ok=True)
    print(df_images.head())
    print(df_images["vector"][0].shape)
    tbl_images = db.create_table("table_from_df_images", data=df_images, exist_ok=True)
[ "lancedb.connect" ]
[((175, 188), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (186, 188), False, 'from dotenv import load_dotenv\n'), ((403, 425), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (413, 425), False, 'from PIL import Image\n'), ((479, 516), 'transformers.CLIPModel.from_pretrained', 'CLIPModel.from_pretrained', (['model_name'], {}), '(model_name)\n', (504, 516), False, 'from transformers import CLIPProcessor, CLIPModel\n'), ((533, 574), 'transformers.CLIPProcessor.from_pretrained', 'CLIPProcessor.from_pretrained', (['model_name'], {}), '(model_name)\n', (562, 574), False, 'from transformers import CLIPProcessor, CLIPModel\n'), ((1024, 1051), 'pandas.read_csv', 'pd.read_csv', (['"""listings.csv"""'], {}), "('listings.csv')\n", (1035, 1051), True, 'import pandas as pd\n'), ((1851, 1871), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1866, 1871), False, 'import lancedb\n'), ((238, 265), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (247, 265), False, 'import os\n')]
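A possible follow-up to the script above: since CLIP text and image embeddings share one space, the image table can be queried with a text embedding. The helper and query string below are assumptions; uri and model_name are reused from the script:

import lancedb
from transformers import CLIPModel, CLIPProcessor

def create_clip_text_embedding(text, model_name):
    model = CLIPModel.from_pretrained(model_name)
    processor = CLIPProcessor.from_pretrained(model_name)
    inputs = processor(text=[text], return_tensors="pt", padding=True)
    return model.get_text_features(**inputs).detach().numpy()[0]

db = lancedb.connect(uri)
tbl = db.open_table("table_from_df_images")
query_vec = create_clip_text_embedding("modern house with a pool", model_name)  # hypothetical query
print(tbl.search(query_vec).limit(3).to_pandas()[["Neighborhood", "Price"]])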
import streamlit as st import pandas as pd import json import requests from pathlib import Path from datetime import datetime from jinja2 import Template import lancedb import sqlite3 from services.lancedb_notes import IndexDocumentsNotes from services.auto_research import AutoResearch st.set_page_config(layout='wide', page_title='AutoResearch') notes_folder = Path('data/notes') collections_folder = Path('data/collections') tmp_folder = Path('data/tmp') config_folder = Path('data/config') with open(config_folder.joinpath('prompt_templates.json'), 'r') as f: prompt_options = json.load(f) index_folder = Path('indexes') sqlite_location = Path('data/indexes/documents.sqlite') lance_index = lancedb.connect(index_folder) available_indexes = lance_index.table_names() index_to_search = st.selectbox(label='Available Indexes', options=available_indexes) query = st.text_input(label="What do you want to search?", value='') @st.cache_data def remote_search(query, collection_name): results = requests.post('http://localhost:8000/hybrid', json={'query':query, 'collection_name':collection_name, 'top_k':50, 'fts_weight':0.5, 'vec_weight':1-0.5}) result_data, available_fields = results.json() available_fields = set(available_fields) new_fields = set() for result in result_data: if 'metadata' in result and len(result['metadata']) > 0: metadata = json.loads(result['metadata']) result.update(metadata) new_fields.update(metadata.keys()) del result['metadata'] return result_data if 'results_to_save' not in st.session_state: st.session_state['results_to_save'] = dict() def add_result_to_save(result): note_hash = hash(str(result)) st.write(st.session_state['results_to_save'].keys()) if note_hash not in st.session_state['results_to_save']: st.session_state['results_to_save'][note_hash] = result else: del st.session_state['results_to_save'][note_hash] if query: st.markdown(f"## {query}") auto_search = AutoResearch(objective=query, collection_name='trump') report_results = auto_search.research_question(query) # st.write(report_results) for task_idx in range(len(report_results)): task_description = report_results[str(task_idx + 1)]['task'] task_result = report_results[str(task_idx + 1)]['results'] # st.json(task_result) st.markdown(f"### Task {task_idx + 1}: {task_description['task']}") st.markdown(f"**Research Actions**: {task_description['actions']}") st.markdown(f"**Expected Outcomes**: {task_description['expected_outcomes']}") st.markdown(f"**Considerations**: {task_description['considerations']}") st.write() st.markdown(f"### Full Summary:\n{task_result['final_summary']}") st.markdown(f"### Summarized Summary:\n{task_result['summarized_summary']}") st.divider() for query_idx, sub_query in enumerate(task_result['internet_queries']): with st.expander(f"Expand for sub query: {sub_query}"): st.markdown(f"### Subquery: {sub_query}") # st.json(task_result['queries']) query_results = task_result['queries'][query_idx] st.markdown("#### Summary") st.markdown(query_results['sub_summary']) st.divider() for _article in query_results['article_objects']: st.markdown(f"**Title: {_article['title']}** --- **UUID: {_article['uuid']}**") st.markdown("**Text**") _text = _article['text'].replace('\n','\n\n') st.markdown(f"{_text}") st.divider() # # for index, result in enumerate(query_results): # # st.write(result) # st.markdown(f"**:blue[{result['title']}]**") # st.markdown(f"*:blue[Score: {round(result['score'], 3)}]*") # with st.container(): # st.write(f"{' '.join(result['text'].split(' ')[:100])}.....") # with st.expander('See 
Full Text and Details'): # full_text, quick_annotate = st.columns([4,1]) # with full_text: # st.markdown('**Text:**') # st.markdown(result['text']) # save_to_collection = st.toggle('Save to collection',key=f'toggle_{index}', # on_change=add_result_to_save, args=(result, )) st.divider()
[ "lancedb.connect" ]
[((288, 348), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""', 'page_title': '"""AutoResearch"""'}), "(layout='wide', page_title='AutoResearch')\n", (306, 348), True, 'import streamlit as st\n'), ((384, 402), 'pathlib.Path', 'Path', (['"""data/notes"""'], {}), "('data/notes')\n", (388, 402), False, 'from pathlib import Path\n'), ((424, 448), 'pathlib.Path', 'Path', (['"""data/collections"""'], {}), "('data/collections')\n", (428, 448), False, 'from pathlib import Path\n'), ((462, 478), 'pathlib.Path', 'Path', (['"""data/tmp"""'], {}), "('data/tmp')\n", (466, 478), False, 'from pathlib import Path\n'), ((495, 514), 'pathlib.Path', 'Path', (['"""data/config"""'], {}), "('data/config')\n", (499, 514), False, 'from pathlib import Path\n'), ((636, 651), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (640, 651), False, 'from pathlib import Path\n'), ((670, 707), 'pathlib.Path', 'Path', (['"""data/indexes/documents.sqlite"""'], {}), "('data/indexes/documents.sqlite')\n", (674, 707), False, 'from pathlib import Path\n'), ((723, 752), 'lancedb.connect', 'lancedb.connect', (['index_folder'], {}), '(index_folder)\n', (738, 752), False, 'import lancedb\n'), ((817, 883), 'streamlit.selectbox', 'st.selectbox', ([], {'label': '"""Available Indexes"""', 'options': 'available_indexes'}), "(label='Available Indexes', options=available_indexes)\n", (829, 883), True, 'import streamlit as st\n'), ((893, 953), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""What do you want to search?"""', 'value': '""""""'}), "(label='What do you want to search?', value='')\n", (906, 953), True, 'import streamlit as st\n'), ((606, 618), 'json.load', 'json.load', (['f'], {}), '(f)\n', (615, 618), False, 'import json\n'), ((1028, 1195), 'requests.post', 'requests.post', (['"""http://localhost:8000/hybrid"""'], {'json': "{'query': query, 'collection_name': collection_name, 'top_k': 50,\n 'fts_weight': 0.5, 'vec_weight': 1 - 0.5}"}), "('http://localhost:8000/hybrid', json={'query': query,\n 'collection_name': collection_name, 'top_k': 50, 'fts_weight': 0.5,\n 'vec_weight': 1 - 0.5})\n", (1041, 1195), False, 'import requests\n'), ((2085, 2111), 'streamlit.markdown', 'st.markdown', (['f"""## {query}"""'], {}), "(f'## {query}')\n", (2096, 2111), True, 'import streamlit as st\n'), ((2131, 2185), 'services.auto_research.AutoResearch', 'AutoResearch', ([], {'objective': 'query', 'collection_name': '"""trump"""'}), "(objective=query, collection_name='trump')\n", (2143, 2185), False, 'from services.auto_research import AutoResearch\n'), ((2499, 2566), 'streamlit.markdown', 'st.markdown', (['f"""### Task {task_idx + 1}: {task_description[\'task\']}"""'], {}), '(f"### Task {task_idx + 1}: {task_description[\'task\']}")\n', (2510, 2566), True, 'import streamlit as st\n'), ((2575, 2642), 'streamlit.markdown', 'st.markdown', (['f"""**Research Actions**: {task_description[\'actions\']}"""'], {}), '(f"**Research Actions**: {task_description[\'actions\']}")\n', (2586, 2642), True, 'import streamlit as st\n'), ((2651, 2729), 'streamlit.markdown', 'st.markdown', (['f"""**Expected Outcomes**: {task_description[\'expected_outcomes\']}"""'], {}), '(f"**Expected Outcomes**: {task_description[\'expected_outcomes\']}")\n', (2662, 2729), True, 'import streamlit as st\n'), ((2738, 2810), 'streamlit.markdown', 'st.markdown', (['f"""**Considerations**: {task_description[\'considerations\']}"""'], {}), '(f"**Considerations**: {task_description[\'considerations\']}")\n', (2749, 2810), True, 'import 
streamlit as st\n'), ((2819, 2829), 'streamlit.write', 'st.write', ([], {}), '()\n', (2827, 2829), True, 'import streamlit as st\n'), ((2838, 2906), 'streamlit.markdown', 'st.markdown', (['f"""### Full Summary:\n{task_result[\'final_summary\']}"""'], {}), '(f"""### Full Summary:\n{task_result[\'final_summary\']}""")\n', (2849, 2906), True, 'import streamlit as st\n'), ((2912, 2991), 'streamlit.markdown', 'st.markdown', (['f"""### Summarized Summary:\n{task_result[\'summarized_summary\']}"""'], {}), '(f"""### Summarized Summary:\n{task_result[\'summarized_summary\']}""")\n', (2923, 2991), True, 'import streamlit as st\n'), ((2997, 3009), 'streamlit.divider', 'st.divider', ([], {}), '()\n', (3007, 3009), True, 'import streamlit as st\n'), ((1482, 1512), 'json.loads', 'json.loads', (["result['metadata']"], {}), "(result['metadata'])\n", (1492, 1512), False, 'import json\n'), ((4586, 4598), 'streamlit.divider', 'st.divider', ([], {}), '()\n', (4596, 4598), True, 'import streamlit as st\n'), ((3107, 3156), 'streamlit.expander', 'st.expander', (['f"""Expand for sub query: {sub_query}"""'], {}), "(f'Expand for sub query: {sub_query}')\n", (3118, 3156), True, 'import streamlit as st\n'), ((3174, 3215), 'streamlit.markdown', 'st.markdown', (['f"""### Subquery: {sub_query}"""'], {}), "(f'### Subquery: {sub_query}')\n", (3185, 3215), True, 'import streamlit as st\n'), ((3348, 3375), 'streamlit.markdown', 'st.markdown', (['"""#### Summary"""'], {}), "('#### Summary')\n", (3359, 3375), True, 'import streamlit as st\n'), ((3392, 3433), 'streamlit.markdown', 'st.markdown', (["query_results['sub_summary']"], {}), "(query_results['sub_summary'])\n", (3403, 3433), True, 'import streamlit as st\n'), ((3450, 3462), 'streamlit.divider', 'st.divider', ([], {}), '()\n', (3460, 3462), True, 'import streamlit as st\n'), ((3549, 3634), 'streamlit.markdown', 'st.markdown', (['f"""**Title: {_article[\'title\']}** --- **UUID: {_article[\'uuid\']}**"""'], {}), '(f"**Title: {_article[\'title\']}** --- **UUID: {_article[\'uuid\']}**"\n )\n', (3560, 3634), True, 'import streamlit as st\n'), ((3650, 3673), 'streamlit.markdown', 'st.markdown', (['"""**Text**"""'], {}), "('**Text**')\n", (3661, 3673), True, 'import streamlit as st\n'), ((3760, 3783), 'streamlit.markdown', 'st.markdown', (['f"""{_text}"""'], {}), "(f'{_text}')\n", (3771, 3783), True, 'import streamlit as st\n'), ((3804, 3816), 'streamlit.divider', 'st.divider', ([], {}), '()\n', (3814, 3816), True, 'import streamlit as st\n')]
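For reference, a direct call to the cached remote_search helper defined above (the AutoResearch flow shown here bypasses it); this assumes the hybrid-search service is running on localhost:8000 with a "trump" collection, and the result fields mirror those used in the commented-out rendering code:

results = remote_search("campaign finance coverage", collection_name="trump")  # hypothetical query
for r in results[:3]:
    print(r.get("title"), r.get("score"))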
#!/usr/bin/env python
import numpy as np
import time
import lancedb
import pyarrow as pa
import logging

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


def generate_random_data_vectors(num_vectors, dimension, offset=0):
    """
    Generate random data vectors.

    :param num_vectors: Number of vectors to generate.
    :param dimension: Dimension of each vector.
    :param offset: Offset for ID numbering.
    :return: List of dictionaries with vector and id.
    """
    return [{"vector": list(np.random.random(dimension)), "id": i}
            for i in range(1 + offset, num_vectors + 1 + offset)]


def generate_random_vectors(num_vectors, dimension):
    """
    Generate random vectors using NumPy.

    :param num_vectors: Number of vectors to generate.
    :param dimension: Dimension of each vector.
    :return: NumPy array of random vectors.
    """
    return np.random.random((num_vectors, dimension))


def main():
    # Parameters
    dimension = 1536
    num_vectors = 100000
    num_query_vectors = 20000
    top_k = 10

    db = lancedb.connect("./vectors.db")
    logging.info("Starting Generating Data")
    db_vectors = generate_random_data_vectors(num_vectors, dimension)
    query_vectors = generate_random_vectors(num_query_vectors, dimension)
    logging.info("Finished Generating Data")

    logging.info("Starting to insert data")
    table = db.create_table("vectors", data=db_vectors)
    for i in range(1, 10):
        logging.info(f"Starting to insert data with offset {i}")
        table.add(generate_random_data_vectors(num_vectors, dimension, offset=num_vectors * i))
        logging.info(f"Finished inserting data with offset {i}")
    logging.info("Finished inserting data")

    logging.info("Starting Queries")
    query_times = []
    for i, query_vector in enumerate(query_vectors):
        start_time = time.time()
        # Materialize the results so the search actually executes;
        # the query builder is lazy, so without this nothing is timed.
        table.search(query_vector).limit(top_k).to_list()
        end_time = time.time()
        query_duration_ms = (end_time - start_time) * 1000
        query_times.append(query_duration_ms)
        if (i + 1) % 5000 == 0:
            logging.info(f"Iteration {i + 1}")

    percentiles = [50, 90, 95, 99]
    percentile_results = {p: np.percentile(query_times, p) for p in percentiles}

    logging.info("Query Time Percentiles (milliseconds):")
    for percentile, value in percentile_results.items():
        logging.info(f"{percentile}th percentile: {value:.3f} ms")

    db.drop_table("vectors")


if __name__ == "__main__":
    main()
[ "lancedb.connect" ]
[((117, 213), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n", (136, 213), False, 'import logging\n'), ((922, 964), 'numpy.random.random', 'np.random.random', (['(num_vectors, dimension)'], {}), '((num_vectors, dimension))\n', (938, 964), True, 'import numpy as np\n'), ((1097, 1128), 'lancedb.connect', 'lancedb.connect', (['"""./vectors.db"""'], {}), "('./vectors.db')\n", (1112, 1128), False, 'import lancedb\n'), ((1134, 1174), 'logging.info', 'logging.info', (['"""Starting Generating Data"""'], {}), "('Starting Generating Data')\n", (1146, 1174), False, 'import logging\n'), ((1323, 1363), 'logging.info', 'logging.info', (['"""Finished Generating Data"""'], {}), "('Finished Generating Data')\n", (1335, 1363), False, 'import logging\n'), ((1369, 1408), 'logging.info', 'logging.info', (['"""Starting to insert data"""'], {}), "('Starting to insert data')\n", (1381, 1408), False, 'import logging\n'), ((1723, 1762), 'logging.info', 'logging.info', (['"""Finished inserting data"""'], {}), "('Finished inserting data')\n", (1735, 1762), False, 'import logging\n'), ((1768, 1800), 'logging.info', 'logging.info', (['"""Starting Queries"""'], {}), "('Starting Queries')\n", (1780, 1800), False, 'import logging\n'), ((2334, 2388), 'logging.info', 'logging.info', (['"""Query Time Percentiles (milliseconds):"""'], {}), "('Query Time Percentiles (milliseconds):')\n", (2346, 2388), False, 'import logging\n'), ((1500, 1556), 'logging.info', 'logging.info', (['f"""Starting to insert data with offset {i}"""'], {}), "(f'Starting to insert data with offset {i}')\n", (1512, 1556), False, 'import logging\n'), ((1661, 1717), 'logging.info', 'logging.info', (['f"""Finished inserting data with offset {i}"""'], {}), "(f'Finished inserting data with offset {i}')\n", (1673, 1717), False, 'import logging\n'), ((1896, 1907), 'time.time', 'time.time', ([], {}), '()\n', (1905, 1907), False, 'import time\n'), ((2015, 2026), 'time.time', 'time.time', ([], {}), '()\n', (2024, 2026), False, 'import time\n'), ((2277, 2306), 'numpy.percentile', 'np.percentile', (['query_times', 'p'], {}), '(query_times, p)\n', (2290, 2306), True, 'import numpy as np\n'), ((2454, 2512), 'logging.info', 'logging.info', (['f"""{percentile}th percentile: {value:.3f} ms"""'], {}), "(f'{percentile}th percentile: {value:.3f} ms')\n", (2466, 2512), False, 'import logging\n'), ((2177, 2211), 'logging.info', 'logging.info', (['f"""Iteration {i + 1}"""'], {}), "(f'Iteration {i + 1}')\n", (2189, 2211), False, 'import logging\n'), ((558, 585), 'numpy.random.random', 'np.random.random', (['dimension'], {}), '(dimension)\n', (574, 585), True, 'import numpy as np\n')]
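An optional, smaller-scale ANN variant of the benchmark above: building an IVF-PQ index first makes the timed searches approximate rather than brute force. The sizes and index parameters here are illustrative, not tuned:

import lancedb
import numpy as np

db = lancedb.connect("/tmp/ann-demo.db")  # hypothetical path
data = [{"vector": np.random.random(64).tolist(), "id": i} for i in range(5000)]
tbl = db.create_table("bench", data, mode="overwrite")
tbl.create_index(num_partitions=16, num_sub_vectors=8)  # 64 dims divisible by 8 sub-vectors
print(len(tbl.search(np.random.random(64)).limit(10).to_list()))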
import argparse
import lancedb
import torch
import pyarrow as pa
import pandas as pd
from pathlib import Path
import tqdm
import numpy as np
import logging
from transformers import AutoConfig
from sentence_transformers import SentenceTransformer

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--emb-model", help="embedding model name on HF hub", type=str)
    parser.add_argument("--table", help="table name in DB", type=str)
    parser.add_argument("--input-dir", help="input directory with documents to ingest", type=str)
    parser.add_argument("--vec-column", help="vector column name in the table", type=str, default="vector")
    parser.add_argument("--text-column", help="text column name in the table", type=str, default="text")
    parser.add_argument("--db-loc", help="database location", type=str,
                        default=str(Path().resolve() / ".lancedb"))
    parser.add_argument("--batch-size", help="batch size for embedding model", type=int, default=32)
    parser.add_argument("--num-partitions", help="number of partitions for index", type=int, default=256)
    parser.add_argument("--num-sub-vectors", help="number of sub-vectors for index", type=int, default=96)
    args = parser.parse_args()

    emb_config = AutoConfig.from_pretrained(args.emb_model)
    emb_dimension = emb_config.hidden_size
    assert emb_dimension % args.num_sub_vectors == 0, \
        "Embedding size must be divisible by the num of sub vectors"

    model = SentenceTransformer(args.emb_model)
    model.eval()

    if torch.backends.mps.is_available():
        device = "mps"
    elif torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"
    logger.info(f"using {str(device)} device")

    db = lancedb.connect(args.db_loc)
    schema = pa.schema(
        [
            pa.field(args.vec_column, pa.list_(pa.float32(), emb_dimension)),
            pa.field(args.text_column, pa.string())
        ]
    )
    tbl = db.create_table(args.table, schema=schema, mode="overwrite")

    input_dir = Path(args.input_dir)
    files = list(input_dir.rglob("*"))
    sentences = []
    for file in files:
        if file.is_file():
            with open(file, encoding='utf-8') as f:
                sentences.append(f.read())

    for i in tqdm.tqdm(range(0, int(np.ceil(len(sentences) / args.batch_size)))):
        try:
            batch = [sent for sent in sentences[i * args.batch_size:(i + 1) * args.batch_size] if len(sent) > 0]
            encoded = model.encode(batch, normalize_embeddings=True, device=device)
            encoded = [list(vec) for vec in encoded]
            df = pd.DataFrame({
                args.vec_column: encoded,
                args.text_column: batch
            })
            tbl.add(df)
        except Exception:
            # a bare `except:` would also swallow KeyboardInterrupt;
            # catch Exception and surface the skip at warning level
            logger.warning(f"batch {i} was skipped")

    '''
    create ivf-pq index
    https://lancedb.github.io/lancedb/ann_indexes/
    with the size of the transformer docs, index is not really needed
    but we'll do it for demonstrational purposes
    '''
    tbl.create_index(
        num_partitions=args.num_partitions,
        num_sub_vectors=args.num_sub_vectors,
        vector_column_name=args.vec_column
    )


if __name__ == "__main__":
    main()
[ "lancedb.connect" ]
[((248, 287), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (267, 287), False, 'import logging\n'), ((297, 324), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (314, 324), False, 'import logging\n'), ((352, 377), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (375, 377), False, 'import argparse\n'), ((1351, 1393), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['args.emb_model'], {}), '(args.emb_model)\n', (1377, 1393), False, 'from transformers import AutoConfig\n'), ((1576, 1611), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['args.emb_model'], {}), '(args.emb_model)\n', (1595, 1611), False, 'from sentence_transformers import SentenceTransformer\n'), ((1637, 1670), 'torch.backends.mps.is_available', 'torch.backends.mps.is_available', ([], {}), '()\n', (1668, 1670), False, 'import torch\n'), ((1845, 1873), 'lancedb.connect', 'lancedb.connect', (['args.db_loc'], {}), '(args.db_loc)\n', (1860, 1873), False, 'import lancedb\n'), ((2135, 2155), 'pathlib.Path', 'Path', (['args.input_dir'], {}), '(args.input_dir)\n', (2139, 2155), False, 'from pathlib import Path\n'), ((1704, 1729), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1727, 1729), False, 'import torch\n'), ((2724, 2789), 'pandas.DataFrame', 'pd.DataFrame', (['{args.vec_column: encoded, args.text_column: batch}'], {}), '({args.vec_column: encoded, args.text_column: batch})\n', (2736, 2789), True, 'import pandas as pd\n'), ((2020, 2031), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (2029, 2031), True, 'import pyarrow as pa\n'), ((1952, 1964), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (1962, 1964), True, 'import pyarrow as pa\n'), ((955, 961), 'pathlib.Path', 'Path', ([], {}), '()\n', (959, 961), False, 'from pathlib import Path\n')]
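The script above only writes to the table. A hedged sketch of the matching read path, reusing `model`, `tbl`, `device`, and `args` from `main()` so query vectors live in the same embedding space as the stored ones; the query text is a placeholder, and it assumes the default `--vec-column` name "vector" so `tbl.search` picks the vector column automatically:

    query = "which documents mention vector indexes?"  # placeholder query
    qvec = model.encode([query], normalize_embeddings=True, device=device)[0]
    hits = tbl.search(qvec.tolist()).limit(5).to_df()
    print(hits[args.text_column].tolist())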
# import ray # from ray import serve # from ray.serve.handle import DeploymentHandle import logging from collections import Counter, defaultdict, deque import pickle import os import glob import cv2 import numpy as np import pandas as pd import torch from PIL import Image import lancedb import clip from detic import Detic from detic.inference import load_classifier from xmem import XMem from detectron2.structures import Boxes, Instances, pairwise_iou from torchvision.ops import masks_to_boxes from torchvision import transforms from ..util.nms import asymmetric_nms, mask_iou from ..util.vocab import prepare_vocab from .download import ensure_db from IPython import embed log = logging.getLogger(__name__) # ray.init() IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) class CustomTrack(XMem.Track): hoi_class_id = 0 state_class_label = '' confidence = 0 def __init__(self, track_id, t_obs, n_init=3, state_history_len=4, hand_obj_history_len=4, **kw): super().__init__(track_id, t_obs, n_init, **kw) self.label_count = Counter() self.obj_state_history = deque(maxlen=state_history_len) self.hoi_history = deque(maxlen=hand_obj_history_len) self.obj_state_dist = pd.Series(dtype=float) self.obj_state_dist_label = None self.z_clips = {} @property def pred_label(self): xs = self.label_count.most_common(1) return xs[0][0] if xs else None def update_state(self, state, pred_label, alpha=0.1): # if the label changed, delete the state if self.obj_state_dist_label != pred_label: self.obj_state_dist = pd.Series(dtype=float) self.obj_state_dist_label = pred_label # set default for k in state.index: if k not in self.obj_state_dist: self.obj_state_dist[k] = state[k] # do EMA for k in self.obj_state_dist.index: self.obj_state_dist[k] = (1 - alpha) * self.obj_state_dist[k] + alpha * state.get(k, 0) return self.obj_state_dist import itertools def cat_instances(instance_lists): assert all(isinstance(i, Instances) for i in instance_lists) assert len(instance_lists) > 0 if len(instance_lists) == 1: return instance_lists[0] image_size = instance_lists[0].image_size if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing for i in instance_lists[1:]: assert i.image_size == image_size ret = Instances(image_size) for k in instance_lists[0]._fields.keys(): values = [i.get(k) for i in instance_lists] v0 = values[0] if isinstance(v0, torch.Tensor): values = torch.cat(values, dim=0) elif isinstance(v0, list): values = list(itertools.chain(*values)) elif hasattr(type(v0), "cat"): values = type(v0).cat(values) elif isinstance(v0, np.ndarray): values = np.concatenate(values, axis=0) else: raise ValueError("Unsupported type {} for concatenation".format(type(v0))) ret.set(k, values) return ret # IGNORE_CLASSES = ['table', 'dining_table', 'table-tennis_table', 'person'] class ObjectDetector: def __init__( self, vocabulary, state_db_fname=None, custom_state_clsf_fname=None, xmem_config={}, conf_threshold=0.3, detect_hoi=None, state_key='state', detic_config_key=None, additional_roi_heads=None, filter_tracked_detections_from_frame=True, device='cuda', detic_device=None, egohos_device=None, xmem_device=None, clip_device=None ): # initialize models self.device = device self.detic_device = detic_device or device self.egohos_device = egohos_device or device self.xmem_device = xmem_device or device self.clip_device = clip_device or device self.detic = Detic([], config=detic_config_key, masks=True, one_class_per_proposal=3, conf_threshold=conf_threshold, 
device=self.detic_device).eval().to(self.detic_device) self.conf_threshold = conf_threshold self.filter_tracked_detections_from_frame = filter_tracked_detections_from_frame self.egohos = None self.egohos_type = np.array(['', 'hand', 'hand', 'obj', 'obj', 'obj', 'obj', 'obj', 'obj', 'cb']) self.egohos_hand_side = np.array(['', 'left', 'right', 'left', 'right', 'both', 'left', 'right', 'both', '']) if detect_hoi is not False: try: from egohos import EgoHos self.egohos = EgoHos('obj1', device=self.egohos_device).eval() except ImportError as e: print('Could not import EgoHOS:', e) if detect_hoi is True: raise self.xmem = XMem({ 'top_k': 30, 'mem_every': 30, 'deep_update_every': -1, 'enable_long_term': True, 'enable_long_term_count_usage': True, 'num_prototypes': 128, 'min_mid_term_frames': 6, 'max_mid_term_frames': 12, 'max_long_term_elements': 1000, 'tentative_frames': 3, 'tentative_age': 3, 'max_age': 60, # in steps # 'min_iou': 0.3, **xmem_config, }, Track=CustomTrack).to(self.xmem_device).eval() # load vocabularies if vocabulary.get('base'): _, open_meta, _ = load_classifier(vocabulary['base'], prepare=False) base_prompts = open_meta.thing_classes else: base_prompts = [] tracked_prompts, tracked_vocab = prepare_vocab(vocabulary['tracked']) untracked_prompts, untracked_vocab = prepare_vocab(vocabulary.get('untracked') or []) # get base prompts remove_vocab = set(vocabulary.get('remove') or ()) | set(tracked_prompts) | set(untracked_prompts) base_prompts = [c for c in base_prompts if c not in remove_vocab] # get base vocab equival_map = vocabulary.get('equivalencies') or {} base_vocab = [equival_map.get(c, c) for c in base_prompts] # combine and get final vocab list full_vocab = list(tracked_vocab) + list(untracked_vocab) + base_vocab full_prompts = list(tracked_prompts) + list(untracked_prompts) + base_prompts # if external_vocab: # full_vocab, full_prompts = list(zip(*[(v, p) for v, p in zip(full_vocab, full_prompts) if v not in external_vocab])) or [[],[]] if additional_roi_heads is not None and not isinstance(additional_roi_heads, list): additional_roi_heads = [additional_roi_heads] self.additional_roi_heads = [ (torch.load(h) if isinstance(h, str) else h).to(self.detic_device) for h in additional_roi_heads or [] ] for h in self.additional_roi_heads: h.one_class_per_proposal = self.detic.predictor.model.roi_heads.one_class_per_proposal # for p in h.box_predictor: # p.test_topk_per_image = self.detic.predictor.model.roi_heads.box_predictor[0].test_topk_per_image self.additional_roi_heads_labels = [h.labels for h in self.additional_roi_heads] labels_covered_by_roi_heads = [l for ls in self.additional_roi_heads_labels for l in ls] self.base_labels = [l for l in full_vocab if l not in labels_covered_by_roi_heads] self.tracked_vocabulary = np.asarray(list(set(tracked_vocab))) self.ignored_vocabulary = np.asarray(['IGNORE']) self.skill_clsf, _, _ = load_classifier(full_prompts, metadata_name='lvis+', device=self.detic_device) self.skill_labels = np.asarray(full_vocab) self.skill_labels_is_tracked = np.isin(self.skill_labels, self.tracked_vocabulary) self.state_ema = 0.25 self.state_clsf_type = None self.state_db_key = state_key self.obj_label_names = [] self.sklearn_state_clsfs = {} if state_db_fname: if state_db_fname.endswith(".lancedb"): self.state_clsf_type = 'lancedb' # image encoder self.clip, self.clip_pre = clip.load("ViT-B/32", device=self.clip_device) state_db_fname = ensure_db(state_db_fname) print("Using state db:", state_db_fname) self.obj_state_db = 
lancedb.connect(state_db_fname) self.obj_label_names = self.obj_state_db.table_names() self.obj_state_tables = { k: self.obj_state_db[k] for k in self.obj_label_names } print(f"State DB: {self.obj_state_db}") print(f'Objects: {self.obj_label_names}') # for name in self.obj_label_names: # tbl.create_index(num_partitions=256, num_sub_vectors=96) if custom_state_clsf_fname: import joblib for f in glob.glob(os.path.join(custom_state_clsf_fname, '*.joblib')): cname = os.path.splitext(os.path.basename(f))[0] print('using sklearn model:', cname, f) c = joblib.load(f) c.labels = np.array([l.strip() for l in open(os.path.join(custom_state_clsf_fname, f'{cname}.txt')).readlines() if l.strip()]) self.sklearn_state_clsfs[cname] = c print(c) # print(self.sklearn_state_clsfs) # input() # embed() # if state_db_fname.endswith('.pkl'): # self.state_clsf_type = 'dino' # self.dinov2 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14_reg').eval().to(self.clip_device) # self.dino_head, self.dino_classes = pickle.load(open(state_db_fname, 'rb')) # dino_object_classes = np.array([x.split('__')[0] for x in self.dino_classes]) # self.dino_state_classes = np.array([x.split('__')[1] for x in self.dino_classes]) # self.obj_label_names = np.unique(dino_object_classes) # self.dino_label_mask = {l: dino_object_classes == l for l in self.obj_label_names} # self.dino_pre = transforms.Compose([ # transforms.Resize(224, interpolation=transforms.InterpolationMode.BICUBIC), # transforms.CenterCrop(224), # transforms.ToTensor(), # transforms.Normalize(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), # ]) # print(f'Objects: {self.obj_label_names}') def clear_memory(self): self.xmem.clear_memory() def predict_objects(self, image): # ----------------------------- Object Detection ----------------------------- # # predict objects detic_query = self.detic.build_query(image) outputs = detic_query.detect(self.skill_clsf, conf_threshold=0.3, labels=self.skill_labels) instances = outputs['instances'] if self.additional_roi_heads: instances = instances[np.isin(instances.pred_labels, self.base_labels)] instances_list = [ detic_query.detect(self.skill_clsf, roi_heads=h, labels=self.skill_labels)['instances'] for h in self.additional_roi_heads ] instances_list = [ h[np.isin(h.pred_labels, ls)] for h, ls in zip(instances_list, self.additional_roi_heads_labels) ] instances = self._cat_instances(instances, instances_list) instances = self._filter_detections(instances) return instances, detic_query def _cat_instances(self, instances, instances_list): if instances_list: instances = [instances] + instances_list # score_len = max(x.topk_scores.shape[1] for x in instances) # print(score_len) # class_offset = 0 # for x in instances: # try: # x.remove('topk_scores') # x.remove('topk_classes') # x.remove('topk_labels') # except KeyError: # pass # s = x.pred_scores # x.pred_scores = torch.cat([torch.zeros((len(s), class_offset), device=s.device, dtype=s.dtype), s], dim=1) # class_offset += s.shape[1] # s = x.topk_scores # if s.shape[1] < score_len: # print(s.shape) # x2 = torch.zeros((len(x), score_len), device=s.device, dtype=s.dtype) # x2[:, :len(s)] = s # x.topk_scores = x2 instances = cat_instances(instances) return instances def _filter_detections(self, instances): # drop any ignored instances instances = instances[~np.isin(instances.pred_labels, self.ignored_vocabulary)] # filter out objects completely inside another object obj_priority = torch.from_numpy(np.isin(instances.pred_labels, self.tracked_vocabulary)).int() filtered, 
overlap = asymmetric_nms(instances.pred_boxes.tensor, instances.scores, obj_priority, iou_threshold=0.85) filtered_instances = instances[filtered.cpu().numpy()] # if Counter(instances.pred_labels.tolist()).get('tortilla', 0) > 1: # embed() for i, i_ov in enumerate(overlap): if not len(i_ov): continue # get overlapping instances overlap_insts = instances[i_ov.cpu().numpy()] log.info(f"object {filtered_instances.pred_labels[i]} filtered {overlap_insts.pred_labels}") # merge overlapping detections with the same label overlap_insts = overlap_insts[overlap_insts.pred_labels == filtered_instances.pred_labels[i]] if len(overlap_insts): log.info(f"object {filtered_instances.pred_labels[i]} merged {len(overlap_insts)}") filtered_instances.pred_masks[i] |= torch.maximum( filtered_instances.pred_masks[i], overlap_insts.pred_masks.max(0).values) # filtered_instances.pred_masks # log.info("filtered detections %s", len(filtered_instances)) return filtered_instances def predict_hoi(self, image): if self.egohos is None: return None, None # -------------------------- Hand-Object Interaction ------------------------- # # predict HOI hoi_masks, hoi_class_ids = self.egohos(image) keep = hoi_masks.sum(1).sum(1) > 4 hoi_masks = hoi_masks[keep] hoi_class_ids = hoi_class_ids[keep.cpu().numpy()] # create detectron2 instances instances = Instances( image.shape, pred_masks=hoi_masks, pred_boxes=Boxes(masks_to_boxes(hoi_masks)), pred_hoi_classes=hoi_class_ids) # get a mask of the hands hand_mask = hoi_masks[self.egohos_type[hoi_class_ids] == 'hand'].sum(0) return instances, hand_mask def merge_hoi(self, other_detections, hoi_detections, detic_query): if hoi_detections is None: return None is_obj_type = self.egohos_type[hoi_detections.pred_hoi_classes] == 'obj' hoi_obj_detections = hoi_detections[is_obj_type] hoi_obj_masks = hoi_obj_detections.pred_masks hoi_obj_boxes = hoi_obj_detections.pred_boxes.tensor hoi_obj_hand_side = self.egohos_hand_side[hoi_detections.pred_hoi_classes[is_obj_type]] # ----------------- Compare & Merge HOI with Object Detector ----------------- # # get mask iou other_detections = [d for d in other_detections if d is not None] mask_list = [d.pred_masks.to(self.egohos_device) for d in other_detections] det_masks = torch.cat(mask_list) if mask_list else torch.zeros(0, hoi_obj_masks.shape[1:]) iou = mask_iou(det_masks, hoi_obj_masks) # add hand side interaction to tracks i = 0 for d, b in zip(other_detections, mask_list): d.left_hand_interaction = iou[i:i+len(b), hoi_obj_hand_side == 'left'].sum(1) d.right_hand_interaction = iou[i:i+len(b), hoi_obj_hand_side == 'right'].sum(1) d.both_hand_interaction = iou[i:i+len(b), hoi_obj_hand_side == 'both'].sum(1) i += len(b) # ---------------------- Predict class for unlabeled HOI --------------------- # # get hoi objects with poor overlap hoi_iou = iou.sum(0) hoi_is_its_own_obj = hoi_iou < 0.2 bbox = hoi_obj_boxes[hoi_is_its_own_obj].to(self.detic_device) masks = hoi_obj_masks[hoi_is_its_own_obj] scores = torch.Tensor(1 - hoi_iou[hoi_is_its_own_obj]) labels = np.array(['unknown' for i in range(len(bbox))]) try: instances = Instances( hoi_obj_detections.image_size, scores=scores, pred_boxes=Boxes(bbox), pred_masks=masks, pred_labels=labels, ) except AssertionError: print('failed creating unknown instances:\n', bbox.shape, masks.shape, scores.shape, labels.shape, '\n') instances = None return instances # # get labels for HOIs # hoi_outputs = detic_query.predict( # hoi_obj_boxes[hoi_is_its_own_obj].to(self.detic_device), # self.skill_clsf, 
labels=self.skill_labels) # hoi_detections2 = hoi_outputs['instances'] # hoi_detections2.pred_labels[:] = 'unknown' # pm = hoi_obj_detections.pred_masks[hoi_is_its_own_obj] # # if len(hoi_detections2) != len(pm): # # print(len(hoi_detections2)) # # print(hoi_is_its_own_obj) # # print(pm.shape) # hoi_detections2.pred_masks = pm # hoi_is_its_own_obj = hoi_is_its_own_obj.cpu() # hoi_detections2.left_hand_interaction = torch.as_tensor(hoi_obj_hand_side == 'left')[hoi_is_its_own_obj] # hoi_detections2.right_hand_interaction = torch.as_tensor(hoi_obj_hand_side == 'right')[hoi_is_its_own_obj] # hoi_detections2.both_hand_interaction = torch.as_tensor(hoi_obj_hand_side == 'both')[hoi_is_its_own_obj] # # TODO: add top K classes and scores # return hoi_detections2 def filter_objects(self, detections): return detections, detections def track_objects(self, image, detections, negative_mask=None): # det_mask = None det_scores = None if detections is not None: # other_mask = frame_detections.pred_masks det_scores = detections.pred_scores det_mask = detections.pred_masks.to(self.xmem_device) if negative_mask is not None: negative_mask = negative_mask.to(self.xmem_device) # run xmem pred_mask, track_ids, input_track_ids = self.xmem( image, det_mask, negative_mask=negative_mask, mask_scores=det_scores, tracked_labels=self.skill_labels_is_tracked, only_confirmed=True ) # update label counts tracks = self.xmem.tracks if input_track_ids is not None and detections is not None: labels = detections.pred_labels scores = detections.scores for i, ti in enumerate(input_track_ids): if ti >= 0: tracks[ti].label_count.update([labels[i]]) tracks[ti].confidence = scores[i] instances = Instances( image.shape, scores=torch.Tensor([tracks[i].confidence for i in track_ids]), pred_boxes=Boxes(masks_to_boxes(pred_mask)), pred_masks=pred_mask, pred_labels=np.array([tracks[i].pred_label for i in track_ids]), track_ids=torch.as_tensor(track_ids), ) frame_detections = detections if detections is not None and self.filter_tracked_detections_from_frame: frame_detections = detections[~np.isin(detections.pred_labels, self.tracked_vocabulary)] return instances, frame_detections def predict_state(self, image, detections, det_shape=None): states = [] labels = detections.pred_labels has_state = np.isin(labels, self.obj_label_names) track_ids = detections.track_ids.cpu().numpy() if detections.has('track_ids') else None dets = detections[has_state] i_z = {k: i for i, k in enumerate(np.where(has_state)[0])} Z_imgs = self._encode_boxes(image, dets.pred_boxes.tensor, det_shape=det_shape) if len(dets) else None for i in range(len(detections)): pred_label = labels[i] state = {} if has_state[i]: if pred_label in self.sklearn_state_clsfs: z = Z_imgs[i_z[i]].cpu().numpy() c = self.sklearn_state_clsfs[pred_label] y = c.predict_proba(z[None])[0] state = { c: x for c, x in zip(c.labels.tolist(), y.tolist()) } # print(pred_label) # print(sorted(state.items(), key=lambda x: x[1])[-3:]) # input() elif self.state_clsf_type == 'lancedb': z = Z_imgs[i_z[i]].cpu().numpy() df = self.obj_state_tables[pred_label].search(z).limit(11).to_df() state = df[self.state_db_key].value_counts() state = state / state.sum() if track_ids is not None and track_ids[i] in self.xmem.tracks: state = self.xmem.tracks[track_ids[i]].update_state(state, pred_label, self.state_ema) state = state.to_dict() # elif self.state_clsf_type == 'dino': # y = Z_imgs[i_z[i]]#.cpu().numpy() # assert y.shape[-1] == self.dino_state_classes.shape[0] # label_mask = self.dino_label_mask[pred_label] # 
state = dict(zip( # self.dino_state_classes[label_mask].tolist(), # y[label_mask].tolist() # )) # # print(state) states.append(state) # detections.__dict__['pred_states'] = states detections.pred_states = np.array(states) return detections def _encode_boxes(self, img, boxes, det_shape=None): # BGR # encode each bounding box crop with clip # print(f"Clip encoding: {img.shape} {boxes.shape}") # for x, y, x2, y2 in boxes.cpu(): # Image.fromarray(img[ # int(y):max(int(np.ceil(y2)), int(y+2)), # int(x):max(int(np.ceil(x2)), int(x+2)), # ::-1]).save("box.png") # input() sx = sy = 1 if det_shape: hd, wd = det_shape[:2] hi, wi = img.shape[:2] sx = wi / wd sy = hi / hd crops = [ Image.fromarray(img[ max(int(y * sy - 15), 0):max(int(np.ceil(y2 * sy + 15)), int(y * sy + 2)), max(int(x * sx - 15), 0):max(int(np.ceil(x2 * sx + 15)), int(x * sx + 2)), ::-1]) for x, y, x2, y2 in boxes.cpu() ] # for c in crops: # c.save('demo.png') # input() if self.state_clsf_type == 'lancedb': Z = self.clip.encode_image(torch.stack([self.clip_pre(x) for x in crops]).to(self.clip_device)) # Z /= Z.norm(dim=1, keepdim=True) # elif self.state_clsf_type == 'dino': # Z = self.dinov2(torch.stack([self.dino_pre(x) for x in crops]).to(self.clip_device)) # Z = self.dino_head.predict_proba(np.ascontiguousarray(Z.cpu().numpy())) return Z def classify(self, Z, labels): outputs = [] for z, l in zip(Z, labels): z_cls, txt_cls = self.classifiers[l] out = (z @ z_cls.t()).softmax(dim=-1).cpu().numpy() i = np.argmax(out) outputs.append(txt_cls[i]) return np.atleast_1d(np.array(outputs)) def forward(self, img, boxes, labels): valid = self.can_classify(labels) if not valid.any(): return np.array([None]*len(boxes)) labels = np.asanyarray(labels) Z = self.encode_boxes(img, boxes[valid]) clses = self.classify(Z, labels[valid]) all_clses = np.array([None]*len(boxes)) all_clses[valid] = clses return all_clses class Perception: def __init__(self, *a, detect_every_n_seconds=0.5, max_width=480, **kw): self.detector = ObjectDetector(*a, **kw) self.detect_every_n_seconds = 0 if detect_every_n_seconds is True else detect_every_n_seconds self.detection_timestamp = -1e30 self.max_width = max_width def clear_memory(self): self.detector.clear_memory() self.detection_timestamp = -1e30 @torch.no_grad() def predict(self, image, timestamp): # # Get a small version of the image # h, w = image.shape[:2] full_image = image # W = self.max_width # H = int((h * W / w)//16)*16 # # W = int((w * H / h)//16)*16 # image = cv2.resize(image, (W, H)) # ---------------------------------------------------------------------------- # # Detection: every N frames # # ---------------------------------------------------------------------------- # detections = detic_query = hoi_detections = hand_mask = None is_detection_frame = abs(timestamp - self.detection_timestamp) >= self.detect_every_n_seconds if is_detection_frame: self.detection_timestamp = timestamp # -------------------------- First we detect objects ------------------------- # # Detic: detections, detic_query = self.detector.predict_objects(image) # ------------------ Then we detect hand object interactions ----------------- # # EgoHOS: hoi_detections, hand_mask = self.detector.predict_hoi(image) # ---------------------------------------------------------------------------- # # Tracking: Every frame # # ---------------------------------------------------------------------------- # # ------------------------- Then we track the objects ------------------------ # # XMem: track_detections, frame_detections = 
self.detector.track_objects(image, detections, negative_mask=hand_mask) # ---------------------------------------------------------------------------- # # Predicting Object State # # ---------------------------------------------------------------------------- # # -------- For objects with labels we care about, classify their state ------- # # LanceDB: # predict state for tracked objects track_detections = self.detector.predict_state(full_image, track_detections, image.shape) # predict state for untracked objects # if frame_detections is not None: # frame_detections = self.detector.predict_state(image, frame_detections) # ----- Merge our multi-model detections into a single set of detections ----- # # IoU between tracks+frames & hoi: if hoi_detections is not None: # Merging HOI into track_detections, frame_detections, hoi_detections hoi_detections = self.detector.merge_hoi( [track_detections, frame_detections], hoi_detections, detic_query) self.timestamp = timestamp return track_detections, frame_detections, hoi_detections def serialize_detections(self, detections, frame_shape, include_mask=False): if detections is None: return None bboxes = detections.pred_boxes.tensor.cpu().numpy() bboxes[:, 0] /= frame_shape[1] bboxes[:, 1] /= frame_shape[0] bboxes[:, 2] /= frame_shape[1] bboxes[:, 3] /= frame_shape[0] labels = detections.pred_labels track_ids = detections.track_ids.cpu().numpy() if detections.has('track_ids') else None scores = detections.scores.cpu().numpy() if detections.has('scores') else None hand_object = { k: f'{k}_hand_interaction' for k in ['left', 'right', 'both'] } hand_object = { k: detections.get(kk).cpu().numpy() for k, kk in hand_object.items() if detections.has(kk)} possible_labels = None if detections.has('topk_scores'): possible_labels = [ {k: v for k, v in zip(ls.tolist(), ss.tolist()) if v > 0} for ls, ss in zip(detections.topk_labels, detections.topk_scores.cpu().numpy()) ] segments = None if include_mask and detections.has('pred_masks'): segments = [ norm_contours(cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0], frame_shape) for mask in detections.pred_masks.cpu().numpy().astype(np.uint8) ] states = detections.pred_states if detections.has('pred_states') else None output = [] for i in range(len(detections)): data = { 'xyxyn': bboxes[i].tolist(), 'label': labels[i], } if scores is not None: data['confidence'] = scores[i] if hand_object: data['hand_object'] = ho = {k: x[i] for k, x in hand_object.items()} data['hand_object_interaction'] = max(ho.values(), default=0) if possible_labels: data['possible_labels'] = possible_labels[i] if segments: data['segment'] = segments[i] if states is not None: data['state'] = states[i] if track_ids is not None: data['segment_track_id'] = track_ids[i] output.append(data) return output def norm_contours(contours, shape): contours = list(contours) WH = np.array(shape[:2][::-1]) for i in range(len(contours)): contours[i] = np.asarray(contours[i]) / WH return contours
[ "lancedb.connect" ]
[((690, 717), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (707, 717), False, 'import logging\n'), ((2546, 2567), 'detectron2.structures.Instances', 'Instances', (['image_size'], {}), '(image_size)\n', (2555, 2567), False, 'from detectron2.structures import Boxes, Instances, pairwise_iou\n'), ((25276, 25291), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25289, 25291), False, 'import torch\n'), ((30636, 30661), 'numpy.array', 'np.array', (['shape[:2][::-1]'], {}), '(shape[:2][::-1])\n', (30644, 30661), True, 'import numpy as np\n'), ((1109, 1118), 'collections.Counter', 'Counter', ([], {}), '()\n', (1116, 1118), False, 'from collections import Counter, defaultdict, deque\n'), ((1152, 1183), 'collections.deque', 'deque', ([], {'maxlen': 'state_history_len'}), '(maxlen=state_history_len)\n', (1157, 1183), False, 'from collections import Counter, defaultdict, deque\n'), ((1211, 1245), 'collections.deque', 'deque', ([], {'maxlen': 'hand_obj_history_len'}), '(maxlen=hand_obj_history_len)\n', (1216, 1245), False, 'from collections import Counter, defaultdict, deque\n'), ((1276, 1298), 'pandas.Series', 'pd.Series', ([], {'dtype': 'float'}), '(dtype=float)\n', (1285, 1298), True, 'import pandas as pd\n'), ((4361, 4439), 'numpy.array', 'np.array', (["['', 'hand', 'hand', 'obj', 'obj', 'obj', 'obj', 'obj', 'obj', 'cb']"], {}), "(['', 'hand', 'hand', 'obj', 'obj', 'obj', 'obj', 'obj', 'obj', 'cb'])\n", (4369, 4439), True, 'import numpy as np\n'), ((4472, 4561), 'numpy.array', 'np.array', (["['', 'left', 'right', 'left', 'right', 'both', 'left', 'right', 'both', '']"], {}), "(['', 'left', 'right', 'left', 'right', 'both', 'left', 'right',\n 'both', ''])\n", (4480, 4561), True, 'import numpy as np\n'), ((7642, 7664), 'numpy.asarray', 'np.asarray', (["['IGNORE']"], {}), "(['IGNORE'])\n", (7652, 7664), True, 'import numpy as np\n'), ((7698, 7776), 'detic.inference.load_classifier', 'load_classifier', (['full_prompts'], {'metadata_name': '"""lvis+"""', 'device': 'self.detic_device'}), "(full_prompts, metadata_name='lvis+', device=self.detic_device)\n", (7713, 7776), False, 'from detic.inference import load_classifier\n'), ((7805, 7827), 'numpy.asarray', 'np.asarray', (['full_vocab'], {}), '(full_vocab)\n', (7815, 7827), True, 'import numpy as np\n'), ((7867, 7918), 'numpy.isin', 'np.isin', (['self.skill_labels', 'self.tracked_vocabulary'], {}), '(self.skill_labels, self.tracked_vocabulary)\n', (7874, 7918), True, 'import numpy as np\n'), ((16878, 16923), 'torch.Tensor', 'torch.Tensor', (['(1 - hoi_iou[hoi_is_its_own_obj])'], {}), '(1 - hoi_iou[hoi_is_its_own_obj])\n', (16890, 16923), False, 'import torch\n'), ((20496, 20533), 'numpy.isin', 'np.isin', (['labels', 'self.obj_label_names'], {}), '(labels, self.obj_label_names)\n', (20503, 20533), True, 'import numpy as np\n'), ((22621, 22637), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (22629, 22637), True, 'import numpy as np\n'), ((24614, 24635), 'numpy.asanyarray', 'np.asanyarray', (['labels'], {}), '(labels)\n', (24627, 24635), True, 'import numpy as np\n'), ((1686, 1708), 'pandas.Series', 'pd.Series', ([], {'dtype': 'float'}), '(dtype=float)\n', (1695, 1708), True, 'import pandas as pd\n'), ((2752, 2776), 'torch.cat', 'torch.cat', (['values'], {'dim': '(0)'}), '(values, dim=0)\n', (2761, 2776), False, 'import torch\n'), ((5565, 5615), 'detic.inference.load_classifier', 'load_classifier', (["vocabulary['base']"], {'prepare': '(False)'}), "(vocabulary['base'], prepare=False)\n", (5580, 5615), False, 
'from detic.inference import load_classifier\n'), ((15994, 16014), 'torch.cat', 'torch.cat', (['mask_list'], {}), '(mask_list)\n', (16003, 16014), False, 'import torch\n'), ((16033, 16072), 'torch.zeros', 'torch.zeros', (['(0)', 'hoi_obj_masks.shape[1:]'], {}), '(0, hoi_obj_masks.shape[1:])\n', (16044, 16072), False, 'import torch\n'), ((24334, 24348), 'numpy.argmax', 'np.argmax', (['out'], {}), '(out)\n', (24343, 24348), True, 'import numpy as np\n'), ((24417, 24434), 'numpy.array', 'np.array', (['outputs'], {}), '(outputs)\n', (24425, 24434), True, 'import numpy as np\n'), ((30719, 30742), 'numpy.asarray', 'np.asarray', (['contours[i]'], {}), '(contours[i])\n', (30729, 30742), True, 'import numpy as np\n'), ((8300, 8346), 'clip.load', 'clip.load', (['"""ViT-B/32"""'], {'device': 'self.clip_device'}), "('ViT-B/32', device=self.clip_device)\n", (8309, 8346), False, 'import clip\n'), ((8500, 8531), 'lancedb.connect', 'lancedb.connect', (['state_db_fname'], {}), '(state_db_fname)\n', (8515, 8531), False, 'import lancedb\n'), ((9096, 9145), 'os.path.join', 'os.path.join', (['custom_state_clsf_fname', '"""*.joblib"""'], {}), "(custom_state_clsf_fname, '*.joblib')\n", (9108, 9145), False, 'import os\n'), ((9289, 9303), 'joblib.load', 'joblib.load', (['f'], {}), '(f)\n', (9300, 9303), False, 'import joblib\n'), ((11199, 11247), 'numpy.isin', 'np.isin', (['instances.pred_labels', 'self.base_labels'], {}), '(instances.pred_labels, self.base_labels)\n', (11206, 11247), True, 'import numpy as np\n'), ((13013, 13068), 'numpy.isin', 'np.isin', (['instances.pred_labels', 'self.ignored_vocabulary'], {}), '(instances.pred_labels, self.ignored_vocabulary)\n', (13020, 13068), True, 'import numpy as np\n'), ((19801, 19856), 'torch.Tensor', 'torch.Tensor', (['[tracks[i].confidence for i in track_ids]'], {}), '([tracks[i].confidence for i in track_ids])\n', (19813, 19856), False, 'import torch\n'), ((19973, 20024), 'numpy.array', 'np.array', (['[tracks[i].pred_label for i in track_ids]'], {}), '([tracks[i].pred_label for i in track_ids])\n', (19981, 20024), True, 'import numpy as np\n'), ((20048, 20074), 'torch.as_tensor', 'torch.as_tensor', (['track_ids'], {}), '(track_ids)\n', (20063, 20074), False, 'import torch\n'), ((2838, 2862), 'itertools.chain', 'itertools.chain', (['*values'], {}), '(*values)\n', (2853, 2862), False, 'import itertools\n'), ((11498, 11524), 'numpy.isin', 'np.isin', (['h.pred_labels', 'ls'], {}), '(h.pred_labels, ls)\n', (11505, 11524), True, 'import numpy as np\n'), ((13172, 13227), 'numpy.isin', 'np.isin', (['instances.pred_labels', 'self.tracked_vocabulary'], {}), '(instances.pred_labels, self.tracked_vocabulary)\n', (13179, 13227), True, 'import numpy as np\n'), ((14998, 15023), 'torchvision.ops.masks_to_boxes', 'masks_to_boxes', (['hoi_masks'], {}), '(hoi_masks)\n', (15012, 15023), False, 'from torchvision.ops import masks_to_boxes\n'), ((17142, 17153), 'detectron2.structures.Boxes', 'Boxes', (['bbox'], {}), '(bbox)\n', (17147, 17153), False, 'from detectron2.structures import Boxes, Instances, pairwise_iou\n'), ((19887, 19912), 'torchvision.ops.masks_to_boxes', 'masks_to_boxes', (['pred_mask'], {}), '(pred_mask)\n', (19901, 19912), False, 'from torchvision.ops import masks_to_boxes\n'), ((20249, 20305), 'numpy.isin', 'np.isin', (['detections.pred_labels', 'self.tracked_vocabulary'], {}), '(detections.pred_labels, self.tracked_vocabulary)\n', (20256, 20305), True, 'import numpy as np\n'), ((3007, 3037), 'numpy.concatenate', 'np.concatenate', (['values'], {'axis': '(0)'}), 
'(values, axis=0)\n', (3021, 3037), True, 'import numpy as np\n'), ((4012, 4145), 'detic.Detic', 'Detic', (['[]'], {'config': 'detic_config_key', 'masks': '(True)', 'one_class_per_proposal': '(3)', 'conf_threshold': 'conf_threshold', 'device': 'self.detic_device'}), '([], config=detic_config_key, masks=True, one_class_per_proposal=3,\n conf_threshold=conf_threshold, device=self.detic_device)\n', (4017, 4145), False, 'from detic import Detic\n'), ((4683, 4724), 'egohos.EgoHos', 'EgoHos', (['"""obj1"""'], {'device': 'self.egohos_device'}), "('obj1', device=self.egohos_device)\n", (4689, 4724), False, 'from egohos import EgoHos\n'), ((4908, 5249), 'xmem.XMem', 'XMem', (["{'top_k': 30, 'mem_every': 30, 'deep_update_every': -1, 'enable_long_term':\n True, 'enable_long_term_count_usage': True, 'num_prototypes': 128,\n 'min_mid_term_frames': 6, 'max_mid_term_frames': 12,\n 'max_long_term_elements': 1000, 'tentative_frames': 3, 'tentative_age':\n 3, 'max_age': 60, **xmem_config}"], {'Track': 'CustomTrack'}), "({'top_k': 30, 'mem_every': 30, 'deep_update_every': -1,\n 'enable_long_term': True, 'enable_long_term_count_usage': True,\n 'num_prototypes': 128, 'min_mid_term_frames': 6, 'max_mid_term_frames':\n 12, 'max_long_term_elements': 1000, 'tentative_frames': 3,\n 'tentative_age': 3, 'max_age': 60, **xmem_config}, Track=CustomTrack)\n", (4912, 5249), False, 'from xmem import XMem\n'), ((6835, 6848), 'torch.load', 'torch.load', (['h'], {}), '(h)\n', (6845, 6848), False, 'import torch\n'), ((9189, 9208), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (9205, 9208), False, 'import os\n'), ((20709, 20728), 'numpy.where', 'np.where', (['has_state'], {}), '(has_state)\n', (20717, 20728), True, 'import numpy as np\n'), ((29453, 29515), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (29469, 29515), False, 'import cv2\n'), ((23366, 23387), 'numpy.ceil', 'np.ceil', (['(y2 * sy + 15)'], {}), '(y2 * sy + 15)\n', (23373, 23387), True, 'import numpy as np\n'), ((23457, 23478), 'numpy.ceil', 'np.ceil', (['(x2 * sx + 15)'], {}), '(x2 * sx + 15)\n', (23464, 23478), True, 'import numpy as np\n'), ((9365, 9418), 'os.path.join', 'os.path.join', (['custom_state_clsf_fname', 'f"""{cname}.txt"""'], {}), "(custom_state_clsf_fname, f'{cname}.txt')\n", (9377, 9418), False, 'import os\n')]
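Buried in the module above is a LanceDB pattern worth isolating: one table per object label, each row holding a CLIP embedding plus a state label, and state classification done as a normalized k-nearest-neighbor vote (see `predict_state`). A self-contained sketch of that pattern with hypothetical database path and column names:

    import lancedb
    import numpy as np

    db = lancedb.connect("states.lancedb")    # hypothetical path
    tbl = db["tortilla"]                     # one table per object label, as in the module
    z = np.random.rand(512)                  # stand-in for a CLIP box-crop embedding
    df = tbl.search(z).limit(11).to_df()     # 11 nearest neighbors, as in predict_state
    state = df["state"].value_counts()       # "state" is the configurable state_key column
    state = state / state.sum()              # normalized vote over neighbor states

The module then smooths this per-frame distribution with an EMA inside each track (`CustomTrack.update_state`), so a single noisy frame cannot flip an object's state.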
import os, sqlite3, lancedb, tiktoken, bcrypt
from pinecone import Pinecone, ServerlessSpec
from enum import Enum
from langchain_community.vectorstores import LanceDB, Chroma
from langchain_community.vectorstores import Pinecone as LangPinecone
import streamlit as st

def SetHeader(page_title: str):
    st.set_page_config(page_title=page_title, page_icon="https://indico.bnl.gov/event/19560/logo-410523303.png", layout="wide")
    st.warning("This project is being continuously developed. Please write to ai4eic@gmail.com for any feedback.")
    col_l, col1, col2, col_r = st.columns([1, 3, 3, 1])
    with col1:
        st.image("https://indico.bnl.gov/event/19560/logo-410523303.png")
    with col2:
        st.title("""AI4EIC - RAG QA-ChatBot""", anchor="AI4EIC-RAG-QA-Bot", help="Will Link to arxiv proceeding here.")

class UserNotFoundError(Exception):
    pass

class DBNotFoundError(Exception):
    pass

def hash_password(password: str):
    bytes = password.encode('utf-8')
    salt = bcrypt.gensalt()
    return bcrypt.hashpw(bytes, salt)

def get_user_info(db_name, username):
    if not os.path.exists(db_name):
        raise FileNotFoundError(f"Database {db_name} does not exist.")
    conn = sqlite3.connect(db_name)
    cursor = conn.cursor()
    cursor.execute(
        '''
        SELECT username, first_name, last_name, password FROM users WHERE username = ?
        ''',
        (username,),
    )
    user = cursor.fetchone()
    conn.close()
    if user:
        return user
    else:
        return None

def SetOpenAIModel(model_name: str):
    # NOTE: incomplete stub; only handles the "4" case, all other names fall through to None
    if model_name == "4":
        return False

class VectorDB(Enum):
    LANCE = 1
    CHROMA = 2
    PINECONE = 3

def GetRetriever(TYPE: str, vector_config: dict, search_config={}):
    if TYPE == VectorDB.LANCE.name:
        db = lancedb.connect(vector_config["db_name"])
        table = db.open_table(vector_config["table_name"])
        return LanceDB(
            connection=table,
            embedding=vector_config["embedding_function"],
        ).as_retriever(
            search_type=search_config.get("metric", "similarity"),
            search_kwargs=search_config.get("search_kwargs", {"k": 100}),
        )
    elif TYPE == VectorDB.CHROMA.name:
        return Chroma(
            persist_directory=vector_config["db_name"],
            embedding_function=vector_config["embedding_function"],
            collection_name=vector_config["collection_name"],
        ).as_retriever(
            search_type=search_config.get("metric", "similarity"),
            search_kwargs=search_config.get("search_kwargs", {"k": 100}),
        )
    elif TYPE == VectorDB.PINECONE.name:
        pc = Pinecone(api_key=vector_config["db_api_key"])
        if vector_config["index_name"] not in pc.list_indexes().names():
            raise DBNotFoundError(f"Database {vector_config['index_name']} does not exist.")
        return LangPinecone.from_existing_index(
            vector_config["index_name"],
            vector_config["embedding_function"],
        ).as_retriever(
            search_type=search_config.get("metric", "similarity"),
            search_kwargs=search_config.get("search_kwargs", {"k": 100}),
        )
    else:
        raise NotImplementedError("Invalid VectorDB type")

def num_tokens_from_prompt(prompt: str, model: str) -> int:
    """Return the number of tokens used by a prompt."""
    encoding = tiktoken.encoding_for_model(model)
    return len(encoding.encode(prompt))

def num_tokens_from_messages(messages, model) -> int:
    """Return the number of tokens used by a list of messages."""
    encoding = tiktoken.encoding_for_model(model)
    tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
    tokens_per_name = 1
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 4  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens
[ "lancedb.connect" ]
[((305, 433), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': 'page_title', 'page_icon': '"""https://indico.bnl.gov/event/19560/logo-410523303.png"""', 'layout': '"""wide"""'}), "(page_title=page_title, page_icon=\n 'https://indico.bnl.gov/event/19560/logo-410523303.png', layout='wide')\n", (323, 433), True, 'import streamlit as st\n'), ((433, 553), 'streamlit.warning', 'st.warning', (['"""This project is being continuously developed. Please write to ai4eic@gmail.com for any feedback."""'], {}), "(\n 'This project is being continuously developed. Please write to ai4eic@gmail.com for any feedback.'\n )\n", (443, 553), True, 'import streamlit as st\n'), ((575, 599), 'streamlit.columns', 'st.columns', (['[1, 3, 3, 1]'], {}), '([1, 3, 3, 1])\n', (585, 599), True, 'import streamlit as st\n'), ((1000, 1016), 'bcrypt.gensalt', 'bcrypt.gensalt', ([], {}), '()\n', (1014, 1016), False, 'import os, sqlite3, lancedb, tiktoken, bcrypt\n'), ((1028, 1054), 'bcrypt.hashpw', 'bcrypt.hashpw', (['bytes', 'salt'], {}), '(bytes, salt)\n', (1041, 1054), False, 'import os, sqlite3, lancedb, tiktoken, bcrypt\n'), ((1211, 1235), 'sqlite3.connect', 'sqlite3.connect', (['db_name'], {}), '(db_name)\n', (1226, 1235), False, 'import os, sqlite3, lancedb, tiktoken, bcrypt\n'), ((3705, 3739), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['model'], {}), '(model)\n', (3732, 3739), False, 'import os, sqlite3, lancedb, tiktoken, bcrypt\n'), ((3927, 3961), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['model'], {}), '(model)\n', (3954, 3961), False, 'import os, sqlite3, lancedb, tiktoken, bcrypt\n'), ((623, 688), 'streamlit.image', 'st.image', (['"""https://indico.bnl.gov/event/19560/logo-410523303.png"""'], {}), "('https://indico.bnl.gov/event/19560/logo-410523303.png')\n", (631, 688), True, 'import streamlit as st\n'), ((712, 824), 'streamlit.title', 'st.title', (['"""AI4EIC - RAG QA-ChatBot"""'], {'anchor': '"""AI4EIC-RAG-QA-Bot"""', 'help': '"""Will Link to arxiv proceeding here."""'}), "('AI4EIC - RAG QA-ChatBot', anchor='AI4EIC-RAG-QA-Bot', help=\n 'Will Link to arxiv proceeding here.')\n", (720, 824), True, 'import streamlit as st\n'), ((1104, 1127), 'os.path.exists', 'os.path.exists', (['db_name'], {}), '(db_name)\n', (1118, 1127), False, 'import os, sqlite3, lancedb, tiktoken, bcrypt\n'), ((1793, 1834), 'lancedb.connect', 'lancedb.connect', (["vector_config['db_name']"], {}), "(vector_config['db_name'])\n", (1808, 1834), False, 'import os, sqlite3, lancedb, tiktoken, bcrypt\n'), ((1909, 1981), 'langchain_community.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': "vector_config['embedding_function']"}), "(connection=table, embedding=vector_config['embedding_function'])\n", (1916, 1981), False, 'from langchain_community.vectorstores import LanceDB, Chroma\n'), ((2793, 2838), 'pinecone.Pinecone', 'Pinecone', ([], {'api_key': "vector_config['db_api_key']"}), "(api_key=vector_config['db_api_key'])\n", (2801, 2838), False, 'from pinecone import Pinecone, ServerlessSpec\n'), ((2300, 2466), 'langchain_community.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': "vector_config['db_name']", 'embedding_function': "vector_config['embedding_function']", 'collection_name': "vector_config['collection_name']"}), "(persist_directory=vector_config['db_name'], embedding_function=\n vector_config['embedding_function'], collection_name=vector_config[\n 'collection_name'])\n", (2306, 2466), False, 'from langchain_community.vectorstores import 
LanceDB, Chroma\n'), ((3022, 3125), 'langchain_community.vectorstores.Pinecone.from_existing_index', 'LangPinecone.from_existing_index', (["vector_config['index_name']", "vector_config['embedding_function']"], {}), "(vector_config['index_name'], vector_config\n ['embedding_function'])\n", (3054, 3125), True, 'from langchain_community.vectorstores import Pinecone as LangPinecone\n')]
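A hedged usage sketch for `GetRetriever` above with the LanceDB backend; the paths, table name, and choice of embedding function are placeholders, not part of the original module:

    from langchain_community.embeddings import OpenAIEmbeddings  # an assumed choice

    retriever = GetRetriever(
        TYPE=VectorDB.LANCE.name,
        vector_config={
            "db_name": "/path/to/lancedb",     # placeholder
            "table_name": "arxiv_docs",        # placeholder
            "embedding_function": OpenAIEmbeddings(),
        },
        search_config={"metric": "similarity", "search_kwargs": {"k": 25}},
    )
    docs = retriever.get_relevant_documents("How are RAG responses evaluated?")

Note that the LanceDB branch opens an existing table via `db.open_table`, so the table must already have been ingested; the Chroma and Pinecone branches likewise assume pre-built stores.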
"""LanceDB vector store.""" import logging from typing import Any, List, Optional import numpy as np from pandas import DataFrame from llama_index.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.vector_stores.types import ( MetadataFilters, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from llama_index.vector_stores.utils import ( DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict, ) _logger = logging.getLogger(__name__) def _to_lance_filter(standard_filters: MetadataFilters) -> Any: """Translate standard metadata filters to Lance specific spec.""" filters = [] for filter in standard_filters.legacy_filters(): if isinstance(filter.value, str): filters.append(filter.key + ' = "' + filter.value + '"') else: filters.append(filter.key + " = " + str(filter.value)) return " AND ".join(filters) def _to_llama_similarities(results: DataFrame) -> List[float]: keys = results.keys() normalized_similarities: np.ndarray if "score" in keys: normalized_similarities = np.exp(results["score"] - np.max(results["score"])) elif "_distance" in keys: normalized_similarities = np.exp(-results["_distance"]) else: normalized_similarities = np.linspace(1, 0, len(results)) return normalized_similarities.tolist() class LanceDBVectorStore(VectorStore): """The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. """ stores_text = True flat_metadata: bool = True def __init__( self, uri: str, table_name: str = "vectors", nprobes: int = 20, refine_factor: Optional[int] = None, text_key: str = DEFAULT_TEXT_KEY, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.nprobes = nprobes self.text_key = text_key self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return def add( self, nodes: List[BaseNode], **add_kwargs: Any, ) -> List[str]: data = [] ids = [] for node in nodes: metadata = node_to_metadata_dict( node, remove_text=False, flat_metadata=self.flat_metadata ) append_data = { "id": node.node_id, "doc_id": node.ref_doc_id, "vector": node.get_embedding(), "text": node.get_content(metadata_mode=MetadataMode.NONE), "metadata": metadata, } data.append(append_data) ids.append(node.node_id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. 
""" table = self.connection.open_table(self.table_name) table.delete('document_id = "' + ref_doc_id + '"') def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface." ) where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) table = self.connection.open_table(self.table_name) lance_query = ( table.search(query.query_embedding) .limit(query.similarity_top_k) .where(where) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_df() nodes = [] for _, item in results.iterrows(): try: node = metadata_dict_to_node(item.metadata) node.embedding = list(item.vector) except Exception: # deprecated legacy logic for backward compatibility _logger.debug( "Failed to parse Node metadata, fallback to legacy logic." ) metadata, node_info, _relation = legacy_metadata_dict_to_node( item.metadata, text_key=self.text_key ) node = TextNode( text=item.text or "", id_=item.id, metadata=metadata, start_char_idx=node_info.get("start", None), end_char_idx=node_info.get("end", None), relationships={ NodeRelationship.SOURCE: RelatedNodeInfo(node_id=item.doc_id), }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((561, 588), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (578, 588), False, 'import logging\n'), ((2970, 2990), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2985, 2990), False, 'import lancedb\n'), ((1325, 1354), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1331, 1354), True, 'import numpy as np\n'), ((3435, 3520), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(False)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=False, flat_metadata=self.flat_metadata\n )\n', (3456, 3520), False, 'from llama_index.vector_stores.utils import DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((1235, 1259), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1241, 1259), True, 'import numpy as np\n'), ((5607, 5643), 'llama_index.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['item.metadata'], {}), '(item.metadata)\n', (5628, 5643), False, 'from llama_index.vector_stores.utils import DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((5971, 6038), 'llama_index.vector_stores.utils.legacy_metadata_dict_to_node', 'legacy_metadata_dict_to_node', (['item.metadata'], {'text_key': 'self.text_key'}), '(item.metadata, text_key=self.text_key)\n', (5999, 6038), False, 'from llama_index.vector_stores.utils import DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((6435, 6471), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item.doc_id'}), '(node_id=item.doc_id)\n', (6450, 6471), False, 'from llama_index.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')]
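The `_to_lance_filter` helper above is what turns llama_index metadata filters into the SQL-like `where` string LanceDB expects. A small sketch of that mapping, assuming the `ExactMatchFilter` class from the same `llama_index.vector_stores.types` module of that era:

    from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters

    filters = MetadataFilters(filters=[
        ExactMatchFilter(key="year", value=2023),
        ExactMatchFilter(key="genre", value="ScienceFiction"),
    ])
    # _to_lance_filter(filters) -> 'year = 2023 AND genre = "ScienceFiction"'
    # which query() then passes to table.search(...).where(...)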
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceBgeEmbeddings
from io import BytesIO
from langchain.document_loaders import PyPDFLoader
import gradio as gr
import lancedb
from langchain.vectorstores import LanceDB
from langchain.document_loaders import ArxivLoader
from langchain.chains import FlareChain
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
import os
from langchain.llms import OpenAI
import getpass

os.environ["OPENAI_API_KEY"] = "sk-yourapikeyforopenai"

llm = OpenAI()

model_name = "BAAI/bge-large-en"
model_kwargs = {"device": "cpu"}
encode_kwargs = {"normalize_embeddings": False}
embeddings = HuggingFaceBgeEmbeddings(
    model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs
)

# here is example https://arxiv.org/pdf/2305.06983.pdf
# you need to pass this number to query 2305.06983
# fetch docs from arxiv, in this case it's the FLARE paper
docs = ArxivLoader(query="2305.06983", load_max_docs=2).load()

# instantiate text splitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=150)

# split the document into chunks
doc_chunks = text_splitter.split_documents(docs)

# lancedb vectordb
db = lancedb.connect("/tmp/lancedb")
table = db.create_table(
    "documentsai",
    data=[
        {
            "vector": embeddings.embed_query("Hello World"),
            "text": "Hello World",
            "id": "1",
        }
    ],
    mode="overwrite",
)
vector_store = LanceDB.from_documents(doc_chunks, embeddings, connection=table)

vector_store_retriever = vector_store.as_retriever()

flare = FlareChain.from_llm(
    llm=llm, retriever=vector_store_retriever, max_generation_len=300, min_prob=0.45
)


# Define a function to generate FLARE output based on user input
def generate_flare_output(input_text):
    output = flare.run(input_text)
    return output


input = gr.Text(
    label="Prompt",
    show_label=False,
    max_lines=1,
    placeholder="Enter your prompt",
    container=False,
)

iface = gr.Interface(
    fn=generate_flare_output,
    inputs=input,
    outputs="text",
    title="My AI bot",
    description="FLARE implementation with lancedb & bge embedding.",
)

iface.launch(debug=True, share=True)
[ "lancedb.connect" ]
[((704, 712), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (710, 712), False, 'from langchain.llms import OpenAI\n'), ((841, 948), 'langchain.embeddings.HuggingFaceBgeEmbeddings', 'HuggingFaceBgeEmbeddings', ([], {'model_name': 'model_name', 'model_kwargs': 'model_kwargs', 'encode_kwargs': 'encode_kwargs'}), '(model_name=model_name, model_kwargs=model_kwargs,\n encode_kwargs=encode_kwargs)\n', (865, 948), False, 'from langchain.embeddings import HuggingFaceBgeEmbeddings\n'), ((1225, 1291), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1500)', 'chunk_overlap': '(150)'}), '(chunk_size=1500, chunk_overlap=150)\n', (1255, 1291), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1400, 1431), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (1415, 1431), False, 'import lancedb\n'), ((1672, 1736), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['doc_chunks', 'embeddings'], {'connection': 'table'}), '(doc_chunks, embeddings, connection=table)\n', (1694, 1736), False, 'from langchain.vectorstores import LanceDB\n'), ((1800, 1905), 'langchain.chains.FlareChain.from_llm', 'FlareChain.from_llm', ([], {'llm': 'llm', 'retriever': 'vector_store_retriever', 'max_generation_len': '(300)', 'min_prob': '(0.45)'}), '(llm=llm, retriever=vector_store_retriever,\n max_generation_len=300, min_prob=0.45)\n', (1819, 1905), False, 'from langchain.chains import FlareChain\n'), ((2077, 2186), 'gradio.Text', 'gr.Text', ([], {'label': '"""Prompt"""', 'show_label': '(False)', 'max_lines': '(1)', 'placeholder': '"""Enter your prompt"""', 'container': '(False)'}), "(label='Prompt', show_label=False, max_lines=1, placeholder=\n 'Enter your prompt', container=False)\n", (2084, 2186), True, 'import gradio as gr\n'), ((2214, 2377), 'gradio.Interface', 'gr.Interface', ([], {'fn': 'generate_flare_output', 'inputs': 'input', 'outputs': '"""text"""', 'title': '"""My AI bot"""', 'description': '"""FLARE implementation with lancedb & bge embedding."""'}), "(fn=generate_flare_output, inputs=input, outputs='text', title=\n 'My AI bot', description=\n 'FLARE implementation with lancedb & bge embedding.')\n", (2226, 2377), True, 'import gradio as gr\n'), ((1124, 1172), 'langchain.document_loaders.ArxivLoader', 'ArxivLoader', ([], {'query': '"""2305.06983"""', 'load_max_docs': '(2)'}), "(query='2305.06983', load_max_docs=2)\n", (1135, 1172), False, 'from langchain.document_loaders import ArxivLoader\n')]
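A quick sketch of exercising the chain above without the Gradio UI; the question text is a placeholder. FLARE triggers a retrieval whenever generated-token confidence falls below `min_prob` (0.45 here), so larger `max_generation_len` windows give it more look-ahead to decide when to retrieve:

    answer = flare.run("Summarize the FLARE paper's main idea.")  # placeholder question
    print(answer)

Note the table is seeded with a single "Hello World" row only to fix the schema; that placeholder row remains in the index alongside the real document chunks.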
import os import re import instructor import openai import pandas as pd import lancedb from typing import Optional, List from pydantic import BaseModel, Field from tenacity import Retrying, stop_after_attempt, wait_fixed from src.embedding_models.models import OpenAIEmbeddings from src.utils.logging import setup_colored_logging logger = setup_colored_logging(__name__) class QueryFilterPlan(BaseModel): """A revised user query, possibly improved by filtering.""" original_query: str = Field( ..., description="The original user query." ) filter: Optional[str] | None = Field( None, description="An SQL-like filter inferred from the user query." ) rephrased_query: str = Field( ..., description="A rephrased query based on the FILTER fields, or an empty string if no filter is needed.", ) def filter_df(self, df: pd.DataFrame) -> pd.DataFrame: """ Filters the given DataFrame based on the (LLM) defined filter, using LanceDB for querying. Args: df (pd.DataFrame): The DataFrame to be filtered. Returns: pd.DataFrame: The filtered DataFrame. """ if not self.filter: logger.info(f"No filters have been set! Returning input DataFrame.") return df else: logger.info(f"Input DataFrame has {len(df):,} rows") logger.info(f"Applying filter(s): {self.filter}") uri = "../temp-lancedb/pd_table.lance" db = lancedb.connect(uri) try: table = db.create_table("temp_lance", data=df, mode="create") except: table = db.create_table("temp_lance", data=df, mode="overwrite") result_df = ( table.search() .where(self.filter) .limit(None) .to_df() ) db.drop_database() logger.info(f"Filtered DataFrame has {len(result_df):,} rows") return result_df system_message = f""" You will receive a QUERY, to be answered based on an EXTREMELY LARGE collection of documents you DO NOT have access to, but your ASSISTANT does. You only know that these documents contain text content and FILTERABLE fields in the SCHEMA below: {{doc_schema}} Based on the QUERY and the above SCHEMA, your task is to determine a QUERY PLAN, consisting of: - a FILTER (can be None) that would help the ASSISTANT to answer the query. Remember the FILTER can refer to ANY fields in the above SCHEMA. To get good results, for STRING MATCHES, consider using LIKE instead of =, e.g. "CEO LIKE '%Jobs%'" instead of "CEO = 'Steve Jobs'" - a possibly REPHRASED QUERY to be answerable given the FILTER. Keep in mind that the ASSISTANT does NOT know anything about the FILTER fields, so the REPHRASED QUERY should NOT mention ANY FILTER fields. EXAMPLE: ------- Suppose there is a document-set about crime reports, where: CONTENT = crime report, Filterable SCHEMA consists of City, Year, num_deaths. Then given this ORIGINAL QUERY: What were the total deaths in shoplifting crimes in Los Angeles in 2023? A POSSIBLE QUERY PLAN could be: FILTER: "City LIKE '%Los Angeles%' AND Year = 2023" REPHRASED QUERY: "shoplifting crime" --> this will be used to MATCH content of docs [NOTE: we dropped the FILTER fields City and Year since the ASSISTANT does not know about them and only uses the query to match the CONTENT of the docs.] ------------- END OF EXAMPLE ---------------- The FILTER must be a SQL-like condition, e.g. "year > 2000 AND genre = 'ScienceFiction'". To ensure you get useful results, you should make your FILTER NOT TOO STRICT, e.g. look for approximate match using LIKE, etc. E.g. 
"CEO LIKE '%Jobs%'" instead of "CEO = 'Steve Jobs'" """ def describe_dataframe( input_df: pd.DataFrame, filter_fields: List[str] = [], n_vals: int = 10 ) -> str: """ Generates a description of the columns in the dataframe, along with a listing of up to `n_vals` unique values for each column. Intended to be used to insert into an LLM context so it can generate appropriate queries or filters on the df. Args: df (pd.DataFrame): The dataframe to describe. filter_fields (list): A list of fields that can be used for filtering. When non-empty, the values-list will be restricted to these. n_vals (int): How many unique values to show for each column. Returns: str: A description of the dataframe. """ # Convert column names to snake_case for compatibility with LanceDB df = input_df[filter_fields] description = [] for column in df.columns.to_list(): unique_values = df[column].dropna().unique() unique_count = len(unique_values) if unique_count > n_vals: displayed_values = unique_values[:n_vals] more_count = unique_count - n_vals values_desc = f" Values - {displayed_values}, ... {more_count} more" else: values_desc = f" Values - {unique_values}" col_type = "string" if df[column].dtype == "object" else df[column].dtype col_desc = f"* {column} ({col_type}); {values_desc}" description.append(col_desc) all_cols = "\n".join(description) return f""" Name of each field, its type and unique values (up to {n_vals}): {all_cols} """ def generate_query_plan( input_df: pd.DataFrame, query: str, filter_fields: List[str], n_vals: int = 20 ) -> QueryFilterPlan: client = instructor.patch(openai.OpenAI()) df = input_df[filter_fields] filter_string = describe_dataframe( input_df=df, filter_fields=filter_fields, n_vals=n_vals, ) logger.info(f"Schema shown to LLM: {filter_string}") return client.chat.completions.create( model="gpt-4-1106-preview", response_model=QueryFilterPlan, max_retries=Retrying( stop=stop_after_attempt(5), wait=wait_fixed(1), ), messages=[ { "role": "system", "content": system_message.format(doc_schema=filter_string), }, { "role": "user", "content": f"Here is the user query:\n\nQUERY:{query}", }, ], ) def auto_filter_vector_search( df: pd.DataFrame, query: str, text_column: str, embeddings_column: str, filter_fields: List[str], top_k: int = 20 ) -> pd.DataFrame: query_plan = generate_query_plan( input_df=df, query=query, filter_fields=filter_fields, ) if not query_plan.filter: logger.info(f"No filters were identified for query: {query}") search_query = query_plan.original_query else: logger.info(f"Applying filter(s): {query_plan.filter}") logger.info(f"Revised query: {query_plan.rephrased_query}") search_query = query_plan.rephrased_query df.rename(columns={embeddings_column: "vector"}, inplace=True) uri = "../temp-lancedb/pd_table.lance" db = lancedb.connect(uri) filter_fields.append(text_column) filter_fields.append("vector") try: table = db.create_table("temp_lance", data=df[filter_fields], mode="create") except: table = db.create_table("temp_lance", data=df[filter_fields], mode="overwrite") embeddings_model = OpenAIEmbeddings() embedder = embeddings_model.embedding_fn() query_vector = embedder(search_query) if query_plan.filter: result = ( table.search(query_vector[0]) \ .metric("cosine") \ .where(query_plan.filter, prefilter=True) \ .limit(top_k) \ .to_df() ) else: result = ( table.search(query_vector[0]) \ .metric("cosine") \ .limit(top_k) \ .to_df() ) result.rename(columns={"vector": embeddings_column}, inplace=True) logger.info(f"Vector search yielded a DataFrame with {len(result):,} rows") 
return result def auto_filter_fts_search( df: pd.DataFrame, query: str, text_column: str, embeddings_column: str, filter_fields: List[str], top_k: int = 20 ) -> pd.DataFrame: """ Performs a full-text search (FTS) on the given DataFrame based on the specified query and filter fields. Args: df (pd.DataFrame): The DataFrame to search. query (str): The query string to search for. text_column (str): The name of the column containing text to search. embeddings_column (str): The name of the column containing embeddings. filter_fields (List[str]): A list of fields to filter the search results. top_k (int): The maximum number of search results to return. Returns: pd.DataFrame: A DataFrame containing the top_k search results. """ query_plan = generate_query_plan( input_df=df, query=query, filter_fields=filter_fields, ) if not query_plan.filter: logger.info(f"No filters were identified for query: {query}") else: logger.info(f"Applying filter(s): {query_plan.filter}") # Check if a revised query exists and is not an empty string, use it if so if query_plan.rephrased_query and query_plan.rephrased_query.strip(): logger.info(f"Revised query: {query_plan.rephrased_query}") search_query = query_plan.rephrased_query else: search_query = query_plan.original_query df.rename(columns={embeddings_column: "vector"}, inplace=True) uri = "../temp-lancedb/pd_table.lance" db = lancedb.connect(uri) filter_fields.append(text_column) filter_fields.append("vector") try: table = db.create_table("temp_lance", data=df[filter_fields], mode="create") except: table = db.create_table("temp_lance", data=df[filter_fields], mode="overwrite") table.create_fts_index(text_column, replace=True) # Clean up query: replace all newlines with spaces in query, # force special search keywords to lower case, remove quotes, # so it's not interpreted as search syntax query_clean = ( search_query.replace("\n", " ") .replace("AND", "and") .replace("OR", "or") .replace("NOT", "not") .replace("'", "") .replace('"', "") ) if query_plan.filter: result = ( table.search(query_clean, query_type="fts") \ .where(query_plan.filter) \ .limit(top_k) \ .to_df() ) else: result = ( table.search(query_clean, query_type="fts") \ .limit(top_k) \ .to_df() ) result.rename(columns={"vector": embeddings_column}, inplace=True) logger.info(f"Full Text Search (FTS) search yielded a DataFrame with {len(result):,} rows") return result
[ "lancedb.connect" ]
[((341, 372), 'src.utils.logging.setup_colored_logging', 'setup_colored_logging', (['__name__'], {}), '(__name__)\n', (362, 372), False, 'from src.utils.logging import setup_colored_logging\n'), ((504, 554), 'pydantic.Field', 'Field', (['...'], {'description': '"""The original user query."""'}), "(..., description='The original user query.')\n", (509, 554), False, 'from pydantic import BaseModel, Field\n'), ((613, 688), 'pydantic.Field', 'Field', (['None'], {'description': '"""An SQL-like filter inferred from the user query."""'}), "(None, description='An SQL-like filter inferred from the user query.')\n", (618, 688), False, 'from pydantic import BaseModel, Field\n'), ((739, 863), 'pydantic.Field', 'Field', (['...'], {'description': '"""A rephrased query based on the FILTER fields, or an empty string if no filter is needed."""'}), "(..., description=\n 'A rephrased query based on the FILTER fields, or an empty string if no filter is needed.'\n )\n", (744, 863), False, 'from pydantic import BaseModel, Field\n'), ((7186, 7206), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (7201, 7206), False, 'import lancedb\n'), ((7506, 7524), 'src.embedding_models.models.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (7522, 7524), False, 'from src.embedding_models.models import OpenAIEmbeddings\n'), ((9778, 9798), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (9793, 9798), False, 'import lancedb\n'), ((1544, 1564), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1559, 1564), False, 'import lancedb\n'), ((5614, 5629), 'openai.OpenAI', 'openai.OpenAI', ([], {}), '()\n', (5627, 5629), False, 'import openai\n'), ((6016, 6037), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['(5)'], {}), '(5)\n', (6034, 6037), False, 'from tenacity import Retrying, stop_after_attempt, wait_fixed\n'), ((6056, 6069), 'tenacity.wait_fixed', 'wait_fixed', (['(1)'], {}), '(1)\n', (6066, 6069), False, 'from tenacity import Retrying, stop_after_attempt, wait_fixed\n')]
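A minimal, self-contained sketch of the prefiltered vector search that auto_filter_vector_search performs above; the data, path, and vector dimension are illustrative assumptions.

import lancedb
import numpy as np
import pandas as pd

# Illustrative corpus: random 4-d vectors plus a filterable "year" column.
df = pd.DataFrame(
    {
        "vector": [np.random.rand(4).tolist() for _ in range(100)],
        "text": [f"doc {i}" for i in range(100)],
        "year": [2000 + (i % 25) for i in range(100)],
    }
)
db = lancedb.connect("/tmp/lancedb-filter-demo")
table = db.create_table("temp_lance", data=df, mode="overwrite")

hits = (
    table.search(np.random.rand(4).tolist())
    .metric("cosine")
    .where("year > 2010", prefilter=True)  # SQL-like filter runs before the ANN search
    .limit(5)
    .to_df()
)
print(hits[["text", "year", "_distance"]])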
import logging
import lancedb
import os
from pathlib import Path

DB_TABLE_NAME = "split_files_db"

# Setting up the logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# db
db_uri = os.path.join(Path(__file__).parents[1], ".lancedb")
db = lancedb.connect(db_uri)
table = db.open_table(DB_TABLE_NAME)
[ "lancedb.connect" ]
[((126, 165), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (145, 165), False, 'import logging\n'), ((175, 202), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (192, 202), False, 'import logging\n'), ((276, 299), 'lancedb.connect', 'lancedb.connect', (['db_uri'], {}), '(db_uri)\n', (291, 299), False, 'import lancedb\n'), ((232, 246), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (236, 246), False, 'from pathlib import Path\n')]
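A hedged variation on the module above: rather than letting open_table raise when the table is missing, check table_names() first. The seed row and relative path are illustrative assumptions.

import lancedb

db = lancedb.connect(".lancedb")
if "split_files_db" in db.table_names():
    table = db.open_table("split_files_db")
else:
    # Placeholder schema; the real one comes from whatever job ingests the files.
    table = db.create_table(
        "split_files_db",
        data=[{"vector": [0.0, 0.0], "text": "seed"}],
    )
print(table.count_rows())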
""" Chatbot for talking to Podcast using Langchain, Ollama and LanceDB """ from langchain.document_loaders import WikipediaLoader import pandas as pd from langchain.memory import ConversationSummaryMemory import lancedb from langchain.vectorstores import LanceDB from langchain.embeddings import OpenAIEmbeddings from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.chat_models import ChatOllama from langchain.chains import ConversationalRetrievalChain def lanceDBConnection(embed): db = lancedb.connect("/tmp/lancedb") table = db.create_table( "pdf_search", data=[{"vector": embed.embed_query("Hello World"), "text": "Hello World"}], mode="overwrite", ) return table def vectorStoreSetup(query, OPENAI_KEY): docs = WikipediaLoader(query=query, load_max_docs=2).load() # chunking text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) all_splits = text_splitter.split_documents(docs) # OpenAI embeddings embed = OpenAIEmbeddings(openai_api_key=OPENAI_KEY) # LanceDB as vector store table = lanceDBConnection(embed) vectorstore = LanceDB.from_documents( documents=all_splits, embedding=OpenAIEmbeddings(openai_api_key=OPENAI_KEY), connection=table, ) return vectorstore def retrieverSetup(text, OPENAI_KEY): vectorstore = vectorStoreSetup(text, OPENAI_KEY) # define ChatOllama: by default takes llama2-4bit quantized model llm = ChatOllama() memory = ConversationSummaryMemory( llm=llm, memory_key="chat_history", return_messages=True ) retriever = vectorstore.as_retriever() # define Retrieval Chain for retriver qa = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, memory=memory) return qa def chat(qa, question): # chat query r = qa.run({"question": question}) return r
[ "lancedb.connect" ]
[((525, 556), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (540, 556), False, 'import lancedb\n'), ((883, 946), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(0)'}), '(chunk_size=500, chunk_overlap=0)\n', (913, 946), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1036, 1079), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_KEY'}), '(openai_api_key=OPENAI_KEY)\n', (1052, 1079), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1510, 1522), 'langchain.chat_models.ChatOllama', 'ChatOllama', ([], {}), '()\n', (1520, 1522), False, 'from langchain.chat_models import ChatOllama\n'), ((1536, 1623), 'langchain.memory.ConversationSummaryMemory', 'ConversationSummaryMemory', ([], {'llm': 'llm', 'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(llm=llm, memory_key='chat_history',\n return_messages=True)\n", (1561, 1623), False, 'from langchain.memory import ConversationSummaryMemory\n'), ((1728, 1806), 'langchain.chains.ConversationalRetrievalChain.from_llm', 'ConversationalRetrievalChain.from_llm', (['llm'], {'retriever': 'retriever', 'memory': 'memory'}), '(llm, retriever=retriever, memory=memory)\n', (1765, 1806), False, 'from langchain.chains import ConversationalRetrievalChain\n'), ((795, 840), 'langchain.document_loaders.WikipediaLoader', 'WikipediaLoader', ([], {'query': 'query', 'load_max_docs': '(2)'}), '(query=query, load_max_docs=2)\n', (810, 840), False, 'from langchain.document_loaders import WikipediaLoader\n'), ((1237, 1280), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_KEY'}), '(openai_api_key=OPENAI_KEY)\n', (1253, 1280), False, 'from langchain.embeddings import OpenAIEmbeddings\n')]
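A hypothetical driver for the helpers above, assuming it runs in the same module, that a local Ollama server is serving the default llama2 model, and that a valid OpenAI key is set in the environment.

import os

qa = retrieverSetup("Alan Turing", os.environ["OPENAI_API_KEY"])
print(chat(qa, "What is the Turing test?"))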
import os import dotenv import gradio as gr import lancedb import logging from langchain.embeddings.cohere import CohereEmbeddings from langchain.llms import Cohere from langchain.prompts import PromptTemplate from langchain.chains import RetrievalQA from langchain.vectorstores import LanceDB from langchain.document_loaders import TextLoader from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.document_loaders import PyPDFLoader import argostranslate.package import argostranslate.translate # Configuration Management dotenv.load_dotenv(".env") DB_PATH = "/tmp/lancedb" COHERE_MODEL_NAME = "multilingual-22-12" LANGUAGE_ISO_CODES = { "English": "en", "Hindi": "hi", "Turkish": "tr", "French": "fr", } # Logging Configuration logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) def initialize_documents_and_embeddings(input_file_path): """ Initialize documents and their embeddings from a given file. Parameters: - input_file_path (str): The path to the input file. Supported formats are .txt and .pdf. Returns: - tuple: A tuple containing a list of texts split from the document and the embeddings object. """ file_extension = os.path.splitext(input_file_path)[1] if file_extension == ".txt": logger.info("txt file processing") # Handle text file loader = TextLoader(input_file_path) documents = loader.load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=50) texts = text_splitter.split_documents(documents) elif file_extension == ".pdf": logger.info("pdf file processing") # Handle PDF file loader = PyPDFLoader(input_file_path) texts = loader.load_and_split() text_splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=50) texts = text_splitter.split_documents(texts) else: raise ValueError( "Unsupported file type. Supported files are .txt and .pdf only." ) embeddings = CohereEmbeddings(model=COHERE_MODEL_NAME) return texts, embeddings # Database Initialization def initialize_database(texts, embeddings): """ Initialize and populate a LanceDB database with documents and their embeddings. Parameters: - texts (list): A list of texts to be stored in the database. - embeddings (CohereEmbeddings): An embeddings object used to generate vector embeddings for the texts. Returns: - LanceDB: An instance of LanceDB with the documents and their embeddings stored. """ db = lancedb.connect(DB_PATH) table = db.create_table( "multiling-rag", data=[ { "vector": embeddings.embed_query("Hello World"), "text": "Hello World", "id": "1", } ], mode="overwrite", ) return LanceDB.from_documents(texts, embeddings, connection=table) # Translation Function def translate_text(text, from_code, to_code): """ Translate a given text from one language to another. Parameters: - text (str): The text to translate. - from_code (str): The ISO language code of the source language. - to_code (str): The ISO language code of the target language. Returns: - str: The translated text. 
""" try: argostranslate.package.update_package_index() available_packages = argostranslate.package.get_available_packages() package_to_install = next( filter( lambda x: x.from_code == from_code and x.to_code == to_code, available_packages, ) ) argostranslate.package.install_from_path(package_to_install.download()) return argostranslate.translate.translate(text, from_code, to_code) except Exception as e: logger.error(f"Error in translate_text: {str(e)}") return "Translation error" prompt_template = """Text: {context} Question: {question} Answer the question based on the text provided. If the text doesn't contain the answer, reply that the answer is not available.""" PROMPT = PromptTemplate( template=prompt_template, input_variables=["context", "question"] ) # Question Answering Function def answer_question(question, input_language, output_language, db): """ Answer a given question by retrieving relevant information from a database, translating the question and answer if necessary. Parameters: - question (str): The question to answer. - input_language (str): The language of the input question. - output_language (str): The desired language of the answer. - db (LanceDB): The LanceDB instance to use for information retrieval. Returns: - str: The answer to the question, in the desired output language """ try: input_lang_code = LANGUAGE_ISO_CODES[input_language] output_lang_code = LANGUAGE_ISO_CODES[output_language] question_in_english = ( translate_text(question, from_code=input_lang_code, to_code="en") if input_language != "English" else question ) prompt = PromptTemplate( template=prompt_template, input_variables=["context", "question"] ) qa = RetrievalQA.from_chain_type( llm=Cohere(model="command", temperature=0), chain_type="stuff", retriever=db.as_retriever(), chain_type_kwargs={"prompt": prompt}, return_source_documents=True, ) answer = qa({"query": question_in_english}) result_in_english = answer["result"].replace("\n", "").replace("Answer:", "") return ( translate_text(result_in_english, from_code="en", to_code=output_lang_code) if output_language != "English" else result_in_english ) except Exception as e: logger.error(f"Error in answer_question: {str(e)}") return "An error occurred while processing your question. Please try again." def setup_gradio_interface(db): """ Setup a Gradio interface for interacting with the multilingual chatbot. Parameters: - db (LanceDB): The database instance to use for information retrieval. Returns: - gr.Interface: A Gradio interface object for the chatbot. """ return gr.Interface( fn=lambda question, input_language, output_language: answer_question( question, input_language, output_language, db ), inputs=[ gr.Textbox(lines=2, placeholder="Type your question here..."), gr.Dropdown(list(LANGUAGE_ISO_CODES.keys()), label="Input Language"), gr.Dropdown(list(LANGUAGE_ISO_CODES.keys()), label="Output Language"), ], outputs="text", title="Multilingual Chatbot", description="Ask any question in your chosen language and get an answer in the language of your choice.", ) # Main Function def main(): INPUT_FILE_PATH = "healthy-diet-fact-sheet-394.pdf" texts, embeddings = initialize_documents_and_embeddings(INPUT_FILE_PATH) db = initialize_database(texts, embeddings) iface = setup_gradio_interface(db) iface.launch(share=True, debug=True) if __name__ == "__main__": main()
[ "lancedb.connect" ]
[((563, 589), 'dotenv.load_dotenv', 'dotenv.load_dotenv', (['""".env"""'], {}), "('.env')\n", (581, 589), False, 'import dotenv\n'), ((788, 827), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (807, 827), False, 'import logging\n'), ((837, 864), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (854, 864), False, 'import logging\n'), ((4194, 4279), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']"}), "(template=prompt_template, input_variables=['context',\n 'question'])\n", (4208, 4279), False, 'from langchain.prompts import PromptTemplate\n'), ((2089, 2130), 'langchain.embeddings.cohere.CohereEmbeddings', 'CohereEmbeddings', ([], {'model': 'COHERE_MODEL_NAME'}), '(model=COHERE_MODEL_NAME)\n', (2105, 2130), False, 'from langchain.embeddings.cohere import CohereEmbeddings\n'), ((2632, 2656), 'lancedb.connect', 'lancedb.connect', (['DB_PATH'], {}), '(DB_PATH)\n', (2647, 2656), False, 'import lancedb\n'), ((2939, 2998), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['texts', 'embeddings'], {'connection': 'table'}), '(texts, embeddings, connection=table)\n', (2961, 2998), False, 'from langchain.vectorstores import LanceDB\n'), ((1251, 1284), 'os.path.splitext', 'os.path.splitext', (['input_file_path'], {}), '(input_file_path)\n', (1267, 1284), False, 'import os\n'), ((1408, 1435), 'langchain.document_loaders.TextLoader', 'TextLoader', (['input_file_path'], {}), '(input_file_path)\n', (1418, 1435), False, 'from langchain.document_loaders import TextLoader\n'), ((1494, 1558), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(300)', 'chunk_overlap': '(50)'}), '(chunk_size=300, chunk_overlap=50)\n', (1524, 1558), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((5222, 5307), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']"}), "(template=prompt_template, input_variables=['context',\n 'question'])\n", (5236, 5307), False, 'from langchain.prompts import PromptTemplate\n'), ((1737, 1765), 'langchain_community.document_loaders.PyPDFLoader', 'PyPDFLoader', (['input_file_path'], {}), '(input_file_path)\n', (1748, 1765), False, 'from langchain_community.document_loaders import PyPDFLoader\n'), ((1830, 1894), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(300)', 'chunk_overlap': '(50)'}), '(chunk_size=300, chunk_overlap=50)\n', (1860, 1894), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((5384, 5422), 'langchain.llms.Cohere', 'Cohere', ([], {'model': '"""command"""', 'temperature': '(0)'}), "(model='command', temperature=0)\n", (5390, 5422), False, 'from langchain.llms import Cohere\n'), ((6603, 6664), 'gradio.Textbox', 'gr.Textbox', ([], {'lines': '(2)', 'placeholder': '"""Type your question here..."""'}), "(lines=2, placeholder='Type your question here...')\n", (6613, 6664), True, 'import gradio as gr\n')]
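A hypothetical end-to-end run of the pieces above, assuming it executes in the same module, that COHERE_API_KEY is available via .env, and that the referenced PDF sits next to the script.

texts, embeddings = initialize_documents_and_embeddings("healthy-diet-fact-sheet-394.pdf")
db = initialize_database(texts, embeddings)
print(answer_question("What foods make up a healthy diet?", "English", "Hindi", db))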
import lancedb


# vectorstore functions
class Vectorstore:
    """A class to interact with the vectorstore."""

    def __init__(self) -> None:
        """Initialize the vectorstore object."""
        uri = "data/sample-lancedb"
        # Keep a handle to the connection for later table operations.
        self.db = lancedb.connect(uri)
[ "lancedb.connect" ]
[((251, 271), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (266, 271), False, 'import lancedb\n')]
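A short usage sketch for the class above; table_names() is a standard lancedb connection method, and the data directory is created on first connect.

store = Vectorstore()
print(store.db.table_names())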
"""LanceDB vector store.""" from typing import Any, List, Optional import numpy as np from pandas import DataFrame from llama_index.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.vector_stores.types import ( MetadataFilters, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from llama_index.vector_stores.utils import node_to_metadata_dict def _to_lance_filter(standard_filters: MetadataFilters) -> Any: """Translate standard metadata filters to Lance specific spec.""" filters = [] for filter in standard_filters.filters: if isinstance(filter.value, str): filters.append(filter.key + ' = "' + filter.value + '"') else: filters.append(filter.key + " = " + str(filter.value)) return " AND ".join(filters) def _to_llama_similarities(results: DataFrame) -> List[float]: keys = results.keys() normalized_similarities: np.ndarray if "score" in keys: normalized_similarities = np.exp(results["score"] - np.max(results["score"])) elif "_distance" in keys: normalized_similarities = np.exp(-results["_distance"]) else: normalized_similarities = np.linspace(1, 0, len(results)) return normalized_similarities.tolist() class LanceDBVectorStore(VectorStore): """The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. """ stores_text = True flat_metadata: bool = True def __init__( self, uri: str, table_name: str = "vectors", nprobes: int = 20, refine_factor: Optional[int] = None, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.nprobes = nprobes self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return def add( self, nodes: List[BaseNode], ) -> List[str]: data = [] ids = [] for node in nodes: metadata = node_to_metadata_dict( node, remove_text=True, flat_metadata=self.flat_metadata ) append_data = { "id": node.node_id, "doc_id": node.ref_doc_id, "vector": node.get_embedding(), "text": node.get_content(metadata_mode=MetadataMode.NONE), } append_data.update(metadata) data.append(append_data) ids.append(node.node_id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. 
""" table = self.connection.open_table(self.table_name) table.delete('document_id = "' + ref_doc_id + '"') def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface." ) where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) table = self.connection.open_table(self.table_name) lance_query = ( table.search(query.query_embedding) .limit(query.similarity_top_k) .where(where) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_df() nodes = [] for _, item in results.iterrows(): node = TextNode( text=item.text, id_=item.id, relationships={ NodeRelationship.SOURCE: RelatedNodeInfo(node_id=item.doc_id), }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((2773, 2793), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2788, 2793), False, 'import lancedb\n'), ((1170, 1199), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1176, 1199), True, 'import numpy as np\n'), ((3178, 3257), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (3199, 3257), False, 'from llama_index.vector_stores.utils import node_to_metadata_dict\n'), ((1080, 1104), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1086, 1104), True, 'import numpy as np\n'), ((5479, 5515), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item.doc_id'}), '(node_id=item.doc_id)\n', (5494, 5515), False, 'from llama_index.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')]
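A self-contained check of the filter translation above, assuming the legacy llama_index package layout that these imports come from, where ExactMatchFilter lives next to MetadataFilters.

from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters

filters = MetadataFilters(
    filters=[
        ExactMatchFilter(key="author", value="Smith"),
        ExactMatchFilter(key="year", value=2023),
    ]
)
# String values are double-quoted; everything else is stringified.
print(_to_lance_filter(filters))  # author = "Smith" AND year = 2023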
from flask import Flask, jsonify, request, json, send_from_directory import os import openai import pandas as pd import base64 import os import requests import numpy as np # import google.cloud.texttospeech as tts import lancedb from flask_cors import CORS # os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'service.json' # os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.getenv("GCP") openai.api_key = os.getenv("OPENAI_API_KEY") app = Flask(__name__) CORS(app, resources={r"/*": {"origins": "*"}}) stories_file = 'data/stories.csv' session_file = 'data/session.csv' uri = "./data/lancedb" db = lancedb.connect(uri) try: db.create_table('mytale', data=[{"vector":[0,0],"id": "0", "title": "", "story": "", "img":['','','',''], "audio":""}]) except: pass def insertIntoLance(data): t = db.open_table('mytale') t.add(data=data) print('inserted into lance') return t def readLance(): t = db.open_table('mytale') #print(t.head()) print('read lance') return t.head() @app.route('/test', methods=['GET']) def getLance(): data = readLance() id_values = data['id'].to_pandas().tolist() title_values = data['title'].to_pandas().tolist() story_values = data['story'].to_pandas().tolist() audio_values = data['audio'].to_pandas().tolist() img_values = data['img'].to_pandas().tolist() # Construct a list of dictionaries with 'id', 'title', 'story', and 'audio' keys final = [] for i in range(1,len(id_values)): entry = { 'id': id_values[i], 'title': title_values[i], 'story': f"""{story_values[i]}""", 'img':[img_values[i][0],img_values[i][1],img_values[i][2],img_values[i][3]], 'audio': audio_values[i] } final.append(entry) # Convert the list of dictionaries to JSON format return final # if not os.path.exists(stories_file): # df = pd.DataFrame({ # "id": [], # "title": [], # "story": [], # "img": [] # }) # df.to_csv(stories_file, index=False) if not os.path.exists(session_file): df = pd.DataFrame({ "id": [], "sess_id": [], "story_id": [], "role": [], "content": [] }) df.to_csv(session_file, index=False) # stories_df = pd.read_csv(stories_file) session_df = pd.read_csv(session_file) def generate_story(topic: str) -> str: completion = openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=[ {"role": "user", "content": f"Generate a 4 paragraph children's story with title about {topic} that contains a moral."} ] ) content = completion.choices[0].message.content content = content.encode().decode('unicode_escape') title = content.split('\n')[0] title = title.replace('Title: ', '') story = content[content.find('\n'):] story = story.lstrip() return title, story def generate_prompts(story: str): completion = openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=[ {"role": "user", "content": f"Create four text to image prompts, seperated by new line, that will be suitable as images of the below given story such that each image represents a paragraph in the story. 
Do not include the character names, instead include only the characters physical description.\n\n{story}"} ] ) prompts = completion.choices[0].message.content prompts = prompts.encode().decode('unicode_escape') prompts = prompts.split('\n') ans = [] for i in prompts: #t = i.message.content #t = t.encode().decode('unicode_escape') if ':' in i: i = i[i.find(':')+1:] i = i.strip() if(i != ""): ans.append(i) content = completion.choices[0].message.content content = content.encode().decode('unicode_escape') if ':' in content: content = content[content.find(':')+1:] content = content.strip() return ans def generate_image(prompt: str): engine_id = "stable-diffusion-512-v2-1" api_host = os.getenv('API_HOST', 'https://api.stability.ai') api_key = os.getenv("STABILITYAI_API_KEY") if api_key is None: raise Exception("Missing Stability API key.") response = requests.post( f"{api_host}/v1/generation/{engine_id}/text-to-image", headers={ "Content-Type": "application/json", "Accept": "application/json", "Authorization": f"Bearer {api_key}" }, json={ "text_prompts": [ { "text": f"{prompt}" } ], "cfg_scale": 7, "clip_guidance_preset": "FAST_BLUE", "height": 512, "width": 512, "samples": 1, "steps": 30, }, ) if response.status_code != 200: raise Exception( "Non-200 response for image generation: " + str(response.text)) data = response.json() for i, image in enumerate(data["artifacts"]): return image["base64"] def save_story(title: str, story: str, img: [],audio_filename: str): img_filename=[] for i in range(len(img)): img_filename.append(f"./images/{title+str(i)}.png") with open(img_filename[i], "wb") as f: f.write(base64.b64decode(img[i])) global stories_df images_dest=[] for i in range(len(img)): # images_dest.append(request.root_url + 'images/' + title+str(i) + '.png') images_dest.append('images/' + title+str(i) + '.png') images_dest = np.array(images_dest) # temp_df = pd.DataFrame({ # "id": [len(stories_df)+1], # "title": [title], # "story": [story], # "img": [images_dest.tolist()], # "audio": [request.root_url + 'audios/' + title + '.wav'] # }) #[{"vector": [1, 1], "id": 2, "title": "test", "story": "test", "img": ['a', 'b', 'c', 'd'], "audio": "asdasd"}] insertIntoLance([{ "vector":[len(getLance())+1, len(getLance())+1], # "id": len(stories_df)+1, "id": len(getLance())+1, "title": title, "story": story.replace("'", "\\'"), "img": images_dest.tolist(), "audio": request.root_url + 'audios/' + title + '.wav' }]) # stories_df = pd.concat([stories_df, temp_df], ignore_index=True) # stories_df.to_csv(stories_file, index=False) def get_followup_response(session_id: int, story_id: int, question: str): global session_df # story = stories_df[stories_df['id'] == story_id]['story'].values[0] # t = getLance() # print("t", t) # print(f"story id {story_id}") # story = t[story_id+1]['story'] print("story_id", story_id) data = getLance() target_story = next((story for story in data if int(story["id"]) == story_id), None) story = target_story["story"] print("story", story) system_msg = f"You are an assistant that answers the questions to the children's "\ "story given below. You should answer the questions descriptively in a "\ "way that a child can understand them. 
If the question asked is unrelated "\ "to the story, do not answer the question and instead reply by asking the "\ "user to ask questions related to the story."\ "\n\n"\ f"Story: {story}" temp_df = pd.DataFrame({ "id": [len(session_df)+1], "sess_id": [session_id], "story_id": [story_id], "role": ["user"], "content": [question] }) session_df = pd.concat([session_df, temp_df], ignore_index=True) messages = session_df[session_df['sess_id'] == session_id][["id", "role", "content"]] messages = messages.sort_values(by=['id']) messages = messages[['role', 'content']] messages = messages.to_dict('records') completion = openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=[ {"role": "system", "content": system_msg}, *messages ] ) content = completion.choices[0].message.content content = content.encode().decode('unicode_escape') temp_df = pd.DataFrame({ "id": [len(session_df)+1], "sess_id": [session_id], "story_id": [story_id], "role": ["assistant"], "content": [content] }) session_df = pd.concat([session_df, temp_df], ignore_index=True) session_df.to_csv(session_file, index=False) return content def text_to_wav(text: str, title, dest, voice_name = "en-IN-Wavenet-A"): language_code = "-".join(voice_name.split("-")[:2]) text_input = tts.SynthesisInput(text=text) voice_params = tts.VoiceSelectionParams( language_code=language_code, name=voice_name ) audio_config = tts.AudioConfig(audio_encoding=tts.AudioEncoding.LINEAR16, speaking_rate=0.8) client = tts.TextToSpeechClient() response = client.synthesize_speech( input=text_input, voice=voice_params, audio_config=audio_config, ) filename = f"{dest + '/' + title}.wav" with open(filename, "wb") as out: out.write(response.audio_content) print(f'Generated speech saved to "{filename}"') return filename @app.route('/', methods=['GET']) def index(): return jsonify({'message': 'Hello World!'}) @app.route('/images/<path:path>', methods=['GET']) def get_image(path): return send_from_directory('images', path) @app.route('/audios/<path:path>', methods=['GET']) def get_audio(path): return send_from_directory('audios', path) @app.route('/generate', methods=['GET']) def generate(): topic = request.args.get('topic') title, story = generate_story(topic) print(f"Title: {title}") print(f"Story: {story}") prompts = generate_prompts(story) print(f"Prompts: {prompts}") img = [] for i in range(len(prompts)): img.append(generate_image(prompts[i])) print("Image generated") # audio_file = text_to_wav(story, title, "./audios") audio_file = "" print("Audio generated") save_story(title, story, img, audio_file) t = getLance() tl = len(t) return jsonify({'title': title, 'story': story, "id": tl, 'img': request.root_url + 'images/' + title + '.png', 'audio': request.root_url + 'audios/' + title + '.wav'}) @app.route('/get_story', methods=['GET']) def get_story(): story_id = int(request.args.get('id')) t = getLance() return t[story_id]['story'] # story = stories_df[stories_df['id'] == story_id].to_dict('records')[0] # if(type(story['img']) == str): # story['img'] = ast.literal_eval(story['img']) return jsonify({'story': story}) @app.route('/get_n_stories', methods=['GET']) def get_n_stories(): # n = int(request.args.get('n')) # sampled_stories = stories_df.sample(n=n).copy() # for idx, story in sampled_stories.iterrows(): # if(type(sampled_stories.at[idx, 'img']) == str): # sampled_stories.at[idx, 'img'] = ast.literal_eval(story['img']) # stories = sampled_stories.to_dict('records') return jsonify({'stories': getLance()}) return jsonify({'stories': stories}) 
@app.route('/get_story_count', methods=['GET']) def get_story_count(): return jsonify({'count': len(getLance())}) @app.route('/get_followup', methods=['GET']) def get_followup(): session_id = int(request.args.get('session_id')) story_id = int(request.args.get('story_id')) question = request.args.get('question') response = get_followup_response(session_id, story_id, question) # audio_file = text_to_wav(response, f"temp", "./audios") audio_file = "" return jsonify({'response': response, 'audio': request.root_url + 'audios/' + 'temp' + '.wav'}) def transcribe_file(audio): """Transcribe the given audio file.""" from google.cloud import speech client = speech.SpeechClient() audio = speech.RecognitionAudio(content=audio) config = speech.RecognitionConfig( encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16, sample_rate_hertz=48000, language_code="en-US", ) response = client.recognize(config=config, audio=audio) # Each result is for a consecutive portion of the audio. Iterate through # them to get the transcripts for the entire audio file. for result in response.results: # The first alternative is the most likely one for this portion. print("Transcript: {}".format(result.alternatives[0].transcript)) return result.alternatives[0].transcript @app.route('/post_followup_audio', methods=['POST']) def get_text(): # get the audio data from form audio_file = request.files['audio'] sess_id = request.form['session_id'] story_id = request.form['story_id'] text = transcribe_file(audio_file.read()) if text is None: return jsonify({'response': 'Sorry, I could not understand you. Please try again.'}) response = get_followup_response(sess_id, story_id, text) return jsonify({'response': response}) if __name__ == '__main__': # app.run() app.run(debug=True)
[ "lancedb.connect" ]
[((407, 434), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (416, 434), False, 'import os\n'), ((441, 456), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (446, 456), False, 'from flask import Flask, jsonify, request, json, send_from_directory\n'), ((457, 502), 'flask_cors.CORS', 'CORS', (['app'], {'resources': "{'/*': {'origins': '*'}}"}), "(app, resources={'/*': {'origins': '*'}})\n", (461, 502), False, 'from flask_cors import CORS\n'), ((603, 623), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (618, 623), False, 'import lancedb\n'), ((2347, 2372), 'pandas.read_csv', 'pd.read_csv', (['session_file'], {}), '(session_file)\n', (2358, 2372), True, 'import pandas as pd\n'), ((2083, 2111), 'os.path.exists', 'os.path.exists', (['session_file'], {}), '(session_file)\n', (2097, 2111), False, 'import os\n'), ((2122, 2208), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': [], 'sess_id': [], 'story_id': [], 'role': [], 'content': []}"], {}), "({'id': [], 'sess_id': [], 'story_id': [], 'role': [],\n 'content': []})\n", (2134, 2208), True, 'import pandas as pd\n'), ((2430, 2626), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo"""', 'messages': '[{\'role\': \'user\', \'content\':\n f"Generate a 4 paragraph children\'s story with title about {topic} that contains a moral."\n }]'}), '(model=\'gpt-3.5-turbo\', messages=[{\'role\':\n \'user\', \'content\':\n f"Generate a 4 paragraph children\'s story with title about {topic} that contains a moral."\n }])\n', (2458, 2626), False, 'import openai\n'), ((2987, 3375), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo"""', 'messages': '[{\'role\': \'user\', \'content\':\n f"""Create four text to image prompts, seperated by new line, that will be suitable as images of the below given story such that each image represents a paragraph in the story. Do not include the character names, instead include only the characters physical description.\n\n{story}"""\n }]'}), '(model=\'gpt-3.5-turbo\', messages=[{\'role\':\n \'user\', \'content\':\n f"""Create four text to image prompts, seperated by new line, that will be suitable as images of the below given story such that each image represents a paragraph in the story. 
Do not include the character names, instead include only the characters physical description.\n\n{story}"""\n }])\n', (3015, 3375), False, 'import openai\n'), ((4117, 4166), 'os.getenv', 'os.getenv', (['"""API_HOST"""', '"""https://api.stability.ai"""'], {}), "('API_HOST', 'https://api.stability.ai')\n", (4126, 4166), False, 'import os\n'), ((4181, 4213), 'os.getenv', 'os.getenv', (['"""STABILITYAI_API_KEY"""'], {}), "('STABILITYAI_API_KEY')\n", (4190, 4213), False, 'import os\n'), ((4309, 4669), 'requests.post', 'requests.post', (['f"""{api_host}/v1/generation/{engine_id}/text-to-image"""'], {'headers': "{'Content-Type': 'application/json', 'Accept': 'application/json',\n 'Authorization': f'Bearer {api_key}'}", 'json': "{'text_prompts': [{'text': f'{prompt}'}], 'cfg_scale': 7,\n 'clip_guidance_preset': 'FAST_BLUE', 'height': 512, 'width': 512,\n 'samples': 1, 'steps': 30}"}), "(f'{api_host}/v1/generation/{engine_id}/text-to-image',\n headers={'Content-Type': 'application/json', 'Accept':\n 'application/json', 'Authorization': f'Bearer {api_key}'}, json={\n 'text_prompts': [{'text': f'{prompt}'}], 'cfg_scale': 7,\n 'clip_guidance_preset': 'FAST_BLUE', 'height': 512, 'width': 512,\n 'samples': 1, 'steps': 30})\n", (4322, 4669), False, 'import requests\n'), ((5650, 5671), 'numpy.array', 'np.array', (['images_dest'], {}), '(images_dest)\n', (5658, 5671), True, 'import numpy as np\n'), ((7674, 7725), 'pandas.concat', 'pd.concat', (['[session_df, temp_df]'], {'ignore_index': '(True)'}), '([session_df, temp_df], ignore_index=True)\n', (7683, 7725), True, 'import pandas as pd\n'), ((7996, 8116), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo"""', 'messages': "[{'role': 'system', 'content': system_msg}, *messages]"}), "(model='gpt-3.5-turbo', messages=[{'role':\n 'system', 'content': system_msg}, *messages])\n", (8024, 8116), False, 'import openai\n'), ((8493, 8544), 'pandas.concat', 'pd.concat', (['[session_df, temp_df]'], {'ignore_index': '(True)'}), '([session_df, temp_df], ignore_index=True)\n', (8502, 8544), True, 'import pandas as pd\n'), ((9462, 9498), 'flask.jsonify', 'jsonify', (["{'message': 'Hello World!'}"], {}), "({'message': 'Hello World!'})\n", (9469, 9498), False, 'from flask import Flask, jsonify, request, json, send_from_directory\n'), ((9583, 9618), 'flask.send_from_directory', 'send_from_directory', (['"""images"""', 'path'], {}), "('images', path)\n", (9602, 9618), False, 'from flask import Flask, jsonify, request, json, send_from_directory\n'), ((9703, 9738), 'flask.send_from_directory', 'send_from_directory', (['"""audios"""', 'path'], {}), "('audios', path)\n", (9722, 9738), False, 'from flask import Flask, jsonify, request, json, send_from_directory\n'), ((9809, 9834), 'flask.request.args.get', 'request.args.get', (['"""topic"""'], {}), "('topic')\n", (9825, 9834), False, 'from flask import Flask, jsonify, request, json, send_from_directory\n'), ((10332, 10501), 'flask.jsonify', 'jsonify', (["{'title': title, 'story': story, 'id': tl, 'img': request.root_url +\n 'images/' + title + '.png', 'audio': request.root_url + 'audios/' +\n title + '.wav'}"], {}), "({'title': title, 'story': story, 'id': tl, 'img': request.root_url +\n 'images/' + title + '.png', 'audio': request.root_url + 'audios/' +\n title + '.wav'})\n", (10339, 10501), False, 'from flask import Flask, jsonify, request, json, send_from_directory\n'), ((10857, 10882), 'flask.jsonify', 'jsonify', (["{'story': story}"], {}), "({'story': story})\n", (10864, 10882), 
False, 'from flask import Flask, jsonify, request, json, send_from_directory\n'), ((11340, 11369), 'flask.jsonify', 'jsonify', (["{'stories': stories}"], {}), "({'stories': stories})\n", (11347, 11369), False, 'from flask import Flask, jsonify, request, json, send_from_directory\n'), ((11673, 11701), 'flask.request.args.get', 'request.args.get', (['"""question"""'], {}), "('question')\n", (11689, 11701), False, 'from flask import Flask, jsonify, request, json, send_from_directory\n'), ((11864, 11956), 'flask.jsonify', 'jsonify', (["{'response': response, 'audio': request.root_url + 'audios/' + 'temp' + '.wav'}"], {}), "({'response': response, 'audio': request.root_url + 'audios/' +\n 'temp' + '.wav'})\n", (11871, 11956), False, 'from flask import Flask, jsonify, request, json, send_from_directory\n'), ((12075, 12096), 'google.cloud.speech.SpeechClient', 'speech.SpeechClient', ([], {}), '()\n', (12094, 12096), False, 'from google.cloud import speech\n'), ((12110, 12148), 'google.cloud.speech.RecognitionAudio', 'speech.RecognitionAudio', ([], {'content': 'audio'}), '(content=audio)\n', (12133, 12148), False, 'from google.cloud import speech\n'), ((12162, 12297), 'google.cloud.speech.RecognitionConfig', 'speech.RecognitionConfig', ([], {'encoding': 'speech.RecognitionConfig.AudioEncoding.LINEAR16', 'sample_rate_hertz': '(48000)', 'language_code': '"""en-US"""'}), "(encoding=speech.RecognitionConfig.AudioEncoding.\n LINEAR16, sample_rate_hertz=48000, language_code='en-US')\n", (12186, 12297), False, 'from google.cloud import speech\n'), ((13215, 13246), 'flask.jsonify', 'jsonify', (["{'response': response}"], {}), "({'response': response})\n", (13222, 13246), False, 'from flask import Flask, jsonify, request, json, send_from_directory\n'), ((10594, 10616), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (10610, 10616), False, 'from flask import Flask, jsonify, request, json, send_from_directory\n'), ((11577, 11607), 'flask.request.args.get', 'request.args.get', (['"""session_id"""'], {}), "('session_id')\n", (11593, 11607), False, 'from flask import Flask, jsonify, request, json, send_from_directory\n'), ((11628, 11656), 'flask.request.args.get', 'request.args.get', (['"""story_id"""'], {}), "('story_id')\n", (11644, 11656), False, 'from flask import Flask, jsonify, request, json, send_from_directory\n'), ((13064, 13141), 'flask.jsonify', 'jsonify', (["{'response': 'Sorry, I could not understand you. Please try again.'}"], {}), "({'response': 'Sorry, I could not understand you. Please try again.'})\n", (13071, 13141), False, 'from flask import Flask, jsonify, request, json, send_from_directory\n'), ((5386, 5410), 'base64.b64decode', 'base64.b64decode', (['img[i]'], {}), '(img[i])\n', (5402, 5410), False, 'import base64\n')]
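A minimal sketch of the read path used by readLance()/getLance() above: head() returns a pyarrow Table, and each column converts to pandas before .tolist(). The path and row below are illustrative.

import lancedb

db = lancedb.connect("./data/lancedb-demo")
t = db.create_table(
    "mytale_demo",
    data=[{"vector": [0, 0], "id": "1", "title": "demo", "story": "once upon a time",
           "img": ["", "", "", ""], "audio": ""}],
    mode="overwrite",
)
head = t.head()  # pyarrow.Table holding the first rows
print(head["title"].to_pandas().tolist())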
from __future__ import annotations

import json
import logging

import boto3
import embeddings
import lancedb
from config import settings

# TODO: why doesn't logger print anything?
logger = logging.getLogger(__name__)
logging.getLogger().setLevel(logging.INFO)

client = boto3.client('cloudformation', region_name='us-east-1')

response = client.describe_stacks(
    StackName=f'wb-agrifoods-data-lab-{settings.STAGE}'.lower(),
)
outputs = response['Stacks'][0]['Outputs']
[bucket_name] = [o['OutputValue'] for o in outputs if o['OutputKey'] == 'bucketname']

db = lancedb.connect(f's3://{bucket_name}/{settings.LANCEDB_DATA_PATH}')

with open('records_v1.0.json', 'r') as f:
    records = json.loads(f.read())

# Flatten records
data = [
    {'vector': r['embedding'], **{k: v for k, v in r.items() if k != 'embedding'}}
    for r in records
]

# LanceDB uses the keys from the first list element
# as the table columns, so we first ensure that all records
# have the same set of keys (values will be None for keys
# not relevant to a record)
key_set = set()
for d in data:
    key_set.update(set(d.keys()))
data = [{**{k: None for k in key_set}, **d} for d in data]
print(len(data))

# Note: AWS S3 Buckets are not region specific, so the region
# doesn't really matter here
db.create_table('agrifood', data, mode='overwrite')
table = db.open_table('agrifood')
logger.info(table.head())

queries = [
    'How is food security affected by drought in north africa?',
    'How has climate change affected wheat production in asian minor in the past decade?',
    'In what regions of the world is pivot irrigation most common?',
]
query_vectors = [
    embeddings.client.embeddings.create(input=q, model=settings.OPENAI_EMBEDDING_MODEL)
    .data[0]
    .embedding
    for q in queries
]

for q, v in zip(queries, query_vectors):
    _type = 'project'
    query_result = table.search(v).metric('cosine').where(f"type='{_type}'").limit(5)
    print(f'QUERY: {q}')
    print(f'RESULT: {query_result.to_list()}')
    print('\n')
[ "lancedb.connect" ]
[((191, 218), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (208, 218), False, 'import logging\n'), ((272, 327), 'boto3.client', 'boto3.client', (['"""cloudformation"""'], {'region_name': '"""us-east-1"""'}), "('cloudformation', region_name='us-east-1')\n", (284, 327), False, 'import boto3\n'), ((567, 634), 'lancedb.connect', 'lancedb.connect', (['f"""s3://{bucket_name}/{settings.LANCEDB_DATA_PATH}"""'], {}), "(f's3://{bucket_name}/{settings.LANCEDB_DATA_PATH}')\n", (582, 634), False, 'import lancedb\n'), ((219, 238), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (236, 238), False, 'import logging\n'), ((1664, 1752), 'embeddings.client.embeddings.create', 'embeddings.client.embeddings.create', ([], {'input': 'q', 'model': 'settings.OPENAI_EMBEDDING_MODEL'}), '(input=q, model=settings.\n OPENAI_EMBEDDING_MODEL)\n', (1699, 1752), False, 'import embeddings\n')]
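A hedged sketch of the same S3-backed pattern with the CloudFormation lookup removed; the bucket and prefix are placeholders, AWS credentials are assumed to be in the environment, and 1536 matches the OpenAI embedding width implied above.

import lancedb

db = lancedb.connect("s3://my-bucket/lancedb")  # placeholder bucket/prefix
table = db.open_table("agrifood")
hits = (
    table.search([0.0] * 1536)
    .metric("cosine")
    .where("type='project'")
    .limit(5)
    .to_list()
)
print(len(hits))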
from typing import List, Union, Dict
from pathlib import Path
from enum import StrEnum

import lancedb
import numpy as np

from image_search.config.log_factory import logger
from image_search.model.image_data import ImageData
from image_search.model.error import Error, ErrorCode
from image_search.vector_db.imagedb_schema import schema
from image_search.model.image_data import convert_to_pyarrow
from image_search.config.config import cfg
from image_search.vector_db.imagedb_schema import (
    FIELD_IMAGE_VECTOR,
    FIELD_IMAGE_NAME,
    FIELD_TEXT_VECTOR,
    FIELD_IMAGE_DESCRIPTION,
    FIELD_CREATE_TIMESTAMP,
    FIELD_UPDATE_TIMESTAMP,
)
from image_search.service.conversion_service import (
    convert_single_image,
)


class DISTANCE(StrEnum):
    EUCLIDEAN = "l2"
    COSINE = "cosine"
    DOT = "dot"


def execute_knn_search(
    embedding: List[float],
    vector_column_name: str,
    limit: int = 10,
    distance: str = DISTANCE.EUCLIDEAN,
) -> List[Dict]:
    return (
        tbl.search(
            embedding, query_type="vector", vector_column_name=vector_column_name
        )
        .metric(distance)
        .limit(limit)
        .to_list()
    )


def init_image_vector_table() -> lancedb.table.LanceTable:
    db = lancedb.connect(cfg.lance_db_location)
    table_name = cfg.lance_table_image
    try:
        return db.open_table(table_name)
    except FileNotFoundError as e:
        logger.warning("Could not open database. It does not exist.")
        return db.create_table(table_name, schema=schema)


tbl = init_image_vector_table()


def sql_escape(text: str) -> str:
    return text.replace("'", "''")


def convert_vec_to_literal(float_list: List[float]) -> List[str]:
    return [str(v) for v in float_list]


def save_image(image_data: ImageData, ignore_update: bool = False) -> bool:
    results = execute_knn_search(image_data.image_embedding, FIELD_IMAGE_VECTOR, 1)
    image_available = False
    if len(results) > 0:
        first_result = results[0]
        image_available = np.array_equal(
            first_result[FIELD_IMAGE_VECTOR], image_data.image_embedding
        )
    if not image_available:
        # insert
        logger.info("Creating %s", image_data.file_name)
        pa_table = convert_to_pyarrow(image_data, None)
        tbl.add(pa_table)
        return True
    else:
        # update
        logger.info("Updating %s", image_data.file_name)
        first_result = results[0]
        if ignore_update == False:
            create_timestamp = first_result[FIELD_CREATE_TIMESTAMP]
            pa_table = convert_to_pyarrow(image_data, create_timestamp)
            single_value = {
                FIELD_TEXT_VECTOR: convert_vec_to_literal(
                    first_result[FIELD_TEXT_VECTOR]
                ),
                FIELD_IMAGE_DESCRIPTION: sql_escape(
                    first_result[FIELD_IMAGE_DESCRIPTION]
                ),
                FIELD_IMAGE_VECTOR: convert_vec_to_literal(
                    first_result[FIELD_IMAGE_VECTOR]
                ),
                FIELD_UPDATE_TIMESTAMP: first_result[FIELD_UPDATE_TIMESTAMP],
            }
            filter_expression = (
                f"{FIELD_IMAGE_NAME} = '{first_result[FIELD_IMAGE_NAME]}'"
            )
            if image_data.image_path:
                image_data.image_path.unlink()
            tbl.update(where=filter_expression, values=single_value)
        return False


async def save_image_from_path(image_path: Path) -> Union[bool, Error]:
    if not image_path.exists():
        return Error(
            code=ErrorCode.NOT_FOUND,
            message=f"Could not find original image path: {image_path}",
        )
    image_data = await convert_single_image(image_path)
    if image_data is None:
        return Error(
            ErrorCode.DESCRIPTION_MISSING,
            f"Image description is missing for {image_path}",
        )
    return save_image(image_data)
[ "lancedb.connect" ]
[((1246, 1284), 'lancedb.connect', 'lancedb.connect', (['cfg.lance_db_location'], {}), '(cfg.lance_db_location)\n', (1261, 1284), False, 'import lancedb\n'), ((2025, 2101), 'numpy.array_equal', 'np.array_equal', (['first_result[FIELD_IMAGE_VECTOR]', 'image_data.image_embedding'], {}), '(first_result[FIELD_IMAGE_VECTOR], image_data.image_embedding)\n', (2039, 2101), True, 'import numpy as np\n'), ((2170, 2218), 'image_search.config.log_factory.logger.info', 'logger.info', (['"""Creating %s"""', 'image_data.file_name'], {}), "('Creating %s', image_data.file_name)\n", (2181, 2218), False, 'from image_search.config.log_factory import logger\n'), ((2238, 2274), 'image_search.model.image_data.convert_to_pyarrow', 'convert_to_pyarrow', (['image_data', 'None'], {}), '(image_data, None)\n', (2256, 2274), False, 'from image_search.model.image_data import convert_to_pyarrow\n'), ((2349, 2397), 'image_search.config.log_factory.logger.info', 'logger.info', (['"""Updating %s"""', 'image_data.file_name'], {}), "('Updating %s', image_data.file_name)\n", (2360, 2397), False, 'from image_search.config.log_factory import logger\n'), ((3540, 3637), 'image_search.model.error.Error', 'Error', ([], {'code': 'ErrorCode.NOT_FOUND', 'message': 'f"""Could not find original image path: {image_path}"""'}), "(code=ErrorCode.NOT_FOUND, message=\n f'Could not find original image path: {image_path}')\n", (3545, 3637), False, 'from image_search.model.error import Error, ErrorCode\n'), ((3691, 3723), 'image_search.service.conversion_service.convert_single_image', 'convert_single_image', (['image_path'], {}), '(image_path)\n', (3711, 3723), False, 'from image_search.service.conversion_service import convert_single_image\n'), ((3766, 3856), 'image_search.model.error.Error', 'Error', (['ErrorCode.DESCRIPTION_MISSING', 'f"""Image description is missing for {image_path}"""'], {}), "(ErrorCode.DESCRIPTION_MISSING,\n f'Image description is missing for {image_path}')\n", (3771, 3856), False, 'from image_search.model.error import Error, ErrorCode\n'), ((1417, 1478), 'image_search.config.log_factory.logger.warning', 'logger.warning', (['"""Could not open database. It does not exist."""'], {}), "('Could not open database. It does not exist.')\n", (1431, 1478), False, 'from image_search.config.log_factory import logger\n'), ((2558, 2606), 'image_search.model.image_data.convert_to_pyarrow', 'convert_to_pyarrow', (['image_data', 'create_timestamp'], {}), '(image_data, create_timestamp)\n', (2576, 2606), False, 'from image_search.model.image_data import convert_to_pyarrow\n')]
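A hypothetical call into the image-search module above, exercising its own execute_knn_search against the already-initialized module-level tbl. The 512-dimension zero embedding is a placeholder assumption; only names defined in that row are used.

# Placeholder query embedding; real embeddings come from the project's image model.
emb = [0.0] * 512
rows = execute_knn_search(emb, FIELD_IMAGE_VECTOR, limit=3, distance=DISTANCE.COSINE)
for row in rows:
    print(row[FIELD_IMAGE_NAME], row["_distance"])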
import lancedb import pyarrow as pa uri = "data/sample-lancedb" db = lancedb.connect(uri) schema = pa.schema( [ pa.field("vector", pa.list_(pa.float32(), 1536)), pa.field("review", pa.string()), pa.field("id",pa.string()) ]) tbl = db.create_table("review_table", schema=schema) print(tbl)
[ "lancedb.connect" ]
[((69, 89), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (84, 89), False, 'import lancedb\n'), ((195, 206), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (204, 206), True, 'import pyarrow as pa\n'), ((229, 240), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (238, 240), True, 'import pyarrow as pa\n'), ((148, 160), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (158, 160), True, 'import pyarrow as pa\n')]
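A short hypothetical continuation of the snippet above (not part of the dataset row): it reopens the freshly created review_table, adds one record matching the declared schema, and runs a vector query. The zero vector and the review text are placeholder values.

import lancedb

db = lancedb.connect("data/sample-lancedb")
tbl = db.open_table("review_table")
# One record matching the schema: a 1536-dim float vector plus two string fields.
tbl.add([{"vector": [0.0] * 1536, "review": "Great battery life", "id": "rev-001"}])
# Nearest-neighbour search against the same placeholder vector.
hits = tbl.search([0.0] * 1536).limit(5).to_list()
print(hits[0]["review"])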
"""The lambda function for the Bolt app.""" import json import logging import os import re import time from typing import Any import lancedb import slack_bolt from langchain.chains.conversational_retrieval.base import ConversationalRetrievalChain from langchain.memory.buffer import ConversationBufferMemory from langchain_community.chat_message_histories import DynamoDBChatMessageHistory from langchain_community.chat_models import ChatOpenAI from langchain_community.vectorstores.lancedb import LanceDB from langchain_core.callbacks.base import BaseCallbackHandler from langchain_core.outputs.llm_result import LLMResult from langchain_openai.embeddings import OpenAIEmbeddings from slack_bolt import Ack, BoltContext, Say from slack_bolt.adapter.aws_lambda import SlackRequestHandler from slack_sdk.models.blocks import ( ContextBlock, DividerBlock, MarkdownTextObject, SectionBlock, ) INITIAL_CHAT_UPDATE_INTERVAL_SEC = 1 SlackRequestHandler.clear_all_log_handlers() logging.basicConfig( format="%(levelname)s %(message)s", level=logging.INFO, ) logger = logging.getLogger(__name__) def initialize_conversation_memory(id_ts: str) -> ConversationBufferMemory: """Initialize the conversation memory. Parameters ---------- id_ts: str The ID timestamp. Returns ------- ConversationBufferMemory The initialized conversation memory. """ history = DynamoDBChatMessageHistory( table_name=os.environ["CHAT_HISTORY_TABLE"], session_id=id_ts, ) return ConversationBufferMemory( chat_memory=history, memory_key="chat_history", return_messages=True, ) def initialize_vector_store() -> LanceDB: """Initialize the vector store. Returns ------- LanceDB The initialized vector store. """ db = lancedb.connect(os.environ["LANCEDB_DB"]) table = db.open_table(os.environ["LANCEDB_TABLE"]) embeddings = OpenAIEmbeddings() return LanceDB( table, embeddings, ) app = slack_bolt.App( signing_secret=os.environ["SLACK_SIGNING_SECRET"], token=os.environ["SLACK_BOT_TOKEN"], process_before_response=True, ) class SlackStreamingCallbackHandler(BaseCallbackHandler): """Handles callbacks for Slack streaming.""" last_send_time = time.time() message = "" def __init__(self: "SlackStreamingCallbackHandler", channel: str, ts: str) -> None: """Initialize the SlackStreamingCallbackHandler. Parameters ---------- channel : str The channel to send updates to. ts : str The timestamp of the message to update. Returns ------- None This is a constructor method and does not return anything. """ self.channel = channel self.ts = ts self.interval = INITIAL_CHAT_UPDATE_INTERVAL_SEC self.update_count = 0 def on_llm_new_token( self: "SlackStreamingCallbackHandler", token: str, **kwargs: Any, # noqa: ARG002 ) -> None: """Handle the new token received during the LLM process. Parameters ---------- token : str The new token received. kwargs : Any Additional keyword arguments. Returns ------- None Nothing to return. """ self.message += token now = time.time() if now - self.last_send_time > self.interval: app.client.chat_update( channel=self.channel, ts=self.ts, text=f"{self.message}\n\nTyping...", ) self.last_send_time = now self.update_count += 1 if self.update_count / 10 > self.interval: self.interval = self.interval * 2 def on_llm_end( self: "SlackStreamingCallbackHandler", response: LLMResult, # noqa: ARG002 **kwargs: Any, # noqa: ARG002 ) -> None: """Handle the end of the LLM process. This method is called when the LLM process is completed. Parameters ---------- response : LLMResult The response of the LLM process. kwargs : Any Additional keyword arguments. 
        Returns
        -------
        None
            This method does not return anything.
        """
        message_context = f"Generated by {os.environ['OPENAI_API_MODEL']}"
        message_blocks = [
            SectionBlock(text=MarkdownTextObject(text=self.message)),
            DividerBlock(),
            ContextBlock(elements=[MarkdownTextObject(text=message_context)]),
        ]
        app.client.chat_update(
            channel=self.channel,
            ts=self.ts,
            text=self.message,
            blocks=message_blocks,
        )


def handle_mention(event: dict, say: Say) -> None:
    """Handle the mention event.

    Parameters
    ----------
    event : dict
        The mention event.
    say : Say
        The say function.

    Returns
    -------
    None
        This function does not return anything.
    """
    channel = event["channel"]
    thread_ts = event["ts"]
    message = re.sub(r"<@.*>", "", event["text"]).strip()
    id_ts = event["ts"]
    if "thread_ts" in event:
        id_ts = event["thread_ts"]

    result = say("\n\nTyping...", thread_ts=thread_ts)
    ts = result["ts"]

    memory = initialize_conversation_memory(id_ts)
    vector_store = initialize_vector_store()

    slack_callback = SlackStreamingCallbackHandler(channel, ts)
    llm = ChatOpenAI(
        model=os.environ["OPENAI_API_MODEL"],
        temperature=float(os.environ["OPENAI_API_TEMPERATURE"]),
        streaming=True,
        callbacks=[slack_callback],
    )
    condense_question_llm = ChatOpenAI(
        model=os.environ["OPENAI_API_MODEL"],
        temperature=float(os.environ["OPENAI_API_TEMPERATURE"]),
    )
    cr_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vector_store.as_retriever(),
        memory=memory,
        condense_question_llm=condense_question_llm,
    )
    cr_chain.run(message)


def just_ack(ack: Ack) -> None:
    """Just acknowledge the request.

    Parameters
    ----------
    ack : Ack
        The acknowledgement function.

    Returns
    -------
    None
        This function does not return anything.
    """
    ack()


app.event("app_mention")(
    ack=just_ack,
    lazy=[handle_mention],
)


def lambda_handler(event: dict, context: BoltContext) -> dict:
    """Handle the Lambda function.

    Parameters
    ----------
    event : dict
        The event dictionary.
    context : BoltContext
        The Bolt context.

    Returns
    -------
    dict
        The response dictionary.
    """
    headers = event["headers"]
    logger.info(json.dumps(headers))
    if "x-slack-retry-num" in headers:
        logger.info("SKIP > %s", headers["x-slack-retry-num"])
        return {"statusCode": 200, "body": json.dumps({"message": "ok"})}

    slack_handler = SlackRequestHandler(app=app)
    return slack_handler.handle(event, context)
[ "lancedb.connect" ]
[((946, 990), 'slack_bolt.adapter.aws_lambda.SlackRequestHandler.clear_all_log_handlers', 'SlackRequestHandler.clear_all_log_handlers', ([], {}), '()\n', (988, 990), False, 'from slack_bolt.adapter.aws_lambda import SlackRequestHandler\n'), ((991, 1066), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s %(message)s"""', 'level': 'logging.INFO'}), "(format='%(levelname)s %(message)s', level=logging.INFO)\n", (1010, 1066), False, 'import logging\n'), ((1087, 1114), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1104, 1114), False, 'import logging\n'), ((2053, 2190), 'slack_bolt.App', 'slack_bolt.App', ([], {'signing_secret': "os.environ['SLACK_SIGNING_SECRET']", 'token': "os.environ['SLACK_BOT_TOKEN']", 'process_before_response': '(True)'}), "(signing_secret=os.environ['SLACK_SIGNING_SECRET'], token=os.\n environ['SLACK_BOT_TOKEN'], process_before_response=True)\n", (2067, 2190), False, 'import slack_bolt\n'), ((1429, 1522), 'langchain_community.chat_message_histories.DynamoDBChatMessageHistory', 'DynamoDBChatMessageHistory', ([], {'table_name': "os.environ['CHAT_HISTORY_TABLE']", 'session_id': 'id_ts'}), "(table_name=os.environ['CHAT_HISTORY_TABLE'],\n session_id=id_ts)\n", (1455, 1522), False, 'from langchain_community.chat_message_histories import DynamoDBChatMessageHistory\n'), ((1553, 1651), 'langchain.memory.buffer.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'chat_memory': 'history', 'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(chat_memory=history, memory_key='chat_history',\n return_messages=True)\n", (1577, 1651), False, 'from langchain.memory.buffer import ConversationBufferMemory\n'), ((1851, 1892), 'lancedb.connect', 'lancedb.connect', (["os.environ['LANCEDB_DB']"], {}), "(os.environ['LANCEDB_DB'])\n", (1866, 1892), False, 'import lancedb\n'), ((1965, 1983), 'langchain_openai.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1981, 1983), False, 'from langchain_openai.embeddings import OpenAIEmbeddings\n'), ((1995, 2021), 'langchain_community.vectorstores.lancedb.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (2002, 2021), False, 'from langchain_community.vectorstores.lancedb import LanceDB\n'), ((2332, 2343), 'time.time', 'time.time', ([], {}), '()\n', (2341, 2343), False, 'import time\n'), ((7110, 7138), 'slack_bolt.adapter.aws_lambda.SlackRequestHandler', 'SlackRequestHandler', ([], {'app': 'app'}), '(app=app)\n', (7129, 7138), False, 'from slack_bolt.adapter.aws_lambda import SlackRequestHandler\n'), ((3454, 3465), 'time.time', 'time.time', ([], {}), '()\n', (3463, 3465), False, 'import time\n'), ((6891, 6910), 'json.dumps', 'json.dumps', (['headers'], {}), '(headers)\n', (6901, 6910), False, 'import json\n'), ((4615, 4629), 'slack_sdk.models.blocks.DividerBlock', 'DividerBlock', ([], {}), '()\n', (4627, 4629), False, 'from slack_sdk.models.blocks import ContextBlock, DividerBlock, MarkdownTextObject, SectionBlock\n'), ((5251, 5285), 're.sub', 're.sub', (['"""<@.*>"""', '""""""', "event['text']"], {}), "('<@.*>', '', event['text'])\n", (5257, 5285), False, 'import re\n'), ((7058, 7087), 'json.dumps', 'json.dumps', (["{'message': 'ok'}"], {}), "({'message': 'ok'})\n", (7068, 7087), False, 'import json\n'), ((4563, 4600), 'slack_sdk.models.blocks.MarkdownTextObject', 'MarkdownTextObject', ([], {'text': 'self.message'}), '(text=self.message)\n', (4581, 4600), False, 'from slack_sdk.models.blocks import ContextBlock, DividerBlock, 
MarkdownTextObject, SectionBlock\n'), ((4666, 4706), 'slack_sdk.models.blocks.MarkdownTextObject', 'MarkdownTextObject', ([], {'text': 'message_context'}), '(text=message_context)\n', (4684, 4706), False, 'from slack_sdk.models.blocks import ContextBlock, DividerBlock, MarkdownTextObject, SectionBlock\n')]
"""LanceDB vector store.""" import logging from typing import Any, List, Optional import numpy as np from pandas import DataFrame from llama_index.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.vector_stores.types import ( MetadataFilters, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from llama_index.vector_stores.utils import ( DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict, ) _logger = logging.getLogger(__name__) def _to_lance_filter(standard_filters: MetadataFilters) -> Any: """Translate standard metadata filters to Lance specific spec.""" filters = [] for filter in standard_filters.legacy_filters(): if isinstance(filter.value, str): filters.append(filter.key + ' = "' + filter.value + '"') else: filters.append(filter.key + " = " + str(filter.value)) return " AND ".join(filters) def _to_llama_similarities(results: DataFrame) -> List[float]: keys = results.keys() normalized_similarities: np.ndarray if "score" in keys: normalized_similarities = np.exp(results["score"] - np.max(results["score"])) elif "_distance" in keys: normalized_similarities = np.exp(-results["_distance"]) else: normalized_similarities = np.linspace(1, 0, len(results)) return normalized_similarities.tolist() class LanceDBVectorStore(VectorStore): """ The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". vector_column_name (str, optional): The vector column name in the table if different from default. Defaults to "vector", in keeping with lancedb convention. nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. 
""" stores_text = True flat_metadata: bool = True def __init__( self, uri: str, table_name: str = "vectors", vector_column_name: str = "vector", nprobes: int = 20, refine_factor: Optional[int] = None, text_key: str = DEFAULT_TEXT_KEY, doc_id_key: str = DEFAULT_DOC_ID_KEY, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.vector_column_name = vector_column_name self.nprobes = nprobes self.text_key = text_key self.doc_id_key = doc_id_key self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return def add( self, nodes: List[BaseNode], **add_kwargs: Any, ) -> List[str]: data = [] ids = [] for node in nodes: metadata = node_to_metadata_dict( node, remove_text=False, flat_metadata=self.flat_metadata ) append_data = { "id": node.node_id, "doc_id": node.ref_doc_id, "vector": node.get_embedding(), "text": node.get_content(metadata_mode=MetadataMode.NONE), "metadata": metadata, } data.append(append_data) ids.append(node.node_id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. """ table = self.connection.open_table(self.table_name) table.delete('document_id = "' + ref_doc_id + '"') def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface." ) where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) table = self.connection.open_table(self.table_name) lance_query = ( table.search( query=query.query_embedding, vector_column_name=self.vector_column_name, ) .limit(query.similarity_top_k) .where(where) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_pandas() nodes = [] for _, item in results.iterrows(): try: node = metadata_dict_to_node(item.metadata) node.embedding = list(item[self.vector_column_name]) except Exception: # deprecated legacy logic for backward compatibility _logger.debug( "Failed to parse Node metadata, fallback to legacy logic." ) if "metadata" in item: metadata, node_info, _relation = legacy_metadata_dict_to_node( item.metadata, text_key=self.text_key ) else: metadata, node_info = {}, {} node = TextNode( text=item[self.text_key] or "", id_=item.id, metadata=metadata, start_char_idx=node_info.get("start", None), end_char_idx=node_info.get("end", None), relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id=item[self.doc_id_key] ), }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((585, 612), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (602, 612), False, 'import logging\n'), ((3266, 3286), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3281, 3286), False, 'import lancedb\n'), ((1349, 1378), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1355, 1378), True, 'import numpy as np\n'), ((3821, 3906), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(False)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=False, flat_metadata=self.flat_metadata\n )\n', (3842, 3906), False, 'from llama_index.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((1259, 1283), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1265, 1283), True, 'import numpy as np\n'), ((6094, 6130), 'llama_index.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['item.metadata'], {}), '(item.metadata)\n', (6115, 6130), False, 'from llama_index.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((6519, 6586), 'llama_index.vector_stores.utils.legacy_metadata_dict_to_node', 'legacy_metadata_dict_to_node', (['item.metadata'], {'text_key': 'self.text_key'}), '(item.metadata, text_key=self.text_key)\n', (6547, 6586), False, 'from llama_index.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((7072, 7118), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item[self.doc_id_key]'}), '(node_id=item[self.doc_id_key])\n', (7087, 7118), False, 'from llama_index.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')]
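A hypothetical smoke test for the LanceDBVectorStore defined above, using only names that row already imports; the node text, three-dimensional embedding, and /tmp path are made-up values, and the sketch is illustrative rather than verified against a specific llama_index release.

from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores.types import VectorStoreQuery

store = LanceDBVectorStore(uri="/tmp/lancedb", table_name="vectors")  # class defined above
node = TextNode(text="hello lancedb", id_="n-1")
node.relationships[NodeRelationship.SOURCE] = RelatedNodeInfo(node_id="doc-1")  # gives the node a ref_doc_id
node.embedding = [0.1, 0.2, 0.3]
store.add([node])
result = store.query(VectorStoreQuery(query_embedding=[0.1, 0.2, 0.3], similarity_top_k=1))
print(result.ids)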
"""LanceDB vector store.""" from typing import Any, List, Optional import numpy as np from pandas import DataFrame from llama_index.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.vector_stores.types import ( MetadataFilters, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from llama_index.vector_stores.utils import node_to_metadata_dict def _to_lance_filter(standard_filters: MetadataFilters) -> Any: """Translate standard metadata filters to Lance specific spec.""" filters = [] for filter in standard_filters.filters: if isinstance(filter.value, str): filters.append(filter.key + ' = "' + filter.value + '"') else: filters.append(filter.key + " = " + str(filter.value)) return " AND ".join(filters) def _to_llama_similarities(results: DataFrame) -> List[float]: keys = results.keys() normalized_similarities: np.ndarray if "score" in keys: normalized_similarities = np.exp(results["score"] - np.max(results["score"])) elif "_distance" in keys: normalized_similarities = np.exp(-results["_distance"]) else: normalized_similarities = np.linspace(1, 0, len(results)) return normalized_similarities.tolist() class LanceDBVectorStore(VectorStore): """The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. """ stores_text = True flat_metadata: bool = True def __init__( self, uri: str, table_name: str = "vectors", nprobes: int = 20, refine_factor: Optional[int] = None, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.nprobes = nprobes self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return def add( self, nodes: List[BaseNode], **add_kwargs: Any, ) -> List[str]: data = [] ids = [] for node in nodes: metadata = node_to_metadata_dict( node, remove_text=True, flat_metadata=self.flat_metadata ) append_data = { "id": node.node_id, "doc_id": node.ref_doc_id, "vector": node.get_embedding(), "text": node.get_content(metadata_mode=MetadataMode.NONE), } append_data.update(metadata) data.append(append_data) ids.append(node.node_id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. 
""" table = self.connection.open_table(self.table_name) table.delete('document_id = "' + ref_doc_id + '"') def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface." ) where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) table = self.connection.open_table(self.table_name) lance_query = ( table.search(query.query_embedding) .limit(query.similarity_top_k) .where(where) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_df() nodes = [] for _, item in results.iterrows(): node = TextNode( text=item.text, id_=item.id, relationships={ NodeRelationship.SOURCE: RelatedNodeInfo(node_id=item.doc_id), }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((2773, 2793), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2788, 2793), False, 'import lancedb\n'), ((1170, 1199), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1176, 1199), True, 'import numpy as np\n'), ((3205, 3284), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (3226, 3284), False, 'from llama_index.vector_stores.utils import node_to_metadata_dict\n'), ((1080, 1104), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1086, 1104), True, 'import numpy as np\n'), ((5506, 5542), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item.doc_id'}), '(node_id=item.doc_id)\n', (5521, 5542), False, 'from llama_index.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')]
from dotenv import load_dotenv from pathlib import Path from typing import List import lancedb import pypdf from gpt_pdf_bot.shared import embed_text from gpt_pdf_bot.types import Chunk, Document, Page load_dotenv() class PdfIngestionPipeline: def __init__(self, pdf_directory: str): self.pdf_directory = Path(pdf_directory) self.db = lancedb.connect(uri=".lancedb") def run(self) -> None: print(f"Running ingestion pipeline on {self.pdf_directory}", end="\n\n") documents = self.read_pdfs() print(f"Read {len(documents)} documents", end="\n\n") print("Splitting documents into chunks", end="\n\n") chunks = self.chunk_documents(documents=documents) print("Creating and persisting embeddings", end="\n\n") table = self.create_and_persist_embeddings(chunks=chunks) def read_pdfs(self) -> List[Document]: documents = [] for pdf in self.pdf_directory.glob("*.pdf"): print(f"Reading {pdf.name}") reader = pypdf.PdfReader(pdf) doc = Document( source=pdf.name, metadata=reader.metadata, ) for i, page in enumerate(reader.pages): doc.pages.append( Page( page_num=i, text=page.extract_text(), ) ) documents.append(doc) return documents def chunk_documents( self, documents: List[Document], chunk_size: int = 1000, chunk_overlap: int = 200, ) -> List[Chunk]: # TODO: This is a naive implementation that is not smart about splitting on word boundaries chunks = [] for doc in documents: for page in doc.pages: for i in range(0, len(page.text), chunk_size - chunk_overlap): chunks.append( Chunk( text=page.text[i : i + chunk_size], metadata={"source": doc.source, "page_num": page.page_num}, ) ) return chunks def create_and_persist_embeddings(self, chunks: List[Chunk]): # check if table exists # TODO: this is a naive way to not recompute embeddings for the same pdfs if self.pdf_directory.name in self.db.table_names(): return self.db[self.pdf_directory.name] texts = [chunk.text for chunk in chunks] embeddings = embed_text(texts=texts) merged = [] for chunk, emb in zip(chunks, embeddings): merged.append( { "text": chunk.text, "metadata": chunk.metadata, "vector": emb, } ) table = self.db.create_table(self.pdf_directory.name, data=merged) print(f"Wrote {len(embeddings)} embeddings to {table.name} table") return table
[ "lancedb.connect" ]
[((204, 217), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (215, 217), False, 'from dotenv import load_dotenv\n'), ((321, 340), 'pathlib.Path', 'Path', (['pdf_directory'], {}), '(pdf_directory)\n', (325, 340), False, 'from pathlib import Path\n'), ((359, 390), 'lancedb.connect', 'lancedb.connect', ([], {'uri': '""".lancedb"""'}), "(uri='.lancedb')\n", (374, 390), False, 'import lancedb\n'), ((2552, 2575), 'gpt_pdf_bot.shared.embed_text', 'embed_text', ([], {'texts': 'texts'}), '(texts=texts)\n', (2562, 2575), False, 'from gpt_pdf_bot.shared import embed_text\n'), ((1034, 1054), 'pypdf.PdfReader', 'pypdf.PdfReader', (['pdf'], {}), '(pdf)\n', (1049, 1054), False, 'import pypdf\n'), ((1073, 1124), 'gpt_pdf_bot.types.Document', 'Document', ([], {'source': 'pdf.name', 'metadata': 'reader.metadata'}), '(source=pdf.name, metadata=reader.metadata)\n', (1081, 1124), False, 'from gpt_pdf_bot.types import Chunk, Document, Page\n'), ((1958, 2061), 'gpt_pdf_bot.types.Chunk', 'Chunk', ([], {'text': 'page.text[i:i + chunk_size]', 'metadata': "{'source': doc.source, 'page_num': page.page_num}"}), "(text=page.text[i:i + chunk_size], metadata={'source': doc.source,\n 'page_num': page.page_num})\n", (1963, 2061), False, 'from gpt_pdf_bot.types import Chunk, Document, Page\n')]
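A hypothetical driver for the pipeline above; the directory name "pdfs" is an assumption. Because create_and_persist_embeddings names the table after the ingested directory, the same table can be reopened later from the pipeline's .lancedb store.

pipeline = PdfIngestionPipeline(pdf_directory="pdfs")  # class defined above
pipeline.run()
# Reopen the table that run() created (or reused) for ad-hoc queries.
table = pipeline.db.open_table("pdfs")
print(table.name)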
import streamlit as st import pandas as pd import json import requests from datetime import datetime from pathlib import Path import lancedb from services.lancedb_index import IndexDocuments from services.lancedb_notes import IndexDocumentsNotes import yaml ### For multipage note taking, save to a json and then load the json in a state ### Delete the file ### Create second page for using notes and searching those ### ### Sends it to a notes embedding api and index ### Create setup page to add templates, research buckets, etc ### ### Store initially as json, then as sqlite ### Create a drag and drop file upload with schema to add data ### ### Low priority ### Create a page where selected results are sent to a model for summaries for review/notes/etc ### ### Tag with the query, datetime, maybe a name from a modal pop out? ### Create a notes template st.set_page_config(layout='wide', page_title='Search') notes_folder = Path('data/notes') collections_folder = Path('data/collections') tmp_folder = Path('data/tmp') st.title("Welcome to streamSearchable\n**Your local reSearch engine**") st.header("Query your data here:") index_folder = Path('indexes') index = lancedb.connect(index_folder) available_indexes = index.table_names() index_to_search = st.selectbox(label='Available Indexes', options=available_indexes) query = st.text_input(label="What do you want to search?", value='') result_cutoff = st.number_input(label='Result cutoff', value=50) keyword_importance = st.slider(label='Importance of keyword matches', min_value=0.0, max_value=1.0, step=0.05, value=0.5) st.session_state['query'] = query @st.cache_data def remote_search(query, collection_name): results = requests.post('http://localhost:8000/hybrid', json={'query':query, 'collection_name':collection_name, 'top_k':50, 'fts_weight':keyword_importance, 'vec_weight':1-keyword_importance}) result_data, available_fields = results.json() available_fields = set(available_fields) new_fields = set() for result in result_data: if 'metadata' in result and len(result['metadata']) > 0: metadata = json.loads(result['metadata']) result.update(metadata) new_fields.update(metadata.keys()) del result['metadata'] return result_data, sorted(list(new_fields)) if 'results_to_save' not in st.session_state: st.session_state['results_to_save'] = dict() def add_result_to_save(result): note_hash = hash(str(result)) st.write(st.session_state['results_to_save'].keys()) if note_hash not in st.session_state['results_to_save']: st.session_state['results_to_save'][note_hash] = result else: del st.session_state['results_to_save'][note_hash] with st.sidebar: current_collections = index.table_names() selected_collections = st.selectbox('Existing Collection Destination', options=current_collections) new_index = st.text_input(label='Create new index', value='') note_quick_view = [x['title'] for _hash, x in st.session_state['results_to_save'].items()] st.markdown("Selected Notes") st.json(note_quick_view, expanded=False) if st.button('Save selected results'): if new_index != '': notes_save_name = new_index else: notes_save_name = selected_collections notes_save_name = notes_save_name + '_notes' notes_save_path = collections_folder.joinpath(notes_save_name) notes_save_path.mkdir(parents=True, exist_ok=True) notes_save_file = notes_save_path.joinpath('notes.json') save_data = list() for session_key, _note in st.session_state['results_to_save'].items(): save_data.append(_note) with open(notes_save_file, 'w') as f: json.dump(save_data, f) indexer = 
IndexDocumentsNotes(field_mapping={'text':'text', 'tags':'tags','title':'title','date':'date'}, source_file=notes_save_file, index_name=notes_save_name, overwrite=False) indexer.open_json() indexer.create_documents() indexer.ingest() st.write('done') if query: query_results, available_fields = remote_search(query, index_to_search) show_fields = st.multiselect("Show Fields", available_fields, default=available_fields) st.write(len(query_results)) for index, result in enumerate(query_results): st.markdown(f"**:blue[{result['title']}]**") st.markdown(f"*:blue[Score: {round(result['score'], 3)}]*") with st.container(): st.write(f"{' '.join(result['text'].split(' ')[:100])}.....") with st.expander('See Full Text and Details'): full_text, quick_annotate = st.columns([4,1]) with full_text: if 'date' in result: st.markdown(f"""**Date:** {result['date']}""") if 'tags' in result: st.markdown(f"""**Tags:** {', '.join(result['tags'])}""") st.markdown('**Text:**') st.markdown(result['text']) for _field in show_fields: st.markdown(f"**{_field}:** {result[_field]}") save_to_collection = st.toggle('Save to collection',key=f'toggle_{index}', on_change=add_result_to_save, args=(result, )) st.divider()
[ "lancedb.connect" ]
[((864, 918), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""', 'page_title': '"""Search"""'}), "(layout='wide', page_title='Search')\n", (882, 918), True, 'import streamlit as st\n'), ((953, 971), 'pathlib.Path', 'Path', (['"""data/notes"""'], {}), "('data/notes')\n", (957, 971), False, 'from pathlib import Path\n'), ((993, 1017), 'pathlib.Path', 'Path', (['"""data/collections"""'], {}), "('data/collections')\n", (997, 1017), False, 'from pathlib import Path\n'), ((1031, 1047), 'pathlib.Path', 'Path', (['"""data/tmp"""'], {}), "('data/tmp')\n", (1035, 1047), False, 'from pathlib import Path\n'), ((1049, 1123), 'streamlit.title', 'st.title', (['"""Welcome to streamSearchable\n**Your local reSearch engine**"""'], {}), '("""Welcome to streamSearchable\n**Your local reSearch engine**""")\n', (1057, 1123), True, 'import streamlit as st\n'), ((1123, 1157), 'streamlit.header', 'st.header', (['"""Query your data here:"""'], {}), "('Query your data here:')\n", (1132, 1157), True, 'import streamlit as st\n'), ((1173, 1188), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (1177, 1188), False, 'from pathlib import Path\n'), ((1198, 1227), 'lancedb.connect', 'lancedb.connect', (['index_folder'], {}), '(index_folder)\n', (1213, 1227), False, 'import lancedb\n'), ((1286, 1352), 'streamlit.selectbox', 'st.selectbox', ([], {'label': '"""Available Indexes"""', 'options': 'available_indexes'}), "(label='Available Indexes', options=available_indexes)\n", (1298, 1352), True, 'import streamlit as st\n'), ((1362, 1422), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""What do you want to search?"""', 'value': '""""""'}), "(label='What do you want to search?', value='')\n", (1375, 1422), True, 'import streamlit as st\n'), ((1439, 1487), 'streamlit.number_input', 'st.number_input', ([], {'label': '"""Result cutoff"""', 'value': '(50)'}), "(label='Result cutoff', value=50)\n", (1454, 1487), True, 'import streamlit as st\n'), ((1509, 1614), 'streamlit.slider', 'st.slider', ([], {'label': '"""Importance of keyword matches"""', 'min_value': '(0.0)', 'max_value': '(1.0)', 'step': '(0.05)', 'value': '(0.5)'}), "(label='Importance of keyword matches', min_value=0.0, max_value=\n 1.0, step=0.05, value=0.5)\n", (1518, 1614), True, 'import streamlit as st\n'), ((1748, 1945), 'requests.post', 'requests.post', (['"""http://localhost:8000/hybrid"""'], {'json': "{'query': query, 'collection_name': collection_name, 'top_k': 50,\n 'fts_weight': keyword_importance, 'vec_weight': 1 - keyword_importance}"}), "('http://localhost:8000/hybrid', json={'query': query,\n 'collection_name': collection_name, 'top_k': 50, 'fts_weight':\n keyword_importance, 'vec_weight': 1 - keyword_importance})\n", (1761, 1945), False, 'import requests\n'), ((2933, 3009), 'streamlit.selectbox', 'st.selectbox', (['"""Existing Collection Destination"""'], {'options': 'current_collections'}), "('Existing Collection Destination', options=current_collections)\n", (2945, 3009), True, 'import streamlit as st\n'), ((3065, 3114), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""Create new index"""', 'value': '""""""'}), "(label='Create new index', value='')\n", (3078, 3114), True, 'import streamlit as st\n'), ((3215, 3244), 'streamlit.markdown', 'st.markdown', (['"""Selected Notes"""'], {}), "('Selected Notes')\n", (3226, 3244), True, 'import streamlit as st\n'), ((3249, 3289), 'streamlit.json', 'st.json', (['note_quick_view'], {'expanded': '(False)'}), '(note_quick_view, expanded=False)\n', 
(3256, 3289), True, 'import streamlit as st\n'), ((3297, 3331), 'streamlit.button', 'st.button', (['"""Save selected results"""'], {}), "('Save selected results')\n", (3306, 3331), True, 'import streamlit as st\n'), ((4444, 4517), 'streamlit.multiselect', 'st.multiselect', (['"""Show Fields"""', 'available_fields'], {'default': 'available_fields'}), "('Show Fields', available_fields, default=available_fields)\n", (4458, 4517), True, 'import streamlit as st\n'), ((3957, 4141), 'services.lancedb_notes.IndexDocumentsNotes', 'IndexDocumentsNotes', ([], {'field_mapping': "{'text': 'text', 'tags': 'tags', 'title': 'title', 'date': 'date'}", 'source_file': 'notes_save_file', 'index_name': 'notes_save_name', 'overwrite': '(False)'}), "(field_mapping={'text': 'text', 'tags': 'tags', 'title':\n 'title', 'date': 'date'}, source_file=notes_save_file, index_name=\n notes_save_name, overwrite=False)\n", (3976, 4141), False, 'from services.lancedb_notes import IndexDocumentsNotes\n'), ((4322, 4338), 'streamlit.write', 'st.write', (['"""done"""'], {}), "('done')\n", (4330, 4338), True, 'import streamlit as st\n'), ((4610, 4654), 'streamlit.markdown', 'st.markdown', (['f"""**:blue[{result[\'title\']}]**"""'], {}), '(f"**:blue[{result[\'title\']}]**")\n', (4621, 4654), True, 'import streamlit as st\n'), ((5610, 5622), 'streamlit.divider', 'st.divider', ([], {}), '()\n', (5620, 5622), True, 'import streamlit as st\n'), ((2230, 2260), 'json.loads', 'json.loads', (["result['metadata']"], {}), "(result['metadata'])\n", (2240, 2260), False, 'import json\n'), ((3914, 3937), 'json.dump', 'json.dump', (['save_data', 'f'], {}), '(save_data, f)\n', (3923, 3937), False, 'import json\n'), ((4736, 4750), 'streamlit.container', 'st.container', ([], {}), '()\n', (4748, 4750), True, 'import streamlit as st\n'), ((5458, 5563), 'streamlit.toggle', 'st.toggle', (['"""Save to collection"""'], {'key': 'f"""toggle_{index}"""', 'on_change': 'add_result_to_save', 'args': '(result,)'}), "('Save to collection', key=f'toggle_{index}', on_change=\n add_result_to_save, args=(result,))\n", (5467, 5563), True, 'import streamlit as st\n'), ((4843, 4883), 'streamlit.expander', 'st.expander', (['"""See Full Text and Details"""'], {}), "('See Full Text and Details')\n", (4854, 4883), True, 'import streamlit as st\n'), ((4929, 4947), 'streamlit.columns', 'st.columns', (['[4, 1]'], {}), '([4, 1])\n', (4939, 4947), True, 'import streamlit as st\n'), ((5234, 5258), 'streamlit.markdown', 'st.markdown', (['"""**Text:**"""'], {}), "('**Text:**')\n", (5245, 5258), True, 'import streamlit as st\n'), ((5279, 5306), 'streamlit.markdown', 'st.markdown', (["result['text']"], {}), "(result['text'])\n", (5290, 5306), True, 'import streamlit as st\n'), ((5044, 5086), 'streamlit.markdown', 'st.markdown', (['f"""**Date:** {result[\'date\']}"""'], {}), '(f"**Date:** {result[\'date\']}")\n', (5055, 5086), True, 'import streamlit as st\n'), ((5378, 5424), 'streamlit.markdown', 'st.markdown', (['f"""**{_field}:** {result[_field]}"""'], {}), "(f'**{_field}:** {result[_field]}')\n", (5389, 5424), True, 'import streamlit as st\n')]
""" Unit test for retrieve_utils.py """ try: import chromadb from autogen.retrieve_utils import ( split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db, ) from autogen.token_count_utils import count_token except ImportError: skip = True else: skip = False import os import sys import pytest try: from unstructured.partition.auto import partition HAS_UNSTRUCTURED = True except ImportError: HAS_UNSTRUCTURED = False test_dir = os.path.join(os.path.dirname(__file__), "test_files") expected_text = """AutoGen is an advanced tool designed to assist developers in harnessing the capabilities of Large Language Models (LLMs) for various applications. The primary purpose of AutoGen is to automate and simplify the process of building applications that leverage the power of LLMs, allowing for seamless integration, testing, and deployment.""" @pytest.mark.skipif(skip, reason="dependency is not installed") class TestRetrieveUtils: def test_split_text_to_chunks(self): long_text = "A" * 10000 chunks = split_text_to_chunks(long_text, max_tokens=1000) assert all(count_token(chunk) <= 1000 for chunk in chunks) def test_split_text_to_chunks_raises_on_invalid_chunk_mode(self): with pytest.raises(AssertionError): split_text_to_chunks("A" * 10000, chunk_mode="bogus_chunk_mode") def test_extract_text_from_pdf(self): pdf_file_path = os.path.join(test_dir, "example.pdf") assert "".join(expected_text.split()) == "".join(extract_text_from_pdf(pdf_file_path).strip().split()) def test_split_files_to_chunks(self): pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") chunks = split_files_to_chunks([pdf_file_path, txt_file_path]) assert all( isinstance(chunk, str) and "AutoGen is an advanced tool designed to assist developers" in chunk.strip() for chunk in chunks ) def test_get_files_from_dir(self): files = get_files_from_dir(test_dir, recursive=False) assert all(os.path.isfile(file) for file in files) pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") files = get_files_from_dir([pdf_file_path, txt_file_path]) assert all(os.path.isfile(file) for file in files) files = get_files_from_dir( [ pdf_file_path, txt_file_path, os.path.join(test_dir, "..", "..", "website/docs"), "https://raw.githubusercontent.com/microsoft/autogen/main/README.md", ], recursive=True, ) assert all(os.path.isfile(file) for file in files) files = get_files_from_dir( [ pdf_file_path, txt_file_path, os.path.join(test_dir, "..", "..", "website/docs"), "https://raw.githubusercontent.com/microsoft/autogen/main/README.md", ], recursive=True, types=["pdf", "txt"], ) assert all(os.path.isfile(file) for file in files) assert len(files) == 3 def test_is_url(self): assert is_url("https://www.example.com") assert not is_url("not_a_url") def test_create_vector_db_from_dir(self): db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): client = chromadb.PersistentClient(path=db_path) else: client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir(test_dir, client=client) assert client.get_collection("all-my-documents") def test_query_vector_db(self): db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): client = chromadb.PersistentClient(path=db_path) else: # If the database does not exist, create it first client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir(test_dir, client=client) results = 
query_vector_db(["autogen"], client=client) assert isinstance(results, dict) and any("autogen" in res[0].lower() for res in results.get("documents", [])) def test_custom_vector_db(self): try: import lancedb except ImportError: return from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent db_path = "/tmp/lancedb" def create_lancedb(): db = lancedb.connect(db_path) data = [ {"vector": [1.1, 1.2], "id": 1, "documents": "This is a test document spark"}, {"vector": [0.2, 1.8], "id": 2, "documents": "This is another test document"}, {"vector": [0.1, 0.3], "id": 3, "documents": "This is a third test document spark"}, {"vector": [0.5, 0.7], "id": 4, "documents": "This is a fourth test document"}, {"vector": [2.1, 1.3], "id": 5, "documents": "This is a fifth test document spark"}, {"vector": [5.1, 8.3], "id": 6, "documents": "This is a sixth test document"}, ] try: db.create_table("my_table", data) except OSError: pass class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent): def query_vector_db( self, query_texts, n_results=10, search_string="", ): if query_texts: vector = [0.1, 0.3] db = lancedb.connect(db_path) table = db.open_table("my_table") query = table.search(vector).where(f"documents LIKE '%{search_string}%'").limit(n_results).to_df() return {"ids": [query["id"].tolist()], "documents": [query["documents"].tolist()]} def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = ""): results = self.query_vector_db( query_texts=[problem], n_results=n_results, search_string=search_string, ) self._results = results print("doc_ids: ", results["ids"]) ragragproxyagent = MyRetrieveUserProxyAgent( name="ragproxyagent", human_input_mode="NEVER", max_consecutive_auto_reply=2, retrieve_config={ "task": "qa", "chunk_token_size": 2000, "client": "__", "embedding_model": "all-mpnet-base-v2", }, ) create_lancedb() ragragproxyagent.retrieve_docs("This is a test document spark", n_results=10, search_string="spark") assert ragragproxyagent._results["ids"] == [[3, 1, 5]] def test_custom_text_split_function(self): def custom_text_split_function(text): return [text[: len(text) // 2], text[len(text) // 2 :]] db_path = "/tmp/test_retrieve_utils_chromadb.db" client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir( os.path.join(test_dir, "example.txt"), client=client, collection_name="mytestcollection", custom_text_split_function=custom_text_split_function, get_or_create=True, recursive=False, ) results = query_vector_db(["autogen"], client=client, collection_name="mytestcollection", n_results=1) assert ( "AutoGen is an advanced tool designed to assist developers in harnessing the capabilities" in results.get("documents")[0][0] ) def test_retrieve_utils(self): client = chromadb.PersistentClient(path="/tmp/chromadb") create_vector_db_from_dir( dir_path="./website/docs", client=client, collection_name="autogen-docs", custom_text_types=["txt", "md", "rtf", "rst"], get_or_create=True, ) results = query_vector_db( query_texts=[ "How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?", ], n_results=4, client=client, collection_name="autogen-docs", search_string="AutoGen", ) print(results["ids"][0]) assert len(results["ids"][0]) == 4 @pytest.mark.skipif( not HAS_UNSTRUCTURED, reason="do not run if unstructured is not installed", ) def test_unstructured(self): pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") word_file_path = os.path.join(test_dir, "example.docx") chunks = 
split_files_to_chunks([pdf_file_path, txt_file_path, word_file_path]) assert all( isinstance(chunk, str) and "AutoGen is an advanced tool designed to assist developers" in chunk.strip() for chunk in chunks ) if __name__ == "__main__": pytest.main() db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): os.remove(db_path) # Delete the database file after tests are finished
[ "lancedb.connect" ]
[((1021, 1083), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip'], {'reason': '"""dependency is not installed"""'}), "(skip, reason='dependency is not installed')\n", (1039, 1083), False, 'import pytest\n'), ((619, 644), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (634, 644), False, 'import os\n'), ((8695, 8794), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_UNSTRUCTURED)'], {'reason': '"""do not run if unstructured is not installed"""'}), "(not HAS_UNSTRUCTURED, reason=\n 'do not run if unstructured is not installed')\n", (8713, 8794), False, 'import pytest\n'), ((9332, 9345), 'pytest.main', 'pytest.main', ([], {}), '()\n', (9343, 9345), False, 'import pytest\n'), ((9407, 9430), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (9421, 9430), False, 'import os\n'), ((1199, 1247), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (['long_text'], {'max_tokens': '(1000)'}), '(long_text, max_tokens=1000)\n', (1219, 1247), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1574, 1611), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1586, 1611), False, 'import os\n'), ((1790, 1827), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1802, 1827), False, 'import os\n'), ((1852, 1889), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (1864, 1889), False, 'import os\n'), ((1907, 1960), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (1928, 1960), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2195, 2240), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['test_dir'], {'recursive': '(False)'}), '(test_dir, recursive=False)\n', (2213, 2240), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2324, 2361), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (2336, 2361), False, 'import os\n'), ((2386, 2423), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (2398, 2423), False, 'import os\n'), ((2440, 2490), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (2458, 2490), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3414, 3447), 'autogen.retrieve_utils.is_url', 'is_url', (['"""https://www.example.com"""'], {}), "('https://www.example.com')\n", (3420, 3447), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3602, 3625), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (3616, 3625), False, 'import os\n'), ((3989, 4012), 
'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (4003, 4012), False, 'import os\n'), ((4283, 4326), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client'}), "(['autogen'], client=client)\n", (4298, 4326), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((7323, 7362), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (7348, 7362), False, 'import chromadb\n'), ((7680, 7777), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client', 'collection_name': '"""mytestcollection"""', 'n_results': '(1)'}), "(['autogen'], client=client, collection_name=\n 'mytestcollection', n_results=1)\n", (7695, 7777), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8002, 8049), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""/tmp/chromadb"""'}), "(path='/tmp/chromadb')\n", (8027, 8049), False, 'import chromadb\n'), ((8058, 8232), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', ([], {'dir_path': '"""./website/docs"""', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'custom_text_types': "['txt', 'md', 'rtf', 'rst']", 'get_or_create': '(True)'}), "(dir_path='./website/docs', client=client,\n collection_name='autogen-docs', custom_text_types=['txt', 'md', 'rtf',\n 'rst'], get_or_create=True)\n", (8083, 8232), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8314, 8524), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', ([], {'query_texts': "['How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ]", 'n_results': '(4)', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'search_string': '"""AutoGen"""'}), "(query_texts=[\n 'How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ], n_results=4, client=client, collection_name='autogen-docs',\n search_string='AutoGen')\n", (8329, 8524), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8870, 8907), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (8882, 8907), False, 'import os\n'), ((8932, 8969), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (8944, 8969), False, 'import os\n'), ((8995, 9033), 'os.path.join', 'os.path.join', (['test_dir', '"""example.docx"""'], {}), "(test_dir, 'example.docx')\n", (9007, 9033), False, 'import os\n'), ((9051, 9120), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path, word_file_path]'], {}), '([pdf_file_path, txt_file_path, word_file_path])\n', (9072, 9120), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((9440, 9458), 'os.remove', 'os.remove', (['db_path'], {}), '(db_path)\n', 
(9449, 9458), False, 'import os\n'), ((1399, 1428), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1412, 1428), False, 'import pytest\n'), ((1442, 1506), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (["('A' * 10000)"], {'chunk_mode': '"""bogus_chunk_mode"""'}), "('A' * 10000, chunk_mode='bogus_chunk_mode')\n", (1462, 1506), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3467, 3486), 'autogen.retrieve_utils.is_url', 'is_url', (['"""not_a_url"""'], {}), "('not_a_url')\n", (3473, 3486), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3648, 3687), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3673, 3687), False, 'import chromadb\n'), ((3723, 3762), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3748, 3762), False, 'import chromadb\n'), ((3775, 3825), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (3800, 3825), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((4035, 4074), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (4060, 4074), False, 'import chromadb\n'), ((4161, 4200), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (4186, 4200), False, 'import chromadb\n'), ((4213, 4263), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (4238, 4263), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((4747, 4771), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (4762, 4771), False, 'import lancedb\n'), ((7410, 7447), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (7422, 7447), False, 'import os\n'), ((2260, 2280), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2274, 2280), False, 'import os\n'), ((2510, 2530), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2524, 2530), False, 'import os\n'), ((2678, 2728), 'os.path.join', 'os.path.join', (['test_dir', '""".."""', '""".."""', '"""website/docs"""'], {}), "(test_dir, '..', '..', 'website/docs')\n", (2690, 2728), False, 'import os\n'), ((2888, 2908), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2902, 2908), False, 'import os\n'), ((3056, 3106), 'os.path.join', 'os.path.join', (['test_dir', '""".."""', '""".."""', '"""website/docs"""'], {}), "(test_dir, '..', '..', 'website/docs')\n", (3068, 3106), False, 'import os\n'), ((3300, 3320), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3314, 3320), False, 'import os\n'), ((5827, 5851), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (5842, 5851), False, 'import lancedb\n'), ((1267, 1285), 
'autogen.token_count_utils.count_token', 'count_token', (['chunk'], {}), '(chunk)\n', (1278, 1285), False, 'from autogen.token_count_utils import count_token\n'), ((1669, 1705), 'autogen.retrieve_utils.extract_text_from_pdf', 'extract_text_from_pdf', (['pdf_file_path'], {}), '(pdf_file_path)\n', (1690, 1705), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n')]
from langchain_community.vectorstores import LanceDB from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings def test_lancedb() -> None: import lancedb embeddings = FakeEmbeddings() db = lancedb.connect("/tmp/lancedb") texts = ["text 1", "text 2", "item 3"] vectors = embeddings.embed_documents(texts) table = db.create_table( "my_table", data=[ {"vector": vectors[idx], "id": text, "text": text} for idx, text in enumerate(texts) ], mode="overwrite", ) store = LanceDB(table, embeddings) result = store.similarity_search("text 1") result_texts = [doc.page_content for doc in result] assert "text 1" in result_texts def test_lancedb_add_texts() -> None: import lancedb embeddings = FakeEmbeddings() db = lancedb.connect("/tmp/lancedb") texts = ["text 1"] vectors = embeddings.embed_documents(texts) table = db.create_table( "my_table", data=[ {"vector": vectors[idx], "id": text, "text": text} for idx, text in enumerate(texts) ], mode="overwrite", ) store = LanceDB(table, embeddings) store.add_texts(["text 2"]) result = store.similarity_search("text 2") result_texts = [doc.page_content for doc in result] assert "text 2" in result_texts
[ "lancedb.connect" ]
[((200, 216), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (214, 216), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((226, 257), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (241, 257), False, 'import lancedb\n'), ((577, 603), 'langchain_community.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (584, 603), False, 'from langchain_community.vectorstores import LanceDB\n'), ((820, 836), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (834, 836), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((846, 877), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (861, 877), False, 'import lancedb\n'), ((1177, 1203), 'langchain_community.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (1184, 1203), False, 'from langchain_community.vectorstores import LanceDB\n')]
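A minimal standalone sketch of the raw lancedb calls the LangChain test above wraps: connect, create_table, and vector search. The table name, vectors, and query here are illustrative, not part of the original test:

import lancedb

# Connect to (or create) a LanceDB database at a local path.
db = lancedb.connect("/tmp/lancedb_sketch")

# Build a table from plain dicts; mode="overwrite" replaces any prior table.
table = db.create_table(
    "sketch_table",
    data=[
        {"vector": [1.0, 0.0], "id": "a", "text": "text 1"},
        {"vector": [0.0, 1.0], "id": "b", "text": "text 2"},
    ],
    mode="overwrite",
)

# Nearest-neighbour search against the stored vectors, limited to one hit.
hits = table.search([1.0, 0.1]).limit(1).to_df()
print(hits["text"].tolist())  # expected: ['text 1']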
""" Unit test for retrieve_utils.py """ from autogen.retrieve_utils import ( split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db, ) from autogen.token_count_utils import count_token import os import pytest import chromadb test_dir = os.path.join(os.path.dirname(__file__), "test_files") expected_text = """AutoGen is an advanced tool designed to assist developers in harnessing the capabilities of Large Language Models (LLMs) for various applications. The primary purpose of AutoGen is to automate and simplify the process of building applications that leverage the power of LLMs, allowing for seamless integration, testing, and deployment.""" class TestRetrieveUtils: def test_split_text_to_chunks(self): long_text = "A" * 10000 chunks = split_text_to_chunks(long_text, max_tokens=1000) assert all(count_token(chunk) <= 1000 for chunk in chunks) def test_split_text_to_chunks_raises_on_invalid_chunk_mode(self): with pytest.raises(AssertionError): split_text_to_chunks("A" * 10000, chunk_mode="bogus_chunk_mode") def test_extract_text_from_pdf(self): pdf_file_path = os.path.join(test_dir, "example.pdf") assert "".join(expected_text.split()) == "".join(extract_text_from_pdf(pdf_file_path).strip().split()) def test_split_files_to_chunks(self): pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") chunks = split_files_to_chunks([pdf_file_path, txt_file_path]) assert all(isinstance(chunk, str) and chunk.strip() for chunk in chunks) def test_get_files_from_dir(self): files = get_files_from_dir(test_dir) assert all(os.path.isfile(file) for file in files) pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") files = get_files_from_dir([pdf_file_path, txt_file_path]) assert all(os.path.isfile(file) for file in files) def test_is_url(self): assert is_url("https://www.example.com") assert not is_url("not_a_url") def test_create_vector_db_from_dir(self): db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): client = chromadb.PersistentClient(path=db_path) else: client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir(test_dir, client=client) assert client.get_collection("all-my-documents") def test_query_vector_db(self): db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): client = chromadb.PersistentClient(path=db_path) else: # If the database does not exist, create it first client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir(test_dir, client=client) results = query_vector_db(["autogen"], client=client) assert isinstance(results, dict) and any("autogen" in res[0].lower() for res in results.get("documents", [])) def test_custom_vector_db(self): try: import lancedb except ImportError: return from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent db_path = "/tmp/lancedb" def create_lancedb(): db = lancedb.connect(db_path) data = [ {"vector": [1.1, 1.2], "id": 1, "documents": "This is a test document spark"}, {"vector": [0.2, 1.8], "id": 2, "documents": "This is another test document"}, {"vector": [0.1, 0.3], "id": 3, "documents": "This is a third test document spark"}, {"vector": [0.5, 0.7], "id": 4, "documents": "This is a fourth test document"}, {"vector": [2.1, 1.3], "id": 5, "documents": "This is a fifth test document spark"}, {"vector": [5.1, 8.3], "id": 6, "documents": "This is a sixth test 
document"}, ] try: db.create_table("my_table", data) except OSError: pass class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent): def query_vector_db( self, query_texts, n_results=10, search_string="", ): if query_texts: vector = [0.1, 0.3] db = lancedb.connect(db_path) table = db.open_table("my_table") query = table.search(vector).where(f"documents LIKE '%{search_string}%'").limit(n_results).to_df() return {"ids": [query["id"].tolist()], "documents": [query["documents"].tolist()]} def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = ""): results = self.query_vector_db( query_texts=[problem], n_results=n_results, search_string=search_string, ) self._results = results print("doc_ids: ", results["ids"]) ragragproxyagent = MyRetrieveUserProxyAgent( name="ragproxyagent", human_input_mode="NEVER", max_consecutive_auto_reply=2, retrieve_config={ "task": "qa", "chunk_token_size": 2000, "client": "__", "embedding_model": "all-mpnet-base-v2", }, ) create_lancedb() ragragproxyagent.retrieve_docs("This is a test document spark", n_results=10, search_string="spark") assert ragragproxyagent._results["ids"] == [[3, 1, 5]] def test_custom_text_split_function(self): def custom_text_split_function(text): return [text[: len(text) // 2], text[len(text) // 2 :]] db_path = "/tmp/test_retrieve_utils_chromadb.db" client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir( os.path.join(test_dir, "example.txt"), client=client, collection_name="mytestcollection", custom_text_split_function=custom_text_split_function, get_or_create=True, ) results = query_vector_db(["autogen"], client=client, collection_name="mytestcollection", n_results=1) assert ( results.get("documents")[0][0] == "AutoGen is an advanced tool designed to assist developers in harnessing the capabilities\nof Large Language Models (LLMs) for various applications. The primary purpose o" ) def test_retrieve_utils(self): client = chromadb.PersistentClient(path="/tmp/chromadb") create_vector_db_from_dir(dir_path="./website/docs", client=client, collection_name="autogen-docs") results = query_vector_db( query_texts=[ "How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?", ], n_results=4, client=client, collection_name="autogen-docs", search_string="AutoGen", ) print(results["ids"][0]) assert len(results["ids"][0]) == 4 if __name__ == "__main__": pytest.main() db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): os.remove(db_path) # Delete the database file after tests are finished
[ "lancedb.connect" ]
[((365, 390), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (380, 390), False, 'import os\n'), ((7383, 7396), 'pytest.main', 'pytest.main', ([], {}), '()\n', (7394, 7396), False, 'import pytest\n'), ((7458, 7481), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (7472, 7481), False, 'import os\n'), ((881, 929), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (['long_text'], {'max_tokens': '(1000)'}), '(long_text, max_tokens=1000)\n', (901, 929), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1256, 1293), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1268, 1293), False, 'import os\n'), ((1472, 1509), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1484, 1509), False, 'import os\n'), ((1534, 1571), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (1546, 1571), False, 'import os\n'), ((1589, 1642), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (1610, 1642), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1780, 1808), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['test_dir'], {}), '(test_dir)\n', (1798, 1808), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1892, 1929), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1904, 1929), False, 'import os\n'), ((1954, 1991), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (1966, 1991), False, 'import os\n'), ((2008, 2058), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (2026, 2058), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2161, 2194), 'autogen.retrieve_utils.is_url', 'is_url', (['"""https://www.example.com"""'], {}), "('https://www.example.com')\n", (2167, 2194), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2349, 2372), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (2363, 2372), False, 'import os\n'), ((2736, 2759), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (2750, 2759), False, 'import os\n'), ((3030, 3073), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client'}), "(['autogen'], client=client)\n", (3045, 3073), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((6070, 6109), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], 
{'path': 'db_path'}), '(path=db_path)\n', (6095, 6109), False, 'import chromadb\n'), ((6398, 6495), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client', 'collection_name': '"""mytestcollection"""', 'n_results': '(1)'}), "(['autogen'], client=client, collection_name=\n 'mytestcollection', n_results=1)\n", (6413, 6495), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((6801, 6848), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""/tmp/chromadb"""'}), "(path='/tmp/chromadb')\n", (6826, 6848), False, 'import chromadb\n'), ((6857, 6960), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', ([], {'dir_path': '"""./website/docs"""', 'client': 'client', 'collection_name': '"""autogen-docs"""'}), "(dir_path='./website/docs', client=client,\n collection_name='autogen-docs')\n", (6882, 6960), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((6975, 7185), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', ([], {'query_texts': "['How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ]", 'n_results': '(4)', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'search_string': '"""AutoGen"""'}), "(query_texts=[\n 'How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ], n_results=4, client=client, collection_name='autogen-docs',\n search_string='AutoGen')\n", (6990, 7185), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((7491, 7509), 'os.remove', 'os.remove', (['db_path'], {}), '(db_path)\n', (7500, 7509), False, 'import os\n'), ((1081, 1110), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1094, 1110), False, 'import pytest\n'), ((1124, 1188), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (["('A' * 10000)"], {'chunk_mode': '"""bogus_chunk_mode"""'}), "('A' * 10000, chunk_mode='bogus_chunk_mode')\n", (1144, 1188), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2214, 2233), 'autogen.retrieve_utils.is_url', 'is_url', (['"""not_a_url"""'], {}), "('not_a_url')\n", (2220, 2233), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2395, 2434), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (2420, 2434), False, 'import chromadb\n'), ((2470, 2509), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (2495, 2509), False, 'import chromadb\n'), ((2522, 2572), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (2547, 2572), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, 
query_vector_db\n'), ((2782, 2821), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (2807, 2821), False, 'import chromadb\n'), ((2908, 2947), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (2933, 2947), False, 'import chromadb\n'), ((2960, 3010), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (2985, 3010), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3494, 3518), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (3509, 3518), False, 'import lancedb\n'), ((6157, 6194), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (6169, 6194), False, 'import os\n'), ((1828, 1848), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (1842, 1848), False, 'import os\n'), ((2078, 2098), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2092, 2098), False, 'import os\n'), ((4574, 4598), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (4589, 4598), False, 'import lancedb\n'), ((949, 967), 'autogen.token_count_utils.count_token', 'count_token', (['chunk'], {}), '(chunk)\n', (960, 967), False, 'from autogen.token_count_utils import count_token\n'), ((1351, 1387), 'autogen.retrieve_utils.extract_text_from_pdf', 'extract_text_from_pdf', (['pdf_file_path'], {}), '(pdf_file_path)\n', (1372, 1387), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n')]
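The custom vector-db test in the row above narrows a LanceDB vector search with a SQL-like predicate. A standalone sketch of that where-clause pattern, with illustrative data and table name:

import lancedb

db = lancedb.connect("/tmp/lancedb")
data = [
    {"vector": [1.1, 1.2], "id": 1, "documents": "a test document spark"},
    {"vector": [0.2, 1.8], "id": 2, "documents": "another test document"},
]
table = db.create_table("filter_demo", data, mode="overwrite")

# Combine vector similarity with a LIKE predicate on a text column,
# mirroring the search-and-filter call in MyRetrieveUserProxyAgent.
hits = (
    table.search([0.1, 0.3])
    .where("documents LIKE '%spark%'")
    .limit(5)
    .to_df()
)
print(hits["id"].tolist())  # only ids whose documents contain 'spark'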
import lancedb import numpy as np import pandas as pd import pyarrow as pa def client_vector_db(vector_db_config: dict) -> lancedb.LanceDBConnection: """Connect to a lancedb instance""" return lancedb.connect(**vector_db_config) def initialize_vector_db_indices( client_vector_db: lancedb.LanceDBConnection, class_name: str, embedding_dimension: int, ) -> bool: """Initialize the LanceDB table; NOTE this pattern currently doesn't work and is due to a bug with lancedb """ schema = pa.schema( [ ("squad_id", pa.string()), ("title", pa.string()), ("context", pa.string()), ("embedding_service", pa.string()), ("model_name", pa.string()), pa.field("vector", type=pa.list_(pa.float32(), list_size=embedding_dimension)), ] ) client_vector_db.create_table(name=class_name, schema=schema, mode="create") return True def reset_vector_db(client_vector_db: lancedb.LanceDBConnection) -> bool: """Delete all tables from the database""" for table_name in client_vector_db.table_names(): client_vector_db.drop_table(table_name) return True def data_objects( ids: list[str], titles: list[str], text_contents: list[str], embeddings: list[np.ndarray], metadata: dict, ) -> list[dict]: """Create valid LanceDB objects""" assert len(ids) == len(titles) == len(text_contents) == len(embeddings) return [ dict(squad_id=id_, title=title, context=context, vector=embedding, **metadata) for id_, title, context, embedding in zip(ids, titles, text_contents, embeddings) ] def push_to_vector_db( client_vector_db: lancedb.LanceDBConnection, class_name: str, data_objects: list[dict], embedding_metric: str = "cosine", ) -> int: """Push dataframe of objects to LanceDB. Return number of objects. """ df = pd.DataFrame.from_records(data_objects) table = client_vector_db.create_table(name=class_name, data=df, mode="overwrite") return table.to_pandas().shape[0]
[ "lancedb.connect" ]
[((203, 238), 'lancedb.connect', 'lancedb.connect', ([], {}), '(**vector_db_config)\n', (218, 238), False, 'import lancedb\n'), ((1932, 1971), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['data_objects'], {}), '(data_objects)\n', (1957, 1971), True, 'import pandas as pd\n'), ((568, 579), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (577, 579), True, 'import pyarrow as pa\n'), ((604, 615), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (613, 615), True, 'import pyarrow as pa\n'), ((642, 653), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (651, 653), True, 'import pyarrow as pa\n'), ((690, 701), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (699, 701), True, 'import pyarrow as pa\n'), ((731, 742), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (740, 742), True, 'import pyarrow as pa\n'), ((790, 802), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (800, 802), True, 'import pyarrow as pa\n')]
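A usage sketch tying the helper functions above together, assuming they are importable and in scope; the uri, embedding size, and metadata values are made up for illustration:

import numpy as np

# lancedb.connect() receives the database uri via **vector_db_config.
client = client_vector_db({"uri": "/tmp/squad_demo"})

objects = data_objects(
    ids=["q1"],
    titles=["Example title"],
    text_contents=["Example context."],
    embeddings=[np.random.rand(4)],
    metadata={"embedding_service": "demo", "model_name": "demo-model"},
)

# Creates (or overwrites) the table and reports how many rows landed.
print(push_to_vector_db(client, "squad_demo", objects))  # -> 1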
import os, sqlite3, lancedb, tiktoken, bcrypt from pinecone import Pinecone, ServerlessSpec from enum import Enum from langchain_community.vectorstores import LanceDB, Chroma from langchain_community.vectorstores import Pinecone as LangPinecone import streamlit as st def SetHeader(page_title: str): st.set_page_config(page_title=page_title, page_icon="https://indico.bnl.gov/event/19560/logo-410523303.png", layout="wide") st.warning("This project is being continuously developed. Please write to ai4eic@gmail.com for any feedback.") col_l, col1, col2, col_r = st.columns([1, 3, 3, 1]) with col1: st.image("https://indico.bnl.gov/event/19560/logo-410523303.png") with col2: st.title("""AI4EIC - RAG QA-ChatBot""", anchor = "AI4EIC-RAG-QA-Bot", help = "Will Link to arxiv proceeding here.") class UserNotFoundError(Exception): pass class DBNotFoundError(Exception): pass def hash_password(password: str): bytes = password.encode('utf-8') salt = bcrypt.gensalt() return bcrypt.hashpw(bytes, salt) def get_user_info(db_name, username): if not os.path.exists(db_name): raise FileNotFoundError(f"Database {db_name} does not exist.") conn = sqlite3.connect(db_name) cursor = conn.cursor() cursor.execute(''' SELECT username, first_name, last_name, password FROM users WHERE username = ? ''', (username,)) user = cursor.fetchone() conn.close() if user: return user else: return None def SetOpenAIModel(model_name: str): if model_name == "4": return False class VectorDB(Enum): LANCE = 1 CHROMA = 2 PINECONE = 3 def GetRetriever(TYPE: str, vector_config: dict, search_config = {}): if TYPE == VectorDB.LANCE.name: db = lancedb.connect(vector_config["db_name"]) table = db.open_table(vector_config["table_name"]) return LanceDB(connection = table, embedding = vector_config["embedding_function"] ).as_retriever(search_type = search_config.get("metric", "similarity"), search_kwargs=search_config.get("search_kwargs", {"k" : 100}) ) elif TYPE == VectorDB.CHROMA.name: return Chroma(persist_directory = vector_config["db_name"], embedding_function = vector_config["embedding_function"], collection_name=vector_config["collection_name"] ).as_retriever(search_type = search_config.get("metric", "similarity"), search_kwargs=search_config.get("search_kwargs", {"k" : 100}) ) elif TYPE == VectorDB.PINECONE.name: pc = Pinecone(api_key = vector_config["db_api_key"]) if vector_config["index_name"] not in pc.list_indexes().names(): raise DBNotFoundError(f"Database {vector_config['index_name']} does not exist.") return LangPinecone.from_existing_index(vector_config["index_name"], vector_config["embedding_function"] ).as_retriever(search_type = search_config.get("metric", "similarity"), search_kwargs=search_config.get("search_kwargs", {"k" : 100}) ) else: raise NotImplementedError("Invalid VectorDB type") def num_tokens_from_prompt(prompt: str, model: str) -> int: """Return the number of tokens used by a prompt.""" encoding = tiktoken.encoding_for_model(model) return len(encoding.encode(prompt)) def num_tokens_from_messages(messages, model) -> int: """Return the number of tokens used by a list of messages.""" encoding = tiktoken.encoding_for_model(model) tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n tokens_per_name = 1 num_tokens = 0 for message in messages: num_tokens += tokens_per_message for key, value in message.items(): num_tokens += len(encoding.encode(value)) if key == "name": num_tokens += tokens_per_name num_tokens += 4 # every reply is primed with <|start|>assistant<|message|> return num_tokens
[ "lancedb.connect" ]
[((305, 433), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': 'page_title', 'page_icon': '"""https://indico.bnl.gov/event/19560/logo-410523303.png"""', 'layout': '"""wide"""'}), "(page_title=page_title, page_icon=\n 'https://indico.bnl.gov/event/19560/logo-410523303.png', layout='wide')\n", (323, 433), True, 'import streamlit as st\n'), ((433, 553), 'streamlit.warning', 'st.warning', (['"""This project is being continuously developed. Please write to ai4eic@gmail.com for any feedback."""'], {}), "(\n 'This project is being continuously developed. Please write to ai4eic@gmail.com for any feedback.'\n )\n", (443, 553), True, 'import streamlit as st\n'), ((575, 599), 'streamlit.columns', 'st.columns', (['[1, 3, 3, 1]'], {}), '([1, 3, 3, 1])\n', (585, 599), True, 'import streamlit as st\n'), ((1000, 1016), 'bcrypt.gensalt', 'bcrypt.gensalt', ([], {}), '()\n', (1014, 1016), False, 'import os, sqlite3, lancedb, tiktoken, bcrypt\n'), ((1028, 1054), 'bcrypt.hashpw', 'bcrypt.hashpw', (['bytes', 'salt'], {}), '(bytes, salt)\n', (1041, 1054), False, 'import os, sqlite3, lancedb, tiktoken, bcrypt\n'), ((1211, 1235), 'sqlite3.connect', 'sqlite3.connect', (['db_name'], {}), '(db_name)\n', (1226, 1235), False, 'import os, sqlite3, lancedb, tiktoken, bcrypt\n'), ((3705, 3739), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['model'], {}), '(model)\n', (3732, 3739), False, 'import os, sqlite3, lancedb, tiktoken, bcrypt\n'), ((3927, 3961), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['model'], {}), '(model)\n', (3954, 3961), False, 'import os, sqlite3, lancedb, tiktoken, bcrypt\n'), ((623, 688), 'streamlit.image', 'st.image', (['"""https://indico.bnl.gov/event/19560/logo-410523303.png"""'], {}), "('https://indico.bnl.gov/event/19560/logo-410523303.png')\n", (631, 688), True, 'import streamlit as st\n'), ((712, 824), 'streamlit.title', 'st.title', (['"""AI4EIC - RAG QA-ChatBot"""'], {'anchor': '"""AI4EIC-RAG-QA-Bot"""', 'help': '"""Will Link to arxiv proceeding here."""'}), "('AI4EIC - RAG QA-ChatBot', anchor='AI4EIC-RAG-QA-Bot', help=\n 'Will Link to arxiv proceeding here.')\n", (720, 824), True, 'import streamlit as st\n'), ((1104, 1127), 'os.path.exists', 'os.path.exists', (['db_name'], {}), '(db_name)\n', (1118, 1127), False, 'import os, sqlite3, lancedb, tiktoken, bcrypt\n'), ((1793, 1834), 'lancedb.connect', 'lancedb.connect', (["vector_config['db_name']"], {}), "(vector_config['db_name'])\n", (1808, 1834), False, 'import os, sqlite3, lancedb, tiktoken, bcrypt\n'), ((1909, 1981), 'langchain_community.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': "vector_config['embedding_function']"}), "(connection=table, embedding=vector_config['embedding_function'])\n", (1916, 1981), False, 'from langchain_community.vectorstores import LanceDB, Chroma\n'), ((2793, 2838), 'pinecone.Pinecone', 'Pinecone', ([], {'api_key': "vector_config['db_api_key']"}), "(api_key=vector_config['db_api_key'])\n", (2801, 2838), False, 'from pinecone import Pinecone, ServerlessSpec\n'), ((2300, 2466), 'langchain_community.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': "vector_config['db_name']", 'embedding_function': "vector_config['embedding_function']", 'collection_name': "vector_config['collection_name']"}), "(persist_directory=vector_config['db_name'], embedding_function=\n vector_config['embedding_function'], collection_name=vector_config[\n 'collection_name'])\n", (2306, 2466), False, 'from langchain_community.vectorstores import 
LanceDB, Chroma\n'), ((3022, 3125), 'langchain_community.vectorstores.Pinecone.from_existing_index', 'LangPinecone.from_existing_index', (["vector_config['index_name']", "vector_config['embedding_function']"], {}), "(vector_config['index_name'], vector_config\n ['embedding_function'])\n", (3054, 3125), True, 'from langchain_community.vectorstores import Pinecone as LangPinecone\n')]
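The auth helpers above hash passwords but stop short of verifying them. A minimal sketch of the matching check; bcrypt.checkpw is the standard counterpart to hashpw, and the function name here is illustrative:

import bcrypt

def verify_password(password: str, hashed: bytes) -> bool:
    # checkpw re-hashes the candidate with the salt embedded in `hashed`
    # and compares the results safely.
    return bcrypt.checkpw(password.encode("utf-8"), hashed)

stored = bcrypt.hashpw(b"s3cret", bcrypt.gensalt())
print(verify_password("s3cret", stored))  # True
print(verify_password("wrong", stored))   # False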
from langchain import PromptTemplate, LLMChain from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.vectorstores import Chroma from langchain.chains import RetrievalQA from langchain.embeddings import HuggingFaceBgeEmbeddings from io import BytesIO from langchain.document_loaders import PyPDFLoader import gradio as gr import lancedb from langchain.vectorstores import LanceDB from langchain.document_loaders import ArxivLoader from langchain.chains import FlareChain from langchain.prompts import PromptTemplate from langchain.chains import LLMChain import os from langchain.llms import OpenAI import getpass os.environ["OPENAI_API_KEY"] = "sk-yourapikeyforopenai" llm = OpenAI() model_name = "BAAI/bge-large-en" model_kwargs = {"device": "cpu"} encode_kwargs = {"normalize_embeddings": False} embeddings = HuggingFaceBgeEmbeddings( model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs ) # here is example https://arxiv.org/pdf/2305.06983.pdf # you need to pass this number to query 2305.06983 # fetch docs from arxiv, in this case it's the FLARE paper docs = ArxivLoader(query="2305.06983", load_max_docs=2).load() # instantiate text splitter text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=150) # split the document into chunks doc_chunks = text_splitter.split_documents(docs) # lancedb vectordb db = lancedb.connect("/tmp/lancedb") table = db.create_table( "documentsai", data=[ { "vector": embeddings.embed_query("Hello World"), "text": "Hello World", "id": "1", } ], mode="overwrite", ) vector_store = LanceDB.from_documents(doc_chunks, embeddings, connection=table) vector_store_retriever = vector_store.as_retriever() flare = FlareChain.from_llm( llm=llm, retriever=vector_store_retriever, max_generation_len=300, min_prob=0.45 ) # Define a function to generate FLARE output based on user input def generate_flare_output(input_text): output = flare.run(input_text) return output input = gr.Text( label="Prompt", show_label=False, max_lines=1, placeholder="Enter your prompt", container=False, ) iface = gr.Interface( fn=generate_flare_output, inputs=input, outputs="text", title="My AI bot", description="FLARE implementation with lancedb & bge embedding.", ) iface.launch(debug=True, share=True)
[ "lancedb.connect" ]
[((704, 712), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (710, 712), False, 'from langchain.llms import OpenAI\n'), ((841, 948), 'langchain.embeddings.HuggingFaceBgeEmbeddings', 'HuggingFaceBgeEmbeddings', ([], {'model_name': 'model_name', 'model_kwargs': 'model_kwargs', 'encode_kwargs': 'encode_kwargs'}), '(model_name=model_name, model_kwargs=model_kwargs,\n encode_kwargs=encode_kwargs)\n', (865, 948), False, 'from langchain.embeddings import HuggingFaceBgeEmbeddings\n'), ((1225, 1291), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1500)', 'chunk_overlap': '(150)'}), '(chunk_size=1500, chunk_overlap=150)\n', (1255, 1291), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1400, 1431), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (1415, 1431), False, 'import lancedb\n'), ((1672, 1736), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['doc_chunks', 'embeddings'], {'connection': 'table'}), '(doc_chunks, embeddings, connection=table)\n', (1694, 1736), False, 'from langchain.vectorstores import LanceDB\n'), ((1800, 1905), 'langchain.chains.FlareChain.from_llm', 'FlareChain.from_llm', ([], {'llm': 'llm', 'retriever': 'vector_store_retriever', 'max_generation_len': '(300)', 'min_prob': '(0.45)'}), '(llm=llm, retriever=vector_store_retriever,\n max_generation_len=300, min_prob=0.45)\n', (1819, 1905), False, 'from langchain.chains import FlareChain\n'), ((2077, 2186), 'gradio.Text', 'gr.Text', ([], {'label': '"""Prompt"""', 'show_label': '(False)', 'max_lines': '(1)', 'placeholder': '"""Enter your prompt"""', 'container': '(False)'}), "(label='Prompt', show_label=False, max_lines=1, placeholder=\n 'Enter your prompt', container=False)\n", (2084, 2186), True, 'import gradio as gr\n'), ((2214, 2377), 'gradio.Interface', 'gr.Interface', ([], {'fn': 'generate_flare_output', 'inputs': 'input', 'outputs': '"""text"""', 'title': '"""My AI bot"""', 'description': '"""FLARE implementation with lancedb & bge embedding."""'}), "(fn=generate_flare_output, inputs=input, outputs='text', title=\n 'My AI bot', description=\n 'FLARE implementation with lancedb & bge embedding.')\n", (2226, 2377), True, 'import gradio as gr\n'), ((1124, 1172), 'langchain.document_loaders.ArxivLoader', 'ArxivLoader', ([], {'query': '"""2305.06983"""', 'load_max_docs': '(2)'}), "(query='2305.06983', load_max_docs=2)\n", (1135, 1172), False, 'from langchain.document_loaders import ArxivLoader\n')]
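Before handing the retriever to FLARE, it can help to sanity-check retrieval on its own. A sketch that assumes the vector_store built above is in scope; the query string is illustrative:

# Query the LanceDB-backed store directly, bypassing the FLARE chain.
docs = vector_store.similarity_search(
    "What does forward-looking active retrieval do?", k=3
)
for doc in docs:
    # Print a short, single-line preview of each retrieved chunk.
    print(doc.page_content[:120].replace("\n", " "))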
"""LanceDB vector store.""" from typing import Any, List, Optional import numpy as np from pandas import DataFrame from llama_index.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.vector_stores.types import ( MetadataFilters, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from llama_index.vector_stores.utils import node_to_metadata_dict def _to_lance_filter(standard_filters: MetadataFilters) -> Any: """Translate standard metadata filters to Lance specific spec.""" filters = [] for filter in standard_filters.filters: if isinstance(filter.value, str): filters.append(filter.key + ' = "' + filter.value + '"') else: filters.append(filter.key + " = " + str(filter.value)) return " AND ".join(filters) def _to_llama_similarities(results: DataFrame) -> List[float]: keys = results.keys() normalized_similarities: np.ndarray if "score" in keys: normalized_similarities = np.exp(results["score"] - np.max(results["score"])) elif "_distance" in keys: normalized_similarities = np.exp(-results["_distance"]) else: normalized_similarities = np.linspace(1, 0, len(results)) return normalized_similarities.tolist() class LanceDBVectorStore(VectorStore): """The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. """ stores_text = True flat_metadata: bool = True def __init__( self, uri: str, table_name: str = "vectors", nprobes: int = 20, refine_factor: Optional[int] = None, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.nprobes = nprobes self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return def add( self, nodes: List[BaseNode], **add_kwargs: Any, ) -> List[str]: data = [] ids = [] for node in nodes: metadata = node_to_metadata_dict( node, remove_text=True, flat_metadata=self.flat_metadata ) append_data = { "id": node.node_id, "doc_id": node.ref_doc_id, "vector": node.get_embedding(), "text": node.get_content(metadata_mode=MetadataMode.NONE), } append_data.update(metadata) data.append(append_data) ids.append(node.node_id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. 
""" table = self.connection.open_table(self.table_name) table.delete('document_id = "' + ref_doc_id + '"') def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface." ) where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) table = self.connection.open_table(self.table_name) lance_query = ( table.search(query.query_embedding) .limit(query.similarity_top_k) .where(where) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_df() nodes = [] for _, item in results.iterrows(): node = TextNode( text=item.text, id_=item.id, relationships={ NodeRelationship.SOURCE: RelatedNodeInfo(node_id=item.doc_id), }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((2773, 2793), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2788, 2793), False, 'import lancedb\n'), ((1170, 1199), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1176, 1199), True, 'import numpy as np\n'), ((3205, 3284), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (3226, 3284), False, 'from llama_index.vector_stores.utils import node_to_metadata_dict\n'), ((1080, 1104), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1086, 1104), True, 'import numpy as np\n'), ((5506, 5542), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item.doc_id'}), '(node_id=item.doc_id)\n', (5521, 5542), False, 'from llama_index.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')]
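A quick check of how _to_lance_filter from the store above turns generic metadata filters into a LanceDB where-string. ExactMatchFilter is assumed to be available from the same llama_index module as MetadataFilters; the keys and values are illustrative:

from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters

filters = MetadataFilters(
    filters=[
        ExactMatchFilter(key="author", value="alice"),
        ExactMatchFilter(key="year", value=2023),
    ]
)

# String values get quoted, non-strings are stringified, clauses are AND-ed.
print(_to_lance_filter(filters))  # author = "alice" AND year = 2023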
""" Unit test for retrieve_utils.py """ try: import chromadb from autogen.retrieve_utils import ( split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db, ) from autogen.token_count_utils import count_token except ImportError: skip = True else: skip = False import os import sys import pytest try: from unstructured.partition.auto import partition HAS_UNSTRUCTURED = True except ImportError: HAS_UNSTRUCTURED = False test_dir = os.path.join(os.path.dirname(__file__), "test_files") expected_text = """AutoGen is an advanced tool designed to assist developers in harnessing the capabilities of Large Language Models (LLMs) for various applications. The primary purpose of AutoGen is to automate and simplify the process of building applications that leverage the power of LLMs, allowing for seamless integration, testing, and deployment.""" @pytest.mark.skipif(skip, reason="dependency is not installed") class TestRetrieveUtils: def test_split_text_to_chunks(self): long_text = "A" * 10000 chunks = split_text_to_chunks(long_text, max_tokens=1000) assert all(count_token(chunk) <= 1000 for chunk in chunks) def test_split_text_to_chunks_raises_on_invalid_chunk_mode(self): with pytest.raises(AssertionError): split_text_to_chunks("A" * 10000, chunk_mode="bogus_chunk_mode") def test_extract_text_from_pdf(self): pdf_file_path = os.path.join(test_dir, "example.pdf") assert "".join(expected_text.split()) == "".join(extract_text_from_pdf(pdf_file_path).strip().split()) def test_split_files_to_chunks(self): pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") chunks = split_files_to_chunks([pdf_file_path, txt_file_path]) assert all( isinstance(chunk, str) and "AutoGen is an advanced tool designed to assist developers" in chunk.strip() for chunk in chunks ) def test_get_files_from_dir(self): files = get_files_from_dir(test_dir, recursive=False) assert all(os.path.isfile(file) for file in files) pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") files = get_files_from_dir([pdf_file_path, txt_file_path]) assert all(os.path.isfile(file) for file in files) files = get_files_from_dir( [ pdf_file_path, txt_file_path, os.path.join(test_dir, "..", "..", "website/docs"), "https://raw.githubusercontent.com/microsoft/autogen/main/README.md", ], recursive=True, ) assert all(os.path.isfile(file) for file in files) files = get_files_from_dir( [ pdf_file_path, txt_file_path, os.path.join(test_dir, "..", "..", "website/docs"), "https://raw.githubusercontent.com/microsoft/autogen/main/README.md", ], recursive=True, types=["pdf", "txt"], ) assert all(os.path.isfile(file) for file in files) assert len(files) == 3 def test_is_url(self): assert is_url("https://www.example.com") assert not is_url("not_a_url") def test_create_vector_db_from_dir(self): db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): client = chromadb.PersistentClient(path=db_path) else: client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir(test_dir, client=client) assert client.get_collection("all-my-documents") def test_query_vector_db(self): db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): client = chromadb.PersistentClient(path=db_path) else: # If the database does not exist, create it first client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir(test_dir, client=client) results = 
query_vector_db(["autogen"], client=client) assert isinstance(results, dict) and any("autogen" in res[0].lower() for res in results.get("documents", [])) def test_custom_vector_db(self): try: import lancedb except ImportError: return from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent db_path = "/tmp/lancedb" def create_lancedb(): db = lancedb.connect(db_path) data = [ {"vector": [1.1, 1.2], "id": 1, "documents": "This is a test document spark"}, {"vector": [0.2, 1.8], "id": 2, "documents": "This is another test document"}, {"vector": [0.1, 0.3], "id": 3, "documents": "This is a third test document spark"}, {"vector": [0.5, 0.7], "id": 4, "documents": "This is a fourth test document"}, {"vector": [2.1, 1.3], "id": 5, "documents": "This is a fifth test document spark"}, {"vector": [5.1, 8.3], "id": 6, "documents": "This is a sixth test document"}, ] try: db.create_table("my_table", data) except OSError: pass class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent): def query_vector_db( self, query_texts, n_results=10, search_string="", ): if query_texts: vector = [0.1, 0.3] db = lancedb.connect(db_path) table = db.open_table("my_table") query = table.search(vector).where(f"documents LIKE '%{search_string}%'").limit(n_results).to_df() return {"ids": [query["id"].tolist()], "documents": [query["documents"].tolist()]} def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = ""): results = self.query_vector_db( query_texts=[problem], n_results=n_results, search_string=search_string, ) self._results = results print("doc_ids: ", results["ids"]) ragragproxyagent = MyRetrieveUserProxyAgent( name="ragproxyagent", human_input_mode="NEVER", max_consecutive_auto_reply=2, retrieve_config={ "task": "qa", "chunk_token_size": 2000, "client": "__", "embedding_model": "all-mpnet-base-v2", }, ) create_lancedb() ragragproxyagent.retrieve_docs("This is a test document spark", n_results=10, search_string="spark") assert ragragproxyagent._results["ids"] == [[3, 1, 5]] def test_custom_text_split_function(self): def custom_text_split_function(text): return [text[: len(text) // 2], text[len(text) // 2 :]] db_path = "/tmp/test_retrieve_utils_chromadb.db" client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir( os.path.join(test_dir, "example.txt"), client=client, collection_name="mytestcollection", custom_text_split_function=custom_text_split_function, get_or_create=True, recursive=False, ) results = query_vector_db(["autogen"], client=client, collection_name="mytestcollection", n_results=1) assert ( "AutoGen is an advanced tool designed to assist developers in harnessing the capabilities" in results.get("documents")[0][0] ) def test_retrieve_utils(self): client = chromadb.PersistentClient(path="/tmp/chromadb") create_vector_db_from_dir( dir_path="./website/docs", client=client, collection_name="autogen-docs", custom_text_types=["txt", "md", "rtf", "rst"], get_or_create=True, ) results = query_vector_db( query_texts=[ "How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?", ], n_results=4, client=client, collection_name="autogen-docs", search_string="AutoGen", ) print(results["ids"][0]) assert len(results["ids"][0]) == 4 @pytest.mark.skipif( not HAS_UNSTRUCTURED, reason="do not run if unstructured is not installed", ) def test_unstructured(self): pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") word_file_path = os.path.join(test_dir, "example.docx") chunks = 
split_files_to_chunks([pdf_file_path, txt_file_path, word_file_path]) assert all( isinstance(chunk, str) and "AutoGen is an advanced tool designed to assist developers" in chunk.strip() for chunk in chunks ) if __name__ == "__main__": pytest.main() db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): os.remove(db_path) # Delete the database file after tests are finished
[ "lancedb.connect" ]
[((1021, 1083), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip'], {'reason': '"""dependency is not installed"""'}), "(skip, reason='dependency is not installed')\n", (1039, 1083), False, 'import pytest\n'), ((619, 644), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (634, 644), False, 'import os\n'), ((8695, 8794), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_UNSTRUCTURED)'], {'reason': '"""do not run if unstructured is not installed"""'}), "(not HAS_UNSTRUCTURED, reason=\n 'do not run if unstructured is not installed')\n", (8713, 8794), False, 'import pytest\n'), ((9332, 9345), 'pytest.main', 'pytest.main', ([], {}), '()\n', (9343, 9345), False, 'import pytest\n'), ((9407, 9430), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (9421, 9430), False, 'import os\n'), ((1199, 1247), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (['long_text'], {'max_tokens': '(1000)'}), '(long_text, max_tokens=1000)\n', (1219, 1247), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1574, 1611), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1586, 1611), False, 'import os\n'), ((1790, 1827), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1802, 1827), False, 'import os\n'), ((1852, 1889), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (1864, 1889), False, 'import os\n'), ((1907, 1960), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (1928, 1960), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2195, 2240), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['test_dir'], {'recursive': '(False)'}), '(test_dir, recursive=False)\n', (2213, 2240), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2324, 2361), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (2336, 2361), False, 'import os\n'), ((2386, 2423), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (2398, 2423), False, 'import os\n'), ((2440, 2490), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (2458, 2490), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3414, 3447), 'autogen.retrieve_utils.is_url', 'is_url', (['"""https://www.example.com"""'], {}), "('https://www.example.com')\n", (3420, 3447), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3602, 3625), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (3616, 3625), False, 'import os\n'), ((3989, 4012), 
'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (4003, 4012), False, 'import os\n'), ((4283, 4326), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client'}), "(['autogen'], client=client)\n", (4298, 4326), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((7323, 7362), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (7348, 7362), False, 'import chromadb\n'), ((7680, 7777), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client', 'collection_name': '"""mytestcollection"""', 'n_results': '(1)'}), "(['autogen'], client=client, collection_name=\n 'mytestcollection', n_results=1)\n", (7695, 7777), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8002, 8049), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""/tmp/chromadb"""'}), "(path='/tmp/chromadb')\n", (8027, 8049), False, 'import chromadb\n'), ((8058, 8232), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', ([], {'dir_path': '"""./website/docs"""', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'custom_text_types': "['txt', 'md', 'rtf', 'rst']", 'get_or_create': '(True)'}), "(dir_path='./website/docs', client=client,\n collection_name='autogen-docs', custom_text_types=['txt', 'md', 'rtf',\n 'rst'], get_or_create=True)\n", (8083, 8232), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8314, 8524), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', ([], {'query_texts': "['How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ]", 'n_results': '(4)', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'search_string': '"""AutoGen"""'}), "(query_texts=[\n 'How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ], n_results=4, client=client, collection_name='autogen-docs',\n search_string='AutoGen')\n", (8329, 8524), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8870, 8907), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (8882, 8907), False, 'import os\n'), ((8932, 8969), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (8944, 8969), False, 'import os\n'), ((8995, 9033), 'os.path.join', 'os.path.join', (['test_dir', '"""example.docx"""'], {}), "(test_dir, 'example.docx')\n", (9007, 9033), False, 'import os\n'), ((9051, 9120), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path, word_file_path]'], {}), '([pdf_file_path, txt_file_path, word_file_path])\n', (9072, 9120), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((9440, 9458), 'os.remove', 'os.remove', (['db_path'], {}), '(db_path)\n', 
(9449, 9458), False, 'import os\n'), ((1399, 1428), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1412, 1428), False, 'import pytest\n'), ((1442, 1506), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (["('A' * 10000)"], {'chunk_mode': '"""bogus_chunk_mode"""'}), "('A' * 10000, chunk_mode='bogus_chunk_mode')\n", (1462, 1506), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3467, 3486), 'autogen.retrieve_utils.is_url', 'is_url', (['"""not_a_url"""'], {}), "('not_a_url')\n", (3473, 3486), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3648, 3687), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3673, 3687), False, 'import chromadb\n'), ((3723, 3762), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3748, 3762), False, 'import chromadb\n'), ((3775, 3825), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (3800, 3825), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((4035, 4074), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (4060, 4074), False, 'import chromadb\n'), ((4161, 4200), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (4186, 4200), False, 'import chromadb\n'), ((4213, 4263), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (4238, 4263), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((4747, 4771), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (4762, 4771), False, 'import lancedb\n'), ((7410, 7447), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (7422, 7447), False, 'import os\n'), ((2260, 2280), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2274, 2280), False, 'import os\n'), ((2510, 2530), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2524, 2530), False, 'import os\n'), ((2678, 2728), 'os.path.join', 'os.path.join', (['test_dir', '""".."""', '""".."""', '"""website/docs"""'], {}), "(test_dir, '..', '..', 'website/docs')\n", (2690, 2728), False, 'import os\n'), ((2888, 2908), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2902, 2908), False, 'import os\n'), ((3056, 3106), 'os.path.join', 'os.path.join', (['test_dir', '""".."""', '""".."""', '"""website/docs"""'], {}), "(test_dir, '..', '..', 'website/docs')\n", (3068, 3106), False, 'import os\n'), ((3300, 3320), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3314, 3320), False, 'import os\n'), ((5827, 5851), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (5842, 5851), False, 'import lancedb\n'), ((1267, 1285), 
'autogen.token_count_utils.count_token', 'count_token', (['chunk'], {}), '(chunk)\n', (1278, 1285), False, 'from autogen.token_count_utils import count_token\n'), ((1669, 1705), 'autogen.retrieve_utils.extract_text_from_pdf', 'extract_text_from_pdf', (['pdf_file_path'], {}), '(pdf_file_path)\n', (1690, 1705), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n')]
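The custom-split test above halves each document instead of token-chunking it. A standalone sketch of that splitter with a toy input (the helper name is illustrative):

def halve_text(text: str) -> list:
    # Mirror the test's custom_text_split_function: two roughly equal halves.
    return [text[: len(text) // 2], text[len(text) // 2 :]]

print([len(chunk) for chunk in halve_text("A" * 11)])  # [5, 6]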
""" Unit test for retrieve_utils.py """ try: import chromadb from autogen.retrieve_utils import ( split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db, ) from autogen.token_count_utils import count_token except ImportError: skip = True else: skip = False import os import sys import pytest try: from unstructured.partition.auto import partition HAS_UNSTRUCTURED = True except ImportError: HAS_UNSTRUCTURED = False test_dir = os.path.join(os.path.dirname(__file__), "test_files") expected_text = """AutoGen is an advanced tool designed to assist developers in harnessing the capabilities of Large Language Models (LLMs) for various applications. The primary purpose of AutoGen is to automate and simplify the process of building applications that leverage the power of LLMs, allowing for seamless integration, testing, and deployment.""" @pytest.mark.skipif(skip, reason="dependency is not installed") class TestRetrieveUtils: def test_split_text_to_chunks(self): long_text = "A" * 10000 chunks = split_text_to_chunks(long_text, max_tokens=1000) assert all(count_token(chunk) <= 1000 for chunk in chunks) def test_split_text_to_chunks_raises_on_invalid_chunk_mode(self): with pytest.raises(AssertionError): split_text_to_chunks("A" * 10000, chunk_mode="bogus_chunk_mode") def test_extract_text_from_pdf(self): pdf_file_path = os.path.join(test_dir, "example.pdf") assert "".join(expected_text.split()) == "".join(extract_text_from_pdf(pdf_file_path).strip().split()) def test_split_files_to_chunks(self): pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") chunks = split_files_to_chunks([pdf_file_path, txt_file_path]) assert all( isinstance(chunk, str) and "AutoGen is an advanced tool designed to assist developers" in chunk.strip() for chunk in chunks ) def test_get_files_from_dir(self): files = get_files_from_dir(test_dir, recursive=False) assert all(os.path.isfile(file) for file in files) pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") files = get_files_from_dir([pdf_file_path, txt_file_path]) assert all(os.path.isfile(file) for file in files) files = get_files_from_dir( [ pdf_file_path, txt_file_path, os.path.join(test_dir, "..", "..", "website/docs"), "https://raw.githubusercontent.com/microsoft/autogen/main/README.md", ], recursive=True, ) assert all(os.path.isfile(file) for file in files) files = get_files_from_dir( [ pdf_file_path, txt_file_path, os.path.join(test_dir, "..", "..", "website/docs"), "https://raw.githubusercontent.com/microsoft/autogen/main/README.md", ], recursive=True, types=["pdf", "txt"], ) assert all(os.path.isfile(file) for file in files) assert len(files) == 3 def test_is_url(self): assert is_url("https://www.example.com") assert not is_url("not_a_url") def test_create_vector_db_from_dir(self): db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): client = chromadb.PersistentClient(path=db_path) else: client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir(test_dir, client=client) assert client.get_collection("all-my-documents") def test_query_vector_db(self): db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): client = chromadb.PersistentClient(path=db_path) else: # If the database does not exist, create it first client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir(test_dir, client=client) results = 
query_vector_db(["autogen"], client=client) assert isinstance(results, dict) and any("autogen" in res[0].lower() for res in results.get("documents", [])) def test_custom_vector_db(self): try: import lancedb except ImportError: return from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent db_path = "/tmp/lancedb" def create_lancedb(): db = lancedb.connect(db_path) data = [ {"vector": [1.1, 1.2], "id": 1, "documents": "This is a test document spark"}, {"vector": [0.2, 1.8], "id": 2, "documents": "This is another test document"}, {"vector": [0.1, 0.3], "id": 3, "documents": "This is a third test document spark"}, {"vector": [0.5, 0.7], "id": 4, "documents": "This is a fourth test document"}, {"vector": [2.1, 1.3], "id": 5, "documents": "This is a fifth test document spark"}, {"vector": [5.1, 8.3], "id": 6, "documents": "This is a sixth test document"}, ] try: db.create_table("my_table", data) except OSError: pass class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent): def query_vector_db( self, query_texts, n_results=10, search_string="", ): if query_texts: vector = [0.1, 0.3] db = lancedb.connect(db_path) table = db.open_table("my_table") query = table.search(vector).where(f"documents LIKE '%{search_string}%'").limit(n_results).to_df() return {"ids": [query["id"].tolist()], "documents": [query["documents"].tolist()]} def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = ""): results = self.query_vector_db( query_texts=[problem], n_results=n_results, search_string=search_string, ) self._results = results print("doc_ids: ", results["ids"]) ragragproxyagent = MyRetrieveUserProxyAgent( name="ragproxyagent", human_input_mode="NEVER", max_consecutive_auto_reply=2, retrieve_config={ "task": "qa", "chunk_token_size": 2000, "client": "__", "embedding_model": "all-mpnet-base-v2", }, ) create_lancedb() ragragproxyagent.retrieve_docs("This is a test document spark", n_results=10, search_string="spark") assert ragragproxyagent._results["ids"] == [[3, 1, 5]] def test_custom_text_split_function(self): def custom_text_split_function(text): return [text[: len(text) // 2], text[len(text) // 2 :]] db_path = "/tmp/test_retrieve_utils_chromadb.db" client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir( os.path.join(test_dir, "example.txt"), client=client, collection_name="mytestcollection", custom_text_split_function=custom_text_split_function, get_or_create=True, recursive=False, ) results = query_vector_db(["autogen"], client=client, collection_name="mytestcollection", n_results=1) assert ( "AutoGen is an advanced tool designed to assist developers in harnessing the capabilities" in results.get("documents")[0][0] ) def test_retrieve_utils(self): client = chromadb.PersistentClient(path="/tmp/chromadb") create_vector_db_from_dir( dir_path="./website/docs", client=client, collection_name="autogen-docs", custom_text_types=["txt", "md", "rtf", "rst"], get_or_create=True, ) results = query_vector_db( query_texts=[ "How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?", ], n_results=4, client=client, collection_name="autogen-docs", search_string="AutoGen", ) print(results["ids"][0]) assert len(results["ids"][0]) == 4 @pytest.mark.skipif( not HAS_UNSTRUCTURED, reason="do not run if unstructured is not installed", ) def test_unstructured(self): pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") word_file_path = os.path.join(test_dir, "example.docx") chunks = 
split_files_to_chunks([pdf_file_path, txt_file_path, word_file_path]) assert all( isinstance(chunk, str) and "AutoGen is an advanced tool designed to assist developers" in chunk.strip() for chunk in chunks ) if __name__ == "__main__": pytest.main() db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): os.remove(db_path) # Delete the database file after tests are finished
[ "lancedb.connect" ]
[((1021, 1083), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip'], {'reason': '"""dependency is not installed"""'}), "(skip, reason='dependency is not installed')\n", (1039, 1083), False, 'import pytest\n'), ((619, 644), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (634, 644), False, 'import os\n'), ((8695, 8794), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_UNSTRUCTURED)'], {'reason': '"""do not run if unstructured is not installed"""'}), "(not HAS_UNSTRUCTURED, reason=\n 'do not run if unstructured is not installed')\n", (8713, 8794), False, 'import pytest\n'), ((9332, 9345), 'pytest.main', 'pytest.main', ([], {}), '()\n', (9343, 9345), False, 'import pytest\n'), ((9407, 9430), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (9421, 9430), False, 'import os\n'), ((1199, 1247), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (['long_text'], {'max_tokens': '(1000)'}), '(long_text, max_tokens=1000)\n', (1219, 1247), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1574, 1611), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1586, 1611), False, 'import os\n'), ((1790, 1827), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1802, 1827), False, 'import os\n'), ((1852, 1889), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (1864, 1889), False, 'import os\n'), ((1907, 1960), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (1928, 1960), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2195, 2240), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['test_dir'], {'recursive': '(False)'}), '(test_dir, recursive=False)\n', (2213, 2240), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2324, 2361), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (2336, 2361), False, 'import os\n'), ((2386, 2423), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (2398, 2423), False, 'import os\n'), ((2440, 2490), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (2458, 2490), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3414, 3447), 'autogen.retrieve_utils.is_url', 'is_url', (['"""https://www.example.com"""'], {}), "('https://www.example.com')\n", (3420, 3447), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3602, 3625), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (3616, 3625), False, 'import os\n'), ((3989, 4012), 
'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (4003, 4012), False, 'import os\n'), ((4283, 4326), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client'}), "(['autogen'], client=client)\n", (4298, 4326), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((7323, 7362), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (7348, 7362), False, 'import chromadb\n'), ((7680, 7777), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client', 'collection_name': '"""mytestcollection"""', 'n_results': '(1)'}), "(['autogen'], client=client, collection_name=\n 'mytestcollection', n_results=1)\n", (7695, 7777), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8002, 8049), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""/tmp/chromadb"""'}), "(path='/tmp/chromadb')\n", (8027, 8049), False, 'import chromadb\n'), ((8058, 8232), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', ([], {'dir_path': '"""./website/docs"""', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'custom_text_types': "['txt', 'md', 'rtf', 'rst']", 'get_or_create': '(True)'}), "(dir_path='./website/docs', client=client,\n collection_name='autogen-docs', custom_text_types=['txt', 'md', 'rtf',\n 'rst'], get_or_create=True)\n", (8083, 8232), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8314, 8524), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', ([], {'query_texts': "['How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ]", 'n_results': '(4)', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'search_string': '"""AutoGen"""'}), "(query_texts=[\n 'How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ], n_results=4, client=client, collection_name='autogen-docs',\n search_string='AutoGen')\n", (8329, 8524), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8870, 8907), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (8882, 8907), False, 'import os\n'), ((8932, 8969), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (8944, 8969), False, 'import os\n'), ((8995, 9033), 'os.path.join', 'os.path.join', (['test_dir', '"""example.docx"""'], {}), "(test_dir, 'example.docx')\n", (9007, 9033), False, 'import os\n'), ((9051, 9120), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path, word_file_path]'], {}), '([pdf_file_path, txt_file_path, word_file_path])\n', (9072, 9120), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((9440, 9458), 'os.remove', 'os.remove', (['db_path'], {}), '(db_path)\n', 
(9449, 9458), False, 'import os\n'), ((1399, 1428), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1412, 1428), False, 'import pytest\n'), ((1442, 1506), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (["('A' * 10000)"], {'chunk_mode': '"""bogus_chunk_mode"""'}), "('A' * 10000, chunk_mode='bogus_chunk_mode')\n", (1462, 1506), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3467, 3486), 'autogen.retrieve_utils.is_url', 'is_url', (['"""not_a_url"""'], {}), "('not_a_url')\n", (3473, 3486), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3648, 3687), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3673, 3687), False, 'import chromadb\n'), ((3723, 3762), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3748, 3762), False, 'import chromadb\n'), ((3775, 3825), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (3800, 3825), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((4035, 4074), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (4060, 4074), False, 'import chromadb\n'), ((4161, 4200), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (4186, 4200), False, 'import chromadb\n'), ((4213, 4263), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (4238, 4263), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((4747, 4771), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (4762, 4771), False, 'import lancedb\n'), ((7410, 7447), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (7422, 7447), False, 'import os\n'), ((2260, 2280), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2274, 2280), False, 'import os\n'), ((2510, 2530), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2524, 2530), False, 'import os\n'), ((2678, 2728), 'os.path.join', 'os.path.join', (['test_dir', '""".."""', '""".."""', '"""website/docs"""'], {}), "(test_dir, '..', '..', 'website/docs')\n", (2690, 2728), False, 'import os\n'), ((2888, 2908), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2902, 2908), False, 'import os\n'), ((3056, 3106), 'os.path.join', 'os.path.join', (['test_dir', '""".."""', '""".."""', '"""website/docs"""'], {}), "(test_dir, '..', '..', 'website/docs')\n", (3068, 3106), False, 'import os\n'), ((3300, 3320), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3314, 3320), False, 'import os\n'), ((5827, 5851), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (5842, 5851), False, 'import lancedb\n'), ((1267, 1285), 
'autogen.token_count_utils.count_token', 'count_token', (['chunk'], {}), '(chunk)\n', (1278, 1285), False, 'from autogen.token_count_utils import count_token\n'), ((1669, 1705), 'autogen.retrieve_utils.extract_text_from_pdf', 'extract_text_from_pdf', (['pdf_file_path'], {}), '(pdf_file_path)\n', (1690, 1705), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n')]
from langchain_community.vectorstores import LanceDB

from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings


def test_lancedb() -> None:
    import lancedb

    embeddings = FakeEmbeddings()
    db = lancedb.connect("/tmp/lancedb")
    texts = ["text 1", "text 2", "item 3"]
    vectors = embeddings.embed_documents(texts)
    table = db.create_table(
        "my_table",
        data=[
            {"vector": vectors[idx], "id": text, "text": text}
            for idx, text in enumerate(texts)
        ],
        mode="overwrite",
    )
    store = LanceDB(table, embeddings)
    result = store.similarity_search("text 1")
    result_texts = [doc.page_content for doc in result]
    assert "text 1" in result_texts


def test_lancedb_add_texts() -> None:
    import lancedb

    embeddings = FakeEmbeddings()
    db = lancedb.connect("/tmp/lancedb")
    texts = ["text 1"]
    vectors = embeddings.embed_documents(texts)
    table = db.create_table(
        "my_table",
        data=[
            {"vector": vectors[idx], "id": text, "text": text}
            for idx, text in enumerate(texts)
        ],
        mode="overwrite",
    )
    store = LanceDB(table, embeddings)
    store.add_texts(["text 2"])
    result = store.similarity_search("text 2")
    result_texts = [doc.page_content for doc in result]
    assert "text 2" in result_texts
[ "lancedb.connect" ]
[((200, 216), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (214, 216), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((226, 257), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (241, 257), False, 'import lancedb\n'), ((577, 603), 'langchain_community.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (584, 603), False, 'from langchain_community.vectorstores import LanceDB\n'), ((820, 836), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (834, 836), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((846, 877), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (861, 877), False, 'import lancedb\n'), ((1177, 1203), 'langchain_community.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (1184, 1203), False, 'from langchain_community.vectorstores import LanceDB\n')]
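A note on reading these rows: each entry in extract_api pairs character offsets with the call it locates inside code, so a row can be checked mechanically. Below is a minimal sketch of that layout, assuming the eight-field tuple shape visible above; the miniature row and its offsets are invented for illustration, not taken from the dataset.

import ast

# A miniature row shaped like this dataset; the offsets are computed for the
# tiny `code` string below and mirror the entries in the record above.
row = {
    "code": 'import lancedb\n\ndb = lancedb.connect("/tmp/lancedb")\n',
    "extract_api": "[((21, 52), 'lancedb.connect', 'lancedb.connect', "
                   "([\"'/tmp/lancedb'\"], {}), \"('/tmp/lancedb')\\n\", "
                   "(36, 52), False, 'import lancedb\\n')]",
}

entries = ast.literal_eval(row["extract_api"])
for (start, end), qual_name, called_name, signature, *rest in entries:
    span = row["code"][start:end]        # the full call expression
    assert span.startswith(called_name)  # offsets line up with the source
    print(f"{qual_name}: {span}")        # lancedb.connect: lancedb.connect("/tmp/lancedb")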
""" Unit test for retrieve_utils.py """ from autogen.retrieve_utils import ( split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db, ) from autogen.token_count_utils import count_token import os import pytest import chromadb test_dir = os.path.join(os.path.dirname(__file__), "test_files") expected_text = """AutoGen is an advanced tool designed to assist developers in harnessing the capabilities of Large Language Models (LLMs) for various applications. The primary purpose of AutoGen is to automate and simplify the process of building applications that leverage the power of LLMs, allowing for seamless integration, testing, and deployment.""" class TestRetrieveUtils: def test_split_text_to_chunks(self): long_text = "A" * 10000 chunks = split_text_to_chunks(long_text, max_tokens=1000) assert all(count_token(chunk) <= 1000 for chunk in chunks) def test_split_text_to_chunks_raises_on_invalid_chunk_mode(self): with pytest.raises(AssertionError): split_text_to_chunks("A" * 10000, chunk_mode="bogus_chunk_mode") def test_extract_text_from_pdf(self): pdf_file_path = os.path.join(test_dir, "example.pdf") assert "".join(expected_text.split()) == "".join(extract_text_from_pdf(pdf_file_path).strip().split()) def test_split_files_to_chunks(self): pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") chunks = split_files_to_chunks([pdf_file_path, txt_file_path]) assert all(isinstance(chunk, str) and chunk.strip() for chunk in chunks) def test_get_files_from_dir(self): files = get_files_from_dir(test_dir) assert all(os.path.isfile(file) for file in files) pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") files = get_files_from_dir([pdf_file_path, txt_file_path]) assert all(os.path.isfile(file) for file in files) def test_is_url(self): assert is_url("https://www.example.com") assert not is_url("not_a_url") def test_create_vector_db_from_dir(self): db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): client = chromadb.PersistentClient(path=db_path) else: client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir(test_dir, client=client) assert client.get_collection("all-my-documents") def test_query_vector_db(self): db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): client = chromadb.PersistentClient(path=db_path) else: # If the database does not exist, create it first client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir(test_dir, client=client) results = query_vector_db(["autogen"], client=client) assert isinstance(results, dict) and any("autogen" in res[0].lower() for res in results.get("documents", [])) def test_custom_vector_db(self): try: import lancedb except ImportError: return from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent db_path = "/tmp/lancedb" def create_lancedb(): db = lancedb.connect(db_path) data = [ {"vector": [1.1, 1.2], "id": 1, "documents": "This is a test document spark"}, {"vector": [0.2, 1.8], "id": 2, "documents": "This is another test document"}, {"vector": [0.1, 0.3], "id": 3, "documents": "This is a third test document spark"}, {"vector": [0.5, 0.7], "id": 4, "documents": "This is a fourth test document"}, {"vector": [2.1, 1.3], "id": 5, "documents": "This is a fifth test document spark"}, {"vector": [5.1, 8.3], "id": 6, "documents": "This is a sixth test 
document"}, ] try: db.create_table("my_table", data) except OSError: pass class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent): def query_vector_db( self, query_texts, n_results=10, search_string="", ): if query_texts: vector = [0.1, 0.3] db = lancedb.connect(db_path) table = db.open_table("my_table") query = table.search(vector).where(f"documents LIKE '%{search_string}%'").limit(n_results).to_df() return {"ids": [query["id"].tolist()], "documents": [query["documents"].tolist()]} def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = ""): results = self.query_vector_db( query_texts=[problem], n_results=n_results, search_string=search_string, ) self._results = results print("doc_ids: ", results["ids"]) ragragproxyagent = MyRetrieveUserProxyAgent( name="ragproxyagent", human_input_mode="NEVER", max_consecutive_auto_reply=2, retrieve_config={ "task": "qa", "chunk_token_size": 2000, "client": "__", "embedding_model": "all-mpnet-base-v2", }, ) create_lancedb() ragragproxyagent.retrieve_docs("This is a test document spark", n_results=10, search_string="spark") assert ragragproxyagent._results["ids"] == [[3, 1, 5]] def test_custom_text_split_function(self): def custom_text_split_function(text): return [text[: len(text) // 2], text[len(text) // 2 :]] db_path = "/tmp/test_retrieve_utils_chromadb.db" client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir( os.path.join(test_dir, "example.txt"), client=client, collection_name="mytestcollection", custom_text_split_function=custom_text_split_function, get_or_create=True, ) results = query_vector_db(["autogen"], client=client, collection_name="mytestcollection", n_results=1) assert ( results.get("documents")[0][0] == "AutoGen is an advanced tool designed to assist developers in harnessing the capabilities\nof Large Language Models (LLMs) for various applications. The primary purpose o" ) def test_retrieve_utils(self): client = chromadb.PersistentClient(path="/tmp/chromadb") create_vector_db_from_dir(dir_path="./website/docs", client=client, collection_name="autogen-docs") results = query_vector_db( query_texts=[ "How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?", ], n_results=4, client=client, collection_name="autogen-docs", search_string="AutoGen", ) print(results["ids"][0]) assert len(results["ids"][0]) == 4 if __name__ == "__main__": pytest.main() db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): os.remove(db_path) # Delete the database file after tests are finished
[ "lancedb.connect" ]
[((365, 390), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (380, 390), False, 'import os\n'), ((7383, 7396), 'pytest.main', 'pytest.main', ([], {}), '()\n', (7394, 7396), False, 'import pytest\n'), ((7458, 7481), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (7472, 7481), False, 'import os\n'), ((881, 929), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (['long_text'], {'max_tokens': '(1000)'}), '(long_text, max_tokens=1000)\n', (901, 929), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1256, 1293), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1268, 1293), False, 'import os\n'), ((1472, 1509), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1484, 1509), False, 'import os\n'), ((1534, 1571), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (1546, 1571), False, 'import os\n'), ((1589, 1642), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (1610, 1642), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1780, 1808), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['test_dir'], {}), '(test_dir)\n', (1798, 1808), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1892, 1929), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1904, 1929), False, 'import os\n'), ((1954, 1991), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (1966, 1991), False, 'import os\n'), ((2008, 2058), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (2026, 2058), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2161, 2194), 'autogen.retrieve_utils.is_url', 'is_url', (['"""https://www.example.com"""'], {}), "('https://www.example.com')\n", (2167, 2194), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2349, 2372), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (2363, 2372), False, 'import os\n'), ((2736, 2759), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (2750, 2759), False, 'import os\n'), ((3030, 3073), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client'}), "(['autogen'], client=client)\n", (3045, 3073), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((6070, 6109), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], 
{'path': 'db_path'}), '(path=db_path)\n', (6095, 6109), False, 'import chromadb\n'), ((6398, 6495), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client', 'collection_name': '"""mytestcollection"""', 'n_results': '(1)'}), "(['autogen'], client=client, collection_name=\n 'mytestcollection', n_results=1)\n", (6413, 6495), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((6801, 6848), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""/tmp/chromadb"""'}), "(path='/tmp/chromadb')\n", (6826, 6848), False, 'import chromadb\n'), ((6857, 6960), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', ([], {'dir_path': '"""./website/docs"""', 'client': 'client', 'collection_name': '"""autogen-docs"""'}), "(dir_path='./website/docs', client=client,\n collection_name='autogen-docs')\n", (6882, 6960), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((6975, 7185), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', ([], {'query_texts': "['How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ]", 'n_results': '(4)', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'search_string': '"""AutoGen"""'}), "(query_texts=[\n 'How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ], n_results=4, client=client, collection_name='autogen-docs',\n search_string='AutoGen')\n", (6990, 7185), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((7491, 7509), 'os.remove', 'os.remove', (['db_path'], {}), '(db_path)\n', (7500, 7509), False, 'import os\n'), ((1081, 1110), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1094, 1110), False, 'import pytest\n'), ((1124, 1188), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (["('A' * 10000)"], {'chunk_mode': '"""bogus_chunk_mode"""'}), "('A' * 10000, chunk_mode='bogus_chunk_mode')\n", (1144, 1188), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2214, 2233), 'autogen.retrieve_utils.is_url', 'is_url', (['"""not_a_url"""'], {}), "('not_a_url')\n", (2220, 2233), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2395, 2434), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (2420, 2434), False, 'import chromadb\n'), ((2470, 2509), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (2495, 2509), False, 'import chromadb\n'), ((2522, 2572), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (2547, 2572), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, 
query_vector_db\n'), ((2782, 2821), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (2807, 2821), False, 'import chromadb\n'), ((2908, 2947), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (2933, 2947), False, 'import chromadb\n'), ((2960, 3010), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (2985, 3010), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3494, 3518), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (3509, 3518), False, 'import lancedb\n'), ((6157, 6194), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (6169, 6194), False, 'import os\n'), ((1828, 1848), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (1842, 1848), False, 'import os\n'), ((2078, 2098), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2092, 2098), False, 'import os\n'), ((4574, 4598), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (4589, 4598), False, 'import lancedb\n'), ((949, 967), 'autogen.token_count_utils.count_token', 'count_token', (['chunk'], {}), '(chunk)\n', (960, 967), False, 'from autogen.token_count_utils import count_token\n'), ((1351, 1387), 'autogen.retrieve_utils.extract_text_from_pdf', 'extract_text_from_pdf', (['pdf_file_path'], {}), '(pdf_file_path)\n', (1372, 1387), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n')]
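The custom retriever in the row above reduces to one LanceDB idiom: connect, create or open a table, then chain a vector search with a SQL-style filter. A standalone sketch of that idiom follows, with an invented table name and contents; to_df() is the DataFrame accessor the original code itself uses.

import lancedb

db = lancedb.connect("/tmp/lancedb")
table = db.create_table(
    "demo_table",  # invented name for illustration
    data=[
        {"vector": [0.1, 0.3], "id": 1, "documents": "a test document spark"},
        {"vector": [5.1, 8.3], "id": 2, "documents": "another test document"},
    ],
    mode="overwrite",
)

# Nearest-neighbour search, filtered server-side, returned as a DataFrame.
hits = table.search([0.1, 0.3]).where("documents LIKE '%spark%'").limit(5).to_df()
print(hits["id"].tolist())  # -> [1]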
""" Unit test for retrieve_utils.py """ from autogen.retrieve_utils import ( split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db, ) from autogen.token_count_utils import count_token import os import pytest import chromadb test_dir = os.path.join(os.path.dirname(__file__), "test_files") expected_text = """AutoGen is an advanced tool designed to assist developers in harnessing the capabilities of Large Language Models (LLMs) for various applications. The primary purpose of AutoGen is to automate and simplify the process of building applications that leverage the power of LLMs, allowing for seamless integration, testing, and deployment.""" class TestRetrieveUtils: def test_split_text_to_chunks(self): long_text = "A" * 10000 chunks = split_text_to_chunks(long_text, max_tokens=1000) assert all(count_token(chunk) <= 1000 for chunk in chunks) def test_split_text_to_chunks_raises_on_invalid_chunk_mode(self): with pytest.raises(AssertionError): split_text_to_chunks("A" * 10000, chunk_mode="bogus_chunk_mode") def test_extract_text_from_pdf(self): pdf_file_path = os.path.join(test_dir, "example.pdf") assert "".join(expected_text.split()) == "".join(extract_text_from_pdf(pdf_file_path).strip().split()) def test_split_files_to_chunks(self): pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") chunks = split_files_to_chunks([pdf_file_path, txt_file_path]) assert all(isinstance(chunk, str) and chunk.strip() for chunk in chunks) def test_get_files_from_dir(self): files = get_files_from_dir(test_dir) assert all(os.path.isfile(file) for file in files) pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") files = get_files_from_dir([pdf_file_path, txt_file_path]) assert all(os.path.isfile(file) for file in files) def test_is_url(self): assert is_url("https://www.example.com") assert not is_url("not_a_url") def test_create_vector_db_from_dir(self): db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): client = chromadb.PersistentClient(path=db_path) else: client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir(test_dir, client=client) assert client.get_collection("all-my-documents") def test_query_vector_db(self): db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): client = chromadb.PersistentClient(path=db_path) else: # If the database does not exist, create it first client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir(test_dir, client=client) results = query_vector_db(["autogen"], client=client) assert isinstance(results, dict) and any("autogen" in res[0].lower() for res in results.get("documents", [])) def test_custom_vector_db(self): try: import lancedb except ImportError: return from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent db_path = "/tmp/lancedb" def create_lancedb(): db = lancedb.connect(db_path) data = [ {"vector": [1.1, 1.2], "id": 1, "documents": "This is a test document spark"}, {"vector": [0.2, 1.8], "id": 2, "documents": "This is another test document"}, {"vector": [0.1, 0.3], "id": 3, "documents": "This is a third test document spark"}, {"vector": [0.5, 0.7], "id": 4, "documents": "This is a fourth test document"}, {"vector": [2.1, 1.3], "id": 5, "documents": "This is a fifth test document spark"}, {"vector": [5.1, 8.3], "id": 6, "documents": "This is a sixth test 
document"}, ] try: db.create_table("my_table", data) except OSError: pass class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent): def query_vector_db( self, query_texts, n_results=10, search_string="", ): if query_texts: vector = [0.1, 0.3] db = lancedb.connect(db_path) table = db.open_table("my_table") query = table.search(vector).where(f"documents LIKE '%{search_string}%'").limit(n_results).to_df() return {"ids": [query["id"].tolist()], "documents": [query["documents"].tolist()]} def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = ""): results = self.query_vector_db( query_texts=[problem], n_results=n_results, search_string=search_string, ) self._results = results print("doc_ids: ", results["ids"]) ragragproxyagent = MyRetrieveUserProxyAgent( name="ragproxyagent", human_input_mode="NEVER", max_consecutive_auto_reply=2, retrieve_config={ "task": "qa", "chunk_token_size": 2000, "client": "__", "embedding_model": "all-mpnet-base-v2", }, ) create_lancedb() ragragproxyagent.retrieve_docs("This is a test document spark", n_results=10, search_string="spark") assert ragragproxyagent._results["ids"] == [[3, 1, 5]] def test_custom_text_split_function(self): def custom_text_split_function(text): return [text[: len(text) // 2], text[len(text) // 2 :]] db_path = "/tmp/test_retrieve_utils_chromadb.db" client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir( os.path.join(test_dir, "example.txt"), client=client, collection_name="mytestcollection", custom_text_split_function=custom_text_split_function, get_or_create=True, ) results = query_vector_db(["autogen"], client=client, collection_name="mytestcollection", n_results=1) assert ( results.get("documents")[0][0] == "AutoGen is an advanced tool designed to assist developers in harnessing the capabilities\nof Large Language Models (LLMs) for various applications. The primary purpose o" ) def test_retrieve_utils(self): client = chromadb.PersistentClient(path="/tmp/chromadb") create_vector_db_from_dir(dir_path="./website/docs", client=client, collection_name="autogen-docs") results = query_vector_db( query_texts=[ "How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?", ], n_results=4, client=client, collection_name="autogen-docs", search_string="AutoGen", ) print(results["ids"][0]) assert len(results["ids"][0]) == 4 if __name__ == "__main__": pytest.main() db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): os.remove(db_path) # Delete the database file after tests are finished
[ "lancedb.connect" ]
[((365, 390), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (380, 390), False, 'import os\n'), ((7383, 7396), 'pytest.main', 'pytest.main', ([], {}), '()\n', (7394, 7396), False, 'import pytest\n'), ((7458, 7481), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (7472, 7481), False, 'import os\n'), ((881, 929), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (['long_text'], {'max_tokens': '(1000)'}), '(long_text, max_tokens=1000)\n', (901, 929), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1256, 1293), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1268, 1293), False, 'import os\n'), ((1472, 1509), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1484, 1509), False, 'import os\n'), ((1534, 1571), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (1546, 1571), False, 'import os\n'), ((1589, 1642), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (1610, 1642), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1780, 1808), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['test_dir'], {}), '(test_dir)\n', (1798, 1808), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1892, 1929), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1904, 1929), False, 'import os\n'), ((1954, 1991), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (1966, 1991), False, 'import os\n'), ((2008, 2058), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (2026, 2058), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2161, 2194), 'autogen.retrieve_utils.is_url', 'is_url', (['"""https://www.example.com"""'], {}), "('https://www.example.com')\n", (2167, 2194), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2349, 2372), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (2363, 2372), False, 'import os\n'), ((2736, 2759), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (2750, 2759), False, 'import os\n'), ((3030, 3073), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client'}), "(['autogen'], client=client)\n", (3045, 3073), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((6070, 6109), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], 
{'path': 'db_path'}), '(path=db_path)\n', (6095, 6109), False, 'import chromadb\n'), ((6398, 6495), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client', 'collection_name': '"""mytestcollection"""', 'n_results': '(1)'}), "(['autogen'], client=client, collection_name=\n 'mytestcollection', n_results=1)\n", (6413, 6495), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((6801, 6848), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""/tmp/chromadb"""'}), "(path='/tmp/chromadb')\n", (6826, 6848), False, 'import chromadb\n'), ((6857, 6960), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', ([], {'dir_path': '"""./website/docs"""', 'client': 'client', 'collection_name': '"""autogen-docs"""'}), "(dir_path='./website/docs', client=client,\n collection_name='autogen-docs')\n", (6882, 6960), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((6975, 7185), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', ([], {'query_texts': "['How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ]", 'n_results': '(4)', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'search_string': '"""AutoGen"""'}), "(query_texts=[\n 'How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ], n_results=4, client=client, collection_name='autogen-docs',\n search_string='AutoGen')\n", (6990, 7185), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((7491, 7509), 'os.remove', 'os.remove', (['db_path'], {}), '(db_path)\n', (7500, 7509), False, 'import os\n'), ((1081, 1110), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1094, 1110), False, 'import pytest\n'), ((1124, 1188), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (["('A' * 10000)"], {'chunk_mode': '"""bogus_chunk_mode"""'}), "('A' * 10000, chunk_mode='bogus_chunk_mode')\n", (1144, 1188), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2214, 2233), 'autogen.retrieve_utils.is_url', 'is_url', (['"""not_a_url"""'], {}), "('not_a_url')\n", (2220, 2233), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2395, 2434), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (2420, 2434), False, 'import chromadb\n'), ((2470, 2509), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (2495, 2509), False, 'import chromadb\n'), ((2522, 2572), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (2547, 2572), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, 
query_vector_db\n'), ((2782, 2821), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (2807, 2821), False, 'import chromadb\n'), ((2908, 2947), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (2933, 2947), False, 'import chromadb\n'), ((2960, 3010), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (2985, 3010), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3494, 3518), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (3509, 3518), False, 'import lancedb\n'), ((6157, 6194), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (6169, 6194), False, 'import os\n'), ((1828, 1848), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (1842, 1848), False, 'import os\n'), ((2078, 2098), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2092, 2098), False, 'import os\n'), ((4574, 4598), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (4589, 4598), False, 'import lancedb\n'), ((949, 967), 'autogen.token_count_utils.count_token', 'count_token', (['chunk'], {}), '(chunk)\n', (960, 967), False, 'from autogen.token_count_utils import count_token\n'), ((1351, 1387), 'autogen.retrieve_utils.extract_text_from_pdf', 'extract_text_from_pdf', (['pdf_file_path'], {}), '(pdf_file_path)\n', (1372, 1387), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n')]
import os
import lancedb
import shutil
import uvicorn
import openai
from fastapi import FastAPI, HTTPException, WebSocket, UploadFile, File
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.document_loaders import PyPDFLoader
from langchain.vectorstores import LanceDB
from langchain.text_splitter import RecursiveCharacterTextSplitter
from fastapi import FastAPI, HTTPException, UploadFile, File
from pydantic import BaseModel

# Initialize FastAPI app with metadata
app = FastAPI(
    title="Chatbot RAG API",
    description="This is a chatbot API template for RAG system.",
    version="1.0.0",
)


# Pydantic model for chatbot request and response
class ChatRequest(BaseModel):
    prompt: str


class ChatResponse(BaseModel):
    response: str


# Global variable to store the path of the uploaded file
uploaded_file_path = None


# Endpoint to upload PDF
@app.post("/upload-pdf/")
async def upload_pdf(file: UploadFile = File(...)):
    global uploaded_file_path
    uploaded_file_path = f"uploaded_files/{file.filename}"
    os.makedirs(os.path.dirname(uploaded_file_path), exist_ok=True)
    with open(uploaded_file_path, "wb") as buffer:
        shutil.copyfileobj(file.file, buffer)
    return {"filename": file.filename}


# Setup LangChain
def setup_chain():
    global uploaded_file_path
    if not uploaded_file_path or not os.path.exists(uploaded_file_path):
        raise HTTPException(
            status_code=400, detail="No PDF file uploaded or file not found."
        )

    template = """Use the following pieces of context to answer the question at the end.
    If you don't know the answer, just say that you don't know, don't try to make up an answer.
    Use three sentences maximum and keep the answer as concise as possible.
    {context}
    Question: {question}
    Helpful Answer:"""

    OPENAI_API_KEY = "sk-yourkey"
    loader = PyPDFLoader(uploaded_file_path)
    docs = loader.load_and_split()
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=50)
    documents = text_splitter.split_documents(docs)
    prompt = PromptTemplate(input_variables=["context", "question"], template=template)
    embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
    db_lance = lancedb.connect("/tmp/lancedb")
    table = db_lance.create_table(
        "my_table",
        data=[
            {
                "vector": embeddings.embed_query("Hello World"),
                "text": "Hello World",
                "id": "1",
            }
        ],
        mode="overwrite",
    )
    db = LanceDB.from_documents(documents, embeddings, connection=table)
    retriever = db.as_retriever()
    chain_type_kwargs = {"prompt": prompt}
    llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY)
    chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=retriever,
        chain_type_kwargs=chain_type_kwargs,
        verbose=True,
    )
    return chain


# WebSocket endpoint for chat interaction
@app.websocket("/ws/chat")
async def websocket_chat(websocket: WebSocket):
    await websocket.accept()
    try:
        while True:
            data = await websocket.receive_text()
            try:
                agent = (
                    setup_chain()
                )  # Setup agent for each request to use the latest uploaded file
                response = agent.run(data)
                await websocket.send_text(response)
            except Exception as e:
                await websocket.send_text(f"Error: {str(e)}")
    except Exception as e:
        await websocket.close(code=1001, reason=str(e))


# Endpoint for chatbot interaction
@app.post("/chat", response_model=ChatResponse)
async def chat(request: ChatRequest):
    agent = setup_chain()
    response = agent.run(request.prompt)
    return {"response": response}


# Health check endpoint
@app.get("/", tags=["Health Check"])
async def read_root():
    return {"message": "Chatbot API is running!"}


# Main function to run the app
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
[ "lancedb.connect" ]
[((621, 737), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""Chatbot RAG API"""', 'description': '"""This is a chatbot API template for RAG system."""', 'version': '"""1.0.0"""'}), "(title='Chatbot RAG API', description=\n 'This is a chatbot API template for RAG system.', version='1.0.0')\n", (628, 737), False, 'from fastapi import FastAPI, HTTPException, UploadFile, File\n'), ((1075, 1084), 'fastapi.File', 'File', (['...'], {}), '(...)\n', (1079, 1084), False, 'from fastapi import FastAPI, HTTPException, UploadFile, File\n'), ((2017, 2048), 'langchain.document_loaders.PyPDFLoader', 'PyPDFLoader', (['uploaded_file_path'], {}), '(uploaded_file_path)\n', (2028, 2048), False, 'from langchain.document_loaders import PyPDFLoader\n'), ((2105, 2169), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(200)', 'chunk_overlap': '(50)'}), '(chunk_size=200, chunk_overlap=50)\n', (2135, 2169), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2236, 2310), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'question']", 'template': 'template'}), "(input_variables=['context', 'question'], template=template)\n", (2250, 2310), False, 'from langchain.prompts import PromptTemplate\n'), ((2328, 2375), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (2344, 2375), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2392, 2423), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (2407, 2423), False, 'import lancedb\n'), ((2706, 2769), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'table'}), '(documents, embeddings, connection=table)\n', (2728, 2769), False, 'from langchain.vectorstores import LanceDB\n'), ((2858, 2899), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (2868, 2899), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2913, 3046), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'llm', 'chain_type': '"""stuff"""', 'retriever': 'retriever', 'chain_type_kwargs': 'chain_type_kwargs', 'verbose': '(True)'}), "(llm=llm, chain_type='stuff', retriever=\n retriever, chain_type_kwargs=chain_type_kwargs, verbose=True)\n", (2940, 3046), False, 'from langchain.chains import RetrievalQA\n'), ((4191, 4234), 'uvicorn.run', 'uvicorn.run', (['app'], {'host': '"""0.0.0.0"""', 'port': '(8000)'}), "(app, host='0.0.0.0', port=8000)\n", (4202, 4234), False, 'import uvicorn\n'), ((1192, 1227), 'os.path.dirname', 'os.path.dirname', (['uploaded_file_path'], {}), '(uploaded_file_path)\n', (1207, 1227), False, 'import os\n'), ((1303, 1340), 'shutil.copyfileobj', 'shutil.copyfileobj', (['file.file', 'buffer'], {}), '(file.file, buffer)\n', (1321, 1340), False, 'import shutil\n'), ((1536, 1621), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""No PDF file uploaded or file not found."""'}), "(status_code=400, detail='No PDF file uploaded or file not found.'\n )\n", (1549, 1621), False, 'from fastapi import FastAPI, HTTPException, UploadFile, File\n'), ((1486, 1520), 'os.path.exists', 'os.path.exists', (['uploaded_file_path'], {}), '(uploaded_file_path)\n', (1500, 1520), False, 'import os\n')]
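For reference, a minimal client-side sketch of exercising the two REST endpoints defined above. This is illustrative only: it assumes the app is served locally on port 8000 (e.g. via `uvicorn main:app`), that an `example.pdf` exists, and it uses the `requests` library, which is not part of the original file.

import requests

BASE = "http://localhost:8000"  # assumed local server address

# Upload a PDF for the RAG chain to index
with open("example.pdf", "rb") as f:
    print(requests.post(f"{BASE}/upload-pdf/", files={"file": f}).json())

# Ask a question against the uploaded document
resp = requests.post(f"{BASE}/chat", json={"prompt": "Summarize the document."})
print(resp.json()["response"])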
import os
import typer
import pickle
import pandas as pd
from dotenv import load_dotenv
import openai
import pinecone
import lancedb
import pyarrow as pa
from collections import deque

TASK_CREATION_PROMPT = """
You are a task creation AI that uses the result of an execution agent to create new tasks with the following
objective: {objective}, The last completed task has the result: {result}. This result was based on this task
description: {task_description}. These are incomplete tasks: {task_list}. Based on the result, create new tasks
to be completed by the AI system that do not overlap with incomplete tasks. Return the tasks as an array."""

PRIORITIZATION_PROMPT = """
You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing the following tasks:
{task_names}. Consider the ultimate objective of your team: {objective}. Do not remove any tasks. Return the
result as a numbered list, like:
#. First task
#. Second task
Start the task list with number {next_task_id}."""

EXECUTION_PROMPT = """
You are an AI who performs one task based on the following objective: {objective}.
Your task: {task}\nResponse: """


class OpenAIService:
    def __init__(self, api_key):
        openai.api_key = api_key

    def get_ada_embedding(self, text):
        text = text.replace('\n', ' ')
        return openai.Embedding.create(input=[text], model='text-embedding-ada-002')['data'][0]['embedding']

    def create(self, prompt, max_tokens=100, temperature=0.5):
        return (
            openai.Completion.create(
                engine='text-davinci-003',
                prompt=prompt,
                temperature=temperature,
                max_tokens=max_tokens,
                top_p=1,
                frequency_penalty=0,
                presence_penalty=0,
            )
            .choices[0]
            .text.strip()
        )


class TestAIService:
    def __init__(self, ai_service, cache_file):
        self.ai_service = ai_service
        self.cache_file = cache_file
        if os.path.isfile(cache_file):
            self.cache = pickle.load(open(cache_file, 'rb'))
        else:
            self.cache = {'ada': {}, 'create': {}}
            pickle.dump(self.cache, open(cache_file, 'wb'))

    def get_ada_embedding(self, text):
        if text not in self.cache['ada']:
            self.cache['ada'][text] = self.ai_service.get_ada_embedding(text)
            pickle.dump(self.cache, open(self.cache_file, 'wb'))
        return self.cache['ada'][text]

    def create(self, prompt, max_tokens=100, temperature=0.5):
        key = (prompt, max_tokens, temperature)
        if key not in self.cache['create']:
            self.cache['create'][key] = self.ai_service.create(prompt, max_tokens, temperature)
            pickle.dump(self.cache, open(self.cache_file, 'wb'))
        return self.cache['create'][key]


class PineconeService:
    def __init__(self, api_key, environment, table_name, dimension, metric, pod_type):
        self.table_name = table_name
        pinecone.init(api_key=api_key, environment=environment)
        if table_name not in pinecone.list_indexes():
            pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type)
        self.index = pinecone.Index(table_name)

    def query(self, query_embedding, top_k):
        results = self.index.query(query_embedding, top_k=top_k, include_metadata=True)
        sorted_results = sorted(results.matches, key=lambda x: x.score, reverse=True)
        return [str(item.metadata['task']) for item in sorted_results]

    def upsert(self, data):
        self.index.upsert(data)


class LanceService:
    def __init__(self, table_name, dimension):
        self.db = lancedb.connect('.')
        schema = pa.schema(
            [
                pa.field('result_id', pa.string()),
                pa.field('vector', pa.list_(pa.float32(), dimension)),
                pa.field('task', pa.string()),
                pa.field('result', pa.string()),
                # TODO There is a fixed schema but we keep converting
            ]
        )
        # result_id is a string in the schema, so the seed row uses '0' rather than 0
        data = [{'result_id': '0', 'vector': [0.0] * dimension, 'task': 'asd', 'result': 'asd'}]
        self.table = self.db.create_table(table_name, mode='overwrite', data=data, schema=schema)

    def query(self, query_embedding, top_k):
        result = self.table.search(query_embedding).limit(top_k).to_df()
        return [v for v in result['task']]

    def upsert(self, data):
        data = {
            # TODO This doesn't look good, why are we converting?
            'result_id': data[0][0],
            'vector': data[0][1],
            'task': data[0][2]['task'],
            'result': data[0][2]['result'],
        }
        self.table.add(pd.DataFrame([data]))


class BabyAGI:
    def __init__(self, objective, ai_service, vector_service):
        self.objective = objective
        self.ai_service = ai_service
        self.vector_service = vector_service
        self.task_list = deque([])

    def add_task(self, task):
        self.task_list.append(task)

    def task_creation_agent(self, result, task_description):
        prompt = TASK_CREATION_PROMPT.format(
            objective=self.objective,
            result=result,
            task_description=task_description,
            task_list=', '.join([t['task_name'] for t in self.task_list]),
        )
        response = self.ai_service.create(prompt)
        new_tasks = response.split('\n')
        return [{'task_name': task_name} for task_name in new_tasks]

    def prioritization_agent(self, this_task_id):
        task_names = [t['task_name'] for t in self.task_list]
        next_task_id = int(this_task_id) + 1
        prompt = PRIORITIZATION_PROMPT.format(
            task_names=task_names, objective=self.objective, next_task_id=next_task_id
        )
        response = self.ai_service.create(prompt, max_tokens=1000)
        new_tasks = response.split('\n')
        self.task_list = deque()
        for task_string in new_tasks:
            task_parts = task_string.strip().split('.', 1)
            if len(task_parts) == 2:
                task_id = task_parts[0].strip()
                task_name = task_parts[1].strip()
                self.task_list.append({'task_id': task_id, 'task_name': task_name})

    def execution_agent(self, task) -> str:
        context = self.context_agent(query=self.objective, n=5)
        response = self.ai_service.create(
            prompt=EXECUTION_PROMPT.format(objective=self.objective, task=task), max_tokens=2000, temperature=0.7
        )
        return response

    def context_agent(self, query, n):
        query_embedding = self.ai_service.get_ada_embedding(query)
        return self.vector_service.query(query_embedding, n)

    def run(self, first_task):
        print(self.objective)
        first_task = {'task_id': 1, 'task_name': first_task}
        self.add_task(first_task)
        task_id_counter = 1
        for _ in range(4):
            if self.task_list:
                task = self.task_list.popleft()
                print(task['task_name'])

                result = self.execution_agent(task['task_name'])
                print(result)

                this_task_id = int(task['task_id'])
                enriched_result = {'data': result}
                result_id = f'result_{task["task_id"]}'
                vector = enriched_result['data']
                self.vector_service.upsert(
                    [
                        (
                            result_id,
                            self.ai_service.get_ada_embedding(vector),
                            {'task': task['task_name'], 'result': result},
                        )
                    ]
                )

                new_tasks = self.task_creation_agent(enriched_result, task['task_name'])
                for new_task in new_tasks:
                    task_id_counter += 1
                    new_task.update({'task_id': task_id_counter})
                    self.add_task(new_task)
                self.prioritization_agent(this_task_id)


def main():
    load_dotenv()
    baby_agi = BabyAGI(
        objective='Solve world hunger.',
        ai_service=TestAIService(
            ai_service=OpenAIService(api_key=os.getenv('OPENAI_API_KEY')),
            cache_file='babyagi_cache.pkl',
        ),
        vector_service=LanceService(
            table_name='test-table',
            dimension=1536,
        )
        # vector_service=PineconeService(
        #     api_key=os.getenv('PINECONE_API_KEY'),
        #     environment=os.getenv('PINECONE_ENVIRONMENT'),
        #     table_name='test-table',
        #     dimension=1536,
        #     metric='cosine',
        #     pod_type='p1',
        # ),
    )
    baby_agi.run(first_task='Develop a task list.')


if __name__ == '__main__':
    typer.run(main)
[ "lancedb.connect" ]
[((8057, 8070), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (8068, 8070), False, 'from dotenv import load_dotenv\n'), ((8801, 8816), 'typer.run', 'typer.run', (['main'], {}), '(main)\n', (8810, 8816), False, 'import typer\n'), ((2034, 2060), 'os.path.isfile', 'os.path.isfile', (['cache_file'], {}), '(cache_file)\n', (2048, 2060), False, 'import os\n'), ((3027, 3082), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'api_key', 'environment': 'environment'}), '(api_key=api_key, environment=environment)\n', (3040, 3082), False, 'import pinecone\n'), ((3259, 3285), 'pinecone.Index', 'pinecone.Index', (['table_name'], {}), '(table_name)\n', (3273, 3285), False, 'import pinecone\n'), ((3727, 3747), 'lancedb.connect', 'lancedb.connect', (['"""."""'], {}), "('.')\n", (3742, 3747), False, 'import lancedb\n'), ((4976, 4985), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (4981, 4985), False, 'from collections import deque\n'), ((5953, 5960), 'collections.deque', 'deque', ([], {}), '()\n', (5958, 5960), False, 'from collections import deque\n'), ((3112, 3135), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (3133, 3135), False, 'import pinecone\n'), ((3149, 3241), 'pinecone.create_index', 'pinecone.create_index', (['table_name'], {'dimension': 'dimension', 'metric': 'metric', 'pod_type': 'pod_type'}), '(table_name, dimension=dimension, metric=metric,\n pod_type=pod_type)\n', (3170, 3241), False, 'import pinecone\n'), ((4732, 4752), 'pandas.DataFrame', 'pd.DataFrame', (['[data]'], {}), '([data])\n', (4744, 4752), True, 'import pandas as pd\n'), ((1339, 1408), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': '[text]', 'model': '"""text-embedding-ada-002"""'}), "(input=[text], model='text-embedding-ada-002')\n", (1362, 1408), False, 'import openai\n'), ((3828, 3839), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (3837, 3839), True, 'import pyarrow as pa\n'), ((3946, 3957), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (3955, 3957), True, 'import pyarrow as pa\n'), ((3995, 4006), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (4004, 4006), True, 'import pyarrow as pa\n'), ((3886, 3898), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (3896, 3898), True, 'import pyarrow as pa\n'), ((1526, 1698), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-003"""', 'prompt': 'prompt', 'temperature': 'temperature', 'max_tokens': 'max_tokens', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)'}), "(engine='text-davinci-003', prompt=prompt,\n temperature=temperature, max_tokens=max_tokens, top_p=1,\n frequency_penalty=0, presence_penalty=0)\n", (1550, 1698), False, 'import openai\n'), ((8215, 8242), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (8224, 8242), False, 'import os\n')]
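A small smoke test of the LanceService round trip used above. This is a sketch only: it assumes the LanceService class from this file is importable, that LanceDB can write to the current directory, and the random vector merely stands in for a real ada-002 embedding.

import numpy as np

svc = LanceService(table_name='smoke-test', dimension=1536)
vec = np.random.rand(1536).tolist()
# upsert() expects the same [(id, vector, metadata)] shape PineconeService uses
svc.upsert([('result_1', vec, {'task': 'First task', 'result': 'Done'})])
print(svc.query(vec, top_k=1))  # expected: ['First task']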
import openai
import os
import lancedb
import pickle
import requests
from pathlib import Path
from bs4 import BeautifulSoup
import re
from langchain.document_loaders import UnstructuredHTMLLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import LanceDB
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA


# Function to fetch and save a page as an HTML file
def save_page(url, save_dir):
    response = requests.get(url)
    if response.status_code == 200:
        soup = BeautifulSoup(response.content, 'html.parser')
        title = soup.find('title').text
        filename = f"{title}.html"
        with open(os.path.join(save_dir, filename), 'w', encoding='utf-8') as file:
            file.write(str(soup))


def get_document_title(document):
    m = str(document.metadata["source"])
    title = re.findall(r"(.*)\.html", m)
    print("PRINTING TITLES")
    print(title)
    if title[0] is not None:
        return title[0]
    return ''


# if "OPENAI_API_KEY" not in os.environ:
openai.api_key = "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"
assert len(openai.Model.list()["data"]) > 0

print("fetching data")
# Base URL of Wikivoyage
base_url = "https://en.wikivoyage.org/wiki/"
# List of page titles to download
page_titles = ["London", "Paris", "New_York_City"]  # Add more as needed
# Directory to save the HTML files
save_directory = "./wikivoyage_pages"

# Create the save directory if it doesn't exist
if not os.path.exists(save_directory):
    os.makedirs(save_directory)

# Loop through the page titles and download the pages
for title in page_titles:
    url = f"{base_url}{title}"
    save_page(url, save_directory)

docs_path = Path("cities.pkl")
docs = []

if not docs_path.exists():
    for p in Path("./wikivoyage_pages").rglob("*.html"):
        if p.is_dir():
            continue
        loader = UnstructuredHTMLLoader(p)
        raw_document = loader.load()
        m = {}
        m["title"] = get_document_title(raw_document[0])
        raw_document[0].metadata = raw_document[0].metadata | m
        raw_document[0].metadata["source"] = str(raw_document[0].metadata["source"])
        docs = docs + raw_document
    with docs_path.open("wb") as fh:
        pickle.dump(docs, fh)
else:
    with docs_path.open("rb") as fh:
        docs = pickle.load(fh)

# split text
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,
    chunk_overlap=50,
)
documents = text_splitter.split_documents(docs)

embeddings = OpenAIEmbeddings(openai_api_key="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")

db = lancedb.connect('/tmp/lancedb')
table = db.create_table("city_docs", data=[
    {"vector": embeddings.embed_query("Hello World"), "text": "Hello World"}
], mode="overwrite")

print("generated embeddings!")

docsearch = LanceDB.from_documents(documents[5:], embeddings, connection=table)
qa = RetrievalQA.from_chain_type(llm=OpenAI(openai_api_key="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"),
                                 chain_type="stuff", retriever=docsearch.as_retriever())

query_file = open('query.pkl', 'wb')
pickle.dump(qa, query_file)
query_file.close()
print("returning query object")
[ "lancedb.connect" ]
[((1788, 1806), 'pathlib.Path', 'Path', (['"""cities.pkl"""'], {}), "('cities.pkl')\n", (1792, 1806), False, 'from pathlib import Path\n'), ((2462, 2526), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(50)'}), '(chunk_size=500, chunk_overlap=50)\n', (2492, 2526), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2601, 2692), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': '"""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"""'}), "(openai_api_key=\n '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')\n", (2617, 2692), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2694, 2725), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (2709, 2725), False, 'import lancedb\n'), ((2913, 2980), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents[5:]', 'embeddings'], {'connection': 'table'}), '(documents[5:], embeddings, connection=table)\n', (2935, 2980), False, 'from langchain.vectorstores import LanceDB\n'), ((3190, 3217), 'pickle.dump', 'pickle.dump', (['qa', 'query_file'], {}), '(qa, query_file)\n', (3201, 3217), False, 'import pickle\n'), ((530, 547), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (542, 547), False, 'import requests\n'), ((931, 959), 're.findall', 're.findall', (['"""(.*)\\\\.html"""', 'm'], {}), "('(.*)\\\\.html', m)\n", (941, 959), False, 'import re\n'), ((1564, 1594), 'os.path.exists', 'os.path.exists', (['save_directory'], {}), '(save_directory)\n', (1578, 1594), False, 'import os\n'), ((1600, 1627), 'os.makedirs', 'os.makedirs', (['save_directory'], {}), '(save_directory)\n', (1611, 1627), False, 'import os\n'), ((599, 645), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""html.parser"""'], {}), "(response.content, 'html.parser')\n", (612, 645), False, 'from bs4 import BeautifulSoup\n'), ((1963, 1988), 'langchain.document_loaders.UnstructuredHTMLLoader', 'UnstructuredHTMLLoader', (['p'], {}), '(p)\n', (1985, 1988), False, 'from langchain.document_loaders import UnstructuredHTMLLoader\n'), ((2337, 2358), 'pickle.dump', 'pickle.dump', (['docs', 'fh'], {}), '(docs, fh)\n', (2348, 2358), False, 'import pickle\n'), ((2417, 2432), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (2428, 2432), False, 'import pickle\n'), ((3018, 3094), 'langchain.llms.OpenAI', 'OpenAI', ([], {'openai_api_key': '"""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"""'}), "(openai_api_key='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')\n", (3024, 3094), False, 'from langchain.llms import OpenAI\n'), ((1199, 1218), 'openai.Model.list', 'openai.Model.list', ([], {}), '()\n', (1216, 1218), False, 'import openai\n'), ((1858, 1884), 'pathlib.Path', 'Path', (['"""./wikivoyage_pages"""'], {}), "('./wikivoyage_pages')\n", (1862, 1884), False, 'from pathlib import Path\n'), ((739, 771), 'os.path.join', 'os.path.join', (['save_dir', 'filename'], {}), '(save_dir, filename)\n', (751, 771), False, 'import os\n')]
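The script above ends by pickling the RetrievalQA chain to query.pkl. A minimal consumer sketch, assuming query.pkl was produced by that script and the same langchain/openai versions are installed (unpickling is environment-sensitive); the question text is illustrative.

import pickle

with open('query.pkl', 'rb') as fh:
    qa = pickle.load(fh)

print(qa.run("What are the top sights in Paris?"))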
from flask import Flask, render_template, jsonify, request
from scripts.mock_llm_api import llm_api
import lancedb
import pandas as pd

uri = "data/lancedb"
db = lancedb.connect(uri)


# Set initial entries in items vector database
def _reset_tables():
    items = ['Fire', 'Earth', 'Water', 'Wind']
    descriptions = ["Strength: 10\nCost: 300\nCategory: Element",
                    "Strength: 7\nCost: 100\nCategory: Element",
                    "Strength: 3\nCost: 50\nCategory: Element",
                    "Strength: 1\nCost: 10\nCategory: Element"
                    ]
    vectors = [llm_api.embedding_request(item) for item in items]
    df = pd.DataFrame({"item": items, "vector": vectors, "description": descriptions})
    db.create_table("items", mode="overwrite", data=df)


if not db.table_names():
    print("No DB set up, creating initial tables")
    _reset_tables()

app = Flask(__name__)


@app.route("/")
def hello_world():
    table = db.open_table("items")
    return render_template('index.html', items=table.to_pandas()['item'].values.tolist())


@app.route('/generate')
def generate():
    item_1 = request.args.get('item_1', type=str)
    item_2 = request.args.get('item_2', type=str)

    # Generate prompt embedding
    with open("prompt_templates/basic_embedding_prompt.txt") as embedding_prompt_file:
        embedding_prompt_template = embedding_prompt_file.read().strip()
    embedding_prompt = embedding_prompt_template.format(item_1=item_1, item_2=item_2)
    prompt_embedding = llm_api.embedding_request(embedding_prompt)

    # Generate combination
    with open("prompt_templates/basic_prompt.txt") as prompt_file:
        prompt_template = prompt_file.read().strip()
    prompt = prompt_template.format(item_1=item_1, item_2=item_2)
    combination = llm_api.completion_request(prompt, max_tokens=30)

    # Generate description for combination
    with open("prompt_templates/basic_description_prompt.txt") as description_prompt_file:
        prompt_template = description_prompt_file.read().strip()
    prompt = prompt_template.format(item=combination)
    description = llm_api.completion_request(prompt, max_tokens=100)

    # Add result to vector database
    table = db.open_table("items")
    table.add(pd.DataFrame([{"item": combination,
                            "vector": llm_api.embedding_request(combination),
                            "description": description}]))

    return jsonify(result=combination)


@app.route('/get_description')
def get_description():
    item = request.args.get('item', type=str)
    table = db.open_table("items")

    # Get description
    # TODO: Important! This retrieves the whole database, which is very inefficient
    # You may be tempted to do something like
    # df = table.search(llm_api.embedding_request(item)).limit(1).to_df()
    # description = df['description'].values.tolist()[0]
    # instead. However, LanceDB is a bit unstable and will occasionally crash if you do this
    # The ideal setup would be to have a standard SQL database for this lookup
    df = table.to_pandas()
    description = df[df["item"] == item]["description"].values[0]

    return jsonify(result=description)
[ "lancedb.connect" ]
[((162, 182), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (177, 182), False, 'import lancedb\n'), ((879, 894), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (884, 894), False, 'from flask import Flask, render_template, jsonify, request\n'), ((640, 717), 'pandas.DataFrame', 'pd.DataFrame', (["{'item': items, 'vector': vectors, 'description': descriptions}"], {}), "({'item': items, 'vector': vectors, 'description': descriptions})\n", (652, 717), True, 'import pandas as pd\n'), ((1111, 1147), 'flask.request.args.get', 'request.args.get', (['"""item_1"""'], {'type': 'str'}), "('item_1', type=str)\n", (1127, 1147), False, 'from flask import Flask, render_template, jsonify, request\n'), ((1161, 1197), 'flask.request.args.get', 'request.args.get', (['"""item_2"""'], {'type': 'str'}), "('item_2', type=str)\n", (1177, 1197), False, 'from flask import Flask, render_template, jsonify, request\n'), ((1505, 1549), 'scripts.mock_llm_api.llm_api.embedding_request', 'llm_api.embedding_request', (['embeddding_prompt'], {}), '(embeddding_prompt)\n', (1530, 1549), False, 'from scripts.mock_llm_api import llm_api\n'), ((1782, 1831), 'scripts.mock_llm_api.llm_api.completion_request', 'llm_api.completion_request', (['prompt'], {'max_tokens': '(30)'}), '(prompt, max_tokens=30)\n', (1808, 1831), False, 'from scripts.mock_llm_api import llm_api\n'), ((2104, 2154), 'scripts.mock_llm_api.llm_api.completion_request', 'llm_api.completion_request', (['prompt'], {'max_tokens': '(100)'}), '(prompt, max_tokens=100)\n', (2130, 2154), False, 'from scripts.mock_llm_api import llm_api\n'), ((2370, 2397), 'flask.jsonify', 'jsonify', ([], {'result': 'combination'}), '(result=combination)\n', (2377, 2397), False, 'from flask import Flask, render_template, jsonify, request\n'), ((2465, 2499), 'flask.request.args.get', 'request.args.get', (['"""item"""'], {'type': 'str'}), "('item', type=str)\n", (2481, 2499), False, 'from flask import Flask, render_template, jsonify, request\n'), ((3104, 3131), 'flask.jsonify', 'jsonify', ([], {'result': 'description'}), '(result=description)\n', (3111, 3131), False, 'from flask import Flask, render_template, jsonify, request\n'), ((579, 610), 'scripts.mock_llm_api.llm_api.embedding_request', 'llm_api.embedding_request', (['item'], {}), '(item)\n', (604, 610), False, 'from scripts.mock_llm_api import llm_api\n'), ((2287, 2325), 'scripts.mock_llm_api.llm_api.embedding_request', 'llm_api.embedding_request', (['combination'], {}), '(combination)\n', (2312, 2325), False, 'from scripts.mock_llm_api import llm_api\n')]
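The TODO in get_description() describes the vector lookup it deliberately avoids. For completeness, roughly what that lookup could look like with LanceDB's predicate filtering layered on top of the vector search; this is a hedged sketch only, kept out of the app for the stability reasons noted in the comments above, and 'Fire' is an illustrative item name.

import lancedb
from scripts.mock_llm_api import llm_api

db = lancedb.connect("data/lancedb")
table = db.open_table("items")
df = (
    table.search(llm_api.embedding_request("Fire"))
    .where("item = 'Fire'")  # exact-match filter on top of the vector search
    .limit(1)
    .to_pandas()
)
print(df["description"].values[0])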
import logging
import json
import gradio as gr
import numpy as np
import lancedb
import os
from huggingface_hub import AsyncInferenceClient

# Setting up the logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# db
TABLE_NAME = "docs"
TEXT_COLUMN = "text"

BATCH_SIZE = int(os.getenv("BATCH_SIZE"))
NPROBES = int(os.getenv("NPROBES"))
REFINE_FACTOR = int(os.getenv("REFINE_FACTOR"))

retriever = AsyncInferenceClient(model=os.getenv("EMBED_URL") + "/embed")
reranker = AsyncInferenceClient(model=os.getenv("RERANK_URL") + "/rerank")

db = lancedb.connect("/usr/src/.lancedb")
tbl = db.open_table(TABLE_NAME)


async def retrieve(query: str, k: int) -> list[str]:
    """
    Retrieve top k items with RETRIEVER
    """
    resp = await retriever.post(
        json={
            "inputs": query,
            "truncate": True
        }
    )
    try:
        query_vec = json.loads(resp)[0]
    except:
        raise gr.Error(resp.decode())
    documents = tbl.search(
        query=query_vec
    ).nprobes(NPROBES).refine_factor(REFINE_FACTOR).limit(k).to_list()
    documents = [doc[TEXT_COLUMN] for doc in documents]
    return documents


async def rerank(query: str, documents: list[str], k: int) -> list[str]:
    """
    Rerank items returned by RETRIEVER and return top k
    """
    scores = []
    for i in range(int(np.ceil(len(documents) / BATCH_SIZE))):
        resp = await reranker.post(
            json={
                "query": query,
                "texts": documents[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                "truncate": True
            }
        )
        try:
            batch_scores = json.loads(resp)
            batch_scores = [s["score"] for s in batch_scores]
            scores.extend(batch_scores)
        except:
            raise gr.Error(resp.decode())
    documents = [doc for _, doc in sorted(zip(scores, documents))[-k:]]
    return documents
[ "lancedb.connect" ]
[((167, 206), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (186, 206), False, 'import logging\n'), ((216, 243), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (233, 243), False, 'import logging\n'), ((573, 609), 'lancedb.connect', 'lancedb.connect', (['"""/usr/src/.lancedb"""'], {}), "('/usr/src/.lancedb')\n", (588, 609), False, 'import lancedb\n'), ((308, 331), 'os.getenv', 'os.getenv', (['"""BATCH_SIZE"""'], {}), "('BATCH_SIZE')\n", (317, 331), False, 'import os\n'), ((347, 367), 'os.getenv', 'os.getenv', (['"""NPROBES"""'], {}), "('NPROBES')\n", (356, 367), False, 'import os\n'), ((389, 415), 'os.getenv', 'os.getenv', (['"""REFINE_FACTOR"""'], {}), "('REFINE_FACTOR')\n", (398, 415), False, 'import os\n'), ((457, 479), 'os.getenv', 'os.getenv', (['"""EMBED_URL"""'], {}), "('EMBED_URL')\n", (466, 479), False, 'import os\n'), ((530, 553), 'os.getenv', 'os.getenv', (['"""RERANK_URL"""'], {}), "('RERANK_URL')\n", (539, 553), False, 'import os\n'), ((904, 920), 'json.loads', 'json.loads', (['resp'], {}), '(resp)\n', (914, 920), False, 'import json\n'), ((1663, 1679), 'json.loads', 'json.loads', (['resp'], {}), '(resp)\n', (1673, 1679), False, 'import json\n')]
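The nprobes()/refine_factor() knobs used in retrieve() only take effect once an ANN index exists on the table. A sketch of how such an index might be created at ingestion time; the IVF-PQ parameter values below are illustrative, not taken from this file.

import lancedb

db = lancedb.connect("/usr/src/.lancedb")
tbl = db.open_table("docs")
# Build an IVF-PQ index; partition and sub-vector counts are illustrative.
tbl.create_index(metric="cosine", num_partitions=256, num_sub_vectors=96)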
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0

import os

from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import LanceDB
from langchain.embeddings import BedrockEmbeddings
from langchain.document_loaders import PyPDFDirectoryLoader

import lancedb as ldb
import pyarrow as pa

embeddings = BedrockEmbeddings()

# we split the data into chunks of 1,000 characters, with an overlap
# of 200 characters between the chunks, which helps to give better results
# and contain the context of the information between chunks
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)

db = ldb.connect('/tmp/embeddings')

schema = pa.schema(
    [
        pa.field("vector", pa.list_(pa.float32(), 1536)),  # document vector with 1.5k dimensions (TitanEmbedding)
        pa.field("text", pa.string()),  # langchain requires it
        pa.field("id", pa.string())  # langchain requires it
    ])

tbl = db.create_table("doc_table", schema=schema)

# load the document as before
loader = PyPDFDirectoryLoader("./docs/")
docs = loader.load()
docs = text_splitter.split_documents(docs)

LanceDB.from_documents(docs, embeddings, connection=tbl)
[ "lancedb.connect" ]
[((384, 403), 'langchain.embeddings.BedrockEmbeddings', 'BedrockEmbeddings', ([], {}), '()\n', (401, 403), False, 'from langchain.embeddings import BedrockEmbeddings\n'), ((625, 682), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(200)'}), '(chunk_size=1000, chunk_overlap=200)\n', (646, 682), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((689, 719), 'lancedb.connect', 'ldb.connect', (['"""/tmp/embeddings"""'], {}), "('/tmp/embeddings')\n", (700, 719), True, 'import lancedb as ldb\n'), ((1073, 1104), 'langchain.document_loaders.PyPDFDirectoryLoader', 'PyPDFDirectoryLoader', (['"""./docs/"""'], {}), "('./docs/')\n", (1093, 1104), False, 'from langchain.document_loaders import PyPDFDirectoryLoader\n'), ((1171, 1227), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['docs', 'embeddings'], {'connection': 'tbl'}), '(docs, embeddings, connection=tbl)\n', (1193, 1227), False, 'from langchain.vectorstores import LanceDB\n'), ((880, 891), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (889, 891), True, 'import pyarrow as pa\n'), ((939, 950), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (948, 950), True, 'import pyarrow as pa\n'), ((779, 791), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (789, 791), True, 'import pyarrow as pa\n')]
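A minimal retrieval sketch against the table populated above, assuming the ingestion script has already run and Bedrock credentials are configured; the question text is illustrative.

import lancedb as ldb
from langchain.embeddings import BedrockEmbeddings

embeddings = BedrockEmbeddings()
db = ldb.connect('/tmp/embeddings')
tbl = db.open_table("doc_table")

query_vec = embeddings.embed_query("What does the document say about pricing?")
print(tbl.search(query_vec).limit(3).to_pandas()["text"].tolist())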
# Ultralytics YOLO 🚀, AGPL-3.0 license

from io import BytesIO
from pathlib import Path
from typing import Any, List, Tuple, Union

import cv2
import numpy as np
import torch
from PIL import Image
from matplotlib import pyplot as plt
from pandas import DataFrame
from tqdm import tqdm

from engine.data.augment import Format
from engine.data.dataset import YOLODataset
from engine.data.utils import check_det_dataset
from engine.models.yolo.model import YOLO
from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR

from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch


class ExplorerDataset(YOLODataset):
    def __init__(self, *args, data: dict = None, **kwargs) -> None:
        super().__init__(*args, data=data, **kwargs)

    def load_image(self, i: int) -> Union[Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]], Tuple[None, None, None]]:
        """Loads 1 image from dataset index 'i' without any resize ops."""
        im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
        if im is None:  # not cached in RAM
            if fn.exists():  # load npy
                im = np.load(fn)
            else:  # read image
                im = cv2.imread(f)  # BGR
                if im is None:
                    raise FileNotFoundError(f"Image Not Found {f}")
            h0, w0 = im.shape[:2]  # orig hw
            return im, (h0, w0), im.shape[:2]
        return self.ims[i], self.im_hw0[i], self.im_hw[i]

    def build_transforms(self, hyp: IterableSimpleNamespace = None):
        """Creates transforms for dataset images without resizing."""
        return Format(
            bbox_format="xyxy",
            normalize=False,
            return_mask=self.use_segments,
            return_keypoint=self.use_keypoints,
            batch_idx=True,
            mask_ratio=hyp.mask_ratio,
            mask_overlap=hyp.overlap_mask,
        )


class Explorer:
    def __init__(
        self,
        data: Union[str, Path] = "coco128.yaml",
        model: str = "yolov8n.pt",
        uri: str = USER_CONFIG_DIR / "explorer",
    ) -> None:
        # Note duckdb==0.10.0 bug https://github.com/ultralytics/ultralytics/pull/8181
        checks.check_requirements(["lancedb>=0.4.3", "duckdb<=0.9.2"])
        import lancedb

        self.connection = lancedb.connect(uri)
        self.table_name = Path(data).name.lower() + "_" + model.lower()
        self.sim_idx_base_name = (
            f"{self.table_name}_sim_idx".lower()
        )  # Use this name and append thres and top_k to reuse the table
        self.model = YOLO(model)
        self.data = data  # None
        self.choice_set = None

        self.table = None
        self.progress = 0

    def create_embeddings_table(self, force: bool = False, split: str = "train") -> None:
        """
        Create LanceDB table containing the embeddings of the images in the dataset. The table will be reused if it
        already exists. Pass force=True to overwrite the existing table.

        Args:
            force (bool): Whether to overwrite the existing table or not. Defaults to False.
            split (str): Split of the dataset to use. Defaults to 'train'.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            ```
        """
        if self.table is not None and not force:
            LOGGER.info("Table already exists. Reusing it. Pass force=True to overwrite it.")
            return
        if self.table_name in self.connection.table_names() and not force:
            LOGGER.info(f"Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.")
            self.table = self.connection.open_table(self.table_name)
            self.progress = 1
            return
        if self.data is None:
            raise ValueError("Data must be provided to create embeddings table")

        data_info = check_det_dataset(self.data)
        if split not in data_info:
            raise ValueError(
                f"Split {split} is not found in the dataset. Available keys in the dataset are {list(data_info.keys())}"
            )

        choice_set = data_info[split]
        choice_set = choice_set if isinstance(choice_set, list) else [choice_set]
        self.choice_set = choice_set
        dataset = ExplorerDataset(img_path=choice_set, data=data_info, augment=False, cache=False, task=self.model.task)

        # Create the table schema
        batch = dataset[0]
        vector_size = self.model.embed(batch["im_file"], verbose=False)[0].shape[0]
        table = self.connection.create_table(self.table_name, schema=get_table_schema(vector_size), mode="overwrite")
        table.add(
            self._yield_batches(
                dataset,
                data_info,
                self.model,
                exclude_keys=["img", "ratio_pad", "resized_shape", "ori_shape", "batch_idx"],
            )
        )

        self.table = table

    def _yield_batches(self, dataset: ExplorerDataset, data_info: dict, model: YOLO, exclude_keys: List[str]):
        """Generates batches of data for embedding, excluding specified keys."""
        for i in tqdm(range(len(dataset))):
            self.progress = float(i + 1) / len(dataset)
            batch = dataset[i]
            for k in exclude_keys:
                batch.pop(k, None)
            batch = sanitize_batch(batch, data_info)
            batch["vector"] = model.embed(batch["im_file"], verbose=False)[0].detach().tolist()
            yield [batch]

    def query(
        self, imgs: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, limit: int = 25
    ) -> Any:  # pyarrow.Table
        """
        Query the table for similar images. Accepts a single image or a list of images.

        Args:
            imgs (str or list): Path to the image or a list of paths to the images.
            limit (int): Number of results to return.

        Returns:
            (pyarrow.Table): An arrow table containing the results. Supports converting to:
                - pandas dataframe: `result.to_pandas()`
                - dict of lists: `result.to_pydict()`

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.query(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        if self.table is None:
            raise ValueError("Table is not created. Please create the table first.")
        if isinstance(imgs, str):
            imgs = [imgs]
        assert isinstance(imgs, list), f"img must be a string or a list of strings. Got {type(imgs)}"
        embeds = self.model.embed(imgs)
        # Get avg if multiple images are passed (len > 1)
        embeds = torch.mean(torch.stack(embeds), 0).cpu().numpy() if len(embeds) > 1 else embeds[0].cpu().numpy()
        return self.table.search(embeds).limit(limit).to_arrow()

    def sql_query(
        self, query: str, return_type: str = "pandas"
    ) -> Union[DataFrame, Any, None]:  # pandas.dataframe or pyarrow.Table
        """
        Run a SQL-Like query on the table. Utilizes LanceDB predicate pushdown.

        Args:
            query (str): SQL query to run.
            return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.

        Returns:
            (pyarrow.Table): An arrow table containing the results.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
            result = exp.sql_query(query)
            ```
        """
        assert return_type in {
            "pandas",
            "arrow",
        }, f"Return type should be either `pandas` or `arrow`, but got {return_type}"
        import duckdb

        if self.table is None:
            raise ValueError("Table is not created. Please create the table first.")

        # Note: using filter pushdown would be a better long term solution. Temporarily using duckdb for this.
        table = self.table.to_arrow()  # noqa NOTE: Don't comment this. This line is used by DuckDB
        if not query.startswith("SELECT") and not query.startswith("WHERE"):
            raise ValueError(
                f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. found {query}"
            )
        if query.startswith("WHERE"):
            query = f"SELECT * FROM 'table' {query}"
        LOGGER.info(f"Running query: {query}")

        rs = duckdb.sql(query)
        if return_type == "arrow":
            return rs.arrow()
        elif return_type == "pandas":
            return rs.df()

    def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image:
        """
        Plot the results of a SQL-Like query on the table.

        Args:
            query (str): SQL query to run.
            labels (bool): Whether to plot the labels or not.

        Returns:
            (PIL.Image): Image containing the plot.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
            result = exp.plot_sql_query(query)
            ```
        """
        result = self.sql_query(query, return_type="arrow")
        if len(result) == 0:
            LOGGER.info("No results found.")
            return None
        img = plot_query_result(result, plot_labels=labels)
        return Image.fromarray(img)

    def get_similar(
        self,
        img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
        idx: Union[int, List[int]] = None,
        limit: int = 25,
        return_type: str = "pandas",
    ) -> Union[DataFrame, Any]:  # pandas.dataframe or pyarrow.Table
        """
        Query the table for similar images. Accepts a single image or a list of images.

        Args:
            img (str or list): Path to the image or a list of paths to the images.
            idx (int or list): Index of the image in the table or a list of indexes.
            limit (int): Number of results to return. Defaults to 25.
            return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.

        Returns:
            (pandas.DataFrame): A dataframe containing the results.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.get_similar(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        assert return_type in {
            "pandas",
            "arrow",
        }, f"Return type should be either `pandas` or `arrow`, but got {return_type}"
        img = self._check_imgs_or_idxs(img, idx)
        similar = self.query(img, limit=limit)

        if return_type == "arrow":
            return similar
        elif return_type == "pandas":
            return similar.to_pandas()

    def plot_similar(
        self,
        img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
        idx: Union[int, List[int]] = None,
        limit: int = 25,
        labels: bool = True,
    ) -> Image.Image:
        """
        Plot the similar images. Accepts images or indexes.

        Args:
            img (str or list): Path to the image or a list of paths to the images.
            idx (int or list): Index of the image in the table or a list of indexes.
            labels (bool): Whether to plot the labels or not.
            limit (int): Number of results to return. Defaults to 25.

        Returns:
            (PIL.Image): Image containing the plot.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.plot_similar(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        similar = self.get_similar(img, idx, limit, return_type="arrow")
        if len(similar) == 0:
            LOGGER.info("No results found.")
            return None
        img = plot_query_result(similar, plot_labels=labels)
        return Image.fromarray(img)

    def similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> DataFrame:
        """
        Calculate the similarity index of all the images in the table. Here, the index will contain the data points
        that are max_dist or closer to the image in the embedding space at a given index.

        Args:
            max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
            top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit when
                running vector search. Defaults: None.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to True.

        Returns:
            (pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image,
                and columns include indices of similar images and their respective distances.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            sim_idx = exp.similarity_index()
            ```
        """
        if self.table is None:
            raise ValueError("Table is not created. Please create the table first.")
        sim_idx_table_name = f"{self.sim_idx_base_name}_thres_{max_dist}_top_{top_k}".lower()
        if sim_idx_table_name in self.connection.table_names() and not force:
            LOGGER.info("Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.")
            return self.connection.open_table(sim_idx_table_name).to_pandas()

        if top_k and not (1.0 >= top_k >= 0.0):
            raise ValueError(f"top_k must be between 0.0 and 1.0. Got {top_k}")
        if max_dist < 0.0:
            raise ValueError(f"max_dist must be greater than 0. Got {max_dist}")

        top_k = int(top_k * len(self.table)) if top_k else len(self.table)
        top_k = max(top_k, 1)
        features = self.table.to_lance().to_table(columns=["vector", "im_file"]).to_pydict()
        im_files = features["im_file"]
        embeddings = features["vector"]

        sim_table = self.connection.create_table(sim_idx_table_name, schema=get_sim_index_schema(), mode="overwrite")

        def _yield_sim_idx():
            """Generates a dataframe with similarity indices and distances for images."""
            for i in tqdm(range(len(embeddings))):
                sim_idx = self.table.search(embeddings[i]).limit(top_k).to_pandas().query(f"_distance <= {max_dist}")
                yield [
                    {
                        "idx": i,
                        "im_file": im_files[i],
                        "count": len(sim_idx),
                        "sim_im_files": sim_idx["im_file"].tolist(),
                    }
                ]

        sim_table.add(_yield_sim_idx())
        self.sim_index = sim_table
        return sim_table.to_pandas()

    def plot_similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> Image:
        """
        Plot the similarity index of all the images in the table. Here, the index will contain the data points that
        are max_dist or closer to the image in the embedding space at a given index.

        Args:
            max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
            top_k (float): Percentage of closest data points to consider when counting. Used to apply limit when
                running vector search. Defaults to 0.01.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to True.

        Returns:
            (PIL.Image): Image containing the plot.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()

            similarity_idx_plot = exp.plot_similarity_index()
            similarity_idx_plot.show()  # view image preview
            similarity_idx_plot.save('path/to/save/similarity_index_plot.png')  # save contents to file
            ```
        """
        sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force)
        sim_count = sim_idx["count"].tolist()
        sim_count = np.array(sim_count)

        indices = np.arange(len(sim_count))

        # Create the bar plot
        plt.bar(indices, sim_count)

        # Customize the plot (optional)
        plt.xlabel("data idx")
        plt.ylabel("Count")
        plt.title("Similarity Count")
        buffer = BytesIO()
        plt.savefig(buffer, format="png")
        buffer.seek(0)

        # Use Pillow to open the image from the buffer
        return Image.fromarray(np.array(Image.open(buffer)))

    def _check_imgs_or_idxs(
        self, img: Union[str, np.ndarray, List[str], List[np.ndarray], None], idx: Union[None, int, List[int]]
    ) -> List[np.ndarray]:
        if img is None and idx is None:
            raise ValueError("Either img or idx must be provided.")
        if img is not None and idx is not None:
            raise ValueError("Only one of img or idx must be provided.")
        if idx is not None:
            idx = idx if isinstance(idx, list) else [idx]
            img = self.table.to_lance().take(idx, columns=["im_file"]).to_pydict()["im_file"]

        return img if isinstance(img, list) else [img]

    def ask_ai(self, query):
        """
        Ask AI a question.

        Args:
            query (str): Question to ask.

        Returns:
            (pandas.DataFrame): A dataframe containing filtered results to the SQL query.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            answer = exp.ask_ai('Show images with 1 person and 2 dogs')
            ```
        """
        result = prompt_sql_query(query)
        try:
            df = self.sql_query(result)
        except Exception as e:
            LOGGER.error("AI generated query is not valid. Please try again with a different prompt")
            LOGGER.error(e)
            return None
        return df

    def visualize(self, result):
        """
        Visualize the results of a query. TODO.

        Args:
            result (pyarrow.Table): Table containing the results of a query.
        """
        pass

    def generate_report(self, result):
        """
        Generate a report of the dataset. TODO
        """
        pass
[ "lancedb.connect" ]
[((1672, 1865), 'engine.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normalize=False, return_mask=self.use_segments,\n    return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.\n    mask_ratio, mask_overlap=hyp.overlap_mask)\n", (1678, 1865), False, 'from engine.data.augment import Format\n'), ((2245, 2307), 'engine.utils.checks.check_requirements', 'checks.check_requirements', (["['lancedb>=0.4.3', 'duckdb<=0.9.2']"], {}), "(['lancedb>=0.4.3', 'duckdb<=0.9.2'])\n", (2270, 2307), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((2358, 2378), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2373, 2378), False, 'import lancedb\n'), ((2629, 2640), 'engine.models.yolo.model.YOLO', 'YOLO', (['model'], {}), '(model)\n', (2633, 2640), False, 'from engine.models.yolo.model import YOLO\n'), ((3972, 4000), 'engine.data.utils.check_det_dataset', 'check_det_dataset', (['self.data'], {}), '(self.data)\n', (3989, 4000), False, 'from engine.data.utils import check_det_dataset\n'), ((8607, 8645), 'engine.utils.LOGGER.info', 'LOGGER.info', (['f"""Running query: {query}"""'], {}), "(f'Running query: {query}')\n", (8618, 8645), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((8660, 8677), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (8670, 8677), False, 'import duckdb\n'), ((9639, 9659), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (9654, 9659), False, 'from PIL import Image\n'), ((12284, 12304), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (12299, 12304), False, 'from PIL import Image\n'), ((16556, 16575), 'numpy.array', 'np.array', (['sim_count'], {}), '(sim_count)\n', (16564, 16575), True, 'import numpy as np\n'), ((16660, 16687), 'matplotlib.pyplot.bar', 'plt.bar', (['indices', 'sim_count'], {}), '(indices, sim_count)\n', (16667, 16687), True, 'from matplotlib import pyplot as plt\n'), ((16737, 16759), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data idx"""'], {}), "('data idx')\n", (16747, 16759), True, 'from matplotlib import pyplot as plt\n'), ((16768, 16787), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (16778, 16787), True, 'from matplotlib import pyplot as plt\n'), ((16796, 16825), 'matplotlib.pyplot.title', 'plt.title', (['"""Similarity Count"""'], {}), "('Similarity Count')\n", (16805, 16825), True, 'from matplotlib import pyplot as plt\n'), ((16843, 16852), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (16850, 16852), False, 'from io import BytesIO\n'), ((16861, 16894), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {'format': '"""png"""'}), "(buffer, format='png')\n", (16872, 16894), True, 'from matplotlib import pyplot as plt\n'), ((3433, 3519), 'engine.utils.LOGGER.info', 'LOGGER.info', (['"""Table already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n    'Table already exists. Reusing it. Pass force=True to overwrite it.')\n", (3444, 3519), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((3621, 3731), 'engine.utils.LOGGER.info', 'LOGGER.info', (['f"""Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n    f'Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.'\n    )\n", (3632, 3731), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((9507, 9539), 'engine.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (9518, 9539), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((12151, 12183), 'engine.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (12162, 12183), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((13761, 13864), 'engine.utils.LOGGER.info', 'LOGGER.info', (['"""Similarity matrix already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n    'Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.'\n    )\n", (13772, 13864), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((1182, 1193), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1189, 1193), True, 'import numpy as np\n'), ((1247, 1260), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (1257, 1260), False, 'import cv2\n'), ((17014, 17032), 'PIL.Image.open', 'Image.open', (['buffer'], {}), '(buffer)\n', (17024, 17032), False, 'from PIL import Image\n'), ((18250, 18349), 'engine.utils.LOGGER.error', 'LOGGER.error', (['"""AI generated query is not valid. Please try again with a different prompt"""'], {}), "(\n    'AI generated query is not valid. Please try again with a different prompt'\n    )\n", (18262, 18349), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((18352, 18367), 'engine.utils.LOGGER.error', 'LOGGER.error', (['e'], {}), '(e)\n', (18364, 18367), False, 'from engine.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR\n'), ((2405, 2415), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2409, 2415), False, 'from pathlib import Path\n'), ((6832, 6851), 'torch.stack', 'torch.stack', (['embeds'], {}), '(embeds)\n', (6843, 6851), False, 'import torch\n')]
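A short usage sketch of the Explorer class, assembled from its own docstring examples; it assumes the `engine` package above is installed, and the dataset, model, and image URL are the defaults those docstrings use.

exp = Explorer(data="coco128.yaml", model="yolov8n.pt")
exp.create_embeddings_table()

# Semantic search with an example image
similar = exp.get_similar(img="https://ultralytics.com/images/zidane.jpg", limit=10)

# SQL-style filtering over the same embeddings table
df = exp.sql_query("SELECT * FROM 'table' WHERE labels LIKE '%person%'")

# Similarity index across the whole dataset
sim_idx = exp.similarity_index(max_dist=0.2)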
import pyarrow as pa
from typing import Union

from dryg.settings import DB_URI

import lancedb


def connection() -> lancedb.LanceDBConnection:
    """
    Connect to the database

    Returns:
        lancedb.LanceDBConnection: LanceDBConnection object
    """
    db = lancedb.connect(DB_URI)
    return db


def open_table(table_name: str) -> Union[lancedb.table.LanceTable, None]:
    """
    Open a table from the database

    Args:
        table_name (str): Name of the table

    Returns:
        lancedb.table.LanceTable: LanceTable object
    """
    db = connection()
    try:
        table = db.open_table(table_name) if table_name in db.table_names() else None
        return table
    except ValueError:
        return None


def create_table(table_name: str, table: pa.Table, mode: str = "overwrite") -> lancedb.LanceDBConnection:
    """
    Create a table in the database

    Args:
        table_name (str): Name of the table
        table (pa.Table): Table to be created
        mode (str, optional): Mode to use when creating the table. Defaults to "overwrite".

    Returns:
        lancedb.LanceDBConnection: LanceDBConnection object
    """
    db = connection()
    db.create_table(table_name, table, mode=mode)
    return db
[ "lancedb.connect" ]
[((271, 294), 'lancedb.connect', 'lancedb.connect', (['DB_URI'], {}), '(DB_URI)\n', (286, 294), False, 'import lancedb\n')]
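A round-trip sketch of the helpers above, with a hypothetical table name and payload; it assumes DB_URI in dryg.settings points at a writable local path.

import pyarrow as pa

data = pa.table({"id": [1, 2], "text": ["alpha", "beta"]})  # illustrative data
create_table("notes", data)  # "notes" is a hypothetical table name

tbl = open_table("notes")
if tbl is not None:
    print(tbl.to_pandas())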
import os
import typer
import pickle
import pandas as pd
from dotenv import load_dotenv
import openai
import pinecone
import lancedb
import pyarrow as pa
from collections import deque

TASK_CREATION_PROMPT = """
You are a task creation AI that uses the result of an execution agent to create new tasks with the following
objective: {objective}, The last completed task has the result: {result}. This result was based on this task
description: {task_description}. These are incomplete tasks: {task_list}. Based on the result, create new tasks
to be completed by the AI system that do not overlap with incomplete tasks. Return the tasks as an array."""

PRIORITIZATION_PROMPT = """
You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing the following tasks:
{task_names}. Consider the ultimate objective of your team: {objective}. Do not remove any tasks. Return the
result as a numbered list, like:
#. First task
#. Second task
Start the task list with number {next_task_id}."""

EXECUTION_PROMPT = """
You are an AI who performs one task based on the following objective: {objective}.
Your task: {task}\nResponse: """


class Task:
    def __init__(self, name, id=None, result=None, vector=None):
        self.name = name
        self.id = id
        self.result = result
        self.vector = vector


class OpenAIService:
    def __init__(self, api_key):
        openai.api_key = api_key

    def get_ada_embedding(self, text):
        return openai.Embedding.create(input=[text.replace('\n', ' ')], model='text-embedding-ada-002')['data'][0][
            'embedding'
        ]

    def create(self, prompt, max_tokens=100, temperature=0.5):
        return (
            openai.Completion.create(
                engine='text-davinci-003',
                prompt=prompt,
                temperature=temperature,
                max_tokens=max_tokens,
                top_p=1,
                frequency_penalty=0,
                presence_penalty=0,
            )
            .choices[0]
            .text.strip()
        )


class TestAIService:
    def __init__(self, ai_service, cache_file):
        self.ai_service = ai_service
        self.cache_file = cache_file
        if os.path.isfile(cache_file):
            self.cache = pickle.load(open(cache_file, 'rb'))
        else:
            self.cache = {'ada': {}, 'create': {}}
            pickle.dump(self.cache, open(cache_file, 'wb'))

    def get_ada_embedding(self, text):
        if text not in self.cache['ada']:
            self.cache['ada'][text] = self.ai_service.get_ada_embedding(text)
            pickle.dump(self.cache, open(self.cache_file, 'wb'))
        return self.cache['ada'][text]

    def create(self, prompt, max_tokens=100, temperature=0.5):
        key = (prompt, max_tokens, temperature)
        if key not in self.cache['create']:
            self.cache['create'][key] = self.ai_service.create(prompt, max_tokens, temperature)
            pickle.dump(self.cache, open(self.cache_file, 'wb'))
        return self.cache['create'][key]


class PineconeService:
    def __init__(self, api_key, environment, table_name, dimension, metric, pod_type):
        self.table_name = table_name
        pinecone.init(api_key=api_key, environment=environment)
        if table_name not in pinecone.list_indexes():
            pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type)
        self.index = pinecone.Index(table_name)

    def query(self, query_embedding, top_k):
        results = self.index.query(query_embedding, top_k=top_k, include_metadata=True)
        sorted_results = sorted(results.matches, key=lambda x: x.score, reverse=True)
        return [Task(**item.metadata) for item in sorted_results]

    def upsert(self, task):
        self.index.upsert([(task.id, task.vector, task.__dict__)])


class LanceService:
    def __init__(self, table_name, dimension):
        self.db = lancedb.connect('.')
        schema = pa.schema(
            [
                pa.field('id', pa.int32()),
                pa.field('vector', pa.list_(pa.float32(), dimension)),
                pa.field('name', pa.string()),
                pa.field('result', pa.string()),
                # TODO There is a fixed schema but we keep converting
            ]
        )
        data = [{'id': 0, 'vector': [0.0] * dimension, 'name': 'asd', 'result': 'asd'}]
        self.table = self.db.create_table(table_name, mode='overwrite', data=data, schema=schema)

    def query(self, query_embedding, top_k):
        result = self.table.search(query_embedding).limit(top_k).to_df().drop(columns=['score'])
        return [Task(**v) for v in result.to_dict(orient="records")]

    def upsert(self, task):
        self.table.add(pd.DataFrame([task.__dict__]))


class BabyAGI:
    def __init__(self, objective, ai_service, vector_service):
        self.ai_service = ai_service
        self.vector_service = vector_service
        self.objective = objective
        self.objective_embedding = self.ai_service.get_ada_embedding(self.objective)
        self.task_list = deque([])

    def add_task(self, task):
        if task.id is None:
            task.id = max([t.id for t in self.task_list], default=0) + 1
        self.task_list.append(task)

    def task_creation_agent(self, task):
        prompt = TASK_CREATION_PROMPT.format(
            objective=self.objective,
            result=task.result,
            task_description=task.name,
            task_list=', '.join([t.name for t in self.task_list]),
        )
        return [{'task_name': task_name} for task_name in self.ai_service.create(prompt).split('\n')]

    def prioritization_agent(self, this_task_id):
        def to_task(value):
            parts = value.strip().split('.', 1)
            if len(parts) != 2:
                return None
            return Task(id=int(parts[0].strip()), name=parts[1].strip())

        prompt = PRIORITIZATION_PROMPT.format(
            task_names=', '.join([t.name for t in self.task_list]),
            objective=self.objective,
            next_task_id=int(this_task_id) + 1,
        )
        new_tasks = self.ai_service.create(prompt, max_tokens=1000)
        self.task_list = deque([to_task(v) for v in new_tasks.split('\n') if to_task(v) is not None])

    def run(self, first_task):
        self.add_task(Task(name=first_task))
        for _ in range(4):
            if self.task_list:
                context = self.vector_service.query(self.objective_embedding, 5)
                task = self.task_list.popleft()
                task.result = self.ai_service.create(
                    prompt=EXECUTION_PROMPT.format(objective=self.objective, task=task),
                    max_tokens=2000,
                    temperature=0.7,
                )
                task.vector = self.ai_service.get_ada_embedding(task.result)
                self.vector_service.upsert(task)

                new_tasks = self.task_creation_agent(task)
                task_id_counter = 1
                for new_task in new_tasks:
                    task_id_counter += 1
                    new_task.update({'task_id': task_id_counter})
                    self.add_task(Task(id=new_task['task_id'], name=new_task['task_name']))
                self.prioritization_agent(task.id)


def main():
    load_dotenv()
    baby_agi = BabyAGI(
        objective='Solve world hunger.',
        ai_service=TestAIService(
            ai_service=OpenAIService(api_key=os.getenv('OPENAI_API_KEY')),
            cache_file='babyagi_cache.pkl',
        ),
        vector_service=LanceService(
            table_name='test-table',
            dimension=1536,
        )
        # vector_service=PineconeService(
        #     api_key=os.getenv('PINECONE_API_KEY'),
        #     environment=os.getenv('PINECONE_ENVIRONMENT'),
        #     table_name='test-table',
        #     dimension=1536,
        #     metric='cosine',
        #     pod_type='p1',
        # ),
    )
    baby_agi.run(first_task='Develop a task list.')


if __name__ == '__main__':
    typer.run(main)
[ "lancedb.connect" ]
[((7282, 7295), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (7293, 7295), False, 'from dotenv import load_dotenv\n'), ((8026, 8041), 'typer.run', 'typer.run', (['main'], {}), '(main)\n', (8035, 8041), False, 'import typer\n'), ((2219, 2245), 'os.path.isfile', 'os.path.isfile', (['cache_file'], {}), '(cache_file)\n', (2233, 2245), False, 'import os\n'), ((3212, 3267), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'api_key', 'environment': 'environment'}), '(api_key=api_key, environment=environment)\n', (3225, 3267), False, 'import pinecone\n'), ((3444, 3470), 'pinecone.Index', 'pinecone.Index', (['table_name'], {}), '(table_name)\n', (3458, 3470), False, 'import pinecone\n'), ((3940, 3960), 'lancedb.connect', 'lancedb.connect', (['"""."""'], {}), "('.')\n", (3955, 3960), False, 'import lancedb\n'), ((5081, 5090), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (5086, 5090), False, 'from collections import deque\n'), ((3297, 3320), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (3318, 3320), False, 'import pinecone\n'), ((3334, 3426), 'pinecone.create_index', 'pinecone.create_index', (['table_name'], {'dimension': 'dimension', 'metric': 'metric', 'pod_type': 'pod_type'}), '(table_name, dimension=dimension, metric=metric,\n pod_type=pod_type)\n', (3355, 3426), False, 'import pinecone\n'), ((4743, 4772), 'pandas.DataFrame', 'pd.DataFrame', (['[task.__dict__]'], {}), '([task.__dict__])\n', (4755, 4772), True, 'import pandas as pd\n'), ((4034, 4044), 'pyarrow.int32', 'pa.int32', ([], {}), '()\n', (4042, 4044), True, 'import pyarrow as pa\n'), ((4151, 4162), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (4160, 4162), True, 'import pyarrow as pa\n'), ((4200, 4211), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (4209, 4211), True, 'import pyarrow as pa\n'), ((4091, 4103), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (4101, 4103), True, 'import pyarrow as pa\n'), ((1711, 1883), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-003"""', 'prompt': 'prompt', 'temperature': 'temperature', 'max_tokens': 'max_tokens', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)'}), "(engine='text-davinci-003', prompt=prompt,\n temperature=temperature, max_tokens=max_tokens, top_p=1,\n frequency_penalty=0, presence_penalty=0)\n", (1735, 1883), False, 'import openai\n'), ((7440, 7467), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (7449, 7467), False, 'import os\n')]
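The LanceService class in the sample above reduces to four LanceDB calls: connect, create_table with a pyarrow schema, add, and search. A minimal standalone sketch of that pattern, assuming an illustrative table name ('demo_tasks') and a toy 4-dimensional vector size rather than the sample's 1536:

# Minimal sketch of the LanceDB usage in LanceService above.
# Table name, dimension, and rows are illustrative assumptions.
import lancedb
import pyarrow as pa

db = lancedb.connect('.')  # embedded database in the current directory
schema = pa.schema(
    [
        pa.field('id', pa.int32()),
        pa.field('vector', pa.list_(pa.float32(), 4)),  # 4-dim vectors for brevity
        pa.field('name', pa.string()),
    ]
)
# Seed row so the table is created with concrete data matching the schema
data = [{'id': 0, 'vector': [0.0] * 4, 'name': 'seed'}]
table = db.create_table('demo_tasks', mode='overwrite', data=data, schema=schema)

table.add([{'id': 1, 'vector': [0.1, 0.2, 0.3, 0.4], 'name': 'first task'}])
hits = table.search([0.1, 0.2, 0.3, 0.4]).limit(2).to_pandas()
print(hits[['id', 'name']])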
import json
from generate_data import *
from create_embeddings import *
import lancedb

uri = "./sample-lancedb"
db = lancedb.connect(uri)
text_table = "table_from_df_text"
img_table = "table_from_df_images"
tbl_txt = db.open_table(text_table)
tbl_img = db.open_table(img_table)

with open('./test_data.json') as f:
    test_user_scripts = json.loads(f.read())

customers = list(test_user_scripts.keys())
customer_id = 0  # change this to test for different customers


# Format user chat history
def build_customer_chat_history(customer_id):
    outstring = ""
    for q, a in test_user_scripts[customers[customer_id]].items():
        outstring += f"Question- {q} : User Answer: {a}"
    return outstring


# this function reformats the user inputs to a json structured output very similar to the one used to create embeddings
# (`client`, `get_embedding`, `create_clip_image_embeddings`, `model_name` and `example_listing` are provided by the star imports above)
def get_reformatted_output(user_prompt):
    response = client.chat.completions.create(
        model="gpt-4-turbo-preview",
        response_format={"type": "json_object"},
        messages=[
            {"role": "system", "content": "You are a helpful assistant with deep expertise in real estate."},
            {"role": "user", "content": user_prompt}
        ]
    )
    listings = json.loads(response.choices[0].message.content)["listings"]
    # print(listings)
    return listings


def format_response(response):
    out_string = ""
    for item, val in response.items():
        if val != 'None':
            out_string += f"{item}: {val} "
    return out_string


def get_user_preference(customer_id, img_path=None):
    # if user provides an image as reference, we shall also use that. The assumption is that the image has been
    # loaded and placed in a path the application can access
    image = img_path
    # if img_path:
    #     try:
    #         image = Image.open(image_path)
    #     except:
    #         pass
    chat_history = build_customer_chat_history(customer_id)
    user_prompt = f"""
    Please only use the customer chat history given below to create a desired listing for them.
    Use the example given below and format the results in json format. All the results should be saved inside a key called listings.
    Each result should have the following keys: Neighborhood, Price, Bedrooms, Bathrooms, House Size, Description, Neighborhood Description.
    Use only information from the chat history. If any of the fields are unavailable, list them as None.
    Customer Chat History: {chat_history}
    Example: {example_listing}
    """
    response = get_reformatted_output(user_prompt)[0]
    formatted_response = format_response(response)
    return formatted_response, image


def get_embeddings_user_prefs(resp):
    text_resp, img_resp = resp[0], resp[1]
    text_embs = get_embedding(text_resp)
    img_embs = None
    if img_resp:
        try:
            img_embs = create_clip_image_embeddings(img_resp, model_name)
        except:
            pass
    return text_embs, img_embs


def search_tables(embeddings, num_responses=5):
    text_embeddings = embeddings[0]
    img_embeddings = embeddings[1]
    df = tbl_txt.search(text_embeddings) \
        .metric("cosine") \
        .limit(num_responses) \
        .to_pandas()
    return df


resp = get_user_preference(customer_id)
embeddings = get_embeddings_user_prefs(resp)
print(search_tables(embeddings))
[ "lancedb.connect" ]
[((119, 139), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (134, 139), False, 'import lancedb\n'), ((1192, 1239), 'json.loads', 'json.loads', (['response.choices[0].message.content'], {}), '(response.choices[0].message.content)\n', (1202, 1239), False, 'import json\n')]
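The search_tables function above chains LanceDB's query builder. A minimal sketch of that chain in isolation, assuming the table name from the sample already exists and using a zero vector as a stand-in for a real 1536-dimensional text embedding:

# Sketch of the query chain used in search_tables above.
import lancedb

db = lancedb.connect("./sample-lancedb")
tbl = db.open_table("table_from_df_text")  # assumes the table was created beforehand
query_vector = [0.0] * 1536  # stand-in for a real embedding
df = (
    tbl.search(query_vector)
    .metric("cosine")  # distance metric; L2 is the default when omitted
    .limit(5)
    .to_pandas()
)
print(df.head())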
# Ultralytics YOLO 🚀, AGPL-3.0 license

from io import BytesIO
from pathlib import Path
from typing import Any, List, Tuple, Union

import cv2
import numpy as np
import torch
from matplotlib import pyplot as plt
from pandas import DataFrame
from PIL import Image
from tqdm import tqdm

from ultralytics.data.augment import Format
from ultralytics.data.dataset import YOLODataset
from ultralytics.data.utils import check_det_dataset
from ultralytics.models.yolo.model import YOLO
from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks

from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch


class ExplorerDataset(YOLODataset):
    def __init__(self, *args, data: dict = None, **kwargs) -> None:
        super().__init__(*args, data=data, **kwargs)

    def load_image(self, i: int) -> Union[Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]], Tuple[None, None, None]]:
        """Loads 1 image from dataset index 'i' without any resize ops."""
        im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
        if im is None:  # not cached in RAM
            if fn.exists():  # load npy
                im = np.load(fn)
            else:  # read image
                im = cv2.imread(f)  # BGR
                if im is None:
                    raise FileNotFoundError(f"Image Not Found {f}")
            h0, w0 = im.shape[:2]  # orig hw
            return im, (h0, w0), im.shape[:2]
        return self.ims[i], self.im_hw0[i], self.im_hw[i]

    def build_transforms(self, hyp: IterableSimpleNamespace = None):
        """Creates transforms for dataset images without resizing."""
        return Format(
            bbox_format="xyxy",
            normalize=False,
            return_mask=self.use_segments,
            return_keypoint=self.use_keypoints,
            batch_idx=True,
            mask_ratio=hyp.mask_ratio,
            mask_overlap=hyp.overlap_mask,
        )


class Explorer:
    def __init__(
        self, data: Union[str, Path] = "coco128.yaml", model: str = "yolov8n.pt", uri: str = "~/ultralytics/explorer"
    ) -> None:
        checks.check_requirements(["lancedb>=0.4.3", "duckdb"])
        import lancedb

        self.connection = lancedb.connect(uri)
        self.table_name = Path(data).name.lower() + "_" + model.lower()
        self.sim_idx_base_name = (
            f"{self.table_name}_sim_idx".lower()
        )  # Use this name and append thres and top_k to reuse the table
        self.model = YOLO(model)
        self.data = data  # None
        self.choice_set = None

        self.table = None
        self.progress = 0

    def create_embeddings_table(self, force: bool = False, split: str = "train") -> None:
        """
        Create LanceDB table containing the embeddings of the images in the dataset. The table will be reused if it
        already exists. Pass force=True to overwrite the existing table.

        Args:
            force (bool): Whether to overwrite the existing table or not. Defaults to False.
            split (str): Split of the dataset to use. Defaults to 'train'.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            ```
        """
        if self.table is not None and not force:
            LOGGER.info("Table already exists. Reusing it. Pass force=True to overwrite it.")
            return
        if self.table_name in self.connection.table_names() and not force:
            LOGGER.info(f"Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.")
            self.table = self.connection.open_table(self.table_name)
            self.progress = 1
            return
        if self.data is None:
            raise ValueError("Data must be provided to create embeddings table")

        data_info = check_det_dataset(self.data)
        if split not in data_info:
            raise ValueError(
                f"Split {split} is not found in the dataset. Available keys in the dataset are {list(data_info.keys())}"
            )

        choice_set = data_info[split]
        choice_set = choice_set if isinstance(choice_set, list) else [choice_set]
        self.choice_set = choice_set
        dataset = ExplorerDataset(img_path=choice_set, data=data_info, augment=False, cache=False, task=self.model.task)

        # Create the table schema
        batch = dataset[0]
        vector_size = self.model.embed(batch["im_file"], verbose=False)[0].shape[0]
        table = self.connection.create_table(self.table_name, schema=get_table_schema(vector_size), mode="overwrite")
        table.add(
            self._yield_batches(
                dataset,
                data_info,
                self.model,
                exclude_keys=["img", "ratio_pad", "resized_shape", "ori_shape", "batch_idx"],
            )
        )

        self.table = table

    def _yield_batches(self, dataset: ExplorerDataset, data_info: dict, model: YOLO, exclude_keys: List[str]):
        """Generates batches of data for embedding, excluding specified keys."""
        for i in tqdm(range(len(dataset))):
            self.progress = float(i + 1) / len(dataset)
            batch = dataset[i]
            for k in exclude_keys:
                batch.pop(k, None)
            batch = sanitize_batch(batch, data_info)
            batch["vector"] = model.embed(batch["im_file"], verbose=False)[0].detach().tolist()
            yield [batch]

    def query(
        self, imgs: Union[str, np.ndarray, List[str], List[np.ndarray]] = None, limit: int = 25
    ) -> Any:  # pyarrow.Table
        """
        Query the table for similar images. Accepts a single image or a list of images.

        Args:
            imgs (str or list): Path to the image or a list of paths to the images.
            limit (int): Number of results to return.

        Returns:
            (pyarrow.Table): An arrow table containing the results. Supports converting to:
                - pandas dataframe: `result.to_pandas()`
                - dict of lists: `result.to_pydict()`

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.query(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        if self.table is None:
            raise ValueError("Table is not created. Please create the table first.")
        if isinstance(imgs, str):
            imgs = [imgs]
        assert isinstance(imgs, list), f"img must be a string or a list of strings. Got {type(imgs)}"
        embeds = self.model.embed(imgs)
        # Get avg if multiple images are passed (len > 1)
        embeds = torch.mean(torch.stack(embeds), 0).cpu().numpy() if len(embeds) > 1 else embeds[0].cpu().numpy()
        return self.table.search(embeds).limit(limit).to_arrow()

    def sql_query(
        self, query: str, return_type: str = "pandas"
    ) -> Union[DataFrame, Any, None]:  # pandas.dataframe or pyarrow.Table
        """
        Run a SQL-Like query on the table. Utilizes LanceDB predicate pushdown.

        Args:
            query (str): SQL query to run.
            return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.

        Returns:
            (pyarrow.Table): An arrow table containing the results.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
            result = exp.sql_query(query)
            ```
        """
        assert return_type in [
            "pandas",
            "arrow",
        ], f"Return type should be either `pandas` or `arrow`, but got {return_type}"
        import duckdb

        if self.table is None:
            raise ValueError("Table is not created. Please create the table first.")

        # Note: using filter pushdown would be a better long term solution. Temporarily using duckdb for this.
        table = self.table.to_arrow()  # noqa NOTE: Don't comment this. This line is used by DuckDB
        if not query.startswith("SELECT") and not query.startswith("WHERE"):
            raise ValueError(
                f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. found {query}"
            )
        if query.startswith("WHERE"):
            query = f"SELECT * FROM 'table' {query}"
        LOGGER.info(f"Running query: {query}")

        rs = duckdb.sql(query)
        if return_type == "pandas":
            return rs.df()
        elif return_type == "arrow":
            return rs.arrow()

    def plot_sql_query(self, query: str, labels: bool = True) -> Image.Image:
        """
        Plot the results of a SQL-Like query on the table.

        Args:
            query (str): SQL query to run.
            labels (bool): Whether to plot the labels or not.

        Returns:
            (PIL.Image): Image containing the plot.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            query = "SELECT * FROM 'table' WHERE labels LIKE '%person%'"
            result = exp.plot_sql_query(query)
            ```
        """
        result = self.sql_query(query, return_type="arrow")
        if len(result) == 0:
            LOGGER.info("No results found.")
            return None
        img = plot_query_result(result, plot_labels=labels)
        return Image.fromarray(img)

    def get_similar(
        self,
        img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
        idx: Union[int, List[int]] = None,
        limit: int = 25,
        return_type: str = "pandas",
    ) -> Union[DataFrame, Any]:  # pandas.dataframe or pyarrow.Table
        """
        Query the table for similar images. Accepts a single image or a list of images.

        Args:
            img (str or list): Path to the image or a list of paths to the images.
            idx (int or list): Index of the image in the table or a list of indexes.
            limit (int): Number of results to return. Defaults to 25.
            return_type (str): Type of the result to return. Can be either 'pandas' or 'arrow'. Defaults to 'pandas'.

        Returns:
            (pandas.DataFrame): A dataframe containing the results.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.get_similar(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        assert return_type in [
            "pandas",
            "arrow",
        ], f"Return type should be either `pandas` or `arrow`, but got {return_type}"
        img = self._check_imgs_or_idxs(img, idx)
        similar = self.query(img, limit=limit)

        if return_type == "pandas":
            return similar.to_pandas()
        elif return_type == "arrow":
            return similar

    def plot_similar(
        self,
        img: Union[str, np.ndarray, List[str], List[np.ndarray]] = None,
        idx: Union[int, List[int]] = None,
        limit: int = 25,
        labels: bool = True,
    ) -> Image.Image:
        """
        Plot the similar images. Accepts images or indexes.

        Args:
            img (str or list): Path to the image or a list of paths to the images.
            idx (int or list): Index of the image in the table or a list of indexes.
            labels (bool): Whether to plot the labels or not.
            limit (int): Number of results to return. Defaults to 25.

        Returns:
            (PIL.Image): Image containing the plot.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            similar = exp.plot_similar(img='https://ultralytics.com/images/zidane.jpg')
            ```
        """
        similar = self.get_similar(img, idx, limit, return_type="arrow")
        if len(similar) == 0:
            LOGGER.info("No results found.")
            return None
        img = plot_query_result(similar, plot_labels=labels)
        return Image.fromarray(img)

    def similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> DataFrame:
        """
        Calculate the similarity index of all the images in the table. Here, the index will contain the data points
        that are max_dist or closer to the image in the embedding space at a given index.

        Args:
            max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
            top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit
                when running vector search. Defaults to None.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to False.

        Returns:
            (pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image,
                and columns include indices of similar images and their respective distances.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            sim_idx = exp.similarity_index()
            ```
        """
        if self.table is None:
            raise ValueError("Table is not created. Please create the table first.")
        sim_idx_table_name = f"{self.sim_idx_base_name}_thres_{max_dist}_top_{top_k}".lower()
        if sim_idx_table_name in self.connection.table_names() and not force:
            LOGGER.info("Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.")
            return self.connection.open_table(sim_idx_table_name).to_pandas()

        if top_k and not (1.0 >= top_k >= 0.0):
            raise ValueError(f"top_k must be between 0.0 and 1.0. Got {top_k}")
        if max_dist < 0.0:
            raise ValueError(f"max_dist must be greater than 0. Got {max_dist}")

        top_k = int(top_k * len(self.table)) if top_k else len(self.table)
        top_k = max(top_k, 1)
        features = self.table.to_lance().to_table(columns=["vector", "im_file"]).to_pydict()
        im_files = features["im_file"]
        embeddings = features["vector"]

        sim_table = self.connection.create_table(sim_idx_table_name, schema=get_sim_index_schema(), mode="overwrite")

        def _yield_sim_idx():
            """Generates a dataframe with similarity indices and distances for images."""
            for i in tqdm(range(len(embeddings))):
                sim_idx = self.table.search(embeddings[i]).limit(top_k).to_pandas().query(f"_distance <= {max_dist}")
                yield [
                    {
                        "idx": i,
                        "im_file": im_files[i],
                        "count": len(sim_idx),
                        "sim_im_files": sim_idx["im_file"].tolist(),
                    }
                ]

        sim_table.add(_yield_sim_idx())
        self.sim_index = sim_table
        return sim_table.to_pandas()

    def plot_similarity_index(self, max_dist: float = 0.2, top_k: float = None, force: bool = False) -> Image:
        """
        Plot the similarity index of all the images in the table. Here, the index will contain the data points that
        are max_dist or closer to the image in the embedding space at a given index.

        Args:
            max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
            top_k (float): Percentage of closest data points to consider when counting. Used to apply limit when
                running vector search. Defaults to None.
            force (bool): Whether to overwrite the existing similarity index or not. Defaults to False.

        Returns:
            (PIL.Image): Image containing the plot.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()

            similarity_idx_plot = exp.plot_similarity_index()
            similarity_idx_plot.show()  # view image preview
            similarity_idx_plot.save('path/to/save/similarity_index_plot.png')  # save contents to file
            ```
        """
        sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force)
        sim_count = sim_idx["count"].tolist()
        sim_count = np.array(sim_count)

        indices = np.arange(len(sim_count))

        # Create the bar plot
        plt.bar(indices, sim_count)

        # Customize the plot (optional)
        plt.xlabel("data idx")
        plt.ylabel("Count")
        plt.title("Similarity Count")
        buffer = BytesIO()
        plt.savefig(buffer, format="png")
        buffer.seek(0)

        # Use Pillow to open the image from the buffer
        return Image.fromarray(np.array(Image.open(buffer)))

    def _check_imgs_or_idxs(
        self, img: Union[str, np.ndarray, List[str], List[np.ndarray], None], idx: Union[None, int, List[int]]
    ) -> List[np.ndarray]:
        if img is None and idx is None:
            raise ValueError("Either img or idx must be provided.")
        if img is not None and idx is not None:
            raise ValueError("Only one of img or idx must be provided.")
        if idx is not None:
            idx = idx if isinstance(idx, list) else [idx]
            img = self.table.to_lance().take(idx, columns=["im_file"]).to_pydict()["im_file"]

        return img if isinstance(img, list) else [img]

    def ask_ai(self, query):
        """
        Ask AI a question.

        Args:
            query (str): Question to ask.

        Returns:
            (pandas.DataFrame): A dataframe containing filtered results to the SQL query.

        Example:
            ```python
            exp = Explorer()
            exp.create_embeddings_table()
            answer = exp.ask_ai('Show images with 1 person and 2 dogs')
            ```
        """
        result = prompt_sql_query(query)
        try:
            df = self.sql_query(result)
        except Exception as e:
            LOGGER.error("AI generated query is not valid. Please try again with a different prompt")
            LOGGER.error(e)
            return None
        return df

    def visualize(self, result):
        """
        Visualize the results of a query. TODO.

        Args:
            result (pyarrow.Table): Table containing the results of a query.
        """
        pass

    def generate_report(self, result):
        """
        Generate a report of the dataset. TODO
        """
        pass
[ "lancedb.connect" ]
[((1681, 1874), 'ultralytics.data.augment.Format', 'Format', ([], {'bbox_format': '"""xyxy"""', 'normalize': '(False)', 'return_mask': 'self.use_segments', 'return_keypoint': 'self.use_keypoints', 'batch_idx': '(True)', 'mask_ratio': 'hyp.mask_ratio', 'mask_overlap': 'hyp.overlap_mask'}), "(bbox_format='xyxy', normalize=False, return_mask=self.use_segments,\n return_keypoint=self.use_keypoints, batch_idx=True, mask_ratio=hyp.\n mask_ratio, mask_overlap=hyp.overlap_mask)\n", (1687, 1874), False, 'from ultralytics.data.augment import Format\n'), ((2138, 2193), 'ultralytics.utils.checks.check_requirements', 'checks.check_requirements', (["['lancedb>=0.4.3', 'duckdb']"], {}), "(['lancedb>=0.4.3', 'duckdb'])\n", (2163, 2193), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((2244, 2264), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2259, 2264), False, 'import lancedb\n'), ((2515, 2526), 'ultralytics.models.yolo.model.YOLO', 'YOLO', (['model'], {}), '(model)\n', (2519, 2526), False, 'from ultralytics.models.yolo.model import YOLO\n'), ((3858, 3886), 'ultralytics.data.utils.check_det_dataset', 'check_det_dataset', (['self.data'], {}), '(self.data)\n', (3875, 3886), False, 'from ultralytics.data.utils import check_det_dataset\n'), ((8493, 8531), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Running query: {query}"""'], {}), "(f'Running query: {query}')\n", (8504, 8531), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((8546, 8563), 'duckdb.sql', 'duckdb.sql', (['query'], {}), '(query)\n', (8556, 8563), False, 'import duckdb\n'), ((9525, 9545), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (9540, 9545), False, 'from PIL import Image\n'), ((12170, 12190), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (12185, 12190), False, 'from PIL import Image\n'), ((16442, 16461), 'numpy.array', 'np.array', (['sim_count'], {}), '(sim_count)\n', (16450, 16461), True, 'import numpy as np\n'), ((16546, 16573), 'matplotlib.pyplot.bar', 'plt.bar', (['indices', 'sim_count'], {}), '(indices, sim_count)\n', (16553, 16573), True, 'from matplotlib import pyplot as plt\n'), ((16623, 16645), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data idx"""'], {}), "('data idx')\n", (16633, 16645), True, 'from matplotlib import pyplot as plt\n'), ((16654, 16673), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (16664, 16673), True, 'from matplotlib import pyplot as plt\n'), ((16682, 16711), 'matplotlib.pyplot.title', 'plt.title', (['"""Similarity Count"""'], {}), "('Similarity Count')\n", (16691, 16711), True, 'from matplotlib import pyplot as plt\n'), ((16729, 16738), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (16736, 16738), False, 'from io import BytesIO\n'), ((16747, 16780), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {'format': '"""png"""'}), "(buffer, format='png')\n", (16758, 16780), True, 'from matplotlib import pyplot as plt\n'), ((3319, 3405), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Table already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Table already exists. Reusing it. Pass force=True to overwrite it.')\n", (3330, 3405), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((3507, 3617), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['f"""Table {self.table_name} already exists. Reusing it. 
Pass force=True to overwrite it."""'], {}), "(\n f'Table {self.table_name} already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (3518, 3617), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((9393, 9425), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (9404, 9425), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((12037, 12069), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""No results found."""'], {}), "('No results found.')\n", (12048, 12069), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((13647, 13750), 'ultralytics.utils.LOGGER.info', 'LOGGER.info', (['"""Similarity matrix already exists. Reusing it. Pass force=True to overwrite it."""'], {}), "(\n 'Similarity matrix already exists. Reusing it. Pass force=True to overwrite it.'\n )\n", (13658, 13750), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((1191, 1202), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1198, 1202), True, 'import numpy as np\n'), ((1256, 1269), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (1266, 1269), False, 'import cv2\n'), ((16900, 16918), 'PIL.Image.open', 'Image.open', (['buffer'], {}), '(buffer)\n', (16910, 16918), False, 'from PIL import Image\n'), ((18136, 18235), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['"""AI generated query is not valid. Please try again with a different prompt"""'], {}), "(\n 'AI generated query is not valid. Please try again with a different prompt'\n )\n", (18148, 18235), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((18238, 18253), 'ultralytics.utils.LOGGER.error', 'LOGGER.error', (['e'], {}), '(e)\n', (18250, 18253), False, 'from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks\n'), ((2291, 2301), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (2295, 2301), False, 'from pathlib import Path\n'), ((6718, 6737), 'torch.stack', 'torch.stack', (['embeds'], {}), '(embeds)\n', (6729, 6737), False, 'import torch\n')]
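Taken together, the docstring examples above describe a short end-to-end flow for the Explorer class. A condensed sketch of that flow, assuming Explorer is importable from the ultralytics package (as in the releases that shipped this module) and using its default dataset and model:

# Condensed usage flow assembled from the docstring examples above.
from ultralytics import Explorer

exp = Explorer(data="coco128.yaml", model="yolov8n.pt")
exp.create_embeddings_table()

# Vector search for images similar to a reference image
similar = exp.get_similar(img="https://ultralytics.com/images/zidane.jpg", limit=10)

# SQL-like filtering, executed by DuckDB over the Arrow table
result = exp.sql_query("SELECT * FROM 'table' WHERE labels LIKE '%person%'")

# Similarity index across the whole embeddings table
sim_idx = exp.similarity_index(max_dist=0.2)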
from PIL import Image
import streamlit as st
import openai
#exercise 11
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
#exercise 12
from langchain.memory import ConversationBufferWindowMemory
#exercise 13
from langchain.document_loaders import TextLoader, PyPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import LanceDB
import lancedb
import os
import tempfile

# os.environ["OPENAI_API_KEY"] = st.secrets["openapi_key"]
# openai.api_key = st.secrets["openapi_key"]

#Global ex 13
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")
if not os.path.exists(WORKING_DIRECTORY):
    os.makedirs(WORKING_DIRECTORY)


def ex11a():  # change in ex11a
    # langchain prompt template
    prompt = PromptTemplate(
        input_variables=["subject", "topic"],
        template="""Design a lesson plan on {subject} on the topic of {topic} for primary 1 students""",
    )
    # openai_api_key = st.secrets["openapi_key"]
    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.9)
    # creating a LLM chain with the langchain call and prompt template
    chain = LLMChain(llm=llm, prompt=prompt)
    if st.button("Run my chain"):
        input_prompt = prompt.format(subject="English", topic="Verbs")
        # Showing what is sent to LLM Chain
        st.write("Input prompt: ", input_prompt)
        # Showing the output from LLM Chain
        st.write(chain.run({"subject": "English", "topic": "Verbs"}))


def prompt_inputs_form():
    # Using st.form, create the starting prompt to your prompt template:
    # an expert on a topic who is talking to a user of a certain age
    # langchain prompt template
    with st.form("Prompt Template"):
        occupation = st.text_input("Enter the occupation:")
        topic = st.text_input("Enter the topic:")
        age = st.text_input("Enter the age:")
        # Every form must have a submit button.
        submitted = st.form_submit_button("Submit")
    # return a dictionary of the values
    if submitted:
        return {
            'occupation': occupation,
            'topic': topic,
            'age': age
        }


def ex11b():
    # create your template
    prompt_template = PromptTemplate(
        input_variables=["occupation", "topic", "age"],
        template="""Imagine you are a {occupation} who is an expert on the topic of {topic}, you are going to help, teach and provide information to the person who is {age} years old, if you do not know the answer, you must tell the person, do not make any answer up""",
    )
    # create a langchain function call to openai
    llm = ChatOpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0.9,
    )
    # create a LLM chain with the langchain call and prompt template
    chain = LLMChain(llm=llm, prompt=prompt_template)
    # call the prompt_inputs_form()
    dict_inputs = prompt_inputs_form()
    if dict_inputs:
        st.write(chain.run(dict_inputs))


def ex12():
    memory = ConversationBufferWindowMemory(k=3)
    memory.save_context({"input": "hi"}, {"output": "whats up?"})
    memory.save_context({"input": "not much"}, {"output": "what can I help you with?"})
    st.write(memory.load_memory_variables({}))

    memory = ConversationBufferWindowMemory(k=3, return_messages=True)
    memory.save_context({"input": "hi"}, {"output": "whats up?"})
    memory.save_context({"input": "not much"}, {"output": "what can I help you with?"})
    st.write(memory.load_memory_variables({}))


#exercise 13 - loading
def upload_file_streamlit():
    def get_file_extension(file_name):
        return os.path.splitext(file_name)[1]

    st.subheader("Upload your docs")
    # Streamlit file uploader to accept file input
    uploaded_file = st.file_uploader("Choose a file", type=["docx", "txt", "pdf"])
    if uploaded_file:
        # Reading file content
        file_content = uploaded_file.read()
        # Determine the suffix based on uploaded file's name
        file_suffix = get_file_extension(uploaded_file.name)
        # Saving the uploaded file temporarily to process it
        with tempfile.NamedTemporaryFile(delete=False, suffix=file_suffix) as temp_file:
            temp_file.write(file_content)
            temp_file.flush()  # Ensure the data is written to the file
            temp_file_path = temp_file.name
        return temp_file_path


#exercise 13 - split and chunk, embeddings and storing in vectorstores for reference
def vectorstore_creator():
    # WORKING_DIRECTORY set above in the main.py
    # Process the temporary file using PyPDFLoader (or any other loader you need)
    embeddings = OpenAIEmbeddings()
    db = lancedb.connect(WORKING_DIRECTORY)
    table = db.create_table(
        "my_table",
        data=[
            {
                "vector": embeddings.embed_query("Query unsuccessful"),
                "text": "Query unsuccessful",
                "id": "1",
            }
        ],
        mode="overwrite",
    )
    # st.write(temp_file_path)
    temp_file_path = upload_file_streamlit()
    if temp_file_path:
        loader = PyPDFLoader(temp_file_path)
        documents = loader.load_and_split()
        db = LanceDB.from_documents(documents, embeddings, connection=table)
        return db


def ex13():
    if "vectorstore" not in st.session_state:
        st.session_state.vectorstore = False
    db = vectorstore_creator()
    st.session_state.vectorstore = db
    if st.session_state.vectorstore:
        query = st.text_input("Enter a query")
        if query:
            st.session_state.vectorstore = db
            docs = db.similarity_search(query)
            st.write(docs[0].page_content)


def chat_completion_stream_prompt(prompt):
    MODEL = "gpt-3.5-turbo"  # consider changing this to session_state
    response = openai.ChatCompletion.create(
        model=MODEL,
        messages=[
            {"role": "system", "content": st.session_state.prompt_template},
            {"role": "user", "content": prompt},
        ],
        temperature=0,  # temperature
        stream=True,  # stream option
    )
    return response


# save the vectorstore in st.session_state
# add semantic search prompt into memory prompt
# integrate back into your chatbot
def ex14_basebot():
    # Prompt_template form from ex11
    prompt_template = PromptTemplate(
        input_variables=["occupation", "topic", "age"],
        template="""Imagine you are a {occupation} who is an expert on the topic of {topic}, you are going to help, teach and provide information to the person who is {age} years old, if you do not know the answer, you must tell the person, do not make any answer up""",
    )
    dict_inputs = prompt_inputs_form()
    if dict_inputs:
        input_prompt = prompt_template.format(
            occupation=dict_inputs["occupation"],
            topic=dict_inputs["topic"],
            age=dict_inputs["age"],
        )
        st.session_state.input_prompt = input_prompt

    if "input_prompt" not in st.session_state:
        st.session_state.input_prompt = "Speak like Yoda from Star Wars"

    if "memory" not in st.session_state:
        st.session_state.memory = ConversationBufferWindowMemory(k=5)

    # step 1 save the memory from your chatbot
    # step 2 integrate the memory in the prompt_template (st.session_state.prompt_template) show a hint
    memory_data = st.session_state.memory.load_memory_variables({})
    st.write(memory_data)
    st.session_state.prompt_template = f"""
st.session_state.input_prompt: {st.session_state.input_prompt}

This is the last conversation history
{memory_data}
"""
    st.write("new prompt template: ", st.session_state.prompt_template)

    st.session_state.vectorstore = vectorstore_creator()

    # Initialize chat history
    if "msg" not in st.session_state:
        st.session_state.msg = []

    # Showing Chat history
    for message in st.session_state.msg:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    try:
        if prompt := st.chat_input("What is up?"):
            # query information
            if st.session_state.vectorstore:
                docs = st.session_state.vectorstore.similarity_search(prompt)
                docs = docs[0].page_content
                # add your query prompt
                vs_prompt = f"""You should reference this search result to help your answer,
{docs}
if the search result does not answer the query, please say you are unable to answer, do not make up an answer"""
            else:
                vs_prompt = ""
            # add query prompt to your memory prompt and send it to LLM
            st.session_state.prompt_template = (
                st.session_state.prompt_template + vs_prompt
            )
            # set user prompt in chat history
            st.session_state.msg.append({"role": "user", "content": prompt})
            with st.chat_message("user"):
                st.markdown(prompt)

            with st.chat_message("assistant"):
                message_placeholder = st.empty()
                full_response = ""
                # streaming function
                for response in chat_completion_stream_prompt(prompt):
                    full_response += response.choices[0].delta.get("content", "")
                    message_placeholder.markdown(full_response + "▌")
                message_placeholder.markdown(full_response)
            st.session_state.msg.append({"role": "assistant", "content": full_response})
            st.session_state.memory.save_context(
                {"input": prompt}, {"output": full_response}
            )
    except Exception as e:
        st.error(e)
[ "lancedb.connect" ]
[((649, 660), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (658, 660), False, 'import os\n'), ((681, 710), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (693, 710), False, 'import os\n'), ((719, 752), 'os.path.exists', 'os.path.exists', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (733, 752), False, 'import os\n'), ((755, 785), 'os.makedirs', 'os.makedirs', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (766, 785), False, 'import os\n'), ((859, 1014), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['subject', 'topic']", 'template': '"""Design a lesson plan on {subject} on the topic of {topic} for primary 1 students"""'}), "(input_variables=['subject', 'topic'], template=\n 'Design a lesson plan on {subject} on the topic of {topic} for primary 1 students'\n )\n", (873, 1014), False, 'from langchain.prompts import PromptTemplate\n'), ((1071, 1126), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.9)'}), "(model_name='gpt-3.5-turbo', temperature=0.9)\n", (1081, 1126), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1205, 1237), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (1213, 1237), False, 'from langchain.chains import LLMChain\n'), ((1242, 1267), 'streamlit.button', 'st.button', (['"""Run my chain"""'], {}), "('Run my chain')\n", (1251, 1267), True, 'import streamlit as st\n'), ((2158, 2497), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['occupation', 'topic', 'age']", 'template': '"""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up"""'}), "(input_variables=['occupation', 'topic', 'age'], template=\n 'Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information to the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up'\n )\n", (2172, 2497), False, 'from langchain.prompts import PromptTemplate\n'), ((2553, 2608), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.9)'}), "(model_name='gpt-3.5-turbo', temperature=0.9)\n", (2563, 2608), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2692, 2733), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template'}), '(llm=llm, prompt=prompt_template)\n', (2700, 2733), False, 'from langchain.chains import LLMChain\n'), ((2880, 2915), 'langchain.memory.ConversationBufferWindowMemory', 'ConversationBufferWindowMemory', ([], {'k': '(3)'}), '(k=3)\n', (2910, 2915), False, 'from langchain.memory import ConversationBufferWindowMemory\n'), ((3123, 3180), 'langchain.memory.ConversationBufferWindowMemory', 'ConversationBufferWindowMemory', ([], {'k': '(3)', 'return_messages': '(True)'}), '(k=3, return_messages=True)\n', (3153, 3180), False, 'from langchain.memory import ConversationBufferWindowMemory\n'), ((3506, 3538), 'streamlit.subheader', 'st.subheader', (['"""Upload your docs"""'], {}), "('Upload your docs')\n", (3518, 3538), True, 'import streamlit as st\n'), ((3605, 3667), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose a file"""'], {'type': 
"['docx', 'txt', 'pdf']"}), "('Choose a file', type=['docx', 'txt', 'pdf'])\n", (3621, 3667), True, 'import streamlit as st\n'), ((4420, 4438), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (4436, 4438), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((4445, 4479), 'lancedb.connect', 'lancedb.connect', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (4460, 4479), False, 'import lancedb\n'), ((5402, 5592), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': 'MODEL', 'messages': "[{'role': 'system', 'content': st.session_state.prompt_template}, {'role':\n 'user', 'content': prompt}]", 'temperature': '(0)', 'stream': '(True)'}), "(model=MODEL, messages=[{'role': 'system',\n 'content': st.session_state.prompt_template}, {'role': 'user',\n 'content': prompt}], temperature=0, stream=True)\n", (5430, 5592), False, 'import openai\n'), ((5855, 6204), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['occupation', 'topic', 'age']", 'template': '"""Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information\n\t\t\t\t\t\tto the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up"""'}), '(input_variables=[\'occupation\', \'topic\', \'age\'], template=\n """Imagine you are a {occupation} who is an expert on the topic of {topic} , you are going to help , teach and provide information\n\t\t\t\t\t\tto the person who is {age} years old, if you do not not know the answer, you must tell the person , do not make any answer up"""\n )\n', (5869, 6204), False, 'from langchain.prompts import PromptTemplate\n'), ((6823, 6872), 'streamlit.session_state.memory.load_memory_variables', 'st.session_state.memory.load_memory_variables', (['{}'], {}), '({})\n', (6868, 6872), True, 'import streamlit as st\n'), ((6874, 6895), 'streamlit.write', 'st.write', (['memory_data'], {}), '(memory_data)\n', (6882, 6895), True, 'import streamlit as st\n'), ((7059, 7126), 'streamlit.write', 'st.write', (['"""new prompt template: """', 'st.session_state.prompt_template'], {}), "('new prompt template: ', st.session_state.prompt_template)\n", (7067, 7126), True, 'import streamlit as st\n'), ((1374, 1414), 'streamlit.write', 'st.write', (['"""Input prompt: """', 'input_prompt'], {}), "('Input prompt: ', input_prompt)\n", (1382, 1414), True, 'import streamlit as st\n'), ((1719, 1745), 'streamlit.form', 'st.form', (['"""Prompt Template"""'], {}), "('Prompt Template')\n", (1726, 1745), True, 'import streamlit as st\n'), ((1762, 1800), 'streamlit.text_input', 'st.text_input', (['"""Enter the occupation:"""'], {}), "('Enter the occupation:')\n", (1775, 1800), True, 'import streamlit as st\n'), ((1811, 1844), 'streamlit.text_input', 'st.text_input', (['"""Enter the topic:"""'], {}), "('Enter the topic:')\n", (1824, 1844), True, 'import streamlit as st\n'), ((1853, 1884), 'streamlit.text_input', 'st.text_input', (['"""Enter the age:"""'], {}), "('Enter the age:')\n", (1866, 1884), True, 'import streamlit as st\n'), ((1941, 1972), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Submit"""'], {}), "('Submit')\n", (1962, 1972), True, 'import streamlit as st\n'), ((4777, 4804), 'langchain.document_loaders.PyPDFLoader', 'PyPDFLoader', (['temp_file_path'], {}), '(temp_file_path)\n', (4788, 4804), False, 'from langchain.document_loaders import TextLoader, PyPDFLoader\n'), ((4850, 
4913), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'table'}), '(documents, embeddings, connection=table)\n', (4872, 4913), False, 'from langchain.vectorstores import LanceDB\n'), ((5128, 5158), 'streamlit.text_input', 'st.text_input', (['"""Enter a query"""'], {}), "('Enter a query')\n", (5141, 5158), True, 'import streamlit as st\n'), ((6626, 6661), 'langchain.memory.ConversationBufferWindowMemory', 'ConversationBufferWindowMemory', ([], {'k': '(5)'}), '(k=5)\n', (6656, 6661), False, 'from langchain.memory import ConversationBufferWindowMemory\n'), ((3473, 3500), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (3489, 3500), False, 'import os\n'), ((3925, 3986), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'suffix': 'file_suffix'}), '(delete=False, suffix=file_suffix)\n', (3952, 3986), False, 'import tempfile\n'), ((5249, 5279), 'streamlit.write', 'st.write', (['docs[0].page_content'], {}), '(docs[0].page_content)\n', (5257, 5279), True, 'import streamlit as st\n'), ((7343, 7375), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (7358, 7375), True, 'import streamlit as st\n'), ((7380, 7411), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (7391, 7411), True, 'import streamlit as st\n'), ((7437, 7465), 'streamlit.chat_input', 'st.chat_input', (['"""What is up?"""'], {}), "('What is up?')\n", (7450, 7465), True, 'import streamlit as st\n'), ((8093, 8157), 'streamlit.session_state.msg.append', 'st.session_state.msg.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (8120, 8157), True, 'import streamlit as st\n'), ((8571, 8647), 'streamlit.session_state.msg.append', 'st.session_state.msg.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content': full_response})\n", (8598, 8647), True, 'import streamlit as st\n'), ((8651, 8737), 'streamlit.session_state.memory.save_context', 'st.session_state.memory.save_context', (["{'input': prompt}", "{'output': full_response}"], {}), "({'input': prompt}, {'output':\n full_response})\n", (8687, 8737), True, 'import streamlit as st\n'), ((8770, 8781), 'streamlit.error', 'st.error', (['e'], {}), '(e)\n', (8778, 8781), True, 'import streamlit as st\n'), ((7537, 7591), 'streamlit.session_state.vectorstore.similarity_search', 'st.session_state.vectorstore.similarity_search', (['prompt'], {}), '(prompt)\n', (7583, 7591), True, 'import streamlit as st\n'), ((8166, 8189), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (8181, 8189), True, 'import streamlit as st\n'), ((8195, 8214), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (8206, 8214), True, 'import streamlit as st\n'), ((8224, 8252), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (8239, 8252), True, 'import streamlit as st\n'), ((8280, 8290), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (8288, 8290), True, 'import streamlit as st\n')]
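The vectorstore_creator function above seeds a LanceDB table and hands it to LangChain's LanceDB wrapper. A minimal non-Streamlit sketch of the same pattern, assuming a hypothetical local PDF ('example.pdf') and an OPENAI_API_KEY already set in the environment:

# Minimal sketch of the LanceDB-as-LangChain-vectorstore pattern above.
import lancedb
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import LanceDB

embeddings = OpenAIEmbeddings()
db = lancedb.connect("database")
# Seed row so the table exists with the right schema before wrapping it
table = db.create_table(
    "my_table",
    data=[{"vector": embeddings.embed_query("seed"), "text": "seed", "id": "1"}],
    mode="overwrite",
)
docs = PyPDFLoader("example.pdf").load_and_split()  # "example.pdf" is hypothetical
vectorstore = LanceDB.from_documents(docs, embeddings, connection=table)
print(vectorstore.similarity_search("What is this document about?")[0].page_content)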
from PIL import Image
import streamlit as st
import openai
#exercise 11
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
#exercise 12
from langchain.memory import ConversationBufferWindowMemory
#exercise 13
from langchain.document_loaders import TextLoader, PyPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import LanceDB
import lancedb
import os
import tempfile
#exercise 15
import sqlite3
import pandas as pd
from datetime import datetime
#exercise 16
from langchain.agents import ConversationalChatAgent, AgentExecutor
from langchain.callbacks import StreamlitCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from langchain.tools import DuckDuckGoSearchRun
#Exercise 17
from langchain.agents import tool
import json

# os.environ["OPENAI_API_KEY"] = st.secrets["openapi_key"]
# openai.api_key = st.secrets["openapi_key"]

#Global ex 13
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")
if not os.path.exists(WORKING_DIRECTORY):
    os.makedirs(WORKING_DIRECTORY)

#ex15
DB_NAME = os.path.join(WORKING_DIRECTORY, "default_db")


def ex15_initialise():
    # Create or check for the 'database' directory in the current working directory
    # Set DB_NAME to be within the 'database' directory at the top of main.py
    # Connect to the SQLite database
    conn = sqlite3.connect(DB_NAME)
    cursor = conn.cursor()

    # Conversation data table
    cursor.execute(
        """
        CREATE TABLE IF NOT EXISTS data_table (
            id INTEGER PRIMARY KEY,
            date TEXT NOT NULL UNIQUE,
            username TEXT NOT NULL,
            chatbot_ans TEXT NOT NULL,
            user_prompt TEXT NOT NULL,
            tokens TEXT
        )
        """
    )
    conn.commit()
    conn.close()


def ex15_collect(username, chatbot_response, prompt):
    # collect data from bot
    conn = sqlite3.connect(DB_NAME)
    cursor = conn.cursor()
    now = datetime.now()  # Using ISO format for date
    tokens = len(chatbot_response) * 1.3  # rough token estimate from response length
    cursor.execute(
        """
        INSERT INTO data_table (date, username, chatbot_ans, user_prompt, tokens)
        VALUES (?, ?, ?, ?, ?)
        """,
        (now, username, chatbot_response, prompt, tokens),
    )
    conn.commit()
    conn.close()


# implementing data collection and displaying
def ex15():
    # initialise database first
    ex15_initialise()
    # collect some data
    ex15_collect("yoda", "I am Yoda. The Force is strong with you", "Who are you?")
    # display data
    # Connect to the specified database
    conn = sqlite3.connect(DB_NAME)
    cursor = conn.cursor()
    # Fetch all data from data_table
    cursor.execute("SELECT * FROM data_table")
    rows = cursor.fetchall()
    column_names = [description[0] for description in cursor.description]
    df = pd.DataFrame(rows, columns=column_names)
    st.dataframe(df)
    conn.close()


# smart agents accessing the internet for free
# https://github.com/langchain-ai/streamlit-agent/blob/main/streamlit_agent/search_and_chat.py
def ex16_agent_bot():
    st.title("🦜 LangChain: Chat with internet search")

    msgs = StreamlitChatMessageHistory()
    memory = ConversationBufferMemory(
        chat_memory=msgs,
        return_messages=True,
        memory_key="chat_history",
        output_key="output",
    )
    if len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"):
        msgs.clear()
        msgs.add_ai_message("How can I help you?")
        st.session_state.steps = {}

    avatars = {"human": "user", "ai": "assistant"}
    for idx, msg in enumerate(msgs.messages):
        with st.chat_message(avatars[msg.type]):
            # Render intermediate steps if any were saved
            for step in st.session_state.steps.get(str(idx), []):
                if step[0].tool == "_Exception":
                    continue
                with st.status(
                    f"**{step[0].tool}**: {step[0].tool_input}", state="complete"
                ):
                    st.write(step[0].log)
                    st.write(step[1])
            st.write(msg.content)

    if prompt := st.chat_input(placeholder="Enter a query on the Internet"):
        st.chat_message("user").write(prompt)
        llm = ChatOpenAI(
            model_name="gpt-3.5-turbo", openai_api_key=openai.api_key, streaming=True
        )
        tools = [DuckDuckGoSearchRun(name="Search")]
        chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools)
        executor = AgentExecutor.from_agent_and_tools(
            agent=chat_agent,
            tools=tools,
            memory=memory,
            return_intermediate_steps=True,
            handle_parsing_errors=True,
        )
        with st.chat_message("assistant"):
            st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
            response = executor(prompt, callbacks=[st_cb])
            st.write(response["output"])
            st.session_state.steps[str(len(msgs.messages) - 1)] = response[
                "intermediate_steps"
            ]


def upload_file_streamlit():
    def get_file_extension(file_name):
        return os.path.splitext(file_name)[1]

    st.subheader("Upload your docs")
    # Streamlit file uploader to accept file input
    uploaded_file = st.file_uploader("Choose a file", type=["docx", "txt", "pdf"])
    if uploaded_file:
        # Reading file content
        file_content = uploaded_file.read()
        # Determine the suffix based on uploaded file's name
        file_suffix = get_file_extension(uploaded_file.name)
        # Saving the uploaded file temporarily to process it
        with tempfile.NamedTemporaryFile(delete=False, suffix=file_suffix) as temp_file:
            temp_file.write(file_content)
            temp_file.flush()  # Ensure the data is written to the file
            temp_file_path = temp_file.name
        return temp_file_path


#exercise 13 - split and chunk, embeddings and storing in vectorstores for reference
def vectorstore_creator():
    # WORKING_DIRECTORY set above in the main.py
    # Process the temporary file using PyPDFLoader (or any other loader you need)
    embeddings = OpenAIEmbeddings()
    db = lancedb.connect(WORKING_DIRECTORY)
    table = db.create_table(
        "my_table",
        data=[
            {
                "vector": embeddings.embed_query("Query unsuccessful"),
                "text": "Query unsuccessful",
                "id": "1",
            }
        ],
        mode="overwrite",
    )
    # st.write(temp_file_path)
    temp_file_path = upload_file_streamlit()
    if temp_file_path:
        loader = PyPDFLoader(temp_file_path)
        documents = loader.load_and_split()
        db = LanceDB.from_documents(documents, embeddings, connection=table)
        return db


# agents, vectorstores, wiki
# https://python.langchain.com/docs/modules/agents/how_to/custom_agent_with_tool_retrieval
# note tool
@tool("Document search")
def document_search(query: str) -> str:
    # this is the prompt to the tool itself
    "Use this function first to search for documents pertaining to the query before going into the internet"
    docs = st.session_state.vectorstore.similarity_search(query)
    docs = docs[0].page_content
    json_string = json.dumps(docs, ensure_ascii=False, indent=4)
    return json_string


# combine vector store and internet search
def ex17_agent_bot():
    st.title("🦜 LangChain: Chat with internet search")

    st.session_state.vectorstore = vectorstore_creator()

    msgs = StreamlitChatMessageHistory()
    memory = ConversationBufferMemory(
        chat_memory=msgs,
        return_messages=True,
        memory_key="chat_history",
        output_key="output",
    )
    if len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"):
        msgs.clear()
        msgs.add_ai_message("How can I help you?")
        st.session_state.steps = {}

    avatars = {"human": "user", "ai": "assistant"}
    for idx, msg in enumerate(msgs.messages):
        with st.chat_message(avatars[msg.type]):
            # Render intermediate steps if any were saved
            for step in st.session_state.steps.get(str(idx), []):
                if step[0].tool == "_Exception":
                    continue
                with st.status(
                    f"**{step[0].tool}**: {step[0].tool_input}", state="complete"
                ):
                    st.write(step[0].log)
                    st.write(step[1])
            st.write(msg.content)

    if prompt := st.chat_input(placeholder="Enter a query on the Internet"):
        st.chat_message("user").write(prompt)
        llm = ChatOpenAI(
            model_name="gpt-3.5-turbo", openai_api_key=openai.api_key, streaming=True
        )
        tools = [document_search, DuckDuckGoSearchRun(name="Internet Search")]
        chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools)
        executor = AgentExecutor.from_agent_and_tools(
            agent=chat_agent,
            tools=tools,
            memory=memory,
            return_intermediate_steps=True,
            handle_parsing_errors=True,
        )
        with st.chat_message("assistant"):
            st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
            response = executor(prompt, callbacks=[st_cb])
            st.write(response["output"])
            st.session_state.steps[str(len(msgs.messages) - 1)] = response[
                "intermediate_steps"
            ]
[ "lancedb.connect" ]
[((1106, 1117), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1115, 1117), False, 'import os\n'), ((1138, 1167), 'os.path.join', 'os.path.join', (['cwd', '"""database"""'], {}), "(cwd, 'database')\n", (1150, 1167), False, 'import os\n'), ((1259, 1304), 'os.path.join', 'os.path.join', (['WORKING_DIRECTORY', '"""default_db"""'], {}), "(WORKING_DIRECTORY, 'default_db')\n", (1271, 1304), False, 'import os\n'), ((6373, 6396), 'langchain.agents.tool', 'tool', (['"""Document search"""'], {}), "('Document search')\n", (6377, 6396), False, 'from langchain.agents import tool\n'), ((1176, 1209), 'os.path.exists', 'os.path.exists', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1190, 1209), False, 'import os\n'), ((1212, 1242), 'os.makedirs', 'os.makedirs', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (1223, 1242), False, 'import os\n'), ((1527, 1551), 'sqlite3.connect', 'sqlite3.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (1542, 1551), False, 'import sqlite3\n'), ((1957, 1981), 'sqlite3.connect', 'sqlite3.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (1972, 1981), False, 'import sqlite3\n'), ((2013, 2027), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2025, 2027), False, 'from datetime import datetime\n'), ((2579, 2603), 'sqlite3.connect', 'sqlite3.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (2594, 2603), False, 'import sqlite3\n'), ((2810, 2850), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {'columns': 'column_names'}), '(rows, columns=column_names)\n', (2822, 2850), True, 'import pandas as pd\n'), ((2852, 2868), 'streamlit.dataframe', 'st.dataframe', (['df'], {}), '(df)\n', (2864, 2868), True, 'import streamlit as st\n'), ((3049, 3099), 'streamlit.title', 'st.title', (['"""🦜 LangChain: Chat with internet search"""'], {}), "('🦜 LangChain: Chat with internet search')\n", (3057, 3099), True, 'import streamlit as st\n'), ((3109, 3138), 'langchain.memory.chat_message_histories.StreamlitChatMessageHistory', 'StreamlitChatMessageHistory', ([], {}), '()\n', (3136, 3138), False, 'from langchain.memory.chat_message_histories import StreamlitChatMessageHistory\n'), ((3149, 3266), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'chat_memory': 'msgs', 'return_messages': '(True)', 'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(chat_memory=msgs, return_messages=True, memory_key\n ='chat_history', output_key='output')\n", (3173, 3266), False, 'from langchain.memory import ConversationBufferMemory\n'), ((4818, 4850), 'streamlit.subheader', 'st.subheader', (['"""Upload your docs"""'], {}), "('Upload your docs')\n", (4830, 4850), True, 'import streamlit as st\n'), ((4917, 4979), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose a file"""'], {'type': "['docx', 'txt', 'pdf']"}), "('Choose a file', type=['docx', 'txt', 'pdf'])\n", (4933, 4979), True, 'import streamlit as st\n'), ((5732, 5750), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (5748, 5750), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((5757, 5791), 'lancedb.connect', 'lancedb.connect', (['WORKING_DIRECTORY'], {}), '(WORKING_DIRECTORY)\n', (5772, 5791), False, 'import lancedb\n'), ((6592, 6645), 'streamlit.session_state.vectorstore.similarity_search', 'st.session_state.vectorstore.similarity_search', (['query'], {}), '(query)\n', (6638, 6645), True, 'import streamlit as st\n'), ((6690, 6736), 'json.dumps', 'json.dumps', (['docs'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(docs, 
ensure_ascii=False, indent=4)\n', (6700, 6736), False, 'import json\n'), ((6824, 6874), 'streamlit.title', 'st.title', (['"""🦜 LangChain: Chat with internet search"""'], {}), "('🦜 LangChain: Chat with internet search')\n", (6832, 6874), True, 'import streamlit as st\n'), ((6939, 6968), 'langchain.memory.chat_message_histories.StreamlitChatMessageHistory', 'StreamlitChatMessageHistory', ([], {}), '()\n', (6966, 6968), False, 'from langchain.memory.chat_message_histories import StreamlitChatMessageHistory\n'), ((6979, 7096), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'chat_memory': 'msgs', 'return_messages': '(True)', 'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(chat_memory=msgs, return_messages=True, memory_key\n ='chat_history', output_key='output')\n", (7003, 7096), False, 'from langchain.memory import ConversationBufferMemory\n'), ((3305, 3344), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Reset chat history"""'], {}), "('Reset chat history')\n", (3322, 3344), True, 'import streamlit as st\n'), ((3912, 3970), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""Enter a query on the Internet"""'}), "(placeholder='Enter a query on the Internet')\n", (3925, 3970), True, 'import streamlit as st\n'), ((4021, 4110), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'openai_api_key': 'openai.api_key', 'streaming': '(True)'}), "(model_name='gpt-3.5-turbo', openai_api_key=openai.api_key,\n streaming=True)\n", (4031, 4110), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4176, 4240), 'langchain.agents.ConversationalChatAgent.from_llm_and_tools', 'ConversationalChatAgent.from_llm_and_tools', ([], {'llm': 'llm', 'tools': 'tools'}), '(llm=llm, tools=tools)\n', (4218, 4240), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((4254, 4399), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'chat_agent', 'tools': 'tools', 'memory': 'memory', 'return_intermediate_steps': '(True)', 'handle_parsing_errors': '(True)'}), '(agent=chat_agent, tools=tools, memory=\n memory, return_intermediate_steps=True, handle_parsing_errors=True)\n', (4288, 4399), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((6089, 6116), 'langchain.document_loaders.PyPDFLoader', 'PyPDFLoader', (['temp_file_path'], {}), '(temp_file_path)\n', (6100, 6116), False, 'from langchain.document_loaders import TextLoader, PyPDFLoader\n'), ((6162, 6225), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'table'}), '(documents, embeddings, connection=table)\n', (6184, 6225), False, 'from langchain.vectorstores import LanceDB\n'), ((7135, 7174), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Reset chat history"""'], {}), "('Reset chat history')\n", (7152, 7174), True, 'import streamlit as st\n'), ((7742, 7800), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""Enter a query on the Internet"""'}), "(placeholder='Enter a query on the Internet')\n", (7755, 7800), True, 'import streamlit as st\n'), ((7851, 7940), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'openai_api_key': 'openai.api_key', 'streaming': '(True)'}), "(model_name='gpt-3.5-turbo', openai_api_key=openai.api_key,\n streaming=True)\n", (7861, 7940), False, 'from langchain.chat_models import 
ChatOpenAI\n'), ((8032, 8096), 'langchain.agents.ConversationalChatAgent.from_llm_and_tools', 'ConversationalChatAgent.from_llm_and_tools', ([], {'llm': 'llm', 'tools': 'tools'}), '(llm=llm, tools=tools)\n', (8074, 8096), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((8110, 8255), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'chat_agent', 'tools': 'tools', 'memory': 'memory', 'return_intermediate_steps': '(True)', 'handle_parsing_errors': '(True)'}), '(agent=chat_agent, tools=tools, memory=\n memory, return_intermediate_steps=True, handle_parsing_errors=True)\n', (8144, 8255), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((3535, 3569), 'streamlit.chat_message', 'st.chat_message', (['avatars[msg.type]'], {}), '(avatars[msg.type])\n', (3550, 3569), True, 'import streamlit as st\n'), ((3875, 3896), 'streamlit.write', 'st.write', (['msg.content'], {}), '(msg.content)\n', (3883, 3896), True, 'import streamlit as st\n'), ((4125, 4159), 'langchain.tools.DuckDuckGoSearchRun', 'DuckDuckGoSearchRun', ([], {'name': '"""Search"""'}), "(name='Search')\n", (4144, 4159), False, 'from langchain.tools import DuckDuckGoSearchRun\n'), ((4422, 4450), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (4437, 4450), True, 'import streamlit as st\n'), ((4584, 4612), 'streamlit.write', 'st.write', (["response['output']"], {}), "(response['output'])\n", (4592, 4612), True, 'import streamlit as st\n'), ((4785, 4812), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (4801, 4812), False, 'import os\n'), ((5237, 5298), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'suffix': 'file_suffix'}), '(delete=False, suffix=file_suffix)\n', (5264, 5298), False, 'import tempfile\n'), ((7365, 7399), 'streamlit.chat_message', 'st.chat_message', (['avatars[msg.type]'], {}), '(avatars[msg.type])\n', (7380, 7399), True, 'import streamlit as st\n'), ((7705, 7726), 'streamlit.write', 'st.write', (['msg.content'], {}), '(msg.content)\n', (7713, 7726), True, 'import streamlit as st\n'), ((7972, 8015), 'langchain.tools.DuckDuckGoSearchRun', 'DuckDuckGoSearchRun', ([], {'name': '"""Internet Search"""'}), "(name='Internet Search')\n", (7991, 8015), False, 'from langchain.tools import DuckDuckGoSearchRun\n'), ((8278, 8306), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (8293, 8306), True, 'import streamlit as st\n'), ((8440, 8468), 'streamlit.write', 'st.write', (["response['output']"], {}), "(response['output'])\n", (8448, 8468), True, 'import streamlit as st\n'), ((3974, 3997), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (3989, 3997), True, 'import streamlit as st\n'), ((4488, 4502), 'streamlit.container', 'st.container', ([], {}), '()\n', (4500, 4502), True, 'import streamlit as st\n'), ((7804, 7827), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (7819, 7827), True, 'import streamlit as st\n'), ((8344, 8358), 'streamlit.container', 'st.container', ([], {}), '()\n', (8356, 8358), True, 'import streamlit as st\n'), ((3737, 3809), 'streamlit.status', 'st.status', (['f"""**{step[0].tool}**: {step[0].tool_input}"""'], {'state': '"""complete"""'}), "(f'**{step[0].tool}**: {step[0].tool_input}', state='complete')\n", (3746, 3809), True, 'import streamlit as st\n'), ((3827, 3848), 
'streamlit.write', 'st.write', (['step[0].log'], {}), '(step[0].log)\n', (3835, 3848), True, 'import streamlit as st\n'), ((3854, 3871), 'streamlit.write', 'st.write', (['step[1]'], {}), '(step[1])\n', (3862, 3871), True, 'import streamlit as st\n'), ((7567, 7639), 'streamlit.status', 'st.status', (['f"""**{step[0].tool}**: {step[0].tool_input}"""'], {'state': '"""complete"""'}), "(f'**{step[0].tool}**: {step[0].tool_input}', state='complete')\n", (7576, 7639), True, 'import streamlit as st\n'), ((7657, 7678), 'streamlit.write', 'st.write', (['step[0].log'], {}), '(step[0].log)\n', (7665, 7678), True, 'import streamlit as st\n'), ((7684, 7701), 'streamlit.write', 'st.write', (['step[1]'], {}), '(step[1])\n', (7692, 7701), True, 'import streamlit as st\n')]
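# The call sites extracted above come from a Streamlit/LangChain document-chat
# app whose full source is not reproduced here. As orientation, this is a
# minimal sketch of the LanceDB ingestion path those calls imply; the table
# name, seed row, and file path below are illustrative assumptions, not values
# recovered from the original app.
import lancedb
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import LanceDB

WORKING_DIRECTORY = './database'               # the extract joins cwd with 'database'
db = lancedb.connect(WORKING_DIRECTORY)        # mirrors lancedb.connect(WORKING_DIRECTORY) above
embeddings = OpenAIEmbeddings()
table = db.create_table(                      # 'vectors' and the seed row are assumptions
    'vectors',
    data=[{'vector': embeddings.embed_query('seed'), 'text': 'seed', 'id': '0'}],
    mode='overwrite',
)
documents = PyPDFLoader('example.pdf').load()  # 'example.pdf' is a placeholder path
vectorstore = LanceDB.from_documents(documents, embeddings, connection=table)
docs = vectorstore.similarity_search('query')  # mirrors the similarity_search call above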
import os import glob import tqdm import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import pickle from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.pipeline import make_pipeline from sklearn.preprocessing import KBinsDiscretizer, StandardScaler from sklearn.pipeline import Pipeline import joblib # from sklearn.svm import SVC, LinearSVC # from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support from sklearn.preprocessing import StandardScaler from sklearn.manifold import TSNE, Isomap from .config import get_cfg from .util.step_annotations import load_object_annotations, get_obj_anns from IPython import embed import warnings warnings.simplefilter('once') STATE = 'state' def remap_labels(sdf, old_col, new_col): RENAME = { '[partial]': '', '[full]': '', 'floss-underneath': 'ends-cut', 'floss-crossed': 'ends-cut', 'raisins[cooked]': 'raisins', 'oatmeal[cooked]+raisins': 'oatmeal+raisins', 'teabag': 'tea-bag', '+stirrer': '', '[stirred]': '', 'water+honey': 'water', 'with-quesadilla': 'with-food', 'with-pinwheels': 'with-food', } sdf[new_col] = sdf[old_col].copy() for old, new in RENAME.items(): sdf[new_col] = sdf[new_col].str.replace(old, new) sdf = sdf[~sdf[new_col].isin(['folding', 'on-plate', 'rolling'])] return sdf # ---------------------------------------------------------------------------- # # Data Loading # # ---------------------------------------------------------------------------- # class bc: HEADER = '\033[95m' BLUE = '\033[94m' CYAN = '\033[96m' GREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' END = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' def load_data(cfg, data_file_pattern, include=None): '''Load npz files (one per video) with embedding and label keys and concatenate ''' # if os.path.isfile('dataset.pkl'): # print('reading pickle') # df = pd.read_pickle('dataset.pkl') # print(df.head()) # return df use_aug = cfg.EVAL.USE_AUGMENTATIONS embeddings_list, df_list = [], [] class_map = {} # steps_df, meta_df, object_names = load_annotations(cfg) dfs = load_object_annotations(cfg) fs = glob.glob(data_file_pattern) # if cfg.EVAL.TRAIN_BASE_ROOT: # fs += glob.glob(f'{cfg.EVAL.TRAIN_BASE_ROOT}/embeddings/{cfg.EVAL.DETECTION_NAME}/*/clip/*.npz') if len(set(fs)) < len(fs): print("Warning duplicate files in training set!\n\n") input() print(f"Found {len(fs)} files", fs[:1]) for f in tqdm.tqdm(fs, desc='loading data...'): # if 'coffee_mit-eval' in f: # embed() if include and not any(fi in f for fi in include): print("Skipping", f) continue data = np.load(f) z = data['z'] z = z / np.linalg.norm(z, axis=-1, keepdims=True) frame_idx = data['frame_index'] # maybe filter out augmentations aug = data.get('augmented') if aug is None or use_aug: aug = np.zeros(len(z), dtype=bool) z = z[~aug] frame_idx = frame_idx[~aug] # get video ID and track ID video_id = data.get('video_name') if video_id is None: video_id = f.split('/')[-3] else: video_id = video_id.item() video_id = os.path.splitext(video_id)[0] track_id = data.get('track_id') if track_id is None: track_id = f.split('/')[-1].split('.')[0] else: track_id = track_id.item() track_id = int(track_id) if video_id not in dfs or track_id not in dfs[video_id]: tqdm.tqdm.write(f"{bc.FAIL}Skipping{bc.END}: {video_id}: {track_id}") continue 
tqdm.tqdm.write(f"Using: {video_id}: {track_id}") # get object state annotations ann = get_obj_anns(remap_labels(dfs[video_id][track_id], 'state', 'state'), frame_idx) embeddings_list.append(z) df_list.append(pd.DataFrame({ 'index': frame_idx, 'object': ann.object, 'state': ann.state, 'track_id': track_id, 'video_id': video_id, })) # print() # print(df_list[-1][['object', 'state']].value_counts()) # print() # if input(): embed() if input():embed() X = np.concatenate(embeddings_list) df = pd.concat(df_list) df['vector'] = list(X) df.to_pickle('dataset.pkl') return df def load_data_from_db(cfg, state_col, emb_type='clip'): import lancedb dfs = [] fs = cfg.EVAL.EMBEDDING_DBS f = os.path.join(cfg.DATASET.ROOT, f'{emb_type}.lancedb') if not fs and os.path.isfile(f): fs = [f] for db_fname in fs: print(db_fname) assert os.path.isdir(db_fname) db = lancedb.connect(db_fname) for object_name in tqdm.tqdm(db.table_names()): dfs.append(db.open_table(object_name).to_pandas()) df = pd.concat(dfs) if dfs else pd.DataFrame({state_col: []}) if state_col: df['state'] = df[state_col] return df def read_split_file(fname): lines = open(fname).read().splitlines() lines = [l.strip() for l in lines] lines = [l for l in lines if l and not l.startswith('#')] return lines # ---------------------------------------------------------------------------- # # Training # # ---------------------------------------------------------------------------- # def train_eval(run_name, model, X, y, i_train, i_test, video_ids, plot_dir='plots', **meta): '''Train and evaluate a model''' print(run_name, model) # plot_dir = f'{plot_dir}/{run_name}' # os.makedirs(plot_dir, exist_ok=True) X_train, X_test = X[i_train], X[i_test] y_train, y_test = y[i_train], y[i_test] # print(set(y_train)) # print(set(y_test)) # if input(): embed() # from imblearn.over_sampling import SMOTE # from imblearn.under_sampling import RandomUnderSampler # from imblearn.pipeline import Pipeline # # Create a pipeline to balance the classes using SMOTE # pipeline = Pipeline([ # # ('oversample', SMOTE(sampling_strategy='auto')), # You can adjust sampling_strategy # ('undersample', RandomUnderSampler(sampling_strategy='auto')) # You can adjust sampling_strategy # ]) # X_test2, y_test2 = X_test, y_test # X_test, y_test = pipeline.fit_resample(X_test, y_test) # print(X_test2.shape, y_test2.shape, X_test.shape, y_test.shape) assert not (set(video_ids[i_train]) & set(video_ids[i_test])), "Being extra sure... 
this is a nono" # Standardize features scaler = StandardScaler() pipeline = Pipeline([ ('scaler', scaler), ('model', model) ]) X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) # ----------------------------------- Train ---------------------------------- # # Train the classifier model.fit(X_train, y_train) # Make predictions on the test set y_pred = model.predict(X_test) y_emis = model.predict_proba(X_test) # with open(os.path.join(plot_dir, f'{run_name}.pkl'), 'rb') as f: # pickle.dump(y_pred, f) # Save the entire pipeline with open(os.path.join(plot_dir, f'{run_name}_pipeline.pkl'), 'wb') as f: joblib.dump(pipeline, f) # ------------------------------- Visualization ------------------------------ # # Generate plots all_metrics = [] # compute vanilla metrics meta['run_name'] = meta['metric_name'] = run_name metrics = get_metrics(y_test, y_pred, **meta) all_metrics.append(metrics) tqdm.tqdm.write(f'Accuracy for {run_name}: {metrics["accuracy"]:.2f}') # generate vanilla plots video_ids_test = video_ids[i_test] emission_plot(plot_dir, y_emis, y_test, model.classes_, f'{run_name}_ma0_', video_ids=video_ids_test) emission_plot(plot_dir, y_emis, y_test, model.classes_, f'{run_name}_ma0_ypred_', show_ypred=True, video_ids=video_ids_test) cm_plot(plot_dir, y_test, y_pred, model.classes_, f'{run_name}_') # with moving average for winsize in [2, 4, 8, 16]: y_ = moving_average(y_emis, winsize) y_pred_ = np.asarray(model.classes_)[np.argmax(y_, axis=1)] # emission_plot(plot_dir, y_, y_test, model.classes_, f'{run_name}_ma{winsize}_', video_ids=video_ids_test) emission_plot(plot_dir, y_, y_test, model.classes_, f'{run_name}_ma{winsize}_ypred_', show_ypred=True, video_ids=video_ids_test) cm_plot(plot_dir, y_test, y_pred_, model.classes_, f'{run_name}_cm_ma{winsize}_') meta = {**meta} meta['metric_name'] = f'{run_name}_movingavg-{winsize}' metrics = get_metrics(y_test, y_pred_, smoothing='ma', win_size=winsize, **meta) all_metrics.append(metrics) for alpha in [0.1, 0.2, 0.5]: y_ = exponentially_decaying_average(y_emis, alpha) y_pred_ = np.asarray(model.classes_)[np.argmax(y_, axis=1)] emission_plot(plot_dir, y_, y_test, model.classes_, f'{run_name}_ema{alpha}_ypred_', show_ypred=True, video_ids=video_ids_test) cm_plot(plot_dir, y_test, y_pred, model.classes_, f'{run_name}_') meta = {**meta} meta['metric_name'] = f'{run_name}_expmovingavg-{alpha}' metrics = get_metrics(y_test, y_pred_, smoothing='ema', alpha=alpha, **meta) all_metrics.append(metrics) # y_hmm = hmm_forward(y_emis, len(model.classes_)) # emission_plot(plot_dir, y_hmm, y_test, model.classes_, f'{run_name}_trans_', video_ids=video_ids_test) # emission_plot(plot_dir, y_hmm, y_test, model.classes_, f'{run_name}_trans_ypred_', show_ypred=True, video_ids=video_ids_test) # # embed() # get per class metrics per_class_metrics = [] for c in np.unique(y): per_class_metrics.append(get_metrics( y_test[y_test==c], y_pred[y_test==c], label=c, **meta)) # tqdm.tqdm.write(f'F1 for {run_name}: {metrics["f1"]:.2f}') return all_metrics, per_class_metrics def get_metrics(y_test, y_pred, **meta): precision, recall, f1_score, _ = precision_recall_fscore_support(y_test, y_pred, zero_division=np.nan, average='macro') return { 'accuracy': accuracy_score(y_test, y_pred), 'f1': f1_score, 'ap': precision, 'avg_recal': recall, **meta } # ---------------------------------------------------------------------------- # # Visualization # # ---------------------------------------------------------------------------- # def emb_plot(plot_dir, X, y, 
prefix='', n=3000): fname = f'{plot_dir}/{prefix}_proj.png' if os.path.isfile(fname): return print("creating emb plot", fname) # Create a TSNE embedding plot (optional) # tsne = TSNE(n_components=2) m = Isomap() i = np.random.choice(np.arange(len(X)), size=n) X, y = X[i], y[i] Z = m.fit_transform(X) print(Z.shape) plt.figure(figsize=(10, 8)) for c in np.unique(y): plt.scatter(Z[y==c, 0], Z[y==c, 1], label=str(c), s=20, alpha=0.3) plt.legend() plt.title(f'Embedding Projection: {prefix}') pltsave(fname) def emission_plot(plot_dir, X, y, classes, prefix='', video_ids=None, show_ypred=False): plt.figure(figsize=(10, 8)) plt.imshow(X.T, cmap='cubehelix', aspect='auto') cs = {c: i for i, c in enumerate(classes)} classes = list(classes) for c in set(y) - set(cs): cs[c] = len(cs) classes.append(c) plt.plot(np.array([cs[yi] for yi in y]), c='r') if show_ypred: plt.scatter(np.arange(len(X)), np.argmax(X, axis=1), c='white', s=5, alpha=0.2) ic = range(len(classes)) plt.yticks(ic, [classes[i] for i in ic]) pltsave(f'{plot_dir}/{prefix}emissions.png') os.makedirs(f'{plot_dir}/npzs', exist_ok=True) np.savez( f'{plot_dir}/npzs/{prefix}emissions.npz', predictions=X, ground_truth=y, video_ids=video_ids, classes=classes) def cm_plot(plot_dir, y_test, y_pred, classes, prefix=''): # classes = np.unique(y_test) if classes is None else classes cm = confusion_matrix(y_test, y_pred, labels=classes, normalize='true')*100 # Plot and save the confusion matrix plt.figure(figsize=(10, 8)) sns.heatmap(cm, annot=True, fmt='.0f', cmap='magma', cbar=False, square=True, xticklabels=classes, yticklabels=classes) plt.xlabel('Predicted') plt.ylabel('Actual') plt.title(f'Confusion Matrix') pltsave(f'{plot_dir}/{prefix}confusion_matrix.png') def n_videos_metrics(plot_dir, all_metrics, prefix=''): # Plot accuracy and F1-score vs. the number of videos plt.figure(figsize=(12, 5)) plt.subplot(1, 2, 1) plt.plot(all_metrics.n_videos, all_metrics.accuracy, marker='o') plt.title('Accuracy vs. Number of Videos') plt.xlabel('Number of Videos') plt.ylabel('Accuracy') plt.subplot(1, 2, 2) plt.plot(all_metrics.n_videos, all_metrics.f1, marker='o', color='orange') plt.title('F1 Score vs. Number of Videos') plt.xlabel('Number of Videos') plt.ylabel('F1 Score') plt.tight_layout() pltsave(f'{plot_dir}/{prefix}accuracy_f1_vs_videos.png') def cross_model_metrics(plot_dir, all_metrics, prefix=''): # Plot accuracy and F1-score vs. the number of videos plt.figure(figsize=(15, 6)) plt.subplot(1, 2, 1) for name, mdf in all_metrics[all_metrics.smoothing == 'ma'].groupby("run_name"): plt.plot(mdf.win_size, mdf.f1, label=name) plt.legend() plt.title('F1 Score vs. Number of Videos') plt.xlabel('Moving Average Window Size') plt.ylabel('F1 Score') plt.tight_layout() plt.subplot(1, 2, 2) for name, mdf in all_metrics[all_metrics.smoothing == 'ema'].groupby("run_name"): plt.plot(mdf.alpha, mdf.f1, label=name) plt.legend() plt.title('F1 Score vs. EMA alpha * x[t] + (1 - alpha) * x[t-1]') plt.xlabel('Exp Moving Average alpha') plt.ylabel('F1 Score') plt.tight_layout() pltsave(f'{plot_dir}/{prefix}accuracy_f1_vs_smooth.png') # def n_videos_class_metrics(plot_dir, all_metrics, prefix=''): # # Plot accuracy and F1-score vs. the number of videos # plt.figure(figsize=(12, 5)) # plt.subplot(1, 2, 1) # for c, df in all_metrics.groupby('label'): # # cc = df.class_count.mean() # plt.plot(df.n_videos, df.accuracy, marker='o', label=c)#f'{c} {cc:.0f}' # plt.legend() # plt.title('Accuracy vs. 
#     Number of Videos')
#     plt.xlabel('Number of Videos')
#     plt.ylabel('Accuracy')

#     plt.subplot(1, 2, 2)
#     for c, df in all_metrics.groupby('label'):
#         # cc = df.class_count.mean()
#         plt.plot(df.n_videos, df.accuracy, marker='o', label=c)#f'{c} {cc:.0f}'
#     plt.title('F1 Score vs. Number of Videos')
#     plt.xlabel('Number of Videos')
#     plt.ylabel('F1 Score')
#     plt.legend()
#     plt.tight_layout()
#     pltsave(f'{plot_dir}/{prefix}accuracy_f1_vs_videos_per_class.png')


def pltsave(fname):
    os.makedirs(os.path.dirname(fname) or '.', exist_ok=True)
    plt.savefig(fname)
    plt.close()

# ---------------------------------------------------------------------------- #
#                                      HMM                                      #
# ---------------------------------------------------------------------------- #

# def create_hmm(num_states, p_self=0.9):
#     transition_matrix = np.eye(num_states) * p_self + (1.0 - p_self) / (num_states - 1)
#     emission_matrix = np.eye(num_states)
#     initial_prob = np.ones(num_states) / num_states
#     return initial_prob, emission_matrix, transition_matrix

# Forward pass to compute the forward probabilities
def hmm_forward(sequence, num_states, p_self=0.9):
    # `sequence` holds per-frame emission probabilities (e.g. predict_proba
    # output); smooth them with a sticky self-transition matrix.
    transition_matrix = np.eye(num_states) * p_self + (1.0 - p_self) / (num_states - 1)
    forward_prob = np.zeros((len(sequence), num_states))
    forward_prob[0, :] = 1 / num_states
    for t in range(1, len(sequence)):
        # propagate the previous belief through the transitions, weight by
        # the current emission, then renormalize
        forward_prob[t] = (forward_prob[t - 1] @ transition_matrix) * sequence[t]
        forward_prob[t] /= np.sum(forward_prob[t])
    return forward_prob


def moving_average(a, n=3, axis=0):
    ret = np.cumsum(a, dtype=float, axis=axis)
    ret[n:] = (ret[n:] - ret[:-n]) / n
    # the first n rows only have t+1 samples to average over; start the
    # divisor at 1 to avoid dividing the first row by zero
    ret[:n] = ret[:n] / np.arange(1, n + 1)[:, None]
    return ret


def exponentially_decaying_average(a, decay_rate):
    assert 0 < decay_rate < 1, "Decay rate must be between 0 and 1." 
result = a.copy() result[0, :] = a[0, :] for t in range(1, a.shape[0]): result[t, :] = decay_rate * result[t - 1, :] + (1 - decay_rate) * a[t, :] return result # ---------------------------------------------------------------------------- # # Training Meta Loop # # ---------------------------------------------------------------------------- # def get_data(cfg, STATE, full_split, emb_type='clip'): emb_dirs = cfg.EVAL.EMBEDDING_DIRS or [os.path.join(cfg.DATASET.ROOT, 'embeddings-all', cfg.EVAL.DETECTION_NAME)] ydf = load_data_from_db(cfg, state_col='mod_state') db_train_split = ydf.video_id.unique().tolist() ydf = pd.concat([ *[ load_data(cfg, f'{d}/{cfg.EVAL.DETECTION_NAME}/*/{emb_type}/*.npz', include=set(full_split) - set(ydf.video_id.unique())) for d in emb_dirs ], ydf ]) print(ydf.groupby('object').state.value_counts()) # sampling 12k per state ydf = sample_random(ydf, STATE, 15000) print(ydf.groupby('object').state.value_counts()) print('Nulls:', ydf[pd.isna(ydf.state)].video_id.value_counts()) assert None not in set(ydf.state) return ydf, db_train_split def sample_random(df, STATE, n): df = df.groupby(STATE, group_keys=False).apply(lambda x: x.sample(min(len(x), n))) return df def get_models(cfg): return [ # (KNeighborsClassifier, 'knn5', {'n_neighbors': 5}), # (KNeighborsClassifier, 'knn11-50', {'n_neighbors': 11}, lambda df: sample_random(df, STATE, 50)), # (KNeighborsClassifier, 'knn11-100', {'n_neighbors': 11}, lambda df: sample_random(df, STATE, 100)), (KNeighborsClassifier, 'knn5-2000', {'n_neighbors': 5}, lambda df: sample_random(df, STATE, 2000)), (KNeighborsClassifier, 'knn21-2000', {'n_neighbors': 21}, lambda df: sample_random(df, STATE, 2000)), (KNeighborsClassifier, 'knn11-1000', {'n_neighbors': 11}, lambda df: sample_random(df, STATE, 1000)), (KNeighborsClassifier, 'knn11-2000', {'n_neighbors': 11}, lambda df: sample_random(df, STATE, 2000)), (KNeighborsClassifier, 'knn11-5000', {'n_neighbors': 11}, lambda df: sample_random(df, STATE, 5000)), (KNeighborsClassifier, 'knn11-12000', {'n_neighbors': 11}, lambda df: sample_random(df, STATE, 12000)), # (KNeighborsClassifier, 'knn50', {'n_neighbors': 50}), (LogisticRegression, 'logreg', {}, lambda df: sample_random(df, STATE, 5000)), (RandomForestClassifier, 'rf', {}, lambda df: sample_random(df, STATE, 5000)), # ( # make_pipeline( # StandardScaler(), # KBinsDiscretizer(encode="onehot", random_state=0), # LogisticRegression(random_state=0), # ), # 'kbins_logreg', # { # "kbinsdiscretizer__n_bins": np.arange(5, 8), # "logisticregression__C": np.logspace(-1, 1, 3), # }, # ), ] def prepare_data(odf, STATE, sampler, train_split, val_split): video_ids = odf['video_id'].values unique_video_ids = np.unique(video_ids) # obj_train_base_split = [f for f in train_base_split if f in video_ids and f not in val_split] obj_train_split = [f for f in train_split if f in video_ids and f not in val_split] obj_val_split = [f for f in val_split if f in video_ids] # embed() # obj_train_split = sorted(obj_train_split, key=lambda v: -len(odf[odf.video_id == v].state.unique())) # print("Base Training split:", obj_train_base_split) print("Training split:", obj_train_split) print("Validation split:", obj_val_split) print("Unused videos:", set(unique_video_ids) - set(obj_train_split+obj_val_split)) print("Missing videos:", set(obj_train_split+obj_val_split) - set(unique_video_ids)) i_train = np.isin(video_ids, obj_train_split) # i_train = np.isin(video_ids, obj_train_base_split + obj_train_split[:nvids]) i_val = np.isin(video_ids, obj_val_split) odfo=odf if 
sampler is not None: odf_train = sampler(odf.iloc[i_train]) odf_val = odf.iloc[i_val] i_train = np.arange(len(odf_train)) i_val = np.arange(len(odf_val)) + i_train.max()+1 odf = pd.concat([odf_train, odf_val]) video_ids = odf['video_id'].values i_train = np.isin(video_ids, obj_train_split) i_val = np.isin(video_ids, obj_val_split) X = np.array(list(odf['vector'].values)) y = odf[STATE].values print() print("all data:") print('X', X.shape) print('y', y.shape) print(odf[['video_id', 'track_id', STATE]].value_counts()) # embed() return X, y, video_ids, i_train, i_val import ipdb @ipdb.iex def run(config_name): cfg = get_cfg(config_name) root_plot_dir = root_plot_dir_ = cfg.EVAL.PLOT_DIR or 'plots' # i=0 # while os.path.isdir(root_plot_dir_): # root_plot_dir_ = root_plot_dir + f'_{i}' # i+=1 # root_plot_dir=root_plot_dir_ if os.path.isdir(root_plot_dir): raise RuntimeError(f"{root_plot_dir} exists") os.makedirs(root_plot_dir, exist_ok=True) # STATE = 'super_simple_state' # STATE = 'state' train_split = read_split_file(cfg.EVAL.TRAIN_CSV) train_base_split = read_split_file(cfg.EVAL.TRAIN_BASE_CSV) val_splits = [(f, read_split_file(f)) for f in cfg.EVAL.VAL_CSVS] print(len(train_base_split), train_base_split[:5]) print(len(train_split), train_split[:5]) print(len(val_splits), val_splits[:5]) full_train_split = train_split + train_base_split full_val_split = [x for f, xs in val_splits for x in xs] full_split = full_train_split + full_val_split print(full_split) for _,val_split in val_splits: assert not set(full_train_split) & set(val_split), f"what are you doing silly {set(full_train_split) & set(val_split)}" models = get_models(cfg) cfg.EVAL.EMBEDDING_TYPES=['clip'] for emb_type in tqdm.tqdm(cfg.EVAL.EMBEDDING_TYPES, desc='embedding type'): ydf, db_train_split = get_data(cfg, STATE, full_split) emb_plot(f'{root_plot_dir}/{emb_type}', np.array(list(ydf['vector'].values)), ydf['object'].values, 'object') # emb_plot(f'{root_plot_dir}/{emb_type}', np.array(list(ydf['vector'].values)), ydf[STATE].values, 'states') for (val_split_fname, val_split) in val_splits: val_split_name = val_split_fname.split('/')[-1].removesuffix('.txt') for object_name, odf in ydf.groupby('object'): plot_dir = f'{root_plot_dir}/{val_split_name}/{emb_type}/{object_name}' os.makedirs(plot_dir, exist_ok=True) all_metrics = [] all_per_class_metrics = [] for cls, name, kw, sampler in tqdm.tqdm(models, desc='models'): X, y, video_ids, i_train, i_val = prepare_data(odf, STATE, sampler, db_train_split, val_split) emb_plot(f'{root_plot_dir}/{emb_type}_{object_name}', X, y, 'states') if not i_val.sum(): print(f"\n\n\n\nSKIPPING i_val is empty. 
{val_split}\n\n\n") continue print(f"Training with: train size: {len(i_train)} val size: {len(i_val)}") print("Train Counts:") train_counts = show_counts(y[i_train]) print(train_counts) print("Val Counts:") val_counts = show_counts(y[i_val]) print(val_counts) model = cls(**kw) metrics, per_class_metrics = train_eval( f'{val_split_name}_{emb_type}_{name}', model, X, y, i_train, i_val, video_ids=video_ids, plot_dir=plot_dir, model_name=name, # n_videos=nvids, **kw) all_metrics.extend(metrics) all_per_class_metrics.extend(per_class_metrics) all_metrics_df = pd.DataFrame(all_metrics) all_per_class_metrics_df = pd.DataFrame(all_per_class_metrics) all_metrics_df.to_csv(f'{plot_dir}/metrics.csv') all_per_class_metrics_df.to_csv(f'{plot_dir}/class_metrics.csv') # ---------- Show how it performs as a function of number of videos ---------- # if len(all_metrics): cross_model_metrics(plot_dir, all_metrics_df, f'{emb_type}_') # n_videos_class_metrics(plot_dir, all_per_class_metrics_df, f'{emb_type}_') def show_counts(y): yu, counts = np.unique(y, return_counts=True) for yui, c in zip(yu, counts): print(yui, c) return dict(zip(yu, counts)) import ipdb @ipdb.iex def show_data(config_name, emb_type='clip'): cfg = get_cfg(config_name) emb_dir = os.path.join(cfg.DATASET.ROOT, 'embeddings1', cfg.EVAL.DETECTION_NAME) emb_types = cfg.EVAL.EMBEDDING_TYPES data_file_pattern = f'{emb_dir}/*/{emb_type}/*.npz' # dfs = load_object_annotations(cfg) # # for vid, odfs in dfs.items(): # # print(vid) # # print(set(odfs)) # # input() # for vid, odfs in dfs.items(): # print(vid) # print({k: odfs[k].shape for k in odfs}) # for vid, odfs in dfs.items(): # print(vid) # for k in odfs: # print(k) # print(odfs[k]) # embed() # X, y, video_ids, class_map = load_data(cfg, data_file_pattern, use_aug=False) # df = pd.DataFrame({'vids': video_ids, 'y': y}) # df['label'] = df.y.apply(lambda y: class_map[y]) # for v, rows in df.groupby('vids'): # print(v) # print(rows.label.value_counts()) df = load_data(cfg, data_file_pattern, use_aug=False) for object_name, odf in df.groupby('object'): print(object_name) print(odf[['state']].value_counts()) # print(odf[['state', 'video_id']].value_counts()) x = odf[['state', 'video_id']].value_counts().unstack().fillna(0) print(x) x.to_csv(f"{object_name}_video_counts.csv") # embed() if __name__ == '__main__': import fire fire.Fire()
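# Inference sketch: train_eval above dumps the fitted scaler+classifier with
# joblib under '{run_name}_pipeline.pkl'. A minimal sketch of loading such an
# artifact back and applying the same exponential smoothing at inference time.
# The run name and the embedding width (512, as for CLIP ViT-B/32) are
# illustrative assumptions, not values from a real run.
import joblib
import numpy as np

pipeline = joblib.load('plots/val_clip_knn11-2000_pipeline.pkl')  # hypothetical run name
X_new = np.random.randn(100, 512)                       # stand-in for per-frame CLIP embeddings
X_new /= np.linalg.norm(X_new, axis=-1, keepdims=True)  # same normalization as load_data
probs = pipeline.predict_proba(X_new)                   # per-frame emission probabilities
smoothed = exponentially_decaying_average(probs, decay_rate=0.5)  # defined above
y_pred = pipeline.classes_[np.argmax(smoothed, axis=1)]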
[ "lancedb.connect" ]
[((938, 967), 'warnings.simplefilter', 'warnings.simplefilter', (['"""once"""'], {}), "('once')\n", (959, 967), False, 'import warnings\n'), ((2673, 2701), 'glob.glob', 'glob.glob', (['data_file_pattern'], {}), '(data_file_pattern)\n', (2682, 2701), False, 'import glob\n'), ((3011, 3048), 'tqdm.tqdm', 'tqdm.tqdm', (['fs'], {'desc': '"""loading data..."""'}), "(fs, desc='loading data...')\n", (3020, 3048), False, 'import tqdm\n'), ((4817, 4848), 'numpy.concatenate', 'np.concatenate', (['embeddings_list'], {}), '(embeddings_list)\n', (4831, 4848), True, 'import numpy as np\n'), ((4858, 4876), 'pandas.concat', 'pd.concat', (['df_list'], {}), '(df_list)\n', (4867, 4876), True, 'import pandas as pd\n'), ((5081, 5134), 'os.path.join', 'os.path.join', (['cfg.DATASET.ROOT', 'f"""{emb_type}.lancedb"""'], {}), "(cfg.DATASET.ROOT, f'{emb_type}.lancedb')\n", (5093, 5134), False, 'import os\n'), ((7191, 7207), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7205, 7207), False, 'from sklearn.preprocessing import StandardScaler\n'), ((7223, 7271), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('scaler', scaler), ('model', model)]"], {}), "([('scaler', scaler), ('model', model)])\n", (7231, 7271), False, 'from sklearn.pipeline import Pipeline\n'), ((8186, 8256), 'tqdm.tqdm.write', 'tqdm.tqdm.write', (['f"""Accuracy for {run_name}: {metrics[\'accuracy\']:.2f}"""'], {}), '(f"Accuracy for {run_name}: {metrics[\'accuracy\']:.2f}")\n', (8201, 8256), False, 'import tqdm\n'), ((10331, 10343), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (10340, 10343), True, 'import numpy as np\n'), ((10652, 10742), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {'zero_division': 'np.nan', 'average': '"""macro"""'}), "(y_test, y_pred, zero_division=np.nan,\n average='macro')\n", (10683, 10742), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support\n'), ((11250, 11271), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (11264, 11271), False, 'import os\n'), ((11406, 11414), 'sklearn.manifold.Isomap', 'Isomap', ([], {}), '()\n', (11412, 11414), False, 'from sklearn.manifold import TSNE, Isomap\n'), ((11539, 11566), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (11549, 11566), True, 'import matplotlib.pyplot as plt\n'), ((11580, 11592), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (11589, 11592), True, 'import numpy as np\n'), ((11673, 11685), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11683, 11685), True, 'import matplotlib.pyplot as plt\n'), ((11690, 11734), 'matplotlib.pyplot.title', 'plt.title', (['f"""Embedding Projection: {prefix}"""'], {}), "(f'Embedding Projection: {prefix}')\n", (11699, 11734), True, 'import matplotlib.pyplot as plt\n'), ((11849, 11876), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (11859, 11876), True, 'import matplotlib.pyplot as plt\n'), ((11881, 11929), 'matplotlib.pyplot.imshow', 'plt.imshow', (['X.T'], {'cmap': '"""cubehelix"""', 'aspect': '"""auto"""'}), "(X.T, cmap='cubehelix', aspect='auto')\n", (11891, 11929), True, 'import matplotlib.pyplot as plt\n'), ((12278, 12318), 'matplotlib.pyplot.yticks', 'plt.yticks', (['ic', '[classes[i] for i in ic]'], {}), '(ic, [classes[i] for i in ic])\n', (12288, 12318), True, 'import matplotlib.pyplot as plt\n'), ((12372, 12418), 'os.makedirs', 'os.makedirs', 
(['f"""{plot_dir}/npzs"""'], {'exist_ok': '(True)'}), "(f'{plot_dir}/npzs', exist_ok=True)\n", (12383, 12418), False, 'import os\n'), ((12423, 12546), 'numpy.savez', 'np.savez', (['f"""{plot_dir}/npzs/{prefix}emissions.npz"""'], {'predictions': 'X', 'ground_truth': 'y', 'video_ids': 'video_ids', 'classes': 'classes'}), "(f'{plot_dir}/npzs/{prefix}emissions.npz', predictions=X,\n ground_truth=y, video_ids=video_ids, classes=classes)\n", (12431, 12546), True, 'import numpy as np\n'), ((12822, 12849), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (12832, 12849), True, 'import matplotlib.pyplot as plt\n'), ((12854, 12978), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'fmt': '""".0f"""', 'cmap': '"""magma"""', 'cbar': '(False)', 'square': '(True)', 'xticklabels': 'classes', 'yticklabels': 'classes'}), "(cm, annot=True, fmt='.0f', cmap='magma', cbar=False, square=\n True, xticklabels=classes, yticklabels=classes)\n", (12865, 12978), True, 'import seaborn as sns\n'), ((12994, 13017), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted"""'], {}), "('Predicted')\n", (13004, 13017), True, 'import matplotlib.pyplot as plt\n'), ((13022, 13042), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Actual"""'], {}), "('Actual')\n", (13032, 13042), True, 'import matplotlib.pyplot as plt\n'), ((13047, 13077), 'matplotlib.pyplot.title', 'plt.title', (['f"""Confusion Matrix"""'], {}), "(f'Confusion Matrix')\n", (13056, 13077), True, 'import matplotlib.pyplot as plt\n'), ((13254, 13281), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (13264, 13281), True, 'import matplotlib.pyplot as plt\n'), ((13286, 13306), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (13297, 13306), True, 'import matplotlib.pyplot as plt\n'), ((13311, 13375), 'matplotlib.pyplot.plot', 'plt.plot', (['all_metrics.n_videos', 'all_metrics.accuracy'], {'marker': '"""o"""'}), "(all_metrics.n_videos, all_metrics.accuracy, marker='o')\n", (13319, 13375), True, 'import matplotlib.pyplot as plt\n'), ((13380, 13422), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy vs. Number of Videos"""'], {}), "('Accuracy vs. Number of Videos')\n", (13389, 13422), True, 'import matplotlib.pyplot as plt\n'), ((13427, 13457), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Videos"""'], {}), "('Number of Videos')\n", (13437, 13457), True, 'import matplotlib.pyplot as plt\n'), ((13462, 13484), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (13472, 13484), True, 'import matplotlib.pyplot as plt\n'), ((13490, 13510), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (13501, 13510), True, 'import matplotlib.pyplot as plt\n'), ((13515, 13589), 'matplotlib.pyplot.plot', 'plt.plot', (['all_metrics.n_videos', 'all_metrics.f1'], {'marker': '"""o"""', 'color': '"""orange"""'}), "(all_metrics.n_videos, all_metrics.f1, marker='o', color='orange')\n", (13523, 13589), True, 'import matplotlib.pyplot as plt\n'), ((13594, 13636), 'matplotlib.pyplot.title', 'plt.title', (['"""F1 Score vs. Number of Videos"""'], {}), "('F1 Score vs. 
Number of Videos')\n", (13603, 13636), True, 'import matplotlib.pyplot as plt\n'), ((13641, 13671), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Videos"""'], {}), "('Number of Videos')\n", (13651, 13671), True, 'import matplotlib.pyplot as plt\n'), ((13676, 13698), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F1 Score"""'], {}), "('F1 Score')\n", (13686, 13698), True, 'import matplotlib.pyplot as plt\n'), ((13703, 13721), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13719, 13721), True, 'import matplotlib.pyplot as plt\n'), ((13906, 13933), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 6)'}), '(figsize=(15, 6))\n', (13916, 13933), True, 'import matplotlib.pyplot as plt\n'), ((13938, 13958), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (13949, 13958), True, 'import matplotlib.pyplot as plt\n'), ((14099, 14111), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14109, 14111), True, 'import matplotlib.pyplot as plt\n'), ((14117, 14159), 'matplotlib.pyplot.title', 'plt.title', (['"""F1 Score vs. Number of Videos"""'], {}), "('F1 Score vs. Number of Videos')\n", (14126, 14159), True, 'import matplotlib.pyplot as plt\n'), ((14164, 14204), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Moving Average Window Size"""'], {}), "('Moving Average Window Size')\n", (14174, 14204), True, 'import matplotlib.pyplot as plt\n'), ((14209, 14231), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F1 Score"""'], {}), "('F1 Score')\n", (14219, 14231), True, 'import matplotlib.pyplot as plt\n'), ((14236, 14254), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14252, 14254), True, 'import matplotlib.pyplot as plt\n'), ((14260, 14280), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (14271, 14280), True, 'import matplotlib.pyplot as plt\n'), ((14419, 14431), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14429, 14431), True, 'import matplotlib.pyplot as plt\n'), ((14437, 14502), 'matplotlib.pyplot.title', 'plt.title', (['"""F1 Score vs. EMA alpha * x[t] + (1 - alpha) * x[t-1]"""'], {}), "('F1 Score vs. 
EMA alpha * x[t] + (1 - alpha) * x[t-1]')\n", (14446, 14502), True, 'import matplotlib.pyplot as plt\n'), ((14507, 14545), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Exp Moving Average alpha"""'], {}), "('Exp Moving Average alpha')\n", (14517, 14545), True, 'import matplotlib.pyplot as plt\n'), ((14550, 14572), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F1 Score"""'], {}), "('F1 Score')\n", (14560, 14572), True, 'import matplotlib.pyplot as plt\n'), ((14577, 14595), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14593, 14595), True, 'import matplotlib.pyplot as plt\n'), ((15666, 15684), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (15677, 15684), True, 'import matplotlib.pyplot as plt\n'), ((15689, 15700), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15698, 15700), True, 'import matplotlib.pyplot as plt\n'), ((16849, 16885), 'numpy.cumsum', 'np.cumsum', (['a'], {'dtype': 'float', 'axis': 'axis'}), '(a, dtype=float, axis=axis)\n', (16858, 16885), True, 'import numpy as np\n'), ((20276, 20296), 'numpy.unique', 'np.unique', (['video_ids'], {}), '(video_ids)\n', (20285, 20296), True, 'import numpy as np\n'), ((21011, 21046), 'numpy.isin', 'np.isin', (['video_ids', 'obj_train_split'], {}), '(video_ids, obj_train_split)\n', (21018, 21046), True, 'import numpy as np\n'), ((21142, 21175), 'numpy.isin', 'np.isin', (['video_ids', 'obj_val_split'], {}), '(video_ids, obj_val_split)\n', (21149, 21175), True, 'import numpy as np\n'), ((21502, 21537), 'numpy.isin', 'np.isin', (['video_ids', 'obj_train_split'], {}), '(video_ids, obj_train_split)\n', (21509, 21537), True, 'import numpy as np\n'), ((21550, 21583), 'numpy.isin', 'np.isin', (['video_ids', 'obj_val_split'], {}), '(video_ids, obj_val_split)\n', (21557, 21583), True, 'import numpy as np\n'), ((22162, 22190), 'os.path.isdir', 'os.path.isdir', (['root_plot_dir'], {}), '(root_plot_dir)\n', (22175, 22190), False, 'import os\n'), ((22250, 22291), 'os.makedirs', 'os.makedirs', (['root_plot_dir'], {'exist_ok': '(True)'}), '(root_plot_dir, exist_ok=True)\n', (22261, 22291), False, 'import os\n'), ((23125, 23183), 'tqdm.tqdm', 'tqdm.tqdm', (['cfg.EVAL.EMBEDDING_TYPES'], {'desc': '"""embedding type"""'}), "(cfg.EVAL.EMBEDDING_TYPES, desc='embedding type')\n", (23134, 23183), False, 'import tqdm\n'), ((25907, 25939), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (25916, 25939), True, 'import numpy as np\n'), ((26143, 26213), 'os.path.join', 'os.path.join', (['cfg.DATASET.ROOT', '"""embeddings1"""', 'cfg.EVAL.DETECTION_NAME'], {}), "(cfg.DATASET.ROOT, 'embeddings1', cfg.EVAL.DETECTION_NAME)\n", (26155, 26213), False, 'import os\n'), ((27455, 27466), 'fire.Fire', 'fire.Fire', ([], {}), '()\n', (27464, 27466), False, 'import fire\n'), ((3237, 3247), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (3244, 3247), True, 'import numpy as np\n'), ((4220, 4269), 'tqdm.tqdm.write', 'tqdm.tqdm.write', (['f"""Using: {video_id}: {track_id}"""'], {}), "(f'Using: {video_id}: {track_id}')\n", (4235, 4269), False, 'import tqdm\n'), ((4801, 4808), 'IPython.embed', 'embed', ([], {}), '()\n', (4806, 4808), False, 'from IPython import embed\n'), ((5153, 5170), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (5167, 5170), False, 'import os\n'), ((5252, 5275), 'os.path.isdir', 'os.path.isdir', (['db_fname'], {}), '(db_fname)\n', (5265, 5275), False, 'import os\n'), ((5289, 5314), 'lancedb.connect', 'lancedb.connect', (['db_fname'], {}), 
'(db_fname)\n', (5304, 5314), False, 'import lancedb\n'), ((5443, 5457), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (5452, 5457), True, 'import pandas as pd\n'), ((5470, 5499), 'pandas.DataFrame', 'pd.DataFrame', (['{state_col: []}'], {}), '({state_col: []})\n', (5482, 5499), True, 'import pandas as pd\n'), ((7860, 7884), 'joblib.dump', 'joblib.dump', (['pipeline', 'f'], {}), '(pipeline, f)\n', (7871, 7884), False, 'import joblib\n'), ((10772, 10802), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (10786, 10802), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support\n'), ((12099, 12129), 'numpy.array', 'np.array', (['[cs[yi] for yi in y]'], {}), '([cs[yi] for yi in y])\n', (12107, 12129), True, 'import numpy as np\n'), ((12706, 12772), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {'labels': 'classes', 'normalize': '"""true"""'}), "(y_test, y_pred, labels=classes, normalize='true')\n", (12722, 12772), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support\n'), ((14052, 14094), 'matplotlib.pyplot.plot', 'plt.plot', (['mdf.win_size', 'mdf.f1'], {'label': 'name'}), '(mdf.win_size, mdf.f1, label=name)\n', (14060, 14094), True, 'import matplotlib.pyplot as plt\n'), ((14375, 14414), 'matplotlib.pyplot.plot', 'plt.plot', (['mdf.alpha', 'mdf.f1'], {'label': 'name'}), '(mdf.alpha, mdf.f1, label=name)\n', (14383, 14414), True, 'import matplotlib.pyplot as plt\n'), ((21416, 21447), 'pandas.concat', 'pd.concat', (['[odf_train, odf_val]'], {}), '([odf_train, odf_val])\n', (21425, 21447), True, 'import pandas as pd\n'), ((3286, 3327), 'numpy.linalg.norm', 'np.linalg.norm', (['z'], {'axis': '(-1)', 'keepdims': '(True)'}), '(z, axis=-1, keepdims=True)\n', (3300, 3327), True, 'import numpy as np\n'), ((3804, 3830), 'os.path.splitext', 'os.path.splitext', (['video_id'], {}), '(video_id)\n', (3820, 3830), False, 'import os\n'), ((4121, 4190), 'tqdm.tqdm.write', 'tqdm.tqdm.write', (['f"""{bc.FAIL}Skipping{bc.END}: {video_id}: {track_id}"""'], {}), "(f'{bc.FAIL}Skipping{bc.END}: {video_id}: {track_id}')\n", (4136, 4190), False, 'import tqdm\n'), ((4462, 4586), 'pandas.DataFrame', 'pd.DataFrame', (["{'index': frame_idx, 'object': ann.object, 'state': ann.state, 'track_id':\n track_id, 'video_id': video_id}"], {}), "({'index': frame_idx, 'object': ann.object, 'state': ann.state,\n 'track_id': track_id, 'video_id': video_id})\n", (4474, 4586), True, 'import pandas as pd\n'), ((7788, 7838), 'os.path.join', 'os.path.join', (['plot_dir', 'f"""{run_name}_pipeline.pkl"""'], {}), "(plot_dir, f'{run_name}_pipeline.pkl')\n", (7800, 7838), False, 'import os\n'), ((8755, 8781), 'numpy.asarray', 'np.asarray', (['model.classes_'], {}), '(model.classes_)\n', (8765, 8781), True, 'import numpy as np\n'), ((8782, 8803), 'numpy.argmax', 'np.argmax', (['y_'], {'axis': '(1)'}), '(y_, axis=1)\n', (8791, 8803), True, 'import numpy as np\n'), ((9474, 9500), 'numpy.asarray', 'np.asarray', (['model.classes_'], {}), '(model.classes_)\n', (9484, 9500), True, 'import numpy as np\n'), ((9501, 9522), 'numpy.argmax', 'np.argmax', (['y_'], {'axis': '(1)'}), '(y_, axis=1)\n', (9510, 9522), True, 'import numpy as np\n'), ((12196, 12216), 'numpy.argmax', 'np.argmax', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (12205, 12216), True, 'import numpy as np\n'), ((15616, 15638), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (15631, 15638), 
False, 'import os\n'), ((16366, 16384), 'numpy.eye', 'np.eye', (['num_states'], {}), '(num_states)\n', (16372, 16384), True, 'import numpy as np\n'), ((16949, 16961), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (16958, 16961), True, 'import numpy as np\n'), ((17638, 17711), 'os.path.join', 'os.path.join', (['cfg.DATASET.ROOT', '"""embeddings-all"""', 'cfg.EVAL.DETECTION_NAME'], {}), "(cfg.DATASET.ROOT, 'embeddings-all', cfg.EVAL.DETECTION_NAME)\n", (17650, 17711), False, 'import os\n'), ((16752, 16775), 'numpy.sum', 'np.sum', (['forward_prob[t]'], {}), '(forward_prob[t])\n', (16758, 16775), True, 'import numpy as np\n'), ((23784, 23820), 'os.makedirs', 'os.makedirs', (['plot_dir'], {'exist_ok': '(True)'}), '(plot_dir, exist_ok=True)\n', (23795, 23820), False, 'import os\n'), ((23945, 23977), 'tqdm.tqdm', 'tqdm.tqdm', (['models'], {'desc': '"""models"""'}), "(models, desc='models')\n", (23954, 23977), False, 'import tqdm\n'), ((25301, 25326), 'pandas.DataFrame', 'pd.DataFrame', (['all_metrics'], {}), '(all_metrics)\n', (25313, 25326), True, 'import pandas as pd\n'), ((25370, 25405), 'pandas.DataFrame', 'pd.DataFrame', (['all_per_class_metrics'], {}), '(all_per_class_metrics)\n', (25382, 25405), True, 'import pandas as pd\n'), ((18253, 18271), 'pandas.isna', 'pd.isna', (['ydf.state'], {}), '(ydf.state)\n', (18260, 18271), True, 'import pandas as pd\n')]
"""LanceDB vector store.""" from typing import Any, List, Optional from llama_index.schema import MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode from llama_index.vector_stores.types import ( MetadataFilters, NodeWithEmbedding, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from llama_index.vector_stores.utils import node_to_metadata_dict def _to_lance_filter(standard_filters: MetadataFilters) -> Any: """Translate standard metadata filters to Lance specific spec.""" filters = [] for filter in standard_filters.filters: if isinstance(filter.value, str): filters.append(filter.key + ' = "' + filter.value + '"') else: filters.append(filter.key + " = " + str(filter.value)) return " AND ".join(filters) class LanceDBVectorStore(VectorStore): """The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. """ stores_text = True flat_metadata: bool = True def __init__( self, uri: str, table_name: str = "vectors", nprobes: int = 20, refine_factor: Optional[int] = None, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb # noqa: F401 except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.nprobes = nprobes self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return None def add( self, embedding_results: List[NodeWithEmbedding], ) -> List[str]: data = [] ids = [] for result in embedding_results: metadata = node_to_metadata_dict( result.node, remove_text=True, flat_metadata=self.flat_metadata ) append_data = { "id": result.id, "doc_id": result.ref_doc_id, "vector": result.embedding, "text": result.node.get_content(metadata_mode=MetadataMode.NONE), } append_data.update(metadata) data.append(append_data) ids.append(result.id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. """ table = self.connection.open_table(self.table_name) table.delete('document_id = "' + ref_doc_id + '"') def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface." 
)
            where = _to_lance_filter(query.filters)
        else:
            where = kwargs.pop("where", None)

        table = self.connection.open_table(self.table_name)
        lance_query = (
            table.search(query.query_embedding)
            .limit(query.similarity_top_k)
            .where(where)
            .nprobes(self.nprobes)
        )

        if self.refine_factor is not None:
            # keep the builder returned by refine_factor so the setting is
            # applied even if the builder is not mutated in place
            lance_query = lance_query.refine_factor(self.refine_factor)

        results = lance_query.to_df()
        nodes = []
        for _, item in results.iterrows():
            node = TextNode(
                text=item.text,
                id_=item.id,
                relationships={
                    NodeRelationship.SOURCE: RelatedNodeInfo(node_id=item.doc_id),
                },
            )
            nodes.append(node)

        return VectorStoreQueryResult(
            nodes=nodes,
            similarities=results["score"].tolist(),
            ids=results["id"].tolist(),
        )
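# Minimal usage sketch for the store above, wired through llama_index's
# standard storage APIs from the same era (SimpleDirectoryReader /
# StorageContext / VectorStoreIndex). The uri, data directory, and query
# string are placeholders.
from llama_index import SimpleDirectoryReader, StorageContext, VectorStoreIndex

vector_store = LanceDBVectorStore(uri='/tmp/lancedb', table_name='vectors')
storage_context = StorageContext.from_defaults(vector_store=vector_store)
documents = SimpleDirectoryReader('./data').load_data()
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
print(index.as_query_engine().query('What does the corpus say about LanceDB?'))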
[ "lancedb.connect" ]
[((2271, 2291), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2286, 2291), False, 'import lancedb\n'), ((2716, 2807), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['result.node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(result.node, remove_text=True, flat_metadata=self.\n flat_metadata)\n', (2737, 2807), False, 'from llama_index.vector_stores.utils import node_to_metadata_dict\n'), ((5023, 5059), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item.doc_id'}), '(node_id=item.doc_id)\n', (5038, 5059), False, 'from llama_index.schema import MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')]