# context-ai/configs/compute_rag_vector_index_meetings.yaml
# Meeting Data RAG Configuration for Online App
# Uses meeting data from the test_meetings collection to build the vector index
parameters:
  extract_collection_name: test_meetings # Source: our BigQuery meeting data
  fetch_limit: 100 # Process all meeting documents (we have ~100)
  load_collection_name: rag # Destination: contextualized chunks
  content_quality_score_threshold: 0.5 # Lower threshold for meetings
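  # Note on parent retrieval (general technique description, not specific to this pipeline):
  # queries are matched against small child chunks, but the larger parent chunks are
  # returned, which preserves more of the surrounding meeting context.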
  retriever_type: parent # Use parent retriever (performed better in our tests)
  embedding_model_id: text-embedding-3-small # OpenAI embedding model
  embedding_model_type: openai
  embedding_model_dim: 1536 # Output dimension of text-embedding-3-small
  chunk_size: 640
  mock: false
  processing_batch_size: 8
  processing_max_workers: 4
  device: mps # Use MPS for Apple Silicon acceleration
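# Usage note (assumption inferred from the filename, not stated in this file): this config
# is expected to be consumed by the compute_rag_vector_index step of the offline pipeline.
# On non-Apple hardware, device would typically be set to cuda or cpu instead of mps.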