# backend/.env.example
# The Llama Cloud API key.
LLAMA_CLOUD_API_KEY=
# The provider for the AI models to use.
MODEL_PROVIDER=openai
# The name of the LLM model to use.
MODEL=gpt-3.5-turbo
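# Other chat models offered by the configured provider (e.g. gpt-4o) should
# work here as well, assuming your provider account has access to them.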
# The name of the embedding model to use.
EMBEDDING_MODEL=text-embedding-3-large
# The dimension of the embeddings produced by the embedding model.
EMBEDDING_DIM=1024
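# Note: text-embedding-3-large outputs 3072 dimensions by default but supports
# reduced sizes such as 1024; whatever value is set here must match the
# dimension of the vector index the embeddings are stored in.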
# The OpenAI API key to use.
OPENAI_API_KEY=
# Temperature for sampling from the model.
# LLM_TEMPERATURE=
# Maximum number of tokens to generate.
# LLM_MAX_TOKENS=
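# Example (illustrative values only, not defaults of this app):
# LLM_TEMPERATURE=0.2
# LLM_MAX_TOKENS=512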
# The number of similar embeddings to return when retrieving documents.
TOP_K=3
# Custom system prompt.
# Example:
# SYSTEM_PROMPT="You are a helpful assistant who helps users with their questions."
# SYSTEM_PROMPT=
# Configuration for the Pinecone vector store (optional).
# The Pinecone API key.
# PINECONE_API_KEY=
# PINECONE_ENVIRONMENT=
# PINECONE_INDEX_NAME=
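# Example (placeholder values; substitute the settings from your own Pinecone project):
# PINECONE_API_KEY=<your-pinecone-api-key>
# PINECONE_ENVIRONMENT=us-east-1
# PINECONE_INDEX_NAME=my-index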
# The address the backend app listens on.
APP_HOST=0.0.0.0
# The port the backend app listens on.
APP_PORT=8000