# cp .env.example .env
# Edit your .env file with your own values
# Don't commit your .env file to git/push to GitHub!
# Don't modify/delete .env.example unless adding extensions to the project
# which require new variables to be added to the .env file

# API CONFIG
# OPENAI_API_MODEL can be used instead
# Special values:
# human - use human as intermediary with custom LLMs
# llama - use llama.cpp with Llama, Alpaca, Vicuna, GPT4All, etc
LLM_MODEL=gpt-3.5-turbo # alternatively, gpt-4, text-davinci-003, etc
LLAMA_MODEL_PATH= # ex. models/llama-13B/ggml-model.bin
#LLAMA_THREADS_NUM=8 # Set the number of threads for llama (optional)
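# Example (illustrative, combining the special value and path shown above):
# to run against a local llama.cpp model instead of the OpenAI API, set:
# LLM_MODEL=llama
# LLAMA_MODEL_PATH=models/llama-13B/ggml-model.bin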
OPENAI_API_KEY=
OPENAI_TEMPERATURE=0.0

# STORE CONFIG
# TABLE_NAME can be used instead
RESULTS_STORE_NAME=baby-agi-test-table

# Weaviate config
# Uncomment and fill these to switch from local ChromaDB to Weaviate
# WEAVIATE_USE_EMBEDDED=true
# WEAVIATE_URL=
# WEAVIATE_API_KEY=
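# Note (assumption): with WEAVIATE_USE_EMBEDDED=true, Weaviate runs in-process,
# so WEAVIATE_URL and WEAVIATE_API_KEY can stay empty; fill them in only when
# pointing at a remote Weaviate instance.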

# Pinecone config
# Uncomment and fill these to switch from local ChromaDB to Pinecone
# PINECONE_API_KEY=
# PINECONE_ENVIRONMENT=

# COOPERATIVE MODE CONFIG
# BABY_NAME can be used instead
INSTANCE_NAME=BabyAGI
COOPERATIVE_MODE=none # local
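# Assumption: 'local' is the alternative value hinted at above, letting multiple
# instances on the same machine share work, each identified by its INSTANCE_NAME;
# 'none' runs a single standalone instance.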

# RUN CONFIG
OBJECTIVE=Solve world hunger
# For backwards compatibility
# FIRST_TASK can be used instead of INITIAL_TASK
INITIAL_TASK=Develop a task list

# Extensions
# List additional extension .env files to load (except .env.example!)
DOTENV_EXTENSIONS=

# Set to true to enable command line args support
ENABLE_COMMAND_LINE_ARGS=false
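# Assumption: when set to true, values supplied on the command line (e.g. the
# objective) take precedence over the values in this file.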