# Supported values:
# - VIDEOCHAIN
# - REPLICATE
# - INFERENCE_ENDPOINT
# - INFERENCE_API
# - OPENAI
RENDERING_ENGINE="INFERENCE_API"
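#
# Minimal example (a sketch, not the default): to render with Replicate instead,
# set the engine here and fill in the matching variables defined further down:
# RENDERING_ENGINE="REPLICATE"
# AUTH_REPLICATE_API_TOKEN="<your Replicate token>"
# RENDERING_REPLICATE_API_MODEL="stabilityai/sdxl"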
# Supported values:
# - INFERENCE_ENDPOINT
# - INFERENCE_API
# - OPENAI
# - GROQ
# - ANTHROPIC
LLM_ENGINE="INFERENCE_API"
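#
# Minimal example (a sketch, not the default): to use OpenAI for the LLM instead,
# set the engine here and fill in the matching variables defined further down:
# LLM_ENGINE="OPENAI"
# AUTH_OPENAI_API_KEY="<your OpenAI API key>"
# LLM_OPENAI_API_MODEL="gpt-4-turbo"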
# Set this to control the maximum number of pages
MAX_NB_PAGES=6
# Set to "true" to create artificial delays and smooth out traffic
NEXT_PUBLIC_ENABLE_RATE_LIMITER="false"
# ------------- HUGGING FACE OAUTH -------------
ENABLE_HUGGING_FACE_OAUTH=
ENABLE_HUGGING_FACE_OAUTH_WALL=
HUGGING_FACE_OAUTH_CLIENT_ID=
# in production this should be the space's domain and/or URL
HUGGING_FACE_OAUTH_REDIRECT_URL=
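# (hypothetical example, assuming the Space is served from the usual *.hf.space domain:
#  HUGGING_FACE_OAUTH_REDIRECT_URL="https://<owner>-<space-name>.hf.space")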
# this one must be kept secret (and is unused for now)
HUGGING_FACE_OAUTH_SECRET=
# ------------- PROVIDER AUTH ------------
# You only need to configure the access token(s) for the provider(s) you want to use
# HuggingFace.co token: available for the LLM engine and the RENDERING engine
AUTH_HF_API_TOKEN=
# Replicate.com token: available for the RENDERING engine
AUTH_REPLICATE_API_TOKEN=
# OpenAI.com API key: available for the LLM engine and the RENDERING engine
AUTH_OPENAI_API_KEY=
# VideoChain token, for an experimental RENDERING engine (not documented yet; you may want to use one of the other engines instead)
AUTH_VIDEOCHAIN_API_TOKEN=
# Groq.com key: available for the LLM engine
AUTH_GROQ_API_KEY=
# Anthropic.com key: available for the LLM engine
AUTH_ANTHROPIC_API_KEY=
# ------------- RENDERING API CONFIG --------------
# If you decide to use Replicate for the RENDERING engine
RENDERING_REPLICATE_API_MODEL="stabilityai/sdxl"
RENDERING_REPLICATE_API_MODEL_VERSION="da77bc59ee60423279fd632efb4795ab731d9e3ca9705ef3341091fb989b7eaf"
# If you decide to use a private Hugging Face Inference Endpoint for the RENDERING engine
RENDERING_HF_INFERENCE_ENDPOINT_URL="https://XXXXXXXXXX.endpoints.huggingface.cloud"
# If you decide to use a Hugging Face Inference API model for the RENDERING engine
RENDERING_HF_INFERENCE_API_BASE_MODEL="stabilityai/stable-diffusion-xl-base-1.0"
# The refiner model to use with the Hugging Face Inference API for the RENDERING engine
RENDERING_HF_INFERENCE_API_REFINER_MODEL="stabilityai/stable-diffusion-xl-refiner-1.0"
# If your model returns a different file type (e.g. jpg or webp), change it here
RENDERING_HF_INFERENCE_API_FILE_TYPE="image/png"
# VideoChain is an experimental RENDERING engine (not documented yet; you may want to use one of the other engines instead)
RENDERING_VIDEOCHAIN_API_URL="http://localhost:7860"
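# If you decide to use OpenAI for the RENDERING engine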
RENDERING_OPENAI_API_BASE_URL="https://api.openai.com/v1"
RENDERING_OPENAI_API_MODEL="dall-e-3"
# ------------- LLM API CONFIG ----------------
LLM_GROQ_API_MODEL="mixtral-8x7b-32768"
# If you decide to use OpenAI for the LLM engine
LLM_OPENAI_API_BASE_URL="https://api.openai.com/v1"
LLM_OPENAI_API_MODEL="gpt-4-turbo"
# If you decide to use Anthropic (e.g. Claude) for the LLM engine
# https://docs.anthropic.com/claude/docs/models-overview
LLM_ANTHROPIC_API_MODEL="claude-3-opus-20240229"
# If you decide to use a private Hugging Face Inference Endpoint for the LLM engine
LLM_HF_INFERENCE_ENDPOINT_URL=""
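# (the URL format is the same as for the rendering endpoint above,
# e.g. "https://XXXXXXXXXX.endpoints.huggingface.cloud")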
# If you decide to use a Hugging Face Inference API model for the LLM engine
LLM_HF_INFERENCE_API_MODEL="HuggingFaceH4/zephyr-7b-beta"
# ----------- COMMUNITY SHARING (OPTIONAL) -----------
# You don't need those community sharing options to run the AI Comic Factory
# locally or on your own server (they are meant to be used by the Hugging Face team)
NEXT_PUBLIC_ENABLE_COMMUNITY_SHARING="false"
COMMUNITY_API_URL=
COMMUNITY_API_TOKEN=
COMMUNITY_API_ID=
# ----------- CENSORSHIP (OPTIONAL) -----------
# censorship is currently disabled, but will be required when we create a "community roll"
# (a public repository of user-generated comic strips)
ENABLE_CENSORSHIP="false"
# Due to the sensitive nature of some of the keywords we want to ban (users try all kinds of crazy illegal things)
# the words are not stored in clear text in the source code, but behind an encryption key
# (I don't want the project to be flagged by automated content scanners on GitHub or something)
SECRET_FINGERPRINT=""