# Supported values:
# - VIDEOCHAIN
# - REPLICATE
# - INFERENCE_ENDPOINT
# - INFERENCE_API
RENDERING_ENGINE="INFERENCE_API"
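# Example (sketch): to render with Replicate instead, you would switch the
# engine here and fill in the Replicate token and model settings further below:
# RENDERING_ENGINE="REPLICATE"
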
# Supported values:
# - INFERENCE_ENDPOINT
# - INFERENCE_API
LLM_ENGINE="INFERENCE_API"
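# Example (sketch): to run the LLM on your own text-generation Inference
# Endpoint instead, you would select it here and set the endpoint URL below:
# LLM_ENGINE="INFERENCE_ENDPOINT"
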
# ------------- PROVIDER AUTH ------------
# Hugging Face token, required if you plan to use the Inference API
# or a custom Inference Endpoint for the LLM or for SDXL generation
# (you can create one at https://huggingface.co/settings/tokens)
HF_API_TOKEN=
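# Example (placeholder value only; real tokens start with "hf_"):
# HF_API_TOKEN="hf_xxxxxxxxxxxxxxxxxxxxxxxx"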
# Replicate token, if you wish to use Replicate as a provider for SDXL
REPLICATE_API_TOKEN=
# OpenAI is not supported yet
OPENAI_TOKEN=
# VideoChain is a custom API used for SDXL, but you don't need it for the base features
VIDEOCHAIN_API_TOKEN=

# ------------- RENDERING API CONFIG --------------
RENDERING_VIDEOCHAIN_API_URL="http://localhost:7860"
RENDERING_REPLICATE_API_MODEL="stabilityai/sdxl"
RENDERING_REPLICATE_API_MODEL_VERSION="da77bc59ee60423279fd632efb4795ab731d9e3ca9705ef3341091fb989b7eaf"
RENDERING_HF_INFERENCE_ENDPOINT_URL="https://XXXXXXXXXX.endpoints.huggingface.cloud"
RENDERING_HF_INFERENCE_API_MODEL="stabilityai/stable-diffusion-xl-base-1.0"
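# Note: only the RENDERING_* variables matching the RENDERING_ENGINE selected
# at the top of this file should be needed; the rest can keep their defaults.
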
# ------------- LLM API CONFIG ----------------
# URL to a custom text-generation Inference Endpoint of your choice
# -> You can leave it empty if you decide to use an Inference API Model instead
LLM_HF_INFERENCE_ENDPOINT_URL=
# You can also use a model from the Inference API (not a custom inference endpoint)
# -> You can leave it empty if you decide to use an Inference Endpoint URL instead
LLM_HF_INFERENCE_API_MODEL="codellama/CodeLlama-7b-hf"
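# Example (sketch; the model name below is only an illustration): you could
# point this at another text-generation model from the Inference API, e.g.:
# LLM_HF_INFERENCE_API_MODEL="HuggingFaceH4/zephyr-7b-beta"
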
# ----------- COMMUNITY SHARING (OPTIONAL) -----------
NEXT_PUBLIC_ENABLE_COMMUNITY_SHARING="false"
# You don't need those community sharing options to run the AI Comic Factory
# locally or on your own server (they are meant to be used by the Hugging Face team)
COMMUNITY_API_URL=
COMMUNITY_API_TOKEN=
COMMUNITY_API_ID=