# Example environment configuration for the AI Comic Factory
# Supported values:
# - VIDEOCHAIN
# - REPLICATE
# - INFERENCE_ENDPOINT
# - INFERENCE_API
RENDERING_ENGINE="INFERENCE_API"
# Supported values:
# - INFERENCE_ENDPOINT
# - INFERENCE_API
# - OPENAI
LLM_ENGINE="INFERENCE_API"
# Not implemented for the Inference API yet - you can submit a PR if you have some ideas
NEXT_PUBLIC_CAN_UPSCALE="false"
# Not implemented for the Inference API yet - you can submit a PR if you have some ideas
NEXT_PUBLIC_CAN_REDRAW="false"
# Set to "true" to create artificial delays and smooth out traffic
NEXT_PUBLIC_ENABLE_RATE_LIMITER="false"
# ------------- PROVIDER AUTH ------------
# You only need to configure the access token(s) for the provider(s) you want to use
# HuggingFace.co token: available for the LLM engine and the RENDERING engine
AUTH_HF_API_TOKEN=
# Replicate.com token: available for the RENDERING engine
AUTH_REPLICATE_API_TOKEN=
# OpenAI.com token: available for the LLM engine
AUTH_OPENAI_TOKEN=
# An experimental RENDERING engine (sorry it is not very documented yet, so you can use one of the other engines)
AUTH_VIDEOCHAIN_API_TOKEN=
# ------------- RENDERING API CONFIG --------------
# If you decided to use Replicate for the RENDERING engine
RENDERING_REPLICATE_API_MODEL="stabilityai/sdxl"
RENDERING_REPLICATE_API_MODEL_VERSION="da77bc59ee60423279fd632efb4795ab731d9e3ca9705ef3341091fb989b7eaf"
# If you decided to use a private Hugging Face Inference Endpoint for the RENDERING engine
RENDERING_HF_INFERENCE_ENDPOINT_URL="https://XXXXXXXXXX.endpoints.huggingface.cloud"
# If you decided to use a Hugging Face Inference API model for the RENDERING engine
RENDERING_HF_INFERENCE_API_MODEL="stabilityai/stable-diffusion-xl-base-1.0"
# An experimental RENDERING engine (sorry it is not very documented yet, so you can use one of the other engines)
RENDERING_VIDEOCHAIN_API_URL="http://localhost:7860"
# ------------- LLM API CONFIG ----------------
# If you decided to use OpenAI for the LLM engine
LLM_OPENAI_API_BASE_URL="https://api.openai.com/v1"
LLM_OPENAI_API_MODEL="gpt-3.5-turbo"
# If you decided to use a private Hugging Face Inference Endpoint for the LLM engine
LLM_HF_INFERENCE_ENDPOINT_URL=""
# If you decided to use a Hugging Face Inference API model for the LLM engine
LLM_HF_INFERENCE_API_MODEL="meta-llama/Llama-2-70b-chat-hf"
# ----------- COMMUNITY SHARING (OPTIONAL) -----------
# You don't need those community sharing options to run the AI Comic Factory
# locally or on your own server (they are meant to be used by the Hugging Face team)
NEXT_PUBLIC_ENABLE_COMMUNITY_SHARING="false"
COMMUNITY_API_URL=
COMMUNITY_API_TOKEN=
COMMUNITY_API_ID=
# ----------- CENSORSHIP (OPTIONAL) -----------
# censorship is currently disabled, but will be required when we create a "community roll"
# (a public repository of user-generated comic strips)
ENABLE_CENSORSHIP="false"
# Due to the sensitive nature of some of the keywords we want to ban (users try all kinds of crazy illegal things)
# the words are not put in the clear in the source code, but behind an encryption key
# (I don't want the project to be flagged by an AI robot police on GitHub or something)
SECRET_FINGERPRINT=""
|