Spaces:
Running
Running
"""
Configuration module for the Ling Spaces application.

All configuration lives here in one place: API endpoints, API keys,
and the model/framework specifications used across the app.
"""
import os

from dotenv import load_dotenv

# Pull environment variables in from the local .secrets file (if present).
load_dotenv(dotenv_path='.secrets')
# --- API Configuration ---
# Read the raw environment value first so we can warn about a missing
# setting BEFORE the default is applied. (The original code checked
# OPEN_AI_ENTRYPOINT *after* `or`-defaulting it, so that warning was
# dead code and could never fire.)
_raw_entrypoint = os.getenv("OPEN_AI_ENTRYPOINT")

# API endpoint for OpenAI compatible services (defaults to the official one)
OPEN_AI_ENTRYPOINT = _raw_entrypoint or "https://api.openai.com/v1"
# API key for OpenAI compatible services (no sensible default exists)
OPEN_AI_KEY = os.getenv("OPEN_AI_KEY")
# Brand name of the OpenAI compatible provider, for display purposes
OPEN_AI_PROVIDER = os.getenv("OPEN_AI_PROVIDER") or "OpenAI Compatible API"

# Fallback/warning for API keys
if not OPEN_AI_KEY:
    print("โ ๏ธ Warning: OPEN_AI_KEY is not set. Remote models may not function correctly.")
if not _raw_entrypoint:
    print("โ ๏ธ Warning: OPEN_AI_ENTRYPOINT is not set. Using default: https://api.openai.com/v1")
# --- Model Specifications ---
# Constants for easy referencing of models
LING_MINI_2_0 = "ling-mini-2.0"
LING_1T = "ling-1t"
LING_FLASH_2_0 = "ling-flash-2.0"
RING_1T = "ring-1t"
RING_FLASH_2_0 = "ring-flash-2.0"
RING_MINI_2_0 = "ring-mini-2.0"


def _chat_spec(model_id: str, display_name: str, description: str) -> dict:
    """Build one chat-model spec entry; every remote model shares provider/url."""
    return {
        "provider": "openai_compatible",
        "model_id": model_id,
        "display_name": display_name,
        "description": description,
        "url": "https://huggingface.co/inclusionai",
    }


# Keyed by the constants above; insertion order is the order shown in the UI.
CHAT_MODEL_SPECS = {
    LING_MINI_2_0: _chat_spec(
        "inclusionai/ling-mini-2.0",
        "๐ฆ Ling-mini-2.0",
        "A lightweight conversational model optimized for efficient operation on consumer-grade hardware, ideal for mobile or localized deployment scenarios.",
    ),
    LING_1T: _chat_spec(
        "inclusionai/ling-1t",
        "๐ฆ Ling-1T",
        "A trillion-parameter large language model designed for complex natural language understanding and generation tasks that require extreme performance and high fluency.",
    ),
    LING_FLASH_2_0: _chat_spec(
        "inclusionai/ling-flash-2.0",
        "๐ฆ Ling-flash-2.0",
        "A high-performance billion-parameter model optimized for scenarios requiring high-speed response and complex instruction following.",
    ),
    RING_1T: _chat_spec(
        "inclusionai/ring-1t",
        "๐๏ธ Ring-1T",
        "A brand-new trillion-parameter reasoning model with powerful code generation and tool use capabilities.",
    ),
    RING_FLASH_2_0: _chat_spec(
        "inclusionai/ring-flash-2.0",
        "๐๏ธ Ring-flash-2.0",
        "A billion-parameter reasoning model that strikes a good balance between performance and cost, suitable for general-purpose tasks that require step-by-step thinking or code generation.",
    ),
    RING_MINI_2_0: _chat_spec(
        "inclusionai/ring-mini-2.0",
        "๐๏ธ Ring-mini-2.0",
        "A quantized and extremely efficient reasoning model designed for resource-constrained environments with strict speed and efficiency requirements (such as edge computing).",
    ),
}
# --- Code Framework Specifications ---
# Constants for easy referencing of code frameworks
STATIC_PAGE = "static_page"
GRADIO_APP = "gradio_app"

# NOTE(review): GRADIO_APP has a constant but no spec entry here — presumably
# only the static-page target is currently selectable; confirm against callers.
CODE_FRAMEWORK_SPECS = {
    STATIC_PAGE: {
        "display_name": "้ๆ้กต้ข",
        "description": "็ๆไธไธช็ฌ็ซ็ใๅๅบๅผ็ HTML ๆไปถ๏ผๅ ๅซๆๆๅฟ ่ฆ็ CSS ๅ JavaScriptใ้ๅๅฟซ้ๅๅๅ็ฎๅ็็ฝ้กตๅฑ็คบใ",
    },
}
# --- Utility Functions ---
# Module-level mutable state: the provider name currently shown in the UI.
# Initialized from the configured provider brand name above.
_current_provider_name = OPEN_AI_PROVIDER


def set_current_provider(provider_name: str) -> None:
    """Override the active API provider name (used for UI display)."""
    global _current_provider_name
    _current_provider_name = provider_name


def get_current_provider_name() -> str:
    """Return the active API provider name (used for UI display)."""
    return _current_provider_name
def get_model_id(model_constant: str) -> str:
    """
    Resolve a model constant to the internal model ID passed to the API.

    Falls back to the constant itself when no spec (or no "model_id" field)
    exists for it, so unknown constants pass through unchanged.
    """
    spec = CHAT_MODEL_SPECS.get(model_constant)
    if spec is None:
        return model_constant
    return spec.get("model_id", model_constant)
def get_model_display_name(model_constant: str) -> str:
    """
    Resolve a model constant to the display name shown in the UI.

    Unknown constants (or specs missing a "display_name") fall back to the
    constant itself, mirroring get_model_id's behavior.
    """
    spec = CHAT_MODEL_SPECS.get(model_constant)
    if spec is None:
        return model_constant
    return spec.get("display_name", model_constant)