add kimi k2 thinking
README.md CHANGED

```diff
@@ -19,7 +19,7 @@ AnyCoder is an AI-powered code generator that helps you create applications by d
 
 ## Features
 
-- **Multi-Model Support**: Choose from Moonshot Kimi-K2, Kimi K2 Turbo (Preview), DeepSeek V3, DeepSeek R1, ERNIE-4.5-VL, MiniMax M2, Qwen3-235B-A22B, Qwen3-30B-A3B-Instruct-2507, Qwen3-30B-A3B-Thinking-2507, SmolLM3-3B, GLM-4.1V-9B-Thinking, Gemini 2.5 Flash and Gemini 2.5 Pro (OpenAI-compatible)
+- **Multi-Model Support**: Choose from Moonshot Kimi-K2, Kimi K2 Turbo (Preview), Kimi K2 Thinking, DeepSeek V3, DeepSeek R1, ERNIE-4.5-VL, MiniMax M2, Qwen3-235B-A22B, Qwen3-30B-A3B-Instruct-2507, Qwen3-30B-A3B-Thinking-2507, SmolLM3-3B, GLM-4.1V-9B-Thinking, Gemini 2.5 Flash and Gemini 2.5 Pro (OpenAI-compatible)
 - Claude-Opus-4.1 (via Poe)
 - **Flexible Input**: Describe your app in text, upload a UI design image (for multimodal models), provide a reference file (PDF, TXT, MD, CSV, DOCX, or image), or enter a website URL for redesign
 - **Web Search Integration**: Enable real-time web search (Tavily, with advanced search depth) to enhance code generation with up-to-date information and best practices

@@ -73,6 +73,7 @@ python app.py
 
 - Moonshot Kimi-K2
 - Kimi K2 Turbo (Preview)
+- Kimi K2 Thinking
 - DeepSeek V3
 - DeepSeek V3.1
 - DeepSeek V3.1 Terminus
```
app.py CHANGED

```diff
@@ -2430,10 +2430,15 @@ AVAILABLE_MODELS = [
         "name": "MiniMax M2",
         "id": "MiniMaxAI/MiniMax-M2",
         "description": "MiniMax M2 model via HuggingFace InferenceClient with Novita provider"
+    },
+    {
+        "name": "Kimi K2 Thinking",
+        "id": "kimi-k2-thinking",
+        "description": "Moonshot Kimi K2 Thinking model for advanced reasoning and code generation"
     }
 ]
 # Default model selection
-DEFAULT_MODEL_NAME = "
+DEFAULT_MODEL_NAME = "Kimi K2 Thinking"
 DEFAULT_MODEL = None
 for _m in AVAILABLE_MODELS:
     if _m.get("name") == DEFAULT_MODEL_NAME:
```
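The first app.py hunk registers the new entry in `AVAILABLE_MODELS` and points `DEFAULT_MODEL_NAME` at it; the existing loop then resolves `DEFAULT_MODEL` by display name. Below is a minimal sketch of that registry-and-lookup pattern with the list trimmed to two entries; the loop body is cut off in the diff, so the fallback to the first entry is an illustrative assumption, not the app's confirmed behavior.

```python
# Sketch of the model registry pattern in app.py (trimmed; not the full list).
AVAILABLE_MODELS = [
    {
        "name": "MiniMax M2",
        "id": "MiniMaxAI/MiniMax-M2",
        "description": "MiniMax M2 model via HuggingFace InferenceClient with Novita provider",
    },
    {
        "name": "Kimi K2 Thinking",
        "id": "kimi-k2-thinking",
        "description": "Moonshot Kimi K2 Thinking model for advanced reasoning and code generation",
    },
]

DEFAULT_MODEL_NAME = "Kimi K2 Thinking"

# Resolve the default entry by display name; falling back to the first entry
# is an assumption, since the diff does not show the end of the original loop.
DEFAULT_MODEL = next(
    (m for m in AVAILABLE_MODELS if m.get("name") == DEFAULT_MODEL_NAME),
    AVAILABLE_MODELS[0],
)

print(DEFAULT_MODEL["id"])  # kimi-k2-thinking
```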
```diff
@@ -2564,6 +2569,12 @@ def get_inference_client(model_id, provider="auto"):
             api_key=os.getenv("MOONSHOT_API_KEY"),
             base_url="https://api.moonshot.ai/v1",
         )
+    elif model_id == "kimi-k2-thinking":
+        # Use Moonshot AI (OpenAI-compatible) client for Kimi K2 Thinking
+        return OpenAI(
+            api_key=os.getenv("MOONSHOT_API_KEY"),
+            base_url="https://api.moonshot.ai/v1",
+        )
     elif model_id == "stealth-model-1":
         # Use stealth model with generic configuration
         api_key = os.getenv("STEALTH_MODEL_1_API_KEY")
```
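The second hunk routes the new `model_id` to the same Moonshot OpenAI-compatible client used for the other Kimi variants. The following is a hedged usage sketch of what a call through that client could look like; the prompt, the requirement that `MOONSHOT_API_KEY` is set, and the assumption that the endpoint accepts `"kimi-k2-thinking"` verbatim as the `model` value are illustrative and not shown in this diff.

```python
import os
from openai import OpenAI

# Hypothetical standalone version of the client that get_inference_client()
# returns for model_id == "kimi-k2-thinking".
client = OpenAI(
    api_key=os.getenv("MOONSHOT_API_KEY"),  # assumed to be set in the environment
    base_url="https://api.moonshot.ai/v1",
)

# Example request; the actual payload AnyCoder builds is not part of this diff.
resp = client.chat.completions.create(
    model="kimi-k2-thinking",
    messages=[
        {"role": "system", "content": "You are a helpful coding assistant."},
        {"role": "user", "content": "Write a Python function that reverses a string."},
    ],
)
print(resp.choices[0].message.content)
```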