update to use grok 4.1 fast
- anycoder_app/config.py +4 -4
- anycoder_app/deploy.py +2 -2
- anycoder_app/models.py +2 -2
- backend_api.py +3 -3
- frontend/src/app/page.tsx +1 -1
- frontend/src/components/LandingPage.tsx +1 -1
anycoder_app/config.py
CHANGED
@@ -143,9 +143,9 @@ AVAILABLE_MODELS = [
         "description": "Moonshot Kimi K2 Thinking model for advanced reasoning and code generation"
     },
     {
-        "name": "
-        "id": "
-        "description": "
+        "name": "Grok 4.1 Fast",
+        "id": "x-ai/grok-4.1-fast",
+        "description": "Grok 4.1 Fast model via OpenRouter for advanced code generation and reasoning"
     },
     {
         "name": "Sherlock Think Alpha",
@@ -157,7 +157,7 @@ AVAILABLE_MODELS = [
 k2_model_name_tag = "moonshotai/Kimi-K2-Thinking"

 # Default model selection
-DEFAULT_MODEL_NAME = "
+DEFAULT_MODEL_NAME = "Grok 4.1 Fast"
 DEFAULT_MODEL = None
 for _m in AVAILABLE_MODELS:
     if _m.get("name") == DEFAULT_MODEL_NAME:
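For context, the second hunk feeds the default-model lookup visible at its bottom. A minimal sketch of how that resolution works (the loop body after the `if` is cut off in the diff, so the assignment and `break` are assumptions):

    DEFAULT_MODEL_NAME = "Grok 4.1 Fast"
    DEFAULT_MODEL = None
    for _m in AVAILABLE_MODELS:
        if _m.get("name") == DEFAULT_MODEL_NAME:
            DEFAULT_MODEL = _m  # assumed: the diff truncates the loop body
            break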
anycoder_app/deploy.py
CHANGED
@@ -2318,7 +2318,7 @@ def _fetch_inference_provider_code(model_id: str) -> Optional[str]:
         "codestral-2508", "mistral-medium-2508",
         "stealth-model-1",
         "openrouter/sonoma-dusk-alpha", "openrouter/sonoma-sky-alpha",
-        "
+        "x-ai/grok-4.1-fast", "openrouter/sherlock-think-alpha"
     ]

     if model_id in non_hf_models:
@@ -2422,7 +2422,7 @@ def import_model_from_hf(model_id: str, prefer_local: bool = False) -> Tuple[str
         "codestral-2508", "mistral-medium-2508",
         "stealth-model-1",
         "openrouter/sonoma-dusk-alpha", "openrouter/sonoma-sky-alpha",
-        "
+        "x-ai/grok-4.1-fast", "openrouter/sherlock-think-alpha"
     ]

     if model_id in non_hf_models:
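Both hunks extend the same non_hf_models guard: OpenRouter-served models are not Hugging Face repos, so the import paths must skip them. A minimal sketch of the pattern, assuming the membership test short-circuits the HF fetch (the branch body is not shown in the diff):

    non_hf_models = [
        "openrouter/sonoma-dusk-alpha", "openrouter/sonoma-sky-alpha",
        "x-ai/grok-4.1-fast", "openrouter/sherlock-think-alpha",
    ]
    if model_id in non_hf_models:
        return None  # assumed: no HF repo to fetch for provider-hosted models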
anycoder_app/models.py
CHANGED
@@ -124,8 +124,8 @@ def get_inference_client(model_id, provider="auto"):
             api_key=os.getenv("OPENROUTER_API_KEY"),
             base_url="https://openrouter.ai/api/v1",
         )
-    elif model_id == "
-        # Use OpenRouter client for
+    elif model_id == "x-ai/grok-4.1-fast":
+        # Use OpenRouter client for Grok 4.1 Fast model
         return OpenAI(
             api_key=os.getenv("OPENROUTER_API_KEY"),
             base_url="https://openrouter.ai/api/v1",
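OpenRouter exposes an OpenAI-compatible API, so the client this branch returns can be driven with the standard chat-completions call. A hedged usage sketch (the prompt and call site are illustrative, not taken from the repo):

    import os
    from openai import OpenAI

    client = OpenAI(
        api_key=os.getenv("OPENROUTER_API_KEY"),
        base_url="https://openrouter.ai/api/v1",
    )
    # Illustrative request; anycoder's real prompts and streaming differ.
    resp = client.chat.completions.create(
        model="x-ai/grok-4.1-fast",
        messages=[{"role": "user", "content": "Write a hello-world HTML page."}],
    )
    print(resp.choices[0].message.content)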
backend_api.py
CHANGED
@@ -66,7 +66,7 @@ print("[Startup] System prompts initialization complete")
 # Define models and languages here to avoid importing Gradio UI
 AVAILABLE_MODELS = [
     {"name": "Gemini 3.0 Pro", "id": "gemini-3.0-pro", "description": "Google Gemini 3.0 Pro via Poe with advanced reasoning"},
-    {"name": "
+    {"name": "Grok 4.1 Fast", "id": "x-ai/grok-4.1-fast", "description": "Grok 4.1 Fast model via OpenRouter"},
     {"name": "MiniMax M2", "id": "MiniMaxAI/MiniMax-M2", "description": "MiniMax M2 model via HuggingFace InferenceClient with Novita provider"},
     {"name": "DeepSeek V3.2-Exp", "id": "deepseek-ai/DeepSeek-V3.2-Exp", "description": "DeepSeek V3.2 Experimental via HuggingFace"},
     {"name": "DeepSeek R1", "id": "deepseek-ai/DeepSeek-R1-0528", "description": "DeepSeek R1 model for code generation"},
@@ -115,7 +115,7 @@ user_sessions = {}
 class CodeGenerationRequest(BaseModel):
     query: str
     language: str = "html"
-    model_id: str = "
+    model_id: str = "x-ai/grok-4.1-fast"
     provider: str = "auto"
     history: List[List[str]] = []
     agent_mode: bool = False
@@ -769,7 +769,7 @@ async def websocket_generate(websocket: WebSocket):

     query = data.get("query")
     language = data.get("language", "html")
-    model_id = data.get("model_id", "
+    model_id = data.get("model_id", "x-ai/grok-4.1-fast")

     # Send acknowledgment
     await websocket.send_json({
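Because model_id is a Pydantic field default, REST and WebSocket clients that omit the field now get Grok 4.1 Fast automatically. A sketch of a minimal request (the /generate path and port are assumptions; only the payload fields come from CodeGenerationRequest):

    import requests

    payload = {
        "query": "Build a todo app",
        "language": "html",
        # "model_id" omitted -> server default "x-ai/grok-4.1-fast"
    }
    r = requests.post("http://localhost:8000/generate", json=payload)  # endpoint assumed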
frontend/src/app/page.tsx
CHANGED
@@ -17,7 +17,7 @@ export default function Home() {

   const [generatedCode, setGeneratedCode] = useState('');
   const [selectedLanguage, setSelectedLanguage] = useState<Language>('html');
-  const [selectedModel, setSelectedModel] = useState('
+  const [selectedModel, setSelectedModel] = useState('x-ai/grok-4.1-fast');
   const [isGenerating, setIsGenerating] = useState(false);
   const [isAuthenticated, setIsAuthenticated] = useState(false);
   const [currentRepoId, setCurrentRepoId] = useState<string | null>(null); // Track imported/deployed space
frontend/src/components/LandingPage.tsx
CHANGED
@@ -26,7 +26,7 @@ export default function LandingPage({
   onStart,
   isAuthenticated,
   initialLanguage = 'html',
-  initialModel = '
+  initialModel = 'x-ai/grok-4.1-fast',
   onAuthChange
 }: LandingPageProps) {
   const [prompt, setPrompt] = useState('');