update import project
Files changed:
- anycoder_app/deploy.py +3 -3
- backend_api.py +8 -5
- frontend/src/lib/api.ts +89 -34
anycoder_app/deploy.py
CHANGED
@@ -223,7 +223,7 @@ Generate the exact search/replace blocks needed to make these changes."""
         response = client.chat.completions.create(
             model=get_real_model_id(_current_model['id']),
             messages=messages,
-            max_tokens=
+            max_tokens=10000,
             temperature=0.1
         )
         changes_text = response.choices[0].message.content

@@ -231,7 +231,7 @@ Generate the exact search/replace blocks needed to make these changes."""
         response = client.chat.complete(
             model=get_real_model_id(_current_model['id']),
             messages=messages,
-            max_tokens=
+            max_tokens=10000,
             temperature=0.1
         )
         changes_text = response.choices[0].message.content

@@ -239,7 +239,7 @@ Generate the exact search/replace blocks needed to make these changes."""
         completion = client.chat.completions.create(
             model=get_real_model_id(_current_model['id']),
             messages=messages,
-            max_tokens=
+            max_tokens=10000,
             temperature=0.1
         )
         changes_text = completion.choices[0].message.content
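Note: the same 10000-token cap is now hard-coded at all three call sites (two client.chat.completions.create calls and one client.chat.complete call). A minimal sketch of one call site with the cap lifted into a module-level constant; MAX_EDIT_TOKENS is a hypothetical name and not part of this commit:

MAX_EDIT_TOKENS = 10000  # hypothetical constant, not in the commit

response = client.chat.completions.create(
    model=get_real_model_id(_current_model['id']),
    messages=messages,
    max_tokens=MAX_EDIT_TOKENS,  # same cap applied in the diff above
    temperature=0.1,
)
changes_text = response.choices[0].message.content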
backend_api.py
CHANGED
@@ -345,18 +345,21 @@ async def auth_status(authorization: Optional[str] = Header(None)):
     )
 
 
-@app.
+@app.post("/api/generate")
 async def generate_code(
-
-    language: str = "html",
-    model_id: str = "openrouter/sherlock-dash-alpha",
-    provider: str = "auto",
+    request: CodeGenerationRequest,
     authorization: Optional[str] = Header(None)
 ):
     """Generate code based on user query - returns streaming response"""
     # Dev mode: No authentication required - just use server's HF_TOKEN
     # In production, you would check real OAuth tokens here
 
+    # Extract parameters from request body
+    query = request.query
+    language = request.language
+    model_id = request.model_id
+    provider = request.provider
+
     async def event_stream() -> AsyncGenerator[str, None]:
         """Stream generated code chunks"""
         # Use the model_id from outer scope
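Note: the new handler reads its parameters from a CodeGenerationRequest body, whose definition is outside this hunk. A minimal Pydantic sketch consistent with the fields the handler reads and with the defaults dropped from the old query-parameter signature (an assumption; the actual model in backend_api.py may differ):

from pydantic import BaseModel

class CodeGenerationRequest(BaseModel):
    # Field names mirror what generate_code reads from the request body;
    # the defaults are carried over from the old query parameters and are
    # an assumption here.
    query: str
    language: str = "html"
    model_id: str = "openrouter/sherlock-dash-alpha"
    provider: str = "auto"

Sending these fields in a POST body rather than the query string lines up with the frontend change below, which switches to fetch with POST to support large payloads.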
frontend/src/lib/api.ts
CHANGED
@@ -97,7 +97,7 @@ class ApiClient {
     }
   }
 
-  // Stream-based code generation using
+  // Stream-based code generation using Fetch API with streaming (supports POST)
   generateCodeStream(
     request: CodeGenerationRequest,
     onChunk: (content: string) => void,

@@ -107,45 +107,100 @@ class ApiClient {
     // Build the URL correctly whether we have a base URL or not
     const baseUrl = API_URL || window.location.origin;
     const url = new URL('/api/generate', baseUrl);
-    url.search = new URLSearchParams({
-      query: request.query,
-      language: request.language,
-      model_id: request.model_id,
-      provider: request.provider,
-    }).toString();
 
-
-
-
-
-
-
+    let abortController = new AbortController();
+    let accumulatedCode = '';
+    let buffer = ''; // Buffer for incomplete SSE lines
+
+    // Use fetch with POST to support large payloads
+    fetch(url.toString(), {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+        ...(this.token ? { 'Authorization': `Bearer ${this.token}` } : {}),
+      },
+      body: JSON.stringify(request),
+      signal: abortController.signal,
+    })
+      .then(async (response) => {
+        if (!response.ok) {
+          throw new Error(`HTTP error! status: ${response.status}`);
+        }
 
-    if (
-
-      } else if (data.type === 'complete' && data.code) {
-        console.log('[SSE] Generation complete, total code length:', data.code.length);
-        onComplete(data.code);
-        eventSource.close();
-      } else if (data.type === 'error') {
-        console.error('[SSE] Error:', data.message);
-        onError(data.message || 'Unknown error occurred');
-        eventSource.close();
+        if (!response.body) {
+          throw new Error('Response body is null');
         }
-
-
-
-
-
-
-
-
-
-
+
+        const reader = response.body.getReader();
+        const decoder = new TextDecoder();
+
+        while (true) {
+          const { done, value } = await reader.read();
+
+          if (done) {
+            console.log('[Stream] Stream ended, total code length:', accumulatedCode.length);
+            if (accumulatedCode) {
+              onComplete(accumulatedCode);
+            }
+            break;
+          }
+
+          // Decode chunk and add to buffer
+          buffer += decoder.decode(value, { stream: true });
+
+          // Process complete SSE messages (ending with \n\n)
+          const messages = buffer.split('\n\n');
+
+          // Keep the last incomplete message in the buffer
+          buffer = messages.pop() || '';
+
+          // Process each complete message
+          for (const message of messages) {
+            if (!message.trim()) continue;
+
+            // Parse SSE format: "data: {...}"
+            const lines = message.split('\n');
+            for (const line of lines) {
+              if (line.startsWith('data: ')) {
+                try {
+                  const jsonStr = line.substring(6);
+                  const data = JSON.parse(jsonStr);
+                  console.log('[Stream] Received event:', data.type, data.content?.substring(0, 50));
+
+                  if (data.type === 'chunk' && data.content) {
+                    accumulatedCode += data.content;
+                    onChunk(data.content);
+                  } else if (data.type === 'complete') {
+                    console.log('[Stream] Generation complete, total code length:', data.code?.length || accumulatedCode.length);
+                    // Use the complete code from the message if available, otherwise use accumulated
+                    const finalCode = data.code || accumulatedCode;
+                    onComplete(finalCode);
+                    return; // Exit the processing loop
+                  } else if (data.type === 'error') {
+                    console.error('[Stream] Error:', data.message);
+                    onError(data.message || 'Unknown error occurred');
+                    return; // Exit the processing loop
+                  }
+                } catch (error) {
+                  console.error('Error parsing SSE data:', error, 'Line:', line);
+                }
+              }
+            }
+          }
+        }
+      })
+      .catch((error) => {
+        if (error.name === 'AbortError') {
+          console.log('[Stream] Request aborted');
+          return;
+        }
+        console.error('[Stream] Fetch error:', error);
+        onError(error.message || 'Connection error occurred');
+      });
 
     // Return cleanup function
     return () => {
-
+      abortController.abort();
     };
   }
 
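Note: the new parser expects each server event to arrive as a "data: {...}" line terminated by a blank line, carrying a type of 'chunk', 'complete', or 'error', and the cleanup function returned to the caller now aborts the in-flight fetch via AbortController. A minimal sketch of a generator producing events in that shape (an illustration inferred from the parsing code above; the real event_stream in backend_api.py may format its events differently):

import asyncio
import json
from typing import AsyncGenerator


async def generate_chunks() -> AsyncGenerator[str, None]:
    # Placeholder chunk source; the real generator streams from the model client.
    for piece in ("<html>", "<body>Hello</body>", "</html>"):
        yield piece


async def event_stream() -> AsyncGenerator[str, None]:
    """Yield SSE-framed events in the shape the fetch-based client parses."""
    accumulated = ""
    try:
        async for piece in generate_chunks():
            accumulated += piece
            yield f"data: {json.dumps({'type': 'chunk', 'content': piece})}\n\n"
        yield f"data: {json.dumps({'type': 'complete', 'code': accumulated})}\n\n"
    except Exception as exc:  # surface failures to the client as an 'error' event
        yield f"data: {json.dumps({'type': 'error', 'message': str(exc)})}\n\n"


async def main() -> None:
    async for event in event_stream():
        print(event, end="")


if __name__ == "__main__":
    asyncio.run(main())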