# ASICForge — app.py
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from huggingface_hub import InferenceClient
import json
# FastAPI application exposing one HTTP route (the UI page) and four
# WebSocket endpoints that stream LLM output for ASIC design workflows.
app = FastAPI()
# Initialize the Hugging Face Inference Client
# NOTE(review): constructed with no model or token arguments — presumably
# relies on environment defaults (e.g. HF_TOKEN); every call site below
# names the model explicitly, so only auth comes from the environment.
client = InferenceClient()
# Serve the static HTML file
app.mount("/static", StaticFiles(directory="static"), name="static")
@app.get("/")
async def get():
    """Serve the single-page ASIC design UI.

    Returns:
        HTMLResponse: the contents of the static front-end page.
    """
    # Context manager closes the file handle (the original `open(...).read()`
    # leaked it); explicit encoding avoids platform-dependent decoding.
    with open("static/asic-design-code-generator.html", encoding="utf-8") as f:
        return HTMLResponse(f.read())
@app.websocket("/ws/generate_code")
async def generate_code(websocket: WebSocket):
    """Stream LLM-generated code for an ASIC design description.

    Protocol: the client sends JSON messages of the form
    ``{"description": str, "language": str}``; for each message the server
    streams back the *cumulative* generated code after every model chunk,
    so the client can simply replace its display buffer.
    """
    await websocket.accept()
    try:
        while True:
            data = await websocket.receive_text()
            request_data = json.loads(data)
            description = request_data.get("description")
            language = request_data.get("language")
            # Ask the model for bare code only, no commentary.
            prompt = f"Generate {language} code only for the following ASIC design description. Provide the code without any additional explanation or comments.\n\n\n{description}"
            messages = [{"role": "user", "content": prompt}]
            generated_code = ""
            stream = client.chat.completions.create(
                model="Qwen/Qwen2.5-Coder-32B-Instruct",
                messages=messages,
                temperature=0.5,
                max_tokens=4000,
                top_p=0.7,
                stream=True,
            )
            for chunk in stream:
                # The terminal streaming chunk can carry delta.content=None
                # (finish marker); the original `+= content` raised TypeError
                # on it. Guard and skip empty deltas.
                delta = chunk.choices[0].delta.content
                if delta:
                    generated_code += delta
                    await websocket.send_text(generated_code)
    except WebSocketDisconnect:
        print("Client disconnected")
@app.websocket("/ws/refine_code")
async def refine_code(websocket: WebSocket):
    """Stream an LLM-refined version of code supplied by the client.

    Protocol: the client sends JSON messages of the form
    ``{"existing_code": str, "language": str}``; for each message the server
    streams back the *cumulative* refined code after every model chunk.
    """
    await websocket.accept()
    try:
        while True:
            data = await websocket.receive_text()
            request_data = json.loads(data)
            existing_code = request_data.get("existing_code")
            language = request_data.get("language")
            # Ask the model for updated code only, no commentary.
            prompt = f"Refine the following {language} code based on the provided details. Return only the updated code, without explanations or comments.\n\n\n{existing_code}"
            messages = [{"role": "user", "content": prompt}]
            refined_code = ""
            stream = client.chat.completions.create(
                model="Qwen/Qwen2.5-Coder-32B-Instruct",
                messages=messages,
                temperature=0.5,
                max_tokens=4000,
                top_p=0.7,
                stream=True,
            )
            for chunk in stream:
                # Terminal chunk may have delta.content=None (finish marker);
                # the original crashed with TypeError on `+= None`.
                delta = chunk.choices[0].delta.content
                if delta:
                    refined_code += delta
                    await websocket.send_text(refined_code)
    except WebSocketDisconnect:
        print("Client disconnected")
@app.websocket("/ws/optimize_code")
async def optimize_code(websocket: WebSocket):
    """Stream LLM analysis (syntax, linting, optimization) of client code.

    Protocol: the client sends JSON messages of the form
    ``{"code": str, "language": str}``; the server streams back the
    cumulative analysis text after every model chunk.
    """
    await websocket.accept()
    try:
        # Loop so one connection can serve multiple requests — the original
        # handled a single message and returned, closing the socket, unlike
        # the sibling generate/refine endpoints.
        while True:
            data = await websocket.receive_text()
            request_data = json.loads(data)
            code = request_data.get("code")
            language = request_data.get("language")
            prompt = f"Analyze the following {language} code for syntax, linting, and optimization suggestions:\n\n{code}"
            messages = [{"role": "user", "content": prompt}]
            analysis = ""
            stream = client.chat.completions.create(
                model="Qwen/Qwen2.5-Coder-32B-Instruct",
                messages=messages,
                temperature=0.5,
                max_tokens=4000,
                top_p=0.7,
                stream=True,
            )
            for chunk in stream:
                # Terminal chunk may have delta.content=None (finish marker);
                # the original crashed with TypeError on `+= None`.
                delta = chunk.choices[0].delta.content
                if delta:
                    analysis += delta
                    await websocket.send_text(analysis)
    except WebSocketDisconnect:
        print("Client disconnected")
# WebSocket for Failure Mode Analysis and Test Bench Generation
@app.websocket("/ws/generate_test_bench")
async def generate_test_bench(websocket: WebSocket):
    """Stream LLM-generated test benches and failure-mode notes for code.

    Protocol: the client sends JSON messages of the form
    ``{"code": str, "language": str}``; the server streams back the
    cumulative generated text after every model chunk.
    """
    await websocket.accept()
    try:
        # Loop so one connection can serve multiple requests, matching the
        # generate/refine endpoints (the original returned after one message).
        while True:
            data = await websocket.receive_text()
            request_data = json.loads(data)
            code = request_data.get("code")
            language = request_data.get("language")
            prompt = f"Generate test benches and identify potential failure modes for the following {language} code:\n\n{code}"
            messages = [{"role": "user", "content": prompt}]
            test_bench = ""
            stream = client.chat.completions.create(
                model="Qwen/Qwen2.5-Coder-32B-Instruct",
                messages=messages,
                temperature=0.5,
                # NOTE(review): 512 tokens is far below the 4000 used by the
                # other endpoints and may truncate test benches — confirm
                # whether this limit is intentional before raising it.
                max_tokens=512,
                top_p=0.7,
                stream=True,
            )
            for chunk in stream:
                # Terminal chunk may have delta.content=None (finish marker);
                # the original crashed with TypeError on `+= None`.
                delta = chunk.choices[0].delta.content
                if delta:
                    test_bench += delta
                    await websocket.send_text(test_bench)
    except WebSocketDisconnect:
        print("Client disconnected")