# NOTE: The lines that originally appeared here ("Spaces: Running", file size,
# git blame hashes, and a line-number gutter) are Hugging Face Spaces page
# chrome that leaked into the scrape of this file. They are not Python source,
# so they are reduced to this comment to keep the module importable.
import gradio as gr
from huggingface_hub import InferenceClient
import re
# Module-wide Hugging Face Inference API client; every generation request in
# this app streams chat completions from the hosted zephyr-7b-beta model.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
def clean_code_blocks(raw_response):
"""
Extract code for each file from a structured LLM response
Expected format:
index.html
<code>
static/style.css
<code>
"""
parts = re.split(r"(?:\n|^)([^\n\/\\<>:\"|?*]+(?:\.[a-z]+)?(?:\/[^\n]+)?)\n", raw_response)
file_blocks = {}
for i in range(1, len(parts), 2):
filename = parts[i].strip()
code = parts[i + 1].strip()
if filename and code:
file_blocks[filename] = code
return file_blocks
def generate_code(prompt, system_message, max_tokens, temperature, top_p, backend):
    """Ask the LLM for a multi-file website and wrap each file in Gradio components.

    A fixed instruction block (with ``backend`` interpolated) is prepended to
    ``system_message``; the chat completion is streamed, accumulated in full,
    then parsed with ``clean_code_blocks``.

    Parameters:
        prompt: user description of the website to generate.
        system_message: extra system text appended after the instruction block.
        max_tokens / temperature / top_p: sampling settings forwarded verbatim
            to ``client.chat_completion``.
        backend: backend name interpolated into the system prompt
            (e.g. "Flask", "Static", "Node.js").

    Returns:
        A flat list alternating ``gr.TabItem`` / ``gr.Code``, one pair per
        parsed file.

    NOTE(review): although the request sets ``stream=True``, nothing is
    yielded -- the UI only sees the final return value, so there is no
    incremental display. Also, returning freshly constructed TabItem/Code
    components toward a ``gr.Group`` output is not a supported way to render
    new components in current Gradio releases (component trees must be built
    inside the Blocks context or via ``gr.render``) -- confirm against the
    installed Gradio version.
    """
    full_system_prompt = f"""
You are a code-generation AI. You MUST generate a full website including an index.html file.
Use ONLY the {backend} backend structure.
Output all code for each file separately using this format:
filename.ext
<code without backticks>
Do NOT add commentary, do NOT use markdown. Output raw code only.
""".strip() + "\n\n" + system_message
    messages = [
        {"role": "system", "content": full_system_prompt},
        {"role": "user", "content": prompt}
    ]
    # Accumulate streamed deltas into one string; None/empty deltas
    # (e.g. the terminal chunk) are skipped.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token
    # Parse and display each file in its own tab
    files = clean_code_blocks(response)
    tabs = []
    for filename, code in files.items():
        tabs.append(gr.TabItem(label=filename, elem_id=filename))
        # Highlighting language is chosen from the extension; anything other
        # than .html/.py/.js falls back to "css".
        tabs.append(gr.Code(value=code, language="html" if filename.endswith(".html") else "python" if filename.endswith(".py") else "javascript" if filename.endswith(".js") else "css", label=filename))
    return tabs
# UI definition: one prompt box plus sampling controls, wired to generate_code.
with gr.Blocks() as demo:
    gr.Markdown("## WebGen AI β One Prompt β Multi-File Website Generator")

    # Input row: the prompt and the backend flavour the model must target.
    with gr.Row():
        prompt = gr.Textbox(label="Enter Prompt", placeholder="Describe the website you want...")
        backend = gr.Dropdown(["Flask", "Static", "Node.js"], value="Static", label="Select Backend")

    # Sampling controls forwarded verbatim to the chat-completion call.
    system_msg = gr.Textbox(value="You are a helpful assistant.", label="System Message")
    max_tokens = gr.Slider(1, 2048, value=1024, label="Max Tokens")
    temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature")
    top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")

    run_btn = gr.Button("Generate Code")
    # NOTE(review): generate_code returns a list of freshly built
    # TabItem/Code components, but a gr.Group output cannot receive new
    # children from an event handler in current Gradio -- confirm this
    # wiring actually renders anything.
    tabs_output = gr.Group()

    # Fix: the original routed the click through a one-line wrapper that
    # merely forwarded *args to generate_code; bind the handler directly.
    run_btn.click(
        generate_code,
        inputs=[prompt, system_msg, max_tokens, temperature, top_p, backend],
        outputs=tabs_output,
    )
if __name__ == "__main__":
    # Fix: the source ended with a stray " |" (scrape residue from the Spaces
    # page table) after this call, which is a SyntaxError; it is removed.
    # Launch the app only when run as a script, not on import.
    demo.launch()