| """ |
| 나의 앱 |
| 안녕하세요 |
| Generated by MOUSE Workflow |
| """ |
|
|
| import os |
| import json |
| import gradio as gr |
| import requests |
|
|
| |
# Workflow graph exported by the MOUSE visual editor.
# "nodes" are the processing steps (keyed by "id"); "edges" wire a source
# node's output into a target node's input. "position" is editor layout
# only and has no runtime effect.
WORKFLOW_DATA = {
    "nodes": [
        # Chat input node: its "input_value" is the default prompt shown in the UI.
        {
            "id": "input_1",
            "type": "ChatInput",
            "position": {
                "x": 100,
                "y": 200
            },
            "data": {
                "label": "User Question",
                "template": {
                    "input_value": {
                        "value": "What is the capital of Korea?"
                    }
                }
            }
        },
        # LLM node: provider/model/temperature/system_prompt drive the API call
        # made in execute_workflow().
        {
            "id": "llm_1",
            "type": "llmNode",
            "position": {
                "x": 400,
                "y": 200
            },
            "data": {
                "label": "AI Processing",
                "template": {
                    "provider": {
                        "value": "VIDraft"
                    },
                    "model": {
                        "value": "Gemma-3-r1984-27B"
                    },
                    "temperature": {
                        "value": 0.7
                    },
                    "system_prompt": {
                        "value": "You are a helpful assistant."
                    }
                }
            }
        },
        # Chat output node: mirrors whatever result feeds into it.
        {
            "id": "output_1",
            "type": "ChatOutput",
            "position": {
                "x": 700,
                "y": 200
            },
            "data": {
                "label": "Answer"
            }
        }
    ],
    # Linear pipeline: input_1 -> llm_1 -> output_1.
    "edges": [
        {
            "id": "e1",
            "source": "input_1",
            "target": "llm_1"
        },
        {
            "id": "e2",
            "source": "llm_1",
            "target": "output_1"
        }
    ]
}
|
|
| def execute_workflow(*input_values): |
| """Execute the workflow with given inputs""" |
| |
| |
| vidraft_token = os.getenv("FRIENDLI_TOKEN") |
| openai_key = os.getenv("OPENAI_API_KEY") |
| |
| nodes = WORKFLOW_DATA.get("nodes", []) |
| edges = WORKFLOW_DATA.get("edges", []) |
| |
| results = {} |
| |
| |
| input_nodes = [n for n in nodes if n.get("type") in ["ChatInput", "textInput", "Input", "numberInput"]] |
| |
| |
| for i, node in enumerate(input_nodes): |
| if i < len(input_values): |
| results[node["id"]] = input_values[i] |
| |
| |
| for node in nodes: |
| node_id = node.get("id") |
| node_type = node.get("type", "") |
| node_data = node.get("data", {}) |
| template = node_data.get("template", {}) |
| |
| if node_type == "textNode": |
| |
| base_text = template.get("text", {}).get("value", "") |
| connected_inputs = [] |
| |
| for edge in edges: |
| if edge.get("target") == node_id: |
| source_id = edge.get("source") |
| if source_id in results: |
| connected_inputs.append(f"{source_id}: {results[source_id]}") |
| |
| if connected_inputs: |
| results[node_id] = f"{base_text}\n\nInputs:\n" + "\n".join(connected_inputs) |
| else: |
| results[node_id] = base_text |
| |
| elif node_type in ["llmNode", "OpenAIModel", "ChatModel"]: |
| |
| provider = template.get("provider", {}).get("value", "VIDraft") |
| if provider not in ["VIDraft", "OpenAI"]: |
| provider = "VIDraft" |
| temperature = template.get("temperature", {}).get("value", 0.7) |
| system_prompt = template.get("system_prompt", {}).get("value", "") |
| |
| |
| input_text = "" |
| for edge in edges: |
| if edge.get("target") == node_id: |
| source_id = edge.get("source") |
| if source_id in results: |
| input_text = results[source_id] |
| break |
| |
| |
| if provider == "OpenAI" and openai_key: |
| try: |
| from openai import OpenAI |
| client = OpenAI(api_key=openai_key) |
| |
| messages = [] |
| if system_prompt: |
| messages.append({"role": "system", "content": system_prompt}) |
| messages.append({"role": "user", "content": input_text}) |
| |
| response = client.chat.completions.create( |
| model="gpt-4.1-mini", |
| messages=messages, |
| temperature=temperature, |
| max_tokens=1000 |
| ) |
| |
| results[node_id] = response.choices[0].message.content |
| except Exception as e: |
| results[node_id] = f"[OpenAI Error: {str(e)}]" |
| |
| elif provider == "VIDraft" and vidraft_token: |
| try: |
| headers = { |
| "Authorization": f"Bearer {vidraft_token}", |
| "Content-Type": "application/json" |
| } |
| |
| messages = [] |
| if system_prompt: |
| messages.append({"role": "system", "content": system_prompt}) |
| messages.append({"role": "user", "content": input_text}) |
| |
| payload = { |
| "model": "dep89a2fld32mcm", |
| "messages": messages, |
| "max_tokens": 16384, |
| "temperature": temperature, |
| "top_p": 0.8, |
| "stream": False |
| } |
| |
| response = requests.post( |
| "https://api.friendli.ai/dedicated/v1/chat/completions", |
| headers=headers, |
| json=payload, |
| timeout=30 |
| ) |
| |
| if response.status_code == 200: |
| results[node_id] = response.json()["choices"][0]["message"]["content"] |
| else: |
| results[node_id] = f"[VIDraft Error: {response.status_code}]" |
| except Exception as e: |
| results[node_id] = f"[VIDraft Error: {str(e)}]" |
| else: |
| |
| if provider == "OpenAI": |
| results[node_id] = "[OpenAI API key not found. Please set OPENAI_API_KEY in Space secrets]" |
| elif provider == "VIDraft": |
| results[node_id] = "[VIDraft API key not found. Please set FRIENDLI_TOKEN in Space secrets]" |
| else: |
| results[node_id] = f"[No API key found for {provider}. Using simulated response: {input_text[:50]}...]" |
| |
| elif node_type in ["ChatOutput", "textOutput", "Output"]: |
| |
| for edge in edges: |
| if edge.get("target") == node_id: |
| source_id = edge.get("source") |
| if source_id in results: |
| results[node_id] = results[source_id] |
| break |
| |
| |
| output_nodes = [n for n in nodes if n.get("type") in ["ChatOutput", "textOutput", "Output"]] |
| return [results.get(n["id"], "") for n in output_nodes] |
|
|
| |
# ---------------------------------------------------------------------------
# Gradio UI, built once at import time from the workflow definition.
# ---------------------------------------------------------------------------
with gr.Blocks(title="나의 앱", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 나의 앱")
    gr.Markdown("안녕하세요")

    # Credentials are read here only to render the status panel; the
    # workflow re-reads them on every execution.
    friendli_token = os.getenv("FRIENDLI_TOKEN")
    openai_token = os.getenv("OPENAI_API_KEY")

    with gr.Accordion("🔑 API Status", open=False):
        gr.Markdown(
            "✅ **VIDraft API**: Connected (Gemma-3-r1984-27B)"
            if friendli_token
            else "❌ **VIDraft API**: Not configured"
        )
        gr.Markdown(
            "✅ **OpenAI API**: Connected (gpt-4.1-mini)"
            if openai_token
            else "⚠️ **OpenAI API**: Not configured (optional)"
        )

        if not friendli_token:
            gr.Markdown("""
        **⚠️ Important**: Please add FRIENDLI_TOKEN to Space secrets for the app to work properly.

        Go to: Space settings → Repository secrets → Add secret
        """)
        elif not openai_token:
            gr.Markdown("""
        **💡 Tip**: The app will work with VIDraft alone. Add OPENAI_API_KEY if you need OpenAI features.
        """)
        else:
            gr.Markdown("**✨ All APIs configured! Your app is fully functional.**")

    # Derive UI controls from the workflow graph, preserving node order.
    workflow_nodes = WORKFLOW_DATA.get("nodes", [])
    in_nodes = [n for n in workflow_nodes if n.get("type") in ["ChatInput", "textInput", "Input", "numberInput"]]
    out_nodes = [n for n in workflow_nodes if n.get("type") in ["ChatOutput", "textOutput", "Output"]]

    # One widget per input node; numbers get a Number control, all else a Textbox.
    input_components = []
    if in_nodes:
        gr.Markdown("### 📥 Inputs")
        for n in in_nodes:
            node_data = n.get("data", {})
            caption = node_data.get("label", n.get("id"))
            preset = node_data.get("template", {}).get("input_value", {}).get("value", "")

            if n.get("type") == "numberInput":
                widget = gr.Number(label=caption, value=float(preset) if preset else 0)
            else:
                widget = gr.Textbox(label=caption, value=preset, lines=2)
            input_components.append(widget)

    run_button = gr.Button("🚀 Execute Workflow", variant="primary")

    # One read-only textbox per output node.
    output_components = []
    if out_nodes:
        gr.Markdown("### 📤 Outputs")
        for n in out_nodes:
            caption = n.get("data", {}).get("label", n.get("id"))
            output_components.append(gr.Textbox(label=caption, interactive=False, lines=3))

    run_button.click(fn=execute_workflow, inputs=input_components, outputs=output_components)

    gr.Markdown("---")
    gr.Markdown("*Powered by MOUSE Workflow*")
|
|
# Launch the Gradio server only when this file is executed directly
# (not when imported as a module).
if __name__ == "__main__":
    demo.launch()
|
|