Spaces:
Running
Running
Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from transformers import pipeline
|
| 3 |
+
|
| 4 |
+
# ---------- Model Pipelines ----------
# NOTE(review): all pipelines are created eagerly at import time; each is a
# multi-GB model, so startup is slow and memory-heavy. Consider lazy loading.
prompt_refiner = pipeline("text-generation", model="mistralai/Mixtral-8x7B-Instruct-v0.1")
code_model = pipeline("text-generation", model="codellama/CodeLlama-7b-Instruct-hf")
# The book model is the exact same checkpoint as the prompt refiner; reuse the
# already-loaded pipeline instead of loading Mixtral-8x7B a second time.
book_model = prompt_refiner
critic_1 = pipeline("text-generation", model="google/gemma-2-9b-it")
critic_2 = pipeline("text-generation", model="meta-llama/Meta-Llama-3-8B-Instruct")
|
| 10 |
+
|
| 11 |
+
# ---------- Core Logic ----------
|
| 12 |
+
|
| 13 |
+
def refine_prompt(idea):
    """Turn a raw user idea into a polished generation prompt.

    Args:
        idea: Free-form idea text entered by the user.

    Returns:
        The refined prompt as a stripped string.
    """
    # return_full_text=False keeps the instruction preamble out of the result;
    # by default HF text-generation pipelines echo the input prompt back.
    refined = prompt_refiner(
        f"Refine this creative idea into a high-quality prompt: {idea}",
        max_new_tokens=200,
        return_full_text=False,
    )[0]["generated_text"]
    return refined.strip()
|
| 16 |
+
|
| 17 |
+
def generate_code(prompt):
    """Generate code in three refinement stages.

    Args:
        prompt: The (already refined) task description.

    Returns:
        Tuple of stripped strings: (pseudocode, simple snippet, full program).
    """

    def _gen(instruction, tokens):
        # return_full_text=False: without it the pipeline echoes the
        # instruction, and each stage would feed that echo into the next
        # stage's prompt, compounding the noise.
        return code_model(instruction, max_new_tokens=tokens, return_full_text=False)[0]["generated_text"]

    pseudo = _gen(f"Create simple pseudocode for: {prompt}", 200)
    simple = _gen(f"Expand this pseudocode into a simple code snippet:\n{pseudo}", 300)
    full = _gen(f"Turn this snippet into a complete, working program:\n{simple}", 700)
    return pseudo.strip(), simple.strip(), full.strip()
|
| 22 |
+
|
| 23 |
+
def generate_book(prompt):
    """Write a short book section-by-section and return it as markdown.

    Args:
        prompt: The (already refined) book idea.

    Returns:
        A single markdown string with one "### <Section>" heading per part.
    """
    structure = ["Start", "Development", "Climax", "Conclusion", "End"]
    parts = []
    for section in structure:
        # return_full_text=False so each section contains only the generated
        # prose, not the echoed instruction prompt.
        part = book_model(
            f"Write the {section} section of a short book based on this idea: {prompt}",
            max_new_tokens=500,
            return_full_text=False,
        )[0]["generated_text"]
        parts.append(f"### {section}\n{part.strip()}\n")
    return "\n".join(parts)
|
| 30 |
+
|
| 31 |
+
def get_critic_feedback(output_text):
    """Ask two independent critic models to score and explain a text.

    Args:
        output_text: The generated text to critique.

    Returns:
        Tuple of stripped strings: (critic 1 feedback, critic 2 feedback).
    """
    # Same instruction for both critics; build it once.
    instruction = f"Rate this text from 0 to 100 and explain why:\n{output_text}"
    # return_full_text=False so the feedback boxes don't repeat the entire
    # text being rated plus the instruction.
    critique_1 = critic_1(instruction, max_new_tokens=200, return_full_text=False)[0]["generated_text"]
    critique_2 = critic_2(instruction, max_new_tokens=200, return_full_text=False)[0]["generated_text"]
    return critique_1.strip(), critique_2.strip()
|
| 35 |
+
|
| 36 |
+
def refine_output_based_on_critics(output_text, feedback1, feedback2):
    """Rewrite a generated text so it addresses both critics' feedback.

    Args:
        output_text: The original generated text.
        feedback1: Critic 1's rating and explanation.
        feedback2: Critic 2's rating and explanation.

    Returns:
        The refined text as a stripped string.
    """
    combined_feedback = f"Critic 1: {feedback1}\nCritic 2: {feedback2}"
    # return_full_text=False: the "final refined version" should be only the
    # rewrite, not the echoed feedback + original text prompt.
    refined = prompt_refiner(
        f"Refine this text based on the critics' feedback:\n{combined_feedback}\nOriginal text:\n{output_text}",
        max_new_tokens=700,
        return_full_text=False,
    )[0]["generated_text"]
    return refined.strip()
|
| 40 |
+
|
| 41 |
+
# ---------- Gradio Workflow ----------
|
| 42 |
+
|
| 43 |
+
def workflow(idea, mode):
    """Run the end-to-end pipeline: refine the idea, generate, critique, polish.

    Args:
        idea: Free-form idea text from the user.
        mode: Either "Code mode" or "Book mode" (anything else acts as book mode).

    Returns:
        A 5-tuple: (refined prompt, formatted markdown output,
        critic 1 feedback, critic 2 feedback, final refined version).
    """
    polished = refine_prompt(idea)

    if mode == "Code mode":
        pseudo, snippet, program = generate_code(polished)
        raw_result = program
        display = (
            f"## Refined Prompt\n{polished}\n\n"
            f"### Pseudocode\n{pseudo}\n\n"
            f"### Simple Code\n{snippet}\n\n"
            f"### Final Code\n{program}"
        )
    else:
        raw_result = generate_book(polished)
        display = f"## Refined Prompt\n{polished}\n\n{raw_result}"

    # Critique the raw generation (not the markdown-wrapped display text),
    # then produce one more pass that folds the feedback in.
    note_a, note_b = get_critic_feedback(raw_result)
    final_pass = refine_output_based_on_critics(raw_result, note_a, note_b)

    return polished, display, note_a, note_b, final_pass
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
# ---------- UI Layout ----------

with gr.Blocks() as demo:
    gr.Markdown("# 🤖 AI Workflow: Code or Book Creator with Self-Critique")

    # Inputs — creation order fixes the on-screen order.
    idea_box = gr.Textbox(label="Enter your idea", placeholder="Type an idea...")
    mode_choice = gr.Radio(["Code mode", "Book mode"], label="Mode")
    run_button = gr.Button("Generate")

    # Outputs, listed in the same order `workflow` returns its values.
    prompt_view = gr.Textbox(label="Refined Prompt")
    result_view = gr.Markdown(label="Generated Output")
    critic_a_view = gr.Textbox(label="Critic 1 Feedback")
    critic_b_view = gr.Textbox(label="Critic 2 Feedback")
    final_view = gr.Markdown(label="Final Refined Version")

    run_button.click(
        workflow,
        [idea_box, mode_choice],
        [prompt_view, result_view, critic_a_view, critic_b_view, final_view],
    )

demo.launch()
|