PatoFlamejanteTV committed on
Commit
68baf64
·
verified ·
1 Parent(s): c617c95

Added choosing models dropdown

Browse files
Files changed (1) hide show
  1. app.py +80 -40
app.py CHANGED
@@ -1,81 +1,121 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- # ---------- Model Pipelines ----------
5
- prompt_refiner = pipeline("text-generation", model="mistralai/Mixtral-8x7B-Instruct-v0.1")
6
- code_model = pipeline("text-generation", model="codellama/CodeLlama-7b-Instruct-hf")
7
- book_model = pipeline("text-generation", model="mistralai/Mixtral-8x7B-Instruct-v0.1")
8
- critic_1 = pipeline("text-generation", model="google/gemma-2-9b-it")
9
- critic_2 = pipeline("text-generation", model="meta-llama/Meta-Llama-3-8B-Instruct")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
  # ---------- Core Logic ----------
12
-
13
- def refine_prompt(idea):
14
- refined = prompt_refiner(f"Refine this creative idea into a high-quality prompt: {idea}", max_new_tokens=200)[0]["generated_text"]
15
  return refined.strip()
16
 
17
- def generate_code(prompt):
18
- pseudo = code_model(f"Create simple pseudocode for: {prompt}", max_new_tokens=200)[0]["generated_text"]
19
- simple = code_model(f"Expand this pseudocode into a simple code snippet:\n{pseudo}", max_new_tokens=300)[0]["generated_text"]
20
- full = code_model(f"Turn this snippet into a complete, working program:\n{simple}", max_new_tokens=700)[0]["generated_text"]
 
21
  return pseudo.strip(), simple.strip(), full.strip()
22
 
23
- def generate_book(prompt):
 
24
  structure = ["Start", "Development", "Climax", "Conclusion", "End"]
25
  parts = []
26
  for section in structure:
27
- part = book_model(f"Write the {section} section of a short book based on this idea: {prompt}", max_new_tokens=500)[0]["generated_text"]
28
  parts.append(f"### {section}\n{part.strip()}\n")
29
  return "\n".join(parts)
30
 
31
- def get_critic_feedback(output_text):
32
- critique_1 = critic_1(f"Rate this text from 0 to 100 and explain why:\n{output_text}", max_new_tokens=200)[0]["generated_text"]
33
- critique_2 = critic_2(f"Rate this text from 0 to 100 and explain why:\n{output_text}", max_new_tokens=200)[0]["generated_text"]
 
 
34
  return critique_1.strip(), critique_2.strip()
35
 
36
- def refine_output_based_on_critics(output_text, feedback1, feedback2):
 
37
  combined_feedback = f"Critic 1: {feedback1}\nCritic 2: {feedback2}"
38
- refined = prompt_refiner(f"Refine this text based on the critics' feedback:\n{combined_feedback}\nOriginal text:\n{output_text}", max_new_tokens=700)[0]["generated_text"]
39
  return refined.strip()
40
 
41
- # ---------- Gradio Workflow ----------
42
-
43
- def workflow(idea, mode):
44
- refined_prompt = refine_prompt(idea)
45
 
46
  if mode == "Code mode":
47
- pseudo, simple, full = generate_code(refined_prompt)
48
  initial_output = full
49
  output_text = f"## Refined Prompt\n{refined_prompt}\n\n### Pseudocode\n{pseudo}\n\n### Simple Code\n{simple}\n\n### Final Code\n{full}"
50
  else:
51
- book_text = generate_book(refined_prompt)
52
  initial_output = book_text
53
  output_text = f"## Refined Prompt\n{refined_prompt}\n\n{book_text}"
54
 
55
- feedback1, feedback2 = get_critic_feedback(initial_output)
56
- refined_final = refine_output_based_on_critics(initial_output, feedback1, feedback2)
57
 
58
  return refined_prompt, output_text, feedback1, feedback2, refined_final
59
 
60
 
61
- # ---------- UI Layout ----------
62
-
63
  with gr.Blocks() as demo:
64
- gr.Markdown("# 🤖 AI Workflow: Code or Book Creator with Self-Critique")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
 
66
- idea_input = gr.Textbox(label="Enter your idea", placeholder="Type an idea...")
67
- mode_select = gr.Radio(["Code mode", "Book mode"], label="Mode")
68
- submit = gr.Button("Generate")
69
 
70
- refined_prompt_box = gr.Textbox(label="Refined Prompt")
71
- output_box = gr.Markdown(label="Generated Output")
72
- critic1_box = gr.Textbox(label="Critic 1 Feedback")
73
- critic2_box = gr.Textbox(label="Critic 2 Feedback")
74
- refined_output_box = gr.Markdown(label="Final Refined Version")
75
 
76
  submit.click(
77
  fn=workflow,
78
- inputs=[idea_input, mode_select],
79
  outputs=[refined_prompt_box, output_box, critic1_box, critic2_box, refined_output_box],
80
  )
81
 
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
# ---------- Default Models ----------
# Hugging Face model ids pre-selected in the UI dropdowns; the user can
# override each role under "Advanced Options".
DEFAULT_MODELS = dict(
    prompt_refiner="mistralai/Mixtral-8x7B-Instruct-v0.1",
    code_model="codellama/CodeLlama-7b-Instruct-hf",
    book_model="mistralai/Mixtral-8x7B-Instruct-v0.1",
    critic_1="google/gemma-2-9b-it",
    critic_2="meta-llama/Meta-Llama-3-8B-Instruct",
)
12
+
13
# ---------- Model Descriptions ----------
# Human-readable blurb per model id, shown in the info box when the user
# changes a dropdown. Keys double as the dropdown choices.
MODEL_INFO = {
    "mistralai/Mixtral-8x7B-Instruct-v0.1":
        "Balanced generalist; produces natural, structured and coherent language for both fiction and logic.",
    "codellama/CodeLlama-7b-Instruct-hf":
        "Strong in general-purpose code generation and pseudocode-to-code translation. Output is clean but formal.",
    "tiiuae/falcon-7b-instruct":
        "Smaller and faster; good for creative and casual writing, but less logical accuracy.",
    "google/gemma-2-9b-it":
        "Analytical critic with clear explanations; tends to rate conservatively.",
    "meta-llama/Meta-Llama-3-8B-Instruct":
        "Balanced critic with nuanced feedback, generous creativity scores.",
    "phind/Phind-CodeLlama-34B-v2":
        "Advanced reasoning on code, verbose but deeply structured logic explanations.",
    "stabilityai/stablelm-2-12b":
        "Fluent text generator, ideal for fiction and storytelling with smooth narrative flow.",
}
23
+
24
# ---------- Dynamic Model Loader ----------
# Cache of already-constructed pipelines, keyed by model id. Without this,
# every generation step re-downloaded/re-instantiated a multi-billion-parameter
# model (a single workflow run loads up to five models), which is prohibitively
# slow and memory-churning.
_PIPELINE_CACHE = {}

def load_pipeline(model_name):
    """Return a text-generation pipeline for *model_name*, creating it once.

    Subsequent calls with the same model id reuse the cached instance.
    """
    if model_name not in _PIPELINE_CACHE:
        _PIPELINE_CACHE[model_name] = pipeline("text-generation", model=model_name)
    return _PIPELINE_CACHE[model_name]
27
 
28
# ---------- Core Logic ----------
def refine_prompt(idea, model_name):
    """Rewrite a raw user idea into a polished generation prompt.

    Uses the model selected in the "Prompt Refiner" dropdown.
    """
    generator = load_pipeline(model_name)
    request = f"Refine this creative idea into a high-quality prompt: {idea}"
    result = generator(request, max_new_tokens=200)[0]["generated_text"]
    return result.strip()
33
 
34
def generate_code(prompt, model_name):
    """Produce (pseudocode, simple snippet, full program) for *prompt*.

    Three chained generation passes: each stage's output is embedded in the
    next stage's prompt. NOTE(review): the pipeline's "generated_text"
    presumably echoes the input prompt as a prefix — confirm whether
    `return_full_text=False` is wanted here.
    """
    generator = load_pipeline(model_name)

    def run(request, budget):
        # One generation pass; returns just the text field.
        return generator(request, max_new_tokens=budget)[0]["generated_text"]

    pseudo = run(f"Create simple pseudocode for: {prompt}", 200)
    simple = run(f"Expand this pseudocode into a simple code snippet:\n{pseudo}", 300)
    full = run(f"Turn this snippet into a complete, working program:\n{simple}", 700)
    return pseudo.strip(), simple.strip(), full.strip()
40
 
41
def generate_book(prompt, model_name):
    """Write a five-section short book based on *prompt*.

    Returns one markdown string with a ### heading per section.
    """
    generator = load_pipeline(model_name)
    rendered = []
    for section in ["Start", "Development", "Climax", "Conclusion", "End"]:
        text = generator(
            f"Write the {section} section of a short book based on this idea: {prompt}",
            max_new_tokens=500,
        )[0]["generated_text"]
        rendered.append(f"### {section}\n{text.strip()}\n")
    return "\n".join(rendered)
49
 
50
def get_critic_feedback(output_text, model1_name, model2_name):
    """Ask two critic models to score *output_text* (0-100) and explain why.

    Returns the two critiques as stripped strings, in critic order.
    """
    first_critic = load_pipeline(model1_name)
    second_critic = load_pipeline(model2_name)
    question = f"Rate this text from 0 to 100 and explain why:\n{output_text}"
    first = first_critic(question, max_new_tokens=200)[0]["generated_text"]
    second = second_critic(question, max_new_tokens=200)[0]["generated_text"]
    return first.strip(), second.strip()
56
 
57
def refine_output_based_on_critics(output_text, feedback1, feedback2, model_name):
    """Revise *output_text* so it addresses both critics' feedback."""
    generator = load_pipeline(model_name)
    merged = f"Critic 1: {feedback1}\nCritic 2: {feedback2}"
    request = (
        f"Refine this text based on the critics' feedback:\n{merged}\n"
        f"Original text:\n{output_text}"
    )
    return generator(request, max_new_tokens=700)[0]["generated_text"].strip()
62
 
63
# ---------- Workflow Function ----------
def workflow(idea, mode, prompt_model, code_model, book_model, critic1_model, critic2_model):
    """End-to-end pipeline: refine the idea, generate, critique, then polish.

    Returns the 5-tuple (refined_prompt, combined_markdown, critic1_feedback,
    critic2_feedback, final_refined_text) matching the Gradio outputs. Any
    mode other than "Code mode" falls through to book generation.
    """
    refined_prompt = refine_prompt(idea, prompt_model)

    if mode == "Code mode":
        pseudo, simple, full = generate_code(refined_prompt, code_model)
        initial_output = full
        output_text = (
            f"## Refined Prompt\n{refined_prompt}\n\n### Pseudocode\n{pseudo}"
            f"\n\n### Simple Code\n{simple}\n\n### Final Code\n{full}"
        )
    else:
        initial_output = generate_book(refined_prompt, book_model)
        output_text = f"## Refined Prompt\n{refined_prompt}\n\n{initial_output}"

    feedback1, feedback2 = get_critic_feedback(initial_output, critic1_model, critic2_model)
    refined_final = refine_output_based_on_critics(
        initial_output, feedback1, feedback2, prompt_model
    )

    return refined_prompt, output_text, feedback1, feedback2, refined_final
 
81
 
82
# ---------- UI ----------
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 AI Workflow: Code or Book Creator with Self-Critique + Advanced Options")

    with gr.Tab("Main"):
        idea_input = gr.Textbox(label="Enter your idea", placeholder="Type an idea...")
        mode_select = gr.Radio(["Code mode", "Book mode"], label="Mode")
        submit = gr.Button("Generate")

        refined_prompt_box = gr.Textbox(label="Refined Prompt")
        output_box = gr.Markdown(label="Generated Output")
        critic1_box = gr.Textbox(label="Critic 1 Feedback")
        critic2_box = gr.Textbox(label="Critic 2 Feedback")
        refined_output_box = gr.Markdown(label="Final Refined Version")

    with gr.Accordion("🧩 Advanced Options", open=False):
        gr.Markdown("### Choose models and learn their behavior:")

        # All dropdowns share the same choice list: the documented model ids.
        choices = list(MODEL_INFO.keys())

        prompt_model = gr.Dropdown(choices, label="Prompt Refiner", value=DEFAULT_MODELS["prompt_refiner"])
        code_model = gr.Dropdown(choices, label="Code Generator", value=DEFAULT_MODELS["code_model"])
        book_model = gr.Dropdown(choices, label="Book Generator", value=DEFAULT_MODELS["book_model"])
        critic1_model = gr.Dropdown(choices, label="Critic 1", value=DEFAULT_MODELS["critic_1"])
        critic2_model = gr.Dropdown(choices, label="Critic 2", value=DEFAULT_MODELS["critic_2"])

        model_info_box = gr.Markdown("Hover over a model to learn about it.")

        def show_model_info(model_name):
            """Render the description blurb for the selected model id."""
            return f"**{model_name}** → {MODEL_INFO.get(model_name, 'No info available.')}"

        # FIX: previously only the Prompt Refiner dropdown updated the info box;
        # wire all five selectors so the blurb always reflects the last change.
        for dropdown in (prompt_model, code_model, book_model, critic1_model, critic2_model):
            dropdown.change(show_model_info, inputs=dropdown, outputs=model_info_box)

    submit.click(
        fn=workflow,
        inputs=[idea_input, mode_select, prompt_model, code_model, book_model, critic1_model, critic2_model],
        outputs=[refined_prompt_box, output_box, critic1_box, critic2_box, refined_output_box],
    )

# FIX: the file previously defined `demo` but never launched it, so running
# app.py directly started nothing.
if __name__ == "__main__":
    demo.launch()
121