Spaces:
Sleeping
Sleeping
# app.py — NOTE: the triple-quoted string below is an earlier draft of this
# module kept only as inert reference text (it is never executed); safe to delete.
'''import gradio as gr | |
import pandas as pd | |
from inference import ( | |
evo_chat_predict, | |
get_gpt_response, | |
get_model_config, | |
get_system_stats, | |
retrain_from_feedback_csv, | |
load_model, | |
) | |
import os | |
import csv | |
FEEDBACK_LOG = "feedback_log.csv" | |
# π§ Ask Evo | |
def ask_evo(question, option1, option2, history, user_vote): | |
options = [option1.strip(), option2.strip()] | |
result = evo_chat_predict(history, question.strip(), options) | |
# Create feedback_log.csv with headers if it doesn't exist | |
if not os.path.exists(FEEDBACK_LOG): | |
with open(FEEDBACK_LOG, "w", encoding="utf-8", newline="") as f: | |
writer = csv.writer(f) | |
writer.writerow(["question", "option1", "option2", "evo_answer", "confidence", "reasoning", "context", "vote"]) | |
row = { | |
"question": question.strip(), | |
"option1": option1.strip(), | |
"option2": option2.strip(), | |
"evo_answer": result["answer"], | |
"confidence": result["confidence"], | |
"reasoning": result["reasoning"], | |
"context": result["context_used"], | |
"vote": user_vote.strip() if user_vote else "" | |
} | |
# Log feedback | |
with open(FEEDBACK_LOG, "a", newline='', encoding="utf-8") as f: | |
writer = csv.DictWriter(f, fieldnames=row.keys()) | |
writer.writerow(row) | |
# Prepare outputs | |
evo_output = f"Answer: {row['evo_answer']} (Confidence: {row['confidence']})\n\nReasoning: {row['reasoning']}\n\nContext used: {row['context']}" | |
gpt_output = get_gpt_response(question) | |
history.append(row) | |
stats = get_model_config() | |
sys_stats = get_system_stats() | |
stats_text = f"Layers: {stats.get('num_layers', '?')} | Heads: {stats.get('num_heads', '?')} | FFN: {stats.get('ffn_dim', '?')} | Memory: {stats.get('memory_enabled', '?')} | Accuracy: {stats.get('accuracy', '?')}" | |
sys_text = f"Device: {sys_stats['device']} | CPU: {sys_stats['cpu_usage_percent']}% | RAM: {sys_stats['memory_used_gb']}GB / {sys_stats['memory_total_gb']}GB | GPU: {sys_stats['gpu_name']} ({sys_stats['gpu_memory_used_gb']}GB / {sys_stats['gpu_memory_total_gb']}GB)" | |
return evo_output, gpt_output, stats_text, sys_text, history | |
# π Manual retrain button | |
def retrain_evo(): | |
msg = retrain_from_feedback_csv() | |
load_model(force_reload=True) | |
return msg | |
# π€ Export feedback | |
def export_feedback(): | |
if not os.path.exists(FEEDBACK_LOG): | |
return pd.DataFrame() | |
return pd.read_csv(FEEDBACK_LOG) | |
# π§Ή Clear | |
def clear_all(): | |
return "", "", "", "", [], None | |
# πΌοΈ UI | |
with gr.Blocks(title="π§ Evo β Reasoning AI") as demo: | |
gr.Markdown("## Why Evo? π Evo is not just another AI. It evolves. It learns from you. It adapts its architecture live based on feedback.\n\nNo retraining labs, no frozen weights. This is live reasoning meets evolution. Built to outperform, built to survive.") | |
with gr.Row(): | |
question = gr.Textbox(label="π§ Your Question", placeholder="e.g. Why is the sky blue?") | |
with gr.Row(): | |
option1 = gr.Textbox(label="β Option 1") | |
option2 = gr.Textbox(label="β Option 2") | |
with gr.Row(): | |
with gr.Column(): | |
evo_ans = gr.Textbox(label="π§ Evo", lines=6) | |
with gr.Column(): | |
gpt_ans = gr.Textbox(label="π€ GPT-3.5", lines=6) | |
with gr.Row(): | |
stats = gr.Textbox(label="π Evo Stats") | |
system = gr.Textbox(label="π΅ Status") | |
evo_radio = gr.Radio(["Evo", "GPT"], label="π§ Who was better?", info="Optional β fuels evolution") | |
history = gr.State([]) | |
with gr.Row(): | |
ask_btn = gr.Button("β‘ Ask Evo") | |
retrain_btn = gr.Button("π Retrain Evo") | |
clear_btn = gr.Button("π§Ή Clear") | |
export_btn = gr.Button("π€ Export Feedback CSV") | |
export_table = gr.Dataframe(label="π Conversation History") | |
ask_btn.click(fn=ask_evo, inputs=[question, option1, option2, history, evo_radio], outputs=[evo_ans, gpt_ans, stats, system, history]) | |
retrain_btn.click(fn=retrain_evo, inputs=[], outputs=[stats]) | |
clear_btn.click(fn=clear_all, inputs=[], outputs=[question, option1, option2, evo_ans, gpt_ans, stats, system, history, evo_radio]) | |
export_btn.click(fn=export_feedback, inputs=[], outputs=[export_table]) | |
if __name__ == "__main__": | |
demo.launch() | |
''' | |
# app.py — active implementation
import gradio as gr | |
import pandas as pd | |
import os | |
import csv | |
from inference import ( | |
evo_chat_predict, | |
get_gpt_response, | |
get_model_config, | |
get_system_stats, | |
retrain_from_feedback_csv, | |
load_model, | |
) | |
GENOME_LOG = "genome_log.csv" | |
FEEDBACK_LOG = "feedback_log.csv" | |
# 🧠 Ask Evo
def ask_evo(question, option1, option2, history, user_vote):
    """Answer a two-option question with Evo, log the exchange, and refresh stats.

    Parameters
    ----------
    question, option1, option2 : str
        Raw textbox contents; whitespace is stripped before use.
    history : list[dict]
        Gradio State with prior exchanges; mutated in place (row appended).
    user_vote : str | None
        Optional "Evo"/"GPT" choice from the radio control; logged as-is.

    Returns
    -------
    tuple
        (evo_text, gpt_text, stats_text, system_text, history, genome_df),
        matching the ask_btn.click outputs wiring.
    """
    # Single source of truth for the CSV column order: used for both the
    # header row and every appended data row so the two can never drift
    # apart (previously the header list and row.keys() were maintained
    # independently).
    fieldnames = ["question", "option1", "option2", "evo_answer",
                  "confidence", "reasoning", "context", "vote"]
    options = [option1.strip(), option2.strip()]
    result = evo_chat_predict(history, question.strip(), options)
    # Create feedback_log.csv with a header row on first use only.
    if not os.path.exists(FEEDBACK_LOG):
        with open(FEEDBACK_LOG, "w", encoding="utf-8", newline="") as f:
            csv.writer(f).writerow(fieldnames)
    row = {
        "question": question.strip(),
        "option1": option1.strip(),
        "option2": option2.strip(),
        "evo_answer": result["answer"],
        "confidence": result["confidence"],
        "reasoning": result["reasoning"],
        "context": result["context_used"],
        "vote": user_vote.strip() if user_vote else ""
    }
    # Append the exchange so retrain_from_feedback_csv can learn from it later.
    with open(FEEDBACK_LOG, "a", newline="", encoding="utf-8") as f:
        csv.DictWriter(f, fieldnames=fieldnames).writerow(row)
    evo_output = f"Answer: {row['evo_answer']} (Confidence: {row['confidence']})\n\nReasoning: {row['reasoning']}\n\nContext used: {row['context']}"
    gpt_output = get_gpt_response(question)
    history.append(row)
    stats = get_model_config()
    sys_stats = get_system_stats()
    stats_text = f"Layers: {stats.get('num_layers', '?')} | Heads: {stats.get('num_heads', '?')} | FFN: {stats.get('ffn_dim', '?')} | Memory: {stats.get('memory_enabled', '?')} | Accuracy: {stats.get('accuracy', '?')}"
    sys_text = f"Device: {sys_stats['device']} | CPU: {sys_stats['cpu_usage_percent']}% | RAM: {sys_stats['memory_used_gb']}GB / {sys_stats['memory_total_gb']}GB | GPU: {sys_stats['gpu_name']} ({sys_stats['gpu_memory_used_gb']}GB / {sys_stats['gpu_memory_total_gb']}GB)"
    genome_df = get_top_genomes()
    return evo_output, gpt_output, stats_text, sys_text, history, genome_df
# 🏆 Top genome stats
def get_top_genomes(n=5, path=None):
    """Return the best-scoring rows of the genome log as a DataFrame.

    Parameters
    ----------
    n : int
        Number of rows to return.
    path : str | None
        CSV file to read; defaults to the module-level GENOME_LOG (kept as
        None so the constant is resolved at call time).

    Returns an empty DataFrame when the log is missing or unreadable.
    """
    log_path = GENOME_LOG if path is None else path
    if not os.path.exists(log_path):
        return pd.DataFrame()
    try:
        df = pd.read_csv(log_path)
        if "score" in df.columns:
            # BUG FIX: after a descending sort the best genomes are at the
            # head; the previous tail(n) returned the n WORST genomes.
            return df.sort_values(by="score", ascending=False).head(n)
        # No score column: fall back to the n most recent entries.
        return df.tail(n)
    except Exception:
        # Corrupt or half-written CSV — show "no data" rather than crash the UI.
        return pd.DataFrame()
# 🔁 Manual retrain button
def retrain_evo():
    """Retrain Evo from the accumulated feedback CSV and hot-swap the model."""
    status_message = retrain_from_feedback_csv()
    # Force a reload so the freshly trained weights are served immediately.
    load_model(force_reload=True)
    return status_message
# 📤 Export feedback
def export_feedback():
    """Load the feedback log for display; empty table when nothing is logged yet."""
    try:
        return pd.read_csv(FEEDBACK_LOG)
    except FileNotFoundError:
        # Nothing has been asked yet — show an empty table instead of erroring.
        return pd.DataFrame()
# 🧹 Clear UI
def clear_all():
    """Reset every widget wired to clear_btn.click.

    Returns one value per output component, in the exact order of the
    outputs list: question, option1, option2, evo_ans, gpt_ans, stats,
    export_table, system, genome_table.
    """
    # BUG FIX: the 8th slot feeds the "System Status" Textbox; it previously
    # received {} (which Gradio interprets as an update dict, not text).
    return "", "", "", "", "", "", pd.DataFrame(), "", pd.DataFrame()
# 🖼️ UI Layout
# Builds the Gradio Blocks app and wires each button to the handlers above.
with gr.Blocks(title="π§ Evo β Reasoning AI") as demo:
    gr.Markdown("## π Evo is not just another AI. It evolves. It learns from you. It mutates based on feedback.\n\nNo retraining labs. No frozen weights. This is live reasoning meets evolution.")
    with gr.Row():
        question = gr.Textbox(label="π§ Your Question", placeholder="e.g. Why is the sky blue?")
    with gr.Row():
        option1 = gr.Textbox(label="β Option 1")
        option2 = gr.Textbox(label="β Option 2")
    # Side-by-side answers: Evo on the left, GPT-3.5 baseline on the right.
    with gr.Row():
        with gr.Column():
            evo_ans = gr.Textbox(label="π§ Evo", lines=6)
        with gr.Column():
            gpt_ans = gr.Textbox(label="π€ GPT-3.5", lines=6)
    with gr.Row():
        stats = gr.Textbox(label="π Evo Stats")
        system = gr.Textbox(label="π΅ System Status")
    # Optional vote; feeds the "vote" column of feedback_log.csv via ask_evo.
    evo_radio = gr.Radio(["Evo", "GPT"], label="π§ Who was better?", info="Optional β leave blank if both were wrong")
    # Per-session conversation history: a list of the logged row dicts.
    history = gr.State([])
    with gr.Row():
        ask_btn = gr.Button("β‘ Ask Evo")
        retrain_btn = gr.Button("π Retrain Evo")
        clear_btn = gr.Button("π§Ή Clear")
        export_btn = gr.Button("π€ Export Feedback CSV")
    export_table = gr.Dataframe(label="π Conversation History")
    genome_table = gr.Dataframe(label="𧬠Top Genomes")
    # ask_evo returns 6 values matching this outputs order exactly.
    ask_btn.click(
        fn=ask_evo,
        inputs=[question, option1, option2, history, evo_radio],
        outputs=[evo_ans, gpt_ans, stats, system, history, genome_table]
    )
    # Retrain status message is displayed in the stats Textbox.
    retrain_btn.click(fn=retrain_evo, inputs=[], outputs=[stats])
    # NOTE(review): clear_all must return values in exactly this output order;
    # the `history` State is NOT in this list, so it persists across a Clear.
    clear_btn.click(
        fn=clear_all,
        inputs=[],
        outputs=[
            question, option1, option2, evo_ans, gpt_ans,
            stats, export_table, system, genome_table
        ]
    )
    export_btn.click(fn=export_feedback, inputs=[], outputs=[export_table])
if __name__ == "__main__":
    demo.launch()