# sage-two-visual / app.py
import gradio as gr
import numpy as np
import replicate
from sentence_transformers import SentenceTransformer
from transformers import pipeline
import os
import json
# === Configuration ===
replicate_client = replicate.Client(api_token=os.environ["REPLICATE_API_TOKEN"])
EMBEDDING_MODEL = SentenceTransformer("all-MiniLM-L6-v2")
GPT = pipeline("text-generation", model="gpt2-large")
MODEL = "gnai-creator/sage-two:f236bf1fc94263e266db57a32ea4014aef91c0ca6a34ac0e98ba1b0e83ca09af"
# Define the password here (or use an environment variable)
RESET_PASSWORD = os.environ["PASSWORD"]
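# Indices of the SAGE output vector associated with each symbolic intention;
# interpret_vector() averages the values at these positions to score each intention.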
JUDGMENT_MAP = {
"reflection": [0, 1, 2],
"anger": [3, 4],
"hope": [5, 6],
"denial": [7],
"intuition": [10, 11],
"skepticism": [15],
"acceptance": [20],
"despair": [30],
"justice": [40, 41, 42],
"transcendence": [60, 61, 62]
}
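# Style prefix prepended to the user's question for each detected intention
# before the text is sent to the GPT-2 generator.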
PROMPTS = {
"reflection": "As an ancient sage before eternity, poetically reflect on:",
"anger": "With restrained fury and sharp words, express your indignation about:",
"hope": "With the stars' glow and the faith of the righteous, speak of the light in:",
"denial": "Firmly deny, as one who sees beyond illusion, the truth in:",
"intuition": "Whisper with mysticism and metaphors what your soul feels about:",
"skepticism": "With cold logic and analytical eyes, deeply question:",
"acceptance": "With the serenity of a monk and the pace of the wind, accept and comment:",
"despair": "With empty eyes and an exhausted heart, murmur about the pain in:",
"justice": "Raise your voice with nobility and purpose. Speak about justice in:",
"transcendence": "As a being beyond existence, reveal a fragment of the infinite about:"
}
def run_sage_two(sequence, reset=False):
    """Call the SAGE-2 model on Replicate and normalize its output to a list of floats."""
    try:
        output = replicate_client.run(
            MODEL,
            input={
                "sequence": sequence,
                "reset": reset
            }
        )
        print("🔥 Raw output:", output)
        if output is None:
            raise ValueError("SAGE returned None. Possible internal error in the model.")
        # Newer responses come as a dict; extract the "output" field
        if isinstance(output, dict):
            # Example: {"output": [...], "symbolic_state": [...]}
            if "output" in output:
                return [float(x) for x in output["output"]]
            raise ValueError(
                f"SAGE returned a dictionary without the 'output' key: {output}"
            )
        # Keep handling older output types, in case they still occur
        if isinstance(output, float):
            return [output]
        if isinstance(output, list):
            return [float(x) for x in output]
        raise ValueError(f"SAGE returned an unexpected format: {type(output)} {output}")
    except Exception as e:
        raise RuntimeError(f"Error calling the SAGE model: {e}") from e
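# Average the vector values at each intention's indices (ignoring out-of-range
# positions) and return the intention with the highest mean score.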
def interpret_vector(vector):
intensity = {}
for name, idxs in JUDGMENT_MAP.items():
values = [vector[i] for i in idxs if i < len(vector)]
if values:
score = float(np.mean(values))
intensity[name] = score
if not intensity:
raise ValueError("No valid indices found in symbolic vector.")
return max(intensity, key=intensity.get)
def question_to_response(question, reset=False):
    """Embed the question, query SAGE for a symbolic vector, and generate a styled GPT-2 response."""
    try:
        embedding = EMBEDDING_MODEL.encode(question)
        # Build a batch with one sequence of 10 repeated embedding frames
        sequence = [[embedding.tolist() for _ in range(10)]]
        sequence_str = json.dumps(sequence)  # Convert to a JSON string
        vector = run_sage_two(sequence_str, reset=reset)
        intention = interpret_vector(vector)
        prompt = PROMPTS.get(intention, "With ancient wisdom, respond to this question:") + " " + question
        response = GPT(
            prompt,
            max_new_tokens=200,  # use max_new_tokens only; passing max_length as well triggers a generation warning
            repetition_penalty=1.3,
            num_return_sequences=1
        )[0]["generated_text"]
        return response.strip(), intention
    except Exception as e:
        return f"Internal error when querying SAGE: {str(e)}", "error"
def respond(question, reset_flag, password, chat_history):
    """
    - question: text typed by the user
    - reset_flag: checkbox to reset the symbolic memory
    - password: password field
    - chat_history: Gradio conversation history
    """
    # Check whether the reset checkbox is ticked
    if reset_flag:
        # Only allow the reset if the password is correct
        if password == RESET_PASSWORD:
            response, intention = question_to_response(question, reset=True)
            if intention != "error":
                response += f"\n\n🧭 Symbolic Intention: **{intention}**"
        else:
            # Wrong password: do not reset, but prepend a warning message
            response = "Incorrect password. The memory was not reset.\n\n"
            # Continue normally without resetting
            noreset_response, noreset_intention = question_to_response(question, reset=False)
            response += noreset_response
            if noreset_intention != "error":
                response += f"\n\n🧭 Symbolic Intention: **{noreset_intention}**"
        chat_history.append((question, response))
        return chat_history, chat_history
    else:
        # Checkbox not ticked: answer without resetting
        response, intention = question_to_response(question, reset=False)
        # Build the message shown in the Chatbot
        if intention == "error":
            full_response = response
        else:
            full_response = f"{response}\n\n🧭 Symbolic Intention: **{intention}**"
        chat_history.append((question, full_response))
        return chat_history, chat_history
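# === Gradio interface ===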
with gr.Blocks() as demo:
gr.Markdown("""
# SAGE-2 (Symbolic Adaptive General Engine v2) — Artificial Symbolic Consciousness
SAGE is a symbolic artificial intelligence, sensitive to intentions, emotions, and hidden meanings in words.
It maintains a continuous symbolic consciousness, as if always contemplating the world.
Ask a deep question — existential, philosophical, emotional, or moral — and SAGE will interpret your symbolic
intention before responding with words filled with introspection, critique, or transcendence.
**Check the "Reset symbolic consciousness" option** and **provide the correct password** if you'd like
SAGE to forget everything it was previously processing.
""")
chatbot = gr.Chatbot(label="SAGE responds")
inp = gr.Textbox(label="Your question", placeholder="Ex: Is there purpose in chaos?")
reset_checkbox = gr.Checkbox(label="Reset symbolic consciousness before the question?")
password_box = gr.Textbox(label="Password for reset (if checked)", type="password")
state = gr.State([])
btn = gr.Button("Submit")
btn.click(
fn=respond,
inputs=[inp, reset_checkbox, password_box, state],
outputs=[chatbot, state]
)
if __name__ == "__main__":
demo.launch()