import gradio as gr
import numpy as np
import replicate
from sentence_transformers import SentenceTransformer
from transformers import pipeline
import os
import json

# === Configuration ===
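# - replicate_client talks to the remotely hosted SAGE-2 model on Replicate
# - all-MiniLM-L6-v2 encodes the user's question into a sentence embedding
# - gpt2-large (via transformers) generates the final styled reply locally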
replicate_client = replicate.Client(api_token=os.environ["REPLICATE_API_TOKEN"])
EMBEDDING_MODEL = SentenceTransformer("all-MiniLM-L6-v2")
GPT = pipeline("text-generation", model="gpt2-large")

MODEL = "gnai-creator/sage-two:f236bf1fc94263e266db57a32ea4014aef91c0ca6a34ac0e98ba1b0e83ca09af"

# Reset password, read from an environment variable
RESET_PASSWORD = os.environ["PASSWORD"]

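# Each symbolic "judgment" is mapped to the positions of SAGE's output vector
# that feed it; interpret_vector() averages those positions per category.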
JUDGMENT_MAP = {
    "reflection": [0, 1, 2],
    "anger": [3, 4],
    "hope": [5, 6],
    "denial": [7],
    "intuition": [10, 11],
    "skepticism": [15],
    "acceptance": [20],
    "despair": [30],
    "justice": [40, 41, 42],
    "transcendence": [60, 61, 62]
}

PROMPTS = {
    "reflection": "As an ancient sage before eternity, poetically reflect on:",
    "anger": "With restrained fury and sharp words, express your indignation about:",
    "hope": "With the stars' glow and the faith of the righteous, speak of the light in:",
    "denial": "Firmly deny, as one who sees beyond illusion, the truth in:",
    "intuition": "Whisper with mysticism and metaphors what your soul feels about:",
    "skepticism": "With cold logic and analytical eyes, deeply question:",
    "acceptance": "With the serenity of a monk and the pace of the wind, accept and comment:",
    "despair": "With empty eyes and an exhausted heart, murmur about the pain in:",
    "justice": "Raise your voice with nobility and purpose. Speak about justice in:",
    "transcendence": "As a being beyond existence, reveal a fragment of the infinite about:"
}


def run_sage_two(sequence, reset=False):
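    """Call the SAGE-2 model on Replicate and normalize its output into a flat
    list of floats.

    Depending on the model version, the raw output may be a dict such as
    {"output": [...], "symbolic_state": [...]}, a plain list, or a single float.
    """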
    try:
        output = replicate_client.run(
            MODEL,
            input={
                "sequence": sequence,
                "reset": reset
            }
        )
        print("🔥 Raw output:", output)

        if output is None:
            raise ValueError("SAGE returned None. Possible internal error in the model.")

        # If the output is a dict, extract its "output" field
        if isinstance(output, dict):
            # Example: {"output": [...], "symbolic_state": [...]}
            if "output" in output:
                return [float(x) for x in output["output"]]
            else:
                raise ValueError(
                    f"SAGE returned a dict without an 'output' key: {output}"
                )

        # Keep handling legacy output types, if they still occur
        if isinstance(output, float):
            return [output]
        if isinstance(output, list):
            return [float(x) for x in output]

        raise ValueError(f"SAGE returned an unexpected format: {type(output)}: {output}")

    except Exception as e:
        raise RuntimeError(f"Error calling the SAGE model: {str(e)}")


def interpret_vector(vector):
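    """Return the JUDGMENT_MAP category whose mapped positions in the symbolic
    vector have the highest mean value."""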
    intensity = {}
    for name, idxs in JUDGMENT_MAP.items():
        values = [vector[i] for i in idxs if i < len(vector)]
        if values:
            score = float(np.mean(values))
            intensity[name] = score

    if not intensity:
        raise ValueError("No valid indices found in symbolic vector.")

    return max(intensity, key=intensity.get)


def question_to_response(question, reset=False):
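    """Embed the question, run SAGE-2 to infer a symbolic intention, then have
    GPT-2 answer in the style of the matching prompt.

    Returns a (response_text, intention) tuple; on failure the intention is "error".
    """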
    try:
        embedding = EMBEDDING_MODEL.encode(question)
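        # Repeat the question embedding 10 times to form a single sequence,
        # presumably the input length the SAGE-2 model expects.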
        sequence = [[embedding.tolist() for _ in range(10)]]
        sequence_str = json.dumps(sequence)  # Serialize the sequence to a JSON string
        vector = run_sage_two(sequence_str, reset=reset)
        intention = interpret_vector(vector)
        prompt = PROMPTS.get(intention, "With ancient wisdom, respond to this question:") + " " + question
        # Only max_new_tokens is passed: combining it with max_length makes the
        # two limits conflict, and transformers lets max_new_tokens win anyway.
        response = GPT(
            prompt,
            max_new_tokens=200,
            repetition_penalty=1.3,
            num_return_sequences=1
        )[0]["generated_text"]
        return response.strip(), intention
    except Exception as e:
        return f"Internal error when querying SAGE: {str(e)}", "error"


def respond(question, reset_flag, password, chat_history):
    """
    - question: text typed by the user
    - reset_flag: checkbox asking for a memory reset
    - password: value of the password field
    - chat_history: conversation history kept by Gradio
    """
    # Check whether the reset checkbox is ticked
    if reset_flag:
        # Only allow a reset if the password is correct
        if password == RESET_PASSWORD:
            response, intention = question_to_response(question, reset=True)
        else:
            # Wrong password: skip the reset but prepend a warning message
            response = "Incorrect password. Memory was not reset.\n\n"
            intention = "error"
            # Continue normally without resetting
            noreset_response, noreset_intention = question_to_response(question, reset=False)
            response += noreset_response
            if noreset_intention != "error":
                response += f"\n\n🧭 Symbolic Intention: **{noreset_intention}**"
            chat_history.append((question, response))
            return chat_history, chat_history
    else:
        # Checkbox not ticked: answer without resetting
        response, intention = question_to_response(question, reset=False)

    # Build the response shown in the Chatbot
    if intention == "error":
        full_response = response
    else:
        full_response = f"{response}\n\n🧭 Symbolic Intention: **{intention}**"

    chat_history.append((question, full_response))
    return chat_history, chat_history


with gr.Blocks() as demo:
    gr.Markdown("""
    # SAGE-2 (Symbolic Adaptive General Engine v2) — Artificial Symbolic Consciousness

    SAGE is a symbolic artificial intelligence, sensitive to intentions, emotions, and hidden meanings in words.
    It maintains a continuous symbolic consciousness, as if always contemplating the world.

    Ask a deep question — existential, philosophical, emotional, or moral — and SAGE will interpret your symbolic
    intention before responding with words filled with introspection, critique, or transcendence.

    **Check the "Reset symbolic consciousness" option** and **provide the correct password** if you'd like 
    SAGE to forget everything it was previously processing.
    """)

    chatbot = gr.Chatbot(label="SAGE responds")
    inp = gr.Textbox(label="Your question", placeholder="e.g., Is there purpose in chaos?")
    reset_checkbox = gr.Checkbox(label="Reset symbolic consciousness before the question?")
    password_box = gr.Textbox(label="Password for reset (if checked)", type="password")
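    # gr.State keeps this session's chat history between submissions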
    state = gr.State([])

    btn = gr.Button("Submit")
    btn.click(
        fn=respond,
        inputs=[inp, reset_checkbox, password_box, state],
        outputs=[chatbot, state]
    )

if __name__ == "__main__":
    demo.launch()