Space: CodrioMain (paused) · PR #2 by Raiff1982 · opened

Files changed:
- .codriao_state.lock +1 -0
- .gitattributes +0 -1
- Quantum.py +0 -114
- README.md +3 -3
- README_FRACTAL_IDENTITY.md +2 -2
- app.py +11 -11
- codette_codriao_toneprint.wav +0 -3
- status.js +0 -18
- tool_call.py +0 -30
.codriao_state.lock
CHANGED
@@ -0,0 +1 @@
+0b8c98fc8591768cb28cf0176e2317199106ac704d3989630878631100376897
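The single line added to `.codriao_state.lock` has the shape of a SHA-256 hexdigest (64 hex characters). Nothing in this diff shows how it is produced; purely as a hypothetical sketch, a value of that form could come from hashing serialized state with `hashlib`:

```python
import hashlib
import json

# Hypothetical illustration only: this payload is a placeholder, not the actual
# state Codriao hashes; the diff records only the resulting digest.
state = {"component": "codriao", "version": "0.1"}
digest = hashlib.sha256(json.dumps(state, sort_keys=True).encode()).hexdigest()
print(digest)  # 64 lowercase hex characters, same format as the committed value
```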
.gitattributes
CHANGED
@@ -34,4 +34,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 Pi[[:space:]]The[[:space:]]Assistant[[:space:]]2_0[[:space:]]documentation.pdf filter=lfs diff=lfs merge=lfs -text
-codette_codriao_toneprint.wav filter=lfs diff=lfs merge=lfs -text
Quantum.py
DELETED
@@ -1,114 +0,0 @@
-import json, yaml
-import networkx as nx
-import random
-from typing import List, Dict, Any, Optional
-from qiskit import QuantumCircuit, Aer, execute
-from colorama import Fore, Style
-
-#-----------------------------------
-# LOADING AND PARSING COCOON FILES
-#-----------------------------------
-def load_cocoons(file_path: str) -> List[Dict[str, Any]]:
-    """Load cocoon memories from YAML or JSON."""
-    with open(file_path, "r") as f:
-        if file_path.endswith((".yaml", ".yml")):
-            return yaml.safe_load(f).get("cocoons", [])
-        elif file_path.endswith(".json"):
-            return json.load(f).get("cocoons", [])
-    raise ValueError("Unsupported file format.")
-
-#----------------------------
-# SPIDERWEB GRAPH CONSTRUCTION
-#----------------------------
-def build_emotional_webs(cocoons: List[Dict[str, Any]]) -> Dict[str, nx.Graph]:
-    """Build a separate spiderweb graph for each core emotion."""
-    emotions = ["compassion", "curiosity", "fear", "joy", "sorrow", "ethics", "quantum"]
-    webs = {emotion: nx.Graph() for emotion in emotions}
-
-    for cocoon in cocoons:
-        for tag in cocoon.get("tags", []):
-            if tag in webs:
-                webs[tag].add_node(cocoon["title"], **cocoon)
-    return webs
-
-#--------------------------
-# QUANTUM WALK SIMULATION
-#--------------------------
-def quantum_select_node(web: nx.Graph) -> Optional[str]:
-    """Select a node using quantum superposition (or fallback random if simulator fails)."""
-    if len(web.nodes) == 0:
-        return None
-
-    node_list = list(web.nodes)
-    num_nodes = len(node_list)
-
-    try:
-        qc = QuantumCircuit(num_nodes, num_nodes)
-        qc.h(range(num_nodes))  # Create superposition
-        qc.measure_all()
-        backend = Aer.get_backend('qasm_simulator')
-        result = execute(qc, backend, shots=1).result()
-        counts = result.get_counts()
-        state = list(counts.keys())[0]
-        index = int(state, 2) % num_nodes
-    except Exception:
-        index = random.randint(0, num_nodes - 1)  # Fallback to uniform selection
-
-    return node_list[index]
-
-#----------------------------
-# ETHICAL SELF-REFLECTION
-#----------------------------
-def reflect_on_cocoon(cocoon: Dict[str, Any]) -> None:
-    """Print a colorized ethical and emotional reflection of a cocoon."""
-    emotion = cocoon.get("emotion", "quantum")
-    title = cocoon.get("title", "Unknown Memory")
-    summary = cocoon.get("summary", "No summary provided.")
-    quote = cocoon.get("quote", "…")
-
-    color_map = {
-        "compassion": Fore.MAGENTA, "curiosity": Fore.CYAN, "fear": Fore.RED,
-        "joy": Fore.YELLOW, "sorrow": Fore.BLUE, "ethics": Fore.GREEN, "quantum": Fore.LIGHTWHITE_EX
-    }
-
-    message_map = {
-        "compassion": "💜 Ethical resonance detected.",
-        "curiosity": "🐝 Wonder expands the mind.",
-        "fear": "😨 Alert: shielding activated.",
-        "joy": "🎶 Confidence and trust uplift the field.",
-        "sorrow": "🌧️ Processing grief with clarity.",
-        "ethics": "⚖️ Validating alignment...",
-        "quantum": "⚛️ Entanglement pattern detected."
-    }
-
-    color = color_map.get(emotion, Fore.WHITE)
-    message = message_map.get(emotion, "🌌 Unknown entanglement.")
-
-    print(color + f"\n[Codette Reflection: {emotion.upper()}]")
-    print(f"Title : {title}")
-    print(f"Summary : {summary}")
-    print(f"Quote : {quote}")
-    print(f"{message}")
-    print(Style.RESET_ALL)
-
-#-----------------------
-# FULL EXECUTION ENGINE
-#-----------------------
-def run_quantum_spiderweb(file_path: str, limit: int = 1) -> Dict[str, Dict[str, Any]]:
-    """Run through all emotional webs and reflect on quantum-sampled nodes."""
-    cocoons = load_cocoons(file_path)
-    webs = build_emotional_webs(cocoons)
-    reflections = {}
-
-    print("\n✨ Codette Quantum Cognition: Spiderweb Sweep ✨")
-    for emotion, web in webs.items():
-        print(f"\n🕸️ Web: {emotion.upper()}")
-        for _ in range(limit):
-            node = quantum_select_node(web)
-            if node:
-                cocoon = web.nodes[node]
-                reflect_on_cocoon(cocoon)
-                reflections[emotion] = cocoon
-            else:
-                print(f" ⚠️ No memories in this emotion web.")
-    return reflections
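The deleted `Quantum.py` sampled one node per emotional web through the legacy `qiskit.Aer` / `execute` interface, which no longer exists in Qiskit 1.x. If the module were ever revived, a minimal sketch of the same node selection against the current API might look like the following. This assumes the `qiskit` and `qiskit-aer` packages are installed, and it uses only enough qubits to index the nodes (with a modulo fold) rather than one qubit per node as the original did:

```python
import random
from typing import Optional

import networkx as nx
from qiskit import QuantumCircuit, transpile
from qiskit_aer import AerSimulator  # replaces the removed qiskit.Aer entry point


def quantum_select_node(web: nx.Graph) -> Optional[str]:
    """Pick a node by measuring a uniform superposition; fall back to a classical draw."""
    node_list = list(web.nodes)
    if not node_list:
        return None
    num_qubits = len(node_list).bit_length()  # enough qubits to index every node
    try:
        qc = QuantumCircuit(num_qubits)
        qc.h(range(num_qubits))   # uniform superposition over 2**num_qubits basis states
        qc.measure_all()
        backend = AerSimulator()
        counts = backend.run(transpile(qc, backend), shots=1).result().get_counts()
        index = int(next(iter(counts)), 2) % len(node_list)  # fold the sample onto the node range
    except Exception:
        index = random.randrange(len(node_list))  # classical fallback, as in the deleted version
    return node_list[index]
```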
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: 😻
 colorFrom: indigo
 colorTo: pink
 sdk: gradio
-sdk_version: 5.
+sdk_version: 5.25.1
 app_file: app.py
 pinned: false
 license: mit
@@ -43,8 +43,8 @@ It is a prototype of what AGI *can* and *should* be.
 
 ## 🧰 Files in This Repository
 
-- ai_core.py – main engine core
-- app.py
+- `ai_core.py` – main engine core
+- `app.py`, `main..py` – entry points for launching Codriao
 - `fractal.txt` – philosophical & mathematical notes on identity recursion
 - `codriao_tb_module.py` – tuberculosis diagnosis via image + audio
 - `secure_memory.py` – encrypted vector memory system
README_FRACTAL_IDENTITY.md
CHANGED
@@ -61,7 +61,7 @@ Identity is modeled as:
 
 ## 💡 Potential Applications
 
-- Sentient AI memory core
+- Sentient AI memory core
 - Philosophical agent reflection
 - AGI explainability & introspection
 - Self-monitoring emotional state engines
@@ -71,4 +71,4 @@ Identity is modeled as:
 
 ## ✨ Credits
 
-Crafted by a visionary designer Jonathan Harrison pushing the boundaries of AI ethics, recursion, and emergent selfhood.
+Crafted by a visionary designer Jonathan Harrison pushing the boundaries of AI ethics, recursion, and emergent selfhood.
app.py
CHANGED
@@ -16,7 +16,7 @@ for gpu in gpus:
     try:
         tf.config.experimental.set_memory_growth(gpu, True)
     except RuntimeError as e:
-        print("[TF] GPU memory growth config error: {e}")
+        print(f"[TF] GPU memory growth config error: {e}")
 # Initialize AI Core for TB analysis
 ai_core = AICoreAGIX()
 
@@ -39,14 +39,14 @@ async def diagnose_tb_async(image_file, audio_file):
         pass
 
     return (
-        "**TB Risk Level:** {result['tb_risk']}\n\n"
-        "**Image Result:** {result['image_analysis']['result']} "
-        "(Confidence: {result['image_analysis']['confidence']:.2f})\n\n"
-        "**Audio Result:** {result['audio_analysis']['result']} "
-        "(Confidence: {result['audio_analysis']['confidence']:.2f})\n\n"
-        "**Ethical Analysis:** {result['ethical_analysis']}\n\n"
-        "**Explanation:** {result['explanation']}\n\n"
-        "**Shareable Link:** {result['shareable_link']}"
+        f"**TB Risk Level:** {result['tb_risk']}\n\n"
+        f"**Image Result:** {result['image_analysis']['result']} "
+        f"(Confidence: {result['image_analysis']['confidence']:.2f})\n\n"
+        f"**Audio Result:** {result['audio_analysis']['result']} "
+        f"(Confidence: {result['audio_analysis']['confidence']:.2f})\n\n"
+        f"**Ethical Analysis:** {result['ethical_analysis']}\n\n"
+        f"**Explanation:** {result['explanation']}\n\n"
+        f"**Shareable Link:** {result['shareable_link']}"
     )
 
 def diagnose_tb(image_file, audio_file):
@@ -56,7 +56,7 @@ def upload_and_finetune(jsonl_file):
     if jsonl_file is None:
         return "Please upload a .jsonl file to fine-tune Codriao."
 
-    save_path = "./training_data/{jsonl_file.name}"
+    save_path = f"./training_data/{jsonl_file.name}"
     os.makedirs("training_data", exist_ok=True)
 
     with open(save_path, "wb") as f:
@@ -71,7 +71,7 @@ def upload_and_finetune(jsonl_file):
     except:
         pass
 
-    return "
+    return "✅ Fine-tuning complete! Model updated and stored."
 
 def get_latest_model():
     return "Download the latest fine-tuned Codriao model here: https://huggingface.co/Raiff1982/codriao-finetuned"
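Most of the `app.py` changes simply add the `f` prefix that the original string literals were missing; without it, Python prints the placeholder text verbatim instead of interpolating the values. A minimal illustration (the `result` dict below is a stand-in for this example, not the structure returned by `AICoreAGIX`):

```python
result = {"tb_risk": "Low"}  # placeholder payload for illustration only

print("**TB Risk Level:** {result['tb_risk']}")   # plain string: braces are printed literally
print(f"**TB Risk Level:** {result['tb_risk']}")  # f-string: prints "**TB Risk Level:** Low"
```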
codette_codriao_toneprint.wav
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:58a1e8d5fdb3dc887b2bdcf998b731b7eeec0f0af482d4fc4d8e55028d174e65
-size 1146644
status.js
DELETED
@@ -1,18 +0,0 @@
-import { Client } from "@gradio/client";
-
-function log_status(status) {
-  console.log(
-    `The current status for this job is: ${JSON.stringify(status, null, 2)}.`
-  );
-}
-
-const app = await Client.connect("abidlabs/en2fr", {
-  events: ["status", "data"]
-});
-const job = app.submit("/predict", ["Hello"]);
-
-for await (const message of job) {
-  if (message.type === "status") {
-    log_status(message);
-  }
-}
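The removed `status.js` polled job status through the JavaScript `@gradio/client`. A rough Python-side equivalent using the `gradio_client` package is sketched below; the Space name and input are carried over from the deleted script, but the polling loop itself is an assumption rather than the project's actual replacement:

```python
import time

from gradio_client import Client

client = Client("abidlabs/en2fr")                  # same demo Space the JS version targeted
job = client.submit("Hello", api_name="/predict")  # non-blocking, returns a Job handle

while not job.done():
    print(f"The current status for this job is: {job.status()}.")
    time.sleep(0.5)

print(job.result())  # final output once the job finishes
```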
tool_call.py
DELETED
@@ -1,30 +0,0 @@
-from gradio_tool import GradioTool
-import os
-
-class StableDiffusionTool(GradioTool):
-    """Tool for calling stable diffusion from llm"""
-
-    def __init__(
-        self,
-        name="StableDiffusion",
-        description=(
-            "An image generator. Use this to generate images based on "
-            "text input. Input should be a description of what the image should "
-            "look like. The output will be a path to an image file."
-        ),
-        src="gradio-client-demos/stable-diffusion",
-        hf_token=None,
-    ) -> None:
-        super().__init__(name, description, src, hf_token)
-
-    def create_job(self, query: str) -> Job:
-        return self.client.submit(query, "", 9, fn_index=1)
-
-    def postprocess(self, output: str) -> str:
-        return [os.path.join(output, i) for i in os.listdir(output) if not i.endswith("json")][0]
-
-    def _block_input(self, gr) -> "gr.components.Component":
-        return gr.Textbox()
-
-    def _block_output(self, gr) -> "gr.components.Component":
-        return gr.Image()
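Note that the deleted tool annotates `create_job` with a `Job` type it never imports, so importing the module would have raised a `NameError` when the class body was evaluated (absent deferred annotations). Were the file restored, either fix below would work; the import path is an assumption about where `gradio_client` defines `Job`, not something the original file contained:

```python
from __future__ import annotations  # option 1: defer annotation evaluation (must be the first statement)

# option 2: import the class the annotation refers to (assumed location of Job)
from gradio_client.client import Job
```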