Update app.py
app.py
CHANGED
@@ -2,119 +2,145 @@ import gradio as gr
Removed version (app.py before this commit; lines marked "-" were deleted, several of them are truncated in the diff view):

import torch
import numpy as np
import plotly.graph_objects as go
-from huggingface_hub import
-import sys
-import os
from pathlib import Path

-# 1.
def setup_model():
    REPO_ID = "VLabTech/cognitive_net"
-
-        "cognitive_net/__init__.py",
-        "cognitive_net/memory.py",
-        "cognitive_net/node.py",
-        "cognitive_net/network.py"
-    ]
-
-    # Create package directory
-    model_dir = Path("cognitive_net")
-    model_dir.mkdir(exist_ok=True)

-    # Download
-
-
-
-
-
-
-
-            )
-        except Exception as e:
-            print(f"Error downloading {file}: {str(e)}")

-    #
-
-    sys.path.insert(0, str(model_dir.absolute()))

-
-class CognitiveDemo:
-    def __init__(self):
-        setup_model()
-
-        try:
-            from cognitive_net import DynamicCognitiveNet
-            self.net = DynamicCognitiveNet(input_size=5, output_size=1)
-            self.net.optimizer = torch.optim.AdamW(self.net.parameters(), lr=0.001)
-        except ImportError as e:
-            raise RuntimeError(f"Gagal memuat model: {str(e)}")
-
-        self.training_history = []

-
-
-        X = X.view(-1, 1)  # Shape (seq_len, 1)
-        y = y.view(1)  # Shape (1,)
-        return X, y

-
        try:
-
-            nums = [float(n.strip()) for n in sequence.split(',')]
-            if len(nums) < 6:
-                raise ValueError("Input minimal 6 angka")
-
-            X = torch.tensor(nums[:-1])
-            y = torch.tensor([nums[-1]])
-
-            # Adapt dimensions
-            X, y = self._adapt_model(X, y)

            # Training loop
-
            for _ in range(epochs):
                loss = self.net.train_step(X, y)
-
-
            return {
-                "prediction":
-                "loss_plot": self.
-                "
            }
-
        except Exception as e:
            return {"error": str(e)}
-
-    def

        fig = go.Figure()
-        fig.add_trace(go.Scatter(
-
        return fig

-# 3. Gradio
demo = CognitiveDemo()

-with gr.Blocks(title="Cognitive Network Demo") as app:
-    gr.Markdown("
-
-
-
-
-

    with gr.Row():
-
-
-

-

    train_btn.click(
-        fn=
        inputs=[input_seq, epochs],
-        outputs=[
    )

-# 4.
if __name__ == "__main__":
    app.launch(debug=True)
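Review note on the removed setup: the old setup_model() fetched the four package modules one at a time, but the download call itself is truncated in this view. A minimal sketch of that per-file pattern, assuming it used huggingface_hub.hf_hub_download (an assumption, since the call is not visible in the diff):

# Hypothetical reconstruction of the removed download loop; hf_hub_download
# and the local_dir value are assumptions, not the committed code.
from pathlib import Path
from huggingface_hub import hf_hub_download

REPO_ID = "VLabTech/cognitive_net"
FILES = [
    "cognitive_net/__init__.py",
    "cognitive_net/memory.py",
    "cognitive_net/node.py",
    "cognitive_net/network.py",
]

Path("cognitive_net").mkdir(exist_ok=True)
for file in FILES:
    try:
        # Each file keeps its repo-relative path under local_dir.
        hf_hub_download(repo_id=REPO_ID, filename=file, local_dir=".")
    except Exception as e:
        print(f"Error downloading {file}: {str(e)}")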
Updated version (app.py after this commit; lines marked "+" were added):

import torch
import numpy as np
import plotly.graph_objects as go
+from huggingface_hub import snapshot_download
from pathlib import Path
+import sys

+# 1. Set up the model from the Hugging Face Hub ----------------------------
def setup_model():
    REPO_ID = "VLabTech/cognitive_net"
+    LOCAL_DIR = "cognitive_net_pkg"

+    # Download repo
+    snapshot_download(
+        repo_id=REPO_ID,
+        local_dir=LOCAL_DIR,
+        allow_patterns=["*.py", "*.txt"],
+        repo_type="model",
+        local_dir_use_symlinks=False
+    )

+    # Add it to the Python path
+    sys.path.insert(0, str(Path(LOCAL_DIR).absolute()))

+setup_model()

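Review note: snapshot_download() returns the path of the directory it populated, so LOCAL_DIR does not have to be repeated, and in recent huggingface_hub releases local_dir_use_symlinks is deprecated (real files are written into local_dir), so that argument can usually be dropped. A compact variant, offered as a sketch rather than the committed code:

# Variant sketch of setup_model(), not the committed code.
import sys
from pathlib import Path
from huggingface_hub import snapshot_download

def setup_model():
    local_path = snapshot_download(
        repo_id="VLabTech/cognitive_net",
        local_dir="cognitive_net_pkg",
        allow_patterns=["*.py", "*.txt"],
    )
    # snapshot_download returns the local directory it filled.
    sys.path.insert(0, str(Path(local_path).absolute()))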
+# 2. Model implementation --------------------------------------------
+from cognitive_net.network import DynamicCognitiveNet

+class CognitiveDemo:
+    def __init__(self):
+        self.net = DynamicCognitiveNet(input_size=5, output_size=1)
+        self.training_loss = []
+        self.emotion_states = []
+
+    def _parse_input(self, sequence_str):
+        """Convert the input string into tensors."""
+        sequence = [float(x.strip()) for x in sequence_str.split(',')]
+        if len(sequence) < 6:
+            raise ValueError("Input minimal 6 angka")
+        return (
+            torch.tensor(sequence[:-1]).float(),
+            torch.tensor([sequence[-1]]).float()
+        )
+
+    def train(self, sequence_str, epochs):
        try:
+            X, y = self._parse_input(sequence_str)

            # Training loop
+            self.training_loss = []
+            self.emotion_states = []
+
            for _ in range(epochs):
                loss = self.net.train_step(X, y)
+                self.training_loss.append(loss)
+                self.emotion_states.append(self.net.emotional_state.item())
+
+            # Final prediction
+            with torch.no_grad():
+                pred = self.net(X)
+
            return {
+                "prediction": f"{pred.item():.4f}",
+                "loss_plot": self._create_loss_plot(),
+                "emotion_plot": self._create_emotion_plot()
            }
        except Exception as e:
            return {"error": str(e)}
+
+    def _create_loss_plot(self):
+        fig = go.Figure()
+        fig.add_trace(go.Scatter(
+            y=self.training_loss,
+            mode='lines+markers',
+            name='Loss'
+        ))
+        fig.update_layout(
+            title='Training Loss',
+            xaxis_title='Epoch',
+            yaxis_title='Loss Value'
+        )
+        return fig
+
+    def _create_emotion_plot(self):
        fig = go.Figure()
+        fig.add_trace(go.Scatter(
+            y=self.emotion_states,
+            mode='lines',
+            name='Emotional State',
+            line=dict(color='#FF6F61')
+        ))
+        fig.update_layout(
+            title='Emotional State Dynamics',
+            xaxis_title='Epoch',
+            yaxis_title='State Value'
+        )
        return fig

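Review note: train() returns a dict keyed by the strings "prediction", "loss_plot" and "emotion_plot", but the click handler below declares outputs=[output_pred, loss_plot, emotion_plot]. With a list of output components, Gradio expects one return value per component (or a dict keyed by the component objects themselves), so neither the string-keyed dict nor the {"error": ...} branch will populate the three outputs. A sketch of a return shape that matches the wiring, offered as a suggested fix rather than part of this commit:

# Suggested drop-in replacement for CognitiveDemo.train (sketch).
def train(self, sequence_str, epochs):
    try:
        X, y = self._parse_input(sequence_str)
        self.training_loss, self.emotion_states = [], []
        for _ in range(int(epochs)):
            loss = self.net.train_step(X, y)
            self.training_loss.append(loss)
            self.emotion_states.append(self.net.emotional_state.item())
        with torch.no_grad():
            pred = self.net(X)
        # Return positionally, in the same order as the outputs list.
        return f"{pred.item():.4f}", self._create_loss_plot(), self._create_emotion_plot()
    except Exception as e:
        # Still return three values so every output component is updated.
        return f"Error: {e}", None, None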
+# 3. Gradio interface -----------------------------------------------
demo = CognitiveDemo()

+with gr.Blocks(theme=gr.themes.Soft(), title="Cognitive Network Demo") as app:
+    gr.Markdown("# 🧠 Cognitive Network Demo")
+    gr.Markdown("""
+    **Demonstrasi Jaringan Saraf Kognitif dengan:**
+    - Memori Adaptif
+    - Plastisitas Struktural
+    - Modulasi Emosional
+    """)

    with gr.Row():
+        with gr.Column():
+            input_seq = gr.Textbox(
+                label="Deret Input (contoh: 0.1, 0.3, 0.5, 0.7, 0.9, 1.1)",
+                value="0.1, 0.3, 0.5, 0.7, 0.9, 1.1"
+            )
+            epochs = gr.Slider(10, 500, value=100, label="Jumlah Epoch")
+            train_btn = gr.Button("🚀 Latih Model", variant="primary")
+
+        with gr.Column():
+            output_pred = gr.Label(label="Prediksi")
+            loss_plot = gr.Plot(label="Progress Training")
+            emotion_plot = gr.Plot(label="Dinamika Emosional")

+    # Preset example data
+    gr.Examples(
+        examples=[
+            ["1, 2, 3, 4, 5, 6", 100],
+            ["0.5, 1.0, 1.5, 2.0, 2.5, 3.0", 150],
+            ["10, 8, 6, 4, 2, 0", 200]
+        ],
+        inputs=[input_seq, epochs],
+        label="Contoh Input"
+    )

    train_btn.click(
+        fn=demo.train,
        inputs=[input_seq, epochs],
+        outputs=[output_pred, loss_plot, emotion_plot]
    )

+# 4. Run the application ---------------------------------------------
if __name__ == "__main__":
    app.launch(debug=True)
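Review note: the handler can be smoke-tested without launching the UI, and the Space will also need a requirements.txt (not part of this diff) providing gradio, torch, numpy, plotly and huggingface_hub. A quick local check, assuming the committed dict-returning train():

# Quick smoke test of the training handler (run after the definitions above).
result = demo.train("0.1, 0.3, 0.5, 0.7, 0.9, 1.1", 50)
print(result)  # dict with "prediction", "loss_plot", "emotion_plot", or {"error": ...}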