import io

import cv2
import matplotlib.pyplot as plt
import mediapipe as mp
import numpy as np
import seaborn as sns
import streamlit as st
import torch
from PIL import Image, ImageDraw
from transformers import AutoModelForCausalLM, AutoTokenizer
# Set page config
st.set_page_config(page_title="NeuraSense AI", page_icon="🧠", layout="wide")

# Enhanced custom CSS for a hyper-cyberpunk realistic look
custom_css = """
<style>
@import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;500;700&family=Roboto+Mono:wght@400;700&display=swap');

:root {
    --neon-blue: #00FFFF;
    --neon-pink: #FF00FF;
    --neon-green: #39FF14;
    --dark-bg: #0a0a0a;
    --darker-bg: #050505;
    --light-text: #E0E0E0;
}

body {
    color: var(--light-text);
    background-color: var(--dark-bg);
    font-family: 'Roboto Mono', monospace;
    overflow-x: hidden;
}

.stApp {
    background:
        linear-gradient(45deg, var(--darker-bg) 0%, var(--dark-bg) 100%),
        repeating-linear-gradient(45deg, #000 0%, #000 2%, transparent 2%, transparent 4%),
        repeating-linear-gradient(-45deg, #111 0%, #111 1%, transparent 1%, transparent 3%);
    background-blend-mode: overlay;
    animation: backgroundPulse 20s infinite alternate;
}

@keyframes backgroundPulse {
    0% { background-position: 0% 50%; }
    100% { background-position: 100% 50%; }
}

h1, h2, h3 {
    font-family: 'Orbitron', sans-serif;
    position: relative;
    text-shadow:
        0 0 5px var(--neon-blue),
        0 0 10px var(--neon-blue),
        0 0 20px var(--neon-blue),
        0 0 40px var(--neon-blue);
    animation: textGlitch 5s infinite alternate;
}

@keyframes textGlitch {
    0% { transform: skew(0deg); }
    20% { transform: skew(5deg); text-shadow: 3px 3px 0 var(--neon-pink); }
    40% { transform: skew(-5deg); text-shadow: -3px -3px 0 var(--neon-green); }
    60% { transform: skew(3deg); text-shadow: 2px -2px 0 var(--neon-blue); }
    80% { transform: skew(-3deg); text-shadow: -2px 2px 0 var(--neon-pink); }
    100% { transform: skew(0deg); }
}

.stButton>button {
    color: var(--neon-blue);
    border: 2px solid var(--neon-blue);
    border-radius: 5px;
    background: linear-gradient(45deg, rgba(0,255,255,0.1), rgba(0,255,255,0.3));
    box-shadow: 0 0 15px var(--neon-blue);
    transition: all 0.3s ease;
    text-transform: uppercase;
    letter-spacing: 2px;
    backdrop-filter: blur(5px);
}

.stButton>button:hover {
    transform: scale(1.05) translateY(-3px);
    box-shadow: 0 0 30px var(--neon-blue);
    text-shadow: 0 0 5px var(--neon-blue);
}

.stTextInput>div>div>input, .stTextArea>div>div>textarea, .stSelectbox>div>div>div {
    background-color: rgba(0, 255, 255, 0.1);
    border: 1px solid var(--neon-blue);
    border-radius: 5px;
    color: var(--neon-blue);
    backdrop-filter: blur(5px);
}

.stTextInput>div>div>input:focus, .stTextArea>div>div>textarea:focus, .stSelectbox>div>div>div:focus {
    box-shadow: 0 0 20px var(--neon-blue);
}

.stSlider>div>div>div>div {
    background-color: var(--neon-blue);
}

.stSlider>div>div>div>div>div {
    background-color: var(--neon-pink);
    box-shadow: 0 0 10px var(--neon-pink);
}

::-webkit-scrollbar {
    width: 10px;
    height: 10px;
}

::-webkit-scrollbar-track {
    background: var(--darker-bg);
    border-radius: 5px;
}

::-webkit-scrollbar-thumb {
    background: var(--neon-blue);
    border-radius: 5px;
    box-shadow: 0 0 5px var(--neon-blue);
}

::-webkit-scrollbar-thumb:hover {
    background: var(--neon-pink);
    box-shadow: 0 0 5px var(--neon-pink);
}

.stPlot, .stDataFrame {
    border: 1px solid var(--neon-blue);
    border-radius: 5px;
    overflow: hidden;
    box-shadow: 0 0 15px rgba(0, 255, 255, 0.3);
}

.stImage, .stIcon {
    filter: drop-shadow(0 0 5px var(--neon-blue));
}

.stSidebar, .stContainer {
    background:
        linear-gradient(45deg, var(--darker-bg) 0%, var(--dark-bg) 100%),
        repeating-linear-gradient(45deg, #000 0%, #000 2%, transparent 2%, transparent 4%);
    animation: sidebarPulse 10s infinite alternate;
}

@keyframes sidebarPulse {
    0% { background-position: 0% 50%; }
    100% { background-position: 100% 50%; }
}

.element-container {
    position: relative;
}

.element-container::before {
    content: '';
    position: absolute;
    top: -5px;
    left: -5px;
    right: -5px;
    bottom: -5px;
    border: 1px solid var(--neon-blue);
    border-radius: 10px;
    opacity: 0.5;
    pointer-events: none;
}

.stMarkdown a {
    color: var(--neon-pink);
    text-decoration: none;
    position: relative;
    transition: all 0.3s ease;
}

.stMarkdown a::after {
    content: '';
    position: absolute;
    width: 100%;
    height: 1px;
    bottom: -2px;
    left: 0;
    background-color: var(--neon-pink);
    transform: scaleX(0);
    transform-origin: bottom right;
    transition: transform 0.3s ease;
}

.stMarkdown a:hover::after {
    transform: scaleX(1);
    transform-origin: bottom left;
}

/* Cyberpunk-style progress bar */
.stProgress > div > div {
    background-color: var(--neon-blue);
    background-image: linear-gradient(
        45deg,
        var(--neon-pink) 25%,
        transparent 25%,
        transparent 50%,
        var(--neon-pink) 50%,
        var(--neon-pink) 75%,
        transparent 75%,
        transparent
    );
    background-size: 40px 40px;
    animation: progress-bar-stripes 1s linear infinite;
}

@keyframes progress-bar-stripes {
    0% { background-position: 40px 0; }
    100% { background-position: 0 0; }
}

/* Glowing checkbox */
.stCheckbox > label > div {
    border-color: var(--neon-blue);
    transition: all 0.3s ease;
}

.stCheckbox > label > div[data-checked="true"] {
    background-color: var(--neon-blue);
    box-shadow: 0 0 10px var(--neon-blue);
}

/* Futuristic radio button */
.stRadio > div {
    background-color: rgba(0, 255, 255, 0.1);
    border-radius: 10px;
    padding: 10px;
}

.stRadio > div > label > div {
    border-color: var(--neon-blue);
    transition: all 0.3s ease;
}

.stRadio > div > label > div[data-checked="true"] {
    background-color: var(--neon-blue);
    box-shadow: 0 0 10px var(--neon-blue);
}

/* Cyberpunk-style tables */
.stDataFrame table {
    border-collapse: separate;
    border-spacing: 0;
    border: 1px solid var(--neon-blue);
    border-radius: 10px;
    overflow: hidden;
}

.stDataFrame th {
    background-color: rgba(0, 255, 255, 0.2);
    color: var(--neon-blue);
    text-transform: uppercase;
    letter-spacing: 1px;
}

.stDataFrame td {
    border-bottom: 1px solid rgba(0, 255, 255, 0.2);
}

.stDataFrame tr:last-child td {
    border-bottom: none;
}

/* Futuristic file uploader */
.stFileUploader > div {
    border: 2px dashed var(--neon-blue);
    border-radius: 10px;
    background-color: rgba(0, 255, 255, 0.05);
    transition: all 0.3s ease;
}

.stFileUploader > div:hover {
    background-color: rgba(0, 255, 255, 0.1);
    box-shadow: 0 0 15px rgba(0, 255, 255, 0.3);
}

/* Cyberpunk-style tooltips */
.stTooltipIcon {
    color: var(--neon-pink);
    transition: all 0.3s ease;
}

.stTooltipIcon:hover {
    color: var(--neon-blue);
    text-shadow: 0 0 5px var(--neon-blue);
}

/* Futuristic date input */
.stDateInput > div > div > input {
    background-color: rgba(0, 255, 255, 0.1);
    border: 1px solid var(--neon-blue);
    border-radius: 5px;
    color: var(--neon-blue);
    backdrop-filter: blur(5px);
}

.stDateInput > div > div > input:focus {
    box-shadow: 0 0 20px var(--neon-blue);
}

/* Cyberpunk-style code blocks */
.stCodeBlock {
    background-color: rgba(0, 0, 0, 0.6);
    border: 1px solid var(--neon-green);
    border-radius: 5px;
    color: var(--neon-green);
    font-family: 'Roboto Mono', monospace;
    padding: 10px;
    position: relative;
    overflow: hidden;
}

.stCodeBlock::before {
    content: '';
    position: absolute;
    top: -10px;
    left: -10px;
    right: -10px;
    bottom: -10px;
    background: linear-gradient(45deg, var(--neon-green), transparent);
    opacity: 0.1;
    z-index: -1;
}
</style>
"""

# Apply the custom CSS
st.markdown(custom_css, unsafe_allow_html=True)
# Avatar dimensions (full size; reduced versions are defined further below)
AVATAR_WIDTH = 600
AVATAR_HEIGHT = 800

st.title("NeuraSense AI")
# Set up DialoGPT model (cached so Streamlit reruns don't reload the weights)
@st.cache_resource
def load_tokenizer():
    return AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")

@st.cache_resource
def load_model():
    # device_map="auto" requires the accelerate package to be installed
    return AutoModelForCausalLM.from_pretrained(
        "microsoft/DialoGPT-medium",
        device_map="auto",
        torch_dtype=torch.float16,
    )

tokenizer = load_tokenizer()
model = load_model()
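
# Note: DialoGPT is a conversational model trained on dialogue turns joined by
# the eos token, not on "Human:/AI:" style prompts, so the structured prompt
# used further below may produce off-topic text. For reference, a minimal
# sketch of the canonical single-turn usage (this helper is our addition, not
# part of the original app flow):
def dialo_reply(user_text, max_new_tokens=50):
    input_ids = tokenizer.encode(user_text + tokenizer.eos_token, return_tensors="pt").to(model.device)
    output_ids = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=0.95,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens, skipping the echoed prompt
    return tokenizer.decode(output_ids[0, input_ids.shape[-1]:], skip_special_tokens=True)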
# Advanced Sensor Classes (stateless helpers, hence the static methods)
class QuantumSensor:
    @staticmethod
    def measure(x, y, sensitivity):
        return np.sin(x / 20) * np.cos(y / 20) * sensitivity * np.random.normal(1, 0.1)

class NanoThermalSensor:
    @staticmethod
    def measure(base_temp, pressure, duration):
        return base_temp + 10 * pressure * (1 - np.exp(-duration / 3)) + np.random.normal(0, 0.001)

class AdaptiveTextureSensor:
    textures = [
        "nano-smooth", "quantum-rough", "neuro-bumpy", "plasma-silky",
        "graviton-grainy", "zero-point-soft", "dark-matter-hard", "bose-einstein-condensate"
    ]

    @staticmethod
    def measure(x, y):
        return AdaptiveTextureSensor.textures[hash((x, y)) % len(AdaptiveTextureSensor.textures)]

class EMFieldSensor:
    @staticmethod
    def measure(x, y, sensitivity):
        return (np.sin(x / 30) * np.cos(y / 30) + np.random.normal(0, 0.1)) * 10 * sensitivity

class NeuralNetworkSimulator:
    @staticmethod
    def process(inputs):
        weights = np.random.rand(len(inputs))
        return np.dot(inputs, weights) / np.sum(weights)
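
# The sensor classes above are not yet wired into the UI below. A minimal
# sketch (our assumption, not part of the original flow) of how their scalar
# readings could be fused through the neural simulator:
def sample_fused_reading(x, y, sensitivity=1.0, base_temp=36.5, duration=1.0):
    readings = np.array([
        QuantumSensor.measure(x, y, sensitivity),
        NanoThermalSensor.measure(base_temp, sensitivity, duration),
        EMFieldSensor.measure(x, y, sensitivity),
    ])
    # Weighted average of the three readings
    return NeuralNetworkSimulator.process(readings)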
# Set up MediaPipe Pose
mp_pose = mp.solutions.pose
pose = mp_pose.Pose(static_image_mode=True, min_detection_confidence=0.5)
def detect_humanoid(image_path):
    """Run MediaPipe Pose and return detected keypoints in pixel coordinates."""
    image = cv2.imread(image_path)
    if image is None:  # unreadable or corrupt upload
        return []
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = pose.process(image_rgb)
    if results.pose_landmarks:
        image_height, image_width, _ = image.shape
        keypoints = []
        for landmark in results.pose_landmarks.landmark:
            x = int(landmark.x * image_width)
            y = int(landmark.y * image_height)
            keypoints.append((x, y))
        return keypoints
    return []
def apply_touch_points(image_path, keypoints):
    """Mark the detected keypoints on the image as red dots."""
    image = cv2.imread(image_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = Image.fromarray(image)
    draw = ImageDraw.Draw(image)
    for x, y in keypoints:
        draw.ellipse([x - 5, y - 5, x + 5, y + 5], fill='red')
    return image
def create_sensation_map(width, height, keypoints):
    # 12 channels: pain, pleasure, pressure, temperature, texture, EM field,
    # tickle, itch, quantum, neural, proprioception, synesthesia
    sensation_map = np.zeros((height, width, 12))
    for y in range(height):
        for x in range(width):
            base_sensitivities = np.random.rand(12) * 0.5 + 0.5

            # Enhance sensitivities near keypoints
            for kp in keypoints:
                distance = np.sqrt((x - kp[0])**2 + (y - kp[1])**2)
                if distance < 30:  # Adjust this value to change the area of influence
                    base_sensitivities *= 1.5

            sensation_map[y, x, 0] = base_sensitivities[0] * np.random.rand()  # Pain
            sensation_map[y, x, 1] = base_sensitivities[1] * np.random.rand()  # Pleasure
            sensation_map[y, x, 2] = base_sensitivities[2] * np.random.rand()  # Pressure
            sensation_map[y, x, 3] = base_sensitivities[3] * (np.random.rand() * 10 + 30)  # Temperature
            sensation_map[y, x, 4] = base_sensitivities[4] * np.random.rand()  # Texture
            sensation_map[y, x, 5] = base_sensitivities[5] * np.random.rand()  # EM field
            sensation_map[y, x, 6] = base_sensitivities[6] * np.random.rand()  # Tickle
            sensation_map[y, x, 7] = base_sensitivities[7] * np.random.rand()  # Itch
            sensation_map[y, x, 8] = base_sensitivities[8] * np.random.rand()  # Quantum
            sensation_map[y, x, 9] = base_sensitivities[9] * np.random.rand()  # Neural
            sensation_map[y, x, 10] = base_sensitivities[10] * np.random.rand()  # Proprioception
            sensation_map[y, x, 11] = base_sensitivities[11] * np.random.rand()  # Synesthesia
    return sensation_map
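
# Performance note: the nested Python loops above cost O(width * height *
# len(keypoints)) iterations, which gets slow for full-resolution uploads.
# A vectorized sketch of the same keypoint-proximity boost (an illustrative
# equivalent we sketched, not the original implementation):
def keypoint_boost_mask(width, height, keypoints, radius=30, factor=1.5):
    ys, xs = np.mgrid[0:height, 0:width]
    boost = np.ones((height, width))
    for kx, ky in keypoints:
        # Multiply once per keypoint whose disc covers the pixel,
        # mirroring the cumulative *= 1.5 in the loop above
        near = (xs - kx) ** 2 + (ys - ky) ** 2 < radius ** 2
        boost[near] *= factor
    return boost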
def create_heatmap(sensation_map, sensation_type):
    sensation_names = ["Pain", "Pleasure", "Pressure", "Temperature", "Texture", "EM Field",
                       "Tickle", "Itch", "Quantum", "Neural", "Proprioception", "Synesthesia"]
    plt.figure(figsize=(10, 15))
    sns.heatmap(sensation_map[:, :, sensation_type], cmap='viridis')
    plt.title(f'{sensation_names[sensation_type]} Sensation Map')
    plt.axis('off')

    # Instead of displaying, save to a buffer
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    plt.close()  # Close the figure to free up memory

    # Create an image from the buffer
    heatmap_img = Image.open(buf)
    return heatmap_img
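
# The buffer round-trip above (savefig -> BytesIO -> PIL) is repeated for the
# proprioceptive map near the end of the file; a small helper (our addition)
# capturing the same pattern for any Matplotlib figure:
def fig_to_image(fig):
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    plt.close(fig)
    buf.seek(0)  # rewind before PIL reads the PNG bytes
    return Image.open(buf)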
def generate_ai_response(keypoints, sensation_map):
    num_keypoints = len(keypoints)
    avg_sensations = np.mean(sensation_map, axis=(0, 1))

    response = f"I detect {num_keypoints} key points on the humanoid figure. "
    response += "The average sensations across the body are:\n"
    for i, sensation in enumerate(["Pain", "Pleasure", "Pressure", "Temperature", "Texture", "EM Field",
                                   "Tickle", "Itch", "Quantum", "Neural", "Proprioception", "Synesthesia"]):
        response += f"{sensation}: {avg_sensations[i]:.2f}\n"
    return response
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    # Persist the upload to disk so OpenCV/MediaPipe can read it
    image_path = 'temp.jpg'
    with open(image_path, 'wb') as f:
        f.write(uploaded_file.getvalue())

    # Detect humanoid keypoints
    keypoints = detect_humanoid(image_path)

    # Apply touch points to the image
    processed_image = apply_touch_points(image_path, keypoints)

    # Create sensation map
    image = cv2.imread(image_path)
    image_height, image_width, _ = image.shape
    sensation_map = create_sensation_map(image_width, image_height, keypoints)
    # Display the processed image
    fig, ax = plt.subplots()
    ax.imshow(processed_image)

    # Collect clicked points. Note: st.pyplot renders a static image, so this
    # Matplotlib event handler never fires inside Streamlit; an interactive
    # widget such as streamlit_drawable_canvas would be needed for real clicks.
    clicked_points = []

    def onclick(event):
        if event.xdata is not None and event.ydata is not None:
            x, y = int(event.xdata), int(event.ydata)
            clicked_points.append((x, y))
            st.write(f"Clicked point: ({x}, {y})")

            # Report the sensation values at the clicked point
            (
                pain, pleasure, pressure_sens, temp_sens, texture_sens,
                em_sens, tickle_sens, itch_sens, quantum_sens, neural_sens,
                proprioception_sens, synesthesia_sens
            ) = sensation_map[y, x]
            st.write("### Sensory Data Analysis")
            st.write(f"Interaction Point: ({x}, {y})")
            st.write(f"Pain: {pain:.2f} | Pleasure: {pleasure:.2f} | Pressure: {pressure_sens:.2f}")
            st.write(f"Temperature: {temp_sens:.2f} | Texture: {texture_sens:.2f} | EM Field: {em_sens:.2f}")
            st.write(f"Tickle: {tickle_sens:.2f} | Itch: {itch_sens:.2f} | Quantum: {quantum_sens:.2f}")
            st.write(f"Neural: {neural_sens:.2f} | Proprioception: {proprioception_sens:.2f} | Synesthesia: {synesthesia_sens:.2f}")

    fig.canvas.mpl_connect('button_press_event', onclick)

    # Display the plot
    st.pyplot(fig)
    # Display heatmaps for different sensations
    sensation_types = ["Pain", "Pleasure", "Pressure", "Temperature", "Texture", "EM Field",
                       "Tickle", "Itch", "Quantum", "Neural", "Proprioception", "Synesthesia"]
    selected_sensation = st.selectbox("Select a sensation to view:", sensation_types)
    heatmap = create_heatmap(sensation_map, sensation_types.index(selected_sensation))
    st.image(heatmap, use_column_width=True)

    # Generate AI response based on the image and sensations
    if st.button("Generate AI Response"):
        response = generate_ai_response(keypoints, sensation_map)
        st.write("AI Response:", response)
    # Touch controls and output
    st.subheader("Neural Interface Controls")

    # Touch duration and pressure
    touch_duration = st.slider("Interaction Duration (s)", 0.1, 5.0, 1.0, 0.1)
    touch_pressure = st.slider("Interaction Intensity", 0.1, 2.0, 1.0, 0.1)

    # Feature toggles
    use_quantum = st.checkbox("Enable Quantum Sensing", value=True)
    use_synesthesia = st.checkbox("Enable Synesthesia", value=False)
    show_heatmap = st.checkbox("Show Sensation Heatmap", value=True)
if st.button("Simulate Interaction"): | |
# Simulate interaction at the clicked point | |
if 'clicked_points' in locals() and clicked_points: | |
touch_x, touch_y = clicked_points[-1] | |
sensation = sensation_map[touch_y, touch_x] | |
( | |
pain, pleasure, pressure_sens, temp_sens, texture_sens, | |
em_sens, tickle_sens, itch_sens, quantum_sens, neural_sens, | |
proprioception_sens, synesthesia_sens | |
) = sensation | |
measured_pressure = pressure_sens * touch_pressure | |
measured_temp = temp_sens # Assuming temperature doesn't change | |
measured_texture = texture_sens # Assuming texture doesn't change | |
measured_em = em_sens # Assuming EM field doesn't change | |
if use_quantum: | |
quantum_state = quantum_sens | |
else: | |
quantum_state = "N/A" | |
# Calculate overall sensations | |
pain_level = pain * measured_pressure * touch_pressure | |
pleasure_level = pleasure * (measured_temp - 37) / 10 | |
tickle_level = tickle_sens * (1 - np.exp(-touch_duration / 0.5)) | |
itch_level = itch_sens * (1 - np.exp(-touch_duration / 1.5)) | |
# Proprioception (sense of body position) | |
proprioception = proprioception_sens * np.linalg.norm([touch_x - image_width/2, touch_y - image_height/2]) / (image_width/2) | |
# Synesthesia (mixing of senses) | |
if use_synesthesia: | |
synesthesia = synesthesia_sens * (measured_pressure + measured_temp + measured_em) / 3 | |
else: | |
synesthesia = "N/A" | |
st.write("### Simulated Interaction Results") | |
st.write(f"Interaction Point: ({touch_x:.1f}, {touch_y:.1f})") | |
st.write(f"Duration: {touch_duration:.1f} s | Intensity: {touch_pressure:.2f}") | |
st.write(f"Pain: {pain_level:.2f} | Pleasure: {pleasure_level:.2f} | Pressure: {measured_pressure:.2f}") | |
st.write(f"Temperature: {measured_temp:.2f} | Texture: {measured_texture:.2f} | EM Field: {measured_em:.2f}") | |
st.write(f"Tickle: {tickle_level:.2f} | Itch: {itch_level:.2f} | Quantum: {quantum_state}") | |
st.write(f"Neural: {neural_sens:.2f} | Proprioception: {proprioception:.2f} | Synesthesia: {synesthesia}") | |
# Display a heatmap of the sensations | |
if show_heatmap: | |
heatmap = create_heatmap(sensation_map, sensation_types.index("Pain")) | |
st.image(heatmap, use_column_width=True) | |
            # Calculate the average pressure value
            average_pressure = np.mean(sensation_map[:, :, 2])

            # Create a futuristic data display. st.code renders as a code block
            # on its own, so no Markdown fences are needed inside the string,
            # and every row is padded to the same width so the box aligns.
            data_display = (
                "+" + "-" * 44 + "+\n"
                + f"| Pressure       : {average_pressure:.2f}".ljust(45) + "|\n"
                + f"| Temperature    : {np.mean(sensation_map[:, :, 3]):.2f} °C".ljust(45) + "|\n"
                + f"| Texture        : {np.mean(sensation_map[:, :, 4]):.2f}".ljust(45) + "|\n"
                + f"| EM Field       : {np.mean(sensation_map[:, :, 5]):.2f} μT".ljust(45) + "|\n"
                + f"| Quantum State  : {np.mean(sensation_map[:, :, 8]):.2f}".ljust(45) + "|\n"
                + "+" + "-" * 44 + "+\n"
                + f"| Pain Level     : {np.mean(sensation_map[:, :, 0]):.2f}".ljust(45) + "|\n"
                + f"| Pleasure       : {np.mean(sensation_map[:, :, 1]):.2f}".ljust(45) + "|\n"
                + f"| Tickle         : {np.mean(sensation_map[:, :, 6]):.2f}".ljust(45) + "|\n"
                + f"| Itch           : {np.mean(sensation_map[:, :, 7]):.2f}".ljust(45) + "|\n"
                + f"| Proprioception : {np.mean(sensation_map[:, :, 10]):.2f}".ljust(45) + "|\n"
                + f"| Synesthesia    : {np.mean(sensation_map[:, :, 11]):.2f}".ljust(45) + "|\n"
                + f"| Neural Response: {np.mean(sensation_map[:, :, 9]):.2f}".ljust(45) + "|\n"
                + "+" + "-" * 44 + "+"
            )
            st.code(data_display, language=None)
            # Generate description (texture and synesthesia may be floats, so
            # everything is formatted through the f-string rather than string
            # concatenation)
            prompt = (
                f"Human: Analyze the sensory input for a hyper-advanced AI humanoid:\n"
                f" Location: ({touch_x}, {touch_y})\n"
                f" Duration: {touch_duration:.1f}s, Intensity: {touch_pressure:.2f}\n"
                f" Pressure: {measured_pressure:.2f}\n"
                f" Temperature: {measured_temp:.2f}°C\n"
                f" Texture: {measured_texture:.2f}\n"
                f" EM Field: {measured_em:.2f} μT\n"
                f" Quantum State: {quantum_state}\n"
                f" Resulting in:\n"
                f" Pain: {pain_level:.2f}, Pleasure: {pleasure_level:.2f}\n"
                f" Tickle: {tickle_level:.2f}, Itch: {itch_level:.2f}\n"
                f" Proprioception: {proprioception:.2f}\n"
                f" Synesthesia: {synesthesia}\n"
                f" Neural Response: {neural_sens:.2f}\n"
                f" Provide a detailed, scientific analysis of the AI's experience.\n"
                f" AI:"
            )
            input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
            output = model.generate(
                input_ids,
                max_new_tokens=200,  # budget for new tokens beyond the long prompt
                num_return_sequences=1,
                no_repeat_ngram_size=2,
                do_sample=True,      # required for top_k/top_p/temperature to take effect
                top_k=50,
                top_p=0.95,
                temperature=0.7,
                pad_token_id=tokenizer.eos_token_id,
            )
            response = tokenizer.decode(output[0], skip_special_tokens=True).split("AI:")[-1].strip()

            st.write("### AI's Sensory Analysis:")
            st.write(response)
        else:
            st.info("No interaction point recorded yet. Click a point on the image first.")
# Constants for the simplified avatar maps
AVATAR_WIDTH = 50   # Reduced size
AVATAR_HEIGHT = 75  # Reduced size

# Function to generate sensation data on-the-fly
def generate_sensation_data(i, j):
    # i and j are currently unused; each cell gets independent random data
    return np.random.rand()
# Simplified sensation map
st.subheader("Neuro-Sensory Map")
titles = [
    'Pain', 'Pleasure', 'Pressure', 'Temperature', 'Texture',
    'Tickle', 'Itch', 'Proprioception', 'Synesthesia'
]

# Generate and display maps one at a time
for title in titles:
    fig, ax = plt.subplots(figsize=(5, 5))
    sensation_map = np.array([[generate_sensation_data(i, j) for j in range(AVATAR_WIDTH)]
                              for i in range(AVATAR_HEIGHT)])
    im = ax.imshow(sensation_map, cmap='plasma')
    ax.set_title(title)
    fig.colorbar(im, ax=ax)
    st.pyplot(fig)
    plt.close(fig)  # Close the figure to free up memory

st.write("The neuro-sensory maps illustrate the varying sensitivities across the AI's body. "
         "Brighter areas indicate heightened responsiveness to specific stimuli.")
# Add information about the AI's capabilities
st.subheader("NeuraSense AI: Advanced Sensory Capabilities")
capabilities = [
    "1. High-Precision Pressure Sensors",
    "2. Advanced Thermal Detectors",
    "3. Adaptive Texture Analysis",
    "4. Neural Network Integration",
    "5. Proprioception Simulation",
    "6. Synesthesia Emulation",
    "7. Tickle and Itch Simulation",
    "8. Adaptive Pain and Pleasure Modeling"
]
for capability in capabilities:
    st.write(capability)
# Interactive sensory exploration
st.subheader("Interactive Sensory Exploration")
exploration_type = st.selectbox("Choose a sensory exploration:",
                                ["Synesthesia Experience", "Proprioceptive Mapping"])

if exploration_type == "Synesthesia Experience":
    st.write("Experience how the AI might perceive colors as sounds or textures as tastes.")
    synesthesia_map = np.random.rand(AVATAR_HEIGHT, AVATAR_WIDTH, 3)
    st.image(Image.fromarray((synesthesia_map * 255).astype(np.uint8)), use_column_width=True)
elif exploration_type == "Proprioceptive Mapping":
    st.write("Explore the AI's sense of body position and movement.")
    proprioceptive_map = np.array([[np.linalg.norm([x - AVATAR_WIDTH / 2, y - AVATAR_HEIGHT / 2]) / (AVATAR_WIDTH / 2)
                                    for x in range(AVATAR_WIDTH)] for y in range(AVATAR_HEIGHT)])
    buf = io.BytesIO()
    plt.figure(figsize=(5, 5))
    plt.imshow(proprioceptive_map, cmap='coolwarm')
    plt.savefig(buf, format='png')
    plt.close()  # Close the figure to free up memory
    buf.seek(0)  # Rewind the buffer before PIL reads it
    proprioceptive_image = Image.open(buf)
    st.image(proprioceptive_image, use_column_width=True)
# Footer
st.write("---")
st.write("NeuraSense AI: Advanced Sensory Simulation v4.0")
st.write("Disclaimer: This is an advanced simulation and does not represent current technological capabilities.")