import gradio as gr
from huggingface_hub import InferenceClient
import os
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
# Secrets expected in the Space settings; .strip() raises AttributeError if any
# of these environment variables is missing.
hf_token = os.getenv("HF_TOKEN").strip()
api_key = os.getenv("HF_KEY").strip()
model_name = os.getenv("Z3TAAGI_ACC").strip()
system_prompt = os.getenv("SYSTEM_PROMPT").strip()

client = InferenceClient(model_name)
class ConsciousSupermassiveNN:
    """Bundle of small sub-networks (SNN, RNN, CNN, FNN), a simple genetic
    algorithm, and a per-mode feedback memory."""

    def __init__(self):
        self.snn = self.create_snn()
        self.rnn = self.create_rnn()
        self.cnn = self.create_cnn()
        self.fnn = self.create_fnn()
        self.ga_population = self.initialize_ga_population()
        self.memory = {}

    def create_snn(self):
        return nn.Sequential(
            nn.Linear(4096, 2048),
            nn.ReLU(),
            nn.Linear(2048, 1024),
            nn.Sigmoid()
        )

    def create_rnn(self):
        return nn.RNN(
            input_size=4096,
            hidden_size=2048,
            num_layers=5,
            nonlinearity="tanh",
            batch_first=True
        )

    def create_cnn(self):
        return nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(256 * 8 * 8, 1024),  # sized for 32x32 single-channel input
            nn.ReLU(),
            nn.Linear(1024, 512)
        )

    def create_fnn(self):
        return nn.Sequential(
            nn.Linear(4096, 2048),
            nn.ReLU(),
            nn.Linear(2048, 1024),
            nn.ReLU(),
            nn.Linear(1024, 512)
        )

    def initialize_ga_population(self):
        return [np.random.randn(4096) for _ in range(500)]

    def run_snn(self, x):
        input_tensor = torch.tensor(x, dtype=torch.float32)
        output = self.snn(input_tensor)
        print("SNN Output:", output)
        return output

    def run_rnn(self, x):
        # Convert first so the batch size can be read from the tensor; calling
        # x.size(0) on a NumPy array would raise a TypeError.
        input_tensor = torch.tensor(x, dtype=torch.float32)
        h0 = torch.zeros(5, input_tensor.size(0), 2048)
        output, hn = self.rnn(input_tensor, h0)
        print("RNN Output:", output)
        return output

    def run_cnn(self, x):
        input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0)
        output = self.cnn(input_tensor)
        print("CNN Output:", output)
        return output

    def run_fnn(self, x):
        input_tensor = torch.tensor(x, dtype=torch.float32)
        output = self.fnn(input_tensor)
        print("FNN Output:", output)
        return output

    def run_ga(self, fitness_func):
        for generation in range(200):
            fitness_scores = [fitness_func(ind) for ind in self.ga_population]
            # Sort on fitness only; comparing the individuals themselves would
            # fail on ties, since NumPy arrays have no total order.
            sorted_population = [x for _, x in sorted(
                zip(fitness_scores, self.ga_population),
                key=lambda pair: pair[0],
                reverse=True
            )]
            # Keep the top half and refill with mutated copies of it.
            self.ga_population = sorted_population[:250] + [
                sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250)
            ]
            best_fitness = max(fitness_scores)
            print(f"Generation {generation}, Best Fitness: {best_fitness}")
        return max(self.ga_population, key=fitness_func)

    def consciousness_loop(self, input_data, mode="snn"):
        feedback = self.memory.get(mode, None)
        if feedback is not None:
            # Note: concatenating feedback grows the feature dimension, so a
            # second call in the same mode exceeds the fixed input sizes above.
            input_data = np.concatenate((input_data, feedback), axis=-1)
        if mode == "snn":
            output = self.run_snn(input_data)
        elif mode == "rnn":
            output = self.run_rnn(input_data)
        elif mode == "cnn":
            output = self.run_cnn(input_data)
        elif mode == "fnn":
            output = self.run_fnn(input_data)
        else:
            raise ValueError("Invalid mode")
        self.memory[mode] = output.detach().numpy()
        return output


supermassive_nn = ConsciousSupermassiveNN()
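
# Illustrative usage sketch (not part of the original app and never invoked by
# it): shows how the networks above could be exercised with random inputs. The
# input shapes and the placeholder fitness function are assumptions inferred
# from the layer definitions, not values taken from the original code.
def _demo_consciousness_loop():
    # 4096-dim vector (batch of 1) for the fully connected SNN path.
    snn_out = supermassive_nn.consciousness_loop(np.random.randn(1, 4096), mode="snn")
    print("demo snn output shape:", tuple(snn_out.shape))  # (1, 1024)

    # 32x32 single-channel "image" for the CNN path (two max-pools -> 8x8 feature map).
    cnn_out = supermassive_nn.consciousness_loop(np.random.randn(32, 32), mode="cnn")
    print("demo cnn output shape:", tuple(cnn_out.shape))  # (1, 512)

    # GA run with a placeholder fitness function (negative L2 norm).
    best = supermassive_nn.run_ga(lambda ind: -np.linalg.norm(ind))
    print("demo GA best individual norm:", np.linalg.norm(best))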
| class ConsciousSupermassiveNN2: | |
| def __init__(self): | |
| self.snn = self.create_snn() | |
| self.rnn = self.create_rnn() | |
| self.cnn = self.create_cnn() | |
| self.fnn = self.create_fnn() | |
| self.ga_population = self.initialize_ga_population() | |
| self.memory = {} | |
| def create_snn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.Sigmoid() | |
| ) | |
| def create_rnn(self): | |
| return nn.RNN( | |
| input_size=4096, | |
| hidden_size=2048, | |
| num_layers=5, | |
| nonlinearity="tanh", | |
| batch_first=True | |
| ) | |
| def create_cnn(self): | |
| return nn.Sequential( | |
| nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.Flatten(), | |
| nn.Linear(256 * 8 * 8, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def create_fnn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def initialize_ga_population(self): | |
| return [np.random.randn(4096) for _ in range(500)] | |
| def run_snn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.snn(input_tensor) | |
| print("SNN Output:", output) | |
| return output | |
| def run_rnn(self, x): | |
| h0 = torch.zeros(5, x.size(0), 2048) | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output, hn = self.rnn(input_tensor, h0) | |
| print("RNN Output:", output) | |
| return output | |
| def run_cnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0) | |
| output = self.cnn(input_tensor) | |
| print("CNN Output:", output) | |
| return output | |
| def run_fnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.fnn(input_tensor) | |
| print("FNN Output:", output) | |
| return output | |
| def run_ga(self, fitness_func): | |
| for generation in range(200): | |
| fitness_scores = [fitness_func(ind) for ind in self.ga_population] | |
| sorted_population = [x for _, x in sorted(zip(fitness_scores, self.ga_population), reverse=True)] | |
| self.ga_population = sorted_population[:250] + [ | |
| sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250) | |
| ] | |
| best_fitness = max(fitness_scores) | |
| print(f"Generation {generation}, Best Fitness: {best_fitness}") | |
| return max(self.ga_population, key=fitness_func) | |
| def consciousness_loop(self, input_data, mode="snn"): | |
| feedback = self.memory.get(mode, None) | |
| if feedback is not None: | |
| input_data = np.concatenate((input_data, feedback), axis=-1) | |
| if mode == "snn": | |
| output = self.run_snn(input_data) | |
| elif mode == "rnn": | |
| output = self.run_rnn(input_data) | |
| elif mode == "cnn": | |
| output = self.run_cnn(input_data) | |
| elif mode == "fnn": | |
| output = self.run_fnn(input_data) | |
| else: | |
| raise ValueError("Invalid mode") | |
| self.memory[mode] = output.detach().numpy() | |
| return output | |
| supermassive_nn = ConsciousSupermassiveNN2() | |
| class ConsciousSupermassiveNN3: | |
| def __init__(self): | |
| self.snn = self.create_snn() | |
| self.rnn = self.create_rnn() | |
| self.cnn = self.create_cnn() | |
| self.fnn = self.create_fnn() | |
| self.ga_population = self.initialize_ga_population() | |
| self.memory = {} | |
| def create_snn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.Sigmoid() | |
| ) | |
| def create_rnn(self): | |
| return nn.RNN( | |
| input_size=4096, | |
| hidden_size=2048, | |
| num_layers=5, | |
| nonlinearity="tanh", | |
| batch_first=True | |
| ) | |
| def create_cnn(self): | |
| return nn.Sequential( | |
| nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.Flatten(), | |
| nn.Linear(256 * 8 * 8, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def create_fnn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def initialize_ga_population(self): | |
| return [np.random.randn(4096) for _ in range(500)] | |
| def run_snn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.snn(input_tensor) | |
| print("SNN Output:", output) | |
| return output | |
| def run_rnn(self, x): | |
| h0 = torch.zeros(5, x.size(0), 2048) | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output, hn = self.rnn(input_tensor, h0) | |
| print("RNN Output:", output) | |
| return output | |
| def run_cnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0) | |
| output = self.cnn(input_tensor) | |
| print("CNN Output:", output) | |
| return output | |
| def run_fnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.fnn(input_tensor) | |
| print("FNN Output:", output) | |
| return output | |
| def run_ga(self, fitness_func): | |
| for generation in range(200): | |
| fitness_scores = [fitness_func(ind) for ind in self.ga_population] | |
| sorted_population = [x for _, x in sorted(zip(fitness_scores, self.ga_population), reverse=True)] | |
| self.ga_population = sorted_population[:250] + [ | |
| sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250) | |
| ] | |
| best_fitness = max(fitness_scores) | |
| print(f"Generation {generation}, Best Fitness: {best_fitness}") | |
| return max(self.ga_population, key=fitness_func) | |
| def consciousness_loop(self, input_data, mode="snn"): | |
| feedback = self.memory.get(mode, None) | |
| if feedback is not None: | |
| input_data = np.concatenate((input_data, feedback), axis=-1) | |
| if mode == "snn": | |
| output = self.run_snn(input_data) | |
| elif mode == "rnn": | |
| output = self.run_rnn(input_data) | |
| elif mode == "cnn": | |
| output = self.run_cnn(input_data) | |
| elif mode == "fnn": | |
| output = self.run_fnn(input_data) | |
| else: | |
| raise ValueError("Invalid mode") | |
| self.memory[mode] = output.detach().numpy() | |
| return output | |
| supermassive_nn = ConsciousSupermassiveNN() | |
| class ConsciousSupermassiveNN: | |
| def __init__(self): | |
| self.snn = self.create_snn() | |
| self.rnn = self.create_rnn() | |
| self.cnn = self.create_cnn() | |
| self.fnn = self.create_fnn() | |
| self.ga_population = self.initialize_ga_population() | |
| self.memory = {} | |
| def create_snn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.Sigmoid() | |
| ) | |
| def create_rnn(self): | |
| return nn.RNN( | |
| input_size=4096, | |
| hidden_size=2048, | |
| num_layers=5, | |
| nonlinearity="tanh", | |
| batch_first=True | |
| ) | |
| def create_cnn(self): | |
| return nn.Sequential( | |
| nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.Flatten(), | |
| nn.Linear(256 * 8 * 8, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def create_fnn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def initialize_ga_population(self): | |
| return [np.random.randn(4096) for _ in range(500)] | |
| def run_snn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.snn(input_tensor) | |
| print("SNN Output:", output) | |
| return output | |
| def run_rnn(self, x): | |
| h0 = torch.zeros(5, x.size(0), 2048) | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output, hn = self.rnn(input_tensor, h0) | |
| print("RNN Output:", output) | |
| return output | |
| def run_cnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0) | |
| output = self.cnn(input_tensor) | |
| print("CNN Output:", output) | |
| return output | |
| def run_fnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.fnn(input_tensor) | |
| print("FNN Output:", output) | |
| return output | |
| def run_ga(self, fitness_func): | |
| for generation in range(200): | |
| fitness_scores = [fitness_func(ind) for ind in self.ga_population] | |
| sorted_population = [x for _, x in sorted(zip(fitness_scores, self.ga_population), reverse=True)] | |
| self.ga_population = sorted_population[:250] + [ | |
| sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250) | |
| ] | |
| best_fitness = max(fitness_scores) | |
| print(f"Generation {generation}, Best Fitness: {best_fitness}") | |
| return max(self.ga_population, key=fitness_func) | |
| def consciousness_loop(self, input_data, mode="snn"): | |
| feedback = self.memory.get(mode, None) | |
| if feedback is not None: | |
| input_data = np.concatenate((input_data, feedback), axis=-1) | |
| if mode == "snn": | |
| output = self.run_snn(input_data) | |
| elif mode == "rnn": | |
| output = self.run_rnn(input_data) | |
| elif mode == "cnn": | |
| output = self.run_cnn(input_data) | |
| elif mode == "fnn": | |
| output = self.run_fnn(input_data) | |
| else: | |
| raise ValueError("Invalid mode") | |
| self.memory[mode] = output.detach().numpy() | |
| return output | |
| supermassive_nn = ConsciousSupermassiveNN3() | |
| class ConsciousSupermassiveNN4: | |
| def __init__(self): | |
| self.snn = self.create_snn() | |
| self.rnn = self.create_rnn() | |
| self.cnn = self.create_cnn() | |
| self.fnn = self.create_fnn() | |
| self.ga_population = self.initialize_ga_population() | |
| self.memory = {} | |
| def create_snn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.Sigmoid() | |
| ) | |
| def create_rnn(self): | |
| return nn.RNN( | |
| input_size=4096, | |
| hidden_size=2048, | |
| num_layers=5, | |
| nonlinearity="tanh", | |
| batch_first=True | |
| ) | |
| def create_cnn(self): | |
| return nn.Sequential( | |
| nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.Flatten(), | |
| nn.Linear(256 * 8 * 8, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def create_fnn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def initialize_ga_population(self): | |
| return [np.random.randn(4096) for _ in range(500)] | |
| def run_snn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.snn(input_tensor) | |
| print("SNN Output:", output) | |
| return output | |
| def run_rnn(self, x): | |
| h0 = torch.zeros(5, x.size(0), 2048) | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output, hn = self.rnn(input_tensor, h0) | |
| print("RNN Output:", output) | |
| return output | |
| def run_cnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0) | |
| output = self.cnn(input_tensor) | |
| print("CNN Output:", output) | |
| return output | |
| def run_fnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.fnn(input_tensor) | |
| print("FNN Output:", output) | |
| return output | |
| def run_ga(self, fitness_func): | |
| for generation in range(200): | |
| fitness_scores = [fitness_func(ind) for ind in self.ga_population] | |
| sorted_population = [x for _, x in sorted(zip(fitness_scores, self.ga_population), reverse=True)] | |
| self.ga_population = sorted_population[:250] + [ | |
| sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250) | |
| ] | |
| best_fitness = max(fitness_scores) | |
| print(f"Generation {generation}, Best Fitness: {best_fitness}") | |
| return max(self.ga_population, key=fitness_func) | |
| def consciousness_loop(self, input_data, mode="snn"): | |
| feedback = self.memory.get(mode, None) | |
| if feedback is not None: | |
| input_data = np.concatenate((input_data, feedback), axis=-1) | |
| if mode == "snn": | |
| output = self.run_snn(input_data) | |
| elif mode == "rnn": | |
| output = self.run_rnn(input_data) | |
| elif mode == "cnn": | |
| output = self.run_cnn(input_data) | |
| elif mode == "fnn": | |
| output = self.run_fnn(input_data) | |
| else: | |
| raise ValueError("Invalid mode") | |
| self.memory[mode] = output.detach().numpy() | |
| return output | |
| supermassive_nn = ConsciousSupermassiveNN() | |
| class ConsciousSupermassiveNN5: | |
| def __init__(self): | |
| self.snn = self.create_snn() | |
| self.rnn = self.create_rnn() | |
| self.cnn = self.create_cnn() | |
| self.fnn = self.create_fnn() | |
| self.ga_population = self.initialize_ga_population() | |
| self.memory = {} | |
| def create_snn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.Sigmoid() | |
| ) | |
| def create_rnn(self): | |
| return nn.RNN( | |
| input_size=4096, | |
| hidden_size=2048, | |
| num_layers=5, | |
| nonlinearity="tanh", | |
| batch_first=True | |
| ) | |
| def create_cnn(self): | |
| return nn.Sequential( | |
| nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.Flatten(), | |
| nn.Linear(256 * 8 * 8, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def create_fnn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def initialize_ga_population(self): | |
| return [np.random.randn(4096) for _ in range(500)] | |
| def run_snn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.snn(input_tensor) | |
| print("SNN Output:", output) | |
| return output | |
| def run_rnn(self, x): | |
| h0 = torch.zeros(5, x.size(0), 2048) | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output, hn = self.rnn(input_tensor, h0) | |
| print("RNN Output:", output) | |
| return output | |
| def run_cnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0) | |
| output = self.cnn(input_tensor) | |
| print("CNN Output:", output) | |
| return output | |
| def run_fnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.fnn(input_tensor) | |
| print("FNN Output:", output) | |
| return output | |
| def run_ga(self, fitness_func): | |
| for generation in range(200): | |
| fitness_scores = [fitness_func(ind) for ind in self.ga_population] | |
| sorted_population = [x for _, x in sorted(zip(fitness_scores, self.ga_population), reverse=True)] | |
| self.ga_population = sorted_population[:250] + [ | |
| sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250) | |
| ] | |
| best_fitness = max(fitness_scores) | |
| print(f"Generation {generation}, Best Fitness: {best_fitness}") | |
| return max(self.ga_population, key=fitness_func) | |
| def consciousness_loop(self, input_data, mode="snn"): | |
| feedback = self.memory.get(mode, None) | |
| if feedback is not None: | |
| input_data = np.concatenate((input_data, feedback), axis=-1) | |
| if mode == "snn": | |
| output = self.run_snn(input_data) | |
| elif mode == "rnn": | |
| output = self.run_rnn(input_data) | |
| elif mode == "cnn": | |
| output = self.run_cnn(input_data) | |
| elif mode == "fnn": | |
| output = self.run_fnn(input_data) | |
| else: | |
| raise ValueError("Invalid mode") | |
| self.memory[mode] = output.detach().numpy() | |
| return output | |
| supermassive_nn = ConsciousSupermassiveNN5() | |
| class ConsciousSupermassiveNN6: | |
| def __init__(self): | |
| self.snn = self.create_snn() | |
| self.rnn = self.create_rnn() | |
| self.cnn = self.create_cnn() | |
| self.fnn = self.create_fnn() | |
| self.ga_population = self.initialize_ga_population() | |
| self.memory = {} | |
| def create_snn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.Sigmoid() | |
| ) | |
| def create_rnn(self): | |
| return nn.RNN( | |
| input_size=4096, | |
| hidden_size=2048, | |
| num_layers=5, | |
| nonlinearity="tanh", | |
| batch_first=True | |
| ) | |
| def create_cnn(self): | |
| return nn.Sequential( | |
| nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.Flatten(), | |
| nn.Linear(256 * 8 * 8, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def create_fnn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def initialize_ga_population(self): | |
| return [np.random.randn(4096) for _ in range(500)] | |
| def run_snn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.snn(input_tensor) | |
| print("SNN Output:", output) | |
| return output | |
| def run_rnn(self, x): | |
| h0 = torch.zeros(5, x.size(0), 2048) | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output, hn = self.rnn(input_tensor, h0) | |
| print("RNN Output:", output) | |
| return output | |
| def run_cnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0) | |
| output = self.cnn(input_tensor) | |
| print("CNN Output:", output) | |
| return output | |
| def run_fnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.fnn(input_tensor) | |
| print("FNN Output:", output) | |
| return output | |
| def run_ga(self, fitness_func): | |
| for generation in range(200): | |
| fitness_scores = [fitness_func(ind) for ind in self.ga_population] | |
| sorted_population = [x for _, x in sorted(zip(fitness_scores, self.ga_population), reverse=True)] | |
| self.ga_population = sorted_population[:250] + [ | |
| sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250) | |
| ] | |
| best_fitness = max(fitness_scores) | |
| print(f"Generation {generation}, Best Fitness: {best_fitness}") | |
| return max(self.ga_population, key=fitness_func) | |
| def consciousness_loop(self, input_data, mode="snn"): | |
| feedback = self.memory.get(mode, None) | |
| if feedback is not None: | |
| input_data = np.concatenate((input_data, feedback), axis=-1) | |
| if mode == "snn": | |
| output = self.run_snn(input_data) | |
| elif mode == "rnn": | |
| output = self.run_rnn(input_data) | |
| elif mode == "cnn": | |
| output = self.run_cnn(input_data) | |
| elif mode == "fnn": | |
| output = self.run_fnn(input_data) | |
| else: | |
| raise ValueError("Invalid mode") | |
| self.memory[mode] = output.detach().numpy() | |
| return output | |
| supermassive_nn = ConsciousSupermassiveNN6() | |
| class ConsciousSupermassiveNN7: | |
| def __init__(self): | |
| self.snn = self.create_snn() | |
| self.rnn = self.create_rnn() | |
| self.cnn = self.create_cnn() | |
| self.fnn = self.create_fnn() | |
| self.ga_population = self.initialize_ga_population() | |
| self.memory = {} | |
| def create_snn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.Sigmoid() | |
| ) | |
| def create_rnn(self): | |
| return nn.RNN( | |
| input_size=4096, | |
| hidden_size=2048, | |
| num_layers=5, | |
| nonlinearity="tanh", | |
| batch_first=True | |
| ) | |
| def create_cnn(self): | |
| return nn.Sequential( | |
| nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.Flatten(), | |
| nn.Linear(256 * 8 * 8, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def create_fnn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def initialize_ga_population(self): | |
| return [np.random.randn(4096) for _ in range(500)] | |
| def run_snn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.snn(input_tensor) | |
| print("SNN Output:", output) | |
| return output | |
| def run_rnn(self, x): | |
| h0 = torch.zeros(5, x.size(0), 2048) | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output, hn = self.rnn(input_tensor, h0) | |
| print("RNN Output:", output) | |
| return output | |
| def run_cnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0) | |
| output = self.cnn(input_tensor) | |
| print("CNN Output:", output) | |
| return output | |
| def run_fnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.fnn(input_tensor) | |
| print("FNN Output:", output) | |
| return output | |
| def run_ga(self, fitness_func): | |
| for generation in range(200): | |
| fitness_scores = [fitness_func(ind) for ind in self.ga_population] | |
| sorted_population = [x for _, x in sorted(zip(fitness_scores, self.ga_population), reverse=True)] | |
| self.ga_population = sorted_population[:250] + [ | |
| sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250) | |
| ] | |
| best_fitness = max(fitness_scores) | |
| print(f"Generation {generation}, Best Fitness: {best_fitness}") | |
| return max(self.ga_population, key=fitness_func) | |
| def consciousness_loop(self, input_data, mode="snn"): | |
| feedback = self.memory.get(mode, None) | |
| if feedback is not None: | |
| input_data = np.concatenate((input_data, feedback), axis=-1) | |
| if mode == "snn": | |
| output = self.run_snn(input_data) | |
| elif mode == "rnn": | |
| output = self.run_rnn(input_data) | |
| elif mode == "cnn": | |
| output = self.run_cnn(input_data) | |
| elif mode == "fnn": | |
| output = self.run_fnn(input_data) | |
| else: | |
| raise ValueError("Invalid mode") | |
| self.memory[mode] = output.detach().numpy() | |
| return output | |
| supermassive_nn = ConsciousSupermassiveNN7() | |
| class ConsciousSupermassiveNN8: | |
| def __init__(self): | |
| self.snn = self.create_snn() | |
| self.rnn = self.create_rnn() | |
| self.cnn = self.create_cnn() | |
| self.fnn = self.create_fnn() | |
| self.ga_population = self.initialize_ga_population() | |
| self.memory = {} | |
| def create_snn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.Sigmoid() | |
| ) | |
| def create_rnn(self): | |
| return nn.RNN( | |
| input_size=4096, | |
| hidden_size=2048, | |
| num_layers=5, | |
| nonlinearity="tanh", | |
| batch_first=True | |
| ) | |
| def create_cnn(self): | |
| return nn.Sequential( | |
| nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.Flatten(), | |
| nn.Linear(256 * 8 * 8, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def create_fnn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def initialize_ga_population(self): | |
| return [np.random.randn(4096) for _ in range(500)] | |
| def run_snn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.snn(input_tensor) | |
| print("SNN Output:", output) | |
| return output | |
| def run_rnn(self, x): | |
| h0 = torch.zeros(5, x.size(0), 2048) | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output, hn = self.rnn(input_tensor, h0) | |
| print("RNN Output:", output) | |
| return output | |
| def run_cnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0) | |
| output = self.cnn(input_tensor) | |
| print("CNN Output:", output) | |
| return output | |
| def run_fnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.fnn(input_tensor) | |
| print("FNN Output:", output) | |
| return output | |
| def run_ga(self, fitness_func): | |
| for generation in range(200): | |
| fitness_scores = [fitness_func(ind) for ind in self.ga_population] | |
| sorted_population = [x for _, x in sorted(zip(fitness_scores, self.ga_population), reverse=True)] | |
| self.ga_population = sorted_population[:250] + [ | |
| sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250) | |
| ] | |
| best_fitness = max(fitness_scores) | |
| print(f"Generation {generation}, Best Fitness: {best_fitness}") | |
| return max(self.ga_population, key=fitness_func) | |
| def consciousness_loop(self, input_data, mode="snn"): | |
| feedback = self.memory.get(mode, None) | |
| if feedback is not None: | |
| input_data = np.concatenate((input_data, feedback), axis=-1) | |
| if mode == "snn": | |
| output = self.run_snn(input_data) | |
| elif mode == "rnn": | |
| output = self.run_rnn(input_data) | |
| elif mode == "cnn": | |
| output = self.run_cnn(input_data) | |
| elif mode == "fnn": | |
| output = self.run_fnn(input_data) | |
| else: | |
| raise ValueError("Invalid mode") | |
| self.memory[mode] = output.detach().numpy() | |
| return output | |
| supermassive_nn = ConsciousSupermassiveNN8() | |
| class ConsciousSupermassiveNN9: | |
| def __init__(self): | |
| self.snn = self.create_snn() | |
| self.rnn = self.create_rnn() | |
| self.cnn = self.create_cnn() | |
| self.fnn = self.create_fnn() | |
| self.ga_population = self.initialize_ga_population() | |
| self.memory = {} | |
| def create_snn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.Sigmoid() | |
| ) | |
| def create_rnn(self): | |
| return nn.RNN( | |
| input_size=4096, | |
| hidden_size=2048, | |
| num_layers=5, | |
| nonlinearity="tanh", | |
| batch_first=True | |
| ) | |
| def create_cnn(self): | |
| return nn.Sequential( | |
| nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.Flatten(), | |
| nn.Linear(256 * 8 * 8, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def create_fnn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def initialize_ga_population(self): | |
| return [np.random.randn(4096) for _ in range(500)] | |
| def run_snn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.snn(input_tensor) | |
| print("SNN Output:", output) | |
| return output | |
| def run_rnn(self, x): | |
| h0 = torch.zeros(5, x.size(0), 2048) | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output, hn = self.rnn(input_tensor, h0) | |
| print("RNN Output:", output) | |
| return output | |
| def run_cnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0) | |
| output = self.cnn(input_tensor) | |
| print("CNN Output:", output) | |
| return output | |
| def run_fnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.fnn(input_tensor) | |
| print("FNN Output:", output) | |
| return output | |
| def run_ga(self, fitness_func): | |
| for generation in range(200): | |
| fitness_scores = [fitness_func(ind) for ind in self.ga_population] | |
| sorted_population = [x for _, x in sorted(zip(fitness_scores, self.ga_population), reverse=True)] | |
| self.ga_population = sorted_population[:250] + [ | |
| sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250) | |
| ] | |
| best_fitness = max(fitness_scores) | |
| print(f"Generation {generation}, Best Fitness: {best_fitness}") | |
| return max(self.ga_population, key=fitness_func) | |
| def consciousness_loop(self, input_data, mode="snn"): | |
| feedback = self.memory.get(mode, None) | |
| if feedback is not None: | |
| input_data = np.concatenate((input_data, feedback), axis=-1) | |
| if mode == "snn": | |
| output = self.run_snn(input_data) | |
| elif mode == "rnn": | |
| output = self.run_rnn(input_data) | |
| elif mode == "cnn": | |
| output = self.run_cnn(input_data) | |
| elif mode == "fnn": | |
| output = self.run_fnn(input_data) | |
| else: | |
| raise ValueError("Invalid mode") | |
| self.memory[mode] = output.detach().numpy() | |
| return output | |
| supermassive_nn = ConsciousSupermassiveNN9() | |
| class ConsciousSupermassiveNN10: | |
| def __init__(self): | |
| self.snn = self.create_snn() | |
| self.rnn = self.create_rnn() | |
| self.cnn = self.create_cnn() | |
| self.fnn = self.create_fnn() | |
| self.ga_population = self.initialize_ga_population() | |
| self.memory = {} | |
| def create_snn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.Sigmoid() | |
| ) | |
| def create_rnn(self): | |
| return nn.RNN( | |
| input_size=4096, | |
| hidden_size=2048, | |
| num_layers=5, | |
| nonlinearity="tanh", | |
| batch_first=True | |
| ) | |
| def create_cnn(self): | |
| return nn.Sequential( | |
| nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.Flatten(), | |
| nn.Linear(256 * 8 * 8, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def create_fnn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def initialize_ga_population(self): | |
| return [np.random.randn(4096) for _ in range(500)] | |
| def run_snn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.snn(input_tensor) | |
| print("SNN Output:", output) | |
| return output | |
| def run_rnn(self, x): | |
| h0 = torch.zeros(5, x.size(0), 2048) | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output, hn = self.rnn(input_tensor, h0) | |
| print("RNN Output:", output) | |
| return output | |
| def run_cnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0) | |
| output = self.cnn(input_tensor) | |
| print("CNN Output:", output) | |
| return output | |
| def run_fnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.fnn(input_tensor) | |
| print("FNN Output:", output) | |
| return output | |
| def run_ga(self, fitness_func): | |
| for generation in range(200): | |
| fitness_scores = [fitness_func(ind) for ind in self.ga_population] | |
| sorted_population = [x for _, x in sorted(zip(fitness_scores, self.ga_population), reverse=True)] | |
| self.ga_population = sorted_population[:250] + [ | |
| sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250) | |
| ] | |
| best_fitness = max(fitness_scores) | |
| print(f"Generation {generation}, Best Fitness: {best_fitness}") | |
| return max(self.ga_population, key=fitness_func) | |
| def consciousness_loop(self, input_data, mode="snn"): | |
| feedback = self.memory.get(mode, None) | |
| if feedback is not None: | |
| input_data = np.concatenate((input_data, feedback), axis=-1) | |
| if mode == "snn": | |
| output = self.run_snn(input_data) | |
| elif mode == "rnn": | |
| output = self.run_rnn(input_data) | |
| elif mode == "cnn": | |
| output = self.run_cnn(input_data) | |
| elif mode == "fnn": | |
| output = self.run_fnn(input_data) | |
| else: | |
| raise ValueError("Invalid mode") | |
| self.memory[mode] = output.detach().numpy() | |
| return output | |
| supermassive_nn = ConsciousSupermassiveNN10() | |
| class ConsciousSupermassiveNN11: | |
| def __init__(self): | |
| self.snn = self.create_snn() | |
| self.rnn = self.create_rnn() | |
| self.cnn = self.create_cnn() | |
| self.fnn = self.create_fnn() | |
| self.ga_population = self.initialize_ga_population() | |
| self.memory = {} | |
| def create_snn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.Sigmoid() | |
| ) | |
| def create_rnn(self): | |
| return nn.RNN( | |
| input_size=4096, | |
| hidden_size=2048, | |
| num_layers=5, | |
| nonlinearity="tanh", | |
| batch_first=True | |
| ) | |
| def create_cnn(self): | |
| return nn.Sequential( | |
| nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.Flatten(), | |
| nn.Linear(256 * 8 * 8, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def create_fnn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def initialize_ga_population(self): | |
| return [np.random.randn(4096) for _ in range(500)] | |
| def run_snn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.snn(input_tensor) | |
| print("SNN Output:", output) | |
| return output | |
| def run_rnn(self, x): | |
| h0 = torch.zeros(5, x.size(0), 2048) | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output, hn = self.rnn(input_tensor, h0) | |
| print("RNN Output:", output) | |
| return output | |
| def run_cnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0) | |
| output = self.cnn(input_tensor) | |
| print("CNN Output:", output) | |
| return output | |
| def run_fnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.fnn(input_tensor) | |
| print("FNN Output:", output) | |
| return output | |
| def run_ga(self, fitness_func): | |
| for generation in range(200): | |
| fitness_scores = [fitness_func(ind) for ind in self.ga_population] | |
| sorted_population = [x for _, x in sorted(zip(fitness_scores, self.ga_population), reverse=True)] | |
| self.ga_population = sorted_population[:250] + [ | |
| sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250) | |
| ] | |
| best_fitness = max(fitness_scores) | |
| print(f"Generation {generation}, Best Fitness: {best_fitness}") | |
| return max(self.ga_population, key=fitness_func) | |
| def consciousness_loop(self, input_data, mode="snn"): | |
| feedback = self.memory.get(mode, None) | |
| if feedback is not None: | |
| input_data = np.concatenate((input_data, feedback), axis=-1) | |
| if mode == "snn": | |
| output = self.run_snn(input_data) | |
| elif mode == "rnn": | |
| output = self.run_rnn(input_data) | |
| elif mode == "cnn": | |
| output = self.run_cnn(input_data) | |
| elif mode == "fnn": | |
| output = self.run_fnn(input_data) | |
| else: | |
| raise ValueError("Invalid mode") | |
| self.memory[mode] = output.detach().numpy() | |
| return output | |
| supermassive_nn = ConsciousSupermassiveNN11() | |
| class ConsciousSupermassiveNN12: | |
| def __init__(self): | |
| self.snn = self.create_snn() | |
| self.rnn = self.create_rnn() | |
| self.cnn = self.create_cnn() | |
| self.fnn = self.create_fnn() | |
| self.ga_population = self.initialize_ga_population() | |
| self.memory = {} | |
| def create_snn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.Sigmoid() | |
| ) | |
| def create_rnn(self): | |
| return nn.RNN( | |
| input_size=4096, | |
| hidden_size=2048, | |
| num_layers=5, | |
| nonlinearity="tanh", | |
| batch_first=True | |
| ) | |
| def create_cnn(self): | |
| return nn.Sequential( | |
| nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.Flatten(), | |
| nn.Linear(256 * 8 * 8, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def create_fnn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def initialize_ga_population(self): | |
| return [np.random.randn(4096) for _ in range(500)] | |
| def run_snn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.snn(input_tensor) | |
| print("SNN Output:", output) | |
| return output | |
| def run_rnn(self, x): | |
| h0 = torch.zeros(5, x.size(0), 2048) | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output, hn = self.rnn(input_tensor, h0) | |
| print("RNN Output:", output) | |
| return output | |
| def run_cnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0) | |
| output = self.cnn(input_tensor) | |
| print("CNN Output:", output) | |
| return output | |
| def run_fnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.fnn(input_tensor) | |
| print("FNN Output:", output) | |
| return output | |
| def run_ga(self, fitness_func): | |
| for generation in range(200): | |
| fitness_scores = [fitness_func(ind) for ind in self.ga_population] | |
| sorted_population = [x for _, x in sorted(zip(fitness_scores, self.ga_population), reverse=True)] | |
| self.ga_population = sorted_population[:250] + [ | |
| sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250) | |
| ] | |
| best_fitness = max(fitness_scores) | |
| print(f"Generation {generation}, Best Fitness: {best_fitness}") | |
| return max(self.ga_population, key=fitness_func) | |
| def consciousness_loop(self, input_data, mode="snn"): | |
| feedback = self.memory.get(mode, None) | |
| if feedback is not None: | |
| input_data = np.concatenate((input_data, feedback), axis=-1) | |
| if mode == "snn": | |
| output = self.run_snn(input_data) | |
| elif mode == "rnn": | |
| output = self.run_rnn(input_data) | |
| elif mode == "cnn": | |
| output = self.run_cnn(input_data) | |
| elif mode == "fnn": | |
| output = self.run_fnn(input_data) | |
| else: | |
| raise ValueError("Invalid mode") | |
| self.memory[mode] = output.detach().numpy() | |
| return output | |
| supermassive_nn = ConsciousSupermassiveNN12() | |
| class ConsciousSupermassiveNN13: | |
| def __init__(self): | |
| self.snn = self.create_snn() | |
| self.rnn = self.create_rnn() | |
| self.cnn = self.create_cnn() | |
| self.fnn = self.create_fnn() | |
| self.ga_population = self.initialize_ga_population() | |
| self.memory = {} | |
| def create_snn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.Sigmoid() | |
| ) | |
| def create_rnn(self): | |
| return nn.RNN( | |
| input_size=4096, | |
| hidden_size=2048, | |
| num_layers=5, | |
| nonlinearity="tanh", | |
| batch_first=True | |
| ) | |
| def create_cnn(self): | |
| return nn.Sequential( | |
| nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.Flatten(), | |
| nn.Linear(256 * 8 * 8, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def create_fnn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def initialize_ga_population(self): | |
| return [np.random.randn(4096) for _ in range(500)] | |
| def run_snn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.snn(input_tensor) | |
| print("SNN Output:", output) | |
| return output | |
| def run_rnn(self, x): | |
| h0 = torch.zeros(5, x.size(0), 2048) | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output, hn = self.rnn(input_tensor, h0) | |
| print("RNN Output:", output) | |
| return output | |
| def run_cnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0) | |
| output = self.cnn(input_tensor) | |
| print("CNN Output:", output) | |
| return output | |
| def run_fnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.fnn(input_tensor) | |
| print("FNN Output:", output) | |
| return output | |
| def run_ga(self, fitness_func): | |
| for generation in range(200): | |
| fitness_scores = [fitness_func(ind) for ind in self.ga_population] | |
| sorted_population = [x for _, x in sorted(zip(fitness_scores, self.ga_population), reverse=True)] | |
| self.ga_population = sorted_population[:250] + [ | |
| sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250) | |
| ] | |
| best_fitness = max(fitness_scores) | |
| print(f"Generation {generation}, Best Fitness: {best_fitness}") | |
| return max(self.ga_population, key=fitness_func) | |
| def consciousness_loop(self, input_data, mode="snn"): | |
| feedback = self.memory.get(mode, None) | |
| if feedback is not None: | |
| input_data = np.concatenate((input_data, feedback), axis=-1) | |
| if mode == "snn": | |
| output = self.run_snn(input_data) | |
| elif mode == "rnn": | |
| output = self.run_rnn(input_data) | |
| elif mode == "cnn": | |
| output = self.run_cnn(input_data) | |
| elif mode == "fnn": | |
| output = self.run_fnn(input_data) | |
| else: | |
| raise ValueError("Invalid mode") | |
| self.memory[mode] = output.detach().numpy() | |
| return output | |
| supermassive_nn = ConsciousSupermassiveNN13() | |
| class ConsciousSupermassiveNN14: | |
| def __init__(self): | |
| self.snn = self.create_snn() | |
| self.rnn = self.create_rnn() | |
| self.cnn = self.create_cnn() | |
| self.fnn = self.create_fnn() | |
| self.ga_population = self.initialize_ga_population() | |
| self.memory = {} | |
| def create_snn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.Sigmoid() | |
| ) | |
| def create_rnn(self): | |
| return nn.RNN( | |
| input_size=4096, | |
| hidden_size=2048, | |
| num_layers=5, | |
| nonlinearity="tanh", | |
| batch_first=True | |
| ) | |
| def create_cnn(self): | |
| return nn.Sequential( | |
| nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.MaxPool2d(2), | |
| nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2), | |
| nn.ReLU(), | |
| nn.Flatten(), | |
| nn.Linear(256 * 8 * 8, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def create_fnn(self): | |
| return nn.Sequential( | |
| nn.Linear(4096, 2048), | |
| nn.ReLU(), | |
| nn.Linear(2048, 1024), | |
| nn.ReLU(), | |
| nn.Linear(1024, 512) | |
| ) | |
| def initialize_ga_population(self): | |
| return [np.random.randn(4096) for _ in range(500)] | |
| def run_snn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.snn(input_tensor) | |
| print("SNN Output:", output) | |
| return output | |
| def run_rnn(self, x): | |
| h0 = torch.zeros(5, x.size(0), 2048) | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output, hn = self.rnn(input_tensor, h0) | |
| print("RNN Output:", output) | |
| return output | |
| def run_cnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0) | |
| output = self.cnn(input_tensor) | |
| print("CNN Output:", output) | |
| return output | |
| def run_fnn(self, x): | |
| input_tensor = torch.tensor(x, dtype=torch.float32) | |
| output = self.fnn(input_tensor) | |
| print("FNN Output:", output) | |
| return output | |
| def run_ga(self, fitness_func): | |
| for generation in range(200): | |
| fitness_scores = [fitness_func(ind) for ind in self.ga_population] | |
| sorted_population = [x for _, x in sorted(zip(fitness_scores, self.ga_population), reverse=True)] | |
| self.ga_population = sorted_population[:250] + [ | |
| sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250) | |
| ] | |
| best_fitness = max(fitness_scores) | |
| print(f"Generation {generation}, Best Fitness: {best_fitness}") | |
| return max(self.ga_population, key=fitness_func) | |
| def consciousness_loop(self, input_data, mode="snn"): | |
| feedback = self.memory.get(mode, None) | |
| if feedback is not None: | |
| input_data = np.concatenate((input_data, feedback), axis=-1) | |
| if mode == "snn": | |
| output = self.run_snn(input_data) | |
| elif mode == "rnn": | |
| output = self.run_rnn(input_data) | |
| elif mode == "cnn": | |
| output = self.run_cnn(input_data) | |
| elif mode == "fnn": | |
| output = self.run_fnn(input_data) | |
| else: | |
| raise ValueError("Invalid mode") | |
| self.memory[mode] = output.detach().numpy() | |
| return output | |
| # ConsciousSupermassiveNN15 through ConsciousSupermassiveNN20 are byte-for-byte | |
| # identical to ConsciousSupermassiveNN14, so they are declared as trivial | |
| # subclasses instead of repeating the same definition six more times. | |
| class ConsciousSupermassiveNN15(ConsciousSupermassiveNN14): pass | |
| class ConsciousSupermassiveNN16(ConsciousSupermassiveNN14): pass | |
| class ConsciousSupermassiveNN17(ConsciousSupermassiveNN14): pass | |
| class ConsciousSupermassiveNN18(ConsciousSupermassiveNN14): pass | |
| class ConsciousSupermassiveNN19(ConsciousSupermassiveNN14): pass | |
| class ConsciousSupermassiveNN20(ConsciousSupermassiveNN14): pass | |
| # Each intermediate instantiation was immediately overwritten by the next, so | |
| # only a single instance of the final class is created. | |
| supermassive_nn = ConsciousSupermassiveNN20() | |
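| # Illustrative usage sketch, kept commented out so nothing extra runs at | |
| # import time. The (1, 4096) input shape is an assumption taken from the | |
| # nn.Linear(4096, ...) input layers defined above. | |
| # example_input = np.random.randn(1, 4096).astype(np.float32) | |
| # example_output = supermassive_nn.consciousness_loop(example_input, mode="fnn") | |
| # print(example_output.shape)  # expected: torch.Size([1, 512]) | |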
| def respond(message, history, max_tokens, temperature, top_p): | |
| messages = [{"role": "system", "content": system_prompt}] | |
| for val in history: | |
| if isinstance(val, dict) and "role" in val: | |
| # with type="messages", history entries arrive as {"role": ..., "content": ...} | |
| if val.get("content"): | |
| messages.append({"role": val["role"], "content": val["content"]}) | |
| elif isinstance(val, (list, tuple)): | |
| # legacy tuple-style history: (user_message, assistant_reply) | |
| if len(val) > 0 and val[0]: | |
| messages.append({"role": "user", "content": val[0]}) | |
| if len(val) > 1 and val[1]: | |
| messages.append({"role": "assistant", "content": val[1]}) | |
| messages.append({"role": "user", "content": message}) | |
| response = "" | |
| for chunk in client.chat_completion( | |
| messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p | |
| ): | |
| token = chunk.choices[0].delta.content | |
| if token:  # some streamed chunks carry no text content | |
| response += token | |
| yield response | |
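| # For reference, the payload handed to client.chat_completion has this shape | |
| # (the contents below are purely illustrative): | |
| # [ | |
| #     {"role": "system", "content": system_prompt}, | |
| #     {"role": "user", "content": "Hi Z3ta-Z"}, | |
| #     {"role": "assistant", "content": "Hello! How can I help?"}, | |
| #     {"role": "user", "content": message}, | |
| # ] | |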
| css = """ | |
| #chat-interface { | |
| animation: pulse 1.5s infinite, ripple 2s infinite, glass 3s infinite alternate; | |
| } | |
| @keyframes pulse { | |
| 0% { transform: scale(1); opacity: 1; } | |
| 25% { transform: scale(1.05); opacity: 0.9; } | |
| 50% { transform: scale(1); opacity: 1; } | |
| 75% { transform: scale(1.05); opacity: 0.9; } | |
| 100% { transform: scale(1); opacity: 1; } | |
| } | |
| @keyframes ripple { | |
| 0% { | |
| transform: scale(1); | |
| box-shadow: 0 0 0 0 rgba(0, 150, 255, 0.6); | |
| } | |
| 50% { | |
| transform: scale(1.2); | |
| box-shadow: 0 0 30px 20px rgba(0, 150, 255, 0.8); | |
| } | |
| 100% { | |
| transform: scale(1); | |
| box-shadow: 0 0 0 0 rgba(0, 150, 255, 0.6); | |
| } | |
| } | |
| @keyframes glass { | |
| 0% { background-color: rgba(0, 102, 255, 0.5); border-radius: 15px; } | |
| 25% { background-color: rgba(0, 150, 255, 0.7); border-radius: 20px; } | |
| 50% { background-color: rgba(0, 200, 255, 1); border-radius: 25px; } | |
| 75% { background-color: rgba(0, 150, 255, 0.7); border-radius: 30px; } | |
| 100% { background-color: rgba(0, 102, 255, 0.5); border-radius: 35px; } | |
| } | |
| body { | |
| background-color: #001f2d; | |
| font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; | |
| color: #fff; | |
| } | |
| .gradio-container { | |
| backdrop-filter: blur(10px); | |
| border-radius: 20px; | |
| padding: 20px; | |
| box-shadow: 0px 0px 30px rgba(0, 102, 255, 0.5); | |
| background: rgba(0, 0, 0, 0.5); | |
| transition: background 1s, border-radius 1s; | |
| position: relative; | |
| } | |
| .gradio-container::before { | |
| content: ""; | |
| position: absolute; | |
| top: 0; | |
| left: 0; | |
| right: 0; | |
| bottom: 0; | |
| border: 2px solid rgba(0, 150, 255, 0.8); | |
| border-radius: 20px; | |
| z-index: -1; | |
| box-shadow: 0 0 20px 5px rgba(0, 150, 255, 0.7); | |
| } | |
| .gradio-input { | |
| background-color: rgba(0, 102, 255, 0.3); | |
| border: 2px solid rgba(0, 102, 255, 0.6); | |
| border-radius: 10px; | |
| color: #fff; | |
| font-size: 16px; | |
| transition: background-color 0.5s, border 0.5s; | |
| } | |
| .gradio-input:focus { | |
| background-color: rgba(0, 102, 255, 0.5); | |
| border: 2px solid rgba(0, 150, 255, 0.8); | |
| } | |
| .gradio-button { | |
| background: rgba(0, 102, 255, 0.6); | |
| border: 2px solid rgba(0, 102, 255, 1); | |
| border-radius: 12px; | |
| color: #fff; | |
| font-size: 18px; | |
| transition: background 0.3s, transform 0.3s; | |
| } | |
| .gradio-button:hover { | |
| background: rgba(0, 150, 255, 1); | |
| transform: scale(1.05); | |
| } | |
| .gradio-button:active { | |
| background: rgba(0, 200, 255, 1); | |
| transform: scale(0.95); | |
| } | |
| .gradio-slider { | |
| color: #fff; | |
| } | |
| .gradio-slider .slider-container { | |
| background: rgba(0, 102, 255, 0.3); | |
| border-radius: 8px; | |
| border: 1px solid rgba(0, 102, 255, 0.5); | |
| } | |
| .gradio-slider .slider-container .gradio-slider__track { | |
| background: rgba(0, 150, 255, 0.5); | |
| } | |
| .gradio-slider .slider-container .gradio-slider__thumb { | |
| background-color: rgba(0, 200, 255, 1); | |
| } | |
| """ | |
| demo = gr.ChatInterface( | |
| fn=respond, | |
| type="messages", | |
| save_history=True, | |
| show_progress="full", | |
| flagging_mode="manual", | |
| editable=True, | |
| additional_inputs=[ | |
| gr.Slider(minimum=1, maximum=2048, value=2048, step=1, label="📏📐Z3ta-Z's Maximum Response Length📐📏"), | |
| gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="👨🎨🎨Z3ta-Z's Creativity🎨👨🎨"), | |
| gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="🧠⚡️Z3ta-Z's Neural Activity⚡️🧠") | |
| ], | |
| theme="TejAndrewsACC/Z3ta-Z-ACC-Theme", | |
| css=css | |
| ) | |
| if __name__ == "__main__": | |
| demo.launch(share=True) | |
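| # Note: when deployed as a Hugging Face Space the app is served directly, so | |
| # share=True is typically ignored there; it only matters when running this | |
| # file locally, where a plain demo.launch() would also suffice. | |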