|
|
|
import numpy as np |
|
import pandas as pd |
|
import os |
|
import json |
|
import random |
|
import torch |
|
import torch.nn as nn |
|
import torch.optim as optim |
|
from sklearn.ensemble import IsolationForest |
|
from sklearn.model_selection import train_test_split |
|
from sklearn.preprocessing import OneHotEncoder |
|
from deap import base, creator, tools, algorithms |
|
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoFeatureExtractor |
|
from transformers import pipeline |
|
from sentence_transformers import SentenceTransformer |
|
from textblob import TextBlob |
|
import speech_recognition as sr |
|
from PIL import Image |
|
import cv2 |
|
from googletrans import Translator |
|
import onnx |
|
import onnxruntime |
|
from torch.quantization import quantize_dynamic, quantize_static, prepare, convert |
|
import torch.nn.functional as F |
|
|
|
|
|
# Compute device: prefer CUDA when present, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# Toy supervised corpus: short first-person statements, each paired with the
# emotion label it should be classified as.
_context_emotion_pairs = [
    ('I am happy', 'joy'),
    ('I am sad', 'sadness'),
    ('I am angry', 'anger'),
    ('I am excited', 'joy'),
    ('I am calm', 'calmness'),
    ('I am feeling joyful', 'joy'),
    ('I am grieving', 'grief'),
    ('I am feeling peaceful', 'calmness'),
    ('I am frustrated', 'anger'),
    ('I am determined', 'determination'),
    ('I feel resentment', 'resentment'),
    ('I am feeling glorious', 'glory'),
    ('I am motivated', 'motivation'),
    ('I am surprised', 'surprise'),
    ('I am fearful', 'fear'),
    ('I am trusting', 'trust'),
    ('I feel disgust', 'disgust'),
    ('I am optimistic', 'optimism'),
    ('I am pessimistic', 'pessimism'),
    ('I feel bored', 'boredom'),
    ('I am envious', 'envy'),
]
data = {
    'context': [context for context, _ in _context_emotion_pairs],
    'emotion': [label for _, label in _context_emotion_pairs],
}
df = pd.DataFrame(data)


# One-hot encode each whole context string; unseen contexts encode to all
# zeros instead of raising ('handle_unknown').
encoder = OneHotEncoder(handle_unknown='ignore')
contexts_encoded = encoder.fit_transform(df[['context']]).toarray()


# Integer class codes for training and the matching label names for decoding
# predictions back to emotion strings.
_emotion_categorical = df['emotion'].astype('category')
emotions_target = _emotion_categorical.cat.codes
emotion_classes = _emotion_categorical.cat.categories
|
|
|
|
|
class EmotionalNN(nn.Module):
    """Feed-forward emotion classifier with a self-attention front end.

    Takes a (batch, input_size) feature tensor and returns a
    (batch, output_size) tensor of class probabilities (softmax output).

    Fix vs. the original: nn.MultiheadAttention was constructed with
    embed_dim=hidden_size but applied directly to the raw input (width
    input_size) and to a 2-D tensor, so forward() crashed whenever
    input_size != hidden_size. The input is now projected to hidden_size
    first and given a length-1 sequence dimension before attention.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(EmotionalNN, self).__init__()
        # Project raw features to the attention embedding width;
        # MultiheadAttention requires query/key/value width == embed_dim.
        # hidden_size must be divisible by num_heads (8).
        self.input_proj = nn.Linear(input_size, hidden_size)
        self.attention = nn.MultiheadAttention(hidden_size, num_heads=8)
        self.layers = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size),
            # NOTE(review): the training loop feeds this softmax output into
            # CrossEntropyLoss, which expects raw logits; kept here because
            # downstream callers treat the output as probabilities.
            nn.Softmax(dim=1)
        )

    def forward(self, x):
        h = self.input_proj(x)
        # MultiheadAttention expects (seq_len, batch, embed); treat each
        # sample as a length-1 sequence, then drop the sequence axis again.
        h = h.unsqueeze(0)
        h, _ = self.attention(h, h, h)
        h = h.squeeze(0)
        return self.layers(h)
|
|
|
|
|
input_size = contexts_encoded.shape[1]
hidden_size = 512
output_size = len(emotion_classes)
emotional_nn = EmotionalNN(input_size, hidden_size, output_size).to(device)


criterion = nn.CrossEntropyLoss()
# Train the FLOAT model. Dynamically-quantized Linear layers hold packed,
# non-trainable weights, so the original optimizer over the quantized copy
# had nothing to update and backward() failed.
optimizer = optim.Adam(emotional_nn.parameters(), lr=0.001)


num_epochs = 5000
# Hoist the tensor conversions out of the loop; they are loop-invariant.
inputs = torch.FloatTensor(contexts_encoded).to(device)
targets = torch.LongTensor(emotions_target).to(device)
for epoch in range(num_epochs):
    outputs = emotional_nn(inputs)
    loss = criterion(outputs, targets)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()


# Quantize AFTER training, for inference only. torch dynamic quantization is
# CPU-only, so move the trained weights to the CPU first.
emotional_nn_quantized = quantize_dynamic(emotional_nn.cpu(), {nn.Linear}, dtype=torch.qint8)


# Export the float model: ONNX export of dynamically-quantized modules is
# not generally supported.
dummy_input = torch.randn(1, input_size)
torch.onnx.export(emotional_nn, dummy_input, "emotional_nn.onnx")


ort_session = onnxruntime.InferenceSession("emotional_nn.onnx")
|
|
|
|
|
# Qualitative motivation tag for every tracked emotion. The percentages are
# assigned below so that each emotion starts with an equal share of the
# total budget.
_motivations = {
    'joy': 'positive',
    'pleasure': 'selfish',
    'sadness': 'negative',
    'grief': 'negative',
    'anger': 'traumatic or strong',
    'calmness': 'neutral',
    'determination': 'positive',
    'resentment': 'negative',
    'glory': 'positive',
    'motivation': 'positive',
    'ideal_state': 'balanced',
    'fear': 'defensive',
    'surprise': 'unexpected',
    'anticipation': 'predictive',
    'trust': 'reliable',
    'disgust': 'repulsive',
    'optimism': 'hopeful',
    'pessimism': 'doubtful',
    'boredom': 'indifferent',
    'envy': 'jealous',
}

# All percentages always sum to this fixed emotional "budget".
total_percentage = 200
default_percentage = total_percentage / len(_motivations)
emotions = {
    name: {'percentage': default_percentage, 'motivation': motive}
    for name, motive in _motivations.items()
}

# JSON file where emotion-state snapshots are persisted between runs.
emotion_history_file = 'emotion_history.json'
|
|
|
|
|
def load_historical_data(file_path=emotion_history_file):
    """Return the persisted emotion history, or an empty list if the
    history file does not exist yet."""
    if not os.path.exists(file_path):
        return []
    with open(file_path, 'r') as fh:
        return json.load(fh)
|
|
|
def save_historical_data(historical_data, file_path=emotion_history_file):
    """Persist the emotion history list as JSON at *file_path*."""
    with open(file_path, 'w') as fh:
        fh.write(json.dumps(historical_data))
|
|
|
# In-memory emotion history for this session, seeded from disk at import time.
emotion_history = load_historical_data()
|
|
|
|
|
def update_emotion(emotion, percentage):
    """Shift *percentage* points from ideal_state to *emotion*, then
    re-balance ideal_state so the budget still sums to total_percentage."""
    emotions['ideal_state']['percentage'] -= percentage
    emotions[emotion]['percentage'] += percentage
    # Any drift (e.g. from anomaly adjustments) is absorbed by ideal_state.
    current_total = sum(entry['percentage'] for entry in emotions.values())
    emotions['ideal_state']['percentage'] += total_percentage - current_total
|
|
|
|
|
def normalize_context(context):
    """Canonicalize a context string: trim whitespace and lower-case it."""
    return context.strip().lower()
|
|
|
|
|
def evolve_emotions():
    """Evolve the global emotion percentages with a DEAP genetic algorithm.

    Each individual holds len(emotions) genes: len(emotions) - 1 ordinary
    percentages drawn from [0, 20] plus one trailing ideal_state gene drawn
    from [80, 120]. The (minimized) fitness rewards an ideal_state near 100
    and a small sum of the remaining genes. The best individual's genes are
    written back into the global `emotions` dict.

    Fixes vs. the original: `tools.initConcat` does not exist in DEAP
    (the registration raised AttributeError), so individuals are built
    directly; `creator.create` is guarded so repeated calls do not
    re-register the fitness/individual classes.
    """
    def evaluate(individual):
        # Two objectives, both minimized via the negative weights below.
        ideal_state = individual[-1]
        other_emotions = individual[:-1]
        return abs(ideal_state - 100), sum(other_emotions)

    # Guard: creator.create complains if the class already exists, and this
    # function is called repeatedly (idle state, convergence).
    if not hasattr(creator, "FitnessMin"):
        creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
    if not hasattr(creator, "Individual"):
        creator.create("Individual", list, fitness=creator.FitnessMin)

    def make_individual():
        # len(emotions) - 1 ordinary genes, then the ideal_state gene last.
        genes = [random.uniform(0, 20) for _ in range(len(emotions) - 1)]
        genes.append(random.uniform(80, 120))
        return creator.Individual(genes)

    toolbox = base.Toolbox()
    toolbox.register("complete_individual", make_individual)
    toolbox.register("population", tools.initRepeat, list, toolbox.complete_individual)

    toolbox.register("evaluate", evaluate)
    toolbox.register("mate", tools.cxBlend, alpha=0.5)
    toolbox.register("mutate", tools.mutGaussian, mu=10, sigma=5, indpb=0.3)
    toolbox.register("select", tools.selTournament, tournsize=3)

    population = toolbox.population(n=1000)
    population, log = algorithms.eaSimple(population, toolbox, cxpb=0.5, mutpb=0.2, ngen=50, verbose=False)

    best_individual = tools.selBest(population, k=1)[0]
    # NOTE(review): gene order puts ideal_state LAST, but 'ideal_state' is
    # not the last key of `emotions` — confirm the intended gene/key mapping.
    for idx, emotion in enumerate(emotions.keys()):
        emotions[emotion]['percentage'] = best_individual[idx]
|
|
|
|
|
# Hugging Face sentiment pipeline; downloads its default model on first use.
sentiment_analyzer = pipeline("sentiment-analysis")


# Sentence-embedding model; its output is currently only computed in
# get_emotional_response (see note there).
sentence_model = SentenceTransformer('all-MiniLM-L6-v2')
|
|
|
|
|
def get_emotional_response(context):
    """Classify *context* into an emotion, update the global emotion state,
    persist the history, print the new state, and return
    (predicted_emotion, emotion_intensity).
    """
    context = normalize_context(context)
    context_encoded = encoder.transform([[context]]).toarray()

    # Run the exported network through ONNX Runtime; `output` is a
    # (1, num_classes) array.
    ort_inputs = {ort_session.get_inputs()[0].name: context_encoded.astype(np.float32)}
    ort_outputs = ort_session.run(None, ort_inputs)
    output = ort_outputs[0]
    predicted_emotion = emotion_classes[np.argmax(output)]

    # Signed sentiment in [-1, 1]: negate the confidence for negative labels.
    sentiment = sentiment_analyzer(context)[0]
    sentiment_score = sentiment['score'] if sentiment['label'] == 'POSITIVE' else -sentiment['score']

    # Intensity combines sentiment confidence with classifier confidence.
    # (The original also computed a sentence embedding here but never used
    # it; the call was dropped to avoid a pointless model invocation.)
    emotion_intensity = abs(sentiment_score) * np.max(output)

    update_emotion(predicted_emotion, emotion_intensity * 20)

    # Fix: `output` is already 2-D (1, num_classes); the original passed
    # [output], a 3-D array that decision_function rejects.
    # NOTE(review): isolation_forest is only fitted under the __main__
    # guard — confirm this function cannot run before that happens.
    anomaly_score = isolation_forest.decision_function(output)[0]
    if anomaly_score < -0.5:
        print("Anomalous context detected. Adjusting emotional response.")
        update_emotion('calmness', 20)

    # Snapshot the full emotion state and persist the running history.
    emotion_state = {emotion: data['percentage'] for emotion, data in emotions.items()}
    emotion_history.append(emotion_state)
    save_historical_data(emotion_history)

    for emotion, data in emotions.items():
        print(f"{emotion.capitalize()}: {data['percentage']:.2f}% ({data['motivation']} motivation)")

    return predicted_emotion, emotion_intensity
|
|
|
|
|
def handle_idle_state():
    """Run one genetic evolution cycle while idle and report the result."""
    print("Entering idle state...")
    evolve_emotions()
    print("Emotions evolved")
    for name, info in emotions.items():
        print(f"{name.capitalize()}: {info['percentage']:.2f}% ({info['motivation']} motivation)")
|
|
|
|
|
class SOUL:
    """Wrapper around a causal language model that couples generated text
    with the emotional-response pipeline."""

    def __init__(self, model_name='tiiuae/falcon-40b'):
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, trust_remote_code=True)
        self.model.to(device)

        # Fix: torch dynamic quantization is CPU-only; applying it to a
        # CUDA-resident model fails, so only quantize when running on CPU.
        if device.type == 'cpu':
            self.model = quantize_dynamic(self.model, {nn.Linear}, dtype=torch.qint8)

    def generate_text(self, prompt, max_length=200):
        """Sample a continuation of *prompt* using top-k/top-p sampling."""
        inputs = self.tokenizer(prompt, return_tensors="pt").to(device)

        with torch.no_grad():
            generate_ids = self.model.generate(
                inputs.input_ids,
                # Fix: pass the attention mask and an explicit pad token so
                # generate() does not mis-handle padding (and stops warning).
                attention_mask=inputs.attention_mask,
                pad_token_id=self.tokenizer.eos_token_id,
                max_length=max_length,
                num_return_sequences=1,
                no_repeat_ngram_size=2,
                do_sample=True,
                top_k=50,
                top_p=0.95,
                temperature=0.7
            )

        return self.tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]

    def bridge_ai(self, prompt):
        """Generate a model response and feed it through the emotion
        pipeline. Returns (response_text, predicted_emotion, intensity)."""
        print("\nFalcon-40B Response:")
        falcon_response = self.generate_text(prompt)
        print(falcon_response)

        print("\nEmotional Response:")
        emotion, intensity = get_emotional_response(falcon_response)
        return falcon_response, emotion, intensity
|
|
|
|
|
def neural_genetic_convergence():
    """Every 10th recorded emotion state, run one GA evolution pass and one
    gradient step of the emotion network on the recent history."""
    if len(emotion_history) % 10 == 0:
        print("Neural-Genetic Convergence...")
        evolve_emotions()

        # Last 10 snapshots as a feature matrix; each row's "label" is the
        # index of its dominant emotion.
        X = np.array([list(state.values()) for state in emotion_history[-10:]])
        y = np.argmax(X, axis=1)
        optimizer.zero_grad()
        inputs = torch.FloatTensor(X).to(device)
        targets = torch.LongTensor(y).to(device)
        # NOTE(review): X has len(emotions) columns but the network was
        # sized for contexts_encoded.shape[1] inputs — confirm these match.
        # NOTE(review): backward() through a dynamically-quantized model
        # fails (packed weights are not trainable); the optimizer should
        # target the float network instead.
        outputs = emotional_nn_quantized(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        print("Convergence complete.")
|
|
|
|
|
def emotion_based_decision(emotion, intensity):
    """Return a canned conversational decision for an emotion/intensity pair.

    High-intensity (> 0.8) emotions outside the known groups yield None,
    matching the original fall-through behaviour.
    """
    if intensity > 0.8:
        positive_reply = "I'm feeling very positive! Let's do something fun!"
        down_reply = "I'm feeling down. I might need some time to process this."
        upset_reply = "I'm feeling upset. It might be best to take a break and calm down."
        strong_replies = {
            'joy': positive_reply,
            'excitement': positive_reply,
            'sadness': down_reply,
            'grief': down_reply,
            'anger': upset_reply,
            'frustration': upset_reply,
        }
        return strong_replies.get(emotion)
    if intensity > 0.5:
        return f"I'm feeling {emotion} at a moderate level. How about we discuss this further?"
    return f"I'm experiencing a mild sense of {emotion}. What are your thoughts on this?"
|
|
|
|
|
def self_reflect():
    """Print the dominant emotion and coarse trends over the last 5 states."""
    dominant_emotion = max(emotions, key=lambda e: emotions[e]['percentage'])
    print(f"Self-reflection: My dominant emotion is {dominant_emotion}.")
    print("Analyzing my recent emotional states...")

    # Gather each emotion's recent percentage series from the history tail.
    trends = {}
    for snapshot in emotion_history[-5:]:
        for name, pct in snapshot.items():
            trends.setdefault(name, []).append(pct)

    # Compare first vs. last reading to report a direction of change.
    for name, series in trends.items():
        if len(series) > 1:
            if series[-1] > series[0]:
                print(f"{name} has been increasing.")
            elif series[-1] < series[0]:
                print(f"{name} has been decreasing.")

    print("Based on this reflection, I should adjust my responses accordingly.")
|
|
|
|
|
# Big Five (OCEAN) personality traits, each kept within [0, 1]; they drift
# randomly over the conversation via adapt_personality().
personality_traits = {
    'openness': 0.5,
    'conscientiousness': 0.5,
    'extraversion': 0.5,
    'agreeableness': 0.5,
    'neuroticism': 0.5
}
|
|
|
def adapt_personality():
    """Randomly drift each Big Five trait by up to ±0.1, clamped to [0, 1]."""
    for trait, value in personality_traits.items():
        drift = random.uniform(-0.1, 0.1)
        personality_traits[trait] = min(1, max(0, value + drift))
    print("Personality traits adapted:", personality_traits)
|
|
|
|
|
def simulate_empathy(user_input):
    """Print an empathetic line keyed off TextBlob sentiment polarity."""
    polarity = TextBlob(user_input).sentiment.polarity
    if polarity > 0.5:
        print("I sense that you're feeling positive. That's wonderful!")
        return
    if polarity < -0.5:
        print("I can tell you might be feeling down. Is there anything I can do to help?")
        return
    print("I'm here to listen and support you, whatever you're feeling.")
|
|
|
|
|
def dream_state():
    """Feed imaginary scenarios through the emotion pipeline for offline
    learning."""
    print("Entering dream-like state for offline learning...")
    for scenario in (
        "flying through clouds",
        "solving complex puzzles",
        "exploring ancient ruins",
        "conversing with historical figures",
        "inventing new technologies",
    ):
        get_emotional_response(scenario)
    print("Dream-like state completed. New insights gained.")
|
|
|
|
|
def calculate_eq_score(emotion_state=None):
    """Compute, print, and return a simple EQ score.

    The score averages the 'empathy', 'self_awareness' and 'social_skills'
    percentages. Fix: none of those keys exist in the global `emotions`
    dict, so the original always raised KeyError; missing components now
    contribute 0.

    Args:
        emotion_state: optional mapping of emotion -> {'percentage': float};
            defaults to the global `emotions` (backward compatible).
    """
    state = emotions if emotion_state is None else emotion_state
    components = ('empathy', 'self_awareness', 'social_skills')
    eq_score = sum(state.get(e, {}).get('percentage', 0.0) for e in components) / 3
    print(f"Current Emotional Intelligence Score: {eq_score:.2f}")
    return eq_score
|
|
|
|
|
def process_multimodal_input():
    """Gather one turn of user input from text, microphone, and an optional
    image, and merge everything into a single context string.

    Returns:
        The space-joined, stripped combination of all captured modalities.
    """
    text_input = input("You (text): ")

    # Voice capture: listen once and try Google's free speech recognizer.
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Speak now...")
        audio = r.listen(source)
    try:
        voice_input = r.recognize_google(audio)
        print(f"Voice input: {voice_input}")
    except sr.UnknownValueError:
        voice_input = None
        print("Voice input not recognized")

    # Optional image: summarize it by its mean color. OpenCV loads images
    # as BGR, hence the reversed channel order when formatting as RGB.
    image_path = input("Enter path to image (or press enter to skip): ")
    if image_path:
        image = cv2.imread(image_path)
        if image is not None:
            average_color = np.mean(image, axis=(0, 1))
            image_input = f"Image with dominant color: RGB({average_color[2]:.0f}, {average_color[1]:.0f}, {average_color[0]:.0f})"
            print(image_input)
        else:
            image_input = None
            print("Failed to process image")
    else:
        image_input = None

    # Missing modalities collapse to '' so the combined string stays clean.
    combined_input = f"{text_input} {voice_input or ''} {image_input or ''}"
    return combined_input.strip()
|
|
|
|
|
# Shared googletrans client used for all translation requests.
translator = Translator()


def translate_input(text, target_language='en'):
    """Translate *text* into *target_language* (default English) so the
    downstream English-only models can process it."""
    translated = translator.translate(text, dest=target_language)
    return translated.text
|
|
|
|
|
# NOTE(review): this interactive loop executes at IMPORT time, before the
# `if __name__ == "__main__"` block below fits `isolation_forest`, so the
# first call into get_emotional_response() raises NameError — confirm, and
# consider moving the loop under the main guard after the forest is fitted.
soul = SOUL()

print("Welcome to the advanced SOUL AI. Type 'exit' to end the conversation.")
conversation_turn = 0
while True:
    user_input = process_multimodal_input()
    if user_input.lower() == 'exit':
        print("Thank you for the conversation. Goodbye!")
        break

    conversation_turn += 1

    # Normalize everything to English before the language model sees it.
    translated_input = translate_input(user_input)

    response, emotion, intensity = soul.bridge_ai(translated_input)

    decision = emotion_based_decision(emotion, intensity)
    print("AI Decision:", decision)

    # Empathy is keyed off the raw (untranslated) user input.
    simulate_empathy(user_input)

    neural_genetic_convergence()

    # Every 10 turns: drift the personality and report the EQ score.
    if conversation_turn % 10 == 0:
        adapt_personality()
        calculate_eq_score()

    # Every 20 turns: introspect and run the offline "dream" pass.
    if conversation_turn % 20 == 0:
        self_reflect()
        dream_state()

    # Every 5 turns: idle-state genetic evolution of the emotion budget.
    if conversation_turn % 5 == 0:
        handle_idle_state()
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Fit the anomaly detector on the network's outputs for the training
    # contexts. Fix: the model output is already a 2-D
    # (num_samples, num_classes) array; the original wrapped it in an extra
    # list, producing a 3-D array that IsolationForest.fit rejects.
    historical_data = emotional_nn_quantized(torch.FloatTensor(contexts_encoded).to(device)).detach().cpu().numpy()
    isolation_forest = IsolationForest(contamination=0.1, random_state=42)
    isolation_forest.fit(historical_data)

    try:
        # NOTE(review): the interactive loop currently runs at import time;
        # once it is moved under this guard, invoke it here.
        pass
    except Exception as e:
        print(f"An error occurred: {e}")
    finally:
        print("SOUL AI is shutting down. Final self-reflection:")
        self_reflect()
        print("Thank you for using SOUL AI. Goodbye!")
|
|