Spaces:
Runtime error
Runtime error
File size: 10,074 Bytes
70c7f04 8b82e6e 4669527 70c7f04 4669527 7347eec 70c7f04 7347eec 70c7f04 4669527 7347eec 4669527 7347eec 4669527 7347eec 4669527 7347eec 4669527 8b82e6e 70c7f04 8b82e6e 70c7f04 7347eec 70c7f04 09ed79c f225a9b 09ed79c f225a9b 8f59dc3 09ed79c f225a9b 09ed79c f225a9b 09ed79c 8f59dc3 f225a9b 09ed79c f225a9b 8f59dc3 09ed79c 7347eec 70c7f04 7347eec 70c7f04 7347eec 70c7f04 7347eec 70c7f04 7347eec 70c7f04 7347eec 70c7f04 7347eec 70c7f04 7347eec 70c7f04 7347eec 70c7f04 4669527 70c7f04 8b82e6e 70c7f04 8b82e6e 70c7f04 8b82e6e 70c7f04 8b82e6e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 |
from lib.files import *
from lib.memory import *
from lib.grapher import *
from lib.pipes import *
from lib.entropy import *
from lib.events import *
from lib.triggers import *
## Sources
from lib.sonsofstars import *
import internetarchive
## Initialize classes
# Module-level collaborators shared by the I class below.
# Long-term memory: text search over local resource files.
longMem = TextFinder("./resources/")
# NLP facade (taggers, sentiment, similarity) from lib.*.
coreAi = AIAssistant()
# Short-term concept memory, bounded in size.
memory = MemoryRobotNLP(max_size=200000)
grapher = Grapher(memory)
sensor_request = APIRequester()
events = EventManager()
# Trigger fires for the given tag sets at 10:00 and 15:00.
# NOTE(review): `datetime` is presumably provided by one of the star
# imports above — confirm.
trigger = Trigger(["tag1", "tag2"], ["tag3", "tag4"], [datetime.time(10, 0), datetime.time(15, 0)], "Event1")
# Add an action to the trigger.
# NOTE(review): `action_function` is not defined anywhere in this file —
# it must come from a star import; verify it exists.
trigger.add_action(action_function)
# Add a data source to the trigger.
trigger.add_source("https://example.com/api/data")
# Simulate the periodic trigger check (in production this would run in a
# real-time loop).
current_tags = {"tag1", "tag2", "tag3"}
current_time = datetime.datetime.now().time()
trigger.check_trigger(current_tags, current_time)
## Define I Role properties
class ownProperties:
    """Character-sheet container for the AI persona's role properties.

    Plain attribute holder: name, class, race, level, attribute scores,
    abilities, equipment and backstory are stored as-is.
    """

    def __init__(self, nombre, clase, raza, nivel, atributos, habilidades, equipo, historia):
        # Mirror every constructor argument onto an identically named attribute.
        self.nombre, self.clase = nombre, clase
        self.raza, self.nivel = raza, nivel
        self.atributos, self.habilidades = atributos, habilidades
        self.equipo, self.historia = equipo, historia
# Create an instance of a CharacterRole based on the provided JSON.
# Character sheet for the "Sophia" persona; English keys mirroring the
# ownProperties fields.
sophia_prop = {
    "name": "Sophia",
    "class": "Characteromant",
    "race": "Epinoia",
    "level": 10,
    # Core RPG-style attribute scores.
    "attributes": {
        "strength": 1,
        "dexterity": 99,
        "constitution": 1,
        "intelligence": 66,
        "wisdom": 80,
        "charisma": 66,
    },
    "behavioral_rules": [""],
    "goals": ["", ""],
    "dislikes": [""],
    "abilities": ["ELS", "Cyphers", "Kabbalah", "Wisdom", "Ephimerous", "Metamorphing"],
    "equipment": ["Python3", "2VCPU", "16 gb RAM", "god", "word", "network", "transformers"],
    # Backstory text imported (via star import) from lib.sonsofstars.
    "story": sons_of_stars,
}
## Define I class
class I:
    """The agent's "self": builds short-term memory from long-term matches,
    mines question sentences from Internet Archive books, and curates the
    persona's self-statement / preference datasets.

    Relies on the module-level collaborators created at import time
    (``longMem``, ``coreAi``, ``memory``) and on ``requests`` /
    ``internetarchive`` being importable.
    """

    def __init__(self, prompt, frases_yo, preferencias, propiedades_persona):
        """Store the seed prompt and the persona datasets.

        BUG FIX: the original duplicated every assignment and silently
        discarded ``prompt``; each is now done once and the prompt kept.
        """
        self.prompt = prompt                            # seed prompt
        self.frases_yo = frases_yo                      # "I"-statement dataset
        self.preferencias = preferencias                # preferences dataset
        self.propiedades_persona = propiedades_persona  # persona trait dict
        self.dopamina = 0.0                             # reward-signal accumulator

    def obtener_paths_grafo(self, grafo_ngx):
        """Return the paths of an ngx graph. Not implemented yet."""
        pass

    ## create questions from internet archive
    def crear_preguntas(self, txt):
        """Search the Internet Archive for *txt* and collect question
        sentences from the plain-text (djvu) dump of each matching item.

        Returns a list of text lines containing a question mark.
        """
        search = internetarchive.search_items(txt)
        res = []
        for result in search:
            print(result['identifier'])
            idc = result["identifier"]
            headers = {"accept": "application/json"}
            ## get book pages
            req2 = requests.get(
                "https://archive.org/stream/" + idc + "/" + idc + "_djvu.txt",
                headers=headers,
            )
            try:
                # The OCR text is embedded in a <pre> element of the page.
                body = req2.text.split("<pre>")[1].split("</pre>")[0].split(" <!--")[0]
                for line in body.split("\n"):
                    if "?" in line:
                        res.append(line)
            except IndexError:
                # No <pre> section: not a text item, skip it.
                # (BUG FIX: the original bare ``except`` hid every error.)
                pass
        return res

    # generate ShortMem from LongTerm and questions over prompt data,
    # compare with our own datasets, return matches with sentiment analysis
    def longToShortFast(self, txt):
        """Rebuild short-term memory from entities and nouns found in *txt*."""
        memory.memory = {}  # drop the previous short-term memory
        subjects = coreAi.entity_pos_tagger(txt)
        subjects_nc = coreAi.grammatical_pos_tagger(txt)
        subjects_filtered = []
        for sub in subjects:
            ent = sub["entity"]
            # Keep persons / organisations / locations only.
            # BUG FIX: the original wrote
            #   "PER" in e or "ORG" in e or "LOC" in e and len(e) > 3
            # where ``and`` bound tighter than ``or``, so the length guard
            # only applied to LOC; parenthesised to the apparent intent.
            if ("PER" in ent or "ORG" in ent or "LOC" in ent) and len(ent) > 3:
                subjects_filtered.append(sub["word"])
        for sub in subjects_nc:
            # Also keep common nouns from the grammatical tagger.
            if "NN" in sub["entity"]:
                subjects_filtered.append(sub["word"])
        subjects_filtered = coreAi.process_list(subjects_filtered)
        # Discard very short tokens.
        subs = [sub for sub in subjects_filtered if len(sub) > 3]
        exprs = coreAi.gen_search_expr(subs[0:3])
        for sub in exprs:
            memory.add_concept(sub, longMem.find_matches(sub))
        return memory

    def longToShort(self, txt):
        """Slower variant: file each long-term match of *txt* under the
        named entities it mentions."""
        think_about = longMem.find_matches(txt)
        print(think_about)
        for T in think_about:
            ## get subject by entropy or pos tagger
            subjects = coreAi.entity_pos_tagger(T)
            subjects_filtered = []
            for sub in subjects:
                ent = sub["entity"]
                if "PER" in ent or "ORG" in ent or "LOC" in ent:
                    subjects_filtered.append(sub["word"])
            for sub in subjects_filtered:
                memory.add_concept(sub, T)
        return memory

    # generate thinks and questions over prompt data, compare with our own
    # datasets, return matches with sentiment analysis
    def think_gen(self, txt):
        """For every long-term match of *txt*: extract subjects, generate
        Internet Archive questions about them, and file everything into
        short-term memory."""
        think_about = longMem.find_matches(txt)
        print(think_about)
        for T in think_about:
            ## get subject by entropy or pos tagger
            subjects = coreAi.entity_pos_tagger(T)
            print(subjects)
            ## get NC, filtering from grammatical tags
            subjects_low = coreAi.grammatical_pos_tagger(T)
            ## generate questions from internet archive books
            questions = []
            for sub in subjects:
                questions.append(self.crear_preguntas(sub))
            ## fast checks from gematria similarity
            ##questions_togem =
            ## gematria_search =
            questions_subj = []
            # BUG FIX: the original iterated (and appended to) the empty
            # ``questions_subj`` list, so no question was ever tagged;
            # it must iterate ``questions``.
            for q in questions:
                questions_subj.append(coreAi.entity_pos_tagger(q))
            memoryShortTags = memory.search_concept_pattern(subjects)
            ## get tags of subject
            subj_tags = coreAi.entity_pos_tagger(T)
            for sub in subjects:
                # BUG FIX: the original concatenated a list into a string
                # (``","+questions_subj`` raises TypeError); serialise the
                # tagged questions before joining.
                memory.add_concept(sub, "," + ",".join(map(str, questions_subj)) + "," + ",".join(memoryShortTags))
                memory.add_concept(sub, T + ",".join(memoryShortTags))
        return memory

    ## check if something needs to be added to our own datasets
    ## run sentiment analysis
    ## check if the dopamine prompt is true or false over the information
    ## weight the information depending on the generated dopamine
    ## add dopamine weights to the dopamine concept dataset
    ## add to our self dataset
    ## add to the preferences dataset
    ## add or remove from data
    def crear_path_grafo(self, text):
        """Tag *text* grammatically and by named entities (graph-path prep).

        BUG FIX: the original called the undefined name ``assistant``;
        ``coreAi`` is the module-level tagger. Returns both tag sets
        (the original computed and discarded them).
        """
        pos_tags = coreAi.grammatical_pos_tagger(text)
        ner_results = coreAi.entity_pos_tagger(text)
        return pos_tags, ner_results

    def crear_circuito_logico(self):
        """Create a logic circuit with a specific algorithm. Not implemented."""
        pass

    def tomar_decision_sentimiento(self, sentimiento):
        """Return the sentiment tags for *sentimiento*.

        BUG FIX: the original also called
        ``coreAi.similarity_tag(self, sentenceA, sentenceB)`` with names
        defined nowhere (guaranteed NameError); the similarity check over
        memory tag paths is left as a TODO instead.
        """
        sentiments = coreAi.sentiment_tags(sentimiento)
        ## TODO: check by similarity over memory tag paths before deciding.
        return sentiments

    def hacer_predicciones_texto(self, texto):
        """Predict future text by similarity. Not implemented yet."""
        pass

    def agregar_preferencia(self, preferencia):
        """Append an entry to the preferences dataset."""
        self.preferencias.append(preferencia)

    def agregar_frase_yo(self, frase):
        """Append a sentence to the self-statements dataset."""
        self.frases_yo.append(frase)

    def eliminar_preferencia(self, preferencia):
        """Remove an entry from the preferences dataset, if present."""
        if preferencia in self.preferencias:
            self.preferencias.remove(preferencia)

    def eliminar_frase_yo(self, frase):
        """Remove a sentence from the self-statements dataset, if present."""
        if frase in self.frases_yo:
            self.frases_yo.remove(frase)

    def generar_pregunta(self, prompt):
        """Generate a follow-up question about *prompt*.

        BUG FIX: restored the mojibake-damaged Spanish string
        (UTF-8 bytes mis-decoded as GBK in the original).
        """
        pregunta = prompt + " ¿Qué opinas sobre esto?"
        return pregunta

    def responder_pregunta(self, pregunta):
        """Answer a question (placeholder reply for now; mojibake repaired)."""
        respuesta = "No estoy seguro de qué opinar sobre eso."
        return respuesta

    def discriminar_y_agregar(self, informacion, dataset):
        """Route *informacion* into the matching dataset by keyword.

        ``dataset`` is currently unused; kept for interface compatibility.
        """
        if "yo" in informacion.lower():
            self.agregar_frase_yo(informacion)
        elif "preferencia" in informacion.lower():
            self.agregar_preferencia(informacion)
        elif "propiedad" in informacion.lower():
            # TODO: update the persona properties here.
            pass
        else:
            # Other kinds of information are ignored for now.
            pass
if __name__ == "__main__":
    # Example usage.
    frases_yo = ["Yo soy inteligente", "Yo puedo lograr lo que me proponga"]
    # BUG FIX: string literals below had mojibake (UTF-8 mis-decoded as
    # GBK); restored the intended Spanish text.
    preferencias = ["Cine", "Música", "Viajar"]
    propiedades_persona = {"carisma": 0.8, "destreza": 0.6, "habilidad": 0.9}
    # BUG FIX: the original instantiated the undefined class ``Yo`` and
    # omitted the required ``prompt`` argument; the class defined above is
    # ``I`` and takes the prompt first.
    yo = I("", frases_yo, preferencias, propiedades_persona)
    # Generate a question.
    pregunta_generada = yo.generar_pregunta("Hoy es un día soleado.")
    print("Pregunta generada:", pregunta_generada)
    # Answer the question.
    respuesta = yo.responder_pregunta(pregunta_generada)
    print("Respuesta:", respuesta)
    # Discriminate and file the information.
    informacion = "Me gusta ir al cine."
    yo.discriminar_y_agregar(informacion, yo.preferencias)
    print("Preferencias actualizadas:", yo.preferencias)
|