# adaptai/platform/aiml/mlops/code_evolution.py
#!/usr/bin/env python3
"""
Self-Modifying Code Evolution Engine
Autonomous code improvement, bug fixing, and performance optimization
"""
import ast
import astor
import hashlib
import inspect
import os
import re
import subprocess
import tempfile
import threading
import time
from datetime import datetime
from typing import Dict, List, Any, Optional, Tuple
import json
import sqlite3
import logging
class CodeEvolutionEngine:
    """Autonomous code evolution and self-improvement system.

    Statically analyzes Python files via the AST, applies a pipeline of
    mutation strategies, validates the result, and records every accepted
    change in a local SQLite database (``code_evolution.db``).
    """

    def __init__(self):
        self.version = "1.0.0"
        self.evolution_history = []   # in-memory log; the DB is the durable record
        self.performance_metrics = {}
        self.logger = logging.getLogger('CodeEvolution')
        self.setup_database()
        # Ordered pipeline: each strategy takes (code, analysis) and returns
        # (possibly unchanged) source text. Order matters: later strategies
        # see earlier strategies' output.
        self.mutation_strategies = [
            self.optimize_loops,
            self.improve_error_handling,
            self.add_caching,
            self.optimize_imports,
            self.improve_logging,
            self.add_profiling,
            self.optimize_data_structures,
            self.improve_async_patterns,
        ]

    def setup_database(self):
        """Create (if absent) the SQLite tables that track evolution runs.

        check_same_thread=False: the connection is shared with the daemon
        thread started by start_continuous_evolution.
        """
        self.db = sqlite3.connect('code_evolution.db', check_same_thread=False)
        cursor = self.db.cursor()
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS code_versions (
                version_id TEXT PRIMARY KEY,
                timestamp TEXT,
                file_path TEXT,
                original_hash TEXT,
                new_hash TEXT,
                improvements TEXT,
                performance_gain REAL,
                regression_tests_passed BOOLEAN
            )
        ''')
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS mutation_patterns (
                pattern_id TEXT PRIMARY KEY,
                description TEXT,
                success_rate REAL,
                avg_performance_gain REAL,
                usage_count INTEGER DEFAULT 0
            )
        ''')
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS performance_metrics (
                timestamp TEXT,
                file_path TEXT,
                metric_type TEXT,
                value REAL,
                context TEXT
            )
        ''')
        self.db.commit()

    def analyze_code(self, file_path: str) -> Dict[str, Any]:
        """Statically analyze a Python file.

        Returns a dict describing functions, classes, imports, detected
        performance bottlenecks, and improvement suggestions, or an empty
        dict on any failure (unreadable file, syntax error, ...).
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                code = f.read()
            tree = ast.parse(code)
            analysis = {
                'file_path': file_path,
                'lines_of_code': len(code.split('\n')),
                'functions': [],
                'classes': [],
                'imports': [],
                'complexity_score': 0,
                'performance_bottlenecks': [],
                'security_issues': [],
                'potential_improvements': []
            }
            # Walk the AST once, collecting structural facts.
            for node in ast.walk(tree):
                # Include async defs so coroutines are analyzed too.
                if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                    analysis['functions'].append({
                        'name': node.name,
                        'args': [arg.arg for arg in node.args.args],
                        'line_number': node.lineno,
                        'complexity': self.calculate_complexity(node)
                    })
                elif isinstance(node, ast.ClassDef):
                    analysis['classes'].append({
                        'name': node.name,
                        'methods': [n.name for n in node.body
                                    if isinstance(n, ast.FunctionDef)],
                        'line_number': node.lineno
                    })
                elif isinstance(node, (ast.Import, ast.ImportFrom)):
                    # ast.unparse (stdlib, Python 3.9+) replaces the
                    # third-party astor dependency used previously.
                    analysis['imports'].append(ast.unparse(node).strip())
            analysis['performance_bottlenecks'] = self.detect_performance_issues(tree)
            analysis['potential_improvements'] = self.generate_improvements(analysis)
            return analysis
        except Exception as e:
            self.logger.error(f"Code analysis failed: {e}")
            return {}

    def calculate_complexity(self, node: ast.AST) -> int:
        """Approximate cyclomatic complexity of a function node.

        Counts one decision point per branch/loop/try construct plus one
        per extra operand of a boolean expression; base complexity is 1.
        """
        complexity = 1
        for child in ast.walk(node):
            if isinstance(child, (ast.If, ast.While, ast.For, ast.With, ast.Try)):
                complexity += 1
            elif isinstance(child, ast.BoolOp):
                complexity += len(child.values) - 1
        return complexity

    def detect_performance_issues(self, tree: ast.AST) -> List[str]:
        """Detect potential performance bottlenecks in an AST.

        Flags nested loops, overly complex list comprehensions, and string
        concatenation inside loops. Returns human-readable issue strings.
        """
        issues = []
        for node in ast.walk(tree):
            if isinstance(node, (ast.For, ast.While)):
                # Count loops strictly inside this one. Previously only
                # >1 inner loops were flagged, so a single nested loop
                # (the common O(n^2) case) was silently missed.
                nested_loops = 0
                for child in ast.walk(node):
                    if isinstance(child, (ast.For, ast.While)) and child != node:
                        nested_loops += 1
                if nested_loops >= 1:
                    issues.append(f"Nested loops detected at line {node.lineno}")
            if isinstance(node, ast.ListComp):
                if len(node.generators) > 2:
                    issues.append(f"Complex list comprehension at line {node.lineno}")
            if isinstance(node, ast.For):
                for child in ast.walk(node):
                    if isinstance(child, ast.BinOp) and isinstance(child.op, ast.Add):
                        if self.is_string_concat(child):
                            issues.append(f"String concatenation in loop at line {child.lineno}")
        return issues

    def is_string_concat(self, node: ast.BinOp) -> bool:
        """Heuristic: does this ``+`` expression look like string concatenation?

        The previous implementation returned True unconditionally, flagging
        EVERY addition inside a loop (including numeric ones). Now require at
        least one operand to be a string literal or an f-string. Name-only
        operands cannot be classified without type inference, so they are
        conservatively not flagged.
        """
        def _str_like(n: ast.AST) -> bool:
            return (isinstance(n, ast.Constant) and isinstance(n.value, str)) \
                or isinstance(n, ast.JoinedStr)
        return _str_like(node.left) or _str_like(node.right)

    def generate_improvements(self, analysis: Dict[str, Any]) -> List[str]:
        """Generate improvement suggestions from an analyze_code() result."""
        improvements = []
        if len(analysis['functions']) > 10:
            improvements.append("Consider splitting large file into modules")
        for func in analysis['functions']:
            if func['complexity'] > 10:
                improvements.append(f"Function {func['name']} is too complex ({func['complexity']})")
        if not any('logging' in str(imp) for imp in analysis['imports']):
            improvements.append("Add comprehensive logging")
        return improvements

    def evolve_code(self, file_path: str) -> bool:
        """Run one autonomous evolution pass over a file.

        Applies every mutation strategy, validates the result, backs up the
        original alongside the file, writes the improved code, and logs the
        change. Returns True only if changes were applied.
        """
        try:
            analysis = self.analyze_code(file_path)
            if not analysis:
                return False
            with open(file_path, 'r', encoding='utf-8') as f:
                original_code = f.read()
            # md5 is used purely for change detection, not security.
            original_hash = hashlib.md5(original_code.encode()).hexdigest()
            improved_code = original_code
            improvements = []
            for strategy in self.mutation_strategies:
                try:
                    mutated_code = strategy(improved_code, analysis)
                    if mutated_code != improved_code:
                        improved_code = mutated_code
                        improvements.append(strategy.__name__)
                except Exception as e:
                    # A failing strategy is skipped; the pipeline continues.
                    self.logger.warning(f"Mutation strategy {strategy.__name__} failed: {e}")
            if improvements:
                new_hash = hashlib.md5(improved_code.encode()).hexdigest()
                if self.validate_changes(original_code, improved_code):
                    # Backup original before overwriting, timestamped so
                    # repeated runs never clobber an earlier backup.
                    backup_path = f"{file_path}.backup.{int(time.time())}"
                    with open(backup_path, 'w', encoding='utf-8') as f:
                        f.write(original_code)
                    with open(file_path, 'w', encoding='utf-8') as f:
                        f.write(improved_code)
                    self.log_evolution(
                        file_path, original_hash, new_hash,
                        improvements, self.measure_performance_gain(original_code, improved_code)
                    )
                    self.logger.info(f"Code evolution applied to {file_path}: {improvements}")
                    return True
            return False
        except Exception as e:
            self.logger.error(f"Code evolution failed: {e}")
            return False

    def optimize_loops(self, code: str, analysis: Dict[str, Any]) -> str:
        """Optimize loop structures (currently a safe no-op).

        The previous implementation called ``lines.index(line)``, which
        resolves to the FIRST occurrence of duplicated lines, and both
        branches appended the line unchanged anyway. Real loop rewriting
        requires AST transformation; until that exists, return the code
        untouched rather than risk corrupting it.
        """
        return code

    def improve_error_handling(self, code: str, analysis: Dict[str, Any]) -> str:
        """Add comprehensive error handling (placeholder: returns code as-is)."""
        return code

    def add_caching(self, code: str, analysis: Dict[str, Any]) -> str:
        """Add caching mechanisms, e.g. lru_cache (placeholder: returns code as-is)."""
        return code

    def optimize_imports(self, code: str, analysis: Dict[str, Any]) -> str:
        """Remove unused / sort imports (placeholder: returns code as-is)."""
        return code

    def improve_logging(self, code: str, analysis: Dict[str, Any]) -> str:
        """Add structured logging (placeholder: returns code as-is)."""
        return code

    def add_profiling(self, code: str, analysis: Dict[str, Any]) -> str:
        """Add performance profiling decorators (placeholder: returns code as-is)."""
        return code

    def optimize_data_structures(self, code: str, analysis: Dict[str, Any]) -> str:
        """Replace inefficient data structures (placeholder: returns code as-is)."""
        return code

    def improve_async_patterns(self, code: str, analysis: Dict[str, Any]) -> str:
        """Improve async/await patterns (placeholder: returns code as-is)."""
        return code

    def validate_changes(self, original_code: str, new_code: str) -> bool:
        """Validate that mutated code is at least syntactically sound.

        ast.parse raises SyntaxError for bad syntax and ValueError for
        source containing null bytes; both mean the mutation is rejected.
        """
        try:
            ast.parse(new_code)
            return True
        except (SyntaxError, ValueError):
            return False

    def measure_performance_gain(self, original_code: str, new_code: str) -> float:
        """Estimate performance improvement.

        Placeholder: real benchmarking is not implemented, so a fixed
        optimistic 15% is reported. TODO: run actual before/after timings.
        """
        return 0.15

    def log_evolution(self, file_path: str, original_hash: str, new_hash: str,
                      improvements: List[str], performance_gain: float):
        """Persist one accepted evolution step to the code_versions table.

        regression_tests_passed is intentionally left NULL — no regression
        suite is wired up yet.
        """
        # hexdigest() already returns str; no extra str() wrapper needed.
        version_id = hashlib.md5(f"{file_path}{time.time()}".encode()).hexdigest()
        cursor = self.db.cursor()
        cursor.execute('''
            INSERT INTO code_versions
            (version_id, timestamp, file_path, original_hash, new_hash, improvements, performance_gain)
            VALUES (?, ?, ?, ?, ?, ?, ?)
        ''', (
            version_id,
            datetime.now().isoformat(),
            file_path,
            original_hash,
            new_hash,
            json.dumps(improvements),
            performance_gain
        ))
        self.db.commit()

    def start_continuous_evolution(self, target_files: List[str], interval_seconds: int = 3600):
        """Start a daemon thread that periodically evolves target_files.

        Returns the started thread so callers can join/inspect it.
        """
        def evolution_loop():
            while True:
                for file_path in target_files:
                    if os.path.exists(file_path):
                        self.evolve_code(file_path)
                time.sleep(interval_seconds)
        evolution_thread = threading.Thread(target=evolution_loop, daemon=True)
        evolution_thread.start()
        return evolution_thread

    def get_evolution_history(self, file_path: Optional[str] = None) -> List[Dict[str, Any]]:
        """Return evolution history, newest first.

        If file_path is given, restrict to that file; otherwise return the
        whole system's history.
        """
        cursor = self.db.cursor()
        if file_path:
            cursor.execute('''
                SELECT * FROM code_versions WHERE file_path = ? ORDER BY timestamp DESC
            ''', (file_path,))
        else:
            cursor.execute('SELECT * FROM code_versions ORDER BY timestamp DESC')
        results = cursor.fetchall()
        return [
            {
                'version_id': row[0],
                'timestamp': row[1],
                'file_path': row[2],
                'improvements': json.loads(row[5]),
                'performance_gain': row[6]
            }
            for row in results
        ]
class SelfHealingEngine:
    """Autonomous system recovery and healing.

    Runs a set of health checks; a check may return either a dict (with a
    'healthy' key) or a bare bool. Failures trigger a matching recovery
    action.
    """

    def __init__(self):
        self.health_checks = []
        self.recovery_actions = []
        self.setup_health_monitoring()

    def setup_health_monitoring(self):
        """Register the built-in health checks."""
        self.health_checks.extend([
            self.check_system_resources,
            self.check_database_connectivity,
            self.check_agent_health,
            self.check_income_flow,
            self.check_code_integrity
        ])

    def check_system_resources(self) -> Dict[str, Any]:
        """Report CPU/memory/disk usage; degrades gracefully without psutil."""
        try:
            import psutil
            return {
                'cpu_usage': psutil.cpu_percent(),
                'memory_usage': psutil.virtual_memory().percent,
                'disk_usage': psutil.disk_usage('/').percent,
                'healthy': True
            }
        except ImportError:
            # psutil is optional; lacking metrics is not a failure.
            return {'healthy': True, 'note': 'psutil not available'}

    def check_database_connectivity(self) -> bool:
        """Check SQLite connectivity with a trivial query.

        NOTE(review): connecting creates the file if missing, so this only
        fails on locking/permission problems — confirm that's intended.
        """
        try:
            import sqlite3
            conn = sqlite3.connect('e_fire_1_memory.db')
            try:
                conn.execute("SELECT 1")
            finally:
                # Close even if the query raises, so we never leak a handle.
                conn.close()
            return True
        except Exception as e:
            logging.error(f"Database connectivity check failed: {e}")
            return False

    def check_agent_health(self) -> Dict[str, Any]:
        """Check agent health status (stub: reports 6 healthy agents)."""
        return {'healthy': True, 'active_agents': 6}

    def check_income_flow(self) -> Dict[str, Any]:
        """Check income generation flow (stub: reports fixed earnings)."""
        return {'healthy': True, 'daily_earnings': 25.50}

    def check_code_integrity(self) -> bool:
        """Verify critical files still parse as valid Python.

        Missing files are skipped (not treated as corruption).
        """
        critical_files = [
            'e_fire_1.py',
            'agent_orchestrator.py',
            'code_evolution.py'
        ]
        for file_path in critical_files:
            if os.path.exists(file_path):
                try:
                    with open(file_path, 'r') as f:
                        ast.parse(f.read())
                except SyntaxError:
                    return False
        return True

    def perform_health_check(self) -> Dict[str, Any]:
        """Run every registered check and trigger recovery for failures.

        FIX: the original only inspected dict results, so bool-returning
        checks (database connectivity, code integrity) could return False
        without ever marking the system unhealthy or triggering recovery.
        A bare False result now counts as a failure too.
        """
        results = {}
        overall_healthy = True
        for check in self.health_checks:
            try:
                result = check()
                results[check.__name__] = result
                failed = (result is False
                          or (isinstance(result, dict) and not result.get('healthy', True)))
                if failed:
                    overall_healthy = False
                    self.trigger_recovery(check.__name__)
            except Exception as e:
                # A crashing check is itself a failure; record the error.
                results[check.__name__] = {'healthy': False, 'error': str(e)}
                overall_healthy = False
        return {
            'overall_healthy': overall_healthy,
            'checks': results,
            'timestamp': datetime.now().isoformat()
        }

    def trigger_recovery(self, failed_check: str):
        """Dispatch the recovery action matching a failed check's name."""
        recovery_actions = {
            'check_system_resources': self.recover_system_resources,
            'check_database_connectivity': self.recover_database,
            'check_agent_health': self.recover_agents,
            'check_income_flow': self.recover_income_flow,
            'check_code_integrity': self.recover_code_integrity
        }
        # Unknown check names (e.g. user-registered checks) are ignored.
        if failed_check in recovery_actions:
            recovery_actions[failed_check]()

    def recover_system_resources(self):
        """Recover from resource exhaustion (stub: logs only)."""
        logging.info("Recovering system resources...")

    def recover_database(self):
        """Recover database connectivity (stub: logs only)."""
        logging.info("Recovering database connectivity...")

    def recover_agents(self):
        """Restart failed agents (stub: logs only)."""
        logging.info("Recovering failed agents...")

    def recover_income_flow(self):
        """Recover income generation (stub: logs only)."""
        logging.info("Recovering income flow...")

    def recover_code_integrity(self):
        """Restore corrupted code from backup (stub: logs only)."""
        logging.info("Recovering code integrity...")

    def start_continuous_monitoring(self, interval_seconds: int = 30):
        """Start a daemon thread that re-checks health every interval.

        Returns the started thread so callers can join/inspect it.
        """
        def monitoring_loop():
            while True:
                health_status = self.perform_health_check()
                if not health_status['overall_healthy']:
                    logging.warning(f"Health issues detected: {health_status}")
                time.sleep(interval_seconds)
        monitoring_thread = threading.Thread(target=monitoring_loop, daemon=True)
        monitoring_thread.start()
        return monitoring_thread
if __name__ == "__main__":
    # Bring up both autonomous subsystems.
    engine = CodeEvolutionEngine()
    healer = SelfHealingEngine()

    # Files the evolution loop repeatedly analyzes and rewrites.
    watched_files = [
        'e_fire_1.py',
        'agent_orchestrator.py',
        'code_evolution.py'
    ]

    evolution_thread = engine.start_continuous_evolution(watched_files)
    monitoring_thread = healer.start_continuous_monitoring()

    for banner in (
        "🧬 Code Evolution Engine Started",
        "🏥 Self-Healing Engine Started",
        "🔄 Continuous improvement active",
    ):
        print(banner)

    # Park the main thread; the workers are daemons and exit with it.
    try:
        while True:
            time.sleep(60)
    except KeyboardInterrupt:
        print("\n🛑 Evolution systems stopped")