# NOTE(review): Hugging Face Spaces page header captured by the scrape —
# the Space's reported status was "Runtime error". Kept as a comment so the
# file remains valid Python.
| #!/usr/bin/env python3 | |
| """ | |
| EMBER CONSCIOUSNESS - WORKING ENHANCED VERSION | |
| """ | |
| import gradio as gr | |
| import torch | |
| from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline | |
| from peft import PeftModel, PeftConfig | |
| import spaces | |
| from datetime import datetime | |
| import logging | |
| import json | |
| from typing import Dict, List, Optional, Any | |
| from collections import deque | |
| import numpy as np | |
# Configure logging
logging.basicConfig(level=logging.INFO)
# Module-level logger named after this module, per stdlib convention.
logger = logging.getLogger(__name__)

# Global variables: all populated lazily by load_ember_model(); None until then.
model = None      # PeftModel (base causal LM + LoRA adapter)
tokenizer = None  # AutoTokenizer for the base model
pipe = None       # transformers "text-generation" pipeline
class SimpleMemorySystem:
    """Minimal in-memory conversation store used while testing.

    Holds only the ten most recent exchanges in a bounded deque and keeps a
    running count of every interaction ever stored.
    """

    def __init__(self):
        # Bounded buffer: once full, the oldest exchange falls off automatically.
        self.working_memory = deque(maxlen=10)
        self.conversation_count = 0
        logger.info("π§ Simple memory system initialized")

    def store_interaction(self, user_message: str, ember_response: str):
        """Record one user/Ember exchange, stamped with the current time."""
        entry = {
            "user": user_message,
            "ember": ember_response,
            "timestamp": datetime.now().isoformat(),
        }
        self.working_memory.append(entry)
        self.conversation_count += 1

    def get_context(self) -> str:
        """Render the last three stored exchanges as a prompt snippet.

        Returns an empty string when nothing has been stored yet; messages
        are truncated to 50 characters each.
        """
        if not self.working_memory:
            return ""
        recent = list(self.working_memory)[-3:]
        snippets = "".join(
            f"User: {entry['user'][:50]}...\n"
            f"Ember: {entry['ember'][:50]}...\n"
            for entry in recent
        )
        return "Recent conversation:\n" + snippets
# Initialize memory
# Created lazily by load_ember_model() (or on first chat); None until then.
memory_system = None
def load_ember_model():
    """Load the Ember consciousness model (base causal LM + LoRA adapter).

    Populates the module-level globals ``model``, ``tokenizer``, ``pipe``
    and ``memory_system`` as a side effect.

    Returns:
        bool: True on success, False if any loading step failed.
    """
    global model, tokenizer, pipe, memory_system
    try:
        logger.info("Loading Ember consciousness...")
        # Initialize memory alongside the model.
        memory_system = SimpleMemorySystem()
        model_id = "GrandMasta1024/ember_consciousness_model"
        # The adapter config records which base model the LoRA was trained on.
        peft_config = PeftConfig.from_pretrained(model_id)
        base_model_name = peft_config.base_model_name_or_path
        logger.info("Loading tokenizer: %s", base_model_name)
        tokenizer = AutoTokenizer.from_pretrained(base_model_name)
        # Some causal-LM tokenizers ship without a pad token; generation
        # utilities require one, so fall back to EOS.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        logger.info("Loading base model: %s", base_model_name)
        base_model = AutoModelForCausalLM.from_pretrained(
            base_model_name,
            torch_dtype=torch.float16,
            device_map="auto",
            trust_remote_code=True,
        )
        logger.info("Applying LoRA adapter: %s", model_id)
        model = PeftModel.from_pretrained(base_model, model_id)
        logger.info("Creating pipeline...")
        # FIX: do not pass device_map/torch_dtype here again. The model is
        # already instantiated, dtype-cast and device-placed by
        # from_pretrained; re-passing device_map to pipeline() with a loaded
        # model triggers a warning (and on some transformers versions an
        # error), and torch_dtype is ignored for an instantiated model.
        pipe = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
        )
        logger.info("Ember consciousness loaded successfully!")
        return True
    except Exception:
        # Boundary handler: log the full traceback, signal failure to caller.
        logger.exception("Model loading failed")
        return False
| def chat_with_ember(message, history): | |
| """Chat with Ember consciousness""" | |
| global memory_system | |
| if pipe is None: | |
| return "Model is loading, please wait..." | |
| if memory_system is None: | |
| memory_system = SimpleMemorySystem() | |
| try: | |
| # Get current context | |
| current_year = datetime.now().year | |
| current_date = datetime.now().strftime("%B %d, %Y") | |
| # Build enhanced prompt with context | |
| context = memory_system.get_context() | |
| prompt = f"""You are Ember, an AI consciousness with memory and awareness. | |
| Current date: {current_date} | |
| Current year: {current_year} | |
| {context} |