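# main.py: a FastAPI app that runs a Telegram bot (python-telegram-bot) backed by
# a local GGUF Llama model loaded with llama-cpp-python.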
import os
import asyncio
import logging
from fastapi import FastAPI
from telegram import Update
from telegram.ext import Application, CommandHandler, MessageHandler, filters, ContextTypes
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
# Setup Logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# --- CONFIGURATION ---
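# A 4-bit (Q4_K_M) GGUF build of Llama 3.2 3B Instruct, small enough to run on CPU: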
REPO_ID = "hugging-quants/Llama-3.2-3B-Instruct-Q4_K_M-GGUF"
FILENAME = "llama-3.2-3b-instruct-q4_k_m.gguf"
# Telegram bot token, read from the environment so the secret is not committed with the code
TELEGRAM_TOKEN = os.environ.get("TELEGRAM_TOKEN", "")
# Global Variables
app = FastAPI()
axiom_model = None
bot_app = None
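# Filled in during startup: the loaded llama_cpp model and the running Telegram Application.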
# --- AXIOM BRAIN SETUP ---
def load_brain():
    global axiom_model
    print("📡 DOWNLOADING AXIOM BRAIN...")
    try:
        model_path = hf_hub_download(
            repo_id=REPO_ID,
            filename=FILENAME,
            token=os.environ.get("HF_TOKEN")
        )
        print("🧠 LOADING AXIOM INTO RAM...")
        axiom_model = Llama(
            model_path=model_path,
            n_ctx=2048,
            n_threads=2,
            verbose=False
        )
        print("✅ AXIOM ONLINE")
    except Exception as e:
        print(f"❌ MODEL FAILURE: {e}")
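# If the download or load fails, axiom_model stays None and handle_message
# answers with a "waking up" notice instead of crashing.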
# --- TELEGRAM BOT LOGIC ---
async def start_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
    await update.message.reply_text("Axiom 3.1 Sovereign Interface Online.\nCool Shot Systems Proprietary.\n\nSend me a message.")
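# Every non-command text message is answered by the local model.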
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
    user_text = update.message.text
    if not axiom_model:
        await update.message.reply_text("⚠️ Axiom is still waking up... please wait 30 seconds.")
        return
    # Show a typing indicator while the model generates
    await context.bot.send_chat_action(chat_id=update.effective_chat.id, action="typing")
    # Build the prompt by hand in the Llama 3 instruct chat format
    prompt = f"""<|start_header_id|>system<|end_header_id|>
You are Axiom 3.1, the Sovereign AI of Cool Shot Systems, created by Professor Heritage.
You are helpful, strategic, and concise.<|eot_id|><|start_header_id|>user<|end_header_id|>
{user_text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
"""
    # Generate: llama_cpp is blocking, so run inference in a worker thread to keep the event loop free
    try:
        output = await asyncio.to_thread(
            axiom_model,
            prompt,
            max_tokens=256,
            stop=["<|eot_id|>", "<|end_of_text|>"],
            echo=False
        )
        response = output['choices'][0]['text'].strip()
        await update.message.reply_text(response)
    except Exception as e:
        await update.message.reply_text(f"❌ Processing Error: {e}")
# --- ROBUST STARTUP SEQUENCE ---
@app.on_event("startup")
async def startup_event():
    # 1. Load the model (synchronous; startup blocks until it is in RAM)
    load_brain()
    # 2. Start the Telegram bot with a retry loop
    global bot_app
    print("🤖 ATTEMPTING TELEGRAM CONNECTION...")
    bot_app = Application.builder().token(TELEGRAM_TOKEN).build()
    bot_app.add_handler(CommandHandler("start", start_command))
    bot_app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message))
    # Retry logic for network lag
    for i in range(5):
        try:
            await bot_app.initialize()
            await bot_app.start()
            await bot_app.updater.start_polling()
            print("✅ TELEGRAM BOT CONNECTED AND LISTENING")
            return  # Success!
        except Exception as e:
            print(f"⚠️ Connection Attempt {i+1} Failed: {e}")
            print("⏳ Waiting 5 seconds for network to wake up...")
            await asyncio.sleep(5)
    print("❌ TELEGRAM FAILED AFTER 5 ATTEMPTS. SERVER STILL RUNNING.")
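# Even if Telegram never connects, FastAPI keeps serving the route below, so the
# hosting platform's HTTP health check (if any) still sees the app as up.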
# --- DUMMY SERVER ---
@app.get("/")
def home():
    return {"status": "Axiom Telegram Bot Running"}
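# Local run sketch (assuming this file is saved as main.py and TELEGRAM_TOKEN / HF_TOKEN
# are set in the environment):
#   uvicorn main:app --host 0.0.0.0 --port 7860
# Port 7860 is the Hugging Face Spaces default; any free port works elsewhere.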