import os

import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Model configuration
MODEL_ID = "PuruAI/Medini_Intelligence"
FALLBACK_MODEL = "gpt2"
HF_TOKEN = os.getenv("HF_TOKEN")  # must be set in your env/secrets
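# (Assumption: set via `export HF_TOKEN=...` locally, or as a Space secret
# named HF_TOKEN; Spaces expose secrets as environment variables.)
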
def load_model(model_id):
    """Load Medini if available, otherwise fall back to GPT-2."""
    try:
        print(f"🔹 Loading model: {model_id}")
        tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
        model = AutoModelForCausalLM.from_pretrained(model_id, token=HF_TOKEN)
        return pipeline("text-generation", model=model, tokenizer=tokenizer)
    except Exception as e:
        print(f"❌ Failed to load {model_id}: {e}")
        print("⏩ Falling back to GPT-2 (no token needed)")
        return pipeline("text-generation", model=FALLBACK_MODEL)

# Initialize pipeline
generator = load_model(MODEL_ID)
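
# Illustrative sanity check (not part of the original file): log which model
# the pipeline actually loaded, useful for confirming whether the fallback
# path was taken. `name_or_path` is a standard attribute on transformers models.
print(f"✅ Active model: {generator.model.name_or_path}")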
def generate_text(prompt):
    outputs = generator(prompt, max_length=200, num_return_sequences=1)
    return outputs[0]["generated_text"]
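
# A minimal sketch of a sampling-based variant (the function name and the
# parameter values are illustrative, not from the original Space). Note that
# max_length above caps prompt + generated tokens together, while
# max_new_tokens caps only the generated portion; both are standard
# transformers generation kwargs.
def generate_text_sampling(prompt):
    outputs = generator(
        prompt,
        max_new_tokens=200,      # cap on newly generated tokens only
        do_sample=True,          # sample instead of greedy decoding
        temperature=0.7,         # illustrative value; tune to taste
        num_return_sequences=1,
    )
    return outputs[0]["generated_text"]
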
# Gradio UI
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Medini Intelligence",
    description="Custom AI Agent with fallback to GPT-2",
)

if __name__ == "__main__":
    iface.launch()