"""Minimal Streamlit chat UI backed by the Hugging Face Inference API (gpt2)."""

import os

import requests
import streamlit as st

# Load Hugging Face API token: environment variable wins, Streamlit secrets
# is the fallback. `.get` avoids crashing at import time when the secret is
# absent — we surface a readable error below instead.
HF_TOKEN = os.getenv("HF_TOKEN") or st.secrets.get("HF_TOKEN")

if not HF_TOKEN:
    st.error("Missing HF_TOKEN. Set the environment variable or add it to .streamlit/secrets.toml.")
    st.stop()

# Use a free model that works
API_URL = "https://api-inference.huggingface.co/models/gpt2"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}


def query(payload):
    """POST *payload* to the HF Inference API and return the decoded JSON.

    Returns a dict with an "error" key when the request fails at the
    network level or the response body is not valid JSON.
    """
    try:
        # Bounded timeout so the UI never hangs forever on a dead endpoint;
        # catch network-level failures instead of letting them traceback
        # into the page.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
    except requests.RequestException as exc:
        return {"error": f"Request failed: {exc}"}

    # Debugging info
    st.write("🔍 Debug - Status Code:", response.status_code)
    st.write("🔍 Debug - Raw Response:", response.text)

    try:
        return response.json()
    except ValueError:
        return {"error": "Invalid JSON response from Hugging Face"}


# Streamlit UI
st.title("💡 Offline AI Chat App (HF Inference)")
user_input = st.text_area("Ask me something:")

if st.button("Send"):
    if user_input.strip():
        output = query({"inputs": user_input})

        # API errors arrive as a dict with an "error" key; the success
        # shape is a list of dicts. Guard with isinstance so a list
        # response is never mistaken for (or mishandled as) an error dict.
        if isinstance(output, dict) and "error" in output:
            st.error(f"⚠️ Error: {output['error']}")
        elif (
            isinstance(output, list)
            and output
            and isinstance(output[0], dict)
            and "generated_text" in output[0]
        ):
            # ✅ Extract clean text
            st.success("🤖 Reply:")
            st.write(output[0]["generated_text"])
        else:
            st.warning("⚠️ Unexpected response format. Check debug output above.")
    else:
        st.warning("Please enter a message first!")