Spaces:
Sleeping
Sleeping
import streamlit as st
import requests

# Hugging Face Inference API setup
# Model endpoint for Meta-Llama-3.1-8B on the hosted Inference API.
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3.1-8B"
# Token is read from Streamlit's secrets store (.streamlit/secrets.toml, or the
# Spaces secrets UI) so the credential never appears in source control.
HUGGINGFACE_TOKEN = st.secrets["HUGGINGFACE_TOKEN"]
# Bearer-token auth header sent with every inference request.
headers = {"Authorization": f"Bearer {HUGGINGFACE_TOKEN}"}
def query(payload):
    """POST *payload* to the Hugging Face Inference API and return the parsed JSON.

    Parameters
    ----------
    payload : dict
        Request body, e.g. ``{"inputs": "...", "parameters": {...}}``.

    Returns
    -------
    The decoded JSON response: on success a list of generation dicts; on an
    API-side problem typically a dict (often carrying an "error" key). The
    caller is expected to distinguish the two shapes.
    """
    # A timeout prevents the Streamlit app from hanging indefinitely when the
    # endpoint is slow or unreachable (cold model starts are common on the
    # free Inference API). requests has NO default timeout otherwise.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
    return response.json()
# --- Streamlit UI: prompt box -> inference call -> rendered answer ---
st.title("LLaMA 模型演示")

user_input = st.text_area("輸入您的問題:", height=100)

if st.button("獲取回答"):
    if user_input:
        with st.spinner("模型正在思考中..."):
            output = query({
                "inputs": user_input,
                "parameters": {"max_length": 200}
            })
        # Successful generations come back as a non-empty list of dicts.
        if isinstance(output, list) and len(output) > 0:
            st.write("模型回答:")
            # .get() guards against a response item missing the key, which
            # would otherwise raise KeyError and crash the app mid-render.
            st.write(output[0].get('generated_text', ''))
        elif isinstance(output, dict) and 'error' in output:
            # Surface the API's own diagnostic (model loading, rate limit,
            # bad token, ...) instead of a generic failure message.
            st.error(f"API 錯誤:{output['error']}")
        else:
            st.error("無法獲得有效的回應。請檢查您的API權限和配額。")
    else:
        st.warning("請輸入一些文字。")

st.markdown("---")
st.markdown("注意:這個應用使用了LLaMA模型的Inference API。請確保您有適當的權限和配額來使用此API。")