import streamlit as st
from openai import OpenAI
from typing import Iterator
import os
from phoenix.otel import register
from datetime import datetime

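# Set up tracing: register a Phoenix (Arize) OpenTelemetry tracer provider and
# auto-instrument the OpenAI client so each completion call is exported to the
# hosted Phoenix collector.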
tracer_provider = register(
    project_name=st.secrets['PHOENIX_PROJECT_NAME'],
    endpoint="https://app.phoenix.arize.com/v1/traces"
)

from openinference.instrumentation.openai import OpenAIInstrumentor
OpenAIInstrumentor().instrument(tracer_provider=tracer_provider)

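# Streamlit page chrome: title, model attribution, and availability notice.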
st.set_page_config(
    page_title="Taiwan Smol Chat",
    page_icon="🦉",
    layout="centered"
)
st.title("🦉Taiwan Smol Chat")
st.info('Model based on [lianghsun/Llama-3.2-Taiwan-3B-Instruct](https://huggingface.co/lianghsun/Llama-3.2-Taiwan-3B-Instruct)', icon="🧠")
# zh-TW warning: the playground may temporarily be unable to chat when the GPU
# is reallocated for data generation.
st.warning('Playground 有可能因為 GPU 被挪用至生成資料使用導致暫時性無法對話。', icon="⚠️")

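# OpenAI-compatible client pointed at the model endpoint configured in Streamlit secrets.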
client = OpenAI(
    api_key=st.secrets['API_KEY'],
    base_url=st.secrets['API_BASE_URL'],
)

if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = st.secrets['MODEL']

if "messages" not in st.session_state: | |
current_date = datetime.now().strftime("%Y-%m-%d") | |
st.session_state.messages = [{"role": "system", "content": f"現在的日期: {current_date}"}] | |
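# Replay prior turns, hiding the system prompt from the chat UI.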
for message in st.session_state.messages:
    if message['role'] == "system":
        continue
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("來聊點什麼吧"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

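    # Stream the assistant reply; st.write_stream renders the chunks as they
    # arrive and returns the concatenated text once the stream completes.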
with st.chat_message("assistant"): | |
stream = client.chat.completions.create( | |
model=st.session_state["openai_model"], | |
messages=[ | |
{"role": m["role"], "content": m["content"]} | |
for m in st.session_state.messages | |
], | |
stream=True, | |
temperature=.2 | |
) | |
response = st.write_stream(stream) | |
st.session_state.messages.append( | |
{"role": "assistant", "content": response}) | |
# with st.container():
#     st.caption('Please be aware that current Large Language Models (LLMs) can exhibit “hallucinations,” producing plausible-sounding but inaccurate or fabricated information. It is crucial to carefully review and verify any content generated by LLMs to avoid misunderstandings or misinformation. Always cross-check facts and consult reliable sources before making important decisions based on LLM outputs.')