chatbot_edm/edm.py
import os
from huggingface_hub import InferenceClient
import streamlit as st
# Access your Hugging Face API token from the environment variable
api_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if api_token is None:
    st.error("Hugging Face API token is not set.")
else:
    st.title("Tanya Gizi!")  # "Ask about Nutrition!"

    # Initialize chat history if not already present
    if 'messages' not in st.session_state:
        st.session_state.messages = []

    # Display chat history
    for message in st.session_state.messages:
        st.chat_message(message['role']).markdown(message['content'])

    # Input area for the user
    prompt = st.chat_input('Masukkan pertanyaanmu di sini!')  # "Enter your question here!"

    # Process user input
    if prompt:
        # Show the user's message and add it to the stored history
        st.chat_message('user').markdown(prompt)
        st.session_state.messages.append({'role': 'user', 'content': prompt})

        # Generate a response using InferenceClient
        client = InferenceClient(
            model="mistralai/Mistral-Large-Instruct-2407",
            token=api_token
        )

        # Generate the response (only the latest prompt is sent, not the stored history)
        response = client.chat_completion(
            messages=[{"role": "user", "content": prompt}],
            max_tokens=100,
            stream=False  # return the complete response in a single call
        )
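        # A minimal sketch (an assumption, not part of the original app): to give the
        # model memory of earlier turns, the whole stored history could be forwarded
        # instead of just the latest prompt, e.g.:
        #   response = client.chat_completion(
        #       messages=st.session_state.messages,
        #       max_tokens=100,
        #       stream=False
        #   )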
        response_text = response['choices'][0]['message']['content']

        # Display and store the assistant's response
        st.chat_message('assistant').markdown(response_text)
        st.session_state.messages.append({'role': 'assistant', 'content': response_text})
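
# Usage sketch (an assumption, not part of the original file): with streamlit and
# huggingface_hub installed, the app can be run locally after exporting the token
# name expected by the os.getenv call above:
#   export HUGGINGFACEHUB_API_TOKEN=<your token>
#   streamlit run edm.py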