import streamlit as st
from huggingface_hub import InferenceClient

# Initialize the Hugging Face Inference API client
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token="hf_gzFQTPmbxocKx"+"wyjRVSzJMWLdHCsZyQIjz")


def send_message(message):
    # Wrap the user's text in the chat format expected by chat_completion
    messages = [{"role": "user", "content": message}]
    response = client.chat_completion(messages, max_tokens=150)
    return response


def main():
    st.title("Chat with AI")

    # Text area for user input
    user_input = st.text_input("Type your message:", key="user_input")

    if st.button("Send"):
        with st.spinner('AI is typing...'):
            response = send_message(user_input)

        # Display the complete response
        for choice in response.choices:
            st.write(choice.message.content)


if __name__ == "__main__":
    main()
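
# A minimal sketch of an alternative (an assumption, not part of the original script):
# rather than hardcoding the Hugging Face token, store it in .streamlit/secrets.toml
# under a key such as HF_TOKEN (hypothetical name) and read it with Streamlit's
# st.secrets, e.g.
#
#     client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=st.secrets["HF_TOKEN"])
#
# so the key stays out of source control. Run the app with:
#
#     streamlit run app.py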