import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSeq2SeqLM


# Cache the loaded weights so Streamlit does not re-download and re-load the
# model on every rerun (i.e. on every widget interaction).
@st.cache_resource
def load_model_tokenizer(model_name, hf_api_key):
    if model_name == "Mistral-7B":
        # Gated repository: the Hugging Face token must have access granted.
        model_name = "mistralai/Mistral-7B-Instruct-v0.2"
        model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_api_key)
        tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_api_key)
    elif model_name == "blenderbot-400M-distill":
        model_name = "facebook/blenderbot-400M-distill"
        # BlenderBot is an encoder-decoder model, so it needs the seq2seq
        # auto class rather than AutoModelForCausalLM.
        model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    else:
        # No loader is wired up for the remaining radio options yet.
        st.error(f"No loader configured for {model_name}.")
        st.stop()
    return model, tokenizer


def generate_response(prompt_input, model, tokenizer, max_length, temperature):
    inputs = tokenizer(prompt_input, return_tensors="pt")
    # Sample a response using the settings from the sidebar sliders.
    outputs = model.generate(
        **inputs, max_length=max_length, do_sample=True, temperature=temperature
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
    return response


st.set_page_config(page_title="Learn Geoscience")

with st.sidebar:
    st.title('Learn Geoscience Chat')
    if 'hf_key' in st.secrets:
        st.success('Huggingface API key provided', icon='✅')
        hf_api_key = st.secrets['hf_key']
    else:
        hf_api_key = st.text_input('Enter Huggingface API Key:', type='password')
        if not hf_api_key:
            st.warning('Please enter Huggingface API key!', icon='⚠️')
        else:
            st.success('Proceed to entering your prompt message!', icon='👉')
    max_length = st.slider("Max Length", 10, 100, 50)
    # Keep the minimum strictly positive: transformers rejects temperature=0
    # when do_sample=True.
    temperature = st.slider("Temperature", 0.01, 1.0, 0.7)

if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]

model_name = st.radio(
    "Select model to chat",
    options=["Mistral-7B", "LLaMa-2B", "blenderbot-400M-distill"],
    horizontal=True,
    key='model_selection',
)
model, tokenizer = load_model_tokenizer(model_name, hf_api_key)

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

if prompt := st.chat_input(disabled=not hf_api_key):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = generate_response(prompt, model, tokenizer, max_length, temperature)
            st.write(response)
            message = {"role": "assistant", "content": response}
            st.session_state.messages.append(message)
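

# --- Optional sketch: lighter-weight loading for Mistral-7B ---
# Loading a 7B model in full float32 precision needs roughly 28 GB of memory.
# A minimal sketch, assuming a CUDA GPU and the `accelerate` package are
# installed (device_map="auto" depends on it); the function name
# load_mistral_fp16 is hypothetical and is not wired into the app above.
def load_mistral_fp16(hf_api_key):
    import torch

    model = AutoModelForCausalLM.from_pretrained(
        "mistralai/Mistral-7B-Instruct-v0.2",
        token=hf_api_key,
        torch_dtype=torch.float16,  # halves memory versus float32
        device_map="auto",          # let accelerate place layers on the GPU
    )
    tokenizer = AutoTokenizer.from_pretrained(
        "mistralai/Mistral-7B-Instruct-v0.2", token=hf_api_key
    )
    return model, tokenizer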
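

# --- Optional sketch: include chat history in the prompt ---
# generate_response() above only sees the latest user message, so the model
# gets no conversational context. A minimal sketch, not wired into the app;
# the helper name and the "role: content" separator are assumptions, not
# anything the models above mandate.
def build_history_prompt(messages, max_turns=4):
    # Join the last few turns into a single string to pass as prompt_input.
    recent = messages[-max_turns:]
    return "\n".join(f"{m['role']}: {m['content']}" for m in recent)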