import streamlit as st
from transformers import (
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    GenerationConfig,
)


@st.cache_resource
def load_tokenizer_model(name_or_path: str, model_type: str, model_auth_token: str):
    # Cache the tokenizer/model across Streamlit reruns; treat an empty token field as "no token".
    model_auth_token = None if model_auth_token == "" else model_auth_token
    tokenizer = AutoTokenizer.from_pretrained(*name_or_path.split(","), use_auth_token=model_auth_token)
    if model_type == "seq2seq":
        model = AutoModelForSeq2SeqLM.from_pretrained(*name_or_path.split(","), use_auth_token=model_auth_token)
    elif model_type == "causal":
        model = AutoModelForCausalLM.from_pretrained(*name_or_path.split(","), use_auth_token=model_auth_token)
    else:
        raise ValueError("model_type must be one of 'seq2seq' or 'causal'")
    return tokenizer, model


def main():
    st.title("Hugging Face Transformers Demo")
    with st.form("model_form"):
        model_type = st.selectbox("Select Model Type", ["seq2seq", "causal"])
        model_name_or_path = st.text_input("Model Name or Path")
        model_auth_token = st.text_input("Model Auth Token")
        input_text = st.text_area("Input Text")

        # Generation parameters, laid out in three columns.
        col1, col2, col3 = st.columns(3)
        user_gen_config = {}
        with col1:
            user_gen_config["min_length"] = st.number_input("Min Length", value=10, min_value=1, max_value=1000, step=1)
            user_gen_config["max_length"] = st.number_input("Max Length", value=50, min_value=1, max_value=1000, step=1)
            user_gen_config["top_k"] = st.number_input("Top K", value=50, min_value=1, max_value=100, step=1)
        with col2:
            user_gen_config["num_beams"] = st.number_input("Num Beams", value=1, min_value=1, max_value=100, step=1)
            user_gen_config["top_p"] = st.number_input("Top P", value=1.0, min_value=0.0, max_value=100.0, step=0.1)
            user_gen_config["repetition_penalty"] = st.number_input("Repetition Penalty", value=1.0, min_value=0.0, max_value=100.0, step=0.1)
        with col3:
            user_gen_config["temperature"] = st.number_input("Temperature", value=1.0, min_value=0.0, max_value=100.0, step=0.1)
            user_gen_config["do_sample"] = st.checkbox("Do Sample", value=False)
            user_gen_config["early_stopping"] = st.checkbox("Early Stopping", value=True)

        submitted = st.form_submit_button("Submit")
        if submitted:
            tokenizer, model = load_tokenizer_model(model_name_or_path, model_type, model_auth_token)

            # Start from the model's default generation config, then overlay the form values.
            gen_config = GenerationConfig.from_model_config(model.config)
            for k, v in user_gen_config.items():
                setattr(gen_config, k, v)

            input_ids = tokenizer.encode(input_text, return_tensors="pt")
            output_ids = model.generate(input_ids, generation_config=gen_config)
            output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
            st.write(output_text)


if __name__ == "__main__":
    main()
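# Usage sketch (assumptions: this file is saved as app.py and the `streamlit` and
# `transformers` packages are installed in the active environment):
#
#   streamlit run app.py
#
# Example form inputs (illustrative only; any compatible Hub model works):
#   Model Type:          seq2seq
#   Model Name or Path:  google/flan-t5-small
#   Input Text:          Translate to German: Hello, how are you?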