import time

import streamlit as st
import torch  # backend required by the transformers pipeline
from transformers import pipeline

st.markdown('## Muse text-generation (GPT) models from Breadlicker45')


# Cache the pipeline per model name so switching models triggers a reload,
# while reruns with the same selection reuse the already-loaded weights.
@st.cache(allow_output_mutation=True, suppress_st_warning=True, show_spinner=False)
def get_model(model_name):
    return pipeline(
        'text-generation',
        model=model_name,
        do_sample=True,
        trust_remote_code=True,
    )


col1, col2 = st.columns([2, 1])

with st.sidebar:
    st.markdown('## Model Parameters')
    max_length = st.slider('Max text length', 0, 500, 80)
    num_beams = st.slider('Number of beams (beam search)', 2, 15, 2)
    # The selectbox returns a string, so convert it to a real boolean.
    early_stopping = st.selectbox(
        'Early stopping text generation',
        ('True', 'False'),
        index=0) == 'True'
    no_ngram_repeat = st.slider('No-repeat n-gram size', 1, 5, 2)

with col1:
    prompt = st.text_area(
        'Your prompt here',
        '''2623 2619 3970 3976 2607 3973 2735 3973 2598 3985 2726 3973 2607 4009 2735 3973 2598 3973 2726 3973 2607 3973 2735 4009''')

with col2:
    select_model = st.radio(
        "Select the model to use:",
        ('MuseWeb', 'MusePy', 'MuseNeo'),
        index=2)
    if select_model == 'MuseWeb':
        model = 'breadlicker45/MuseWeb'
    elif select_model == 'MusePy':
        model = 'breadlicker45/MusePy'
    else:
        model = 'breadlicker45/MuseNeo'

with st.spinner('Loading model... (this may take a while)'):
    generator = get_model(model)
st.success('Model loaded correctly!')

gen = st.info('Generating text...')
answer = generator(
    prompt,
    max_length=max_length,
    no_repeat_ngram_size=no_ngram_repeat,
    early_stopping=early_stopping,
    num_beams=num_beams)
gen.empty()

# "Typewriter" effect: reveal the generated text one character at a time.
generated_text = answer[0]['generated_text']
t = st.empty()
for i in range(1, len(generated_text) + 1):
    t.markdown("#### %s" % generated_text[:i])
    time.sleep(0.04)