File size: 2,480 Bytes
d0d5a73
 
 
 
1200764
87551e4
72f6b84
d0d5a73
 
1556d5c
d0d5a73
 
 
 
 
 
1556d5c
 
 
d0d5a73
85f0cd3
37ae943
 
7a3dcd9
d0d5a73
 
 
 
9dc6d1b
7a3dcd9
934edda
7a3dcd9
d0d5a73
 
 
392eb9f
d0d5a73
 
 
 
2ded7ac
d0d5a73
392eb9f
6081fa7
392eb9f
e75ec7f
392eb9f
b4fecc1
 
 
59531f5
5b872f6
d0d5a73
 
 
 
 
 
 
dcf7790
37ae943
d0d5a73
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
# Streamlit front-end for Breadlicker45's "Muse" MuseNet-style
# text-generation models.
import streamlit as st
import time
from transformers import pipeline
import torch

# NOTE(review): the original file had two dead module-level assignments here
# (`trust_remote_code=True`, `use_auth_token=True`). They bound throwaway
# variables and were never passed to `pipeline(...)`, so they had no effect
# and have been removed. If the hub models actually require remote code or
# authentication, those flags must be passed to `pipeline` explicitly.

st.markdown('## Text-generation gpt Muse from Breadlicker45')
@st.cache(allow_output_mutation=True, suppress_st_warning=True, show_spinner=False)
def get_model(model_name=None):
    """Build (and cache) a Hugging Face text-generation pipeline.

    Args:
        model_name: Hub repo id of the model to load. Defaults to the
            module-level ``model`` chosen via the UI radio button, which
            preserves the original zero-argument call.

    Returns:
        A ``transformers`` text-generation pipeline with sampling enabled.

    Note: ``st.cache`` keys on the function's arguments. The original
    function took none, so switching models in the UI kept serving the
    first pipeline ever built. Passing the model id as an argument makes
    the cache invalidate per model; legacy zero-argument calls keep the
    original (single-entry) caching behaviour.
    """
    if model_name is None:
        model_name = model  # fall back to the radio-button selection
    return pipeline('text-generation', model=model_name, do_sample=True)
# Two-column main layout: prompt entry (wide) and model picker (narrow).
col1, col2 = st.columns([2, 1])

with st.sidebar:
    st.markdown('## Model Parameters')

    max_length = st.slider('Max text length', 80, 2000, 80)

    min_length = st.slider('Min text length', 80, 500, 80)

    num_beams = st.slider('N° tree beams search', 1, 15, 1)

    # Float slider. The original used int bounds (0, 1, 1), which only
    # allowed temperatures 0 or 1 — and transformers rejects a temperature
    # of 0 (it must be a strictly positive float).
    temp = st.slider('temperature', 0.1, 1.0, 1.0)

    # Return a real boolean. The original passed a dict as the widget
    # ``key`` argument (which expects a string identifier) and the
    # selectbox returned the *string* 'True'/'False'; both strings are
    # truthy, so early stopping was silently always enabled.
    early_stopping = st.selectbox(
        'Early stopping text generation',
        ('True', 'False'), index=0) == 'True'

    no_ngram_repeat = st.slider('Max repetition limit', 1, 3, 1)

    st.markdown('## how to convert it into midi. go to this site https://mrcheeze.github.io/musenet-midi/ and then paste the numbers/musenet encoders you get from the ai into the big box and then click export midi')
    
# Left column: free-text prompt used to seed generation. The default is a
# sample token sequence — presumably the MuseNet-style numeric encoding
# these models were trained on (see the sidebar's MIDI-conversion note);
# TODO confirm against the model cards.
with col1:
    prompt= st.text_area('Your prompt here',
        '''2623 2619 3970 3976 2607 3973 2735 3973 2598 3985 2726 3973 2607 4009 2735 3973 2598 3973 2726 3973 2607 3973 2735 4009''') 
        
# Right column: pick which Muse checkpoint to load, then build the pipeline.
with col2:
    select_model = st.radio(
        "Select the model to use:",
        ('MuseWeb', 'MusePy', 'MuseNeo', 'MusePy-1-1', 'MuseCan'), index=4)

    # UI label -> Hugging Face repo id (dict lookup replaces the original
    # five-branch if/elif chain; same mappings, same result).
    _MODEL_IDS = {
        'MuseWeb': 'breadlicker45/museweb',
        'MusePy': 'breadlicker45/MusePy',
        'MuseNeo': 'breadlicker45/MuseNeo',
        'MusePy-1-1': 'BreadAi/MusePy-1-1',
        'MuseCan': 'BreadAi/MuseCan',
    }
    model = _MODEL_IDS[select_model]

    with st.spinner('Loading Model... (This may take a while)'):
        generator = get_model()
        st.success('Model loaded correctly!')
     
# Run generation with the sidebar parameters, then "type out" the result.
gen = st.info('Generating text...')

# Tolerate both representations of the early-stopping toggle: the original
# selectbox yielded the *string* 'True'/'False' — both truthy — which
# silently forced early stopping on regardless of the user's choice.
if isinstance(early_stopping, str):
    early_stopping = early_stopping == 'True'

answer = generator(prompt,
                   max_length=max_length,
                   min_length=min_length,
                   num_beams=num_beams,
                   temperature=temp,
                   no_repeat_ngram_size=no_ngram_repeat,
                   early_stopping=early_stopping)
gen.empty()

generated = answer[0]['generated_text']

# Typewriter effect: re-render a growing prefix of the text. Slicing to
# ``i + 1`` fixes an off-by-one in the original (``lst[0:i]`` with
# ``i < len(lst)`` never displayed the final character).
placeholder = st.empty()
for i in range(len(generated)):
    placeholder.markdown(f"#### {generated[:i + 1]}")
    time.sleep(0.04)