import sys
import time
import os
import streamlit as st
import concurrent.futures
from random import randint

# initializing session_state
#os.system('pip install torch==1.10.2+cu113 torchvision==0.11.3+cu113 torchaudio===0.10.2+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html')
os.system('pip install transformers')  # install the dependency at runtime

st.title("Story AI")
st.markdown("I can generate interesting stories", unsafe_allow_html=True)
st.markdown('')
st.markdown('')

from transformers import pipeline, set_seed

generator = pipeline('text-generation', model='openai-gpt')


def generate(initial_text, length=10, return_sequences=1):
    """Continue initial_text with the GPT pipeline using a random seed."""
    set_seed(randint(1, 1000))
    result = generator(initial_text, max_length=length,
                       num_return_sequences=return_sequences)
    return result[0]["generated_text"]


def slice(text, max_length=10):
    """Return the last max_length characters of text."""
    return text[-max_length:]


def type_text(text):
    """Write text to stdout one character at a time (typewriter effect)."""
    for letter in text:
        sys.stdout.write(letter)
        time.sleep(0)


# Command-line version (commented out):
#text = input("Enter something to begin with... ")
#print(".\n.\n.\nGenerating\n.\n.\n.")
#for _ in range(50):
#    result = generate(text)
#    text = slice(result)
#    with concurrent.futures.ThreadPoolExecutor() as executor:
#        executor.submit(type_text, result.replace(text, ""))

# Streamlit