# -*- coding: utf-8 -*-
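"""Streamlit demo that generates Hebrew text with the orendar/light_generator
causal language model, using top-k / top-p sampling via transformers' generate()."""
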
import os
import random

import numpy as np
import streamlit as st
import tokenizers
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# os.environ["TOKENIZERS_PARALLELISM"] = "false"

random.seed(None)
# Fallback prompt (Hebrew for "the purpose of creation").
suggested_text_list = ['מטרת הבריאה']


@st.cache(hash_funcs={tokenizers.Tokenizer: id, tokenizers.AddedToken: id})
def load_model(model_name):
    # Cache the model and tokenizer across Streamlit reruns; tokenizer objects
    # are not hashable by st.cache, so they are hashed by identity instead.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name, low_cpu_mem_usage=True)
    return model, tokenizer


def extend(input_text, max_size=20, top_k=50, top_p=0.95):
    encoded_prompt = tokenizer.encode(
        input_text, add_special_tokens=False, return_tensors="pt")
    encoded_prompt = encoded_prompt.to(device)

    # An empty prompt means unconditional generation.
    if encoded_prompt.size()[-1] == 0:
        input_ids = None
    else:
        input_ids = encoded_prompt

    # Greedy decoding when both filters are disabled; otherwise sample.
    do_sample = not (top_k == 0 and top_p == 1.0)

    output_sequences = model.generate(
        input_ids=input_ids,
        max_length=max_size + len(encoded_prompt[0]),
        top_k=top_k,
        top_p=top_p,
        do_sample=do_sample,
        repetition_penalty=25.0,
        num_return_sequences=1)

    # Remove the batch dimension when returning multiple sequences
    if len(output_sequences.shape) > 2:
        output_sequences.squeeze_()

    generated_sequences = []
    for generated_sequence in output_sequences:
        generated_sequence = generated_sequence.tolist()

        # Decode the generated tokens to text.
        text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)

        # Truncate at the stop token, if one appears.
        if stop_token and stop_token in text:
            text = text[: text.find(stop_token)]

        # Truncate at the first run of three newlines, if one appears.
        if new_lines and new_lines in text:
            text = text[: text.find(new_lines)]

        # Re-attach the original prompt, dropping its decoded form from the output.
        total_sequence = (
            input_text
            + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)):]
        )
        generated_sequences.append(total_sequence)

    parsed_text = total_sequence.replace("<|startoftext|>", "").replace("\r", "").replace("\n\n", "\n")
    if len(parsed_text) == 0:
        parsed_text = "שגיאה"  # Hebrew for "error"
    return parsed_text
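
# A minimal usage sketch for extend() outside Streamlit (illustrative only, not
# part of the app): the globals it reads -- model, tokenizer, device,
# stop_token, new_lines -- must be set up first, as in the __main__ block below.
#
#   model, tokenizer = load_model("orendar/light_generator")
#   device = torch.device("cpu"); model.to(device)
#   stop_token, new_lines = "<|endoftext|>", "\n\n\n"
#   print(extend("מטרת הבריאה", max_size=70, top_k=40, top_p=0.92))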


if __name__ == "__main__":
    st.title("Light generator")
    pre_model_path = "orendar/light_generator"
    model, tokenizer = load_model(pre_model_path)
    stop_token = "<|endoftext|>"
    new_lines = "\n\n\n"

    # Draw a fresh seed on every rerun so repeated taps can differ when sampling.
    np.random.seed(None)
    random_seed = np.random.randint(10000)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count() if torch.cuda.is_available() else 0
    torch.manual_seed(random_seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(random_seed)
    model.to(device)

    text_area = st.text_area(
        "Enter the first few words (or leave blank), tap on \"Generate Text\" below. "
        "Tapping again may produce a different result.",
        'מטרת הבריאה')

    st.sidebar.subheader("Configurable parameters")
    max_len = st.sidebar.slider(
        "Max-Length", 0, 256, 70,
        help="The maximum length of the sequence to be generated.")
    top_k = st.sidebar.slider(
        "Top-K", 0, 100, 0,
        help="The number of highest-probability vocabulary tokens to keep for top-k filtering.")
    top_p = st.sidebar.slider(
        "Top-P", 0.0, 1.0, 1.0,
        help="If set to a float < 1, only the most probable tokens whose probabilities add up to top_p or higher are kept for generation.")
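
    # Note: with the slider defaults (Top-K = 0, Top-P = 1.0) extend() sets
    # do_sample=False, so decoding is greedy and deterministic; raising Top-K
    # or lowering Top-P enables sampling, which is what lets repeated taps
    # produce different results.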

    if st.button("Generate Text"):
        with st.spinner(text="Generating results..."):
            st.subheader("Result")
            print(f"device:{device}, n_gpu:{n_gpu}, random_seed:{random_seed}, "
                  f"max_len:{max_len}, top_k:{top_k}, top_p:{top_p}")
            if len(text_area.strip()) == 0:
                text_area = random.choice(suggested_text_list)
            result = extend(input_text=text_area,
                            max_size=int(max_len),
                            top_k=int(top_k),
                            top_p=float(top_p))
            # Render right-to-left so the Hebrew output displays correctly.
            st.markdown(f"<p dir=\"rtl\" style=\"text-align:right;\"> {result} </p>",
                        unsafe_allow_html=True)
            print(f"\"{result}\"")

    st.markdown("""This model was trained on archive materials.""")
    st.markdown("<footer><hr><p style=\"font-size:14px\">Enjoy the light.</p>"
                "<p style=\"font-size:12px\">Created by Oren Dar. Many thanks to Norod78 "
                "for providing the base model and the Spaces example!</p></footer>",
                unsafe_allow_html=True)