import gradio as gr
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread

# Load the QLoRA adapter config and attach the adapter to the phi-1.5 base model.
config = PeftConfig.from_pretrained("cjayic/qlora-phi-1_5B-ow-fanfic")
model = AutoModelForCausalLM.from_pretrained("microsoft/phi-1_5")
model = PeftModel.from_pretrained(model, "cjayic/qlora-phi-1_5B-ow-fanfic")

# phi-1.5 and phi-2 use the same tokenizer vocabulary, so loading it from the phi-2 checkpoint works here.
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)

def greet(intro):
    inputs = tokenizer(intro, return_tensors="pt", return_attention_mask=False)

    # Stream tokens as they are generated so the UI can update incrementally.
    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=False, skip_special_tokens=True)

    generate_kwargs = dict(
        inputs,
        streamer=streamer,
        max_new_tokens=150,
        do_sample=True,
        #top_p=0.95,
        #top_k=1000,
        #temperature=1.0,
        num_beams=1,
    )

    # Run generation in a background thread so the streamer can be consumed here.
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    partial_message = ""
    for new_token in streamer:
        # Skip bare '<' tokens so stray markup fragments don't break the Markdown rendering.
        if new_token != '<':
            partial_message += new_token
            yield partial_message

# Minimal Gradio UI: a textbox for the starting text and a Markdown pane for the streamed continuation.
with gr.Blocks() as demo:
    inp = gr.Textbox(
        placeholder="Intro",
        value="<|startoftext|>\n# Chapter 1\n",
        label="Starting Text",
        info="Initial text that will be continued by the LLM. Use `<|startoftext|>` to generate the beginning of a chapter.",
    )
    out = gr.Markdown(sanitize_html=False)
    btn = gr.Button()
    btn.click(fn=greet, inputs=[inp], outputs=[out])

demo.launch()