import gradio as gr
#import transformers
#from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from aitextgen import aitextgen

title = "vggAI Offline Chat"
description = (
    "Demo for the vggAI text generation model, which is based on the 355M-parameter GPT-2 "
    "fine-tuned on ~5.1 million offline chat messages sent on vaush.gg from January 2021 "
    "through May 2022. (“Offline” chat was crudely estimated by counting how many messages "
    "were sent during 10-minute windows, and only including messages from the 10-minute "
    "windows where 400 or fewer chat messages were sent.) This model is very rough and may "
    "produce text snippets identical to actual chat messages, and may not attribute them to "
    "the correct users. Please do not use any output without making clear that it's "
    "AI-generated fakery. Finally, please be aware that due to this model’s shallow "
    "“understanding” of language, it’s liable to generate yikesy statements about sensitive "
    "topics, incorrectly infer chatters’ genders (and other protected characteristics), and "
    "emulate messages from banned users."
)

# Load the fine-tuned GPT-2 model from the Hugging Face Hub.
ai = aitextgen(model="chido/vggAI-offlinechat")

# Earlier transformers-based pipeline, kept for reference:
#tokenizer = AutoTokenizer.from_pretrained("chido/vggAI-offlinechat")
#model = AutoModelForCausalLM.from_pretrained("chido/vggAI-offlinechat")
#generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer,
#                     do_sample=True,
#                     max_length=100,
#                     temperature=0.8,
#                     top_p=0.95,
#                     repetition_penalty=1.25,
#                     )


def get_text(user_input='', temp=0.8, rp=1.25):
    """Generate a single continuation of the prompt with the given sampling settings."""
    #result = generator(user_input, temperature=temp)
    #return result[0]["generated_text"]
    return ai.generate_one(prompt=user_input, max_length=256, top_p=0.95,
                           temperature=temp, repetition_penalty=rp)


gr.Interface(
    get_text,
    inputs=[
        gr.inputs.Textbox(
            lines=8,
            placeholder="Give a prompt and the AI will attempt to continue the conversation, "
                        "or leave it blank to generate a conversation from scratch!",
            label="Prompt (optional)",
            default="chido: JAMMIES kickin in the front seat JAMMIES\nchido: JAMMIES sittin in the",
        ),
        gr.inputs.Slider(
            minimum=0.5, maximum=1.5, default=0.8, step=0.01,
            label='Temperature (controls “wildness” of output; recommended to set between 0.7 and 1)',
        ),
        gr.inputs.Slider(
            minimum=1.0, maximum=1.5, default=1.25, step=0.01,
            label='Repetition penalty (the higher the value, the more it will try to avoid emote spamming and other repeating text)',
        ),
    ],
    outputs=gr.Textbox(),
    title=title,
    description=description,
).launch()
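

# Illustrative sketch (not used by the app) of the "offline chat" heuristic described in the
# demo text above: bucket messages into 10-minute windows by timestamp and keep only messages
# from windows containing 400 or fewer messages. The function name, the message format
# (a list of (unix_timestamp, text) tuples), and the default thresholds are assumptions for
# illustration only; the actual dataset preparation may have differed.
def _filter_offline_messages(messages, window_seconds=600, max_per_window=400):
    """Return only the messages that fall in low-traffic ("offline") 10-minute windows."""
    windows = {}
    for timestamp, text in messages:
        # Bucket each message by the 10-minute window its timestamp falls into.
        bucket = int(timestamp) // window_seconds
        windows.setdefault(bucket, []).append((timestamp, text))
    offline = []
    for bucket in sorted(windows):
        # Keep a window only if it saw max_per_window or fewer messages (i.e. the stream
        # was probably offline), and drop busier windows entirely.
        if len(windows[bucket]) <= max_per_window:
            offline.extend(windows[bucket])
    return offline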