from transformers import AutoModelForCausalLM, AutoTokenizer
import modin.pandas as pd
import gradio as gr
# Single checkpoint id so tokenizer and weights are guaranteed to match.
_MODEL_ID = "KoboldAI/OPT-2.7B-Erebus"

# Load once at import time (downloads to the local HF cache on first run)
# so every chat() call reuses the same objects.
model = AutoModelForCausalLM.from_pretrained(_MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(_MODEL_ID)
def chat(Prompt: str) -> str:
    """Generate a text continuation for *Prompt* with the Erebus model.

    Args:
        Prompt: User-supplied prompt text from the Gradio textbox.

    Returns:
        The decoded generation (prompt included), with special tokens
        stripped.
    """
    # Tokenize to a PyTorch tensor of token ids.
    input_ids = tokenizer(Prompt, return_tensors="pt").input_ids
    # repetition_penalty and top_k curb degenerate loops and limit sampling
    # to the 75 most likely tokens; max_length caps prompt + generation at
    # 256 tokens total.
    generated_ids = model.generate(
        input_ids,
        use_cache=True,
        repetition_penalty=2.5,
        top_k=75,
        max_length=256,
        eos_token_id=tokenizer.eos_token_id,
    )
    # Batch size is 1, so decode the first (only) sequence.
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
title = 'Erebus ChatBot'
# NOTE: the original split this literal across a raw newline (a SyntaxError);
# implicit string concatenation with an explicit \n preserves the intended
# two-line text.
description = (
    'This is the second generation of the original Shinen made by Mr. Seeker. The full dataset consists of 6 different sources, all surrounding the "Adult" theme. The name "Erebus" comes from the greek mythology, also named "darkness". This is in line with Shinen, or "deep abyss". For inquiries, please contact the KoboldAI community.\n'
    'Warning: THIS model is NOT suitable for use by minors. The model will output X-rated content.'
)
article = 'The data can be divided in 6 different datasets: Literotica (everything with 4.5/5 or higher), Sexstories (everything with 90 or higher), Dataset-G (private dataset of X-rated stories), Docs Lab (all stories), Pike Dataset (novels with "adult" rating), SoFurry (collection of various animals)'
# Simple text-in/text-out UI; debug=True surfaces server tracebacks in the
# console, max_threads bounds concurrent request handling.
gr.Interface(
    fn=chat,
    inputs='text',
    outputs='text',
    title=title,
    description=description,
    article=article,
).launch(max_threads=40, debug=True)