from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr

# Model 1: BART-based ChatGPT prompt generator (TensorFlow weights converted on load)
tokenizer = AutoTokenizer.from_pretrained("merve/chatgpt-prompt-generator-v12")
model = AutoModelForSeq2SeqLM.from_pretrained("merve/chatgpt-prompt-generator-v12", from_tf=True)

# Model 2: alternative prompt generator fine-tuned from bart-large-cnn-samsum
tokenizer2 = AutoTokenizer.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
model2 = AutoModelForSeq2SeqLM.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)


def generate(prompt, max_new_tokens):
    # Generate a ChatGPT prompt for the given persona using model 1.
    batch = tokenizer(prompt, return_tensors="pt")
    generated_ids = model.generate(batch["input_ids"], max_new_tokens=int(max_new_tokens))
    output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return output[0]


def generate2(prompt, max_new_tokens):
    # Same as generate(), but using model 2.
    batch = tokenizer2(prompt, return_tensors="pt")
    generated_ids = model2.generate(batch["input_ids"], max_new_tokens=int(max_new_tokens))
    output = tokenizer2.batch_decode(generated_ids, skip_special_tokens=True)
    return output[0]


def generate2_test(prompt):
    # Convenience wrapper around model 2 with a fixed token budget (not wired into the UI).
    batch = tokenizer2(prompt, return_tensors="pt")
    generated_ids = model2.generate(batch["input_ids"], max_new_tokens=150)
    output = tokenizer2.batch_decode(generated_ids, skip_special_tokens=True)
    return output[0]


def generate_prompt(aitype, prompt, max_new_tokens):
    # Dispatch to the selected model based on the "aitype" textbox value.
    if aitype == "1":
        return generate(prompt, max_new_tokens)
    elif aitype == "2":
        return generate2(prompt, max_new_tokens)


# Gradio UI components
input_aitype = gr.Textbox(label="Model to use (1 or 2)", value="2")
input_prompt = gr.Textbox(label="Input a persona, e.g. photographer", value="photographer")
input_maxtokens = gr.Textbox(label="Max tokens", value="150")
output_component = gr.Textbox(label="Prompt")
# Each example must provide a value for every input component.
examples = [["1", "photographer", "150"], ["2", "developer", "150"]]
description = ""

gr.Interface(
    generate_prompt,
    inputs=[input_aitype, input_prompt, input_maxtokens],
    outputs=output_component,
    examples=examples,
    title="👨🏻‍🎤 ChatGPT Prompt Generator v12 👨🏻‍🎤",
    description=description,
).launch()