# Motivation-Letter-Generator

A small Gradio app that generates a motivation letter from a few text fields (name, employer, position, organization, hard and soft skills) using EleutherAI's GPT-Neo-1.3B.

```python
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, set_seed
import gradio as gr
import torch

# Allocate new tensors on the GPU by default (requires CUDA).
torch.set_default_tensor_type(torch.cuda.FloatTensor)

### Need more GPU power to run larger models, e.g.:
# model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", use_cache=True)  # 11B params
# tokenizer = AutoTokenizer.from_pretrained("bigscience/T0pp")

model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B", use_cache=True)
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
set_seed(424242)


def generate(Name, Employer, Position, Organization, Hard_skills, Soft_skills,
             max_length=500, top_k=1, temperature=0.9, repetition_penalty=2.0):
    prompt = (
        f"I am {Name} and I want to write a motivation letter to {Employer} "
        f"about the position {Position} at {Organization}, mentioning the hard skills "
        f"{Hard_skills} and soft skills {Soft_skills} I have acquired."
    )
    # Tokenize the prompt and move it to GPU 0, where the model weights live.
    inputs = tokenizer(prompt, return_tensors="pt").to(0)
    # Sampling must be enabled for top_k / temperature to have any effect.
    sample = model.generate(
        **inputs,
        do_sample=True,
        max_length=max_length,
        top_k=top_k,
        temperature=temperature,
        repetition_penalty=repetition_penalty,
    )
    return tokenizer.decode(sample[0], skip_special_tokens=True)


title = "Motivation Letter Generator w/ GPT-Neo-1.3B"
article = "Impress your employer"

if __name__ == "__main__":
    demo = gr.Interface(
        fn=generate,
        inputs=["text", "text", "text", "text", "text", "text"],
        outputs="text",
        title=title,
        article=article,
    )
    # share=True belongs to launch(), not to the Interface constructor.
    demo.launch(share=True)
```
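For a quick smoke test without starting the web UI, you can call `generate` directly from a Python session. This is a minimal sketch, assuming the script above is saved as `app.py` (a hypothetical file name, not fixed by this repo), that a CUDA GPU is available, and using made-up field values for illustration.

```python
# Minimal sketch: call generate() directly, assuming the script above is saved as app.py.
# All field values below are illustrative placeholders, not part of the repo.
from app import generate

letter = generate(
    Name="Jane Doe",
    Employer="Hiring Manager",
    Position="Data Scientist",
    Organization="Acme Corp",
    Hard_skills="Python, PyTorch, SQL",
    Soft_skills="communication, teamwork",
    max_length=300,  # shorter output for a quick test
    top_k=50,        # wider top-k than the default of 1 gives more varied text
)
print(letter)
```

Note that importing `app` still loads the 1.3B-parameter model, so the first call takes a while; the Gradio interface itself is only launched when the script is run directly.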