# Motivation-Letter-Generator

from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed
import gradio as gr

### running T0pp would need more GPU power than this Space has, so GPT-Neo 1.3B is used instead
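# A minimal sketch (untested here) of how a stronger seq2seq model such as
# bigscience/T0pp could be swapped in, assuming enough GPU memory is available:
#   from transformers import AutoModelForSeq2SeqLM
#   model = AutoModelForSeq2SeqLM.from_pretrained('bigscience/T0pp')
#   tokenizer = AutoTokenizer.from_pretrained('bigscience/T0pp')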

model = AutoModelForCausalLM.from_pretrained('EleutherAI/gpt-neo-1.3B', use_cache=True)
tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neo-1.3B')

set_seed(424242)

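# Build a prompt from the three form fields and let GPT-Neo continue it.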
def generate(Name, Position, Organization, max_length=500, top_k=1, temperature=0.9, repetition_penalty=2.0):
  prompt = f"I'm {Name} and I want to write a motivation letter to an employer about the position of {Position} at {Organization} mentioning the hard skills and soft skills I have acquired"
  # keep the tokenized inputs on the same device as the model (CPU here, GPU if available)
  inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
  # do_sample=True is required for top_k / temperature to take effect
  sample = model.generate(**inputs, max_length=max_length, do_sample=True, top_k=top_k, temperature=temperature, repetition_penalty=repetition_penalty)
  return tokenizer.decode(sample[0], skip_special_tokens=True)
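
# Example (hypothetical values) of calling the generator directly:
#   print(generate("Jane Doe", "Data Scientist", "Acme Corp"))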

title = "Motivation Letter Generator"
article = "For now this is still a toy demo, so don't expect great results. PS: if you have enough resources, try using stronger models!"

demo = gr.Interface(fn=generate, inputs=["text", "text", "text"], outputs="text", title=title, article=article)

demo.launch()