import os
import re

import gradio
import torch
import wandb
from peft import PeftModel
from transformers import GPT2Tokenizer, GPT2LMHeadModel

def clean_text(text):
    # Lowercase the text
    text = text.lower()
    # Remove special characters
    text = re.sub(r'\W', ' ', text)
    # Remove extra white spaces
    text = re.sub(r'\s+', ' ', text).strip()
    return text
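
# Quick sanity check of the cleaning behaviour (illustrative only):
#   clean_text("Re: Q3 numbers?")   -> "re q3 numbers"
#   clean_text("Hello,  World!!  ") -> "hello world"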



os.environ["WANDB_API_KEY"] = "d2ad0a7285379c0808ca816971d965fc242d0b5e"

wandb.login()

run = wandb.init(project="Email_subject_gen", job_type="model_loading")

artifact = run.use_artifact('Email_subject_gen/final_model:v0')
artifact_dir = artifact.download()

#tokenizer= GPT2Tokenizer.from_pretrained(artifact_dir)

MODEL_KEY = 'olm/olm-gpt2-dec-2022'
tokenizer = GPT2Tokenizer.from_pretrained(MODEL_KEY)
tokenizer.add_special_tokens({'pad_token': '{PAD}'})

model = GPT2LMHeadModel.from_pretrained(MODEL_KEY)
model.resize_token_embeddings(len(tokenizer))
# Note: these two attributes are inert here; GPT-2's config names its dropout
# fields resid_pdrop / attn_pdrop, and the layers are already constructed.
model.config.dropout = 0.1
model.config.attention_dropout = 0.1

model = PeftModel.from_pretrained(model, artifact_dir)
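
# The app only runs inference, so it seems reasonable to switch the PEFT-wrapped
# model to eval mode here (turns off dropout and other training-only behaviour).
model.eval()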


def generateSubject(email):
    # Wrap the cleaned email with the <email> / <subject> markers before generation.
    email = "<email>" + clean_text(email) + "<subject>"

    prompts = [email]
    tokenizer.padding_side = 'left'
    prompts_batch_ids = tokenizer(prompts,
            padding=True, truncation=True, return_tensors='pt').to(model.device)
    output_ids = model.generate(
            **prompts_batch_ids, max_new_tokens=10,
            pad_token_id=tokenizer.pad_token_id)
    outputs_batch = [seq.split('<subject>')[1] for seq in
            tokenizer.batch_decode(output_ids, skip_special_tokens=True)]
    tokenizer.padding_side = 'right'
    print(outputs_batch)

    return outputs_batch[0]
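
# Example call (illustrative only; the output depends on the fine-tuned adapter):
#   subject = generateSubject("hi team, attached is the agenda for monday's review meeting")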



iface = gradio.Interface(fn=generateSubject, inputs="text", outputs="text")
iface.launch()