nouf-sst committed on
Commit
c87d7cd
1 Parent(s): 00d01a5

Add OpenAI API

Files changed (1)
  1. app.py +36 -0
app.py CHANGED
@@ -11,6 +11,13 @@ from autocorrect import Speller
 from transformers import BertTokenizer, BertForSequenceClassification
 import torch
 from torch.nn.utils.rnn import pad_sequence
+from openai import OpenAI
+from tenacity import (
+    retry,
+    stop_after_attempt,
+    wait_random_exponential,
+) # for exponential backoff
+import os
 
 # ***************************** Load needed models *****************************
 nlp = stanza.Pipeline(lang='en', processors='tokenize,pos,constituency')
@@ -21,6 +28,35 @@ sentences_similarity_model = CrossEncoder('WillHeld/roberta-base-stsb')
 nli_model = BertForSequenceClassification.from_pretrained("nouf-sst/bert-base-MultiNLI", use_auth_token="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")
 nli_tokenizer = BertTokenizer.from_pretrained("nouf-sst/bert-base-MultiNLI", use_auth_token="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", do_lower_case=True)
 
+
+# ***************************** GPT API *****************************
+client = OpenAI(
+    api_key=os.getenv("OpenAI"),
+)
+
+
+@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
+def completion_with_backoff(**kwargs):
+    return client.chat.completions.create(**kwargs)
+
+def prompt(prompt_message, bad_smell):
+    message = [
+        {
+            "role": "system",
+            "content": prompt_message
+        },
+        {
+            "role": "user",
+            "content": bad_smell
+        }
+    ]
+    completion = completion_with_backoff(
+        model="gpt-3.5-turbo",
+        messages=message,
+        temperature=0.2,
+    )
+    return completion.choices[0].message.content
+
 # ***************************** TGRL Parsing *****************************
 
 def parse_tgrl(file_obj):
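
For reference, a minimal usage sketch (not part of this commit) of how the new prompt() helper could be called elsewhere in app.py. It assumes the "OpenAI" environment variable already holds a valid API key, since the client above reads os.getenv("OpenAI"); the system message and bad-smell text below are illustrative placeholders.

# Hypothetical usage of the new prompt() helper (assumption: "OpenAI" env var is set
# and gpt-3.5-turbo is reachable). Strings are placeholders, not taken from this commit.
system_msg = "You rewrite TGRL goal names to remove the reported bad smell."
bad_smell = "Goal 'G1' and goal 'G2' have near-identical names."
print(prompt(system_msg, bad_smell))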