JenkinsGage committed
Commit b3a9905
Parent: 051be48

Update app.py

Files changed (1): app.py (+1 -2)
app.py CHANGED
@@ -5,7 +5,6 @@ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
 tokenizer = AutoTokenizer.from_pretrained('humarin/chatgpt_paraphraser_on_T5_base', cache_dir='./Models')
 model = AutoModelForSeq2SeqLM.from_pretrained('humarin/chatgpt_paraphraser_on_T5_base', cache_dir='./Models')
-model_quant = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8)
 
 def paraphrase(
     text,
@@ -25,7 +24,7 @@ def paraphrase(
         truncation=True,
     ).input_ids
 
-    outputs = model_quant.generate(
+    outputs = model.generate(
         input_ids, temperature=temperature, repetition_penalty=repetition_penalty,
         num_return_sequences=num_return_sequences, no_repeat_ngram_size=no_repeat_ngram_size,
         num_beams=num_beams, num_beam_groups=num_beam_groups,
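
For context, the line this commit removes applied PyTorch post-training dynamic quantization, which stores the weights of nn.Linear layers as int8 and dequantizes them on the fly during matmuls (CPU inference only). A minimal, self-contained sketch of the pre-commit quantized path, assuming the same checkpoint and illustrative generation parameters not taken from app.py:

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Same checkpoint as app.py; cache_dir omitted here for brevity.
tokenizer = AutoTokenizer.from_pretrained('humarin/chatgpt_paraphraser_on_T5_base')
model = AutoModelForSeq2SeqLM.from_pretrained('humarin/chatgpt_paraphraser_on_T5_base')

# Post-training dynamic quantization: nn.Linear weights are stored as int8
# and dequantized per matmul at run time (supported on CPU only).
model_quant = torch.quantization.quantize_dynamic(
    model, {torch.nn.Linear}, dtype=torch.qint8
)

# Illustrative call; the real app forwards beam/sampling parameters through paraphrase().
input_ids = tokenizer('paraphrase: The weather is nice today.',
                      return_tensors='pt').input_ids
outputs = model_quant.generate(input_ids, num_beams=5, max_new_tokens=64)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))

A likely motivation for the revert is that dynamic quantization runs only on CPU and can degrade paraphrase quality, so the app falls back to the full-precision model for generation.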