Rohankumar31 committed acea4f2
1 parent: 0ac4900

Update model.py
model.py CHANGED
@@ -1,14 +1,14 @@
 from transformers import pipeline
-import transformers
-import torch
-import torch.nn as nn
-import tensorflow as tf
-from transformers import TFGPT2LMHeadModel ,GPT2Tokenizer, BitsAndBytesConfig
-tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
-model = TFGPT2LMHeadModel.from_pretrained('gpt2',pad_token_id = tokenizer.eos_token_id)
-def generate_text(inp):
-
-
-
-
-qa_pipeline = pipeline("question-answering", model=
+# import transformers
+# import torch
+# import torch.nn as nn
+# import tensorflow as tf
+# from transformers import TFGPT2LMHeadModel ,GPT2Tokenizer, BitsAndBytesConfig
+# tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+# model = TFGPT2LMHeadModel.from_pretrained('gpt2',pad_token_id = tokenizer.eos_token_id)
+# def generate_text(inp):
+# input_ids = tokenizer.encode(inp,return_tensors = 'tf')
+# beam_output = model.generate(input_ids, max_length = 100,num_beams = 5, no_repeat_ngram_size = 2, early_stopping = True)
+# output = tokenizer.decode(beam_output[0],skip_special_tokens = True, clean_up_tokenization_spaces = True)
+# return ".".join(output.split(".")[:-1]) + "."
+qa_pipeline = pipeline("question-answering", model='deepset/roberta-base-squad2')
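For reference, the qa_pipeline built on the last added line is an extractive question-answering pipeline: it is called with a question and a context string and returns the answer span it finds in that context. A minimal usage sketch follows; the question and context strings are made-up examples, not part of the Space.

from transformers import pipeline

# Same pipeline the updated model.py constructs.
qa_pipeline = pipeline("question-answering", model='deepset/roberta-base-squad2')

# Hypothetical inputs; any question/context pair works.
result = qa_pipeline(
    question="Which model answers the questions?",
    context="This Space answers questions with deepset/roberta-base-squad2 via the transformers pipeline.",
)

# The pipeline returns a dict with the extracted answer, a confidence score,
# and the start/end character offsets of the answer within the context.
print(result["answer"], round(result["score"], 3))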