from transformers import pipeline

# --- Earlier GPT-2 text-generation approach, left disabled ---
# import transformers
# import torch
# import torch.nn as nn
# import tensorflow as tf
# from transformers import TFGPT2LMHeadModel, GPT2Tokenizer, BitsAndBytesConfig
# tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
# model = TFGPT2LMHeadModel.from_pretrained('gpt2', pad_token_id=tokenizer.eos_token_id)
# def generate_text(inp):
#     input_ids = tokenizer.encode(inp, return_tensors='tf')
#     beam_output = model.generate(input_ids, max_length=100, num_beams=5,
#                                  no_repeat_ngram_size=2, early_stopping=True)
#     output = tokenizer.decode(beam_output[0], skip_special_tokens=True,
#                               clean_up_tokenization_spaces=True)
#     # Drop the trailing partial sentence from the beam-search output.
#     return ".".join(output.split(".")[:-1]) + "."

# Extractive question-answering pipeline backed by a RoBERTa model
# fine-tuned on SQuAD 2.0.
qa_pipeline = pipeline("question-answering", model='deepset/roberta-base-squad2')
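
# Minimal usage sketch (assumption: the sample question and context strings
# below are illustrative, not part of the original app). The pipeline is
# called with a question/context pair and returns a dict with 'answer',
# 'score', 'start', and 'end' keys.
if __name__ == "__main__":
    result = qa_pipeline(
        question="Which model backs the pipeline?",
        context="The question-answering pipeline uses deepset/roberta-base-squad2.",
    )
    print(result["answer"], result["score"])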