import requests
# HF_TOKEN is the Hugging Face API token, defined in a local hf_token.py module
from hf_token import HF_TOKEN


def query(token: str, url: str, payload: dict) -> dict:
    '''Send a JSON payload to a Hugging Face Inference Endpoint.

    Returns a dict of the form::

        {
            "score": 0.9873963594436646,
            "start": 34,
            "end": 40,
            "answer": "Berlin"
        }
    '''
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json"
    }
    response = requests.post(url, headers=headers, json=payload)
    # Raise on HTTP errors so a failing endpoint doesn't surface as a JSON decode error
    response.raise_for_status()
    return response.json()


if __name__ == "__main__":
    # URL of the deployed Hugging Face Inference Endpoint
    url = 'https://ciy95hpzki22rqvf.us-east-1.aws.endpoints.huggingface.cloud'
    # Context passage: the abstract of the BERT paper
    context_bert_abstract = "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models (Peters et al., 2018a; Radford et al., 2018), BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement)."
    # Payload format for an extractive question-answering request
    input_ = {
        "inputs": {
            "question": "What does the 'B' in BERT stand for?",
            "context": context_bert_abstract
        }
    }
    output = query(
        HF_TOKEN,
        url,
        input_
    )
    print(output)
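    # A successful run prints a dict matching the schema documented in query(),
    # e.g. {'score': <float>, 'start': <int>, 'end': <int>, 'answer': <str>}.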