|
|
|
from unittest import result |
|
from templates.Templates import PromptTemplate |
|
import openai |
|
import os |
|
import requests |
|
|
|
# Shared prompt-template provider; supplies the few-shot prefixes
# (TITLE_TO_ABSTRACST / TOPIC_TO_ABSTRACST) consumed by model() below.
prompt = PromptTemplate()


# Candidate stop sequences for generation. NOTE(review): not referenced
# anywhere in this file's visible code — presumably imported by a caller or
# intended to be passed to the API as stop sequences; verify before removing.
stop_words = ["###", "\n\n", "<br><br>", "The authors: "]
|
|
|
|
|
def model(type, template, seq_len=250):
    """Generate an abstract with BLOOM via the Hugging Face Inference API.

    Args:
        type: Which few-shot prefix to prepend: 'title' or 'topic'.
            Any other value sends ``template`` with no prefix.
            (NOTE: shadows the builtin ``type``; kept for caller compatibility.)
        template: User prompt appended to the few-shot training prefix.
        seq_len: Maximum number of new tokens to generate.

    Returns:
        dict: ``{"result": <generated text, last line of last chunk>}``.

    Raises:
        KeyError: if the ``HF_KEY`` environment variable is not set.
        RuntimeError: if the API responds with an error payload.
        requests.exceptions.RequestException: on network failure/timeout.
    """
    train = ''
    if type == 'title':
        train = prompt.TITLE_TO_ABSTRACST
    if type == 'topic':
        train = prompt.TOPIC_TO_ABSTRACST

    HF_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
    HF_KEY = os.environ["HF_KEY"]

    headers = {"Authorization": f"Bearer {HF_KEY}"}

    print(f"Inside model {seq_len}")

    payload = {
        "inputs": train + template,
        "parameters": {
            "temperature": 0.9,
            "max_new_tokens": seq_len,
            "return_full_text": False,
            "top_p": 0.8,
            "frequency_penalty": 1.0,
            # BUG FIX: the HF text-generation parameter is
            # "repetition_penalty"; the old "retention_penalty" key does
            # not exist and was silently ignored by the API.
            "repetition_penalty": 1.0,
        },
        "options": {
            "use_cache": False
        }
    }

    # Timeout added so a stalled inference endpoint cannot hang the caller
    # forever (BLOOM cold starts can be slow, hence the generous value).
    response = requests.post(HF_URL, json=payload, headers=headers, timeout=120)
    body = response.json()

    # On failure (model loading, invalid token, ...) the API returns a dict
    # with an "error" key instead of the usual list of generations; the old
    # code crashed on body[0] with an opaque KeyError/TypeError here.
    if isinstance(body, dict) and "error" in body:
        raise RuntimeError(f"Hugging Face API error: {body['error']}")

    generated = body[0]['generated_text']

    # Keep only the last line of the last blank-line-separated chunk,
    # discarding any echoed few-shot examples from the prompt.
    generated = generated.split("\n\n\n\n")[-1].strip().split("\n")[-1]

    return {"result": generated}
|
|