Spaces:
Runtime error
Runtime error
File size: 1,496 Bytes
e03f966 b43651d e03f966 b43651d e03f966 497ee47 b43651d 3ff77ad e03f966 b43651d e03f966 b43651d e03f966 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 |
from transformers import Tool
from transformers import pipeline
class TextGenerationTool(Tool):
    """Tool that generates text from a prompt via the Hugging Face Inference API.

    Sends the prompt to a hosted model endpoint and returns the parsed JSON
    response produced by the API.
    """

    name = "text_generator"
    description = (
        "This is a tool for text generation. It takes a prompt as input and returns the generated text."
    )
    inputs = ["text"]
    outputs = ["text"]

    # Hosted model endpoint used for inference.
    API_URL = "https://api-inference.huggingface.co/models/lukasdrg/clinical_longformer_same_tokens_220k"

    def __call__(self, prompt: str):
        """Query the inference API with *prompt* and return the JSON result.

        Args:
            prompt: The input text sent to the model as ``{"inputs": prompt}``.

        Returns:
            The decoded JSON body of the API response.

        Raises:
            requests.HTTPError: If the API responds with a non-2xx status.
        """
        # Imported here (not at class-body level) because names bound in a
        # class body are NOT visible inside method bodies — the original
        # class-level `import requests` raised NameError at call time.
        import os
        import requests

        # SECURITY: the original hard-coded a live Bearer token in source.
        # Read it from the environment instead; never commit secrets.
        token = os.environ.get("HF_API_TOKEN", "")
        headers = {"Authorization": f"Bearer {token}"}

        # Original bug: `payload` was referenced but never defined (NameError).
        payload = {"inputs": prompt}

        response = requests.post(self.API_URL, headers=headers, json=payload)
        response.raise_for_status()
        # Return the parsed body, not the raw Response object, so the tool
        # actually yields text-like output as declared by `outputs`.
        return response.json()
|