File size: 2,241 Bytes
1fad4a0 ba1082d 28c8817 1fad4a0 e03f966 7846d6e e03f966 7846d6e eb9bba6 d44236e eb9bba6 f275817 eb9bba6 ba1082d 58b75c9 eb9bba6 ba1082d eb9bba6 8c30a54 eb9bba6 58b75c9 eb9bba6 ba1082d 7846d6e ba1082d b43651d 7846d6e ba1082d b43651d 7846d6e ba1082d b43651d 7846d6e e03f966 7846d6e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 |
import requests
import os
from transformers import pipeline
from transformers import Tool
# Import other necessary libraries if needed
class TextGenerationTool(Tool):
    """Agent tool that generates free-form text from a prompt.

    Wraps a Hugging Face ``transformers`` text-generation pipeline behind the
    ``Tool`` interface so it can be used by an agent. The underlying pipeline
    is built lazily on first use and cached on the instance, because
    constructing a pipeline loads (and possibly downloads) the full model —
    far too expensive to repeat on every call.
    """

    name = "text_generator"
    description = (
        "This is a tool for text generation. It takes a prompt as input and returns the generated text."
    )
    inputs = ["text"]
    outputs = ["text"]

    # Model checkpoint used by the pipeline. A class attribute (rather than a
    # literal buried in __call__) so subclasses can override it without
    # touching the generation logic.
    model_id = "lgaalves/gpt2-dolly"

    def __call__(self, prompt: str):
        """Generate text for ``prompt`` and return the pipeline's raw output.

        Args:
            prompt: The text prompt to continue.

        Returns:
            Whatever the ``transformers`` text-generation pipeline returns
            (typically a list of dicts with a ``"generated_text"`` key).

        Raises:
            KeyError: If the ``hf`` environment variable (Hugging Face API
                token) is not set.
        """
        # Hugging Face access token; required to load gated/remote models.
        token = os.environ['hf']

        # Build the pipeline once and reuse it on subsequent calls.
        if getattr(self, "_text_generator", None) is None:
            self._text_generator = pipeline(model=self.model_id, token=token)

        generated_text = self._text_generator(
            prompt, max_length=500, num_return_sequences=1, temperature=0.7
        )

        # Echo the result for interactive debugging, then hand it back.
        print(generated_text)
        return generated_text
|