Chris4K committed
Commit 7846d6e
1 Parent(s): d4221fc

Update text_generator.py

Files changed (1)
  1. text_generator.py +18 -32
text_generator.py CHANGED
@@ -1,7 +1,8 @@
+ import os
  import requests
 
  from transformers import Tool
- from transformers import pipeline
+ # Import other necessary libraries if needed
 
  class TextGenerationTool(Tool):
      name = "text_generator"
@@ -11,40 +12,25 @@ class TextGenerationTool(Tool):
 
      inputs = ["text"]
      outputs = ["text"]
-
 
-     def __call__(self, prompt: str):
+     def __call__(self, prompt: str):
+         API_URL = "https://api-inference.huggingface.co/models/lukasdrg/clinical_longformer_same_tokens_220k"
+         headers = {"Authorization": "Bearer " + os.environ['HF']}
 
+         # Define the payload for the request
+         payload = {
+             "inputs": prompt  # Adjust this based on your model's input format
+         }
 
-         API_URL = "https://api-inference.huggingface.co/models/lukasdrg/clinical_longformer_same_tokens_220k"
-         headers = {"Authorization": "Bearer "+os.environ['HF']}
-
-         #def query(payload):
-         generated_text = requests.post(API_URL, headers=headers, json=payload)
-         #    return response.json()
-
-         #output = query({
-         #    "inputs": "The answer to the universe is <mask>.",
-         #})
+         # Make the request to the API
+         generated_text = requests.post(API_URL, headers=headers, json=payload).json()
 
+         # Extract and return the generated text
+         return generated_text["generated_text"]
 
-
-         # Replace the following line with your text generation logic
-         #generated_text = f"Generated text based on the prompt: '{prompt}'"
-
-         # Initialize the text generation pipeline
-         #text_generator = pipeline("text-generation") llama mistralai/Mistral-7B-Instruct-v0.1
-         #text_generator = pipeline(model="gpt2")
-         #text_generator = pipeline(model="meta-llama/Llama-2-7b-chat-hf")
-
-         # Generate text based on a prompt
-         #generated_text = text_generator(prompt, max_length=500, num_return_sequences=1, temperature=0.7)
-
-         # Print the generated text
-         #print(generated_text)
-
-
-
-         return generated_text
-
+         # Uncomment and customize the following lines based on your text generation needs
+         # text_generator = pipeline(model="gpt2")
+         # generated_text = text_generator(prompt, max_length=500, num_return_sequences=1, temperature=0.7)
 
+         # Print the generated text if needed
+         # print(generated_text)
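
For reference, a minimal usage sketch of the updated tool (not part of the commit), assuming the class lives in text_generator.py and that the HF environment variable holds a valid Hugging Face API token; the placeholder token and prompt below are illustrative only:

import os

from text_generator import TextGenerationTool

# Placeholder for illustration; a real Hugging Face API token must be set.
os.environ.setdefault("HF", "hf_your_token_here")

tool = TextGenerationTool()
result = tool("The answer to the universe is <mask>.")
print(result)

Note that, depending on the target model's pipeline type, the Inference API may return a list of result dicts rather than a single dict with a "generated_text" key, in which case the lookup in __call__ (generated_text["generated_text"]) would need adjusting, e.g. indexing the first element of the list before reading a key.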