Chris4K committed on
Commit
1fad4a0
1 Parent(s): d07970f

Update text_generator.py

Browse files
Files changed (1) hide show
  1. text_generator.py +24 -23
text_generator.py CHANGED
@@ -1,3 +1,5 @@
 
 
1
  from transformers import Tool
2
  from transformers import pipeline
3
 
@@ -10,40 +12,39 @@ class TextGenerationTool(Tool):
10
  inputs = ["text"]
11
  outputs = ["text"]
12
 
13
- import requests
14
 
15
- def __call__(self, prompt: str):
16
 
17
 
18
- API_URL = "https://api-inference.huggingface.co/models/lukasdrg/clinical_longformer_same_tokens_220k"
19
- headers = {"Authorization": "Bearer "+os.environ['HF']+"}
 
 
 
 
20
 
21
- #def query(payload):
22
- generated_text = requests.post(API_URL, headers=headers, json=payload)
23
- # return response.json()
24
-
25
- #output = query({
26
- # "inputs": "The answer to the universe is <mask>.",
27
- #})
28
 
29
 
30
-
31
- # Replace the following line with your text generation logic
32
- #generated_text = f"Generated text based on the prompt: '{prompt}'"
33
 
34
- # Initialize the text generation pipeline
35
- #text_generator = pipeline("text-generation") llama mistralai/Mistral-7B-Instruct-v0.1
36
- #text_generator = pipeline(model="gpt2")
37
- #text_generator = pipeline(model="meta-llama/Llama-2-7b-chat-hf")
38
 
39
- # Generate text based on a prompt
40
- #generated_text = text_generator(prompt, max_length=500, num_return_sequences=1, temperature=0.7)
41
 
42
- # Print the generated text
43
- #print(generated_text)
44
 
45
 
46
 
47
- return generated_text
48
 
49
 
 
1
import os

import requests
from transformers import Tool
from transformers import pipeline


class TextGenerationTool(Tool):
    """Tool that generates text by delegating to a hosted Hugging Face
    Inference API model.

    The model endpoint is fixed to
    ``lukasdrg/clinical_longformer_same_tokens_220k``; authentication uses
    a bearer token read from the ``HF`` environment variable.
    """

    inputs = ["text"]
    outputs = ["text"]

    # Remote endpoint queried by __call__.
    API_URL = (
        "https://api-inference.huggingface.co/models/"
        "lukasdrg/clinical_longformer_same_tokens_220k"
    )

    def __call__(self, prompt: str):
        """Send *prompt* to the inference endpoint and return its response.

        Args:
            prompt: Text input forwarded to the model as ``{"inputs": prompt}``.

        Returns:
            The JSON-decoded response body from the Inference API.

        Raises:
            KeyError: if the ``HF`` environment variable is not set.
            requests.HTTPError: if the API responds with an error status.

        Fixes vs. the original: the Authorization header string was
        unterminated (SyntaxError), ``payload`` was referenced but never
        defined (the prompt argument was silently ignored), ``os`` was
        used without being imported, and the raw ``Response`` object was
        returned instead of its decoded body.
        """
        headers = {"Authorization": "Bearer " + os.environ["HF"]}
        response = requests.post(
            self.API_URL,
            headers=headers,
            json={"inputs": prompt},
        )
        # Surface HTTP-level failures instead of returning an error body
        # that looks like a successful generation.
        response.raise_for_status()
        return response.json()