Chris4K committed on
Commit
9f212f1
•
1 Parent(s): dfb0190

Update text_generator.py

Files changed (1)
  1. text_generator.py +68 -93
text_generator.py CHANGED
@@ -1,93 +1,68 @@
- import json
- import gradio as gr
-
- def generate_files(title="Text Generation Tool", emoji="🌖", colorFrom="blue", colorTo="blue",
-                    sdk="gradio", sdk_version="4.3.0", app_file="app.py", pinned=False,
-                    tags=["tool"], tool_name="text_generator", tool_description="This is a tool that chats with a user. "
-                    "It takes an input named `prompt` which contains a system_role, user_message, context and history. "
-                    "It returns a text message."):
-     # Generate readme content
-     readme_content = f'''## readme
- title: {title}
- emoji: {emoji}
- colorFrom: {colorFrom}
- colorTo: {colorTo}
- sdk: {sdk}
- sdk_version: {sdk_version}
- app_file: {app_file}
- pinned: {pinned}
- tags:
- - {tags[0]}
- '''
-
-     # Generate tool config JSON content
-     tool_config = {
-         "description": tool_description,
-         "name": tool_name,
-         "tool_class": f"{tool_name.capitalize()}Tool"
-     }
-     tool_config_json = json.dumps(tool_config, indent=4)
-
-     # Generate app.py content
-     app_py_content = f'''from transformers.tools.base import launch_gradio_demo
- from {tool_name} import {tool_name.capitalize()}Tool
-
- launch_gradio_demo({tool_name.capitalize()}Tool)
- '''
-
-     # Generate requirements.txt content
-     requirements_content = '''transformers>=4.29.0
- # diffusers
- accelerate
- torch
- '''
-
-     # Generate text_generator.py content
-     text_generator_py_content = f'''import os
- from transformers import pipeline
- from transformers import Tool
-
- class {tool_name.capitalize()}Tool(Tool):
-     name = "{tool_name}"
-     description = (
-         "{tool_description}"
-     )
-
-     inputs = ["text"]
-     outputs = ["text"]
-
-     def __call__(self, prompt: str):
-         token = os.environ['hf']
-         text_generator = pipeline(model="microsoft/Orca-2-13b", token=token)
-         generated_text = text_generator(prompt, max_length=500, num_return_sequences=1, temperature=0.7)
-         print(generated_text)
-         return generated_text
- '''
-
-     # Write content to files
-     with open("README.md", "w") as readme_file:
-         readme_file.write(readme_content)
-
-     with open("tool_config.json", "w") as tool_config_file:
-         tool_config_file.write(tool_config_json)
-
-     with open("app.py", "w") as app_py_file:
-         app_py_file.write(app_py_content)
-
-     with open("requirements.txt", "w") as requirements_file:
-         requirements_file.write(requirements_content)
-
-     with open(f"{tool_name}.py", "w") as text_generator_py_file:
-         text_generator_py_file.write(text_generator_py_content)
-
-     # Return the generated files for download
-     return "README.md", "tool_config.json", "app.py", "requirements.txt", f"{tool_name}.py"
-
-
- # Define the inputs for the Gradio interface
- io = gr.Interface(generate_files,
-                   inputs=["text", "text", "text", "text", "text", "text", "text", "text", "checkbox", "text", "text"],
-                   outputs=["text", "text", "text", "text", "text"])
-
- # Launch the Gradio interface
- io.launch()
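
One bug worth flagging in the removed scaffold: Hugging Face Spaces reads a Space's configuration from YAML front matter delimited by `---` lines at the top of README.md, so the `## readme` heading above means the generated metadata (title, emoji, sdk, sdk_version, ...) would never have been picked up. A minimal sketch of a corrected f-string, keeping the same fields:

readme_content = f'''---
title: {title}
emoji: {emoji}
colorFrom: {colorFrom}
colorTo: {colorTo}
sdk: {sdk}
sdk_version: {sdk_version}
app_file: {app_file}
pinned: {pinned}
tags:
- {tags[0]}
---
'''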
 
+ import requests
+ import os
+ from transformers import pipeline
+
+
+ from transformers import Tool
+ # Import other necessary libraries if needed
+
+ class TextGenerationTool(Tool):
+     name = "text_generator"
+     description = (
+         "This is a tool for text generation. It takes a prompt as input and returns the generated text."
+     )
+
+     inputs = ["text"]
+     outputs = ["text"]
+
+     def __call__(self, prompt: str):
+         #API_URL = "https://api-inference.huggingface.co/models/openchat/openchat_3.5"
+         #headers = {"Authorization": "Bearer " + os.environ['hf']}
+         token = os.environ['HF_token']
+         #payload = {
+         #    "inputs": prompt  # Adjust this based on your model's input format
+         #}
+
+         #payload = {
+         #    "inputs": "Can you please let us know more details about your ",
+         #}
+
+         #def query(payload):
+         #    generated_text = requests.post(API_URL, headers=headers, json=payload).json()
+         #    print(generated_text)
+         #    return generated_text["text"]
+
+         # Replace the following line with your text generation logic
+         #generated_text = f"Generated text based on the prompt: '{prompt}'"
+
+         # Initialize the text generation pipeline
+         #text_generator = pipeline(model="lgaalves/gpt2-dolly", token=token)
+         text_generator = pipeline(model="microsoft/Orca-2-13b", token=token)
+
+         # Generate text based on a prompt
+         generated_text = text_generator(prompt, max_length=500, num_return_sequences=1, temperature=0.7)
+
+         # Print the generated text
+         print(generated_text)
+
+
+
+         return generated_text
+
+ # Define the payload for the request
+ #payload = {
+ #    "inputs": prompt  # Adjust this based on your model's input format
+ #}
+
+ # Make the request to the API
+ #generated_text = requests.post(API_URL, headers=headers, json=payload).json()
+
+ # Extract and return the generated text
+ #return generated_text["generated_text"]
+
+ # Uncomment and customize the following lines based on your text generation needs
+ # text_generator = pipeline(model="gpt2")
+ # generated_text = text_generator(prompt, max_length=500, num_return_sequences=1, temperature=0.7)
+
+ # Print the generated text if needed
+ # print(generated_text)
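
Two issues in the committed `__call__` are worth noting: it rebuilds the pipeline (and thus reloads the model weights) on every invocation, and it returns the pipeline's raw output, a list of `{'generated_text': ...}` dicts, even though `outputs = ["text"]` declares a plain string. A minimal sketch of a corrected version, assuming the same `HF_token` environment variable and the same model as the commit (microsoft/Orca-2-13b is a 13-billion-parameter model, so the host needs correspondingly large hardware):

import os

from transformers import Tool, pipeline


class TextGenerationTool(Tool):
    name = "text_generator"
    description = (
        "This is a tool for text generation. It takes a prompt as input "
        "and returns the generated text."
    )

    inputs = ["text"]
    outputs = ["text"]

    _generator = None  # built lazily on first call, then reused

    def __call__(self, prompt: str) -> str:
        if TextGenerationTool._generator is None:
            # Build the pipeline once; constructing it per call would
            # reload the model weights every time.
            TextGenerationTool._generator = pipeline(
                "text-generation",
                model="microsoft/Orca-2-13b",
                token=os.environ["HF_token"],
            )
        results = TextGenerationTool._generator(
            prompt, max_length=500, num_return_sequences=1, temperature=0.7
        )
        # The pipeline returns a list of dicts; unwrap to the plain
        # string promised by outputs = ["text"].
        return results[0]["generated_text"]

With that class in text_generator.py, the app.py the old scaffold generated is still the right entry point:

from transformers.tools.base import launch_gradio_demo
from text_generator import TextGenerationTool

launch_gradio_demo(TextGenerationTool)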