Sean-Case committed • Commit 160f728 • 1 parent: ba85577
Initialise app with basic functionality
Files changed:
- .gitattributes          +34  -0
- .gitignore               +3  -0
- Dockerfile              +30  -0
- app.py                 +239  -0
- chatfuncs/__init__.py    +0  -0
- chatfuncs/chatfuncs.py +112  -0
- readme.md               +13  -1
- requirements.txt         +4  -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,3 @@
+*.pyc
+*.ipynb
+*.csv
Dockerfile ADDED
@@ -0,0 +1,30 @@
+FROM python:3.10
+
+WORKDIR /src
+
+COPY requirements.txt .
+
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Set up a new user named "user" with user ID 1000
+RUN useradd -m -u 1000 user
+# Switch to the "user" user
+USER user
+# Set home to the user's home directory
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH \
+    PYTHONPATH=$HOME/app \
+    PYTHONUNBUFFERED=1 \
+    GRADIO_ALLOW_FLAGGING=never \
+    GRADIO_NUM_PORTS=1 \
+    GRADIO_SERVER_NAME=0.0.0.0 \
+    GRADIO_THEME=huggingface \
+    SYSTEM=spaces
+
+# Set the working directory to the user's home directory
+WORKDIR $HOME/app
+
+# Copy the current directory contents into the container at $HOME/app setting the owner to the user
+COPY --chown=user . $HOME/app
+
+CMD ["python", "app.py"]
app.py ADDED
@@ -0,0 +1,239 @@
+import gradio as gr
+from datetime import datetime
+import pandas as pd
+from transformers import pipeline
+# # Load in packages
+
+# +
+import os
+
+# Need to overwrite version of gradio present in Huggingface spaces as it doesn't have like buttons/avatars (Oct 2023)
+#os.system("pip uninstall -y gradio")
+os.system("pip install gradio==3.50.0")
+
+from typing import TypeVar
+#from langchain.embeddings import HuggingFaceEmbeddings#, HuggingFaceInstructEmbeddings
+#from langchain.vectorstores import FAISS
+import gradio as gr
+
+from transformers import AutoTokenizer
+
+# Alternative model sources
+import ctransformers
+
+PandasDataFrame = TypeVar('pd.core.frame.DataFrame')
+
+import chatfuncs.chatfuncs as chatf
+
+# Disable cuda devices if necessary
+#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+
+def create_hf_model(model_name):
+
+    tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length = chatf.context_length)
+
+    summariser = pipeline("summarization", model=model_name, tokenizer=tokenizer) # philschmid/bart-large-cnn-samsum
+
+    #from transformers import AutoModelForSeq2SeqLM, AutoModelForCausalLM
+
+    # if torch_device == "cuda":
+    #     if "flan" in model_name:
+    #         model = AutoModelForSeq2SeqLM.from_pretrained(model_name, device_map="auto")
+    #     else:
+    #         model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
+    # else:
+    #     if "flan" in model_name:
+    #         model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+    #     else:
+    #         model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
+
+
+
+    return summariser, tokenizer, model_name
+
+def load_model(model_type, gpu_layers, gpu_config=None, cpu_config=None, torch_device=None):
+    print("Loading model ", model_type)
+
+    # Default values inside the function
+    if gpu_config is None:
+        gpu_config = chatf.gpu_config
+    if cpu_config is None:
+        cpu_config = chatf.cpu_config
+    if torch_device is None:
+        torch_device = chatf.torch_device
+
+    if model_type == "Mistral Open Orca (larger, slow)":
+        hf_checkpoint = 'TheBloke/MistralLite-7B-GGUF'
+
+        if torch_device == "cuda":
+            gpu_config.update_gpu(gpu_layers)
+        else:
+            gpu_config.update_gpu(gpu_layers)
+            cpu_config.update_gpu(gpu_layers)
+
+        print("Loading with", cpu_config.gpu_layers, "model layers sent to GPU.")
+
+        print(vars(gpu_config))
+        print(vars(cpu_config))
+
+        #try:
+        #model = ctransformers.AutoModelForCausalLM.from_pretrained('Aryanne/Orca-Mini-3B-gguf', model_type='llama', model_file='q5_0-orca-mini-3b.gguf', **vars(gpu_config)) # **asdict(CtransRunConfig_cpu())
+        #model = ctransformers.AutoModelForCausalLM.from_pretrained('Aryanne/Wizard-Orca-3B-gguf', model_type='llama', model_file='q4_1-wizard-orca-3b.gguf', **vars(gpu_config)) # **asdict(CtransRunConfig_cpu())
+        #model = ctransformers.AutoModelForCausalLM.from_pretrained('TheBloke/Mistral-7B-OpenOrca-GGUF', model_type='mistral', model_file='mistral-7b-openorca.Q4_K_M.gguf', **vars(gpu_config), hf=True) # **asdict(CtransRunConfig_cpu())
+
+        #except:
+        #model = ctransformers.AutoModelForCausalLM.from_pretrained('Aryanne/Orca-Mini-3B-gguf', model_type='llama', model_file='q5_0-orca-mini-3b.gguf', **vars(cpu_config)) #**asdict(CtransRunConfig_gpu())
+        #model = ctransformers.AutoModelForCausalLM.from_pretrained('Aryanne/Wizard-Orca-3B-gguf', model_type='llama', model_file='q4_1-wizard-orca-3b.gguf', **vars(cpu_config)) # **asdict(CtransRunConfig_cpu())
+        #model = ctransformers.AutoModelForCausalLM.from_pretrained('TheBloke/Mistral-7B-OpenOrca-GGUF', model_type='mistral', model_file='mistral-7b-openorca.Q4_K_M.gguf', **vars(cpu_config), hf=True) # **asdict(CtransRunConfig_cpu())
+
+        #tokenizer = ctransformers.AutoTokenizer.from_pretrained(model)
+        #summariser = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+        model = []
+        tokenizer = []
+        summariser = []
+
+    if model_type == "flan-t5-large-stacked-samsum":
+        # Huggingface chat model
+        hf_checkpoint = 'stacked-summaries/flan-t5-large-stacked-samsum-1024'#'declare-lab/flan-alpaca-base' # # #
+
+        summariser, tokenizer, model_type = create_hf_model(model_name = hf_checkpoint)
+
+    if model_type == "flan-t5-small-stacked-samsum":
+        # Huggingface chat model
+        hf_checkpoint = 'stacked-summaries/flan-t5-small-stacked-samsum-1024' #'philschmid/flan-t5-small-stacked-samsum'#'declare-lab/flan-alpaca-base' # # #
+
+
+        summariser, tokenizer, model_type = create_hf_model(model_name = hf_checkpoint)
+
+    chatf.model = summariser
+    chatf.tokenizer = tokenizer
+    chatf.model_type = model_type
+
+    load_confirmation = "Finished loading model: " + model_type
+
+    print(load_confirmation)
+    return model_type, load_confirmation, model_type
+
+# Both models are loaded on app initialisation so that users don't have to wait for the models to be downloaded
+#model_type = "Mistral Open Orca (larger, slow)"
+#load_model(model_type, chatf.gpu_layers, chatf.gpu_config, chatf.cpu_config, chatf.torch_device)
+
+model_type = "flan-t5-large-stacked-samsum"
+load_model(model_type, chatf.gpu_layers, chatf.gpu_config, chatf.cpu_config, chatf.torch_device)
+
+model_type = "flan-t5-small-stacked-samsum"
+load_model(model_type, 0, chatf.gpu_config, chatf.cpu_config, chatf.torch_device)
+
+today = datetime.now().strftime("%d%m%Y")
+today_rev = datetime.now().strftime("%Y%m%d")
+
+def summarise_text(text, text_df, length_slider, in_colnames, model_type):
+
+    if text_df == None:
+        in_colnames="text"
+        in_colnames_list_first = in_colnames
+
+        in_text_df = pd.DataFrame({in_colnames_list_first:[text]})
+
+    else:
+        in_text_df = pd.read_csv(text_df.name, delimiter = ",", low_memory=False, encoding='cp1252')
+        in_colnames_list_first = in_colnames.tolist()[0][0]
+
+    if model_type != "Mistral Open Orca (larger, slow)":
+        summarised_text = chatf.model(list(in_text_df[in_colnames_list_first]), max_length=length_slider)
+
+    if model_type == "Mistral Open Orca (larger, slow)":
+
+        length = str(length_slider)
+
+        prompt = """<|im_start|>system
+You are an AI assistant that follows instruction extremely well. Help as much as you can.
+<|im_start|>user
+Summarise the following text in less than {length} words.
+Text: {text}
+Answer:<|im_end|>"""
+
+        formatted_string = prompt.format(length=length, text=text)
+
+        print(formatted_string)
+
+        #summarised_text = chatf.model(formatted_string, max_new_tokens=length_slider)
+
+        summarised_text = "Mistral Open Orca summaries currently not working. Sorry!"
+
+    if text_df == None:
+        if model_type != "Mistral Open Orca (larger, slow)":
+            summarised_text_out = summarised_text[0].values()
+
+        if model_type == "Mistral Open Orca (larger, slow)":
+            summarised_text_out = summarised_text
+
+    else:
+        summarised_text_out = [d['summary_text'] for d in summarised_text] #summarised_text[0].values()
+
+    output_name = "summarise_output_" + today_rev + ".csv"
+    output_df = pd.DataFrame({"Original text":in_text_df[in_colnames_list_first],
+                              "Summarised text":summarised_text_out})
+
+    summarised_text_out_str = str(output_df["Summarised text"][0])#.str.replace("dict_values([","").str.replace("])",""))
+
+    output_df.to_csv(output_name, index = None)
+
+    return summarised_text_out_str, output_name
+
+# ## Gradio app - summarise
+block = gr.Blocks(theme = gr.themes.Base())
+
+with block:
+
+    model_type_state = gr.State(model_type)
+
+    gr.Markdown(
+    """
+    # Text summariser
+    Enter open text below to get a summary. You can copy and paste text directly, or upload a file and specify the column that you want to summarise. Note that summarisation with Mistral Open Orca is still in development and does not currently work.
+    """)
+
+    with gr.Tab("Summariser"):
+        current_model = gr.Textbox(label="Current model", value=model_type, scale = 3)
+
+        with gr.Accordion("Paste open text", open = False):
+            in_text = gr.Textbox(label="Copy and paste your open text here", lines = 5)
+
+        with gr.Accordion("Summarise open text from a file", open = False):
+            in_text_df = gr.File(label="Input text from file")
+            in_colnames = gr.Dataframe(label="Write the column name for the open text to summarise",
+                                       type="numpy", row_count=(1,"fixed"), col_count = (1,"fixed"),
+                                       headers=["Open text column name"])#, "Address column name 2", "Address column name 3", "Address column name 4"])
+
+        with gr.Row():
+            summarise_btn = gr.Button("Summarise")
+            length_slider = gr.Slider(minimum = 30, maximum = 200, value = 100, step = 10, label = "Maximum length of summary")
+
+        with gr.Row():
+            output_single_text = gr.Textbox(label="Output example (first example in dataset)")
+            output_file = gr.File(label="Output file")
+
+    with gr.Tab("Advanced features"):
+        #out_passages = gr.Slider(minimum=1, value = 2, maximum=10, step=1, label="Choose number of passages to retrieve from the document. Numbers greater than 2 may lead to increased hallucinations or input text being truncated.")
+        #temp_slide = gr.Slider(minimum=0.1, value = 0.1, maximum=1, step=0.1, label="Choose temperature setting for response generation.")
+        with gr.Row():
+            model_choice = gr.Radio(label="Choose a summariser model", value="flan-t5-small-stacked-samsum", choices = ["flan-t5-small-stacked-samsum", "flan-t5-large-stacked-samsum", "Mistral Open Orca (larger, slow)"])
+            change_model_button = gr.Button(value="Load model", scale=0)
+        with gr.Accordion("Choose number of model layers to send to GPU (WARNING: please don't modify unless you are sure you have a GPU).", open = False):
+            gpu_layer_choice = gr.Slider(label="Choose number of model layers to send to GPU.", value=0, minimum=0, maximum=5, step = 1, visible=True)
+
+        load_text = gr.Text(label="Load status")
+
+
+    change_model_button.click(fn=load_model, inputs=[model_choice, gpu_layer_choice], outputs = [model_type_state, load_text, current_model])
+
+    summarise_btn.click(fn=summarise_text, inputs=[in_text, in_text_df, length_slider, in_colnames, model_type_state],
+                        outputs=[output_single_text, output_file], api_name="summarise_single_text")
+
+block.queue(concurrency_count=1).launch()
+# -
+
+
+
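For reference, the summarisation path that app.py wires together (create_hf_model() building a transformers pipeline, then summarise_text() calling it with a list of texts and unpacking 'summary_text') boils down to the minimal sketch below. The checkpoint name and context length are taken from the code above; the example input text is invented.

```python
# Minimal sketch of the summarisation flow used in app.py; the example text is invented.
from transformers import AutoTokenizer, pipeline

checkpoint = "stacked-summaries/flan-t5-small-stacked-samsum-1024"
tokenizer = AutoTokenizer.from_pretrained(checkpoint, model_max_length=4096)  # chatf.context_length
summariser = pipeline("summarization", model=checkpoint, tokenizer=tokenizer)

texts = ["The team met on Monday to plan the release, agreed to fix the login bug first, "
         "and pushed the documentation work to the next sprint."]
results = summariser(texts, max_length=100)        # same call shape as chatf.model(...) in summarise_text()
summaries = [d["summary_text"] for d in results]   # how app.py unpacks the pipeline output for file input
print(summaries[0])
```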
chatfuncs/__init__.py ADDED
(empty file)
chatfuncs/chatfuncs.py ADDED
@@ -0,0 +1,112 @@
+
+from typing import TypeVar
+
+# Model packages
+import torch.cuda
+from transformers import pipeline
+
+torch.cuda.empty_cache()
+
+PandasDataFrame = TypeVar('pd.core.frame.DataFrame')
+
+model_type = None # global variable setup
+
+full_text = "" # Define dummy source text (full text) just to enable highlight function to load
+
+model = [] # Define empty list for model functions to run
+tokenizer = [] # Define empty list for model functions to run
+
+
+# Currently set gpu_layers to 0 even with cuda due to persistent bugs in implementation with cuda
+if torch.cuda.is_available():
+    torch_device = "cuda"
+    gpu_layers = 0
+else:
+    torch_device = "cpu"
+    gpu_layers = 0
+
+print("Running on device:", torch_device)
+threads = 8 #torch.get_num_threads()
+print("CPU threads:", threads)
+
+# flan-t5-large-stacked-xsum Model parameters
+temperature: float = 0.1
+top_k: int = 3
+top_p: float = 1
+repetition_penalty: float = 1.3
+flan_alpaca_repetition_penalty: float = 1.3
+last_n_tokens: int = 64
+max_new_tokens: int = 256
+seed: int = 42
+reset: bool = False
+stream: bool = True
+threads: int = threads
+batch_size:int = 256
+context_length:int = 4096
+sample = True
+
+
+class CtransInitConfig_gpu:
+    def __init__(self, temperature=temperature,
+                 top_k=top_k,
+                 top_p=top_p,
+                 repetition_penalty=repetition_penalty,
+                 last_n_tokens=last_n_tokens,
+                 max_new_tokens=max_new_tokens,
+                 seed=seed,
+                 reset=reset,
+                 stream=stream,
+                 threads=threads,
+                 batch_size=batch_size,
+                 context_length=context_length,
+                 gpu_layers=gpu_layers):
+        self.temperature = temperature
+        self.top_k = top_k
+        self.top_p = top_p
+        self.repetition_penalty = repetition_penalty# repetition_penalty
+        self.last_n_tokens = last_n_tokens
+        self.max_new_tokens = max_new_tokens
+        self.seed = seed
+        self.reset = reset
+        self.stream = stream
+        self.threads = threads
+        self.batch_size = batch_size
+        self.context_length = context_length
+        self.gpu_layers = gpu_layers
+        # self.stop: list[str] = field(default_factory=lambda: [stop_string])
+
+    def update_gpu(self, new_value):
+        self.gpu_layers = new_value
+
+class CtransInitConfig_cpu(CtransInitConfig_gpu):
+    def __init__(self):
+        super().__init__()
+        self.gpu_layers = 0
+
+gpu_config = CtransInitConfig_gpu()
+cpu_config = CtransInitConfig_cpu()
+
+
+class CtransGenGenerationConfig:
+    def __init__(self, temperature=temperature,
+                 top_k=top_k,
+                 top_p=top_p,
+                 repetition_penalty=repetition_penalty,
+                 last_n_tokens=last_n_tokens,
+                 seed=seed,
+                 threads=threads,
+                 batch_size=batch_size,
+                 reset=True
+                 ):
+        self.temperature = temperature
+        self.top_k = top_k
+        self.top_p = top_p
+        self.repetition_penalty = repetition_penalty# repetition_penalty
+        self.last_n_tokens = last_n_tokens
+        self.seed = seed
+        self.threads = threads
+        self.batch_size = batch_size
+        self.reset = reset
+
+    def update_temp(self, new_value):
+        self.temperature = new_value
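The two CtransInitConfig_* classes above are plain attribute containers: the commented-out Mistral branch of load_model() in app.py splats them into ctransformers with **vars(config). A hedged sketch of that intended usage (not currently enabled in the app, and involving a multi-gigabyte GGUF download) might look like this:

```python
# Sketch of the intended ctransformers path, mirroring the commented-out calls in load_model();
# this route is not yet wired up in the app, so treat it as an assumption about future use.
import ctransformers
from chatfuncs.chatfuncs import gpu_config

gpu_config.update_gpu(5)  # e.g. send 5 layers to the GPU, as the "Advanced features" slider would

model = ctransformers.AutoModelForCausalLM.from_pretrained(
    'TheBloke/Mistral-7B-OpenOrca-GGUF',
    model_type='mistral',
    model_file='mistral-7b-openorca.Q4_K_M.gguf',
    hf=True,
    **vars(gpu_config),  # temperature, context_length, gpu_layers, ... passed as keyword arguments
)
```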
readme.md CHANGED
@@ -1 +1,13 @@
-
+---
+title: Open Text Summariser
+emoji: π
+colorFrom: green
+colorTo: gray
+sdk: gradio
+sdk_version: 3.50.0
+app_file: app.py
+pinned: false
+license: apache-2.0
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
requirements.txt ADDED
@@ -0,0 +1,4 @@
+gradio==3.50.0
+transformers
+torch
+ctransformers[cuda]
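Once the Space is running with these dependencies, summarise_text() saves its results to a dated CSV ("summarise_output_" + today_rev + ".csv") alongside the single-example textbox output. A small sketch of reading that file back, with a hypothetical date suffix:

```python
# Read back the CSV written by summarise_text(); the date in the filename is hypothetical.
import pandas as pd

output_df = pd.read_csv("summarise_output_20231023.csv")
# Columns as built in summarise_text(): the input text and the generated summary
print(output_df[["Original text", "Summarised text"]].head())
```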