awacke1 committed
Commit 1e7150f • 1 Parent(s): e154fc1

Create backup-2-app.py

Files changed (1)
  1. backup-2-app.py +268 -0
backup-2-app.py ADDED
@@ -0,0 +1,268 @@
+ import streamlit as st
+ import requests
+ import os
+ import urllib.parse  # the .parse submodule must be imported explicitly
+ import base64
+ from bs4 import BeautifulSoup
+ import hashlib
+ import json
+ import uuid
+ import glob
+ import zipfile
+ from PIL import Image
+
+ EXCLUDED_FILES = ['app.py', 'requirements.txt', 'pre-requirements.txt', 'packages.txt', 'README.md', '.gitattributes', 'backup.py', 'Dockerfile']
+ URLS = {
+     "Lumiere": "https://lumiere-video.github.io/",
+     "National Library of Medicine": "https://www.nlm.nih.gov/",
+     "World Health Organization": "https://www.who.int/",
+     "UHCProvider - United Health and Optum": "https://www.uhcprovider.com/",
+     "CMS - Centers for Medicare & Medicaid Services": "https://www.cms.gov/",
+     "Mayo Clinic": "https://www.mayoclinic.org/",
+     "WebMD": "https://www.webmd.com/",
+     "MedlinePlus": "https://medlineplus.gov/",
+     "Healthline": "https://www.healthline.com/",
+     "CDC - Centers for Disease Control and Prevention": "https://www.cdc.gov/",
+     "Johns Hopkins Medicine": "https://www.hopkinsmedicine.org/"
+ }
+
+ if not os.path.exists("history.json"):
+     with open("history.json", "w") as f:
+         json.dump({}, f)
+
+ def zip_subdirs(start_dir):
+     # Zip each first-level subdirectory of start_dir, yielding each zip's path.
+     for subdir, dirs, files in os.walk(start_dir):
+         if subdir != start_dir:  # Skip the root directory
+             zip_filename = os.path.join(start_dir, subdir.split(os.sep)[-1] + '.zip')
+             allFileSummary = ""
+             with zipfile.ZipFile(zip_filename, 'w') as zipf:
+                 for file in files:
+                     file_path = os.path.join(subdir, file)
+                     zipf.write(file_path, os.path.relpath(file_path, start_dir))
+                     allFileSummary += f"Added: {file_path}\n"  # newline keeps the summary readable
+             st.write(allFileSummary)
+             yield zip_filename
+
+ def get_zip_download_link(zip_file):
+     with open(zip_file, 'rb') as f:
+         data = f.read()  # renamed from `bytes` to avoid shadowing the builtin
+     b64 = base64.b64encode(data).decode()
+     link_name = os.path.basename(zip_file)
+     href = f'<a href="data:file/zip;base64,{b64}" download="{link_name}">Download: {link_name}</a>'
+     return href
+
+ @st.cache_resource
+ def create_zip_of_files(files):
+     zip_name = "all_files.zip"
+     with zipfile.ZipFile(zip_name, 'w') as zipf:
+         for file in files:
+             zipf.write(file)
+     return zip_name
+
+ # NOTE: this redefinition shadows get_zip_download_link above; being defined
+ # later, the cached "Download All" variant is the one the app actually calls.
+ @st.cache_resource
+ def get_zip_download_link(zip_file):
+     with open(zip_file, 'rb') as f:
+         data = f.read()
+     b64 = base64.b64encode(data).decode()
+     href = f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
+     return href
+
+ def download_file(url, local_filename):
+     if url.startswith('http://') or url.startswith('https://'):
+         try:
+             with requests.get(url, stream=True) as r:
+                 r.raise_for_status()
+                 with open(local_filename, 'wb') as f:
+                     for chunk in r.iter_content(chunk_size=8192):
+                         f.write(chunk)
+             return local_filename
+         except requests.exceptions.HTTPError as err:
+             print(f"HTTP error occurred: {err}")
+
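+ # Hypothetical standalone usage of download_file (URL and filename are
+ # placeholders, not part of the app): the helper streams the response in
+ # 8 KB chunks and returns the local path on success, or None on an HTTP error.
+ # saved = download_file('https://example.com/report.pdf', 'report.pdf')
+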
+ def download_html_and_files(url, subdir):
+     html_content = requests.get(url).text
+     soup = BeautifulSoup(html_content, 'html.parser')
+     base_url = urllib.parse.urlunparse(urllib.parse.urlparse(url)._replace(path='', params='', query='', fragment=''))
+     for link in soup.find_all('a'):
+         if not link.get('href'):
+             continue  # skip anchors without an href (urljoin would fail on None)
+         file_url = urllib.parse.urljoin(base_url, link.get('href'))
+         local_filename = os.path.join(subdir, urllib.parse.urlparse(file_url).path.split('/')[-1])
+         if not local_filename.endswith('/') and local_filename != subdir:
+             link['href'] = local_filename
+             download_file(file_url, local_filename)
+     with open(os.path.join(subdir, "index.html"), "w") as file:
+         file.write(str(soup))
+
+ def list_files(directory_path='.'):
+     files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))]
+     return [f for f in files if f not in EXCLUDED_FILES]
+
+ def file_editor(file_path):
+     st.write(f"Editing File: {os.path.basename(file_path)}")
+     file_content = ""
+     with open(file_path, "r") as f:
+         file_content = f.read()
+     file_content = st.text_area("Edit the file content:", value=file_content, height=250)
+     if st.button("💾 Save"):
+         with open(file_path, "w") as f:
+             f.write(file_content)
+         st.success(f"File '{os.path.basename(file_path)}' saved!")
+
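+ # file_editor is reached through the `file_to_edit` query parameter handled in
+ # main(); a hypothetical example URL: https://<app-host>/?file_to_edit=notes.md
+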
+ def show_file_operations(file_path, sequence_number):
+     unique_key = hashlib.md5(file_path.encode()).hexdigest()
+     file_content = ""
+     col01, col02, col1, col2, col3 = st.columns(5)
+     with col01:
+         st.write(os.path.basename(file_path))
+     with col1:
+         edit_key = f"edit_{unique_key}_{sequence_number}"
+         if st.button("✏️ Edit", key=edit_key):
+             with open(file_path, "r") as f:
+                 file_content = f.read()
+             text_area_key = f"text_area_{unique_key}_{sequence_number}"
+             file_content = st.text_area("Edit the file content:", value=file_content, height=250, key=text_area_key)
+     with col2:
+         save_key = f"save_{unique_key}_{sequence_number}"
+         if st.button("💾 Save", key=save_key):
+             if file_content:  # Ensure file_content is not empty
+                 with open(file_path, "w") as f:
+                     f.write(file_content)
+                 st.success("File saved!")
+     with col3:
+         delete_key = f"delete_{unique_key}_{sequence_number}"
+         if st.button("🗑️ Delete", key=delete_key):
+             os.remove(file_path)
+             st.markdown("File deleted!")
+
+ file_sequence_numbers = {}
+
+ def show_file_content(file_path):
+     _, file_extension = os.path.splitext(file_path)
+     try:
+         if file_extension in ['.png', '.jpg', '.jpeg']:
+             image_url = file_path.replace('File:', '').replace('/', '')
+             st.write('Image URL: ' + image_url)
+             markdown_link = f"[![Image]({image_url})]({image_url})"
+             st.markdown(markdown_link, unsafe_allow_html=True)
+         elif file_extension in ['.md', '.markdown']:
+             with open(file_path, "r") as file:
+                 content = file.read()
+             edited_content = st.text_area(f"Edit {os.path.basename(file_path)}", value=content, height=250)
+             if st.button(f"Save {os.path.basename(file_path)}"):
+                 with open(file_path, "w") as file:
+                     file.write(edited_content)
+                 st.success(f"Saved {os.path.basename(file_path)}!")
+         elif file_extension in ['.html', '.txt']:
+             with open(file_path, "r") as file:
+                 st.markdown(file.read(), unsafe_allow_html=True)
+     except Exception as e:
+         st.error(f"Error reading file {file_path}: {e}")
+
+ def show_download_links(subdir):
+     global file_sequence_numbers
+     for file in list_files(subdir):
+         file_path = os.path.join(subdir, file)
+         if file_path not in file_sequence_numbers:
+             file_sequence_numbers[file_path] = 1
+         else:
+             file_sequence_numbers[file_path] += 1
+         sequence_number = file_sequence_numbers[file_path]
+         if os.path.isfile(file_path):
+             st.markdown(file_path)  # Display file path
+             show_file_content(file_path)  # Display file content based on type
+         else:
+             st.write(f"File not found: {file}")
+
+ def show_download_links_backup(subdir):
+     global file_sequence_numbers
+     for file in list_files(subdir):
+         file_path = os.path.join(subdir, file)
+         if file_path not in file_sequence_numbers:
+             file_sequence_numbers[file_path] = 1
+         else:
+             file_sequence_numbers[file_path] += 1
+         sequence_number = file_sequence_numbers[file_path]
+         if os.path.isfile(file_path):
+             st.markdown(file_path, unsafe_allow_html=True)  # faster than encapsulating the file in a base64 download link
+             show_file_operations(file_path, sequence_number)
+         else:
+             st.write(f"File not found: {file}")
+
+ def get_download_link(file):
+     with open(file, "rb") as f:
+         data = f.read()  # renamed from `bytes` to avoid shadowing the builtin
+     b64 = base64.b64encode(data).decode()
+     href = f'<a href="data:file/octet-stream;base64,{b64}" download=\'{os.path.basename(file)}\'>Download: {os.path.basename(file)}</a>'
+     return href
+
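+ # Hypothetical sketch combining the helpers above (the app itself never calls
+ # create_zip_of_files): zip every non-excluded file in the working directory
+ # and offer a single download link in the sidebar.
+ # zip_path = create_zip_of_files(list_files('.'))
+ # st.sidebar.markdown(get_zip_download_link(zip_path), unsafe_allow_html=True)
+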
+ def main():
+     st.sidebar.title('🌐 Web Datasets Bulk Downloader')
+     query_params = st.experimental_get_query_params()
+     file_to_edit = query_params.get('file_to_edit', [None])[0]
+     if file_to_edit and os.path.exists(file_to_edit):
+         file_editor(file_to_edit)
+     else:
+         url_input_method = st.sidebar.radio("Choose URL Input Method", ["Enter URL", "Select from List"], index=1)
+         url = ""
+         if url_input_method == "Enter URL":
+             url = st.sidebar.text_input('Please enter a Web URL to bulk download text and files')
+         else:
+             selected_site = st.sidebar.selectbox("Select a Website", list(URLS.keys()), index=0)
+             url = URLS[selected_site]
+
+         # Reading or creating history.json
+         if not os.path.exists("history.json"):
+             with open("history.json", "w") as f:
+                 json.dump({}, f)
+
+         with open("history.json", "r") as f:
+             try:
+                 history = json.load(f)
+             except json.JSONDecodeError:
+                 history = {}  # fall back to an empty history so later lookups don't fail
+
+         # Handling URL submission
+         if url:
+             subdir = hashlib.md5(url.encode()).hexdigest()
+             if not os.path.exists(subdir):
+                 os.makedirs(subdir)
+             if url not in history:
+                 history[url] = subdir
+                 with open("history.json", "w") as f:
+                     json.dump(history, f)
+
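+         # Illustrative note on the mapping above: each URL hashes to a stable
+         # 32-character hex string that names its download folder, e.g.
+         # hashlib.md5('https://www.cdc.gov/'.encode()).hexdigest() returns the
+         # same folder name on every run, so repeat visits reuse the directory.
+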
+         if st.sidebar.button('📥 Get All the Content', help="Download content from the selected URL"):
+             download_html_and_files(url, history[url])
+             show_download_links(history[url])
+
+         if st.sidebar.button('📂 Show Download Links', help="Show all available download links"):
+             for subdir in history.values():
+                 show_download_links(subdir)
+
+         if st.sidebar.button("🗑 Delete All", help="Delete all downloaded content"):
+             # Clear history file
+             with open("history.json", "w") as f:
+                 json.dump({}, f)
+             # Delete all files in subdirectories
+             for subdir in glob.glob('*'):
+                 if os.path.isdir(subdir) and subdir not in EXCLUDED_FILES:
+                     for file in os.listdir(subdir):
+                         file_path = os.path.join(subdir, file)
+                         os.remove(file_path)
+                         st.write(f"Deleted: {file_path}")
+                     os.rmdir(subdir)  # Remove the now-empty directory
+             st.experimental_rerun()
+
+         if st.sidebar.button("⬇️ Download All", help="Download all files in a zip"):
+             start_directory = '.'  # Current directory
+             for zip_file in zip_subdirs(start_directory):
+                 st.sidebar.markdown(zip_file, unsafe_allow_html=True)
+                 st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)
+
+         with st.expander("URL History and Downloaded Files"):
+             try:
+                 for url, subdir in history.items():
+                     st.markdown(f"#### {url}")
+             except Exception:
+                 print('url history is empty')
+
+ if __name__ == "__main__":
+     main()