wldmr committed on
Commit
68d26c9
1 Parent(s): 6e7590f
Files changed (8)
  1. .gitignore +131 -0
  2. app.py +545 -0
  3. deeppunkt.py +73 -0
  4. lexrank.py +93 -0
  5. metrics.py +69 -0
  6. mysheet.py +41 -0
  7. requirements.txt +15 -0
  8. yt_stats.py +160 -0
.gitignore ADDED
@@ -0,0 +1,131 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ secret*
6
+
7
+ # C extensions
8
+ *.so
9
+
10
+ # Distribution / packaging
11
+ .streamlit
12
+ .Python
13
+ build/
14
+ develop-eggs/
15
+ dist/
16
+ downloads/
17
+ eggs/
18
+ .eggs/
19
+ lib/
20
+ lib64/
21
+ parts/
22
+ sdist/
23
+ var/
24
+ wheels/
25
+ pip-wheel-metadata/
26
+ share/python-wheels/
27
+ *.egg-info/
28
+ .installed.cfg
29
+ *.egg
30
+ MANIFEST
31
+
32
+ # PyInstaller
33
+ # Usually these files are written by a python script from a template
34
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
35
+ *.manifest
36
+ *.spec
37
+
38
+ # Installer logs
39
+ pip-log.txt
40
+ pip-delete-this-directory.txt
41
+
42
+ # Unit test / coverage reports
43
+ htmlcov/
44
+ .tox/
45
+ .nox/
46
+ .coverage
47
+ .coverage.*
48
+ .cache
49
+ nosetests.xml
50
+ coverage.xml
51
+ *.cover
52
+ *.py,cover
53
+ .hypothesis/
54
+ .pytest_cache/
55
+
56
+ # Translations
57
+ *.mo
58
+ *.pot
59
+
60
+ # Django stuff:
61
+ *.log
62
+ local_settings.py
63
+ db.sqlite3
64
+ db.sqlite3-journal
65
+
66
+ # Flask stuff:
67
+ instance/
68
+ .webassets-cache
69
+
70
+ # Scrapy stuff:
71
+ .scrapy
72
+
73
+ # Sphinx documentation
74
+ docs/_build/
75
+
76
+ # PyBuilder
77
+ target/
78
+
79
+ # Jupyter Notebook
80
+ .ipynb_checkpoints
81
+
82
+ # IPython
83
+ profile_default/
84
+ ipython_config.py
85
+
86
+ # pyenv
87
+ .python-version
88
+
89
+ # pipenv
90
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
91
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
92
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
93
+ # install all needed dependencies.
94
+ #Pipfile.lock
95
+
96
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
97
+ __pypackages__/
98
+
99
+ # Celery stuff
100
+ celerybeat-schedule
101
+ celerybeat.pid
102
+
103
+ # SageMath parsed files
104
+ *.sage.py
105
+
106
+ # Environments
107
+ .env
108
+ .venv
109
+ env/
110
+ venv/
111
+ ENV/
112
+ env.bak/
113
+ venv.bak/
114
+
115
+ # Spyder project settings
116
+ .spyderproject
117
+ .spyproject
118
+
119
+ # Rope project settings
120
+ .ropeproject
121
+
122
+ # mkdocs documentation
123
+ /site
124
+
125
+ # mypy
126
+ .mypy_cache/
127
+ .dmypy.json
128
+ dmypy.json
129
+
130
+ # Pyre type checker
131
+ .pyre/
app.py ADDED
@@ -0,0 +1,545 @@
1
+ from youtube_transcript_api import YouTubeTranscriptApi as yta
2
+ from youtube_transcript_api import NoTranscriptFound, TranscriptsDisabled
3
+ import streamlit as st
4
+ from yt_stats import YTstats
5
+ from datetime import datetime
6
+ import isodate
7
+ import pandas as pd
8
+ import deeppunkt
9
+ import time
10
+ import lexrank
11
+ import mysheet
12
+
13
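+ # Decorator that measures a function's run time and reports it in the Streamlit UI.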
+ def time_it(func):
14
+ def wrapper(*args, **kwargs):
15
+ start = time.time()
16
+ result = func(*args, **kwargs)
17
+ end = time.time()
18
+ elapsed = end - start
19
+ #st.write(f"Elapsed time: {end - start}")
20
+ st.write('Load time: '+str(round(elapsed,1))+' sec')
21
+ return result
22
+ return wrapper
23
+
24
+ def reset_session():
25
+ if 'punkt' in st.session_state:
26
+ del st.session_state.punkt
27
+ if 'extract' in st.session_state:
28
+ del st.session_state.extract
29
+ if 'channel_id' in st.session_state:
30
+ del st.session_state.channel_id
31
+
32
+ def update_param_example():
33
+ #st.session_state.url_vid = st.session_state.ex_vid
34
+ video_id = get_id_from_link(st.session_state.ex_vid)
35
+ st.experimental_set_query_params(vid=video_id)
36
+ reset_session()
37
+
38
+ def update_param_textinput():
39
+ #st.session_state.url_vid = st.session_state.ti_vid
40
+ video_id = get_id_from_link(st.session_state.ti_vid)
41
+ st.experimental_set_query_params(vid=video_id)
42
+ reset_session()
43
+
44
+ def get_link_from_id(video_id):
45
+ if "v=" not in video_id:
46
+ return 'https://www.youtube.com/watch?v='+video_id
47
+ else:
48
+ return video_id
49
+
50
+
51
+ def get_id_from_link(link):
52
+ if "v=" in link:
53
+ return link.split("v=")[1].split("&")[0]
54
+ elif len(link)==11:
55
+ return link
56
+ else:
57
+ return "Error: Invalid Link."
58
+
59
+ # @st.cache(allow_output_mutation=True, suppress_st_warning=True)
60
+ # def retry_access_yt_object(url, max_retries=5, interval_secs=5, on_progress_callback=None):
61
+ # """
62
+ # Retries creating a YouTube object with the given URL and accessing its title several times
63
+ # with a given interval in seconds, until it succeeds or the maximum number of attempts is reached.
64
+ # If the object still cannot be created or the title cannot be accessed after the maximum number
65
+ # of attempts, the last exception is raised.
66
+ # """
67
+ # last_exception = None
68
+ # for i in range(max_retries):
69
+ # try:
70
+ # yt = YouTube(url, on_progress_callback=on_progress_callback)
71
+ # #title = yt.title # Access the title of the YouTube object.
72
+ # #views = yt.views
73
+ # return yt # Return the YouTube object if successful.
74
+ # except Exception as err:
75
+ # last_exception = err # Keep track of the last exception raised.
76
+ # st.write(f"Failed to create YouTube object or access title. Retrying... ({i+1}/{max_retries})")
77
+ # time.sleep(interval_secs) # Wait for the specified interval before retrying.
78
+
79
+ # # If the YouTube object still cannot be created or the title cannot be accessed after the maximum number of attempts, raise the last exception.
80
+ # raise last_exception
81
+
82
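+ # Cached video metadata lookup; the leading underscore in _yt keeps the API client out of st.cache_data's cache key.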
+ @st.cache_data()
83
+ def get_video_data(_yt, video_id):
84
+
85
+ yt_img = f'http://img.youtube.com/vi/{video_id}/mqdefault.jpg'
86
+ yt_img_html = '<img src='+yt_img+' width="250" height="150" />'
87
+ yt_img_html_link = '<a href='+get_link_from_id(video_id)+'>'+yt_img_html+'</a>'
88
+
89
+ snippet = _yt._get_single_video_data(video_id,'snippet')
90
+ yt_publish_date = snippet['publishedAt']
91
+ yt_title = snippet['title']
92
+ yt_author = snippet['channelTitle']
93
+ yt_channel_id = snippet['channelId']
94
+
95
+ try:
96
+ yt_keywords = snippet['tags']
97
+ except KeyError:
98
+ yt_keywords = []
99
+
100
+
101
+ statistics = _yt._get_single_video_data(video_id,'statistics')
102
+ yt_views = statistics['viewCount']
103
+ contentDetails = _yt._get_single_video_data(video_id,'contentDetails')
104
+ yt_length = contentDetails['duration']
105
+ yt_length_isodate = isodate.parse_duration(yt_length)
106
+ yt_length_isoformat = isodate.duration_isoformat(yt_length_isodate, "%H:%M:%S")[1:]
107
+
108
+ data = {'Video':[yt_img_html_link],
109
+ 'Author': [yt_author],
110
+ 'Title': [yt_title],
111
+ 'Published': [datetime.strptime(yt_publish_date, '%Y-%m-%dT%H:%M:%SZ').strftime('%B %d, %Y')],
112
+ 'Views':[format(int(yt_views), ",").replace(",", "'")],
113
+ 'Length':[yt_length_isoformat]}
114
+
115
+ return data, yt_keywords, yt_channel_id
116
+
117
+
118
+ @st.cache_data()
119
+ def get_video_data_from_gsheed(df, video_id):
120
+
121
+ yt_img_html_link = df.loc[df["ID"] == video_id]['Video'].to_list()[0]
122
+ yt_author = df.loc[df["ID"] == video_id]['Author'].to_list()[0]
123
+ yt_title = df.loc[df["ID"] == video_id]['Title'].to_list()[0]
124
+ yt_publish_date = df.loc[df["ID"] == video_id]['Published'].to_list()[0]
125
+ yt_views = df.loc[df["ID"] == video_id]['Views'].to_list()[0]
126
+ yt_length_isoformat = df.loc[df["ID"] == video_id]['Length'].to_list()[0]
127
+ yt_keywords = df.loc[df["ID"] == video_id]['Keywords'].to_list()[0].split(';')
128
+ yt_channel_id = df.loc[df["ID"] == video_id]['Channel'].to_list()[0]
129
+
130
+ data = {'Video':[yt_img_html_link],
131
+ 'Author': [yt_author],
132
+ 'Title': [yt_title],
133
+ 'Published': [yt_publish_date],
134
+ 'Views':[yt_views],
135
+ 'Length':[yt_length_isoformat]}
136
+
137
+ return data, yt_keywords, yt_channel_id
138
+
139
+ @time_it
140
+ def get_punctuated_text(raw_text):
141
+ response = deeppunkt.predict('sentences',raw_text)
142
+ st.session_state['punkt'] = response
143
+
144
+
145
+ def get_punctuated_text_to_dict(raw_text):
146
+ #st.session_state['punkt'] = {'data':[raw_text,0,0,0,0], 'duration':0}
147
+ st.session_state['punkt'] = [raw_text,0,0,0,0]
148
+
149
+
150
+ @time_it
151
+ def get_extracted_text(raw_text):
152
+
153
+ response = lexrank.summarize(raw_text)
154
+ st.session_state['extract'] = response
155
+
156
+ def get_extracted_text_to_dict(raw_text):
157
+ st.session_state['extract'] = [raw_text,0,0,0,0]
158
+
159
+
160
+ #######################################################################################
161
+ # Application Start
162
+ #######################################################################################
163
+
164
+
165
+ st.title("Transcriptifier")
166
+ st.subheader("Youtube Transcript Downloader")
167
+
168
+ example_urls = [
169
+ 'https://www.youtube.com/watch?v=8uQDDUfGNPA', # blog
170
+ 'https://www.youtube.com/watch?v=ofZEo0Rzo5s', # h-educate
171
+ 'https://www.youtube.com/watch?v=ReHGSGwV4-A', #wholesale ted
172
+ 'https://www.youtube.com/watch?v=n8JHnLgodRI', #kevindavid
173
+ 'https://www.youtube.com/watch?v=6MI0f6YjJIk', # Nicholas
174
+ 'https://www.youtube.com/watch?v=nr4kmlTr9xw', # Linus
175
+ 'https://www.youtube.com/watch?v=64Izfm24FKA', # Yannic
176
+ 'https://www.youtube.com/watch?v=Mt1P7p9HmkU', # Fogarty
177
+ 'https://www.youtube.com/watch?v=bj9snrsSook', #Geldschnurrbart
178
+ 'https://www.youtube.com/watch?v=0kJz0q0pvgQ', # fcc
179
+ 'https://www.youtube.com/watch?v=gNRGkMeITVU', # iman
180
+ 'https://www.youtube.com/watch?v=vAuQuL8dlXo', #ghiorghiu
181
+ 'https://www.youtube.com/watch?v=5scEDopRAi0', #infohaus
182
+ 'https://www.youtube.com/watch?v=lCnHfTHkhbE', #fcc tutorial
183
+ 'https://www.youtube.com/watch?v=QI2okshNv_4'
184
+ ]
185
+
186
+
187
+ par_vid = st.experimental_get_query_params().get("vid")
188
+ if par_vid:
189
+ par_url = par_vid[0]
190
+ else:
191
+ par_url = None
192
+
193
+ select_examples = st.selectbox(label="Choose an example",options=example_urls, key='ex_vid', on_change=update_param_example)
194
+ url = st.text_input("Or Enter the YouTube video URL or ID:", value=par_url if par_url else select_examples, key='ti_vid', on_change=update_param_textinput)
195
+
196
+
197
+ ########################
198
+ # Load the data for a given video
199
+ ########################
200
+
201
+
202
+ API_KEY = st.secrets["api_key"]
203
+ yt = YTstats(API_KEY)
204
+ #yt = retry_access_yt_object(get_link_from_id(url))
205
+
206
+ if url:
207
+ video_id = get_id_from_link(url)
208
+
209
+ if 'gsheed' not in st.session_state:
210
+ df = mysheet.read_gspread()
211
+ st.session_state.gsheed = df
212
+ #st.write("reading spradsheet")
213
+ else:
214
+ df = st.session_state.gsheed
215
+ #st.write("getting spreadsheed from session_state")
216
+
217
+ gslist=[]
218
+ try:
219
+ gslist = df.ID.to_list()
220
+ except:
221
+ st.write('no items available.')
222
+
223
+ if video_id in gslist:
224
+ #st.write(df.loc[df["ID"] == video_id])
225
+ st.write("reading from sheet")
226
+ #transcript_item_is_generated = False
227
+ #transcript_text = df.loc[df["ID"] == video_id]['Punkttext'].to_list()[0]
228
+ #get_punctuated_text_to_dict(transcript_text)
229
+ extracted_text = df.loc[df["ID"] == video_id]['Lextext'].to_list()[0]
230
+ get_extracted_text_to_dict(extracted_text)
231
+
232
+ video_data, yt_keywords, yt_channel_id = get_video_data_from_gsheed(df, video_id)
233
+ else:
234
+ st.write("reading from api")
235
+ video_data, yt_keywords, yt_channel_id = get_video_data(yt, video_id)
236
+
237
+ st.session_state["video_data"] = video_data
238
+ st.session_state["keywords"] = yt_keywords
239
+ st.session_state["channel_id"] = yt_channel_id
240
+
241
+
242
+ df = pd.DataFrame(st.session_state["video_data"])
243
+ st.markdown(df.style.hide(axis="index").to_html(), unsafe_allow_html=True)
244
+ st.write("")
245
+
246
+ ###########################
247
+ # Load Transcript
248
+ ###########################
249
+
250
+ transcript_list = yta.list_transcripts(video_id)
251
+
252
+ transcript_raw = None
253
+ transcript_item = transcript_list.find_transcript(['en'])
254
+ transcript_item_is_generated = transcript_item.is_generated
255
+ transcript_raw = transcript_item.fetch()
256
+
257
+ if transcript_raw is None:
258
+ st.error("No transcript available.")
259
+ st.stop()
260
+
261
+ transcript_text = '\n'.join([i['text'].replace('\n',' ') for i in transcript_raw])
262
+
263
+ ########################
264
+ # Load author keywords, which are not viewable by users
265
+ ########################
266
+
267
+ keywords_data = {'Author\'s Keywords':yt_keywords}
268
+ st.table(keywords_data)
269
+ st.write("")
270
+
271
+ # TODO
272
+ # For this video (bj9snrsSook), transcripts are available in the following languages:
273
+
274
+ # (MANUALLY CREATED)
275
+ # None
276
+
277
+ # (GENERATED)
278
+ # - de ("Deutsch (automatisch erzeugt)")[TRANSLATABLE]
279
+
280
+ # (TRANSLATION LANGUAGES)
281
+ # - af ("Afrikaans")
282
+
283
+
284
+ ########################
285
+ # Display the transcript along with the download button
286
+ ########################
287
+
288
+ with st.expander('Preview Transcript'):
289
+ st.code(transcript_text, language=None)
290
+ st.download_button('Download Transcript', transcript_text)
291
+
292
+ ########################
293
+ # API Call to deeppunkt-gr
294
+ ########################
295
+
296
+
297
+ st.subheader("Restore Punctuations of Transcript")
298
+ if not transcript_item_is_generated:
299
+ st.write("Transcript is punctuated by author.")
300
+ # TODO
301
+ #check if the transcript contains more than 5 sentences
302
+
303
+ if st.button('Load Punctuated Transcript'):
304
+ with st.spinner('Loading Punctuation...'):
305
+ if 'punkt' not in st.session_state:
306
+ # first figure out if transcript is already punctuated
307
+ if transcript_item_is_generated:
308
+ get_punctuated_text(transcript_text)
309
+ else:
310
+ get_punctuated_text_to_dict(transcript_text)
311
+ #st.write('Load time: '+str(round(st.session_state.punkt['duration'],1))+' sec')
312
+ metrics_data = {'Words':[int(st.session_state.punkt[1])],
313
+ 'Sentences': [int(st.session_state.punkt[2])],
314
+ 'Characters': [int(st.session_state.punkt[3])],
315
+ 'Tokens':[int(st.session_state.punkt[4])]}
316
+ df = pd.DataFrame(metrics_data)
317
+ st.markdown(df.style.hide(axis="index").to_html(), unsafe_allow_html=True)
318
+ st.write("")
319
+ with st.expander('Preview Transcript'):
320
+ st.code(st.session_state.punkt[0], language=None)
321
+
322
+ ########################
323
+ # Call to lexrank-gr
324
+ ########################
325
+
326
+ st.subheader("Extract Core Sentences from Transcript")
327
+
328
+ if st.button('Extract Sentences'):
329
+ # decide if the extract is already available; if not, the text has to be punctuated first
330
+ with st.spinner('Loading Extractions ...'):
331
+ if 'extract' not in st.session_state:
332
+ with st.spinner('Loading Punctuation for Extraction ...'):
333
+ if 'punkt' not in st.session_state:
334
+ # first figure out if transcript is already punctuated
335
+ if transcript_item_is_generated:
336
+ get_punctuated_text(transcript_text)
337
+ else:
338
+ get_punctuated_text_to_dict(transcript_text)
339
+
340
+ get_extracted_text(st.session_state.punkt[0])
341
+
342
+ metrics_data = {'Words':[int(st.session_state.extract[1])],
343
+ 'Sentences': [int(st.session_state.extract[2])],
344
+ 'Characters': [int(st.session_state.extract[3])],
345
+ 'Tokens':[int(st.session_state.extract[4])]}
346
+
347
+ df = pd.DataFrame(metrics_data)
348
+ st.markdown(df.style.hide(axis="index").to_html(), unsafe_allow_html=True)
349
+ st.write("")
350
+
351
+ with st.expander('Preview Transcript'):
352
+ st.code(st.session_state.extract[0], language=None)
353
+
354
+ ################
355
+ if 'extract' not in st.session_state:
356
+ st.error('Please run extraction first.', icon="🚨")
357
+ else:
358
+ transcript_info = {'Words':[int(st.session_state.extract[1])],
359
+ 'Sentences': [int(st.session_state.extract[2])],
360
+ 'Characters': [int(st.session_state.extract[3])],
361
+ 'Tokens':[int(st.session_state.extract[4])],
362
+ 'Lextext':[st.session_state.extract[0]]}
363
+
364
+ yt_img = f'http://img.youtube.com/vi/{video_id}/mqdefault.jpg'
365
+ yt_img_html = '<img src='+yt_img+' width="250" height="150" />'
366
+ yt_img_html_link = '<a href='+url+'>'+yt_img_html+'</a>'
367
+ video_info = {'ID': [video_id],
368
+ 'Video':[yt_img_html_link],
369
+ 'Author': [st.session_state["video_data"]["Author"][0]],
370
+ 'Channel':[st.session_state["channel_id"]],
371
+ 'Title': [st.session_state["video_data"]["Title"][0]],
372
+ 'Published': [st.session_state["video_data"]["Published"][0]],
373
+ 'Views':[st.session_state["video_data"]["Views"][0]],
374
+ 'Length':[st.session_state["video_data"]["Length"][0]],
375
+ 'Keywords':['; '.join(st.session_state["keywords"])]}
376
+ df_current_ts = pd.DataFrame({**video_info,**transcript_info})
377
+
378
+ # initial write.
379
+ #df_new_sheet = pd.concat([df_current_ts])
380
+ #mysheet.write_gspread(df_new_sheet)
381
+ #st.write(video_info)
382
+
383
+ if 'gsheed' not in st.session_state:
384
+ df = mysheet.read_gspread()
385
+ st.session_state.gsheed = df
386
+
387
+ df_sheet = st.session_state.gsheed
388
+ df_current_ts_id = list(df_current_ts.ID)[0]
389
+ if df_current_ts_id not in list(df_sheet.ID):
390
+ df_new_sheet = pd.concat([df_sheet,df_current_ts])
391
+ mysheet.write_gspread(df_new_sheet)
392
+ st.session_state.gsheed = df_new_sheet
393
+ st.write('video added to sheet')
394
+ #else:
395
+ # st.write('video already in sheet')
396
+ # st.write(df_sheet)
397
+
398
+
399
+ #######################
400
+ # Read the gspread file
401
+ ########################
402
+
403
+ if st.button('Read Spreadsheet'):
404
+
405
+ if 'gsheed' not in st.session_state:
406
+ df = mysheet.read_gspread()
407
+ st.session_state.gsheed = df
408
+
409
+ st.write(st.session_state.gsheed)
410
+
411
+
412
+ #if st.button('Add to Spreadsheet'):
413
+
414
+
415
+
416
+
417
+ #######################
418
+ # API Call to summarymachine
419
+ ########################
420
+
421
+ # def get_summarized_text(raw_text):
422
+ # response = requests.post("https://wldmr-summarymachine.hf.space/run/predict", json={
423
+ # "data": [
424
+ # raw_text,
425
+ # ]})
426
+ # #response_id = response
427
+ # if response.status_code == 504:
428
+ # raise "Error: Request took too long (>60sec), please try a shorter text."
429
+ # return response.json()
430
+
431
+ # st.subheader("Summarize Extracted Sentences with Flan-T5-large")
432
+
433
+ # if st.button('Summarize Sentences'):
434
+ # command = 'Summarize the transcript in one sentence:\n\n'
435
+ # with st.spinner('Loading Punctuation (Step 1/3)...'):
436
+ # if 'punkt' not in st.session_state:
437
+ # # first figure out if transcript is already punctuated
438
+ # if transcript_item.is_generated:
439
+ # get_punctuated_text(transcript_text)
440
+ # else:
441
+ # get_punctuated_text_to_dict(transcript_text)
442
+ # with st.spinner('Loading Extraction (Step 2/3)...'):
443
+ # if 'extract' not in st.session_state:
444
+ # get_extracted_text(st.session_state.punkt['data'][0])
445
+ # with st.spinner('Loading Summary (Step 3/3)...'):
446
+ # summary_text = get_summarized_text(command+st.session_state.extract['data'][0])
447
+ # st.write('Load time: '+str(round(summary_text['duration'],1))+' sec')
448
+ # with st.expander('Preview Transcript'):
449
+ # st.write(summary_text['data'][0], language=None)
450
+
451
+ ########################
452
+ # Channel
453
+ ########################
454
+
455
+
456
+ st.subheader("Other Videos of the Channel")
457
+ #st.write(st.session_state["channel_id"])
458
+ if 'channel_id' not in st.session_state:
459
+ st.error('Channel ID not available.', icon="🚨")
460
+ else:
461
+ yt.get_channel_statistics(st.session_state["channel_id"])
462
+ stats_data = {'Channel ID': [st.session_state["channel_id"]],
463
+ 'Total Views':[format(int(yt.channel_statistics["viewCount"]), ",").replace(",", "'")],
464
+ 'Total Subscribers':[format(int(yt.channel_statistics["subscriberCount"]), ",").replace(",", "'")],
465
+ 'Total Videos':[format(int(yt.channel_statistics["videoCount"]), ",").replace(",", "'")],
466
+ }
467
+ df = pd.DataFrame(stats_data)
468
+ st.markdown(df.style.hide(axis="index").to_html(), unsafe_allow_html=True)
469
+ st.write("")
470
+
471
+
472
+ if st.button('Load Videos'):
473
+
474
+ progress_text = 'Loading...'
475
+ loading_bar = st.progress(0, text=progress_text)
476
+ item_limit=3
477
+ yt.get_channel_video_data(st.session_state["channel_id"],loading_bar, progress_text, item_limit)
478
+
479
+ #with st.spinner('Loading...'):
480
+ #yt.get_channel_video_data(st.session_state["channel_id"])
481
+ #videos = scrapetube.get_channel(yt.channel_id, limit=3, sleep=2)
482
+
483
+
484
+ vids_thumbnails = []
485
+ vids_videoIds = []
486
+ vids_titles = []
487
+ vids_lengths = []
488
+ vids_published= []
489
+ vids_views= []
490
+ item=0
491
+ for video in yt.video_data:
492
+ if item == item_limit:
493
+ break
494
+ item = item+1
495
+
496
+ vids_video_id = video
497
+ vids_url = 'https://www.youtube.com/watch?v='+vids_video_id
498
+
499
+ yt_img = f'http://img.youtube.com/vi/{vids_video_id}/mqdefault.jpg'
500
+ yt_img_html = '<img src='+yt_img+' width="250" height="150" />'
501
+ yt_img_html_link = '<a href='+vids_url+'>'+yt_img_html+'</a>'
502
+ vids_thumbnails.append(yt_img_html_link)
503
+
504
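+ # Link back into this app with the video preselected via the ?vid query parameter.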
+ vids_video_id_link = '<a target="_self" href="/?vid='+vids_video_id+'">'+vids_video_id+'</a>'
505
+ vids_videoIds.append(vids_video_id_link)
506
+
507
+ vids_titles.append(yt.video_data[video]['title'])
508
+
509
+ yt_length = yt.video_data[video]['duration']
510
+ yt_length_isodate = isodate.parse_duration(yt_length)
511
+ yt_length_isoformat = isodate.duration_isoformat(yt_length_isodate, "%H:%M:%S")[1:]
512
+ vids_lengths.append(yt_length_isoformat)
513
+
514
+ yt_publish_date = yt.video_data[video]['publishedAt']
515
+ yt_publish_date_formatted = datetime.strptime(yt_publish_date, '%Y-%m-%dT%H:%M:%SZ').strftime('%B %d, %Y')
516
+ vids_published.append(yt_publish_date_formatted)
517
+
518
+ yt_views = yt.video_data[video]['viewCount']
519
+ yt_views_formatted = format(int(yt_views), ",").replace(",", "'")
520
+ vids_views.append(yt_views_formatted)
521
+
522
+ df_videos = {'Video': vids_thumbnails,
523
+ 'Video ID':vids_videoIds,
524
+ 'Title':vids_titles,
525
+ 'Published':vids_published,
526
+ 'Views':vids_views,
527
+ 'Length':vids_lengths}
528
+
529
+
530
+ dataset = pd.DataFrame(df_videos)
531
+ st.markdown(dataset.style.hide(axis="index").to_html(), unsafe_allow_html=True)
532
+
533
+
534
+
535
+ ###############
536
+ # End of File #
537
+ ###############
538
+ # hide_streamlit_style = """
539
+ # <style>
540
+ # #MainMenu {visibility: hidden;}
541
+ # footer {visibility: hidden;}
542
+ # </style>
543
+ # """
544
+ # st.markdown(hide_streamlit_style, unsafe_allow_html=True)
545
+
deeppunkt.py ADDED
@@ -0,0 +1,73 @@
1
+ from deepmultilingualpunctuation import PunctuationModel
2
+ import re
3
+ import metrics
4
+
5
+ def remove_filler_words(transcript):
6
+
7
+ # preserve line breaks
8
+ transcript_hash = " # ".join(transcript.strip().splitlines())
9
+ # preprocess the text by removing filler words
10
+ # Define a list of filler words to remove
11
+ filler_words = ["um", "uh", "hmm", "ha", "er", "ah", "yeah"]
12
+ words = transcript_hash.split()
13
+ clean_words = [word for word in words if word.lower() not in filler_words]
14
+ input_text_clean = ' '.join(clean_words)
15
+ # restore the line breaks
16
+ input_text= input_text_clean.replace(' # ','\n')
17
+ return input_text
18
+ # Define a regular expression pattern that matches any filler word surrounded by whitespace or punctuation
19
+ #pattern = r"(?<=\s|\b)(" + "|".join(fillers) + r")(?=\s|\b)"
20
+ # Use re.sub to replace the filler words with empty strings
21
+ #clean_input_text = re.sub(pattern, "", input_text)
22
+
23
+ def predict(brakes, transcript):
24
+
25
+ input_text = remove_filler_words(transcript)
26
+ # Do the punctuation restoration
27
+ model = PunctuationModel()
28
+ output_text = model.restore_punctuation(input_text)
29
+
30
+ # if none of the line break methods below applies,
31
+ # the text is returned as a single block
32
+ pcnt_file_cr = output_text
33
+
34
+ if 'textlines' in brakes:
35
+
36
+ # preserve line breaks
37
+ srt_file_hash = '# '.join(input_text.strip().splitlines())
38
+ #srt_file_sub=re.sub('\s*\n\s*','# ',srt_file_strip)
39
+ srt_file_array=srt_file_hash.split()
40
+ pcnt_file_array=output_text.split()
41
+
42
+ # goal: restore the break points i.e. the same number of lines as the srt file
43
+ # this is necessary, because each line in the srt file corresponds to a frame from the video
44
+ if len(srt_file_array)!=len(pcnt_file_array):
45
+ return "AssertError: The length of the transcript and the punctuated file should be the same: ",len(srt_file_array),len(pcnt_file_array)
46
+
47
+ pcnt_file_array_hash = []
48
+ for idx, item in enumerate(srt_file_array):
49
+ if item.endswith('#'):
50
+ pcnt_file_array_hash.append(pcnt_file_array[idx]+'#')
51
+ else:
52
+ pcnt_file_array_hash.append(pcnt_file_array[idx])
53
+
54
+ # assemble the array back to a string
55
+ pcnt_file_cr=' '.join(pcnt_file_array_hash).replace('#','\n')
56
+
57
+ elif 'sentences' in brakes:
58
+ split_text = output_text.split('. ')
59
+ pcnt_file_cr = '.\n'.join(split_text)
60
+
61
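+ # Capitalize standalone 'i', the first letter after sentence-ending punctuation, and the very first character of the text.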
+ regex1 = r"\bi\b"
62
+ regex2 = r"(?<=[.?!;])\s*\w"
63
+ regex3 = r"^\w"
64
+ pcnt_file_cr_cap = re.sub(regex3, lambda x: x.group().upper(), re.sub(regex2, lambda x: x.group().upper(), re.sub(regex1, "I", pcnt_file_cr)))
65
+
66
+ metrics.load_nltk()
67
+ n_tokens= metrics.num_tokens(pcnt_file_cr_cap)
68
+ n_sents = metrics.num_sentences(pcnt_file_cr_cap)
69
+ n_words = metrics.num_words(pcnt_file_cr_cap)
70
+ n_chars = metrics.num_chars(pcnt_file_cr_cap)
71
+
72
+ return pcnt_file_cr_cap, n_words, n_sents, n_chars, n_tokens
73
+
lexrank.py ADDED
@@ -0,0 +1,93 @@
1
+
2
+
3
+ from sumy.parsers.html import HtmlParser
4
+ from sumy.parsers.plaintext import PlaintextParser
5
+ from sumy.nlp.tokenizers import Tokenizer
6
+ from sumy.summarizers.lex_rank import LexRankSummarizer
7
+ from sumy.nlp.stemmers import Stemmer
8
+ from sumy.utils import get_stop_words
9
+ import metrics
10
+ import os
11
+ import nltk
12
+
13
+ def summarize(in_text):
14
+
15
+ if len(in_text)==0:
16
+ return 'Error: No text provided', None
17
+
18
+ nltk_file = '/home/user/nltk_data/tokenizers/punkt.zip'
19
+ if os.path.exists(nltk_file):
20
+ print('nltk punkt file exists in ', nltk_file)
21
+ else:
22
+ print("downloading punkt file")
23
+ nltk.download('punkt')
24
+
25
+ in_longtext = []
26
+ # Discard all sentences that have fewer than 10 words in them
27
+ in_text_sentenses = in_text.split('.')
28
+
29
+ for sen in in_text_sentenses:
30
+ sen_split = sen.split()
31
+ len_sen_split = len(sen_split)
32
+ if len_sen_split > 10:
33
+ in_longtext.append(sen)
34
+ in_text = '.'.join(in_longtext)+'.'
35
+
36
+ # The size of the summary is limited to 1024 tokens.
37
+ # The LexRank algorithm accepts only a sentence count as a limit.
38
+ # We start with one sentence and check the token size,
39
+ # then increase the number of sentences until the token size
40
+ # of the next sentence exceeds the limit.
41
+ target_tokens = 1024
42
+
43
+ in_sents = metrics.num_sentences(in_text)
44
+
45
+ out_text = get_Summary(in_text,1)
46
+ n_tokens= metrics.num_tokens(out_text)
47
+ prev_n_tokens=0
48
+ for sen in range(2, in_sents):
49
+ if n_tokens >= target_tokens:
50
+ n_tokens = prev_n_tokens
51
+ break
52
+ else:
53
+ out_text = get_Summary(in_text,sen)
54
+ prev_n_tokens = n_tokens
55
+ n_tokens= metrics.num_tokens(out_text)
56
+
57
+ n_sents = metrics.num_sentences(out_text)
58
+ n_words = metrics.num_words(out_text)
59
+ n_chars = metrics.num_chars(out_text)
60
+
61
+ return out_text, n_words, n_sents, n_chars, n_tokens
62
+
63
+ def get_Summary(in_text, nr_sentences):
64
+
65
+ #sentences = in_text.split('. ')
66
+ # summarize small part of the text
67
+ #nr_sentences = 1 #len(sentences)
68
+ #print('nr_sentences: '+str(nr_sentences))
69
+
70
+ if nr_sentences == 0:
71
+ return 'Error: No sentences available', None
72
+ list_summary = get_Lexrank(in_text,nr_sentences)
73
+ # it can happen that for lexrank a sentence consists of multiple actual sentences,
74
+ # that are separated by full stops. Then the corresponding timestamp cannot be found.
75
+ # All items from the LexRank summary must be concatenated and split up by full stops.
76
+ concat_list_summary = '. '.join([str(item).replace('.','') for item in list_summary])#.split('. ')
77
+ concat_list_summary = concat_list_summary.replace('\\n','')
78
+ concat_list_summary = concat_list_summary.replace('. ','.\n')+'.'
79
+
80
+ return concat_list_summary
81
+
82
+ def get_Lexrank(text, nr_sentences):
83
+ summary=[]
84
+ LANGUAGE = "english"
85
+ SENTENCES_COUNT = nr_sentences
86
+ parser = PlaintextParser.from_string(text, Tokenizer(LANGUAGE))
87
+ stemmer = Stemmer(LANGUAGE)
88
+ summarizer = LexRankSummarizer(stemmer)
89
+ summarizer.stop_words = get_stop_words(LANGUAGE)
90
+ for sentence in summarizer(parser.document, SENTENCES_COUNT):
91
+ summary.append(sentence)
92
+
93
+ return summary
metrics.py ADDED
@@ -0,0 +1,69 @@
1
+ # Import nltk library for natural language processing
2
+ import nltk
3
+ import os
4
+ from transformers import AutoTokenizer
5
+
6
+ def load_nltk():
7
+ nltk_file = '/home/user/nltk_data/tokenizers/punkt.zip'
8
+ if os.path.exists(nltk_file):
9
+ print('nltk punkt file exists in ', nltk_file)
10
+ else:
11
+ print("downloading punkt file")
12
+ nltk.download('punkt')
13
+
14
+
15
+ # Define a function that takes some text as input and returns the number of tokens
16
+ def token_count(text):
17
+ # Import the Encoder class from bpe
18
+ from bpe import Encoder
19
+ # Create an encoder object with a large vocabulary size
20
+ encoder = Encoder(vocab_size=14735746)
21
+
22
+ # Train the encoder on the text
23
+ encoder.fit(text.split())
24
+
25
+ # Encode the text into tokens
26
+ tokens = encoder.tokenize(text)
27
+
28
+ # Return the tokens
29
+ return tokens
30
+
31
+ def num_tokens(text):
32
+
33
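+ # Count tokens with the GPT-2 tokenizer from Hugging Face transformers.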
+ tokenizer = AutoTokenizer.from_pretrained("gpt2")
34
+
35
+ token_ids = tokenizer.encode(text)
36
+
37
+ token_size = len(token_ids)
38
+
39
+ return token_size
40
+
41
+ def num_words(text):
42
+ sentences = nltk.sent_tokenize(text)
43
+ # Tokenize each sentence into words using nltk.word_tokenize()
44
+ words = []
45
+ for sentence in sentences:
46
+ words.extend(nltk.word_tokenize(sentence))
47
+
48
+ num_words = len(words)
49
+
50
+ return num_words
51
+
52
+ def num_sentences(text):
53
+ # Tokenize the text into sentences using nltk.sent_tokenize()
54
+ sentences = nltk.sent_tokenize(text)
55
+ num_sentences = len(sentences)
56
+ return num_sentences
57
+
58
+
59
+ def num_chars(text):
60
+ num_characters = len(text)
61
+ return num_characters
62
+
63
+
64
+ # Print out the results
65
+ # print(f"Number of sentences: {num_sentences}")
66
+ # print(f"Number of words: {num_words}")
67
+ # print(f"Number of tokens: {num_tokens}")
68
+ # print(f"Number of trans_tokens: {trans_tokens}")
69
+ # print(f"Number of characters: {num_characters}")
mysheet.py ADDED
@@ -0,0 +1,41 @@
1
+ import streamlit as st
2
+ #from google.oauth2 import service_account
3
+ import pandas as pd
4
+ import gspread
5
+ import json
6
+
7
+ def get_gspread_connection():
8
+ # Create a connection object.
9
+ # credentials = service_account.Credentials.from_service_account_info(
10
+ # st.secrets["gcp_service_account"],
11
+ # scopes=[
12
+ # "https://www.googleapis.com/auth/spreadsheets",
13
+ # ],
14
+ # )
15
+ #client = gspread.authorize(credentials)
16
+
17
+ st_credentials = st.secrets["gcp_service_account"]
18
+ if type(st_credentials) is str:
19
+ print("INFO: transforming str to dict")
20
+ credentials_dict = json.loads(st_credentials, strict=False)
21
+ client = gspread.service_account_from_dict(credentials_dict)
22
+ else:
23
+ print("INFO: using credentials in dict")
24
+ client = gspread.service_account_from_dict(st_credentials)
25
+
26
+
27
+ st_sheet_url = st.secrets["private_gsheets_url"]
28
+ spreadsheet = client.open_by_url(st_sheet_url)
29
+ worksheet = spreadsheet.get_worksheet(0)
30
+ return worksheet
31
+
32
+ #@st.cache_data
33
+ def read_gspread():
34
+ worksheet = get_gspread_connection()
35
+ df = pd.DataFrame(worksheet.get_all_records())
36
+ return df
37
+
38
+ def write_gspread(df):
39
+ #df.loc[len(df)] = ['Mia','worst']
40
+ worksheet = get_gspread_connection()
41
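+ # Write the header row followed by all data rows back to the sheet.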
+ worksheet.update([df.columns.values.tolist()] + df.values.tolist())
requirements.txt ADDED
@@ -0,0 +1,15 @@
1
+ streamlit
2
+ youtube-transcript-api
3
+ pandas
4
+ requests
5
+ transformers
6
+ torch
7
+ sentencepiece
8
+ deepmultilingualpunctuation
9
+ nltk
10
+ sumy
11
+ google
12
+ google-auth
13
+ google-auth-oauthlib
14
+ gspread
15
+ isodate
yt_stats.py ADDED
@@ -0,0 +1,160 @@
1
+ import json
2
+ import requests
3
+ from tqdm import tqdm
4
+ import isodate
5
+
6
+ class YTstats:
7
+
8
+ def __init__(self, api_key):
9
+ self.api_key = api_key
10
+ self.channel_statistics = None
11
+ self.video_data = None
12
+
13
+ def extract_all(self, channel_id):
14
+ self.get_channel_statistics(channel_id)
15
+ self.get_channel_video_data(channel_id)
16
+
17
+ def get_channel_statistics(self, channel_id):
18
+ """Extract the channel statistics"""
19
+ print('get channel statistics...')
20
+ url = f'https://www.googleapis.com/youtube/v3/channels?part=statistics&id={channel_id}&key={self.api_key}'
21
+ #pbar = tqdm(total=1)
22
+
23
+ json_url = requests.get(url)
24
+ data = json.loads(json_url.text)
25
+ try:
26
+ data = data['items'][0]['statistics']
27
+ except KeyError:
28
+ print('Could not get channel statistics')
29
+ data = {}
30
+
31
+ self.channel_statistics = data
32
+ #pbar.update()
33
+ #pbar.close()
34
+ return data
35
+
36
+ def get_channel_video_data(self, channel_id, loading_bar, progress_text, item_limit=3):
37
+ "Extract all video information of the channel"
38
+ print('get video data...')
39
+ channel_videos, channel_playlists = self._get_channel_content(channel_id, limit=50)
40
+
41
+ channel_videos_out = dict()
42
+
43
+ total_items = len(channel_videos)
44
+ item = 0
45
+ step_size=0
46
+ step=0
47
+ if total_items!=0:
48
+ step_size=round(1/total_items,4)
49
+ #step = step_size
50
+ parts=["snippet", "statistics","contentDetails", "topicDetails"]
51
+ for video_id in tqdm(channel_videos):
52
+ if item == item_limit:
53
+ break
54
+
55
+ loading_bar.progress(step, text=progress_text)
56
+
57
+ for part in parts:
58
+ data = self._get_single_video_data(video_id, part)
59
+ channel_videos[video_id].update(data)
60
+
61
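+ # Keep only videos longer than 4 minutes.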
+ duration = isodate.parse_duration(channel_videos[video_id]['duration'])
62
+ short_duration = isodate.parse_duration('PT4M')
63
+
64
+ if duration > short_duration:
65
+ item = item+1
66
+ step = step +step_size
67
+ channel_videos_out[video_id] = channel_videos[video_id]
68
+
69
+
70
+ step=1.0
71
+ loading_bar.progress(step, text=progress_text)
72
+ self.video_data = channel_videos_out
73
+
74
+
75
+ def _get_single_video_data(self, video_id, part):
76
+ """
77
+ Extract further information for a single video
78
+ parts can be: 'snippet', 'statistics', 'contentDetails', 'topicDetails'
79
+ """
80
+
81
+ url = f"https://www.googleapis.com/youtube/v3/videos?part={part}&id={video_id}&key={self.api_key}"
82
+ json_url = requests.get(url)
83
+ data = json.loads(json_url.text)
84
+ try:
85
+ data = data['items'][0][part]
86
+ except KeyError as e:
87
+ print(f'Error! Could not get {part} part of data: \n{data}')
88
+ data = dict()
89
+ return data
90
+
91
+ def _get_channel_content(self, channel_id, limit=None, check_all_pages=True):
92
+ """
93
+ Extract all videos and playlists, can check all available search pages
94
+ channel_videos = videoId: title, publishedAt
95
+ channel_playlists = playlistId: title, publishedAt
96
+ return channel_videos, channel_playlists
97
+ """
98
+ url = f"https://www.googleapis.com/youtube/v3/search?key={self.api_key}&channelId={channel_id}&part=snippet,id&order=date"
99
+ if limit is not None and isinstance(limit, int):
100
+ url += "&maxResults=" + str(limit)
101
+
102
+ vid, pl, npt = self._get_channel_content_per_page(url)
103
+ idx = 0
104
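+ # Follow nextPageToken to collect further result pages, capped at 10 pages.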
+ while(check_all_pages and npt is not None and idx < 10):
105
+ nexturl = url + "&pageToken=" + npt
106
+ next_vid, next_pl, npt = self._get_channel_content_per_page(nexturl)
107
+ vid.update(next_vid)
108
+ pl.update(next_pl)
109
+ idx += 1
110
+
111
+ return vid, pl
112
+
113
+ def _get_channel_content_per_page(self, url):
114
+ """
115
+ Extract all videos and playlists per page
116
+ return channel_videos, channel_playlists, nextPageToken
117
+ """
118
+ json_url = requests.get(url)
119
+ data = json.loads(json_url.text)
120
+ channel_videos = dict()
121
+ channel_playlists = dict()
122
+ if 'items' not in data:
123
+ print('Error! Could not get correct channel data!\n', data)
124
+ return channel_videos, channel_playlists, None
125
+
126
+ nextPageToken = data.get("nextPageToken", None)
127
+
128
+ item_data = data['items']
129
+ for item in item_data:
130
+ try:
131
+ kind = item['id']['kind']
132
+ published_at = item['snippet']['publishedAt']
133
+ title = item['snippet']['title']
134
+ if kind == 'youtube#video':
135
+ video_id = item['id']['videoId']
136
+ channel_videos[video_id] = {'publishedAt': published_at, 'title': title}
137
+ elif kind == 'youtube#playlist':
138
+ playlist_id = item['id']['playlistId']
139
+ channel_playlists[playlist_id] = {'publishedAt': published_at, 'title': title}
140
+ except KeyError as e:
141
+ print('Error! Could not extract data from item:\n', item)
142
+
143
+ return channel_videos, channel_playlists, nextPageToken
144
+
145
+ def dump(self, channel_id):
146
+ """Dumps channel statistics and video data in a single json file"""
147
+ if self.channel_statistics is None or self.video_data is None:
148
+ print('data is missing!\nCall get_channel_statistics() and get_channel_video_data() first!')
149
+ return
150
+
151
+ fused_data = {channel_id: {"channel_statistics": self.channel_statistics,
152
+ "video_data": self.video_data}}
153
+
154
+ channel_title = self.video_data.popitem()[1].get('channelTitle', channel_id)
155
+ channel_title = channel_title.replace(" ", "_").lower()
156
+ filename = channel_title + '.json'
157
+ with open(filename, 'w') as f:
158
+ json.dump(fused_data, f, indent=4)
159
+
160
+ print('file dumped to', filename)