AkashKhamkar commited on
Commit
d47a984
1 Parent(s): 521e17f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -0
app.py CHANGED
@@ -152,6 +152,7 @@ def clean_text(link,start,end):
152
  sf = pd.DataFrame(columns=['Segmented_Text','video_id'])
153
 
154
  text = segment(transcript.at[0,'text'])
 
155
  for i in range(len(text)):
156
  sf.loc[i, 'Segmented_Text'] = text[i]
157
  sf.loc[i, 'video_id'] = transcript.at[0,'video_id']
@@ -164,6 +165,7 @@ def clean_text(link,start,end):
164
  return texts
165
 
166
  for i in range(len(sf)):
 
167
  sf.loc[i, 'Segmented_Text'] = word_seg(sf.at[i, 'Segmented_Text'])
168
  sf.loc[i, 'Lengths'] = len(tokenizer(sf.at[i, 'Segmented_Text'])['input_ids'])
169
 
 
152
  sf = pd.DataFrame(columns=['Segmented_Text','video_id'])
153
 
154
  text = segment(transcript.at[0,'text'])
155
+
156
  for i in range(len(text)):
157
  sf.loc[i, 'Segmented_Text'] = text[i]
158
  sf.loc[i, 'video_id'] = transcript.at[0,'video_id']
 
165
  return texts
166
 
167
  for i in range(len(sf)):
168
+ st.write(sf.at[i, 'Segmented_Text'])
169
  sf.loc[i, 'Segmented_Text'] = word_seg(sf.at[i, 'Segmented_Text'])
170
  sf.loc[i, 'Lengths'] = len(tokenizer(sf.at[i, 'Segmented_Text'])['input_ids'])
171