dioarafl committed
Commit bcb6910
1 Parent(s): 8918d97

Create app.py

Files changed (1)
app.py +94 -0
app.py ADDED
@@ -0,0 +1,94 @@
+import os
+import requests
+import streamlit as st
+from youtube_transcript_api import YouTubeTranscriptApi
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from dotenv import load_dotenv
+
+# Load environment variables from the .env file in the project directory
+load_dotenv()
+
+# Access environment variables
+API_URL = os.getenv('HUGGING_FACE_API_URL')
+API_KEY = os.getenv('HUGGING_FACE_API_KEY')
+
+def get_transcript(youtube_url):
+    # Extract the video ID and drop any trailing query parameters (e.g. "&t=42s")
+    video_id = youtube_url.split("v=")[-1].split("&")[0]
+    transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
+
+    try:
+        # Prefer a manually created transcript when one exists
+        manual_transcripts = [trans for trans in transcript_list if not trans.is_generated]
+        transcript = manual_transcripts[0]
+        language_code = transcript.language_code  # Save the detected language
+    except IndexError:
+        try:
+            # If no manual transcript is found, fall back to an auto-generated one
+            generated_transcripts = [trans for trans in transcript_list if trans.is_generated]
+            transcript = generated_transcripts[0]
+            language_code = transcript.language_code  # Save the detected language
+        except IndexError:
+            # No transcript of any kind is available for this video
+            raise Exception("No suitable transcript found.")
+
+    full_transcript = " ".join([part['text'] for part in transcript.fetch()])
+    return full_transcript, language_code  # Return both the transcript and detected language
+
+def summarize_with_hugging_face(transcript, language_code, model_name='meta-llama/Meta-Llama-3-8B'):
+    # The model is selected by API_URL; model_name is kept for reference only
+    # Split the document if it's too long
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0)
+    texts = text_splitter.split_text(transcript)
+    text_to_summarize = " ".join(texts[:4])  # Adjust this as needed
+
+    # Prepare the prompt for summarization
+    prompt = f'''Summarize the following text in {language_code}.
+Text: {text_to_summarize}
+
+Add a title to the summary in {language_code}.
+Include an INTRODUCTION, BULLET POINTS if possible, and a CONCLUSION in {language_code}.'''
+
+    # The Inference API text-generation task expects a plain string under "inputs"
+    payload = {"inputs": prompt}
+
+    # Start summarizing using Hugging Face
+    headers = {"Authorization": f"Bearer {API_KEY}"}
+    response = requests.post(API_URL, headers=headers, json=payload)
+
+    if response.status_code == 200:
+        # Text-generation responses come back as a list of {"generated_text": ...} objects
+        return response.json()[0]["generated_text"]
+    else:
+        raise Exception(f"Summarization failed: {response.status_code} {response.text}")
+
+def main():
+    st.title('YouTube Video Summarizer')
+    link = st.text_input('Enter the link of the YouTube video you want to summarize:')
+
+    if st.button('Start'):
+        if link:
+            try:
+                progress = st.progress(0)
+                status_text = st.empty()
+
+                status_text.text('Loading the transcript...')
+                progress.progress(25)
+
+                # Get both the transcript and language_code
+                transcript, language_code = get_transcript(link)
+
+                status_text.text('Creating summary...')
+                progress.progress(75)
+
+                summary = summarize_with_hugging_face(transcript, language_code)
+
+                status_text.text('Summary:')
+                st.markdown(summary)
+                progress.progress(100)
+            except Exception as e:
+                st.error(str(e))
+        else:
+            st.write('Please enter a valid YouTube link.')
+
+if __name__ == "__main__":
+    main()
+
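
For reference, a minimal sketch of how the two helpers could be exercised outside the Streamlit UI. It assumes a .env file next to app.py that defines HUGGING_FACE_API_URL (an Inference API endpoint for the chosen model) and HUGGING_FACE_API_KEY (a Hugging Face access token); the video URL below is a placeholder, not a value from this commit.

# Assumes .env provides HUGGING_FACE_API_URL and HUGGING_FACE_API_KEY,
# and that the target video has a manual or auto-generated transcript.
from app import get_transcript, summarize_with_hugging_face

# Placeholder URL: replace VIDEO_ID with a real YouTube video ID
transcript, language_code = get_transcript("https://www.youtube.com/watch?v=VIDEO_ID")
summary = summarize_with_hugging_face(transcript, language_code)
print(language_code)
print(summary)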