import os
import re
import shutil

import torch
import torchaudio
import ffmpeg
import yt_dlp
import gradio as gr
from torch.utils.data import Dataset, DataLoader
from youtube_transcript_api import (
    YouTubeTranscriptApi,
    TranscriptsDisabled,
    NoTranscriptFound,
    CouldNotRetrieveTranscript,
    VideoUnavailable,
)
from youtube_transcript_api.formatters import TextFormatter
from transformers import (
    pipeline,
    WhisperProcessor,
    WhisperForConditionalGeneration,
)


# === UTILITIES FROM CODE 1 ===

def get_video_id(url):
    """Extract the 11-character YouTube video ID from a URL."""
    match = re.search(r'(?:v=|\/)([0-9A-Za-z_-]{11})', url)
    return match.group(1) if match else None


def try_download_transcript_file(video_id, lang="en"):
    """Fetch an existing YouTube transcript and save it to a text file; return the path or None."""
    try:
        transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=[lang])
        formatted = TextFormatter().format_transcript(transcript)
        path = f"{video_id}_transcript.txt"
        with open(path, "w", encoding="utf-8") as f:
            f.write(formatted)
        return path
    except Exception:
        return None


def try_download_audio_file(url, sabr_only=True, cookies_path=None):
    """Download audio only via yt-dlp and convert it to MP3; return the path or None."""
    try:
        ydl_opts = {
            'format': 'bestaudio[asr>0]/bestaudio/best' if sabr_only else 'bestaudio/best',
            'outtmpl': 'fallback_audio.%(ext)s',
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
            }],
        }
        if cookies_path:
            # Forward the uploaded cookies so restricted videos can be fetched.
            ydl_opts['cookiefile'] = cookies_path
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            ydl.download([url])
        return "fallback_audio.mp3"
    except Exception:
        return None


def try_download_video_file(url, sabr_only=True, cookies_path=None):
    """Download the full video via yt-dlp as a last resort; return the path or None."""
    try:
        ydl_opts = {
            'format': 'bestvideo+bestaudio/best' if sabr_only else 'best',
            'outtmpl': 'fallback_video.%(ext)s',
            'merge_output_format': 'mp4',
        }
        if cookies_path:
            ydl_opts['cookiefile'] = cookies_path
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            ydl.download([url])
        return "fallback_video.mp4"
    except Exception:
        return None


# === AUDIO / NLP UTILS ===

def extract_audio_from_video(video_path, audio_path="audio.wav"):
    """Extract mono 16 kHz WAV audio from a video (or audio) file with ffmpeg."""
    ffmpeg.input(video_path).output(audio_path, ac=1, ar=16000).run(overwrite_output=True)
    return audio_path


def split_audio(input_path, chunk_length_sec=30, target_sr=16000):
    """Load audio, resample to 16 kHz mono, and split it into fixed-length chunks."""
    waveform, sr = torchaudio.load(input_path)
    if sr != target_sr:
        resampler = torchaudio.transforms.Resample(orig_freq=sr, new_freq=target_sr)
        waveform = resampler(waveform)
    if waveform.shape[0] > 1:
        waveform = waveform.mean(dim=0, keepdim=True)
    chunk_samples = target_sr * chunk_length_sec
    chunks = [waveform[:, i:i + chunk_samples] for i in range(0, waveform.shape[1], chunk_samples)]
    return chunks, target_sr


class AudioChunksDataset(Dataset):
    """Wraps a list of audio chunks so a DataLoader can batch them."""

    def __init__(self, chunks):
        self.chunks = chunks

    def __len__(self):
        return len(self.chunks)

    def __getitem__(self, idx):
        return self.chunks[idx].squeeze(0)


def collate_audio_batch(batch):
    """Zero-pad every chunk in a batch to the length of the longest chunk."""
    max_len = max([b.shape[0] for b in batch])
    padded_batch = [torch.nn.functional.pad(b, (0, max_len - b.shape[0])) for b in batch]
    return torch.stack(padded_batch)


def transcribe_chunks_dataset(chunks, sr, model_name="openai/whisper-small", batch_size=4):
    """Transcribe audio chunks in batches with Whisper and return the joined transcript."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    processor = WhisperProcessor.from_pretrained(model_name)
    model = WhisperForConditionalGeneration.from_pretrained(model_name).to(device)
    model.eval()

    dataset = AudioChunksDataset(chunks)
    dataloader = DataLoader(dataset, batch_size=batch_size, collate_fn=collate_audio_batch)

    full_transcript = []
    for batch_waveforms in dataloader:
        wave_list = [waveform.numpy() for waveform in batch_waveforms]
        input_features = processor(
            wave_list, sampling_rate=sr, return_tensors="pt", padding="max_length"
        ).input_features.to(device)
        with torch.no_grad():
            predicted_ids = model.generate(input_features, language="en")
        transcriptions = processor.batch_decode(predicted_ids, skip_special_tokens=True)
        full_transcript.extend(transcriptions)
    return " ".join(full_transcript)


def summarize_with_bart(text, max_tokens=1024):
    """Summarize text with BART, splitting it into roughly sentence-aligned chunks first."""
    summarizer = pipeline(
        "summarization",
        model="facebook/bart-large-cnn",
        device=0 if torch.cuda.is_available() else -1,
    )
    # max_tokens is used as a rough per-chunk character budget, a proxy for
    # BART's 1024-token input limit.
    sentences = text.split(". ")
    chunks = []
    current_chunk = ""
    for sentence in sentences:
        if len(current_chunk + sentence) <= max_tokens:
            current_chunk += sentence + ". "
        else:
            chunks.append(current_chunk.strip())
            current_chunk = sentence + ". "
    if current_chunk:
        chunks.append(current_chunk.strip())

    summary = ""
    for chunk in chunks:
        out = summarizer(chunk, max_length=150, min_length=30, do_sample=False)
        summary += out[0]['summary_text'] + " "
    return summary.strip()


def generate_questions_with_pipeline(text, num_questions=5):
    """Generate up to num_questions questions from the text with a T5 question-generation model."""
    question_generator = pipeline(
        "text2text-generation",
        model="valhalla/t5-base-qg-hl",
        device=0 if torch.cuda.is_available() else -1,
    )
    sentences = text.split(". ")
    questions = []
    for sentence in sentences[:num_questions * 2]:
        if not sentence.strip():
            continue
        input_text = f"generate question: {sentence.strip()}"
        out = question_generator(input_text, max_length=50, do_sample=True, temperature=0.9)
        question = out[0]["generated_text"].strip()
        if question:
            questions.append(question)
    return questions[:num_questions]


# === MAIN PROCESSING FUNCTION ===

def process_input_gradio(url_input, file_input, cookies_file):
    """Resolve a transcript from an uploaded file or URL, then summarize it and generate questions."""
    try:
        cookies_path = None
        if cookies_file is not None:
            cookies_path = "cookies.txt"
            shutil.copyfile(cookies_file.name, cookies_path)

        if file_input is not None:
            # Uploaded video: extract audio and transcribe with Whisper.
            audio_path = extract_audio_from_video(file_input.name)
            chunks, sr = split_audio(audio_path, chunk_length_sec=15)
            transcript = transcribe_chunks_dataset(chunks, sr)
        elif url_input:
            # URL: prefer an existing YouTube transcript, then fall back to audio, then video.
            video_id = get_video_id(url_input)
            transcript_path = try_download_transcript_file(video_id)
            if transcript_path:
                with open(transcript_path, "r", encoding="utf-8") as f:
                    transcript = f.read()
            else:
                audio_file = try_download_audio_file(url_input, cookies_path=cookies_path)
                if audio_file and os.path.exists(audio_file):
                    audio_path = extract_audio_from_video(audio_file)
                    chunks, sr = split_audio(audio_path, chunk_length_sec=15)
                    transcript = transcribe_chunks_dataset(chunks, sr)
                else:
                    video_file = try_download_video_file(url_input, cookies_path=cookies_path)
                    if video_file and os.path.exists(video_file):
                        audio_path = extract_audio_from_video(video_file)
                        chunks, sr = split_audio(audio_path, chunk_length_sec=15)
                        transcript = transcribe_chunks_dataset(chunks, sr)
                    else:
                        return "⚠️ Could not download transcript, audio, or video for this URL. Try uploading manually.", ""
        else:
            return "Please provide a URL or upload a video file.", ""

        summary = summarize_with_bart(transcript)
        questions = generate_questions_with_pipeline(summary)
        return summary, "\n".join([f"{i+1}. {q}" for i, q in enumerate(questions)])
    except Exception as e:
        return f"Error: {str(e)}", ""


# === GRADIO UI ===

iface = gr.Interface(
    fn=process_input_gradio,
    inputs=[
        gr.Textbox(label="YouTube or Direct Video URL", placeholder="https://..."),
        gr.File(label="Or Upload a Video File", file_types=[".mp4", ".mkv", ".webm"]),
        gr.File(label="Optional cookies.txt for YouTube", file_types=[".txt"]),
    ],
    outputs=[
        gr.Textbox(label="Summary", lines=10),
        gr.Textbox(label="Generated Questions", lines=10),
    ],
    title="Lecture Summary & Question Generator",
    description=(
        "Provide a YouTube/Direct video URL or upload a video file. If the video is "
        "restricted, upload cookies.txt or the video file directly."
    ),
)

if __name__ == "__main__":
    iface.launch()