# -*- coding: utf-8 -*-
"""ConfusedAutoShortVideoGen.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1qGRLgmJahs6-cNBhO_SIsz_yXKz2OqEW
"""
!pip install gradio
!pip install gradio_client
!pip install whisperx
!pip install pydub
## Menu
## script_writing.py
mscript_input = "what is depression"
mscript_music_input = "What is depression"  # apparently reserved for the Music Gen step (unused below)
final_video_output = "final_video_output.mp4"  # defined here but unused; the concat cells below write their own output paths
musicownpath = '/content/tmp1mbn3d3s.mp4'  # user-supplied background music track
import csv
import re
from gradio_client import Client
# Initialize the client with the correct Hugging Face Space
client = Client("Abu1998/Meme_finder")
# Define the system message and input sentence
system_message = """Task: Act as a YouTube Shorts content writer.
Objective: Create engaging, catchy, and trendy scripts for YouTube Shorts videos that are brief, attention-grabbing, and optimized for viral potential.
Guidelines:
Each script should be 15-30 seconds long.
Use a hook in the first few seconds to capture viewers' attention.
Ensure the content is aligned with trending topics, challenges, or popular culture.
Incorporate humor, relatable scenarios, or strong emotions to resonate with the audience.
End with a clear call-to-action (CTA) like “Follow for more!” or a cliffhanger.
Example Flow:
User Input: “Write a script about the Monday blues.”
AI Output:
Script: "POV: It’s Monday morning, and you’re already done with the week. [Clip shows someone groggily hitting the snooze button, dragging themselves out of bed]. But wait… there’s coffee. And suddenly, everything’s okay! ☕✨ [Cut to a quick burst of energy with upbeat music]. If you’re just surviving till the weekend, hit that follow button for more relatable vibes!"
"""
# Define the user input (the sentence for which you want to find the main keyword)
user_input = mscript_input
# Make the API call with the specified parameters
result = client.predict(
    message=user_input,
    system_message=system_message,
    max_tokens=512,
    temperature=0.7,
    top_p=0.95,
    api_name="/chat"
)
# Extract the script from the result
script = result.strip()
# Function to split script into words
def split_into_words(script_text):
    words = re.findall(r'\w+', script_text)  # Find all words
    return words

# Convert the script to a list of words
words = split_into_words(script)
# Define the output file names (the TTS step below reads /content/script_output)
csv_file = 'updates.csv'
txt_file = 'script_output'
# Save to CSV
with open(csv_file, mode='w', newline='', encoding='utf-8') as file:
    writer = csv.writer(file)
    writer.writerow(['Content', 'Word'])  # Headers
    for word in words:
        writer.writerow([user_input, word])  # Write each word as a separate row

print(f"Script generated, split into words, and saved to {csv_file}.")
# Save to TXT
with open(txt_file, mode='w', encoding='utf-8') as file:
    file.write(script)

print(f"Script saved to {txt_file}.")
"""### audio_gen.py"""
# Import the gradio_client library and Colab helpers
from gradio_client import Client
from google.colab import files
import shutil
# Initialize the client with the correct Hugging Face Space
client = Client("innoai/Edge-TTS-Text-to-Speech")
# Path to the script file generated in the previous step
file_path = "/content/script_output"

# Read the content from the script file
with open(file_path, 'r', encoding='utf-8') as file:
    text_input = file.read().strip()  # Read and strip any extra whitespace
# Make the API call with the file content as input
result = client.predict(
    text=text_input,
    voice="en-US-AvaMultilingualNeural - en-US (Female)",  # Change the voice as needed
    rate=0,   # Adjust the speech rate if needed
    pitch=0,  # Adjust the pitch if needed
    api_name="/predict"
)
# Check the result type and content
print(result)
# Extract the local file path from the result
audio_file_path = result[0] # Assuming the audio file path is the first element
# Define the output file name and path
output_file_path = "/content/audio_output.mp3"
# Copy the file to the desired location
shutil.copy(audio_file_path, output_file_path)
# Provide download link for the generated audio file
#files.download(output_file_path)
"""###Music Gen"""
"""### Time Stamp"""
import whisperx
import torch
import pandas as pd
# Initialize the WhisperX model
device = "cuda" if torch.cuda.is_available() else "cpu"
compute_type = "float32" if device == "cpu" else "float16"
model = whisperx.load_model("large-v2", device, compute_type=compute_type)
def transcribe_and_align(audio_file):
    # Load audio
    audio = whisperx.load_audio(audio_file)
    print("Audio loaded successfully.")

    # Transcribe
    result = model.transcribe(audio, batch_size=16)
    print("Transcription result:", result)

    # Align the transcription to get precise timings
    model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)
    result = whisperx.align(result["segments"], model_a, metadata, audio, device, return_char_alignments=True)
    print("Alignment result:", result)

    # Process segments to get word-level timestamps
    word_segments = []
    for segment in result["segments"]:
        for word_info in segment.get("words", []):
            if "word" in word_info and "start" in word_info and "end" in word_info:
                word_segments.append({
                    "word": word_info["word"],
                    "start": word_info["start"],
                    "end": word_info["end"],
                    "duration": word_info["end"] - word_info["start"]
                })

    # Debug: print word segments to check that they were populated
    print("Word segments:", word_segments)

    # Convert the word segments to a DataFrame and save to CSV
    df = pd.DataFrame(word_segments)
    output_file = "/content/transcription_with_word_timestamps.csv"
    df.to_csv(output_file, index=False)
    return output_file
# Provide the path to your audio file
audio_file_path = "/content/audio_output.mp3"
# Transcribe and align the audio file
output_file = transcribe_and_align(audio_file_path)
# Print the path to the output file
print(f"Word-level transcription with timestamps saved to: {output_file}")
"""### common_words_remover"""
# prompt: write a code to drop these common words from output_file word column , COMMON_WORDS = {"the", "and", "is", "in", "to", "of", "a", "with", "for", "on", "it", "as", "at", "by", "an","this", "that", "which", "or", "be", "are", "was", "were", "has", "have", "had", "why", "such","here", "some", "so", "easy"}
import pandas as pd
def drop_common_words(input_file, output_file, common_words):
    """
    Drops rows whose 'word' column contains a common word and saves the result to a new CSV file.

    Args:
        input_file (str): The path to the input CSV file.
        output_file (str): The path to the output CSV file.
        common_words (set): A set of common words to be removed.
    """
    df = pd.read_csv(input_file)
    df['word'] = df['word'].str.lower()  # Convert words to lowercase for comparison
    df = df[~df['word'].isin(common_words)]  # Filter out rows with common words
    df.to_csv(output_file, index=False)
# Set of common words to drop
COMMON_WORDS = {"the", "and", "is", "in", "to", "of", "a", "with", "for", "on", "it", "as", "at", "by", "an",
                "this", "that", "which", "or", "be", "are", "was", "were", "has", "have", "had", "why", "such",
                "here", "some", "so", "easy"}
# Input and output file paths
input_file = "/content/transcription_with_word_timestamps.csv"
output_file = "/content/filtered_transcription.csv"
# Call the function to drop common words
drop_common_words(input_file, output_file, COMMON_WORDS)
print(f"Rows with common words dropped and saved to {output_file}")
"""### common_words_remover 2nd step"""
import pandas as pd
from pydub import AudioSegment
def update_dataframe_with_audio_duration(csv_file, audio_file):
    # Load the CSV file into a DataFrame
    df = pd.read_csv(csv_file)

    # Calculate the total duration of the audio
    audio = AudioSegment.from_file(audio_file)
    total_duration = audio.duration_seconds

    # Drop existing 'end' and 'duration' columns
    df = df.drop(columns=['end', 'duration'], errors='ignore')

    # Create a new 'end' column from the next row's 'start' value
    df['end'] = df['start'].shift(-1)

    # The first row should start at 0.01
    df.loc[0, 'start'] = 0.01

    # The last row's 'end' should be the total audio duration
    df.loc[df.index[-1], 'end'] = total_duration

    # Recompute 'duration' as the difference between 'end' and 'start'
    df['duration'] = df['end'] - df['start']

    # Save the updated DataFrame, extracting the filename and prepending 'updated_'
    updated_csv_file = 'updated_' + csv_file.split('/')[-1]
    df.to_csv(updated_csv_file, index=False)
    print(f"Updated DataFrame saved to: {updated_csv_file}")
    return updated_csv_file
# Example usage: note that the music track (not the narration) supplies the
# total duration here, so the last word's entry is padded out to the music's end
csv_file = '/content/filtered_transcription.csv'
audio_file = musicownpath
update_dataframe_with_audio_duration(csv_file, audio_file)
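# A toy illustration (synthetic values, not from the pipeline) of the
# shift(-1) end-fill performed above: each word's 'end' becomes the next
# word's 'start', and the last 'end' is padded to the total audio length.
demo = pd.DataFrame({'word': ['depression', 'mental', 'health'],
                     'start': [0.32, 1.10, 2.05]})
demo['end'] = demo['start'].shift(-1)
demo.loc[0, 'start'] = 0.01
demo.loc[demo.index[-1], 'end'] = 3.40  # pretend total audio duration
demo['duration'] = demo['end'] - demo['start']
print(demo)
# 'depression' now spans 0.01-1.10, 'mental' 1.10-2.05, 'health' 2.05-3.40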
"""### **Giphy Gif Download**"""
# prompt: write a code for "/content/dropped_2024-08-21_18-58-34.csv" to use Word column search in giphy api (API_KEY = "KzPlVn6nz6czmjWpPEy6reL52r1H5gs7") search and download in /content/memes this folder name as the word name
import requests
import csv
import os
# Giphy API details
API_KEY = "KzPlVn6nz6czmjWpPEy6reL52r1H5gs7"
SEARCH_URL = "https://api.giphy.com/v1/gifs/search"
# CSV and download directory
CSV_FILE = "/content/updated_filtered_transcription.csv"
DOWNLOAD_DIR = '/content/memes2'
# Create download directory if it doesn't exist
os.makedirs(DOWNLOAD_DIR, exist_ok=True)
def download_giphy_gif(search_term, filename):
    """Downloads the top GIF from Giphy for the given search term."""
    params = {
        'api_key': API_KEY,
        'q': search_term,
        'limit': 1
    }
    response = requests.get(SEARCH_URL, params=params, timeout=30)
    data = response.json()

    if data['data']:
        gif_url = data['data'][0]['images']['original']['url']
        gif_response = requests.get(gif_url, timeout=60)
        with open(os.path.join(DOWNLOAD_DIR, filename), 'wb') as f:
            f.write(gif_response.content)
        print(f"Downloaded GIF for '{search_term}' as '{filename}'")
    else:
        print(f"No GIF found for '{search_term}'")
# Process the CSV file: one Giphy lookup per remaining word
with open(CSV_FILE, 'r', encoding='utf-8') as file:
    reader = csv.DictReader(file)
    for row in reader:
        word = row['word']
        filename = f"{word}.gif"
        download_giphy_gif(word, filename)
import moviepy.editor as mpe
import os
import csv
# CSV and download directory paths
CSV_FILE = '/content/updated_filtered_transcription.csv'
DOWNLOAD_DIR = '/content/memes2'
OUTPUT_VIDEO = 'updated_concatenated_memes.mp4'
# Get the GIF order and durations from the CSV file
gif_order = []
durations = {}

with open(CSV_FILE, 'r', encoding='utf-8') as file:
    reader = csv.DictReader(file)
    for row in reader:
        gif_filename = row['word'] + '.gif'
        duration = float(row['duration'])  # Must match the 'duration' column produced earlier
        gif_order.append(gif_filename)
        durations[gif_filename] = duration
# Load, crop, and concatenate GIFs
clips = []
for gif_filename in gif_order:
    gif_path = os.path.join(DOWNLOAD_DIR, gif_filename)
    if os.path.exists(gif_path):
        clip = mpe.VideoFileClip(gif_path).resize(height=480)  # Resize to a common height
        clip = clip.set_fps(24)  # Match the frame rate for consistency
        # Crop each GIF to the word's duration from the updated CSV
        max_duration = durations.get(gif_filename, clip.duration)  # Fall back to the full clip duration
        if clip.duration > max_duration:
            clip = clip.subclip(0, max_duration)  # Keep up to the specified duration
        # GIFs shorter than the word's duration are left at their native length
        clips.append(clip)
    else:
        print(f"Warning: GIF not found: {gif_filename}")
# Concatenate and save the video
if clips:
    final_clip = mpe.concatenate_videoclips(clips, method="compose")
    final_clip.write_videofile(OUTPUT_VIDEO, fps=24)  # fps matches the GIF clips
    print(f"Concatenated video saved as {OUTPUT_VIDEO}")
else:
    print("No GIFs found to concatenate.")
"""### concate_audio_gif_music"""
import moviepy.editor as mpe
import os
# File paths
video_file = '/content/updated_concatenated_memes.mp4'
music_file = musicownpath
audio_file = "/content/audio_output.mp3"
output_file = '/content/final_output.mp4'
# Load the video, music, and narration audio
video_clip = mpe.VideoFileClip(video_file)
music_clip = mpe.VideoFileClip(music_file)
audio_clip = mpe.AudioFileClip(audio_file)

# Duration of the video
video_duration = video_clip.duration

# Ensure the music duration matches the video duration
if music_clip.duration < video_duration:
    # Loop the music to cover the video duration
    n_repeats = int(video_duration // music_clip.duration) + 1
    music_clip = mpe.concatenate_videoclips([music_clip] * n_repeats).subclip(0, video_duration)
elif music_clip.duration > video_duration:
    music_clip = music_clip.subclip(0, video_duration)

# Reduce the music volume to 30%; the narration stays at 100%
music_clip = music_clip.volumex(0.3)

# Ensure the narration duration matches the video duration
if audio_clip.duration < video_duration:
    # Loop the narration to cover the video duration
    n_repeats = int(video_duration // audio_clip.duration) + 1
    audio_clip = mpe.concatenate_audioclips([audio_clip] * n_repeats).subclip(0, video_duration)
elif audio_clip.duration > video_duration:
    audio_clip = audio_clip.subclip(0, video_duration)

# Mix the narration with the reduced-volume music and attach it to the video
final_audio = mpe.CompositeAudioClip([music_clip.audio, audio_clip])
final_clip = video_clip.set_audio(final_audio)
final_clip.write_videofile(output_file, codec='libx264', audio_codec='aac')
print(f"Final video saved as {output_file}")
# Alternative mix (second iteration): same pipeline, but with the music at 20%
# volume, written to final_output2.mp4
import moviepy.editor as mpe
import os

# File paths
video_file = '/content/updated_concatenated_memes.mp4'
music_file = musicownpath
audio_file = "/content/audio_output.mp3"
output_file = '/content/final_output2.mp4'

# Load the video, music, and narration audio
video_clip = mpe.VideoFileClip(video_file)
music_clip = mpe.VideoFileClip(music_file)
audio_clip = mpe.AudioFileClip(audio_file)

# Duration of the video
video_duration = video_clip.duration

# Ensure the music duration matches the video duration
if music_clip.duration < video_duration:
    # Loop the music to cover the video duration
    n_repeats = int(video_duration // music_clip.duration) + 1
    music_clip = mpe.concatenate_videoclips([music_clip] * n_repeats).subclip(0, video_duration)
elif music_clip.duration > video_duration:
    music_clip = music_clip.subclip(0, video_duration)

# Ensure the narration duration matches the video duration
if audio_clip.duration < video_duration:
    # Loop the narration to cover the video duration
    n_repeats = int(video_duration // audio_clip.duration) + 1
    audio_clip = mpe.concatenate_audioclips([audio_clip] * n_repeats).subclip(0, video_duration)
elif audio_clip.duration > video_duration:
    audio_clip = audio_clip.subclip(0, video_duration)

# Reduce the music volume to 20%; the narration stays at 100%
music_clip = music_clip.volumex(0.2)

# Mix the narration over the quieter music and attach it to the video
final_audio = mpe.CompositeAudioClip([music_clip.audio, audio_clip])
final_clip = video_clip.set_audio(final_audio)

# Write the final output video
final_clip.write_videofile(output_file, codec='libx264', audio_codec='aac')
print(f"Final video saved as {output_file}")