# project_charles/charles_animator.py
"""Animates the Charles avatar by alternating closed- and open-mouth frames
composited onto a background; the mouth opens as soon as the character
starts talking."""

import random
import time
import cv2
import av
import numpy as np


def resize_and_crop(image, dim=(640, 480)):
    h, w = image.shape[:2]
    aspect_ratio = w / h
    target_width, target_height = dim
    target_aspect = target_width / target_height

    if aspect_ratio > target_aspect:
        # Original aspect is wider than target, fit by height
        new_height = target_height
        new_width = int(target_height * aspect_ratio)
    else:
        # Original aspect is taller than target, fit by width
        new_width = target_width
        new_height = int(target_width / aspect_ratio)

    # Resize the image to the new dimensions
    resized_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_AREA)

    # Centre-crop to the target dimensions
    x_offset = (new_width - target_width) // 2
    y_offset = (new_height - target_height) // 2
    cropped_image = resized_image[y_offset:y_offset + target_height, x_offset:x_offset + target_width]

    return cropped_image


def overlay_images(background, overlay, x, y):
    """
    Overlay an image with transparency over another image.
    """
    # Check if the overlay fits within the background at the given (x, y) position
    if y + overlay.shape[0] > background.shape[0] or x + overlay.shape[1] > background.shape[1]:
        raise ValueError("Overlay dimensions exceed background dimensions at the specified position.")

    # Extract the alpha channel from the overlay and create an inverse alpha channel
    alpha = overlay[:, :, 3] / 255.0
    inverse_alpha = 1.0 - alpha

    # Swap the overlay's colour channels so they match the RGB background
    # (the overlay was loaded in OpenCV's BGR order), keeping the original alpha channel intact
    if overlay.shape[2] == 4:  # If it has an alpha channel
        alpha_channel = overlay[:, :, 3:]
        overlay = cv2.cvtColor(overlay[:, :, :3], cv2.COLOR_RGB2BGR)
        overlay = np.concatenate([overlay, alpha_channel], axis=2)  # Re-attach the alpha channel
    else:
        overlay = cv2.cvtColor(overlay, cv2.COLOR_RGB2BGR)

    # Alpha-blend each colour channel: out = alpha * overlay + (1 - alpha) * background
    for c in range(0, 3):
        background[y:overlay.shape[0] + y, x:overlay.shape[1] + x, c] = (
            alpha * overlay[:, :, c]
            + inverse_alpha * background[y:overlay.shape[0] + y, x:overlay.shape[1] + x, c]
        )

    return background
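
# Minimal usage sketch for overlay_images on synthetic arrays (values are
# illustrative only, not part of the original module):
#   bg = np.zeros((480, 640, 3), dtype=np.uint8)   # solid black background
#   fg = np.zeros((100, 100, 4), dtype=np.uint8)
#   fg[:, :, 1] = 255                              # green square ...
#   fg[:, :, 3] = 128                              # ... at ~50% opacity
#   blended = overlay_images(bg, fg, x=10, y=10)   # blend near the top-left corner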


def create_charles_frames(background, charles_frames):
    output_frames = []

    # Load the background image and convert it from OpenCV's BGR to RGB
    background = cv2.imread(background)
    background = cv2.cvtColor(background, cv2.COLOR_BGR2RGB)

    # Resize the background to match the user image
    background = resize_and_crop(background, (640, 480))

    for bot_image_path in charles_frames:
        bot_image = cv2.imread(bot_image_path, cv2.IMREAD_UNCHANGED)

        # The bot image is expected to be square
        assert bot_image.shape[0] == bot_image.shape[1]

        # Shrink the bot image if it is larger than the background in any direction
        # (it is square, so checking the height is enough)
        if bot_image.shape[0] > background.shape[0]:
            bot_image = cv2.resize(bot_image, (background.shape[0], background.shape[0]), interpolation=cv2.INTER_AREA)

        # Overlay the bot image in the bottom-right corner of the background
        x_bot = background.shape[1] - bot_image.shape[1]
        y_bot = background.shape[0] - bot_image.shape[0]
        background_with_bot = overlay_images(background.copy(), bot_image, x_bot, y_bot)

        output_frames.append(background_with_bot)

    return output_frames


class CharlesAnimator:
    def __init__(self):
        self.mouth_open = False
        self.last_change_time = 0
        self.next_change_in = 0
        self.was_talking = False
        # Use pre-rendered static frames for performance
        self.static_frames = create_charles_frames("./images/zoom-background.png", [
            "./images/charles.png",
            "./images/charles-open.png"
        ])

    def update(self, is_talking):
        start_talking = is_talking and not self.was_talking
        self.was_talking = is_talking
        current_time = time.time()

        # Open the mouth as soon as the character starts talking
        if start_talking:
            self.mouth_open = True
            self.next_change_in = current_time + random.uniform(0.05, 0.25)
            return self.static_frames[1]

        # Initialize the next change time if it's zero.
        if self.next_change_in == 0:
            self.next_change_in = current_time + random.uniform(0.05, 0.25)

        # Update the mouth state only if the character is talking.
        if is_talking:
            # Check if it's time to change the mouth state.
            if current_time >= self.next_change_in:
                self.mouth_open = not self.mouth_open
                self.next_change_in = current_time + random.uniform(0.05, 0.25)
        else:
            # Close the mouth if the character is not talking.
            self.mouth_open = False

        frame = self.static_frames[1] if self.mouth_open else self.static_frames[0]
        return frame
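

# Illustrative usage sketch, not part of the original module: drives the
# animator for roughly a second of "talking" and writes the final composite
# to a hypothetical path so it can be inspected. It assumes the image assets
# referenced above ("./images/zoom-background.png", "./images/charles*.png")
# exist on disk.
if __name__ == "__main__":
    animator = CharlesAnimator()
    frame = animator.update(is_talking=False)

    # Simulate ~1 second of speech at 10 updates per second
    for _ in range(10):
        frame = animator.update(is_talking=True)
        time.sleep(0.1)

    # Frames are built in RGB, so convert back to BGR before saving with OpenCV
    cv2.imwrite("./demo_frame.png", cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))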