# syllables_matching_experiment / emotionanalysis.py
import librosa
import numpy as np
from scipy import signal
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
import warnings
warnings.filterwarnings('ignore')  # Suppress all warnings (librosa and its dependencies can be noisy)
class MusicAnalyzer:
def __init__(self):
# Emotion feature mappings - these define characteristics of different emotions
self.emotion_profiles = {
'happy': {'tempo': (100, 180), 'energy': (0.6, 1.0), 'major_mode': True, 'brightness': (0.6, 1.0)},
'sad': {'tempo': (40, 90), 'energy': (0, 0.5), 'major_mode': False, 'brightness': (0, 0.5)},
'calm': {'tempo': (50, 90), 'energy': (0, 0.4), 'major_mode': True, 'brightness': (0.3, 0.6)},
'energetic': {'tempo': (110, 200), 'energy': (0.7, 1.0), 'major_mode': True, 'brightness': (0.5, 0.9)},
'tense': {'tempo': (70, 140), 'energy': (0.5, 0.9), 'major_mode': False, 'brightness': (0.3, 0.7)},
'nostalgic': {'tempo': (60, 100), 'energy': (0.3, 0.7), 'major_mode': None, 'brightness': (0.4, 0.7)}
}
# Theme mappings based on musical features
self.theme_profiles = {
'love': {'emotion': ['happy', 'nostalgic', 'sad'], 'harmony_complexity': (0.3, 0.7)},
'triumph': {'emotion': ['energetic', 'happy'], 'harmony_complexity': (0.4, 0.8)},
'loss': {'emotion': ['sad', 'nostalgic'], 'harmony_complexity': (0.3, 0.7)},
'adventure': {'emotion': ['energetic', 'tense'], 'harmony_complexity': (0.5, 0.9)},
'reflection': {'emotion': ['calm', 'nostalgic'], 'harmony_complexity': (0.4, 0.8)},
'conflict': {'emotion': ['tense', 'energetic'], 'harmony_complexity': (0.6, 1.0)}
}
# Musical key mapping
self.key_names = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
# Common time signatures and their beat patterns with weights for prior probability
# Simplified to only include 4/4, 3/4, 2/4, and 6/8
self.common_time_signatures = {
"4/4": {"beats_per_bar": 4, "beat_pattern": [1.0, 0.2, 0.5, 0.2], "weight": 0.45},
"3/4": {"beats_per_bar": 3, "beat_pattern": [1.0, 0.2, 0.3], "weight": 0.25},
"2/4": {"beats_per_bar": 2, "beat_pattern": [1.0, 0.3], "weight": 0.15},
"6/8": {"beats_per_bar": 6, "beat_pattern": [1.0, 0.2, 0.3, 0.8, 0.2, 0.3], "weight": 0.15}
}
# Add common accent patterns for different time signatures
self.accent_patterns = {
"4/4": [[1, 0, 0, 0], [1, 0, 2, 0], [1, 0, 2, 0, 3, 0, 2, 0]],
"3/4": [[1, 0, 0], [1, 0, 2]],
"2/4": [[1, 0], [1, 2]],
"6/8": [[1, 0, 0, 2, 0, 0], [1, 0, 0, 2, 0, 3]]
}
# Expected rhythm density (relative note density per beat) for different time signatures
self.rhythm_density = {
"4/4": [1.0, 0.7, 0.8, 0.6],
"3/4": [1.0, 0.6, 0.7],
"6/8": [1.0, 0.5, 0.4, 0.8, 0.5, 0.4],
"2/4": [1.0, 0.6]
}
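        # Illustrative note (hypothetical values, not measured from any track): a piece at
        # ~126 BPM with mean energy ~0.65, a major key, and brightness ~0.65 falls inside
        # the tempo/energy/brightness windows of both 'happy' and 'energetic' above, so
        # both emotions would receive high range scores in analyze_emotion().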
def load_audio(self, file_path, sr=22050, duration=None):
"""Load audio file and return time series and sample rate"""
try:
y, sr = librosa.load(file_path, sr=sr, duration=duration)
return y, sr
except Exception as e:
print(f"Error loading audio file: {e}")
return None, None
def analyze_rhythm(self, y, sr):
"""Analyze rhythm-related features: tempo, beats, time signature"""
# Tempo and beat detection
onset_env = librosa.onset.onset_strength(y=y, sr=sr)
tempo, beat_frames = librosa.beat.beat_track(onset_envelope=onset_env, sr=sr)
beat_times = librosa.frames_to_time(beat_frames, sr=sr)
# Beat intervals and regularity
beat_intervals = np.diff(beat_times) if len(beat_times) > 1 else np.array([0])
beat_regularity = 1.0 / np.std(beat_intervals) if len(beat_intervals) > 0 and np.std(beat_intervals) > 0 else 0
        # Rhythm pattern analysis through autocorrelation
        # (computed here for reference; not used in the features returned below)
        ac = librosa.autocorrelate(onset_env, max_size=sr // 2)
        ac = librosa.util.normalize(ac, norm=np.inf)
# Advanced time signature detection
time_sig_result = self._detect_time_signature(y, sr)
# Extract results from the time signature detection
estimated_signature = time_sig_result["time_signature"]
time_sig_confidence = time_sig_result["confidence"]
        # Rhythm intensity: mean onset strength relative to its peak
rhythm_intensity = np.mean(onset_env) / np.max(onset_env) if np.max(onset_env) > 0 else 0
# Rhythm complexity based on variation in onset strength
rhythm_complexity = np.std(onset_env) / np.mean(onset_env) if np.mean(onset_env) > 0 else 0
# Convert numpy arrays to regular Python types for JSON serialization
beat_times_list = [float(t) for t in beat_times.tolist()]
beat_intervals_list = [float(i) for i in beat_intervals.tolist()]
return {
"tempo": float(tempo),
"beat_times": beat_times_list,
"beat_intervals": beat_intervals_list,
"beat_regularity": float(beat_regularity),
"rhythm_intensity": float(rhythm_intensity),
"rhythm_complexity": float(rhythm_complexity),
"estimated_time_signature": estimated_signature,
"time_signature_confidence": float(time_sig_confidence),
"time_signature_candidates": time_sig_result.get("all_candidates", {})
}
def _detect_time_signature(self, y, sr):
"""
Multi-method approach to time signature detection
Args:
y: Audio signal
sr: Sample rate
Returns:
dict with detected time signature and confidence
"""
# 1. Compute onset envelope and beat positions
onset_env = librosa.onset.onset_strength(y=y, sr=sr, hop_length=512)
# Get tempo and beat frames
tempo, beat_frames = librosa.beat.beat_track(onset_envelope=onset_env, sr=sr)
beat_times = librosa.frames_to_time(beat_frames, sr=sr)
# Return default if not enough beats detected
if len(beat_times) < 8:
return {"time_signature": "4/4", "confidence": 0.5}
# 2. Extract beat strengths and normalize
beat_strengths = self._get_beat_strengths(y, sr, beat_times, onset_env)
# 3. Compute various time signature features using different methods
results = {}
# Method 1: Beat pattern autocorrelation
autocorr_result = self._detect_by_autocorrelation(onset_env, sr)
results["autocorrelation"] = autocorr_result
# Method 2: Beat strength pattern matching
pattern_result = self._detect_by_pattern_matching(beat_strengths)
results["pattern_matching"] = pattern_result
# Method 3: Spectral rhythmic analysis
spectral_result = self._detect_by_spectral_analysis(onset_env, sr)
results["spectral"] = spectral_result
# Method 4: Note density analysis
density_result = self._detect_by_note_density(y, sr, beat_times)
results["note_density"] = density_result
# Method 5: Tempo-based estimation
tempo_result = self._estimate_from_tempo(tempo)
results["tempo_based"] = tempo_result
# 4. Combine results with weighted voting
final_result = self._combine_detection_results(results, tempo)
return final_result
def _get_beat_strengths(self, y, sr, beat_times, onset_env):
"""Extract normalized strengths at beat positions"""
# Convert beat times to frames
beat_frames = librosa.time_to_frames(beat_times, sr=sr, hop_length=512)
beat_frames = [min(f, len(onset_env)-1) for f in beat_frames]
# Get beat strengths from onset envelope
beat_strengths = np.array([onset_env[f] for f in beat_frames])
# Also look at energy and spectral flux at beat positions
hop_length = 512
frame_length = 2048
# Get energy at each beat
energy = librosa.feature.rms(y=y, frame_length=frame_length, hop_length=hop_length)[0]
beat_energy = np.array([energy[min(f, len(energy)-1)] for f in beat_frames])
# Combine onset strength with energy (weighted average)
beat_strengths = 0.7 * beat_strengths + 0.3 * beat_energy
# Normalize
if np.max(beat_strengths) > 0:
beat_strengths = beat_strengths / np.max(beat_strengths)
return beat_strengths
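    # Worked example of the weighting above (hypothetical numbers): a beat with onset
    # strength 1.0 and RMS energy 0.4 scores 0.7 * 1.0 + 0.3 * 0.4 = 0.82 before the final
    # normalization, so onset strength dominates but quiet beats are still penalized.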
def _detect_by_autocorrelation(self, onset_env, sr):
"""Detect meter using autocorrelation of onset strength"""
# Calculate autocorrelation of onset envelope
hop_length = 512
ac = librosa.autocorrelate(onset_env, max_size=4 * sr // hop_length)
ac = librosa.util.normalize(ac)
# Find significant peaks in autocorrelation
peaks = signal.find_peaks(ac, height=0.2, distance=sr//(8*hop_length))[0]
if len(peaks) < 2:
return {"time_signature": "4/4", "confidence": 0.4}
# Analyze peak intervals in terms of beats
peak_intervals = np.diff(peaks)
# Convert peaks to time
peak_times = peaks * hop_length / sr
# Analyze for common time signature patterns
time_sig_votes = {}
# Check if peaks match expected bar lengths
for ts, info in self.common_time_signatures.items():
beats_per_bar = info["beats_per_bar"]
# Check how well peaks match this meter
score = 0
for interval in peak_intervals:
                # Count a peak spacing as a match for this meter if it is within 25% of
                # beats_per_bar lag frames (a coarse heuristic, expressed in seconds below)
                expected = beats_per_bar * (hop_length / sr)  # beats_per_bar frames, in seconds
                tolerance = 0.25 * expected
if abs(interval * hop_length / sr - expected) < tolerance:
score += 1
if len(peak_intervals) > 0:
time_sig_votes[ts] = score / len(peak_intervals)
# Return most likely time signature
if time_sig_votes:
best_ts = max(time_sig_votes.items(), key=lambda x: x[1])
return {"time_signature": best_ts[0], "confidence": best_ts[1]}
return {"time_signature": "4/4", "confidence": 0.4}
def _detect_by_pattern_matching(self, beat_strengths):
"""Match beat strength patterns against known time signature patterns"""
if len(beat_strengths) < 6:
return {"time_signature": "4/4", "confidence": 0.4}
results = {}
# Try each possible time signature
for ts, info in self.common_time_signatures.items():
beats_per_bar = info["beats_per_bar"]
expected_pattern = info["beat_pattern"]
# Calculate correlation scores for overlapping segments
scores = []
# We need at least one complete pattern
if len(beat_strengths) >= beats_per_bar:
# Try different offsets to find best alignment
for offset in range(min(beats_per_bar, len(beat_strengths) - beats_per_bar + 1)):
# Calculate scores for each complete pattern
pattern_scores = []
for i in range(offset, len(beat_strengths) - beats_per_bar + 1, beats_per_bar):
segment = beat_strengths[i:i+beats_per_bar]
# If expected pattern is longer than segment, truncate it
pattern = expected_pattern[:len(segment)]
# Normalize segment and pattern
if np.std(segment) > 0 and np.std(pattern) > 0:
# Calculate correlation
corr = np.corrcoef(segment, pattern)[0, 1]
if not np.isnan(corr):
pattern_scores.append(corr)
if pattern_scores:
scores.append(np.mean(pattern_scores))
# Use the best score among different offsets
if scores:
confidence = max(scores)
results[ts] = confidence
# Find best match
if results:
best_ts = max(results.items(), key=lambda x: x[1])
return {"time_signature": best_ts[0], "confidence": best_ts[1]}
# Default
return {"time_signature": "4/4", "confidence": 0.5}
def _detect_by_spectral_analysis(self, onset_env, sr):
"""Analyze rhythm in frequency domain"""
# Get rhythm periodicity through Fourier Transform
# Focus on periods corresponding to typical bar lengths (1-8 seconds)
hop_length = 512
# Calculate rhythm periodicity
fft_size = 2**13 # Large enough to give good frequency resolution
S = np.abs(np.fft.rfft(onset_env, n=fft_size))
# Convert frequency to tempo in BPM
freqs = np.fft.rfftfreq(fft_size, d=hop_length/sr)
tempos = 60 * freqs
# Focus on reasonable tempo range (40-240 BPM)
tempo_mask = (tempos >= 40) & (tempos <= 240)
S_tempo = S[tempo_mask]
tempos = tempos[tempo_mask]
# Find peaks in spectrum
peaks = signal.find_peaks(S_tempo, height=np.max(S_tempo)*0.1, distance=5)[0]
if len(peaks) == 0:
return {"time_signature": "4/4", "confidence": 0.4}
# Get peak tempos and strengths
peak_tempos = tempos[peaks]
peak_strengths = S_tempo[peaks]
# Sort by strength
peak_indices = np.argsort(peak_strengths)[::-1]
peak_tempos = peak_tempos[peak_indices]
peak_strengths = peak_strengths[peak_indices]
# Analyze relationships between peaks
# For example, 3/4 typically has peaks at multiples of 3 beats
# 4/4 has peaks at multiples of 4 beats
time_sig_scores = {}
# Check relationships between top peaks
if len(peak_tempos) >= 2:
tempo_ratios = []
for i in range(len(peak_tempos)):
for j in range(i+1, len(peak_tempos)):
if peak_tempos[j] > 0:
ratio = peak_tempos[i] / peak_tempos[j]
tempo_ratios.append(ratio)
# Check for patterns indicative of different time signatures
for ts in self.common_time_signatures:
score = 0
if ts == "4/4" or ts == "2/4":
# Look for ratios close to 2 or 4
for ratio in tempo_ratios:
if abs(ratio - 2) < 0.2 or abs(ratio - 4) < 0.2:
score += 1
elif ts == "3/4" or ts == "6/8":
# Look for ratios close to 3 or 6
for ratio in tempo_ratios:
if abs(ratio - 3) < 0.2 or abs(ratio - 6) < 0.3:
score += 1
# Normalize score
if tempo_ratios:
time_sig_scores[ts] = min(1.0, score / len(tempo_ratios) + 0.4)
# If we have meaningful scores, return best match
if time_sig_scores:
best_ts = max(time_sig_scores.items(), key=lambda x: x[1])
return {"time_signature": best_ts[0], "confidence": best_ts[1]}
# Default fallback
return {"time_signature": "4/4", "confidence": 0.4}
def _detect_by_note_density(self, y, sr, beat_times):
"""Analyze note density patterns between beats"""
if len(beat_times) < 6:
return {"time_signature": "4/4", "confidence": 0.4}
# Extract note onsets (not just beats)
onset_times = librosa.onset.onset_detect(y=y, sr=sr, units='time')
if len(onset_times) < len(beat_times):
return {"time_signature": "4/4", "confidence": 0.4}
# Count onsets between consecutive beats
note_counts = []
for i in range(len(beat_times) - 1):
start = beat_times[i]
end = beat_times[i+1]
# Count onsets in this beat
count = sum(1 for t in onset_times if start <= t < end)
note_counts.append(count)
# Look for repeating patterns in the note counts
time_sig_scores = {}
for ts, info in self.common_time_signatures.items():
beats_per_bar = info["beats_per_bar"]
# Skip if we don't have enough data
if len(note_counts) < beats_per_bar:
continue
# Calculate pattern similarity for this time signature
scores = []
for offset in range(min(beats_per_bar, len(note_counts) - beats_per_bar + 1)):
similarities = []
for i in range(offset, len(note_counts) - beats_per_bar + 1, beats_per_bar):
# Get current bar pattern
pattern = note_counts[i:i+beats_per_bar]
# Compare with expected density pattern
expected = self.rhythm_density.get(ts, [1.0] * beats_per_bar)
expected = expected[:len(pattern)] # Truncate if needed
# Normalize both patterns
if sum(pattern) > 0 and sum(expected) > 0:
pattern_norm = [p/max(1, sum(pattern)) for p in pattern]
expected_norm = [e/sum(expected) for e in expected]
# Calculate similarity (1 - distance)
distance = sum(abs(p - e) for p, e in zip(pattern_norm, expected_norm)) / len(pattern)
similarity = 1 - min(1.0, distance)
similarities.append(similarity)
if similarities:
scores.append(np.mean(similarities))
# Use the best score
if scores:
time_sig_scores[ts] = max(scores)
# Return best match
if time_sig_scores:
best_ts = max(time_sig_scores.items(), key=lambda x: x[1])
return {"time_signature": best_ts[0], "confidence": best_ts[1]}
# Default
return {"time_signature": "4/4", "confidence": 0.4}
def _estimate_from_tempo(self, tempo):
"""Use tempo to help estimate likely time signature"""
        # Rough statistical priors: slower tempos are more often notated in compound
        # meters such as 6/8, while faster tempos tend toward simple meters such as 2/4
scores = {}
if tempo < 70:
# Slow tempos favor compound meters
scores = {
"4/4": 0.5,
"3/4": 0.4,
"2/4": 0.3,
"6/8": 0.7
}
elif 70 <= tempo <= 120:
# Medium tempos favor 4/4, 3/4
scores = {
"4/4": 0.7,
"3/4": 0.6,
"2/4": 0.4,
"6/8": 0.3
}
else:
# Fast tempos favor simpler meters
scores = {
"4/4": 0.6,
"2/4": 0.7,
"3/4": 0.4,
"6/8": 0.2
}
# Find best match
best_ts = max(scores.items(), key=lambda x: x[1])
return {"time_signature": best_ts[0], "confidence": best_ts[1]}
def _combine_detection_results(self, results, tempo):
"""Combine results from different detection methods"""
# Define weights for different methods
method_weights = {
"autocorrelation": 0.25,
"pattern_matching": 0.30,
"spectral": 0.20,
"note_density": 0.20,
"tempo_based": 0.05
}
# Prior probability (based on frequency in music)
prior_weights = {ts: info["weight"] for ts, info in self.common_time_signatures.items()}
# Combine votes
total_votes = {ts: prior_weights.get(ts, 0.1) for ts in self.common_time_signatures}
for method, result in results.items():
ts = result["time_signature"]
confidence = result["confidence"]
weight = method_weights.get(method, 0.1)
# Add weighted vote
if ts in total_votes:
total_votes[ts] += confidence * weight
else:
total_votes[ts] = confidence * weight
# Special case: disambiguate between 3/4 and 6/8
if "3/4" in total_votes and "6/8" in total_votes:
# If the two are close, use tempo to break tie
if abs(total_votes["3/4"] - total_votes["6/8"]) < 0.1:
if tempo < 100: # Slower tempo favors 6/8
total_votes["6/8"] += 0.1
else: # Faster tempo favors 3/4
total_votes["3/4"] += 0.1
# Get highest scoring time signature
best_ts = max(total_votes.items(), key=lambda x: x[1])
# Calculate confidence score (normalize to 0-1)
confidence = best_ts[1] / (sum(total_votes.values()) + 0.001)
confidence = min(0.95, max(0.4, confidence)) # Bound confidence
return {
"time_signature": best_ts[0],
"confidence": confidence,
"all_candidates": {ts: float(score) for ts, score in total_votes.items()}
}
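    # Worked example of the voting above (hypothetical method outputs): starting from the
    # priors 4/4 = 0.45 and 3/4 = 0.25, pattern matching voting 3/4 at 0.8 adds 0.24,
    # spectral at 0.6 adds 0.12, and note density at 0.7 adds 0.14, giving 3/4 a total of
    # 0.75; autocorrelation voting 4/4 at 0.5 (+0.125) and tempo at 0.7 (+0.035) only bring
    # 4/4 to 0.61, so three agreeing methods can overturn the 4/4 prior.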
def _evaluate_beat_pattern(self, beat_strengths, pattern_length):
"""
Evaluate how consistently a specific pattern length fits the beat strengths
Args:
beat_strengths: Array of normalized beat strengths
pattern_length: Length of pattern to evaluate
Returns:
score: How well this pattern length explains the data (0-1)
"""
if len(beat_strengths) < pattern_length * 2:
return 0.0
# Calculate correlation between consecutive patterns
correlations = []
num_full_patterns = len(beat_strengths) // pattern_length
for i in range(num_full_patterns - 1):
pattern1 = beat_strengths[i*pattern_length:(i+1)*pattern_length]
pattern2 = beat_strengths[(i+1)*pattern_length:(i+2)*pattern_length]
# Calculate similarity between consecutive patterns
if len(pattern1) == len(pattern2) and len(pattern1) > 0:
corr = np.corrcoef(pattern1, pattern2)[0, 1]
if not np.isnan(corr):
correlations.append(corr)
# Calculate variance of beat strengths within each position
variance_score = 0
if num_full_patterns >= 2:
position_values = [[] for _ in range(pattern_length)]
for i in range(num_full_patterns):
for pos in range(pattern_length):
idx = i * pattern_length + pos
if idx < len(beat_strengths):
position_values[pos].append(beat_strengths[idx])
# Calculate variance ratio (higher means consistent accent patterns)
between_pos_var = np.var([np.mean(vals) for vals in position_values if vals])
within_pos_var = np.mean([np.var(vals) for vals in position_values if len(vals) > 1])
if within_pos_var > 0:
variance_score = between_pos_var / within_pos_var
variance_score = min(1.0, variance_score / 2.0) # Normalize
# Combine correlation and variance scores
if correlations:
correlation_score = np.mean(correlations)
return 0.7 * correlation_score + 0.3 * variance_score
return 0.5 * variance_score # Lower confidence if we couldn't calculate correlations
def _extract_average_pattern(self, beat_strengths, pattern_length):
"""
Extract the average beat pattern of specified length
Args:
beat_strengths: Array of beat strengths
pattern_length: Length of pattern to extract
Returns:
Average pattern of the specified length
"""
if len(beat_strengths) < pattern_length:
return np.array([])
# Number of complete patterns
num_patterns = len(beat_strengths) // pattern_length
if num_patterns == 0:
return np.array([])
# Reshape to stack patterns and calculate average
patterns = beat_strengths[:num_patterns * pattern_length].reshape((num_patterns, pattern_length))
return np.mean(patterns, axis=0)
def analyze_tonality(self, y, sr):
"""Analyze tonal features: key, mode, harmonic features"""
# Compute chromagram
chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
        # Krumhansl-Schmuckler key-finding algorithm (simplified)
        # Major and minor key profiles from Krumhansl & Kessler's probe-tone experiments
major_profile = np.array([6.35, 2.23, 3.48, 2.33, 4.38, 4.09, 2.52, 5.19, 2.39, 3.66, 2.29, 2.88])
minor_profile = np.array([6.33, 2.68, 3.52, 5.38, 2.60, 3.53, 2.54, 4.75, 3.98, 2.69, 3.34, 3.17])
# Calculate the correlation of the chroma with each key profile
chroma_avg = np.mean(chroma, axis=1)
major_corr = np.zeros(12)
minor_corr = np.zeros(12)
        for i in range(12):
            # Rotate the chroma so pitch class i becomes the tonic (index 0) before
            # correlating with the C-anchored profiles; rolling by -i keeps key_names[i]
            # pointing at the correct tonic
            major_corr[i] = np.corrcoef(np.roll(chroma_avg, -i), major_profile)[0, 1]
            minor_corr[i] = np.corrcoef(np.roll(chroma_avg, -i), minor_profile)[0, 1]
# Find the key with the highest correlation
max_major_idx = np.argmax(major_corr)
max_minor_idx = np.argmax(minor_corr)
# Determine if the piece is in a major or minor key
if major_corr[max_major_idx] > minor_corr[max_minor_idx]:
mode = "major"
key = self.key_names[max_major_idx]
else:
mode = "minor"
key = self.key_names[max_minor_idx]
# Calculate harmony complexity (variability in harmonic content)
harmony_complexity = np.std(chroma) / np.mean(chroma) if np.mean(chroma) > 0 else 0
# Calculate tonal stability (consistency of tonal center)
tonal_stability = 1.0 / (np.std(chroma_avg) + 0.001) # Add small value to avoid division by zero
# Calculate spectral brightness (center of mass of the spectrum)
spectral_centroid = librosa.feature.spectral_centroid(y=y, sr=sr)[0]
brightness = np.mean(spectral_centroid) / (sr/2) # Normalize by Nyquist frequency
# Calculate dissonance using spectral contrast
spectral_contrast = librosa.feature.spectral_contrast(y=y, sr=sr)
dissonance = np.mean(spectral_contrast[0]) # Higher values may indicate more dissonance
return {
"key": key,
"mode": mode,
"is_major": mode == "major",
"harmony_complexity": float(harmony_complexity),
"tonal_stability": float(tonal_stability),
"brightness": float(brightness),
"dissonance": float(dissonance)
}
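    # Illustrative example of the key finding above (hypothetical chroma): if the average
    # chroma concentrates on D, F# and A, the rotated correlation peaks at i == 2, so the
    # reported key is key_names[2] == 'D' with mode 'major'.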
def analyze_energy(self, y, sr):
"""Analyze energy characteristics of the audio"""
# RMS Energy (overall loudness)
rms = librosa.feature.rms(y=y)[0]
# Energy metrics
mean_energy = np.mean(rms)
energy_std = np.std(rms)
energy_dynamic_range = np.max(rms) - np.min(rms) if len(rms) > 0 else 0
# Energy distribution across frequency ranges
spec = np.abs(librosa.stft(y))
# Divide the spectrum into low, mid, and high ranges
freq_bins = spec.shape[0]
low_freq_energy = np.mean(spec[:int(freq_bins*0.2), :])
mid_freq_energy = np.mean(spec[int(freq_bins*0.2):int(freq_bins*0.8), :])
high_freq_energy = np.mean(spec[int(freq_bins*0.8):, :])
# Normalize to create a distribution
total_energy = low_freq_energy + mid_freq_energy + high_freq_energy
if total_energy > 0:
low_freq_ratio = low_freq_energy / total_energy
mid_freq_ratio = mid_freq_energy / total_energy
high_freq_ratio = high_freq_energy / total_energy
else:
low_freq_ratio = mid_freq_ratio = high_freq_ratio = 1/3
return {
"mean_energy": float(mean_energy),
"energy_std": float(energy_std),
"energy_dynamic_range": float(energy_dynamic_range),
"frequency_distribution": {
"low_freq": float(low_freq_ratio),
"mid_freq": float(mid_freq_ratio),
"high_freq": float(high_freq_ratio)
}
}
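    # Note on the frequency split above: with the defaults used here (sr=22050, n_fft=2048),
    # the low/mid/high bands cover roughly 0-2.2 kHz, 2.2-8.8 kHz and 8.8-11 kHz
    # (20% / 60% / 20% of the linearly spaced STFT bins).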
def analyze_emotion(self, rhythm_data, tonal_data, energy_data):
"""Classify the emotion based on musical features"""
# Extract key features for emotion detection
tempo = rhythm_data["tempo"]
is_major = tonal_data["is_major"]
energy = energy_data["mean_energy"]
brightness = tonal_data["brightness"]
# Calculate scores for each emotion
emotion_scores = {}
for emotion, profile in self.emotion_profiles.items():
score = 0.0
# Tempo contribution (0-1 score)
tempo_range = profile["tempo"]
if tempo_range[0] <= tempo <= tempo_range[1]:
score += 1.0
else:
# Partial score based on distance
distance = min(abs(tempo - tempo_range[0]), abs(tempo - tempo_range[1]))
max_distance = 40 # Maximum distance to consider
score += max(0, 1 - (distance / max_distance))
# Energy contribution (0-1 score)
energy_range = profile["energy"]
if energy_range[0] <= energy <= energy_range[1]:
score += 1.0
else:
# Partial score based on distance
distance = min(abs(energy - energy_range[0]), abs(energy - energy_range[1]))
max_distance = 0.5 # Maximum distance to consider
score += max(0, 1 - (distance / max_distance))
# Mode contribution (0-1 score)
if profile["major_mode"] is not None: # Some emotions don't have strong mode preference
score += 1.0 if profile["major_mode"] == is_major else 0.0
else:
score += 0.5 # Neutral contribution
# Brightness contribution (0-1 score)
brightness_range = profile["brightness"]
if brightness_range[0] <= brightness <= brightness_range[1]:
score += 1.0
else:
# Partial score based on distance
distance = min(abs(brightness - brightness_range[0]), abs(brightness - brightness_range[1]))
max_distance = 0.5 # Maximum distance to consider
score += max(0, 1 - (distance / max_distance))
# Normalize score (0-1 range)
emotion_scores[emotion] = score / 4.0
# Find primary emotion
primary_emotion = max(emotion_scores.items(), key=lambda x: x[1])
# Calculate valence and arousal (dimensional emotion model)
# Mapping different emotions to valence-arousal space
valence_map = {
'happy': 0.8, 'sad': 0.2, 'calm': 0.6,
'energetic': 0.7, 'tense': 0.3, 'nostalgic': 0.5
}
arousal_map = {
'happy': 0.7, 'sad': 0.3, 'calm': 0.2,
'energetic': 0.9, 'tense': 0.8, 'nostalgic': 0.4
}
# Calculate weighted valence and arousal
total_weight = sum(emotion_scores.values())
if total_weight > 0:
valence = sum(score * valence_map[emotion] for emotion, score in emotion_scores.items()) / total_weight
arousal = sum(score * arousal_map[emotion] for emotion, score in emotion_scores.items()) / total_weight
else:
valence = 0.5
arousal = 0.5
return {
"primary_emotion": primary_emotion[0],
"confidence": primary_emotion[1],
"emotion_scores": emotion_scores,
"valence": float(valence), # Pleasure dimension (0-1)
"arousal": float(arousal) # Activity dimension (0-1)
}
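    # Worked example of the valence/arousal weighting above (hypothetical scores): with
    # emotion_scores of happy = 0.9 and energetic = 0.6 (all others ~0), valence is about
    # (0.9*0.8 + 0.6*0.7) / 1.5 = 0.76 and arousal about (0.9*0.7 + 0.6*0.9) / 1.5 = 0.78.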
def analyze_theme(self, rhythm_data, tonal_data, emotion_data):
"""Infer potential themes based on musical features and emotion"""
# Extract relevant features
primary_emotion = emotion_data["primary_emotion"]
harmony_complexity = tonal_data["harmony_complexity"]
# Calculate theme scores
theme_scores = {}
for theme, profile in self.theme_profiles.items():
score = 0.0
# Emotion contribution
if primary_emotion in profile["emotion"]:
# Emotions listed earlier have stronger connection to the theme
position_weight = 1.0 / (profile["emotion"].index(primary_emotion) + 1)
score += position_weight
# Secondary emotions contribution
secondary_emotions = [e for e, s in emotion_data["emotion_scores"].items()
if s > 0.5 and e != primary_emotion]
for emotion in secondary_emotions:
if emotion in profile["emotion"]:
score += 0.3 # Less weight than primary emotion
# Harmony complexity contribution
complexity_range = profile["harmony_complexity"]
if complexity_range[0] <= harmony_complexity <= complexity_range[1]:
score += 1.0
else:
# Partial score based on distance
distance = min(abs(harmony_complexity - complexity_range[0]),
abs(harmony_complexity - complexity_range[1]))
max_distance = 0.5 # Maximum distance to consider
score += max(0, 1 - (distance / max_distance))
# Normalize score
theme_scores[theme] = min(1.0, score / 2.5)
# Find primary theme
primary_theme = max(theme_scores.items(), key=lambda x: x[1])
# Find secondary themes (scores > 0.5)
secondary_themes = [(theme, score) for theme, score in theme_scores.items()
if score > 0.5 and theme != primary_theme[0]]
secondary_themes.sort(key=lambda x: x[1], reverse=True)
return {
"primary_theme": primary_theme[0],
"confidence": primary_theme[1],
"secondary_themes": [t[0] for t in secondary_themes[:2]], # Top 2 secondary themes
"theme_scores": theme_scores
}
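    # Worked example of the theme scoring above (hypothetical inputs): primary emotion
    # 'sad' with harmony_complexity 0.5 gives 'loss' a position weight of 1.0 plus 1.0 for
    # the in-range complexity; a secondary 'nostalgic' score above 0.5 adds 0.3, so the
    # normalized score is 2.3 / 2.5 = 0.92.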
def analyze_music(self, file_path):
"""Main function to perform comprehensive music analysis"""
# Load the audio file
y, sr = self.load_audio(file_path)
if y is None:
return {"error": "Failed to load audio file"}
# Run all analyses
rhythm_data = self.analyze_rhythm(y, sr)
tonal_data = self.analyze_tonality(y, sr)
energy_data = self.analyze_energy(y, sr)
# Higher-level analyses that depend on the basic features
emotion_data = self.analyze_emotion(rhythm_data, tonal_data, energy_data)
theme_data = self.analyze_theme(rhythm_data, tonal_data, emotion_data)
# Convert any remaining numpy values to native Python types
def convert_numpy_to_python(obj):
if isinstance(obj, dict):
return {k: convert_numpy_to_python(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [convert_numpy_to_python(item) for item in obj]
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, np.number):
return float(obj)
else:
return obj
# Ensure all numpy values are converted
rhythm_data = convert_numpy_to_python(rhythm_data)
tonal_data = convert_numpy_to_python(tonal_data)
energy_data = convert_numpy_to_python(energy_data)
emotion_data = convert_numpy_to_python(emotion_data)
theme_data = convert_numpy_to_python(theme_data)
# Combine all results
return {
"file": file_path,
"rhythm_analysis": rhythm_data,
"tonal_analysis": tonal_data,
"energy_analysis": energy_data,
"emotion_analysis": emotion_data,
"theme_analysis": theme_data,
"summary": {
"tempo": float(rhythm_data["tempo"]),
"time_signature": rhythm_data["estimated_time_signature"],
"key": tonal_data["key"],
"mode": tonal_data["mode"],
"primary_emotion": emotion_data["primary_emotion"],
"primary_theme": theme_data["primary_theme"]
}
}
# def visualize_analysis(self, file_path):
# """Create visualizations for the music analysis results"""
# # Check if matplotlib is available
# if plt is None:
# print("Error: matplotlib is not installed. Visualization is not available.")
# return
#
# # Load audio and run analysis
# y, sr = self.load_audio(file_path)
# if y is None:
# print("Error: Failed to load audio file")
# return
#
# results = self.analyze_music(file_path)
#
# # Create visualization
# plt.figure(figsize=(15, 12))
# # Waveform
# plt.subplot(3, 2, 1)
# librosa.display.waveshow(y, sr=sr, alpha=0.6)
# plt.title(f'Waveform (Tempo: {results["rhythm_analysis"]["tempo"]:.1f} BPM)')
# # Spectrogram
# plt.subplot(3, 2, 2)
# D = librosa.amplitude_to_db(np.abs(librosa.stft(y)), ref=np.max)
# librosa.display.specshow(D, sr=sr, x_axis='time', y_axis='log')
# plt.colorbar(format='%+2.0f dB')
# plt.title(f'Spectrogram (Key: {results["tonal_analysis"]["key"]} {results["tonal_analysis"]["mode"]})')
# # Chromagram
# plt.subplot(3, 2, 3)
# chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
# librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')
# plt.colorbar()
# plt.title('Chromagram')
# # Onset strength and beats
# plt.subplot(3, 2, 4)
# onset_env = librosa.onset.onset_strength(y=y, sr=sr)
# times = librosa.times_like(onset_env, sr=sr)
# plt.plot(times, librosa.util.normalize(onset_env), label='Onset strength')
# plt.vlines(results["rhythm_analysis"]["beat_times"], 0, 1, alpha=0.5, color='r',
# linestyle='--', label='Beats')
# plt.legend()
# plt.title('Rhythm Analysis')
# # Emotion scores
# plt.subplot(3, 2, 5)
# emotions = list(results["emotion_analysis"]["emotion_scores"].keys())
# scores = list(results["emotion_analysis"]["emotion_scores"].values())
# plt.bar(emotions, scores, color='skyblue')
# plt.ylim(0, 1)
# plt.title(f'Emotion Analysis (Primary: {results["emotion_analysis"]["primary_emotion"]})')
# plt.xticks(rotation=45)
# # Theme scores
# plt.subplot(3, 2, 6)
# themes = list(results["theme_analysis"]["theme_scores"].keys())
# scores = list(results["theme_analysis"]["theme_scores"].values())
# plt.bar(themes, scores, color='lightgreen')
# plt.ylim(0, 1)
# plt.title(f'Theme Analysis (Primary: {results["theme_analysis"]["primary_theme"]})')
# plt.xticks(rotation=45)
# plt.tight_layout()
# plt.show()
# Create an instance of the analyzer
analyzer = MusicAnalyzer()
# The following code is for demonstration purposes only
# and will only run if executed directly (not when imported)
if __name__ == "__main__":
# Replace this with a real audio file path when running as a script
demo_file = "path/to/your/audio/file.mp3"
    # Analyze the demo audio file
results = analyzer.analyze_music(demo_file)
# Print analysis summary
print("\n=== MUSIC ANALYSIS SUMMARY ===")
print(f"Tempo: {results['summary']['tempo']:.1f} BPM")
print(f"Time Signature: {results['summary']['time_signature']}")
print(f"Key: {results['summary']['key']} {results['summary']['mode']}")
print(f"Primary Emotion: {results['summary']['primary_emotion']}")
print(f"Primary Theme: {results['summary']['primary_theme']}")
# Show detailed results (optional)
import json
print("\n=== DETAILED ANALYSIS ===")
print(json.dumps(results, indent=2))
# Visualize the analysis
# analyzer.visualize_analysis(demo_file)