|
|
# Standard library
import html
import os
import re
import time
from collections import Counter

# Third-party
import joblib
import numpy as np
import requests
from deep_translator import GoogleTranslator
from youtube_transcript_api import YouTubeTranscriptApi
|
|
|
|
|
|
|
|
# Project root: one level above this file's directory, so model artifacts can
# be located regardless of the current working directory.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# On-disk paths of the joblib-persisted scikit-learn pipelines.
MBTI_PATH = os.path.join(BASE_DIR, 'data', 'model_mbti.pkl')
EMOTION_PATH = os.path.join(BASE_DIR, 'data', 'model_emotion.pkl')

# Lazily-initialised model singletons, populated by NLPHandler.load_models().
# Any of these may stay None if its load fails; callers must tolerate that.
_model_mbti = None  # scikit-learn SVM pipeline loaded from MBTI_PATH
_classifier_mbti_transformer = None  # HF pipeline: parka735/mbti-classifier
_classifier_roberta = None  # HF pipeline: SamLowe/roberta-base-go_emotions
_classifier_distilbert = None  # HF pipeline: joeddav distilbert go-emotions student
|
|
|
|
|
# English GoEmotions label -> Indonesian display name used in API responses.
# Keys must match the labels emitted by the two go_emotions classifiers.
EMOTION_TRANSLATIONS = {
    'admiration': 'Kagum', 'amusement': 'Terhibur', 'anger': 'Marah',
    'annoyance': 'Kesal', 'approval': 'Setuju', 'caring': 'Peduli',
    'confusion': 'Bingung', 'curiosity': 'Penasaran', 'desire': 'Keinginan',
    'disappointment': 'Kecewa', 'disapproval': 'Tidak Setuju', 'disgust': 'Jijik',
    'embarrassment': 'Malu', 'excitement': 'Semangat', 'fear': 'Takut',
    'gratitude': 'Bersyukur', 'grief': 'Berduka', 'joy': 'Gembira',
    'love': 'Cinta', 'nervousness': 'Gugup', 'optimism': 'Optimis',
    'pride': 'Bangga', 'realization': 'Sadar', 'relief': 'Lega',
    'remorse': 'Menyesal', 'sadness': 'Sedih', 'surprise': 'Terkejut',
    'neutral': 'Netral'
}
|
|
|
|
|
|
|
|
# Per-type explanations returned alongside a prediction: 'en' is a formal
# English description, 'id' a colloquial Indonesian one (informal register is
# intentional — it is user-facing copy, do not "correct" it).
MBTI_EXPLANATIONS = {
    'ISTJ': {'en': "The Logistician. Practical and fact-minded individuals, whose reliability cannot be doubted.",
             'id': "Si Organisator. Lo orangnya logis, praktis, dan bisa diandelin banget. Anti ribet-ribet club."},
    'ISFJ': {'en': "The Defender. Very dedicated and warm protectors, always ready to defend their loved ones.",
             'id': "Si Pelindung. Hati lo lembut, setia, dan care banget sama orang terdekat. Temen curhat terbaik."},
    'INFJ': {'en': "The Advocate. Quiet and mystical, yet very inspiring and tireless idealists.",
             'id': "Si Visioner Misterius. Lo peka, idealis, dan suka mikirin makna hidup mendalam. Langka nih!"},
    'INTJ': {'en': "The Architect. Imaginative and strategic thinkers, with a plan for everything.",
             'id': "Si Strategis. Otak lo jalan terus, visioner, dan selalu punya rencana cadangan buat segala hal."},
    'ISTP': {'en': "The Virtuoso. Bold and practical experimenters, masters of all kinds of tools.",
             'id': "Si Pengrajin. Lo cool, santuy, tapi jago banget mecahin masalah teknis secara praktis."},
    'ISFP': {'en': "The Adventurer. Flexible and charming artists, always ready to explore and experience something new.",
             'id': "Si Seniman Bebas. Lo estetik, santai, dan suka banget nge-explore hal baru tanpa banyak drama."},
    'INFP': {'en': "The Mediator. Poetic, kind and altruistic people, always eager to help a good cause.",
             'id': "Si Paling Perasa. Hati lo kayak kapas, puitis, idealis banget, dan selalu mau bikin dunia lebih baik."},
    'INTP': {'en': "The Logician. Innovative inventors with an unquenchable thirst for knowledge.",
             'id': "Si Pemikir Kritis. Lo kepoan parah, logis abis, dan suka banget debat teori sampe pagi."},
    'ESTP': {'en': "The Entrepreneur. Smart, energetic and very perceptive people, who truly enjoy living on the edge.",
             'id': "Si Pemberani. Lo enerjik, spontan, dan jago banget ngambil peluang dalam situasi mepet."},
    'ESFP': {'en': "The Entertainer. Spontaneous, energetic and enthusiastic people - life is never boring around them.",
             'id': "Si Penghibur. Lo asik parah, spontan, dan selalu jadi pusat perhatian di tongkrongan."},
    'ENFP': {'en': "The Campaigner. Enthusiastic, creative and sociable free spirits, who can always find a reason to smile.",
             'id': "Si Semangat 45. Lo kreatif, ramah, dan punya energi positif yang nular ke semua orang."},
    'ENTP': {'en': "The Debater. Smart and curious thinkers who cannot resist an intellectual challenge.",
             'id': "Si Pendebat Ulung. Lo pinter, kritis, dan iseng banget suka mancing debat cuma buat seru-seruan."},
    'ESTJ': {'en': "The Executive. Excellent administrators, unsurpassed at managing things - or people.",
             'id': "Si Bos Tegas. Lo jago ngatur, disiplin, dan gak suka liat ada yang lelet atau berantakan."},
    'ESFJ': {'en': "The Consul. Extraordinarily caring, social and popular people, always eager to help.",
             'id': "Si Paling Gaul. Lo ramah, suka nolong, dan care banget sama harmoni di sirkel pertemanan."},
    'ENFJ': {'en': "The Protagonist. Charismatic and inspiring leaders, able to mesmerize their listeners.",
             'id': "Si Pemimpin Karismatik. Lo jago banget ngomong, inspiratif, dan bisa bikin orang lain nurut sama lo."},
    'ENTJ': {'en': "The Commander. Bold, imaginative and strong-willed leaders, always finding a way - or making one.",
             'id': "Si Jenderal. Lo ambisius, tegas, dan punya bakat alami buat mimpin dan naklukin tantangan."}
}
|
|
|
|
|
class NLPHandler:
    """Facade bundling MBTI prediction, emotion detection, keyword extraction
    and YouTube data fetching behind static methods.

    All heavyweight models live as module-level lazy singletons; call
    load_models() (predict_all does this automatically) before predicting.
    """
|
    @staticmethod
    def load_models():
        """Idempotently load all classifiers into the module-level singletons.

        Each model loads only on the first call; load failures are printed
        and swallowed so the remaining models still get a chance to load
        (downstream code must tolerate a singleton staying None).
        """
        global _model_mbti, _classifier_mbti_transformer, _classifier_roberta, _classifier_distilbert
        print(f"Loading models from: {BASE_DIR}")

        # Local SVM pipeline (joblib-persisted) — only if the file exists.
        if _model_mbti is None and os.path.exists(MBTI_PATH):
            try:
                print(f"Loading MBTI Model (SVM) from: {MBTI_PATH}")
                _model_mbti = joblib.load(MBTI_PATH)
            except Exception as e: print(f"MBTI Load Error: {e}")

        # HuggingFace MBTI classifier; `transformers` is imported lazily in
        # each branch so the heavy dependency is only pulled in when needed.
        if _classifier_mbti_transformer is None:
            try:
                print(f"Loading MBTI Model (Transformer): parka735/mbti-classifier")
                from transformers import pipeline
                _classifier_mbti_transformer = pipeline("text-classification", model="parka735/mbti-classifier", top_k=1)
            except Exception as e: print(f"MBTI Transformer Load Error: {e}")

        # Emotion ensemble member 1 (RoBERTa GoEmotions; top_k=None returns
        # scores for every label, which predict_all sums per label).
        if _classifier_roberta is None:
            try:
                print("Loading Emotion Model 1: SamLowe/roberta-base-go_emotions")
                from transformers import pipeline
                _classifier_roberta = pipeline("text-classification", model="SamLowe/roberta-base-go_emotions", top_k=None)
            except Exception as e: print(f"Emotion 1 Load Error: {e}")

        # Emotion ensemble member 2 (distilled student model).
        if _classifier_distilbert is None:
            try:
                print("Loading Emotion Model 2: joeddav/distilbert-base-uncased-go-emotions-student")
                from transformers import pipeline
                _classifier_distilbert = pipeline("text-classification", model="joeddav/distilbert-base-uncased-go-emotions-student", top_k=None)
            except Exception as e: print(f"Emotion 2 Load Error: {e}")
|
|
|
|
|
|
|
|
    # Class-level cache for the Gemini client, lazily created by _init_gemini.
    _gemini_client = None
|
|
|
|
|
@staticmethod |
|
|
def _init_gemini(): |
|
|
"""Initialize Gemini Client for validation (lazy loading)""" |
|
|
if NLPHandler._gemini_client is None: |
|
|
api_key = os.getenv("GEMINI_API_KEY") |
|
|
if api_key: |
|
|
try: |
|
|
from google import genai |
|
|
NLPHandler._gemini_client = genai.Client(api_key=api_key) |
|
|
print("Gemini Validator Ready (google-genai SDK)") |
|
|
except Exception as e: |
|
|
print(f"Gemini Init Failed: {e}") |
|
|
return NLPHandler._gemini_client is not None |
|
|
|
|
|
@staticmethod |
|
|
def _validate_with_gemini(text, ml_prediction): |
|
|
""" |
|
|
Use Gemini to validate ML prediction. |
|
|
Returns: (validated_mbti, confidence, reasoning) |
|
|
""" |
|
|
if not NLPHandler._init_gemini(): |
|
|
return ml_prediction, 0.6, "ML only (Gemini unavailable)" |
|
|
|
|
|
|
|
|
|
|
|
prompt = f"""You are an MBTI expert. Analyze this text and determine the MOST LIKELY MBTI type based ONLY on the content. |
|
|
|
|
|
TEXT TO ANALYZE: |
|
|
"{text}" |
|
|
|
|
|
ANALYSIS FRAMEWORK: |
|
|
1. I/E (Introversion/Extraversion): |
|
|
- E indicators: Mentions of social events, leading teams, networking, group activities, energized by people |
|
|
- I indicators: Preference for solitude, reflection, working alone, drained by social interaction |
|
|
|
|
|
2. N/S (Intuition/Sensing): |
|
|
- N indicators: Abstract thinking, future-focused, big picture, patterns, possibilities, theory |
|
|
- S indicators: Concrete details, present-focused, practical, facts, reality, hands-on |
|
|
|
|
|
3. T/F (Thinking/Feeling): |
|
|
- T indicators: Logic, efficiency, objectivity, direct communication, "facts over feelings" |
|
|
- F indicators: Empathy, harmony, values, subjective decisions, people-focused |
|
|
|
|
|
4. J/P (Judging/Perceiving): |
|
|
- J indicators: Planning, structure, deadlines, organization, schedules, decisive |
|
|
- P indicators: Spontaneous, flexible, adaptable, open-ended, exploratory |
|
|
|
|
|
CRITICAL INSTRUCTIONS: |
|
|
- Analyze INDEPENDENTLY - ignore any preconceptions |
|
|
- Look for EXPLICIT behavioral indicators in the text |
|
|
- Weight E/I heavily on social energy language (not just content topic) |
|
|
- If text mentions "leading", "networking", "team meetings" → strong E signal |
|
|
- If text emphasizes "planning", "deadlines", "structure" → strong J signal |
|
|
|
|
|
Respond in this EXACT format: |
|
|
MBTI: [4-letter type] |
|
|
CONFIDENCE: [0.0-1.0] |
|
|
REASON: [One sentence citing specific text evidence] |
|
|
|
|
|
Example: |
|
|
MBTI: ENTJ |
|
|
CONFIDENCE: 0.88 |
|
|
REASON: Explicit mentions of networking, leading teams, and structured planning indicate ENTJ. |
|
|
""" |
|
|
|
|
|
try: |
|
|
response = NLPHandler._gemini_client.models.generate_content( |
|
|
model='gemini-2.0-flash', |
|
|
contents=prompt |
|
|
) |
|
|
result_text = response.text.strip() |
|
|
|
|
|
|
|
|
lines = result_text.split('\n') |
|
|
validated_mbti = ml_prediction |
|
|
confidence = 0.7 |
|
|
reason = "Gemini validation" |
|
|
|
|
|
for line in lines: |
|
|
if line.startswith('MBTI:'): |
|
|
validated_mbti = line.split(':', 1)[1].strip().upper() |
|
|
elif line.startswith('CONFIDENCE:'): |
|
|
try: |
|
|
confidence = float(line.split(':', 1)[1].strip()) |
|
|
except: |
|
|
confidence = 0.7 |
|
|
elif line.startswith('REASON:'): |
|
|
reason = line.split(':', 1)[1].strip() |
|
|
|
|
|
|
|
|
if len(validated_mbti) != 4 or not all(c in 'IENTFSJP' for c in validated_mbti): |
|
|
print(f"Invalid Gemini MBTI: {validated_mbti}, using ML: {ml_prediction}") |
|
|
return ml_prediction, 0.6, "Invalid Gemini response - using ML" |
|
|
|
|
|
return validated_mbti, confidence, reason |
|
|
|
|
|
except Exception as e: |
|
|
print(f"Gemini Validation Error: {e}") |
|
|
return ml_prediction, 0.6, f"Gemini error - using ML" |
|
|
|
|
|
@staticmethod |
|
|
def translate_to_english(text): |
|
|
try: |
|
|
if len(text) > 4500: text = text[:4500] |
|
|
return GoogleTranslator(source='auto', target='en').translate(text) |
|
|
except: return text |
|
|
|
|
|
@staticmethod |
|
|
def extract_keywords(text): |
|
|
stopwords = ["the", "and", "is", "to", "in", "it", "of", "for", "with", "on", "that", "this", "my", "was", "as", "are", "have", "you", "but", "so", "ini", "itu", "dan", "yang", "di", "ke"] |
|
|
words = re.findall(r'\w+', text.lower()) |
|
|
filtered = [w for w in words if len(w) > 3 and w not in stopwords] |
|
|
freq = {} |
|
|
for w in filtered: freq[w] = freq.get(w, 0) + 1 |
|
|
sorted_words = sorted(freq.items(), key=lambda x: x[1], reverse=True) |
|
|
|
|
|
keywords_en = [w[0] for w in sorted_words[:5]] |
|
|
keywords_id = [] |
|
|
try: |
|
|
translator = GoogleTranslator(source='auto', target='id') |
|
|
for k in keywords_en: keywords_id.append(translator.translate(k)) |
|
|
except: keywords_id = keywords_en |
|
|
return {"en": keywords_en, "id": keywords_id} |
|
|
|
|
|
@staticmethod |
|
|
def predict_all(raw_text): |
|
|
NLPHandler.load_models() |
|
|
processed_text = NLPHandler.translate_to_english(raw_text) |
|
|
|
|
|
|
|
|
mbti_result = "UNKNOWN" |
|
|
mbti_confidence = 0.0 |
|
|
mbti_reasoning = "" |
|
|
|
|
|
if _model_mbti and _classifier_mbti_transformer: |
|
|
try: |
|
|
|
|
|
svm_pred = _model_mbti.predict([processed_text])[0] |
|
|
|
|
|
|
|
|
trans_input = processed_text[:2000] |
|
|
trans_output = _classifier_mbti_transformer(trans_input) |
|
|
|
|
|
|
|
|
|
|
|
if isinstance(trans_output, list) and isinstance(trans_output[0], list): |
|
|
trans_res = trans_output[0][0] |
|
|
elif isinstance(trans_output, list): |
|
|
trans_res = trans_output[0] |
|
|
else: |
|
|
trans_res = trans_output |
|
|
|
|
|
trans_pred = trans_res['label'].upper() |
|
|
trans_conf = trans_res['score'] |
|
|
|
|
|
print(f"[Voting] SVM='{svm_pred}' vs Transformer='{trans_pred}' ({trans_conf:.2%})") |
|
|
|
|
|
|
|
|
if svm_pred == trans_pred: |
|
|
|
|
|
print("[Check] Models AGREE! Auto-approving.") |
|
|
mbti_result = svm_pred |
|
|
mbti_confidence = 0.95 |
|
|
mbti_reasoning = f"Both AI models agreed strictly on {mbti_result}." |
|
|
|
|
|
|
|
|
|
|
|
else: |
|
|
|
|
|
print("[Warning] Models DISAGREE! Summoning Gemini Judge...") |
|
|
|
|
|
|
|
|
validation_context = f"Model A (Keyword) detected {svm_pred}. Model B (Context) detected {trans_pred}." |
|
|
|
|
|
validated_mbti, confidence, reason = NLPHandler._validate_with_gemini( |
|
|
processed_text, validation_context |
|
|
) |
|
|
|
|
|
mbti_result = validated_mbti |
|
|
mbti_confidence = confidence |
|
|
mbti_reasoning = reason |
|
|
print(f"[Gemini] Verdict: {mbti_result} (Confidence: {confidence})") |
|
|
|
|
|
except Exception as e: |
|
|
print(f"[Error] Hybrid MBTI Error: {e}") |
|
|
|
|
|
try: |
|
|
mbti_result = _model_mbti.predict([processed_text])[0] |
|
|
mbti_confidence = 0.4 |
|
|
except: |
|
|
mbti_result = "INTJ" |
|
|
mbti_reasoning = "System fallback due to hybrid error." |
|
|
|
|
|
|
|
|
emotion_data = {"id": "Netral", "en": "Neutral", "raw": "neutral", "list": []} |
|
|
confidence_score = 0.0 |
|
|
|
|
|
try: |
|
|
|
|
|
global _classifier_roberta, _classifier_distilbert |
|
|
|
|
|
|
|
|
emo_input = processed_text[:1500] |
|
|
|
|
|
combined_scores = {} |
|
|
|
|
|
def add_scores(results): |
|
|
if isinstance(results, list) and isinstance(results[0], list): |
|
|
results = results[0] |
|
|
for item in results: |
|
|
label = item['label'] |
|
|
score = item['score'] |
|
|
combined_scores[label] = combined_scores.get(label, 0) + score |
|
|
|
|
|
if _classifier_roberta: |
|
|
add_scores(_classifier_roberta(emo_input)) |
|
|
if _classifier_distilbert: |
|
|
add_scores(_classifier_distilbert(emo_input)) |
|
|
|
|
|
|
|
|
if 'neutral' in combined_scores: |
|
|
del combined_scores['neutral'] |
|
|
|
|
|
sorted_emotions = sorted(combined_scores.items(), key=lambda x: x[1], reverse=True) |
|
|
|
|
|
top_3_list = [] |
|
|
if sorted_emotions: |
|
|
|
|
|
best_label, total_score = sorted_emotions[0] |
|
|
confidence_score = (total_score / 2.0) |
|
|
|
|
|
indo_label = EMOTION_TRANSLATIONS.get(best_label, best_label.capitalize()) |
|
|
emotion_data = { |
|
|
"id": indo_label, |
|
|
"en": best_label.capitalize(), |
|
|
"raw": best_label, |
|
|
"list": [] |
|
|
} |
|
|
|
|
|
|
|
|
for label, score in sorted_emotions[:3]: |
|
|
norm_score = score / 2.0 |
|
|
top_3_list.append({ |
|
|
"en": label.capitalize(), |
|
|
"id": EMOTION_TRANSLATIONS.get(label, label.capitalize()), |
|
|
"score": norm_score |
|
|
}) |
|
|
|
|
|
emotion_data["list"] = top_3_list |
|
|
print(f"Emotion Hybrid Top 1: {emotion_data['en']} ({confidence_score:.2%})") |
|
|
else: |
|
|
print("Emotion Hybrid: No clear emotion found (Neutral)") |
|
|
|
|
|
except Exception as e: |
|
|
print(f"Emotion Prediction Error: {e}") |
|
|
|
|
|
|
|
|
mbti_desc = MBTI_EXPLANATIONS.get(mbti_result, { |
|
|
'en': "Complex personality type.", |
|
|
'id': "Kepribadian yang cukup kompleks." |
|
|
}) |
|
|
|
|
|
|
|
|
if mbti_reasoning: |
|
|
mbti_desc['validation'] = mbti_reasoning |
|
|
mbti_desc['confidence'] = mbti_confidence |
|
|
|
|
|
|
|
|
conf_percent = int(confidence_score * 100) |
|
|
|
|
|
|
|
|
em_list_str = "" |
|
|
if 'list' in emotion_data and emotion_data['list']: |
|
|
labels = [f"{item['en']} ({int(item['score']*100)}%)" for item in emotion_data['list']] |
|
|
em_list_str = ", ".join(labels) |
|
|
|
|
|
emotion_reasoning = { |
|
|
'en': f"Dominant emotion is '{emotion_data['en']}'. Mix: {em_list_str}.", |
|
|
'id': f"Emosi dominan '{emotion_data['id']}'. Campuran: {em_list_str}." |
|
|
} |
|
|
|
|
|
|
|
|
keywords_reasoning = { |
|
|
'en': "These words appeared most frequently and define the main topic.", |
|
|
'id': "Kata-kata ini paling sering muncul dan jadi inti topik lo." |
|
|
} |
|
|
|
|
|
return { |
|
|
"mbti": mbti_result, |
|
|
"emotion": emotion_data, |
|
|
"keywords": NLPHandler.extract_keywords(processed_text), |
|
|
"reasoning": { |
|
|
"mbti": mbti_desc, |
|
|
"emotion": emotion_reasoning, |
|
|
"keywords": keywords_reasoning |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
    @staticmethod
    def _fetch_official_api(video_id, api_key):
        """Fetch video metadata and top comments via the YouTube Data API v3.

        Returns a dict with keys "video" (metadata or None), "comments"
        (list of cleaned comment dicts) and "text_for_analysis" (all text
        joined for the NLP pipeline), or None when neither endpoint yielded
        any text or a request failed.
        """
        print(f"Using Official API Key for {video_id}...")

        result = {
            "video": None,
            "comments": [],
            "text_for_analysis": ""
        }
        text_parts = []  # accumulates title/description/comment text

        try:
            # --- Video metadata (snippet + statistics) ---
            url_meta = f"https://www.googleapis.com/youtube/v3/videos?part=snippet,statistics&id={video_id}&key={api_key}"
            res_meta = requests.get(url_meta, timeout=5)

            if res_meta.status_code == 200:
                data = res_meta.json()
                if "items" in data and len(data["items"]) > 0:
                    item = data["items"][0]
                    snippet = item["snippet"]
                    stats = item.get("statistics", {})

                    # API text is HTML-escaped; decode entities for display.
                    title = html.unescape(snippet['title'])
                    desc = html.unescape(snippet['description'])

                    # Pick the best available thumbnail resolution.
                    thumbnails = snippet.get('thumbnails', {})
                    thumbnail = (thumbnails.get('maxres') or thumbnails.get('high') or thumbnails.get('medium') or thumbnails.get('default', {})).get('url', '')

                    result["video"] = {
                        "title": title,
                        "description": desc,
                        "thumbnail": thumbnail,
                        "channel": snippet.get('channelTitle', 'Unknown Channel'),
                        "publishedAt": snippet.get('publishedAt', ''),
                        "viewCount": stats.get('viewCount', '0'),
                        "likeCount": stats.get('likeCount', '0'),
                        "commentCount": stats.get('commentCount', '0')
                    }

                    text_parts.append(title)
                    text_parts.append(desc)

            # --- Top-level comments (up to 20, ordered by relevance) ---
            url_comm = f"https://www.googleapis.com/youtube/v3/commentThreads?part=snippet&videoId={video_id}&maxResults=20&order=relevance&key={api_key}"
            res_comm = requests.get(url_comm, timeout=5)

            if res_comm.status_code == 200:
                data = res_comm.json()
                for item in data.get("items", []):
                    comment_snippet = item["snippet"]["topLevelComment"]["snippet"]
                    raw_text = comment_snippet.get("textDisplay", "")
                    # textDisplay may contain HTML tags and entities; strip both.
                    clean_text = re.sub(r'<[^>]+>', '', raw_text)
                    clean_text = html.unescape(clean_text)

                    result["comments"].append({
                        "text": clean_text,
                        "author": comment_snippet.get("authorDisplayName", "Anonymous"),
                        "authorImage": comment_snippet.get("authorProfileImageUrl", ""),
                        "likeCount": comment_snippet.get("likeCount", 0),
                        "publishedAt": comment_snippet.get("publishedAt", ""),
                        "replyCount": item["snippet"].get("totalReplyCount", 0)
                    })

                    text_parts.append(clean_text)

            # Nothing usable from either endpoint — let the caller fall back
            # to the transcript path.
            if not text_parts:
                return None

            result["text_for_analysis"] = " ".join(text_parts)
            return result

        except Exception as e:
            print(f"Official API Error: {e}")
            return None
|
|
|
|
|
    @staticmethod
    def fetch_youtube_transcript(video_id):
        """Gather analysable text for a YouTube video.

        Strategy: prefer the official Data API (when YOUTUBE_API_KEY is set),
        otherwise fall back to public captions.

        NOTE(review): the return type is intentionally heterogeneous — a dict
        (official API payload), a plain transcript string (caption fallback),
        or None when both paths fail. Confirm callers handle all three before
        unifying.
        """
        api_key = os.getenv("YOUTUBE_API_KEY")

        if api_key:
            official_data = NLPHandler._fetch_official_api(video_id, api_key)
            if official_data:
                return official_data

        # Fallback: auto-generated/uploaded captions.
        print(f"Fetching transcript (fallback) for: {video_id}")
        try:
            transcript_list = YouTubeTranscriptApi.get_transcript(video_id, languages=['id', 'en', 'en-US'])
            full_text = " ".join([item['text'] for item in transcript_list])
            # Strip stage directions such as "[Music]" / "(applause)".
            clean_text = re.sub(r'\[.*?\]|\(.*?\)', '', full_text).strip()

            return html.unescape(clean_text)
        except Exception:
            # Captions disabled or unavailable — best effort only.
            pass

        return None