import argparse
import ast
import json
from pathlib import Path

import pandas as pd

# Input CSV with a 'tmdbId' column listing which movies to export (see main()).
LINKS_CSV = 'data/raw/links.csv'
# Input: full TMDB movie dump; large, so main() streams it in chunks.
TMDB_CSV = 'data/raw/TMDB_movie_dataset_v11.csv'
# Output directory: one <tmdb_id>.json file per movie is written here.
OUTPUT_DIR = 'data/tmdb/json/'
# NOTE: import-time side effect — ensures the output directory exists.
Path(OUTPUT_DIR).mkdir(parents=True, exist_ok=True)

def _parse_json_str(val):
    """把可能是 JSON 字符串或 Python 字符串的列解析成 Python 对象。失败则返回 None。"""
    if isinstance(val, (list, dict)):
        return val
    if not isinstance(val, str) or not val.strip():
        return None
    s = val.strip()
    try:
        return json.loads(s)
    except Exception:
        # 有些数据用单引号或不完全 JSON，做一次简单替换再试
        try:
            s2 = s.replace("'", '"')
            return json.loads(s2)
        except Exception:
            return None


def _nan_to_none(val):
    """Map pandas' float NaN (a missing CSV cell) to None; pass everything else.

    json.dump would otherwise serialize NaN as the token ``NaN``, which is
    not valid JSON.
    """
    if isinstance(val, float) and val != val:  # NaN is the only float != itself
        return None
    return val


def _first_usable(movie_row, columns):
    """Return the first column value that is a container or a non-blank string.

    Skips NaN cells explicitly: a missing float cell is truthy, so a plain
    ``a or b`` chain would wrongly stop at a NaN column.
    """
    for col in columns:
        candidate = movie_row.get(col)
        if isinstance(candidate, (list, dict)):
            return candidate
        if isinstance(candidate, str) and candidate.strip():
            return candidate
    return None


def _extract_actors(movie_row):
    """Return up to 6 actor names from the cast/actors/credits columns."""
    raw_cast = _first_usable(movie_row, ('cast', 'actors', 'credits'))
    parsed_cast = _parse_json_str(raw_cast)
    if isinstance(parsed_cast, list):
        # Entries look like [{'name': ..., 'character': ...}, ...] or plain strings.
        names = []
        for entry in parsed_cast:
            if isinstance(entry, dict) and entry.get('name'):
                names.append(str(entry.get('name')).strip())
            elif isinstance(entry, str) and entry.strip():
                names.append(entry.strip())
        return [n for n in names if n][:6]
    if isinstance(raw_cast, str):
        # Comma-separated plain string.
        return [a.strip() for a in raw_cast.split(',') if a.strip()][:6]
    return []


def _extract_directors(movie_row):
    """Return director names from the 'director' column, else crew/credits JSON."""
    raw_dir = movie_row.get('director')
    parsed_dir = _parse_json_str(raw_dir)
    if isinstance(parsed_dir, list):
        # Shaped like ['A', 'B'].
        directors = [str(d).strip() for d in parsed_dir if isinstance(d, (str, int))]
    elif isinstance(raw_dir, str) and raw_dir.strip():
        directors = [d.strip() for d in raw_dir.split(',') if d.strip()]
    else:
        directors = []
    if directors:
        return directors
    # Fall back to crew/credits JSON: keep members whose job mentions Director.
    parsed_crew = _parse_json_str(_first_usable(movie_row, ('crew', 'credits')))
    found = []
    if isinstance(parsed_crew, list):
        for member in parsed_crew:
            if not isinstance(member, dict):
                continue
            job = str(member.get('job') or '').lower()
            name = member.get('name')
            if name and ('director' in job or job == 'dir'):
                found.append(str(name).strip())
    return found


def _youtube_key_from_url(url):
    """Extract the video key from a YouTube watch/embed URL (best effort).

    Tries ``v=KEY`` first, then ``/embed/KEY``, then the last path segment.
    Returns None/'' when nothing usable is found (callers treat falsy as miss).
    """
    if 'v=' in url:
        key = url.split('v=', 1)[1].split('&', 1)[0]
        if key:
            return key
    if '/embed/' in url:
        key = url.split('/embed/', 1)[1].split('?', 1)[0]
        if key:
            return key
    if '/' in url:
        return url.rstrip('/').split('/')[-1]
    return None


def _extract_videos(movie_row):
    """Return a list of normalized video dicts (site/key/type/name/official)."""
    # Prefer the standard 'videos' column, then common simplified column names.
    raw_videos = None
    for col in ('videos', 'video', 'trailers', 'videos_json'):
        val = movie_row.get(col)
        if isinstance(val, str) and val.strip():
            raw_videos = val
            break
    parsed_videos = _parse_json_str(raw_videos)
    videos = []
    if isinstance(parsed_videos, list):
        for v in parsed_videos:
            if not isinstance(v, dict):
                continue
            # str() guards against non-string junk (numbers, None) in dirty rows,
            # which would otherwise crash on .strip().
            site = str(v.get('site') or '').strip()
            key = str(v.get('key') or '').strip()
            vtype = str(v.get('type') or '').strip()
            name = str(v.get('name') or '').strip()
            official = bool(v.get('official')) if 'official' in v else None
            if site and key:
                videos.append({
                    'site': site,
                    'key': key,
                    'type': vtype or None,
                    'name': name or None,
                    'official': official,
                })
        return videos
    # Fallback 1: columns holding a bare YouTube key.
    for col in ('trailer_key', 'youtube_key', 'yt_key'):
        val = movie_row.get(col)
        if isinstance(val, str) and val.strip():
            return [{'site': 'YouTube', 'key': val.strip(), 'type': 'Trailer',
                     'name': 'Trailer', 'official': None}]
    # Fallback 2: columns holding a YouTube URL; only the first match is tried.
    for col in ('trailer', 'youtube_url', 'yt_url'):
        val = movie_row.get(col)
        if isinstance(val, str) and 'youtube' in val.lower():
            key = _youtube_key_from_url(val.strip())
            if key:
                return [{'site': 'YouTube', 'key': key, 'type': 'Trailer',
                         'name': 'Trailer', 'official': None}]
            break
    return videos


def extract_info(movie_row):
    """Build a JSON-serializable metadata dict for one TMDB CSV row.

    Copies the core fields and a fixed set of passthrough columns (pandas NaN
    normalized to None so the output stays valid JSON), then derives
    'actors', 'directors' and — when present — 'videos' from whichever of the
    known column layouts the row uses.
    """
    info = {
        'title': _nan_to_none(movie_row.get('title')),
        'overview': _nan_to_none(movie_row.get('overview')),
        'genres': _nan_to_none(movie_row.get('genres')),
        'poster_path': _nan_to_none(movie_row.get('poster_path')),
        'release_date': _nan_to_none(movie_row.get('release_date')),
        'tmdb_id': _nan_to_none(movie_row.get('id')),
        'imdb_id': _nan_to_none(movie_row.get('imdb_id')),
    }
    # Extra metadata passed straight through from the CSV when present.
    passthrough_keys = [
        'vote_average', 'vote_count', 'status', 'revenue', 'runtime', 'adult', 'backdrop_path', 'budget',
        'homepage', 'original_language', 'original_title', 'tagline',
        'production_companies', 'production_countries', 'spoken_languages', 'keywords', 'popularity'
    ]
    for k in passthrough_keys:
        if k in movie_row:
            info[k] = _nan_to_none(movie_row.get(k))
    info['actors'] = _extract_actors(movie_row)
    info['directors'] = _extract_directors(movie_row)
    videos = _extract_videos(movie_row)
    if videos:
        info['videos'] = videos
    return info

def main():
    """CLI entry point: export the needed TMDB rows as per-movie JSON files.

    Streams the large TMDB CSV in chunks, keeps only the ids referenced by
    LINKS_CSV (or the single --only id), writes one ``<tmdb_id>.json`` per
    movie under OUTPUT_DIR, and stops early once every needed id is handled.
    Existing files are kept unless --overwrite is given.
    """
    parser = argparse.ArgumentParser(description='Extract TMDB fields to local JSON files')
    parser.add_argument('--overwrite', action='store_true', help='Overwrite existing JSON files')
    parser.add_argument('--only', type=str, default=None, help='Only process a single tmdb_id')
    args = parser.parse_args()

    if args.only:
        needed_ids = {str(int(args.only))}
    else:
        links = pd.read_csv(LINKS_CSV)
        # tmdbId is read as a NaN-able (float) column; normalize to int-strings.
        needed_ids = {str(int(x)) for x in links['tmdbId'] if not pd.isna(x)}

    processed = set()
    for chunk in pd.read_csv(TMDB_CSV, chunksize=10000, low_memory=False):
        if 'id' not in chunk.columns:
            continue
        chunk['id'] = chunk['id'].astype(str)
        # Restrict the chunk to the ids we actually need before iterating rows.
        sub = chunk[chunk['id'].isin(needed_ids)]
        for _, movie_row in sub.iterrows():
            tmdb_id = movie_row['id']
            # Skip duplicate rows for an id already handled this run (the
            # original re-wrote duplicates when --overwrite was set).
            if tmdb_id in processed:
                continue
            # A pre-existing file counts as done unless --overwrite is given.
            # (Collapses the original's two overlapping exists/overwrite
            # branches, the first of which was unreachable.)
            out_path = Path(OUTPUT_DIR) / f'{tmdb_id}.json'
            if out_path.exists() and not args.overwrite:
                processed.add(tmdb_id)
                continue
            info = extract_info(movie_row)
            with open(out_path, 'w', encoding='utf-8') as f:
                json.dump(info, f, ensure_ascii=False, indent=2)
            processed.add(tmdb_id)
        # Every needed id has been seen — no point scanning the rest of the CSV.
        if len(processed) == len(needed_ids):
            break


if __name__ == '__main__':
    main()