"""Download the SeamlessAlign metadata and audio for one language-pair direction.

All settings come from environment variables (DIRECTION, N_POOL, MAX_RETRY,
TIMEOUT, LINE_NO_START, LINE_NO_END), so downloads can be sharded across jobs
by line-number range.
"""
import json
import os
import tarfile
import zipfile
import gzip
import subprocess
from os.path import join as p_join
from multiprocessing import Pool

import pandas as pd
from tqdm import tqdm

# dataset config
url_metadata_dict = {
    "enA-jaA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz",
    "enA-jpn": "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
}
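# language-pair direction to download; must be a key of url_metadata_dict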
direction = os.getenv("DIRECTION", "enA-jaA")
sides = set(direction.split("-"))
cache_dir_audio = p_join("download", "audio", direction)
cache_dir_feature = p_join("download", "feature", direction)
os.makedirs(cache_dir_feature, exist_ok=True)
for s in sides:
    os.makedirs(p_join(cache_dir_audio, s), exist_ok=True)
# processor config
n_pool = int(os.getenv("N_POOL", 8))
wget_max_retry = os.getenv("MAX_RETRY", "1")
wget_timeout = os.getenv("TIMEOUT", "20")
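# metadata shard to process, selected by line_no (end exclusive)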
line_no_start = int(os.getenv("LINE_NO_START", 0))
line_no_end = int(os.getenv("LINE_NO_END", 10000))
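# example invocation (assuming this file is saved as download.py):
#   DIRECTION=enA-jaA N_POOL=8 LINE_NO_START=0 LINE_NO_END=10000 python download.py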


def wget(url: str, output_file: str):
    """Download `url` with the wget CLI and unpack archives in place; returns False on failure."""
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    result = subprocess.run(["wget", url, "-O", output_file, "--tries", wget_max_retry, "--timeout", wget_timeout])
    if result.returncode != 0 or not os.path.exists(output_file):
        # `wget -O` creates the output file even on failure, so drop any partial download
        if os.path.exists(output_file):
            os.remove(output_file)
        return False
    if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
        # mode "r" auto-detects compression, so one call handles all three suffixes
        with tarfile.open(output_file) as tar:
            tar.extractall(os.path.dirname(output_file))
        os.remove(output_file)
    elif output_file.endswith('.gz'):
        # decompress alongside the archive, stripping only the trailing ".gz"
        with gzip.open(output_file, 'rb') as f:
            with open(output_file[:-len('.gz')], 'wb') as f_write:
                f_write.write(f.read())
        os.remove(output_file)
    elif output_file.endswith('.zip'):
        # extract next to the archive rather than into the current working directory
        with zipfile.ZipFile(output_file, 'r') as zip_ref:
            zip_ref.extractall(os.path.dirname(output_file))
        os.remove(output_file)
    return True


def get_metadata():
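    """Download (if needed) and parse the alignment metadata TSV for the configured direction."""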
    url_metadata = url_metadata_dict[direction]
    meta_data_filename = os.path.basename(url_metadata)
    meta_data_path = p_join("download", "meta", meta_data_filename)
    if not os.path.exists(meta_data_path.replace(".gz", "")):
        assert wget(url_metadata, output_file=meta_data_path), f"failed to download {url_metadata}"
    # the regex separator requires pandas' python parser engine
    df = pd.read_csv(meta_data_path.replace(".gz", ""), sep=r'[\t\s]', header=None, engine="python")
    df = df[[0, 2, 3, 4, 9, 10, 11, 12]]
    df.columns = ["id", "url", "duration_start", "duration_end", "laser_score", "direction", "side", "line_no"]
    if direction == "enA-jpn":
        df = df[df["side"] == "enA"]
    assert len(df["direction"].unique()) == 1
    df.pop("direction")
    return df.sort_values(by=["line_no", "side"])


def to_json_serializable(val):
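    """Coerce numpy scalars (numpy.int64, numpy.float64, ...) into plain Python types for json.dump."""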
    if "float" in str(type(val)):
        return float(val)
    if "int" in str(type(val)):
        return int(val)
    return str(val)


def get_audio(dataframe: pd.DataFrame):
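    """Download the audio for one line_no group and cache its metadata as a JSON feature file.

    Returns False if any download fails.
    """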
    features = {"line_no": int(dataframe.pop('line_no').values[0])}
    for side, df in dataframe.groupby("side"):
        df.pop("side")
        features.update({f"{side}.{k}": to_json_serializable(v) for k, v in df.iloc[0].to_dict().items()})
        # keep the original file extension (last dot-suffix of the URL) for the cached audio
        extension = os.path.basename(features[f"{side}.url"]).split(".")[-1]
        features[f"{side}.path"] = str(p_join(cache_dir_audio, side, f"{features['line_no']}.{extension}"))
        if not os.path.exists(features[f"{side}.path"]):
            flag = wget(features[f"{side}.url"], output_file=features[f"{side}.path"])
            if not flag:
                return False
    with open(p_join(cache_dir_feature, f'{features["line_no"]}.json'), "w") as f:
        json.dump(features, f)
    return True


def process_dataset():
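    """Download all lines in [LINE_NO_START, LINE_NO_END) that are not cached yet."""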
    df_metadata = get_metadata()
    print(f"metadata: {len(df_metadata)}, {line_no_start} --> {line_no_end}")
    inputs = [
        g for line_no, g in df_metadata.groupby("line_no")
        if line_no_start <= line_no < line_no_end and not os.path.exists(
            p_join(cache_dir_feature, f'{int(line_no)}.json')
        )
    ]
    print(f"filtered unique lines: {len(inputs)}")
    if direction == "enA-jaA":
        inputs = [g for g in inputs if len(g["side"].unique()) == 2 and set(g["side"].unique()) == sides]
        print(f"removed side != 2: {len(inputs)}")
    if n_pool == 1:
        for g in tqdm(inputs, total=len(inputs)):
            flag = get_audio(g)
            if not flag:
                print(f"failed:\n{g['url']}")
    else:
        with Pool(n_pool) as pool:
            # imap_unordered lets tqdm advance as downloads finish; pool.map would
            # exhaust the tqdm iterable up front and the bar would complete immediately
            results = list(tqdm(pool.imap_unordered(get_audio, inputs), total=len(inputs)))
            print(f"failed: {len(inputs) - sum(results)}")


if __name__ == '__main__':
    process_dataset()