# coding=utf-8
# @Time   : 2023/11/26
# @Author : wangjl
# @Email  : 1975039138@qq.com
import json
import os
import math

import numpy as np
import pandas as pd


def read_json(data_path, default_value=None):
    """Load JSON data from ``data_path``.

    If the file does not exist, the parent directory is created (so a later
    write succeeds) and ``default_value`` — or ``{}`` when none is given —
    is returned instead of raising.

    Args:
        data_path: Path of the JSON file to read.
        default_value: Fallback returned when the file is missing.
            ``None`` (the default) falls back to an empty dict.

    Returns:
        The parsed JSON content, or the fallback value.
    """
    if not os.path.exists(data_path):
        parent_dir = os.path.dirname(data_path)
        # os.makedirs("") raises FileNotFoundError, so guard bare filenames.
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)
        # Compare against None so falsy defaults such as [] or "" are honored
        # (the old truthiness check silently replaced them with {}).
        datas = {} if default_value is None else default_value
    else:
        with open(data_path, "r", encoding="utf-8") as fp:
            print(f"## read json from {data_path}...")
            datas = json.load(fp)
    return datas


def write_json(data_path, data):
    """Serialize ``data`` as JSON to ``data_path``, creating parent dirs.

    Args:
        data_path: Destination file path.
        data: JSON-serializable object to write.
    """
    parent_dir = os.path.dirname(data_path)
    # os.makedirs("") raises FileNotFoundError, so guard bare filenames;
    # exist_ok already covers the pre-existing-directory case.
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)

    # Explicit UTF-8: with ensure_ascii=False the dump contains raw non-ASCII
    # characters, which would fail on platforms with a non-UTF-8 locale.
    with open(data_path, "w", encoding="utf-8") as fp:
        print(f"## write file to {data_path}...")
        json.dump(data, fp, ensure_ascii=False)


def get_pd_data(file_paths, limit=None, rm_unnamed=False, ext=".csv", verbose=False):
    """Read several tabular files and concatenate them into one DataFrame.

    Args:
        file_paths: Paths of the files to read; all must share the same format.
        limit: If a positive int, keep only the first ``limit`` rows per file.
        rm_unnamed: Drop auto-generated "Unnamed*" index columns when True.
        ext: File format, either ".csv" or ".xlsx".
        verbose: Print the total row count when True.

    Returns:
        A single ``pd.DataFrame`` with all rows, re-indexed from 0.

    Raises:
        ValueError: If ``file_paths`` is empty.
    """
    assert ext in [".csv", ".xlsx"]
    read_fn = pd.read_csv if ext == ".csv" else pd.read_excel

    if not file_paths:
        raise ValueError("file_paths must contain at least one path")

    # Collect frames first and concat once: avoids re-reading the first file
    # just for its columns, the O(n^2) cost of concatenating inside the loop,
    # and the dtype upcasting caused by seeding with an empty DataFrame.
    frames = []
    for file_path in file_paths:
        pd_data = read_fn(file_path)
        if limit and limit > 0:
            pd_data = pd_data.iloc[:limit]
        frames.append(pd_data)
    res_df = pd.concat(frames, ignore_index=True)

    if rm_unnamed:
        res_df = res_df.loc[:, ~res_df.columns.str.contains("^Unnamed")]
    if verbose:
        print(f"## Total read {len(res_df)} rows.")
    return res_df


def json_to_txt(json_path, txt_path, process_fn):
    """Convert each item of a JSON file into text and write it to a file.

    Args:
        json_path: Path of the input JSON file; its top-level value is
            expected to be iterable (e.g. a list of records).
        txt_path: Destination text file path.
        process_fn: Callable mapping one JSON item to its output string.
            It must include any trailing newline itself — none is added here.
    """
    json_data = read_json(data_path=json_path)
    texts = [process_fn(x) for x in json_data]
    # Explicit UTF-8 so non-ASCII output is written consistently on any
    # platform (matches the JSON helpers above).
    with open(txt_path, "w", encoding="utf-8") as fp:
        print(f"## write text lines to {txt_path}")
        fp.writelines(texts)


def split_data(datas, expected_batch=1, shuffle=False):
    """Yield ``datas`` split into ``expected_batch`` roughly equal batches.

    Args:
        datas: Sequence to split (it is copied; the input is never mutated).
        expected_batch: Number of batches to produce. With 1 (the default)
            the whole copied sequence is yielded as a single batch.
        shuffle: Shuffle element order (via ``np.random``) before splitting.

    Yields:
        Consecutive slices of the (optionally shuffled) data. The last batch
        may be shorter; with expected_batch > len(datas) some are empty.
    """
    data_copy = datas.copy()
    if expected_batch > 1:
        indices = list(range(len(data_copy)))
        if shuffle:
            np.random.shuffle(indices)
        data_copy = [data_copy[i] for i in indices]

        batch_size = math.ceil(len(indices) / expected_batch)
        for batch_idx in range(expected_batch):
            yield data_copy[batch_idx * batch_size: (batch_idx + 1) * batch_size]
    else:
        # BUG FIX: the original did ``return data_copy`` here, but because
        # this function contains ``yield`` it is a generator — the returned
        # value was stored on StopIteration and silently discarded, so
        # callers got an empty iterator. Yielding makes the single-batch
        # case actually produce the data.
        yield data_copy
