from my_py_toolkit.file.file_toolkit import *
import sys
import re
import argparse
from tqdm import tqdm
from my_py_toolkit.decorator.multi_thread_decorator import multi_thread
# from .utils import get_save_path
# from my_py_toolkit.data_handle.multi_modal.meta_data.utils import get_save_path
from my_py_toolkit.decorator.decorator import try_except

"""
生成 JourneyDB 的 meta_data.

原始数据存储格式：
+--imgs
| +--000
| | +--00027bed-4b41-4ae1-99b0-b9042352b231.jpg
| | +--00027bed-4b41-4ae1-99b0-b9042352b231.jpg.img_str
| | +--00031208-f088-4e79-b76a-ca8cde7eae4a.jpg
| | +--00049474-673a-46f9-95f7-66b7330992cd.jpg
| | +--0007d7ee-9b6d-4cf0-8a49-95cad4fd0570.jpg
| | +--...
| +--001
| | +--00011cca-5627-4656-966c-a0fd852d3e81.jpg
| | +--0008297d-9b3d-4632-91cd-8e8dd4e53246.jpg
| | +--000bcde1-3688-4049-ae1e-6876878eee20.jpg
| | +--000ea3b3-068a-4f35-9ff7-1a652b9f78b9.jpg
| | +--...
| +--002
| | +--0004b36e-a823-4904-8c32-94ebeb388267.jpg
| | +--000a65bf-fc55-4335-89cd-c708cfbed052.jpg
| | +--000be67b-64e3-444c-85d0-809e9e6fd296.jpg
| | +--0011cc63-80ff-404b-bacb-ee139bab9712.jpg

meta data:
'{"image": "path", "image_str": "img to tokens to strs", "text": "text desc", "desc": "数据集原始携带的 .json 文件，是对数据的描述。"}'


使用示例：
python gpt4v.py /home/centos/ll/datasets/multi_modal/gpt4v --prefix /home/centos/ll/datasets/multi_modal
"""
def get_save_path(data_dir_or_path):
    """Build the output path of the metadata file.

    For a directory (or any non-existing/non-file path) the result is
    simply ``<path>_meta_data.jsonl``.  For an existing file (a filelist),
    the parent directory name is prefixed with ``JourneyDB_`` and the
    metadata file is placed beside that renamed directory.
    """
    suffix = '_meta_data.jsonl'
    base = data_dir_or_path
    if os.path.isfile(base):
        parts = split_path(base)
        # Prefix the parent directory name so the dataset origin is visible
        # in the output filename.
        parts[-2] = 'JourneyDB_' + parts[-2]
        base = '/'.join(parts[:-1])
    return base + suffix

# ======================== Parse command-line arguments

parser = argparse.ArgumentParser(description='JourneyDB metadata')
parser.add_argument('data_dir_or_path', type=str, help='数据文件夹，或 filelist 文件')
parser.add_argument('--save_path', type=str, default=None, help='结果保存路径')
parser.add_argument('--prefix', type=str, default=None, help='文件路径前缀，在 metadata 保存数据中，需要把路径前缀删除，保存相对路径')
args = parser.parse_args()
# When --save_path is omitted, derive a default output path next to the input.
if not args.save_path:
    args.save_path = get_save_path(args.data_dir_or_path)


def handle_img_path(file_path, prefix=None):
    """Convert an image path into a path relative to *prefix*.

    Args:
        file_path: Path of the image file.
        prefix: Optional root directory to remove from the front of
            ``file_path``.

    Returns:
        ``file_path`` unchanged when ``prefix`` is falsy; otherwise the
        path with a leading ``prefix`` removed and surrounding path
        separators stripped.
    """
    if not prefix:
        return file_path

    # Only remove the prefix when it is actually the leading component.
    # The previous str.replace(prefix, '') also deleted matches occurring
    # in the middle of the path, corrupting such paths.
    if file_path.startswith(prefix):
        file_path = file_path[len(prefix):]
    return file_path.strip('\\/')

def read_file_data(path, load_json=False):
    """Read a text file, optionally decoding its content as JSON.

    Returns an empty string when *path* does not exist.
    """
    if not os.path.exists(path):
        return ''

    content = read_file(path)
    return json.loads(content) if load_json else content

def generate_metadata(files_or_filelist_path, save_path=None, prefix=None):
    """Write one JSONL metadata record per image file.

    Each record stores the image path plus the paths of its sibling
    ``.img_str``, ``.txt`` and ``.json`` files (same stem, different
    extension).

    Args:
        files_or_filelist_path: A list of image paths, or the path of a
            filelist file containing one image path per line.
        save_path: Output JSONL path; defaults to
            ``get_save_path(files_or_filelist_path)``.
        prefix: Unused here; kept for interface compatibility with the
            other generators in this module.
    """
    if not save_path:
        save_path = get_save_path(files_or_filelist_path)

    if isinstance(files_or_filelist_path, str) and os.path.isfile(files_or_filelist_path):
        files = read_line_by_line(files_or_filelist_path)
    else:
        files = files_or_filelist_path

    p_bar = tqdm()
    p_bar.set_description('gpt4v generate_metadata:')
    # "with" guarantees the output file is flushed and closed even on
    # errors; the original opened the writer and never closed it.
    with open(save_path, 'w', encoding='utf-8') as writer:
        for file in files:
            # Sibling files share the stem: a.jpg -> a.img_str / a.txt / a.json.
            stem = file[:file.rfind('.')]
            cur = {
                'image': file,
                'image_str': stem + '.img_str',
                'text': stem + '.txt',
                'desc': stem + '.json',
            }
            writer.write(json.dumps(cur, ensure_ascii=False) + '\n')
            p_bar.update(1)
    p_bar.close()

    
@multi_thread(logger=None, res_path=args.save_path)
@try_except()
def generate_metadata_multi_thread(files_or_filelist_path, data_dir, prefix=None):
    """Build JourneyDB metadata records (JSONL strings) for a worker chunk.

    Each input line is a JSON object carrying ``img_path`` plus the
    dataset's Task1/Task2/Task3 annotations; the record is enriched with
    the image-token string read from the sibling ``.img_str`` file.
    """
    if isinstance(files_or_filelist_path, str) and os.path.isfile(files_or_filelist_path):
        lines = read_line_by_line(files_or_filelist_path)
    else:
        lines = files_or_filelist_path

    root_name = split_path(data_dir)[-1]

    records = []
    for line in lines:
        item = json.loads(line)
        # Resolve the token file from the ORIGINAL img_path before rewriting it.
        img_str_path = os.path.join(data_dir, 'imgs', item.get('img_path')) + '.img_str'
        item['img_path'] = os.path.join(root_name, 'imgs', item['img_path'].strip('./'))
        item['image_str'] = read_file_data(img_str_path)
        # Rename the dataset's generic task keys to descriptive ones.
        item['text'] = item.pop('Task2').pop('Caption')
        item['QA'] = item.pop('Task3')
        item['Style'] = item.pop('Task1')
        records.append(json.dumps(item, ensure_ascii=False))

    # A single record is returned bare, not wrapped in a list.
    return records[0] if len(records) == 1 else records

if __name__ == '__main__':
    # files = get_file_paths(args.data_dir_or_path, ['jpg', 'jpeg', 'png', 'bmp'])
    # Parent directory of the input — presumably split_path splits on '/',
    # so this drops the last path component; TODO confirm against
    # my_py_toolkit.file.file_toolkit.split_path.
    data_dir = '/'.join(split_path(args.data_dir_or_path)[:-1])
    generate_metadata_multi_thread(args.data_dir_or_path, data_dir, prefix=args.prefix)


    



