from my_py_toolkit.file.file_toolkit import *
import sys
import re
import argparse
from tqdm import tqdm
from my_py_toolkit.decorator.multi_thread_decorator import multi_thread
# from .utils import get_save_path
from my_py_toolkit.data_handle.multi_modal.meta_data.utils import get_save_path
from my_py_toolkit.decorator.decorator import try_except

"""
生成 gpt4v 的 meta_data.

原始数据存储格式：
000000000.jpg
000000000.jpg.img_str
000000000.json
000000000.txt
000000001.jpg
000000001.jpg.img_str
000000001.json
000000001.txt
000000002.jpg
000000002.jpg.img_str
000000002.json
000000002.txt
000000003.jpg

meta data:
{"image": "path", "image_str": "img to tokens to strs", "text": "text desc", "desc": "数据集原始携带的 .json 文件，是对数据的描述。"}


使用示例：
python gpt4v.py /home/centos/ll/datasets/multi_modal/gpt4v --prefix /home/centos/ll/datasets/multi_modal
"""
# ======================== Parse command-line arguments
# NOTE: this runs at import time (module level). `args` must exist before the
# function definitions below, because the @multi_thread decorator reads
# args.save_path when the decorated function is defined.

parser = argparse.ArgumentParser(description='gpt4v metadata')
parser.add_argument('data_dir_or_path', type=str, help='数据文件夹，或 filelist 文件')
parser.add_argument('--save_path', type=str, default=None, help='结果保存路径')
parser.add_argument('--prefix', type=str, default=None, help='文件路径前缀，在 metadata 保存数据中，需要把路径前缀删除，保存相对路径')
args = parser.parse_args()
# Derive a default output path from the input location when --save_path
# was not given (get_save_path comes from the meta_data.utils module).
if not args.save_path:
    args.save_path = get_save_path(args.data_dir_or_path)


def handle_img_path(file_path, prefix=None):
    """Strip a leading path prefix from ``file_path`` so a relative path is stored.

    Args:
        file_path (str): The image file path.
        prefix (str, optional): Prefix to remove. When falsy, ``file_path``
            is returned unchanged.

    Returns:
        str: ``file_path`` with the leading ``prefix`` removed and any
        leading/trailing ``/`` or ``\\`` separators stripped.
    """
    if not prefix:
        return file_path

    # Remove the prefix only at the START of the path. The original used
    # str.replace(prefix, ''), which deletes every occurrence of the prefix
    # text anywhere in the path and can corrupt paths whose later components
    # happen to repeat it.
    if file_path.startswith(prefix):
        file_path = file_path[len(prefix):]
    return file_path.strip('\\/')

def read_file_data(path, load_json=False):
    """Read a file's contents, returning '' when the file is missing.

    Args:
        path (str): File to read.
        load_json (bool): When True, parse the contents as JSON and return
            the decoded object instead of the raw text.

    Returns:
        The file text (or decoded JSON object), or '' for a missing file.
    """
    # Best-effort read: a missing sidecar file yields an empty field rather
    # than an error.
    if os.path.exists(path):
        raw = read_file(path)
        return json.loads(raw) if load_json else raw
    return ''
def generate_metadata(files_or_filelist_path, save_path=None, prefix=None):
    """Write one JSON metadata line per image file.

    Unlike ``generate_metadata_multi_thread``, this variant stores the
    *paths* of the sidecar files (.txt / .img_str / .json), not their
    contents.

    Args:
        files_or_filelist_path: Either a list of image paths, or the path of
            a filelist file with one image path per line.
        save_path (str, optional): Output file; derived via ``get_save_path``
            when omitted.
        prefix (str, optional): Path prefix stripped from the stored image
            path (see ``handle_img_path``). Previously accepted but ignored.
    """
    if not save_path:
        save_path = get_save_path(files_or_filelist_path)

    if isinstance(files_or_filelist_path, str) and os.path.isfile(files_or_filelist_path):
        files = read_line_by_line(files_or_filelist_path)
    else:
        files = files_or_filelist_path

    p_bar = tqdm()
    p_bar.set_description('gpt4v generate_metadata:')
    # Use a context manager so the output file is always closed/flushed;
    # the original opened the writer and never closed it.
    with open(save_path, 'w', encoding='utf-8') as writer:
        for file in files:
            stem = file[:file.rfind('.')]
            text_path = stem + '.txt'
            # Dataset layout is <image>.img_str (e.g. 000000000.jpg.img_str);
            # the original dropped the image extension here, disagreeing with
            # both the documented layout and generate_metadata_multi_thread.
            img_str_path = file + '.img_str'
            json_path = stem + '.json'
            cur = {
                'image': handle_img_path(file, prefix),  # prefix now honored
                'image_str': img_str_path,
                'text': text_path,
                'desc': json_path
            }
            writer.write(json.dumps(cur, ensure_ascii=False) + '\n')
            p_bar.update(1)

    p_bar.close()

    
@multi_thread(logger=None, res_path=args.save_path)
@try_except()
def generate_metadata_multi_thread(files_or_filelist_path, prefix=None):
    """Build metadata JSON lines with the sidecar file *contents* inlined.

    For each image, reads ``<stem>.txt`` (caption), ``<image>.img_str``
    (tokenized image string) and ``<stem>.json`` (original description) and
    serializes them into one JSON line.

    Args:
        files_or_filelist_path: Either a list of image paths, or the path of
            a filelist file with one image path per line.
        prefix (str, optional): Path prefix stripped from the stored image
            path (see ``handle_img_path``).

    Returns:
        A list of JSON strings, or a bare JSON string when there is exactly
        one record (matching the original unwrapping behavior — presumably
        what the multi_thread result writer expects; confirm against the
        decorator).
    """
    if isinstance(files_or_filelist_path, str) and os.path.isfile(files_or_filelist_path):
        files = read_line_by_line(files_or_filelist_path)
    else:
        files = files_or_filelist_path

    result = []
    for file in files:
        stem = file[:file.rfind('.')]
        record = {
            'image': handle_img_path(file, prefix),
            'image_str': read_file_data(file + '.img_str'),
            'text': read_file_data(stem + '.txt'),
            'desc': read_file_data(stem + '.json', True),
        }
        result.append(json.dumps(record, ensure_ascii=False))

    # Unwrap a single-element list to a bare string (original behavior).
    return result[0] if len(result) == 1 else result

if __name__ == '__main__':
    # Collect every image file under the input directory, then generate the
    # metadata across worker threads; results are written to args.save_path
    # via the @multi_thread decorator.
    image_files = get_file_paths(args.data_dir_or_path, ['jpg', 'jpeg', 'png', 'bmp'])
    generate_metadata_multi_thread(image_files, prefix=args.prefix)