import json
import random


def timechat_format_reformat_online_instruction_no_time(input_fname, output_fname, video_folder='shot2story'):
    """Convert online-instruction examples to TimeChat format, dropping timestamps.

    Reads a JSON list of examples, each carrying a 'video_uid' and a
    'conversation' (list of {'role', 'content'} turns), and writes a JSON list
    of {'video', 'conversations'} records: all user turns are joined into one
    human message (prefixed with '<image>') and all assistant turns into one
    gpt message.

    Args:
        input_fname: path to the input JSON list of examples.
        output_fname: path where the reformatted JSON list is written.
        video_folder: folder prefix prepended to each example's video_uid.
    """
    # Context managers close the handles promptly; the previous
    # json.load(open(...)) pattern left files open until GC.
    with open(input_fname, 'r') as f:
        input_example_list = json.load(f)
    output_example_list = []
    for input_example in input_example_list:
        human_contents = ['<image>'] + [turn['content'] for turn in input_example['conversation'] if turn['role'] == 'user']
        gpt_contents = [turn['content'] for turn in input_example['conversation'] if turn['role'] == 'assistant']
        output_example = {'video': video_folder + '/' + input_example['video_uid'], 'conversations': [
            {'from': 'human', 'value': '\n'.join(human_contents)},
            {'from': 'gpt', 'value': '\n\n'.join(gpt_contents)},
        ]}
        output_example_list.append(output_example)
    print(f'writing {len(output_example_list)} examples to {output_fname}')
    with open(output_fname, 'w') as f:
        json.dump(output_example_list, f, indent=4)


def timechat_format_reformat_online_instruction_with_time(input_fname, output_fname, video_folder='shot2story'):
    """Convert online-instruction examples to TimeChat format, keeping timestamps.

    Like the no-time variant, but each assistant turn is rendered as
    '<time:.1f> seconds, <content>' using the turn's 'time' field, so the
    target text carries when each answer was produced.

    Args:
        input_fname: path to the input JSON list of examples.
        output_fname: path where the reformatted JSON list is written.
        video_folder: folder prefix prepended to each example's video_uid.
    """
    # Context managers close the handles promptly; the previous
    # json.load(open(...)) pattern left files open until GC.
    with open(input_fname, 'r') as f:
        input_example_list = json.load(f)
    output_example_list = []
    for input_example in input_example_list:
        human_contents = ['<image>'] + [turn['content'] for turn in input_example['conversation'] if turn['role'] == 'user']
        gpt_contents = [f'{turn["time"]:.1f} seconds, {turn["content"]}' for turn in input_example['conversation'] if turn['role'] == 'assistant']
        output_example = {'video': video_folder + '/' + input_example['video_uid'], 'conversations': [
            {'from': 'human', 'value': '\n'.join(human_contents)},
            {'from': 'gpt', 'value': '\n\n'.join(gpt_contents)},
        ]}
        output_example_list.append(output_example)
    print(f'writing {len(output_example_list)} examples to {output_fname}')
    with open(output_fname, 'w') as f:
        json.dump(output_example_list, f, indent=4)


def timechat_format_reformat_online_narration(input_fname, output_fname, video_folder='shot2story'):
    """Convert narration-stream annotations to TimeChat format.

    The input JSON maps video_uid -> {'0': [{'time', 'text'}, ...]}. Each video
    becomes one example: the human message is '<image>' plus one randomly
    chosen narration instruction, and the gpt message concatenates the
    narration turns as '<time:.1f> seconds, <text>' (a trailing period is
    appended to each turn when missing).

    Args:
        input_fname: path to the input JSON dict of narration streams.
        output_fname: path where the reformatted JSON list is written.
        video_folder: folder prefix prepended to each video_uid.
    """
    instructions = [
        "Please concisely narrate the video in real time.",
        "Help me to illustrate my view in short.",
        "Please simply describe what do you see.",
        "Continuously answer what you observed with simple text.",
        "Do concise real-time narration.",
        "Hey assistant, do you know the current video content? Reply me concisely.",
        "Simply interpret the scene for me.",
        "What can you tell me about? Be concise.",
        "Use simple text to explain what is shown in front of me.",
        "What is the action now? Please response in short.",
    ]
    # Context managers close the handles promptly; the previous
    # json.load(open(...)) pattern left files open until GC.
    with open(input_fname, 'r') as f:
        input_example_dict = json.load(f)
    output_example_list = []
    for video_uid, input_example in input_example_dict.items():
        human_contents = f'<image>\n{random.choice(instructions)}'
        gpt_contents = [f'{turn["time"]:.1f} seconds, {turn["text"]}' for turn in input_example['0']]
        # Normalize sentence endings so joined narrations read cleanly.
        gpt_contents = [c if c.endswith('.') else c + '.' for c in gpt_contents]
        output_example = {'video': f'{video_folder}/{video_uid}', 'conversations': [
            {'from': 'human', 'value': human_contents},
            {'from': 'gpt', 'value': '\n\n'.join(gpt_contents)},
        ]}
        output_example_list.append(output_example)
    print(f'writing {len(output_example_list)} examples to {output_fname}')
    with open(output_fname, 'w') as f:
        json.dump(output_example_list, f, indent=4)


def timechat_format_reformat_online_grounding(input_fname, output_fname, video_folder='queryd'):
    """Convert temporal-grounding annotations to TimeChat format.

    Each input example carries a 'query', 'timestamps' (list of [start, end]
    second pairs; only the first pair is used), and a 'video_uid'. The human
    message asks a randomly chosen grounding question about the query; the gpt
    message states the first timestamp span in seconds.

    Args:
        input_fname: path to the input JSON list of grounding examples.
        output_fname: path where the reformatted JSON list is written.
        video_folder: folder prefix prepended to each video_uid.
    """
    grounding_templates = [
        "What segment of the video addresses the topic '%s'?",
        "At what timestamp can I find information about '%s' in the video?",
        "Can you highlight the section of the video that pertains to '%s'?",
        "Which moments in the video discuss '%s' in detail?",
        "Identify the parts that mention '%s'.",
        "Where in the video is '%s' demonstrated or explained?",
        "What parts are relevant to the concept of '%s'?",
        "Which clips in the video relate to the query '%s'?",
        "Can you point out the video segments that cover '%s'?",
        "What are the key timestamps in the video for the topic '%s'?"
    ]

    # Context managers close the handles promptly; the previous
    # json.load(open(...)) pattern left files open until GC.
    with open(input_fname, 'r') as f:
        input_example_list = json.load(f)
    output_example_list = []
    for input_example in input_example_list:
        human_contents = f'<image>\n{random.choice(grounding_templates) % input_example["query"]}'
        gpt_contents = f'The given query happens in {input_example["timestamps"][0][0]:.1f} - {input_example["timestamps"][0][1]:.1f} seconds.'
        output_example = {'video': f'{video_folder}/{input_example["video_uid"]}', 'conversations': [
            {'from': 'human', 'value': human_contents},
            {'from': 'gpt', 'value': gpt_contents},
        ]}
        output_example_list.append(output_example)
    print(f'writing {len(output_example_list)} examples to {output_fname}')
    with open(output_fname, 'w') as f:
        json.dump(output_example_list, f, indent=4)


def vtimellm_format_reformat_online_instruction_with_time(input_fname, output_fname, metadata_fname, video_folder='shot2story'):
    """Convert online-instruction examples to VTimeLLM format (percent times).

    VTimeLLM expresses time as an integer 00-99 percentage of the video
    duration. Each assistant turn's 'timespan' ([start, end] in seconds) is
    converted with the per-video duration from metadata_fname and clamped to
    [0, 99]. Examples whose video is missing from the metadata, or whose
    duration is 0 (which would divide by zero), are skipped.

    Args:
        input_fname: path to the input JSON list of examples.
        output_fname: path where the reformatted JSON list is written.
        metadata_fname: path to a JSON dict video_uid -> {'duration': seconds}.
        video_folder: folder prefix prepended to each video_uid.
    """
    # Context managers close the handles promptly; the previous
    # json.load(open(...)) pattern left files open until GC.
    with open(input_fname, 'r') as f:
        input_example_list = json.load(f)
    with open(metadata_fname, 'r') as f:
        video_metadata = json.load(f)
    output_example_list = []
    for input_example in input_example_list:
        if input_example['video_uid'] not in video_metadata:
            continue
        duration_secs = video_metadata[input_example['video_uid']]['duration']
        if duration_secs == 0:
            continue
        human_contents = ['<image>'] + [turn['content'] for turn in input_example['conversation'] if turn['role'] == 'user']
        gpt_contents = list()
        for turn in input_example['conversation']:
            if turn['role'] == 'assistant':
                # Seconds -> integer percentile of duration, clamped to 0-99.
                start_time, end_time = int(turn['timespan'][0] / duration_secs * 100), int(turn['timespan'][1] / duration_secs * 100)
                start_time, end_time = max(0, min(99, start_time)), max(0, min(99, end_time))
                gpt_contents.append(f'From {start_time:02d} to {end_time:02d}, {turn["content"]}')
        output_example = {'video': video_folder + '/' + input_example['video_uid'], 'conversations': [
            {'from': 'human', 'value': '\n'.join(human_contents)},
            {'from': 'gpt', 'value': '\n\n'.join(gpt_contents)},
        ]}
        output_example_list.append(output_example)
    print(f'writing {len(output_example_list)} examples to {output_fname}')
    with open(output_fname, 'w') as f:
        json.dump(output_example_list, f, indent=4)


def vtimellm_format_reformat_online_narration(input_fname, output_fname, metadata_fname, video_folder='shot2story'):
    """Convert narration-stream annotations to VTimeLLM format (percent times).

    Each narration turn covers the span from the previous turn's 'time' (0 for
    the first turn) up to its own 'time'; both endpoints are converted to
    integer 00-99 percentages of the video duration and clamped. Videos missing
    from metadata_fname or with duration 0 are skipped. The human message is
    '<image>' plus a randomly chosen instruction.

    Args:
        input_fname: path to the input JSON dict video_uid -> {'0': turns}.
        output_fname: path where the reformatted JSON list is written.
        metadata_fname: path to a JSON dict video_uid -> {'duration': seconds}.
        video_folder: folder prefix prepended to each video_uid.
    """
    instructions = [
        "Please concisely narrate the video in real time.",
        "Help me to illustrate my view in short.",
        "Please simply describe what do you see.",
        "Continuously answer what you observed with simple text.",
        "Do concise real-time narration.",
        "Hey assistant, do you know the current video content? Reply me concisely.",
        "Simply interpret the scene for me.",
        "What can you tell me about? Be concise.",
        "Use simple text to explain what is shown in front of me.",
        "What is the action now? Please response in short.",
    ]
    # Context managers close the handles promptly; the previous
    # json.load(open(...)) pattern left files open until GC.
    with open(input_fname, 'r') as f:
        input_example_dict = json.load(f)
    with open(metadata_fname, 'r') as f:
        video_metadata = json.load(f)
    output_example_list = []
    for video_uid, input_example in input_example_dict.items():
        if video_uid not in video_metadata:
            continue
        duration_secs = video_metadata[video_uid]['duration']
        if duration_secs == 0:
            continue
        human_contents = f'<image>\n{random.choice(instructions)}'
        gpt_contents = list()
        start_sec = 0
        for turn in input_example['0']:
            end_sec = turn['time']
            # Seconds -> integer percentile of duration, clamped to 0-99.
            start_time, end_time = int(start_sec / duration_secs * 100), int(end_sec / duration_secs * 100)
            start_time, end_time = max(0, min(99, start_time)), max(0, min(99, end_time))
            gpt_contents.append(f'From {start_time:02d} to {end_time:02d}, {turn["text"]}')
            start_sec = end_sec
        # Normalize sentence endings so joined narrations read cleanly.
        gpt_contents = [c if c.endswith('.') else c + '.' for c in gpt_contents]
        output_example = {'video': f'{video_folder}/{video_uid}', 'conversations': [
            {'from': 'human', 'value': human_contents},
            {'from': 'gpt', 'value': '\n\n'.join(gpt_contents)},
        ]}
        output_example_list.append(output_example)
    print(f'writing {len(output_example_list)} examples to {output_fname}')
    with open(output_fname, 'w') as f:
        json.dump(output_example_list, f, indent=4)


def vtimellm_format_reformat_online_grounding(input_fname, output_fname, metadata_fname, video_folder='queryd'):
    """Convert temporal-grounding annotations to VTimeLLM format (percent times).

    Uses the first [start, end] pair of each example's 'timestamps', converted
    to integer 00-99 percentages of the per-video duration and clamped. Videos
    missing from metadata_fname or with duration 0 are skipped. The human
    message asks a randomly chosen grounding question about the 'query'.

    Args:
        input_fname: path to the input JSON list of grounding examples.
        output_fname: path where the reformatted JSON list is written.
        metadata_fname: path to a JSON dict video_uid -> {'duration': seconds}.
        video_folder: folder prefix prepended to each video_uid.
    """
    grounding_templates = [
        "What segment of the video addresses the topic '%s'?",
        "At what timestamp can I find information about '%s' in the video?",
        "Can you highlight the section of the video that pertains to '%s'?",
        "Which moments in the video discuss '%s' in detail?",
        "Identify the parts that mention '%s'.",
        "Where in the video is '%s' demonstrated or explained?",
        "What parts are relevant to the concept of '%s'?",
        "Which clips in the video relate to the query '%s'?",
        "Can you point out the video segments that cover '%s'?",
        "What are the key timestamps in the video for the topic '%s'?"
    ]

    # Context managers close the handles promptly; the previous
    # json.load(open(...)) pattern left files open until GC.
    with open(input_fname, 'r') as f:
        input_example_list = json.load(f)
    with open(metadata_fname, 'r') as f:
        video_metadata = json.load(f)
    output_example_list = []
    for input_example in input_example_list:
        if input_example['video_uid'] not in video_metadata:
            continue
        duration_secs = video_metadata[input_example['video_uid']]['duration']
        if duration_secs == 0:
            continue
        human_contents = f'<image>\n{random.choice(grounding_templates) % input_example["query"]}'
        # Seconds -> integer percentile of duration, clamped to 0-99.
        start_time, end_time = int(input_example['timestamps'][0][0] / duration_secs * 100), int(input_example['timestamps'][0][1] / duration_secs * 100)
        start_time, end_time = max(0, min(99, start_time)), max(0, min(99, end_time))
        gpt_contents = f'From {start_time:02d} to {end_time:02d}.'
        output_example = {'video': f'{video_folder}/{input_example["video_uid"]}', 'conversations': [
            {'from': 'human', 'value': human_contents},
            {'from': 'gpt', 'value': gpt_contents},
        ]}
        output_example_list.append(output_example)
    print(f'writing {len(output_example_list)} examples to {output_fname}')
    with open(output_fname, 'w') as f:
        json.dump(output_example_list, f, indent=4)


if __name__ == '__main__':
    # Script entry point: regenerates annotation files in the TimeChat /
    # VTimeLLM training-data layouts. The calls below use hard-coded paths for
    # a specific cluster environment; uncomment/comment blocks to choose which
    # datasets to (re)generate.

    # timechat_format_reformat_online_instruction_with_time(
    #     '/share2/wangyq/projects/online_videollm/videollm-online/datasets/shot2story/annotations/livechat_train-multiturn-0.25_earlier.json',
    #     './onevision_data/annotations/timechat_format/shot2story_multiturn_train.json'
    # )

    # timechat_format_reformat_online_instruction_with_time(
    #     '/share2/wangyq/projects/online_videollm/videollm-online/datasets/shot2story/annotations/livechat_train-singleturn-no_general_questions-0.25_earlier.json',
    #     './onevision_data/annotations/timechat_format/shot2story_singleturn_train.json'
    # )

    # timechat_format_reformat_online_grounding(
    #     '/share2/wangyq/projects/online_videollm/videollm-online/datasets/didemo/annotations/train.json',
    #     './onevision_data/annotations/timechat_format/didemo_train.json', 'didemo'
    # )

    # timechat_format_reformat_online_grounding(
    #     '/share2/wangyq/projects/online_videollm/videollm-online/datasets/queryd/annotations/train.json',
    #     './onevision_data/annotations/timechat_format/queryd_train.json', 'queryd'
    # )

    # timechat_format_reformat_online_grounding(
    #     '/share2/wangyq/projects/online_videollm/videollm-online/datasets/hirest_grounding/annotations/train.json',
    #     './onevision_data/annotations/timechat_format/hirest_grounding_train.json', 'hirest'
    # )

    # timechat_format_reformat_online_narration(
    #     '/share2/wangyq/projects/online_videollm/videollm-online/datasets/vitt/annotations/train.json',
    #     './onevision_data/annotations/timechat_format/vitt_train.json', 'vitt'
    # )

    # timechat_format_reformat_online_narration(
    #     '/share2/wangyq/projects/online_videollm/videollm-online/datasets/coin/annotations/train.json',
    #     './onevision_data/annotations/timechat_format/coin_train.json', 'coin'
    # )

    # timechat_format_reformat_online_narration(
    #     '/share2/wangyq/projects/online_videollm/videollm-online/datasets/shot2story/annotations/narration_stream_train-human_anno.json',
    #     './onevision_data/annotations/timechat_format/shot2story_human_anno_train.json', 'shot2story'
    # )

    vtimellm_format_reformat_online_narration(
        '/share2/wangyq/projects/online_videollm/videollm-online/datasets/shot2story/annotations/narration_stream_train-human_anno.json',
        './onevision_data/annotations/vtimellm_format/shot2story_human_anno_train.json',
        '/share2/wangyq/projects/online_videollm/videollm-online/datasets/shot2story/videos_2fps_max384_metadata.json',
        'shot2story'
    )

    vtimellm_format_reformat_online_instruction_with_time(
        '/share2/wangyq/projects/online_videollm/videollm-online/datasets/shot2story/annotations/livechat_train-multiturn-0.25_earlier.json',
        './onevision_data/annotations/vtimellm_format/shot2story_multiturn_train.json',
        '/share2/wangyq/projects/online_videollm/videollm-online/datasets/shot2story/videos_2fps_max384_metadata.json',
        'shot2story'
    )

    vtimellm_format_reformat_online_instruction_with_time(
        '/share2/wangyq/projects/online_videollm/videollm-online/datasets/shot2story/annotations/livechat_train-singleturn-no_general_questions-0.25_earlier.json',
        './onevision_data/annotations/vtimellm_format/shot2story_singleturn_train.json',
        '/share2/wangyq/projects/online_videollm/videollm-online/datasets/shot2story/videos_2fps_max384_metadata.json',
        'shot2story'
    )


    vtimellm_format_reformat_online_narration(
        '/share2/wangyq/projects/online_videollm/videollm-online/datasets/vitt/annotations/train.json',
        './onevision_data/annotations/vtimellm_format/vitt_train.json',
        '/share2/wangyq/projects/online_videollm/videollm-online/datasets/vitt/videos_metadata.json',
        'vitt'
    )

    vtimellm_format_reformat_online_narration(
        '/share2/wangyq/projects/online_videollm/videollm-online/datasets/coin/annotations/train.json',
        './onevision_data/annotations/vtimellm_format/coin_train.json',
        '/share2/wangyq/projects/online_videollm/videollm-online/datasets/coin/videos_metadata.json',
        'coin'
    )

    # NOTE(review): the annotations and metadata below come from hirest_step,
    # but video_folder is 'coin' — every other call pairs the dataset with a
    # matching folder name, so this looks like a copy-paste slip. Confirm
    # whether hirest_step videos really live under the 'coin' folder before
    # changing it.
    vtimellm_format_reformat_online_narration(
        '/share2/wangyq/projects/online_videollm/videollm-online/datasets/hirest_step/annotations/train.json',
        './onevision_data/annotations/vtimellm_format/hirest_step_train.json',
        '/share2/wangyq/projects/online_videollm/videollm-online/datasets/hirest_step/videos_metadata.json',
        'coin'
    )

    # vtimellm_format_reformat_online_grounding(
    #     '/share2/wangyq/projects/online_videollm/videollm-online/datasets/didemo/annotations/train.json',
    #     './onevision_data/annotations/vtimellm_format/didemo_train.json',
    #     '/share2/wangyq/projects/online_videollm/videollm-online/datasets/didemo/videos_2.0fps_max384_metadata.json',
    #     'didemo'
    # )

    # vtimellm_format_reformat_online_grounding(
    #     '/share2/wangyq/projects/online_videollm/videollm-online/datasets/queryd/annotations/train.json',
    #     './onevision_data/annotations/vtimellm_format/queryd_train.json',
    #     '/share2/wangyq/projects/online_videollm/videollm-online/datasets/queryd/videos_metadata.json',
    #     'queryd'
    # )

    # vtimellm_format_reformat_online_grounding(
    #     '/share2/wangyq/projects/online_videollm/videollm-online/datasets/hirest_grounding/annotations/train.json',
    #     './onevision_data/annotations/vtimellm_format/hirest_grounding_train.json',
    #     '/share2/wangyq/projects/online_videollm/videollm-online/datasets/hirest_grounding/videos_metadata.json',
    #     'hirest'
    # )