import json
import util.llm as llm
from tqdm import tqdm
import concurrent.futures

# Load a previously saved summary file (JSON) from disk.
def get_course_summary(file_path):
    """Return the parsed JSON content of *file_path*."""
    with open(file_path, 'r', encoding='utf-8') as fp:
        return json.load(fp)


# Fetch the list of computer-science video ids.
def get_computer_ids(file_path):
    """Read *file_path* as JSON and return its 'video_ids' entry."""
    with open(file_path, 'r', encoding='utf-8') as handle:
        payload = json.load(handle)
    return payload['video_ids']


# Read video ids, names and subtitle text for the computer-science videos.
def gain_video_name_text(video_file_path):
    """Extract ids, names and subtitle texts from a JSON-lines video file.

    Only videos whose id appears in data/computer_ids.customization are
    kept.  Records without a 'text' field get an empty subtitle string.

    Args:
        video_file_path: path to a JSON-lines file, one video object per line.

    Returns:
        Three parallel lists: (video_ids, video_names, video_texts).
    """
    video_ids = []
    video_names = []
    video_texts = []
    # Use a set for O(1) membership tests — the original list made the
    # `not in` check O(len(c_ids)) for every line of the (large) file.
    c_ids = set(get_computer_ids('data/computer_ids.customization'))
    with open(video_file_path, 'r', encoding='utf-8') as f:
        # First pass only counts lines so tqdm can render a progress bar.
        total_lines = sum(1 for _ in f)
        f.seek(0)
        for line in tqdm(f, total=total_lines, desc="提取视频信息"):
            video_file = json.loads(line)
            video_id = video_file['id']
            if video_id not in c_ids:
                continue
            video_ids.append(video_id)
            video_names.append(video_file['name'])
            # Some records lack subtitles; fall back to an empty string.
            video_texts.append(' '.join(video_file.get('text', [])))
    return video_ids, video_names, video_texts


def get_all_summary(video_texts, max_workers=None):
    """Summarize every subtitle text with the LLM, in parallel.

    Args:
        video_texts: sequence of subtitle strings to summarize.
        max_workers: optional thread count for the executor; None keeps
            the ThreadPoolExecutor default (backward-compatible addition).

    Returns:
        List of summaries, in the same order as *video_texts*.
    """
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        # executor.map preserves input order even though the LLM calls
        # run concurrently; tqdm tracks completion as results arrive.
        all_summary = list(tqdm(executor.map(llm.get_responses, video_texts),
                                total=len(video_texts), desc="获取summary"))
    return all_summary


def save_all_summaries(save_path):
    """Summarize all computer-science videos and save the result as JSON.

    Reads the MOOCCube video file, summarizes each subtitle text via the
    LLM, and writes a list of {id, name, text, summary} records to
    *save_path* (pretty-printed, non-ASCII preserved).

    Args:
        save_path: destination path for the JSON output file.
    """
    ids, names, texts = gain_video_name_text('data/MOOCCube/entities/video.json')
    summaries = get_all_summary(texts)
    # Iterate the four parallel lists with zip instead of indexing
    # through range(len(...)).
    data = [
        {"id": vid, "name": name, "text": text, "summary": summary}
        for vid, name, text, summary in tqdm(
            zip(ids, names, texts, summaries),
            total=len(ids), desc="保存summary")
    ]
    with open(save_path, 'w', encoding='utf-8') as file:
        # json.dump streams straight to the file — no intermediate
        # full-document string in memory.
        json.dump(data, file, indent=4, ensure_ascii=False)
