import os
import random
import subprocess
import time
from datetime import datetime

import numpy as np
from moviepy.editor import *
from moviepy.editor import AudioFileClip
from PIL import Image, ImageSequence, ImageDraw
from pydub import AudioSegment

from common import left_bubble, right_bubble
from config.configBusiness import ai_make_config
from https.UploadPaths import upload_file
from jdk.huoshan import tts_http


def text_to_amio_4(templ_parm, text_json):
    """Synthesize speech for each comma-separated piece of the writing text.

    The TTS emotion is chosen by matching the text's emotion string against
    the keys of templ_parm["EmotionCategory"]; when nothing matches, the
    first configured value is used. The per-segment TTS responses are stored
    on text_json["text_temp"] and the (mutated) dict is returned.
    """
    writing_text = text_json.get("writing", "")
    emotion = text_json.get("emotion", "")

    # First EmotionCategory value whose key appears in the emotion string.
    emotion_value = next(
        (v for k, v in templ_parm["EmotionCategory"].items() if k in emotion),
        None,
    )
    if emotion_value is None:
        # No match: fall back to the first configured value (None when empty).
        emotion_value = next(iter(templ_parm["EmotionCategory"].values()), None)

    segments = writing_text.split('，')
    last_index = len(segments) - 1
    responses = []
    for idx, segment in enumerate(segments):
        # The TTS helper is told whether this is the final segment.
        responses.append(
            tts_http.task_process(segment, templ_parm['VoiceType'], emotion_value,
                                  templ_parm['tts_speed_ratio'], idx == last_index)
        )

    text_json["text_temp"] = responses
    return text_json


def gifs_nubmer(list_gif, text_parm):
    """Pick one gif per entry in text_parm["text_temp"].

    Gifs whose "emotion" field contains one of text_parm's comma-separated
    emotions are preferred and drawn without repetition. If those run out,
    the shortfall is filled with random (possibly repeated) picks from the
    whole list_gif pool.

    Fix: the original read text_parm['text_temp'] directly in the top-up loop
    and raised KeyError when the key was absent, even though the first read
    used .get(...) with a default. Both reads now use the same safe default.
    """
    wanted = len(text_parm.get("text_temp", []))
    text_parm_emotions = text_parm.get("emotion", "").split(',')
    # Candidates whose emotion field contains any of the requested emotions.
    candidates = [gif for gif in list_gif
                  if any(emotion in gif["emotion"] for emotion in text_parm_emotions)]

    selected_gifs = []
    # Draw without replacement from the emotion-matched candidates first.
    while candidates and len(selected_gifs) < wanted:
        pick = random.choice(candidates)
        candidates.remove(pick)
        selected_gifs.append(pick)
    # Top up from the full pool (repeats allowed) if still short; an empty
    # pool simply leaves the result short, matching the original fallback.
    while len(selected_gifs) < wanted and list_gif:
        selected_gifs.append(random.choice(list_gif))

    return selected_gifs


def audio_compositing(text_temp, upload_folder):
    """Concatenate the per-segment audio files into a single mp3.

    Each entry in text_temp must carry a 'file_path'. The merged track is
    written into upload_folder under a timestamp name and the relative path
    is returned.

    Fixes: removed the unused fade_duration variable and the redundant
    "./"-prefixed path copy, and the AudioFileClip readers are now closed so
    their ffmpeg subprocess/file handles are not leaked.
    """
    audio_clips = [AudioFileClip(info['file_path']) for info in text_temp]
    final_audio = concatenate_audioclips(audio_clips)
    current_time = datetime.now().strftime("%Y%m%d%H%M%S")
    path = f"{upload_folder}/{current_time}.mp3"
    try:
        final_audio.write_audiofile(path, codec='mp3')
    finally:
        # moviepy does not close readers automatically; leaking them
        # exhausts file descriptors over many calls.
        final_audio.close()
        for clip in audio_clips:
            clip.close()
    return path


def gifs_nubmer_16(list_gif, text_parm, gif_number):
    """Return gif_number gifs, seeding the list with one emotion-matched pick.

    The first gif is drawn from the gifs whose "emotion" contains one of
    text_parm's comma-separated emotions (falling back to the whole pool);
    the remainder are random draws from list_gif with repeats allowed.
    """
    emotions = text_parm.get("emotion", "").split(',')
    matched = [gif for gif in list_gif
               if any(emotion in gif["emotion"] for emotion in emotions)]

    # Seed with an emotion-matched gif when possible, otherwise any gif.
    pool = matched if matched else list_gif
    picks = [random.choice(pool)]

    # Fill up to the requested count with unrestricted random draws.
    while len(picks) < gif_number:
        picks.append(random.choice(list_gif))
    return picks


def get_png_16(list_png, text_parm):
    """Randomly return a png matching text_parm's emotions.

    A png matches when its "emotion" field contains any of text_parm's
    comma-separated emotions; when nothing matches, any png from list_png
    is returned.
    """
    emotions = text_parm.get("emotion", "").split(',')
    matched = [png for png in list_png
               if any(emotion in png["emotion"] for emotion in emotions)]
    # `matched or list_png` falls back to the full pool when empty.
    return random.choice(matched or list_png)


def get_background_music(list_music, text_parm):
    """Pick a background-music entry for the text's emotions.

    Prefers tracks whose "emotion" field contains one of text_parm's
    comma-separated emotions, then any track from list_music; returns None
    when both pools are empty.
    """
    emotions = text_parm.get("emotion", "").split(',')
    matched = [music for music in list_music
               if any(emotion in music["emotion"] for emotion in emotions)]
    pool = matched or list_music
    return random.choice(pool) if pool else None


def get_background(id):
    """Return the background image path for the given template id."""
    return "static/templateSource/{}/background.png".format(id)


def get_background_54(text_parm, templ_parm, upload_folder, random_png, list_avatar, list_typeface):
    """Compose the template-54 WeChat-style chat background image.

    Renders a chat frame: fixed background, current clock time, a nickname,
    two rounded avatars (one male, one female, shuffled), speech bubbles for
    the '&'-separated parts of text_parm['writing'], and a decorative png
    overlay. The composited first frame is saved as a PNG.

    Returns:
        tuple: (saved image path,
                list of three (x, y) anchor positions for the gifs,
                the shuffled two-avatar list — index 0 supplies the nickname).
    """
    # Fixed background asset for template 54.
    background_path = "static/templateSource/54/wx_背景图3.png"
    # Pick one male ('男') and one female ('女') avatar at random...
    random_man_avatar = random.choice([a for a in list_avatar if a['sex'] == '男'])
    random_girl_avatar = random.choice([a for a in list_avatar if a['sex'] == '女'])
    selected_avatars = [random_man_avatar, random_girl_avatar]
    # ...and shuffle so either can end up first (index 0 is the named speaker).
    random.shuffle(selected_avatars)
    head_path1 = upload_file(selected_avatars[0]['headimg'], upload_folder)
    head_path2 = upload_file(selected_avatars[1]['headimg'], upload_folder)

    time_font_size = 40

    # Current wall-clock time, rendered as HH:MM at the top of the frame.
    current_time = datetime.now()
    hour = str(current_time.hour).zfill(2)
    minute = str(current_time.minute).zfill(2)
    # NOTE(review): this local shadows the imported `time` module inside the
    # function; harmless here but worth renaming eventually.
    time = f"{hour}:{minute}"
    font_ttf_session = upload_file(get_http_font(list_typeface, templ_parm['font_ttf_session_id'])['src'],
                                   upload_folder)
    # Clock text clip (gray, centered).
    text_clip = TextClip(time,
                         fontsize=time_font_size,
                         color="gray",
                         align='center',
                         stroke_width=None,
                         stroke_color=None,
                         font=font_ttf_session  # font with Chinese glyph support
                         )
    # Nickname of whichever avatar ended up first after the shuffle.
    # name = repCli.get_open_ai_wx_text_54("")
    name = selected_avatars[0]['nickname']
    font_ttf_name = upload_file(get_http_font(list_typeface, templ_parm['font_ttf_name_id'])['src'],
                                upload_folder)
    text_clip1 = TextClip(name,
                          fontsize=52,
                          color="#000000",
                          align='center',
                          stroke_width=None,
                          stroke_color=None,
                          font=font_ttf_name  # font with Chinese glyph support
                          )
    # Load and scale the background to the configured template-54 video size.
    pil_image = Image.open(background_path)
    resized_image = pil_image.resize((ai_make_config.scales_video_54['w'], ai_make_config.scales_video_54['h']),
                                     resample=Image.BILINEAR)
    background = ImageClip(np.array(resized_image)).set_duration(templ_parm["time_len"])
    # Center the clock text on the frame (vertical offset applied later).
    text_width, text_height = text_clip.size
    image_width, image_height = background.size
    text_position = ((image_width - text_width) // 2, (image_height - text_height) // 2)
    # Nickname: horizontally centered, near the top (6.8% of frame height).
    text_width1, text_height1 = text_clip1.size
    text1_position = ((image_width - text_width1) // 2, (image_height * 0.068))

    # Upward shift for the clock, as a fraction of the frame height
    # (0.37 — the original comment said 35%).
    offset_percentage = 0.37
    vertical_offset = int(image_height * offset_percentage)

    man_head_clip_left = get_rounded_image(head_path1, templ_parm, upload_folder)
    girl_head_clip_right = get_rounded_image(head_path2, templ_parm, upload_folder)
    girl_w, girl_h = girl_head_clip_right.size
    man_w, man_h = man_head_clip_left.size
    # Gap between a bubble and its avatar (px).
    qp_tx_jx = 30
    # Gap between the frame border and an avatar (px).
    bjt_tx_jx = 20
    girl_x = image_width - girl_w - qp_tx_jx
    man_x = qp_tx_jx
    texts = text_parm.get('writing', '').split('&')
    girl_bubble_1_clip, girl_bubble_w_1, girl_bubble_h_1 = right_bubble.main(texts[0], templ_parm, font_ttf_session)
    girl_bubble_x_1 = girl_x - girl_bubble_w_1 - bjt_tx_jx

    man_bubble_1_clip, man_bubble_w_1, man_bubble_h_1 = left_bubble.main(texts[1], templ_parm, font_ttf_session)

    girl_bubble_2_clip, girl_bubble_w_2, girl_bubble_h_2 = right_bubble.main(texts[2], templ_parm, font_ttf_session)
    girl_bubble_x_2 = girl_x - girl_bubble_w_2 - bjt_tx_jx
    # Starting y for the chat rows; each cs_y* below is the next row down.
    cs_y = image_height * 0.16
    cs_y1 = cs_y + girl_bubble_h_1 + qp_tx_jx

    gif_w_1 = girl_x - templ_parm['bottom_gif_max'] - bjt_tx_jx
    gif_bottom = []
    # Anchor for the first mid-section gif.
    gif_bottom.append(((gif_w_1, cs_y1)))

    cs_y2 = cs_y1 + qp_tx_jx + templ_parm['bottom_gif_max']
    gif_bottom.append(((man_x + man_w + bjt_tx_jx, cs_y2)))
    cs_y3 = cs_y2 + qp_tx_jx + templ_parm['bottom_gif_max']
    cs_y4 = cs_y3 + man_bubble_h_1 + qp_tx_jx
    gif_bottom.append(((gif_w_1, cs_y4)))
    cs_y5 = cs_y4 + qp_tx_jx + templ_parm['bottom_gif_max']

    # Decorative png overlay stretched across the frame width.
    random_png_image = Image.open(random_png)
    y_position = (0.1 * image_height)
    png_y = int(image_height - (0.2 * image_height))
    # NOTE(review): the hard-coded 2015 overrides the computed value above,
    # making the previous line dead — presumably a manual tweak; confirm
    # before removing either line.
    png_y = 2015
    resized_png = random_png_image.resize((image_width, png_y),
                                          resample=Image.BILINEAR)
    random_png_clip = ImageClip(np.array(resized_png), duration=1)

    result_clip = CompositeVideoClip(
        [background,
         random_png_clip.set_position((0, y_position)),
         text_clip.set_position((text_position[0], text_position[1] - vertical_offset)),
         text_clip1.set_position(text1_position),
         # Row 1: right-side bubble + avatar.
         girl_bubble_1_clip.set_position((girl_bubble_x_1, cs_y)),
         girl_head_clip_right.set_position((girl_x, cs_y)),
         # Row 2: right avatar only (gif goes beside it).
         girl_head_clip_right.set_position((girl_x, cs_y1)),
         # Row 3: left avatar only (gif goes beside it).
         man_head_clip_left.set_position((man_x, cs_y2)),
         # Row 4: left avatar + bubble.
         man_head_clip_left.set_position((man_x, cs_y3)),
         man_bubble_1_clip.set_position((man_x + man_w + bjt_tx_jx, cs_y3)),
         # Row 5: right avatar only (gif goes beside it).
         girl_head_clip_right.set_position((girl_x, cs_y4)),
         # Row 6: right avatar + bubble.
         girl_head_clip_right.set_position((girl_x, cs_y5)),
         girl_bubble_2_clip.set_position((girl_bubble_x_2, cs_y5)),

         ])

    # Save the composite's first frame as the final background PNG.
    current_time_str = datetime.now().strftime("%Y%m%d%H%M%S")
    file_path = os.path.join(upload_folder, current_time_str + "_output.png")
    result_clip.save_frame(file_path, t=0)  # first frame only — this is a still
    return file_path, gif_bottom, selected_avatars


def get_rounded_image(image_path, templ_parm, upload_folder):
    """Load an avatar, mask it to a rounded square, and wrap it in an ImageClip.

    Side length and corner radius come from templ_parm["head"]; the clip
    lasts templ_parm["time_len"] seconds. upload_folder is accepted for
    signature compatibility but not used.
    """
    side = templ_parm["head"]['wh']
    radius = templ_parm["head"]['radius']

    avatar = Image.open(image_path).resize((side, side), Image.BILINEAR)

    # Grayscale mask: white inside a rounded rectangle, black outside.
    mask = Image.new("L", (side, side), 0)
    ImageDraw.Draw(mask).rounded_rectangle([0, 0, side, side], radius=radius, fill=255)

    # Use the mask as the alpha channel, then hand the result to moviepy.
    avatar.putalpha(mask)
    return ImageClip(np.array(avatar), duration=templ_parm["time_len"])


def gif_to_clip_3(gif_path):
    """Decode a gif into a 25fps ImageSequenceClip of 1080x1080 RGBA frames."""
    rgba_frames = [
        np.array(frame.convert('RGBA').resize((1080, 1080)))
        for frame in ImageSequence.Iterator(Image.open(gif_path))
    ]
    return ImageSequenceClip(rgba_frames, fps=25)


def create_text(text_params, templ_parm, font):
    """Build white, centered TextClips for each entry in text_params.

    Text is hard-wrapped every templ_parm['max_chars_per_line'] characters.
    Each param dict gains a 'total_text_height' key recording the rendered
    height; entries whose clip creation fails are skipped with a log line.
    """
    clips = []
    for param in text_params:
        content = param.get('text', '')
        display_time = param.get('duration', 1.0)  # seconds on screen, default 1s
        wrap_at = templ_parm['max_chars_per_line']
        # Hard wrap: fixed-width slices joined with newlines.
        lines = [content[pos:pos + wrap_at] for pos in range(0, len(content), wrap_at)]
        wrapped = '\n'.join(lines)
        try:
            clip = TextClip(wrapped,
                            fontsize=templ_parm['fontSize'],
                            color='white',
                            font=font,
                            align='center',
                            stroke_width=None,
                            stroke_color=None
                            ).set_duration(display_time)
            # Per-line height times line count == total rendered text height.
            param['total_text_height'] = (clip.h / len(lines)) * len(lines)
            clips.append(clip)
        except Exception as e:
            print(f"An error occurred: {e}")
    print("create_text function executed successfully")
    return clips


def create_text_clips_16(text, templ_parm, font):
    """Create staggered TextClips for each '，'-separated piece of *text*.

    Each piece is shown for an equal share of (time_len - 0.5) seconds, one
    after another. The first piece uses the first "top" position; later
    pieces alternate between the configured left and right position pools
    with random jitter, clamped to the 1080px frame width.
    """
    text_clips = []
    split_texts = text.split('，')
    time_len = (templ_parm['time_len'] - 0.5) / len(split_texts)
    positions_top = templ_parm['positions_top']
    positions_left = templ_parm['positions_left']
    positions_right = templ_parm['positions_right']

    offset = 0  # start time of the next clip
    used_positions = set()  # positions already taken, to avoid overlap
    for i, txt in enumerate(split_texts):
        txt = wrap_text(txt, 8)  # hard wrap every 8 chars (original comment said 6)
        font_size = templ_parm['fontSize']  # default font size
        color = templ_parm['color']
        align = 'center'  # centered text
        stroke_width = 1  # 1px outline
        stroke_color = "black"  # black outline
        display_time = time_len  # per-piece display duration

        # Build the clip, starting at the running time offset.
        text_clip = TextClip(txt,
                             fontsize=font_size,
                             color=color,
                             font=font,
                             align=align,
                             stroke_width=stroke_width,
                             stroke_color=stroke_color
                             ).set_start(offset)
        text_width, text_height = text_clip.size

        # Advance the start time for the next piece.
        offset += display_time
        # First piece goes to the top slot; then alternate left/right pools.
        if i == 0:
            position = positions_top[0]
        else:
            available_positions = positions_left if i % 2 == 1 else positions_right
            position = random.choice(available_positions)
            if position in used_positions:
                # NOTE(review): when a position repeats, this skips the piece
                # entirely (the clip is never appended) even though offset was
                # already advanced — confirm this drop is intentional.
                continue
            used_positions.add(position)
            # Random horizontal jitter for the left/right pools.
            if available_positions == positions_left:
                left_x = 0
                if len(txt) > 6:
                    left_x = 100
                position = (position[0] - random.randint(0, 50) + random.randint(0, 50) - left_x, position[1])
            else:  # positions_right
                position = (position[0] + random.randint(0, 100), position[1])
        position = (position[0], position[1] - random.randint(0, 50) + random.randint(0, 50))
        if position[0] + text_width > 1080:
            position = (1080 - text_width, position[1])  # clamp x to stay on screen
        text_clips.append(text_clip.set_position(position))

    return text_clips


def get_txt_to_clip_47(texts, templ_parm, font):
    """Create one black, centered TextClip per string in texts."""
    clips = []
    for txt in texts:
        clips.append(
            TextClip(txt,
                     fontsize=templ_parm['fontSize'],
                     color='#000000',  # black text, no stroke
                     font=font,
                     align='center',
                     stroke_width=None,
                     stroke_color=None)
        )
    return clips


def get_background_path_to_clip(w, h, video_time, png):
    """Open png, resize to w x h, and return it as a video_time-long ImageClip."""
    source = Image.open(png)
    scaled = source.resize((w, h), resample=Image.BILINEAR)
    return ImageClip(np.array(scaled)).set_duration(video_time)


def create_bg2(video_time, png):
    """Cover-fit png into a 1080x1920 frame and return it as a still clip.

    The image is scaled so it fully covers 1080x1920, then center-cropped to
    exactly that size (the usual "cover" resize).
    """
    target_w, target_h = 1080, 1920
    image = Image.open(png)

    # Scale so both dimensions reach at least the target size.
    factor = max(target_w / image.width, target_h / image.height)
    scaled = image.resize(
        (int(image.width * factor), int(image.height * factor)),
        resample=Image.BILINEAR,
    )

    # Center-crop the overflow on each axis.
    left = (scaled.width - target_w) // 2
    top = (scaled.height - target_h) // 2
    cropped = scaled.crop((left, top, left + target_w, top + target_h))

    return ImageClip(np.array(cropped)).set_duration(video_time)


def create_bg_16(video_time, png):
    """Build a letterboxed background clip from png.

    The image is stretched to the configured video size, then the top and
    bottom 30% of the frame are painted black, leaving a middle band.
    """
    width = ai_make_config.scales_video['w']
    height = ai_make_config.scales_video['h']

    frame = Image.open(png).resize((width, height), resample=Image.BILINEAR)

    # Paint the top and bottom 30% bands black.
    band = height * 0.3
    painter = ImageDraw.Draw(frame)
    painter.rectangle([0, 0, width, band], fill="black")
    painter.rectangle([0, height - band, width, height], fill="black")

    return ImageClip(np.array(frame)).set_duration(video_time)


def gif_to_clip_2(gif_path, templ_parm):
    """Decode a gif into a 25fps clip, scaling frames by the template ratios.

    Frame size is 1080 floor-divided by templ_parm["w_pro"] / ["h_pro"].
    """
    target = (int(1080 // templ_parm["w_pro"]), int(1080 // templ_parm["h_pro"]))
    frames = [np.array(frame.convert('RGBA').resize(target))
              for frame in ImageSequence.Iterator(Image.open(gif_path))]
    return ImageSequenceClip(frames, fps=25)


def gif_to_clip_51_4(gif_path, image_layout):
    """Decode a gif into a 25fps clip resized to image_layout['dimension']."""
    size = image_layout['dimension']
    frames = [np.array(frame.convert('RGBA').resize(size))
              for frame in ImageSequence.Iterator(Image.open(gif_path))]
    return ImageSequenceClip(frames, fps=25)


def gif_to_clip_54(gif_paths, templ_parm, gif_bottom):
    """Turn each gif into a looping square clip and collect screen positions.

    Frames are resized to bottom_gif_max x bottom_gif_max and each clip is
    repeated enough times to cover templ_parm["time_len"]. Returns the clips
    together with the first three positions from gif_bottom.
    """
    side = templ_parm['bottom_gif_max']
    # Exactly three anchor positions are expected for this template.
    position = [
        gif_bottom[0],
        gif_bottom[1],
        gif_bottom[2],
    ]
    gif_clips = []
    for path in gif_paths:
        frames = [np.array(frame.convert('RGBA').resize((side, side)))
                  for frame in ImageSequence.Iterator(Image.open(path))]
        clip = ImageSequenceClip(frames, fps=25)
        # Repeat the gif so the loop spans the whole video duration.
        repeats = int(templ_parm["time_len"] // clip.duration + 1)
        gif_clips.append(concatenate_videoclips([clip] * repeats))
    return gif_clips, position


def gif_to_clip_16(gif_path, templ_parm):
    """Decode a gif into a 25fps clip with square templ_parm["w_h"] frames."""
    side = templ_parm["w_h"]
    frames = [np.array(frame.convert('RGBA').resize((side, side)))
              for frame in ImageSequence.Iterator(Image.open(gif_path))]
    return ImageSequenceClip(frames, fps=25)


def gif_to_clip_47(gif_paths, templ_parm):
    """Build a still ImageClip from the first frame of each gif.

    Frames are resized to a w_h square; only RGBA frames (4 channels) are
    kept, each shown for templ_parm["duration"] seconds.
    """
    side = templ_parm["w_h"]
    stills = []
    for path in gif_paths:
        # Iterator supports indexing; [0] grabs the first frame only.
        first = ImageSequence.Iterator(Image.open(path))[0]
        pixels = np.array(first.convert("RGBA").resize((side, side)))
        # Guard: moviepy needs the alpha channel to be present.
        if pixels.shape[2] == 4:
            stills.append(ImageClip(pixels, duration=templ_parm["duration"]))
    return stills


def text_to_scene(list_text, scene):
    """Return texts for *scene* whose writing has at least three '&'-parts."""
    return [
        entry for entry in list_text
        if scene in entry.get('scene', '') and entry.get('writing', '').count('&') >= 2
    ]


def text_to_amio_16(text_json):
    """Split the writing on full-width commas and store the pieces on the dict."""
    text_json["texts_list"] = text_json.get("writing", "").split('，')
    return text_json


def music_yanchang(path, total_duration, upload_folder):
    """Extend a too-short music track by looping it.

    If the audio at *path* already lasts at least total_duration seconds the
    original path is returned unchanged; otherwise the track is concatenated
    with itself enough times to cover the duration, written into
    upload_folder with a timestamp prefix, and the new path is returned.
    """
    # pydub reports length in milliseconds.
    current_duration = len(AudioSegment.from_file(path)) / 1000

    if current_duration >= total_duration:
        return path

    # One extra repetition guarantees we meet or exceed the target length.
    repeats = int(total_duration / current_duration) + 1
    looped = concatenate_audioclips([AudioFileClip(path) for _ in range(repeats)])

    stamp = datetime.now().strftime("%Y%m%d%H%M%S")
    out_path = os.path.join(upload_folder, stamp + "_" + os.path.basename(path))
    looped.write_audiofile(out_path, codec='mp3')

    return out_path


def convert_mp3_to_aac(mp3_file, upload_folder):
    """Transcode mp3_file to AAC with ffmpeg and return the new file's path."""
    stamp = datetime.now().strftime("%Y%m%d%H%M%S")
    aac_file_path = os.path.join(upload_folder, stamp + "_.aac")

    # -y overwrites without prompting; check=True raises on ffmpeg failure.
    subprocess.run(
        ['ffmpeg', '-y', '-i', mp3_file, '-c:a', 'aac',
         '-strict', 'experimental', aac_file_path],
        check=True,
    )
    return aac_file_path


def wrap_text(txt, max_length):
    """Insert a newline after every max_length characters.

    Splits purely by character count — word boundaries are not considered.
    Leading/trailing whitespace of the wrapped result is stripped.
    """
    chunks = [txt[i:i + max_length] for i in range(0, len(txt), max_length)]
    return '\n'.join(chunks).strip()


def create_background(w, h, path='background.png', color=(255, 255, 255)):
    """Create and save a solid-color background image.

    Generalized: the output path and fill color, previously hard-coded to
    'background.png' and white, are now defaulted parameters, and the saved
    path is returned. Existing positional callers are unaffected.

    Args:
        w: Width in pixels.
        h: Height in pixels.
        path: Where to save the PNG (default 'background.png', as before).
        color: RGB fill color (default white).

    Returns:
        The path the image was written to.
    """
    Image.new('RGB', (w, h), color).save(path)
    return path


def get_http_font(list_font, id):
    """Return the first font dict whose 'id' equals *id*, or None if absent."""
    for item in list_font:
        if item.get('id') == id:
            return item
    return None


if __name__ == '__main__':
    # Manual entry point: generate a white 1080x1920 background.png in the CWD.
    create_background(1080, 1920)
