from moviepy.editor import VideoClip, AudioFileClip, ImageClip, concatenate_audioclips, concatenate_videoclips
import ffmpeg
from pydub import AudioSegment
from PIL import Image
import pytesseract


def get_media_duration(media_path):
    """Return the duration of a media file in whole seconds.

    Args:
        media_path: Path to any audio/media file readable by pydub (ffmpeg).

    Returns:
        int: Duration rounded to the nearest second.
    """
    segment = AudioSegment.from_file(media_path)
    # len() on an AudioSegment yields its length in milliseconds.
    milliseconds = len(segment)
    return round(milliseconds / 1000)



def make_video_normal(output, img1, img2, audio):
    """Build a two-image slideshow video backed by an audio track.

    The first image is shown for the whole audio duration minus the last
    10 seconds; the second image fills the final 10 seconds.

    Args:
        output: Path of the video file to write (e.g. ``out.mp4``).
        img1: Path of the image shown first.
        img2: Path of the image shown last.
        audio: Path of the audio track; its length sets the video length.
    """
    audio_path = audio
    output_path = output
    # Total video length in seconds, taken from the audio track.
    duration = get_media_duration(audio_path)

    print(f"duration:{duration}")

    # Clamp the 10-second tail so a short audio track (< 10 s) cannot
    # produce a negative clip duration, which would make moviepy fail.
    tail = min(10, duration)
    clip1 = ImageClip(img1).set_duration(duration - tail)
    clip2 = ImageClip(img2).set_duration(tail)
    video_clip = concatenate_videoclips([clip1, clip2], method='compose')

    # Attach the audio track to the assembled image sequence.
    audio_clip = AudioFileClip(audio_path)
    video_clip_with_audio = video_clip.set_audio(audio_clip)

    try:
        # Render the final video.
        video_clip_with_audio.write_videofile(output_path, codec="libx264", audio_codec="aac", fps=15)
    finally:
        # Release the ffmpeg reader held by the audio clip.
        audio_clip.close()



def make_video_smart(output, img1, img2, audio, clip1_time):
    """Build a two-image slideshow video with a caller-chosen split point.

    The first image is shown for ``clip1_time`` seconds; the second image
    fills the remainder of the audio track.

    Args:
        output: Path of the video file to write (e.g. ``out.mp4``).
        img1: Path of the image shown first.
        img2: Path of the image shown last.
        audio: Path of the audio track; its length sets the video length.
        clip1_time: Seconds to show the first image (clamped to the
            audio duration so the second clip cannot go negative).
    """
    audio_path = audio
    output_path = output
    # Total video length in seconds, taken from the audio track.
    duration = get_media_duration(audio_path)

    print(f"duration:{duration}")

    # Clamp the split point: a clip1_time longer than the audio would
    # give clip2 a negative duration and make moviepy fail.
    first_part = min(clip1_time, duration)
    clip1 = ImageClip(img1).set_duration(first_part)
    clip2 = ImageClip(img2).set_duration(duration - first_part)
    video_clip = concatenate_videoclips([clip1, clip2], method='compose')

    # Attach the audio track to the assembled image sequence.
    audio_clip = AudioFileClip(audio_path)
    video_clip_with_audio = video_clip.set_audio(audio_clip)

    try:
        # Render the final video.
        video_clip_with_audio.write_videofile(output_path, codec="libx264", audio_codec="aac", fps=15)
    finally:
        # Release the ffmpeg reader held by the audio clip.
        audio_clip.close()



def ocr_img_words(img):
    """Run Tesseract OCR on an image file and return the extracted text.

    Args:
        img: Path to the image file.

    Returns:
        str: Text recognized by pytesseract (may be empty).
    """
    # Use a context manager so the underlying file handle is closed
    # promptly instead of leaking until garbage collection.
    with Image.open(img) as image:
        return pytesseract.image_to_string(image)



import speech_recognition as sr
from pydub import AudioSegment

from pydub.silence import split_on_silence

def parse_audio_text(audio_file):
    """Transcribe a WAV file in 5-second chunks via Google speech recognition.

    The audio is split into consecutive 5000 ms segments and each segment
    is transcribed independently, so every result carries approximate
    start/end timestamps.

    Args:
        audio_file: Path to a WAV audio file.

    Returns:
        list[tuple[str, int, int]]: ``(text, start_ms, end_ms)`` for every
        segment that produced a transcription; silent or unintelligible
        segments are skipped.
    """
    from io import BytesIO

    recognizer = sr.Recognizer()

    # Load the WAV file (pydub also supports other formats via from_file).
    audio_segment = AudioSegment.from_wav(audio_file)
    total_ms = len(audio_segment)

    # Split the audio into fixed-size chunks of 5 seconds each.
    segment_duration = 5000  # milliseconds
    segments = [audio_segment[i:i + segment_duration]
                for i in range(0, total_ms, segment_duration)]

    # Collected (text, start_ms, end_ms) tuples.
    recognition_results = []

    for i, segment in enumerate(segments):
        # Export the chunk in memory instead of leaving temp files on disk.
        wav_buffer = segment.export(BytesIO(), format="wav")
        with sr.AudioFile(wav_buffer) as source:
            audio_data = recognizer.record(source)
            try:
                text = recognizer.recognize_google(audio_data)
            except sr.UnknownValueError:
                # No intelligible speech in this chunk; skip it.
                continue
            except sr.RequestError as e:
                print("请求语音识别服务出错: {0}".format(e))
                continue
            start_time = i * segment_duration
            # The last chunk may be shorter than segment_duration, so
            # clamp the end timestamp to the real audio length.
            end_time = min((i + 1) * segment_duration, total_ms)
            recognition_results.append((text, start_time, end_time))

    return recognition_results