# coding=utf8
import wave
import threading
from os import remove, listdir, makedirs
from os.path import exists, splitext, basename, join
from datetime import datetime
from time import sleep
from shutil import rmtree
import pyaudio
from PIL import ImageGrab
from numpy import array
import cv2
from moviepy.editor import *

CHUNK_SIZE = 1024  # audio frames read from the input stream per buffer
CHANNELS = 2  # stereo capture
FORMAT = pyaudio.paInt16  # 16-bit signed integer samples
RATE = 48000  # sample rate in Hz
# Shared flag polled by all three recorder threads; set to False to stop them.
allowRecording = True
# Set by the webcam thread once the camera is open; the audio and screen
# threads wait on it so all three recordings start together.
event = threading.Event()

def record_audio(audio_file_name):
    """Record microphone audio to a WAV file until ``allowRecording`` is False.

    Waits for the webcam thread's ready signal (``event``), then the shared
    3-second grace period, so all three recorder threads start together.

    :param audio_file_name: path of the WAV file to write
    """
    p = pyaudio.PyAudio()
    event.wait()
    sleep(3)  # wait three seconds for the webcam to finish starting
    # Open the microphone input stream.
    stream = p.open(
        format=FORMAT,
        channels=CHANNELS,
        rate=RATE,
        input=True,
        frames_per_buffer=CHUNK_SIZE,
    )
    try:
        # Context manager guarantees the WAV header is finalized even if
        # a read fails mid-recording (the original leaked wf on error).
        with wave.open(audio_file_name, "wb") as wf:
            wf.setnchannels(CHANNELS)
            wf.setsampwidth(p.get_sample_size(FORMAT))
            wf.setframerate(RATE)
            while allowRecording:
                # exception_on_overflow=False: dropping a buffer on a
                # transient input overrun is better than killing the thread.
                data = stream.read(CHUNK_SIZE, exception_on_overflow=False)
                wf.writeframes(data)
    finally:
        stream.stop_stream()
        stream.close()
        p.terminate()

def record_screen(screen_video_file_name):
    """Record the full screen to an XVID AVI until ``allowRecording`` is False.

    Waits for the webcam thread's ready signal, then the shared 3-second
    grace period, so all three recorder threads start together.

    :param screen_video_file_name: path of the AVI file to write
    """
    event.wait()
    sleep(3)
    # One probe grab just to learn the screen resolution for the writer.
    im = ImageGrab.grab()
    video = cv2.VideoWriter(screen_video_file_name,
                            cv2.VideoWriter_fourcc(*'XVID'),
                            25, im.size)
    try:
        while allowRecording:
            # Grab RGB screenshot, convert to the BGR layout OpenCV expects.
            frame = cv2.cvtColor(array(ImageGrab.grab()), cv2.COLOR_RGB2BGR)
            video.write(frame)
    finally:
        # Always finalize the container, even if a grab/convert raised
        # (the original skipped release() on any exception).
        video.release()

def record_webcam(webcam_dideo_file_name):
    """Record the default webcam to an MJPG AVI until ``allowRecording`` is False.

    Signals the other recorder threads via ``event`` once the camera is open,
    then waits the shared 3-second grace period.

    :param webcam_dideo_file_name: path of the AVI file to write
        (misspelled name kept for backward compatibility with callers)
    """
    # Index 0 selects the built-in (default) camera.
    cap = cv2.VideoCapture(0)
    # Camera started: notify the audio/screen threads, then share the 3s wait.
    event.set()
    sleep(3)
    # Use the camera's actual frame size. The original hard-coded (648, 480)
    # — a typo for 640x480 — and a VideoWriter whose size does not match the
    # frames it is given silently produces a broken/empty file.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) or 640
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) or 480
    avi_file = cv2.VideoWriter(webcam_dideo_file_name,
                               cv2.VideoWriter_fourcc(*'MJPG'),
                               25, (width, height))
    try:
        while allowRecording and cap.isOpened():
            # ret is True when a frame was captured successfully.
            ret, frame = cap.read()
            if ret:
                avi_file.write(frame)
    finally:
        # Always release both resources, even on an unexpected error.
        avi_file.release()
        cap.release()

def start_record():
    """Run one recording session.

    Spawns the audio, webcam and screen recorder threads, waits for the user
    to type 'q' on stdin, stops the threads, then stretches both silent video
    tracks to the audio's duration and muxes everything into a single AVI.
    Intermediate files live in ./tmp and are deleted on success.
    """
    global allowRecording
    # Reset the stop flag so this function can be called more than once
    # (the original left it False after the first session).
    allowRecording = True
    # The writers open files under tmp/ immediately; the original imported
    # makedirs/exists but never created the directory, crashing on first run.
    if not exists('tmp'):
        makedirs('tmp')
    now = str(datetime.now())[:19].replace(":", "_")
    # The audio is written by the wave module, i.e. it is WAV data; the
    # original's .mp3 extension was misleading and can confuse readers that
    # sniff by extension.
    audio_file_name = f'tmp/{now}.wav'
    webcam_video_file_name = f'tmp/webcam{now}.avi'
    screen_video_file_name = f'tmp/screen{now}.avi'
    video_file_name = f'tmp/{now}.avi'
    # One thread per capture source.
    t1 = threading.Thread(target=record_audio, args=[audio_file_name])
    t2 = threading.Thread(target=record_webcam, args=[webcam_video_file_name])
    t3 = threading.Thread(target=record_screen, args=[screen_video_file_name])
    event.clear()
    for t in (t1, t2, t3):
        t.start()
    event.wait()  # wait until the webcam thread reports the camera is ready
    print("3秒后开始录制，按q键结束录制。")
    while input() != "q":
        pass
    print("结束录制视频合成中···")
    allowRecording = False
    for t in (t1, t2, t3):
        t.join()
    try:
        # The AVIs were written at a nominal 25 fps but the real capture rate
        # is lower, so each video is time-stretched to the audio's duration
        # before compositing.
        audio = AudioFileClip(audio_file_name)
        video1 = VideoFileClip(screen_video_file_name)
        ratio1 = audio.duration / video1.duration
        video1 = (video1.fl_time(lambda t: t / ratio1, apply_to=["video"])
                  .set_end(audio.duration))
        # NOTE(review): the original author substituted the screen recording
        # for the webcam clip here because their machine had no camera;
        # behavior kept as-is — restore webcam_video_file_name when a camera
        # is available.
        video2 = VideoFileClip(screen_video_file_name)
        ratio2 = audio.duration / video2.duration
        video2 = (video2.fl_time(lambda t: t / ratio2, apply_to=["video"])
                  .set_end(audio.duration)
                  .resize((320, 240))
                  .set_position(('right', 'bottom')))
        # Screen full-size, second clip picture-in-picture at bottom-right.
        video = CompositeVideoClip([video1, video2]).set_audio(audio)
        video.write_videofile(video_file_name, codec='libx264', fps=25)
        # Clean up the intermediate captures only after a successful mux.
        remove(audio_file_name)
        remove(screen_video_file_name)
        remove(webcam_video_file_name)
    except Exception as e:
        # Best-effort: report the failure but leave the source files on disk.
        print(e)

if __name__ == '__main__':
    start_record()
