//
//  SampleHandler.swift
//  Upload
//
//  Created by Wei Zhang on 2022/2/10.
//

import AVFoundation
import ReplayKit
import UIKit

/// Broadcast upload extension entry point: records the screen, app audio and
/// microphone audio delivered by ReplayKit into an MP4 file inside the shared
/// app-group container.
class SampleHandler: RPBroadcastSampleHandler {
    
    // Created in `broadcastStarted`, released after `broadcastFinished`.
    // Implicitly-unwrapped, so every use below is guarded before touching them.
    var assetWriter : AVAssetWriter!
    var videoInput : AVAssetWriterInput!
    var audioInput : AVAssetWriterInput!
    var micAudioInput : AVAssetWriterInput!
    
    override func broadcastStarted(withSetupInfo setupInfo: [String : NSObject]?) {
        // User has requested to start the broadcast. Setup info from the UI extension can be supplied but optional.
        assetWriterAndVideoInputInitial()
    }
    
    override func broadcastPaused() {
        // User has requested to pause the broadcast. Samples will stop being delivered.
    }
    
    override func broadcastResumed() {
        // User has requested to resume the broadcast. Samples delivery will resume.
    }
    
    override func broadcastFinished() {
        // User has requested to finish the broadcast: close out the file.
        // Only a writer in the `.writing` state may be finished; calling
        // markAsFinished/finishWriting on an `.unknown` or `.failed` writer
        // raises an Objective-C exception. Also guards against a nil writer
        // (setup failed, or finish arrived before start).
        guard let writer = assetWriter, writer.status == .writing else {
            releaseWriter()
            return
        }
        videoInput?.markAsFinished()
        audioInput?.markAsFinished()
        micAudioInput?.markAsFinished()
        writer.finishWriting {
            self.releaseWriter()
        }
    }
    
    /// Drops all writer references so the extension holds no file handles.
    private func releaseWriter() {
        videoInput = nil
        audioInput = nil
        micAudioInput = nil
        assetWriter = nil
    }
    
    override func processSampleBuffer(_ sampleBuffer: CMSampleBuffer, with sampleBufferType: RPSampleBufferType) {
        guard CMSampleBufferDataIsReady(sampleBuffer), let writer = assetWriter else { return }
        
        if writer.status == .unknown {
            // Start the session on the FIRST VIDEO buffer only. Starting it on
            // an audio buffer would place the session origin before the first
            // frame, producing black/missing video at the head of the file.
            guard sampleBufferType == .video else { return }
            writer.startWriting()
            writer.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
        }
        
        if writer.status == .failed {
            NSLog("录屏写入失败 = \(writer.status.rawValue), \(String(describing: writer.error))")
            return
        }
        
        switch sampleBufferType {
        case .video:
            append(sampleBuffer, to: videoInput)
        case .audioApp:
            append(sampleBuffer, to: audioInput)
        case .audioMic:
            append(sampleBuffer, to: micAudioInput)
        @unknown default:
            // A sample-buffer type added in a future SDK: ignore it instead of
            // killing the whole broadcast with fatalError.
            break
        }
    }
    
    /// Appends `sampleBuffer` to `input` when the input can accept more data.
    /// Buffers that arrive while the input is busy are dropped, which is the
    /// expected policy for a real-time source (`expectsMediaDataInRealTime`).
    private func append(_ sampleBuffer: CMSampleBuffer, to input: AVAssetWriterInput?) {
        guard let input = input, input.isReadyForMoreMediaData else { return }
        input.append(sampleBuffer)
    }
    
    /// Creates the asset writer (targeting a timestamped .mp4 in the shared
    /// app-group container) plus the video, app-audio and mic-audio inputs.
    func assetWriterAndVideoInputInitial() {
        guard let containerURL = FileManager.default.containerURL(forSecurityApplicationGroupIdentifier: "group.zw.app") else {
            NSLog("App group container 'group.zw.app' is unavailable")
            return
        }
        let logsPath = containerURL.appendingPathComponent("ShareGroup")
        do {
            try FileManager.default.createDirectory(at: logsPath, withIntermediateDirectories: true, attributes: nil)
        } catch let error as NSError {
            NSLog("Unable to create directory \(error.debugDescription)")
        }
        
        // File name is the start timestamp, e.g. 20220210153000.mp4.
        let formatter = DateFormatter()
        formatter.dateFormat = "yyyyMMddHHmmss"
        let fileURL = logsPath.appendingPathComponent("\(formatter.string(from: Date())).mp4")
        
        do {
            assetWriter = try AVAssetWriter(outputURL: fileURL, fileType: .mp4)
        } catch {
            NSLog("Unable to create AVAssetWriter: \(error)")
            return
        }
        
        // Encode at the screen's PIXEL size (points * scale); the original
        // used point dimensions, producing an undersized video. H.264 requires
        // even dimensions, so round down to a multiple of 2.
        let scale = UIScreen.main.scale
        let pixelWidth = Int(UIScreen.main.bounds.size.width * scale) & ~1
        let pixelHeight = Int(UIScreen.main.bounds.size.height * scale) & ~1
        
        let compressionProperties: [String: Any] = [
            AVVideoProfileLevelKey: AVVideoProfileLevelH264HighAutoLevel,
            AVVideoH264EntropyModeKey: AVVideoH264EntropyModeCABAC,
            // ~8 bits per pixel of average bitrate. The previous value
            // (1920*1080*30*24 ≈ 1.5 Gbps) was far beyond anything the
            // hardware encoder will honor.
            AVVideoAverageBitRateKey: pixelWidth * pixelHeight * 8,
            AVVideoMaxKeyFrameIntervalKey: 30,
            AVVideoAllowFrameReorderingKey: false
        ]
        let videoSettings: [String: Any] = [
            AVVideoCompressionPropertiesKey: compressionProperties,
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: pixelWidth,
            AVVideoHeightKey: pixelHeight
        ]
        
        // Stereo channel layout shared by both audio inputs.
        var channelLayout = AudioChannelLayout()
        memset(&channelLayout, 0, MemoryLayout<AudioChannelLayout>.size)
        channelLayout.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo
        let layoutData = Data(bytes: &channelLayout, count: MemoryLayout<AudioChannelLayout>.size)
        
        // AAC, not Linear PCM: AVAssetWriter cannot write LPCM tracks into an
        // .mp4 container, so the previous kAudioFormatLinearPCM settings made
        // every audio append fail. Sample rates match what ReplayKit delivers
        // here (44.1 kHz app audio per the captured format description;
        // 48 kHz for the mic -- TODO confirm on target devices).
        let audioSettings: [String: Any] = [
            AVFormatIDKey: kAudioFormatMPEG4AAC,
            AVSampleRateKey: 44100.0,
            AVNumberOfChannelsKey: 2,
            AVChannelLayoutKey: layoutData,
            AVEncoderBitRateKey: 128_000
        ]
        let micAudioSettings: [String: Any] = [
            AVFormatIDKey: kAudioFormatMPEG4AAC,
            AVSampleRateKey: 48000.0,
            AVNumberOfChannelsKey: 2,
            AVChannelLayoutKey: layoutData,
            AVEncoderBitRateKey: 128_000
        ]
        
        videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
        videoInput.expectsMediaDataInRealTime = true
        
        audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)
        audioInput.expectsMediaDataInRealTime = true
        
        micAudioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: micAudioSettings)
        micAudioInput.expectsMediaDataInRealTime = true
        
        assetWriter.add(videoInput)
        assetWriter.add(audioInput)
        assetWriter.add(micAudioInput)
    }
    
}
