//
//  File.swift
//  DemoTest
//
//  Created by 韦小新 on 2022/4/20.
//

// Builds a chained frame-processing pipeline on top of the video capture.
import Vision
import UIKit
// Combine: processes asynchronous events by composing event publishers.
import Combine
// Core Image: a library that processes still and video images with filters.
import CoreImage

//MARK: - Video-processing chain delegate protocol
protocol VideoProcessingChainDelegate: AnyObject {
    /// Informs the delegate when Vision finishes analyzing a frame in the chain.
    /// - Parameters:
    ///   - chain: A video-processing chain.
    ///   - poses: An array of poses, or `nil` when the frame contains none.
    ///   - frame: A `CGImage` of the frame.
    func videoProcessingChain(_ chain:VideoProcessingChain, didDetect poses: [Pose]?, in frame: CGImage)
    
    /// Informs the delegate when a video frame chain predicts an action.
    /// - Parameters:
    ///   - chain: A video-processing chain.
    ///   - actionPrediction: An action prediction.
    ///   - frames: The number of frames the prediction represents.
    func videoProcessingChain(_ chain:VideoProcessingChain, didPredict actionPrediction: ActionPrediction, for frames:  Int)
    
}

//MARK: - Video-processing chain
struct VideoProcessingChain {
    /// Receives the pose observations and action predictions the chain produces.
    weak var delegate: VideoProcessingChainDelegate?
    
    /// The upstream publisher of video frames.
    ///
    /// Assigning a new publisher rebuilds the frame-processing chain that
    /// extracts poses and predicts actions from its frames.
    var upstreamFramePublisher: AnyPublisher<Frame, Never>! {
        didSet { buildProcessingChain() }
    }
    /// A cancellation token for the active video-processing chain.
    ///
    /// To tear down the frame processing chain, call this property's `cancel()`
    /// method, or allow it to deinitialize.
    private var frameProcessingChain: AnyCancellable?
    
    /// A human hand pose request instance that finds poses in each video frame.
    ///
    /// The video-processing chain reuses this instance for all frames from any
    /// upstream publisher.
    /// - Tag: humanBodyPoseRequest
    private let humanHandPoseRequest = VNDetectHumanHandPoseRequest()
    
    /// The action classifier that recognizes exercise activities.
    private let actionClassifier = MyHandActionClassifierOne.shared
    
    /// The number of pose data instances the action classifier needs
    /// to make a prediction.
    /// - Tag: predictionWindowSize
    private let predictionWindowSize: Int

    /// The number of pose data instances the window advances after each
    /// prediction.
    ///
    /// Increase the stride's value to make predictions less frequently.
    /// - Tag: windowStride
    private let windowStride = 10
    /// A performance reporter that logs the number of predictions and frames
    /// that pass through the chain.
    ///
    /// The reporter prints the prediction and frame counts to the console
    /// every second.
    private var performanceReporter = PerformanceReporter()
    
    init() {
        // Ask the classifier how many pose samples one prediction requires.
        predictionWindowSize = actionClassifier.calculatePredictionWindowSize()
    }
    
}

//MARK: - Assembling the processing chain
extension VideoProcessingChain {
    /// Subscribes the full frame-to-prediction pipeline to the upstream
    /// frame publisher, storing the subscription's cancellation token.
    private mutating func buildProcessingChain() {
        // Requires an upstream publisher to subscribe to.
        guard upstreamFramePublisher != nil else { return }
        
        frameProcessingChain = upstreamFramePublisher
        
        // Convert each frame to a CGImage, skipping any that don't convert.
        .compactMap(imageFromFrame)

        // ---- CGImage -- CGImage ----

        // Detect any human hand poses (or lack of them) in the frame.
        .map(findPosesInFrame)

        // ---- [Pose]? -- [Pose]? ----

        // Isolate the pose with the largest area in the frame.
        .map(isolateLargestPose)

        // ---- Pose? -- Pose? ----

        // Publish the locations of the pose's landmarks as an
        // `MLMultiArray` to the next subscriber.
        .map(multiArryFrompose(_:))

        // ---- MLMultiArray? -- MLMultiArray? ----

        // Gather a window of multiarrays, starting with an empty window.
        .scan([MLMultiArray?](), gatherWindow)

        // ---- [MLMultiArray?] -- [MLMultiArray?] ----

        // Only publish a window when it grows to the correct size.
        .filter(gateWindow)

        // ---- [MLMultiArray?] -- [MLMultiArray?] ----

        // Make an activity prediction from the window.
        .map(predictActionWithWindow)

        // ---- ActionPrediction -- ActionPrediction ----

        // Send the action prediction to the delegate.
        .sink(receiveValue: sendPrediction)

        
    }
}

//MARK: - Frame-to-image conversion and pose processing
extension VideoProcessingChain {
    /// Converts a sample buffer into a Core Graphics image.
    /// - Parameter buffer: A sample buffer, typically from a video capture.
    /// - Returns: A `CGImage` if Core Image successfully converts the sample
    /// buffer; otherwise `nil`.
    /// - Tag: imageFromFrame
    private func imageFromFrame(_ buffer: Frame) -> CGImage? {
        // Inform the performance reporter to log the frame in its count.
        performanceReporter?.incrementFrameCount()

        guard let imageBuffer = buffer.imageBuffer else {
            print("The frame doesn't have an underlying image buffer.")
            return nil
        }

        // Create a Core Image context.
        // NOTE(review): creating a CIContext for every frame is expensive;
        // consider caching a single context on the chain and reusing it.
        let ciContext = CIContext(options: nil)

        // Wrap the pixel buffer in a Core Image image.
        let ciImage = CIImage(cvPixelBuffer: imageBuffer)

        // Render the Core Image image into a Core Graphics image.
        guard let cgImage = ciContext.createCGImage(ciImage,
                                                    from: ciImage.extent) else {
            print("Unable to create an image from a frame.")
            return nil
        }

        // The previous implementation cropped the image to its own full
        // bounds — a pixel copy that changes nothing — so return the
        // rendered image directly.
        return cgImage
    }
    /// Locates human hand poses in an image.
    /// - Parameter frame: An image.
    /// - Returns: A `Pose` array if `VNDetectHumanHandPoseRequest` succeeds
    /// and its `results` property isn't `nil`; otherwise `nil`.
    ///
    /// The method also sends the frame and any poses in it to the delegate.
    /// - Tag: findPosesInFrame
    private func findPosesInFrame(_ frame: CGImage) -> [Pose]? {
        // Create a request handler for the image.
        
        let visionRequestHandler = VNImageRequestHandler(cgImage: frame)
        // Use Vision to find human hand poses in the frame.
        // Note: assertionFailure is a no-op in release builds, so a failed
        // request silently yields whatever `results` currently holds.
        do { try visionRequestHandler.perform([humanHandPoseRequest])} catch {
            assertionFailure("Human Pose Request failed: \(error)")
        }
        
        let pose = Pose.fromObservations(humanHandPoseRequest.results)
        
        // Send the frame and pose, if any, to the delegate on the main queue.
        DispatchQueue.main.async {
            self.delegate?.videoProcessingChain(self, didDetect: pose, in: frame)
        }
        return pose
    }
    
    /// Returns the pose that covers the largest area in the frame.
    /// - Parameter poses: A `Pose` array optional.
    /// - Returns: The largest `Pose` when the array isn't empty; otherwise `nil`.
    /// - Tag: isolateLargestPose
    private func isolateLargestPose(_ poses: [Pose]?) -> Pose? {
        guard let candidates = poses else { return nil }
        return candidates.max { $0.area < $1.area }
    }
    
    
    /// Returns a pose's multiarray.
    /// - Parameter item: A pose from a human hand-pose request.
    /// - Returns: The locations of the pose's landmarks in an `MLMultiArray`,
    /// or `nil` when there is no pose.
    /// - Tag: multiArrayFromPose
    private func multiArryFrompose(_ item: Pose?) -> MLMultiArray? {
        guard let pose = item else { return nil }
        return pose.multiArray
    }
    
    /// Appends the newest multiarray to a sliding window of pose data.
    /// - Parameters:
    ///   - previousWindow: The previous window state from the last invocation.
    ///   - multiArray: The newest multiarray.
    /// - Returns: An `MLMultiArray?` array.
    ///
    /// If the previous window already holds the target number of elements,
    /// the method first advances the window by removing the oldest
    /// `windowStride` elements before appending the new one.
    /// - Tag: gatherWindow
    private func gatherWindow(previousWindow: [MLMultiArray?],
                              multiArray: MLMultiArray?) -> [MLMultiArray?] {
        var window = previousWindow

        // A full window means gateWindow just let one through; slide it
        // forward by the stride so predictions overlap rather than repeat.
        if window.count == predictionWindowSize {
            window.removeFirst(windowStride)
        }

        // Append the newest multiarray; the returned window becomes the
        // `previousWindow` argument on the next upstream element.
        window.append(multiArray)
        return window
    }
    /// Gates the stream so only windows that contain exactly the number of
    /// elements the action classifier needs pass downstream.
    private func gateWindow(_ currentWindow: [MLMultiArray?]) -> Bool {
        currentWindow.count == predictionWindowSize
    }
    /// Makes a prediction from the multiarray window.
    /// - Parameter currentWindow: An `MLMultiArray?` array.
    /// - Returns: An `ActionPrediction`.
    /// - Tag: predictActionWithWindow
    private func predictActionWithWindow(_ currentWindow: [MLMultiArray?]) -> ActionPrediction {
        // Substitute an empty pose multiarray for each missing entry, and
        // count how many entries hold genuine pose data.
        let filledWindow = currentWindow.map { $0 ?? Pose.emptyPoseMultiArray }
        let realPoseCount = currentWindow.filter { $0 != nil }.count

        // Only use windows with at least 60% real data to make a prediction
        // with the action classifier.
        let minimumPoseCount = predictionWindowSize * 60 / 100
        guard realPoseCount >= minimumPoseCount else {
            return ActionPrediction.noPersonPrediction
        }

        // Merge the array window of multiarrays into one multiarray.
        let mergedWindow = MLMultiArray(concatenating: filledWindow,
                                        axis: 0,
                                        dataType: .float)

        // Make a genuine prediction with the action classifier, then gate
        // the result on the confidence threshold.
        let prediction = actionClassifier.predictActionFromWindow(mergedWindow)
        return checkConfidence(prediction)
    }
    
    /// Gates a prediction on a minimum confidence threshold.
    /// - Parameter actionPrediction: The action classifier's prediction.
    /// - Returns: The prediction itself when its confidence is at least 0.6;
    /// otherwise the "Low Confidence" placeholder prediction.
    /// - Tag: checkConfidence
    private func checkConfidence(_ actionPrediction: ActionPrediction) -> ActionPrediction {
        let minimumConfidence = 0.6
        if actionPrediction.confidence < minimumConfidence {
            return .lowConfidencePrediction
        }
        return actionPrediction
    }
    /// Sends an action prediction to the delegate on the main queue.
    /// - Parameter actionPrediction: The action classifier's prediction.
    private func sendPrediction(_ actionPrediction: ActionPrediction) {
        // Deliver the prediction to the delegate on the main queue.
        DispatchQueue.main.async {
            self.delegate?.videoProcessingChain(self, didPredict: actionPrediction, for: windowStride)
        }
        // Log the prediction in the performance reporter's count.
        // NOTE(review): "inceementPrediction" looks like a typo for
        // "incrementPrediction" on PerformanceReporter — confirm and rename there.
        performanceReporter?.inceementPrediction()
    }
    
    
    
    
    
    
}

