//
//  SSDPipeline.swift
//  SitPosition
//
//  Created by apple on 2019/2/19.
//  Copyright © 2019 apple. All rights reserved.
//

import CoreMedia
import CoreML
import Foundation
import UIKit
import Vision

/**
 * EXIF orientation values, as defined by the EXIF/TIFF Orientation tag.
 */
fileprivate enum EXIFOrientation: Int32 {
    case topLeft = 1
    case topRight = 2
    case bottomRight = 3
    case bottomLeft = 4
    case leftTop = 5
    case rightTop = 6
    case rightBottom = 7
    case leftBottom = 8
    
    /// Whether this orientation mirrors (reflects) the image in addition
    /// to any rotation. The mirrored EXIF values are 2, 4, 5 and 7.
    var isReflect: Bool {
        switch self {
        case .topRight, .bottomLeft, .leftTop, .rightBottom:
            return true
        case .topLeft, .bottomRight, .rightTop, .leftBottom:
            return false
        }
    }
}

class SSPipeline {
    
    // MARK: - Properties
    
    /// Called once per processed frame with whether no person was detected
    /// (`true` when nobody is in the frame or the request failed).
    public var humanChangedHandler: (_ hasNobody: Bool) -> Void = { _ in }
    
    // Serializes Vision requests so only one frame is in flight at a time.
    fileprivate let semaphore = DispatchSemaphore(value: 1)
    // Decodes the raw SSD outputs (1917 anchors, 90 classes).
    fileprivate let ssdPostProcessor = SSDPostProcessor(numAnchors: 1917, numClasses: 90)
    // CoreML detection model wrapped for Vision; populated in setupVision().
    fileprivate var visionModel: VNCoreMLModel?
    
    // MARK: - Object lifecycle
    
    init() {
        setupVision()
    }
    
    /// Runs the CoreML detector on a single camera frame and crops out the
    /// first detected person.
    /// - Parameters:
    ///   - sampleBuffer: one frame captured from the camera.
    ///   - completion: called with the cropped person image, or `nil` when
    ///     no person was found, the model is unavailable, or the request failed.
    public func runCoreML(_ sampleBuffer: CMSampleBuffer, completion: @escaping (_ image: UIImage?) -> Void) {
        guard let classNames = self.ssdPostProcessor.classNames,
            let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer),
            let visionModel = self.visionModel else {
            completion(nil)
            return
        }
        var requestOptions: [VNImageOption : Any] = [:]
        if let cameraIntrinsicData = CMGetAttachment(sampleBuffer,
                                                     key: kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix,
                                                     attachmentModeOut: nil) {
            requestOptions = [.cameraIntrinsics : cameraIntrinsicData]
        }
        let imgSize: CGSize = CVImageBufferGetEncodedSize(pixelBuffer)
        // EXIF "rightTop" (raw value 6) is exactly CGImagePropertyOrientation.right.
        // Using the typed case removes the force-unwrapped optional the raw-value
        // initializer produced here before.
        let orientation = CGImagePropertyOrientation.right
        let trackingRequest = VNCoreMLRequest(model: visionModel) { (request, error) in
            // Always release the semaphore once this frame's request completes.
            defer { self.semaphore.signal() }
            
            // Decode the Vision observations into SSD predictions.
            guard let predictions = self.processClassifications(for: request, error: error) else {
                self.humanChangedHandler(true)
                completion(nil)
                return
            }
            // Keep only detections classified as "person".
            if let prediction: Prediction = predictions.first(where: { classNames[$0.detectedClass] == "person" }) {
                // Map the prediction back into buffer coordinates.
                // NOTE(review): imgHeight deliberately uses imgSize.width — this
                // appears to map from the square .centerCrop region (side == width)
                // back into the full frame via the yOffset; confirm against the
                // crop-and-scale behavior before changing.
                let rect = prediction.finalPrediction
                    .toCGRect(imgWidth: Double(imgSize.width),
                              imgHeight: Double(imgSize.width),
                              xOffset: 0,
                              yOffset: Double(imgSize.height - imgSize.width)/2)
                
                // Crop the detected person out of the frame.
                let image = sampleBuffer.stillImage(cropRect: rect)
                self.humanChangedHandler(false)
                completion(image)
            } else {
                self.humanChangedHandler(true)
                completion(nil)
            }
        }
        trackingRequest.imageCropAndScaleOption = .centerCrop
        
        // Block until the previous frame's request has finished.
        self.semaphore.wait()
        do {
            let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer,
                                                            orientation: orientation,
                                                            options: requestOptions)
            try imageRequestHandler.perform([trackingRequest])
        } catch {
            // The request's completion handler will not run, so release here.
            dbgPrint(error)
            completion(nil)
            self.semaphore.signal()
        }
    }
}


// MARK: - Private Methods

extension SSPipeline {
    
    /// Wraps the bundled SSD MobileNet model for use with Vision.
    /// Intentionally crashes when the model cannot be loaded, because the
    /// pipeline is unusable without it.
    func setupVision() {
        guard let model = try? VNCoreMLModel(for: ssd_mobilenet_feature_extractor().model) else {
            fatalError("Can't load VisionML model")
        }
        self.visionModel = model
    }
    
    /// Converts the raw Vision observations into SSD predictions.
    /// Returns `nil` unless the request produced exactly the two expected
    /// multi-array outputs (index 1: boxes, index 0: class scores).
    func processClassifications(for request: VNRequest, error: Error?) -> [Prediction]? {
        guard let observations = request.results as? [VNCoreMLFeatureValueObservation],
            observations.count == 2,
            let boxPredictions = observations[1].featureValue.multiArrayValue,
            let classPredictions = observations[0].featureValue.multiArrayValue else {
                return nil
        }
        return self.ssdPostProcessor.postprocess(boxPredictions: boxPredictions,
                                                 classPredictions: classPredictions)
    }
}
