//
//  CMPipeline.swift
//  SitPosition
//
//  Created by apple on 2019/2/9.
//  Copyright © 2019 apple. All rights reserved.
//

import CoreML
import Foundation
import UIKit
import Vision


class CMPipeline {
    
    // MARK: - Properties
    
    /// Called after each processed frame; `hasNobody == true` when no human was detected.
    public var humanChangedHandler: (_ hasNobody: Bool) -> Void = { _ in }
    /// Called with the detected pose and the original (unresized) input image.
    public var poseChangedHandler: (_ pose: PoseType, _ image: UIImage) -> Void = { _, _ in }
    
    #if ENABLE_RENDER_DEBUG
    /// Optional host view for the skeleton-overlay debug visualization.
    public var renderView: UIView? = nil
    private lazy var renderLayer = CALayer()
    #endif
    
    /// Size every input frame is resized to before inference.
    private let imageSize: CGSize
    
    /// The CoreML pose-estimation model.
    private let modelCoreML = MobileOpenPose()
    /// Vision request wrapping the CoreML model; built once, lazily.
    lazy var classificationRequest: [VNRequest] = {
        do {
            // Load the Custom Vision model.
            // To add a new model, drag it to the Xcode project browser making sure that the "Target Membership" is checked.
            // Then update the following line with the name of your new model.
            let model = try VNCoreMLModel(for: modelCoreML.model)
            let classificationRequest = VNCoreMLRequest(model: model, completionHandler: _handleClassification)
            return [ classificationRequest ]
        } catch {
            // A bundled model that fails to load is a programmer error — crash loudly.
            fatalError("Can't load Vision ML model: \(error)")
        }
    }()
    /// Serializes frame processing so only one image is analyzed at a time.
    let semaphore = DispatchSemaphore(value: 1)
    /// The image currently being analyzed; set in `runCoreML` before the request is
    /// performed and read back in the completion handler (which Vision invokes
    /// synchronously from `perform(_:)` for image request handlers).
    private var currentImage: UIImage!
    
    // MARK: - Object lifecycle
    
    /// - Parameter size: the size input frames are resized to before inference.
    init(size: CGSize) {
        self.imageSize = size
    }
    
    /// Runs the CoreML model on a single frame.
    /// Blocks on `semaphore` until the previous frame has finished processing.
    public func runCoreML(_ image: UIImage) {
        guard let cgImage = image.resize(to: imageSize).cgImage else {
            return
        }
        semaphore.wait()
        currentImage = image
        do {
            let classifierRequestHandler = VNImageRequestHandler(cgImage: cgImage, options: [:])
            // `perform` runs synchronously; `_handleClassification` signals the semaphore.
            try classifierRequestHandler.perform(classificationRequest)
        } catch {
            dbgPrint(error)
            // The completion handler never ran, so release the semaphore here.
            semaphore.signal()
        }
    }
    
    /// Vision completion handler: decodes the model output, estimates humans,
    /// and reports presence/pose changes through the public handlers.
    private func _handleClassification(request: VNRequest, error: Error?) {
        defer {
            // Always allow the next frame to be processed, whatever happens below.
            semaphore.signal()
        }
        // FIX: the original ignored `error` and crashed with a bare `fatalError()`
        // on an unexpected result shape. Both are recoverable — skip the frame instead.
        if let error = error {
            dbgPrint(error)
            return
        }
        guard let observations = request.results as? [VNCoreMLFeatureValueObservation],
            let mlarray = observations.first?.featureValue.multiArrayValue else {
            return
        }
        // Copy the model's heat-map output into a Swift array of Doubles.
        // NOTE(review): assumes the model's output feature is Double-typed — confirm
        // against the MobileOpenPose model description.
        let length = mlarray.count
        let doublePtr = mlarray.dataPointer.bindMemory(to: Double.self, capacity: length)
        let doubleBuffer = UnsafeBufferPointer(start: doublePtr, count: length)
        let heatmap = Array(doubleBuffer)
        
        #if ENABLE_RENDER_DEBUG
        let image = _buildLineImage(heatmap)
        // FIX: `[unowned self]` crashes if the pipeline is deallocated while this
        // block is still queued; `[weak self]` makes that a safe no-op.
        DispatchQueue.main.async { [weak self] in
            guard let self = self else { return }
            if self.renderLayer.superlayer == nil {
                self.renderLayer.frame = self.renderView?.bounds ??
                    CGRect(x: 0, y: 0, width: self.imageSize.width, height: self.imageSize.height)
                self.renderLayer.opacity = 0.6
                self.renderLayer.masksToBounds = true
                self.renderView?.layer.addSublayer(self.renderLayer)
            }
            self.renderLayer.contents = image?.cgImage
        }
        #endif
        let estimator = HumanEstimator(Int(imageSize.width), Int(imageSize.height))
        let humans: [Human] = estimator.estimate(heatmap)
        guard let human: Human = humans.first else {
            humanChangedHandler(true)
            return
        }
        humanChangedHandler(false)
        
        // Pose detector (typo `detctor` fixed; kept `var` since `detect(by:)` may be mutating).
        var detector: PoseDector = PoseDector()
        let poseType: PoseType = detector.detect(by: human)/*.reflection*/
        guard !poseType.needIgnoreIt else {
            return
        }
        poseChangedHandler(poseType, currentImage)
    }
    
    /// Renders the estimated skeletons as line segments into an image (debug visualization).
    /// - Parameter mm: the flattened heat-map output of the model.
    /// - Returns: the rendered image, or `nil` if rendering fails.
    func _buildLineImage(_ mm: Array<Double>) -> UIImage? {
        let estimator = HumanEstimator(Int(imageSize.width), Int(imageSize.height))
        let humans = estimator.estimate(mm)
        
        var keypoint = [Int32]()
        var pos = [CGPoint]()
        for human in humans {
            // Collect the detected body-part centers, keyed by CocoPart raw value.
            var centers = [Int: CGPoint]()
            for i in 0...CocoPart.Background.rawValue {
                guard let bodyPart = human.bodyParts[i] else {
                    continue
                }
                centers[i] = CGPoint(x: bodyPart.x, y: bodyPart.y)
            }
            
            // Emit one segment per renderable pair whose both endpoints were detected.
            // (The original double-checked `bodyParts.keys` and then `centers`; a single
            // `centers` lookup is equivalent since `centers` mirrors `bodyParts` above.)
            for (pairOrder, (pair1, pair2)) in CocoPairsRender.enumerated() {
                guard let start = centers[pair1], let end = centers[pair2] else {
                    continue
                }
                keypoint.append(Int32(pairOrder))
                pos.append(start)
                pos.append(end)
            }
        }
        let opencv = OpenCVWrapper()
        let bounds = CGRect(x: 0, y: 0, width: imageSize.width, height: imageSize.height)
        return opencv.renderKeyPoint(bounds, keypoint: &keypoint, keypoint_size: Int32(keypoint.count), pos: &pos)
    }
}

