//
//  XZQRCode.swift
//  XZQRCode_Swift
//
//  Created by MYKJ on 17/1/6.
//  Copyright © 2017年 zhaoyongjie. All rights reserved.
//

import UIKit
import AVFoundation

/// Camera-based FACE scanner (the "QRCode" name is historical: `setupSession()`
/// configures the metadata output for `.face` objects only; the only QR-related
/// entry point is the static-image `scanImage(_:completion:)`).
///
/// Typical flow: `prepareScan(_:scanSize:scanViewTopMargin:completion:)` →
/// `startScan()`. Once a face has been detected `maxDetectedCount` times inside
/// `scanFrame`, a photo is captured automatically (when `autoDetectingFlag` is
/// true) and delivered through `completedCallBack`.
class XZQRCode: NSObject, AVCaptureMetadataOutputObjectsDelegate,AVCaptureVideoDataOutputSampleBufferDelegate {
    /// Host view for the preview/draw layers; weak to avoid a retain cycle.
    weak var currentView: UIView?
    /// corner line width
    var lineWidth: CGFloat
    /// corner stroke color
    var strokeColor: UIColor
    /// the max count for detection; a photo is taken when this count is reached
    var maxDetectedCount: Int
    /// current count for detection (reset to 0 in `prepareScan`)
    var currentDetectedCount: Int = 0
    /// auto remove sub layers when detection completed
    var autoRemoveSubLayers: Bool
    /// completion call back, invoked with the captured photo
    var completedCallBack: ((UIImage) -> Void)?
    /// the scan rect, default is the bounds of the scan view, can modify it if need
    var scanFrame: CGRect = CGRect.zero
    /// True while the front camera is the active input (the lazy `videoInput`
    /// starts on the front camera; toggled by `switchCamara()`).
    var usingFrontCamera: Bool = true
    /// Last transformed face object that triggered the capture threshold
    /// (set in the metadata delegate callback).
    var codeObject: AVMetadataFaceObject?
    /// Size of the effective scan area. IUO: must be assigned in `prepareScan`
    /// BEFORE the lazy `dataOutput` is first evaluated (its initializer reads it).
    var scanAearSize: CGSize!
    /// Top margin of the scan area; same lifetime caveat as `scanAearSize`.
    var scanAearTopMargin: CGFloat!
    /// Crop rect derived from the scan area; computed inside `dataOutput`'s initializer.
    var cropRect: CGRect!
    /// previewLayer
    lazy var previewLayer: AVCaptureVideoPreviewLayer = {
        let layer = AVCaptureVideoPreviewLayer(session: self.session)
        layer.videoGravity = AVLayerVideoGravity.resizeAspectFill// Must not change: the rectOfInterest math in `dataOutput` assumes this gravity.
        
        return layer
    }()
    
    /// drawLayer — hosts the face-corner shape layers drawn on detection
    lazy var drawLayer = CALayer()
    /// session
    lazy var session = AVCaptureSession()
    /// Currently active capture device (front camera initially; swapped by `switchCamara()`).
    var captureDevice: AVCaptureDevice!
    /// input — front camera by default; nil when no front camera is available
    /// or the device input cannot be created
    lazy var videoInput: AVCaptureDeviceInput? = {
        if let device = self.getCamera(.front){
            self.captureDevice = device
            return try? AVCaptureDeviceInput(device: device)
        }
        return nil
    }()
    
    lazy var photoOutput = AVCapturePhotoOutput()
    /// output — metadata output whose `rectOfInterest` restricts detection to the scan area
    lazy var dataOutput: AVCaptureMetadataOutput = {
        // Create the metadata output object.
        let metadataOutput = AVCaptureMetadataOutput()
        // Derive the crop rect from the scan-area size/margin (set in `prepareScan`).
        self.cropRect = CGRect(x: (APPConfig.WidthForPortrait - scanAearSize.width) / 2, y: scanAearTopMargin, width: scanAearSize.width, height: scanAearSize.height)// (was: self.currentView!.frame)
        let size = UIScreen.main.bounds.size
        let  p1: CGFloat = size.height/size.width
        let  p2: CGFloat = 1920 / 1080  // the session outputs 1080p video (see setupSession)
        // rectOfInterest is normalized and expressed in the (rotated) video coordinate
        // space. Its value depends on two settings that must stay in sync with this
        // math: the session's `sessionPreset` (hd1920x1080, set in setupSession) and
        // the preview layer's `videoGravity` (resizeAspectFill).
        if (p1 < p2) {
            // Screen aspect is "shorter" than the 16:9 feed: with aspect-fill the
            // feed overflows vertically, so compensate with a vertical padding.
            let fixHeight: CGFloat  = size.width * p2
            let fixPadding: CGFloat = (fixHeight - size.height) / 2
            metadataOutput.rectOfInterest = CGRect(x: ( cropRect.origin.y + fixPadding) / fixHeight,
                                                   y: (size.width-(cropRect.size.width+cropRect.origin.x)) / size.width,
                                                   width: cropRect.size.height / fixHeight,
                                                   height: cropRect.size.width / size.width)
            
        } else {
            // Screen aspect is "taller" than the feed: the overflow is horizontal,
            // so compensate with a horizontal padding instead.
            let fixWidth: CGFloat  = size.width * (1/p2)
            let fixPadding: CGFloat = (fixWidth - size.width) / 2

            metadataOutput.rectOfInterest = CGRect(x:  cropRect.origin.y  / size.height,
                                                   y: (size.width-(cropRect.size.width+cropRect.origin.x)+fixPadding) / fixWidth,
                                                   width: cropRect.size.height / size.height,
                                                   height: cropRect.size.width / fixWidth)

        }
        return metadataOutput
    }()
    /// true = auto face-detection mode on (a photo is captured automatically once
    /// the detection threshold is reached); false = detection counts but no auto-capture
    var autoDetectingFlag: Bool = true
    /// Default initializer: 2pt lines, project panel color, threshold 50, keep layers.
    override init() {
        self.lineWidth = 2
        self.strokeColor = APPConfig.ScanPanelColor
        self.maxDetectedCount = 50// NOTE(review): original comment said 30 but the value is 50 — the delay gives the camera time to focus before the auto-capture; confirm intended value.
        self.autoRemoveSubLayers = false
        
        super.init()
    }
    
    ///  init function
    ///
    ///  - parameter autoRemoveSubLayers: remove sub layers auto after detected code image
    ///  - parameter lineWidth:           line width, default is 2
    ///  - parameter strokeColor:         stroke color, default is Green
    ///  - parameter maxDetectedCount:    max detecte count, default is 20
    ///
    ///  - returns: the scanner object
    ///  (super.init() is called implicitly — NSObject's zero-argument designated init.)
    init(autoRemoveSubLayers: Bool, lineWidth: CGFloat = 2, strokeColor: UIColor = UIColor.green, maxDetectedCount: Int = 20) {
        
        self.lineWidth = lineWidth
        self.strokeColor = strokeColor
        self.maxDetectedCount = maxDetectedCount
        self.autoRemoveSubLayers = autoRemoveSubLayers
    }
    
    deinit {
        // Stop the capture pipeline and detach the layers before the scanner dies.
        if session.isRunning {
            session.stopRunning()
        }
        
        removeAllLayers()
        DebugLog("原生扫描XZQRCode deinit")
    }
    
    // MARK: - Video Scan
    ///  prepare scan
    ///
    ///  - parameter view:              the scan view, the preview layer and the drawing layer will be insert into this view
    ///  - parameter scanSize:          size of the effective scan area
    ///  - parameter scanViewTopMargin: top margin of the scan area inside the screen
    ///  - parameter completion:        called with the auto-captured photo
    func prepareScan(_ view: UIView,scanSize: CGSize,scanViewTopMargin: CGFloat, completion:@escaping (_ image: UIImage)->Void) {
        currentView = view
        scanFrame = view.bounds
        // Must be set before setupSession(), which lazily builds `dataOutput`
        // from these two values.
        scanAearSize = scanSize
        scanAearTopMargin = scanViewTopMargin
        completedCallBack = completion
        currentDetectedCount = 0
        
        setupSession()
        setupLayers(view)
    }
    
    // MARK: - Image Scan
    ///  Scan a still image for QR codes (off the main queue; results on main).
    ///
    ///  - parameter image:      the image to scan
    ///  - parameter completion: called on the main queue with all decoded QR strings
    ///                          (empty string substituted for undecodable payloads)
    func scanImage(_ image: UIImage, completion: @escaping ([String]) -> ()) {
        DispatchQueue.global(qos: .default).async {
            let detector = CIDetector(ofType: CIDetectorTypeQRCode, context: CIContext(options: nil), options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
            
            // NOTE(review): CIImage(image:) can return nil (e.g. a UIImage with no
            // backing CGImage/CIImage) — the `ciImage!` below would then crash.
            let ciImage = CIImage(image: image)
            
            let features = detector?.features(in: ciImage!)
            
            var arrayM: [String] = []
            for feature in features as! [CIQRCodeFeature] {
                arrayM.append(feature.messageString ?? "")
            }
            
            DispatchQueue.main.async {
                completion(arrayM)
            }
        }
    }
    /// Enable auto face-detection mode (auto-capture on threshold).
    func startAutoDetecting() {
        self.autoDetectingFlag = true
    }
    /// Disable auto face-detection mode (faces are still counted, no auto-capture).
    func stopAutoDetecting() {
        self.autoDetectingFlag = false
    }
    /// start scan (no-op if the session is already running)
    func startScan() {
        if session.isRunning {
//            debugPrint("====QRCodeScanner====")
//            debugPrint("Start扫描器已运行")
            
            return
        }
        session.startRunning()
    }
    
    /// stop scan (no-op if the session is not running)
    func stopScan() {
        if !session.isRunning {
//            debugPrint("====QRCodeScanner====")
//            debugPrint("Stop扫描器已运行")
            
            return
        }
        session.stopRunning()
    }
    
    /// Insert the drawing layer and the preview layer into `view`.
    /// Both are inserted at index 0, so the preview ends up BELOW the draw layer.
    func setupLayers(_ view: UIView) {
        drawLayer.frame = view.bounds
        
        view.layer.insertSublayer(drawLayer, at: 0)
        previewLayer.frame = view.bounds
        view.layer.insertSublayer(previewLayer, at: 0)
    }
    
    /// Wire up the capture session: camera input, face-metadata output, photo
    /// output, preset, and focus/exposure configuration. Bails out silently if
    /// no camera input is available or the session is already running.
    func setupSession() {
        // Bail out when there is no camera input.
        guard let _ = self.videoInput else {
            return
        }
        if session.isRunning {
            debugPrint("====QRCodeScanner====")
            debugPrint("setUp扫描器已运行")
            return
        }
        
        if !session.canAddInput(videoInput!) {
            debugPrint("====QRCodeScanner====")
            debugPrint("找不到输入设备")
            return
        }
        
        // NOTE(review): the two output-failure messages below are copy-pasted from
        // the input-failure branch ("找不到输入设备" = "input device not found").
        if !session.canAddOutput(dataOutput) {
            debugPrint("====QRCodeScanner====")
            debugPrint("找不到输入设备")
            return
        }
        if !session.canAddOutput(photoOutput) {
            debugPrint("====QRCodeScanner====")
            debugPrint("找不到输入设备")
            return
        }
        session.addInput(videoInput!)
        session.addOutput(dataOutput)
        session.addOutput(photoOutput)
        
        // Set .photo first; overridden to hd1920x1080 below when supported.
        self.session.sessionPreset = .photo
        photoOutput.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg] )], completionHandler: nil)
        //Moennig
        do {
            try videoInput?.device.lockForConfiguration()
            if session.canSetSessionPreset(AVCaptureSession.Preset.hd1920x1080) {
                session.sessionPreset = AVCaptureSession.Preset.hd1920x1080// Must not change: the rectOfInterest math in `dataOutput` assumes a 1080p feed.
            }
            if (videoInput?.device.isFocusModeSupported(AVCaptureDevice.FocusMode.continuousAutoFocus))! {
                videoInput?.device.focusMode = AVCaptureDevice.FocusMode.continuousAutoFocus
            }
            if (videoInput?.device.isAutoFocusRangeRestrictionSupported)! {
                videoInput?.device.autoFocusRangeRestriction = AVCaptureDevice.AutoFocusRangeRestriction.far
            }
            
            if (videoInput?.device.isExposureModeSupported(AVCaptureDevice.ExposureMode.continuousAutoExposure))!{
                videoInput?.device.exposureMode = AVCaptureDevice.ExposureMode.continuousAutoExposure
            }
            
//            if (videoInput?.device.isTorchAvailable)! && (videoInput?.device.isTorchModeSupported(AVCaptureDevice.TorchMode.auto))! {
//                videoInput?.device.torchMode = AVCaptureDevice.TorchMode.auto
//            }
            videoInput?.device.unlockForConfiguration()
            
        }catch{
            print("failed")
        }
        // Faces only — this class never scans QR codes from the live feed.
        dataOutput.metadataObjectTypes = [.face]
        dataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
    }
    
    /// Metadata delegate: counts faces seen inside `scanFrame`; on reaching
    /// `maxDetectedCount` draws the face corners and (if `autoDetectingFlag`)
    /// triggers an automatic photo capture.
    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        clearDrawLayer()
        for dataObject in metadataObjects {
            
            if let codeObject = dataObject as? AVMetadataFaceObject,
                let obj = previewLayer.transformedMetadataObject(for: codeObject) as? AVMetadataFaceObject {
                
                if scanFrame.contains(obj.bounds) {
                    currentDetectedCount += 1
                    if currentDetectedCount == maxDetectedCount {// exactly equal — fires once, since the count keeps increasing past the threshold
                        // transform codeObject into preview-layer coordinates for drawing
                        if let codeObject = previewLayer.transformedMetadataObject(for: codeObject) as? AVMetadataFaceObject{
                            self.codeObject = codeObject
                            self.drawCodeCorners(codeObject)
                        }
                        
                        if autoRemoveSubLayers {
                            removeAllLayers()
                            self.setupLayers(self.currentView!)
                        }
                        // Face confirmed — take the photo (only in auto-detection mode).
                        if self.autoDetectingFlag {
                            self.takePhoto()
                        }
                    }
                    
                }
            }
        }
    }
    
    /// Detach preview and drawing layers from their host view.
    func removeAllLayers() {
        previewLayer.removeFromSuperlayer()
        drawLayer.removeFromSuperlayer()
    }
    
    /// Remove all previously drawn face-corner shape layers.
    func clearDrawLayer() {
        if drawLayer.sublayers == nil {
            return
        }
        for layer in drawLayer.sublayers! {
            layer.removeFromSuperlayer()
        }
    }
    
    /// Draw a rectangle around the detected face using the configured line
    /// width and stroke color.
    func drawCodeCorners(_ codeObject: AVMetadataFaceObject) {
        let shapeLayer = CAShapeLayer()
        shapeLayer.lineWidth = lineWidth
        shapeLayer.strokeColor = strokeColor.cgColor
        shapeLayer.fillColor = UIColor.clear.cgColor
        shapeLayer.path = createPath(codeObject.bounds)
        drawLayer.addSublayer(shapeLayer)
    }
    
    /// Build a rectangular path covering `bounds`.
    func createPath(_ bounds: CGRect) -> CGMutablePath {
        let path = CGMutablePath()
        path.addRect(bounds)
        return path
    }
    
    /// Fetch the built-in wide-angle camera at the given position, or nil.
    private func getCamera(_ position: AVCaptureDevice.Position) -> AVCaptureDevice?{
        if let device = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: position).devices.first{
            return device
        }
        return nil
    }
    // MARK: - Switch between front and back cameras
    /// Swap the session's camera input. (Method name "switchCamara" is a typo
    /// kept for source compatibility with existing callers.)
    func switchCamara() {
        do{
            if let _ = session.inputs.first {
                session.removeInput(session.inputs.first!)
            }
            
            if(usingFrontCamera){
                usingFrontCamera = false
                self.captureDevice = getCamera(.back)
            }else{
                usingFrontCamera = true
                self.captureDevice = getCamera(.front)
            }
            if let _ = captureDevice{
                let captureDeviceInput1 = try AVCaptureDeviceInput(device: self.captureDevice)
                session.addInput(captureDeviceInput1)
            }
            
        }catch{
            print(error.localizedDescription)
        }
    }
    /// Capture a still photo; the result arrives in the
    /// AVCapturePhotoCaptureDelegate extension below. No-op without a device.
    func takePhoto(){
        if let _ = self.captureDevice {
            let settings = AVCapturePhotoSettings()
            photoOutput.capturePhoto(with: settings, delegate: self)
        }
    }
}
// MARK: - AVCapturePhotoCaptureDelegate
extension XZQRCode: AVCapturePhotoCaptureDelegate {
    /// Called once the photo output has finished processing a capture.
    /// Stops the capture session, then delivers the decoded image through
    /// `completedCallBack` — but only if both the file-data representation
    /// and the UIImage decoding succeed; otherwise no callback is made.
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        session.stopRunning()
        guard let imageData = photo.fileDataRepresentation(),
              let capturedImage = UIImage(data: imageData) else { return }
        completedCallBack?(capturedImage)
    }
}
