//
//  CaptureViewController.swift
//  Entrance
//
//  Created by Chunhui on 2018/1/22.
//  Copyright © 2018年 chunhuiLai. All rights reserved.
//

import UIKit
import AVKit
import SnapKit
import SYKit
//import YourFilm
import RxSwift

/// Panel that shows captured face thumbnails in a grid, with a confirm
/// button pinned to the bottom edge.
class FaceDisplayView: UIView, UICollectionViewDataSource {

    /// Tag used to locate and reuse the image view inside a recycled cell.
    private static let imageViewTag = 1001

    /// Face thumbnails to display; the grid reloads whenever this changes.
    var faces = [UIImage]() {
        didSet {
            collectionView.reloadData()
        }
    }

    /// Grid of face thumbnails (layout provided by FaceDisplayFlowLayout).
    lazy var collectionView: UICollectionView = {

        let collectionView = UICollectionView(frame: CGRect.zero, collectionViewLayout: FaceDisplayFlowLayout())
        
        collectionView.register(UICollectionViewCell.self, forCellWithReuseIdentifier: "collectionCell")
        collectionView.backgroundColor = UIColor.lightGray
        collectionView.dataSource = self
        return collectionView
    }()
    
    /// "确定" (confirm) button below the grid.
    lazy var certainButton: UIButton = {
        let button = UIButton.init(type: .custom)
        button.setTitle("确定", for: .normal)
        button.backgroundColor = UIColor.lightGray
        return button
    }()
    
    override init(frame: CGRect) {
        super.init(frame: frame)
        
        layoutUI()
    }
    
    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
    
    /// Pins the grid above the confirm button using SnapKit.
    func layoutUI() {
        
        addSubview(collectionView)
  
        addSubview(certainButton)
    
        collectionView.snp.makeConstraints { (make) in
            make.top.equalTo(10)
            make.left.equalToSuperview().offset(10)
            make.right.equalTo(-10)
            make.bottom.equalTo(-50)
        }
        
        certainButton.snp.makeConstraints { (make) in
            make.left.right.bottom.equalToSuperview()
            make.height.equalTo(40)
        }
    }
    
    // MARK: - UICollectionViewDataSource
    
    func numberOfSections(in collectionView: UICollectionView) -> Int {
        return 1
    }
    
    func collectionView(_ collectionView: UICollectionView, numberOfItemsInSection section: Int) -> Int {
        return faces.count
    }
    
    func collectionView(_ collectionView: UICollectionView, cellForItemAt indexPath: IndexPath) -> UICollectionViewCell {
        let cell = collectionView.dequeueReusableCell(withReuseIdentifier: "collectionCell", for: indexPath)
        
        // Fix: the previous implementation added a brand-new UIImageView on every
        // dequeue, so recycled cells accumulated stale, overlapping image views.
        // Reuse the image view already installed in the cell when present.
        let imageView: UIImageView
        if let existing = cell.contentView.viewWithTag(FaceDisplayView.imageViewTag) as? UIImageView {
            imageView = existing
        } else {
            imageView = UIImageView()
            imageView.tag = FaceDisplayView.imageViewTag
            cell.contentView.addSubview(imageView)
        }
        imageView.frame = cell.bounds
        imageView.image = faces.sy_Element(at: indexPath.row)
        return cell
    }
    
}

/// Namespace for compile-time layout configuration values.
struct SYConfigs {
    /// Constants used by the face-recognition screen.
    struct FaceRecognize {
        /// Preview layer width: full screen width minus a 16pt margin on each side.
        static let previewLayerWidth: CGFloat = UIScreen.sy_width - 32
    }
}

/// Vertical flow layout for the face-thumbnail grid: fixed 100pt square items
/// with 20pt line/inter-item spacing and matching section insets.
class FaceDisplayFlowLayout: UICollectionViewFlowLayout {
    override func prepare() {
        super.prepare()

        let spacing: CGFloat = 20
        minimumLineSpacing = spacing
        minimumInteritemSpacing = spacing
        sectionInset = UIEdgeInsets(top: spacing, left: minimumInteritemSpacing, bottom: spacing, right: minimumInteritemSpacing)

        // Fixed square cells; one-column sizing was considered but not used.
        let side = 100
        itemSize = CGSize(width: side, height: side)

        scrollDirection = .vertical
    }
}

/// Camera screen that runs live face detection on the front camera and lets
/// the user capture circular face thumbnails from the current frame.
class CaptureViewController: UIViewController, SYBaseView {
    
    var disposeBag = DisposeBag()
    
    /// Latest camera frame, kept so the capture button can detect faces on demand.
    var currentImageBuffer: CVImageBuffer?
    
    /// Front camera device (nil when the hardware has no front camera).
    lazy var device: AVCaptureDevice? = {
        return AVCaptureDevice.devices(for: .video).filter({ $0.position == .front }).first
    }()
    
    /// Session that shuttles data between the capture input and the outputs.
    lazy var session: AVCaptureSession = {
        let session = AVCaptureSession()
        session.sessionPreset = .high
        return session
    }()
    
    /// Raw video frame output (420 YpCbCr bi-planar); late frames are dropped
    /// to keep latency low.
    lazy var videoDataOutput: AVCaptureVideoDataOutput = {
        let output = AVCaptureVideoDataOutput()
        output.alwaysDiscardsLateVideoFrames = true
        
        output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange]
        
        return output
    }()
    
    /// Metadata output used for cheap on-device face detection.
    lazy var metadataOutput: AVCaptureMetadataOutput = {
        let metadataOutput = AVCaptureMetadataOutput()
        metadataOutput.setMetadataObjectsDelegate(self, queue: self.detectQueue)
        return metadataOutput
    }()
    
    /// Serial queue on which all capture and detection callbacks run.
    var detectQueue: DispatchQueue = DispatchQueue(label: "face detect")
    
    /// Live camera preview layer.
    lazy var previewLayer: AVCaptureVideoPreviewLayer = {
        let layer = AVCaptureVideoPreviewLayer(session: self.session)
        layer.videoGravity = .resizeAspectFill
        
        return layer
    }()
    
    /// Circular region (in screen coordinates) a face must sit inside to be
    /// considered "captured".
    var faceDetectionFrame: CGRect = {
        let width = UIScreen.sy_width - 120
        let y = (UIScreen.sy_height - width) / 2 - 30
        return CGRect.init(x: 60, y: y, width: width, height: width)
    }()
    
    /// Whether the torch is currently on.
    var torchIsOn: Bool = false
    
    /// Scrolling log shown in `printBoard`; callers prepend the newest line.
    var printContent: String = "" {
        didSet {
            // Fix: the original declared `didSet (newValue)`, which binds the OLD
            // value (didSet's implicit parameter is oldValue), so the board always
            // displayed the previous text. Capture the new value explicitly.
            let text = printContent
            DispatchQueue.main.async {
                self.printBoard.text = text
            }
        }
    }
    
    lazy var printBoard: UITextView =  {
        let printBoard = UITextView()
        printBoard.backgroundColor = UIColor.green
        printBoard.isEditable = false
        return printBoard
    }()
    
    /// Red frame drawn around a detected face (hidden while no face is found).
    lazy var faceRim: UIView = {
        let rim = UIView()
        rim.backgroundColor = UIColor.clear
        rim.layer.borderColor = UIColor.red.cgColor
        rim.layer.borderWidth = 1
        rim.alpha = 0
        return rim
    }()
    
    /// Panel presenting the captured face thumbnails.
    lazy var resultView: FaceDisplayView = {
        let view = FaceDisplayView()
        let width = UIScreen.sy_width / 2
        view.sy_size = CGSize.init(width: width / 2, height: width * 1.5)
        return view
    }()

    override func viewDidLoad() {
        super.viewDidLoad()

        configureSession()
        
        setupUI()
    }
    
    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        
        if SYDeviveAuthorization.captureDeviceDidAuthorized() {
            runSession()
        }
        else {
            AVCaptureDevice.requestAccess(for: .video, completionHandler: { (granted) in
                if granted {
                    self.runSession()
                }
                else {
                    // Fix: the completion handler runs on an arbitrary queue; UI
                    // work such as dismiss must go back to the main thread.
                    DispatchQueue.main.async {
                        self.dismiss(animated: true, completion: nil)
                    }
                }
            })
        }
    }
    
    override func viewDidDisappear(_ animated: Bool) {
        super.viewDidDisappear(animated)
        
        stopSession()
    }
    
    /// Configures the capture device (focus / exposure / white balance) and
    /// wires the input and outputs into the session.
    func configureSession() {
        
        guard let device = device else {
            flashMessage("设备异常")
            return
        }
        
        if let _ = try? device.lockForConfiguration() {
            
            // Smooth autofocus
            if device.isSmoothAutoFocusSupported {
                 device.isSmoothAutoFocusEnabled = true
            }
            
            // Continuous autofocus
            if device.isFocusModeSupported(.continuousAutoFocus) {
                device.focusMode = .continuousAutoFocus
            }
            
            // Continuous auto exposure
            if device.isExposureModeSupported(.continuousAutoExposure) {
                device.exposureMode = .continuousAutoExposure
            }
            
            // Continuous auto white balance.
            // Fix: the original checked for support but never assigned the mode.
            if device.isWhiteBalanceModeSupported(.continuousAutoWhiteBalance) {
                device.whiteBalanceMode = .continuousAutoWhiteBalance
            }
           
            device.unlockForConfiguration()
            
            do {
                let input = try AVCaptureDeviceInput.init(device: device)
                if session.canAddInput(input) {
                    session.addInput(input)
                }
                
                if session.canAddOutput(videoDataOutput) {
                    session.addOutput(videoDataOutput)
                }
                
                if session.canAddOutput(metadataOutput) {
                    session.addOutput(metadataOutput)
                    // metadataObjectTypes must be set AFTER addOutput, or it crashes.
                    metadataOutput.metadataObjectTypes = [.face]
                }
                
            }
            catch {
                // Fix: the original swallowed this error silently.
                flashMessage("设备异常")
            }
           
        }
    }
    
    /// Starts the session off the main thread (startRunning blocks).
    func runSession() {
        if session.isRunning { return }
        detectQueue.async { [weak self] in
            self?.session.startRunning()
        }
    }
    
    /// Stops the session off the main thread.
    /// Fix: the original dispatched the blocking stopRunning onto the MAIN
    /// queue; run it on the detect queue, symmetric with runSession().
    func stopSession() {
        guard session.isRunning else { return }
        detectQueue.async { [weak self] in
            self?.session.stopRunning()
        }
    }
    
    /// Toggles the torch; shows a message when the device has no torch hardware.
    func torchSwitch() {
        // Fix: the original only checked `device?.hasTorch` for nil (always
        // non-nil on a real device) instead of its value, flipped `torchIsOn`
        // BEFORE branching so the torch could never actually be switched on,
        // and never released the configuration lock.
        guard let device = device, device.hasTorch else {
            flashMessage("您的设备没有闪光设备，不能提供手电筒功能")
            return
        }
        
        do {
            try device.lockForConfiguration()
            defer { device.unlockForConfiguration() }
            if torchIsOn {
                device.torchMode = .off
                torchIsOn = false
            }
            else {
                try device.setTorchModeOn(level: 0.5)
                torchIsOn = true
            }
        }
        catch {}
    }
    
    /// Builds the preview layer, detection circle, log board and capture button.
    func setupUI() {
        
        previewLayer.frame = UIScreen.sy_bounds
        view.layer.addSublayer(previewLayer)
        
        view.addSubview(faceRim)
        
        view.addSubview(printBoard)
        printBoard.snp.makeConstraints { (make) in
            make.left.right.bottom.equalToSuperview()
            make.height.equalTo(70)
        }
        
        // Blue circle marking the area a face must fill to count as captured.
        let circleLayer = CAShapeLayer()
        
        let path = UIBezierPath.init(roundedRect: faceDetectionFrame, cornerRadius: faceDetectionFrame.width / 2).cgPath
        circleLayer.path = path
        circleLayer.fillColor = UIColor.clear.cgColor
        circleLayer.lineWidth = 3
        circleLayer.strokeColor = UIColor.blue.cgColor
        
        view.layer.addSublayer(circleLayer)
        
        let captureButton = UIButton().then {
            $0.setTitle("捕捉人脸", for: .normal)
            $0.titleLabel?.textColor = UIColor.white
            $0.backgroundColor = UIColor.orange
        }
        view.addSubview(captureButton)
        captureButton.snp.makeConstraints { (make) in
            make.bottom.right.equalTo(-10)
            make.size.equalTo(CGSize(width: 80, height: 30))
        }
        
        // Fix: the original closure captured self strongly while the subscription
        // is retained by self's disposeBag — a retain cycle. Capture weakly.
        captureButton.rx.tap.bind { [weak self] in
            guard let strongSelf = self, let buffer = strongSelf.currentImageBuffer else { return }
            let faces = strongSelf.fetchFaces(from: buffer)
            strongSelf.display(images: faces)
            strongSelf.suspendCaptureBuffer()
        }
        .disposed(by: disposeBag)
    }
    
    /// Presents the captured face thumbnails. Currently a no-op: the YourFilm
    /// dependency is commented out at the top of the file.
    func display(images: [UIImage]) {
        
//        resultView.certainButton.rx.tap.bind { _ in
//            YourFilm.curtainCall()
//            }
//            .disposed(by: disposeBag)
//
//        resultView.faces = images
//        var plot = YourFilm.Plot.default
//        plot.showTimeDuration = TimeInterval(Int.max)
//        YourFilm.show(resultView, plot: plot, scenery: YourFilm.Scenery.default, inView: nil)
    }
}

/// Shared high-accuracy Core Image face detector.
/// Fix: CIDetector construction is expensive, and detect(with:) runs at camera
/// frame rate — build the detector once instead of once per frame.
private let sharedFaceDetector = CIDetector(ofType: CIDetectorTypeFace,
                                            context: nil,
                                            options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])

// MARK: - Capture delegates

extension CaptureViewController: AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureMetadataOutputObjectsDelegate {
    
    /// Face metadata callback: once a detected face sits fully inside the
    /// on-screen detection circle, start forwarding frame buffers so that
    /// detect(with:) can analyze them.
    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        if let metaData = metadataObjects.first, let transformedMetaData = previewLayer.transformedMetadataObject(for: metaData) {

            // Bounds are converted into the preview layer's coordinate space.
            let faceRegion = transformedMetaData.bounds
            
            if metaData.type == .face {
                
                if faceDetectionFrame.contains(faceRegion) {
                    resumeCaptureBuffer()
                }
            }
        }
    }
    
    // MARK: Frame capture
    /// Per-frame callback; fires at roughly the display refresh rate while the
    /// sample-buffer delegate is installed (see resumeCaptureBuffer()).
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard output == videoDataOutput else {
            return
        }
        
        if let imageBuffer: CVImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
            detect(with: imageBuffer)
        }
    }
    
    /// Runs CIDetector face analysis on one frame, logging smile / blink state
    /// to the print board. Also stashes the buffer for on-demand capture.
    func detect(with imageBuffer: CVImageBuffer) {
        currentImageBuffer = imageBuffer
        
        let personciImage = CIImage.init(cvImageBuffer: imageBuffer)
        
        // Orientation 5 matches the front camera's native buffer orientation.
        let imageOptions = [CIDetectorImageOrientation: NSNumber(value: 5), CIDetectorSmile: true, CIDetectorEyeBlink: true]
        
        let faces = sharedFaceDetector?.features(in: personciImage, options: imageOptions)
        
        let ciImageSize = personciImage.extent.size  // typically 1920 x 1080
        
        // Core Image's origin is bottom-left; flip into UIKit's top-left space.
        var transform = CGAffineTransform(scaleX: 1, y: -1)
        transform = transform.translatedBy(x: 0, y: -ciImageSize.height)
        
        if let face = faces?.first as? CIFaceFeature {
            
            // Map the face bounds into screen coordinates (aspect-fit scaling).
            var faceViewBounds = face.bounds.applying(transform)
            
            let viewSize = UIScreen.sy_size
            let scale = min(viewSize.width / ciImageSize.width,
                            viewSize.height / ciImageSize.height)
            let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
            let offsetY = (viewSize.height - ciImageSize.height * scale) / 2
            
            faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
            faceViewBounds.origin.x += offsetX
            faceViewBounds.origin.y += offsetY
            // NOTE(review): faceViewBounds is currently unused — the faceRim
            // drawing code that consumed it is disabled. Kept for re-enabling.
            
            if face.hasSmile {
                printContent = "正在微笑😊😊 \n" + printContent
            }
            
            if face.leftEyeClosed {
                printContent = "左眼闭上了👁👁 \n" + printContent
            }
            
            if face.rightEyeClosed {
                printContent = "右眼闭上了👁👁 \n" + printContent
            }
        }
        else {
            DispatchQueue.main.async {
                self.faceRim.alpha = 0
            }
        }
    }
    
    /// Detects all faces in the given frame and returns circular-masked
    /// thumbnail images, one per face.
    func fetchFaces(from imageBuffer: CVImageBuffer) -> [UIImage] {
        let ciimage = CIImage.init(cvImageBuffer: imageBuffer)
        
        let imageOptions = [CIDetectorImageOrientation: NSNumber(value: 5), CIDetectorSmile: true, CIDetectorEyeBlink: true]
        
        let faces = sharedFaceDetector?.features(in: ciimage, options: imageOptions)
        let ciImageSize = ciimage.extent.size
        
        let viewSize = UIScreen.sy_size
        let scale = min(viewSize.width / ciImageSize.width,
                        viewSize.height / ciImageSize.height)
        
        return faces?.flatMap { feature -> UIImage? in
            // Expand the detected bounds a little so the crop is not too tight.
            let rect = feature.bounds.insetBy(dx: -10, dy: -10)
            let cropped = ciimage.cropped(to: rect)
            
            // Render the cropped CIImage into a UIImage.
            UIGraphicsBeginImageContextWithOptions(rect.size, false, scale)
            defer { UIGraphicsEndImageContext() }
            UIImage(ciImage: cropped, scale: scale, orientation: .right).draw(in: CGRect(origin: .zero, size: rect.size))
            guard let face = UIGraphicsGetImageFromCurrentImageContext() else { return nil }
            
            // Crop to a centered square, then clip to a circle.
            let size = face.size
            let breadth = min(size.width, size.height)
            let breadthSize = CGSize(width: breadth, height: breadth)
            UIGraphicsBeginImageContextWithOptions(breadthSize, false, scale)
            defer { UIGraphicsEndImageContext() }
            guard let cgImage = face.cgImage?.cropping(to: CGRect(origin:
                CGPoint(x: size.width > size.height ? (size.width - size.height).rounded(.down) / 2 : 0,
                        y: size.height > size.width ? (size.height - size.width).rounded(.down) / 2 : 0),
                                                                  size: breadthSize)) else { return nil }
            let faceRect = CGRect(origin: .zero, size: breadthSize)
            UIBezierPath(ovalIn: faceRect).addClip()
            UIImage(cgImage: cgImage).draw(in: faceRect)
            return UIGraphicsGetImageFromCurrentImageContext()
            } ?? []
        
    }
    
    /// Starts delivering frame buffers to captureOutput on the detect queue.
    func resumeCaptureBuffer() {
        videoDataOutput.setSampleBufferDelegate(self, queue: self.detectQueue)
    }
    
    /// Stops frame delivery (called once a capture has been taken).
    func suspendCaptureBuffer() {
        videoDataOutput.setSampleBufferDelegate(nil, queue: self.detectQueue)
    }
}





