//
//  ViewController.swift
//  studyGLKit
//
//  Created by 虞明义 on 2017/9/6.
//  Copyright © 2017年 yumingyi. All rights reserved.
//

import UIKit
import AVFoundation
import GLKit



/// Camera view controller: streams video frames, detects a rectangular
/// document in each frame with Core Image, and hands the cropped result
/// back through `getImageAction`.
class ViewController: UIViewController {
    
    // Callback invoked (on the main queue) with the cropped image — may be nil.
    var getImageAction:((UIImage?)->Void)?
    
    // The physical camera currently in use (nil until setup succeeds).
    fileprivate var device:AVCaptureDevice?
    
    // Coordinates the data flow between capture inputs and outputs.
    fileprivate lazy var session:AVCaptureSession = {
        return AVCaptureSession.init()
    }()
    // Wraps the input hardware — camera (and, if added, microphone).
    fileprivate var input:AVCaptureDeviceInput?
    
    // Pre-iOS-10 alternative kept for reference:
//    fileprivate var output:AVCaptureOutput?
    fileprivate lazy var output:AVCaptureVideoDataOutput = AVCaptureVideoDataOutput.init()
    // The video-data output above delivers raw frames for image processing.
    
    // Preview layer showing the live camera feed.
    fileprivate var seeLayer:AVCaptureVideoPreviewLayer?
    // Reference to the parent view.
    fileprivate var parentView:UIView!
    // Temporary storage for a captured image.
    fileprivate var image:UIImage? = nil
    
    // Serial queue on which the sample-buffer delegate callbacks run.
    fileprivate var captureQueue:DispatchQueue!
    
    // Rectangle detector (high accuracy) used to find document-like shapes.
    // CIDetector.init is failable, hence the optional.
    fileprivate lazy var detector:CIDetector? = {
        let detor = CIDetector.init(ofType: CIDetectorTypeRectangle, context: nil, options: [CIDetectorAccuracy:CIDetectorAccuracyHigh])
        return detor
    }()
    
    fileprivate lazy var intrinsicContentSize:CGSize = CGSize.zero
    
    // AVCaptureFlashMode (flash): the device must be locked with
    //   lockForConfiguration before changing this and unlocked afterwards,
    //   otherwise the app crashes.
    // AVCaptureFocusMode (focus)
    // AVCaptureExposureMode (exposure)
    // AVCaptureWhiteBalanceMode (white balance)
    // Flash and white balance can be configured when the camera is created.
    // Exposure depends on the light at the focus point, so it is handled
    // together with focus.

    override func viewDidLoad() {
        super.viewDidLoad()
        setUp()
    }

    //MARK: - UI setup ************
    private func setUp(){
        captureQueue = DispatchQueue.init(label: "com.studyGLKit")
//        view.addSubview(glView)

        allocWithSession(position: AVCaptureDevicePosition.back)
//        view.bringSubview(toFront: glView)
//        glView.addSubview(submitBtn)
    }
    //MARK: - Data loading **************
    
    //MARK: - Tap handlers **************
    @objc private func submitBtnCliked(btn:UIButton){
        
    }
    //MARK: - Network requests **************
    
    //MARK: - Business logic **************
    
    //MARK: - Delegate methods **************
    
    //MARK: - Notification registration / teardown **************

    //OPENGL
    fileprivate lazy var glContext:EAGLContext = EAGLContext.init(api: EAGLRenderingAPI.openGLES2)
    // GLKView used for GPU-side rendering of processed frames.
    fileprivate lazy var glView:GLKView = {
        [unowned self] in
        let gl:GLKView = GLKView.init(frame: self.view.bounds, context: self.glContext)
        gl.autoresizingMask = [UIViewAutoresizing.flexibleWidth,UIViewAutoresizing.flexibleHeight]
        gl.translatesAutoresizingMaskIntoConstraints = true
        // We trigger drawing ourselves (instead of via -drawRect:), so
        // enableSetNeedsDisplay is false; when a new image is available we
        // must call -display explicitly.
        gl.enableSetNeedsDisplay = false
        gl.contentScaleFactor = 1.0
        //    drawableDepthFormat
        //    The GL context may optionally own a depth buffer, which ensures that
        //    objects closer to the viewer are drawn in front of (i.e. occlude)
        //    objects further away.
        //    By default OpenGL stores every drawn pixel's depth; before drawing a
        //    new pixel it checks the depth buffer — if something closer has already
        //    been drawn there, the new pixel is skipped, otherwise it is written to
        //    both the depth and color buffers.
        //    The default is GLKViewDrawableDepthFormatNone (no depth buffer at all).
        //    For 3D rendering choose format16 or format24: format16 uses fewer
        //    resources but can produce artifacts when objects are very close together.
        gl.drawableDepthFormat = GLKViewDrawableDepthFormat.format24
        // Default color format is RGBA8888: 8 bits per channel, 4 bytes per pixel —
        // widest color range, best appearance.
        // If a smaller color range is acceptable, RGB565 saves memory and
        // processing time.
        gl.drawableColorFormat = GLKViewDrawableColorFormat.RGBA8888
        // First GL command: enable depth testing.
        glEnable(GLenum(GL_DEPTH_TEST))
        return gl
        }()
    
    // CIContext backed by the GL context; software rendering disabled and no
    // working color space (frames are used as-is).
    fileprivate lazy var coreImageContext:CIContext = {
        return CIContext.init(eaglContext: self.glContext, options: [kCIContextWorkingColorSpace:NSNull.init(),kCIContextUseSoftwareRenderer:0])
    }()

    // NOTE(review): the x-offset uses (kScreenW - 40) * 0.5 but the button is
    // 80pt wide — for a centered button this presumably should be
    // (kScreenW - 80) * 0.5; confirm intended placement.
    fileprivate lazy var submitBtn:UIButton = {
        let btn = UIButton.init(frame: CGRect.init(x: (kScreenW - 40) * 0.5, y: kScreenH - 100, width: 80, height: 100))
        btn.backgroundColor = UIColor.green
        btn.layer.cornerRadius = 10
        btn.layer.masksToBounds = true
        btn.addTarget(self, action: #selector(submitBtnCliked), for: UIControlEvents.touchUpInside)
        return btn
    }()
}

extension ViewController{

    /// Builds the capture pipeline for the camera at `position`
    /// (device → input → session → video-data output → preview layer)
    /// and starts the session. Logs and bails out early when no camera or
    /// input can be obtained.
    fileprivate func allocWithSession(position:AVCaptureDevicePosition){
        device = getCameraDevice(position: position)
        guard let camera = device else{
            print("打开相机失败")
            return
        }
        input = getInput(device: camera)
        // getInput already logged the failure; without an input there is
        // nothing to attach to the session.
        guard let input = input else{
            return
        }

        // Capture resolution — chosen per device model.
        if IsiPhone4(){
            // The 4s must use this preset, otherwise reading pixel values
            // later goes out of bounds and crashes.
            session.sessionPreset = AVCaptureSessionPreset640x480
        }else{
            // Bug fix: the original only queried canSetSessionPreset and
            // discarded the result, so the high-quality preset was never
            // actually applied.
            if session.canSetSessionPreset(AVCaptureSessionPresetHigh){
                session.sessionPreset = AVCaptureSessionPresetHigh
            }
        }

        output.alwaysDiscardsLateVideoFrames = true
        // BGRA frames so pixel data can be read directly.
        output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as AnyHashable:kCVPixelFormatType_32BGRA]
        output.setSampleBufferDelegate(self as AVCaptureVideoDataOutputSampleBufferDelegate, queue: captureQueue)

        // Wire the input and output into the session.
        if session.canAddInput(input){
            session.addInput(input)
        }
        if session.canAddOutput(output){
            session.addOutput(output)
        }
        getSeeLayer()
        // Start streaming frames.
        session.startRunning()
    }
    
    /// Returns the camera at the requested position, or nil if none exists.
    /// Uses the typed iOS 10 lookup where available, otherwise scans the
    /// legacy device list.
    fileprivate func getCameraDevice(position:AVCaptureDevicePosition)->AVCaptureDevice?{
        var camera:AVCaptureDevice?
        if #available(iOS 10.0, *) {
            camera = AVCaptureDevice.defaultDevice(withDeviceType: AVCaptureDeviceType.builtInWideAngleCamera, mediaType: AVMediaTypeVideo, position: position)
        } else {
            if let devices = AVCaptureDevice.devices() as? [AVCaptureDevice]{
                for obj in devices where obj.position == position{
                    camera = obj
                }
            }
        }
        return camera
    }
    
    /// Wraps `device` in an AVCaptureDeviceInput; returns nil (after
    /// logging) when the device cannot be opened.
    fileprivate func getInput(device:AVCaptureDevice)->AVCaptureDeviceInput?{
        do{
            return try AVCaptureDeviceInput.init(device: device)
        }catch{
            print("初始化input失败")
            return nil
        }
    }

    /// Creates and installs the full-screen live-preview layer.
    fileprivate func getSeeLayer(){
        seeLayer = AVCaptureVideoPreviewLayer.init(session: session)
        seeLayer?.frame = UIScreen.main.bounds
        //AVLayerVideoGravityResizeAspectFill — fills the screen (may crop)
        //AVLayerVideoGravityResizeAspect     — letterboxed
        //AVLayerVideoGravityResize           — stretched
        seeLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
        // Bug fix: avoid the force unwrap the original used here.
        if let layer = seeLayer{
            view.layer.addSublayer(layer)
        }
    }
}

extension ViewController:AVCaptureVideoDataOutputSampleBufferDelegate{

    /// Per-frame callback from the video-data output (runs on `captureQueue`).
    /// Enhances the frame, searches it for the dominant rectangle and, when one
    /// is found, renders the frame to a CGImage and forwards it to
    /// `cropWithRectangle` on the main queue.
    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        guard CMSampleBufferIsValid(sampleBuffer) else{
            return
        }
        // Bug fix: the original force-unwrapped the pixel buffer; a sample
        // buffer without an image buffer would have crashed here.
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else{
            return
        }
        let image:CIImage = CIImage.init(cvPixelBuffer: pixelBuffer)

        /**
         *  Adjust the frame's saturation, brightness and contrast before
         *  detection.
         *
         *  @param image      source image
         *  @param saturation saturation
         *  @param brightness brightness: -1.0 ~ 1.0
         *  @param contrast   contrast
         */
        // Bug fixes below: `outputImage` and `detector` were force-unwrapped in
        // the original and the features array was force-cast.
        guard let newImage = CIFilter.init(name: "CIColorControls", withInputParameters: [kCIInputImageKey:image,"inputBrightness":0.0,"inputContrast":1.14,"inputSaturation":1.0])?.outputImage else{
            return
        }
        guard let detector = detector else{
            return
        }
        let features = (detector.features(in: newImage) as? [CIRectangleFeature]) ?? []
        guard let rectangleFeature = biggestRectangleInRectangles(features: features) else{
            return
        }
        if let cgImage = coreImageContext.createCGImage(newImage, from: newImage.extent){
            DispatchQueue.main.async {
                self.cropWithRectangle(image: cgImage, rectangle: rectangleFeature)
            }
        }
    }
    
    /// Stops the session, clips the frame to the detected quadrilateral,
    /// delivers the cropped UIImage via `getImageAction`, then dismisses.
    private func cropWithRectangle(image:CGImage,rectangle:YMYRectanleFeature){
        session.stopRunning()
        let size = CGSize.init(width: image.width, height: image.height)
        print("lagre size = \(size)")
        UIGraphicsBeginImageContext(size)
        // Bug fix: the original skipped UIGraphicsEndImageContext when
        // UIGraphicsGetCurrentContext() returned nil, leaking the context.
        defer { UIGraphicsEndImageContext() }
        guard let context = UIGraphicsGetCurrentContext() else{
            return
        }
        context.setFillColor(UIColor.red.cgColor)
        // Build the clipping path from the detected corner points.
        context.move(to: rectangle.topLeft)
        context.addLine(to: rectangle.topRight)
        context.addLine(to: rectangle.bottomRight)
        context.addLine(to: rectangle.bottomLeft)
        context.closePath()
        context.clip()
        context.draw(image, in: CGRect.init(origin: CGPoint.zero, size: size))
        // Bug fix: the original force-unwrapped the snapshot before printing.
        guard let newImage = UIGraphicsGetImageFromCurrentImageContext() else{
            return
        }
        print(newImage.size)
        getImageAction?(newImage)
        dismiss(animated: true, completion: nil)
    }
    
    /// Computes the crop rect of the preview area within `image`.
    /// Scales one dimension so the rect matches the view's aspect ratio, then
    /// centers it inside the image extent.
    /// NOTE(review): currently only referenced from commented-out code in
    /// `captureOutput` — kept for when GL preview rendering is re-enabled.
    ///
    /// - Parameter image: image being displayed
    /// - Returns: the crop region, in the image's coordinate space
    private func cropRectForPreviewImage(image:CIImage)->CGRect{
        var cropWidth:CGFloat = image.extent.size.width
        var cropHeight:CGFloat = image.extent.size.height
        if (image.extent.size.width > image.extent.size.height){
            cropWidth = image.extent.size.width
            cropHeight = cropWidth * view.bounds.size.height/view.bounds.size.width
        }else if (image.extent.size.width < image.extent.size.height){
            cropHeight = image.extent.size.height
            cropWidth = cropHeight * view.bounds.size.width/view.bounds.size.height
        }
        return image.extent.insetBy(dx: (image.extent.size.width - cropWidth)/2, dy: (image.extent.size.height - cropHeight)/2)
    }
    
    /// Overlays a translucent red quadrilateral on `image` covering the
    /// detected rectangle (perspective-warped to its four corners).
    private func drawHighlightOverlayForPoints(image:CIImage,points:YMYRectanleFeature)->CIImage{
        var overlay = CIImage.init(color: CIColor.init(red: 1, green: 0, blue: 0, alpha: 0.2))
        overlay = overlay.cropping(to: image.extent)
        // Warp the solid-color image so it fills exactly the quad described
        // by the four corner points.
        overlay = overlay.applyingFilter("CIPerspectiveTransformWithExtent", withInputParameters: ["inputExtent":CIVector.init(cgRect: image.extent),"inputTopLeft":CIVector.init(cgPoint: points.topLeft),"inputTopRight":CIVector.init(cgPoint: points.topRight),"inputBottomLeft":CIVector.init(cgPoint: points.bottomLeft),"inputBottomRight":CIVector.init(cgPoint: points.bottomRight)])
        print(overlay.extent)
        return overlay.compositingOverImage(image)
    }
    
    /// Picks the best rectangle among `features` and normalizes its corner
    /// labels: the raw corners are sorted by their angle around the
    /// centroid so topLeft/topRight/bottomRight/bottomLeft are assigned
    /// consistently regardless of detection order.
    private func biggestRectangleInRectangles(features:[CIRectangleFeature])->YMYRectanleFeature?{
        guard let rectangleFeature = smallestRectangleInRectangles(features: features) else{
            return nil
        }
        let points:[NSValue] = [NSValue.init(cgPoint: rectangleFeature.topLeft),NSValue.init(cgPoint: rectangleFeature.topRight),NSValue.init(cgPoint: rectangleFeature.bottomLeft),NSValue.init(cgPoint: rectangleFeature.bottomRight)]
        // Bounding box of the four corners; its center anchors the sort.
        var minPoint:CGPoint = points.first!.cgPointValue
        var maxPoint = minPoint
        for value in points{
            let p = value.cgPointValue
            minPoint.x = fmin(p.x, minPoint.x)
            minPoint.y = fmin(p.y, minPoint.y)
            maxPoint.x = fmax(p.x, maxPoint.x)
            maxPoint.y = fmax(p.y, maxPoint.y)
        }
        let center = CGPoint.init(x: 0.5 * (minPoint.x + maxPoint.x), y: 0.5 * (minPoint.y + maxPoint.y))
        // Angle of each corner around the center, rotated by 3π/4 so the
        // sorted order maps onto the fixed corner assignment below.
        // NOTE(review): fmod can return a negative value for theta near -π;
        // the ordering appears tuned to CIDetector's coordinate space —
        // verify before reworking.
        let angleFromPointBlock:(NSValue)->NSNumber = {
            (value) in
            let point = value.cgPointValue
            let theta:CGFloat = atan2(point.y - center.y, point.x - center.x)
            let angle = fmod(Double.pi - Double.pi/4 + Double(theta) , 2.0 * Double.pi)
            return NSNumber.init(value: angle)
        }
        let sortedPoints = points.sorted { (a, b) -> Bool in
            let x = angleFromPointBlock(a)
            let y = angleFromPointBlock(b)
            return x.compare(y) == ComparisonResult.orderedAscending
        }
        let myRectanleFeature = YMYRectanleFeature.init()
        myRectanleFeature.topLeft = sortedPoints[3].cgPointValue
        myRectanleFeature.topRight = sortedPoints[2].cgPointValue
        myRectanleFeature.bottomRight = sortedPoints[1].cgPointValue
        myRectanleFeature.bottomLeft = sortedPoints[0].cgPointValue
        return myRectanleFeature
    }
    
    /// Selects the rectangle with the largest half-perimeter (width + height)
    /// among candidates whose short/long side ratio falls in 0.265…0.329.
    /// NOTE(review): despite the name, this returns the *largest* qualifying
    /// rectangle; the ratio window is presumably tuned to the target
    /// document's aspect ratio — confirm before changing.
    ///
    /// - Parameter features: candidate CIRectangleFeature objects
    /// - Returns: the best candidate, or nil if none qualifies
    private func smallestRectangleInRectangles(features:[CIRectangleFeature])->CIRectangleFeature?{
        if features.isEmpty{
            return nil
        }
        var halfPerimiterValue:CGFloat = 0
        var biggestRectangle:CIRectangleFeature = features.first!
        for retangleFeature in features{
            let p1:CGPoint = retangleFeature.topLeft
            let p2:CGPoint = retangleFeature.topRight
            // hypot: length of the hypotenuse given the two legs.
            let width:CGFloat = hypot(p1.x - p2.x, p1.y - p2.y)
            let p3:CGPoint = retangleFeature.topLeft
            let p4:CGPoint = retangleFeature.bottomLeft
            let height:CGFloat = hypot(p3.x - p4.x, p3.y - p4.y)
            let ratio = width > height ? (height / width) : (width / height)
            print(ratio) // debug: observed aspect ratio of each candidate
            if !(ratio > 0.265 && ratio < 0.329){
                continue
            }
            let currentHalfPerimiterValue:CGFloat = height + width
            if halfPerimiterValue < currentHalfPerimiterValue{
                halfPerimiterValue = currentHalfPerimiterValue
                biggestRectangle = retangleFeature
            }
        }
        // No candidate passed the ratio filter.
        if halfPerimiterValue == 0{
            return nil
        }
        return biggestRectangle
    }
}
