//
//  NativeScannerView.swift
//  GreenDeal
//
//  Created by Moennig Q on 16/11/2022.
//

import UIKit
import AVFoundation
class NativeScannerView: UIView, ScannerViewProtocal {
    /// Serial queue for all capture-session work (configure/start/stop) so
    /// AVCaptureSession calls never block the main thread.
    var scannerQueue = DispatchQueue(label: "native.scanner.queue")
    /// Start timestamp of the current decode attempt; used to compute scanTime.
    var decodeCurrentTime = Date()
    /// Whether the session is currently running (maintained by start/stopScanning).
    var isScanning: Bool = false
    var scanAddMethodMode: ScanAddMethodMode = .auto
    var scanMode: ScanMode = .oneD
    /// Stroke color used when outlining a recognized code.
    var strokeColor: UIColor = APPConfig.ScanPanelColor
    var enableInvertedBarcodeFlag: Bool = false
    /// Delivered on the main queue with the decoded result (barcode, type, image, time).
    var scanResultClosure: ((ScanningResult) -> Void)?
    /// Delivered on the main queue with the JPEG data produced by takePhoto().
    var takePhotoResultClosure: ((Data) -> Void)?
    /// False when no usable camera exists (e.g. Simulator); guards all session work.
    private var haveAvailableDeviceFlag: Bool = true
    // MARK: - Capture session
    private let session = AVCaptureSession()
    // MARK: - Video input
    private var videoInput: AVCaptureDeviceInput?
    // MARK: - Preview layer
    lazy var previewLayer: AVCaptureVideoPreviewLayer = {
        let layer = AVCaptureVideoPreviewLayer(session: self.session)
        // Keep the original aspect ratio, fill the bounds, clip the excess.
        // Do NOT change: the effective scan area (metadataOutput.rectOfInterest)
        // conversion depends on this gravity.
        layer.videoGravity = AVLayerVideoGravity.resizeAspectFill
        return layer
    }()
    // MARK: - Outputs
    private var metadataOutput: AVCaptureMetadataOutput!
    private var photoOutput: AVCapturePhotoOutput!
    /// Layer hosting the corner-highlight shape layers.
    lazy var drawLayer = CALayer()
    /// Set when a code was recognized; the next video frame is then captured as the result image.
    var scannedFlag = false
    /// Most recently recognized code object (already transformed to layer coordinates).
    var codeObject: AVMetadataMachineReadableCodeObject?
    /// Corner line width.
    var lineWidth: CGFloat = 2
    /// Accumulated information about the scanned barcode.
    var scanResult = ScanningResult()
    /// Region (in view coordinates) a code must fall inside to be accepted.
    var scanFrame: CGRect = CGRect.zero

    override init(frame: CGRect) {
        super.init(frame: frame)
        setUpView()
    }

    convenience init() {
        self.init(frame: CGRect(x: 0, y: 0, width: 0, height: 0))
        self.lineWidth = 2
    }

    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    private func setUpView() {
        // 1. Black until the scanner is running, then made transparent elsewhere;
        // 2. also stays black when no camera is available.
        self.backgroundColor = .black
        self.scanFrame = UIScreen.main.bounds
        self.previewLayer.frame = UIScreen.main.bounds
    }

    // MARK: - Launch the scanner (enableInvetedFlag has no effect for the native scanner)
    func launchScanner(enableInvetedFlag: Bool, enableFuzzyFlag: Bool) {
        scannerQueue.async { [weak self] in
            guard let strongSelf = self else { return }
            // Check whether a usable back camera exists first.
            strongSelf.haveAvailableDeviceFlag = strongSelf.checkIfHaveAvailableDevice()
            if strongSelf.haveAvailableDeviceFlag {
                strongSelf.setUpSession()
                DispatchQueue.main.async {
                    strongSelf.setupLayers()
                }
                strongSelf.session.startRunning()
                // 1. rectOfInterest/metadataObjectTypes must be set AFTER startRunning.
                // Fix: honor the mode already assigned to scanMode (default .oneD)
                // instead of unconditionally forcing 1D mode.
                strongSelf.switchScanMode(strongSelf.scanMode)
            }
        }
    }

    // MARK: - Start scanning
    func startScanning() {
        scannerQueue.async { [weak self] in
            guard let strongSelf = self else { return }
            if strongSelf.haveAvailableDeviceFlag {
                if strongSelf.session.isRunning {
                    return
                }
                strongSelf.session.startRunning()
                strongSelf.isScanning = true
            }
        }
    }

    // MARK: - Stop scanning
    func stopScanning() {
        scannerQueue.async { [weak self] in
            guard let strongSelf = self else { return }
            if strongSelf.haveAvailableDeviceFlag {
                if !strongSelf.session.isRunning {
                    return
                }
                strongSelf.session.stopRunning()
                strongSelf.isScanning = false
            }
        }
    }

    // MARK: - Toggle inverted-barcode scanning
    func switchInverterSetting(_: Bool) {
        // The native scanner has no inverted-barcode mode; intentionally a no-op.
    }

    // MARK: - Switch between 1D barcode and 2D code scanning
    func switchScanMode(_ scanMode: ScanMode) {
        scannerQueue.async { [weak self] in
            guard let strongSelf = self else { return }
            strongSelf.scanMode = scanMode
            if strongSelf.haveAvailableDeviceFlag {
                // rectOfInterest and metadataObjectTypes must be set after
                // session.startRunning(); the session preset must be in place before it.
                let cropRect = ScanResultImageTool().getCropRect(strongSelf.scanMode)
                // rectOfInterest uses a rotated, normalized coordinate space (origin at
                // the screen's top-right, x runs down, y runs left); let the preview
                // layer perform the conversion from layer coordinates.
                strongSelf.metadataOutput.rectOfInterest = strongSelf.previewLayer.metadataOutputRectConverted(fromLayerRect: cropRect)
                let objectTypes = strongSelf.getAvailableMetadataObjectTypes(strongSelf.scanMode)
                strongSelf.metadataOutput.metadataObjectTypes = objectTypes
            }
        }
    }

    // MARK: - Fuzzy-scan toggle
    func switchFuzzyScanSetting(_: Bool) {
        debugPrint("原生扫码没有此设置")
    }

    // MARK: - Photo capture for manual add
    func takePhoto() {
        scannerQueue.async { [weak self] in
            guard let strongSelf = self else { return }
            if strongSelf.haveAvailableDeviceFlag {
                let settings = AVCapturePhotoSettings()
                strongSelf.photoOutput.capturePhoto(with: settings, delegate: strongSelf)
            }
        }
    }

    // MARK: - Clear the previously recognized result (manual mode)
    func clearPreResultForManually() {
        self.scanResult = ScanningResult()
    }

    // MARK: - Release the scanner
    func releaseScanner() {
        scannerQueue.async { [weak self] in
            guard let strongSelf = self else { return }
            if strongSelf.session.isRunning {
                strongSelf.session.stopRunning()
            }
            DispatchQueue.main.async {
                strongSelf.removeAllLayers()
            }
            DebugLog("原生扫描器 deinit")
        }
    }

    // MARK: - Private helpers
    /// Attaches the camera input and the metadata/video/photo outputs to the
    /// session, then configures focus/exposure/white balance. Runs on scannerQueue,
    /// before session.startRunning().
    private func setUpSession() {
        // Resolve the default video device; videoInput stays nil on failure.
        if let device = AVCaptureDevice.default(for: AVMediaType.video) {
            self.videoInput = try? AVCaptureDeviceInput(device: device)
        }
        // No camera input available — nothing to configure.
        guard let input = self.videoInput else {
            return
        }
        let videoOutput = AVCaptureVideoDataOutput()
        photoOutput = AVCapturePhotoOutput()
        metadataOutput = AVCaptureMetadataOutput()
        // Verify everything can be attached before mutating the session.
        if !session.canAddInput(input) {
            debugPrint("====QRCodeScanner====")
            debugPrint("找不到输入设备")
            return
        }
        if !session.canAddOutput(metadataOutput) {
            debugPrint("====QRCodeScanner====")
            // Fixed: previously printed the "input device not found" message.
            debugPrint("无法添加元数据输出metadataOutput")
            return
        }
        if !session.canAddOutput(videoOutput) {
            debugPrint("====QRCodeScanner====")
            debugPrint("无法添加视频输出videoOutput")
            return
        }
        if !session.canAddOutput(photoOutput) {
            debugPrint("====QRCodeScanner====")
            debugPrint("无法添加照片输出photoOutput")
            return
        }
        session.addInput(input)
        session.addOutput(metadataOutput)
        session.addOutput(videoOutput)
        session.addOutput(photoOutput)

        do {
            let device = input.device
            try device.lockForConfiguration()
            if session.canSetSessionPreset(AVCaptureSession.Preset.hd1920x1080) {
                // Must stay 1080p: the rectOfInterest conversion assumes this preset.
                session.sessionPreset = AVCaptureSession.Preset.hd1920x1080
            }
            // Continuous autofocus.
            if device.isFocusModeSupported(AVCaptureDevice.FocusMode.continuousAutoFocus) {
                device.focusMode = AVCaptureDevice.FocusMode.continuousAutoFocus
            }
            if device.isAutoFocusRangeRestrictionSupported {
                device.autoFocusRangeRestriction = AVCaptureDevice.AutoFocusRangeRestriction.far
            }
            // Continuous auto exposure.
            if device.isExposureModeSupported(AVCaptureDevice.ExposureMode.continuousAutoExposure) {
                device.exposureMode = AVCaptureDevice.ExposureMode.continuousAutoExposure
            }
            // Continuous auto white balance.
            if device.isWhiteBalanceModeSupported(.continuousAutoWhiteBalance) {
                device.whiteBalanceMode = .continuousAutoWhiteBalance
            }
            device.unlockForConfiguration()
        } catch {
            // Fixed: surface the actual error instead of a bare "failed".
            debugPrint("device lockForConfiguration failed: \(error)")
        }

        photoOutput.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
        // Delegates are serviced on scannerQueue to keep decoding off the main thread.
        metadataOutput.setMetadataObjectsDelegate(self, queue: scannerQueue)
        // AVCaptureVideoDataOutputSampleBufferDelegate
        videoOutput.setSampleBufferDelegate(self, queue: scannerQueue)
    }

    /// Stacks the layers: previewLayer at the bottom, drawLayer above it so
    /// corner highlights remain visible. Must run on the main thread.
    private func setupLayers() {
        drawLayer.frame = self.bounds
        self.layer.insertSublayer(drawLayer, at: 0)
        previewLayer.frame = self.bounds
        self.layer.insertSublayer(previewLayer, at: 0)
    }

    // MARK: - Manual cropRect → rectOfInterest conversion
    /// Converts a layer-space crop rect into rectOfInterest coordinates
    /// (origin at the screen's top-right, x runs down, y runs left, normalized 0...1).
    /// NOTE(review): currently unused — switchScanMode uses
    /// previewLayer.metadataOutputRectConverted(fromLayerRect:) instead, although a
    /// historical comment claimed that API converted incorrectly. Kept for reference;
    /// confirm before deleting.
    private func converterCropRectOfInterest(_ cropRect: CGRect) -> CGRect {
        let size = UIScreen.main.bounds.size
        let p1: CGFloat = size.height / size.width
        let p2: CGFloat = 1920 / 1080  // 1080p image output (see sessionPreset)
        // rectOfInterest depends on two things: the session's sessionPreset and the
        // preview layer's videoGravity (resizeAspectFill here).
        var rectOfInterest: CGRect!
        if (p1 < p2) {
            let fixHeight: CGFloat = size.width * p2
            let fixPadding: CGFloat = (fixHeight - size.height) / 2
            rectOfInterest = CGRect(x: (cropRect.origin.y + fixPadding) / fixHeight,
                                    y: (size.width - (cropRect.size.width + cropRect.origin.x)) / size.width,
                                    width: cropRect.size.height / fixHeight,
                                    height: cropRect.size.width / size.width)
        } else {
            let fixWidth: CGFloat = size.width * (1 / p2)
            let fixPadding: CGFloat = (fixWidth - size.width) / 2
            rectOfInterest = CGRect(x: cropRect.origin.y / size.height,
                                    y: (size.width - (cropRect.size.width + cropRect.origin.x) + fixPadding) / fixWidth,
                                    width: cropRect.size.height / size.height,
                                    height: cropRect.size.width / fixWidth)
        }
        return rectOfInterest
    }

}
extension NativeScannerView: AVCaptureMetadataOutputObjectsDelegate, AVCaptureVideoDataOutputSampleBufferDelegate, AVCapturePhotoCaptureDelegate {
    // MARK: - AVCaptureVideoDataOutputSampleBufferDelegate
    /// Grabs the current video frame once a code has been recognized (scannedFlag),
    /// optionally draws the code outline onto it, then delivers the final result
    /// via scanResultClosure on the main queue. Runs on scannerQueue.
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard self.scannedFlag else { return }
        guard let buffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
            self.scannedFlag = false
            return
        }
        let ciImage = CIImage(cvImageBuffer: buffer)
        let context = CIContext(options: nil)
        guard let cgImage: CGImage = context.createCGImage(ciImage, from: ciImage.extent) else {
            self.scannedFlag = false
            return
        }
        guard let resultPoints = self.codeObject?.corners else {
            self.scannedFlag = false
            return
        }
        // Only .auto add mode needs the captured image; manual mode skips it.
        if self.scanAddMethodMode == .auto {
            // The raw buffer is landscape; rotate right to match portrait UI.
            let resultImage = UIImage(cgImage: cgImage, scale: 1.0, orientation: UIImage.Orientation.right)
            // Decode duration in milliseconds since the previous decode finished.
            let now = Date()
            let scanTime = Int32(lround(now.timeIntervalSince1970 * 1000 - self.decodeCurrentTime.timeIntervalSince1970 * 1000))
            // Reset so the next code measures from here.
            self.decodeCurrentTime = now
            scanResult.scanTime = scanTime
            // Prefer the image with the code outline drawn on; fall back to the raw frame.
            if let combineImage = self.drawLineOnImage(fromImage: resultImage, corners: resultPoints) {
                if let data: Data = combineImage.jpegData(compressionQuality: 1.0) {
                    scanResult.imageData = data
                }
            } else {
                if let data: Data = resultImage.jpegData(compressionQuality: 1.0) {
                    scanResult.imageData = data
                }
            }
        }
        // Fixed: CALayer mutation must happen on the main thread; this delegate
        // runs on scannerQueue (previously clearDrawLayer() was called inline here).
        DispatchQueue.main.async {
            self.clearDrawLayer()
        }
        self.scannedFlag = false
        // Both .auto and manual modes deliver the result here.
        DispatchQueue.main.async {
            self.scanResultClosure?(self.scanResult)
        }
    }

    // MARK: - AVCaptureMetadataOutputObjectsDelegate
    /// Called with recognized codes and their corner points; records the code
    /// value/type and schedules the corner highlight. Runs on scannerQueue.
    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        DispatchQueue.main.async {
            self.clearDrawLayer()
        }
        for dataObject in metadataObjects {
            guard let readable = dataObject as? AVMetadataMachineReadableCodeObject,
                  let transformed = previewLayer.transformedMetadataObject(for: readable) as? AVMetadataMachineReadableCodeObject,
                  self.scanFrame.contains(transformed.bounds) else {
                continue
            }
            scannedFlag = true
            scanResult.barcode = readable.stringValue ?? ""
            // Shorten the reverse-DNS type (e.g. "org.iso.Code128") to its last component.
            var type: String = transformed.type.rawValue
            if let shortType = type.split(separator: ".").last {
                type = String(shortType)
            }
            scanResult.type = type
            // Keep the layer-space object so captureOutput can draw its corners.
            self.codeObject = transformed
            DispatchQueue.main.async {
                self.drawCodeCorners(transformed)
                // Remove all layers, then re-stack preview and draw layers.
                self.removeAllLayers()
                self.setupLayers()
            }
        }
    }

    // MARK: - AVCapturePhotoCaptureDelegate
    /// Delivers the manually captured photo's data on the main queue.
    /// NOTE(review): a non-nil `error` is silently ignored here — confirm that is intended.
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if let imageData = photo.fileDataRepresentation() {
            DispatchQueue.main.async {
                self.takePhotoResultClosure?(imageData)
            }
        }
    }

    /// Detaches both the preview and the highlight layer. Main thread only.
    private func removeAllLayers() {
        previewLayer.removeFromSuperlayer()
        drawLayer.removeFromSuperlayer()
    }

    /// Removes every highlight sublayer. Main thread only.
    private func clearDrawLayer() {
        guard let sublayers = drawLayer.sublayers else {
            return
        }
        for layer in sublayers {
            layer.removeFromSuperlayer()
        }
    }

    /// Adds a shape layer outlining the recognized code's corners. Main thread only.
    private func drawCodeCorners(_ codeObject: AVMetadataMachineReadableCodeObject) {
        guard !codeObject.corners.isEmpty else {
            return
        }
        let shapeLayer = CAShapeLayer()
        shapeLayer.lineWidth = lineWidth
        shapeLayer.strokeColor = strokeColor.cgColor
        shapeLayer.fillColor = UIColor.clear.cgColor
        shapeLayer.path = createPath(codeObject.corners)
        drawLayer.addSublayer(shapeLayer)
    }

    /// Builds a closed path through the given points (empty path when `points` is empty).
    private func createPath(_ points: [CGPoint]) -> CGMutablePath {
        let path = CGMutablePath()
        guard let start = points.first else {
            return path
        }
        path.move(to: start, transform: .identity)
        for point in points.dropFirst() {
            path.addLine(to: point)
        }
        path.closeSubpath()
        return path
    }

    // MARK: - Metadata object types for the given scan mode
    // Do NOT use metadataOutput.availableMetadataObjectTypes: since iOS 11 it is
    // empty before session.startRunning() (iOS 11 itself returned every type),
    // so the concrete types are listed explicitly here.
    private func getAvailableMetadataObjectTypes(_ scanMode: ScanMode) -> [AVMetadataObject.ObjectType] {
        var objects = [AVMetadataObject.ObjectType]()
        if scanMode == .qrCODE {
            // 2D codes.
            objects.append(.qr)
            objects.append(.dataMatrix)
            objects.append(.aztec)
            if #available(iOS 15.4, *) {
                objects.append(.microQR)
                objects.append(.microPDF417)
            }
        } else {
            // 1D barcodes.
            objects.append(.code39)
            objects.append(.code93)
            objects.append(.code128)
            objects.append(.code39Mod43)
            objects.append(.ean8)
            objects.append(.ean13)
            objects.append(.upce)
            objects.append(.itf14)
            objects.append(.interleaved2of5)
            if #available(iOS 15.4, *) {
                objects.append(.codabar)
                objects.append(.gs1DataBar)
                objects.append(.gs1DataBarLimited)
                objects.append(.gs1DataBarExpanded)
            }
        }
        return objects
    }

    // MARK: - Draw the code outline onto the captured image
    /// Renders `fromImage` at screen size with a stroked quadrilateral through
    /// `corners` (order: leftTop, rightTop, rightBottom, leftBottom).
    /// Returns nil when there is no source image, no recognized code, or fewer
    /// than four corner points — the caller then falls back to the raw frame.
    private func drawLineOnImage(fromImage: UIImage? = nil, corners: [CGPoint]) -> UIImage? {
        // Fixed: guard the corner count as well; the original force-indexed
        // corners[0...3] and force-unwrapped fromImage.
        guard let sourceImage = fromImage, self.codeObject != nil, corners.count >= 4 else {
            return nil
        }
        let screenSize = UIScreen.main.bounds.size
        let format = UIGraphicsImageRendererFormat()
        format.opaque = false
        format.scale = UIScreen.main.scale
        let renderer = UIGraphicsImageRenderer(size: screenSize, format: format)
        return renderer.image { _ in
            sourceImage.draw(in: CGRect(x: 0, y: 0, width: screenSize.width, height: screenSize.height))
            // Stroke the closed quadrilateral through the four corner points.
            let outline = UIBezierPath()
            outline.move(to: corners[0])
            outline.addLine(to: corners[1])
            outline.addLine(to: corners[2])
            outline.addLine(to: corners[3])
            outline.close()
            strokeColor.setStroke()
            outline.lineWidth = 5
            outline.stroke()
        }
    }
}
