//
//  ScanAlertVC.swift
//  FeiDao
//
//  Created by lixin on 2023/9/19.
//

import UIKit
import AVFoundation
import Vision


/// A camera view controller that photographs a framed on-screen region and runs
/// Vision text recognition on it; recognized strings are delivered through
/// `scanResultBlock` (see the extension below for capture/recognition handling).
class ScanAlertVC:  UIViewController, AVCapturePhotoCaptureDelegate {
    
    /// Invoked with each recognized text string after a capture; set by the presenter.
    var scanResultBlock: ((String) -> Void)?
    /// Capture pipeline: default video device in, `AVCapturePhotoOutput` out.
    var captureSession: AVCaptureSession!
    /// Live camera preview, full-width with a 3:4 (width:height) aspect, vertically centered.
    var previewLayer: AVCaptureVideoPreviewLayer!
    /// On-screen rectangle the user should place text inside; outlined in white.
    var recognitionFrame: CGRect!
    /// Round red shutter button that triggers `captureImage()`.
    var snapButton: UIButton!
    
    override func viewDidLoad() {
        super.viewDidLoad()
        view.backgroundColor = .white
        setupCaptureSession()
        setupRecognitionFrame()
        
        addOtherView()
    }
    
    /// Adds the remaining UI on top of the preview (currently just the shutter button).
    func addOtherView(){
        
        
        setupSnapButton()
    }
    
    /// Builds and starts the capture session and installs the preview layer.
    /// Bails out silently if the camera is unavailable or configuration fails.
    func setupCaptureSession() {
        captureSession = AVCaptureSession()
        
        guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
        let videoInput: AVCaptureDeviceInput
        
        do {
            videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
        } catch {
            // Camera access denied or device busy — leave the session unconfigured.
            return
        }
        
        if captureSession.canAddInput(videoInput) {
            captureSession.addInput(videoInput)
        } else {
            return
        }
        
        // The photo output is retrieved later via `captureSession.outputs` in captureImage().
        let photoOutput = AVCapturePhotoOutput()
        if captureSession.canAddOutput(photoOutput) {
            captureSession.addOutput(photoOutput)
        } else {
            return
        }
        
        printXY(view, obj: self, line: #line) // debug: logs the view (observed bounds 414 × 896 points)
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        // Preview occupies the full width with a 3:4 aspect ratio, centered vertically.
        let width = view.bounds.size.width
        let height = width/3*4
        let yOffset = (view.bounds.size.height - height) / 2
        previewLayer.frame = CGRect(x: 0, y: yOffset, width: width, height: height) //
        previewLayer.videoGravity = .resizeAspectFill
        view.layer.addSublayer(previewLayer)
        
        // startRunning() blocks, so it must not run on the main thread.
        DispatchQueue.global(qos: .userInitiated).async { [self] in
            captureSession.startRunning()
        }
    }
    
    /// Draws the white recognition rectangle and dims everything outside it.
    func setupRecognitionFrame() {
        // Same 3:4 layout math as setupCaptureSession(), so the frame's top edge
        // coincides with the top of the preview layer.
        let width = view.bounds.size.width
        let height = width/3*4
        let yOffset = (view.bounds.size.height - height) / 2
        // 100-pt-tall strip, inset 40 pt on each side.
        recognitionFrame = CGRect(x: 40, y: yOffset, width: view.frame.width - 80, height: 100)
        
        // White outline around the recognition area.
        let overlay = CAShapeLayer()
        overlay.path = UIBezierPath(rect: recognitionFrame).cgPath
        overlay.strokeColor = UIColor.white.cgColor
        overlay.lineWidth = 1
        overlay.fillColor = UIColor.clear.cgColor
        view.layer.addSublayer(overlay)
        
        // Create a mask layer except for the recognition frame:
        // a full-bounds path plus the reversed recognition rect leaves a clear "hole",
        // so the semi-transparent fill dims only the area outside the frame.
        let maskLayer = CAShapeLayer()
        let path = UIBezierPath(rect: view.bounds)
        path.append(UIBezierPath(rect: recognitionFrame).reversing())
        maskLayer.path = path.cgPath
        maskLayer.fillColor = UIColor(white: 0, alpha: 0.6).cgColor
        view.layer.addSublayer(maskLayer)
    }
    
    /// Adds the round red shutter button near the bottom of the screen.
    func setupSnapButton() {
        snapButton = UIButton(frame: CGRect(x: (view.frame.width - 80) / 2, y: view.frame.height - 200, width: 80, height: 80))
        snapButton.backgroundColor = .red
        snapButton.layer.cornerRadius = 40
        snapButton.addTarget(self, action: #selector(captureImage), for: .touchUpInside)
        view.addSubview(snapButton)
    }
    
    /// Shutter action: captures one still photo with flash off. The result arrives in
    /// `photoOutput(_:didFinishProcessingPhoto:error:)` (implemented in the extension).
    @objc func captureImage() {
        let settings = AVCapturePhotoSettings()
        settings.flashMode = .off
        
        if let photoOutput = captureSession.outputs.first as? AVCapturePhotoOutput {
            photoOutput.capturePhoto(with: settings, delegate: self)
        }
    }
    
}

// MARK: - Orientation correction, photo cropping, and text recognition
extension ScanAlertVC {
    /// Returns a copy of `img` redrawn with `.up` orientation.
    ///
    /// Captured photos come back rotated 90°, so we bake the EXIF orientation into
    /// the pixel data before cropping with CGImage (which ignores orientation).
    /// Returns the original image unchanged if it is already `.up` or if a drawing
    /// context cannot be created.
    func fixOrientation(img: UIImage) -> UIImage {
        if img.imageOrientation == .up {
            return img
        }

        var transform: CGAffineTransform = CGAffineTransform.identity

        switch img.imageOrientation {
        case .down, .downMirrored:
            transform = transform.translatedBy(x: img.size.width, y: img.size.height)
            transform = transform.rotated(by: .pi)
        case .left, .leftMirrored:
            transform = transform.translatedBy(x: img.size.width, y: 0)
            transform = transform.rotated(by: .pi / 2)
        case .right, .rightMirrored:
            transform = transform.translatedBy(x: 0, y: img.size.height)
            transform = transform.rotated(by: -(.pi / 2))
        case .up, .upMirrored:
            break
        @unknown default:
            break
        }

        // BUG FIX: translatedBy/scaledBy RETURN a new transform rather than mutating;
        // the original discarded the results, so mirrored orientations were never flipped.
        switch img.imageOrientation {
        case .upMirrored, .downMirrored:
            transform = transform.translatedBy(x: img.size.width, y: 0)
            transform = transform.scaledBy(x: -1, y: 1)
        case .leftMirrored, .rightMirrored:
            transform = transform.translatedBy(x: img.size.height, y: 0)
            transform = transform.scaledBy(x: -1, y: 1)
        case .up, .down, .left, .right:
            break
        @unknown default:
            break
        }

        // Robustness: avoid the original force-unwraps; fall back to the input image
        // rather than crashing if the bitmap context cannot be built.
        guard let cgImage = img.cgImage,
              let colorSpace = cgImage.colorSpace,
              let ctx = CGContext(data: nil,
                                  width: Int(img.size.width),
                                  height: Int(img.size.height),
                                  bitsPerComponent: cgImage.bitsPerComponent,
                                  bytesPerRow: 0,
                                  space: colorSpace,
                                  bitmapInfo: cgImage.bitmapInfo.rawValue)
        else {
            return img
        }

        ctx.concatenate(transform)

        switch img.imageOrientation {
        case .left, .leftMirrored, .right, .rightMirrored:
            // Width/height swap for 90°-rotated sources.
            ctx.draw(cgImage, in: CGRect(x: 0, y: 0, width: img.size.height, height: img.size.width))
        default:
            ctx.draw(cgImage, in: CGRect(x: 0, y: 0, width: img.size.width, height: img.size.height))
        }

        guard let normalized = ctx.makeImage() else { return img }
        return UIImage(cgImage: normalized)
    }

    /// Center-crops `image` so its aspect ratio matches `previewLayer`'s bounds,
    /// mirroring what `.resizeAspectFill` shows on screen. Returns nil if CGImage
    /// cropping fails.
    /// NOTE(review): crop math assumes the UIImage scale is 1 (true for
    /// `UIImage(data:)` from a capture) — confirm if the input source changes.
    func cropImageToMatchPreviewLayer(image: UIImage, previewLayer: AVCaptureVideoPreviewLayer) -> UIImage? {
        
        let previewBounds = previewLayer.bounds
        let previewAspectRatio = previewBounds.width / previewBounds.height
        let imageWidth = image.size.width
        let imageHeight = image.size.height
        let imageAspectRatio = imageWidth / imageHeight
        
        var cropRect: CGRect
        
        if imageAspectRatio > previewAspectRatio {
            // Image is wider than the preview: trim the left and right edges.
            let newWidth = imageHeight * previewAspectRatio
            let xOffset = (imageWidth - newWidth) / 2
            cropRect = CGRect(x: xOffset, y: 0, width: newWidth, height: imageHeight)
        } else {
            // Image is taller than the preview: trim the top and bottom.
            let newHeight = imageWidth / previewAspectRatio
            let yOffset = (imageHeight - newHeight) / 2
            cropRect = CGRect(x: 0, y: yOffset, width: imageWidth, height: newHeight)
        }
        
        guard let cgImage = image.cgImage?.cropping(to: cropRect) else {
            return nil
        }
        
        return UIImage(cgImage: cgImage)
    }
    
    /// AVCapturePhotoCaptureDelegate: normalize orientation, crop to the preview,
    /// then crop to the recognition strip and run OCR on it.
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        // BUG FIX: the original ignored the capture error entirely.
        if let error = error {
            print("Photo capture error: \(error)")
            return
        }
        guard let imageData = photo.fileDataRepresentation(),
              let image = UIImage(data: imageData)
        else { return }

        let fixImage = fixOrientation(img: image)

        guard let cropImage = cropImageToMatchPreviewLayer(image: fixImage, previewLayer: previewLayer) else {
            return
        }

        // Map the on-screen recognition frame into image pixels. The preview-cropped
        // image spans the full view width, and recognitionFrame's top edge coincides
        // with the preview's top edge, hence y = 0.
        let scale = cropImage.size.width / view.bounds.width
        // BUG FIX: width was hard-coded to 300 pt, but recognitionFrame is
        // (view width - 80) pt wide — use the same value so the OCR crop matches
        // the rectangle shown to the user.
        let cropRect = CGRect(x: 40 * scale,
                              y: 0,
                              width: (view.bounds.width - 80) * scale,
                              height: 100 * scale)

        // NOTE: the original also saved each intermediate image to the photo album
        // (three copies per shot) — debug leftovers, removed. Call
        // saveImageToPhotosAlbum(image:) here if saving is actually desired.
        if let cgImage = cropImage.cgImage?.cropping(to: cropRect) {
            let croppedImage = UIImage(cgImage: cgImage)
            recognizeText(in: croppedImage)
        }
        
    }
    
    /// Runs Vision text recognition on `image`; each recognized line is passed to
    /// `scanResultBlock`, then the controller dismisses itself.
    func recognizeText(in image: UIImage) {
        // Vision works on CGImage, not UIImage.
        guard let cgImage = image.cgImage else { return }
        
        let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
        
        let request = VNRecognizeTextRequest { [weak self] (request, error) in
            if let error = error {
                print("Text recognition error: \(error)")
                return
            }
            
            guard let observations = request.results as? [VNRecognizedTextObservation],
                  !observations.isEmpty else { return }
            
            // BUG FIX: this completion runs on a background queue, but the original
            // called dismiss(animated:) from it — and once per observation. Hop to
            // the main queue, deliver every result, then dismiss a single time.
            DispatchQueue.main.async {
                guard let self = self else { return }
                for observation in observations {
                    guard let topCandidate = observation.topCandidates(1).first else { continue }
                    print("Recognized text: \(topCandidate.string)")
                    self.scanResultBlock?(topCandidate.string)
                }
                self.dismiss(animated: true)
            }
        }
        
        // Favor accuracy over speed for still captures.
        request.recognitionLevel = .accurate
        
        do {
            try handler.perform([request])
        } catch {
            print("Failed to perform text recognition: \(error)")
        }
    }

    
}


// MARK: - Saving images to the photo album
extension ScanAlertVC {
    /// Asynchronously writes `image` to the user's photo library.
    /// Completion (success or failure) is reported to
    /// `image(_:didFinishSavingWithError:contextInfo:)`.
    func saveImageToPhotosAlbum(image: UIImage) {
        let completionSelector = #selector(image(_:didFinishSavingWithError:contextInfo:))
        UIImageWriteToSavedPhotosAlbum(image, self, completionSelector, nil)
    }

    /// Callback invoked by UIKit once the album write finishes; logs the outcome.
    @objc func image(_ image: UIImage, didFinishSavingWithError error: Error?, contextInfo: UnsafeRawPointer) {
        guard let error = error else {
            // No error — the write succeeded.
            print("Image Saved Successfully!")
            return
        }
        // The save failed.
        print("Error Saving Image: \(error.localizedDescription)")
    }
}


/// Cached formatter for printXY: DateFormatter construction is expensive, so build
/// it once instead of on every call (the original rebuilt it per invocation).
private let printXYTimeFormatter: DateFormatter = {
    let formatter = DateFormatter()
    // Time of day with milliseconds, e.g. "14:03:27.512".
    formatter.dateFormat = "HH:mm:ss.SSS"
    return formatter
}()

/// Debug logging helper: prints a timestamp, the dynamic type of `obj`, the
/// call-site line number, and `any`.
/// - Parameters:
///   - any: The value to log.
///   - obj: Object whose concrete type is printed (callers pass `self`).
///   - line: Source line number; callers pass `#line`.
func printXY(_ any: Any, obj: Any, line: Int) {
    let timestamp = printXYTimeFormatter.string(from: Date())
    print("\(timestamp) \(type(of: obj)) \(line) \(any)")
}



