//
//  CameraExperiment.swift
//  appletest3
//
//  Created by 罗天杭 on 2022/8/29.
//


import AVFoundation
import SwiftUI
import UIKit





/// Full-screen camera screen: shows the live, model-processed camera feed
/// and kicks off the permission check / session setup when it appears.
///
/// The capture/save button UI that used to live here was dead
/// (commented-out) code and has been removed, along with two `Spacer()`s
/// that had no layout effect inside the `ZStack`.
struct CameraView: View {
    @StateObject var camera = CameraModel()

    var body: some View {
        ZStack {
            // Preview is backed by the model's UIImageView, which the
            // sample-buffer delegate updates with processed frames.
            CameraPreview(camera: camera)
                .ignoresSafeArea(.all, edges: .all)
        }
        .onAppear {
            // Verify/request camera authorization, then configure the session.
            camera.Check()
        }
    }
}
        



/// Owns the AVFoundation capture pipeline and publishes its state to SwiftUI.
///
/// Frames arrive through `AVCaptureVideoDataOutputSampleBufferDelegate`
/// (see the extension below), are run through the Core ML `style` model,
/// and the result is pushed into `uiimageview`.
class CameraModel: NSObject, ObservableObject, AVCapturePhotoCaptureDelegate {
    @Published var isTaken = false
    @Published var session = AVCaptureSession()
    // Set to true when camera access has been denied; drive an alert off this.
    @Published var alert = false
    @Published var output = AVCapturePhotoOutput()
    // Video data output that feeds per-frame buffers to the delegate queue.
    @Published var output2 = AVCaptureVideoDataOutput()
    @Published var preview: AVCaptureVideoPreviewLayer!
    @Published var isSaved = false
    @Published var picData = Data(count: 0)
    // Backing view returned by CameraPreview; receives processed frames.
    @Published var uiimageview = UIImageView(frame: UIScreen.main.bounds)

    // Core ML style-transfer model applied to every captured frame.
    var model = style()

    /// Checks camera authorization; configures the session once authorized,
    /// or raises `alert` when access has been denied.
    func Check() {
        switch AVCaptureDevice.authorizationStatus(for: .video) {
        case .authorized:
            self.setUp()
        case .notDetermined:
            AVCaptureDevice.requestAccess(for: .video) { granted in
                if granted {
                    self.setUp()
                }
            }
        case .denied:
            self.alert.toggle()
        default:
            return
        }
    }

    /// Builds the capture graph: back wide-angle camera -> video data output,
    /// with frames delivered on a dedicated serial queue.
    func setUp() {
        self.session.beginConfiguration()
        // Balance beginConfiguration on EVERY exit path. The old code skipped
        // commitConfiguration when AVCaptureDeviceInput(device:) threw.
        defer { self.session.commitConfiguration() }

        // Guard instead of force-unwrapping: on the simulator (or hardware
        // without a back wide-angle camera) .default(...) returns nil and
        // the previous `device!` crashed.
        guard let device = AVCaptureDevice.default(.builtInWideAngleCamera,
                                                   for: .video,
                                                   position: .back) else {
            print("No back wide-angle camera available")
            return
        }

        do {
            let input = try AVCaptureDeviceInput(device: device)
            if self.session.canAddInput(input) {
                self.session.addInput(input)
            }

            // Drop late frames so the Core ML model never falls behind.
            // (The old code set this flag twice; once is enough.)
            self.output2.alwaysDiscardsLateVideoFrames = true
            if self.session.canAddOutput(self.output2) {
                self.session.addOutput(self.output2)
            }
            self.session.sessionPreset = .high

            // Deliver sample buffers off the main thread; captureOutput hops
            // back to main only for the UIKit update.
            let queue = DispatchQueue(label: "model process")
            self.output2.setSampleBufferDelegate(self, queue: queue)
        } catch {
            print(error.localizedDescription)
        }
    }
}


/// Bridges the camera model's UIImageView into SwiftUI.
///
/// NOTE(review): despite the name, what is actually displayed is
/// `camera.uiimageview` (whose `.image` is set per-frame by the sample
/// buffer delegate). The AVCaptureVideoPreviewLayer created below is
/// assigned to `camera.preview` but never added as a sublayer of the
/// returned view — presumably intentional since frames are model-processed,
/// but worth confirming.
struct CameraPreview:UIViewRepresentable{
    @ObservedObject var camera:CameraModel
    
    func makeUIView(context: Context) -> UIView {
        // Child image view rotated 90° — presumably to compensate for the
        // sensor's landscape orientation. TODO(review): confirm; it is added
        // as a subview but never given an image in the visible code.
        let subuiimageview3 = UIImageView()
        subuiimageview3.bounds = CGRect(x: 0, y: 0, width: camera.uiimageview.frame.width
                                        , height: camera.uiimageview.frame.height  )
        // Center the child over the parent, then rotate it a quarter turn.
        subuiimageview3.layer.position = CGPoint(x: camera.uiimageview.frame.width/2, y: camera.uiimageview.frame.height/2)
        subuiimageview3.layer.setAffineTransform(CGAffineTransform(rotationAngle: .pi/2))
        camera.preview = AVCaptureVideoPreviewLayer(session: camera.session)
        camera.uiimageview.addSubview(subuiimageview3)
        camera.preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
        // NOTE(review): startRunning() is a blocking call and Apple recommends
        // invoking it off the main thread; here it runs during view creation.
        camera.session.startRunning()
        
        return camera.uiimageview
    }
    func updateUIView(_ uiView: UIView, context: Context) {
        // All updates flow through camera.uiimageview directly; nothing to do.
        return
    }
}




/// A UIView whose backing layer is an AVCaptureVideoPreviewLayer, letting a
/// capture session render straight into the view's own layer.
class PW: UIView {
    // Replace the default CALayer with a preview layer.
    override class var layerClass: AnyClass {
        return AVCaptureVideoPreviewLayer.self
    }

    // Typed accessor for the backing layer.
    var previewLayer: AVCaptureVideoPreviewLayer {
        return layer as! AVCaptureVideoPreviewLayer
    }

    // Convenience pass-through to the layer's capture session.
    var session: AVCaptureSession? {
        get {
            return previewLayer.session
        }
        set {
            previewLayer.session = newValue
        }
    }
}




// MARK: - AVCaptureVideoDataOutputSampleBufferDelegate
extension CameraModel: AVCaptureVideoDataOutputSampleBufferDelegate {

    /// Called on the "model process" queue for every captured frame.
    /// Converts the frame to a 512x512 UIImage, runs it through the Core ML
    /// `style` model, rotates the result, and publishes it to the preview
    /// image view on the main thread.
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        let cgImage = imageFromSampleBuffer(sampleBuffer: sampleBuffer)

        // Guard the whole pipeline instead of force-unwrapping each step:
        // a single failed resize/prediction used to crash the app mid-capture.
        // Now a bad frame is simply dropped. (The unused CMSampleBufferGetImageBuffer
        // local and the per-frame debug prints were also removed.)
        guard
            let resized = convertCIImageToUIImage(cgImage: cgImage)
                .resizeImageTo(size: CGSize(width: 512, height: 512)),
            let inputBuffer = resized.convertToBuffer(),
            let prediction = try? self.model.prediction(Data: inputBuffer),
            let styled = UIImage(pixelBuffer: prediction.Result),
            let styledCGImage = styled.cgImage
        else {
            return
        }

        // Rotate right — presumably to compensate for sensor orientation,
        // matching the rotated subview in CameraPreview.
        let displayImage = UIImage(cgImage: styledCGImage, scale: 3, orientation: .right)

        // UIKit must only be touched on the main thread.
        DispatchQueue.main.async {
            self.uiimageview.image = displayImage
        }
    }

    /// Frames dropped by the output (alwaysDiscardsLateVideoFrames is true).
    func captureOutput(_ output: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        print("drop")
    }
}

// Shared, reusable Core Image context. CIContext creation is expensive and
// the old code built a brand-new one for every captured frame.
private let frameConversionContext = CIContext(options: nil)

/// Renders a captured sample buffer into a CGImage.
///
/// - Parameter sampleBuffer: A video sample buffer carrying a pixel buffer.
/// - Returns: The rendered CGImage.
/// - Warning: Force-unwraps both the pixel buffer and the rendered image;
///   callers must only pass video frames with valid image data (this matches
///   the original contract — the return type is non-optional).
func imageFromSampleBuffer(sampleBuffer: CMSampleBuffer) -> CGImage {
    let imageBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
    let ciImage = CIImage(cvPixelBuffer: imageBuffer)
    return frameConversionContext.createCGImage(ciImage, from: ciImage.extent)!
}
/// Wraps a CGImage in a UIImage.
///
/// Note: despite the name, this converts a CGImage (name kept for caller
/// compatibility). The resulting UIImage's `ciImage` property is nil because
/// the image is CGImage-backed — per UIKit: "returns underlying CIImage or
/// nil if CGImageRef based". The old unused `ciImage` local (which only
/// demonstrated that fact) has been removed.
func convertCIImageToUIImage(cgImage: CGImage) -> UIImage {
    return UIImage(cgImage: cgImage)
}



/// A UIView backed by an AVCaptureVideoPreviewLayer, so a capture session
/// renders directly into the view's own layer instead of a manually managed
/// sublayer.
class PreviewView: UIView {

    // Swap the view's default backing layer class for a preview layer.
    override class var layerClass: AnyClass { return AVCaptureVideoPreviewLayer.self }

    // The backing layer, exposed with its concrete type.
    var previewLayer: AVCaptureVideoPreviewLayer { return layer as! AVCaptureVideoPreviewLayer }

    /// The capture session rendered by this view, if any.
    var session: AVCaptureSession? {
        get { return previewLayer.session }
        set { previewLayer.session = newValue }
    }
}




