//
//  File.swift
//  DemoTest
//
//  Created by 韦小新 on 2022/4/20.
//

import UIKit
import Vision
import AVFoundation
import SwiftUI


// Shared game state, global so the SwiftUI screens swapped in by `nextView`
// can read it. NOTE(review): consider injecting a model object instead of
// using mutable globals.
// Whether the experience has reached its final stage (read in `setupImage`
// to hide the cake background).
public var isFinal = false
// Count of recognized "W" signs (incremented in `updateUILabelsWithPrediction`).
public var countW = 0
// NOTE(review): never read or written in this file — presumably used by
// another screen; verify before removing.
public var isRight = 0
/// Camera game screen: renders the camera feed with detected pose wireframes
/// and advances a three-step cake-building game as the player performs the
/// expected sign actions.
class MainViewController: UIViewController, AVAudioPlayerDelegate {
    
    // Full-screen view showing each camera frame with poses drawn on top.
    var imageView: UIImageView = UIImageView(frame:CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height))
    // Instruction overlays for the three game steps; only one is visible at a time.
    var tipsOneView: UIImageView = UIImageView(frame: CGRect(x: UIScreen.main.bounds.width - UIScreen.main.bounds.width/3, y: 100, width: 300, height: 300))
    var tipsTwoView: UIImageView = UIImageView(frame: CGRect(x: UIScreen.main.bounds.width - UIScreen.main.bounds.width/3, y: 100, width: 300, height: 300))
    var tipsThreeView: UIImageView = UIImageView(frame: CGRect(x: UIScreen.main.bounds.width - UIScreen.main.bounds.width/3, y: 100, width: 300, height: 300))
//    var startView: UIImageView =  UIImageView(frame:CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height))
    // Shows the latest predicted action label.
    var actionLabel: UILabel = UILabel(frame: CGRect(x: 0, y: 600, width: 100, height: 50))
    // Splash image; faded out by `missButton` via the timer in `viewDidLoad`.
    // NOTE(review): configured in `viewDidLoad` but never added as a subview — verify.
    var startImageView: UIImageView = UIImageView(frame:CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height))
    
    // Shows the current prediction's confidence string.
    var confidenceLabel: UILabel! = UILabel(frame: CGRect(x: 0, y: 650, width: 100, height: 50))
    // Index of the current tip; NOTE(review): never read elsewhere in this file.
    var nowTips = 1
    // Camera capture pipeline; torn down in `viewDidDisappear`.
    var videoCapture: VideoCapture!
    // Converts camera frames into pose/action predictions.
    var videoProcessingChain: VideoProcessingChain!
    // Total number of observed frames per action label.
    var actionFrameCounts = [String: Int]()
    // NOTE(review): never used in this file — verify before removing.
    var myActionLabels: [String] = []
    // Expected action labels for game steps 0...2.
    let answer = ["Done","CakeOne","Cake"]
    // Alternate labels; only "W" (index 0) is checked in this file.
    let answerOne = ["W","D","Cake"]
    // Cake scene: background image holding a plate and three stacked cake layers.
    var bgImageView: UIImageView!
    var cakePlate: UIImageView!
    var cakeOne: UIImageView!
    var cakeTwo: UIImageView!
    var cakeThree: UIImageView!
    // Current game step (0–2); advanced in `updateUILabelsWithPrediction`.
    var order = 0
    // Progress marker, currently only logged; see `updateUILabelsWithPrediction`.
    var rightCount = 0
    // Number of taps on the next button; selects which SwiftUI screen comes next.
    var isNextView = 0
}


// MARK: Life Style
extension MainViewController {

    override func viewDidLoad() {
        super.viewDidLoad()

        // Keep the screen awake while the camera-driven game runs.
        // NOTE(review): never reset to `false` in this file — confirm whether a
        // later screen is expected to re-enable the idle timer.
        UIApplication.shared.isIdleTimerDisabled = true

        // Build the cake scene. The plate and the three cake layers share one
        // frame and are stacked on top of each other inside `bgImageView`.
        bgImageView = UIImageView(frame: CGRect(x: 0, y: UIScreen.main.bounds.height/5, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height))
        let cakeFrame = CGRect(x: UIScreen.main.bounds.width - UIScreen.main.bounds.width/3,
                               y: bgImageView.frame.height/2 - CGFloat(20),
                               width: 400, height: 300)
        cakePlate = UIImageView(frame: cakeFrame)
        cakeOne = UIImageView(frame: cakeFrame)
        cakeTwo = UIImageView(frame: cakeFrame)
        cakeThree = UIImageView(frame: cakeFrame)

        // Instruction overlays: only the first step's tip starts visible.
        tipsOneView.image = UIImage(named: "sign-make")
        tipsTwoView.image = UIImage(named: "sign-cake1")
        tipsTwoView.alpha = 0
        tipsThreeView.alpha = 0
        tipsThreeView.image = UIImage(named: "sign-cake2")

        // Wire up the pose-prediction pipeline.
        videoProcessingChain = VideoProcessingChain()
        videoProcessingChain.delegate = self

        // Hide the start overlay after a short delay.
        // FIX: the original used a repeating selector-based timer whose `target:`
        // strongly retained `self` and was never invalidated (leaking the view
        // controller), and called `fire()` immediately, which hid the overlay
        // right away instead of after the delay. A one-shot block-based timer
        // with a weak capture achieves the evident intent without the leak.
        Timer.scheduledTimer(withTimeInterval: 2, repeats: false) { [weak self] _ in
            self?.missButton()
        }

        videoCapture = VideoCapture()
        videoCapture.delegate = self

        let nextButton = UIButton(frame: CGRect(x: UIScreen.main.bounds.width - UIScreen.main.bounds.width/3 + 50, y: self.bgImageView.frame.height/2 , width: 100, height: 100))
        updateUILabelsWithPrediction(.startingPrediction)
        imageView.addSubview(actionLabel)
        imageView.addSubview(confidenceLabel)

        // NOTE(review): `startImageView` is configured but never added to the
        // view hierarchy here — confirm whether it should be a subview.
        startImageView.image = UIImage(named: "signmantion")
        startImageView.contentMode = .scaleAspectFit

        nextButton.setImage(UIImage(named: "nextButton"), for: .normal)
        nextButton.contentMode = .scaleAspectFit
        nextButton.addTarget(self, action: #selector(nextView), for: .touchUpInside)

        view.addSubview(imageView)
        view.addSubview(tipsOneView)
        view.addSubview(tipsTwoView)
        view.addSubview(tipsThreeView)
        view.addSubview(nextButton)

        setupImage()
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)

        // Update the device's orientation.
        videoCapture.updateDeviceOrientation()
    }

    override func viewDidDisappear(_ animated: Bool) {
        // FIX: the original omitted the required call to super.
        super.viewDidDisappear(animated)
        // Tear down the capture pipeline so the camera stops when leaving.
        videoCapture = nil
        videoProcessingChain = nil
    }

    /// Notifies the video capture when the device rotates to a new orientation.
    override func viewWillTransition(to size: CGSize,
                                     with coordinator: UIViewControllerTransitionCoordinator) {
        // FIX: the original omitted the required call to super.
        super.viewWillTransition(to: size, with: coordinator)
        // Update the camera's orientation to match the device's.
        videoCapture.updateDeviceOrientation()
    }

    override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
        // Intentionally empty: the segue currently needs no extra configuration.
        if segue.identifier == "quitToResultViewController" {
        }
    }

}
// MARK: - Action Event
extension MainViewController {
    // TODO(translated from original comment): make the step auto-complete
    // after five seconds.

    /// Builds the cake scene: a plate plus three cake layers that light up
    /// (alpha 1) as the player completes each step.
    func setupImage() {
        // Configure and stack the plate and cake layers in z-order.
        let layers: [(view: UIImageView, asset: String)] = [
            (cakePlate, "plate"),
            (cakeOne, "cake1"),
            (cakeTwo, "cake2"),
            (cakeThree, "cake3"),
        ]
        for layer in layers {
            layer.view.image = UIImage(named: layer.asset)
            layer.view.contentMode = .scaleAspectFit
            bgImageView.addSubview(layer.view)
        }

        // The three cake layers start dimmed until their step is completed.
        for pending in [cakeOne, cakeTwo, cakeThree] {
            pending?.alpha = 0.3
        }

        bgImageView.image = UIImage(named: "cameraBackGround")
        bgImageView.contentMode = .scaleAspectFit

        if isFinal {
            // Final stage: the cake scene stays hidden.
            bgImageView.alpha = 0
        } else {
            imageView.addSubview(bgImageView)
        }
    }

    /// Fades out the start overlay; fired by the timer set up in `viewDidLoad`.
    @objc func missButton() {
        startImageView.alpha = 0
    }

    /// Advances to the next SwiftUI screen by replacing the window's root
    /// view controller. The first tap shows `SendCake`, the second `PassEnd`.
    @objc func nextView() {
        isNextView += 1

        guard let window = UIApplication.shared.windows.first else { return }

        switch isNextView {
        case 1:
            window.rootViewController = UIHostingController(rootView: SendCake())
            window.makeKeyAndVisible()
        case 2:
            window.rootViewController = UIHostingController(rootView: PassEnd())
            window.makeKeyAndVisible()
        default:
            break
        }
    }
}

// MARK: - VideoCaptureDelegate
extension MainViewController: VideoCaptureDelegate {
    /// Receives the capture session's new frame publisher and feeds it into
    /// the pose-processing chain.
    func videoCapture(_ videoCapture: VideoCapture, didCreate framePublisher: FramePublisher) {
        // Show the placeholder prediction until the chain produces output.
        updateUILabelsWithPrediction(.startingPrediction)

        // Route the camera frames into the processing chain.
        videoProcessingChain.upstreamFramePublisher = framePublisher
    }
}

extension MainViewController: VideoProcessingChainDelegate {

    /// Called when the chain predicts an action for a window of frames.
    func videoProcessingChain(_ chain: VideoProcessingChain, didPredict actionPrediction: ActionPrediction, for frames: Int) {
        if actionPrediction.isModelLabel {
            // Update the total number of frames for this action.
            addFrameCount(frames, to: actionPrediction.label)
        }

        // FIX: removed a stray `print(actionLabel.text)` debug statement. It
        // read a UIKit view's property from this delegate callback, which is
        // not guaranteed to run on the main thread — UIKit access must stay
        // on main.

        // Present the prediction in the UI.
        updateUILabelsWithPrediction(actionPrediction)
    }

    /// Called with the poses Vision detected in the current frame (if any).
    func videoProcessingChain(_ chain: VideoProcessingChain, didDetect poses: [Pose]?, in frame: CGImage) {
        // Render the poses on a different queue than the pose publisher's.
        DispatchQueue.global(qos: .userInteractive).async {
            // Draw the poses onto the frame.
            self.drawPoses(poses, onto: frame)
        }
    }

}

// MARK: - Helper Methods
extension MainViewController {
    /// Adds `frameCount` to the running total of frames for `actionLabel`.
    private func addFrameCount(_ frameCount: Int, to actionLabel: String) {
        // Add the new duration to the current total, if it exists.
        let totalFrames = (actionFrameCounts[actionLabel] ?? 0) + frameCount

        // Assign the new total frame count for this action.
        actionFrameCounts[actionLabel] = totalFrames
    }

    /// Presents `prediction` in the UI and advances the cake-building game
    /// when the predicted label matches the current step's expected answer.
    private func updateUILabelsWithPrediction(_ prediction: ActionPrediction) {
        // All UI and game-state mutation happens on the main thread.
        DispatchQueue.main.async {
            self.actionLabel.text = prediction.label

            // Step 0 → 1: "Done" completes the first step.
            if prediction.label == self.answer[0] && self.order == 0 {
                self.tipsOneView.alpha = 0
                self.tipsTwoView.alpha = 1
                self.order = 1
                self.cakeOne.alpha = 1
                self.bgImageView.addSubview(self.cakeOne)
                self.view.addSubview(self.bgImageView)
            }

            // Step 1 → 2: "CakeOne" completes the second step.
            if prediction.label == self.answer[1] && self.order == 1 {
                self.tipsTwoView.alpha = 0
                self.tipsThreeView.alpha = 1
                self.order = 2
                self.cakeTwo.alpha = 1
                self.bgImageView.addSubview(self.cakeTwo)
                self.view.addSubview(self.bgImageView)
                // FIX: the original assigned `rightCount = 2` in a separate
                // `if` with this exact condition AFTER `order` had already
                // advanced to 2, so that branch could never execute. Folding
                // the assignment in here restores the evident intent.
                self.rightCount = 2
            }

            // Step 2 (final): "Cake" completes the last step.
            if prediction.label == self.answer[2] && self.order == 2 {
                self.tipsThreeView.alpha = 0
                self.cakeThree.alpha = 1
                self.bgImageView.addSubview(self.cakeThree)
                self.view.addSubview(self.bgImageView)
                self.rightCount = 3
            }

            // Count the first recognized "W" sign.
            if prediction.label == self.answerOne[0] && self.rightCount == 0 {
                self.rightCount = 1
                countW += 1
                print("rightCount:\(self.rightCount)")
                print("countW:\(countW)")
            }
        }

        // Update the UI's confidence label on the main thread.
        let confidenceString = prediction.confidenceString ?? "Observing..."
        DispatchQueue.main.async { self.confidenceLabel.text = confidenceString }
    }

    /// Renders `poses` as wireframes over `frame` and displays the composite
    /// in the full-screen image view.
    private func drawPoses(_ poses: [Pose]?, onto frame: CGImage) {
        // Create a default render format at a scale of 1:1.
        let renderFormat = UIGraphicsImageRendererFormat()
        renderFormat.scale = 1.0

        // Create a renderer with the same size as the frame.
        let frameSize = CGSize(width: frame.width, height: frame.height)
        let poseRenderer = UIGraphicsImageRenderer(size: frameSize,
                                                   format: renderFormat)

        // Draw the frame first and then draw pose wireframes on top of it.
        let frameWithPosesRendering = poseRenderer.image { rendererContext in
            // The `UIGraphicsImageRenderer` instance flips the Y-axis presuming
            // we're drawing with UIKit's coordinate system and orientation.
            let cgContext = rendererContext.cgContext

            // Reset the context's transform to the identity by concatenating
            // the inverse of the current transform matrix (CTM).
            let inverse = cgContext.ctm.inverted()
            cgContext.concatenate(inverse)

            // Draw the camera image first as the background.
            let imageRectangle = CGRect(origin: .zero, size: frameSize)
            cgContext.draw(frame, in: imageRectangle)

            // Create a transform that converts the poses' normalized point
            // coordinates `[0.0, 1.0]` to properly fit the frame's size.
            let pointTransform = CGAffineTransform(scaleX: frameSize.width,
                                                   y: frameSize.height)

            guard let poses = poses else { return }

            // Draw each pose Vision found as a wireframe at the image's scale.
            for pose in poses {
                pose.drawWireframeToContext(cgContext, applying: pointTransform)
            }
        }

        // Update the UI's full-screen image view on the main thread.
        DispatchQueue.main.async {
            self.imageView.image = frameWithPosesRendering
            self.imageView.contentMode = .scaleAspectFill
        }
    }

}

