// Copyright 2019 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import CoreImage
import TensorFlowLite
import UIKit
import Foundation

/// A result from invoking the `Interpreter`.
struct Result {
  /// Wall-clock duration of the `Interpreter.invoke()` call, in milliseconds.
  let inferenceTime: Double
  /// Inferences built from the model's output tensor (see `getTopN`).
  let inferences: [Inference]
}

/// An inference from invoking the `Interpreter`.
struct Inference {
  /// Confidence score. NOTE(review): `getTopN` currently always passes 0.
  let confidence: Float
  /// Class label. NOTE(review): currently always "" — labels are loaded but never attached here.
  let label: String

    /// Human-readable class name. NOTE(review): currently always "" from `getTopN`.
    let className: String
    /// Face bounding rectangle in image pixel coordinates.
    let rect: CGRect
    /// Color used when drawing this inference's overlay.
    let displayColor: UIColor
    /// Flattened landmark coordinates [x0, y0, x1, y1, ...].
    let pointResults: [Int]
    /// Timing checkpoints (e.g. "findFaceTime", "endDottingDuration").
    /// NOTE(review): `NSMutableDictionary` is a reference type shared with the
    /// handler — later mutations are visible to every `Inference` holding it.
    var timeDic:NSMutableDictionary

}

/// Information about a model file or labels file.
typealias FileInfo = (name: String, extension: String)

/// Information about the MobileNet model.
enum MobileNet {
  // Alternative models tried during development: face9_6, mfn_graph,
  // sfv2_graph.tflite, shufflenet_model.tflite, opt_graph.
  static let modelInfo: FileInfo = (name: "opt_graph", extension: "tflite")
  static let labelsInfo: FileInfo = (name: "labels", extension: "txt")
}

/// This class handles all data preprocessing and makes calls to run inference on a given frame
/// by invoking the `Interpreter`. It then formats the inferences obtained and returns the top N
/// results for a successful inference.

/// Delegate through which the handler can report a detected face image back to
/// the UI layer.
/// NOTE(review): "Delagate" is a typo for "Delegate"; renaming would require
/// updating all conformers, so it is only flagged here.
/// NOTE(review): consider constraining to `AnyObject` so conformers can be held
/// weakly (see the strong `delegate` property on `ModelDataHandler`).
protocol MyDelagate {
    func findFaceRectangle(image: UIImage)
}

class ModelDataHandler {

// (Commented-out hook for a C++ classification implementation; kept as-is.)
//     let CPP_ImageClassification cpp_ImageClassification
  // MARK: - Internal Properties

  /// The current thread count used by the TensorFlow Lite Interpreter.
  let threadCount: Int

  /// Nominal number of top results to keep.
  /// NOTE(review): unused — `getTopN` no longer truncates (the sort/prefix is commented out).
  let resultCount = 3
  /// Upper bound for a UI control that adjusts `threadCount`.
  let threadCountLimit = 10

  // MARK: - Model Parameters

  let batchSize = 1
// Earlier 224x224 input configuration, kept for reference:
//  let inputChannels = 3 //2
//  let inputWidth = 224 //112
//  let inputHeight = 224 //112
    
    // Input tensor geometry for the current model (112x112, 3 channels).
    let inputChannels = 3 //2
    let inputWidth = 112 //112
    let inputHeight = 112 //112
    
    // NOTE(review): delegates are conventionally `weak var` on a class-bound
    // protocol to avoid retain cycles — confirm intended ownership here.
    var delegate: MyDelagate?
    
  // MARK: - Private Properties
    
    // Padding (in pixels) added around the predicted landmark bounding box
    // when building `preBox`.
    // NOTE(review): `expandXsize` and `expandYsize` are never read in this file.
    let expandsize :Int = 0
    let expandXsize :Int = 0
    let expandYsize :Int = 90


  /// List of labels from the given labels file.
private var labels: [String] = []

  /// TensorFlow Lite `Interpreter` object for performing inference on a given model.
private var interpreter: Interpreter
  // Face rectangle reported by the OpenCV detector, in pixel coordinates.
private var faceLeftTopX:NSInteger
private var faceLeftTopY:NSInteger

private var faceRightBottomX:NSInteger
private var faceRightBottomY:NSInteger
private var faceWidth:NSInteger
private var faceHeight:NSInteger

  /// Information about the alpha component in RGBA data.
  private let alphaComponent = (baseOffset: 4, moduloRemainder: 3)
    
    // Scratch state mutated on every `runModel(onFrame:)` call.
    // NOTE(review): this makes the handler stateful and not thread-safe;
    // call `runModel` from a single queue only.
    // `pointResults`: flattened landmark coordinates [x0, y0, x1, y1, ...].
    var pointResults: [Int] = []
    var timeResults: [Float32] = []
    // Timing checkpoints; the same reference is handed to each `Inference`.
    var timeDic:NSMutableDictionary
    // Per-frame landmark x / y coordinates split out of `pointResults`.
    var xResults: [Int] = []
    var yResults: [Int] = []
    // Predicted face box [xMin, yMin, xMax, yMax] from the previous frame's
    // landmarks; fed back into `OcObj.normImg` on the next frame.
    var preBox:[Int] = []
    var xMin:Int = 0
    var xMax:Int = 0
    var yMin:Int = 0
    var yMax:Int = 0
    // Dimensions of the most recent input pixel buffer.
    var width:Int = 0
    var height:Int = 0

  // MARK: - Initialization

  /// A failable initializer for `ModelDataHandler`. A new instance is created if the model and
  /// labels files are successfully loaded from the app's main bundle. Default `threadCount` is 1.
  init?(modelFileInfo: FileInfo, labelsFileInfo: FileInfo, threadCount: Int = 1) {
    let modelFilename = modelFileInfo.name

    // Construct the path to the model file.
    guard let modelPath = Bundle.main.path(
      forResource: modelFilename,
      ofType: modelFileInfo.extension
    ) else {
      print("Failed to load the model file with name: \(modelFilename).")
      return nil
    }

    // Specify the options for the `Interpreter`.
    self.threadCount = threadCount
    var options = InterpreterOptions()
    options.threadCount = threadCount
    do {
      // Create the `Interpreter`.
      interpreter = try Interpreter(modelPath: modelPath, options: options)
      // Allocate memory for the model's input `Tensor`s.
      try interpreter.allocateTensors()
    } catch let error {
      print("Failed to create the interpreter with error: \(error.localizedDescription)")
      return nil
    }
    
    // Face rectangle starts out empty until the first detection.
    faceLeftTopX = 0;
    faceLeftTopY = 0;
    faceRightBottomX = 0
    faceRightBottomY = 0
    faceWidth = 0
    faceHeight = 0
    
    timeDic = NSMutableDictionary();
//    timeDic;: [AnyHashable : Any] = [:]
    
    // Load the classes listed in the labels file.
    loadLabels(fileInfo: labelsFileInfo)
  }
    
// (Two commented-out Objective-C `bufferToImage` reference implementations,
//  kept as-is for porting reference.)
//    func bufferToImage(from pixelBuffer:CVPixelBuffer) -> UIImage?{
//
//        CIImage *ciImage = [CIImage imageWithCVPixelBuffer:pixelBuffer];
//
//        CIContext *temporaryContext = [CIContext contextWithOptions:nil];
//        CGImageRef videoImage = [temporaryContext
//        createCGImage:ciImage
//        fromRect:CGRectMake(0, 0,
//        CVPixelBufferGetWidth(pixelBuffer),
//        CVPixelBufferGetHeight(pixelBuffer))];
//
//        UIImage *image = [UIImage imageWithCGImage:videoImage];
//        CGImageRelease(videoImage);
//
//        return image;
//    }
    
//        func bufferToImage(from pixelBuffer:CVPixelBuffer) -> UIImage?{
//
//            CIImage *ciImage = [CIImage imageWithCVPixelBuffer:pixelBuffer];
//
//            CIContext *temporaryContext = [CIContext contextWithOptions:nil];
//            CGImageRef videoImage = [temporaryContext
//            createCGImage:ciImage
//            fromRect:CGRectMake(0, 0,
//            CVPixelBufferGetWidth(pixelBuffer),
//            CVPixelBufferGetHeight(pixelBuffer))];
//    
//            UIImage *image = [UIImage imageWithCGImage:videoImage];
//            CGImageRelease(videoImage);
//    
//            return image;
//        }
    
    /// Writes `image` into the app's Documents directory as "fileName.jpg"
    /// (JPEG when encodable, otherwise PNG bytes despite the .jpg extension).
    ///
    /// - Parameter image: The image to persist.
    /// - Returns: `true` on success, `false` when encoding or writing fails.
    /// NOTE(review): the filename is hard-coded, so every call overwrites the
    /// previously saved file.
    func saveImage(image: UIImage) -> Bool {
        guard let data = image.jpegData(compressionQuality: 1) ?? image.pngData() else {
            return false
        }
        guard let directory = try? FileManager.default.url(for: .documentDirectory, in: .userDomainMask, appropriateFor: nil, create: false) as NSURL else {
            return false
        }
        do {
            try data.write(to: directory.appendingPathComponent("fileName.jpg")!)
            return true
        } catch {
            print(error.localizedDescription)
            return false
        }
    }
    
    // UIImage to buffer method:
    /// Renders `image` into a newly created 32ARGB `CVPixelBuffer`.
    ///
    /// - Parameter image: The source image.
    /// - Returns: A pixel buffer containing the rendered image, or `nil` if the
    ///     buffer could not be created.
    /// NOTE(review): near-duplicate of `imageToPixelBbuffer(from:)` below; the
    /// only difference is the pixel format (32ARGB here vs. 32BGRA there).
    /// Consider factoring into one helper taking the format as a parameter.
    func buffer(from image: UIImage) -> CVPixelBuffer? {
        let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue, kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue] as CFDictionary
        var pixelBuffer : CVPixelBuffer?
        let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(image.size.width), Int(image.size.height), kCVPixelFormatType_32ARGB, attrs, &pixelBuffer)
        guard (status == kCVReturnSuccess) else {
            return nil
        }
        
        CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        let pixelData = CVPixelBufferGetBaseAddress(pixelBuffer!)
        
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGContext(data: pixelData, width: Int(image.size.width), height: Int(image.size.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer!), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue)
        
        // Flip the coordinate system so UIKit's top-left origin draws upright.
        context?.translateBy(x: 0, y: image.size.height)
        context?.scaleBy(x: 1.0, y: -1.0)
        
        UIGraphicsPushContext(context!)
        image.draw(in: CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height))
        UIGraphicsPopContext()
        CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        
        return pixelBuffer
    }

  // MARK: - Internal Methods

  /// Performs image preprocessing, invokes the `Interpreter`, and processes the inference results.
  ///
  /// Pipeline: OpenCV face detection + normalization (`OcObj.normImg`), TFLite
  /// landmark inference, then a predicted face box (`preBox`) is computed from
  /// the landmarks to seed the next frame's detection.
  /// NOTE(review): mutates instance state (`preBox`, `face*`, `timeDic`, ...),
  /// so this must not be called concurrently.
  func runModel(onFrame imagePixelBuffer: CVPixelBuffer) -> Result? {
    let sourcePixelFormat = CVPixelBufferGetPixelFormatType(imagePixelBuffer)
    assert(sourcePixelFormat == kCVPixelFormatType_32ARGB ||
             sourcePixelFormat == kCVPixelFormatType_32BGRA ||
               sourcePixelFormat == kCVPixelFormatType_32RGBA)
    
    
     width = CVPixelBufferGetWidth(imagePixelBuffer)
     height = CVPixelBufferGetHeight(imagePixelBuffer)
    
    // Objective-C++ bridge to OpenCV; loads the Haar cascade and settings.
    // NOTE(review): `OcObj` is constructed and re-initialized on every frame —
    // hoisting this to `init?` would avoid repeated file loading.
    let obj = OcObj();
    let strDetectorFile = Bundle.main.path(forResource: "haarcascade_frontalface_alt", ofType: "xml");
    let strSettingsFile = Bundle.main.path(forResource:"MatData", ofType: "yaml");
    
    // NOTE(review): force-unwraps crash if the bundled resources are missing.
    obj.initNormImg(strDetectorFile!, settingsFile: strSettingsFile!)
    

    // Detect/normalize the face; `preBox` (previous frame's prediction) hints
    // the search region. Returns the normalized image bytes plus face metadata.
    let dict :Dictionary = obj.normImg(to: imagePixelBuffer, with: preBox)

    let dstImagData:Data = dict["dstImagData"] as! Data;
    let findFaceArray:NSArray = dict["findFaceData"] as! NSArray
    
    // Unpack [leftTopX, leftTopY, rightBottomX, rightBottomY].
     faceLeftTopX = findFaceArray[0] as! NSInteger
     faceLeftTopY = findFaceArray[1] as! NSInteger
    
     faceRightBottomX = findFaceArray[2] as! NSInteger
     faceRightBottomY = findFaceArray[3] as! NSInteger
    
     faceWidth =  faceRightBottomX - faceLeftTopX
     faceHeight = faceRightBottomY - faceLeftTopY
    
    let findFaceTime:Float32 = dict["findFaceTime"] as! Float32;
    
    timeDic.setObject(findFaceTime, forKey:"findFaceTime" as NSCopying)
    // NOTE(review): empty body — this check has no effect and can be removed.
    if !dstImagData.isEmpty {
//        let aa:Data = dstImagData;
     }
//    pixelBuffer = obj.imageNormImg(to: pixelBuffer!) as! CVPixelBuffer; // call Li Bo's normalization method
    let imageChannels = 4
    assert(imageChannels >= inputChannels)

    let interval: TimeInterval
    let outputTensor: Tensor

    do {
      let inputTensor = try interpreter.input(at: 0)

      // Remove the alpha component from the image buffer to get the RGB data.
      // NOTE(review): the RGB conversion is disabled — the OpenCV-normalized
      // bytes are copied to the input tensor directly below.
//      guard let rgbData = rgbDataFromBuffer(
//        thumbnailPixelBuffer,
//        byteCount: batchSize * inputWidth * inputHeight * inputChannels,
//        isModelQuantized: inputTensor.dataType == .uInt8
//      ) else {
//        print("Failed to convert the image buffer to RGB data.")
//        return nil
//      }

      // Copy the RGB data to the input `Tensor`.
      try interpreter.copy(dstImagData, toInputAt: 0)

      // Run inference by invoking the `Interpreter`.
      let startDate = Date()
        let startDottingTime = CFAbsoluteTimeGetCurrent()

      try interpreter.invoke()

        let endDottingTime = CFAbsoluteTimeGetCurrent()
        
        // Landmark ("dotting") inference duration in milliseconds.
        let endDottingDuration = (endDottingTime - startDottingTime)*1000
        
        timeDic.setObject(endDottingDuration, forKey: "endDottingDuration" as NSCopying)
        // Logs "landmark inference duration: %.14f ms" (message is in Chinese).
        print(String(format:"打点执行时长：%.14f 毫秒",  endDottingDuration))
        interval = Date().timeIntervalSince(startDate) * 1000

      // Get the output `Tensor` to process the inference results.
      outputTensor = try interpreter.output(at: 0)
        
    } catch let error {
      print("Failed to invoke the interpreter with error: \(error.localizedDescription)")
      return nil
    }

    // Dequantize (uInt8 models) or read the float output directly.
    let results: [Float]
    switch outputTensor.dataType {
    case .uInt8:
      guard let quantization = outputTensor.quantizationParameters else {
        print("No results returned because the quantization values for the output tensor are nil.")
        return nil
      }
      let quantizedResults = [UInt8](outputTensor.data)
      results = quantizedResults.map {
        quantization.scale * Float(Int($0) - quantization.zeroPoint)
      }
    case .float32:
      results = [Float32](unsafeData: outputTensor.data) ?? []

    default:
      print("Output tensor data type \(outputTensor.dataType) is unsupported for this example app.")
      return nil
    }
    
///////// Estimate the face bounding box from the landmarks ////////
    
    // Map normalized landmark outputs back to pixel coordinates.
    let obj1 = OcObj();
    
    pointResults =   obj1.getAffineLandmarks(results) as! [Int]
    
    // Reset per-frame scratch arrays before rebuilding them.
    if preBox.count != 0 {
        preBox.removeAll();
    }
    
    if xResults.count != 0 {
        xResults.removeAll();
    }
    
    if yResults.count != 0 {
        yResults.removeAll();
    }
    
    // `pointResults` is interleaved [x, y, x, y, ...]; split into axes.
    // NOTE(review): assumes an even element count — an odd count would crash
    // on `pointResults[i+1]`.
    for i in stride(from: 0, to: pointResults.count, by: 2) {
        let tempX = pointResults[i]
        xResults.append(tempX);
        let tempY = pointResults[i+1]
        yResults.append(tempY)
    }
    
    // NOTE(review): force-unwraps crash if the model returned no landmarks.
    xMin = xResults.min()!
    xMax = xResults.max()!
    yMin = yResults.min()!
    yMax = yResults.max()!

    // Build preBox = [xMin, yMin, xMax, yMax], expanded by `expandsize`
    // (currently 0) and clamped to the frame bounds.
    if (xMin - expandsize)>0 {
        preBox.insert(xMin - expandsize, at: 0)
        
    } else {
        preBox.insert(0, at: 0)
//        preBox.removeAll();
    }
    
    if (yMin - expandsize)>0 {
        preBox.insert(yMin - expandsize, at: 1)
    } else {
        preBox.insert(0, at: 1)
//        preBox.removeAll();
    }
    
    if(xMax + expandsize)>width {
        preBox.insert(width, at: 2)
    } else {
        preBox.insert(xMax + expandsize, at: 2)
//        preBox.removeAll();
    }
    
    if(yMax+expandsize)>height{
        preBox.insert(height, at: 3)
    } else {
        preBox.insert(yMax + expandsize, at: 3)
//        preBox.removeAll();
    }
    
    // Log the estimated face-box coordinates (message is in Chinese).
    // NOTE(review): `%.6f` with an `Int` argument is a format-specifier
    // mismatch in `String(format:)` and prints garbage — should be `%d`.
    for i in stride(from: 0, to: preBox.count, by: 1) {
        
        let secondsStr = String(format: "预估的人脸框坐标:%.6f\n", preBox[i])
        print(secondsStr)
        
    }
    
    // Process the results.
    let topNInferences = getTopN(results: pointResults)

    // Return the inference time and inference results.
    return Result(inferenceTime: interval, inferences: topNInferences)
  }
    
    // UIImage to buffer method:
    /// Renders `image` into a newly created 32BGRA `CVPixelBuffer`.
    ///
    /// - Parameter image: The source image.
    /// - Returns: A pixel buffer containing the rendered image, or `nil` if the
    ///     buffer could not be created.
    /// NOTE(review): name has a typo ("Bbuffer") and the body duplicates
    /// `buffer(from:)` above except for the 32BGRA pixel format.
    func imageToPixelBbuffer(from image: UIImage) -> CVPixelBuffer? {
        let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue, kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue] as CFDictionary
        var pixelBuffer : CVPixelBuffer?
        let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(image.size.width), Int(image.size.height), kCVPixelFormatType_32BGRA, attrs, &pixelBuffer)
        guard (status == kCVReturnSuccess) else {
            return nil
        }
        
        CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        let pixelData = CVPixelBufferGetBaseAddress(pixelBuffer!)
        
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGContext(data: pixelData, width: Int(image.size.width), height: Int(image.size.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer!), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue)
        
        // Flip the coordinate system so UIKit's top-left origin draws upright.
        context?.translateBy(x: 0, y: image.size.height)
        context?.scaleBy(x: 1.0, y: -1.0)
        
        UIGraphicsPushContext(context!)
        image.draw(in: CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height))
        UIGraphicsPopContext()
        CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        
        return pixelBuffer
    }

    // MARK: - Private Methods
    /// Wraps the landmark results and current face rectangle into a single
    /// `Inference`.
    /// NOTE(review): despite the name, no top-N selection happens — the sort is
    /// commented out, `confidence` is always 0, and `label`/`className` are "".
    /// The rect's x is mirrored as `width - faceRightBottomX` — presumably to
    /// flip for the front camera; confirm against the caller.
    private func getTopN(results: [Int]) -> [Inference] {
        // Create a zipped array of tuples [(labelIndex: Int, confidence: Float)].
        var zippedResults: [[Int]] = []
        zippedResults.append(results);
        // Sort the zipped results by confidence value in descending order.
        //    let sortedResults = zippedResults.sorted { $0.1 > $1.1 }.prefix(resultCount)
        
        // Return the `Inference` results.
        return zippedResults.map {result in Inference(confidence: Float32(0), label: "", className: "", rect: CGRect(x: width-faceRightBottomX,y: faceLeftTopY,width: faceWidth,height: faceHeight), displayColor: UIColor.blue,pointResults: results,timeDic: timeDic) }
    }
    
//    let className: String
//    let rect: CGRect
//    let displayColor: UIColor

  /// Loads the labels from the labels file and stores them in the `labels` property.
  ///
  /// - Parameter fileInfo: Name and extension of the labels file in the main bundle.
  /// NOTE(review): `fatalError` on a missing/unreadable file — intentional for
  /// this sample app, but a production app should fail more gracefully.
  private func loadLabels(fileInfo: FileInfo) {
    let filename = fileInfo.name
    let fileExtension = fileInfo.extension
    guard let fileURL = Bundle.main.url(forResource: filename, withExtension: fileExtension) else {
      fatalError("Labels file not found in bundle. Please add a labels file with name " +
                   "\(filename).\(fileExtension) and try again.")
    }
    do {
      let contents = try String(contentsOf: fileURL, encoding: .utf8)
      labels = contents.components(separatedBy: .newlines)
    } catch {
      fatalError("Labels file named \(filename).\(fileExtension) cannot be read. Please add a " +
                   "valid labels file and try again.")
    }
  }

  /// Returns the RGB data representation of the given image buffer with the specified `byteCount`.
  ///
  /// - Parameters
  ///   - buffer: The pixel buffer to convert to RGB data.
  ///   - byteCount: The expected byte count for the RGB data calculated using the values that the
  ///       model was trained on: `batchSize * imageWidth * imageHeight * componentsCount`.
  ///   - isModelQuantized: Whether the model is quantized (i.e. fixed point values rather than
  ///       floating point values).
  /// - Returns: The RGB data representation of the image buffer or `nil` if the buffer could not be
  ///     converted.
  /// NOTE(review): currently unused — its only call site in `runModel` is
  /// commented out. `Data(bytes:)` below is deprecated in favor of `Data(_:)`.
    
  private func rgbDataFromBuffer(
    _ buffer: CVPixelBuffer,
    byteCount: Int,
    isModelQuantized: Bool
  ) -> Data? {
    CVPixelBufferLockBaseAddress(buffer, .readOnly)
    defer { CVPixelBufferUnlockBaseAddress(buffer, .readOnly) }
    guard let mutableRawPointer = CVPixelBufferGetBaseAddress(buffer) else {
      return nil
    }
    let count = CVPixelBufferGetDataSize(buffer)
    let bufferData = Data(bytesNoCopy: mutableRawPointer, count: count, deallocator: .none)
    var rgbBytes = [UInt8](repeating: 0, count: byteCount)
    var index = 0
    // Drop every alpha byte (offset % 4 == 3 in RGBA layout).
    for component in bufferData.enumerated() {
      let offset = component.offset
      let isAlphaComponent = (offset % alphaComponent.baseOffset) == alphaComponent.moduloRemainder
      guard !isAlphaComponent else { continue }
      rgbBytes[index] = component.element
      index += 1
    }
    // Quantized models take raw bytes; float models take values scaled to [0, 1].
    if isModelQuantized { return Data(bytes: rgbBytes) }
    return Data(copyingBufferOf: rgbBytes.map { Float($0) / 255.0 })
  }
}

// MARK: - Extensions
extension Data {
  /// Creates a new `Data` value containing a bit-for-bit copy of the elements
  /// of the given array.
  ///
  /// - Warning: The given array's element type `T` must be trivial in that it can be copied bit
  ///     for bit with no indirection or reference-counting operations; otherwise, reinterpreting
  ///     data from the resulting buffer has undefined behavior.
  /// - Parameter array: An array with elements of type `T`.
  init<T>(copyingBufferOf array: [T]) {
    self = array.withUnsafeBufferPointer { elementBuffer in
      Data(buffer: elementBuffer)
    }
  }
}

extension Array {
  /// Creates an array by reinterpreting the raw bytes of the given data as
  /// contiguous values of `Element`.
  ///
  /// - Warning: The array's `Element` type must be trivial in that it can be copied bit for bit
  ///     with no indirection or reference-counting operations; otherwise, copying the raw bytes in
  ///     the `unsafeData`'s buffer to a new array returns an unsafe copy.
  /// - Note: Returns `nil` if `unsafeData.count` is not a multiple of
  ///     `MemoryLayout<Element>.stride`.
  /// - Parameter unsafeData: The data containing the bytes to turn into an array.
  init?(unsafeData: Data) {
    let elementStride = MemoryLayout<Element>.stride
    guard unsafeData.count % elementStride == 0 else { return nil }
    #if swift(>=5.0)
    self = unsafeData.withUnsafeBytes { rawBuffer in
      Array(rawBuffer.bindMemory(to: Element.self))
    }
    #else
    self = unsafeData.withUnsafeBytes { elementPointer in
      Array(UnsafeBufferPointer<Element>(
        start: elementPointer,
        count: unsafeData.count / elementStride
      ))
    }
    #endif  // swift(>=5.0)
  }
}
