//
//  ObjectDetection.swift
//  Object detection in AR with ARKit and CoreML
//
//  Created by Ufuk Kaan on 18.06.21.
//

import CoreML
import Vision
import SceneKit

/// Runs the bundled "mblockv1" Core ML model on camera frames via the Vision
/// framework and collects every detection above a fixed confidence threshold
/// into `detection_object_array` for the output/controller layer.
class ObjectDetection {

    /// One accepted detection: predicted label, model confidence, and the
    /// normalized bounding box reported by Vision.
    struct Detection_Object {
        let classification: String
        let confidence: Float
        let boundingBox: CGRect
    }

    // Wrap the auto-generated Core ML model class for use with Vision.
    // `try!` is deliberate: the model ships inside the app bundle, so a load
    // failure is a programmer/build error, not a recoverable runtime state.
    var mblock_model = try! VNCoreMLModel(for: mblockv1().model)

    // Built lazily so `mblock_model` is initialized first. The `[weak self]`
    // trampoline fixes the retain cycle the original code created by passing
    // `self.completionHandler` directly (self -> request -> handler -> self).
    lazy var detection_request: VNCoreMLRequest = {
        VNCoreMLRequest(model: mblock_model) { [weak self] request, error in
            self?.completionHandler(request, error: error)
        }
    }()

    /// Detections accumulated for the output/controller layer.
    /// NOTE(review): this array is never cleared here, so results from
    /// successive frames pile up — confirm callers reset it between frames.
    var detection_object_array = [Detection_Object]()

    /// Runs the detection request on a single camera frame.
    /// - Parameter request: pixel buffer of the frame to analyze.
    func detectObjects(on request: CVPixelBuffer) {
        do {
            try VNImageRequestHandler(cvPixelBuffer: request).perform([detection_request])
        } catch {
            // Include the underlying error so failures are diagnosable
            // instead of being hidden behind a constant message.
            print("VNImageRequestHandler failed: \(error)")
        }
    }

    /// Vision completion handler: keeps every observation whose confidence
    /// exceeds 0.5 and appends it to `detection_object_array`.
    /// - Parameters:
    ///   - request: the finished Vision request carrying the observations.
    ///   - error: any error Vision reported (currently unused).
    func completionHandler(_ request: VNRequest?, error: Error?) {
        guard let results = request?.results as? [VNRecognizedObjectObservation] else { return }
        for observation in results {
            // Skip low-confidence observations and — unlike the original
            // force-unwrap — observations that carry no label at all.
            guard observation.confidence > 0.5,
                  let topLabel = observation.labels.first else {
                print("Kein Objekt im Bild erkannt")
                continue
            }
            let responseItem = Detection_Object(
                classification: topLabel.identifier,
                confidence: observation.confidence,
                boundingBox: observation.boundingBox
            )
            detection_object_array.append(responseItem)
        }
        print("\n------------------------------------------------------------------------------------------\n")
    }
}