Newer
Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
//
// ObjectDetection.swift
// Object detection in AR with ARKit and CoreML
//
// Created by Ufuk Kaan on 18.06.21.
//
import CoreML
import Vision
import SceneKit
/// Runs a bundled CoreML model (`mblockv1`) over camera frames via the
/// Vision framework and records every confident detection.
class ObjectDetection {

    /// One detection result that passed the confidence threshold.
    struct Detection_Object {
        let classification: String   // identifier of the observation's top label
        let confidence: Float        // Vision confidence score
        let boundingBox: CGRect      // normalized bounding box from Vision
    }

    /// Minimum confidence an observation must exceed to be recorded.
    /// Defaults to the previously hard-coded 0.5; callers may tune it.
    var confidenceThreshold: Float = 0.5

    /// The model ships inside the app bundle, so a load failure is a
    /// programmer error — crashing via `try!` here is intentional.
    var mblock_model = try! VNCoreMLModel(for: mblockv1().model)

    /// Vision request wired to `completionHandler`; built lazily so
    /// `self` is available when the closure captures it.
    lazy var detection_request: VNCoreMLRequest = {
        return VNCoreMLRequest(model: mblock_model, completionHandler: self.completionHandler)
    }()

    /// All confident detections recorded so far.
    /// NOTE(review): this array is never cleared, so results accumulate
    /// across frames — confirm whether callers expect per-frame results.
    var detection_object_array = [Detection_Object]()

    /// Runs the detection request synchronously on one camera frame.
    /// - Parameter pixelBuffer: the frame to analyze.
    func detectObjects(on pixelBuffer: CVPixelBuffer) {
        do {
            try VNImageRequestHandler(cvPixelBuffer: pixelBuffer).perform([detection_request])
        } catch {
            // Include the underlying error instead of swallowing it.
            print("VNImageRequestHandler failed: \(error)")
        }
    }

    /// Completion handler for `detection_request`: appends every
    /// observation above `confidenceThreshold` to `detection_object_array`.
    func completionHandler(_ request: VNRequest?, error: Error?) {
        // Previously the error parameter was ignored entirely.
        if let error = error {
            print("Detection request failed: \(error)")
            return
        }
        guard let results = request?.results as? [VNRecognizedObjectObservation] else {
            return
        }
        var foundConfidentObject = false
        for observation in results where observation.confidence > confidenceThreshold {
            // Skip unlabeled observations instead of force-unwrapping
            // `labels.first!`, which could crash.
            guard let topLabel = observation.labels.first else { continue }
            foundConfidentObject = true
            let responseItem = Detection_Object(
                classification: topLabel.identifier,
                confidence: observation.confidence,
                boundingBox: observation.boundingBox
            )
            detection_object_array.append(responseItem)
        }
        // The original printed this once per low-confidence observation;
        // report "no object detected" once per frame instead.
        if !foundConfidentObject {
            print("Kein Objekt im Bild erkannt")  // "No object detected in the image"
        }
        print("\n------------------------------------------------------------------------------------------\n")
    }
}