Convert Vision boundingBox from VNFaceObservation to rect to draw on image

I tried multiple approaches, and this is what worked best for me (here self.previewLayer is an AVCaptureVideoPreviewLayer):

dispatch_async(dispatch_get_main_queue(), ^{
    VNDetectedObjectObservation *newObservation = request.results.firstObject;
    if (newObservation) {
        self.lastObservation = newObservation;

        // Vision's boundingBox is normalized (0...1) with the origin at the
        // lower-left corner, so flip the y coordinate before handing it to the
        // preview layer, which expects a top-left origin (you may need to
        // adjust this for your capture orientation).
        CGRect transformedRect = newObservation.boundingBox;
        transformedRect.origin.y = 1 - transformedRect.origin.y;

        // The preview layer maps the normalized rect into its own coordinate
        // space, taking videoGravity and scaling into account.
        CGRect convertedRect = [self.previewLayer rectForMetadataOutputRectOfInterest:transformedRect];
        self.highlightView.frame = convertedRect;
    }
});
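
If you're working in Swift, here's a rough equivalent (just a sketch, assuming self.previewLayer is an AVCaptureVideoPreviewLayer and self.highlightView is your overlay view):

DispatchQueue.main.async {
    guard let newObservation = request.results?.first as? VNDetectedObjectObservation else { return }
    self.lastObservation = newObservation

    // Flip the y coordinate: Vision's normalized rect has a lower-left origin.
    var rect = newObservation.boundingBox
    rect.origin.y = 1 - rect.origin.y

    // layerRectConverted(fromMetadataOutputRect:) is the Swift counterpart of
    // rectForMetadataOutputRectOfInterest: and accounts for the layer's videoGravity.
    self.highlightView.frame = self.previewLayer.layerRectConverted(fromMetadataOutputRect: rect)
}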

You have to translate and scale the normalized bounding box according to the size of the view (or image) you are drawing into. For example:

func drawVisionRequestResults(_ results: [VNFaceObservation]) {
    print("face count = \(results.count)")
    previewView.removeMask()

    // Flip the y-axis: Vision's origin is at the lower-left corner,
    // UIKit's is at the upper-left.
    let transform = CGAffineTransform(scaleX: 1, y: -1).translatedBy(x: 0, y: -self.view.frame.height)

    // Scale the normalized (0...1) coordinates up to the view's size.
    let scale = CGAffineTransform.identity.scaledBy(x: self.view.frame.width, y: self.view.frame.height)

    for face in results {
        // The bounding box is normalized to the dimensions of the processed image,
        // with the origin at the image's lower-left corner.
        let faceBounds = face.boundingBox.applying(scale).applying(transform)
        previewView.drawLayer(in: faceBounds)
    }
}
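
For completeness, here is a sketch of how a face-detection request could feed into that method; the VNImageRequestHandler setup and the cgImage variable are assumptions, not part of the code above:

// Sketch: run face detection on a CGImage (assumed to be available as `cgImage`)
// and pass the observations to drawVisionRequestResults on the main queue.
let faceRequest = VNDetectFaceRectanglesRequest { [weak self] request, error in
    guard let observations = request.results as? [VNFaceObservation] else { return }
    DispatchQueue.main.async {
        self?.drawVisionRequestResults(observations)
    }
}

let handler = VNImageRequestHandler(cgImage: cgImage, orientation: .up, options: [:])
try? handler.perform([faceRequest])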

There are also built-in Vision functions that do the conversion for you. To convert a rect from normalized form into image (pixel) coordinates, use this:

func VNImageRectForNormalizedRect(_ normalizedRect: CGRect, _ imageWidth: Int, _ imageHeight: Int) -> CGRect

And vice versa:

func VNNormalizedRectForImageRect(_ imageRect: CGRect, _ imageWidth: Int, _ imageHeight: Int) -> CGRect
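
For example (the 1920×1080 image size here is just an assumed value):

// Convert a face observation's normalized bounding box into pixel coordinates
// of a 1920x1080 image. Note the result still uses Vision's lower-left origin.
let imageRect = VNImageRectForNormalizedRect(face.boundingBox, 1920, 1080)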

There are similar functions for points:

func VNNormalizedFaceBoundingBoxPointForLandmarkPoint(_ faceLandmarkPoint: vector_float2, _ faceBoundingBox: CGRect, _ imageWidth: Int, _ imageHeight: Int) -> CGPoint
func VNImagePointForNormalizedPoint(_ normalizedPoint: CGPoint, _ imageWidth: Int, _ imageHeight: Int) -> CGPoint
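
For example (again a sketch with an assumed 1920×1080 image):

// Convert a normalized point (lower-left origin) into pixel coordinates
// of a 1920x1080 image.
let imagePoint = VNImagePointForNormalizedPoint(CGPoint(x: 0.5, y: 0.5), 1920, 1080)
// imagePoint is (960.0, 540.0)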