in AmazonChimeSDK/AmazonChimeSDKTests/audiovideo/video/backgroundfilter/BackgroundFilterTest.swift [65:128]
func testBackgroundBlurVideoFrameProcessor() {
    #if canImport(AmazonChimeSDKMachineLearning)
    guard let frame = videoFrameGenerator.generateVideoFrame(image: self.testImage!) else {
        return
    }
    let videoSinkMock = mock(VideoSink.self)
    var processedImage: UIImage?
    var videoFrameReceivedExpectation: XCTestExpectation?
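    // Stub the mocked sink: capture each processed frame as a UIImage and
    // fulfill the expectation once the frame arrives.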
    given(videoSinkMock.onVideoFrameReceived(frame: any())) ~> { [self] videoFrame in
        let result = self.processImage(frame: videoFrame)
        processedImage = result.image
        videoFrameReceivedExpectation!.fulfill()
    }
    let backgroundBlurConfigurations = BackgroundBlurConfiguration(
        logger: ConsoleLogger(name: "testBackgroundBlurVideoFrameProcessor"))
    let processor = BackgroundBlurVideoFrameProcessor(backgroundBlurConfiguration: backgroundBlurConfigurations)
    processor.addVideoSink(sink: videoSinkMock)
    // Verify the processed output against the expected blur image for each blur strength.
    let blurList = [BackgroundBlurStrength.high]
    for index in 0..<blurList.count {
        processedImage = nil
        videoFrameReceivedExpectation = expectation(description: "Video frame is received for index \(index)")
        processor.setBlurStrength(newBlurStrength: blurList[index])
        processor.onVideoFrameReceived(frame: frame)
        // Wait for the image to generate before proceeding to avoid non-determinism.
        wait(for: [videoFrameReceivedExpectation!], timeout: 1)
        XCTAssertNotNil(processedImage)
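        // Downscale both the processed and expected images to the same size
        // before comparing their raw pixel data.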
        guard let gotCgImage = self.resize(image: processedImage!, to: downscaledSize).cgImage,
              let gotCgImageData = gotCgImage.dataProvider?.data,
              let gotCgImageBytes = CFDataGetBytePtr(gotCgImageData) else {
            XCTFail("Couldn't access CGImage data")
            return
        }
        guard let expectedCgImage = self.resize(image: self.expectedBlurImage!, to: downscaledSize).cgImage,
              let expectedCgImageData = expectedCgImage.dataProvider?.data,
              let expectedCgImageBytes = CFDataGetBytePtr(expectedCgImageData) else {
            XCTFail("Couldn't access CGImage data")
            return
        }
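        // A byte-level comparison is only meaningful if both images share the
        // same color model, bit depth, and dimensions.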
        XCTAssert(gotCgImage.colorSpace?.model == .rgb)
        XCTAssert(expectedCgImage.colorSpace?.model == .rgb)
        XCTAssertEqual(expectedCgImage.bitsPerPixel, gotCgImage.bitsPerPixel)
        XCTAssertEqual(expectedCgImage.bitsPerComponent, gotCgImage.bitsPerComponent)
        XCTAssertEqual(expectedCgImage.height, gotCgImage.height)
        XCTAssertEqual(expectedCgImage.width, gotCgImage.width)
        let matchPercentage = self.getCGImageMatchPercentage(
            expectedCgImage: expectedCgImage, gotCgImage: gotCgImage,
            expectedCgImageBytes: expectedCgImageBytes, gotCgImageBytes: gotCgImageBytes)
        XCTAssert(matchPercentage >= expectedMatchPercentage,
                  "Expected match percentage \(matchPercentage) to be >= \(expectedMatchPercentage)")
    }
    #else
    XCTFail("AmazonChimeSDKMachineLearning could not be imported.")
    #endif
}