https://www.google.com/search?q=%22Failed+to+invoke+the+interpreter+with+error%22&oq=%22Failed+to+invoke+the+interpreter+with+error%22&aqs=chrome.0.69i59j69i60.4768j0j7&sourceid=chrome&ie=UTF-8
https://books.google.co.kr/books?id=e--oDwAAQBAJ&pg=PA145&lpg=PA145&dq=%22Failed+to+invoke+the+interpreter+with+error%22&source=bl&ots=CqkTVIC9Gb&sig=ACfU3U1g0P6zHckN-f8lYSDoypASxlZMrw&hl=ko&sa=X&ved=2ahUKEwi0wpGpv7vmAhUBMN4KHWDdAG4Q6AEwAHoECAkQAQ#v=onepage&q=%22Failed%20to%20invoke%20the%20interpreter%20with%20error%22&f=false
func runModel(onFrame pixelBuffer: CVPixelBuffer) -> Result? {
  let sourcePixelFormat = CVPixelBufferGetPixelFormatType(pixelBuffer)
  assert(sourcePixelFormat == kCVPixelFormatType_32ARGB ||
         sourcePixelFormat == kCVPixelFormatType_32BGRA ||
         sourcePixelFormat == kCVPixelFormatType_32RGBA)

  let imageChannels = 4
  assert(imageChannels >= inputChannels)

  // Crops the image to the biggest square in the center and scales it down to model dimensions.
  let scaledSize = CGSize(width: inputWidth, height: inputHeight)
  guard let thumbnailPixelBuffer = pixelBuffer.centerThumbnail(ofSize: scaledSize) else {
    return nil
  }

  let interval: TimeInterval
  let outputTensor: Tensor
  do {
    let inputTensor = try interpreter.input(at: 0)

    // Remove the alpha component from the image buffer to get the RGB data.
    guard let rgbData = rgbDataFromBuffer(
      thumbnailPixelBuffer,
      byteCount: batchSize * inputWidth * inputHeight * inputChannels,
      isModelQuantized: inputTensor.dataType == .uInt8
    ) else {
      print("Failed to convert the image buffer to RGB data.")
      return nil
    }

    // Copy the RGB data to the input `Tensor`.
    try interpreter.copy(rgbData, toInputAt: 0)

    // Run inference by invoking the `Interpreter`.
    let startDate = Date()
    try interpreter.invoke()
    interval = Date().timeIntervalSince(startDate) * 1000

    // Get the output `Tensor` to process the inference results.
    outputTensor = try interpreter.output(at: 0)
  } catch let error {
    print("Failed to invoke the interpreter with error: \(error.localizedDescription)")
    return nil
  }

  let results: [Float]
  switch outputTensor.dataType {
  case .uInt8:
    guard let quantization = outputTensor.quantizationParameters else {
      print("No results returned because the quantization values for the output tensor are nil.")
      return nil
    }
    let quantizedResults = [UInt8](outputTensor.data)
    results = quantizedResults.map {
      quantization.scale * Float(Int($0) - quantization.zeroPoint)
    }
  case .float32:
    results = [Float32](unsafeData: outputTensor.data) ?? []
  default:
    print("Output tensor data type \(outputTensor.dataType) is unsupported for this example app.")
    return nil
  }

  // Process the results.
  let topNInferences = getTopN(results: results)

  // Return the inference time and inference results.
  return Result(inferenceTime: interval, inferences: topNInferences)
}
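For reference, runModel(onFrame:) assumes an interpreter that has already been created and had its tensors allocated; the "Failed to invoke the interpreter with error" message above is printed whenever any call inside that do/catch block throws. Below is a minimal setup sketch using the TensorFlowLiteSwift pod; the file name "model.tflite", the makeInterpreter name, and the thread count are placeholders I'm assuming here, not part of the example app code above.

import TensorFlowLite

// Minimal sketch, assuming the TensorFlowLiteSwift pod and a bundled "model.tflite" (placeholder name).
private func makeInterpreter() -> Interpreter? {
  guard let modelPath = Bundle.main.path(forResource: "model", ofType: "tflite") else {
    print("Failed to find the model file in the app bundle.")
    return nil
  }
  do {
    var options = Interpreter.Options()
    options.threadCount = 2
    let interpreter = try Interpreter(modelPath: modelPath, options: options)
    // Tensors must be allocated once before copying input data or calling invoke().
    try interpreter.allocateTensors()
    return interpreter
  } catch {
    print("Failed to create the interpreter with error: \(error.localizedDescription)")
    return nil
  }
}

If allocateTensors() is skipped, or the model file doesn't match the data being copied in, the calls inside the do/catch block above are where the throw lands.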
Xcode 11:
/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Library/Developer/CoreSimulator/Profiles/Runtimes/iOS.simruntime/Contents/Resources/RuntimeRoot/usr/lib/
Put a copy of the .tbd into this path:
/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/usr/lib/
This is for the linker error complaining about architecture x86_64 in file.
I've faced the same issue after updating to Xcode 10. I resolved it by downloading libstdc++.6.0.9.tbd and libstdc++.6.tbd from https://github.com/Kila2/libstdc-.6.0.9.tbd.
Then I copied these files into my project folder and added them under Linked Frameworks and Libraries: Project > Target > General > Linked Frameworks and Libraries, click (+), click 'Add Other', navigate to the project folder in the 'Open File' dialog, select the libraries, and add them.
Now the project runs perfectly.
The converted TensorFlow model works fine on Android, but iOS apparently isn't supported out of the box.
For libstdc++ 6.0, I got past it by adding only the .tbd, not the dylib. Still doesn't work in the simulator. Dammit.