mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-21 05:34:18 +08:00
[coreml] Use special throw macro when encountering CoreML API errors (#77429)
Summary: Error messages from `TORCH_CHECK` are stripped during production builds via `-DSTRIP_ERROR_MESSAGES`. This diff introduces a new macro `COREML_CHECK` which will always preserve the error message. This macro is used when encountering errors produced by CoreML API calls so that we can have enough context to debug. Test Plan: Test in pytorch playground: ``` arc focus2 -b pp-ios -a ModelRunner -a //xplat/caffe2/c10:c10Apple -a //xplat/caffe2:torch_mobile_coreApple -a //xplat/caffe2/fb/dynamic_pytorch:dynamic_pytorch_implApple -a //xplat/caffe2:coreml_delegateApple -a ModelRunnerDevOps -a //xplat/caffe2:torch_mobile_all_opsApple -fd --force-with-wrong-xcode ``` Differential Revision: D36378286 Pull Request resolved: https://github.com/pytorch/pytorch/pull/77429 Approved by: https://github.com/kimishpatel
This commit is contained in:
committed by
PyTorch MergeBot
parent
c34be4eb04
commit
93d84c0fcf
@@ -14,6 +14,24 @@
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
|
||||
// This is a utility macro that can be used to throw an exception when a CoreML
// API function produces a NSError. The exception will contain a message with
// useful info extracted from the NSError.
// Unlike TORCH_CHECK, the message survives production builds compiled with
// -DSTRIP_ERROR_MESSAGES, which is the whole point of this macro.
// NOTE: the error branch is annotated C10_UNLIKELY — CoreML API failures are
// the exceptional path, so the predictor should favor the no-error fallthrough.
// (The original used C10_LIKELY, which pessimizes every successful call.)
#define COREML_THROW_IF_ERROR(error, preamble)                                   \
  do {                                                                           \
    if (C10_UNLIKELY(error)) {                                                   \
      throw c10::Error(                                                          \
          {__func__, __FILE__, static_cast<uint32_t>(__LINE__)},                 \
          c10::str(                                                              \
              preamble,                                                          \
              " Error details: ",                                                \
              " Localized_description: ", error.localizedDescription.UTF8String, \
              " Domain: ", error.domain.UTF8String,                              \
              " Code: ", error.code,                                             \
              " User Info: ", error.userInfo.description.UTF8String));           \
    }                                                                            \
  } while (false)
|
||||
|
||||
@implementation PTMCoreMLFeatureProvider {
|
||||
NSUInteger _coremlVersion;
|
||||
std::vector<PTMCoreMLFeatureSpecs> _specs;
|
||||
@@ -173,10 +191,8 @@ static NSString* gModelCacheDirectory = @"";
|
||||
|
||||
// remove cached models if compilation failed.
|
||||
[self cleanup];
|
||||
TORCH_CHECK(
|
||||
false,
|
||||
"Error compiling the MLModel",
|
||||
[error localizedDescription].UTF8String);
|
||||
|
||||
COREML_THROW_IF_ERROR(error, "Error compiling the MLModel file!");
|
||||
return NO;
|
||||
}
|
||||
if (@available(iOS 12.0, macOS 10.14, *)) {
|
||||
@@ -201,10 +217,7 @@ static NSString* gModelCacheDirectory = @"";
|
||||
observer->onExitCompileModel(instance_key, false, true);
|
||||
}
|
||||
|
||||
TORCH_CHECK(
|
||||
false,
|
||||
"Error loading the MLModel",
|
||||
error.localizedDescription.UTF8String);
|
||||
COREML_THROW_IF_ERROR(error, "Error loading the MLModel file!");
|
||||
}
|
||||
|
||||
if (observer) {
|
||||
@@ -240,12 +253,8 @@ static NSString* gModelCacheDirectory = @"";
|
||||
[_mlModel predictionFromFeatures:inputFeature
|
||||
options:options
|
||||
error:&error];
|
||||
if (error) {
|
||||
TORCH_CHECK(
|
||||
false,
|
||||
"Error running the prediction",
|
||||
error.localizedDescription.UTF8String);
|
||||
}
|
||||
|
||||
COREML_THROW_IF_ERROR(error, "Error running CoreML inference!");
|
||||
|
||||
++_inferences;
|
||||
if (observer) {
|
||||
|
Reference in New Issue
Block a user