curl --request GET \
--url https://studio.edgeimpulse.com/v1/api/{projectId}/pretrained-model \
--header 'x-api-key: <api-key>'
{
"success": true,
"specificDeviceSelected": true,
"availableModelTypes": [
"int8"
],
"error": "<string>",
"model": {
"fileName": "<string>",
"inputs": [
{
"dataType": "int8",
"name": "<string>",
"shape": [
123
],
"quantizationScale": 123,
"quantizationZeroPoint": 123
}
],
"outputs": [
{
"dataType": "int8",
"name": "<string>",
"shape": [
123
],
"quantizationScale": 123,
"quantizationZeroPoint": 123
}
],
"profileInfo": {
"table": {
"variant": "int8",
"lowEndMcu": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"mcuSupportError": "<string>"
},
"highEndMcu": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"mcuSupportError": "<string>"
},
"highEndMcuPlusAccelerator": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"mcuSupportError": "<string>"
},
"mpu": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"rom": 123
},
"gpuOrMpuAccelerator": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"rom": 123
}
},
"float32": {
"variant": "int8",
"device": "<string>",
"tfliteFileSizeBytes": 123,
"isSupportedOnMcu": true,
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"hasPerformance": true,
"memory": {
"tflite": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eon": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123,
"arenaSize": 123
}
},
"timePerInferenceMs": 123,
"mcuSupportError": "<string>",
"profilingError": "<string>"
},
"int8": {
"variant": "int8",
"device": "<string>",
"tfliteFileSizeBytes": 123,
"isSupportedOnMcu": true,
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"hasPerformance": true,
"memory": {
"tflite": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eon": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123,
"arenaSize": 123
}
},
"timePerInferenceMs": 123,
"mcuSupportError": "<string>",
"profilingError": "<string>"
}
},
"profileJobId": 123,
"profileJobFailed": true,
"supportsTFLite": true
},
"modelInfo": {
"input": {
"inputType": "time-series",
"frequencyHz": 123,
"windowLengthMs": 123
},
"model": {
"modelType": "classification",
"labels": [
"<string>"
]
}
}
}
Receive info back about the earlier uploaded pretrained model (via uploadPretrainedModel) input/output tensors. If you want to deploy a pretrained model from the API, see startDeployPretrainedModelJob.
curl --request GET \
--url https://studio.edgeimpulse.com/v1/api/{projectId}/pretrained-model \
--header 'x-api-key: <api-key>'
{
"success": true,
"specificDeviceSelected": true,
"availableModelTypes": [
"int8"
],
"error": "<string>",
"model": {
"fileName": "<string>",
"inputs": [
{
"dataType": "int8",
"name": "<string>",
"shape": [
123
],
"quantizationScale": 123,
"quantizationZeroPoint": 123
}
],
"outputs": [
{
"dataType": "int8",
"name": "<string>",
"shape": [
123
],
"quantizationScale": 123,
"quantizationZeroPoint": 123
}
],
"profileInfo": {
"table": {
"variant": "int8",
"lowEndMcu": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"mcuSupportError": "<string>"
},
"highEndMcu": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"mcuSupportError": "<string>"
},
"highEndMcuPlusAccelerator": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"memory": {
"tflite": {
"ram": 123,
"rom": 123
},
"eon": {
"ram": 123,
"rom": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123
}
},
"mcuSupportError": "<string>"
},
"mpu": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"rom": 123
},
"gpuOrMpuAccelerator": {
"description": "<string>",
"supported": true,
"timePerInferenceMs": 123,
"rom": 123
}
},
"float32": {
"variant": "int8",
"device": "<string>",
"tfliteFileSizeBytes": 123,
"isSupportedOnMcu": true,
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"hasPerformance": true,
"memory": {
"tflite": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eon": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123,
"arenaSize": 123
}
},
"timePerInferenceMs": 123,
"mcuSupportError": "<string>",
"profilingError": "<string>"
},
"int8": {
"variant": "int8",
"device": "<string>",
"tfliteFileSizeBytes": 123,
"isSupportedOnMcu": true,
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"hasPerformance": true,
"memory": {
"tflite": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eon": {
"ram": 123,
"rom": 123,
"arenaSize": 123
},
"eonRamOptimized": {
"ram": 123,
"rom": 123,
"arenaSize": 123
}
},
"timePerInferenceMs": 123,
"mcuSupportError": "<string>",
"profilingError": "<string>"
}
},
"profileJobId": 123,
"profileJobFailed": true,
"supportsTFLite": true
},
"modelInfo": {
"input": {
"inputType": "time-series",
"frequencyHz": 123,
"windowLengthMs": 123
},
"model": {
"modelType": "classification",
"labels": [
"<string>"
]
}
}
}
Project ID
Impulse ID. If this is unset then the default impulse is used.
OK
Whether the operation succeeded
Whether a specific device was selected for performance profiling
The types of model that are available
Available options: int8, float32, akida, requiresRetrain
Optional error description (set if 'success' was false)
Show child attributes
Show child attributes
Performance for a range of device types. Note that MPU is referred to as CPU in Studio, as MPU and CPU are treated equivalent for performance estimation.
Show child attributes
Available options: int8, float32
Show child attributes
Show child attributes
Show child attributes
Show child attributes
Show child attributes
Show child attributes
Show child attributes
Available options: int8, float32, akida
If false, then no metrics are available for this target
Show child attributes
Show child attributes
Estimated amount of RAM required by the model, measured in bytes
Estimated amount of ROM required by the model, measured in bytes
Estimated arena size required for model inference, measured in bytes
Show child attributes
Estimated amount of RAM required by the model, measured in bytes
Estimated amount of ROM required by the model, measured in bytes
Estimated arena size required for model inference, measured in bytes
Show child attributes
Estimated amount of RAM required by the model, measured in bytes
Estimated amount of ROM required by the model, measured in bytes
Estimated arena size required for model inference, measured in bytes
Specific error during profiling (e.g. model not supported)
Show child attributes
Available options: int8, float32, akida
If false, then no metrics are available for this target
Show child attributes
Show child attributes
Estimated amount of RAM required by the model, measured in bytes
Estimated amount of ROM required by the model, measured in bytes
Estimated arena size required for model inference, measured in bytes
Show child attributes
Estimated amount of RAM required by the model, measured in bytes
Estimated amount of ROM required by the model, measured in bytes
Estimated arena size required for model inference, measured in bytes
Show child attributes
Estimated amount of RAM required by the model, measured in bytes
Estimated amount of ROM required by the model, measured in bytes
Estimated arena size required for model inference, measured in bytes
Specific error during profiling (e.g. model not supported)
If this is set, then we're still profiling this model. Subscribe to job updates to see when it's done (afterward the metadata will be updated).
If this is set, then the profiling job failed (get the status by getting the job logs for 'profileJobId').
Show child attributes
Was this page helpful?