curl --request GET \
--url https://studio.edgeimpulse.com/v1/api/{projectId}/optimize/state \
--header 'x-api-key: <api-key>'
{
"success": true,
"config": {
"targetLatency": 0,
"targetDevice": {
"name": "cortex-m4f-80mhz",
"ram": 1024,
"rom": 1024
},
"name": "<string>",
"compiler": [
"<string>"
],
"precision": [
"<string>"
],
"trainingCycles": 5,
"tuningMaxTrials": 2,
"tuningWorkers": 1,
"initialTrials": 5,
"optimizationRounds": 3,
"trialsPerOptimizationRound": 3,
"minMACCS": 123,
"maxMACCS": 123,
"tuningAlgorithm": "random",
"notificationOnCompletion": true,
"importProjectMetrics": true,
"importResourceMetrics": true,
"numImportProjectMetrics": 123,
"numImportResourceMetrics": 123,
"enableSEM": true,
"accuracySEM": 123,
"latencySEM": 123,
"optimizationObjectives": [
{
"objective": "<string>",
"label": "<string>",
"weight": 123
}
],
"rawObjectives": "<string>",
"optimizationPrecision": "float32",
"earlyStopping": true,
"earlyStoppingWindowSize": 123,
"earlyStoppingImprovementBar": 123,
"MOMF": true,
"verboseLogging": true,
"disableConstraints": true,
"disableDeduplicate": true,
"maxTotalTrainingTime": 123,
"tunerSpaceOptions": {},
"space": [
{
"inputBlocks": [
{}
],
"dspBlocks": [
{}
],
"learnBlocks": [
[
{}
]
],
"parameters": {}
}
],
"searchSpaceTemplate": {
"identifier": "speech_keyword",
"classification": true,
"anomaly": true,
"regression": true
},
"searchSpaceSource": {
"type": "template",
"templateTitle": "<string>",
"runTitle": "<string>",
"impulseTitle": "<string>"
}
},
"status": {
"numPendingTrials": 123,
"numRunningTrials": 123,
"numCompletedTrials": 123,
"numFailedTrials": 123,
"numReadyWorkers": 123,
"numBusyWorkers": 123,
"numPendingWorkers": 123,
"status": "creating"
},
"tunerJobIsRunning": true,
"trials": [
{
"id": "<string>",
"name": "<string>",
"status": "pending",
"blocks": [
{
"id": 123,
"retries": 123,
"status": "pending",
"type": "input",
"lastActive": "2023-11-07T05:31:56Z",
"modelBlockIndex": 123
}
],
"impulse": {
"inputBlocks": [
{}
],
"dspBlocks": [
{}
],
"learnBlocks": [
{}
]
},
"lastCompletedEpoch": "2023-11-07T05:31:56Z",
"lastCompletedTraining": "2023-11-07T05:31:56Z",
"retries": 123,
"currentEpoch": 123,
"workerId": "<string>",
"experiment": "<string>",
"original_trial_id": "<string>",
"model": {},
"dspJobId": {
"training": 123,
"testing": 123
},
"learnJobId": 123,
"devicePerformance": {},
"optimizationRound": 123,
"progress": {
"epoch": 123,
"loss": 123,
"val_loss": 123,
"accuracy": 123,
"val_accuracy": 123
},
"metrics": {
"test": {
"float32": {
"type": "int8",
"loss": 123,
"confusionMatrix": [
[
31,
1,
0
],
[
2,
27,
3
],
[
1,
0,
39
]
],
"report": {},
"onDevicePerformance": [
{
"mcu": "<string>",
"name": "<string>",
"isDefault": true,
"latency": 123,
"tflite": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"eon": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"hasPerformance": true,
"eon_ram_optimized": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"profilingError": "<string>"
}
],
"visualization": "featureExplorer",
"isSupportedOnMcu": true,
"additionalMetrics": [
{
"name": "<string>",
"value": "<string>",
"fullPrecisionValue": 123,
"tooltipText": "<string>",
"link": "<string>"
}
],
"accuracy": 123,
"predictions": [
{
"sampleId": 123,
"startMs": 123,
"endMs": 123,
"prediction": "<string>",
"label": "<string>",
"predictionCorrect": true,
"f1Score": 123,
"anomalyScores": [
[
123
]
],
"boundingBoxes": [
{
"label": "<string>",
"x": 123,
"y": 123,
"width": 123,
"height": 123,
"score": 123
}
]
}
],
"mcuSupportError": "<string>",
"profilingJobId": 123,
"profilingJobFailed": true
},
"int8": {
"type": "int8",
"loss": 123,
"confusionMatrix": [
[
31,
1,
0
],
[
2,
27,
3
],
[
1,
0,
39
]
],
"report": {},
"onDevicePerformance": [
{
"mcu": "<string>",
"name": "<string>",
"isDefault": true,
"latency": 123,
"tflite": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"eon": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"hasPerformance": true,
"eon_ram_optimized": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"profilingError": "<string>"
}
],
"visualization": "featureExplorer",
"isSupportedOnMcu": true,
"additionalMetrics": [
{
"name": "<string>",
"value": "<string>",
"fullPrecisionValue": 123,
"tooltipText": "<string>",
"link": "<string>"
}
],
"accuracy": 123,
"predictions": [
{
"sampleId": 123,
"startMs": 123,
"endMs": 123,
"prediction": "<string>",
"label": "<string>",
"predictionCorrect": true,
"f1Score": 123,
"anomalyScores": [
[
123
]
],
"boundingBoxes": [
{
"label": "<string>",
"x": 123,
"y": 123,
"width": 123,
"height": 123,
"score": 123
}
]
}
],
"mcuSupportError": "<string>",
"profilingJobId": 123,
"profilingJobFailed": true
}
},
"train": {
"float32": {
"type": "int8",
"loss": 123,
"confusionMatrix": [
[
31,
1,
0
],
[
2,
27,
3
],
[
1,
0,
39
]
],
"report": {},
"onDevicePerformance": [
{
"mcu": "<string>",
"name": "<string>",
"isDefault": true,
"latency": 123,
"tflite": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"eon": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"hasPerformance": true,
"eon_ram_optimized": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"profilingError": "<string>"
}
],
"visualization": "featureExplorer",
"isSupportedOnMcu": true,
"additionalMetrics": [
{
"name": "<string>",
"value": "<string>",
"fullPrecisionValue": 123,
"tooltipText": "<string>",
"link": "<string>"
}
],
"accuracy": 123,
"predictions": [
{
"sampleId": 123,
"startMs": 123,
"endMs": 123,
"prediction": "<string>",
"label": "<string>",
"predictionCorrect": true,
"f1Score": 123,
"anomalyScores": [
[
123
]
],
"boundingBoxes": [
{
"label": "<string>",
"x": 123,
"y": 123,
"width": 123,
"height": 123,
"score": 123
}
]
}
],
"mcuSupportError": "<string>",
"profilingJobId": 123,
"profilingJobFailed": true
},
"int8": {
"type": "int8",
"loss": 123,
"confusionMatrix": [
[
31,
1,
0
],
[
2,
27,
3
],
[
1,
0,
39
]
],
"report": {},
"onDevicePerformance": [
{
"mcu": "<string>",
"name": "<string>",
"isDefault": true,
"latency": 123,
"tflite": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"eon": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"hasPerformance": true,
"eon_ram_optimized": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"profilingError": "<string>"
}
],
"visualization": "featureExplorer",
"isSupportedOnMcu": true,
"additionalMetrics": [
{
"name": "<string>",
"value": "<string>",
"fullPrecisionValue": 123,
"tooltipText": "<string>",
"link": "<string>"
}
],
"accuracy": 123,
"predictions": [
{
"sampleId": 123,
"startMs": 123,
"endMs": 123,
"prediction": "<string>",
"label": "<string>",
"predictionCorrect": true,
"f1Score": 123,
"anomalyScores": [
[
123
]
],
"boundingBoxes": [
{
"label": "<string>",
"x": 123,
"y": 123,
"width": 123,
"height": 123,
"score": 123
}
]
}
],
"mcuSupportError": "<string>",
"profilingJobId": 123,
"profilingJobFailed": true
}
},
"validation": {
"float32": {
"type": "int8",
"loss": 123,
"confusionMatrix": [
[
31,
1,
0
],
[
2,
27,
3
],
[
1,
0,
39
]
],
"report": {},
"onDevicePerformance": [
{
"mcu": "<string>",
"name": "<string>",
"isDefault": true,
"latency": 123,
"tflite": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"eon": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"hasPerformance": true,
"eon_ram_optimized": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"profilingError": "<string>"
}
],
"visualization": "featureExplorer",
"isSupportedOnMcu": true,
"additionalMetrics": [
{
"name": "<string>",
"value": "<string>",
"fullPrecisionValue": 123,
"tooltipText": "<string>",
"link": "<string>"
}
],
"accuracy": 123,
"predictions": [
{
"sampleId": 123,
"startMs": 123,
"endMs": 123,
"prediction": "<string>",
"label": "<string>",
"predictionCorrect": true,
"f1Score": 123,
"anomalyScores": [
[
123
]
],
"boundingBoxes": [
{
"label": "<string>",
"x": 123,
"y": 123,
"width": 123,
"height": 123,
"score": 123
}
]
}
],
"mcuSupportError": "<string>",
"profilingJobId": 123,
"profilingJobFailed": true
},
"int8": {
"type": "int8",
"loss": 123,
"confusionMatrix": [
[
31,
1,
0
],
[
2,
27,
3
],
[
1,
0,
39
]
],
"report": {},
"onDevicePerformance": [
{
"mcu": "<string>",
"name": "<string>",
"isDefault": true,
"latency": 123,
"tflite": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"eon": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"hasPerformance": true,
"eon_ram_optimized": {
"ramRequired": 123,
"romRequired": 123,
"arenaSize": 123,
"modelSize": 123
},
"customMetrics": [
{
"name": "<string>",
"value": "<string>"
}
],
"profilingError": "<string>"
}
],
"visualization": "featureExplorer",
"isSupportedOnMcu": true,
"additionalMetrics": [
{
"name": "<string>",
"value": "<string>",
"fullPrecisionValue": 123,
"tooltipText": "<string>",
"link": "<string>"
}
],
"accuracy": 123,
"predictions": [
{
"sampleId": 123,
"startMs": 123,
"endMs": 123,
"prediction": "<string>",
"label": "<string>",
"predictionCorrect": true,
"f1Score": 123,
"anomalyScores": [
[
123
]
],
"boundingBoxes": [
{
"label": "<string>",
"x": 123,
"y": 123,
"width": 123,
"height": 123,
"score": 123
}
]
}
],
"mcuSupportError": "<string>",
"profilingJobId": 123,
"profilingJobFailed": true
}
}
},
"impulseAddedToProject": {
"impulseId": 123,
"link": "<string>"
},
"createdInPostProcessing": true
}
],
"projectDataType": "audio",
"workers": [
{
"workerId": "<string>",
"status": "pending"
}
],
"nextRunIndex": 123,
"canExtendSearch": true,
"isWhitelabel": true,
"totalTrainingTimeExceeded": true,
"error": "<string>",
"tunerJobId": 123,
"tunerCoordinatorJobId": 123,
"continuationJobId": 123,
"tuningAlgorithm": "random",
"jobError": "<string>"
}
Retrieves the EON tuner state
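The same request can be made from a script. A minimal sketch in Python using the requests library (the project ID and API key below are placeholders):

import requests

PROJECT_ID = 12345   # placeholder project ID
API_KEY = "ei_xxx"   # placeholder Edge Impulse API key

# GET the current EON tuner state for the project
url = f"https://studio.edgeimpulse.com/v1/api/{PROJECT_ID}/optimize/state"
resp = requests.get(url, headers={"x-api-key": API_KEY})
resp.raise_for_status()

state = resp.json()
if not state["success"]:
    raise RuntimeError(state.get("error", "request failed"))
print(state["status"]["status"], "-", state["status"]["numCompletedTrials"], "trials completed")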
Project ID
Current EON tuner state
Whether the operation succeeded
Target latency in ms
0
Maximum number of training cycles
5
Maximum number of trials
2
Maximum number of parallel workers/jobs
1
Number of initial trials
5
Number of optimization rounds
3
Number of trials per optimization round
3
Tuning algorithm to use to search hyperparameter space
random, hyperband, bayesian, custom
Whether to import metrics for previous EON tuner runs in the same project to accelerate the hyperparameter search process
Whether to import resource usage (RAM/ROM/latency) metrics to accelerate the hyperparameter search process
Number of project trials to import
Number of resource usage trials to import
Enable standard error of the mean (SEM)
Standard error of the trial accuracy mean
Standard error of the trial latency mean
Hyperparameter optimization objectives and corresponding weights
Hyperparameter optimization objectives + weights in string format
Model variant to optimize for
float32, int8
Enable trial level early stopping based on loss metrics during training
Stops the EON tuner if the feasible (mean) objective has not improved over the past “window_size” iterations
Threshold (in [0,1]) for considering relative improvement over the best point.
Enable Multi-fidelity Multi-Objective optimization
Enable verbose logging
Disable search constraints
Disable trial deduplication
Maximum total training time in seconds
List of impulses specifying the EON Tuner search space
Input Blocks that are part of this impulse
DSP Blocks that are part of this impulse
Learning Blocks that are part of this impulse
Hyperparameters with potential values that can be used in any block in this impulse
Search space template
Search space template identifier
speech_keyword, speech_continuous, audio_event, audio_continuous, visual, motion_event, motion_continuous, audio_syntiant, object_detection_bounding_boxes, object_detection_centroids, visual_ad
Whether a classification block should be added to the search space
Whether an anomaly block should be added to the search space
Whether a regression block should be added to the search space
Search space source
Search space source type
template, run, impulse, custom
Search space source template title
Search space source run title
Search space source impulse title
creating, ready, running, completed
Whether the job is active (if false => finished)
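Because the status value progresses from creating through completed, and tunerJobIsRunning turns false once the run has finished, a client can poll this endpoint to wait for a run to complete. A hedged sketch (the wait_for_tuner helper and the 30-second poll interval are illustrative, not part of the API):

import time
import requests

def get_state(project_id, api_key):
    # Wraps the GET /v1/api/{projectId}/optimize/state call shown above
    url = f"https://studio.edgeimpulse.com/v1/api/{project_id}/optimize/state"
    r = requests.get(url, headers={"x-api-key": api_key})
    r.raise_for_status()
    return r.json()

def wait_for_tuner(project_id, api_key, poll_seconds=30):
    # Poll the tuner state until the job is no longer running
    while True:
        state = get_state(project_id, api_key)
        s = state["status"]
        print(f"{s['status']}: {s['numCompletedTrials']} completed, "
              f"{s['numRunningTrials']} running, {s['numFailedTrials']} failed")
        if not state["tunerJobIsRunning"]:
            return state
        time.sleep(poll_seconds)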
pending, running, completed, failed
pending, running, completed, failed
input, dsp, learn
Index of corresponding DSP/learn block in the impulse model passed to createTrial()
The type of model
int8, float32, akida, requiresRetrain
The model's loss on the validation set after training
[[31, 1, 0], [2, 27, 3], [1, 0, 39]]
Precision, recall, F1 and support scores
If false, then no metrics are available for this target
Specific error during profiling (e.g. model not supported)
featureExplorer, dataExplorer, none
The model's accuracy on the validation set after training
Only set for object detection projects
Only set for visual anomaly projects. 2D array of shape (n, n) with raw anomaly scores, where n varies based on the image input size and the specific visual anomaly algorithm used. The scores correspond to each grid cell in the image's spatial matrix.
Only set for object detection projects. Coordinates are scaled 0..1, not absolute values.
If this is set, then we're still profiling this model. Subscribe to job updates to see when it's done (afterward the metadata will be updated).
If this is set, then the profiling job failed (get the status by getting the job logs for 'profilingJobId').
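The example confusionMatrix above is a per-class count matrix, so overall accuracy can be recovered from its diagonal (treating rows as true labels and columns as predictions, which is the usual convention but an assumption here; the accuracy field reported by the API is authoritative):

# Example confusion matrix from the response above
cm = [[31, 1, 0],
      [2, 27, 3],
      [1, 0, 39]]

correct = sum(cm[i][i] for i in range(len(cm)))  # 31 + 27 + 39 = 97
total = sum(sum(row) for row in cm)              # 104 samples in total
print(correct / total)                           # ~0.933 overall accuracy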
The type of model
int8, float32, akida, requiresRetrain
The model's loss on the validation set after training
[[31, 1, 0], [2, 27, 3], [1, 0, 39]]
Precision, recall, F1 and support scores
If false, then no metrics are available for this target
Specific error during profiling (e.g. model not supported)
featureExplorer, dataExplorer, none
The model's accuracy on the validation set after training
Only set for object detection projects
Only set for visual anomaly projects. 2D array of shape (n, n) with raw anomaly scores, where n varies based on the image input size and the specific visual anomaly algorithm used. The scores correspond to each grid cell in the image's spatial matrix.
Only set for object detection projects. Coordinates are scaled 0..1, not absolute values.
If this is set, then we're still profiling this model. Subscribe to job updates to see when it's done (afterward the metadata will be updated).
If this is set, then the profiling job failed (get the status by getting the job logs for 'profilingJobId').
The type of model
int8, float32, akida, requiresRetrain
The model's loss on the validation set after training
[[31, 1, 0], [2, 27, 3], [1, 0, 39]]
Precision, recall, F1 and support scores
If false, then no metrics are available for this target
Specific error during profiling (e.g. model not supported)
featureExplorer, dataExplorer, none
The model's accuracy on the validation set after training
Only set for object detection projects
Only set for visual anomaly projects. 2D array of shape (n, n) with raw anomaly scores, where n varies based on the image input size and the specific visual anomaly algorithm used. The scores correspond to each grid cell in the image's spatial matrix.
Only set for object detection projects. Coordinates are scaled 0..1, not absolute values.
If this is set, then we're still profiling this model. Subscribe to job updates to see when it's done (afterward the metadata will be updated).
If this is set, then the profiling job failed (get the status by getting the job logs for 'profilingJobId').
The type of model
int8, float32, akida, requiresRetrain
The model's loss on the validation set after training
[[31, 1, 0], [2, 27, 3], [1, 0, 39]]
Precision, recall, F1 and support scores
If false, then no metrics are available for this target
Specific error during profiling (e.g. model not supported)
featureExplorer, dataExplorer, none
The model's accuracy on the validation set after training
Only set for object detection projects
Only set for visual anomaly projects. 2D array of shape (n, n) with raw anomaly scores, where n varies based on the image input size and the specific visual anomaly algorithm used. The scores correspond to each grid cell in the image's spatial matrix.
Only set for object detection projects. Coordinates are scaled 0..1, not absolute values.
If this is set, then we're still profiling this model. Subscribe to job updates to see when it's done (afterward the metadata will be updated).
If this is set, then the profiling job failed (get the status by getting the job logs for 'profilingJobId').
The type of model
int8, float32, akida, requiresRetrain
The model's loss on the validation set after training
[[31, 1, 0], [2, 27, 3], [1, 0, 39]]
Precision, recall, F1 and support scores
If false, then no metrics are available for this target
Custom, device-specific performance metrics
Specific error during profiling (e.g. model not supported)
featureExplorer, dataExplorer, none
The model's accuracy on the validation set after training
Only set for object detection projects
Only set for visual anomaly projects. 2D array of shape (n, n) with raw anomaly scores, where n varies based on the image input size and the specific visual anomaly algorithm used. The scores correspond to each grid cell in the image's spatial matrix.
Only set for object detection projects. Coordinates are scaled 0..1, not absolute values.
If this is set, then we're still profiling this model. Subscribe to job updates to see when it's done (afterward the metadata will be updated).
If this is set, then the profiling job failed (get the status by getting the job logs for 'profilingJobId').
The type of model
int8, float32, akida, requiresRetrain
The model's loss on the validation set after training
[[31, 1, 0], [2, 27, 3], [1, 0, 39]]
Precision, recall, F1 and support scores
If false, then no metrics are available for this target
Specific error during profiling (e.g. model not supported)
featureExplorer, dataExplorer, none
The model's accuracy on the validation set after training
Only set for object detection projects
Only set for visual anomaly projects. 2D array of shape (n, n) with raw anomaly scores, where n varies based on the image input size and the specific visual anomaly algorithm used. The scores correspond to each grid cell in the image's spatial matrix.
Only set for object detection projects. Coordinates are scaled 0..1, not absolute values.
If this is set, then we're still profiling this model. Subscribe to job updates to see when it's done (afterward the metadata will be updated).
If this is set, then the profiling job failed (get the status by getting the job logs for 'profilingJobId').
audio, image, motion, other
Index of the next run to be created. Used to pre-populate the name of the next run.
Whether the search can be extended to evaluate more candidate models. Extending is possible if the search space contains candidate models that are expected to perform better than the current best candidate, or if the previous run was paused because the total runtime limit was exceeded.
Whether the total training time has exceeded the defined limit for the current run.
Optional error description (set if 'success' was false)
The actual tuner process; job message events will be tagged with this ID
The coordinator pod; attach the job runner to this process for finished events
Job ID of the initial job that this job continues the hyperparameter search process for
Tuning algorithm to use to search hyperparameter space
random, hyperband, bayesian, custom
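Once a run has finished, the trials array can be filtered and ranked client-side, for example by int8 test-set accuracy. A sketch under the assumption that completed trials have their nested test metrics populated (field names follow the response schema above; best_trial_by_int8_test_accuracy is an illustrative helper, not part of the API):

def best_trial_by_int8_test_accuracy(state):
    # Keep completed trials that report an int8 accuracy on the test set
    completed = [
        t for t in state.get("trials", [])
        if t.get("status") == "completed"
        and t.get("metrics", {}).get("test", {}).get("int8", {}).get("accuracy") is not None
    ]
    if not completed:
        return None
    return max(completed, key=lambda t: t["metrics"]["test"]["int8"]["accuracy"])

# Usage, with `state` coming from the GET /optimize/state call above:
# best = best_trial_by_int8_test_accuracy(state)
# if best is not None:
#     print(best["name"], best["metrics"]["test"]["int8"]["accuracy"])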