
STM32Cube-AI: Which input format for an inference?

LWilk.2
Associate

Hi guys,

First-time poster here and still fairly new to deploying models, so sorry if I forget to mention something.

My project:

I developed an ML model in Python which works and takes float numbers as input. Now I want to deploy it on an STM32H747I-DISCO development board, as a training exercise, using the STM32Cube-AI component. Validation via the "validation application" was fine, so I wanted to write my own code in order to pre- and post-process the data. For that I used a dummy dataset for which I know the expected results and ran an inference with the following code. It gives me no error, but the results of the inference are wrong/undefined. The result of the inference should be ~1, but it gives me the following:

(screenshot: 0693W00000QMbjuQAD.png)

The input is defined such that each float consists of four ai_i8 values, which together should form the float number 1 (i.e. mantissa, exponent and sign). In the code it looks like this:

(screenshot: 0693W00000QMbknQAD.png)

It would be great if someone could explain how I should pass my input to the ai_network_run function :) Thanks in advance!
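To clarify what I mean by "one float = four ai_i8", here is a minimal host-side sketch of that byte view (my assumption: little-endian IEEE-754 floats, as on the Cortex-M7; the small buffer simply stands in for four entries of data_in_1):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    int8_t buf[4];                 /* stands in for four ai_i8 entries of data_in_1 */
    float  x = 1.0f;               /* IEEE-754 single precision: 0x3F800000 */

    memcpy(buf, &x, sizeof(x));    /* copy the float's raw bytes instead of hand-picking them */

    /* On a little-endian target this prints 0x00 0x00 0x80 0x3F */
    for (size_t i = 0; i < sizeof(buf); i++)
        printf("byte %zu: 0x%02X\n", i, (unsigned)(uint8_t)buf[i]);

    return 0;
}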

Code

In my main function I first call MX_X_CUBE_AI_Init() and then MX_X_CUBE_AI_Process() in the main loop, roughly as sketched below.
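A hypothetical outline of that call flow (the real main.c contains the full CubeMX-generated setup; SystemClock_Config() and the includes are assumed here):

#include "main.h"            /* HAL and the CubeMX-generated prototypes */
#include "app_x-cube-ai.h"   /* MX_X_CUBE_AI_Init() / MX_X_CUBE_AI_Process() */

int main(void)
{
  HAL_Init();
  SystemClock_Config();      /* CubeMX-generated clock setup (assumed) */

  MX_X_CUBE_AI_Init();       /* create the network and bind the I/O buffers */

  while (1)
  {
    MX_X_CUBE_AI_Process();  /* pre-process, run one inference, post-process */
  }
}

The file in which MX_X_CUBE_AI_Init() and MX_X_CUBE_AI_Process() are defined: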

#ifdef __cplusplus
 extern "C" {
#endif

/* Includes ------------------------------------------------------------------*/
#if defined ( __ICCARM__ )
#elif defined ( __CC_ARM ) || ( __GNUC__ )
#endif

/* System headers */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <inttypes.h>
#include <string.h>

#include "app_x-cube-ai.h"
#include "main.h"
#include "ai_datatypes_defines.h"
#include "network.h"
#include "network_data.h"
#include "val_data.h"

/* USER CODE BEGIN includes */
/* USER CODE END includes */

/* IO buffers ----------------------------------------------------------------*/
AI_ALIGNED(4) ai_i8 data_in_1[AI_NETWORK_IN_1_SIZE_BYTES] = {0,0,0,2,0,0,0,2,0,0,0,2,0,0,0,2,0,0,0,2,0,0,0,2,0,0,0,2,0,0,0,2,0,0,0,2};
//ai_float data_in_1[9];
ai_i8* data_ins[AI_NETWORK_IN_NUM] = {
  data_in_1
};

AI_ALIGNED(4) ai_i8 data_out_1[AI_NETWORK_OUT_1_SIZE_BYTES];
//ai_float data_out_1;
ai_i8* data_outs[AI_NETWORK_OUT_NUM] = {
  data_out_1
};

/* Activations buffers -------------------------------------------------------*/
AI_ALIGNED(32)
static uint8_t pool0[AI_NETWORK_DATA_ACTIVATION_1_SIZE];

ai_handle data_activations0[] = {pool0};

/* AI objects ----------------------------------------------------------------*/
static ai_handle network = AI_HANDLE_NULL;

static ai_buffer* ai_input;
static ai_buffer* ai_output;

static void ai_log_err(const ai_error err, const char *fct)
{
  /* USER CODE BEGIN log */
  if (fct)
    printf("TEMPLATE - Error (%s) - type=0x%02x code=0x%02x\r\n", fct,
           err.type, err.code);
  else
    printf("TEMPLATE - Error - type=0x%02x code=0x%02x\r\n", err.type, err.code);

  do {} while (1);
  /* USER CODE END log */
}

static int ai_boostrap(ai_handle *act_addr)
{
  ai_error err;

  /* Create and initialize an instance of the model */
  err = ai_network_create_and_init(&network, act_addr, NULL);
  if (err.type != AI_ERROR_NONE) {
    ai_log_err(err, "ai_network_create_and_init");
    return -1;
  }

  ai_input = ai_network_inputs_get(network, NULL);
  ai_output = ai_network_outputs_get(network, NULL);

  for (int idx = 0; idx < AI_NETWORK_IN_NUM; idx++) {
    ai_input[idx].data = data_ins[idx];
  }

  for (int idx = 0; idx < AI_NETWORK_OUT_NUM; idx++) {
    ai_output[idx].data = data_outs[idx];
  }

  return 0;
}

static int ai_run(void)
{
  ai_i32 batch;

  batch = ai_network_run(network, &ai_input[0], &ai_output[0]);
  if (batch != 1) {
    ai_log_err(ai_network_get_error(network), "ai_network_run");
    return -1;
  }

  return 0;
}

/* USER CODE BEGIN 2 */
int acquire_and_process_data(ai_i8* data[])
{
  for (int idx = 0; idx < AI_NETWORK_IN_1_SIZE; idx++)
  {
    // ((ai_float*)data_in_1)[idx] = (ai_float)1;
  }
  return 0;
}

int post_process(ai_i8* data[])
{
  //result = *data;
  return 0;
}
/* USER CODE END 2 */

/* Entry points --------------------------------------------------------------*/
void MX_X_CUBE_AI_Init(void)
{
  /* USER CODE BEGIN 5 */
  printf("\r\nTEMPLATE - initialization\r\n");

  ai_boostrap(data_activations0);
  /* USER CODE END 5 */
}

void MX_X_CUBE_AI_Process(void)
{
  /* USER CODE BEGIN 6 */
  int res = -1;
  float y_val = 0;

  printf("TEMPLATE - run - main loop\r\n");

  if (network) {
    /* 1 - acquire and pre-process input data */
    res = acquire_and_process_data(data_ins);
    HAL_Delay(100);

    /* 2 - process the data - call inference engine */
    res = ai_run();
    HAL_Delay(100);

    /* 3 - post-process the predictions */
    y_val = ((float *)data_out_1)[0];
    res = post_process(data_outs);
    HAL_Delay(100);
  }

  if (res) {
    ai_error err = {AI_ERROR_INVALID_STATE, AI_ERROR_CODE_NETWORK};
    ai_log_err(err, "Process has FAILED");
  }
  /* USER CODE END 6 */
}

#ifdef __cplusplus
}
#endif

ACCEPTED SOLUTION
LauraCx
ST Employee

I would propose analyzing the simplified, but not trivial, software example of STM32Cube.AI integration given below:

https://wiki.st.com/stm32mcu/wiki/AI:How_to_perform_motion_sensing_on_STM32L4_IoTnode

especially the chapters starting from chapter 6.2, "Add STM32Cube.AI to your project".
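For a float32 model, the ai_i8 input/output buffers generated by X-CUBE-AI are just raw byte storage; the usual pattern (also hinted at by the commented-out line in the template code above) is to cast them to ai_float and write/read float values directly, rather than assembling the four bytes by hand. A minimal sketch under that assumption, reusing the buffer names and macros from the code posted above:

int acquire_and_process_data(ai_i8* data[])
{
  /* Sketch only: assumes the network's first input tensor is float32,
     so the generated ai_i8 buffer can be treated as an array of ai_float. */
  ai_float *input = (ai_float *)data[0];   /* data[0] points to data_in_1 */

  for (int idx = 0; idx < AI_NETWORK_IN_1_SIZE; idx++) {
    input[idx] = 1.0f;                     /* dummy value; replace with real samples */
  }
  return 0;
}

int post_process(ai_i8* data[])
{
  /* Same assumption for the output tensor. */
  ai_float y = ((ai_float *)data[0])[0];   /* data[0] points to data_out_1 */
  printf("prediction: %f\r\n", y);         /* note: %f may need printf float support enabled */
  return 0;
}

With that, the raw {0,0,0,2,...} byte initializer is no longer needed; the input buffer is filled with proper float values before each call to ai_network_run().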


2 REPLIES
Sem A.
Associate III

Hello Linus,

Thank you for contacting STMicroelectronics. We will contact you directly about your request through the support portal.

Best regards,

Sem Amesawu
