2026-01-05 11:11 AM
I am working on a project that requires a 320x240 LCD panel (ILI9341). I am using a Nucleo board with an STM32G071RB chip. The SPI communication works fine when I use the blocking HAL_SPI_Transmit() function, but it takes a while to fill the entire screen. When I use DMA with SPI to speed it up, all the colors get distorted.
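For reference, the blocking version that works looks roughly like this (a simplified sketch; CS_LOW()/CS_HIGH(), DC_DATA() and ILI9341_SetAddressWindow_NoCS() are my own helpers, used again in the DMA code further down):
/* Simplified blocking fill: one line buffer, sent TFT_HEIGHT times */
void ILI9341_FillScreen_Blocking(uint16_t color)
{
  uint8_t line[TFT_WIDTH * 2];
  for (uint32_t i = 0; i < TFT_WIDTH; i++)
  {
    line[2*i]     = color >> 8;    /* high byte: RRRRRGGG */
    line[2*i + 1] = color & 0xFF;  /* low byte:  GGGBBBBB */
  }
  CS_LOW();
  ILI9341_SetAddressWindow_NoCS(0, 0, TFT_WIDTH - 1, TFT_HEIGHT - 1);
  DC_DATA();
  for (uint16_t y = 0; y < TFT_HEIGHT; y++)
  {
    HAL_SPI_Transmit(&hspi1, line, sizeof(line), HAL_MAX_DELAY);
  }
  CS_HIGH();
}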
I have tried the transmit-only HAL_SPI_Transmit_DMA() function and the two-way HAL_SPI_TransmitReceive_DMA() function. I also tried switching from 8-bit to 16-bit SPI mode for the DMA transfer, but I always get the same color distortion.
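The 16-bit attempt was along these lines (a simplified sketch; only the lines that differ from the 8-bit setup posted below):
/* 16-bit frames on the SPI side ... */
hspi1.Init.DataSize = SPI_DATASIZE_16BIT;
/* ... and halfword alignment on the DMA side */
hdma_spi1_tx.Init.PeriphDataAlignment = DMA_PDATAALIGN_HALFWORD;
hdma_spi1_tx.Init.MemDataAlignment = DMA_MDATAALIGN_HALFWORD;

/* Pixel buffer as halfwords, one RGB565 value per element */
static uint16_t blockbuf16[TFT_WIDTH * BLOCK_LINES];
for (uint32_t i = 0; i < TFT_WIDTH * BLOCK_LINES; i++)
  blockbuf16[i] = color;

/* Size is given in 16-bit data items here, not in bytes */
HAL_SPI_Transmit_DMA(&hspi1, (uint8_t *)blockbuf16, TFT_WIDTH * BLOCK_LINES);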
A detailed analysis revealed the core problem. When transmitting the RGB565 pixel information 'RRRRRGGGGGGBBBBB', the 16-bit value is broken into two bytes, 'RRRRRGGG' and 'GGGBBBBB'. In the blocking SPI transmit function both bytes are transferred. With every SPI+DMA configuration, the second byte gets lost. The ILI9341 controller receives a single byte and for some reason reads it, according to ChatGPT, as RGB332 'RRRBBGGG'. There is an obvious mismatch even with the RGB332 coding, which ChatGPT failed to clearly interpret; however, the byte loss was confirmed with numerous bit-by-bit manipulations.
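To make the byte split concrete: pure red in RGB565 is 0xF800, which goes out as 0xF8 ('RRRRRGGG') followed by 0x00 ('GGGBBBBB'); pure blue, 0x001F, goes out as 0x00 followed by 0x1F. A tiny host-side illustration of the decomposition (not part of the driver, just the arithmetic):
#include <stdint.h>
#include <stdio.h>

int main(void)
{
  /* Print the two SPI bytes for a few RGB565 test colors */
  uint16_t colors[] = { 0xF800 /* red */, 0x07E0 /* green */, 0x001F /* blue */ };
  for (unsigned i = 0; i < sizeof(colors) / sizeof(colors[0]); i++)
  {
    uint8_t hi = colors[i] >> 8;    /* sent first:  RRRRRGGG */
    uint8_t lo = colors[i] & 0xFF;  /* sent second: GGGBBBBB */
    printf("0x%04X -> 0x%02X 0x%02X\n", (unsigned)colors[i], (unsigned)hi, (unsigned)lo);
  }
  return 0;
}
If the second byte of each pixel is dropped, only the 'RRRRRGGG' bytes ever reach the panel.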
I will post just one of the numerous configurations I tried.
Here is the SPI and DMA setup.
/* SPI1 parameter configuration*/
__HAL_RCC_SPI1_CLK_ENABLE();
hspi1.Instance = SPI1;
hspi1.Init.Mode = SPI_MODE_MASTER;
hspi1.Init.Direction = SPI_DIRECTION_1LINE;
hspi1.Init.DataSize = SPI_DATASIZE_8BIT;
hspi1.Init.CLKPolarity = SPI_POLARITY_LOW;
hspi1.Init.CLKPhase = SPI_PHASE_1EDGE;
hspi1.Init.NSS = SPI_NSS_SOFT;
hspi1.Init.BaudRatePrescaler = SPI_BAUDRATEPRESCALER_16;
hspi1.Init.FirstBit = SPI_FIRSTBIT_MSB;
hspi1.Init.TIMode = SPI_TIMODE_DISABLE;
hspi1.Init.CRCCalculation = SPI_CRCCALCULATION_DISABLE;
hspi1.Init.CRCPolynomial = 7;
hspi1.Init.CRCLength = SPI_CRC_LENGTH_DATASIZE;
hspi1.Init.NSSPMode = SPI_NSS_PULSE_ENABLE;
HAL_SPI_Init(&hspi1);
static void MX_DMA_Init(void)
{
/* DMA controller clock enable */
__HAL_RCC_DMA1_CLK_ENABLE();
hdma_spi1_tx.Instance = DMA1_Channel3;
hdma_spi1_tx.Init.Request = DMA_REQUEST_SPI1_TX;
hdma_spi1_tx.Init.Direction = DMA_MEMORY_TO_PERIPH;
hdma_spi1_tx.Init.PeriphInc = DMA_PINC_DISABLE;
hdma_spi1_tx.Init.MemInc = DMA_MINC_ENABLE;
hdma_spi1_tx.Init.PeriphDataAlignment = DMA_PDATAALIGN_BYTE;
hdma_spi1_tx.Init.MemDataAlignment = DMA_MDATAALIGN_BYTE;
hdma_spi1_tx.Init.Mode = DMA_NORMAL;
hdma_spi1_tx.Init.Priority = DMA_PRIORITY_HIGH;
HAL_DMA_Init(&hdma_spi1_tx);
__HAL_LINKDMA(&hspi1, hdmatx, hdma_spi1_tx);
/* DMA interrupt init */
/* DMA1_Channel2_3_IRQn interrupt configuration */
HAL_NVIC_SetPriority(DMA1_Channel2_3_IRQn, 0, 0);
HAL_NVIC_EnableIRQ(DMA1_Channel2_3_IRQn);
}

The Fill Screen function looks like this:
// Fill the entire screen with a single color
static uint8_t blockbuf[TFT_WIDTH * BLOCK_LINES * 2];
volatile uint8_t spi_dma_done = 0;
void ILI9341_FillScreen_Color(uint16_t color)
{
uint16_t y = 0;
uint8_t hi = color >> 8;
uint8_t lo = color & 0xFF;
/* Prepare one full block */
for (uint32_t i = 0; i < TFT_WIDTH * BLOCK_LINES; i++)
{
blockbuf[2*i] = hi;
blockbuf[2*i + 1] = lo;
}
while (y < TFT_HEIGHT)
{
uint16_t lines = BLOCK_LINES;
if (y + lines > TFT_HEIGHT)
lines = TFT_HEIGHT - y;
uint32_t bytes = TFT_WIDTH * lines * 2;
/* --- Address window (command phase) --- */
CS_LOW();
ILI9341_SetAddressWindow_NoCS(
0,
y,
TFT_WIDTH - 1,
y + lines - 1
);
/* --- Data phase --- */
DC_DATA();
/* Start DMA */
HAL_SPI_Transmit_DMA(&hspi1, blockbuf, bytes);
/* Wait for completion */
while (HAL_SPI_GetState(&hspi1) != HAL_SPI_STATE_READY);
while (SPI1->SR & SPI_SR_BSY);
// CS_HIGH(); // CS is turned off in the callback function.
y += lines;
}
}
void HAL_SPI_TxRxCpltCallback(SPI_HandleTypeDef *hspi)
{
if (hspi == &hspi1)
{
CS_HIGH();
spi_dma_done = 1;
}
}

At this moment I (and ChatGPT) have exhausted our options. Any suggestion would be greatly appreciated.