path: root/firmware/target/arm/imx233/ssp-imx233.c
diff options
authorAmaury Pouly <>2012-05-20 01:23:17 +0200
committerAmaury Pouly <>2012-05-20 01:35:25 +0200
commit1b6e8cba62d0ea12bc39b0e3f60f66c3892ca1ff (patch)
tree9d034e05f485b42f837a509ff91844c0f7432be5 /firmware/target/arm/imx233/ssp-imx233.c
parent1adc47477144e701dcb1184f907887eadec05fda (diff)
imx233: make sure dma descriptors are cache friendly
Because DMA descriptors need to be committed to and discarded from the cache, nasty side effects could occur on adjacent data if they are not cache aligned and/or their size is not a multiple of the cache line size. The same applies to DMA buffers, which are still potentially broken. Add a macro to ensure that these constraints will not be broken by mistake in the future. Change-Id: I1dd69a5a9c29796c156d953eaa57c0d281e79846
Diffstat (limited to 'firmware/target/arm/imx233/ssp-imx233.c')
1 files changed, 5 insertions, 1 deletions
diff --git a/firmware/target/arm/imx233/ssp-imx233.c b/firmware/target/arm/imx233/ssp-imx233.c
index 21dcba67aa..1b773a4dd0 100644
--- a/firmware/target/arm/imx233/ssp-imx233.c
+++ b/firmware/target/arm/imx233/ssp-imx233.c
@@ -43,7 +43,11 @@ struct ssp_dma_command_t
uint32_t ctrl0;
uint32_t cmd0;
uint32_t cmd1;
+ /* padded to next multiple of cache line size (32 bytes) */
+ uint32_t pad[2];
+} __attribute__((packed)) CACHEALIGN_ATTR;
+__ENSURE_STRUCT_CACHE_FRIENDLY(struct ssp_dma_command_t)
static bool ssp_in_use[2];
static int ssp_nr_in_use = 0;