ADuCM4x50 Device Drivers API Reference Manual  Release 4.0.0.0
adi_spi.c
1 
40 #include <adi_processor.h>
43 #include <stdlib.h> /* for 'NULL' definition */
44 #include <string.h>
45 
46 #include <drivers/spi/adi_spi.h>
47 #include <drivers/pwr/adi_pwr.h>
48 #include <drivers/general/adi_drivers_general.h>
49 #include <adi_callback.h>
50 #include <rtos_map/adi_rtos_map.h>
51 #include "adi_spi_config.h"
52 #include <adi_cyclecount.h>
53 
54 
55 #ifdef __ICCARM__
56 /*
57 * IAR MISRA C 2004 error suppressions.
58 *
59 * Pm123 (rule 8.5): there shall be no definition of objects or functions in a header file
60 * This isn't a header as such.
61 *
62 * Pm073 (rule 14.7): a function should have a single point of exit
63 * Pm143 (rule 14.7): a function should have a single point of exit at the end of the function
64 * Multiple returns are used for error handling.
65 *
66 * Pm088 (rule 17.4): pointer arithmetic should not be used.
67 * Relying on pointer arithmetic for buffer handling.
68 *
69 * Pm152: (MISRA C 2004 rule 17.4) array indexing shall only be applied to objects defined as an array type
70 * Accessing the DMA descriptors, which are defined in the system as a pointer to an array of descriptors
71 *
72 * Pm151 (rule 17.4): array indexing shall only be applied to objects of array type
73 * Pm123 (rule 18.5): there shall be no definition of objects in a header file
74 *
75 * Pm050: (MISRA C 2004 rule 14.3) a null statement shall only occur on a line by itself, and shall not have any other text on the same line
76 * Some Macros, such as ISR_PROLOGUE, may not have any expansion resulting in just the terminating ';'
77 *
78 * Pm140: (MISRA C 2004 rule 11.3) a cast should not be performed between a pointer type and an integral type
79 * MMR addresses are defined as simple constants. Accessing the MMR requires casting to a pointer type
80 *
81 * Pm031: (MISRA C 2004 rule 12.7) bitwise operations shall not be performed on signed integer types
82 * MMR macros are beyond the control of the driver.
83 *
84 */
85 #pragma diag_suppress=Pm050,Pm073,Pm088,Pm123,Pm143,Pm152,Pm140,Pm031
86 
87 #endif /* __ICCARM__ */
88 
89 #include "adi_spi_data.c"
90 
93 /* handle checker for debug mode */
94 #define ADI_SPI_VALIDATE_HANDLE(h) ((spi_device_info[0].hDevice != (h)) && (spi_device_info[1].hDevice != (h)) && (spi_device_info[2].hDevice != (h)))
95 
98 /*
99  * Local prototypes
100  */
101 static void common_SPI_Int_Handler (ADI_SPI_DEV_DATA_TYPE* pDD);
102 static void StartTransaction (ADI_SPI_HANDLE const hDevice, const ADI_SPI_TRANSCEIVER* const pXfr);
103 static void TxDmaErrorCallback (void *pCBParam, uint32_t Event, void *pArg);
104 static void RxDmaErrorCallback (void *pCBParam, uint32_t Event, void *pArg);
105 
106 /* ISR forward declarations */
108 void SPI0_Int_Handler(void);
109 void SPI1_Int_Handler(void);
110 void SPI2_Int_Handler(void);
111 void DMA_SPI0_TX_Int_Handler(void);
112 void DMA_SPI0_RX_Int_Handler(void);
113 void DMA_SPI1_TX_Int_Handler(void);
114 void DMA_SPI1_RX_Int_Handler(void);
115 void DMA_SPIH_TX_Int_Handler(void);
116 void DMA_SPIH_RX_Int_Handler(void);
119 /*
123 */
124 
154 ADI_SPI_RESULT adi_spi_Open(uint32_t nDeviceNum,
155  void *pDevMemory,
156  uint32_t nMemorySize,
157  ADI_SPI_HANDLE* const phDevice)
158 {
159 
160 #ifdef ADI_DEBUG
161 
162  if (nDeviceNum >= ADI_SPI_NUM_INSTANCES)
 163  {
 164  return ADI_SPI_INVALID_DEVICE_NUM;
 165  }
166 
167  if (nMemorySize != sizeof(struct __ADI_SPI_DEV_DATA_TYPE))
168  {
169  return ADI_SPI_INVALID_PARAM;
170  }
171 
172  if( spi_device_info[nDeviceNum].hDevice != NULL )
173  {
174  return ADI_SPI_IN_USE;
175  }
176 
177 #endif
178 
179  ADI_SPI_HANDLE hDevice = pDevMemory;
180 
181  /*
182  * Link the two data structures together.
183  *
184  * ADI_SPI_DEVICE_INFO <==> ADI_SPI_HANDLE
185  *
186  * Clear the ADI_SPI_HANDLE memory. This also sets all bool
187  * structure members to false so we do not need to waste cycles
188  * setting these explicitly (e.g. hDevice->bDMA = false)
189  *
190  * Other fields, such as callback related fields, are also zeroed
191  * and therefore properly initialized.
192  */
193 
194  memset(pDevMemory,0,nMemorySize);
195  hDevice->pDevInfo = &spi_device_info[nDeviceNum];
196  spi_device_info[nDeviceNum].hDevice = (ADI_SPI_DEV_DATA_TYPE *)pDevMemory;
197 
198 
199  /*
200  * Although the ADI_SPI_DEVICE_INFO struct has the address of the SPI registers
201  * for this instance, copying it to the ADI_SPI_HANDLE struct will minimize
202  * the runtime footprint and cycle count when accessing the SPI registers
203  */
204  hDevice->pSpi = spi_device_info[nDeviceNum].pSpiRegs;
205 
206  SEM_CREATE(hDevice, "SPI_SEM", ADI_SPI_SEMAPHORE_FAILED);
207 
208  /* Static Configuration */
209  /* Initialize the device based on the given configuration parameters */
210  ADI_SPI_CFG_TYPE const* pSPICfg = &gSPICfg[nDeviceNum];
211  hDevice->pSpi->CTL = pSPICfg->SPI_CTL;
212  hDevice->pSpi->DIV = pSPICfg->SPI_DIV;
213 
214  /* write the device data pointer into the caller's handle */
215  *phDevice = hDevice;
216  hDevice->pSpi->CTL |= BITM_SPI_CTL_SPIEN;
217 
218  /* Make sure the DMA controller and its SRAM based descriptors are initialized */
219  adi_dma_Init();
220 
221  /* Setup the DMA TX callback */
222  if (ADI_DMA_SUCCESS != adi_dma_RegisterCallback((DMA_CHANn_TypeDef) hDevice->pDevInfo->dmaTxChannelNumber, TxDmaErrorCallback, (void *) hDevice))
223  {
224  return ADI_SPI_DMA_REG_FAILED;
225  }
226 
227  /* Setup the DMA RX callback */
228  if (ADI_DMA_SUCCESS != adi_dma_RegisterCallback((DMA_CHANn_TypeDef) hDevice->pDevInfo->dmaRxChannelNumber, RxDmaErrorCallback, (void *) hDevice))
229  {
230  return ADI_SPI_DMA_REG_FAILED;
231  }
232 
233  return ADI_SPI_SUCCESS;
234 }
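Illustrative usage (editorial sketch, not part of the driver source): opening SPI device 0 in the usual ADuCM4x50 pattern. The ADI_SPI_MEMORY_SIZE macro is assumed to be provided by adi_spi.h for sizing the driver memory.

#include <drivers/spi/adi_spi.h>

/* Driver state memory; ADI_SPI_MEMORY_SIZE is assumed to come from adi_spi.h */
static uint8_t spiMemory[ADI_SPI_MEMORY_SIZE];
static ADI_SPI_HANDLE hSpi;

void Spi0ExampleOpen(void)
{
    if (ADI_SPI_SUCCESS != adi_spi_Open(0u, spiMemory, ADI_SPI_MEMORY_SIZE, &hSpi))
    {
        /* handle the open failure */
    }
}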
235 
236 
250 ADI_SPI_RESULT adi_spi_Close (ADI_SPI_HANDLE const hDevice)
251 {
252 
253  ADI_SPI_RESULT result = ADI_SPI_SUCCESS;
254 #ifdef ADI_DEBUG
255  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
256  {
257  return ADI_SPI_INVALID_HANDLE;
258  }
259 
260 #endif
261 
262 
263  /* disable Interrupt */
264  NVIC_DisableIRQ(hDevice->pDevInfo->eIRQn);
265 
266 
267  /* destroy semaphore */
268  SEM_DELETE((ADI_SPI_HANDLE) hDevice,ADI_SPI_SEMAPHORE_FAILED);
269 
270  /* invalidate initialization state */
271  hDevice->pDevInfo->hDevice = NULL;
272  return result;
273 }
274 
275 
289 ADI_SPI_RESULT adi_spi_RegisterCallback (ADI_SPI_HANDLE const hDevice, ADI_CALLBACK const pfCallback, void *const pCBParam )
290 {
291 #ifdef ADI_DEBUG
292  if (ADI_SPI_VALIDATE_HANDLE(hDevice)) {
293  return ADI_SPI_INVALID_HANDLE;
294  }
295 
296 #endif
297  /* Save the application provided callback and callback parameters */
298  hDevice->pfCallback = pfCallback;
299  hDevice->pCBParam = pCBParam;
300 
301  return ADI_SPI_SUCCESS;
302 }
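Illustrative callback registration (editorial sketch): the driver's internal notification calls pass the accumulated hardware-error word as the Event argument, so an application callback can simply test it against ADI_SPI_HW_ERROR_NONE. The callback name and the hSpi handle are hypothetical, carried over from the open sketch above.

/* Hypothetical application callback; Event carries the driver's HWErrors word */
static void SpiEventCallback(void *pCBParam, uint32_t Event, void *pArg)
{
    if ((uint32_t)ADI_SPI_HW_ERROR_NONE != Event)
    {
        /* an overflow, underflow or DMA fault was reported */
    }
}

/* ... after adi_spi_Open() succeeds ... */
if (ADI_SPI_SUCCESS != adi_spi_RegisterCallback(hSpi, SpiEventCallback, NULL))
{
    /* handle the error */
}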
303 
337 ADI_SPI_RESULT adi_spi_SetIrqmode (ADI_SPI_CONST_HANDLE const hDevice, const uint8_t nMode)
338 {
339 
340 #ifdef ADI_DEBUG
341  if (ADI_SPI_VALIDATE_HANDLE(hDevice)) {
342  return ADI_SPI_INVALID_HANDLE;
343  }
344 
345  if (nMode > ADI_SPI_IRQ_PARAM) {
346  return ADI_SPI_INVALID_PARAM;
347  }
348 
349 #endif
350 
351  uint16_t ien = hDevice->pSpi->IEN;
352  ien = ien & (uint16_t)~BITM_SPI_IEN_IRQMODE;
353  ien = ien | (nMode & BITM_SPI_IEN_IRQMODE);
354  hDevice->pSpi->IEN = ien;
355 
356  return ADI_SPI_SUCCESS;
357 }
358 
359 
377 ADI_SPI_RESULT adi_spi_SetContinuousMode (ADI_SPI_CONST_HANDLE const hDevice, const bool bFlag)
378 {
379 
380 #ifdef ADI_DEBUG
381  if (ADI_SPI_VALIDATE_HANDLE(hDevice)) {
382  return ADI_SPI_INVALID_HANDLE;
383  }
384 
385 #endif
386 
387  if (true == bFlag) {
388  hDevice->pSpi->CTL |= (BITM_SPI_CTL_CON);
389  } else {
390  hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_CON;
391  }
392 
393  return ADI_SPI_SUCCESS;
394 }
395 
411 ADI_SPI_RESULT adi_spi_SetLoopback (ADI_SPI_CONST_HANDLE const hDevice, const bool bFlag)
412 {
413 
414 #ifdef ADI_DEBUG
415  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
416  {
417  return ADI_SPI_INVALID_HANDLE;
418  }
419 
420 #endif
421 
422  if (true == bFlag) {
423  hDevice->pSpi->CTL |= (BITM_SPI_CTL_LOOPBACK);
424  } else {
425  hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_LOOPBACK;
426  }
427 
428  return ADI_SPI_SUCCESS;
429 }
430 
446 ADI_SPI_RESULT adi_spi_SetMasterMode (ADI_SPI_CONST_HANDLE const hDevice, const bool bFlag)
447 {
448 
449 #ifdef ADI_DEBUG
450  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
451  {
452  return ADI_SPI_INVALID_HANDLE;
453  }
454 
455 #endif
456  ADI_INT_STATUS_ALLOC();
457  ADI_ENTER_CRITICAL_REGION();
458  if (true == bFlag) { /* hardware default */
459  hDevice->pSpi->CTL |= (ADI_SPI_MASTERCON_INITIALIZER);
460  } else {
461  hDevice->pSpi->CNT = 0u;
462  hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_MASEN;
463  hDevice->pSpi->CTL |= (ADI_SPI_SLAVECON_INITIALIZER);
464  }
465  ADI_EXIT_CRITICAL_REGION();
466  return ADI_SPI_SUCCESS;
467 }
468 
469 
489 ADI_SPI_RESULT adi_spi_SetReceiveOverflow (ADI_SPI_CONST_HANDLE const hDevice, const bool bFlag)
490 {
491 
492 #ifdef ADI_DEBUG
493  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
494  {
495  return ADI_SPI_INVALID_HANDLE;
496  }
497 
498 #endif
499 
500  if (true == bFlag) {
501  hDevice->pSpi->CTL |= (BITM_SPI_CTL_RXOF);
502  } else {
503  hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_RXOF;
504  }
505 
506  return ADI_SPI_SUCCESS;
507 }
508 
509 
527 ADI_SPI_RESULT adi_spi_SetTransmitUnderflow (ADI_SPI_CONST_HANDLE const hDevice, const bool bFlag)
528 {
529 
530 #ifdef ADI_DEBUG
531  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
532  {
533  return ADI_SPI_INVALID_HANDLE;
534  }
535 #endif
536 
537  if (true == bFlag) {
538  hDevice->pSpi->CTL |= (BITM_SPI_CTL_ZEN);
539  } else {
540  hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_ZEN;
541  }
542 
543  return ADI_SPI_SUCCESS;
544 }
545 
546 
547 
548 
549 
550 
569 ADI_SPI_RESULT adi_spi_SetBitrate (ADI_SPI_CONST_HANDLE const hDevice, const uint32_t Hertz)
570 {
571  uint32_t incoming_clock;
572  uint16_t Div;
573 
574 #ifdef ADI_DEBUG
575  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
576  {
577  return ADI_SPI_INVALID_HANDLE;
578  }
579 #endif
580 
581  if (adi_pwr_GetClockFrequency(ADI_CLOCK_PCLK, &incoming_clock) != ADI_PWR_SUCCESS)
582  {
583  return ADI_SPI_INVALID_HANDLE;
584  }
585 
586  /* the requested rate can be at most half of the incoming clock */
587  if ((2U * Hertz) > incoming_clock)
588  {
589  return ADI_SPI_BAD_SYS_CLOCK;
590  }
591 
592  /* compute the SPI divider value */
593  Div = (uint16_t) ((incoming_clock / Hertz) >> 1U) - 1U; /* '>>1' is really a divide by 2 */
594 
595  /* range check that computed divider fits */
596  if (Div != (Div & BITM_SPI_DIV_VALUE))
597  {
598  return ADI_SPI_INVALID_PARAM;
599  }
600 
601  /* store it in core */
602  hDevice->pSpi->DIV = Div;
603 
604  return ADI_SPI_SUCCESS;
605 }
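Worked example of the divider arithmetic above (editorial note): with PCLK = 26 MHz and a requested rate of 1 MHz, Div = ((26000000 / 1000000) >> 1) - 1 = 12, which fits in SPI_DIV.VALUE, and the resulting serial clock is 26000000 / (2 * (12 + 1)) = 1 MHz. A request above PCLK / 2 returns ADI_SPI_BAD_SYS_CLOCK. A minimal sketch, assuming the hSpi handle from the open example:

uint32_t actualHz = 0u;

if (ADI_SPI_SUCCESS == adi_spi_SetBitrate(hSpi, 1000000u))
{
    (void)adi_spi_GetBitrate(hSpi, &actualHz); /* reports 1000000 with a 26 MHz PCLK */
}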
606 
607 
626 ADI_SPI_RESULT adi_spi_GetBitrate (ADI_SPI_CONST_HANDLE const hDevice, uint32_t* const pnBitrate)
627 {
628  uint32_t incoming_clock;
629  ADI_PWR_RESULT ePwrResult;
630  uint32_t Div;
631 
632 #ifdef ADI_DEBUG
633  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
634  {
635  return ADI_SPI_INVALID_HANDLE;
636  }
637 #endif
638  Div = hDevice->pSpi->DIV; /* assumes this is always a right-justified value */
639 
640  ePwrResult = adi_pwr_GetClockFrequency(ADI_CLOCK_PCLK, &incoming_clock);
641  if(ePwrResult != ADI_PWR_SUCCESS)
642  {
643  *pnBitrate= 0u;
644  return(ADI_SPI_FAILURE);
645  }
646  *pnBitrate= (incoming_clock / (Div + 1U)) >> 1U; /* '>>1' is divide by 2 */
647  return(ADI_SPI_SUCCESS);
648 
649 }
650 
680 ADI_SPI_RESULT adi_spi_SetClockPolarity (ADI_SPI_HANDLE const hDevice, const bool bFlag)
681 {
682 
683 #ifdef ADI_DEBUG
684  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
685  {
686  return ADI_SPI_INVALID_HANDLE;
687  }
688 #endif
689 
690  if (true == bFlag) {
691  hDevice->pSpi->CTL |= (BITM_SPI_CTL_CPOL);
692  } else {
693  hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_CPOL;
694  }
695 
696  return ADI_SPI_SUCCESS;
697 }
698 
715 ADI_SPI_RESULT adi_spi_SetChipSelect (ADI_SPI_HANDLE const hDevice, const ADI_SPI_CHIP_SELECT eChipSelect)
716 {
717 
718 #ifdef ADI_DEBUG
719  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
720  {
721  return ADI_SPI_INVALID_HANDLE;
722  }
723 #endif
724 
725  hDevice->ChipSelect = eChipSelect;
726 
727  return ADI_SPI_SUCCESS;
728 }
729 
759 ADI_SPI_RESULT adi_spi_SetClockPhase (ADI_SPI_HANDLE const hDevice, const bool bFlag)
760 {
761 
762 #ifdef ADI_DEBUG
763  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
764  {
765  return ADI_SPI_INVALID_HANDLE;
766  }
767 #endif
768 
769  if (true == bFlag) {
770  hDevice->pSpi->CTL |= (BITM_SPI_CTL_CPHA);
771  } else {
772  hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_CPHA;
773  }
774 
775  return ADI_SPI_SUCCESS;
776 }
777 
804 ADI_SPI_RESULT adi_spi_MasterReadWrite (ADI_SPI_HANDLE const hDevice, const ADI_SPI_TRANSCEIVER* const pXfr)
805 {
806  ADI_SPI_RESULT eResult;
807  hDevice->bBlockingMode = true;
808  eResult = adi_spi_MasterSubmitBuffer(hDevice,pXfr);
809  hDevice->bBlockingMode = false;
810  if( (eResult == ADI_SPI_SUCCESS) && (hDevice->HWErrors != 0u))
811  {
812  eResult = ADI_SPI_HW_ERROR_OCCURRED;
813  }
814  return(eResult);
815 }
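Illustrative master-mode blocking transaction (editorial sketch). The ADI_SPI_TRANSCEIVER fields mirror how adi_spi_MasterSubmitBuffer() consumes them below; the chip-select enumerator ADI_SPI_CS0 is an assumed name from adi_spi.h.

uint8_t txData[4] = { 0x9Fu, 0x00u, 0x00u, 0x00u };
uint8_t rxData[4] = { 0u };
ADI_SPI_TRANSCEIVER xfr;

xfr.pTransmitter     = txData;
xfr.TransmitterBytes = (uint16_t)sizeof(txData);
xfr.nTxIncrement     = 1u;
xfr.pReceiver        = rxData;
xfr.ReceiverBytes    = (uint16_t)sizeof(rxData);
xfr.nRxIncrement     = 1u;
xfr.bDMA             = false;   /* interrupt (PIO) mode */
xfr.bRD_CTL          = false;   /* full duplex */

(void)adi_spi_SetChipSelect(hSpi, ADI_SPI_CS0);  /* ADI_SPI_CS0 assumed */
if (ADI_SPI_SUCCESS != adi_spi_MasterReadWrite(hSpi, &xfr))
{
    /* transaction failed or a hardware error occurred */
}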
816 
844 ADI_SPI_RESULT adi_spi_MasterSubmitBuffer (ADI_SPI_HANDLE const hDevice, const ADI_SPI_TRANSCEIVER* const pXfr)
845 {
846  ADI_SPI_RESULT result = ADI_SPI_SUCCESS;
847  volatile uint16_t nStatus;
848 
849 #ifdef ADI_DEBUG
850  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
851  {
852  return ADI_SPI_INVALID_HANDLE;
853  }
854 
855  if ((NULL == pXfr->pTransmitter) && (NULL == pXfr->pReceiver))
 856  {
 857  return ADI_SPI_INVALID_POINTER;
 858  }
859 
860  if( (pXfr->bRD_CTL == true) && (pXfr->TransmitterBytes > 16u))
861  {
862  return ADI_SPI_INVALID_PARAM;
863  }
864 
865 #endif /* ADI_DEBUG */
866 
867  /* Initialize the transaction. 'hDevice' must hold the transaction values as pXfr is owned by the application */
868  hDevice->pTxBuffer = pXfr->pTransmitter;
869  hDevice->pRxBuffer = pXfr->pReceiver;
870  hDevice->TxRemaining = pXfr->TransmitterBytes;
871  hDevice->RxRemaining = pXfr->ReceiverBytes;
872  hDevice->TxIncrement = (uint8_t)pXfr->nTxIncrement;
873  hDevice->RxIncrement = (uint8_t)pXfr->nRxIncrement;
874  hDevice->bDmaMode = pXfr->bDMA;
875  hDevice->bRdCtlMode = pXfr->bRD_CTL;
876  hDevice->bTransferComplete = false;
877  hDevice->HWErrors = ADI_SPI_HW_ERROR_NONE;
878 
879 
880  /*
881  *
882  * TIM
883  * If set: initiate transfer with write to SPI_TX register
884  * If clear: initiate transfer with a read from SPI_RX register
885  *
886  * RFLUSH
887  * Clear this bit so that incoming data is not ignored
888  *
889  * TFLUSH
890  * Clear this bit so that transmitted data comes from the TX FIFO rather than a zero (if SPI_CTL.ZEN is set) or the last transmitted byte
891  *
892  */
893 
894 
895  hDevice->pSpi->CTL &= (uint16_t)~(BITM_SPI_CTL_TIM | BITM_SPI_CTL_RFLUSH | BITM_SPI_CTL_TFLUSH);
896 
897  /*
898  * If in DMA mode then make sure XFRDONE interrupt is not set. DMA mode will generate three interrupts
899  * TX DMA
900  * RX DMA
901  * XFRDONE
902  *
903  * There is a race condition between XFRDONE and DMA interrupts. They are on different clocks.
904  *
905  * SPI XfrDone is counted on SPI clock (SCL) edge, which is a fixed timing related to SPI bit protocol.
906  * But the DMA works upon system clock (HCLK) and it could finish on various timing upon SCL/HCLK ratio.
907  * And bus bandwidth (e.g., DMA hold off until processor frees up the bus). So SPI RX DMA done interrupt
908  * could be issued earlier or later than SPI XferDone interrupt.
909  *
910  */
911  if( hDevice->bDmaMode==true ) {
917  hDevice->pSpi->IEN &= (uint16_t)~(BITM_SPI_IEN_XFRDONE);
918  } else {
919 
920  /* In interrupt mode always enable XFRDONE */
921  uint16_t activeInterrupts = BITM_SPI_IEN_XFRDONE;
922  /* Enable underflow only if sending bytes */
923  if( hDevice->TxRemaining ) {
924  activeInterrupts |= BITM_SPI_IEN_TXUNDR;
925  }
926  /* Enable overflow only if receiving bytes */
927  if( hDevice->RxRemaining ) {
928  activeInterrupts |= BITM_SPI_IEN_RXOVR;
929  }
930 
931  hDevice->pSpi->IEN |= activeInterrupts;
932 
933  /*
934  * In interrupt mode, when there is nothing to receive, need to initiate a transaction
935  * on an TX write only. Initiating on an RX read will start the transaction, but just for
936  * a single byte (and we're not sure why this is true)
937  */
938 
939  if( hDevice->RxRemaining == 0u) {
940  hDevice->pSpi->CTL |= ( BITM_SPI_CTL_TIM );
941  }
942 
943  }
944 
945  /* STAT bits are cleared by writing a '1' to them. Clear any residual status*/
946  nStatus = hDevice->pSpi->STAT;
947  hDevice->pSpi->STAT = nStatus;
948 
949  /* Make sure we are in master mode */
950  hDevice->pSpi->CTL |= ( BITM_SPI_CTL_MASEN);
951 
952  /* Set ChipSelect */
953  hDevice->pSpi->CS_CTL = hDevice->ChipSelect;
954 
955  StartTransaction(hDevice, pXfr);
956 
957 
958  /* block if required */
959  if (hDevice->bBlockingMode == true)
960  {
961  SEM_PEND(hDevice,ADI_SPI_PEND_FAILED);
962  }
963 
964  return result;
965 }
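Illustrative non-blocking use of the submit/get pair (editorial sketch, reusing the transceiver structure from the blocking example above): submit the buffer, do other work, then call adi_spi_GetBuffer(), which pends on the driver semaphore until the transaction completes and returns the accumulated hardware-error bits.

uint32_t hwErrors = 0u;

if (ADI_SPI_SUCCESS == adi_spi_MasterSubmitBuffer(hSpi, &xfr))
{
    /* ... do other work while the transfer proceeds ... */

    if ((ADI_SPI_SUCCESS != adi_spi_GetBuffer(hSpi, &hwErrors)) ||
        ((uint32_t)ADI_SPI_HW_ERROR_NONE != hwErrors))
    {
        /* handle the failure or hardware error */
    }
}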
966 
967 /*********************************************************************************************************/
968 /* */
969 /* SPI DRIVER Master Mode transaction start */
970 /* */
971 /*********************************************************************************************************/
972 
973 static void StartTransaction(ADI_SPI_HANDLE const hDevice, const ADI_SPI_TRANSCEIVER* const pXfr)
974 {
975  /* Transaction completion is determined by the number of bytes to be received */
976  uint16_t nCount;
977 
978  /* Work around SPI anomaly */
979  if( (hDevice->bDmaMode == true) && (hDevice->bRdCtlMode == true) && (pXfr->ReceiverBytes == 1u))
980  {
981  /* Switch to PIO mode if the transaction is setup for a DMA transfer in RD_CTL mode with an RX count of 1 */
982  hDevice->bDmaMode = false;
983  }
984  /* Effectively flush the FIFOs before the start of the next transaction */
985  hDevice->pSpi->CTL |= (BITM_SPI_CTL_RFLUSH|BITM_SPI_CTL_TFLUSH);
986  hDevice->pSpi->CTL &= (uint16_t)~(BITM_SPI_CTL_RFLUSH|BITM_SPI_CTL_TFLUSH);
987 
988  /* Disable any prior notion of DMA */
989  hDevice->pSpi->DMA &= (uint16_t)~(BITM_SPI_DMA_EN | BITM_SPI_DMA_RXEN | BITM_SPI_DMA_TXEN);
990 
991 
992  /*
993  * If the transaction is DMA based then set up the DMA descriptors for this transaction
994  */
995 
996  uint16_t dmaFlags = 0u;
997 
998  if( hDevice->bDmaMode == true)
999  {
1000  dmaFlags = BITM_SPI_DMA_EN;
1001 
1002  uint16_t sz = pXfr->TransmitterBytes;
1003  if( sz )
1004  {
1005  uint16_t TxChanNum = hDevice->pDevInfo->dmaTxChannelNumber;
1006 
1007  /* Enable the interrupt for the given DMA */
1008  NVIC_EnableIRQ((IRQn_Type)(hDevice->pDevInfo->dmaTxIrqNumber));
1009 
1010  /* Disables source address decrement for TX channel */
1011  pADI_DMA0->SRCADDR_CLR = 1U << TxChanNum;
1012 
1013  /* Enable the channel */
1014  pADI_DMA0->EN_SET = 1U << TxChanNum;
1015 
1016  /* Enables SPI peripheral to generate DMA requests. */
1017  pADI_DMA0->RMSK_CLR = 1U << TxChanNum;
1018 
1019  /* Set the primary as the current DMA descriptor */
1020  pADI_DMA0->ALT_CLR = 1U << TxChanNum;
1021 
1022  /* fill in the DMA RAM descriptors */
1023  if( (sz & 1U) != 0u )
1024  {
1025  /* DMA is performed on 16-bit data. Make sure the DMA engine is properly aligned to even counts */
1026  /* The SPI_CNT register will hold the "real" transfer count */
1027  sz++;
1028  }
1029 
1030  pPrimaryCCD[TxChanNum].DMASRCEND = (uint32_t)(pXfr->pTransmitter + (sz - 2U));
1031 
1032  pPrimaryCCD[TxChanNum].DMADSTEND = (uint32_t)&hDevice->pSpi->TX;
1033 
1034  pPrimaryCCD[TxChanNum].DMACDC = ((uint32_t)ADI_DMA_INCR_NONE << DMA_BITP_CTL_DST_INC) |
1035  (ADI_DMA_INCR_2_BYTE << DMA_BITP_CTL_SRC_INC) |
1036  (ADI_DMA_WIDTH_2_BYTE << DMA_BITP_CTL_SRC_SIZE) |
1037  ((sz/2U -1U)<< DMA_BITP_CTL_N_MINUS_1) |
1038  (DMA_ENUM_CTL_CYCLE_CTL_BASIC << DMA_BITP_CTL_CYCLE_CTL);
1039 
1040  dmaFlags |= (BITM_SPI_DMA_TXEN);
1041  }
1042 
1043  sz = pXfr->ReceiverBytes;
1044  if( sz )
1045  {
1046 
1047  uint16_t RxChanNum = hDevice->pDevInfo->dmaRxChannelNumber;
1048  NVIC_EnableIRQ((IRQn_Type)(hDevice->pDevInfo->dmaRxIrqNumber));
1049 
1050  /* Disables destination address decrement for RX channel */
1051  pADI_DMA0->DSTADDR_CLR = 1U << RxChanNum;
1052 
1053  /* Enable the channel */
1054  pADI_DMA0->EN_SET = 1U << RxChanNum;
1055 
1056  /* Enables SPI peripheral to generate DMA requests. */
1057  pADI_DMA0->RMSK_CLR = 1U << RxChanNum;
1058 
1059  /* Set the primary as the current DMA descriptor */
1060  pADI_DMA0->ALT_CLR = 1U << RxChanNum;
1061 
1062  if( (sz & 1U) != 0u )
1063  {
1064  /* DMA is performed on 16-bit data. Make sure the DMA engine is properly aligned to even counts */
1065  /* The SPI_CNT register will hold the "real" transfer count */
1066  sz++;
1067  }
1068 
1069  pPrimaryCCD[RxChanNum].DMASRCEND = (uint32_t)&hDevice->pSpi->RX;
1070 
1071  pPrimaryCCD[RxChanNum].DMADSTEND = (uint32_t)(pXfr->pReceiver + (sz - 2U));
1072 
1073  pPrimaryCCD[RxChanNum].DMACDC = (ADI_DMA_INCR_2_BYTE << DMA_BITP_CTL_DST_INC) |
1074  (ADI_DMA_INCR_NONE << DMA_BITP_CTL_SRC_INC) |
1075  (ADI_DMA_WIDTH_2_BYTE << DMA_BITP_CTL_SRC_SIZE) |
1076  ((sz/2U -1U) << DMA_BITP_CTL_N_MINUS_1) |
1077  (DMA_ENUM_CTL_CYCLE_CTL_BASIC << DMA_BITP_CTL_CYCLE_CTL);
1078 
1079  dmaFlags |= (BITM_SPI_DMA_RXEN );
1080 
1081  }
1082  }
1083 
1084  /*
1085  * SPI CNT register
1086  * Non Read Mode: Size of the entire transactions
1087  * Read Mode: Size of the RX transaction
1088  *
1089  * RD_CTL.SZ
1090  * Read Mode: Size of the TX transaction
1091  */
1092 
1093  hDevice->pSpi->RD_CTL = 0u;
1094  if( hDevice->bRdCtlMode)
1095  {
1096  /* "Half Duplex Mode" */
1097 
1098  /* The number of bytes to be transmitted */
1099  uint32_t nBytes = hDevice->TxRemaining - 1U;
1100 
1101  /* Enable RD_CTL and set the TX count for the half-duplex mode of operation */
1102  hDevice->pSpi->RD_CTL &= (uint16_t)~((uint16_t)(BITM_SPI_RD_CTL_TXBYTES << BITP_SPI_RD_CTL_TXBYTES));
1103 
1104  hDevice->pSpi->RD_CTL |= (uint16_t)( (uint16_t)(nBytes << BITP_SPI_RD_CTL_TXBYTES) |
1105  (uint16_t)(1 << BITP_SPI_RD_CTL_CMDEN));
1106 
1107  /* RD_CTL requires continuous mode operation. */
1108  hDevice->pSpi->CTL |= (BITM_SPI_CTL_CON);
1109 
1110  /* CNT represents the number of bytes to receive */
1111  hDevice->pSpi->CNT = hDevice->RxRemaining;
1112  }
1113  else
1114  {
1115  /* Full duplex mode of operation */
1116  if(hDevice->RxRemaining == 0u)
1117  {
1118  /* There is nothing to receive. Flush the RX FIFO and ignore all incoming data */
1119  hDevice->pSpi->CTL |= (BITM_SPI_CTL_RFLUSH);
1120  }
1121  else if(hDevice->TxRemaining == 0u)
1122  {
1123  /* If there is nothing to transmit then clear the TX FIFO */
1124  hDevice->pSpi->CTL |= (BITM_SPI_CTL_TFLUSH);
1125  }
1126  else
1127  {
1128  /* Misra compliance: All if/else chains should end with a final else clause */
1129  }
1130 
1131  /* Set CNT to MAX of RX/TX */
1132 
1133  nCount = hDevice->RxRemaining > hDevice->TxRemaining ? hDevice->RxRemaining : hDevice->TxRemaining;
1134 
1135  hDevice->pSpi->CNT = (uint16_t)nCount;
1136  }
1137 
1138 
1139  if( hDevice->bDmaMode == false)
1140  {
1141 
1142  /* Make sure that the application passed in a TX Buffer */
1143  if( hDevice->pTxBuffer != NULL)
1144  {
1145  /* interrupt mode: Fill in the FIFO */
1146  nCount = 0u;
1147  while((nCount < ADI_SPI_FIFO_SIZE) && (hDevice->TxRemaining != 0u))
1148  {
1149  /* grab the lead byte */
1150  hDevice->pSpi->TX = *hDevice->pTxBuffer;
1151  /* modify tx pointer and buffer count prior to interrupt */
1152  hDevice->pTxBuffer += hDevice->TxIncrement;
1153  /* decrement the byte count */
1154  hDevice->TxRemaining--;
1155  nCount++;
1156  }
1157  }
1158 
1159  } else {
1160 
1161  hDevice->pSpi->DMA |= dmaFlags;
1162  if( (hDevice->bDmaMode == true) && (pXfr->TransmitterBytes != 0u))
1163  {
1164 #if defined(ADI_SPI_MASTER_TIM_FOR_DMA) && (ADI_SPI_MASTER_TIM_FOR_DMA != 0u)
1165  /*set TIM bit to initiate transfer with a write to the SPI_TX register*/
1166  hDevice->pSpi->CTL |= BITM_SPI_CTL_TIM;
1167 #endif
1168  /* Spinning waiting for a potential timing window to close is required, but the spin loop needs to be guarded with an exit plan */
1169  /* After an arbitrary number of iterations (say 100) the window should be closed. */
1170  for( uint16_t numberOfLoops = 0u; numberOfLoops < 100u; numberOfLoops++)
1171  {
1172  /* wait until there is data in the TX FIFO before starting the transaction to avoid a potential race condition resulting in a TX underflow */
1173  /* The DMA controller and the SPI controller are on different clock domains, so wait until the DMA starts filling the FIFO before enabling the SPI */
1174  volatile uint16_t nFifoStatus = hDevice->pSpi->FIFO_STAT;
1175  /* calculate the number of bytes already written to tx fifo */
1176  uint16_t bytesInTxFifo = ((BITM_SPI_FIFO_STAT_TX & nFifoStatus) >> BITP_SPI_FIFO_STAT_TX);
1177  if( bytesInTxFifo != 0u)
1178  {
1179  /* There are bytes in Tx FIFO: the SPI transaction can be initiated */
1180  break;
1181  }
1182  }
1183  }
1184 
1185  }
1186 
1187  if((hDevice->pSpi->CTL & BITM_SPI_CTL_TIM) != BITM_SPI_CTL_TIM)
1188  {
1189  uint16_t byte ADI_UNUSED_ATTRIBUTE = hDevice->pSpi->RX;
1190  }
1191 
1192 
1193  NVIC_EnableIRQ(hDevice->pDevInfo->eIRQn);
1194 
1195  return;
1196 }
1197 
1213 ADI_SPI_RESULT adi_spi_GetBuffer (
1214  ADI_SPI_HANDLE const hDevice,
1215  uint32_t * const pHWErrors
1216  )
1217 {
1218 #ifdef ADI_DEBUG
1219  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
1220  {
1221  *pHWErrors = ADI_SPI_HW_ERROR_NONE;
1222  return ADI_SPI_INVALID_HANDLE;
1223  }
1224 #endif
1225 
1226  SEM_PEND(hDevice,ADI_SPI_SEMAPHORE_FAILED);
1227  *pHWErrors = hDevice->HWErrors;
1228  return(ADI_SPI_SUCCESS);
1229 }
1230 
1248 ADI_SPI_RESULT adi_spi_isBufferAvailable (ADI_SPI_CONST_HANDLE const hDevice, bool* const bComplete)
1249 {
1250 #ifdef ADI_DEBUG
1251  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
1252  {
1253  return ADI_SPI_INVALID_HANDLE;
1254  }
1255 #endif
1256 
1257  *bComplete = hDevice->bTransferComplete;
1258  return(ADI_SPI_SUCCESS);
1259 }
1260 
1291 ADI_SPI_RESULT adi_spi_SlaveSubmitBuffer (ADI_SPI_HANDLE const hDevice, const ADI_SPI_TRANSCEIVER* const pXfr)
1292 {
1293  volatile uint16_t ADI_UNUSED_ATTRIBUTE byte;
1294  uint32_t nCount = 0u;
1295 
1296 #ifdef ADI_DEBUG
1297  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
1298  {
1299  return ADI_SPI_INVALID_HANDLE;
1300  }
1301  if ((NULL == pXfr->pTransmitter) && (NULL == pXfr->pReceiver))
1302  {
1303  return ADI_SPI_INVALID_POINTER;
1304  }
1305 
1306  if ((0u == pXfr->pTransmitter) && (0u == pXfr->pReceiver) )
1307  {
1308  return ADI_SPI_INVALID_PARAM;
1309  }
1310  /* Return error if the RX buffer is not null and count is equal to zero or vice versa.*/
1311  if (((pXfr->pReceiver != NULL) && (pXfr->ReceiverBytes == 0u)) || ((pXfr->pReceiver == NULL) && ((pXfr->ReceiverBytes > 0u))))
1312  {
1313  return ADI_SPI_INVALID_PARAM;
1314  }
1315 
1316  /* Return error if the Tx buffer is not null and count is equal to zero or vice versa.*/
1317  if (((pXfr->pTransmitter != NULL) && (pXfr->TransmitterBytes == 0u)) || ((pXfr->pTransmitter == NULL) && (pXfr->TransmitterBytes > 0u)))
1318  {
1319  return ADI_SPI_INVALID_PARAM;
1320  }
1321 
1322  /* In DMA mode the transmit buffer must be 16-bit aligned, since the DMA engine performs 2-byte transfers */
1323  if ((pXfr->bDMA==true) && (pXfr->TransmitterBytes != 0u) &&(((uint32_t)pXfr->pTransmitter&0x1U) !=0u ) )
1324  {
1325  return ADI_SPI_INVALID_PARAM;
1326  }
1327 
1328 #endif /* ADI_DEBUG */
1329 
1330  /* Effectively flush the FIFOs before the start of the next transaction */
1331  hDevice->pSpi->CTL |= (BITM_SPI_CTL_RFLUSH|BITM_SPI_CTL_TFLUSH);
1332  hDevice->pSpi->CTL &= (uint16_t)~(BITM_SPI_CTL_RFLUSH|BITM_SPI_CTL_TFLUSH);
1333 
1334  /* Shut down any DMA enables that are still lingering from a prior transaction */
1335  hDevice->pSpi->DMA &= (uint16_t)~(BITM_SPI_DMA_EN | BITM_SPI_DMA_RXEN | BITM_SPI_DMA_TXEN);
1336 
1337  hDevice->bTransferComplete = false;
1338  hDevice->pTxBuffer = pXfr->pTransmitter;
1339  hDevice->pRxBuffer = pXfr->pReceiver;
1340  hDevice->TxRemaining = pXfr->TransmitterBytes;
1341  hDevice->RxRemaining = pXfr->ReceiverBytes;
1342  hDevice->TxIncrement = (uint8_t)pXfr->nTxIncrement;
1343  hDevice->RxIncrement = (uint8_t)pXfr->nRxIncrement;
1344  hDevice->bDmaMode = pXfr->bDMA;
1345  hDevice->bRdCtlMode = pXfr->bRD_CTL;
1346  hDevice->HWErrors = ADI_SPI_HW_ERROR_NONE;
1347 
1348 
1349  /* Configure SPI. First step is to clear CTL bits that may have been set previously */
1350  hDevice->pSpi->CTL &= (uint16_t)~(BITM_SPI_CTL_TIM | BITM_SPI_CTL_RFLUSH | BITM_SPI_CTL_TFLUSH | BITM_SPI_CTL_CON);
1351  if( hDevice->TxRemaining == 0u )
1352  {
1353  /* This will prevent TX underflow interrupts from occurring */
1354  hDevice->pSpi->CTL |= BITM_SPI_CTL_TFLUSH;
1355  }
1356  if( hDevice->RxRemaining == 0u )
1357  {
1358  /* This will prevent data from entering RX. Also prevents overflow interrupts from occurring */
1359  hDevice->pSpi->CTL |= BITM_SPI_CTL_RFLUSH;
1360 
1361  /* If SPI_CTL.TIM is set, the Tx FIFO status causes the interrupt. */
1362  if( hDevice->bDmaMode != true) {
1363  hDevice->pSpi->CTL |= BITM_SPI_CTL_TIM;
1364  }
1365 
1366  }
1367 
1368  hDevice->pSpi->CNT = (uint16_t)((hDevice->TxRemaining > hDevice->RxRemaining) ? hDevice->TxRemaining : hDevice->RxRemaining);
1369 
1370  uint16_t nDMAFlags = 0u;
1371 
1372  if( hDevice->bDmaMode == true)
1373  {
1374  uint16_t sz = pXfr->TransmitterBytes;
1375  if( sz )
1376  {
1377  uint16_t TxChanNum = hDevice->pDevInfo->dmaTxChannelNumber;
1378 
1379  /* Enable the interrupt for the given DMA */
1380  NVIC_EnableIRQ((IRQn_Type)(hDevice->pDevInfo->dmaTxIrqNumber));
1381 
1382  /* Disables source address decrement for TX channel */
1383  pADI_DMA0->SRCADDR_CLR = 1U << TxChanNum;
1384 
1385  /* Enable the channel */
1386  pADI_DMA0->EN_SET = 1U << TxChanNum;
1387 
1388  /* Enables SPI peripheral to generate DMA requests. */
1389  pADI_DMA0->RMSK_CLR = 1U << TxChanNum;
1390 
1391  /* Set the primary as the current DMA descriptor */
1392  pADI_DMA0->ALT_CLR = 1U << TxChanNum;
1393 
1394  /* fill in the DMA RAM descriptors */
1395  if( (sz & 1U) != 0u )
1396  {
1397  /* DMA is performed on 16-bit data. Make sure the DMA engine is properly aligned to even counts */
1398  /* The SPI_CNT register will hold the "real" transfer count */
1399  sz++;
1400  }
1401 
1402  pPrimaryCCD[TxChanNum].DMASRCEND = (uint32_t)(pXfr->pTransmitter + (sz - 2U));
1403 
1404  pPrimaryCCD[TxChanNum].DMADSTEND = (uint32_t)&hDevice->pSpi->TX;
1405 
1406  pPrimaryCCD[TxChanNum].DMACDC = ((uint32_t)ADI_DMA_INCR_NONE << DMA_BITP_CTL_DST_INC) |
1407  (ADI_DMA_INCR_2_BYTE << DMA_BITP_CTL_SRC_INC) |
1408  (ADI_DMA_WIDTH_2_BYTE << DMA_BITP_CTL_SRC_SIZE) |
1409  ((sz/2U -1U)<< DMA_BITP_CTL_N_MINUS_1) |
1410  (DMA_ENUM_CTL_CYCLE_CTL_BASIC << DMA_BITP_CTL_CYCLE_CTL);
1411 
1412  nDMAFlags |= (BITM_SPI_DMA_TXEN);
1413  }
1414 
1415  sz = pXfr->ReceiverBytes;
1416  if( sz )
1417  {
1418 
1419  uint16_t RxChanNum = hDevice->pDevInfo->dmaRxChannelNumber;
1420  NVIC_EnableIRQ((IRQn_Type)(hDevice->pDevInfo->dmaRxIrqNumber));
1421 
1422  /* Disables destination address decrement for RX channel */
1423  pADI_DMA0->DSTADDR_CLR = 1U << RxChanNum;
1424 
1425  /* Enable the channel */
1426  pADI_DMA0->EN_SET = 1U << RxChanNum;
1427 
1428  /* Enables SPI peripheral to generate DMA requests. */
1429  pADI_DMA0->RMSK_CLR = 1U << RxChanNum;
1430 
1431  /* Set the primary as the current DMA descriptor */
1432  pADI_DMA0->ALT_CLR = 1U << RxChanNum;
1433 
1434  if( (sz & 1U) != 0u )
1435  {
1436  /* DMA is performed on 16-bit data. Make sure the DMA engine is properly aligned to even counts */
1437  /* The SPI_CNT register will hold the "real" transfer count */
1438  sz++;
1439  }
1440 
1441  pPrimaryCCD[RxChanNum].DMASRCEND = (uint32_t)&hDevice->pSpi->RX;
1442 
1443  pPrimaryCCD[RxChanNum].DMADSTEND = (uint32_t)(pXfr->pReceiver + (sz - 2U));
1444 
1445  pPrimaryCCD[RxChanNum].DMACDC = (ADI_DMA_INCR_2_BYTE << DMA_BITP_CTL_DST_INC) |
1446  (ADI_DMA_INCR_NONE << DMA_BITP_CTL_SRC_INC) |
1447  (ADI_DMA_WIDTH_2_BYTE << DMA_BITP_CTL_SRC_SIZE) |
1448  ((sz/2U -1U) << DMA_BITP_CTL_N_MINUS_1) |
1449  (DMA_ENUM_CTL_CYCLE_CTL_BASIC << DMA_BITP_CTL_CYCLE_CTL);
1450 
1451  nDMAFlags |= (BITM_SPI_DMA_RXEN );
1452 
1453  }
1454  }
1455 
1456  /* Make sure XFRDONE is shut down. This IEN has no effect in slave mode */
1457  hDevice->pSpi->IEN &= (uint16_t)~BITM_SPI_IEN_XFRDONE;
1458 
1459  if( hDevice->bDmaMode == false) {
1460  /* Make sure we are not in continuous mode from a prior DMA transaction */
1461  hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_CON;
1462 
1463 
1464  /* interrupt mode: Enable the UNDERFLOW and OVERFLOW interrupts */
1465  /* XFRDONE is invalid in slave mode */
1466  uint16_t activeInterrupts = 0u;
1467  /* Enable underflow only if sending bytes */
1468  if( hDevice->TxRemaining ) {
1469  activeInterrupts |= BITM_SPI_IEN_TXUNDR;
1470  }
1471  /* Enable overflow only if receiving bytes */
1472  if( hDevice->RxRemaining ) {
1473  activeInterrupts |= BITM_SPI_IEN_RXOVR;
1474  }
1475  hDevice->pSpi->IEN |= activeInterrupts;
1476 
1477  /* interrupt mode: Fill in the FIFO and enable the TX by a dummy read. */
1478  while((nCount < ADI_SPI_FIFO_SIZE) && (hDevice->TxRemaining != 0u))
1479  {
1480  /* grab the lead byte */
1481  hDevice->pSpi->TX = *hDevice->pTxBuffer;
1482  /* modify tx pointer and buffer count prior to interrupt */
1483  hDevice->pTxBuffer += hDevice->TxIncrement;
1484  /* decrement the byte count */
1485  hDevice->TxRemaining--;
1486  nCount++;
1487  }
1488  } else {
1489 
1490  /* DMA mode. Enable the controller */
1491  hDevice->pSpi->DMA |= (uint16_t)(BITM_SPI_DMA_EN | nDMAFlags);
1492  }
1493 
1494  if((hDevice->pSpi->CTL & BITM_SPI_CTL_TIM) != BITM_SPI_CTL_TIM)
1495  {
1496  byte = hDevice->pSpi->RX;
1497  }
1498  NVIC_EnableIRQ(hDevice->pDevInfo->eIRQn);
1499 
1500  if (hDevice->bBlockingMode == true)
1501  {
1502  SEM_PEND(hDevice,ADI_SPI_SEMAPHORE_FAILED);
1503  }
1504 
1505  return ADI_SPI_SUCCESS;
1506 }
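Illustrative slave-mode use (editorial sketch, reusing the hSpi handle and xfr structure from the sketches above): configure the controller as a slave, submit a buffer, and poll adi_spi_isBufferAvailable() until the external master has clocked the data.

bool bComplete = false;

(void)adi_spi_SetMasterMode(hSpi, false);        /* operate as an SPI slave */
if (ADI_SPI_SUCCESS == adi_spi_SlaveSubmitBuffer(hSpi, &xfr))
{
    do
    {
        (void)adi_spi_isBufferAvailable(hSpi, &bComplete);
    } while (false == bComplete);
}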
1507 
1508 
1509 
1536 ADI_SPI_RESULT adi_spi_SlaveReadWrite (ADI_SPI_HANDLE const hDevice, const ADI_SPI_TRANSCEIVER* const pXfr)
1537 {
1538  ADI_SPI_RESULT eResult;
1539  hDevice->bBlockingMode = true;
1540  eResult = adi_spi_SlaveSubmitBuffer(hDevice,pXfr);
1541  hDevice->bBlockingMode = false;
1542  if( (eResult == ADI_SPI_SUCCESS) && (hDevice->HWErrors != 0u))
1543  {
1544  eResult = ADI_SPI_HW_ERROR_OCCURRED;
1545  }
1546  return(eResult);
1547 }
1548 
1549 /*
1550  *****************************************************************************
1551  * SPI Internal Static Support Functions
1552  *****************************************************************************/
1553 
1554 
1558 /*-----------------------------------------------------------------------------
1559  *
1560  * SPI ISR
1561  *
1562  *----------------------------------------------------------------------------*/
1563 
1564 static void common_SPI_Int_Handler (ADI_SPI_DEV_DATA_TYPE* pDD)
1565 {
1566  uint16_t nFifoStatus;
1567  uint16_t writableBytes;
1568  uint16_t readableBytes;
1569  bool terminate = false;
1570 
1571  /* read status register - first thing */
1572  uint16_t nErrorStatus = pDD->pSpi->STAT;
1573 
1574  uint16_t nErrorStatusMsk = 0u;
1575 
1576  /* Take a fresh snapshot of FIFO and Clear the RX/TX/IRQ interrupts, as we service those
1577  * here. */
1578  pDD->pSpi->STAT = nErrorStatus;
1579  nFifoStatus = pDD->pSpi->FIFO_STAT;
1580 
1581  /* Trap overflow/underflow errors and terminate the current transaction if there is an error. */
1582 #if !defined(ADI_SPI_TRAP_RXOVR) || (ADI_SPI_TRAP_RXOVR!=0)
1583  nErrorStatusMsk |= BITM_SPI_STAT_RXOVR;
1584 #endif
1585 
1586 #if !defined(ADI_SPI_TRAP_TXUNDR) || (ADI_SPI_TRAP_TXUNDR!=0)
1587  nErrorStatusMsk |= BITM_SPI_STAT_TXUNDR;
1588 #endif
1589 
1590  if(0u != (nErrorStatusMsk & nErrorStatus)) {
1591  /* Error condition. Handle each individually */
1592  if(BITM_SPI_STAT_TXUNDR == (BITM_SPI_STAT_TXUNDR & nErrorStatus)) {
1593  pDD->HWErrors |= (uint32_t)ADI_SPI_HW_ERROR_TX_UNDERFLOW;
1594  }
1595  if( BITM_SPI_STAT_RXOVR == (BITM_SPI_STAT_RXOVR & nErrorStatus)) {
1596  pDD->HWErrors |= (uint32_t)ADI_SPI_HW_ERROR_RX_OVERFLOW;
1597  }
1598 
1599  /* There's been an error, so shut it down */
1600  terminate = true;
1601  }
1602  else {
1603  /* calculate number of bytes that can be written to tx fifo */
1604  writableBytes = ADI_SPI_FIFO_SIZE - ((BITM_SPI_FIFO_STAT_TX & nFifoStatus) >> BITP_SPI_FIFO_STAT_TX);
1605  /* calculate number of bytes to read from rx fifo */
1606  readableBytes = ((BITM_SPI_FIFO_STAT_RX & nFifoStatus) >> BITP_SPI_FIFO_STAT_RX);
1607 
1608  /* fill tx fifo */
1609  while ((writableBytes != 0u) && (pDD->TxRemaining != 0u))
1610  {
1611  pDD->pSpi->TX = *pDD->pTxBuffer;
1612  pDD->pTxBuffer += pDD->TxIncrement;
1613  pDD->TxRemaining--;
1614  writableBytes--;
1615  }
1616 
1617  /*
1618  * Now focus on the RX FIFO but only if we are not in RD_CTL mode OR, if we
1619  * are in RD_CTL mode, TX bytes are all transmitted
1620  */
1621  if( (pDD->bRdCtlMode==false) || (pDD->TxRemaining==0u) )
1622  {
1623  /* empty rx fifo */
1624  while ((readableBytes != 0u) &&(pDD->RxRemaining != 0u))
1625  {
1626  *pDD->pRxBuffer = (uint8_t) pDD->pSpi->RX;
1627  pDD->pRxBuffer += pDD->RxIncrement;
1628  pDD->RxRemaining--;
1629  readableBytes--;
1630  }
1631  }
1632  }
1633 
1634  /* Terminate the transaction and notify the caller
1635  * 1) Master mode: If there are no more bytes to RX or TX and XFRDONE is set
1636  * 2) Slave mode: If there are no more bytes to RX or TX (XFRDONE is invalid in slave mode)
1637  * 3) If there was a HW error
1638  */
1639  if( (pDD->RxRemaining == 0u) && (pDD->TxRemaining == 0u))
1640  {
1641  if( BITM_SPI_CTL_MASEN == (pDD->pSpi->CTL & BITM_SPI_CTL_MASEN ))
1642  {
1643  /* Master mode */
1644  /* On handler entry we may have had a transfer complete state, so we check
1645  * if that has happened. Also, between clearing the int status and reading the
1646  * FIFO state, there is a slim chance that the transfer has completed. So,
1647  * we check both for absolute completeness */
1648  if((BITM_SPI_STAT_XFRDONE == (nErrorStatus & BITM_SPI_STAT_XFRDONE)) ||
1649  (BITM_SPI_STAT_XFRDONE == (pDD->pSpi->STAT & BITM_SPI_STAT_XFRDONE)) )
1650  {
1651  /* Master mode XFRDONE */
1652  terminate = true;
1653 
1654  /* Disable SPI DMA AFTER XFRDONE has been serviced */
1655  pDD->pSpi->DMA &= (uint16_t)~(BITM_SPI_DMA_EN);
1656  }
1657  } else {
1658  /* Slave mode - we're all done here */
1659  terminate = true;
1660 
1661  /* Disable SPI DMA */
1662  pDD->pSpi->DMA &= (uint16_t)~(BITM_SPI_DMA_EN);
1663  }
1664  }
1665 
1666  /* We terminate if:
1667  - There has been an error on entry to this handler
1668  - Data has transferred successfully.
1669  If an error occurs while this handler is executing (data can still transfer
1670  and an error can still occur during the handler), the interrupt will be
1671  raised again, we re-enter the handler, and the error is handled above. */
1672  if( terminate )
1673  {
1674 
1675  /* Clear possible interrupt sources: XFRDONE and underflow and overflow */
1676  pDD->pSpi->IEN &= ~(BITM_SPI_IEN_XFRDONE|BITM_SPI_IEN_RXOVR|BITM_SPI_IEN_TXUNDR);
1677  pDD->bTransferComplete = true;
1678  NVIC_DisableIRQ(pDD->pDevInfo->eIRQn);
1679 
1680  /* Everything done, now just clear the STAT register */
1681  nErrorStatus = pDD->pSpi->STAT;
1682  pDD->pSpi->STAT = nErrorStatus;
1683 
1684  /* If a callback is registered notify the buffer processed event to the application */
1685  if(NULL != pDD->pfCallback ){
1686  pDD->pfCallback(pDD->pCBParam, pDD->HWErrors, NULL);
1687  }
1688  else
1689  {
1690  SEM_POST(pDD);
1691  }
1692  }
1693 
1694 #if defined(ADI_CYCLECOUNT_SPI_ISR_ENABLED) && (ADI_CYCLECOUNT_SPI_ISR_ENABLED == 1u)
1695  ADI_CYCLECOUNT_STORE(ADI_CYCLECOUNT_ISR_SPI);
1696 #endif
1697 
1698 }
1699 
1700 
1701 /* Internal DMA Callback for receiving DMA faults from common DMA error handler. */
1702 static void RxDmaErrorCallback(void *pCBParam, uint32_t Event, void *pArg) {
1703 
1704  /* Recover the device handle. */
1705  ADI_SPI_HANDLE hDevice = (ADI_SPI_HANDLE) pCBParam;
1706 
1707  /* Save the DMA error. */
1708  switch (Event) {
1709  case ADI_DMA_EVENT_ERR_BUS:
1710  hDevice->HWErrors |= ADI_SPI_HW_ERROR_RX_CHAN_DMA_BUS_FAULT;
 1711  break;
 1712  case ADI_DMA_EVENT_ERR_INVALID_DESCRIPTOR:
 1713  hDevice->HWErrors |= ADI_SPI_HW_ERROR_RX_CHAN_DMA_INVALID_DESCR;
1714  break;
1715  default:
1716  hDevice->HWErrors |= ADI_SPI_HW_ERROR_RX_CHAN_DMA_UNKNOWN_ERROR;
1717  break;
1718  }
1719 
1720  /* If a callback is registered notify the buffer processed event to the application */
1721  if(NULL != hDevice->pfCallback ){
1722  hDevice->pfCallback(hDevice->pCBParam, hDevice->HWErrors, NULL);
1723  }
1724  else
1725  {
1726  SEM_POST(hDevice);
1727  }
1728 }
1729 
1730 
1731 /* Internal DMA Callback for receiving DMA faults from common DMA error handler. */
1732 static void TxDmaErrorCallback(void *pCBParam, uint32_t Event, void *pArg) {
1733 
1734  /* Recover the device handle. */
1735  ADI_SPI_HANDLE hDevice = (ADI_SPI_HANDLE) pArg;
1736 
1737  /* Save the DMA error. */
1738  switch (Event) {
1739  case ADI_DMA_EVENT_ERR_BUS:
1740  hDevice->HWErrors |= ADI_SPI_HW_ERROR_TX_CHAN_DMA_BUS_FAULT;
 1741  break;
 1742  case ADI_DMA_EVENT_ERR_INVALID_DESCRIPTOR:
 1743  hDevice->HWErrors |= ADI_SPI_HW_ERROR_TX_CHAN_DMA_INVALID_DESCR;
1744  break;
1745  default:
1746  hDevice->HWErrors |= ADI_SPI_HW_ERROR_TX_CHAN_DMA_UNKNOWN_ERROR;
1747  break;
1748  }
1749 
1750  /* If a callback is registered notify the buffer processed event to the application */
1751  if(NULL != hDevice->pfCallback ){
1752  hDevice->pfCallback(hDevice->pCBParam, hDevice->HWErrors, NULL);
1753  }
1754  else
1755  {
1756  SEM_POST(hDevice);
1757  }
1758 }
1759 
1760 
1768 void SPI0_Int_Handler(void) {
1769  ISR_PROLOG();
1770  common_SPI_Int_Handler(spi_device_info[0].hDevice );
1771  ISR_EPILOG();
1772 }
1773 
1774 
1782 void SPI1_Int_Handler(void) {
1783  ISR_PROLOG();
1784  common_SPI_Int_Handler(spi_device_info[1].hDevice);
1785  ISR_EPILOG();
1786 }
1787 
1795 void SPI2_Int_Handler(void) {
1796  ISR_PROLOG();
1797  common_SPI_Int_Handler(spi_device_info[2].hDevice );
1798  ISR_EPILOG();
1799 }
1800 
1801 
1802 /*
1806 */
1807 
1808 
1809 /*
1810  * SPI DMA interrupt handlers
1811  */
1812 
1813 static void common_spi_DMA_Tx(ADI_SPI_HANDLE pDD, const int masterMode)
1814 {
1815  pDD->TxRemaining = 0u;
1816  pDD->pSpi->DMA &= (uint16_t)~(BITM_SPI_DMA_TXEN);
1817  if (0u == pDD->RxRemaining)
1818  {
1819  if (masterMode)
1820  {
1827  pDD->pSpi->IEN |= BITM_SPI_IEN_XFRDONE;
1828  }else{
1829  pDD->pSpi->IEN &= ~(BITM_SPI_IEN_XFRDONE|BITM_SPI_IEN_RXOVR|BITM_SPI_IEN_TXUNDR);
1830 
1831  /* If a callback is registered notify the buffer processed event to the application */
1832  if(NULL != pDD->pfCallback ){
1833  pDD->pfCallback(pDD->pCBParam, pDD->HWErrors, NULL);
1834  }
1835  else
1836  {
1837  SEM_POST(pDD);
1838  }
1839  }
1840  }
1841 }
1842 
1843 static void common_spi_DMA_Rx(ADI_SPI_HANDLE pDD, const int masterMode)
1844 {
1845  pDD->RxRemaining = 0u;
1846  pDD->pSpi->DMA &= (uint16_t)~(BITM_SPI_DMA_RXEN);
1847 
1848  /* Enable flushing of the Rx FIFO */
1849  pDD->pSpi->CTL |= (uint16_t)(BITM_SPI_CTL_RFLUSH);
1850 
1851  if (0u == pDD->TxRemaining)
1852  {
1853  if (masterMode)
1854  {
1861  pDD->pSpi->IEN |= BITM_SPI_IEN_XFRDONE;
1862  }else{
1863  pDD->pSpi->IEN &= ~(BITM_SPI_IEN_XFRDONE|BITM_SPI_IEN_RXOVR|BITM_SPI_IEN_TXUNDR);
1864  pDD->pSpi->DMA &= (uint16_t)~(BITM_SPI_DMA_EN);
1865 
1866  /* If a callback is registered notify the buffer processed event to the application */
1867  pDD->bTransferComplete = true;
1868  NVIC_DisableIRQ(pDD->pDevInfo->eIRQn);
1869  if(NULL != pDD->pfCallback ){
1870  pDD->pfCallback(pDD->pCBParam, pDD->HWErrors, NULL);
1871  }
1872  else
1873  {
1874  SEM_POST(pDD);
1875  }
1876  }
1877  }
1878 }
1879 
1880 
1881 void DMA_SPI0_TX_Int_Handler(void)
1882 {
1883  ISR_PROLOG();
1884  common_spi_DMA_Tx(spi_device_info[0].hDevice,(ADI_SPI0_MASTER_MODE==1u));
1885  ISR_EPILOG();
1886 }
1887 
1888 void DMA_SPI0_RX_Int_Handler(void)
1889 {
1890  ISR_PROLOG();
1891  common_spi_DMA_Rx(spi_device_info[0].hDevice,(ADI_SPI0_MASTER_MODE==1u));
1892  ISR_EPILOG();
1893 }
1894 
1895 void DMA_SPI1_TX_Int_Handler(void)
1896 {
1897  ISR_PROLOG();
1898  common_spi_DMA_Tx(spi_device_info[1].hDevice,(ADI_SPI1_MASTER_MODE==1u));
1899  ISR_EPILOG();
1900 }
1901 
1902 
1903 void DMA_SPI1_RX_Int_Handler(void)
1904 {
1905  ISR_PROLOG();
1906  common_spi_DMA_Rx(spi_device_info[1].hDevice,(ADI_SPI1_MASTER_MODE==1u));
1907  ISR_EPILOG();
1908 }
1909 
1910 void DMA_SPIH_TX_Int_Handler(void)
1911 {
1912  ISR_PROLOG();
1913  common_spi_DMA_Tx(spi_device_info[2].hDevice,(ADI_SPI2_MASTER_MODE==1u));
1914  ISR_EPILOG();
1915 }
1916 
1917 void DMA_SPIH_RX_Int_Handler(void)
1918 {
1919  ISR_PROLOG();
1920  common_spi_DMA_Rx(spi_device_info[2].hDevice,(ADI_SPI2_MASTER_MODE==1u));
1921  ISR_EPILOG();
1922 }
1923 
1927 /* @} */
1928 