ADuCM302x Device Drivers API Reference Manual  Release 3.1.2.0
adi_spi.c
1 
70 #include <adi_processor.h>
73 #include <stdlib.h> /* for 'NULL" definition */
74 #include <string.h>
75 
76 #include <drivers/spi/adi_spi.h>
77 #include <drivers/pwr/adi_pwr.h>
78 #include <drivers/general/adi_drivers_general.h>
79 #include <adi_callback.h>
80 #include <rtos_map/adi_rtos_map.h>
81 #include "adi_spi_config.h"
82 #include <adi_cyclecount.h>
83 
84 
85 #ifdef __ICCARM__
86 /*
87 * IAR MISRA C 2004 error suppressions.
88 *
89 * Pm123 (rule 8.5): there shall be no definition of objects or functions in a header file
90 * This isn't a header as such.
91 *
92 * Pm073 (rule 14.7): a function should have a single point of exit
93 * Pm143 (rule 14.7): a function should have a single point of exit at the end of the function
94 * Multiple returns are used for error handling.
95 *
96 * Pm088 (rule 17.4): pointer arithmetic should not be used.
97 * Relying on pointer arithmetic for buffer handling.
98 *
99 * Pm152: (MISRA C 2004 rule 17.4) array indexing shall only be applied to objects defined as an array type
100 * Accessing the DMA descriptors, which are defined in the system as a pointer to an array of descriptors
101 *
102 * Pm151 (rule 17.4): array indexing shall only be applied to objects of array type
103 * Pm123 (rule 18.5): there shall be no definition of objects in a header file
104 *
* Pm050: (MISRA C 2004 rule 14.3) a null statement shall only occur on a line by itself, and shall not have any other text on the same line
* Some macros, such as ISR_PROLOGUE, may not have any expansion, resulting in just the terminating ';'
107 *
108 *Pm140: (MISRA C 2004 rule 11.3) a cast should not be performed between a pointer type and an integral type
109 * MMR addresses are defined as simple constants. Accessing the MMR requires casting to a pointer type
110 *
111 * Pm031: (MISRA C 2004 rule 12.7) bitwise operations shall not be performed on signed integer types
112 * MMR macros are beyond the control of the driver.
113 *
114 */
115 #pragma diag_suppress=Pm050,Pm073,Pm088,Pm123,Pm143,Pm152,Pm140,Pm031
116 
117 #endif /* __ICCARM__ */
118 
119 #include "adi_spi_data.c"
120 
123 /* handle checker for debug mode */
124 #define ADI_SPI_VALIDATE_HANDLE(h) ((spi_device_info[0].hDevice != (h)) && (spi_device_info[1].hDevice != (h)) && (spi_device_info[2].hDevice != (h)))
125 
128 /*
129  * Local prototypes
130  */
131 static void common_SPI_Int_Handler (ADI_SPI_DEV_DATA_TYPE* pDD);
132 static void StartTransaction (ADI_SPI_HANDLE const hDevice, const ADI_SPI_TRANSCEIVER* const pXfr);
133 static void TxDmaErrorCallback (void *pCBParam, uint32_t Event, void *pArg);
134 static void RxDmaErrorCallback (void *pCBParam, uint32_t Event, void *pArg);
135 
136 /* ISR forward declarations */
138 void SPI0_Int_Handler(void);
139 void SPI1_Int_Handler(void);
140 void SPI2_Int_Handler(void);
141 void DMA_SPI0_TX_Int_Handler(void);
142 void DMA_SPI0_RX_Int_Handler(void);
143 void DMA_SPI1_TX_Int_Handler(void);
144 void DMA_SPI1_RX_Int_Handler(void);
145 void DMA_SPIH_TX_Int_Handler(void);
146 void DMA_SPIH_RX_Int_Handler(void);
149 /*
153 */
154 
184 ADI_SPI_RESULT adi_spi_Open(uint32_t nDeviceNum,
185  void *pDevMemory,
186  uint32_t nMemorySize,
187  ADI_SPI_HANDLE* const phDevice)
188 {
189 
190 #ifdef ADI_DEBUG
191 
192  if (nDeviceNum >= ADI_SPI_NUM_INSTANCES)
193  {
195  }
196 
197  if (nMemorySize != sizeof(struct __ADI_SPI_DEV_DATA_TYPE))
198  {
199  return ADI_SPI_INVALID_PARAM;
200  }
201 
202  if( spi_device_info[nDeviceNum].hDevice != NULL )
203  {
204  return ADI_SPI_IN_USE;
205  }
206 
207 #endif
208 
209  ADI_SPI_HANDLE hDevice = pDevMemory;
210 
211  /*
212  * Link the two data structures together.
213  *
214  * ADI_SPI_DEVICE_INFO <==> ADI_SPI_HANDLE
215  *
216  * Clear the ADI_SPI_HANDLE memory. This also sets all bool
217  * structure members to false so we do not need to waste cycles
218  * setting these explicitly (e.g. hDevice->bDMA = false)
219  *
220  * Other fields, such as callback related fields, are also zeroed
221  * and therefore properly initialized.
222  */
223 
224  memset(pDevMemory,0,nMemorySize);
225  hDevice->pDevInfo = &spi_device_info[nDeviceNum];
226  spi_device_info[nDeviceNum].hDevice = (ADI_SPI_DEV_DATA_TYPE *)pDevMemory;
227 
228 
229  /*
230  * Although the ADI_SPI_DEVICE_INFO struct has the address of the SPI registers
231  * for this instance, copying it to the ADI_SPI_HANDLE struct will minimize
232  * the runtime footprint and cycle count when accessing the SPI registers
233  */
234  hDevice->pSpi = spi_device_info[nDeviceNum].pSpiRegs;
235 
236  SEM_CREATE(hDevice, "SPI_SEM", ADI_SPI_SEMAPHORE_FAILED);
237 
238  /* Static Configuration */
239  /* Initialize the device based on the given configuration parameters */
240  ADI_SPI_CFG_TYPE const* pSPICfg = &gSPICfg[nDeviceNum];
241  hDevice->pSpi->CTL = pSPICfg->SPI_CTL;
242  hDevice->pSpi->DIV = pSPICfg->SPI_DIV;
243 
244  /* write the device data pointer into the caller's handle */
245  *phDevice = hDevice;
246  hDevice->pSpi->CTL |= BITM_SPI_CTL_SPIEN;
247 
248  /* Make sure the DMA controller and its SRAM based descriptors are initialized */
249  adi_dma_Init();
250 
251  /* Setup the DMA TX callback */
252  if (ADI_DMA_SUCCESS != adi_dma_RegisterCallback((DMA_CHANn_TypeDef) hDevice->pDevInfo->dmaTxChannelNumber, TxDmaErrorCallback, (void *) hDevice))
253  {
254  return ADI_SPI_DMA_REG_FAILED;
255  }
256 
257  /* Setup the DMA RX callback */
258  if (ADI_DMA_SUCCESS != adi_dma_RegisterCallback((DMA_CHANn_TypeDef) hDevice->pDevInfo->dmaRxChannelNumber, RxDmaErrorCallback, (void *) hDevice))
259  {
260  return ADI_SPI_DMA_REG_FAILED;
261  }
262 
263  return ADI_SPI_SUCCESS;
264 }
265 
266 
/* NOTE(review): the signature line was lost in extraction; from the body this
 * is presumably adi_spi_Close(ADI_SPI_HANDLE const hDevice) — TODO confirm
 * against the full source. 'result' is returned below without a visible
 * declaration; presumably 'ADI_SPI_RESULT result = ADI_SPI_SUCCESS;' was on a
 * dropped line — verify. Tears down one SPI instance: disables its interrupt,
 * destroys the driver semaphore, and marks the instance slot free. */
{

#ifdef ADI_DEBUG
    if (ADI_SPI_VALIDATE_HANDLE(hDevice))
    {
        return ADI_SPI_INVALID_HANDLE;
    }

#endif

    /* disable the SPI interrupt before tearing down driver state */
    NVIC_DisableIRQ(hDevice->pDevInfo->eIRQn);

    /* destroy semaphore */
    SEM_DELETE((ADI_SPI_HANDLE) hDevice,ADI_SPI_SEMAPHORE_FAILED);

    /* invalidate initialization state so adi_spi_Open() can reuse this slot */
    hDevice->pDevInfo->hDevice = NULL;
    return result;
}
304 
305 
319 ADI_SPI_RESULT adi_spi_RegisterCallback (ADI_SPI_HANDLE const hDevice, ADI_CALLBACK const pfCallback, void *const pCBParam )
320 {
321 #ifdef ADI_DEBUG
322  if (ADI_SPI_VALIDATE_HANDLE(hDevice)) {
323  return ADI_SPI_INVALID_HANDLE;
324  }
325 
326 #endif
327  /* Save the application provided callback and callback parameters */
328  hDevice->pfCallback = pfCallback;
329  hDevice->pCBParam = pCBParam;
330 
331  return ADI_SPI_SUCCESS;
332 }
333 
367 ADI_SPI_RESULT adi_spi_SetIrqmode (ADI_SPI_CONST_HANDLE const hDevice, const uint8_t nMode)
368 {
369 
370 #ifdef ADI_DEBUG
371  if (ADI_SPI_VALIDATE_HANDLE(hDevice)) {
372  return ADI_SPI_INVALID_HANDLE;
373  }
374 
375  if (nMode > ADI_SPI_IRQ_PARAM) {
376  return ADI_SPI_INVALID_PARAM;
377  }
378 
379 #endif
380 
381  uint16_t ien = hDevice->pSpi->IEN;
382  ien = ien & (uint16_t)~BITM_SPI_IEN_IRQMODE;
383  ien = ien | (nMode & BITM_SPI_IEN_IRQMODE);
384  hDevice->pSpi->IEN = ien;
385 
386  return ADI_SPI_SUCCESS;
387 }
388 
389 
408 {
409 
410 #ifdef ADI_DEBUG
411  if (ADI_SPI_VALIDATE_HANDLE(hDevice)) {
412  return ADI_SPI_INVALID_HANDLE;
413  }
414 
415 #endif
416 
417  if (true == bFlag) {
418  hDevice->pSpi->CTL |= (BITM_SPI_CTL_CON);
419  } else {
420  hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_CON;
421  }
422 
423  return ADI_SPI_SUCCESS;
424 }
425 
441 ADI_SPI_RESULT adi_spi_SetLoopback (ADI_SPI_CONST_HANDLE const hDevice, const bool bFlag)
442 {
443 
444 #ifdef ADI_DEBUG
445  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
446  {
447  return ADI_SPI_INVALID_HANDLE;
448  }
449 
450 #endif
451 
452  if (true == bFlag) {
453  hDevice->pSpi->CTL |= (BITM_SPI_CTL_LOOPBACK);
454  } else {
455  hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_LOOPBACK;
456  }
457 
458  return ADI_SPI_SUCCESS;
459 }
460 
/* NOTE(review): the signature line was lost in extraction; the body switches
 * between ADI_SPI_MASTERCON_INITIALIZER and ADI_SPI_SLAVECON_INITIALIZER, so
 * this is presumably adi_spi_SetMasterMode(ADI_SPI_CONST_HANDLE const
 * hDevice, const bool bFlag) — TODO confirm against the full source.
 * The mode switch is done inside a critical region because it performs
 * multiple dependent register writes. */
{

#ifdef ADI_DEBUG
    if (ADI_SPI_VALIDATE_HANDLE(hDevice))
    {
        return ADI_SPI_INVALID_HANDLE;
    }

#endif
    ADI_INT_STATUS_ALLOC();
    ADI_ENTER_CRITICAL_REGION();
    if (true == bFlag) { /* hardware default: master mode */
        hDevice->pSpi->CTL |= (ADI_SPI_MASTERCON_INITIALIZER);
    } else {
        /* slave mode: clear the byte counter and the master-enable bit
         * before applying the slave configuration */
        hDevice->pSpi->CNT = 0u;
        hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_MASEN;
        hDevice->pSpi->CTL |= (ADI_SPI_SLAVECON_INITIALIZER);
    }
    ADI_EXIT_CRITICAL_REGION();
    return ADI_SPI_SUCCESS;
}
498 
499 
/* NOTE(review): the signature line was lost in extraction; the body toggles
 * SPI_CTL.RXOF (RX overflow overwrite behavior) from a bool, so this is
 * presumably an adi_spi_Set...(ADI_SPI_CONST_HANDLE const hDevice,
 * const bool bFlag) accessor for that bit — TODO confirm the exact API name
 * against the full source. */
{

#ifdef ADI_DEBUG
    if (ADI_SPI_VALIDATE_HANDLE(hDevice))
    {
        return ADI_SPI_INVALID_HANDLE;
    }

#endif

    /* Set or clear the RX-overflow (RXOF) control bit. */
    if (true == bFlag) {
        hDevice->pSpi->CTL |= (BITM_SPI_CTL_RXOF);
    } else {
        hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_RXOF;
    }

    return ADI_SPI_SUCCESS;
}
538 
539 
/* NOTE(review): the signature line was lost in extraction; the body toggles
 * SPI_CTL.ZEN (transmit zeros on TX-FIFO underflow) from a bool, so this is
 * presumably adi_spi_SetTransmitUnderflow(ADI_SPI_CONST_HANDLE const hDevice,
 * const bool bFlag) — TODO confirm against the full source. */
{

#ifdef ADI_DEBUG
    if (ADI_SPI_VALIDATE_HANDLE(hDevice))
    {
        return ADI_SPI_INVALID_HANDLE;
    }
#endif

    /* Set or clear the zero-on-underflow (ZEN) control bit. */
    if (true == bFlag) {
        hDevice->pSpi->CTL |= (BITM_SPI_CTL_ZEN);
    } else {
        hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_ZEN;
    }

    return ADI_SPI_SUCCESS;
}
575 
576 
577 
578 
579 
580 
599 ADI_SPI_RESULT adi_spi_SetBitrate (ADI_SPI_CONST_HANDLE const hDevice, const uint32_t Hertz)
600 {
601  uint32_t incoming_clock;
602  uint16_t Div;
603 
604 #ifdef ADI_DEBUG
605  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
606  {
607  return ADI_SPI_INVALID_HANDLE;
608  }
609 #endif
610 
612  {
613  return ADI_SPI_INVALID_HANDLE;
614  }
615 
616  /* requested rate needs to be 2x or less than incoming clock */
617  if ((2U * Hertz) > incoming_clock)
618  {
619  return ADI_SPI_BAD_SYS_CLOCK;
620  }
621 
622  /* compute the SPI divider value */
623  Div = (uint16_t) ((incoming_clock / Hertz) >> 1U) - 1U; /* '>>1' is really a divide by 2 */
624 
625  /* range check that computed divider fits */
626  if (Div != (Div & BITM_SPI_DIV_VALUE))
627  {
628  return ADI_SPI_INVALID_PARAM;
629  }
630 
631  /* store it in core */
632  hDevice->pSpi->DIV = Div;
633 
634  return ADI_SPI_SUCCESS;
635 }
636 
637 
656 ADI_SPI_RESULT adi_spi_GetBitrate (ADI_SPI_CONST_HANDLE const hDevice, uint32_t* const pnBitrate)
657 {
658  uint32_t incoming_clock;
659  ADI_PWR_RESULT ePwrResult;
660  uint32_t Div;
661 
662 #ifdef ADI_DEBUG
663  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
664  {
665  return ADI_SPI_INVALID_HANDLE;
666  }
667 #endif
668  Div = hDevice->pSpi->DIV; /* assumes this is always a right-justified value */
669 
670  ePwrResult = adi_pwr_GetClockFrequency(ADI_CLOCK_PCLK, &incoming_clock);
671  if(ePwrResult != ADI_PWR_SUCCESS)
672  {
673  *pnBitrate= 0u;
674  return(ADI_SPI_FAILURE);
675  }
676  *pnBitrate= (incoming_clock / (Div + 1U)) >> 1U; /* '>>1' is divide by 2 */
677  return(ADI_SPI_SUCCESS);
678 
679 }
680 
710 ADI_SPI_RESULT adi_spi_SetClockPolarity (ADI_SPI_HANDLE const hDevice, const bool bFlag)
711 {
712 
713 #ifdef ADI_DEBUG
714  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
715  {
716  return ADI_SPI_INVALID_HANDLE;
717  }
718 #endif
719 
720  if (true == bFlag) {
721  hDevice->pSpi->CTL |= (BITM_SPI_CTL_CPOL);
722  } else {
723  hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_CPOL;
724  }
725 
726  return ADI_SPI_SUCCESS;
727 }
728 
/* NOTE(review): the signature line was lost in extraction; the body stores an
 * 'eChipSelect' value into the handle, so this is presumably
 * adi_spi_SetChipSelect(ADI_SPI_HANDLE const hDevice,
 * const ADI_SPI_CHIP_SELECT eChipSelect) — TODO confirm against the full
 * source. The value is only latched here; it is written to the CS_CTL
 * register when a transaction is submitted. */
{

#ifdef ADI_DEBUG
    if (ADI_SPI_VALIDATE_HANDLE(hDevice))
    {
        return ADI_SPI_INVALID_HANDLE;
    }
#endif

    hDevice->ChipSelect = eChipSelect;

    return ADI_SPI_SUCCESS;
}
759 
789 ADI_SPI_RESULT adi_spi_SetClockPhase (ADI_SPI_HANDLE const hDevice, const bool bFlag)
790 {
791 
792 #ifdef ADI_DEBUG
793  if (ADI_SPI_VALIDATE_HANDLE(hDevice))
794  {
795  return ADI_SPI_INVALID_HANDLE;
796  }
797 #endif
798 
799  if (true == bFlag) {
800  hDevice->pSpi->CTL |= (BITM_SPI_CTL_CPHA);
801  } else {
802  hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_CPHA;
803  }
804 
805  return ADI_SPI_SUCCESS;
806 }
807 
/* NOTE(review): the signature line was lost in extraction; the body is the
 * blocking wrapper around adi_spi_MasterSubmitBuffer(), so this is presumably
 * adi_spi_MasterReadWrite(ADI_SPI_HANDLE const hDevice,
 * const ADI_SPI_TRANSCEIVER* const pXfr) — TODO confirm against the full
 * source. Submits the transaction in blocking mode and maps any recorded
 * hardware errors to ADI_SPI_HW_ERROR_OCCURRED. */
{
    ADI_SPI_RESULT eResult;
    /* Force the submit call to pend until the transaction completes. */
    hDevice->bBlockingMode = true;
    eResult = adi_spi_MasterSubmitBuffer(hDevice,pXfr);
    hDevice->bBlockingMode = false;
    /* Surface hardware errors recorded by the ISR during the transfer. */
    if( (eResult == ADI_SPI_SUCCESS) && (hDevice->HWErrors != 0u))
    {
        eResult = ADI_SPI_HW_ERROR_OCCURRED;
    }
    return(eResult);
}
846 
/* NOTE(review): the signature line was lost in extraction; from the body and
 * the wrapper above this is presumably
 * adi_spi_MasterSubmitBuffer(ADI_SPI_HANDLE const hDevice,
 * ADI_SPI_TRANSCEIVER* const pXfr) — TODO confirm against the full source.
 * Also note: 'result' is returned at the end without a visible declaration;
 * presumably 'ADI_SPI_RESULT result = ADI_SPI_SUCCESS;' was on a dropped
 * line — verify.
 *
 * Submits a master-mode transaction described by pXfr: copies the transfer
 * parameters into the handle, configures interrupt enables for DMA or PIO
 * operation, clears residual status, selects the chip, and starts the
 * transfer via StartTransaction(). Pends on the driver semaphore when
 * blocking mode is set. */
{
    volatile uint16_t nStatus;

#ifdef ADI_DEBUG
    if (ADI_SPI_VALIDATE_HANDLE(hDevice))
    {
        return ADI_SPI_INVALID_HANDLE;
    }

    if ((NULL == pXfr->pTransmitter) && (NULL == pXfr->pReceiver))
    {
        /* NOTE(review): empty body — the error return (presumably
         * 'return ADI_SPI_INVALID_POINTER;') was lost in extraction.
         * As written, a doubly-NULL transceiver is NOT rejected. */
    }

    /* RD_CTL (half-duplex) limits the command/TX phase to 16 bytes. */
    if( (pXfr->bRD_CTL == true) && (pXfr->TransmitterBytes > 16u))
    {
        return ADI_SPI_INVALID_PARAM;
    }

#endif /* ADI_DEBUG */

    /* Initialize the transaction. 'hDevice' must hold the transaction values as pXfr is owned by the application */
    hDevice->pTxBuffer = pXfr->pTransmitter;
    hDevice->pRxBuffer = pXfr->pReceiver;
    hDevice->TxRemaining = pXfr->TransmitterBytes;
    hDevice->RxRemaining = pXfr->ReceiverBytes;
    hDevice->TxIncrement = (uint8_t)pXfr->nTxIncrement;
    hDevice->RxIncrement = (uint8_t)pXfr->nRxIncrement;
    hDevice->bDmaMode = pXfr->bDMA;
    hDevice->bRdCtlMode = pXfr->bRD_CTL;
    hDevice->bTransferComplete = false;
    hDevice->HWErrors = ADI_SPI_HW_ERROR_NONE;

    /*
     * TIM
     * If set: initiate transfer with write to SPI_TX register
     * If clear: initiate transfer with a read from SPI_RX register
     *
     * RFLUSH
     * Clear this bit so that incoming data is NOT ignored
     *
     * TFLUSH
     * Clear this bit so that transmitted data is not forced to zero
     * (if SPI_CTL.ZEN is set) or to the last transmitted byte
     */
    hDevice->pSpi->CTL &= (uint16_t)~(BITM_SPI_CTL_TIM | BITM_SPI_CTL_RFLUSH | BITM_SPI_CTL_TFLUSH);

    /*
     * If in DMA mode then make sure XFRDONE interrupt is not set. DMA mode will generate three interrupts
     * TX DMA, RX DMA, XFRDONE.
     *
     * There is a race condition between XFRDONE and DMA interrupts. They are on different clocks.
     *
     * SPI XfrDone is counted on SPI clock (SCL) edge, which is a fixed timing related to SPI bit protocol.
     * But the DMA works upon system clock (HCLK) and it could finish on various timing upon SCL/HCLK ratio
     * and bus bandwidth (e.g., DMA hold off until processor frees up the bus). So SPI RX DMA done interrupt
     * could be issued earlier or later than SPI XferDone interrupt.
     */
    if( hDevice->bDmaMode==true ) {
        /* The race condition has been between RX and XFRDONE. If there are no bytes to receive then */
        /* do not clear XFRDONE */
        if( hDevice->RxRemaining != 0u) {
            hDevice->pSpi->IEN &= (uint16_t)~(BITM_SPI_IEN_XFRDONE);
        } else {
            hDevice->pSpi->IEN |= (BITM_SPI_IEN_XFRDONE);
        }

    } else {

        /* In interrupt mode always enable XFRDONE */
        uint16_t activeInterrupts = BITM_SPI_IEN_XFRDONE;
        /* Enable underflow only if sending bytes */
        if( hDevice->TxRemaining ) {
            activeInterrupts |= BITM_SPI_IEN_TXUNDR;
        }
        /* Enable overflow only if receiving bytes */
        if( hDevice->RxRemaining ) {
            activeInterrupts |= BITM_SPI_IEN_RXOVR;
        }

        hDevice->pSpi->IEN |= activeInterrupts;

        /*
         * In interrupt mode, when there is nothing to receive, need to initiate a transaction
         * on an TX write only. Initiating on an RX read will start the transaction, but just for
         * a single byte (and we're not sure why this is true)
         */
        if( hDevice->RxRemaining == 0u) {
            hDevice->pSpi->CTL |= ( BITM_SPI_CTL_TIM );
        }

    }

    /* STAT bits are cleared by writing a '1' to them. Clear any residual status */
    nStatus = hDevice->pSpi->STAT;
    hDevice->pSpi->STAT = nStatus;

    /* Make sure we are in master mode */
    hDevice->pSpi->CTL |= ( BITM_SPI_CTL_MASEN);

    /* Set ChipSelect (latched earlier via the chip-select setter) */
    hDevice->pSpi->CS_CTL = hDevice->ChipSelect;

    StartTransaction(hDevice, pXfr);

    /* block if required */
    if (hDevice->bBlockingMode == true)
    {
        SEM_PEND(hDevice,ADI_SPI_PEND_FAILED);
    }

    return result;
}
998 
/*********************************************************************************************************/
/*                                                                                                       */
/* SPI DRIVER Master Mode transaction start                                                              */
/*                                                                                                       */
/*********************************************************************************************************/

/*
 * Program the SPI and (optionally) DMA hardware for the transaction already
 * captured in 'hDevice' by the submit path, then arm the SPI interrupt.
 *
 * pXfr is read only for the original byte counts and buffer pointers; all
 * mutable transfer state lives in hDevice. The statement order below is
 * deliberate (FIFO flush before DMA setup, DMA enable before the FIFO-fill
 * spin, dummy RX read last) — do not reorder.
 */
static void StartTransaction(ADI_SPI_HANDLE const hDevice, const ADI_SPI_TRANSCEIVER* const pXfr)
{
    /* Transaction completion is determined by the number of bytes to be received */
    uint16_t nCount;

    /* Work around SPI anomaly */
    if( (hDevice->bDmaMode == true) && (hDevice->bRdCtlMode == true) && (pXfr->ReceiverBytes == 1u))
    {
        /* Switch to PIO mode if the transaction is setup for a DMA transfer in RD_CTL mode with an RX count of 1 */
        hDevice->bDmaMode = false;
    }
    /* Effectively flush the FIFOs before the start of the next transaction */
    hDevice->pSpi->CTL |= (BITM_SPI_CTL_RFLUSH|BITM_SPI_CTL_TFLUSH);
    hDevice->pSpi->CTL &= (uint16_t)~(BITM_SPI_CTL_RFLUSH|BITM_SPI_CTL_TFLUSH);

    /* Disable any prior notion of DMA */
    hDevice->pSpi->DMA &= (uint16_t)~(BITM_SPI_DMA_EN | BITM_SPI_DMA_RXEN | BITM_SPI_DMA_TXEN);

    /*
     * If the transaction is DMA based then set up the DMA descriptors for this transaction
     */
    uint16_t dmaFlags = 0u;

    if( hDevice->bDmaMode == true)
    {
        dmaFlags = BITM_SPI_DMA_EN;

        uint16_t sz = pXfr->TransmitterBytes;
        if( sz )
        {
            uint16_t TxChanNum = hDevice->pDevInfo->dmaTxChannelNumber;

            /* Enable the interrupt for the given DMA channel */
            NVIC_EnableIRQ((IRQn_Type)(hDevice->pDevInfo->dmaTxIrqNumber));

            /* Disables source address decrement for TX channel */
            pADI_DMA0->SRCADDR_CLR = 1U << TxChanNum;

            /* Enable the channel */
            pADI_DMA0->EN_SET = 1U << TxChanNum;

            /* Enables SPI peripheral to generate DMA requests. */
            pADI_DMA0->RMSK_CLR = 1U << TxChanNum;

            /* Set the primary as the current DMA descriptor */
            pADI_DMA0->ALT_CLR = 1U << TxChanNum;

            /* fill in the DMA RAM descriptors */
            if( (sz & 1U) != 0u )
            {
                /* DMA is performed on 16-bit data. Make sure the DMA engine is properly aligned to even counts */
                /* The SPI_CNT register will hold the "real" transfer count */
                sz++;
            }

            /* Source end address: last 16-bit element of the TX buffer */
            pPrimaryCCD[TxChanNum].DMASRCEND = (uint32_t)(pXfr->pTransmitter + (sz - 2U));

            /* Destination is the fixed SPI TX register */
            pPrimaryCCD[TxChanNum].DMADSTEND = (uint32_t)&hDevice->pSpi->TX;

            pPrimaryCCD[TxChanNum].DMACDC = ((uint32_t)ADI_DMA_INCR_NONE << DMA_BITP_CTL_DST_INC) |
                                            (ADI_DMA_INCR_2_BYTE << DMA_BITP_CTL_SRC_INC) |
                                            (ADI_DMA_WIDTH_2_BYTE << DMA_BITP_CTL_SRC_SIZE) |
                                            ((sz/2U -1U)<< DMA_BITP_CTL_N_MINUS_1) |
                                            (DMA_ENUM_CTL_CYCLE_CTL_BASIC << DMA_BITP_CTL_CYCLE_CTL);

            dmaFlags |= (BITM_SPI_DMA_TXEN);
        }

        sz = pXfr->ReceiverBytes;
        if( sz )
        {
            uint16_t RxChanNum = hDevice->pDevInfo->dmaRxChannelNumber;
            NVIC_EnableIRQ((IRQn_Type)(hDevice->pDevInfo->dmaRxIrqNumber));

            /* Disables destination address decrement for RX channel */
            pADI_DMA0->DSTADDR_CLR = 1U << RxChanNum;

            /* Enable the channel */
            pADI_DMA0->EN_SET = 1U << RxChanNum;

            /* Enables SPI peripheral to generate DMA requests. */
            pADI_DMA0->RMSK_CLR = 1U << RxChanNum;

            /* Set the primary as the current DMA descriptor */
            pADI_DMA0->ALT_CLR = 1U << RxChanNum;

            if( (sz & 1U) != 0u )
            {
                /* DMA is performed on 16-bit data. Make sure the DMA engine is properly aligned to even counts */
                /* The SPI_CNT register will hold the "real" transfer count */
                sz++;
            }

            /* Source is the fixed SPI RX register */
            pPrimaryCCD[RxChanNum].DMASRCEND = (uint32_t)&hDevice->pSpi->RX;

            /* Destination end address: last 16-bit element of the RX buffer */
            pPrimaryCCD[RxChanNum].DMADSTEND = (uint32_t)(pXfr->pReceiver + (sz - 2U));

            pPrimaryCCD[RxChanNum].DMACDC = (ADI_DMA_INCR_2_BYTE << DMA_BITP_CTL_DST_INC) |
                                            (ADI_DMA_INCR_NONE << DMA_BITP_CTL_SRC_INC) |
                                            (ADI_DMA_WIDTH_2_BYTE << DMA_BITP_CTL_SRC_SIZE) |
                                            ((sz/2U -1U) << DMA_BITP_CTL_N_MINUS_1) |
                                            (DMA_ENUM_CTL_CYCLE_CTL_BASIC << DMA_BITP_CTL_CYCLE_CTL);

            dmaFlags |= (BITM_SPI_DMA_RXEN );

        }
    }

    /*
     * SPI CNT register
     * Non Read Mode: Size of the entire transaction
     * Read Mode: Size of the RX transaction
     *
     * RD_CTL.SZ
     * Read Mode: Size of the TX transaction
     */
    hDevice->pSpi->RD_CTL = 0u;
    if( hDevice->bRdCtlMode)
    {
        /* "Half Duplex Mode" */

        /* The number of bytes to be transmitted (RD_CTL.TXBYTES is N-1 encoded) */
        uint32_t nBytes = hDevice->TxRemaining - 1U;

        /* Enable RD_CTL and set the TX count for the half-duplex mode of operation */
        hDevice->pSpi->RD_CTL &= (uint16_t)~((uint16_t)(BITM_SPI_RD_CTL_TXBYTES << BITP_SPI_RD_CTL_TXBYTES));

        hDevice->pSpi->RD_CTL |= (uint16_t)( (uint16_t)(nBytes << BITP_SPI_RD_CTL_TXBYTES) |
                                             (uint16_t)(1 << BITP_SPI_RD_CTL_CMDEN));

        /* RD_CTL requires continuous mode operation. */
        hDevice->pSpi->CTL |= (BITM_SPI_CTL_CON);

        /* CNT represents the number of bytes to receive */
        hDevice->pSpi->CNT = hDevice->RxRemaining;
    }
    else
    {
        /* Full duplex mode of operation */
        if(hDevice->RxRemaining == 0u)
        {
            /* There is nothing to receive. Flush the RX FIFO and ignore all incoming data */
            hDevice->pSpi->CTL |= (BITM_SPI_CTL_RFLUSH);
        }
        else if(hDevice->TxRemaining == 0u)
        {
            /* If there is nothing to transmit then clear the TX FIFO */
            hDevice->pSpi->CTL |= (BITM_SPI_CTL_TFLUSH);
        }
        else
        {
            /* Misra compliance: All if/else chains should end with a final else clause */
        }

        /* Set CNT to MAX of RX/TX */
        nCount = hDevice->RxRemaining > hDevice->TxRemaining ? hDevice->RxRemaining : hDevice->TxRemaining;

        hDevice->pSpi->CNT = (uint16_t)nCount;
    }

    if( hDevice->bDmaMode == false)
    {
        /* Make sure that the application passed in a TX Buffer */
        if( hDevice->pTxBuffer != NULL)
        {
            /* interrupt mode: pre-fill the FIFO */
            nCount = 0u;
            while((nCount < ADI_SPI_FIFO_SIZE) && (hDevice->TxRemaining != 0u))
            {
                /* grab the lead byte */
                hDevice->pSpi->TX = *hDevice->pTxBuffer;
                /* modify tx pointer and buffer count prior to interrupt */
                hDevice->pTxBuffer += hDevice->TxIncrement;
                /* decrement the byte count */
                hDevice->TxRemaining--;
                nCount++;
            }
        }

    } else {

        hDevice->pSpi->DMA |= dmaFlags;
        if( (hDevice->bDmaMode == true) && (pXfr->TransmitterBytes != 0u))
        {
#if defined(ADI_SPI_MASTER_TIM_FOR_DMA) && (ADI_SPI_MASTER_TIM_FOR_DMA != 0u)
            /* set TIM bit to initiate transfer with a write to the SPI_TX register */
            hDevice->pSpi->CTL |= BITM_SPI_CTL_TIM;
#endif
            /* Spinning waiting for a potential timing window to close is required, but the spin loop needs to be guarded with an exit plan */
            /* After an arbitrary number of iterations (say 100) the window should be closed. */
            for( uint16_t numberOfLoops = 0u; numberOfLoops < 100u; numberOfLoops++)
            {
                /* wait until there is data in the TX FIFO before starting the transaction to avoid a potential race condition resulting in a TX underflow */
                /* The DMA controller and the SPI controller are on different clock domains, so wait until the DMA starts filling the FIFO before enabling the SPI */
                volatile uint16_t nFifoStatus = hDevice->pSpi->FIFO_STAT;
                /* calculate number of bytes that have been written to the tx fifo */
                uint16_t writableBytes = ADI_SPI_FIFO_SIZE - ((BITM_SPI_FIFO_STAT_TX & nFifoStatus) >> BITP_SPI_FIFO_STAT_TX);
                if( writableBytes != 0u)
                {
                    break;
                }
            }
        }

    }

    /* If TIM is clear, the transaction is initiated by a (dummy) read of RX */
    if((hDevice->pSpi->CTL & BITM_SPI_CTL_TIM) != BITM_SPI_CTL_TIM)
    {
        uint16_t byte ADI_UNUSED_ATTRIBUTE = hDevice->pSpi->RX;
    }

    NVIC_EnableIRQ(hDevice->pDevInfo->eIRQn);

    return;
}
1228 
/* NOTE(review): the function name line was lost in extraction; from the
 * visible parameter list and body this is presumably
 * adi_spi_GetBuffer(ADI_SPI_HANDLE const hDevice, uint32_t * const pHWErrors)
 * — TODO confirm against the full source. Blocks (pends on the driver
 * semaphore) until the current non-blocking transaction completes, then
 * reports any hardware errors recorded during the transfer. */
    ADI_SPI_HANDLE const hDevice,
    uint32_t * const pHWErrors
    )
{
#ifdef ADI_DEBUG
    if (ADI_SPI_VALIDATE_HANDLE(hDevice))
    {
        *pHWErrors = ADI_SPI_HW_ERROR_NONE;
        return ADI_SPI_INVALID_HANDLE;
    }
#endif

    /* Wait for the ISR/DMA completion path to post the semaphore */
    SEM_PEND(hDevice,ADI_SPI_SEMAPHORE_FAILED);
    *pHWErrors = hDevice->HWErrors;
    return(ADI_SPI_SUCCESS);
}
1261 
/* NOTE(review): the signature line was lost in extraction; the body reports
 * hDevice->bTransferComplete through a bool pointer, so this is presumably
 * adi_spi_isBufferAvailable(ADI_SPI_CONST_HANDLE const hDevice,
 * bool* const bComplete) — TODO confirm against the full source.
 * Non-blocking query of transaction completion. */
{
#ifdef ADI_DEBUG
    if (ADI_SPI_VALIDATE_HANDLE(hDevice))
    {
        return ADI_SPI_INVALID_HANDLE;
    }
#endif

    /* bTransferComplete is set by the completion path, cleared on submit */
    *bComplete = hDevice->bTransferComplete;
    return(ADI_SPI_SUCCESS);
}
1291 
/* NOTE(review): the signature line was lost in extraction; from the body and
 * the blocking wrapper below this is presumably
 * adi_spi_SlaveSubmitBuffer(ADI_SPI_HANDLE const hDevice,
 * const ADI_SPI_TRANSCEIVER* const pXfr) — TODO confirm against the full
 * source.
 *
 * Submits a slave-mode transaction: validates the transceiver (debug builds),
 * flushes the FIFOs, captures the transfer parameters in the handle,
 * configures CTL/CNT, sets up DMA descriptors when requested, enables the
 * relevant interrupts, and optionally pends until completion in blocking
 * mode. */
{
    volatile uint16_t ADI_UNUSED_ATTRIBUTE byte;
    uint32_t nCount = 0u;

#ifdef ADI_DEBUG
    if (ADI_SPI_VALIDATE_HANDLE(hDevice))
    {
        return ADI_SPI_INVALID_HANDLE;
    }
    if ((NULL == pXfr->pTransmitter) && (NULL == pXfr->pReceiver))
    {
        return ADI_SPI_INVALID_POINTER;
    }

    /* NOTE(review): this repeats the NULL/NULL check above (0u == ptr is the
     * same test as NULL == ptr); the first check already returns, so this
     * branch is unreachable — presumably a transcription artifact. */
    if ((0u == pXfr->pTransmitter) && (0u == pXfr->pReceiver) )
    {
        return ADI_SPI_INVALID_PARAM;
    }
    /* Return error if the RX buffer is not null and count is equal to zero or vice versa.*/
    if (((pXfr->pReceiver != NULL) && (pXfr->ReceiverBytes == 0u)) || ((pXfr->pReceiver == NULL) && ((pXfr->ReceiverBytes > 0u))))
    {
        return ADI_SPI_INVALID_PARAM;
    }

    /* Return error if the Tx buffer is not null and count is equal to zero or vice versa.*/
    if (((pXfr->pTransmitter != NULL) && (pXfr->TransmitterBytes == 0u)) || ((pXfr->pTransmitter == NULL) && (pXfr->TransmitterBytes > 0u)))
    {
        return ADI_SPI_INVALID_PARAM;
    }

    /* DMA transfers are 16-bit: reject a TX buffer that is not 2-byte
     * aligned (the test below checks the low address bit, despite the
     * original "count limited to 255" comment). */
    if ((pXfr->bDMA==true) && (pXfr->TransmitterBytes != 0u) &&(((uint32_t)pXfr->pTransmitter&0x1U) !=0u ) )
    {
        return ADI_SPI_INVALID_PARAM;
    }

#endif /* ADI_DEBUG */

    /* Effectively flush the FIFOs before the start of the next transaction */
    hDevice->pSpi->CTL |= (BITM_SPI_CTL_RFLUSH|BITM_SPI_CTL_TFLUSH);
    hDevice->pSpi->CTL &= (uint16_t)~(BITM_SPI_CTL_RFLUSH|BITM_SPI_CTL_TFLUSH);

    /* Shut down any DMA enables that are still lingering from a prior transaction */
    hDevice->pSpi->DMA &= (uint16_t)~(BITM_SPI_DMA_EN | BITM_SPI_DMA_RXEN | BITM_SPI_DMA_TXEN);

    /* Capture the transfer state in the handle; pXfr is owned by the caller */
    hDevice->bTransferComplete = false;
    hDevice->pTxBuffer = pXfr->pTransmitter;
    hDevice->pRxBuffer = pXfr->pReceiver;
    hDevice->TxRemaining = pXfr->TransmitterBytes;
    hDevice->RxRemaining = pXfr->ReceiverBytes;
    hDevice->TxIncrement = (uint8_t)pXfr->nTxIncrement;
    hDevice->RxIncrement = (uint8_t)pXfr->nRxIncrement;
    hDevice->bDmaMode = pXfr->bDMA;
    hDevice->bRdCtlMode = pXfr->bRD_CTL;
    hDevice->HWErrors = ADI_SPI_HW_ERROR_NONE;

    /* Configure SPI. First step is to clear CTL bits that may have been set previously */
    hDevice->pSpi->CTL &= (uint16_t)~(BITM_SPI_CTL_TIM | BITM_SPI_CTL_RFLUSH | BITM_SPI_CTL_TFLUSH | BITM_SPI_CTL_CON);
    if( hDevice->TxRemaining == 0u )
    {
        /* This will prevent TX underflow interrupts from occurring */
        hDevice->pSpi->CTL |= BITM_SPI_CTL_TFLUSH;
    }
    if( hDevice->RxRemaining == 0u )
    {
        /* This will prevent data from entering RX. Also prevents overflow interrupts from occurring */
        hDevice->pSpi->CTL |= BITM_SPI_CTL_RFLUSH;

        /* If SPI_CTL.TIM is set, the Tx FIFO status causes the interrupt. */
        if( hDevice->bDmaMode != true) {
            hDevice->pSpi->CTL |= BITM_SPI_CTL_TIM;
        }

    }

    /* CNT = MAX(TxRemaining, RxRemaining). Note the cast binds to the left
     * comparison operand only, not the whole ternary. */
    hDevice->pSpi->CNT = (uint16_t) hDevice->TxRemaining > hDevice->RxRemaining ? hDevice->TxRemaining : hDevice->RxRemaining;

    uint16_t nDMAFlags = 0u;

    if( hDevice->bDmaMode == true)
    {
        uint16_t sz = pXfr->TransmitterBytes;
        if( sz )
        {
            uint16_t TxChanNum = hDevice->pDevInfo->dmaTxChannelNumber;

            /* Enable the interrupt for the given DMA channel */
            NVIC_EnableIRQ((IRQn_Type)(hDevice->pDevInfo->dmaTxIrqNumber));

            /* Disables source address decrement for TX channel */
            pADI_DMA0->SRCADDR_CLR = 1U << TxChanNum;

            /* Enable the channel */
            pADI_DMA0->EN_SET = 1U << TxChanNum;

            /* Enables SPI peripheral to generate DMA requests. */
            pADI_DMA0->RMSK_CLR = 1U << TxChanNum;

            /* Set the primary as the current DMA descriptor */
            pADI_DMA0->ALT_CLR = 1U << TxChanNum;

            /* fill in the DMA RAM descriptors */
            if( (sz & 1U) != 0u )
            {
                /* DMA is performed on 16-bit data. Make sure the DMA engine is properly aligned to even counts */
                /* The SPI_CNT register will hold the "real" transfer count */
                sz++;
            }

            pPrimaryCCD[TxChanNum].DMASRCEND = (uint32_t)(pXfr->pTransmitter + (sz - 2U));

            pPrimaryCCD[TxChanNum].DMADSTEND = (uint32_t)&hDevice->pSpi->TX;

            pPrimaryCCD[TxChanNum].DMACDC = ((uint32_t)ADI_DMA_INCR_NONE << DMA_BITP_CTL_DST_INC) |
                                            (ADI_DMA_INCR_2_BYTE << DMA_BITP_CTL_SRC_INC) |
                                            (ADI_DMA_WIDTH_2_BYTE << DMA_BITP_CTL_SRC_SIZE) |
                                            ((sz/2U -1U)<< DMA_BITP_CTL_N_MINUS_1) |
                                            (DMA_ENUM_CTL_CYCLE_CTL_BASIC << DMA_BITP_CTL_CYCLE_CTL);

            nDMAFlags |= (BITM_SPI_DMA_TXEN);
        }

        sz = pXfr->ReceiverBytes;
        if( sz )
        {
            uint16_t RxChanNum = hDevice->pDevInfo->dmaRxChannelNumber;
            NVIC_EnableIRQ((IRQn_Type)(hDevice->pDevInfo->dmaRxIrqNumber));

            /* Disables destination address decrement for RX channel */
            pADI_DMA0->DSTADDR_CLR = 1U << RxChanNum;

            /* Enable the channel */
            pADI_DMA0->EN_SET = 1U << RxChanNum;

            /* Enables SPI peripheral to generate DMA requests. */
            pADI_DMA0->RMSK_CLR = 1U << RxChanNum;

            /* Set the primary as the current DMA descriptor */
            pADI_DMA0->ALT_CLR = 1U << RxChanNum;

            if( (sz & 1U) != 0u )
            {
                /* DMA is performed on 16-bit data. Make sure the DMA engine is properly aligned to even counts */
                /* The SPI_CNT register will hold the "real" transfer count */
                sz++;
            }

            pPrimaryCCD[RxChanNum].DMASRCEND = (uint32_t)&hDevice->pSpi->RX;

            pPrimaryCCD[RxChanNum].DMADSTEND = (uint32_t)(pXfr->pReceiver + (sz - 2U));

            pPrimaryCCD[RxChanNum].DMACDC = (ADI_DMA_INCR_2_BYTE << DMA_BITP_CTL_DST_INC) |
                                            (ADI_DMA_INCR_NONE << DMA_BITP_CTL_SRC_INC) |
                                            (ADI_DMA_WIDTH_2_BYTE << DMA_BITP_CTL_SRC_SIZE) |
                                            ((sz/2U -1U) << DMA_BITP_CTL_N_MINUS_1) |
                                            (DMA_ENUM_CTL_CYCLE_CTL_BASIC << DMA_BITP_CTL_CYCLE_CTL);

            nDMAFlags |= (BITM_SPI_DMA_RXEN );

        }
    }

    /* Make sure XFRDONE is shut down. This IEN has no effect in slave mode */
    hDevice->pSpi->IEN &= (uint16_t)~BITM_SPI_IEN_XFRDONE;

    if( hDevice->bDmaMode == false) {
        /* Make sure we are not in continuous mode from a prior DMA transaction */
        hDevice->pSpi->CTL &= (uint16_t)~BITM_SPI_CTL_CON;

        /* interrupt mode: Enable the UNDERFLOW and OVERFLOW interrupts */
        /* XFRDONE is invalid in slave mode */
        uint16_t activeInterrupts = 0u;
        /* Enable underflow only if sending bytes */
        if( hDevice->TxRemaining ) {
            activeInterrupts |= BITM_SPI_IEN_TXUNDR;
        }
        /* Enable overflow only if receiving bytes */
        if( hDevice->RxRemaining ) {
            activeInterrupts |= BITM_SPI_IEN_RXOVR;
        }
        hDevice->pSpi->IEN |= activeInterrupts;

        /* interrupt mode: Fill in the FIFO and enable the TX by a dummy read. */
        while((nCount < ADI_SPI_FIFO_SIZE) && (hDevice->TxRemaining != 0u))
        {
            /* grab the lead byte */
            hDevice->pSpi->TX = *hDevice->pTxBuffer;
            /* modify tx pointer and buffer count prior to interrupt */
            hDevice->pTxBuffer += hDevice->TxIncrement;
            /* decrement the byte count */
            hDevice->TxRemaining--;
            nCount++;
        }
    } else {

        /* DMA mode. Enable the controller */
        hDevice->pSpi->DMA |= (uint16_t)(BITM_SPI_DMA_EN | nDMAFlags);
    }

    /* If TIM is clear, a dummy RX read arms the transfer */
    if((hDevice->pSpi->CTL & BITM_SPI_CTL_TIM) != BITM_SPI_CTL_TIM)
    {
        byte = hDevice->pSpi->RX;
    }
    NVIC_EnableIRQ(hDevice->pDevInfo->eIRQn);

    /* block if required */
    if (hDevice->bBlockingMode == true)
    {
        SEM_PEND(hDevice,ADI_SPI_SEMAPHORE_FAILED);
    }

    return ADI_SPI_SUCCESS;
}
1538 
1539 
1540 
1568 {
1569  ADI_SPI_RESULT eResult;
1570  hDevice->bBlockingMode = true;
1571  eResult = adi_spi_SlaveSubmitBuffer(hDevice,pXfr);
1572  hDevice->bBlockingMode = false;
1573  if( (eResult == ADI_SPI_SUCCESS) && (hDevice->HWErrors != 0u))
1574  {
1575  eResult = ADI_SPI_HW_ERROR_OCCURRED;
1576  }
1577  return(eResult);
1578 }
1579 
1580 /*
1581  *****************************************************************************
1582  * SPI Internal Static Support Functions
1583  *****************************************************************************/
1584 
1585 
1589 /*-----------------------------------------------------------------------------
1590  *
1591  * SPI ISR
1592  *
1593  *----------------------------------------------------------------------------*/
1594 
1595 static void common_SPI_Int_Handler (ADI_SPI_DEV_DATA_TYPE* pDD)
1596 {
1597 
1598  uint16_t nFifoStatus;
1599  uint16_t writableBytes;
1600  uint16_t readableBytes;
1601  bool terminate = false;
1602 
1603  /* read status register - first thing */
1604  uint16_t nErrorStatus = pDD->pSpi->STAT;
1605 
1606  /* Take a fresh snapshot of FIFO and Clear the RX/TX/IRQ interrupts, as we service those
1607  * here. */
1608  pDD->pSpi->STAT = nErrorStatus;
1609  nFifoStatus = pDD->pSpi->FIFO_STAT;
1610 
1611  /* Trap overflow/underflow errors and terminate the current transaction if there is an error. */
1612  if(0u != ((BITM_SPI_STAT_RXOVR | BITM_SPI_STAT_TXUNDR) & nErrorStatus)) {
1613  /* Error condition. Handle each individually */
1614  if(BITM_SPI_STAT_TXUNDR == (BITM_SPI_STAT_TXUNDR & nErrorStatus)) {
1615  pDD->HWErrors |= (uint32_t)ADI_SPI_HW_ERROR_TX_UNDERFLOW;
1616  }
1617  if( BITM_SPI_STAT_RXOVR == (BITM_SPI_STAT_RXOVR & nErrorStatus)) {
1618  pDD->HWErrors |= (uint32_t)ADI_SPI_HW_ERROR_RX_OVERFLOW;
1619  }
1620 
1621  /* There's been an error, so shut it down */
1622  terminate = true;
1623  }
1624  else {
1625  /* calculate number of bytes that can be written to tx fifo */
1626  writableBytes = ADI_SPI_FIFO_SIZE - ((BITM_SPI_FIFO_STAT_TX & nFifoStatus) >> BITP_SPI_FIFO_STAT_TX);
1627  /* calculate number of bytes to read from rx fifo */
1628  readableBytes = ((BITM_SPI_FIFO_STAT_RX & nFifoStatus) >> BITP_SPI_FIFO_STAT_RX);
1629 
1630  /* fill tx fifo */
1631  while ((writableBytes != 0u) && (pDD->TxRemaining != 0u))
1632  {
1633  pDD->pSpi->TX = *pDD->pTxBuffer;
1634  pDD->pTxBuffer += pDD->TxIncrement;
1635  pDD->TxRemaining--;
1636  writableBytes--;
1637  }
1638 
1639  /*
1640  * Now focus on the RX FIFO but only if we are not in RD_CTL mode OR, if we
1641  * are in RD_CTL mode, TX bytes are all transmitted
1642  */
1643  if( (pDD->bRdCtlMode==false) || (pDD->TxRemaining==0u) )
1644  {
1645  /* empty rx fifo */
1646  while ((readableBytes != 0u) &&(pDD->RxRemaining != 0u))
1647  {
1648  *pDD->pRxBuffer = (uint8_t) pDD->pSpi->RX;
1649  pDD->pRxBuffer += pDD->RxIncrement;
1650  pDD->RxRemaining--;
1651  readableBytes--;
1652  }
1653  }
1654  }
1655 
1656  /* Terminate the transaction and notify the caller
1657  * 1) Master mode: If there are no more bytes to RX or TX and XFRDONE is set
1658  * 2) Slave mode: If there are no more bytes to RX or TX (XFRDONE is invalid in slave mode)
1659  * 3) If there was a HW error
1660  */
1661  if( (pDD->RxRemaining == 0u) && (pDD->TxRemaining == 0u))
1662  {
1663  if( BITM_SPI_CTL_MASEN == (pDD->pSpi->CTL & BITM_SPI_CTL_MASEN ))
1664  {
1665  /* Master mode */
1666  /* On handler entry we may have had a transfer complete state, so we check
1667  * if that has happened. Also, between clearing the int status and reading the
1668  * FIFO state, there is a slim chance that the transfer has completed. So,
1669  * we check both for absolute completeness */
1670  if((BITM_SPI_STAT_XFRDONE == (nErrorStatus & BITM_SPI_STAT_XFRDONE)) ||
1671  (BITM_SPI_STAT_XFRDONE == (pDD->pSpi->STAT & BITM_SPI_STAT_XFRDONE)) )
1672  {
1673  /* Master mode XFRDONE */
1674  terminate = true;
1675  }
1676  } else {
1677  /* Slave mode - we're all done here */
1678  terminate = true;
1679  }
1680  }
1681 
1682  /* We terminate if:
1683  - There has been an error on entry to this handler
1684  - Data has transferred successfully.
1685  If there has been an error whilst executing this handler (data can transfer
1686  during this handler, and an error can still occur) then the interrupt will
1687  have raised, we re-enter the interrupt and then the error is handled above. */
1688  if( terminate )
1689  {
1690 
1691  /* Clear possible interrupt sources: XFRDONE and underflow and overflow */
1692  pDD->pSpi->IEN &= ~(BITM_SPI_IEN_XFRDONE|BITM_SPI_IEN_RXOVR|BITM_SPI_IEN_TXUNDR);
1693  pDD->bTransferComplete = true;
1694  NVIC_DisableIRQ(pDD->pDevInfo->eIRQn);
1695 
1696  /* Everything done, now just clear the STAT register */
1697  nErrorStatus = pDD->pSpi->STAT;
1698  pDD->pSpi->STAT = nErrorStatus;
1699 
1700  /* If a callback is registered notify the buffer processed event to the application */
1701  if(NULL != pDD->pfCallback ){
1702  pDD->pfCallback(pDD->pCBParam, pDD->HWErrors, NULL);
1703  }
1704  else
1705  {
1706  SEM_POST(pDD);
1707  }
1708  }
1709 
1710 #if defined(ADI_CYCLECOUNT_SPI_ISR_ENABLED) && (ADI_CYCLECOUNT_SPI_ISR_ENABLED == 1u)
1712 #endif
1713 
1714 }
1715 
1716 
1717 /* Internal DMA Callback for receiving DMA faults from common DMA error handler. */
1718 static void RxDmaErrorCallback(void *pCBParam, uint32_t Event, void *pArg) {
1719 
1720  /* Recover the device handle. */
1721  ADI_SPI_HANDLE hDevice = (ADI_SPI_HANDLE) pCBParam;
1722 
1723  /* Save the DMA error. */
1724  switch (Event) {
1725  case ADI_DMA_EVENT_ERR_BUS:
1726  hDevice->HWErrors |= ADI_SPI_HW_ERROR_RX_CHAN_DMA_BUS_FAULT;
1727  break;
1729  hDevice->HWErrors |= ADI_SPI_HW_ERROR_RX_CHAN_DMA_INVALID_DESCR;
1730  break;
1731  default:
1732  hDevice->HWErrors |= ADI_SPI_HW_ERROR_RX_CHAN_DMA_UNKNOWN_ERROR;
1733  break;
1734  }
1735 
1736  /* If a callback is registered notify the buffer processed event to the application */
1737  if(NULL != hDevice->pfCallback ){
1738  hDevice->pfCallback(hDevice->pCBParam, hDevice->HWErrors, NULL);
1739  }
1740  else
1741  {
1742  SEM_POST(hDevice);
1743  }
1744 }
1745 
1746 
1747 /* Internal DMA Callback for receiving DMA faults from common DMA error handler. */
1748 static void TxDmaErrorCallback(void *pCBParam, uint32_t Event, void *pArg) {
1749 
1750  /* Recover the device handle. */
1751  ADI_SPI_HANDLE hDevice = (ADI_SPI_HANDLE) pArg;
1752 
1753  /* Save the DMA error. */
1754  switch (Event) {
1755  case ADI_DMA_EVENT_ERR_BUS:
1756  hDevice->HWErrors |= ADI_SPI_HW_ERROR_TX_CHAN_DMA_BUS_FAULT;
1757  break;
1759  hDevice->HWErrors |= ADI_SPI_HW_ERROR_TX_CHAN_DMA_INVALID_DESCR;
1760  break;
1761  default:
1762  hDevice->HWErrors |= ADI_SPI_HW_ERROR_TX_CHAN_DMA_UNKNOWN_ERROR;
1763  break;
1764  }
1765 
1766  /* If a callback is registered notify the buffer processed event to the application */
1767  if(NULL != hDevice->pfCallback ){
1768  hDevice->pfCallback(hDevice->pCBParam, hDevice->HWErrors, NULL);
1769  }
1770  else
1771  {
1772  SEM_POST(hDevice);
1773  }
1774 }
1775 
1776 
/* SPI0 interrupt vector: RTOS entry/exit wrappers around the common SPI
 * interrupt service routine, dispatched with device instance 0's handle. */
void SPI0_Int_Handler(void) {
    ISR_PROLOG();
    common_SPI_Int_Handler(spi_device_info[0].hDevice );
    ISR_EPILOG();
}
1789 
1790 
/* SPI1 interrupt vector: RTOS entry/exit wrappers around the common SPI
 * interrupt service routine, dispatched with device instance 1's handle. */
void SPI1_Int_Handler(void) {
    ISR_PROLOG();
    common_SPI_Int_Handler(spi_device_info[1].hDevice);
    ISR_EPILOG();
}
1803 
/* SPI2 interrupt vector: RTOS entry/exit wrappers around the common SPI
 * interrupt service routine, dispatched with device instance 2's handle. */
void SPI2_Int_Handler(void) {
    ISR_PROLOG();
    common_SPI_Int_Handler(spi_device_info[2].hDevice );
    ISR_EPILOG();
}
1816 
1817 
1818 /*
1822 */
1823 
1824 
1825 /*
1826  * SPI DMA interrupt handlers
1827  */
1828 
1829 
1830 #if defined(ADI_SPI0_MASTER_MODE) && (ADI_SPI0_MASTER_MODE==1u)
/* SPI0 master-mode DMA TX completion ISR.
 * The TX channel has drained the buffer, so just mark transmit complete;
 * transaction termination is driven by the RX DMA / XFRDONE path. */
void DMA_SPI0_TX_Int_Handler(void)
{
    ISR_PROLOG();
    ADI_SPI_HANDLE pDD = spi_device_info[0].hDevice;
    pDD->TxRemaining = 0u;
    ISR_EPILOG();
}
1838 
1839 /* Master mode DMA ISR */
/* Master mode DMA ISR */
/* SPI0 master-mode DMA RX completion ISR.
 * Marks receive complete, shuts off SPI DMA requests, then arms the XFRDONE
 * interrupt so the common SPI ISR can finish the transaction. */
void DMA_SPI0_RX_Int_Handler(void)
{
    ISR_PROLOG();
    ADI_SPI_HANDLE pDD = spi_device_info[0].hDevice;
    pDD->RxRemaining = 0u;
    /* Disable DMA */
    pDD->pSpi->DMA &= (uint16_t)~(BITM_SPI_DMA_EN | BITM_SPI_DMA_RXEN | BITM_SPI_DMA_TXEN);
    /* Master mode: Now allow the XFRDONE interrupt to occur. It's the SPI ISR that really ends the transaction */
    /* The slave mode is not affected by this setting */
    pDD->pSpi->IEN |= BITM_SPI_IEN_XFRDONE;
    ISR_EPILOG();
}
1852 #endif
1853 #if defined(ADI_SPI0_MASTER_MODE) && (ADI_SPI0_MASTER_MODE==0u)
1854 /* Slave mode DMA ISRs */
1855 void DMA_SPI0_TX_Int_Handler(void)
1856 {
1857  ISR_PROLOG();
1858  ADI_SPI_HANDLE pDD = spi_device_info[0].hDevice;
1859  pDD->TxRemaining = 0u;
1860  if( pDD->RxRemaining == 0)
1861  {
1862  /* If a callback is registered notify the buffer processed event to the application */
1863  if(NULL != pDD->pfCallback ){
1864  pDD->pfCallback(pDD->pCBParam, pDD->HWErrors, NULL);
1865  }
1866  else
1867  {
1868  SEM_POST(pDD);
1869  }
1870  }
1871  ISR_EPILOG();
1872 }
/* SPI0 slave-mode DMA RX completion ISR.
 * Marks receive complete, disables SPI DMA requests and the SPI event
 * interrupts, flags completion, and notifies the application. */
void DMA_SPI0_RX_Int_Handler(void)
{
    ISR_PROLOG();
    ADI_SPI_HANDLE pDD = spi_device_info[0].hDevice;
    pDD->RxRemaining = 0u;
    /* Disable DMA */
    pDD->pSpi->DMA &= (uint16_t)~(BITM_SPI_DMA_EN | BITM_SPI_DMA_RXEN | BITM_SPI_DMA_TXEN);
    /* Slave mode: no XFRDONE, so this ISR ends the transaction itself. */
    pDD->pSpi->IEN &= ~(BITM_SPI_IEN_XFRDONE|BITM_SPI_IEN_RXOVR|BITM_SPI_IEN_TXUNDR);
    pDD->bTransferComplete = true;
    NVIC_DisableIRQ(pDD->pDevInfo->eIRQn);

    /* If a callback is registered notify the buffer processed event to the application */
    if(NULL != pDD->pfCallback ){
        pDD->pfCallback(pDD->pCBParam, pDD->HWErrors, NULL);
    }
    else
    {
        SEM_POST(pDD);
    }
    ISR_EPILOG();
}
1894 #endif
1895 
1896 
1897 
1898 
1899 #if defined(ADI_SPI1_MASTER_MODE) && (ADI_SPI1_MASTER_MODE==1u)
1900 /* Master mode DMA ISR */
/* SPI1 master-mode DMA TX completion ISR.
 * The TX channel has drained the buffer, so just mark transmit complete;
 * transaction termination is driven by the RX DMA / XFRDONE path. */
void DMA_SPI1_TX_Int_Handler(void)
{
    ISR_PROLOG();
    ADI_SPI_HANDLE pDD = spi_device_info[1].hDevice;
    pDD->TxRemaining = 0u;
    ISR_EPILOG();
}
1908 
/* SPI1 master-mode DMA RX completion ISR.
 * Marks receive complete, shuts off SPI DMA requests, then arms the XFRDONE
 * interrupt so the common SPI ISR can finish the transaction. */
void DMA_SPI1_RX_Int_Handler(void)
{
    ISR_PROLOG();
    ADI_SPI_HANDLE pDD = spi_device_info[1].hDevice;
    pDD->RxRemaining = 0u;
    /* Disable DMA */
    pDD->pSpi->DMA &= (uint16_t)~(BITM_SPI_DMA_EN | BITM_SPI_DMA_RXEN | BITM_SPI_DMA_TXEN);
    /* Master mode: Now allow the XFRDONE interrupt to occur. It's the SPI ISR that really ends the transaction */
    /* The slave mode is not affected by this setting */
    pDD->pSpi->IEN |= BITM_SPI_IEN_XFRDONE;
    ISR_EPILOG();
}
1921 #endif
1922 
1923 
1924 #if defined(ADI_SPI1_MASTER_MODE) && (ADI_SPI1_MASTER_MODE==0u)
1925 /* Slave mode DMA ISRs */
1926 void DMA_SPI1_TX_Int_Handler(void)
1927 {
1928  ISR_PROLOG();
1929  ADI_SPI_HANDLE pDD = spi_device_info[1].hDevice;
1930  pDD->TxRemaining = 0u;
1931  if( pDD->RxRemaining == 0)
1932  {
1933  /* If a callback is registered notify the buffer processed event to the application */
1934  if(NULL != pDD->pfCallback ){
1935  pDD->pfCallback(pDD->pCBParam, pDD->HWErrors, NULL);
1936  }
1937  else
1938  {
1939  SEM_POST(pDD);
1940  }
1941  }
1942  ISR_EPILOG();
1943 }
1944 
1945 
/* SPI1 slave-mode DMA RX completion ISR.
 * Marks receive complete, disables SPI DMA requests and the SPI event
 * interrupts, flags completion, and notifies the application. */
void DMA_SPI1_RX_Int_Handler(void)
{
    ISR_PROLOG();
    ADI_SPI_HANDLE pDD = spi_device_info[1].hDevice;
    pDD->RxRemaining = 0u;
    /* Disable DMA */
    pDD->pSpi->DMA &= (uint16_t)~(BITM_SPI_DMA_EN | BITM_SPI_DMA_RXEN | BITM_SPI_DMA_TXEN);
    /* Slave mode: no XFRDONE, so this ISR ends the transaction itself. */
    pDD->pSpi->IEN &= ~(BITM_SPI_IEN_XFRDONE|BITM_SPI_IEN_RXOVR|BITM_SPI_IEN_TXUNDR);
    pDD->bTransferComplete = true;
    NVIC_DisableIRQ(pDD->pDevInfo->eIRQn);

    /* If a callback is registered notify the buffer processed event to the application */
    if(NULL != pDD->pfCallback ){
        pDD->pfCallback(pDD->pCBParam, pDD->HWErrors, NULL);
    }
    else
    {
        SEM_POST(pDD);
    }
    ISR_EPILOG();
}
1967 #endif
1968 
1969 
1970 #if defined(ADI_SPI2_MASTER_MODE) && (ADI_SPI2_MASTER_MODE==1u)
1971 /* Master mode DMA ISR */
1972 
/* SPI2 (SPIH) master-mode DMA TX completion ISR.
 * The TX channel has drained the buffer, so just mark transmit complete;
 * transaction termination is driven by the RX DMA / XFRDONE path. */
void DMA_SPIH_TX_Int_Handler(void)
{
    ISR_PROLOG();
    ADI_SPI_HANDLE pDD = spi_device_info[2].hDevice;
    pDD->TxRemaining = 0u;
    ISR_EPILOG();
}
1980 
/* SPI2 (SPIH) master-mode DMA RX completion ISR.
 * Marks receive complete, shuts off SPI DMA requests, then arms the XFRDONE
 * interrupt so the common SPI ISR can finish the transaction. */
void DMA_SPIH_RX_Int_Handler(void)
{
    ISR_PROLOG();
    ADI_SPI_HANDLE pDD = spi_device_info[2].hDevice;
    pDD->RxRemaining = 0u;
    /* Disable DMA */
    pDD->pSpi->DMA &= (uint16_t)~(BITM_SPI_DMA_EN | BITM_SPI_DMA_RXEN | BITM_SPI_DMA_TXEN);
    /* Master mode: Now allow the XFRDONE interrupt to occur. It's the SPI ISR that really ends the transaction */
    /* The slave mode is not affected by this setting */
    pDD->pSpi->IEN |= BITM_SPI_IEN_XFRDONE;
    ISR_EPILOG();
}
1993 #endif
1994 #if defined(ADI_SPI2_MASTER_MODE) && (ADI_SPI2_MASTER_MODE==0u)
 1995 /* Slave mode DMA ISRs */
1996 
1997 void DMA_SPIH_TX_Int_Handler(void)
1998 {
1999  ISR_PROLOG();
2000  ADI_SPI_HANDLE pDD = spi_device_info[2].hDevice;
2001  pDD->TxRemaining = 0u;
2002  if( pDD->RxRemaining == 0)
2003  {
2004  /* If a callback is registered notify the buffer processed event to the application */
2005  if(NULL != pDD->pfCallback ){
2006  pDD->pfCallback(pDD->pCBParam, pDD->HWErrors, NULL);
2007  }
2008  else
2009  {
2010  SEM_POST(pDD);
2011  }
2012  }
2013  ISR_EPILOG();
2014 }
2015 
/* SPI2 (SPIH) slave-mode DMA RX completion ISR.
 * Marks receive complete, disables SPI DMA requests and the SPI event
 * interrupts, flags completion, and notifies the application. */
void DMA_SPIH_RX_Int_Handler(void)
{
    ISR_PROLOG();
    ADI_SPI_HANDLE pDD = spi_device_info[2].hDevice;
    pDD->RxRemaining = 0u;
    /* Disable DMA */
    pDD->pSpi->DMA &= (uint16_t)~(BITM_SPI_DMA_EN | BITM_SPI_DMA_RXEN | BITM_SPI_DMA_TXEN);
    /* Slave mode: no XFRDONE, so this ISR ends the transaction itself. */
    pDD->pSpi->IEN &= ~(BITM_SPI_IEN_XFRDONE|BITM_SPI_IEN_RXOVR|BITM_SPI_IEN_TXUNDR);
    pDD->bTransferComplete = true;
    NVIC_DisableIRQ(pDD->pDevInfo->eIRQn);

    /* If a callback is registered notify the buffer processed event to the application */
    if(NULL != pDD->pfCallback ){
        pDD->pfCallback(pDD->pCBParam, pDD->HWErrors, NULL);
    }
    else
    {
        SEM_POST(pDD);
    }
    ISR_EPILOG();
}
2037 #endif
2038 
2039 
2040 
2041 
2045 /* @} */
2046 
ADI_SPI_RESULT adi_spi_SetTransmitUnderflow(ADI_SPI_CONST_HANDLE const hDevice, const bool bFlag)
Set the SPI transmit FIFO underflow mode.
Definition: adi_spi.c:557
ADI_DMA_RESULT adi_dma_RegisterCallback(DMA_CHANn_TypeDef const eChannelID, ADI_CALLBACK const pfCallback, void *const pCBParam)
Register a call-back function for a DMA channel.
Definition: adi_dma.c:223
ADI_SPI_RESULT adi_spi_GetBuffer(ADI_SPI_HANDLE const hDevice, uint32_t *const pHWErrors)
Block until the SPI transaction is complete.
Definition: adi_spi.c:1244
#define ADI_CYCLECOUNT_STORE(id)
ADI_SPI_RESULT adi_spi_SlaveReadWrite(ADI_SPI_HANDLE const hDevice, const ADI_SPI_TRANSCEIVER *const pXfr)
Submit data buffers for SPI Slave-Mode transaction in "Blocking mode". This function returns only after the transaction completes.
Definition: adi_spi.c:1567
uint16_t TransmitterBytes
Definition: adi_spi.h:250
uint8_t * pReceiver
Definition: adi_spi.h:248
uint8_t * pTransmitter
Definition: adi_spi.h:246
ADI_SPI_RESULT
Definition: adi_spi.h:84
ADI_SPI_RESULT adi_spi_SetClockPhase(ADI_SPI_HANDLE const hDevice, const bool bFlag)
Set the clock phase.
Definition: adi_spi.c:789
#define ADI_CYCLECOUNT_ISR_SPI
ADI_SPI_RESULT adi_spi_SetContinuousMode(ADI_SPI_CONST_HANDLE const hDevice, const bool bFlag)
Set the continuous transfer mode.
Definition: adi_spi.c:407
ADI_SPI_RESULT adi_spi_Open(uint32_t nDeviceNum, void *pDevMemory, uint32_t nMemorySize, ADI_SPI_HANDLE *const phDevice)
Initialize and allocate an SPI device for use in Master Mode.
Definition: adi_spi.c:184
ADI_SPI_RESULT adi_spi_Close(ADI_SPI_HANDLE const hDevice)
Uninitialize and deallocate an SPI device.
Definition: adi_spi.c:280
ADI_SPI_RESULT adi_spi_isBufferAvailable(ADI_SPI_CONST_HANDLE const hDevice, bool *const bComplete)
Get the SPI transaction completion status.
Definition: adi_spi.c:1279
ADI_PWR_RESULT
Definition: adi_pwr.h:268
ADI_SPI_RESULT adi_spi_SetChipSelect(ADI_SPI_HANDLE const hDevice, const ADI_SPI_CHIP_SELECT eChipSelect)
Set the chip select.
Definition: adi_spi.c:745
uint8_t nRxIncrement
Definition: adi_spi.h:256
ADI_SPI_RESULT adi_spi_SetLoopback(ADI_SPI_CONST_HANDLE const hDevice, const bool bFlag)
Set the internal loopback mode.
Definition: adi_spi.c:441
ADI_SPI_RESULT adi_spi_GetBitrate(ADI_SPI_CONST_HANDLE const hDevice, uint32_t *const pnBitrate)
Get the SPI serial clock frequency.
Definition: adi_spi.c:656
ADI_SPI_RESULT adi_spi_RegisterCallback(ADI_SPI_HANDLE const hDevice, ADI_CALLBACK const pfCallback, void *const pCBParam)
Register or unregister the callback.
Definition: adi_spi.c:319
ADI_SPI_RESULT adi_spi_SetClockPolarity(ADI_SPI_HANDLE const hDevice, const bool bFlag)
Set the clock polarity.
Definition: adi_spi.c:710
ADI_PWR_RESULT adi_pwr_GetClockFrequency(const ADI_CLOCK_ID eClockId, uint32_t *pClock)
Get the frequency of the given clock. Obtain individual peripheral clock frequencies.
Definition: adi_pwr.c:467
ADI_SPI_RESULT adi_spi_SetIrqmode(ADI_SPI_CONST_HANDLE const hDevice, const uint8_t nMode)
Set the IRQ mode.
Definition: adi_spi.c:367
ADI_SPI_RESULT adi_spi_SetReceiveOverflow(ADI_SPI_CONST_HANDLE const hDevice, const bool bFlag)
Set the SPI receive FIFO overflow mode.
Definition: adi_spi.c:519
ADI_SPI_RESULT adi_spi_MasterReadWrite(ADI_SPI_HANDLE const hDevice, const ADI_SPI_TRANSCEIVER *const pXfr)
Submit data buffers for SPI Master-Mode transaction in "Blocking mode". This function returns only after the transaction completes.
Definition: adi_spi.c:834
struct __ADI_SPI_DEV_DATA_TYPE * ADI_SPI_HANDLE
Definition: adi_spi.h:181
uint16_t ReceiverBytes
Definition: adi_spi.h:252
ADI_SPI_RESULT adi_spi_SetMasterMode(ADI_SPI_CONST_HANDLE const hDevice, const bool bFlag)
Set SPI Master-Mode operation.
Definition: adi_spi.c:476
ADI_SPI_CHIP_SELECT
Definition: adi_spi.h:165
void adi_dma_Init(void)
Initialize the DMA peripheral.
Definition: adi_dma.c:175
const struct __ADI_SPI_DEV_DATA_TYPE * ADI_SPI_CONST_HANDLE
Definition: adi_spi.h:183
uint8_t nTxIncrement
Definition: adi_spi.h:254
ADI_SPI_RESULT adi_spi_SlaveSubmitBuffer(ADI_SPI_HANDLE const hDevice, const ADI_SPI_TRANSCEIVER *const pXfr)
Submit data buffers for SPI Slave-Mode transaction.
Definition: adi_spi.c:1322
ADI_SPI_RESULT adi_spi_SetBitrate(ADI_SPI_CONST_HANDLE const hDevice, const uint32_t Hertz)
Set the SPI serial clock frequency.
Definition: adi_spi.c:599
ADI_SPI_RESULT adi_spi_MasterSubmitBuffer(ADI_SPI_HANDLE const hDevice, const ADI_SPI_TRANSCEIVER *const pXfr)
Submit data buffers for SPI Master-Mode transaction.
Definition: adi_spi.c:874