queue.c
/*
	FreeRTOS V8.2.3 - Copyright (C) 2015 Real Time Engineers Ltd.
	All rights reserved

	VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

	This file is part of the FreeRTOS distribution.

	FreeRTOS is free software; you can redistribute it and/or modify it under
	the terms of the GNU General Public License (version 2) as published by the
	Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.

	***************************************************************************
	>>!   NOTE: The modification to the GPL is included to allow you to     !<<
	>>!   distribute a combined work that includes FreeRTOS without being   !<<
	>>!   obliged to provide the source code for proprietary components     !<<
	>>!   outside of the FreeRTOS kernel.                                   !<<
	***************************************************************************

	FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
	WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
	FOR A PARTICULAR PURPOSE.  Full license text is available on the following
	link: http://www.freertos.org/a00114.html

	***************************************************************************
	 *                                                                       *
	 *    FreeRTOS provides completely free yet professionally developed,    *
	 *    robust, strictly quality controlled, supported, and cross          *
	 *    platform software that is more than just the market leader, it     *
	 *    is the industry's de facto standard.                               *
	 *                                                                       *
	 *    Help yourself get started quickly while simultaneously helping     *
	 *    to support the FreeRTOS project by purchasing a FreeRTOS           *
	 *    tutorial book, reference manual, or both:                          *
	 *    http://www.FreeRTOS.org/Documentation                              *
	 *                                                                       *
	***************************************************************************

	http://www.FreeRTOS.org/FAQHelp.html - Having a problem?  Start by reading
	the FAQ page "My application does not run, what could be wrong?".  Have you
	defined configASSERT()?

	http://www.FreeRTOS.org/support - In return for receiving this top quality
	embedded software for free we request you assist our global community by
	participating in the support forum.

	http://www.FreeRTOS.org/training - Investing in training allows your team
	to be as productive as possible as early as possible.  Now you can receive
	FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
	Ltd, and the world's leading authority on the world's leading RTOS.

	http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
	including FreeRTOS+Trace - an indispensable productivity tool, a DOS
	compatible FAT file system, and our tiny thread aware UDP/IP stack.

	http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
	Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.

	http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
	Integrity Systems ltd. to sell under the OpenRTOS brand.  Low cost OpenRTOS
	licenses offer ticketed support, indemnification and commercial middleware.

	http://www.SafeRTOS.com - High Integrity Systems also provide a safety
	engineered and independently SIL3 certified version for use in safety and
	mission critical applications that require provable dependability.

	1 tab == 4 spaces!
*/

#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
	#include "croutine.h"
#endif

/* Lint e961 and e750 are suppressed as a MISRA exception justified because the
MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */


/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED					( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED			( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder					pcTail
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME		 ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define queueYIELD_IF_USING_PREEMPTION()
#else
	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif

/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition
{
	int8_t *pcHead;					/*< Points to the beginning of the queue storage area. */
	int8_t *pcTail;					/*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items; this byte is used as a marker. */
	int8_t *pcWriteTo;				/*< Points to the next free place in the storage area. */

	union							/* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
	{
		int8_t *pcReadFrom;			/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
		UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
	} u;

	List_t xTasksWaitingToSend;		/*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
	List_t xTasksWaitingToReceive;	/*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

	volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
	UBaseType_t uxLength;			/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
	UBaseType_t uxItemSize;			/*< The size of each item that the queue will hold. */

	volatile BaseType_t xRxLock;	/*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
	volatile BaseType_t xTxLock;	/*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

	#if ( configUSE_TRACE_FACILITY == 1 )
		UBaseType_t uxQueueNumber;
		uint8_t ucQueueType;
	#endif

	#if ( configUSE_QUEUE_SETS == 1 )
		struct QueueDefinition *pxQueueSetContainer;
	#endif

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* The type stored within the queue registry array.  This allows a name
	to be assigned to each queue making kernel aware debugging a little
	more user friendly. */
	typedef struct QUEUE_REGISTRY_ITEM
	{
		const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
		QueueHandle_t xHandle;
	} xQueueRegistryItem;

	/* The old xQueueRegistryItem name is maintained above then typedefed to
	the new QueueRegistryItem_t name below to enable the use of older kernel
	aware debuggers. */
	typedef xQueueRegistryItem QueueRegistryItem_t;

	/* The queue registry is simply an array of QueueRegistryItem_t structures.
	The pcQueueName member of a structure being NULL is indicative of the
	array position being vacant. */
	PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */

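/*
 * Usage sketch (application code, not part of the kernel): a queue becomes
 * visible by name in a kernel aware debugger once it has been added to the
 * registry with vQueueAddToRegistry(), declared in queue.h.  The handle and
 * name below are illustrative only:
 *
 *     QueueHandle_t xEventQueue = xQueueCreate( 8, sizeof( uint32_t ) );
 *     if( xEventQueue != NULL )
 *     {
 *         vQueueAddToRegistry( xEventQueue, "EventQ" );
 *     }
 */
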
/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
	/*
	 * Checks to see if a queue is a member of a queue set, and if so, notifies
	 * the queue set that the queue contains data.
	 */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )								\
	taskENTER_CRITICAL();									\
	{														\
		if( ( pxQueue )->xRxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;	\
		}													\
		if( ( pxQueue )->xTxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;	\
		}													\
	}														\
	taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
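
/*
 * For orientation, the blocking send/receive paths below always pair this
 * macro with prvUnlockQueue() while the scheduler is suspended, i.e.:
 *
 *     vTaskSuspendAll();
 *     prvLockQueue( pxQueue );
 *         (check the timeout and, if appropriate, block on an event list)
 *     prvUnlockQueue( pxQueue );
 *     ( void ) xTaskResumeAll();
 */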

BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );

	taskENTER_CRITICAL();
	{
		pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
		pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
		pxQueue->pcWriteTo = pxQueue->pcHead;
		pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
		pxQueue->xRxLock = queueUNLOCKED;
		pxQueue->xTxLock = queueUNLOCKED;

		if( xNewQueue == pdFALSE )
		{
			/* If there are tasks blocked waiting to read from the queue, then
			the tasks will remain blocked as after this function exits the queue
			will still be empty.  If there are tasks blocked waiting to write to
			the queue, then one should be unblocked as after this function exits
			it will be possible to write to it. */
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
				{
					queueYIELD_IF_USING_PREEMPTION();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			/* Ensure the event queues start in the correct state. */
			vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
		}
	}
	taskEXIT_CRITICAL();

	/* A value is returned for calling semantic consistency with previous
	versions. */
	return pdPASS;
}
/*-----------------------------------------------------------*/
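
/*
 * Usage sketch: application code normally reaches the function above through
 * the xQueueReset() macro in queue.h, which passes pdFALSE for xNewQueue so
 * that tasks already blocked on the queue are handled as described in the
 * comments above:
 *
 *     QueueHandle_t xQueue = xQueueCreate( 4, sizeof( uint32_t ) );
 *         ...
 *     ( void ) xQueueReset( xQueue );
 */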

QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;
QueueHandle_t xReturn = NULL;

	/* Remove compiler warnings about unused parameters should
	configUSE_TRACE_FACILITY not be set to 1. */
	( void ) ucQueueType;

	configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

	if( uxItemSize == ( UBaseType_t ) 0 )
	{
		/* There is not going to be a queue storage area. */
		xQueueSizeInBytes = ( size_t ) 0;
	}
	else
	{
		/* The queue is one byte longer than asked for to make wrap checking
		easier/faster. */
		xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
	}

	/* Allocate the new queue structure and storage area. */
	pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

	if( pxNewQueue != NULL )
	{
		if( uxItemSize == ( UBaseType_t ) 0 )
		{
			/* No RAM was allocated for the queue storage area, but pcHead
			cannot be set to NULL because NULL is used as a key to say the queue
			is used as a mutex.  Therefore just set pcHead to point to the queue
			as a benign value that is known to be within the memory map. */
			pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
		}
		else
		{
			/* Jump past the queue structure to find the location of the queue
			storage area. */
			pxNewQueue->pcHead = ( ( int8_t * ) pxNewQueue ) + sizeof( Queue_t );
		}

		/* Initialise the queue members as described above where the queue type
		is defined. */
		pxNewQueue->uxLength = uxQueueLength;
		pxNewQueue->uxItemSize = uxItemSize;
		( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

		#if ( configUSE_TRACE_FACILITY == 1 )
		{
			pxNewQueue->ucQueueType = ucQueueType;
		}
		#endif /* configUSE_TRACE_FACILITY */

		#if( configUSE_QUEUE_SETS == 1 )
		{
			pxNewQueue->pxQueueSetContainer = NULL;
		}
		#endif /* configUSE_QUEUE_SETS */

		traceQUEUE_CREATE( pxNewQueue );
		xReturn = pxNewQueue;
	}
	else
	{
		traceQUEUE_CREATE_FAILED( ucQueueType );
	}

	configASSERT( xReturn );

	return xReturn;
}
/*-----------------------------------------------------------*/
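
/*
 * Usage sketch: xQueueCreate() in queue.h expands to a call to
 * xQueueGenericCreate() with ucQueueType set to queueQUEUE_TYPE_BASE.  A
 * hypothetical queue holding up to ten 32-bit values:
 *
 *     QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );
 *
 *     configASSERT( xQueue );  (NULL is returned if the FreeRTOS heap could
 *     not supply sizeof( Queue_t ) plus the storage area)
 */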

#if ( configUSE_MUTEXES == 1 )

	QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;

		/* Prevent compiler warnings about unused parameters if
		configUSE_TRACE_FACILITY does not equal 1. */
		( void ) ucQueueType;

		/* Allocate the new queue structure. */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
		if( pxNewQueue != NULL )
		{
			/* Information required for priority inheritance. */
			pxNewQueue->pxMutexHolder = NULL;
			pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

			/* When a queue is used as a mutex no data is actually copied into
			or out of the queue. */
			pxNewQueue->pcWriteTo = NULL;
			pxNewQueue->u.pcReadFrom = NULL;

			/* Each mutex has a length of 1 (like a binary semaphore) and
			an item size of 0 as nothing is actually copied into or out
			of the mutex. */
			pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
			pxNewQueue->uxLength = ( UBaseType_t ) 1U;
			pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
			pxNewQueue->xRxLock = queueUNLOCKED;
			pxNewQueue->xTxLock = queueUNLOCKED;

			#if ( configUSE_TRACE_FACILITY == 1 )
			{
				pxNewQueue->ucQueueType = ucQueueType;
			}
			#endif

			#if ( configUSE_QUEUE_SETS == 1 )
			{
				pxNewQueue->pxQueueSetContainer = NULL;
			}
			#endif

			/* Ensure the event queues start with the correct state. */
			vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );

			traceCREATE_MUTEX( pxNewQueue );

			/* Start with the semaphore in the expected state. */
			( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
		}
		else
		{
			traceCREATE_MUTEX_FAILED();
		}

		return pxNewQueue;
	}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
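
/*
 * Usage sketch: mutexes are created through the macros in semphr.h;
 * xSemaphoreCreateMutex() expands to xQueueCreateMutex( queueQUEUE_TYPE_MUTEX ).
 * A typical guarded region:
 *
 *     SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();
 *
 *     if( xSemaphoreTake( xMutex, portMAX_DELAY ) == pdTRUE )
 *     {
 *         (access the resource protected by the mutex)
 *         ( void ) xSemaphoreGive( xMutex );
 *     }
 */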

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

	void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
	{
	void *pxReturn;

		/* This function is called by xSemaphoreGetMutexHolder(), and should not
		be called directly.  Note:  This is a good way of determining if the
		calling task is the mutex holder, but not a good way of determining the
		identity of the mutex holder, as the holder may change between the
		following critical section exiting and the function returning. */
		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
			}
			else
			{
				pxReturn = NULL;
			}
		}
		taskEXIT_CRITICAL();

		return pxReturn;
	} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* If this is the task that holds the mutex then pxMutexHolder will not
		change outside of this task.  If this task does not hold the mutex then
		pxMutexHolder can never coincidentally equal the tasks handle, and as
		this is the only condition we are interested in it does not matter if
		pxMutexHolder is accessed simultaneously by another task.  Therefore no
		mutual exclusion is required to test the pxMutexHolder variable. */
		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
		{
			traceGIVE_MUTEX_RECURSIVE( pxMutex );

			/* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
			the task handle, therefore no underflow check is required.  Also,
			uxRecursiveCallCount is only modified by the mutex holder, and as
			there can only be one, no mutual exclusion is required to modify the
			uxRecursiveCallCount member. */
			( pxMutex->u.uxRecursiveCallCount )--;

			/* Have we unwound the call count? */
			if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
			{
				/* Return the mutex.  This will automatically unblock any other
				task that might be waiting to access the mutex. */
				( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			/* The mutex cannot be given because the calling task is not the
			holder. */
			xReturn = pdFAIL;

			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* Comments regarding mutual exclusion as per those within
		xQueueGiveMutexRecursive(). */

		traceTAKE_MUTEX_RECURSIVE( pxMutex );

		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
		{
			( pxMutex->u.uxRecursiveCallCount )++;
			xReturn = pdPASS;
		}
		else
		{
			xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );

			/* pdPASS will only be returned if the mutex was successfully
			obtained.  The calling task may have entered the Blocked state
			before reaching here. */
			if( xReturn == pdPASS )
			{
				( pxMutex->u.uxRecursiveCallCount )++;
			}
			else
			{
				traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
			}
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
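
/*
 * Usage sketch: the recursive pair above is reached through
 * xSemaphoreTakeRecursive() and xSemaphoreGiveRecursive() in semphr.h.  The
 * holding task must give the mutex back once per successful take before it
 * becomes available to other tasks:
 *
 *     SemaphoreHandle_t xRecMutex = xSemaphoreCreateRecursiveMutex();
 *
 *     if( xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY ) == pdPASS )
 *     {
 *         ( void ) xSemaphoreTakeRecursive( xRecMutex, 0 );   (count is now 2)
 *         ( void ) xSemaphoreGiveRecursive( xRecMutex );      (count unwinds to 1)
 *         ( void ) xSemaphoreGiveRecursive( xRecMutex );      (mutex released)
 *     }
 */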

#if ( configUSE_COUNTING_SEMAPHORES == 1 )

	QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
	{
	QueueHandle_t xHandle;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xHandle != NULL )
		{
			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}
		else
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}

		configASSERT( xHandle );
		return xHandle;
	}

#endif /* configUSE_COUNTING_SEMAPHORES */
/*-----------------------------------------------------------*/
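
/*
 * Usage sketch: xSemaphoreCreateCounting() in semphr.h routes here.  A
 * counting semaphore able to latch up to five events, starting empty:
 *
 *     SemaphoreHandle_t xCounting = xSemaphoreCreateCounting( 5, 0 );
 *
 * Each xSemaphoreGive( xCounting ) then increments uxMessagesWaiting (up to
 * the maximum of five), and each xSemaphoreTake( xCounting, xTicksToWait )
 * decrements it, blocking while the count is zero.
 */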

BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif


	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there room on the queue now?  The running task must be the
			highest priority task wanting to access the queue.  If the head item
			in the queue is to be overwritten then it does not matter if the
			queue is full. */
			if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
			{
				traceQUEUE_SEND( pxQueue );
				xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock.  A context switch is required. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						/* If there was a task waiting for data to arrive on the
						queue then unblock it now. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
							{
								/* The unblocked task has a priority higher than
								our own so yield immediately.  Yes it is ok to
								do this from within the critical section - the
								kernel takes care of that. */
								queueYIELD_IF_USING_PREEMPTION();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else if( xYieldRequired != pdFALSE )
						{
							/* This path is a special case that will only get
							executed if the task was holding multiple mutexes
							and the mutexes were given back in an order that is
							different to that in which they were taken. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately.  Yes it is ok to do
							this from within the critical section - the kernel
							takes care of that. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else if( xYieldRequired != pdFALSE )
					{
						/* This path is a special case that will only get
						executed if the task was holding multiple mutexes and
						the mutexes were given back in an order that is
						different to that in which they were taken. */
						queueYIELD_IF_USING_PREEMPTION();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */

				taskEXIT_CRITICAL();
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was full and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();

					/* Return to the original privilege level before exiting
					the function. */
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was full and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_SEND( pxQueue );
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

				/* Unlocking the queue means queue events can affect the
				event list.  It is possible that interrupts occurring now
				remove this task from the event list again - but as the
				scheduler is suspended the task will go onto the pending
				ready list instead of the actual ready list. */
				prvUnlockQueue( pxQueue );

				/* Resuming the scheduler will move tasks from the pending
				ready list into the ready list - so it is feasible that this
				task is already in a ready list before it yields - in which
				case the yield will not cause a context switch unless there
				is also a higher priority task in the pending ready list. */
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
			}
			else
			{
				/* Try again. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			/* The timeout has expired. */
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();

			/* Return to the original privilege level before exiting the
			function. */
			traceQUEUE_SEND_FAILED( pxQueue );
			return errQUEUE_FULL;
		}
	}
}
/*-----------------------------------------------------------*/
809 #if ( configUSE_ALTERNATIVE_API == 1 )
810 
811  BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
812  {
813  BaseType_t xEntryTimeSet = pdFALSE;
814  TimeOut_t xTimeOut;
815  Queue_t * const pxQueue = ( Queue_t * ) xQueue;
816 
817  configASSERT( pxQueue );
818  configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
819 
820  for( ;; )
821  {
823  {
824  /* Is there room on the queue now? To be running we must be
825  the highest priority task wanting to access the queue. */
826  if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
827  {
828  traceQUEUE_SEND( pxQueue );
829  prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
830 
831  /* If there was a task waiting for data to arrive on the
832  queue then unblock it now. */
833  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
834  {
835  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
836  {
837  /* The unblocked task has a priority higher than
838  our own so yield immediately. */
840  }
841  else
842  {
844  }
845  }
846  else
847  {
849  }
850 
852  return pdPASS;
853  }
854  else
855  {
856  if( xTicksToWait == ( TickType_t ) 0 )
857  {
859  return errQUEUE_FULL;
860  }
861  else if( xEntryTimeSet == pdFALSE )
862  {
863  vTaskSetTimeOutState( &xTimeOut );
864  xEntryTimeSet = pdTRUE;
865  }
866  }
867  }
869 
871  {
872  if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
873  {
874  if( prvIsQueueFull( pxQueue ) != pdFALSE )
875  {
876  traceBLOCKING_ON_QUEUE_SEND( pxQueue );
877  vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
879  }
880  else
881  {
883  }
884  }
885  else
886  {
888  traceQUEUE_SEND_FAILED( pxQueue );
889  return errQUEUE_FULL;
890  }
891  }
893  }
894  }
895 
896 #endif /* configUSE_ALTERNATIVE_API */
897 /*-----------------------------------------------------------*/

#if ( configUSE_ALTERNATIVE_API == 1 )

	BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
	{
	BaseType_t xEntryTimeSet = pdFALSE;
	TimeOut_t xTimeOut;
	int8_t *pcOriginalReadPosition;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		configASSERT( pxQueue );
		configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

		for( ;; )
		{
			taskENTER_CRITICAL();
			{
				if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
				{
					/* Remember our read position in case we are just peeking. */
					pcOriginalReadPosition = pxQueue->u.pcReadFrom;

					prvCopyDataFromQueue( pxQueue, pvBuffer );

					if( xJustPeeking == pdFALSE )
					{
						traceQUEUE_RECEIVE( pxQueue );

						/* Data is actually being removed (not just peeked). */
						--( pxQueue->uxMessagesWaiting );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								/* Record the information required to implement
								priority inheritance should it become necessary. */
								pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
							{
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
					}
					else
					{
						traceQUEUE_PEEK( pxQueue );

						/* The data is not being removed, so reset our read
						pointer. */
						pxQueue->u.pcReadFrom = pcOriginalReadPosition;

						/* The data is being left in the queue, so see if there are
						any other tasks waiting for the data. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							/* Tasks that are removed from the event list will get added to
							the pending ready list as the scheduler is still suspended. */
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority than this task. */
								portYIELD_WITHIN_API();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}

					taskEXIT_CRITICAL();
					return pdPASS;
				}
				else
				{
					if( xTicksToWait == ( TickType_t ) 0 )
					{
						taskEXIT_CRITICAL();
						traceQUEUE_RECEIVE_FAILED( pxQueue );
						return errQUEUE_EMPTY;
					}
					else if( xEntryTimeSet == pdFALSE )
					{
						vTaskSetTimeOutState( &xTimeOut );
						xEntryTimeSet = pdTRUE;
					}
				}
			}
			taskEXIT_CRITICAL();

			taskENTER_CRITICAL();
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
					{
						traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

						#if ( configUSE_MUTEXES == 1 )
						{
							if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
							{
								portENTER_CRITICAL();
								{
									vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
								}
								portEXIT_CRITICAL();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						#endif

						vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
						portYIELD_WITHIN_API();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
			}
			taskEXIT_CRITICAL();
		}
	}


#endif /* configUSE_ALTERNATIVE_API */
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	/* Similar to xQueueGenericSend, except without blocking if there is no room
	in the queue.  Also don't directly wake a task that was blocked on a queue
	read, instead return a flag to say whether a context switch is required or
	not (i.e. has a task with a higher priority than us been woken by this
	post). */
	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
		{
			traceQUEUE_SEND_FROM_ISR( pxQueue );

			/* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
			semaphore or mutex.  That means prvCopyDataToQueue() cannot result
			in a task disinheriting a priority and prvCopyDataToQueue() can be
			called here even though the disinherit function does not check if
			the scheduler is suspended before accessing the ready lists. */
			( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

			/* The event list is not altered if the queue is locked.  This will
			be done when the queue is unlocked later. */
			if( pxQueue->xTxLock == queueUNLOCKED )
			{
				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock.  A context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority so
								record that a context switch is required. */
								if( pxHigherPriorityTaskWoken != NULL )
								{
									*pxHigherPriorityTaskWoken = pdTRUE;
								}
								else
								{
									mtCOVERAGE_TEST_MARKER();
								}
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was posted while it was locked. */
				++( pxQueue->xTxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
			xReturn = errQUEUE_FULL;
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/

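/*
 * Usage sketch: a hypothetical interrupt handler posting through the
 * xQueueSendFromISR() macro in queue.h and then requesting a context switch
 * on exit if a higher priority task was woken (the exact yield macro,
 * portYIELD_FROM_ISR() here, is port specific):
 *
 *     void vExampleISR( void )
 *     {
 *     BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *     uint32_t ulStatus = 0;   (would be read from a peripheral register)
 *
 *         ( void ) xQueueSendFromISR( xQueue, &ulStatus, &xHigherPriorityTaskWoken );
 *         portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *     }
 */
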
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	/* Similar to xQueueGenericSendFromISR() but used with semaphores where the
	item size is 0.  Don't directly wake a task that was blocked on a queue
	read, instead return a flag to say whether a context switch is required or
	not (i.e. has a task with a higher priority than us been woken by this
	post). */

	configASSERT( pxQueue );

	/* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
	if the item size is not 0. */
	configASSERT( pxQueue->uxItemSize == 0 );

	/* Normally a mutex would not be given from an interrupt, especially if
	there is a mutex holder, as priority inheritance makes no sense for an
	interrupt, only tasks. */
	configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* When the queue is used to implement a semaphore no data is ever
		moved through the queue but it is still valid to see if the queue 'has
		space'. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			traceQUEUE_SEND_FROM_ISR( pxQueue );

			/* A task can only have an inherited priority if it is a mutex
			holder - and if there is a mutex holder then the mutex cannot be
			given from an ISR.  As this is the ISR version of the function it
			can be assumed there is no mutex holder and no need to determine if
			priority disinheritance is needed.  Simply increase the count of
			messages (semaphores) available. */
			++( pxQueue->uxMessagesWaiting );

			/* The event list is not altered if the queue is locked.  This will
			be done when the queue is unlocked later. */
			if( pxQueue->xTxLock == queueUNLOCKED )
			{
				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
						{
							/* The semaphore is a member of a queue set, and
							posting to the queue set caused a higher priority
							task to unblock.  A context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority so
								record that a context switch is required. */
								if( pxHigherPriorityTaskWoken != NULL )
								{
									*pxHigherPriorityTaskWoken = pdTRUE;
								}
								else
								{
									mtCOVERAGE_TEST_MARKER();
								}
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record that a
							context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was posted while it was locked. */
				++( pxQueue->xTxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
			xReturn = errQUEUE_FULL;
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	/* This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */

	for( ;; )
	{
		taskENTER_CRITICAL();
		{
			/* Is there data in the queue now?  To be running the calling task
			must be the highest priority task wanting to access the queue. */
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Remember the read position in case the queue is only being
				peeked. */
				pcOriginalReadPosition = pxQueue->u.pcReadFrom;

				prvCopyDataFromQueue( pxQueue, pvBuffer );

				if( xJustPeeking == pdFALSE )
				{
					traceQUEUE_RECEIVE( pxQueue );

					/* Actually removing data, not just peeking. */
					--( pxQueue->uxMessagesWaiting );

					#if ( configUSE_MUTEXES == 1 )
					{
						if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
						{
							/* Record the information required to implement
							priority inheritance should it become necessary. */
							pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					#endif /* configUSE_MUTEXES */

					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
						{
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					traceQUEUE_PEEK( pxQueue );

					/* The data is not being removed, so reset the read
					pointer. */
					pxQueue->u.pcReadFrom = pcOriginalReadPosition;

					/* The data is being left in the queue, so see if there are
					any other tasks waiting for the data. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority than this task. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}

				taskEXIT_CRITICAL();
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was empty and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL();
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was empty and a block time was specified so
					configure the timeout structure. */
					vTaskSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL();

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		vTaskSuspendAll();
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

				#if ( configUSE_MUTEXES == 1 )
				{
					if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
					{
						taskENTER_CRITICAL();
						{
							vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
						}
						taskEXIT_CRITICAL();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif

				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
				prvUnlockQueue( pxQueue );
				if( xTaskResumeAll() == pdFALSE )
				{
					portYIELD_WITHIN_API();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Try again. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}
		else
		{
			prvUnlockQueue( pxQueue );
			( void ) xTaskResumeAll();
			traceQUEUE_RECEIVE_FAILED( pxQueue );
			return errQUEUE_EMPTY;
		}
	}
}
/*-----------------------------------------------------------*/
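
/*
 * Usage sketch: xQueueReceive() and xQueuePeek() in queue.h call the function
 * above with xJustPeeking set to pdFALSE and pdTRUE respectively:
 *
 *     uint32_t ulReceived;
 *
 *     if( xQueueReceive( xQueue, &ulReceived, pdMS_TO_TICKS( 200 ) ) == pdPASS )
 *     {
 *         (ulReceived holds the item, which has been removed from the queue)
 *     }
 */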

BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* Cannot block in an ISR, so check there is data available. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

			prvCopyDataFromQueue( pxQueue, pvBuffer );
			--( pxQueue->uxMessagesWaiting );

			/* If the queue is locked the event list will not be modified.
			Instead update the lock count so the task that unlocks the queue
			will know that an ISR has removed data while the queue was
			locked. */
			if( pxQueue->xRxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority than us so
						force a context switch. */
						if( pxHigherPriorityTaskWoken != NULL )
						{
							*pxHigherPriorityTaskWoken = pdTRUE;
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was removed while it was locked. */
				++( pxQueue->xRxLock );
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
			traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/

BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		/* Cannot block in an ISR, so check there is data available. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			traceQUEUE_PEEK_FROM_ISR( pxQueue );

			/* Remember the read position so it can be reset as nothing is
			actually being removed from the queue. */
			pcOriginalReadPosition = pxQueue->u.pcReadFrom;
			prvCopyDataFromQueue( pxQueue, pvBuffer );
			pxQueue->u.pcReadFrom = pcOriginalReadPosition;

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
			traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
		}
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

	configASSERT( xQueue );

	taskENTER_CRITICAL();
	{
		uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
	}
	taskEXIT_CRITICAL();

	return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;
Queue_t *pxQueue;

	pxQueue = ( Queue_t * ) xQueue;
	configASSERT( pxQueue );

	taskENTER_CRITICAL();
	{
		uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
	}
	taskEXIT_CRITICAL();

	return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
{
UBaseType_t uxReturn;

	configASSERT( xQueue );

	uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;

	return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
/*-----------------------------------------------------------*/

void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;

	configASSERT( pxQueue );

	traceQUEUE_DELETE( pxQueue );
	#if ( configQUEUE_REGISTRY_SIZE > 0 )
	{
		vQueueUnregisterQueue( pxQueue );
	}
	#endif
	vPortFree( pxQueue );
}
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

	UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

	void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
	{
		( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

	uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->ucQueueType;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/
1783 
1784 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
1785 {
1786 BaseType_t xReturn = pdFALSE;
1787 
1788  if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
1789  {
1790  #if ( configUSE_MUTEXES == 1 )
1791  {
1792  if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1793  {
1794  /* The mutex is no longer being held. */
1795  xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
1796  pxQueue->pxMutexHolder = NULL;
1797  }
1798  else
1799  {
1800  mtCOVERAGE_TEST_MARKER();
1801  }
1802  }
1803  #endif /* configUSE_MUTEXES */
1804  }
1805  else if( xPosition == queueSEND_TO_BACK )
1806  {
1807  ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
1808  pxQueue->pcWriteTo += pxQueue->uxItemSize;
1809  if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
1810  {
1811  pxQueue->pcWriteTo = pxQueue->pcHead;
1812  }
1813  else
1814  {
1815  mtCOVERAGE_TEST_MARKER();
1816  }
1817  }
1818  else
1819  {
1820  ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
1821  pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
1822  if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
1823  {
1824  pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
1825  }
1826  else
1827  {
1828  mtCOVERAGE_TEST_MARKER();
1829  }
1830 
1831  if( xPosition == queueOVERWRITE )
1832  {
1833  if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
1834  {
1835  /* An item is not being added but overwritten, so subtract
1836  one from the recorded number of items in the queue so when
1837  one is added again below the number of recorded items remains
1838  correct. */
1839  --( pxQueue->uxMessagesWaiting );
1840  }
1841  else
1842  {
1843  mtCOVERAGE_TEST_MARKER();
1844  }
1845  }
1846  else
1847  {
1848  mtCOVERAGE_TEST_MARKER();
1849  }
1850  }
1851 
1852  ++( pxQueue->uxMessagesWaiting );
1853 
1854  return xReturn;
1855 }
1856 /*-----------------------------------------------------------*/
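The back/front copy logic in prvCopyDataToQueue() is plain circular-buffer arithmetic: pcWriteTo advances and wraps to pcHead, while pcReadFrom retreats and wraps to pcTail - uxItemSize. A standalone model of the same pointer arithmetic (illustrative C only, not kernel code; all names are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ITEM_SIZE ( sizeof( uint32_t ) )
#define QUEUE_LEN 3u

static uint8_t ucStorage[ ITEM_SIZE * QUEUE_LEN ];
static uint8_t * const pucHead = ucStorage;                       /* pcHead    */
static uint8_t * const pucTail = ucStorage + sizeof( ucStorage ); /* pcTail    */
static uint8_t *pucWriteTo = ucStorage;                           /* pcWriteTo */

/* Mirrors the queueSEND_TO_BACK branch: copy, advance, wrap. */
static void vModelSendToBack( uint32_t ulItem )
{
    memcpy( pucWriteTo, &ulItem, ITEM_SIZE );
    pucWriteTo += ITEM_SIZE;

    if( pucWriteTo >= pucTail )
    {
        pucWriteTo = pucHead; /* wrapped past the end of the storage area */
    }
}

int main( void )
{
    vModelSendToBack( 1 );
    vModelSendToBack( 2 );
    vModelSendToBack( 3 ); /* pucWriteTo wraps back to pucHead here */
    printf( "write offset after wrap: %ld\n", ( long ) ( pucWriteTo - pucHead ) );
    return 0;
}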
1857 
1858 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
1859 {
1860  if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
1861  {
1862  pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
1863  if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
1864  {
1865  pxQueue->u.pcReadFrom = pxQueue->pcHead;
1866  }
1867  else
1868  {
1869  mtCOVERAGE_TEST_MARKER();
1870  }
1871  ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
1872  }
1873 }
1874 /*-----------------------------------------------------------*/
1875 
1876 static void prvUnlockQueue( Queue_t * const pxQueue )
1877 {
1878  /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
1879 
1880  /* The lock counts contain the number of extra data items placed or
1881  removed from the queue while the queue was locked. When a queue is
1882  locked items can be added or removed, but the event lists cannot be
1883  updated. */
1884  taskENTER_CRITICAL();
1885  {
1886  /* See if data was added to the queue while it was locked. */
1887  while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
1888  {
1889  /* Data was posted while the queue was locked. Are any tasks
1890  blocked waiting for data to become available? */
1891  #if ( configUSE_QUEUE_SETS == 1 )
1892  {
1893  if( pxQueue->pxQueueSetContainer != NULL )
1894  {
1895  if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
1896  {
1897  /* The queue is a member of a queue set, and posting to
1898  the queue set caused a higher priority task to unblock.
1899  A context switch is required. */
1900  vTaskMissedYield();
1901  }
1902  else
1903  {
1904  mtCOVERAGE_TEST_MARKER();
1905  }
1906  }
1907  else
1908  {
1909  /* Tasks that are removed from the event list will get added to
1910  the pending ready list as the scheduler is still suspended. */
1911  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1912  {
1913  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1914  {
1915  /* The task waiting has a higher priority so record that a
1916  context switch is required. */
1917  vTaskMissedYield();
1918  }
1919  else
1920  {
1921  mtCOVERAGE_TEST_MARKER();
1922  }
1923  }
1924  else
1925  {
1926  break;
1927  }
1928  }
1929  }
1930  #else /* configUSE_QUEUE_SETS */
1931  {
1932  /* Tasks that are removed from the event list will get added to
1933  the pending ready list as the scheduler is still suspended. */
1934  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1935  {
1936  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1937  {
1938  /* The task waiting has a higher priority so record that a
1939  context switch is required. */
1940  vTaskMissedYield();
1941  }
1942  else
1943  {
1944  mtCOVERAGE_TEST_MARKER();
1945  }
1946  }
1947  else
1948  {
1949  break;
1950  }
1951  }
1952  #endif /* configUSE_QUEUE_SETS */
1953 
1954  --( pxQueue->xTxLock );
1955  }
1956 
1957  pxQueue->xTxLock = queueUNLOCKED;
1958  }
1959  taskEXIT_CRITICAL();
1960 
1961  /* Do the same for the Rx lock. */
1962  taskENTER_CRITICAL();
1963  {
1964  while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
1965  {
1966  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1967  {
1968  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1969  {
1970  vTaskMissedYield();
1971  }
1972  else
1973  {
1974  mtCOVERAGE_TEST_MARKER();
1975  }
1976 
1977  --( pxQueue->xRxLock );
1978  }
1979  else
1980  {
1981  break;
1982  }
1983  }
1984 
1985  pxQueue->xRxLock = queueUNLOCKED;
1986  }
1987  taskEXIT_CRITICAL();
1988 }
1989 /*-----------------------------------------------------------*/
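The lock mechanism is easiest to see in isolation. The following standalone model (illustrative C only, not kernel code; every name is made up) mimics the handshake described above: while the queue is locked an ISR just bumps the Tx lock count, and the unlock path later drains that count, performing at most one deferred wake-up per post:

#include <stdio.h>

#define modelUNLOCKED          ( -1 )
#define modelLOCKED_UNMODIFIED ( 0 )

static int xTxLock = modelUNLOCKED;
static int xWaitersWoken = 0;

static void vModelLock( void )
{
    xTxLock = modelLOCKED_UNMODIFIED;
}

static void vModelPostFromISR( void )
{
    if( xTxLock == modelUNLOCKED )
    {
        xWaitersWoken++; /* queue unlocked: wake a waiter immediately */
    }
    else
    {
        xTxLock++;       /* locked: defer the wake-up                 */
    }
}

static void vModelUnlock( void )
{
    while( xTxLock > modelLOCKED_UNMODIFIED )
    {
        xWaitersWoken++; /* one deferred wake-up per posted item */
        xTxLock--;
    }
    xTxLock = modelUNLOCKED;
}

int main( void )
{
    vModelLock();
    vModelPostFromISR();
    vModelPostFromISR();
    vModelUnlock();
    printf( "woken: %d\n", xWaitersWoken ); /* prints "woken: 2" */
    return 0;
}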
1990 
1991 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
1992 {
1993 BaseType_t xReturn;
1994 
1995  taskENTER_CRITICAL();
1996  {
1997  if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
1998  {
1999  xReturn = pdTRUE;
2000  }
2001  else
2002  {
2003  xReturn = pdFALSE;
2004  }
2005  }
2006  taskEXIT_CRITICAL();
2007 
2008  return xReturn;
2009 }
2010 /*-----------------------------------------------------------*/
2011 
2012 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
2013 {
2014 BaseType_t xReturn;
2015 
2016  configASSERT( xQueue );
2017  if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
2018  {
2019  xReturn = pdTRUE;
2020  }
2021  else
2022  {
2023  xReturn = pdFALSE;
2024  }
2025 
2026  return xReturn;
2027 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2028 /*-----------------------------------------------------------*/
2029 
2030 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
2031 {
2032 BaseType_t xReturn;
2033 
2034  taskENTER_CRITICAL();
2035  {
2036  if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2037  {
2038  xReturn = pdTRUE;
2039  }
2040  else
2041  {
2042  xReturn = pdFALSE;
2043  }
2044  }
2045  taskEXIT_CRITICAL();
2046 
2047  return xReturn;
2048 }
2049 /*-----------------------------------------------------------*/
2050 
2051 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
2052 {
2053 BaseType_t xReturn;
2054 
2055  configASSERT( xQueue );
2056  if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
2057  {
2058  xReturn = pdTRUE;
2059  }
2060  else
2061  {
2062  xReturn = pdFALSE;
2063  }
2064 
2065  return xReturn;
2066 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2067 /*-----------------------------------------------------------*/
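A sketch of the ISR-side occupancy checks (hypothetical ISR and handle). The result is only a snapshot; it can be stale by the time it is used if a higher priority interrupt or another core also touches the queue:

#include "FreeRTOS.h"
#include "queue.h"

extern QueueHandle_t xDataQueue; /* hypothetical */

void vStatusISR( void )
{
    if( xQueueIsQueueEmptyFromISR( xDataQueue ) == pdFALSE )
    {
        /* At least one item was present at the instant of the call. */
    }

    if( xQueueIsQueueFullFromISR( xDataQueue ) == pdFALSE )
    {
        /* There was at least one free slot at the instant of the call. */
    }
}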
2068 
2069 #if ( configUSE_CO_ROUTINES == 1 )
2070 
2071  BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
2072  {
2073  BaseType_t xReturn;
2074  Queue_t * const pxQueue = ( Queue_t * ) xQueue;
2075 
2076  /* If the queue is already full we may have to block. A critical section
2077  is required to prevent an interrupt removing something from the queue
2078  between the check to see if the queue is full and blocking on the queue. */
2079  portDISABLE_INTERRUPTS();
2080  {
2081  if( prvIsQueueFull( pxQueue ) != pdFALSE )
2082  {
2083  /* The queue is full - do we want to block or just leave without
2084  posting? */
2085  if( xTicksToWait > ( TickType_t ) 0 )
2086  {
2087  /* As this is called from a coroutine we cannot block directly, but
2088  return indicating that we need to block. */
2089  vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
2090  portENABLE_INTERRUPTS();
2091  return errQUEUE_BLOCKED;
2092  }
2093  else
2094  {
2095  portENABLE_INTERRUPTS();
2096  return errQUEUE_FULL;
2097  }
2098  }
2099  }
2100  portENABLE_INTERRUPTS();
2101 
2102  portDISABLE_INTERRUPTS();
2103  {
2104  if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2105  {
2106  /* There is room in the queue, copy the data into the queue. */
2107  prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2108  xReturn = pdPASS;
2109 
2110  /* Were any co-routines waiting for data to become available? */
2111  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2112  {
2113  /* In this instance the co-routine could be placed directly
2114  into the ready list as we are within a critical section.
2115  Instead the same pending ready list mechanism is used as if
2116  the event were caused from within an interrupt. */
2117  if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2118  {
2119  /* The co-routine waiting has a higher priority so record
2120  that a yield might be appropriate. */
2121  xReturn = errQUEUE_YIELD;
2122  }
2123  else
2124  {
2125  mtCOVERAGE_TEST_MARKER();
2126  }
2127  }
2128  else
2129  {
2130  mtCOVERAGE_TEST_MARKER();
2131  }
2132  }
2133  else
2134  {
2135  xReturn = errQUEUE_FULL;
2136  }
2137  }
2138  portENABLE_INTERRUPTS();
2139 
2140  return xReturn;
2141  }
2142 
2143 #endif /* configUSE_CO_ROUTINES */
2144 /*-----------------------------------------------------------*/
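Application code is not expected to call xQueueCRSend() directly; it is reached through the crQUEUE_SEND() macro from croutine.h, which also handles the errQUEUE_BLOCKED handshake. A producer sketch, assuming configUSE_CO_ROUTINES is 1 and xCRQueue is a hypothetical queue of uint8_t items:

#include "FreeRTOS.h"
#include "queue.h"
#include "croutine.h"

extern QueueHandle_t xCRQueue; /* hypothetical */

void vProducerCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
static uint8_t ucValue = 0; /* co-routine locals must be static */
BaseType_t xResult;

    ( void ) uxIndex;

    crSTART( xHandle );

    for( ;; )
    {
        /* Post to the back of the queue, blocking co-operatively for up
        to 10 ticks if the queue is full. */
        crQUEUE_SEND( xHandle, xCRQueue, &ucValue, ( TickType_t ) 10, &xResult );

        if( xResult == pdPASS )
        {
            ucValue++;
        }
    }

    crEND();
}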
2145 
2146 #if ( configUSE_CO_ROUTINES == 1 )
2147 
2148  BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
2149  {
2150  BaseType_t xReturn;
2151  Queue_t * const pxQueue = ( Queue_t * ) xQueue;
2152 
2153  /* If the queue is already empty we may have to block. A critical section
2154  is required to prevent an interrupt adding something to the queue
2155  between the check to see if the queue is empty and blocking on the queue. */
2156  portDISABLE_INTERRUPTS();
2157  {
2158  if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2159  {
2160  /* There are no messages in the queue, do we want to block or just
2161  leave with nothing? */
2162  if( xTicksToWait > ( TickType_t ) 0 )
2163  {
2164  /* As this is a co-routine we cannot block directly, but return
2165  indicating that we need to block. */
2166  vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
2167  portENABLE_INTERRUPTS();
2168  return errQUEUE_BLOCKED;
2169  }
2170  else
2171  {
2172  portENABLE_INTERRUPTS();
2173  return errQUEUE_FULL;
2174  }
2175  }
2176  else
2177  {
2178  mtCOVERAGE_TEST_MARKER();
2179  }
2180  }
2181  portENABLE_INTERRUPTS();
2182 
2183  portDISABLE_INTERRUPTS();
2184  {
2185  if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2186  {
2187  /* Data is available from the queue. */
2188  pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
2189  if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
2190  {
2191  pxQueue->u.pcReadFrom = pxQueue->pcHead;
2192  }
2193  else
2194  {
2195  mtCOVERAGE_TEST_MARKER();
2196  }
2197  --( pxQueue->uxMessagesWaiting );
2198  ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2199 
2200  xReturn = pdPASS;
2201 
2202  /* Were any co-routines waiting for space to become available? */
2203  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2204  {
2205  /* In this instance the co-routine could be placed directly
2206  into the ready list as we are within a critical section.
2207  Instead the same pending ready list mechanism is used as if
2208  the event were caused from within an interrupt. */
2209  if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2210  {
2211  xReturn = errQUEUE_YIELD;
2212  }
2213  else
2214  {
2215  mtCOVERAGE_TEST_MARKER();
2216  }
2217  }
2218  else
2219  {
2220  mtCOVERAGE_TEST_MARKER();
2221  }
2222  }
2223  else
2224  {
2225  xReturn = pdFAIL;
2226  }
2227  }
2228  portENABLE_INTERRUPTS();
2229 
2230  return xReturn;
2231  }
2232 
2233 #endif /* configUSE_CO_ROUTINES */
2234 /*-----------------------------------------------------------*/
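The matching consumer side, again via the croutine.h macro rather than a direct call (same hypothetical handle and assumptions as the producer sketch above):

#include "FreeRTOS.h"
#include "queue.h"
#include "croutine.h"

extern QueueHandle_t xCRQueue; /* hypothetical */

void vConsumerCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
static uint8_t ucReceived; /* co-routine locals must be static */
BaseType_t xResult;

    ( void ) uxIndex;

    crSTART( xHandle );

    for( ;; )
    {
        /* Wait co-operatively for up to 10 ticks for an item to arrive. */
        crQUEUE_RECEIVE( xHandle, xCRQueue, &ucReceived, ( TickType_t ) 10, &xResult );

        if( xResult == pdPASS )
        {
            /* ucReceived holds a copy of the dequeued item. */
        }
    }

    crEND();
}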
2235 
2236 #if ( configUSE_CO_ROUTINES == 1 )
2237 
2238  BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
2239  {
2240  Queue_t * const pxQueue = ( Queue_t * ) xQueue;
2241 
2242  /* Cannot block within an ISR so if there is no space on the queue then
2243  exit without doing anything. */
2244  if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2245  {
2246  prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2247 
2248  /* We only want to wake one co-routine per ISR, so check that a
2249  co-routine has not already been woken. */
2250  if( xCoRoutinePreviouslyWoken == pdFALSE )
2251  {
2252  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2253  {
2254  if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2255  {
2256  return pdTRUE;
2257  }
2258  else
2259  {
2260  mtCOVERAGE_TEST_MARKER();
2261  }
2262  }
2263  else
2264  {
2265  mtCOVERAGE_TEST_MARKER();
2266  }
2267  }
2268  else
2269  {
2270  mtCOVERAGE_TEST_MARKER();
2271  }
2272  }
2273  else
2274  {
2275  mtCOVERAGE_TEST_MARKER();
2276  }
2277 
2278  return xCoRoutinePreviouslyWoken;
2279  }
2280 
2281 #endif /* configUSE_CO_ROUTINES */
2282 /*-----------------------------------------------------------*/
2283 
2284 #if ( configUSE_CO_ROUTINES == 1 )
2285 
2286  BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
2287  {
2288  BaseType_t xReturn;
2289  Queue_t * const pxQueue = ( Queue_t * ) xQueue;
2290 
2291  /* We cannot block from an ISR, so check there is data available. If
2292  not then just leave without doing anything. */
2293  if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2294  {
2295  /* Copy the data from the queue. */
2296  pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
2297  if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
2298  {
2299  pxQueue->u.pcReadFrom = pxQueue->pcHead;
2300  }
2301  else
2302  {
2304  }
2305  --( pxQueue->uxMessagesWaiting );
2306  ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2307 
2308  if( ( *pxCoRoutineWoken ) == pdFALSE )
2309  {
2310  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2311  {
2312  if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2313  {
2314  *pxCoRoutineWoken = pdTRUE;
2315  }
2316  else
2317  {
2318  mtCOVERAGE_TEST_MARKER();
2319  }
2320  }
2321  else
2322  {
2323  mtCOVERAGE_TEST_MARKER();
2324  }
2325  }
2326  else
2327  {
2328  mtCOVERAGE_TEST_MARKER();
2329  }
2330 
2331  xReturn = pdPASS;
2332  }
2333  else
2334  {
2335  xReturn = pdFAIL;
2336  }
2337 
2338  return xReturn;
2339  }
2340 
2341 #endif /* configUSE_CO_ROUTINES */
2342 /*-----------------------------------------------------------*/
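The ISR-side co-routine calls are also wrapped by macros (crQUEUE_SEND_FROM_ISR() / crQUEUE_RECEIVE_FROM_ISR()). A sketch of the send direction with a hypothetical ISR and data source; note how the returned flag is fed back in so that, as the comment above explains, only one co-routine is woken per interrupt:

#include "FreeRTOS.h"
#include "queue.h"
#include "croutine.h"

extern QueueHandle_t xCRQueue; /* hypothetical */

void vRxISR( void )
{
BaseType_t xCRWoken = pdFALSE;
uint8_t ucByte = 0; /* would be read from a peripheral register */

    /* The flag latches to pdTRUE once a co-routine has been woken. */
    xCRWoken = crQUEUE_SEND_FROM_ISR( xCRQueue, &ucByte, xCRWoken );
    ( void ) xCRWoken;
}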
2343 
2344 #if ( configQUEUE_REGISTRY_SIZE > 0 )
2345 
2346  void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2347  {
2348  UBaseType_t ux;
2349 
2350  /* See if there is an empty space in the registry. A NULL name denotes
2351  a free slot. */
2352  for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
2353  {
2354  if( xQueueRegistry[ ux ].pcQueueName == NULL )
2355  {
2356  /* Store the information on this queue. */
2357  xQueueRegistry[ ux ].pcQueueName = pcQueueName;
2358  xQueueRegistry[ ux ].xHandle = xQueue;
2359 
2360  traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
2361  break;
2362  }
2363  else
2364  {
2365  mtCOVERAGE_TEST_MARKER();
2366  }
2367  }
2368  }
2369 
2370 #endif /* configQUEUE_REGISTRY_SIZE */
2371 /*-----------------------------------------------------------*/
2372 
2373 #if ( configQUEUE_REGISTRY_SIZE > 0 )
2374 
2375  void vQueueUnregisterQueue( QueueHandle_t xQueue )
2376  {
2377  UBaseType_t ux;
2378 
2379  /* See if the handle of the queue being unregistered is actually in the
2380  registry. */
2381  for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
2382  {
2383  if( xQueueRegistry[ ux ].xHandle == xQueue )
2384  {
2385  /* Set the name to NULL to show that this slot is free again. */
2386  xQueueRegistry[ ux ].pcQueueName = NULL;
2387  break;
2388  }
2389  else
2390  {
2391  mtCOVERAGE_TEST_MARKER();
2392  }
2393  }
2394 
2395  } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2396 
2397 #endif /* configQUEUE_REGISTRY_SIZE */
2398 /*-----------------------------------------------------------*/
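A registry sketch (requires configQUEUE_REGISTRY_SIZE > 0; names hypothetical). The registry exists purely so a kernel-aware debugger can show a human-readable name for a handle; note that the name string is stored by pointer, not copied:

#include "FreeRTOS.h"
#include "queue.h"

void vCreateNamedQueue( void )
{
QueueHandle_t xTxQueue = xQueueCreate( 16, sizeof( uint8_t ) );

    if( xTxQueue != NULL )
    {
        /* The string literal must outlive the registration. */
        vQueueAddToRegistry( xTxQueue, "TxQueue" );

        /* ... later, before deleting the queue ... */
        vQueueUnregisterQueue( xTxQueue );
    }
}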
2399 
2400 #if ( configUSE_TIMERS == 1 )
2401 
2402  void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
2403  {
2404  Queue_t * const pxQueue = ( Queue_t * ) xQueue;
2405 
2406  /* This function should not be called by application code hence the
2407  'Restricted' in its name. It is not part of the public API. It is
2408  designed for use by kernel code, and has special calling requirements.
2409  It can result in vListInsert() being called on a list that can only
2410  possibly ever have one item in it, so the list will be fast, but even
2411  so it should be called with the scheduler locked and not from a critical
2412  section. */
2413 
2414  /* Only do anything if there are no messages in the queue. This function
2415  will not actually cause the task to block, just place it on a blocked
2416  list. It will not block until the scheduler is unlocked - at which
2417  time a yield will be performed. If an item is added to the queue while
2418  the queue is locked, and the calling task blocks on the queue, then the
2419  calling task will be immediately unblocked when the queue is unlocked. */
2420  prvLockQueue( pxQueue );
2421  if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
2422  {
2423  /* There is nothing in the queue, block for the specified period. */
2424  vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
2425  }
2426  else
2427  {
2428  mtCOVERAGE_TEST_MARKER();
2429  }
2430  prvUnlockQueue( pxQueue );
2431  }
2432 
2433 #endif /* configUSE_TIMERS */
2434 /*-----------------------------------------------------------*/
2435 
2436 #if ( configUSE_QUEUE_SETS == 1 )
2437 
2438  QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
2439  {
2440  QueueSetHandle_t pxQueue;
2441 
2442  pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
2443 
2444  return pxQueue;
2445  }
2446 
2447 #endif /* configUSE_QUEUE_SETS */
2448 /*-----------------------------------------------------------*/
2449 
2450 #if ( configUSE_QUEUE_SETS == 1 )
2451 
2452  BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
2453  {
2454  BaseType_t xReturn;
2455 
2456  taskENTER_CRITICAL();
2457  {
2458  if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
2459  {
2460  /* Cannot add a queue/semaphore to more than one queue set. */
2461  xReturn = pdFAIL;
2462  }
2463  else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
2464  {
2465  /* Cannot add a queue/semaphore to a queue set if there are already
2466  items in the queue/semaphore. */
2467  xReturn = pdFAIL;
2468  }
2469  else
2470  {
2471  ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
2472  xReturn = pdPASS;
2473  }
2474  }
2475  taskEXIT_CRITICAL();
2476 
2477  return xReturn;
2478  }
2479 
2480 #endif /* configUSE_QUEUE_SETS */
2481 /*-----------------------------------------------------------*/
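A queue-set sketch (hypothetical handles; requires configUSE_QUEUE_SETS to be 1). The set length must cover the worst case, i.e. the sum of the lengths of all members, because every post to a member deposits one handle into the set:

#include "FreeRTOS.h"
#include "queue.h"
#include "semphr.h"

static QueueHandle_t xQueue1;        /* hypothetical */
static SemaphoreHandle_t xSemaphore; /* hypothetical */
static QueueSetHandle_t xSet;        /* hypothetical */

void vQueueSetExample( void )
{
QueueSetMemberHandle_t xActivated;

    xQueue1 = xQueueCreate( 8, sizeof( uint32_t ) );
    xSemaphore = xSemaphoreCreateBinary();
    xSet = xQueueCreateSet( 8 + 1 ); /* 8 queue items + 1 semaphore event */

    /* Members must be empty when added - see the checks in
    xQueueAddToSet() above. */
    ( void ) xQueueAddToSet( xQueue1, xSet );
    ( void ) xQueueAddToSet( xSemaphore, xSet );

    /* Block until one member contains data, then drain that member. */
    xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );

    if( xActivated == ( QueueSetMemberHandle_t ) xQueue1 )
    {
        uint32_t ulItem;
        ( void ) xQueueReceive( xQueue1, &ulItem, 0 );
    }
    else if( xActivated == ( QueueSetMemberHandle_t ) xSemaphore )
    {
        ( void ) xSemaphoreTake( xSemaphore, 0 );
    }
}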
2482 
2483 #if ( configUSE_QUEUE_SETS == 1 )
2484 
2485  BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
2486  {
2487  BaseType_t xReturn;
2488  Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;
2489 
2490  if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
2491  {
2492  /* The queue was not a member of the set. */
2493  xReturn = pdFAIL;
2494  }
2495  else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
2496  {
2497  /* It is dangerous to remove a queue from a set when the queue is
2498  not empty because the queue set will still hold pending events for
2499  the queue. */
2500  xReturn = pdFAIL;
2501  }
2502  else
2503  {
2504  taskENTER_CRITICAL();
2505  {
2506  /* The queue is no longer contained in the set. */
2507  pxQueueOrSemaphore->pxQueueSetContainer = NULL;
2508  }
2509  taskEXIT_CRITICAL();
2510  xReturn = pdPASS;
2511  }
2512 
2513  return xReturn;
2514  } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */
2515 
2516 #endif /* configUSE_QUEUE_SETS */
2517 /*-----------------------------------------------------------*/
2518 
2519 #if ( configUSE_QUEUE_SETS == 1 )
2520 
2521  QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait )
2522  {
2523  QueueSetMemberHandle_t xReturn = NULL;
2524 
2525  ( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
2526  return xReturn;
2527  }
2528 
2529 #endif /* configUSE_QUEUE_SETS */
2530 /*-----------------------------------------------------------*/
2531 
2532 #if ( configUSE_QUEUE_SETS == 1 )
2533 
2534  QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
2535  {
2536  QueueSetMemberHandle_t xReturn = NULL;
2537 
2538  ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
2539  return xReturn;
2540  }
2541 
2542 #endif /* configUSE_QUEUE_SETS */
2543 /*-----------------------------------------------------------*/
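The ISR variant is non-blocking and simply drains one handle from the set, returning NULL when the set is empty (same hypothetical xSet handle as the sketch above):

#include "FreeRTOS.h"
#include "queue.h"

extern QueueSetHandle_t xSet; /* hypothetical */

void vSetISR( void )
{
QueueSetMemberHandle_t xMember;

    xMember = xQueueSelectFromSetFromISR( xSet );

    if( xMember != NULL )
    {
        /* Read from xMember with the matching ...FromISR() call. */
    }
}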
2544 
2545 #if ( configUSE_QUEUE_SETS == 1 )
2546 
2547  static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
2548  {
2549  Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
2550  BaseType_t xReturn = pdFALSE;
2551 
2552  /* This function must be called from a critical section. */
2553 
2554  configASSERT( pxQueueSetContainer );
2555  configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
2556 
2557  if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
2558  {
2559  traceQUEUE_SEND( pxQueueSetContainer );
2560 
2561  /* The data copied is the handle of the queue that contains data. */
2562  xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
2563 
2564  if( pxQueueSetContainer->xTxLock == queueUNLOCKED )
2565  {
2566  if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
2567  {
2568  if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
2569  {
2570  /* The task waiting has a higher priority. */
2571  xReturn = pdTRUE;
2572  }
2573  else
2574  {
2575  mtCOVERAGE_TEST_MARKER();
2576  }
2577  }
2578  else
2579  {
2580  mtCOVERAGE_TEST_MARKER();
2581  }
2582  }
2583  else
2584  {
2585  ( pxQueueSetContainer->xTxLock )++;
2586  }
2587  }
2588  else
2589  {
2590  mtCOVERAGE_TEST_MARKER();
2591  }
2592 
2593  return xReturn;
2594  }
2595 
2596 #endif /* configUSE_QUEUE_SETS */
2597 
2598 
2599 
2600 
2601 
2602 
2603 
2604 
2605 
2606 
2607 
2608 