sched/spin_lock: rename raw_spin_lock to spin_lock_notrace
Signed-off-by: hujun5 <hujun5@xiaomi.com>
This commit is contained in:
parent 42e6825cbc
commit f22b93b337
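The change is a mechanical rename: the untraced spinlock primitives keep their signatures and semantics, only the raw_ prefix becomes a _notrace suffix so the names line up with the other no-trace helpers. A minimal caller sketch under that assumption (the lock, counter, and function names below are made up for illustration and are not part of this diff):

#include <nuttx/irq.h>
#include <nuttx/spinlock.h>

/* Hypothetical state protected by an untraced spinlock */

static spinlock_t g_example_lock = SP_UNLOCKED;
static int g_example_count;

void example_increment(void)
{
  /* Formerly: flags = raw_spin_lock_irqsave(&g_example_lock); */

  irqstate_t flags = spin_lock_irqsave_notrace(&g_example_lock);

  g_example_count++;

  /* Formerly: raw_spin_unlock_irqrestore(&g_example_lock, flags); */

  spin_unlock_irqrestore_notrace(&g_example_lock, flags);
}

The _notrace variants take the lock without emitting an instrumentation note, which is presumably why they are used inside the note, syslog, and atomic helpers touched by the hunks below.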
@@ -1841,7 +1841,7 @@ void sched_note_filter_mode(FAR struct note_filter_named_mode_s *oldm,
   irqstate_t irq_mask;
   FAR struct note_driver_s **driver;

-  irq_mask = raw_spin_lock_irqsave(&g_note_lock);
+  irq_mask = spin_lock_irqsave_notrace(&g_note_lock);

   if (oldm != NULL)
     {
@@ -1877,7 +1877,7 @@ void sched_note_filter_mode(FAR struct note_filter_named_mode_s *oldm,
         }
     }

-  raw_spin_unlock_irqrestore(&g_note_lock, irq_mask);
+  spin_unlock_irqrestore_notrace(&g_note_lock, irq_mask);
 }

 /****************************************************************************
@@ -1907,7 +1907,7 @@ void sched_note_filter_syscall(FAR struct note_filter_named_syscall_s *oldf,
   irqstate_t irq_mask;
   FAR struct note_driver_s **driver;

-  irq_mask = raw_spin_lock_irqsave(&g_note_lock);
+  irq_mask = spin_lock_irqsave_notrace(&g_note_lock);

   if (oldf != NULL)
     {
@@ -1943,7 +1943,7 @@ void sched_note_filter_syscall(FAR struct note_filter_named_syscall_s *oldf,
         }
     }

-  raw_spin_unlock_irqrestore(&g_note_lock, irq_mask);
+  spin_unlock_irqrestore_notrace(&g_note_lock, irq_mask);
 }
 #endif

@@ -1974,7 +1974,7 @@ void sched_note_filter_irq(FAR struct note_filter_named_irq_s *oldf,
   irqstate_t irq_mask;
   FAR struct note_driver_s **driver;

-  irq_mask = raw_spin_lock_irqsave(&g_note_lock);
+  irq_mask = spin_lock_irqsave_notrace(&g_note_lock);

   if (oldf != NULL)
     {
@@ -2010,7 +2010,7 @@ void sched_note_filter_irq(FAR struct note_filter_named_irq_s *oldf,
         }
     }

-  raw_spin_unlock_irqrestore(&g_note_lock, irq_mask);
+  spin_unlock_irqrestore_notrace(&g_note_lock, irq_mask);
 }
 #endif

@@ -2041,7 +2041,7 @@ void sched_note_filter_tag(FAR struct note_filter_named_tag_s *oldf,
   FAR struct note_driver_s **driver;
   irqstate_t irq_mask;

-  irq_mask = raw_spin_lock_irqsave(&g_note_lock);
+  irq_mask = spin_lock_irqsave_notrace(&g_note_lock);

   if (oldf != NULL)
     {
@@ -2077,7 +2077,7 @@ void sched_note_filter_tag(FAR struct note_filter_named_tag_s *oldf,
         }
     }

-  raw_spin_unlock_irqrestore(&g_note_lock, irq_mask);
+  spin_unlock_irqrestore_notrace(&g_note_lock, irq_mask);
 }
 #endif

@@ -467,9 +467,9 @@ static ssize_t noteram_read(FAR struct file *filep, FAR char *buffer,

   if (ctx->mode == NOTERAM_MODE_READ_BINARY)
     {
-      flags = raw_spin_lock_irqsave(&drv->lock);
+      flags = spin_lock_irqsave_notrace(&drv->lock);
       ret = noteram_get(drv, (FAR uint8_t *)buffer, buflen);
-      raw_spin_unlock_irqrestore(&drv->lock, flags);
+      spin_unlock_irqrestore_notrace(&drv->lock, flags);
     }
   else
     {
@@ -481,9 +481,9 @@ static ssize_t noteram_read(FAR struct file *filep, FAR char *buffer,

       /* Get the next note (removing it from the buffer) */

-      flags = raw_spin_lock_irqsave(&drv->lock);
+      flags = spin_lock_irqsave_notrace(&drv->lock);
       ret = noteram_get(drv, note, sizeof(note));
-      raw_spin_unlock_irqrestore(&drv->lock, flags);
+      spin_unlock_irqrestore_notrace(&drv->lock, flags);
       if (ret <= 0)
         {
           return ret;
@@ -508,7 +508,7 @@ static int noteram_ioctl(FAR struct file *filep, int cmd, unsigned long arg)
 {
   int ret = -ENOSYS;
   FAR struct noteram_driver_s *drv = filep->f_inode->i_private;
-  irqstate_t flags = raw_spin_lock_irqsave(&drv->lock);
+  irqstate_t flags = spin_lock_irqsave_notrace(&drv->lock);

   /* Handle the ioctl commands */

@@ -600,7 +600,7 @@ static int noteram_ioctl(FAR struct file *filep, int cmd, unsigned long arg)
         break;
     }

-  raw_spin_unlock_irqrestore(&drv->lock, flags);
+  spin_unlock_irqrestore_notrace(&drv->lock, flags);
   return ret;
 }

@@ -622,7 +622,7 @@ static int noteram_poll(FAR struct file *filep, FAR struct pollfd *fds,
   DEBUGASSERT(inode != NULL && inode->i_private != NULL);
   drv = inode->i_private;

-  flags = raw_spin_lock_irqsave(&drv->lock);
+  flags = spin_lock_irqsave_notrace(&drv->lock);

   /* Ignore waits that do not include POLLIN */

@@ -655,7 +655,7 @@ static int noteram_poll(FAR struct file *filep, FAR struct pollfd *fds,

       if (noteram_unread_length(drv) > 0)
         {
-          raw_spin_unlock_irqrestore(&drv->lock, flags);
+          spin_unlock_irqrestore_notrace(&drv->lock, flags);
           poll_notify(&drv->pfd, 1, POLLIN);
           return ret;
         }
@@ -666,7 +666,7 @@ static int noteram_poll(FAR struct file *filep, FAR struct pollfd *fds,
     }

 errout:
-  raw_spin_unlock_irqrestore(&drv->lock, flags);
+  spin_unlock_irqrestore_notrace(&drv->lock, flags);
   return ret;
 }

@@ -698,11 +698,11 @@ static void noteram_add(FAR struct note_driver_s *driver,
   unsigned int space;
   irqstate_t flags;

-  flags = raw_spin_lock_irqsave(&drv->lock);
+  flags = spin_lock_irqsave_notrace(&drv->lock);

   if (drv->ni_overwrite == NOTERAM_MODE_OVERWRITE_OVERFLOW)
     {
-      raw_spin_unlock_irqrestore(&drv->lock, flags);
+      spin_unlock_irqrestore_notrace(&drv->lock, flags);
       return;
     }

@@ -716,7 +716,7 @@ static void noteram_add(FAR struct note_driver_s *driver,
       /* Stop recording if not in overwrite mode */

       drv->ni_overwrite = NOTERAM_MODE_OVERWRITE_OVERFLOW;
-      raw_spin_unlock_irqrestore(&drv->lock, flags);
+      spin_unlock_irqrestore_notrace(&drv->lock, flags);
       return;
     }

@@ -737,7 +737,7 @@ static void noteram_add(FAR struct note_driver_s *driver,
   memcpy(drv->ni_buffer + head, note, space);
   memcpy(drv->ni_buffer, buf + space, notelen - space);
   drv->ni_head = noteram_next(drv, head, NOTE_ALIGN(notelen));
-  raw_spin_unlock_irqrestore(&drv->lock, flags);
+  spin_unlock_irqrestore_notrace(&drv->lock, flags);
   poll_notify(&drv->pfd, 1, POLLIN);
 }

@@ -180,7 +180,7 @@ static bool noterpmsg_transfer(FAR struct noterpmsg_driver_s *drv,
 static void noterpmsg_work(FAR void *priv)
 {
   FAR struct noterpmsg_driver_s *drv = priv;
-  irqstate_t flags = raw_spin_lock_irqsave(&drv->lock);
+  irqstate_t flags = spin_lock_irqsave_notrace(&drv->lock);

   if (!noterpmsg_transfer(drv, false))
     {
@@ -188,7 +188,7 @@ static void noterpmsg_work(FAR void *priv)
                  NOTE_RPMSG_WORK_DELAY);
     }

-  raw_spin_unlock_irqrestore(&drv->lock, flags);
+  spin_unlock_irqrestore_notrace(&drv->lock, flags);
 }

 static void noterpmsg_add(FAR struct note_driver_s *driver,
@@ -199,7 +199,7 @@ static void noterpmsg_add(FAR struct note_driver_s *driver,
   irqstate_t flags;
   size_t space;

-  flags = raw_spin_lock_irqsave(&drv->lock);
+  flags = spin_lock_irqsave_notrace(&drv->lock);

   space = CONFIG_DRIVERS_NOTERPMSG_BUFSIZE - noterpmsg_length(drv);
   if (space < notelen)
@@ -236,7 +236,7 @@ static void noterpmsg_add(FAR struct note_driver_s *driver,
                  NOTE_RPMSG_WORK_DELAY);
     }

-  raw_spin_unlock_irqrestore(&drv->lock, flags);
+  spin_unlock_irqrestore_notrace(&drv->lock, flags);
 }

 static int noterpmsg_ept_cb(FAR struct rpmsg_endpoint *ept,

@@ -89,11 +89,11 @@ extern spinlock_t g_segger_lock;

 /* Lock RTT (nestable) (i.e. disable interrupts) */

-#define SEGGER_RTT_LOCK() irqstate_t __flags = raw_spin_lock_irqsave(&g_segger_lock)
+#define SEGGER_RTT_LOCK() irqstate_t __flags = spin_lock_irqsave_notrace(&g_segger_lock)

 /* Unlock RTT (nestable) (i.e. enable previous interrupt lock state) */

-#define SEGGER_RTT_UNLOCK() raw_spin_unlock_irqrestore(&g_segger_lock, __flags)
+#define SEGGER_RTT_UNLOCK() spin_unlock_irqrestore_notrace(&g_segger_lock, __flags)

 /* Disable RTT SEGGER_RTT_WriteSkipNoLock */

@@ -150,7 +150,7 @@ void syslog_add_intbuffer(FAR const char *buffer, size_t buflen)

   /* Disable concurrent modification from interrupt handling logic */

-  flags = raw_spin_lock_irqsave(&g_syslog_intbuffer.splock);
+  flags = spin_lock_irqsave_notrace(&g_syslog_intbuffer.splock);

   space = circbuf_space(&g_syslog_intbuffer.circ);

@@ -172,7 +172,7 @@ void syslog_add_intbuffer(FAR const char *buffer, size_t buflen)
                     buffer + space, buflen - space);
     }

-  raw_spin_unlock_irqrestore(&g_syslog_intbuffer.splock, flags);
+  spin_unlock_irqrestore_notrace(&g_syslog_intbuffer.splock, flags);
 }

 /****************************************************************************
@@ -198,9 +198,9 @@ void syslog_flush_intbuffer(bool force)
 {
   irqstate_t flags;

-  flags = raw_spin_lock_irqsave(&g_syslog_intbuffer.splock);
+  flags = spin_lock_irqsave_notrace(&g_syslog_intbuffer.splock);
   syslog_flush_internal(force, sizeof(g_syslog_intbuffer.buffer));
-  raw_spin_unlock_irqrestore(&g_syslog_intbuffer.splock, flags);
+  spin_unlock_irqrestore_notrace(&g_syslog_intbuffer.splock, flags);
 }

 #endif /* CONFIG_SYSLOG_INTBUFFER */

@@ -72,9 +72,9 @@ static FAR struct file *files_fget_by_index(FAR struct filelist *list,
   FAR struct file *filep;
   irqstate_t flags;

-  flags = raw_spin_lock_irqsave(&list->fl_lock);
+  flags = spin_lock_irqsave_notrace(&list->fl_lock);
   filep = &list->fl_files[l1][l2];
-  raw_spin_unlock_irqrestore(&list->fl_lock, flags);
+  spin_unlock_irqrestore_notrace(&list->fl_lock, flags);

 #ifdef CONFIG_FS_REFCOUNT
   if (filep->f_inode != NULL)
@@ -164,7 +164,7 @@ static int files_extend(FAR struct filelist *list, size_t row)
     }
   while (++i < row);

-  flags = raw_spin_lock_irqsave(&list->fl_lock);
+  flags = spin_lock_irqsave_notrace(&list->fl_lock);

   /* To avoid race condition, if the file list is updated by other threads
    * and list rows is greater or equal than temp list,
@@ -173,7 +173,7 @@ static int files_extend(FAR struct filelist *list, size_t row)

   if (orig_rows != list->fl_rows && list->fl_rows >= row)
     {
-      raw_spin_unlock_irqrestore(&list->fl_lock, flags);
+      spin_unlock_irqrestore_notrace(&list->fl_lock, flags);

       for (j = orig_rows; j < i; j++)
         {
@@ -195,7 +195,7 @@ static int files_extend(FAR struct filelist *list, size_t row)
   list->fl_files = files;
   list->fl_rows = row;

-  raw_spin_unlock_irqrestore(&list->fl_lock, flags);
+  spin_unlock_irqrestore_notrace(&list->fl_lock, flags);

   if (tmp != NULL && tmp != &list->fl_prefile)
     {
@@ -565,13 +565,13 @@ int file_allocate_from_tcb(FAR struct tcb_s *tcb, FAR struct inode *inode,

   /* Find free file */

-  flags = raw_spin_lock_irqsave(&list->fl_lock);
+  flags = spin_lock_irqsave_notrace(&list->fl_lock);

   for (; ; i++, j = 0)
     {
       if (i >= list->fl_rows)
         {
-          raw_spin_unlock_irqrestore(&list->fl_lock, flags);
+          spin_unlock_irqrestore_notrace(&list->fl_lock, flags);

           ret = files_extend(list, i + 1);
           if (ret < 0)
@@ -579,7 +579,7 @@ int file_allocate_from_tcb(FAR struct tcb_s *tcb, FAR struct inode *inode,
               return ret;
             }

-          flags = raw_spin_lock_irqsave(&list->fl_lock);
+          flags = spin_lock_irqsave_notrace(&list->fl_lock);
         }

       do
@@ -608,7 +608,7 @@ int file_allocate_from_tcb(FAR struct tcb_s *tcb, FAR struct inode *inode,
     }

 found:
-  raw_spin_unlock_irqrestore(&list->fl_lock, flags);
+  spin_unlock_irqrestore_notrace(&list->fl_lock, flags);

   if (addref)
     {

@@ -80,7 +80,7 @@
   do \
     { \
       g_cpu_irqset = 0; \
-      raw_spin_unlock(&g_cpu_irqlock); \
+      spin_unlock_notrace(&g_cpu_irqlock); \
     } \
   while (0)
 #endif

@@ -168,7 +168,7 @@ static inline spinlock_t up_testset(FAR volatile spinlock_t *lock)
 #define spin_lock_init(l) do { *(l) = SP_UNLOCKED; } while (0)

 /****************************************************************************
- * Name: raw_spin_lock
+ * Name: spin_lock_notrace
  *
  * Description:
  *   If this CPU does not already hold the spinlock, then loop until the
@@ -190,7 +190,7 @@ static inline spinlock_t up_testset(FAR volatile spinlock_t *lock)
  ****************************************************************************/

 #ifdef CONFIG_SPINLOCK
-static inline_function void raw_spin_lock(FAR volatile spinlock_t *lock)
+static inline_function void spin_lock_notrace(FAR volatile spinlock_t *lock)
 {
 #ifdef CONFIG_TICKET_SPINLOCK
   int ticket = atomic_fetch_add(&lock->next, 1);
@@ -239,7 +239,7 @@ static inline_function void spin_lock(FAR volatile spinlock_t *lock)

   /* Lock without trace note */

-  raw_spin_lock(lock);
+  spin_lock_notrace(lock);

   /* Notify that we have the spinlock */

@@ -338,7 +338,7 @@ static inline_function bool spin_trylock(FAR volatile spinlock_t *lock)
 #endif /* CONFIG_SPINLOCK */

 /****************************************************************************
- * Name: raw_spin_unlock
+ * Name: spin_unlock_notrace
  *
  * Description:
  *   Release one count on a non-reentrant spinlock.
@@ -359,7 +359,7 @@ static inline_function bool spin_trylock(FAR volatile spinlock_t *lock)

 #ifdef CONFIG_SPINLOCK
 static inline_function void
-raw_spin_unlock(FAR volatile spinlock_t *lock)
+spin_unlock_notrace(FAR volatile spinlock_t *lock)
 {
   UP_DMB();
 #ifdef CONFIG_TICKET_SPINLOCK
@@ -395,7 +395,7 @@ static inline_function void spin_unlock(FAR volatile spinlock_t *lock)
 {
   /* Unlock without trace note */

-  raw_spin_unlock(lock);
+  spin_unlock_notrace(lock);

   /* Notify that we are unlocking the spinlock */

@@ -429,7 +429,7 @@ static inline_function void spin_unlock(FAR volatile spinlock_t *lock)
 #endif

 /****************************************************************************
- * Name: raw_spin_lock_irqsave
+ * Name: spin_lock_irqsave_notrace
  *
  * Description:
  *   This function is no trace version of spin_lock_irqsave()
@@ -438,17 +438,17 @@ static inline_function void spin_unlock(FAR volatile spinlock_t *lock)

 #ifdef CONFIG_SPINLOCK
 static inline_function
-irqstate_t raw_spin_lock_irqsave(FAR volatile spinlock_t *lock)
+irqstate_t spin_lock_irqsave_notrace(FAR volatile spinlock_t *lock)
 {
   irqstate_t flags;
   flags = up_irq_save();

-  raw_spin_lock(lock);
+  spin_lock_notrace(lock);

   return flags;
 }
 #else
-#  define raw_spin_lock_irqsave(l) ((void)(l), up_irq_save())
+#  define spin_lock_irqsave_notrace(l) ((void)(l), up_irq_save())
 #endif

 /****************************************************************************
@@ -487,7 +487,7 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)

   /* Lock without trace note */

-  flags = raw_spin_lock_irqsave(lock);
+  flags = spin_lock_irqsave_notrace(lock);

   /* Notify that we have the spinlock */

@@ -575,7 +575,7 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
 #endif /* CONFIG_SPINLOCK */

 /****************************************************************************
- * Name: raw_spin_unlock_irqrestore
+ * Name: spin_unlock_irqrestore_notrace
  *
  * Description:
  *   This function is no trace version of spin_unlock_irqrestore()
@@ -584,15 +584,15 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)

 #ifdef CONFIG_SPINLOCK
 static inline_function
-void raw_spin_unlock_irqrestore(FAR volatile spinlock_t *lock,
+void spin_unlock_irqrestore_notrace(FAR volatile spinlock_t *lock,
                                 irqstate_t flags)
 {
-  raw_spin_unlock(lock);
+  spin_unlock_notrace(lock);

   up_irq_restore(flags);
 }
 #else
-#  define raw_spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
+#  define spin_unlock_irqrestore_notrace(l, f) ((void)(l), up_irq_restore(f))
 #endif

 /****************************************************************************
@@ -623,7 +623,7 @@ void spin_unlock_irqrestore(FAR volatile spinlock_t *lock, irqstate_t flags)
 {
   /* Unlock without trace note */

-  raw_spin_unlock_irqrestore(lock, flags);
+  spin_unlock_irqrestore_notrace(lock, flags);

   /* Notify that we are unlocking the spinlock */

@@ -46,11 +46,11 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
   void weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \
                                         type value, int memorder) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     \
     *(FAR type *)ptr = value; \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
   }

 #define LOAD(fn, n, type) \
@@ -58,11 +58,11 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
   type weak_function CONCATENATE(fn, n)(FAR const volatile void *ptr, \
                                         int memorder) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     \
     type ret = *(FAR type *)ptr; \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return ret; \
   }

@@ -71,13 +71,13 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
   type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \
                                         type value, int memorder) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmp = (FAR type *)ptr; \
     \
     type ret = *tmp; \
     *tmp = value; \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return ret; \
   }

@@ -89,7 +89,7 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
                                         int success, int failure) \
   { \
     bool ret = false; \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmpmem = (FAR type *)mem; \
     FAR type *tmpexp = (FAR type *)expect; \
     \
@@ -103,7 +103,7 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
         *tmpexp = *tmpmem; \
       } \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return ret; \
   }

@@ -112,13 +112,13 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
   type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \
                                         int memorder) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmp = (FAR type *)ptr; \
     type ret = *tmp; \
     \
     *(FAR type *)ptr = 1; \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return ret; \
   }

@@ -127,13 +127,13 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
   type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \
                                         type value, int memorder) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmp = (FAR type *)ptr; \
     type ret = *tmp; \
     \
     *tmp = *tmp + value; \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return ret; \
   }

@@ -142,13 +142,13 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
   type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \
                                         type value, int memorder) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmp = (FAR type *)ptr; \
     type ret = *tmp; \
     \
     *tmp = *tmp - value; \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return ret; \
   }

@@ -157,13 +157,13 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
   type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \
                                         type value, int memorder) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmp = (FAR type *)ptr; \
     type ret = *tmp; \
     \
     *tmp = *tmp & value; \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return ret; \
   }

@@ -172,13 +172,13 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
   type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \
                                         type value, int memorder) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmp = (FAR type *)ptr; \
     type ret = *tmp; \
     \
     *tmp = *tmp | value; \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return ret; \
   }

@@ -187,13 +187,13 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
   type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \
                                         type value, int memorder) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmp = (FAR type *)ptr; \
     type ret = *tmp; \
     \
     *tmp = *tmp ^ value; \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return ret; \
   }

@@ -202,12 +202,12 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
   type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \
                                         type value) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmp = (FAR type *)ptr; \
     \
     *tmp = *tmp + value; \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return *tmp; \
   }

@@ -216,12 +216,12 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
   type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \
                                         type value) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmp = (FAR type *)ptr; \
     \
     *tmp = *tmp - value; \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return *tmp; \
   }

@@ -230,12 +230,12 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
   type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \
                                         type value) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmp = (FAR type *)ptr; \
     \
     *tmp = *tmp | value; \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return *tmp; \
   }

@@ -244,12 +244,12 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
   type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \
                                         type value) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmp = (FAR type *)ptr; \
     \
     *tmp = *tmp & value; \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return *tmp; \
   }

@@ -258,12 +258,12 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
   type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \
                                         type value) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmp = (FAR type *)ptr; \
     \
     *tmp = *tmp ^ value; \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return *tmp; \
   }

@@ -272,12 +272,12 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
   type weak_function CONCATENATE(fn, n)(FAR volatile void *ptr, \
                                         type value) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmp = (FAR type *)ptr; \
     \
     *tmp = ~(*tmp & value); \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return *tmp; \
   }

@@ -288,7 +288,7 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
                                         type newvalue) \
   { \
     bool ret = false; \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmp = (FAR type *)ptr; \
     \
     if (*tmp == oldvalue) \
@@ -297,7 +297,7 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
         *tmp = newvalue; \
       } \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return ret; \
   }

@@ -307,7 +307,7 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
                                         type oldvalue, \
                                         type newvalue) \
   { \
-    irqstate_t irqstate = raw_spin_lock_irqsave(&g_atomic_lock); \
+    irqstate_t irqstate = spin_lock_irqsave_notrace(&g_atomic_lock); \
     FAR type *tmp = (FAR type *)ptr; \
     type ret = *tmp; \
     \
@@ -316,7 +316,7 @@ static spinlock_t g_atomic_lock = SP_UNLOCKED;
         *tmp = newvalue; \
       } \
     \
-    raw_spin_unlock_irqrestore(&g_atomic_lock, irqstate); \
+    spin_unlock_irqrestore_notrace(&g_atomic_lock, irqstate); \
     return ret; \
   }

@@ -180,7 +180,7 @@ irqstate_t enter_critical_section_wo_note(void)
            * no longer blocked by the critical section).
            */

-          raw_spin_lock(&g_cpu_irqlock);
+          spin_lock_notrace(&g_cpu_irqlock);
           cpu_irqlock_set(cpu);
         }

@@ -231,7 +231,7 @@ irqstate_t enter_critical_section_wo_note(void)

           DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0);

-          raw_spin_lock(&g_cpu_irqlock);
+          spin_lock_notrace(&g_cpu_irqlock);

           /* Then set the lock count to 1.
            *

@@ -73,7 +73,7 @@ void nxsched_process_delivered(int cpu)

   if ((g_cpu_irqset & (1 << cpu)) == 0)
     {
-      raw_spin_lock(&g_cpu_irqlock);
+      spin_lock_notrace(&g_cpu_irqlock);

       g_cpu_irqset |= (1 << cpu);
     }