doxygen: fixed duplicated comments for cpu APIs

Refer to the issue described in PR #10599. The fix applied
at that time was only a temporary workaround.

After further research, we discovered that this issue
can be addressed using the "@cond" command supported
by Doxygen.

Since we currently do not intend to generate two sets
of documentation for UP and MP when generating Doxygen
documentation, the current solution is to generate only
MP documentation by default. For functions defined in
MP but not in UP, we will use the "@note" command in
the function's Doxygen comment to indicate whether the
function supports both UP and MP, or only MP.

Signed-off-by: Chen Wang <unicorn_wang@outlook.com>
This commit is contained in:
Chen Wang
2025-09-08 15:14:14 +08:00
committed by R b b666
parent 7d4efbafaa
commit afb3f22973
3 changed files with 57 additions and 7 deletions

View File

@@ -718,7 +718,7 @@ GENERATE_DEPRECATEDLIST= YES
# sections, marked by \if <section_label> ... \endif and \cond <section_label>
# ... \endcond blocks.
ENABLED_SECTIONS =
ENABLED_SECTIONS = DOXYGEN_SMP
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
# initial value of a variable or macro / define can have for it to appear in the

View File

@@ -29,10 +29,20 @@ void *_cpus_lock_pc = 0;
#endif /* RT_DEBUGING_SPINLOCK */
/**
* @addtogroup group_thread_comm
*
* @cond DOXYGEN_SMP
*
* @{
*/
/**
* @brief Initialize a static spinlock object.
*
* @param lock is a pointer to the spinlock to initialize.
*
* @note This function has UP version and MP version.
*/
void rt_spin_lock_init(struct rt_spinlock *lock)
{
@@ -43,10 +53,12 @@ RTM_EXPORT(rt_spin_lock_init)
/**
* @brief This function will lock the spinlock, will lock the thread scheduler.
*
* @note If the spinlock is locked, the current CPU will keep polling the spinlock state
* until the spinlock is unlocked.
* If the spinlock is locked, the current CPU will keep polling the spinlock state
* until the spinlock is unlocked.
*
* @param lock is a pointer to the spinlock.
*
* @note This function has UP version and MP version.
*/
void rt_spin_lock(struct rt_spinlock *lock)
{
@@ -59,9 +71,11 @@ RTM_EXPORT(rt_spin_lock)
/**
* @brief This function will unlock the spinlock, will unlock the thread scheduler.
*
* @note If the scheduling function is called before unlocking, it will be scheduled in this function.
* If the scheduling function is called before unlocking, it will be scheduled in this function.
*
* @param lock is a pointer to the spinlock.
*
* @note This function has UP version and MP version.
*/
void rt_spin_unlock(struct rt_spinlock *lock)
{
@@ -75,12 +89,14 @@ RTM_EXPORT(rt_spin_unlock)
/**
* @brief This function will disable the local interrupt and then lock the spinlock, will lock the thread scheduler.
*
* @note If the spinlock is locked, the current CPU will keep polling the spinlock state
* until the spinlock is unlocked.
* If the spinlock is locked, the current CPU will keep polling the spinlock state
* until the spinlock is unlocked.
*
* @param lock is a pointer to the spinlock.
*
* @return Return current cpu interrupt status.
*
* @note This function has UP version and MP version.
*/
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
{
@@ -97,11 +113,13 @@ RTM_EXPORT(rt_spin_lock_irqsave)
/**
* @brief This function will unlock the spinlock and then restore current cpu interrupt status, will unlock the thread scheduler.
*
* @note If the scheduling function is called before unlocking, it will be scheduled in this function.
* If the scheduling function is called before unlocking, it will be scheduled in this function.
*
* @param lock is a pointer to the spinlock.
*
* @param level is interrupt status returned by rt_spin_lock_irqsave().
*
* @note This function has UP version and MP version.
*/
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
{
@@ -118,6 +136,8 @@ RTM_EXPORT(rt_spin_unlock_irqrestore)
 * @brief This function will return the current cpu object.
*
* @return Return a pointer to the current cpu object.
*
* @note This function has UP version and MP version.
*/
struct rt_cpu *rt_cpu_self(void)
{
@@ -130,6 +150,8 @@ struct rt_cpu *rt_cpu_self(void)
* @param index is the index of target cpu object.
*
* @return Return a pointer to the cpu object corresponding to index.
*
* @note This function has UP version and MP version.
*/
struct rt_cpu *rt_cpu_index(int index)
{
@@ -140,6 +162,8 @@ struct rt_cpu *rt_cpu_index(int index)
 * @brief This function will lock all CPUs' schedulers and disable the local irq.
*
* @return Return current cpu interrupt status.
*
* @note This function only has MP version.
*/
rt_base_t rt_cpus_lock(void)
{
@@ -176,6 +200,8 @@ RTM_EXPORT(rt_cpus_lock);
 * @brief This function will restore all CPUs' schedulers and restore the local irq.
*
* @param level is interrupt status returned by rt_cpus_lock().
*
* @note This function only has MP version.
*/
void rt_cpus_unlock(rt_base_t level)
{
@@ -211,6 +237,8 @@ RTM_EXPORT(rt_cpus_unlock);
* If target thread not locked the cpus then unlock the cpus lock.
*
* @param thread is a pointer to the target thread.
*
* @note This function only has MP version.
*/
void rt_cpus_lock_status_restore(struct rt_thread *thread)
{
@@ -228,6 +256,8 @@ RTM_EXPORT(rt_cpus_lock_status_restore);
* @brief Get logical CPU ID
*
* @return logical CPU ID
*
* @note This function only has MP version.
*/
rt_base_t rt_cpu_get_id(void)
{
@@ -238,3 +268,9 @@ rt_base_t rt_cpu_get_id(void)
return rt_hw_cpu_id();
}
/**
* @}
*
* @endcond
*/

View File

@@ -14,6 +14,14 @@
static struct rt_cpu _cpu;
/**
* @addtogroup group_thread_comm
*
* @cond
*
* @{
*/
/**
* @brief Initialize a static spinlock object.
*
@@ -110,3 +118,9 @@ struct rt_cpu *rt_cpu_index(int index)
{
return index == 0 ? &_cpu : RT_NULL;
}
/**
* @}
*
* @endcond
*/