update by jenkins(20250101001532)

This commit is contained in:
HEYAHONG 2025-01-01 00:15:32 +08:00
parent db1acd2a9a
commit 504d73aa19
125 changed files with 10445 additions and 631 deletions

View File

@ -9,7 +9,7 @@ RUN apt-get update
RUN apt-get upgrade -y
RUN apt-get install -y vim git wget python3 python-is-python3 pip gcc-arm-none-eabi scons libncurses5-dev
RUN apt-get install -y vim git wget python3 python-is-python3 pip gcc-arm-none-eabi libncurses5-dev
RUN DEBIAN_FRONTEND=noninteractive apt-get install -y qemu-system-arm
@ -34,7 +34,7 @@ RUN git clone https://github.com/RT-Thread/packages.git /root/.env/packages/pack
ENV PATH="/root/.env/tools/scripts:$PATH"
RUN pip install requests psutil kconfiglib tqdm -qq
RUN pip install scons requests psutil kconfiglib tqdm -qq
ENV RTT_EXEC_PATH=/usr/bin

View File

@ -188,6 +188,7 @@ jobs:
- "stm32/stm32g474-st-nucleo"
- "stm32/stm32h563-st-nucleo"
- "stm32/stm32h503-st-nucleo"
- "stm32/stm32h723-st-nucleo"
- "stm32/stm32h743-armfly-v7"
- "stm32/stm32h743-atk-apollo"
- "stm32/stm32h743-openmv-h7plus"

View File

@ -1,5 +1,16 @@
name: doc_doxygen
on:
pull_request:
branches:
- master
paths:
- 'documentation/doxygen/**'
- 'src/**'
- 'include/**'
- 'components/drivers/include/drivers/**'
- 'components/dfs/dfs_v2/include/**'
- 'components/dfs/dfs_v2/src/**'
- 'components/finsh/**'
# Runs at 16:00 UTC (Beijing 00:00) on the 30th of every month
schedule:
- cron: '0 16 30 * *'
@ -7,7 +18,7 @@ on:
jobs:
build:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
name: doxygen_doc generate
if: github.repository_owner == 'RT-Thread'
steps:

View File

@ -20,6 +20,7 @@
#include <dfs.h>
#include <dfs_fs.h>
#include <dfs_file.h>
#include <posix/string.h>
#include <drivers/misc.h>
#include <drivers/byteorder.h>
@ -118,7 +119,7 @@ rt_packed(struct iso9660_common_voldesc
struct iso9660_date created;
struct iso9660_date modified;
rt_uint8_t unused5[0 /* 1201 */];
};
});
struct iso9660
{
@ -250,7 +251,7 @@ static struct iso9660_fd *iso9660_lookup(struct iso9660 *iso, const char *path,
/* Skip the first '/' */
++path;
len = rt_strchrnul(path, '/') - path;
len = strchrnul(path, '/') - path;
}
lba = rt_le32_to_cpu(dirent->first_sector);
@ -359,7 +360,7 @@ static struct iso9660_fd *iso9660_lookup(struct iso9660 *iso, const char *path,
sz = 0;
path += len + 1;
len = rt_strchrnul(path, '/') - path;
len = strchrnul(path, '/') - path;
}
} while (len);
@ -474,7 +475,7 @@ _end:
return rcount;
}
static int dfs_iso9660_lseek(struct dfs_file *fd, off_t offset)
static off_t dfs_iso9660_lseek(struct dfs_file *fd, off_t offset)
{
int ret = -EIO;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@ -107,7 +107,8 @@ int dfs_init(void)
INIT_PREV_EXPORT(dfs_init);
/**
* this function will lock device file system.
* @brief This function will lock the device file system.
* This lock (fslock) protects filesystem_operation_table and filesystem_table.
*
* @note please don't invoke it on ISR.
*/
@ -126,6 +127,12 @@ void dfs_lock(void)
}
}
/**
* @brief This function will lock the file descriptor table.
* This lock (fdlock) protects the fd table (_fdtab).
*
* @note please don't invoke it on ISR.
*/
void dfs_file_lock(void)
{
rt_err_t result = -RT_EBUSY;
@ -142,7 +149,7 @@ void dfs_file_lock(void)
}
/**
* this function will lock device file system.
* @brief This function will unlock the device file system.
*
* @note please don't invoke it on ISR.
*/
@ -151,33 +158,56 @@ void dfs_unlock(void)
rt_mutex_release(&fslock);
}
#ifdef DFS_USING_POSIX
/**
* @brief This function will unlock the fd table.
*/
void dfs_file_unlock(void)
{
rt_mutex_release(&fdlock);
}
#ifdef DFS_USING_POSIX
/**
* @brief Expand the file descriptor table to accommodate a specific file descriptor.
*
* This function ensures that the file descriptor table in the given `dfs_fdtable` structure
* has sufficient capacity to include the specified file descriptor `fd`. If the table
* needs to be expanded, it reallocates memory and initializes new slots to `NULL`.
*
* @param fdt Pointer to the `dfs_fdtable` structure representing the file descriptor table.
* @param fd The file descriptor that the table must accommodate.
* @return int
* - The input file descriptor `fd` if it is within the current or newly expanded table's capacity.
* - `-1` if the requested file descriptor exceeds `DFS_FD_MAX` or memory allocation fails.
*/
static int fd_slot_expand(struct dfs_fdtable *fdt, int fd)
{
int nr;
int index;
struct dfs_file **fds = NULL;
/* If the file descriptor is already within the current capacity, no expansion is needed.*/
if (fd < fdt->maxfd)
{
return fd;
}
/* If the file descriptor exceeds the maximum allowable limit, return an error.*/
if (fd >= DFS_FD_MAX)
{
return -1;
}
/* Calculate the new capacity: the next multiple of 4 strictly greater than fd. */
nr = ((fd + 4) & ~3);
/* Ensure the new capacity does not exceed the maximum limit.*/
if (nr > DFS_FD_MAX)
{
nr = DFS_FD_MAX;
}
/* Attempt to reallocate the file descriptor table to the new capacity.*/
fds = (struct dfs_file **)rt_realloc(fdt->fds, nr * sizeof(struct dfs_file *));
if (!fds)
{
@ -189,12 +219,23 @@ static int fd_slot_expand(struct dfs_fdtable *fdt, int fd)
{
fds[index] = NULL;
}
/* Update the file descriptor table and its capacity.*/
fdt->fds = fds;
fdt->maxfd = nr;
return fd;
}
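
An editor's note on the expression `nr = ((fd + 4) & ~3)` above: clearing the two low bits of `fd + 4` yields the next multiple of 4 that is strictly greater than `fd`, so the table always gains at least one usable slot. A standalone C sketch (not part of the commit) that reproduces the formula:

#include <stdio.h>

/* Illustrative only: mirrors the capacity formula used by fd_slot_expand(). */
static int next_capacity(int fd)
{
    return (fd + 4) & ~3; /* next multiple of 4 strictly greater than fd */
}

int main(void)
{
    int samples[] = { 0, 3, 4, 5, 7, 8 };

    for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
    {
        /* prints: 4, 4, 8, 8, 8, 12 */
        printf("fd=%d -> capacity=%d\n", samples[i], next_capacity(samples[i]));
    }

    return 0;
}
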
/**
* @brief Allocate a file descriptor slot starting from a specified index.
*
* @param fdt Pointer to the `dfs_fdtable` structure representing the file descriptor table.
* @param startfd The starting index for the search for an empty slot.
* @return int
* - The index of the first available slot if successful.
* - `-1` if no slot is available or if table expansion fails.
*/
static int fd_slot_alloc(struct dfs_fdtable *fdt, int startfd)
{
int idx;
@ -219,6 +260,17 @@ static int fd_slot_alloc(struct dfs_fdtable *fdt, int startfd)
}
return idx;
}
/**
* @brief Allocate a new file descriptor and associate it with a newly allocated `struct dfs_file`.
*
* @param fdt Pointer to the `dfs_fdtable` structure representing the file descriptor table.
* @param startfd The starting index for searching an available file descriptor slot.
*
* @return
* - The index of the allocated file descriptor if successful.
* - `-1` if no slot is available or memory allocation fails.
*/
static int fd_alloc(struct dfs_fdtable *fdt, int startfd)
{
int idx;
@ -323,7 +375,11 @@ struct dfs_file *fd_get(int fd)
/**
* @ingroup Fd
*
* This function will put the file descriptor.
* @brief This function will release the file descriptor.
*
* This function releases a file descriptor slot in the file descriptor table, decrements reference
* counts, and cleans up resources associated with the `dfs_file` and `dfs_vnode` structures when applicable.
*
*/
void fdt_fd_release(struct dfs_fdtable* fdt, int fd)
{
@ -378,6 +434,20 @@ void fd_release(int fd)
fdt_fd_release(fdt, fd);
}
/**
* @brief Duplicates a file descriptor.
*
* This function duplicates an existing file descriptor (`oldfd`) and returns
* a new file descriptor that refers to the same underlying file object.
*
* @param oldfd The file descriptor to duplicate. It must be a valid file
* descriptor within the range of allocated descriptors.
*
* @return The new file descriptor if successful, or a negative value
* (e.g., -1) if an error occurs.
*
* @see sys_dup2()
*/
rt_err_t sys_dup(int oldfd)
{
int newfd = -1;
@ -470,6 +540,23 @@ int fd_is_open(const char *pathname)
return -1;
}
/**
* @brief Duplicates a file descriptor to a specified file descriptor.
*
* This function duplicates an existing file descriptor (`oldfd`) and assigns it
* to the specified file descriptor (`newfd`).
*
* @param oldfd The file descriptor to duplicate. It must be a valid and open file
* descriptor within the range of allocated descriptors.
* @param newfd The target file descriptor. If `newfd` is already in use, it will
* be closed before duplication. If `newfd` exceeds the current file
* descriptor table size, the table will be expanded to accommodate it.
*
* @return The value of `newfd` on success, or a negative value (e.g., -1) if an
* error occurs.
*
* @see sys_dup()
*/
rt_err_t sys_dup2(int oldfd, int newfd)
{
struct dfs_fdtable *fdt = NULL;
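
For context on the dup semantics documented above: sys_dup() and sys_dup2() back the POSIX dup()/dup2() calls. A minimal application-level sketch of the dup2() behaviour described in the comment (the log file path is only an example):

#include <fcntl.h>
#include <unistd.h>

int redirect_stdout_to_log(void)
{
    /* "/log.txt" is an example path */
    int fd = open("/log.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);

    if (fd < 0)
    {
        return -1;
    }

    /* If STDOUT_FILENO is already open it is closed first, then made to refer
     * to the same underlying file object as fd, as documented above. */
    if (dup2(fd, STDOUT_FILENO) < 0)
    {
        close(fd);
        return -1;
    }

    close(fd); /* stdout keeps its own reference to the file */
    return 0;
}
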
@ -550,6 +637,10 @@ static int fd_get_fd_index_form_fdt(struct dfs_fdtable *fdt, struct dfs_file *fi
return fd;
}
/**
* @brief Get the fd (index) associated with a dfs file object.
*
*/
int fd_get_fd_index(struct dfs_file *file)
{
struct dfs_fdtable *fdt;
@ -558,6 +649,21 @@ int fd_get_fd_index(struct dfs_file *file)
return fd_get_fd_index_form_fdt(fdt, file);
}
/**
* @brief Associates a file descriptor with a file object.
*
* This function associates a given file descriptor (`fd`) with a specified
* file object (`file`) in the file descriptor table (`fdt`).
*
* @param fdt The file descriptor table to operate on. It must be a valid
* and initialized `dfs_fdtable` structure.
* @param fd The file descriptor to associate. It must be within the range
* of allocated file descriptors and currently unoccupied.
* @param file The file object to associate with the file descriptor. It must
* be a valid and initialized `dfs_file` structure.
*
* @return The value of `fd` on success, or -1 if an error occurs.
*/
int fd_associate(struct dfs_fdtable *fdt, int fd, struct dfs_file *file)
{
int retfd = -1;
@ -591,6 +697,10 @@ exit:
return retfd;
}
/**
* @brief Initialize a dfs file object.
*
*/
void fd_init(struct dfs_file *fd)
{
if (fd)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@ -18,10 +18,12 @@
#define DFS_VNODE_HASH_NR 128
/* dfs vnode manager, for saving and searching vnodes. */
struct dfs_vnode_mgr
{
struct rt_mutex lock;
rt_list_t head[DFS_VNODE_HASH_NR];
struct rt_mutex lock; /* mutex for protecting dfs vnode lists */
rt_list_t head[DFS_VNODE_HASH_NR]; /* hash buckets of dfs vnode lists; a vnode is inserted into the list
selected by its path string's hash value mod DFS_VNODE_HASH_NR. */
};
static struct dfs_vnode_mgr dfs_fm;
@ -36,6 +38,10 @@ void dfs_fm_unlock(void)
rt_mutex_release(&dfs_fm.lock);
}
/**
* @brief Initialize dfs vnode manager structure, including a lock and hash tables for vnode.
*
*/
void dfs_vnode_mgr_init(void)
{
int i = 0;
@ -47,6 +53,23 @@ void dfs_vnode_mgr_init(void)
}
}
/**
* @brief Initialize a DFS vnode structure.
*
* @param vnode Pointer to the DFS vnode structure to be initialized.
* The caller must ensure this is a valid, allocated structure.
* @param type The type of the vnode, representing its role or category (e.g., regular file, directory).
* @param fops Pointer to the file operations structure associated with this vnode.
* This structure defines the behavior of the vnode for operations such as open, read, write, etc.
* If `fops` is NULL, the vnode will have no associated file operations.
*
* @return 0 on success, or a negative error code on failure.
*
* @note The caller should ensure that:
* - The `vnode` pointer is valid and properly allocated.
* - The `fops` pointer (if not NULL) points to a valid `struct dfs_file_ops`
* instance, where all necessary function pointers are properly set.
*/
int dfs_vnode_init(struct dfs_vnode *vnode, int type, const struct dfs_file_ops *fops)
{
if (vnode)
@ -64,7 +87,7 @@ int dfs_vnode_init(struct dfs_vnode *vnode, int type, const struct dfs_file_ops
/* BKDR Hash Function */
static unsigned int bkdr_hash(const char *str)
{
unsigned int seed = 131; // 31 131 1313 13131 131313 etc..
unsigned int seed = 131; /* 31 131 1313 13131 131313 etc. */
unsigned int hash = 0;
while (*str)
@ -75,6 +98,22 @@ static unsigned int bkdr_hash(const char *str)
return (hash % DFS_VNODE_HASH_NR);
}
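
The loop body of bkdr_hash() is elided by this diff; for reference, the classic BKDR hash it names looks like the sketch below (illustrative, assuming the conventional formulation; dfs maps the result into DFS_VNODE_HASH_NR buckets):

/* Sketch of the classic BKDR string hash with seed 131. */
static unsigned int bkdr_hash_sketch(const char *str)
{
    unsigned int seed = 131; /* 31 131 1313 13131 131313 etc. */
    unsigned int hash = 0;

    while (*str)
    {
        hash = hash * seed + (unsigned char)*str++;
    }

    return hash % 128; /* 128 stands in for DFS_VNODE_HASH_NR */
}
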
/**
* @brief Find a DFS vnode by its path.
*
* This function searches for a vnode in the vnode hash table using the specified path.
* If found, it returns a pointer to the vnode and updates the hash head if required.
*
* @param path The file path to search for. This should be a valid null-terminated string.
* @param hash_head Pointer to a location where the hash table head associated with the vnode
* can be stored. This can be NULL if the hash head is not needed.
*
* @return Pointer to the DFS vnode if found, or NULL if no vnode matches the specified path.
*
* @note The caller must ensure that:
* - The `path` pointer is valid and points to a properly null-terminated string.
* - If `hash_head` is not NULL, it points to a valid location to store the hash head.
*/
static struct dfs_vnode *dfs_vnode_find(const char *path, rt_list_t **hash_head)
{
struct dfs_vnode *vnode = NULL;
@ -329,11 +368,12 @@ int dfs_file_close(struct dfs_file *fd)
}
/**
* this function will perform a io control on a file descriptor.
* this function will perform an io control on a file descriptor.
*
* @param fd the file descriptor.
* @param cmd the command to send to file descriptor.
* @param args the argument to send to file descriptor.
* - When `cmd` is `F_SETFL`, an additional integer argument specifies the new status flags.
*
* @return 0 on successful, -1 on failed.
*/
@ -1026,14 +1066,14 @@ void copy(const char *src, const char *dst)
flag |= FLAG_DST_IS_FILE;
}
//2. check status
/* 2. check status */
if ((flag & FLAG_SRC_IS_DIR) && (flag & FLAG_DST_IS_FILE))
{
rt_kprintf("cp faild, cp dir to file is not permitted!\n");
return ;
}
//3. do copy
/* 3. do copy */
if (flag & FLAG_SRC_IS_FILE)
{
if (flag & FLAG_DST_IS_DIR)
@ -1053,7 +1093,7 @@ void copy(const char *src, const char *dst)
copyfile(src, dst);
}
}
else //flag & FLAG_SRC_IS_DIR
else /* flag & FLAG_SRC_IS_DIR */
{
if (flag & FLAG_DST_IS_DIR)
{

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@ -529,7 +529,8 @@ int dfs_mount_device(rt_device_t dev)
{
int index = 0;
if(dev == RT_NULL) {
if(dev == RT_NULL)
{
rt_kprintf("the device is NULL to be mounted.\n");
return -RT_ERROR;
}
@ -538,7 +539,8 @@ int dfs_mount_device(rt_device_t dev)
{
if (mount_table[index].path == NULL) break;
if(strcmp(mount_table[index].device_name, dev->parent.name) == 0) {
if(strcmp(mount_table[index].device_name, dev->parent.name) == 0)
{
if (dfs_mount(mount_table[index].device_name,
mount_table[index].path,
mount_table[index].filesystemtype,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@ -28,7 +28,17 @@
* return a file descriptor according specified flags.
*
* @param file the path name of file.
* @param flags the file open flags.
* @param flags the file open flags. Common values include:
* - Access modes (mutually exclusive):
* - `O_RDONLY`: Open for read-only access.
* - `O_WRONLY`: Open for write-only access.
* - `O_RDWR`: Open for both reading and writing.
* - File status flags (can be combined with bitwise OR `|`):
* - `O_CREAT`: Create the file if it does not exist. Requires a `mode` argument.
* - `O_TRUNC`: Truncate the file to zero length if it already exists.
* - `O_APPEND`: Append writes to the end of the file.
* - `O_EXCL`: Ensure that `O_CREAT` creates the file exclusively.
* - Other platform-specific flags
*
* @return the non-negative integer on successful open, others for failed.
*/
@ -65,6 +75,22 @@ RTM_EXPORT(open);
#ifndef AT_FDCWD
#define AT_FDCWD (-100)
#endif
/**
* @brief Opens a file relative to a directory file descriptor.
*
* @param dirfd The file descriptor of the directory to base the relative path on.
* @param path The path to the file to be opened, relative to the directory specified by `dirfd`.
* Can be an absolute path (in which case `dirfd` is ignored).
* @param flag File access and status flags (e.g., `O_RDONLY`, `O_WRONLY`, `O_CREAT`).
*
* @return On success, returns a new file descriptor for the opened file.
* On failure, returns `-1` and sets `errno` to indicate the error.
*
* @note When using relative paths, ensure `dirfd` is a valid directory descriptor.
* When `path` is absolute, the `dirfd` argument is ignored.
*
*/
int openat(int dirfd, const char *path, int flag, ...)
{
struct dfs_file *d;
@ -241,14 +267,22 @@ ssize_t write(int fd, const void *buf, size_t len)
RTM_EXPORT(write);
/**
* this function is a POSIX compliant version, which will seek the offset for
* this function is a POSIX compliant version, which will reposition the file offset for
* an open file descriptor.
*
* @param fd the file descriptor.
* @param offset the offset to be seeked.
* @param whence the directory of seek.
* The `lseek` function sets the file offset for the file descriptor `fd`
* to a new value, determined by the `offset` and `whence` parameters.
* It can be used to seek to specific positions in a file for reading or writing.
*
* @return the current read/write position in the file, or -1 on failed.
* @param fd the file descriptor.
* @param offset The offset, in bytes, to set the file position.
* The meaning of `offset` depends on the value of `whence`.
* @param whence the directive of seek. It can be one of:
* - `SEEK_SET`: Set the offset to `offset` bytes from the beginning of the file.
* - `SEEK_CUR`: Set the offset to its current location plus `offset` bytes.
* - `SEEK_END`: Set the offset to the size of the file plus `offset` bytes.
*
* @return the resulting read/write position in the file, or -1 on failed.
*/
off_t lseek(int fd, off_t offset, int whence)
{
@ -436,9 +470,15 @@ RTM_EXPORT(fsync);
* control functions on devices.
*
* @param fildes the file description
* @param cmd the specified command
* @param cmd the specified command. Common values include:
* - `F_DUPFD`: Duplicate a file descriptor.
* - `F_GETFD`: Get the file descriptor flags.
* - `F_SETFD`: Set the file descriptor flags.
* - `F_GETFL`: Get the file status flags.
* - `F_SETFL`: Set the file status flags.
* @param ... represents the additional information that is needed by this
* specific device to perform the requested function.
* specific device to perform the requested function. For example:
* - When `cmd` is `F_SETFL`, an additional integer argument specifies the new status flags.
*
* @return 0 on successful completion. Otherwise, -1 shall be returned and errno
* set to indicate the error.
@ -595,7 +635,7 @@ RTM_EXPORT(fstatfs);
* this function is a POSIX compliant version, which will make a directory
*
* @param path the directory path to be made.
* @param mode
* @param mode The permission mode for the new directory (unused here, can be set to 0).
*
* @return 0 on successful, others on failed.
*/
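
The flag, whence and fcntl command lists documented in this file map onto ordinary POSIX usage. A hedged, self-contained sketch combining them (the path is only an example):

#include <fcntl.h>
#include <unistd.h>

int append_marker(void)
{
    /* "/data.log" is an example path */
    int fd = open("/data.log", O_WRONLY | O_CREAT, 0644);
    off_t size;
    int flags;

    if (fd < 0)
    {
        return -1;
    }

    size = lseek(fd, 0, SEEK_END);         /* resulting offset equals the file size */
    flags = fcntl(fd, F_GETFL);            /* read the current status flags */
    fcntl(fd, F_SETFL, flags | O_APPEND);  /* subsequent writes append */

    write(fd, "#", 1);
    close(fd);
    return (int)size;
}
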

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@ -31,7 +31,17 @@
* return a file descriptor according specified flags.
*
* @param file the path name of file.
* @param flags the file open flags.
* @param flags the file open flags. Common values include:
* - Access modes (mutually exclusive):
* - `O_RDONLY`: Open for read-only access.
* - `O_WRONLY`: Open for write-only access.
* - `O_RDWR`: Open for both reading and writing.
* - File status flags (can be combined with bitwise OR `|`):
* - `O_CREAT`: Create the file if it does not exist. Requires a `mode` argument.
* - `O_TRUNC`: Truncate the file to zero length if it already exists.
* - `O_APPEND`: Append writes to the end of the file.
* - `O_EXCL`: Ensure that `O_CREAT` creates the file exclusively.
* - Other platform-specific flags
*
* @return the non-negative integer on successful open, others for failed.
*/
@ -81,6 +91,22 @@ RTM_EXPORT(open);
#ifndef AT_FDCWD
#define AT_FDCWD (-100)
#endif
/**
* @brief Opens a file relative to a directory file descriptor.
*
* @param dirfd The file descriptor of the directory to base the relative path on.
* @param path The path to the file to be opened, relative to the directory specified by `dirfd`.
* Can be an absolute path (in which case `dirfd` is ignored).
* @param flag File access and status flags (e.g., `O_RDONLY`, `O_WRONLY`, `O_CREAT`).
*
* @return On success, returns a new file descriptor for the opened file.
* On failure, returns `-1` and sets `errno` to indicate the error.
*
* @note When using relative paths, ensure `dirfd` is a valid directory descriptor.
* When `path` is absolute, the `dirfd` argument is ignored.
*
*/
int openat(int dirfd, const char *path, int flag, ...)
{
struct dfs_file *d;
@ -171,7 +197,7 @@ int utimensat(int __fd, const char *__path, const struct timespec __times[2], in
}
}
//update time
/* update time */
attr.ia_valid = ATTR_ATIME_SET | ATTR_MTIME_SET;
time(&current_time);
if (UTIME_NOW == __times[0].tv_nsec)
@ -374,14 +400,22 @@ ssize_t write(int fd, const void *buf, size_t len)
RTM_EXPORT(write);
/**
* this function is a POSIX compliant version, which will seek the offset for
* this function is a POSIX compliant version, which will reposition the file offset for
* an open file descriptor.
*
* @param fd the file descriptor.
* @param offset the offset to be seeked.
* @param whence the directory of seek.
* The `lseek` function sets the file offset for the file descriptor `fd`
* to a new value, determined by the `offset` and `whence` parameters.
* It can be used to seek to specific positions in a file for reading or writing.
*
* @return the current read/write position in the file, or -1 on failed.
* @param fd the file descriptor.
* @param offset The offset, in bytes, to set the file position.
* The meaning of `offset` depends on the value of `whence`.
* @param whence the directive of seek. It can be one of:
* - `SEEK_SET`: Set the offset to `offset` bytes from the beginning of the file.
* - `SEEK_CUR`: Set the offset to its current location plus `offset` bytes.
* - `SEEK_END`: Set the offset to the size of the file plus `offset` bytes.
*
* @return the resulting read/write position in the file, or -1 on failed.
*/
off_t lseek(int fd, off_t offset, int whence)
{
@ -581,9 +615,15 @@ RTM_EXPORT(fsync);
* control functions on devices.
*
* @param fildes the file description
* @param cmd the specified command
* @param cmd the specified command. Common values include:
* - `F_DUPFD`: Duplicate a file descriptor.
* - `F_GETFD`: Get the file descriptor flags.
* - `F_SETFD`: Set the file descriptor flags.
* - `F_GETFL`: Get the file status flags.
* - `F_SETFL`: Set the file status flags.
* @param ... represents the additional information that is needed by this
* specific device to perform the requested function.
* specific device to perform the requested function. For example:
* - When `cmd` is `F_SETFL`, an additional integer argument specifies the new status flags.
*
* @return 0 on successful completion. Otherwise, -1 shall be returned and errno
* set to indicate the error.
@ -765,7 +805,7 @@ RTM_EXPORT(fstatfs);
* this function is a POSIX compliant version, which will make a directory
*
* @param path the directory path to be made.
* @param mode
* @param mode The permission mode for the new directory (unused here, can be set to 0).
*
* @return 0 on successful, others on failed.
*/
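
The openat() contract documented above can be exercised like this (a sketch; the directory and file names are examples only):

#include <fcntl.h>
#include <unistd.h>

int open_relative_example(void)
{
    /* "/etc" and "fstab" are example names */
    int dirfd = open("/etc", O_RDONLY);
    int fd;

    if (dirfd < 0)
    {
        return -1;
    }

    /* "fstab" is resolved relative to dirfd; an absolute path would make
     * dirfd irrelevant, as the note above says. */
    fd = openat(dirfd, "fstab", O_RDONLY);

    close(dirfd);
    return fd;
}
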

View File

@ -25,11 +25,12 @@ rsource "led/Kconfig"
rsource "mailbox/Kconfig"
rsource "phye/Kconfig"
rsource "ata/Kconfig"
rsource "block/Kconfig"
rsource "nvme/Kconfig"
rsource "block/Kconfig"
rsource "scsi/Kconfig"
rsource "regulator/Kconfig"
rsource "reset/Kconfig"
rsource "thermal/Kconfig"
rsource "virtio/Kconfig"
rsource "dma/Kconfig"
rsource "mfd/Kconfig"

View File

@ -510,13 +510,13 @@ static void ahci_isr(int irqno, void *param)
{
int id;
rt_uint32_t isr;
bitmap_t int_map;
rt_bitmap_t int_map;
struct rt_ahci_port *port;
struct rt_ahci_host *host = param;
int_map = HWREG32(host->regs + RT_AHCI_HBA_INTS);
bitmap_for_each_set_bit(&int_map, id, host->ports_nr)
rt_bitmap_for_each_set_bit(&int_map, id, host->ports_nr)
{
port = &host->ports[id];
@ -535,7 +535,7 @@ static void ahci_isr(int irqno, void *param)
HWREG32(port->regs + RT_AHCI_PORT_INTS) = isr;
}
HWREG32(host->regs + RT_AHCI_HBA_INTS) = isr;
HWREG32(host->regs + RT_AHCI_HBA_INTS) = int_map;
}
rt_err_t rt_ahci_host_register(struct rt_ahci_host *host)

View File

@ -195,6 +195,10 @@ static rt_err_t blk_control(rt_device_t dev, int cmd, void *args)
{
err = disk->ops->control(disk, RT_NULL, cmd, args);
}
else
{
err = -RT_ENOSYS;
}
break;
}
@ -344,7 +348,7 @@ rt_err_t rt_hw_blk_disk_unregister(struct rt_blk_disk *disk)
spin_lock(&disk->lock);
if (disk->parent.ref_count != 1)
if (disk->parent.ref_count > 0)
{
err = -RT_EBUSY;
goto _unlock;
@ -470,7 +474,7 @@ INIT_ENV_EXPORT(blk_dfs_mnt_table);
const char *convert_size(struct rt_device_blk_geometry *geome,
rt_size_t sector_count, rt_size_t *out_cap, rt_size_t *out_minor)
{
rt_size_t cap, minor;
rt_size_t cap, minor = 0;
int size_index = 0;
const char *size_name[] = { "B", "K", "M", "G", "T", "P", "E" };

View File

@ -49,5 +49,7 @@ rt_err_t dfs_partition(struct rt_blk_disk *disk)
}
}
rt_free(sector);
return RT_EOK;
}

View File

@ -448,13 +448,11 @@ rt_err_t rt_dma_prep_single(struct rt_dma_chan *chan,
}
static struct rt_dma_controller *ofw_find_dma_controller(struct rt_device *dev,
const char *name)
const char *name, struct rt_ofw_cell_args *args)
{
struct rt_dma_controller *ctrl = RT_NULL;
#ifdef RT_USING_OFW
int index;
rt_err_t err;
struct rt_ofw_cell_args dma_args = {};
struct rt_ofw_node *np = dev->ofw_node, *ctrl_np;
if (!np)
@ -469,9 +467,9 @@ static struct rt_dma_controller *ofw_find_dma_controller(struct rt_device *dev,
return RT_NULL;
}
if (!rt_ofw_parse_phandle_cells(np, "dmas", "#dma-cells", index, &dma_args))
if (!rt_ofw_parse_phandle_cells(np, "dmas", "#dma-cells", index, args))
{
ctrl_np = dma_args.data;
ctrl_np = args->data;
if (!rt_ofw_data(ctrl_np))
{
@ -480,14 +478,6 @@ static struct rt_dma_controller *ofw_find_dma_controller(struct rt_device *dev,
ctrl = rt_ofw_data(ctrl_np);
rt_ofw_node_put(ctrl_np);
if (ctrl && ctrl->ops->ofw_parse)
{
if ((err = ctrl->ops->ofw_parse(ctrl, &dma_args)))
{
ctrl = rt_err_ptr(err);
}
}
}
#endif /* RT_USING_OFW */
return ctrl;
@ -495,7 +485,9 @@ static struct rt_dma_controller *ofw_find_dma_controller(struct rt_device *dev,
struct rt_dma_chan *rt_dma_chan_request(struct rt_device *dev, const char *name)
{
void *fw_data = RT_NULL;
struct rt_dma_chan *chan;
struct rt_ofw_cell_args dma_args;
struct rt_dma_controller *ctrl = RT_NULL;
if (!dev)
@ -505,7 +497,8 @@ struct rt_dma_chan *rt_dma_chan_request(struct rt_device *dev, const char *name)
if (name)
{
ctrl = ofw_find_dma_controller(dev, name);
fw_data = &dma_args;
ctrl = ofw_find_dma_controller(dev, name, &dma_args);
}
else
{
@ -531,7 +524,7 @@ struct rt_dma_chan *rt_dma_chan_request(struct rt_device *dev, const char *name)
if (ctrl->ops->request_chan)
{
chan = ctrl->ops->request_chan(ctrl, dev);
chan = ctrl->ops->request_chan(ctrl, dev, fw_data);
}
else
{

View File

@ -322,8 +322,14 @@ static void *dma_alloc(struct rt_device *dev, rt_size_t size,
rt_list_for_each_entry(pool, &dma_pool_nodes, list)
{
if ((flags & RT_DMA_F_DEVICE) &&
(!(pool->flags & RT_DMA_F_DEVICE) || pool->dev != dev))
if (pool->flags & RT_DMA_F_DEVICE)
{
if (!(flags & RT_DMA_F_DEVICE) || pool->dev != dev)
{
continue;
}
}
else if ((flags & RT_DMA_F_DEVICE))
{
continue;
}

View File

@ -17,6 +17,7 @@
#include <ioremap.h>
#include <drivers/misc.h>
#include <drivers/byteorder.h>
#include <drivers/core/master_id.h>
#ifndef RT_CPUS_NR
#define RT_CPUS_NR 1

View File

@ -89,6 +89,8 @@ struct rt_pin_irqchip
int irq;
rt_base_t pin_range[2];
};
struct rt_pin_irq_hdr;
#endif /* RT_USING_DM */
/**
@ -98,7 +100,13 @@ struct rt_device_pin
{
struct rt_device parent;
#ifdef RT_USING_DM
/* MUST keep this member right after parent */
struct rt_pin_irqchip irqchip;
/* Fill by DM */
rt_base_t pin_start;
rt_size_t pin_nr;
rt_list_t list;
struct rt_pin_irq_hdr *legacy_isr;
#endif /* RT_USING_DM */
const struct rt_pin_ops *ops;
};
@ -212,6 +220,7 @@ struct rt_pin_ops
rt_err_t (*pin_detach_irq)(struct rt_device *device, rt_base_t pin);
rt_err_t (*pin_irq_enable)(struct rt_device *device, rt_base_t pin, rt_uint8_t enabled);
rt_base_t (*pin_get)(const char *name);
rt_err_t (*pin_debounce)(struct rt_device *device, rt_base_t pin, rt_uint32_t debounce);
#ifdef RT_USING_DM
rt_err_t (*pin_irq_mode)(struct rt_device *device, rt_base_t pin, rt_uint8_t mode);
rt_ssize_t (*pin_parse)(struct rt_device *device, struct rt_ofw_cell_args *args, rt_uint32_t *flags);
@ -284,6 +293,14 @@ rt_err_t rt_pin_detach_irq(rt_base_t pin);
*/
rt_err_t rt_pin_irq_enable(rt_base_t pin, rt_uint8_t enabled);
/**
* @brief set the pin's debounce time
* @param pin the pin number
* @param debounce the debounce time
* @return rt_err_t error code
*/
rt_err_t rt_pin_debounce(rt_base_t pin, rt_uint32_t debounce);
#ifdef RT_USING_DM
rt_ssize_t rt_pin_get_named_pin(struct rt_device *dev, const char *propname, int index,
rt_uint8_t *out_mode, rt_uint8_t *out_value);
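
The new rt_pin_debounce() slots into the usual pin workflow next to the existing attach/enable calls. A sketch, assuming a button on a board-specific pin named "P0.5" and a driver that implements the optional pin_debounce operation (name and debounce value are assumptions):

#include <rtthread.h>
#include <rtdevice.h>

static void button_isr(void *args)
{
    rt_kprintf("button pressed\n");
}

static int button_setup(void)
{
    rt_base_t pin = rt_pin_get("P0.5");   /* "P0.5" is a board-specific example */

    rt_pin_mode(pin, PIN_MODE_INPUT_PULLUP);
    rt_pin_debounce(pin, 20);             /* 20 is an assumed value; units are driver-defined */
    rt_pin_attach_irq(pin, PIN_IRQ_MODE_FALLING, button_isr, RT_NULL);
    rt_pin_irq_enable(pin, PIN_IRQ_ENABLE);

    return 0;
}
INIT_APP_EXPORT(button_setup);
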

View File

@ -76,7 +76,7 @@
* if (!serial)
* {
* rt_kprintf("find %s failed!\n", uart_name);
* return RT_ERROR;
* return -RT_ERROR;
* }
*
*
@ -97,7 +97,7 @@
* }
* else
* {
* ret = RT_ERROR;
* ret = -RT_ERROR;
* }
*
* return ret;
@ -264,7 +264,9 @@ struct rt_serial_device
void *serial_tx;
struct rt_spinlock spinlock;
#ifdef RT_USING_SERIAL_BYPASS
struct rt_serial_bypass* bypass;
#endif
struct rt_device_notify rx_notify;
};
typedef struct rt_serial_device rt_serial_t;

View File

@ -618,7 +618,7 @@ rt_err_t rt_qspi_send_then_recv(struct rt_qspi_device *device, const void *send_
*
* @param device the QSPI device attached to QSPI bus.
* @param send_buf the buffer to be transmitted to QSPI device.
* @param send_length the number of data to be transmitted.
* @param length the number of data to be transmitted.
*
* @return the status of transmit.
*/

View File

@ -90,7 +90,8 @@ struct rt_dma_controller
struct rt_dma_controller_ops
{
struct rt_dma_chan *(*request_chan)(struct rt_dma_controller *ctrl, struct rt_device *slave);
struct rt_dma_chan *(*request_chan)(struct rt_dma_controller *ctrl,
struct rt_device *slave, void *fw_data);
rt_err_t (*release_chan)(struct rt_dma_chan *chan);
rt_err_t (*start)(struct rt_dma_chan *chan);
@ -107,8 +108,6 @@ struct rt_dma_controller_ops
rt_err_t (*prep_single)(struct rt_dma_chan *chan,
rt_ubase_t dma_buf_addr, rt_size_t buf_len,
enum rt_dma_transfer_direction dir);
rt_err_t (*ofw_parse)(struct rt_dma_controller *ctrl, struct rt_ofw_cell_args *dma_args);
};
struct rt_dma_chan
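
With the ofw_parse callback removed from rt_dma_controller_ops, firmware cell arguments now reach the controller through the request_chan fw_data pointer. A hypothetical controller-side sketch of the new signature (my_dma_chan and the cell layout are invented for illustration):

struct my_dma_chan
{
    struct rt_dma_chan parent;
    rt_uint32_t request_line;
};

/* Hypothetical driver callback showing the new request_chan signature. */
static struct rt_dma_chan *my_dma_request_chan(struct rt_dma_controller *ctrl,
        struct rt_device *slave, void *fw_data)
{
    struct rt_ofw_cell_args *args = fw_data; /* may be RT_NULL for non-OFW requests */
    struct my_dma_chan *chan = rt_calloc(1, sizeof(*chan));

    if (!chan)
    {
        return RT_NULL;
    }

    if (args)
    {
        /* assumed: the first cell selects the request line, per "#dma-cells" */
        chan->request_line = args->args[0];
    }

    return &chan->parent;
}
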

View File

@ -34,6 +34,12 @@
(((__x) - ((__d) / 2)) / (__d)); \
})
#define __KEY_PLACEHOLDER_1 0,
#define ____KEY_ENABLED(__ignored, val, ...) val
#define ___KEY_ENABLED(arg1_or_junk) ____KEY_ENABLED(arg1_or_junk 1, 0)
#define __KEY_ENABLED(value) ___KEY_ENABLED(__KEY_PLACEHOLDER_##value)
#define RT_KEY_ENABLED(key) __KEY_ENABLED(key)
#define RT_FIELD_PREP(mask, val) (((rt_uint64_t)(val) << (__rt_ffsl((mask)) - 1)) & (mask))
#define RT_FIELD_GET(mask, val) (((val) & (mask)) >> (__rt_ffsl((mask)) - 1))
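
The RT_KEY_ENABLED() helper above mirrors the familiar IS_ENABLED() trick: it expands to 1 only when the given macro is defined to 1, and to 0 otherwise, so it works both in #if directives and in ordinary C expressions. A small illustration (FEATURE_FOO and FEATURE_BAR are made-up names):

#define FEATURE_FOO 1          /* enabled: defined to 1 */
                               /* FEATURE_BAR deliberately left undefined */

#if RT_KEY_ENABLED(FEATURE_FOO)
/* compiled in: RT_KEY_ENABLED(FEATURE_FOO) expands to 1 */
#endif

void configure(void)
{
    if (RT_KEY_ENABLED(FEATURE_BAR)) /* expands to 0, so this branch is dead code */
    {
        /* never reached */
    }
}
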
@ -107,6 +113,22 @@
_rem; \
})
#define rt_abs(x) \
({ \
long ret; \
if (sizeof(x) == sizeof(long)) \
{ \
long __x = (x); \
ret = (__x < 0) ? -__x : __x; \
} \
else \
{ \
int __x = (x); \
ret = (__x < 0) ? -__x : __x; \
} \
ret; \
})
#ifndef rt_ilog2
rt_inline int rt_ilog2(rt_ubase_t v)
{

View File

@ -0,0 +1,69 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-11-20 zhujiale the first version
*/
#ifndef __RTT_BYPASS_H__
#define __RTT_BYPASS_H__
#include <rtthread.h>
#include <rttypes.h>
#include <rtdevice.h>
typedef rt_err_t(*bypass_function_t)(struct rt_serial_device* serial, char buf, void* data);
#define RT_BYPASS_LEVEL_MAX 4
#define RT_BYPASS_LEVEL_1 0
#define RT_BYPASS_LEVEL_2 1
#define RT_BYPASS_LEVEL_3 2
#define RT_BYPASS_LEVEL_4 3
#define RT_BYPASS_MAX_LEVEL 4
/* The protect levels can be registered but cannot be unregistered; use them carefully. */
#define RT_BYPASS_PROTECT_LEVEL_1 10
#define RT_BYPASS_PROTECT_LEVEL_2 11
#define RT_BYPASS_PROTECT_LEVEL_3 12
#define RT_BYPASS_PROTECT_LEVEL_4 13
struct rt_serial_bypass_func {
/* The function pointer for processing the bypassed data */
bypass_function_t bypass;
/* The smaller the level value, the higher the execution priority */
rt_uint8_t level;
rt_list_t node;
char name[RT_NAME_MAX];
void* data;
};
struct rt_serial_bypass_head
{
rt_list_t head;
struct rt_spinlock spinlock;
};
struct rt_serial_bypass {
struct rt_work work;
struct rt_spinlock spinlock;
struct rt_workqueue* lower_workq;
struct rt_serial_bypass_head* upper_h;
struct rt_serial_bypass_head* lower_h;
rt_mutex_t mutex;
struct rt_ringbuffer* pipe;
};
int serial_bypass_list(int argc, char** argv);
void rt_bypass_work_straight(struct rt_serial_device* serial);
void rt_bypass_putchar(struct rt_serial_device* serial, rt_uint8_t ch);
rt_size_t rt_bypass_getchar(struct rt_serial_device* serial, rt_uint8_t* ch);
rt_err_t rt_bypass_upper_unregister(struct rt_serial_device* serial, rt_uint8_t level);
rt_err_t rt_bypass_lower_unregister(struct rt_serial_device* serial, rt_uint8_t level);
rt_err_t rt_bypass_upper_register(struct rt_serial_device* serial, const char* name, rt_uint8_t level, bypass_function_t func, void* data);
rt_err_t rt_bypass_lower_register(struct rt_serial_device* serial, const char* name, rt_uint8_t level, bypass_function_t func, void* data);
#endif
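
A hedged sketch of how these hooks might be used: register an upper-level filter on a serial device and inspect each received character. The device name "uart1", the per-character callback semantics and the return-value convention are assumptions, not guarantees made by this header (rtdevice.h pulls the header in when RT_USING_SERIAL_BYPASS is enabled):

#include <rtthread.h>
#include <rtdevice.h>

/* Assumed contract: called for each received character before normal handling. */
static rt_err_t echo_filter(struct rt_serial_device *serial, char ch, void *data)
{
    rt_kprintf("bypass saw: %c\n", ch);
    return RT_EOK;
}

static int bypass_demo(void)
{
    /* "uart1" is an example device name */
    struct rt_serial_device *serial = (struct rt_serial_device *)rt_device_find("uart1");

    if (!serial)
    {
        return -RT_ERROR;
    }

    return (int)rt_bypass_upper_register(serial, "echo", RT_BYPASS_LEVEL_1,
                                         echo_filter, RT_NULL);
}
INIT_APP_EXPORT(bypass_demo);
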

View File

@ -0,0 +1,205 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-3-08 GuEe-GUI the first version
*/
#ifndef __THERMAL_H__
#define __THERMAL_H__
#include <rtdef.h>
#include <dt-bindings/thermal/thermal.h>
/* No upper/lower limit requirement */
#define RT_THERMAL_NO_LIMIT ((rt_uint32_t)THERMAL_NO_LIMIT)
#define RT_THERMAL_TEMP_INVALID (-274000)
struct rt_thermal_zone_ops;
struct rt_thermal_cooling_device;
struct rt_thermal_cooling_device_ops;
struct rt_thermal_cooling_governor;
enum rt_thermal_trip_type
{
RT_THERMAL_TRIP_ACTIVE = 0,
RT_THERMAL_TRIP_PASSIVE,
RT_THERMAL_TRIP_HOT,
RT_THERMAL_TRIP_CRITICAL,
RT_THERMAL_TRIP_TYPE_MAX,
};
struct rt_thermal_trip
{
/* Temperature value in millidegree celsius */
int temperature;
/* Relative hysteresis in millidegree celsius */
int hysteresis;
enum rt_thermal_trip_type type;
void *priv;
};
struct rt_thermal_zone_params
{
/* Sustainable power (heat) that this thermal zone can dissipate in mW */
int sustainable_power;
/* Slope of a linear temperature adjustment curve */
int slope;
/* Offset of a linear temperature adjustment curve */
int offset;
};
struct rt_thermal_cooling_cell
{
struct rt_thermal_cooling_device *cooling_devices;
rt_uint32_t level_range[2];
};
struct rt_thermal_cooling_map
{
rt_uint32_t contribution;
rt_size_t cells_nr;
struct rt_thermal_cooling_cell *cells;
struct rt_thermal_trip *trips;
};
struct rt_thermal_zone_device
{
struct rt_device parent;
int zone_id;
const struct rt_thermal_zone_ops *ops;
rt_bool_t trips_free;
rt_size_t trips_nr;
struct rt_thermal_trip *trips;
struct rt_thermal_zone_params params;
rt_bool_t enabled;
rt_bool_t cooling;
int temperature;
int last_temperature;
int prev_low_trip;
int prev_high_trip;
rt_list_t notifier_nodes;
struct rt_spinlock nodes_lock;
rt_size_t cooling_maps_nr;
struct rt_thermal_cooling_map *cooling_maps;
rt_tick_t passive_delay, polling_delay;
struct rt_work poller;
struct rt_mutex mutex;
void *priv;
};
struct rt_thermal_zone_ops
{
rt_err_t (*get_temp)(struct rt_thermal_zone_device *zdev, int *out_temp);
rt_err_t (*set_trips)(struct rt_thermal_zone_device *zdev, int low_temp, int high_temp);
rt_err_t (*set_trip_temp)(struct rt_thermal_zone_device *zdev, int trip_id, int temp);
rt_err_t (*set_trip_hyst)(struct rt_thermal_zone_device *zdev, int trip_id, int hyst);
void (*hot)(struct rt_thermal_zone_device *zdev);
void (*critical)(struct rt_thermal_zone_device *zdev);
};
/*
* We don't want to make a temperature control system
* that is finer than an air conditioner's temperature control,
* just ensure reliable heat dissipation under high-load tasks
* or when the SoC temperature is too high.
*/
struct rt_thermal_cooling_device
{
struct rt_device parent;
const struct rt_thermal_cooling_device_ops *ops;
/* The cooling capacity indicator */
rt_ubase_t max_level;
rt_list_t governor_node;
struct rt_thermal_cooling_governor *gov;
void *priv;
};
struct rt_thermal_cooling_device_ops
{
rt_err_t (*bind)(struct rt_thermal_cooling_device *cdev, struct rt_thermal_zone_device *zdev);
rt_err_t (*unbind)(struct rt_thermal_cooling_device *cdev, struct rt_thermal_zone_device *zdev);
rt_err_t (*get_max_level)(struct rt_thermal_cooling_device *cdev, rt_ubase_t *out_level);
rt_err_t (*get_cur_level)(struct rt_thermal_cooling_device *cdev, rt_ubase_t *out_level);
rt_err_t (*set_cur_level)(struct rt_thermal_cooling_device *cdev, rt_ubase_t level);
};
struct rt_thermal_cooling_governor
{
rt_list_t list;
const char *name;
rt_list_t cdev_nodes;
void (*tuning)(struct rt_thermal_zone_device *zdev,
int map_idx, int cell_idx, rt_ubase_t *level);
};
struct rt_thermal_notifier;
#define RT_THERMAL_MSG_EVENT_UNSPECIFIED RT_BIT(0) /* Unspecified event */
#define RT_THERMAL_MSG_EVENT_TEMP_SAMPLE RT_BIT(1) /* New Temperature sample */
#define RT_THERMAL_MSG_TRIP_VIOLATED RT_BIT(2) /* TRIP Point violation */
#define RT_THERMAL_MSG_TRIP_CHANGED RT_BIT(3) /* TRIP Point temperature changed */
#define RT_THERMAL_MSG_DEVICE_DOWN RT_BIT(4) /* Thermal device is down */
#define RT_THERMAL_MSG_DEVICE_UP RT_BIT(5) /* Thermal device is up after a down event */
#define RT_THERMAL_MSG_DEVICE_POWER_CAPABILITY_CHANGED RT_BIT(6) /* Power capability changed */
#define RT_THERMAL_MSG_TABLE_CHANGED RT_BIT(7) /* Thermal table(s) changed */
#define RT_THERMAL_MSG_EVENT_KEEP_ALIVE RT_BIT(8) /* Request for user space handler to respond */
typedef rt_err_t (*rt_thermal_notifier_callback)(struct rt_thermal_notifier *notifier,
rt_ubase_t msg);
struct rt_thermal_notifier
{
rt_list_t list;
struct rt_thermal_zone_device *zdev;
rt_thermal_notifier_callback callback;
void *priv;
};
rt_err_t rt_thermal_zone_device_register(struct rt_thermal_zone_device *zdev);
rt_err_t rt_thermal_zone_device_unregister(struct rt_thermal_zone_device *zdev);
rt_err_t rt_thermal_cooling_device_register(struct rt_thermal_cooling_device *cdev);
rt_err_t rt_thermal_cooling_device_unregister(struct rt_thermal_cooling_device *cdev);
rt_err_t rt_thermal_cooling_governor_register(struct rt_thermal_cooling_governor *gov);
rt_err_t rt_thermal_cooling_governor_unregister(struct rt_thermal_cooling_governor *gov);
rt_err_t rt_thermal_cooling_device_change_governor(struct rt_thermal_cooling_device *cdev,
const char *name);
rt_err_t rt_thermal_zone_notifier_register(struct rt_thermal_zone_device *zdev,
struct rt_thermal_notifier *notifier);
rt_err_t rt_thermal_zone_notifier_unregister(struct rt_thermal_zone_device *zdev,
struct rt_thermal_notifier *notifier);
void rt_thermal_zone_device_update(struct rt_thermal_zone_device *zdev, rt_ubase_t msg);
void rt_thermal_cooling_device_kick(struct rt_thermal_zone_device *zdev);
rt_err_t rt_thermal_zone_set_trip(struct rt_thermal_zone_device *zdev, int trip_id,
const struct rt_thermal_trip *trip);
rt_err_t rt_thermal_zone_get_trip(struct rt_thermal_zone_device *zdev, int trip_id,
struct rt_thermal_trip *out_trip);
#endif /* __THERMAL_H__ */
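
A minimal sketch of how a sensor driver might plug into this API: supply a get_temp operation, describe one passive trip, and register the zone. The sensor read, trip values and polling period are placeholders; only structures and functions declared above are used:

#include <rtthread.h>
#include <rtdevice.h>

/* Placeholder for real hardware access. */
static int my_read_sensor_millicelsius(void)
{
    return 45000; /* 45.000 C */
}

static rt_err_t my_get_temp(struct rt_thermal_zone_device *zdev, int *out_temp)
{
    *out_temp = my_read_sensor_millicelsius();
    return RT_EOK;
}

static const struct rt_thermal_zone_ops my_zone_ops =
{
    .get_temp = my_get_temp,
};

static struct rt_thermal_trip my_trips[] =
{
    { .temperature = 85000, .hysteresis = 2000, .type = RT_THERMAL_TRIP_PASSIVE },
};

static struct rt_thermal_zone_device my_zone =
{
    .ops = &my_zone_ops,
    .trips = my_trips,
    .trips_nr = sizeof(my_trips) / sizeof(my_trips[0]),
    .polling_delay = RT_TICK_PER_SECOND, /* placeholder polling period */
};

static int my_thermal_init(void)
{
    return (int)rt_thermal_zone_device_register(&my_zone);
}
INIT_DEVICE_EXPORT(my_thermal_init);
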

View File

@ -0,0 +1,13 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef __DT_BINDINGS_THERMAL_THERMAL_H__
#define __DT_BINDINGS_THERMAL_THERMAL_H__
/* On cooling devices upper and lower limits */
#define THERMAL_NO_LIMIT (~0)
#endif /* __DT_BINDINGS_THERMAL_THERMAL_H__ */

View File

@ -89,6 +89,16 @@ extern "C" {
#include "drivers/pic.h"
#endif /* RT_USING_PIC */
#ifdef RT_USING_PCI
#include "drivers/pci.h"
#ifdef RT_PCI_MSI
#include "drivers/pci_msi.h"
#endif /* RT_PCI_MSI */
#ifdef RT_PCI_ENDPOINT
#include "drivers/pci_endpoint.h"
#endif /* RT_PCI_ENDPOINT */
#endif /* RT_USING_PCI */
#ifdef RT_USING_REGULATOR
#include "drivers/regulator.h"
#endif /* RT_USING_REGULATOR */
@ -104,6 +114,10 @@ extern "C" {
#ifdef RT_MFD_SYSCON
#include "drivers/syscon.h"
#endif /* RT_MFD_SYSCON */
#ifdef RT_USING_THERMAL
#include "drivers/thermal.h"
#endif /* RT_USING_THERMAL */
#endif /* RT_USING_DM */
#ifdef RT_USING_RTC
@ -138,6 +152,9 @@ extern "C" {
#include "drivers/dev_serial_v2.h"
#else
#include "drivers/dev_serial.h"
#ifdef RT_USING_SERIAL_BYPASS
#include "drivers/serial_bypass.h"
#endif /* RT_USING_SERIAL_BYPASS */
#endif
#endif /* RT_USING_SERIAL */

View File

@ -15,7 +15,7 @@
#include <rtdbg.h>
#include <drivers/led.h>
#include <drivers/core/rtdm.h>
#include <drivers/core/dm.h>
struct blink_timer
{

View File

@ -66,6 +66,7 @@ static rt_err_t pic_mbox_request(struct rt_mbox_chan *chan)
struct pic_mbox *pic_mbox = raw_to_pic_mbox(chan->ctrl);
HWREG32(pic_mbox->regs + MAILBOX_IMASK) &= ~RT_BIT(index);
HWREG32(pic_mbox->regs + MAILBOX_ISTATE) = 0;
return RT_EOK;
}
@ -89,6 +90,11 @@ static rt_err_t pic_mbox_send(struct rt_mbox_chan *chan, const void *data)
rt_thread_yield();
}
if (HWREG32(pic_mbox->peer_regs + MAILBOX_IMASK) & RT_BIT(index))
{
return -RT_ERROR;
}
level = rt_spin_lock_irqsave(&pic_mbox->lock);
HWREG32(pic_mbox->regs + MAILBOX_MSG(index)) = *(rt_uint32_t *)data;
@ -187,6 +193,12 @@ static rt_err_t pic_mbox_probe(struct rt_platform_device *pdev)
}
pic_mbox->peer_regs = pic_mbox->regs + size / 2;
/* Init by the captain */
HWREG32(pic_mbox->regs + MAILBOX_IMASK) = 0xffffffff;
HWREG32(pic_mbox->regs + MAILBOX_ISTATE) = 0;
HWREG32(pic_mbox->peer_regs + MAILBOX_IMASK) = 0xffffffff;
HWREG32(pic_mbox->peer_regs + MAILBOX_ISTATE) = 0;
}
else
{

View File

@ -18,7 +18,7 @@
#include <drivers/ofw.h>
#include <drivers/mailbox.h>
#include <drivers/platform.h>
#include <drivers/core/rtdm.h>
#include <drivers/core/dm.h>
static struct rt_spinlock mbox_ops_lock = {};
static rt_list_t mbox_nodes = RT_LIST_OBJECT_INIT(mbox_nodes);
@ -253,7 +253,11 @@ struct rt_mbox_chan *rt_mbox_request_by_index(struct rt_mbox_client *client, int
if (!rt_ofw_data(ctrl_np))
{
rt_spin_unlock(&mbox_ops_lock);
rt_platform_ofw_request(ctrl_np);
rt_spin_lock(&mbox_ops_lock);
}
ctrl = rt_ofw_data(ctrl_np);

View File

@ -578,7 +578,7 @@ static rt_err_t nvme_blk_sync(struct rt_blk_disk *disk)
static rt_err_t nvme_blk_erase(struct rt_blk_disk *disk)
{
rt_err_t err;
rt_err_t err = RT_EOK;
rt_ssize_t slba, lbas, max_lbas;
struct rt_nvme_command cmd;
struct rt_nvme_device *ndev = rt_disk_to_nvme_device(disk);
@ -955,7 +955,7 @@ static rt_err_t nvme_setup_io_queues(struct rt_nvme_controller *nvme)
int irq, cpuid = 0;
char name[RT_NAME_MAX];
rt_bool_t affinity_fixup = RT_FALSE;
RT_DECLARE_IRQ_AFFINITY(affinity) = { 0 };
RT_IRQ_AFFINITY_DECLARE(affinity) = { 0 };
struct rt_nvme_queue *queue;
nvme->io_queue_max = nvme->irqs_nr > 1 ? nvme->irqs_nr - 1 : 1;
@ -1052,7 +1052,7 @@ static void nvme_remove_devices(struct rt_nvme_controller *nvme)
static rt_err_t nvme_scan_device(struct rt_nvme_controller *nvme,
rt_size_t number_of_ns)
{
rt_err_t err;
rt_err_t err = RT_EOK;
rt_uint32_t lbaf;
struct rt_nvme_id_ns *id = RT_NULL;
@ -1272,8 +1272,8 @@ static int nvme_queue_affinify_fixup(void)
{
int cpuid = rt_hw_cpu_id();
struct rt_nvme_controller *nvme;
RT_DECLARE_IRQ_AFFINITY(affinity) = { 0 };
RT_DECLARE_IRQ_AFFINITY(current_affinity) = { 0 };
RT_IRQ_AFFINITY_DECLARE(affinity) = { 0 };
RT_IRQ_AFFINITY_DECLARE(current_affinity) = { 0 };
RT_IRQ_AFFINITY_SET(affinity, cpuid);

View File

@ -333,8 +333,6 @@ static int ofw_prop_index_of_string(struct rt_ofw_prop *prop, const char *string
static rt_int32_t ofw_strcasecmp(const char *cs, const char *ct)
{
extern rt_int32_t strcasecmp(const char *cs, const char *ct);
return rt_strcasecmp(cs, ct);
}

View File

@ -37,6 +37,12 @@ static rt_phandle _phandle_max;
static rt_size_t _root_size_cells;
static rt_size_t _root_addr_cells;
#ifdef ARCH_CPU_64BIT
#define MIN_BIT 16
#else
#define MIN_BIT 8
#endif
const char *rt_fdt_node_name(const char *full_name)
{
const char *node_name = strrchr(full_name, '/');
@ -358,11 +364,11 @@ static rt_err_t fdt_scan_memory(void)
if (!err)
{
LOG_I("Memory node(%d) ranges: %p - %p%s", no, base, base + size, "");
LOG_I("Memory node(%d) ranges: 0x%.*lx - 0x%.*lx%s", no, MIN_BIT, base, MIN_BIT, base + size, "");
}
else
{
LOG_W("Memory node(%d) ranges: %p - %p%s", no, base, base + size, " unable to record");
LOG_W("Memory node(%d) ranges: 0x%.*lx - 0x%.*lx%s", no, MIN_BIT, base, MIN_BIT, base + size, " unable to record");
}
}
}

View File

@ -435,6 +435,6 @@ EP_API rt_err_t dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, rt_uint8_t func_no
EP_API rt_err_t dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, rt_uint8_t func_no,
rt_ubase_t phys_addr, rt_uint64_t pci_addr, rt_size_t size) EP_RET(-RT_ENOSYS)
EP_API struct dw_pcie_ep_func *dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, rt_uint8_t func_no) EP_RET()
EP_API struct dw_pcie_ep_func *dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, rt_uint8_t func_no) EP_RET(RT_NULL)
#endif /* __PCIE_DESIGNWARE_H__ */

View File

@ -8,6 +8,7 @@ menuconfig RT_USING_PIC
config RT_USING_PIC_STATISTICS
bool "Enable ISR execution time statistics"
depends on RT_USING_PIC
depends on RT_USING_KTIME
depends on RT_USING_INTERRUPT_INFO
default n

View File

@ -83,6 +83,11 @@ static void gicv2_dist_init(struct gicv2 *gic)
LOG_D("Max irq = %d", gic->max_irq);
if (gic->skip_init)
{
return;
}
HWREG32(base + GIC_DIST_CTRL) = GICD_DISABLE;
/* Set all global (unused) interrupts to this CPU only. */
@ -620,6 +625,8 @@ static rt_err_t gicv2_ofw_init(struct rt_ofw_node *np, const struct rt_ofw_node_
break;
}
gic->skip_init = rt_ofw_prop_read_bool(np, "skip-init");
gic_common_init_quirk_ofw(np, _gicv2_quirks, gic);
gicv2_init(gic);

View File

@ -78,6 +78,8 @@ struct gicv2
rt_size_t hyp_size;
void *vcpu_base;
rt_size_t vcpu_size;
rt_bool_t skip_init;
};
#endif /* __IRQ_GICV2_H__ */

View File

@ -341,7 +341,7 @@ rt_err_t gicv2m_ofw_probe(struct rt_ofw_node *np, const struct rt_ofw_node_id *i
}
}
bitmap_size = RT_BITMAP_LEN(v2m->spis_nr) * sizeof(bitmap_t);
bitmap_size = RT_BITMAP_LEN(v2m->spis_nr) * sizeof(rt_bitmap_t);
if (!(v2m->vectors = rt_calloc(1, bitmap_size)))
{

View File

@ -216,6 +216,11 @@ static void gicv3_dist_init(void)
LOG_D("%d SPIs implemented", _gic.line_nr - 32);
LOG_D("%d Extended SPIs implemented", _gic.espi_nr);
if (_gic.skip_init)
{
goto _get_max_irq;
}
/* Disable the distributor */
HWREG32(base + GICD_CTLR) = 0;
gicv3_dist_wait_for_rwp();
@ -266,6 +271,7 @@ static void gicv3_dist_init(void)
HWREG64(base + GICD_IROUTERnE + i * 8) = affinity;
}
_get_max_irq:
if (GICD_TYPER_NUM_LPIS(_gic.gicd_typer) > 1)
{
/* Max LPI = 8192 + Math.pow(2, num_LPIs + 1) - 1 */
@ -1063,6 +1069,7 @@ static rt_err_t gicv3_ofw_init(struct rt_ofw_node *np, const struct rt_ofw_node_
redist_stride = 0;
}
_gic.redist_stride = redist_stride;
_gic.skip_init = rt_ofw_prop_read_bool(np, "skip-init");
gic_common_init_quirk_ofw(np, _gicv3_quirks, &_gic.parent);
gicv3_init();

View File

@ -385,6 +385,8 @@ struct gicv3
rt_uint64_t redist_flags;
rt_size_t redist_stride;
rt_size_t redist_regions_nr;
rt_bool_t skip_init;
};
#endif /* __PIC_GICV3_H__ */

View File

@ -16,7 +16,9 @@
#include <rtdbg.h>
#include <drivers/pic.h>
#ifdef RT_USING_PIC_STATISTICS
#include <ktime.h>
#endif
struct irq_traps
{

View File

@ -1,3 +1,7 @@
menuconfig RT_USING_PIN
bool "Using Generic GPIO device drivers"
default y
if RT_USING_PIN
osource "$(SOC_DM_PIN_DIR)/Kconfig"
endif

View File

@ -132,6 +132,16 @@ rt_err_t rt_pin_irq_enable(rt_base_t pin, rt_uint8_t enabled)
return -RT_ENOSYS;
}
rt_err_t rt_pin_debounce(rt_base_t pin, rt_uint32_t debounce)
{
RT_ASSERT(_hw_pin.ops != RT_NULL);
if (_hw_pin.ops->pin_debounce)
{
return _hw_pin.ops->pin_debounce(&_hw_pin.parent, pin, debounce);
}
return -RT_ENOSYS;
}
/* RT-Thread Hardware PIN APIs */
void rt_pin_mode(rt_base_t pin, rt_uint8_t mode)
{

View File

@ -10,6 +10,227 @@
#include "dev_pin_dm.h"
static rt_size_t pin_total_nr = 0;
static struct rt_spinlock pin_lock = {};
static rt_list_t pin_nodes = RT_LIST_OBJECT_INIT(pin_nodes);
static struct rt_device_pin *pin_device_find(rt_ubase_t pin)
{
struct rt_device_pin *gpio = RT_NULL, *gpio_tmp;
rt_spin_lock(&pin_lock);
rt_list_for_each_entry(gpio_tmp, &pin_nodes, list)
{
if (pin >= gpio_tmp->pin_start &&
pin - gpio_tmp->pin_start < gpio_tmp->pin_nr)
{
gpio = gpio_tmp;
break;
}
}
rt_spin_unlock(&pin_lock);
return gpio;
}
static void pin_api_mode(struct rt_device *device, rt_base_t pin, rt_uint8_t mode)
{
struct rt_device_pin *gpio = pin_device_find(pin);
if (gpio && gpio->ops->pin_mode)
{
gpio->ops->pin_mode(&gpio->parent, pin - gpio->pin_start, mode);
}
}
static void pin_api_write(struct rt_device *device, rt_base_t pin, rt_uint8_t value)
{
struct rt_device_pin *gpio = pin_device_find(pin);
if (gpio && gpio->ops->pin_write)
{
gpio->ops->pin_write(&gpio->parent, pin - gpio->pin_start, value);
}
}
static rt_ssize_t pin_api_read(struct rt_device *device, rt_base_t pin)
{
struct rt_device_pin *gpio = pin_device_find(pin);
if (gpio && gpio->ops->pin_read)
{
return gpio->ops->pin_read(&gpio->parent, pin - gpio->pin_start);
}
return -RT_EINVAL;
}
static rt_err_t pin_api_attach_irq(struct rt_device *device, rt_base_t pin,
rt_uint8_t mode, void (*hdr)(void *args), void *args)
{
struct rt_device_pin *gpio = pin_device_find(pin);
if (gpio)
{
rt_base_t pin_index = pin - gpio->pin_start;
if (!gpio->ops->pin_attach_irq)
{
rt_err_t err;
struct rt_pin_irq_hdr *legacy_isr;
if ((err = gpio->ops->pin_irq_mode(&gpio->parent, pin_index, mode)))
{
return err;
}
legacy_isr = &gpio->legacy_isr[pin_index];
legacy_isr->pin = pin_index;
legacy_isr->mode = mode;
legacy_isr->hdr = hdr;
legacy_isr->args = args;
return RT_EOK;
}
else
{
return gpio->ops->pin_attach_irq(&gpio->parent, pin_index, mode, hdr, args);
}
}
return -RT_EINVAL;
}
static rt_err_t pin_api_detach_irq(struct rt_device *device, rt_base_t pin)
{
struct rt_device_pin *gpio = pin_device_find(pin);
if (gpio)
{
rt_base_t pin_index = pin - gpio->pin_start;
if (!gpio->ops->pin_detach_irq)
{
struct rt_pin_irq_hdr *legacy_isr;
legacy_isr = &gpio->legacy_isr[pin_index];
rt_memset(legacy_isr, 0, sizeof(*legacy_isr));
return RT_EOK;
}
else
{
return gpio->ops->pin_detach_irq(&gpio->parent, pin);
}
}
return -RT_EINVAL;
}
static rt_err_t pin_api_irq_enable(struct rt_device *device, rt_base_t pin,
rt_uint8_t enabled)
{
struct rt_device_pin *gpio = pin_device_find(pin);
if (gpio && gpio->ops->pin_irq_enable)
{
return gpio->ops->pin_irq_enable(&gpio->parent, pin - gpio->pin_start, enabled);
}
return -RT_EINVAL;
}
static rt_base_t pin_api_get(const char *name)
{
rt_base_t res = -RT_EINVAL;
struct rt_device_pin *gpio;
rt_spin_lock(&pin_lock);
rt_list_for_each_entry(gpio, &pin_nodes, list)
{
if (gpio->ops->pin_get && !(res = gpio->ops->pin_get(name)))
{
break;
}
}
rt_spin_unlock(&pin_lock);
return res;
}
static rt_err_t pin_api_debounce(struct rt_device *device, rt_base_t pin,
rt_uint32_t debounce)
{
struct rt_device_pin *gpio = pin_device_find(pin);
if (gpio && gpio->ops->pin_debounce)
{
return gpio->ops->pin_debounce(&gpio->parent, pin - gpio->pin_start, debounce);
}
return -RT_EINVAL;
}
static rt_err_t pin_api_irq_mode(struct rt_device *device, rt_base_t pin,
rt_uint8_t mode)
{
struct rt_device_pin *gpio = pin_device_find(pin);
if (gpio && gpio->ops->pin_irq_mode)
{
return gpio->ops->pin_irq_mode(&gpio->parent, pin - gpio->pin_start, mode);
}
return -RT_EINVAL;
}
static const struct rt_pin_ops pin_api_dm_ops =
{
.pin_mode = pin_api_mode,
.pin_write = pin_api_write,
.pin_read = pin_api_read,
.pin_attach_irq = pin_api_attach_irq,
.pin_detach_irq = pin_api_detach_irq,
.pin_irq_enable = pin_api_irq_enable,
.pin_get = pin_api_get,
.pin_debounce = pin_api_debounce,
.pin_irq_mode = pin_api_irq_mode,
};
rt_err_t pin_api_init(struct rt_device_pin *gpio, rt_size_t pin_nr)
{
rt_err_t err = RT_EOK;
if (!gpio || !gpio->ops)
{
return -RT_EINVAL;
}
rt_spin_lock(&pin_lock);
if (rt_list_isempty(&pin_nodes))
{
rt_spin_unlock(&pin_lock);
rt_device_pin_register("gpio", &pin_api_dm_ops, RT_NULL);
rt_spin_lock(&pin_lock);
}
gpio->pin_start = pin_total_nr;
gpio->pin_nr = pin_nr;
pin_total_nr += pin_nr;
rt_list_init(&gpio->list);
rt_list_insert_before(&pin_nodes, &gpio->list);
rt_spin_unlock(&pin_lock);
return err;
}
static void pin_dm_irq_mask(struct rt_pic_irq *pirq)
{
struct rt_device_pin *gpio = pirq->pic->priv_data;
@ -78,7 +299,8 @@ static int pin_dm_irq_map(struct rt_pic *pic, int hwirq, rt_uint32_t mode)
return irq;
}
static rt_err_t pin_dm_irq_parse(struct rt_pic *pic, struct rt_ofw_cell_args *args, struct rt_pic_irq *out_pirq)
static rt_err_t pin_dm_irq_parse(struct rt_pic *pic,
struct rt_ofw_cell_args *args, struct rt_pic_irq *out_pirq)
{
rt_err_t err = RT_EOK;
@ -95,7 +317,7 @@ static rt_err_t pin_dm_irq_parse(struct rt_pic *pic, struct rt_ofw_cell_args *ar
return err;
}
static struct rt_pic_ops pin_dm_ops =
const static struct rt_pic_ops pin_dm_ops =
{
.name = "GPIO",
.irq_enable = pin_dm_irq_mask,
@ -113,13 +335,15 @@ rt_err_t pin_pic_handle_isr(struct rt_device_pin *gpio, rt_base_t pin)
if (gpio)
{
rt_ubase_t pin_index = pin;
struct rt_pin_irqchip *irqchip = &gpio->irqchip;
if (pin >= irqchip->pin_range[0] && pin <= irqchip->pin_range[1])
if (pin_index < gpio->pin_nr)
{
struct rt_pic_irq *pirq;
struct rt_pin_irq_hdr *legacy_isr;
pirq = rt_pic_find_irq(&irqchip->parent, pin - irqchip->pin_range[0]);
pirq = rt_pic_find_irq(&irqchip->parent, pin_index);
if (pirq->irq >= 0)
{
@ -129,6 +353,13 @@ rt_err_t pin_pic_handle_isr(struct rt_device_pin *gpio, rt_base_t pin)
{
err = -RT_EINVAL;
}
legacy_isr = &gpio->legacy_isr[pin_index];
if (legacy_isr->hdr)
{
legacy_isr->hdr(legacy_isr->args);
}
}
else
{
@ -143,32 +374,39 @@ rt_err_t pin_pic_handle_isr(struct rt_device_pin *gpio, rt_base_t pin)
return err;
}
rt_err_t pin_pic_init(struct rt_device_pin *gpio)
rt_err_t pin_pic_init(struct rt_device_pin *gpio, int pin_irq)
{
rt_err_t err;
if (gpio)
{
struct rt_pin_irqchip *irqchip = &gpio->irqchip;
struct rt_pic *pic = &irqchip->parent;
if (irqchip->pin_range[0] >= 0 && irqchip->pin_range[1] >= irqchip->pin_range[0])
irqchip->irq = pin_irq;
if (!gpio->pin_nr)
{
struct rt_pic *pic = &irqchip->parent;
rt_size_t pin_nr = irqchip->pin_range[1] - irqchip->pin_range[0] + 1;
pic->priv_data = gpio;
pic->ops = &pin_dm_ops;
/* Make sure the type of gpio for pic */
gpio->parent.parent.type = RT_Object_Class_Device;
rt_pic_default_name(&irqchip->parent);
err = rt_pic_linear_irq(pic, pin_nr);
rt_pic_user_extends(pic);
return -RT_EINVAL;
}
else
gpio->legacy_isr = rt_calloc(gpio->pin_nr, sizeof(*gpio->legacy_isr));
if (!gpio->legacy_isr)
{
err = -RT_EINVAL;
return -RT_ENOMEM;
}
pic->priv_data = gpio;
pic->ops = &pin_dm_ops;
/* Make sure the type of gpio for pic */
gpio->parent.parent.type = RT_Object_Class_Device;
rt_pic_default_name(&irqchip->parent);
err = rt_pic_linear_irq(pic, gpio->pin_nr);
rt_pic_user_extends(pic);
err = RT_EOK;
}
else
{

View File

@ -15,7 +15,9 @@
#include <rtthread.h>
#include <rtdevice.h>
rt_err_t pin_api_init(struct rt_device_pin *gpio, rt_size_t pin_nr);
rt_err_t pin_pic_init(struct rt_device_pin *gpio, int pin_irq);
rt_err_t pin_pic_handle_isr(struct rt_device_pin *gpio, rt_base_t pin);
rt_err_t pin_pic_init(struct rt_device_pin *gpio);
#endif /* __DEV_PIN_DM_H__ */
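
These helpers are what a DM-aware GPIO controller driver is expected to call. A hypothetical probe sequence (controller type, pin count and IRQ wiring are invented; error handling trimmed):

#include "dev_pin_dm.h" /* pulls in rtthread.h/rtdevice.h per the header above */

/* Hypothetical SoC GPIO driver skeleton built on the helpers declared above. */
struct my_gpio
{
    struct rt_device_pin parent;   /* keep first so the DM-managed fields line up */
    void *regs;
};

static const struct rt_pin_ops my_gpio_ops; /* pin_mode/pin_read/pin_irq_mode/... filled in elsewhere */

static rt_err_t my_gpio_probe(struct my_gpio *gpio, int irq)
{
    rt_err_t err;

    gpio->parent.ops = &my_gpio_ops;

    /* Hand 32 pins (an example count) to the DM layer; it assigns the virtual
     * pin_start/pin_nr window and registers the shared "gpio" API device. */
    if ((err = pin_api_init(&gpio->parent, 32)))
    {
        return err;
    }

    /* Wire the controller interrupt into the PIC framework. */
    return pin_pic_init(&gpio->parent, irq);
}

/* In the controller ISR, forward each pending controller-local pin index: */
static void my_gpio_isr(int irqno, void *param)
{
    struct my_gpio *gpio = param;
    rt_base_t pin = 0; /* placeholder: read the pending pin index from hardware */

    pin_pic_handle_isr(&gpio->parent, pin);
}
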

View File

@ -59,6 +59,12 @@ rt_ssize_t rt_ofw_get_named_pin(struct rt_ofw_node *np, const char *propname, in
}
pin_dev_np = pin_args.data;
if (!rt_ofw_data(pin_dev_np))
{
rt_platform_ofw_request(pin_dev_np);
}
pin_dev = rt_ofw_data(pin_dev_np);
if (!pin_dev)
@ -111,11 +117,11 @@ rt_ssize_t rt_ofw_get_named_pin(struct rt_ofw_node *np, const char *propname, in
if (out_value)
{
if (flags == (PIN_ACTIVE_HIGH | PIN_PUSH_PULL))
if ((flags & 1) == PIN_ACTIVE_HIGH)
{
value = PIN_HIGH;
}
else if (flags == (PIN_ACTIVE_LOW | PIN_PUSH_PULL))
else if ((flags & 1) == PIN_ACTIVE_LOW)
{
value = PIN_LOW;
}
@ -124,14 +130,20 @@ rt_ssize_t rt_ofw_get_named_pin(struct rt_ofw_node *np, const char *propname, in
_out_converts:
rt_ofw_node_put(pin_dev_np);
if (out_mode)
if (pin >= 0)
{
*out_mode = mode;
}
/* Get virtual pin */
pin += pin_dev->pin_start;
if (out_value)
{
*out_value = value;
if (out_mode)
{
*out_mode = mode;
}
if (out_value)
{
*out_value = value;
}
}
return pin;
@ -142,7 +154,7 @@ rt_ssize_t rt_ofw_get_named_pin_count(struct rt_ofw_node *np, const char *propna
char gpios_name[64];
rt_ssize_t count = 0;
if (!np || !propname)
if (!np)
{
return -RT_EINVAL;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@ -35,7 +35,6 @@
#endif
static struct rt_work rtc_sync_work;
static rt_device_t source_device = RT_NULL;
static struct rt_device soft_rtc_dev;
static rt_tick_t init_tick;
@ -82,18 +81,6 @@ static void set_rtc_time(time_t t)
#endif
}
static void _source_device_control(int cmd, void *args)
{
if (source_device == RT_NULL)
return;
if (rt_device_open(source_device, 0) == RT_EOK)
{
rt_device_control(source_device, cmd, args);
rt_device_close(source_device);
}
}
static rt_err_t soft_rtc_control(rt_device_t dev, int cmd, void *args)
{
time_t *t;
@ -114,7 +101,6 @@ static rt_err_t soft_rtc_control(rt_device_t dev, int cmd, void *args)
{
t = (time_t *) args;
set_rtc_time(*t);
_source_device_control(RT_DEVICE_CTRL_RTC_SET_TIME, t);
break;
}
#ifdef RT_USING_ALARM
@ -143,7 +129,6 @@ static rt_err_t soft_rtc_control(rt_device_t dev, int cmd, void *args)
rt_ktime_boottime_get_us(&_tv);
set_rtc_time(tv->tv_sec);
init_tv.tv_usec = tv->tv_usec - _tv.tv_usec;
_source_device_control(RT_DEVICE_CTRL_RTC_SET_TIME, &(tv->tv_sec));
break;
}
case RT_DEVICE_CTRL_RTC_GET_TIMESPEC:
@ -162,7 +147,6 @@ static rt_err_t soft_rtc_control(rt_device_t dev, int cmd, void *args)
rt_ktime_boottime_get_ns(&_ts);
set_rtc_time(ts->tv_sec);
init_ts.tv_nsec = ts->tv_nsec - _ts.tv_nsec;
_source_device_control(RT_DEVICE_CTRL_RTC_SET_TIME, &(ts->tv_sec));
break;
}
case RT_DEVICE_CTRL_RTC_GET_TIMERES:
@ -187,7 +171,6 @@ static rt_err_t soft_rtc_control(rt_device_t dev, int cmd, void *args)
rt_tick_t tick = rt_tick_get() - init_tick;
set_rtc_time(tv->tv_sec);
init_tv.tv_usec = tv->tv_usec - ((tick % RT_TICK_PER_SECOND) * (1000000 / RT_TICK_PER_SECOND));
_source_device_control(RT_DEVICE_CTRL_RTC_SET_TIME, &(tv->tv_sec));
break;
}
case RT_DEVICE_CTRL_RTC_GET_TIMERES:
@ -227,6 +210,9 @@ static int rt_soft_rtc_init(void)
return 0;
}
/* make sure only one 'rtc' device */
#if defined(RT_USING_SOFT_RTC) && defined(BSP_USING_ONCHIP_RTC)
#warning "Please note: Currently only one RTC device is allowed in the system, and the name is "rtc"."
#endif
RT_ASSERT(!rt_device_find("rtc"));
#ifdef RT_USING_ALARM
@ -272,13 +258,7 @@ rt_err_t rt_soft_rtc_sync(void)
{
time_t time = 0;
if (source_device == RT_NULL)
{
rt_kprintf("error: rtc source not found, please set it!!!\n");
return RT_ENOSYS;
}
_source_device_control(RT_DEVICE_CTRL_RTC_GET_TIME, &time);
rt_device_control(&soft_rtc_dev, RT_DEVICE_CTRL_RTC_GET_TIME, &time);
set_rtc_time(time);
return RT_EOK;
}
@ -292,9 +272,8 @@ static void rtc_sync_work_func(struct rt_work *work, void *work_data)
rt_err_t rt_soft_rtc_set_source(const char *name)
{
RT_ASSERT(name != RT_NULL);
RT_ASSERT(rt_device_find(name)); // make sure source is exist
RT_ASSERT(rt_device_find(name)); /* make sure the source device exists */
source_device = rt_device_find(name);
rt_work_init(&rtc_sync_work, rtc_sync_work_func, RT_NULL);
rt_work_submit(&rtc_sync_work, rt_tick_from_millisecond(RTC_AUTO_SYNC_FIRST_DELAY * 1000));
@ -317,7 +296,7 @@ static void cmd_rtc_sync(int argc, char **argv)
rt_kprintf("local time: %.*s", 25, ctime(&now));
rt_kprintf("timestamps: %ld\n", (long)tv.tv_sec);
}
MSH_CMD_EXPORT_ALIAS(cmd_rtc_sync, rtc_sync, Update time by real rtc);
MSH_CMD_EXPORT_ALIAS(cmd_rtc_sync, rtc_sync, Update time by soft rtc);
#endif
#endif /* RT_USING_SYSTEM_WORKQUEUE */
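/* Editor's note: an illustrative sketch, not part of this commit. With the
 * _source_device_control() forwarding removed, a BSP that still wants the soft
 * "rtc" device seeded from a hardware RTC can register it as the sync source.
 * This relies on RT_USING_SYSTEM_WORKQUEUE; the device name "hw_rtc" is
 * hypothetical. */
static int demo_soft_rtc_source(void)
{
    if (rt_device_find("hw_rtc") == RT_NULL)
    {
        return -RT_ERROR; /* no hardware RTC registered on this board */
    }

    /* stores the source and submits rtc_sync_work for the periodic sync */
    return rt_soft_rtc_set_source("hw_rtc");
}
INIT_APP_EXPORT(demo_soft_rtc_source);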

View File

@ -25,5 +25,8 @@ config RT_USING_SDIO
default 16
config RT_SDIO_DEBUG
bool "Enable SDIO debug log output"
default n
endif
default n
config RT_USING_SDHCI
bool "Using sdhci for sd/mmc drivers"
default n
endif

View File

@ -11,7 +11,12 @@ dev_mmc.c
""")
# The set of source files associated with this SConscript file.
path = [cwd + '/../include']
path = [cwd + '/../include' , cwd + '/sdhci/include']
if GetDepend('RT_USING_SDHCI'):
src += [os.path.join('sdhci', 'sdhci.c')]
src += [os.path.join('sdhci', 'fit-mmc.c')]
src += [os.path.join('sdhci', 'sdhci-platform.c')]
group = DefineGroup('DeviceDrivers', src, depend = ['RT_USING_SDIO'], CPPPATH = path)

View File

@ -0,0 +1,320 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-16 zhujiale first version
*/
#include <rtthread.h>
#include "sdhci.h"
#include <rtdbg.h>
#include <mmu.h>
#include <drivers/core/dm.h>
static void rt_plat_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req)
{
struct rt_mmc_host *mmc = (struct rt_mmc_host *)host;
rt_uint32_t flags = req->cmd->flags;
switch (flags & RESP_MASK)
{
case RESP_NONE:
flags |= MMC_RSP_NONE;
break;
case RESP_R1:
flags |= MMC_RSP_R1;
break;
case RESP_R1B:
flags |= MMC_RSP_R1B;
break;
case RESP_R2:
flags |= MMC_RSP_R2;
break;
case RESP_R3:
flags |= MMC_RSP_R3;
break;
case RESP_R4:
flags |= MMC_RSP_R4;
break;
case RESP_R5:
flags |= MMC_RSP_R5;
break;
case RESP_R6:
flags |= MMC_RSP_R6;
break;
case RESP_R7:
flags |= MMC_RSP_R7;
break;
}
if (req->data)
{
if ((rt_uint64_t)rt_kmem_v2p(req->data->buf) > 0xffffffff)
{
void *dma_buffer = rt_malloc(req->data->blks * req->data->blksize);
void *req_buf = NULL;
if (req->data->flags & DATA_DIR_WRITE)
{
rt_memcpy(dma_buffer, req->data->buf, req->data->blks * req->data->blksize);
req_buf = req->data->buf;
req->data->buf = dma_buffer;
}
else if (req->data->flags & DATA_DIR_READ)
{
req_buf = req->data->buf;
req->data->buf = dma_buffer;
}
req->cmd->flags |= flags;
mmc->ops->request(mmc, req);
rt_sem_take(&host->sem_ack, RT_WAITING_FOREVER);
if (req->data->flags & DATA_DIR_READ)
{
rt_memcpy(req_buf, dma_buffer, req->data->blksize * req->data->blks);
}
/* the caller's buffer pointer is restored for both directions */
req->data->buf = req_buf;
rt_free(dma_buffer);
rt_sem_release(&host->sem_ack);
}
else
{
req->cmd->flags |= flags;
mmc->ops->request(mmc, req);
}
}
else
{
req->cmd->flags |= flags;
mmc->ops->request(mmc, req);
}
}
static void rt_plat_set_ioconfig(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *iocfg)
{
struct rt_mmc_host *mmc = (struct rt_mmc_host *)host;
LOG_D("clock:%d,width:%d,power:%d,vdd:%d,timing:%d\n",
iocfg->clock, iocfg->bus_width,
iocfg->power_mode, iocfg->vdd, iocfg->timing);
mmc->ops->set_ios(mmc, iocfg);
}
static rt_int32_t rt_plat_get_card_status(struct rt_mmcsd_host *host)
{
struct rt_mmc_host *mmc = (struct rt_mmc_host *)host;
return mmc->ops->get_cd(mmc);
}
static rt_int32_t rt_plat_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode)
{
struct rt_mmc_host *mmc = (struct rt_mmc_host *)host;
return mmc->ops->execute_tuning(mmc, opcode);
}
static void rt_plat_enable_sdio_irq(struct rt_mmcsd_host *host, rt_int32_t en)
{
struct rt_mmc_host *mmc = (struct rt_mmc_host *)host;
mmc->ops->enable_sdio_irq(mmc, en);
}
static const struct rt_mmcsd_host_ops rt_mmcsd_ops = {
.request = rt_plat_request,
.set_iocfg = rt_plat_set_ioconfig,
.get_card_status = rt_plat_get_card_status,
.enable_sdio_irq = rt_plat_enable_sdio_irq,
.execute_tuning = rt_plat_execute_tuning,
};
void rt_mmc_request_done(struct rt_mmc_host *host, struct rt_mmcsd_req *mrq)
{
mmcsd_req_complete(&host->rthost);
}
/*add host in rtt while sdhci complete*/
int rt_mmc_add_host(struct rt_mmc_host *mmc)
{
mmc->rthost.ops = &rt_mmcsd_ops;
mmc->rthost.flags = mmc->caps;
mmc->rthost.freq_max = mmc->f_max;
mmc->rthost.freq_min = 400000;
mmc->rthost.max_dma_segs = mmc->max_segs;
mmc->rthost.max_seg_size = mmc->max_seg_size;
mmc->rthost.max_blk_size = mmc->max_blk_size;
mmc->rthost.max_blk_count = mmc->max_blk_count;
mmc->rthost.valid_ocr = VDD_165_195|VDD_20_21|VDD_21_22|VDD_22_23|VDD_24_25|VDD_25_26|VDD_26_27|VDD_27_28|VDD_28_29|VDD_29_30|VDD_30_31|VDD_32_33|VDD_33_34|VDD_34_35|VDD_35_36;
mmcsd_change(&mmc->rthost);
return 0;
}
struct rt_mmc_host *rt_mmc_alloc_host(int extra, struct rt_device *dev)
{
struct rt_mmc_host *mmc;
mmc = rt_malloc(sizeof(*mmc) + extra);
if (mmc)
{
rt_memset(mmc, 0, sizeof(*mmc) + extra);
mmc->parent = dev;
mmcsd_host_init(&mmc->rthost);
}
return mmc;
}
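/* Editor's note: an illustrative sketch, not part of this commit. A low-level
 * controller driver allocates the wrapper host, fills in its mmc_host_ops and
 * limits, then hands it to the RT-Thread mmcsd core. my_priv, my_mmc_ops,
 * MY_MAX_FREQ and the platform-device probe context are hypothetical. */
struct my_priv
{
    void *regs;
};

static const struct mmc_host_ops my_mmc_ops; /* .request/.set_ios/... provided by the driver */
#define MY_MAX_FREQ (50 * 1000 * 1000)

static int my_host_probe(struct rt_platform_device *pdev)
{
    struct rt_mmc_host *mmc = rt_mmc_alloc_host(sizeof(struct my_priv), &pdev->parent);

    if (!mmc)
    {
        return -RT_ENOMEM;
    }

    mmc->ops   = &my_mmc_ops;
    mmc->f_max = MY_MAX_FREQ;
    rt_mmc_of_parse(mmc);        /* pick up bus-width and caps from the devicetree */

    return rt_mmc_add_host(mmc); /* wires rt_mmcsd_ops and calls mmcsd_change() */
}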
void rt_mmc_remove_host(struct rt_mmc_host *host)
{
rt_free(host);
}
int rt_mmc_abort_tuning(struct rt_mmc_host *host, rt_uint32_t opcode)
{
return 0;
}
int rt_mmc_gpio_get_cd(struct rt_mmc_host *host)
{
return -ENOSYS;
}
void rt_mmc_detect_change(struct rt_mmc_host *host, unsigned long delay)
{
}
int rt_mmc_regulator_set_vqmmc(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios)
{
return 0;
}
rt_bool_t rt_mmc_can_gpio_ro(struct rt_mmc_host *host)
{
return RT_FALSE;
}
int rt_mmc_gpio_get_ro(struct rt_mmc_host *host)
{
return 0;
}
int rt_mmc_send_abort_tuning(struct rt_mmc_host *host, rt_uint32_t opcode)
{
return 0;
}
int rt_mmc_of_parse(struct rt_mmc_host *host)
{
struct rt_device *dev = host->parent;
rt_uint32_t bus_width;
if (!dev || !dev->ofw_node)
return 0;
/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
if (rt_dm_dev_prop_read_u32(dev, "bus-width", &bus_width) < 0)
{
bus_width = 1;
}
switch (bus_width)
{
case 8:
host->caps |= MMC_CAP_8_BIT_DATA;
break; /* Hosts capable of 8-bit can also do 4 bits */
case 4:
host->caps |= MMC_CAP_4_BIT_DATA;
break;
case 1:
break;
default:
return -EINVAL;
}
/* f_max is obtained from the optional "max-frequency" property */
rt_dm_dev_prop_read_u32(dev, "max-frequency", &host->f_max);
if (rt_dm_dev_prop_read_bool(dev, "cap-mmc-highspeed"))
{
host->caps |= MMC_CAP_MMC_HIGHSPEED;
}
if (rt_dm_dev_prop_read_bool(dev, "mmc-hs200-1_8v"))
{
host->caps |= MMC_CAP2_HS200_1_8V_SDR;
}
if (rt_dm_dev_prop_read_bool(dev, "non-removable"))
{
host->caps |= MMC_CAP_NONREMOVABLE;
}
if (rt_dm_dev_prop_read_bool(dev, "no-sdio"))
{
host->caps2 |= MMC_CAP2_NO_SDIO;
}
if (rt_dm_dev_prop_read_bool(dev, "no-sd"))
{
host->caps2 |= MMC_CAP2_NO_SD;
}
if (rt_dm_dev_prop_read_bool(dev, "mmc-ddr-3_3v"))
{
host->caps |= MMC_CAP_3_3V_DDR;
}
if (rt_dm_dev_prop_read_bool(dev, "mmc-ddr-1_8v"))
{
host->caps |= MMC_CAP_1_8V_DDR;
}
if (rt_dm_dev_prop_read_bool(dev, "mmc-ddr-1_2v"))
{
host->caps |= MMC_CAP_1_2V_DDR;
}
return 0;
}
void rt_mmc_free_host(struct rt_mmc_host *host)
{
}
rt_bool_t rt_mmc_can_gpio_cd(struct rt_mmc_host *host)
{
return RT_FALSE;
}
int mmc_regulator_get_supply(struct rt_mmc_host *mmc)
{
mmc->supply.vmmc = RT_NULL;
mmc->supply.vqmmc = RT_NULL;
return 0;
}
int regulator_get_current_limit(struct regulator *regulator)
{
return 0;
}
int regulator_is_supported_voltage(struct regulator *regulator,
int min_uV, int max_uV)
{
return 0;
}

View File

@ -0,0 +1,66 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-16 zhujiale first version
*/
#ifndef _DRIVERS_MMC_RT_SDHCI_PLTFM_H
#define _DRIVERS_MMC_RT_SDHCI_PLTFM_H
#include <rtthread.h>
#include <drivers/core/dm.h>
#include <drivers/ofw.h>
#include <drivers/platform.h>
#include <drivers/clk.h>
#include "sdhci.h"
struct rt_sdhci_pltfm_data
{
const struct rt_sdhci_ops *ops;
unsigned int quirks;
unsigned int quirks2;
};
struct rt_sdhci_pltfm_host
{
struct rt_clk *clk;
unsigned int clock;
rt_uint64_t xfer_mode_shadow;
unsigned long private[];
};
void rt_sdhci_get_property(struct rt_platform_device *pdev);
static inline void sdhci_get_of_property(struct rt_platform_device *pdev)
{
rt_sdhci_get_property(pdev);
}
extern struct rt_sdhci_host *rt_sdhci_pltfm_init(struct rt_platform_device *pdev,
const struct rt_sdhci_pltfm_data *pdata,
size_t priv_size);
extern void rt_sdhci_pltfm_free(struct rt_platform_device *pdev);
extern int rt_sdhci_pltfm_init_and_add_host(struct rt_platform_device *pdev,
const struct rt_sdhci_pltfm_data *pdata,
size_t priv_size);
extern void rt_sdhci_pltfm_remove(struct rt_platform_device *pdev);
extern unsigned int rt_sdhci_pltfm_clk_get_max_clock(struct rt_sdhci_host *host);
static inline void *sdhci_pltfm_priv(struct rt_sdhci_pltfm_host *host)
{
return host->private;
}
static inline int sdhci_pltfm_suspend(struct rt_device *dev)
{
return 0;
}
static inline int sdhci_pltfm_resume(struct rt_device *dev)
{
return 0;
}
#endif
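/* Editor's note: an illustrative sketch, not part of this commit. A vendor
 * platform driver built on the pltfm helpers above; "my,sdhci", my_sdhci_pdata
 * and the driver boilerplate names are hypothetical and follow the RT-Thread
 * DM platform-driver convention. */
static const struct rt_sdhci_pltfm_data my_sdhci_pdata =
{
    .quirks2 = RT_SDHCI_QUIRK2_NO_1_8_V,
};

static rt_err_t my_sdhci_probe(struct rt_platform_device *pdev)
{
    /* maps the registers, parses the DT properties and registers the host */
    return rt_sdhci_pltfm_init_and_add_host(pdev, &my_sdhci_pdata, 0) ? -RT_ERROR : RT_EOK;
}

static const struct rt_ofw_node_id my_sdhci_ofw_ids[] =
{
    { .compatible = "my,sdhci" },
    { /* sentinel */ },
};

static struct rt_platform_driver my_sdhci_driver =
{
    .name  = "my-sdhci",
    .ids   = my_sdhci_ofw_ids,
    .probe = my_sdhci_probe,
};
RT_PLATFORM_DRIVER_EXPORT(my_sdhci_driver);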

View File

@ -0,0 +1,677 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-16 zhujiale first version
*/
#ifndef __RT_SDHCI_HW_H
#define __RT_SDHCI_HW_H
#include "sdhci_host.h"
#include "sdhci_misc.h"
#include "sdhci-platform.h"
#include <drivers/mmcsd_cmd.h>
#include <drivers/dev_mmcsd_core.h>
#include <drivers/mmcsd_host.h>
#include <rtdevice.h>
#define lower_32_bits(n) ((rt_uint32_t)((n) & 0xffffffff))
#define upper_32_bits(n) ((rt_uint32_t)(((n) >> 16) >> 16))
#define MAX_TUNING_LOOP 40
/*
* Controller registers
*/
#define RT_SDHCI_DMA_ADDRESS 0x00
#define RT_SDHCI_ARGUMENT2 RT_SDHCI_DMA_ADDRESS
#define RT_SDHCI_32BIT_BLK_CNT RT_SDHCI_DMA_ADDRESS
#define RT_SDHCI_BLOCK_SIZE 0x04
#define RT_SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))
#define RT_SDHCI_BLOCK_COUNT 0x06
#define RT_SDHCI_ARGUMENT 0x08
#define RT_SDHCI_TRANSFER_MODE 0x0C
#define RT_SDHCI_TRNS_DMA 0x01
#define RT_SDHCI_TRNS_BLK_CNT_EN 0x02
#define RT_SDHCI_TRNS_AUTO_CMD12 0x04
#define RT_SDHCI_TRNS_AUTO_CMD23 0x08
#define RT_SDHCI_TRNS_AUTO_SEL 0x0C
#define RT_SDHCI_TRNS_READ 0x10
#define RT_SDHCI_TRNS_MULTI 0x20
#define RT_SDHCI_COMMAND 0x0E
#define RT_SDHCI_CMD_RESP_MASK 0x03
#define RT_SDHCI_CMD_CRC 0x08
#define RT_SDHCI_CMD_INDEX 0x10
#define RT_SDHCI_CMD_DATA 0x20
#define RT_SDHCI_CMD_ABORTCMD 0xC0
#define RT_SDHCI_CMD_RESP_NONE 0x00
#define RT_SDHCI_CMD_RESP_LONG 0x01
#define RT_SDHCI_CMD_RESP_SHORT 0x02
#define RT_SDHCI_CMD_RESP_SHORT_BUSY 0x03
#define RT_SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
#define RT_SDHCI_GET_CMD(c) ((c >> 8) & 0x3f)
#define RT_SDHCI_RESPONSE 0x10
#define RT_SDHCI_BUFFER 0x20
#define RT_SDHCI_PRESENT_STATE 0x24
#define RT_SDHCI_CMD_INHIBIT 0x00000001
#define RT_SDHCI_DATA_INHIBIT 0x00000002
#define RT_SDHCI_DOING_WRITE 0x00000100
#define RT_SDHCI_DOING_READ 0x00000200
#define RT_SDHCI_SPACE_AVAILABLE 0x00000400
#define RT_SDHCI_DATA_AVAILABLE 0x00000800
#define RT_SDHCI_CARD_PRESENT 0x00010000
#define RT_SDHCI_CARD_PRES_SHIFT 16
#define RT_SDHCI_CD_STABLE 0x00020000
#define RT_SDHCI_CD_LVL 0x00040000
#define RT_SDHCI_CD_LVL_SHIFT 18
#define RT_SDHCI_WRITE_PROTECT 0x00080000
#define RT_SDHCI_DATA_LVL_MASK 0x00F00000
#define RT_SDHCI_DATA_LVL_SHIFT 20
#define RT_SDHCI_DATA_0_LVL_MASK 0x00100000
#define RT_SDHCI_CMD_LVL 0x01000000
#define RT_SDHCI_HOST_CONTROL 0x28
#define RT_SDHCI_CTRL_LED 0x01
#define RT_SDHCI_CTRL_4BITBUS 0x02
#define RT_SDHCI_CTRL_HISPD 0x04
#define RT_SDHCI_CTRL_DMA_MASK 0x18
#define RT_SDHCI_CTRL_SDMA 0x00
#define RT_SDHCI_CTRL_ADMA1 0x08
#define RT_SDHCI_CTRL_ADMA32 0x10
#define RT_SDHCI_CTRL_ADMA64 0x18
#define RT_SDHCI_CTRL_ADMA3 0x18
#define RT_SDHCI_CTRL_8BITBUS 0x20
#define RT_SDHCI_CTRL_CDTEST_INS 0x40
#define RT_SDHCI_CTRL_CDTEST_EN 0x80
#define RT_SDHCI_POWER_CONTROL 0x29
#define RT_SDHCI_POWER_ON 0x01
#define RT_SDHCI_POWER_180 0x0A
#define RT_SDHCI_POWER_300 0x0C
#define RT_SDHCI_POWER_330 0x0E
/*
* VDD2 - UHS2 or PCIe/NVMe
* VDD2 power on/off and voltage select
*/
#define RT_SDHCI_VDD2_POWER_ON 0x10
#define RT_SDHCI_VDD2_POWER_120 0x80
#define RT_SDHCI_VDD2_POWER_180 0xA0
#define RT_SDHCI_BLOCK_GAP_CONTROL 0x2A
#define RT_SDHCI_WAKE_UP_CONTROL 0x2B
#define RT_SDHCI_WAKE_ON_INT 0x01
#define RT_SDHCI_WAKE_ON_INSERT 0x02
#define RT_SDHCI_WAKE_ON_REMOVE 0x04
#define RT_SDHCI_CLOCK_CONTROL 0x2C
#define RT_SDHCI_DIVIDER_SHIFT 8
#define RT_SDHCI_DIVIDER_HI_SHIFT 6
#define RT_SDHCI_DIV_MASK 0xFF
#define RT_SDHCI_DIV_MASK_LEN 8
#define RT_SDHCI_DIV_HI_MASK 0x300
#define RT_SDHCI_PROG_CLOCK_MODE 0x0020
#define RT_SDHCI_CLOCK_CARD_EN 0x0004
#define RT_SDHCI_CLOCK_PLL_EN 0x0008
#define RT_SDHCI_CLOCK_INT_STABLE 0x0002
#define RT_SDHCI_CLOCK_INT_EN 0x0001
#define RT_SDHCI_TIMEOUT_CONTROL 0x2E
#define RT_SDHCI_SOFTWARE_RESET 0x2F
#define RT_SDHCI_RESET_ALL 0x01
#define RT_SDHCI_RESET_CMD 0x02
#define RT_SDHCI_RESET_DATA 0x04
#define RT_SDHCI_INT_STATUS 0x30
#define RT_SDHCI_INT_ENABLE 0x34
#define RT_SDHCI_SIGNAL_ENABLE 0x38
#define RT_SDHCI_INT_RESPONSE 0x00000001
#define RT_SDHCI_INT_DATA_END 0x00000002
#define RT_SDHCI_INT_BLK_GAP 0x00000004
#define RT_SDHCI_INT_DMA_END 0x00000008
#define RT_SDHCI_INT_SPACE_AVAIL 0x00000010
#define RT_SDHCI_INT_DATA_AVAIL 0x00000020
#define RT_SDHCI_INT_CARD_INSERT 0x00000040
#define RT_SDHCI_INT_CARD_REMOVE 0x00000080
#define RT_SDHCI_INT_CARD_INT 0x00000100
#define RT_SDHCI_INT_RETUNE 0x00001000
#define RT_SDHCI_INT_CQE 0x00004000
#define RT_SDHCI_INT_ERROR 0x00008000
#define RT_SDHCI_INT_TIMEOUT 0x00010000
#define RT_SDHCI_INT_CRC 0x00020000
#define RT_SDHCI_INT_END_BIT 0x00040000
#define RT_SDHCI_INT_INDEX 0x00080000
#define RT_SDHCI_INT_DATA_TIMEOUT 0x00100000
#define RT_SDHCI_INT_DATA_CRC 0x00200000
#define RT_SDHCI_INT_DATA_END_BIT 0x00400000
#define RT_SDHCI_INT_BUS_POWER 0x00800000
#define RT_SDHCI_INT_AUTO_CMD_ERR 0x01000000
#define RT_SDHCI_INT_ADMA_ERROR 0x02000000
#define RT_SDHCI_INT_NORMAL_MASK 0x00007FFF
#define RT_SDHCI_INT_ERROR_MASK 0xFFFF8000
#define RT_SDHCI_INT_CMD_MASK (RT_SDHCI_INT_RESPONSE | RT_SDHCI_INT_TIMEOUT | RT_SDHCI_INT_CRC | RT_SDHCI_INT_END_BIT | RT_SDHCI_INT_INDEX | RT_SDHCI_INT_AUTO_CMD_ERR)
#define RT_SDHCI_INT_DATA_MASK (RT_SDHCI_INT_DATA_END | RT_SDHCI_INT_DMA_END | RT_SDHCI_INT_DATA_AVAIL | RT_SDHCI_INT_SPACE_AVAIL | RT_SDHCI_INT_DATA_TIMEOUT | RT_SDHCI_INT_DATA_CRC | RT_SDHCI_INT_DATA_END_BIT | RT_SDHCI_INT_ADMA_ERROR | RT_SDHCI_INT_BLK_GAP)
#define RT_SDHCI_INT_ALL_MASK ((unsigned int)-1)
#define RT_SDHCI_CQE_INT_ERR_MASK ( \
RT_SDHCI_INT_ADMA_ERROR | RT_SDHCI_INT_BUS_POWER | RT_SDHCI_INT_DATA_END_BIT | RT_SDHCI_INT_DATA_CRC | RT_SDHCI_INT_DATA_TIMEOUT | RT_SDHCI_INT_INDEX | RT_SDHCI_INT_END_BIT | RT_SDHCI_INT_CRC | RT_SDHCI_INT_TIMEOUT)
#define RT_SDHCI_CQE_INT_MASK (RT_SDHCI_CQE_INT_ERR_MASK | RT_SDHCI_INT_CQE)
#define RT_SDHCI_AUTO_CMD_STATUS 0x3C
#define RT_SDHCI_AUTO_CMD_TIMEOUT 0x00000002
#define RT_SDHCI_AUTO_CMD_CRC 0x00000004
#define RT_SDHCI_AUTO_CMD_END_BIT 0x00000008
#define RT_SDHCI_AUTO_CMD_INDEX 0x00000010
#define RT_SDHCI_HOST_CONTROL2 0x3E
#define RT_SDHCI_CTRL_UHS_MASK 0x0007
#define RT_SDHCI_CTRL_UHS_SDR12 0x0000
#define RT_SDHCI_CTRL_UHS_SDR25 0x0001
#define RT_SDHCI_CTRL_UHS_SDR50 0x0002
#define RT_SDHCI_CTRL_UHS_SDR104 0x0003
#define RT_SDHCI_CTRL_UHS_DDR50 0x0004
#define RT_SDHCI_CTRL_HS400 0x0005 /* Non-standard */
#define RT_SDHCI_CTRL_VDD_180 0x0008
#define RT_SDHCI_CTRL_DRV_TYPE_MASK 0x0030
#define RT_SDHCI_CTRL_DRV_TYPE_B 0x0000
#define RT_SDHCI_CTRL_DRV_TYPE_A 0x0010
#define RT_SDHCI_CTRL_DRV_TYPE_C 0x0020
#define RT_SDHCI_CTRL_DRV_TYPE_D 0x0030
#define RT_SDHCI_CTRL_EXEC_TUNING 0x0040
#define RT_SDHCI_CTRL_TUNED_CLK 0x0080
#define RT_SDHCI_CMD23_ENABLE 0x0800
#define RT_SDHCI_CTRL_V4_MODE 0x1000
#define RT_SDHCI_CTRL_64BIT_ADDR 0x2000
#define RT_SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
#define RT_SDHCI_CAPABILITIES 0x40
#define RT_SDHCI_TIMEOUT_CLK_MASK RT_GENMASK(5, 0)
#define RT_SDHCI_TIMEOUT_CLK_SHIFT 0
#define RT_SDHCI_TIMEOUT_CLK_UNIT 0x00000080
#define RT_SDHCI_CLOCK_BASE_MASK RT_GENMASK(13, 8)
#define RT_SDHCI_CLOCK_BASE_SHIFT 8
#define RT_SDHCI_CLOCK_V3_BASE_MASK RT_GENMASK(15, 8)
#define RT_SDHCI_MAX_BLOCK_MASK 0x00030000
#define RT_SDHCI_MAX_BLOCK_SHIFT 16
#define RT_SDHCI_CAN_DO_8BIT 0x00040000
#define RT_SDHCI_CAN_DO_ADMA2 0x00080000
#define RT_SDHCI_CAN_DO_ADMA1 0x00100000
#define RT_SDHCI_CAN_DO_HISPD 0x00200000
#define RT_SDHCI_CAN_DO_SDMA 0x00400000
#define RT_SDHCI_CAN_DO_SUSPEND 0x00800000
#define RT_SDHCI_CAN_VDD_330 0x01000000
#define RT_SDHCI_CAN_VDD_300 0x02000000
#define RT_SDHCI_CAN_VDD_180 0x04000000
#define RT_SDHCI_CAN_64BIT_V4 0x08000000
#define RT_SDHCI_CAN_64BIT 0x10000000
#define RT_SDHCI_CAPABILITIES_1 0x44
#define RT_SDHCI_SUPPORT_SDR50 0x00000001
#define RT_SDHCI_SUPPORT_SDR104 0x00000002
#define RT_SDHCI_SUPPORT_DDR50 0x00000004
#define RT_SDHCI_DRIVER_TYPE_A 0x00000010
#define RT_SDHCI_DRIVER_TYPE_C 0x00000020
#define RT_SDHCI_DRIVER_TYPE_D 0x00000040
#define RT_SDHCI_RETUNING_TIMER_COUNT_MASK RT_GENMASK(11, 8)
#define RT_SDHCI_USE_SDR50_TUNING 0x00002000
#define RT_SDHCI_RETUNING_MODE_MASK RT_GENMASK(15, 14)
#define RT_SDHCI_CLOCK_MUL_MASK RT_GENMASK(23, 16)
#define RT_SDHCI_CAN_DO_ADMA3 0x08000000
#define RT_SDHCI_SUPPORT_HS400 0x80000000 /* Non-standard */
#define RT_SDHCI_MAX_CURRENT 0x48
#define RT_SDHCI_MAX_CURRENT_LIMIT RT_GENMASK(7, 0)
#define RT_SDHCI_MAX_CURRENT_330_MASK RT_GENMASK(7, 0)
#define RT_SDHCI_MAX_CURRENT_300_MASK RT_GENMASK(15, 8)
#define RT_SDHCI_MAX_CURRENT_180_MASK RT_GENMASK(23, 16)
#define RT_SDHCI_MAX_CURRENT_MULTIPLIER 4
/* 4C-4F reserved for more max current */
#define RT_SDHCI_SET_ACMD12_ERROR 0x50
#define RT_SDHCI_SET_INT_ERROR 0x52
#define RT_SDHCI_ADMA_ERROR 0x54
/* 55-57 reserved */
#define RT_SDHCI_ADMA_ADDRESS 0x58
#define RT_SDHCI_ADMA_ADDRESS_HI 0x5C
/* 60-FB reserved */
#define RT_SDHCI_PRESET_FOR_HIGH_SPEED 0x64
#define RT_SDHCI_PRESET_FOR_SDR12 0x66
#define RT_SDHCI_PRESET_FOR_SDR25 0x68
#define RT_SDHCI_PRESET_FOR_SDR50 0x6A
#define RT_SDHCI_PRESET_FOR_SDR104 0x6C
#define RT_SDHCI_PRESET_FOR_DDR50 0x6E
#define RT_SDHCI_PRESET_FOR_HS400 0x74 /* Non-standard */
#define RT_SDHCI_PRESET_DRV_MASK RT_GENMASK(15, 14)
#define BIT(nr) ((1) << (nr))
#define RT_SDHCI_PRESET_CLKGEN_SEL BIT(10)
#define RT_SDHCI_PRESET_SDCLK_FREQ_MASK RT_GENMASK(9, 0)
#define RT_SDHCI_SLOT_INT_STATUS 0xFC
#define RT_SDHCI_HOST_VERSION 0xFE
#define RT_SDHCI_VENDOR_VER_MASK 0xFF00
#define RT_SDHCI_VENDOR_VER_SHIFT 8
#define RT_SDHCI_SPEC_VER_MASK 0x00FF
#define RT_SDHCI_SPEC_VER_SHIFT 0
#define RT_SDHCI_SPEC_100 0
#define RT_SDHCI_SPEC_200 1
#define RT_SDHCI_SPEC_300 2
#define RT_SDHCI_SPEC_400 3
#define RT_SDHCI_SPEC_410 4
#define RT_SDHCI_SPEC_420 5
/*
* End of controller registers.
*/
#define RT_SDHCI_MAX_DIV_SPEC_200 256
#define RT_SDHCI_MAX_DIV_SPEC_300 2046
/*
* Host SDMA buffer boundary. Valid values from 4K to 512K in powers of 2.
*/
#define RT_SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024)
#define ilog2(v) __rt_ffs(v)
#define RT_SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(RT_SDHCI_DEFAULT_BOUNDARY_SIZE) - 12)
#define RT_SDHCI_MAX_SEGS 128
/* Allow for a command request and a data request at the same time */
#define RT_SDHCI_MAX_MRQS 2
#define MMC_CMD_TRANSFER_TIME (10 * 1000000L) /* max 10 ms */
enum rt_sdhci_cookie
{
COOKIE_UNMAPPED,
COOKIE_PRE_MAPPED, /* mapped by sdhci_pre_req() */
COOKIE_MAPPED, /* mapped by sdhci_prepare_data() */
};
struct rt_sdhci_host
{
const char *hw_name; /* Hardware bus name */
unsigned int quirks; /* Deviations from spec. */
void *data_buf;
/* Controller doesn't honor resets unless we touch the clock register */
#define RT_SDHCI_QUIRK_CLOCK_BEFORE_RESET (1 << 0)
/* Controller has bad caps bits, but really supports DMA */
#define RT_SDHCI_QUIRK_FORCE_DMA (1 << 1)
/* Controller doesn't like to be reset when there is no card inserted. */
#define RT_SDHCI_QUIRK_NO_CARD_NO_RESET (1 << 2)
/* Controller doesn't like clearing the power reg before a change */
#define RT_SDHCI_QUIRK_SINGLE_POWER_WRITE (1 << 3)
/* Controller has an unusable DMA engine */
#define RT_SDHCI_QUIRK_BROKEN_DMA (1 << 5)
/* Controller has an unusable ADMA engine */
#define RT_SDHCI_QUIRK_BROKEN_ADMA (1 << 6)
/* Controller can only DMA from 32-bit aligned addresses */
#define RT_SDHCI_QUIRK_32BIT_DMA_ADDR (1 << 7)
/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
#define RT_SDHCI_QUIRK_32BIT_DMA_SIZE (1 << 8)
/* Controller can only ADMA chunks that are a multiple of 32 bits */
#define RT_SDHCI_QUIRK_32BIT_ADMA_SIZE (1 << 9)
/* Controller needs to be reset after each request to stay stable */
#define RT_SDHCI_QUIRK_RESET_AFTER_REQUEST (1 << 10)
/* Controller needs voltage and power writes to happen separately */
#define RT_SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1 << 11)
/* Controller provides an incorrect timeout value for transfers */
#define RT_SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1 << 12)
/* Controller has an issue with buffer bits for small transfers */
#define RT_SDHCI_QUIRK_BROKEN_SMALL_PIO (1 << 13)
/* Controller does not provide transfer-complete interrupt when not busy */
#define RT_SDHCI_QUIRK_NO_BUSY_IRQ (1 << 14)
/* Controller has unreliable card detection */
#define RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION (1 << 15)
/* Controller reports inverted write-protect state */
#define RT_SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1 << 16)
/* Controller has unusable command queue engine */
#define RT_SDHCI_QUIRK_BROKEN_CQE (1 << 17)
/* Controller does not like fast PIO transfers */
#define RT_SDHCI_QUIRK_PIO_NEEDS_DELAY (1 << 18)
/* Controller does not have a LED */
#define RT_SDHCI_QUIRK_NO_LED (1 << 19)
/* Controller has to be forced to use block size of 2048 bytes */
#define RT_SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1 << 20)
/* Controller cannot do multi-block transfers */
#define RT_SDHCI_QUIRK_NO_MULTIBLOCK (1 << 21)
/* Controller can only handle 1-bit data transfers */
#define RT_SDHCI_QUIRK_FORCE_1_BIT_DATA (1 << 22)
/* Controller needs 10ms delay between applying power and clock */
#define RT_SDHCI_QUIRK_DELAY_AFTER_POWER (1 << 23)
/* Controller uses SDCLK instead of TMCLK for data timeouts */
#define RT_SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1 << 24)
/* Controller reports wrong base clock capability */
#define RT_SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1 << 25)
/* Controller cannot support End Attribute in NOP ADMA descriptor */
#define RT_SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1 << 26)
/* Controller uses Auto CMD12 command to stop the transfer */
#define RT_SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1 << 28)
/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
#define RT_SDHCI_QUIRK_NO_HISPD_BIT (1 << 29)
/* Controller treats ADMA descriptors with length 0000h incorrectly */
#define RT_SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1 << 30)
/* The read-only detection via RT_SDHCI_PRESENT_STATE register is unstable */
#define RT_SDHCI_QUIRK_UNSTABLE_RO_DETECT (1 << 31)
unsigned int quirks2; /* More deviations from spec. */
#define RT_SDHCI_QUIRK2_HOST_OFF_CARD_ON (1 << 0)
#define RT_SDHCI_QUIRK2_HOST_NO_CMD23 (1 << 1)
/* The system physically doesn't support 1.8v, even if the host does */
#define RT_SDHCI_QUIRK2_NO_1_8_V (1 << 2)
#define RT_SDHCI_QUIRK2_PRESET_VALUE_BROKEN (1 << 3)
#define RT_SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON (1 << 4)
/* Controller has a non-standard host control register */
#define RT_SDHCI_QUIRK2_BROKEN_HOST_CONTROL (1 << 5)
/* Controller does not support HS200 */
#define RT_SDHCI_QUIRK2_BROKEN_HS200 (1 << 6)
/* Controller does not support DDR50 */
#define RT_SDHCI_QUIRK2_BROKEN_DDR50 (1 << 7)
/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */
#define RT_SDHCI_QUIRK2_STOP_WITH_TC (1 << 8)
/* Controller does not support 64-bit DMA */
#define RT_SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1 << 9)
/* Need to clear the transfer mode register before sending a command */
#define RT_SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1 << 10)
/* Capability register bit-63 indicates HS400 support */
#define RT_SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1 << 11)
/* forced tuned clock */
#define RT_SDHCI_QUIRK2_TUNING_WORK_AROUND (1 << 12)
/* disable the block count for single block transactions */
#define RT_SDHCI_QUIRK2_SUPPORT_SINGLE (1 << 13)
/* Controller broken with using ACMD23 */
#define RT_SDHCI_QUIRK2_ACMD23_BROKEN (1 << 14)
/* Broken Clock divider zero in controller */
#define RT_SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1 << 15)
/* Controller has CRC in 136 bit Command Response */
#define RT_SDHCI_QUIRK2_RSP_136_HAS_CRC (1 << 16)
#define RT_SDHCI_QUIRK2_DISABLE_HW_TIMEOUT (1 << 17)
#define RT_SDHCI_QUIRK2_USE_32BIT_BLK_CNT (1 << 18)
/* Issue CMD and DATA reset together */
#define RT_SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER (1 << 19)
int irq; /* Device IRQ */
void *ioaddr; /* Mapped address */
char *bounce_buffer; /* For packing SDMA reads/writes */
rt_uint64_t bounce_addr;
unsigned int bounce_buffer_size;
const struct rt_sdhci_ops *ops; /* Low level hw interface */
/* Internal data */
struct rt_mmc_host *mmc; /* MMC structure */
struct mmc_host_ops mmc_host_ops; /* MMC host ops */
rt_uint64_t dma_mask; /* custom DMA mask */
rt_spinlock_t lock;
int flags; /* Host attributes */
#define RT_SDHCI_USE_SDMA (1 << 0) /* Host is SDMA capable */
#define RT_SDHCI_USE_ADMA (1 << 1) /* Host is ADMA capable */
#define RT_SDHCI_REQ_USE_DMA (1 << 2) /* Use DMA for this req. */
#define RT_SDHCI_DEVICE_DEAD (1 << 3) /* Device unresponsive */
#define RT_SDHCI_SDR50_NEEDS_TUNING (1 << 4) /* SDR50 needs tuning */
#define RT_SDHCI_AUTO_CMD12 (1 << 6) /* Auto CMD12 support */
#define RT_SDHCI_AUTO_CMD23 (1 << 7) /* Auto CMD23 support */
#define RT_SDHCI_PV_ENABLED (1 << 8) /* Preset value enabled */
#define RT_SDHCI_USE_64_BIT_DMA (1 << 12) /* Use 64-bit DMA */
#define RT_SDHCI_HS400_TUNING (1 << 13) /* Tuning for HS400 */
#define RT_SDHCI_SIGNALING_330 (1 << 14) /* Host is capable of 3.3V signaling */
#define RT_SDHCI_SIGNALING_180 (1 << 15) /* Host is capable of 1.8V signaling */
#define RT_SDHCI_SIGNALING_120 (1 << 16) /* Host is capable of 1.2V signaling */
unsigned int version; /* RT_SDHCI spec. version */
unsigned int max_clk; /* Max possible freq (MHz) */
unsigned int timeout_clk; /* Timeout freq (KHz) */
rt_uint8_t max_timeout_count; /* Vendor specific max timeout count */
unsigned int clk_mul; /* Clock Multiplier value */
unsigned int clock; /* Current clock (MHz) */
rt_uint8_t pwr; /* Current voltage */
rt_uint8_t drv_type; /* Current UHS-I driver type */
rt_bool_t reinit_uhs; /* Force UHS-related re-initialization */
rt_bool_t runtime_suspended; /* Host is runtime suspended */
rt_bool_t bus_on; /* Bus power prevents runtime suspend */
rt_bool_t preset_enabled; /* Preset is enabled */
rt_bool_t pending_reset; /* Cmd/data reset is pending */
rt_bool_t irq_wake_enabled; /* IRQ wakeup is enabled */
rt_bool_t v4_mode; /* Host Version 4 Enable */
rt_bool_t always_defer_done; /* Always defer to complete requests */
struct rt_mmcsd_req *mrqs_done[RT_SDHCI_MAX_MRQS]; /* Requests done */
struct rt_mmcsd_cmd *cmd; /* Current command */
struct rt_mmcsd_cmd *data_cmd; /* Current data command */
struct rt_mmcsd_cmd *deferred_cmd; /* Deferred command */
struct rt_mmcsd_data *data; /* Current data request */
unsigned int data_early : 1; /* Data finished before cmd */
unsigned int blocks; /* remaining PIO blocks */
size_t align_buffer_sz; /* Bounce buffer size */
rt_uint64_t align_addr; /* Mapped bounce buffer */
struct rt_workqueue *complete_wq; /* Request completion wq */
struct rt_work complete_work; /* Request completion work */
struct rt_workqueue *irq_wq;
struct rt_work irq_work;
struct rt_timer timer; /* Timer for timeouts */
struct rt_timer data_timer; /* Timer for data timeouts */
rt_uint32_t caps; /* CAPABILITY_0 */
rt_uint32_t caps1; /* CAPABILITY_1 */
rt_bool_t read_caps; /* Capability flags have been read */
rt_bool_t sdhci_core_to_disable_vqmmc; /* sdhci core can disable vqmmc */
unsigned int ocr_avail_sdio; /* OCR bit masks */
unsigned int ocr_avail_sd;
unsigned int ocr_avail_mmc;
rt_uint32_t ocr_mask; /* available voltages */
unsigned timing; /* Current timing */
rt_uint32_t thread_isr;
/* cached registers */
rt_uint32_t ier;
rt_bool_t cqe_on; /* CQE is operating */
rt_uint32_t cqe_ier; /* CQE interrupt mask */
rt_uint32_t cqe_err_ier; /* CQE error interrupt mask */
rt_wqueue_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */
unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */
unsigned int tuning_count; /* Timer count for re-tuning */
unsigned int tuning_mode; /* Re-tuning mode supported by host */
unsigned int tuning_err; /* Error code for re-tuning */
#define RT_SDHCI_TUNING_MODE_1 0
#define RT_SDHCI_TUNING_MODE_2 1
#define RT_SDHCI_TUNING_MODE_3 2
/* Delay (ms) between tuning commands */
int tuning_delay;
int tuning_loop_count;
/* Host SDMA buffer boundary. */
rt_uint32_t sdma_boundary;
rt_uint64_t data_timeout;
unsigned long private[];
};
static inline rt_uint8_t u8_read(const volatile void *addr)
{
return *(const volatile rt_uint8_t *)addr;
}
static inline rt_uint16_t u16_read(const volatile void *addr)
{
return *(const volatile rt_uint16_t *)addr;
}
static inline rt_uint32_t u32_read(const volatile void *addr)
{
return *(const volatile rt_uint32_t *)addr;
}
static inline void u8_write(rt_uint8_t value, volatile void *addr)
{
*(volatile rt_uint8_t *)addr = value;
}
static inline void u16_write(rt_uint16_t value, volatile void *addr)
{
*(volatile rt_uint16_t *)addr = value;
}
static inline void u32_write(rt_uint32_t value, volatile void *addr)
{
*(volatile rt_uint32_t *)addr = value;
}
#define readb(c) u8_read(c)
#define readw(c) u16_read(c)
#define readl(c) u32_read(c)
#define readsb(p, d, l) ({ __raw_readsb(p,d,l); __iormb(); })
#define readsw(p, d, l) ({ __raw_readsw(p,d,l); __iormb(); })
#define readsl(p, d, l) ({ __raw_readsl(p,d,l); __iormb(); })
#define writeb(v, c) u8_write(v, c)
#define writew(v, c) u16_write(v, c)
#define writel(v, c) u32_write(v, c)
#define writesb(p, d, l) ({ __iowmb(); __raw_writesb(p,d,l); })
#define writesw(p, d, l) ({ __iowmb(); __raw_writesw(p,d,l); })
#define writesl(p, d, l) ({ __iowmb(); __raw_writesl(p,d,l); })
static inline void rt_sdhci_writel(struct rt_sdhci_host *host, rt_uint32_t val, int reg)
{
writel(val, host->ioaddr + reg);
}
static inline void rt_sdhci_writew(struct rt_sdhci_host *host, rt_uint16_t val, int reg)
{
writew(val, host->ioaddr + reg);
}
static inline void rt_sdhci_writeb(struct rt_sdhci_host *host, rt_uint8_t val, int reg)
{
writeb(val, host->ioaddr + reg);
}
static inline rt_uint32_t rt_sdhci_readl(struct rt_sdhci_host *host, int reg)
{
return readl(host->ioaddr + reg);
}
static inline rt_uint16_t rt_sdhci_readw(struct rt_sdhci_host *host, int reg)
{
return readw(host->ioaddr + reg);
}
static inline rt_uint8_t rt_sdhci_readb(struct rt_sdhci_host *host, int reg)
{
return readb(host->ioaddr + reg);
}
struct rt_sdhci_ops
{
void (*set_clock)(struct rt_sdhci_host *host, unsigned int clock);
void (*set_power)(struct rt_sdhci_host *host, unsigned char mode,
unsigned short vdd);
rt_uint32_t (*irq)(struct rt_sdhci_host *host, rt_uint32_t intmask);
int (*set_dma_mask)(struct rt_sdhci_host *host);
int (*enable_dma)(struct rt_sdhci_host *host);
unsigned int (*get_max_clock)(struct rt_sdhci_host *host);
unsigned int (*get_min_clock)(struct rt_sdhci_host *host);
unsigned int (*get_timeout_clock)(struct rt_sdhci_host *host);
unsigned int (*get_max_timeout_count)(struct rt_sdhci_host *host);
void (*set_timeout)(struct rt_sdhci_host *host,
struct rt_mmcsd_cmd *cmd);
void (*set_bus_width)(struct rt_sdhci_host *host, int width);
unsigned int (*get_ro)(struct rt_sdhci_host *host);
void (*reset)(struct rt_sdhci_host *host, rt_uint8_t mask);
int (*platform_execute_tuning)(struct rt_sdhci_host *host, rt_uint32_t opcode);
void (*set_uhs_signaling)(struct rt_sdhci_host *host, unsigned int uhs);
void (*hw_reset)(struct rt_sdhci_host *host);
void (*card_event)(struct rt_sdhci_host *host);
void (*voltage_switch)(struct rt_sdhci_host *host);
void (*request_done)(struct rt_sdhci_host *host,
struct rt_mmcsd_req *mrq);
};
struct rt_sdhci_host *rt_sdhci_alloc_host(struct rt_device *dev, size_t priv_size);
void rt_sdhci_free_host(struct rt_sdhci_host *host);
static inline void *sdhci_priv(struct rt_sdhci_host *host)
{
return host->private;
}
void rt_sdhci_read_caps(struct rt_sdhci_host *host, const rt_uint16_t *ver,
const rt_uint32_t *caps, const rt_uint32_t *caps1);
int rt_sdhci_setup_host(struct rt_sdhci_host *host);
void rt_sdhci_cleanup_host(struct rt_sdhci_host *host);
int rt_sdhci_set_and_add_host(struct rt_sdhci_host *host);
int rt_sdhci_init_host(struct rt_sdhci_host *host);
void rt_sdhci_uninit_host(struct rt_sdhci_host *host, int dead);
rt_uint16_t rt_sdhci_clk_set(struct rt_sdhci_host *host, unsigned int clock,
unsigned int *actual_clock);
void rt_sdhci_set_clock(struct rt_sdhci_host *host, unsigned int clock);
void rt_sdhci_clk_enable(struct rt_sdhci_host *host, rt_uint16_t clk);
void rt_sdhci_set_power(struct rt_sdhci_host *host, unsigned char mode,unsigned short vdd);
void rt_read_reg(struct rt_sdhci_host* host);
void rt_sdhci_set_power_with_noreg(struct rt_sdhci_host *host, unsigned char mode,
unsigned short vdd);
void rt_sdhci_start_request(struct rt_mmc_host *mmc, struct rt_mmcsd_req *mrq);
int rt_sdhci_start_request_atomic(struct rt_mmc_host *mmc, struct rt_mmcsd_req *mrq);
void rt_sdhci_set_bus_width(struct rt_sdhci_host *host, int width);
void rt_sdhci_reset(struct rt_sdhci_host *host, rt_uint8_t mask);
void rt_sdhci_set_uhs(struct rt_sdhci_host *host, unsigned timing);
int rt_sdhci_execute_tuning(struct rt_mmc_host *mmc, rt_uint32_t opcode);
int __sdhci_execute_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode);
void rt_sdhci_ios_set(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios);
int rt_sdhci_start_signal_voltage_switch(struct rt_mmc_host *mmc,
struct rt_mmcsd_io_cfg *ios);
void rt_sdhci_enable_io_irq(struct rt_mmc_host *mmc, int enable);
void rt_sdhci_start_tuning(struct rt_sdhci_host *host);
void rt_sdhci_end_tuning(struct rt_sdhci_host *host);
void rt_sdhci_reset_tuning(struct rt_sdhci_host *host);
void rt_sdhci_send_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode);
void rt_sdhci_abort_tuning(struct rt_sdhci_host *host, rt_uint32_t opcode);
void rt_sdhci_data_irq_timeout(struct rt_sdhci_host *host, rt_bool_t enable);
void rt_sdhci_timeout_set(struct rt_sdhci_host *host, struct rt_mmcsd_cmd *cmd);
void rt_read_reg_debug(struct rt_sdhci_host* host);
#endif /* __RT_SDHCI_HW_H */
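/* Editor's note: an illustrative sketch, not part of this commit. A controller
 * driver can reuse the generic helpers declared above and override only the
 * hooks it needs in its rt_sdhci_ops; the my_sdhci_* names are hypothetical. */
static void my_sdhci_reset(struct rt_sdhci_host *host, rt_uint8_t mask)
{
    rt_sdhci_reset(host, mask); /* generic register-level reset */

    if (mask & RT_SDHCI_RESET_ALL)
    {
        /* vendor-specific re-initialization after a full reset goes here */
    }
}

static const struct rt_sdhci_ops my_sdhci_ops =
{
    .set_clock         = rt_sdhci_set_clock,
    .set_bus_width     = rt_sdhci_set_bus_width,
    .reset             = my_sdhci_reset,
    .set_uhs_signaling = rt_sdhci_set_uhs,
};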

View File

@ -0,0 +1,345 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-16 zhujiale first version
*/
#ifndef __RT_SDHCI_MMC_H__
#define __RT_SDHCI_MMC_H__
#include <drivers/dev_mmcsd_core.h>
#include <rtthread.h>
#include <drivers/mmcsd_cmd.h>
#include <drivers/dev_mmcsd_core.h>
#include <drivers/mmcsd_host.h>
#define mmc_dev(x) ((x)->parent)
#define MMC_SEND_TUNING_BLOCK_HS200 SEND_TUNING_BLOCK_HS200
#define MMC_SEND_TUNING_BLOCK SEND_TUNING_BLOCK
#define MMC_STOP_TRANSMISSION STOP_TRANSMISSION
#define MMC_BUS_TEST_R 14 /* adtc R1 */
#define MMC_WRITE_MULTIPLE_BLOCK WRITE_MULTIPLE_BLOCK
#define MMC_READ_MULTIPLE_BLOCK READ_MULTIPLE_BLOCK
#define MMC_TIMING_UHS_DDR50 MMCSD_TIMING_UHS_DDR50
#define MMC_TIMING_UHS_SDR50 MMCSD_TIMING_UHS_SDR50
#define MMC_TIMING_MMC_HS200 MMCSD_TIMING_MMC_HS200
#define MMC_TIMING_MMC_HS400 MMCSD_TIMING_MMC_HS400
#define MMC_TIMING_UHS_SDR104 MMCSD_TIMING_UHS_SDR104
#define MMC_TIMING_UHS_SDR25 MMCSD_TIMING_UHS_SDR25
#define MMC_TIMING_MMC_DDR52 MMCSD_TIMING_MMC_DDR52
#define MMC_TIMING_UHS_SDR12 MMCSD_TIMING_UHS_SDR12
#define MMC_TIMING_SD_HS MMCSD_TIMING_SD_HS
#define MMC_TIMING_MMC_HS MMCSD_TIMING_MMC_HS
#define MMC_POWER_OFF MMCSD_POWER_OFF
#define MMC_POWER_UP MMCSD_POWER_UP
#define MMC_POWER_ON MMCSD_POWER_ON
#define MMC_POWER_UNDEFINED 3
#define MMC_SET_DRIVER_TYPE_B 0
#define MMC_SET_DRIVER_TYPE_A 1
#define MMC_SET_DRIVER_TYPE_C 2
#define MMC_SET_DRIVER_TYPE_D 3
#define MMC_SIGNAL_VOLTAGE_330 0
#define MMC_SIGNAL_VOLTAGE_180 1
#define MMC_SIGNAL_VOLTAGE_120 2
#define MMC_RSP_PRESENT (1 << 16)
#define MMC_RSP_136 (1 << 17) /* 136 bit response */
#define MMC_RSP_CRC (1 << 18) /* expect valid crc */
#define MMC_RSP_BUSY (1 << 19) /* card may send busy */
#define MMC_RSP_OPCODE (1 << 20) /* response contains opcode */
#define MMC_RSP_NONE (0)
#define MMC_RSP_R1 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_RSP_R1B (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE | MMC_RSP_BUSY)
#define MMC_RSP_R2 (MMC_RSP_PRESENT | MMC_RSP_136 | MMC_RSP_CRC)
#define MMC_RSP_R3 (MMC_RSP_PRESENT)
#define MMC_RSP_R4 (MMC_RSP_PRESENT)
#define MMC_RSP_R5 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_RSP_R6 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_RSP_R7 (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_CMD_ADTC CMD_ADTC
#define MMC_BUS_WIDTH_8 MMCSD_BUS_WIDTH_8
#define MMC_BUS_WIDTH_4 MMCSD_BUS_WIDTH_4
#define MMC_BUS_WIDTH_1 MMCSD_BUS_WIDTH_1
#define MMC_PM_KEEP_POWER (1 << 0) /* preserve card power during suspend */
#define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */
enum mmc_blk_status
{
MMC_BLK_SUCCESS = 0,
MMC_BLK_PARTIAL,
MMC_BLK_CMD_ERR,
MMC_BLK_RETRY,
MMC_BLK_ABORT,
MMC_BLK_DATA_ERR,
MMC_BLK_ECC_ERR,
MMC_BLK_NOMEDIUM,
MMC_BLK_NEW_REQUEST,
};
#define MMC_NUM_CLK_PHASES (MMC_TIMING_MMC_HS400 + 1)
struct rt_mmc_host;
struct mmc_host_ops
{
void (*request)(struct rt_mmc_host *host, struct rt_mmcsd_req *req);
void (*set_ios)(struct rt_mmc_host *host, struct rt_mmcsd_io_cfg *ios);
int (*get_ro)(struct rt_mmc_host *host);
int (*get_cd)(struct rt_mmc_host *host);
void (*enable_sdio_irq)(struct rt_mmc_host *host, int enable);
void (*ack_sdio_irq)(struct rt_mmc_host *host);
int (*start_signal_voltage_switch)(struct rt_mmc_host *host, struct rt_mmcsd_io_cfg *ios);
int (*card_busy)(struct rt_mmc_host *host);
int (*execute_tuning)(struct rt_mmc_host *host, unsigned opcode);
int (*prepare_hs400_tuning)(struct rt_mmc_host *host, struct rt_mmcsd_io_cfg *ios);
int (*hs400_prepare_ddr)(struct rt_mmc_host *host);
void (*hs400_downgrade)(struct rt_mmc_host *host);
void (*hs400_complete)(struct rt_mmc_host *host);
void (*hs400_enhanced_strobe)(struct rt_mmc_host *host,
struct rt_mmcsd_io_cfg* ios);
void (*hw_reset)(struct rt_mmc_host* host);
void (*card_event)(struct rt_mmc_host* host);
};
struct regulator;
struct mmc_pwrseq;
struct mmc_supply
{
struct regulator *vmmc; /* Card power supply */
struct regulator *vqmmc; /* Optional Vccq supply */
};
struct mmc_ctx
{
struct task_struct *task;
};
/* VDD voltage 3.3 ~ 3.4 */
#define MMC_VDD_34_35 0x00400000 /* VDD voltage 3.4 ~ 3.5 */
#define MMC_VDD_35_36 0x00800000 /* VDD voltage 3.5 ~ 3.6 */
#define MMC_CAP2_HS200_1_8V_SDR MMCSD_SUP_HS200_1V8
#define MMC_CAP_4_BIT_DATA MMCSD_BUSWIDTH_4
#define MMC_CAP_8_BIT_DATA MMCSD_BUSWIDTH_8
#define MMC_CAP2_HS200 MMCSD_SUP_HS200
#define MMC_CAP_MMC_HIGHSPEED MMCSD_SUP_HIGHSPEED
#define MMC_CAP_SD_HIGHSPEED MMCSD_SUP_HIGHSPEED
#define MMC_CAP_1_8V_DDR MMCSD_SUP_DDR_1V8
#define MMC_CAP_3_3V_DDR MMCSD_SUP_DDR_3V3
#define MMC_CAP_1_2V_DDR MMCSD_SUP_DDR_1V2
#define MMC_CAP_NONREMOVABLE MMCSD_SUP_NONREMOVABLE
#define MMC_CAP_UHS_DDR50 0
#define MMC_CAP2_HS400 0
#define MMC_CAP_UHS_SDR50 0
#define MMC_CAP_UHS_SDR25 0
#define MMC_CAP_UHS_SDR12 0
#define MMC_CAP_UHS_SDR104 0
#define MMC_CAP_UHS 0
#define MMC_CAP2_HSX00_1_8V 0
#define MMC_CAP2_HS400_ES 0
#define MMC_CAP_NEEDS_POLL 0
#define MMC_CAP2_HSX00_1_2V 0
#define MMC_CAP2_HS400_1_8V 0
#define MMC_CAP_DRIVER_TYPE_D 0
#define MMC_CAP_DRIVER_TYPE_C 0
#define MMC_SET_DRIVER_TYPE_B 0
#define MMC_CAP_DRIVER_TYPE_A 0
#define MMC_CAP2_SDIO_IRQ_NOTHREAD 0
#define MMC_CAP_CMD23 0
#define MMC_CAP_SDIO_IRQ 0
#define MMC_CAP2_NO_SDIO (1 << 19)
#define MMC_CAP2_NO_SD (1 << 21)
#define MMC_CAP2_NO_MMC (1 << 22)
#define MMC_CAP2_CQE (1 << 23)
#define MMC_VDD_165_195 VDD_165_195
#define MMC_VDD_20_21 VDD_20_21
#define MMC_VDD_29_30 VDD_29_30
#define MMC_VDD_30_31 VDD_30_31
#define MMC_VDD_32_33 VDD_32_33
#define MMC_VDD_33_34 VDD_33_34
struct rt_mmc_host
{
struct rt_mmcsd_host rthost;
struct rt_device *parent;
int index;
const struct mmc_host_ops *ops;
unsigned int f_min;
unsigned int f_max;
unsigned int f_init;
rt_uint32_t ocr_avail;
rt_uint32_t ocr_avail_sdio; /* SDIO-specific OCR */
rt_uint32_t ocr_avail_sd; /* SD-specific OCR */
rt_uint32_t ocr_avail_mmc; /* MMC-specific OCR */
struct wakeup_source *ws; /* Enable consume of uevents */
rt_uint32_t max_current_330;
rt_uint32_t max_current_300;
rt_uint32_t max_current_180;
rt_uint32_t caps; /* Host capabilities */
rt_uint32_t caps2; /* More host capabilities */
/* host specific block data */
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
unsigned short max_segs; /* see blk_queue_max_segments */
unsigned short unused;
unsigned int max_req_size; /* maximum number of bytes in one req */
unsigned int max_blk_size; /* maximum size of one mmc block */
unsigned int max_blk_count; /* maximum number of blocks in one req */
unsigned int max_busy_timeout; /* max busy timeout in ms */
struct rt_mmcsd_io_cfg ios; /* current io bus settings */
unsigned int retune_period;
/* group bitfields together to minimize padding */
unsigned int use_spi_crc : 1;
unsigned int claimed : 1; /* host exclusively claimed */
unsigned int doing_init_tune : 1; /* initial tuning in progress */
unsigned int can_retune : 1; /* re-tuning can be used */
unsigned int doing_retune : 1; /* re-tuning in progress */
unsigned int retune_now : 1; /* do re-tuning at next req */
unsigned int retune_paused : 1; /* re-tuning is temporarily disabled */
unsigned int retune_crc_disable : 1; /* don't trigger retune upon crc */
unsigned int can_dma_map_merge : 1; /* merging can be used */
unsigned int vqmmc_enabled : 1; /* vqmmc regulator is enabled */
int need_retune; /* re-tuning is needed */
int hold_retune; /* hold off re-tuning */
rt_bool_t trigger_card_event; /* card_event necessary */
unsigned int sdio_irqs;
rt_bool_t sdio_irq_pending;
struct led_trigger *led; /* activity led */
struct mmc_supply supply;
/* Ongoing data transfer that allows commands during transfer */
struct rt_mmcsd_req *ongoing_mrq;
unsigned int actual_clock; /* Actual HC clock rate */
rt_uint32_t pm_caps;
unsigned long private[];
};
static inline int mmc_card_is_removable(struct rt_mmc_host *host)
{
return !(host->caps & MMC_CAP_NONREMOVABLE);
}
struct device_node;
struct rt_mmc_host *rt_mmc_alloc_host(int extra, struct rt_device *);
int rt_mmc_add_host(struct rt_mmc_host *);
void rt_mmc_remove_host(struct rt_mmc_host *);
void rt_mmc_free_host(struct rt_mmc_host *);
int rt_mmc_of_parse(struct rt_mmc_host *host);
int rt_mmc_of_parse_voltage(struct rt_mmc_host *host, rt_uint32_t *mask);
static inline void *mmc_priv(struct rt_mmc_host *host)
{
return (void *)host->private;
}
#define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI)
#define mmc_dev(x) ((x)->parent)
#define mmc_classdev(x) (&(x)->class_dev)
#define mmc_hostname(x) (x->parent->parent.name)
void rt_mmc_detect_change(struct rt_mmc_host *, unsigned long delay);
void rt_mmc_request_done(struct rt_mmc_host *, struct rt_mmcsd_req *);
void mmc_command_done(struct rt_mmc_host *host, struct rt_mmcsd_req *mrq);
void mmc_cqe_request_done(struct rt_mmc_host *host, struct rt_mmcsd_req *mrq);
static inline rt_bool_t sdio_irq_claimed(struct rt_mmc_host *host)
{
return host->sdio_irqs > 0;
}
static inline int mmc_regulator_set_ocr(struct rt_mmc_host *mmc,
struct regulator *supply,
unsigned short vdd_bit)
{
return 0;
}
int mmc_regulator_get_supply(struct rt_mmc_host *mmc);
int mmc_regulator_enable_vqmmc(struct rt_mmc_host *mmc);
void mmc_regulator_disable_vqmmc(struct rt_mmc_host *mmc);
void mmc_retune_timer_stop(struct rt_mmc_host* host);
enum dma_data_direction
{
DMA_BIDIRECTIONAL = 0,
DMA_TO_DEVICE = 1,
DMA_FROM_DEVICE = 2,
DMA_NONE = 3,
};
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
static inline void mmc_retune_needed(struct rt_mmc_host *host)
{
if (host->can_retune)
host->need_retune = 1;
}
static inline rt_bool_t mmc_can_retune(struct rt_mmc_host *host)
{
return host->can_retune == 1;
}
static inline rt_bool_t mmc_doing_retune(struct rt_mmc_host *host)
{
return host->doing_retune == 1;
}
static inline rt_bool_t mmc_doing_tune(struct rt_mmc_host *host)
{
return host->doing_retune == 1 || host->doing_init_tune == 1;
}
static inline int mmc_get_dma_dir(struct rt_mmcsd_data *data)
{
return data->flags & DATA_DIR_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
static inline rt_bool_t mmc_op_multi(rt_uint32_t opcode)
{
return opcode == MMC_WRITE_MULTIPLE_BLOCK || opcode == MMC_READ_MULTIPLE_BLOCK;
}
static inline rt_bool_t mmc_op_tuning(rt_uint32_t opcode)
{
return opcode == MMC_SEND_TUNING_BLOCK || opcode == MMC_SEND_TUNING_BLOCK_HS200;
}
int rt_mmc_gpio_get_cd(struct rt_mmc_host *host);
void rt_mmc_detect_change(struct rt_mmc_host *host, unsigned long delay);
int rt_mmc_regulator_set_vqmmc(struct rt_mmc_host *mmc, struct rt_mmcsd_io_cfg *ios);
rt_bool_t rt_mmc_can_gpio_ro(struct rt_mmc_host *host);
int rt_mmc_gpio_get_ro(struct rt_mmc_host *host);
int rt_mmc_send_abort_tuning(struct rt_mmc_host *host, rt_uint32_t opcode);
int rt_mmc_of_parse(struct rt_mmc_host *host);
#endif

View File

@ -0,0 +1,70 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-16 zhujiale first version
*/
#ifndef __RT_SDHCI_MISC_H__
#define __RT_SDHCI_MISC_H__
#define __BF_FIELD_CHECK(...)
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
#define FIELD_GET(_mask, _reg) \
({ \
__BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \
(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
})
#define FIELD_PREP(_mask, _val) \
({ \
__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
})
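/* Editor's usage sketch (not part of this commit): FIELD_GET/FIELD_PREP mirror
 * the Linux bitfield helpers. For a 10-bit field in bits 9:0, such as the SDHCI
 * preset-register clock divider: */
static inline rt_uint16_t example_repack_divider(rt_uint16_t preset)
{
    const rt_uint16_t mask = 0x03FF;           /* bits 9:0 */
    rt_uint16_t div = FIELD_GET(mask, preset); /* extract the field */

    return FIELD_PREP(mask, div);              /* pack it back into position */
}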
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define min_t(type, x, y) (((type)(x) < (type)(y)) ? (x) : (y))
#define max_t(type, x, y) (((type)(x) > (type)(y)) ? (x) : (y))
#define min(x, y) ((x) < (y) ? (x) : (y))
#define from_timer(var, callback_timer, timer_fieldname) \
container_of(callback_timer, typeof(*var), timer_fieldname)
#define le32_to_cpu(x) (x)
#define le16_to_cpu(x) (x)
#define cpu_to_le16(x) (x)
#define cpu_to_le32(x) (x)
#define lower_32_bits(n) ((rt_uint32_t)((n) & 0xffffffff))
#define upper_32_bits(n) ((rt_uint32_t)(((n) >> 16) >> 16))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define do_div(n, base) ({ \
uint32_t __base = (base); \
uint32_t __rem; \
__rem = ((uint64_t)(n)) % __base; \
(n) = ((uint64_t)(n)) / __base; \
__rem; \
})
#define fallthrough \
do { \
} while (0)
int regulator_is_supported_voltage(struct regulator *regulator,
int min_uV, int max_uV);
rt_bool_t rt_mmc_can_gpio_cd(struct rt_mmc_host *host);
struct regulator
{
const char *supply_name;
};
int regulator_get_current_limit(struct regulator *regulator);
#endif

View File

@ -0,0 +1,125 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-08-16 zhujiale first version
*/
#include "sdhci-platform.h"
static const struct rt_sdhci_ops sdhci_pltfm_ops = {
.set_clock = rt_sdhci_set_clock,
.set_bus_width = rt_sdhci_set_bus_width,
.reset = rt_sdhci_reset,
.set_uhs_signaling = rt_sdhci_set_uhs,
};
void rt_sdhci_get_property(struct rt_platform_device *pdev)
{
struct rt_device *dev = &pdev->parent;
struct rt_sdhci_host *host = pdev->priv;
struct rt_sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
rt_uint32_t bus_width;
if (rt_dm_dev_prop_read_bool(dev, "sdhci,auto-cmd12"))
host->quirks |= RT_SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
if (rt_dm_dev_prop_read_bool(dev, "sdhci,1-bit-only") || (rt_dm_dev_prop_read_u32(dev, "bus-width", &bus_width) == 0 && bus_width == 1))
host->quirks |= RT_SDHCI_QUIRK_FORCE_1_BIT_DATA;
if (rt_dm_dev_prop_read_bool(dev, "broken-cd"))
host->quirks |= RT_SDHCI_QUIRK_BROKEN_CARD_DETECTION;
if (rt_dm_dev_prop_read_bool(dev, "no-1-8-v"))
host->quirks2 |= RT_SDHCI_QUIRK2_NO_1_8_V;
rt_dm_dev_prop_read_u32(dev, "clock-frequency", &pltfm_host->clock);
if (rt_dm_dev_prop_read_bool(dev, "keep-power-in-suspend"))
host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
if (rt_dm_dev_prop_read_bool(dev, "wakeup-source") || rt_dm_dev_prop_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
}
struct rt_sdhci_host *rt_sdhci_pltfm_init(struct rt_platform_device *pdev,
const struct rt_sdhci_pltfm_data *pdata,
size_t priv_size)
{
struct rt_sdhci_host *host;
struct rt_device *dev = &pdev->parent;
void *ioaddr;
int irq;
ioaddr = rt_dm_dev_iomap(dev, 0);
if (!ioaddr)
{
return RT_NULL;
}
irq = rt_dm_dev_get_irq(dev, 0);
if (irq < 0)
{
return RT_NULL;
}
host = rt_sdhci_alloc_host(dev,sizeof(struct rt_sdhci_pltfm_host) + priv_size);
if (!host)
{
return RT_NULL;
}
host->irq = irq;
host->ioaddr = ioaddr;
host->hw_name = rt_dm_dev_get_name(dev);
if (pdata && pdata->ops)
host->ops = pdata->ops;
else
host->ops = &sdhci_pltfm_ops;
if (pdata)
{
host->quirks = pdata->quirks;
host->quirks2 = pdata->quirks2;
}
pdev->priv = host;
return host;
}
int rt_sdhci_pltfm_init_and_add_host(struct rt_platform_device *pdev,
const struct rt_sdhci_pltfm_data *pdata,
size_t priv_size)
{
struct rt_sdhci_host *host;
int ret = 0;
host = rt_sdhci_pltfm_init(pdev, pdata, priv_size);
if (!host)
return -RT_ERROR;
rt_sdhci_get_property(pdev);
ret = rt_sdhci_init_host(host);
if (ret)
rt_sdhci_pltfm_free(pdev);
return ret;
}
void rt_sdhci_pltfm_free(struct rt_platform_device *pdev)
{
struct rt_sdhci_host *host = pdev->priv;
rt_sdhci_free_host(host);
}
void rt_sdhci_pltfm_remove(struct rt_platform_device *pdev)
{
struct rt_sdhci_host *host = pdev->priv;
int dead = (readl(host->ioaddr + RT_SDHCI_INT_STATUS) == 0xffffffff);
rt_sdhci_uninit_host(host, dead);
rt_sdhci_pltfm_free(pdev);
}

File diff suppressed because it is too large

View File

@ -21,4 +21,7 @@ menuconfig RT_USING_SERIAL
int "Set RX buffer size"
depends on !RT_USING_SERIAL_V2
default 64
endif
config RT_USING_SERIAL_BYPASS
bool "Using serial bypass"
default n
endif

View File

@ -16,6 +16,9 @@ if GetDepend(['RT_USING_SERIAL_V2']):
else:
src += ['dev_serial.c']
if GetDepend(['RT_USING_SERIAL_BYPASS']):
src += ['bypass.c']
if GetDepend(['RT_USING_DM']):
src += ['serial_dm.c']

View File

@ -0,0 +1,355 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-11-20 zhujiale the first version
*/
#include <rtdevice.h>
#define DBG_TAG "UART"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
static struct rt_serial_bypass_func* rt_bypass_alloc_func(const char* name, rt_uint8_t level, bypass_function_t func, void* data)
{
struct rt_serial_bypass_func* bypass;
if (!func)
return RT_NULL;
bypass = rt_malloc(sizeof(struct rt_serial_bypass_func));
if (!bypass)
return RT_NULL;
rt_memset(bypass, 0, sizeof(struct rt_serial_bypass_func));
/* truncate over-long names but keep the copy null-terminated */
if (rt_strlen(name) > RT_NAME_MAX - 1)
{
rt_memcpy(bypass->name, name, RT_NAME_MAX - 1);
bypass->name[RT_NAME_MAX - 1] = '\0';
}
else
rt_memcpy(bypass->name, name, rt_strlen(name) + 1);
bypass->level = level;
rt_list_init(&bypass->node);
bypass->bypass = func;
bypass->data = data;
return bypass;
}
rt_err_t rt_serial_bypass_init(struct rt_serial_device* serial)
{
serial->bypass = rt_malloc(sizeof(struct rt_serial_bypass));
rt_memset(serial->bypass, 0, sizeof(struct rt_serial_bypass));
serial->bypass->pipe = rt_ringbuffer_create(serial->config.bufsz);
serial->bypass->mutex = rt_mutex_create("serial_bypass", RT_IPC_FLAG_FIFO);
return RT_EOK;
}
static rt_err_t rt_bypass_register(struct rt_serial_bypass_head* bypass, const char* name, rt_uint8_t level, bypass_function_t func, void* data)
{
struct rt_serial_bypass_func* pass = RT_NULL;
struct rt_list_node* node;
rt_base_t flags;
RT_DEBUG_NOT_IN_INTERRUPT;
pass = rt_bypass_alloc_func(name, level, func, data);
RT_ASSERT(bypass != RT_NULL);
node = bypass->head.next;
if (node == &bypass->head)
{
rt_list_insert_before(&pass->node, node);
return RT_EOK;
}
flags = rt_spin_lock_irqsave(&(bypass->spinlock));
do {
struct rt_serial_bypass_func* temp_curr;
temp_curr = rt_container_of(node, struct rt_serial_bypass_func, node);
if (level < temp_curr->level)
{
rt_list_insert_before(node, &pass->node);
rt_spin_unlock_irqrestore(&(bypass->spinlock), flags);
return RT_EOK;
}
else if (level == temp_curr->level)
{
rt_spin_unlock_irqrestore(&(bypass->spinlock), flags);
LOG_E("Conflict: bypass [%s] level conflicts with [%s] at level [%d]\n", name, temp_curr->name, level);
rt_free(pass);
return -RT_ERROR;
}
node = node->next;
} while (node != &bypass->head);
rt_list_insert_before(&bypass->head, &pass->node);
rt_spin_unlock_irqrestore(&(bypass->spinlock), flags);
return RT_EOK;
}
rt_err_t rt_bypass_upper_register(struct rt_serial_device* serial, const char* name, rt_uint8_t level, bypass_function_t func, void* data)
{
if (!serial->bypass)
rt_serial_bypass_init(serial);
if (!serial->bypass->upper_h)
{
serial->bypass->upper_h = rt_malloc(sizeof(struct rt_serial_bypass_head));
rt_spin_lock_init(&serial->bypass->upper_h->spinlock);
rt_list_init(&serial->bypass->upper_h->head);
}
return rt_bypass_register(serial->bypass->upper_h, name, level, func, data);
}
void rt_bypass_putchar(struct rt_serial_device* serial, rt_uint8_t ch)
{
rt_mutex_take(serial->bypass->mutex, RT_WAITING_FOREVER);
rt_ringbuffer_putchar(serial->bypass->pipe, ch);
rt_mutex_release(serial->bypass->mutex);
}
rt_size_t rt_bypass_getchar(struct rt_serial_device* serial, rt_uint8_t* ch)
{
int flags;
rt_mutex_take(serial->bypass->mutex, RT_WAITING_FOREVER);
flags = rt_ringbuffer_getchar(serial->bypass->pipe, ch);
rt_mutex_release(serial->bypass->mutex);
return flags;
}
static inline rt_err_t _bypass_getchar_form_serial_fifo(struct rt_serial_device* serial, char* ch)
{
rt_base_t level;
struct rt_serial_rx_fifo* rx_fifo;
rx_fifo = (struct rt_serial_rx_fifo*)serial->serial_rx;
/* disable interrupt */
level = rt_spin_lock_irqsave(&(serial->spinlock));
/* there's no data: */
if ((rx_fifo->get_index == rx_fifo->put_index) && (rx_fifo->is_full == RT_FALSE))
{
/* no data, enable interrupt and break out */
rt_spin_unlock_irqrestore(&(serial->spinlock), level);
return -RT_EEMPTY;
}
/* otherwise there's the data: */
*ch = rx_fifo->buffer[rx_fifo->get_index];
rx_fifo->get_index += 1;
if (rx_fifo->get_index >= serial->config.bufsz) rx_fifo->get_index = 0;
if (rx_fifo->is_full == RT_TRUE)
{
rx_fifo->is_full = RT_FALSE;
}
/* enable interrupt */
rt_spin_unlock_irqrestore(&(serial->spinlock), level);
return RT_EOK;
}
static void _lower_work(struct rt_serial_device* serial)
{
struct rt_list_node* node;
struct rt_serial_bypass_func* temp_curr = RT_NULL;
if (serial->bypass && serial->bypass->lower_h)
{
while (1)
{
char ch;
if (_bypass_getchar_form_serial_fifo(serial, &ch))
return;
node = serial->bypass->lower_h->head.next;
while (node != &serial->bypass->lower_h->head)
{
temp_curr = rt_container_of(node, struct rt_serial_bypass_func, node);
if (!temp_curr->bypass(serial, ch, temp_curr->data))
{
break;
}
node = node->next;
}
if (node == &serial->bypass->lower_h->head)
{
rt_bypass_putchar(serial, ch);
}
}
}
}
static void rt_lower_work(struct rt_work* work, void* work_data)
{
struct rt_serial_device* serial = (struct rt_serial_device*)work_data;
RT_ASSERT(serial != RT_NULL);
_lower_work(serial);
}
rt_err_t rt_bypass_lower_register(struct rt_serial_device* serial, const char* name, rt_uint8_t level, bypass_function_t func, void* data)
{
if (!serial->bypass)
rt_serial_bypass_init(serial);
if (!serial->bypass->lower_h)
{
serial->bypass->lower_workq = rt_workqueue_create("serial bypass", RT_SYSTEM_WORKQUEUE_STACKSIZE,
RT_SYSTEM_WORKQUEUE_PRIORITY);
rt_work_init(&serial->bypass->work, rt_lower_work, (void*)serial);
serial->bypass->lower_h = rt_malloc(sizeof(struct rt_serial_bypass_head));
rt_spin_lock_init(&serial->bypass->lower_h->spinlock);
rt_list_init(&serial->bypass->lower_h->head);
}
return rt_bypass_register(serial->bypass->lower_h, name, level, func, data);
}
void rt_bypass_work_straight(struct rt_serial_device* serial)
{
if (serial->bypass && serial->bypass->lower_h)
{
_lower_work(serial);
return;
}
while (1)
{
char ch;
if (_bypass_getchar_form_serial_fifo(serial, &ch))
return;
rt_bypass_putchar(serial, ch);
}
}
rt_err_t rt_bypass_unregister(struct rt_serial_bypass_head* bypass, rt_uint8_t level)
{
struct rt_list_node* node;
struct rt_serial_bypass_func* temp_curr = RT_NULL;
int flags;
    /* Protected levels cannot be unregistered; they are generally used by msh or the tty */
if (level > RT_BYPASS_PROTECT_LEVEL_1)
return -RT_ERROR;
if (!bypass)
return -RT_ERROR;
node = bypass->head.next;
flags = rt_spin_lock_irqsave(&(bypass->spinlock));
do {
temp_curr = rt_container_of(node, struct rt_serial_bypass_func, node);
if (level == temp_curr->level)
{
rt_list_remove(node);
rt_spin_unlock_irqrestore(&(bypass->spinlock), flags);
rt_free(temp_curr);
return RT_EOK;
}
node = node->next;
} while (node != &bypass->head);
LOG_E("Can't find bypass with level [%d]", level);
rt_spin_unlock_irqrestore(&(bypass->spinlock), flags);
return -RT_ERROR;
}
rt_err_t rt_bypass_upper_unregister(struct rt_serial_device* serial, rt_uint8_t level)
{
if (!serial->bypass || !serial->bypass->upper_h)
return -RT_ERROR;
return rt_bypass_unregister(serial->bypass->upper_h, level);
}
rt_err_t rt_bypass_lower_unregister(struct rt_serial_device* serial, rt_uint8_t level)
{
if (!serial->bypass || !serial->bypass->lower_h)
return -RT_ERROR;
return rt_bypass_unregister(serial->bypass->lower_h, level);
}
int serial_bypass_list(int argc, char** argv)
{
struct rt_serial_device* serial = RT_NULL;
struct rt_serial_bypass_func* current;
struct rt_list_node* node;
int flags;
serial = (struct rt_serial_device*)rt_console_get_device();
if (!serial || !serial->bypass)
{
rt_kprintf("Serial bypass not initialized.\n");
return -1;
}
    /* Walk the upper bypass chain */
if (serial->bypass->upper_h)
{
rt_kprintf("Upper bypass chain:\n");
node = serial->bypass->upper_h->head.next;
        flags = rt_spin_lock_irqsave(&(serial->bypass->upper_h->spinlock)); /* lock */
while (node != &serial->bypass->upper_h->head)
{
current = rt_container_of(node, struct rt_serial_bypass_func, node);
rt_kprintf(" - Name: [%s], Level: [%d]\n", current->name, current->level);
node = node->next;
}
        rt_spin_unlock_irqrestore(&(serial->bypass->upper_h->spinlock), flags); /* unlock */
}
else
{
rt_kprintf("Upper bypass chain is empty.\n");
}
    /* Walk the lower bypass chain */
if (serial->bypass->lower_h)
{
rt_kprintf("Lower bypass chain:\n");
node = serial->bypass->lower_h->head.next;
        flags = rt_spin_lock_irqsave(&(serial->bypass->lower_h->spinlock)); /* lock */
while (node != &serial->bypass->lower_h->head)
{
current = rt_container_of(node, struct rt_serial_bypass_func, node);
rt_kprintf(" - Name: [%s], Level: [%d]\n", current->name, current->level);
node = node->next;
}
        rt_spin_unlock_irqrestore(&(serial->bypass->lower_h->spinlock), flags); /* unlock */
}
else
{
rt_kprintf("Lower bypass chain is empty.\n");
}
return 0;
}
MSH_CMD_EXPORT(serial_bypass_list, serial bypass list)
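
/*
 * A minimal usage sketch (not part of this commit, assumptions noted inline):
 * register a lower bypass handler that filters characters before they reach
 * the serial RX pipe. The handler name, the level value 5 and the filtered
 * character are made up for illustration. Based on the chain walk in
 * _lower_work() above, returning 0 consumes the character (it is not queued),
 * while a non-zero return passes it on to the next handler and, eventually,
 * into the pipe; lower level values run earlier in the chain.
 */
static rt_err_t demo_drop_xoff(struct rt_serial_device *serial, char ch, void *data)
{
    if (ch == 0x13)      /* XOFF */
        return 0;        /* consumed: stop the chain here */

    return -RT_ERROR;    /* not handled: let the next handler or the pipe take it */
}

static void demo_bypass_setup(struct rt_serial_device *serial)
{
    /* level 5 is an arbitrary example value, chosen below any protected level */
    rt_bypass_lower_register(serial, "xoff", 5, demo_drop_xoff, RT_NULL);
}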

View File

@ -27,6 +27,7 @@
* 2020-12-14 Meco Man implement function of setting window's size(TIOCSWINSZ)
* 2021-08-22 Meco Man implement function of getting window's size(TIOCGWINSZ)
* 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable
* 2024-11-25 zhujiale add bypass mode
*/
#include <rthw.h>
@ -303,6 +304,23 @@ rt_inline int _serial_int_rx(struct rt_serial_device *serial, rt_uint8_t *data,
rx_fifo = (struct rt_serial_rx_fifo*) serial->serial_rx;
RT_ASSERT(rx_fifo != RT_NULL);
#ifdef RT_USING_SERIAL_BYPASS
if (serial->bypass)
{
rt_bypass_work_straight(serial);
while (length)
{
rt_uint8_t ch;
if (!rt_bypass_getchar(serial, &ch))
break;
*data = ch & 0xff;
data++; length--;
}
return size - length;
}
#endif
/* read from software FIFO */
while (length)
{
@ -1413,10 +1431,31 @@ void rt_hw_serial_isr(struct rt_serial_device *serial, int event)
ch = serial->ops->getc(serial);
if (ch == -1) break;
/* disable interrupt */
level = rt_spin_lock_irqsave(&(serial->spinlock));
#ifdef RT_USING_SERIAL_BYPASS
if (serial->bypass && serial->bypass->upper_h && (serial->bypass->upper_h->head.next != &serial->bypass->upper_h->head))
{
rt_bool_t skip = RT_FALSE;
char buf = (char)ch;
int ret;
rt_list_t* node = serial->bypass->upper_h->head.next;
do {
struct rt_serial_bypass_func* bypass_run = rt_container_of(node, struct rt_serial_bypass_func, node);
ret = bypass_run->bypass(serial, buf, bypass_run->data);
if (!ret)
{
skip = RT_TRUE;
break;
}
node = node->next;
} while (node != &serial->bypass->upper_h->head);
if (skip)
continue;
}
#endif
level = rt_spin_lock_irqsave(&(serial->spinlock));
rx_fifo->buffer[rx_fifo->put_index] = ch;
rx_fifo->put_index += 1;
if (rx_fifo->put_index >= serial->config.bufsz) rx_fifo->put_index = 0;
@ -1435,17 +1474,12 @@ void rt_hw_serial_isr(struct rt_serial_device *serial, int event)
rt_spin_unlock_irqrestore(&(serial->spinlock), level);
}
/**
* Invoke callback.
* First try notify if any, and if notify is existed, rx_indicate()
* is not callback. This separate the priority and makes the reuse
* of same serial device reasonable for RT console.
*/
if (serial->rx_notify.notify)
{
serial->rx_notify.notify(serial->rx_notify.dev);
}
else if (serial->parent.rx_indicate != RT_NULL)
#ifdef RT_USING_SERIAL_BYPASS
if (serial->bypass && serial->bypass->lower_h)
rt_workqueue_dowork(serial->bypass->lower_workq, &serial->bypass->work);
#endif
if (serial->parent.rx_indicate != RT_NULL)
{
rt_size_t rx_length;

View File

@ -114,59 +114,24 @@ static void _setup_debug_rxind_hook(void)
#endif /* LWP_DEBUG_INIT */
static void _tty_rx_notify(struct rt_device *device)
static rt_err_t _serial_ty_bypass(struct rt_serial_device* serial, char ch, void *data)
{
lwp_tty_t tp;
struct serial_tty_context *softc;
tp = rt_container_of(device, struct lwp_tty, parent);
RT_ASSERT(tp);
softc = tty_softc(tp);
if (_ttyworkq)
rt_workqueue_submit_work(_ttyworkq, &softc->work, 0);
}
static void _tty_rx_worker(struct rt_work *work, void *data)
{
char input;
rt_ssize_t readbytes;
lwp_tty_t tp = data;
struct serial_tty_context *softc;
struct rt_serial_device *serial;
tp = (lwp_tty_t)data;
tty_lock(tp);
while (1)
{
softc = tty_softc(tp);
serial = softc->parent;
readbytes = rt_device_read(&serial->parent, -1, &input, 1);
if (readbytes != 1)
{
break;
}
ttydisc_rint(tp, input, 0);
}
ttydisc_rint(tp, ch, 0);
ttydisc_rint_done(tp);
tty_unlock(tp);
return RT_EOK;
}
rt_inline void _setup_serial(struct rt_serial_device *serial, lwp_tty_t tp,
rt_inline void _setup_serial(struct rt_serial_device* serial, lwp_tty_t tp,
struct serial_tty_context *softc)
{
struct rt_device_notify notify;
softc->backup_notify = serial->rx_notify;
notify.dev = &tp->parent;
notify.notify = _tty_rx_notify;
rt_device_init(&serial->parent);
rt_device_control(&serial->parent, RT_DEVICE_CTRL_NOTIFY_SET, &notify);
rt_bypass_lower_register(serial, "tty",RT_BYPASS_PROTECT_LEVEL_1, _serial_ty_bypass,(void *)tp);
}
rt_inline void _restore_serial(struct rt_serial_device *serial, lwp_tty_t tp,
@ -345,7 +310,6 @@ rt_err_t rt_hw_serial_register_tty(struct rt_serial_device *serial)
{
_serial_tty_set_speed(tty);
rc = lwp_tty_register(tty, dev_name);
rt_work_init(&softc->work, _tty_rx_worker, tty);
if (rc != RT_EOK)
{

View File

@ -13,6 +13,8 @@
#define __SMP_IPI_H__
#include <rtthread.h>
#ifdef RT_USING_SMP
/* callback of smp call */
typedef void (*rt_smp_call_cb_t)(void *data);
typedef rt_bool_t (*rt_smp_cond_t)(int cpu, void *info);
@ -66,4 +68,7 @@ rt_inline size_t rt_smp_get_next_remote(size_t iter, size_t cpuid)
return iter == cpuid ? iter + 1 : iter;
}
#define rt_smp_for_each_remote_cpu(_iter, _cpuid) for (_iter = rt_smp_get_next_remote(-1, _cpuid); (_iter) < RT_CPUS_NR; _iter=rt_smp_get_next_remote(_iter, _cpuid))
#endif
#endif // RT_USING_SMP
#endif // __SMP_IPI_H__
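
/*
 * Illustrative sketch (not part of this commit): walk every remote CPU with
 * the rt_smp_for_each_remote_cpu() helper above. rt_hw_cpu_id() is assumed to
 * be the usual RT-Thread way to obtain the current CPU index.
 */
static void demo_count_remote_cpus(void)
{
    size_t iter, remote = 0, cpuid = rt_hw_cpu_id();

    rt_smp_for_each_remote_cpu(iter, cpuid)
    {
        remote++; /* visits every CPU except the caller's */
    }

    rt_kprintf("remote cpus: %d\n", (int)remote);
}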

View File

@ -0,0 +1,28 @@
menuconfig RT_USING_THERMAL
bool "Using Thermal Management device drivers"
depends on RT_USING_DM
default n
if RT_USING_THERMAL
comment "Thermal Sensors Drivers"
endif
if RT_USING_THERMAL
osource "$(SOC_DM_THERMAL_DIR)/Kconfig"
endif
if RT_USING_THERMAL
comment "Thermal Cool Drivers"
endif
config RT_THERMAL_COOL_PWM_FAN
bool "PWM Fan"
depends on RT_USING_THERMAL
depends on RT_USING_PWM
depends on RT_USING_REGULATOR
depends on RT_USING_OFW
default n
if RT_USING_THERMAL
osource "$(SOC_DM_THERMAL_COOL_DIR)/Kconfig"
endif

View File

@ -0,0 +1,18 @@
from building import *
group = []
if not GetDepend(['RT_USING_THERMAL']):
Return('group')
cwd = GetCurrentDir()
CPPPATH = [cwd + '/../include']
src = ['thermal.c', 'thermal_dm.c']
if GetDepend(['RT_THERMAL_COOL_PWM_FAN']):
src += ['thermal-cool-pwm-fan.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
Return('group')

View File

@ -0,0 +1,288 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-3-08 GuEe-GUI the first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#define DBG_TAG "thermal.cool.pwm-fan"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#define MAX_PWM 255
struct pwm_fan_cool
{
struct rt_thermal_cooling_device parent;
rt_uint32_t pwm_fan_level;
rt_uint32_t pwm_fan_max_level;
rt_uint32_t *pwm_fan_cooling_levels;
struct rt_device_pwm *pwm_dev;
struct rt_pwm_configuration pwm_conf;
struct rt_regulator *supply;
struct rt_spinlock lock;
};
#define raw_to_pwm_fan_cool(raw) rt_container_of(raw, struct pwm_fan_cool, parent)
static rt_err_t pwm_fan_power_on(struct pwm_fan_cool *pf_cool)
{
rt_err_t err = RT_EOK;
if ((err = rt_pwm_enable(pf_cool->pwm_dev, pf_cool->pwm_conf.channel)))
{
return err;
}
if (pf_cool->supply && (err = rt_regulator_enable(pf_cool->supply)))
{
rt_pwm_disable(pf_cool->pwm_dev, pf_cool->pwm_conf.channel);
return err;
}
return err;
}
static rt_err_t pwm_fan_power_off(struct pwm_fan_cool *pf_cool)
{
rt_err_t err = RT_EOK;
if (pf_cool->supply && (err = rt_regulator_disable(pf_cool->supply)))
{
return err;
}
if ((err = rt_pwm_disable(pf_cool->pwm_dev, pf_cool->pwm_conf.channel)))
{
rt_regulator_enable(pf_cool->supply);
return err;
}
return err;
}
static rt_err_t pwm_fan_cool_get_max_level(struct rt_thermal_cooling_device *cdev,
rt_ubase_t *out_level)
{
struct pwm_fan_cool *pf_cool = raw_to_pwm_fan_cool(cdev);
*out_level = pf_cool->pwm_fan_max_level;
return RT_EOK;
}
static rt_err_t pwm_fan_cool_get_cur_level(struct rt_thermal_cooling_device *cdev,
rt_ubase_t *out_level)
{
struct pwm_fan_cool *pf_cool = raw_to_pwm_fan_cool(cdev);
*out_level = pf_cool->pwm_fan_level;
return RT_EOK;
}
static rt_err_t pwm_fan_cool_set_cur_level(struct rt_thermal_cooling_device *cdev,
rt_ubase_t level)
{
rt_ubase_t pwm;
rt_err_t err = RT_EOK;
struct pwm_fan_cool *pf_cool = raw_to_pwm_fan_cool(cdev);
if (pf_cool->pwm_fan_level == level)
{
return RT_EOK;
}
rt_spin_lock(&pf_cool->lock);
if ((pwm = pf_cool->pwm_fan_cooling_levels[level]))
{
rt_ubase_t period;
struct rt_pwm_configuration *pwm_conf = &pf_cool->pwm_conf;
period = pwm_conf->period;
pwm_conf->pulse = RT_DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
err = rt_pwm_set(pf_cool->pwm_dev,
pwm_conf->channel, pwm_conf->period, pwm_conf->pulse);
if (!err && pf_cool->pwm_fan_level == 0)
{
err = pwm_fan_power_on(pf_cool);
}
}
else if (pf_cool->pwm_fan_level > 0)
{
err = pwm_fan_power_off(pf_cool);
}
rt_spin_unlock(&pf_cool->lock);
if (!err)
{
pf_cool->pwm_fan_level = level;
}
    return err;
}
const static struct rt_thermal_cooling_device_ops pwm_fan_cool_ops =
{
.get_max_level = pwm_fan_cool_get_max_level,
.get_cur_level = pwm_fan_cool_get_cur_level,
.set_cur_level = pwm_fan_cool_set_cur_level,
};
static void pwm_fan_cool_free(struct pwm_fan_cool *pf_cool)
{
if (!rt_is_err_or_null(pf_cool->supply))
{
rt_regulator_put(pf_cool->supply);
}
if (pf_cool->pwm_fan_cooling_levels)
{
rt_free(pf_cool->pwm_fan_cooling_levels);
}
rt_free(pf_cool);
}
static rt_err_t pwm_fan_cool_probe(struct rt_platform_device *pdev)
{
rt_err_t err;
int levels_nr;
struct rt_ofw_cell_args pwm_args;
struct rt_device *dev = &pdev->parent;
struct rt_ofw_node *np = dev->ofw_node, *pwm_np;
struct pwm_fan_cool *pf_cool = rt_calloc(1, sizeof(*pf_cool));
if (!pf_cool)
{
return -RT_ENOMEM;
}
if (rt_ofw_parse_phandle_cells(np, "pwms", "#pwm-cells", 0, &pwm_args))
{
err = -RT_EINVAL;
goto _fail;
}
pwm_np = pwm_args.data;
if (!rt_ofw_data(pwm_np))
{
rt_platform_ofw_request(pwm_np);
}
pf_cool->pwm_dev = rt_ofw_data(pwm_np);
rt_ofw_node_put(pwm_np);
if (!pf_cool->pwm_dev)
{
err = -RT_EINVAL;
goto _fail;
}
pf_cool->pwm_conf.channel = pwm_args.args[0];
pf_cool->pwm_conf.period = pwm_args.args[1];
pf_cool->supply = rt_regulator_get(dev, "fan");
if (rt_is_err(pf_cool->supply))
{
err = rt_ptr_err(pf_cool->supply);
goto _fail;
}
if ((levels_nr = rt_dm_dev_prop_count_of_u32(dev, "cooling-levels")) <= 0)
{
err = -RT_EINVAL;
goto _fail;
}
pf_cool->pwm_fan_cooling_levels = rt_calloc(levels_nr, sizeof(rt_uint32_t));
if (!pf_cool->pwm_fan_cooling_levels)
{
err = -RT_ENOMEM;
goto _fail;
}
if (rt_dm_dev_prop_read_u32_array_index(dev, "cooling-levels",
0, levels_nr, pf_cool->pwm_fan_cooling_levels) <= 0)
{
err = -RT_EINVAL;
goto _fail;
}
pf_cool->pwm_fan_level = MAX_PWM;
pf_cool->pwm_fan_max_level = levels_nr - 1;
rt_spin_lock_init(&pf_cool->lock);
pwm_fan_cool_set_cur_level(&pf_cool->parent, 0);
rt_dm_dev_set_name(&pf_cool->parent.parent, "%s", rt_dm_dev_get_name(&pdev->parent));
pf_cool->parent.parent.ofw_node = dev->ofw_node;
pf_cool->parent.ops = &pwm_fan_cool_ops;
if ((err = rt_thermal_cooling_device_register(&pf_cool->parent)))
{
goto _fail;
}
dev->user_data = pf_cool;
return RT_EOK;
_fail:
pwm_fan_cool_free(pf_cool);
return err;
}
static rt_err_t pwm_fan_cool_remove(struct rt_platform_device *pdev)
{
struct pwm_fan_cool *pf_cool = pdev->parent.ofw_node;
rt_thermal_cooling_device_unregister(&pf_cool->parent);
pwm_fan_power_off(pf_cool);
pwm_fan_cool_free(pf_cool);
return RT_EOK;
}
static rt_err_t pwm_fan_cool_shutdown(struct rt_platform_device *pdev)
{
return pwm_fan_cool_remove(pdev);
}
static const struct rt_ofw_node_id pwm_fan_cool_ofw_ids[] =
{
{ .compatible = "pwm-fan" },
{ /* sentinel */ }
};
static struct rt_platform_driver pwm_fan_cool_driver =
{
.name = "pwm-fan-cool",
.ids = pwm_fan_cool_ofw_ids,
.probe = pwm_fan_cool_probe,
.remove = pwm_fan_cool_remove,
.shutdown = pwm_fan_cool_shutdown,
};
RT_PLATFORM_DRIVER_EXPORT(pwm_fan_cool_driver);

View File

@ -0,0 +1,917 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-3-08 GuEe-GUI the first version
*/
#include <drivers/platform.h>
#define DBG_TAG "rtdm.thermal"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "thermal_dm.h"
#ifndef INT_MAX
#define INT_MAX (RT_UINT32_MAX >> 1)
#endif
#define device_list(dev) (dev)->parent.parent.list
#define device_foreach(dev, nodes) rt_list_for_each_entry(dev, nodes, parent.parent.list)
static struct rt_spinlock nodes_lock = {};
static rt_list_t thermal_zone_device_nodes = RT_LIST_OBJECT_INIT(thermal_zone_device_nodes);
static rt_list_t thermal_cooling_device_nodes = RT_LIST_OBJECT_INIT(thermal_cooling_device_nodes);
static rt_list_t thermal_cooling_governor_nodes = RT_LIST_OBJECT_INIT(thermal_cooling_governor_nodes);
#ifdef RT_USING_OFW
static void thermal_ofw_params_parse(struct rt_ofw_node *np,
struct rt_thermal_zone_params *tz_params)
{
rt_uint32_t coef[2], prop;
if (!np)
{
return;
}
if (!rt_ofw_prop_read_u32(np, "sustainable-power", &prop))
{
tz_params->sustainable_power = prop;
}
/*
* For now, the thermal framework supports only one sensor per thermal zone.
* Thus, we are considering only the first two values as slope and offset.
*/
    if (rt_ofw_prop_read_u32_array_index(np, "coefficients", 0, 2, coef) < 0)
{
coef[0] = 1;
coef[1] = 0;
}
tz_params->slope = coef[0];
tz_params->offset = coef[1];
}
static void thermal_ofw_setup(struct rt_ofw_node *np, struct rt_thermal_zone_device *zdev)
{
int i = 0;
rt_uint32_t delay, pdelay;
struct rt_ofw_cell_args args;
struct rt_ofw_node *tmp_np, *tz_np, *trip_np, *cm_np, *cdev_np;
if (!np || !zdev)
{
return;
}
tmp_np = rt_ofw_find_node_by_path("/thermal-zones");
if (!tmp_np)
{
return;
}
rt_ofw_foreach_child_node(tmp_np, tz_np)
{
if (!rt_ofw_parse_phandle_cells(tz_np, "thermal-sensors", "#thermal-sensor-cells", 0, &args))
{
if (args.data == np && (!args.args_count || args.args[0] == zdev->zone_id))
{
rt_ofw_node_put(args.data);
goto _found;
}
rt_ofw_node_put(args.data);
}
}
return;
_found:
rt_ofw_prop_read_u32(tz_np, "polling-delay-passive", &pdelay);
rt_ofw_prop_read_u32(tz_np, "polling-delay", &delay);
zdev->passive_delay = rt_tick_from_millisecond(pdelay);
zdev->polling_delay = rt_tick_from_millisecond(delay);
thermal_ofw_params_parse(tz_np, &zdev->params);
if (zdev->trips_nr)
{
goto _scan_cooling;
}
tmp_np = rt_ofw_get_child_by_tag(tz_np, "trips");
if (!tmp_np)
{
goto _scan_cooling;
}
zdev->trips_nr = rt_ofw_get_child_count(tmp_np);
if (!zdev->trips_nr)
{
goto _scan_cooling;
}
zdev->trips = rt_calloc(zdev->trips_nr, sizeof(*zdev->trips));
zdev->trips_free = RT_TRUE;
if (!zdev->trips)
{
LOG_E("%s: No memory to create %s", rt_ofw_node_full_name(np), "trips");
RT_ASSERT(0);
}
rt_ofw_foreach_child_node(tmp_np, trip_np)
{
const char *type;
rt_ofw_prop_read_u32(trip_np, "temperature", (rt_uint32_t *)&zdev->trips[i].temperature);
rt_ofw_prop_read_u32(trip_np, "hysteresis", (rt_uint32_t *)&zdev->trips[i].hysteresis);
rt_ofw_prop_read_string(trip_np, "type", &type);
zdev->trips[i].type = thermal_type(type);
rt_ofw_data(trip_np) = &zdev->trips[i];
++i;
}
_scan_cooling:
i = 0;
tmp_np = rt_ofw_get_child_by_tag(tz_np, "cooling-maps");
if (!tmp_np)
{
goto _end;
}
zdev->cooling_maps_nr = rt_ofw_get_child_count(tmp_np);
if (!zdev->cooling_maps_nr)
{
goto _end;
}
zdev->cooling_maps = rt_calloc(zdev->cooling_maps_nr, sizeof(*zdev->cooling_maps));
if (!zdev->cooling_maps)
{
LOG_E("%s: No memory to create %s", rt_ofw_node_full_name(np), "cooling_maps");
RT_ASSERT(0);
}
rt_ofw_foreach_child_node(tmp_np, cm_np)
{
struct rt_thermal_cooling_device *cdev;
struct rt_thermal_cooling_map *map = &zdev->cooling_maps[i++];
map->cells_nr = rt_ofw_count_phandle_cells(cm_np, "cooling-device", "#cooling-cells");
map->cells = rt_calloc(sizeof(*map->cells), map->cells_nr);
if (!map->cells)
{
LOG_E("%s: No memory to create %s", rt_ofw_node_full_name(np), "cells");
RT_ASSERT(0);
}
trip_np = rt_ofw_parse_phandle(cm_np, "trip", 0);
map->trips = rt_ofw_data(trip_np);
rt_ofw_node_put(trip_np);
if (!map->trips)
{
LOG_E("%s: trips(%s) not found", rt_ofw_node_full_name(np),
rt_ofw_node_full_name(trip_np));
RT_ASSERT(0);
}
rt_ofw_prop_read_u32(cm_np, "contribution", &map->contribution);
for (int c = 0; c < map->cells_nr; ++c)
{
struct rt_thermal_cooling_cell *cell = &map->cells[c];
if (rt_ofw_parse_phandle_cells(cm_np, "cooling-device", "#cooling-cells", c, &args))
{
continue;
}
cdev_np = args.data;
rt_spin_lock(&nodes_lock);
device_foreach(cdev, &thermal_cooling_device_nodes)
{
if (cdev->parent.ofw_node == cdev_np)
{
cell->cooling_devices = cdev;
break;
}
}
rt_spin_unlock(&nodes_lock);
cell->level_range[0] = args.args[0];
cell->level_range[1] = args.args[1];
if (cell->cooling_devices)
{
thermal_bind(cell->cooling_devices, zdev);
}
rt_ofw_node_put(cdev_np);
}
}
_end:;
}
#else
rt_inline void thermal_ofw_setup(struct rt_ofw_node *np, struct rt_thermal_zone_device *zdev)
{
}
#endif /* RT_USING_OFW */
static void thermal_zone_poll(struct rt_work *work, void *work_data)
{
struct rt_thermal_zone_device *zdev = work_data;
rt_thermal_zone_device_update(zdev, RT_THERMAL_MSG_EVENT_UNSPECIFIED);
}
rt_err_t rt_thermal_zone_device_register(struct rt_thermal_zone_device *zdev)
{
if (!zdev || !zdev->ops || !zdev->ops->get_temp)
{
return -RT_EINVAL;
}
zdev->ops->get_temp(zdev, &zdev->temperature);
zdev->last_temperature = zdev->temperature;
if (!zdev->trips)
{
zdev->trips_nr = 0;
}
rt_spin_lock_init(&zdev->nodes_lock);
rt_list_init(&zdev->notifier_nodes);
rt_list_init(&device_list(zdev));
rt_mutex_init(&zdev->mutex, rt_dm_dev_get_name(&zdev->parent), RT_IPC_FLAG_PRIO);
zdev->temperature = RT_THERMAL_TEMP_INVALID;
zdev->prev_low_trip = -INT_MAX;
zdev->prev_high_trip = INT_MAX;
rt_spin_lock(&nodes_lock);
rt_list_insert_before(&thermal_zone_device_nodes, &device_list(zdev));
rt_spin_unlock(&nodes_lock);
thermal_ofw_setup(zdev->parent.ofw_node, zdev);
rt_work_init(&zdev->poller, thermal_zone_poll, zdev);
zdev->enabled = RT_TRUE;
/* Start to poll */
rt_work_submit(&zdev->poller, zdev->polling_delay);
return RT_EOK;
}
rt_err_t rt_thermal_zone_device_unregister(struct rt_thermal_zone_device *zdev)
{
if (!zdev)
{
return -RT_EINVAL;
}
rt_spin_lock(&zdev->nodes_lock);
    if (!rt_list_isempty(&zdev->notifier_nodes))
{
LOG_E("%s: there is %u user", rt_dm_dev_get_name(&zdev->parent),
rt_list_len(&zdev->notifier_nodes));
rt_spin_unlock(&zdev->nodes_lock);
return -RT_EBUSY;
}
rt_spin_unlock(&zdev->nodes_lock);
rt_work_cancel(&zdev->poller);
rt_spin_lock(&nodes_lock);
rt_list_remove(&device_list(zdev));
rt_spin_unlock(&nodes_lock);
if (zdev->trips_free && zdev->trips)
{
rt_free(zdev->trips);
}
    if (zdev->cooling_maps && zdev->cooling_maps_nr)
{
for (int i = 0; i < zdev->cooling_maps_nr; ++i)
{
struct rt_thermal_cooling_device *cdev;
struct rt_thermal_cooling_map *map = &zdev->cooling_maps[i];
for (int c = 0; c < map->cells_nr; ++c)
{
                cdev = map->cells[c].cooling_devices;
if (cdev)
{
thermal_unbind(cdev, zdev);
}
}
rt_free(map->cells);
}
rt_free(zdev->cooling_maps);
}
rt_mutex_detach(&zdev->mutex);
return RT_EOK;
}
rt_err_t rt_thermal_cooling_device_register(struct rt_thermal_cooling_device *cdev)
{
rt_err_t err;
if (!cdev || !cdev->ops ||
!cdev->ops->get_max_level || !cdev->ops->get_cur_level || !cdev->ops->set_cur_level)
{
return -RT_EINVAL;
}
if ((err = cdev->ops->get_max_level(cdev, &cdev->max_level)))
{
return err;
}
rt_list_init(&device_list(cdev));
rt_list_init(&cdev->governor_node);
rt_spin_lock(&nodes_lock);
rt_list_insert_before(&thermal_cooling_device_nodes, &device_list(cdev));
rt_spin_unlock(&nodes_lock);
err = rt_thermal_cooling_device_change_governor(cdev, RT_NULL);
return err;
}
rt_err_t rt_thermal_cooling_device_unregister(struct rt_thermal_cooling_device *cdev)
{
if (!cdev)
{
return -RT_EINVAL;
}
if (cdev->parent.ref_count)
{
LOG_E("%s: there is %u user",
rt_dm_dev_get_name(&cdev->parent), cdev->parent.ref_count);
return -RT_EINVAL;
}
rt_spin_lock(&nodes_lock);
rt_list_remove(&device_list(cdev));
rt_spin_unlock(&nodes_lock);
return RT_EOK;
}
static void dumb_governor_tuning(struct rt_thermal_zone_device *zdev,
int map_idx, int cell_idx, rt_ubase_t *level)
{
struct rt_thermal_cooling_map *map = &zdev->cooling_maps[map_idx];
if (zdev->cooling && zdev->temperature > map->trips->temperature)
{
if (zdev->temperature - zdev->last_temperature > map->trips->hysteresis)
{
++*level;
}
else if (zdev->last_temperature - zdev->temperature > map->trips->hysteresis)
{
--*level;
}
}
else
{
*level = 0;
}
}
static struct rt_thermal_cooling_governor dumb_governor =
{
.name = "dumb",
.tuning = dumb_governor_tuning,
};
static int system_thermal_cooling_governor_init(void)
{
rt_thermal_cooling_governor_register(&dumb_governor);
return 0;
}
INIT_CORE_EXPORT(system_thermal_cooling_governor_init);
rt_err_t rt_thermal_cooling_governor_register(struct rt_thermal_cooling_governor *gov)
{
rt_err_t err = RT_EOK;
struct rt_thermal_cooling_governor *gov_tmp;
if (!gov || !gov->name || !gov->tuning)
{
return -RT_EINVAL;
}
rt_list_init(&gov->list);
rt_list_init(&gov->cdev_nodes);
rt_spin_lock(&nodes_lock);
rt_list_for_each_entry(gov_tmp, &thermal_cooling_governor_nodes, list)
{
if (!rt_strcmp(gov_tmp->name, gov->name))
{
err = -RT_ERROR;
goto _out_unlock;
}
}
rt_list_insert_before(&thermal_cooling_governor_nodes, &gov->list);
_out_unlock:
rt_spin_unlock(&nodes_lock);
return err;
}
rt_err_t rt_thermal_cooling_governor_unregister(struct rt_thermal_cooling_governor *gov)
{
if (!gov)
{
return -RT_EINVAL;
}
if (gov == &dumb_governor)
{
return -RT_EINVAL;
}
rt_spin_lock(&nodes_lock);
if (!rt_list_isempty(&gov->cdev_nodes))
{
goto _out_unlock;
}
rt_list_remove(&gov->list);
_out_unlock:
rt_spin_unlock(&nodes_lock);
return RT_EOK;
}
rt_err_t rt_thermal_cooling_device_change_governor(struct rt_thermal_cooling_device *cdev,
const char *name)
{
rt_err_t err;
struct rt_thermal_cooling_governor *gov;
if (!cdev)
{
return -RT_EINVAL;
}
name = name ? : dumb_governor.name;
err = -RT_ENOSYS;
rt_spin_lock(&nodes_lock);
rt_list_for_each_entry(gov, &thermal_cooling_governor_nodes, list)
{
if (!rt_strcmp(gov->name, name))
{
if (cdev->gov)
{
rt_list_remove(&cdev->governor_node);
}
cdev->gov = gov;
rt_list_insert_before(&cdev->governor_node, &gov->cdev_nodes);
err = RT_EOK;
break;
}
}
rt_spin_unlock(&nodes_lock);
return err;
}
rt_err_t rt_thermal_zone_notifier_register(struct rt_thermal_zone_device *zdev,
struct rt_thermal_notifier *notifier)
{
if (!zdev || !notifier)
{
return -RT_EINVAL;
}
notifier->zdev = zdev;
rt_list_init(&notifier->list);
rt_spin_lock(&zdev->nodes_lock);
rt_list_insert_after(&zdev->notifier_nodes, &notifier->list);
rt_spin_unlock(&zdev->nodes_lock);
return RT_EOK;
}
rt_err_t rt_thermal_zone_notifier_unregister(struct rt_thermal_zone_device *zdev,
struct rt_thermal_notifier *notifier)
{
if (!zdev || !notifier)
{
return -RT_EINVAL;
}
rt_spin_lock(&zdev->nodes_lock);
rt_list_remove(&notifier->list);
rt_spin_unlock(&zdev->nodes_lock);
return RT_EOK;
}
void rt_thermal_zone_device_update(struct rt_thermal_zone_device *zdev, rt_ubase_t msg)
{
rt_err_t err;
rt_bool_t passive = RT_FALSE, need_cool = RT_FALSE;
struct rt_thermal_notifier *notifier, *next_notifier;
RT_ASSERT(zdev != RT_NULL);
if (!rt_interrupt_get_nest())
{
rt_mutex_take(&zdev->mutex, RT_WAITING_FOREVER);
}
/* Check thermal zone status */
if (msg == RT_THERMAL_MSG_DEVICE_DOWN)
{
zdev->enabled = RT_FALSE;
}
else if (msg == RT_THERMAL_MSG_DEVICE_UP)
{
zdev->enabled = RT_TRUE;
}
/* Read temperature */
zdev->last_temperature = zdev->temperature;
zdev->ops->get_temp(zdev, &zdev->temperature);
for (int i = 0; i < zdev->trips_nr; ++i)
{
struct rt_thermal_trip *tmp_trip = &zdev->trips[i];
if (zdev->temperature <= tmp_trip->temperature)
{
continue;
}
switch (tmp_trip->type)
{
case RT_THERMAL_TRIP_PASSIVE:
passive = RT_TRUE;
goto cooling;
case RT_THERMAL_TRIP_CRITICAL:
if (zdev->ops->critical)
{
zdev->ops->critical(zdev);
}
else if (zdev->last_temperature > tmp_trip->temperature)
{
/* Tried to cool already, but failed */
rt_hw_cpu_reset();
}
else
{
goto cooling;
}
break;
case RT_THERMAL_TRIP_HOT:
if (zdev->ops->hot)
{
zdev->ops->hot(zdev);
break;
}
default:
cooling:
zdev->cooling = need_cool = RT_TRUE;
rt_thermal_cooling_device_kick(zdev);
break;
}
}
if (!need_cool && zdev->cooling)
{
rt_thermal_cooling_device_kick(zdev);
}
/* Set the new trips */
if (zdev->ops->set_trips)
{
rt_bool_t same_trip = RT_FALSE;
int low = -INT_MAX, high = INT_MAX;
struct rt_thermal_trip trip;
for (int i = 0; i < zdev->trips_nr; ++i)
{
int trip_low;
rt_bool_t low_set = RT_FALSE;
if (i >= zdev->trips_nr)
{
goto _call_notifier;
}
rt_memcpy(&trip, &zdev->trips[i], sizeof(trip));
trip_low = trip.temperature - trip.hysteresis;
if (trip_low < zdev->temperature && trip_low > low)
{
low = trip_low;
low_set = RT_TRUE;
same_trip = RT_FALSE;
}
if (trip.temperature > zdev->temperature && trip.temperature < high)
{
high = trip.temperature;
same_trip = low_set;
}
}
/* No need to change trip points */
if (zdev->prev_low_trip == low && zdev->prev_high_trip == high)
{
goto _call_notifier;
}
if (same_trip &&
(zdev->prev_low_trip != -INT_MAX || zdev->prev_high_trip != INT_MAX))
{
goto _call_notifier;
}
zdev->prev_low_trip = low;
zdev->prev_high_trip = high;
if ((err = zdev->ops->set_trips(zdev, low, high)))
{
LOG_E("%s: Set trips error = %s", rt_dm_dev_get_name(&zdev->parent),
rt_strerror(err));
}
}
/* Call all notifier, maybe have governor */
_call_notifier:
rt_spin_lock(&zdev->nodes_lock);
rt_list_for_each_entry_safe(notifier, next_notifier, &zdev->notifier_nodes, list)
{
rt_spin_unlock(&zdev->nodes_lock);
notifier->callback(notifier, msg);
rt_spin_lock(&zdev->nodes_lock);
}
rt_spin_unlock(&zdev->nodes_lock);
/* Prepare for the next report */
if (!zdev->enabled)
{
rt_work_cancel(&zdev->poller);
}
else if (passive && zdev->passive_delay)
{
rt_work_submit(&zdev->poller, zdev->passive_delay);
}
else if (zdev->polling_delay)
{
rt_work_submit(&zdev->poller, zdev->polling_delay);
}
if (!rt_interrupt_get_nest())
{
rt_mutex_release(&zdev->mutex);
}
}
void rt_thermal_cooling_device_kick(struct rt_thermal_zone_device *zdev)
{
RT_ASSERT(zdev != RT_NULL);
for (int i = 0; i < zdev->cooling_maps_nr; ++i)
{
rt_ubase_t level;
struct rt_thermal_cooling_device *cdev;
struct rt_thermal_cooling_cell *cell;
struct rt_thermal_cooling_map *map = &zdev->cooling_maps[i];
for (int c = 0; c < map->cells_nr; ++c)
{
cell = &map->cells[c];
cdev = cell->cooling_devices;
if (!cdev)
{
continue;
}
/* Update status */
if (cdev->ops->get_max_level(cdev, &cdev->max_level))
{
continue;
}
if (cdev->ops->get_cur_level(cdev, &level) || level > cdev->max_level)
{
continue;
}
/* Check if cooling is required */
if (level >= cell->level_range[0] && level <= cell->level_range[1])
{
                /* Already cooling within this range, no need to retune */
continue;
}
cdev->gov->tuning(zdev, i, c, &level);
level = rt_min_t(rt_ubase_t, level, cdev->max_level);
cdev->ops->set_cur_level(cdev, level);
}
}
}
rt_err_t rt_thermal_zone_set_trip(struct rt_thermal_zone_device *zdev, int trip_id,
const struct rt_thermal_trip *trip)
{
    rt_err_t err = RT_EOK;
struct rt_thermal_trip tmp_trip;
if (!zdev || !trip)
{
return -RT_EINVAL;
}
rt_mutex_take(&zdev->mutex, RT_WAITING_FOREVER);
if (!zdev->ops->set_trip_temp && !zdev->ops->set_trip_hyst && !zdev->trips)
{
err = -RT_EINVAL;
goto _out_unlock;
}
if (trip_id >= zdev->trips_nr)
{
err = -RT_EINVAL;
goto _out_unlock;
}
rt_memcpy(&tmp_trip, &zdev->trips[trip_id], sizeof(tmp_trip));
if (tmp_trip.type != trip->type)
{
err = -RT_EINVAL;
goto _out_unlock;
}
if (tmp_trip.temperature != trip->temperature && zdev->ops->set_trip_temp)
{
if ((err = zdev->ops->set_trip_temp(zdev, trip_id, trip->temperature)))
{
goto _out_unlock;
}
}
if (tmp_trip.hysteresis != trip->hysteresis && zdev->ops->set_trip_hyst)
{
if ((err = zdev->ops->set_trip_hyst(zdev, trip_id, trip->hysteresis)))
{
goto _out_unlock;
}
}
if (zdev->trips &&
(tmp_trip.temperature != trip->temperature || tmp_trip.hysteresis != trip->hysteresis))
{
zdev->trips[trip_id] = *trip;
}
_out_unlock:
rt_mutex_release(&zdev->mutex);
if (!err)
{
rt_thermal_zone_device_update(zdev, RT_THERMAL_MSG_TRIP_CHANGED);
}
return err;
}
rt_err_t rt_thermal_zone_get_trip(struct rt_thermal_zone_device *zdev, int trip_id,
struct rt_thermal_trip *out_trip)
{
rt_err_t err = RT_EOK;
if (!zdev || !out_trip)
{
return -RT_EINVAL;
}
rt_mutex_take(&zdev->mutex, RT_WAITING_FOREVER);
if (!zdev->trips_nr)
{
err = -RT_ENOSYS;
goto _out_unlock;
}
if (trip_id >= zdev->trips_nr)
{
err = -RT_EINVAL;
goto _out_unlock;
}
*out_trip = zdev->trips[trip_id];
_out_unlock:
rt_mutex_release(&zdev->mutex);
return err;
}
#if defined(RT_USING_CONSOLE) && defined(RT_USING_MSH)
static int list_thermal(int argc, char**argv)
{
struct rt_thermal_zone_device *zdev;
/* Thermal is an important subsystem, please do not output too much. */
rt_spin_lock(&nodes_lock);
device_foreach(zdev, &thermal_zone_device_nodes)
{
int temperature = zdev->temperature;
rt_kprintf("%s-%d\n", rt_dm_dev_get_name(&zdev->parent), zdev->zone_id);
rt_kprintf("temperature:\t%+d.%u C\n", temperature / 1000, rt_abs(temperature) % 1000);
for (int i = 0, id = 0; i < zdev->cooling_maps_nr; ++i)
{
rt_ubase_t level;
struct rt_thermal_trip *trips;
struct rt_thermal_cooling_device *cdev;
struct rt_thermal_cooling_cell *cell;
struct rt_thermal_cooling_map *map = &zdev->cooling_maps[i];
for (int c = 0; c < map->cells_nr; ++c, ++id)
{
trips = map->trips;
cell = &map->cells[c];
cdev = cell->cooling_devices;
if (cdev)
{
cdev->ops->get_cur_level(cdev, &level);
rt_kprintf("cooling%u:\t%s[%+d.%u C] %d\n", id,
rt_dm_dev_get_name(&cdev->parent),
trips->temperature / 1000, rt_abs(trips->temperature) % 1000,
level);
}
else
{
rt_kprintf("cooling%u:\t%s[%+d.%u C] %d\n", id,
"(not supported)",
trips->temperature / 1000, rt_abs(trips->temperature) % 1000,
0);
}
}
}
}
rt_spin_unlock(&nodes_lock);
return 0;
}
MSH_CMD_EXPORT(list_thermal, dump all of thermal information);
#endif /* RT_USING_CONSOLE && RT_USING_MSH */
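
/*
 * Illustrative sketch (not part of this commit): the smallest sensor driver
 * that could feed the zone/cooling machinery above. The ops structure name
 * "rt_thermal_zone_ops" and the exact get_temp() prototype are assumptions
 * inferred from how zdev->ops is used in this file; temperatures are in
 * millidegrees Celsius, as list_thermal() expects.
 */
static rt_err_t demo_get_temp(struct rt_thermal_zone_device *zdev, int *out_temp)
{
    *out_temp = 45000; /* pretend the die is at 45.000 C */
    return RT_EOK;
}

static const struct rt_thermal_zone_ops demo_zone_ops =
{
    .get_temp = demo_get_temp,
};

static struct rt_thermal_zone_device demo_zone =
{
    .ops = &demo_zone_ops,
};

static int demo_thermal_zone_init(void)
{
    /* trips and cooling maps are normally filled in from the device tree
     * by thermal_ofw_setup() during registration */
    return rt_thermal_zone_device_register(&demo_zone);
}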

View File

@ -0,0 +1,64 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-3-08 GuEe-GUI the first version
*/
#define DBG_TAG "rtdm.thermal"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "thermal_dm.h"
enum rt_thermal_trip_type thermal_type(const char *type)
{
if (!type)
{
return RT_THERMAL_TRIP_TYPE_MAX;
}
if (!rt_strcmp(type, "active"))
{
return RT_THERMAL_TRIP_ACTIVE;
}
else if (!rt_strcmp(type, "passive"))
{
return RT_THERMAL_TRIP_PASSIVE;
}
else if (!rt_strcmp(type, "hot"))
{
return RT_THERMAL_TRIP_HOT;
}
else if (!rt_strcmp(type, "critical"))
{
return RT_THERMAL_TRIP_CRITICAL;
}
return RT_THERMAL_TRIP_TYPE_MAX;
}
rt_err_t thermal_bind(struct rt_thermal_cooling_device *cdev,
struct rt_thermal_zone_device *zdev)
{
if (cdev->ops->bind)
{
return cdev->ops->bind(cdev, zdev);
}
return RT_EOK;
}
rt_err_t thermal_unbind(struct rt_thermal_cooling_device *cdev,
struct rt_thermal_zone_device *zdev)
{
if (cdev->ops->unbind)
{
return cdev->ops->unbind(cdev, zdev);
}
return RT_EOK;
}

View File

@ -0,0 +1,27 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-3-08 GuEe-GUI the first version
*/
#ifndef __THERMAL_DM_H__
#define __THERMAL_DM_H__
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#include <drivers/ofw.h>
enum rt_thermal_trip_type thermal_type(const char *type);
rt_err_t thermal_bind(struct rt_thermal_cooling_device *cdev,
struct rt_thermal_zone_device *zdev);
rt_err_t thermal_unbind(struct rt_thermal_cooling_device *cdev,
struct rt_thermal_zone_device *zdev);
#endif /* __THERMAL_DM_H__ */

View File

@ -6,6 +6,7 @@
* Change Logs:
* Date Author Notes
* 2018-08-03 tyx the first version
 * 2024-12-25     Evlers       add get_info api to obtain more up-to-date sta information
*/
#include <rthw.h>
@ -249,6 +250,25 @@ int rt_wlan_dev_get_rssi(struct rt_wlan_device *device)
return rssi;
}
rt_err_t rt_wlan_dev_get_info(struct rt_wlan_device *device, struct rt_wlan_info *info)
{
rt_err_t result = RT_EOK;
if (device == RT_NULL)
{
return -RT_EIO;
}
result = rt_device_control(RT_DEVICE(device), RT_WLAN_CMD_GET_INFO, info);
if (result != RT_EOK)
{
rt_set_errno(result);
        return result;
}
return result;
}
rt_err_t rt_wlan_dev_get_mac(struct rt_wlan_device *device, rt_uint8_t mac[6])
{
rt_err_t result = RT_EOK;
@ -784,6 +804,17 @@ static rt_err_t _rt_wlan_dev_control(rt_device_t dev, int cmd, void *args)
*rssi = wlan->ops->wlan_get_rssi(wlan);
break;
}
case RT_WLAN_CMD_GET_INFO:
{
struct rt_wlan_info *info = args;
LOG_D("%s %d cmd[%d]:%s run......", __FUNCTION__, __LINE__, RT_WLAN_CMD_GET_INFO, "RT_WLAN_CMD_GET_INFO");
if (wlan->ops->wlan_get_info)
err = wlan->ops->wlan_get_info(wlan, info);
else
err = -RT_ERROR;
break;
}
case RT_WLAN_CMD_SET_POWERSAVE:
{
int level = *((int *)args);

View File

@ -6,6 +6,7 @@
* Change Logs:
* Date Author Notes
* 2018-08-03 tyx the first version
 * 2024-12-25     Evlers       add get_info api to obtain more up-to-date sta information
*/
#ifndef __DEV_WLAN_DEVICE_H__
@ -15,6 +16,8 @@
extern "C" {
#endif
#define RT_WLAN_DEV_VERSION 0x10000 /* 1.0.0 */
typedef enum
{
RT_WLAN_NONE,
@ -34,6 +37,7 @@ typedef enum
RT_WLAN_CMD_AP_DEAUTH,
RT_WLAN_CMD_SCAN_STOP,
RT_WLAN_CMD_GET_RSSI, /* get sensitivity (dBm) */
RT_WLAN_CMD_GET_INFO, /* get information (rssi, channel, datarate.) */
RT_WLAN_CMD_SET_POWERSAVE,
RT_WLAN_CMD_GET_POWERSAVE,
RT_WLAN_CMD_CFG_PROMISC, /* start/stop minitor */
@ -497,6 +501,7 @@ struct rt_wlan_dev_ops
rt_err_t (*wlan_ap_deauth)(struct rt_wlan_device *wlan, rt_uint8_t mac[]);
rt_err_t (*wlan_scan_stop)(struct rt_wlan_device *wlan);
int (*wlan_get_rssi)(struct rt_wlan_device *wlan);
int (*wlan_get_info)(struct rt_wlan_device *wlan, struct rt_wlan_info *info);
rt_err_t (*wlan_set_powersave)(struct rt_wlan_device *wlan, int level);
int (*wlan_get_powersave)(struct rt_wlan_device *wlan);
rt_err_t (*wlan_cfg_promisc)(struct rt_wlan_device *wlan, rt_bool_t start);
@ -527,6 +532,7 @@ rt_err_t rt_wlan_dev_connect(struct rt_wlan_device *device, struct rt_wlan_info
rt_err_t rt_wlan_dev_fast_connect(struct rt_wlan_device *device, struct rt_wlan_info *info, const char *password, int password_len);
rt_err_t rt_wlan_dev_disconnect(struct rt_wlan_device *device);
int rt_wlan_dev_get_rssi(struct rt_wlan_device *device);
rt_err_t rt_wlan_dev_get_info(struct rt_wlan_device *device, struct rt_wlan_info *info);
/*
* wlan device ap interface

View File

@ -7,6 +7,7 @@
* Date Author Notes
* 2018-08-06 tyx the first version
* 2023-12-12 Evlers add the wlan join scan function
 * 2024-12-25     Evlers       add get_info api to obtain more up-to-date sta information
*/
#include <rthw.h>
@ -1185,8 +1186,14 @@ rt_err_t rt_wlan_get_info(struct rt_wlan_info *info)
if (rt_wlan_is_connected() == RT_TRUE)
{
        /* Initialize the information from the scan result first */
*info = _sta_mgnt.info;
info->rssi = rt_wlan_get_rssi();
        /* Prefer the get_info API, which can return more up-to-date information */
if (rt_wlan_dev_get_info(STA_DEVICE(), info) != RT_EOK)
{
            /* get_info failed, so read the rssi value separately */
info->rssi = rt_wlan_get_rssi();
}
return RT_EOK;
}
return -RT_ERROR;

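/*
 * Illustrative sketch (not part of this commit): query the connected STA
 * information through the new get_info path. Only the rssi field is shown
 * because it is the only member referenced in this hunk; other members of
 * struct rt_wlan_info depend on the driver. The usual wlan management header
 * is assumed to be included.
 */
static void demo_print_sta_info(void)
{
    struct rt_wlan_info info;

    if (rt_wlan_get_info(&info) == RT_EOK)
    {
        rt_kprintf("rssi: %d dBm\n", info.rssi);
    }
}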
View File

@ -13,13 +13,13 @@
#include <rtthread.h>
#include <stdlib.h>
#ifndef RT_USING_PICOLIBC
/**
* @brief erases the data in the n bytes of the memory starting at the
* location pointed to by s, by writing zeros (bytes containing '\0') to that area.
*
 * @note The bzero() function is deprecated (marked as LEGACY in POSIX.1-2001).
*/
#ifndef RT_USING_PICOLIBC
void bzero(void* s, size_t n)
{
rt_memset(s, 0, n);
@ -46,12 +46,12 @@ void explicit_bzero(void* s, size_t n)
}
}
char* index(const char* s, int c)
char *index(const char* s, int c)
{
return strchr(s, c);
}
char* rindex(const char* s, int c)
char *rindex(const char* s, int c)
{
return strrchr(s, c);
}
@ -99,7 +99,7 @@ int ffsll(long long i)
*
* @note This function is GNU extension, available since glibc 2.1.91.
*/
void* memrchr(const void* ptr, int ch, size_t pos)
void *memrchr(const void* ptr, int ch, size_t pos)
{
char* end = (char*)ptr + pos - 1;
while (end != ptr)
@ -118,7 +118,7 @@ size_t strnlen(const char *s, size_t maxlen)
return sc - s;
}
char* strchrnul(const char* s, int c)
char *strchrnul(const char* s, int c)
{
while (*s != '\0' && *s != c)
s++;

View File

@ -602,9 +602,9 @@ int clock_getres(clockid_t clockid, struct timespec *res)
switch (clockid)
{
#ifdef RT_USING_RTC
case CLOCK_REALTIME: // use RTC
case CLOCK_REALTIME_COARSE:
#ifdef RT_USING_RTC
return _control_rtc(RT_DEVICE_CTRL_RTC_GET_TIMERES, res);
#endif /* RT_USING_RTC */
@ -635,9 +635,9 @@ int clock_gettime(clockid_t clockid, struct timespec *tp)
switch (clockid)
{
#ifdef RT_USING_RTC
case CLOCK_REALTIME: // use RTC
case CLOCK_REALTIME_COARSE:
#ifdef RT_USING_RTC
return _control_rtc(RT_DEVICE_CTRL_RTC_GET_TIMESPEC, tp);
#endif /* RT_USING_RTC */
@ -679,8 +679,8 @@ int clock_nanosleep(clockid_t clockid, int flags, const struct timespec *rqtp, s
switch (clockid)
{
#ifdef RT_USING_RTC
case CLOCK_REALTIME: // use RTC
#ifdef RT_USING_RTC
if (flags & TIMER_ABSTIME)
err = _control_rtc(RT_DEVICE_CTRL_RTC_GET_TIMESPEC, &ts);
break;

View File

@ -112,12 +112,12 @@ int wcwidth(wchar_t ucs)
(ucs >= 0xf900 && ucs <= 0xfaff) || /* CJK Compatibility Ideographs */
(ucs >= 0xfe30 && ucs <= 0xfe6f) || /* CJK Compatibility Forms */
(ucs >= 0xff00 && ucs <= 0xff5f) || /* Fullwidth Forms */
(ucs >= 0xffe0 && ucs <= 0xffe6) // ||
//#ifndef _WIN32
// (ucs >= 0x20000 && ucs <= 0x2ffff)
//#else
// 0
//#endif
(ucs >= 0xffe0 && ucs <= 0xffe6) ||
#ifndef _WIN32
(ucs >= 0x20000 && ucs <= 0x2ffff)
#else
0
#endif
));
}

View File

@ -69,7 +69,9 @@ struct dirent
typedef uint64_t ino_t;
#endif
struct libc_dirent {
#ifdef RT_USING_MUSLLIBC
ino_t d_ino;
#endif
off_t d_off;
unsigned short d_reclen;
unsigned char d_type;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@ -546,16 +546,100 @@ static int timerfd_do_gettime(int fd, struct itimerspec *cur)
return 0;
}
/**
* @brief Creates a file descriptor for a timer.
*
* The `timerfd_create` function creates a new timer object that generates
* timer expiration notifications via a file descriptor.
*
* @param clockid The clock ID that specifies the clock to be used as the
* timing base for the timer. Common values include:
* - `CLOCK_REALTIME`: A system-wide clock representing
* wall-clock time.
* - `CLOCK_MONOTONIC`: A clock that cannot be set and
* represents monotonic time since some unspecified
* starting point.
* @param flags A bitmask that can include the following flags:
* - `TFD_CLOEXEC`: Close the file descriptor on `execve`.
* - `TFD_NONBLOCK`: Set the file descriptor to non-blocking mode.
*
* @return On success, returns a file descriptor for the timer. On error,
* returns -1 and sets `errno` appropriately.
*
* @note The file descriptor can be used with select, poll, or epoll to wait
* for timer expirations.
*
* @warning The timerfd interface is Linux-specific and may not be available
* on other operating systems.
*
* @see timerfd_settime, timerfd_gettime
*/
int timerfd_create(int clockid, int flags)
{
return timerfd_do_create(clockid, flags);
}
/**
* @brief Sets the time for a timer file descriptor.
*
* The `timerfd_settime` function starts or modifies the timer associated
* with the specified timer file descriptor.
*
* @param fd The file descriptor of the timer, obtained from
* `timerfd_create`.
* @param flags Flags that control the behavior of the timer. Possible
* values include:
* - `0`: Relative time is specified in `new`.
* - `TFD_TIMER_ABSTIME`: Use absolute time instead of
* relative time.
* @param new A pointer to a `itimerspec` structure that specifies the
* new timer settings:
* - `it_value`: The initial expiration time. A zero value
* means the timer is disabled.
* - `it_interval`: The interval for periodic timers. A zero
* value means the timer is not periodic.
* @param old A pointer to a `itimerspec` structure to store the
* previous timer settings. Can be `NULL` if this information
* is not needed.
*
* @return On success, returns 0. On error, returns -1 and sets `errno`
* appropriately.
*
* @note The timer starts counting down immediately after this call if
* `it_value` is non-zero.
*
* @warning If the timer is set to a very short interval, high-frequency
* events may impact system performance.
*
* @see timerfd_create, timerfd_gettime
*/
int timerfd_settime(int fd, int flags, const struct itimerspec *new, struct itimerspec *old)
{
return timerfd_do_settime(fd, flags, new, old);
}
/**
* @brief Retrieves the current value and interval of a timer.
*
* The `timerfd_gettime` function queries the settings of the timer associated
* with the specified timer file descriptor.
*
* @param fd The file descriptor of the timer, obtained from `timerfd_create`.
* @param cur A pointer to a `itimerspec` structure where the current timer
* settings will be stored:
* - `it_value`: The time remaining until the next expiration.
* If zero, the timer is disabled.
* - `it_interval`: The interval for periodic timers. Zero if the
* timer is not periodic.
*
* @return On success, returns 0. On error, returns -1 and sets `errno`
* appropriately.
*
* @note This function does not reset or modify the timer; it only retrieves
* the current settings.
*
* @see timerfd_create, timerfd_settime
*/
int timerfd_gettime(int fd, struct itimerspec *cur)
{
return timerfd_do_gettime(fd, cur);

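/*
 * Illustrative sketch (not part of this commit) of the three calls documented
 * above: arm a periodic one-second timer and block until it fires once. The
 * <sys/timerfd.h> header path is assumed from the usual POSIX layout.
 */
#include <sys/timerfd.h>
#include <unistd.h>
#include <stdint.h>
#include <time.h>

static int demo_timerfd(void)
{
    struct itimerspec its = { 0 };
    uint64_t expirations = 0;
    int fd = timerfd_create(CLOCK_MONOTONIC, 0);

    if (fd < 0)
        return -1;

    its.it_value.tv_sec    = 1; /* first expiration after 1 s */
    its.it_interval.tv_sec = 1; /* then every 1 s */
    timerfd_settime(fd, 0, &its, NULL);

    read(fd, &expirations, sizeof(expirations)); /* blocks until the timer expires */
    close(fd);

    return (int)expirations;
}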
View File

@ -48,7 +48,7 @@ int arch_expand_user_stack(void *addr)
else /* map failed, send signal SIGSEGV */
{
#ifdef RT_USING_SIGNALS
dbg_log(DBG_ERROR, "[fault] thread %s mapped addr %p failed!\n", rt_thread_self()->parent.name, addr);
LOG_E("[fault] thread %s mapped addr %p failed!\n", rt_thread_self()->parent.name, addr);
lwp_thread_kill(rt_thread_self(), SIGSEGV);
ret = 1; /* return 1, will return back to intr, then check exit */
#endif
@ -57,7 +57,7 @@ int arch_expand_user_stack(void *addr)
else /* not stack, send signal SIGSEGV */
{
#ifdef RT_USING_SIGNALS
dbg_log(DBG_ERROR, "[fault] thread %s access unmapped addr %p!\n", rt_thread_self()->parent.name, addr);
LOG_E("[fault] thread %s access unmapped addr %p!\n", rt_thread_self()->parent.name, addr);
lwp_thread_kill(rt_thread_self(), SIGSEGV);
ret = 1; /* return 1, will return back to intr, then check exit */
#endif

View File

@ -447,7 +447,7 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
if (lwp == RT_NULL)
{
dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
LOG_E("lwp struct out of memory!\n");
return -ENOMEM;
}
LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));

View File

@ -821,7 +821,7 @@ sysret_t sys_unlink(const char *pathname)
sysret_t sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp)
{
int ret = 0;
dbg_log(DBG_LOG, "sys_nanosleep\n");
LOG_D("sys_nanosleep\n");
if (!lwp_user_accessable((void *)rqtp, sizeof *rqtp))
return -EFAULT;
@ -4759,7 +4759,7 @@ sysret_t sys_clock_gettime(clockid_t clk, struct timespec *ts)
sysret_t sys_clock_nanosleep(clockid_t clk, int flags, const struct timespec *rqtp, struct timespec *rmtp)
{
int ret = 0;
dbg_log(DBG_LOG, "sys_nanosleep\n");
LOG_D("sys_nanosleep\n");
if (!lwp_user_accessable((void *)rqtp, sizeof *rqtp))
return -EFAULT;

View File

@ -2,6 +2,7 @@ menuconfig LWP_USING_TERMINAL
bool "Terminal I/O Subsystem"
depends on RT_USING_SMART
default y
select RT_USING_SERIAL_BYPASS
if LWP_USING_TERMINAL
config LWP_PTY_MAX_PARIS_LIMIT

View File

@ -73,7 +73,7 @@ static const char *dev_console_filename;
#define TTYSUP_CFLAG \
(CIGNORE | CSIZE | CSTOPB | CREAD | PARENB | PARODD | HUPCL | CLOCAL | \
CCTS_OFLOW | CRTS_IFLOW | CDTR_IFLOW | CDSR_OFLOW | CCAR_OFLOW | \
CNO_RTSDTR)
CNO_RTSDTR | CBAUD)
/*
* Set TTY buffer sizes.

View File

@ -22,6 +22,12 @@
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#ifdef ARCH_CPU_64BIT
#define MIN_BIT 16
#else
#define MIN_BIT 8
#endif
#ifndef RT_INIT_MEMORY_REGIONS
#define RT_INIT_MEMORY_REGIONS 128
#endif
@ -155,16 +161,16 @@ static rt_err_t _memblock_add_range(struct rt_memblock *memblock,
rt_err_t rt_memblock_add_memory(const char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags)
{
LOG_D("add physical address range [%p-%p) with flag 0x%x" \
" to overall memory regions\n", base, base + size, flag);
LOG_D("add physical address range [0x%.*lx-0x%.*lx) with flag 0x%x" \
" to overall memory regions\n", MIN_BIT, base, MIN_BIT, base + size, flag);
return _memblock_add_range(&mmblk_memory, name, start, end, flags);
}
rt_err_t rt_memblock_reserve_memory(const char *name, rt_size_t start, rt_size_t end, mmblk_flag_t flags)
{
LOG_D("add physical address range [%p-%p) to reserved memory regions\n",\
base, base + size);
LOG_D("add physical address range %s [0x%.*lx-0x%.*lx) to reserved memory regions\n",
name, MIN_BIT, start, MIN_BIT, end);
return _memblock_add_range(&mmblk_reserved, name, start, end, flags);
}
@ -347,14 +353,14 @@ void rt_memblock_setup_memory_environment(void)
rt_slist_for_each_entry(iter, &(mmblk_memory.reg_list), node)
{
LOG_I(" %-*.s [%p, %p]", RT_NAME_MAX, iter->memreg.name, iter->memreg.start, iter->memreg.end);
LOG_I(" %-*.s [0x%.*lx, 0x%.*lx]", RT_NAME_MAX, iter->memreg.name, MIN_BIT, iter->memreg.start, MIN_BIT, iter->memreg.end);
}
LOG_I("Reserved memory:");
rt_slist_for_each_entry(iter, &(mmblk_reserved.reg_list), node)
{
LOG_I(" %-*.s [%p, %p]", RT_NAME_MAX, iter->memreg.name, iter->memreg.start, iter->memreg.end);
LOG_I(" %-*.s [0x%.*lx, 0x%.*lx]", RT_NAME_MAX, iter->memreg.name, MIN_BIT, iter->memreg.start, MIN_BIT, iter->memreg.end);
if (iter->flags != MEMBLOCK_NONE)
{

View File

@ -99,6 +99,10 @@
#include <rtthread.h>
#ifdef RT_USING_NETDEV
#include "netdev.h"
#endif
/** Random generator function to create random TXIDs and source ports for queries */
#ifndef DNS_RAND_TXID
#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_XID) != 0)
@ -300,7 +304,9 @@ static u8_t dns_last_pcb_idx;
static u8_t dns_seqno;
static struct dns_table_entry dns_table[DNS_TABLE_SIZE];
static struct dns_req_entry dns_requests[DNS_MAX_REQUESTS];
#ifndef RT_USING_NETDEV
static ip_addr_t dns_servers[DNS_MAX_SERVERS];
#endif
#if LWIP_IPV4
const ip_addr_t dns_mquery_v4group = DNS_MQUERY_IPV4_GROUP_INIT;
@ -364,21 +370,25 @@ dns_setserver(u8_t numdns, const ip_addr_t *dnsserver)
{
if (numdns < DNS_MAX_SERVERS) {
if (dnsserver != NULL) {
dns_servers[numdns] = (*dnsserver);
#ifdef RT_USING_NETDEV
extern struct netif *netif_list;
extern struct netdev *netdev_get_by_name(const char *name);
extern void netdev_low_level_set_dns_server(struct netdev *netdev, uint8_t dns_num, const ip_addr_t *dns_server);
struct netif *netif = NULL;
/* set network interface device DNS server address */
for (netif = netif_list; netif != NULL; netif = netif->next) {
netdev_low_level_set_dns_server(netdev_get_by_name(netif->name), numdns, dnsserver);
netdev_set_dns_server(netdev_get_by_name(netif->name), numdns, dnsserver);
}
#else
dns_servers[numdns] = (*dnsserver);
#endif /* RT_USING_NETDEV */
} else {
#ifdef RT_USING_NETDEV
struct netif *netif = NULL;
for (netif = netif_list; netif != NULL; netif = netif->next) {
netdev_set_dns_server(netdev_get_by_name(netif->name), numdns, IP_ADDR_ANY);
}
#else
dns_servers[numdns] = *IP_ADDR_ANY;
#endif
}
}
}
@ -395,7 +405,11 @@ const ip_addr_t *
dns_getserver(u8_t numdns)
{
if (numdns < DNS_MAX_SERVERS) {
#ifdef RT_USING_NETDEV
return &netdev_default->dns_servers[numdns];
#else
return &dns_servers[numdns];
#endif
} else {
return IP_ADDR_ANY;
}
@ -770,11 +784,19 @@ dns_send(u8_t idx)
u8_t n;
u8_t pcb_idx;
struct dns_table_entry *entry = &dns_table[idx];
const ip_addr_t *dns_addr;
LWIP_DEBUGF(DNS_DEBUG, ("dns_send: dns_servers[%"U16_F"] \"%s\": request\n",
(u16_t)(entry->server_idx), entry->name));
LWIP_ASSERT("dns server out of array", entry->server_idx < DNS_MAX_SERVERS);
if (ip_addr_isany_val(dns_servers[entry->server_idx])
#ifdef RT_USING_NETDEV
dns_addr = &netdev_default->dns_servers[entry->server_idx];
#else
dns_addr = &dns_servers[entry->server_idx];
#endif
if (ip_addr_isany(dns_addr)
#if LWIP_DNS_SUPPORT_MDNS_QUERIES
&& !entry->is_mdns
#endif
@ -859,7 +881,11 @@ dns_send(u8_t idx)
#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */
{
dst_port = DNS_SERVER_PORT;
dst = &dns_servers[entry->server_idx];
dst = dns_addr;
#ifdef RT_USING_NETDEV
/* set netif index for this pcb, specify the network interface corresponding to the DNS server */
dns_pcbs[pcb_idx]->netif_idx = netif_get_index((struct netif *)netdev_default->user_data);
#endif
}
err = udp_sendto(dns_pcbs[pcb_idx], p, dst, dst_port);
@ -1040,8 +1066,15 @@ dns_backupserver_available(struct dns_table_entry *pentry)
u8_t ret = 0;
if (pentry) {
if ((pentry->server_idx + 1 < DNS_MAX_SERVERS) && !ip_addr_isany_val(dns_servers[pentry->server_idx + 1])) {
ret = 1;
if ((pentry->server_idx + 1 < DNS_MAX_SERVERS)) {
#ifdef RT_USING_NETDEV
const ip_addr_t *dns_addr = &netdev_default->dns_servers[pentry->server_idx + 1];
#else
const ip_addr_t *dns_addr = &dns_servers[pentry->server_idx + 1];
#endif
if (!ip_addr_isany(dns_addr)) {
ret = 1;
}
}
}
@ -1230,9 +1263,14 @@ dns_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr,
if (!entry->is_mdns)
#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */
{
#ifdef RT_USING_NETDEV
const ip_addr_t *dns_addr = &netdev_default->dns_servers[entry->server_idx];
#else
const ip_addr_t *dns_addr = &dns_servers[entry->server_idx];
#endif
/* Check whether response comes from the same network address to which the
question was sent. (RFC 5452) */
if (!ip_addr_cmp(addr, &dns_servers[entry->server_idx])) {
if (!ip_addr_cmp(addr, dns_addr)) {
goto ignore_packet; /* ignore this packet */
}
}
@ -1631,8 +1669,13 @@ dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, dns_found_call
if (!is_mdns)
#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */
{
#ifdef RT_USING_NETDEV
const ip_addr_t *dns_addr = &netdev_default->dns_servers[0];
#else
const ip_addr_t *dns_addr = &dns_servers[0];
#endif
/* prevent calling found callback if no server is set, return error instead */
if (ip_addr_isany_val(dns_servers[0])) {
if (ip_addr_isany(dns_addr)) {
return ERR_VAL;
}
}

View File

@ -673,7 +673,16 @@ dhcp_handle_ack(struct netif *netif, struct dhcp_msg *msg_in)
for (n = 0; (n < LWIP_DHCP_PROVIDE_DNS_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n); n++) {
ip_addr_t dns_addr;
ip_addr_set_ip4_u32_val(dns_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n)));
#ifdef RT_USING_NETDEV
extern struct netdev *netdev_get_by_name(const char *name);
extern void netdev_set_dns_server(struct netdev *netdev, uint8_t dns_num, const ip_addr_t *dns_server);
/* Here we only need to set the dns server of the corresponding network device,
* but do not need to configure all network devices.
*/
netdev_set_dns_server(netdev_get_by_name(netif->name), n, &dns_addr);
#else
dns_setserver(n, &dns_addr);
#endif
}
#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */
}

View File

@ -538,7 +538,16 @@ dhcp6_handle_config_reply(struct netif *netif, struct pbuf *p_msg_in)
}
ip6_addr_assign_zone(dns_addr6, IP6_UNKNOWN, netif);
/* @todo: do we need a different offset than DHCP(v4)? */
#ifdef RT_USING_NETDEV
extern struct netdev *netdev_get_by_name(const char *name);
extern void netdev_set_dns_server(struct netdev *netdev, uint8_t dns_num, const ip_addr_t *dns_server);
/* Here we only need to set the dns server of the corresponding network device,
* but do not need to configure all network devices.
*/
netdev_set_dns_server(netdev_get_by_name(netif->name), n, &dns_addr);
#else
dns_setserver(n, &dns_addr);
#endif
}
}
/* @ todo: parse and set Domain Search List */

View File

@ -776,14 +776,32 @@ nd6_input(struct pbuf *p, struct netif *inp)
if (htonl(rdnss_opt->lifetime) > 0) {
/* TODO implement Lifetime > 0 */
#ifdef RT_USING_NETDEV
extern struct netdev *netdev_get_by_name(const char *name);
extern void netdev_set_dns_server(struct netdev *netdev, uint8_t dns_num, const ip_addr_t *dns_server);
/* Here we only need to set the dns server of the corresponding network device,
* but do not need to configure all network devices.
*/
netdev_set_dns_server(netdev_get_by_name(inp->name), rdnss_server_idx++, &rdnss_address);
#else
dns_setserver(rdnss_server_idx++, &rdnss_address);
#endif
} else {
/* TODO implement DNS removal in dns.c */
u8_t s;
for (s = 0; s < DNS_MAX_SERVERS; s++) {
const ip_addr_t *addr = dns_getserver(s);
if(ip_addr_cmp(addr, &rdnss_address)) {
dns_setserver(s, NULL);
#ifdef RT_USING_NETDEV
extern struct netdev *netdev_get_by_name(const char *name);
extern void netdev_set_dns_server(struct netdev *netdev, uint8_t dns_num, const ip_addr_t *dns_server);
/* Here we only need to set the dns server of the corresponding network device,
* but do not need to configure all network devices.
*/
netdev_set_dns_server(netdev_get_by_name(inp->name), s, IP_ADDR_ANY);
#else
dns_setserver(s, IP_ADDR_ANY);
#endif
}
}
}

View File

@ -24,7 +24,8 @@
*
******************************************************************************
* REVISION HISTORY
*
* 24-09-12 Evlers <1425295900@qq.com>
* add support for independent dns services for multiple network devices
* 03-01-01 Marc Boucher <marc@mbsi.ca>
* Ported to lwIP.
* 97-11-05 Guy Lancaster <lancasterg@acm.org>, Global Election Systems Inc.
@ -134,6 +135,9 @@
#if PPP_IPV6_SUPPORT
#include "netif/ppp/ipv6cp.h"
#endif /* PPP_IPV6_SUPPORT */
#ifdef RT_USING_NETDEV
#include "netdev.h"
#endif /* RT_USING_NETDEV */
/*************************/
/*** LOCAL DEFINITIONS ***/
@ -1109,9 +1113,20 @@ int sdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2) {
LWIP_UNUSED_ARG(pcb);
ip_addr_set_ip4_u32_val(ns, ns1);
#ifdef RT_USING_NETDEV
/* Here we only need to set the dns server of the corresponding network device,
* but do not need to configure all network devices.
*/
netdev_set_dns_server(netdev_get_by_name(pcb->netif->name), 0, &ns);
#else
dns_setserver(0, &ns);
#endif
ip_addr_set_ip4_u32_val(ns, ns2);
#ifdef RT_USING_NETDEV
netdev_set_dns_server(netdev_get_by_name(pcb->netif->name), 1, &ns);
#else
dns_setserver(1, &ns);
#endif
return 1;
}
@ -1127,12 +1142,20 @@ int cdns(ppp_pcb *pcb, u32_t ns1, u32_t ns2) {
nsa = dns_getserver(0);
ip_addr_set_ip4_u32_val(nsb, ns1);
if (ip_addr_cmp(nsa, &nsb)) {
#ifdef RT_USING_NETDEV
netdev_set_dns_server(netdev_get_by_name(pcb->netif->name), 0, IP_ADDR_ANY);
#else
dns_setserver(0, IP_ADDR_ANY);
#endif
}
nsa = dns_getserver(1);
ip_addr_set_ip4_u32_val(nsb, ns2);
if (ip_addr_cmp(nsa, &nsb)) {
#ifdef RT_USING_NETDEV
netdev_set_dns_server(netdev_get_by_name(pcb->netif->name), 1, IP_ADDR_ANY);
#else
dns_setserver(1, IP_ADDR_ANY);
#endif
}
return 1;
}

View File

@ -15,6 +15,7 @@
* 2018-11-02 MurphyZhao port to lwIP 2.1.0
* 2021-09-07 Grissiom fix eth_tx_msg ack bug
* 2022-02-22 xiangxistu integrate v1.4.1 v2.0.3 and v2.1.2 porting layer
* 2024-09-12 Evlers add support for independent dns services for multiple network devices
*/
/*
@ -169,8 +170,11 @@ static int lwip_netdev_set_addr_info(struct netdev *netif, ip_addr_t *ip_addr, i
}
#ifdef RT_LWIP_DNS
static int lwip_netdev_set_dns_server(struct netdev *netif, uint8_t dns_num, ip_addr_t *dns_server)
static int lwip_netdev_set_dns_server(struct netdev *netdev, uint8_t dns_num, ip_addr_t *dns_server)
{
#if RT_USING_LWIP_VER_NUM >= 0x20102
netdev_low_level_set_dns_server(netdev, dns_num, dns_server);
#else
#if LWIP_VERSION_MAJOR == 1U /* v1.x */
extern void dns_setserver(u8_t numdns, ip_addr_t *dnsserver);
#else /* >=2.x */
@ -178,6 +182,7 @@ static int lwip_netdev_set_dns_server(struct netdev *netif, uint8_t dns_num, ip_
#endif /* LWIP_VERSION_MAJOR == 1U */
dns_setserver(dns_num, dns_server);
#endif /* RT_USING_LWIP_VER_NUM >= 0x20102 */
return ERR_OK;
}
#endif /* RT_LWIP_DNS */

View File

@ -43,16 +43,10 @@ extern "C" {
#undef DBG_WARNING
#undef DBG_INFO
#undef DBG_LOG
#undef dbg_log
#define DBG_ERROR LOG_LVL_ERROR
#define DBG_WARNING LOG_LVL_WARNING
#define DBG_INFO LOG_LVL_INFO
#define DBG_LOG LOG_LVL_DBG
#define dbg_log(level, ...) \
if ((level) <= LOG_LVL) \
{ \
ulog_output(level, LOG_TAG, RT_FALSE, __VA_ARGS__);\
}
#if !defined(LOG_TAG)
/* compatible for rtdbg */

View File

@ -11,12 +11,8 @@
#include <rtthread.h>
#include <string.h>
#include <stdlib.h>
#include "utest.h"
#include <utest_log.h>
#undef DBG_TAG
#undef DBG_LVL
#include "utest_log.h"
#define DBG_TAG "utest"
#ifdef UTEST_DEBUG
@ -191,7 +187,7 @@ static int utest_help(void)
return 0;
}
static void utest_run(const char *utest_name)
static void utest_do_run(const char *utest_name)
{
rt_size_t i;
rt_uint32_t index;
@ -217,7 +213,7 @@ static void utest_run(const char *utest_name)
{
if (utest_name)
{
int len = strlen(utest_name);
int len = rt_strlen(utest_name);
if (utest_name[len - 1] == '*')
{
len -= 1;
@ -300,17 +296,38 @@ static void utest_run(const char *utest_name)
}
}
static void utest_thr_entry(const char *utest_name)
static void utest_thr_entry(void *para)
{
/* see commit:0dc7b9a for details */
rt_thread_mdelay(1000);
utest_run(utest_name);
char *utest_name = (char *)para;
rt_thread_mdelay(1000); /* see commit:0dc7b9a for details */
rt_kprintf("\n");
utest_do_run(utest_name);
}
long utest_testcase_run(int argc, char** argv)
static void utest_thread_create(const char *utest_name)
{
rt_thread_t tid = RT_NULL;
tid = rt_thread_create("utest",
utest_thr_entry, (void *)utest_name,
UTEST_THREAD_STACK_SIZE, UTEST_THREAD_PRIORITY, 10);
if (tid != RT_NULL)
{
rt_thread_startup(tid);
}
}
#ifdef RT_USING_CI_ACTION
static int utest_ci_action(void)
{
tc_loop = 1;
utest_thread_create(RT_NULL);
return RT_EOK;
}
INIT_APP_EXPORT(utest_ci_action);
#endif /* RT_USING_CI_ACTION */
int utest_testcase_run(int argc, char** argv)
{
static char utest_name[UTEST_NAME_MAX_LEN];
rt_memset(utest_name, 0x0, sizeof(utest_name));
@ -318,27 +335,21 @@ long utest_testcase_run(int argc, char** argv)
if (argc == 1)
{
utest_run(RT_NULL);
return 0;
utest_thread_create(RT_NULL);
}
else if (argc == 2 || argc == 3 || argc == 4)
{
if (rt_strcmp(argv[1], "-thread") == 0)
{
rt_thread_t tid = RT_NULL;
if (argc == 3 || argc == 4)
{
rt_strncpy(utest_name, argv[2], sizeof(utest_name) -1);
if (argc == 4) tc_loop = atoi(argv[3]);
}
tid = rt_thread_create("utest",
(void (*)(void *))utest_thr_entry, utest_name,
UTEST_THREAD_STACK_SIZE, UTEST_THREAD_PRIORITY, 10);
if (tid != NULL)
{
rt_thread_startup(tid);
if (argc == 4)
{
tc_loop = atoi(argv[3]);
}
}
utest_thread_create(utest_name);
}
else if (rt_strcmp(argv[1], "-help") == 0)
{
@ -347,8 +358,11 @@ long utest_testcase_run(int argc, char** argv)
else
{
rt_strncpy(utest_name, argv[1], sizeof(utest_name) -1);
if (argc == 3) tc_loop = atoi(argv[2]);
utest_run(utest_name);
if (argc == 3)
{
tc_loop = atoi(argv[2]);
}
utest_do_run(utest_name);
}
}
else
@ -356,7 +370,8 @@ long utest_testcase_run(int argc, char** argv)
LOG_E("[ error ] at (%s:%d), in param error.", __func__, __LINE__);
utest_help();
}
return 0;
return RT_EOK;
}
MSH_CMD_EXPORT_ALIAS(utest_testcase_run, utest_run, utest_run [-thread or -help] [testcase name] [loop num]);
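Based on the argument handling above, the exported `utest_run` command can be invoked from msh in three ways: with no arguments it runs every registered testcase in a new `utest` thread; with a testcase name (optionally ending in `*` to match a prefix) and an optional loop count it runs the matching testcases in the caller's context; with `-thread` it runs them in a dedicated thread. The testcase name below is illustrative:
```shell
msh /> utest_run
msh /> utest_run testcases.kernel.* 3
msh /> utest_run -thread testcases.kernel.* 3
```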
@ -378,13 +393,27 @@ void utest_unit_run(test_unit_func func, const char *unit_func_name)
}
}
void utest_assert(int value, const char *file, int line, const char *func, const char *msg)
/*
 * utest_assert - assertion helper used by the uassert_* macros
 *
 * @param value - the value to assert; non-zero means the assertion passes
 * @param file  - file name of the call site
 * @param line  - line number of the call site
 * @param func  - function name of the call site
 * @param msg   - message printed when the assertion fails
 *
 * @return - RT_TRUE: assertion passed; RT_FALSE: assertion failed
 */
rt_bool_t utest_assert(int value, const char *file, int line, const char *func, const char *msg)
{
rt_bool_t rst = RT_FALSE;
if (!(value))
{
local_utest.error = UTEST_FAILED;
local_utest.failed_num ++;
LOG_E("[ ASSERT ] [ unit ] at (%s); func: (%s:%d); msg: (%s)", file_basename(file), func, line, msg);
rst = RT_FALSE;
}
else
{
@ -394,38 +423,50 @@ void utest_assert(int value, const char *file, int line, const char *func, const
}
local_utest.error = UTEST_PASSED;
local_utest.passed_num ++;
rst = RT_TRUE;
}
return rst;
}
void utest_assert_string(const char *a, const char *b, rt_bool_t equal, const char *file, int line, const char *func, const char *msg)
{
rt_bool_t rst = RT_FALSE;
if (a == RT_NULL || b == RT_NULL)
{
utest_assert(0, file, line, func, msg);
}
if (equal)
{
if (rt_strcmp(a, b) == 0)
{
utest_assert(1, file, line, func, msg);
}
else
{
utest_assert(0, file, line, func, msg);
}
rst = utest_assert(0, file, line, func, msg);
}
else
{
if (rt_strcmp(a, b) == 0)
if (equal)
{
utest_assert(0, file, line, func, msg);
if (rt_strcmp(a, b) == 0)
{
rst = utest_assert(1, file, line, func, msg);
}
else
{
rst = utest_assert(0, file, line, func, msg);
}
}
else
{
utest_assert(1, file, line, func, msg);
if (rt_strcmp(a, b) == 0)
{
rst = utest_assert(0, file, line, func, msg);
}
else
{
rst = utest_assert(1, file, line, func, msg);
}
}
}
if (!rst)
{
LOG_E("[ ASSERT ] [ unit ] str-a: (%s); str-b: (%s)", a, b);
}
}
void utest_assert_buf(const char *a, const char *b, rt_size_t sz, rt_bool_t equal, const char *file, int line, const char *func, const char *msg)

View File

@ -19,7 +19,7 @@ extern "C" {
#endif
/* No need for the user to use this function directly */
void utest_assert(int value, const char *file, int line, const char *func, const char *msg);
rt_bool_t utest_assert(int value, const char *file, int line, const char *func, const char *msg);
/* No need for the user to use this function directly */
void utest_assert_string(const char *a, const char *b, rt_bool_t equal, const char *file, int line, const char *func, const char *msg);
@ -56,6 +56,9 @@ void utest_assert_buf(const char *a, const char *b, rt_size_t sz, rt_bool_t equa
#define uassert_int_equal(a, b) __utest_assert((a) == (b), "(" #a ") not equal to (" #b ")")
#define uassert_int_not_equal(a, b) __utest_assert((a) != (b), "(" #a ") equal to (" #b ")")
#define uassert_ptr_equal(a, b) __utest_assert((const void*)(a) == (const void*)(b), "(" #a ") not equal to (" #b ")")
#define uassert_ptr_not_equal(a, b) __utest_assert((const void*)(a) != (const void*)(b), "(" #a ") equal to (" #b ")")
#define uassert_str_equal(a, b) utest_assert_string((const char*)(a), (const char*)(b), RT_TRUE, __FILE__, __LINE__, __func__, "string not equal")
#define uassert_str_not_equal(a, b) utest_assert_string((const char*)(a), (const char*)(b), RT_FALSE, __FILE__, __LINE__, __func__, "string equal")
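As a usage illustration only (the unit name and values are hypothetical), the newly added pointer asserts are used inside a test unit the same way as the existing macros:
```c
static void ptr_assert_demo(void)
{
    int value = 0;
    int *ptr = &value;

    uassert_ptr_equal(ptr, &value);       /* same address: passes      */
    uassert_ptr_not_equal(ptr, RT_NULL);  /* non-NULL pointer: passes  */
}
```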

View File

@ -13,12 +13,12 @@
#include <rtthread.h>
#define UTEST_DEBUG
// #define UTEST_DEBUG
#undef DBG_TAG
#undef DBG_LVL
#define DBG_TAG "testcase"
#define DBG_TAG "utest"
#ifdef UTEST_DEBUG
#define DBG_LVL DBG_LOG
#else

View File

@ -6,3 +6,43 @@
4. Open the file ./Doxyfile
5. Switch to the `Run` tab and click `Run doxygen`
# How to build & view the Doxygen HTML on Ubuntu
The following steps have been verified on Ubuntu 22.04:
```shell
$ lsb_release -a
No LSB modules are available.
Distributor ID: Ubuntu
Description: Ubuntu 22.04.5 LTS
Release: 22.04
Codename: jammy
```
The following packages (and their dependencies) need to be installed:
```shell
$ sudo apt update
$ sudo apt install doxygen
$ sudo apt install graphviz
```
Assuming that the RT-Thread source tree is located at $RTT, run the following commands to build the HTML documentation.
```shell
$ cd $RTT/documentation/doxygen
$ rm -rf html
$ doxygen
```
A new html directory will be created and all generated HTML files will be placed in it.
If you want to quickly browse the HTML locally (in an Ubuntu environment), enter the html directory and start a local HTTP server with Python.
```shell
$ cd html
$ python3 -m http.server
Serving HTTP on 0.0.0.0 port 8000 (http://0.0.0.0:8000/) ...
```
Open a browser and go to `http://<IP>:8000/index.html` to view the generated pages. For local access, replace `<IP>` with `localhost`; for remote access, replace it with a reachable IP address of the machine hosting the HTML.

View File

@ -11,6 +11,7 @@ rsource "utest/Kconfig"
rsource "kernel/Kconfig"
rsource "cpp11/Kconfig"
rsource "drivers/serial_v2/Kconfig"
rsource "drivers/serial_bypass/Kconfig"
rsource "drivers/ipc/Kconfig"
rsource "posix/Kconfig"
rsource "mm/Kconfig"

View File

@ -0,0 +1,7 @@
menu "Serial-Bypass Testcase"
config UTEST_SERIAL_BYPASS
bool "Serial testcase"
default n
endmenu

View File

@ -0,0 +1,11 @@
Import('rtconfig')
from building import *
cwd = GetCurrentDir()
src = Glob('bypass*.c')
CPPPATH = [cwd]
group = DefineGroup('utestcases', src, depend = ['UTEST_SERIAL_BYPASS'], CPPPATH = CPPPATH)
Return('group')

View File

@ -0,0 +1,185 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-11-20 zhujiale the first version
*/
#include <rtthread.h>
#include <rtdevice.h>
#include "utest.h"
static struct rt_serial_device* _serial0;
static struct rt_spinlock lock;
static int cnt = 0;
#define __REG32(x) (*((volatile unsigned int*)((rt_ubase_t)x)))
#define UART_FR(base) __REG32(base + 0x18)
#define UART_DR(base) __REG32(base + 0x00)
#define UARTFR_TXFF 0x20
static rt_err_t utest_get_c(struct rt_serial_device* serial, char ch, void* data)
{
rt_atomic_add(&cnt, 1);
return RT_EOK;
}
static int utest_getc(struct rt_serial_device* serial)
{
static int num = 0;
rt_spin_lock(&lock);
if (rt_atomic_load(&num) == 10)
{
rt_atomic_flag_clear(&num);
rt_spin_unlock(&lock);
return -1;
}
rt_atomic_add(&num, 1);
rt_spin_unlock(&lock);
return 'a';
}
struct hw_uart_device
{
rt_size_t hw_base;
rt_size_t irqno;
};
static int uart_putc(struct rt_serial_device* serial, char c)
{
struct hw_uart_device* uart;
RT_ASSERT(serial != RT_NULL);
uart = (struct hw_uart_device*)serial->parent.user_data;
while (UART_FR(uart->hw_base) & UARTFR_TXFF);
UART_DR(uart->hw_base) = c;
return 1;
}
static const struct rt_uart_ops _utest_ops =
{
RT_NULL,
RT_NULL,
uart_putc,
utest_getc,
};
static void thread_rx1(void* parameter)
{
for (int i = 0; i < 10; i++)
{
rt_hw_serial_isr(_serial0, RT_SERIAL_EVENT_RX_IND);
}
}
static void thread_rx2(void* parameter)
{
for (int i = 0; i < 10; i++)
{
rt_workqueue_dowork(_serial0->bypass->lower_workq, &_serial0->bypass->work);
}
}
static void thread_high_priority(void* parameter)
{
for (int i = 1; i < 10; i++)
{
rt_bypass_upper_register(_serial0, "test", i, utest_get_c, RT_NULL);
rt_bypass_upper_unregister(_serial0, i);
}
}
static void thread_low_priority(void* parameter)
{
for (int i = 0; i < 20; i++)
{
rt_hw_serial_isr(_serial0, RT_SERIAL_EVENT_RX_IND);
}
}
static void bypass_rx_stress_003(void)
{
const struct rt_uart_ops* tmp = _serial0->ops;
rt_thread_t high = rt_thread_create("high_prio", thread_high_priority, RT_NULL, 2048, 15, 10);
rt_thread_t low = rt_thread_create("low_prio", thread_low_priority, RT_NULL, 2048, 20, 10);
rt_atomic_flag_clear(&cnt);
_serial0->ops = &_utest_ops;
rt_bypass_upper_register(_serial0, "test", 0, utest_get_c, RT_NULL);
rt_thread_startup(high);
rt_thread_startup(low);
rt_thread_mdelay(1000);
_serial0->ops = tmp;
rt_bypass_upper_unregister(_serial0, 0);
uassert_true(rt_atomic_load(&cnt) == 200);
}
static void bypass_rx_stress_002(void)
{
const struct rt_uart_ops* tmp = _serial0->ops;
rt_thread_t rx2 = rt_thread_create("rx2", thread_rx1, RT_NULL, 2048, RT_THREAD_PRIORITY_MAX - 5, 10);
rt_thread_t rx3 = rt_thread_create("rx3", thread_rx2, RT_NULL, 2048, RT_THREAD_PRIORITY_MAX - 5, 10);
rt_atomic_flag_clear(&cnt);
_serial0->ops = &_utest_ops;
rt_bypass_lower_register(_serial0, "utest", 0, utest_get_c, RT_NULL);
rt_thread_startup(rx2);
rt_thread_startup(rx3);
rt_thread_mdelay(1000);
uassert_true(rt_atomic_load(&cnt) == 100);
_serial0->ops = tmp;
rt_bypass_lower_unregister(_serial0, 0);
}
static void bypass_rx_stress_001(void)
{
const struct rt_uart_ops* tmp = _serial0->ops;
rt_thread_t rx1 = rt_thread_create("rx1", thread_rx1, RT_NULL, 2048, RT_THREAD_PRIORITY_MAX - 5, 10);
rt_thread_t rx2 = rt_thread_create("rx1", thread_rx1, RT_NULL, 2048, RT_THREAD_PRIORITY_MAX - 5, 10);
cnt = 0;
_serial0->ops = &_utest_ops;
rt_bypass_upper_register(_serial0, "utest", 0, utest_get_c, RT_NULL);
rt_thread_startup(rx1);
rt_thread_startup(rx2);
rt_thread_mdelay(1000);
uassert_true(rt_atomic_load(&cnt) == 200);
_serial0->ops = tmp;
rt_bypass_upper_unregister(_serial0, 0);
}
static rt_err_t utest_tc_init(void)
{
_serial0 = (struct rt_serial_device*)rt_console_get_device();
rt_spin_lock_init(&lock);
return RT_EOK;
}
static rt_err_t utest_tc_cleanup(void)
{
return RT_EOK;
}
static void _testcase(void)
{
UTEST_UNIT_RUN(bypass_rx_stress_001);
UTEST_UNIT_RUN(bypass_rx_stress_002);
UTEST_UNIT_RUN(bypass_rx_stress_003);
}
UTEST_TC_EXPORT(_testcase, "testcase.bypass.conflict.001", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,133 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-11-20 zhujiale the first version
*/
#include <rtthread.h>
#include <rtdevice.h>
#include "utest.h"
static struct rt_serial_device* _serial0;
static int cnt = 0;
#define __REG32(x) (*((volatile unsigned int*)((rt_ubase_t)x)))
#define UART_FR(base) __REG32(base + 0x18)
#define UART_DR(base) __REG32(base + 0x00)
#define UARTFR_TXFF 0x20
struct hw_uart_device
{
rt_size_t hw_base;
rt_size_t irqno;
};
static int uart_putc(struct rt_serial_device* serial, char c)
{
struct hw_uart_device* uart;
RT_ASSERT(serial != RT_NULL);
uart = (struct hw_uart_device*)serial->parent.user_data;
while (UART_FR(uart->hw_base) & UARTFR_TXFF);
UART_DR(uart->hw_base) = c;
return 1;
}
static rt_err_t utest_lower_run_test2(struct rt_serial_device* serial, char ch, void* data)
{
static rt_uint8_t num = 0;
num++;
uassert_true(ch == ('a' + num));
return RT_EOK;
}
static int utest_getc_2(struct rt_serial_device* serial)
{
static rt_uint8_t num = 0;
if (num == 20)
return -1;
num++;
return 'a' + num;
}
static const struct rt_uart_ops _utest_ops2 =
{
RT_NULL,
RT_NULL,
uart_putc,
utest_getc_2,
};
static rt_err_t utest_lower_run(struct rt_serial_device* serial, char ch, void* data)
{
uassert_true(ch == 'a');
cnt++;
return RT_EOK;
}
static int utest_getc(struct rt_serial_device* serial)
{
static rt_uint8_t num = 0;
if (num == 10)
return -1;
num++;
return 'a';
}
static const struct rt_uart_ops _utest_ops =
{
RT_NULL,
RT_NULL,
uart_putc,
utest_getc,
};
static void bypass_lower_001(void)
{
const struct rt_uart_ops* tmp = _serial0->ops;
_serial0->ops = &_utest_ops;
rt_bypass_lower_register(_serial0, "utest", RT_BYPASS_MAX_LEVEL, utest_lower_run, RT_NULL);
rt_hw_serial_isr(_serial0, RT_SERIAL_EVENT_RX_IND);
rt_thread_mdelay(100);
uassert_true(cnt == 10);
_serial0->ops = tmp;
rt_bypass_lower_unregister(_serial0, RT_BYPASS_MAX_LEVEL);
}
static void bypass_lower_002(void)
{
const struct rt_uart_ops* tmp = _serial0->ops;
_serial0->ops = &_utest_ops2;
rt_bypass_lower_register(_serial0, "utest", RT_BYPASS_MAX_LEVEL, utest_lower_run_test2, RT_NULL);
rt_hw_serial_isr(_serial0, RT_SERIAL_EVENT_RX_IND);
rt_thread_mdelay(100);
uassert_true(cnt == 10);
_serial0->ops = tmp;
rt_bypass_lower_unregister(_serial0, RT_BYPASS_MAX_LEVEL);
}
static rt_err_t utest_tc_init(void)
{
_serial0 = (struct rt_serial_device*)rt_console_get_device();
return RT_EOK;
}
static rt_err_t utest_tc_cleanup(void)
{
return RT_EOK;
}
static void _testcase(void)
{
UTEST_UNIT_RUN(bypass_lower_001);
UTEST_UNIT_RUN(bypass_lower_002);
}
UTEST_TC_EXPORT(_testcase, "testcase.bypass.lower.001", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,116 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-11-20 zhujiale the first version
*/
#include <rtthread.h>
#include <rtdevice.h>
#include "utest.h"
static struct rt_serial_device* _serial0;
static struct rt_spinlock lock;
static rt_err_t utest_001_run(struct rt_serial_device* serial, char ch, void* data)
{
return 0;
}
static void thread_serial_register1(void* parameter)
{
for (int i = 2; i < 10; i += 2)
{
rt_bypass_upper_register(_serial0, "test", i, utest_001_run, RT_NULL);
}
}
static void thread_serial_register_upper(void* parameter)
{
for (int i = 1; i < 10; i++)
{
rt_bypass_upper_register(_serial0, "test", i, utest_001_run, RT_NULL);
}
}
static void thread_serial_register_lower(void* parameter)
{
for (int i = 1; i < 10; i++)
{
rt_bypass_lower_register(_serial0, "test", i, utest_001_run, RT_NULL);
}
}
static void bypass_register_001(void)
{
rt_thread_t t1 = rt_thread_create("serial_register", thread_serial_register1, RT_NULL, 2048, RT_THREAD_PRIORITY_MAX - 5, 10);
rt_bypass_upper_register(_serial0, "test", 0, utest_001_run, RT_NULL);
rt_thread_startup(t1);
for (int i = 1; i < 10; i += 2)
{
rt_bypass_upper_register(_serial0, "test", i, utest_001_run, RT_NULL);
}
rt_thread_mdelay(1000);
rt_list_t* node = _serial0->bypass->upper_h->head.next;
for (int i = 0; i < 10;i++)
{
rt_list_t* next = node->next;
struct rt_serial_bypass_func* temp = rt_container_of(node, struct rt_serial_bypass_func, node);
uassert_true(temp->level == i);
rt_bypass_upper_unregister(_serial0, temp->level);
node = next;
}
}
static void bypass_register_002(void)
{
rt_thread_t t1 = rt_thread_create("serial_register", thread_serial_register_upper, RT_NULL, 2048, RT_THREAD_PRIORITY_MAX - 5, 10);
rt_thread_t t2 = rt_thread_create("serial_register", thread_serial_register_lower, RT_NULL, 2048, RT_THREAD_PRIORITY_MAX - 5, 10);
rt_bypass_upper_register(_serial0, "test", 0, utest_001_run, RT_NULL);
rt_thread_startup(t1);
rt_thread_startup(t2);
rt_thread_mdelay(1000);
rt_list_t* node = _serial0->bypass->upper_h->head.next;
for (int i = 0; i < 10;i++)
{
rt_list_t* next = node->next;
struct rt_serial_bypass_func* temp = rt_container_of(node, struct rt_serial_bypass_func, node);
uassert_true(temp->level == i);
rt_bypass_upper_unregister(_serial0, temp->level);
node = next;
}
node = _serial0->bypass->lower_h->head.next;
for (int i = 1; i < 10;i++)
{
rt_list_t* next = node->next;
struct rt_serial_bypass_func* temp = rt_container_of(node, struct rt_serial_bypass_func, node);
uassert_true(temp->level == i);
rt_bypass_lower_unregister(_serial0, temp->level);
node = next;
}
}
static rt_err_t utest_tc_init(void)
{
_serial0 = (struct rt_serial_device*)rt_console_get_device();
rt_spin_lock_init(&lock);
return RT_EOK;
}
static rt_err_t utest_tc_cleanup(void)
{
return RT_EOK;
}
static void _testcase(void)
{
UTEST_UNIT_RUN(bypass_register_001);
UTEST_UNIT_RUN(bypass_register_002);
}
UTEST_TC_EXPORT(_testcase, "testcase.bypass.register.001", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -0,0 +1,129 @@
/*
* Copyright (c) 2006-2024 RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2024-11-20 zhujiale the first version
*/
#include <rtthread.h>
#include <rtdevice.h>
#include "utest.h"
static struct rt_serial_device* _serial0;
static int cnt;
#define __REG32(x) (*((volatile unsigned int*)((rt_ubase_t)x)))
#define UART_FR(base) __REG32(base + 0x18)
#define UART_DR(base) __REG32(base + 0x00)
#define UARTFR_TXFF 0x20
static rt_err_t utest_upper_run(struct rt_serial_device* serial, char ch, void* data)
{
uassert_true(ch == 'a');
cnt++;
return RT_EOK;
}
static int utest_getc(struct rt_serial_device* serial)
{
static rt_uint8_t num = 0;
if (num == 10)
return -1;
num++;
return 'a';
}
struct hw_uart_device
{
rt_size_t hw_base;
rt_size_t irqno;
};
static int uart_putc(struct rt_serial_device* serial, char c)
{
struct hw_uart_device* uart;
RT_ASSERT(serial != RT_NULL);
uart = (struct hw_uart_device*)serial->parent.user_data;
while (UART_FR(uart->hw_base) & UARTFR_TXFF);
UART_DR(uart->hw_base) = c;
return 1;
}
static const struct rt_uart_ops _utest_ops =
{
RT_NULL,
RT_NULL,
uart_putc,
utest_getc,
};
static rt_err_t utest_lower_run_test2(struct rt_serial_device* serial, char ch, void* data)
{
static rt_uint8_t num = 0;
num++;
uassert_true(ch == ('a' + num));
return RT_EOK;
}
static int utest_getc_2(struct rt_serial_device* serial)
{
static rt_uint8_t num = 0;
if (num == 20)
return -1;
num++;
return 'a' + num;
}
static const struct rt_uart_ops _utest_ops2 =
{
RT_NULL,
RT_NULL,
uart_putc,
utest_getc_2,
};
static void bypass_upper_001(void)
{
const struct rt_uart_ops* tmp = _serial0->ops;
_serial0->ops = &_utest_ops;
rt_bypass_upper_register(_serial0, "utest", RT_BYPASS_LEVEL_1, utest_upper_run, RT_NULL);
rt_hw_serial_isr(_serial0, RT_SERIAL_EVENT_RX_IND);
uassert_true(cnt == 10);
_serial0->ops = tmp;
rt_bypass_upper_unregister(_serial0, RT_BYPASS_LEVEL_1);
}
static void bypass_upper_002(void)
{
const struct rt_uart_ops* tmp = _serial0->ops;
_serial0->ops = &_utest_ops2;
rt_bypass_upper_register(_serial0, "utest", RT_BYPASS_MAX_LEVEL, utest_lower_run_test2, RT_NULL);
rt_hw_serial_isr(_serial0, RT_SERIAL_EVENT_RX_IND);
rt_thread_mdelay(100);
uassert_true(cnt == 10);
_serial0->ops = tmp;
rt_bypass_upper_unregister(_serial0, RT_BYPASS_MAX_LEVEL);
}
static rt_err_t utest_tc_init(void)
{
_serial0 = (struct rt_serial_device*)rt_console_get_device();
return RT_EOK;
}
static rt_err_t utest_tc_cleanup(void)
{
return RT_EOK;
}
static void _testcase(void)
{
UTEST_UNIT_RUN(bypass_upper_001);
UTEST_UNIT_RUN(bypass_upper_002);
}
UTEST_TC_EXPORT(_testcase, "testcase.bypass.upper.001", utest_tc_init, utest_tc_cleanup, 10);

View File

@ -113,32 +113,6 @@ extern "C" {
rt_kprintf("\n")
#endif /* DBG_COLOR */
/*
* static debug routine
* NOTE: This is a NOT RECOMMENDED API. Please using LOG_X API.
* It will be DISCARDED later. Because it will take up more resources.
*/
#define dbg_log(level, fmt, ...) \
if ((level) <= DBG_LEVEL) \
{ \
switch(level) \
{ \
case DBG_ERROR: _DBG_LOG_HDR("E", 31); break; \
case DBG_WARNING: _DBG_LOG_HDR("W", 33); break; \
case DBG_INFO: _DBG_LOG_HDR("I", 32); break; \
case DBG_LOG: _DBG_LOG_HDR("D", 0); break; \
default: break; \
} \
rt_kprintf(fmt, ##__VA_ARGS__); \
_DBG_COLOR(0); \
}
#define dbg_here \
if ((DBG_LEVEL) <= DBG_LOG){ \
rt_kprintf(DBG_SECTION_NAME " Here %s:%d\n", \
__FUNCTION__, __LINE__); \
}
#define dbg_log_line(lvl, color_n, fmt, ...) \
do \
{ \
@ -151,10 +125,6 @@ extern "C" {
#define dbg_raw(...) rt_kprintf(__VA_ARGS__);
#else
#define dbg_log(level, fmt, ...)
#define dbg_here
#define dbg_enter
#define dbg_exit
#define dbg_log_line(lvl, color_n, fmt, ...)
#define dbg_raw(...)
#endif /* DBG_ENABLE */
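Since `dbg_log`, `dbg_here`, `dbg_enter` and `dbg_exit` are removed here, code that still uses them has to migrate to the `LOG_X` macros. A minimal migration sketch follows; the tag, level, function and message are examples only:
```c
/* before, with the removed API:
 *     dbg_log(DBG_ERROR, "open %s failed\n", path);
 */

#define DBG_TAG "app"        /* example tag   */
#define DBG_LVL DBG_INFO     /* example level */
#include <rtdbg.h>

void report_open_failure(const char *path)
{
    LOG_E("open %s failed", path);   /* LOG_E/LOG_W/LOG_I/LOG_D replace dbg_log */
}
```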

Some files were not shown because too many files have changed in this diff.