io_uring is an asynchronous I/O interface introduced in Linux 5.1, aimed at I/O-intensive applications. It was created to address the incomplete and poorly performing state of asynchronous I/O on Linux, and to replace the Linux AIO interface (io_setup, io_submit, io_getevents). io_uring already supports reads and writes on both sockets and files, and more operations continue to be added.
The io_uring implementation lives mainly in fs/io_uring.c in the kernel source.
The entire interface is exposed through only three syscalls: io_uring_setup, io_uring_enter, and io_uring_register.
User space and the kernel submit and reap work through a pair of shared queues: the submission queue and the completion queue. The abbreviations used below are SQ (Submission Queue), CQ (Completion Queue), SQE (Submission Queue Entry), and CQE (Completion Queue Entry).
int io_uring_setup(u32 entries, struct io_uring_params *param);
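For reference, the prototypes of the other two syscalls, as documented in the io_uring_enter(2) and io_uring_register(2) man pages, are:
int io_uring_enter(unsigned int fd, unsigned int to_submit, unsigned int min_complete, unsigned int flags, sigset_t *sig);
int io_uring_register(unsigned int fd, unsigned int opcode, void *arg, unsigned int nr_args);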
The user initializes an io_uring context by calling io_uring_setup. The call returns a file descriptor fd and fills param with the features the kernel supports and the offsets of the various data structures within fd. Using those offsets, the user mmaps fd to obtain a memory region shared between user space and the kernel. This region contains the io_uring context: the SQ ring, the CQ ring, and a dedicated area for SQ entries (the SQE area).
Note that the SQ ring holds indexes of SQ entries in the SQE area, whereas the CQ ring holds the completed entries with their full data.
In the Linux 5.12 kernel, an SQE is 64 bytes and a CQE is 16 bytes. When io_uring is initialized with entries as the first argument, the kernel by default allocates entries SQEs (rounded up to a power of two) and twice that many CQEs; the CQ size can be overridden with the IORING_SETUP_CQSIZE flag.
The clever part of io_uring's design is that user space and the kernel share this mmapped region: submissions and completions are exchanged through it directly, so queue operations require no extra copies and are very fast.
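As a concrete illustration of this layout, here is a minimal sketch that calls io_uring_setup directly and mmaps the three regions using the offsets returned in io_uring_params. It assumes kernel headers recent enough to provide __NR_io_uring_setup and the IORING_OFF_* constants, and is only meant to show the shared-memory layout; the echo server later in this article uses liburing, which hides all of this.

#include <linux/io_uring.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void) {
    struct io_uring_params p;
    memset(&p, 0, sizeof(p));

    /* Ask the kernel for a ring; it rounds the entry count up and
     * reports the actual sizes and region offsets back in p. */
    int ring_fd = (int)syscall(__NR_io_uring_setup, 4096, &p);
    if (ring_fd < 0) {
        perror("io_uring_setup");
        return 1;
    }

    /* SQ ring: head/tail/mask plus an array of indexes into the SQE area. */
    size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(unsigned);
    void *sq_ptr = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
                        MAP_SHARED, ring_fd, IORING_OFF_SQ_RING);

    /* CQ ring: head/tail/mask plus the CQEs themselves. */
    size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
    void *cq_ptr = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
                        MAP_SHARED, ring_fd, IORING_OFF_CQ_RING);

    /* SQE area: the fixed-size submission entries referenced by the SQ ring. */
    struct io_uring_sqe *sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED, ring_fd, IORING_OFF_SQES);

    if (sq_ptr == MAP_FAILED || cq_ptr == MAP_FAILED || sqes == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    printf("sq_entries=%u cq_entries=%u\n", p.sq_entries, p.cq_entries);
    close(ring_fd);
    return 0;
}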
The code below implements a TCP echo server: it accepts multiple client connections and echoes each client's data back to it.
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <liburing.h>
#define ENTRIES_LENGTH 4096
#define MAX_CONNECTIONS 1024
#define BUFFER_LENGTH 1024
char buf_table[MAX_CONNECTIONS][BUFFER_LENGTH] = {0};   /* one buffer per connection, indexed by fd */
enum {
READ,
WRITE,
ACCEPT,
};
/* Per-request context packed into the 64-bit sqe->user_data field. */
struct conninfo {
    int connfd;
    int type;
};
void set_read_event(struct io_uring *ring, int fd, void *buf, size_t len, int flags) {
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
    io_uring_prep_recv(sqe, fd, buf, len, flags);
    struct conninfo ci = {
        .connfd = fd,
        .type = READ
    };
    /* conninfo is 8 bytes, so it fits exactly into the u64 user_data field. */
    memcpy(&sqe->user_data, &ci, sizeof(struct conninfo));
}
void set_write_event(struct io_uring *ring, int fd, const void *buf, size_t len, int flags) {
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
    io_uring_prep_send(sqe, fd, buf, len, flags);
    struct conninfo ci = {
        .connfd = fd,
        .type = WRITE
    };
    /* Tag the SQE so the completion handler knows this was a send. */
    memcpy(&sqe->user_data, &ci, sizeof(struct conninfo));
}
void set_accept_event(struct io_uring *ring, int fd,
        struct sockaddr *cliaddr, socklen_t *clilen, unsigned flags) {
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
    io_uring_prep_accept(sqe, fd, cliaddr, clilen, flags);
    struct conninfo ci = {
        .connfd = fd,
        .type = ACCEPT
    };
    /* Tag the SQE so the completion handler knows this was an accept. */
    memcpy(&sqe->user_data, &ci, sizeof(struct conninfo));
}
int main() {
    int listenfd = socket(AF_INET, SOCK_STREAM, 0);   // listening socket
    if (listenfd == -1) return -1;

    struct sockaddr_in servaddr, clientaddr;
    memset(&servaddr, 0, sizeof(servaddr));
    servaddr.sin_family = AF_INET;
    servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
    servaddr.sin_port = htons(9999);
    if (-1 == bind(listenfd, (struct sockaddr*)&servaddr, sizeof(servaddr))) {
        return -2;
    }
    listen(listenfd, 10);
    struct io_uring_params params;
    memset(&params, 0, sizeof(params));

    struct io_uring ring;
    memset(&ring, 0, sizeof(ring));
    /* Initialize params and the ring; liburing sets up and mmaps the rings for us. */
    io_uring_queue_init_params(ENTRIES_LENGTH, &ring, &params);

    socklen_t clilen = sizeof(clientaddr);
    set_accept_event(&ring, listenfd, (struct sockaddr*)&clientaddr, &clilen, 0);
    while (1) {
        struct io_uring_cqe *cqe;

        /* Flush queued SQEs to the kernel, then block until at least one completion arrives. */
        io_uring_submit(&ring);
        io_uring_wait_cqe(&ring, &cqe);

        /* Drain up to 10 completions in one batch. */
        struct io_uring_cqe *cqes[10];
        int cqecount = io_uring_peek_batch_cqe(&ring, cqes, 10);
        unsigned count = 0;
        for (int i = 0; i < cqecount; i++) {
            cqe = cqes[i];
            count++;

            struct conninfo ci;
            memcpy(&ci, &cqe->user_data, sizeof(ci));
            if (ci.type == ACCEPT) {
                int connfd = cqe->res;   /* accept() result: new connection fd, or -errno */
                if (connfd >= 0 && connfd < MAX_CONNECTIONS) {
                    char *buffer = buf_table[connfd];
                    set_read_event(&ring, connfd, buffer, BUFFER_LENGTH, 0);
                }
                /* Re-arm the accept so further clients can connect. */
                set_accept_event(&ring, listenfd, (struct sockaddr*)&clientaddr, &clilen, 0);
            } else if (ci.type == READ) {
                int bytes_read = cqe->res;   /* recv() result: bytes received, 0 on EOF, or -errno */
                if (bytes_read == 0) {
                    /* Peer closed the connection. */
                    printf("client %d disconnected!\n", ci.connfd);
                    close(ci.connfd);
                } else if (bytes_read < 0) {
                    printf("recv error on client %d, closing\n", ci.connfd);
                    close(ci.connfd);
                } else {
                    /* Echo the received bytes back to the client. */
                    char *buffer = buf_table[ci.connfd];
                    set_write_event(&ring, ci.connfd, buffer, bytes_read, 0);
                }
            } else if (ci.type == WRITE) {
                /* Echo finished; queue the next read on this connection. */
                char *buffer = buf_table[ci.connfd];
                set_read_event(&ring, ci.connfd, buffer, BUFFER_LENGTH, 0);
            }
}
io_uring_cq_advance(&ring, count);
}
return 0;
}
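To try the server, link against liburing when compiling, for example gcc echo_server.c -o echo_server -luring (the file name here is just an example), then run it and connect with a TCP client such as nc 127.0.0.1 9999: anything the client sends is echoed back. Note that the per-fd buffer table assumes connection fds stay below MAX_CONNECTIONS.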