When facing high concurrency, a web server usually chooses I/O multiplexing for its network I/O; Apache uses select/poll. On Linux 2.6 and later, NGINX adopted epoll for its network I/O, which raised the concurrency a web service can handle.
In this chapter, we will look at how NGINX uses epoll.
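As background, here is a minimal, standalone sketch of the raw epoll API that the rest of the chapter builds on. This is plain C, not NGINX code; the fd argument and the 5-second timeout are arbitrary choices for illustration.

#include <sys/epoll.h>
#include <stdio.h>
#include <unistd.h>

/* Minimal sketch of the raw epoll(7) API that NGINX wraps:
 * create an instance, register an fd for read events, wait. */
int watch_fd(int fd)
{
    struct epoll_event  ee, events[16];
    int                 ep, n, i;

    ep = epoll_create(1024);            /* size hint, ignored since 2.6.8 */
    if (ep == -1) {
        return -1;
    }

    ee.events = EPOLLIN;                /* interested in readability */
    ee.data.fd = fd;                    /* user data, returned by epoll_wait */

    if (epoll_ctl(ep, EPOLL_CTL_ADD, fd, &ee) == -1) {
        close(ep);
        return -1;
    }

    n = epoll_wait(ep, events, 16, 5000);   /* block for up to 5 seconds */

    for (i = 0; i < n; i++) {
        printf("fd %d is readable\n", events[i].data.fd);
    }

    close(ep);
    return 0;
}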
First, let's look at the data structure diagram:
1. Starting from the first part of the structure diagram: NGINX obtains socket descriptors, binds them to local addresses, and listens on them. Since NGINX supports multiple servers, and each server may use a different port and a different protocol family, multiple socket handles are needed. NGINX saves the information about each open socket in an ngx_listening_t structure and keeps those structures in an array (an abbreviated sketch of ngx_listening_t follows after this list). All of this is done while the cycle is initialized.
2. We distinguish two kinds of socket handles. The first kind are the listening sockets the server opens, described above; they handle incoming client connections, and a new connection produces an event on one of these sockets. The second kind is the socket handle the server obtains by accepting a client connection once it is established; the server performs its reads and writes on that handle.
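For reference, here is an abbreviated sketch of ngx_listening_t (defined in src/core/ngx_connection.h). Only the fields relevant to this discussion are shown; the real structure carries many more.

/* Abbreviated sketch of ngx_listening_t; fields trimmed for clarity. */
struct ngx_listening_s {
    ngx_socket_t        fd;          /* the listening socket descriptor  */
    struct sockaddr    *sockaddr;    /* the bound local address          */
    socklen_t           socklen;     /* size of sockaddr                 */
    ngx_str_t           addr_text;   /* printable "addr:port"            */
    int                 type;        /* SOCK_STREAM, ...                 */
    int                 backlog;     /* backlog passed to listen()       */
    ngx_connection_t   *connection;  /* connection occupied by this fd   */
    /* ... many more fields ... */
};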
Next, let's look at the processing flow drawn in the figure below.
Now let's walk through the whole process in the code.
1. Parse the configuration file and request the socket fds
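The sockets themselves are opened by ngx_open_listening_sockets() in src/core/ngx_connection.c. Below is a simplified sketch of its core loop; the retry logic, nonblocking setup, and error logging of the real function are trimmed.

/* Simplified sketch of ngx_open_listening_sockets(): for every
 * ngx_listening_t in cycle->listening, create, bind, and listen. */
ngx_int_t
ngx_open_listening_sockets(ngx_cycle_t *cycle)
{
    int              reuseaddr = 1;
    ngx_uint_t       i;
    ngx_socket_t     s;
    ngx_listening_t *ls;

    ls = cycle->listening.elts;

    for (i = 0; i < cycle->listening.nelts; i++) {

        s = ngx_socket(ls[i].sockaddr->sa_family, ls[i].type, 0);

        setsockopt(s, SOL_SOCKET, SO_REUSEADDR,
                   (const void *) &reuseaddr, sizeof(int));

        if (bind(s, ls[i].sockaddr, ls[i].socklen) == -1) {
            return NGX_ERROR;
        }

        if (listen(s, ls[i].backlog) == -1) {
            return NGX_ERROR;
        }

        ls[i].listen = 1;
        ls[i].fd = s;        /* remember the fd for the event module */
    }

    return NGX_OK;
}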
2. Initialization of the event module
static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
{
    .......

    /* run module->actions.init for the NGX_EVENT_MODULE selected by "use" */
    for (m = 0; ngx_modules[m]; m++) {
        if (ngx_modules[m]->type != NGX_EVENT_MODULE) {
            continue;
        }

        if (ngx_modules[m]->ctx_index != ecf->use) {
            continue;
        }

        module = ngx_modules[m]->ctx;

        if (module->actions.init(cycle, ngx_timer_resolution) != NGX_OK) {
            /* fatal */
            exit(2);
        }

        break;
    }

    ......

    cycle->connections =
        ngx_alloc(sizeof(ngx_connection_t) * cycle->connection_n, cycle->log);
    if (cycle->connections == NULL) {
        return NGX_ERROR;
    }

    c = cycle->connections;

    cycle->read_events =
        ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n, cycle->log);
    if (cycle->read_events == NULL) {
        return NGX_ERROR;
    }

    rev = cycle->read_events;
    for (i = 0; i < cycle->connection_n; i++) {
        rev[i].closed = 1;
        rev[i].instance = 1;
#if (NGX_THREADS)
        rev[i].lock = &c[i].lock;
        rev[i].own_lock = &c[i].lock;
#endif
    }

    cycle->write_events =
        ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n, cycle->log);
    if (cycle->write_events == NULL) {
        return NGX_ERROR;
    }

    wev = cycle->write_events;
    for (i = 0; i < cycle->connection_n; i++) {
        wev[i].closed = 1;
#if (NGX_THREADS)
        wev[i].lock = &c[i].lock;
        wev[i].own_lock = &c[i].lock;
#endif
    }

    /* thread all connections into a free list via the data pointer */
    i = cycle->connection_n;
    next = NULL;

    do {
        i--;

        c[i].data = next;
        c[i].read = &cycle->read_events[i];
        c[i].write = &cycle->write_events[i];
        c[i].fd = (ngx_socket_t) -1;

        next = &c[i];

#if (NGX_THREADS)
        c[i].lock = 0;
#endif
    } while (i);

    cycle->free_connections = next;
    cycle->free_connection_n = cycle->connection_n;

    /* for each listening socket */

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        /* grab a free connection; each listening socket fd occupies one */
        c = ngx_get_connection(ls[i].fd, cycle->log);

        ......

        rev->handler = ngx_event_accept;

        if (ngx_use_accept_mutex) {
            continue;
        }

        if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                return NGX_ERROR;
            }

        } else {
            /* with the epoll module, register a read event on the
             * listening fd (operation EPOLL_CTL_ADD), i.e. watch it
             * for new connection requests */
            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }
    }

    return NGX_OK;
}
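ngx_get_connection() pops the head of the free_connections list that the do-while loop above threaded together through the data pointer. Below is a simplified sketch; the real function in src/core/ngx_connection.c resets much more state, and it also flips the read/write events' instance flags, which is what the stale-event check relies on later.

/* Simplified sketch of ngx_get_connection(): pop a free connection
 * and attach the given socket fd to it. */
ngx_connection_t *
ngx_get_connection(ngx_socket_t s, ngx_log_t *log)
{
    ngx_connection_t *c;

    c = ngx_cycle->free_connections;

    if (c == NULL) {
        return NULL;          /* no free connection available */
    }

    /* unlink the head of the free list */
    ngx_cycle->free_connections = c->data;
    ngx_cycle->free_connection_n--;

    c->fd = s;
    c->log = log;

    return c;
}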
3. When epoll reports a new event, NGINX looks up the method that handles it. In fact it first looks up the corresponding connection: the ptr member of the epoll_event structure's data union can be used to store a user pointer, and NGINX uses it to store the connection. The code below shows how the pointer is stored; afterwards we will see how it is handled.
static ngx_int_t
ngx_epoll_add_event(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags)
{
    .......

    /* the ptr member of epoll_event's data union can carry a user
     * pointer; here it stores the connection, and the event's
     * instance flag is used to detect stale (expired) events */
    ee.events = events | (uint32_t) flags;

    /* fold instance into the low bit of ee.data.ptr (a connection
     * is aligned in memory, so its low bit is normally 0) */
    ee.data.ptr = (void *) ((uintptr_t) c | ev->instance);

    .......

    if (epoll_ctl(ep, op, c->fd, &ee) == -1) {
        ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_errno,
                      "epoll_ctl(%d, %d) failed", op, c->fd);
        return NGX_ERROR;
    }

    .......

    return NGX_OK;
}
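On the way back out, ngx_epoll_process_events() undoes this packing: the low bit of data.ptr is the instance flag, and the remaining bits are the connection pointer. The relevant lines look roughly like this:

/* Inside ngx_epoll_process_events(): recover the connection and the
 * instance bit that ngx_epoll_add_event() packed into data.ptr. */
c = event_list[i].data.ptr;

instance = (uintptr_t) c & 1;                       /* low bit = instance */
c = (ngx_connection_t *) ((uintptr_t) c & (uintptr_t) ~1);

rev = c->read;

if (c->fd == -1 || rev->instance != instance) {
    /*
     * the connection was closed (and possibly reused) while this
     * event was already sitting in the epoll_wait() result set,
     * so the event is stale and must be ignored
     */
    continue;
}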
4. Each worker process enters its processing loop:
static void
ngx_worker_process_cycle(ngx_cycle_t *cycle, void *data)
{
    ......

    ngx_worker_process_init(cycle, worker);

    ......

    for ( ;; ) {
        ......
        ngx_process_events_and_timers(cycle);
        ......
    }
}
5. Every loop iteration processes events and timers:
void
ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ......

    (void) ngx_process_events(cycle, timer, flags);

    ......
}
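ngx_process_events is a macro that expands to the active event module's actions.process_events; with epoll, that is ngx_epoll_process_events(). Under that assumption, here is a trimmed sketch of its main loop, with error handling, timer updates, and the posted-event queues omitted.

/* Trimmed sketch of ngx_epoll_process_events(): wait for events,
 * filter out stale ones, then run the read/write handlers. */
static ngx_int_t
ngx_epoll_process_events(ngx_cycle_t *cycle, ngx_msec_t timer,
    ngx_uint_t flags)
{
    int               events, i;
    uint32_t          revents;
    ngx_connection_t *c;

    /* wait for events on all registered fds, up to "timer" ms */
    events = epoll_wait(ep, event_list, (int) nevents, timer);

    for (i = 0; i < events; i++) {

        c = event_list[i].data.ptr;
        /* ... instance check, as shown in step 3 ... */

        revents = event_list[i].events;

        if ((revents & EPOLLIN) && c->read->active) {
            /* for a listening connection this handler is
             * ngx_event_accept, set up in step 2 */
            c->read->handler(c->read);
        }

        if ((revents & EPOLLOUT) && c->write->active) {
            c->write->handler(c->write);
        }
    }

    return NGX_OK;
}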