Class: IO::Event::Selector::EPoll
- Inherits:
-
Object
- Object
- IO::Event::Selector::EPoll
- Defined in:
- ext/io/event/selector/epoll.c
Instance Method Summary collapse
- #close ⇒ Object
- #initialize(loop) ⇒ Object constructor
- #io_read(*args) ⇒ Object
- #io_wait(fiber, io, events) ⇒ Object
- #io_write(*args) ⇒ Object
- #loop ⇒ Object
-
#process_wait(fiber, _pid, _flags) ⇒ Object
rb_define_method(IO_Event_Selector_EPoll, "io_write", IO_Event_Selector_EPoll_io_write, 5);
- #push(fiber) ⇒ Object
- #raise(*args) ⇒ Object
- #ready? ⇒ Boolean
- #resume(*args) ⇒ Object
-
#select(duration) ⇒ Object
TODO This function is not re-entrant and we should document and assert as such.
- #transfer ⇒ Object
- #wakeup ⇒ Object
- #yield ⇒ Object
Constructor Details
#initialize(loop) ⇒ Object
359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 |
# File 'ext/io/event/selector/epoll.c', line 359
// Initialize the selector: attach the backend to `loop`, create the epoll
// descriptor, and open the interrupt used to wake a blocked `select`.
VALUE IO_Event_Selector_EPoll_initialize(VALUE self, VALUE loop) {
	struct IO_Event_Selector_EPoll *epoll_selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll_selector);
	
	IO_Event_Selector_initialize(&epoll_selector->backend, loop);
	
	int descriptor = epoll_create1(EPOLL_CLOEXEC);
	if (descriptor == -1) {
		// rb_sys_fail raises SystemCallError and does not return:
		rb_sys_fail("IO_Event_Selector_EPoll_initialize:epoll_create");
	}
	
	epoll_selector->descriptor = descriptor;
	rb_update_max_fd(epoll_selector->descriptor);
	
	// The interrupt lets `wakeup` break a blocked `epoll_wait`:
	IO_Event_Interrupt_open(&epoll_selector->interrupt);
	IO_Event_Interrupt_add(&epoll_selector->interrupt, epoll_selector);
	
	return self;
}
|
Instance Method Details
#close ⇒ Object
387 388 389 390 391 392 393 394 |
# File 'ext/io/event/selector/epoll.c', line 387
// Close the selector, releasing the epoll descriptor and interrupt.
VALUE IO_Event_Selector_EPoll_close(VALUE self) {
	struct IO_Event_Selector_EPoll *epoll_selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll_selector);
	
	close_internal(epoll_selector);
	
	return Qnil;
}
|
#io_read(*args) ⇒ Object
660 661 662 663 664 665 666 667 668 669 670 671 |
# File 'ext/io/event/selector/epoll.c', line 660
// Compatibility shim for `io_read`: accepts 4 or 5 arguments, with the
// optional 5th argument being the offset (defaulting to 0).
VALUE IO_Event_Selector_EPoll_io_read_compatible(int argc, VALUE *argv, VALUE self)
{
	rb_check_arity(argc, 4, 5);
	
	VALUE offset = (argc == 5) ? argv[4] : SIZET2NUM(0);
	
	return IO_Event_Selector_EPoll_io_read(self, argv[0], argv[1], argv[2], argv[3], offset);
}
|
#io_wait(fiber, io, events) ⇒ Object
547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 |
# File 'ext/io/event/selector/epoll.c', line 547
// Suspend `fiber` until `events` (an integer bitmask) are ready on `io`.
// Registers the descriptor with epoll and transfers control away; the
// `rb_ensure` guarantees deregistration even if the fiber is cancelled.
VALUE IO_Event_Selector_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
struct IO_Event_Selector_EPoll *selector = NULL;
TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
int descriptor = IO_Event_Selector_io_descriptor(io);
struct IO_Event_Selector_EPoll_Waiting waiting = {
.list = {.type = &IO_Event_Selector_EPoll_io_wait_list_type},
.fiber = fiber,
.events = RB_NUM2INT(events),
};
int result = IO_Event_Selector_EPoll_Waiting_register(selector, io, descriptor, &waiting);
if (result == -1) {
// EPERM: the file doesn't support epoll (e.g. a regular file), which is
// always "ready" — reschedule the fiber and report the requested events:
if (errno == EPERM) {
IO_Event_Selector_queue_push(&selector->backend, fiber);
IO_Event_Selector_yield(&selector->backend);
return events;
}
// Any other registration failure raises SystemCallError from errno:
rb_sys_fail("IO_Event_Selector_EPoll_io_wait:IO_Event_Selector_EPoll_Waiting_register");
}
struct io_wait_arguments io_wait_arguments = {
.selector = selector,
.waiting = &waiting,
};
// Transfer to the event loop; ensure cleanup runs on any exit path:
return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
}
|
#io_write(*args) ⇒ Object
756 757 758 759 760 761 762 763 764 765 766 767 |
# File 'ext/io/event/selector/epoll.c', line 756
// Compatibility shim for `io_write`: accepts 4 or 5 arguments, with the
// optional 5th argument being the offset (defaulting to 0).
VALUE IO_Event_Selector_EPoll_io_write_compatible(int argc, VALUE *argv, VALUE self)
{
	rb_check_arity(argc, 4, 5);
	
	VALUE offset = (argc == 5) ? argv[4] : SIZET2NUM(0);
	
	return IO_Event_Selector_EPoll_io_write(self, argv[0], argv[1], argv[2], argv[3], offset);
}
|
#loop ⇒ Object
380 381 382 383 384 385 |
# File 'ext/io/event/selector/epoll.c', line 380
// Return the event loop fiber associated with this selector.
VALUE IO_Event_Selector_EPoll_loop(VALUE self) {
	struct IO_Event_Selector_EPoll *epoll_selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll_selector);
	
	return epoll_selector->backend.loop;
}
|
#process_wait(fiber, _pid, _flags) ⇒ Object
rb_define_method(IO_Event_Selector_EPoll, "io_write", IO_Event_Selector_EPoll_io_write, 5);
479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 |
# File 'ext/io/event/selector/epoll.c', line 479
// Suspend `fiber` until the process identified by `_pid` exits.
//
// Uses pidfd_open(2) to obtain a descriptor that becomes readable when the
// process terminates, then registers it with epoll. The `rb_ensure` cleanup
// (process_wait_ensure) is responsible for releasing the registration and
// descriptor on every exit path after successful registration.
VALUE IO_Event_Selector_EPoll_process_wait(VALUE self, VALUE fiber, VALUE _pid, VALUE _flags) {
	struct IO_Event_Selector_EPoll *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
	
	pid_t pid = NUM2PIDT(_pid);
	int flags = NUM2INT(_flags);
	
	int descriptor = pidfd_open(pid, 0);
	
	if (descriptor == -1) {
		rb_sys_fail("IO_Event_Selector_EPoll_process_wait:pidfd_open");
	}
	
	rb_update_max_fd(descriptor);
	
	struct IO_Event_Selector_EPoll_Waiting waiting = {
		.list = {.type = &IO_Event_Selector_EPoll_process_wait_list_type},
		.fiber = fiber,
		// The pidfd becomes readable on process exit:
		.events = IO_EVENT_READABLE,
	};
	
	int result = IO_Event_Selector_EPoll_Waiting_register(selector, 0, descriptor, &waiting);
	
	if (result == -1) {
		// Save errno before close(2), which may overwrite it — otherwise
		// rb_sys_fail could raise the wrong SystemCallError:
		int saved_errno = errno;
		close(descriptor);
		errno = saved_errno;
		rb_sys_fail("IO_Event_Selector_EPoll_process_wait:IO_Event_Selector_EPoll_Waiting_register");
	}
	
	struct process_wait_arguments process_wait_arguments = {
		.selector = selector,
		.pid = pid,
		.flags = flags,
		.descriptor = descriptor,
		.waiting = &waiting,
	};
	
	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
}
|
#push(fiber) ⇒ Object
420 421 422 423 424 425 426 427 428 |
# File 'ext/io/event/selector/epoll.c', line 420
// Append `fiber` to the selector's ready queue for later resumption.
VALUE IO_Event_Selector_EPoll_push(VALUE self, VALUE fiber)
{
	struct IO_Event_Selector_EPoll *epoll_selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll_selector);
	
	IO_Event_Selector_queue_push(&epoll_selector->backend, fiber);
	
	return Qnil;
}
|
#raise(*args) ⇒ Object
430 431 432 433 434 435 436 |
# File 'ext/io/event/selector/epoll.c', line 430
// Forward `raise(*args)` to the backend implementation.
VALUE IO_Event_Selector_EPoll_raise(int argc, VALUE *argv, VALUE self)
{
	struct IO_Event_Selector_EPoll *epoll_selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll_selector);
	
	return IO_Event_Selector_raise(&epoll_selector->backend, argc, argv);
}
|
#ready? ⇒ Boolean
438 439 440 441 442 443 |
# File 'ext/io/event/selector/epoll.c', line 438
// Predicate: does the backend have fibers waiting in the ready queue?
VALUE IO_Event_Selector_EPoll_ready_p(VALUE self) {
	struct IO_Event_Selector_EPoll *epoll_selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll_selector);
	
	if (epoll_selector->backend.ready) {
		return Qtrue;
	}
	
	return Qfalse;
}
|
#resume(*args) ⇒ Object
404 405 406 407 408 409 410 |
# File 'ext/io/event/selector/epoll.c', line 404
// Forward `resume(*args)` to the backend implementation.
VALUE IO_Event_Selector_EPoll_resume(int argc, VALUE *argv, VALUE self)
{
	struct IO_Event_Selector_EPoll *epoll_selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll_selector);
	
	return IO_Event_Selector_resume(&epoll_selector->backend, argc, argv);
}
|
#select(duration) ⇒ Object
TODO This function is not re-entrant and we should document and assert as such.
965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 |
# File 'ext/io/event/selector/epoll.c', line 965
// Run one iteration of the selector: flush ready fibers, poll epoll without
// blocking, and only block (up to `duration`) if nothing was ready anywhere.
// NOTE(review): per the accompanying TODO, this function is not re-entrant.
VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
struct IO_Event_Selector_EPoll *selector = NULL;
TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
// Resume any fibers already queued; non-zero if any were processed:
int ready = IO_Event_Selector_queue_flush(&selector->backend);
struct select_arguments arguments = {
.selector = selector,
// Zero timeout => the first poll below is non-blocking:
.storage = {
.tv_sec = 0,
.tv_nsec = 0
},
.saved = {},
};
arguments.timeout = &arguments.storage;
// Process any currently pending events:
select_internal_with_gvl(&arguments);
// If we:
// 1. Didn't process any ready fibers, and
// 2. Didn't process any events from non-blocking select (above), and
// 3. There are no items in the ready list,
// then we can perform a blocking select.
if (!ready && !arguments.count && !selector->backend.ready) {
arguments.timeout = make_timeout(duration, &arguments.storage);
if (!timeout_nonblocking(arguments.timeout)) {
// Wait for events to occur
select_internal_without_gvl(&arguments);
}
}
if (arguments.count) {
// Dispatch the collected events; ensure cleanup runs even if a handler raises:
return rb_ensure(select_handle_events, (VALUE)&arguments, select_handle_events_ensure, (VALUE)&arguments);
} else {
// No events were processed this iteration:
return RB_INT2NUM(0);
}
}
|
#transfer ⇒ Object
396 397 398 399 400 401 402 |
# File 'ext/io/event/selector/epoll.c', line 396
// Transfer control to the event loop fiber with no arguments.
VALUE IO_Event_Selector_EPoll_transfer(VALUE self)
{
	struct IO_Event_Selector_EPoll *epoll_selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll_selector);
	
	return IO_Event_Selector_fiber_transfer(epoll_selector->backend.loop, 0, NULL);
}
|
#wakeup ⇒ Object
1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 |
# File 'ext/io/event/selector/epoll.c', line 1006
// Wake up the selector if it is currently blocked in `select`.
// Returns Qtrue when an interrupt was signalled, Qfalse otherwise.
VALUE IO_Event_Selector_EPoll_wakeup(VALUE self) {
	struct IO_Event_Selector_EPoll *epoll_selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll_selector);
	
	// Nothing to do unless the selector is blocked waiting for events:
	if (!epoll_selector->blocked) {
		return Qfalse;
	}
	
	// Schedule a nop event to break out of the blocking wait:
	IO_Event_Interrupt_signal(&epoll_selector->interrupt);
	
	return Qtrue;
}
|
#yield ⇒ Object
412 413 414 415 416 417 418 |
# File 'ext/io/event/selector/epoll.c', line 412
// Yield the current fiber back to the backend's scheduler.
VALUE IO_Event_Selector_EPoll_yield(VALUE self)
{
	struct IO_Event_Selector_EPoll *epoll_selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll_selector);
	
	return IO_Event_Selector_yield(&epoll_selector->backend);
}
|