Class: IO::Event::Selector::URing

Inherits:
Object
Defined in:
ext/io/event/selector/uring.c

Instance Method Summary

Constructor Details

#initialize(loop) ⇒ Object



# File 'ext/io/event/selector/uring.c', line 246

VALUE IO_Event_Selector_URing_initialize(VALUE self, VALUE loop) {
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	IO_Event_Selector_initialize(&selector->backend, loop);
	int result = io_uring_queue_init(URING_ENTRIES, &selector->ring, 0);
	
	if (result < 0) {
		rb_syserr_fail(-result, "IO_Event_Selector_URing_initialize:io_uring_queue_init");
	}
	
	rb_update_max_fd(selector->ring.ring_fd);
	
	return self;
}
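
A usage sketch (not part of the source): the selector is constructed with the fiber that acts as the event loop, typically the fiber that will later drive #select. This assumes a Linux system where io_uring is available and the URing selector is compiled in.

require 'io/event'

# Assumption: the current fiber acts as the event loop fiber.
selector = IO::Event::Selector::URing.new(Fiber.current)

# ... create fibers, register waits, and call #select ...

selector.close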

Instance Method Details

#close ⇒ Object



# File 'ext/io/event/selector/uring.c', line 269

VALUE IO_Event_Selector_URing_close(VALUE self) {
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	close_internal(selector);
	
	return Qnil;
}

#io_close(io) ⇒ Object



# File 'ext/io/event/selector/uring.c', line 866

VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE io) {
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	int descriptor = IO_Event_Selector_io_descriptor(io);

	if (ASYNC_CLOSE) {
		struct io_uring_sqe *sqe = io_get_sqe(selector);
		io_uring_prep_close(sqe, descriptor);
		io_uring_sqe_set_data(sqe, NULL);
		io_uring_submit_now(selector);
	} else {
		close(descriptor);
	}

	// We don't wait for the result of close since it has no use in practice:
	return Qtrue;
}
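
A hedged usage sketch: #io_close closes the underlying file descriptor of the given IO, submitting an asynchronous close via io_uring when ASYNC_CLOSE is enabled, and returns true without waiting for the result. It is presumably intended for the fiber scheduler's io_close hook rather than general application code.

require 'io/event'

selector = IO::Event::Selector::URing.new(Fiber.current)
input, output = IO.pipe

# Close the write end's descriptor through the selector; returns true
# immediately, without waiting for the close operation to complete:
selector.io_close(output)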

#io_read(*args) ⇒ Object



# File 'ext/io/event/selector/uring.c', line 723

static VALUE IO_Event_Selector_URing_io_read_compatible(int argc, VALUE *argv, VALUE self)
{
	rb_check_arity(argc, 4, 5);
	
	VALUE _offset = SIZET2NUM(0);
	
	if (argc == 5) {
		_offset = argv[4];
	}
	
	return IO_Event_Selector_URing_io_read(self, argv[0], argv[1], argv[2], argv[3], _offset);
}
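
A hedged usage sketch, assuming the compatible signature io_read(fiber, io, buffer, length, offset = 0) with an IO::Buffer destination: the calling fiber is suspended until at least length bytes have been read (or an error occurs), while the event loop fiber drives #select.

require 'io/event'

selector = IO::Event::Selector::URing.new(Fiber.current)
input, output = IO.pipe
buffer = IO::Buffer.new(128)

reader = Fiber.new do
  # Read at least 1 byte into buffer; returns the number of bytes read:
  length = selector.io_read(Fiber.current, input, buffer, 1)
  puts buffer.get_string(0, length)
end

reader.transfer     # suspends inside io_read, returning control here
output.write("Hello")
selector.select(1)  # completes the read and resumes the reader fiber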

#io_wait(fiber, io, events) ⇒ Object



# File 'ext/io/event/selector/uring.c', line 565

VALUE IO_Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	int descriptor = IO_Event_Selector_io_descriptor(io);
	
	short flags = poll_flags_from_events(NUM2INT(events));
	
	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);
	
	struct IO_Event_Selector_URing_Waiting waiting = {
		.fiber = fiber,
	};
	
	struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
	
	struct io_uring_sqe *sqe = io_get_sqe(selector);
	io_uring_prep_poll_add(sqe, descriptor, flags);
	io_uring_sqe_set_data(sqe, completion);
	// If we are going to wait, we assume that we are waiting for a while:
	io_uring_submit_pending(selector);
	
	struct io_wait_arguments io_wait_arguments = {
		.selector = selector,
		.waiting = &waiting,
		.flags = flags
	};
	
	return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
}
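
A hedged usage sketch: #io_wait suspends the given fiber until the io becomes ready for the requested events (e.g. IO::READABLE, IO::WRITABLE) and returns the events that actually fired, while the event loop fiber drives #select.

require 'io/event'

selector = IO::Event::Selector::URing.new(Fiber.current)
input, output = IO.pipe

waiter = Fiber.new do
  # Suspends until input becomes readable; returns the ready event mask:
  events = selector.io_wait(Fiber.current, input, IO::READABLE)
  puts "readable" if events.anybits?(IO::READABLE)
end

waiter.transfer     # runs until io_wait suspends
output.write(".")
selector.select(1)  # the poll completes and the waiter fiber is resumed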

#io_write(*args) ⇒ Object



# File 'ext/io/event/selector/uring.c', line 847

static VALUE IO_Event_Selector_URing_io_write_compatible(int argc, VALUE *argv, VALUE self)
{
	rb_check_arity(argc, 4, 5);
	
	VALUE _offset = SIZET2NUM(0);
	
	if (argc == 5) {
		_offset = argv[4];
	}
	
	return IO_Event_Selector_URing_io_write(self, argv[0], argv[1], argv[2], argv[3], _offset);
}
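
A hedged usage sketch, assuming the compatible signature io_write(fiber, io, buffer, length, offset = 0) with an IO::Buffer source: the calling fiber is suspended until at least length bytes have been written.

require 'io/event'

selector = IO::Event::Selector::URing.new(Fiber.current)
input, output = IO.pipe
message = IO::Buffer.for("Hello World")

writer = Fiber.new do
  # Write the entire buffer; returns the number of bytes written:
  selector.io_write(Fiber.current, output, message, message.size)
end

writer.transfer     # suspends inside io_write
selector.select(1)  # completes the write and resumes the writer fiber
puts input.read(message.size)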

#loop ⇒ Object



# File 'ext/io/event/selector/uring.c', line 262

VALUE IO_Event_Selector_URing_loop(VALUE self) {
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	return selector->backend.loop;
}

#process_wait(fiber, _pid, _flags) ⇒ Object



# File 'ext/io/event/selector/uring.c', line 459

VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE _pid, VALUE _flags) {
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	pid_t pid = NUM2PIDT(_pid);
	int flags = NUM2INT(_flags);
	
	int descriptor = pidfd_open(pid, 0);
	if (descriptor < 0) {
		rb_syserr_fail(errno, "IO_Event_Selector_URing_process_wait:pidfd_open");
	}
	rb_update_max_fd(descriptor);
	
	struct IO_Event_Selector_URing_Waiting waiting = {
		.fiber = fiber,
	};
	
	struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
	
	struct process_wait_arguments process_wait_arguments = {
		.selector = selector,
		.waiting = &waiting,
		.pid = pid,
		.flags = flags,
		.descriptor = descriptor,
	};
	
	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
	struct io_uring_sqe *sqe = io_get_sqe(selector);
	io_uring_prep_poll_add(sqe, descriptor, POLLIN|POLLHUP|POLLERR);
	io_uring_sqe_set_data(sqe, completion);
	io_uring_submit_pending(selector);
	
	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
}
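
A hedged usage sketch: #process_wait opens a pidfd for the given process, polls it for readability via io_uring, and then reaps the child, returning its exit status (presumably a Process::Status) to the waiting fiber.

require 'io/event'

selector = IO::Event::Selector::URing.new(Fiber.current)
pid = Process.spawn("true")

waiter = Fiber.new do
  # Suspends until the child process exits:
  status = selector.process_wait(Fiber.current, pid, 0)
  puts status.success?
end

waiter.transfer
selector.select(1)  # the pidfd becomes readable and the waiter is resumed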

#push(fiber) ⇒ Object



# File 'ext/io/event/selector/uring.c', line 302

VALUE IO_Event_Selector_URing_push(VALUE self, VALUE fiber)
{
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	IO_Event_Selector_queue_push(&selector->backend, fiber);
	
	return Qnil;
}
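
A hedged usage sketch: #push defers a fiber onto the selector's ready queue; it is not run immediately, but on the next call to #select.

require 'io/event'

selector = IO::Event::Selector::URing.new(Fiber.current)

fiber = Fiber.new do
  puts "Deferred work."
end

selector.push(fiber)  # defer the fiber; it is not run yet
p selector.ready?     # presumably true while fibers are queued
selector.select(0)    # flushes the ready queue and runs the deferred fiber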

#raise(*args) ⇒ Object



# File 'ext/io/event/selector/uring.c', line 312

VALUE IO_Event_Selector_URing_raise(int argc, VALUE *argv, VALUE self)
{
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	return IO_Event_Selector_raise(&selector->backend, argc, argv);
}

#ready? ⇒ Boolean

Returns:

  • (Boolean)


# File 'ext/io/event/selector/uring.c', line 320

VALUE IO_Event_Selector_URing_ready_p(VALUE self) {
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	return selector->backend.ready ? Qtrue : Qfalse;
}

#resume(*args) ⇒ Object



# File 'ext/io/event/selector/uring.c', line 286

VALUE IO_Event_Selector_URing_resume(int argc, VALUE *argv, VALUE self)
{
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	return IO_Event_Selector_resume(&selector->backend, argc, argv);
}

#select(duration) ⇒ Object



# File 'ext/io/event/selector/uring.c', line 1008

VALUE IO_Event_Selector_URing_select(VALUE self, VALUE duration) {
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	// Flush any pending events:
	io_uring_submit_flush(selector);
	
	int ready = IO_Event_Selector_queue_flush(&selector->backend);
	
	int result = select_process_completions(selector);
	
	// If we:
	// 1. Didn't process any ready fibers, and
	// 2. Didn't process any events from non-blocking select (above), and
	// 3. There are no items in the ready list,
	// then we can perform a blocking select.
	if (!ready && !result && !selector->backend.ready) {
		// We might need to wait for events:
		struct select_arguments arguments = {
			.selector = selector,
			.timeout = NULL,
		};
		
		arguments.timeout = make_timeout(duration, &arguments.storage);
		
		if (!selector->backend.ready && !timeout_nonblocking(arguments.timeout)) {
			// This is a blocking operation, we wait for events:
			result = select_internal_without_gvl(&arguments);
			
			// After waiting/flushing the SQ, check if there are any completions:
			if (result > 0) {
				result = select_process_completions(selector);
			}
		}
	}
	
	return RB_INT2NUM(result);
}
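
A hedged usage sketch of driving the event loop from the loop fiber: #select flushes pending submissions and the ready queue, processes any completions, and only blocks (for at most duration seconds) when nothing was ready; it returns the number of completions processed.

require 'io/event'

selector = IO::Event::Selector::URing.new(Fiber.current)
input, output = IO.pipe
done = false

Fiber.new do
  selector.io_wait(Fiber.current, input, IO::READABLE)
  done = true
end.transfer

output.write(".")

# Drive the loop until the waiting fiber has completed:
selector.select(1) until done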

#transfer ⇒ Object



# File 'ext/io/event/selector/uring.c', line 278

VALUE IO_Event_Selector_URing_transfer(VALUE self)
{
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	return IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
}

#wakeup ⇒ Object



# File 'ext/io/event/selector/uring.c', line 1047

VALUE IO_Event_Selector_URing_wakeup(VALUE self) {
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	// If we are blocking, we can schedule a nop event to wake up the selector:
	if (selector->blocked) {
		struct io_uring_sqe *sqe = NULL;
		
		while (true) {
			sqe = io_uring_get_sqe(&selector->ring);
			if (sqe) break;
			
			rb_thread_schedule();
			
			// It's possible we became unblocked already, so we can assume the selector has already cycled at least once:
			if (!selector->blocked) return Qfalse;
		}
		
		io_uring_prep_nop(sqe);
	// If we don't clear the user data here, the SQE can eventually be recycled while still carrying stale user data, which can cause odd behaviour:
		io_uring_sqe_set_data(sqe, NULL);
		io_uring_submit(&selector->ring);
		
		return Qtrue;
	}
	
	return Qfalse;
}
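
A hedged usage sketch: #wakeup submits a no-op io_uring operation to interrupt a blocking #select from another thread, returning true if the selector was blocked and false otherwise.

require 'io/event'

selector = IO::Event::Selector::URing.new(Fiber.current)

waker = Thread.new do
  sleep 0.1
  # Interrupt the blocking #select below from another thread:
  selector.wakeup
end

selector.select(10)  # returns shortly after the wakeup, not after 10 seconds
waker.join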

#yield ⇒ Object



# File 'ext/io/event/selector/uring.c', line 294

VALUE IO_Event_Selector_URing_yield(VALUE self)
{
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	return IO_Event_Selector_yield(&selector->backend);
}
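
A hedged usage sketch: #yield appears to push the current fiber onto the ready queue and transfer control back to the event loop fiber, so the yielding fiber is resumed on the next call to #select.

require 'io/event'

selector = IO::Event::Selector::URing.new(Fiber.current)
order = []

fiber = Fiber.new do
  order << :before
  # Reschedule this fiber and hand control back to the event loop:
  selector.yield
  order << :after
end

fiber.transfer
order << :loop
selector.select(0)
p order  # expected: [:before, :loop, :after]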