Class: MemprofilerPprof::Collector
- Inherits: Object
- Defined in: ext/ruby_memprofiler_pprof/collector.c
Instance Method Summary
- #allocation_retain_rate ⇒ Object
- #allocation_retain_rate=(newval) ⇒ Object
- #bt_method ⇒ Object
- #bt_method=(newval) ⇒ Object
- #flush ⇒ Object
- #initialize(*args) ⇒ Object constructor
- #live_heap_samples_count ⇒ Object
- #max_allocation_samples ⇒ Object
- #max_allocation_samples=(newval) ⇒ Object
- #max_heap_samples ⇒ Object
- #max_heap_samples=(newval) ⇒ Object
- #profile ⇒ Object
- #running? ⇒ Boolean
- #sample_rate ⇒ Object
- #sample_rate=(newval) ⇒ Object
- #start! ⇒ Object
- #stop! ⇒ Object
Constructor Details
#initialize(*args) ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 372
static VALUE collector_initialize(int argc, VALUE *argv, VALUE self) {
    // Need to do this rb_protect dance to ensure that all access to collector_cdata is through the mutex.
    struct initialize_protected_args args;
    args.argc = argc;
    args.argv = argv;
    args.self = self;
    args.cd = collector_cdata_get(self);

    mpp_pthread_mutex_lock(&args.cd->lock);
    int jump_tag = 0;
    VALUE r = rb_protect(collector_initialize_protected, (VALUE)&args, &jump_tag);
    mpp_pthread_mutex_unlock(&args.cd->lock);
    if (jump_tag) rb_jump_tag(jump_tag);
    return r;
}
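A minimal construction sketch. The require path is assumed from the extension's directory name, and the argument-free call is consistent with the #initialize(*args) signature above; the sample_rate= setter it uses is documented below.

require "ruby_memprofiler_pprof"   # assumed require path

collector = MemprofilerPprof::Collector.new
collector.sample_rate = 0.01       # sample roughly 1% of allocations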
Instance Method Details
#allocation_retain_rate ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 403
static VALUE collector_get_allocation_retain_rate(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);
    mpp_pthread_mutex_lock(&cd->lock);
    uint32_t retain_rate_u32 = cd->u32_allocation_retain_rate;
    mpp_pthread_mutex_unlock(&cd->lock);
    return DBL2NUM(((double)retain_rate_u32)/UINT32_MAX);
}
#allocation_retain_rate=(newval) ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 411
static VALUE collector_set_allocation_retain_rate(VALUE self, VALUE newval) {
    struct collector_cdata *cd = collector_cdata_get(self);
    uint32_t retain_rate_u32 = UINT32_MAX * NUM2DBL(newval);
    mpp_pthread_mutex_lock(&cd->lock);
    cd->u32_allocation_retain_rate = retain_rate_u32;
    mpp_pthread_mutex_unlock(&cd->lock);
    return newval;
}
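As the getter and setter above show, the retain rate is a Float between 0 and 1 stored internally as a fraction of UINT32_MAX, so it round-trips only approximately. A short sketch, reusing the collector assumed in the construction example earlier:

collector.allocation_retain_rate = 0.5
collector.allocation_retain_rate   # => ~0.5 after the uint32 round-trip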
#bt_method ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 944
static VALUE collector_bt_method_get(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);
    mpp_pthread_mutex_lock(&cd->lock);
    int method = cd->bt_method;
    mpp_pthread_mutex_unlock(&cd->lock);

    if (method == MPP_BT_METHOD_CFP) {
        return rb_id2sym(rb_intern("cfp"));
    } else if (method == MPP_BT_METHOD_SLOWRB) {
        return rb_id2sym(rb_intern("slowrb"));
    } else {
        MPP_ASSERT_FAIL("unknown bt_method");
        return Qundef;
    }
}
#bt_method=(newval) ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 961
static VALUE collector_bt_method_set(VALUE self, VALUE newval) {
    struct collector_cdata *cd = collector_cdata_get(self);
    ID bt_method = rb_sym2id(newval);

    int method;
    if (bt_method == rb_intern("cfp")) {
        method = MPP_BT_METHOD_CFP;
    } else if (bt_method == rb_intern("slowrb")) {
        method = MPP_BT_METHOD_SLOWRB;
    } else {
        rb_raise(rb_eArgError, "passed value for bt_method was not recognised");
    }

    mpp_pthread_mutex_lock(&cd->lock);
    cd->bt_method = method;
    mpp_pthread_mutex_unlock(&cd->lock);
    return newval;
}
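Only the two symbols handled above are accepted; anything else raises ArgumentError. A sketch of switching backtrace methods, again reusing the assumed collector from earlier:

collector.bt_method = :cfp
collector.bt_method          # => :cfp
collector.bt_method = :oops  # raises ArgumentError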
#flush ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 828
static VALUE collector_flush(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);
    struct mpp_pprof_serctx *serctx = NULL;
    char *buf_out;
    size_t buflen_out;
    char errbuf[256];
    int jump_tag = 0;
    int r = 0;
    VALUE retval = Qundef;
    struct mpp_sample *sample_list = NULL;
    struct collector_flush_prepresult_args prepresult_args;
    int lock_held = 0;

    // Whilst under the GVL, we need to get the collector lock
    mpp_pthread_mutex_lock(&cd->lock);
    lock_held = 1;

    sample_list = cd->allocation_samples;
    cd->allocation_samples = NULL;
    prepresult_args.allocation_samples_count = cd->allocation_samples_count;
    prepresult_args.heap_samples_count = cd->heap_samples_count;
    cd->allocation_samples_count = 0;
    cd->pending_size_count = 0;
    prepresult_args.dropped_samples_nolock =
        __atomic_exchange_n(&cd->dropped_samples_nolock, 0, __ATOMIC_SEQ_CST);
    prepresult_args.dropped_samples_allocation_bufsize =
        __atomic_exchange_n(&cd->dropped_samples_allocation_bufsize, 0, __ATOMIC_SEQ_CST);
    prepresult_args.dropped_samples_heap_bufsize =
        __atomic_exchange_n(&cd->dropped_samples_heap_bufsize, 0, __ATOMIC_SEQ_CST);

    // Get the current size for everything in the live allocations table.
    rb_protect(collector_flush_protected_heap_sample_size, self, &jump_tag);
    if (jump_tag) goto out;

    serctx = mpp_pprof_serctx_new();
    MPP_ASSERT_MSG(serctx, "mpp_pprof_serctx_new failed??");
    r = mpp_pprof_serctx_set_loctab(serctx, cd->loctab, errbuf, sizeof(errbuf));
    if (r == -1) {
        goto out;
    }

    // Now that we have the samples (and have processed the stringtab) we can
    // yield the lock.
    mpp_pthread_mutex_unlock(&cd->lock);
    lock_held = 0;

    // Add the allocation samples
    struct mpp_sample *s = sample_list;
    while (s) {
        r = mpp_pprof_serctx_add_sample(serctx, s, MPP_SAMPLE_TYPE_ALLOCATION, errbuf, sizeof(errbuf));
        if (r == -1) {
            goto out;
        }
        s = s->next_alloc;
    }

    // Add the heap samples
    struct collector_heap_samples_each_add_args heap_add_args;
    heap_add_args.serctx = serctx;
    heap_add_args.errbuf = errbuf;
    heap_add_args.errbuf_len = sizeof(errbuf);
    heap_add_args.r = 0;
    st_foreach(cd->heap_samples, collector_heap_samples_each_add, (st_data_t)&heap_add_args);
    if (heap_add_args.r != 0) goto out;

    r = mpp_pprof_serctx_serialize(serctx, &buf_out, &buflen_out, errbuf, sizeof(errbuf));
    if (r == -1) {
        goto out;
    }

    // Annoyingly, since rb_str_new could (in theory) throw, we have to rb_protect the whole construction
    // of our return value to ensure we don't leak serctx.
    prepresult_args.pprofbuf = buf_out;
    prepresult_args.pprofbuf_len = buflen_out;
    prepresult_args.cProfileData = cd->cProfileData;
    retval = rb_protect(collector_flush_prepresult, (VALUE)&prepresult_args, &jump_tag);

    // Do cleanup here now.
out:
    if (serctx) mpp_pprof_serctx_destroy(serctx);
    if (lock_held) mpp_pthread_mutex_unlock(&cd->lock);
    if (sample_list) internal_sample_decrement_refcount(cd, sample_list);

    // Now return-or-raise back to ruby.
    if (jump_tag) {
        rb_jump_tag(jump_tag);
    }
    if (retval == Qundef) {
        // Means we have an error to construct and throw
        rb_raise(rb_eRuntimeError, "ruby_memprofiler_pprof failed serializing pprof protobuf: %s", errbuf);
    }
    return retval;
    RB_GC_GUARD(self);
}
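#flush drains the buffered allocation samples, serializes them together with the live heap samples into a pprof protobuf, and wraps the result in the configured profile-data class (cProfileData above). A hedged usage sketch; only the flush call itself is documented here, and the pprof_data accessor on the returned object is an assumption:

collector.start!
# ... run the workload to be profiled ...
profile = collector.flush
File.binwrite("profile.pb", profile.pprof_data)   # accessor name assumed
collector.stop!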
#live_heap_samples_count ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 935
static VALUE collector_live_heap_samples_count(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);
    mpp_pthread_mutex_lock(&cd->lock);
    int64_t counter = cd->heap_samples_count;
    mpp_pthread_mutex_unlock(&cd->lock);
    return LONG2NUM(counter);
}
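A small sketch of polling this counter while tracing is active (collector assumed from the earlier examples):

collector.start!
100.times { Object.new }
collector.live_heap_samples_count   # => Integer count of currently tracked heap samples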
#max_allocation_samples ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 420
static VALUE collector_get_max_allocation_samples(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);
    mpp_pthread_mutex_lock(&cd->lock);
    int64_t v = cd->max_allocation_samples;
    mpp_pthread_mutex_unlock(&cd->lock);
    return LONG2NUM(v);
}
#max_allocation_samples=(newval) ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 428
static VALUE collector_set_max_allocation_samples(VALUE self, VALUE newval) {
    struct collector_cdata *cd = collector_cdata_get(self);
    int64_t v = NUM2LONG(newval);
    mpp_pthread_mutex_lock(&cd->lock);
    cd->max_allocation_samples = v;
    mpp_pthread_mutex_unlock(&cd->lock);
    return newval;
}
#max_heap_samples ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 437
static VALUE collector_get_max_heap_samples(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);
    mpp_pthread_mutex_lock(&cd->lock);
    int64_t v = cd->max_heap_samples;
    mpp_pthread_mutex_unlock(&cd->lock);
    return LONG2NUM(v);
}
#max_heap_samples=(newval) ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 445
static VALUE collector_set_max_heap_samples(VALUE self, VALUE newval) {
    struct collector_cdata *cd = collector_cdata_get(self);
    int64_t v = NUM2LONG(newval);
    mpp_pthread_mutex_lock(&cd->lock);
    cd->max_heap_samples = v;
    mpp_pthread_mutex_unlock(&cd->lock);
    return newval;
}
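Both limits are plain Integer attributes written under the collector lock, as the setters above show. An illustrative sketch of bounding the sample buffers (the specific values are arbitrary):

collector.max_allocation_samples = 10_000   # upper bound on buffered allocation samples
collector.max_heap_samples = 50_000         # upper bound on tracked live heap samples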
#profile ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 924

static VALUE collector_profile(VALUE self) {
    rb_need_block();

    rb_funcall(self, rb_intern("start!"), 0);
    rb_yield_values(0);
    VALUE profile_output = rb_funcall(self, rb_intern("flush"), 0);
    rb_funcall(self, rb_intern("stop!"), 0);

    return profile_output;
}
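As the implementation above shows, #profile is a convenience wrapper that calls start!, yields the block, flushes, and then stops. A usage sketch; the pprof_data accessor on the result is the same assumption as in the #flush example:

profile = collector.profile do
  1_000.times { Array.new(16) }
end
File.binwrite("profile.pb", profile.pprof_data)   # accessor name assumed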
#running? ⇒ Boolean
# File 'ext/ruby_memprofiler_pprof/collector.c', line 739
static VALUE collector_is_running(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);
    mpp_pthread_mutex_lock(&cd->lock);
    bool running = cd->is_tracing;
    mpp_pthread_mutex_unlock(&cd->lock);
    return running ? Qtrue : Qfalse;
}
#sample_rate ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 388
static VALUE collector_get_sample_rate(VALUE self) {
    struct collector_cdata *cd = collector_cdata_get(self);
    uint32_t sample_rate = __atomic_load_n(&cd->u32_sample_rate, __ATOMIC_SEQ_CST);
    return DBL2NUM(((double)sample_rate)/UINT32_MAX);
}
#sample_rate=(newval) ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 394
static VALUE collector_set_sample_rate(VALUE self, VALUE newval) {
    struct collector_cdata *cd = collector_cdata_get(self);
    double dbl_sample_rate = NUM2DBL(newval);
    // Convert the double sample rate (between 0 and 1) to a value between 0 and UINT32_MAX
    uint32_t new_sample_rate_uint = UINT32_MAX * dbl_sample_rate;
    __atomic_store_n(&cd->u32_sample_rate, new_sample_rate_uint, __ATOMIC_SEQ_CST);
    return newval;
}
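The sample rate is stored with atomic loads and stores rather than under the lock, so it can be adjusted while tracing is running. A short sketch:

collector.sample_rate = 0.25   # sample roughly 25% of allocations
collector.sample_rate          # => ~0.25 after the uint32 round-trip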
#start! ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 678
static VALUE collector_start(VALUE self) {
    int jump_tag = 0;
    struct collector_cdata *cd = collector_cdata_get(self);
    mpp_pthread_mutex_lock(&cd->lock);

    if (cd->is_tracing) goto out;

    // Don't needlessly double-initialize everything
    if (cd->heap_samples_count > 0) {
        collector_cdata_gc_free_heap_samples(cd);
        cd->heap_samples = st_init_numtable();
        cd->heap_samples_count = 0;
    }
    if (cd->allocation_samples_count > 0) {
        collector_cdata_gc_free_allocation_samples(cd);
        cd->allocation_samples = NULL;
        cd->allocation_samples_count = 0;
        cd->pending_size_count = 0;
    }

    cd->is_tracing = true;
    __atomic_store_n(&cd->dropped_samples_allocation_bufsize, 0, __ATOMIC_SEQ_CST);
    __atomic_store_n(&cd->dropped_samples_heap_bufsize, 0, __ATOMIC_SEQ_CST);
    __atomic_store_n(&cd->dropped_samples_nolock, 0, __ATOMIC_SEQ_CST);

    // Now do the things that might throw
    rb_protect(collector_start_protected, self, &jump_tag);

out:
    mpp_pthread_mutex_unlock(&cd->lock);
    if (jump_tag) {
        rb_jump_tag(jump_tag);
    }
    return Qnil;
}
#stop! ⇒ Object
# File 'ext/ruby_memprofiler_pprof/collector.c', line 720
static VALUE collector_stop(VALUE self) {
    int jump_tag = 0;
    struct collector_cdata *cd = collector_cdata_get(self);
    mpp_pthread_mutex_lock(&cd->lock);

    if (!cd->is_tracing) goto out;

    rb_protect(collector_stop_protected, self, &jump_tag);
    if (jump_tag) goto out;
    cd->is_tracing = false;

    // Don't clear any of our buffers - it's OK to access the profiling info after calling stop!

out:
    mpp_pthread_mutex_unlock(&cd->lock);
    if (jump_tag) {
        rb_jump_tag(jump_tag);
    }
    return Qnil;
}
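A hedged end-to-end sketch tying the lifecycle methods together. As the implementations above show, start! discards buffers left over from a previous run, and stop! deliberately keeps the buffers so the data can still be flushed afterwards. run_workload stands in for hypothetical application code:

collector = MemprofilerPprof::Collector.new
collector.start!
collector.running?    # => true

run_workload          # hypothetical application code

collector.stop!
collector.running?    # => false
profile = collector.flush   # buffers survive stop!, so flushing afterwards still works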