Class: ABProf::BenchmarkInstance
- Inherits: Object
- Defined in: lib/abprof/benchmark_dsl.rb
Instance Attribute Summary

- #reports ⇒ Object (readonly)
  Returns the value of attribute reports.
Instance Method Summary

- #initialize ⇒ BenchmarkInstance (constructor)
  A new instance of BenchmarkInstance.
- #print_summary(opts = {}) ⇒ Object
- #report(&block) ⇒ Object
- #report_command(cmd) ⇒ Object
- #run_burnin(opts = {}) ⇒ Object
- #run_one_trial(pts = {}) ⇒ Object
- #run_sampling(opts = {}) ⇒ Object
Constructor Details
#initialize ⇒ BenchmarkInstance
Returns a new instance of BenchmarkInstance.
# File 'lib/abprof/benchmark_dsl.rb', line 16

def initialize
  @pvalue = 0.05
  @burnin = 10
  @min_trials = 1
  @max_trials = 20
  @iters_per_trial = 10
  @bare = false
  @static_order = false
  @state = {
    :samples => [[], []],
    :p_tests => [],
    :iter => 0,
  }
end
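A minimal sketch of constructing an instance directly (it is normally set up for you by the abprof benchmark DSL); the defaults above mean up to 20 trials of 10 iterations each, a 10-iteration burn-in, and a significance threshold of p = 0.05:

  require "abprof/benchmark_dsl"

  instance = ABProf::BenchmarkInstance.new
  instance.reports   # => nil until #report or #report_command is called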
Instance Attribute Details
#reports ⇒ Object (readonly)
Returns the value of attribute reports.
# File 'lib/abprof/benchmark_dsl.rb', line 6

def reports
  @reports
end
Instance Method Details
#print_summary(opts = {}) ⇒ Object
# File 'lib/abprof/benchmark_dsl.rb', line 116

def print_summary(opts = {})
  p_val = @state[:p_tests][-1]
  diverged = false
  if p_val < @pvalue
    puts "Based on measured P value #{p_val}, we believe there is a speed difference."
    puts "As of end of run, p value is #{p_val}. Now run more times to check, or with lower p."
    summary11 = ABProf.summarize("mean", @state[:samples][0])
    summary12 = ABProf.summarize("median", @state[:samples][0])
    summary21 = ABProf.summarize("mean", @state[:samples][1])
    summary22 = ABProf.summarize("median", @state[:samples][1])
    fastest = "1"
    command = @reports[0]
    mean_times = summary21 / summary11
    median_times = summary22 / summary12
    if summary12 > summary22
      fastest = "2"
      command = @reports[1]
      mean_times = summary11 / summary21
      median_times = summary12 / summary22
    end
    puts "Lower (faster?) process is #{fastest}, command line: #{command.inspect}"
    puts "Lower command is (very) roughly #{median_times} times lower (faster?) -- assuming linear sampling, checking at median."
    puts " Checking at mean, it would be #{mean_times} lower (faster?)."
    print "\n"
    puts "Process 1 mean result: #{summary11}"
    puts "Process 1 median result: #{summary12}"
    puts "Process 2 mean result: #{summary21}"
    puts "Process 2 median result: #{summary22}"
  else
    puts "Based on measured P value #{p_val} and threshold #{@pvalue}, we believe there is"
    puts "no significant difference detectable with this set of trials."
    puts "If you believe there is a small difference that wasn't detected, try raising the number"
    puts "of iterations per trial, or the maximum number of trials."
    diverged = true
  end
end
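The speed ratio printed here is simply the ratio of the two processes' summary statistics, with the lower (faster?) process as the denominator. A minimal sketch of the same arithmetic, using made-up medians for illustration only:

  # Hypothetical median results, not real measurements
  median_1 = 2.4   # process 1
  median_2 = 1.6   # process 2

  # Process 2 has the lower median, so print_summary would report roughly:
  median_1 / median_2   # => 1.5 times lower (faster?)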
#report(&block) ⇒ Object
# File 'lib/abprof/benchmark_dsl.rb', line 41

def report(&block)
  @reports ||= []
  @reports.push block
end
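A hedged usage sketch: registering two in-process report blocks. How the blocks are invoked and what they should return is not shown in this section, so treat the measurement convention below as an assumption:

  instance = ABProf::BenchmarkInstance.new

  instance.report { time_implementation_a }   # hypothetical measurement helpers
  instance.report { time_implementation_b }

  instance.reports.size   # => 2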
#report_command(cmd) ⇒ Object
# File 'lib/abprof/benchmark_dsl.rb', line 36

def report_command(cmd)
  @reports ||= []
  @reports.push cmd
end
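A minimal sketch of registering two external benchmark commands (the script names are placeholders); #run_sampling later reads the commands back out of #reports in registration order:

  instance = ABProf::BenchmarkInstance.new

  instance.report_command "ruby benchmark_a.rb"   # placeholder scripts
  instance.report_command "ruby benchmark_b.rb"

  instance.reports   # => ["ruby benchmark_a.rb", "ruby benchmark_b.rb"]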
#run_burnin(opts = {}) ⇒ Object
# File 'lib/abprof/benchmark_dsl.rb', line 46

def run_burnin(opts = {})
  return unless @burnin > 0

  @process1.run_iters @burnin
  @process2.run_iters @burnin
end
#run_one_trial(pts = {}) ⇒ Object
# File 'lib/abprof/benchmark_dsl.rb', line 53

def run_one_trial(pts = {})
  order_rand = (rand() * 2.0).to_i
  if @static_order || order_rand == 0
    @state[:samples][0] += @process1.run_iters @iters_per_trial
    @state[:samples][1] += @process2.run_iters @iters_per_trial
  else
    # Same thing, but do process2 first
    @state[:samples][1] += @process2.run_iters @iters_per_trial
    @state[:samples][0] += @process1.run_iters @iters_per_trial
  end
  @state[:iter] += 1
end
#run_sampling(opts = {}) ⇒ Object
# File 'lib/abprof/benchmark_dsl.rb', line 66

def run_sampling(opts = {})
  process_type = @bare ? ABProf::ABBareProcess : ABProf::ABHarnessProcess
  command1 = @reports[0]
  command2 = @reports[1]
  @process1 = process_type.new command1, :debug => @debug
  @process2 = process_type.new command2, :debug => @debug

  run_burnin opts

  puts "Beginning sampling from processes." if opts[:print_output]

  # Sampling
  p_val = 1.0
  @max_trials.times do
    run_one_trial opts

    # No t-test without 3+ samples
    if @state[:samples][0].size > 2
      # Evaluate the Welch's t-test
      t = Statsample::Test.t_two_samples_independent(@state[:samples][0].to_vector, @state[:samples][1].to_vector)
      p_val = t.probability_not_equal_variance
      @state[:p_tests].push p_val

      avg_1 = @state[:samples][0].inject(0.0, &:+) / @state[:samples][0].length
      avg_2 = @state[:samples][1].inject(0.0, &:+) / @state[:samples][1].length
      smaller = "1"
      smaller = "2" if avg_1 > avg_2
      puts "Trial #{@state[:iter]}, Welch's T-test p value: #{p_val.inspect} (Guessed smaller: #{smaller})" if opts[:print_output]
    end

    # Just finished trial number i+1. So we can exit only if i+1 was at least
    # the minimum number of trials.
    break if p_val < @pvalue && (@state[:iter] + 1 >= @min_trials)
  end

  # Clean up processes
  @process1.kill
  @process2.kill

  print_summary opts unless opts[:no_print_summary]

  p_val = @state[:p_tests][-1]
  if p_val >= @pvalue && @fail_on_divergence
    STDERR.puts "Measured P value of #{p_val.inspect} is higher than allowable P value of #{@pvalue.inspect}!"
    exit 2
  end

  @state
end
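Putting it together, a minimal end-to-end sketch, assuming two worker scripts that speak the ABProf harness protocol (the script names are placeholders):

  instance = ABProf::BenchmarkInstance.new
  instance.report_command "ruby worker_a.rb"
  instance.report_command "ruby worker_b.rb"

  state = instance.run_sampling :print_output => true
  state[:p_tests].last   # p value from the most recent Welch's t-test

The method returns its internal @state hash, so the accumulated samples and p values remain available after the summary has been printed.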