Module: MonitorUtilities

Included in:
Scoped
Defined in:
lib/rearview/templates/utilities.rb

Instance Method Summary

Instance Method Details

#collect_aberrations(*metrics, deviation) ⇒ Object

checks the standard deviation delta for each metric and returns a hash of metric label to delta for every metric whose delta is >= deviation; expects an even number of data points



# File 'lib/rearview/templates/utilities.rb', line 93

def collect_aberrations(*metrics, deviation)
  if metrics.first.values.length % 2 == 1
    raise "ERROR: collect_aberrations expects an even number of data points and you passed in #{metrics.first.values.length}"
  end
  aberrations = {}
  metrics.each do |m|
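    # record the metric's label against any stdev delta at or above the threshold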
    collect_comparisons(m).inject(aberrations) { |hash, delta| hash[m.label] = delta if delta >= deviation; hash }
  end
  aberrations
end
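
A minimal usage sketch (not part of the generated docs). FakeMetric and the Array#stdev patch below are hypothetical stand-ins for the metric objects and numeric helpers available inside a rearview monitor script, where this module is mixed in.

FakeMetric = Struct.new(:label, :values)   # hypothetical metric stand-in

class Array
  # hypothetical population stdev, only so the sketch runs outside the template sandbox
  def stdev
    m = sum(0.0) / size
    Math.sqrt(map { |v| (v - m)**2 }.sum / size)
  end
end

cpu = FakeMetric.new("app.cpu", [1, 2, 1, 2, 9, 1, 12, 3])   # even number of points

collect_aberrations(cpu, 2.0)
# => { "app.cpu" => <delta> }  -- the second half is much noisier than the first,
#    so the stdev delta (roughly 3.9 here) clears the 2.0 threshold
collect_aberrations(FakeMetric.new("app.mem", [5, 5, 5, 5, 5, 5]), 2.0)
# => {}  -- flat data, no aberration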

#collect_comparisons(metric) ⇒ Object

splits the metric's values into two halves and determines the delta in standard deviation between the two data sets



# File 'lib/rearview/templates/utilities.rb', line 87

def collect_comparisons(metric)
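  # split the values into two equal halves, take the standard deviation of each, then return the non-negative delta between them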
  five_minute_sv = metric.values.each_slice(metric.values.length / 2).to_a.map { |pair| pair.stdev  }
  five_minute_sv.each_slice(2).to_a.map { |pair| pair.sort }.map { |pair| pair[1].to_f - pair[0].to_f }
end
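
A small sketch under the same assumptions (hypothetical FakeMetric, hypothetical Array#stdev, call made inside a monitor script):

FakeMetric = Struct.new(:label, :values)   # hypothetical metric stand-in
class Array                                # hypothetical stdev helper, as the template code relies on
  def stdev; m = sum(0.0) / size; Math.sqrt(map { |v| (v - m)**2 }.sum / size); end
end

collect_comparisons(FakeMetric.new("app.cpu", [1, 2, 1, 2, 9, 1, 12, 3]))
# => [<delta>]  -- the halves [1, 2, 1, 2] and [9, 1, 12, 3] have stdevs of roughly 0.5 and 4.4,
#    so the single delta returned is roughly 3.9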

#deploy_check(num_points, deploy, metric) ⇒ Object

checks for a deployment and, if one is found, returns the metric label, the summed values before and after the deploy, and the percentage delta between them



# File 'lib/rearview/templates/utilities.rb', line 60

def deploy_check(num_points, deploy, metric)
  if metric == deploy
    raise "Error: You've passed the deploy metric to be analyzed against itself, which is not a valid analysis."
  elsif metric.values.size < (num_points * 2) + 1
    raise "Error: Not enough data to evaluate. There must be #{num_points} data points before and after a deploy."
  else
    results = []
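    # the most recent non-nil value in the deploy series marks the deploy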
    last_deploy = deploy.values.rindex { |v| !v.nil? }

    if last_deploy
      deploy_time = deploy.entries[last_deploy].timestamp

      # Only evaluate when exactly num_points data points fall after the deploy
      if metric.entries.drop_while { |entry| entry.timestamp <= deploy_time }.length == num_points
        before = metric.values.last((num_points * 2) + 1).first(num_points).sum
        after  = metric.values.last(num_points).sum
        delta  = before == 0 ? 0.0 : ((after - before) / before) * 100

        results = [metric.label, before, after, delta]
      end
    end

    results
  end
end
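
A usage sketch under assumptions: Entry and FakeMetric below are hypothetical stand-ins for rearview's metric objects (entries responding to #timestamp and #value, metrics responding to #label, #entries and #values), and the call is written as it would appear inside a monitor script.

Entry      = Struct.new(:timestamp, :value)    # hypothetical data point
FakeMetric = Struct.new(:label, :entries) do   # hypothetical metric stand-in
  def values; entries.map(&:value); end
end

stamps  = [60, 120, 180, 240, 300]
deploy  = FakeMetric.new("events.deploy", stamps.zip([nil, nil, 1, nil, nil]).map { |t, v| Entry.new(t, v) })
latency = FakeMetric.new("app.latency",   stamps.zip([10.0, 10.0, 12.0, 30.0, 32.0]).map { |t, v| Entry.new(t, v) })

deploy_check(2, deploy, latency)
# => ["app.latency", 20.0, 62.0, 210.0]
#    the two points before the deploy sum to 20.0, the two after to 62.0, a 210% increase
# Passing the deploy metric as the metric to analyse raises, as does a metric with
# fewer than (num_points * 2) + 1 data points.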

#outside_limits?(value, comparison_values, limit_value, limit_type) ⇒ Boolean

checks whether the value is outside the limit relative to all of the comparison values

Returns:

  • (Boolean)


# File 'lib/rearview/templates/utilities.rb', line 41

def outside_limits?(value, comparison_values, limit_value, limit_type)
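  # nil values are skipped; the check only fires when both comparison diffs are present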
  diffs = comparison_values.map { |v| value - v if value && v }.compact
  diffs.length == 2 && ((limit_type == :lower && diffs.max < limit_value) || (limit_type == :upper && diffs.min > limit_value))
end
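
A quick sketch with hypothetical numbers: a :lower limit (typically negative) flags a value that sits below every comparison value by more than the limit, while an :upper limit flags a value that exceeds every comparison value by more than the limit.

# :lower with a limit of -10 -- flag when the value is more than 10 below both comparisons
outside_limits?(40, [55, 60], -10, :lower)   # => true   (diffs -15 and -20 are both below -10)
outside_limits?(50, [55, 60], -10, :lower)   # => false  (the -5 diff is not below -10)

# :upper with a limit of 15 -- flag when the value is more than 15 above both comparisons
outside_limits?(80, [55, 60], 15, :upper)    # => true   (diffs 25 and 20 are both above 15)
outside_limits?(80, [55, nil], 15, :upper)   # => false  (a nil comparison leaves only one diff)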

#percentage_errors(metric, comparison_metrics, limit_value, limit_type, minutes = @minutes) ⇒ Object

percentage of minutes on this monitor that have values outside the limits



# File 'lib/rearview/templates/utilities.rb', line 47

def percentage_errors(metric, comparison_metrics, limit_value, limit_type, minutes = @minutes)
  raise "You can only define the limit type as :upper or :lower." if ![:upper, :lower].include? limit_type

  zipped_values = metric.values.zip(*(Array(comparison_metrics).map(&:values)))

  error_count = zipped_values.count do |(value, *comparison_values)|
    outside_limits?(value, comparison_values, limit_value, limit_type)
  end

  (error_count.to_f / minutes.to_f) * 100
end
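
A usage sketch, again with a hypothetical FakeMetric and the window length passed explicitly instead of relying on @minutes. A single comparison metric can also be passed on its own, since the method wraps comparison_metrics in Array().

FakeMetric = Struct.new(:label, :values)   # hypothetical metric stand-in

current   = FakeMetric.new("app.errors",           [30, 45, 80, 90, 20])
last_week = FakeMetric.new("app.errors.last_week", [28, 40, 50, 55, 25])
two_weeks = FakeMetric.new("app.errors.two_weeks", [31, 42, 48, 60, 22])

# Upper limit of 20: a minute counts as an error when it exceeds BOTH comparison series by more than 20.
percentage_errors(current, [last_week, two_weeks], 20, :upper, 5)
# => 40.0  -- minutes 3 and 4 (values 80 and 90) clear both comparisons by more than 20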