Module: Convolver

Defined in:
lib/convolver.rb,
lib/convolver/version.rb,
ext/convolver/convolver.c

Constant Summary collapse

VERSION =
"0.1.1"

Class Method Summary collapse

Class Method Details

.convolve(signal, kernel) ⇒ NArray

Calculates the convolution of an array of floats representing a signal with a second array representing a kernel. The two parameters must have the same rank. The output has the same rank; its size in each dimension d is given by

signal.shape[d] - kernel.shape[d] + 1

Parameters:

  • signal (NArray)

    must be same size or larger than kernel in each dimension

  • kernel (NArray)

    must be same size or smaller than signal in each dimension

Returns:

  • (NArray)

    result of convolving signal with kernel



74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
# File 'ext/convolver/convolver.c', line 74

/* Convolves narray +a+ (the signal) with narray +b+ (the kernel), both cast
 * to single-precision floats first. The two narrays must have equal rank,
 * rank must not exceed LARGEST_RANK, and b must be no larger than a in any
 * dimension. Returns a new single-float NArray whose size in each dimension
 * d is a->shape[d] - b->shape[d] + 1. Raises ArgumentError on any
 * rank/shape violation. */
static VALUE narray_convolve( VALUE self, VALUE a, VALUE b ) {
  struct NARRAY *na_a, *na_b, *na_c;
  volatile VALUE val_a, val_b, val_c;
  int target_rank, i;
  int target_shape[LARGEST_RANK];

  /* Cast both inputs to NA_SFLOAT so the raw float convolution applies. */
  val_a = na_cast_object(a, NA_SFLOAT);
  GetNArray( val_a, na_a );

  val_b = na_cast_object(b, NA_SFLOAT);
  GetNArray( val_b, na_b );

  if ( na_a->rank != na_b->rank ) {
    /* Fixed message typo: "a rack %d" -> "a rank %d" (and doubled space). */
    rb_raise( rb_eArgError, "narray a must have equal rank to narray b (a rank %d, b rank %d)", na_a->rank, na_b->rank );
  }

  if ( na_a->rank > LARGEST_RANK ) {
    rb_raise( rb_eArgError, "exceeded maximum narray rank for convolve of %d", LARGEST_RANK );
  }

  target_rank = na_a->rank;

  /* Valid-mode output shape: signal - kernel + 1 in every dimension. */
  for ( i = 0; i < target_rank; i++ ) {
    target_shape[i] = na_a->shape[i] - na_b->shape[i] + 1;
    if ( target_shape[i] < 1 ) {
      rb_raise( rb_eArgError, "narray b is bigger in one or more dimensions than narray a" );
    }
  }

  /* Allocate the result with the same NArray subclass as the signal. */
  val_c = na_make_object( NA_SFLOAT, target_rank, target_shape, CLASS_OF( val_a ) );
  GetNArray( val_c, na_c );

  convolve_raw(
    target_rank, na_a->shape, (float*) na_a->ptr,
    target_rank, na_b->shape, (float*) na_b->ptr,
    target_rank, target_shape, (float*) na_c->ptr );

  return val_c;
}

.convolve_fftw3(signal, kernel) ⇒ NArray

Uses the FFTW3 library to calculate the convolution of an array of floats representing a signal with a second array representing a kernel. The two parameters must have the same rank. The output has the same rank; its size in each dimension d is given by

signal.shape[d] - kernel.shape[d] + 1

Parameters:

  • signal (NArray)

    must be same size or larger than kernel in each dimension

  • kernel (NArray)

    must be same size or smaller than signal in each dimension

Returns:

  • (NArray)

    result of convolving signal with kernel



14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
# File 'lib/convolver.rb', line 14

# Computes the convolution of signal with kernel in the frequency domain:
# both are zero-padded to a common shape, transformed with FFTW3, multiplied
# element-wise, inverse-transformed, normalised, then sliced down to the
# valid-convolution region.
def self.convolve_fftw3 signal, kernel
  padded_shape, signal_offset, result_slice = fft_offsets( signal.shape, kernel.shape )

  # Zero-padded copy of the signal, shifted into position.
  padded_signal = NArray.sfloat( *padded_shape )
  padded_signal[*signal_offset] = signal

  # Zero-padded kernel, written in reverse so the FFT product convolves
  # rather than correlates.
  padded_kernel = NArray.sfloat( *padded_shape )
  Convolver.fit_kernel_backwards( padded_kernel, kernel )

  # Pointwise product of the spectra is convolution in the spatial domain.
  product = FFTW3.fft( padded_signal ) * FFTW3.fft( padded_kernel )

  # FFTW3's inverse is unnormalised, so scale by 1/size before slicing.
  scale = 1.0 / padded_signal.size
  ( FFTW3.ifft( product ).real * scale )[*result_slice]
end

.fft_offsets(signal_shape, kernel_shape) ⇒ Object



33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
# File 'lib/convolver.rb', line 33

# For each dimension, derives the zero-padded FFT working shape
# (signal + kernel - 1), the offset at which to place the signal
# (kernel / 2), and the range that selects the valid-convolution output
# (starting at kernel - 1, of length signal - kernel + 1).
# Returns [combined_shape, shift_by, ranges].
def self.fft_offsets signal_shape, kernel_shape
  combined_shape = []
  shift_by = []
  ranges = []
  signal_shape.zip( kernel_shape ).each do |signal_size, kernel_size|
    combined_shape << signal_size + kernel_size - 1
    offset = kernel_size - 1
    shift_by << kernel_size / 2
    ranges << ( offset...(offset + signal_size - kernel_size + 1) )
  end
  [ combined_shape, shift_by, ranges ]
end

.nn_run_layer(inputs, weights, thresholds) ⇒ NArray

Calculates activations of a fully-connected neural network layer. The transfer function after summing weights and applying threshold is a “ReLU”, equivalent to

y = x < 0.0 ? 0.0 : x

this is less sophisticated than many other neural-net transfer functions (such as the sigmoid), but is fast to calculate and to train with.

Parameters:

  • inputs (NArray)

    must be rank 1 array of floats

  • weights (NArray)

    must be rank 2 array of floats, with first dimension size of inputs, and second dimension size equal to number of outputs

  • thresholds (NArray)

    must be rank 1 array of floats, size equal to number of outputs desired

Returns:

  • (NArray)

    neuron activations



125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
# File 'ext/convolver/convolver.c', line 125

/* Runs one fully-connected neural network layer: casts the three inputs to
 * single-float narrays, validates ranks and shape compatibility, allocates
 * the rank-1 output, and delegates the weighted-sum + threshold computation
 * to nn_run_layer_raw. Raises ArgumentError on any rank/shape mismatch. */
static VALUE narray_nn_run_single_layer( VALUE self, VALUE inputs, VALUE weights, VALUE thresholds ) {
  struct NARRAY *na_inputs, *na_weights, *na_thresholds, *na_outputs;
  volatile VALUE val_inputs, val_weights, val_thresholds, val_outputs;
  int input_size, output_size;
  int output_shape[1];

  /* Inputs: rank-1 float vector; its length fixes the layer's input size. */
  val_inputs = na_cast_object(inputs, NA_SFLOAT);
  GetNArray( val_inputs, na_inputs );
  if ( na_inputs->rank != 1 ) {
    rb_raise( rb_eArgError, "input must be array of rank 1" );
  }
  input_size = na_inputs->total;

  /* Weights: rank-2, first dimension must match input_size; the second
   * dimension determines how many outputs the layer produces. */
  val_weights = na_cast_object(weights, NA_SFLOAT);
  GetNArray( val_weights, na_weights );
  if ( na_weights->rank != 2 ) {
    rb_raise( rb_eArgError, "weights must be array of rank 2" );
  }
  if ( na_weights->shape[0] != input_size ) {
    rb_raise( rb_eArgError, "weights shape mismatch, expected %d across, got %d", input_size, na_weights->shape[0] );
  }
  output_size = na_weights->shape[1];

  /* Thresholds: rank-1, one entry per output neuron. */
  val_thresholds = na_cast_object(thresholds, NA_SFLOAT);
  GetNArray( val_thresholds, na_thresholds );
  if ( na_thresholds->rank != 1 ) {
    rb_raise( rb_eArgError, "thresholds must be narray of rank 1" );
  }
  if ( na_thresholds->shape[0] != output_size ) {
    rb_raise( rb_eArgError, "thresholds expected size %d, but got %d", output_size, na_thresholds->shape[0] );
  }

  /* Allocate rank-1 output of output_size, same NArray subclass as inputs. */
  output_shape[0] = output_size;
  val_outputs = na_make_object( NA_SFLOAT, 1, output_shape, CLASS_OF( val_inputs ) );
  GetNArray( val_outputs, na_outputs );

  nn_run_layer_raw( input_size, output_size, (float*) na_inputs->ptr, (float*) na_weights->ptr,
      (float*) na_thresholds->ptr, (float*) na_outputs->ptr );

  return val_outputs;
}