Class: OpenTox::Dataset

Inherits:
Object show all
Defined in:
lib/dataset.rb

Overview

Ruby wrapper for OpenTox Dataset Webservices (opentox.org/dev/apis/api-1.2/dataset).

Instance Attribute Summary collapse

Instance Method Summary collapse

Constructor Details

#initialize(uri = nil) ⇒ Dataset


10
11
12
13
14
15
# File 'lib/dataset.rb', line 10

# Create a dataset wrapper, optionally pointing at an existing dataset URI.
# The feature, compound and data-entry caches start empty and are filled
# lazily by the corresponding accessors.
def initialize uri=nil
  super uri
  @features, @compounds, @data_entries = [], [], []
end

Instance Attribute Details

#compounds(force_update = false) ⇒ Array


48
49
50
51
52
53
54
55
56
57
58
59
# File 'lib/dataset.rb', line 48

# Compound list of the dataset, lazily fetched from the dataset service.
# @param force_update [TrueClass,FalseClass] re-fetch even if already cached
# @return [Array<OpenTox::Compound>] compounds in dataset order
def compounds force_update=false
  if @compounds.empty? or force_update
    compound_uris =
      begin
        # ordered datasets return ordered compounds
        RestClientWrapper.get(File.join(@uri,"compounds"),{},{:accept => "text/uri-list"}).split("\n")
      rescue
        # service unreachable or dataset has no compound list; treat as empty
        []
      end
    @compounds = compound_uris.map{|compound_uri| Compound.new(compound_uri)}
  end
  @compounds
end

#data_entries(force_update = false) ⇒ Array


63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
# File 'lib/dataset.rb', line 63

# Data entry matrix (rows = compounds, cols = features), lazily loaded via a
# SPARQL query against the dataset service.
# NOTE(review): relies on OLO index triples, i.e. on an ordered dataset —
# see the TODO below for external/unordered datasets.
# @param force_update [TrueClass,FalseClass] re-fetch even if already cached
# @return [Array<Array>] @data_entries[row][col]; unset cells remain nil
def data_entries force_update=false
  if @data_entries.empty? or force_update
    sparql = "SELECT ?cidx ?fidx ?value FROM <#{uri}> WHERE {
      ?data_entry <#{RDF::OLO.index}> ?cidx ;
                  <#{RDF::OT.values}> ?v .
      ?v          <#{RDF::OT.feature}> ?f;
                  <#{RDF::OT.value}> ?value .
      ?f          <#{RDF::OLO.index}> ?fidx.
      } ORDER BY ?fidx ?cidx"
      # each result row is "compound-index \t feature-index \t value"
      RestClientWrapper.get(service_uri,{:query => sparql},{:accept => "text/uri-list"}).split("\n").each do |row|
        r,c,v = row.split("\t")
        @data_entries[r.to_i] ||= []
        # adjust value class depending on feature type, StringFeature takes precedence over NumericFeature
        if features[c.to_i][RDF.type].include? RDF::OT.NumericFeature and ! features[c.to_i][RDF.type].include? RDF::OT.StringFeature
          v = v.to_f if v
        end
        @data_entries[r.to_i][c.to_i] = v if v
      end
    # TODO: fallbacks for external and unordered datasets
  end
  @data_entries
end

#features(force_update = false) ⇒ Array


34
35
36
37
38
39
40
41
42
43
44
45
# File 'lib/dataset.rb', line 34

# Feature list of the dataset, lazily fetched from the dataset service.
# @param force_update [TrueClass,FalseClass] re-fetch even if already cached
# @return [Array<OpenTox::Feature>] features in dataset order
def features force_update=false
  if @features.empty? or force_update
    feature_uris =
      begin
        # ordered datasets return ordered features
        RestClientWrapper.get(File.join(@uri,"features"),{},{:accept => "text/uri-list"}).split("\n")
      rescue
        # service unreachable or dataset has no feature list; treat as empty
        []
      end
    @features = feature_uris.map{|feature_uri| Feature.new(feature_uri)}
  end
  @features
end

Instance Method Details

#<<(row) ⇒ Object

TODO: remove? might be dangerous if feature ordering is incorrect MG: I would not remove this because add_data_entry is very slow (4 times searching in arrays) CH: do you have measurements? compound and feature arrays are not that big, I suspect that feature search/creation is the time critical step

Examples:

d = Dataset.new
d.features << Feature.new(a)
d.features << Feature.new(b)
d << [ Compound.new("c1ccccc1"), feature-value-a, feature-value-b ]

182
183
184
185
186
187
188
189
# File 'lib/dataset.rb', line 182

# Append a data row: [OpenTox::Compound, value-for-feature-0, value-for-feature-1, ...].
# Assumes @features is already populated in the matching column order.
# NOTE(review): `unless @features` only fires when @features is nil; after
# #initialize it is [] (truthy), so an empty feature list is effectively
# caught by the size comparison below instead.
def << row
  compound = row.shift # removes the compound from the array
  bad_request_error "Dataset features are empty." unless @features
  unless row.size == @features.size
    bad_request_error "Row size '#{row.size}' does not match features size '#{@features.size}'."
  end
  unless compound.class == OpenTox::Compound
    bad_request_error "First column is not a OpenTox::Compound"
  end
  @compounds << compound
  @data_entries << row
end

#add_data_entry(compound, feature, value) ⇒ Array


158
159
160
161
162
163
164
165
166
167
168
169
170
171
# File 'lib/dataset.rb', line 158

# Add a single (compound, feature, value) entry to the dataset.
# Compounds/features are appended on first sight; if the target cell is
# already occupied (duplicated measurement for the same compound/feature
# pair) the compound is appended again and the value goes into the new row.
# Perf: collects each uri list once per call instead of four times
# (this method was flagged as slow in the discussion at #<<).
# @param compound [OpenTox::Compound]
# @param feature [OpenTox::Feature]
# @param value [Object] ignored when nil
def add_data_entry compound, feature, value
  compound_uris = @compounds.collect{|c| c.uri}
  row = compound_uris.index(compound.uri)
  unless row
    @compounds << compound
    row = @compounds.size - 1 # index of the compound just appended
  end
  feature_uris = @features.collect{|f| f.uri}
  col = feature_uris.index(feature.uri)
  unless col
    @features << feature
    col = @features.size - 1 # index of the feature just appended
  end
  if @data_entries[row] and @data_entries[row][col] # duplicated values
    @compounds << compound
    row = @compounds.size - 1 # rindex of the uri just appended == last index
  end
  if value
    @data_entries[row] ||= []
    @data_entries[row][col] = value
  end
end

#compound_index(dataset, compound_index) ⇒ Object

maps a compound-index from another dataset to a compound-index from this dataset mapping works as follows: (compound c is the compound identified by the compound-index of the other dataset)

  • c occurs only once in this dataset? map compound-index of other dataset to index in this dataset

  • c occurs >1 in this dataset?

** number of occurrences is equal in both datasets? assume order is preserved(!) and map accordingly ** number of occurrences is not equal in both datasets? cannot map, raise an error


339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
# File 'lib/dataset.rb', line 339

# Maps a compound-index from another dataset to a compound-index from this
# dataset:
# - compound occurs once here: return that index
# - occurs equally often in both datasets: assume order is preserved and
#   map the nth occurrence to the nth occurrence
# - occurrence counts differ: cannot map, raise
# Fix: the raise interpolated `#{compound}` — an undefined local variable —
# so the error path died with a NameError instead of the intended message;
# use compound_uri.
# @param dataset [OpenTox::Dataset] the other dataset
# @param compound_index [Integer] index into the other dataset
# @return [Integer,nil] index in this dataset, nil if the compound is absent
def compound_index( dataset, compound_index )
  compound_uri = dataset.compounds[compound_index].uri
  self_indices = compound_indices(compound_uri)
  if self_indices==nil
    nil
  else
    dataset_indices = dataset.compound_indices(compound_uri)
    if self_indices.size==1
      self_indices.first
    elsif self_indices.size==dataset_indices.size
      # we do assume that the order is preserved (i.e., the nth occurrences in both datasets are mapped to each other)!
      self_indices[dataset_indices.index(compound_index)]
    else
      raise "cannot map compound #{compound_uri} from dataset #{dataset.uri} to dataset #{uri}, "+
        "compound occurs #{dataset_indices.size} times and #{self_indices.size} times"
    end
  end
end

#compound_indices(compound_uri) ⇒ Array

returns the indices of the compound in the dataset


361
362
363
364
365
366
367
368
369
370
371
372
373
374
# File 'lib/dataset.rb', line 361

# Indices of all occurrences of a compound (by URI) in this dataset.
# The lookup table is rebuilt whenever the requested URI is not cached.
# @param compound_uri [String]
# @return [Array,nil] positions of the compound, nil if absent
def compound_indices( compound_uri )
  unless defined?(@cmp_indices) and @cmp_indices.has_key?(compound_uri)
    @cmp_indices = {}
    # compounds() triggers the lazy load before indexing
    compounds().each_with_index do |compound,idx|
      (@cmp_indices[compound.uri] ||= []) << idx
    end
  end
  @cmp_indices[compound_uri]
end

#data_entry_value(compound_index, feature_uri) ⇒ Object

returns compound feature value using the compound-index and the feature_uri


377
378
379
380
381
# File 'lib/dataset.rb', line 377

# Compound feature value addressed by row index and feature URI.
# Triggers a (forced) data_entries load when the matrix is still empty.
# @param compound_index [Integer] row index
# @param feature_uri [String]
# @return [Object,nil] the cell value, nil for missing rows
def data_entry_value(compound_index, feature_uri)
  data_entries(true) if @data_entries.empty?
  feature_col = @features.map{|feat| feat.uri}.index feature_uri
  row = @data_entries[compound_index]
  row ? row[feature_col] : nil
end

#find_compound_uri(uri) ⇒ OpenTox::Compound

Search a dataset for a compound given its URI


108
109
110
# File 'lib/dataset.rb', line 108

# Search the dataset for a compound given its URI.
# Fix: use Enumerable#find instead of select{}.first (short-circuits on the
# first match); block variable renamed from the misleading `f`.
# @param uri [String] compound URI
# @return [OpenTox::Compound,nil] matching compound, nil if absent
def find_compound_uri(uri)
  compounds.find{|compound| compound.uri == uri}
end

#find_feature_uri(uri) ⇒ OpenTox::Feature

Search a dataset for a feature given its URI


101
102
103
# File 'lib/dataset.rb', line 101

# Search the dataset for a feature given its URI.
# Fix: use Enumerable#find instead of select{}.first (short-circuits on the
# first match).
# @param uri [String] feature URI
# @return [OpenTox::Feature,nil] matching feature, nil if absent
def find_feature_uri(uri)
  features.find{|feature| feature.uri == uri}
end

#metadata(force_update = false) ⇒ Hash

Get data (lazy loading from dataset service) overrides OpenTox#metadata to only load the metadata instead of the whole dataset


20
21
22
23
24
25
26
27
28
29
30
31
# File 'lib/dataset.rb', line 20

# Get dataset metadata (lazy loading from the dataset service).
# Overrides OpenTox#metadata to fetch only the /metadata representation
# instead of the whole dataset.
# Fix: the extraction dropped the identifier `metadata`/`@metadata` from the
# method name, the guard, the assignment and the return — restored here per
# the documented signature (#metadata(force_update = false) => Hash).
# NOTE(review): assumes @metadata is initialized (to an empty Hash) by the
# superclass — confirm against OpenTox base class.
# @param force_update [TrueClass,FalseClass] re-fetch even if already cached
# @return [Hash] predicate URI => Array of values (stringified)
def metadata force_update=false
  if @metadata.empty? or force_update
    uri = File.join(@uri,"metadata")
    begin
      parse_ntriples RestClientWrapper.get(uri,{},{:accept => "text/plain"})
    rescue # fall back to rdfxml
      parse_rdfxml RestClientWrapper.get(uri,{},{:accept => "application/rdf+xml"})
    end
    @metadata = @rdf.to_hash[RDF::URI.new(@uri)].inject({}) { |h, (predicate, values)| h[predicate] = values.collect{|v| v.to_s}; h }
  end
  @metadata
end

#predictionsArray

for prediction result datasets; assumes that there are features whose titles end in "prediction" and "confidence"


115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
# File 'lib/dataset.rb', line 115

# Extract predictions from a prediction result dataset.
# Assumes features whose titles end in "prediction" and "confidence" are
# listed under the predictedVariables metadata entry.
# Fix: the extraction dropped the `metadata` receiver before
# [RDF::OT.predictedVariables] — a bare array literal iterated over the
# vocabulary term itself instead of the predicted variable URIs.
# @return [Array<Hash>] [{:compound => ..., :value => ..., :confidence => ...}, ...]
def predictions
  predictions = []
  prediction_feature = nil
  confidence_feature = nil
  metadata[RDF::OT.predictedVariables].each do |uri|
    feature = OpenTox::Feature.new uri
    case feature.title
    when /prediction$/
      prediction_feature = feature
    when /confidence$/
      confidence_feature = feature
    end
  end
  if prediction_feature and confidence_feature
    compounds.each do |compound|
      value = values(compound,prediction_feature).first
      # numeric cast only for NumericFeature; StringFeature takes precedence
      value = value.to_f if prediction_feature[RDF.type].include? RDF::OT.NumericFeature and ! prediction_feature[RDF.type].include? RDF::OT.StringFeature
      confidence = values(compound,confidence_feature).first.to_f
      predictions << {:compound => compound, :value => value, :confidence => confidence} if value and confidence
    end
  end
  predictions
end

#split(compound_indices, feats, metadata) ⇒ OpenTox::Dataset

create a new dataset with the specified compounds and features


311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
# File 'lib/dataset.rb', line 311

# Create a new dataset with the specified compounds and features.
# Fixes: the extraction dropped the third parameter `metadata` (see the
# documented signature #split(compound_indices, feats, metadata)) and the
# receiver in `dataset.metadata = metadata`. Also use Integer instead of
# Fixnum (removed in Ruby 3.2; Integer matches the same values on old rubies).
# @param compound_indices [Array] integer indices into this dataset
# @param feats [Array,nil] features to keep (nil => all features of this dataset)
# @param metadata [Hash] metadata for the new dataset
# @return [OpenTox::Dataset] the new (uploaded) dataset
def split( compound_indices, feats, metadata )

  bad_request_error "Dataset.split : Please give compounds as indices" if compound_indices.size==0 or !compound_indices[0].is_a?(Integer)
  bad_request_error "Dataset.split : Please give features as feature objects (given: #{feats})" if feats!=nil and feats.size>0 and !feats[0].is_a?(OpenTox::Feature)
  dataset = OpenTox::Dataset.new
  dataset.metadata = metadata
  dataset.features = (feats ? feats : self.features)
  compound_indices.each do |c_idx|
    d = [ self.compounds[c_idx] ]
    dataset.features.each_with_index.each do |f,f_idx|
      # sparse rows: missing data entries become nil columns
      d << (self.data_entries[c_idx] ? self.data_entries[c_idx][f_idx] : nil)
    end
    dataset << d
  end
  dataset.put
  dataset
end

#to_csv(inchi = false) ⇒ String

converts dataset to csv format including compound smiles as first column, other column headers are feature titles


195
196
197
198
199
200
201
202
# File 'lib/dataset.rb', line 195

# Serialize the dataset as CSV: first column holds compound SMILES (or
# InChI), the remaining column headers are feature titles.
# @param inchi [TrueClass,FalseClass] use InChI instead of SMILES identifiers
# @return [String] csv representation
def to_csv(inchi=false)
  CSV.generate() do |csv| #{:force_quotes=>true}
    header = [inchi ? "InChI" : "SMILES"]
    features.each{|feature| header << feature.title}
    csv << header
    compounds.each_with_index do |compound,row|
      csv << [inchi ? compound.inchi : compound.smiles] + data_entries[row]
    end
  end
end

#to_ntriplesObject

TODO: fix bug that affects data_entry positions # DG: who wrote this comment?


261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
# File 'lib/dataset.rb', line 261

# Serialize the dataset as N-Triples. Hand-built string version (instead of
# going through the RDF writer) for better performance.
# Fix: the extraction dropped the `@metadata` receiver at the type
# assignment and the metadata iteration — restored here.
# TODO: fix bug that affects data_entry positions (inherited open issue)
# @return [String] n-triples representation
def to_ntriples # redefined string version for better performance
  ntriples = ""
  # declare the dataset as ordered so compound/feature OLO indices are honored
  @metadata[RDF.type] = [ RDF::OT.Dataset, RDF::OT.OrderedDataset ]

  @metadata.each do |predicate,values|
    [values].flatten.each do |value|
      URI.valid?(value) ? value = "<#{value}>" : value = "\"#{value}\""
      ntriples << "<#{@uri}> <#{predicate}> #{value} .\n" #\n"
    end
  end
  @parameters.each_with_index do |parameter,i|
    p_node = "_:parameter"+ i.to_s
    ntriples <<  "<#{@uri}> <#{RDF::OT.parameters}> #{p_node} .\n"
    ntriples <<  "#{p_node} <#{RDF.type}> <#{RDF::OT.Parameter}> .\n"
    parameter.each { |k,v| ntriples <<  "#{p_node} <#{k}> \"#{v.to_s.tr('"', '\'')}\" .\n" }
  end
  @features.each_with_index do |feature,i|
    ntriples <<  "<#{feature.uri}> <#{RDF.type}> <#{RDF::OT.Feature}> .\n"
    ntriples <<  "<#{feature.uri}> <#{RDF::OLO.index}> \"#{i}\"^^<http://www.w3.org/2001/XMLSchema#integer> .\n" # sorting at dataset service does not work without type information
  end
  @compounds.each_with_index do |compound,i|
    ntriples <<  "<#{compound.uri}> <#{RDF.type}> <#{RDF::OT.Compound}> .\n"
    if defined? @neighbors and neighbors.include? compound
      ntriples <<  "<#{compound.uri}> <#{RDF.type}> <#{RDF::OT.Neighbor}> .\n"
    end

    ntriples <<  "<#{compound.uri}> <#{RDF::OLO.index}> \"#{i}\"^^<http://www.w3.org/2001/XMLSchema#integer> .\n" # sorting at dataset service does not work without type information
    data_entry_node = "_:dataentry"+ i.to_s
    ntriples <<  "<#{@uri}> <#{RDF::OT.dataEntry}> #{data_entry_node} .\n"
    ntriples <<  "#{data_entry_node} <#{RDF.type}> <#{RDF::OT.DataEntry}> .\n"
    ntriples <<  "#{data_entry_node} <#{RDF::OLO.index}> \"#{i}\"^^<http://www.w3.org/2001/XMLSchema#integer> .\n" # sorting at dataset service does not work without type information
    ntriples <<  "#{data_entry_node} <#{RDF::OT.compound}> <#{compound.uri}> .\n"
    @data_entries[i].each_with_index do |value,j|
      value_node = data_entry_node+ "_value"+ j.to_s
      ntriples <<  "#{data_entry_node} <#{RDF::OT.values}> #{value_node} .\n"
      ntriples <<  "#{value_node} <#{RDF::OT.feature}> <#{@features[j].uri}> .\n"
      ntriples <<  "#{value_node} <#{RDF::OT.value}> \"#{value}\" .\n"
    end
  end
  ntriples

end

#upload(filename, wait = true) ⇒ String

Create a dataset from file (csv,sdf,…)


145
146
147
148
149
150
151
152
# File 'lib/dataset.rb', line 145

# Create a dataset from file (csv, sdf, ...) by uploading it to the service.
# Fix: the extraction left a bare ` true` where the cache-refresh call
# `metadata true` stood (parallel to `compounds true` / `features true`).
# @param filename [String] path of the file to upload
# @param wait [TrueClass,FalseClass] wait for the dataset service task
# @return [String] dataset uri
def upload filename, wait=true
  uri = RestClientWrapper.put(@uri, {:file => File.new(filename)})
  wait_for_task uri if URI.task?(uri) and wait
  # force refresh of the cached compounds, features and metadata
  compounds true
  features true
  metadata true
  @uri
end

#values(compound, feature) ⇒ Array

Find data entry values for a given compound and feature


90
91
92
93
94
# File 'lib/dataset.rb', line 90

# All data entry values for a given compound and feature. A compound may
# occur in several rows; one value per matching row is returned.
# @param compound [OpenTox::Compound]
# @param feature [OpenTox::Feature]
# @return [Array] values of the feature column for every matching row
def values(compound, feature)
  feature_col = features.collect{|f| f.uri}.index feature.uri
  matching_rows = (0 ... compounds.length).select{|row| compounds[row].uri == compound.uri}
  matching_rows.collect{|row| data_entries[row][feature_col]}
end