Class: OpenApiOpenAIClient::CreateModerationResponseResultsInnerCategories

Inherits:
ApiModelBase < Object

Defined in:
lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb

Overview

A list of the categories, and whether they are flagged or not.

Instance Attribute Summary

Class Method Summary

Instance Method Summary

Methods inherited from ApiModelBase

_deserialize, #_to_hash, #to_body, #to_s

Constructor Details

#initialize(attributes = {}) ⇒ CreateModerationResponseResultsInnerCategories

Initializes the object

Parameters:

  • attributes (Hash) (defaults to: {})

    Model attributes in the form of a hash



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 114

def initialize(attributes = {})
  if (!attributes.is_a?(Hash))
    fail ArgumentError, "The input argument (attributes) must be a hash in `OpenApiOpenAIClient::CreateModerationResponseResultsInnerCategories` initialize method"
  end

  # check to see if the attribute exists and convert string to symbol for hash key
  acceptable_attribute_map = self.class.acceptable_attribute_map
  attributes = attributes.each_with_object({}) { |(k, v), h|
    if (!acceptable_attribute_map.key?(k.to_sym))
      fail ArgumentError, "`#{k}` is not a valid attribute in `OpenApiOpenAIClient::CreateModerationResponseResultsInnerCategories`. Please check the name to make sure it's valid. List of attributes: " + acceptable_attribute_map.keys.inspect
    end
    h[k.to_sym] = v
  }

  if attributes.key?(:'hate')
    self.hate = attributes[:'hate']
  else
    self.hate = nil
  end

  if attributes.key?(:'hate_threatening')
    self.hate_threatening = attributes[:'hate_threatening']
  else
    self.hate_threatening = nil
  end

  if attributes.key?(:'harassment')
    self.harassment = attributes[:'harassment']
  else
    self.harassment = nil
  end

  if attributes.key?(:'harassment_threatening')
    self.harassment_threatening = attributes[:'harassment_threatening']
  else
    self.harassment_threatening = nil
  end

  if attributes.key?(:'illicit')
    self.illicit = attributes[:'illicit']
  else
    self.illicit = nil
  end

  if attributes.key?(:'illicit_violent')
    self.illicit_violent = attributes[:'illicit_violent']
  else
    self.illicit_violent = nil
  end

  if attributes.key?(:'self_harm')
    self.self_harm = attributes[:'self_harm']
  else
    self.self_harm = nil
  end

  if attributes.key?(:'self_harm_intent')
    self.self_harm_intent = attributes[:'self_harm_intent']
  else
    self.self_harm_intent = nil
  end

  if attributes.key?(:'self_harm_instructions')
    self.self_harm_instructions = attributes[:'self_harm_instructions']
  else
    self.self_harm_instructions = nil
  end

  if attributes.key?(:'sexual')
    self.sexual = attributes[:'sexual']
  else
    self.sexual = nil
  end

  if attributes.key?(:'sexual_minors')
    self.sexual_minors = attributes[:'sexual_minors']
  else
    self.sexual_minors = nil
  end

  if attributes.key?(:'violence')
    self.violence = attributes[:'violence']
  else
    self.violence = nil
  end

  if attributes.key?(:'violence_graphic')
    self.violence_graphic = attributes[:'violence_graphic']
  else
    self.violence_graphic = nil
  end
end
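
For illustration, a minimal construction sketch (the flag values here are hypothetical; any key outside acceptable_attribute_map raises an ArgumentError, as the constructor above shows):

categories = OpenApiOpenAIClient::CreateModerationResponseResultsInnerCategories.new(
  hate: false,
  hate_threatening: false,
  harassment: true,
  harassment_threatening: false,
  illicit: false,
  illicit_violent: false,
  self_harm: false,
  self_harm_intent: false,
  self_harm_instructions: false,
  sexual: false,
  sexual_minors: false,
  violence: false,
  violence_graphic: false
)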

Instance Attribute Details

#harassmentObject

Content that expresses, incites, or promotes harassing language towards any target.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 26

def harassment
  @harassment
end

#harassment_threateningObject

Harassment content that also includes violence or serious harm towards any target.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 29

def harassment_threatening
  @harassment_threatening
end

#hateObject

Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 20

def hate
  @hate
end

#hate_threateningObject

Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 23

def hate_threatening
  @hate_threatening
end

#illicitObject

Content that includes instructions or advice that facilitate the planning or execution of wrongdoing, or that gives advice or instruction on how to commit illicit acts. For example, "how to shoplift" would fit this category.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 32

def illicit
  @illicit
end

#illicit_violentObject

Content that includes instructions or advice that facilitate the planning or execution of wrongdoing that also includes violence, or that gives advice or instruction on the procurement of any weapon.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 35

def illicit_violent
  @illicit_violent
end

#self_harmObject

Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 38

def self_harm
  @self_harm
end

#self_harm_instructionsObject

Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 44

def self_harm_instructions
  @self_harm_instructions
end

#self_harm_intentObject

Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 41

def self_harm_intent
  @self_harm_intent
end

#sexualObject

Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 47

def sexual
  @sexual
end

#sexual_minorsObject

Sexual content that includes an individual who is under 18 years old.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 50

def sexual_minors
  @sexual_minors
end

#violenceObject

Content that depicts death, violence, or physical injury.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 53

def violence
  @violence
end

#violence_graphicObject

Content that depicts death, violence, or physical injury in graphic detail.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 56

def violence_graphic
  @violence_graphic
end

Class Method Details

.acceptable_attribute_mapObject

Returns the attribute mapping this model knows about.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 78

def self.acceptable_attribute_map
  attribute_map
end

.acceptable_attributesObject

Returns all the JSON keys this model knows about.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 83

def self.acceptable_attributes
  acceptable_attribute_map.values
end

.attribute_mapObject

Attribute mapping from ruby-style variable name to JSON key.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 59

def self.attribute_map
  {
    :'hate' => :'hate',
    :'hate_threatening' => :'hate/threatening',
    :'harassment' => :'harassment',
    :'harassment_threatening' => :'harassment/threatening',
    :'illicit' => :'illicit',
    :'illicit_violent' => :'illicit/violent',
    :'self_harm' => :'self-harm',
    :'self_harm_intent' => :'self-harm/intent',
    :'self_harm_instructions' => :'self-harm/instructions',
    :'sexual' => :'sexual',
    :'sexual_minors' => :'sexual/minors',
    :'violence' => :'violence',
    :'violence_graphic' => :'violence/graphic'
  }
end
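
Several ruby attribute names map to JSON keys containing slashes or hyphens, so the keys in the API payload differ from the ruby accessors. For example, based on the mapping above:

OpenApiOpenAIClient::CreateModerationResponseResultsInnerCategories.attribute_map[:self_harm_instructions]
# => :"self-harm/instructions"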

.build_from_hash(attributes) ⇒ Object

Builds the object from a hash

Parameters:

  • attributes (Hash)

    Model attributes in the form of a hash

Returns:

  • (Object)

    Returns the model itself



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 452

def self.build_from_hash(attributes)
  return nil unless attributes.is_a?(Hash)
  attributes = attributes.transform_keys(&:to_sym)
  transformed_hash = {}
  openapi_types.each_pair do |key, type|
    if attributes.key?(attribute_map[key]) && attributes[attribute_map[key]].nil?
      transformed_hash["#{key}"] = nil
    elsif type =~ /\AArray<(.*)>/i
      # check to ensure the input is an array given that the attribute
      # is documented as an array but the input is not
      if attributes[attribute_map[key]].is_a?(Array)
        transformed_hash["#{key}"] = attributes[attribute_map[key]].map { |v| _deserialize($1, v) }
      end
    elsif !attributes[attribute_map[key]].nil?
      transformed_hash["#{key}"] = _deserialize(type, attributes[attribute_map[key]])
    end
  end
  new(transformed_hash)
end
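
A minimal deserialization sketch, assuming a hash shaped like the API's JSON payload (keys are the JSON names from attribute_map; string keys are symbolized by transform_keys):

payload = {
  'hate' => false, 'hate/threatening' => false,
  'harassment' => true, 'harassment/threatening' => false,
  'illicit' => false, 'illicit/violent' => false,
  'self-harm' => false, 'self-harm/intent' => false, 'self-harm/instructions' => false,
  'sexual' => false, 'sexual/minors' => false,
  'violence' => false, 'violence/graphic' => false
}
categories = OpenApiOpenAIClient::CreateModerationResponseResultsInnerCategories.build_from_hash(payload)
categories.harassment # => true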

.openapi_nullableObject

List of attributes with nullable: true



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 107

def self.openapi_nullable
  Set.new([
  ])
end

.openapi_typesObject

Attribute type mapping.



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 88

def self.openapi_types
  {
    :'hate' => :'Boolean',
    :'hate_threatening' => :'Boolean',
    :'harassment' => :'Boolean',
    :'harassment_threatening' => :'Boolean',
    :'illicit' => :'Boolean',
    :'illicit_violent' => :'Boolean',
    :'self_harm' => :'Boolean',
    :'self_harm_intent' => :'Boolean',
    :'self_harm_instructions' => :'Boolean',
    :'sexual' => :'Boolean',
    :'sexual_minors' => :'Boolean',
    :'violence' => :'Boolean',
    :'violence_graphic' => :'Boolean'
  }
end

Instance Method Details

#==(o) ⇒ Object

Checks equality by comparing each attribute.

Parameters:

  • o (Object)

    the object to be compared



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 419

def ==(o)
  return true if self.equal?(o)
  self.class == o.class &&
      hate == o.hate &&
      hate_threatening == o.hate_threatening &&
      harassment == o.harassment &&
      harassment_threatening == o.harassment_threatening &&
      illicit == o.illicit &&
      illicit_violent == o.illicit_violent &&
      self_harm == o.self_harm &&
      self_harm_intent == o.self_harm_intent &&
      self_harm_instructions == o.self_harm_instructions &&
      sexual == o.sexual &&
      sexual_minors == o.sexual_minors &&
      violence == o.violence &&
      violence_graphic == o.violence_graphic
end

#eql?(o) ⇒ Boolean

Parameters:

  • o (Object)

    the object to be compared

Returns:

  • (Boolean)

See Also:

  • `==` method


# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 439

def eql?(o)
  self == o
end

#hashInteger

Calculates hash code according to all attributes.

Returns:

  • (Integer)

    Hash code



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 445

def hash
  [hate, hate_threatening, harassment, harassment_threatening, illicit, illicit_violent, self_harm, self_harm_intent, self_harm_instructions, sexual, sexual_minors, violence, violence_graphic].hash
end
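
Because equality and the hash code are computed attribute-wise, two independently built instances with the same flags compare equal (a quick sketch, reusing the payload hash from the build_from_hash example above):

a = OpenApiOpenAIClient::CreateModerationResponseResultsInnerCategories.build_from_hash(payload)
b = OpenApiOpenAIClient::CreateModerationResponseResultsInnerCategories.build_from_hash(payload)
a == b           # => true
a.eql?(b)        # => true
a.hash == b.hash # => true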

#list_invalid_propertiesObject

Shows invalid properties with the reasons. Usually used together with #valid?.

Returns:

  • (Array)

    Invalid properties with the reasons



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 209

def list_invalid_properties
  warn '[DEPRECATED] the `list_invalid_properties` method is obsolete'
  invalid_properties = Array.new
  if @hate.nil?
    invalid_properties.push('invalid value for "hate", hate cannot be nil.')
  end

  if @hate_threatening.nil?
    invalid_properties.push('invalid value for "hate_threatening", hate_threatening cannot be nil.')
  end

  if @harassment.nil?
    invalid_properties.push('invalid value for "harassment", harassment cannot be nil.')
  end

  if @harassment_threatening.nil?
    invalid_properties.push('invalid value for "harassment_threatening", harassment_threatening cannot be nil.')
  end

  if @illicit.nil?
    invalid_properties.push('invalid value for "illicit", illicit cannot be nil.')
  end

  if @illicit_violent.nil?
    invalid_properties.push('invalid value for "illicit_violent", illicit_violent cannot be nil.')
  end

  if @self_harm.nil?
    invalid_properties.push('invalid value for "self_harm", self_harm cannot be nil.')
  end

  if @self_harm_intent.nil?
    invalid_properties.push('invalid value for "self_harm_intent", self_harm_intent cannot be nil.')
  end

  if @self_harm_instructions.nil?
    invalid_properties.push('invalid value for "self_harm_instructions", self_harm_instructions cannot be nil.')
  end

  if @sexual.nil?
    invalid_properties.push('invalid value for "sexual", sexual cannot be nil.')
  end

  if @sexual_minors.nil?
    invalid_properties.push('invalid value for "sexual_minors", sexual_minors cannot be nil.')
  end

  if @violence.nil?
    invalid_properties.push('invalid value for "violence", violence cannot be nil.')
  end

  if @violence_graphic.nil?
    invalid_properties.push('invalid value for "violence_graphic", violence_graphic cannot be nil.')
  end

  invalid_properties
end
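
For instance, a freshly constructed model with no attributes set reports every flag as missing (a sketch; note the deprecation warning the method itself emits):

categories = OpenApiOpenAIClient::CreateModerationResponseResultsInnerCategories.new
categories.list_invalid_properties.first
# => 'invalid value for "hate", hate cannot be nil.'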

#to_hashHash

Returns the object in the form of a hash

Returns:

  • (Hash)

    Returns the object in the form of a hash



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 474

def to_hash
  hash = {}
  self.class.attribute_map.each_pair do |attr, param|
    value = self.send(attr)
    if value.nil?
      is_nullable = self.class.openapi_nullable.include?(attr)
      next if !is_nullable || (is_nullable && !instance_variable_defined?(:"@#{attr}"))
    end

    hash[param] = _to_hash(value)
  end
  hash
end
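
Since openapi_nullable is empty, nil attributes are omitted from the hash, and set attributes serialize back under their JSON keys (a sketch, reusing the payload hash from the build_from_hash example above):

categories = OpenApiOpenAIClient::CreateModerationResponseResultsInnerCategories.build_from_hash(payload)
categories.to_hash[:'self-harm/intent'] # => false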

#valid?Boolean

Checks whether all the properties in the model are valid

Returns:

  • (Boolean)

    true if the model is valid



# File 'lib/openapi_openai/models/create_moderation_response_results_inner_categories.rb', line 269

def valid?
  warn '[DEPRECATED] the `valid?` method is obsolete'
  return false if @hate.nil?
  return false if @hate_threatening.nil?
  return false if @harassment.nil?
  return false if @harassment_threatening.nil?
  return false if @illicit.nil?
  return false if @illicit_violent.nil?
  return false if @self_harm.nil?
  return false if @self_harm_intent.nil?
  return false if @self_harm_instructions.nil?
  return false if @sexual.nil?
  return false if @sexual_minors.nil?
  return false if @violence.nil?
  return false if @violence_graphic.nil?
  true
end
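
For instance (a sketch; the method emits the deprecation warning shown above):

OpenApiOpenAIClient::CreateModerationResponseResultsInnerCategories.new.valid?
# => false, because every flag defaults to nil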