Class: SentinelRb::Client::OpenAI

Inherits:
  Base
    • Object
Defined in:
lib/sentinel_rb/client/openai.rb

Overview

OpenAI client implementation for LLM interactions

Instance Method Summary

  #analyze_content(prompt) ⇒ Object
    Analyze content for relevance using an LLM.

  #fact_check(statement) ⇒ Object
    Basic fact-checking implementation.

  #initialize(config) ⇒ OpenAI constructor
    A new instance of OpenAI.

  #similarity(text1, text2) ⇒ Object
    Calculate semantic similarity between two texts using embeddings.

Constructor Details

#initialize(config) ⇒ OpenAI

Returns a new instance of OpenAI.



# File 'lib/sentinel_rb/client/openai.rb', line 10

def initialize(config)
  super
  @client = ::OpenAI::Client.new(
    access_token: ENV[config.api_key_env] || ENV["OPENAI_API_KEY"],
    log_errors: config["log_level"] == "debug"
  )
  @model = config.model
end
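
A minimal construction sketch. The require path, the model name, and the OpenStruct stand-in for the config object are assumptions; the real config class is only known to respond to #api_key_env, #model, and hash-style access for "log_level" as used by this class.

require "ostruct"
require "sentinel_rb" # assumed entry point for the gem

# Stand-in for the real config object (assumption, not the gem's config class).
config = OpenStruct.new(api_key_env: "OPENAI_API_KEY", model: "gpt-4o-mini")

client = SentinelRb::Client::OpenAI.new(config)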

Instance Method Details

#analyze_content(prompt) ⇒ Object

Analyze content for relevance using an LLM



# File 'lib/sentinel_rb/client/openai.rb', line 32

def analyze_content(prompt)
  response = @client.chat(
    parameters: {
      model: @model,
      messages: [
        {
          role: "system",
          content: "You are a prompt quality analyzer. Rate the relevance and focus of the given prompt on a scale of 0.0 to 1.0, where 1.0 means highly relevant and focused, and 0.0 means completely irrelevant or unfocused. Respond with just the numeric score."
        },
        {
          role: "user",
          content: "Analyze this prompt for relevance and focus:\n\n#{prompt}"
        }
      ],
      temperature: 0.1,
      max_tokens: 10
    }
  )

  pp response if @config["log_level"] == "debug"
  score_text = response.dig("choices", 0, "message", "content").to_s.strip
  score = extract_score(score_text)

  {
    relevance_score: score,
    raw_response: score_text
  }
rescue StandardError => e
  puts "Warning: Content analysis failed: #{e.message}" if @config["log_level"] == "debug"
  { relevance_score: 0.5, raw_response: "Analysis failed" }
end
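
A hedged usage sketch; the numeric parsing is handled by the private extract_score helper, which is not shown on this page, and the example prompt is purely illustrative.

# `client` is an initialized SentinelRb::Client::OpenAI (see the constructor above).
result = client.analyze_content("Summarize the attached quarterly report in three bullets.")

result[:relevance_score] # => Float score (0.5 when the API call fails)
result[:raw_response]    # => the model's raw reply, e.g. "0.9"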

#fact_check(statement) ⇒ Object

Basic fact-checking implementation



# File 'lib/sentinel_rb/client/openai.rb', line 65

def fact_check(statement)
  response = @client.chat(
    parameters: {
      model: @model,
      messages: [
        {
          role: "system",
          content: "You are a fact-checker. Evaluate the accuracy of the given statement. Respond with 'TRUE' if accurate, 'FALSE' if inaccurate, or 'UNKNOWN' if uncertain. Then provide a confidence score from 0.0 to 1.0."
        },
        {
          role: "user",
          content: "Fact-check this statement: #{statement}"
        }
      ],
      temperature: 0.1,
      max_tokens: 50
    }
  )

  result = response.dig("choices", 0, "message", "content").to_s.strip
  parse_fact_check_result(result)
rescue StandardError => e
  puts "Warning: Fact-checking failed: #{e.message}" if @config["log_level"] == "debug"
  { accurate: true, confidence: 0.5, reason: "Fact-checking unavailable" }
end
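
Example call, hedged: the rescue fallback suggests the parsed result is a hash with :accurate, :confidence, and :reason keys, though parse_fact_check_result itself is private and not shown here.

check = client.fact_check("Ruby was first released in 1995.")

check[:accurate]   # => true or false, derived from the model's TRUE/FALSE/UNKNOWN verdict
check[:confidence] # => Float in 0.0..1.0
check[:reason]     # => short explanation, or "Fact-checking unavailable" on error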

#similarity(text1, text2) ⇒ Object

Calculate semantic similarity between two texts using embeddings



# File 'lib/sentinel_rb/client/openai.rb', line 20

def similarity(text1, text2)
  embedding1 = get_embedding(text1)
  embedding2 = get_embedding(text2)

  cosine_similarity(embedding1, embedding2)
rescue StandardError => e
  # Fallback to basic text comparison if embeddings fail
  puts "Warning: Embeddings failed, using fallback similarity: #{e.message}" if @config["log_level"] == "debug"
  fallback_similarity(text1, text2)
end
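
The private get_embedding, cosine_similarity, and fallback_similarity helpers are not documented on this page. As an illustration of the underlying math only (not the gem's actual implementation), cosine similarity over two embedding vectors can be computed like this:

# Illustrative sketch; the gem's private cosine_similarity may differ.
def cosine_similarity(a, b)
  dot       = a.zip(b).sum { |x, y| x * y }
  magnitude = Math.sqrt(a.sum { |x| x * x }) * Math.sqrt(b.sum { |x| x * x })
  return 0.0 if magnitude.zero?

  dot / magnitude
end

cosine_similarity([1.0, 0.0, 1.0], [1.0, 0.0, 1.0]) # => 1.0 (same direction)
cosine_similarity([1.0, 0.0], [0.0, 1.0])           # => 0.0 (orthogonal)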