Class: Brakeman::LLM

Inherits:
Object
  • Object
show all
Defined in:
lib/brakeman-llm.rb

Overview

Simple wrapper for RubyLLM

Instance Attribute Summary collapse

Instance Method Summary collapse

Constructor Details

#initialize(model:, provider:, instructions: nil, prompt: nil, assume_model_exists: false, **kwargs) ⇒ LLM

Configure RubyLLM



29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
# File 'lib/brakeman-llm.rb', line 29

# Configure RubyLLM.
#
# model:/provider: are stored for later chat calls; instructions: and prompt:
# override the built-in system instructions and analysis prompt. All remaining
# keyword options are applied to an isolated RubyLLM context: :api_key,
# :api_base and :use_system_role are scoped to the given provider (the setter
# becomes e.g. "openai_api_key="); anything else is set on the config as-is.
def initialize(model:, provider:, instructions: nil, prompt: nil, assume_model_exists: false, **kwargs)
  # Options whose config setter must carry the provider name as a prefix.
  provider_scoped = %i[api_key api_base use_system_role]

  @llm = RubyLLM.context do |config|
    kwargs.each do |key, value|
      setter = provider_scoped.include?(key) ? "#{provider}_#{key}=" : "#{key}="
      config.send(setter, value)
    end

    # Default to quiet logging unless the caller asked for a level explicitly.
    config.log_level = :error unless kwargs.key?(:log_level)
  end

  RubyLLM.logger.level = Logger::ERROR if @llm.config.log_level == :error

  @instructions = instructions || 'You are a world-class application security expert with deep expertise in Ruby and Ruby on Rails security.'

  @prompt = prompt || <<~PROMPT
    Analyze the following security warning resulting from analyzing a Ruby on Rails application with the static analysis security tool Brakeman.
    Explain the security vulnerability and potential fixes. Jump straight into the explanation, do not have a casual introduction.
    Do not ask follow-up questions, as this is not an interactive prompt.
    Keep the explanation to less than 400 words.
    Ignore 'fingerprint' and 'warning_code' fields and do not explain them.
  PROMPT

  @model = model
  @provider = provider
  @assume_model_exists = assume_model_exists
end

Instance Attribute Details

#assume_model_existsObject

Returns the value of attribute assume_model_exists.



25
26
27
# File 'lib/brakeman-llm.rb', line 25

# Whether RubyLLM should skip validating the model name against its registry.
def assume_model_exists = @assume_model_exists

#instructionsObject

Returns the value of attribute instructions.



25
26
27
# File 'lib/brakeman-llm.rb', line 25

# System instructions sent to the LLM before each analysis.
def instructions = @instructions

#llmObject (readonly)

Returns the value of attribute llm.



26
27
28
# File 'lib/brakeman-llm.rb', line 26

# The configured RubyLLM context (read-only).
def llm = @llm

#modelObject

Returns the value of attribute model.



25
26
27
# File 'lib/brakeman-llm.rb', line 25

# Model identifier passed to RubyLLM's chat API.
def model = @model

#promptObject

Returns the value of attribute prompt.



25
26
27
# File 'lib/brakeman-llm.rb', line 25

# Prompt text prepended to each warning before it is sent to the LLM.
def prompt = @prompt

#providerObject

Returns the value of attribute provider.



25
26
27
# File 'lib/brakeman-llm.rb', line 25

def provider
  @provider
end

Instance Method Details

#analyze_warning(warning) ⇒ Object

Analyze a single Brakeman warning. Returns the analysis as a string.



62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
# File 'lib/brakeman-llm.rb', line 62

# Analyze a single Brakeman warning. Returns the LLM's analysis as a string.
def analyze_warning(warning)
  # Fresh chat session per warning, primed with the system instructions.
  session = @llm.chat(model: @model, provider: @provider, assume_model_exists: @assume_model_exists)
  session.with_instructions(@instructions)

  # Prompt + optional local background doc + the warning itself as JSON.
  question = <<~INPUT
    #{@prompt}
    #{help_doc(warning)}

    The following is a Brakeman security warning in JSON format that describes a potential security vulnerability:
    #{warning.to_json}
  INPUT

  session.ask(question).content
end

#help_doc(warning) ⇒ Object



79
80
81
82
83
84
85
86
87
88
89
90
# File 'lib/brakeman-llm.rb', line 79

# Look up the local copy of the Brakeman documentation page referenced by the
# warning's link. Returns a background-information string when the page exists
# on disk; otherwise prints a diagnostic and returns nil. Links that do not
# point at brakemanscanner.org also yield nil.
def help_doc(warning)
  # Capture the site-relative path (e.g. "docs/warning_types/sql_injection/")
  # through an explicit MatchData instead of the fragile $1 global.
  match = warning.link.match %r{https://brakemanscanner.org/(.+)}
  return unless match

  doc = File.join(__dir__, '..', match[1], "index.markdown")

  if File.exist? doc
    content = File.read doc
    "Here is background information about this type of vulnerability: #{content}"
  else
    puts "No file: #{doc}"
  end
end