Class: N2B::Llm::Gemini
- Inherits: Object
- Defined in: lib/n2b/llm/gemini.rb
Constant Summary
- API_URI = URI.parse('https://generativelanguage.googleapis.com/v1beta/models')
Instance Method Summary
- #analyze_code_diff(prompt_content) ⇒ Object: Sends a fully prepared diff-analysis prompt to the Gemini API and returns the raw JSON text of the model's reply.
- #get_model_name ⇒ Object: Resolves the Gemini model name from the configuration, falling back to the provider default.
- #initialize(config) ⇒ Gemini (constructor): A new instance of Gemini.
- #make_request(content) ⇒ Object: Sends content to the Gemini API and returns the reply parsed into a hash.
Constructor Details
#initialize(config) ⇒ Gemini
Returns a new instance of Gemini.
# File 'lib/n2b/llm/gemini.rb', line 11

def initialize(config)
  @config = config
end
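The constructor simply stores the configuration hash. As a minimal usage sketch (not taken from the gem's documentation): the only keys this class reads are 'access_key' and 'model', and the model id shown here is an assumption.

require 'n2b'   # assumed entry point; adjust to however the gem is loaded in your project

config = {
  'access_key' => ENV['GEMINI_API_KEY'],   # sent as the ?key= query parameter on each request
  'model'      => 'gemini-2.0-flash'       # optional; resolved through N2B::ModelConfig (hypothetical id)
}
client = N2B::Llm::Gemini.new(config)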
Instance Method Details
#analyze_code_diff(prompt_content) ⇒ Object
# File 'lib/n2b/llm/gemini.rb', line 76

def analyze_code_diff(prompt_content)
  # This method assumes prompt_content is the full, ready-to-send prompt
  # including all instructions for the LLM (system message, diff, user additions, JSON format).
  model = get_model_name
  uri = URI.parse("#{API_URI}/#{model}:generateContent?key=#{@config['access_key']}")

  request = Net::HTTP::Post.new(uri)
  request.content_type = 'application/json'
  request.body = JSON.dump({
    "contents" => [{
      "parts" => [{
        "text" => prompt_content
      }]
    }],
    "generationConfig" => {
      "responseMimeType" => "application/json"
    }
  })

  response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) do |http|
    http.request(request)
  end

  if response.code != '200'
    raise N2B::LlmApiError.new("LLM API Error: #{response.code} #{response.message} - #{response.body}")
  end

  parsed_response = JSON.parse(response.body)

  # Return the raw JSON string. CLI's call_llm_for_diff_analysis will handle parsing.
  # The Gemini API returns the analysis in parsed_response['candidates'].first['content']['parts'].first['text'],
  # which should itself be a JSON string as per our prompt's instructions.
  parsed_response['candidates'].first['content']['parts'].first['text']
end
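A hedged calling sketch: the method sends prompt_content verbatim and returns the model's text part as-is, so the caller is expected to parse the JSON string itself. The prompt text and require line below are illustrative assumptions, not part of the gem's documentation.

require 'json'
require 'n2b'   # assumed entry point

client = N2B::Llm::Gemini.new('access_key' => ENV['GEMINI_API_KEY'])
prompt = <<~PROMPT
  You are a code reviewer. Respond only with JSON of the form {"summary": "...", "issues": []}.
  Diff:
  diff --git a/foo.rb b/foo.rb
  ...
PROMPT
raw      = client.analyze_code_diff(prompt)  # raw JSON string from the model
analysis = JSON.parse(raw)                   # parsing is deliberately left to the caller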
#get_model_name ⇒ Object
# File 'lib/n2b/llm/gemini.rb', line 15

def get_model_name
  # Resolve model name using the centralized configuration
  model_name = N2B::ModelConfig.resolve_model('gemini', @config['model'])
  if model_name.nil? || model_name.empty?
    # Fallback to default if no model specified
    model_name = N2B::ModelConfig.resolve_model('gemini', N2B::ModelConfig.default_model('gemini'))
  end
  model_name
end
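A sketch of the two resolution paths, assuming N2B::ModelConfig maps the configured value (or the provider default) to a concrete model id; the model name and key below are illustrative only.

# Explicit 'model' in the config wins:
N2B::Llm::Gemini.new('model' => 'gemini-1.5-pro', 'access_key' => 'k').get_model_name
# With 'model' nil or empty, the default for the 'gemini' provider is resolved instead:
N2B::Llm::Gemini.new('access_key' => 'k').get_model_name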
#make_request(content) ⇒ Object
# File 'lib/n2b/llm/gemini.rb', line 25

def make_request(content)
  model = get_model_name
  uri = URI.parse("#{API_URI}/#{model}:generateContent?key=#{@config['access_key']}")

  request = Net::HTTP::Post.new(uri)
  request.content_type = 'application/json'
  request.body = JSON.dump({
    "contents" => [{
      "parts" => [{
        "text" => content
      }]
    }],
    "generationConfig" => {
      "responseMimeType" => "application/json"
    }
  })

  response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) do |http|
    http.request(request)
  end

  # check for errors
  if response.code != '200'
    raise N2B::LlmApiError.new("LLM API Error: #{response.code} #{response.message} - #{response.body}")
  end

  parsed_response = JSON.parse(response.body)
  answer = parsed_response['candidates'].first['content']['parts'].first['text']

  begin
    # Try to parse as JSON if it looks like JSON
    if answer.strip.start_with?('{') && answer.strip.end_with?('}')
      answer = JSON.parse(answer)
    else
      # If not JSON, wrap it in our expected format
      answer = { 'explanation' => answer, 'code' => nil }
    end
  rescue JSON::ParserError
    # If JSON parsing fails, wrap the text in our expected format
    answer = { 'explanation' => answer, 'code' => nil }
  end

  answer
end
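An illustrative call, assuming a client configured as in the constructor sketch above. Unlike #analyze_code_diff, this method parses the reply itself: a JSON object comes back already parsed, and any other text is wrapped into a hash with 'explanation' and 'code' keys.

result = client.make_request('Explain what `tar -xzf archive.tar.gz` does and suggest the command.')
if result['code']
  puts result['code']          # present when the model replied with JSON containing a 'code' key
else
  puts result['explanation']   # plain-text replies are wrapped as { 'explanation' => text, 'code' => nil }
end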