Class: N2B::Llm::OpenAi
- Inherits: Object
- Class hierarchy: Object > N2B::Llm::OpenAi
- Defined in:
- lib/n2b/llm/open_ai.rb
Constant Summary collapse
- API_URI =
URI.parse('https://api.openai.com/v1/chat/completions')
Instance Method Summary collapse
- #analyze_code_diff(prompt_content, expect_json: true) ⇒ Object
- #get_model_name ⇒ Object
-
#initialize(config) ⇒ OpenAi
constructor
A new instance of OpenAi.
- #make_request(content, expect_json: true) ⇒ Object
Constructor Details
#initialize(config) ⇒ OpenAi
Returns a new instance of OpenAi.
11 12 13 |
# File 'lib/n2b/llm/open_ai.rb', line 11 def initialize(config) @config = config end |
Instance Method Details
#analyze_code_diff(prompt_content, expect_json: true) ⇒ Object
64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 |
# File 'lib/n2b/llm/open_ai.rb', line 64 def analyze_code_diff(prompt_content, expect_json: true) # This method assumes prompt_content is the full, ready-to-send prompt # including all instructions for the LLM (system message, diff, user additions, JSON format). request = Net::HTTP::Post.new(API_URI) request.content_type = 'application/json' request['Authorization'] = "Bearer #{@config['access_key']}" body_hash = { "model" => get_model_name, "messages" => [ { "role" => "user", "content" => prompt_content } ], "max_tokens" => @config['max_tokens'] || 1500 } body_hash["response_format"] = { "type" => "json_object" } if expect_json request.body = JSON.dump(body_hash) response = Net::HTTP.start(API_URI.hostname, API_URI.port, use_ssl: true) do |http| http.request(request) end if response.code != '200' raise N2B::LlmApiError.new("LLM API Error: #{response.code} #{response.} - #{response.body}") end answer = JSON.parse(response.body)['choices'].first['message']['content'] answer end |
#get_model_name ⇒ Object
15 16 17 18 19 20 21 22 23 |
# File 'lib/n2b/llm/open_ai.rb', line 15 def get_model_name # Resolve model name using the centralized configuration model_name = N2B::ModelConfig.resolve_model('openai', @config['model']) if model_name.nil? || model_name.empty? # Fallback to default if no model specified model_name = N2B::ModelConfig.resolve_model('openai', N2B::ModelConfig.default_model('openai')) end model_name end |
#make_request(content, expect_json: true) ⇒ Object
25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 |
# File 'lib/n2b/llm/open_ai.rb', line 25 def make_request(content, expect_json: true) request = Net::HTTP::Post.new(API_URI) request.content_type = 'application/json' request['Authorization'] = "Bearer #{@config['access_key']}" body_hash = { "model" => get_model_name, "messages" => [ { "role" => "user", "content" => content } ] } body_hash["response_format"] = { "type" => "json_object" } if expect_json request.body = JSON.dump(body_hash) response = Net::HTTP.start(API_URI.hostname, API_URI.port, use_ssl: true) do |http| http.request(request) end # check for errors if response.code != '200' raise N2B::LlmApiError.new("LLM API Error: #{response.code} #{response.} - #{response.body}") end answer = JSON.parse(response.body)['choices'].first['message']['content'] if expect_json begin # remove everything before the first { and after the last } answer = answer.sub(/.*\{(.*)\}.*/m, '{\1}') unless answer.start_with?('{') answer = JSON.parse(answer) rescue JSON::ParserError answer = { 'commands' => answer.split("\n"), explanation: answer } end end answer end |