Module: Gitlore

Defined in:
lib/gitlore.rb,
lib/gitlore/version.rb

Overview

Gitlore summarizes git commits with AI: a CLI tool that turns a repository's recent git log into a natural-language summary, using either the OpenAI API or a local Ollama server.
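
A minimal usage sketch (hedged: it assumes lib/gitlore.rb initializes the module-level @prompt and @pastel helpers on load, as the method bodies below imply, and that OPENAI_API_KEY is exported for the OpenAI path):

require "gitlore"

# Interactively choose a commit count, a provider (openai or ollama),
# and a summary style, then print a Markdown-rendered summary.
Gitlore.summarize_git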

Constant Summary

VERSION = "0.1.0"

Class Method Summary

.api_key_error ⇒ Object
.choose_openai_model ⇒ Object
.client ⇒ Object
.summarize_git ⇒ Object
.summarize_with_ollama(git_log, system_prompt) ⇒ Object
.summarize_with_openai(git_log, system_prompt) ⇒ Object

Class Method Details

.api_key_error ⇒ Object



# File 'lib/gitlore.rb', line 118

def self.api_key_error
  # Presence check: true when OPENAI_API_KEY is set; otherwise print an error and return false.
  return true if ENV["OPENAI_API_KEY"]

  puts "#{@pastel.red("Error:")} OPENAI_API_KEY environment variable is not set."
  false
end
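
Despite the name, this is a presence check: it returns true when OPENAI_API_KEY is set, and prints the error and returns false otherwise. A small guard sketch using only names from this file:

# Only attempt OpenAI-specific work when the key is available:
Gitlore.choose_openai_model if Gitlore.api_key_error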

.choose_openai_model ⇒ Object



# File 'lib/gitlore.rb', line 21

def self.choose_openai_model
  # Check the key first so the missing-key error is actually printed before bailing out.
  return nil unless api_key_error && client

  begin
    # Offer only GPT-family models from the account's model list.
    model_names = client.models.list.data.map(&:id).sort.select do |model|
      model.include?("gpt")
    end
    @prompt.select("Choose an OpenAI model:", model_names, per_page: 10)
  rescue StandardError => e
    puts "#{@pastel.red("Error listing models:")} #{e.message}"
    nil
  end
end
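
The filter keeps only model ids containing "gpt", and the picker pages through them ten at a time. A hypothetical shape of the filtered list:

# model_names might look like:
#   ["gpt-4.1", "gpt-4o", "gpt-4o-mini"]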

.client ⇒ Object



# File 'lib/gitlore.rb', line 17

def self.client
  # Lazily build and memoize the OpenAI client; nil when the key is unset.
  @client ||= OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"]) if ENV["OPENAI_API_KEY"]
end
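
The client is built lazily and memoized, and the method returns nil when OPENAI_API_KEY is unset. A short sketch (the key value is a hypothetical placeholder):

ENV["OPENAI_API_KEY"] = "sk-test-placeholder" # hypothetical value
Gitlore.client                                # builds the OpenAI::Client once
Gitlore.client.equal?(Gitlore.client)         # => true (same memoized instance)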

.summarize_git ⇒ Object



# File 'lib/gitlore.rb', line 35

def self.summarize_git
  count = @prompt.ask("How many recent commits do you want to summarize?", default: "10").to_i
  provider = @prompt.select("Choose model provider", %w[openai ollama])
  style = @prompt.select("Choose summary style:", %w[narrative technical])

  git_log = `git log -n #{count} --pretty=format:"%h %s (%an)"`
  puts "\n#{@pastel.cyan("๐Ÿงพ Git Log:")}\n\n#{git_log}\n\n"

  system_prompts = {
    "narrative" => "You are a skilled storyteller. Given a list of git commits, your task is to craft a concise and
    engaging narrative that captures the essence of the changes, making them accessible and compelling for a
    non-technical audience.",
    "technical" => "You are a senior software engineer. Given a list of git commits, your task is
    to produce a precise, technically accurate summary of the changes, suitable for other developers and engineers."
  }

  case provider
  when "openai"
    summarize_with_openai(git_log, system_prompts[style])
  else
    summarize_with_ollama(git_log, system_prompts[style])
  end
end
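
The --pretty=format:"%h %s (%an)" placeholders expand to the abbreviated commit hash, the subject line, and the author name, so the model receives one line per commit. A hypothetical line of the log it is fed:

a1b2c3d Add narrative summary style (Jane Doe)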

.summarize_with_ollama(git_log, system_prompt) ⇒ Object



# File 'lib/gitlore.rb', line 74

def self.summarize_with_ollama(git_log, system_prompt)
  # Talk to a local Ollama server on its default port, JSON-encoding request bodies.
  conn = Faraday.new(url: "http://localhost:11434") do |f|
    f.request :json
    f.adapter Faraday.default_adapter
  end

  models_response = conn.get("/api/tags")
  if models_response.status != 200
    puts "#{@pastel.red("Error:")} Could not fetch models from Ollama server. Check if Ollama is running."
    return "Failed to connect to Ollama."
  end

  models = JSON.parse(models_response.body)["models"]
  model_names = models.map { |m| m["name"] }

  if model_names.empty?
    puts "#{@pastel.red("Error:")} No models found on Ollama server."
    return "No models available."
  end

  local_model = @prompt.select("Choose the local model to use", model_names)

  chat_response = conn.post("/api/chat") do |req|
    req.headers["Content-Type"] = "application/json"
    req.body = {
      model: local_model,
      messages: [
        { role: "system", content: system_prompt },
        { role: "user", content: "Summarize these commits:\n\n#{git_log}" }
      ],
      stream: false
    }
  end

  if chat_response.status != 200
    puts "#{@pastel.red("Error:")} Failed to get response from Ollama."
    return "Failed to get Ollama response."
  end

  summary = JSON.parse(chat_response.body)["message"]["content"]
  puts TTY::Markdown.parse(summary)
  summary
end
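
The method assumes Ollama's default port (11434) and reads message.content out of the non-streaming /api/chat response. Roughly the parsed shape it relies on, shown as a trimmed Ruby hash with hypothetical values:

{
  "model"   => "llama3",
  "message" => { "role" => "assistant", "content" => "The recent commits focus on..." },
  "done"    => true
}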

.summarize_with_openai(git_log, system_prompt) ⇒ Object



# File 'lib/gitlore.rb', line 59

def self.summarize_with_openai(git_log, system_prompt)
  selected_model = choose_openai_model
  return nil unless selected_model # model selection failed or was aborted

  response = client.chat.completions.create(
    model: selected_model,
    messages: [
      { role: "system", content: system_prompt },
      { role: "user", content: "Summarize these commits:\n\n#{git_log}" }
    ],
    temperature: 0
  )
  summary = response.choices[0].message.content
  puts TTY::Markdown.parse(summary)
  summary
end
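
temperature: 0 keeps the output as deterministic as the model allows, which suits re-running the tool over the same log. A defensive calling sketch (the rescue wrapper is an assumption for illustration, not part of the gem):

git_log = `git log -n 5 --pretty=format:"%h %s (%an)"`
begin
  Gitlore.summarize_with_openai(git_log, "Summarize for developers.") # hypothetical prompt
rescue StandardError => e
  warn "OpenAI request failed: #{e.message}"
end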