Class: Langchain::LLM::OpenAI
Overview
LLM interface for the OpenAI APIs: https://platform.openai.com/overview
Gem requirements:
gem "ruby-openai", "~> 6.3.0"
Usage:
openai = Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  llm_options: {}, # Available options: https://github.com/alexrudall/ruby-openai/blob/main/lib/openai/client.rb#L5-L13
  default_options: {}
)
Direct Known Subclasses
Constant Summary
- DEFAULTS =
{ n: 1, temperature: 0.0, chat_completion_model_name: "gpt-3.5-turbo", embeddings_model_name: "text-embedding-3-small" }.freeze
- EMBEDDING_SIZES =
{ "text-embedding-ada-002" => 1536, "text-embedding-3-large" => 3072, "text-embedding-3-small" => 1536 }.freeze
- LENGTH_VALIDATOR =
Langchain::Utils::TokenLength::OpenAIValidator
Instance Attribute Summary
- #defaults ⇒ Object (readonly)
  Returns the value of attribute defaults.
Attributes inherited from Base
Instance Method Summary
- #chat(messages: [], model: defaults[:chat_completion_model_name], frequency_penalty: nil, logit_bias: nil, logprobs: nil, top_logprobs: nil, max_tokens: nil, n: defaults[:n], presence_penalty: nil, response_format: nil, seed: nil, stop: nil, stream: nil, temperature: defaults[:temperature], top_p: nil, tools: [], tool_choice: nil, user: nil, &block) ⇒ Object
  Generate a chat completion for given messages.
- #complete(prompt:, **params) ⇒ Langchain::LLM::OpenAIResponse
  Generate a completion for a given prompt (deprecated: use #chat instead).
- #default_dimensions ⇒ Object
  Returns the embedding dimensions for the configured embeddings model.
- #embed(text:, model: defaults[:embeddings_model_name], encoding_format: nil, user: nil, dimensions: @defaults[:dimensions]) ⇒ Langchain::LLM::OpenAIResponse
  Generate an embedding for a given text.
- #initialize(api_key:, llm_options: {}, default_options: {}) ⇒ OpenAI (constructor)
  Initialize an OpenAI LLM instance.
- #summarize(text:) ⇒ String
  Generate a summary for a given text.
Methods included from DependencyHelper
Constructor Details
#initialize(api_key:, llm_options: {}, default_options: {}) ⇒ OpenAI
Initialize an OpenAI LLM instance
# File 'lib/langchain/llm/openai.rb', line 37

def initialize(api_key:, llm_options: {}, default_options: {})
  depends_on "ruby-openai", req: "openai"

  @client = ::OpenAI::Client.new(access_token: api_key, **llm_options)
  @defaults = DEFAULTS.merge(default_options)
end
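For illustration, entries in default_options are merged over DEFAULTS, so per-instance overrides can be supplied at construction time (the temperature and model name below are example values, not library defaults):

openai = Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  default_options: {
    temperature: 0.7, # overrides DEFAULTS[:temperature] (0.0)
    chat_completion_model_name: "gpt-4" # overrides the default "gpt-3.5-turbo"
  }
)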
Instance Attribute Details
#defaults ⇒ Object (readonly)
Returns the value of attribute defaults.
# File 'lib/langchain/llm/openai.rb', line 31

def defaults
  @defaults
end
Instance Method Details
#chat(messages: [], model: defaults[:chat_completion_model_name], frequency_penalty: nil, logit_bias: nil, logprobs: nil, top_logprobs: nil, max_tokens: nil, n: defaults[:n], presence_penalty: nil, response_format: nil, seed: nil, stop: nil, stream: nil, temperature: defaults[:temperature], top_p: nil, tools: [], tool_choice: nil, user: nil, &block) ⇒ Object
Generate a chat completion for given messages.
# File 'lib/langchain/llm/openai.rb', line 107

def chat(
  messages: [],
  model: defaults[:chat_completion_model_name],
  frequency_penalty: nil,
  logit_bias: nil,
  logprobs: nil,
  top_logprobs: nil,
  max_tokens: nil,
  n: defaults[:n],
  presence_penalty: nil,
  response_format: nil,
  seed: nil,
  stop: nil,
  stream: nil,
  temperature: defaults[:temperature],
  top_p: nil,
  tools: [],
  tool_choice: nil,
  user: nil,
  &block
)
  raise ArgumentError.new("messages argument is required") if messages.empty?
  raise ArgumentError.new("model argument is required") if model.empty?
  raise ArgumentError.new("'tool_choice' is only allowed when 'tools' are specified.") if tool_choice && tools.empty?

  parameters = {
    messages: messages,
    model: model
  }
  parameters[:frequency_penalty] = frequency_penalty if frequency_penalty
  parameters[:logit_bias] = logit_bias if logit_bias
  parameters[:logprobs] = logprobs if logprobs
  parameters[:top_logprobs] = top_logprobs if top_logprobs
  # TODO: Fix max_tokens validation to account for tools/functions
  parameters[:max_tokens] = max_tokens if max_tokens # || validate_max_tokens(parameters[:messages], parameters[:model])
  parameters[:n] = n if n
  parameters[:presence_penalty] = presence_penalty if presence_penalty
  parameters[:response_format] = response_format if response_format
  parameters[:seed] = seed if seed
  parameters[:stop] = stop if stop
  parameters[:stream] = stream if stream
  parameters[:temperature] = temperature if temperature
  parameters[:top_p] = top_p if top_p
  parameters[:tools] = tools if tools.any?
  parameters[:tool_choice] = tool_choice if tool_choice
  parameters[:user] = user if user

  # TODO: Clean this part up
  if block
    @response_chunks = []
    parameters[:stream] = proc do |chunk, _bytesize|
      chunk_content = chunk.dig("choices", 0)
      @response_chunks << chunk
      yield chunk_content
    end
  end

  response = with_api_error_handling do
    client.chat(parameters: parameters)
  end

  response = response_from_chunks if block
  reset_response_chunks

  Langchain::LLM::OpenAIResponse.new(response)
end
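For illustration, a minimal call sketch (assumes the openai instance built above; the message contents are example values, and #chat_completion is the response reader on Langchain::LLM::OpenAIResponse):

messages = [
  {role: "system", content: "You are a helpful assistant."},
  {role: "user", content: "What is the capital of France?"}
]

# Blocking call: returns a Langchain::LLM::OpenAIResponse
response = openai.chat(messages: messages)
response.chat_completion # the assistant reply as a String

# Streaming call: passing a block sets parameters[:stream] internally;
# the block receives each chunk's first choice, i.e. chunk.dig("choices", 0)
openai.chat(messages: messages) do |chunk|
  print chunk.dig("delta", "content")
end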
#complete(prompt:, **params) ⇒ Langchain::LLM::OpenAIResponse
Generate a completion for a given prompt (deprecated: use #chat instead)
# File 'lib/langchain/llm/openai.rb', line 91

def complete(prompt:, **params)
  warn "DEPRECATED: `Langchain::LLM::OpenAI#complete` is deprecated, and will be removed in the next major version. Use `Langchain::LLM::OpenAI#chat` instead."

  if params[:stop_sequences]
    params[:stop] = params.delete(:stop_sequences)
  end
  # Should we still accept the `messages: []` parameter here?
  messages = [{role: "user", content: prompt}]
  chat(messages: messages, **params)
end
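A usage sketch (the prompt is an example value; the method simply wraps it in a single user message and delegates to #chat, so the return value is the same response object):

response = openai.complete(prompt: "Name three primary colors.")
response.completion # the generated text as a String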
#default_dimensions ⇒ Object
# File 'lib/langchain/llm/openai.rb', line 187

def default_dimensions
  @defaults[:dimensions] || EMBEDDING_SIZES.fetch(defaults[:embeddings_model_name])
end
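A sketch of the fallback behavior (the values follow from DEFAULTS and EMBEDDING_SIZES above; the 256 is an example override):

openai = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
openai.default_dimensions # => 1536, from EMBEDDING_SIZES["text-embedding-3-small"]

openai = Langchain::LLM::OpenAI.new(
  api_key: ENV["OPENAI_API_KEY"],
  default_options: {dimensions: 256} # an explicit default wins over the lookup
)
openai.default_dimensions # => 256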
#embed(text:, model: defaults[:embeddings_model_name], encoding_format: nil, user: nil, dimensions: @defaults[:dimensions]) ⇒ Langchain::LLM::OpenAIResponse
Generate an embedding for a given text
# File 'lib/langchain/llm/openai.rb', line 52

def embed(
  text:,
  model: defaults[:embeddings_model_name],
  encoding_format: nil,
  user: nil,
  dimensions: @defaults[:dimensions]
)
  raise ArgumentError.new("text argument is required") if text.empty?
  raise ArgumentError.new("model argument is required") if model.empty?
  raise ArgumentError.new("encoding_format must be either float or base64") if encoding_format && !%w[float base64].include?(encoding_format)

  parameters = {
    input: text,
    model: model
  }
  parameters[:encoding_format] = encoding_format if encoding_format
  parameters[:user] = user if user

  if dimensions
    parameters[:dimensions] = dimensions
  elsif EMBEDDING_SIZES.key?(model)
    parameters[:dimensions] = EMBEDDING_SIZES[model]
  end

  validate_max_tokens(text, parameters[:model])

  response = with_api_error_handling do
    client.embeddings(parameters: parameters)
  end

  Langchain::LLM::OpenAIResponse.new(response)
end
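An embedding call sketch (the input text and the 256 are example values; #embedding is the response reader returning the vector):

response = openai.embed(text: "Hello, world!")
response.embedding # => Array of 1536 Floats for "text-embedding-3-small"

# Models that support shortened vectors can be asked for fewer dimensions:
response = openai.embed(
  text: "Hello, world!",
  model: "text-embedding-3-large",
  dimensions: 256
)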
#summarize(text:) ⇒ String
Generate a summary for a given text
# File 'lib/langchain/llm/openai.rb', line 178

def summarize(text:)
  prompt_template = Langchain::Prompt.load_from_path(
    file_path: Langchain.root.join("langchain/llm/prompts/summarize_template.yaml")
  )
  prompt = prompt_template.format(text: text)

  complete(prompt: prompt)
end
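A summarization sketch (the file path is illustrative; the method formats the bundled summarize_template.yaml prompt with the text and delegates to #complete):

summary = openai.summarize(text: File.read("article.txt"))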