Class: Langchain::LLM::AI21

Inherits:
Base
  • Object
show all
Defined in:
lib/langchain/llm/ai21.rb

Overview

Wrapper around AI21 Studio APIs.

Gem requirements:

gem "ai21", "~> 0.2.1"

Usage:

ai21 = Langchain::LLM::AI21.new(api_key: ENV["AI21_API_KEY"])

Constant Summary collapse

DEFAULTS =
{
  temperature: 0.0,
  model: "j2-ultra"
}.freeze
LENGTH_VALIDATOR =
Langchain::Utils::TokenLength::AI21Validator

Instance Attribute Summary

Attributes inherited from Base

#client

Instance Method Summary collapse

Methods inherited from Base

#chat, #default_dimensions, #embed

Methods included from DependencyHelper

#depends_on

Constructor Details

#initialize(api_key:, default_options: {}) ⇒ AI21

Returns a new instance of AI21.



21
22
23
24
25
26
# File 'lib/langchain/llm/ai21.rb', line 21

# Build a new AI21 LLM wrapper.
#
# @param api_key [String] API key for AI21 Studio
# @param default_options [Hash] per-instance overrides merged over DEFAULTS
def initialize(api_key:, default_options: {})
  # Raise early if the "ai21" gem is not installed.
  depends_on "ai21"

  # Caller-supplied options win over the library defaults.
  @defaults = DEFAULTS.merge(default_options)
  @client = ::AI21::Client.new(api_key)
end

Instance Method Details

#complete(prompt:, **params) ⇒ Langchain::LLM::AI21Response

Generate a completion for a given prompt

Parameters:

  • prompt (String)

    The prompt to generate a completion for

  • params (Hash)

    The parameters to pass to the API

Returns:

  • (Langchain::LLM::AI21Response)

    The completion response


35
36
37
38
39
40
41
42
# File 'lib/langchain/llm/ai21.rb', line 35

# Generate a completion for a given prompt.
#
# @param prompt [String] the prompt to generate a completion for
# @param params [Hash] extra parameters forwarded to the API
# @return [Langchain::LLM::AI21Response] the wrapped API response
def complete(prompt:, **params)
  # Merge instance defaults with per-call overrides.
  options = complete_parameters params

  # Cap the token budget so prompt plus completion fit the model's limit.
  options[:maxTokens] = LENGTH_VALIDATOR.validate_max_tokens!(prompt, options[:model], {llm: client})

  raw_response = client.complete(prompt, options)
  Langchain::LLM::AI21Response.new raw_response, model: options[:model]
end

#summarize(text:, **params) ⇒ String

Generate a summary for a given text

Parameters:

  • text (String)

    The text to generate a summary for

  • params (Hash)

    The parameters to pass to the API

Returns:

  • (String)

    The summary



51
52
53
54
55
# File 'lib/langchain/llm/ai21.rb', line 51

# Generate a summary for a given text.
#
# @param text [String] the text to summarize
# @param params [Hash] extra parameters forwarded to the API
# @return [String] the summary
def summarize(text:, **params)
  # "TEXT" tells the AI21 API the source type of the first argument.
  result = client.summarize(text, "TEXT", params)
  result[:summary]
  # TODO(review): consider wrapping this in Langchain::LLM::AI21Response
  # for consistency with #complete.
end