aws::bedrock::invoke

Functions

invoke-claude

fn (model_id: Str, messages: Vec, max_tokens: Int, system: Str, temperature: Dec, top_p: Dec, stop_sequences: Vec): ClaudeResponse | AwsError
fn (model_id: Str, messages: Vec, max_tokens: Int, system: Str): ClaudeResponse | AwsError
fn (model_id: Str, messages: Vec, max_tokens: Int): ClaudeResponse | AwsError
fn (model_id: Str, messages: Vec): ClaudeResponse | AwsError
fn (prompt: Str): ClaudeResponse | AwsError

Invoke an Anthropic Claude model via Bedrock using the Messages API format.

Example

// Simple prompt with default model (Claude 3 Haiku)
result ::aws::bedrock::invoke/invoke-claude("What is 2+2?")
text first(result.content).text
// => "4"

// With specific model and system prompt
result ::aws::bedrock::invoke/invoke-claude(
    "anthropic.claude-3-5-sonnet-20240620-v1:0",
    [{role: "user", content: "Explain quantum computing"}],
    1024,
    "You are a physics teacher"
)

invoke-llama

fn (model_id: Str, prompt: Str, max_gen_len: Int, temperature: Dec, top_p: Dec): InvokeModelResponse | AwsError
fn (model_id: Str, prompt: Str, max_gen_len: Int): InvokeModelResponse | AwsError
fn (model_id: Str, prompt: Str): InvokeModelResponse | AwsError
fn (prompt: Str): InvokeModelResponse | AwsError

Invoke a Meta Llama model via Bedrock.

Example

// Simple prompt with default model (Llama 3 8B)
result ::aws::bedrock::invoke/invoke-llama("What is functional programming?")
result.body.generation
// => "Functional programming is..."

invoke-mistral

fn (model_id: Str, prompt: Str, max_tokens: Int, temperature: Dec, top_p: Dec, stop: Vec): InvokeModelResponse | AwsError
fn (model_id: Str, prompt: Str, max_tokens: Int): InvokeModelResponse | AwsError
fn (model_id: Str, prompt: Str): InvokeModelResponse | AwsError
fn (prompt: Str): InvokeModelResponse | AwsError

Invoke a Mistral AI model via Bedrock.

Example

// Simple prompt with default model (Mistral 7B)
result ::aws::bedrock::invoke/invoke-mistral("Write a haiku about coding")
result.body.outputs
// => [{text: "..."}]

invoke-model

fn (model_id: Str, body: Map, content_type: Str, accept: Str): InvokeModelResponse | AwsError

Invoke a Bedrock foundation model with a raw request body.

This is the low-level invocation API. For most use cases, prefer `converse`, which provides a unified interface across all models.

Example

result ::aws::bedrock::invoke/invoke-model(
    "amazon.titan-text-express-v1",
    {inputText: "Hello", textGenerationConfig: {maxTokenCount: 100}},
    "application/json",
    "application/json"
)
result.body
// => {inputTextTokenCount: 1, results: [{outputText: "...", ...}]}

invoke-titan

fn (model_id: Str, input_text: Str, max_token_count: Int, temperature: Dec, top_p: Dec, stop_sequences: Vec): TitanResponse | AwsError
fn (model_id: Str, input_text: Str, max_token_count: Int): TitanResponse | AwsError
fn (model_id: Str, input_text: Str): TitanResponse | AwsError
fn (input_text: Str): TitanResponse | AwsError

Invoke an Amazon Titan text model via Bedrock.

Example

// Simple prompt with default model (Titan Text Express)
result ::aws::bedrock::invoke/invoke-titan("Tell me about Hot language")
text first(result.results).outputText
// => "Hot is..."

// With specific model and token limit
result ::aws::bedrock::invoke/invoke-titan("amazon.titan-text-lite-v1", "Hello!", 512)

Types

ClaudeResponse

ClaudeResponse type {
    id: Str?,
    type: Str?,
    role: Str?,
    content: Vec?,
    model: Str?,
    stop_reason: Str?,
    stop_sequence: Str?,
    usage: Map?
}

InvokeModelResponse

InvokeModelResponse type {
    body: Any,
    content_type: Str?,
    status_code: Int?
}

TitanResponse

TitanResponse type {
    input_text_token_count: Int?,
    results: Vec?
}