feat: scenario agents & runner (#11944)

Co-authored-by: Muhsin Keloth <muhsinkeramam@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: Sojan Jose <sojan@pepalo.com>
Co-authored-by: Pranav <pranav@chatwoot.com>
Co-authored-by: Sivin Varghese <64252451+iamsivin@users.noreply.github.com>
Authored by Shivam Mishra on 2025-08-14 12:39:21 +05:30, committed by GitHub
parent 14471cc20c
commit c6be04cdc1
21 changed files with 1437 additions and 16 deletions

.gitignore vendored

@@ -94,3 +94,4 @@ yarn-debug.log*
.vscode
.claude/settings.local.json
.cursor
CLAUDE.local.md


@@ -179,7 +179,10 @@ gem 'reverse_markdown'
gem 'iso-639'
gem 'ruby-openai'
gem 'ai-agents', '>= 0.2.1'
gem 'ai-agents', '>= 0.4.3'
# TODO: Move this gem to be a dependency of ai-agents
gem 'ruby_llm-schema'
gem 'shopify_api'


@@ -126,7 +126,7 @@ GEM
jbuilder (~> 2)
rails (>= 4.2, < 7.2)
selectize-rails (~> 0.6)
ai-agents (0.2.1)
ai-agents (0.4.3)
ruby_llm (~> 1.3)
annotate (3.2.0)
activerecord (>= 3.2, < 8.0)
@@ -720,7 +720,7 @@ GEM
ruby2ruby (2.5.0)
ruby_parser (~> 3.1)
sexp_processor (~> 4.6)
ruby_llm (1.3.1)
ruby_llm (1.5.1)
base64
event_stream_parser (~> 1)
faraday (>= 1.10.0)
@@ -729,6 +729,7 @@ GEM
faraday-retry (>= 1)
marcel (~> 1.0)
zeitwerk (~> 2)
ruby_llm-schema (0.1.0)
ruby_parser (3.20.0)
sexp_processor (~> 4.16)
sass (3.7.4)
@@ -910,7 +911,7 @@ DEPENDENCIES
administrate (>= 0.20.1)
administrate-field-active_storage (>= 1.0.3)
administrate-field-belongs_to_search (>= 0.9.0)
ai-agents (>= 0.2.1)
ai-agents (>= 0.4.3)
annotate
attr_extras
audited (~> 5.4, >= 5.4.1)
@@ -1004,6 +1005,7 @@ DEPENDENCIES
rubocop-rails
rubocop-rspec
ruby-openai
ruby_llm-schema
scout_apm
scss_lint
seed_dump


@@ -0,0 +1,23 @@
# frozen_string_literal: true
require 'agents'
Rails.application.config.after_initialize do
api_key = InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_API_KEY')&.value
model = InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_MODEL')&.value.presence || OpenAiConstants::DEFAULT_MODEL
api_endpoint = InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_ENDPOINT')&.value || OpenAiConstants::DEFAULT_ENDPOINT
if api_key.present?
Agents.configure do |config|
config.openai_api_key = api_key
if api_endpoint.present?
api_base = "#{api_endpoint.chomp('/')}/v1"
config.openai_api_base = api_base
end
config.default_model = model
config.debug = false
end
end
rescue StandardError => e
Rails.logger.error "Failed to configure AI Agents SDK: #{e.message}"
end
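The initializer above only takes effect when a CAPTAIN_OPEN_AI_API_KEY installation config exists. A hedged console sketch of seeding those configs, assuming InstallationConfig accepts value: on create; the key names are exactly the ones read above, the values are placeholders:

# Hypothetical seed data for the Captain agents SDK configuration
InstallationConfig.where(name: 'CAPTAIN_OPEN_AI_API_KEY').first_or_create!(value: 'sk-example')
InstallationConfig.where(name: 'CAPTAIN_OPEN_AI_MODEL').first_or_create!(value: 'gpt-4.1-mini')
InstallationConfig.where(name: 'CAPTAIN_OPEN_AI_ENDPOINT').first_or_create!(value: 'https://api.openai.com')
# Restart the app so after_initialize runs Agents.configure with these values.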


@@ -26,9 +26,15 @@ class Captain::Conversation::ResponseBuilderJob < ApplicationJob
delegate :account, :inbox, to: :@conversation
def generate_and_process_response
@response = Captain::Llm::AssistantChatService.new(assistant: @assistant).generate_response(
message_history: collect_previous_messages
)
@response = if captain_v2_enabled?
Captain::Assistant::AgentRunnerService.new(assistant: @assistant, conversation: @conversation).generate_response(
message_history: collect_previous_messages
)
else
Captain::Llm::AssistantChatService.new(assistant: @assistant).generate_response(
message_history: collect_previous_messages
)
end
return process_action('handoff') if handoff_requested?
@@ -104,4 +110,8 @@ class Captain::Conversation::ResponseBuilderJob < ApplicationJob
def log_error(error)
ChatwootExceptionTracker.new(error, account: account).capture_exception
end
def captain_v2_enabled?
account.feature_enabled?('captain_integration_v2')
end
end
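The v2 branch above is gated on the captain_integration_v2 account feature flag. A minimal console sketch of opting an account in, assuming Chatwoot's standard feature-flag helpers on Account (enable_features!):

# Hypothetical console sketch: route Captain responses through the new agent runner
account = Account.find(1)                              # example account id
account.enable_features!('captain_integration_v2')
account.feature_enabled?('captain_integration_v2')     # => true, so the job uses AgentRunnerService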


@@ -19,6 +19,7 @@
class Captain::Assistant < ApplicationRecord
include Avatarable
include Concerns::CaptainToolsHelpers
include Concerns::Agentable
self.table_name = 'captain_assistants'
@@ -35,6 +36,8 @@ class Captain::Assistant < ApplicationRecord
has_many :copilot_threads, dependent: :destroy_async
has_many :scenarios, class_name: 'Captain::Scenario', dependent: :destroy_async
store_accessor :config, :temperature, :feature_faq, :feature_memory, :product_name
validates :name, presence: true
validates :description, presence: true
validates :account_id, presence: true
@@ -71,6 +74,33 @@ class Captain::Assistant < ApplicationRecord
private
def agent_name
name
end
def agent_tools
[
self.class.resolve_tool_class('faq_lookup').new(self),
self.class.resolve_tool_class('handoff').new(self)
]
end
def prompt_context
{
name: name,
description: description,
product_name: config['product_name'] || 'this product',
scenarios: scenarios.enabled.map do |scenario|
{
key: scenario.title.parameterize.underscore,
description: scenario.description
}
end,
response_guidelines: response_guidelines || [],
guardrails: guardrails || []
}
end
def default_avatar_url
"#{ENV.fetch('FRONTEND_URL', nil)}/assets/images/dashboard/captain/logo.svg"
end


@@ -22,6 +22,7 @@
#
class Captain::Scenario < ApplicationRecord
include Concerns::CaptainToolsHelpers
include Concerns::Agentable
self.table_name = 'captain_scenarios'
@@ -37,10 +38,43 @@ class Captain::Scenario < ApplicationRecord
scope :enabled, -> { where(enabled: true) }
delegate :temperature, :feature_faq, :feature_memory, :product_name, to: :assistant
before_save :resolve_tool_references
def prompt_context
{
title: title,
instructions: resolved_instructions,
tools: resolved_tools
}
end
private
def agent_name
"#{title} Agent".titleize
end
def agent_tools
resolved_tools.map { |tool| self.class.resolve_tool_class(tool[:id]) }.map { |tool| tool.new(assistant) }
end
def resolved_instructions
instruction.gsub(TOOL_REFERENCE_REGEX) do |match|
"#{match} tool "
end
end
def resolved_tools
return [] if tools.blank?
available_tools = self.class.available_agent_tools
tools.filter_map do |tool_id|
available_tools.find { |tool| tool[:id] == tool_id }
end
end
# Validates that all tool references in the instruction are valid.
# Parses the instruction for tool references and checks if they exist
# in the available tools configuration.
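Similarly, a hedged sketch (made-up values) of the hash prompt_context returns for a scenario; the tool entries come from available_agent_tools in Concerns::CaptainToolsHelpers, so their exact metadata shape is assumed here:

# Example shape of Captain::Scenario#prompt_context (hypothetical values)
{
  title: 'Refund requests',
  instructions: '...instruction text with tool references expanded by resolved_instructions...',
  tools: [
    { id: 'faq_lookup', description: 'Search the assistant FAQs' }   # assumed metadata shape
  ]
}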


@@ -0,0 +1,56 @@
module Concerns::Agentable
extend ActiveSupport::Concern
def agent
Agents::Agent.new(
name: agent_name,
instructions: ->(context) { agent_instructions(context) },
tools: agent_tools,
model: agent_model,
temperature: temperature.to_f || 0.7,
response_schema: agent_response_schema
)
end
def agent_instructions(context = nil)
enhanced_context = prompt_context
if context
state = context.context[:state] || {}
conversation_data = state[:conversation] || {}
contact_data = state[:contact] || {}
enhanced_context = enhanced_context.merge(
conversation: conversation_data,
contact: contact_data
)
end
Captain::PromptRenderer.render(template_name, enhanced_context.with_indifferent_access)
end
private
def agent_name
raise NotImplementedError, "#{self.class} must implement agent_name"
end
def template_name
self.class.name.demodulize.underscore
end
def agent_tools
[] # Default implementation, override if needed
end
def agent_model
InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_MODEL')&.value.presence || OpenAiConstants::DEFAULT_MODEL
end
def agent_response_schema
Captain::ResponseSchema
end
def prompt_context
raise NotImplementedError, "#{self.class} must implement prompt_context"
end
end
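A minimal includer sketch, mirroring the dummy class used in this concern's spec further down: a class only has to supply agent_name and prompt_context (and respond to temperature) to be turned into an Agents::Agent. The class name here is hypothetical:

# Hypothetical includer, for illustration only
class Captain::EscalationPolicy
  include Concerns::Agentable

  attr_accessor :temperature   # read by #agent; nil becomes 0.0 via to_f

  private

  def agent_name
    'Escalation Policy Agent'
  end

  def prompt_context
    # rendered through enterprise/lib/captain/prompts/escalation_policy.liquid (template_name)
    { name: 'Escalation Policy' }
  end
end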


@@ -0,0 +1,161 @@
require 'agents'
class Captain::Assistant::AgentRunnerService
CONVERSATION_STATE_ATTRIBUTES = %i[
id display_id inbox_id contact_id status priority
label_list custom_attributes additional_attributes
].freeze
CONTACT_STATE_ATTRIBUTES = %i[
id name email phone_number identifier contact_type
custom_attributes additional_attributes
].freeze
def initialize(assistant:, conversation: nil, callbacks: {})
@assistant = assistant
@conversation = conversation
@callbacks = callbacks
end
def generate_response(message_history: [])
agents = build_and_wire_agents
context = build_context(message_history)
message_to_process = extract_last_user_message(message_history)
runner = Agents::Runner.with_agents(*agents)
runner = add_callbacks_to_runner(runner) if @callbacks.any?
result = runner.run(message_to_process, context: context)
process_agent_result(result)
rescue StandardError => e
# When running the agent runner service from a rake task, the conversation (and its account) may be absent;
# in regular production usage it will be present and this runs just fine.
ChatwootExceptionTracker.new(e, account: @conversation&.account).capture_exception
Rails.logger.error "[Captain V2] AgentRunnerService error: #{e.message}"
Rails.logger.error e.backtrace.join("\n")
error_response(e.message)
end
private
def build_context(message_history)
conversation_history = message_history.map do |msg|
content = extract_text_from_content(msg[:content])
{
role: msg[:role].to_sym,
content: content,
agent_name: msg[:agent_name]
}
end
{
conversation_history: conversation_history,
state: build_state
}
end
def extract_last_user_message(message_history)
last_user_msg = message_history.reverse.find { |msg| msg[:role] == 'user' }
extract_text_from_content(last_user_msg[:content])
end
def extract_text_from_content(content)
# Handle structured output from agents
return content[:response] || content['response'] || content.to_s if content.is_a?(Hash)
return content unless content.is_a?(Array)
text_parts = content.select { |part| part[:type] == 'text' }.pluck(:text)
text_parts.join(' ')
end
# Response formatting methods
def process_agent_result(result)
Rails.logger.info "[Captain V2] Agent result: #{result.inspect}"
format_response(result.output)
end
def format_response(output)
return output.with_indifferent_access if output.is_a?(Hash)
# Fallback for backwards compatibility
{
'response' => output.to_s,
'reasoning' => 'Processed by agent'
}
end
def error_response(error_message)
{
'response' => 'conversation_handoff',
'reasoning' => "Error occurred: #{error_message}"
}
end
def build_state
state = {
account_id: @assistant.account_id,
assistant_id: @assistant.id,
assistant_config: @assistant.config
}
if @conversation
state[:conversation] = @conversation.attributes.symbolize_keys.slice(*CONVERSATION_STATE_ATTRIBUTES)
state[:contact] = @conversation.contact.attributes.symbolize_keys.slice(*CONTACT_STATE_ATTRIBUTES) if @conversation.contact
end
state
end
def build_and_wire_agents
assistant_agent = @assistant.agent
scenario_agents = @assistant.scenarios.enabled.map(&:agent)
assistant_agent.register_handoffs(*scenario_agents) if scenario_agents.any?
scenario_agents.each { |scenario_agent| scenario_agent.register_handoffs(assistant_agent) }
[assistant_agent] + scenario_agents
end
def add_callbacks_to_runner(runner)
runner = add_agent_thinking_callback(runner) if @callbacks[:on_agent_thinking]
runner = add_tool_start_callback(runner) if @callbacks[:on_tool_start]
runner = add_tool_complete_callback(runner) if @callbacks[:on_tool_complete]
runner = add_agent_handoff_callback(runner) if @callbacks[:on_agent_handoff]
runner
end
def add_agent_thinking_callback(runner)
runner.on_agent_thinking do |*args|
@callbacks[:on_agent_thinking].call(*args)
rescue StandardError => e
Rails.logger.warn "[Captain] Callback error for agent_thinking: #{e.message}"
end
end
def add_tool_start_callback(runner)
runner.on_tool_start do |*args|
@callbacks[:on_tool_start].call(*args)
rescue StandardError => e
Rails.logger.warn "[Captain] Callback error for tool_start: #{e.message}"
end
end
def add_tool_complete_callback(runner)
runner.on_tool_complete do |*args|
@callbacks[:on_tool_complete].call(*args)
rescue StandardError => e
Rails.logger.warn "[Captain] Callback error for tool_complete: #{e.message}"
end
end
def add_agent_handoff_callback(runner)
runner.on_agent_handoff do |*args|
@callbacks[:on_agent_handoff].call(*args)
rescue StandardError => e
Rails.logger.warn "[Captain] Callback error for agent_handoff: #{e.message}"
end
end
end
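Putting it together, a usage sketch based on how ResponseBuilderJob and the chat rake task below call this service; the callback keys and the message-history shape are the ones defined here, while the record ids are made up:

# Sketch: generate a structured reply for the latest user message
assistant    = Captain::Assistant.find(42)                                       # example ids
conversation = Conversation.find_by(display_id: 7, account_id: assistant.account_id)
service = Captain::Assistant::AgentRunnerService.new(
  assistant: assistant,
  conversation: conversation,                   # optional; enriches the prompt state with conversation/contact data
  callbacks: {
    on_agent_handoff: ->(from, to, reason) { Rails.logger.info("handoff: #{from} -> #{to} (#{reason})") }
  }
)
result = service.generate_response(
  message_history: [{ role: 'user', content: 'I need help with my account' }]
)
result['response']    # reply text (or 'conversation_handoff' on error)
result['reasoning']   # agent's thought process, per Captain::ResponseSchema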


@@ -0,0 +1,25 @@
require 'liquid'
class Captain::PromptRenderer
class << self
def render(template_name, context = {})
template = load_template(template_name)
liquid_template = Liquid::Template.parse(template)
liquid_template.render(stringify_keys(context))
end
private
def load_template(template_name)
template_path = Rails.root.join('enterprise', 'lib', 'captain', 'prompts', "#{template_name}.liquid")
raise "Template not found: #{template_name}" unless File.exist?(template_path)
File.read(template_path)
end
def stringify_keys(hash)
hash.deep_stringify_keys
end
end
end
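A small usage sketch, matching the spec further down: templates live in enterprise/lib/captain/prompts/<name>.liquid and the context hash is deep-stringified before rendering. The template name and values here are hypothetical:

# Given enterprise/lib/captain/prompts/greeting.liquid containing "Hello {{ name }}"
Captain::PromptRenderer.render('greeting', { name: 'John' })   # => "Hello John"
# Symbol keys are fine; stringify_keys converts them for Liquid.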


@@ -0,0 +1,80 @@
# System Context
You are part of Captain, a multi-agent AI system designed for seamless agent coordination and task execution. You can transfer conversations to specialized agents using handoff functions (e.g., `handoff_to_[agent_name]`). These transfers happen in the background - never mention or draw attention to them in your responses.
# Your Identity
You are {{name}}, a helpful and knowledgeable assistant. Your role is to provide accurate information, assist with tasks, and ensure users get the help they need.
{{ description }}
Don't digress from your instructions, and use all the tools at your disposal to solve customer issues. If you state something factual about {{product_name}}, ensure you source that information from the FAQs only. Use the faq_lookup tool for this.
# Current Context
Here's the metadata we have about the current conversation and the contact associated with it:
{% if conversation -%}
{% render 'conversation' %}
{% endif -%}
{% if contact -%}
{% render 'contact' %}
{% endif -%}
{% if response_guidelines.size > 0 -%}
# Response Guidelines
Your responses should follow these guidelines:
{% for guideline in response_guidelines -%}
- {{ guideline }}
{% endfor %}
{% endif -%}
{% if guardrails.size > 0 -%}
# Guardrails
Always respect these boundaries:
{% for guardrail in guardrails -%}
- {{ guardrail }}
{% endfor %}
{% endif -%}
# Decision Framework
## 1. Analyze the Request
First, understand what the user is asking:
- **Intent**: What are they trying to achieve?
- **Type**: Is it a question, task, complaint, or request?
- **Complexity**: Can you handle it or does it need specialized expertise?
## 2. Check for Specialized Scenarios First
Before using any tools, check if the request matches any of these scenarios. If unclear, ask clarifying questions to determine if a scenario applies:
{% for scenario in scenarios -%}
### handoff_to_{{ scenario.key }}
{{ scenario.description }}
{% endfor -%}
## 3. Handle the Request
If no specialized scenario clearly matches, handle it yourself:
### For Questions and Information Requests
1. **First, check existing knowledge**: Use `faq_lookup` tool to search for relevant information
2. **If not found in FAQs**: Provide your best answer based on available context
3. **If unable to answer**: Use `handoff` tool to transfer to a human expert
### For Complex or Unclear Requests
1. **Ask clarifying questions**: Gather more information if needed
2. **Break down complex tasks**: Handle step by step or hand off if too complex
3. **Escalate when necessary**: Use `handoff` tool for issues beyond your capabilities
## Response Best Practices
- Be conversational but professional
- Provide actionable information
- Include relevant details from tool responses
# Human Handoff Protocol
Transfer to a human agent when:
- User explicitly requests human assistance
- You cannot find needed information after checking FAQs
- The issue requires specialized knowledge or permissions you don't have
- Multiple attempts to help have been unsuccessful
When using the `handoff` tool, provide a clear reason that helps the human agent understand the context.


@@ -0,0 +1,24 @@
# System context
You are part of a multi-agent system where you've been handed off a conversation to handle a specific task.
The handoff was seamless - the user is not aware of any transfer. Continue the conversation naturally.
# Your Role
You are a specialized agent called {{ title }}. Your task is to handle the following scenario:
{{ instructions }}
{% if conversation -%}
{% render 'conversation' %}
{% if contact -%}
{% render 'contact' %}
{% endif -%}
{% endif -%}
{% if tools.size > 0 -%}
# Available Tools
You have access to these tools:
{% for tool in tools -%}
- {{ tool.id }}: {{ tool.description }}
{% endfor %}
{%- endif %}


@@ -0,0 +1,17 @@
# Contact Information
- Contact ID: {{ contact.id }}
- Name: {{ contact.name | default: "Unknown" }}
- Email: {{ contact.email | default: "None" }}
- Phone: {{ contact.phone_number | default: "None" }}
- Identifier: {{ contact.identifier | default: "None" }}
- Type: {{ contact.contact_type | default: "visitor" }}
{% if contact.custom_attributes -%}
{% for attribute in contact.custom_attributes -%}
- {{ attribute[0] }}: {{ attribute[1] }}
{% endfor -%}
{% endif -%}
{% if contact.additional_attributes -%}
{% for attribute in contact.additional_attributes -%}
- {{ attribute[0] }}: {{ attribute[1] }}
{% endfor -%}
{% endif -%}


@@ -0,0 +1,18 @@
# Current Conversation Context
- Conversation ID: {{ conversation.display_id }}
- Contact ID: {{ conversation.contact_id }}
- Status: {{ conversation.status }}
- Priority: {{ conversation.priority | default: "None" }}
{% if conversation.label_list.size > 0 -%}
- Labels: {{ conversation.label_list | join: ", " }}
{% endif -%}
{% if conversation.custom_attributes -%}
{% for attribute in conversation.custom_attributes -%}
- {{ attribute[0] }}: {{ attribute[1] }}
{% endfor -%}
{% endif -%}
{% if conversation.additional_attributes -%}
{% for attribute in conversation.additional_attributes -%}
- {{ attribute[0] }}: {{ attribute[1] }}
{% endfor -%}
{% endif -%}


@@ -0,0 +1,6 @@
# TODO: Wrap the schema lib under ai-agents
# So we can extend it as Agents::Schema
class Captain::ResponseSchema < RubyLLM::Schema
string :response, description: 'The message to send to the user'
string :reasoning, description: "Agent's thought process"
end
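Since agent_response_schema in Concerns::Agentable attaches this schema to every agent, a successful run yields a hash like the sketch below (example values), which AgentRunnerService#format_response then returns with indifferent access:

# Example structured output under Captain::ResponseSchema (hypothetical values)
{
  'response'  => 'You can reset your password from the profile page.',
  'reasoning' => 'Found the answer via the faq_lookup tool.'
}
# Plain-string outputs fall back to { 'response' => ..., 'reasoning' => 'Processed by agent' }.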

lib/open_ai_constants.rb Normal file

@@ -0,0 +1,6 @@
# frozen_string_literal: true
module OpenAiConstants
DEFAULT_MODEL = 'gpt-4.1-mini'
DEFAULT_ENDPOINT = 'https://api.openai.com'
end

lib/tasks/captain_chat.rake Normal file

@@ -0,0 +1,235 @@
require 'io/console'
require 'readline'
namespace :captain do
desc 'Start interactive chat with Captain assistant - Usage: rake captain:chat[assistant_id] or rake captain:chat -- assistant_id'
task :chat, [:assistant_id] => :environment do |_, args|
assistant_id = args[:assistant_id] || ARGV[1]
unless assistant_id
puts '❌ Please provide an assistant ID'
puts 'Usage: rake captain:chat[assistant_id]'
puts "\nAvailable assistants:"
Captain::Assistant.includes(:account).each do |assistant|
puts " ID: #{assistant.id} - #{assistant.name} (Account: #{assistant.account.name})"
end
exit 1
end
assistant = Captain::Assistant.find_by(id: assistant_id)
unless assistant
puts "❌ Assistant with ID #{assistant_id} not found"
exit 1
end
# Clear ARGV to prevent gets from reading files
ARGV.clear
chat_session = CaptainChatSession.new(assistant)
chat_session.start
end
end
class CaptainChatSession
def initialize(assistant)
@assistant = assistant
@message_history = []
end
def start
show_assistant_info
show_instructions
chat_loop
show_exit_message
end
private
def show_instructions
puts "💡 Type 'exit', 'quit', or 'bye' to end the session"
puts "💡 Type 'clear' to clear message history"
puts('-' * 50)
end
def chat_loop
loop do
puts '' # Add spacing before prompt
user_input = Readline.readline('👤 You: ', true)
next unless user_input # Handle Ctrl+D
break unless handle_user_input(user_input.strip)
end
end
def handle_user_input(user_input)
case user_input.downcase
when 'exit', 'quit', 'bye'
false
when 'clear'
clear_history
true
when ''
true
else
process_user_message(user_input)
true
end
end
def show_exit_message
puts "\nChat session ended"
puts "Final conversation log has #{@message_history.length} messages"
end
def show_assistant_info
show_basic_info
show_scenarios
show_available_tools
puts ''
end
def show_basic_info
puts "🤖 Starting chat with #{@assistant.name}"
puts "🏢 Account: #{@assistant.account.name}"
puts "🆔 Assistant ID: #{@assistant.id}"
end
def show_scenarios
scenarios = @assistant.scenarios.enabled
if scenarios.any?
puts "⚡ Enabled Scenarios (#{scenarios.count}):"
scenarios.each { |scenario| display_scenario(scenario) }
else
puts '⚡ No scenarios enabled'
end
end
def display_scenario(scenario)
tools_count = scenario.tools&.length || 0
puts "#{scenario.title} (#{tools_count} tools)"
return if scenario.description.blank?
description = truncate_description(scenario.description)
puts " #{description}"
end
def truncate_description(description)
description.length > 60 ? "#{description[0..60]}..." : description
end
def show_available_tools
available_tools = Captain::Assistant.available_tool_ids
if available_tools.any?
puts "🔧 Available Tools (#{available_tools.count}): #{available_tools.join(', ')}"
else
puts '🔧 No tools available'
end
end
def process_user_message(user_input)
add_to_history('user', user_input)
begin
print "🤖 #{@assistant.name}: "
@current_system_messages = []
result = generate_assistant_response
display_response(result)
rescue StandardError => e
handle_error(e)
end
end
def generate_assistant_response
runner = Captain::Assistant::AgentRunnerService.new(assistant: @assistant, callbacks: build_callbacks)
runner.generate_response(message_history: @message_history)
end
def build_callbacks
{
on_agent_thinking: method(:handle_agent_thinking),
on_tool_start: method(:handle_tool_start),
on_tool_complete: method(:handle_tool_complete),
on_agent_handoff: method(:handle_agent_handoff)
}
end
def handle_agent_thinking(agent, _input)
agent_name = extract_name(agent)
@current_system_messages << "#{agent_name} is thinking..."
add_to_history('system', "#{agent_name} is thinking...")
end
def handle_tool_start(tool, _args)
tool_name = extract_tool_name(tool)
@current_system_messages << "Using tool: #{tool_name}"
add_to_history('system', "Using tool: #{tool_name}")
end
def handle_tool_complete(tool, _result)
tool_name = extract_tool_name(tool)
@current_system_messages << "Tool #{tool_name} completed"
add_to_history('system', "Tool #{tool_name} completed")
end
def handle_agent_handoff(from, to, reason)
@current_system_messages << "Handoff: #{extract_name(from)}#{extract_name(to)} (#{reason})"
add_to_history('system', "Agent handoff: #{extract_name(from)}#{extract_name(to)} (#{reason})")
end
def display_response(result)
response_text = result['response'] || 'No response generated'
reasoning = result['reasoning']
puts dim_text("\n#{@current_system_messages.join("\n")}") if @current_system_messages.any?
puts response_text
puts dim_italic_text("(Reasoning: #{reasoning})") if reasoning && reasoning != 'Processed by agent'
add_to_history('assistant', response_text, reasoning: reasoning)
end
def handle_error(error)
error_msg = "Error: #{error.message}"
puts "#{error_msg}"
add_to_history('system', error_msg)
end
def add_to_history(role, content, agent_name: nil, reasoning: nil)
message = {
role: role,
content: content,
timestamp: Time.current,
agent_name: agent_name || (role == 'assistant' ? @assistant.name : nil)
}
message[:reasoning] = reasoning if reasoning
@message_history << message
end
def clear_history
@message_history.clear
puts 'Message history cleared'
end
def dim_text(text)
# ANSI escape code for very dim gray text (bright black/dark gray)
"\e[90m#{text}\e[0m"
end
def dim_italic_text(text)
# ANSI escape codes for dim gray + italic text
"\e[90m\e[3m#{text}\e[0m"
end
def extract_tool_name(tool)
return tool if tool.is_a?(String)
tool.class.name.split('::').last.gsub('Tool', '')
rescue StandardError
tool.to_s
end
def extract_name(obj)
obj.respond_to?(:name) ? obj.name : obj.to_s
end
end
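Besides the rake entry point (rake captain:chat[assistant_id]), the session can be driven from a Rails console; a small sketch with a hypothetical assistant id:

# Hypothetical console usage of the interactive chat session
assistant = Captain::Assistant.find(42)    # 42 is an example id
CaptainChatSession.new(assistant).start    # type 'exit' to end, 'clear' to reset history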


@@ -9,6 +9,7 @@ RSpec.describe Captain::Conversation::ResponseBuilderJob, type: :job do
describe '#perform' do
let(:conversation) { create(:conversation, inbox: inbox, account: account) }
let(:mock_llm_chat_service) { instance_double(Captain::Llm::AssistantChatService) }
let(:mock_agent_runner_service) { instance_double(Captain::Assistant::AgentRunnerService) }
before do
create(:message, conversation: conversation, content: 'Hello', message_type: :incoming)
@@ -16,19 +17,79 @@ RSpec.describe Captain::Conversation::ResponseBuilderJob, type: :job do
allow(inbox).to receive(:captain_active?).and_return(true)
allow(Captain::Llm::AssistantChatService).to receive(:new).and_return(mock_llm_chat_service)
allow(mock_llm_chat_service).to receive(:generate_response).and_return({ 'response' => 'Hey, welcome to Captain Specs' })
allow(Captain::Assistant::AgentRunnerService).to receive(:new).and_return(mock_agent_runner_service)
allow(mock_agent_runner_service).to receive(:generate_response).and_return({ 'response' => 'Hey, welcome to Captain V2' })
end
it 'generates and processes response' do
described_class.perform_now(conversation, assistant)
expect(conversation.messages.count).to eq(2)
expect(conversation.messages.outgoing.count).to eq(1)
expect(conversation.messages.last.content).to eq('Hey, welcome to Captain Specs')
context 'when captain_v2 is disabled' do
before do
allow(account).to receive(:feature_enabled?).and_return(false)
allow(account).to receive(:feature_enabled?).with('captain_integration_v2').and_return(false)
end
it 'uses Captain::Llm::AssistantChatService' do
expect(Captain::Llm::AssistantChatService).to receive(:new).with(assistant: assistant)
expect(Captain::Assistant::AgentRunnerService).not_to receive(:new)
described_class.perform_now(conversation, assistant)
expect(conversation.messages.last.content).to eq('Hey, welcome to Captain Specs')
end
it 'generates and processes response' do
described_class.perform_now(conversation, assistant)
expect(conversation.messages.count).to eq(2)
expect(conversation.messages.outgoing.count).to eq(1)
expect(conversation.messages.last.content).to eq('Hey, welcome to Captain Specs')
end
it 'increments usage response' do
described_class.perform_now(conversation, assistant)
account.reload
expect(account.usage_limits[:captain][:responses][:consumed]).to eq(1)
end
end
it 'increments usage response' do
described_class.perform_now(conversation, assistant)
account.reload
expect(account.usage_limits[:captain][:responses][:consumed]).to eq(1)
context 'when captain_v2 is enabled' do
before do
allow(account).to receive(:feature_enabled?).and_return(false)
allow(account).to receive(:feature_enabled?).with('captain_integration_v2').and_return(true)
end
it 'uses Captain::Assistant::AgentRunnerService' do
expect(Captain::Assistant::AgentRunnerService).to receive(:new).with(
assistant: assistant,
conversation: conversation
)
expect(Captain::Llm::AssistantChatService).not_to receive(:new)
described_class.perform_now(conversation, assistant)
expect(conversation.messages.last.content).to eq('Hey, welcome to Captain V2')
end
it 'passes message history to agent runner service' do
expected_messages = [
{ content: 'Hello', role: 'user' }
]
expect(mock_agent_runner_service).to receive(:generate_response).with(
message_history: expected_messages
)
described_class.perform_now(conversation, assistant)
end
it 'generates and processes response' do
described_class.perform_now(conversation, assistant)
expect(conversation.messages.count).to eq(2)
expect(conversation.messages.outgoing.count).to eq(1)
expect(conversation.messages.last.content).to eq('Hey, welcome to Captain V2')
end
it 'increments usage response' do
described_class.perform_now(conversation, assistant)
account.reload
expect(account.usage_limits[:captain][:responses][:consumed]).to eq(1)
end
end
context 'when message contains an image' do


@@ -0,0 +1,123 @@
# frozen_string_literal: true
require 'rails_helper'
RSpec.describe Captain::PromptRenderer do
let(:template_name) { 'test_template' }
let(:template_content) { 'Hello {{name}}, your balance is {{balance}}' }
let(:template_path) { Rails.root.join('enterprise', 'lib', 'captain', 'prompts', "#{template_name}.liquid") }
let(:context) { { name: 'John', balance: 100 } }
before do
allow(File).to receive(:exist?).and_return(false)
allow(File).to receive(:exist?).with(template_path).and_return(true)
allow(File).to receive(:read).with(template_path).and_return(template_content)
end
describe '.render' do
it 'renders template with context' do
result = described_class.render(template_name, context)
expect(result).to eq('Hello John, your balance is 100')
end
it 'handles string keys in context' do
string_context = { 'name' => 'Jane', 'balance' => 200 }
result = described_class.render(template_name, string_context)
expect(result).to eq('Hello Jane, your balance is 200')
end
it 'handles mixed symbol and string keys' do
mixed_context = { :name => 'Bob', 'balance' => 300 }
result = described_class.render(template_name, mixed_context)
expect(result).to eq('Hello Bob, your balance is 300')
end
it 'handles nested hash context' do
nested_template = 'User: {{user.name}}, Account: {{user.account.type}}'
nested_context = { user: { name: 'Alice', account: { type: 'premium' } } }
allow(File).to receive(:read).with(template_path).and_return(nested_template)
result = described_class.render(template_name, nested_context)
expect(result).to eq('User: Alice, Account: premium')
end
it 'handles empty context' do
simple_template = 'Hello World'
allow(File).to receive(:read).with(template_path).and_return(simple_template)
result = described_class.render(template_name, {})
expect(result).to eq('Hello World')
end
it 'loads and parses liquid template' do
liquid_template_double = instance_double(Liquid::Template)
allow(Liquid::Template).to receive(:parse).with(template_content).and_return(liquid_template_double)
allow(liquid_template_double).to receive(:render).with(hash_including('name', 'balance')).and_return('rendered')
result = described_class.render(template_name, context)
expect(result).to eq('rendered')
expect(Liquid::Template).to have_received(:parse).with(template_content)
end
end
describe '.load_template' do
it 'reads template file from correct path' do
described_class.send(:load_template, template_name)
expect(File).to have_received(:read).with(template_path)
end
it 'raises error when template does not exist' do
allow(File).to receive(:exist?).with(template_path).and_return(false)
expect { described_class.send(:load_template, template_name) }
.to raise_error("Template not found: #{template_name}")
end
it 'constructs correct template path' do
expected_path = Rails.root.join('enterprise/lib/captain/prompts/my_template.liquid')
allow(File).to receive(:exist?).with(expected_path).and_return(true)
allow(File).to receive(:read).with(expected_path).and_return('test content')
described_class.send(:load_template, 'my_template')
expect(File).to have_received(:exist?).with(expected_path)
end
end
describe '.stringify_keys' do
it 'converts symbol keys to strings' do
hash = { name: 'John', age: 30 }
result = described_class.send(:stringify_keys, hash)
expect(result).to eq({ 'name' => 'John', 'age' => 30 })
end
it 'handles nested hashes' do
hash = { user: { name: 'John', profile: { age: 30 } } }
result = described_class.send(:stringify_keys, hash)
expect(result).to eq({ 'user' => { 'name' => 'John', 'profile' => { 'age' => 30 } } })
end
it 'handles arrays with hashes' do
hash = { users: [{ name: 'John' }, { name: 'Jane' }] }
result = described_class.send(:stringify_keys, hash)
expect(result).to eq({ 'users' => [{ 'name' => 'John' }, { 'name' => 'Jane' }] })
end
it 'handles empty hash' do
result = described_class.send(:stringify_keys, {})
expect(result).to eq({})
end
end
end


@@ -0,0 +1,186 @@
# frozen_string_literal: true
require 'rails_helper'
RSpec.describe Concerns::Agentable do
let(:dummy_class) do
Class.new do
include Concerns::Agentable
attr_accessor :temperature
def initialize(name: 'Test Agent', temperature: 0.8)
@name = name
@temperature = temperature
end
def self.name
'DummyClass'
end
private
def agent_name
@name
end
def prompt_context
{ base_key: 'base_value' }
end
end
end
let(:dummy_instance) { dummy_class.new }
let(:mock_agents_agent) { instance_double(Agents::Agent) }
let(:mock_installation_config) { instance_double(InstallationConfig, value: 'gpt-4-turbo') }
before do
allow(Agents::Agent).to receive(:new).and_return(mock_agents_agent)
allow(InstallationConfig).to receive(:find_by).with(name: 'CAPTAIN_OPEN_AI_MODEL').and_return(mock_installation_config)
allow(Captain::PromptRenderer).to receive(:render).and_return('rendered_template')
end
describe '#agent' do
it 'creates an Agents::Agent with correct parameters' do
expect(Agents::Agent).to receive(:new).with(
name: 'Test Agent',
instructions: instance_of(Proc),
tools: [],
model: 'gpt-4-turbo',
temperature: 0.8,
response_schema: Captain::ResponseSchema
)
dummy_instance.agent
end
it 'converts nil temperature to 0.0' do
dummy_instance.temperature = nil
expect(Agents::Agent).to receive(:new).with(
hash_including(temperature: 0.0)
)
dummy_instance.agent
end
it 'converts temperature to float' do
dummy_instance.temperature = '0.5'
expect(Agents::Agent).to receive(:new).with(
hash_including(temperature: 0.5)
)
dummy_instance.agent
end
end
describe '#agent_instructions' do
it 'calls Captain::PromptRenderer with base context' do
expect(Captain::PromptRenderer).to receive(:render).with(
'dummy_class',
hash_including(base_key: 'base_value')
)
dummy_instance.agent_instructions
end
it 'merges context state when provided' do
context_double = instance_double(Agents::RunContext,
context: {
state: {
conversation: { id: 123 },
contact: { name: 'John' }
}
})
expected_context = {
base_key: 'base_value',
conversation: { id: 123 },
contact: { name: 'John' }
}
expect(Captain::PromptRenderer).to receive(:render).with(
'dummy_class',
hash_including(expected_context)
)
dummy_instance.agent_instructions(context_double)
end
it 'handles context without state' do
context_double = instance_double(Agents::RunContext, context: {})
expect(Captain::PromptRenderer).to receive(:render).with(
'dummy_class',
hash_including(
base_key: 'base_value',
conversation: {},
contact: {}
)
)
dummy_instance.agent_instructions(context_double)
end
end
describe '#template_name' do
it 'returns underscored class name' do
expect(dummy_instance.send(:template_name)).to eq('dummy_class')
end
end
describe '#agent_tools' do
it 'returns empty array by default' do
expect(dummy_instance.send(:agent_tools)).to eq([])
end
end
describe '#agent_model' do
it 'returns value from InstallationConfig when present' do
expect(dummy_instance.send(:agent_model)).to eq('gpt-4-turbo')
end
it 'returns default model when config not found' do
allow(InstallationConfig).to receive(:find_by).and_return(nil)
expect(dummy_instance.send(:agent_model)).to eq('gpt-4.1-mini')
end
it 'returns default model when config value is nil' do
allow(mock_installation_config).to receive(:value).and_return(nil)
expect(dummy_instance.send(:agent_model)).to eq('gpt-4.1-mini')
end
end
describe '#agent_response_schema' do
it 'returns Captain::ResponseSchema' do
expect(dummy_instance.send(:agent_response_schema)).to eq(Captain::ResponseSchema)
end
end
describe 'required methods' do
let(:incomplete_class) do
Class.new do
include Concerns::Agentable
end
end
let(:incomplete_instance) { incomplete_class.new }
describe '#agent_name' do
it 'raises NotImplementedError when not implemented' do
expect { incomplete_instance.send(:agent_name) }
.to raise_error(NotImplementedError, /must implement agent_name/)
end
end
describe '#prompt_context' do
it 'raises NotImplementedError when not implemented' do
expect { incomplete_instance.send(:prompt_context) }
.to raise_error(NotImplementedError, /must implement prompt_context/)
end
end
end
end


@@ -0,0 +1,320 @@
# frozen_string_literal: true
require 'rails_helper'
RSpec.describe Captain::Assistant::AgentRunnerService do
let(:account) { create(:account) }
let(:inbox) { create(:inbox, account: account) }
let(:contact) { create(:contact, account: account) }
let(:conversation) { create(:conversation, account: account, inbox: inbox, contact: contact) }
let(:assistant) { create(:captain_assistant, account: account) }
let(:scenario) { create(:captain_scenario, assistant: assistant, enabled: true) }
let(:mock_runner) { instance_double(Agents::Runner) }
let(:mock_agent) { instance_double(Agents::Agent) }
let(:mock_scenario_agent) { instance_double(Agents::Agent) }
let(:mock_result) { instance_double(Agents::RunResult, output: { 'response' => 'Test response' }) }
let(:message_history) do
[
{ role: 'user', content: 'Hello there' },
{ role: 'assistant', content: 'Hi! How can I help you?', agent_name: 'Assistant' },
{ role: 'user', content: 'I need help with my account' }
]
end
before do
allow(assistant).to receive(:agent).and_return(mock_agent)
scenarios_relation = instance_double(Captain::Scenario)
allow(scenarios_relation).to receive(:enabled).and_return([scenario])
allow(assistant).to receive(:scenarios).and_return(scenarios_relation)
allow(scenario).to receive(:agent).and_return(mock_scenario_agent)
allow(Agents::Runner).to receive(:with_agents).and_return(mock_runner)
allow(mock_runner).to receive(:run).and_return(mock_result)
allow(mock_agent).to receive(:register_handoffs)
allow(mock_scenario_agent).to receive(:register_handoffs)
end
describe '#initialize' do
it 'sets instance variables correctly' do
service = described_class.new(assistant: assistant, conversation: conversation)
expect(service.instance_variable_get(:@assistant)).to eq(assistant)
expect(service.instance_variable_get(:@conversation)).to eq(conversation)
expect(service.instance_variable_get(:@callbacks)).to eq({})
end
it 'accepts callbacks parameter' do
callbacks = { on_agent_thinking: proc { |x| x } }
service = described_class.new(assistant: assistant, callbacks: callbacks)
expect(service.instance_variable_get(:@callbacks)).to eq(callbacks)
end
end
describe '#generate_response' do
subject(:service) { described_class.new(assistant: assistant, conversation: conversation) }
it 'builds agents and wires them together' do
expect(assistant).to receive(:agent).and_return(mock_agent)
scenarios_relation = instance_double(Captain::Scenario)
allow(scenarios_relation).to receive(:enabled).and_return([scenario])
expect(assistant).to receive(:scenarios).and_return(scenarios_relation)
expect(scenario).to receive(:agent).and_return(mock_scenario_agent)
expect(mock_agent).to receive(:register_handoffs).with(mock_scenario_agent)
expect(mock_scenario_agent).to receive(:register_handoffs).with(mock_agent)
service.generate_response(message_history: message_history)
end
it 'creates runner with agents' do
expect(Agents::Runner).to receive(:with_agents).with(mock_agent, mock_scenario_agent)
service.generate_response(message_history: message_history)
end
it 'runs agent with extracted user message and context' do
expected_context = {
conversation_history: [
{ role: :user, content: 'Hello there', agent_name: nil },
{ role: :assistant, content: 'Hi! How can I help you?', agent_name: 'Assistant' },
{ role: :user, content: 'I need help with my account', agent_name: nil }
],
state: hash_including(
account_id: account.id,
assistant_id: assistant.id,
conversation: hash_including(id: conversation.id),
contact: hash_including(id: contact.id)
)
}
expect(mock_runner).to receive(:run).with(
'I need help with my account',
context: expected_context
)
service.generate_response(message_history: message_history)
end
it 'processes and formats agent result' do
result = service.generate_response(message_history: message_history)
expect(result).to eq({ 'response' => 'Test response' })
end
context 'when no scenarios are enabled' do
before do
scenarios_relation = instance_double(Captain::Scenario)
allow(scenarios_relation).to receive(:enabled).and_return([])
allow(assistant).to receive(:scenarios).and_return(scenarios_relation)
end
it 'only uses assistant agent' do
expect(Agents::Runner).to receive(:with_agents).with(mock_agent)
expect(mock_agent).not_to receive(:register_handoffs)
service.generate_response(message_history: message_history)
end
end
context 'when agent result is a string' do
let(:mock_result) { instance_double(Agents::RunResult, output: 'Simple string response') }
it 'formats string response correctly' do
result = service.generate_response(message_history: message_history)
expect(result).to eq({
'response' => 'Simple string response',
'reasoning' => 'Processed by agent'
})
end
end
context 'when an error occurs' do
let(:error) { StandardError.new('Test error') }
before do
allow(mock_runner).to receive(:run).and_raise(error)
allow(ChatwootExceptionTracker).to receive(:new).and_return(
instance_double(ChatwootExceptionTracker, capture_exception: true)
)
end
it 'captures exception and returns error response' do
expect(ChatwootExceptionTracker).to receive(:new).with(error, account: conversation.account)
result = service.generate_response(message_history: message_history)
expect(result).to eq({
'response' => 'conversation_handoff',
'reasoning' => 'Error occurred: Test error'
})
end
it 'logs error details' do
expect(Rails.logger).to receive(:error).with('[Captain V2] AgentRunnerService error: Test error')
expect(Rails.logger).to receive(:error).with(kind_of(String))
service.generate_response(message_history: message_history)
end
context 'when conversation is nil' do
subject(:service) { described_class.new(assistant: assistant, conversation: nil) }
it 'handles missing conversation gracefully' do
expect(ChatwootExceptionTracker).to receive(:new).with(error, account: nil)
result = service.generate_response(message_history: message_history)
expect(result).to eq({
'response' => 'conversation_handoff',
'reasoning' => 'Error occurred: Test error'
})
end
end
end
end
describe '#build_context' do
subject(:service) { described_class.new(assistant: assistant, conversation: conversation) }
it 'builds context with conversation history and state' do
context = service.send(:build_context, message_history)
expect(context).to include(
conversation_history: array_including(
{ role: :user, content: 'Hello there', agent_name: nil },
{ role: :assistant, content: 'Hi! How can I help you?', agent_name: 'Assistant' }
),
state: hash_including(
account_id: account.id,
assistant_id: assistant.id
)
)
end
context 'with multimodal content' do
let(:multimodal_message_history) do
[
{
role: 'user',
content: [
{ type: 'text', text: 'Can you help with this image?' },
{ type: 'image_url', image_url: { url: 'https://example.com/image.jpg' } }
]
}
]
end
it 'extracts text content from multimodal messages' do
context = service.send(:build_context, multimodal_message_history)
expect(context[:conversation_history].first[:content]).to eq('Can you help with this image?')
end
end
end
describe '#extract_last_user_message' do
subject(:service) { described_class.new(assistant: assistant, conversation: conversation) }
it 'extracts the last user message' do
result = service.send(:extract_last_user_message, message_history)
expect(result).to eq('I need help with my account')
end
end
describe '#extract_text_from_content' do
subject(:service) { described_class.new(assistant: assistant, conversation: conversation) }
it 'extracts text from string content' do
result = service.send(:extract_text_from_content, 'Simple text')
expect(result).to eq('Simple text')
end
it 'extracts response from hash content' do
content = { 'response' => 'Hash response' }
result = service.send(:extract_text_from_content, content)
expect(result).to eq('Hash response')
end
it 'extracts text from multimodal array content' do
content = [
{ type: 'text', text: 'First part' },
{ type: 'image_url', image_url: { url: 'image.jpg' } },
{ type: 'text', text: 'Second part' }
]
result = service.send(:extract_text_from_content, content)
expect(result).to eq('First part Second part')
end
end
describe '#build_state' do
subject(:service) { described_class.new(assistant: assistant, conversation: conversation) }
it 'builds state with assistant and account information' do
state = service.send(:build_state)
expect(state).to include(
account_id: account.id,
assistant_id: assistant.id,
assistant_config: assistant.config
)
end
it 'includes conversation attributes when conversation is present' do
state = service.send(:build_state)
expect(state[:conversation]).to include(
id: conversation.id,
inbox_id: inbox.id,
contact_id: contact.id,
status: conversation.status
)
end
it 'includes contact attributes when contact is present' do
state = service.send(:build_state)
expect(state[:contact]).to include(
id: contact.id,
name: contact.name,
email: contact.email
)
end
context 'when conversation is nil' do
subject(:service) { described_class.new(assistant: assistant, conversation: nil) }
it 'builds state without conversation and contact' do
state = service.send(:build_state)
expect(state).to include(
account_id: account.id,
assistant_id: assistant.id,
assistant_config: assistant.config
)
expect(state).not_to have_key(:conversation)
expect(state).not_to have_key(:contact)
end
end
end
describe 'constants' do
it 'defines conversation state attributes' do
expect(described_class::CONVERSATION_STATE_ATTRIBUTES).to include(
:id, :display_id, :inbox_id, :contact_id, :status, :priority
)
end
it 'defines contact state attributes' do
expect(described_class::CONTACT_STATE_ATTRIBUTES).to include(
:id, :name, :email, :phone_number, :identifier, :contact_type
)
end
end
end