feat: use captain endpoint config in legacy OpenAI base service (#12060)

This PR migrates the legacy OpenAI integration (where users provide
their own API keys) from the hardcoded `https://api.openai.com`
endpoint to the configurable `CAPTAIN_OPEN_AI_ENDPOINT` from the
captain configuration. This ensures consistency across all OpenAI
integrations in the platform.

## Changes

- Updated `lib/integrations/openai_base_service.rb` to use the captain endpoint config
- Updated `enterprise/app/models/enterprise/concerns/article.rb` to use the captain endpoint config
- Removed the unused `enterprise/lib/chat_gpt.rb` class
- Added tests for the endpoint configuration behavior
Shivam Mishra authored on 2025-07-30 08:58:27 +04:00, committed by GitHub
parent 6475a6a593, commit 75c57ad039
4 changed files with 63 additions and 65 deletions
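For operators, the practical effect is that a single installation config now drives the endpoint for every OpenAI call path. A minimal sketch of pointing an installation at an OpenAI-compatible gateway from the Rails console (the URL is a placeholder, not something from this commit):

# Hypothetical Rails console session; the URL is a placeholder for any
# OpenAI-compatible gateway. The helpers added in the diffs below append
# /v1/chat/completions to this value.
config = InstallationConfig.find_or_initialize_by(name: 'CAPTAIN_OPEN_AI_ENDPOINT')
config.value = 'https://my-openai-proxy.example.com'
config.save!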

enterprise/app/models/enterprise/concerns/article.rb

@@ -68,8 +68,16 @@ module Enterprise::Concerns::Article
     headers = { 'Content-Type' => 'application/json', 'Authorization' => "Bearer #{ENV.fetch('OPENAI_API_KEY', nil)}" }
     body = { model: 'gpt-4o', messages: messages, response_format: { type: 'json_object' } }.to_json
     Rails.logger.info "Requesting Chat GPT with body: #{body}"
-    response = HTTParty.post('https://api.openai.com/v1/chat/completions', headers: headers, body: body)
+    response = HTTParty.post(openai_api_url, headers: headers, body: body)
     Rails.logger.info "Chat GPT response: #{response.body}"
     JSON.parse(response.parsed_response['choices'][0]['message']['content'])['search_terms']
   end
+
+  private
+
+  def openai_api_url
+    endpoint = InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_ENDPOINT')&.value || 'https://api.openai.com/'
+    endpoint = endpoint.chomp('/')
+    "#{endpoint}/v1/chat/completions"
+  end
 end

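The new helper tolerates configured values with or without a trailing slash, since chomp('/') strips at most one trailing slash before the path is appended. A quick illustration with made-up values:

# Illustrative values only: both forms resolve to the same final URL.
'https://custom.azure.com/'.chomp('/')  # => "https://custom.azure.com"
'https://custom.azure.com'.chomp('/')   # => "https://custom.azure.com"
"#{'https://custom.azure.com/'.chomp('/')}/v1/chat/completions"
# => "https://custom.azure.com/v1/chat/completions"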
enterprise/lib/chat_gpt.rb

@@ -1,62 +0,0 @@
-class ChatGpt
-  def self.base_uri
-    'https://api.openai.com'
-  end
-
-  def initialize(context_sections = '')
-    @model = 'gpt-4o'
-    @messages = [system_message(context_sections)]
-  end
-
-  def generate_response(input, previous_messages = [], role = 'user')
-    @messages += previous_messages
-    @messages << { 'role': role, 'content': input } if input.present?
-    response = request_gpt
-    JSON.parse(response['choices'][0]['message']['content'].strip)
-  end
-
-  private
-
-  def system_message(context_sections)
-    {
-      'role': 'system',
-      'content': system_content(context_sections)
-    }
-  end
-
-  def system_content(context_sections)
-    <<~SYSTEM_PROMPT_MESSAGE
-      You are a very enthusiastic customer support representative who loves to help people.
-      Your answers will always be formatted in a valid JSON hash, as shown below. Never respond in a non-JSON format.
-      ```
-      {
-        response: '',
-        context_ids: [ids],
-      }
-      ```
-      response: will be the next response to the conversation
-      context_ids: will be an array of unique context IDs that were used to generate the answer. Choose the top 3.
-      The answers will be generated using the information provided at the end of the prompt under the context sections. You will not respond outside the context of the information provided in the context sections.
-      If the answer is not provided in the context sections, respond to the customer and ask whether they want to talk to another support agent. If they ask to chat with another agent, return `conversation_handoff` as the response in the JSON response.
-      ----------------------------------
-      Context sections:
-      #{context_sections}
-    SYSTEM_PROMPT_MESSAGE
-  end
-
-  def request_gpt
-    headers = { 'Content-Type' => 'application/json', 'Authorization' => "Bearer #{ENV.fetch('OPENAI_API_KEY')}" }
-    body = { model: @model, messages: @messages, response_format: { type: 'json_object' } }.to_json
-    Rails.logger.info "Requesting Chat GPT with body: #{body}"
-    response = HTTParty.post("#{self.class.base_uri}/v1/chat/completions", headers: headers, body: body)
-    Rails.logger.info "Chat GPT response: #{response.body}"
-    JSON.parse(response.body)
-  end
-end

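For context on what the removed class did: its system prompt pinned replies to a JSON hash with response and context_ids keys, which generate_response then parsed. An illustrative, entirely made-up payload matching that contract:

# Made-up example of the shape the prompt above demanded; generate_response
# parsed the model's message content into a hash like this.
{
  'response' => 'You can reset your password from the profile settings page.',
  'context_ids' => [3, 7, 12]
}
# When the context sections had no answer, the prompt told the model to
# return 'conversation_handoff' as the response value instead.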
lib/integrations/openai_base_service.rb

@@ -4,7 +4,6 @@ class Integrations::OpenaiBaseService
   # sticking with 120000 to be safe
   # 120000 * 4 = 480,000 characters (rounding off downwards to 400,000 to be safe)
   TOKEN_LIMIT = 400_000
-  API_URL = 'https://api.openai.com/v1/chat/completions'.freeze
   GPT_MODEL = ENV.fetch('OPENAI_GPT_MODEL', 'gpt-4o-mini').freeze
   ALLOWED_EVENT_NAMES = %w[rephrase summarize reply_suggestion fix_spelling_grammar shorten expand make_friendly make_formal simplify].freeze
@@ -81,6 +80,12 @@ class Integrations::OpenaiBaseService
     self.class::CACHEABLE_EVENTS.include?(event_name)
   end

+  def api_url
+    endpoint = InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_ENDPOINT')&.value || 'https://api.openai.com/'
+    endpoint = endpoint.chomp('/')
+    "#{endpoint}/v1/chat/completions"
+  end
+
   def make_api_call(body)
     headers = {
       'Content-Type' => 'application/json',
@@ -88,7 +93,7 @@ class Integrations::OpenaiBaseService
     }
     Rails.logger.info("OpenAI API request: #{body}")
-    response = HTTParty.post(API_URL, headers: headers, body: body)
+    response = HTTParty.post(api_url, headers: headers, body: body)
     Rails.logger.info("OpenAI API response: #{response.body}")
     return { error: response.parsed_response, error_code: response.code } unless response.success?

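Note that the same three-line lookup now exists in both this service and the article concern. A hypothetical follow-up (not part of this commit) could extract it into a shared helper, for example:

# Hypothetical shared module, not in this commit; the module and method
# names are invented for illustration. Both call sites could include this
# instead of duplicating the lookup-and-chomp logic.
module OpenaiEndpointHelper
  DEFAULT_OPENAI_ENDPOINT = 'https://api.openai.com/'.freeze

  def openai_chat_completions_url
    endpoint = InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_ENDPOINT')&.value || DEFAULT_OPENAI_ENDPOINT
    "#{endpoint.chomp('/')}/v1/chat/completions"
  end
end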
spec/lib/integrations/openai/processor_service_spec.rb

@@ -253,5 +253,52 @@ RSpec.describe Integrations::Openai::ProcessorService do
         expect(result).to eq({ :message => 'This is a reply from openai.' })
       end
     end
+
+    context 'when testing endpoint configuration' do
+      let(:event) { { 'name' => 'rephrase', 'data' => { 'content' => 'test message' } } }
+
+      context 'when CAPTAIN_OPEN_AI_ENDPOINT is not configured' do
+        it 'uses default OpenAI endpoint' do
+          InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_ENDPOINT')&.destroy
+          stub_request(:post, 'https://api.openai.com/v1/chat/completions')
+            .with(body: anything, headers: expected_headers)
+            .to_return(status: 200, body: openai_response, headers: {})
+
+          result = subject.perform
+          expect(result).to eq({ :message => 'This is a reply from openai.' })
+        end
+      end
+
+      context 'when CAPTAIN_OPEN_AI_ENDPOINT is configured' do
+        before do
+          create(:installation_config, name: 'CAPTAIN_OPEN_AI_ENDPOINT', value: 'https://custom.azure.com')
+        end
+
+        it 'uses custom endpoint' do
+          stub_request(:post, 'https://custom.azure.com/v1/chat/completions')
+            .with(body: anything, headers: expected_headers)
+            .to_return(status: 200, body: openai_response, headers: {})
+
+          result = subject.perform
+          expect(result).to eq({ :message => 'This is a reply from openai.' })
+        end
+      end
+
+      context 'when CAPTAIN_OPEN_AI_ENDPOINT has trailing slash' do
+        before do
+          create(:installation_config, name: 'CAPTAIN_OPEN_AI_ENDPOINT', value: 'https://custom.azure.com/')
+        end
+
+        it 'properly handles trailing slash' do
+          stub_request(:post, 'https://custom.azure.com/v1/chat/completions')
+            .with(body: anything, headers: expected_headers)
+            .to_return(status: 200, body: openai_response, headers: {})
+
+          result = subject.perform
+          expect(result).to eq({ :message => 'This is a reply from openai.' })
+        end
+      end
+    end
   end
 end