feat: Improve reply suggestions using help center articles (#9026)

Author:    Sojan Jose
Date:      2024-03-04 09:55:19 +05:30
Committer: GitHub
Parent:    881d4bf644
Commit:    81060a72a4

4 changed files with 26 additions and 12 deletions

View File

@@ -8,9 +8,9 @@ class ChatGpt
     @messages = [system_message(context_sections)]
   end

-  def generate_response(input, previous_messages = [])
+  def generate_response(input, previous_messages = [], role = 'user')
     @messages += previous_messages
-    @messages << { 'role': 'user', 'content': input } if input.present?
+    @messages << { 'role': role, 'content': input } if input.present?
     response = request_gpt
     JSON.parse(response['choices'][0]['message']['content'].strip)
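
Note: the new role argument defaults to 'user', so existing callers keep working; the reply-suggestion path below uses it to resend the conversation's last message under its original role. A minimal usage sketch, with illustrative names and values not taken from this commit:

    # context_sections, previous_messages, last_turn_text are placeholders.
    chat = ChatGpt.new(context_sections)
    # Earlier turns pass through unchanged; the final turn is appended with
    # the role it had in the transcript instead of always 'user'.
    chat.generate_response(last_turn_text, previous_messages, 'assistant')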

View File

@@ -3,6 +3,24 @@ module Enterprise::Integrations::OpenaiProcessorService
                         make_friendly make_formal simplify].freeze
   CACHEABLE_EVENTS = %w[label_suggestion].freeze

+  def reply_suggestion_message
+    return super unless conversation.inbox.response_bot_enabled?
+
+    messages = conversation_messages(in_array_format: true)
+    last_message = messages.pop
+
+    robin_response = ChatGpt.new(
+      Enterprise::MessageTemplates::ResponseBotService.response_sections(last_message[:content], conversation.inbox)
+    ).generate_response(
+      last_message[:content], messages, last_message[:role]
+    )
+    message_content = robin_response['response']
+    if robin_response['context_ids'].present?
+      message_content += Enterprise::MessageTemplates::ResponseBotService.generate_sources_section(robin_response['context_ids'])
+    end
+
+    message_content
+  end

   def label_suggestion_message
     payload = label_suggestion_body
     return nil if payload.blank?
@@ -19,8 +37,6 @@ module Enterprise::Integrations::OpenaiProcessorService
   private

   def labels_with_messages
-    conversation = find_conversation
-    return nil unless valid_conversation?(conversation)

     labels = hook.account.labels.pluck(:title).join(', ')
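
For context, generate_response returns the model's output parsed as JSON. Judging from the keys read in reply_suggestion_message, the payload is shaped roughly like the sketch below; the sample values, and the reading of context_ids as help-center article references, are assumptions based on the commit title rather than anything spelled out in the diff:

    robin_response = {
      'response'    => 'You can reset your password from the login screen.',
      'context_ids' => [42, 57] # ids of the articles that informed the answer (assumed)
    }
    # message_content becomes the reply text, optionally followed by a
    # sources section generated from those ids.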

View File

@@ -62,7 +62,6 @@ class Integrations::Openai::ProcessorService < Integrations::OpenaiBaseService
   end

   def conversation_messages(in_array_format: false)
-    conversation = find_conversation
     messages = init_messages_body(in_array_format)

     add_messages_until_token_limit(conversation, messages, in_array_format)
@@ -70,7 +69,7 @@ class Integrations::Openai::ProcessorService < Integrations::OpenaiBaseService
   def add_messages_until_token_limit(conversation, messages, in_array_format, start_from = 0)
     character_count = start_from

-    conversation.messages.chat.reorder('id desc').each do |message|
+    conversation.messages.where(message_type: [:incoming, :outgoing]).where(private: false).reorder('id desc').each do |message|
       character_count, message_added = add_message_if_within_limit(character_count, message, messages, in_array_format)
       break unless message_added
     end
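
Spelling the query out replaces the old chat scope with an explicit filter: only incoming and outgoing messages, with private notes excluded, are fed to the model, newest first, until the character budget is exhausted. If this filter were worth naming, a hypothetical scope on the Message model could read:

    # Hypothetical named scope, equivalent to the inline query above:
    scope :for_llm_context, lambda {
      where(message_type: [:incoming, :outgoing]).where(private: false)
    }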

View File

@@ -1,8 +1,8 @@
 class Integrations::OpenaiBaseService
-  # 3.5 support 4,096 tokens
+  # 3.5 support 16,385 tokens
   # 1 token is approx 4 characters
-  # 4,096 * 4 = 16,384 characters, sticking to 15,000 to be safe
-  TOKEN_LIMIT = 15_000
+  # 16385 * 4 = 65540 characters, sticking to 50,000 to be safe
+  TOKEN_LIMIT = 50_000

   API_URL = 'https://api.openai.com/v1/chat/completions'.freeze
   GPT_MODEL = 'gpt-3.5-turbo'.freeze
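
The budget arithmetic behind the new constant: the updated gpt-3.5-turbo context window is 16,385 tokens, and at roughly 4 characters per token that is about 65,540 characters, so capping at 50,000 leaves headroom for the system prompt and the completion. A rough sketch of how such a character budget gets enforced (illustrative only; in this codebase add_message_if_within_limit does the actual check):

    # Illustrative: stop accumulating once the next message would overflow.
    def within_budget?(character_count, content)
      character_count + content.to_s.size <= TOKEN_LIMIT
    end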
@@ -31,7 +31,6 @@ class Integrations::OpenaiBaseService
   def cache_key
     return nil unless event_is_cacheable?

-    conversation = find_conversation
     return nil unless conversation

     # since the value from cache depends on the conversation last_activity_at, it will always be fresh
@@ -52,8 +51,8 @@ class Integrations::OpenaiBaseService
     Redis::Alfred.setex(cache_key, response)
   end

-  def find_conversation
-    hook.account.conversations.find_by(display_id: event['data']['conversation_display_id'])
+  def conversation
+    @conversation ||= hook.account.conversations.find_by(display_id: event['data']['conversation_display_id'])
   end

   def valid_event_name?
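
Renaming find_conversation to a memoized conversation method means the lookup runs once per service instance rather than once per call site (cache_key, conversation_messages, and reply_suggestion_message all touch it). One caveat of ||= memoization: when find_by returns nil, the query re-runs on every call, because nil is falsy. A general Ruby idiom that also caches a nil result, shown for comparison rather than as part of this commit:

    def conversation
      return @conversation if defined?(@conversation)

      @conversation = hook.account.conversations.find_by(display_id: event['data']['conversation_display_id'])
    end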