Mirror of https://github.com/lingble/chatwoot.git (synced 2025-10-31 11:08:04 +00:00)
	feat: Add support for account abuse detection (#11001)
This PR adds a service to automate account abuse detection. The analysis is currently based on the signup name and website URL; more context, such as usage analysis and message metadata, could be added later.
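At a high level: a low-priority job hands the account to Internal::AccountAnalysis::ThreatAnalyserService, which scrapes the signup email's domain, has an LLM classify the content, persists the verdict in accounts.internal_attributes, and posts flagged accounts to a Discord webhook. The diff does not show where the job is enqueued, so the callback below is only an assumed wiring sketch:

# Hypothetical trigger — this PR does not include the enqueue site.
class Account < ApplicationRecord
  after_create_commit :schedule_threat_analysis

  private

  def schedule_threat_analysis
    Internal::AccountAnalysisJob.perform_later(self)
  end
end
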
@@ -2,18 +2,20 @@
 #
 # Table name: accounts
 #
-#  id                    :integer          not null, primary key
-#  auto_resolve_duration :integer
-#  custom_attributes     :jsonb
-#  domain                :string(100)
-#  feature_flags         :bigint           default(0), not null
-#  limits                :jsonb
-#  locale                :integer          default("en")
-#  name                  :string           not null
-#  status                :integer          default("active")
-#  support_email         :string(100)
-#  created_at            :datetime         not null
-#  updated_at            :datetime         not null
+#  id                         :integer          not null, primary key
+#  auto_resolve_duration      :integer
+#  contactable_contacts_count :integer          default(0)
+#  custom_attributes          :jsonb
+#  domain                     :string(100)
+#  feature_flags              :bigint           default(0), not null
+#  internal_attributes        :jsonb            not null
+#  limits                     :jsonb
+#  locale                     :integer          default("en")
+#  name                       :string           not null
+#  status                     :integer          default("active")
+#  support_email              :string(100)
+#  created_at                 :datetime         not null
+#  updated_at                 :datetime         not null
 #
 # Indexes
 #
@@ -212,6 +212,10 @@
 - name: CHATWOOT_SUPPORT_IDENTIFIER_HASH
   value:
   description: 'The Chatwoot identifier hash, to validate the contact in the live chat window.'
+- name: ACCOUNT_SECURITY_NOTIFICATION_WEBHOOK_URL
+  display_title: Webhook URL to post security analysis
+  value:
+  description: Used to notify Chatwoot about account abuse and potential threats (should be a Discord webhook URL)
 # ------- End of Chatwoot Internal Config for Self Hosted ----#

 ## ------ Configs added for enterprise clients ------ ##
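
The webhook URL is read from InstallationConfig (see DiscordNotifierService below), so it can be set from the super admin console or seeded directly. A console sketch, with a placeholder URL:

# Console sketch — the Discord URL is a placeholder, not a real webhook.
InstallationConfig.create!(
  name: 'ACCOUNT_SECURITY_NOTIFICATION_WEBHOOK_URL',
  value: 'https://discord.com/api/webhooks/<id>/<token>'
)
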
@@ -0,0 +1,5 @@
class AddInternalAttributesToAccounts < ActiveRecord::Migration[7.0]
  def change
    add_column :accounts, :internal_attributes, :jsonb, null: false, default: {}
  end
end
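
Since internal_attributes is a plain jsonb column with a {} default, the services below treat it as an ordinary Hash. For example (the keys match those written by AccountUpdaterService; the values shown are illustrative):

account = Account.first # any persisted account
account.internal_attributes['security_flagged']         # => nil until the account is flagged
account.internal_attributes['last_threat_scan_level']   # => e.g. "none"
account.internal_attributes['last_threat_scan_summary'] # => e.g. "No threats detected"
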
@@ -10,7 +10,7 @@
 #
 # It's strongly recommended that you check this file into your version control system.

-ActiveRecord::Schema[7.0].define(version: 2025_02_07_040150) do
+ActiveRecord::Schema[7.0].define(version: 2025_02_28_185548) do
   # These extensions should be enabled to support this database
   enable_extension "pg_stat_statements"
   enable_extension "pg_trgm"

@@ -57,6 +57,7 @@ ActiveRecord::Schema[7.0].define(version: 2025_02_07_040150) do
     t.jsonb "limits", default: {}
     t.jsonb "custom_attributes", default: {}
     t.integer "status", default: 0
+    t.jsonb "internal_attributes", default: {}, null: false
     t.index ["status"], name: "index_accounts_on_status"
   end

@@ -33,6 +33,6 @@ module Enterprise::SuperAdmin::AppConfigsController

   def internal_config_options
     %w[CHATWOOT_INBOX_TOKEN CHATWOOT_INBOX_HMAC_KEY ANALYTICS_TOKEN CLEARBIT_API_KEY DASHBOARD_SCRIPTS BLOCKED_EMAIL_DOMAINS
-       CAPTAIN_CLOUD_PLAN_LIMITS]
+       CAPTAIN_CLOUD_PLAN_LIMITS ACCOUNT_SECURITY_NOTIFICATION_WEBHOOK_URL]
   end
 end

enterprise/app/jobs/internal/account_analysis_job.rb (new file, 9 lines)
@@ -0,0 +1,9 @@
class Internal::AccountAnalysisJob < ApplicationJob
  queue_as :low

  def perform(account)
    return if GlobalConfig.get_value('DEPLOYMENT_ENV') != 'cloud'

    Internal::AccountAnalysis::ThreatAnalyserService.new(account).perform
  end
end
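
Note the guard: the job is a no-op unless DEPLOYMENT_ENV is 'cloud', so self-hosted installations never hit the LLM. To run an analysis manually for one account (assuming a cloud deployment and a running job backend):

# Manual enqueue from a Rails console — the account id is hypothetical.
Internal::AccountAnalysisJob.perform_later(Account.find(42))
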
@@ -1,6 +1,6 @@
 require 'openai'

-class Captain::Copilot::ChatService < Captain::Llm::BaseOpenAiService
+class Captain::Copilot::ChatService < Llm::BaseOpenAiService
   include Captain::ChatHelper

   def initialize(assistant, config)

@@ -1,6 +1,6 @@
 require 'openai'

-class Captain::Llm::AssistantChatService < Captain::Llm::BaseOpenAiService
+class Captain::Llm::AssistantChatService < Llm::BaseOpenAiService
   include Captain::ChatHelper

   def initialize(assistant: nil)

@@ -1,4 +1,4 @@
-class Captain::Llm::ContactAttributesService < Captain::Llm::BaseOpenAiService
+class Captain::Llm::ContactAttributesService < Llm::BaseOpenAiService
   def initialize(assistant, conversation)
     super()
     @assistant = assistant

@@ -1,4 +1,4 @@
-class Captain::Llm::ContactNotesService < Captain::Llm::BaseOpenAiService
+class Captain::Llm::ContactNotesService < Llm::BaseOpenAiService
   def initialize(assistant, conversation)
     super()
     @assistant = assistant

@@ -1,4 +1,4 @@
-class Captain::Llm::ConversationFaqService < Captain::Llm::BaseOpenAiService
+class Captain::Llm::ConversationFaqService < Llm::BaseOpenAiService
   DISTANCE_THRESHOLD = 0.3

   def initialize(assistant, conversation)

@@ -1,6 +1,6 @@
 require 'openai'

-class Captain::Llm::EmbeddingService < Captain::Llm::BaseOpenAiService
+class Captain::Llm::EmbeddingService < Llm::BaseOpenAiService
   class EmbeddingsError < StandardError; end

   DEFAULT_MODEL = 'text-embedding-3-small'.freeze

@@ -1,4 +1,4 @@
-class Captain::Llm::FaqGeneratorService < Captain::Llm::BaseOpenAiService
+class Captain::Llm::FaqGeneratorService < Llm::BaseOpenAiService
   def initialize(content)
     super()
     @content = content
@@ -0,0 +1,54 @@
class Internal::AccountAnalysis::AccountUpdaterService
  def initialize(account)
    @account = account
  end

  def update_with_analysis(analysis, error_message = nil)
    if error_message
      save_error(error_message)
      notify_on_discord
      return
    end

    save_analysis_results(analysis)
    flag_account_if_needed(analysis)
  end

  private

  def save_error(error_message)
    @account.internal_attributes['security_flagged'] = true
    @account.internal_attributes['security_flag_reason'] = "Error: #{error_message}"
    @account.save
  end

  def save_analysis_results(analysis)
    @account.internal_attributes['last_threat_scan_at'] = Time.current
    @account.internal_attributes['last_threat_scan_level'] = analysis['threat_level']
    @account.internal_attributes['last_threat_scan_summary'] = analysis['threat_summary']
    @account.internal_attributes['last_threat_scan_recommendation'] = analysis['recommendation']
    @account.save!
  end

  def flag_account_if_needed(analysis)
    return if analysis['threat_level'] == 'none'

    if %w[high medium].include?(analysis['threat_level']) ||
       analysis['illegal_activities_detected'] == true ||
       analysis['recommendation'] == 'block'

      @account.internal_attributes['security_flagged'] = true
      @account.internal_attributes['security_flag_reason'] = "Threat detected: #{analysis['threat_summary']}"
      @account.save!

      Rails.logger.info("Flagging account #{@account.id} due to threat level: #{analysis['threat_level']}")
    end

    notify_on_discord
  end

  def notify_on_discord
    Rails.logger.info("Account #{@account.id} has been flagged for security review")
    Internal::AccountAnalysis::DiscordNotifierService.new.notify_flagged_account(@account)
  end
end
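
The flagging rule: a medium or high threat level, any detected illegal activity, or a 'block' recommendation flags the account, and any non-'none' result still notifies Discord because notify_on_discord sits outside the conditional. An illustrative call, with an analysis hash shaped like ContentEvaluatorService's output (the summary text is hypothetical):

Internal::AccountAnalysis::AccountUpdaterService.new(account).update_with_analysis(
  'threat_level' => 'medium',                          # triggers the security flag
  'threat_summary' => 'Unverifiable business claims',  # hypothetical summary
  'recommendation' => 'review',
  'illegal_activities_detected' => false
)
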
@@ -0,0 +1,73 @@
class Internal::AccountAnalysis::ContentEvaluatorService < Llm::BaseOpenAiService
  def initialize
    super()

    @model = 'gpt-4o-mini'.freeze
  end

  def evaluate(content)
    return default_evaluation if content.blank?

    begin
      response = send_to_llm(content)
      evaluation = handle_response(response)
      log_evaluation_results(evaluation)
      evaluation
    rescue StandardError => e
      handle_evaluation_error(e)
    end
  end

  private

  def send_to_llm(content)
    Rails.logger.info('Sending content to LLM for security evaluation')
    @client.chat(
      parameters: {
        model: @model,
        messages: llm_messages(content),
        response_format: { type: 'json_object' }
      }
    )
  end

  def handle_response(response)
    return default_evaluation if response.nil?

    parsed = JSON.parse(response.dig('choices', 0, 'message', 'content').strip)

    {
      'threat_level' => parsed['threat_level'] || 'unknown',
      'threat_summary' => parsed['threat_summary'] || 'No threat summary provided',
      'detected_threats' => parsed['detected_threats'] || [],
      'illegal_activities_detected' => parsed['illegal_activities_detected'] || false,
      'recommendation' => parsed['recommendation'] || 'review'
    }
  end

  def default_evaluation(error_type = nil)
    {
      'threat_level' => 'unknown',
      'threat_summary' => 'Failed to complete content evaluation',
      'detected_threats' => error_type ? [error_type] : [],
      'illegal_activities_detected' => false,
      'recommendation' => 'review'
    }
  end

  def log_evaluation_results(evaluation)
    Rails.logger.info("LLM evaluation - Level: #{evaluation['threat_level']}, Illegal activities: #{evaluation['illegal_activities_detected']}")
  end

  def handle_evaluation_error(error)
    Rails.logger.error("Error evaluating content: #{error.message}")
    default_evaluation('evaluation_failure')
  end

  def llm_messages(content)
    [
      { role: 'system', content: 'You are a security analysis system that evaluates content for potential threats and scams.' },
      { role: 'user', content: Internal::AccountAnalysis::PromptsService.threat_analyser(content.to_s[0...10_000]) }
    ]
  end
end
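
Callers always get back the same five-key hash, even on failure, since missing fields and raised errors fall back to default_evaluation. A usage sketch (assumes the CAPTAIN_OPEN_AI_API_KEY installation config used by Llm::BaseOpenAiService is set, as in the specs below):

evaluation = Internal::AccountAnalysis::ContentEvaluatorService.new.evaluate('Domain: acme.com ...')
evaluation['threat_level']   # => "none", "low", "medium", or "high"; "unknown" on failure
evaluation['recommendation'] # => "approve", "review", or "block"
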
@@ -0,0 +1,47 @@
class Internal::AccountAnalysis::DiscordNotifierService
  def notify_flagged_account(account)
    if webhook_url.blank?
      Rails.logger.error('Cannot send Discord notification: No webhook URL configured')
      return
    end

    HTTParty.post(
      webhook_url,
      body: build_message(account).to_json,
      headers: { 'Content-Type' => 'application/json' }
    )

    Rails.logger.info("Discord notification sent for flagged account #{account.id}")
  rescue StandardError => e
    Rails.logger.error("Error sending Discord notification: #{e.message}")
  end

  private

  def build_message(account)
    analysis = account.internal_attributes
    user = account.users.order(id: :asc).first

    content = <<~MESSAGE
      ---
      An account has been flagged in our security system with the following details:

      🆔 **Account Details:**
      Account ID: #{account.id}
      User Email: #{user&.email || 'N/A'}
      Threat Level: #{analysis['last_threat_scan_level']}

      🔎 **System Recommendation:** #{analysis['last_threat_scan_recommendation']}
      #{analysis['illegal_activities_detected'] ? '⚠️ Potential illegal activities detected' : 'No illegal activities detected'}

      📝 **Findings:**
      #{analysis['last_threat_scan_summary']}
    MESSAGE

    { content: content }
  end

  def webhook_url
    @webhook_url ||= InstallationConfig.find_by(name: 'ACCOUNT_SECURITY_NOTIFICATION_WEBHOOK_URL')&.value
  end
end
@@ -0,0 +1,31 @@
class Internal::AccountAnalysis::PromptsService
  class << self
    def threat_analyser(content)
      <<~PROMPT
        Analyze the following website content for potential security threats, scams, or illegal activities.

        Focus on identifying:
        1. Phishing attempts
        2. Fraudulent business practices
        3. Malware distribution
        4. Illegal product/service offerings
        5. Money laundering indicators
        6. Identity theft schemes

        Always classify websites under construction or without content as medium.

        Website content:
        #{content}

        Provide your analysis in the following JSON format:
        {
          "threat_level": "none|low|medium|high",
          "threat_summary": "Brief summary of findings",
          "detected_threats": ["threat1", "threat2"],
          "illegal_activities_detected": true|false,
          "recommendation": "approve|review|block"
        }
      PROMPT
    end
  end
end
@@ -0,0 +1,43 @@
class Internal::AccountAnalysis::ThreatAnalyserService
  def initialize(account)
    @account = account
    @user = account.users.order(id: :asc).first
    @domain = extract_domain_from_email(@user&.email)
  end

  def perform
    if @domain.blank?
      Rails.logger.info("Skipping threat analysis for account #{@account.id}: No domain found")
      return
    end

    website_content = Internal::AccountAnalysis::WebsiteScraperService.new(@domain).perform
    if website_content.blank?
      Rails.logger.info("Skipping threat analysis for account #{@account.id}: No website content found")
      Internal::AccountAnalysis::AccountUpdaterService.new(@account).update_with_analysis(nil, 'Scraping error: No content found')
      return
    end

    content = <<~MESSAGE
      Domain: #{@domain}
      Content: #{website_content}
    MESSAGE
    threat_analysis = Internal::AccountAnalysis::ContentEvaluatorService.new.evaluate(content)
    Rails.logger.info("Completed threat analysis: level=#{threat_analysis['threat_level']} for account-id: #{@account.id}")

    Internal::AccountAnalysis::AccountUpdaterService.new(@account).update_with_analysis(threat_analysis)

    threat_analysis
  end

  private

  def extract_domain_from_email(email)
    return nil if email.blank?

    email.split('@').last
  rescue StandardError => e
    Rails.logger.error("Error extracting domain from email #{email}: #{e.message}")
    nil
  end
end
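
End to end, the service takes the account owner's email domain, scrapes it, and feeds the result to the evaluator and updater. A console sketch (the account id is hypothetical; assumes the first user signed up with a company email such as owner@acme.com):

analysis = Internal::AccountAnalysis::ThreatAnalyserService.new(Account.find(42)).perform
analysis&.dig('threat_level') # => e.g. "none"; nil when the domain or scraped content is missing
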
@@ -0,0 +1,32 @@
class Internal::AccountAnalysis::WebsiteScraperService
  def initialize(domain)
    @domain = domain
  end

  def perform
    return nil if @domain.blank?

    Rails.logger.info("Scraping website: #{external_link}")

    begin
      response = HTTParty.get(external_link, follow_redirects: true)
      response.to_s
    rescue StandardError => e
      Rails.logger.error("Error scraping website for domain #{@domain}: #{e.message}")
      nil
    end
  end

  private

  def external_link
    sanitize_url(@domain)
  end

  def sanitize_url(domain)
    url = domain
    url = "https://#{domain}" unless domain.start_with?('http://', 'https://')
    Rails.logger.info("Sanitized URL: #{url}")
    url
  end
end
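
sanitize_url only prepends https:// when the domain has no scheme; an existing http:// or https:// prefix is kept. Probing it from a console (using send since the method is private; the domain is hypothetical):

scraper = Internal::AccountAnalysis::WebsiteScraperService.new('acme.com')
scraper.send(:sanitize_url, 'acme.com')        # => "https://acme.com"
scraper.send(:sanitize_url, 'http://acme.com') # => "http://acme.com"
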
@@ -1,4 +1,4 @@
-class Captain::Llm::BaseOpenAiService
+class Llm::BaseOpenAiService
   DEFAULT_MODEL = 'gpt-4o-mini'.freeze

   def initialize
@@ -0,0 +1,130 @@
require 'rails_helper'

RSpec.describe Internal::AccountAnalysis::AccountUpdaterService do
  let(:account) { create(:account) }
  let(:service) { described_class.new(account) }
  let(:discord_notifier) { instance_double(Internal::AccountAnalysis::DiscordNotifierService, notify_flagged_account: true) }

  before do
    allow(Internal::AccountAnalysis::DiscordNotifierService).to receive(:new).and_return(discord_notifier)
    allow(Rails.logger).to receive(:info)
  end

  describe '#update_with_analysis' do
    context 'when error_message is provided' do
      it 'saves the error and notifies Discord' do
        service.update_with_analysis({}, 'Analysis failed')

        expect(account.internal_attributes['security_flagged']).to be true
        expect(account.internal_attributes['security_flag_reason']).to eq('Error: Analysis failed')
        expect(discord_notifier).to have_received(:notify_flagged_account).with(account)
      end
    end

    context 'when analysis is successful' do
      let(:analysis) do
        {
          'threat_level' => 'none',
          'threat_summary' => 'No threats detected',
          'recommendation' => 'allow'
        }
      end

      it 'saves the analysis results' do
        allow(Time).to receive(:current).and_return('2023-01-01 12:00:00')

        service.update_with_analysis(analysis)

        expect(account.internal_attributes['last_threat_scan_at']).to eq('2023-01-01 12:00:00')
        expect(account.internal_attributes['last_threat_scan_level']).to eq('none')
        expect(account.internal_attributes['last_threat_scan_summary']).to eq('No threats detected')
        expect(account.internal_attributes['last_threat_scan_recommendation']).to eq('allow')
      end

      it 'does not flag the account when threat level is none' do
        service.update_with_analysis(analysis)

        expect(account.internal_attributes).not_to include('security_flagged')
        expect(discord_notifier).not_to have_received(:notify_flagged_account)
      end
    end

    context 'when analysis detects high threat level' do
      let(:analysis) do
        {
          'threat_level' => 'high',
          'threat_summary' => 'Suspicious activity detected',
          'recommendation' => 'review',
          'illegal_activities_detected' => false
        }
      end

      it 'flags the account and notifies Discord' do
        service.update_with_analysis(analysis)

        expect(account.internal_attributes['security_flagged']).to be true
        expect(account.internal_attributes['security_flag_reason']).to eq('Threat detected: Suspicious activity detected')
        expect(discord_notifier).to have_received(:notify_flagged_account).with(account)
        expect(Rails.logger).to have_received(:info).with("Flagging account #{account.id} due to threat level: high")
        expect(Rails.logger).to have_received(:info).with("Account #{account.id} has been flagged for security review")
      end
    end

    context 'when analysis detects medium threat level' do
      let(:analysis) do
        {
          'threat_level' => 'medium',
          'threat_summary' => 'Potential issues found',
          'recommendation' => 'review',
          'illegal_activities_detected' => false
        }
      end

      it 'flags the account and notifies Discord' do
        service.update_with_analysis(analysis)

        expect(account.internal_attributes['security_flagged']).to be true
        expect(account.internal_attributes['security_flag_reason']).to eq('Threat detected: Potential issues found')
        expect(discord_notifier).to have_received(:notify_flagged_account).with(account)
      end
    end

    context 'when analysis detects illegal activities' do
      let(:analysis) do
        {
          'threat_level' => 'low',
          'threat_summary' => 'Minor issues found',
          'recommendation' => 'review',
          'illegal_activities_detected' => true
        }
      end

      it 'flags the account and notifies Discord' do
        service.update_with_analysis(analysis)

        expect(account.internal_attributes['security_flagged']).to be true
        expect(account.internal_attributes['security_flag_reason']).to eq('Threat detected: Minor issues found')
        expect(discord_notifier).to have_received(:notify_flagged_account).with(account)
      end
    end

    context 'when analysis recommends blocking' do
      let(:analysis) do
        {
          'threat_level' => 'low',
          'threat_summary' => 'Minor issues found',
          'recommendation' => 'block',
          'illegal_activities_detected' => false
        }
      end

      it 'flags the account and notifies Discord' do
        service.update_with_analysis(analysis)

        expect(account.internal_attributes['security_flagged']).to be true
        expect(account.internal_attributes['security_flag_reason']).to eq('Threat detected: Minor issues found')
        expect(discord_notifier).to have_received(:notify_flagged_account).with(account)
      end
    end
  end
end
@@ -0,0 +1,111 @@
require 'rails_helper'

RSpec.describe Internal::AccountAnalysis::ContentEvaluatorService do
  let(:service) { described_class.new }
  let(:content) { 'This is some test content' }

  before do
    create(:installation_config, name: 'CAPTAIN_OPEN_AI_API_KEY', value: 'test-key')
  end

  describe '#evaluate' do
    context 'when content is present' do
      let(:llm_response) do
        {
          'choices' => [
            {
              'message' => {
                'content' => {
                  'threat_level' => 'low',
                  'threat_summary' => 'No significant threats detected',
                  'detected_threats' => ['minor_concern'],
                  'illegal_activities_detected' => false,
                  'recommendation' => 'approve'
                }.to_json
              }
            }
          ]
        }
      end

      before do
        allow(service).to receive(:send_to_llm).and_return(llm_response)
        allow(Rails.logger).to receive(:info)
      end

      it 'returns the evaluation results' do
        result = service.evaluate(content)

        expect(result).to include(
          'threat_level' => 'low',
          'threat_summary' => 'No significant threats detected',
          'detected_threats' => ['minor_concern'],
          'illegal_activities_detected' => false,
          'recommendation' => 'approve'
        )
      end

      it 'logs the evaluation results' do
        service.evaluate(content)

        expect(Rails.logger).to have_received(:info).with('LLM evaluation - Level: low, Illegal activities: false')
      end
    end

    context 'when content is blank' do
      let(:blank_content) { '' }

      it 'returns the default evaluation without calling the LLM' do
        expect(service).not_to receive(:send_to_llm)

        result = service.evaluate(blank_content)

        expect(result).to include(
          'threat_level' => 'unknown',
          'threat_summary' => 'Failed to complete content evaluation',
          'detected_threats' => [],
          'illegal_activities_detected' => false,
          'recommendation' => 'review'
        )
      end
    end

    context 'when LLM response is nil' do
      before do
        allow(service).to receive(:send_to_llm).and_return(nil)
      end

      it 'returns the default evaluation' do
        result = service.evaluate(content)

        expect(result).to include(
          'threat_level' => 'unknown',
          'threat_summary' => 'Failed to complete content evaluation',
          'detected_threats' => [],
          'illegal_activities_detected' => false,
          'recommendation' => 'review'
        )
      end
    end

    context 'when error occurs during evaluation' do
      before do
        allow(service).to receive(:send_to_llm).and_raise(StandardError.new('Test error'))
        allow(Rails.logger).to receive(:error)
      end

      it 'logs the error and returns default evaluation with error type' do
        result = service.evaluate(content)

        expect(Rails.logger).to have_received(:error).with('Error evaluating content: Test error')
        expect(result).to include(
          'threat_level' => 'unknown',
          'threat_summary' => 'Failed to complete content evaluation',
          'detected_threats' => ['evaluation_failure'],
          'illegal_activities_detected' => false,
          'recommendation' => 'review'
        )
      end
    end
  end
end
@@ -0,0 +1,73 @@
require 'rails_helper'

RSpec.describe Internal::AccountAnalysis::DiscordNotifierService do
  let(:service) { described_class.new }
  let(:webhook_url) { 'https://discord.com/api/webhooks/123456789/some-token' }
  let(:account) do
    create(
      :account,
      internal_attributes: {
        'last_threat_scan_level' => 'high',
        'last_threat_scan_recommendation' => 'review',
        'illegal_activities_detected' => true,
        'last_threat_scan_summary' => 'Suspicious activity detected'
      }
    )
  end
  let!(:user) { create(:user, account: account) }

  before do
    allow(Rails.logger).to receive(:info)
    allow(Rails.logger).to receive(:error)
  end

  describe '#notify_flagged_account' do
    context 'when webhook URL is configured' do
      before do
        create(:installation_config, name: 'ACCOUNT_SECURITY_NOTIFICATION_WEBHOOK_URL', value: webhook_url)
        stub_request(:post, webhook_url).to_return(status: 200)
      end

      it 'sends notification to Discord webhook' do
        service.notify_flagged_account(account)
        expect(WebMock).to have_requested(:post, webhook_url)
          .with(
            body: hash_including(
              content: include(
                "Account ID: #{account.id}",
                "User Email: #{user.email}",
                'Threat Level: high',
                '**System Recommendation:** review',
                '⚠️ Potential illegal activities detected',
                'Suspicious activity detected'
              )
            )
          )
      end
    end

    context 'when webhook URL is not configured' do
      it 'logs error and does not make HTTP request' do
        service.notify_flagged_account(account)

        expect(Rails.logger).to have_received(:error)
          .with('Cannot send Discord notification: No webhook URL configured')
        expect(WebMock).not_to have_requested(:post, webhook_url)
      end
    end

    context 'when HTTP request fails' do
      before do
        create(:installation_config, name: 'ACCOUNT_SECURITY_NOTIFICATION_WEBHOOK_URL', value: webhook_url)
        stub_request(:post, webhook_url).to_raise(StandardError.new('Connection failed'))
      end

      it 'catches exception and logs error' do
        service.notify_flagged_account(account)

        expect(Rails.logger).to have_received(:error)
          .with('Error sending Discord notification: Connection failed')
      end
    end
  end
end
@@ -0,0 +1,62 @@
# frozen_string_literal: true

require 'rails_helper'

RSpec.describe Internal::AccountAnalysis::ThreatAnalyserService do
  subject { described_class.new(account) }

  let(:account) { create(:account) }
  let(:user) { create(:user, email: 'test@example.com', account: account) }
  let(:website_scraper) { instance_double(Internal::AccountAnalysis::WebsiteScraperService) }
  let(:content_evaluator) { instance_double(Internal::AccountAnalysis::ContentEvaluatorService) }
  let(:account_updater) { instance_double(Internal::AccountAnalysis::AccountUpdaterService) }
  let(:website_content) { 'This is the website content' }
  let(:threat_analysis) { { 'threat_level' => 'medium' } }

  before do
    user

    allow(Internal::AccountAnalysis::WebsiteScraperService).to receive(:new).with('example.com').and_return(website_scraper)
    allow(Internal::AccountAnalysis::ContentEvaluatorService).to receive(:new).and_return(content_evaluator)
    allow(Internal::AccountAnalysis::AccountUpdaterService).to receive(:new).with(account).and_return(account_updater)
  end

  describe '#perform' do
    before do
      allow(website_scraper).to receive(:perform).and_return(website_content)
      allow(content_evaluator).to receive(:evaluate).and_return(threat_analysis)
      allow(account_updater).to receive(:update_with_analysis)
      allow(Rails.logger).to receive(:info)
    end

    it 'performs threat analysis and updates the account' do
      expected_content = <<~MESSAGE
        Domain: example.com
        Content: This is the website content
      MESSAGE

      expect(website_scraper).to receive(:perform)
      expect(content_evaluator).to receive(:evaluate).with(expected_content)
      expect(account_updater).to receive(:update_with_analysis).with(threat_analysis)
      expect(Rails.logger).to receive(:info).with("Completed threat analysis: level=medium for account-id: #{account.id}")

      result = subject.perform
      expect(result).to eq(threat_analysis)
    end

    context 'when website content is blank' do
      before do
        allow(website_scraper).to receive(:perform).and_return(nil)
      end

      it 'logs info and updates account with error' do
        expect(Rails.logger).to receive(:info).with("Skipping threat analysis for account #{account.id}: No website content found")
        expect(account_updater).to receive(:update_with_analysis).with(nil, 'Scraping error: No content found')
        expect(content_evaluator).not_to receive(:evaluate)

        result = subject.perform
        expect(result).to be_nil
      end
    end
  end
end
@@ -0,0 +1,45 @@
require 'rails_helper'

RSpec.describe Internal::AccountAnalysis::WebsiteScraperService do
  describe '#perform' do
    let(:service) { described_class.new(domain) }
    let(:html_content) { '<html><body>This is sample website content</body></html>' }

    before do
      allow(Rails.logger).to receive(:info)
      allow(Rails.logger).to receive(:error)
    end

    context 'when domain is nil' do
      let(:domain) { nil }

      it 'returns nil' do
        expect(service.perform).to be_nil
      end
    end

    context 'when domain is present' do
      let(:domain) { 'example.com' }

      before do
        allow(HTTParty).to receive(:get).and_return(html_content)
      end

      it 'returns the website content' do
        expect(service.perform).to eq(html_content)
      end
    end

    context 'when an error occurs' do
      let(:domain) { 'example.com' }

      before do
        allow(HTTParty).to receive(:get).and_raise(StandardError.new('Error'))
      end

      it 'returns nil' do
        expect(service.perform).to be_nil
      end
    end
  end
end