Compare commits

..

19 Commits

Author SHA1 Message Date
ning
cd44d131aa refactor: optimize drop sample 2026-01-23 11:58:02 +08:00
ning
42ba6c8738 brain fix get datasource 2026-01-20 20:45:12 +08:00
jie210
cd395f659c feat: sso support feishu (#3045) 2026-01-16 19:23:36 +08:00
ning
a16c87c734 refactor: update trigger value 2026-01-14 16:14:21 +08:00
ning
24f3d85b29 fix: query data 2025-12-30 19:25:07 +08:00
ning
e046b57f8b update ds perm check 2025-12-30 16:48:07 +08:00
ning
4b054c279c fix: datasource delete 2025-12-30 16:29:35 +08:00
ning
cf2f442c36 refactor: doris datasource conf 2025-12-29 20:52:58 +08:00
pioneerlfn
f0e948acef fix: doris exec sql timeout unit: s -> ms (#3020) 2025-12-29 16:08:21 +08:00
pioneerlfn
e1fa53e48c refactor: doris update row check, add interval, offset, showIndexes (#3014) 2025-12-26 16:15:11 +08:00
ning
adeb6dbf0b fix: search view api 2025-12-26 14:51:16 +08:00
ning
027e3ece1e refactor: save view check name 2025-12-26 14:51:06 +08:00
Yening Qin
1637d6db73 feat: support search view save (#3009) 2025-12-26 14:50:55 +08:00
ning
8ff0c1eeb9 fix: webhook connection leak 2025-12-17 18:05:18 +08:00
ning
b5917bdf8e fix: webhook connection leak 2025-12-17 18:01:52 +08:00
pioneerlfn
05063bdcca fix: doris macro/time (#2990) 2025-12-11 12:29:07 +08:00
ning
98de2f5b30 refactor: builtin tpl add 2025-12-08 16:54:31 +08:00
ning
e4f0a6560d Merge branch 'release-20' of github.com:ccfos/nightingale into release-20 2025-12-04 20:15:06 +08:00
ning
b065a1fea7 fix: event concurrent map writes 2025-12-04 20:13:05 +08:00
73 changed files with 879 additions and 3341 deletions

View File

@@ -16,7 +16,6 @@ import (
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/alert/pipeline"
"github.com/ccfos/nightingale/v6/alert/pipeline/engine"
"github.com/ccfos/nightingale/v6/alert/sender"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/models"
@@ -232,8 +231,6 @@ func shouldSkipNotify(ctx *ctx.Context, event *models.AlertCurEvent, notifyRuleI
}
func HandleEventPipeline(pipelineConfigs []models.PipelineConfig, eventOrigin, event *models.AlertCurEvent, eventProcessorCache *memsto.EventProcessorCacheType, ctx *ctx.Context, id int64, from string) *models.AlertCurEvent {
workflowEngine := engine.NewWorkflowEngine(ctx)
for _, pipelineConfig := range pipelineConfigs {
if !pipelineConfig.Enable {
continue
@@ -250,28 +247,23 @@ func HandleEventPipeline(pipelineConfigs []models.PipelineConfig, eventOrigin, e
continue
}
// 统一使用工作流引擎执行(兼容线性模式和工作流模式)
triggerCtx := &models.WorkflowTriggerContext{
Mode: models.TriggerModeEvent,
TriggerBy: from,
}
processors := eventProcessorCache.GetProcessorsById(pipelineConfig.PipelineId)
for _, processor := range processors {
var res string
var err error
logger.Infof("processor_by_%s_id:%d pipeline_id:%d, before processor:%+v, event: %+v", from, id, pipelineConfig.PipelineId, processor, event)
event, res, err = processor.Process(ctx, event)
if event == nil {
logger.Infof("processor_by_%s_id:%d pipeline_id:%d, event dropped, after processor:%+v, event: %+v", from, id, pipelineConfig.PipelineId, processor, eventOrigin)
resultEvent, result, err := workflowEngine.Execute(eventPipeline, event, triggerCtx)
if err != nil {
logger.Errorf("processor_by_%s_id:%d pipeline_id:%d, pipeline execute error: %v", from, id, pipelineConfig.PipelineId, err)
continue
}
if resultEvent == nil {
logger.Infof("processor_by_%s_id:%d pipeline_id:%d, event dropped, event: %+v", from, id, pipelineConfig.PipelineId, eventOrigin)
if from == "notify_rule" {
sender.NotifyRecord(ctx, []*models.AlertCurEvent{eventOrigin}, id, "", "", result.Message, fmt.Errorf("processor_by_%s_id:%d pipeline_id:%d, drop by pipeline", from, id, pipelineConfig.PipelineId))
if from == "notify_rule" {
// alert_rule 获取不到 eventId 记录没有意义
sender.NotifyRecord(ctx, []*models.AlertCurEvent{eventOrigin}, id, "", "", res, fmt.Errorf("processor_by_%s_id:%d pipeline_id:%d, drop by processor", from, id, pipelineConfig.PipelineId))
}
return nil
}
return nil
logger.Infof("processor_by_%s_id:%d pipeline_id:%d, after processor:%+v, event: %+v, res:%v, err:%v", from, id, pipelineConfig.PipelineId, processor, event, res, err)
}
event = resultEvent
logger.Infof("processor_by_%s_id:%d pipeline_id:%d, pipeline executed, status:%s, message:%s", from, id, pipelineConfig.PipelineId, result.Status, result.Message)
}
event.FE2DB()
@@ -546,7 +538,7 @@ func SendNotifyRuleMessage(ctx *ctx.Context, userCache *memsto.UserCacheType, us
for i := range flashDutyChannelIDs {
start := time.Now()
respBody, err := notifyChannel.SendFlashDuty(events, flashDutyChannelIDs[i], notifyChannelCache.GetHttpClient(notifyChannel.ID))
respBody = fmt.Sprintf("send_time: %s duration: %d ms %s", time.Now().Format("2006-01-02 15:04:05"), time.Since(start).Milliseconds(), respBody)
respBody = fmt.Sprintf("duration: %d ms %s", time.Since(start).Milliseconds(), respBody)
logger.Infof("duty_sender notify_id: %d, channel_name: %v, event:%+v, IntegrationUrl: %v dutychannel_id: %v, respBody: %v, err: %v", notifyRuleId, notifyChannel.Name, events[0], notifyChannel.RequestConfig.FlashDutyRequestConfig.IntegrationUrl, flashDutyChannelIDs[i], respBody, err)
sender.NotifyRecord(ctx, events, notifyRuleId, notifyChannel.Name, strconv.FormatInt(flashDutyChannelIDs[i], 10), respBody, err)
}
@@ -555,7 +547,7 @@ func SendNotifyRuleMessage(ctx *ctx.Context, userCache *memsto.UserCacheType, us
for _, routingKey := range pagerdutyRoutingKeys {
start := time.Now()
respBody, err := notifyChannel.SendPagerDuty(events, routingKey, siteInfo.SiteUrl, notifyChannelCache.GetHttpClient(notifyChannel.ID))
respBody = fmt.Sprintf("send_time: %s duration: %d ms %s", time.Now().Format("2006-01-02 15:04:05"), time.Since(start).Milliseconds(), respBody)
respBody = fmt.Sprintf("duration: %d ms %s", time.Since(start).Milliseconds(), respBody)
logger.Infof("pagerduty_sender notify_id: %d, channel_name: %v, event:%+v, respBody: %v, err: %v", notifyRuleId, notifyChannel.Name, events[0], respBody, err)
sender.NotifyRecord(ctx, events, notifyRuleId, notifyChannel.Name, "", respBody, err)
}
@@ -586,7 +578,7 @@ func SendNotifyRuleMessage(ctx *ctx.Context, userCache *memsto.UserCacheType, us
case "script":
start := time.Now()
target, res, err := notifyChannel.SendScript(events, tplContent, customParams, sendtos)
res = fmt.Sprintf("send_time: %s duration: %d ms %s", time.Now().Format("2006-01-02 15:04:05"), time.Since(start).Milliseconds(), res)
res = fmt.Sprintf("duration: %d ms %s", time.Since(start).Milliseconds(), res)
logger.Infof("script_sender notify_id: %d, channel_name: %v, event:%+v, tplContent:%s, customParams:%v, target:%s, res:%s, err:%v", notifyRuleId, notifyChannel.Name, events[0], tplContent, customParams, target, res, err)
sender.NotifyRecord(ctx, events, notifyRuleId, notifyChannel.Name, target, res, err)
default:
@@ -833,12 +825,12 @@ func (e *Dispatch) HandleIbex(rule *models.AlertRule, event *models.AlertCurEven
if len(t.Host) == 0 {
sender.CallIbex(e.ctx, t.TplId, event.TargetIdent,
e.taskTplsCache, e.targetCache, e.userCache, event, "")
e.taskTplsCache, e.targetCache, e.userCache, event)
continue
}
for _, host := range t.Host {
sender.CallIbex(e.ctx, t.TplId, host,
e.taskTplsCache, e.targetCache, e.userCache, event, "")
e.taskTplsCache, e.targetCache, e.userCache, event)
}
}
}

View File

@@ -11,7 +11,6 @@ import (
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/ccfos/nightingale/v6/alert/astats"
@@ -25,7 +24,6 @@ import (
"github.com/ccfos/nightingale/v6/pkg/poster"
promsdk "github.com/ccfos/nightingale/v6/pkg/prom"
promql2 "github.com/ccfos/nightingale/v6/pkg/promql"
"github.com/ccfos/nightingale/v6/pkg/tplx"
"github.com/ccfos/nightingale/v6/pkg/unit"
"github.com/ccfos/nightingale/v6/prom"
"github.com/prometheus/common/model"
@@ -62,7 +60,6 @@ const (
CHECK_QUERY = "check_query_config"
GET_CLIENT = "get_client"
QUERY_DATA = "query_data"
EXEC_TEMPLATE = "exec_template"
)
const (
@@ -154,7 +151,7 @@ func (arw *AlertRuleWorker) Eval() {
if len(message) == 0 {
logger.Infof("rule_eval:%s finished, duration:%v", arw.Key(), time.Since(begin))
} else {
logger.Warningf("rule_eval:%s finished, duration:%v, message:%s", arw.Key(), time.Since(begin), message)
logger.Infof("rule_eval:%s finished, duration:%v, message:%s", arw.Key(), time.Since(begin), message)
}
}()
@@ -189,7 +186,8 @@ func (arw *AlertRuleWorker) Eval() {
}
if err != nil {
message = fmt.Sprintf("failed to get anomaly points: %v", err)
logger.Errorf("rule_eval:%s get anomaly point err:%s", arw.Key(), err.Error())
message = "failed to get anomaly points"
return
}
@@ -1486,16 +1484,6 @@ func (arw *AlertRuleWorker) GetAnomalyPoint(rule *models.AlertRule, dsId int64)
return points, recoverPoints, fmt.Errorf("rule_eval:%d datasource:%d not exists", rule.Id, dsId)
}
if err = ExecuteQueryTemplate(rule.Cate, query, nil); err != nil {
logger.Warningf("rule_eval rid:%d execute query template error: %v", rule.Id, err)
arw.Processor.Stats.CounterRuleEvalErrorTotal.WithLabelValues(fmt.Sprintf("%v", arw.Processor.DatasourceId()), EXEC_TEMPLATE, arw.Processor.BusiGroupCache.GetNameByBusiGroupId(arw.Rule.GroupId), fmt.Sprintf("%v", arw.Rule.Id)).Inc()
arw.Processor.Stats.GaugeQuerySeriesCount.WithLabelValues(
fmt.Sprintf("%v", arw.Rule.Id),
fmt.Sprintf("%v", arw.Processor.DatasourceId()),
fmt.Sprintf("%v", i),
).Set(-3)
}
ctx := context.WithValue(context.Background(), "delay", int64(rule.Delay))
series, err := plug.QueryData(ctx, query)
arw.Processor.Stats.CounterQueryDataTotal.WithLabelValues(fmt.Sprintf("%d", arw.DatasourceId), fmt.Sprintf("%d", rule.Id)).Inc()
@@ -1614,11 +1602,15 @@ func (arw *AlertRuleWorker) GetAnomalyPoint(rule *models.AlertRule, dsId int64)
continue
}
switch v.(type) {
case float64:
values += fmt.Sprintf("%s:%.3f ", k, v)
case string:
values += fmt.Sprintf("%s:%s ", k, v)
if u, exists := valuesUnitMap[k]; exists { // 配置了单位,优先用配置了单位的值
values += fmt.Sprintf("%s:%s ", k, u.Text)
} else {
switch v.(type) {
case float64:
values += fmt.Sprintf("%s:%.3f ", k, v)
case string:
values += fmt.Sprintf("%s:%s ", k, v)
}
}
}
@@ -1711,61 +1703,3 @@ func (arw *AlertRuleWorker) GetAnomalyPoint(rule *models.AlertRule, dsId int64)
return points, recoverPoints, nil
}
// ExecuteQueryTemplate renders Go template syntax inside a query's "sql"
// field for database-style datasources.
// cate:  datasource category, e.g. "mysql", "pgsql" (not inspected here)
// query: the query object; only map-shaped queries carrying a string "sql"
//        field are processed, anything else passes through untouched
// data:  template data object; nil means an empty struct is used (template
//        functions still work, variable rendering does not)
func ExecuteQueryTemplate(cate string, query interface{}, data interface{}) error {
	// Only map-shaped queries can carry a "sql" field.
	queryMap, isMap := query.(map[string]interface{})
	if !isMap {
		return nil
	}
	rawSQL, hasSQL := queryMap["sql"]
	if !hasSQL {
		return nil
	}
	sqlText, isString := rawSQL.(string)
	if !isString {
		return nil
	}
	// Delegate the actual rendering to ExecuteSqlTemplate.
	rendered, err := ExecuteSqlTemplate(sqlText, data)
	if err != nil {
		return fmt.Errorf("execute sql template error: %w", err)
	}
	// Write the rendered SQL back into the query map.
	queryMap["sql"] = rendered
	return nil
}
// ExecuteSqlTemplate renders Go template functions embedded in a query string.
// query: the raw query text
// data:  template data object; nil falls back to an empty struct (function
//        calls still work, variable references do not)
func ExecuteSqlTemplate(query string, data interface{}) (string, error) {
	// Fast path: nothing that looks like a template action, return unchanged.
	if !strings.Contains(query, "{{") || !strings.Contains(query, "}}") {
		return query, nil
	}
	tmpl, err := template.New("query").Funcs(tplx.TemplateFuncMap).Parse(query)
	if err != nil {
		return "", fmt.Errorf("query tmpl parse error: %w", err)
	}
	if data == nil {
		data = struct{}{}
	}
	var rendered strings.Builder
	if err := tmpl.Execute(&rendered, data); err != nil {
		return "", fmt.Errorf("query tmpl execute error: %w", err)
	}
	return rendered.String(), nil
}

View File

@@ -1,383 +0,0 @@
package engine
import (
"fmt"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/google/uuid"
"github.com/toolkits/pkg/logger"
)
// WorkflowEngine executes event-pipeline workflows (DAGs of processor nodes)
// against alert events.
type WorkflowEngine struct {
	ctx *ctx.Context // application context, handed to processors and used when persisting execution records
}
// NewWorkflowEngine returns a WorkflowEngine bound to the given context.
func NewWorkflowEngine(c *ctx.Context) *WorkflowEngine {
	engine := &WorkflowEngine{
		ctx: c,
	}
	return engine
}
// Execute runs the pipeline's workflow against the given event.
// It returns the (possibly mutated) event, the overall workflow result, and
// an error. When triggerCtx carries a non-empty Mode, the run is also
// persisted as an execution record.
func (e *WorkflowEngine) Execute(pipeline *models.EventPipeline, event *models.AlertCurEvent, triggerCtx *models.WorkflowTriggerContext) (*models.AlertCurEvent, *models.WorkflowResult, error) {
	startTime := time.Now()
	wfCtx := e.initWorkflowContext(pipeline, event, triggerCtx)
	nodes := pipeline.GetWorkflowNodes()
	connections := pipeline.GetWorkflowConnections()
	// An empty workflow is a successful no-op.
	if len(nodes) == 0 {
		return event, &models.WorkflowResult{
			Event:   event,
			Status:  models.ExecutionStatusSuccess,
			Message: "no nodes to execute",
		}, nil
	}
	nodeMap := make(map[string]*models.WorkflowNode)
	for i := range nodes {
		// Default retry interval and max retries to 1 when unset.
		if nodes[i].RetryInterval == 0 {
			nodes[i].RetryInterval = 1
		}
		if nodes[i].MaxRetries == 0 {
			nodes[i].MaxRetries = 1
		}
		nodeMap[nodes[i].ID] = &nodes[i]
	}
	result := e.executeDAG(nodeMap, connections, wfCtx)
	result.Event = wfCtx.Event
	duration := time.Since(startTime).Milliseconds()
	// Persist an execution record only for runs that declare a trigger mode.
	if triggerCtx != nil && triggerCtx.Mode != "" {
		e.saveExecutionRecord(pipeline, wfCtx, result, triggerCtx, startTime.Unix(), duration)
	}
	return wfCtx.Event, result, nil
}
// initWorkflowContext builds the per-run workflow context: the pipeline env
// merged with trigger overrides, run metadata, and the streaming flag.
func (e *WorkflowEngine) initWorkflowContext(pipeline *models.EventPipeline, event *models.AlertCurEvent, triggerCtx *models.WorkflowTriggerContext) *models.WorkflowContext {
	// Environment: pipeline defaults overlaid with per-trigger overrides.
	mergedEnv := pipeline.GetEnvMap()
	if triggerCtx != nil {
		for key, val := range triggerCtx.EnvOverrides {
			mergedEnv[key] = val
		}
	}

	meta := map[string]string{
		"start_time":  fmt.Sprintf("%d", time.Now().Unix()),
		"pipeline_id": fmt.Sprintf("%d", pipeline.ID),
	}

	// Streaming output is off unless the trigger asks for it.
	streaming := false
	if triggerCtx != nil {
		meta["request_id"] = triggerCtx.RequestID
		meta["trigger_mode"] = triggerCtx.Mode
		meta["trigger_by"] = triggerCtx.TriggerBy
		streaming = triggerCtx.Stream
	}

	return &models.WorkflowContext{
		Event:    event,
		Env:      mergedEnv,
		Vars:     make(map[string]interface{}), // empty Vars map for inter-node data passing
		Metadata: meta,
		Stream:   streaming,
	}
}
// executeDAG runs the workflow nodes in topological order using Kahn's
// algorithm, honoring branch decisions and streaming output.
func (e *WorkflowEngine) executeDAG(nodeMap map[string]*models.WorkflowNode, connections models.Connections, wfCtx *models.WorkflowContext) *models.WorkflowResult {
	result := &models.WorkflowResult{
		Status:      models.ExecutionStatusSuccess,
		NodeResults: make([]*models.NodeExecutionResult, 0),
		Stream:      wfCtx.Stream, // inherit the streaming-output setting from the context
	}
	// Compute the in-degree of every node.
	inDegree := make(map[string]int)
	for nodeID := range nodeMap {
		inDegree[nodeID] = 0
	}
	// Walk the connections to count incoming edges.
	for _, nodeConns := range connections {
		for _, targets := range nodeConns.Main {
			for _, target := range targets {
				inDegree[target.Node]++
			}
		}
	}
	// Seed the queue with every zero-in-degree (start) node.
	queue := make([]string, 0)
	for nodeID, degree := range inDegree {
		if degree == 0 {
			queue = append(queue, nodeID)
		}
	}
	// No start node at all means the graph contains a cycle.
	if len(queue) == 0 && len(nodeMap) > 0 {
		result.Status = models.ExecutionStatusFailed
		result.Message = "workflow has circular dependency"
		return result
	}
	// Nodes that have already been executed.
	executed := make(map[string]bool)
	// Branch choice recorded per branching node.
	branchResults := make(map[string]*int)
	for len(queue) > 0 {
		// Pop the head of the queue.
		nodeID := queue[0]
		queue = queue[1:]
		// Skip if already executed.
		if executed[nodeID] {
			continue
		}
		node, exists := nodeMap[nodeID]
		if !exists {
			continue
		}
		// Execute the node.
		nodeResult, nodeOutput := e.executeNode(node, wfCtx)
		result.NodeResults = append(result.NodeResults, nodeResult)
		if nodeOutput != nil && nodeOutput.Stream && nodeOutput.StreamChan != nil {
			// A streaming node is normally the last node in the workflow.
			// Hand its StreamChan straight to the WorkflowResult without blocking.
			result.Stream = true
			result.StreamChan = nodeOutput.StreamChan
			result.Event = wfCtx.Event
			result.Status = "streaming"
			result.Message = fmt.Sprintf("streaming output from node: %s", node.Name)
			// Mark the node itself as streaming.
			nodeResult.Status = "streaming"
			nodeResult.Message = "streaming in progress"
			// Return immediately and let the API layer drive the streamed response.
			return result
		}
		executed[nodeID] = true
		// Remember the branch decision, if any.
		if nodeResult.BranchIndex != nil {
			branchResults[nodeID] = nodeResult.BranchIndex
		}
		// Abort the workflow on failure unless the node tolerates it.
		if nodeResult.Status == "failed" {
			if !node.ContinueOnFail {
				result.Status = models.ExecutionStatusFailed
				result.ErrorNode = nodeID
				result.Message = fmt.Sprintf("node %s failed: %s", node.Name, nodeResult.Error)
				return result
			}
		}
		// Stop if the node terminated the workflow (e.g. the event was dropped).
		if nodeResult.Status == "terminated" {
			result.Message = fmt.Sprintf("workflow terminated at node %s", node.Name)
			return result
		}
		// Decrement the in-degree of each successor reachable via the chosen branch.
		if nodeConns, ok := connections[nodeID]; ok {
			for outputIndex, targets := range nodeConns.Main {
				// Skip outputs that the branch decision ruled out.
				if !e.shouldFollowBranch(nodeID, outputIndex, branchResults) {
					continue
				}
				for _, target := range targets {
					inDegree[target.Node]--
					if inDegree[target.Node] == 0 {
						queue = append(queue, target.Node)
					}
				}
			}
		}
	}
	return result
}
// executeNode runs a single workflow node, retrying on failure when the node
// is configured to do so.
// Returns the node's execution result and its output (used by the caller to
// detect streaming output).
func (e *WorkflowEngine) executeNode(node *models.WorkflowNode, wfCtx *models.WorkflowContext) (*models.NodeExecutionResult, *models.NodeOutput) {
	startTime := time.Now()
	nodeResult := &models.NodeExecutionResult{
		NodeID:    node.ID,
		NodeName:  node.Name,
		NodeType:  node.Type,
		StartedAt: startTime.Unix(),
	}
	var nodeOutput *models.NodeOutput
	// Skip disabled nodes outright.
	if node.Disabled {
		nodeResult.Status = "skipped"
		nodeResult.Message = "node is disabled"
		nodeResult.FinishedAt = time.Now().Unix()
		nodeResult.DurationMs = time.Since(startTime).Milliseconds()
		return nodeResult, nil
	}
	// Look up the processor implementation for this node type.
	processor, err := models.GetProcessorByType(node.Type, node.Config)
	if err != nil {
		nodeResult.Status = "failed"
		nodeResult.Error = fmt.Sprintf("failed to get processor: %v", err)
		nodeResult.FinishedAt = time.Now().Unix()
		nodeResult.DurationMs = time.Since(startTime).Milliseconds()
		return nodeResult, nil
	}
	// Run the processor (with retries when RetryOnFail is set).
	var retries int
	maxRetries := node.MaxRetries
	if !node.RetryOnFail {
		maxRetries = 0
	}
	for retries <= maxRetries {
		// Branch-capable processors take the richer ProcessWithBranch path.
		if branchProcessor, ok := processor.(models.BranchProcessor); ok {
			output, err := branchProcessor.ProcessWithBranch(e.ctx, wfCtx)
			if err != nil {
				if retries < maxRetries {
					retries++
					time.Sleep(time.Duration(node.RetryInterval) * time.Second)
					continue
				}
				nodeResult.Status = "failed"
				nodeResult.Error = err.Error()
			} else {
				nodeResult.Status = "success"
				if output != nil {
					nodeOutput = output
					if output.WfCtx != nil {
						wfCtx = output.WfCtx
					}
					nodeResult.Message = output.Message
					nodeResult.BranchIndex = output.BranchIndex
					if output.Terminate {
						nodeResult.Status = "terminated"
					}
				}
			}
			break
		}
		// Plain (non-branching) processor path.
		newWfCtx, msg, err := processor.Process(e.ctx, wfCtx)
		if err != nil {
			if retries < maxRetries {
				retries++
				time.Sleep(time.Duration(node.RetryInterval) * time.Second)
				continue
			}
			nodeResult.Status = "failed"
			nodeResult.Error = err.Error()
		} else {
			nodeResult.Status = "success"
			nodeResult.Message = msg
			if newWfCtx != nil {
				wfCtx = newWfCtx
				// Detect the streaming-output marker on the returned context.
				if newWfCtx.Stream && newWfCtx.StreamChan != nil {
					nodeOutput = &models.NodeOutput{
						WfCtx:      newWfCtx,
						Message:    msg,
						Stream:     true,
						StreamChan: newWfCtx.StreamChan,
					}
				}
			}
			// A nil context or nil event means the event was dropped: terminate.
			if newWfCtx == nil || newWfCtx.Event == nil {
				nodeResult.Status = "terminated"
				nodeResult.Message = msg
			}
		}
		break
	}
	nodeResult.FinishedAt = time.Now().Unix()
	nodeResult.DurationMs = time.Since(startTime).Milliseconds()
	logger.Infof("workflow: executed node %s (type=%s) status=%s msg=%s duration=%dms",
		node.Name, node.Type, nodeResult.Status, nodeResult.Message, nodeResult.DurationMs)
	return nodeResult, nodeOutput
}
// shouldFollowBranch reports whether the given output of a node should be
// followed, based on the branch decision the node recorded (if any).
func (e *WorkflowEngine) shouldFollowBranch(nodeID string, outputIndex int, branchResults map[string]*int) bool {
	chosen, recorded := branchResults[nodeID]
	switch {
	case !recorded:
		// Not a branch node: only the first output is followed.
		return outputIndex == 0
	case chosen == nil:
		// A nil index means "take the default branch": follow every output.
		return true
	default:
		// Follow only the selected branch.
		return outputIndex == *chosen
	}
}
// saveExecutionRecord persists an execution record for the finished workflow
// run, including per-node results and a secret-sanitized env snapshot.
// Persistence failures are logged but do not affect the workflow outcome.
func (e *WorkflowEngine) saveExecutionRecord(pipeline *models.EventPipeline, wfCtx *models.WorkflowContext, result *models.WorkflowResult, triggerCtx *models.WorkflowTriggerContext, startTime int64, duration int64) {
	// Reuse the trigger's request id as the execution id; otherwise mint one.
	executionID := triggerCtx.RequestID
	if executionID == "" {
		executionID = uuid.New().String()
	}
	execution := &models.EventPipelineExecution{
		ID:           executionID,
		PipelineID:   pipeline.ID,
		PipelineName: pipeline.Name,
		Mode:         triggerCtx.Mode,
		Status:       result.Status,
		ErrorMessage: result.Message,
		ErrorNode:    result.ErrorNode,
		CreatedAt:    startTime,
		FinishedAt:   time.Now().Unix(),
		DurationMs:   duration,
		TriggerBy:    triggerCtx.TriggerBy,
	}
	if wfCtx.Event != nil {
		execution.EventID = wfCtx.Event.Id
	}
	if err := execution.SetNodeResults(result.NodeResults); err != nil {
		logger.Errorf("workflow: failed to set node results: pipeline_id=%d, error=%v", pipeline.ID, err)
	}
	// Strip secret values from the env before storing the snapshot.
	secretKeys := pipeline.GetSecretKeys()
	sanitizedEnv := wfCtx.SanitizedEnv(secretKeys)
	if err := execution.SetEnvSnapshot(sanitizedEnv); err != nil {
		logger.Errorf("workflow: failed to set env snapshot: pipeline_id=%d, error=%v", pipeline.ID, err)
	}
	if err := models.CreateEventPipelineExecution(e.ctx, execution); err != nil {
		logger.Errorf("workflow: failed to save execution record: pipeline_id=%d, error=%v", pipeline.ID, err)
	}
}

View File

@@ -5,7 +5,6 @@ import (
_ "github.com/ccfos/nightingale/v6/alert/pipeline/processor/callback"
_ "github.com/ccfos/nightingale/v6/alert/pipeline/processor/eventdrop"
_ "github.com/ccfos/nightingale/v6/alert/pipeline/processor/eventupdate"
_ "github.com/ccfos/nightingale/v6/alert/pipeline/processor/logic"
_ "github.com/ccfos/nightingale/v6/alert/pipeline/processor/relabel"
)

View File

@@ -55,24 +55,23 @@ func (c *AISummaryConfig) Init(settings interface{}) (models.Processor, error) {
return result, err
}
func (c *AISummaryConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.WorkflowContext, string, error) {
event := wfCtx.Event
func (c *AISummaryConfig) Process(ctx *ctx.Context, event *models.AlertCurEvent) (*models.AlertCurEvent, string, error) {
if c.Client == nil {
if err := c.initHTTPClient(); err != nil {
return wfCtx, "", fmt.Errorf("failed to initialize HTTP client: %v processor: %v", err, c)
return event, "", fmt.Errorf("failed to initialize HTTP client: %v processor: %v", err, c)
}
}
// 准备告警事件信息
eventInfo, err := c.prepareEventInfo(wfCtx)
eventInfo, err := c.prepareEventInfo(event)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to prepare event info: %v processor: %v", err, c)
return event, "", fmt.Errorf("failed to prepare event info: %v processor: %v", err, c)
}
// 调用AI模型生成总结
summary, err := c.generateAISummary(eventInfo)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to generate AI summary: %v processor: %v", err, c)
return event, "", fmt.Errorf("failed to generate AI summary: %v processor: %v", err, c)
}
// 将总结添加到annotations字段
@@ -84,11 +83,11 @@ func (c *AISummaryConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContex
// 更新Annotations字段
b, err := json.Marshal(event.AnnotationsJSON)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to marshal annotations: %v processor: %v", err, c)
return event, "", fmt.Errorf("failed to marshal annotations: %v processor: %v", err, c)
}
event.Annotations = string(b)
return wfCtx, "", nil
return event, "", nil
}
func (c *AISummaryConfig) initHTTPClient() error {
@@ -111,10 +110,9 @@ func (c *AISummaryConfig) initHTTPClient() error {
return nil
}
func (c *AISummaryConfig) prepareEventInfo(wfCtx *models.WorkflowContext) (string, error) {
func (c *AISummaryConfig) prepareEventInfo(event *models.AlertCurEvent) (string, error) {
var defs = []string{
"{{$event := .Event}}",
"{{$env := .Env}}",
"{{$event := .}}",
}
text := strings.Join(append(defs, c.PromptTemplate), "")
@@ -124,7 +122,7 @@ func (c *AISummaryConfig) prepareEventInfo(wfCtx *models.WorkflowContext) (strin
}
var body bytes.Buffer
err = t.Execute(&body, wfCtx)
err = t.Execute(&body, event)
if err != nil {
return "", fmt.Errorf("failed to execute prompt template: %v", err)
}

View File

@@ -42,14 +42,8 @@ func TestAISummaryConfig_Process(t *testing.T) {
},
}
// 创建 WorkflowContext
wfCtx := &models.WorkflowContext{
Event: event,
Env: map[string]string{},
}
// 测试模板处理
eventInfo, err := config.prepareEventInfo(wfCtx)
eventInfo, err := config.prepareEventInfo(event)
assert.NoError(t, err)
assert.Contains(t, eventInfo, "Test Rule")
assert.Contains(t, eventInfo, "1")
@@ -60,18 +54,18 @@ func TestAISummaryConfig_Process(t *testing.T) {
assert.NotNil(t, processor)
// 测试处理函数
result, _, err := processor.Process(&ctx.Context{}, wfCtx)
result, _, err := processor.Process(&ctx.Context{}, event)
assert.NoError(t, err)
assert.NotNil(t, result)
assert.NotEmpty(t, result.Event.AnnotationsJSON["ai_summary"])
assert.NotEmpty(t, result.AnnotationsJSON["ai_summary"])
// 展示处理结果
t.Log("\n=== 处理结果 ===")
t.Logf("告警规则: %s", result.Event.RuleName)
t.Logf("严重程度: %d", result.Event.Severity)
t.Logf("标签: %v", result.Event.TagsMap)
t.Logf("原始注释: %v", result.Event.AnnotationsJSON["description"])
t.Logf("AI总结: %s", result.Event.AnnotationsJSON["ai_summary"])
t.Logf("告警规则: %s", result.RuleName)
t.Logf("严重程度: %d", result.Severity)
t.Logf("标签: %v", result.TagsMap)
t.Logf("原始注释: %v", result.AnnotationsJSON["description"])
t.Logf("AI总结: %s", result.AnnotationsJSON["ai_summary"])
}
func TestConvertCustomParam(t *testing.T) {

View File

@@ -43,8 +43,7 @@ func (c *CallbackConfig) Init(settings interface{}) (models.Processor, error) {
return result, err
}
func (c *CallbackConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.WorkflowContext, string, error) {
event := wfCtx.Event
func (c *CallbackConfig) Process(ctx *ctx.Context, event *models.AlertCurEvent) (*models.AlertCurEvent, string, error) {
if c.Client == nil {
transport := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: c.SkipSSLVerify},
@@ -53,7 +52,7 @@ func (c *CallbackConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext
if c.Proxy != "" {
proxyURL, err := url.Parse(c.Proxy)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to parse proxy url: %v processor: %v", err, c)
return event, "", fmt.Errorf("failed to parse proxy url: %v processor: %v", err, c)
} else {
transport.Proxy = http.ProxyURL(proxyURL)
}
@@ -73,12 +72,12 @@ func (c *CallbackConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext
body, err := json.Marshal(event)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to marshal event: %v processor: %v", err, c)
return event, "", fmt.Errorf("failed to marshal event: %v processor: %v", err, c)
}
req, err := http.NewRequest("POST", c.URL, strings.NewReader(string(body)))
if err != nil {
return wfCtx, "", fmt.Errorf("failed to create request: %v processor: %v", err, c)
return event, "", fmt.Errorf("failed to create request: %v processor: %v", err, c)
}
for k, v := range headers {
@@ -91,14 +90,14 @@ func (c *CallbackConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext
resp, err := c.Client.Do(req)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to send request: %v processor: %v", err, c)
return event, "", fmt.Errorf("failed to send request: %v processor: %v", err, c)
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to read response body: %v processor: %v", err, c)
return event, "", fmt.Errorf("failed to read response body: %v processor: %v", err, c)
}
logger.Debugf("callback processor response body: %s", string(b))
return wfCtx, "callback success", nil
return event, "callback success", nil
}

View File

@@ -26,29 +26,27 @@ func (c *EventDropConfig) Init(settings interface{}) (models.Processor, error) {
return result, err
}
func (c *EventDropConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.WorkflowContext, string, error) {
func (c *EventDropConfig) Process(ctx *ctx.Context, event *models.AlertCurEvent) (*models.AlertCurEvent, string, error) {
// 使用背景是可以根据此处理器,实现对事件进行更加灵活的过滤的逻辑
// 在标签过滤和属性过滤都不满足需求时可以使用
// 如果模板执行结果为 true则删除该事件
event := wfCtx.Event
var defs = []string{
"{{ $event := .Event }}",
"{{ $labels := .Event.TagsMap }}",
"{{ $value := .Event.TriggerValue }}",
"{{ $env := .Env }}",
"{{ $event := . }}",
"{{ $labels := .TagsMap }}",
"{{ $value := .TriggerValue }}",
}
text := strings.Join(append(defs, c.Content), "")
tpl, err := texttemplate.New("eventdrop").Funcs(tplx.TemplateFuncMap).Parse(text)
if err != nil {
return wfCtx, "", fmt.Errorf("processor failed to parse template: %v processor: %v", err, c)
return event, "", fmt.Errorf("processor failed to parse template: %v processor: %v", err, c)
}
var body bytes.Buffer
if err = tpl.Execute(&body, wfCtx); err != nil {
return wfCtx, "", fmt.Errorf("processor failed to execute template: %v processor: %v", err, c)
if err = tpl.Execute(&body, event); err != nil {
return event, "", fmt.Errorf("processor failed to execute template: %v processor: %v", err, c)
}
result := strings.TrimSpace(body.String())
@@ -58,5 +56,5 @@ func (c *EventDropConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContex
return nil, "drop event success", nil
}
return wfCtx, "drop event failed", nil
return event, "drop event failed", nil
}

View File

@@ -31,8 +31,7 @@ func (c *EventUpdateConfig) Init(settings interface{}) (models.Processor, error)
return result, err
}
func (c *EventUpdateConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.WorkflowContext, string, error) {
event := wfCtx.Event
func (c *EventUpdateConfig) Process(ctx *ctx.Context, event *models.AlertCurEvent) (*models.AlertCurEvent, string, error) {
if c.Client == nil {
transport := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: c.SkipSSLVerify},
@@ -41,7 +40,7 @@ func (c *EventUpdateConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowCont
if c.Proxy != "" {
proxyURL, err := url.Parse(c.Proxy)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to parse proxy url: %v processor: %v", err, c)
return event, "", fmt.Errorf("failed to parse proxy url: %v processor: %v", err, c)
} else {
transport.Proxy = http.ProxyURL(proxyURL)
}
@@ -61,12 +60,12 @@ func (c *EventUpdateConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowCont
body, err := json.Marshal(event)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to marshal event: %v processor: %v", err, c)
return event, "", fmt.Errorf("failed to marshal event: %v processor: %v", err, c)
}
req, err := http.NewRequest("POST", c.URL, strings.NewReader(string(body)))
if err != nil {
return wfCtx, "", fmt.Errorf("failed to create request: %v processor: %v", err, c)
return event, "", fmt.Errorf("failed to create request: %v processor: %v", err, c)
}
for k, v := range headers {
@@ -79,7 +78,7 @@ func (c *EventUpdateConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowCont
resp, err := c.Client.Do(req)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to send request: %v processor: %v", err, c)
return event, "", fmt.Errorf("failed to send request: %v processor: %v", err, c)
}
b, err := io.ReadAll(resp.Body)
@@ -90,8 +89,8 @@ func (c *EventUpdateConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowCont
err = json.Unmarshal(b, &event)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to unmarshal response body: %v processor: %v", err, c)
return event, "", fmt.Errorf("failed to unmarshal response body: %v processor: %v", err, c)
}
return wfCtx, "", nil
return event, "", nil
}

View File

@@ -1,197 +0,0 @@
package logic
import (
"bytes"
"fmt"
"strings"
"text/template"
alertCommon "github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/common"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/tplx"
)
// Condition evaluation mode constants.
const (
	ConditionModeExpression = "expression" // expression mode (the default)
	ConditionModeTags       = "tags"       // tag/attribute filter mode
)

// IfConfig configures the If condition processor.
type IfConfig struct {
	// Evaluation mode: "expression" (template expression) or "tags"
	// (tag/attribute filters).
	Mode string `json:"mode,omitempty"`

	// Expression-mode settings.
	// Condition expression (Go template syntax), e.g.:
	//   {{ if eq .Severity 1 }}true{{ end }}
	Condition string `json:"condition,omitempty"`

	// Tag/attribute-mode settings.
	LabelKeys  []models.TagFilter `json:"label_keys,omitempty"` // label filters to apply
	Attributes []models.TagFilter `json:"attributes,omitempty"` // attribute filters to apply

	// Internal: filters pre-parsed by Init, never serialized.
	parsedLabelKeys  []models.TagFilter `json:"-"`
	parsedAttributes []models.TagFilter `json:"-"`
}
// init registers this processor under the "logic.if" type so it can be
// instantiated from pipeline configuration.
func init() {
	models.RegisterProcessor("logic.if", &IfConfig{})
}
// Init builds a processor instance from raw settings and pre-parses the
// tags-mode filters so Process does not have to parse them per event.
func (c *IfConfig) Init(settings interface{}) (models.Processor, error) {
	result, err := common.InitProcessor[*IfConfig](settings)
	if err != nil {
		return nil, err
	}

	// Normalize Func (falling back to Op) and parse, always on a deep copy
	// so cached shared objects are never mutated concurrently.
	parse := func(filters []models.TagFilter) ([]models.TagFilter, error) {
		cloned := make([]models.TagFilter, len(filters))
		copy(cloned, filters)
		for i := range cloned {
			if cloned[i].Func == "" {
				cloned[i].Func = cloned[i].Op
			}
		}
		return models.ParseTagFilter(cloned)
	}

	if len(result.LabelKeys) > 0 {
		if result.parsedLabelKeys, err = parse(result.LabelKeys); err != nil {
			return nil, fmt.Errorf("failed to parse label_keys: %v", err)
		}
	}

	if len(result.Attributes) > 0 {
		if result.parsedAttributes, err = parse(result.Attributes); err != nil {
			return nil, fmt.Errorf("failed to parse attributes: %v", err)
		}
	}

	return result, nil
}
// Process implements the Processor interface (legacy linear mode). It only
// reports whether the condition matched; branch routing is done elsewhere.
func (c *IfConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.WorkflowContext, string, error) {
	matched, err := c.evaluateCondition(wfCtx)
	if err != nil {
		return wfCtx, "", fmt.Errorf("if processor: failed to evaluate condition: %v", err)
	}

	msg := "condition not matched (false branch)"
	if matched {
		msg = "condition matched (true branch)"
	}
	return wfCtx, msg, nil
}
// ProcessWithBranch implements the BranchProcessor interface: output 0 is
// the true branch, output 1 the false branch.
func (c *IfConfig) ProcessWithBranch(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.NodeOutput, error) {
	matched, err := c.evaluateCondition(wfCtx)
	if err != nil {
		return nil, fmt.Errorf("if processor: failed to evaluate condition: %v", err)
	}

	// Default to the false branch (output 1); switch to output 0 on match.
	branch := 1
	msg := "condition not matched (false branch)"
	if matched {
		branch = 0
		msg = "condition matched (true branch)"
	}

	return &models.NodeOutput{
		WfCtx:       wfCtx,
		BranchIndex: &branch,
		Message:     msg,
	}, nil
}
// evaluateCondition dispatches to the configured evaluation mode.
// An empty or unknown mode falls back to expression mode (the default).
func (c *IfConfig) evaluateCondition(wfCtx *models.WorkflowContext) (bool, error) {
	if c.Mode == ConditionModeTags {
		return c.evaluateTagsCondition(wfCtx.Event)
	}
	return c.evaluateExpressionCondition(wfCtx)
}
// evaluateExpressionCondition renders the Go template condition against the
// workflow context and treats a trimmed, lowercased result of "true" or "1"
// as a match. An empty condition always matches.
func (c *IfConfig) evaluateExpressionCondition(wfCtx *models.WorkflowContext) (bool, error) {
	if c.Condition == "" {
		return true, nil
	}

	// Prepend convenience variables so the condition can reference
	// $event/$labels/$value/$env directly.
	text := "{{ $event := .Event }}" +
		"{{ $labels := .Event.TagsMap }}" +
		"{{ $value := .Event.TriggerValue }}" +
		"{{ $env := .Env }}" +
		c.Condition

	tpl, err := template.New("if_condition").Funcs(tplx.TemplateFuncMap).Parse(text)
	if err != nil {
		return false, err
	}

	var out bytes.Buffer
	if err := tpl.Execute(&out, wfCtx); err != nil {
		return false, err
	}

	rendered := strings.TrimSpace(strings.ToLower(out.String()))
	return rendered == "true" || rendered == "1", nil
}
// evaluateTagsCondition matches the event against the pre-parsed label and
// attribute filters. With no filters configured it vacuously returns true;
// otherwise every configured filter group must match.
func (c *IfConfig) evaluateTagsCondition(event *models.AlertCurEvent) (bool, error) {
	if len(c.parsedLabelKeys) == 0 && len(c.parsedAttributes) == 0 {
		return true, nil
	}

	// Match event labels (TagsMap); a nil map is treated as empty.
	if len(c.parsedLabelKeys) > 0 {
		labels := event.TagsMap
		if labels == nil {
			labels = map[string]string{}
		}
		if !alertCommon.MatchTags(labels, c.parsedLabelKeys) {
			return false, nil
		}
	}

	// Match event attributes (all JSON fields via JsonTagsAndValue).
	if len(c.parsedAttributes) > 0 {
		if !alertCommon.MatchTags(event.JsonTagsAndValue(), c.parsedAttributes) {
			return false, nil
		}
	}

	return true, nil
}

View File

@@ -1,224 +0,0 @@
package logic
import (
"bytes"
"fmt"
"strings"
"text/template"
alertCommon "github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/common"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/tplx"
)
// SwitchCase defines one branch of the logic.switch processor.
type SwitchCase struct {
	// Mode selects how the branch condition is evaluated: "expression"
	// (Go template, the default) or "tags" (label/attribute matching).
	Mode string `json:"mode,omitempty"`
	// Condition is the expression-mode condition, in Go template syntax.
	Condition string `json:"condition,omitempty"`
	// Tags/attributes mode configuration.
	LabelKeys  []models.TagFilter `json:"label_keys,omitempty"` // filters applied to event labels
	Attributes []models.TagFilter `json:"attributes,omitempty"` // filters applied to event attributes
	// Name is an optional branch label used in result messages/logs.
	Name string `json:"name,omitempty"`
	// Parsed filters, populated by Init; internal use only.
	parsedLabelKeys  []models.TagFilter `json:"-"`
	parsedAttributes []models.TagFilter `json:"-"`
}
// SwitchConfig is the configuration of the logic.switch multi-branch
// processor.
type SwitchConfig struct {
	// Cases are evaluated in order; the first matching case is selected.
	Cases []SwitchCase `json:"cases"`
	// AllowMultiple would let several branches match at once (default
	// false: only the first match is taken).
	// NOTE(review): the visible evaluation code always stops at the first
	// match regardless of this flag — confirm intended semantics.
	AllowMultiple bool `json:"allow_multiple,omitempty"`
}
// init registers this processor under the "logic.switch" type so it can be
// instantiated from pipeline configuration.
func init() {
	models.RegisterProcessor("logic.switch", &SwitchConfig{})
}
// Init builds a processor instance from raw settings and pre-parses the
// tags-mode filters of every case so evaluation is cheap per event.
func (c *SwitchConfig) Init(settings interface{}) (models.Processor, error) {
	result, err := common.InitProcessor[*SwitchConfig](settings)
	if err != nil {
		return nil, err
	}

	// Normalize Func (falling back to Op) and parse, always on a deep copy
	// so cached shared objects are never mutated concurrently.
	parse := func(filters []models.TagFilter) ([]models.TagFilter, error) {
		cloned := make([]models.TagFilter, len(filters))
		copy(cloned, filters)
		for i := range cloned {
			if cloned[i].Func == "" {
				cloned[i].Func = cloned[i].Op
			}
		}
		return models.ParseTagFilter(cloned)
	}

	for i := range result.Cases {
		cs := &result.Cases[i]
		if len(cs.LabelKeys) > 0 {
			if cs.parsedLabelKeys, err = parse(cs.LabelKeys); err != nil {
				return nil, fmt.Errorf("failed to parse label_keys for case[%d]: %v", i, err)
			}
		}
		if len(cs.Attributes) > 0 {
			if cs.parsedAttributes, err = parse(cs.Attributes); err != nil {
				return nil, fmt.Errorf("failed to parse attributes for case[%d]: %v", i, err)
			}
		}
	}

	return result, nil
}
// Process implements the Processor interface (legacy linear mode). It only
// reports which case matched; branch routing is done elsewhere.
func (c *SwitchConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.WorkflowContext, string, error) {
	index, caseName, err := c.evaluateCases(wfCtx)
	if err != nil {
		return wfCtx, "", fmt.Errorf("switch processor: failed to evaluate cases: %v", err)
	}

	switch {
	case index < 0:
		// No case matched: fall through to the default branch (last output).
		return wfCtx, "no case matched, using default branch", nil
	case caseName != "":
		return wfCtx, fmt.Sprintf("matched case[%d]: %s", index, caseName), nil
	default:
		return wfCtx, fmt.Sprintf("matched case[%d]", index), nil
	}
}
// ProcessWithBranch implements the BranchProcessor interface. Output ports
// 0..len(Cases)-1 correspond to the cases; the port after them is the
// default branch taken when nothing matches.
func (c *SwitchConfig) ProcessWithBranch(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.NodeOutput, error) {
	index, caseName, err := c.evaluateCases(wfCtx)
	if err != nil {
		return nil, fmt.Errorf("switch processor: failed to evaluate cases: %v", err)
	}

	out := &models.NodeOutput{WfCtx: wfCtx}

	if index < 0 {
		// Default branch: the output port after all cases.
		def := len(c.Cases)
		out.BranchIndex = &def
		out.Message = "no case matched, using default branch"
		return out, nil
	}

	out.BranchIndex = &index
	if caseName != "" {
		out.Message = fmt.Sprintf("matched case[%d]: %s", index, caseName)
	} else {
		out.Message = fmt.Sprintf("matched case[%d]", index)
	}
	return out, nil
}
// evaluateCases walks the cases in order and returns the index and name of
// the first one whose condition matches, or (-1, "") when none does.
func (c *SwitchConfig) evaluateCases(wfCtx *models.WorkflowContext) (int, string, error) {
	for i := range c.Cases {
		ok, err := c.evaluateCaseCondition(&c.Cases[i], wfCtx)
		if err != nil {
			return -1, "", fmt.Errorf("case[%d] evaluation error: %v", i, err)
		}
		if ok {
			return i, c.Cases[i].Name, nil
		}
	}
	return -1, "", nil
}
// evaluateCaseCondition evaluates a single case. An empty or unknown mode
// falls back to expression mode (the default).
func (c *SwitchConfig) evaluateCaseCondition(caseItem *SwitchCase, wfCtx *models.WorkflowContext) (bool, error) {
	if caseItem.Mode == ConditionModeTags {
		return c.evaluateTagsCondition(caseItem, wfCtx.Event)
	}
	return c.evaluateExpressionCondition(caseItem.Condition, wfCtx)
}
// evaluateExpressionCondition renders the Go template condition against the
// workflow context; a trimmed, lowercased result of "true" or "1" counts as
// a match. Unlike logic.if, an empty condition never matches.
func (c *SwitchConfig) evaluateExpressionCondition(condition string, wfCtx *models.WorkflowContext) (bool, error) {
	if condition == "" {
		return false, nil
	}

	// Prepend convenience variables so the condition can reference
	// $event/$labels/$value/$env directly.
	text := "{{ $event := .Event }}" +
		"{{ $labels := .Event.TagsMap }}" +
		"{{ $value := .Event.TriggerValue }}" +
		"{{ $env := .Env }}" +
		condition

	tpl, err := template.New("switch_condition").Funcs(tplx.TemplateFuncMap).Parse(text)
	if err != nil {
		return false, err
	}

	var out bytes.Buffer
	if err := tpl.Execute(&out, wfCtx); err != nil {
		return false, err
	}

	rendered := strings.TrimSpace(strings.ToLower(out.String()))
	return rendered == "true" || rendered == "1", nil
}
// evaluateTagsCondition matches the event against a case's pre-parsed label
// and attribute filters. With no filters configured the case never matches
// (so an empty case cannot shadow later ones); otherwise every configured
// filter group must match.
func (c *SwitchConfig) evaluateTagsCondition(caseItem *SwitchCase, event *models.AlertCurEvent) (bool, error) {
	if len(caseItem.parsedLabelKeys) == 0 && len(caseItem.parsedAttributes) == 0 {
		return false, nil
	}

	// Match event labels (TagsMap); a nil map is treated as empty.
	if len(caseItem.parsedLabelKeys) > 0 {
		labels := event.TagsMap
		if labels == nil {
			labels = map[string]string{}
		}
		if !alertCommon.MatchTags(labels, caseItem.parsedLabelKeys) {
			return false, nil
		}
	}

	// Match event attributes (all JSON fields via JsonTagsAndValue).
	if len(caseItem.parsedAttributes) > 0 {
		if !alertCommon.MatchTags(event.JsonTagsAndValue(), caseItem.parsedAttributes) {
			return false, nil
		}
	}

	return true, nil
}

View File

@@ -42,7 +42,7 @@ func (r *RelabelConfig) Init(settings interface{}) (models.Processor, error) {
return result, err
}
func (r *RelabelConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.WorkflowContext, string, error) {
func (r *RelabelConfig) Process(ctx *ctx.Context, event *models.AlertCurEvent) (*models.AlertCurEvent, string, error) {
sourceLabels := make([]model.LabelName, len(r.SourceLabels))
for i := range r.SourceLabels {
sourceLabels[i] = model.LabelName(strings.ReplaceAll(r.SourceLabels[i], ".", REPLACE_DOT))
@@ -63,8 +63,8 @@ func (r *RelabelConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext)
},
}
EventRelabel(wfCtx.Event, relabelConfigs)
return wfCtx, "", nil
EventRelabel(event, relabelConfigs)
return event, "", nil
}
func EventRelabel(event *models.AlertCurEvent, relabelConfigs []*pconf.RelabelConfig) {

View File

@@ -135,7 +135,9 @@ func (c *DefaultCallBacker) CallBack(ctx CallBackContext) {
func doSendAndRecord(ctx *ctx.Context, url, token string, body interface{}, channel string,
stats *astats.Stats, events []*models.AlertCurEvent) {
start := time.Now()
res, err := doSend(url, body, channel, stats)
res = fmt.Sprintf("duration: %d ms %s", time.Since(start).Milliseconds(), res)
NotifyRecord(ctx, events, 0, channel, token, res, err)
}
@@ -169,11 +171,11 @@ func doSend(url string, body interface{}, channel string, stats *astats.Stats) (
start := time.Now()
res, code, err := poster.PostJSON(url, time.Second*5, body, 3)
res = []byte(fmt.Sprintf("duration: %d ms status_code:%d, response:%s", time.Since(start).Milliseconds(), code, string(res)))
res = []byte(fmt.Sprintf("duration: %d ms %s", time.Since(start).Milliseconds(), res))
if err != nil {
logger.Errorf("%s_sender: result=fail url=%s code=%d error=%v req:%v response=%s", channel, url, code, err, body, string(res))
stats.AlertNotifyErrorTotal.WithLabelValues(channel).Inc()
return string(res), err
return "", err
}
logger.Infof("%s_sender: result=succ url=%s code=%d req:%v response=%s", channel, url, code, body, string(res))

View File

@@ -86,33 +86,30 @@ func (c *IbexCallBacker) handleIbex(ctx *ctx.Context, url string, event *models.
return
}
CallIbex(ctx, id, host, c.taskTplCache, c.targetCache, c.userCache, event, "")
CallIbex(ctx, id, host, c.taskTplCache, c.targetCache, c.userCache, event)
}
func CallIbex(ctx *ctx.Context, id int64, host string,
taskTplCache *memsto.TaskTplCache, targetCache *memsto.TargetCacheType,
userCache *memsto.UserCacheType, event *models.AlertCurEvent, args string) (int64, error) {
logger.Infof("event_callback_ibex: id: %d, host: %s, args: %s, event: %+v", id, host, args, event)
userCache *memsto.UserCacheType, event *models.AlertCurEvent) {
logger.Infof("event_callback_ibex: id: %d, host: %s, event: %+v", id, host, event)
tpl := taskTplCache.Get(id)
if tpl == nil {
err := fmt.Errorf("event_callback_ibex: no such tpl(%d), event: %+v", id, event)
logger.Errorf("%s", err)
return 0, err
logger.Errorf("event_callback_ibex: no such tpl(%d), event: %+v", id, event)
return
}
// check perm
// tpl.GroupId - host - account 三元组校验权限
can, err := CanDoIbex(tpl.UpdateBy, tpl, host, targetCache, userCache)
can, err := canDoIbex(tpl.UpdateBy, tpl, host, targetCache, userCache)
if err != nil {
err = fmt.Errorf("event_callback_ibex: check perm fail: %v, event: %+v", err, event)
logger.Errorf("%s", err)
return 0, err
logger.Errorf("event_callback_ibex: check perm fail: %v, event: %+v", err, event)
return
}
if !can {
err = fmt.Errorf("event_callback_ibex: user(%s) no permission, event: %+v", tpl.UpdateBy, event)
logger.Errorf("%s", err)
return 0, err
logger.Errorf("event_callback_ibex: user(%s) no permission, event: %+v", tpl.UpdateBy, event)
return
}
tagsMap := make(map[string]string)
@@ -136,16 +133,11 @@ func CallIbex(ctx *ctx.Context, id int64, host string,
tags, err := json.Marshal(tagsMap)
if err != nil {
err = fmt.Errorf("event_callback_ibex: failed to marshal tags to json: %v, event: %+v", tagsMap, event)
logger.Errorf("%s", err)
return 0, err
logger.Errorf("event_callback_ibex: failed to marshal tags to json: %v, event: %+v", tagsMap, event)
return
}
// call ibex
taskArgs := tpl.Args
if args != "" {
taskArgs = args
}
in := models.TaskForm{
Title: tpl.Title + " FH: " + host,
Account: tpl.Account,
@@ -154,7 +146,7 @@ func CallIbex(ctx *ctx.Context, id int64, host string,
Timeout: tpl.Timeout,
Pause: tpl.Pause,
Script: tpl.Script,
Args: taskArgs,
Args: tpl.Args,
Stdin: string(tags),
Action: "start",
Creator: tpl.UpdateBy,
@@ -164,9 +156,8 @@ func CallIbex(ctx *ctx.Context, id int64, host string,
id, err = TaskAdd(in, tpl.UpdateBy, ctx.IsCenter)
if err != nil {
err = fmt.Errorf("event_callback_ibex: call ibex fail: %v, event: %+v", err, event)
logger.Errorf("%s", err)
return 0, err
logger.Errorf("event_callback_ibex: call ibex fail: %v, event: %+v", err, event)
return
}
// write db
@@ -187,14 +178,11 @@ func CallIbex(ctx *ctx.Context, id int64, host string,
}
if err = record.Add(ctx); err != nil {
err = fmt.Errorf("event_callback_ibex: persist task_record fail: %v, event: %+v", err, event)
logger.Errorf("%s", err)
return id, err
logger.Errorf("event_callback_ibex: persist task_record fail: %v, event: %+v", err, event)
}
return id, nil
}
func CanDoIbex(username string, tpl *models.TaskTpl, host string, targetCache *memsto.TargetCacheType, userCache *memsto.UserCacheType) (bool, error) {
func canDoIbex(username string, tpl *models.TaskTpl, host string, targetCache *memsto.TargetCacheType, userCache *memsto.UserCacheType) (bool, error) {
user := userCache.GetByUsername(username)
if user != nil && user.IsAdmin() {
return true, nil

View File

@@ -89,7 +89,7 @@ func alertingCallScript(ctx *ctx.Context, stdinBytes []byte, notifyScript models
err, isTimeout := sys.WrapTimeout(cmd, time.Duration(config.Timeout)*time.Second)
res := buf.String()
res = fmt.Sprintf("send_time: %s duration: %d ms %s", time.Now().Format("2006-01-02 15:04:05"), time.Since(start).Milliseconds(), res)
res = fmt.Sprintf("duration: %d ms %s", time.Since(start).Milliseconds(), res)
// 截断超出长度的输出
if len(res) > 512 {

View File

@@ -119,11 +119,11 @@ func sendWebhook(webhook *models.Webhook, event interface{}, stats *astats.Stats
if resp.StatusCode == 429 {
logger.Errorf("event_%s_fail, url: %s, response code: %d, body: %s event:%s", channel, conf.Url, resp.StatusCode, string(body), string(bs))
return true, fmt.Sprintf("status_code:%d, response:%s", resp.StatusCode, string(body)), fmt.Errorf("status code is 429")
return true, string(body), fmt.Errorf("status code is 429")
}
logger.Debugf("event_%s_succ, url: %s, response code: %d, body: %s event:%s", channel, conf.Url, resp.StatusCode, string(body), string(bs))
return false, fmt.Sprintf("status_code:%d, response:%s", resp.StatusCode, string(body)), nil
return false, string(body), nil
}
func SingleSendWebhooks(ctx *ctx.Context, webhooks map[string]*models.Webhook, event *models.AlertCurEvent, stats *astats.Stats) {
@@ -132,7 +132,7 @@ func SingleSendWebhooks(ctx *ctx.Context, webhooks map[string]*models.Webhook, e
for retryCount < 3 {
start := time.Now()
needRetry, res, err := sendWebhook(conf, event, stats)
res = fmt.Sprintf("send_time: %s duration: %d ms %s", time.Now().Format("2006-01-02 15:04:05"), time.Since(start).Milliseconds(), res)
res = fmt.Sprintf("duration: %d ms %s", time.Since(start).Milliseconds(), res)
NotifyRecord(ctx, []*models.AlertCurEvent{event}, 0, "webhook", conf.Url, res, err)
if !needRetry {
break
@@ -204,7 +204,7 @@ func StartConsumer(ctx *ctx.Context, queue *WebhookQueue, popSize int, webhook *
for retryCount < webhook.RetryCount {
start := time.Now()
needRetry, res, err := sendWebhook(webhook, events, stats)
res = fmt.Sprintf("send_time: %s duration: %d ms %s", time.Now().Format("2006-01-02 15:04:05"), time.Since(start).Milliseconds(), res)
res = fmt.Sprintf("duration: %d ms %s", time.Since(start).Milliseconds(), res)
go NotifyRecord(ctx, events, 0, "webhook", webhook.Url, res, err)
if !needRetry {
break

View File

@@ -55,10 +55,4 @@ var Plugins = []Plugin{
Type: "opensearch",
TypeName: "OpenSearch",
},
{
Id: 10,
Category: "logging",
Type: "victorialogs",
TypeName: "VictoriaLogs",
},
}

View File

@@ -251,10 +251,12 @@ func (rt *Router) Config(r *gin.Engine) {
pages.GET("/auth/redirect/cas", rt.loginRedirectCas)
pages.GET("/auth/redirect/oauth", rt.loginRedirectOAuth)
pages.GET("/auth/redirect/dingtalk", rt.loginRedirectDingTalk)
pages.GET("/auth/redirect/feishu", rt.loginRedirectFeiShu)
pages.GET("/auth/callback", rt.loginCallback)
pages.GET("/auth/callback/cas", rt.loginCallbackCas)
pages.GET("/auth/callback/oauth", rt.loginCallbackOAuth)
pages.GET("/auth/callback/dingtalk", rt.loginCallbackDingTalk)
pages.GET("/auth/callback/feishu", rt.loginCallbackFeiShu)
pages.GET("/auth/perms", rt.allPerms)
pages.GET("/metrics/desc", rt.metricsDescGetFile)
@@ -558,19 +560,6 @@ func (rt *Router) Config(r *gin.Engine) {
pages.POST("/event-pipeline-tryrun", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.tryRunEventPipeline)
pages.POST("/event-processor-tryrun", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.tryRunEventProcessor)
// API 触发工作流
pages.POST("/event-pipeline/:id/trigger", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.triggerEventPipelineByAPI)
// SSE 流式执行工作流
pages.POST("/event-pipeline/:id/stream", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.streamEventPipeline)
// 事件Pipeline执行记录路由
pages.GET("/event-pipeline-executions", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.listAllEventPipelineExecutions)
pages.GET("/event-pipeline/:id/executions", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.listEventPipelineExecutions)
pages.GET("/event-pipeline/:id/execution/:exec_id", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.getEventPipelineExecution)
pages.GET("/event-pipeline-execution/:exec_id", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.getEventPipelineExecution)
pages.GET("/event-pipeline/:id/execution-stats", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.getEventPipelineExecutionStats)
pages.POST("/event-pipeline-executions/clean", rt.auth(), rt.user(), rt.admin(), rt.cleanEventPipelineExecutions)
pages.POST("/notify-channel-configs", rt.auth(), rt.user(), rt.perm("/notification-channels/add"), rt.notifyChannelsAdd)
pages.DELETE("/notify-channel-configs", rt.auth(), rt.user(), rt.perm("/notification-channels/del"), rt.notifyChannelsDel)
pages.PUT("/notify-channel-config/:id", rt.auth(), rt.user(), rt.perm("/notification-channels/put"), rt.notifyChannelPut)
@@ -703,8 +692,6 @@ func (rt *Router) Config(r *gin.Engine) {
service.GET("/message-templates", rt.messageTemplateGets)
service.GET("/event-pipelines", rt.eventPipelinesListByService)
service.POST("/event-pipeline/:id/trigger", rt.triggerEventPipelineByService)
service.POST("/event-pipeline/:id/stream", rt.streamEventPipelineByService)
// 手机号加密存储配置接口
service.POST("/users/phone/encrypt", rt.usersPhoneEncrypt)

View File

@@ -10,7 +10,6 @@ import (
"net/http"
"net/url"
"strings"
"time"
"github.com/ccfos/nightingale/v6/datasource/opensearch"
"github.com/ccfos/nightingale/v6/dskit/clickhouse"
@@ -230,37 +229,6 @@ func (rt *Router) datasourceUpsert(c *gin.Context) {
}
}
if req.PluginType == models.ELASTICSEARCH {
skipAuto := false
// 若用户输入了versionversion字符串存在且不为空则不自动获取
if req.SettingsJson != nil {
if v, ok := req.SettingsJson["version"]; ok {
switch vv := v.(type) {
case string:
if strings.TrimSpace(vv) != "" {
skipAuto = true
}
default:
if strings.TrimSpace(fmt.Sprint(vv)) != "" {
skipAuto = true
}
}
}
}
if !skipAuto {
version, err := getElasticsearchVersion(req, 10*time.Second)
if err != nil {
logger.Warningf("failed to get elasticsearch version: %v", err)
} else {
if req.SettingsJson == nil {
req.SettingsJson = make(map[string]interface{})
}
req.SettingsJson["version"] = version
}
}
}
if req.Id == 0 {
req.CreatedBy = username
req.Status = "enabled"
@@ -293,15 +261,11 @@ func DatasourceCheck(c *gin.Context, ds models.Datasource) error {
}
}
// 使用 TLS 配置(支持 mTLS
tlsConfig, err := ds.HTTPJson.TLS.TLSConfig()
if err != nil {
return fmt.Errorf("failed to create TLS config: %v", err)
}
client := &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: ds.HTTPJson.TLS.SkipTlsVerify,
},
},
}
@@ -459,82 +423,3 @@ func (rt *Router) datasourceQuery(c *gin.Context) {
}
ginx.NewRender(c).Data(req, err)
}
// getElasticsearchVersion 该函数尝试从提供的Elasticsearch数据源中获取版本号遍历所有URL
// 直到成功获取版本号或所有URL均尝试失败为止。
func getElasticsearchVersion(ds models.Datasource, timeout time.Duration) (string, error) {
client := &http.Client{
Timeout: timeout,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: ds.HTTPJson.TLS.SkipTlsVerify,
},
},
}
urls := make([]string, 0)
if len(ds.HTTPJson.Urls) > 0 {
urls = append(urls, ds.HTTPJson.Urls...)
}
if ds.HTTPJson.Url != "" {
urls = append(urls, ds.HTTPJson.Url)
}
if len(urls) == 0 {
return "", fmt.Errorf("no url provided")
}
var lastErr error
for _, raw := range urls {
baseURL := strings.TrimRight(raw, "/") + "/"
req, err := http.NewRequest("GET", baseURL, nil)
if err != nil {
lastErr = err
continue
}
if ds.AuthJson.BasicAuthUser != "" {
req.SetBasicAuth(ds.AuthJson.BasicAuthUser, ds.AuthJson.BasicAuthPassword)
}
for k, v := range ds.HTTPJson.Headers {
req.Header.Set(k, v)
}
resp, err := client.Do(req)
if err != nil {
lastErr = err
continue
}
body, err := io.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
lastErr = err
continue
}
if resp.StatusCode != 200 {
lastErr = fmt.Errorf("request to %s failed with status: %d body:%s", baseURL, resp.StatusCode, string(body))
continue
}
var result map[string]interface{}
if err := json.Unmarshal(body, &result); err != nil {
lastErr = err
continue
}
if version, ok := result["version"].(map[string]interface{}); ok {
if number, ok := version["number"].(string); ok && number != "" {
return number, nil
}
}
lastErr = fmt.Errorf("version not found in response from %s", baseURL)
}
if lastErr != nil {
return "", lastErr
}
return "", fmt.Errorf("failed to get elasticsearch version")
}

View File

@@ -1,19 +1,14 @@
package router
import (
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/ccfos/nightingale/v6/alert/pipeline/engine"
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/i18n"
"github.com/toolkits/pkg/logger"
)
// 获取事件Pipeline列表
@@ -32,8 +27,6 @@ func (rt *Router) eventPipelinesList(c *gin.Context) {
for _, tid := range pipeline.TeamIds {
pipeline.TeamNames = append(pipeline.TeamNames, ugMap[tid])
}
// 兼容处理:自动填充工作流字段
pipeline.FillWorkflowFields()
}
gids, err := models.MyGroupIdsMap(rt.Ctx, me.Id)
@@ -68,9 +61,6 @@ func (rt *Router) getEventPipeline(c *gin.Context) {
err = pipeline.FillTeamNames(rt.Ctx)
ginx.Dangerous(err)
// 兼容处理:自动填充工作流字段
pipeline.FillWorkflowFields()
ginx.NewRender(c).Data(pipeline, nil)
}
@@ -141,9 +131,7 @@ func (rt *Router) tryRunEventPipeline(c *gin.Context) {
var f struct {
EventId int64 `json:"event_id"`
PipelineConfig models.EventPipeline `json:"pipeline_config"`
EnvVariables map[string]string `json:"env_variables,omitempty"`
}
ginx.BindJSON(c, &f)
hisEvent, err := models.AlertHisEventGetById(rt.Ctx, f.EventId)
@@ -153,33 +141,30 @@ func (rt *Router) tryRunEventPipeline(c *gin.Context) {
event := hisEvent.ToCur()
lang := c.GetHeader("X-Language")
me := c.MustGet("user").(*models.User)
var result string
for _, p := range f.PipelineConfig.ProcessorConfigs {
processor, err := models.GetProcessorByType(p.Typ, p.Config)
if err != nil {
ginx.Bomb(http.StatusBadRequest, "get processor: %+v err: %+v", p, err)
}
event, result, err = processor.Process(rt.Ctx, event)
if err != nil {
ginx.Bomb(http.StatusBadRequest, "processor: %+v err: %+v", p, err)
}
// 统一使用工作流引擎执行(兼容线性模式和工作流模式)
workflowEngine := engine.NewWorkflowEngine(rt.Ctx)
triggerCtx := &models.WorkflowTriggerContext{
Mode: models.TriggerModeAPI,
TriggerBy: me.Username,
EnvOverrides: f.EnvVariables,
}
resultEvent, result, err := workflowEngine.Execute(&f.PipelineConfig, event, triggerCtx)
if err != nil {
ginx.Bomb(http.StatusBadRequest, "pipeline execute error: %v", err)
if event == nil {
ginx.NewRender(c).Data(map[string]interface{}{
"event": event,
"result": i18n.Sprintf(lang, "event is dropped"),
}, nil)
return
}
}
m := map[string]interface{}{
"event": resultEvent,
"result": i18n.Sprintf(lang, result.Message),
"status": result.Status,
"node_results": result.NodeResults,
"event": event,
"result": i18n.Sprintf(lang, result),
}
if resultEvent == nil {
m["result"] = i18n.Sprintf(lang, "event is dropped")
}
ginx.NewRender(c).Data(m, nil)
}
@@ -201,18 +186,14 @@ func (rt *Router) tryRunEventProcessor(c *gin.Context) {
if err != nil {
ginx.Bomb(200, "get processor err: %+v", err)
}
wfCtx := &models.WorkflowContext{
Event: event,
Vars: make(map[string]interface{}),
}
wfCtx, res, err := processor.Process(rt.Ctx, wfCtx)
event, res, err := processor.Process(rt.Ctx, event)
if err != nil {
ginx.Bomb(200, "processor err: %+v", err)
}
lang := c.GetHeader("X-Language")
ginx.NewRender(c).Data(map[string]interface{}{
"event": wfCtx.Event,
"event": event,
"result": i18n.Sprintf(lang, res),
}, nil)
}
@@ -242,10 +223,6 @@ func (rt *Router) tryRunEventProcessorByNotifyRule(c *gin.Context) {
ginx.Bomb(http.StatusBadRequest, "processors not found")
}
wfCtx := &models.WorkflowContext{
Event: event,
Vars: make(map[string]interface{}),
}
for _, pl := range pipelines {
for _, p := range pl.ProcessorConfigs {
processor, err := models.GetProcessorByType(p.Typ, p.Config)
@@ -253,14 +230,14 @@ func (rt *Router) tryRunEventProcessorByNotifyRule(c *gin.Context) {
ginx.Bomb(http.StatusBadRequest, "get processor: %+v err: %+v", p, err)
}
wfCtx, _, err = processor.Process(rt.Ctx, wfCtx)
event, _, err := processor.Process(rt.Ctx, event)
if err != nil {
ginx.Bomb(http.StatusBadRequest, "processor: %+v err: %+v", p, err)
}
if wfCtx == nil || wfCtx.Event == nil {
if event == nil {
lang := c.GetHeader("X-Language")
ginx.NewRender(c).Data(map[string]interface{}{
"event": nil,
"event": event,
"result": i18n.Sprintf(lang, "event is dropped"),
}, nil)
return
@@ -268,337 +245,10 @@ func (rt *Router) tryRunEventProcessorByNotifyRule(c *gin.Context) {
}
}
ginx.NewRender(c).Data(wfCtx.Event, nil)
ginx.NewRender(c).Data(event, nil)
}
// eventPipelinesListByService returns every event pipeline, for
// service-to-service callers (no per-team permission filtering here).
func (rt *Router) eventPipelinesListByService(c *gin.Context) {
	list, err := models.ListEventPipelines(rt.Ctx)
	ginx.NewRender(c).Data(list, err)
}
// EventPipelineRequest is the request body for triggering a pipeline run.
type EventPipelineRequest struct {
	// Event is the alert event to run through the pipeline. Optional: when
	// omitted an empty event is used.
	Event *models.AlertCurEvent `json:"event,omitempty"`
	// EnvOverrides overrides the pipeline's environment variables.
	EnvOverrides map[string]string `json:"env_overrides,omitempty"`
	// Username identifies the caller for service-side triggers, where no
	// authenticated user is available.
	Username string `json:"username,omitempty"`
}
// executePipelineTrigger is the shared logic for pipeline trigger endpoints.
// It validates the request, then runs the pipeline asynchronously through
// the workflow engine and returns the generated execution ID; the run's
// outcome is only logged (correlate via the execution ID).
func (rt *Router) executePipelineTrigger(pipeline *models.EventPipeline, req *EventPipelineRequest, triggerBy string) (string, error) {
	// Use the caller-supplied event when present, otherwise a minimal
	// empty event stamped with the current time.
	event := req.Event
	if event == nil {
		event = &models.AlertCurEvent{
			TriggerTime: time.Now().Unix(),
		}
	}

	// Reject early if required environment variables are missing. Wrap with
	// %w so callers can inspect the underlying error via errors.Is/As.
	if err := pipeline.ValidateEnvVariables(req.EnvOverrides); err != nil {
		return "", fmt.Errorf("env validation failed: %w", err)
	}

	executionID := uuid.New().String()

	triggerCtx := &models.WorkflowTriggerContext{
		Mode:         models.TriggerModeAPI,
		TriggerBy:    triggerBy,
		EnvOverrides: req.EnvOverrides,
		RequestID:    executionID,
	}

	// Execute asynchronously; failures are logged with the pipeline and
	// execution IDs so they can be traced from the returned ID.
	go func() {
		workflowEngine := engine.NewWorkflowEngine(rt.Ctx)
		if _, _, err := workflowEngine.Execute(pipeline, event, triggerCtx); err != nil {
			logger.Errorf("async workflow execute error: pipeline_id=%d execution_id=%s err=%v",
				pipeline.ID, executionID, err)
		}
	}()

	return executionID, nil
}
// triggerEventPipelineByService triggers an asynchronous workflow run on
// behalf of a service caller; the acting user comes from the request body.
func (rt *Router) triggerEventPipelineByService(c *gin.Context) {
	var req EventPipelineRequest
	ginx.BindJSON(c, &req)

	pipeline, err := models.GetEventPipeline(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	if err != nil {
		ginx.Bomb(http.StatusNotFound, "pipeline not found: %v", err)
	}

	executionID, err := rt.executePipelineTrigger(pipeline, &req, req.Username)
	if err != nil {
		ginx.Bomb(http.StatusBadRequest, "%v", err)
	}

	ginx.NewRender(c).Data(gin.H{
		"execution_id": executionID,
		"message":      "workflow execution started",
	}, nil)
}
// triggerEventPipelineByAPI triggers an asynchronous workflow run from the
// authenticated API; the caller must have permission on the pipeline's teams.
func (rt *Router) triggerEventPipelineByAPI(c *gin.Context) {
	pipelineID := ginx.UrlParamInt64(c, "id")

	var f EventPipelineRequest
	ginx.BindJSON(c, &f)

	pipeline, err := models.GetEventPipeline(rt.Ctx, pipelineID)
	if err != nil {
		ginx.Bomb(http.StatusNotFound, "pipeline not found: %v", err)
	}

	// Only members of the pipeline's teams may trigger it.
	me := c.MustGet("user").(*models.User)
	ginx.Dangerous(me.CheckGroupPermission(rt.Ctx, pipeline.TeamIds))

	executionID, err := rt.executePipelineTrigger(pipeline, &f, me.Username)
	if err != nil {
		// Pass the error as an argument, never as the format string: a "%"
		// inside err.Error() must not be parsed as a formatting directive.
		// This also matches the sibling service handler's style.
		ginx.Bomb(http.StatusBadRequest, "%v", err)
	}

	ginx.NewRender(c).Data(gin.H{
		"execution_id": executionID,
		"message":      "workflow execution started",
	}, nil)
}
// listAllEventPipelineExecutions lists execution records across every
// pipeline, filtered by pipeline name, trigger mode and status, paginated
// with ?limit (page size, capped at 1000) and ?p (1-based page number).
func (rt *Router) listAllEventPipelineExecutions(c *gin.Context) {
	pipelineName := ginx.QueryStr(c, "pipeline_name", "")
	mode := ginx.QueryStr(c, "mode", "")
	status := ginx.QueryStr(c, "status", "")

	limit := ginx.QueryInt(c, "limit", 20)
	if limit <= 0 || limit > 1000 {
		limit = 20
	}
	page := ginx.QueryInt(c, "p", 1)
	if page <= 0 {
		page = 1
	}

	executions, total, err := models.ListAllEventPipelineExecutions(rt.Ctx, pipelineName, mode, status, limit, (page-1)*limit)
	ginx.Dangerous(err)

	ginx.NewRender(c).Data(gin.H{
		"list":  executions,
		"total": total,
	}, nil)
}
// listEventPipelineExecutions lists execution records for one pipeline,
// filtered by trigger mode and status, paginated with ?limit (page size,
// capped at 1000) and ?p (1-based page number).
func (rt *Router) listEventPipelineExecutions(c *gin.Context) {
	pipelineID := ginx.UrlParamInt64(c, "id")
	mode := ginx.QueryStr(c, "mode", "")
	status := ginx.QueryStr(c, "status", "")

	limit := ginx.QueryInt(c, "limit", 20)
	if limit <= 0 || limit > 1000 {
		limit = 20
	}
	page := ginx.QueryInt(c, "p", 1)
	if page <= 0 {
		page = 1
	}

	executions, total, err := models.ListEventPipelineExecutions(rt.Ctx, pipelineID, mode, status, limit, (page-1)*limit)
	ginx.Dangerous(err)

	ginx.NewRender(c).Data(gin.H{
		"list":  executions,
		"total": total,
	}, nil)
}
// getEventPipelineExecution returns the detail of a single execution record,
// looked up by its execution id; 404 when the record does not exist.
func (rt *Router) getEventPipelineExecution(c *gin.Context) {
	detail, err := models.GetEventPipelineExecutionDetail(rt.Ctx, ginx.UrlParamStr(c, "exec_id"))
	if err != nil {
		ginx.Bomb(http.StatusNotFound, "execution not found: %v", err)
	}
	ginx.NewRender(c).Data(detail, nil)
}
// getEventPipelineExecutionStats returns aggregate execution statistics
// for the pipeline identified by the :id URL parameter.
func (rt *Router) getEventPipelineExecutionStats(c *gin.Context) {
	id := ginx.UrlParamInt64(c, "id")

	stats, err := models.GetEventPipelineExecutionStatistics(rt.Ctx, id)
	ginx.Dangerous(err)

	ginx.NewRender(c).Data(stats, nil)
}
// cleanEventPipelineExecutions deletes execution records older than the
// requested number of days (default 30) and reports how many were removed.
func (rt *Router) cleanEventPipelineExecutions(c *gin.Context) {
	var req struct {
		BeforeDays int `json:"before_days"`
	}
	ginx.BindJSON(c, &req)

	days := req.BeforeDays
	if days <= 0 {
		days = 30
	}

	// Everything created before this timestamp is eligible for deletion.
	cutoff := time.Now().AddDate(0, 0, -days).Unix()

	deleted, err := models.DeleteEventPipelineExecutions(rt.Ctx, cutoff)
	ginx.Dangerous(err)

	ginx.NewRender(c).Data(gin.H{
		"deleted": deleted,
	}, nil)
}
// streamEventPipeline executes a pipeline with streaming output forced on
// for the logged-in user and, when the engine produces a stream, relays it
// to the client as server-sent events; otherwise the result is returned
// as a plain JSON payload.
func (rt *Router) streamEventPipeline(c *gin.Context) {
	id := ginx.UrlParamInt64(c, "id")

	var req EventPipelineRequest
	ginx.BindJSON(c, &req)

	pipeline, err := models.GetEventPipeline(rt.Ctx, id)
	if err != nil {
		ginx.Bomb(http.StatusNotFound, "pipeline not found: %v", err)
	}

	// The caller must have permission on the pipeline's teams.
	me := c.MustGet("user").(*models.User)
	ginx.Dangerous(me.CheckGroupPermission(rt.Ctx, pipeline.TeamIds))

	// Use the supplied event, or a minimal synthetic one.
	event := req.Event
	if event == nil {
		event = &models.AlertCurEvent{
			TriggerTime: time.Now().Unix(),
		}
	}

	triggerCtx := &models.WorkflowTriggerContext{
		Mode:         models.TriggerModeAPI,
		TriggerBy:    me.Username,
		EnvOverrides: req.EnvOverrides,
		RequestID:    uuid.New().String(),
		Stream:       true, // this endpoint always requests streaming output
	}

	_, result, err := engine.NewWorkflowEngine(rt.Ctx).Execute(pipeline, event, triggerCtx)
	if err != nil {
		ginx.Bomb(http.StatusInternalServerError, "execute failed: %v", err)
	}

	if result.Stream && result.StreamChan != nil {
		rt.handleStreamResponse(c, result, triggerCtx.RequestID)
		return
	}
	ginx.NewRender(c).Data(result, nil)
}
// handleStreamResponse relays workflow output chunks from result.StreamChan
// to the HTTP client as server-sent events (SSE). It returns when the
// channel closes, a chunk is marked Done, the client disconnects, or the
// 30-minute overall timeout fires.
func (rt *Router) handleStreamResponse(c *gin.Context, result *models.WorkflowResult, requestID string) {
	// Set SSE response headers.
	c.Header("Content-Type", "text/event-stream")
	c.Header("Cache-Control", "no-cache")
	c.Header("Connection", "keep-alive")
	c.Header("X-Accel-Buffering", "no") // disable nginx response buffering
	c.Header("X-Request-ID", requestID)
	// Streaming requires the underlying writer to support flushing.
	flusher, ok := c.Writer.(http.Flusher)
	if !ok {
		ginx.Bomb(http.StatusInternalServerError, "streaming not supported")
		return
	}
	// Send an initial "connected" message so the client knows the stream is live.
	initData := fmt.Sprintf(`{"type":"connected","request_id":"%s","timestamp":%d}`, requestID, time.Now().UnixMilli())
	fmt.Fprintf(c.Writer, "data: %s\n\n", initData)
	flusher.Flush()
	// Read chunks from the channel and forward each one as an SSE frame.
	timeout := time.After(30 * time.Minute) // upper bound on total streaming time
	for {
		select {
		case chunk, ok := <-result.StreamChan:
			if !ok {
				// Channel closed by the producer: end of stream.
				return
			}
			data, err := json.Marshal(chunk)
			if err != nil {
				// Skip chunks that cannot be serialized; keep the stream alive.
				logger.Errorf("stream: failed to marshal chunk: %v", err)
				continue
			}
			fmt.Fprintf(c.Writer, "data: %s\n\n", data)
			flusher.Flush()
			if chunk.Done {
				return
			}
		case <-c.Request.Context().Done():
			// Client disconnected; stop relaying.
			logger.Infof("stream: client disconnected, request_id=%s", requestID)
			return
		case <-timeout:
			logger.Errorf("stream: timeout, request_id=%s", requestID)
			return
		}
	}
}
// streamEventPipelineByService is the service-to-service variant of
// streamEventPipeline: it skips the session-user permission check and takes
// the trigger identity from the request body instead of the login session.
func (rt *Router) streamEventPipelineByService(c *gin.Context) {
	id := ginx.UrlParamInt64(c, "id")

	var req EventPipelineRequest
	ginx.BindJSON(c, &req)

	pipeline, err := models.GetEventPipeline(rt.Ctx, id)
	if err != nil {
		ginx.Bomb(http.StatusNotFound, "pipeline not found: %v", err)
	}

	// Use the supplied event, or a minimal synthetic one.
	event := req.Event
	if event == nil {
		event = &models.AlertCurEvent{
			TriggerTime: time.Now().Unix(),
		}
	}

	triggerCtx := &models.WorkflowTriggerContext{
		Mode:         models.TriggerModeAPI,
		TriggerBy:    req.Username,
		EnvOverrides: req.EnvOverrides,
		RequestID:    uuid.New().String(),
		Stream:       true, // this endpoint always requests streaming output
	}

	_, result, err := engine.NewWorkflowEngine(rt.Ctx).Execute(pipeline, event, triggerCtx)
	if err != nil {
		ginx.Bomb(http.StatusInternalServerError, "execute failed: %v", err)
	}

	// Relay as SSE when the engine produced a stream; otherwise plain JSON.
	if result.Stream && result.StreamChan != nil {
		rt.handleStreamResponse(c, result, triggerCtx.RequestID)
		return
	}
	ginx.NewRender(c).Data(result, nil)
}

View File

@@ -12,6 +12,7 @@ import (
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/cas"
"github.com/ccfos/nightingale/v6/pkg/dingtalk"
"github.com/ccfos/nightingale/v6/pkg/feishu"
"github.com/ccfos/nightingale/v6/pkg/ldapx"
"github.com/ccfos/nightingale/v6/pkg/oauth2x"
"github.com/ccfos/nightingale/v6/pkg/oidcx"
@@ -519,6 +520,85 @@ func (rt *Router) loginCallbackDingTalk(c *gin.Context) {
}
// loginRedirectFeiShu returns the FeiShu SSO authorize URL for the given
// redirect target. If the requester already has a valid session, the
// original redirect target is returned directly; if FeiShu SSO is not
// configured or disabled, an empty string is returned.
func (rt *Router) loginRedirectFeiShu(c *gin.Context) {
	target := ginx.QueryStr(c, "redirect", "/")

	if v, ok := c.Get("userid"); ok {
		user, err := models.UserGetById(rt.Ctx, v.(int64))
		ginx.Dangerous(err)
		if user == nil {
			ginx.Bomb(200, "user not found")
		}
		// A non-empty username means the session is already authenticated.
		if user.Username != "" {
			ginx.NewRender(c).Data(target, nil)
			return
		}
	}

	if rt.Sso.FeiShu == nil || !rt.Sso.FeiShu.Enable {
		ginx.NewRender(c).Data("", nil)
		return
	}

	authURL, err := rt.Sso.FeiShu.Authorize(rt.Redis, target)
	ginx.Dangerous(err)
	ginx.NewRender(c).Data(authURL, err)
}
// loginCallbackFeiShu completes the FeiShu OAuth flow: it exchanges the
// code/state for a profile, resolves the local user (creating it on first
// login or syncing attributes when CoverAttributes is enabled), issues JWT
// tokens and returns the post-login redirect target.
func (rt *Router) loginCallbackFeiShu(c *gin.Context) {
	code := ginx.QueryStr(c, "code", "")
	state := ginx.QueryStr(c, "state", "")

	ret, err := rt.Sso.FeiShu.Callback(rt.Redis, c.Request.Context(), code, state)
	if err != nil {
		logger.Errorf("sso_callback FeiShu fail. code:%s, state:%s, get ret: %+v. error: %v", code, state, ret, err)
		ginx.NewRender(c).Data(CallbackOutput{}, err)
		return
	}

	user, err := models.UserGet(rt.Ctx, "username=?", ret.Username)
	ginx.Dangerous(err)

	if user == nil {
		// First login: provision a local account from the FeiShu profile.
		roles := []string{}
		if rt.Sso.FeiShu != nil && rt.Sso.FeiShu.FeiShuConfig != nil {
			roles = rt.Sso.FeiShu.FeiShuConfig.DefaultRoles
		}
		user = new(models.User)
		user.FullSsoFields(feishu.SsoTypeName, ret.Username, ret.Nickname, ret.Phone, ret.Email, roles)
		ginx.Dangerous(user.Add(rt.Ctx))
	} else if rt.Sso.FeiShu != nil && rt.Sso.FeiShu.FeiShuConfig != nil && rt.Sso.FeiShu.FeiShuConfig.CoverAttributes {
		// Overwrite local attributes with the IdP's values when configured.
		fields := user.UpdateSsoFields(feishu.SsoTypeName, ret.Nickname, ret.Phone, ret.Email)
		ginx.Dangerous(user.Update(rt.Ctx, "update_at", fields...))
	}

	// Establish the login session.
	identity := fmt.Sprintf("%d-%s", user.Id, user.Username)
	ts, err := rt.createTokens(rt.HTTP.JWTAuth.SigningKey, identity)
	ginx.Dangerous(err)
	ginx.Dangerous(rt.createAuth(c.Request.Context(), identity, ts))

	target := "/"
	if ret.Redirect != "/login" {
		target = ret.Redirect
	}

	ginx.NewRender(c).Data(CallbackOutput{
		Redirect:     target,
		User:         user,
		AccessToken:  ts.AccessToken,
		RefreshToken: ts.RefreshToken,
	}, nil)
}
func (rt *Router) loginCallbackOAuth(c *gin.Context) {
code := ginx.QueryStr(c, "code", "")
state := ginx.QueryStr(c, "state", "")
@@ -569,10 +649,11 @@ type SsoConfigOutput struct {
CasDisplayName string `json:"casDisplayName"`
OauthDisplayName string `json:"oauthDisplayName"`
DingTalkDisplayName string `json:"dingTalkDisplayName"`
FeiShuDisplayName string `json:"feishuDisplayName"`
}
func (rt *Router) ssoConfigNameGet(c *gin.Context) {
var oidcDisplayName, casDisplayName, oauthDisplayName, dingTalkDisplayName string
var oidcDisplayName, casDisplayName, oauthDisplayName, dingTalkDisplayName, feiShuDisplayName string
if rt.Sso.OIDC != nil {
oidcDisplayName = rt.Sso.OIDC.GetDisplayName()
}
@@ -589,11 +670,16 @@ func (rt *Router) ssoConfigNameGet(c *gin.Context) {
dingTalkDisplayName = rt.Sso.DingTalk.GetDisplayName()
}
if rt.Sso.FeiShu != nil {
feiShuDisplayName = rt.Sso.FeiShu.GetDisplayName()
}
ginx.NewRender(c).Data(SsoConfigOutput{
OidcDisplayName: oidcDisplayName,
CasDisplayName: casDisplayName,
OauthDisplayName: oauthDisplayName,
DingTalkDisplayName: dingTalkDisplayName,
FeiShuDisplayName: feiShuDisplayName,
}, nil)
}
@@ -608,6 +694,7 @@ func (rt *Router) ssoConfigGets(c *gin.Context) {
// TODO: dingTalkExist 为了兼容当前前端配置, 后期单点登陆统一调整后不在预先设置默认内容
dingTalkExist := false
feiShuExist := false
for _, config := range lst {
var ssoReqConfig models.SsoConfig
ssoReqConfig.Id = config.Id
@@ -618,6 +705,10 @@ func (rt *Router) ssoConfigGets(c *gin.Context) {
dingTalkExist = true
err := json.Unmarshal([]byte(config.Content), &ssoReqConfig.SettingJson)
ginx.Dangerous(err)
case feishu.SsoTypeName:
feiShuExist = true
err := json.Unmarshal([]byte(config.Content), &ssoReqConfig.SettingJson)
ginx.Dangerous(err)
default:
ssoReqConfig.Content = config.Content
}
@@ -630,6 +721,11 @@ func (rt *Router) ssoConfigGets(c *gin.Context) {
ssoConfig.Name = dingtalk.SsoTypeName
ssoConfigs = append(ssoConfigs, ssoConfig)
}
if !feiShuExist {
var ssoConfig models.SsoConfig
ssoConfig.Name = feishu.SsoTypeName
ssoConfigs = append(ssoConfigs, ssoConfig)
}
ginx.NewRender(c).Data(ssoConfigs, nil)
}
@@ -657,6 +753,23 @@ func (rt *Router) ssoConfigUpdate(c *gin.Context) {
err = f.Update(rt.Ctx)
}
ginx.Dangerous(err)
case feishu.SsoTypeName:
f.Name = ssoConfig.Name
setting, err := json.Marshal(ssoConfig.SettingJson)
ginx.Dangerous(err)
f.Content = string(setting)
f.UpdateAt = time.Now().Unix()
sso, err := f.Query(rt.Ctx)
if !errors.Is(err, gorm.ErrRecordNotFound) {
ginx.Dangerous(err)
}
if errors.Is(err, gorm.ErrRecordNotFound) {
err = f.Create(rt.Ctx)
} else {
f.Id = sso.Id
err = f.Update(rt.Ctx)
}
ginx.Dangerous(err)
default:
f.Id = ssoConfig.Id
f.Name = ssoConfig.Name
@@ -695,6 +808,14 @@ func (rt *Router) ssoConfigUpdate(c *gin.Context) {
rt.Sso.DingTalk = dingtalk.New(config)
}
rt.Sso.DingTalk.Reload(config)
case feishu.SsoTypeName:
var config feishu.Config
err := json.Unmarshal([]byte(f.Content), &config)
ginx.Dangerous(err)
if rt.Sso.FeiShu == nil {
rt.Sso.FeiShu = feishu.New(config)
}
rt.Sso.FeiShu.Reload(config)
}
ginx.NewRender(c).Message(nil)

View File

@@ -2,6 +2,7 @@ package router
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
@@ -168,15 +169,8 @@ func (rt *Router) dsProxy(c *gin.Context) {
transport, has := transportGet(dsId, ds.UpdatedAt)
if !has {
// 使用 TLS 配置(支持 mTLS
tlsConfig, err := ds.HTTPJson.TLS.TLSConfig()
if err != nil {
c.String(http.StatusInternalServerError, "failed to create TLS config: %s", err.Error())
return
}
transport = &http.Transport{
TLSClientConfig: tlsConfig,
TLSClientConfig: &tls.Config{InsecureSkipVerify: ds.HTTPJson.TLS.SkipTlsVerify},
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: time.Duration(ds.HTTPJson.DialTimeout) * time.Millisecond,

View File

@@ -1,13 +1,15 @@
package router
import (
"context"
"fmt"
"sort"
"sync"
"github.com/ccfos/nightingale/v6/alert/eval"
"github.com/ccfos/nightingale/v6/dscache"
"github.com/ccfos/nightingale/v6/dskit/doris"
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
@@ -59,13 +61,6 @@ func QueryLogBatchConcurrently(anonymousAccess bool, ctx *gin.Context, f QueryFr
return LogResp{}, fmt.Errorf("cluster not exists")
}
// 根据数据源类型对 Query 进行模板渲染处理
err := eval.ExecuteQueryTemplate(q.DsCate, q.Query, nil)
if err != nil {
logger.Warningf("query template execute error: %v", err)
return LogResp{}, fmt.Errorf("query template execute error: %v", err)
}
wg.Add(1)
go func(query Query) {
defer wg.Done()
@@ -117,10 +112,13 @@ func (rt *Router) QueryLogBatch(c *gin.Context) {
}
func QueryDataConcurrently(anonymousAccess bool, ctx *gin.Context, f models.QueryParam) ([]models.DataResp, error) {
var resp []models.DataResp
var mu sync.Mutex
var wg sync.WaitGroup
var errs []error
var (
resp []models.DataResp
mu sync.Mutex
wg sync.WaitGroup
errs []error
rCtx = ctx.Request.Context()
)
for _, q := range f.Queries {
if !anonymousAccess && !CheckDsPerm(ctx, f.DatasourceId, f.Cate, q) {
@@ -132,12 +130,17 @@ func QueryDataConcurrently(anonymousAccess bool, ctx *gin.Context, f models.Quer
logger.Warningf("cluster:%d not exists", f.DatasourceId)
return nil, fmt.Errorf("cluster not exists")
}
vCtx := rCtx
if f.Cate == models.DORIS {
vCtx = context.WithValue(vCtx, doris.NoNeedCheckMaxRow, true)
}
wg.Add(1)
go func(query interface{}) {
defer wg.Done()
data, err := plug.QueryData(ctx.Request.Context(), query)
data, err := plug.QueryData(vCtx, query)
if err != nil {
logger.Warningf("query data error: req:%+v err:%v", query, err)
mu.Lock()

View File

@@ -2,14 +2,13 @@ package router
import (
"fmt"
"net/http"
"github.com/ccfos/nightingale/v6/center/cconf"
"github.com/ccfos/nightingale/v6/datasource/tdengine"
"github.com/ccfos/nightingale/v6/dscache"
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"net/http"
)
type databasesQueryForm struct {

View File

@@ -12,6 +12,7 @@ import (
"github.com/ccfos/nightingale/v6/pkg/cas"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/dingtalk"
"github.com/ccfos/nightingale/v6/pkg/feishu"
"github.com/ccfos/nightingale/v6/pkg/ldapx"
"github.com/ccfos/nightingale/v6/pkg/oauth2x"
"github.com/ccfos/nightingale/v6/pkg/oidcx"
@@ -27,6 +28,7 @@ type SsoClient struct {
CAS *cas.SsoClient
OAuth2 *oauth2x.SsoClient
DingTalk *dingtalk.SsoClient
FeiShu *feishu.SsoClient
LastUpdateTime int64
configCache *memsto.ConfigCache
configLastUpdateTime int64
@@ -203,6 +205,13 @@ func Init(center cconf.Center, ctx *ctx.Context, configCache *memsto.ConfigCache
log.Fatalf("init %s failed: %s", dingtalk.SsoTypeName, err)
}
ssoClient.DingTalk = dingtalk.New(config)
case feishu.SsoTypeName:
var config feishu.Config
err := json.Unmarshal([]byte(cfg.Content), &config)
if err != nil {
log.Fatalf("init %s failed: %s", feishu.SsoTypeName, err)
}
ssoClient.FeiShu = feishu.New(config)
}
}
@@ -291,6 +300,22 @@ func (s *SsoClient) reload(ctx *ctx.Context) error {
s.DingTalk = nil
}
if feiShuConfig, ok := ssoConfigMap[feishu.SsoTypeName]; ok {
var config feishu.Config
err := json.Unmarshal([]byte(feiShuConfig.Content), &config)
if err != nil {
logger.Warningf("reload %s failed: %s", feishu.SsoTypeName, err)
} else {
if s.FeiShu != nil {
s.FeiShu.Reload(config)
} else {
s.FeiShu = feishu.New(config)
}
}
} else {
s.FeiShu = nil
}
s.LastUpdateTime = lastUpdateTime
s.configLastUpdateTime = lastCacheUpdateTime
return nil

View File

@@ -26,10 +26,6 @@ const (
FieldId FixedField = "_id"
)
// LabelSeparator 用于分隔多个标签的分隔符
// 使用 ASCII 控制字符 Record Separator (0x1E),避免与用户数据中的 "--" 冲突
const LabelSeparator = "\x1e"
type Query struct {
Ref string `json:"ref" mapstructure:"ref"`
IndexType string `json:"index_type" mapstructure:"index_type"` // 普通索引:index 索引模式:index_pattern
@@ -132,7 +128,7 @@ func TransferData(metric, ref string, m map[string][][]float64) []models.DataRes
}
data.Metric["__name__"] = model.LabelValue(metric)
labels := strings.Split(k, LabelSeparator)
labels := strings.Split(k, "--")
for _, label := range labels {
arr := strings.SplitN(label, "=", 2)
if len(arr) == 2 {
@@ -201,7 +197,7 @@ func GetBuckets(labelKey string, keys []string, arr []interface{}, metrics *Metr
case json.Number, string:
if !getTs {
if labels != "" {
newlabels = fmt.Sprintf("%s%s%s=%v", labels, LabelSeparator, labelKey, keyValue)
newlabels = fmt.Sprintf("%s--%s=%v", labels, labelKey, keyValue)
} else {
newlabels = fmt.Sprintf("%s=%v", labelKey, keyValue)
}

View File

@@ -67,13 +67,6 @@ func init() {
PluginType: "pgsql",
PluginTypeName: "PostgreSQL",
}
DatasourceTypes[7] = DatasourceType{
Id: 7,
Category: "logging",
PluginType: "victorialogs",
PluginTypeName: "VictoriaLogs",
}
}
type NewDatasourceFn func(settings map[string]interface{}) (Datasource, error)

View File

@@ -181,7 +181,7 @@ func (d *Doris) QueryData(ctx context.Context, query interface{}) ([]models.Data
}
}
items, err := d.QueryTimeseries(context.TODO(), &doris.QueryParam{
items, err := d.QueryTimeseries(ctx, &doris.QueryParam{
Database: dorisQueryParam.Database,
Sql: dorisQueryParam.SQL,
Keys: types.Keys{

View File

@@ -110,6 +110,25 @@ func (e *Elasticsearch) InitClient() error {
return err
}
if e.Client != nil {
for _, addr := range e.Nodes {
if addr == "" {
continue
}
if ver, verr := e.Client.ElasticsearchVersion(addr); verr == nil {
logger.Infof("detected elasticsearch version from %s: %s", addr, ver)
e.Version = ver
e.Addr = addr
break
} else {
logger.Debugf("detect version failed from %s: %v", addr, verr)
}
}
if e.Version == "" {
logger.Warning("failed to detect elasticsearch version from configured nodes, keep configured version")
}
}
return err
}
@@ -171,6 +190,10 @@ func (e *Elasticsearch) Validate(ctx context.Context) (err error) {
e.Timeout = 60000
}
if !strings.HasPrefix(e.Version, "6") && !strings.HasPrefix(e.Version, "7") {
return fmt.Errorf("version must be 6.0+ or 7.0+")
}
return nil
}

View File

@@ -1,339 +0,0 @@
package victorialogs
import (
"context"
"fmt"
"net/url"
"reflect"
"strconv"
"time"
"github.com/ccfos/nightingale/v6/datasource"
"github.com/ccfos/nightingale/v6/dskit/victorialogs"
"github.com/ccfos/nightingale/v6/models"
"github.com/mitchellh/mapstructure"
"github.com/prometheus/common/model"
)
const (
	// VictoriaLogsType is the datasource type key used to register this
	// implementation in the datasource plugin registry.
	VictoriaLogsType = "victorialogs"
)
// VictoriaLogs is the VictoriaLogs datasource implementation. It embeds the
// dskit client; mapstructure ",squash" flattens the embedded fields so raw
// settings maps decode directly into it.
type VictoriaLogs struct {
	victorialogs.VictoriaLogs `json:",inline" mapstructure:",squash"`
}
// Query holds the parameters of a VictoriaLogs query.
type Query struct {
	Query string `json:"query" mapstructure:"query"` // LogsQL query string
	Start int64  `json:"start" mapstructure:"start"` // range start (seconds)
	End   int64  `json:"end" mapstructure:"end"`     // range end (seconds)
	Time  int64  `json:"time" mapstructure:"time"`   // single evaluation point (seconds) - alerting
	Step  string `json:"step" mapstructure:"step"`   // resolution step, e.g. "1m", "5m"
	Limit int    `json:"limit" mapstructure:"limit"` // cap on returned entries
	Ref   string `json:"ref" mapstructure:"ref"`     // variable reference name (e.g. A, B)
}

// IsInstantQuery reports whether the query targets a single point in time
// (the alerting path) rather than a time range.
func (q *Query) IsInstantQuery() bool {
	if q.Time > 0 {
		return true
	}
	return q.Start >= 0 && q.Start == q.End
}
// Register the VictoriaLogs datasource implementation at package load time.
func init() {
	datasource.RegisterDatasource(VictoriaLogsType, new(VictoriaLogs))
}
// Init decodes the raw settings map into a fresh VictoriaLogs datasource
// instance and returns it.
func (vl *VictoriaLogs) Init(settings map[string]interface{}) (datasource.Datasource, error) {
	ds := new(VictoriaLogs)
	err := mapstructure.Decode(settings, ds)
	return ds, err
}
// InitClient prepares the underlying HTTP client used for all requests.
func (vl *VictoriaLogs) InitClient() error {
	err := vl.InitHTTPClient()
	if err == nil {
		return nil
	}
	return fmt.Errorf("failed to init victorialogs http client: %w", err)
}
// Validate checks the required connection settings and fills in defaults
// for timeout and max query rows.
func (vl *VictoriaLogs) Validate(ctx context.Context) error {
	addr := vl.VictorialogsAddr
	if addr == "" {
		return fmt.Errorf("victorialogs.addr is required")
	}

	// The address must at least be a parseable URL.
	if _, err := url.Parse(addr); err != nil {
		return fmt.Errorf("invalid victorialogs.addr: %w", err)
	}

	// Username and password must be supplied together (XOR is invalid).
	user := vl.VictorialogsBasic.VictorialogsUser
	pass := vl.VictorialogsBasic.VictorialogsPass
	if (user == "") != (pass == "") {
		return fmt.Errorf("both username and password must be provided")
	}

	// Defaults.
	if vl.Timeout == 0 {
		vl.Timeout = 10000 // 10 seconds
	}
	if vl.MaxQueryRows == 0 {
		vl.MaxQueryRows = 1000
	}
	return nil
}
// Equal reports whether this datasource configuration is equivalent to
// another one (used to detect settings changes).
func (vl *VictoriaLogs) Equal(other datasource.Datasource) bool {
	peer, ok := other.(*VictoriaLogs)
	if !ok {
		return false
	}
	switch {
	case vl.VictorialogsAddr != peer.VictorialogsAddr:
		return false
	case vl.VictorialogsBasic.VictorialogsUser != peer.VictorialogsBasic.VictorialogsUser:
		return false
	case vl.VictorialogsBasic.VictorialogsPass != peer.VictorialogsBasic.VictorialogsPass:
		return false
	case vl.VictorialogsTls.SkipTlsVerify != peer.VictorialogsTls.SkipTlsVerify:
		return false
	case vl.Timeout != peer.Timeout:
		return false
	}
	return reflect.DeepEqual(vl.Headers, peer.Headers)
}
// QueryLog runs a LogsQL query and returns the raw log entries together
// with the total number of matching entries.
func (vl *VictoriaLogs) QueryLog(ctx context.Context, queryParam interface{}) ([]interface{}, int64, error) {
	q := new(Query)
	if err := mapstructure.Decode(queryParam, q); err != nil {
		return nil, 0, fmt.Errorf("decode query param failed: %w", err)
	}

	entries, err := vl.Query(ctx, q.Query, q.Start, q.End, q.Limit)
	if err != nil {
		return nil, 0, err
	}

	out := make([]interface{}, 0, len(entries))
	for _, entry := range entries {
		out = append(out, entry)
	}

	// Ask HitsLogs for the real total; fall back to the number of
	// returned entries when that call fails.
	total, err := vl.HitsLogs(ctx, q.Query, q.Start, q.End)
	if err != nil {
		total = int64(len(entries))
	}
	return out, total, nil
}
// QueryData runs a stats query and dispatches to the instant endpoint
// (alerting) or the range endpoint (charting) based on the parameters.
func (vl *VictoriaLogs) QueryData(ctx context.Context, queryParam interface{}) ([]models.DataResp, error) {
	q := new(Query)
	if err := mapstructure.Decode(queryParam, q); err != nil {
		return nil, fmt.Errorf("decode query param failed: %w", err)
	}
	if !q.IsInstantQuery() {
		return vl.queryDataRange(ctx, q)
	}
	return vl.queryDataInstant(ctx, q)
}
// queryDataInstant serves the alerting path via /select/logsql/stats_query,
// evaluating the query at a single point in time.
func (vl *VictoriaLogs) queryDataInstant(ctx context.Context, param *Query) ([]models.DataResp, error) {
	// Pick the evaluation point: explicit Time, then End, then "now".
	at := param.Time
	if at == 0 {
		at = param.End
	}
	if at == 0 {
		at = time.Now().Unix()
	}

	res, err := vl.StatsQuery(ctx, param.Query, at)
	if err != nil {
		return nil, err
	}
	return convertPrometheusInstantToDataResp(res, param.Ref), nil
}
// queryDataRange serves the charting path via /select/logsql/stats_query_range,
// deriving a step from the time span when none was given.
func (vl *VictoriaLogs) queryDataRange(ctx context.Context, param *Query) ([]models.DataResp, error) {
	step := param.Step
	if step == "" {
		// Pick a resolution that matches the span being charted.
		switch span := param.End - param.Start; {
		case span <= 3600:
			step = "1m" // up to 1 hour
		case span <= 86400:
			step = "5m" // up to 1 day
		default:
			step = "1h" // more than 1 day
		}
	}

	res, err := vl.StatsQueryRange(ctx, param.Query, param.Start, param.End, step)
	if err != nil {
		return nil, err
	}
	return convertPrometheusRangeToDataResp(res, param.Ref), nil
}
// convertPrometheusInstantToDataResp converts a Prometheus-style instant
// query response into the internal DataResp slice. Each result keeps its
// label set; the single [timestamp, value] sample becomes one data point.
// Malformed samples (wrong length or unexpected element types) are skipped
// instead of panicking: the original unchecked type assertions on
// item.Value[0]/[1] would crash the process on a malformed payload.
func convertPrometheusInstantToDataResp(resp *victorialogs.PrometheusResponse, ref string) []models.DataResp {
	var dataResps []models.DataResp

	for _, item := range resp.Data.Result {
		dataResp := models.DataResp{
			Ref:    ref,
			Metric: make(model.Metric),
		}
		for k, v := range item.Metric {
			dataResp.Metric[model.LabelName(k)] = model.LabelValue(v)
		}

		// item.Value is expected to be [timestamp(float64), value(string)];
		// guard each assertion so a bad payload cannot crash the caller.
		if len(item.Value) == 2 {
			ts, tsOK := item.Value[0].(float64)
			raw, rawOK := item.Value[1].(string)
			if tsOK && rawOK {
				// Parse errors yield 0, matching the original behavior.
				value, _ := strconv.ParseFloat(raw, 64)
				dataResp.Values = [][]float64{
					{ts, value},
				}
			}
		}
		dataResps = append(dataResps, dataResp)
	}
	return dataResps
}
// convertPrometheusRangeToDataResp converts a Prometheus-style range query
// response into the internal DataResp slice. Each result keeps its label
// set and its series of [timestamp, value] pairs. Malformed points are
// skipped instead of panicking: the original unchecked type assertions on
// v[0]/v[1] would crash the process on a malformed payload.
func convertPrometheusRangeToDataResp(resp *victorialogs.PrometheusResponse, ref string) []models.DataResp {
	var dataResps []models.DataResp

	for _, item := range resp.Data.Result {
		dataResp := models.DataResp{
			Ref:    ref,
			Metric: make(model.Metric),
		}
		for k, v := range item.Metric {
			dataResp.Metric[model.LabelName(k)] = model.LabelValue(v)
		}

		var values [][]float64
		for _, v := range item.Values {
			if len(v) != 2 {
				continue
			}
			// Each point is expected to be [timestamp(float64), value(string)];
			// skip points with unexpected types rather than panic.
			ts, tsOK := v[0].(float64)
			raw, rawOK := v[1].(string)
			if !tsOK || !rawOK {
				continue
			}
			// Parse errors yield 0, matching the original behavior.
			value, _ := strconv.ParseFloat(raw, 64)
			values = append(values, []float64{ts, value})
		}
		dataResp.Values = values
		dataResps = append(dataResps, dataResp)
	}
	return dataResps
}
// MakeLogQuery builds a log query parameter object for the given time range
// from a raw query value, which may be a plain LogsQL string or a map with
// "query" and optional "limit" keys. The limit defaults to 1000.
func (vl *VictoriaLogs) MakeLogQuery(ctx context.Context, query interface{}, eventTags []string, start, end int64) (interface{}, error) {
	out := &Query{
		Start: start,
		End:   end,
		Limit: 1000,
	}

	switch val := query.(type) {
	case string:
		out.Query = val
	case map[string]interface{}:
		if raw, ok := val["query"]; ok {
			out.Query = fmt.Sprintf("%v", raw)
		}
		if raw, ok := val["limit"]; ok {
			// JSON-decoded numbers arrive as float64; accept int too.
			switch n := raw.(type) {
			case int:
				out.Limit = n
			case float64:
				out.Limit = int(n)
			}
		}
	}
	return out, nil
}
// MakeTSQuery builds a time-series query parameter object for the given time
// range from a raw query value, which may be a plain LogsQL string or a map
// with "query" and optional "step" keys.
func (vl *VictoriaLogs) MakeTSQuery(ctx context.Context, query interface{}, eventTags []string, start, end int64) (interface{}, error) {
	out := &Query{
		Start: start,
		End:   end,
	}

	switch val := query.(type) {
	case string:
		out.Query = val
	case map[string]interface{}:
		if raw, ok := val["query"]; ok {
			out.Query = fmt.Sprintf("%v", raw)
		}
		if raw, ok := val["step"]; ok {
			out.Step = fmt.Sprintf("%v", raw)
		}
	}
	return out, nil
}
// QueryMapData fetches a single recent log entry and flattens it into a
// string map; used to enrich generated alert events with extra fields.
func (vl *VictoriaLogs) QueryMapData(ctx context.Context, query interface{}) ([]map[string]string, error) {
	q := new(Query)
	if err := mapstructure.Decode(query, q); err != nil {
		return nil, err
	}

	// Widen the window slightly to tolerate ingestion lag.
	if q.Start > 0 && q.End > 0 {
		q.Start -= 30
	}
	q.Limit = 1 // only one entry is needed

	logs, _, err := vl.QueryLog(ctx, q)
	if err != nil {
		return nil, err
	}

	// Take the first entry that is a map and stringify its values.
	for _, entry := range logs {
		m, ok := entry.(map[string]interface{})
		if !ok {
			continue
		}
		row := make(map[string]string, len(m))
		for k, v := range m {
			row[k] = fmt.Sprintf("%v", v)
		}
		return []map[string]string{row}, nil
	}
	return nil, nil
}

View File

@@ -5,7 +5,7 @@ WORKDIR /app
ADD n9e /app/
ADD etc /app/etc/
ADD integrations /app/integrations/
RUN pip install requests Jinja2
RUN pip install requests
EXPOSE 17000

View File

@@ -87,8 +87,8 @@ services:
- mysql
- redis
- victoriametrics
command:
- /app/n9e
command: >
sh -c "/app/n9e"
categraf:
image: "flashcatcloud/categraf:latest"

View File

@@ -59,8 +59,8 @@ services:
- mysql
- redis
- prometheus
command:
- /app/n9e
command: >
sh -c "/app/n9e"
categraf:
image: "flashcatcloud/categraf:latest"

View File

@@ -58,8 +58,8 @@ services:
- mysql
- redis
- prometheus
command:
- /app/n9e
command: >
sh -c "/app/n9e"
categraf:
image: "flashcatcloud/categraf:latest"

View File

@@ -74,8 +74,8 @@ services:
- postgres:postgres
- redis:redis
- victoriametrics:victoriametrics
command:
- /app/n9e
command: >
sh -c "/app/n9e"
categraf:
image: "flashcatcloud/categraf:latest"

View File

@@ -804,9 +804,6 @@ CREATE TABLE builtin_metrics (
lang varchar(191) NOT NULL DEFAULT '',
note varchar(4096) NOT NULL,
expression varchar(4096) NOT NULL,
expression_type varchar(32) NOT NULL DEFAULT 'promql',
metric_type varchar(191) NOT NULL DEFAULT '',
extra_fields text,
created_at bigint NOT NULL DEFAULT 0,
created_by varchar(191) NOT NULL DEFAULT '',
updated_at bigint NOT NULL DEFAULT 0,
@@ -829,9 +826,6 @@ COMMENT ON COLUMN builtin_metrics.unit IS 'unit of metric';
COMMENT ON COLUMN builtin_metrics.lang IS 'language of metric';
COMMENT ON COLUMN builtin_metrics.note IS 'description of metric in Chinese';
COMMENT ON COLUMN builtin_metrics.expression IS 'expression of metric';
COMMENT ON COLUMN builtin_metrics.expression_type IS 'expression type: metric_name or promql';
COMMENT ON COLUMN builtin_metrics.metric_type IS 'metric type like counter/gauge';
COMMENT ON COLUMN builtin_metrics.extra_fields IS 'custom extra fields';
COMMENT ON COLUMN builtin_metrics.created_at IS 'create time';
COMMENT ON COLUMN builtin_metrics.created_by IS 'creator';
COMMENT ON COLUMN builtin_metrics.updated_at IS 'update time';

View File

@@ -719,9 +719,6 @@ CREATE TABLE `builtin_metrics` (
`lang` varchar(191) NOT NULL DEFAULT 'zh' COMMENT '''language''',
`note` varchar(4096) NOT NULL COMMENT '''description of metric''',
`expression` varchar(4096) NOT NULL COMMENT '''expression of metric''',
`expression_type` varchar(32) NOT NULL DEFAULT 'promql' COMMENT '''expression type: metric_name or promql''',
`metric_type` varchar(191) NOT NULL DEFAULT '' COMMENT '''metric type like counter/gauge''',
`extra_fields` text COMMENT '''custom extra fields''',
`created_at` bigint NOT NULL DEFAULT 0 COMMENT '''create time''',
`created_by` varchar(191) NOT NULL DEFAULT '' COMMENT '''creator''',
`updated_at` bigint NOT NULL DEFAULT 0 COMMENT '''update time''',

View File

@@ -292,42 +292,4 @@ ALTER TABLE `alert_rule` ADD COLUMN `pipeline_configs` text COMMENT 'pipeline co
/* v8.4.2 2025-11-13 */
ALTER TABLE `board` ADD COLUMN `note` varchar(1024) not null default '' comment 'note';
ALTER TABLE `builtin_payloads` ADD COLUMN `note` varchar(1024) not null default '' comment 'note of payload';
/* v9 2026-01-09 */
ALTER TABLE `event_pipeline` ADD COLUMN `typ` varchar(128) NOT NULL DEFAULT '' COMMENT 'pipeline type: builtin, user-defined';
ALTER TABLE `event_pipeline` ADD COLUMN `use_case` varchar(128) NOT NULL DEFAULT '' COMMENT 'use case: metric_explorer, event_summary, event_pipeline';
ALTER TABLE `event_pipeline` ADD COLUMN `trigger_mode` varchar(128) NOT NULL DEFAULT 'event' COMMENT 'trigger mode: event, api, cron';
ALTER TABLE `event_pipeline` ADD COLUMN `disabled` tinyint(1) NOT NULL DEFAULT 0 COMMENT 'disabled flag';
ALTER TABLE `event_pipeline` ADD COLUMN `nodes` text COMMENT 'workflow nodes (JSON)';
ALTER TABLE `event_pipeline` ADD COLUMN `connections` text COMMENT 'node connections (JSON)';
ALTER TABLE `event_pipeline` ADD COLUMN `env_variables` text COMMENT 'environment variables (JSON)';
ALTER TABLE `event_pipeline` ADD COLUMN `label_filters` text COMMENT 'label filters (JSON)';
CREATE TABLE `event_pipeline_execution` (
`id` varchar(36) NOT NULL COMMENT 'execution id',
`pipeline_id` bigint NOT NULL COMMENT 'pipeline id',
`pipeline_name` varchar(128) DEFAULT '' COMMENT 'pipeline name snapshot',
`event_id` bigint DEFAULT 0 COMMENT 'related alert event id',
`mode` varchar(16) NOT NULL DEFAULT 'event' COMMENT 'trigger mode: event/api/cron',
`status` varchar(16) NOT NULL DEFAULT 'running' COMMENT 'status: running/success/failed',
`node_results` mediumtext COMMENT 'node execution results (JSON)',
`error_message` varchar(1024) DEFAULT '' COMMENT 'error message',
`error_node` varchar(36) DEFAULT '' COMMENT 'error node id',
`created_at` bigint NOT NULL DEFAULT 0 COMMENT 'start timestamp',
`finished_at` bigint DEFAULT 0 COMMENT 'finish timestamp',
`duration_ms` bigint DEFAULT 0 COMMENT 'duration in milliseconds',
`trigger_by` varchar(64) DEFAULT '' COMMENT 'trigger by',
`env_snapshot` text COMMENT 'environment variables snapshot (sanitized)',
PRIMARY KEY (`id`),
KEY `idx_pipeline_id` (`pipeline_id`),
KEY `idx_event_id` (`event_id`),
KEY `idx_mode` (`mode`),
KEY `idx_status` (`status`),
KEY `idx_created_at` (`created_at`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='event pipeline execution records';
/* v8.5.0 builtin_metrics new fields */
ALTER TABLE `builtin_metrics` ADD COLUMN `expression_type` varchar(32) NOT NULL DEFAULT 'promql' COMMENT 'expression type: metric_name or promql';
ALTER TABLE `builtin_metrics` ADD COLUMN `metric_type` varchar(191) NOT NULL DEFAULT '' COMMENT 'metric type like counter/gauge';
ALTER TABLE `builtin_metrics` ADD COLUMN `extra_fields` text COMMENT 'custom extra fields';
ALTER TABLE `builtin_payloads` ADD COLUMN `note` varchar(1024) not null default '' comment 'note of payload';

View File

@@ -651,9 +651,6 @@ CREATE TABLE `builtin_metrics` (
`lang` varchar(191) NOT NULL DEFAULT '',
`note` varchar(4096) NOT NULL,
`expression` varchar(4096) NOT NULL,
`expression_type` varchar(32) NOT NULL DEFAULT 'promql',
`metric_type` varchar(191) NOT NULL DEFAULT '',
`extra_fields` text,
`created_at` bigint NOT NULL DEFAULT 0,
`created_by` varchar(191) NOT NULL DEFAULT '',
`updated_at` bigint NOT NULL DEFAULT 0,

View File

@@ -14,7 +14,6 @@ import (
_ "github.com/ccfos/nightingale/v6/datasource/mysql"
_ "github.com/ccfos/nightingale/v6/datasource/opensearch"
_ "github.com/ccfos/nightingale/v6/datasource/postgresql"
_ "github.com/ccfos/nightingale/v6/datasource/victorialogs"
"github.com/ccfos/nightingale/v6/dskit/tdengine"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"

View File

@@ -10,13 +10,14 @@ const (
TimeseriesAggregationTimestamp = "__ts__"
)
// QueryLogs 查询日志
// TODO: 待测试, MAP/ARRAY/STRUCT/JSON 等类型能否处理
func (d *Doris) QueryLogs(ctx context.Context, query *QueryParam) ([]map[string]interface{}, error) {
// 等同于 Query()
return d.Query(ctx, query)
return d.Query(ctx, query, true)
}
// 本质是查询时序数据, 取第一组, SQL由上层封装, 不再做复杂的解析和截断
// QueryHistogram 本质是查询时序数据, 取第一组, SQL由上层封装, 不再做复杂的解析和截断
func (d *Doris) QueryHistogram(ctx context.Context, query *QueryParam) ([][]float64, error) {
values, err := d.QueryTimeseries(ctx, query)
if err != nil {

View File

@@ -15,6 +15,10 @@ const (
TimeFieldFormatDateTime = "datetime"
)
type noNeedCheckMaxRowKey struct{}
var NoNeedCheckMaxRow = noNeedCheckMaxRowKey{}
// 不再拼接SQL, 完全信赖用户的输入
type QueryParam struct {
Database string `json:"database"`
@@ -39,7 +43,7 @@ var (
)
// Query executes a given SQL query in Doris and returns the results with MaxQueryRows check
func (d *Doris) Query(ctx context.Context, query *QueryParam) ([]map[string]interface{}, error) {
func (d *Doris) Query(ctx context.Context, query *QueryParam, checkMaxRow bool) ([]map[string]interface{}, error) {
// 校验SQL的合法性, 过滤掉 write请求
sqlItem := strings.Split(strings.ToUpper(query.Sql), " ")
for _, item := range sqlItem {
@@ -48,10 +52,12 @@ func (d *Doris) Query(ctx context.Context, query *QueryParam) ([]map[string]inte
}
}
// 检查查询结果行数
err := d.CheckMaxQueryRows(ctx, query.Database, query.Sql)
if err != nil {
return nil, err
if checkMaxRow {
// 检查查询结果行数
err := d.CheckMaxQueryRows(ctx, query.Database, query.Sql)
if err != nil {
return nil, err
}
}
rows, err := d.ExecQuery(ctx, query.Database, query.Sql)
@@ -63,8 +69,12 @@ func (d *Doris) Query(ctx context.Context, query *QueryParam) ([]map[string]inte
// QueryTimeseries executes a time series data query using the given parameters with MaxQueryRows check
func (d *Doris) QueryTimeseries(ctx context.Context, query *QueryParam) ([]types.MetricValues, error) {
// 使用 Query 方法执行查询Query方法内部已包含MaxQueryRows检查
rows, err := d.Query(ctx, query)
// 默认需要检查,除非调用方声明不需要检查
checkMaxRow := true
if noCheck, ok := ctx.Value(NoNeedCheckMaxRow).(bool); ok && noCheck {
checkMaxRow = false
}
rows, err := d.Query(ctx, query, checkMaxRow)
if err != nil {
return nil, err
}

View File

@@ -1,304 +0,0 @@
package victorialogs
import (
"bufio"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
// VictoriaLogs is a datasource client for a VictoriaLogs server; fields
// are bound from datasource settings via mapstructure, and the HTTP
// client itself is built separately by InitHTTPClient.
type VictoriaLogs struct {
VictorialogsAddr string `json:"victorialogs.addr" mapstructure:"victorialogs.addr"` // base URL, e.g. http://host:9428
// optional basic-auth credentials
VictorialogsBasic struct {
VictorialogsUser string `json:"victorialogs.user" mapstructure:"victorialogs.user"`
VictorialogsPass string `json:"victorialogs.password" mapstructure:"victorialogs.password"`
IsEncrypt bool `json:"victorialogs.is_encrypt" mapstructure:"victorialogs.is_encrypt"` // presumably marks the password as stored encrypted — TODO confirm against datasource encryption flow
} `json:"victorialogs.basic" mapstructure:"victorialogs.basic"`
VictorialogsTls struct {
SkipTlsVerify bool `json:"victorialogs.tls.skip_tls_verify" mapstructure:"victorialogs.tls.skip_tls_verify"` // disables server certificate verification
} `json:"victorialogs.tls" mapstructure:"victorialogs.tls"`
Headers map[string]string `json:"victorialogs.headers" mapstructure:"victorialogs.headers"` // extra headers applied to every request
Timeout int64 `json:"victorialogs.timeout" mapstructure:"victorialogs.timeout"` // millis
ClusterName string `json:"victorialogs.cluster_name" mapstructure:"victorialogs.cluster_name"`
MaxQueryRows int `json:"victorialogs.max_query_rows" mapstructure:"victorialogs.max_query_rows"` // fallback "limit" used by Query when caller passes <= 0
HTTPClient *http.Client `json:"-" mapstructure:"-"` // built by InitHTTPClient; excluded from (de)serialization
}
// LogEntry is one log record decoded from the NDJSON query response;
// keys are field names, values are arbitrary JSON values.
type LogEntry map[string]interface{}
// PrometheusResponse is the Prometheus-style envelope returned by the
// stats query endpoints.
type PrometheusResponse struct {
Status string `json:"status"`
Data PrometheusData `json:"data"`
Error string `json:"error,omitempty"`
}
// PrometheusData is the data section of a Prometheus-style response.
type PrometheusData struct {
ResultType string `json:"resultType"`
Result []PrometheusItem `json:"result"`
}
// PrometheusItem is a single series/sample in a Prometheus-style result.
type PrometheusItem struct {
Metric map[string]string `json:"metric"`
Value []interface{} `json:"value,omitempty"` // [timestamp, value]
Values [][]interface{} `json:"values,omitempty"` // [[timestamp, value], ...]
}
// HitsResult is the response of the hits endpoint; only the per-bucket
// total is decoded.
type HitsResult struct {
Hits []struct {
Total int64 `json:"total"`
}
}
// InitHTTPClient builds and stores the HTTP client used by all requests.
// A zero configured Timeout falls back to 60 seconds; TLS verification
// honours the configured skip flag. Always returns nil.
func (vl *VictoriaLogs) InitHTTPClient() error {
	to := time.Duration(vl.Timeout) * time.Millisecond
	if to == 0 {
		to = 60 * time.Second
	}
	vl.HTTPClient = &http.Client{
		Timeout: to,
		Transport: &http.Transport{
			MaxIdleConns:        100,
			MaxIdleConnsPerHost: 10,
			IdleConnTimeout:     90 * time.Second,
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: vl.VictorialogsTls.SkipTlsVerify,
			},
		},
	}
	return nil
}
// Query runs a LogsQL query and returns the matching log entries.
//
// query is a LogsQL expression; start/end are optional nanosecond time
// bounds (passed through when > 0); limit caps the number of returned
// rows. When limit <= 0 the configured MaxQueryRows is used, falling
// back to 1000 when that is also unset — previously an unset
// MaxQueryRows silently produced a request with limit=0.
//
// The endpoint returns NDJSON (one JSON object per line), decoded into
// a slice of LogEntry maps.
// GET/POST /select/logsql/query?query=<query>&start=<start>&end=<end>&limit=<limit>
func (vl *VictoriaLogs) Query(ctx context.Context, query string, start, end int64, limit int) ([]LogEntry, error) {
	params := url.Values{}
	params.Set("query", query)
	if start > 0 {
		params.Set("start", strconv.FormatInt(start, 10))
	}
	if end > 0 {
		params.Set("end", strconv.FormatInt(end, 10))
	}
	if limit <= 0 {
		// Fall back to the configured cap, then to a sane default so the
		// request always carries a positive limit.
		limit = vl.MaxQueryRows
		if limit <= 0 {
			limit = 1000
		}
	}
	params.Set("limit", strconv.Itoa(limit))

	endpoint := fmt.Sprintf("%s/select/logsql/query", vl.VictorialogsAddr)
	resp, err := vl.doRequest(ctx, "POST", endpoint, params)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read response body failed: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("query failed: status=%d, body=%s", resp.StatusCode, string(body))
	}

	// VictoriaLogs returns NDJSON format (one JSON object per line).
	var logs []LogEntry
	scanner := bufio.NewScanner(strings.NewReader(string(body)))
	for scanner.Scan() {
		line := scanner.Text()
		if line == "" {
			continue
		}
		var entry LogEntry
		if err := json.Unmarshal([]byte(line), &entry); err != nil {
			return nil, fmt.Errorf("decode log entry failed: %w, line=%s", err, line)
		}
		logs = append(logs, entry)
	}
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("scan response failed: %w", err)
	}
	return logs, nil
}
// StatsQuery runs a LogsQL stats query evaluated at a single point in
// time and returns the Prometheus-style response.
//
// ts is an optional evaluation timestamp, passed through when > 0. (The
// parameter was renamed from `time`, which shadowed the time package.)
// POST /select/logsql/stats_query?query=<query>&time=<time>
func (vl *VictoriaLogs) StatsQuery(ctx context.Context, query string, ts int64) (*PrometheusResponse, error) {
	params := url.Values{}
	params.Set("query", query)
	if ts > 0 {
		params.Set("time", strconv.FormatInt(ts, 10))
	}

	endpoint := fmt.Sprintf("%s/select/logsql/stats_query", vl.VictorialogsAddr)
	resp, err := vl.doRequest(ctx, "POST", endpoint, params)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read response body failed: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("stats query failed: status=%d, body=%s", resp.StatusCode, string(body))
	}

	var result PrometheusResponse
	if err := json.Unmarshal(body, &result); err != nil {
		return nil, fmt.Errorf("decode response failed: %w, body=%s", err, string(body))
	}
	if result.Status != "success" {
		return nil, fmt.Errorf("query failed: %s", result.Error)
	}
	return &result, nil
}
// StatsQueryRange runs a LogsQL stats query over a time range and
// returns the Prometheus-style response with one series of values per
// group.
// POST /select/logsql/stats_query_range?query=<query>&start=<start>&end=<end>&step=<step>
func (vl *VictoriaLogs) StatsQueryRange(ctx context.Context, query string, start, end int64, step string) (*PrometheusResponse, error) {
	form := url.Values{"query": []string{query}}
	if start > 0 {
		form.Set("start", strconv.FormatInt(start, 10))
	}
	if end > 0 {
		form.Set("end", strconv.FormatInt(end, 10))
	}
	if step != "" {
		form.Set("step", step)
	}

	resp, err := vl.doRequest(ctx, "POST", vl.VictorialogsAddr+"/select/logsql/stats_query_range", form)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read response body failed: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("stats query range failed: status=%d, body=%s", resp.StatusCode, string(body))
	}

	result := new(PrometheusResponse)
	if err := json.Unmarshal(body, result); err != nil {
		return nil, fmt.Errorf("decode response failed: %w, body=%s", err, string(body))
	}
	if result.Status != "success" {
		return nil, fmt.Errorf("query failed: %s", result.Error)
	}
	return result, nil
}
// HitsLogs returns the number of log entries matching the query, used by
// callers to compute a total count.
// POST /select/logsql/hits?query=<query>&start=<start>&end=<end>
func (vl *VictoriaLogs) HitsLogs(ctx context.Context, query string, start, end int64) (int64, error) {
	form := url.Values{"query": []string{query}}
	if start > 0 {
		form.Set("start", strconv.FormatInt(start, 10))
	}
	if end > 0 {
		form.Set("end", strconv.FormatInt(end, 10))
	}

	resp, err := vl.doRequest(ctx, "POST", vl.VictorialogsAddr+"/select/logsql/hits", form)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return 0, fmt.Errorf("read response body failed: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return 0, fmt.Errorf("hits query failed: status=%d, body=%s", resp.StatusCode, string(body))
	}

	var hits HitsResult
	if err := json.Unmarshal(body, &hits); err != nil {
		return 0, fmt.Errorf("decode response failed: %w, body=%s", err, string(body))
	}
	if len(hits.Hits) == 0 {
		return 0, nil
	}
	return hits.Hits[0].Total, nil
}
// doRequest performs an HTTP request against the VictoriaLogs server.
//
// GET requests encode params into the query string; all other methods
// send them as an application/x-www-form-urlencoded body. Basic auth
// (when a user is configured) and any custom headers are applied before
// the request is issued. The caller owns the returned response body.
func (vl *VictoriaLogs) doRequest(ctx context.Context, method, endpoint string, params url.Values) (*http.Response, error) {
	if vl.HTTPClient == nil {
		// Fail with a clear error instead of a nil-pointer panic when
		// InitHTTPClient was never called.
		return nil, fmt.Errorf("http client not initialized: call InitHTTPClient first")
	}

	var req *http.Request
	var err error
	if method == "GET" {
		fullURL := endpoint
		if len(params) > 0 {
			fullURL = fmt.Sprintf("%s?%s", endpoint, params.Encode())
		}
		req, err = http.NewRequestWithContext(ctx, method, fullURL, nil)
	} else {
		// POST (and any non-GET) carries the params as form data.
		req, err = http.NewRequestWithContext(ctx, method, endpoint, strings.NewReader(params.Encode()))
		if err == nil {
			req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
		}
	}
	if err != nil {
		return nil, fmt.Errorf("create request failed: %w", err)
	}

	if vl.VictorialogsBasic.VictorialogsUser != "" {
		req.SetBasicAuth(vl.VictorialogsBasic.VictorialogsUser, vl.VictorialogsBasic.VictorialogsPass)
	}
	// Custom headers override nothing set above unless keys collide.
	for k, v := range vl.Headers {
		req.Header.Set(k, v)
	}
	return vl.HTTPClient.Do(req)
}

View File

@@ -1,136 +0,0 @@
package victorialogs
import (
"context"
"testing"
"time"
)
// v is a shared fixture pointing at a locally running VictoriaLogs
// instance; the tests below are integration tests and require that
// server to be reachable.
var v = VictoriaLogs{
VictorialogsAddr: "http://127.0.0.1:9428",
Headers: make(map[string]string),
Timeout: 10000, // 10 seconds in milliseconds
}
// TestVictoriaLogs_InitHTTPClient verifies that InitHTTPClient builds a
// usable client on the shared fixture (integration smoke test).
func TestVictoriaLogs_InitHTTPClient(t *testing.T) {
if err := v.InitHTTPClient(); err != nil {
t.Fatalf("InitHTTPClient failed: %v", err)
}
if v.HTTPClient == nil {
t.Fatal("HTTPClient should not be nil after initialization")
}
}
// TestVictoriaLogs_Query issues a match-all query over the last hour and
// logs whatever entries the live server returns (integration test).
func TestVictoriaLogs_Query(t *testing.T) {
	if err := v.InitHTTPClient(); err != nil {
		t.Fatalf("InitHTTPClient failed: %v", err)
	}

	end := time.Now().UnixNano()
	start := end - int64(time.Hour) // one hour window
	entries, err := v.Query(context.Background(), "*", start, end, 10)
	if err != nil {
		t.Fatalf("Query failed: %v", err)
	}

	t.Logf("Query returned %d log entries", len(entries))
	for idx, e := range entries {
		t.Logf("Log[%d]: %v", idx, e)
	}
}
// TestVictoriaLogs_StatsQuery runs a simple count() stats query against
// the live server and logs the result (integration test).
func TestVictoriaLogs_StatsQuery(t *testing.T) {
	if err := v.InitHTTPClient(); err != nil {
		t.Fatalf("InitHTTPClient failed: %v", err)
	}

	res, err := v.StatsQuery(context.Background(), "* | stats count() as total", time.Now().UnixNano())
	if err != nil {
		t.Fatalf("StatsQuery failed: %v", err)
	}

	t.Logf("StatsQuery result: status=%s, resultType=%s", res.Status, res.Data.ResultType)
	for idx, item := range res.Data.Result {
		t.Logf("Result[%d]: metric=%v, value=%v", idx, item.Metric, item.Value)
	}
}
// TestVictoriaLogs_StatsQueryRange runs a ranged count() stats query with
// a 5m step over the last hour and logs the series (integration test).
func TestVictoriaLogs_StatsQueryRange(t *testing.T) {
	if err := v.InitHTTPClient(); err != nil {
		t.Fatalf("InitHTTPClient failed: %v", err)
	}

	end := time.Now().UnixNano()
	start := end - int64(time.Hour) // one hour window
	res, err := v.StatsQueryRange(context.Background(), "* | stats count() as total", start, end, "5m")
	if err != nil {
		t.Fatalf("StatsQueryRange failed: %v", err)
	}

	t.Logf("StatsQueryRange result: status=%s, resultType=%s", res.Status, res.Data.ResultType)
	for idx, item := range res.Data.Result {
		t.Logf("Result[%d]: metric=%v, values count=%d", idx, item.Metric, len(item.Values))
	}
}
// TestVictoriaLogs_HitsLogs asks the live server for the total number of
// entries matching a match-all query over the last hour (integration test).
func TestVictoriaLogs_HitsLogs(t *testing.T) {
	if err := v.InitHTTPClient(); err != nil {
		t.Fatalf("InitHTTPClient failed: %v", err)
	}

	end := time.Now().UnixNano()
	start := end - int64(time.Hour) // one hour window
	total, err := v.HitsLogs(context.Background(), "*", start, end)
	if err != nil {
		t.Fatalf("HitsLogs failed: %v", err)
	}
	t.Logf("HitsLogs total count: %d", total)
}
// TestVictoriaLogs_QueryWithFilter queries with a stream filter over the
// last hour and logs how many entries matched (integration test).
func TestVictoriaLogs_QueryWithFilter(t *testing.T) {
	if err := v.InitHTTPClient(); err != nil {
		t.Fatalf("InitHTTPClient failed: %v", err)
	}

	end := time.Now().UnixNano()
	start := end - int64(time.Hour) // one hour window
	entries, err := v.Query(context.Background(), "_stream:{app=\"test\"}", start, end, 5)
	if err != nil {
		t.Fatalf("Query with filter failed: %v", err)
	}
	t.Logf("Query with filter returned %d log entries", len(entries))
}
// TestVictoriaLogs_StatsQueryByField runs a grouped count() stats query
// (by level) and logs each group's value (integration test).
func TestVictoriaLogs_StatsQueryByField(t *testing.T) {
	if err := v.InitHTTPClient(); err != nil {
		t.Fatalf("InitHTTPClient failed: %v", err)
	}

	res, err := v.StatsQuery(context.Background(), "* | stats by (level) count() as cnt", time.Now().UnixNano())
	if err != nil {
		t.Fatalf("StatsQuery by field failed: %v", err)
	}

	t.Logf("StatsQuery by field result: status=%s", res.Status)
	for idx, item := range res.Data.Result {
		t.Logf("Result[%d]: metric=%v, value=%v", idx, item.Metric, item.Value)
	}
}

1
go.mod
View File

@@ -101,6 +101,7 @@ require (
github.com/jcmturner/gofork v1.7.6 // indirect
github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/larksuite/oapi-sdk-go/v3 v3.5.1 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect

3
go.sum
View File

@@ -243,6 +243,7 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -315,6 +316,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/larksuite/oapi-sdk-go/v3 v3.5.1 h1:gX4dz92YU70inuIX+ug+PBe64eHToIN9rHB4Vupv5Eg=
github.com/larksuite/oapi-sdk-go/v3 v3.5.1/go.mod h1:ZEplY+kwuIrj/nqw5uSCINNATcH3KdxSN7y+UxYY5fI=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=

View File

@@ -1,4 +1,5 @@
{
[
{
"name": "JMX - Kubernetes",
"tags": "Prometheus JMX Kubernetes",
"configs": {
@@ -1870,4 +1871,5 @@
"version": "3.0.0"
},
"uuid": 1755595969673000
}
}
]

View File

@@ -125,7 +125,6 @@ type SiteInfo struct {
PrintBodyPaths []string `json:"print_body_paths"`
PrintAccessLog bool `json:"print_access_log"`
SiteUrl string `json:"site_url"`
ReportHostNIC bool `json:"report_host_nic"`
}
func (c *CvalCache) GetSiteInfo() *SiteInfo {

View File

@@ -68,6 +68,18 @@ func (epc *EventProcessorCacheType) Get(processorId int64) *models.EventPipeline
return epc.eventPipelines[processorId]
}
// GetProcessorsById returns the processors of the cached pipeline with
// the given id, or an empty slice when no such pipeline is cached.
// Reads of the cache map are guarded by the embedded RWMutex.
func (epc *EventProcessorCacheType) GetProcessorsById(processorId int64) []models.Processor {
epc.RLock()
defer epc.RUnlock()
eventPipeline, ok := epc.eventPipelines[processorId]
if !ok {
// Unknown id: return an empty (non-nil) slice so callers can range safely.
return []models.Processor{}
}
return eventPipeline.Processors
}
func (epc *EventProcessorCacheType) GetProcessorIds() []int64 {
epc.RLock()
defer epc.RUnlock()
@@ -125,7 +137,18 @@ func (epc *EventProcessorCacheType) syncEventProcessors() error {
m := make(map[int64]*models.EventPipeline)
for i := 0; i < len(lst); i++ {
m[lst[i].ID] = lst[i]
eventPipeline := lst[i]
for _, p := range eventPipeline.ProcessorConfigs {
processor, err := models.GetProcessorByType(p.Typ, p.Config)
if err != nil {
logger.Warningf("event_pipeline_id: %d, event:%+v, processor:%+v get processor err: %+v", eventPipeline.ID, eventPipeline, p, err)
continue
}
eventPipeline.Processors = append(eventPipeline.Processors, processor)
}
m[lst[i].ID] = eventPipeline
}
epc.Set(m, stat.Total, stat.LastUpdated)

View File

@@ -283,7 +283,7 @@ func (ncc *NotifyChannelCacheType) processNotifyTask(task *NotifyTask) {
if len(task.Sendtos) == 0 || ncc.needBatchContacts(task.NotifyChannel.RequestConfig.HTTPRequestConfig) {
start := time.Now()
resp, err := task.NotifyChannel.SendHTTP(task.Events, task.TplContent, task.CustomParams, task.Sendtos, httpClient)
resp = fmt.Sprintf("send_time: %s duration: %d ms %s", time.Now().Format("2006-01-02 15:04:05"), time.Since(start).Milliseconds(), resp)
resp = fmt.Sprintf("duration: %d ms %s", time.Since(start).Milliseconds(), resp)
logger.Infof("http_sendernotify_id: %d, channel_name: %v, event:%+v, tplContent:%v, customParams:%v, userInfo:%+v, respBody: %v, err: %v",
task.NotifyRuleId, task.NotifyChannel.Name, task.Events[0], task.TplContent, task.CustomParams, task.Sendtos, resp, err)

View File

@@ -33,8 +33,7 @@ const (
DORIS = "doris"
OPENSEARCH = "opensearch"
CLICKHOUSE = "ck"
VICTORIALOGS = "victorialogs"
CLICKHOUSE = "ck"
)
const (
@@ -1220,8 +1219,7 @@ func (ar *AlertRule) IsInnerRule() bool {
ar.Cate == MYSQL ||
ar.Cate == POSTGRESQL ||
ar.Cate == DORIS ||
ar.Cate == OPENSEARCH ||
ar.Cate == VICTORIALOGS
ar.Cate == OPENSEARCH
}
func (ar *AlertRule) GetRuleType() string {

View File

@@ -46,6 +46,12 @@ func NewAnomalyPoint(key string, labels map[string]string, ts int64, value float
}
func (v *AnomalyPoint) ReadableValue() string {
if len(v.ValuesUnit) > 0 {
for _, unit := range v.ValuesUnit { // 配置了单位,优先用配置了单位的值
return unit.Text
}
}
ret := fmt.Sprintf("%.5f", v.Value)
ret = strings.TrimRight(ret, "0")
return strings.TrimRight(ret, ".")

View File

@@ -1,7 +1,6 @@
package models
import (
"encoding/json"
"errors"
"fmt"
"strings"
@@ -13,23 +12,20 @@ import (
// BuiltinMetric represents a metric along with its metadata.
type BuiltinMetric struct {
ID int64 `json:"id" gorm:"primaryKey;type:bigint;autoIncrement;comment:'unique identifier'"`
UUID int64 `json:"uuid" gorm:"type:bigint;not null;default:0;comment:'uuid'"`
Collector string `json:"collector" gorm:"type:varchar(191);not null;index:idx_collector,sort:asc;comment:'type of collector'"`
Typ string `json:"typ" gorm:"type:varchar(191);not null;index:idx_typ,sort:asc;comment:'type of metric'"`
Name string `json:"name" gorm:"type:varchar(191);not null;index:idx_builtinmetric_name,sort:asc;comment:'name of metric'"`
Unit string `json:"unit" gorm:"type:varchar(191);not null;comment:'unit of metric'"`
Note string `json:"note" gorm:"type:varchar(4096);not null;comment:'description of metric'"`
Lang string `json:"lang" gorm:"type:varchar(191);not null;default:'zh';index:idx_lang,sort:asc;comment:'language'"`
Translation []Translation `json:"translation" gorm:"type:text;serializer:json;comment:'translation of metric'"`
Expression string `json:"expression" gorm:"type:varchar(4096);not null;comment:'expression of metric'"`
ExpressionType string `json:"expression_type" gorm:"type:varchar(32);not null;default:'promql';comment:'expression type: metric_name or promql'"`
MetricType string `json:"metric_type" gorm:"type:varchar(191);not null;default:'';comment:'metric type like counter/gauge'"`
ExtraFields json.RawMessage `json:"extra_fields" gorm:"type:text;serializer:json;comment:'custom extra fields'"`
CreatedAt int64 `json:"created_at" gorm:"type:bigint;not null;default:0;comment:'create time'"`
CreatedBy string `json:"created_by" gorm:"type:varchar(191);not null;default:'';comment:'creator'"`
UpdatedAt int64 `json:"updated_at" gorm:"type:bigint;not null;default:0;comment:'update time'"`
UpdatedBy string `json:"updated_by" gorm:"type:varchar(191);not null;default:'';comment:'updater'"`
ID int64 `json:"id" gorm:"primaryKey;type:bigint;autoIncrement;comment:'unique identifier'"`
UUID int64 `json:"uuid" gorm:"type:bigint;not null;default:0;comment:'uuid'"`
Collector string `json:"collector" gorm:"type:varchar(191);not null;index:idx_collector,sort:asc;comment:'type of collector'"`
Typ string `json:"typ" gorm:"type:varchar(191);not null;index:idx_typ,sort:asc;comment:'type of metric'"`
Name string `json:"name" gorm:"type:varchar(191);not null;index:idx_builtinmetric_name,sort:asc;comment:'name of metric'"`
Unit string `json:"unit" gorm:"type:varchar(191);not null;comment:'unit of metric'"`
Note string `json:"note" gorm:"type:varchar(4096);not null;comment:'description of metric'"`
Lang string `json:"lang" gorm:"type:varchar(191);not null;default:'zh';index:idx_lang,sort:asc;comment:'language'"`
Translation []Translation `json:"translation" gorm:"type:text;serializer:json;comment:'translation of metric'"`
Expression string `json:"expression" gorm:"type:varchar(4096);not null;comment:'expression of metric'"`
CreatedAt int64 `json:"created_at" gorm:"type:bigint;not null;default:0;comment:'create time'"`
CreatedBy string `json:"created_by" gorm:"type:varchar(191);not null;default:'';comment:'creator'"`
UpdatedAt int64 `json:"updated_at" gorm:"type:bigint;not null;default:0;comment:'update time'"`
UpdatedBy string `json:"updated_by" gorm:"type:varchar(191);not null;default:'';comment:'updater'"`
}
type Translation struct {

View File

@@ -29,27 +29,6 @@ func (bp *BuiltinPayload) TableName() string {
return "builtin_payloads"
}
type PostgresBuiltinPayload struct {
ID int64 `json:"id" gorm:"primaryKey;type:bigint;autoIncrement;comment:'unique identifier'"`
Type string `json:"type" gorm:"type:varchar(191);not null;index:idx_type,sort:asc;comment:'type of payload'"`
Component string `json:"component" gorm:"type:varchar(191);not null;index:idx_component,sort:asc;comment:'component of payload'"`
ComponentID uint64 `json:"component_id" gorm:"type:bigint;index:idx_component,sort:asc;comment:'component_id of payload'"`
Cate string `json:"cate" gorm:"type:varchar(191);not null;comment:'category of payload'"`
Name string `json:"name" gorm:"type:varchar(191);not null;index:idx_buildinpayload_name,sort:asc;comment:'name of payload'"`
Tags string `json:"tags" gorm:"type:varchar(191);not null;default:'';comment:'tags of payload'"`
Content string `json:"content" gorm:"type:text;not null;comment:'content of payload'"`
UUID int64 `json:"uuid" gorm:"type:bigint;not null;index:idx_uuid;comment:'uuid of payload'"`
Note string `json:"note" gorm:"type:varchar(1024);not null;default:'';comment:'note of payload'"`
CreatedAt int64 `json:"created_at" gorm:"type:bigint;not null;default:0;comment:'create time'"`
CreatedBy string `json:"created_by" gorm:"type:varchar(191);not null;default:'';comment:'creator'"`
UpdatedAt int64 `json:"updated_at" gorm:"type:bigint;not null;default:0;comment:'update time'"`
UpdatedBy string `json:"updated_by" gorm:"type:varchar(191);not null;default:'';comment:'updater'"`
}
func (bp *PostgresBuiltinPayload) TableName() string {
return "builtin_payloads"
}
func (bp *BuiltinPayload) Verify() error {
bp.Type = strings.TrimSpace(bp.Type)
if bp.Type == "" {

View File

@@ -1,10 +1,7 @@
package models
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"math/rand"
"net/http"
"net/url"
@@ -147,72 +144,6 @@ func (h HTTP) ParseUrl() (target *url.URL, err error) {
// TLS holds TLS/mTLS settings for a datasource connection. Certificate
// fields carry PEM content directly, not file paths.
type TLS struct {
	SkipTlsVerify bool `json:"skip_tls_verify"`

	// mTLS settings
	CACert            string `json:"ca_cert"`             // CA certificate (PEM)
	ClientCert        string `json:"client_cert"`         // client certificate (PEM)
	ClientKey         string `json:"client_key"`          // client private key (PEM)
	ClientKeyPassword string `json:"client_key_password"` // optional key password
	ServerName        string `json:"server_name"`         // optional TLS ServerName used for verification
	MinVersion        string `json:"min_version"`         // minimum TLS version (1.0, 1.1, 1.2, 1.3)
	MaxVersion        string `json:"max_version"`         // maximum TLS version
}

// TLSConfig builds a *tls.Config from the PEM material stored in t.
// mTLS is enabled only when both a client cert and key are present; a CA
// cert, when given, replaces the default root pool. Unknown version
// strings are silently ignored.
func (t *TLS) TLSConfig() (*tls.Config, error) {
	cfg := &tls.Config{InsecureSkipVerify: t.SkipTlsVerify}

	if t.ServerName != "" {
		cfg.ServerName = t.ServerName
	}
	if v, ok := tlsVersionMap[t.MinVersion]; ok {
		cfg.MinVersion = v
	}
	if v, ok := tlsVersionMap[t.MaxVersion]; ok {
		cfg.MaxVersion = v
	}

	cert := strings.TrimSpace(t.ClientCert)
	key := strings.TrimSpace(t.ClientKey)
	ca := strings.TrimSpace(t.CACert)

	// Client certificate pair enables mutual TLS.
	if cert != "" && key != "" {
		pair, err := tls.X509KeyPair([]byte(cert), []byte(key))
		if err != nil {
			return nil, fmt.Errorf("failed to load client certificate: %w", err)
		}
		cfg.Certificates = []tls.Certificate{pair}
	}

	// Custom CA pool for verifying the server.
	if ca != "" {
		pool := x509.NewCertPool()
		if !pool.AppendCertsFromPEM([]byte(ca)) {
			return nil, fmt.Errorf("failed to parse CA certificate")
		}
		cfg.RootCAs = pool
	}
	return cfg, nil
}

// tlsVersionMap maps the textual version names accepted in TLS settings
// to their crypto/tls constants.
var tlsVersionMap = map[string]uint16{
	"1.0": tls.VersionTLS10,
	"1.1": tls.VersionTLS11,
	"1.2": tls.VersionTLS12,
	"1.3": tls.VersionTLS13,
}
func (ds *Datasource) TableName() string {
@@ -517,7 +448,8 @@ func (ds *Datasource) Encrypt(openRsa bool, publicKeyData []byte) error {
// Decrypt 用于 edge 将从中心同步的数据源解密,中心不可调用
func (ds *Datasource) Decrypt() error {
if rsaConfig == nil {
return errors.New("rsa config is nil")
logger.Debugf("datasource %s rsa config is nil", ds.Name)
return nil
}
if !rsaConfig.OpenRSA {

View File

@@ -1,9 +1,6 @@
package models
import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/ccfos/nightingale/v6/pkg/ctx"
@@ -51,70 +48,9 @@ func EsIndexPatternDel(ctx *ctx.Context, ids []int64) error {
if len(ids) == 0 {
return nil
}
// 检查是否有告警规则引用了这些 index pattern
for _, id := range ids {
alertRules, err := GetAlertRulesByEsIndexPatternId(ctx, id)
if err != nil {
return errors.WithMessage(err, "failed to check alert rules")
}
if len(alertRules) > 0 {
names := make([]string, 0, len(alertRules))
for _, rule := range alertRules {
names = append(names, rule.Name)
}
return errors.Errorf("index pattern(id=%d) is used by alert rules: %s", id, strings.Join(names, ", "))
}
}
return DB(ctx).Where("id in ?", ids).Delete(new(EsIndexPattern)).Error
}
// GetAlertRulesByEsIndexPatternId returns the alert rules that reference
// the given ES index pattern id.
func GetAlertRulesByEsIndexPatternId(ctx *ctx.Context, indexPatternId int64) ([]*AlertRule, error) {
// index_pattern is stored inside the rule_config JSON field, in the
// queries array, e.g. {"queries":[{"index_type":"index_pattern","index_pattern":123,...}]}.
// Coarse pre-filter with LIKE first, then filter precisely in code.
pattern := fmt.Sprintf(`%%"index_pattern":%d%%`, indexPatternId)
var candidates []*AlertRule
err := DB(ctx).Where("rule_config LIKE ?", pattern).Find(&candidates).Error
if err != nil {
return nil, err
}
// Precise filter: parse the JSON and require an exact id match (the
// LIKE pattern alone would also match longer ids sharing a prefix).
var alertRules []*AlertRule
for _, rule := range candidates {
if ruleUsesIndexPattern(rule.RuleConfig, indexPatternId) {
alertRules = append(alertRules, rule)
}
}
return alertRules, nil
}
// ruleUsesIndexPattern reports whether the alert rule's rule_config JSON
// references the given ES index pattern id in any of its queries.
// Malformed JSON is treated as "not referenced".
func ruleUsesIndexPattern(ruleConfig string, indexPatternId int64) bool {
	var cfg struct {
		Queries []struct {
			IndexPattern int64 `json:"index_pattern"`
		} `json:"queries"`
	}
	if json.Unmarshal([]byte(ruleConfig), &cfg) != nil {
		return false
	}
	for _, q := range cfg.Queries {
		if q.IndexPattern == indexPatternId {
			return true
		}
	}
	return false
}
func (ei *EsIndexPattern) Update(ctx *ctx.Context, eip EsIndexPattern) error {
if ei.Name != eip.Name || ei.DatasourceId != eip.DatasourceId {
exists, err := EsIndexPatternExists(ctx, ei.Id, eip.DatasourceId, eip.Name)

View File

@@ -13,10 +13,6 @@ import (
type EventPipeline struct {
ID int64 `json:"id" gorm:"primaryKey"`
Name string `json:"name" gorm:"type:varchar(128)"`
Typ string `json:"typ" gorm:"type:varchar(128)"` // builtin, user-defined // event_pipeline, event_summary, metric_explorer
UseCase string `json:"use_case" gorm:"type:varchar(128)"` // metric_explorer, event_summary, event_pipeline
TriggerMode string `json:"trigger_mode" gorm:"type:varchar(128)"` // event, api, cron
Disabled bool `json:"disabled" gorm:"type:boolean"`
TeamIds []int64 `json:"team_ids" gorm:"type:text;serializer:json"`
TeamNames []string `json:"team_names" gorm:"-"`
Description string `json:"description" gorm:"type:varchar(255)"`
@@ -24,18 +20,12 @@ type EventPipeline struct {
LabelFilters []TagFilter `json:"label_filters" gorm:"type:text;serializer:json"`
AttrFilters []TagFilter `json:"attribute_filters" gorm:"type:text;serializer:json"`
ProcessorConfigs []ProcessorConfig `json:"processors" gorm:"type:text;serializer:json"`
CreateAt int64 `json:"create_at" gorm:"type:bigint"`
CreateBy string `json:"create_by" gorm:"type:varchar(64)"`
UpdateAt int64 `json:"update_at" gorm:"type:bigint"`
UpdateBy string `json:"update_by" gorm:"type:varchar(64)"`
// 工作流节点列表
Nodes []WorkflowNode `json:"nodes,omitempty" gorm:"type:text;serializer:json"`
// 节点连接关系
Connections Connections `json:"connections,omitempty" gorm:"type:text;serializer:json"`
// 环境变量(工作流级别的配置变量)
EnvVariables []EnvVariable `json:"env_variables,omitempty" gorm:"type:text;serializer:json"`
CreateAt int64 `json:"create_at" gorm:"type:bigint"`
CreateBy string `json:"create_by" gorm:"type:varchar(64)"`
UpdateAt int64 `json:"update_at" gorm:"type:bigint"`
UpdateBy string `json:"update_by" gorm:"type:varchar(64)"`
Processors []Processor `json:"-" gorm:"-"`
}
type ProcessorConfig struct {
@@ -56,6 +46,9 @@ func (e *EventPipeline) Verify() error {
return errors.New("team_ids cannot be empty")
}
if len(e.TeamIds) == 0 {
e.TeamIds = make([]int64, 0)
}
if len(e.LabelFilters) == 0 {
e.LabelFilters = make([]TagFilter, 0)
}
@@ -66,17 +59,6 @@ func (e *EventPipeline) Verify() error {
e.ProcessorConfigs = make([]ProcessorConfig, 0)
}
// 初始化空数组,避免 null
if e.Nodes == nil {
e.Nodes = make([]WorkflowNode, 0)
}
if e.Connections == nil {
e.Connections = make(Connections)
}
if e.EnvVariables == nil {
e.EnvVariables = make([]EnvVariable, 0)
}
return nil
}
@@ -194,87 +176,3 @@ func EventPipelineStatistics(ctx *ctx.Context) (*Statistics, error) {
return stats[0], nil
}
// GetWorkflowNodes returns the pipeline's workflow nodes in the unified
// []WorkflowNode form, whether the pipeline was stored in the new
// (Nodes) or legacy (ProcessorConfigs) format.
func (e *EventPipeline) GetWorkflowNodes() []WorkflowNode {
	// New format wins when present.
	if len(e.Nodes) > 0 {
		return e.Nodes
	}
	// Legacy format: synthesize one node per processor config.
	out := make([]WorkflowNode, len(e.ProcessorConfigs))
	for idx, pc := range e.ProcessorConfigs {
		out[idx] = WorkflowNode{
			ID:     fmt.Sprintf("node_%d", idx),
			Name:   pc.Typ,
			Type:   pc.Typ,
			Config: pc.Config,
		}
	}
	return out
}
// GetWorkflowConnections returns the node wiring. Explicitly stored
// connections take precedence; otherwise a linear chain
// node_0 → node_1 → … is generated from the node list.
func (e *EventPipeline) GetWorkflowConnections() Connections {
	if len(e.Connections) > 0 {
		return e.Connections
	}
	nodes := e.GetWorkflowNodes()
	linear := make(Connections)
	for i := 0; i+1 < len(nodes); i++ {
		linear[nodes[i].ID] = NodeConnections{
			Main: [][]ConnectionTarget{
				{{Node: nodes[i+1].ID, Type: "main", Index: 0}},
			},
		}
	}
	return linear
}
func (e *EventPipeline) FillWorkflowFields() {
if len(e.Nodes) == 0 && len(e.ProcessorConfigs) > 0 {
e.Nodes = e.GetWorkflowNodes()
e.Connections = e.GetWorkflowConnections()
}
}
func (e *EventPipeline) GetEnvMap() map[string]string {
envMap := make(map[string]string)
for _, v := range e.EnvVariables {
envMap[v.Key] = v.Value
}
return envMap
}
func (e *EventPipeline) GetSecretKeys() map[string]bool {
secretKeys := make(map[string]bool)
for _, v := range e.EnvVariables {
if v.Secret {
secretKeys[v.Key] = true
}
}
return secretKeys
}
func (e *EventPipeline) ValidateEnvVariables(overrides map[string]string) error {
// 合并默认值和覆盖值
merged := e.GetEnvMap()
for k, v := range overrides {
merged[k] = v
}
// 校验必填项
for _, v := range e.EnvVariables {
if v.Required && merged[v.Key] == "" {
return fmt.Errorf("required env variable %s is missing", v.Key)
}
}
return nil
}

View File

@@ -1,282 +0,0 @@
package models
import (
"encoding/json"
"fmt"
"github.com/ccfos/nightingale/v6/pkg/ctx"
)
// 执行状态常量
const (
ExecutionStatusRunning = "running"
ExecutionStatusSuccess = "success"
ExecutionStatusFailed = "failed"
)
// EventPipelineExecution 工作流执行记录
type EventPipelineExecution struct {
ID string `json:"id" gorm:"primaryKey;type:varchar(36)"`
PipelineID int64 `json:"pipeline_id" gorm:"index"`
PipelineName string `json:"pipeline_name" gorm:"type:varchar(128)"`
EventID int64 `json:"event_id" gorm:"index"`
// 触发模式event告警触发、apiAPI触发、cron定时触发
Mode string `json:"mode" gorm:"type:varchar(16);index"`
// 状态running、success、failed
Status string `json:"status" gorm:"type:varchar(16);index"`
// 各节点执行结果JSON
NodeResults string `json:"node_results" gorm:"type:mediumtext"`
// 错误信息
ErrorMessage string `json:"error_message" gorm:"type:varchar(1024)"`
ErrorNode string `json:"error_node" gorm:"type:varchar(36)"`
// 时间
CreatedAt int64 `json:"created_at" gorm:"index"`
FinishedAt int64 `json:"finished_at"`
DurationMs int64 `json:"duration_ms"`
// 触发者信息
TriggerBy string `json:"trigger_by" gorm:"type:varchar(64)"`
// 环境变量快照(脱敏后存储)
EnvSnapshot string `json:"env_snapshot,omitempty" gorm:"type:text"`
}
func (e *EventPipelineExecution) TableName() string {
return "event_pipeline_execution"
}
// SetNodeResults serializes the per-node execution results to JSON and
// stores them in the record's NodeResults column (mediumtext).
func (e *EventPipelineExecution) SetNodeResults(results []*NodeExecutionResult) error {
	data, err := json.Marshal(results)
	if err != nil {
		return err
	}
	e.NodeResults = string(data)
	return nil
}
// GetNodeResults deserializes the stored node execution results.
// Returns (nil, nil) when no results have been recorded yet.
func (e *EventPipelineExecution) GetNodeResults() ([]*NodeExecutionResult, error) {
	if e.NodeResults == "" {
		return nil, nil
	}
	var results []*NodeExecutionResult
	err := json.Unmarshal([]byte(e.NodeResults), &results)
	return results, err
}
// SetEnvSnapshot 设置环境变量快照(脱敏后存储)
func (e *EventPipelineExecution) SetEnvSnapshot(env map[string]string) error {
data, err := json.Marshal(env)
if err != nil {
return err
}
e.EnvSnapshot = string(data)
return nil
}
// GetEnvSnapshot 获取环境变量快照
func (e *EventPipelineExecution) GetEnvSnapshot() (map[string]string, error) {
if e.EnvSnapshot == "" {
return nil, nil
}
var env map[string]string
err := json.Unmarshal([]byte(e.EnvSnapshot), &env)
return env, err
}
// CreateEventPipelineExecution 创建执行记录
func CreateEventPipelineExecution(c *ctx.Context, execution *EventPipelineExecution) error {
return DB(c).Create(execution).Error
}
// UpdateEventPipelineExecution 更新执行记录
func UpdateEventPipelineExecution(c *ctx.Context, execution *EventPipelineExecution) error {
return DB(c).Save(execution).Error
}
// GetEventPipelineExecution 获取单条执行记录
func GetEventPipelineExecution(c *ctx.Context, id string) (*EventPipelineExecution, error) {
var execution EventPipelineExecution
err := DB(c).Where("id = ?", id).First(&execution).Error
if err != nil {
return nil, err
}
return &execution, nil
}
// ListEventPipelineExecutions 获取 Pipeline 的执行记录列表
func ListEventPipelineExecutions(c *ctx.Context, pipelineID int64, mode, status string, limit, offset int) ([]*EventPipelineExecution, int64, error) {
var executions []*EventPipelineExecution
var total int64
session := DB(c).Model(&EventPipelineExecution{}).Where("pipeline_id = ?", pipelineID)
if mode != "" {
session = session.Where("mode = ?", mode)
}
if status != "" {
session = session.Where("status = ?", status)
}
err := session.Count(&total).Error
if err != nil {
return nil, 0, err
}
err = session.Order("created_at desc").Limit(limit).Offset(offset).Find(&executions).Error
if err != nil {
return nil, 0, err
}
return executions, total, nil
}
// ListEventPipelineExecutionsByEventID 根据事件ID获取执行记录
func ListEventPipelineExecutionsByEventID(c *ctx.Context, eventID int64) ([]*EventPipelineExecution, error) {
var executions []*EventPipelineExecution
err := DB(c).Where("event_id = ?", eventID).Order("created_at desc").Find(&executions).Error
return executions, err
}
// ListAllEventPipelineExecutions 获取所有 Pipeline 的执行记录列表
func ListAllEventPipelineExecutions(c *ctx.Context, pipelineName, mode, status string, limit, offset int) ([]*EventPipelineExecution, int64, error) {
var executions []*EventPipelineExecution
var total int64
session := DB(c).Model(&EventPipelineExecution{})
if pipelineName != "" {
session = session.Where("pipeline_name LIKE ?", "%"+pipelineName+"%")
}
if mode != "" {
session = session.Where("mode = ?", mode)
}
if status != "" {
session = session.Where("status = ?", status)
}
err := session.Count(&total).Error
if err != nil {
return nil, 0, err
}
err = session.Order("created_at desc").Limit(limit).Offset(offset).Find(&executions).Error
if err != nil {
return nil, 0, err
}
return executions, total, nil
}
// DeleteEventPipelineExecutions 批量删除执行记录(按时间)
func DeleteEventPipelineExecutions(c *ctx.Context, beforeTime int64) (int64, error) {
result := DB(c).Where("created_at < ?", beforeTime).Delete(&EventPipelineExecution{})
return result.RowsAffected, result.Error
}
// DeleteEventPipelineExecutionsByPipelineID 删除指定 Pipeline 的所有执行记录
func DeleteEventPipelineExecutionsByPipelineID(c *ctx.Context, pipelineID int64) error {
return DB(c).Where("pipeline_id = ?", pipelineID).Delete(&EventPipelineExecution{}).Error
}
// EventPipelineExecutionStatistics 执行统计
type EventPipelineExecutionStatistics struct {
Total int64 `json:"total"`
Success int64 `json:"success"`
Failed int64 `json:"failed"`
Running int64 `json:"running"`
AvgDurMs int64 `json:"avg_duration_ms"`
LastRunAt int64 `json:"last_run_at"`
}
// GetEventPipelineExecutionStatistics 获取执行统计信息
func GetEventPipelineExecutionStatistics(c *ctx.Context, pipelineID int64) (*EventPipelineExecutionStatistics, error) {
var stats EventPipelineExecutionStatistics
// 总数
err := DB(c).Model(&EventPipelineExecution{}).Where("pipeline_id = ?", pipelineID).Count(&stats.Total).Error
if err != nil {
return nil, err
}
// 成功数
err = DB(c).Model(&EventPipelineExecution{}).Where("pipeline_id = ? AND status = ?", pipelineID, ExecutionStatusSuccess).Count(&stats.Success).Error
if err != nil {
return nil, err
}
// 失败数
err = DB(c).Model(&EventPipelineExecution{}).Where("pipeline_id = ? AND status = ?", pipelineID, ExecutionStatusFailed).Count(&stats.Failed).Error
if err != nil {
return nil, err
}
// 运行中
err = DB(c).Model(&EventPipelineExecution{}).Where("pipeline_id = ? AND status = ?", pipelineID, ExecutionStatusRunning).Count(&stats.Running).Error
if err != nil {
return nil, err
}
// 平均耗时
var avgDur struct {
AvgDur float64 `gorm:"column:avg_dur"`
}
err = DB(c).Model(&EventPipelineExecution{}).
Select("AVG(duration_ms) as avg_dur").
Where("pipeline_id = ? AND status = ?", pipelineID, ExecutionStatusSuccess).
Scan(&avgDur).Error
if err != nil {
return nil, err
}
stats.AvgDurMs = int64(avgDur.AvgDur)
// 最后执行时间
var lastExec EventPipelineExecution
err = DB(c).Where("pipeline_id = ?", pipelineID).Order("created_at desc").First(&lastExec).Error
if err == nil {
stats.LastRunAt = lastExec.CreatedAt
}
return &stats, nil
}
// EventPipelineExecutionDetail 执行详情(包含解析后的节点结果)
type EventPipelineExecutionDetail struct {
EventPipelineExecution
NodeResultsParsed []*NodeExecutionResult `json:"node_results_parsed"`
EnvSnapshotParsed map[string]string `json:"env_snapshot_parsed"`
}
// GetEventPipelineExecutionDetail 获取执行详情
func GetEventPipelineExecutionDetail(c *ctx.Context, id string) (*EventPipelineExecutionDetail, error) {
execution, err := GetEventPipelineExecution(c, id)
if err != nil {
return nil, err
}
detail := &EventPipelineExecutionDetail{
EventPipelineExecution: *execution,
}
// 解析节点结果
nodeResults, err := execution.GetNodeResults()
if err != nil {
return nil, fmt.Errorf("parse node results error: %w", err)
}
detail.NodeResultsParsed = nodeResults
// 解析环境变量快照
envSnapshot, err := execution.GetEnvSnapshot()
if err != nil {
return nil, fmt.Errorf("parse env snapshot error: %w", err)
}
detail.EnvSnapshotParsed = envSnapshot
return detail, nil
}

View File

@@ -9,21 +9,11 @@ import (
type Processor interface {
Init(settings interface{}) (Processor, error) // 初始化配置
Process(ctx *ctx.Context, wfCtx *WorkflowContext) (*WorkflowContext, string, error)
Process(ctx *ctx.Context, event *AlertCurEvent) (*AlertCurEvent, string, error)
// 处理器有三种情况:
// 1. 处理成功,返回处理后的 WorkflowContext
// 2. 处理成功,不需要返回处理后的上下文,只返回处理结果,将处理结果放到 string 中,比如 eventdrop callback 处理器
// 1. 处理成功,返回处理后的事件
// 2. 处理成功,不需要返回处理后端事件,只返回处理结果,将处理结果放到 string 中,比如 eventdrop callback 处理器
// 3. 处理失败,返回错误,将错误放到 error 中
// WorkflowContext 包含Event事件、Env环境变量/输入参数、Metadata执行元数据
}
// BranchProcessor 分支处理器接口
// 用于 if、switch、foreach 等需要返回分支索引或特殊输出的处理器
type BranchProcessor interface {
Processor
// ProcessWithBranch 处理事件并返回 NodeOutput
// NodeOutput 包含:处理后的上下文、消息、是否终止、分支索引
ProcessWithBranch(ctx *ctx.Context, wfCtx *WorkflowContext) (*NodeOutput, error)
}
type NewProcessorFn func(settings interface{}) (Processor, error)

View File

@@ -622,26 +622,9 @@ var NewTplMap = map[string]string{
{{if $event.RuleNote }}**Alarm description:** **{{$event.RuleNote}}**{{end}}
{{- end -}}
[Event Details]({{.domain}}/share/alert-his-events/{{$event.Id}})|[Block for 1 hour]({{.domain}}/alert-mutes/add?__event_id={{$event.Id}})|[View Curve]({{.domain}}/metric/explorer?__event_id={{$event.Id}}&mode=graph)`,
// Jira and JSMAlert share the same template format
Jira: `Severity: S{{$event.Severity}} {{if $event.IsRecovered}}Recovered{{else}}Triggered{{end}}
Rule Name: {{$event.RuleName}}{{if $event.RuleNote}}
Rule Notes: {{$event.RuleNote}}{{end}}
Metrics: {{$event.TagsJSON}}
Annotations:
{{- range $key, $val := $event.AnnotationsJSON}}
{{$key}}: {{$val}}
{{- end}}\n{{if $event.IsRecovered}}Recovery Time: {{timeformat $event.LastEvalTime}}{{else}}Trigger Time: {{timeformat $event.TriggerTime}}
Trigger Value: {{$event.TriggerValue}}{{end}}
Send Time: {{timestamp}}
Event Details: {{.domain}}/share/alert-his-events/{{$event.Id}}
Mute for 1 Hour: {{.domain}}/alert-mutes/add?__event_id={{$event.Id}}`,
}
// Weight 用于页面元素排序weight 越大 排序越靠后
var MsgTplMap = []MessageTemplate{
{Name: "Jira", Ident: Jira, Weight: 18, Content: map[string]string{"content": NewTplMap[Jira]}},
{Name: "JSMAlert", Ident: JSMAlert, Weight: 17, Content: map[string]string{"content": NewTplMap[Jira]}},
{Name: "Callback", Ident: "callback", Weight: 16, Content: map[string]string{"content": ""}},
{Name: "MattermostWebhook", Ident: MattermostWebhook, Weight: 15, Content: map[string]string{"content": NewTplMap[MattermostWebhook]}},
{Name: "MattermostBot", Ident: MattermostBot, Weight: 14, Content: map[string]string{"content": NewTplMap[MattermostWebhook]}},

View File

@@ -68,7 +68,7 @@ func MigrateTables(db *gorm.DB) error {
&Board{}, &BoardBusigroup{}, &Users{}, &SsoConfig{}, &models.BuiltinMetric{},
&models.MetricFilter{}, &models.NotificationRecord{}, &models.TargetBusiGroup{},
&models.UserToken{}, &models.DashAnnotation{}, MessageTemplate{}, NotifyRule{}, NotifyChannelConfig{}, &EsIndexPatternMigrate{},
&models.EventPipeline{}, &models.EventPipelineExecution{}, &models.EmbeddedProduct{}, &models.SourceToken{},
&models.EventPipeline{}, &models.EmbeddedProduct{}, &models.SourceToken{},
&models.SavedView{}, &models.UserViewFavorite{}}
if isPostgres(db) {
@@ -99,11 +99,7 @@ func MigrateTables(db *gorm.DB) error {
}()
if !db.Migrator().HasTable(&models.BuiltinPayload{}) {
if isPostgres(db) {
dts = append(dts, &models.PostgresBuiltinPayload{})
} else {
dts = append(dts, &models.BuiltinPayload{})
}
dts = append(dts, &models.BuiltinPayload{})
} else {
dts = append(dts, &BuiltinPayloads{})
}

View File

@@ -725,10 +725,10 @@ func (ncc *NotifyChannelConfig) SendHTTP(events []*AlertCurEvent, tpl map[string
logger.Errorf("send_http: failed to read response. url=%s request_body=%s error=%v", url, string(body), err)
}
if resp.StatusCode == http.StatusOK {
return fmt.Sprintf("status_code:%d, response:%s", resp.StatusCode, string(body)), nil
return string(body), nil
}
return fmt.Sprintf("status_code:%d, response:%s", resp.StatusCode, string(body)), fmt.Errorf("failed to send request, status code: %d, body: %s", resp.StatusCode, string(body))
return "", fmt.Errorf("failed to send request, status code: %d, body: %s", resp.StatusCode, string(body))
}
return lastErrorMessage, errors.New("all retries failed, last error: " + lastErrorMessage)
@@ -1204,56 +1204,24 @@ func (c NotiChList) IfUsed(nr *NotifyRule) bool {
return false
}
// Weight 用于页面元素排序weight 越大 排序越靠后
var NotiChMap = []*NotifyChannelConfig{
{
Name: "PagerDuty", Ident: "pagerduty", RequestType: "pagerduty", Weight: 19, Enable: true,
RequestConfig: &RequestConfig{
PagerDutyRequestConfig: &PagerDutyRequestConfig{
ApiKey: "pagerduty api key",
Timeout: 5000,
RetryTimes: 3,
},
},
},
{
Name: "JIRA", Ident: Jira, RequestType: "http", Weight: 18, Enable: true,
Name: "Callback", Ident: "callback", RequestType: "http", Weight: 2, Enable: true,
RequestConfig: &RequestConfig{
HTTPRequestConfig: &HTTPRequestConfig{
URL: "https://{JIRA Service Account Email}:{API Token}@api.atlassian.com/ex/jira/{CloudID}/rest/api/3/issue",
Method: "POST",
Headers: map[string]string{"Content-Type": "application/json"},
URL: "{{$params.callback_url}}",
Method: "POST", Headers: map[string]string{"Content-Type": "application/json"},
Timeout: 10000, Concurrency: 5, RetryTimes: 3, RetryInterval: 100,
Request: RequestDetail{
Body: `{"fields":{"project":{"key":"{{$params.project_key}}"},"issuetype":{"name":"{{if $event.IsRecovered}}Recovery{{else}}Alert{{end}}"},"summary":"{{$event.RuleName}}","description":{"type":"doc","version":1,"content":[{"type":"paragraph","content":[{"type":"text","text":"{{$tpl.content}}"}]}]},"labels":["{{join $event.TagsJSON "\",\""}}", "eventHash={{$event.Hash}}"]}}`,
Body: `{{ jsonMarshal $events }}`,
},
},
},
ParamConfig: &NotifyParamConfig{
Custom: Params{
Params: []ParamItem{
{Key: "project_key", CName: "Project Key", Type: "string"},
},
},
},
},
{
Name: "JSM Alert", Ident: JSMAlert, RequestType: "http", Weight: 17, Enable: true,
RequestConfig: &RequestConfig{
HTTPRequestConfig: &HTTPRequestConfig{
URL: `https://api.atlassian.com/jsm/ops/integration/v2/alerts{{if $event.IsRecovered}}/{{$event.Hash}}/close?identifierType=alias{{else}}{{end}}`,
Method: "POST",
Headers: map[string]string{"Content-Type": "application/json", "Authorization": "GenieKey {{$params.api_key}}"},
Timeout: 10000, Concurrency: 5, RetryTimes: 3, RetryInterval: 100,
Request: RequestDetail{
Body: `{{if $event.IsRecovered}}{"note":"{{$tpl.content}}","source":"{{$event.Cluster}}"}{{else}}{"message":"{{$event.RuleName}}","description":"{{$tpl.content}}","alias":"{{$event.Hash}}","priority":"P{{$event.Severity}}","tags":[{{range $i, $v := $event.TagsJSON}}{{if $i}},{{end}}"{{$v}}"{{end}}],"details":{{jsonMarshal $event.AnnotationsJSON}},"entity":"{{$event.TargetIdent}}","source":"{{$event.Cluster}}"}{{end}}`,
},
},
},
ParamConfig: &NotifyParamConfig{
Custom: Params{
Params: []ParamItem{
{Key: "api_key", CName: "API Key", Type: "string"},
{Key: "callback_url", CName: "Callback Url", Type: "string"},
{Key: "note", CName: "Note", Type: "string"},
},
},
},
@@ -1646,27 +1614,6 @@ var NotiChMap = []*NotifyChannelConfig{
},
},
},
{
Name: "Callback", Ident: "callback", RequestType: "http", Weight: 2, Enable: true,
RequestConfig: &RequestConfig{
HTTPRequestConfig: &HTTPRequestConfig{
URL: "{{$params.callback_url}}",
Method: "POST", Headers: map[string]string{"Content-Type": "application/json"},
Timeout: 10000, Concurrency: 5, RetryTimes: 3, RetryInterval: 100,
Request: RequestDetail{
Body: `{{ jsonMarshal $events }}`,
},
},
},
ParamConfig: &NotifyParamConfig{
Custom: Params{
Params: []ParamItem{
{Key: "callback_url", CName: "Callback Url", Type: "string"},
{Key: "note", CName: "Note", Type: "string"},
},
},
},
},
{
Name: "FlashDuty", Ident: "flashduty", RequestType: "flashduty", Weight: 1, Enable: true,
RequestConfig: &RequestConfig{
@@ -1683,6 +1630,16 @@ var NotiChMap = []*NotifyChannelConfig{
},
},
},
{
Name: "PagerDuty", Ident: "pagerduty", RequestType: "pagerduty", Weight: 1, Enable: true,
RequestConfig: &RequestConfig{
PagerDutyRequestConfig: &PagerDutyRequestConfig{
ApiKey: "pagerduty api key",
Timeout: 5000,
RetryTimes: 3,
},
},
},
}
func InitNotifyChannel(ctx *ctx.Context) {

View File

@@ -68,7 +68,7 @@ type RelationKey struct {
type QueryParam struct {
Cate string `json:"cate"`
DatasourceId int64 `json:"datasource_id"`
Queries []interface{} `json:"query"`
Queries []interface{} `json:"query"`
}
type Series struct {

View File

@@ -1,14 +1,12 @@
package models
import (
"encoding/json"
"fmt"
"os"
"regexp"
"strconv"
"strings"
"time"
"unicode"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/ormx"
@@ -42,8 +40,6 @@ const (
Lark = "lark"
LarkCard = "larkcard"
Phone = "phone"
Jira = "jira"
JSMAlert = "jsm_alert"
DingtalkKey = "dingtalk_robot_token"
WecomKey = "wecom_robot_token"
@@ -146,42 +142,6 @@ func (u *User) CheckGroupPermission(ctx *ctx.Context, groupIds []int64) error {
return nil
}
// stripInvisibleChars removes invisible Unicode characters from a string
// This includes zero-width spaces, control characters, and other invisible chars
func stripInvisibleChars(s string) string {
return strings.Map(func(r rune) rune {
// Keep printable characters and common whitespace (space, tab, newline)
if unicode.IsPrint(r) || r == ' ' || r == '\t' || r == '\n' || r == '\r' {
return r
}
// Remove invisible characters
return -1
}, s)
}
// stripInvisibleCharsFromContacts removes invisible characters from Contacts JSON values
func stripInvisibleCharsFromContacts(contacts ormx.JSONObj) ormx.JSONObj {
if len(contacts) == 0 {
return contacts
}
var contactsMap map[string]string
if err := json.Unmarshal(contacts, &contactsMap); err != nil {
return contacts
}
for k, v := range contactsMap {
contactsMap[k] = stripInvisibleChars(v)
}
result, err := json.Marshal(contactsMap)
if err != nil {
return contacts
}
return ormx.JSONObj(result)
}
func (u *User) Verify() error {
u.Username = strings.TrimSpace(u.Username)
@@ -205,9 +165,6 @@ func (u *User) Verify() error {
return errors.New("Email invalid")
}
// Strip invisible characters from Contacts values
u.Contacts = stripInvisibleCharsFromContacts(u.Contacts)
if u.Phone != "" {
return u.EncryptPhone()
}
@@ -341,11 +298,6 @@ func (u *User) Del(ctx *ctx.Context) error {
}
func (u *User) ChangePassword(ctx *ctx.Context, oldpass, newpass string) error {
// SSO 用户ldap/oidc/cas/oauth2/dingtalk等且未设置本地密码不支持本地修改密码
if u.Belong != "" && u.Password == "******" {
return fmt.Errorf("SSO user(%s) cannot change password locally, please change password in %s", u.Username, u.Belong)
}
_oldpass, err := CryptoPass(ctx, oldpass)
if err != nil {
return err

View File

@@ -1,160 +0,0 @@
package models
// WorkflowNode 工作流节点
type WorkflowNode struct {
ID string `json:"id"` // 节点唯一ID
Name string `json:"name"` // 显示名称
Type string `json:"type"` // 节点类型(对应 Processor typ
Position []float64 `json:"position,omitempty"` // [x, y] UI位置
Config interface{} `json:"config"` // 节点配置
// 执行控制
Disabled bool `json:"disabled,omitempty"`
ContinueOnFail bool `json:"continue_on_fail,omitempty"`
RetryOnFail bool `json:"retry_on_fail,omitempty"`
MaxRetries int `json:"max_retries,omitempty"`
RetryInterval int `json:"retry_interval,omitempty"` // 秒
}
// Connections 节点连接关系 map[源节点ID]NodeConnections
type Connections map[string]NodeConnections
// NodeConnections 单个节点的输出连接
type NodeConnections struct {
// Main 输出端口的连接
// Main[outputIndex] = []ConnectionTarget
Main [][]ConnectionTarget `json:"main"`
}
// ConnectionTarget 连接目标
type ConnectionTarget struct {
Node string `json:"node"` // 目标节点ID
Type string `json:"type"` // 输入类型,通常是 "main"
Index int `json:"index"` // 目标节点的输入端口索引
}
// EnvVariable 环境变量
type EnvVariable struct {
Key string `json:"key"` // 变量名
Value string `json:"value"` // 默认值
Description string `json:"description,omitempty"` // 描述
Secret bool `json:"secret,omitempty"` // 是否敏感(日志脱敏)
Required bool `json:"required,omitempty"` // 是否必填
}
// NodeOutput 节点执行输出
type NodeOutput struct {
WfCtx *WorkflowContext `json:"wf_ctx"` // 处理后的工作流上下文
Message string `json:"message"` // 处理消息
Terminate bool `json:"terminate"` // 是否终止流程
BranchIndex *int `json:"branch_index,omitempty"` // 分支索引(条件节点使用)
// 流式输出支持
Stream bool `json:"stream,omitempty"` // 是否流式输出
StreamChan chan *StreamChunk `json:"-"` // 流式数据通道(不序列化)
}
// WorkflowResult 工作流执行结果
type WorkflowResult struct {
Event *AlertCurEvent `json:"event"` // 最终事件
Status string `json:"status"` // success, failed, streaming
Message string `json:"message"` // 汇总消息
NodeResults []*NodeExecutionResult `json:"node_results"` // 各节点执行结果
ErrorNode string `json:"error_node,omitempty"`
// 流式输出支持
Stream bool `json:"stream,omitempty"` // 是否流式输出
StreamChan chan *StreamChunk `json:"-"` // 流式数据通道(不序列化)
}
// NodeExecutionResult 节点执行结果
type NodeExecutionResult struct {
NodeID string `json:"node_id"`
NodeName string `json:"node_name"`
NodeType string `json:"node_type"`
Status string `json:"status"` // success, failed, skipped
Message string `json:"message"`
StartedAt int64 `json:"started_at"`
FinishedAt int64 `json:"finished_at"`
DurationMs int64 `json:"duration_ms"`
Error string `json:"error,omitempty"`
BranchIndex *int `json:"branch_index,omitempty"` // 条件节点的分支选择
}
// 触发模式常量
const (
TriggerModeEvent = "event" // 告警事件触发
TriggerModeAPI = "api" // API 触发
TriggerModeCron = "cron" // 定时触发(后续支持)
)
// WorkflowTriggerContext 工作流触发上下文
type WorkflowTriggerContext struct {
// 触发模式
Mode string `json:"mode"`
// 触发者
TriggerBy string `json:"trigger_by"`
// 请求IDAPI/Cron 触发使用)
RequestID string `json:"request_id"`
// 环境变量覆盖
EnvOverrides map[string]string `json:"env_overrides"`
// 流式输出API 调用时动态指定)
Stream bool `json:"stream"`
// Cron 相关(后续使用)
CronJobID string `json:"cron_job_id,omitempty"`
CronExpr string `json:"cron_expr,omitempty"`
ScheduledAt int64 `json:"scheduled_at,omitempty"`
}
type WorkflowContext struct {
Event *AlertCurEvent `json:"event"` // 当前事件
Env map[string]string `json:"env"` // 环境变量/配置(静态,来自 Pipeline 配置)
Vars map[string]interface{} `json:"vars"` // 节点间传递的数据(动态,运行时产生)
Metadata map[string]string `json:"metadata"` // 执行元数据request_id、start_time 等)
Output map[string]interface{} `json:"output,omitempty"` // 输出结果(非告警场景使用)
// 流式输出支持
Stream bool `json:"-"` // 是否启用流式输出(不序列化)
StreamChan chan *StreamChunk `json:"-"` // 流式数据通道(不序列化)
}
// SanitizedEnv 返回脱敏后的环境变量(用于日志和存储)
func (ctx *WorkflowContext) SanitizedEnv(secretKeys map[string]bool) map[string]string {
sanitized := make(map[string]string)
for k, v := range ctx.Env {
if secretKeys[k] {
sanitized[k] = "******"
} else {
sanitized[k] = v
}
}
return sanitized
}
// StreamChunk 类型常量
const (
StreamTypeThinking = "thinking" // AI 思考过程ReAct Thought
StreamTypeToolCall = "tool_call" // 工具调用
StreamTypeToolResult = "tool_result" // 工具执行结果
StreamTypeText = "text" // LLM 文本输出
StreamTypeDone = "done" // 完成
StreamTypeError = "error" // 错误
)
// StreamChunk 流式数据块
type StreamChunk struct {
Type string `json:"type"` // thinking / tool_call / tool_result / text / done / error
Content string `json:"content"` // 完整内容(累积)
Delta string `json:"delta,omitempty"` // 增量内容
NodeID string `json:"node_id,omitempty"` // 当前节点 ID
RequestID string `json:"request_id,omitempty"` // 请求追踪 ID
Metadata interface{} `json:"metadata,omitempty"` // 额外元数据(如工具调用参数)
Done bool `json:"done"` // 是否结束
Error string `json:"error,omitempty"` // 错误信息
Timestamp int64 `json:"timestamp"` // 时间戳(毫秒)
}

345
pkg/feishu/feishu.go Normal file
View File

@@ -0,0 +1,345 @@
package feishu
import (
"bytes"
"context"
"fmt"
"net/url"
"strings"
"sync"
"time"
"github.com/ccfos/nightingale/v6/storage"
"github.com/google/uuid"
"github.com/pkg/errors"
"github.com/toolkits/pkg/logger"
lark "github.com/larksuite/oapi-sdk-go/v3"
larkcore "github.com/larksuite/oapi-sdk-go/v3/core"
larkauthen "github.com/larksuite/oapi-sdk-go/v3/service/authen/v1"
larkcontact "github.com/larksuite/oapi-sdk-go/v3/service/contact/v3"
)
const defaultAuthURL = "https://accounts.feishu.cn/open-apis/authen/v1/authorize"
const SsoTypeName = "feishu"
// SsoClient wraps the Feishu (Lark) OAuth SSO integration.
// The embedded RWMutex guards FeiShuConfig and client, which Reload
// swaps atomically when the SSO settings change.
type SsoClient struct {
	Enable       bool
	FeiShuConfig *Config `json:"-"`
	// NOTE(review): storing a context on a struct is unconventional and
	// Ctx is not referenced in this file — confirm it is needed.
	Ctx    context.Context
	client *lark.Client // lark SDK v3 client; nil until Reload succeeds
	sync.RWMutex
}
// Config holds the Feishu SSO settings as loaded from the SSO config store.
type Config struct {
	Enable        bool   `json:"enable"`
	AuthURL       string `json:"auth_url"`
	DisplayName   string `json:"display_name"`
	AppID         string `json:"app_id"`
	AppSecret     string `json:"app_secret"`
	RedirectURL   string `json:"redirect_url"`
	UsernameField string `json:"username_field"` // which profile field becomes the username: name, email, phone
	FeiShuEndpoint string `json:"feishu_endpoint"` // Feishu API endpoint; defaults to open.feishu.cn
	// NOTE(review): Proxy, CoverAttributes and DefaultRoles are not
	// referenced in this file — confirm they are honored by the caller.
	Proxy           string   `json:"proxy"`
	CoverAttributes bool     `json:"cover_attributes"`
	DefaultRoles    []string `json:"default_roles"`
}
type CallbackOutput struct {
Redirect string `json:"redirect"`
Msg string `json:"msg"`
AccessToken string `json:"accessToken"`
Username string `json:"Username"`
Nickname string `json:"Nickname"`
Phone string `yaml:"Phone"`
Email string `yaml:"Email"`
}
// wrapStateKey namespaces an OAuth state value for use as a redis key.
func wrapStateKey(key string) string {
	return fmt.Sprintf("n9e_feishu_oauth_%s", key)
}
// createClient builds a Feishu SDK v3 client from the config.
// NOTE(review): when FeiShuEndpoint is set this mutates the
// package-global lark.FeishuBaseUrl, affecting every lark client in the
// process — confirm a single endpoint per process is acceptable.
// The error return is always nil today; kept for signature stability.
func (c *Config) createClient() (*lark.Client, error) {
	opts := []lark.ClientOptionFunc{
		lark.WithLogLevel(larkcore.LogLevelInfo),
		lark.WithEnableTokenCache(true), // enable token cache so the SDK refreshes tokens itself
	}
	if c.FeiShuEndpoint != "" {
		lark.FeishuBaseUrl = c.FeiShuEndpoint
	}
	// Create the v3 client.
	client := lark.NewClient(
		c.AppID,
		c.AppSecret,
		opts...,
	)
	return client, nil
}
// New constructs a Feishu SSO client. When the integration is disabled
// it returns an inert, zero-valued client without touching the config.
func New(cf Config) *SsoClient {
	s := &SsoClient{}
	if cf.Enable {
		s.Reload(cf)
	}
	return s
}
// AuthCodeURL builds the Feishu authorization URL carrying the given
// anti-CSRF state value. It errors when no RedirectURL is configured,
// since Feishu would reject the request anyway.
func (s *SsoClient) AuthCodeURL(state string) (string, error) {
	// Validate before doing any work — the original checked this only
	// after the query string was already half-built.
	if s.FeiShuConfig.RedirectURL == "" {
		return "", errors.New("FeiShu OAuth RedirectURL is empty")
	}

	authURL := defaultAuthURL
	if s.FeiShuConfig.AuthURL != "" {
		authURL = s.FeiShuConfig.AuthURL
	}

	v := url.Values{
		"app_id": {s.FeiShuConfig.AppID},
		"state":  {state},
	}
	v.Set("redirect_uri", s.FeiShuConfig.RedirectURL)

	// Append with '&' when the configured URL already has a query part.
	sep := "?"
	if strings.Contains(authURL, "?") {
		sep = "&"
	}
	return authURL + sep + v.Encode(), nil
}
// GetUserToken exchanges an OAuth authorization code for the user's
// access token and user_id using the SDK v3 authen service. Both values
// are validated to be non-empty before returning.
func (s *SsoClient) GetUserToken(code string) (string, string, error) {
	if s.client == nil {
		return "", "", errors.New("feishu client is not initialized")
	}
	ctx := context.Background()
	// Build the v3 access-token request for the authorization_code grant.
	req := larkauthen.NewCreateAccessTokenReqBuilder().
		Body(larkauthen.NewCreateAccessTokenReqBodyBuilder().
			GrantType("authorization_code").
			Code(code).
			Build()).
		Build()
	resp, err := s.client.Authen.AccessToken.Create(ctx, req)
	if err != nil {
		return "", "", fmt.Errorf("feishu get access token error: %w", err)
	}
	// Check the logical (application-level) response status.
	if !resp.Success() {
		return "", "", fmt.Errorf("feishu api error: code=%d, msg=%s", resp.Code, resp.Msg)
	}
	if resp.Data == nil {
		return "", "", errors.New("feishu api returned empty data")
	}
	// SDK fields are pointers; dereference defensively.
	userID := ""
	if resp.Data.UserId != nil {
		userID = *resp.Data.UserId
	}
	if userID == "" {
		return "", "", errors.New("feishu api returned empty user_id")
	}
	accessToken := ""
	if resp.Data.AccessToken != nil {
		accessToken = *resp.Data.AccessToken
	}
	if accessToken == "" {
		return "", "", errors.New("feishu api returned empty access_token")
	}
	return accessToken, userID, nil
}
// GetUserInfo fetches the user's profile by user_id via the SDK v3
// contact service. No access token is passed because the SDK manages
// its own tokens internally.
func (s *SsoClient) GetUserInfo(userID string) (*larkcontact.GetUserRespData, error) {
	if s.client == nil {
		return nil, errors.New("feishu client is not initialized")
	}
	ctx := context.Background()
	// Build the v3 contact request, keyed by user_id.
	req := larkcontact.NewGetUserReqBuilder().
		UserId(userID).
		UserIdType(larkcontact.UserIdTypeUserId).
		Build()
	resp, err := s.client.Contact.User.Get(ctx, req)
	if err != nil {
		return nil, fmt.Errorf("feishu get user detail error: %w", err)
	}
	// Check the logical (application-level) response status.
	if !resp.Success() {
		return nil, fmt.Errorf("feishu api error: code=%d, msg=%s", resp.Code, resp.Msg)
	}
	if resp.Data == nil || resp.Data.User == nil {
		return nil, errors.New("feishu api returned empty user data")
	}
	return resp.Data, nil
}
// Reload swaps in a new Feishu SSO configuration under the write lock,
// rebuilding the SDK client when the integration is enabled and fully
// configured. On client-creation failure the previous client is kept.
func (s *SsoClient) Reload(feishuConfig Config) {
	s.Lock()
	defer s.Unlock()

	s.Enable = feishuConfig.Enable
	s.FeiShuConfig = &feishuConfig

	if !feishuConfig.Enable || feishuConfig.AppID == "" || feishuConfig.AppSecret == "" {
		return
	}
	client, err := feishuConfig.createClient()
	if err != nil {
		logger.Errorf("create feishu client error: %v", err)
		return
	}
	s.client = client
}
// GetDisplayName returns the configured display name, or the empty
// string when the Feishu SSO integration is disabled.
func (s *SsoClient) GetDisplayName() string {
	s.RLock()
	defer s.RUnlock()

	if s.Enable {
		return s.FeiShuConfig.DisplayName
	}
	return ""
}
// Authorize stores the post-login redirect in redis under a fresh state
// value (valid for 5 minutes, matching the OAuth round-trip window) and
// returns the Feishu authorization URL to send the browser to.
func (s *SsoClient) Authorize(redis storage.Redis, redirect string) (string, error) {
	state := uuid.New().String()
	ctx := context.Background()

	// 5*time.Minute replaces time.Duration(300*time.Second), which was a
	// redundant double conversion (the product is already a Duration).
	if err := redis.Set(ctx, wrapStateKey(state), redirect, 5*time.Minute).Err(); err != nil {
		return "", err
	}

	s.RLock()
	defer s.RUnlock()
	return s.AuthCodeURL(state)
}
// Callback completes the Feishu OAuth flow: it exchanges the code for a
// user access token, loads the user profile, resolves the post-login
// redirect stored under state, and maps the profile onto CallbackOutput.
// The login username is chosen by Config.UsernameField ("name", "phone",
// default: email) and must be non-empty for the chosen field.
func (s *SsoClient) Callback(redis storage.Redis, ctx context.Context, code, state string) (*CallbackOutput, error) {
	// Exchange the authorization code for an access token and user id.
	accessToken, userID, err := s.GetUserToken(code)
	if err != nil {
		return nil, fmt.Errorf("feishu GetUserToken error: %w", err)
	}

	// Fetch the user profile (the SDK manages its own tokens).
	userData, err := s.GetUserInfo(userID)
	if err != nil {
		return nil, fmt.Errorf("feishu GetUserInfo error: %w", err)
	}

	// Resolve the redirect recorded by Authorize; fall back to "/".
	redirect := ""
	if redis != nil {
		redirect, err = fetchRedirect(redis, ctx, state)
		if err != nil {
			logger.Errorf("get redirect err:%v code:%s state:%s", err, code, state)
		}
	}
	if redirect == "" {
		redirect = "/"
	}
	// BUGFIX: only touch redis when a client is present — the previous
	// code called deleteRedirect unconditionally, which panics on a nil
	// storage.Redis (the fetch above was already guarded).
	if redis != nil {
		if err := deleteRedirect(redis, ctx, state); err != nil {
			logger.Errorf("delete redirect err:%v code:%s state:%s", err, code, state)
		}
	}

	if userData == nil || userData.User == nil {
		return nil, fmt.Errorf("feishu GetUserInfo failed, user data is nil")
	}
	user := userData.User
	logger.Debugf("feishu get user info userID %s result %+v", userID, user)

	// All SDK profile fields are pointers; dereference defensively.
	username := ""
	if user.UserId != nil {
		username = *user.UserId
	}
	if username == "" {
		return nil, errors.New("feishu user_id is empty")
	}
	nickname := ""
	if user.Name != nil {
		nickname = *user.Name
	}
	phone := ""
	if user.Mobile != nil {
		phone = *user.Mobile
	}
	email := ""
	if user.Email != nil {
		email = *user.Email
	}
	// Fall back to the enterprise email when the personal one is unset.
	if email == "" && user.EnterpriseEmail != nil {
		email = *user.EnterpriseEmail
	}

	out := CallbackOutput{
		Redirect:    redirect,
		AccessToken: accessToken,
		Nickname:    nickname,
		Phone:       phone,
		Email:       email,
	}

	// Pick the login username according to configuration.
	switch s.FeiShuConfig.UsernameField {
	case "name":
		if nickname == "" {
			return nil, errors.New("feishu user name is empty")
		}
		out.Username = nickname
	case "phone":
		if phone == "" {
			return nil, errors.New("feishu user phone is empty")
		}
		out.Username = phone
	default:
		if email == "" {
			return nil, errors.New("feishu user email is empty")
		}
		out.Username = email
	}

	return &out, nil
}
// fetchRedirect returns the redirect URL stored under the given state.
// NOTE(review): by Go convention ctx should be the first parameter.
func fetchRedirect(redis storage.Redis, ctx context.Context, state string) (string, error) {
	return redis.Get(ctx, wrapStateKey(state)).Result()
}

// deleteRedirect removes the state→redirect mapping once consumed.
func deleteRedirect(redis storage.Redis, ctx context.Context, state string) error {
	return redis.Del(ctx, wrapStateKey(state)).Err()
}

View File

@@ -1084,7 +1084,7 @@ type InitPostgresDatasource struct {
Status string `gorm:"size:255;not null;default:''"`
HTTP string `gorm:"size:4096;not null;default:''"`
Auth string `gorm:"size:8192;not null;default:''"`
IsDefault bool `gorm:"type:boolean;not null;default:0"`
IsDefault bool `gorm:"typr:boolean;not null;default:0"`
CreatedAt int64 `gorm:"not null;default:0"`
CreatedBy string `gorm:"size:64;not null;default:''"`
UpdatedAt int64 `gorm:"not null;default:0"`
@@ -1494,6 +1494,10 @@ func sqliteDataBaseInit(db *gorm.DB) error {
{RoleName: "Standard", Operation: "/alert-rules-built-in"},
{RoleName: "Standard", Operation: "/dashboards-built-in"},
{RoleName: "Standard", Operation: "/trace/dependencies"},
{RoleName: "Admin", Operation: "/help/source"},
{RoleName: "Admin", Operation: "/help/sso"},
{RoleName: "Admin", Operation: "/help/notification-tpls"},
{RoleName: "Admin", Operation: "/help/notification-settings"},
{RoleName: "Standard", Operation: "/users"},
{RoleName: "Standard", Operation: "/user-groups"},
{RoleName: "Standard", Operation: "/user-groups/add"},
@@ -1655,7 +1659,8 @@ func mysqlDataBaseInit(db *gorm.DB) error {
for _, dt := range dts {
err := db.AutoMigrate(dt)
if err != nil {
logger.Errorf("mysqlDataBaseInit AutoMigrate error: %v\n", err)
fmt.Printf("mysqlDataBaseInit AutoMigrate error: %v\n", err)
return err
}
}
@@ -1663,7 +1668,7 @@ func mysqlDataBaseInit(db *gorm.DB) error {
tableName := "task_host_" + strconv.Itoa(i)
err := db.Table(tableName).AutoMigrate(&InitTaskHost{})
if err != nil {
logger.Errorf("mysqlDataBaseInit AutoMigrate task_host_%d error: %v\n", i, err)
return err
}
}
@@ -1685,6 +1690,10 @@ func mysqlDataBaseInit(db *gorm.DB) error {
{RoleName: "Standard", Operation: "/alert-rules-built-in"},
{RoleName: "Standard", Operation: "/dashboards-built-in"},
{RoleName: "Standard", Operation: "/trace/dependencies"},
{RoleName: "Admin", Operation: "/help/source"},
{RoleName: "Admin", Operation: "/help/sso"},
{RoleName: "Admin", Operation: "/help/notification-tpls"},
{RoleName: "Admin", Operation: "/help/notification-settings"},
{RoleName: "Standard", Operation: "/users"},
{RoleName: "Standard", Operation: "/user-groups"},
{RoleName: "Standard", Operation: "/user-groups/add"},
@@ -1877,6 +1886,10 @@ func postgresDataBaseInit(db *gorm.DB) error {
{RoleName: "Standard", Operation: "/alert-rules-built-in"},
{RoleName: "Standard", Operation: "/dashboards-built-in"},
{RoleName: "Standard", Operation: "/trace/dependencies"},
{RoleName: "Admin", Operation: "/help/source"},
{RoleName: "Admin", Operation: "/help/sso"},
{RoleName: "Admin", Operation: "/help/notification-tpls"},
{RoleName: "Admin", Operation: "/help/notification-settings"},
{RoleName: "Standard", Operation: "/users"},
{RoleName: "Standard", Operation: "/user-groups"},
{RoleName: "Standard", Operation: "/user-groups/add"},

View File

@@ -348,15 +348,6 @@ func New(c DBConfig) (*gorm.DB, error) {
return nil, fmt.Errorf("failed to open database: %v", err)
}
// 检查 user 表是否存在,可能用户自己创建了空的数据库,如果不存在也执行 DataBaseInit
if dbExist && !db.Migrator().HasTable("users") {
fmt.Printf("Database exists but user table not found, initializing tables for %s\n", c.DBType)
err = DataBaseInit(c, db)
if err != nil {
return nil, fmt.Errorf("failed to init database: %v", err)
}
}
if c.Debug {
db = db.Debug()
}

View File

@@ -3,7 +3,7 @@ package prom
import (
"sync"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/tlsx"
)
type PromOption struct {
@@ -20,8 +20,7 @@ type PromOption struct {
Headers []string
// TLS 配置(支持 mTLS
TLS models.TLS
tlsx.ClientConfig
}
func (po *PromOption) Equal(target PromOption) bool {
@@ -53,6 +52,10 @@ func (po *PromOption) Equal(target PromOption) bool {
return false
}
if po.InsecureSkipVerify != target.InsecureSkipVerify {
return false
}
if len(po.Headers) != len(target.Headers) {
return false
}
@@ -63,29 +66,6 @@ func (po *PromOption) Equal(target PromOption) bool {
}
}
// 比较 TLS 配置
if po.TLS.SkipTlsVerify != target.TLS.SkipTlsVerify {
return false
}
if po.TLS.CACert != target.TLS.CACert {
return false
}
if po.TLS.ClientCert != target.TLS.ClientCert {
return false
}
if po.TLS.ClientKey != target.TLS.ClientKey {
return false
}
if po.TLS.ServerName != target.TLS.ServerName {
return false
}
if po.TLS.MinVersion != target.TLS.MinVersion {
return false
}
if po.TLS.MaxVersion != target.TLS.MaxVersion {
return false
}
return true
}

View File

@@ -85,15 +85,15 @@ func (pc *PromClientMap) loadFromDatabase() {
var internalAddr string
for k, v := range ds.SettingsJson {
if strings.Contains(k, "write_addr") {
writeAddr = strings.TrimSpace(v.(string))
writeAddr = v.(string)
} else if strings.Contains(k, "internal_addr") && v.(string) != "" {
internalAddr = strings.TrimSpace(v.(string))
internalAddr = v.(string)
}
}
po := PromOption{
ClusterName: ds.Name,
Url: strings.TrimSpace(ds.HTTPJson.Url),
Url: ds.HTTPJson.Url,
WriteAddr: writeAddr,
BasicAuthUser: ds.AuthJson.BasicAuthUser,
BasicAuthPass: ds.AuthJson.BasicAuthPassword,
@@ -101,7 +101,11 @@ func (pc *PromClientMap) loadFromDatabase() {
DialTimeout: ds.HTTPJson.DialTimeout,
MaxIdleConnsPerHost: ds.HTTPJson.MaxIdleConnsPerHost,
Headers: header,
TLS: ds.HTTPJson.TLS,
}
if strings.HasPrefix(ds.HTTPJson.Url, "https") {
po.UseTLS = true
po.InsecureSkipVerify = ds.HTTPJson.TLS.SkipTlsVerify
}
if internalAddr != "" && !pc.ctx.IsCenter {
@@ -145,10 +149,7 @@ func (pc *PromClientMap) loadFromDatabase() {
}
func (pc *PromClientMap) newReaderClientFromPromOption(po PromOption) (api.Client, error) {
tlsConfig, err := po.TLS.TLSConfig()
if err != nil {
return nil, fmt.Errorf("failed to create TLS config: %v", err)
}
tlsConfig, _ := po.TLSConfig()
return api.NewClient(api.Config{
Address: po.Url,
@@ -165,10 +166,7 @@ func (pc *PromClientMap) newReaderClientFromPromOption(po PromOption) (api.Clien
}
func (pc *PromClientMap) newWriterClientFromPromOption(po PromOption) (api.Client, error) {
tlsConfig, err := po.TLS.TLSConfig()
if err != nil {
return nil, fmt.Errorf("failed to create TLS config: %v", err)
}
tlsConfig, _ := po.TLSConfig()
return api.NewClient(api.Config{
Address: po.WriteAddr,

View File

@@ -35,6 +35,12 @@ type Pushgw struct {
WriterOpt WriterGlobalOpt
Writers []WriterOptions
KafkaWriters []KafkaWriterOptions
// 预处理的字段,用于快速匹配只有 __name__ 的 DropSample 规则
// key: metric name, value: struct{}
DropMetricNames map[string]struct{}
// 包含多个标签的复杂 DropSample 规则
DropSampleComplex []map[string]string
}
type WriterGlobalOpt struct {

View File

@@ -109,21 +109,30 @@ func (rt *Router) debugSample(remoteAddr string, v *prompb.TimeSeries) {
}
func (rt *Router) DropSample(v *prompb.TimeSeries) bool {
filters := rt.Pushgw.DropSample
if len(filters) == 0 {
// 快速路径:检查仅 __name__ 的过滤器 O(1)
if len(rt.dropByNameOnly) > 0 {
for i := 0; i < len(v.Labels); i++ {
if v.Labels[i].Name == "__name__" {
if _, ok := rt.dropByNameOnly[v.Labels[i].Value]; ok {
return true
}
break // __name__ 只会出现一次,找到后直接跳出
}
}
}
// 慢速路径:处理复杂的多条件过滤器
if len(rt.dropComplex) == 0 {
return false
}
labelMap := make(map[string]string)
// 只有复杂过滤器存在时才创建 labelMap
labelMap := make(map[string]string, len(v.Labels))
for i := 0; i < len(v.Labels); i++ {
labelMap[v.Labels[i].Name] = v.Labels[i].Value
}
for _, filter := range filters {
if len(filter) == 0 {
continue
}
for _, filter := range rt.dropComplex {
if matchSample(filter, labelMap) {
return true
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/prometheus/prometheus/prompb"
"github.com/toolkits/pkg/logger"
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/center/metas"
@@ -33,6 +34,10 @@ type Router struct {
Ctx *ctx.Context
HandleTS HandleTSFunc
HeartbeatApi string
// 预编译的 DropSample 过滤器
dropByNameOnly map[string]struct{} // 仅 __name__ 条件的快速匹配
dropComplex []map[string]string // 多条件的复杂匹配
}
func stat() gin.HandlerFunc {
@@ -51,7 +56,7 @@ func stat() gin.HandlerFunc {
func New(httpConfig httpx.Config, pushgw pconf.Pushgw, aconf aconf.Alert, tc *memsto.TargetCacheType, bg *memsto.BusiGroupCacheType,
idents *idents.Set, metas *metas.Set,
writers *writer.WritersType, ctx *ctx.Context) *Router {
return &Router{
rt := &Router{
HTTP: httpConfig,
Pushgw: pushgw,
Aconf: aconf,
@@ -63,6 +68,38 @@ func New(httpConfig httpx.Config, pushgw pconf.Pushgw, aconf aconf.Alert, tc *me
MetaSet: metas,
HandleTS: func(pt *prompb.TimeSeries) *prompb.TimeSeries { return pt },
}
// 预编译 DropSample 过滤器
rt.initDropSampleFilters()
return rt
}
// initDropSampleFilters precompiles the configured DropSample filters.
// Filters whose only condition is __name__ go into a set for O(1) lookup on
// the hot path; every other non-empty filter is kept for label-by-label
// matching.
func (rt *Router) initDropSampleFilters() {
	rt.dropByNameOnly = make(map[string]struct{})
	rt.dropComplex = make([]map[string]string, 0)

	for _, f := range rt.Pushgw.DropSample {
		switch {
		case len(f) == 0:
			// an empty filter matches nothing; ignore it
		case len(f) == 1:
			if metricName, ok := f["__name__"]; ok {
				// single-condition __name__ filter: fast-path set
				rt.dropByNameOnly[metricName] = struct{}{}
			} else {
				rt.dropComplex = append(rt.dropComplex, f)
			}
		default:
			rt.dropComplex = append(rt.dropComplex, f)
		}
	}

	logger.Infof("DropSample filters initialized: %d name-only, %d complex",
		len(rt.dropByNameOnly), len(rt.dropComplex))
}
func (rt *Router) Config(r *gin.Engine) {