mirror of
https://github.com/ccfos/nightingale.git
synced 2026-03-03 14:38:55 +00:00
Compare commits
20 Commits
dev23
...
release-21
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
bec92fdc60 | ||
|
|
671f14092c | ||
|
|
99d6ba9508 | ||
|
|
47f3eae09d | ||
|
|
5e89c670a8 | ||
|
|
e1cc37c753 | ||
|
|
2be94f592c | ||
|
|
5babc4310a | ||
|
|
f968fcd593 | ||
|
|
4dc7035550 | ||
|
|
2a2b46ca7b | ||
|
|
ed96ab9d5b | ||
|
|
2e2bbd6aeb | ||
|
|
c93694a2a9 | ||
|
|
cfb8c3b66a | ||
|
|
cb5e62b7bb | ||
|
|
ebfde8d6a0 | ||
|
|
b4dcaebf83 | ||
|
|
fa491e313a | ||
|
|
4fe2b5042f |
@@ -253,7 +253,7 @@ func HandleEventPipeline(pipelineConfigs []models.PipelineConfig, eventOrigin, e
|
||||
// 统一使用工作流引擎执行(兼容线性模式和工作流模式)
|
||||
triggerCtx := &models.WorkflowTriggerContext{
|
||||
Mode: models.TriggerModeEvent,
|
||||
TriggerBy: from,
|
||||
TriggerBy: from + "_" + strconv.FormatInt(id, 10),
|
||||
}
|
||||
|
||||
resultEvent, result, err := workflowEngine.Execute(eventPipeline, event, triggerCtx)
|
||||
|
||||
@@ -1614,11 +1614,15 @@ func (arw *AlertRuleWorker) GetAnomalyPoint(rule *models.AlertRule, dsId int64)
|
||||
continue
|
||||
}
|
||||
|
||||
switch v.(type) {
|
||||
case float64:
|
||||
values += fmt.Sprintf("%s:%.3f ", k, v)
|
||||
case string:
|
||||
values += fmt.Sprintf("%s:%s ", k, v)
|
||||
if u, exists := valuesUnitMap[k]; exists { // 配置了单位,优先用配置了单位的值
|
||||
values += fmt.Sprintf("%s:%s ", k, u.Text)
|
||||
} else {
|
||||
switch v.(type) {
|
||||
case float64:
|
||||
values += fmt.Sprintf("%s:%.3f ", k, v)
|
||||
case string:
|
||||
values += fmt.Sprintf("%s:%s ", k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -60,11 +60,11 @@ func (e *WorkflowEngine) Execute(pipeline *models.EventPipeline, event *models.A
|
||||
}
|
||||
|
||||
func (e *WorkflowEngine) initWorkflowContext(pipeline *models.EventPipeline, event *models.AlertCurEvent, triggerCtx *models.WorkflowTriggerContext) *models.WorkflowContext {
|
||||
// 合并环境变量
|
||||
env := pipeline.GetEnvMap()
|
||||
if triggerCtx != nil && triggerCtx.EnvOverrides != nil {
|
||||
for k, v := range triggerCtx.EnvOverrides {
|
||||
env[k] = v
|
||||
// 合并输入参数
|
||||
inputs := pipeline.GetInputsMap()
|
||||
if triggerCtx != nil && triggerCtx.InputsOverrides != nil {
|
||||
for k, v := range triggerCtx.InputsOverrides {
|
||||
inputs[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
@@ -84,7 +84,7 @@ func (e *WorkflowEngine) initWorkflowContext(pipeline *models.EventPipeline, eve
|
||||
|
||||
return &models.WorkflowContext{
|
||||
Event: event,
|
||||
Env: env,
|
||||
Inputs: inputs,
|
||||
Vars: make(map[string]interface{}), // 初始化空的 Vars,供节点间传递数据
|
||||
Metadata: metadata,
|
||||
Stream: stream,
|
||||
@@ -182,7 +182,6 @@ func (e *WorkflowEngine) executeDAG(nodeMap map[string]*models.WorkflowNode, con
|
||||
result.Status = models.ExecutionStatusFailed
|
||||
result.ErrorNode = nodeID
|
||||
result.Message = fmt.Sprintf("node %s failed: %s", node.Name, nodeResult.Error)
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
@@ -371,10 +370,8 @@ func (e *WorkflowEngine) saveExecutionRecord(pipeline *models.EventPipeline, wfC
|
||||
logger.Errorf("workflow: failed to set node results: pipeline_id=%d, error=%v", pipeline.ID, err)
|
||||
}
|
||||
|
||||
secretKeys := pipeline.GetSecretKeys()
|
||||
sanitizedEnv := wfCtx.SanitizedEnv(secretKeys)
|
||||
if err := execution.SetEnvSnapshot(sanitizedEnv); err != nil {
|
||||
logger.Errorf("workflow: failed to set env snapshot: pipeline_id=%d, error=%v", pipeline.ID, err)
|
||||
if err := execution.SetInputsSnapshot(wfCtx.Inputs); err != nil {
|
||||
logger.Errorf("workflow: failed to set inputs snapshot: pipeline_id=%d, error=%v", pipeline.ID, err)
|
||||
}
|
||||
|
||||
if err := models.CreateEventPipelineExecution(e.ctx, execution); err != nil {
|
||||
|
||||
@@ -114,7 +114,7 @@ func (c *AISummaryConfig) initHTTPClient() error {
|
||||
func (c *AISummaryConfig) prepareEventInfo(wfCtx *models.WorkflowContext) (string, error) {
|
||||
var defs = []string{
|
||||
"{{$event := .Event}}",
|
||||
"{{$env := .Env}}",
|
||||
"{{$inputs := .Inputs}}",
|
||||
}
|
||||
|
||||
text := strings.Join(append(defs, c.PromptTemplate), "")
|
||||
|
||||
@@ -44,8 +44,8 @@ func TestAISummaryConfig_Process(t *testing.T) {
|
||||
|
||||
// 创建 WorkflowContext
|
||||
wfCtx := &models.WorkflowContext{
|
||||
Event: event,
|
||||
Env: map[string]string{},
|
||||
Event: event,
|
||||
Inputs: map[string]string{},
|
||||
}
|
||||
|
||||
// 测试模板处理
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/common"
|
||||
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/utils"
|
||||
"github.com/ccfos/nightingale/v6/models"
|
||||
"github.com/ccfos/nightingale/v6/pkg/ctx"
|
||||
"github.com/toolkits/pkg/logger"
|
||||
@@ -71,12 +72,17 @@ func (c *CallbackConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext
|
||||
headers[k] = v
|
||||
}
|
||||
|
||||
url, err := utils.TplRender(wfCtx, c.URL)
|
||||
if err != nil {
|
||||
return wfCtx, "", fmt.Errorf("failed to render url template: %v processor: %v", err, c)
|
||||
}
|
||||
|
||||
body, err := json.Marshal(event)
|
||||
if err != nil {
|
||||
return wfCtx, "", fmt.Errorf("failed to marshal event: %v processor: %v", err, c)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", c.URL, strings.NewReader(string(body)))
|
||||
req, err := http.NewRequest("POST", url, strings.NewReader(string(body)))
|
||||
if err != nil {
|
||||
return wfCtx, "", fmt.Errorf("failed to create request: %v processor: %v", err, c)
|
||||
}
|
||||
|
||||
@@ -36,7 +36,7 @@ func (c *EventDropConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContex
|
||||
"{{ $event := .Event }}",
|
||||
"{{ $labels := .Event.TagsMap }}",
|
||||
"{{ $value := .Event.TriggerValue }}",
|
||||
"{{ $env := .Env }}",
|
||||
"{{ $inputs := .Inputs }}",
|
||||
}
|
||||
|
||||
text := strings.Join(append(defs, c.Content), "")
|
||||
|
||||
@@ -148,7 +148,7 @@ func (c *IfConfig) evaluateExpressionCondition(wfCtx *models.WorkflowContext) (b
|
||||
"{{ $event := .Event }}",
|
||||
"{{ $labels := .Event.TagsMap }}",
|
||||
"{{ $value := .Event.TriggerValue }}",
|
||||
"{{ $env := .Env }}",
|
||||
"{{ $inputs := .Inputs }}",
|
||||
}
|
||||
|
||||
text := strings.Join(append(defs, c.Condition), "")
|
||||
|
||||
@@ -175,7 +175,7 @@ func (c *SwitchConfig) evaluateExpressionCondition(condition string, wfCtx *mode
|
||||
"{{ $event := .Event }}",
|
||||
"{{ $labels := .Event.TagsMap }}",
|
||||
"{{ $value := .Event.TriggerValue }}",
|
||||
"{{ $env := .Env }}",
|
||||
"{{ $inputs := .Inputs }}",
|
||||
}
|
||||
|
||||
text := strings.Join(append(defs, condition), "")
|
||||
|
||||
32
alert/pipeline/processor/utils/utils.go
Normal file
32
alert/pipeline/processor/utils/utils.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/ccfos/nightingale/v6/models"
|
||||
"github.com/ccfos/nightingale/v6/pkg/tplx"
|
||||
)
|
||||
|
||||
func TplRender(wfCtx *models.WorkflowContext, content string) (string, error) {
|
||||
var defs = []string{
|
||||
"{{ $event := .Event }}",
|
||||
"{{ $labels := .Event.TagsMap }}",
|
||||
"{{ $value := .Event.TriggerValue }}",
|
||||
"{{ $inputs := .Inputs }}",
|
||||
}
|
||||
text := strings.Join(append(defs, content), "")
|
||||
tpl, err := template.New("tpl").Funcs(tplx.TemplateFuncMap).Parse(text)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse template: %v", err)
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
if err = tpl.Execute(&body, wfCtx); err != nil {
|
||||
return "", fmt.Errorf("failed to execute template: %v", err)
|
||||
}
|
||||
|
||||
return strings.TrimSpace(body.String()), nil
|
||||
}
|
||||
@@ -271,10 +271,8 @@ func Init(ctx *ctx.Context, builtinIntegrationsDir string) {
|
||||
}
|
||||
|
||||
for _, metric := range metrics {
|
||||
if metric.UUID == 0 {
|
||||
time.Sleep(time.Microsecond)
|
||||
metric.UUID = time.Now().UnixMicro()
|
||||
}
|
||||
time.Sleep(time.Microsecond)
|
||||
metric.UUID = time.Now().UnixMicro()
|
||||
metric.ID = metric.UUID
|
||||
metric.CreatedBy = SYSTEM
|
||||
metric.UpdatedBy = SYSTEM
|
||||
|
||||
@@ -118,7 +118,7 @@ func (s *Set) updateTargets(m map[string]models.HostMeta) error {
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
err := storage.MSet(context.Background(), s.redis, newMap)
|
||||
err := storage.MSet(context.Background(), s.redis, newMap, 7*24*time.Hour)
|
||||
if err != nil {
|
||||
cstats.RedisOperationLatency.WithLabelValues("mset_target_meta", "fail").Observe(time.Since(start).Seconds())
|
||||
return err
|
||||
@@ -127,7 +127,7 @@ func (s *Set) updateTargets(m map[string]models.HostMeta) error {
|
||||
}
|
||||
|
||||
if len(extendMap) > 0 {
|
||||
err = storage.MSet(context.Background(), s.redis, extendMap)
|
||||
err = storage.MSet(context.Background(), s.redis, extendMap, 7*24*time.Hour)
|
||||
if err != nil {
|
||||
cstats.RedisOperationLatency.WithLabelValues("mset_target_extend", "fail").Observe(time.Since(start).Seconds())
|
||||
return err
|
||||
|
||||
@@ -251,10 +251,12 @@ func (rt *Router) Config(r *gin.Engine) {
|
||||
pages.GET("/auth/redirect/cas", rt.loginRedirectCas)
|
||||
pages.GET("/auth/redirect/oauth", rt.loginRedirectOAuth)
|
||||
pages.GET("/auth/redirect/dingtalk", rt.loginRedirectDingTalk)
|
||||
pages.GET("/auth/redirect/feishu", rt.loginRedirectFeiShu)
|
||||
pages.GET("/auth/callback", rt.loginCallback)
|
||||
pages.GET("/auth/callback/cas", rt.loginCallbackCas)
|
||||
pages.GET("/auth/callback/oauth", rt.loginCallbackOAuth)
|
||||
pages.GET("/auth/callback/dingtalk", rt.loginCallbackDingTalk)
|
||||
pages.GET("/auth/callback/feishu", rt.loginCallbackFeiShu)
|
||||
pages.GET("/auth/perms", rt.allPerms)
|
||||
|
||||
pages.GET("/metrics/desc", rt.metricsDescGetFile)
|
||||
@@ -389,8 +391,8 @@ func (rt *Router) Config(r *gin.Engine) {
|
||||
pages.GET("/busi-group/:id/recording-rules", rt.auth(), rt.user(), rt.perm("/recording-rules"), rt.recordingRuleGets)
|
||||
pages.POST("/busi-group/:id/recording-rules", rt.auth(), rt.user(), rt.perm("/recording-rules/add"), rt.bgrw(), rt.recordingRuleAddByFE)
|
||||
pages.DELETE("/busi-group/:id/recording-rules", rt.auth(), rt.user(), rt.perm("/recording-rules/del"), rt.bgrw(), rt.recordingRuleDel)
|
||||
pages.PUT("/busi-group/:id/recording-rule/:rrid", rt.auth(), rt.user(), rt.perm("/recording-rules/put"), rt.bgrw(), rt.recordingRulePutByFE)
|
||||
pages.GET("/recording-rule/:rrid", rt.auth(), rt.user(), rt.perm("/recording-rules"), rt.recordingRuleGet)
|
||||
pages.PUT("/recording-rule/:rrid", rt.auth(), rt.user(), rt.perm("/recording-rules"), rt.recordingRulePutByFE)
|
||||
pages.PUT("/busi-group/:id/recording-rules/fields", rt.auth(), rt.user(), rt.perm("/recording-rules/put"), rt.recordingRulePutFields)
|
||||
|
||||
pages.GET("/busi-groups/alert-mutes", rt.auth(), rt.user(), rt.perm("/alert-mutes"), rt.alertMuteGetsByGids)
|
||||
@@ -705,6 +707,7 @@ func (rt *Router) Config(r *gin.Engine) {
|
||||
service.GET("/event-pipelines", rt.eventPipelinesListByService)
|
||||
service.POST("/event-pipeline/:id/trigger", rt.triggerEventPipelineByService)
|
||||
service.POST("/event-pipeline/:id/stream", rt.streamEventPipelineByService)
|
||||
service.POST("/event-pipeline-execution", rt.eventPipelineExecutionAdd)
|
||||
|
||||
// 手机号加密存储配置接口
|
||||
service.POST("/users/phone/encrypt", rt.usersPhoneEncrypt)
|
||||
|
||||
@@ -36,6 +36,7 @@ func (rt *Router) alertRuleGets(c *gin.Context) {
|
||||
for i := 0; i < len(ars); i++ {
|
||||
ars[i].FillNotifyGroups(rt.Ctx, cache)
|
||||
}
|
||||
models.FillUpdateByNicknames(rt.Ctx, ars)
|
||||
}
|
||||
ginx.NewRender(c).Data(ars, err)
|
||||
}
|
||||
@@ -76,7 +77,6 @@ func (rt *Router) alertRuleGetsByGids(c *gin.Context) {
|
||||
if err == nil {
|
||||
cache := make(map[int64]*models.UserGroup)
|
||||
rids := make([]int64, 0, len(ars))
|
||||
names := make([]string, 0, len(ars))
|
||||
for i := 0; i < len(ars); i++ {
|
||||
ars[i].FillNotifyGroups(rt.Ctx, cache)
|
||||
|
||||
@@ -85,7 +85,6 @@ func (rt *Router) alertRuleGetsByGids(c *gin.Context) {
|
||||
}
|
||||
|
||||
rids = append(rids, ars[i].Id)
|
||||
names = append(names, ars[i].UpdateBy)
|
||||
}
|
||||
|
||||
stime, etime := GetAlertCueEventTimeRange(c)
|
||||
@@ -96,14 +95,7 @@ func (rt *Router) alertRuleGetsByGids(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
users := models.UserMapGet(rt.Ctx, "username in (?)", names)
|
||||
if users != nil {
|
||||
for i := 0; i < len(ars); i++ {
|
||||
if user, exist := users[ars[i].UpdateBy]; exist {
|
||||
ars[i].UpdateByNickname = user.Nickname
|
||||
}
|
||||
}
|
||||
}
|
||||
models.FillUpdateByNicknames(rt.Ctx, ars)
|
||||
}
|
||||
ginx.NewRender(c).Data(ars, err)
|
||||
}
|
||||
@@ -135,6 +127,7 @@ func (rt *Router) alertRulesGetByService(c *gin.Context) {
|
||||
ars[i].DatasourceIdsJson = rt.DatasourceCache.GetIDsByDsCateAndQueries(ars[i].Cate, ars[i].DatasourceQueries)
|
||||
}
|
||||
}
|
||||
models.FillUpdateByNicknames(rt.Ctx, ars)
|
||||
}
|
||||
ginx.NewRender(c).Data(ars, err)
|
||||
}
|
||||
|
||||
@@ -30,6 +30,7 @@ func (rt *Router) alertSubscribeGets(c *gin.Context) {
|
||||
ginx.Dangerous(lst[i].FillDatasourceIds(rt.Ctx))
|
||||
ginx.Dangerous(lst[i].DB2FE())
|
||||
}
|
||||
models.FillUpdateByNicknames(rt.Ctx, lst)
|
||||
|
||||
ginx.NewRender(c).Data(lst, err)
|
||||
}
|
||||
@@ -66,6 +67,7 @@ func (rt *Router) alertSubscribeGetsByGids(c *gin.Context) {
|
||||
ginx.Dangerous(lst[i].FillDatasourceIds(rt.Ctx))
|
||||
ginx.Dangerous(lst[i].DB2FE())
|
||||
}
|
||||
models.FillUpdateByNicknames(rt.Ctx, lst)
|
||||
|
||||
ginx.NewRender(c).Data(lst, err)
|
||||
}
|
||||
|
||||
@@ -260,6 +260,9 @@ func (rt *Router) boardGets(c *gin.Context) {
|
||||
query := ginx.QueryStr(c, "query", "")
|
||||
|
||||
boards, err := models.BoardGetsByGroupId(rt.Ctx, bgid, query)
|
||||
if err == nil {
|
||||
models.FillUpdateByNicknames(rt.Ctx, boards)
|
||||
}
|
||||
ginx.NewRender(c).Data(boards, err)
|
||||
}
|
||||
|
||||
@@ -273,6 +276,9 @@ func (rt *Router) publicBoardGets(c *gin.Context) {
|
||||
ginx.Dangerous(err)
|
||||
|
||||
boards, err := models.BoardGets(rt.Ctx, "", "public=1 and (public_cate in (?) or id in (?))", []int64{0, 1}, boardIds)
|
||||
if err == nil {
|
||||
models.FillUpdateByNicknames(rt.Ctx, boards)
|
||||
}
|
||||
ginx.NewRender(c).Data(boards, err)
|
||||
}
|
||||
|
||||
@@ -312,6 +318,7 @@ func (rt *Router) boardGetsByGids(c *gin.Context) {
|
||||
boards[i].Bgids = ids
|
||||
}
|
||||
}
|
||||
models.FillUpdateByNicknames(rt.Ctx, boards)
|
||||
|
||||
ginx.NewRender(c).Data(boards, err)
|
||||
}
|
||||
|
||||
@@ -27,6 +27,8 @@ func (rt *Router) metricFilterGets(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
models.FillUpdateByNicknames(rt.Ctx, arr)
|
||||
|
||||
ginx.NewRender(c).Data(arr, err)
|
||||
}
|
||||
|
||||
|
||||
@@ -119,6 +119,9 @@ func (rt *Router) busiGroupGets(c *gin.Context) {
|
||||
if len(lst) == 0 {
|
||||
lst = []models.BusiGroup{}
|
||||
}
|
||||
if err == nil {
|
||||
models.FillUpdateByNicknames(rt.Ctx, lst)
|
||||
}
|
||||
|
||||
ginx.NewRender(c).Data(lst, err)
|
||||
}
|
||||
|
||||
@@ -15,6 +15,9 @@ func (rt *Router) configsGet(c *gin.Context) {
|
||||
prefix := ginx.QueryStr(c, "prefix", "")
|
||||
limit := ginx.QueryInt(c, "limit", 10)
|
||||
configs, err := models.ConfigsGets(rt.Ctx, prefix, limit, ginx.Offset(c, limit))
|
||||
if err == nil {
|
||||
models.FillUpdateByNicknames(rt.Ctx, configs)
|
||||
}
|
||||
ginx.NewRender(c).Data(configs, err)
|
||||
}
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
func (rt *Router) embeddedProductGets(c *gin.Context) {
|
||||
products, err := models.EmbeddedProductGets(rt.Ctx)
|
||||
ginx.Dangerous(err)
|
||||
models.FillUpdateByNicknames(rt.Ctx, products)
|
||||
// 获取当前用户可访问的Group ID 列表
|
||||
me := c.MustGet("user").(*models.User)
|
||||
|
||||
|
||||
@@ -69,6 +69,10 @@ func (rt *Router) esIndexPatternGetList(c *gin.Context) {
|
||||
lst, err = models.EsIndexPatternGets(rt.Ctx, "")
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
models.FillUpdateByNicknames(rt.Ctx, lst)
|
||||
}
|
||||
|
||||
ginx.NewRender(c).Data(lst, err)
|
||||
}
|
||||
|
||||
|
||||
@@ -35,6 +35,7 @@ func (rt *Router) eventPipelinesList(c *gin.Context) {
|
||||
// 兼容处理:自动填充工作流字段
|
||||
pipeline.FillWorkflowFields()
|
||||
}
|
||||
models.FillUpdateByNicknames(rt.Ctx, pipelines)
|
||||
|
||||
gids, err := models.MyGroupIdsMap(rt.Ctx, me.Id)
|
||||
ginx.Dangerous(err)
|
||||
@@ -164,7 +165,7 @@ func (rt *Router) tryRunEventPipeline(c *gin.Context) {
|
||||
var f struct {
|
||||
EventId int64 `json:"event_id"`
|
||||
PipelineConfig models.EventPipeline `json:"pipeline_config"`
|
||||
EnvVariables map[string]string `json:"env_variables,omitempty"`
|
||||
InputVariables map[string]string `json:"input_variables,omitempty"`
|
||||
}
|
||||
|
||||
ginx.BindJSON(c, &f)
|
||||
@@ -182,9 +183,9 @@ func (rt *Router) tryRunEventPipeline(c *gin.Context) {
|
||||
workflowEngine := engine.NewWorkflowEngine(rt.Ctx)
|
||||
|
||||
triggerCtx := &models.WorkflowTriggerContext{
|
||||
Mode: models.TriggerModeAPI,
|
||||
TriggerBy: me.Username,
|
||||
EnvOverrides: f.EnvVariables,
|
||||
Mode: models.TriggerModeAPI,
|
||||
TriggerBy: me.Username,
|
||||
InputsOverrides: f.InputVariables,
|
||||
}
|
||||
|
||||
resultEvent, result, err := workflowEngine.Execute(&f.PipelineConfig, event, triggerCtx)
|
||||
@@ -302,8 +303,8 @@ func (rt *Router) eventPipelinesListByService(c *gin.Context) {
|
||||
type EventPipelineRequest struct {
|
||||
// 事件数据(可选,如果不传则使用空事件)
|
||||
Event *models.AlertCurEvent `json:"event,omitempty"`
|
||||
// 环境变量覆盖
|
||||
EnvOverrides map[string]string `json:"env_overrides,omitempty"`
|
||||
// 输入参数覆盖
|
||||
InputsOverrides map[string]string `json:"inputs_overrides,omitempty"`
|
||||
|
||||
Username string `json:"username,omitempty"`
|
||||
}
|
||||
@@ -321,20 +322,15 @@ func (rt *Router) executePipelineTrigger(pipeline *models.EventPipeline, req *Ev
|
||||
}
|
||||
}
|
||||
|
||||
// 校验必填环境变量
|
||||
if err := pipeline.ValidateEnvVariables(req.EnvOverrides); err != nil {
|
||||
return "", fmt.Errorf("env validation failed: %v", err)
|
||||
}
|
||||
|
||||
// 生成执行ID
|
||||
executionID := uuid.New().String()
|
||||
|
||||
// 创建触发上下文
|
||||
triggerCtx := &models.WorkflowTriggerContext{
|
||||
Mode: models.TriggerModeAPI,
|
||||
TriggerBy: triggerBy,
|
||||
EnvOverrides: req.EnvOverrides,
|
||||
RequestID: executionID,
|
||||
Mode: models.TriggerModeAPI,
|
||||
TriggerBy: triggerBy,
|
||||
InputsOverrides: req.InputsOverrides,
|
||||
RequestID: executionID,
|
||||
}
|
||||
|
||||
// 异步执行工作流
|
||||
@@ -401,6 +397,7 @@ func (rt *Router) triggerEventPipelineByAPI(c *gin.Context) {
|
||||
}
|
||||
|
||||
func (rt *Router) listAllEventPipelineExecutions(c *gin.Context) {
|
||||
pipelineId := ginx.QueryInt64(c, "pipeline_id", 0)
|
||||
pipelineName := ginx.QueryStr(c, "pipeline_name", "")
|
||||
mode := ginx.QueryStr(c, "mode", "")
|
||||
status := ginx.QueryStr(c, "status", "")
|
||||
@@ -414,7 +411,7 @@ func (rt *Router) listAllEventPipelineExecutions(c *gin.Context) {
|
||||
offset = 1
|
||||
}
|
||||
|
||||
executions, total, err := models.ListAllEventPipelineExecutions(rt.Ctx, pipelineName, mode, status, limit, (offset-1)*limit)
|
||||
executions, total, err := models.ListAllEventPipelineExecutions(rt.Ctx, pipelineId, pipelineName, mode, status, limit, (offset-1)*limit)
|
||||
ginx.Dangerous(err)
|
||||
|
||||
ginx.NewRender(c).Data(gin.H{
|
||||
@@ -509,11 +506,11 @@ func (rt *Router) streamEventPipeline(c *gin.Context) {
|
||||
}
|
||||
|
||||
triggerCtx := &models.WorkflowTriggerContext{
|
||||
Mode: models.TriggerModeAPI,
|
||||
TriggerBy: me.Username,
|
||||
EnvOverrides: f.EnvOverrides,
|
||||
RequestID: uuid.New().String(),
|
||||
Stream: true, // 流式端点强制启用流式输出
|
||||
Mode: models.TriggerModeAPI,
|
||||
TriggerBy: me.Username,
|
||||
InputsOverrides: f.InputsOverrides,
|
||||
RequestID: uuid.New().String(),
|
||||
Stream: true, // 流式端点强制启用流式输出
|
||||
}
|
||||
|
||||
workflowEngine := engine.NewWorkflowEngine(rt.Ctx)
|
||||
@@ -604,11 +601,11 @@ func (rt *Router) streamEventPipelineByService(c *gin.Context) {
|
||||
}
|
||||
|
||||
triggerCtx := &models.WorkflowTriggerContext{
|
||||
Mode: models.TriggerModeAPI,
|
||||
TriggerBy: f.Username,
|
||||
EnvOverrides: f.EnvOverrides,
|
||||
RequestID: uuid.New().String(),
|
||||
Stream: true, // 流式端点强制启用流式输出
|
||||
Mode: models.TriggerModeAPI,
|
||||
TriggerBy: f.Username,
|
||||
InputsOverrides: f.InputsOverrides,
|
||||
RequestID: uuid.New().String(),
|
||||
Stream: true, // 流式端点强制启用流式输出
|
||||
}
|
||||
|
||||
workflowEngine := engine.NewWorkflowEngine(rt.Ctx)
|
||||
@@ -625,3 +622,18 @@ func (rt *Router) streamEventPipelineByService(c *gin.Context) {
|
||||
|
||||
ginx.NewRender(c).Data(result, nil)
|
||||
}
|
||||
|
||||
// eventPipelineExecutionAdd 接收 edge 节点同步的 Pipeline 执行记录
|
||||
func (rt *Router) eventPipelineExecutionAdd(c *gin.Context) {
|
||||
var execution models.EventPipelineExecution
|
||||
ginx.BindJSON(c, &execution)
|
||||
|
||||
if execution.ID == "" {
|
||||
ginx.Bomb(http.StatusBadRequest, "id is required")
|
||||
}
|
||||
if execution.PipelineID <= 0 {
|
||||
ginx.Bomb(http.StatusBadRequest, "pipeline_id is required")
|
||||
}
|
||||
|
||||
ginx.NewRender(c).Message(models.DB(rt.Ctx).Create(&execution).Error)
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/ccfos/nightingale/v6/models"
|
||||
"github.com/ccfos/nightingale/v6/pkg/cas"
|
||||
"github.com/ccfos/nightingale/v6/pkg/dingtalk"
|
||||
"github.com/ccfos/nightingale/v6/pkg/feishu"
|
||||
"github.com/ccfos/nightingale/v6/pkg/ldapx"
|
||||
"github.com/ccfos/nightingale/v6/pkg/oauth2x"
|
||||
"github.com/ccfos/nightingale/v6/pkg/oidcx"
|
||||
@@ -519,6 +520,95 @@ func (rt *Router) loginCallbackDingTalk(c *gin.Context) {
|
||||
|
||||
}
|
||||
|
||||
func (rt *Router) loginRedirectFeiShu(c *gin.Context) {
|
||||
redirect := ginx.QueryStr(c, "redirect", "/")
|
||||
|
||||
v, exists := c.Get("userid")
|
||||
if exists {
|
||||
userid := v.(int64)
|
||||
user, err := models.UserGetById(rt.Ctx, userid)
|
||||
ginx.Dangerous(err)
|
||||
if user == nil {
|
||||
ginx.Bomb(200, "user not found")
|
||||
}
|
||||
|
||||
if user.Username != "" { // already login
|
||||
ginx.NewRender(c).Data(redirect, nil)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if rt.Sso.FeiShu == nil || !rt.Sso.FeiShu.Enable {
|
||||
ginx.NewRender(c).Data("", nil)
|
||||
return
|
||||
}
|
||||
|
||||
redirect, err := rt.Sso.FeiShu.Authorize(rt.Redis, redirect)
|
||||
ginx.Dangerous(err)
|
||||
|
||||
ginx.NewRender(c).Data(redirect, err)
|
||||
}
|
||||
|
||||
func (rt *Router) loginCallbackFeiShu(c *gin.Context) {
|
||||
code := ginx.QueryStr(c, "code", "")
|
||||
state := ginx.QueryStr(c, "state", "")
|
||||
|
||||
ret, err := rt.Sso.FeiShu.Callback(rt.Redis, c.Request.Context(), code, state)
|
||||
if err != nil {
|
||||
logger.Errorf("sso_callback FeiShu fail. code:%s, state:%s, get ret: %+v. error: %v", code, state, ret, err)
|
||||
ginx.NewRender(c).Data(CallbackOutput{}, err)
|
||||
return
|
||||
}
|
||||
|
||||
user, err := models.UserGet(rt.Ctx, "username=?", ret.Username)
|
||||
ginx.Dangerous(err)
|
||||
|
||||
if user != nil {
|
||||
if rt.Sso.FeiShu != nil && rt.Sso.FeiShu.FeiShuConfig != nil && rt.Sso.FeiShu.FeiShuConfig.CoverAttributes {
|
||||
updatedFields := user.UpdateSsoFields(feishu.SsoTypeName, ret.Nickname, ret.Phone, ret.Email)
|
||||
ginx.Dangerous(user.Update(rt.Ctx, "update_at", updatedFields...))
|
||||
}
|
||||
} else {
|
||||
user = new(models.User)
|
||||
defaultRoles := []string{}
|
||||
defaultUserGroups := []int64{}
|
||||
if rt.Sso.FeiShu != nil && rt.Sso.FeiShu.FeiShuConfig != nil {
|
||||
defaultRoles = rt.Sso.FeiShu.FeiShuConfig.DefaultRoles
|
||||
defaultUserGroups = rt.Sso.FeiShu.FeiShuConfig.DefaultUserGroups
|
||||
}
|
||||
|
||||
user.FullSsoFields(feishu.SsoTypeName, ret.Username, ret.Nickname, ret.Phone, ret.Email, defaultRoles)
|
||||
ginx.Dangerous(user.Add(rt.Ctx))
|
||||
|
||||
if len(defaultUserGroups) > 0 {
|
||||
err = user.AddToUserGroups(rt.Ctx, defaultUserGroups)
|
||||
if err != nil {
|
||||
logger.Errorf("sso feishu add user group error %v", ret, err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// set user login state
|
||||
userIdentity := fmt.Sprintf("%d-%s", user.Id, user.Username)
|
||||
ts, err := rt.createTokens(rt.HTTP.JWTAuth.SigningKey, userIdentity)
|
||||
ginx.Dangerous(err)
|
||||
ginx.Dangerous(rt.createAuth(c.Request.Context(), userIdentity, ts))
|
||||
|
||||
redirect := "/"
|
||||
if ret.Redirect != "/login" {
|
||||
redirect = ret.Redirect
|
||||
}
|
||||
|
||||
ginx.NewRender(c).Data(CallbackOutput{
|
||||
Redirect: redirect,
|
||||
User: user,
|
||||
AccessToken: ts.AccessToken,
|
||||
RefreshToken: ts.RefreshToken,
|
||||
}, nil)
|
||||
|
||||
}
|
||||
|
||||
func (rt *Router) loginCallbackOAuth(c *gin.Context) {
|
||||
code := ginx.QueryStr(c, "code", "")
|
||||
state := ginx.QueryStr(c, "state", "")
|
||||
@@ -569,10 +659,11 @@ type SsoConfigOutput struct {
|
||||
CasDisplayName string `json:"casDisplayName"`
|
||||
OauthDisplayName string `json:"oauthDisplayName"`
|
||||
DingTalkDisplayName string `json:"dingTalkDisplayName"`
|
||||
FeiShuDisplayName string `json:"feishuDisplayName"`
|
||||
}
|
||||
|
||||
func (rt *Router) ssoConfigNameGet(c *gin.Context) {
|
||||
var oidcDisplayName, casDisplayName, oauthDisplayName, dingTalkDisplayName string
|
||||
var oidcDisplayName, casDisplayName, oauthDisplayName, dingTalkDisplayName, feiShuDisplayName string
|
||||
if rt.Sso.OIDC != nil {
|
||||
oidcDisplayName = rt.Sso.OIDC.GetDisplayName()
|
||||
}
|
||||
@@ -589,11 +680,16 @@ func (rt *Router) ssoConfigNameGet(c *gin.Context) {
|
||||
dingTalkDisplayName = rt.Sso.DingTalk.GetDisplayName()
|
||||
}
|
||||
|
||||
if rt.Sso.FeiShu != nil {
|
||||
feiShuDisplayName = rt.Sso.FeiShu.GetDisplayName()
|
||||
}
|
||||
|
||||
ginx.NewRender(c).Data(SsoConfigOutput{
|
||||
OidcDisplayName: oidcDisplayName,
|
||||
CasDisplayName: casDisplayName,
|
||||
OauthDisplayName: oauthDisplayName,
|
||||
DingTalkDisplayName: dingTalkDisplayName,
|
||||
FeiShuDisplayName: feiShuDisplayName,
|
||||
}, nil)
|
||||
}
|
||||
|
||||
@@ -608,6 +704,7 @@ func (rt *Router) ssoConfigGets(c *gin.Context) {
|
||||
|
||||
// TODO: dingTalkExist 为了兼容当前前端配置, 后期单点登陆统一调整后不在预先设置默认内容
|
||||
dingTalkExist := false
|
||||
feiShuExist := false
|
||||
for _, config := range lst {
|
||||
var ssoReqConfig models.SsoConfig
|
||||
ssoReqConfig.Id = config.Id
|
||||
@@ -618,6 +715,10 @@ func (rt *Router) ssoConfigGets(c *gin.Context) {
|
||||
dingTalkExist = true
|
||||
err := json.Unmarshal([]byte(config.Content), &ssoReqConfig.SettingJson)
|
||||
ginx.Dangerous(err)
|
||||
case feishu.SsoTypeName:
|
||||
feiShuExist = true
|
||||
err := json.Unmarshal([]byte(config.Content), &ssoReqConfig.SettingJson)
|
||||
ginx.Dangerous(err)
|
||||
default:
|
||||
ssoReqConfig.Content = config.Content
|
||||
}
|
||||
@@ -630,6 +731,11 @@ func (rt *Router) ssoConfigGets(c *gin.Context) {
|
||||
ssoConfig.Name = dingtalk.SsoTypeName
|
||||
ssoConfigs = append(ssoConfigs, ssoConfig)
|
||||
}
|
||||
if !feiShuExist {
|
||||
var ssoConfig models.SsoConfig
|
||||
ssoConfig.Name = feishu.SsoTypeName
|
||||
ssoConfigs = append(ssoConfigs, ssoConfig)
|
||||
}
|
||||
|
||||
ginx.NewRender(c).Data(ssoConfigs, nil)
|
||||
}
|
||||
@@ -657,6 +763,23 @@ func (rt *Router) ssoConfigUpdate(c *gin.Context) {
|
||||
err = f.Update(rt.Ctx)
|
||||
}
|
||||
ginx.Dangerous(err)
|
||||
case feishu.SsoTypeName:
|
||||
f.Name = ssoConfig.Name
|
||||
setting, err := json.Marshal(ssoConfig.SettingJson)
|
||||
ginx.Dangerous(err)
|
||||
f.Content = string(setting)
|
||||
f.UpdateAt = time.Now().Unix()
|
||||
sso, err := f.Query(rt.Ctx)
|
||||
if !errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
ginx.Dangerous(err)
|
||||
}
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
err = f.Create(rt.Ctx)
|
||||
} else {
|
||||
f.Id = sso.Id
|
||||
err = f.Update(rt.Ctx)
|
||||
}
|
||||
ginx.Dangerous(err)
|
||||
default:
|
||||
f.Id = ssoConfig.Id
|
||||
f.Name = ssoConfig.Name
|
||||
@@ -695,6 +818,14 @@ func (rt *Router) ssoConfigUpdate(c *gin.Context) {
|
||||
rt.Sso.DingTalk = dingtalk.New(config)
|
||||
}
|
||||
rt.Sso.DingTalk.Reload(config)
|
||||
case feishu.SsoTypeName:
|
||||
var config feishu.Config
|
||||
err := json.Unmarshal([]byte(f.Content), &config)
|
||||
ginx.Dangerous(err)
|
||||
if rt.Sso.FeiShu == nil {
|
||||
rt.Sso.FeiShu = feishu.New(config)
|
||||
}
|
||||
rt.Sso.FeiShu.Reload(config)
|
||||
}
|
||||
|
||||
ginx.NewRender(c).Message(nil)
|
||||
|
||||
@@ -154,6 +154,7 @@ func (rt *Router) messageTemplatesGet(c *gin.Context) {
|
||||
|
||||
lst, err := models.MessageTemplatesGetBy(rt.Ctx, notifyChannelIdents)
|
||||
ginx.Dangerous(err)
|
||||
models.FillUpdateByNicknames(rt.Ctx, lst)
|
||||
|
||||
if me.IsAdmin() {
|
||||
ginx.NewRender(c).Data(lst, nil)
|
||||
|
||||
@@ -22,6 +22,9 @@ func (rt *Router) alertMuteGetsByBG(c *gin.Context) {
|
||||
query := ginx.QueryStr(c, "query", "")
|
||||
expired := ginx.QueryInt(c, "expired", -1)
|
||||
lst, err := models.AlertMuteGets(rt.Ctx, prods, bgid, -1, expired, query)
|
||||
if err == nil {
|
||||
models.FillUpdateByNicknames(rt.Ctx, lst)
|
||||
}
|
||||
|
||||
ginx.NewRender(c).Data(lst, err)
|
||||
}
|
||||
@@ -47,6 +50,9 @@ func (rt *Router) alertMuteGetsByGids(c *gin.Context) {
|
||||
}
|
||||
|
||||
lst, err := models.AlertMuteGetsByBGIds(rt.Ctx, gids)
|
||||
if err == nil {
|
||||
models.FillUpdateByNicknames(rt.Ctx, lst)
|
||||
}
|
||||
|
||||
ginx.NewRender(c).Data(lst, err)
|
||||
}
|
||||
@@ -58,6 +64,9 @@ func (rt *Router) alertMuteGets(c *gin.Context) {
|
||||
disabled := ginx.QueryInt(c, "disabled", -1)
|
||||
expired := ginx.QueryInt(c, "expired", -1)
|
||||
lst, err := models.AlertMuteGets(rt.Ctx, prods, bgid, disabled, expired, query)
|
||||
if err == nil {
|
||||
models.FillUpdateByNicknames(rt.Ctx, lst)
|
||||
}
|
||||
|
||||
ginx.NewRender(c).Data(lst, err)
|
||||
}
|
||||
|
||||
@@ -118,6 +118,9 @@ func (rt *Router) notifyChannelGetBy(c *gin.Context) {
|
||||
|
||||
func (rt *Router) notifyChannelsGet(c *gin.Context) {
|
||||
lst, err := models.NotifyChannelsGet(rt.Ctx, "", nil)
|
||||
if err == nil {
|
||||
models.FillUpdateByNicknames(rt.Ctx, lst)
|
||||
}
|
||||
ginx.NewRender(c).Data(lst, err)
|
||||
}
|
||||
|
||||
|
||||
@@ -118,6 +118,7 @@ func (rt *Router) notifyRulesGet(c *gin.Context) {
|
||||
|
||||
lst, err := models.NotifyRulesGet(rt.Ctx, "", nil)
|
||||
ginx.Dangerous(err)
|
||||
models.FillUpdateByNicknames(rt.Ctx, lst)
|
||||
if me.IsAdmin() {
|
||||
ginx.NewRender(c).Data(lst, nil)
|
||||
return
|
||||
|
||||
@@ -25,11 +25,14 @@ func (rt *Router) notifyTplGets(c *gin.Context) {
|
||||
m[models.EmailSubject] = struct{}{}
|
||||
|
||||
lst, err := models.NotifyTplGets(rt.Ctx)
|
||||
ginx.Dangerous(err)
|
||||
|
||||
for i := 0; i < len(lst); i++ {
|
||||
if _, exists := m[lst[i].Channel]; exists {
|
||||
lst[i].BuiltIn = true
|
||||
}
|
||||
}
|
||||
models.FillUpdateByNicknames(rt.Ctx, lst)
|
||||
|
||||
ginx.NewRender(c).Data(lst, err)
|
||||
}
|
||||
@@ -200,6 +203,9 @@ func (rt *Router) messageTemplateGets(c *gin.Context) {
|
||||
ident := ginx.QueryStr(c, "ident", "")
|
||||
|
||||
tpls, err := models.MessageTemplateGets(rt.Ctx, id, name, ident)
|
||||
if err == nil {
|
||||
models.FillUpdateByNicknames(rt.Ctx, tpls)
|
||||
}
|
||||
|
||||
ginx.NewRender(c).Data(tpls, err)
|
||||
}
|
||||
|
||||
@@ -15,6 +15,9 @@ import (
|
||||
func (rt *Router) recordingRuleGets(c *gin.Context) {
|
||||
busiGroupId := ginx.UrlParamInt64(c, "id")
|
||||
ars, err := models.RecordingRuleGets(rt.Ctx, busiGroupId)
|
||||
if err == nil {
|
||||
models.FillUpdateByNicknames(rt.Ctx, ars)
|
||||
}
|
||||
ginx.NewRender(c).Data(ars, err)
|
||||
}
|
||||
|
||||
@@ -39,6 +42,9 @@ func (rt *Router) recordingRuleGetsByGids(c *gin.Context) {
|
||||
}
|
||||
|
||||
ars, err := models.RecordingRuleGetsByBGIds(rt.Ctx, gids)
|
||||
if err == nil {
|
||||
models.FillUpdateByNicknames(rt.Ctx, ars)
|
||||
}
|
||||
ginx.NewRender(c).Data(ars, err)
|
||||
}
|
||||
|
||||
@@ -112,6 +118,7 @@ func (rt *Router) recordingRulePutByFE(c *gin.Context) {
|
||||
}
|
||||
|
||||
rt.bgrwCheck(c, ar.GroupId)
|
||||
rt.bgroCheck(c, f.GroupId)
|
||||
|
||||
f.UpdateBy = c.MustGet("username").(string)
|
||||
ginx.NewRender(c).Message(ar.Update(rt.Ctx, f))
|
||||
|
||||
@@ -20,6 +20,7 @@ func (rt *Router) savedViewGets(c *gin.Context) {
|
||||
ginx.NewRender(c).Data(nil, err)
|
||||
return
|
||||
}
|
||||
models.FillUpdateByNicknames(rt.Ctx, lst)
|
||||
|
||||
userGids, err := models.MyGroupIds(rt.Ctx, me.Id)
|
||||
if err != nil {
|
||||
|
||||
@@ -25,6 +25,7 @@ func (rt *Router) taskTplGets(c *gin.Context) {
|
||||
|
||||
list, err := models.TaskTplGets(rt.Ctx, []int64{groupId}, query, limit, ginx.Offset(c, limit))
|
||||
ginx.Dangerous(err)
|
||||
models.FillUpdateByNicknames(rt.Ctx, list)
|
||||
|
||||
ginx.NewRender(c).Data(gin.H{
|
||||
"total": total,
|
||||
@@ -60,6 +61,7 @@ func (rt *Router) taskTplGetsByGids(c *gin.Context) {
|
||||
|
||||
list, err := models.TaskTplGets(rt.Ctx, gids, query, limit, ginx.Offset(c, limit))
|
||||
ginx.Dangerous(err)
|
||||
models.FillUpdateByNicknames(rt.Ctx, list)
|
||||
|
||||
ginx.NewRender(c).Data(gin.H{
|
||||
"total": total,
|
||||
|
||||
@@ -27,6 +27,9 @@ func (rt *Router) userGroupGets(c *gin.Context) {
|
||||
|
||||
me := c.MustGet("user").(*models.User)
|
||||
lst, err := me.UserGroups(rt.Ctx, limit, query)
|
||||
if err == nil {
|
||||
models.FillUpdateByNicknames(rt.Ctx, lst)
|
||||
}
|
||||
|
||||
ginx.NewRender(c).Data(lst, err)
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/ccfos/nightingale/v6/pkg/cas"
|
||||
"github.com/ccfos/nightingale/v6/pkg/ctx"
|
||||
"github.com/ccfos/nightingale/v6/pkg/dingtalk"
|
||||
"github.com/ccfos/nightingale/v6/pkg/feishu"
|
||||
"github.com/ccfos/nightingale/v6/pkg/ldapx"
|
||||
"github.com/ccfos/nightingale/v6/pkg/oauth2x"
|
||||
"github.com/ccfos/nightingale/v6/pkg/oidcx"
|
||||
@@ -27,6 +28,7 @@ type SsoClient struct {
|
||||
CAS *cas.SsoClient
|
||||
OAuth2 *oauth2x.SsoClient
|
||||
DingTalk *dingtalk.SsoClient
|
||||
FeiShu *feishu.SsoClient
|
||||
LastUpdateTime int64
|
||||
configCache *memsto.ConfigCache
|
||||
configLastUpdateTime int64
|
||||
@@ -203,6 +205,13 @@ func Init(center cconf.Center, ctx *ctx.Context, configCache *memsto.ConfigCache
|
||||
log.Fatalf("init %s failed: %s", dingtalk.SsoTypeName, err)
|
||||
}
|
||||
ssoClient.DingTalk = dingtalk.New(config)
|
||||
case feishu.SsoTypeName:
|
||||
var config feishu.Config
|
||||
err := json.Unmarshal([]byte(cfg.Content), &config)
|
||||
if err != nil {
|
||||
log.Fatalf("init %s failed: %s", feishu.SsoTypeName, err)
|
||||
}
|
||||
ssoClient.FeiShu = feishu.New(config)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -291,6 +300,22 @@ func (s *SsoClient) reload(ctx *ctx.Context) error {
|
||||
s.DingTalk = nil
|
||||
}
|
||||
|
||||
if feiShuConfig, ok := ssoConfigMap[feishu.SsoTypeName]; ok {
|
||||
var config feishu.Config
|
||||
err := json.Unmarshal([]byte(feiShuConfig.Content), &config)
|
||||
if err != nil {
|
||||
logger.Warningf("reload %s failed: %s", feishu.SsoTypeName, err)
|
||||
} else {
|
||||
if s.FeiShu != nil {
|
||||
s.FeiShu.Reload(config)
|
||||
} else {
|
||||
s.FeiShu = feishu.New(config)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
s.FeiShu = nil
|
||||
}
|
||||
|
||||
s.LastUpdateTime = lastUpdateTime
|
||||
s.configLastUpdateTime = lastCacheUpdateTime
|
||||
return nil
|
||||
|
||||
@@ -79,52 +79,19 @@ func (d *Doris) Equal(p datasource.Datasource) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// only compare first shard
|
||||
if d.Addr != newest.Addr {
|
||||
return false
|
||||
}
|
||||
|
||||
if d.User != newest.User {
|
||||
return false
|
||||
}
|
||||
|
||||
if d.Password != newest.Password {
|
||||
return false
|
||||
}
|
||||
|
||||
if d.EnableWrite != newest.EnableWrite {
|
||||
return false
|
||||
}
|
||||
|
||||
if d.FeAddr != newest.FeAddr {
|
||||
return false
|
||||
}
|
||||
|
||||
if d.MaxQueryRows != newest.MaxQueryRows {
|
||||
return false
|
||||
}
|
||||
|
||||
if d.Timeout != newest.Timeout {
|
||||
return false
|
||||
}
|
||||
|
||||
if d.MaxIdleConns != newest.MaxIdleConns {
|
||||
return false
|
||||
}
|
||||
|
||||
if d.MaxOpenConns != newest.MaxOpenConns {
|
||||
return false
|
||||
}
|
||||
|
||||
if d.ConnMaxLifetime != newest.ConnMaxLifetime {
|
||||
return false
|
||||
}
|
||||
|
||||
if d.ClusterName != newest.ClusterName {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
return d.Addr == newest.Addr &&
|
||||
d.FeAddr == newest.FeAddr &&
|
||||
d.User == newest.User &&
|
||||
d.Password == newest.Password &&
|
||||
d.EnableWrite == newest.EnableWrite &&
|
||||
d.UserWrite == newest.UserWrite &&
|
||||
d.PasswordWrite == newest.PasswordWrite &&
|
||||
d.MaxQueryRows == newest.MaxQueryRows &&
|
||||
d.Timeout == newest.Timeout &&
|
||||
d.MaxIdleConns == newest.MaxIdleConns &&
|
||||
d.MaxOpenConns == newest.MaxOpenConns &&
|
||||
d.ConnMaxLifetime == newest.ConnMaxLifetime &&
|
||||
d.ClusterName == newest.ClusterName
|
||||
}
|
||||
|
||||
func (d *Doris) MakeLogQuery(ctx context.Context, query interface{}, eventTags []string, start, end int64) (interface{}, error) {
|
||||
|
||||
@@ -301,7 +301,7 @@ ALTER TABLE `event_pipeline` ADD COLUMN `trigger_mode` varchar(128) NOT NULL DEF
|
||||
ALTER TABLE `event_pipeline` ADD COLUMN `disabled` tinyint(1) NOT NULL DEFAULT 0 COMMENT 'disabled flag';
|
||||
ALTER TABLE `event_pipeline` ADD COLUMN `nodes` text COMMENT 'workflow nodes (JSON)';
|
||||
ALTER TABLE `event_pipeline` ADD COLUMN `connections` text COMMENT 'node connections (JSON)';
|
||||
ALTER TABLE `event_pipeline` ADD COLUMN `env_variables` text COMMENT 'environment variables (JSON)';
|
||||
ALTER TABLE `event_pipeline` ADD COLUMN `input_variables` text COMMENT 'input variables (JSON)';
|
||||
ALTER TABLE `event_pipeline` ADD COLUMN `label_filters` text COMMENT 'label filters (JSON)';
|
||||
|
||||
CREATE TABLE `event_pipeline_execution` (
|
||||
@@ -318,7 +318,7 @@ CREATE TABLE `event_pipeline_execution` (
|
||||
`finished_at` bigint DEFAULT 0 COMMENT 'finish timestamp',
|
||||
`duration_ms` bigint DEFAULT 0 COMMENT 'duration in milliseconds',
|
||||
`trigger_by` varchar(64) DEFAULT '' COMMENT 'trigger by',
|
||||
`env_snapshot` text COMMENT 'environment variables snapshot (sanitized)',
|
||||
`inputs_snapshot` text COMMENT 'inputs snapshot',
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `idx_pipeline_id` (`pipeline_id`),
|
||||
KEY `idx_event_id` (`event_id`),
|
||||
|
||||
@@ -39,6 +39,9 @@ type Doris struct {
|
||||
MaxQueryRows int `json:"doris.max_query_rows" mapstructure:"doris.max_query_rows"`
|
||||
ClusterName string `json:"doris.cluster_name" mapstructure:"doris.cluster_name"`
|
||||
EnableWrite bool `json:"doris.enable_write" mapstructure:"doris.enable_write"`
|
||||
// 写用户,用来区分读写用户,减少数据源
|
||||
UserWrite string `json:"doris.user_write" mapstructure:"doris.user_write"`
|
||||
PasswordWrite string `json:"doris.password_write" mapstructure:"doris.password_write"`
|
||||
}
|
||||
|
||||
// NewDorisWithSettings initializes a new Doris instance with the given settings
|
||||
@@ -88,13 +91,13 @@ func (d *Doris) NewConn(ctx context.Context, database string) (*sql.DB, error) {
|
||||
|
||||
var keys []string
|
||||
keys = append(keys, d.Addr)
|
||||
keys = append(keys, d.Password, d.User)
|
||||
keys = append(keys, d.User, d.Password)
|
||||
if len(database) > 0 {
|
||||
keys = append(keys, database)
|
||||
}
|
||||
cachedkey := strings.Join(keys, ":")
|
||||
cachedKey := strings.Join(keys, ":")
|
||||
// cache conn with database
|
||||
conn, ok := pool.PoolClient.Load(cachedkey)
|
||||
conn, ok := pool.PoolClient.Load(cachedKey)
|
||||
if ok {
|
||||
return conn.(*sql.DB), nil
|
||||
}
|
||||
@@ -102,7 +105,7 @@ func (d *Doris) NewConn(ctx context.Context, database string) (*sql.DB, error) {
|
||||
var err error
|
||||
defer func() {
|
||||
if db != nil && err == nil {
|
||||
pool.PoolClient.Store(cachedkey, db)
|
||||
pool.PoolClient.Store(cachedKey, db)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -121,6 +124,79 @@ func (d *Doris) NewConn(ctx context.Context, database string) (*sql.DB, error) {
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// NewWriteConn establishes a new connection to Doris for write operations
|
||||
// When EnableWrite is true and UserWrite is configured, it uses the write user credentials
|
||||
// Otherwise, it reuses the read connection from NewConn
|
||||
func (d *Doris) NewWriteConn(ctx context.Context, database string) (*sql.DB, error) {
|
||||
// If write user is not configured, reuse the read connection
|
||||
if !d.EnableWrite || len(d.UserWrite) == 0 {
|
||||
return d.NewConn(ctx, database)
|
||||
}
|
||||
|
||||
if len(d.Addr) == 0 {
|
||||
return nil, errors.New("empty fe-node addr")
|
||||
}
|
||||
|
||||
// Set default values similar to postgres implementation
|
||||
if d.Timeout == 0 {
|
||||
d.Timeout = 60000
|
||||
}
|
||||
if d.MaxIdleConns == 0 {
|
||||
d.MaxIdleConns = 10
|
||||
}
|
||||
if d.MaxOpenConns == 0 {
|
||||
d.MaxOpenConns = 100
|
||||
}
|
||||
if d.ConnMaxLifetime == 0 {
|
||||
d.ConnMaxLifetime = 14400
|
||||
}
|
||||
if d.MaxQueryRows == 0 {
|
||||
d.MaxQueryRows = 500
|
||||
}
|
||||
|
||||
// Use write user credentials
|
||||
user := d.UserWrite
|
||||
password := d.PasswordWrite
|
||||
|
||||
var keys []string
|
||||
keys = append(keys, d.Addr)
|
||||
keys = append(keys, user, password)
|
||||
if len(database) > 0 {
|
||||
keys = append(keys, database)
|
||||
}
|
||||
cachedKey := strings.Join(keys, ":")
|
||||
// cache conn with database
|
||||
conn, ok := pool.PoolClient.Load(cachedKey)
|
||||
if ok {
|
||||
return conn.(*sql.DB), nil
|
||||
}
|
||||
var db *sql.DB
|
||||
var err error
|
||||
defer func() {
|
||||
if db != nil && err == nil {
|
||||
pool.PoolClient.Store(cachedKey, db)
|
||||
}
|
||||
}()
|
||||
|
||||
// Simplified connection logic for Doris using MySQL driver
|
||||
dsn := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8", user, password, d.Addr, database)
|
||||
db, err = sql.Open("mysql", dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set connection pool configuration for write connections
|
||||
// Use more conservative values since write operations are typically less frequent
|
||||
writeMaxIdleConns := max(d.MaxIdleConns/5, 2)
|
||||
writeMaxOpenConns := max(d.MaxOpenConns/10, 5)
|
||||
|
||||
db.SetMaxIdleConns(writeMaxIdleConns)
|
||||
db.SetMaxOpenConns(writeMaxOpenConns)
|
||||
db.SetConnMaxLifetime(time.Duration(d.ConnMaxLifetime) * time.Second)
|
||||
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// createTimeoutContext creates a context with timeout based on Doris configuration
|
||||
func (d *Doris) createTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc) {
|
||||
timeout := d.Timeout
|
||||
@@ -472,7 +548,7 @@ func (d *Doris) ExecContext(ctx context.Context, database string, sql string) er
|
||||
timeoutCtx, cancel := d.createTimeoutContext(ctx)
|
||||
defer cancel()
|
||||
|
||||
db, err := d.NewConn(timeoutCtx, database)
|
||||
db, err := d.NewWriteConn(timeoutCtx, database)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
324
dskit/doris/sql_analyzer.go
Normal file
324
dskit/doris/sql_analyzer.go
Normal file
@@ -0,0 +1,324 @@
|
||||
package doris
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/pingcap/tidb/pkg/parser"
|
||||
"github.com/pingcap/tidb/pkg/parser/ast"
|
||||
_ "github.com/pingcap/tidb/pkg/parser/test_driver" // required for parser
|
||||
)
|
||||
|
||||
// mapAccessPattern matches Doris map/array access syntax like `col['key']` or col["key"]
|
||||
var mapAccessPattern = regexp.MustCompile(`\[['"]\w+['"]\]`)
|
||||
|
||||
// castStringPattern matches Doris CAST(... AS STRING) syntax
|
||||
var castStringPattern = regexp.MustCompile(`(?i)\bAS\s+STRING\b`)
|
||||
|
||||
// macro patterns
|
||||
var timeGroupPattern = regexp.MustCompile(`\$__timeGroup\([^)]+\)`)
|
||||
var timeFilterPattern = regexp.MustCompile(`\$__timeFilter\([^)]+\)`)
|
||||
var intervalPattern = regexp.MustCompile(`\$__interval`)
|
||||
|
||||
// SQLAnalyzeResult holds the analysis result of a SQL statement
|
||||
type SQLAnalyzeResult struct {
|
||||
IsSelectLike bool // whether the statement is a SELECT-like query
|
||||
HasTopAgg bool // whether the top-level query has aggregate functions
|
||||
LimitConst *int64 // top-level LIMIT constant value (nil if no LIMIT or non-constant)
|
||||
}
|
||||
|
||||
// AnalyzeSQL analyzes a SQL statement and extracts top-level features
|
||||
func AnalyzeSQL(sql string) (*SQLAnalyzeResult, error) {
|
||||
// Preprocess SQL to remove Doris-specific syntax that TiDB parser doesn't support
|
||||
preprocessedSQL := preprocessDorisSQL(sql)
|
||||
|
||||
p := parser.New()
|
||||
stmtNodes, _, err := p.Parse(preprocessedSQL, "", "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(stmtNodes) == 0 {
|
||||
return &SQLAnalyzeResult{}, nil
|
||||
}
|
||||
|
||||
result := &SQLAnalyzeResult{}
|
||||
stmt := stmtNodes[0]
|
||||
|
||||
switch s := stmt.(type) {
|
||||
case *ast.SelectStmt:
|
||||
result.IsSelectLike = true
|
||||
analyzeSelectStmt(s, result)
|
||||
case *ast.SetOprStmt: // UNION / INTERSECT / EXCEPT
|
||||
result.IsSelectLike = true
|
||||
analyzeSetOprStmt(s, result)
|
||||
default:
|
||||
result.IsSelectLike = false
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// analyzeSelectStmt analyzes a SELECT statement
|
||||
func analyzeSelectStmt(sel *ast.SelectStmt, result *SQLAnalyzeResult) {
|
||||
// Check if top-level SELECT has aggregate functions
|
||||
if sel.Fields != nil {
|
||||
for _, field := range sel.Fields.Fields {
|
||||
if field.Expr != nil && hasAggregateFunc(field.Expr) {
|
||||
result.HasTopAgg = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check if any CTE has aggregate functions
|
||||
if !result.HasTopAgg && sel.With != nil {
|
||||
for _, cte := range sel.With.CTEs {
|
||||
if selectHasAggregate(cte.Query) {
|
||||
result.HasTopAgg = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extract top-level LIMIT
|
||||
if sel.Limit != nil && sel.Limit.Count != nil {
|
||||
if val, ok := extractConstValue(sel.Limit.Count); ok {
|
||||
result.LimitConst = &val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// selectHasAggregate checks if a node (SELECT, UNION, or SubqueryExpr) has aggregate functions
|
||||
func selectHasAggregate(node ast.Node) bool {
|
||||
switch n := node.(type) {
|
||||
case *ast.SelectStmt:
|
||||
if n.Fields != nil {
|
||||
for _, field := range n.Fields.Fields {
|
||||
if field.Expr != nil && hasAggregateFunc(field.Expr) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
case *ast.SetOprStmt:
|
||||
// For UNION, check all branches
|
||||
if n.SelectList != nil {
|
||||
for _, sel := range n.SelectList.Selects {
|
||||
if selectHasAggregate(sel) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
case *ast.SubqueryExpr:
|
||||
// CTE query is wrapped in SubqueryExpr
|
||||
if n.Query != nil {
|
||||
return selectHasAggregate(n.Query)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// analyzeSetOprStmt analyzes UNION/INTERSECT/EXCEPT statements
|
||||
func analyzeSetOprStmt(setOpr *ast.SetOprStmt, result *SQLAnalyzeResult) {
|
||||
// UNION's LIMIT is at the outermost level
|
||||
if setOpr.Limit != nil && setOpr.Limit.Count != nil {
|
||||
if val, ok := extractConstValue(setOpr.Limit.Count); ok {
|
||||
result.LimitConst = &val
|
||||
}
|
||||
}
|
||||
|
||||
// Check if all branches are aggregates (conservative: if any is non-aggregate, don't skip)
|
||||
if setOpr.SelectList == nil || len(setOpr.SelectList.Selects) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
allAgg := true
|
||||
for _, sel := range setOpr.SelectList.Selects {
|
||||
if selectStmt, ok := sel.(*ast.SelectStmt); ok {
|
||||
if selectStmt.Fields != nil {
|
||||
hasAgg := false
|
||||
for _, field := range selectStmt.Fields.Fields {
|
||||
if field.Expr != nil && hasAggregateFunc(field.Expr) {
|
||||
hasAgg = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasAgg {
|
||||
allAgg = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
result.HasTopAgg = allAgg
|
||||
}
|
||||
|
||||
// hasAggregateFunc checks if an expression contains aggregate functions (without entering subqueries)
|
||||
func hasAggregateFunc(expr ast.ExprNode) bool {
|
||||
checker := &aggregateChecker{}
|
||||
expr.Accept(checker)
|
||||
return checker.found
|
||||
}
|
||||
|
||||
// aggregateChecker implements ast.Visitor to find aggregate functions
|
||||
type aggregateChecker struct {
|
||||
found bool
|
||||
}
|
||||
|
||||
func (c *aggregateChecker) Enter(n ast.Node) (ast.Node, bool) {
|
||||
if c.found {
|
||||
return n, true // stop traversal
|
||||
}
|
||||
|
||||
switch node := n.(type) {
|
||||
case *ast.SubqueryExpr:
|
||||
return n, true // don't enter subquery
|
||||
case *ast.AggregateFuncExpr:
|
||||
c.found = true
|
||||
return n, true
|
||||
case *ast.FuncCallExpr:
|
||||
// Check for Doris-specific aggregate/statistic functions
|
||||
funcName := strings.ToUpper(node.FnName.L)
|
||||
if isDorisAggregateFunc(funcName) {
|
||||
c.found = true
|
||||
return n, true
|
||||
}
|
||||
}
|
||||
return n, false // continue traversal
|
||||
}
|
||||
|
||||
func (c *aggregateChecker) Leave(n ast.Node) (ast.Node, bool) {
|
||||
return n, true
|
||||
}
|
||||
|
||||
// isDorisAggregateFunc checks if a function is a Doris-specific aggregate/statistic function
|
||||
func isDorisAggregateFunc(funcName string) bool {
|
||||
dorisAggFuncs := map[string]bool{
|
||||
// Standard aggregates (in case parser doesn't recognize them)
|
||||
"COUNT": true,
|
||||
"SUM": true,
|
||||
"AVG": true,
|
||||
"MIN": true,
|
||||
"MAX": true,
|
||||
"ANY": true,
|
||||
"ANY_VALUE": true,
|
||||
|
||||
// HLL related
|
||||
"HLL_UNION_AGG": true,
|
||||
"HLL_RAW_AGG": true,
|
||||
"HLL_CARDINALITY": true,
|
||||
"HLL_UNION": true,
|
||||
"HLL_HASH": true,
|
||||
|
||||
// Bitmap related
|
||||
"BITMAP_UNION": true,
|
||||
"BITMAP_UNION_COUNT": true,
|
||||
"BITMAP_INTERSECT": true,
|
||||
"BITMAP_COUNT": true,
|
||||
"BITMAP_AND_COUNT": true,
|
||||
"BITMAP_OR_COUNT": true,
|
||||
"BITMAP_XOR_COUNT": true,
|
||||
"BITMAP_AND_NOT_COUNT": true,
|
||||
|
||||
// Other aggregates
|
||||
"PERCENTILE": true,
|
||||
"PERCENTILE_APPROX": true,
|
||||
"APPROX_COUNT_DISTINCT": true,
|
||||
"NDV": true,
|
||||
"COLLECT_LIST": true,
|
||||
"COLLECT_SET": true,
|
||||
"GROUP_CONCAT": true,
|
||||
"GROUP_BIT_AND": true,
|
||||
"GROUP_BIT_OR": true,
|
||||
"GROUP_BIT_XOR": true,
|
||||
"GROUPING": true,
|
||||
"GROUPING_ID": true,
|
||||
|
||||
// Statistical functions
|
||||
"STDDEV": true,
|
||||
"STDDEV_POP": true,
|
||||
"STDDEV_SAMP": true,
|
||||
"STD": true,
|
||||
"VARIANCE": true,
|
||||
"VAR_POP": true,
|
||||
"VAR_SAMP": true,
|
||||
"COVAR_POP": true,
|
||||
"COVAR_SAMP": true,
|
||||
"CORR": true,
|
||||
|
||||
// Window functions that are also aggregates
|
||||
"FIRST_VALUE": true,
|
||||
"LAST_VALUE": true,
|
||||
"LAG": true,
|
||||
"LEAD": true,
|
||||
"ROW_NUMBER": true,
|
||||
"RANK": true,
|
||||
"DENSE_RANK": true,
|
||||
"NTILE": true,
|
||||
"CUME_DIST": true,
|
||||
"PERCENT_RANK": true,
|
||||
}
|
||||
return dorisAggFuncs[funcName]
|
||||
}
|
||||
|
||||
// extractConstValue extracts constant integer value from an expression
|
||||
func extractConstValue(expr ast.ExprNode) (int64, bool) {
|
||||
switch v := expr.(type) {
|
||||
case ast.ValueExpr:
|
||||
switch val := v.GetValue().(type) {
|
||||
case int64:
|
||||
return val, true
|
||||
case uint64:
|
||||
return int64(val), true
|
||||
case float64:
|
||||
return int64(val), true
|
||||
case int:
|
||||
return int64(val), true
|
||||
}
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// preprocessDorisSQL removes Doris-specific syntax that TiDB parser doesn't support
|
||||
func preprocessDorisSQL(sql string) string {
|
||||
// Remove map/array access syntax like ['key'] or ["key"]
|
||||
// This is used in Doris for accessing map/variant/json fields
|
||||
sql = mapAccessPattern.ReplaceAllString(sql, "")
|
||||
|
||||
// Replace Doris CAST(... AS STRING) with CAST(... AS CHAR)
|
||||
sql = castStringPattern.ReplaceAllString(sql, "AS CHAR")
|
||||
|
||||
// Replace macros with valid SQL equivalents
|
||||
sql = timeGroupPattern.ReplaceAllString(sql, "ts")
|
||||
sql = timeFilterPattern.ReplaceAllString(sql, "1=1")
|
||||
sql = intervalPattern.ReplaceAllString(sql, "60")
|
||||
|
||||
return sql
|
||||
}
|
||||
|
||||
// NeedsRowCountCheck determines if a SQL query needs row count checking
|
||||
// Returns: needsCheck bool, directReject bool, rejectReason string
|
||||
func NeedsRowCountCheck(sql string, maxQueryRows int) (bool, bool, string) {
|
||||
result, err := AnalyzeSQL(sql)
|
||||
if err != nil {
|
||||
// Parse failed, fall back to probe check
|
||||
return true, false, ""
|
||||
}
|
||||
|
||||
if !result.IsSelectLike {
|
||||
// Not a SELECT query, skip check
|
||||
return false, false, ""
|
||||
}
|
||||
|
||||
// Rule 1: Top-level has aggregate functions -> skip check
|
||||
if result.HasTopAgg {
|
||||
return false, false, ""
|
||||
}
|
||||
|
||||
// Rule 2: Top-level LIMIT <= maxRows -> skip check
|
||||
if result.LimitConst != nil && *result.LimitConst <= int64(maxQueryRows) {
|
||||
return false, false, ""
|
||||
}
|
||||
|
||||
// Otherwise, needs probe check (including LIMIT > maxRows, since actual result may be smaller)
|
||||
return true, false, ""
|
||||
}
|
||||
784
dskit/doris/sql_analyzer_test.go
Normal file
784
dskit/doris/sql_analyzer_test.go
Normal file
@@ -0,0 +1,784 @@
|
||||
package doris
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAnalyzeSQL_AggregateQueries(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
sql string
|
||||
wantHasAgg bool
|
||||
wantIsSelect bool
|
||||
}{
|
||||
// Standard aggregate functions - should skip check
|
||||
{
|
||||
name: "COUNT(*)",
|
||||
sql: "SELECT COUNT(*) AS `cnt`, FLOOR(UNIX_TIMESTAMP(event_date) DIV 10) * 10 AS `time`, CAST(`labels`['event'] AS STRING) AS `labels.event` FROM `db_insight_doris`.`ewall_event` WHERE `event_date` BETWEEN FROM_UNIXTIME(1768965669) AND FROM_UNIXTIME(1768965969) GROUP BY `time`, `labels.event` ORDER BY `time` ASC",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "COUNT with column",
|
||||
sql: "SELECT COUNT(id) FROM users",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "SUM function",
|
||||
sql: "SELECT SUM(amount) FROM orders",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "AVG function",
|
||||
sql: "SELECT AVG(price) FROM products",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "MIN function",
|
||||
sql: "SELECT MIN(created_at) FROM logs",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "MAX function",
|
||||
sql: "SELECT MAX(score) FROM results",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "Multiple aggregates",
|
||||
sql: "SELECT COUNT(*), SUM(amount), AVG(price) FROM orders",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "Aggregate with GROUP BY",
|
||||
sql: "SELECT user_id, COUNT(*) FROM orders GROUP BY user_id",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "Aggregate with WHERE and GROUP BY",
|
||||
sql: "SELECT category, SUM(sales) FROM products WHERE status = 'active' GROUP BY category",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "Aggregate with HAVING",
|
||||
sql: "SELECT user_id, COUNT(*) as cnt FROM orders GROUP BY user_id HAVING cnt > 10",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
// macro queries with aggregates
|
||||
{
|
||||
name: "COUNT with timeGroup",
|
||||
sql: "SELECT COUNT(*) AS `cnt`, $__timeGroup(timestamp,$__interval) AS `time` FROM `apm`.`traces_span` WHERE (`service_name` = 'demo-logic-server') AND $__timeFilter(`timestamp`) GROUP BY `time` ORDER BY `time` ASC",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "CTE with ratio calculation",
|
||||
sql: "WITH `time_totals` AS (SELECT $__timeGroup(timestamp,$__interval) AS `time`, COUNT(*) AS `total_count` FROM `apm`.`traces_span` WHERE $__timeFilter(`timestamp`) GROUP BY `time`), `time_counts` AS (SELECT ANY_VALUE(`service_name`) AS `service_name`, $__timeGroup(timestamp,$__interval) AS `time`, COUNT(*) AS `count` FROM `apm`.`traces_span` WHERE (`service_name` = 'demo-logic-server') AND $__timeFilter(`timestamp`) GROUP BY `time`) SELECT tc.`service_name`, tc.`time`, ROUND(tc.`count` * 100.0 / tt.`total_count`, 2) AS `ratio` FROM `time_counts` tc JOIN `time_totals` tt ON tc.`time` = tt.`time` ORDER BY tc.`time` ASC",
|
||||
wantHasAgg: true, // CTE has aggregate functions
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "CTE with top values and ratio",
|
||||
sql: "WITH `top_values` AS (SELECT `service_name` FROM `apm`.`traces_span` WHERE $__timeFilter(`timestamp`) GROUP BY `service_name` ORDER BY COUNT(*) DESC LIMIT 5), `time_totals` AS (SELECT $__timeGroup(timestamp,$__interval) AS `time`, COUNT(*) AS `total_count` FROM `apm`.`traces_span` WHERE $__timeFilter(`timestamp`) GROUP BY `time`), `time_counts` AS (SELECT `service_name`, $__timeGroup(timestamp,$__interval) AS `time`, COUNT(*) AS `count` FROM `apm`.`traces_span` WHERE $__timeFilter(`timestamp`) AND `service_name` IN (SELECT `service_name` FROM `top_values`) GROUP BY `service_name`, `time`) SELECT tc.`service_name`, tc.`time`, ROUND(tc.`count` * 100.0 / tt.`total_count`, 2) AS `ratio` FROM `time_counts` tc JOIN `time_totals` tt ON tc.`time` = tt.`time` ORDER BY tc.`time` ASC",
|
||||
wantHasAgg: true, // CTE has aggregate functions
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "PERCENTILE_APPROX with timeGroup",
|
||||
sql: "SELECT PERCENTILE_APPROX(`duration`, 0.95) AS `p95`, $__timeGroup(timestamp,$__interval) AS `time` FROM `apm`.`traces_span` WHERE $__timeFilter(`timestamp`) GROUP BY `time` ORDER BY `time` ASC",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "COUNT DISTINCT with timeGroup",
|
||||
sql: "SELECT COUNT(DISTINCT `duration`) AS `unique_count`, $__timeGroup(timestamp,$__interval) AS `time` FROM `apm`.`traces_span` WHERE $__timeFilter(`timestamp`) GROUP BY `time` ORDER BY `time` ASC",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "CASE WHEN with COUNT and ROUND",
|
||||
sql: "SELECT ROUND(COUNT(CASE WHEN `duration` IS NOT NULL THEN 1 END) * 100.0 / COUNT(*), 2) AS `exist_ratio`, $__timeGroup(timestamp,$__interval) AS `time` FROM `apm`.`traces_span` WHERE $__timeFilter(`timestamp`) GROUP BY `time` ORDER BY `time` ASC",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "AVG with timeGroup",
|
||||
sql: "SELECT AVG(`duration`) AS `avg`, $__timeGroup(timestamp,$__interval) AS `time` FROM `apm`.`traces_span` WHERE $__timeFilter(`timestamp`) GROUP BY `time` ORDER BY `time` ASC",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "Simple COUNT with timeFilter",
|
||||
sql: "SELECT COUNT(*) AS `cnt` FROM `apm`.`traces_span` WHERE (`span_name` = 'GET /backend/detail') AND $__timeFilter(`timestamp`)",
|
||||
wantHasAgg: true,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "CTE with CROSS JOIN ratio",
|
||||
sql: "WITH `total` AS (SELECT COUNT(*) AS `total_count` FROM `apm`.`traces_span` WHERE $__timeFilter(`timestamp`)), `value_counts` AS (SELECT ANY_VALUE(`span_kind`) AS `span_kind`, COUNT(*) AS `count` FROM `apm`.`traces_span` WHERE (`span_kind` = 'SPAN_KIND_SERVER') AND $__timeFilter(`timestamp`)) SELECT vc.`span_kind`, vc.`count` AS `count`, ROUND(vc.`count` * 100.0 / t.`total_count`, 2) AS `ratio` FROM `value_counts` vc CROSS JOIN `total` t ORDER BY vc.`count` DESC;",
|
||||
wantHasAgg: true, // CTE has aggregate functions
|
||||
wantIsSelect: true,
|
||||
},
|
||||
// Non-aggregate queries - should not skip check
|
||||
{
|
||||
name: "Simple SELECT *",
|
||||
sql: "SELECT * FROM users",
|
||||
wantHasAgg: false,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "SELECT with columns",
|
||||
sql: "SELECT id, name, email FROM users",
|
||||
wantHasAgg: false,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "SELECT with WHERE",
|
||||
sql: "SELECT * FROM users WHERE status = 'active'",
|
||||
wantHasAgg: false,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "SELECT with JOIN",
|
||||
sql: "SELECT u.name, o.amount FROM users u JOIN orders o ON u.id = o.user_id",
|
||||
wantHasAgg: false,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := AnalyzeSQL(tt.sql)
|
||||
if err != nil {
|
||||
t.Fatalf("AnalyzeSQL() error = %v", err)
|
||||
}
|
||||
if result.HasTopAgg != tt.wantHasAgg {
|
||||
t.Errorf("name: %s, HasTopAgg = %v, want %v", tt.name, result.HasTopAgg, tt.wantHasAgg)
|
||||
}
|
||||
if result.IsSelectLike != tt.wantIsSelect {
|
||||
t.Errorf("IsSelectLike = %v, want %v", result.IsSelectLike, tt.wantIsSelect)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAnalyzeSQL_SubqueryWithAggregate(t *testing.T) {
|
||||
// Aggregate in subquery should NOT skip check for main query
|
||||
tests := []struct {
|
||||
name string
|
||||
sql string
|
||||
wantHasAgg bool
|
||||
}{
|
||||
{
|
||||
name: "Aggregate in subquery only",
|
||||
sql: "SELECT * FROM (SELECT user_id, COUNT(*) as cnt FROM orders GROUP BY user_id) t",
|
||||
wantHasAgg: false, // top-level has no aggregate
|
||||
},
|
||||
{
|
||||
name: "Aggregate in WHERE subquery",
|
||||
sql: "SELECT * FROM users WHERE id IN (SELECT user_id FROM orders GROUP BY user_id HAVING COUNT(*) > 5)",
|
||||
wantHasAgg: false, // top-level has no aggregate
|
||||
},
|
||||
{
|
||||
name: "Both top-level and subquery aggregates",
|
||||
sql: "SELECT COUNT(*) FROM (SELECT user_id FROM orders GROUP BY user_id) t",
|
||||
wantHasAgg: true, // top-level has aggregate
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := AnalyzeSQL(tt.sql)
|
||||
if err != nil {
|
||||
t.Fatalf("AnalyzeSQL() error = %v", err)
|
||||
}
|
||||
if result.HasTopAgg != tt.wantHasAgg {
|
||||
t.Errorf("HasTopAgg = %v, want %v", result.HasTopAgg, tt.wantHasAgg)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAnalyzeSQL_LimitQueries(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
sql string
|
||||
wantLimit *int64
|
||||
wantIsSelect bool
|
||||
}{
|
||||
{
|
||||
name: "LIMIT 10",
|
||||
sql: "SELECT * FROM users LIMIT 10",
|
||||
wantLimit: ptr(int64(10)),
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "LIMIT 100",
|
||||
sql: "SELECT * FROM users LIMIT 100",
|
||||
wantLimit: ptr(int64(100)),
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "LIMIT 1000",
|
||||
sql: "SELECT * FROM users LIMIT 1000",
|
||||
wantLimit: ptr(int64(1000)),
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "LIMIT with OFFSET",
|
||||
sql: "SELECT * FROM users LIMIT 50 OFFSET 100",
|
||||
wantLimit: ptr(int64(50)),
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "No LIMIT",
|
||||
sql: "SELECT * FROM users",
|
||||
wantLimit: nil,
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "LIMIT 0",
|
||||
sql: "SELECT * FROM users LIMIT 0",
|
||||
wantLimit: ptr(int64(0)),
|
||||
wantIsSelect: true,
|
||||
},
|
||||
{
|
||||
name: "LIMIT 1",
|
||||
sql: "SELECT * FROM users LIMIT 1",
|
||||
wantLimit: ptr(int64(1)),
|
||||
wantIsSelect: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := AnalyzeSQL(tt.sql)
|
||||
if err != nil {
|
||||
t.Fatalf("AnalyzeSQL() error = %v", err)
|
||||
}
|
||||
if result.IsSelectLike != tt.wantIsSelect {
|
||||
t.Errorf("IsSelectLike = %v, want %v", result.IsSelectLike, tt.wantIsSelect)
|
||||
}
|
||||
if tt.wantLimit == nil {
|
||||
if result.LimitConst != nil {
|
||||
t.Errorf("LimitConst = %v, want nil", *result.LimitConst)
|
||||
}
|
||||
} else {
|
||||
if result.LimitConst == nil {
|
||||
t.Errorf("LimitConst = nil, want %v", *tt.wantLimit)
|
||||
} else if *result.LimitConst != *tt.wantLimit {
|
||||
t.Errorf("LimitConst = %v, want %v", *result.LimitConst, *tt.wantLimit)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAnalyzeSQL_UnionQueries(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
sql string
|
||||
wantHasAgg bool
|
||||
wantLimit *int64
|
||||
}{
|
||||
{
|
||||
name: "UNION without aggregate",
|
||||
sql: "SELECT id, name FROM users UNION SELECT id, name FROM admins",
|
||||
wantHasAgg: false,
|
||||
wantLimit: nil,
|
||||
},
|
||||
{
|
||||
name: "UNION ALL without aggregate",
|
||||
sql: "SELECT * FROM users UNION ALL SELECT * FROM admins",
|
||||
wantHasAgg: false,
|
||||
wantLimit: nil,
|
||||
},
|
||||
{
|
||||
name: "UNION with aggregate in all branches",
|
||||
sql: "SELECT COUNT(*) FROM users UNION SELECT COUNT(*) FROM admins",
|
||||
wantHasAgg: true,
|
||||
wantLimit: nil,
|
||||
},
|
||||
{
|
||||
name: "UNION with aggregate in one branch only",
|
||||
sql: "SELECT COUNT(*) FROM users UNION SELECT id FROM admins",
|
||||
wantHasAgg: false, // not all branches have aggregate
|
||||
wantLimit: nil,
|
||||
},
|
||||
{
|
||||
name: "UNION with outer LIMIT",
|
||||
sql: "SELECT * FROM users UNION SELECT * FROM admins LIMIT 100",
|
||||
wantHasAgg: false,
|
||||
wantLimit: ptr(int64(100)),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := AnalyzeSQL(tt.sql)
|
||||
if err != nil {
|
||||
t.Fatalf("AnalyzeSQL() error = %v", err)
|
||||
}
|
||||
if result.HasTopAgg != tt.wantHasAgg {
|
||||
t.Errorf("HasTopAgg = %v, want %v", result.HasTopAgg, tt.wantHasAgg)
|
||||
}
|
||||
if tt.wantLimit == nil {
|
||||
if result.LimitConst != nil {
|
||||
t.Errorf("LimitConst = %v, want nil", *result.LimitConst)
|
||||
}
|
||||
} else {
|
||||
if result.LimitConst == nil {
|
||||
t.Errorf("LimitConst = nil, want %v", *tt.wantLimit)
|
||||
} else if *result.LimitConst != *tt.wantLimit {
|
||||
t.Errorf("LimitConst = %v, want %v", *result.LimitConst, *tt.wantLimit)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAnalyzeSQL_NonSelectStatements(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
sql string
|
||||
wantIsSelect bool
|
||||
}{
|
||||
{
|
||||
name: "SHOW DATABASES",
|
||||
sql: "SHOW DATABASES",
|
||||
wantIsSelect: false,
|
||||
},
|
||||
{
|
||||
name: "SHOW TABLES",
|
||||
sql: "SHOW TABLES",
|
||||
wantIsSelect: false,
|
||||
},
|
||||
{
|
||||
name: "DESCRIBE table",
|
||||
sql: "DESCRIBE users",
|
||||
wantIsSelect: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := AnalyzeSQL(tt.sql)
|
||||
if err != nil {
|
||||
// Some statements may not be parseable, which is fine
|
||||
return
|
||||
}
|
||||
if result.IsSelectLike != tt.wantIsSelect {
|
||||
t.Errorf("IsSelectLike = %v, want %v", result.IsSelectLike, tt.wantIsSelect)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNeedsRowCountCheck(t *testing.T) {
|
||||
maxRows := 500
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
sql string
|
||||
wantNeedCheck bool
|
||||
wantReject bool
|
||||
}{
|
||||
// Should skip check (needsCheck = false)
|
||||
{
|
||||
name: "Aggregate COUNT(*)",
|
||||
sql: "SELECT COUNT(*) FROM users",
|
||||
wantNeedCheck: false,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "Aggregate SUM",
|
||||
sql: "SELECT SUM(amount) FROM orders",
|
||||
wantNeedCheck: false,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "Aggregate with GROUP BY",
|
||||
sql: "SELECT user_id, COUNT(*) FROM orders GROUP BY user_id",
|
||||
wantNeedCheck: false,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "LIMIT equal to max",
|
||||
sql: "SELECT * FROM users LIMIT 500",
|
||||
wantNeedCheck: false,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "LIMIT less than max",
|
||||
sql: "SELECT * FROM users LIMIT 100",
|
||||
wantNeedCheck: false,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "LIMIT 1",
|
||||
sql: "SELECT * FROM users LIMIT 1",
|
||||
wantNeedCheck: false,
|
||||
wantReject: false,
|
||||
},
|
||||
|
||||
// LIMIT > maxRows still needs probe check (actual result might be smaller)
|
||||
{
|
||||
name: "LIMIT exceeds max",
|
||||
sql: "SELECT * FROM users LIMIT 1000",
|
||||
wantNeedCheck: true,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "LIMIT much larger than max",
|
||||
sql: "SELECT * FROM users LIMIT 10000",
|
||||
wantNeedCheck: true,
|
||||
wantReject: false,
|
||||
},
|
||||
|
||||
// Should execute probe check (needsCheck = true)
|
||||
{
|
||||
name: "No LIMIT no aggregate",
|
||||
sql: "SELECT * FROM users",
|
||||
wantNeedCheck: true,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "SELECT with WHERE no LIMIT",
|
||||
sql: "SELECT * FROM users WHERE status = 'active'",
|
||||
wantNeedCheck: true,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "SELECT with JOIN no LIMIT",
|
||||
sql: "SELECT u.*, o.* FROM users u JOIN orders o ON u.id = o.user_id",
|
||||
wantNeedCheck: true,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "Aggregate in subquery only",
|
||||
sql: "SELECT * FROM (SELECT user_id, COUNT(*) as cnt FROM orders GROUP BY user_id) t",
|
||||
wantNeedCheck: true,
|
||||
wantReject: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
needsCheck, directReject, _ := NeedsRowCountCheck(tt.sql, maxRows)
|
||||
if needsCheck != tt.wantNeedCheck {
|
||||
t.Errorf("needsCheck = %v, want %v", needsCheck, tt.wantNeedCheck)
|
||||
}
|
||||
if directReject != tt.wantReject {
|
||||
t.Errorf("directReject = %v, want %v", directReject, tt.wantReject)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNeedsRowCountCheck_DorisSpecificFunctions(t *testing.T) {
|
||||
maxRows := 500
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
sql string
|
||||
wantNeedCheck bool
|
||||
}{
|
||||
// Doris HLL functions
|
||||
{
|
||||
name: "HLL_UNION_AGG",
|
||||
sql: "SELECT HLL_UNION_AGG(hll_col) FROM user_stats",
|
||||
wantNeedCheck: false,
|
||||
},
|
||||
{
|
||||
name: "HLL_CARDINALITY",
|
||||
sql: "SELECT HLL_CARDINALITY(hll_col) FROM user_stats",
|
||||
wantNeedCheck: false,
|
||||
},
|
||||
// Doris Bitmap functions
|
||||
{
|
||||
name: "BITMAP_UNION_COUNT",
|
||||
sql: "SELECT BITMAP_UNION_COUNT(bitmap_col) FROM user_tags",
|
||||
wantNeedCheck: false,
|
||||
},
|
||||
{
|
||||
name: "BITMAP_UNION",
|
||||
sql: "SELECT BITMAP_UNION(bitmap_col) FROM user_tags GROUP BY category",
|
||||
wantNeedCheck: false,
|
||||
},
|
||||
// Other Doris aggregate functions
|
||||
{
|
||||
name: "APPROX_COUNT_DISTINCT",
|
||||
sql: "SELECT APPROX_COUNT_DISTINCT(user_id) FROM events",
|
||||
wantNeedCheck: false,
|
||||
},
|
||||
{
|
||||
name: "GROUP_CONCAT",
|
||||
sql: "SELECT GROUP_CONCAT(name) FROM users GROUP BY department",
|
||||
wantNeedCheck: false,
|
||||
},
|
||||
{
|
||||
name: "PERCENTILE_APPROX",
|
||||
sql: "SELECT PERCENTILE_APPROX(latency, 0.99) FROM requests",
|
||||
wantNeedCheck: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
needsCheck, _, _ := NeedsRowCountCheck(tt.sql, maxRows)
|
||||
if needsCheck != tt.wantNeedCheck {
|
||||
t.Errorf("needsCheck = %v, want %v (should skip check for Doris aggregate functions)", needsCheck, tt.wantNeedCheck)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNeedsRowCountCheck_ComplexQueries(t *testing.T) {
|
||||
maxRows := 500
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
sql string
|
||||
wantNeedCheck bool
|
||||
wantReject bool
|
||||
}{
|
||||
{
|
||||
name: "CTE with aggregate",
|
||||
sql: "WITH user_counts AS (SELECT user_id, COUNT(*) as cnt FROM orders GROUP BY user_id) SELECT * FROM user_counts",
|
||||
wantNeedCheck: false, // CTE has aggregate, skip check
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "Complex JOIN with aggregate",
|
||||
sql: "SELECT u.department, COUNT(*) FROM users u JOIN orders o ON u.id = o.user_id GROUP BY u.department",
|
||||
wantNeedCheck: false, // has aggregate
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "Nested subquery",
|
||||
sql: "SELECT * FROM users WHERE id IN (SELECT user_id FROM orders WHERE amount > 100)",
|
||||
wantNeedCheck: true,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "DISTINCT query",
|
||||
sql: "SELECT DISTINCT category FROM products",
|
||||
wantNeedCheck: true, // DISTINCT is not aggregate
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "ORDER BY with LIMIT",
|
||||
sql: "SELECT * FROM users ORDER BY created_at DESC LIMIT 100",
|
||||
wantNeedCheck: false, // has valid LIMIT
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "Multiple aggregates in single query",
|
||||
sql: "SELECT COUNT(*), SUM(amount), AVG(amount), MIN(amount), MAX(amount) FROM orders",
|
||||
wantNeedCheck: false,
|
||||
wantReject: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
needsCheck, directReject, _ := NeedsRowCountCheck(tt.sql, maxRows)
|
||||
if needsCheck != tt.wantNeedCheck {
|
||||
t.Errorf("needsCheck = %v, want %v", needsCheck, tt.wantNeedCheck)
|
||||
}
|
||||
if directReject != tt.wantReject {
|
||||
t.Errorf("directReject = %v, want %v", directReject, tt.wantReject)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNeedsRowCountCheck_EdgeCases(t *testing.T) {
|
||||
maxRows := 500
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
sql string
|
||||
wantNeedCheck bool
|
||||
wantReject bool
|
||||
}{
|
||||
{
|
||||
name: "Empty-ish LIMIT 0",
|
||||
sql: "SELECT * FROM users LIMIT 0",
|
||||
wantNeedCheck: false,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "LIMIT at boundary",
|
||||
sql: "SELECT * FROM users LIMIT 501",
|
||||
wantNeedCheck: true, // 501 > 500, needs probe check
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "SELECT with trailing semicolon",
|
||||
sql: "SELECT * FROM users;",
|
||||
wantNeedCheck: true,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "SELECT with extra whitespace",
|
||||
sql: " SELECT * FROM users ",
|
||||
wantNeedCheck: true,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "Lowercase keywords",
|
||||
sql: "select count(*) from users",
|
||||
wantNeedCheck: false,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "Mixed case keywords",
|
||||
sql: "Select Count(*) From users",
|
||||
wantNeedCheck: false,
|
||||
wantReject: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
needsCheck, directReject, _ := NeedsRowCountCheck(tt.sql, maxRows)
|
||||
if needsCheck != tt.wantNeedCheck {
|
||||
t.Errorf("needsCheck = %v, want %v", needsCheck, tt.wantNeedCheck)
|
||||
}
|
||||
if directReject != tt.wantReject {
|
||||
t.Errorf("directReject = %v, want %v", directReject, tt.wantReject)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNeedsRowCountCheck_DifferentMaxRows(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
sql string
|
||||
maxRows int
|
||||
wantNeedCheck bool
|
||||
wantReject bool
|
||||
}{
|
||||
{
|
||||
name: "LIMIT 100 with maxRows 50",
|
||||
sql: "SELECT * FROM users LIMIT 100",
|
||||
maxRows: 50,
|
||||
wantNeedCheck: true, // LIMIT > maxRows, needs probe check
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "LIMIT 100 with maxRows 100",
|
||||
sql: "SELECT * FROM users LIMIT 100",
|
||||
maxRows: 100,
|
||||
wantNeedCheck: false,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "LIMIT 100 with maxRows 200",
|
||||
sql: "SELECT * FROM users LIMIT 100",
|
||||
maxRows: 200,
|
||||
wantNeedCheck: false,
|
||||
wantReject: false,
|
||||
},
|
||||
{
|
||||
name: "No LIMIT with maxRows 1000",
|
||||
sql: "SELECT * FROM users",
|
||||
maxRows: 1000,
|
||||
wantNeedCheck: true,
|
||||
wantReject: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
needsCheck, directReject, _ := NeedsRowCountCheck(tt.sql, tt.maxRows)
|
||||
if needsCheck != tt.wantNeedCheck {
|
||||
t.Errorf("needsCheck = %v, want %v", needsCheck, tt.wantNeedCheck)
|
||||
}
|
||||
if directReject != tt.wantReject {
|
||||
t.Errorf("directReject = %v, want %v", directReject, tt.wantReject)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestSummary_SkipProbeCheck prints a summary of which SQL patterns skip the probe check
|
||||
func TestSummary_SkipProbeCheck(t *testing.T) {
|
||||
maxRows := 500
|
||||
|
||||
skipCheckCases := []struct {
|
||||
category string
|
||||
sql string
|
||||
}{
|
||||
// Aggregate functions
|
||||
{"Aggregate - COUNT(*)", "SELECT COUNT(*) FROM users"},
|
||||
{"Aggregate - COUNT(col)", "SELECT COUNT(id) FROM users"},
|
||||
{"Aggregate - SUM", "SELECT SUM(amount) FROM orders"},
|
||||
{"Aggregate - AVG", "SELECT AVG(price) FROM products"},
|
||||
{"Aggregate - MIN", "SELECT MIN(created_at) FROM logs"},
|
||||
{"Aggregate - MAX", "SELECT MAX(score) FROM results"},
|
||||
{"Aggregate - GROUP BY", "SELECT user_id, COUNT(*) FROM orders GROUP BY user_id"},
|
||||
{"Aggregate - HAVING", "SELECT user_id, SUM(amount) FROM orders GROUP BY user_id HAVING SUM(amount) > 1000"},
|
||||
|
||||
// Doris specific aggregates
|
||||
{"Doris - HLL_UNION_AGG", "SELECT HLL_UNION_AGG(hll_col) FROM stats"},
|
||||
{"Doris - BITMAP_UNION_COUNT", "SELECT BITMAP_UNION_COUNT(bitmap_col) FROM tags"},
|
||||
{"Doris - APPROX_COUNT_DISTINCT", "SELECT APPROX_COUNT_DISTINCT(user_id) FROM events"},
|
||||
{"Doris - GROUP_CONCAT", "SELECT GROUP_CONCAT(name) FROM users GROUP BY dept"},
|
||||
|
||||
// LIMIT <= maxRows
|
||||
{"LIMIT - Equal to max", "SELECT * FROM users LIMIT 500"},
|
||||
{"LIMIT - Less than max", "SELECT * FROM users LIMIT 100"},
|
||||
{"LIMIT - With OFFSET", "SELECT * FROM users LIMIT 100 OFFSET 50"},
|
||||
{"LIMIT - Value 1", "SELECT * FROM users LIMIT 1"},
|
||||
{"LIMIT - Value 0", "SELECT * FROM users LIMIT 0"},
|
||||
}
|
||||
|
||||
t.Log("=== SQL patterns that SKIP probe check (no extra query needed) ===")
|
||||
for _, tc := range skipCheckCases {
|
||||
needsCheck, _, _ := NeedsRowCountCheck(tc.sql, maxRows)
|
||||
status := "✓ SKIP"
|
||||
if needsCheck {
|
||||
status = "✗ NEEDS CHECK (unexpected)"
|
||||
}
|
||||
t.Logf(" %s: %s\n SQL: %s", status, tc.category, tc.sql)
|
||||
}
|
||||
|
||||
needsCheckCases := []struct {
|
||||
category string
|
||||
sql string
|
||||
}{
|
||||
{"No LIMIT - Simple SELECT", "SELECT * FROM users"},
|
||||
{"No LIMIT - With WHERE", "SELECT * FROM users WHERE status = 'active'"},
|
||||
{"No LIMIT - With JOIN", "SELECT u.*, o.* FROM users u JOIN orders o ON u.id = o.user_id"},
|
||||
{"No LIMIT - Subquery with agg", "SELECT * FROM (SELECT user_id, COUNT(*) FROM orders GROUP BY user_id) t"},
|
||||
{"No LIMIT - DISTINCT", "SELECT DISTINCT category FROM products"},
|
||||
{"LIMIT > max (actual may be smaller)", "SELECT * FROM users LIMIT 1000"},
|
||||
{"LIMIT >> max", "SELECT * FROM users LIMIT 10000"},
|
||||
}
|
||||
|
||||
t.Log("\n=== SQL patterns that NEED probe check ===")
|
||||
for _, tc := range needsCheckCases {
|
||||
needsCheck, _, _ := NeedsRowCountCheck(tc.sql, maxRows)
|
||||
status := "✓ NEEDS CHECK"
|
||||
if !needsCheck {
|
||||
status = "✗ SKIP (unexpected)"
|
||||
}
|
||||
t.Logf(" %s: %s\n SQL: %s", status, tc.category, tc.sql)
|
||||
}
|
||||
}
|
||||
|
||||
// ptr returns the address of a copy of v. Test tables use it to express
// optional expected values as *int64 (nil meaning "no expectation").
func ptr(v int64) *int64 {
	p := v
	return &p
}
|
||||
@@ -73,35 +73,44 @@ func (d *Doris) QueryTimeseries(ctx context.Context, query *QueryParam) ([]types
|
||||
}
|
||||
|
||||
// CheckMaxQueryRows checks if the query result exceeds the maximum allowed rows
|
||||
// It uses SQL analysis to skip unnecessary checks for aggregate queries or queries with LIMIT <= maxRows
|
||||
// For queries that need checking, it uses probe approach (LIMIT maxRows+1) instead of COUNT(*) for better performance
|
||||
func (d *Doris) CheckMaxQueryRows(ctx context.Context, database, sql string) error {
|
||||
maxQueryRows := d.MaxQueryRows
|
||||
if maxQueryRows == 0 {
|
||||
maxQueryRows = 500
|
||||
}
|
||||
|
||||
cleanedSQL := strings.TrimSpace(strings.TrimSuffix(strings.TrimSpace(sql), ";"))
|
||||
|
||||
// Step 1: Analyze SQL to determine if check is needed
|
||||
needsCheck, _, _ := NeedsRowCountCheck(cleanedSQL, maxQueryRows)
|
||||
if !needsCheck {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Step 2: Execute probe query (more efficient than COUNT(*))
|
||||
return d.probeRowCount(ctx, database, cleanedSQL, maxQueryRows)
|
||||
}
|
||||
|
||||
// probeRowCount uses threshold probing to check row count
|
||||
// It reads at most maxRows+1 rows, which is O(maxRows) instead of O(totalRows) for COUNT(*)
|
||||
// Doris optimizes LIMIT queries by stopping scan early once limit is reached
|
||||
func (d *Doris) probeRowCount(ctx context.Context, database, sql string, maxRows int) error {
|
||||
timeoutCtx, cancel := d.createTimeoutContext(ctx)
|
||||
defer cancel()
|
||||
|
||||
cleanedSQL := strings.ReplaceAll(sql, ";", "")
|
||||
checkQuery := fmt.Sprintf("SELECT COUNT(*) as count FROM (%s) AS subquery;", cleanedSQL)
|
||||
// Probe SQL: only need to check if exceeds threshold, not actual data
|
||||
probeSQL := fmt.Sprintf("SELECT 1 FROM (%s) AS __probe_chk LIMIT %d", sql, maxRows+1)
|
||||
|
||||
// 执行计数查询
|
||||
results, err := d.ExecQuery(timeoutCtx, database, checkQuery)
|
||||
results, err := d.ExecQuery(timeoutCtx, database, probeSQL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(results) > 0 {
|
||||
if count, exists := results[0]["count"]; exists {
|
||||
v, err := sqlbase.ParseFloat64Value(count)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
maxQueryRows := d.MaxQueryRows
|
||||
if maxQueryRows == 0 {
|
||||
maxQueryRows = 500
|
||||
}
|
||||
|
||||
if v > float64(maxQueryRows) {
|
||||
return fmt.Errorf("query result rows count %d exceeds the maximum limit %d", int(v), maxQueryRows)
|
||||
}
|
||||
}
|
||||
// If returned rows > maxRows, it exceeds the limit
|
||||
if len(results) > maxRows {
|
||||
return fmt.Errorf("query result rows count exceeds the maximum limit %d", maxRows)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
12
go.mod
12
go.mod
@@ -33,6 +33,7 @@ require (
|
||||
github.com/jinzhu/copier v0.4.0
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/koding/multiconfig v0.0.0-20171124222453-69c27309b2d7
|
||||
github.com/larksuite/oapi-sdk-go/v3 v3.5.1
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/mailru/easyjson v0.7.7
|
||||
github.com/mattn/go-isatty v0.0.19
|
||||
@@ -42,6 +43,7 @@ require (
|
||||
github.com/opensearch-project/opensearch-go/v2 v2.3.0
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/pelletier/go-toml/v2 v2.0.8
|
||||
github.com/pingcap/tidb/pkg/parser v0.0.0-20260120034856-e15515e804da
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.20.5
|
||||
github.com/prometheus/common v0.60.1
|
||||
@@ -101,6 +103,9 @@ require (
|
||||
github.com/jcmturner/gofork v1.7.6 // indirect
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
|
||||
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
|
||||
github.com/pingcap/errors v0.11.5-0.20250523034308-74f78ae071ee // indirect
|
||||
github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 // indirect
|
||||
github.com/pingcap/log v1.1.0 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/rogpeppe/go-internal v1.13.1 // indirect
|
||||
@@ -108,10 +113,13 @@ require (
|
||||
github.com/valyala/fastrand v1.1.0 // indirect
|
||||
github.com/valyala/histogram v1.2.0 // indirect
|
||||
github.com/yuin/gopher-lua v1.1.1 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
modernc.org/libc v1.22.5 // indirect
|
||||
modernc.org/mathutil v1.5.0 // indirect
|
||||
modernc.org/mathutil v1.6.0 // indirect
|
||||
modernc.org/memory v1.5.0 // indirect
|
||||
modernc.org/sqlite v1.23.1 // indirect
|
||||
)
|
||||
@@ -135,7 +143,7 @@ require (
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.14.0 // indirect
|
||||
github.com/go-sql-driver/mysql v1.6.0
|
||||
github.com/go-sql-driver/mysql v1.7.1
|
||||
github.com/goccy/go-json v0.10.2 // indirect
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
|
||||
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
|
||||
|
||||
39
go.sum
39
go.sum
@@ -101,6 +101,7 @@ github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3w
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5St5XIHHrQQtkxqrRincx4hmMHOk=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8=
|
||||
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bitly/go-simplejson v0.5.1 h1:xgwPbetQScXt1gh9BmoJ6j9JMr3TElvuIyjR8pgdoow=
|
||||
@@ -195,8 +196,9 @@ github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91
|
||||
github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
|
||||
github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js=
|
||||
github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
|
||||
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
|
||||
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
|
||||
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
@@ -243,6 +245,7 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR
|
||||
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
|
||||
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
|
||||
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
@@ -315,6 +318,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/larksuite/oapi-sdk-go/v3 v3.5.1 h1:gX4dz92YU70inuIX+ug+PBe64eHToIN9rHB4Vupv5Eg=
|
||||
github.com/larksuite/oapi-sdk-go/v3 v3.5.1/go.mod h1:ZEplY+kwuIrj/nqw5uSCINNATcH3KdxSN7y+UxYY5fI=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
|
||||
@@ -361,9 +366,19 @@ github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZ
|
||||
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
|
||||
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
|
||||
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||
github.com/pingcap/errors v0.11.5-0.20250523034308-74f78ae071ee h1:/IDPbpzkzA97t1/Z1+C3KlxbevjMeaI6BQYxvivu4u8=
|
||||
github.com/pingcap/errors v0.11.5-0.20250523034308-74f78ae071ee/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg=
|
||||
github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 h1:tdMsjOqUR7YXHoBitzdebTvOjs/swniBTOLy5XiMtuE=
|
||||
github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86/go.mod h1:exzhVYca3WRtd6gclGNErRWb1qEgff3LYta0LvRmON4=
|
||||
github.com/pingcap/log v1.1.0 h1:ELiPxACz7vdo1qAvvaWJg1NrYFoY6gqAh/+Uo6aXdD8=
|
||||
github.com/pingcap/log v1.1.0/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4=
|
||||
github.com/pingcap/tidb/pkg/parser v0.0.0-20260120034856-e15515e804da h1:PhkRZgMWdq9kTsu7vtVbcDs+SBXjHfFj84027WVZCzI=
|
||||
github.com/pingcap/tidb/pkg/parser v0.0.0-20260120034856-e15515e804da/go.mod h1:oHE+ub2QaDERd+UNHe4z2BhFV2jZrm7VNOe6atR9AF4=
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
@@ -392,7 +407,6 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5X
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/redis/go-redis/v9 v9.0.2 h1:BA426Zqe/7r56kCcvxYLWe1mkaz71LKF77GwgFzSxfE=
|
||||
github.com/redis/go-redis/v9 v9.0.2/go.mod h1:/xDTe9EF1LM61hek62Poq2nzQSGj0xSrEtEHbBQevps=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
@@ -467,13 +481,24 @@ go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
|
||||
go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
|
||||
go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
|
||||
go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
|
||||
go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
|
||||
go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
|
||||
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
@@ -507,6 +532,7 @@ golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
@@ -623,6 +649,8 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
@@ -668,6 +696,9 @@ gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkp
|
||||
gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
|
||||
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
@@ -693,8 +724,8 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE=
|
||||
modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY=
|
||||
modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
|
||||
modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
|
||||
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
|
||||
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
|
||||
modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds=
|
||||
modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
|
||||
modernc.org/sqlite v1.23.1 h1:nrSBg4aRQQwq59JpvGEQ15tNxoO5pX/kUjcRNwSAGQM=
|
||||
|
||||
@@ -192,6 +192,7 @@ type AlertMute struct {
|
||||
Activated int `json:"activated" gorm:"-"` // 0: not activated, 1: activated
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
CreateAt int64 `json:"create_at"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
ITags []TagFilter `json:"-" gorm:"-"` // inner tags
|
||||
|
||||
@@ -45,6 +45,7 @@ type AlertSubscribe struct {
|
||||
CreateAt int64 `json:"create_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
ITags []TagFilter `json:"-" gorm:"-"` // inner tags
|
||||
BusiGroups ormx.JSONArr `json:"busi_groups"`
|
||||
IBusiGroups []TagFilter `json:"-" gorm:"-"` // inner busiGroups
|
||||
|
||||
@@ -46,6 +46,12 @@ func NewAnomalyPoint(key string, labels map[string]string, ts int64, value float
|
||||
}
|
||||
|
||||
func (v *AnomalyPoint) ReadableValue() string {
|
||||
if len(v.ValuesUnit) > 0 {
|
||||
for _, unit := range v.ValuesUnit { // 配置了单位,优先用配置了单位的值
|
||||
return unit.Text
|
||||
}
|
||||
}
|
||||
|
||||
ret := fmt.Sprintf("%.5f", v.Value)
|
||||
ret = strings.TrimRight(ret, "0")
|
||||
return strings.TrimRight(ret, ".")
|
||||
|
||||
@@ -19,22 +19,23 @@ const (
|
||||
)
|
||||
|
||||
type Board struct {
|
||||
Id int64 `json:"id" gorm:"primaryKey"`
|
||||
GroupId int64 `json:"group_id"`
|
||||
Name string `json:"name"`
|
||||
Ident string `json:"ident"`
|
||||
Tags string `json:"tags"`
|
||||
Note string `json:"note"`
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
Configs string `json:"configs" gorm:"-"`
|
||||
Public int `json:"public"` // 0: false, 1: true
|
||||
PublicCate int `json:"public_cate"` // 0: anonymous, 1: login, 2: busi
|
||||
Bgids []int64 `json:"bgids" gorm:"-"`
|
||||
BuiltIn int `json:"built_in"` // 0: false, 1: true
|
||||
Hide int `json:"hide"` // 0: false, 1: true
|
||||
Id int64 `json:"id" gorm:"primaryKey"`
|
||||
GroupId int64 `json:"group_id"`
|
||||
Name string `json:"name"`
|
||||
Ident string `json:"ident"`
|
||||
Tags string `json:"tags"`
|
||||
Note string `json:"note"`
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
Configs string `json:"configs" gorm:"-"`
|
||||
Public int `json:"public"` // 0: false, 1: true
|
||||
PublicCate int `json:"public_cate"` // 0: anonymous, 1: login, 2: busi
|
||||
Bgids []int64 `json:"bgids" gorm:"-"`
|
||||
BuiltIn int `json:"built_in"` // 0: false, 1: true
|
||||
Hide int `json:"hide"` // 0: false, 1: true
|
||||
}
|
||||
|
||||
func (b *Board) TableName() string {
|
||||
|
||||
@@ -9,14 +9,15 @@ import (
|
||||
)
|
||||
|
||||
type MetricFilter struct {
|
||||
ID int64 `json:"id" gorm:"primaryKey;type:bigint;autoIncrement;comment:'unique identifier'"`
|
||||
Name string `json:"name" gorm:"type:varchar(191);not null;index:idx_metricfilter_name,sort:asc;comment:'name of metric filter'"`
|
||||
Configs string `json:"configs" gorm:"type:varchar(4096);not null;comment:'configuration of metric filter'"`
|
||||
GroupsPerm []GroupPerm `json:"groups_perm" gorm:"type:text;serializer:json;"`
|
||||
CreateAt int64 `json:"create_at" gorm:"type:bigint;not null;default:0;comment:'create time'"`
|
||||
CreateBy string `json:"create_by" gorm:"type:varchar(191);not null;default:'';comment:'creator'"`
|
||||
UpdateAt int64 `json:"update_at" gorm:"type:bigint;not null;default:0;comment:'update time'"`
|
||||
UpdateBy string `json:"update_by" gorm:"type:varchar(191);not null;default:'';comment:'updater'"`
|
||||
ID int64 `json:"id" gorm:"primaryKey;type:bigint;autoIncrement;comment:'unique identifier'"`
|
||||
Name string `json:"name" gorm:"type:varchar(191);not null;index:idx_metricfilter_name,sort:asc;comment:'name of metric filter'"`
|
||||
Configs string `json:"configs" gorm:"type:varchar(4096);not null;comment:'configuration of metric filter'"`
|
||||
GroupsPerm []GroupPerm `json:"groups_perm" gorm:"type:text;serializer:json;"`
|
||||
CreateAt int64 `json:"create_at" gorm:"type:bigint;not null;default:0;comment:'create time'"`
|
||||
CreateBy string `json:"create_by" gorm:"type:varchar(191);not null;default:'';comment:'creator'"`
|
||||
UpdateAt int64 `json:"update_at" gorm:"type:bigint;not null;default:0;comment:'update time'"`
|
||||
UpdateBy string `json:"update_by" gorm:"type:varchar(191);not null;default:'';comment:'updater'"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
}
|
||||
|
||||
type GroupPerm struct {
|
||||
|
||||
@@ -12,16 +12,17 @@ import (
|
||||
)
|
||||
|
||||
type BusiGroup struct {
|
||||
Id int64 `json:"id" gorm:"primaryKey"`
|
||||
Name string `json:"name"`
|
||||
LabelEnable int `json:"label_enable"`
|
||||
LabelValue string `json:"label_value"`
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UserGroups []UserGroupWithPermFlag `json:"user_groups" gorm:"-"`
|
||||
DB *gorm.DB `json:"-" gorm:"-"`
|
||||
Id int64 `json:"id" gorm:"primaryKey"`
|
||||
Name string `json:"name"`
|
||||
LabelEnable int `json:"label_enable"`
|
||||
LabelValue string `json:"label_value"`
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
UserGroups []UserGroupWithPermFlag `json:"user_groups" gorm:"-"`
|
||||
DB *gorm.DB `json:"-" gorm:"-"`
|
||||
}
|
||||
|
||||
func New(db *gorm.DB) *BusiGroup {
|
||||
|
||||
@@ -20,16 +20,17 @@ import (
|
||||
)
|
||||
|
||||
type Configs struct { //ckey+external
|
||||
Id int64 `json:"id" gorm:"primaryKey"`
|
||||
Ckey string `json:"ckey"` // Before inserting external configs, check if they are already defined as built-in configs.
|
||||
Cval string `json:"cval"`
|
||||
Note string `json:"note"`
|
||||
External int `json:"external"` //Controls frontend list display: 0 hides built-in (default), 1 shows external
|
||||
Encrypted int `json:"encrypted"` //Indicates whether the value(cval) is encrypted (1 for ciphertext, 0 for plaintext(default))
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
Id int64 `json:"id" gorm:"primaryKey"`
|
||||
Ckey string `json:"ckey"` // Before inserting external configs, check if they are already defined as built-in configs.
|
||||
Cval string `json:"cval"`
|
||||
Note string `json:"note"`
|
||||
External int `json:"external"` //Controls frontend list display: 0 hides built-in (default), 1 shows external
|
||||
Encrypted int `json:"encrypted"` //Indicates whether the value(cval) is encrypted (1 for ciphertext, 0 for plaintext(default))
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
}
|
||||
|
||||
func (Configs) TableName() string {
|
||||
|
||||
@@ -7,19 +7,20 @@ import (
|
||||
)
|
||||
|
||||
type DashAnnotation struct {
|
||||
Id int64 `json:"id" gorm:"primaryKey"`
|
||||
DashboardId int64 `json:"dashboard_id"`
|
||||
PanelId string `json:"panel_id"`
|
||||
Tags string `json:"-"`
|
||||
TagsJSON []string `json:"tags" gorm:"-"`
|
||||
Description string `json:"description"`
|
||||
Config string `json:"config"`
|
||||
TimeStart int64 `json:"time_start"`
|
||||
TimeEnd int64 `json:"time_end"`
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
Id int64 `json:"id" gorm:"primaryKey"`
|
||||
DashboardId int64 `json:"dashboard_id"`
|
||||
PanelId string `json:"panel_id"`
|
||||
Tags string `json:"-"`
|
||||
TagsJSON []string `json:"tags" gorm:"-"`
|
||||
Description string `json:"description"`
|
||||
Config string `json:"config"`
|
||||
TimeStart int64 `json:"time_start"`
|
||||
TimeEnd int64 `json:"time_end"`
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
}
|
||||
|
||||
func (da *DashAnnotation) TableName() string {
|
||||
|
||||
@@ -517,7 +517,8 @@ func (ds *Datasource) Encrypt(openRsa bool, publicKeyData []byte) error {
|
||||
// Decrypt 用于 edge 将从中心同步的数据源解密,中心不可调用
|
||||
func (ds *Datasource) Decrypt() error {
|
||||
if rsaConfig == nil {
|
||||
return errors.New("rsa config is nil")
|
||||
logger.Debugf("datasource %s rsa config is nil", ds.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
if !rsaConfig.OpenRSA {
|
||||
|
||||
@@ -14,15 +14,16 @@ import (
|
||||
)
|
||||
|
||||
type EmbeddedProduct struct {
|
||||
ID int64 `json:"id" gorm:"primaryKey"` // 主键
|
||||
Name string `json:"name" gorm:"column:name;type:varchar(255)"`
|
||||
URL string `json:"url" gorm:"column:url;type:varchar(255)"`
|
||||
IsPrivate bool `json:"is_private" gorm:"column:is_private;type:boolean"`
|
||||
TeamIDs []int64 `json:"team_ids" gorm:"serializer:json"`
|
||||
CreateAt int64 `json:"create_at" gorm:"column:create_at;not null;default:0"`
|
||||
CreateBy string `json:"create_by" gorm:"column:create_by;type:varchar(64);not null;default:''"`
|
||||
UpdateAt int64 `json:"update_at" gorm:"column:update_at;not null;default:0"`
|
||||
UpdateBy string `json:"update_by" gorm:"column:update_by;type:varchar(64);not null;default:''"`
|
||||
ID int64 `json:"id" gorm:"primaryKey"` // 主键
|
||||
Name string `json:"name" gorm:"column:name;type:varchar(255)"`
|
||||
URL string `json:"url" gorm:"column:url;type:varchar(255)"`
|
||||
IsPrivate bool `json:"is_private" gorm:"column:is_private;type:boolean"`
|
||||
TeamIDs []int64 `json:"team_ids" gorm:"serializer:json"`
|
||||
CreateAt int64 `json:"create_at" gorm:"column:create_at;not null;default:0"`
|
||||
CreateBy string `json:"create_by" gorm:"column:create_by;type:varchar(64);not null;default:''"`
|
||||
UpdateAt int64 `json:"update_at" gorm:"column:update_at;not null;default:0"`
|
||||
UpdateBy string `json:"update_by" gorm:"column:update_by;type:varchar(64);not null;default:''"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
}
|
||||
|
||||
func (e *EmbeddedProduct) TableName() string {
|
||||
|
||||
@@ -24,6 +24,7 @@ type EsIndexPattern struct {
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
CrossClusterEnabled int `json:"cross_cluster_enabled"`
|
||||
Note string `json:"note"`
|
||||
}
|
||||
|
||||
@@ -29,13 +29,14 @@ type EventPipeline struct {
|
||||
Nodes []WorkflowNode `json:"nodes,omitempty" gorm:"type:text;serializer:json"`
|
||||
// 节点连接关系
|
||||
Connections Connections `json:"connections,omitempty" gorm:"type:text;serializer:json"`
|
||||
// 环境变量(工作流级别的配置变量)
|
||||
EnvVariables []EnvVariable `json:"env_variables,omitempty" gorm:"type:text;serializer:json"`
|
||||
// 输入参数(工作流级别的配置变量)
|
||||
Inputs []InputVariable `json:"inputs,omitempty" gorm:"type:text;serializer:json"`
|
||||
|
||||
CreateAt int64 `json:"create_at" gorm:"type:bigint"`
|
||||
CreateBy string `json:"create_by" gorm:"type:varchar(64)"`
|
||||
UpdateAt int64 `json:"update_at" gorm:"type:bigint"`
|
||||
UpdateBy string `json:"update_by" gorm:"type:varchar(64)"`
|
||||
CreateAt int64 `json:"create_at" gorm:"type:bigint"`
|
||||
CreateBy string `json:"create_by" gorm:"type:varchar(64)"`
|
||||
UpdateAt int64 `json:"update_at" gorm:"type:bigint"`
|
||||
UpdateBy string `json:"update_by" gorm:"type:varchar(64)"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
}
|
||||
|
||||
type ProcessorConfig struct {
|
||||
@@ -73,8 +74,8 @@ func (e *EventPipeline) Verify() error {
|
||||
if e.Connections == nil {
|
||||
e.Connections = make(Connections)
|
||||
}
|
||||
if e.EnvVariables == nil {
|
||||
e.EnvVariables = make([]EnvVariable, 0)
|
||||
if e.Inputs == nil {
|
||||
e.Inputs = make([]InputVariable, 0)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -245,36 +246,10 @@ func (e *EventPipeline) FillWorkflowFields() {
|
||||
}
|
||||
}
|
||||
|
||||
func (e *EventPipeline) GetEnvMap() map[string]string {
|
||||
envMap := make(map[string]string)
|
||||
for _, v := range e.EnvVariables {
|
||||
envMap[v.Key] = v.Value
|
||||
func (e *EventPipeline) GetInputsMap() map[string]string {
|
||||
inputsMap := make(map[string]string)
|
||||
for _, v := range e.Inputs {
|
||||
inputsMap[v.Key] = v.Value
|
||||
}
|
||||
return envMap
|
||||
}
|
||||
|
||||
func (e *EventPipeline) GetSecretKeys() map[string]bool {
|
||||
secretKeys := make(map[string]bool)
|
||||
for _, v := range e.EnvVariables {
|
||||
if v.Secret {
|
||||
secretKeys[v.Key] = true
|
||||
}
|
||||
}
|
||||
return secretKeys
|
||||
}
|
||||
|
||||
func (e *EventPipeline) ValidateEnvVariables(overrides map[string]string) error {
|
||||
// 合并默认值和覆盖值
|
||||
merged := e.GetEnvMap()
|
||||
for k, v := range overrides {
|
||||
merged[k] = v
|
||||
}
|
||||
|
||||
// 校验必填项
|
||||
for _, v := range e.EnvVariables {
|
||||
if v.Required && merged[v.Key] == "" {
|
||||
return fmt.Errorf("required env variable %s is missing", v.Key)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return inputsMap
|
||||
}
|
||||
|
||||
@@ -2,9 +2,13 @@ package models
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/ccfos/nightingale/v6/pkg/ctx"
|
||||
"github.com/ccfos/nightingale/v6/pkg/poster"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// 执行状态常量
|
||||
@@ -42,8 +46,8 @@ type EventPipelineExecution struct {
|
||||
// 触发者信息
|
||||
TriggerBy string `json:"trigger_by" gorm:"type:varchar(64)"`
|
||||
|
||||
// 环境变量快照(脱敏后存储)
|
||||
EnvSnapshot string `json:"env_snapshot,omitempty" gorm:"type:text"`
|
||||
// 输入参数快照(脱敏后存储)
|
||||
InputsSnapshot string `json:"inputs_snapshot,omitempty" gorm:"type:text"`
|
||||
}
|
||||
|
||||
func (e *EventPipelineExecution) TableName() string {
|
||||
@@ -70,28 +74,31 @@ func (e *EventPipelineExecution) GetNodeResults() ([]*NodeExecutionResult, error
|
||||
return results, err
|
||||
}
|
||||
|
||||
// SetEnvSnapshot 设置环境变量快照(脱敏后存储)
|
||||
func (e *EventPipelineExecution) SetEnvSnapshot(env map[string]string) error {
|
||||
data, err := json.Marshal(env)
|
||||
// SetInputsSnapshot 设置输入参数快照(脱敏后存储)
|
||||
func (e *EventPipelineExecution) SetInputsSnapshot(inputs map[string]string) error {
|
||||
data, err := json.Marshal(inputs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
e.EnvSnapshot = string(data)
|
||||
e.InputsSnapshot = string(data)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetEnvSnapshot 获取环境变量快照
|
||||
func (e *EventPipelineExecution) GetEnvSnapshot() (map[string]string, error) {
|
||||
if e.EnvSnapshot == "" {
|
||||
// GetInputsSnapshot 获取输入参数快照
|
||||
func (e *EventPipelineExecution) GetInputsSnapshot() (map[string]string, error) {
|
||||
if e.InputsSnapshot == "" {
|
||||
return nil, nil
|
||||
}
|
||||
var env map[string]string
|
||||
err := json.Unmarshal([]byte(e.EnvSnapshot), &env)
|
||||
return env, err
|
||||
var inputs map[string]string
|
||||
err := json.Unmarshal([]byte(e.InputsSnapshot), &inputs)
|
||||
return inputs, err
|
||||
}
|
||||
|
||||
// CreateEventPipelineExecution 创建执行记录
|
||||
func CreateEventPipelineExecution(c *ctx.Context, execution *EventPipelineExecution) error {
|
||||
if !c.IsCenter {
|
||||
return poster.PostByUrls(c, "/v1/n9e/event-pipeline-execution", execution)
|
||||
}
|
||||
return DB(c).Create(execution).Error
|
||||
}
|
||||
|
||||
@@ -105,6 +112,9 @@ func GetEventPipelineExecution(c *ctx.Context, id string) (*EventPipelineExecuti
|
||||
var execution EventPipelineExecution
|
||||
err := DB(c).Where("id = ?", id).First(&execution).Error
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return &execution, nil
|
||||
@@ -145,12 +155,15 @@ func ListEventPipelineExecutionsByEventID(c *ctx.Context, eventID int64) ([]*Eve
|
||||
}
|
||||
|
||||
// ListAllEventPipelineExecutions 获取所有 Pipeline 的执行记录列表
|
||||
func ListAllEventPipelineExecutions(c *ctx.Context, pipelineName, mode, status string, limit, offset int) ([]*EventPipelineExecution, int64, error) {
|
||||
func ListAllEventPipelineExecutions(c *ctx.Context, pipelineId int64, pipelineName, mode, status string, limit, offset int) ([]*EventPipelineExecution, int64, error) {
|
||||
var executions []*EventPipelineExecution
|
||||
var total int64
|
||||
|
||||
session := DB(c).Model(&EventPipelineExecution{})
|
||||
|
||||
if pipelineId > 0 {
|
||||
session = session.Where("pipeline_id = ?", pipelineId)
|
||||
}
|
||||
if pipelineName != "" {
|
||||
session = session.Where("pipeline_name LIKE ?", "%"+pipelineName+"%")
|
||||
}
|
||||
@@ -270,8 +283,8 @@ func GetEventPipelineExecutionStatistics(c *ctx.Context, pipelineID int64) (*Eve
|
||||
// EventPipelineExecutionDetail 执行详情(包含解析后的节点结果)
|
||||
type EventPipelineExecutionDetail struct {
|
||||
EventPipelineExecution
|
||||
NodeResultsParsed []*NodeExecutionResult `json:"node_results_parsed"`
|
||||
EnvSnapshotParsed map[string]string `json:"env_snapshot_parsed"`
|
||||
NodeResultsParsed []*NodeExecutionResult `json:"node_results_parsed"`
|
||||
InputsSnapshotParsed map[string]string `json:"inputs_snapshot_parsed"`
|
||||
}
|
||||
|
||||
// GetEventPipelineExecutionDetail 获取执行详情
|
||||
@@ -281,6 +294,10 @@ func GetEventPipelineExecutionDetail(c *ctx.Context, id string) (*EventPipelineE
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if execution == nil {
|
||||
return &EventPipelineExecutionDetail{}, nil
|
||||
}
|
||||
|
||||
detail := &EventPipelineExecutionDetail{
|
||||
EventPipelineExecution: *execution,
|
||||
}
|
||||
@@ -292,12 +309,12 @@ func GetEventPipelineExecutionDetail(c *ctx.Context, id string) (*EventPipelineE
|
||||
}
|
||||
detail.NodeResultsParsed = nodeResults
|
||||
|
||||
// 解析环境变量快照
|
||||
envSnapshot, err := execution.GetEnvSnapshot()
|
||||
// 解析输入参数快照
|
||||
inputsSnapshot, err := execution.GetInputsSnapshot()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse env snapshot error: %w", err)
|
||||
return nil, fmt.Errorf("parse inputs snapshot error: %w", err)
|
||||
}
|
||||
detail.EnvSnapshotParsed = envSnapshot
|
||||
detail.InputsSnapshotParsed = inputsSnapshot
|
||||
|
||||
return detail, nil
|
||||
}
|
||||
|
||||
@@ -31,6 +31,7 @@ type MessageTemplate struct {
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
}
|
||||
|
||||
func MessageTemplateStatistics(ctx *ctx.Context) (*Statistics, error) {
|
||||
|
||||
@@ -57,11 +57,12 @@ type NotifyChannelConfig struct {
|
||||
RequestType string `json:"request_type"` // http, stmp, script, flashduty
|
||||
RequestConfig *RequestConfig `json:"request_config,omitempty" gorm:"serializer:json"`
|
||||
|
||||
Weight int `json:"weight"` // 权重,根据此字段对内置模板进行排序
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
Weight int `json:"weight"` // 权重,根据此字段对内置模板进行排序
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
}
|
||||
|
||||
func (ncc *NotifyChannelConfig) TableName() string {
|
||||
|
||||
@@ -24,10 +24,11 @@ type NotifyRule struct {
|
||||
NotifyConfigs []NotifyConfig `json:"notify_configs" gorm:"serializer:json"`
|
||||
ExtraConfig interface{} `json:"extra_config,omitempty" gorm:"serializer:json"`
|
||||
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
}
|
||||
|
||||
type PipelineConfig struct {
|
||||
|
||||
@@ -17,15 +17,16 @@ import (
|
||||
)
|
||||
|
||||
type NotifyTpl struct {
|
||||
Id int64 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Channel string `json:"channel"`
|
||||
Content string `json:"content"`
|
||||
BuiltIn bool `json:"built_in" gorm:"-"`
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
Id int64 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Channel string `json:"channel"`
|
||||
Content string `json:"content"`
|
||||
BuiltIn bool `json:"built_in" gorm:"-"`
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
}
|
||||
|
||||
func (n *NotifyTpl) TableName() string {
|
||||
|
||||
@@ -36,6 +36,7 @@ type RecordingRule struct {
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
}
|
||||
|
||||
type QueryConfig struct {
|
||||
@@ -44,6 +45,7 @@ type QueryConfig struct {
|
||||
Exp string `json:"exp"`
|
||||
WriteDatasourceId int64 `json:"write_datasource_id"`
|
||||
Delay int `json:"delay"`
|
||||
WritebackEnabled bool `json:"writeback_enabled"` // 是否写入与查询数据源相同的数据源
|
||||
}
|
||||
|
||||
type Query struct {
|
||||
@@ -211,7 +213,6 @@ func (re *RecordingRule) Update(ctx *ctx.Context, ref RecordingRule) error {
|
||||
|
||||
ref.FE2DB()
|
||||
ref.Id = re.Id
|
||||
ref.GroupId = re.GroupId
|
||||
ref.CreateAt = re.CreateAt
|
||||
ref.CreateBy = re.CreateBy
|
||||
ref.UpdateAt = time.Now().Unix()
|
||||
|
||||
@@ -16,16 +16,17 @@ var (
|
||||
)
|
||||
|
||||
type SavedView struct {
|
||||
Id int64 `json:"id" gorm:"primaryKey;autoIncrement"`
|
||||
Name string `json:"name" gorm:"type:varchar(255);not null"`
|
||||
Page string `json:"page" gorm:"type:varchar(64);not null;index"`
|
||||
Filter string `json:"filter" gorm:"type:text"`
|
||||
PublicCate int `json:"public_cate" gorm:"default:0"` // 0: self, 1: team, 2: all
|
||||
Gids []int64 `json:"gids" gorm:"column:gids;type:text;serializer:json"`
|
||||
CreateAt int64 `json:"create_at" gorm:"type:bigint;not null;default:0"`
|
||||
CreateBy string `json:"create_by" gorm:"type:varchar(64);index"`
|
||||
UpdateAt int64 `json:"update_at" gorm:"type:bigint;not null;default:0"`
|
||||
UpdateBy string `json:"update_by" gorm:"type:varchar(64)"`
|
||||
Id int64 `json:"id" gorm:"primaryKey;autoIncrement"`
|
||||
Name string `json:"name" gorm:"type:varchar(255);not null"`
|
||||
Page string `json:"page" gorm:"type:varchar(64);not null;index"`
|
||||
Filter string `json:"filter" gorm:"type:text"`
|
||||
PublicCate int `json:"public_cate" gorm:"default:0"` // 0: self, 1: team, 2: all
|
||||
Gids []int64 `json:"gids" gorm:"column:gids;type:text;serializer:json"`
|
||||
CreateAt int64 `json:"create_at" gorm:"type:bigint;not null;default:0"`
|
||||
CreateBy string `json:"create_by" gorm:"type:varchar(64);index"`
|
||||
UpdateAt int64 `json:"update_at" gorm:"type:bigint;not null;default:0"`
|
||||
UpdateBy string `json:"update_by" gorm:"type:varchar(64)"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
|
||||
// 查询时填充的字段
|
||||
IsFavorite bool `json:"is_favorite" gorm:"-"`
|
||||
|
||||
@@ -15,22 +15,23 @@ import (
|
||||
)
|
||||
|
||||
type TaskTpl struct {
|
||||
Id int64 `json:"id" gorm:"primaryKey"`
|
||||
GroupId int64 `json:"group_id"`
|
||||
Title string `json:"title"`
|
||||
Batch int `json:"batch"`
|
||||
Tolerance int `json:"tolerance"`
|
||||
Timeout int `json:"timeout"`
|
||||
Pause string `json:"pause"`
|
||||
Script string `json:"script"`
|
||||
Args string `json:"args"`
|
||||
Tags string `json:"-"`
|
||||
TagsJSON []string `json:"tags" gorm:"-"`
|
||||
Account string `json:"account"`
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
Id int64 `json:"id" gorm:"primaryKey"`
|
||||
GroupId int64 `json:"group_id"`
|
||||
Title string `json:"title"`
|
||||
Batch int `json:"batch"`
|
||||
Tolerance int `json:"tolerance"`
|
||||
Timeout int `json:"timeout"`
|
||||
Pause string `json:"pause"`
|
||||
Script string `json:"script"`
|
||||
Args string `json:"args"`
|
||||
Tags string `json:"-"`
|
||||
TagsJSON []string `json:"tags" gorm:"-"`
|
||||
Account string `json:"account"`
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
}
|
||||
|
||||
func (t *TaskTpl) TableName() string {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -315,6 +316,18 @@ func (u *User) UpdatePassword(ctx *ctx.Context, password, updateBy string) error
|
||||
}).Error
|
||||
}
|
||||
|
||||
func (u *User) AddToUserGroups(ctx *ctx.Context, userGroupIds []int64) error {
|
||||
|
||||
count := len(userGroupIds)
|
||||
for i := 0; i < count; i++ {
|
||||
err := UserGroupMemberAdd(ctx, userGroupIds[i], u.Id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func UpdateUserLastActiveTime(ctx *ctx.Context, userId int64, lastActiveTime int64) error {
|
||||
return DB(ctx).Model(&User{}).Where("id = ?", userId).Updates(map[string]interface{}{
|
||||
"last_active_time": lastActiveTime,
|
||||
@@ -410,6 +423,80 @@ func UserMapGet(ctx *ctx.Context, where string, args ...interface{}) map[string]
|
||||
return um
|
||||
}
|
||||
|
||||
// UserNicknameMap returns a deduplicated username -> nickname map.
|
||||
func UserNicknameMap(ctx *ctx.Context, names []string) map[string]string {
|
||||
m := make(map[string]string)
|
||||
if len(names) == 0 {
|
||||
return m
|
||||
}
|
||||
seen := make(map[string]struct{}, len(names))
|
||||
unique := make([]string, 0, len(names))
|
||||
for _, name := range names {
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := seen[name]; ok {
|
||||
continue
|
||||
}
|
||||
seen[name] = struct{}{}
|
||||
unique = append(unique, name)
|
||||
}
|
||||
if len(unique) == 0 {
|
||||
return m
|
||||
}
|
||||
users := UserMapGet(ctx, "username in (?)", unique)
|
||||
for username, user := range users {
|
||||
m[username] = user.Nickname
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// FillUpdateByNicknames fills the UpdateByNickname field for each element in items
|
||||
// by looking up the UpdateBy username. Supports both []T and []*T slices.
|
||||
func FillUpdateByNicknames[T any](ctx *ctx.Context, items []T) {
|
||||
if len(items) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
elemType := reflect.TypeOf(items).Elem()
|
||||
isPtr := elemType.Kind() == reflect.Ptr
|
||||
if isPtr {
|
||||
elemType = elemType.Elem()
|
||||
}
|
||||
|
||||
updateByField, ok1 := elemType.FieldByName("UpdateBy")
|
||||
nicknameField, ok2 := elemType.FieldByName("UpdateByNickname")
|
||||
if !ok1 || !ok2 {
|
||||
return
|
||||
}
|
||||
|
||||
names := make([]string, 0, len(items))
|
||||
for i := range items {
|
||||
v := reflect.ValueOf(&items[i]).Elem()
|
||||
if isPtr {
|
||||
if v.IsNil() {
|
||||
continue
|
||||
}
|
||||
v = v.Elem()
|
||||
}
|
||||
names = append(names, v.FieldByIndex(updateByField.Index).String())
|
||||
}
|
||||
|
||||
nm := UserNicknameMap(ctx, names)
|
||||
|
||||
for i := range items {
|
||||
v := reflect.ValueOf(&items[i]).Elem()
|
||||
if isPtr {
|
||||
if v.IsNil() {
|
||||
continue
|
||||
}
|
||||
v = v.Elem()
|
||||
}
|
||||
updateBy := v.FieldByIndex(updateByField.Index).String()
|
||||
v.FieldByIndex(nicknameField.Index).SetString(nm[updateBy])
|
||||
}
|
||||
}
|
||||
|
||||
func UserGetByUsername(ctx *ctx.Context, username string) (*User, error) {
|
||||
return UserGet(ctx, "username=?", username)
|
||||
}
|
||||
|
||||
@@ -12,16 +12,17 @@ import (
|
||||
)
|
||||
|
||||
type UserGroup struct {
|
||||
Id int64 `json:"id" gorm:"primaryKey"`
|
||||
Name string `json:"name"`
|
||||
Note string `json:"note"`
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UserIds []int64 `json:"-" gorm:"-"`
|
||||
Users []User `json:"users" gorm:"-"`
|
||||
BusiGroups []*BusiGroup `json:"busi_groups" gorm:"-"`
|
||||
Id int64 `json:"id" gorm:"primaryKey"`
|
||||
Name string `json:"name"`
|
||||
Note string `json:"note"`
|
||||
CreateAt int64 `json:"create_at"`
|
||||
CreateBy string `json:"create_by"`
|
||||
UpdateAt int64 `json:"update_at"`
|
||||
UpdateBy string `json:"update_by"`
|
||||
UpdateByNickname string `json:"update_by_nickname" gorm:"-"`
|
||||
UserIds []int64 `json:"-" gorm:"-"`
|
||||
Users []User `json:"users" gorm:"-"`
|
||||
BusiGroups []*BusiGroup `json:"busi_groups" gorm:"-"`
|
||||
}
|
||||
|
||||
func (ug *UserGroup) TableName() string {
|
||||
|
||||
@@ -33,13 +33,11 @@ type ConnectionTarget struct {
|
||||
Index int `json:"index"` // 目标节点的输入端口索引
|
||||
}
|
||||
|
||||
// EnvVariable 环境变量
|
||||
type EnvVariable struct {
|
||||
// InputVariable 输入参数
|
||||
type InputVariable struct {
|
||||
Key string `json:"key"` // 变量名
|
||||
Value string `json:"value"` // 默认值
|
||||
Description string `json:"description,omitempty"` // 描述
|
||||
Secret bool `json:"secret,omitempty"` // 是否敏感(日志脱敏)
|
||||
Required bool `json:"required,omitempty"` // 是否必填
|
||||
}
|
||||
|
||||
// NodeOutput 节点执行输出
|
||||
@@ -104,8 +102,8 @@ type WorkflowTriggerContext struct {
|
||||
// 请求ID(API/Cron 触发使用)
|
||||
RequestID string `json:"request_id"`
|
||||
|
||||
// 环境变量覆盖
|
||||
EnvOverrides map[string]string `json:"env_overrides"`
|
||||
// 输入参数覆盖
|
||||
InputsOverrides map[string]string `json:"inputs_overrides"`
|
||||
|
||||
// 流式输出(API 调用时动态指定)
|
||||
Stream bool `json:"stream"`
|
||||
@@ -118,7 +116,7 @@ type WorkflowTriggerContext struct {
|
||||
|
||||
type WorkflowContext struct {
|
||||
Event *AlertCurEvent `json:"event"` // 当前事件
|
||||
Env map[string]string `json:"env"` // 环境变量/配置(静态,来自 Pipeline 配置)
|
||||
Inputs map[string]string `json:"inputs"` // 前置输入参数(静态,用户配置)
|
||||
Vars map[string]interface{} `json:"vars"` // 节点间传递的数据(动态,运行时产生)
|
||||
Metadata map[string]string `json:"metadata"` // 执行元数据(request_id、start_time 等)
|
||||
Output map[string]interface{} `json:"output,omitempty"` // 输出结果(非告警场景使用)
|
||||
@@ -128,19 +126,6 @@ type WorkflowContext struct {
|
||||
StreamChan chan *StreamChunk `json:"-"` // 流式数据通道(不序列化)
|
||||
}
|
||||
|
||||
// SanitizedEnv 返回脱敏后的环境变量(用于日志和存储)
|
||||
func (ctx *WorkflowContext) SanitizedEnv(secretKeys map[string]bool) map[string]string {
|
||||
sanitized := make(map[string]string)
|
||||
for k, v := range ctx.Env {
|
||||
if secretKeys[k] {
|
||||
sanitized[k] = "******"
|
||||
} else {
|
||||
sanitized[k] = v
|
||||
}
|
||||
}
|
||||
return sanitized
|
||||
}
|
||||
|
||||
// StreamChunk 类型常量
|
||||
const (
|
||||
StreamTypeThinking = "thinking" // AI 思考过程(ReAct Thought)
|
||||
|
||||
348
pkg/feishu/feishu.go
Normal file
348
pkg/feishu/feishu.go
Normal file
@@ -0,0 +1,348 @@
|
||||
package feishu
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ccfos/nightingale/v6/storage"
|
||||
"github.com/google/uuid"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/toolkits/pkg/logger"
|
||||
|
||||
lark "github.com/larksuite/oapi-sdk-go/v3"
|
||||
larkcore "github.com/larksuite/oapi-sdk-go/v3/core"
|
||||
larkauthen "github.com/larksuite/oapi-sdk-go/v3/service/authen/v1"
|
||||
larkcontact "github.com/larksuite/oapi-sdk-go/v3/service/contact/v3"
|
||||
)
|
||||
|
||||
const defaultAuthURL = "https://accounts.feishu.cn/open-apis/authen/v1/authorize"
|
||||
const SsoTypeName = "feishu"
|
||||
|
||||
type SsoClient struct {
|
||||
Enable bool
|
||||
FeiShuConfig *Config `json:"-"`
|
||||
Ctx context.Context
|
||||
client *lark.Client
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
Enable bool `json:"enable"`
|
||||
AuthURL string `json:"auth_url"`
|
||||
DisplayName string `json:"display_name"`
|
||||
AppID string `json:"app_id"`
|
||||
AppSecret string `json:"app_secret"`
|
||||
RedirectURL string `json:"redirect_url"`
|
||||
UsernameField string `json:"username_field"` // name, email, phone
|
||||
FeiShuEndpoint string `json:"feishu_endpoint"` // 飞书API端点,默认为 open.feishu.cn
|
||||
Proxy string `json:"proxy"`
|
||||
CoverAttributes bool `json:"cover_attributes"`
|
||||
DefaultRoles []string `json:"default_roles"`
|
||||
DefaultUserGroups []int64 `json:"default_user_groups"`
|
||||
}
|
||||
|
||||
type CallbackOutput struct {
|
||||
Redirect string `json:"redirect"`
|
||||
Msg string `json:"msg"`
|
||||
AccessToken string `json:"accessToken"`
|
||||
Username string `json:"Username"`
|
||||
Nickname string `json:"Nickname"`
|
||||
Phone string `yaml:"Phone"`
|
||||
Email string `yaml:"Email"`
|
||||
}
|
||||
|
||||
func wrapStateKey(key string) string {
|
||||
return "n9e_feishu_oauth_" + key
|
||||
}
|
||||
|
||||
// createClient 创建飞书SDK客户端(v3版本)
|
||||
func (c *Config) createClient() (*lark.Client, error) {
|
||||
opts := []lark.ClientOptionFunc{
|
||||
lark.WithLogLevel(larkcore.LogLevelInfo),
|
||||
lark.WithEnableTokenCache(true), // 启用token缓存
|
||||
}
|
||||
|
||||
if c.FeiShuEndpoint != "" {
|
||||
lark.FeishuBaseUrl = c.FeiShuEndpoint
|
||||
}
|
||||
|
||||
// 创建客户端(v3版本)
|
||||
client := lark.NewClient(
|
||||
c.AppID,
|
||||
c.AppSecret,
|
||||
opts...,
|
||||
)
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func New(cf Config) *SsoClient {
|
||||
var s = &SsoClient{}
|
||||
if !cf.Enable {
|
||||
return s
|
||||
}
|
||||
s.Reload(cf)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *SsoClient) AuthCodeURL(state string) (string, error) {
|
||||
var buf bytes.Buffer
|
||||
feishuAuthURL := defaultAuthURL
|
||||
if s.FeiShuConfig.AuthURL != "" {
|
||||
feishuAuthURL = s.FeiShuConfig.AuthURL
|
||||
}
|
||||
buf.WriteString(feishuAuthURL)
|
||||
v := url.Values{
|
||||
"app_id": {s.FeiShuConfig.AppID},
|
||||
"state": {state},
|
||||
}
|
||||
v.Set("redirect_uri", s.FeiShuConfig.RedirectURL)
|
||||
|
||||
if s.FeiShuConfig.RedirectURL == "" {
|
||||
return "", errors.New("FeiShu OAuth RedirectURL is empty")
|
||||
}
|
||||
|
||||
if strings.Contains(feishuAuthURL, "?") {
|
||||
buf.WriteByte('&')
|
||||
} else {
|
||||
buf.WriteByte('?')
|
||||
}
|
||||
buf.WriteString(v.Encode())
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// GetUserToken 通过授权码获取用户access token和user_id(使用SDK v3)
|
||||
func (s *SsoClient) GetUserToken(code string) (string, string, error) {
|
||||
if s.client == nil {
|
||||
return "", "", errors.New("feishu client is not initialized")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// 使用SDK v3的authen服务获取access token
|
||||
req := larkauthen.NewCreateAccessTokenReqBuilder().
|
||||
Body(larkauthen.NewCreateAccessTokenReqBodyBuilder().
|
||||
GrantType("authorization_code").
|
||||
Code(code).
|
||||
Build()).
|
||||
Build()
|
||||
|
||||
resp, err := s.client.Authen.AccessToken.Create(ctx, req)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("feishu get access token error: %w", err)
|
||||
}
|
||||
|
||||
// 检查响应
|
||||
if !resp.Success() {
|
||||
return "", "", fmt.Errorf("feishu api error: code=%d, msg=%s", resp.Code, resp.Msg)
|
||||
}
|
||||
|
||||
if resp.Data == nil {
|
||||
return "", "", errors.New("feishu api returned empty data")
|
||||
}
|
||||
|
||||
userID := ""
|
||||
if resp.Data.UserId != nil {
|
||||
userID = *resp.Data.UserId
|
||||
}
|
||||
if userID == "" {
|
||||
return "", "", errors.New("feishu api returned empty user_id")
|
||||
}
|
||||
|
||||
accessToken := ""
|
||||
if resp.Data.AccessToken != nil {
|
||||
accessToken = *resp.Data.AccessToken
|
||||
}
|
||||
if accessToken == "" {
|
||||
return "", "", errors.New("feishu api returned empty access_token")
|
||||
}
|
||||
|
||||
return accessToken, userID, nil
|
||||
}
|
||||
|
||||
// GetUserInfo 通过user_id获取用户详细信息(使用SDK v3)
|
||||
// 注意:SDK内部会自动管理token,所以不需要传入accessToken
|
||||
func (s *SsoClient) GetUserInfo(userID string) (*larkcontact.GetUserRespData, error) {
|
||||
if s.client == nil {
|
||||
return nil, errors.New("feishu client is not initialized")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// 使用SDK v3的contact服务获取用户详情
|
||||
req := larkcontact.NewGetUserReqBuilder().
|
||||
UserId(userID).
|
||||
UserIdType(larkcontact.UserIdTypeUserId).
|
||||
Build()
|
||||
|
||||
resp, err := s.client.Contact.User.Get(ctx, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("feishu get user detail error: %w", err)
|
||||
}
|
||||
|
||||
// 检查响应
|
||||
if !resp.Success() {
|
||||
return nil, fmt.Errorf("feishu api error: code=%d, msg=%s", resp.Code, resp.Msg)
|
||||
}
|
||||
|
||||
if resp.Data == nil || resp.Data.User == nil {
|
||||
return nil, errors.New("feishu api returned empty user data")
|
||||
}
|
||||
|
||||
return resp.Data, nil
|
||||
}
|
||||
|
||||
func (s *SsoClient) Reload(feishuConfig Config) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.Enable = feishuConfig.Enable
|
||||
s.FeiShuConfig = &feishuConfig
|
||||
|
||||
// 重新创建客户端
|
||||
if feishuConfig.Enable && feishuConfig.AppID != "" && feishuConfig.AppSecret != "" {
|
||||
client, err := feishuConfig.createClient()
|
||||
if err != nil {
|
||||
logger.Errorf("create feishu client error: %v", err)
|
||||
} else {
|
||||
s.client = client
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SsoClient) GetDisplayName() string {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
if !s.Enable {
|
||||
return ""
|
||||
}
|
||||
|
||||
return s.FeiShuConfig.DisplayName
|
||||
}
|
||||
|
||||
func (s *SsoClient) Authorize(redis storage.Redis, redirect string) (string, error) {
|
||||
state := uuid.New().String()
|
||||
ctx := context.Background()
|
||||
|
||||
err := redis.Set(ctx, wrapStateKey(state), redirect, time.Duration(300*time.Second)).Err()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
return s.AuthCodeURL(state)
|
||||
}
|
||||
|
||||
func (s *SsoClient) Callback(redis storage.Redis, ctx context.Context, code, state string) (*CallbackOutput, error) {
|
||||
// 通过code获取access token和user_id
|
||||
accessToken, userID, err := s.GetUserToken(code)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("feishu GetUserToken error: %s", err)
|
||||
}
|
||||
|
||||
// 获取用户详细信息
|
||||
userData, err := s.GetUserInfo(userID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("feishu GetUserInfo error: %s", err)
|
||||
}
|
||||
|
||||
// 获取redirect URL
|
||||
redirect := ""
|
||||
if redis != nil {
|
||||
redirect, err = fetchRedirect(redis, ctx, state)
|
||||
if err != nil {
|
||||
logger.Errorf("get redirect err:%v code:%s state:%s", err, code, state)
|
||||
}
|
||||
}
|
||||
if redirect == "" {
|
||||
redirect = "/"
|
||||
}
|
||||
|
||||
err = deleteRedirect(redis, ctx, state)
|
||||
if err != nil {
|
||||
logger.Errorf("delete redirect err:%v code:%s state:%s", err, code, state)
|
||||
}
|
||||
|
||||
var callbackOutput CallbackOutput
|
||||
if userData == nil || userData.User == nil {
|
||||
return nil, fmt.Errorf("feishu GetUserInfo failed, user data is nil")
|
||||
}
|
||||
|
||||
user := userData.User
|
||||
logger.Debugf("feishu get user info userID %s result %+v", userID, user)
|
||||
|
||||
// 提取用户信息
|
||||
username := ""
|
||||
if user.UserId != nil {
|
||||
username = *user.UserId
|
||||
}
|
||||
if username == "" {
|
||||
return nil, errors.New("feishu user_id is empty")
|
||||
}
|
||||
|
||||
nickname := ""
|
||||
if user.Name != nil {
|
||||
nickname = *user.Name
|
||||
}
|
||||
|
||||
phone := ""
|
||||
if user.Mobile != nil {
|
||||
phone = *user.Mobile
|
||||
}
|
||||
|
||||
email := ""
|
||||
if user.Email != nil {
|
||||
email = *user.Email
|
||||
}
|
||||
|
||||
if email == "" {
|
||||
if user.EnterpriseEmail != nil {
|
||||
email = *user.EnterpriseEmail
|
||||
}
|
||||
}
|
||||
|
||||
callbackOutput.Redirect = redirect
|
||||
callbackOutput.AccessToken = accessToken
|
||||
|
||||
// 根据UsernameField配置确定username
|
||||
switch s.FeiShuConfig.UsernameField {
|
||||
case "userid":
|
||||
callbackOutput.Username = username
|
||||
case "name":
|
||||
if nickname == "" {
|
||||
return nil, errors.New("feishu user name is empty")
|
||||
}
|
||||
callbackOutput.Username = nickname
|
||||
case "phone":
|
||||
if phone == "" {
|
||||
return nil, errors.New("feishu user phone is empty")
|
||||
}
|
||||
callbackOutput.Username = phone
|
||||
default:
|
||||
if email == "" {
|
||||
return nil, errors.New("feishu user email is empty")
|
||||
}
|
||||
callbackOutput.Username = email
|
||||
}
|
||||
|
||||
callbackOutput.Nickname = nickname
|
||||
callbackOutput.Email = email
|
||||
callbackOutput.Phone = phone
|
||||
|
||||
return &callbackOutput, nil
|
||||
}
|
||||
|
||||
func fetchRedirect(redis storage.Redis, ctx context.Context, state string) (string, error) {
|
||||
return redis.Get(ctx, wrapStateKey(state)).Result()
|
||||
}
|
||||
|
||||
func deleteRedirect(redis storage.Redis, ctx context.Context, state string) error {
|
||||
return redis.Del(ctx, wrapStateKey(state)).Err()
|
||||
}
|
||||
@@ -184,7 +184,8 @@ func (s *Set) updateDBTargetTs(ident string, now int64) {
|
||||
|
||||
func (s *Set) updateTargetsUpdateTs(lst []string, now int64, redis storage.Redis) error {
|
||||
if redis == nil {
|
||||
return fmt.Errorf("redis is nil")
|
||||
logger.Debugf("update_ts: redis is nil")
|
||||
return nil
|
||||
}
|
||||
|
||||
newMap := make(map[string]interface{}, len(lst))
|
||||
@@ -247,7 +248,7 @@ func (s *Set) writeTargetTsInRedis(ctx context.Context, redis storage.Redis, con
|
||||
|
||||
for i := 0; i < retryCount; i++ {
|
||||
start := time.Now()
|
||||
err := storage.MSet(ctx, redis, content)
|
||||
err := storage.MSet(ctx, redis, content, 24*time.Hour)
|
||||
duration := time.Since(start).Seconds()
|
||||
|
||||
logger.Debugf("update_ts: write target ts in redis, keys: %v, retryCount: %d, retryInterval: %v, error: %v", keys, retryCount, retryInterval, err)
|
||||
|
||||
@@ -35,6 +35,12 @@ type Pushgw struct {
|
||||
WriterOpt WriterGlobalOpt
|
||||
Writers []WriterOptions
|
||||
KafkaWriters []KafkaWriterOptions
|
||||
|
||||
// 预处理的字段,用于快速匹配只有 __name__ 的 DropSample 规则
|
||||
// key: metric name, value: struct{}
|
||||
DropMetricNames map[string]struct{}
|
||||
// 包含多个标签的复杂 DropSample 规则
|
||||
DropSampleComplex []map[string]string
|
||||
}
|
||||
|
||||
type WriterGlobalOpt struct {
|
||||
|
||||
@@ -109,21 +109,30 @@ func (rt *Router) debugSample(remoteAddr string, v *prompb.TimeSeries) {
|
||||
}
|
||||
|
||||
func (rt *Router) DropSample(v *prompb.TimeSeries) bool {
|
||||
filters := rt.Pushgw.DropSample
|
||||
if len(filters) == 0 {
|
||||
// 快速路径:检查仅 __name__ 的过滤器 O(1)
|
||||
if len(rt.dropByNameOnly) > 0 {
|
||||
for i := 0; i < len(v.Labels); i++ {
|
||||
if v.Labels[i].Name == "__name__" {
|
||||
if _, ok := rt.dropByNameOnly[v.Labels[i].Value]; ok {
|
||||
return true
|
||||
}
|
||||
break // __name__ 只会出现一次,找到后直接跳出
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 慢速路径:处理复杂的多条件过滤器
|
||||
if len(rt.dropComplex) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
labelMap := make(map[string]string)
|
||||
// 只有复杂过滤器存在时才创建 labelMap
|
||||
labelMap := make(map[string]string, len(v.Labels))
|
||||
for i := 0; i < len(v.Labels); i++ {
|
||||
labelMap[v.Labels[i].Name] = v.Labels[i].Value
|
||||
}
|
||||
|
||||
for _, filter := range filters {
|
||||
if len(filter) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, filter := range rt.dropComplex {
|
||||
if matchSample(filter, labelMap) {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/prometheus/prompb"
|
||||
"github.com/toolkits/pkg/logger"
|
||||
|
||||
"github.com/ccfos/nightingale/v6/alert/aconf"
|
||||
"github.com/ccfos/nightingale/v6/center/metas"
|
||||
@@ -33,6 +34,10 @@ type Router struct {
|
||||
Ctx *ctx.Context
|
||||
HandleTS HandleTSFunc
|
||||
HeartbeatApi string
|
||||
|
||||
// 预编译的 DropSample 过滤器
|
||||
dropByNameOnly map[string]struct{} // 仅 __name__ 条件的快速匹配
|
||||
dropComplex []map[string]string // 多条件的复杂匹配
|
||||
}
|
||||
|
||||
func stat() gin.HandlerFunc {
|
||||
@@ -51,7 +56,7 @@ func stat() gin.HandlerFunc {
|
||||
func New(httpConfig httpx.Config, pushgw pconf.Pushgw, aconf aconf.Alert, tc *memsto.TargetCacheType, bg *memsto.BusiGroupCacheType,
|
||||
idents *idents.Set, metas *metas.Set,
|
||||
writers *writer.WritersType, ctx *ctx.Context) *Router {
|
||||
return &Router{
|
||||
rt := &Router{
|
||||
HTTP: httpConfig,
|
||||
Pushgw: pushgw,
|
||||
Aconf: aconf,
|
||||
@@ -63,6 +68,38 @@ func New(httpConfig httpx.Config, pushgw pconf.Pushgw, aconf aconf.Alert, tc *me
|
||||
MetaSet: metas,
|
||||
HandleTS: func(pt *prompb.TimeSeries) *prompb.TimeSeries { return pt },
|
||||
}
|
||||
|
||||
// 预编译 DropSample 过滤器
|
||||
rt.initDropSampleFilters()
|
||||
|
||||
return rt
|
||||
}
|
||||
|
||||
// initDropSampleFilters 预编译 DropSample 过滤器,将单条件 __name__ 过滤器
|
||||
// 放入 map 实现 O(1) 查找,多条件过滤器保留原有逻辑
|
||||
func (rt *Router) initDropSampleFilters() {
|
||||
rt.dropByNameOnly = make(map[string]struct{})
|
||||
rt.dropComplex = make([]map[string]string, 0)
|
||||
|
||||
for _, filter := range rt.Pushgw.DropSample {
|
||||
if len(filter) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// 如果只有一个条件且是 __name__,放入快速匹配 map
|
||||
if len(filter) == 1 {
|
||||
if name, ok := filter["__name__"]; ok {
|
||||
rt.dropByNameOnly[name] = struct{}{}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// 其他情况放入复杂匹配列表
|
||||
rt.dropComplex = append(rt.dropComplex, filter)
|
||||
}
|
||||
|
||||
logger.Infof("DropSample filters initialized: %d name-only, %d complex",
|
||||
len(rt.dropByNameOnly), len(rt.dropComplex))
|
||||
}
|
||||
|
||||
func (rt *Router) Config(r *gin.Engine) {
|
||||
|
||||
@@ -163,10 +163,10 @@ func MGet(ctx context.Context, r Redis, keys []string) [][]byte {
|
||||
return vals
|
||||
}
|
||||
|
||||
func MSet(ctx context.Context, r Redis, m map[string]interface{}) error {
|
||||
func MSet(ctx context.Context, r Redis, m map[string]interface{}, expiration time.Duration) error {
|
||||
pipe := r.Pipeline()
|
||||
for k, v := range m {
|
||||
pipe.Set(ctx, k, v, 0)
|
||||
pipe.Set(ctx, k, v, expiration)
|
||||
}
|
||||
_, err := pipe.Exec(ctx)
|
||||
return err
|
||||
|
||||
@@ -30,7 +30,7 @@ func TestMiniRedisMGet(t *testing.T) {
|
||||
mp["key2"] = "value2"
|
||||
mp["key3"] = "value3"
|
||||
|
||||
err = MSet(context.Background(), rdb, mp)
|
||||
err = MSet(context.Background(), rdb, mp, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to set miniredis value: %v", err)
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user