Compare commits

...

23 Commits

Author SHA1 Message Date
ning
507959d4cb refactor: add eval duration 2025-05-27 11:27:33 +08:00
Yening Qin
67275b0cf1 refactor: es add offset query and add es-index-pattern ops 2025-04-27 18:59:32 +08:00
Yening Qin
464e7d8709 change notify channel auth 2025-04-23 15:49:46 +08:00
Yening Qin
8974dc896c refactor: update es event index (#2603) 2025-04-14 14:57:19 +08:00
Yening Qin
dee9aac87e refactor: change query tpl func (#2598) 2025-04-11 17:10:37 +08:00
Yening Qin
fb7487ccf0 refactor: add logic SingleLogin
Co-authored-by: Ulric Qin <ulric.qin@gmail.com>
2025-04-02 22:05:11 +08:00
Yening Qin
e683650cd6 refactor: change get ids 2025-04-02 19:54:09 +08:00
Yening Qin
3e1d3e1191 refactor: targets get api (#2584) 2025-04-02 19:10:23 +08:00
Yening Qin
4900b042a8 fix: auto migrate add cross_cluster_enabled of es_index_pattern (#2570) 2025-03-27 16:14:57 +08:00
Yening Qin
4fd41a8d21 refactor: es query add IgnoreUnavailable(true) (#2567) 2025-03-26 11:12:27 +08:00
Yening Qin
f8066399ab refactor: es query support concurrency 2025-03-24 11:16:33 +08:00
ning
d580f0e799 es query add log 2025-03-21 18:19:46 +08:00
ning
f24e4c61f4 Merge branch 'release-14' of github.com:ccfos/nightingale into release-14 2025-03-21 16:58:27 +08:00
ning
2d186fb5fd fix: getSendTarget panic 2025-03-21 16:58:14 +08:00
Yening Qin
610f9a7095 Merge branch 'main' into release-14 2025-03-21 16:28:16 +08:00
ning
1cc12c3755 Merge branch 'release-14' of github.com:ccfos/nightingale into release-14 2025-03-21 16:24:03 +08:00
ning
ec7cec50c2 change record crontab 2025-03-21 16:23:19 +08:00
Yening Qin
19c5bdae39 add metrics (#2558) 2025-03-21 15:53:37 +08:00
ning
027e623330 change crontab 2025-03-21 12:15:00 +08:00
ning
02de723bfd code refactor 2025-03-18 22:38:37 +08:00
ning
feee48daab code refactor 2025-03-18 22:37:26 +08:00
ning
37d168876a update migrate bg 2025-03-18 22:33:18 +08:00
ning
b51f67659c change prom query retry count 2025-03-17 20:02:14 +08:00
35 changed files with 482 additions and 152 deletions

View File

@@ -17,6 +17,7 @@ type Stats struct {
CounterRuleEval *prometheus.CounterVec
CounterQueryDataErrorTotal *prometheus.CounterVec
CounterQueryDataTotal *prometheus.CounterVec
CounterVarFillingQuery *prometheus.CounterVec
CounterRecordEval *prometheus.CounterVec
CounterRecordEvalErrorTotal *prometheus.CounterVec
CounterMuteTotal *prometheus.CounterVec
@@ -24,6 +25,7 @@ type Stats struct {
CounterHeartbeatErrorTotal *prometheus.CounterVec
CounterSubEventTotal *prometheus.CounterVec
GaugeQuerySeriesCount *prometheus.GaugeVec
GaugeRuleEvalDuration *prometheus.GaugeVec
GaugeNotifyRecordQueueSize prometheus.Gauge
}
@@ -54,7 +56,7 @@ func NewSyncStats() *Stats {
Subsystem: subsystem,
Name: "query_data_total",
Help: "Number of rule eval query data.",
}, []string{"datasource"})
}, []string{"datasource", "rule_id"})
CounterRecordEval := prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
@@ -135,6 +137,20 @@ func NewSyncStats() *Stats {
Help: "The size of notify record queue.",
})
GaugeRuleEvalDuration := prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "rule_eval_duration_ms",
Help: "Duration of rule eval in milliseconds.",
}, []string{"rule_id", "datasource_id"})
CounterVarFillingQuery := prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "var_filling_query_total",
Help: "Number of var filling query.",
}, []string{"rule_id", "datasource_id", "ref", "typ"})
prometheus.MustRegister(
CounterAlertsTotal,
GaugeAlertQueueSize,
@@ -150,7 +166,9 @@ func NewSyncStats() *Stats {
CounterHeartbeatErrorTotal,
CounterSubEventTotal,
GaugeQuerySeriesCount,
GaugeRuleEvalDuration,
GaugeNotifyRecordQueueSize,
CounterVarFillingQuery,
)
return &Stats{
@@ -168,6 +186,8 @@ func NewSyncStats() *Stats {
CounterHeartbeatErrorTotal: CounterHeartbeatErrorTotal,
CounterSubEventTotal: CounterSubEventTotal,
GaugeQuerySeriesCount: GaugeQuerySeriesCount,
GaugeRuleEvalDuration: GaugeRuleEvalDuration,
GaugeNotifyRecordQueueSize: GaugeNotifyRecordQueueSize,
CounterVarFillingQuery: CounterVarFillingQuery,
}
}

View File

@@ -732,10 +732,10 @@ func getSendTarget(customParams map[string]string, sendtos []string) string {
values := make([]string, 0)
for _, value := range customParams {
if len(value) <= 4 {
runes := []rune(value)
if len(runes) <= 4 {
values = append(values, value)
} else {
runes := []rune(value)
maskedValue := string(runes[:len(runes)-4]) + "****"
values = append(values, maskedValue)
}

View File

@@ -13,6 +13,7 @@ import (
"sync"
"time"
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/alert/process"
"github.com/ccfos/nightingale/v6/dscache"
@@ -99,7 +100,7 @@ func NewAlertRuleWorker(rule *models.AlertRule, datasourceId int64, Processor *p
rule.CronPattern = fmt.Sprintf("@every %ds", interval)
}
arw.Scheduler = cron.New(cron.WithSeconds())
arw.Scheduler = cron.New(cron.WithSeconds(), cron.WithChain(cron.SkipIfStillRunning(cron.DefaultLogger)))
entryID, err := arw.Scheduler.AddFunc(rule.CronPattern, func() {
arw.Eval()
@@ -233,6 +234,10 @@ func (arw *AlertRuleWorker) Stop() {
func (arw *AlertRuleWorker) GetPromAnomalyPoint(ruleConfig string) ([]models.AnomalyPoint, error) {
var lst []models.AnomalyPoint
var severity int
start := time.Now()
defer func() {
arw.Processor.Stats.GaugeRuleEvalDuration.WithLabelValues(fmt.Sprintf("%v", arw.Rule.Id), fmt.Sprintf("%v", arw.Processor.DatasourceId())).Set(float64(time.Since(start).Milliseconds()))
}()
var rule *models.PromRuleConfig
if err := json.Unmarshal([]byte(ruleConfig), &rule); err != nil {
@@ -281,9 +286,21 @@ func (arw *AlertRuleWorker) GetPromAnomalyPoint(ruleConfig string) ([]models.Ano
if hasLabelLossAggregator(query) || notExactMatch(query) {
// 若有聚合函数或非精确匹配则需要先填充变量然后查询,这个方式效率较低
anomalyPoints = arw.VarFillingBeforeQuery(query, readerClient)
arw.Processor.Stats.CounterVarFillingQuery.WithLabelValues(
fmt.Sprintf("%v", arw.Rule.Id),
fmt.Sprintf("%v", arw.Processor.DatasourceId()),
fmt.Sprintf("%v", i),
"BeforeQuery",
).Inc()
} else {
// 先查询再过滤变量,效率较高,但无法处理有聚合函数的情况
anomalyPoints = arw.VarFillingAfterQuery(query, readerClient)
arw.Processor.Stats.CounterVarFillingQuery.WithLabelValues(
fmt.Sprintf("%v", arw.Rule.Id),
fmt.Sprintf("%v", arw.Processor.DatasourceId()),
fmt.Sprintf("%v", i),
"AfterQuery",
).Inc()
}
lst = append(lst, anomalyPoints...)
} else {
@@ -302,7 +319,7 @@ func (arw *AlertRuleWorker) GetPromAnomalyPoint(ruleConfig string) ([]models.Ano
}
var warnings promsdk.Warnings
arw.Processor.Stats.CounterQueryDataTotal.WithLabelValues(fmt.Sprintf("%d", arw.DatasourceId)).Inc()
arw.Processor.Stats.CounterQueryDataTotal.WithLabelValues(fmt.Sprintf("%d", arw.DatasourceId), fmt.Sprintf("%d", arw.Rule.Id)).Inc()
value, warnings, err := readerClient.Query(context.Background(), promql, time.Now())
if err != nil {
logger.Errorf("rule_eval:%s promql:%s, error:%v", arw.Key(), promql, err)
@@ -413,6 +430,7 @@ func (arw *AlertRuleWorker) VarFillingAfterQuery(query models.PromQuery, readerC
realQuery = strings.Replace(realQuery, fmt.Sprintf("$%s", key), val, -1)
}
// 得到满足值变量的所有结果
arw.Processor.Stats.CounterQueryDataTotal.WithLabelValues(fmt.Sprintf("%d", arw.DatasourceId), fmt.Sprintf("%d", arw.Rule.Id)).Inc()
value, _, err := readerClient.Query(context.Background(), curQuery, time.Now())
if err != nil {
logger.Errorf("rule_eval:%s, promql:%s, error:%v", arw.Key(), curQuery, err)
@@ -574,7 +592,7 @@ func (arw *AlertRuleWorker) getParamPermutation(paramVal map[string]models.Param
logger.Errorf("query:%s fail to unmarshalling into string slice, error:%v", paramQuery.Query, err)
}
if len(query) == 0 {
paramsKeyAllLabel, err := getParamKeyAllLabel(varToLabel[paramKey], originPromql, readerClient)
paramsKeyAllLabel, err := getParamKeyAllLabel(varToLabel[paramKey], originPromql, readerClient, arw.DatasourceId, arw.Rule.Id, arw.Processor.Stats)
if err != nil {
logger.Errorf("rule_eval:%s, fail to getParamKeyAllLabel, error:%v query:%s", arw.Key(), err, paramQuery.Query)
}
@@ -605,7 +623,7 @@ func (arw *AlertRuleWorker) getParamPermutation(paramVal map[string]models.Param
return res, nil
}
func getParamKeyAllLabel(paramKey string, promql string, client promsdk.API) ([]string, error) {
func getParamKeyAllLabel(paramKey string, promql string, client promsdk.API, dsId int64, rid int64, stats *astats.Stats) ([]string, error) {
labels, metricName, err := promql2.GetLabelsAndMetricNameWithReplace(promql, "$")
if err != nil {
return nil, fmt.Errorf("promql:%s, get labels error:%v", promql, err)
@@ -619,6 +637,7 @@ func getParamKeyAllLabel(paramKey string, promql string, client promsdk.API) ([]
}
pr := metricName + "{" + strings.Join(labelstrs, ",") + "}"
stats.CounterQueryDataTotal.WithLabelValues(fmt.Sprintf("%d", dsId), fmt.Sprintf("%d", rid)).Inc()
value, _, err := client.Query(context.Background(), pr, time.Now())
if err != nil {
return nil, fmt.Errorf("promql: %s query error: %v", pr, err)
@@ -734,6 +753,10 @@ func combine(paramKeys []string, paraMap map[string][]string, index int, current
func (arw *AlertRuleWorker) GetHostAnomalyPoint(ruleConfig string) ([]models.AnomalyPoint, error) {
var lst []models.AnomalyPoint
var severity int
start := time.Now()
defer func() {
arw.Processor.Stats.GaugeRuleEvalDuration.WithLabelValues(fmt.Sprintf("%v", arw.Rule.Id), fmt.Sprintf("%v", arw.Processor.DatasourceId())).Set(float64(time.Since(start).Milliseconds()))
}()
var rule *models.HostRuleConfig
if err := json.Unmarshal([]byte(ruleConfig), &rule); err != nil {
@@ -1276,6 +1299,7 @@ func (arw *AlertRuleWorker) VarFillingBeforeQuery(query models.PromQuery, reader
<-semaphore
wg.Done()
}()
arw.Processor.Stats.CounterQueryDataTotal.WithLabelValues(fmt.Sprintf("%d", arw.DatasourceId), fmt.Sprintf("%d", arw.Rule.Id)).Inc()
value, _, err := readerClient.Query(context.Background(), promql, time.Now())
if err != nil {
logger.Errorf("rule_eval:%s, promql:%s, error:%v", arw.Key(), promql, err)
@@ -1411,6 +1435,10 @@ func fillVar(curRealQuery string, paramKey string, val string) string {
func (arw *AlertRuleWorker) GetAnomalyPoint(rule *models.AlertRule, dsId int64) ([]models.AnomalyPoint, []models.AnomalyPoint) {
// 获取查询和规则判断条件
start := time.Now()
defer func() {
arw.Processor.Stats.GaugeRuleEvalDuration.WithLabelValues(fmt.Sprintf("%v", arw.Rule.Id), fmt.Sprintf("%v", arw.Processor.DatasourceId())).Set(float64(time.Since(start).Milliseconds()))
}()
points := []models.AnomalyPoint{}
recoverPoints := []models.AnomalyPoint{}
ruleConfig := strings.TrimSpace(rule.RuleConfig)
@@ -1456,7 +1484,7 @@ func (arw *AlertRuleWorker) GetAnomalyPoint(rule *models.AlertRule, dsId int64)
ctx := context.WithValue(context.Background(), "delay", int64(rule.Delay))
series, err := plug.QueryData(ctx, query)
arw.Processor.Stats.CounterQueryDataTotal.WithLabelValues(fmt.Sprintf("%d", arw.DatasourceId)).Inc()
arw.Processor.Stats.CounterQueryDataTotal.WithLabelValues(fmt.Sprintf("%d", arw.DatasourceId), fmt.Sprintf("%d", rule.Id)).Inc()
if err != nil {
logger.Warningf("rule_eval rid:%d query data error: %v", rule.Id, err)
arw.Processor.Stats.CounterRuleEvalErrorTotal.WithLabelValues(fmt.Sprintf("%v", arw.Processor.DatasourceId()), GET_CLIENT, arw.Processor.BusiGroupCache.GetNameByBusiGroupId(arw.Rule.GroupId), fmt.Sprintf("%v", arw.Rule.Id)).Inc()
@@ -1579,6 +1607,11 @@ func (arw *AlertRuleWorker) GetAnomalyPoint(rule *models.AlertRule, dsId int64)
}
}
queries := ruleQuery.Queries
if sample.Query != "" {
queries = []interface{}{sample.Query}
}
point := models.AnomalyPoint{
Key: sample.MetricName(),
Labels: sample.Metric,
@@ -1587,7 +1620,7 @@ func (arw *AlertRuleWorker) GetAnomalyPoint(rule *models.AlertRule, dsId int64)
Values: values,
Severity: trigger.Severity,
Triggered: isTriggered,
Query: fmt.Sprintf("query:%+v trigger:%+v", ruleQuery.Queries, trigger),
Query: fmt.Sprintf("query:%+v trigger:%+v", queries, trigger),
RecoverConfig: trigger.RecoverConfig,
ValuesUnit: valuesUnitMap,
}

View File

@@ -39,7 +39,7 @@ func NewRecordRuleContext(rule *models.RecordingRule, datasourceId int64, promCl
rule.CronPattern = fmt.Sprintf("@every %ds", rule.PromEvalInterval)
}
rrc.scheduler = cron.New(cron.WithSeconds())
rrc.scheduler = cron.New(cron.WithSeconds(), cron.WithChain(cron.SkipIfStillRunning(cron.DefaultLogger)))
_, err := rrc.scheduler.AddFunc(rule.CronPattern, func() {
rrc.Eval()
})

View File

@@ -142,6 +142,13 @@ ops:
cname: View Logs
- name: "/log/index-patterns"
cname: View Index Patterns
- name: "/log/index-patterns/add"
cname: Add Index Pattern
- name: "/log/index-patterns/put"
cname: Modify Index Pattern
- name: "/log/index-patterns/del"
cname: Delete Index Pattern
- name: alert
cname: Alert Rules

View File

@@ -481,9 +481,9 @@ func (rt *Router) Config(r *gin.Engine) {
pages.GET("/es-index-pattern", rt.auth(), rt.esIndexPatternGet)
pages.GET("/es-index-pattern-list", rt.auth(), rt.esIndexPatternGetList)
pages.POST("/es-index-pattern", rt.auth(), rt.admin(), rt.esIndexPatternAdd)
pages.PUT("/es-index-pattern", rt.auth(), rt.admin(), rt.esIndexPatternPut)
pages.DELETE("/es-index-pattern", rt.auth(), rt.admin(), rt.esIndexPatternDel)
pages.POST("/es-index-pattern", rt.auth(), rt.user(), rt.perm("/log/index-patterns/add"), rt.esIndexPatternAdd)
pages.PUT("/es-index-pattern", rt.auth(), rt.user(), rt.perm("/log/index-patterns/put"), rt.esIndexPatternPut)
pages.DELETE("/es-index-pattern", rt.auth(), rt.user(), rt.perm("/log/index-patterns/del"), rt.esIndexPatternDel)
pages.GET("/embedded-dashboards", rt.auth(), rt.user(), rt.perm("/embedded-dashboards"), rt.embeddedDashboardsGet)
pages.PUT("/embedded-dashboards", rt.auth(), rt.user(), rt.perm("/embedded-dashboards/put"), rt.embeddedDashboardsPut)

View File

@@ -12,6 +12,7 @@ import (
"gopkg.in/yaml.v2"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/ccfos/nightingale/v6/pushgw/pconf"
"github.com/ccfos/nightingale/v6/pushgw/writer"
@@ -20,7 +21,6 @@ import (
"github.com/prometheus/prometheus/prompb"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/i18n"
"github.com/toolkits/pkg/str"
)
type AlertRuleModifyHookFunc func(ar *models.AlertRule)
@@ -52,7 +52,7 @@ func getAlertCueEventTimeRange(c *gin.Context) (stime, etime int64) {
}
func (rt *Router) alertRuleGetsByGids(c *gin.Context) {
gids := str.IdsInt64(ginx.QueryStr(c, "gids", ""), ",")
gids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "gids", ""), ",")
if len(gids) > 0 {
for _, gid := range gids {
rt.bgroCheck(c, gid)

View File

@@ -5,10 +5,10 @@ import (
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/str"
)
// Return all, front-end search and paging
@@ -31,7 +31,7 @@ func (rt *Router) alertSubscribeGets(c *gin.Context) {
}
func (rt *Router) alertSubscribeGetsByGids(c *gin.Context) {
gids := str.IdsInt64(ginx.QueryStr(c, "gids", ""), ",")
gids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "gids", ""), ",")
if len(gids) > 0 {
for _, gid := range gids {
rt.bgroCheck(c, gid)

View File

@@ -6,11 +6,11 @@ import (
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/i18n"
"github.com/toolkits/pkg/str"
)
type boardForm struct {
@@ -96,7 +96,7 @@ func (rt *Router) boardGet(c *gin.Context) {
// 根据 bids 参数,获取多个 board
func (rt *Router) boardGetsByBids(c *gin.Context) {
bids := str.IdsInt64(ginx.QueryStr(c, "bids", ""), ",")
bids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "bids", ""), ",")
boards, err := models.BoardGetsByBids(rt.Ctx, bids)
ginx.Dangerous(err)
ginx.NewRender(c).Data(boards, err)
@@ -265,7 +265,7 @@ func (rt *Router) publicBoardGets(c *gin.Context) {
}
func (rt *Router) boardGetsByGids(c *gin.Context) {
gids := str.IdsInt64(ginx.QueryStr(c, "gids", ""), ",")
gids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "gids", ""), ",")
query := ginx.QueryStr(c, "query", "")
if len(gids) > 0 {

View File

@@ -4,11 +4,11 @@ import (
"net/http"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/str"
)
type busiGroupForm struct {
@@ -131,7 +131,7 @@ func (rt *Router) busiGroupGetsByService(c *gin.Context) {
// 这个接口只有在活跃告警页面才调用获取各个BG的活跃告警数量
func (rt *Router) busiGroupAlertingsGets(c *gin.Context) {
ids := ginx.QueryStr(c, "ids", "")
ret, err := models.AlertNumbers(rt.Ctx, str.IdsInt64(ids))
ret, err := models.AlertNumbers(rt.Ctx, strx.IdsInt64ForAPI(ids))
ginx.NewRender(c).Data(ret, err)
}
@@ -142,7 +142,7 @@ func (rt *Router) busiGroupGet(c *gin.Context) {
}
func (rt *Router) busiGroupsGetTags(c *gin.Context) {
bgids := str.IdsInt64(ginx.QueryStr(c, "gids", ""), ",")
bgids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "gids", ""), ",")
targetIdents, err := models.TargetIndentsGetByBgids(rt.Ctx, bgids)
ginx.Dangerous(err)
tags, err := models.TargetGetTags(rt.Ctx, targetIdents, true, "busigroup")

View File

@@ -4,15 +4,15 @@ import (
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/str"
)
func (rt *Router) chartShareGets(c *gin.Context) {
ids := ginx.QueryStr(c, "ids", "")
lst, err := models.ChartShareGetsByIds(rt.Ctx, str.IdsInt64(ids, ","))
lst, err := models.ChartShareGetsByIds(rt.Ctx, strx.IdsInt64ForAPI(ids, ","))
ginx.NewRender(c).Data(lst, err)
}

View File

@@ -152,6 +152,13 @@ func (rt *Router) refreshPost(c *gin.Context) {
return
}
// 看这个 token 是否还存在 redis 中
val, err := rt.fetchAuth(c.Request.Context(), refreshUuid)
if err != nil || val == "" {
ginx.NewRender(c, http.StatusUnauthorized).Message("refresh token expired")
return
}
userIdentity, ok := claims["user_identity"].(string)
if !ok {
// Theoretically impossible

View File

@@ -10,10 +10,10 @@ import (
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/slice"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/ccfos/nightingale/v6/pkg/tplx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/str"
)
func (rt *Router) messageTemplatesAdd(c *gin.Context) {
@@ -137,7 +137,7 @@ func (rt *Router) messageTemplatesGet(c *gin.Context) {
if tmp := ginx.QueryStr(c, "notify_channel_idents", ""); tmp != "" {
notifyChannelIdents = strings.Split(tmp, ",")
}
notifyChannelIds := str.IdsInt64(ginx.QueryStr(c, "notify_channel_ids", ""))
notifyChannelIds := strx.IdsInt64ForAPI(ginx.QueryStr(c, "notify_channel_ids", ""))
if len(notifyChannelIds) > 0 {
ginx.Dangerous(models.DB(rt.Ctx).Model(models.NotifyChannelConfig{}).
Where("id in (?)", notifyChannelIds).Pluck("ident", &notifyChannelIdents).Error)

View File

@@ -7,10 +7,10 @@ import (
"github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/str"
)
// Return all, front-end search and paging
@@ -22,7 +22,7 @@ func (rt *Router) alertMuteGetsByBG(c *gin.Context) {
}
func (rt *Router) alertMuteGetsByGids(c *gin.Context) {
gids := str.IdsInt64(ginx.QueryStr(c, "gids", ""), ",")
gids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "gids", ""), ",")
if len(gids) > 0 {
for _, gid := range gids {
rt.bgroCheck(c, gid)

View File

@@ -335,6 +335,12 @@ func (rt *Router) extractTokenMetadata(r *http.Request) (*AccessDetails, error)
return nil, errors.New("failed to parse access_uuid from jwt")
}
// accessUuid 在 redis 里存在才放行
val, err := rt.fetchAuth(r.Context(), accessUuid)
if err != nil || val == "" {
return nil, errors.New("unauthorized")
}
return &AccessDetails{
AccessUuid: accessUuid,
UserIdentity: claims["user_identity"].(string),
@@ -355,18 +361,43 @@ func (rt *Router) extractToken(r *http.Request) string {
}
func (rt *Router) createAuth(ctx context.Context, userIdentity string, td *TokenDetails) error {
username := strings.Split(userIdentity, "-")[1]
// 如果只能有一个账号登录,那么就删除之前的 token
if rt.HTTP.JWTAuth.SingleLogin {
delKeys, err := rt.Redis.SMembers(ctx, rt.wrapJwtKey(username)).Result()
if err != nil {
return err
}
if len(delKeys) > 0 {
errDel := rt.Redis.Del(ctx, delKeys...).Err()
if errDel != nil {
return errDel
}
}
if errDel := rt.Redis.Del(ctx, rt.wrapJwtKey(username)).Err(); errDel != nil {
return errDel
}
}
at := time.Unix(td.AtExpires, 0)
rte := time.Unix(td.RtExpires, 0)
now := time.Now()
errAccess := rt.Redis.Set(ctx, rt.wrapJwtKey(td.AccessUuid), userIdentity, at.Sub(now)).Err()
if errAccess != nil {
return errAccess
if err := rt.Redis.Set(ctx, rt.wrapJwtKey(td.AccessUuid), userIdentity, at.Sub(now)).Err(); err != nil {
return err
}
errRefresh := rt.Redis.Set(ctx, rt.wrapJwtKey(td.RefreshUuid), userIdentity, rte.Sub(now)).Err()
if errRefresh != nil {
return errRefresh
if err := rt.Redis.Set(ctx, rt.wrapJwtKey(td.RefreshUuid), userIdentity, rte.Sub(now)).Err(); err != nil {
return err
}
if rt.HTTP.JWTAuth.SingleLogin {
if err := rt.Redis.SAdd(ctx, rt.wrapJwtKey(username), rt.wrapJwtKey(td.AccessUuid), rt.wrapJwtKey(td.RefreshUuid)).Err(); err != nil {
return err
}
}
return nil
@@ -413,9 +444,10 @@ type TokenDetails struct {
}
func (rt *Router) createTokens(signingKey, userIdentity string) (*TokenDetails, error) {
username := strings.Split(userIdentity, "-")[1]
td := &TokenDetails{}
td.AtExpires = time.Now().Add(time.Minute * time.Duration(rt.HTTP.JWTAuth.AccessExpired)).Unix()
td.AccessUuid = uuid.NewString()
td.AccessUuid = username + "/" + uuid.NewString()
td.RtExpires = time.Now().Add(time.Minute * time.Duration(rt.HTTP.JWTAuth.RefreshExpired)).Unix()
td.RefreshUuid = td.AccessUuid + "++" + userIdentity

View File

@@ -17,9 +17,6 @@ import (
func (rt *Router) notifyChannelsAdd(c *gin.Context) {
me := c.MustGet("user").(*models.User)
if !me.IsAdmin() {
ginx.Bomb(http.StatusForbidden, "no permission")
}
var lst []*models.NotifyChannelConfig
ginx.BindJSON(c, &lst)
@@ -55,11 +52,6 @@ func (rt *Router) notifyChannelsAdd(c *gin.Context) {
}
func (rt *Router) notifyChannelsDel(c *gin.Context) {
me := c.MustGet("user").(*models.User)
if !me.IsAdmin() {
ginx.Bomb(http.StatusForbidden, "no permission")
}
var f idsForm
ginx.BindJSON(c, &f)
f.Verify()
@@ -79,9 +71,6 @@ func (rt *Router) notifyChannelsDel(c *gin.Context) {
func (rt *Router) notifyChannelPut(c *gin.Context) {
me := c.MustGet("user").(*models.User)
if !me.IsAdmin() {
ginx.Bomb(http.StatusForbidden, "no permission")
}
var f models.NotifyChannelConfig
ginx.BindJSON(c, &f)

View File

@@ -3,6 +3,7 @@ package router
import (
"fmt"
"sort"
"sync"
"github.com/ccfos/nightingale/v6/dscache"
"github.com/ccfos/nightingale/v6/models"
@@ -38,71 +39,116 @@ type LogResp struct {
List []interface{} `json:"list"`
}
func (rt *Router) QueryLogBatch(c *gin.Context) {
var f QueryFrom
ginx.BindJSON(c, &f)
func QueryLogBatchConcurrently(anonymousAccess bool, ctx *gin.Context, f QueryFrom) (LogResp, error) {
var resp LogResp
var errMsg string
var mu sync.Mutex
var wg sync.WaitGroup
var errs []error
for _, q := range f.Queries {
if !rt.Center.AnonymousAccess.PromQuerier && !CheckDsPerm(c, q.Did, q.DsCate, q) {
ginx.Bomb(200, "no permission")
if !anonymousAccess && !CheckDsPerm(ctx, q.Did, q.DsCate, q) {
return LogResp{}, fmt.Errorf("no permission")
}
plug, exists := dscache.DsCache.Get(q.DsCate, q.Did)
if !exists {
logger.Warningf("cluster:%d not exists query:%+v", q.Did, q)
ginx.Bomb(200, "cluster not exists")
return LogResp{}, fmt.Errorf("cluster not exists")
}
data, total, err := plug.QueryLog(c.Request.Context(), q.Query)
if err != nil {
errMsg += fmt.Sprintf("query data error: %v query:%v\n ", err, q)
logger.Warningf("query data error: %v query:%v", err, q)
continue
}
wg.Add(1)
go func(query Query) {
defer wg.Done()
m := make(map[string]interface{})
m["ref"] = q.Ref
m["ds_id"] = q.Did
m["ds_cate"] = q.DsCate
m["data"] = data
resp.List = append(resp.List, m)
resp.Total += total
data, total, err := plug.QueryLog(ctx.Request.Context(), query.Query)
mu.Lock()
defer mu.Unlock()
if err != nil {
errMsg := fmt.Sprintf("query data error: %v query:%v\n ", err, query)
logger.Warningf(errMsg)
errs = append(errs, err)
return
}
m := make(map[string]interface{})
m["ref"] = query.Ref
m["ds_id"] = query.Did
m["ds_cate"] = query.DsCate
m["data"] = data
resp.List = append(resp.List, m)
resp.Total += total
}(q)
}
if errMsg != "" || len(resp.List) == 0 {
ginx.Bomb(200, errMsg)
wg.Wait()
if len(errs) > 0 {
return LogResp{}, errs[0]
}
if len(resp.List) == 0 {
return LogResp{}, fmt.Errorf("no data")
}
return resp, nil
}
func (rt *Router) QueryLogBatch(c *gin.Context) {
var f QueryFrom
ginx.BindJSON(c, &f)
resp, err := QueryLogBatchConcurrently(rt.Center.AnonymousAccess.PromQuerier, c, f)
if err != nil {
ginx.Bomb(200, "err:%v", err)
}
ginx.NewRender(c).Data(resp, nil)
}
func (rt *Router) QueryData(c *gin.Context) {
var f models.QueryParam
ginx.BindJSON(c, &f)
func QueryDataConcurrently(anonymousAccess bool, ctx *gin.Context, f models.QueryParam) ([]models.DataResp, error) {
var resp []models.DataResp
var err error
var mu sync.Mutex
var wg sync.WaitGroup
var errs []error
for _, q := range f.Querys {
if !rt.Center.AnonymousAccess.PromQuerier && !CheckDsPerm(c, f.DatasourceId, f.Cate, q) {
ginx.Bomb(403, "no permission")
if !anonymousAccess && !CheckDsPerm(ctx, f.DatasourceId, f.Cate, q) {
return nil, fmt.Errorf("no permission")
}
plug, exists := dscache.DsCache.Get(f.Cate, f.DatasourceId)
if !exists {
logger.Warningf("cluster:%d not exists", f.DatasourceId)
ginx.Bomb(200, "cluster not exists")
return nil, fmt.Errorf("cluster not exists")
}
var datas []models.DataResp
datas, err = plug.QueryData(c.Request.Context(), q)
if err != nil {
logger.Warningf("query data error: req:%+v err:%v", q, err)
ginx.Bomb(200, "err:%v", err)
}
logger.Debugf("query data: req:%+v resp:%+v", q, datas)
resp = append(resp, datas...)
wg.Add(1)
go func(query interface{}) {
defer wg.Done()
datas, err := plug.QueryData(ctx.Request.Context(), query)
if err != nil {
logger.Warningf("query data error: req:%+v err:%v", query, err)
mu.Lock()
errs = append(errs, err)
mu.Unlock()
return
}
logger.Debugf("query data: req:%+v resp:%+v", query, datas)
mu.Lock()
resp = append(resp, datas...)
mu.Unlock()
}(q)
}
wg.Wait()
if len(errs) > 0 {
return nil, errs[0]
}
// 面向API的统一处理
// 按照 .Metric 排序
// 确保仪表盘中相同图例的曲线颜色相同
@@ -115,41 +161,80 @@ func (rt *Router) QueryData(c *gin.Context) {
})
}
ginx.NewRender(c).Data(resp, err)
return resp, nil
}
func (rt *Router) QueryData(c *gin.Context) {
var f models.QueryParam
ginx.BindJSON(c, &f)
resp, err := QueryDataConcurrently(rt.Center.AnonymousAccess.PromQuerier, c, f)
if err != nil {
ginx.Bomb(200, "err:%v", err)
}
ginx.NewRender(c).Data(resp, nil)
}
// QueryLogConcurrently 并发查询日志
func QueryLogConcurrently(anonymousAccess bool, ctx *gin.Context, f models.QueryParam) (LogResp, error) {
var resp LogResp
var mu sync.Mutex
var wg sync.WaitGroup
var errs []error
for _, q := range f.Querys {
if !anonymousAccess && !CheckDsPerm(ctx, f.DatasourceId, f.Cate, q) {
return LogResp{}, fmt.Errorf("no permission")
}
plug, exists := dscache.DsCache.Get(f.Cate, f.DatasourceId)
if !exists {
logger.Warningf("cluster:%d not exists query:%+v", f.DatasourceId, f)
return LogResp{}, fmt.Errorf("cluster not exists")
}
wg.Add(1)
go func(query interface{}) {
defer wg.Done()
data, total, err := plug.QueryLog(ctx.Request.Context(), query)
logger.Debugf("query log: req:%+v resp:%+v", query, data)
if err != nil {
errMsg := fmt.Sprintf("query data error: %v query:%v\n ", err, query)
logger.Warningf(errMsg)
mu.Lock()
errs = append(errs, err)
mu.Unlock()
return
}
mu.Lock()
resp.List = append(resp.List, data...)
resp.Total += total
mu.Unlock()
}(q)
}
wg.Wait()
if len(errs) > 0 {
return LogResp{}, errs[0]
}
if len(resp.List) == 0 {
return LogResp{}, fmt.Errorf("no data")
}
return resp, nil
}
func (rt *Router) QueryLogV2(c *gin.Context) {
var f models.QueryParam
ginx.BindJSON(c, &f)
var resp LogResp
var errMsg string
for _, q := range f.Querys {
if !rt.Center.AnonymousAccess.PromQuerier && !CheckDsPerm(c, f.DatasourceId, f.Cate, q) {
ginx.Bomb(200, "no permission")
}
plug, exists := dscache.DsCache.Get(f.Cate, f.DatasourceId)
if !exists {
logger.Warningf("cluster:%d not exists query:%+v", f.DatasourceId, f)
ginx.Bomb(200, "cluster not exists")
}
data, total, err := plug.QueryLog(c.Request.Context(), q)
if err != nil {
errMsg += fmt.Sprintf("query data error: %v query:%v\n ", err, q)
logger.Warningf("query data error: %v query:%v", err, q)
continue
}
resp.List = append(resp.List, data...)
resp.Total += total
}
if errMsg != "" || len(resp.List) == 0 {
ginx.Bomb(200, errMsg)
}
ginx.NewRender(c).Data(resp, nil)
resp, err := QueryLogConcurrently(rt.Center.AnonymousAccess.PromQuerier, c, f)
ginx.NewRender(c).Data(resp, err)
}
func (rt *Router) QueryLog(c *gin.Context) {

View File

@@ -6,10 +6,10 @@ import (
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/str"
)
func (rt *Router) recordingRuleGets(c *gin.Context) {
@@ -19,7 +19,7 @@ func (rt *Router) recordingRuleGets(c *gin.Context) {
}
func (rt *Router) recordingRuleGetsByGids(c *gin.Context) {
gids := str.IdsInt64(ginx.QueryStr(c, "gids", ""), ",")
gids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "gids", ""), ",")
if len(gids) > 0 {
for _, gid := range gids {
rt.bgroCheck(c, gid)

View File

@@ -10,13 +10,13 @@ import (
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/ccfos/nightingale/v6/storage"
"github.com/gin-gonic/gin"
"github.com/prometheus/common/model"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/str"
)
type TargetQuery struct {
@@ -44,7 +44,7 @@ func (rt *Router) targetGetsByHostFilter(c *gin.Context) {
}
func (rt *Router) targetGets(c *gin.Context) {
bgids := str.IdsInt64(ginx.QueryStr(c, "gids", ""), ",")
bgids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "gids", ""), ",")
query := ginx.QueryStr(c, "query", "")
limit := ginx.QueryInt(c, "limit", 30)
downtime := ginx.QueryInt64(c, "downtime", 0)
@@ -56,7 +56,11 @@ func (rt *Router) targetGets(c *gin.Context) {
hosts := queryStrListField(c, "hosts", ",", " ", "\n")
var err error
if len(bgids) == 0 {
if len(bgids) > 0 {
for _, gid := range bgids {
rt.bgroCheck(c, gid)
}
} else {
user := c.MustGet("user").(*models.User)
if !user.IsAdmin() {
// 如果是非 admin 用户,全部对象的情况,找到用户有权限的业务组

View File

@@ -5,11 +5,11 @@ import (
"github.com/ccfos/nightingale/v6/alert/sender"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/i18n"
"github.com/toolkits/pkg/str"
)
func (rt *Router) taskGets(c *gin.Context) {
@@ -40,7 +40,7 @@ func (rt *Router) taskGets(c *gin.Context) {
}
func (rt *Router) taskGetsByGids(c *gin.Context) {
gids := str.IdsInt64(ginx.QueryStr(c, "gids", ""), ",")
gids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "gids", ""), ",")
if len(gids) > 0 {
for _, gid := range gids {
rt.bgroCheck(c, gid)

View File

@@ -7,6 +7,7 @@ import (
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
@@ -35,7 +36,7 @@ func (rt *Router) taskTplGetsByGids(c *gin.Context) {
query := ginx.QueryStr(c, "query", "")
limit := ginx.QueryInt(c, "limit", 20)
gids := str.IdsInt64(ginx.QueryStr(c, "gids", ""), ",")
gids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "gids", ""), ",")
if len(gids) > 0 {
for _, gid := range gids {
rt.bgroCheck(c, gid)

View File

@@ -6,11 +6,11 @@ import (
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/flashduty"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/str"
)
func (rt *Router) checkBusiGroupPerm(c *gin.Context) {
@@ -32,7 +32,7 @@ func (rt *Router) userGroupGets(c *gin.Context) {
}
func (rt *Router) userGroupGetsByService(c *gin.Context) {
ids := str.IdsInt64(ginx.QueryStr(c, "ids", ""))
ids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "ids", ""))
if len(ids) == 0 {
lst, err := models.UserGroupGetAll(rt.Ctx)

View File

@@ -24,6 +24,7 @@ type Query struct {
Index string `json:"index" mapstructure:"index"`
IndexPatternId int64 `json:"index_pattern" mapstructure:"index_pattern"`
Filter string `json:"filter" mapstructure:"filter"`
Offset int64 `json:"offset" mapstructure:"offset"`
MetricAggr MetricAggr `json:"value" mapstructure:"value"`
GroupBy []GroupBy `json:"group_by" mapstructure:"group_by"`
DateField string `json:"date_field" mapstructure:"date_field"`
@@ -347,12 +348,14 @@ func QueryData(ctx context.Context, queryParam interface{}, cliTimeout int64, ve
if ip, ok := GetEsIndexPatternCacheType().Get(param.IndexPatternId); ok {
param.DateField = ip.TimeField
indexArr = []string{ip.Name}
param.Index = ip.Name
} else {
return nil, fmt.Errorf("index pattern:%d not found", param.IndexPatternId)
}
} else {
indexArr = strings.Split(param.Index, ",")
}
q := elastic.NewRangeQuery(param.DateField)
now := time.Now().Unix()
var start, end int64
@@ -370,6 +373,11 @@ func QueryData(ctx context.Context, queryParam interface{}, cliTimeout int64, ve
start = start - delay
}
if param.Offset > 0 {
end = end - param.Offset
start = start - param.Offset
}
q.Gte(time.Unix(start, 0).UnixMilli())
q.Lte(time.Unix(end, 0).UnixMilli())
q.Format("epoch_millis")
@@ -481,7 +489,7 @@ func QueryData(ctx context.Context, queryParam interface{}, cliTimeout int64, ve
source, _ := queryString.Source()
b, _ := json.Marshal(source)
logger.Debugf("query_data q:%+v tsAggr:%+v query_string:%s", param, tsAggr, string(b))
logger.Debugf("query_data q:%+v indexArr:%+v tsAggr:%+v query_string:%s", param, indexArr, tsAggr, string(b))
searchSource := elastic.NewSearchSource().
Query(queryString).
@@ -528,7 +536,16 @@ func QueryData(ctx context.Context, queryParam interface{}, cliTimeout int64, ve
GetBuckts("", keys, bucketsData, metrics, "", 0, param.MetricAggr.Func)
return TransferData(fmt.Sprintf("%s_%s", field, param.MetricAggr.Func), param.Ref, metrics.Data), nil
items, err := TransferData(fmt.Sprintf("%s_%s", field, param.MetricAggr.Func), param.Ref, metrics.Data), nil
var m map[string]interface{}
bs, _ := json.Marshal(queryParam)
json.Unmarshal(bs, &m)
m["index"] = param.Index
for i := range items {
items[i].Query = fmt.Sprintf("%+v", m)
}
return items, nil
}
func HitFilter(typ string) bool {

View File

@@ -187,6 +187,7 @@ func (e *Elasticsearch) QueryData(ctx context.Context, queryParam interface{}) (
search := func(ctx context.Context, indices []string, source interface{}, timeout int, maxShard int) (*elastic.SearchResult, error) {
return e.Client.Search().
Index(indices...).
IgnoreUnavailable(true).
Source(source).
Timeout(fmt.Sprintf("%ds", timeout)).
MaxConcurrentShardRequests(maxShard).
@@ -204,7 +205,7 @@ func (e *Elasticsearch) QueryIndices() ([]string, error) {
func (e *Elasticsearch) QueryFields(indexs []string) ([]string, error) {
var fields []string
result, err := elastic.NewGetFieldMappingService(e.Client).Index(indexs...).Do(context.Background())
result, err := elastic.NewGetFieldMappingService(e.Client).Index(indexs...).IgnoreUnavailable(true).Do(context.Background())
if err != nil {
return fields, err
}
@@ -264,6 +265,7 @@ func (e *Elasticsearch) QueryLog(ctx context.Context, queryParam interface{}) ([
return e.Client.Search().
Index(indices...).
IgnoreUnavailable(true).
MaxConcurrentShardRequests(maxShard).
Source(source).
Timeout(fmt.Sprintf("%ds", timeout)).
@@ -276,6 +278,7 @@ func (e *Elasticsearch) QueryLog(ctx context.Context, queryParam interface{}) ([
func (e *Elasticsearch) QueryFieldValue(indexs []string, field string, query string) ([]string, error) {
var values []string
search := e.Client.Search().
IgnoreUnavailable(true).
Index(indexs...).
Size(0)
@@ -359,6 +362,7 @@ func (e *Elasticsearch) QueryMapData(ctx context.Context, query interface{}) ([]
return e.Client.Search().
Index(indices...).
IgnoreUnavailable(true).
Source(source).
Timeout(fmt.Sprintf("%ds", timeout)).
Do(ctx)

File diff suppressed because one or more lines are too long

View File

@@ -121,7 +121,6 @@ func (ncc *NotifyChannelCacheType) SyncNotifyChannels() {
err := ncc.syncNotifyChannels()
if err != nil {
fmt.Println("failed to sync notify channels:", err)
exit(1)
}
go ncc.loopSyncNotifyChannels()

View File

@@ -193,13 +193,13 @@ func (e *AlertCurEvent) ParseRule(field string) error {
}
templateFuncMapCopy := tplx.NewTemplateFuncMap()
templateFuncMapCopy["query"] = func(promql string, param ...int64) []AnomalyPoint {
templateFuncMapCopy["query"] = func(promql string, param ...int64) tplx.QueryResult {
datasourceId := e.DatasourceId
if len(param) > 0 {
datasourceId = param[0]
}
value := tplx.Query(datasourceId, promql)
return ConvertAnomalyPoints(value)
return tplx.ConvertToQueryResult(value)
}
text := strings.Join(append(defs, f), "")

View File

@@ -67,7 +67,7 @@ func MigrateTables(db *gorm.DB) error {
&TaskRecord{}, &ChartShare{}, &Target{}, &Configs{}, &Datasource{}, &NotifyTpl{},
&Board{}, &BoardBusigroup{}, &Users{}, &SsoConfig{}, &models.BuiltinMetric{},
&models.MetricFilter{}, &models.NotificaitonRecord{}, &models.TargetBusiGroup{},
&models.UserToken{}, &models.DashAnnotation{}, MessageTemplate{}, NotifyRule{}, NotifyChannelConfig{}}
&models.UserToken{}, &models.DashAnnotation{}, MessageTemplate{}, NotifyRule{}, NotifyChannelConfig{}, &EsIndexPatternMigrate{}}
if isPostgres(db) {
dts = append(dts, &models.PostgresBuiltinComponent{})

View File

@@ -2,8 +2,8 @@ package models
import (
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/str"
)
const (
@@ -65,7 +65,7 @@ func (n *NotificaitonRecord) GetGroupIds(ctx *ctx.Context) (groupIds []int64) {
if sub, err := AlertSubscribeGet(ctx, "id=?", n.SubId); err != nil {
logger.Errorf("AlertSubscribeGet failed, err: %v", err)
} else {
groupIds = str.IdsInt64(sub.UserGroupIds, " ")
groupIds = strx.IdsInt64ForAPI(sub.UserGroupIds, " ")
}
return
}
@@ -73,7 +73,7 @@ func (n *NotificaitonRecord) GetGroupIds(ctx *ctx.Context) (groupIds []int64) {
if event, err := AlertHisEventGetById(ctx, n.EventId); err != nil {
logger.Errorf("AlertHisEventGetById failed, err: %v", err)
} else {
groupIds = str.IdsInt64(event.NotifyGroups, " ")
groupIds = strx.IdsInt64ForAPI(event.NotifyGroups, " ")
}
return
}

View File

@@ -69,6 +69,7 @@ type JWTAuth struct {
AccessExpired int64
RefreshExpired int64
RedisKeyPrefix string
SingleLogin bool
}
type TokenAuth struct {

View File

@@ -74,6 +74,9 @@ var I18N = `{
"Log Analysis": "日志分析",
"View Logs": "查看日志",
"View Index Patterns": "查看索引模式",
"Add Index Pattern": "添加索引模式",
"Modify Index Pattern": "修改索引模式",
"Delete Index Pattern": "删除索引模式",
"Alert Rules": "告警规则",
"View Alert Rules": "查看告警规则",
"Add Alert Rule": "添加告警规则",
@@ -227,6 +230,9 @@ var I18N = `{
"Log Analysis": "日志分析",
"View Logs": "查看日志",
"View Index Patterns": "查看索引模式",
"Add Index Pattern": "添加索引模式",
"Modify Index Pattern": "修改索引模式",
"Delete Index Pattern": "删除索引模式",
"Alert Rules": "告警规则",
"View Alert Rules": "查看告警规则",
"Add Alert Rule": "添加告警规则",
@@ -391,6 +397,9 @@ var I18N = `{
"Log Analysis": "日誌分析",
"View Logs": "查看日誌",
"View Index Patterns": "查看索引模式",
"Add Index Pattern": "添加索引模式",
"Modify Index Pattern": "修改索引模式",
"Delete Index Pattern": "刪除索引模式",
"Alert Rules": "告警規則",
"View Alert Rules": "查看告警規則",
"Add Alert Rule": "添加告警規則",
@@ -545,6 +554,9 @@ var I18N = `{
"Log Analysis": "ログ分析",
"View Logs": "ログの表示",
"View Index Patterns": "インデックスパターンの表示",
"Add Index Pattern": "インデックスパターンの追加",
"Modify Index Pattern": "インデックスパターンの修正",
"Delete Index Pattern": "インデックスパターンの削除",
"Alert Rules": "アラートルール",
"View Alert Rules": "アラートルールの表示",
"Add Alert Rule": "アラートルールの追加",

View File

@@ -397,7 +397,7 @@ type Metadata struct {
}
// queryResult contains result data for a query.
type queryResult struct {
type QueryResult struct {
Type model.ValueType `json:"resultType"`
Result interface{} `json:"result"`
@@ -510,7 +510,7 @@ func (r *RecordingRule) UnmarshalJSON(b []byte) error {
return nil
}
func (qr *queryResult) UnmarshalJSON(b []byte) error {
func (qr *QueryResult) UnmarshalJSON(b []byte) error {
v := struct {
Type model.ValueType `json:"resultType"`
Result json.RawMessage `json:"result"`
@@ -701,6 +701,7 @@ func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model.
var warnings Warnings
var value model.Value
var statusCode int
for i := 0; i < 1; i++ {
value, warnings, statusCode, err = h.query(ctx, query, ts)
if err == nil {
@@ -731,7 +732,7 @@ func (h *httpAPI) query(ctx context.Context, query string, ts time.Time) (model.
return nil, warnings, 0, err
}
var qres queryResult
var qres QueryResult
return model.Value(qres.v), warnings, resp.StatusCode, json.Unmarshal(body, &qres)
}
@@ -749,7 +750,7 @@ func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.
return nil, warnings, err
}
var qres queryResult
var qres QueryResult
return qres.v, warnings, json.Unmarshal(body, &qres)
}

View File

@@ -1,8 +0,0 @@
package str
import "regexp"
// IsValidURL reports whether url matches a simple http/https URL shape:
// the scheme, "://", one character that is not whitespace or one of /$.?#,
// then any run of non-whitespace characters.
// NOTE(review): the pattern is recompiled on every call; a package-level
// regexp.MustCompile var would avoid the repeated compilation cost.
func IsValidURL(url string) bool {
	re := regexp.MustCompile(`^https?://[^\s/$.?#].[^\s]*$`)
	return re.MatchString(url)
}

49
pkg/strx/verify.go Normal file
View File

@@ -0,0 +1,49 @@
package strx
import (
"net/http"
"regexp"
"strconv"
"strings"
"github.com/toolkits/pkg/errorx"
)
// urlRe matches a simple http/https URL shape: the scheme, "://", one
// character that is not whitespace or one of /$.?#, then any run of
// non-whitespace characters. Compiled once at package init instead of on
// every call (regexp compilation is relatively expensive and this helper
// sits on API request paths).
var urlRe = regexp.MustCompile(`^https?://[^\s/$.?#].[^\s]*$`)

// IsValidURL reports whether url looks like a well-formed http or https URL.
func IsValidURL(url string) bool {
	return urlRe.MatchString(url)
}
// IdsInt64ForAPI splits ids on the given separator (default ",") and parses
// each non-empty piece as a base-10 int64. A separator of " " splits on any
// run of whitespace via strings.Fields. An empty input yields an empty
// (non-nil) slice. Any piece that fails to parse aborts the request with
// HTTP 400 through errorx.Bomb.
func IdsInt64ForAPI(ids string, sep ...string) []int64 {
	if ids == "" {
		return []int64{}
	}

	delim := ","
	if len(sep) > 0 {
		delim = sep[0]
	}

	// A single-space separator means "any whitespace", matching the
	// historical behavior of the str.IdsInt64 helper this replaces.
	var pieces []string
	if delim == " " {
		pieces = strings.Fields(ids)
	} else {
		pieces = strings.Split(ids, delim)
	}

	out := make([]int64, 0, len(pieces))
	for _, p := range pieces {
		if p == "" {
			continue
		}
		v, err := strconv.ParseInt(p, 10, 64)
		if err != nil {
			errorx.Bomb(http.StatusBadRequest, "cannot convert %s to int64", p)
		}
		out = append(out, v)
	}
	return out
}

View File

@@ -37,10 +37,10 @@ func RegisterQueryFunc(f QueryFunc) {
queryFunc = f
}
type queryResult []*sample
type QueryResult []*sample
type queryResultByLabelSorter struct {
results queryResult
results QueryResult
by string
}
@@ -426,7 +426,7 @@ func FormatDecimal(s string, n int) string {
return fmt.Sprintf(format, num)
}
func First(v queryResult) (*sample, error) {
func First(v QueryResult) (*sample, error) {
if len(v) > 0 {
return v[0], nil
}
@@ -472,7 +472,7 @@ func TableLink(expr string) string {
return strutil.TableLinkForExpression(expr)
}
func SortByLabel(label string, v queryResult) queryResult {
func SortByLabel(label string, v QueryResult) QueryResult {
sorter := queryResultByLabelSorter{v[:], label}
sort.Stable(sorter)
return v
@@ -585,6 +585,83 @@ func Query(datasourceID int64, promql string) model.Value {
return nil
}
// ConvertToQueryResult converts a model.Value returned by a Prometheus-style
// query into a QueryResult. A vector contributes one sample per series; for a
// matrix only the most recent point of each series is kept; a scalar yields a
// single label-less sample. NaN values are dropped, and unsupported value
// types (or nil input) yield nil.
func ConvertToQueryResult(value model.Value) QueryResult {
	if value == nil {
		return nil
	}

	// copyLabels flattens a series' metric label set into a plain string map.
	copyLabels := func(metric model.Metric) map[string]string {
		labels := make(map[string]string, len(metric))
		for name, val := range metric {
			labels[string(name)] = string(val)
		}
		return labels
	}

	var out QueryResult

	switch v := value.(type) {
	case model.Vector:
		for _, s := range v {
			if math.IsNaN(float64(s.Value)) {
				continue
			}
			out = append(out, &sample{
				Labels: copyLabels(s.Metric),
				Value:  float64(s.Value),
			})
		}
	case model.Matrix:
		for _, series := range v {
			if len(series.Values) == 0 {
				continue
			}
			// Only the latest point of each series is meaningful here.
			latest := series.Values[len(series.Values)-1]
			if math.IsNaN(float64(latest.Value)) {
				continue
			}
			out = append(out, &sample{
				Labels: copyLabels(series.Metric),
				Value:  float64(latest.Value),
			})
		}
	case *model.Scalar:
		if math.IsNaN(float64(v.Value)) {
			return nil
		}
		out = append(out, &sample{
			Labels: map[string]string{},
			Value:  float64(v.Value),
		})
	default:
		return nil
	}

	return out
}
func MappingAndJoin(arr interface{}, prefix, suffix, join string) string {
var result []string