Compare commits

...

111 Commits

Author SHA1 Message Date
ning
6bad0fc8c1 code refactor 2025-10-13 12:02:53 +08:00
ning
3a94612792 code refactor 2025-10-13 12:00:29 +08:00
ning
643de04c10 fix sql 2025-10-13 10:36:50 +08:00
Yening Qin
16b3cb1abc feat: support encrypt user phone (#2906)
* feature: add a new configuration option to encrypt user phone numbers in the database (#2902)

Co-authored-by: yuanzaiping_dxm <yuanzaiping@duxiaoman.com>
---------

Co-authored-by: zaipingY <30775871+zaipingy@users.noreply.github.com>
Co-authored-by: yuanzaiping_dxm <yuanzaiping@duxiaoman.com>
2025-10-11 14:37:14 +08:00
ning
32995c1b2d fix: event pipeline insert by PostgreSQL 2025-09-23 16:40:27 +08:00
ning
b4fa36fa0e refactor: update message tpl 2025-09-22 18:18:19 +08:00
ning
f412f82eb8 fix: event value is inf 2025-09-19 19:32:40 +08:00
ning
9da1cd506b fix: datasource sync 2025-09-19 17:41:57 +08:00
ning
99ea838863 refactor: optimize query target 2025-09-19 15:00:07 +08:00
ning
7feb003b72 refactor: change feishucard body 2025-09-19 10:37:58 +08:00
ning
b0a053361f refactor: change log 2025-09-17 20:28:22 +08:00
ning
959f75394b refactor: alert rule api 2025-09-17 12:10:43 +08:00
ning
03e95973b2 refactor: message tpl api 2025-09-16 20:13:25 +08:00
ning
e890705167 refactor: event notify 2025-09-16 19:24:05 +08:00
ning
6716f1bdf1 refactor: update event notify 2025-09-15 20:19:18 +08:00
ning
739b9406a4 Merge branch 'main' of github.com:ccfos/nightingale 2025-09-15 17:37:38 +08:00
ning
77f280d1cc fix: event delete api 2025-09-15 17:37:25 +08:00
pioneerlfn
04fe1b9dd6 for slice, when marshal, return [] instead null (#2878) 2025-09-15 17:28:57 +08:00
ning
552758e0e1 refactor: pushgw writer support async 2025-09-14 15:00:40 +08:00
ning
68bc474c1b refactor: update message tpl 2025-09-11 20:38:17 +08:00
ning
f692035deb refactor: change datasource log 2025-09-11 16:14:59 +08:00
ning
eb441353c3 refactor: message tpl 2025-09-11 15:33:02 +08:00
ning
b606b22ae6 fix: opensearch alert 2025-09-10 22:24:50 +08:00
ning
1de0428860 docs: update init.sql 2025-09-09 18:51:23 +08:00
ning
3d0c288c9f refactor: event api 2025-09-09 16:36:40 +08:00
ning
343814a802 refactor: notify rule update 2025-09-09 11:05:58 +08:00
ning
12e2761467 fix: dingtalk message tpl 2025-09-09 10:18:08 +08:00
Yening Qin
0edd5ee772 refactor: event notify (#2869) 2025-09-08 15:04:33 +08:00
ning
5e430cedc7 fix: build, router_target miss idents 2025-09-03 14:17:15 +08:00
ning
a791a9901e refactor: push ts to kafka 2025-09-03 12:17:32 +08:00
totoro
222cdd76f0 refactor : es support search_after (#2859) 2025-09-03 12:12:07 +08:00
arch3754
ed4e3937e0 feat: add target update (#2853) 2025-09-03 12:05:38 +08:00
Yening Qin
60f9e1c48e refactor: dscache sync add datasource process hook (#2792) 2025-09-02 18:12:36 +08:00
Ulric Qin
276dfe7372 update linux dash 2025-09-02 08:57:36 +08:00
Ulric Qin
4a6dacbe30 add host table ng 2025-09-02 08:54:54 +08:00
Ulric Qin
48eebba11a update linux dashboard 2025-09-02 08:52:58 +08:00
ning
eca82e5ec2 change ops update 2025-09-01 14:24:17 +08:00
Yening Qin
21478fcf3d fix: send http notify retry (#2849) 2025-08-30 01:57:32 +08:00
ulricqin
a87c856299 fix: call flashduty to push event (#2848) 2025-08-30 00:06:53 +08:00
ning
ba035a446d refactor: change some log 2025-08-29 16:32:57 +08:00
ning
bf840e6bb2 docs: update dashboard tpl 2025-08-29 10:14:59 +08:00
ning
cd01092aed refactor: update alert rule import api 2025-08-28 19:44:05 +08:00
ning
e202fd50c8 refactor: datasource api 2025-08-28 16:48:04 +08:00
ning
f0e5062485 refactor: optimize edge ident update 2025-08-28 16:24:33 +08:00
ning
861fe96de5 add UpdateDBTargetTimestampDisable 2025-08-27 19:12:56 +08:00
Ulric Qin
5b66ada96d Merge branch 'main' of https://github.com/ccfos/nightingale 2025-08-27 10:06:19 +08:00
Ulric Qin
d5a98debff upgrade ibex 2025-08-27 10:06:11 +08:00
ning
4977052a67 Merge branch 'main' of github.com:ccfos/nightingale 2025-08-22 15:03:17 +08:00
ning
dcc461e587 refactor: push writer support sync 2025-08-22 15:00:49 +08:00
Ulric Qin
f5ce1733bb update minio dashboard 2025-08-21 18:58:47 +08:00
Ulric Qin
436cf25409 Merge branch 'main' of https://github.com/ccfos/nightingale 2025-08-21 10:34:56 +08:00
Ulric Qin
038f68b0b7 add minio dashboard for new version 2025-08-21 10:34:50 +08:00
ning
96ef1895b7 refactor: event_script_notify_result log add stdin 2025-08-20 14:24:28 +08:00
zjxpsetp
eeaa7b46f1 update es dashboard for categraf version bigger than 0.3.102 2025-08-19 23:43:54 +08:00
zjxpsetp
dc525352f1 Merge remote-tracking branch 'origin/main' 2025-08-19 17:36:12 +08:00
zjxpsetp
98a3fe9375 update jmx dashboard in kubernetes 2025-08-19 17:35:55 +08:00
ning
74b0f802ec Merge branch 'main' of github.com:ccfos/nightingale 2025-08-18 11:19:07 +08:00
ning
85bd3148d5 refactor: add update db metric 2025-08-18 11:18:52 +08:00
ning
0931fa9603 fix: target update ts 2025-08-18 11:18:39 +08:00
zjxpsetp
65cdb2da9e 更新 jmx 的仪表盘,新的jmx Exporter 指标和之前有一些差别 2025-08-17 17:23:39 +08:00
ning
9ad6514af6 refactor: ds query api 2025-08-13 11:50:01 +08:00
ning
302c6549e4 refactor: ds query api 2025-08-13 11:12:57 +08:00
ning
a3122270e6 refactor: ds query api 2025-08-13 11:02:00 +08:00
Yening Qin
1245c453bb refactor: send flashduty (#2824) 2025-08-12 20:55:23 +08:00
Ulric Qin
9c5ccf0c8f fix: update update_at when batch-updating-rules 2025-08-06 20:21:57 +08:00
Ulric Qin
cd468af250 refactor batch updating rules 2025-08-06 17:16:02 +08:00
Ulric Qin
2d3449c0ec code refactor for batch updating 2025-08-06 15:45:36 +08:00
ning
e15bdbce92 refactor: optimize import prom alert rule 2025-08-06 11:09:53 +08:00
ning
3890243d42 fix: new mysql db client 2025-08-04 18:40:04 +08:00
ning
37fb4ee867 add case-insensitive search for builtin payload filtering 2025-08-04 16:31:28 +08:00
ning
6db63eafc1 refactor: change import prom rule 2025-08-01 19:00:06 +08:00
ning
1e9cbfc316 fix: event query log 2025-08-01 16:47:14 +08:00
ning
4f95554fe3 refactor: update msg tpl 2025-07-31 18:08:12 +08:00
ning
8eba9aa92f refactor: update msg tpl 2025-07-31 15:31:29 +08:00
ning
6ba74b8e21 fix: pgsql cross database query 2025-07-31 11:51:27 +08:00
ning
8ea4632681 refactor: update duty user sync 2025-07-28 14:36:08 +08:00
ning
f958f27de1 fix: AlertRuleExists 2025-07-27 13:28:46 +08:00
ning
1bdfa3e032 refactor: update TargetDel 2025-07-27 12:46:22 +08:00
ning
143880cd46 Merge branch 'main' of github.com:ccfos/nightingale 2025-07-25 13:21:55 +08:00
ning
38f0b4f1bb refactor: modify add loki api resp 2025-07-25 13:01:27 +08:00
dependabot[bot]
2bccd5be99 build(deps): bump golang.org/x/oauth2 from 0.23.0 to 0.27.0 (#2793)
Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.23.0 to 0.27.0.
- [Commits](https://github.com/golang/oauth2/compare/v0.23.0...v0.27.0)

---
updated-dependencies:
- dependency-name: golang.org/x/oauth2
  dependency-version: 0.27.0
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-24 21:21:29 +08:00
ning
7b328b3eaa refactor: update prom rule import 2025-07-24 21:17:12 +08:00
Ulric Qin
8bd5b90e94 fix: support old rule format when importing 2025-07-23 09:36:28 +08:00
ning
96629e284f refactor: add email notify record 2025-07-17 11:45:58 +08:00
ning
67d2875690 fix: update record rule datasource ids api 2025-07-15 16:29:30 +08:00
ning
238895a1f8 refactor: init tpl 2025-07-15 15:52:38 +08:00
ning
fb341b645d refactor: sub alert add host filter 2025-07-15 14:46:09 +08:00
Haobo Zhang
2d84fd8cf3 fix: ai summary customize parameter parse from interface (#2788) 2025-07-14 14:54:15 +08:00
ning
2611f87c41 refactor: drop builtin_components idx_ident 2025-07-11 19:12:24 +08:00
ning
a5b7aa7a26 refactor: drop builtin_components idx_ident 2025-07-11 18:57:03 +08:00
ning
0714a0f8f1 refactor: change log level 2025-07-11 16:41:14 +08:00
ning
063cc750e1 refactor: update notify channel api 2025-07-11 12:25:08 +08:00
ning
b2a912d72f refactor: log level 2025-07-11 12:06:18 +08:00
ning
4ba745f442 fix: alert rule batch update notify rule 2025-07-11 11:38:09 +08:00
smx_Morgan
fa7d46ecad fix: compatible user_token table with postgresql (#2785) 2025-07-10 11:03:33 +08:00
pioneerlfn
a5a43df44f refactor: doris search sql (#2778)
* doris:support search sql with macro

* Update doris.go

---------

Co-authored-by: Yening Qin <710leo@gmail.com>
2025-07-09 21:33:17 +08:00
smx_Morgan
fbf1d68b84 fix: update postgresql init sql (#2784) 2025-07-09 20:53:56 +08:00
ulricqin
ca712f62a4 fix execution of notify script (#2769) 2025-07-06 08:40:13 +08:00
ulricqin
84ee14d21e add img (#2767) 2025-07-03 19:48:39 +08:00
ning
c9cf1cfdd2 refactor: change alert rule update api 2025-07-02 20:32:56 +08:00
ning
9d1c01107f refactor: builtin tpl 2025-07-02 20:12:43 +08:00
ning
7ea31b5c6d refactor: builtin tpl 2025-07-02 19:37:25 +08:00
ning
e8e1c67cc8 refactor: event notify filter 2025-07-02 15:44:02 +08:00
ning
8079bcd288 docs: enbale token auth 2025-07-01 18:38:28 +08:00
ning
33b178ce82 refactor: roles api 2025-07-01 15:37:42 +08:00
ning
28c9cd7b43 refactor: change eval for duration check 2025-06-30 15:10:20 +08:00
ning
b771e8a3e8 refactor: change eval for duration check 2025-06-30 12:06:48 +08:00
Yening Qin
4945e98200 refactor: builtin tpl gets api (#2760) 2025-06-27 19:45:28 +08:00
ning
a938ea3e56 docs: update migrate.sql 2025-06-26 18:44:55 +08:00
ning
25c339025b Merge branch 'main' of github.com:ccfos/nightingale 2025-06-25 18:05:46 +08:00
ning
bb0ee35275 refactor: optimize notify rule check api 2025-06-25 18:05:34 +08:00
105 changed files with 18056 additions and 3127 deletions

4
.gitignore vendored
View File

@@ -58,6 +58,10 @@ _test
.idea
.index
.vscode
.issue
.issue/*
.cursor
.claude
.DS_Store
.cache-loader
.payload

View File

@@ -31,7 +31,9 @@
Nightingale is an open-source monitoring project that focuses on alerting. Similar to Grafana, Nightingale also connects with various existing data sources. However, while Grafana emphasizes visualization, Nightingale places greater emphasis on the alerting engine, as well as the processing and distribution of alarms.
The Nightingale project was initially developed and open-sourced by DiDi.inc. On May 11, 2022, it was donated to the Open Source Development Committee of the China Computer Federation (CCF ODC).
> The Nightingale project was initially developed and open-sourced by DiDi.inc. On May 11, 2022, it was donated to the Open Source Development Committee of the China Computer Federation (CCF ODC).
![](https://n9e.github.io/img/global/arch-bg.png)
## 💡 How Nightingale Works
@@ -104,7 +106,7 @@ Then Nightingale is not suitable. It is recommended that you choose on-call prod
## 🤝 Community Co-Building
- ❇️ Please read the [Nightingale Open Source Project and Community Governance Draft](./doc/community-governance.md). We sincerely welcome every user, developer, company, and organization to use Nightingale, actively report bugs, submit feature requests, share best practices, and help build a professional and active open-source community.
- ❤️ Nightingale Contributors
- ❤️ Nightingale Contributors
<a href="https://github.com/ccfos/nightingale/graphs/contributors">
<img src="https://contrib.rocks/image?repo=ccfos/nightingale" />
</a>

View File

@@ -31,7 +31,9 @@
夜莺监控(Nightingale)是一款侧重告警的监控类开源项目。类似 Grafana 的数据源集成方式,夜莺也是对接多种既有的数据源,不过 Grafana 侧重在可视化,夜莺是侧重在告警引擎、告警事件的处理和分发。
夜莺监控项目,最初由滴滴开发和开源,并于 2022 年 5 月 11 日捐赠予中国计算机学会开源发展委员会(CCF ODC),为 CCF ODC 成立后接受捐赠的第一个开源项目。
> 夜莺监控项目,最初由滴滴开发和开源,并于 2022 年 5 月 11 日捐赠予中国计算机学会开源发展委员会(CCF ODC),为 CCF ODC 成立后接受捐赠的第一个开源项目。
![](https://n9e.github.io/img/global/arch-bg.png)
## 夜莺的工作逻辑

View File

@@ -35,9 +35,9 @@ func MatchGroupsName(groupName string, groupFilter []models.TagFilter) bool {
func matchTag(value string, filter models.TagFilter) bool {
switch filter.Func {
case "==":
return strings.TrimSpace(filter.Value) == strings.TrimSpace(value)
return strings.TrimSpace(fmt.Sprintf("%v", filter.Value)) == strings.TrimSpace(value)
case "!=":
return strings.TrimSpace(filter.Value) != strings.TrimSpace(value)
return strings.TrimSpace(fmt.Sprintf("%v", filter.Value)) != strings.TrimSpace(value)
case "in":
_, has := filter.Vset[value]
return has

View File

@@ -110,10 +110,6 @@ func (e *Consumer) consumeOne(event *models.AlertCurEvent) {
e.persist(event)
if event.IsRecovered && event.NotifyRecovered == 0 {
return
}
e.dispatch.HandleEventNotify(event, false)
}

View File

@@ -24,6 +24,15 @@ import (
"github.com/toolkits/pkg/logger"
)
var ShouldSkipNotify func(*ctx.Context, *models.AlertCurEvent, int64) bool
var SendByNotifyRule func(*ctx.Context, *memsto.UserCacheType, *memsto.UserGroupCacheType, *memsto.NotifyChannelCacheType,
[]*models.AlertCurEvent, int64, *models.NotifyConfig, *models.NotifyChannelConfig, *models.MessageTemplate)
func init() {
ShouldSkipNotify = shouldSkipNotify
SendByNotifyRule = SendNotifyRuleMessage
}
type Dispatch struct {
alertRuleCache *memsto.AlertRuleCacheType
userCache *memsto.UserCacheType
@@ -45,9 +54,8 @@ type Dispatch struct {
tpls map[string]*template.Template
ExtraSenders map[string]sender.Sender
BeforeSenderHook func(*models.AlertCurEvent) bool
ctx *ctx.Context
Astats *astats.Stats
ctx *ctx.Context
Astats *astats.Stats
RwLock sync.RWMutex
}
@@ -56,7 +64,7 @@ type Dispatch struct {
func NewDispatch(alertRuleCache *memsto.AlertRuleCacheType, userCache *memsto.UserCacheType, userGroupCache *memsto.UserGroupCacheType,
alertSubscribeCache *memsto.AlertSubscribeCacheType, targetCache *memsto.TargetCacheType, notifyConfigCache *memsto.NotifyConfigCacheType,
taskTplsCache *memsto.TaskTplCache, notifyRuleCache *memsto.NotifyRuleCacheType, notifyChannelCache *memsto.NotifyChannelCacheType,
messageTemplateCache *memsto.MessageTemplateCacheType, eventProcessorCache *memsto.EventProcessorCacheType, alerting aconf.Alerting, ctx *ctx.Context, astats *astats.Stats) *Dispatch {
messageTemplateCache *memsto.MessageTemplateCacheType, eventProcessorCache *memsto.EventProcessorCacheType, alerting aconf.Alerting, c *ctx.Context, astats *astats.Stats) *Dispatch {
notify := &Dispatch{
alertRuleCache: alertRuleCache,
userCache: userCache,
@@ -77,7 +85,7 @@ func NewDispatch(alertRuleCache *memsto.AlertRuleCacheType, userCache *memsto.Us
ExtraSenders: make(map[string]sender.Sender),
BeforeSenderHook: func(*models.AlertCurEvent) bool { return true },
ctx: ctx,
ctx: c,
Astats: astats,
}
@@ -166,6 +174,8 @@ func (e *Dispatch) HandleEventWithNotifyRule(eventOrigin *models.AlertCurEvent)
if !notifyRule.Enable {
continue
}
eventCopy.NotifyRuleId = notifyRuleId
eventCopy.NotifyRuleName = notifyRule.Name
var processors []models.Processor
for _, pipelineConfig := range notifyRule.PipelineConfigs {
@@ -188,25 +198,31 @@ func (e *Dispatch) HandleEventWithNotifyRule(eventOrigin *models.AlertCurEvent)
}
for _, processor := range processors {
var res string
var err error
logger.Infof("before processor notify_id: %d, event:%+v, processor:%+v", notifyRuleId, eventCopy, processor)
eventCopy, res, err := processor.Process(e.ctx, eventCopy)
logger.Infof("after processor notify_id: %d, event:%+v, processor:%+v, res:%v, err:%v", notifyRuleId, eventCopy, processor, res, err)
eventCopy, res, err = processor.Process(e.ctx, eventCopy)
if eventCopy == nil {
logger.Warningf("notify_id: %d, event:%+v, processor:%+v, event is nil", notifyRuleId, eventCopy, processor)
logger.Warningf("after processor notify_id: %d, event:%+v, processor:%+v, event is nil", notifyRuleId, eventCopy, processor)
sender.NotifyRecord(e.ctx, []*models.AlertCurEvent{eventOrigin}, notifyRuleId, "", "", res, errors.New("drop by processor"))
break
}
logger.Infof("after processor notify_id: %d, event:%+v, processor:%+v, res:%v, err:%v", notifyRuleId, eventCopy, processor, res, err)
}
if eventCopy == nil {
// 如果 eventCopy 为 nil,说明 eventCopy 被 processor drop 掉了, 不再发送通知
if ShouldSkipNotify(e.ctx, eventCopy, notifyRuleId) {
logger.Infof("notify_id: %d, event:%+v, should skip notify", notifyRuleId, eventCopy)
continue
}
// notify
for i := range notifyRule.NotifyConfigs {
if !NotifyRuleApplicable(&notifyRule.NotifyConfigs[i], eventCopy) {
err := NotifyRuleMatchCheck(&notifyRule.NotifyConfigs[i], eventCopy)
if err != nil {
logger.Errorf("notify_id: %d, event:%+v, channel_id:%d, template_id: %d, notify_config:%+v, err:%v", notifyRuleId, eventCopy, notifyRule.NotifyConfigs[i].ChannelID, notifyRule.NotifyConfigs[i].TemplateID, notifyRule.NotifyConfigs[i], err)
continue
}
notifyChannel := e.notifyChannelCache.Get(notifyRule.NotifyConfigs[i].ChannelID)
messageTemplate := e.messageTemplateCache.Get(notifyRule.NotifyConfigs[i].TemplateID)
if notifyChannel == nil {
@@ -222,14 +238,25 @@ func (e *Dispatch) HandleEventWithNotifyRule(eventOrigin *models.AlertCurEvent)
continue
}
// todo go send
// todo 聚合 event
go e.sendV2([]*models.AlertCurEvent{eventCopy}, notifyRuleId, &notifyRule.NotifyConfigs[i], notifyChannel, messageTemplate)
go SendByNotifyRule(e.ctx, e.userCache, e.userGroupCache, e.notifyChannelCache, []*models.AlertCurEvent{eventCopy}, notifyRuleId, &notifyRule.NotifyConfigs[i], notifyChannel, messageTemplate)
}
}
}
}
func shouldSkipNotify(ctx *ctx.Context, event *models.AlertCurEvent, notifyRuleId int64) bool {
if event == nil {
// 如果 eventCopy 为 nil,说明 eventCopy 被 processor drop 掉了, 不再发送通知
return true
}
if event.IsRecovered && event.NotifyRecovered == 0 {
// 如果 eventCopy 是恢复事件,且 NotifyRecovered 为 0,则不发送通知
return true
}
return false
}
func pipelineApplicable(pipeline *models.EventPipeline, event *models.AlertCurEvent) bool {
if pipeline == nil {
return true
@@ -269,7 +296,7 @@ func pipelineApplicable(pipeline *models.EventPipeline, event *models.AlertCurEv
return tagMatch && attributesMatch
}
func NotifyRuleApplicable(notifyConfig *models.NotifyConfig, event *models.AlertCurEvent) bool {
func NotifyRuleMatchCheck(notifyConfig *models.NotifyConfig, event *models.AlertCurEvent) error {
tm := time.Unix(event.TriggerTime, 0)
triggerTime := tm.Format("15:04")
triggerWeek := int(tm.Weekday())
@@ -321,6 +348,10 @@ func NotifyRuleApplicable(notifyConfig *models.NotifyConfig, event *models.Alert
}
}
if !timeMatch {
return fmt.Errorf("event time not match time filter")
}
severityMatch := false
for i := range notifyConfig.Severities {
if notifyConfig.Severities[i] == event.Severity {
@@ -328,6 +359,10 @@ func NotifyRuleApplicable(notifyConfig *models.NotifyConfig, event *models.Alert
}
}
if !severityMatch {
return fmt.Errorf("event severity not match severity filter")
}
tagMatch := true
if len(notifyConfig.LabelKeys) > 0 {
for i := range notifyConfig.LabelKeys {
@@ -339,23 +374,32 @@ func NotifyRuleApplicable(notifyConfig *models.NotifyConfig, event *models.Alert
tagFilters, err := models.ParseTagFilter(notifyConfig.LabelKeys)
if err != nil {
logger.Errorf("notify send failed to parse tag filter: %v event:%+v notify_config:%+v", err, event, notifyConfig)
return false
return fmt.Errorf("failed to parse tag filter: %v", err)
}
tagMatch = common.MatchTags(event.TagsMap, tagFilters)
}
if !tagMatch {
return fmt.Errorf("event tag not match tag filter")
}
attributesMatch := true
if len(notifyConfig.Attributes) > 0 {
tagFilters, err := models.ParseTagFilter(notifyConfig.Attributes)
if err != nil {
logger.Errorf("notify send failed to parse tag filter: %v event:%+v notify_config:%+v err:%v", tagFilters, event, notifyConfig, err)
return false
return fmt.Errorf("failed to parse tag filter: %v", err)
}
attributesMatch = common.MatchTags(event.JsonTagsAndValue(), tagFilters)
}
if !attributesMatch {
return fmt.Errorf("event attributes not match attributes filter")
}
logger.Infof("notify send timeMatch:%v severityMatch:%v tagMatch:%v attributesMatch:%v event:%+v notify_config:%+v", timeMatch, severityMatch, tagMatch, attributesMatch, event, notifyConfig)
return timeMatch && severityMatch && tagMatch && attributesMatch
return nil
}
func GetNotifyConfigParams(notifyConfig *models.NotifyConfig, contactKey string, userCache *memsto.UserCacheType, userGroupCache *memsto.UserGroupCacheType) ([]string, []int64, map[string]string) {
@@ -423,7 +467,8 @@ func GetNotifyConfigParams(notifyConfig *models.NotifyConfig, contactKey string,
return sendtos, flashDutyChannelIDs, customParams
}
func (e *Dispatch) sendV2(events []*models.AlertCurEvent, notifyRuleId int64, notifyConfig *models.NotifyConfig, notifyChannel *models.NotifyChannelConfig, messageTemplate *models.MessageTemplate) {
func SendNotifyRuleMessage(ctx *ctx.Context, userCache *memsto.UserCacheType, userGroupCache *memsto.UserGroupCacheType, notifyChannelCache *memsto.NotifyChannelCacheType,
events []*models.AlertCurEvent, notifyRuleId int64, notifyConfig *models.NotifyConfig, notifyChannel *models.NotifyChannelConfig, messageTemplate *models.MessageTemplate) {
if len(events) == 0 {
logger.Errorf("notify_id: %d events is empty", notifyRuleId)
return
@@ -439,10 +484,7 @@ func (e *Dispatch) sendV2(events []*models.AlertCurEvent, notifyRuleId int64, no
contactKey = notifyChannel.ParamConfig.UserInfo.ContactKey
}
sendtos, flashDutyChannelIDs, customParams := GetNotifyConfigParams(notifyConfig, contactKey, e.userCache, e.userGroupCache)
e.Astats.GaugeNotifyRecordQueueSize.Inc()
defer e.Astats.GaugeNotifyRecordQueueSize.Dec()
sendtos, flashDutyChannelIDs, customParams := GetNotifyConfigParams(notifyConfig, contactKey, userCache, userGroupCache)
switch notifyChannel.RequestType {
case "flashduty":
@@ -452,10 +494,10 @@ func (e *Dispatch) sendV2(events []*models.AlertCurEvent, notifyRuleId int64, no
for i := range flashDutyChannelIDs {
start := time.Now()
respBody, err := notifyChannel.SendFlashDuty(events, flashDutyChannelIDs[i], e.notifyChannelCache.GetHttpClient(notifyChannel.ID))
respBody, err := notifyChannel.SendFlashDuty(events, flashDutyChannelIDs[i], notifyChannelCache.GetHttpClient(notifyChannel.ID))
respBody = fmt.Sprintf("duration: %d ms %s", time.Since(start).Milliseconds(), respBody)
logger.Infof("notify_id: %d, channel_name: %v, event:%+v, IntegrationUrl: %v dutychannel_id: %v, respBody: %v, err: %v", notifyRuleId, notifyChannel.Name, events[0], notifyChannel.RequestConfig.FlashDutyRequestConfig.IntegrationUrl, flashDutyChannelIDs[i], respBody, err)
sender.NotifyRecord(e.ctx, events, notifyRuleId, notifyChannel.Name, strconv.FormatInt(flashDutyChannelIDs[i], 10), respBody, err)
sender.NotifyRecord(ctx, events, notifyRuleId, notifyChannel.Name, strconv.FormatInt(flashDutyChannelIDs[i], 10), respBody, err)
}
case "http":
@@ -471,22 +513,22 @@ func (e *Dispatch) sendV2(events []*models.AlertCurEvent, notifyRuleId int64, no
}
// 将任务加入队列
success := e.notifyChannelCache.EnqueueNotifyTask(task)
success := notifyChannelCache.EnqueueNotifyTask(task)
if !success {
logger.Errorf("failed to enqueue notify task for channel %d, notify_id: %d", notifyChannel.ID, notifyRuleId)
// 如果入队失败,记录错误通知
sender.NotifyRecord(e.ctx, events, notifyRuleId, notifyChannel.Name, getSendTarget(customParams, sendtos), "", errors.New("failed to enqueue notify task, queue is full"))
sender.NotifyRecord(ctx, events, notifyRuleId, notifyChannel.Name, getSendTarget(customParams, sendtos), "", errors.New("failed to enqueue notify task, queue is full"))
}
case "smtp":
notifyChannel.SendEmail(notifyRuleId, events, tplContent, sendtos, e.notifyChannelCache.GetSmtpClient(notifyChannel.ID))
notifyChannel.SendEmail(notifyRuleId, events, tplContent, sendtos, notifyChannelCache.GetSmtpClient(notifyChannel.ID))
case "script":
start := time.Now()
target, res, err := notifyChannel.SendScript(events, tplContent, customParams, sendtos)
res = fmt.Sprintf("duration: %d ms %s", time.Since(start).Milliseconds(), res)
logger.Infof("notify_id: %d, channel_name: %v, event:%+v, tplContent:%s, customParams:%v, target:%s, res:%s, err:%v", notifyRuleId, notifyChannel.Name, events[0], tplContent, customParams, target, res, err)
sender.NotifyRecord(e.ctx, events, notifyRuleId, notifyChannel.Name, target, res, err)
sender.NotifyRecord(ctx, events, notifyRuleId, notifyChannel.Name, target, res, err)
default:
logger.Warningf("notify_id: %d, channel_name: %v, event:%+v send type not found", notifyRuleId, notifyChannel.Name, events[0])
}
@@ -501,6 +543,11 @@ func NeedBatchContacts(requestConfig *models.HTTPRequestConfig) bool {
// event: 告警/恢复事件
// isSubscribe: 告警事件是否由subscribe的配置产生
func (e *Dispatch) HandleEventNotify(event *models.AlertCurEvent, isSubscribe bool) {
go e.HandleEventWithNotifyRule(event)
if event.IsRecovered && event.NotifyRecovered == 0 {
return
}
rule := e.alertRuleCache.Get(event.RuleId)
if rule == nil {
return
@@ -533,7 +580,6 @@ func (e *Dispatch) HandleEventNotify(event *models.AlertCurEvent, isSubscribe bo
notifyTarget.AndMerge(handler(rule, event, notifyTarget, e))
}
go e.HandleEventWithNotifyRule(event)
go e.Send(rule, event, notifyTarget, isSubscribe)
// 如果是不是订阅规则出现的event, 则需要处理订阅规则的event
@@ -573,6 +619,10 @@ func (e *Dispatch) handleSub(sub *models.AlertSubscribe, event models.AlertCurEv
return
}
if !sub.MatchCate(event.Cate) {
return
}
if !common.MatchTags(event.TagsMap, sub.ITags) {
return
}

View File

@@ -18,7 +18,7 @@ func LogEvent(event *models.AlertCurEvent, location string, err ...error) {
}
logger.Infof(
"event(%s %s) %s: rule_id=%d sub_id:%d notify_rule_ids:%v cluster:%s %v%s@%d %s",
"event(%s %s) %s: rule_id=%d sub_id:%d notify_rule_ids:%v cluster:%s %v%s@%d last_eval_time:%d %s",
event.Hash,
status,
location,
@@ -29,6 +29,7 @@ func LogEvent(event *models.AlertCurEvent, location string, err ...error) {
event.TagsJSON,
event.TriggerValue,
event.TriggerTime,
event.LastEvalTime,
message,
)
}

View File

@@ -8,6 +8,7 @@ import (
"io"
"net/http"
"net/url"
"strconv"
"strings"
"text/template"
"time"
@@ -143,7 +144,11 @@ func (c *AISummaryConfig) generateAISummary(eventInfo string) (string, error) {
// 合并自定义参数
for k, v := range c.CustomParams {
reqParams[k] = v
converted, err := convertCustomParam(v)
if err != nil {
return "", fmt.Errorf("failed to convert custom param %s: %v", k, err)
}
reqParams[k] = converted
}
// 序列化请求体
@@ -196,3 +201,44 @@ func (c *AISummaryConfig) generateAISummary(eventInfo string) (string, error) {
return chatResp.Choices[0].Message.Content, nil
}
// convertCustomParam 将前端传入的参数转换为正确的类型
func convertCustomParam(value interface{}) (interface{}, error) {
if value == nil {
return nil, nil
}
// 如果是字符串,尝试转换为其他类型
if str, ok := value.(string); ok {
// 尝试转换为数字
if f, err := strconv.ParseFloat(str, 64); err == nil {
// 检查是否为整数
if f == float64(int64(f)) {
return int64(f), nil
}
return f, nil
}
// 尝试转换为布尔值
if b, err := strconv.ParseBool(str); err == nil {
return b, nil
}
// 尝试解析为JSON数组
if strings.HasPrefix(strings.TrimSpace(str), "[") {
var arr []interface{}
if err := json.Unmarshal([]byte(str), &arr); err == nil {
return arr, nil
}
}
// 尝试解析为JSON对象
if strings.HasPrefix(strings.TrimSpace(str), "{") {
var obj map[string]interface{}
if err := json.Unmarshal([]byte(str), &obj); err == nil {
return obj, nil
}
}
}
return value, nil
}

View File

@@ -67,3 +67,73 @@ func TestAISummaryConfig_Process(t *testing.T) {
t.Logf("原始注释: %v", result.AnnotationsJSON["description"])
t.Logf("AI总结: %s", result.AnnotationsJSON["ai_summary"])
}
func TestConvertCustomParam(t *testing.T) {
tests := []struct {
name string
input interface{}
expected interface{}
hasError bool
}{
{
name: "nil value",
input: nil,
expected: nil,
hasError: false,
},
{
name: "string number to int64",
input: "123",
expected: int64(123),
hasError: false,
},
{
name: "string float to float64",
input: "123.45",
expected: 123.45,
hasError: false,
},
{
name: "string boolean to bool",
input: "true",
expected: true,
hasError: false,
},
{
name: "string false to bool",
input: "false",
expected: false,
hasError: false,
},
{
name: "JSON array string to slice",
input: `["a", "b", "c"]`,
expected: []interface{}{"a", "b", "c"},
hasError: false,
},
{
name: "JSON object string to map",
input: `{"key": "value", "num": 123}`,
expected: map[string]interface{}{"key": "value", "num": float64(123)},
hasError: false,
},
{
name: "plain string remains string",
input: "hello world",
expected: "hello world",
hasError: false,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
converted, err := convertCustomParam(test.input)
if test.hasError {
assert.Error(t, err)
return
}
assert.NoError(t, err)
assert.Equal(t, test.expected, converted)
})
}
}

View File

@@ -428,17 +428,18 @@ func (p *Processor) handleEvent(events []*models.AlertCurEvent) {
continue
}
var preTriggerTime int64 // 第一个 pending event 的触发时间
var preEvalTime int64 // 第一个 pending event 的检测时间
preEvent, has := p.pendings.Get(event.Hash)
if has {
p.pendings.UpdateLastEvalTime(event.Hash, event.LastEvalTime)
preTriggerTime = preEvent.TriggerTime
preEvalTime = preEvent.FirstEvalTime
} else {
event.FirstEvalTime = event.LastEvalTime
p.pendings.Set(event.Hash, event)
preTriggerTime = event.TriggerTime
preEvalTime = event.FirstEvalTime
}
if event.LastEvalTime-preTriggerTime+int64(event.PromEvalInterval) >= int64(p.rule.PromForDuration) {
if event.LastEvalTime-preEvalTime+int64(event.PromEvalInterval) >= int64(p.rule.PromForDuration) {
fireEvents = append(fireEvents, event)
if severity > event.Severity {
severity = event.Severity

View File

@@ -141,7 +141,7 @@ func updateSmtp(ctx *ctx.Context, ncc *memsto.NotifyConfigCacheType) {
func startEmailSender(ctx *ctx.Context, smtp aconf.SMTPConfig) {
conf := smtp
if conf.Host == "" || conf.Port == 0 {
logger.Warning("SMTP configurations invalid")
logger.Debug("SMTP configurations invalid")
<-mailQuit
return
}

View File

@@ -3,11 +3,15 @@ package integration
import (
"encoding/json"
"path"
"sort"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/pkg/errors"
"github.com/toolkits/pkg/container/set"
"github.com/toolkits/pkg/file"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/runner"
@@ -15,7 +19,18 @@ import (
const SYSTEM = "system"
var BuiltinPayloadInFile *BuiltinPayloadInFileType
type BuiltinPayloadInFileType struct {
Data map[uint64]map[string]map[string][]*models.BuiltinPayload // map[componet_id]map[type]map[cate][]*models.BuiltinPayload
IndexData map[int64]*models.BuiltinPayload // map[uuid]payload
BuiltinMetrics map[string]*models.BuiltinMetric
}
func Init(ctx *ctx.Context, builtinIntegrationsDir string) {
BuiltinPayloadInFile = NewBuiltinPayloadInFileType()
err := models.InitBuiltinPayloads(ctx)
if err != nil {
logger.Warning("init old builtinPayloads fail ", err)
@@ -146,11 +161,10 @@ func Init(ctx *ctx.Context, builtinIntegrationsDir string) {
}
newAlerts := []models.AlertRule{}
writeAlertFileFlag := false
for _, alert := range alerts {
if alert.UUID == 0 {
writeAlertFileFlag = true
alert.UUID = time.Now().UnixNano()
time.Sleep(time.Microsecond)
alert.UUID = time.Now().UnixMicro()
}
newAlerts = append(newAlerts, alert)
@@ -169,47 +183,13 @@ func Init(ctx *ctx.Context, builtinIntegrationsDir string) {
Tags: alert.AppendTags,
Content: string(content),
UUID: alert.UUID,
ID: alert.UUID,
CreatedBy: SYSTEM,
UpdatedBy: SYSTEM,
}
BuiltinPayloadInFile.AddBuiltinPayload(&builtinAlert)
old, err := models.BuiltinPayloadGet(ctx, "uuid = ?", alert.UUID)
if err != nil {
logger.Warning("get builtin alert fail ", builtinAlert, err)
continue
}
if old == nil {
err := builtinAlert.Add(ctx, SYSTEM)
if err != nil {
logger.Warning("add builtin alert fail ", builtinAlert, err)
}
continue
}
if old.UpdatedBy == SYSTEM {
old.ComponentID = component.ID
old.Content = string(content)
old.Name = alert.Name
old.Tags = alert.AppendTags
err = models.DB(ctx).Model(old).Select("*").Updates(old).Error
if err != nil {
logger.Warningf("update builtin alert:%+v fail %v", builtinAlert, err)
}
}
}
if writeAlertFileFlag {
bs, err = json.MarshalIndent(newAlerts, "", " ")
if err != nil {
logger.Warning("marshal builtin alerts fail ", newAlerts, err)
continue
}
_, err = file.WriteBytes(fp, bs)
if err != nil {
logger.Warning("write builtin alerts file fail ", f, err)
}
}
}
}
@@ -261,32 +241,11 @@ func Init(ctx *ctx.Context, builtinIntegrationsDir string) {
Tags: dashboard.Tags,
Content: string(content),
UUID: dashboard.UUID,
ID: dashboard.UUID,
CreatedBy: SYSTEM,
UpdatedBy: SYSTEM,
}
old, err := models.BuiltinPayloadGet(ctx, "uuid = ?", dashboard.UUID)
if err != nil {
logger.Warning("get builtin alert fail ", builtinDashboard, err)
continue
}
if old == nil {
err := builtinDashboard.Add(ctx, SYSTEM)
if err != nil {
logger.Warning("add builtin alert fail ", builtinDashboard, err)
}
continue
}
if old.UpdatedBy == SYSTEM {
old.ComponentID = component.ID
old.Content = string(content)
old.Name = dashboard.Name
old.Tags = dashboard.Tags
err = models.DB(ctx).Model(old).Select("*").Updates(old).Error
if err != nil {
logger.Warningf("update builtin alert:%+v fail %v", builtinDashboard, err)
}
}
BuiltinPayloadInFile.AddBuiltinPayload(&builtinDashboard)
}
} else if err != nil {
logger.Warningf("read builtin component dash dir fail %s %v", component.Ident, err)
@@ -304,64 +263,23 @@ func Init(ctx *ctx.Context, builtinIntegrationsDir string) {
}
metrics := []models.BuiltinMetric{}
newMetrics := []models.BuiltinMetric{}
err = json.Unmarshal(bs, &metrics)
if err != nil {
logger.Warning("parse builtin component metrics file fail", f, err)
continue
}
writeMetricFileFlag := false
for _, metric := range metrics {
if metric.UUID == 0 {
writeMetricFileFlag = true
metric.UUID = time.Now().UnixNano()
time.Sleep(time.Microsecond)
metric.UUID = time.Now().UnixMicro()
}
newMetrics = append(newMetrics, metric)
metric.ID = metric.UUID
metric.CreatedBy = SYSTEM
metric.UpdatedBy = SYSTEM
old, err := models.BuiltinMetricGet(ctx, "uuid = ?", metric.UUID)
if err != nil {
logger.Warning("get builtin metrics fail ", metric, err)
continue
}
if old == nil {
err := metric.Add(ctx, SYSTEM)
if err != nil {
logger.Warning("add builtin metrics fail ", metric, err)
}
continue
}
if old.UpdatedBy == SYSTEM {
old.Collector = metric.Collector
old.Typ = metric.Typ
old.Name = metric.Name
old.Unit = metric.Unit
old.Note = metric.Note
old.Lang = metric.Lang
old.Expression = metric.Expression
err = models.DB(ctx).Model(old).Select("*").Updates(old).Error
if err != nil {
logger.Warningf("update builtin metric:%+v fail %v", metric, err)
}
}
BuiltinPayloadInFile.BuiltinMetrics[metric.Expression] = &metric
}
if writeMetricFileFlag {
bs, err = json.MarshalIndent(newMetrics, "", " ")
if err != nil {
logger.Warning("marshal builtin metrics fail ", newMetrics, err)
continue
}
_, err = file.WriteBytes(fp, bs)
if err != nil {
logger.Warning("write builtin metrics file fail ", f, err)
}
}
}
} else if err != nil {
logger.Warningf("read builtin component metrics dir fail %s %v", component.Ident, err)
@@ -387,3 +305,322 @@ type BuiltinBoard struct {
Hide int `json:"hide"` // 0: false, 1: true
UUID int64 `json:"uuid"`
}
// NewBuiltinPayloadInFileType returns an empty, ready-to-use cache with all
// three internal indexes allocated.
func NewBuiltinPayloadInFileType() *BuiltinPayloadInFileType {
	cache := &BuiltinPayloadInFileType{}
	cache.Data = map[uint64]map[string]map[string][]*models.BuiltinPayload{}
	cache.IndexData = map[int64]*models.BuiltinPayload{}
	cache.BuiltinMetrics = map[string]*models.BuiltinMetric{}
	return cache
}
// AddBuiltinPayload registers bp in the component/type/cate bucket and in the
// uuid index; the nested maps are created lazily on first use.
func (b *BuiltinPayloadInFileType) AddBuiltinPayload(bp *models.BuiltinPayload) {
	byType, ok := b.Data[bp.ComponentID]
	if !ok {
		byType = make(map[string]map[string][]*models.BuiltinPayload)
		b.Data[bp.ComponentID] = byType
	}

	byCate, ok := byType[bp.Type]
	if !ok {
		byCate = make(map[string][]*models.BuiltinPayload)
		byType[bp.Type] = byCate
	}

	byCate[bp.Cate] = append(byCate[bp.Cate], bp)
	b.IndexData[bp.UUID] = bp
}
// GetBuiltinPayload returns the cached payloads for the given component and
// type, optionally narrowed to one cate, and filtered by query (matched
// case-insensitively against Name and Tags by filterByQuery). Results are
// sorted by Name. A miss at any level yields (nil, nil).
func (b *BuiltinPayloadInFileType) GetBuiltinPayload(typ, cate, query string, componentId uint64) ([]*models.BuiltinPayload, error) {
	byType := b.Data[componentId]
	if byType == nil {
		return nil, nil
	}

	byCate, ok := byType[typ]
	if !ok {
		return nil, nil
	}

	var matched []*models.BuiltinPayload
	if cate == "" {
		// No cate given: gather matches from every cate under this type.
		for _, payloads := range byCate {
			matched = append(matched, filterByQuery(payloads, query)...)
		}
	} else {
		payloads, ok := byCate[cate]
		if !ok {
			return nil, nil
		}
		matched = append(matched, filterByQuery(payloads, query)...)
	}

	if len(matched) > 0 {
		sort.Slice(matched, func(x, y int) bool {
			return matched[x].Name < matched[y].Name
		})
	}
	return matched, nil
}
// GetBuiltinPayloadCates returns the sorted cate names cached for the given
// component and type; unknown component/type yields an empty result.
func (b *BuiltinPayloadInFileType) GetBuiltinPayloadCates(typ string, componentId uint64) ([]string, error) {
	var cates []string
	// Indexing a nil map is safe in Go, so missing component or type simply
	// produces zero iterations.
	for cate := range b.Data[componentId][typ] {
		cates = append(cates, cate)
	}
	sort.Strings(cates)
	return cates, nil
}
// filterByQuery returns the payloads whose Name or Tags contain query,
// compared case-insensitively. An empty query returns the input slice as-is.
func filterByQuery(payloads []*models.BuiltinPayload, query string) []*models.BuiltinPayload {
	if len(query) == 0 {
		return payloads
	}

	needle := strings.ToLower(query)
	var matched []*models.BuiltinPayload
	for _, payload := range payloads {
		inName := strings.Contains(strings.ToLower(payload.Name), needle)
		inTags := strings.Contains(strings.ToLower(payload.Tags), needle)
		if inName || inTags {
			matched = append(matched, payload)
		}
	}
	return matched
}
// BuiltinMetricGets merges the DB metrics with the file-based cache, applies
// the collector/typ/query/unit filters, translates Name/Note into lang, then
// sorts (collector, typ, expression) and paginates.
//
// metricsInDB are collapsed per expression first (see convertBuiltinMetricByDB);
// file-based metrics override DB records that share the same expression.
// Returns the requested page, the total matched count, and a nil error.
func (b *BuiltinPayloadInFileType) BuiltinMetricGets(metricsInDB []*models.BuiltinMetric, lang, collector, typ, query, unit string, limit, offset int) ([]*models.BuiltinMetric, int, error) {
	merged := make(map[string]*models.BuiltinMetric)
	for expression, metric := range convertBuiltinMetricByDB(metricsInDB) {
		merged[expression] = metric
	}
	// File entries win on duplicate expressions.
	for expression, metric := range b.BuiltinMetrics {
		merged[expression] = metric
	}

	// NOTE: merged is keyed by expression, so every metric iterated below
	// already has a unique expression — no extra dedup pass is needed. (This
	// also covers the legacy case of users having created duplicate metrics:
	// convertBuiltinMetricByDB collapses those per expression.)
	var filteredMetrics []*models.BuiltinMetric
	for _, metric := range merged {
		if !applyFilter(metric, collector, typ, query, unit) {
			continue
		}

		trans, err := getTranslationWithLanguage(metric, lang)
		if err != nil {
			logger.Errorf("Error getting translation for metric %s: %v", metric.Name, err)
			continue // skip metrics without a usable translation
		}

		// Work on a copy: metric points into the shared caches
		// (b.BuiltinMetrics and the DB-derived map). Writing the translated
		// Name/Note through the pointer — as the previous code did — would
		// overwrite the canonical record for every later request and language.
		m := *metric
		m.Name = trans.Name
		m.Note = trans.Note
		filteredMetrics = append(filteredMetrics, &m)
	}

	// Stable, user-facing ordering.
	sort.Slice(filteredMetrics, func(i, j int) bool {
		if filteredMetrics[i].Collector != filteredMetrics[j].Collector {
			return filteredMetrics[i].Collector < filteredMetrics[j].Collector
		}
		if filteredMetrics[i].Typ != filteredMetrics[j].Typ {
			return filteredMetrics[i].Typ < filteredMetrics[j].Typ
		}
		return filteredMetrics[i].Expression < filteredMetrics[j].Expression
	})

	totalCount := len(filteredMetrics)

	// Clamp pagination parameters.
	if offset < 0 {
		offset = 0
	}
	if limit < 0 {
		limit = 0
	}
	if offset >= totalCount || limit == 0 {
		return []*models.BuiltinMetric{}, totalCount, nil
	}

	end := offset + limit
	if end > totalCount {
		end = totalCount
	}
	return filteredMetrics[offset:end], totalCount, nil
}
// BuiltinMetricTypes returns the distinct Typ values of the cached file
// metrics that match collector/query. lang is accepted for signature parity
// with the DB-backed variant but is not used in the filtering here.
func (b *BuiltinPayloadInFileType) BuiltinMetricTypes(lang, collector, query string) []string {
	types := set.NewStringSet()
	for _, metric := range b.BuiltinMetrics {
		if applyFilter(metric, collector, "", query, "") {
			types.Add(metric.Typ)
		}
	}
	return types.ToSlice()
}
// BuiltinMetricCollectors returns the distinct Collector values of the cached
// file metrics that match typ/query. lang is accepted for signature parity
// with the DB-backed variant but is not used in the filtering here.
func (b *BuiltinPayloadInFileType) BuiltinMetricCollectors(lang, typ, query string) []string {
	collectors := set.NewStringSet()
	for _, metric := range b.BuiltinMetrics {
		if applyFilter(metric, "", typ, query, "") {
			collectors.Add(metric.Collector)
		}
	}
	return collectors.ToSlice()
}
// applyFilter reports whether metric passes every non-empty filter: exact
// collector, exact typ, unit membership in a comma-separated list (see
// containsUnit), and the free-text query syntax of applyQueryFilter.
func applyFilter(metric *models.BuiltinMetric, collector, typ, query, unit string) bool {
	switch {
	case collector != "" && metric.Collector != collector:
		return false
	case typ != "" && metric.Typ != typ:
		return false
	case unit != "" && !containsUnit(unit, metric.Unit):
		return false
	case query != "" && !applyQueryFilter(metric, query):
		return false
	}
	return true
}
// containsUnit reports whether metricUnit appears in the comma-separated
// list of units held by unit.
func containsUnit(unit, metricUnit string) bool {
	candidates := strings.Split(unit, ",")
	for i := range candidates {
		if candidates[i] == metricUnit {
			return true
		}
	}
	return false
}
// applyQueryFilter implements the free-text query syntax: whitespace-separated
// terms must ALL match; a term prefixed with '-' must NOT match. A term
// matches when it is a substring of the metric's Name, Note or Expression.
func applyQueryFilter(metric *models.BuiltinMetric, query string) bool {
	// strings.Fields (rather than Split on a single space) skips runs of
	// whitespace, so consecutive/leading/trailing spaces cannot produce empty
	// terms.
	for _, term := range strings.Fields(query) {
		negate := strings.HasPrefix(term, "-")
		term = strings.TrimPrefix(term, "-")
		if term == "" {
			// A bare "-" carries no constraint. The previous Split-based code
			// rejected every metric on this input, because
			// strings.Contains(s, "") is always true.
			continue
		}
		hit := strings.Contains(metric.Name, term) ||
			strings.Contains(metric.Note, term) ||
			strings.Contains(metric.Expression, term)
		// Reject when a positive term misses or a negative term hits.
		if negate == hit {
			return false
		}
	}
	return true
}
// getTranslationWithLanguage returns bm's translation for lang, falling back
// to the en_US entry; it errors when neither exists.
func getTranslationWithLanguage(bm *models.BuiltinMetric, lang string) (*models.Translation, error) {
	var fallback *models.Translation
	for i := range bm.Translation {
		t := &bm.Translation[i]
		if t.Lang == lang {
			return t, nil
		}
		if t.Lang == "en_US" {
			// Index-based pointer: the previous code stored the address of the
			// range variable, which on pre-Go-1.22 toolchains is a single
			// reused variable — the fallback could end up pointing at the last
			// iterated element instead of the en_US entry.
			fallback = t
		}
	}
	if fallback != nil {
		return fallback, nil
	}
	return nil, errors.Errorf("translation not found for metric %s", bm.Name)
}
// convertBuiltinMetricByDB collapses DB metrics that share an expression into
// one record per expression. For backward compatibility with metrics users
// created under old versions, the duplicate with the smallest ID becomes the
// canonical record; when that record carries no user-customized translations,
// the default translations of all duplicates are merged into it (last
// duplicate wins per language).
func convertBuiltinMetricByDB(metricsInDB []*models.BuiltinMetric) map[string]*models.BuiltinMetric {
	grouped := make(map[string][]*models.BuiltinMetric)
	for _, metric := range metricsInDB {
		grouped[metric.Expression] = append(grouped[metric.Expression], metric)
	}

	out := make(map[string]*models.BuiltinMetric)
	for expression, dups := range grouped {
		if len(dups) == 0 {
			continue
		}

		// Smallest ID wins, so edits converge on a single stable record.
		sort.Slice(dups, func(i, j int) bool {
			return dups[i].ID < dups[j].ID
		})
		primary := dups[0]

		if len(primary.Translation) == 0 {
			// No custom translation on the primary record: merge the defaults
			// from every duplicate, keyed by language.
			byLang := make(map[string]models.Translation)
			for _, dup := range dups {
				for _, tr := range getDefaultTranslation(dup) {
					byLang[tr.Lang] = tr
				}
			}
			primary.Translation = make([]models.Translation, 0, len(byLang))
			for _, tr := range byLang {
				primary.Translation = append(primary.Translation, tr)
			}
		}

		out[expression] = primary
	}
	return out
}
// getDefaultTranslation returns bm's translations, synthesizing a single
// entry from the metric's own Lang/Name/Note when none are defined.
func getDefaultTranslation(bm *models.BuiltinMetric) []models.Translation {
	if len(bm.Translation) > 0 {
		return bm.Translation
	}
	fallback := models.Translation{
		Lang: bm.Lang,
		Name: bm.Name,
		Note: bm.Note,
	}
	return []models.Translation{fallback}
}

View File

@@ -316,6 +316,7 @@ func (rt *Router) Config(r *gin.Engine) {
pages.GET("/busi-groups/tags", rt.auth(), rt.user(), rt.busiGroupsGetTags)
pages.GET("/targets", rt.auth(), rt.user(), rt.targetGets)
pages.POST("/target-update", rt.auth(), rt.targetUpdate)
pages.GET("/target/extra-meta", rt.auth(), rt.user(), rt.targetExtendInfoByIdent)
pages.POST("/target/list", rt.auth(), rt.user(), rt.targetGetsByHostFilter)
pages.DELETE("/targets", rt.auth(), rt.user(), rt.perm("/targets/del"), rt.targetDel)
@@ -449,7 +450,7 @@ func (rt *Router) Config(r *gin.Engine) {
pages.POST("/datasource/status/update", rt.auth(), rt.admin(), rt.datasourceUpdataStatus)
pages.DELETE("/datasource/", rt.auth(), rt.admin(), rt.datasourceDel)
pages.GET("/roles", rt.auth(), rt.user(), rt.perm("/roles"), rt.roleGets)
pages.GET("/roles", rt.auth(), rt.user(), rt.roleGets)
pages.POST("/roles", rt.auth(), rt.user(), rt.perm("/roles/add"), rt.roleAdd)
pages.PUT("/roles", rt.auth(), rt.user(), rt.perm("/roles/put"), rt.rolePut)
pages.DELETE("/role/:id", rt.auth(), rt.user(), rt.perm("/roles/del"), rt.roleDel)
@@ -523,10 +524,9 @@ func (rt *Router) Config(r *gin.Engine) {
pages.GET("/builtin-payloads", rt.auth(), rt.user(), rt.builtinPayloadsGets)
pages.GET("/builtin-payloads/cates", rt.auth(), rt.user(), rt.builtinPayloadcatesGet)
pages.POST("/builtin-payloads", rt.auth(), rt.user(), rt.perm("/components/add"), rt.builtinPayloadsAdd)
pages.GET("/builtin-payload/:id", rt.auth(), rt.user(), rt.perm("/components"), rt.builtinPayloadGet)
pages.PUT("/builtin-payloads", rt.auth(), rt.user(), rt.perm("/components/put"), rt.builtinPayloadsPut)
pages.DELETE("/builtin-payloads", rt.auth(), rt.user(), rt.perm("/components/del"), rt.builtinPayloadsDel)
pages.GET("/builtin-payload", rt.auth(), rt.user(), rt.builtinPayloadsGetByUUIDOrID)
pages.GET("/builtin-payload", rt.auth(), rt.user(), rt.builtinPayloadsGetByUUID)
pages.POST("/message-templates", rt.auth(), rt.user(), rt.perm("/notification-templates/add"), rt.messageTemplatesAdd)
pages.DELETE("/message-templates", rt.auth(), rt.user(), rt.perm("/notification-templates/del"), rt.messageTemplatesDel)
@@ -544,6 +544,9 @@ func (rt *Router) Config(r *gin.Engine) {
pages.GET("/notify-rule/custom-params", rt.auth(), rt.user(), rt.perm("/notification-rules"), rt.notifyRuleCustomParamsGet)
pages.POST("/notify-rule/event-pipelines-tryrun", rt.auth(), rt.user(), rt.perm("/notification-rules/add"), rt.tryRunEventProcessorByNotifyRule)
pages.GET("/event-tagkeys", rt.auth(), rt.user(), rt.eventTagKeys)
pages.GET("/event-tagvalues", rt.auth(), rt.user(), rt.eventTagValues)
// 事件Pipeline相关路由
pages.GET("/event-pipelines", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.eventPipelinesList)
pages.POST("/event-pipeline", rt.auth(), rt.user(), rt.perm("/event-pipelines/add"), rt.addEventPipeline)
@@ -673,6 +676,10 @@ func (rt *Router) Config(r *gin.Engine) {
service.GET("/message-templates", rt.messageTemplateGets)
service.GET("/event-pipelines", rt.eventPipelinesListByService)
// 手机号加密存储配置接口
service.POST("/users/phone/encrypt", rt.usersPhoneEncrypt)
service.POST("/users/phone/decrypt", rt.usersPhoneDecrypt)
}
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
)
func getUserGroupIds(ctx *gin.Context, rt *Router, myGroups bool) ([]int64, error) {
@@ -305,3 +306,123 @@ func (rt *Router) alertCurEventDelByHash(c *gin.Context) {
hash := ginx.QueryStr(c, "hash")
ginx.NewRender(c).Message(models.AlertCurEventDelByHash(rt.Ctx, hash))
}
// eventTagKeys returns the distinct tag keys seen on active alert events of
// the last 24 hours, restricted to the caller's visible business groups.
// On lookup errors, or when no events or keys are found, a default key set
// is returned instead of an error.
func (rt *Router) eventTagKeys(c *gin.Context) {
	defaultKeys := []string{"ident", "app", "service", "instance"}

	// Active events of the last day.
	etime := time.Now().Unix()
	stime := etime - 24*3600

	// Business groups the requesting user may see.
	bgids, err := GetBusinessGroupIds(c, rt.Ctx, rt.Center.EventHistoryGroupView, false)
	if err != nil {
		logger.Warningf("failed to get business group ids: %v", err)
		ginx.NewRender(c).Data(defaultKeys, nil)
		return
	}

	// Cap the scan at 200 events to keep this endpoint cheap.
	events, err := models.AlertCurEventsGet(rt.Ctx, []string{}, bgids, stime, etime, []int64{}, []int64{}, []string{}, 0, "", 200, 0, []int64{})
	if err != nil {
		logger.Warningf("failed to get current alert events: %v", err)
		ginx.NewRender(c).Data(defaultKeys, nil)
		return
	}
	if len(events) == 0 {
		ginx.NewRender(c).Data(defaultKeys, nil)
		return
	}

	// Collect and deduplicate tag keys across all events.
	seen := make(map[string]struct{})
	for _, event := range events {
		for key := range event.TagsMap {
			seen[key] = struct{}{}
		}
	}
	if len(seen) == 0 {
		ginx.NewRender(c).Data(defaultKeys, nil)
		return
	}

	keys := make([]string, 0, len(seen))
	for key := range seen {
		keys = append(keys, key)
	}
	// Sort so the response is deterministic; the previous code returned the
	// keys in random map-iteration order.
	sort.Strings(keys)
	ginx.NewRender(c).Data(keys, nil)
}
// eventTagValues returns the top-20 values of the tag named by the "key"
// query parameter, counted over active alert events of the last 24 hours in
// the caller's visible business groups, most frequent first. Errors and empty
// results render an empty array rather than an error.
func (rt *Router) eventTagValues(c *gin.Context) {
	tagKey := ginx.QueryStr(c, "key")

	// Active events of the last day.
	etime := time.Now().Unix()
	stime := etime - 24*3600

	// Business groups the requesting user may see.
	bgids, err := GetBusinessGroupIds(c, rt.Ctx, rt.Center.EventHistoryGroupView, false)
	if err != nil {
		logger.Warningf("failed to get business group ids: %v", err)
		ginx.NewRender(c).Data([]string{}, nil)
		return
	}

	// Scan up to 1000 events so the value counts are reasonably accurate.
	events, err := models.AlertCurEventsGet(rt.Ctx, []string{}, bgids, stime, etime, []int64{}, []int64{}, []string{}, 0, "", 1000, 0, []int64{})
	if err != nil {
		logger.Warningf("failed to get current alert events: %v", err)
		ginx.NewRender(c).Data([]string{}, nil)
		return
	}
	if len(events) == 0 {
		ginx.NewRender(c).Data([]string{}, nil)
		return
	}

	// Count occurrences of each non-empty value for the requested key.
	// TagsMap is already populated by AlertCurEventsGet.
	valueCount := make(map[string]int)
	for _, event := range events {
		if value, ok := event.TagsMap[tagKey]; ok && value != "" {
			valueCount[value]++
		}
	}

	type tagValue struct {
		value string
		count int
	}
	tagValues := make([]tagValue, 0, len(valueCount))
	for value, count := range valueCount {
		tagValues = append(tagValues, tagValue{value, count})
	}
	// Most frequent first, ties broken alphabetically. The previous code
	// compared only counts, so equal-count values came back in random
	// map-iteration order and the top-20 cut was non-deterministic.
	sort.Slice(tagValues, func(i, j int) bool {
		if tagValues[i].count != tagValues[j].count {
			return tagValues[i].count > tagValues[j].count
		}
		return tagValues[i].value < tagValues[j].value
	})

	// Keep only the top 20 values.
	limit := 20
	if len(tagValues) < limit {
		limit = len(tagValues)
	}
	result := make([]string, 0, limit)
	for i := 0; i < limit; i++ {
		result = append(result, tagValues[i].value)
	}
	ginx.NewRender(c).Data(result, nil)
}

View File

@@ -62,11 +62,11 @@ func (rt *Router) alertHisEventsList(c *gin.Context) {
ginx.Dangerous(err)
total, err := models.AlertHisEventTotal(rt.Ctx, prods, bgids, stime, etime, severity,
recovered, dsIds, cates, ruleId, query)
recovered, dsIds, cates, ruleId, query, []int64{})
ginx.Dangerous(err)
list, err := models.AlertHisEventGets(rt.Ctx, prods, bgids, stime, etime, severity, recovered,
dsIds, cates, ruleId, query, limit, ginx.Offset(c, limit))
dsIds, cates, ruleId, query, limit, ginx.Offset(c, limit), []int64{})
ginx.Dangerous(err)
cache := make(map[int64]*models.UserGroup)
@@ -115,7 +115,18 @@ func (rt *Router) alertHisEventsDelete(c *gin.Context) {
time.Sleep(100 * time.Millisecond) // 防止锁表
}
}()
ginx.NewRender(c).Message("Alert history events deletion started")
ginx.NewRender(c).Data("Alert history events deletion started", nil)
}
// TransferEventToCur converts a history event to its current-event
// representation. It is a package-level variable so other builds or tests can
// substitute their own implementation; init wires up the default below.
var TransferEventToCur func(*ctx.Context, *models.AlertHisEvent) *models.AlertCurEvent

func init() {
	TransferEventToCur = transferEventToCur
}

// transferEventToCur is the default implementation: it delegates to the
// model's ToCur helper. The ctx parameter is unused but keeps the signature
// of the TransferEventToCur variable.
func transferEventToCur(ctx *ctx.Context, event *models.AlertHisEvent) *models.AlertCurEvent {
	return event.ToCur()
}
func (rt *Router) alertHisEventGet(c *gin.Context) {
@@ -142,7 +153,7 @@ func (rt *Router) alertHisEventGet(c *gin.Context) {
ginx.Dangerous(err)
event.NotifyRules, err = GetEventNorifyRuleNames(rt.Ctx, event.NotifyRuleIds)
ginx.NewRender(c).Data(event, err)
ginx.NewRender(c).Data(TransferEventToCur(rt.Ctx, event), err)
}
func GetBusinessGroupIds(c *gin.Context, ctx *ctx.Context, onlySelfGroupView bool, myGroups bool) ([]int64, error) {

View File

@@ -35,13 +35,12 @@ func (rt *Router) alertRuleGets(c *gin.Context) {
cache := make(map[int64]*models.UserGroup)
for i := 0; i < len(ars); i++ {
ars[i].FillNotifyGroups(rt.Ctx, cache)
ars[i].FillSeverities()
}
}
ginx.NewRender(c).Data(ars, err)
}
func getAlertCueEventTimeRange(c *gin.Context) (stime, etime int64) {
func GetAlertCueEventTimeRange(c *gin.Context) (stime, etime int64) {
stime = ginx.QueryInt64(c, "stime", 0)
etime = ginx.QueryInt64(c, "etime", 0)
if etime == 0 {
@@ -80,7 +79,6 @@ func (rt *Router) alertRuleGetsByGids(c *gin.Context) {
names := make([]string, 0, len(ars))
for i := 0; i < len(ars); i++ {
ars[i].FillNotifyGroups(rt.Ctx, cache)
ars[i].FillSeverities()
if len(ars[i].DatasourceQueries) != 0 {
ars[i].DatasourceIdsJson = rt.DatasourceCache.GetIDsByDsCateAndQueries(ars[i].Cate, ars[i].DatasourceQueries)
@@ -90,7 +88,7 @@ func (rt *Router) alertRuleGetsByGids(c *gin.Context) {
names = append(names, ars[i].UpdateBy)
}
stime, etime := getAlertCueEventTimeRange(c)
stime, etime := GetAlertCueEventTimeRange(c)
cnt := models.AlertCurEventCountByRuleId(rt.Ctx, rids, stime, etime)
if cnt != nil {
for i := 0; i < len(ars); i++ {
@@ -290,6 +288,15 @@ func (rt *Router) alertRuleAddByImport(c *gin.Context) {
models.DataSourceQueryAll,
}
}
// 将导入的规则统一转为新版本的通知规则配置
lst[i].NotifyVersion = 1
lst[i].NotifyChannelsJSON = []string{}
lst[i].NotifyGroupsJSON = []string{}
lst[i].NotifyChannels = ""
lst[i].NotifyGroups = ""
lst[i].Callbacks = ""
lst[i].CallbacksJSON = []string{}
}
bgid := ginx.UrlParamInt64(c, "id")
@@ -308,19 +315,52 @@ func (rt *Router) alertRuleAddByImportPromRule(c *gin.Context) {
var f promRuleForm
ginx.Dangerous(c.BindJSON(&f))
// 首先尝试解析带 groups 的格式
var pr struct {
Groups []models.PromRuleGroup `yaml:"groups"`
}
err := yaml.Unmarshal([]byte(f.Payload), &pr)
if err != nil {
ginx.Bomb(http.StatusBadRequest, "invalid yaml format, please use the example format. err: %v", err)
var groups []models.PromRuleGroup
if err != nil || len(pr.Groups) == 0 {
// 如果解析失败或没有 groups尝试解析规则数组格式
var rules []models.PromRule
err = yaml.Unmarshal([]byte(f.Payload), &rules)
if err != nil {
// 最后尝试解析单个规则格式
var singleRule models.PromRule
err = yaml.Unmarshal([]byte(f.Payload), &singleRule)
if err != nil {
ginx.Bomb(http.StatusBadRequest, "invalid yaml format. err: %v", err)
}
// 验证单个规则是否有效
if singleRule.Alert == "" && singleRule.Record == "" {
ginx.Bomb(http.StatusBadRequest, "input yaml is empty or invalid")
}
rules = []models.PromRule{singleRule}
}
// 验证规则数组是否为空
if len(rules) == 0 {
ginx.Bomb(http.StatusBadRequest, "input yaml contains no rules")
}
// 将规则数组包装成 group
groups = []models.PromRuleGroup{
{
Name: "imported_rules",
Rules: rules,
},
}
} else {
// 使用已解析的 groups
groups = pr.Groups
}
if len(pr.Groups) == 0 {
ginx.Bomb(http.StatusBadRequest, "input yaml is empty")
}
lst := models.DealPromGroup(pr.Groups, f.DatasourceQueries, f.Disabled)
lst := models.DealPromGroup(groups, f.DatasourceQueries, f.Disabled)
username := c.MustGet("username").(string)
bgid := ginx.UrlParamInt64(c, "id")
ginx.NewRender(c).Data(rt.alertRuleAdd(lst, username, bgid, c.GetHeader("X-Language")), nil)
@@ -465,8 +505,8 @@ func (rt *Router) alertRulePutFields(c *gin.Context) {
ginx.Bomb(http.StatusBadRequest, "fields empty")
}
f.Fields["update_by"] = c.MustGet("username").(string)
f.Fields["update_at"] = time.Now().Unix()
updateBy := c.MustGet("username").(string)
updateAt := time.Now().Unix()
for i := 0; i < len(f.Ids); i++ {
ar, err := models.AlertRuleGetById(rt.Ctx, f.Ids[i])
@@ -483,7 +523,6 @@ func (rt *Router) alertRulePutFields(c *gin.Context) {
b, err := json.Marshal(originRule)
ginx.Dangerous(err)
ginx.Dangerous(ar.UpdateFieldsMap(rt.Ctx, map[string]interface{}{"rule_config": string(b)}))
continue
}
}
@@ -496,7 +535,6 @@ func (rt *Router) alertRulePutFields(c *gin.Context) {
b, err := json.Marshal(ar.AnnotationsJSON)
ginx.Dangerous(err)
ginx.Dangerous(ar.UpdateFieldsMap(rt.Ctx, map[string]interface{}{"annotations": string(b)}))
continue
}
}
@@ -509,7 +547,6 @@ func (rt *Router) alertRulePutFields(c *gin.Context) {
b, err := json.Marshal(ar.AnnotationsJSON)
ginx.Dangerous(err)
ginx.Dangerous(ar.UpdateFieldsMap(rt.Ctx, map[string]interface{}{"annotations": string(b)}))
continue
}
}
@@ -519,7 +556,6 @@ func (rt *Router) alertRulePutFields(c *gin.Context) {
callback := callbacks.(string)
if !strings.Contains(ar.Callbacks, callback) {
ginx.Dangerous(ar.UpdateFieldsMap(rt.Ctx, map[string]interface{}{"callbacks": ar.Callbacks + " " + callback}))
continue
}
}
}
@@ -529,7 +565,6 @@ func (rt *Router) alertRulePutFields(c *gin.Context) {
if callbacks, has := f.Fields["callbacks"]; has {
callback := callbacks.(string)
ginx.Dangerous(ar.UpdateFieldsMap(rt.Ctx, map[string]interface{}{"callbacks": strings.ReplaceAll(ar.Callbacks, callback, "")}))
continue
}
}
@@ -539,7 +574,6 @@ func (rt *Router) alertRulePutFields(c *gin.Context) {
bytes, err := json.Marshal(datasourceQueries)
ginx.Dangerous(err)
ginx.Dangerous(ar.UpdateFieldsMap(rt.Ctx, map[string]interface{}{"datasource_queries": bytes}))
continue
}
}
@@ -555,6 +589,12 @@ func (rt *Router) alertRulePutFields(c *gin.Context) {
ginx.Dangerous(ar.UpdateColumn(rt.Ctx, k, v))
}
}
// 统一更新更新时间和更新人,只有更新时间变了,告警规则才会被引擎拉取
ginx.Dangerous(ar.UpdateFieldsMap(rt.Ctx, map[string]interface{}{
"update_by": updateBy,
"update_at": updateAt,
}))
}
ginx.NewRender(c).Message(nil)

View File

@@ -288,6 +288,7 @@ func (rt *Router) alertSubscribePut(c *gin.Context) {
"busi_groups",
"note",
"notify_rule_ids",
"notify_version",
))
}

View File

@@ -2,8 +2,10 @@ package router
import (
"net/http"
"sort"
"time"
"github.com/ccfos/nightingale/v6/center/integration"
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
@@ -29,7 +31,7 @@ func (rt *Router) builtinMetricsAdd(c *gin.Context) {
reterr := make(map[string]string)
for i := 0; i < count; i++ {
lst[i].Lang = lang
lst[i].UUID = time.Now().UnixNano()
lst[i].UUID = time.Now().UnixMicro()
if err := lst[i].Add(rt.Ctx, username); err != nil {
reterr[lst[i].Name] = i18n.Sprintf(c.GetHeader("X-Language"), err.Error())
}
@@ -48,11 +50,12 @@ func (rt *Router) builtinMetricsGets(c *gin.Context) {
lang = "zh_CN"
}
bm, err := models.BuiltinMetricGets(rt.Ctx, lang, collector, typ, query, unit, limit, ginx.Offset(c, limit))
bmInDB, err := models.BuiltinMetricGets(rt.Ctx, "", collector, typ, query, unit, limit, ginx.Offset(c, limit))
ginx.Dangerous(err)
total, err := models.BuiltinMetricCount(rt.Ctx, lang, collector, typ, query, unit)
bm, total, err := integration.BuiltinPayloadInFile.BuiltinMetricGets(bmInDB, lang, collector, typ, query, unit, limit, ginx.Offset(c, limit))
ginx.Dangerous(err)
ginx.NewRender(c).Data(gin.H{
"list": bm,
"total": total,
@@ -100,8 +103,26 @@ func (rt *Router) builtinMetricsTypes(c *gin.Context) {
query := ginx.QueryStr(c, "query", "")
lang := c.GetHeader("X-Language")
metricTypeList, err := models.BuiltinMetricTypes(rt.Ctx, lang, collector, query)
ginx.NewRender(c).Data(metricTypeList, err)
metricTypeListInDB, err := models.BuiltinMetricTypes(rt.Ctx, lang, collector, query)
ginx.Dangerous(err)
metricTypeListInFile := integration.BuiltinPayloadInFile.BuiltinMetricTypes(lang, collector, query)
typeMap := make(map[string]struct{})
for _, metricType := range metricTypeListInDB {
typeMap[metricType] = struct{}{}
}
for _, metricType := range metricTypeListInFile {
typeMap[metricType] = struct{}{}
}
metricTypeList := make([]string, 0, len(typeMap))
for metricType := range typeMap {
metricTypeList = append(metricTypeList, metricType)
}
sort.Strings(metricTypeList)
ginx.NewRender(c).Data(metricTypeList, nil)
}
func (rt *Router) builtinMetricsCollectors(c *gin.Context) {
@@ -109,5 +130,24 @@ func (rt *Router) builtinMetricsCollectors(c *gin.Context) {
query := ginx.QueryStr(c, "query", "")
lang := c.GetHeader("X-Language")
ginx.NewRender(c).Data(models.BuiltinMetricCollectors(rt.Ctx, lang, typ, query))
collectorListInDB, err := models.BuiltinMetricCollectors(rt.Ctx, lang, typ, query)
ginx.Dangerous(err)
collectorListInFile := integration.BuiltinPayloadInFile.BuiltinMetricCollectors(lang, typ, query)
collectorMap := make(map[string]struct{})
for _, collector := range collectorListInDB {
collectorMap[collector] = struct{}{}
}
for _, collector := range collectorListInFile {
collectorMap[collector] = struct{}{}
}
collectorList := make([]string, 0, len(collectorMap))
for collector := range collectorMap {
collectorList = append(collectorList, collector)
}
sort.Strings(collectorList)
ginx.NewRender(c).Data(collectorList, nil)
}

View File

@@ -7,6 +7,7 @@ import (
"time"
"github.com/BurntSushi/toml"
"github.com/ccfos/nightingale/v6/center/integration"
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
@@ -192,13 +193,26 @@ func (rt *Router) builtinPayloadsAdd(c *gin.Context) {
func (rt *Router) builtinPayloadsGets(c *gin.Context) {
typ := ginx.QueryStr(c, "type", "")
if typ == "" {
ginx.Bomb(http.StatusBadRequest, "type is required")
return
}
ComponentID := ginx.QueryInt64(c, "component_id", 0)
cate := ginx.QueryStr(c, "cate", "")
query := ginx.QueryStr(c, "query", "")
lst, err := models.BuiltinPayloadGets(rt.Ctx, uint64(ComponentID), typ, cate, query)
ginx.NewRender(c).Data(lst, err)
ginx.Dangerous(err)
lstInFile, err := integration.BuiltinPayloadInFile.GetBuiltinPayload(typ, cate, query, uint64(ComponentID))
ginx.Dangerous(err)
if len(lstInFile) > 0 {
lst = append(lst, lstInFile...)
}
ginx.NewRender(c).Data(lst, nil)
}
func (rt *Router) builtinPayloadcatesGet(c *gin.Context) {
@@ -206,21 +220,31 @@ func (rt *Router) builtinPayloadcatesGet(c *gin.Context) {
ComponentID := ginx.QueryInt64(c, "component_id", 0)
cates, err := models.BuiltinPayloadCates(rt.Ctx, typ, uint64(ComponentID))
ginx.NewRender(c).Data(cates, err)
}
ginx.Dangerous(err)
func (rt *Router) builtinPayloadGet(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
catesInFile, err := integration.BuiltinPayloadInFile.GetBuiltinPayloadCates(typ, uint64(ComponentID))
ginx.Dangerous(err)
bp, err := models.BuiltinPayloadGet(rt.Ctx, "id = ?", id)
if err != nil {
ginx.Bomb(http.StatusInternalServerError, err.Error())
}
if bp == nil {
ginx.Bomb(http.StatusNotFound, "builtin payload not found")
// 使用 map 进行去重
cateMap := make(map[string]bool)
// 添加数据库中的分类
for _, cate := range cates {
cateMap[cate] = true
}
ginx.NewRender(c).Data(bp, nil)
// 添加文件中的分类
for _, cate := range catesInFile {
cateMap[cate] = true
}
// 将去重后的结果转换回切片
result := make([]string, 0, len(cateMap))
for cate := range cateMap {
result = append(result, cate)
}
ginx.NewRender(c).Data(result, nil)
}
func (rt *Router) builtinPayloadsPut(c *gin.Context) {
@@ -273,14 +297,15 @@ func (rt *Router) builtinPayloadsDel(c *gin.Context) {
ginx.NewRender(c).Message(models.BuiltinPayloadDels(rt.Ctx, req.Ids))
}
func (rt *Router) builtinPayloadsGetByUUIDOrID(c *gin.Context) {
uuid := ginx.QueryInt64(c, "uuid", 0)
// 优先以 uuid 为准
if uuid != 0 {
ginx.NewRender(c).Data(models.BuiltinPayloadGet(rt.Ctx, "uuid = ?", uuid))
return
}
func (rt *Router) builtinPayloadsGetByUUID(c *gin.Context) {
uuid := ginx.QueryInt64(c, "uuid")
id := ginx.QueryInt64(c, "id", 0)
ginx.NewRender(c).Data(models.BuiltinPayloadGet(rt.Ctx, "id = ?", id))
bp, err := models.BuiltinPayloadGet(rt.Ctx, "uuid = ?", uuid)
ginx.Dangerous(err)
if bp != nil {
ginx.NewRender(c).Data(bp, nil)
} else {
ginx.NewRender(c).Data(integration.BuiltinPayloadInFile.IndexData[uuid], nil)
}
}

View File

@@ -14,6 +14,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/i18n"
"github.com/toolkits/pkg/logger"
)
@@ -102,7 +103,7 @@ func (rt *Router) datasourceUpsert(c *gin.Context) {
if !req.ForceSave {
if req.PluginType == models.PROMETHEUS || req.PluginType == models.LOKI || req.PluginType == models.TDENGINE {
err = DatasourceCheck(req)
err = DatasourceCheck(c, req)
if err != nil {
Dangerous(c, err)
return
@@ -173,7 +174,7 @@ func (rt *Router) datasourceUpsert(c *gin.Context) {
Render(c, nil, err)
}
func DatasourceCheck(ds models.Datasource) error {
func DatasourceCheck(c *gin.Context, ds models.Datasource) error {
if ds.PluginType == models.PROMETHEUS || ds.PluginType == models.LOKI || ds.PluginType == models.TDENGINE {
if ds.HTTPJson.Url == "" {
return fmt.Errorf("url is empty")
@@ -232,6 +233,10 @@ func DatasourceCheck(ds models.Datasource) error {
req, err = http.NewRequest("GET", fullURL, nil)
if err != nil {
logger.Errorf("Error creating request: %v", err)
if !strings.Contains(ds.HTTPJson.Url, "/loki") {
lang := c.GetHeader("X-Language")
return fmt.Errorf(i18n.Sprintf(lang, "/loki suffix is miss, please add /loki to the url: %s", ds.HTTPJson.Url+"/loki"))
}
return fmt.Errorf("request url:%s failed: %v", fullURL, err)
}
}
@@ -253,6 +258,10 @@ func DatasourceCheck(ds models.Datasource) error {
if resp.StatusCode != 200 {
logger.Errorf("Error making request: %v\n", resp.StatusCode)
if resp.StatusCode == 404 && ds.PluginType == models.LOKI && !strings.Contains(ds.HTTPJson.Url, "/loki") {
lang := c.GetHeader("X-Language")
return fmt.Errorf(i18n.Sprintf(lang, "/loki suffix is miss, please add /loki to the url: %s", ds.HTTPJson.Url+"/loki"))
}
body, _ := io.ReadAll(resp.Body)
return fmt.Errorf("request url:%s failed code:%d body:%s", fullURL, resp.StatusCode, string(body))
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/i18n"
)
// 获取事件Pipeline列表
@@ -139,12 +140,14 @@ func (rt *Router) tryRunEventPipeline(c *gin.Context) {
}
event := hisEvent.ToCur()
lang := c.GetHeader("X-Language")
var result string
for _, p := range f.PipelineConfig.ProcessorConfigs {
processor, err := models.GetProcessorByType(p.Typ, p.Config)
if err != nil {
ginx.Bomb(http.StatusBadRequest, "get processor: %+v err: %+v", p, err)
}
event, _, err = processor.Process(rt.Ctx, event)
event, result, err = processor.Process(rt.Ctx, event)
if err != nil {
ginx.Bomb(http.StatusBadRequest, "processor: %+v err: %+v", p, err)
}
@@ -152,7 +155,7 @@ func (rt *Router) tryRunEventPipeline(c *gin.Context) {
if event == nil {
ginx.NewRender(c).Data(map[string]interface{}{
"event": event,
"result": "event is dropped",
"result": i18n.Sprintf(lang, "event is dropped"),
}, nil)
return
}
@@ -160,7 +163,7 @@ func (rt *Router) tryRunEventPipeline(c *gin.Context) {
m := map[string]interface{}{
"event": event,
"result": "",
"result": i18n.Sprintf(lang, result),
}
ginx.NewRender(c).Data(m, nil)
}
@@ -188,9 +191,10 @@ func (rt *Router) tryRunEventProcessor(c *gin.Context) {
ginx.Bomb(200, "processor err: %+v", err)
}
lang := c.GetHeader("X-Language")
ginx.NewRender(c).Data(map[string]interface{}{
"event": event,
"result": res,
"result": i18n.Sprintf(lang, res),
}, nil)
}
@@ -231,9 +235,10 @@ func (rt *Router) tryRunEventProcessorByNotifyRule(c *gin.Context) {
ginx.Bomb(http.StatusBadRequest, "processor: %+v err: %+v", p, err)
}
if event == nil {
lang := c.GetHeader("X-Language")
ginx.NewRender(c).Data(map[string]interface{}{
"event": event,
"result": "event is dropped",
"result": i18n.Sprintf(lang, "event is dropped"),
}, nil)
return
}

View File

@@ -193,10 +193,9 @@ func (rt *Router) eventsMessage(c *gin.Context) {
events[i] = he.ToCur()
}
var defs = []string{
"{{$events := .}}",
"{{$event := index . 0}}",
}
renderData := make(map[string]interface{})
renderData["events"] = events
defs := models.GetDefs(renderData)
ret := make(map[string]string, len(req.Tpl.Content))
for k, v := range req.Tpl.Content {
text := strings.Join(append(defs, v), "")
@@ -207,7 +206,7 @@ func (rt *Router) eventsMessage(c *gin.Context) {
}
var buf bytes.Buffer
err = tpl.Execute(&buf, events)
err = tpl.Execute(&buf, renderData)
if err != nil {
ret[k] = err.Error()
continue

View File

@@ -18,7 +18,9 @@ import (
// Return all, front-end search and paging
func (rt *Router) alertMuteGetsByBG(c *gin.Context) {
bgid := ginx.UrlParamInt64(c, "id")
lst, err := models.AlertMuteGetsByBG(rt.Ctx, bgid)
prods := strings.Fields(ginx.QueryStr(c, "prods", ""))
query := ginx.QueryStr(c, "query", "")
lst, err := models.AlertMuteGets(rt.Ctx, prods, bgid, -1, query)
ginx.NewRender(c).Data(lst, err)
}

View File

@@ -153,16 +153,17 @@ func (rt *Router) notifyTest(c *gin.Context) {
for _, he := range hisEvents {
event := he.ToCur()
event.SetTagsMap()
if dispatch.NotifyRuleApplicable(&f.NotifyConfig, event) {
events = append(events, event)
if err := dispatch.NotifyRuleMatchCheck(&f.NotifyConfig, event); err != nil {
ginx.Bomb(http.StatusBadRequest, err.Error())
}
}
if len(events) == 0 {
ginx.Bomb(http.StatusBadRequest, "not events applicable")
events = append(events, event)
}
resp, err := SendNotifyChannelMessage(rt.Ctx, rt.UserCache, rt.UserGroupCache, f.NotifyConfig, events)
if resp == "" {
resp = "success"
}
ginx.NewRender(c).Data(resp, err)
}

View File

@@ -148,6 +148,8 @@ func (rt *Router) dsProxy(c *gin.Context) {
if ds.AuthJson.BasicAuthUser != "" {
req.SetBasicAuth(ds.AuthJson.BasicAuthUser, ds.AuthJson.BasicAuthPassword)
} else {
req.Header.Del("Authorization")
}
headerCount := len(ds.HTTPJson.Headers)

View File

@@ -149,6 +149,12 @@ func (rt *Router) recordingRulePutFields(c *gin.Context) {
f.Fields["datasource_queries"] = string(bytes)
}
if datasourceIds, ok := f.Fields["datasource_ids"]; ok {
bytes, err := json.Marshal(datasourceIds)
ginx.Dangerous(err)
f.Fields["datasource_ids"] = string(bytes)
}
for i := 0; i < len(f.Ids); i++ {
ar, err := models.RecordingRuleGetById(rt.Ctx, f.Ids[i])
ginx.Dangerous(err)

View File

@@ -11,6 +11,7 @@ import (
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/ccfos/nightingale/v6/pushgw/idents"
"github.com/ccfos/nightingale/v6/storage"
"github.com/gin-gonic/gin"
@@ -601,3 +602,10 @@ func (rt *Router) targetsOfHostQuery(c *gin.Context) {
ginx.NewRender(c).Data(lst, nil)
}
// targetUpdate binds a TargetUpdate payload from the JSON request body and
// hands it to rt.IdentSet.UpdateTargets; the call's error (or nil) is
// rendered back to the client as the response message.
func (rt *Router) targetUpdate(c *gin.Context) {
	var payload idents.TargetUpdate
	ginx.BindJSON(c, &payload)
	ginx.NewRender(c).Message(rt.IdentSet.UpdateTargets(payload.Lst, payload.Now))
}

View File

@@ -1,6 +1,7 @@
package router
import (
"fmt"
"net/http"
"strings"
@@ -12,6 +13,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
"gorm.io/gorm"
)
func (rt *Router) userBusiGroupsGets(c *gin.Context) {
@@ -252,3 +254,200 @@ func (rt *Router) installDateGet(c *gin.Context) {
ginx.NewRender(c).Data(rootUser.CreateAt, nil)
}
// usersPhoneEncrypt encrypts every user's phone number with the site-wide
// RSA public key and turns the phone-encryption feature switch on.
// All row updates run in a single transaction: if any user fails to encrypt
// or update, the transaction is rolled back and the feature switch (and its
// cache) is reverted, leaving the data set untouched.
func (rt *Router) usersPhoneEncrypt(c *gin.Context) {
	users, err := models.UserGetAll(rt.Ctx)
	if err != nil {
		ginx.NewRender(c).Message(fmt.Errorf("get users failed: %v", err))
		return
	}

	// Fetch the RSA key material; only the public key is needed to encrypt.
	_, publicKey, _, err := models.GetRSAKeys(rt.Ctx)
	if err != nil {
		ginx.NewRender(c).Message(fmt.Errorf("get RSA keys failed: %v", err))
		return
	}

	// Turn the phone-encryption switch on before rewriting any rows.
	err = models.SetPhoneEncryptionEnabled(rt.Ctx, true)
	if err != nil {
		ginx.NewRender(c).Message(fmt.Errorf("enable phone encryption failed: %v", err))
		return
	}

	// Refresh the cached config so other readers observe the new switch value.
	err = models.RefreshPhoneEncryptionCache(rt.Ctx)
	if err != nil {
		logger.Errorf("Failed to refresh phone encryption cache: %v", err)
		// Roll the switch back; best effort, error intentionally ignored.
		models.SetPhoneEncryptionEnabled(rt.Ctx, false)
		ginx.NewRender(c).Message(fmt.Errorf("refresh cache failed: %v", err))
		return
	}

	successCount := 0
	failCount := 0
	var failedUsers []string

	// Encrypt every user's phone inside one transaction so the whole batch
	// either commits together or not at all.
	err = models.DB(rt.Ctx).Transaction(func(tx *gorm.DB) error {
		for _, user := range users {
			if user.Phone == "" {
				continue
			}
			// Skip phones that already carry the "enc:" marker.
			if isPhoneEncrypted(user.Phone) {
				continue
			}

			encryptedPhone, err := secu.EncryptValue(user.Phone, publicKey)
			if err != nil {
				logger.Errorf("Failed to encrypt phone for user %s: %v", user.Username, err)
				failCount++
				failedUsers = append(failedUsers, user.Username)
				continue
			}

			err = tx.Model(&models.User{}).Where("id = ?", user.Id).Update("phone", encryptedPhone).Error
			if err != nil {
				logger.Errorf("Failed to update phone for user %s: %v", user.Username, err)
				failCount++
				failedUsers = append(failedUsers, user.Username)
				continue
			}

			successCount++
			logger.Debugf("Successfully encrypted phone for user %s", user.Username)
		}

		// Any per-user failure aborts (rolls back) the whole transaction.
		if failCount > 0 {
			return fmt.Errorf("encrypt failed users: %d, failed users: %v", failCount, failedUsers)
		}
		return nil
	})

	if err != nil {
		// Encryption failed: revert the switch and its cache (best effort,
		// errors intentionally ignored).
		models.SetPhoneEncryptionEnabled(rt.Ctx, false)
		models.RefreshPhoneEncryptionCache(rt.Ctx)
		ginx.NewRender(c).Message(fmt.Errorf("encrypt phone failed: %v", err))
		return
	}

	ginx.NewRender(c).Data(gin.H{
		"success_count": successCount,
		"fail_count":    failCount,
	}, nil)
}
// usersPhoneDecrypt decrypts every user's phone number with the site-wide
// RSA private key and turns the phone-encryption feature switch off.
// The switch is disabled first so the subsequent read returns raw database
// values; all row updates run in a single transaction, and any failure rolls
// back both the transaction and the feature switch.
func (rt *Router) usersPhoneDecrypt(c *gin.Context) {
	// Disable the phone-encryption switch first.
	err := models.SetPhoneEncryptionEnabled(rt.Ctx, false)
	if err != nil {
		ginx.NewRender(c).Message(fmt.Errorf("disable phone encryption failed: %v", err))
		return
	}

	// Refresh the cached config so other readers observe the new switch value.
	err = models.RefreshPhoneEncryptionCache(rt.Ctx)
	if err != nil {
		logger.Errorf("Failed to refresh phone encryption cache: %v", err)
		// Roll the switch back; best effort, error intentionally ignored.
		models.SetPhoneEncryptionEnabled(rt.Ctx, true)
		ginx.NewRender(c).Message(fmt.Errorf("refresh cache failed: %v", err))
		return
	}

	// Load all users. With the switch already off, this reads the raw
	// (still-encrypted) database values directly.
	var users []*models.User
	err = models.DB(rt.Ctx).Find(&users).Error
	if err != nil {
		// Revert the switch and its cache (best effort).
		models.SetPhoneEncryptionEnabled(rt.Ctx, true)
		models.RefreshPhoneEncryptionCache(rt.Ctx)
		ginx.NewRender(c).Message(fmt.Errorf("get users failed: %v", err))
		return
	}

	// Fetch the RSA key material; the private key and password decrypt.
	privateKey, _, password, err := models.GetRSAKeys(rt.Ctx)
	if err != nil {
		// Revert the switch and its cache (best effort).
		models.SetPhoneEncryptionEnabled(rt.Ctx, true)
		models.RefreshPhoneEncryptionCache(rt.Ctx)
		ginx.NewRender(c).Message(fmt.Errorf("get RSA keys failed: %v", err))
		return
	}

	successCount := 0
	failCount := 0
	var failedUsers []string

	// Decrypt every user's phone inside one transaction so the whole batch
	// either commits together or not at all.
	err = models.DB(rt.Ctx).Transaction(func(tx *gorm.DB) error {
		for _, user := range users {
			if user.Phone == "" {
				continue
			}
			// Only touch phones that carry the "enc:" marker.
			if !isPhoneEncrypted(user.Phone) {
				continue
			}

			decryptedPhone, err := secu.Decrypt(user.Phone, privateKey, password)
			if err != nil {
				logger.Errorf("Failed to decrypt phone for user %s: %v", user.Username, err)
				failCount++
				failedUsers = append(failedUsers, user.Username)
				continue
			}

			// Update the phone column directly, bypassing GORM model hooks.
			err = tx.Model(&models.User{}).Where("id = ?", user.Id).Update("phone", decryptedPhone).Error
			if err != nil {
				logger.Errorf("Failed to update phone for user %s: %v", user.Username, err)
				failCount++
				failedUsers = append(failedUsers, user.Username)
				continue
			}

			successCount++
			logger.Debugf("Successfully decrypted phone for user %s", user.Username)
		}

		// Any per-user failure aborts (rolls back) the whole transaction.
		if failCount > 0 {
			return fmt.Errorf("decrypt failed users: %d, failed users: %v", failCount, failedUsers)
		}
		return nil
	})

	if err != nil {
		// Decryption failed: revert the switch and its cache (best effort,
		// errors intentionally ignored).
		models.SetPhoneEncryptionEnabled(rt.Ctx, true)
		models.RefreshPhoneEncryptionCache(rt.Ctx)
		ginx.NewRender(c).Message(fmt.Errorf("decrypt phone failed: %v", err))
		return
	}

	ginx.NewRender(c).Data(gin.H{
		"success_count": successCount,
		"fail_count":    failCount,
	}, nil)
}
// isPhoneEncrypted reports whether phone already carries the "enc:" prefix
// used to mark RSA-encrypted phone numbers. A bare "enc:" with no payload
// after it is not considered encrypted.
func isPhoneEncrypted(phone string) bool {
	const marker = "enc:"
	return len(phone) > len(marker) && strings.HasPrefix(phone, marker)
}

View File

@@ -10,12 +10,20 @@ import (
"github.com/araddon/dateparse"
"github.com/bitly/go-simplejson"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/models"
"github.com/mitchellh/mapstructure"
"github.com/olivere/elastic/v7"
"github.com/prometheus/common/model"
"github.com/toolkits/pkg/logger"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/models"
)
// FixedField names a built-in document field (as exposed by the elastic
// client) that can be used as a tie-breaker sort key for search_after
// pagination.
type FixedField string

const (
	// FieldIndex is the built-in "_index" field.
	FieldIndex FixedField = "_index"
	// FieldId is the built-in "_id" field.
	FieldId FixedField = "_id"
)
type Query struct {
@@ -37,6 +45,18 @@ type Query struct {
Timeout int `json:"timeout" mapstructure:"timeout"`
MaxShard int `json:"max_shard" mapstructure:"max_shard"`
SearchAfter *SearchAfter `json:"search_after" mapstructure:"search_after"`
}
// SortField is a single sort key (field name plus direction) for a
// search_after query.
type SortField struct {
	Field     string `json:"field" mapstructure:"field"`
	Ascending bool   `json:"ascending" mapstructure:"ascending"`
}

// SearchAfter carries deep-pagination state for Elasticsearch search_after
// queries. SortFields is typically some combination of timestamp:desc,
// _index:asc and _id:asc, which together form a unique sort key.
// SearchAfter holds the sort values of the last document of the previous
// page, in the same order as SortFields.
type SearchAfter struct {
	SortFields  []SortField   `json:"sort_fields" mapstructure:"sort_fields"`
	SearchAfter []interface{} `json:"search_after" mapstructure:"search_after"`
}
type MetricAggr struct {
@@ -271,7 +291,10 @@ func MakeLogQuery(ctx context.Context, query interface{}, eventTags []string, st
}
for i := 0; i < len(eventTags); i++ {
eventTags[i] = strings.Replace(eventTags[i], "=", ":", 1)
arr := strings.SplitN(eventTags[i], "=", 2)
if len(arr) == 2 {
eventTags[i] = fmt.Sprintf("%s:%s", arr[0], strconv.Quote(arr[1]))
}
}
if len(eventTags) > 0 {
@@ -295,7 +318,10 @@ func MakeTSQuery(ctx context.Context, query interface{}, eventTags []string, sta
}
for i := 0; i < len(eventTags); i++ {
eventTags[i] = strings.Replace(eventTags[i], "=", ":", 1)
arr := strings.SplitN(eventTags[i], "=", 2)
if len(arr) == 2 {
eventTags[i] = fmt.Sprintf("%s:%s", arr[0], strconv.Quote(arr[1]))
}
}
if len(eventTags) > 0 {
@@ -605,14 +631,27 @@ func QueryLog(ctx context.Context, queryParam interface{}, timeout int64, versio
if param.MaxShard < 1 {
param.MaxShard = maxShard
}
// from+size 分页方式获取日志受es 的max_result_window参数限制默认最多返回1w条日志, 可以使用search_after方式获取更多日志
source := elastic.NewSearchSource().
TrackTotalHits(true).
Query(queryString).
From(param.P).
Size(param.Limit).
Sort(param.DateField, param.Ascending)
Size(param.Limit)
// 是否使用search_after方式
if param.SearchAfter != nil {
// 设置默认排序字段
if len(param.SearchAfter.SortFields) == 0 {
source = source.Sort(param.DateField, param.Ascending).Sort(string(FieldIndex), true).Sort(string(FieldId), true)
} else {
for _, field := range param.SearchAfter.SortFields {
source = source.Sort(field.Field, field.Ascending)
}
}
if len(param.SearchAfter.SearchAfter) > 0 {
source = source.SearchAfter(param.SearchAfter.SearchAfter...)
}
} else {
source = source.From(param.P).Sort(param.DateField, param.Ascending)
}
result, err := search(ctx, indexArr, source, param.Timeout, param.MaxShard)
if err != nil {
logger.Warningf("query data error:%v", err)

View File

@@ -8,6 +8,7 @@ import (
"github.com/ccfos/nightingale/v6/datasource"
"github.com/ccfos/nightingale/v6/dskit/doris"
"github.com/ccfos/nightingale/v6/dskit/types"
"github.com/ccfos/nightingale/v6/pkg/macros"
"github.com/ccfos/nightingale/v6/models"
"github.com/mitchellh/mapstructure"
@@ -27,11 +28,16 @@ type Doris struct {
}
type QueryParam struct {
Ref string `json:"ref" mapstructure:"ref"`
Database string `json:"database" mapstructure:"database"`
Table string `json:"table" mapstructure:"table"`
SQL string `json:"sql" mapstructure:"sql"`
Keys datasource.Keys `json:"keys" mapstructure:"keys"`
Ref string `json:"ref" mapstructure:"ref"`
Database string `json:"database" mapstructure:"database"`
Table string `json:"table" mapstructure:"table"`
SQL string `json:"sql" mapstructure:"sql"`
Keys datasource.Keys `json:"keys" mapstructure:"keys"`
Limit int `json:"limit" mapstructure:"limit"`
From int64 `json:"from" mapstructure:"from"`
To int64 `json:"to" mapstructure:"to"`
TimeField string `json:"time_field" mapstructure:"time_field"`
TimeFormat string `json:"time_format" mapstructure:"time_format"`
}
func (d *Doris) InitClient() error {
@@ -66,7 +72,7 @@ func (d *Doris) Validate(ctx context.Context) error {
func (d *Doris) Equal(p datasource.Datasource) bool {
newest, ok := p.(*Doris)
if !ok {
logger.Errorf("unexpected plugin type, expected is ck")
logger.Errorf("unexpected plugin type, expected is doris")
return false
}
@@ -174,6 +180,14 @@ func (d *Doris) QueryLog(ctx context.Context, query interface{}) ([]interface{},
return nil, 0, err
}
if strings.Contains(dorisQueryParam.SQL, "$__") {
var err error
dorisQueryParam.SQL, err = macros.Macro(dorisQueryParam.SQL, dorisQueryParam.From, dorisQueryParam.To)
if err != nil {
return nil, 0, err
}
}
items, err := d.QueryLogs(ctx, &doris.QueryParam{
Database: dorisQueryParam.Database,
Sql: dorisQueryParam.SQL,
@@ -187,7 +201,7 @@ func (d *Doris) QueryLog(ctx context.Context, query interface{}) ([]interface{},
logs = append(logs, items[i])
}
return logs, 0, nil
return logs, int64(len(logs)), nil
}
func (d *Doris) DescribeTable(ctx context.Context, query interface{}) ([]*types.ColumnProperty, error) {

View File

@@ -100,7 +100,8 @@ func (os *OpenSearch) InitClient() error {
Header: headers,
}
if os.Basic.Enable && os.Basic.Username != "" {
// 只要有用户名就添加认证,不依赖 Enable 字段
if os.Basic.Username != "" {
options.Username = os.Basic.Username
options.Password = os.Basic.Password
}
@@ -154,8 +155,9 @@ func (os *OpenSearch) Validate(ctx context.Context) (err error) {
}
}
if os.Basic.Enable && (len(os.Basic.Username) == 0 || len(os.Basic.Password) == 0) {
return fmt.Errorf("need a valid user, password")
// 如果提供了用户名,必须同时提供密码
if len(os.Basic.Username) > 0 && len(os.Basic.Password) == 0 {
return fmt.Errorf("password is required when username is provided")
}
if os.MaxShard == 0 {

View File

@@ -23,7 +23,7 @@ const (
)
var (
regx = "(?i)from\\s+([a-zA-Z0-9_]+)\\.([a-zA-Z0-9_]+)\\.([a-zA-Z0-9_]+)"
regx = `(?i)from\s+((?:"[^"]+"|[a-zA-Z0-9_]+))\.((?:"[^"]+"|[a-zA-Z0-9_]+))\.((?:"[^"]+"|[a-zA-Z0-9_]+))`
)
func init() {
@@ -162,6 +162,7 @@ func (p *PostgreSQL) QueryData(ctx context.Context, query interface{}) ([]models
return nil, err
}
postgresqlQueryParam.SQL = formatSQLDatabaseNameWithRegex(postgresqlQueryParam.SQL)
if strings.Contains(postgresqlQueryParam.SQL, "$__") {
var err error
postgresqlQueryParam.SQL, err = macros.Macro(postgresqlQueryParam.SQL, postgresqlQueryParam.From, postgresqlQueryParam.To)
@@ -229,6 +230,7 @@ func (p *PostgreSQL) QueryLog(ctx context.Context, query interface{}) ([]interfa
p.Shards[0].DB = db
}
postgresqlQueryParam.SQL = formatSQLDatabaseNameWithRegex(postgresqlQueryParam.SQL)
if strings.Contains(postgresqlQueryParam.SQL, "$__") {
var err error
postgresqlQueryParam.SQL, err = macros.Macro(postgresqlQueryParam.SQL, postgresqlQueryParam.From, postgresqlQueryParam.To)
@@ -280,7 +282,17 @@ func parseDBName(sql string) (db string, err error) {
if len(matches) != 4 {
return "", fmt.Errorf("no valid table name in format database.schema.table found")
}
return matches[1], nil
return strings.Trim(matches[1], `"`), nil
}
// dbNameQualifierRe matches "from dbname.schema.table" qualifiers
// (case-insensitive, unquoted identifiers only). Compiled once at package
// scope so callers don't pay a regex compilation on every query.
var dbNameQualifierRe = regexp.MustCompile(`(?i)\bfrom\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\.\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\.\s*([a-zA-Z_][a-zA-Z0-9_]*)`)

// formatSQLDatabaseNameWithRegex rewrites every dbname.schema.tabname
// qualifier in a FROM clause as "dbname"."schema"."tabname".
// PostgreSQL folds unquoted identifiers to lower case, so double-quoting
// preserves the case the user actually typed. Already-quoted qualifiers do
// not match the pattern and are left untouched.
// NOTE: the matched FROM keyword itself is rewritten in lower case.
func formatSQLDatabaseNameWithRegex(sql string) string {
	return dbNameQualifierRe.ReplaceAllString(sql, `from "$1"."$2"."$3"`)
}
func extractColumns(sql string) ([]string, error) {

View File

@@ -956,7 +956,7 @@ CREATE TABLE notify_rule (
id bigserial PRIMARY KEY,
name varchar(255) NOT NULL,
description text,
enable smallint NOT NULL DEFAULT 0,
enable boolean DEFAULT false,
user_group_ids varchar(255) NOT NULL DEFAULT '',
notify_configs text,
pipeline_configs text,
@@ -971,7 +971,7 @@ CREATE TABLE notify_channel (
name varchar(255) NOT NULL,
ident varchar(255) NOT NULL,
description text,
enable smallint NOT NULL DEFAULT 0,
enable boolean DEFAULT false,
param_config text,
request_type varchar(50) NOT NULL,
request_config text,

View File

@@ -723,7 +723,6 @@ CREATE TABLE `builtin_metrics` (
`updated_by` varchar(191) NOT NULL DEFAULT '' COMMENT '''updater''',
`uuid` bigint NOT NULL DEFAULT 0 COMMENT '''uuid''',
PRIMARY KEY (`id`),
UNIQUE KEY `idx_collector_typ_name` (`lang`,`collector`, `typ`, `name`),
INDEX `idx_uuid` (`uuid`),
INDEX `idx_collector` (`collector`),
INDEX `idx_typ` (`typ`),
@@ -837,8 +836,8 @@ CREATE TABLE `event_pipeline` (
`description` varchar(255) not null default '',
`filter_enable` tinyint(1) not null default 0,
`label_filters` text,
`attribute_filters` text,
`processors` text,
`attr_filters` text,
`processor_configs` text,
`create_at` bigint not null default 0,
`create_by` varchar(64) not null default '',
`update_at` bigint not null default 0,

View File

@@ -13,7 +13,6 @@ CREATE TABLE `builtin_metrics` (
`updated_at` bigint NOT NULL DEFAULT 0 COMMENT 'update time',
`updated_by` varchar(191) NOT NULL DEFAULT '' COMMENT 'updater',
PRIMARY KEY (`id`),
UNIQUE KEY `idx_collector_typ_name` (`lang`,`collector`, `typ`, `name`),
INDEX `idx_collector` (`collector`),
INDEX `idx_typ` (`typ`),
INDEX `idx_name` (`name`),
@@ -236,9 +235,8 @@ CREATE TABLE `event_pipeline` (
`team_ids` text,
`description` varchar(255) not null default '',
`filter_enable` tinyint(1) not null default 0,
`label_filters` text,
`attribute_filters` text,
`processors` text,
`attr_filters` text,
`processor_configs` text,
`create_at` bigint not null default 0,
`create_by` varchar(64) not null default '',
`update_at` bigint not null default 0,
@@ -246,7 +244,21 @@ CREATE TABLE `event_pipeline` (
PRIMARY KEY (`id`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;
/* v8.0.0-next */
/* v8.0.0 2025-05-15 */
CREATE TABLE `embedded_product` (
`id` bigint unsigned NOT NULL AUTO_INCREMENT,
`name` varchar(255) DEFAULT NULL,
`url` varchar(255) DEFAULT NULL,
`is_private` boolean DEFAULT NULL,
`team_ids` varchar(255),
`create_at` bigint not null default 0,
`create_by` varchar(64) not null default '',
`update_at` bigint not null default 0,
`update_by` varchar(64) not null default '',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
/* v8.0.0 2025-05-29 */
CREATE TABLE `source_token` (
`id` bigint unsigned NOT NULL AUTO_INCREMENT,
`source_type` varchar(64) NOT NULL DEFAULT '' COMMENT 'source type',
@@ -259,6 +271,15 @@ CREATE TABLE `source_token` (
KEY `idx_source_type_id_token` (`source_type`, `source_id`, `token`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
/* Add translation column for builtin metrics */
ALTER TABLE `builtin_metrics` ADD COLUMN `translation` TEXT COMMENT 'translation of metric' AFTER `lang`;
/* v8.0.0-beta.12 2025-06-03 */
ALTER TABLE `alert_his_event` ADD COLUMN `notify_rule_ids` text COMMENT 'notify rule ids';
ALTER TABLE `alert_cur_event` ADD COLUMN `notify_rule_ids` text COMMENT 'notify rule ids';
/* v8.0.0-beta.13 */
-- Drop the idx_collector_typ_name unique index from builtin_metrics.
-- NOTE(review): stock MySQL does not accept DROP INDEX IF EXISTS ... ON;
-- that clause is MariaDB syntax — confirm the target server before shipping.
DROP INDEX IF EXISTS `idx_collector_typ_name` ON `builtin_metrics`;

View File

@@ -656,7 +656,6 @@ CREATE TABLE `builtin_metrics` (
`uuid integer` not null default 0
);
CREATE UNIQUE INDEX idx_collector_typ_name ON builtin_metrics (lang, collector, typ, name);
CREATE INDEX idx_collector ON builtin_metrics (collector);
CREATE INDEX idx_typ ON builtin_metrics (typ);
CREATE INDEX idx_builtinmetric_name ON builtin_metrics (name);

View File

@@ -22,6 +22,8 @@ import (
var FromAPIHook func()
var DatasourceProcessHook func(items []datasource.DatasourceInfo) []datasource.DatasourceInfo
func Init(ctx *ctx.Context, fromAPI bool) {
go getDatasourcesFromDBLoop(ctx, fromAPI)
}
@@ -100,6 +102,10 @@ func getDatasourcesFromDBLoop(ctx *ctx.Context, fromAPI bool) {
atomic.StoreInt64(&PromDefaultDatasourceId, 0)
}
if DatasourceProcessHook != nil {
dss = DatasourceProcessHook(dss)
}
PutDatasources(dss)
} else {
FromAPIHook()
@@ -163,7 +169,7 @@ func PutDatasources(items []datasource.DatasourceInfo) {
ds, err := datasource.GetDatasourceByType(typ, item.Settings)
if err != nil {
logger.Warningf("get plugin:%+v fail: %v", item, err)
logger.Debugf("get plugin:%+v fail: %v", item, err)
continue
}

View File

@@ -129,9 +129,7 @@ func (c *Clickhouse) QueryRows(ctx context.Context, query string) (*sql.Rows, er
// ShowDatabases lists all databases in Clickhouse
func (c *Clickhouse) ShowDatabases(ctx context.Context) ([]string, error) {
var (
res []string
)
res := make([]string, 0)
rows, err := c.QueryRows(ctx, ShowDatabases)
if err != nil {
@@ -151,9 +149,7 @@ func (c *Clickhouse) ShowDatabases(ctx context.Context) ([]string, error) {
// ShowTables lists all tables in a given database
func (c *Clickhouse) ShowTables(ctx context.Context, database string) ([]string, error) {
var (
res []string
)
res := make([]string, 0)
showTables := fmt.Sprintf(ShowTables, database)
rows, err := c.QueryRows(ctx, showTables)

View File

@@ -20,8 +20,8 @@ import (
// Doris struct to hold connection details and the connection object
type Doris struct {
Addr string `json:"doris.addr" mapstructure:"doris.addr"` // be node
FeAddr string `json:"doris.fe_addr" mapstructure:"doris.fe_addr"` // fe node
Addr string `json:"doris.addr" mapstructure:"doris.addr"` // fe mysql endpoint
FeAddr string `json:"doris.fe_addr" mapstructure:"doris.fe_addr"` // fe http endpoint
User string `json:"doris.user" mapstructure:"doris.user"` //
Password string `json:"doris.password" mapstructure:"doris.password"` //
Timeout int `json:"doris.timeout" mapstructure:"doris.timeout"`
@@ -138,7 +138,7 @@ func (d *Doris) ShowDatabases(ctx context.Context) ([]string, error) {
}
defer rows.Close()
var databases []string
databases := make([]string, 0)
for rows.Next() {
var dbName string
if err := rows.Scan(&dbName); err != nil {
@@ -201,7 +201,7 @@ func (d *Doris) ShowResources(ctx context.Context, resourceType string) ([]strin
}
// 将 map 转换为切片
var resources []string
resources := make([]string, 0)
for name := range distinctName {
resources = append(resources, name)
}
@@ -226,7 +226,7 @@ func (d *Doris) ShowTables(ctx context.Context, database string) ([]string, erro
}
defer rows.Close()
var tables []string
tables := make([]string, 0)
for rows.Next() {
var tableName string
if err := rows.Scan(&tableName); err != nil {

View File

@@ -115,14 +115,14 @@ func (m *MySQL) NewConn(ctx context.Context, database string) (*gorm.DB, error)
}()
dsn := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True", shard.User, shard.Password, shard.Addr, database)
return sqlbase.NewDB(
db, err = sqlbase.NewDB(
ctx,
mysql.Open(dsn),
shard.MaxIdleConns,
shard.MaxOpenConns,
time.Duration(shard.ConnMaxLifetime)*time.Second,
)
return db, err
}
func (m *MySQL) ShowDatabases(ctx context.Context) ([]string, error) {

View File

@@ -48,7 +48,7 @@ func CloseDB(db *gorm.DB) error {
// ShowTables retrieves a list of all tables in the specified database
func ShowTables(ctx context.Context, db *gorm.DB, query string) ([]string, error) {
var tables []string
tables := make([]string, 0)
rows, err := db.WithContext(ctx).Raw(query).Rows()
if err != nil {

View File

@@ -122,7 +122,7 @@ func (tc *Tdengine) QueryTable(query string) (APIResponse, error) {
}
func (tc *Tdengine) ShowDatabases(context.Context) ([]string, error) {
var databases []string
databases := make([]string, 0)
data, err := tc.QueryTable("show databases")
if err != nil {
return databases, err
@@ -135,7 +135,7 @@ func (tc *Tdengine) ShowDatabases(context.Context) ([]string, error) {
}
func (tc *Tdengine) ShowTables(ctx context.Context, database string) ([]string, error) {
var tables []string
tables := make([]string, 0)
sql := fmt.Sprintf("show %s", database)
data, err := tc.QueryTable(sql)
if err != nil {

View File

@@ -68,6 +68,9 @@ Enable = false
HeaderUserNameKey = "X-User-Name"
DefaultRoles = ["Standard"]
[HTTP.TokenAuth]
Enable = true
[HTTP.RSA]
# open RSA
OpenRSA = false

8
go.mod
View File

@@ -1,6 +1,6 @@
module github.com/ccfos/nightingale/v6
go 1.22
go 1.23.0
require (
github.com/BurntSushi/toml v1.4.0
@@ -13,7 +13,7 @@ require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/dgrijalva/jwt-go v3.2.0+incompatible
github.com/expr-lang/expr v1.16.1
github.com/flashcatcloud/ibex v1.3.5
github.com/flashcatcloud/ibex v1.3.6
github.com/gin-contrib/pprof v1.4.0
github.com/gin-gonic/gin v1.9.1
github.com/glebarez/sqlite v1.11.0
@@ -47,7 +47,7 @@ require (
github.com/tidwall/gjson v1.14.2
github.com/toolkits/pkg v1.3.8
golang.org/x/exp v0.0.0-20231006140011-7918f672742d
golang.org/x/oauth2 v0.23.0
golang.org/x/oauth2 v0.27.0
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
gopkg.in/yaml.v2 v2.4.0
gorm.io/driver/clickhouse v0.6.1
@@ -160,3 +160,5 @@ require (
)
replace golang.org/x/exp v0.0.0-20231006140011-7918f672742d => golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1
// replace github.com/flashcatcloud/ibex => ../github.com/flashcatcloud/ibex

8
go.sum
View File

@@ -89,8 +89,8 @@ github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/flashcatcloud/ibex v1.3.5 h1:8GOOf5+aJT0TP/MC6izz7CO5JKJSdKVFBwL0vQp93Nc=
github.com/flashcatcloud/ibex v1.3.5/go.mod h1:T8hbMUySK2q6cXUaYp0AUVeKkU9Od2LjzwmB5lmTRBM=
github.com/flashcatcloud/ibex v1.3.6 h1:lJShPFxcZksmkB0w99a3uROGB+Fie1NsqOlkAdar12A=
github.com/flashcatcloud/ibex v1.3.6/go.mod h1:iTU1dKT9TnDNllRPRHUOjXe+HDTQkPH2TeaucHtSuh4=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
@@ -416,8 +416,8 @@ golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse HTTP 连接数",
"note": "通过HTTP协议连接到ClickHouse服务器的客户端数量。"
},
{
"lang": "en_US",
"name": "ClickHouse HTTP Connections",
"note": "The number of clients connected to the ClickHouse server via the HTTP protocol."
}
]
},
{
"id": 0,
@@ -27,7 +39,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse INSERT查询平均时间",
"note": "插入查询执行的平均时间(微秒)。"
},
{
"lang": "en_US",
"name": "ClickHouse INSERT query average time",
"note": "The average time in microseconds for the insertion query to execute."
}
]
},
{
"id": 0,
@@ -42,7 +66,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse SELECT 查询数",
"note": "执行的选择SELECT查询的数量"
},
{
"lang": "en_US",
"name": "ClickHouse SELECT Query Number",
"note": "Number of SELECT queries executed"
}
]
},
{
"id": 0,
@@ -57,7 +93,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse SELECT查询平均时间",
"note": "选择查询执行的平均时间(微秒)。"
},
{
"lang": "en_US",
"name": "ClickHouse SELECT query average time",
"note": "Select the average time (microseconds) for query execution."
}
]
},
{
"id": 0,
@@ -72,7 +120,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse TCP 连接数",
"note": "通过TCP协议连接到ClickHouse服务器的客户端数量。"
},
{
"lang": "en_US",
"name": "ClickHouse TCP Connections",
"note": "The number of clients connected to the ClickHouse server via the TCP protocol."
}
]
},
{
"id": 0,
@@ -87,7 +147,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 临时数据量",
"note": "临时数据部分的数量,这些部分当前正在生成。"
},
{
"lang": "en_US",
"name": "ClickHouse Temporary Data Volume",
"note": "The number of temporary data sections that are currently being generated."
}
]
},
{
"id": 0,
@@ -102,7 +174,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 分布式表连接数",
"note": "发送到分布式表的远程服务器的数据连接数。"
},
{
"lang": "en_US",
"name": "ClickHouse Distributed Table Joins",
"note": "The number of data connections sent to the remote server of the distributed table."
}
]
},
{
"id": 0,
@@ -117,7 +201,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 宽数据量",
"note": "宽数据部分的数量。"
},
{
"lang": "en_US",
"name": "ClickHouse wide data volume",
"note": "Number of wide data sections."
}
]
},
{
"id": 0,
@@ -132,7 +228,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 待插入分布式表文件数",
"note": "等待异步插入到分布式表的文件数量。"
},
{
"lang": "en_US",
"name": "ClickHouse Number of distributed table files to be inserted",
"note": "The number of files waiting to be inserted asynchronously into the distributed table."
}
]
},
{
"id": 0,
@@ -147,7 +255,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 提交前数据量",
"note": "提交前的数据部分数量这些部分在data_parts列表中但不用于SELECT查询。"
},
{
"lang": "en_US",
"name": "Data volume before ClickHouse submission",
"note": "The number of data parts before submission, which are in the data _ parts list, but are not used for SELECT queries."
}
]
},
{
"id": 0,
@@ -162,7 +282,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 提交后数据量",
"note": "提交后的数据部分数量这些部分在data_parts列表中并且用于SELECT查询。"
},
{
"lang": "en_US",
"name": "Data volume after ClickHouse submission",
"note": "The number of submitted data parts, which are in the data _ parts list and used for SELECT queries."
}
]
},
{
"id": 0,
@@ -177,7 +309,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 插入未压缩",
"note": " 插入操作写入的未压缩字节数。"
},
{
"lang": "en_US",
"name": "ClickHouse Insert Uncompressed",
"note": "The number of uncompressed bytes written by the insert operation."
}
]
},
{
"id": 0,
@@ -192,7 +336,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 插入行数",
"note": ""
},
{
"lang": "en_US",
"name": "Number of ClickHouse inserted rows",
"note": ""
}
]
},
{
"id": 0,
@@ -207,7 +363,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 查询优先级",
"note": "由于优先级设置,被停止并等待的查询数量。\n"
},
{
"lang": "en_US",
"name": "ClickHouse Query Priority",
"note": "The number of queries that were stopped and waiting due to the priority setting. \n"
}
]
},
{
"id": 0,
@@ -222,7 +390,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 查询总数",
"note": "ClickHouse执行的查询总数。"
},
{
"lang": "en_US",
"name": "Total ClickHouse Queries",
"note": "The total number of queries executed by ClickHouse."
}
]
},
{
"id": 0,
@@ -237,7 +417,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 查询总时间",
"note": "查询执行的总时间(微秒)。"
},
{
"lang": "en_US",
"name": "Total ClickHouse query time",
"note": "The total time in microseconds for the query to execute."
}
]
},
{
"id": 0,
@@ -252,7 +444,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 正被删除数据量",
"note": "正在被删除的数据部分数量。"
},
{
"lang": "en_US",
"name": "ClickHouse Amount of Data being Deleted",
"note": "The number of data parts being deleted."
}
]
},
{
"id": 0,
@@ -267,7 +471,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 移动池活动任务数",
"note": "后台移动池中的活动任务数,用于处理数据移动。"
},
{
"lang": "en_US",
"name": "Number of active tasks in ClickHouse mobile pool",
"note": "The number of active tasks in the background move pool, used to handle data moves."
}
]
},
{
"id": 0,
@@ -282,7 +498,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 紧凑数据量",
"note": "紧凑数据部分的数量。"
},
{
"lang": "en_US",
"name": "ClickHouse Compact Data Volume",
"note": "Number of compact data sections."
}
]
},
{
"id": 0,
@@ -297,7 +525,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 缓冲区活动任务数",
"note": "后台缓冲区冲洗调度池中的活动任务数,用于定期缓冲区冲洗。"
},
{
"lang": "en_US",
"name": "Number of active tasks in ClickHouse buffer",
"note": "The number of active tasks in the background buffer flushing scheduling pool for periodic buffer flushing."
}
]
},
{
"id": 0,
@@ -312,7 +552,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 跨磁盘量",
"note": "移动到另一个磁盘并应在析构函数中删除的数据部分数量。"
},
{
"lang": "en_US",
"name": "ClickHouse cross-disk volume",
"note": "The number of portions of data that are moved to another disk and should be deleted in the destructor."
}
]
},
{
"id": 0,
@@ -327,7 +579,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse 过时数据量",
"note": " 过时的数据部分数量这些部分不是活动数据部分但当前SELECT查询可能使用它们。"
},
{
"lang": "en_US",
"name": "ClickHouse Obsolete Data Volume",
"note": "The number of obsolete data parts that are not active data parts, but may be used by the current SELECT query."
}
]
},
{
"id": 0,
@@ -342,7 +606,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse中内存使用情况",
"note": "ClickHouse服务器使用的总内存量。"
},
{
"lang": "en_US",
"name": "Memory usage in ClickHouse",
"note": "The total amount of memory used by the ClickHouse server."
}
]
},
{
"id": 0,
@@ -357,7 +633,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse中数据库数量",
"note": "ClickHouse数据库数量"
},
{
"lang": "en_US",
"name": "Number of databases in ClickHouse",
"note": "Number of ClickHouse databases"
}
]
},
{
"id": 0,
@@ -372,7 +660,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse中表的数量",
"note": "ClickHouse表数量"
},
{
"lang": "en_US",
"name": "Number of tables in ClickHouse",
"note": "Number of ClickHouse tables"
}
]
},
{
"id": 0,
@@ -387,7 +687,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse修订",
"note": "ClickHouse服务器的修订号通常是一个用于标识特定构建的数字。"
},
{
"lang": "en_US",
"name": "ClickHouse Revision",
"note": "The revision number of the ClickHouse server, usually a number used to identify a specific build."
}
]
},
{
"id": 0,
@@ -402,7 +714,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse服务器运行时间",
"note": "ClickHouse服务器自启动以来的运行时间。"
},
{
"lang": "en_US",
"name": "ClickHouse server runtime",
"note": "The running time of the ClickHouse server since it started."
}
]
},
{
"id": 0,
@@ -417,6 +741,18 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "ClickHouse版本号",
"note": "ClickHouse服务器的版本号以整数形式表示。"
},
{
"lang": "en_US",
"name": "ClickHouse version number",
"note": "Version number of the ClickHouse server, expressed as an integer."
}
]
}
]

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Cluster Health delayed unassigned 的分片数",
"note": ""
},
{
"lang": "en_US",
"name": "Number of Cluster Health delayed unassigned shards",
"note": ""
}
]
},
{
"id": 0,
@@ -27,7 +39,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Cluster Health Pending task 数量",
"note": ""
},
{
"lang": "en_US",
"name": "Cluster Health Pending tasks quantity",
"note": ""
}
]
},
{
"id": 0,
@@ -42,7 +66,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Cluster Health relocating 的分片数",
"note": ""
},
{
"lang": "en_US",
"name": "Number of shards for Cluster Health relocating",
"note": ""
}
]
},
{
"id": 0,
@@ -57,7 +93,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Cluster Health unassigned 的分片数",
"note": ""
},
{
"lang": "en_US",
"name": "Cluster Health unassigned number of shards",
"note": ""
}
]
},
{
"id": 0,
@@ -72,7 +120,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Cluster Health 健康度状态码",
"note": "- 1Green绿色状态表示所有分片都正常\n- 2Yellow黄色状态主分片都正常从分片有不正常的\n- 3Red红色状态有些主分片不正常"
},
{
"lang": "en_US",
"name": "Cluster Health health status code",
"note": "-1: Green, Green state, indicating that all shards are normal \n-2: Yellow, Yellow state, the main shard is normal, the slave shard is abnormal \n-3: Red, Red state, some main shards are abnormal"
}
]
},
{
"id": 0,
@@ -87,7 +147,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Cluster Health 数据节点数量",
"note": ""
},
{
"lang": "en_US",
"name": "Number of Cluster Health data nodes",
"note": ""
}
]
},
{
"id": 0,
@@ -102,7 +174,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Cluster Health 正在初始化的分片数",
"note": ""
},
{
"lang": "en_US",
"name": "Number of shards being initialized by Cluster Health",
"note": ""
}
]
},
{
"id": 0,
@@ -117,7 +201,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Cluster Health 活跃主分片数",
"note": ""
},
{
"lang": "en_US",
"name": "Cluster Health Number of active primary shards",
"note": ""
}
]
},
{
"id": 0,
@@ -132,7 +228,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Cluster Health 活跃分片数",
"note": ""
},
{
"lang": "en_US",
"name": "Cluster Health Active Shards",
"note": ""
}
]
},
{
"id": 0,
@@ -147,7 +255,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Cluster Health 节点数量",
"note": ""
},
{
"lang": "en_US",
"name": "Number of Cluster Health nodes",
"note": ""
}
]
},
{
"id": 0,
@@ -162,7 +282,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Indexing 平均耗时",
"note": ""
},
{
"lang": "en_US",
"name": "Indexing average time consumption",
"note": ""
}
]
},
{
"id": 0,
@@ -177,7 +309,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Merge 平均耗时",
"note": ""
},
{
"lang": "en_US",
"name": "Average time consumed by Merge",
"note": ""
}
]
},
{
"id": 0,
@@ -192,7 +336,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Query 平均耗时",
"note": ""
},
{
"lang": "en_US",
"name": "Query average time consumption",
"note": ""
}
]
},
{
"id": 0,
@@ -207,7 +363,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "每秒 indexing 数量",
"note": ""
},
{
"lang": "en_US",
"name": "indexing per second",
"note": ""
}
]
},
{
"id": 0,
@@ -222,7 +390,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "每秒 merge 大小",
"note": ""
},
{
"lang": "en_US",
"name": "merge size per second",
"note": ""
}
]
},
{
"id": 0,
@@ -237,7 +417,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "每秒 merge 数量",
"note": ""
},
{
"lang": "en_US",
"name": "Number of merges per second",
"note": ""
}
]
},
{
"id": 0,
@@ -252,7 +444,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "每秒删除 doc 数量",
"note": ""
},
{
"lang": "en_US",
"name": "Number of docs deleted per second",
"note": ""
}
]
},
{
"id": 0,
@@ -267,7 +471,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "硬盘使用率",
"note": ""
},
{
"lang": "en_US",
"name": "Hard Drive Usage",
"note": ""
}
]
},
{
"id": 0,
@@ -282,7 +498,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "网络流量 - 入向每秒流量",
"note": ""
},
{
"lang": "en_US",
"name": "Network traffic-inbound traffic per second",
"note": ""
}
]
},
{
"id": 0,
@@ -297,7 +525,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "网络流量 - 出向每秒流量",
"note": ""
},
{
"lang": "en_US",
"name": "Network traffic-outbound traffic per second",
"note": ""
}
]
},
{
"id": 0,
@@ -312,7 +552,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 CPU 使用率",
"note": ""
},
{
"lang": "en_US",
"name": "Process CPU usage",
"note": ""
}
]
},
{
"id": 0,
@@ -327,7 +579,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 JVM Heap 使用率",
"note": ""
},
{
"lang": "en_US",
"name": "Process JVM Heap Usage",
"note": ""
}
]
},
{
"id": 0,
@@ -342,7 +606,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 JVM Heap 区 committed 大小",
"note": ""
},
{
"lang": "en_US",
"name": "Process JVM Heap area committed size",
"note": ""
}
]
},
{
"id": 0,
@@ -357,7 +633,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 JVM Non Heap 区 committed 大小",
"note": ""
},
{
"lang": "en_US",
"name": "Process JVM Non Heap area committed size",
"note": ""
}
]
},
{
"id": 0,
@@ -372,7 +660,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 JVM Old 内存池 used 大小",
"note": ""
},
{
"lang": "en_US",
"name": "Process JVM Old memory pool used size",
"note": ""
}
]
},
{
"id": 0,
@@ -387,7 +687,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 JVM Young 内存池 used 大小",
"note": ""
},
{
"lang": "en_US",
"name": "Process JVM Young memory pool used size",
"note": ""
}
]
},
{
"id": 0,
@@ -402,7 +714,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程新生代每秒 GC 次数",
"note": ""
},
{
"lang": "en_US",
"name": "Number of GCs per second for the new generation of the process",
"note": ""
}
]
},
{
"id": 0,
@@ -417,7 +741,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程新生代每秒 GC 耗时",
"note": ""
},
{
"lang": "en_US",
"name": "Process new generation time per second GC",
"note": ""
}
]
},
{
"id": 0,
@@ -432,7 +768,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程老生代每秒 GC 次数",
"note": ""
},
{
"lang": "en_US",
"name": "Number of GCs per second of process old generation",
"note": ""
}
]
},
{
"id": 0,
@@ -447,6 +795,18 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程老生代每秒 GC 耗时",
"note": ""
},
{
"lang": "en_US",
"name": "Process old generation GC time per second",
"note": ""
}
]
}
]

View File

@@ -12,7 +12,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "HTTP 探测响应码",
"note": "如果没有拿到 response这个指标就没有值了"
},
{
"lang": "en_US",
"name": "HTTP probe response code",
"note": "If you don't get response, this indicator has no value"
}
]
},
{
"id": 0,
@@ -27,7 +39,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "HTTP 探测结果状态码",
"note": "0 值表示正常,大于 0 就是异常,各个值的含义如下:\n\n```\nSuccess = 0\nConnectionFailed = 1\nTimeout = 2\nDNSError = 3\nAddressError = 4\nBodyMismatch = 5\nCodeMismatch = 6\n```"
},
{
"lang": "en_US",
"name": "HTTP probe result status code",
"note": "A value of 0 means normal, and a value greater than 0 means abnormal. The meanings of each value are as follows: \n \n``` \nSuccess = 0 \nConnectionFailed = 1 \nTimeout = 2 \nDNSError = 3 \nAddressError = 4 \nBodyMismatch = 5 \nCodeMismatch = 6 \n```"
}
]
},
{
"id": 0,
@@ -42,7 +66,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "HTTP 探测耗时",
"note": ""
},
{
"lang": "en_US",
"name": "HTTP probe time-consuming",
"note": ""
}
]
},
{
"id": 0,
@@ -57,7 +93,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "HTTP 证书过期时间",
"note": ""
},
{
"lang": "en_US",
"name": "HTTP certificate expiration time",
"note": ""
}
]
},
{
"id": 0,
@@ -72,7 +120,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "拨测 - DNS 请求耗时",
"note": ""
},
{
"lang": "en_US",
"name": "Dial test-DNS request time-consuming",
"note": ""
}
]
},
{
"id": 0,
@@ -87,7 +147,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "拨测 - TCP建连耗时",
"note": ""
},
{
"lang": "en_US",
"name": "Dial test-TCP connection establishment time",
"note": ""
}
]
},
{
"id": 0,
@@ -102,7 +174,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "拨测 - TLS握手耗时",
"note": ""
},
{
"lang": "en_US",
"name": "Dial test-TLS handshake time-consuming",
"note": ""
}
]
},
{
"id": 0,
@@ -117,7 +201,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "拨测 - 探测结果状态码",
"note": "探测结果0 是正常,其他数字有不同含义\n- 0成功\n- 1连接失败\n- 2监测超时\n- 3DNS解析失败\n- 4地址格式错误\n- 5返回内容不匹配\n- 6返回码不匹配\n- 其他数字为未知错误"
},
{
"lang": "en_US",
"name": "Dial test-detection result status code",
"note": "Detection result, 0 is normal, other numbers have different meanings \n-0: Success \n-1: Connection failed \n-2: Monitoring timeout \n-3: DNS resolution failed \n-4: Address format is wrong \n-5: Return content does not match \n-6: Return code mismatch \n-Other numbers are unknown error"
}
]
},
{
"id": 0,
@@ -132,7 +228,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "拨测 - 整体耗时",
"note": ""
},
{
"lang": "en_US",
"name": "Dial test-overall time-consuming",
"note": ""
}
]
},
{
"id": 0,
@@ -147,7 +255,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "拨测 - 返回状态码",
"note": ""
},
{
"lang": "en_US",
"name": "Dial test-Return status code",
"note": ""
}
]
},
{
"id": 0,
@@ -162,6 +282,18 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "拨测 - 首包耗时",
"note": ""
},
{
"lang": "en_US",
"name": "Dial test-first package time-consuming",
"note": ""
}
]
}
]

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Broker 数量",
"note": ""
},
{
"lang": "en_US",
"name": "Number of Brokers",
"note": ""
}
]
},
{
"id": 0,
@@ -27,7 +39,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Partition 副本不同步的数量",
"note": ""
},
{
"lang": "en_US",
"name": "Number of out-of-sync copies of Partition",
"note": ""
}
]
},
{
"id": 0,
@@ -42,7 +66,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Partition 副本数量",
"note": ""
},
{
"lang": "en_US",
"name": "Number of Partition copies",
"note": ""
}
]
},
{
"id": 0,
@@ -57,7 +93,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "各个 Topic 每秒消费消息量",
"note": ""
},
{
"lang": "en_US",
"name": "Each Topic consumes messages per second",
"note": ""
}
]
},
{
"id": 0,
@@ -72,7 +120,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "各个 Topic 每秒生产消息量",
"note": ""
},
{
"lang": "en_US",
"name": "Production message volume per second per Topic",
"note": ""
}
]
},
{
"id": 0,
@@ -87,6 +147,18 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "各个 Topic 的 Partition 数量",
"note": ""
},
{
"lang": "en_US",
"name": "Number of Partitions for each Topic",
"note": ""
}
]
}
]

File diff suppressed because it is too large Load Diff

View File

@@ -1,282 +1,618 @@
[
{
"uuid": 1745893024149445000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "Inode数量",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(container_fs_inodes_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)"
"uuid": 1745893024149445000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "Inode数量",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(container_fs_inodes_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)",
"translation": [
{
"lang": "zh_CN",
"name": "Inode数量",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "Number of Inodes",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024121015300,
"collector": "Pod",
"typ": "Kubernetes",
"name": "不可中断任务数量",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(container_tasks_state{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\", state=\"uninterruptible\"}) by (name)"
"uuid": 1745893024121015300,
"collector": "Pod",
"typ": "Kubernetes",
"name": "不可中断任务数量",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(container_tasks_state{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\", state=\"uninterruptible\"}) by (name)",
"translation": [
{
"lang": "zh_CN",
"name": "不可中断任务数量",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "Number of uninterruptible tasks",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024130551800,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器cache使用",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "(sum(container_memory_cache{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name))"
"uuid": 1745893024130551800,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器cache使用",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "(sum(container_memory_cache{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name))",
"translation": [
{
"lang": "zh_CN",
"name": "容器cache使用",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "Container cache use",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024108569900,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器CPU Limit",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}/container_spec_cpu_period{namespace=\"$namespace\",",
"lang": "zh_CN",
"expression": "(sum(container_spec_cpu_quota{namespace=\"$namespace\", pod=~\"$pod_name\"}/container_spec_cpu_period{namespace=\"$namespace\", pod=~\"$pod_name\"}) by (name))"
"uuid": 1745893024108569900,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器CPU Limit",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}/container_spec_cpu_period{namespace=\"$namespace\",",
"lang": "zh_CN",
"expression": "(sum(container_spec_cpu_quota{namespace=\"$namespace\", pod=~\"$pod_name\"}/container_spec_cpu_period{namespace=\"$namespace\", pod=~\"$pod_name\"}) by (name))",
"translation": [
{
"lang": "zh_CN",
"name": "容器CPU Limit",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}/container_spec_cpu_period{namespace=\"$namespace\","
},
{
"lang": "en_US",
"name": "Container CPU Limit",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\"}/container _ spec _ cpu _ period {namespace = \"$namespace\","
}
]
},
{
"uuid": 1745893024112672500,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器CPU load 10",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(container_cpu_load_average_10s{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)"
"uuid": 1745893024112672500,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器CPU load 10",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(container_cpu_load_average_10s{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)",
"translation": [
{
"lang": "zh_CN",
"name": "容器CPU load 10",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "Container CPU load 10",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024026246700,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器CPU使用率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}[1m])*100) by(name)"
"uuid": 1745893024026246700,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器CPU使用率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}[1m])*100) by(name)",
"translation": [
{
"lang": "zh_CN",
"name": "容器CPU使用率",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "Container CPU usage",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024029544000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器CPU归一化后使用率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}[1m])*100) by(name)/((sum(container_spec_cpu_quota{namespace=\"$namespace\", pod=~\"$pod_name\"}/container_spec_cpu_period{namespace=\"$namespace\", pod=~\"$pod_name\"}) by (name)))"
"uuid": 1745893024029544000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器CPU归一化后使用率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}[1m])*100) by(name)/((sum(container_spec_cpu_quota{namespace=\"$namespace\", pod=~\"$pod_name\"}/container_spec_cpu_period{namespace=\"$namespace\", pod=~\"$pod_name\"}) by (name)))",
"translation": [
{
"lang": "zh_CN",
"name": "容器CPU归一化后使用率",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "Container CPU usage after normalization",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024146207700,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器I/O",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(container_fs_io_current{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)"
"uuid": 1745893024146207700,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器I/O",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(container_fs_io_current{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)",
"translation": [
{
"lang": "zh_CN",
"name": "容器I/O",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "Container I/O",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024136457000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器RSS内存使用",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "(sum(container_memory_rss{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name))"
"uuid": 1745893024136457000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器RSS内存使用",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "(sum(container_memory_rss{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name))",
"translation": [
{
"lang": "zh_CN",
"name": "容器RSS内存使用",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "Container RSS memory usage",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024139900200,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器内存 Limit",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(container_spec_memory_limit_bytes{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)"
"uuid": 1745893024139900200,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器内存 Limit",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(container_spec_memory_limit_bytes{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)",
"translation": [
{
"lang": "zh_CN",
"name": "容器内存 Limit",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "Container Memory Limit",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024032984300,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器内存使用",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "(sum(container_memory_usage_bytes{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name))"
"uuid": 1745893024032984300,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器内存使用",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "(sum(container_memory_usage_bytes{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name))",
"translation": [
{
"lang": "zh_CN",
"name": "容器内存使用",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "Container memory usage",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024127585500,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器内存使用率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "((sum(container_memory_usage_bytes{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)) /(sum(container_spec_memory_limit_bytes{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)))*100"
"uuid": 1745893024127585500,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器内存使用率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "((sum(container_memory_usage_bytes{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)) /(sum(container_spec_memory_limit_bytes{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)))*100",
"translation": [
{
"lang": "zh_CN",
"name": "容器内存使用率",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "Container memory usage",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024093620000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器内核态CPU使用率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(rate(container_cpu_system_seconds_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}[1m])*100) by(name)"
"uuid": 1745893024093620000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器内核态CPU使用率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(rate(container_cpu_system_seconds_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}[1m])*100) by(name)",
"translation": [
{
"lang": "zh_CN",
"name": "容器内核态CPU使用率",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "Container kernel mode CPU usage",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024102879000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器发生CPU throttle的比率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(rate(container_cpu_cfs_throttled_periods_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}[1m]))by(name) *100"
"uuid": 1745893024102879000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器发生CPU throttle的比率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(rate(container_cpu_cfs_throttled_periods_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}[1m]))by(name) *100",
"translation": [
{
"lang": "zh_CN",
"name": "容器发生CPU throttle的比率",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "The rate at which container CPU throttle occurs",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024143177000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器发生OOM次数",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(container_oom_events_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)"
"uuid": 1745893024143177000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器发生OOM次数",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(container_oom_events_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)",
"translation": [
{
"lang": "zh_CN",
"name": "容器发生OOM次数",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "Number of OOM occurrences for container",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024083942000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器启动时长(小时)",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum((time()-container_start_time_seconds{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"})) by (name)"
"uuid": 1745893024083942000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器启动时长(小时)",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum((time()-container_start_time_seconds{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"})) by (name)",
"translation": [
{
"lang": "zh_CN",
"name": "容器启动时长(小时)",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "Container startup time (hours)",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024152466200,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器已使用的文件系统大小",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(container_fs_usage_bytes{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)"
"uuid": 1745893024152466200,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器已使用的文件系统大小",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(container_fs_usage_bytes{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}) by (name)",
"translation": [
{
"lang": "zh_CN",
"name": "容器已使用的文件系统大小",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "File system size used by the container",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024097849600,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器用户态CPU使用率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(rate(container_cpu_user_seconds_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}[1m])*100) by(name)"
"uuid": 1745893024097849600,
"collector": "Pod",
"typ": "Kubernetes",
"name": "容器用户态CPU使用率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(rate(container_cpu_user_seconds_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}[1m])*100) by(name)",
"translation": [
{
"lang": "zh_CN",
"name": "容器用户态CPU使用率",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "Container user mode CPU usage",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024036896800,
"collector": "Pod",
"typ": "Kubernetes",
"name": "文件系统写入速率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(rate(container_fs_writes_bytes_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}[1m])) by(name)"
"uuid": 1745893024036896800,
"collector": "Pod",
"typ": "Kubernetes",
"name": "文件系统写入速率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(rate(container_fs_writes_bytes_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}[1m])) by(name)",
"translation": [
{
"lang": "zh_CN",
"name": "文件系统写入速率",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "File system write rate",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024057722000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "文件系统读取速率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(rate(container_fs_reads_bytes_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}[1m])) by(name)"
"uuid": 1745893024057722000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "文件系统读取速率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\",",
"lang": "zh_CN",
"expression": "sum(rate(container_fs_reads_bytes_total{namespace=\"$namespace\", pod=~\"$pod_name\", image!~\".*pause.*\"}[1m])) by(name)",
"translation": [
{
"lang": "zh_CN",
"name": "文件系统读取速率",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\","
},
{
"lang": "en_US",
"name": "File system read rate",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\","
}
]
},
{
"uuid": 1745893024166898000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络发送丢包数",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_transmit_packets_dropped_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)"
"uuid": 1745893024166898000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络发送丢包数",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_transmit_packets_dropped_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)",
"translation": [
{
"lang": "zh_CN",
"name": "网络发送丢包数",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))"
},
{
"lang": "en_US",
"name": "Number of packets lost by network transmission",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\"} [1m]))"
}
]
},
{
"uuid": 1745893024160266500,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络发送数据包",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_transmit_packets_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)"
"uuid": 1745893024160266500,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络发送数据包",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_transmit_packets_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)",
"translation": [
{
"lang": "zh_CN",
"name": "网络发送数据包",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))"
},
{
"lang": "en_US",
"name": "The network sends packets",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\"} [1m]))"
}
]
},
{
"uuid": 1745893024069935000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络发送速率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_transmit_bytes_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)"
"uuid": 1745893024069935000,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络发送速率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_transmit_bytes_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)",
"translation": [
{
"lang": "zh_CN",
"name": "网络发送速率",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))"
},
{
"lang": "en_US",
"name": "Network transmission rate",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\"} [1m]))"
}
]
},
{
"uuid": 1745893024163721700,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络发送错误数",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_transmit_errors_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)"
"uuid": 1745893024163721700,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络发送错误数",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_transmit_errors_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)",
"translation": [
{
"lang": "zh_CN",
"name": "网络发送错误数",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))"
},
{
"lang": "en_US",
"name": "Number of network transmission errors",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\"} [1m]))"
}
]
},
{
"uuid": 1745893024173485600,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络接收丢包数",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_receive_packets_dropped_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)"
"uuid": 1745893024173485600,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络接收丢包数",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_receive_packets_dropped_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)",
"translation": [
{
"lang": "zh_CN",
"name": "网络接收丢包数",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))"
},
{
"lang": "en_US",
"name": "Number of packet losses received by network",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\"} [1m]))"
}
]
},
{
"uuid": 1745893024156389600,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络接收数据包数",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_receive_packets_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)"
"uuid": 1745893024156389600,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络接收数据包数",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_receive_packets_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)",
"translation": [
{
"lang": "zh_CN",
"name": "网络接收数据包数",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))"
},
{
"lang": "en_US",
"name": "Number of packets received by network",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\"} [1m]))"
}
]
},
{
"uuid": 1745893024075864800,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络接收速率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_receive_bytes_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)"
"uuid": 1745893024075864800,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络接收速率",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_receive_bytes_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)",
"translation": [
{
"lang": "zh_CN",
"name": "网络接收速率",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))"
},
{
"lang": "en_US",
"name": "Network reception rate",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\"} [1m]))"
}
]
},
{
"uuid": 1745893024170233300,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络接收错误数",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_receive_errors_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)"
"uuid": 1745893024170233300,
"collector": "Pod",
"typ": "Kubernetes",
"name": "网络接收错误数",
"unit": "",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))",
"lang": "zh_CN",
"expression": "sum(rate(container_network_receive_errors_total{namespace=\"$namespace\", pod=~\"$pod_name\"}[1m])) by(name, interface)",
"translation": [
{
"lang": "zh_CN",
"name": "网络接收错误数",
"note": "Pod自身指标\n类型: pod=~\"$pod_name\"}[1m]))"
},
{
"lang": "en_US",
"name": "Number of network reception errors",
"note": "Pod's own indicators \nType: pod = ~ \"$pod _ name\"} [1m]))"
}
]
}
]
]

View File

@@ -1,5 +1,5 @@
{
"name": "机器常用指标(使用 Categraf 作为采集器,如果只想看当前业务组内的机器修改大盘变量 ident 的变量类型为机器标识即可)",
"name": "机器常用指标(如果只想看当前业务组内的机器修改大盘变量 ident 的变量类型为机器标识即可)",
"tags": "Categraf",
"ident": "",
"uuid": 1737103014612000,

View File

@@ -0,0 +1,300 @@
{
"name": "Host Table NG",
"tags": "Categraf",
"ident": "",
"uuid": 1756720567064000,
"configs": {
"var": [
{
"name": "prom",
"label": "PROM",
"type": "datasource",
"hide": false,
"definition": "prometheus"
},
{
"name": "ident",
"label": "机器",
"type": "query",
"hide": false,
"datasource": {
"cate": "prometheus",
"value": "${prom}"
},
"definition": "label_values(mem_free, ident)",
"multi": true,
"allOption": true
}
],
"panels": [
{
"type": "tableNG",
"id": "306cab0d-f643-4d86-94d0-248fc05fd8a8",
"layout": {
"h": 10,
"w": 24,
"x": 0,
"y": 0,
"i": "306cab0d-f643-4d86-94d0-248fc05fd8a8",
"isResizable": true
},
"version": "3.1.0",
"datasourceCate": "prometheus",
"datasourceValue": "${prom}",
"targets": [
{
"refId": "A",
"expr": "cpu_usage_active{ident=~\"$ident\"}",
"instant": true
},
{
"expr": "100 - mem_available_percent{ident=~\"$ident\"}",
"__mode__": "__query__",
"refId": "B",
"instant": true
},
{
"expr": "disk_used_percent{path=\"/\", ident=~\"$ident\"}",
"__mode__": "__query__",
"refId": "C",
"instant": true
},
{
"expr": "categraf_info{ident=~\"$ident\"}",
"__mode__": "__query__",
"refId": "D",
"instant": true
}
],
"transformationsNG": [
{
"id": "joinByField",
"options": {
"mode": "outer",
"byField": "ident"
}
},
{
"id": "organize",
"options": {
"fields": [
"ident",
"__time_0",
"__name___0",
"cpu",
"__value_#A",
"__time_1",
"__value_#B",
"__time_2",
"__name___2",
"device",
"fstype",
"mode",
"path",
"__value_#C",
"__time_3",
"__name___3",
"version",
"__value_#D"
],
"renameByName": {
"ident": "机器",
"__value_#A": "CPU利用率%",
"__value_#B": "内存利用率%",
"__value_#C": "根分区利用率%",
"version": "Categraf Version"
},
"excludeByName": {
"__time_0": true,
"__name__": true,
"agent_isp": true,
"agent_region": true,
"cpu": true,
"env": true,
"myenv": true,
"__time_1": true,
"__time_2": true,
"__name___2": true,
"device": true,
"fstype": true,
"mode": true,
"path": true,
"__name___0": true,
"__value_#D": true,
"__time_3": true,
"__name___3": true
},
"indexByName": {
"ident": 0,
"version": 1,
"__time_0": 2,
"__name___0": 3,
"agent_isp": 4,
"agent_region": 5,
"cpu": 6,
"env": 7,
"myenv": 8,
"__value_#A": 9,
"__time_1": 10,
"__value_#B": 11,
"__time_2": 12,
"__name___2": 13,
"device": 14,
"fstype": 15,
"mode": 16,
"path": 17,
"__value_#C": 18,
"__time_3": 19,
"__name___3": 20,
"__value_#D": 21
}
}
}
],
"name": "机器表格样例",
"maxPerRow": 4,
"custom": {
"showHeader": true,
"filterable": true,
"cellOptions": {
"type": "none",
"wrapText": false
}
},
"options": {
"links": [
{
"title": "详情",
"url": "/components/dashboard/detail?__uuid__=1737103014612000&ident=${ident}&prom=${prom}",
"targetBlank": true
}
],
"standardOptions": {
"decimals": 2
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"value": "CPU利用率%"
},
"properties": {
"cellOptions": {
"type": "color-background",
"mode": "lcd",
"valueDisplayMode": "text"
},
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgb(255, 101, 107)",
"value": 85,
"type": ""
},
{
"color": "rgba(236, 210, 69, 1)",
"value": 70,
"type": ""
},
{
"color": "rgb(44, 157, 61)",
"value": null,
"type": "base"
}
]
},
"valueMappings": [],
"standardOptions": {
"util": "percent",
"decimals": 2,
"min": 0,
"max": 100
}
}
},
{
"matcher": {
"id": "byName",
"value": "内存利用率%"
},
"properties": {
"cellOptions": {
"type": "gauge",
"mode": "lcd",
"valueDisplayMode": "text"
},
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgb(255, 101, 107)",
"value": 80,
"type": ""
},
{
"color": "rgba(236, 210, 69, 1)",
"value": 60,
"type": ""
},
{
"color": "rgb(44, 157, 61)",
"value": null,
"type": "base"
}
]
},
"standardOptions": {
"util": "percent",
"decimals": 2,
"min": 0,
"max": 100
}
}
},
{
"matcher": {
"id": "byName",
"value": "根分区利用率%"
},
"properties": {
"cellOptions": {
"type": "gauge",
"mode": "basic",
"valueDisplayMode": "text"
},
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "rgb(255, 101, 107)",
"value": 90,
"type": ""
},
{
"color": "rgba(236, 210, 69, 1)",
"value": 60,
"type": ""
},
{
"color": "rgb(44, 157, 61)",
"value": null,
"type": "base"
}
]
},
"standardOptions": {
"util": "percent",
"decimals": 2,
"min": 0,
"max": 100
}
}
}
]
}
],
"version": "3.1.0"
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "CPU Steal 时间占比(整机平均)",
"note": ""
},
{
"lang": "en_US",
"name": "CPU Steal time ratio (average of the whole machine)",
"note": ""
}
]
},
{
"id": 0,
@@ -27,7 +39,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "CPU 内核态时间占比(整机平均)",
"note": ""
},
{
"lang": "en_US",
"name": "CPU core mode time ratio (average of the whole machine)",
"note": ""
}
]
},
{
"id": 0,
@@ -42,7 +66,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "CPU 利用率(整机平均)",
"note": ""
},
{
"lang": "en_US",
"name": "CPU utilization (machine average)",
"note": ""
}
]
},
{
"id": 0,
@@ -57,7 +93,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "CPU 用户态时间占比(整机平均)",
"note": ""
},
{
"lang": "en_US",
"name": "CPU user mode time ratio (average of the whole machine)",
"note": ""
}
]
},
{
"id": 0,
@@ -72,7 +120,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "CPU 硬中断时间占比(整机平均)",
"note": ""
},
{
"lang": "en_US",
"name": "Proportion of CPU hard interrupt time (average of the whole machine)",
"note": ""
}
]
},
{
"id": 0,
@@ -87,7 +147,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "CPU 空闲率(整机平均)",
"note": ""
},
{
"lang": "en_US",
"name": "CPU idle rate (overall machine average)",
"note": ""
}
]
},
{
"id": 0,
@@ -102,7 +174,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "CPU 软中断时间占比(整机平均)",
"note": ""
},
{
"lang": "en_US",
"name": "Proportion of CPU soft interrupt time (average of the whole machine)",
"note": ""
}
]
},
{
"id": 0,
@@ -113,11 +197,23 @@
"unit": "percent",
"note": "交换空间使用率。计算原子取自 `/proc/meminfo`。",
"lang": "zh_CN",
"expression": "(node_memory_SwapTotal_bytes - node_memory_SwapFree_bytes)/node_memory_SwapTotal_bytes * 100 and node_memory_SwapTotal_bytes \u003e 0",
"expression": "(node_memory_SwapTotal_bytes - node_memory_SwapFree_bytes)/node_memory_SwapTotal_bytes * 100 and node_memory_SwapTotal_bytes > 0",
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "交换空间使用率",
"note": "交换空间使用率。计算原子取自 `/proc/meminfo`。"
},
{
"lang": "en_US",
"name": "Swap space usage",
"note": "Swap space usage. The computational atom is taken from `/proc/meminfo `."
}
]
},
{
"id": 0,
@@ -132,7 +228,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "交换空间总量",
"note": "交换空间总量。取自 `/proc/meminfo`。"
},
{
"lang": "en_US",
"name": "Total swap space",
"note": "Total amount of swap space. Taken from `/proc/meminfo `."
}
]
},
{
"id": 0,
@@ -147,7 +255,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "交换空间空闲量",
"note": "交换空间空闲量。取自 `/proc/meminfo`。"
},
{
"lang": "en_US",
"name": "Swap space free amount",
"note": "Exchange space free amount. Taken from `/proc/meminfo `."
}
]
},
{
"id": 0,
@@ -162,7 +282,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "内存 Buffered 量",
"note": "用作缓冲区的内存量。取自 `/proc/meminfo`。"
},
{
"lang": "en_US",
"name": "Memory Buffered amount",
"note": "The amount of memory used as a buffer. Taken from `/proc/meminfo `."
}
]
},
{
"id": 0,
@@ -177,7 +309,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "内存 Cached 量",
"note": "用作文件缓存的内存量。取自 `/proc/meminfo`。"
},
{
"lang": "en_US",
"name": "Memory Cached amount",
"note": "The amount of memory used as file cache. Taken from `/proc/meminfo `."
}
]
},
{
"id": 0,
@@ -192,7 +336,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "内存使用率基于MemAvailable",
"note": "内存使用率。基于 MemAvailable 计算更准确,但是老版本的 Linux 不支持。"
},
{
"lang": "en_US",
"name": "Memory usage (based on MemAvailable)",
"note": "Memory usage. Calculation based on MemAvailable is more accurate, but older versions of Linux do not support it."
}
]
},
{
"id": 0,
@@ -207,7 +363,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "内存可用量",
"note": "可以立即分配给进程的可用内存量。取自 `/proc/meminfo`。"
},
{
"lang": "en_US",
"name": "Memory Availability",
"note": "The amount of available memory that can be immediately allocated to a process. Taken from `/proc/meminfo `."
}
]
},
{
"id": 0,
@@ -222,7 +390,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "内存总量",
"note": "内存总量。取自 `/proc/meminfo`。"
},
{
"lang": "en_US",
"name": "Total memory",
"note": "Total amount of memory. Taken from `/proc/meminfo `."
}
]
},
{
"id": 0,
@@ -237,7 +417,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "内存空闲量",
"note": "未使用的内存量。取自 `/proc/meminfo`。"
},
{
"lang": "en_US",
"name": "Free memory amount",
"note": "Amount of unused memory. Taken from `/proc/meminfo `."
}
]
},
{
"id": 0,
@@ -252,7 +444,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "文件句柄 - 已分配占比",
"note": ""
},
{
"lang": "en_US",
"name": "File handle-allocated proportion",
"note": ""
}
]
},
{
"id": 0,
@@ -267,7 +471,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "文件句柄 - 已分配量",
"note": ""
},
{
"lang": "en_US",
"name": "File Handle-Amount Allocated",
"note": ""
}
]
},
{
"id": 0,
@@ -282,7 +498,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "文件句柄 - 总可分配量",
"note": ""
},
{
"lang": "en_US",
"name": "File handle-total allocable quantity",
"note": ""
}
]
},
{
"id": 0,
@@ -297,7 +525,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "硬盘 IO - 时间维度 Utilization",
"note": "在时间维度统计硬盘 IO 时间占比,比如该值是 50%,表示有 50% 的时间是在处理 IO该值 100%,表示一直在处理 IO但是注意现代磁盘设备具备并行处理多个 I/O 请求的能力,所以即便该值是 100%,可能硬盘还是可以接收新的处理请求。\n\n比如某人有两只手最近 1 分钟一直在用单手劳动,从时间维度来看,利用率是 100%,但即便是 100%,再给他更多的活,他也能干,因为他还有一只手可用。"
},
{
"lang": "en_US",
"name": "Hard Disk IO-Time Dimension Utilization",
"note": "Count the proportion of hard disk IO time in the time dimension. For example, if the value is 50%, it means that 50% of the time is processing IO, and if the value is 100%, it means that IO has been processing all the time. However, note that modern disk devices have the ability to process multiple I/O requests in parallel, so even if the value is 100%, the hard disk may still be able to receive new processing requests. \n \nFor example, someone has two hands and has been working with one hand in the last minute. From the time dimension, the utilization rate is 100%, but even if it is 100%, he can do it if he is given more work, because he still has one hand available."
}
]
},
{
"id": 0,
@@ -312,7 +552,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "硬盘 IO - 每秒写入字节数量",
"note": ""
},
{
"lang": "en_US",
"name": "Hard disk IO-bytes written per second",
"note": ""
}
]
},
{
"id": 0,
@@ -327,7 +579,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "硬盘 IO - 每秒写次数",
"note": "每秒写次数"
},
{
"lang": "en_US",
"name": "Hard drive IO-writes per second",
"note": "Writes per second"
}
]
},
{
"id": 0,
@@ -342,7 +606,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "硬盘 IO - 每秒读取字节数量",
"note": ""
},
{
"lang": "en_US",
"name": "Hard Drive IO-bytes read per second",
"note": ""
}
]
},
{
"id": 0,
@@ -357,7 +633,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "硬盘 IO - 每秒读次数",
"note": "每秒读次数"
},
{
"lang": "en_US",
"name": "Hard drive IO-Reads per second",
"note": "Reads per second"
}
]
},
{
"id": 0,
@@ -372,7 +660,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "硬盘使用率",
"note": "硬盘空间使用率。"
},
{
"lang": "en_US",
"name": "Hard Drive Usage",
"note": "Hard disk space usage."
}
]
},
{
"id": 0,
@@ -387,7 +687,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "硬盘剩余量",
"note": "使用 SI 标准渲染数据,和 df 命令保持一致。"
},
{
"lang": "en_US",
"name": "Remaining hard disk",
"note": "Use the SI standard to render data, consistent with the df command."
}
]
},
{
"id": 0,
@@ -402,7 +714,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "硬盘可用量",
"note": "使用 SI 标准渲染数据,和 df 命令保持一致。"
},
{
"lang": "en_US",
"name": "Hard Drive Availability",
"note": "Use the SI standard to render data, consistent with the df command."
}
]
},
{
"id": 0,
@@ -417,7 +741,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "硬盘总量",
"note": "使用 SI 标准渲染数据,和 df 命令保持一致。"
},
{
"lang": "en_US",
"name": "Total hard disk",
"note": "Use the SI standard to render data, consistent with the df command."
}
]
},
{
"id": 0,
@@ -432,7 +768,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "系统 CPU 核数",
"note": "CPU 逻辑核的数量。"
},
{
"lang": "en_US",
"name": "Number of CPU cores",
"note": "Number of CPU logical cores."
}
]
},
{
"id": 0,
@@ -447,7 +795,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "系统平均负载 - 最近 1 分钟",
"note": "取自 `/proc/loadavg`。"
},
{
"lang": "en_US",
"name": "System load average-last 1 minute",
"note": "Taken from `/proc/loadavg `."
}
]
},
{
"id": 0,
@@ -462,7 +822,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "系统平均负载 - 最近 15 分钟",
"note": "取自 `/proc/loadavg`。"
},
{
"lang": "en_US",
"name": "System load average-last 15 minutes",
"note": "Taken from `/proc/loadavg `."
}
]
},
{
"id": 0,
@@ -477,7 +849,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "系统平均负载 - 最近 5 分钟",
"note": "取自 `/proc/loadavg`。"
},
{
"lang": "en_US",
"name": "System load average-last 5 minutes",
"note": "Taken from `/proc/loadavg `."
}
]
},
{
"id": 0,
@@ -492,7 +876,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "系统平均负载(单核) - 最近 1 分钟",
"note": ""
},
{
"lang": "en_US",
"name": "System Load Average (Single Core)-Last 1 Minute",
"note": ""
}
]
},
{
"id": 0,
@@ -507,7 +903,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "系统平均负载(单核) - 最近 15 分钟",
"note": ""
},
{
"lang": "en_US",
"name": "System Load Average (Single Core)-Last 15 Minutes",
"note": ""
}
]
},
{
"id": 0,
@@ -522,7 +930,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "系统平均负载(单核) - 最近 5 分钟",
"note": ""
},
{
"lang": "en_US",
"name": "System Load Average (Single Core)-Last 5 Minutes",
"note": ""
}
]
},
{
"id": 0,
@@ -537,7 +957,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "网卡入方向(接收)每秒丢弃的数据包个数",
"note": "原始指标 node_network_receive_drop_total 表示操作系统启动之后各个网卡入方向(接收)丢弃的数据包总数。"
},
{
"lang": "en_US",
"name": "Number of packets dropped per second in the incoming direction (receiving) of the network card",
"note": "The original indicator node _ network _ receive _ drop _ total indicates the total number of packets dropped (received) by each network card after the operating system starts."
}
]
},
{
"id": 0,
@@ -552,7 +984,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "网卡入方向(接收)每秒数据包数",
"note": "原始指标 node_network_receive_packets_total 表示操作系统启动之后各个网卡入方向(接收)数据包总数。"
},
{
"lang": "en_US",
"name": "NIC incoming (receiving) packets per second",
"note": "The original indicator node _ network _ receive _ packets _ total indicates the total number of data packets incoming (received) by each network card after the operating system is booted."
}
]
},
{
"id": 0,
@@ -567,7 +1011,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "网卡入方向(接收)每秒错包数",
"note": "原始指标 node_network_receive_errs_total 表示操作系统启动之后各个网卡入方向(接收)错包总数。"
},
{
"lang": "en_US",
"name": "Number of wrong packets per second in the incoming direction (receiving) of the network card",
"note": "The original indicator node _ network _ receive _ errs _ total indicates the total number of error packets incoming (received) by each network card after the operating system is started."
}
]
},
{
"id": 0,
@@ -582,7 +1038,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "网卡出方向(发送)每秒丢弃的数据包个数",
"note": "原始指标 node_network_transmit_drop_total 表示操作系统启动之后各个网卡出方向(发送)丢弃的数据包总数。"
},
{
"lang": "en_US",
"name": "Number of packets discarded per second in the outbound direction (sending) of the network card",
"note": "The original indicator node _ network _ transmit _ drop _ total indicates the total number of packets discarded (sent) by each network card after the operating system starts."
}
]
},
{
"id": 0,
@@ -597,7 +1065,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "网卡出方向(发送)每秒数据包数",
"note": "原始指标 node_network_transmit_packets_total 表示操作系统启动之后各个网卡出方向(发送)数据包总数。"
},
{
"lang": "en_US",
"name": "Number of packets per second in the outgoing direction (sent) of the network card",
"note": "The original indicator node _ network _ transmit _ packets _ total indicates the total number of outbound (sent) data packets from each network card after the operating system is started."
}
]
},
{
"id": 0,
@@ -612,7 +1092,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "网卡出方向(发送)每秒错包数",
"note": "原始指标 node_network_transmit_errs_total 表示操作系统启动之后各个网卡出方向(发送)错包总数。"
},
{
"lang": "en_US",
"name": "Number of wrong packets per second in the outgoing direction (sending) of the network card",
"note": "The original indicator node _ network _ transmit _ errs _ total indicates the total number of error packets sent out (sent) by each network card after the operating system is started."
}
]
},
{
"id": 0,
@@ -627,7 +1119,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "网卡每秒发送的 bit 量",
"note": "原始指标 node_network_transmit_bytes_total 表示操作系统启动之后发送的 byte 总量,因为网卡流量习惯使用 bit 作为单位,所以在表达式中做了换算。"
},
{
"lang": "en_US",
"name": "The amount of bits sent per second by the network card",
"note": "The original indicator node _ network _ transmit _ bytes _ total represents the total number of bytes sent after the operating system starts. Because the network card traffic is used to using bits as a unit, it is converted in the expression."
}
]
},
{
"id": 0,
@@ -642,6 +1146,18 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "网卡每秒接收的 bit 量",
"note": "原始指标 node_network_receive_bytes_total 表示操作系统启动之后接收的 byte 总量,因为网卡流量习惯使用 bit 作为单位,所以在表达式中做了换算。"
},
{
"lang": "en_US",
"name": "The number of bits received by the network card per second",
"note": "The original indicator node _ network _ received _ bytes _ total represents the total number of bytes received after the operating system is started. Because the network card traffic is used to using bits as a unit, it is converted in the expression."
}
]
}
]

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status InnoDB 缓冲池 data 大小",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status InnoDB buffer pool data size",
"note": ""
}
]
},
{
"id": 0,
@@ -27,7 +39,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status InnoDB 缓冲池 dirty 大小",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status InnoDB buffer pool dirty size",
"note": ""
}
]
},
{
"id": 0,
@@ -42,7 +66,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status InnoDB 缓冲池 free 大小",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status InnoDB buffer pool free size",
"note": ""
}
]
},
{
"id": 0,
@@ -57,7 +93,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status InnoDB 缓冲池 page 使用率",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status InnoDB buffer pool page usage",
"note": ""
}
]
},
{
"id": 0,
@@ -72,7 +120,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status InnoDB 缓冲池 used 大小",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status InnoDB Buffer Pool used Size",
"note": ""
}
]
},
{
"id": 0,
@@ -87,7 +147,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status InnoDB 缓冲池总大小",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status Total InnoDB Buffer Pool Size",
"note": ""
}
]
},
{
"id": 0,
@@ -102,7 +174,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 启动时长",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status Startup Time",
"note": ""
}
]
},
{
"id": 0,
@@ -117,7 +201,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 当前 running 的 threads 数量",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status The number of threads currently running",
"note": ""
}
]
},
{
"id": 0,
@@ -132,7 +228,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 当前打开的文件句柄数",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status Number of file handles currently open",
"note": ""
}
]
},
{
"id": 0,
@@ -147,7 +255,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 当前连接数",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status Number of current connections",
"note": ""
}
]
},
{
"id": 0,
@@ -162,7 +282,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 最大曾用连接数",
"note": "曾经达到过的最大连接数"
},
{
"lang": "en_US",
"name": "Global Status Maximum number of connections used",
"note": "Maximum number of connections ever reached"
}
]
},
{
"id": 0,
@@ -177,7 +309,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 每秒 Command 数量",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status Number of Commands per second",
"note": ""
}
]
},
{
"id": 0,
@@ -192,7 +336,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 每秒 query 数量",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status queries per second",
"note": ""
}
]
},
{
"id": 0,
@@ -207,7 +363,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 每秒 question 数量",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status Questions per second",
"note": ""
}
]
},
{
"id": 0,
@@ -222,7 +390,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 每秒 slow query 数量",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status slow queries per second",
"note": ""
}
]
},
{
"id": 0,
@@ -237,7 +417,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 每秒事务操作数量",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status Number of transactions per second",
"note": ""
}
]
},
{
"id": 0,
@@ -252,7 +444,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 每秒写操作数量",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status Number of writes per second",
"note": ""
}
]
},
{
"id": 0,
@@ -267,7 +471,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 每秒发送流量",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status sends traffic per second",
"note": ""
}
]
},
{
"id": 0,
@@ -282,7 +498,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 每秒接收流量",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status receives traffic per second",
"note": ""
}
]
},
{
"id": 0,
@@ -297,7 +525,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 每秒读操作数量",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status Read operations per second",
"note": ""
}
]
},
{
"id": 0,
@@ -312,7 +552,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 近 3 分钟 abort 的客户端",
"note": "原始指标 mysql_global_status_aborted_clients 表示由于客户端未正确关闭连接而终止的连接数Counter 类型,单调递增。"
},
{
"lang": "en_US",
"name": "Global Status nearly 3 minutes abort client",
"note": "The raw metric mysql _ global _ status _ aborted _ clients represents the number of connections terminated because the client did not properly close the connection, Counter type, monotonically increasing."
}
]
},
{
"id": 0,
@@ -327,7 +579,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 近 3 分钟 abort 的连接数",
"note": "原始指标 mysql_global_status_aborted_connects 表示尝试连接到 MySQL 服务器失败的次数Counter 类型,单调递增。"
},
{
"lang": "en_US",
"name": "Global Status Number of connections in last 3 minutes abort",
"note": "The raw metric MySQL _ global _ status _ aborted _ connects represents the number of failed attempts to connect to a MySQL server, Counter type, monotonically increasing."
}
]
},
{
"id": 0,
@@ -342,7 +606,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Status 近 3 分钟 table lock 等待次数",
"note": ""
},
{
"lang": "en_US",
"name": "Global Status nearly 3 minutes table lock waiting times",
"note": ""
}
]
},
{
"id": 0,
@@ -357,7 +633,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Variables InnoDB 缓冲池配置大小",
"note": ""
},
{
"lang": "en_US",
"name": "Global Variables InnoDB buffer pool configuration size",
"note": ""
}
]
},
{
"id": 0,
@@ -372,7 +660,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Variables read_only 开关值",
"note": "0 就是 OFF1 是 ON"
},
{
"lang": "en_US",
"name": "Global Variables read _ only Switch value",
"note": "0 is OFF, 1 is ON"
}
]
},
{
"id": 0,
@@ -387,7 +687,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Variables 允许打开的文件句柄数",
"note": ""
},
{
"lang": "en_US",
"name": "Number of file handles that Global Variables allows to open",
"note": ""
}
]
},
{
"id": 0,
@@ -402,7 +714,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Variables 最大连接数限制",
"note": "允许的最大连接数,默认值是 151过小了。\n\n- 通过 `SHOW VARIABLES LIKE 'max_connections'` 命令查看当前设置\n- 通过 `SET GLOBAL max_connections = 2048` 重新设置最大连接数\n- 通过修改 MySQL 配置文件,在 `[mysqld]` 下面添加 `max_connections = 2048` 使其重启依旧生效"
},
{
"lang": "en_US",
"name": "Global Variables Maximum Connection Limit",
"note": "The maximum number of connections allowed, the default value is 151, is too small. \n \n-View the current settings with the ` SHOW VARIABLES LIKE'max _ connections ''command \n-Reset the maximum number of connections via ` SET GLOBAL max _ connections = 2048 ` \n-By modifying the MySQL configuration file, add ` max _ connections = 2048 ` under ` [mysqld] ` so that its restart still works"
}
]
},
{
"id": 0,
@@ -417,7 +741,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Global Variables 查询缓存大小",
"note": ""
},
{
"lang": "en_US",
"name": "Global Variables Query Cache Size",
"note": ""
}
]
},
{
"id": 0,
@@ -432,7 +768,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "MySQL 实例是否 UP",
"note": "1 表示 UP说明能正常连到 MySQL 采集数据0 表示无法连通 MySQL 实例,可能是网络问题、认证问题,或者 MySQL 本身就是挂了"
},
{
"lang": "en_US",
"name": "Whether MySQL instance is UP",
"note": "1 means UP, indicating that it can normally connect to MySQL to collect data; 0 means that the MySQL instance cannot be connected. It may be a network problem, authentication problem, or MySQL itself is down"
}
]
},
{
"id": 0,
@@ -447,7 +795,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "MySQL 指标抓取耗时",
"note": ""
},
{
"lang": "en_US",
"name": "MySQL metric crawling time-consuming",
"note": ""
}
]
},
{
"id": 0,
@@ -462,6 +822,18 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "MySQL 版本信息",
"note": ""
},
{
"lang": "en_US",
"name": "MySQL version information",
"note": ""
}
]
}
]

View File

@@ -12,7 +12,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "NET 探测结果状态码",
"note": "0 值表示正常,大于 0 就是异常,各个值的含义如下:\n\n- 0: Success\n- 1: Timeout\n- 2: ConnectionFailed\n- 3: ReadFailed\n- 4: StringMismatch"
},
{
"lang": "en_US",
"name": "NET Probe Result Status Code",
"note": "A value of 0 means normal, and a value greater than 0 means abnormal. The meanings of each value are as follows: \n \n-0: Success \n1: Timeout \n2: ConnectionFailed \n-3: ReadFailed \n4: StringMismatch"
}
]
},
{
"id": 0,
@@ -27,6 +39,18 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "NET 探测耗时",
"note": ""
},
{
"lang": "en_US",
"name": "NET probe time-consuming",
"note": ""
}
]
}
]

View File

@@ -12,7 +12,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Nginx stub_status 当前空闲连接数",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/nginx/README.md)"
},
{
"lang": "en_US",
"name": "Nginx stub _ status Number of current idle connections",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/nginx/README.md)"
}
]
},
{
"id": 0,
@@ -27,7 +39,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Nginx stub_status 正在回写 response 的连接数",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/nginx/README.md)"
},
{
"lang": "en_US",
"name": "Nginx stub _ status The number of connections that are writing back response",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/nginx/README.md)"
}
]
},
{
"id": 0,
@@ -42,7 +66,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Nginx stub_status 正在处理的活动连接数",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/nginx/README.md)\n\nReading + Writing + Waiting 的总和"
},
{
"lang": "en_US",
"name": "Nginx stub _ status Number of active connections being processed",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/nginx/README.md) \n \nSum of Reading + Writing + Waiting"
}
]
},
{
"id": 0,
@@ -57,7 +93,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Nginx stub_status 正在读取 request header 的连接数",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/nginx/README.md)"
},
{
"lang": "en_US",
"name": "Nginx stub _ status is reading the number of connections to the request header",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/nginx/README.md)"
}
]
},
{
"id": 0,
@@ -72,7 +120,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Nginx stub_status 每秒 accept 的新连接数",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/nginx/README.md)"
},
{
"lang": "en_US",
"name": "Nginx stub _ status New connections accepted per second",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/nginx/README.md)"
}
]
},
{
"id": 0,
@@ -87,7 +147,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Nginx stub_status 每秒 handle 的新连接数",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/nginx/README.md)"
},
{
"lang": "en_US",
"name": "Nginx stub _ status New connections per second handle",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/nginx/README.md)"
}
]
},
{
"id": 0,
@@ -102,6 +174,18 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Nginx stub_status 每秒处理的请求数",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/nginx/README.md)\n\n如果有 keep-alive 连接的情况,一个连接上会处理多个请求。"
},
{
"lang": "en_US",
"name": "Nginx stub _ status requests processed per second",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/nginx/README.md) \n \nIf there is a keep-alive connection, multiple requests will be processed on one connection."
}
]
}
]

View File

@@ -12,7 +12,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Ping ttl 时间",
"note": "Time To Live指的是报文在网络中能够“存活”的限制时间"
},
{
"lang": "en_US",
"name": "Ping ttl time",
"note": "Time To Live refers to the limited time that a packet can \"survive\" in the network"
}
]
},
{
"id": 0,
@@ -27,7 +39,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Ping 丢包率",
"note": ""
},
{
"lang": "en_US",
"name": "Ping packet loss rate",
"note": ""
}
]
},
{
"id": 0,
@@ -42,7 +66,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Ping 平均耗时",
"note": ""
},
{
"lang": "en_US",
"name": "Ping average time consumed",
"note": ""
}
]
},
{
"id": 0,
@@ -57,7 +93,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Ping 探测结果状态码",
"note": "值为 0 就是正常,非 0 值就是异常。如果 Ping 失败Categraf 日志中理应会有异常日志"
},
{
"lang": "en_US",
"name": "Ping probe result status code",
"note": "A value of 0 is normal, and a non-0 value is abnormal. If the Ping fails, there should be an exception log in the Categraf log"
}
]
},
{
"id": 0,
@@ -72,7 +120,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Ping 最大耗时",
"note": ""
},
{
"lang": "en_US",
"name": "Ping maximum time consumption",
"note": ""
}
]
},
{
"id": 0,
@@ -87,6 +147,18 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "Ping 最小耗时",
"note": ""
},
{
"lang": "en_US",
"name": "Ping minimum time consumption",
"note": ""
}
]
}
]

View File

@@ -12,7 +12,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 CPU 利用率(单进程)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)\n\nCPU 利用率有两个模式,一个是 solaris一个是 irix默认是 irixirix 模式下CPU 利用率可能会超过 100%solaris 会考虑 CPU 核数solaris 模式的 CPU 利用率不会超过 100%。"
},
{
"lang": "en_US",
"name": "Process CPU utilization (single process)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md) \n \nThere are two modes of CPU utilization, one is solaris and the other is irix. The default is irix. In irix mode, the CPU utilization may exceed 100%. solaris will consider the number of CPU cores, and the CPU utilization in solaris mode will not exceed 100%."
}
]
},
{
"id": 0,
@@ -27,7 +39,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 CPU 总利用率(匹配到的所有进程加和)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)\n\nCPU 利用率有两个模式,一个是 solaris一个是 irix默认是 irixirix 模式下CPU 利用率可能会超过 100%solaris 会考虑 CPU 核数solaris 模式的 CPU 利用率不会超过 100%。"
},
{
"lang": "en_US",
"name": "Total process CPU utilization (sum of all processes matched to)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md) \n \nThere are two modes of CPU utilization, one is solaris and the other is irix. The default is irix. In irix mode, the CPU utilization may exceed 100%. solaris will consider the number of CPU cores, and the CPU utilization in solaris mode will not exceed 100%."
}
]
},
{
"id": 0,
@@ -42,7 +66,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 IO 每秒写入字节总数(匹配到的所有进程加和)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Total number of bytes written per second by process IO (sum of all processes matched to)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -57,7 +93,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 IO 每秒写入字节数(单进程)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Number of bytes written per second by process IO (single process)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -72,7 +120,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 IO 每秒写入次数总数(匹配到的所有进程加和)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Total number of process IO writes per second (sum of all processes matched to)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -87,7 +147,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 IO 每秒写入次数(单进程)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Process IO writes per second (single process)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -102,7 +174,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 IO 每秒读取字节总数(匹配到的所有进程加和)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Total number of bytes read per second by process IO (sum of all processes matched to)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -117,7 +201,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 IO 每秒读取字节数(单进程)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Process IO reads bytes per second (single process)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -132,7 +228,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 IO 每秒读取次数总数(匹配到的所有进程加和)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Total number of process IO reads per second (sum of all processes matched to)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -147,7 +255,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 IO 每秒读取次数(单进程)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Process IO reads per second (single process)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -162,7 +282,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 Memory 利用率(单进程)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Process Memory utilization (single process)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -177,7 +309,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 Memory 总利用率(匹配到的所有进程加和)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Process Memory Total utilization (sum of all processes matched to)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -192,7 +336,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 rlimit fd 软限制数量(匹配到的所有进程中的最小值)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Process rlimit fd Number of soft limits (minimum of all processes matched to)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -207,7 +363,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程 rlimit fd 软限制数量(单进程)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Process rlimit fd Number of soft limits (single process)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -222,7 +390,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程启动时长(匹配到的所有进程的最小值)",
"note": "启动了多久"
},
{
"lang": "en_US",
"name": "Process start time (minimum of all processes matched to)",
"note": "How long has it started"
}
]
},
{
"id": 0,
@@ -237,7 +417,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程启动时长(单进程)",
"note": "启动了多久"
},
{
"lang": "en_US",
"name": "Process startup time (single process)",
"note": "How long has it started"
}
]
},
{
"id": 0,
@@ -252,7 +444,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程数量(根据匹配条件查到的进程数量)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Number of processes (the number of processes found according to matching conditions)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -267,7 +471,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程文件句柄总打开数(匹配到的所有进程加和)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Total number of process file handles open (sum of all processes matched to)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -282,7 +498,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程文件句柄打开数(单进程)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Number of process file handle openings (single process)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -297,7 +525,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程线程总数(匹配到的所有进程加和)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Total number of process threads (sum of all processes matched to)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
},
{
"id": 0,
@@ -312,6 +552,18 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "进程线程数(单进程)",
"note": "[文档](https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
},
{
"lang": "en_US",
"name": "Number of process threads (single process)",
"note": "[Documentation] (https://github.com/flashcatcloud/categraf/blob/main/inputs/procstat/README.md)"
}
]
}
]

View File

@@ -12,7 +12,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 CPU 利用率system",
"note": ""
},
{
"lang": "en_US",
"name": "Container CPU utilization (system)",
"note": ""
}
]
},
{
"id": 0,
@@ -27,7 +39,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 CPU 利用率user",
"note": ""
},
{
"lang": "en_US",
"name": "Container CPU utilization (user)",
"note": ""
}
]
},
{
"id": 0,
@@ -42,7 +66,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 CPU 利用率(整体,值不会大于 100",
"note": "只有设置了 limit 的容器才能计算此利用率"
},
{
"lang": "en_US",
"name": "Container CPU utilization (overall, the value will not be greater than 100)",
"note": "Only containers with limit set can calculate this utilization"
}
]
},
{
"id": 0,
@@ -57,7 +93,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 CPU 利用率(整体,值可能大于 100",
"note": "如果是 200% 表示占用了 2 个核"
},
{
"lang": "en_US",
"name": "Container CPU utilization (overall, value may be greater than 100)",
"note": "If 200%, it means that 2 cores are occupied"
}
]
},
{
"id": 0,
@@ -72,7 +120,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 CPU 每秒有多少 period",
"note": ""
},
{
"lang": "en_US",
"name": "How many periods does the container CPU have per second",
"note": ""
}
]
},
{
"id": 0,
@@ -87,7 +147,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 CPU 每秒被 throttle 的 period 量",
"note": "如果容器限制了 CPU而 app 所需算法过多, 会被抑制使用container_cpu_cfs_throttled_periods_total 统计总共有多少个 period 被抑制了,如果近期发生抑制是需要关注的,一些延迟敏感的 app 受影响尤为明显。出现被抑制的情况,大概率是需要升配了。"
},
{
"lang": "en_US",
"name": "The amount of periods that the container CPU is throttle per second",
"note": "If the container has a CPU limit and the app needs more CPU than allowed, it will be throttled. container_cpu_cfs_throttled_periods_total counts how many periods have been throttled in total. Recent throttling deserves attention, as latency-sensitive apps are particularly affected. If throttling occurs, the container most likely needs a larger CPU allocation."
}
]
},
{
"id": 0,
@@ -102,7 +174,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 CPU 被 throttle 的比例",
"note": "这个值大于 0 就要注意"
},
{
"lang": "en_US",
"name": "The proportion of container CPU being throttle",
"note": "If this value is greater than 0, pay attention"
}
]
},
{
"id": 0,
@@ -117,7 +201,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 filesystem 使用率",
"note": ""
},
{
"lang": "en_US",
"name": "Container filesystem usage",
"note": ""
}
]
},
{
"id": 0,
@@ -132,7 +228,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 filesystem 使用量",
"note": ""
},
{
"lang": "en_US",
"name": "Container filesystem usage",
"note": ""
}
]
},
{
"id": 0,
@@ -147,7 +255,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 filesystem 当前 IO 次数",
"note": ""
},
{
"lang": "en_US",
"name": "Container filesystem Current IO times",
"note": ""
}
]
},
{
"id": 0,
@@ -162,7 +282,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 filesystem 总量",
"note": ""
},
{
"lang": "en_US",
"name": "Container filesystem Total",
"note": ""
}
]
},
{
"id": 0,
@@ -177,7 +309,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 inode free 量",
"note": ""
},
{
"lang": "en_US",
"name": "Container inode free amount",
"note": ""
}
]
},
{
"id": 0,
@@ -192,7 +336,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 inode total 量",
"note": ""
},
{
"lang": "en_US",
"name": "Container inode total",
"note": ""
}
]
},
{
"id": 0,
@@ -207,7 +363,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 inode 使用率",
"note": ""
},
{
"lang": "en_US",
"name": "Container inode usage",
"note": ""
}
]
},
{
"id": 0,
@@ -222,7 +390,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 IO 每秒写入 byte 量",
"note": ""
},
{
"lang": "en_US",
"name": "Container IO writes bytes per second",
"note": ""
}
]
},
{
"id": 0,
@@ -237,7 +417,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 IO 每秒读取 byte 量",
"note": ""
},
{
"lang": "en_US",
"name": "Container IO reads bytes per second",
"note": ""
}
]
},
{
"id": 0,
@@ -252,7 +444,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 memory cache 量",
"note": ""
},
{
"lang": "en_US",
"name": "Container memory cache amount",
"note": ""
}
]
},
{
"id": 0,
@@ -267,7 +471,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 memory 使用率Usage",
"note": "如果有大量文件 IO有大量 container_memory_cachecontainer_memory_usage_bytes 和 container_memory_working_set_bytes 的大小会有差异"
},
{
"lang": "en_US",
"name": "Container memory usage rate (Usage)",
"note": "If there is heavy file IO and a large container_memory_cache, the sizes of container_memory_usage_bytes and container_memory_working_set_bytes will differ"
}
]
},
{
"id": 0,
@@ -282,7 +498,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 memory 使用率Working Set",
"note": "如果有大量文件 IO有大量 container_memory_cachecontainer_memory_usage_bytes 和 container_memory_working_set_bytes 的大小会有差异"
},
{
"lang": "en_US",
"name": "Container memory usage rate (Working Set)",
"note": "If there is heavy file IO and a large container_memory_cache, the sizes of container_memory_usage_bytes and container_memory_working_set_bytes will differ"
}
]
},
{
"id": 0,
@@ -297,7 +525,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 memory 使用量mapped_file",
"note": ""
},
{
"lang": "en_US",
"name": "Container memory usage (mapped_file)",
"note": ""
}
]
},
{
"id": 0,
@@ -312,7 +552,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 memory 使用量RSS",
"note": ""
},
{
"lang": "en_US",
"name": "Container memory usage (RSS)",
"note": ""
}
]
},
{
"id": 0,
@@ -327,7 +579,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 memory 使用量Swap",
"note": ""
},
{
"lang": "en_US",
"name": "Container memory usage (Swap)",
"note": ""
}
]
},
{
"id": 0,
@@ -342,7 +606,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 memory 使用量Usage",
"note": "如果有大量文件 IO有大量 container_memory_cachecontainer_memory_usage_bytes 和 container_memory_working_set_bytes 的大小会有差异"
},
{
"lang": "en_US",
"name": "Container memory Usage",
"note": "If there is heavy file IO and a large container_memory_cache, the sizes of container_memory_usage_bytes and container_memory_working_set_bytes will differ"
}
]
},
{
"id": 0,
@@ -357,7 +633,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 memory 使用量Working Set",
"note": "如果有大量文件 IO有大量 container_memory_cachecontainer_memory_usage_bytes 和 container_memory_working_set_bytes 的大小会有差异"
},
{
"lang": "en_US",
"name": "Container memory usage (Working Set)",
"note": "If there is heavy file IO and a large container_memory_cache, the sizes of container_memory_usage_bytes and container_memory_working_set_bytes will differ"
}
]
},
{
"id": 0,
@@ -372,7 +660,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 memory 分配失败次数(每秒)",
"note": ""
},
{
"lang": "en_US",
"name": "Container memory allocation failures (per second)",
"note": ""
}
]
},
{
"id": 0,
@@ -387,7 +687,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 memory 限制量",
"note": ""
},
{
"lang": "en_US",
"name": "Container memory limit",
"note": ""
}
]
},
{
"id": 0,
@@ -402,7 +714,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 net 每秒发送 bit 量",
"note": ""
},
{
"lang": "en_US",
"name": "Container net sends bits per second",
"note": ""
}
]
},
{
"id": 0,
@@ -417,7 +741,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 net 每秒发送 byte 量",
"note": ""
},
{
"lang": "en_US",
"name": "Container net sends bytes per second",
"note": ""
}
]
},
{
"id": 0,
@@ -432,7 +768,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 net 每秒发送数据包数量",
"note": ""
},
{
"lang": "en_US",
"name": "Number of packets sent per second by container net",
"note": ""
}
]
},
{
"id": 0,
@@ -447,7 +795,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 net 每秒发送时 drop 包数量",
"note": ""
},
{
"lang": "en_US",
"name": "Number of drop packets sent by container net per second",
"note": ""
}
]
},
{
"id": 0,
@@ -462,7 +822,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 net 每秒发送错包数",
"note": ""
},
{
"lang": "en_US",
"name": "Number of wrong packets sent by container net per second",
"note": ""
}
]
},
{
"id": 0,
@@ -477,7 +849,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 net 每秒接收 bit 量",
"note": ""
},
{
"lang": "en_US",
"name": "The amount of bits received by the container net per second",
"note": ""
}
]
},
{
"id": 0,
@@ -492,7 +876,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 net 每秒接收 byte 量",
"note": ""
},
{
"lang": "en_US",
"name": "Container net receives bytes per second",
"note": ""
}
]
},
{
"id": 0,
@@ -507,7 +903,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 net 每秒接收数据包数量",
"note": ""
},
{
"lang": "en_US",
"name": "Number of packets received per second by container net",
"note": ""
}
]
},
{
"id": 0,
@@ -522,7 +930,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 net 每秒接收时 drop 包数量",
"note": ""
},
{
"lang": "en_US",
"name": "Number of drop packets received by container net per second",
"note": ""
}
]
},
{
"id": 0,
@@ -537,7 +957,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器 net 每秒接收错包数",
"note": ""
},
{
"lang": "en_US",
"name": "Number of wrong packets received by container net per second",
"note": ""
}
]
},
{
"id": 0,
@@ -552,7 +984,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器允许运行的最大线程数",
"note": ""
},
{
"lang": "en_US",
"name": "The maximum number of threads the container is allowed to run",
"note": ""
}
]
},
{
"id": 0,
@@ -567,7 +1011,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器内 1 号进程 soft ulimit 值",
"note": "容器内1号进程的软 ulimit 值。如果为-1则无限制。"
},
{
"lang": "en_US",
"name": "Process No. 1 soft ulimit value in container",
"note": "Soft ulimit value for process #1 inside the container. If -1, there is no limit."
}
]
},
{
"id": 0,
@@ -582,7 +1038,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器已经运行的时间",
"note": ""
},
{
"lang": "en_US",
"name": "How long the container has been running",
"note": ""
}
]
},
{
"id": 0,
@@ -597,7 +1065,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器当前打开套接字数量",
"note": ""
},
{
"lang": "en_US",
"name": "Number of currently open sockets in the container",
"note": ""
}
]
},
{
"id": 0,
@@ -612,7 +1092,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器当前打开文件句柄数量",
"note": ""
},
{
"lang": "en_US",
"name": "Container Number of currently open file handles",
"note": ""
}
]
},
{
"id": 0,
@@ -627,7 +1119,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器当前运行的线程数",
"note": ""
},
{
"lang": "en_US",
"name": "Number of threads currently running in the container",
"note": ""
}
]
},
{
"id": 0,
@@ -642,7 +1146,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器当前运行的进程数",
"note": ""
},
{
"lang": "en_US",
"name": "Number of processes currently running in the container",
"note": ""
}
]
},
{
"id": 0,
@@ -657,7 +1173,19 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器总 GPU 加速卡可用内存量",
"note": ""
},
{
"lang": "en_US",
"name": "Container Total GPU Accelerator Available Memory",
"note": ""
}
]
},
{
"id": 0,
@@ -672,6 +1200,18 @@
"created_at": 0,
"created_by": "",
"updated_at": 0,
"updated_by": ""
"updated_by": "",
"translation": [
{
"lang": "zh_CN",
"name": "容器正在使用的 GPU 加速卡内存量",
"note": ""
},
{
"lang": "en_US",
"name": "The amount of GPU accelerator card memory the container is using",
"note": ""
}
]
}
]

View File

@@ -240,17 +240,17 @@ func (ncc *NotifyChannelCacheType) startHttpChannel(chID int64, channel *models.
go ncc.startNotifyConsumer(chID, queue, quitCh)
}
logger.Infof("started %d notify consumers for channel %d", concurrency, chID)
logger.Debugf("started %d notify consumers for channel %d", concurrency, chID)
}
// 启动通知消费者协程
func (ncc *NotifyChannelCacheType) startNotifyConsumer(channelID int64, queue *list.SafeListLimited, quitCh chan struct{}) {
logger.Infof("starting notify consumer for channel %d", channelID)
logger.Debugf("starting notify consumer for channel %d", channelID)
for {
select {
case <-quitCh:
logger.Infof("notify consumer for channel %d stopped", channelID)
logger.Debugf("notify consumer for channel %d stopped", channelID)
return
default:
// 从队列中取出任务
@@ -276,6 +276,7 @@ func (ncc *NotifyChannelCacheType) startNotifyConsumer(channelID int64, queue *l
// processNotifyTask 处理通知任务(仅处理 http 类型)
func (ncc *NotifyChannelCacheType) processNotifyTask(task *NotifyTask) {
httpClient := ncc.GetHttpClient(task.NotifyChannel.ID)
logger.Debugf("processNotifyTask: task: %+v", task)
// 现在只处理 http 类型flashduty 保持直接发送
if task.NotifyChannel.RequestType == "http" {
@@ -294,7 +295,7 @@ func (ncc *NotifyChannelCacheType) processNotifyTask(task *NotifyTask) {
for i := range task.Sendtos {
start := time.Now()
resp, err := task.NotifyChannel.SendHTTP(task.Events, task.TplContent, task.CustomParams, []string{task.Sendtos[i]}, httpClient)
resp = fmt.Sprintf("duration: %d ms %s", time.Since(start).Milliseconds(), resp)
resp = fmt.Sprintf("send_time: %s duration: %d ms %s", time.Now().Format("2006-01-02 15:04:05"), time.Since(start).Milliseconds(), resp)
logger.Infof("notify_id: %d, channel_name: %v, event:%+v, tplContent:%v, customParams:%v, userInfo:%+v, respBody: %v, err: %v",
task.NotifyRuleId, task.NotifyChannel.Name, task.Events[0], task.TplContent, task.CustomParams, task.Sendtos[i], resp, err)
@@ -448,7 +449,7 @@ func (ncc *NotifyChannelCacheType) startEmailSender(chID int64, smtp *models.SMT
logger.Warning("SMTP configurations invalid")
return
}
logger.Infof("start email sender... conf.Host:%+v,conf.Port:%+v", conf.Host, conf.Port)
logger.Debugf("start email sender... conf.Host:%+v,conf.Port:%+v", conf.Host, conf.Port)
d := gomail.NewDialer(conf.Host, conf.Port, conf.Username, conf.Password)
if conf.InsecureSkipVerify {
@@ -502,7 +503,11 @@ func (ncc *NotifyChannelCacheType) startEmailSender(chID int64, smtp *models.SMT
m.Mail.GetHeader("Subject"), m.Mail.GetHeader("To"))
}
// sender.NotifyRecord(ncc.ctx, m.Events, m.NotifyRuleId, models.Email, strings.Join(m.Mail.GetHeader("To"), ","), "", err)
// 记录通知详情
if ncc.notifyRecordFunc != nil {
target := strings.Join(m.Mail.GetHeader("To"), ",")
ncc.notifyRecordFunc(ncc.ctx, m.Events, m.NotifyRuleId, "Email", target, "success", err)
}
size++
if size >= conf.Batch {

View File

@@ -65,6 +65,7 @@ type AlertCurEvent struct {
NotifyUsersObj []*User `json:"notify_users_obj,omitempty" gorm:"-"` // for notify.py
LastEvalTime int64 `json:"last_eval_time" gorm:"-"` // for notify.py 上次计算的时间
LastSentTime int64 `json:"last_sent_time" gorm:"-"` // 上次发送时间
FirstEvalTime int64 `json:"first_eval_time" gorm:"-"` // 首次异常检测时间
NotifyCurNumber int `json:"notify_cur_number"` // notify: current number
FirstTriggerTime int64 `json:"first_trigger_time"` // 连续告警的首次告警时间
ExtraConfig interface{} `json:"extra_config" gorm:"-"`
@@ -77,9 +78,12 @@ type AlertCurEvent struct {
RuleHash string `json:"rule_hash" gorm:"-"`
ExtraInfoMap []map[string]string `json:"extra_info_map" gorm:"-"`
NotifyRuleIds []int64 `json:"notify_rule_ids" gorm:"serializer:json"`
NotifyRuleId int64 `json:"notify_rule_id" gorm:"-"`
NotifyRuleName string `json:"notify_rule_name" gorm:"-"`
NotifyVersion int `json:"notify_version" gorm:"-"` // 0: old, 1: new
NotifyRules []*EventNotifyRule `json:"notify_rules" gorm:"-"`
RecoverTime int64 `json:"recover_time" gorm:"-"`
}
type EventNotifyRule struct {

View File

@@ -127,7 +127,7 @@ func (e *AlertHisEvent) FillNotifyGroups(ctx *ctx.Context, cache map[int64]*User
func AlertHisEventTotal(
ctx *ctx.Context, prods []string, bgids []int64, stime, etime int64, severity int,
recovered int, dsIds []int64, cates []string, ruleId int64, query string) (int64, error) {
recovered int, dsIds []int64, cates []string, ruleId int64, query string, eventIds []int64) (int64, error) {
session := DB(ctx).Model(&AlertHisEvent{}).Where("last_eval_time between ? and ?", stime, etime)
if len(prods) > 0 {
@@ -158,6 +158,10 @@ func AlertHisEventTotal(
session = session.Where("rule_id = ?", ruleId)
}
if len(eventIds) > 0 {
session = session.Where("id in ?", eventIds)
}
if query != "" {
arr := strings.Fields(query)
for i := 0; i < len(arr); i++ {
@@ -171,7 +175,7 @@ func AlertHisEventTotal(
func AlertHisEventGets(ctx *ctx.Context, prods []string, bgids []int64, stime, etime int64,
severity int, recovered int, dsIds []int64, cates []string, ruleId int64, query string,
limit, offset int) ([]AlertHisEvent, error) {
limit, offset int, eventIds []int64) ([]AlertHisEvent, error) {
session := DB(ctx).Where("last_eval_time between ? and ?", stime, etime)
if len(prods) != 0 {
@@ -202,6 +206,10 @@ func AlertHisEventGets(ctx *ctx.Context, prods []string, bgids []int64, stime, e
session = session.Where("rule_id = ?", ruleId)
}
if len(eventIds) > 0 {
session = session.Where("id in ?", eventIds)
}
if query != "" {
arr := strings.Fields(query)
for i := 0; i < len(arr); i++ {
@@ -415,6 +423,10 @@ func (e *AlertHisEvent) ToCur() *AlertCurEvent {
NotifyChannelsJSON: e.NotifyChannelsJSON,
NotifyGroupsJSON: e.NotifyGroupsJSON,
OriginalTagsJSON: e.OriginalTagsJSON,
NotifyRuleIds: e.NotifyRuleIds,
NotifyRules: e.NotifyRules,
NotifyVersion: e.NotifyVersion,
RecoverTime: e.RecoverTime,
}
cur.SetTagsMap()

View File

@@ -20,7 +20,7 @@ type TagFilter struct {
Key string `json:"key"` // tag key
Func string `json:"func"` // `==` | `=~` | `in` | `!=` | `!~` | `not in`
Op string `json:"op"` // `==` | `=~` | `in` | `!=` | `!~` | `not in`
Value string `json:"value"` // tag value
Value interface{} `json:"value"` // tag value
Regexp *regexp.Regexp // parse value to regexp if func = '=~' or '!~'
Vset map[string]struct{} // parse value to regexp if func = 'in' or 'not in'
}
@@ -46,15 +46,59 @@ func ParseTagFilter(bFilters []TagFilter) ([]TagFilter, error) {
var err error
for i := 0; i < len(bFilters); i++ {
if bFilters[i].Func == "=~" || bFilters[i].Func == "!~" {
bFilters[i].Regexp, err = regexp.Compile(bFilters[i].Value)
// 这里存在两个情况,一个是 string 一个是 int
var pattern string
switch v := bFilters[i].Value.(type) {
case string:
pattern = v
case int:
pattern = strconv.Itoa(v)
default:
return nil, fmt.Errorf("unsupported value type for regex: %T", v)
}
bFilters[i].Regexp, err = regexp.Compile(pattern)
if err != nil {
return nil, err
}
} else if bFilters[i].Func == "in" || bFilters[i].Func == "not in" {
arr := strings.Fields(bFilters[i].Value)
// 这里存在两个情况,一个是 string 一个是[]int
bFilters[i].Vset = make(map[string]struct{})
for j := 0; j < len(arr); j++ {
bFilters[i].Vset[arr[j]] = struct{}{}
switch v := bFilters[i].Value.(type) {
case string:
// 处理字符串情况
arr := strings.Fields(v)
for j := 0; j < len(arr); j++ {
bFilters[i].Vset[arr[j]] = struct{}{}
}
case []int:
// 处理[]int情况
for j := 0; j < len(v); j++ {
bFilters[i].Vset[strconv.Itoa(v[j])] = struct{}{}
}
case []string:
for j := 0; j < len(v); j++ {
bFilters[i].Vset[v[j]] = struct{}{}
}
case []interface{}:
// 处理[]interface{}情况JSON解析可能产生
for j := 0; j < len(v); j++ {
switch item := v[j].(type) {
case string:
bFilters[i].Vset[item] = struct{}{}
case int:
bFilters[i].Vset[strconv.Itoa(item)] = struct{}{}
case float64:
bFilters[i].Vset[strconv.Itoa(int(item))] = struct{}{}
}
}
default:
// 兜底处理,转为字符串
str := fmt.Sprintf("%v", v)
arr := strings.Fields(str)
for j := 0; j < len(arr); j++ {
bFilters[i].Vset[arr[j]] = struct{}{}
}
}
}
}
@@ -73,15 +117,54 @@ func GetTagFilters(jsonArr ormx.JSONArr) ([]TagFilter, error) {
}
for i := 0; i < len(bFilters); i++ {
if bFilters[i].Func == "=~" || bFilters[i].Func == "!~" {
bFilters[i].Regexp, err = regexp.Compile(bFilters[i].Value)
var pattern string
switch v := bFilters[i].Value.(type) {
case string:
pattern = v
case int:
pattern = strconv.Itoa(v)
default:
return nil, fmt.Errorf("unsupported value type for regex: %T", v)
}
bFilters[i].Regexp, err = regexp.Compile(pattern)
if err != nil {
return nil, err
}
} else if bFilters[i].Func == "in" || bFilters[i].Func == "not in" {
arr := strings.Fields(bFilters[i].Value)
bFilters[i].Vset = make(map[string]struct{})
for j := 0; j < len(arr); j++ {
bFilters[i].Vset[arr[j]] = struct{}{}
// 在GetTagFilters中Value通常是string类型但也要处理其他可能的类型
switch v := bFilters[i].Value.(type) {
case string:
// 处理字符串情况
arr := strings.Fields(v)
for j := 0; j < len(arr); j++ {
bFilters[i].Vset[arr[j]] = struct{}{}
}
case []int:
// 处理[]int情况
for j := 0; j < len(v); j++ {
bFilters[i].Vset[strconv.Itoa(v[j])] = struct{}{}
}
case []interface{}:
// 处理[]interface{}情况JSON解析可能产生
for j := 0; j < len(v); j++ {
switch item := v[j].(type) {
case string:
bFilters[i].Vset[item] = struct{}{}
case int:
bFilters[i].Vset[strconv.Itoa(item)] = struct{}{}
case float64:
bFilters[i].Vset[strconv.Itoa(int(item))] = struct{}{}
}
}
default:
// 兜底处理,转为字符串
str := fmt.Sprintf("%v", v)
arr := strings.Fields(str)
for j := 0; j < len(arr); j++ {
bFilters[i].Vset[arr[j]] = struct{}{}
}
}
}
}

View File

@@ -380,8 +380,7 @@ func GetHostsQuery(queries []HostQuery) []map[string]interface{} {
if q.Op == "==" {
m["target_busi_group.group_id in (?)"] = ids
} else {
m["target.ident not in (select target_ident "+
"from target_busi_group where group_id in (?))"] = ids
m["NOT EXISTS (SELECT 1 FROM target_busi_group tbg WHERE tbg.target_ident = target.ident AND tbg.group_id IN (?))"] = ids
}
case "tags":
lst := []string{}
@@ -525,12 +524,19 @@ func (ar *AlertRule) Verify() error {
return err
}
if len(ar.NotifyRuleIds) > 0 {
ar.NotifyVersion = 1
if ar.NotifyVersion == 0 {
// 如果是旧版本,则清空 NotifyRuleIds
ar.NotifyRuleIds = []int64{}
}
if ar.NotifyVersion > 0 {
// 如果是新版本,则清空旧的通知媒介和通知组
ar.NotifyChannelsJSON = []string{}
ar.NotifyGroupsJSON = []string{}
ar.NotifyChannels = ""
ar.NotifyGroups = ""
ar.Callbacks = ""
ar.CallbacksJSON = []string{}
}
return nil
@@ -728,6 +734,15 @@ func (ar *AlertRule) UpdateColumn(ctx *ctx.Context, column string, value interfa
return DB(ctx).Model(ar).Updates(updates).Error
}
if column == "notify_groups" || column == "notify_channels" {
updates := map[string]interface{}{
column: value,
"notify_version": 0,
"notify_rule_ids": []int64{},
}
return DB(ctx).Model(ar).Updates(updates).Error
}
return DB(ctx).Model(ar).UpdateColumn(column, value).Error
}
@@ -885,7 +900,8 @@ func (ar *AlertRule) FE2DB() error {
}
ar.AlgoParams = string(algoParamsByte)
if ar.RuleConfigJson == nil {
// 老的规则,是 PromQl 和 Severity 字段,新版的规则,使用 RuleConfig 字段
if ar.RuleConfigJson == nil || len(ar.PromQl) > 0 {
query := PromQuery{
PromQl: ar.PromQl,
Severity: ar.Severity,
@@ -970,6 +986,8 @@ func (ar *AlertRule) DB2FE() error {
return err
}
ar.FillSeverities()
return nil
}
@@ -1001,11 +1019,8 @@ func AlertRuleExists(ctx *ctx.Context, id, groupId int64, name string) (bool, er
if err != nil {
return false, err
}
if len(lst) == 0 {
return false, nil
}
return false, nil
return len(lst) > 0, nil
}
func GetAlertRuleIdsByTaskId(ctx *ctx.Context, taskId int64) ([]int64, error) {

View File

@@ -116,7 +116,18 @@ func (s *AlertSubscribe) Verify() error {
return errors.New("severities is required")
}
if len(s.NotifyRuleIds) > 0 {
if s.NotifyVersion == 1 {
if len(s.NotifyRuleIds) == 0 {
return errors.New("no notify rules selected")
}
s.UserGroupIds = ""
s.RedefineChannels = 0
s.NewChannels = ""
s.RedefineWebhooks = 0
s.Webhooks = ""
s.RedefineSeverity = 0
s.NewSeverity = 0
return nil
}
@@ -132,8 +143,8 @@ func (s *AlertSubscribe) Verify() error {
}
}
if s.NotifyVersion == 1 && len(s.NotifyRuleIds) == 0 {
return errors.New("no notify rules selected")
if s.NotifyVersion == 0 {
s.NotifyRuleIds = []int64{}
}
return nil
@@ -381,6 +392,17 @@ func (s *AlertSubscribe) MatchProd(prod string) bool {
return s.Prod == prod
}
func (s *AlertSubscribe) MatchCate(cate string) bool {
if s.Cate == "" {
return true
}
if s.Cate == "host" {
return cate == "host"
}
return true
}
func (s *AlertSubscribe) MatchCluster(dsId int64) bool {
// 没有配置数据源, 或者事件不需要关联数据源
// do not match any datasource or event not related to datasource

View File

@@ -8,6 +8,8 @@ import (
"github.com/ccfos/nightingale/v6/pkg/ctx"
)
const SYSTEM = "system"
// BuiltinComponent represents a builtin component along with its metadata.
type BuiltinComponent struct {
ID uint64 `json:"id" gorm:"primaryKey;type:bigint;autoIncrement;comment:'unique identifier'"`
@@ -115,7 +117,7 @@ func BuiltinComponentGets(ctx *ctx.Context, query string, disabled int) ([]*Buil
var lst []*BuiltinComponent
err := session.Order("ident ASC").Find(&lst).Error
err := session.Order("disabled ASC, updated_at DESC, ident ASC").Find(&lst).Error
return lst, err
}

View File

@@ -12,19 +12,26 @@ import (
// BuiltinMetric represents a metric along with its metadata.
type BuiltinMetric struct {
ID int64 `json:"id" gorm:"primaryKey;type:bigint;autoIncrement;comment:'unique identifier'"`
UUID int64 `json:"uuid" gorm:"type:bigint;not null;default:0;comment:'uuid'"`
Collector string `json:"collector" gorm:"uniqueIndex:idx_collector_typ_name;type:varchar(191);not null;index:idx_collector,sort:asc;comment:'type of collector'"`
Typ string `json:"typ" gorm:"uniqueIndex:idx_collector_typ_name;type:varchar(191);not null;index:idx_typ,sort:asc;comment:'type of metric'"`
Name string `json:"name" gorm:"uniqueIndex:idx_collector_typ_name;type:varchar(191);not null;index:idx_builtinmetric_name,sort:asc;comment:'name of metric'"`
Unit string `json:"unit" gorm:"type:varchar(191);not null;comment:'unit of metric'"`
Note string `json:"note" gorm:"type:varchar(4096);not null;comment:'description of metric'"`
Lang string `json:"lang" gorm:"uniqueIndex:idx_collector_typ_name;type:varchar(191);not null;default:'zh';index:idx_lang,sort:asc;comment:'language'"`
Expression string `json:"expression" gorm:"type:varchar(4096);not null;comment:'expression of metric'"`
CreatedAt int64 `json:"created_at" gorm:"type:bigint;not null;default:0;comment:'create time'"`
CreatedBy string `json:"created_by" gorm:"type:varchar(191);not null;default:'';comment:'creator'"`
UpdatedAt int64 `json:"updated_at" gorm:"type:bigint;not null;default:0;comment:'update time'"`
UpdatedBy string `json:"updated_by" gorm:"type:varchar(191);not null;default:'';comment:'updater'"`
ID int64 `json:"id" gorm:"primaryKey;type:bigint;autoIncrement;comment:'unique identifier'"`
UUID int64 `json:"uuid" gorm:"type:bigint;not null;default:0;comment:'uuid'"`
Collector string `json:"collector" gorm:"type:varchar(191);not null;index:idx_collector,sort:asc;comment:'type of collector'"`
Typ string `json:"typ" gorm:"type:varchar(191);not null;index:idx_typ,sort:asc;comment:'type of metric'"`
Name string `json:"name" gorm:"type:varchar(191);not null;index:idx_builtinmetric_name,sort:asc;comment:'name of metric'"`
Unit string `json:"unit" gorm:"type:varchar(191);not null;comment:'unit of metric'"`
Note string `json:"note" gorm:"type:varchar(4096);not null;comment:'description of metric'"`
Lang string `json:"lang" gorm:"type:varchar(191);not null;default:'zh';index:idx_lang,sort:asc;comment:'language'"`
Translation []Translation `json:"translation" gorm:"type:text;serializer:json;comment:'translation of metric'"`
Expression string `json:"expression" gorm:"type:varchar(4096);not null;comment:'expression of metric'"`
CreatedAt int64 `json:"created_at" gorm:"type:bigint;not null;default:0;comment:'create time'"`
CreatedBy string `json:"created_by" gorm:"type:varchar(191);not null;default:'';comment:'creator'"`
UpdatedAt int64 `json:"updated_at" gorm:"type:bigint;not null;default:0;comment:'update time'"`
UpdatedBy string `json:"updated_by" gorm:"type:varchar(191);not null;default:'';comment:'updater'"`
}
type Translation struct {
Lang string `json:"lang"`
Name string `json:"name"`
Note string `json:"note"`
}
func (bm *BuiltinMetric) TableName() string {
@@ -36,6 +43,10 @@ func (bm *BuiltinMetric) TableOptions() string {
}
func (bm *BuiltinMetric) Verify() error {
if len(bm.Translation) == 0 {
return errors.New("translation is required")
}
bm.Collector = strings.TrimSpace(bm.Collector)
if bm.Collector == "" {
return errors.New("collector is blank")
@@ -46,17 +57,12 @@ func (bm *BuiltinMetric) Verify() error {
return errors.New("type is blank")
}
bm.Name = strings.TrimSpace(bm.Name)
if bm.Name == "" {
return errors.New("name is blank")
}
return nil
}
func BuiltinMetricExists(ctx *ctx.Context, bm *BuiltinMetric) (bool, error) {
var count int64
err := DB(ctx).Model(bm).Where("lang = ? and collector = ? and typ = ? and name = ?", bm.Lang, bm.Collector, bm.Typ, bm.Name).Count(&count).Error
err := DB(ctx).Model(bm).Where("expression = ? and collector = ? and typ = ?", bm.Expression, bm.Collector, bm.Typ).Count(&count).Error
if err != nil {
return false, err
}
@@ -88,19 +94,9 @@ func (bm *BuiltinMetric) Update(ctx *ctx.Context, req BuiltinMetric) error {
return err
}
if bm.Lang != req.Lang && bm.Collector != req.Collector && bm.Typ != req.Typ && bm.Name != req.Name {
exists, err := BuiltinMetricExists(ctx, &req)
if err != nil {
return err
}
if exists {
return errors.New("builtin metric already exists")
}
}
req.UpdatedAt = time.Now().Unix()
req.CreatedAt = bm.CreatedAt
req.CreatedBy = bm.CreatedBy
req.Lang = bm.Lang
req.UUID = bm.UUID
return DB(ctx).Model(bm).Select("*").Updates(req).Error
@@ -122,17 +118,9 @@ func BuiltinMetricGets(ctx *ctx.Context, lang, collector, typ, query, unit strin
return lst, err
}
// BuiltinMetricCount returns the number of builtin metrics matching the
// given filters. It applies the same filter set as the paged listing via
// builtinMetricQueryBuild (blank filter values are skipped there).
func BuiltinMetricCount(ctx *ctx.Context, lang, collector, typ, query, unit string) (int64, error) {
	session := DB(ctx).Model(&BuiltinMetric{})
	session = builtinMetricQueryBuild(lang, collector, session, typ, query, unit)

	var cnt int64
	err := session.Count(&cnt).Error
	return cnt, err
}
func builtinMetricQueryBuild(lang, collector string, session *gorm.DB, typ string, query, unit string) *gorm.DB {
session = session.Where("updated_by != ?", SYSTEM)
if lang != "" {
session = session.Where("lang = ?", lang)
}
@@ -183,7 +171,7 @@ func BuiltinMetricGet(ctx *ctx.Context, where string, args ...interface{}) (*Bui
func BuiltinMetricTypes(ctx *ctx.Context, lang, collector, query string) ([]string, error) {
var typs []string
session := DB(ctx).Model(&BuiltinMetric{})
session := DB(ctx).Model(&BuiltinMetric{}).Where("updated_by != ?", SYSTEM)
if lang != "" {
session = session.Where("lang = ?", lang)
}
@@ -202,7 +190,7 @@ func BuiltinMetricTypes(ctx *ctx.Context, lang, collector, query string) ([]stri
func BuiltinMetricCollectors(ctx *ctx.Context, lang, typ, query string) ([]string, error) {
var collectors []string
session := DB(ctx).Model(&BuiltinMetric{})
session := DB(ctx).Model(&BuiltinMetric{}).Where("updated_by != ?", SYSTEM)
if lang != "" {
session = session.Where("lang = ?", lang)
}

View File

@@ -118,7 +118,7 @@ func BuiltinPayloadGet(ctx *ctx.Context, where string, args ...interface{}) (*Bu
}
func BuiltinPayloadGets(ctx *ctx.Context, componentId uint64, typ, cate, query string) ([]*BuiltinPayload, error) {
session := DB(ctx)
session := DB(ctx).Where("updated_by != ?", SYSTEM)
if typ != "" {
session = session.Where("type = ?", typ)
}
@@ -146,7 +146,7 @@ func BuiltinPayloadGets(ctx *ctx.Context, componentId uint64, typ, cate, query s
// get cates of BuiltinPayload by type and component, return []string
func BuiltinPayloadCates(ctx *ctx.Context, typ string, componentID uint64) ([]string, error) {
var cates []string
err := DB(ctx).Model(new(BuiltinPayload)).Where("type = ? and component_id = ?", typ, componentID).Distinct("cate").Pluck("cate", &cates).Error
err := DB(ctx).Model(new(BuiltinPayload)).Where("type = ? and component_id = ? and updated_by != ?", typ, componentID, SYSTEM).Distinct("cate").Pluck("cate", &cates).Error
return cates, err
}

View File

@@ -5,6 +5,7 @@ import (
"log"
"os"
"regexp"
"sync"
"time"
"github.com/ccfos/nightingale/v6/pkg/ctx"
@@ -40,13 +41,68 @@ var (
)
const (
SALT = "salt"
RSA_PRIVATE_KEY = "rsa_private_key"
RSA_PUBLIC_KEY = "rsa_public_key"
RSA_PASSWORD = "rsa_password"
JWT_SIGNING_KEY = "jwt_signing_key"
SALT = "salt"
RSA_PRIVATE_KEY = "rsa_private_key"
RSA_PUBLIC_KEY = "rsa_public_key"
RSA_PASSWORD = "rsa_password"
JWT_SIGNING_KEY = "jwt_signing_key"
PHONE_ENCRYPTION_ENABLED = "phone_encryption_enabled" // 手机号加密开关
)
// phoneEncryptionCache is the process-local cache of the phone-number
// encryption settings, so callers do not hit the configs table on every
// access. Access is guarded by the embedded RWMutex; loaded reports
// whether LoadPhoneEncryptionConfig has populated the cache at least once.
var (
	phoneEncryptionCache struct {
		sync.RWMutex
		enabled    bool   // whether phone encryption is switched on
		privateKey []byte // RSA private key bytes (presumably PEM — confirm with key producer)
		publicKey  []byte // RSA public key bytes
		password   string // passphrase associated with the RSA key
		loaded     bool   // true once the cache has been loaded
	}
)
// LoadPhoneEncryptionConfig reads the phone-number encryption switch and the
// RSA key material from the configs store and publishes them into the
// process-local cache. On success the cache is marked as loaded; on failure
// the cache is left untouched and the first error is returned.
func LoadPhoneEncryptionConfig(ctx *ctx.Context) error {
	on, err := GetPhoneEncryptionEnabled(ctx)
	if err != nil {
		return errors.WithMessage(err, "failed to get phone encryption enabled")
	}

	priv, pub, pass, err := GetRSAKeys(ctx)
	if err != nil {
		return errors.WithMessage(err, "failed to get RSA keys")
	}

	c := &phoneEncryptionCache
	c.Lock()
	defer c.Unlock()

	c.enabled = on
	c.privateKey = priv
	c.publicKey = pub
	c.password = pass
	c.loaded = true

	logger.Debugf("Phone encryption config loaded: enabled=%v", on)
	return nil
}
// GetPhoneEncryptionConfigFromCache returns the cached phone-number
// encryption settings under a read lock. loaded is false until
// LoadPhoneEncryptionConfig has run successfully at least once.
func GetPhoneEncryptionConfigFromCache() (enabled bool, publicKey []byte, privateKey []byte, password string, loaded bool) {
	c := &phoneEncryptionCache
	c.RLock()
	defer c.RUnlock()

	enabled = c.enabled
	publicKey = c.publicKey
	privateKey = c.privateKey
	password = c.password
	loaded = c.loaded
	return
}
// RefreshPhoneEncryptionCache re-reads the phone-number encryption settings
// from the configs store into the cache. Call it after the configuration has
// been modified so readers observe the new values.
func RefreshPhoneEncryptionCache(ctx *ctx.Context) error {
	err := LoadPhoneEncryptionConfig(ctx)
	return err
}
func InitJWTSigningKey(ctx *ctx.Context) string {
val, err := ConfigsGet(ctx, JWT_SIGNING_KEY)
if err != nil {
@@ -198,6 +254,41 @@ func ConfigsGetFlashDutyAppKey(ctx *ctx.Context) (string, error) {
return configs[0].Cval, nil
}
// GetPhoneEncryptionEnabled reports whether phone-number encryption is
// switched on in the configs store. The stored values "true" and "1" both
// count as enabled; any other value is treated as disabled.
func GetPhoneEncryptionEnabled(ctx *ctx.Context) (bool, error) {
	raw, err := ConfigsGet(ctx, PHONE_ENCRYPTION_ENABLED)
	if err != nil {
		return false, err
	}

	switch raw {
	case "true", "1":
		return true, nil
	default:
		return false, nil
	}
}
// SetPhoneEncryptionEnabled persists the phone-number encryption switch to
// the configs store, writing the literal string "true" or "false".
func SetPhoneEncryptionEnabled(ctx *ctx.Context, enabled bool) error {
	state := "false"
	if enabled {
		state = "true"
	}

	return ConfigsSet(ctx, PHONE_ENCRYPTION_ENABLED, state)
}
// GetRSAKeys fetches the RSA private key, public key and key passphrase from
// the configs store. The key values are returned as raw bytes; the passphrase
// is returned as a string. The first failing lookup aborts with a wrapped error.
func GetRSAKeys(ctx *ctx.Context) (privateKey []byte, publicKey []byte, password string, err error) {
	var priv, pub, pass string

	if priv, err = ConfigsGet(ctx, RSA_PRIVATE_KEY); err != nil {
		return nil, nil, "", errors.WithMessage(err, "failed to get RSA private key")
	}
	if pub, err = ConfigsGet(ctx, RSA_PUBLIC_KEY); err != nil {
		return nil, nil, "", errors.WithMessage(err, "failed to get RSA public key")
	}
	if pass, err = ConfigsGet(ctx, RSA_PASSWORD); err != nil {
		return nil, nil, "", errors.WithMessage(err, "failed to get RSA password")
	}

	return []byte(priv), []byte(pub), pass, nil
}
func ConfigsSelectByCkey(ctx *ctx.Context, ckey string) ([]Configs, error) {
if !ctx.IsCenter {
return []Configs{}, nil

View File

@@ -13,6 +13,7 @@ import (
"github.com/pkg/errors"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/str"
"gorm.io/gorm"
)
type Datasource struct {
@@ -140,7 +141,7 @@ func (ds *Datasource) Update(ctx *ctx.Context, selectField interface{}, selectFi
if ds.UpdatedAt == 0 {
ds.UpdatedAt = time.Now().Unix()
}
return DB(ctx).Model(ds).Select(selectField, selectFields...).Updates(ds).Error
return DB(ctx).Model(ds).Session(&gorm.Session{SkipHooks: true}).Select(selectField, selectFields...).Updates(ds).Error
}
func (ds *Datasource) Add(ctx *ctx.Context) error {

View File

@@ -16,7 +16,7 @@ type EventPipeline struct {
TeamIds []int64 `json:"team_ids" gorm:"type:text;serializer:json"`
TeamNames []string `json:"team_names" gorm:"-"`
Description string `json:"description" gorm:"type:varchar(255)"`
FilterEnable bool `json:"filter_enable" gorm:"type:bigint"`
FilterEnable bool `json:"filter_enable" gorm:"type:boolean"`
LabelFilters []TagFilter `json:"label_filters" gorm:"type:text;serializer:json"`
AttrFilters []TagFilter `json:"attribute_filters" gorm:"type:text;serializer:json"`
ProcessorConfigs []ProcessorConfig `json:"processors" gorm:"type:text;serializer:json"`

View File

@@ -209,7 +209,6 @@ func (t MsgTplList) IfUsed(nr *NotifyRule) bool {
const (
DingtalkTitle = `{{if $event.IsRecovered}} Recovered {{else}}Triggered{{end}}: {{$event.RuleName}}`
FeishuCardTitle = `🔔 {{$event.RuleName}}`
FeishuAppTitle = `{{- if $event.IsRecovered }}🔔 ﹝恢复﹞ {{$event.RuleName}}{{- else }}🔔 ﹝告警﹞ {{$event.RuleName}}{{- end -}}`
LarkCardTitle = `🔔 {{$event.RuleName}}`
)
@@ -249,12 +248,7 @@ var NewTplMap = map[string]string{
{{- end}}
{{end}}
{{$domain := "http://127.0.0.1:17000" }}
{{$mutelink := print $domain "/alert-mutes/add?busiGroup=" $event.GroupId "&cate=" $event.Cate "&datasource_ids=" $event.DatasourceId "&prod=" $event.RuleProd}}
{{- range $key, $value := $event.TagsMap}}
{{- $encodedValue := $value | urlquery }}
{{- $mutelink = print $mutelink "&tags=" $key "%3D" $encodedValue}}
{{- end}}
[事件详情]({{$domain}}/alert-his-events/{{$event.Id}}) | [屏蔽1小时]({{$mutelink}}) | [查看曲线]({{$domain}}/metric/explorer?data_source_id={{$event.DatasourceId}}&data_source_name=prometheus&mode=graph&prom_ql={{$event.PromQl|urlquery}})`,
[事件详情]({{$domain}}/alert-his-events/{{$event.Id}}) | [屏蔽1小时]({{$domain}}/alert-mutes/add?__event_id={{$event.Id}}){{if eq $event.Cate "prometheus"}} | [查看曲线]({{$domain}}/metric/explorer?__event_id={{$event.Id}}&mode=graph){{end}}`,
Email: `<!DOCTYPE html>
<html lang="en">
<head>
@@ -483,8 +477,8 @@ var NewTplMap = map[string]string{
{{if $event.IsRecovered}}恢复时间:{{timeformat $event.LastEvalTime}}{{else}}触发时间: {{timeformat $event.TriggerTime}}
触发时值: {{$event.TriggerValue}}{{end}}
发送时间: {{timestamp}}{{$domain := "http://127.0.0.1:17000" }}
事件详情: {{$domain}}/alert-his-events/{{$event.Id}}{{$muteUrl := print $domain "/alert-mutes/add?busiGroup=" $event.GroupId "&cate=" $event.Cate "&datasource_ids=" $event.DatasourceId "&prod=" $event.RuleProd}}{{range $key, $value := $event.TagsMap}}{{$muteUrl = print $muteUrl "&tags=" $key "%3D" $value}}{{end}}
屏蔽1小时: {{ unescaped $muteUrl }}`,
事件详情: {{$domain}}/alert-his-events/{{$event.Id}}
屏蔽1小时: {{$domain}}/alert-mutes/add?__event_id={{$event.Id}}`,
FeishuCard: `{{- if $event.IsRecovered -}}
{{- if ne $event.Cate "host" -}}
**告警集群:** {{$event.Cluster}}{{end}}
@@ -511,7 +505,7 @@ var NewTplMap = map[string]string{
{{- end}}
{{- end}}
{{$domain := "http://请联系管理员修改通知模板将域名替换为实际的域名" }}
[事件详情]({{$domain}}/alert-his-events/{{$event.Id}})|[屏蔽1小时]({{$domain}}/alert-mutes/add?busiGroup={{$event.GroupId}}&cate={{$event.Cate}}&datasource_ids={{$event.DatasourceId}}&prod={{$event.RuleProd}}{{range $key, $value := $event.TagsMap}}&tags={{$key}}%3D{{$value}}{{end}})|[查看曲线]({{$domain}}/metric/explorer?data_source_id={{$event.DatasourceId}}&data_source_name=prometheus&mode=graph&prom_ql={{$event.PromQl|escape}})`,
[事件详情]({{$domain}}/alert-his-events/{{$event.Id}})|[屏蔽1小时]({{$domain}}/alert-mutes/add?__event_id={{$event.Id}}){{if eq $event.Cate "prometheus"}}|[查看曲线]({{$domain}}/metric/explorer?__event_id={{$event.Id}}&mode=graph){{end}}`,
EmailSubject: `{{if $event.IsRecovered}}Recovered{{else}}Triggered{{end}}: {{$event.RuleName}} {{$event.TagsJSON}}`,
Mm: `级别状态: S{{$event.Severity}} {{if $event.IsRecovered}}Recovered{{else}}Triggered{{end}}
规则名称: {{$event.RuleName}}{{if $event.RuleNote}}
@@ -540,7 +534,7 @@ var NewTplMap = map[string]string{
{{$time_duration := sub now.Unix $event.FirstTriggerTime }}{{if $event.IsRecovered}}{{$time_duration = sub $event.LastEvalTime $event.FirstTriggerTime }}{{end}}**距离首次告警**: {{humanizeDurationInterface $time_duration}}
**发送时间**: {{timestamp}}
{{$domain := "http://请联系管理员修改通知模板将域名替换为实际的域名" }}
[事件详情]({{$domain}}/alert-his-events/{{$event.Id}})|[屏蔽1小时]({{$domain}}/alert-mutes/add?busiGroup={{$event.GroupId}}&cate={{$event.Cate}}&datasource_ids={{$event.DatasourceId}}&prod={{$event.RuleProd}}{{range $key, $value := $event.TagsMap}}&tags={{$key}}%3D{{$value}}{{end}})|[查看曲线]({{$domain}}/metric/explorer?data_source_id={{$event.DatasourceId}}&data_source_name=prometheus&mode=graph&prom_ql={{$event.PromQl|escape}})`,
[事件详情]({{$domain}}/alert-his-events/{{$event.Id}})|[屏蔽1小时]({{$domain}}/alert-mutes/add?__event_id={{$event.Id}}){{if eq $event.Cate "prometheus"}}|[查看曲线]({{$domain}}/metric/explorer?__event_id={{$event.Id}}&mode=graph){{end}}`,
Lark: `级别状态: S{{$event.Severity}} {{if $event.IsRecovered}}Recovered{{else}}Triggered{{end}}
规则名称: {{$event.RuleName}}{{if $event.RuleNote}}
规则备注: {{$event.RuleNote}}{{end}}
@@ -550,7 +544,7 @@ var NewTplMap = map[string]string{
发送时间: {{timestamp}}
{{$domain := "http://请联系管理员修改通知模板将域名替换为实际的域名" }}
事件详情: {{$domain}}/alert-his-events/{{$event.Id}}
屏蔽1小时: {{$domain}}/alert-mutes/add?busiGroup={{$event.GroupId}}&cate={{$event.Cate}}&datasource_ids={{$event.DatasourceId}}&prod={{$event.RuleProd}}{{range $key, $value := $event.TagsMap}}&tags={{$key}}%3D{{$value}}{{end}}`,
屏蔽1小时: {{$domain}}/alert-mutes/add?__event_id={{$event.Id}}`,
LarkCard: `{{ if $event.IsRecovered }}
{{- if ne $event.Cate "host"}}
**告警集群:** {{$event.Cluster}}{{end}}
@@ -573,7 +567,7 @@ var NewTplMap = map[string]string{
{{if $event.RuleNote }}**告警描述:** **{{$event.RuleNote}}**{{end}}
{{- end -}}
{{$domain := "http://请联系管理员修改通知模板将域名替换为实际的域名" }}
[事件详情]({{$domain}}/alert-his-events/{{$event.Id}})|[屏蔽1小时]({{$domain}}/alert-mutes/add?busiGroup={{$event.GroupId}}&cate={{$event.Cate}}&datasource_ids={{$event.DatasourceId}}&prod={{$event.RuleProd}}{{range $key, $value := $event.TagsMap}}&tags={{$key}}%3D{{$value}}{{end}})|[查看曲线]({{$domain}}/metric/explorer?data_source_id={{$event.DatasourceId}}&data_source_name=prometheus&mode=graph&prom_ql={{$event.PromQl|escape}})`,
[事件详情]({{$domain}}/alert-his-events/{{$event.Id}})|[屏蔽1小时]({{$domain}}/alert-mutes/add?__event_id={{$event.Id}}){{if eq $event.Cate "prometheus"}}|[查看曲线]({{$domain}}/metric/explorer?__event_id={{$event.Id}}&mode=graph){{end}}`,
SlackWebhook: `{{ if $event.IsRecovered }}
{{- if ne $event.Cate "host"}}
*Alarm cluster:* {{$event.Cluster}}{{end}}
@@ -600,8 +594,8 @@ var NewTplMap = map[string]string{
{{$domain := "http://127.0.0.1:17000" }}
<{{$domain}}/alert-his-events/{{$event.Id}}|Event Details>
<{{$domain}}/alert-mutes/add?busiGroup={{$event.GroupId}}&cate={{$event.Cate}}&datasource_ids={{$event.DatasourceId}}&prod={{$event.RuleProd}}{{range $key, $value := $event.TagsMap}}&tags={{$key}}%3D{{$value}}{{end}}|Block for 1 hour>
<{{$domain}}/metric/explorer?data_source_id={{$event.DatasourceId}}&data_source_name=prometheus&mode=graph&prom_ql={{$event.PromQl|escape}}|View Curve>`,
<{{$domain}}/alert-mutes/add?__event_id={{$event.Id}}|Block for 1 hour>
<{{$domain}}/metric/explorer?__event_id={{$event.Id}}&mode=graph|View Curve>`,
Discord: `**Level Status**: {{if $event.IsRecovered}}S{{$event.Severity}} Recovered{{else}}S{{$event.Severity}} Triggered{{end}}
**Rule Title**: {{$event.RuleName}}{{if $event.RuleNote}}
**Rule Note**: {{$event.RuleNote}}{{end}}{{if $event.TargetIdent}}
@@ -613,12 +607,7 @@ var NewTplMap = map[string]string{
**Send Time**: {{timestamp}}
{{$domain := "http://127.0.0.1:17000" }}
{{$mutelink := print $domain "/alert-mutes/add?busiGroup=" $event.GroupId "&cate=" $event.Cate "&datasource_ids=" $event.DatasourceId "&prod=" $event.RuleProd}}
{{- range $key, $value := $event.TagsMap}}
{{- $encodedValue := $value | urlquery }}
{{- $mutelink = print $mutelink "&tags=" $key "%3D" $encodedValue}}
{{- end}}
[Event Details]({{$domain}}/alert-his-events/{{$event.Id}}) | [Silence 1h]({{$mutelink}}) | [View Graph]({{$domain}}/metric/explorer?data_source_id={{$event.DatasourceId}}&data_source_name=prometheus&mode=graph&prom_ql={{$event.PromQl|urlquery}})`,
[Event Details]({{$domain}}/alert-his-events/{{$event.Id}}) | [Silence 1h]({{$domain}}/alert-mutes/add?__event_id={{$event.Id}}) | [View Graph]({{$domain}}/metric/explorer?__event_id={{$event.Id}}&mode=graph)`,
MattermostWebhook: `{{ if $event.IsRecovered }}
{{- if ne $event.Cate "host"}}
@@ -640,26 +629,7 @@ var NewTplMap = map[string]string{
{{if $event.RuleNote }}**Alarm description:** **{{$event.RuleNote}}**{{end}}
{{- end -}}
{{$domain := "http://127.0.0.1:17000" }}
[Event Details]({{$domain}}/alert-his-events/{{$event.Id}})|[Block for 1 hour]({{$domain}}/alert-mutes/add?busiGroup={{$event.GroupId}}&cate={{$event.Cate}}&datasource_ids={{$event.DatasourceId}}&prod={{$event.RuleProd}}{{range $key, $value := $event.TagsMap}}&tags={{$key}}%3D{{$value}}{{end}})|[View Curve]({{$domain}}/metric/explorer?data_source_id={{$event.DatasourceId}}&data_source_name=prometheus&mode=graph&prom_ql={{$event.PromQl|escape}})`,
FeishuApp: `{{- if $event.IsRecovered -}}
{{- if ne $event.Cate "host" -}}
**告警集群:** {{$event.Cluster}}{{end}}
**级别状态:** S{{$event.Severity}} Recovered
**告警名称:** {{$event.RuleName}}
**事件标签:** {{$event.TagsJSON}}
**恢复时间:** {{timeformat $event.LastEvalTime}}
**告警描述:** **服务已恢复**
{{- else }}
{{- if ne $event.Cate "host"}}
**告警集群:** {{$event.Cluster}}{{end}}
**级别状态:** S{{$event.Severity}} Triggered
**告警名称:** {{$event.RuleName}}
**事件标签:** {{$event.TagsJSON}}
**触发时间:** {{timeformat $event.TriggerTime}}
**发送时间:** {{timestamp}}
**触发时值:** {{$event.TriggerValue}}
{{if $event.RuleNote }}**告警描述:** **{{$event.RuleNote}}**{{end}}
{{- end -}}`,
[Event Details]({{$domain}}/alert-his-events/{{$event.Id}})|[Block for 1 hour]({{$domain}}/alert-mutes/add?__event_id={{$event.Id}})|[View Curve]({{$domain}}/metric/explorer?__event_id={{$event.Id}}&mode=graph)`,
}
var MsgTplMap = []MessageTemplate{
@@ -678,7 +648,6 @@ var MsgTplMap = []MessageTemplate{
{Name: "Lark", Ident: Lark, Weight: 5, Content: map[string]string{"content": NewTplMap[Lark]}},
{Name: "Feishu", Ident: Feishu, Weight: 4, Content: map[string]string{"content": NewTplMap[Feishu]}},
{Name: "FeishuCard", Ident: FeishuCard, Weight: 4, Content: map[string]string{"title": FeishuCardTitle, "content": NewTplMap[FeishuCard]}},
{Name: "FeishuApp", Ident: FeishuApp, Weight: 4, Content: map[string]string{"title": FeishuAppTitle, "content": NewTplMap[FeishuApp]}},
{Name: "Wecom", Ident: Wecom, Weight: 3, Content: map[string]string{"content": NewTplMap[Wecom]}},
{Name: "Dingtalk", Ident: Dingtalk, Weight: 2, Content: map[string]string{"title": NewTplMap[EmailSubject], "content": NewTplMap[Dingtalk]}},
{Name: "Email", Ident: Email, Weight: 1, Content: map[string]string{"subject": NewTplMap[EmailSubject], "content": NewTplMap[Email]}},
@@ -724,19 +693,33 @@ func (t *MessageTemplate) Upsert(ctx *ctx.Context, ident string) error {
return tpl.Update(ctx, *t)
}
// GetDefs returns the template preamble definitions ($events, $event,
// $labels, $value) that are prepended to every notification template before
// rendering. It is a package-level variable so alternative builds can swap
// in a different implementation.
//
// Initializing the variable directly (instead of assigning it in an init
// function, as before) removes the init side effect and guarantees the hook
// is set during package variable initialization, which runs before any init.
var GetDefs func(map[string]interface{}) []string = getDefs

// getDefs is the default implementation of GetDefs. The render-data argument
// is unused here but kept in the signature so custom implementations can
// derive definitions from the render context.
func getDefs(_ map[string]interface{}) []string {
	return []string{
		"{{ $events := .events }}",
		"{{ $event := index $events 0 }}",
		"{{ $labels := $event.TagsMap }}",
		"{{ $value := $event.TriggerValue }}",
	}
}
func (t *MessageTemplate) RenderEvent(events []*AlertCurEvent) map[string]interface{} {
if t == nil {
return nil
}
renderData := make(map[string]interface{})
renderData["events"] = events
// event 内容渲染到 messageTemplate
tplContent := make(map[string]interface{})
for key, msgTpl := range t.Content {
var defs = []string{
"{{ $events := . }}",
"{{ $event := index $events 0 }}",
"{{ $labels := $event.TagsMap }}",
"{{ $value := $event.TriggerValue }}",
}
defs := GetDefs(renderData)
var body bytes.Buffer
if t.NotifyChannelIdent == "email" {
@@ -749,7 +732,7 @@ func (t *MessageTemplate) RenderEvent(events []*AlertCurEvent) map[string]interf
}
var body bytes.Buffer
if err = tpl.Execute(&body, events); err != nil {
if err = tpl.Execute(&body, renderData); err != nil {
logger.Errorf("failed to execute template: %v", err)
tplContent[key] = fmt.Sprintf("failed to execute template: %v", err)
continue
@@ -764,7 +747,7 @@ func (t *MessageTemplate) RenderEvent(events []*AlertCurEvent) map[string]interf
continue
}
if err = tpl.Execute(&body, events); err != nil {
if err = tpl.Execute(&body, renderData); err != nil {
logger.Errorf("failed to execute template: %v events: %v", err, events)
continue
}
@@ -785,7 +768,7 @@ func (t *MessageTemplate) RenderEvent(events []*AlertCurEvent) map[string]interf
continue
}
if err = tpl.Execute(&body, events); err != nil {
if err = tpl.Execute(&body, renderData); err != nil {
logger.Errorf("failed to execute template: %v events: %v", err, events)
tplContent[key] = fmt.Sprintf("failed to execute template: %v", err)
continue

View File

@@ -72,8 +72,10 @@ func MigrateTables(db *gorm.DB) error {
if isPostgres(db) {
dts = append(dts, &models.PostgresBuiltinComponent{})
DropUniqueFiledLimit(db, &models.PostgresBuiltinComponent{}, "idx_ident", "idx_ident")
} else {
dts = append(dts, &models.BuiltinComponent{})
DropUniqueFiledLimit(db, &models.BuiltinComponent{}, "idx_ident", "idx_ident")
}
if !db.Migrator().HasColumn(&imodels.TaskSchedulerHealth{}, "scheduler") {
@@ -122,11 +124,18 @@ func MigrateTables(db *gorm.DB) error {
}
}
DropUniqueFiledLimit(db, &Configs{}, "ckey", "configs_ckey_key")
InsertPermPoints(db)
// 删除 builtin_metrics 表的 idx_collector_typ_name 唯一索引
DropUniqueFiledLimit(db, &models.BuiltinMetric{}, "idx_collector_typ_name", "idx_collector_typ_name")
return nil
}
func DropUniqueFiledLimit(db *gorm.DB, dst interface{}, uniqueFiled string, pgUniqueFiled string) { // UNIQUE KEY (`ckey`)
// 先检查表是否存在,如果不存在则直接返回
if !db.Migrator().HasTable(dst) {
return
}
if db.Migrator().HasIndex(dst, uniqueFiled) {
err := db.Migrator().DropIndex(dst, uniqueFiled) //mysql DROP INDEX
if err != nil {
@@ -157,110 +166,6 @@ func columnHasIndex(db *gorm.DB, dst interface{}, indexColumn string) bool {
return false
}
// InsertPermPoints seeds the role-operation table with the permission points
// granted to the "Standard" role. Each operation is inserted only if it is
// not already present, so the function is idempotent and safe to run on
// every migration. Lookup or insert failures are logged and skipped so one
// bad row cannot abort the rest of the seeding.
func InsertPermPoints(db *gorm.DB) {
	// The repeated struct literals of the previous version are replaced by a
	// data-driven loop: every entry below belongs to the Standard role.
	operations := []string{
		"/alert-mutes/put",
		"/log/index-patterns",
		"/help/variable-configs",
		"/ibex-settings",
		"/notification-templates",
		"/notification-templates/add",
		"/notification-templates/put",
		"/notification-templates/del",
		"/notification-rules",
		"/notification-rules/add",
		"/notification-rules/put",
		"/notification-rules/del",
		"/event-pipelines",
		"/event-pipelines/add",
		"/event-pipelines/put",
		"/event-pipelines/del",
	}

	for _, operation := range operations {
		op := models.RoleOperation{
			RoleName:  "Standard",
			Operation: operation,
		}

		session := db.Session(&gorm.Session{}).Model(&models.RoleOperation{})

		var count int64
		if err := session.Where("operation = ? AND role_name = ?", op.Operation, op.RoleName).Count(&count).Error; err != nil {
			logger.Errorf("check role operation exists failed, %v", err)
			continue
		}
		if count > 0 {
			// Already seeded; keep the migration idempotent.
			continue
		}

		if err := session.Create(&op).Error; err != nil {
			logger.Errorf("insert role operation failed, %v", err)
		}
	}
}
type AlertRule struct {
ExtraConfig string `gorm:"type:text;column:extra_config"`
CronPattern string `gorm:"type:varchar(64);column:cron_pattern"`
@@ -356,6 +261,7 @@ type BoardBusigroup struct {
// Users describes columns added to the users table by migration: a belong
// tag, the last-active timestamp, and a phone column widened to
// varchar(1024) — presumably to leave room for encrypted phone numbers;
// confirm against the phone-encryption feature.
type Users struct {
	Belong         string `gorm:"column:belong;varchar(16);default:'';comment:belong"`
	LastActiveTime int64  `gorm:"column:last_active_time;type:int;default:0;comment:last_active_time"`
	Phone          string `gorm:"column:phone;type:varchar(1024);not null;default:''"`
}
type SsoConfig struct {
@@ -434,6 +340,7 @@ type NotifyRule struct {
UserGroupIds []int64 `gorm:"column:user_group_ids;type:varchar(255)"`
NotifyConfigs []models.NotifyConfig `gorm:"column:notify_configs;type:text"`
PipelineConfigs []models.PipelineConfig `gorm:"column:pipeline_configs;type:text"`
ExtraConfig interface{} `gorm:"column:extra_config;type:text"`
CreateAt int64 `gorm:"column:create_at;not null;default:0"`
CreateBy string `gorm:"column:create_by;type:varchar(64);not null;default:''"`
UpdateAt int64 `gorm:"column:update_at;not null;default:0"`

View File

@@ -2,7 +2,6 @@ package models
import (
"bytes"
"context"
"crypto/hmac"
"crypto/sha256"
"crypto/tls"
@@ -21,10 +20,10 @@ import (
"sort"
"strconv"
"strings"
"syscall"
"time"
"unicode/utf8"
"github.com/ccfos/nightingale/v6/pkg/cmdx"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/poster"
"github.com/ccfos/nightingale/v6/pkg/tplx"
@@ -33,7 +32,6 @@ import (
"github.com/pkg/errors"
"github.com/toolkits/pkg/file"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/sys"
"gopkg.in/gomail.v2"
)
@@ -95,6 +93,9 @@ type UserInfo struct {
// FlashDutyRequestConfig holds the delivery settings used when forwarding
// alert events to a FlashDuty integration endpoint.
type FlashDutyRequestConfig struct {
	Proxy          string `json:"proxy"`           // proxy address for the outgoing request
	IntegrationUrl string `json:"integration_url"` // FlashDuty integration endpoint URL
	Timeout        int    `json:"timeout"`         // request timeout in milliseconds
	RetryTimes     int    `json:"retry_times"`     // number of retry attempts
	RetrySleep     int    `json:"retry_sleep"`     // wait between retries in milliseconds
}
// ParamItem 自定义参数项
@@ -196,10 +197,8 @@ func (ncc *NotifyChannelConfig) SendScript(events []*AlertCurEvent, tpl map[stri
cmd.Stdout = &buf
cmd.Stderr = &buf
err := startCmd(cmd)
if err != nil {
return "", "", fmt.Errorf("failed to start script: %v", err)
}
err, isTimeout := cmdx.RunTimeout(cmd, time.Duration(config.Timeout)*time.Millisecond)
logger.Infof("event_script_notify_result: exec %s output: %s isTimeout: %v err: %v stdin: %s", fpath, buf.String(), isTimeout, err, string(getStdinBytes(events, tpl, params, sendtos)))
res := buf.String()
@@ -218,8 +217,6 @@ func (ncc *NotifyChannelConfig) SendScript(events []*AlertCurEvent, tpl map[stri
res = res[:validLen] + "..."
}
err, isTimeout := sys.WrapTimeout(cmd, time.Duration(config.Timeout)*time.Second)
logger.Infof("event_script_notify_result: exec %s output: %s isTimeout: %v err: %v", fpath, buf.String(), isTimeout, err)
if isTimeout {
if err == nil {
return cmd.String(), res, errors.New("timeout and killed process")
@@ -257,11 +254,6 @@ func getStdinBytes(events []*AlertCurEvent, tpl map[string]interface{}, params m
return jsonBytes
}
func startCmd(c *exec.Cmd) error {
c.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
return c.Start()
}
func NotifyChannelStatistics(ctx *ctx.Context) (*Statistics, error) {
if !ctx.IsCenter {
s, err := poster.GetByUrls[*Statistics](ctx, "/v1/n9e/statistic?name=notify_channel")
@@ -325,9 +317,20 @@ func GetHTTPClient(nc *NotifyChannelConfig) (*http.Client, error) {
}
httpConfig := nc.RequestConfig.HTTPRequestConfig
if httpConfig.Timeout == 0 {
httpConfig.Timeout = 10000
// 对于 FlashDuty 类型,优先使用 FlashDuty 配置中的超时时间
timeout := httpConfig.Timeout
if nc.RequestType == "flashduty" && nc.RequestConfig.FlashDutyRequestConfig != nil {
flashDutyTimeout := nc.RequestConfig.FlashDutyRequestConfig.Timeout
if flashDutyTimeout > 0 {
timeout = flashDutyTimeout
}
}
if timeout == 0 {
timeout = 10000 // HTTP 默认 10 秒
}
if httpConfig.Concurrency == 0 {
httpConfig.Concurrency = 5
}
@@ -357,18 +360,78 @@ func GetHTTPClient(nc *NotifyChannelConfig) (*http.Client, error) {
Proxy: proxyFunc,
TLSClientConfig: tlsConfig,
DialContext: (&net.Dialer{
Timeout: time.Duration(httpConfig.Timeout) * time.Millisecond,
Timeout: time.Duration(timeout) * time.Millisecond,
}).DialContext,
}
client := &http.Client{
Transport: transport,
Timeout: time.Duration(httpConfig.Timeout) * time.Millisecond,
Timeout: time.Duration(timeout) * time.Millisecond,
}
return client, nil
}
// makeHTTPRequest builds the outgoing notification request for a channel.
// Most channels get a plain request with the supplied headers and query
// parameters applied. Tencent Cloud SMS/voice (tx-sms / tx-voice) channels
// get their headers rewritten by setTxHeader from the request body, and
// Alibaba Cloud SMS/voice (ali-sms / ali-voice) channels have the request
// rebuilt without a body, with query and headers produced by getAliQuery
// (both helpers presumably perform provider-specific signing — confirm
// against their implementations).
func (ncc *NotifyChannelConfig) makeHTTPRequest(httpConfig *HTTPRequestConfig, url string, headers map[string]string, parameters map[string]string, body []byte) (*http.Request, error) {
	req, err := http.NewRequest(httpConfig.Method, url, bytes.NewBuffer(body))
	if err != nil {
		logger.Errorf("failed to create request: %v", err)
		return nil, err
	}
	query := req.URL.Query()

	// Set request headers; Tencent Cloud SMS/voice needs special handling.
	if ncc.Ident == "tx-sms" || ncc.Ident == "tx-voice" {
		headers = ncc.setTxHeader(headers, body)
		for key, value := range headers {
			req.Header.Add(key, value)
		}
	} else if ncc.Ident == "ali-sms" || ncc.Ident == "ali-voice" {
		// Alibaba channels: rebuild the request with no body; getAliQuery
		// derives the final query and headers from the access credentials
		// and the caller-supplied parameters.
		req, err = http.NewRequest(httpConfig.Method, url, nil)
		if err != nil {
			return nil, err
		}
		query, headers = ncc.getAliQuery(ncc.Ident, query, httpConfig.Request.Parameters["AccessKeyId"], httpConfig.Request.Parameters["AccessKeySecret"], parameters)
		for key, value := range headers {
			req.Header.Set(key, value)
		}
	} else {
		for key, value := range headers {
			req.Header.Add(key, value)
		}
	}

	// For non-Alibaba channels, caller parameters go straight into the query
	// string; Alibaba's were already folded in by getAliQuery above.
	if ncc.Ident != "ali-sms" && ncc.Ident != "ali-voice" {
		for key, value := range parameters {
			query.Add(key, value)
		}
	}
	req.URL.RawQuery = query.Encode()

	// Log the complete request for debugging.
	logger.Debugf("URL: %v, Method: %s, Headers: %+v, params: %+v, Body: %s", req.URL, req.Method, req.Header, query, string(body))
	return req, nil
}
// makeFlashDutyRequest assembles a JSON POST request for the FlashDuty
// integration endpoint. When flashDutyChannelID is non-zero, the configured
// collaboration space is forwarded as the channel_id query parameter.
func (ncc *NotifyChannelConfig) makeFlashDutyRequest(url string, bodyBytes []byte, flashDutyChannelID int64) (*http.Request, error) {
	request, err := http.NewRequest("POST", url, bytes.NewBuffer(bodyBytes))
	if err != nil {
		return nil, err
	}

	// Build the URL query; only attach channel_id when a collaboration
	// space is configured on the FlashDuty side.
	q := request.URL.Query()
	if flashDutyChannelID != 0 {
		q.Add("channel_id", strconv.FormatInt(flashDutyChannelID, 10))
	}
	request.URL.RawQuery = q.Encode()

	request.Header.Add("Content-Type", "application/json")
	return request, nil
}
func (ncc *NotifyChannelConfig) SendFlashDuty(events []*AlertCurEvent, flashDutyChannelID int64, client *http.Client) (string, error) {
// todo 每一个 channel 批量发送事件
if client == nil {
@@ -380,46 +443,57 @@ func (ncc *NotifyChannelConfig) SendFlashDuty(events []*AlertCurEvent, flashDuty
return "", err
}
req, err := http.NewRequest("POST", ncc.RequestConfig.FlashDutyRequestConfig.IntegrationUrl, bytes.NewBuffer(body))
if err != nil {
logger.Errorf("failed to create request: %v, event: %v", err, events)
return "", err
url := ncc.RequestConfig.FlashDutyRequestConfig.IntegrationUrl
retrySleep := time.Second
if ncc.RequestConfig.FlashDutyRequestConfig.RetrySleep > 0 {
retrySleep = time.Duration(ncc.RequestConfig.FlashDutyRequestConfig.RetrySleep) * time.Millisecond
}
// 设置 URL 参数
query := req.URL.Query()
if flashDutyChannelID != 0 {
// 如果 flashduty 有配置协作空间(channel_id),则传入 channel_id 参数
query.Add("channel_id", strconv.FormatInt(flashDutyChannelID, 10))
retryTimes := 3
if ncc.RequestConfig.FlashDutyRequestConfig.RetryTimes > 0 {
retryTimes = ncc.RequestConfig.FlashDutyRequestConfig.RetryTimes
}
req.URL.RawQuery = query.Encode()
req.Header.Add("Content-Type", "application/json")
// 重试机制
for i := 0; i <= 3; i++ {
logger.Infof("send flashduty req:%+v body:%+v", req, string(body))
// 把最后一次错误保存下来,后面返回,让用户在页面上也可以看到
var lastErrorMessage string
for i := 0; i <= retryTimes; i++ {
req, err := ncc.makeFlashDutyRequest(url, body, flashDutyChannelID)
if err != nil {
logger.Errorf("send_flashduty: failed to create request. url=%s request_body=%s error=%v", url, string(body), err)
return fmt.Sprintf("failed to create request. error: %v", err), err
}
// 直接使用客户端发送请求,超时时间已经在 client 中设置
resp, err := client.Do(req)
if err != nil {
logger.Errorf("send flashduty req:%+v err:%v", req, err)
time.Sleep(time.Duration(100) * time.Millisecond)
logger.Errorf("send_flashduty: http_call=fail url=%s request_body=%s error=%v times=%d", url, string(body), err, i+1)
if i < retryTimes {
// 重试等待时间,后面要放到页面上配置
time.Sleep(retrySleep)
}
lastErrorMessage = err.Error()
continue
}
defer resp.Body.Close()
// 读取响应
body, err := io.ReadAll(resp.Body)
if err != nil {
logger.Errorf("failed to read response: %v, event: %v", err, events)
// 走到这里,说明请求 Flashduty 成功,不管 Flashduty 返回了什么结果,都不判断,仅保存,给用户查看即可
// 比如服务端返回 5xx也不要重试重试可能会导致服务端数据有问题。告警事件这样的东西没有那么关键只要最终能在 UI 上看到调用结果就行
var resBody []byte
if resp.Body != nil {
defer resp.Body.Close()
resBody, err = io.ReadAll(resp.Body)
if err != nil {
logger.Errorf("send_flashduty: failed to read response. request_body=%s, error=%v", string(body), err)
resBody = []byte("failed to read response. error: " + err.Error())
}
}
logger.Infof("send flashduty req:%+v resp:%+v body:%+v err:%v", req, resp, string(body), err)
if resp.StatusCode == http.StatusOK {
return string(body), nil
}
time.Sleep(time.Duration(100) * time.Millisecond)
logger.Infof("send_flashduty: http_call=succ url=%s request_body=%s response_code=%d response_body=%s times=%d", url, string(body), resp.StatusCode, string(resBody), i+1)
return fmt.Sprintf("status_code:%d, response:%s", resp.StatusCode, string(resBody)), nil
}
return "", errors.New("failed to send request")
return lastErrorMessage, errors.New("failed to send request")
}
func (ncc *NotifyChannelConfig) SendHTTP(events []*AlertCurEvent, tpl map[string]interface{}, params map[string]string, sendtos []string, client *http.Client) (string, error) {
@@ -457,54 +531,21 @@ func (ncc *NotifyChannelConfig) SendHTTP(events []*AlertCurEvent, tpl map[string
url, headers, parameters := ncc.replaceVariables(fullTpl)
logger.Infof("url: %v, headers: %v, parameters: %v", url, headers, parameters)
req, err := http.NewRequest(httpConfig.Method, url, bytes.NewBuffer(body))
if err != nil {
logger.Errorf("failed to create request: %v, event: %v", err, events)
return "", err
}
query := req.URL.Query()
// 设置请求头 腾讯云短信、语音特殊处理
if ncc.Ident == "tx-sms" || ncc.Ident == "tx-voice" {
headers = ncc.setTxHeader(headers, body)
for key, value := range headers {
req.Header.Add(key, value)
}
} else if ncc.Ident == "ali-sms" || ncc.Ident == "ali-voice" {
req, err = http.NewRequest(httpConfig.Method, url, nil)
if err != nil {
return "", err
}
query, headers = ncc.getAliQuery(ncc.Ident, query, httpConfig.Request.Parameters["AccessKeyId"], httpConfig.Request.Parameters["AccessKeySecret"], parameters)
for key, value := range headers {
req.Header.Set(key, value)
}
} else {
for key, value := range headers {
req.Header.Add(key, value)
}
}
if ncc.Ident != "ali-sms" && ncc.Ident != "ali-voice" {
for key, value := range parameters {
query.Add(key, value)
}
}
req.URL.RawQuery = query.Encode()
// 记录完整的请求信息
logger.Debugf("URL: %v, Method: %s, Headers: %+v, params: %+v, Body: %s", req.URL, req.Method, req.Header, query, string(body))
// 重试机制
for i := 0; i <= httpConfig.RetryTimes; i++ {
var lastErrorMessage string
for i := 0; i < httpConfig.RetryTimes; i++ {
var resp *http.Response
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(httpConfig.Timeout)*time.Millisecond)
resp, err = client.Do(req.WithContext(ctx))
cancel() // 确保释放资源
req, err := ncc.makeHTTPRequest(httpConfig, url, headers, parameters, body)
if err != nil {
logger.Errorf("send_http: failed to create request. url=%s request_body=%s error=%v", url, string(body), err)
return fmt.Sprintf("failed to create request. error: %v", err), err
}
resp, err = client.Do(req)
if err != nil {
logger.Errorf("send_http: failed to send http notify. url=%s request_body=%s error=%v", url, string(body), err)
lastErrorMessage = err.Error()
time.Sleep(time.Duration(httpConfig.RetryInterval) * time.Second)
logger.Errorf("send http request failed to send http notify: %v", err)
continue
}
defer resp.Body.Close()
@@ -512,11 +553,9 @@ func (ncc *NotifyChannelConfig) SendHTTP(events []*AlertCurEvent, tpl map[string
// 读取响应
body, err := io.ReadAll(resp.Body)
logger.Debugf("send http request: %+v, response: %+v, body: %+v", req, resp, string(body))
if err != nil {
logger.Errorf("failed to send http notify: %v", err)
logger.Errorf("send_http: failed to read response. url=%s request_body=%s error=%v", url, string(body), err)
}
if resp.StatusCode == http.StatusOK {
return string(body), nil
}
@@ -524,8 +563,7 @@ func (ncc *NotifyChannelConfig) SendHTTP(events []*AlertCurEvent, tpl map[string
return "", fmt.Errorf("failed to send request, status code: %d, body: %s", resp.StatusCode, string(body))
}
return "", err
return lastErrorMessage, errors.New("all retries failed, last error: " + lastErrorMessage)
}
// getAliQuery 获取阿里云API的查询参数和请求头
@@ -932,11 +970,6 @@ func (ncc *NotifyChannelConfig) ValidateFlashDutyRequestConfig() error {
}
func (ncc *NotifyChannelConfig) Update(ctx *ctx.Context, ref NotifyChannelConfig) error {
// ref.FE2DB()
if ncc.Ident != ref.Ident {
return errors.New("cannot update ident")
}
ref.ID = ncc.ID
ref.CreateAt = ncc.CreateAt
ref.CreateBy = ncc.CreateBy
@@ -1263,8 +1296,7 @@ var NotiChMap = []*NotifyChannelConfig{
Method: "POST", Headers: map[string]string{"Content-Type": "application/json"},
Timeout: 10000, Concurrency: 5, RetryTimes: 3, RetryInterval: 100,
Request: RequestDetail{
Parameters: map[string]string{"token": "{{$params.token}}"},
Body: `{"msg_type": "text", "content": {"text": "{{$tpl.content}}"}}`,
Body: `{"msg_type": "text", "content": {"text": "{{$tpl.content}}"}}`,
},
},
},
@@ -1286,8 +1318,7 @@ var NotiChMap = []*NotifyChannelConfig{
Method: "POST", Headers: map[string]string{"Content-Type": "application/json"},
Timeout: 10000, Concurrency: 5, RetryTimes: 3, RetryInterval: 100,
Request: RequestDetail{
Parameters: map[string]string{"token": "{{$params.token}}"},
Body: `{"msg_type": "interactive", "card": {"config": {"wide_screen_mode": true}, "header": {"title": {"content": "{{$tpl.title}}", "tag": "plain_text"}, "template": "{{if $event.IsRecovered}}green{{else}}red{{end}}"}, "elements": [{"tag": "div", "text": {"tag": "lark_md","content": "{{$tpl.content}}"}}]}}`,
Body: `{"msg_type": "interactive", "card": {"config": {"wide_screen_mode": true}, "header": {"title": {"content": "{{$tpl.title}}", "tag": "plain_text"}, "template": "{{if $event.IsRecovered}}green{{else}}red{{end}}"}, "elements": [{"tag": "markdown", "content": "{{$tpl.content}}"}]}}`,
},
},
},
@@ -1300,27 +1331,6 @@ var NotiChMap = []*NotifyChannelConfig{
},
},
},
{
Name: "FeishuApp", Ident: FeishuApp, RequestType: "script", Weight: 5, Enable: false,
RequestConfig: &RequestConfig{
ScriptRequestConfig: &ScriptRequestConfig{
Timeout: 10000,
ScriptType: "script",
Script: FeishuAppBody,
},
},
ParamConfig: &NotifyParamConfig{
UserInfo: &UserInfo{
ContactKey: "email",
},
Custom: Params{
Params: []ParamItem{
{Key: "feishuapp_id", CName: "FeiShuAppID", Type: "string"},
{Key: "feishuapp_secret", CName: "FeiShuAppSecret", Type: "string"},
},
},
},
},
{
Name: "Feishu", Ident: Feishu, RequestType: "http", Weight: 5, Enable: true,
RequestConfig: &RequestConfig{
@@ -1350,7 +1360,7 @@ var NotiChMap = []*NotifyChannelConfig{
Method: "POST", Headers: map[string]string{"Content-Type": "application/json"},
Timeout: 10000, Concurrency: 5, RetryTimes: 3, RetryInterval: 100,
Request: RequestDetail{
Body: `{"msg_type": "interactive", "card": {"config": {"wide_screen_mode": true}, "header": {"title": {"content": "{{$tpl.title}}", "tag": "plain_text"}, "template": "{{if $event.IsRecovered}}green{{else}}red{{end}}"}, "elements": [{"tag": "div", "text": {"tag": "lark_md","content": "{{$tpl.content}}"}}]}}`,
Body: `{"msg_type": "interactive", "card": {"config": {"wide_screen_mode": true}, "header": {"title": {"content": "{{$tpl.title}}", "tag": "plain_text"}, "template": "{{if $event.IsRecovered}}green{{else}}red{{end}}"}, "elements": [{"tag": "markdown", "content": "{{$tpl.content}}"}]}}`,
},
},
},
@@ -1436,6 +1446,8 @@ var NotiChMap = []*NotifyChannelConfig{
},
FlashDutyRequestConfig: &FlashDutyRequestConfig{
IntegrationUrl: "flashduty integration url",
Timeout: 5000, // 默认5秒超时
RetryTimes: 3, // 默认重试3次
},
},
},
@@ -1473,460 +1485,3 @@ func (ncc *NotifyChannelConfig) Upsert(ctx *ctx.Context) error {
}
return ch.Update(ctx, *ncc)
}
var FeishuAppBody = `#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
import json
import requests
import logging
import re
import traceback
import os
import copy
from typing import Dict, Any, Optional
# 配置日志
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# 常量
RECOVERED = "recovered"
TRIGGERED = "triggered"
def get_access_token(app_id: str, app_secret: str) -> str:
"""获取飞书访问令牌"""
logger.info(f"正在获取飞书访问令牌... AppID: {app_id}")
url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal"
data = {
"app_id": app_id,
"app_secret": app_secret
}
try:
logger.info(f"发送请求到 {url}")
response = requests.post(
url,
json=data,
timeout=30,
headers={"Content-Type": "application/json"}
)
logger.info(f"收到响应: 状态码={response.status_code}")
if response.status_code != 200:
logger.error(f"获取令牌失败,状态码: {response.status_code}")
logger.error(f"响应内容: {response.text}")
return ""
resp_json = response.json()
if resp_json.get("msg") != "ok":
logger.error(f"飞书获取AccessToken失败: error={resp_json.get('msg')}")
logger.error(f"完整响应: {resp_json}")
return ""
else:
token = resp_json.get("tenant_access_token", "")
logger.info(f"飞书获取AccessToken成功Token长度: {len(token)}")
return token
except Exception as e:
logger.error(f"飞书获取AccessToken异常: error={e}")
logger.error(f"错误详情: {traceback.format_exc()}")
return ""
def get_user_info(token: str, emails: list) -> dict:
"""从飞书API获取用户信息"""
url = "https://open.feishu.cn/open-apis/contact/v3/users/batch_get_id?user_id_type=user_id"
data = {"emails": emails}
try:
response = requests.post(
url,
json=data,
timeout=30,
headers={
"Content-Type": "application/json",
"Authorization": f"Bearer {token}"
}
)
return response.json()
except Exception as e:
logger.error(f"获取用户信息失败: {e}")
return {}
def send_urgent_app(message_id: str, user_id: str, title: str, token: str) -> Optional[Exception]:
"""发送紧急应用通知"""
if not message_id:
return Exception("消息ID为空")
if not user_id:
return Exception("用户ID为空")
user_body = {
"user_id_list": [user_id],
"content": {
"text": f"紧急告警: {title}",
"type": "text"
}
}
url = f"https://open.feishu.cn/open-apis/im/v1/messages/{message_id}/urgent_app?user_id_type=user_id"
try:
response = requests.patch(
url,
json=user_body,
timeout=30,
headers={
"Content-Type": "application/json",
"Authorization": f"Bearer {token}"
}
)
res = response.json()
if response.status_code != 200:
return Exception(f"请求失败,状态码 {response.status_code}")
if res.get("code", -1) != 0:
return Exception(f"飞书拒绝请求: {res.get('msg')}")
return None
except Exception as e:
return e
def create_feishu_app_body(title: str, content: str, color: str, event_url: str) -> dict:
"""创建飞书应用卡片消息体
直接使用传入的模板内容确保使用markdown解析
"""
# 修复双重转义的换行符问题
# 1. 将 \\n 转换为真正的换行符
markdown_content = content.replace('\\n', '\n')
# 2. 处理HTML转义字符
markdown_content = markdown_content.replace('&#34;', '"').replace('&gt;', '>')
# 3. 修复特殊格式问题
# 处理可能导致错误的大括号表达式
markdown_content = markdown_content.replace('{map[', '{ map[')
markdown_content = markdown_content.replace('map[v:{', 'map[v:{ ')
# 创建消息卡片
app_body = {
"config": {
"wide_screen_mode": True,
"enable_forward": True
},
"header": {
"title": {
"tag": "plain_text",
"content": title
},
"template": color
},
"elements": [
{
"tag": "markdown",
"content": markdown_content
},
{
"tag": "hr"
},
{
"tag": "action",
"actions": [
{
"tag": "button",
"text": {
"content": "告警详情",
"tag": "plain_text"
},
"type": "primary",
"url": event_url
}
]
},
{
"tag": "hr"
},
{
"tag": "note",
"elements": [
{
"tag": "lark_md",
"content": title
}
]
}
]
}
# 记录修复后的内容
logger.info(f"飞书卡片标题: {title}")
logger.info(f"飞书卡片颜色: {color}")
logger.info(f"修复后的内容预览: {markdown_content[:100]}...")
return app_body
def extract_data_from_string(stdin_data):
"""从字符串中提取关键数据返回构建的payload"""
payload = {"tpl": {}, "params": {}, "sendto": []}
# 提取tplContent
content_match = re.search(r'tplContent:map\[content:(.*?) title:(.*?)\]', stdin_data)
if content_match:
payload["tpl"]["content"] = content_match.group(1)
payload["tpl"]["title"] = content_match.group(2)
# 提取customParams
params_match = re.search(r'customParams:map\[(.*?)\]', stdin_data)
if params_match:
params_str = params_match.group(1)
# 提取domain_url
domain_match = re.search(r'domain_url:(.*?)(?: |$)', params_str)
if domain_match:
payload["params"]["domain_url"] = domain_match.group(1)
# 提取feishuapp_id
app_id_match = re.search(r'feishuapp_id:(.*?)(?: |$)', params_str)
if app_id_match:
payload["params"]["feishuapp_id"] = app_id_match.group(1)
# 提取feishuapp_secret
secret_match = re.search(r'feishuapp_secret:(.*?)(?:\s|$)', params_str)
if secret_match:
payload["params"]["feishuapp_secret"] = secret_match.group(1)
# 检查是否有err字段
err_match = re.search(r'err:(.*?)(?:,|\s|$)', stdin_data)
if err_match:
error_msg = err_match.group(1)
logger.error(f"检测到脚本错误: {error_msg}")
# 不设置默认发送目标,允许为空
return payload
def send_feishu_app(payload) -> None:
"""
发送飞书应用通知
Args:
payload: 包含告警信息的字典
"""
try:
# 提取必要参数
app_id = payload.get('params', {}).get('feishuapp_id')
app_secret = payload.get('params', {}).get('feishuapp_secret')
domain_url = "https://your-n9e-addr.com"
# 从sendto获取通知人列表
sendtos = payload.get('sendtos', [])
if isinstance(sendtos, str):
# 如果sendto是字符串按逗号分割
sendtos = [s.strip() for s in sendtos.split(',') if s.strip()]
# 检查必要参数
if not app_id or not app_secret:
logger.error("未提供有效的飞书应用凭证 (app_id 或 app_secret)")
return
# 检查发送目标,如果为空则直接返回
if not sendtos:
logger.warning("未提供发送目标,无法发送消息")
return
logger.info(f"发送目标: {sendtos}")
# 提取事件信息 - 优先使用单个event如果没有则使用events中的第一个
event = payload.get('event', {})
if not event and payload.get('events') and len(payload.get('events', [])) > 0:
event = payload.get('events')[0]
# 获取通知内容 - 使用已渲染的模板内容
content = payload.get('tpl', {}).get('content', '未找到告警内容')
title = payload.get('tpl', {}).get('title', '告警通知')
# 获取访问令牌
token = get_access_token(app_id, app_secret)
if not token:
logger.error("获取飞书访问令牌失败,无法继续")
return
# 获取用户信息
user_info = get_user_info(token, sendtos)
# 创建邮箱到用户ID的映射
user_id_map = {}
if user_info and "data" in user_info and "user_list" in user_info["data"]:
for user in user_info["data"]["user_list"]:
if user.get("email"):
user_id_map[user["email"]] = user.get("user_id", "")
# 提取事件信息
event_id = event.get('id', 0)
rule_name = event.get('rule_name', title)
severity = event.get('severity', 1) # 默认为严重级别
# 设置颜色和标题 - 根据事件是否已恢复或严重性级别
color = "red" # 默认严重告警使用红色
send_title = title
# 根据事件状态确定颜色
if "Recovered" in content:
color = "green" # 已恢复告警使用绿色
elif severity == 1:
color = "red" # 严重告警
elif severity == 2:
color = "orange" # 警告
elif severity == 3:
color = "blue" # 信息
event_url = f"{domain_url}/alert-his-events/{event_id}"
# 为每个接收者发送消息
for recipient in sendtos:
if not recipient:
continue
# 确定receive_id_type
if recipient.startswith("ou_"):
receive_type = "open_id"
elif recipient.startswith("on_"):
receive_type = "union_id"
elif recipient.startswith("oc_"):
receive_type = "chat_id"
elif "@" in recipient:
receive_type = "email"
else:
receive_type = "user_id"
fs_url = f"https://open.feishu.cn/open-apis/im/v1/messages?receive_id_type={receive_type}"
# 创建卡片消息体 - 直接使用模板生成的内容在create_feishu_app_body中处理转义问题
app_body = create_feishu_app_body(send_title, content, color, event_url)
content_str = json.dumps(app_body)
body = {
"msg_type": "interactive",
"receive_id": recipient,
"content": content_str
}
# 发送消息
try:
response = requests.post(
fs_url,
json=body,
timeout=30,
headers={
"Content-Type": "application/json",
"Authorization": f"Bearer {token}"
}
)
send_res = response.json()
if response.status_code != 200 or send_res.get("code", -1) != 0:
logger.error(f"飞书消息发送失败: 状态码={response.status_code}, 错误={send_res.get('msg', '未知错误')}")
continue
message_id = send_res.get("data", {}).get("message_id", "")
logger.info(f"发送成功 → {recipient} [消息ID: {message_id}]")
# 如果是高严重性,发送紧急消息
if severity == 1 and "Recovered" not in content:
user_id = user_id_map.get(recipient, "")
if user_id:
err = send_urgent_app(message_id, user_id, send_title, token)
if err:
logger.error(f"加急通知失败: {err}")
else:
logger.info(f"已发送加急通知 → {recipient}")
else:
logger.warning(f"无法发送加急: 未找到用户ID (email={recipient})")
except Exception as e:
logger.error(f"发送异常: {e}")
logger.error(f"错误详情: {traceback.format_exc()}")
except Exception as e:
logger.error(f"处理异常: {e}")
logger.error(f"错误详情: {traceback.format_exc()}")
def main():
"""主函数:读取输入数据,解析并发送飞书通知"""
try:
logger.info("开始执行飞书应用告警脚本")
payload = None
# 读取标准输入
try:
stdin_data = sys.stdin.read()
# 保存安全处理后的原始输入
try:
with open(".raw_input", 'w') as f:
sanitized_data = stdin_data.replace(r'feishuapp_secret:[^ ]*', 'feishuapp_secret:[REDACTED]')
f.write(sanitized_data)
except:
pass
# 优先尝试解析JSON
try:
payload = json.loads(stdin_data)
except json.JSONDecodeError:
# JSON解析失败尝试字符串提取
if "tplContent" in stdin_data:
payload = extract_data_from_string(stdin_data)
logger.info("从原始文本提取数据成功")
else:
logger.error("无法识别的数据格式")
payload = {
"tpl": {"content": "无法解析输入数据", "title": "告警通知"},
"params": {},
"sendto": []
}
except Exception as e:
logger.error(f"读取输入失败: {e}")
payload = {
"tpl": {"content": "读取输入失败", "title": "告警通知"},
"params": {},
"sendto": []
}
# 保存处理后的payload隐藏敏感信息
try:
with open(".payload", 'w') as f:
safe_payload = copy.deepcopy(payload)
if 'params' in safe_payload and 'feishuapp_secret' in safe_payload['params']:
safe_payload['params']['feishuapp_secret'] = '[REDACTED]'
f.write(json.dumps(safe_payload, indent=4))
except:
pass
# 处理发送
send_feishu_app(payload)
except Exception as e:
logger.error(f"处理异常: {e}")
logger.error(f"错误详情: {traceback.format_exc()}")
sys.exit(1) # 确保错误状态正确传递
# 脚本入口点 - 只有一个入口点
if __name__ == "__main__":
main()
`

View File

@@ -1,7 +1,10 @@
package models
import (
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"time"
"github.com/ccfos/nightingale/v6/pkg/ctx"
@@ -19,6 +22,7 @@ type NotifyRule struct {
// 通知配置
NotifyConfigs []NotifyConfig `json:"notify_configs" gorm:"serializer:json"`
ExtraConfig interface{} `json:"extra_config,omitempty" gorm:"serializer:json"`
CreateAt int64 `json:"create_at"`
CreateBy string `json:"create_by"`
@@ -39,6 +43,7 @@ type NotifyConfig struct {
ChannelID int64 `json:"channel_id"` // 通知媒介(如:阿里云短信)
TemplateID int64 `json:"template_id"` // 通知模板
Params map[string]interface{} `json:"params"` // 通知参数
Type string `json:"type"`
Severities []int `json:"severities"` // 适用级别(一级告警、二级告警、三级告警)
TimeRanges []TimeRanges `json:"time_ranges"` // 适用时段
@@ -46,6 +51,12 @@ type NotifyConfig struct {
Attributes []TagFilter `json:"attributes"` // 适用属性
}
// Hash returns a hex-encoded SHA-256 digest identifying this notify config.
//
// Fields are joined with an explicit "|" separator so adjacent fields cannot
// collide: with plain concatenation, ChannelID=1/TemplateID=23 and
// ChannelID=12/TemplateID=3 would feed the same bytes into the hash.
// Params/Severities/TimeRanges/LabelKeys/Attributes are rendered via %v, so
// the digest is stable only as long as their Go string representation is.
func (n *NotifyConfig) Hash() string {
	hash := sha256.New()
	hash.Write([]byte(fmt.Sprintf("%d|%d|%v|%s|%v|%v|%v|%v",
		n.ChannelID, n.TemplateID, n.Params, n.Type,
		n.Severities, n.TimeRanges, n.LabelKeys, n.Attributes)))
	return hex.EncodeToString(hash.Sum(nil))
}
type CustomParams struct {
UserIDs []int64 `json:"user_ids"`
UserGroupIDs []int64 `json:"user_group_ids"`
@@ -75,11 +86,6 @@ func GetNotifyRule(c *ctx.Context, id int64) (*NotifyRule, error) {
return &rule, nil
}
// 更新 NotifyRule
func UpdateNotifyRule(c *ctx.Context, rule *NotifyRule) error {
return DB(c).Save(rule).Error
}
// 删除 NotifyRule
func DeleteNotifyRule(c *ctx.Context, id int64) error {
return DB(c).Delete(&NotifyRule{}, id).Error
@@ -190,7 +196,12 @@ func (r *NotifyRule) Update(ctx *ctx.Context, ref NotifyRule) error {
if err != nil {
return err
}
return DB(ctx).Model(r).Select("*").Updates(ref).Error
db := DB(ctx).Model(r).Select("*")
if ref.ExtraConfig == nil {
db = db.Omit("ExtraConfig")
}
return db.Updates(ref).Error
}
func (r *NotifyRule) DB2FE() {

View File

@@ -232,7 +232,7 @@ var TplMap = map[string]string{
{{- end}}
{{- end}}
{{$domain := "http://请联系管理员修改通知模板将域名替换为实际的域名" }}
[事件详情]({{$domain}}/alert-his-events/{{.Id}})|[屏蔽1小时]({{$domain}}/alert-mutes/add?busiGroup={{.GroupId}}&cate={{.Cate}}&datasource_ids={{.DatasourceId}}&prod={{.RuleProd}}{{range $key, $value := .TagsMap}}&tags={{$key}}%3D{{$value}}{{end}})|[查看曲线]({{$domain}}/metric/explorer?data_source_id={{.DatasourceId}}&data_source_name=prometheus&mode=graph&prom_ql={{.PromQl|escape}})`,
[事件详情]({{$domain}}/alert-his-events/{{.Id}})|[屏蔽1小时]({{$domain}}/alert-mutes/add?__event_id={{.Id}}){{if eq .Cate "prometheus"}}|[查看曲线]({{$domain}}/metric/explorer?__event_id={{.Id}}&mode=graph}}){{end}}`,
Email: `<!DOCTYPE html>
<html lang="en">
<head>
@@ -459,7 +459,7 @@ var TplMap = map[string]string{
发送时间: {{timestamp}}
{{$domain := "http://请联系管理员修改通知模板将域名替换为实际的域名" }}
事件详情: {{$domain}}/alert-his-events/{{.Id}}
屏蔽1小时: {{$domain}}/alert-mutes/add?busiGroup={{.GroupId}}&cate={{.Cate}}&datasource_ids={{.DatasourceId}}&prod={{.RuleProd}}{{range $key, $value := .TagsMap}}&tags={{$key}}%3D{{$value}}{{end}}`,
屏蔽1小时: {{$domain}}/alert-mutes/add?__event_id={{.Id}}`,
FeishuCard: `{{ if .IsRecovered }}
{{- if ne .Cate "host"}}
**告警集群:** {{.Cluster}}{{end}}
@@ -478,7 +478,7 @@ var TplMap = map[string]string{
{{if .RuleNote }}**告警描述:** **{{.RuleNote}}**{{end}}
{{- end -}}
{{$domain := "http://请联系管理员修改通知模板将域名替换为实际的域名" }}
[事件详情]({{$domain}}/alert-his-events/{{.Id}})|[屏蔽1小时]({{$domain}}/alert-mutes/add?busiGroup={{.GroupId}}&cate={{.Cate}}&datasource_ids={{.DatasourceId}}&prod={{.RuleProd}}{{range $key, $value := .TagsMap}}&tags={{$key}}%3D{{$value}}{{end}})|[查看曲线]({{$domain}}/metric/explorer?data_source_id={{.DatasourceId}}&data_source_name=prometheus&mode=graph&prom_ql={{.PromQl|escape}})`,
[事件详情]({{$domain}}/alert-his-events/{{.Id}})|[屏蔽1小时]({{$domain}}/alert-mutes/add?__event_id={{.Id}}){{if eq .Cate "prometheus"}}|[查看曲线]({{$domain}}/metric/explorer?__event_id={{.Id}}&mode=graph}}){{end}}`,
EmailSubject: `{{if .IsRecovered}}Recovered{{else}}Triggered{{end}}: {{.RuleName}} {{.TagsJSON}}`,
Mm: `级别状态: S{{.Severity}} {{if .IsRecovered}}Recovered{{else}}Triggered{{end}}
规则名称: {{.RuleName}}{{if .RuleNote}}
@@ -506,7 +506,7 @@ var TplMap = map[string]string{
{{$time_duration := sub now.Unix .FirstTriggerTime }}{{if .IsRecovered}}{{$time_duration = sub .LastEvalTime .FirstTriggerTime }}{{end}}**距离首次告警**: {{humanizeDurationInterface $time_duration}}
**发送时间**: {{timestamp}}
{{$domain := "http://请联系管理员修改通知模板将域名替换为实际的域名" }}
[事件详情]({{$domain}}/alert-his-events/{{.Id}})|[屏蔽1小时]({{$domain}}/alert-mutes/add?busiGroup={{.GroupId}}&cate={{.Cate}}&datasource_ids={{.DatasourceId}}&prod={{.RuleProd}}{{range $key, $value := .TagsMap}}&tags={{$key}}%3D{{$value}}{{end}})|[查看曲线]({{$domain}}/metric/explorer?data_source_id={{.DatasourceId}}&data_source_name=prometheus&mode=graph&prom_ql={{.PromQl|escape}})`,
[事件详情]({{$domain}}/alert-his-events/{{.Id}})|[屏蔽1小时]({{$domain}}/alert-mutes/add?__event_id={{.Id}}){{if eq .Cate "prometheus"}}|[查看曲线]({{$domain}}/metric/explorer?__event_id={{.Id}}&mode=graph}}){{end}}`,
Lark: `级别状态: S{{.Severity}} {{if .IsRecovered}}Recovered{{else}}Triggered{{end}}
规则名称: {{.RuleName}}{{if .RuleNote}}
规则备注: {{.RuleNote}}{{end}}
@@ -516,7 +516,7 @@ var TplMap = map[string]string{
发送时间: {{timestamp}}
{{$domain := "http://请联系管理员修改通知模板将域名替换为实际的域名" }}
事件详情: {{$domain}}/alert-his-events/{{.Id}}
屏蔽1小时: {{$domain}}/alert-mutes/add?busiGroup={{.GroupId}}&cate={{.Cate}}&datasource_ids={{.DatasourceId}}&prod={{.RuleProd}}{{range $key, $value := .TagsMap}}&tags={{$key}}%3D{{$value}}{{end}}`,
屏蔽1小时: {{$domain}}/alert-mutes/add?__event_id={{.Id}}`,
LarkCard: `{{ if .IsRecovered }}
{{- if ne .Cate "host"}}
**告警集群:** {{.Cluster}}{{end}}
@@ -537,5 +537,5 @@ var TplMap = map[string]string{
{{if .RuleNote }}**告警描述:** **{{.RuleNote}}**{{end}}
{{- end -}}
{{$domain := "http://请联系管理员修改通知模板将域名替换为实际的域名" }}
[事件详情]({{$domain}}/alert-his-events/{{.Id}})|[屏蔽1小时]({{$domain}}/alert-mutes/add?busiGroup={{.GroupId}}&cate={{.Cate}}&datasource_ids={{.DatasourceId}}&prod={{.RuleProd}}{{range $key, $value := .TagsMap}}&tags={{$key}}%3D{{$value}}{{end}})|[查看曲线]({{$domain}}/metric/explorer?data_source_id={{.DatasourceId}}&data_source_name=prometheus&mode=graph&prom_ql={{.PromQl|escape}})`,
[事件详情]({{$domain}}/alert-his-events/{{.Id}})|[屏蔽1小时]({{$domain}}/alert-mutes/add?__event_id={{.Id}}){{if eq .Cate "prometheus"}}|[查看曲线]({{$domain}}/metric/explorer?__event_id={{.Id}}&mode=graph}}){{end}}`,
}

View File

@@ -27,8 +27,13 @@ func convertInterval(interval string) int {
duration, err := time.ParseDuration(interval)
if err != nil {
logger.Errorf("Error parsing interval `%s`, err: %v", interval, err)
return 0
return 60
}
if duration.Seconds() == 0 {
duration = 60 * time.Second
}
return int(duration.Seconds())
}
@@ -57,17 +62,12 @@ func ConvertAlert(rule PromRule, interval string, datasouceQueries []DatasourceQ
}
ar := AlertRule{
Name: rule.Alert,
Severity: severity,
Disabled: disabled,
PromForDuration: convertInterval(rule.For),
PromQl: rule.Expr,
PromEvalInterval: convertInterval(interval),
EnableStimeJSON: "00:00",
EnableEtimeJSON: "23:59",
EnableDaysOfWeekJSON: []string{
"1", "2", "3", "4", "5", "6", "0",
},
Name: rule.Alert,
Severity: severity,
Disabled: disabled,
PromForDuration: convertInterval(rule.For),
PromQl: rule.Expr,
CronPattern: fmt.Sprintf("@every %ds", convertInterval(interval)),
EnableInBG: AlertRuleEnableInGlobalBG,
NotifyRecovered: AlertRuleNotifyRecovered,
NotifyRepeatStep: AlertRuleNotifyRepeatStep60Min,
@@ -75,6 +75,8 @@ func ConvertAlert(rule PromRule, interval string, datasouceQueries []DatasourceQ
AnnotationsJSON: annotations,
AppendTagsJSON: appendTags,
DatasourceQueries: datasouceQueries,
NotifyVersion: 1,
NotifyRuleIds: []int64{},
}
return ar
@@ -86,7 +88,7 @@ func DealPromGroup(promRule []PromRuleGroup, dataSourceQueries []DatasourceQuery
for _, group := range promRule {
interval := group.Interval
if interval == "" {
interval = "15s"
interval = "60s"
}
for _, rule := range group.Rules {
if rule.Alert != "" {

View File

@@ -12,8 +12,8 @@ import (
"github.com/pkg/errors"
"github.com/toolkits/pkg/container/set"
"github.com/toolkits/pkg/slice"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/slice"
"gorm.io/gorm"
)
@@ -124,7 +124,7 @@ func TargetStatistics(ctx *ctx.Context) (*Statistics, error) {
func TargetDel(ctx *ctx.Context, idents []string, deleteHook TargetDeleteHookFunc) error {
if len(idents) == 0 {
panic("idents empty")
return errors.New("idents cannot be empty")
}
return DB(ctx).Transaction(func(tx *gorm.DB) error {
@@ -228,6 +228,9 @@ func TargetTotal(ctx *ctx.Context, options ...BuildTargetWhereOption) (int64, er
func TargetGets(ctx *ctx.Context, limit, offset int, order string, desc bool, options ...BuildTargetWhereOption) ([]*Target, error) {
var lst []*Target
order = validateOrderField(order, "ident")
if desc {
order += " desc"
} else {
@@ -661,7 +664,7 @@ func CanMigrateBg(ctx *ctx.Context) bool {
return false
}
if cnt == 0 {
log.Println("target table is empty, skip migration.")
logger.Debug("target table is empty, skip migration.")
return false
}

View File

@@ -3,6 +3,7 @@ package models
import (
"fmt"
"os"
"regexp"
"strconv"
"strings"
"time"
@@ -10,6 +11,7 @@ import (
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/ormx"
"github.com/ccfos/nightingale/v6/pkg/poster"
"github.com/ccfos/nightingale/v6/pkg/secu"
"github.com/ccfos/nightingale/v6/storage"
"github.com/redis/go-redis/v9"
@@ -26,7 +28,6 @@ const (
Wecom = "wecom"
Feishu = "feishu"
FeishuCard = "feishucard"
FeishuApp = "feishuapp"
Discord = "discord"
MattermostWebhook = "mattermostwebhook"
MattermostBot = "mattermostbot"
@@ -163,6 +164,10 @@ func (u *User) Verify() error {
return errors.New("Email invalid")
}
if u.Phone != "" {
return u.EncryptPhone()
}
return nil
}
@@ -322,6 +327,7 @@ func UserGet(ctx *ctx.Context, where string, args ...interface{}) (*User, error)
lst[0].RolesLst = strings.Fields(lst[0].Roles)
lst[0].Admin = lst[0].IsAdmin()
lst[0].DecryptPhone() // 解密手机号
return lst[0], nil
}
@@ -336,6 +342,7 @@ func UsersGet(ctx *ctx.Context, where string, args ...interface{}) ([]*User, err
for _, user := range lst {
user.RolesLst = strings.Fields(user.Roles)
user.Admin = user.IsAdmin()
user.DecryptPhone() // 解密手机号
}
return lst, nil
@@ -549,6 +556,47 @@ func UserTotal(ctx *ctx.Context, query string, stime, etime int64) (num int64, e
return num, nil
}
var (
	// Compiled once at package init so hot list endpoints never recompile them.
	whitespaceRegex = regexp.MustCompile(`\s+`)
	validOrderRegex = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z_][a-zA-Z0-9_]*)?$`)
)

// validateOrderField sanitizes a user-supplied ORDER BY column name before it
// is interpolated into SQL. Anything empty, over-long, containing dangerous
// characters, or not shaped like a (possibly table-qualified) identifier is
// rejected and replaced by defaultField. Suspicious input is logged as a
// potential injection attempt.
func validateOrderField(order string, defaultField string) string {
	if order == "" {
		return defaultField
	}

	// Reject absurdly long input before doing any further work.
	if len(order) > 64 {
		logger.Warningf("SQL injection attempt detected: order field too long (%d chars)", len(order))
		return defaultField
	}

	// Drop every whitespace character; a field name never contains any.
	cleaned := whitespaceRegex.ReplaceAllString(order, "")
	if cleaned == "" {
		return defaultField
	}

	// Block SQL metacharacters, comment markers, operators, and hex/binary
	// literal prefixes that could smuggle expressions into the ORDER BY.
	lowered := strings.ToLower(cleaned)
	if strings.ContainsAny(cleaned, "();,'\"` --/*\\=+-*/><|&^~") ||
		strings.Contains(lowered, "0x") || strings.Contains(lowered, "0b") {
		logger.Warningf("SQL injection attempt detected: contains dangerous characters")
		return defaultField
	}

	// Finally, require an identifier (optionally qualified by a table name).
	if !validOrderRegex.MatchString(cleaned) {
		logger.Warningf("SQL injection attempt detected: invalid order field format")
		return defaultField
	}

	return cleaned
}
func UserGets(ctx *ctx.Context, query string, limit, offset int, stime, etime int64,
order string, desc bool, usernames, phones, emails []string) ([]User, error) {
@@ -558,6 +606,8 @@ func UserGets(ctx *ctx.Context, query string, limit, offset int, stime, etime in
session = session.Where("last_active_time between ? and ?", stime, etime)
}
order = validateOrderField(order, "username")
if desc {
order = order + " desc"
} else {
@@ -593,6 +643,7 @@ func UserGets(ctx *ctx.Context, query string, limit, offset int, stime, etime in
users[i].RolesLst = strings.Fields(users[i].Roles)
users[i].Admin = users[i].IsAdmin()
users[i].Password = ""
users[i].DecryptPhone() // 解密手机号
// query for user group information
var userGroupIDs []int64
@@ -634,6 +685,7 @@ func UserGetAll(ctx *ctx.Context) ([]*User, error) {
for i := 0; i < len(lst); i++ {
lst[i].RolesLst = strings.Fields(lst[i].Roles)
lst[i].Admin = lst[i].IsAdmin()
lst[i].DecryptPhone() // 解密手机号
}
}
return lst, err
@@ -650,6 +702,7 @@ func UserGetsByIds(ctx *ctx.Context, ids []int64) ([]User, error) {
for i := 0; i < len(lst); i++ {
lst[i].RolesLst = strings.Fields(lst[i].Roles)
lst[i].Admin = lst[i].IsAdmin()
lst[i].DecryptPhone() // 解密手机号
}
}
@@ -971,3 +1024,60 @@ func (u *User) AddUserAndGroups(ctx *ctx.Context, coverTeams bool) error {
return nil
}
// EncryptPhone encrypts u.Phone in place with the RSA public key taken from
// the phone-encryption config cache. It is deliberately best-effort: when the
// cache is not loaded, encryption is disabled, the phone is empty, or the
// value already looks encrypted, the field is left untouched and nil is
// returned. Encryption failures are logged but never fail the caller, so a
// user save cannot break on crypto errors. It always returns nil.
func (u *User) EncryptPhone() (err error) {
	// Read the encryption settings from the in-process cache.
	enabled, publicKey, _, _, loaded := GetPhoneEncryptionConfigFromCache()
	if !loaded {
		// Cache not populated yet: log it and let the save proceed with the
		// plaintext phone rather than blocking the write.
		logger.Infof("Phone encryption config cache not loaded, user: %s", u.Username)
		return nil
	}
	// Only encrypt when the feature is on and there is something to encrypt.
	if enabled && u.Phone != "" {
		// Skip values already carrying the "enc:" marker to avoid double
		// encryption. NOTE(review): assumes encrypted phones are stored with
		// an "enc:" prefix (see DecryptPhone) — confirm secu.EncryptValue
		// produces that prefix.
		if len(u.Phone) > 4 && u.Phone[:4] == "enc:" {
			// Already encrypted, nothing to do.
			return nil
		}
		// The ":=" here shadows the named return; harmless, since every path
		// returns nil explicitly.
		encryptedPhone, err := secu.EncryptValue(u.Phone, publicKey)
		if err != nil {
			// Best effort: keep the plaintext phone instead of failing the save.
			logger.Warningf("Failed to encrypt phone: %v, user: %s", err, u.Username)
			return nil
		}
		u.Phone = encryptedPhone
	}
	return nil
}
// DecryptPhone decrypts u.Phone in place when it carries the "enc:" prefix
// that marks an encrypted value. Plaintext phones, an unloaded config cache,
// disabled encryption, and decryption failures all leave the field exactly
// as stored; failures are only logged, so reads never break on crypto errors.
func (u *User) DecryptPhone() {
	if u.Phone == "" {
		return
	}
	// Values without the "enc:" marker are plaintext and need no work.
	if len(u.Phone) <= 4 || u.Phone[:4] != "enc:" {
		return
	}
	// Read the encryption settings from the in-process cache.
	enabled, _, privateKey, password, loaded := GetPhoneEncryptionConfigFromCache()
	if !loaded || !enabled {
		// Config unavailable or encryption switched off: keep the stored value.
		return
	}
	decryptedPhone, err := secu.Decrypt(u.Phone, privateKey, password)
	if err != nil {
		// Best effort: keep the encrypted value rather than erroring out.
		logger.Warningf("Failed to decrypt phone for user %s: %v", u.Username, err)
		return
	}
	u.Phone = decryptedPhone
}

View File

@@ -8,11 +8,11 @@ import (
type UserToken struct {
Id int64 `json:"id" gorm:"primaryKey"`
Username string `json:"username" gorm:"type:varchar(255) not null default ''"`
TokenName string `json:"token_name" gorm:"type:varchar(255) not null default ''"`
Token string `json:"token" gorm:"type:varchar(255) not null default ''"`
CreateAt int64 `json:"create_at" gorm:"type:bigint not null default 0"`
LastUsed int64 `json:"last_used" gorm:"type:bigint not null default 0"`
Username string `json:"username" gorm:"type:varchar(255); not null; default ''"`
TokenName string `json:"token_name" gorm:"type:varchar(255); not null; default ''"`
Token string `json:"token" gorm:"type:varchar(255); not null; default ''"`
CreateAt int64 `json:"create_at" gorm:"type:bigint; not null; default 0"`
LastUsed int64 `json:"last_used" gorm:"type:bigint; not null; default 0"`
}
func (UserToken) TableName() string {

View File

@@ -0,0 +1,37 @@
//go:build !windows
// +build !windows
package cmdx
import (
"os/exec"
"syscall"
"time"
)
func CmdWait(cmd *exec.Cmd, timeout time.Duration) (error, bool) {
var err error
done := make(chan error)
go func() {
done <- cmd.Wait()
}()
select {
case <-time.After(timeout):
go func() {
<-done // allow goroutine to exit
}()
// IMPORTANT: cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} is necessary before cmd.Start()
err = syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL)
return err, true
case err = <-done:
return err, false
}
}
func CmdStart(cmd *exec.Cmd) error {
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
return cmd.Start()
}

35
pkg/cmdx/cmd_windows.go Normal file
View File

@@ -0,0 +1,35 @@
//go:build windows
// +build windows
package cmdx
import (
"os/exec"
"syscall"
"time"
)
func CmdWait(cmd *exec.Cmd, timeout time.Duration) (error, bool) {
var err error
done := make(chan error)
go func() {
done <- cmd.Wait()
}()
select {
case <-time.After(timeout):
go func() {
<-done // allow goroutine to exit
}()
err = cmd.Process.Signal(syscall.SIGKILL)
return err, true
case err = <-done:
return err, false
}
}
func CmdStart(cmd *exec.Cmd) error {
return cmd.Start()
}

15
pkg/cmdx/cmdx.go Normal file
View File

@@ -0,0 +1,15 @@
package cmdx
import (
"os/exec"
"time"
)
// RunTimeout starts cmd and waits for it to finish, killing it if it
// runs longer than timeout. It returns the resulting error and a bool
// that is true when the command was killed because the timeout expired.
func RunTimeout(cmd *exec.Cmd, timeout time.Duration) (error, bool) {
	if err := CmdStart(cmd); err != nil {
		return err, false
	}
	return CmdWait(cmd, timeout)
}

View File

@@ -89,7 +89,7 @@ func diffMap(m1, m2 map[int64]*models.User) []models.User {
func updateUser(appKey string, m1, m2 map[int64]*models.User) {
for i := range m1 {
if _, ok := m2[i]; ok {
if m1[i].Email != m2[i].Email || m1[i].Phone != m2[i].Phone || m1[i].Username != m2[i].Username {
if m1[i].Email != m2[i].Email || !PhoneIsSame(m1[i].Phone, m2[i].Phone) || m1[i].Username != m2[i].Username {
var flashdutyUser User
flashdutyUser = User{
@@ -110,6 +110,30 @@ func updateUser(appKey string, m1, m2 map[int64]*models.User) {
}
}
// PhoneIsSame reports whether two phone numbers refer to the same
// subscriber, tolerating formatting noise (spaces, hyphens, a leading
// "+") and an optional country prefix of up to 3 digits, e.g. +86,
// +1, +44.
func PhoneIsSame(phone1, phone2 string) bool {
	// Strip whitespace, separators and the international "+" marker so
	// "+86 138-1234-5678" and "8613812345678" compare equal.
	normalize := func(p string) string {
		p = strings.TrimSpace(p)
		p = strings.ReplaceAll(p, " ", "")
		p = strings.ReplaceAll(p, "-", "")
		return strings.TrimPrefix(p, "+")
	}

	p1 := normalize(phone1)
	p2 := normalize(phone2)
	if p1 == p2 {
		return true
	}

	// BUGFIX: an empty number may only match another empty number
	// (handled by the equality check above). Without this guard the
	// suffix heuristic below matched "" against any number of up to 3
	// digits, because strings.HasSuffix(x, "") is always true.
	if p1 == "" || p2 == "" {
		return false
	}

	// Treat the numbers as equal when the longer one ends with the
	// shorter one and the difference is at most 3 leading digits — a
	// country code such as 86, 1 or 44.
	if len(p1) > len(p2) {
		return len(p1)-len(p2) <= 3 && strings.HasSuffix(p1, p2)
	}
	return len(p2)-len(p1) <= 3 && strings.HasSuffix(p2, p1)
}
type User struct {
Email string `json:"email,omitempty"`
Phone string `json:"phone,omitempty"`

View File

@@ -0,0 +1,67 @@
package flashduty
import "testing"
// TestPhoneIsSame verifies that PhoneIsSame tolerates country-code
// prefixes and formatting noise while rejecting distinct numbers.
func TestPhoneIsSame(t *testing.T) {
	cases := []struct {
		name   string
		a, b   string
		expect bool
	}{
		{"blank", "", "", true},
		{"China +86 prefix", "+8613812345678", "13812345678", true},
		{"China +86 with spaces and hyphens", "+86 138-1234-5678", "13812345678", true},
		{"USA +1 prefix", "+1 234-567-8900", "2345678900", true},
		{"UK +44 prefix", "+442078765432", "2078765432", true},
		{"India +91 prefix", "+919876543210", "9876543210", true},
		{"Germany +49 prefix", "+4915123456789", "15123456789", true},
		{"Different numbers", "+8613812345678", "13812345679", false},
	}

	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			if got := PhoneIsSame(c.a, c.b); got != c.expect {
				t.Errorf("expected %v, got %v", c.expect, got)
			}
		})
	}
}

View File

@@ -71,6 +71,18 @@ var I18N = `{
"no notify groups selected": "未选择通知组",
"all users missing notify channel configurations: %v": "所有用户缺少通知渠道配置: %v",
"event match subscribe and notify settings ok": "事件匹配订阅规则,通知设置正常",
"/loki suffix is miss, please add /loki to the url: %s": "缺少/loki后缀请在URL中添加/loki%s",
"event time not match time filter": "事件时间不匹配时间过滤器",
"event severity not match severity filter": "事件等级不匹配等级过滤器",
"event tag not match tag filter": "事件标签不匹配标签过滤器",
"event attributes not match attributes filter": "事件属性不匹配属性过滤器",
"failed to parse tag filter: %v": "解析标签过滤器失败: %v",
"event is dropped": "事件已被丢弃,不会进行通知",
"drop event success": "丢弃事件成功",
"drop event failed": "丢弃事件失败",
"callback success": "回调成功",
"Infrastructure": "基础设施",
"Host - View": "机器 - 查看",
@@ -255,7 +267,18 @@ var I18N = `{
"no notify groups selected": "未選擇通知組",
"all users missing notify channel configurations: %v": "所有用戶缺少通知渠道配置: %v",
"event match subscribe and notify settings ok": "事件匹配訂閱規則,通知設置正常",
"/loki suffix is miss, please add /loki to the url: %s": "缺少/loki後綴請在URL中添加/loki%s",
"event time not match time filter": "事件時間不匹配時間過濾器",
"event severity not match severity filter": "事件等級不匹配等級過濾器",
"event tag not match tag filter": "事件標籤不匹配標籤過濾器",
"event attributes not match attributes filter": "事件屬性不匹配屬性過濾器",
"failed to parse tag filter: %v": "解析標籤過濾器失敗: %v",
"event is dropped": "事件已被丟棄,不會進行通知",
"drop event success": "丟棄事件成功",
"drop event failed": "丟棄事件失敗",
"callback success": "回調成功",
"Infrastructure": "基礎設施",
"Host - View": "機器 - 查看",
"Host - Modify": "機器 - 修改",
@@ -436,7 +459,18 @@ var I18N = `{
"no notify groups selected": "通知グループが選択されていません",
"all users missing notify channel configurations: %v": "すべてのユーザーに通知チャンネル設定がありません: %v",
"event match subscribe and notify settings ok": "イベントがサブスクライブルールに一致し、通知設定が正常です",
"/loki suffix is miss, please add /loki to the url: %s": "/lokiサフィックスがありません。URLに/lokiを追加してください: %s",
"event time not match time filter": "イベント時間が時間フィルタと一致しません",
"event severity not match severity filter": "イベント等級が等級フィルタと一致しません",
"event tag not match tag filter": "イベントタグがタグフィルタと一致しません",
"event attributes not match attributes filter": "イベント属性が属性フィルタと一致しません",
"failed to parse tag filter: %v": "タグフィルタの解析に失敗しました: %v",
"event is dropped": "イベントが破棄されました,通知は行われません",
"drop event success": "イベント破棄成功",
"drop event failed": "イベント破棄失敗",
"callback success": "コールバック成功",
"Infrastructure": "インフラストラクチャ",
"Host - View": "機器 - 閲覧",
"Host - Modify": "機器 - 修正",
@@ -617,7 +651,18 @@ var I18N = `{
"no notify groups selected": "Группы уведомлений не выбраны",
"all users missing notify channel configurations: %v": "У всех пользователей отсутствуют настройки каналов уведомлений: %v",
"event match subscribe and notify settings ok": "Событие соответствует правилу подписки, настройки уведомлений в порядке",
"/loki suffix is miss, please add /loki to the url: %s": "Отсутствует суффикс /loki, пожалуйста, добавьте /loki к URL: %s",
"event time not match time filter": "Время события не соответствует временному фильтру",
"event severity not match severity filter": "Уровень события не соответствует фильтру уровня",
"event tag not match tag filter": "Теги события не соответствуют фильтру тегов",
"event attributes not match attributes filter": "Атрибуты события не соответствуют фильтру атрибутов",
"failed to parse tag filter: %v": "Не удалось разобрать фильтр тегов: %v",
"event is dropped": "Событие отброшено, уведомление не будет отправлено",
"drop event success": "Событие успешно отброшено",
"drop event failed": "Не удалось отбросить событие",
"callback success": "Обратный вызов успешен",
"Infrastructure": "Инфраструктура",
"Host - View": "Хост - Просмотр",
"Host - Modify": "Хост - Изменить",

View File

@@ -1200,11 +1200,11 @@ func (InitPostgresESIndexPattern) TableName() string {
type InitBuiltinMetric struct {
ID uint64 `gorm:"primaryKey;autoIncrement;comment:unique identifier"`
Collector string `gorm:"size:191;not null;comment:type of collector;index:idx_collector;uniqueIndex:idx_collector_typ_name"`
Typ string `gorm:"size:191;not null;comment:type of metric;index:idx_typ;uniqueIndex:idx_collector_typ_name"`
Name string `gorm:"size:191;not null;comment:name of metric;index:idx_name;uniqueIndex:idx_collector_typ_name"`
Collector string `gorm:"size:191;not null;comment:type of collector;index:idx_collector`
Typ string `gorm:"size:191;not null;comment:type of metric;index:idx_typ`
Name string `gorm:"size:191;not null;comment:name of metric;index:idx_name`
Unit string `gorm:"size:191;not null;comment:unit of metric"`
Lang string `gorm:"size:191;not null;default:'';comment:language of metric;index:idx_lang;uniqueIndex:idx_collector_typ_name"`
Lang string `gorm:"size:191;not null;default:'';comment:language of metric;index:idx_lang`
Note string `gorm:"size:4096;not null;comment:description of metric in Chinese"`
Expression string `gorm:"size:4096;not null;comment:expression of metric"`
CreatedAt int64 `gorm:"not null;default:0;comment:create time"`
@@ -1224,11 +1224,11 @@ func (InitBuiltinMetric) TableOptions() string {
type InitSqliteBuiltinMetric struct {
ID uint64 `gorm:"primaryKey;autoIncrement;comment:unique identifier"`
Collector string `gorm:"size:191;not null;comment:type of collector;index:idx_collector;uniqueIndex:idx_collector_typ_name"`
Typ string `gorm:"size:191;not null;comment:type of metric;index:idx_typ;uniqueIndex:idx_collector_typ_name"`
Name string `gorm:"size:191;not null;comment:name of metric;index:idx_name_sqlite;uniqueIndex:idx_collector_typ_name"`
Collector string `gorm:"size:191;not null;comment:type of collector;index:idx_collector`
Typ string `gorm:"size:191;not null;comment:type of metric;index:idx_typ`
Name string `gorm:"size:191;not null;comment:name of metric;index:idx_name_sqlite`
Unit string `gorm:"size:191;not null;comment:unit of metric"`
Lang string `gorm:"size:191;not null;default:'';comment:language of metric;index:idx_lang;uniqueIndex:idx_collector_typ_name"`
Lang string `gorm:"size:191;not null;default:'';comment:language of metric;index:idx_lang`
Note string `gorm:"size:4096;not null;comment:description of metric in Chinese"`
Expression string `gorm:"size:4096;not null;comment:expression of metric"`
CreatedAt int64 `gorm:"not null;default:0;comment:create time"`

View File

@@ -12,6 +12,11 @@ import (
)
func Decrypt(cipherText string, privateKeyByte []byte, password string) (decrypted string, err error) {
// 移除 "enc:" 前缀(如果存在)
if len(cipherText) > 4 && cipherText[:4] == "enc:" {
cipherText = cipherText[4:]
}
decodeCipher, _ := base64.StdEncoding.DecodeString(cipherText)
//pem解码
block, _ := pem.Decode(privateKeyByte)
@@ -53,7 +58,8 @@ func EncryptValue(value string, publicKeyData []byte) (string, error) {
if err != nil {
return "", fmt.Errorf("failed to encrypt value: %v", err)
}
return BASE64StdEncode(ciphertext), nil
// 添加 "enc:" 前缀标记这是加密数据
return "enc:" + BASE64StdEncode(ciphertext), nil
}
func GenerateRsaKeyPair(password string) (privateByte, publicByte []byte, err error) {

View File

@@ -739,3 +739,25 @@ func JsonMarshal(v interface{}) template.HTML {
}
return template.HTML(string(json))
}
// MapDifference returns the key/value pairs of firstMap whose keys do
// not appear in secondMap. The error result is always nil; it exists so
// the function satisfies the (value, error) shape expected of template
// helpers.
func MapDifference(firstMap, secondMap map[string]string) (map[string]string, error) {
	diff := make(map[string]string, len(firstMap))
	for k, v := range firstMap {
		if _, found := secondMap[k]; found {
			continue
		}
		diff[k] = v
	}
	return diff, nil
}
// TagsMapToStr renders a tag map as a comma-joined "k=v" list, sorted
// lexically so equal maps always produce identical strings regardless
// of map iteration order. An empty or nil map yields "".
func TagsMapToStr(m map[string]string) string {
	// Pre-size to len(m): avoids repeated append growth/copies.
	strs := make([]string, 0, len(m))
	for key, value := range m {
		strs = append(strs, key+"="+value)
	}
	sort.Strings(strs)
	return strings.Join(strs, ",")
}

View File

@@ -62,6 +62,8 @@ var TemplateFuncMap = template.FuncMap{
"batchContactsAtsInFeishuEmail": BatchContactsAtsInFeishuEmail,
"batchContactsAtsInFeishuId": BatchContactsAtsInFeishuId,
"jsonMarshal": JsonMarshal,
"mapDifference": MapDifference,
"tagsMapToStr": TagsMapToStr,
}
// NewTemplateFuncMap copy on write for TemplateFuncMap

View File

@@ -71,6 +71,24 @@ func ValueFormatter(unit string, decimals int, value float64) FormattedValue {
}
}
// Handle positive and negative infinity
if math.IsInf(value, 1) {
return FormattedValue{
Value: 9999999999,
Unit: "",
Text: "+Inf",
Stat: 9999999999,
}
}
if math.IsInf(value, -1) {
return FormattedValue{
Value: -9999999999,
Unit: "",
Text: "-Inf",
Stat: -9999999999,
}
}
// 处理时间单位
switch unit {
case "none":

View File

@@ -117,7 +117,7 @@ func (pc *PromClientMap) loadFromDatabase() {
continue
}
logger.Info("setClientFromPromOption success: ", dsId)
logger.Infof("setClientFromPromOption success, datasourceId: %d", dsId)
PromOptions.Set(dsId, po)
continue
}

Some files were not shown because too many files have changed in this diff Show More