Compare commits

..

1 Commits

Author SHA1 Message Date
Ulric Qin
92163f44e2 add configuration ForceUseServerTS 2022-08-22 23:22:37 +08:00
472 changed files with 22627 additions and 60447 deletions

1
.gitignore vendored
View File

@@ -41,7 +41,6 @@ _test
/docker/pub
/docker/n9e
/docker/mysqldata
/etc.local
.alerts
.idea

View File

@@ -15,7 +15,7 @@ builds:
hooks:
pre:
- ./fe.sh
main: ./cmd/center/
main: ./src/
binary: n9e
env:
- CGO_ENABLED=0
@@ -26,26 +26,12 @@ builds:
- arm64
ldflags:
- -s -w
- -X github.com/ccfos/nightingale/v6/pkg/version.Version={{ .Tag }}-{{.Commit}}
- id: build-cli
main: ./cmd/cli/
binary: n9e-cli
env:
- CGO_ENABLED=0
goos:
- linux
goarch:
- amd64
- arm64
ldflags:
- -s -w
- -X github.com/ccfos/nightingale/v6/pkg/version.Version={{ .Tag }}-{{.Commit}}
- -X github.com/didi/nightingale/v5/src/pkg/version.VERSION={{ .Tag }}-{{.Commit}}
archives:
- id: n9e
builds:
- build
- build-cli
format: tar.gz
format_overrides:
- goos: windows
@@ -56,9 +42,6 @@ archives:
- docker/*
- etc/*
- pub/*
- integrations/*
- cli/*
- n9e.sql
release:
github:
@@ -76,7 +59,6 @@ dockers:
dockerfile: docker/Dockerfile.goreleaser
extra_files:
- pub
- etc
use: buildx
build_flag_templates:
- "--platform=linux/amd64"
@@ -89,7 +71,6 @@ dockers:
dockerfile: docker/Dockerfile.goreleaser
extra_files:
- pub
- etc
use: buildx
build_flag_templates:
- "--platform=linux/arm64/v8"

View File

@@ -430,4 +430,4 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
limitations under the License.

View File

@@ -1,33 +1,50 @@
# NOTE(review): this Makefile text comes from a rendered diff that interleaves
# lines from two versions (v5 building from ./src vs v6 building from ./cmd/*),
# so some targets below carry duplicate/conflicting recipe lines. Leading tabs
# on recipe lines may also have been lost in this rendering — confirm against
# the repository before use.
# NOTE(review): `start` is declared phony but has no rule here, and the other
# command-style targets (build-alert, build-pushgw, run*, release, pack, ...)
# are not declared phony — consider extending .PHONY.
.PHONY: start build
# Build timestamp (UTC). NOTE(review): %I is the 12-hour clock; %H (24-hour)
# is the usual choice for version stamps — confirm intent.
NOW = $(shell date -u '+%Y%m%d%I%M%S')
APP = n9e
SERVER_BIN = $(APP)
ROOT:=$(shell pwd -P)
# Current commit and nearest tag, combined into the release version string
# that is burned into the binary via -ldflags below.
GIT_COMMIT:=$(shell git --work-tree ${ROOT} rev-parse 'HEAD^{commit}')
_GIT_VERSION:=$(shell git --work-tree ${ROOT} describe --tags --abbrev=14 "${GIT_COMMIT}^{commit}" 2>/dev/null)
TAG=$(shell echo "${_GIT_VERSION}" | awk -F"-" '{print $$1}')
RELEASE_VERSION:="$(TAG)-$(GIT_COMMIT)"
# RELEASE_ROOT = release
# RELEASE_SERVER = release/${APP}
# GIT_COUNT = $(shell git rev-list --all --count)
# GIT_HASH = $(shell git rev-parse --short HEAD)
# RELEASE_TAG = $(RELEASE_VERSION).$(GIT_COUNT).$(GIT_HASH)
all: build
# Build the main server binary.
# NOTE(review): the two go-build lines come from different versions (v6
# ./cmd/center/main.go -> n9e vs v5 ./src -> $(SERVER_BIN)) — diff artifact.
build:
go build -ldflags "-w -s -X github.com/ccfos/nightingale/v6/pkg/version.Version=$(RELEASE_VERSION)" -o n9e ./cmd/center/main.go
go build -ldflags "-w -s -X github.com/didi/nightingale/v5/src/pkg/version.VERSION=$(RELEASE_VERSION)" -o $(SERVER_BIN) ./src
# Build only the alert-engine binary (v6 layout).
build-alert:
go build -ldflags "-w -s -X github.com/ccfos/nightingale/v6/pkg/version.Version=$(RELEASE_VERSION)" -o n9e-alert ./cmd/alert/main.go
# Cross-compile for linux/amd64 (v5 layout — see NOTE above).
build-linux:
GOOS=linux GOARCH=amd64 go build -ldflags "-w -s -X github.com/didi/nightingale/v5/src/pkg/version.VERSION=$(RELEASE_VERSION)" -o $(SERVER_BIN) ./src
# Build only the push-gateway binary (v6 layout).
build-pushgw:
go build -ldflags "-w -s -X github.com/ccfos/nightingale/v6/pkg/version.Version=$(RELEASE_VERSION)" -o n9e-pushgw ./cmd/pushgw/main.go
# start:
# @go run -ldflags "-X main.VERSION=$(RELEASE_TAG)" ./cmd/${APP}/main.go web -c ./configs/config.toml -m ./configs/model.conf --menu ./configs/menu.yaml
# Run targets: detach the corresponding binary with nohup, logging to a file.
# run_webapi/run_server belong to the v5 two-process layout; run/run_alert/
# run_pushgw belong to the v6 layout.
run_webapi:
nohup ./n9e webapi > webapi.log 2>&1 &
build-cli:
go build -ldflags "-w -s -X github.com/ccfos/nightingale/v6/pkg/version.Version=$(RELEASE_VERSION)" -o n9e-cli ./cmd/cli/main.go
run_server:
nohup ./n9e server > server.log 2>&1 &
run:
nohup ./n9e > n9e.log 2>&1 &
# swagger:
# @swag init --parseDependency --generalInfo ./cmd/${APP}/main.go --output ./internal/app/swagger
run_alert:
nohup ./n9e-alert > n9e-alert.log 2>&1 &
# wire:
# @wire gen ./internal/app
run_pushgw:
nohup ./n9e-pushgw > n9e-pushgw.log 2>&1 &
# test:
# cd ./internal/app/test && go test -v
# Local snapshot release via goreleaser (no validation, no publish).
release:
goreleaser --skip-validate --skip-publish --snapshot
# clean:
# rm -rf data release $(SERVER_BIN) internal/app/test/data cmd/${APP}/data
# Package the built binary together with configs and frontend assets.
pack: build
rm -rf $(APP)-$(RELEASE_VERSION).tar.gz
tar -zcvf $(APP)-$(RELEASE_VERSION).tar.gz docker etc $(SERVER_BIN) pub/font pub/index.html pub/assets pub/image

120
README.md
View File

@@ -1,6 +1,7 @@
<p align="center">
<a href="https://github.com/ccfos/nightingale">
<img src="doc/img/nightingale_logo_h.png" alt="nightingale - cloud native monitoring" width="240" /></a>
<img src="doc/img/ccf-n9e.png" alt="nightingale - cloud native monitoring" width="240" /></a>
<p align="center">夜莺是一款开源的云原生监控系统,采用 all-in-one 的设计,提供企业级的功能特性,开箱即用的产品体验。推荐升级您的 Prometheus + AlertManager + Grafana 组合方案到夜莺</p>
</p>
<p align="center">
@@ -10,87 +11,101 @@
<a href="https://hub.docker.com/u/flashcatcloud">
<img alt="Docker pulls" src="https://img.shields.io/docker/pulls/flashcatcloud/nightingale"/></a>
<img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/ccfos/nightingale">
<img alt="GitHub Repo issues" src="https://img.shields.io/github/issues/ccfos/nightingale">
<img alt="GitHub Repo issues closed" src="https://img.shields.io/github/issues-closed/ccfos/nightingale">
<img alt="GitHub forks" src="https://img.shields.io/github/forks/ccfos/nightingale">
<a href="https://github.com/ccfos/nightingale/graphs/contributors">
<img alt="GitHub contributors" src="https://img.shields.io/github/contributors-anon/ccfos/nightingale"/></a>
<img alt="License" src="https://img.shields.io/badge/license-Apache--2.0-blue"/>
</p>
<p align="center">
An open-source cloud-native monitoring system that is <b>all-in-one</b> <br/>
<b>Out-of-the-box</b>, it integrates data collection, visualization, and monitoring alert <br/>
We recommend upgrading your <b>Prometheus + AlertManager + Grafana</b> combination to Nightingale!
</p>
[English](./README.md) | [中文](./README_ZH.md)
[English](./README_EN.md) | [中文](./README.md)
## Highlighted Features
- **Out-of-the-box**
- Supports multiple deployment methods such as **Docker, Helm Chart, and cloud services**, integrates data collection, monitoring, and alerting into one system, and comes with various monitoring dashboards, quick views, and alert rule templates. **It greatly reduces the construction cost, learning cost, and usage cost of cloud-native monitoring systems**.
- **Professional Alerting**
- Provides visual alert configuration and management, supports various alert rules, offers the ability to configure silence and subscription rules, supports multiple alert delivery channels, and has features such as alert self-healing and event management.
- **Cloud-Native**
- Quickly builds an enterprise-level cloud-native monitoring system through a turnkey approach, supports multiple collectors such as [Categraf](https://github.com/flashcatcloud/categraf), Telegraf, and Grafana-agent, supports multiple data sources such as Prometheus, VictoriaMetrics, M3DB, ElasticSearch, and Jaeger, and is compatible with importing Grafana dashboards. **It seamlessly integrates with the cloud-native ecosystem**.
- **High Performance and High Availability**
- Due to the multi-data-source management engine of Nightingale and its excellent architecture design, and utilizing a high-performance time-series database, it can handle data collection, storage, and alert analysis scenarios with billions of time-series data, saving a lot of costs.
- Nightingale components can be horizontally scaled with no single point of failure. It has been deployed in thousands of enterprises and tested in harsh production practices. Many leading Internet companies have used Nightingale for cluster machines with hundreds of nodes, processing billions of time-series data.
- **Flexible Extension and Centralized Management**
- Nightingale can be deployed on a 1-core 1G cloud host, deployed in a cluster of hundreds of machines, or run in Kubernetes. Time-series databases, alert engines, and other components can also be decentralized to various data centers and regions, balancing edge deployment with centralized management. **It solves the problem of data fragmentation and lack of unified views**.
- **开箱即用**
- 支持 Docker、Helm Chart、云服务等多种部署方式,集数据采集、监控告警、可视化为一体,内置多种监控仪表盘、快捷视图、告警规则模板,导入即可快速使用,**大幅降低云原生监控系统的建设成本、学习成本、使用成本**
- **专业告警**
- 可视化的告警配置和管理,支持丰富的告警规则,提供屏蔽规则、订阅规则的配置能力,支持告警多种送达渠道,支持告警自愈、告警事件管理等;
- **云原生**
- 以交钥匙的方式快速构建企业级的云原生监控体系,支持 [**Categraf**](https://github.com/flashcatcloud/categraf)、Telegraf、Grafana-agent 等多种采集器,支持 Prometheus、VictoriaMetrics、M3DB、ElasticSearch 等多种数据库,兼容支持导入 Grafana 仪表盘,**与云原生生态无缝集成**
- **高性能,高可用**
- 得益于夜莺的多数据源管理引擎,和夜莺引擎侧优秀的架构设计,借助于高性能时序库,可以满足数亿时间线的采集、存储、告警分析场景,节省大量成本;
- 夜莺监控组件均可水平扩展,无单点,已在上千家企业部署落地,经受了严苛的生产实践检验。众多互联网头部公司,夜莺集群机器达百台,处理数亿级时间线,重度使用夜莺监控;
- **灵活扩展,中心化管理**
- 夜莺监控,可部署在 1 核 1G 的云主机,可在上百台机器集群化部署,可运行在 K8s 中;也可将时序库、告警引擎等组件下沉到各机房、各 Region兼顾边缘部署和中心化统一管理**解决数据割裂,缺乏统一视图的难题**
- **开放社区**
- 托管于[中国计算机学会开源发展委员会](https://www.ccf.org.cn/kyfzwyh/),有[**快猫星云**](https://flashcat.cloud)和众多公司的持续投入,和数千名社区用户的积极参与,以及夜莺监控项目清晰明确的定位,都保证了夜莺开源社区健康、长久的发展。活跃、专业的社区用户也在持续迭代和沉淀更多的最佳实践于产品中;
> 如果您在使用 Prometheus 过程中,有以下的一个或者多个需求场景,推荐您无缝升级到夜莺:
#### If you are using Prometheus and have one or more of the following requirement scenarios, it is recommended that you upgrade to Nightingale:
- Prometheus、Alertmanager、Grafana 等多个系统较为割裂,缺乏统一视图,无法开箱即用;
- 通过修改配置文件来管理 Prometheus、Alertmanager 的方式,学习曲线大,协同有难度;
- 数据量过大而无法扩展您的 Prometheus 集群;
- 生产环境运行多套 Prometheus 集群,面临管理和使用成本高的问题;
- Multiple systems such as Prometheus, Alertmanager, Grafana, etc. are fragmented and lack a unified view and cannot be used out of the box;
- The way to manage Prometheus and Alertmanager by modifying configuration files has a big learning curve and is difficult to collaborate;
- Too much data to scale-up your Prometheus cluster;
- Multiple Prometheus clusters running in production environments, which faced high management and usage costs;
> 如果您在使用 Zabbix有以下的场景推荐您升级到夜莺
#### If you are using Zabbix and have the following scenarios, it is recommended that you upgrade to Nightingale:
- 监控的数据量太大,希望有更好的扩展解决方案;
- 学习曲线高,多人多团队模式下,希望有更好的协同使用效率;
- 微服务和云原生架构下监控数据的生命周期多变、监控数据维度基数高Zabbix 数据模型不易适配;
- Monitoring too much data and wanting a better scalable solution;
- A high learning curve and a desire for better efficiency of collaborative use in a multi-person, multi-team model;
- Microservice and cloud-native architectures with variable monitoring data lifecycles and high monitoring data dimension bases, which are not easily adaptable to the Zabbix data model;
> 如果您在使用 [Open-Falcon](https://github.com/open-falcon/falcon-plus),我们更推荐您升级到夜莺:
- 关于 Open-Falcon 和夜莺的详细介绍,请参考阅读:[云原生监控的十个特点和趋势](https://mp.weixin.qq.com/s?__biz=MzkzNjI5OTM5Nw==&mid=2247483738&idx=1&sn=e8bdbb974a2cd003c1abcc2b5405dd18&chksm=c2a19fb0f5d616a63185cd79277a79a6b80118ef2185890d0683d2bb20451bd9303c78d083c5#rd)。
#### If you are using [open-falcon](https://github.com/open-falcon/falcon-plus), we recommend you to upgrade to Nightingale
- For more information about open-falcon and Nightingale, please refer to read [Ten features and trends of cloud-native monitoring](https://mp.weixin.qq.com/s?__biz=MzkzNjI5OTM5Nw==&mid=2247483738&idx=1&sn=e8bdbb974a2cd003c1abcc2b5405dd18&chksm=c2a19fb0f5d616a63185cd79277a79a6b80118ef2185890d0683d2bb20451bd9303c78d083c5#rd)。
> 我们推荐您使用 [Categraf](https://github.com/flashcatcloud/categraf) 作为首选的监控数据采集器
- [Categraf](https://github.com/flashcatcloud/categraf) 是夜莺监控的默认采集器采用开放插件机制和 all-in-one 的设计同时支持 metric、log、trace、event 的采集。Categraf 不仅可以采集 CPU、内存、网络等系统层面的指标也集成了众多开源组件的采集能力支持K8s生态。Categraf 内置了对应的仪表盘和告警规则开箱即用。
## Getting Started
[English Doc](https://n9e.github.io/) | [中文文档](http://n9e.flashcat.cloud/)
- [快速安装](https://mp.weixin.qq.com/s/iEC4pfL1TgjMDOWYh8H-FA)
- [详细文档](https://n9e.github.io/)
- [社区分享](https://n9e.github.io/docs/prologue/share/)
## Screenshots
https://user-images.githubusercontent.com/792850/216888712-2565fcea-9df5-47bd-a49e-d60af9bd76e8.mp4
<img src="doc/img/intro.gif" width="480">
## Architecture
<img src="doc/img/arch-product.png" width="600">
<img src="doc/img/arch-product.png" width="480">
Nightingale monitoring can receive monitoring data reported by various collectors (such as [Categraf](https://github.com/flashcatcloud/categraf) , telegraf, grafana-agent, Prometheus, etc.) and write them to various popular time-series databases (such as Prometheus, M3DB, VictoriaMetrics, Thanos, TDEngine, etc.). It provides configuration capabilities for alert rules, silence rules, and subscription rules, as well as the ability to view monitoring data. It also provides automatic alarm self-healing mechanisms (such as automatically calling back to a webhook address or executing a script after an alarm is triggered), and the ability to store and manage historical alarm events and view them in groups.
夜莺监控可以接收各种采集器上报的监控数据(比如 [Categraf](https://github.com/flashcatcloud/categraf)telegrafgrafana-agentPrometheus),并写入多种流行的时序数据库中(可以支持PrometheusM3DBVictoriaMetricsThanosTDEngine提供告警规则、屏蔽规则、订阅规则的配置能力提供监控数据的查看能力提供告警自愈机制告警触发之后自动回调某个webhook地址或者执行某个脚本提供历史告警事件的存储管理、分组查看的能力。
If the performance of a standalone time-series database (such as Prometheus) has bottlenecks or poor disaster recovery, we recommend using [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics). The VictoriaMetrics architecture is relatively simple, has excellent performance, and is easy to deploy and maintain. The architecture diagram is as shown above. For more detailed documentation on VictoriaMetrics, please refer to its [official website](https://victoriametrics.com/).
<img src="doc/img/arch-system.png" width="480">
**We welcome you to participate in the Nightingale open-source project and community in various ways, including but not limited to**
- Adding and improving documentation => [n9e.github.io](https://n9e.github.io/)
- Sharing your best practices and experience in using Nightingale monitoring => [Article sharing](https://n9e.github.io/docs/prologue/share/)
- Submitting product suggestions => [github issue](https://github.com/ccfos/nightingale/issues/new?assignees=&labels=kind%2Ffeature&template=enhancement.md)
- Submitting code to make Nightingale monitoring faster, more stable, and easier to use => [github pull request](https://github.com/didi/nightingale/pulls)
夜莺 v5 版本的设计非常简单,核心是 server 和 webapi 两个模块webapi 无状态放到中心端承接前端请求将用户配置写入数据库server 是告警引擎和数据转发模块,一般随着时序库走,一个时序库就对应一套 server每套 server 可以只用一个实例也可以多个实例组成集群server 可以接收 Categraf、Telegraf、Grafana-Agent、Datadog-Agent、Falcon-Plugins 上报的数据,写入后端时序库,周期性从数据库同步告警规则,然后查询时序库做告警判断。每套 server 依赖一个 redis。
**Respecting, recognizing, and recording the work of every contributor** is the first guiding principle of the Nightingale open-source community. We advocate effective questioning, which not only respects the developer's time but also contributes to the accumulation of knowledge in the entire community
- Before asking a question, please first refer to the [FAQ](https://www.gitlink.org.cn/ccfos/nightingale/wiki/faq)
- We use [GitHub Discussions](https://github.com/ccfos/nightingale/discussions) as the communication forum. You can search and ask questions here.
- We also recommend that you join our [Discord](https://discord.gg/qsRmtAuPw2) to exchange experiences with other Nightingale users.
<img src="doc/img/install-vm.png" width="480">
如果单机版本的时序数据库(比如 Prometheus 性能有瓶颈或容灾较差,我们推荐使用 [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)VictoriaMetrics 架构较为简单性能优异易于部署和运维架构图如上。VictoriaMetrics 更详尽的文档,还请参考其[官网](https://victoriametrics.com/)。
## Who is using Nightingale
You can register your usage and share your experience by posting on **[Who is Using Nightingale](https://github.com/ccfos/nightingale/issues/897)**.
## Community
## Stargazers over time
开源项目要更有生命力,离不开开放的治理架构和源源不断的开发者和用户共同参与,我们致力于建立开放、中立的开源治理架构,吸纳更多来自企业、高校等各方面对云原生监控感兴趣、有热情的开发者,一起打造有活力的夜莺开源社区。关于《夜莺开源项目和社区治理架构(草案)》,请查阅 [COMMUNITY GOVERNANCE](./doc/community-governance.md).
**我们欢迎您以各种方式参与到夜莺开源项目和开源社区中来,工作包括不限于**
- 补充和完善文档 => [n9e.github.io](https://n9e.github.io/)
- 分享您在使用夜莺监控过程中的最佳实践和经验心得 => [文章分享](https://n9e.github.io/docs/prologue/share/)
- 提交产品建议 =》 [github issue](https://github.com/ccfos/nightingale/issues/new?assignees=&labels=kind%2Ffeature&template=enhancement.md)
- 提交代码,让夜莺监控更快、更稳、更好用 => [github pull request](https://github.com/didi/nightingale/pulls)
**尊重、认可和记录每一位贡献者的工作**是夜莺开源社区的第一指导原则,我们提倡**高效的提问**,这既是对开发者时间的尊重,也是对整个社区知识沉淀的贡献:
- 提问之前请先查阅 [FAQ](https://www.gitlink.org.cn/ccfos/nightingale/wiki/faq)
- 提问之前请先搜索 [github issue](https://github.com/ccfos/nightingale/issues)
- 我们优先推荐通过提交 github issue 来提问,如果[有问题点击这里](https://github.com/ccfos/nightingale/issues/new?assignees=&labels=kind%2Fbug&template=bug_report.yml) | [有需求建议点击这里](https://github.com/ccfos/nightingale/issues/new?assignees=&labels=kind%2Ffeature&template=enhancement.md)
- 最后,我们推荐你加入微信群,针对相关开放式问题,相互交流咨询 (请先加好友:[UlricGO](https://www.gitlink.org.cn/UlricQin/gist/tree/master/self.jpeg) 备注:夜莺加群+姓名+公司,交流群里会有开发者团队和专业、热心的群友回答问题)
## Who is using
您可以通过在 **[Who is Using Nightingale](https://github.com/ccfos/nightingale/issues/897)** 登记您的使用情况,分享您的使用经验。
## Stargazers
[![Stargazers over time](https://starchart.cc/ccfos/nightingale.svg)](https://starchart.cc/ccfos/nightingale)
## Contributors
@@ -99,4 +114,9 @@ You can register your usage and share your experience by posting on **[Who is Us
</a>
## License
[Apache License V2.0](https://github.com/didi/nightingale/blob/main/LICENSE)
[Apache License V2.0](https://github.com/didi/nightingale/blob/main/LICENSE)
## Contact Us
推荐您关注夜莺监控公众号,及时获取相关产品和社区动态:
<img src="doc/img/n9e-vx-new.png" width="120">

68
README_EN.md Normal file
View File

@@ -0,0 +1,68 @@
<img src="doc/img/ccf-n9e.png" width="240">
Nightingale is an enterprise-level cloud-native monitoring system, which can be used as drop-in replacement of Prometheus for alerting and management.
[English](./README_EN.md) | [中文](./README.md)
## Introduction
Nightingale is a cloud-native monitoring system with an all-in-one design, supporting enterprise-class features with an out-of-the-box experience. We recommend upgrading your `Prometheus` + `AlertManager` + `Grafana` combo solution to Nightingale.
- **Multiple prometheus data sources management**: manage all alerts and dashboards in one centralized visually view;
- **Out-of-the-box alert rule**: built-in multiple alert rules, reuse alert rules template by one-click import with detailed explanation of metrics;
- **Multiple modes for visualizing data**: out-of-the-box dashboards, instance customize views, expression browser and Grafana integration;
- **Multiple collection clients**: support using Prometheus Exporter, Telegraf, and Datadog Agent to collect metrics;
- **Integration of multiple storage**: support Prometheus, M3DB, VictoriaMetrics, Influxdb, TDEngine as storage solutions, and original support for PromQL;
- **Fault self-healing**: support the ability to self-heal from failures by configuring webhook;
#### If you are using Prometheus and have one or more of the following requirement scenarios, it is recommended that you upgrade to Nightingale:
- Multiple systems such as Prometheus, Alertmanager, Grafana, etc. are fragmented and lack a unified view and cannot be used out of the box;
- The way to manage Prometheus and Alertmanager by modifying configuration files has a big learning curve and is difficult to collaborate;
- Too much data to scale-up your Prometheus cluster;
- Multiple Prometheus clusters running in production environments, which faced high management and usage costs;
#### If you are using Zabbix and have the following scenarios, it is recommended that you upgrade to Nightingale:
- Monitoring too much data and wanting a better scalable solution;
- A high learning curve and a desire for better efficiency of collaborative use in a multi-person, multi-team model;
- Microservice and cloud-native architectures with variable monitoring data lifecycles and high monitoring data dimension bases, which are not easily adaptable to the Zabbix data model;
#### If you are using [open-falcon](https://github.com/open-falcon/falcon-plus), we recommend you to upgrade to Nightingale
- For more information about open-falcon and Nightingale, please refer to read [Ten features and trends of cloud-native monitoring](https://mp.weixin.qq.com/s?__biz=MzkzNjI5OTM5Nw==&mid=2247483738&idx=1&sn=e8bdbb974a2cd003c1abcc2b5405dd18&chksm=c2a19fb0f5d616a63185cd79277a79a6b80118ef2185890d0683d2bb20451bd9303c78d083c5#rd)。
## Quickstart
- [n9e.github.io/quickstart](https://n9e.github.io/docs/install/compose/)
## Documentation
- [n9e.github.io](https://n9e.github.io/)
## Example of use
<img src="doc/img/intro.gif" width="680">
## System Architecture
#### A typical Nightingale deployment architecture:
<img src="doc/img/arch-system.png" width="680">
#### Typical deployment architecture using VictoriaMetrics as storage:
<img src="doc/img/install-vm.png" width="680">
## Contact us and feedback questions
- We recommend that you use [github issue](https://github.com/didi/nightingale/issues) as the preferred channel for issue feedback and requirement submission;
- You can join our WeChat group
<img src="doc/img/n9e-vx-new.png" width="180">
## Contributing
We welcome your participation in the Nightingale open source project and open source community in a variety of ways:
- Feedback on problems and bugs => [github issue](https://github.com/didi/nightingale/issues)
- Additional and improved documentation => [n9e.github.io](https://n9e.github.io/)
- Share your best practices and insights on using Nightingale => [User Story](https://github.com/didi/nightingale/issues/897)
- Join our community events => [Nightingale wechat group](https://s3-gz01.didistatic.com/n9e-pub/image/n9e-wx.png)
- Submit code to make Nightingale better =>[github PR](https://github.com/didi/nightingale/pulls)
## License
Nightingale with [Apache License V2.0](https://github.com/didi/nightingale/blob/main/LICENSE) open source license.

View File

@@ -1,127 +0,0 @@
<p align="center">
<a href="https://github.com/ccfos/nightingale">
<img src="doc/img/nightingale_logo_h.png" alt="nightingale - cloud native monitoring" width="240" /></a>
</p>
<p align="center">
<img alt="GitHub latest release" src="https://img.shields.io/github/v/release/ccfos/nightingale"/>
<a href="https://n9e.github.io">
<img alt="Docs" src="https://img.shields.io/badge/docs-get%20started-brightgreen"/></a>
<a href="https://hub.docker.com/u/flashcatcloud">
<img alt="Docker pulls" src="https://img.shields.io/docker/pulls/flashcatcloud/nightingale"/></a>
<img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/ccfos/nightingale">
<img alt="GitHub Repo issues" src="https://img.shields.io/github/issues/ccfos/nightingale">
<img alt="GitHub Repo issues closed" src="https://img.shields.io/github/issues-closed/ccfos/nightingale">
<img alt="GitHub forks" src="https://img.shields.io/github/forks/ccfos/nightingale">
<a href="https://github.com/ccfos/nightingale/graphs/contributors">
<img alt="GitHub contributors" src="https://img.shields.io/github/contributors-anon/ccfos/nightingale"/></a>
<img alt="License" src="https://img.shields.io/badge/license-Apache--2.0-blue"/>
</p>
<p align="center">
<b>All-in-one</b> 的开源云原生监控系统 <br/>
<b>开箱即用</b>,集数据采集、可视化、监控告警于一体 <br/>
推荐升级您的 <b>Prometheus + AlertManager + Grafana</b> 组合方案到夜莺!
</p>
[English](./README.md) | [中文](./README_ZH.md)
## Highlighted Features
- **开箱即用**
- 支持 Docker、Helm Chart、云服务等多种部署方式集数据采集、监控告警、可视化为一体内置多种监控仪表盘、快捷视图、告警规则模板导入即可快速使用**大幅降低云原生监控系统的建设成本、学习成本、使用成本**
- **专业告警**
- 可视化的告警配置和管理,支持丰富的告警规则,提供屏蔽规则、订阅规则的配置能力,支持告警多种送达渠道,支持告警自愈、告警事件管理等;
- **云原生**
- 以交钥匙的方式快速构建企业级的云原生监控体系,支持 [Categraf](https://github.com/flashcatcloud/categraf)、Telegraf、Grafana-agent 等多种采集器,支持 Prometheus、VictoriaMetrics、M3DB、ElasticSearch、Jaeger 等多种数据源,兼容支持导入 Grafana 仪表盘,**与云原生生态无缝集成**
- **高性能 高可用**
- 得益于夜莺的多数据源管理引擎,和夜莺引擎侧优秀的架构设计,借助于高性能时序库,可以满足数亿时间线的采集、存储、告警分析场景,节省大量成本;
- 夜莺监控组件均可水平扩展,无单点,已在上千家企业部署落地,经受了严苛的生产实践检验。众多互联网头部公司,夜莺集群机器达百台,处理数亿级时间线,重度使用夜莺监控;
- **灵活扩展 中心化管理**
- 夜莺监控,可部署在 1 核 1G 的云主机,可在上百台机器集群化部署,可运行在 K8s 中;也可将时序库、告警引擎等组件下沉到各机房、各 Region兼顾边缘部署和中心化统一管理**解决数据割裂,缺乏统一视图的难题**
- **开放社区**
- 托管于[中国计算机学会开源发展委员会](https://www.ccf.org.cn/kyfzwyh/),有[快猫星云](https://flashcat.cloud)和众多公司的持续投入,和数千名社区用户的积极参与,以及夜莺监控项目清晰明确的定位,都保证了夜莺开源社区健康、长久的发展。活跃、专业的社区用户也在持续迭代和沉淀更多的最佳实践于产品中;
**如果您在使用 Prometheus 过程中,有以下的一个或者多个需求场景,推荐您无缝升级到夜莺**
- Prometheus、Alertmanager、Grafana 等多个系统较为割裂,缺乏统一视图,无法开箱即用;
- 通过修改配置文件来管理 Prometheus、Alertmanager 的方式,学习曲线大,协同有难度;
- 数据量过大而无法扩展您的 Prometheus 集群;
- 生产环境运行多套 Prometheus 集群,面临管理和使用成本高的问题;
**如果您在使用 Zabbix有以下的场景推荐您升级到夜莺**
- 监控的数据量太大,希望有更好的扩展解决方案;
- 学习曲线高,多人多团队模式下,希望有更好的协同使用效率;
- 微服务和云原生架构下监控数据的生命周期多变、监控数据维度基数高Zabbix 数据模型不易适配;
> 了解更多Zabbix和夜莺监控的对比推荐您进一步阅读[《Zabbix 和夜莺监控选型对比》](https://flashcat.cloud/blog/zabbx-vs-nightingale/)
**如果您在使用 [Open-Falcon](https://github.com/open-falcon/falcon-plus),我们推荐您升级到夜莺:**
- 关于 Open-Falcon 和夜莺的详细介绍,请参考阅读:[《云原生监控的十个特点和趋势》](http://flashcat.cloud/blog/10-trends-of-cloudnative-monitoring/)
**我们推荐您使用 [Categraf](https://github.com/flashcatcloud/categraf) 作为首选的监控数据采集器**
- [Categraf](https://github.com/flashcatcloud/categraf) 是夜莺监控的默认采集器,采用开放插件机制和 All-in-one 的设计理念,同时支持 metric、log、trace、event 的采集。Categraf 不仅可以采集 CPU、内存、网络等系统层面的指标也集成了众多开源组件的采集能力支持K8s生态。Categraf 内置了对应的仪表盘和告警规则,开箱即用。
## Getting Started
[English Doc](https://n9e.github.io/) | [中文文档](http://n9e.flashcat.cloud/)
## Screenshots
https://user-images.githubusercontent.com/792850/216888712-2565fcea-9df5-47bd-a49e-d60af9bd76e8.mp4
## Architecture
<img src="doc/img/arch-product.png" width="600">
夜莺监控可以接收各种采集器上报的监控数据(比如 [Categraf](https://github.com/flashcatcloud/categraf)、telegraf、grafana-agent、Prometheus并写入多种流行的时序数据库中可以支持Prometheus、M3DB、VictoriaMetrics、Thanos、TDEngine等提供告警规则、屏蔽规则、订阅规则的配置能力提供监控数据的查看能力提供告警自愈机制告警触发之后自动回调某个webhook地址或者执行某个脚本提供历史告警事件的存储管理、分组查看的能力。
<img src="doc/img/arch-system.png" width="600">
夜莺 v5 版本的设计非常简单,核心是 server 和 webapi 两个模块webapi 无状态放到中心端承接前端请求将用户配置写入数据库server 是告警引擎和数据转发模块,一般随着时序库走,一个时序库就对应一套 server每套 server 可以只用一个实例也可以多个实例组成集群server 可以接收 Categraf、Telegraf、Grafana-Agent、Datadog-Agent、Falcon-Plugins 上报的数据,写入后端时序库,周期性从数据库同步告警规则,然后查询时序库做告警判断。每套 server 依赖一个 redis。
<img src="doc/img/install-vm.png" width="600">
如果单机版本的时序数据库(比如 Prometheus 性能有瓶颈或容灾较差,我们推荐使用 [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)VictoriaMetrics 架构较为简单性能优异易于部署和运维架构图如上。VictoriaMetrics 更详尽的文档,还请参考其[官网](https://victoriametrics.com/)。
## Community
开源项目要更有生命力,离不开开放的治理架构和源源不断的开发者和用户共同参与,我们致力于建立开放、中立的开源治理架构,吸纳更多来自企业、高校等各方面对云原生监控感兴趣、有热情的开发者,一起打造有活力的夜莺开源社区。关于《夜莺开源项目和社区治理架构(草案)》,请查阅 [COMMUNITY GOVERNANCE](./doc/community-governance.md).
**我们欢迎您以各种方式参与到夜莺开源项目和开源社区中来,工作包括不限于**
- 补充和完善文档 => [n9e.github.io](https://n9e.github.io/)
- 分享您在使用夜莺监控过程中的最佳实践和经验心得 => [文章分享](https://n9e.github.io/docs/prologue/share/)
- 提交产品建议 =》 [github issue](https://github.com/ccfos/nightingale/issues/new?assignees=&labels=kind%2Ffeature&template=enhancement.md)
- 提交代码,让夜莺监控更快、更稳、更好用 => [github pull request](https://github.com/didi/nightingale/pulls)
**尊重、认可和记录每一位贡献者的工作**是夜莺开源社区的第一指导原则,我们提倡**高效的提问**,这既是对开发者时间的尊重,也是对整个社区知识沉淀的贡献:
- 提问之前请先查阅 [FAQ](https://www.gitlink.org.cn/ccfos/nightingale/wiki/faq)
- 我们使用[GitHub Discussions](https://github.com/ccfos/nightingale/discussions)作为交流论坛,有问题可以到这里搜索、提问
- 我们也推荐你加入微信群,和其他夜莺用户交流经验 (请先加好友:[picobyte](https://www.gitlink.org.cn/UlricQin/gist/tree/master/self.jpeg) 备注:夜莺加群+姓名+公司)
## Who is using Nightingale
您可以通过在 **[Who is Using Nightingale](https://github.com/ccfos/nightingale/issues/897)** 登记您的使用情况,分享您的使用经验。
## Stargazers over time
[![Stargazers over time](https://starchart.cc/ccfos/nightingale.svg)](https://starchart.cc/ccfos/nightingale)
## Contributors
<a href="https://github.com/ccfos/nightingale/graphs/contributors">
<img src="https://contrib.rocks/image?repo=ccfos/nightingale" />
</a>
## License
[Apache License V2.0](https://github.com/didi/nightingale/blob/main/LICENSE)
## 加入交流群
<img src="doc/img/wecom.png" width="120">

View File

@@ -1,69 +0,0 @@
// Package aconf defines the configuration structures consumed by the
// alert engine: heartbeat settings, alerting/notification options, and
// auxiliary notification channels (plugin call, redis pub, ibex).
package aconf
import (
"path"
"github.com/toolkits/pkg/runner"
)
// Alert is the top-level configuration for the alert engine.
type Alert struct {
// EngineDelay — semantics not visible in this file; presumably a startup
// delay for the evaluation engine. TODO(review): confirm units/meaning.
EngineDelay int64
Heartbeat HeartbeatConfig
Alerting Alerting
}
// SMTPConfig holds the SMTP settings used by the email sender.
type SMTPConfig struct {
Host string
Port int
User string
Pass string
From string
InsecureSkipVerify bool
// Batch — presumably the number of mails sent per connection/batch;
// confirm against the email sender implementation.
Batch int
}
// HeartbeatConfig describes how this alert instance reports itself.
type HeartbeatConfig struct {
IP string
// Interval between heartbeats; defaulted to 1000 by Alert.PreCheck
// (units not visible here — presumably milliseconds).
Interval int64
Endpoint string
ClusterName string
}
// Alerting holds notification-dispatch options.
type Alerting struct {
Timeout int64
// TemplatesDir is where notification templates live; defaulted to
// <cwd>/etc/template by Alert.PreCheck.
TemplatesDir string
// NotifyConcurrency is the number of concurrent notify workers;
// defaulted to 10 by Alert.PreCheck.
NotifyConcurrency int
}
// CallPlugin configures an optional external notification plugin.
type CallPlugin struct {
Enable bool
PluginPath string
Caller string
}
// RedisPub configures publishing alert events to a redis channel.
type RedisPub struct {
Enable bool
ChannelPrefix string
ChannelKey string
}
// Ibex holds connection settings for the ibex (task executor) service.
type Ibex struct {
Address string
BasicAuthUser string
BasicAuthPass string
Timeout int64
}
// PreCheck fills in defaults for any alerting/heartbeat settings left
// unset in the configuration: heartbeat interval 1000 (presumably ms),
// notify concurrency 10, and templates directory <cwd>/etc/template.
// The three checks are independent of each other.
func (a *Alert) PreCheck() {
	if a.Heartbeat.Interval == 0 {
		a.Heartbeat.Interval = 1000
	}
	if a.Alerting.NotifyConcurrency == 0 {
		a.Alerting.NotifyConcurrency = 10
	}
	if a.Alerting.TemplatesDir == "" {
		a.Alerting.TemplatesDir = path.Join(runner.Cwd, "etc", "template")
	}
}

View File

@@ -1,103 +0,0 @@
package alert
import (
"context"
"fmt"
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/alert/dispatch"
"github.com/ccfos/nightingale/v6/alert/eval"
"github.com/ccfos/nightingale/v6/alert/naming"
"github.com/ccfos/nightingale/v6/alert/process"
"github.com/ccfos/nightingale/v6/alert/queue"
"github.com/ccfos/nightingale/v6/alert/record"
"github.com/ccfos/nightingale/v6/alert/router"
"github.com/ccfos/nightingale/v6/alert/sender"
"github.com/ccfos/nightingale/v6/conf"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/httpx"
"github.com/ccfos/nightingale/v6/pkg/logx"
"github.com/ccfos/nightingale/v6/prom"
"github.com/ccfos/nightingale/v6/pushgw/pconf"
"github.com/ccfos/nightingale/v6/pushgw/writer"
"github.com/ccfos/nightingale/v6/storage"
)
// Initialize boots a standalone alert service: load configuration, set up
// logging, open the database and redis connections, build the in-memory
// caches, start the alert engine (Start) and the HTTP API. It returns a
// cleanup function that tears down logging and the HTTP server, or an error
// if any initialization step fails.
// NOTE(review): the db/redis handles are not closed by the returned cleanup
// function — confirm whether the storage package owns their lifecycle.
func Initialize(configDir string, cryptoKey string) (func(), error) {
config, err := conf.InitConfig(configDir, cryptoKey)
if err != nil {
return nil, fmt.Errorf("failed to init config: %v", err)
}
logxClean, err := logx.Init(config.Log)
if err != nil {
return nil, err
}
db, err := storage.New(config.DB)
if err != nil {
return nil, err
}
// Wrap the DB handle in the project's context abstraction used by caches.
ctx := ctx.NewContext(context.Background(), db)
redis, err := storage.NewRedis(config.Redis)
if err != nil {
return nil, err
}
// In-memory caches synced from the database; syncStats/alertStats collect
// sync and alerting metrics respectively.
syncStats := memsto.NewSyncStats()
alertStats := astats.NewSyncStats()
targetCache := memsto.NewTargetCache(ctx, syncStats, redis)
busiGroupCache := memsto.NewBusiGroupCache(ctx, syncStats)
alertMuteCache := memsto.NewAlertMuteCache(ctx, syncStats)
alertRuleCache := memsto.NewAlertRuleCache(ctx, syncStats)
notifyConfigCache := memsto.NewNotifyConfigCache(ctx)
dsCache := memsto.NewDatasourceCache(ctx, syncStats)
promClients := prom.NewPromClient(ctx, config.Alert.Heartbeat)
externalProcessors := process.NewExternalProcessors()
// isCenter=false: this process runs as a standalone alert engine.
Start(config.Alert, config.Pushgw, syncStats, alertStats, externalProcessors, targetCache, busiGroupCache, alertMuteCache, alertRuleCache, notifyConfigCache, dsCache, ctx, promClients, false)
// Expose the alert HTTP API.
r := httpx.GinEngine(config.Global.RunMode, config.HTTP)
rt := router.New(config.HTTP, config.Alert, alertMuteCache, targetCache, busiGroupCache, alertStats, ctx, externalProcessors)
rt.Config(r)
httpClean := httpx.Init(config.HTTP, r)
return func() {
logxClean()
httpClean()
}, nil
}
// Start boots the alert pipeline: naming/heartbeat, remote-write writers,
// the recording-rule scheduler, the alert-rule evaluation scheduler, and the
// notification dispatch/consume goroutines.
// isCenter controls behavior that must only run on the center instance
// (e.g. host-rule evaluation inside eval.NewScheduler).
func Start(alertc aconf.Alert, pushgwc pconf.Pushgw, syncStats *memsto.Stats, alertStats *astats.Stats, externalProcessors *process.ExternalProcessorsType, targetCache *memsto.TargetCacheType, busiGroupCache *memsto.BusiGroupCacheType,
	alertMuteCache *memsto.AlertMuteCacheType, alertRuleCache *memsto.AlertRuleCacheType, notifyConfigCache *memsto.NotifyConfigCacheType, datasourceCache *memsto.DatasourceCacheType, ctx *ctx.Context, promClients *prom.PromClientMap, isCenter bool) {
	userCache := memsto.NewUserCache(ctx, syncStats)
	userGroupCache := memsto.NewUserGroupCache(ctx, syncStats)
	alertSubscribeCache := memsto.NewAlertSubscribeCache(ctx, syncStats)
	recordingRuleCache := memsto.NewRecordingRuleCache(ctx, syncStats)

	// seed notify-config (templates etc.) in the background
	go models.InitNotifyConfig(ctx, alertc.Alerting.TemplatesDir)

	naming := naming.NewNaming(ctx, alertc.Heartbeat, isCenter)

	writers := writer.NewWriters(pushgwc)
	// both schedulers start their own sync loops internally
	record.NewScheduler(alertc, recordingRuleCache, promClients, writers, alertStats)

	eval.NewScheduler(isCenter, alertc, externalProcessors, alertRuleCache, targetCache, busiGroupCache, alertMuteCache, datasourceCache, promClients, naming, ctx, alertStats)

	dp := dispatch.NewDispatch(alertRuleCache, userCache, userGroupCache, alertSubscribeCache, targetCache, notifyConfigCache, alertc.Alerting, ctx)
	consumer := dispatch.NewConsumer(alertc.Alerting, ctx, dp)

	go dp.ReloadTpls()
	go consumer.LoopConsume()

	go queue.ReportQueueSize(alertStats)
	go sender.StartEmailSender(notifyConfigCache.GetSMTP()) // todo
}

View File

@@ -1,45 +0,0 @@
package common
import (
"fmt"
"github.com/ccfos/nightingale/v6/models"
)
// RuleKey builds the cache/worker key for a rule bound to a datasource,
// in the form "alert-<datasourceId>-<ruleId>".
func RuleKey(datasourceId, id int64) string {
	return fmt.Sprint("alert-", datasourceId, "-", id)
}
// MatchTags reports whether the event's tags satisfy every filter in itags.
// A filter whose key is absent from the event fails the match.
func MatchTags(eventTagsMap map[string]string, itags []models.TagFilter) bool {
	for i := range itags {
		tagValue, ok := eventTagsMap[itags[i].Key]
		if !ok || !matchTag(tagValue, itags[i]) {
			return false
		}
	}
	return true
}
// matchTag applies a single tag filter to a tag value. Supported operators:
// ==, !=, in, not in, =~ (regexp match), !~ (regexp non-match).
// An unknown operator never matches.
func matchTag(value string, filter models.TagFilter) bool {
	switch filter.Func {
	case "==":
		return value == filter.Value
	case "!=":
		return value != filter.Value
	case "=~":
		return filter.Regexp.MatchString(value)
	case "!~":
		return !filter.Regexp.MatchString(value)
	case "in":
		_, ok := filter.Vset[value]
		return ok
	case "not in":
		_, ok := filter.Vset[value]
		return !ok
	default:
		// unknown operator: treat as no match
		return false
	}
}

View File

@@ -1,159 +0,0 @@
package dispatch
import (
"fmt"
"time"
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/alert/queue"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/toolkits/pkg/concurrent/semaphore"
"github.com/toolkits/pkg/logger"
)
// Consumer drains the in-memory alert event queue and hands each event to
// the Dispatch for persistence and notification.
type Consumer struct {
	alerting aconf.Alerting // alerting config (notify concurrency, etc.)
	ctx      *ctx.Context   // DB access context
	dispatch *Dispatch      // downstream notifier
}
// NewConsumer builds a Consumer that drains the event queue and hands
// events to the given dispatcher.
func NewConsumer(alerting aconf.Alerting, ctx *ctx.Context, dispatch *Dispatch) *Consumer {
	return &Consumer{
		alerting: alerting,
		ctx:      ctx,
		dispatch: dispatch,
	}
}
// LoopConsume runs forever, popping events from the global queue in batches
// of up to 100 and fanning them out under a concurrency-bounded semaphore.
// When the queue is empty it naps briefly instead of busy-spinning.
func (e *Consumer) LoopConsume() {
	sema := semaphore.NewSemaphore(e.alerting.NotifyConcurrency)
	idleWait := 100 * time.Millisecond
	for {
		if events := queue.EventQueue.PopBackBy(100); len(events) > 0 {
			e.consume(events, sema)
			continue
		}
		time.Sleep(idleWait)
	}
}
// consume processes a batch of queued events, one goroutine per event,
// bounded by the semaphore. Nil entries are skipped.
func (e *Consumer) consume(events []interface{}, sema *semaphore.Semaphore) {
	for _, item := range events {
		if item == nil {
			continue
		}
		ev := item.(*models.AlertCurEvent)
		sema.Acquire()
		go func(ev *models.AlertCurEvent) {
			defer sema.Release()
			e.consumeOne(ev)
		}(ev)
	}
}
// consumeOne renders the rule's templated fields, persists the event, and
// dispatches notifications — unless it is a recovery event for a rule that
// opted out of recovery notifications.
func (e *Consumer) consumeOne(event *models.AlertCurEvent) {
	LogEvent(event, "consume")

	// Render templated fields; on failure, store the error text in the field
	// so the notification still carries a diagnostic instead of a broken value.
	if err := event.ParseRule("rule_name"); err != nil {
		event.RuleName = fmt.Sprintf("failed to parse rule name: %v", err)
	}

	if err := event.ParseRule("rule_note"); err != nil {
		event.RuleNote = fmt.Sprintf("failed to parse rule note: %v", err)
	}

	if err := event.ParseRule("annotations"); err != nil {
		// bug fix: this message previously said "rule note", copied from the
		// branch above, which made the diagnostic misleading
		event.Annotations = fmt.Sprintf("failed to parse annotations: %v", err)
	}

	e.persist(event)

	// recovery events are only notified when the rule opted in
	if event.IsRecovered && event.NotifyRecovered == 0 {
		return
	}

	e.dispatch.HandleEventNotify(event, false)
}
// persist records the event in the history table (always) and reconciles the
// active-alerts table (alert_cur_event): a firing event replaces any previous
// row with the same hash; a recovery event removes the row.
// NOTE: the delete-then-insert ordering below is deliberate — do not reorder.
func (e *Consumer) persist(event *models.AlertCurEvent) {
	has, err := models.AlertCurEventExists(e.ctx, "hash=?", event.Hash)
	if err != nil {
		logger.Errorf("event_persist_check_exists_fail: %v rule_id=%d hash=%s", err, event.RuleId, event.Hash)
		return
	}

	his := event.ToHis(e.ctx)

	// every event — firing or recovered — is recorded in the full history table
	if err := his.Add(e.ctx); err != nil {
		logger.Errorf(
			"event_persist_his_fail: %v rule_id=%d cluster:%s hash=%s tags=%v timestamp=%d value=%s",
			err,
			event.RuleId,
			event.Cluster,
			event.Hash,
			event.TagsJSON,
			event.TriggerTime,
			event.TriggerValue,
		)
	}

	if has {
		// an active row with this hash exists: delete it first
		err = models.AlertCurEventDelByHash(e.ctx, event.Hash)
		if err != nil {
			logger.Errorf("event_del_cur_fail: %v hash=%s", err, event.Hash)
			return
		}

		if !event.IsRecovered {
			// recovery: drop the event from the active list for good;
			// still firing: re-insert the refreshed event
			// use his id as cur id
			event.Id = his.Id
			if event.Id > 0 {
				if err := event.Add(e.ctx); err != nil {
					logger.Errorf(
						"event_persist_cur_fail: %v rule_id=%d cluster:%s hash=%s tags=%v timestamp=%d value=%s",
						err,
						event.RuleId,
						event.Cluster,
						event.Hash,
						event.TagsJSON,
						event.TriggerTime,
						event.TriggerValue,
					)
				}
			}
		}

		return
	}

	if event.IsRecovered {
		// no active row, yet a recovery arrived — should not happen in theory;
		// nothing to reconcile, so just return
		return
	}

	// use his id as cur id
	event.Id = his.Id
	if event.Id > 0 {
		if err := event.Add(e.ctx); err != nil {
			logger.Errorf(
				"event_persist_cur_fail: %v rule_id=%d cluster:%s hash=%s tags=%v timestamp=%d value=%s",
				err,
				event.RuleId,
				event.Cluster,
				event.Hash,
				event.TagsJSON,
				event.TriggerTime,
				event.TriggerValue,
			)
		}
	}
}

View File

@@ -1,263 +0,0 @@
package dispatch
import (
"bytes"
"encoding/json"
"html/template"
"strconv"
"sync"
"time"
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/alert/sender"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/toolkits/pkg/logger"
)
// Dispatch routes alert events to notification channels: it resolves the
// receiving users/groups, renders templates, and forwards events to channel
// senders, webhooks, callbacks, and the optional notify script.
// senders and tpls are rebuilt periodically by ReloadTpls and guarded by RwLock.
type Dispatch struct {
	alertRuleCache      *memsto.AlertRuleCacheType
	userCache           *memsto.UserCacheType
	userGroupCache      *memsto.UserGroupCacheType
	alertSubscribeCache *memsto.AlertSubscribeCacheType
	targetCache         *memsto.TargetCacheType
	notifyConfigCache   *memsto.NotifyConfigCacheType

	alerting aconf.Alerting

	senders map[string]sender.Sender     // channel name -> sender, rebuilt on reload
	tpls    map[string]*template.Template // template name -> parsed template

	ctx *ctx.Context

	RwLock sync.RWMutex // protects senders and tpls
}
// NewDispatch builds a Dispatch wired to the given caches and alerting
// config. senders and tpls start empty; ReloadTpls populates them.
func NewDispatch(alertRuleCache *memsto.AlertRuleCacheType, userCache *memsto.UserCacheType, userGroupCache *memsto.UserGroupCacheType,
	alertSubscribeCache *memsto.AlertSubscribeCacheType, targetCache *memsto.TargetCacheType, notifyConfigCache *memsto.NotifyConfigCacheType,
	alerting aconf.Alerting, ctx *ctx.Context) *Dispatch {
	return &Dispatch{
		alertRuleCache:      alertRuleCache,
		userCache:           userCache,
		userGroupCache:      userGroupCache,
		alertSubscribeCache: alertSubscribeCache,
		targetCache:         targetCache,
		notifyConfigCache:   notifyConfigCache,
		alerting:            alerting,
		senders:             make(map[string]sender.Sender),
		tpls:                make(map[string]*template.Template),
		ctx:                 ctx,
	}
}
// ReloadTpls loads the notification templates and senders once, then
// refreshes them every 9 seconds forever. Failures are logged and retried
// on the next tick; the function never returns in practice.
func (e *Dispatch) ReloadTpls() error {
	err := e.relaodTpls()
	if err != nil {
		// bug fix: was logger.Error("... %v", err) — Error does not format,
		// so the message was printed with a literal %v; Errorf is the
		// formatting variant
		logger.Errorf("failed to reload tpls: %v", err)
	}

	duration := time.Duration(9000) * time.Millisecond
	for {
		time.Sleep(duration)
		if err := e.relaodTpls(); err != nil {
			logger.Warning("failed to reload tpls:", err)
		}
	}
}
// relaodTpls (sic — typo kept because callers use this name) fetches the
// current template set from the DB, rebuilds one sender per channel, and
// atomically swaps both into the Dispatch under the write lock.
func (e *Dispatch) relaodTpls() error {
	tmpTpls, err := models.ListTpls(e.ctx)
	if err != nil {
		return err
	}

	smtp := e.notifyConfigCache.GetSMTP()
	senders := map[string]sender.Sender{
		models.Email:    sender.NewSender(models.Email, tmpTpls, smtp),
		models.Dingtalk: sender.NewSender(models.Dingtalk, tmpTpls, smtp),
		models.Wecom:    sender.NewSender(models.Wecom, tmpTpls, smtp),
		models.Feishu:   sender.NewSender(models.Feishu, tmpTpls, smtp),
		models.Mm:       sender.NewSender(models.Mm, tmpTpls, smtp),
		models.Telegram: sender.NewSender(models.Telegram, tmpTpls, smtp),
	}

	e.RwLock.Lock()
	e.tpls = tmpTpls
	e.senders = senders
	e.RwLock.Unlock()
	return nil
}
// HandleEventNotify is the main entry point for event notification.
// event: an alerting/recovery event.
// isSubscribe: whether this event was produced by a subscription rule.
// It resolves the notification targets via a set of dispatch handlers,
// sends asynchronously, and (for non-subscription events) fans the event
// out to matching subscription rules.
func (e *Dispatch) HandleEventNotify(event *models.AlertCurEvent, isSubscribe bool) {
	rule := e.alertRuleCache.Get(event.RuleId)
	if rule == nil {
		return
	}
	fillUsers(event, e.userCache, e.userGroupCache)

	var (
		// handlers map the event to notify targets; results are OR-merged
		handlers []NotifyTargetDispatch

		// interceptors remove targets (e.g. channel=false disables a channel
		// after the merge); results are AND-merged. Implement a
		// NotifyTargetDispatch and add it here to use this hook.
		interceptorHandlers []NotifyTargetDispatch
	)
	if isSubscribe {
		handlers = []NotifyTargetDispatch{NotifyGroupDispatch, EventCallbacksDispatch}
	} else {
		handlers = []NotifyTargetDispatch{NotifyGroupDispatch, GlobalWebhookDispatch, EventCallbacksDispatch}
	}

	notifyTarget := NewNotifyTarget()
	// collect targets from every handler (OR-merge = union)
	for _, handler := range handlers {
		notifyTarget.OrMerge(handler(rule, event, notifyTarget, e))
	}

	// drop targets, e.g. departed employees or temporarily silenced channels
	for _, handler := range interceptorHandlers {
		notifyTarget.AndMerge(handler(rule, event, notifyTarget, e))
	}

	// one goroutine handles all sends for this event
	go e.Send(rule, event, notifyTarget, isSubscribe)

	// only non-subscription events trigger subscription processing,
	// which prevents infinite recursion
	if !isSubscribe {
		e.handleSubs(event)
	}
}
// handleSubs fans the event out to alert subscriptions: rule-specific ones
// (keyed by the rule id) plus global ones (keyed by 0).
func (e *Dispatch) handleSubs(event *models.AlertCurEvent) {
	var subscribes []*models.AlertSubscribe
	for _, key := range []int64{event.RuleId, 0} {
		if subs, has := e.alertSubscribeCache.Get(key); has {
			subscribes = append(subscribes, subs...)
		}
	}

	for _, sub := range subscribes {
		e.handleSub(sub, *event)
	}
}
// handleSub processes one subscription rule against the event. The event is
// passed BY VALUE on purpose: the subscription mutates its copy before
// re-dispatching it as a subscription-originated notification.
func (e *Dispatch) handleSub(sub *models.AlertSubscribe, event models.AlertCurEvent) {
	if sub.IsDisabled() || !sub.MatchCluster(event.DatasourceId) {
		return
	}
	if !common.MatchTags(event.TagsMap, sub.ITags) {
		return
	}
	// the event must have been firing at least ForDuration seconds
	if sub.ForDuration > (event.TriggerTime - event.FirstTriggerTime) {
		return
	}

	sub.ModifyEvent(&event)
	LogEvent(&event, "subscribe")
	e.HandleEventNotify(&event, true)
}
// Send delivers one event to all resolved targets: per-channel user
// notifications, event callbacks, global webhooks, and the optional
// notify-script plugin. Channels without a configured sender are skipped
// with a warning.
func (e *Dispatch) Send(rule *models.AlertRule, event *models.AlertCurEvent, notifyTarget *NotifyTarget, isSubscribe bool) {
	for channel, uids := range notifyTarget.ToChannelUserMap() {
		ctx := sender.BuildMessageContext(rule, event, uids, e.userCache)
		e.RwLock.RLock()
		s := e.senders[channel]
		e.RwLock.RUnlock()
		if s == nil {
			logger.Warningf("no sender for channel: %s", channel)
			continue
		}
		logger.Debugf("send event: %s, channel: %s", event.Hash, channel)
		for i := 0; i < len(ctx.Users); i++ {
			logger.Debug("send event to user: ", ctx.Users[i])
		}
		s.Send(ctx)
	}

	// handle event callbacks
	sender.SendCallbacks(e.ctx, notifyTarget.ToCallbackList(), event, e.targetCache, e.notifyConfigCache.GetIbex())

	// handle global webhooks
	sender.SendWebhooks(notifyTarget.ToWebhookList(), event)

	// handle plugin call
	go sender.MayPluginNotify(e.genNoticeBytes(event), e.notifyConfigCache.GetNotifyScript())
}
// Notice is the JSON payload handed to the external notify script:
// the raw event plus every template rendered against it.
type Notice struct {
	Event *models.AlertCurEvent `json:"event"`
	Tpls  map[string]string     `json:"tpls"`
}
// genNoticeBytes renders every notification template against the event and
// serializes a Notice to JSON. A template that fails to execute contributes
// its error text instead of aborting the whole notice. Returns nil if the
// final JSON marshal fails.
func (e *Dispatch) genNoticeBytes(event *models.AlertCurEvent) []byte {
	rendered := make(map[string]string)

	e.RwLock.RLock()
	defer e.RwLock.RUnlock()
	for name, tpl := range e.tpls {
		var buf bytes.Buffer
		if err := tpl.Execute(&buf, event); err != nil {
			rendered[name] = err.Error()
		} else {
			rendered[name] = buf.String()
		}
	}

	payload, err := json.Marshal(Notice{Event: event, Tpls: rendered})
	if err != nil {
		logger.Errorf("event_notify: failed to marshal notice: %v", err)
		return nil
	}
	return payload
}
// fillUsers resolves the event's notify-group ids (stored as strings) into
// group objects, then collects the distinct users across those groups into
// NotifyUsersObj. Unparseable group ids are silently skipped.
func fillUsers(ce *models.AlertCurEvent, uc *memsto.UserCacheType, ugc *memsto.UserGroupCacheType) {
	groupIds := make([]int64, 0, len(ce.NotifyGroupsJSON))
	for _, raw := range ce.NotifyGroupsJSON {
		if gid, err := strconv.ParseInt(raw, 10, 64); err == nil {
			groupIds = append(groupIds, gid)
		}
	}

	ce.NotifyGroupsObj = ugc.GetByUserGroupIds(groupIds)

	// de-duplicate user ids across groups
	seen := make(map[int64]struct{})
	for _, group := range ce.NotifyGroupsObj {
		for _, uid := range group.UserIds {
			seen[uid] = struct{}{}
		}
	}

	ce.NotifyUsersObj = uc.GetByUserIds(mapKeys(seen))
}
func mapKeys(m map[int64]struct{}) []int64 {
lst := make([]int64, 0, len(m))
for k := range m {
lst = append(lst, k)
}
return lst
}

View File

@@ -1,33 +0,0 @@
package dispatch
// NotifyChannels maps a channel key (e.g. "email", "dingtalk") to whether
// the channel is enabled for a user.
type NotifyChannels map[string]bool
// NewNotifyChannels builds a NotifyChannels with every listed channel enabled.
func NewNotifyChannels(channels []string) NotifyChannels {
	nc := make(NotifyChannels, len(channels))
	for _, channel := range channels {
		nc[channel] = true
	}
	return nc
}
// OrMerge merges other into nc: a channel is enabled if it is enabled on
// either side (union semantics).
func (nc NotifyChannels) OrMerge(other NotifyChannels) {
	nc.merge(other, func(a, b bool) bool { return a || b })
}
// AndMerge merges other into nc: a channel present in both must be enabled
// on both sides to stay enabled (used to strip channels after the OR phase).
func (nc NotifyChannels) AndMerge(other NotifyChannels) {
	nc.merge(other, func(a, b bool) bool { return a && b })
}
// merge folds other into nc, combining values present on both sides with f
// and copying values only present in other. A nil other is a no-op.
func (nc NotifyChannels) merge(other NotifyChannels, f func(bool, bool) bool) {
	if other == nil {
		return
	}
	for key, val := range other {
		if existing, ok := nc[key]; ok {
			nc[key] = f(existing, val)
		} else {
			nc[key] = val
		}
	}
}

View File

@@ -1,134 +0,0 @@
package dispatch
import (
"strconv"
"github.com/ccfos/nightingale/v6/models"
)
// NotifyTarget holds every delivery target for one event: user->channel
// flags, global webhooks, and event callbacks. Map-backed storage makes the
// collection naturally de-duplicated.
type NotifyTarget struct {
	userMap   map[int64]NotifyChannels  // uid -> channel enable flags
	webhooks  map[string]*models.Webhook // url -> webhook config
	callbacks map[string]struct{}        // callback url set
}
// NewNotifyTarget returns an empty NotifyTarget ready for merging.
func NewNotifyTarget() *NotifyTarget {
	t := &NotifyTarget{
		userMap:   map[int64]NotifyChannels{},
		webhooks:  map[string]*models.Webhook{},
		callbacks: map[string]struct{}{},
	}
	return t
}
// OrMerge merges other's channel flags with OR semantics, which makes it
// easy to compose multiple routing strategies (e.g. tag-based routing).
func (s *NotifyTarget) OrMerge(other *NotifyTarget) {
	s.merge(other, NotifyChannels.OrMerge)
}
// AndMerge merges other's channel flags with AND semantics, allowing a
// per-user or per-channel notification to be removed. Typical uses:
//  1. an employee left and should no longer receive alerts
//  2. a channel is under maintenance and temporarily silenced
//  3. on-call redirection, e.g. escalating high-severity alerts to responders
//
// Implement a custom router and combine it via this method as needed.
func (s *NotifyTarget) AndMerge(other *NotifyTarget) {
	s.merge(other, NotifyChannels.AndMerge)
}
// merge folds other into s: per-user channel maps present on both sides are
// combined with f; webhooks and callbacks are simply unioned in.
// A nil other is a no-op.
func (s *NotifyTarget) merge(other *NotifyTarget, f func(NotifyChannels, NotifyChannels)) {
	if other == nil {
		return
	}

	for uid, channels := range other.userMap {
		if existing, ok := s.userMap[uid]; ok {
			f(existing, channels)
		} else {
			s.userMap[uid] = channels
		}
	}
	for url, hook := range other.webhooks {
		s.webhooks[url] = hook
	}
	for cb := range other.callbacks {
		s.callbacks[cb] = struct{}{}
	}
}
// ToChannelUserMap inverts userMap (uid -> channel -> enabled) into
// channel -> list of uids, keeping only enabled channels.
func (s *NotifyTarget) ToChannelUserMap() map[string][]int64 {
	out := make(map[string][]int64)
	for uid, channels := range s.userMap {
		for channel, enabled := range channels {
			if enabled {
				out[channel] = append(out[channel], uid)
			}
		}
	}
	return out
}
// ToCallbackList returns the callback URLs as a slice (arbitrary order).
func (s *NotifyTarget) ToCallbackList() []string {
	urls := make([]string, 0, len(s.callbacks))
	for url := range s.callbacks {
		urls = append(urls, url)
	}
	return urls
}
// ToWebhookList returns the webhook configs as a slice (arbitrary order).
func (s *NotifyTarget) ToWebhookList() []*models.Webhook {
	hooks := make([]*models.Webhook, 0, len(s.webhooks))
	for _, hook := range s.webhooks {
		hooks = append(hooks, hook)
	}
	return hooks
}
// NotifyTargetDispatch abstracts a routing strategy from an alert event to
// its receivers.
// rule: the alert rule; event: the alert event.
// prev: the routing result so far — an implementation may mutate prev
// directly, or return a fresh NotifyTarget for the caller to AndMerge/OrMerge.
type NotifyTargetDispatch func(rule *models.AlertRule, event *models.AlertCurEvent, prev *NotifyTarget, dispatch *Dispatch) *NotifyTarget
// NotifyGroupDispatch routes the event to every member of the event's notify
// groups, enabling the event's configured channels for each user.
// Unparseable group ids are skipped.
func NotifyGroupDispatch(rule *models.AlertRule, event *models.AlertCurEvent, prev *NotifyTarget, dispatch *Dispatch) *NotifyTarget {
	groupIds := make([]int64, 0, len(event.NotifyGroupsJSON))
	for _, groupId := range event.NotifyGroupsJSON {
		gid, err := strconv.ParseInt(groupId, 10, 64)
		if err != nil {
			continue
		}
		groupIds = append(groupIds, gid)
	}

	groups := dispatch.userGroupCache.GetByUserGroupIds(groupIds)
	// note: local renamed from "NotifyTarget", which shadowed the type name
	// of the same package — confusing and error-prone
	target := NewNotifyTarget()
	for _, group := range groups {
		for _, userId := range group.UserIds {
			target.userMap[userId] = NewNotifyChannels(event.NotifyChannelsJSON)
		}
	}

	return target
}
// GlobalWebhookDispatch collects all enabled globally-configured webhooks
// into a fresh NotifyTarget (keyed by URL, so duplicates collapse).
func GlobalWebhookDispatch(rule *models.AlertRule, event *models.AlertCurEvent, prev *NotifyTarget, dispatch *Dispatch) *NotifyTarget {
	webhooks := dispatch.notifyConfigCache.GetWebhooks()
	// note: local renamed from "NotifyTarget", which shadowed the type name
	target := NewNotifyTarget()

	for _, webhook := range webhooks {
		if !webhook.Enable {
			continue
		}
		target.webhooks[webhook.Url] = webhook
	}
	return target
}
// EventCallbacksDispatch adds the event's own callback URLs to prev in
// place; it returns nil so the caller's merge is a no-op.
func EventCallbacksDispatch(rule *models.AlertRule, event *models.AlertCurEvent, prev *NotifyTarget, dispatch *Dispatch) *NotifyTarget {
	for _, url := range event.CallbacksJSON {
		if url != "" {
			prev.callbacks[url] = struct{}{}
		}
	}
	return nil
}

View File

@@ -1,174 +0,0 @@
package eval
import (
"context"
"fmt"
"time"
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/alert/naming"
"github.com/ccfos/nightingale/v6/alert/process"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/prom"
"github.com/toolkits/pkg/logger"
)
// Scheduler owns the set of running alert-rule evaluation workers. It
// periodically reconciles the worker set against the rule cache, sharding
// prometheus rules across engine instances via the datasource hash ring and
// delegating non-prometheus rules to external processors.
type Scheduler struct {
	isCenter bool // whether this instance is the center (handles host rules)

	// key: hash of (rule, datasource) — see AlertRuleWorker.Hash
	alertRules map[string]*AlertRuleWorker

	ExternalProcessors *process.ExternalProcessorsType

	aconf aconf.Alert

	alertRuleCache  *memsto.AlertRuleCacheType
	targetCache     *memsto.TargetCacheType
	busiGroupCache  *memsto.BusiGroupCacheType
	alertMuteCache  *memsto.AlertMuteCacheType
	datasourceCache *memsto.DatasourceCacheType

	promClients *prom.PromClientMap

	naming *naming.Naming

	ctx   *ctx.Context
	stats *astats.Stats
}
// NewScheduler builds the evaluation scheduler and immediately starts its
// background rule-sync loop (side effect: spawns a goroutine).
func NewScheduler(isCenter bool, aconf aconf.Alert, externalProcessors *process.ExternalProcessorsType, arc *memsto.AlertRuleCacheType, targetCache *memsto.TargetCacheType,
	busiGroupCache *memsto.BusiGroupCacheType, alertMuteCache *memsto.AlertMuteCacheType, datasourceCache *memsto.DatasourceCacheType, promClients *prom.PromClientMap, naming *naming.Naming,
	ctx *ctx.Context, stats *astats.Stats) *Scheduler {
	scheduler := &Scheduler{
		isCenter:   isCenter,
		aconf:      aconf,
		alertRules: make(map[string]*AlertRuleWorker),

		ExternalProcessors: externalProcessors,

		alertRuleCache:  arc,
		targetCache:     targetCache,
		busiGroupCache:  busiGroupCache,
		alertMuteCache:  alertMuteCache,
		datasourceCache: datasourceCache,

		promClients: promClients,
		naming:      naming,

		ctx:   ctx,
		stats: stats,
	}

	go scheduler.LoopSyncRules(context.Background())
	return scheduler
}
// LoopSyncRules waits EngineDelay seconds (letting caches warm up), then
// reconciles the rule workers every 9s until ctx is cancelled.
func (s *Scheduler) LoopSyncRules(ctx context.Context) {
	time.Sleep(time.Duration(s.aconf.EngineDelay) * time.Second)
	duration := 9000 * time.Millisecond
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(duration):
			s.syncAlertRules()
		}
	}
}
// syncAlertRules reconciles the running worker set with the rule cache:
//   - prometheus rules: one worker per (rule, datasource) that this instance
//     owns on the hash ring, provided the datasource is enabled;
//   - host rules: evaluated only on the center instance, sharded on a
//     synthetic datasource id;
//   - everything else: registered as an external processor.
//
// Workers/processors that disappeared are stopped and removed; new ones are
// prepared and started. Unchanged external processors (same hash) are kept.
func (s *Scheduler) syncAlertRules() {
	ids := s.alertRuleCache.GetRuleIds()
	alertRuleWorkers := make(map[string]*AlertRuleWorker)
	externalRuleWorkers := make(map[string]*process.Processor)
	for _, id := range ids {
		rule := s.alertRuleCache.Get(id)
		if rule == nil {
			continue
		}
		if rule.IsPrometheusRule() {
			datasourceIds := s.promClients.Hit(rule.DatasourceIdsJson)
			for _, dsId := range datasourceIds {
				// only evaluate rules this instance owns on the hash ring
				if !naming.DatasourceHashRing.IsHit(dsId, fmt.Sprintf("%d", rule.Id), s.aconf.Heartbeat.Endpoint) {
					continue
				}
				ds := s.datasourceCache.GetById(dsId)
				if ds == nil {
					logger.Debugf("datasource %d not found", dsId)
					continue
				}

				if ds.Status != "enabled" {
					logger.Debugf("datasource %d status is %s", dsId, ds.Status)
					continue
				}

				processor := process.NewProcessor(rule, dsId, s.alertRuleCache, s.targetCache, s.busiGroupCache, s.alertMuteCache, s.datasourceCache, s.promClients, s.ctx, s.stats)
				alertRule := NewAlertRuleWorker(rule, dsId, processor, s.promClients, s.ctx)
				alertRuleWorkers[alertRule.Hash()] = alertRule
			}
		} else if rule.IsHostRule() && s.isCenter {
			// all host rule will be processed by center instance
			if !naming.DatasourceHashRing.IsHit(naming.HostDatasource, fmt.Sprintf("%d", rule.Id), s.aconf.Heartbeat.Endpoint) {
				continue
			}
			processor := process.NewProcessor(rule, 0, s.alertRuleCache, s.targetCache, s.busiGroupCache, s.alertMuteCache, s.datasourceCache, s.promClients, s.ctx, s.stats)
			alertRule := NewAlertRuleWorker(rule, 0, processor, s.promClients, s.ctx)
			alertRuleWorkers[alertRule.Hash()] = alertRule
		} else {
			// rules not evaluated by the prometheus engine become externalRules
			// if rule is not processed by prometheus engine, create it as externalRule
			for _, dsId := range rule.DatasourceIdsJson {
				ds := s.datasourceCache.GetById(dsId)
				if ds == nil {
					logger.Debugf("datasource %d not found", dsId)
					continue
				}
				if ds.Status != "enabled" {
					logger.Debugf("datasource %d status is %s", dsId, ds.Status)
					continue
				}
				processor := process.NewProcessor(rule, dsId, s.alertRuleCache, s.targetCache, s.busiGroupCache, s.alertMuteCache, s.datasourceCache, s.promClients, s.ctx, s.stats)
				externalRuleWorkers[processor.Key()] = processor
			}
		}
	}

	// start workers that are new in this round
	for hash, rule := range alertRuleWorkers {
		if _, has := s.alertRules[hash]; !has {
			rule.Prepare()
			rule.Start()
			s.alertRules[hash] = rule
		}
	}

	// stop workers that no longer exist
	for hash, rule := range s.alertRules {
		if _, has := alertRuleWorkers[hash]; !has {
			rule.Stop()
			delete(s.alertRules, hash)
		}
	}

	s.ExternalProcessors.ExternalLock.Lock()
	for key, processor := range externalRuleWorkers {
		if curProcessor, has := s.ExternalProcessors.Processors[key]; has {
			// rule exists with the same hash: treat as unchanged; a richer
			// hash function can be implemented here if more fields matter
			if processor.Hash() == curProcessor.Hash() {
				continue
			}
		}
		// new rules, and existing rules whose hash changed, trigger an update
		processor.RecoverAlertCurEventFromDb()
		s.ExternalProcessors.Processors[key] = processor
	}

	for key := range s.ExternalProcessors.Processors {
		if _, has := externalRuleWorkers[key]; !has {
			delete(s.ExternalProcessors.Processors, key)
		}
	}
	s.ExternalProcessors.ExternalLock.Unlock()
}

View File

@@ -1,240 +0,0 @@
package eval
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/alert/process"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
promsdk "github.com/ccfos/nightingale/v6/pkg/prom"
"github.com/ccfos/nightingale/v6/prom"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/str"
)
// AlertRuleWorker evaluates one alert rule against one datasource on a
// fixed interval and feeds anomaly points into its Processor.
type AlertRuleWorker struct {
	datasourceId int64
	quit         chan struct{} // closed by Stop to end the eval loop
	inhibit      bool          // per-rule inhibit flag, refreshed on each eval
	severity     int           // highest-priority severity seen in the rule config

	rule      *models.AlertRule
	processor *process.Processor

	promClients *prom.PromClientMap
	ctx         *ctx.Context
}
// NewAlertRuleWorker builds a worker for one (rule, datasource) pair.
// The worker is idle until Start is called.
func NewAlertRuleWorker(rule *models.AlertRule, datasourceId int64, processor *process.Processor, promClients *prom.PromClientMap, ctx *ctx.Context) *AlertRuleWorker {
	return &AlertRuleWorker{
		datasourceId: datasourceId,
		quit:         make(chan struct{}),

		rule:      rule,
		processor: processor,

		promClients: promClients,
		ctx:         ctx,
	}
}
// Key returns the worker's identity string: "alert-<datasourceId>-<ruleId>".
func (arw *AlertRuleWorker) Key() string {
	return common.RuleKey(arw.datasourceId, arw.rule.Id)
}
// Hash fingerprints the evaluation-relevant rule fields; a changed hash
// makes the scheduler replace the running worker.
func (arw *AlertRuleWorker) Hash() string {
	return str.MD5(fmt.Sprintf("%d_%d_%s_%d",
		arw.rule.Id,
		arw.rule.PromEvalInterval,
		arw.rule.RuleConfig,
		arw.datasourceId,
	))
}
// Prepare restores the rule's still-active events from the DB before the
// eval loop starts, so recoveries are detected across restarts.
func (arw *AlertRuleWorker) Prepare() {
	arw.processor.RecoverAlertCurEventFromDb()
}
// Start launches the evaluation goroutine. The rule's PromEvalInterval is
// used as the period, defaulting to 10s when unset/invalid. The first Eval
// runs immediately; the loop exits when quit is closed (see Stop).
func (arw *AlertRuleWorker) Start() {
	logger.Infof("eval:%s started", arw.Key())
	interval := arw.rule.PromEvalInterval
	if interval <= 0 {
		interval = 10
	}
	go func() {
		for {
			select {
			case <-arw.quit:
				return
			default:
				arw.Eval()
				time.Sleep(time.Duration(interval) * time.Second)
			}
		}
	}()
}
// Eval runs one evaluation round: it collects anomaly points according to
// the rule type (PROMETHEUS or HOST; other types are ignored) and hands
// them to the processor.
func (arw *AlertRuleWorker) Eval() {
	cachedRule := arw.rule
	if cachedRule == nil {
		logger.Errorf("rule_eval:%s rule not found", arw.Key())
		return
	}

	typ := cachedRule.GetRuleType()
	var lst []common.AnomalyPoint
	switch typ {
	case models.PROMETHEUS:
		lst = arw.GetPromAnomalyPoint(cachedRule.RuleConfig)
	case models.HOST:
		lst = arw.GetHostAnomalyPoint(cachedRule.RuleConfig)
	default:
		return
	}

	if arw.processor == nil {
		logger.Warningf("rule_eval:%s processor is nil", arw.Key())
		return
	}

	arw.processor.Handle(lst, "inner", arw.inhibit)
}
// Stop ends the evaluation loop by closing quit. Must be called at most once
// (a second close would panic).
func (arw *AlertRuleWorker) Stop() {
	logger.Infof("%s stopped", arw.Key())
	close(arw.quit)
}
// GetPromAnomalyPoint evaluates every promql query of a PROMETHEUS-type rule
// against the worker's datasource and returns the resulting anomaly points.
// It also refreshes worker state: the inhibit flag and the highest-priority
// (numerically smallest) severity across the rule's queries.
func (arw *AlertRuleWorker) GetPromAnomalyPoint(ruleConfig string) []common.AnomalyPoint {
	var lst []common.AnomalyPoint
	var severity int

	var rule *models.PromRuleConfig
	if err := json.Unmarshal([]byte(ruleConfig), &rule); err != nil {
		logger.Errorf("rule_eval:%s rule_config:%s, error:%v", arw.Key(), ruleConfig, err)
		return lst
	}

	if rule == nil {
		logger.Errorf("rule_eval:%s rule_config:%s, error:rule is nil", arw.Key(), ruleConfig)
		return lst
	}

	arw.inhibit = rule.Inhibit
	for _, query := range rule.Queries {
		// bug fix: the old code compared query.Severity against a local that
		// stayed 0, so arw.severity was never set; severity==0 means "unset"
		// here (configured severities are positive).
		if severity == 0 || query.Severity < severity {
			severity = query.Severity
			arw.severity = query.Severity
		}

		promql := strings.TrimSpace(query.PromQl)
		if promql == "" {
			logger.Errorf("rule_eval:%s promql is blank", arw.Key())
			continue
		}

		if arw.promClients.IsNil(arw.datasourceId) {
			logger.Errorf("rule_eval:%s error reader client is nil", arw.Key())
			continue
		}

		readerClient := arw.promClients.GetCli(arw.datasourceId)

		var warnings promsdk.Warnings
		value, warnings, err := readerClient.Query(context.Background(), promql, time.Now())
		if err != nil {
			logger.Errorf("rule_eval:%s promql:%s, error:%v", arw.Key(), promql, err)
			continue
		}

		if len(warnings) > 0 {
			logger.Errorf("rule_eval:%s promql:%s, warnings:%v", arw.Key(), promql, warnings)
			continue
		}

		logger.Debugf("rule_eval:%s query:%+v, value:%v", arw.Key(), query, value)
		points := common.ConvertAnomalyPoints(value)
		for i := 0; i < len(points); i++ {
			points[i].Severity = query.Severity
		}
		lst = append(lst, points...)
	}
	return lst
}
// GetHostAnomalyPoint evaluates a HOST-type rule's triggers (target_miss,
// offset, pct_target_miss) against the target tables and returns anomaly
// points. It also refreshes worker state: the inhibit flag and the
// highest-priority (numerically smallest) severity across triggers.
func (arw *AlertRuleWorker) GetHostAnomalyPoint(ruleConfig string) []common.AnomalyPoint {
	var lst []common.AnomalyPoint
	var severity int

	var rule *models.HostRuleConfig
	if err := json.Unmarshal([]byte(ruleConfig), &rule); err != nil {
		logger.Errorf("rule_eval:%s rule_config:%s, error:%v", arw.Key(), ruleConfig, err)
		return lst
	}

	if rule == nil {
		logger.Errorf("rule_eval:%s rule_config:%s, error:rule is nil", arw.Key(), ruleConfig)
		return lst
	}

	arw.inhibit = rule.Inhibit
	now := time.Now().Unix()
	// the target filter depends only on rule.Queries — hoisted out of the loop
	// (the old code rebuilt it once per trigger)
	query := models.GetHostsQuery(rule.Queries)
	for _, trigger := range rule.Triggers {
		// bug fix: the old code compared trigger.Severity against a local
		// that stayed 0, so arw.severity was never set; severity==0 means
		// "unset" here (configured severities are positive)
		if severity == 0 || trigger.Severity < severity {
			severity = trigger.Severity
			arw.severity = trigger.Severity
		}

		switch trigger.Type {
		case "target_miss":
			t := now - int64(trigger.Duration)
			targets, err := models.MissTargetGetsByFilter(arw.ctx, query, t)
			if err != nil {
				logger.Errorf("rule_eval:%s query:%v, error:%v", arw.Key(), query, err)
				continue
			}

			for _, target := range targets {
				m := make(map[string]string)
				m["ident"] = target.Ident
				lst = append(lst, common.NewAnomalyPoint(trigger.Type, m, now, float64(now-target.UpdateAt), trigger.Severity))
			}
		case "offset":
			targets, err := models.TargetGetsByFilter(arw.ctx, query, 0, 0)
			if err != nil {
				logger.Errorf("rule_eval:%s query:%v, error:%v", arw.Key(), query, err)
				continue
			}

			hostOffsetMap := arw.processor.TargetCache.GetOffsetHost(targets, now, int64(trigger.Duration))
			for host, offset := range hostOffsetMap {
				m := make(map[string]string)
				m["ident"] = host
				lst = append(lst, common.NewAnomalyPoint(trigger.Type, m, now, float64(offset), trigger.Severity))
			}
		case "pct_target_miss":
			t := now - int64(trigger.Duration)
			count, err := models.MissTargetCountByFilter(arw.ctx, query, t)
			if err != nil {
				logger.Errorf("rule_eval:%s query:%v, error:%v", arw.Key(), query, err)
				continue
			}

			total, err := models.TargetCountByFilter(arw.ctx, query)
			if err != nil {
				logger.Errorf("rule_eval:%s query:%v, error:%v", arw.Key(), query, err)
				continue
			}

			// guard: no matching targets at all — avoid 0/0 producing NaN
			if total == 0 {
				continue
			}

			pct := float64(count) / float64(total) * 100
			if pct >= float64(trigger.Percent) {
				lst = append(lst, common.NewAnomalyPoint(trigger.Type, nil, now, pct, trigger.Severity))
			}
		}
	}
	return lst
}

View File

@@ -1,178 +0,0 @@
package mute
import (
"strconv"
"strings"
"time"
"github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/models"
"github.com/toolkits/pkg/logger"
)
// IsMuted reports whether the event should be suppressed. The strategies are
// evaluated in order and short-circuit: effective-time window, missing
// ident, busi-group mismatch, and explicit mute rules.
func IsMuted(rule *models.AlertRule, event *models.AlertCurEvent, targetCache *memsto.TargetCacheType, alertMuteCache *memsto.AlertMuteCacheType) bool {
	return TimeNonEffectiveMuteStrategy(rule, event) ||
		IdentNotExistsMuteStrategy(rule, event, targetCache) ||
		BgNotMatchMuteStrategy(rule, event, targetCache) ||
		EventMuteStrategy(event, alertMuteCache)
}
// TimeNonEffectiveMuteStrategy mutes events that fire outside the rule's
// configured effective time windows (or when the rule is disabled).
// Returns true = mute.
func TimeNonEffectiveMuteStrategy(rule *models.AlertRule, event *models.AlertCurEvent) bool {
	if rule.Disabled == 1 {
		return true
	}

	tm := time.Unix(event.TriggerTime, 0)
	triggerTime := tm.Format("15:04")
	triggerWeek := strconv.Itoa(int(tm.Weekday()))

	enableStime := strings.Fields(rule.EnableStime)
	enableEtime := strings.Fields(rule.EnableEtime)
	enableDaysOfWeek := strings.Split(rule.EnableDaysOfWeek, ";")

	length := len(enableDaysOfWeek)
	// enableStime, enableEtime and enableDaysOfWeek are guaranteed to have
	// the same length, so iterating one of them is enough
	for i := 0; i < length; i++ {
		// the UI uses 7 for Sunday while Go's Weekday uses 0
		enableDaysOfWeek[i] = strings.Replace(enableDaysOfWeek[i], "7", "0", 1)
		if !strings.Contains(enableDaysOfWeek[i], triggerWeek) {
			continue
		}

		if enableStime[i] <= enableEtime[i] {
			// normal window within one day
			if triggerTime < enableStime[i] || triggerTime > enableEtime[i] {
				continue
			}
		} else {
			// window crosses midnight
			if triggerTime < enableStime[i] && triggerTime > enableEtime[i] {
				continue
			}
		}

		// the trigger time falls inside one of the effective windows — do not mute
		return false
	}

	return true
}
// IdentNotExistsMuteStrategy mutes target_up-style alerts whose ident no
// longer exists in the target cache (the host was removed, so a "down"
// alert is meaningless). Returns true = mute.
func IdentNotExistsMuteStrategy(rule *models.AlertRule, event *models.AlertCurEvent, targetCache *memsto.TargetCacheType) bool {
	ident, has := event.TagsMap["ident"]
	if !has {
		return false
	}
	_, exists := targetCache.Get(ident)
	// substring check on the promql is admittedly crude, but there is
	// currently no better way to identify target_up rules
	if !exists && strings.Contains(rule.PromQl, "target_up") {
		logger.Debugf("[%s] mute: rule_eval:%d cluster:%s ident:%s", "IdentNotExistsMuteStrategy", rule.Id, event.Cluster, ident)
		return true
	}
	return false
}
// BgNotMatchMuteStrategy mutes events from hosts outside the rule's
// busi-group when the rule has EnableInBG set ("alert only inside my
// business group"). Returns true = mute.
func BgNotMatchMuteStrategy(rule *models.AlertRule, event *models.AlertCurEvent, targetCache *memsto.TargetCacheType) bool {
	// feature not enabled on this rule: never mute here
	if rule.EnableInBG == 0 {
		return false
	}

	ident, has := event.TagsMap["ident"]
	if !has {
		return false
	}

	target, exists := targetCache.Get(ident)
	// for events carrying an ident, check that the ident's busi-group matches
	// the rule's busi-group; if the rule is restricted to its own group, hosts
	// from other groups must not alert through it
	if exists && target.GroupId != rule.GroupId {
		logger.Debugf("[%s] mute: rule_eval:%d cluster:%s", "BgNotMatchMuteStrategy", rule.Id, event.Cluster)
		return true
	}

	return false
}
// EventMuteStrategy mutes the event if any mute rule of its busi-group
// matches it. Returns true = mute.
func EventMuteStrategy(event *models.AlertCurEvent, alertMuteCache *memsto.AlertMuteCacheType) bool {
	mutes, has := alertMuteCache.Gets(event.GroupId)
	if !has {
		return false
	}
	for _, mute := range mutes {
		if matchMute(event, mute) {
			return true
		}
	}
	return false
}
// matchMute reports whether the mute rule matches the event. The optional
// clock argument overrides the timestamp used for time matching; when
// absent, the event's TriggerTime is used. A mute matches when it is
// enabled, its datasource scope covers the event, its time window covers
// the timestamp, and its tag filters match the event's tags.
func matchMute(event *models.AlertCurEvent, mute *models.AlertMute, clock ...int64) bool {
	if mute.Disabled == 1 {
		return false
	}

	ts := event.TriggerTime
	if len(clock) > 0 {
		ts = clock[0]
	}

	// unless the mute is global (DatasourceIdsJson == [0]), the event's
	// datasource must be listed in the mute's datasources
	if !(len(mute.DatasourceIdsJson) != 0 && mute.DatasourceIdsJson[0] == 0) && event.DatasourceId != 0 {
		idm := make(map[int64]struct{}, len(mute.DatasourceIdsJson))
		for i := 0; i < len(mute.DatasourceIdsJson); i++ {
			idm[mute.DatasourceIdsJson[i]] = struct{}{}
		}

		// check whether event.DatasourceId is contained in idm
		if _, has := idm[event.DatasourceId]; !has {
			return false
		}
	}

	var matchTime bool
	if mute.MuteTimeType == models.TimeRange {
		if ts < mute.Btime || ts > mute.Etime {
			return false
		}
		matchTime = true
	} else if mute.MuteTimeType == models.Periodic {
		// bug fix: this branch previously used event.TriggerTime directly,
		// silently ignoring the clock override stored in ts above
		tm := time.Unix(ts, 0)
		triggerTime := tm.Format("15:04")
		triggerWeek := strconv.Itoa(int(tm.Weekday()))

		for i := 0; i < len(mute.PeriodicMutesJson); i++ {
			if strings.Contains(mute.PeriodicMutesJson[i].EnableDaysOfWeek, triggerWeek) {
				if mute.PeriodicMutesJson[i].EnableStime <= mute.PeriodicMutesJson[i].EnableEtime {
					// window within one day
					if triggerTime >= mute.PeriodicMutesJson[i].EnableStime && triggerTime < mute.PeriodicMutesJson[i].EnableEtime {
						matchTime = true
						break
					}
				} else {
					// window crosses midnight
					if triggerTime < mute.PeriodicMutesJson[i].EnableStime || triggerTime >= mute.PeriodicMutesJson[i].EnableEtime {
						matchTime = true
						break
					}
				}
			}
		}
	}

	if !matchTime {
		return false
	}

	return common.MatchTags(event.TagsMap, mute.ITags)
}

View File

@@ -1,65 +0,0 @@
package naming
import (
"sync"
"github.com/toolkits/pkg/consistent"
"github.com/toolkits/pkg/logger"
)
// NodeReplicas is the number of virtual nodes each real node contributes to a
// consistent hash ring.
const NodeReplicas = 500

// DatasourceHashRingType maps datasource id -> consistent hash ring, guarded
// by the embedded RWMutex.
type DatasourceHashRingType struct {
	sync.RWMutex
	Rings map[int64]*consistent.Consistent
}

// for alert_rule sharding
// HostDatasource is a fake datasource id used to shard host-type alert rules,
// which are not bound to any real datasource.
var HostDatasource int64 = 100000

// DatasourceHashRing is the package-level ring registry.
var DatasourceHashRing = DatasourceHashRingType{Rings: make(map[int64]*consistent.Consistent)}

// NewConsistentHashRing builds a ring with the given replica count and nodes.
func NewConsistentHashRing(replicas int32, nodes []string) *consistent.Consistent {
	ret := consistent.New()
	ret.NumberOfReplicas = int(replicas)
	for i := 0; i < len(nodes); i++ {
		ret.Add(nodes[i])
	}
	return ret
}
// RebuildConsistentHashRing replaces the ring for datasourceId with a fresh
// one built from nodes, then logs the new membership.
func RebuildConsistentHashRing(datasourceId int64, nodes []string) {
	ring := consistent.New()
	ring.NumberOfReplicas = NodeReplicas
	for _, node := range nodes {
		ring.Add(node)
	}

	DatasourceHashRing.Set(datasourceId, ring)
	logger.Infof("hash ring %d rebuild %+v", datasourceId, ring.Members())
}
// GetNode returns the node that owns pk on the ring of datasourceId, lazily
// creating an empty ring when none exists yet.
//
// BUGFIX: the original held only the read lock while inserting into
// chr.Rings — a concurrent map write under RLock, which panics at runtime.
// A write lock is required because this method may mutate the map.
func (chr *DatasourceHashRingType) GetNode(datasourceId int64, pk string) (string, error) {
	chr.Lock()
	defer chr.Unlock()

	r, exists := chr.Rings[datasourceId]
	if !exists {
		r = NewConsistentHashRing(int32(NodeReplicas), []string{})
		chr.Rings[datasourceId] = r
	}

	return r.Get(pk)
}
// IsHit reports whether currentNode owns pk on the ring of datasourceId.
func (chr *DatasourceHashRingType) IsHit(datasourceId int64, pk string, currentNode string) bool {
	node, err := chr.GetNode(datasourceId, pk)
	if err == nil {
		return node == currentNode
	}
	logger.Debugf("datasource id:%d pk:%s failed to get node from hashring:%v", datasourceId, pk, err)
	return false
}
// Set installs r as the ring for datasourceId.
//
// BUGFIX: the original took the read lock (RLock) around a map write, which
// does not exclude other writers or readers — a data race and potential
// concurrent-map-write panic. A write lock is required here.
func (chr *DatasourceHashRingType) Set(datasourceId int64, r *consistent.Consistent) {
	chr.Lock()
	defer chr.Unlock()
	chr.Rings[datasourceId] = r
}

View File

@@ -1,151 +0,0 @@
package naming
import (
"fmt"
"sort"
"strings"
"time"
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/toolkits/pkg/logger"
)
// Naming maintains this instance's heartbeat records and keeps the
// per-datasource consistent hash rings in sync with the set of live peers.
type Naming struct {
	ctx             *ctx.Context
	heartbeatConfig aconf.HeartbeatConfig
	isCenter        bool // center nodes also shard host-type alert rules
}

// NewNaming builds a Naming and immediately starts the heartbeat loops.
func NewNaming(ctx *ctx.Context, heartbeat aconf.HeartbeatConfig, isCenter bool) *Naming {
	naming := &Naming{
		ctx:             ctx,
		heartbeatConfig: heartbeat,
		isCenter:        isCenter,
	}
	naming.Heartbeats()
	return naming
}

// local servers
// localss caches, per datasource id, the space-joined sorted list of active
// servers seen last round, so rings are only rebuilt when membership changes.
var localss map[int64]string

// Heartbeats performs one synchronous heartbeat and then launches the
// periodic heartbeat and inactive-instance cleanup goroutines.
func (n *Naming) Heartbeats() error {
	localss = make(map[int64]string)
	if err := n.heartbeat(); err != nil {
		fmt.Println("failed to heartbeat:", err)
		return err
	}

	go n.loopHeartbeat()
	go n.loopDeleteInactiveInstances()
	return nil
}
// loopDeleteInactiveInstances purges dead alerting-engine records every 10 minutes.
func (n *Naming) loopDeleteInactiveInstances() {
	interval := time.Duration(10) * time.Minute
	for {
		time.Sleep(interval)
		n.DeleteInactiveInstances()
	}
}

// DeleteInactiveInstances removes alerting_engines rows whose last heartbeat
// is older than 600 seconds.
func (n *Naming) DeleteInactiveInstances() {
	err := models.DB(n.ctx).Where("clock < ?", time.Now().Unix()-600).Delete(new(models.AlertingEngines)).Error
	if err != nil {
		logger.Errorf("delete inactive instances err:%v", err)
	}
}

// loopHeartbeat re-runs heartbeat() forever at the configured interval
// (milliseconds), logging but not propagating failures.
func (n *Naming) loopHeartbeat() {
	interval := time.Duration(n.heartbeatConfig.Interval) * time.Millisecond
	for {
		time.Sleep(interval)
		if err := n.heartbeat(); err != nil {
			logger.Warning(err)
		}
	}
}
// heartbeat writes this instance's liveness to the DB for every datasource of
// its cluster, then rebuilds the consistent hash ring of any datasource whose
// active-server set changed since the last round. On a center node it does
// the same for the fake HostDatasource used by host-type alert rules.
func (n *Naming) heartbeat() error {
	var datasourceIds []int64
	var err error

	// The instance<->cluster mapping is maintained on the web UI.
	datasourceIds, err = models.GetDatasourceIdsByClusterName(n.ctx, n.heartbeatConfig.ClusterName)
	if err != nil {
		return err
	}

	if len(datasourceIds) == 0 {
		err := models.AlertingEngineHeartbeatWithCluster(n.ctx, n.heartbeatConfig.Endpoint, n.heartbeatConfig.ClusterName, 0)
		if err != nil {
			// BUGFIX: log the actual cluster name; the original passed "" for %s.
			logger.Warningf("heartbeat with cluster %s err:%v", n.heartbeatConfig.ClusterName, err)
		}
	} else {
		for i := 0; i < len(datasourceIds); i++ {
			err := models.AlertingEngineHeartbeatWithCluster(n.ctx, n.heartbeatConfig.Endpoint, n.heartbeatConfig.ClusterName, datasourceIds[i])
			if err != nil {
				logger.Warningf("heartbeat with cluster %d err:%v", datasourceIds[i], err)
			}
		}
	}

	for i := 0; i < len(datasourceIds); i++ {
		servers, err := n.ActiveServers(datasourceIds[i])
		if err != nil {
			logger.Warningf("hearbeat %d get active server err:%v", datasourceIds[i], err)
			continue
		}

		sort.Strings(servers)
		newss := strings.Join(servers, " ")

		oldss, exists := localss[datasourceIds[i]]
		if exists && oldss == newss {
			// Membership unchanged; no need to rebuild the ring.
			continue
		}

		RebuildConsistentHashRing(datasourceIds[i], servers)
		localss[datasourceIds[i]] = newss
	}

	if n.isCenter {
		// Center nodes also handle host-type alerting rules, which are not tied
		// to any datasource; a fake datasource id lets them reuse the hash ring.
		err := models.AlertingEngineHeartbeatWithCluster(n.ctx, n.heartbeatConfig.Endpoint, n.heartbeatConfig.ClusterName, HostDatasource)
		if err != nil {
			// BUGFIX: log the actual cluster name; the original passed "" for %s.
			logger.Warningf("heartbeat with cluster %s err:%v", n.heartbeatConfig.ClusterName, err)
		}

		servers, err := n.ActiveServers(HostDatasource)
		if err != nil {
			logger.Warningf("hearbeat %d get active server err:%v", HostDatasource, err)
			return nil
		}

		sort.Strings(servers)
		newss := strings.Join(servers, " ")

		oldss, exists := localss[HostDatasource]
		if exists && oldss == newss {
			return nil
		}

		RebuildConsistentHashRing(HostDatasource, servers)
		localss[HostDatasource] = newss
	}

	return nil
}
// ActiveServers lists the endpoints that heartbeated for datasourceId within
// the last 30 seconds.
func (n *Naming) ActiveServers(datasourceId int64) ([]string, error) {
	if datasourceId == -1 {
		return nil, fmt.Errorf("cluster is empty")
	}

	// An instance counts as alive if its heartbeat is at most 30s old.
	threshold := time.Now().Unix() - 30
	return models.AlertingEngineGetsInstances(n.ctx, "datasource_id = ? and clock > ?", datasourceId, threshold)
}

View File

@@ -1,74 +0,0 @@
package process
import (
"sync"
"github.com/ccfos/nightingale/v6/models"
)
// AlertCurEventMap is a concurrency-safe map of event hash -> current alert
// event, used for the pending/firing sets of a rule processor.
type AlertCurEventMap struct {
	sync.RWMutex
	Data map[string]*models.AlertCurEvent
}

// NewAlertCurEventMap wraps data, or a fresh empty map when data is nil.
func NewAlertCurEventMap(data map[string]*models.AlertCurEvent) *AlertCurEventMap {
	if data == nil {
		return &AlertCurEventMap{
			Data: make(map[string]*models.AlertCurEvent),
		}
	}
	return &AlertCurEventMap{
		Data: data,
	}
}
// SetAll atomically replaces the whole underlying map.
func (m *AlertCurEventMap) SetAll(data map[string]*models.AlertCurEvent) {
	m.Lock()
	m.Data = data
	m.Unlock()
}

// Set stores value under key.
func (m *AlertCurEventMap) Set(key string, value *models.AlertCurEvent) {
	m.Lock()
	m.Data[key] = value
	m.Unlock()
}

// Get returns the event stored under key and whether it exists.
func (m *AlertCurEventMap) Get(key string) (*models.AlertCurEvent, bool) {
	m.RLock()
	defer m.RUnlock()
	ev, ok := m.Data[key]
	return ev, ok
}

// UpdateLastEvalTime sets LastEvalTime on the stored event, if present.
func (m *AlertCurEventMap) UpdateLastEvalTime(key string, lastEvalTime int64) {
	m.Lock()
	defer m.Unlock()
	if ev, ok := m.Data[key]; ok {
		ev.LastEvalTime = lastEvalTime
	}
}

// Delete removes key from the map.
func (m *AlertCurEventMap) Delete(key string) {
	m.Lock()
	delete(m.Data, key)
	m.Unlock()
}

// Keys returns a snapshot of all keys currently in the map.
func (m *AlertCurEventMap) Keys() []string {
	m.RLock()
	defer m.RUnlock()
	out := make([]string, 0, len(m.Data))
	for k := range m.Data {
		out = append(out, k)
	}
	return out
}
// GetAll returns a snapshot copy of the map.
//
// BUGFIX: the original returned the live internal map; callers iterate it
// after RUnlock, racing concurrent writers (Set/Delete) — a data race and
// potential concurrent map iteration/write panic. Returning a shallow copy
// keeps the same signature while making iteration safe.
func (a *AlertCurEventMap) GetAll() map[string]*models.AlertCurEvent {
	a.RLock()
	defer a.RUnlock()
	snapshot := make(map[string]*models.AlertCurEvent, len(a.Data))
	for k, v := range a.Data {
		snapshot[k] = v
	}
	return snapshot
}

View File

@@ -1,440 +0,0 @@
package process
import (
"bytes"
"fmt"
"html/template"
"sort"
"strings"
"sync"
"time"
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/alert/dispatch"
"github.com/ccfos/nightingale/v6/alert/mute"
"github.com/ccfos/nightingale/v6/alert/queue"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/tplx"
"github.com/ccfos/nightingale/v6/prom"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/str"
)
// ExternalProcessorsType indexes the processors of external (HTTP-pushed)
// alert rules by "datasourceId_ruleId", guarded by ExternalLock.
type ExternalProcessorsType struct {
	ExternalLock sync.RWMutex
	Processors   map[string]*Processor
}

// ExternalProcessors is the package-level registry of external rule processors.
var ExternalProcessors ExternalProcessorsType

// NewExternalProcessors returns an empty registry.
func NewExternalProcessors() *ExternalProcessorsType {
	return &ExternalProcessorsType{
		Processors: make(map[string]*Processor),
	}
}

// GetExternalAlertRule looks up the processor for (datasourceId, ruleId).
func (e *ExternalProcessorsType) GetExternalAlertRule(datasourceId, id int64) (*Processor, bool) {
	e.ExternalLock.RLock()
	defer e.ExternalLock.RUnlock()
	processor, has := e.Processors[common.RuleKey(datasourceId, id)]
	return processor, has
}

// Processor evaluates anomaly points for one (rule, datasource) pair and
// manages the pending/firing life cycle of the resulting alert events.
type Processor struct {
	datasourceId int64 // the datasource this processor is bound to
	rule         *models.AlertRule

	fires    *AlertCurEventMap // events currently firing, keyed by event hash
	pendings *AlertCurEventMap // events waiting out PromForDuration

	inhibit bool // whether lower-severity duplicates are suppressed

	// per-evaluation scratch state filled by fillTags/mayHandleIdent/mayHandleGroup
	tagsMap    map[string]string
	tagsArr    []string
	target     string
	targetNote string
	groupName  string

	atertRuleCache *memsto.AlertRuleCacheType // NOTE(review): historical typo for "alertRuleCache"

	TargetCache    *memsto.TargetCacheType
	busiGroupCache *memsto.BusiGroupCacheType
	alertMuteCache *memsto.AlertMuteCacheType

	datasourceCache *memsto.DatasourceCacheType

	promClients *prom.PromClientMap
	ctx         *ctx.Context
	stats       *astats.Stats
}

// Key returns the processor's registry key: "datasourceId_ruleId".
func (p *Processor) Key() string {
	return common.RuleKey(p.datasourceId, p.rule.Id)
}

// DatasourceId returns the bound datasource id.
func (p *Processor) DatasourceId() int64 {
	return p.datasourceId
}

// Hash fingerprints the fields whose change requires a worker restart.
func (p *Processor) Hash() string {
	return str.MD5(fmt.Sprintf("%d_%d_%s_%d",
		p.rule.Id,
		p.rule.PromEvalInterval,
		p.rule.RuleConfig,
		p.datasourceId,
	))
}

// NewProcessor wires a Processor with its caches and resolves the business
// group name up front.
func NewProcessor(rule *models.AlertRule, datasourceId int64, atertRuleCache *memsto.AlertRuleCacheType, targetCache *memsto.TargetCacheType,
	busiGroupCache *memsto.BusiGroupCacheType, alertMuteCache *memsto.AlertMuteCacheType, datasourceCache *memsto.DatasourceCacheType, promClients *prom.PromClientMap, ctx *ctx.Context,
	stats *astats.Stats) *Processor {
	p := &Processor{
		datasourceId: datasourceId,
		rule:         rule,

		TargetCache:    targetCache,
		busiGroupCache: busiGroupCache,
		alertMuteCache: alertMuteCache,

		atertRuleCache: atertRuleCache,

		datasourceCache: datasourceCache,
		promClients:     promClients,
		ctx:             ctx,
		stats:           stats,
	}

	p.mayHandleGroup()
	return p
}
// Handle converts anomaly points into alert events: applies mute rules,
// groups events by tag hash for inhibition, runs pending/fire logic, then
// recovers events that are no longer alerting.
func (p *Processor) Handle(anomalyPoints []common.AnomalyPoint, from string, inhibit bool) {
	// Some rule config (receivers, callbacks, ...) may have changed; such
	// edits do not restart the worker but do affect processing, so re-read
	// the rule from AlertRuleCache and overwrite the local copy.
	p.inhibit = inhibit
	p.rule = p.atertRuleCache.Get(p.rule.Id)
	cachedRule := p.rule
	if cachedRule == nil {
		logger.Errorf("rule not found %+v", anomalyPoints)
		return
	}

	now := time.Now().Unix()
	alertingKeys := map[string]struct{}{}

	// Group events by tag hash so inhibition can pick the highest severity per group.
	eventsMap := make(map[string][]*models.AlertCurEvent)
	for _, anomalyPoint := range anomalyPoints {
		event := p.BuildEvent(anomalyPoint, from, now)
		// A muted event is still essentially firing: always record its hash in
		// alertingKeys so an already-fired event does not auto-recover.
		hash := event.Hash
		alertingKeys[hash] = struct{}{}
		if mute.IsMuted(cachedRule, event, p.TargetCache, p.alertMuteCache) {
			logger.Debugf("rule_eval:%s event:%v is muted", p.Key(), event)
			continue
		}

		tagHash := TagHash(anomalyPoint)
		eventsMap[tagHash] = append(eventsMap[tagHash], event)
	}

	for _, events := range eventsMap {
		p.handleEvent(events)
	}

	p.HandleRecover(alertingKeys, now)
}
// BuildEvent materializes an AlertCurEvent from one anomaly point, filling in
// series tags, target info, datasource name and rule-derived fields. For
// "inner" sources LastEvalTime is now; otherwise it is the trigger time.
func (p *Processor) BuildEvent(anomalyPoint common.AnomalyPoint, from string, now int64) *models.AlertCurEvent {
	p.fillTags(anomalyPoint)
	p.mayHandleIdent()
	hash := Hash(p.rule.Id, p.datasourceId, anomalyPoint)

	ds := p.datasourceCache.GetById(p.datasourceId)
	var dsName string
	if ds != nil {
		dsName = ds.Name
	}

	event := p.rule.GenerateNewEvent(p.ctx)
	event.TriggerTime = anomalyPoint.Timestamp
	event.TagsMap = p.tagsMap
	event.DatasourceId = p.datasourceId
	event.Cluster = dsName
	event.Hash = hash
	event.TargetIdent = p.target
	event.TargetNote = p.targetNote
	event.TriggerValue = anomalyPoint.ReadableValue()
	event.TagsJSON = p.tagsArr
	event.GroupName = p.groupName
	event.Tags = strings.Join(p.tagsArr, ",,")
	event.IsRecovered = false
	event.Callbacks = p.rule.Callbacks
	event.CallbacksJSON = p.rule.CallbacksJSON
	event.Annotations = p.rule.Annotations
	event.AnnotationsJSON = p.rule.AnnotationsJSON
	event.RuleConfig = p.rule.RuleConfig
	event.RuleConfigJson = p.rule.RuleConfigJson
	event.Severity = anomalyPoint.Severity

	if from == "inner" {
		event.LastEvalTime = now
	} else {
		event.LastEvalTime = event.TriggerTime
	}
	return event
}
// HandleRecover drops pending entries that are no longer alerting and
// recovers fired events whose hash is absent from alertingKeys.
func (p *Processor) HandleRecover(alertingKeys map[string]struct{}, now int64) {
	for _, h := range p.pendings.Keys() {
		if _, still := alertingKeys[h]; !still {
			p.pendings.Delete(h)
		}
	}

	for h := range p.fires.GetAll() {
		if _, still := alertingKeys[h]; !still {
			p.RecoverSingle(h, now, nil)
		}
	}
}
// RecoverSingle recovers one fired event identified by hash. If value is
// non-nil it becomes the event's trigger value. Recovery is skipped while the
// rule's RecoverDuration observation window has not elapsed.
func (p *Processor) RecoverSingle(hash string, now int64, value *string) {
	cachedRule := p.rule
	if cachedRule == nil {
		return
	}
	event, has := p.fires.Get(hash)
	if !has {
		return
	}
	// When a recover-observation duration is configured, do not recover immediately.
	if cachedRule.RecoverDuration > 0 && now-event.LastEvalTime < cachedRule.RecoverDuration {
		logger.Debugf("rule_eval:%s event:%v not recover", p.Key(), event)
		return
	}
	if value != nil {
		event.TriggerValue = *value
	}

	// No vector crossing the threshold was found, so treat this vector as
	// recovered. (We cannot tell "value present but below threshold" apart
	// from "datapoints missing in prom", unfortunately.)
	p.fires.Delete(hash)
	p.pendings.Delete(hash)

	// The recovery may be due to an edited promql; refresh all rule fields on
	// the event so the user sees the current rule rather than a stale one.
	cachedRule.UpdateEvent(event)
	event.IsRecovered = true
	event.LastEvalTime = now
	p.pushEventToQueue(event)
}
// handleEvent applies the PromForDuration pending logic to one tag-group of
// events, collects those that should fire, and passes them to inhibition
// together with the highest priority (numerically lowest severity) seen.
func (p *Processor) handleEvent(events []*models.AlertCurEvent) {
	var fireEvents []*models.AlertCurEvent
	// severity starts at 4 so any real event (1..3) ranks as higher priority
	severity := 4
	for _, event := range events {
		if event == nil {
			continue
		}
		if p.rule.PromForDuration == 0 {
			// No "for" duration configured: fire immediately.
			fireEvents = append(fireEvents, event)
			if severity > event.Severity {
				severity = event.Severity
			}
			continue
		}

		var preTriggerTime int64
		preEvent, has := p.pendings.Get(event.Hash)
		if has {
			p.pendings.UpdateLastEvalTime(event.Hash, event.LastEvalTime)
			preTriggerTime = preEvent.TriggerTime
		} else {
			p.pendings.Set(event.Hash, event)
			preTriggerTime = event.TriggerTime
		}

		// Fire once the condition has held for at least PromForDuration,
		// granting one eval interval of slack.
		if event.LastEvalTime-preTriggerTime+int64(event.PromEvalInterval) >= int64(p.rule.PromForDuration) {
			fireEvents = append(fireEvents, event)
			if severity > event.Severity {
				severity = event.Severity
			}
			continue
		}
	}

	p.inhibitEvent(fireEvents, severity)
}

// inhibitEvent fires events, dropping those with lower priority than
// highSeverity when inhibition is enabled.
func (p *Processor) inhibitEvent(events []*models.AlertCurEvent, highSeverity int) {
	for _, event := range events {
		if p.inhibit && event.Severity > highSeverity {
			logger.Debugf("rule_eval:%s event:%+v inhibit highSeverity:%d", p.Key(), event, highSeverity)
			continue
		}
		p.fireEvent(event)
	}
}
// fireEvent pushes a firing event to the dispatch queue, honoring
// NotifyRepeatStep (minimum minutes between repeats) and NotifyMaxNumber
// (cap on repeat notifications).
func (p *Processor) fireEvent(event *models.AlertCurEvent) {
	// As p.rule maybe outdated, use rule from cache
	cachedRule := p.rule
	if cachedRule == nil {
		return
	}
	logger.Debugf("rule_eval:%s event:%+v fire", p.Key(), event)
	if fired, has := p.fires.Get(event.Hash); has {
		p.fires.UpdateLastEvalTime(event.Hash, event.LastEvalTime)

		if cachedRule.NotifyRepeatStep == 0 {
			logger.Debugf("rule_eval:%s event:%+v repeat is zero nothing to do", p.Key(), event)
			// Repeat notifications are disabled, so just return;
			// do not need to send alert again
			return
		}

		// Already notified before; only send again once the repeat interval elapsed.
		if event.LastEvalTime > fired.LastSentTime+int64(cachedRule.NotifyRepeatStep)*60 {
			if cachedRule.NotifyMaxNumber == 0 {
				// Zero means "no cap on repeats": keep sending.
				event.NotifyCurNumber = fired.NotifyCurNumber + 1
				event.FirstTriggerTime = fired.FirstTriggerTime
				p.pushEventToQueue(event)
			} else {
				// A cap is configured: stop once it has been reached.
				if fired.NotifyCurNumber >= cachedRule.NotifyMaxNumber {
					logger.Debugf("rule_eval:%s event:%+v reach max number", p.Key(), event)
					return
				} else {
					event.NotifyCurNumber = fired.NotifyCurNumber + 1
					event.FirstTriggerTime = fired.FirstTriggerTime
					p.pushEventToQueue(event)
				}
			}
		}
	} else {
		// First time this hash fires.
		event.NotifyCurNumber = 1
		event.FirstTriggerTime = event.TriggerTime
		p.pushEventToQueue(event)
	}
}

// pushEventToQueue records the event as firing (unless it is a recovery),
// bumps the per-datasource counter metric, and enqueues it for dispatch.
func (p *Processor) pushEventToQueue(e *models.AlertCurEvent) {
	if !e.IsRecovered {
		e.LastSentTime = e.LastEvalTime
		p.fires.Set(e.Hash, e)
	}

	p.stats.CounterAlertsTotal.WithLabelValues(fmt.Sprintf("%d", e.DatasourceId)).Inc()
	dispatch.LogEvent(e, "push_queue")
	if !queue.EventQueue.PushFront(e) {
		logger.Warningf("event_push_queue: queue is full, event:%+v", e)
	}
}
// RecoverAlertCurEventFromDb rebuilds the in-memory firing map from the
// database (e.g. after a restart); pendings always start empty.
func (p *Processor) RecoverAlertCurEventFromDb() {
	p.pendings = NewAlertCurEventMap(nil)

	curEvents, err := models.AlertCurEventGetByRuleIdAndCluster(p.ctx, p.rule.Id, p.datasourceId)
	if err != nil {
		logger.Errorf("recover event from db for rule:%s failed, err:%s", p.Key(), err)
		p.fires = NewAlertCurEventMap(nil)
		return
	}

	fireMap := make(map[string]*models.AlertCurEvent)
	for _, event := range curEvents {
		event.DB2Mem()
		fireMap[event.Hash] = event
	}

	p.fires = NewAlertCurEventMap(fireMap)
}
// fillTags builds the event tag set for one anomaly point: the series labels,
// then the rule's append-tags (whose values may be Go templates over $labels
// and $value), then the implicit "rulename" tag. Results are stored in
// p.tagsMap and p.tagsArr.
func (p *Processor) fillTags(anomalyPoint common.AnomalyPoint) {
	// handle series tags
	tagsMap := make(map[string]string)
	for label, value := range anomalyPoint.Labels {
		tagsMap[string(label)] = string(value)
	}

	var e = &models.AlertCurEvent{
		TagsMap: tagsMap,
	}

	// handle rule tags
	for _, tag := range p.rule.AppendTagsJSON {
		arr := strings.SplitN(tag, "=", 2)
		// BUGFIX: a malformed append-tag without "=" used to panic with an
		// index-out-of-range on arr[1]; skip such entries instead.
		if len(arr) != 2 {
			continue
		}
		var defs = []string{
			"{{$labels := .TagsMap}}",
			"{{$value := .TriggerValue}}",
		}

		tagValue := arr[1]
		text := strings.Join(append(defs, tagValue), "")
		t, err := template.New(fmt.Sprint(p.rule.Id)).Funcs(template.FuncMap(tplx.TemplateFuncMap)).Parse(text)
		if err != nil {
			tagValue = fmt.Sprintf("parse tag value failed, err:%s", err)
		} else {
			// BUGFIX: only Execute when Parse succeeded — the original called
			// Execute on a nil template after a parse error.
			var body bytes.Buffer
			if err = t.Execute(&body, e); err != nil {
				tagValue = fmt.Sprintf("parse tag value failed, err:%s", err)
			} else {
				tagValue = body.String()
			}
		}

		tagsMap[arr[0]] = tagValue
	}

	tagsMap["rulename"] = p.rule.Name
	p.tagsMap = tagsMap

	// handle tagsArr
	p.tagsArr = labelMapToArr(tagsMap)
}
// mayHandleIdent resolves target ident/note from the "ident" tag, if present
// and known to the target cache.
func (p *Processor) mayHandleIdent() {
	ident, ok := p.tagsMap["ident"]
	if !ok {
		return
	}
	if target, found := p.TargetCache.Get(ident); found {
		p.target = target.Ident
		p.targetNote = target.Note
	}
}

// mayHandleGroup resolves the business group name for the rule, if the group exists.
func (p *Processor) mayHandleGroup() {
	if bg := p.busiGroupCache.GetByBusiGroupId(p.rule.GroupId); bg != nil {
		p.groupName = bg.Name
	}
}
// labelMapToArr converts a label map into a slice of "key=value" strings,
// sorted lexically whenever there is more than one label.
func labelMapToArr(m map[string]string) []string {
	pairs := make([]string, 0, len(m))
	for k, v := range m {
		pairs = append(pairs, k+"="+v)
	}

	if len(pairs) > 1 {
		sort.Strings(pairs)
	}
	return pairs
}
// Hash fingerprints an anomaly point for event identity:
// rule id + label set + datasource id + severity.
func Hash(ruleId, datasourceId int64, vector common.AnomalyPoint) string {
	return str.MD5(fmt.Sprintf("%d_%s_%d_%d", ruleId, vector.Labels.String(), datasourceId, vector.Severity))
}

// TagHash groups anomaly points that share the same label set.
func TagHash(vector common.AnomalyPoint) string {
	return str.MD5(vector.Labels.String())
}

View File

@@ -1,18 +0,0 @@
package queue
import (
"time"
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/toolkits/pkg/container/list"
)
// EventQueue buffers alert events pending dispatch, capped at 10,000,000 entries.
var EventQueue = list.NewSafeListLimited(10000000)

// ReportQueueSize exports the queue length as a gauge once per second, forever.
func ReportQueueSize(stats *astats.Stats) {
	for {
		time.Sleep(time.Second)
		stats.GaugeAlertQueueSize.Set(float64(EventQueue.Len()))
	}
}

View File

@@ -1,102 +0,0 @@
package record
import (
"context"
"fmt"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/prom"
"github.com/ccfos/nightingale/v6/pushgw/writer"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/str"
)
// RecordRuleContext periodically evaluates one recording rule against one
// datasource and writes the resulting series back through that datasource's
// writer client.
type RecordRuleContext struct {
	datasourceId int64
	quit         chan struct{} // closed by Stop to end the eval goroutine

	rule *models.RecordingRule
	// writers *writer.WritersType
	promClients *prom.PromClientMap
}

// NewRecordRuleContext builds a context; the writers parameter is currently
// unused (series are written through promClients instead).
func NewRecordRuleContext(rule *models.RecordingRule, datasourceId int64, promClients *prom.PromClientMap, writers *writer.WritersType) *RecordRuleContext {
	return &RecordRuleContext{
		datasourceId: datasourceId,
		quit:         make(chan struct{}),
		rule:         rule,
		promClients:  promClients,
		//writers: writers,
	}
}

// Key identifies this (rule, datasource) pair in logs.
func (rrc *RecordRuleContext) Key() string {
	return fmt.Sprintf("record-%d-%d", rrc.datasourceId, rrc.rule.Id)
}

// Hash fingerprints the fields whose change requires restarting this context.
func (rrc *RecordRuleContext) Hash() string {
	return str.MD5(fmt.Sprintf("%d_%d_%s_%d",
		rrc.rule.Id,
		rrc.rule.PromEvalInterval,
		rrc.rule.PromQl,
		rrc.datasourceId,
	))
}

// Prepare is a no-op hook kept for symmetry with other rule contexts.
func (rrc *RecordRuleContext) Prepare() {}
// Start launches the evaluation loop in a goroutine; the interval defaults to
// 10s when the rule's PromEvalInterval is unset or invalid.
func (rrc *RecordRuleContext) Start() {
	logger.Infof("eval:%s started", rrc.Key())
	interval := rrc.rule.PromEvalInterval
	if interval <= 0 {
		interval = 10
	}
	go func() {
		for {
			select {
			case <-rrc.quit:
				return
			default:
				rrc.Eval()
				time.Sleep(time.Duration(interval) * time.Second)
			}
		}
	}()
}

// Eval runs the rule's promql once and writes the converted series back.
// Any failure (blank promql, missing client, query error, warnings) aborts
// this round with a log line.
func (rrc *RecordRuleContext) Eval() {
	promql := strings.TrimSpace(rrc.rule.PromQl)
	if promql == "" {
		logger.Errorf("eval:%s promql is blank", rrc.Key())
		return
	}

	if rrc.promClients.IsNil(rrc.datasourceId) {
		logger.Errorf("eval:%s reader client is nil", rrc.Key())
		return
	}

	value, warnings, err := rrc.promClients.GetCli(rrc.datasourceId).Query(context.Background(), promql, time.Now())
	if err != nil {
		logger.Errorf("eval:%s promql:%s, error:%v", rrc.Key(), promql, err)
		return
	}

	if len(warnings) > 0 {
		logger.Errorf("eval:%s promql:%s, warnings:%v", rrc.Key(), promql, warnings)
		return
	}

	ts := ConvertToTimeSeries(value, rrc.rule)
	if len(ts) != 0 {
		rrc.promClients.GetWriterCli(rrc.datasourceId).Write(ts)
	}
}

// Stop signals the evaluation goroutine to exit.
func (rrc *RecordRuleContext) Stop() {
	logger.Infof("%s stopped", rrc.Key())
	close(rrc.quit)
}

View File

@@ -1,94 +0,0 @@
package record
import (
"context"
"fmt"
"time"
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/alert/naming"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/prom"
"github.com/ccfos/nightingale/v6/pushgw/writer"
)
// Scheduler keeps exactly one RecordRuleContext running per (recording rule,
// datasource) pair that this instance owns on the hash ring.
type Scheduler struct {
	// key: hash of (rule id, eval interval, promql, datasource id)
	recordRules map[string]*RecordRuleContext

	aconf aconf.Alert

	recordingRuleCache *memsto.RecordingRuleCacheType

	promClients *prom.PromClientMap
	writers     *writer.WritersType

	stats *astats.Stats
}

// NewScheduler builds a scheduler and starts its background sync loop.
func NewScheduler(aconf aconf.Alert, rrc *memsto.RecordingRuleCacheType, promClients *prom.PromClientMap, writers *writer.WritersType, stats *astats.Stats) *Scheduler {
	scheduler := &Scheduler{
		aconf:       aconf,
		recordRules: make(map[string]*RecordRuleContext),

		recordingRuleCache: rrc,

		promClients: promClients,
		writers:     writers,

		stats: stats,
	}

	go scheduler.LoopSyncRules(context.Background())
	return scheduler
}

// LoopSyncRules waits EngineDelay seconds for caches to warm up, then
// re-syncs rule contexts every ~9 seconds until ctx is cancelled.
func (s *Scheduler) LoopSyncRules(ctx context.Context) {
	time.Sleep(time.Duration(s.aconf.EngineDelay) * time.Second)
	duration := 9000 * time.Millisecond
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(duration):
			s.syncRecordRules()
		}
	}
}
// syncRecordRules diffs the desired set of (rule, datasource) contexts
// against the running set: contexts new to this instance are started,
// contexts no longer owned (or whose rule changed/was deleted) are stopped.
func (s *Scheduler) syncRecordRules() {
	ids := s.recordingRuleCache.GetRuleIds()
	recordRules := make(map[string]*RecordRuleContext)
	for _, id := range ids {
		rule := s.recordingRuleCache.Get(id)
		if rule == nil {
			continue
		}
		datasourceIds := s.promClients.Hit(rule.DatasourceIdsJson)
		for _, dsId := range datasourceIds {
			// Only run rules this instance owns on the datasource's hash ring.
			if !naming.DatasourceHashRing.IsHit(dsId, fmt.Sprintf("%d", rule.Id), s.aconf.Heartbeat.Endpoint) {
				continue
			}
			recordRule := NewRecordRuleContext(rule, dsId, s.promClients, s.writers)
			recordRules[recordRule.Hash()] = recordRule
		}
	}

	// Start contexts that are desired but not yet running.
	for hash, rule := range recordRules {
		if _, has := s.recordRules[hash]; !has {
			rule.Prepare()
			rule.Start()
			s.recordRules[hash] = rule
		}
	}

	// Stop contexts that are running but no longer desired.
	for hash, rule := range s.recordRules {
		if _, has := recordRules[hash]; !has {
			rule.Stop()
			delete(s.recordRules, hash)
		}
	}
}

View File

@@ -1,78 +0,0 @@
package router
import (
"net/http"
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/alert/process"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/httpx"
"github.com/gin-gonic/gin"
)
// Router exposes the alert engine's HTTP endpoints (/v1/n9e/event and
// /v1/n9e/make-event) for externally produced alert events.
type Router struct {
	HTTP               httpx.Config
	Alert              aconf.Alert
	AlertMuteCache     *memsto.AlertMuteCacheType
	TargetCache        *memsto.TargetCacheType
	BusiGroupCache     *memsto.BusiGroupCacheType
	AlertStats         *astats.Stats
	Ctx                *ctx.Context
	ExternalProcessors *process.ExternalProcessorsType
}

// New wires a Router with its config, caches, stats and processor registry.
func New(httpConfig httpx.Config, alert aconf.Alert, amc *memsto.AlertMuteCacheType, tc *memsto.TargetCacheType, bgc *memsto.BusiGroupCacheType,
	astats *astats.Stats, ctx *ctx.Context, externalProcessors *process.ExternalProcessorsType) *Router {
	return &Router{
		HTTP:               httpConfig,
		Alert:              alert,
		AlertStats:         astats,
		AlertMuteCache:     amc,
		TargetCache:        tc,
		BusiGroupCache:     bgc,
		Ctx:                ctx,
		ExternalProcessors: externalProcessors,
	}
}

// Config registers the alert routes on r, guarded by optional basic auth,
// but only when the alert HTTP endpoint is enabled.
func (rt *Router) Config(r *gin.Engine) {
	if !rt.HTTP.Alert.Enable {
		return
	}

	service := r.Group("/v1/n9e")
	if len(rt.HTTP.Alert.BasicAuth) > 0 {
		service.Use(gin.BasicAuth(rt.HTTP.Alert.BasicAuth))
	}
	service.POST("/event", rt.pushEventToQueue)
	service.POST("/make-event", rt.makeEvent)
}
// Render writes a uniform JSON envelope: the data payload on success, or an
// error message object otherwise.
func Render(c *gin.Context, data, msg interface{}) {
	if msg != nil {
		c.JSON(http.StatusOK, gin.H{"error": gin.H{"message": msg}})
		return
	}
	if data == nil {
		data = struct{}{}
	}
	c.JSON(http.StatusOK, gin.H{"data": data, "error": ""})
}

// Dangerous renders v as an error response when it is a non-empty string or
// an error; nil and other types produce no output.
func Dangerous(c *gin.Context, v interface{}, code ...int) {
	if v == nil {
		return
	}

	switch t := v.(type) {
	case string:
		if t != "" {
			c.JSON(http.StatusOK, gin.H{"error": gin.H{"message": v}})
		}
	case error:
		c.JSON(http.StatusOK, gin.H{"error": gin.H{"message": t.Error()}})
	}
}

View File

@@ -1,141 +0,0 @@
package router
import (
"fmt"
"strings"
"time"
"github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/alert/dispatch"
"github.com/ccfos/nightingale/v6/alert/mute"
"github.com/ccfos/nightingale/v6/alert/naming"
"github.com/ccfos/nightingale/v6/alert/process"
"github.com/ccfos/nightingale/v6/alert/queue"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/poster"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
)
// pushEventToQueue accepts an externally produced alert event over HTTP,
// applies mute rules, renders rule-name/note/annotation templates, and
// enqueues the event for dispatch.
func (rt *Router) pushEventToQueue(c *gin.Context) {
	var event *models.AlertCurEvent
	ginx.BindJSON(c, &event)
	if event.RuleId == 0 {
		ginx.Bomb(200, "event is illegal")
	}

	event.TagsMap = make(map[string]string)
	for i := 0; i < len(event.TagsJSON); i++ {
		pair := strings.TrimSpace(event.TagsJSON[i])
		if pair == "" {
			continue
		}

		arr := strings.Split(pair, "=")
		if len(arr) != 2 {
			continue
		}

		event.TagsMap[arr[0]] = arr[1]
	}

	if mute.EventMuteStrategy(event, rt.AlertMuteCache) {
		logger.Infof("event_muted: rule_id=%d %s", event.RuleId, event.Hash)
		ginx.NewRender(c).Message(nil)
		return
	}

	if err := event.ParseRule("rule_name"); err != nil {
		event.RuleName = fmt.Sprintf("failed to parse rule name: %v", err)
	}

	if err := event.ParseRule("rule_note"); err != nil {
		event.RuleNote = fmt.Sprintf("failed to parse rule note: %v", err)
	}

	if err := event.ParseRule("annotations"); err != nil {
		event.RuleNote = fmt.Sprintf("failed to parse rule note: %v", err)
	}

	// If rule_note starts with ";", its content replaces the event tags.
	if strings.HasPrefix(event.RuleNote, ";") {
		event.RuleNote = strings.TrimPrefix(event.RuleNote, ";")
		event.Tags = strings.ReplaceAll(event.RuleNote, " ", ",,")
		event.TagsJSON = strings.Split(event.Tags, ",,")
	} else {
		event.Tags = strings.Join(event.TagsJSON, ",,")
	}

	event.Callbacks = strings.Join(event.CallbacksJSON, " ")
	event.NotifyChannels = strings.Join(event.NotifyChannelsJSON, " ")
	event.NotifyGroups = strings.Join(event.NotifyGroupsJSON, " ")

	rt.AlertStats.CounterAlertsTotal.WithLabelValues(event.Cluster).Inc()
	dispatch.LogEvent(event, "http_push_queue")
	if !queue.EventQueue.PushFront(event) {
		msg := fmt.Sprintf("event:%+v push_queue err: queue is full", event)
		// BUGFIX: log BEFORE Bomb — Bomb panics, so the original post-Bomb
		// Warningf was unreachable. Also use Warning instead of Warningf so
		// event data containing % verbs is not treated as a format string.
		logger.Warning(msg)
		ginx.Bomb(200, msg)
	}
	ginx.NewRender(c).Message(nil)
}
// eventForm is the payload of /v1/n9e/make-event: anomaly points for one
// external rule on one datasource. Alert=false means "these points recovered".
type eventForm struct {
	Alert         bool                  `json:"alert"`
	AnomalyPoints []common.AnomalyPoint `json:"vectors"`
	RuleId        int64                 `json:"rule_id"`
	DatasourceId  int64                 `json:"datasource_id"`
	Inhibit       bool                  `json:"inhibit"`
}

// makeEvent routes externally produced anomaly points to the processor of
// the owning instance: points for rules owned elsewhere on the hash ring are
// forwarded; local ones are handled (fire or recover) asynchronously.
func (rt *Router) makeEvent(c *gin.Context) {
	var events []*eventForm
	ginx.BindJSON(c, &events)
	//now := time.Now().Unix()
	for i := 0; i < len(events); i++ {
		node, err := naming.DatasourceHashRing.GetNode(events[i].DatasourceId, fmt.Sprintf("%d", events[i].RuleId))
		if err != nil {
			logger.Warningf("event:%+v get node err:%v", events[i], err)
			ginx.Bomb(200, "event node not exists")
		}

		if node != rt.Alert.Heartbeat.Endpoint {
			// Not ours: forward to the instance that owns this rule.
			err := forwardEvent(events[i], node)
			if err != nil {
				logger.Warningf("event:%+v forward err:%v", events[i], err)
				ginx.Bomb(200, "event forward error")
			}
			continue
		}

		ruleWorker, exists := rt.ExternalProcessors.GetExternalAlertRule(events[i].DatasourceId, events[i].RuleId)
		logger.Debugf("handle event:%+v exists:%v", events[i], exists)
		if !exists {
			ginx.Bomb(200, "rule not exists")
		}

		if events[i].Alert {
			go ruleWorker.Handle(events[i].AnomalyPoints, "http", events[i].Inhibit)
		} else {
			for _, vector := range events[i].AnomalyPoints {
				readableString := vector.ReadableValue()
				go ruleWorker.RecoverSingle(process.Hash(events[i].RuleId, events[i].DatasourceId, vector), vector.Timestamp, &readableString)
			}
		}
	}
	ginx.NewRender(c).Message(nil)
}
// forwardEvent sends an event this instance does not own to the instance
// that owns it on the hash ring.
func forwardEvent(event *eventForm, instance string) error {
	target := fmt.Sprintf("http://%s/v1/n9e/make-event", instance)
	res, code, err := poster.PostJSON(target, time.Second*5, []*eventForm{event}, 3)
	if err != nil {
		return err
	}

	logger.Infof("forward event: result=succ url=%s code=%d event:%v response=%s", target, code, event, string(res))
	return nil
}

View File

@@ -1,100 +0,0 @@
package sender
import (
"html/template"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/poster"
"github.com/toolkits/pkg/logger"
)
// dingtalkMarkdown is the markdown payload of a DingTalk robot message.
type dingtalkMarkdown struct {
	Title string `json:"title"`
	Text  string `json:"text"`
}

// dingtalkAt lists the mobile numbers to @-mention.
type dingtalkAt struct {
	AtMobiles []string `json:"atMobiles"`
	IsAtAll   bool     `json:"isAtAll"`
}

// dingtalk is the full robot webhook request body.
type dingtalk struct {
	Msgtype  string           `json:"msgtype"`
	Markdown dingtalkMarkdown `json:"markdown"`
	At       dingtalkAt       `json:"at"`
}

// DingtalkSender renders events through tpl and posts them to the users'
// DingTalk robot webhooks.
type DingtalkSender struct {
	tpl *template.Template
}

// Send renders the event once and posts it to every extracted webhook URL.
// A URL containing "noat=1" suppresses the @-mentions.
func (ds *DingtalkSender) Send(ctx MessageContext) {
	if len(ctx.Users) == 0 || ctx.Rule == nil || ctx.Event == nil {
		return
	}
	urls, ats := ds.extract(ctx.Users)
	if len(urls) == 0 {
		return
	}
	message := BuildTplMessage(ds.tpl, ctx.Event)
	for _, url := range urls {
		var body dingtalk
		// NoAt in url
		if strings.Contains(url, "noat=1") {
			body = dingtalk{
				Msgtype: "markdown",
				Markdown: dingtalkMarkdown{
					Title: ctx.Rule.Name,
					Text:  message,
				},
			}
		} else {
			body = dingtalk{
				Msgtype: "markdown",
				Markdown: dingtalkMarkdown{
					Title: ctx.Rule.Name,
					Text:  message + " " + strings.Join(ats, " "),
				},
				At: dingtalkAt{
					AtMobiles: ats,
					IsAtAll:   false,
				},
			}
		}
		ds.doSend(url, body)
	}
}
// extract collects webhook URLs and @-mention phone numbers from users.
func (ds *DingtalkSender) extract(users []*models.User) ([]string, []string) {
	urls := make([]string, 0, len(users))
	ats := make([]string, 0, len(users))

	for _, u := range users {
		if u.Phone != "" {
			ats = append(ats, "@"+u.Phone)
		}
		token, has := u.ExtractToken(models.Dingtalk)
		if !has {
			continue
		}
		if strings.HasPrefix(token, "https://") {
			urls = append(urls, token)
		} else {
			urls = append(urls, "https://oapi.dingtalk.com/robot/send?access_token="+token)
		}
	}
	return urls, ats
}
// doSend posts body to url with retries and logs the outcome.
func (ds *DingtalkSender) doSend(url string, body dingtalk) {
	res, code, err := poster.PostJSON(url, time.Second*5, body, 3)
	if err == nil {
		logger.Infof("dingtalk_sender: result=succ url=%s code=%d response=%s", url, code, string(res))
		return
	}
	logger.Errorf("dingtalk_sender: result=fail url=%s code=%d error=%v response=%s", url, code, err, string(res))
}

View File

@@ -1,82 +0,0 @@
package sender
import (
"html/template"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/poster"
"github.com/toolkits/pkg/logger"
)
// feishuContent is the text payload of a Feishu bot message.
type feishuContent struct {
	Text string `json:"text"`
}

// feishuAt lists the mobile numbers to @-mention.
type feishuAt struct {
	AtMobiles []string `json:"atMobiles"`
	IsAtAll   bool     `json:"isAtAll"`
}

// feishu is the full bot webhook request body.
type feishu struct {
	Msgtype string        `json:"msg_type"`
	Content feishuContent `json:"content"`
	At      feishuAt      `json:"at"`
}

// FeishuSender renders events through tpl and posts them to the users'
// Feishu bot webhooks.
type FeishuSender struct {
	tpl *template.Template
}

// Send renders the event once and posts it to every extracted webhook URL.
// A URL containing "noat=1" suppresses the @-mentions.
func (fs *FeishuSender) Send(ctx MessageContext) {
	if len(ctx.Users) == 0 || ctx.Rule == nil || ctx.Event == nil {
		return
	}
	urls, ats := fs.extract(ctx.Users)
	message := BuildTplMessage(fs.tpl, ctx.Event)
	for _, url := range urls {
		body := feishu{
			Msgtype: "text",
			Content: feishuContent{
				Text: message,
			},
		}
		if !strings.Contains(url, "noat=1") {
			body.At = feishuAt{
				AtMobiles: ats,
				IsAtAll:   false,
			}
		}
		fs.doSend(url, body)
	}
}
func (fs *FeishuSender) extract(users []*models.User) ([]string, []string) {
urls := make([]string, 0, len(users))
ats := make([]string, 0, len(users))
for _, user := range users {
if user.Phone != "" {
ats = append(ats, user.Phone)
}
if token, has := user.ExtractToken(models.Feishu); has {
url := token
if !strings.HasPrefix(token, "https://") {
url = "https://open.feishu.cn/open-apis/bot/v2/hook/" + token
}
urls = append(urls, url)
}
}
return urls, ats
}
// doSend posts one feishu payload to the given webhook URL (5s timeout,
// 3 retries via poster.PostJSON) and logs the outcome either way.
func (fs *FeishuSender) doSend(url string, body feishu) {
	res, code, err := poster.PostJSON(url, time.Second*5, body, 3)
	if err == nil {
		logger.Infof("feishu_sender: result=succ url=%s code=%d response=%s", url, code, string(res))
		return
	}
	logger.Errorf("feishu_sender: result=fail url=%s code=%d error=%v response=%s", url, code, err, string(res))
}

View File

@@ -1,106 +0,0 @@
package sender
import (
"html/template"
"net/url"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/poster"
"github.com/toolkits/pkg/logger"
)
// MatterMostMessage is one rendered alert text plus the webhook tokens
// (full URLs, possibly with routing query params) it should be delivered to.
type MatterMostMessage struct {
Text string
Tokens []string
}
// mm is the request body for a Mattermost incoming webhook.
type mm struct {
Channel string `json:"channel"`
Username string `json:"username"`
Text string `json:"text"`
}
// MmSender renders alert events with tpl and delivers them to Mattermost.
type MmSender struct {
tpl *template.Template
}
// Send renders the event and forwards it to all users' Mattermost webhooks.
// A context with no recipients, rule or event is silently ignored.
func (ms *MmSender) Send(ctx MessageContext) {
	if len(ctx.Users) == 0 || ctx.Rule == nil || ctx.Event == nil {
		return
	}

	tokens := ms.extract(ctx.Users)
	if len(tokens) == 0 {
		return
	}

	SendMM(MatterMostMessage{
		Text:   BuildTplMessage(ms.tpl, ctx.Event),
		Tokens: tokens,
	})
}
// extract returns the Mattermost webhook tokens configured by the given users.
func (ms *MmSender) extract(users []*models.User) []string {
	tokens := make([]string, 0, len(users))
	for _, u := range users {
		if t, ok := u.ExtractToken(models.Mm); ok {
			tokens = append(tokens, t)
		}
	}
	return tokens
}
// SendMM delivers one rendered alert to every Mattermost webhook in message.Tokens.
//
// Each token is a full webhook URL whose query string may carry routing hints:
//   - channel:  one or more target channels (one POST per channel)
//   - atuser:   users to @-mention, prepended to the text
//   - username: the bot display name
//
// The query string is stripped before posting; requests use a 5s timeout with
// 3 retries via poster.PostJSON. Malformed tokens are logged and skipped.
func SendMM(message MatterMostMessage) {
	for i := 0; i < len(message.Tokens); i++ {
		u, err := url.Parse(message.Tokens[i])
		if err != nil {
			// Previously execution fell through here with an unusable *url.URL;
			// a token that cannot be parsed cannot be posted to, so skip it.
			logger.Errorf("mm_sender: failed to parse error=%v", err)
			continue
		}
		v, err := url.ParseQuery(u.RawQuery)
		if err != nil {
			logger.Errorf("mm_sender: failed to parse query error=%v", err)
			continue
		}

		channels := v["channel"] // do not get

		// Build the optional "@user1,@user2\n" mention prefix.
		txt := ""
		atuser := v["atuser"]
		if len(atuser) != 0 {
			txt = strings.Join(MapStrToStr(atuser, func(u string) string {
				return "@" + u
			}), ",") + "\n"
		}

		// v.Get returns no error; the stale err re-check that used to follow
		// here only duplicated the ParseQuery log line and has been removed.
		username := v.Get("username")

		// simple concatenating: rebuild the URL without its query string
		ur := u.Scheme + "://" + u.Host + u.Path
		for _, channel := range channels {
			body := mm{
				Channel:  channel,
				Username: username,
				Text:     txt + message.Text,
			}
			res, code, err := poster.PostJSON(ur, time.Second*5, body, 3)
			if err != nil {
				logger.Errorf("mm_sender: result=fail url=%s code=%d error=%v response=%s", ur, code, err, string(res))
			} else {
				logger.Infof("mm_sender: result=succ url=%s code=%d response=%s", ur, code, string(res))
			}
		}
	}
}
// MapStrToStr applies fn to every element of arr and returns the transformed
// slice; an empty input yields an empty (non-nil) slice.
func MapStrToStr(arr []string, fn func(s string) string) []string {
	out := make([]string, 0, len(arr))
	for _, s := range arr {
		out = append(out, fn(s))
	}
	return out
}

View File

@@ -1,97 +0,0 @@
package sender
import (
"bytes"
"os"
"os/exec"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/toolkits/pkg/file"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/sys"
)
// MayPluginNotify invokes the user-configured notify script (if any) with the
// serialized alert payload on stdin. Empty payloads are ignored.
func MayPluginNotify(noticeBytes []byte, notifyScript models.NotifyScript) {
if len(noticeBytes) == 0 {
return
}
alertingCallScript(noticeBytes, notifyScript)
}
// alertingCallScript runs the configured notify script with the alert payload
// on stdin, enforcing the configured timeout and logging combined output.
//
// config.Type == 1 means Content is a path to an existing script; otherwise
// Content is the script body, which is materialized into a local file (and
// rewritten only when its content changed) before execution.
func alertingCallScript(stdinBytes []byte, notifyScript models.NotifyScript) {
// not enable or no notify.py? do nothing
config := notifyScript
if !config.Enable || config.Content == "" {
return
}
// NOTE(review): ".notify_scriptt" (double t) looks like a typo, but changing
// it would change the on-disk file name existing deployments may rely on.
fpath := ".notify_scriptt"
if config.Type == 1 {
fpath = config.Content
} else {
// Rewrite the materialized script only when the stored content differs,
// to avoid touching the file (and its mtime) on every notification.
rewrite := true
if file.IsExist(fpath) {
oldContent, err := file.ToString(fpath)
if err != nil {
logger.Errorf("event_notify: read script file err: %v", err)
return
}
if oldContent == config.Content {
rewrite = false
}
}
if rewrite {
_, err := file.WriteString(fpath, config.Content)
if err != nil {
logger.Errorf("event_notify: write script file err: %v", err)
return
}
err = os.Chmod(fpath, 0777)
if err != nil {
logger.Errorf("event_notify: chmod script file err: %v", err)
return
}
}
// Relative "./" prefix so exec finds the freshly written script in cwd.
fpath = "./" + fpath
}
cmd := exec.Command(fpath)
cmd.Stdin = bytes.NewReader(stdinBytes)
// combine stdout and stderr
var buf bytes.Buffer
cmd.Stdout = &buf
cmd.Stderr = &buf
err := startCmd(cmd)
if err != nil {
logger.Errorf("event_notify: run cmd err: %v", err)
return
}
// WrapTimeout waits for the process and kills it after config.Timeout seconds.
err, isTimeout := sys.WrapTimeout(cmd, time.Duration(config.Timeout)*time.Second)
if isTimeout {
if err == nil {
logger.Errorf("event_notify: timeout and killed process %s", fpath)
}
if err != nil {
logger.Errorf("event_notify: kill process %s occur error %v", fpath, err)
}
return
}
if err != nil {
logger.Errorf("event_notify: exec script %s occur error: %v, output: %s", fpath, err, buf.String())
return
}
logger.Infof("event_notify: exec %s output: %s", fpath, buf.String())
}

View File

@@ -1,62 +0,0 @@
package sender
import (
"bytes"
"html/template"
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/models"
)
type (
// Sender is the interface implemented by every notification channel.
Sender interface {
Send(ctx MessageContext)
}
// MessageContext bundles everything needed to notify about one alert event:
// the recipients, the rule that fired, and the event itself.
MessageContext struct {
Users []*models.User
Rule *models.AlertRule
Event *models.AlertCurEvent
}
)
// NewSender returns the Sender implementation for the given channel key,
// or nil when the key is not a built-in channel. The email sender uses two
// templates: the "mailsubject" template for the subject and the email
// template for the body.
func NewSender(key string, tpls map[string]*template.Template, smtp aconf.SMTPConfig) Sender {
	switch key {
	case models.Dingtalk:
		return &DingtalkSender{tpl: tpls[models.Dingtalk]}
	case models.Wecom:
		return &WecomSender{tpl: tpls[models.Wecom]}
	case models.Feishu:
		return &FeishuSender{tpl: tpls[models.Feishu]}
	case models.Email:
		return &EmailSender{subjectTpl: tpls["mailsubject"], contentTpl: tpls[models.Email], smtp: smtp}
	case models.Mm:
		return &MmSender{tpl: tpls[models.Mm]}
	case models.Telegram:
		return &TelegramSender{tpl: tpls[models.Telegram]}
	default:
		return nil
	}
}
// BuildMessageContext resolves uids into user objects via the cache and
// bundles them with the rule and event for the senders.
func BuildMessageContext(rule *models.AlertRule, event *models.AlertCurEvent, uids []int64, userCache *memsto.UserCacheType) MessageContext {
	return MessageContext{
		Rule:  rule,
		Event: event,
		Users: userCache.GetByUserIds(uids),
	}
}
// BuildTplMessage renders event through tpl. Template errors — and a missing
// template — are returned as the message text so the operator sees them in
// the notification instead of silence.
func BuildTplMessage(tpl *template.Template, event *models.AlertCurEvent) string {
	if tpl == nil {
		return "tpl for current sender not found, please check configuration"
	}
	var buf bytes.Buffer
	if err := tpl.Execute(&buf, event); err != nil {
		return err.Error()
	}
	return buf.String()
}

View File

@@ -1,82 +0,0 @@
package sender
import (
"html/template"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/poster"
"github.com/toolkits/pkg/logger"
)
// TelegramMessage is one rendered alert text plus the per-user tokens
// ("botToken/chatId" pairs or full URLs) it should be delivered to.
type TelegramMessage struct {
Text string
Tokens []string
}
// telegram is the request body for the Telegram sendMessage API.
type telegram struct {
ParseMode string `json:"parse_mode"`
Text string `json:"text"`
}
// TelegramSender renders alert events with tpl and delivers them to Telegram.
type TelegramSender struct {
tpl *template.Template
}
// Send renders the event and delivers it to every user's Telegram target.
func (ts *TelegramSender) Send(ctx MessageContext) {
	if len(ctx.Users) == 0 || ctx.Rule == nil || ctx.Event == nil {
		return
	}

	tokens := ts.extract(ctx.Users)
	SendTelegram(TelegramMessage{
		Text:   BuildTplMessage(ts.tpl, ctx.Event),
		Tokens: tokens,
	})
}
// extract returns the Telegram tokens configured by the given users.
func (ts *TelegramSender) extract(users []*models.User) []string {
	tokens := make([]string, 0, len(users))
	for _, u := range users {
		if t, ok := u.ExtractToken(models.Telegram); ok {
			tokens = append(tokens, t)
		}
	}
	return tokens
}
// SendTelegram delivers one rendered alert to every Telegram target in
// message.Tokens. A token is either a full "https://..." URL used as-is,
// or a "botToken/chatId" pair expanded into the Bot API sendMessage URL.
// Invalid tokens are logged and skipped; messages are sent as markdown
// with a 5s timeout and 3 retries via poster.PostJSON.
func SendTelegram(message TelegramMessage) {
for i := 0; i < len(message.Tokens); i++ {
// A usable token must be a URL or contain the "/" separating bot token
// from chat id.
if !strings.Contains(message.Tokens[i], "/") && !strings.HasPrefix(message.Tokens[i], "https://") {
logger.Errorf("telegram_sender: result=fail invalid token=%s", message.Tokens[i])
continue
}
var url string
if strings.HasPrefix(message.Tokens[i], "https://") {
url = message.Tokens[i]
} else {
array := strings.Split(message.Tokens[i], "/")
if len(array) != 2 {
logger.Errorf("telegram_sender: result=fail invalid token=%s", message.Tokens[i])
continue
}
botToken := array[0]
chatId := array[1]
url = "https://api.telegram.org/bot" + botToken + "/sendMessage?chat_id=" + chatId
}
body := telegram{
ParseMode: "markdown",
Text: message.Text,
}
res, code, err := poster.PostJSON(url, time.Second*5, body, 3)
if err != nil {
logger.Errorf("telegram_sender: result=fail url=%s code=%d error=%v response=%s", url, code, err, string(res))
} else {
logger.Infof("telegram_sender: result=succ url=%s code=%d response=%s", url, code, string(res))
}
}
}

View File

@@ -1,68 +0,0 @@
package sender
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/toolkits/pkg/logger"
)
// SendWebhooks posts the alert event as JSON to every enabled webhook.
//
// Per webhook it honours basic auth, custom header pairs (a flat
// [key, value, key, value, ...] list; the special key "host" overrides the
// request Host), and a per-webhook timeout. Failures are logged and the
// remaining webhooks are still tried.
func SendWebhooks(webhooks []*models.Webhook, event *models.AlertCurEvent) {
	for _, conf := range webhooks {
		if conf.Url == "" || !conf.Enable {
			continue
		}

		bs, err := json.Marshal(event)
		if err != nil {
			continue
		}

		req, err := http.NewRequest("POST", conf.Url, bytes.NewBuffer(bs))
		if err != nil {
			logger.Warning("alertingWebhook failed to new request", err)
			continue
		}

		req.Header.Set("Content-Type", "application/json")

		if conf.BasicAuthUser != "" && conf.BasicAuthPass != "" {
			req.SetBasicAuth(conf.BasicAuthUser, conf.BasicAuthPass)
		}

		// Headers come in flat key/value pairs; odd-length lists are ignored.
		if len(conf.Headers) > 0 && len(conf.Headers)%2 == 0 {
			for i := 0; i < len(conf.Headers); i += 2 {
				if conf.Headers[i] == "host" {
					req.Host = conf.Headers[i+1]
					continue
				}
				req.Header.Set(conf.Headers[i], conf.Headers[i+1])
			}
		}

		// todo add skip verify
		client := http.Client{
			Timeout: time.Duration(conf.Timeout) * time.Second,
		}

		resp, err := client.Do(req)
		if err != nil {
			logger.Warningf("WebhookCallError, ruleId: [%d], eventId: [%d], url: [%s], error: [%s]", event.RuleId, event.Id, conf.Url, err)
			continue
		}

		var body []byte
		if resp.Body != nil {
			// Close the body immediately instead of with defer: a defer inside
			// the loop would keep every response open until the function
			// returns, leaking connections when many webhooks are configured.
			body, _ = ioutil.ReadAll(resp.Body)
			resp.Body.Close()
		}

		logger.Debugf("alertingWebhook done, url: %s, response code: %d, body: %s", conf.Url, resp.StatusCode, string(body))
	}
}

View File

@@ -1,65 +0,0 @@
package sender
import (
"html/template"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/poster"
"github.com/toolkits/pkg/logger"
)
// wecomMarkdown is the markdown payload of a WeCom bot message.
type wecomMarkdown struct {
Content string `json:"content"`
}
// wecom is the request body posted to the WeCom webhook API.
type wecom struct {
Msgtype string `json:"msgtype"`
Markdown wecomMarkdown `json:"markdown"`
}
// WecomSender renders alert events with tpl and delivers them to WeCom webhooks.
type WecomSender struct {
tpl *template.Template
}
// Send renders the event once and posts it as markdown to every recipient's
// WeCom webhook.
func (ws *WecomSender) Send(ctx MessageContext) {
	if len(ctx.Users) == 0 || ctx.Rule == nil || ctx.Event == nil {
		return
	}

	webhooks := ws.extract(ctx.Users)
	text := BuildTplMessage(ws.tpl, ctx.Event)

	for _, webhook := range webhooks {
		payload := wecom{
			Msgtype:  "markdown",
			Markdown: wecomMarkdown{Content: text},
		}
		ws.doSend(webhook, payload)
	}
}
// extract collects each user's WeCom webhook URL; bare tokens are expanded
// into the full qyapi webhook URL.
func (ws *WecomSender) extract(users []*models.User) []string {
	webhooks := make([]string, 0, len(users))
	for _, u := range users {
		token, ok := u.ExtractToken(models.Wecom)
		if !ok {
			continue
		}
		if strings.HasPrefix(token, "https://") {
			webhooks = append(webhooks, token)
		} else {
			webhooks = append(webhooks, "https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key="+token)
		}
	}
	return webhooks
}
// doSend posts one wecom payload to the given webhook URL (5s timeout,
// 3 retries via poster.PostJSON) and logs the outcome either way.
func (ws *WecomSender) doSend(url string, body wecom) {
	res, code, err := poster.PostJSON(url, time.Second*5, body, 3)
	if err == nil {
		logger.Infof("wecom_sender: result=succ url=%s code=%d response=%s", url, code, string(res))
		return
	}
	logger.Errorf("wecom_sender: result=fail url=%s code=%d error=%v response=%s", url, code, err, string(res))
}

View File

@@ -1,35 +0,0 @@
package cconf
import (
"github.com/gin-gonic/gin"
)
// Center holds the center component's static configuration loaded from file.
type Center struct {
Plugins []Plugin
BasicAuth gin.Accounts
MetricsYamlFile string
OpsYamlFile string
BuiltinIntegrationsDir string
I18NHeaderKey string
MetricDesc MetricDescType
TargetMetrics map[string]string
AnonymousAccess AnonymousAccess
}
// Plugin describes one datasource plugin shown in the UI.
type Plugin struct {
Id int64 `json:"id"`
Category string `json:"category"`
Type string `json:"plugin_type"`
TypeName string `json:"plugin_type_name"`
}
// AnonymousAccess toggles which read-only endpoints skip authentication.
type AnonymousAccess struct {
PromQuerier bool
AlertDetail bool
}
// PreCheck fills in the built-in plugin list when none was configured.
func (c *Center) PreCheck() {
if len(c.Plugins) == 0 {
c.Plugins = Plugins
}
}

View File

@@ -1,60 +0,0 @@
package cconf
// EVENT_EXAMPLE is a sample alert event (JSON) used for previewing/testing
// notification templates and webhook payloads. The string is runtime data:
// do not edit it for cosmetic reasons.
const EVENT_EXAMPLE = `
{
"id": 1000000,
"cate": "prometheus",
"datasource_id": 1,
"group_id": 1,
"group_name": "Default Busi Group",
"hash": "2cb966f9ba1cdc7af94c3796e855955a",
"rule_id": 23,
"rule_name": "测试告警",
"rule_note": "测试告警",
"rule_prod": "metric",
"rule_config": {
"queries": [
{
"key": "all_hosts",
"op": "==",
"values": []
}
],
"triggers": [
{
"duration": 3,
"percent": 10,
"severity": 3,
"type": "pct_target_miss"
}
]
},
"prom_for_duration": 60,
"prom_eval_interval": 30,
"callbacks": ["https://n9e.github.io"],
"notify_recovered": 1,
"notify_channels": ["dingtalk"],
"notify_groups": [],
"notify_groups_obj": null,
"target_ident": "host01",
"target_note": "机器备注",
"trigger_time": 1677229517,
"trigger_value": "2273533952",
"tags": [
"__name__=disk_free",
"dc=qcloud-dev",
"device=vda1",
"fstype=ext4",
"ident=tt-fc-dev00.nj"
],
"is_recovered": false,
"notify_users_obj": null,
"last_eval_time": 1677229517,
"last_sent_time": 1677229517,
"notify_cur_number": 1,
"first_trigger_time": 1677229517,
"annotations": {
"summary": "测试告警"
}
}
`

View File

@@ -1,39 +0,0 @@
package cconf
import (
"path"
"github.com/toolkits/pkg/file"
"github.com/toolkits/pkg/runner"
)
// Operations holds the permission groups loaded from ops.yaml.
var Operations = Operation{}
// Operation is the top-level ops.yaml document.
type Operation struct {
Ops []Ops `yaml:"ops"`
}
// Ops is one named permission group and the operation keys it grants.
type Ops struct {
Name string `yaml:"name" json:"name"`
Cname string `yaml:"cname" json:"cname"`
Ops []string `yaml:"ops" json:"ops"`
}
// LoadOpsYaml reads the permission groups into the package-level Operations.
// An empty path falls back to <cwd>/etc/ops.yaml; a missing file is not an
// error (defaults stay empty).
func LoadOpsYaml(opsYamlFile string) error {
fp := opsYamlFile
if fp == "" {
fp = path.Join(runner.Cwd, "etc", "ops.yaml")
}
if !file.IsExist(fp) {
return nil
}
return file.ReadYaml(fp, &Operations)
}
// GetAllOps flattens the op lists of all permission groups into one slice.
func GetAllOps(ops []Ops) []string {
	var all []string
	for _, group := range ops {
		all = append(all, group.Ops...)
	}
	return all
}

View File

@@ -1,22 +0,0 @@
package cconf
// Plugins is the built-in datasource plugin list, used when the config
// file declares none (see Center.PreCheck).
var Plugins = []Plugin{
{
Id: 1,
Category: "timeseries",
Type: "prometheus",
TypeName: "Prometheus Like",
},
{
Id: 2,
Category: "logging",
Type: "elasticsearch",
TypeName: "Elasticsearch",
},
{
Id: 3,
Category: "logging",
Type: "jaeger",
TypeName: "Jaeger",
},
}

View File

@@ -1,96 +0,0 @@
package center
import (
"context"
"fmt"
"github.com/ccfos/nightingale/v6/alert"
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/alert/process"
"github.com/ccfos/nightingale/v6/center/cconf"
"github.com/ccfos/nightingale/v6/center/metas"
"github.com/ccfos/nightingale/v6/center/sso"
"github.com/ccfos/nightingale/v6/conf"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/httpx"
"github.com/ccfos/nightingale/v6/pkg/i18nx"
"github.com/ccfos/nightingale/v6/pkg/logx"
"github.com/ccfos/nightingale/v6/prom"
"github.com/ccfos/nightingale/v6/pushgw/idents"
"github.com/ccfos/nightingale/v6/pushgw/writer"
"github.com/ccfos/nightingale/v6/storage"
alertrt "github.com/ccfos/nightingale/v6/alert/router"
centerrt "github.com/ccfos/nightingale/v6/center/router"
pushgwrt "github.com/ccfos/nightingale/v6/pushgw/router"
)
// Initialize wires up the whole center process: config, logging, storage,
// caches, the alert engine, and the HTTP routers. It returns a cleanup
// function that flushes logs and shuts down the HTTP server, or an error
// if any stage fails. Construction order matters: config → log/db/redis →
// caches → alert engine → routers → HTTP server.
func Initialize(configDir string, cryptoKey string) (func(), error) {
config, err := conf.InitConfig(configDir, cryptoKey)
if err != nil {
return nil, fmt.Errorf("failed to init config: %v", err)
}
// NOTE(review): errors from these two loaders are ignored; a malformed
// metrics.yaml/ops.yaml is silently dropped — confirm this is intended.
cconf.LoadMetricsYaml(config.Center.MetricsYamlFile)
cconf.LoadOpsYaml(config.Center.OpsYamlFile)
logxClean, err := logx.Init(config.Log)
if err != nil {
return nil, err
}
i18nx.Init()
db, err := storage.New(config.DB)
if err != nil {
return nil, err
}
ctx := ctx.NewContext(context.Background(), db)
models.InitRoot(ctx)
redis, err := storage.NewRedis(config.Redis)
if err != nil {
return nil, err
}
metas := metas.New(redis)
idents := idents.New(db)
// In-memory caches synced from the database / redis.
syncStats := memsto.NewSyncStats()
alertStats := astats.NewSyncStats()
sso := sso.Init(config.Center, ctx)
busiGroupCache := memsto.NewBusiGroupCache(ctx, syncStats)
targetCache := memsto.NewTargetCache(ctx, syncStats, redis)
dsCache := memsto.NewDatasourceCache(ctx, syncStats)
alertMuteCache := memsto.NewAlertMuteCache(ctx, syncStats)
alertRuleCache := memsto.NewAlertRuleCache(ctx, syncStats)
notifyConfigCache := memsto.NewNotifyConfigCache(ctx)
promClients := prom.NewPromClient(ctx, config.Alert.Heartbeat)
// Start the alert evaluation engine before wiring the routers that feed it.
externalProcessors := process.NewExternalProcessors()
alert.Start(config.Alert, config.Pushgw, syncStats, alertStats, externalProcessors, targetCache, busiGroupCache, alertMuteCache, alertRuleCache, notifyConfigCache, dsCache, ctx, promClients, true)
writers := writer.NewWriters(config.Pushgw)
alertrtRouter := alertrt.New(config.HTTP, config.Alert, alertMuteCache, targetCache, busiGroupCache, alertStats, ctx, externalProcessors)
centerRouter := centerrt.New(config.HTTP, config.Center, cconf.Operations, dsCache, notifyConfigCache, promClients, redis, sso, ctx, metas)
pushgwRouter := pushgwrt.New(config.HTTP, config.Pushgw, targetCache, busiGroupCache, idents, writers, ctx)
r := httpx.GinEngine(config.Global.RunMode, config.HTTP)
centerRouter.Config(r)
alertrtRouter.Config(r)
pushgwRouter.Config(r)
httpClean := httpx.Init(config.HTTP, r)
// Cleanup runs in reverse: stop HTTP, then flush logs.
return func() {
logxClean()
httpClean()
}, nil
}

View File

@@ -1,105 +0,0 @@
package metas
import (
"context"
"sync"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/storage"
"github.com/toolkits/pkg/logger"
)
// Set buffers host metadata reported by agents and periodically flushes it
// to redis. Access to items is guarded by the embedded RWMutex.
type Set struct {
sync.RWMutex
items map[string]models.HostMeta
redis storage.Redis
}
// New creates a Set and starts its background persist loop.
func New(redis storage.Redis) *Set {
set := &Set{
items: make(map[string]models.HostMeta),
redis: redis,
}
set.Init()
return set
}
// Init starts the background goroutine that flushes buffered metas.
func (s *Set) Init() {
go s.LoopPersist()
}
// MSet merges a batch of ident→meta pairs into the pending buffer.
func (s *Set) MSet(items map[string]models.HostMeta) {
s.Lock()
defer s.Unlock()
for ident, meta := range items {
s.items[ident] = meta
}
}
// Set records one ident→meta pair in the pending buffer.
func (s *Set) Set(ident string, meta models.HostMeta) {
s.Lock()
defer s.Unlock()
s.items[ident] = meta
}
// LoopPersist flushes the buffer to redis once per second, forever.
// NOTE(review): there is no shutdown signal; the goroutine lives for the
// process lifetime.
func (s *Set) LoopPersist() {
for {
time.Sleep(time.Second)
s.persist()
}
}
// persist atomically swaps out the pending buffer under the lock, then
// writes the swapped-out snapshot to redis outside the lock so writers
// are never blocked on redis I/O.
func (s *Set) persist() {
var items map[string]models.HostMeta
s.Lock()
if len(s.items) == 0 {
s.Unlock()
return
}
items = s.items
s.items = make(map[string]models.HostMeta)
s.Unlock()
s.updateMeta(items)
}
// updateMeta re-keys the snapshot by hostname and writes it to redis in
// batches of 100 to bound the size of each MSET. The trailing flush handles
// the final partial batch (updateTargets ignores an empty map).
func (s *Set) updateMeta(items map[string]models.HostMeta) {
m := make(map[string]models.HostMeta, 100)
num := 0
for _, meta := range items {
m[meta.Hostname] = meta
num++
if num == 100 {
if err := s.updateTargets(m); err != nil {
logger.Errorf("failed to update targets: %v", err)
}
m = make(map[string]models.HostMeta, 100)
num = 0
}
}
if err := s.updateTargets(m); err != nil {
logger.Errorf("failed to update targets: %v", err)
}
}
// updateTargets writes one batch of hostname→meta pairs to redis with a
// single MSET; an empty batch is a no-op.
func (s *Set) updateTargets(m map[string]models.HostMeta) error {
	if len(m) == 0 {
		return nil
	}
	kv := make([]interface{}, 0, len(m)*2)
	for ident, meta := range m {
		kv = append(kv, models.WrapIdent(ident), meta)
	}
	return s.redis.MSet(context.Background(), kv...).Err()
}

View File

@@ -1,395 +0,0 @@
package router
import (
"fmt"
"net/http"
"path"
"strings"
"time"
"github.com/ccfos/nightingale/v6/center/cconf"
"github.com/ccfos/nightingale/v6/center/cstats"
"github.com/ccfos/nightingale/v6/center/metas"
"github.com/ccfos/nightingale/v6/center/sso"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/pkg/aop"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/httpx"
"github.com/ccfos/nightingale/v6/prom"
"github.com/ccfos/nightingale/v6/storage"
"github.com/toolkits/pkg/ginx"
"github.com/gin-gonic/gin"
)
// Router holds every dependency the center HTTP handlers need: config,
// in-memory caches, prometheus clients, redis, SSO and the host-meta set.
type Router struct {
HTTP httpx.Config
Center cconf.Center
Operations cconf.Operation
DatasourceCache *memsto.DatasourceCacheType
NotifyConfigCache *memsto.NotifyConfigCacheType
PromClients *prom.PromClientMap
Redis storage.Redis
MetaSet *metas.Set
Sso *sso.SsoClient
Ctx *ctx.Context
}
// New assembles a Router from its configuration, caches, clients, redis,
// SSO handle and host-meta set.
func New(httpConfig httpx.Config, center cconf.Center, operations cconf.Operation, ds *memsto.DatasourceCacheType, ncc *memsto.NotifyConfigCacheType,
	pc *prom.PromClientMap, redis storage.Redis, sso *sso.SsoClient, ctx *ctx.Context, metaSet *metas.Set) *Router {
	return &Router{
		Ctx:               ctx,
		HTTP:              httpConfig,
		Center:            center,
		Operations:        operations,
		DatasourceCache:   ds,
		NotifyConfigCache: ncc,
		PromClients:       pc,
		Redis:             redis,
		Sso:               sso,
		MetaSet:           metaSet,
	}
}
// stat is gin middleware that records per-request metrics: a request counter
// and a latency observation, both labelled by service, status code, route
// pattern (FullPath) and HTTP method.
func stat() gin.HandlerFunc {
	return func(c *gin.Context) {
		start := time.Now()
		c.Next()

		code := fmt.Sprintf("%d", c.Writer.Status())
		method := c.Request.Method
		labels := []string{cstats.Service, code, c.FullPath(), method}

		cstats.RequestCounter.WithLabelValues(labels...).Inc()
		// Duration.Seconds() already returns float64; the previous
		// float64(...) wrapper was a redundant no-op conversion.
		cstats.RequestDuration.WithLabelValues(labels...).Observe(time.Since(start).Seconds())
	}
}
// languageDetector is gin middleware that normalizes the configured i18n
// request header into the internal "X-Language" header: values starting
// with "zh"/"en" collapse to those codes, other values pass through, and
// a missing value defaults to "en". With no header key configured it is
// a no-op.
func languageDetector(i18NHeaderKey string) gin.HandlerFunc {
	headerKey := i18NHeaderKey
	return func(c *gin.Context) {
		if headerKey == "" {
			c.Next()
			return
		}
		lang := c.GetHeader(headerKey)
		switch {
		case lang == "":
			c.Request.Header.Set("X-Language", "en")
		case strings.HasPrefix(lang, "zh"):
			c.Request.Header.Set("X-Language", "zh")
		case strings.HasPrefix(lang, "en"):
			c.Request.Header.Set("X-Language", "en")
		default:
			c.Request.Header.Set("X-Language", lang)
		}
		c.Next()
	}
}
// configNoRoute serves the SPA frontend for unmatched routes: requests with
// a static-asset extension are served from pub/, everything else falls back
// to pub/index.html for client-side routing.
// NOTE(review): the Split+Join dance re-cleans the path, but confirm that
// ".." segments in c.Request.URL.Path cannot escape pub/ here.
func (rt *Router) configNoRoute(r *gin.Engine) {
r.NoRoute(func(c *gin.Context) {
arr := strings.Split(c.Request.URL.Path, ".")
suffix := arr[len(arr)-1]
switch suffix {
case "png", "jpeg", "jpg", "svg", "ico", "gif", "css", "js", "html", "htm", "gz", "zip", "map":
c.File(path.Join(strings.Split("pub/"+c.Request.URL.Path, "/")...))
default:
c.File(path.Join("pub", "index.html"))
}
})
}
func (rt *Router) Config(r *gin.Engine) {
r.Use(stat())
r.Use(languageDetector(rt.Center.I18NHeaderKey))
r.Use(aop.Recovery())
pagesPrefix := "/api/n9e"
pages := r.Group(pagesPrefix)
{
if rt.Center.AnonymousAccess.PromQuerier {
pages.Any("/proxy/:id/*url", rt.dsProxy)
pages.POST("/query-range-batch", rt.promBatchQueryRange)
pages.POST("/query-instant-batch", rt.promBatchQueryInstant)
} else {
pages.Any("/proxy/:id/*url", rt.auth(), rt.dsProxy)
pages.POST("/query-range-batch", rt.auth(), rt.promBatchQueryRange)
pages.POST("/query-instant-batch", rt.auth(), rt.promBatchQueryInstant)
}
pages.POST("/auth/login", rt.jwtMock(), rt.loginPost)
pages.POST("/auth/logout", rt.jwtMock(), rt.logoutPost)
pages.POST("/auth/refresh", rt.jwtMock(), rt.refreshPost)
pages.GET("/auth/sso-config", rt.ssoConfigNameGet)
pages.GET("/auth/redirect", rt.loginRedirect)
pages.GET("/auth/redirect/cas", rt.loginRedirectCas)
pages.GET("/auth/redirect/oauth", rt.loginRedirectOAuth)
pages.GET("/auth/callback", rt.loginCallback)
pages.GET("/auth/callback/cas", rt.loginCallbackCas)
pages.GET("/auth/callback/oauth", rt.loginCallbackOAuth)
pages.GET("/metrics/desc", rt.metricsDescGetFile)
pages.POST("/metrics/desc", rt.metricsDescGetMap)
pages.GET("/notify-channels", rt.notifyChannelsGets)
pages.GET("/contact-keys", rt.contactKeysGets)
pages.GET("/self/perms", rt.auth(), rt.user(), rt.permsGets)
pages.GET("/self/profile", rt.auth(), rt.user(), rt.selfProfileGet)
pages.PUT("/self/profile", rt.auth(), rt.user(), rt.selfProfilePut)
pages.PUT("/self/password", rt.auth(), rt.user(), rt.selfPasswordPut)
pages.GET("/users", rt.auth(), rt.user(), rt.perm("/users"), rt.userGets)
pages.POST("/users", rt.auth(), rt.admin(), rt.userAddPost)
pages.GET("/user/:id/profile", rt.auth(), rt.userProfileGet)
pages.PUT("/user/:id/profile", rt.auth(), rt.admin(), rt.userProfilePut)
pages.PUT("/user/:id/password", rt.auth(), rt.admin(), rt.userPasswordPut)
pages.DELETE("/user/:id", rt.auth(), rt.admin(), rt.userDel)
pages.GET("/metric-views", rt.auth(), rt.metricViewGets)
pages.DELETE("/metric-views", rt.auth(), rt.user(), rt.metricViewDel)
pages.POST("/metric-views", rt.auth(), rt.user(), rt.metricViewAdd)
pages.PUT("/metric-views", rt.auth(), rt.user(), rt.metricViewPut)
pages.GET("/user-groups", rt.auth(), rt.user(), rt.userGroupGets)
pages.POST("/user-groups", rt.auth(), rt.user(), rt.perm("/user-groups/add"), rt.userGroupAdd)
pages.GET("/user-group/:id", rt.auth(), rt.user(), rt.userGroupGet)
pages.PUT("/user-group/:id", rt.auth(), rt.user(), rt.perm("/user-groups/put"), rt.userGroupWrite(), rt.userGroupPut)
pages.DELETE("/user-group/:id", rt.auth(), rt.user(), rt.perm("/user-groups/del"), rt.userGroupWrite(), rt.userGroupDel)
pages.POST("/user-group/:id/members", rt.auth(), rt.user(), rt.perm("/user-groups/put"), rt.userGroupWrite(), rt.userGroupMemberAdd)
pages.DELETE("/user-group/:id/members", rt.auth(), rt.user(), rt.perm("/user-groups/put"), rt.userGroupWrite(), rt.userGroupMemberDel)
pages.GET("/busi-groups", rt.auth(), rt.user(), rt.busiGroupGets)
pages.POST("/busi-groups", rt.auth(), rt.user(), rt.perm("/busi-groups/add"), rt.busiGroupAdd)
pages.GET("/busi-groups/alertings", rt.auth(), rt.busiGroupAlertingsGets)
pages.GET("/busi-group/:id", rt.auth(), rt.user(), rt.bgro(), rt.busiGroupGet)
pages.PUT("/busi-group/:id", rt.auth(), rt.user(), rt.perm("/busi-groups/put"), rt.bgrw(), rt.busiGroupPut)
pages.POST("/busi-group/:id/members", rt.auth(), rt.user(), rt.perm("/busi-groups/put"), rt.bgrw(), rt.busiGroupMemberAdd)
pages.DELETE("/busi-group/:id/members", rt.auth(), rt.user(), rt.perm("/busi-groups/put"), rt.bgrw(), rt.busiGroupMemberDel)
pages.DELETE("/busi-group/:id", rt.auth(), rt.user(), rt.perm("/busi-groups/del"), rt.bgrw(), rt.busiGroupDel)
pages.GET("/busi-group/:id/perm/:perm", rt.auth(), rt.user(), rt.checkBusiGroupPerm)
pages.GET("/targets", rt.auth(), rt.user(), rt.targetGets)
pages.POST("/target/list", rt.auth(), rt.user(), rt.targetGetsByHostFilter)
pages.DELETE("/targets", rt.auth(), rt.user(), rt.perm("/targets/del"), rt.targetDel)
pages.GET("/targets/tags", rt.auth(), rt.user(), rt.targetGetTags)
pages.POST("/targets/tags", rt.auth(), rt.user(), rt.perm("/targets/put"), rt.targetBindTagsByFE)
pages.DELETE("/targets/tags", rt.auth(), rt.user(), rt.perm("/targets/put"), rt.targetUnbindTagsByFE)
pages.PUT("/targets/note", rt.auth(), rt.user(), rt.perm("/targets/put"), rt.targetUpdateNote)
pages.PUT("/targets/bgid", rt.auth(), rt.user(), rt.perm("/targets/put"), rt.targetUpdateBgid)
pages.POST("/builtin-cate-favorite", rt.auth(), rt.user(), rt.builtinCateFavoriteAdd)
pages.DELETE("/builtin-cate-favorite/:name", rt.auth(), rt.user(), rt.builtinCateFavoriteDel)
pages.GET("/builtin-boards", rt.builtinBoardGets)
pages.GET("/builtin-board/:name", rt.builtinBoardGet)
pages.GET("/dashboards/builtin/list", rt.builtinBoardGets)
pages.GET("/builtin-boards-cates", rt.auth(), rt.user(), rt.builtinBoardCateGets)
pages.POST("/builtin-boards-detail", rt.auth(), rt.user(), rt.builtinBoardDetailGets)
pages.GET("/integrations/icon/:cate/:name", func(c *gin.Context) {
cate := ginx.UrlParamStr(c, "cate")
fp := "integrations/" + cate + "/icon/" + ginx.UrlParamStr(c, "name")
c.File(path.Join(fp))
})
pages.GET("/busi-group/:id/boards", rt.auth(), rt.user(), rt.perm("/dashboards"), rt.bgro(), rt.boardGets)
pages.POST("/busi-group/:id/boards", rt.auth(), rt.user(), rt.perm("/dashboards/add"), rt.bgrw(), rt.boardAdd)
pages.POST("/busi-group/:id/board/:bid/clone", rt.auth(), rt.user(), rt.perm("/dashboards/add"), rt.bgrw(), rt.boardClone)
pages.GET("/board/:bid", rt.boardGet)
pages.GET("/board/:bid/pure", rt.boardPureGet)
pages.PUT("/board/:bid", rt.auth(), rt.user(), rt.perm("/dashboards/put"), rt.boardPut)
pages.PUT("/board/:bid/configs", rt.auth(), rt.user(), rt.perm("/dashboards/put"), rt.boardPutConfigs)
pages.PUT("/board/:bid/public", rt.auth(), rt.user(), rt.perm("/dashboards/put"), rt.boardPutPublic)
pages.DELETE("/boards", rt.auth(), rt.user(), rt.perm("/dashboards/del"), rt.boardDel)
pages.GET("/share-charts", rt.chartShareGets)
pages.POST("/share-charts", rt.auth(), rt.chartShareAdd)
pages.GET("/alert-rules/builtin/alerts-cates", rt.auth(), rt.user(), rt.builtinAlertCateGets)
pages.GET("/alert-rules/builtin/list", rt.auth(), rt.user(), rt.builtinAlertRules)
pages.GET("/busi-group/:id/alert-rules", rt.auth(), rt.user(), rt.perm("/alert-rules"), rt.alertRuleGets)
pages.POST("/busi-group/:id/alert-rules", rt.auth(), rt.user(), rt.perm("/alert-rules/add"), rt.bgrw(), rt.alertRuleAddByFE)
pages.POST("/busi-group/:id/alert-rules/import", rt.auth(), rt.user(), rt.perm("/alert-rules/add"), rt.bgrw(), rt.alertRuleAddByImport)
pages.DELETE("/busi-group/:id/alert-rules", rt.auth(), rt.user(), rt.perm("/alert-rules/del"), rt.bgrw(), rt.alertRuleDel)
pages.PUT("/busi-group/:id/alert-rules/fields", rt.auth(), rt.user(), rt.perm("/alert-rules/put"), rt.bgrw(), rt.alertRulePutFields)
pages.PUT("/busi-group/:id/alert-rule/:arid", rt.auth(), rt.user(), rt.perm("/alert-rules/put"), rt.alertRulePutByFE)
pages.GET("/alert-rule/:arid", rt.auth(), rt.user(), rt.perm("/alert-rules"), rt.alertRuleGet)
pages.GET("/busi-group/:id/recording-rules", rt.auth(), rt.user(), rt.perm("/recording-rules"), rt.recordingRuleGets)
pages.POST("/busi-group/:id/recording-rules", rt.auth(), rt.user(), rt.perm("/recording-rules/add"), rt.bgrw(), rt.recordingRuleAddByFE)
pages.DELETE("/busi-group/:id/recording-rules", rt.auth(), rt.user(), rt.perm("/recording-rules/del"), rt.bgrw(), rt.recordingRuleDel)
pages.PUT("/busi-group/:id/recording-rule/:rrid", rt.auth(), rt.user(), rt.perm("/recording-rules/put"), rt.bgrw(), rt.recordingRulePutByFE)
pages.GET("/recording-rule/:rrid", rt.auth(), rt.user(), rt.perm("/recording-rules"), rt.recordingRuleGet)
pages.PUT("/busi-group/:id/recording-rules/fields", rt.auth(), rt.user(), rt.perm("/recording-rules/put"), rt.recordingRulePutFields)
pages.GET("/busi-group/:id/alert-mutes", rt.auth(), rt.user(), rt.perm("/alert-mutes"), rt.bgro(), rt.alertMuteGetsByBG)
pages.POST("/busi-group/:id/alert-mutes", rt.auth(), rt.user(), rt.perm("/alert-mutes/add"), rt.bgrw(), rt.alertMuteAdd)
pages.DELETE("/busi-group/:id/alert-mutes", rt.auth(), rt.user(), rt.perm("/alert-mutes/del"), rt.bgrw(), rt.alertMuteDel)
pages.PUT("/busi-group/:id/alert-mute/:amid", rt.auth(), rt.user(), rt.perm("/alert-mutes/put"), rt.alertMutePutByFE)
pages.PUT("/busi-group/:id/alert-mutes/fields", rt.auth(), rt.user(), rt.perm("/alert-mutes/put"), rt.bgrw(), rt.alertMutePutFields)
pages.GET("/busi-group/:id/alert-subscribes", rt.auth(), rt.user(), rt.perm("/alert-subscribes"), rt.bgro(), rt.alertSubscribeGets)
pages.GET("/alert-subscribe/:sid", rt.auth(), rt.user(), rt.perm("/alert-subscribes"), rt.alertSubscribeGet)
pages.POST("/busi-group/:id/alert-subscribes", rt.auth(), rt.user(), rt.perm("/alert-subscribes/add"), rt.bgrw(), rt.alertSubscribeAdd)
pages.PUT("/busi-group/:id/alert-subscribes", rt.auth(), rt.user(), rt.perm("/alert-subscribes/put"), rt.bgrw(), rt.alertSubscribePut)
pages.DELETE("/busi-group/:id/alert-subscribes", rt.auth(), rt.user(), rt.perm("/alert-subscribes/del"), rt.bgrw(), rt.alertSubscribeDel)
if rt.Center.AnonymousAccess.AlertDetail {
pages.GET("/alert-cur-event/:eid", rt.alertCurEventGet)
pages.GET("/alert-his-event/:eid", rt.alertHisEventGet)
} else {
pages.GET("/alert-cur-event/:eid", rt.auth(), rt.alertCurEventGet)
pages.GET("/alert-his-event/:eid", rt.auth(), rt.alertHisEventGet)
}
// card logic
pages.GET("/alert-cur-events/list", rt.auth(), rt.alertCurEventsList)
pages.GET("/alert-cur-events/card", rt.auth(), rt.alertCurEventsCard)
pages.POST("/alert-cur-events/card/details", rt.auth(), rt.alertCurEventsCardDetails)
pages.GET("/alert-his-events/list", rt.auth(), rt.alertHisEventsList)
pages.DELETE("/alert-cur-events", rt.auth(), rt.user(), rt.perm("/alert-cur-events/del"), rt.alertCurEventDel)
pages.GET("/alert-aggr-views", rt.auth(), rt.alertAggrViewGets)
pages.DELETE("/alert-aggr-views", rt.auth(), rt.user(), rt.alertAggrViewDel)
pages.POST("/alert-aggr-views", rt.auth(), rt.user(), rt.alertAggrViewAdd)
pages.PUT("/alert-aggr-views", rt.auth(), rt.user(), rt.alertAggrViewPut)
pages.GET("/busi-group/:id/task-tpls", rt.auth(), rt.user(), rt.perm("/job-tpls"), rt.bgro(), rt.taskTplGets)
pages.POST("/busi-group/:id/task-tpls", rt.auth(), rt.user(), rt.perm("/job-tpls/add"), rt.bgrw(), rt.taskTplAdd)
pages.DELETE("/busi-group/:id/task-tpl/:tid", rt.auth(), rt.user(), rt.perm("/job-tpls/del"), rt.bgrw(), rt.taskTplDel)
pages.POST("/busi-group/:id/task-tpls/tags", rt.auth(), rt.user(), rt.perm("/job-tpls/put"), rt.bgrw(), rt.taskTplBindTags)
pages.DELETE("/busi-group/:id/task-tpls/tags", rt.auth(), rt.user(), rt.perm("/job-tpls/put"), rt.bgrw(), rt.taskTplUnbindTags)
pages.GET("/busi-group/:id/task-tpl/:tid", rt.auth(), rt.user(), rt.perm("/job-tpls"), rt.bgro(), rt.taskTplGet)
pages.PUT("/busi-group/:id/task-tpl/:tid", rt.auth(), rt.user(), rt.perm("/job-tpls/put"), rt.bgrw(), rt.taskTplPut)
pages.GET("/busi-group/:id/tasks", rt.auth(), rt.user(), rt.perm("/job-tasks"), rt.bgro(), rt.taskGets)
pages.POST("/busi-group/:id/tasks", rt.auth(), rt.user(), rt.perm("/job-tasks/add"), rt.bgrw(), rt.taskAdd)
pages.GET("/busi-group/:id/task/*url", rt.auth(), rt.user(), rt.perm("/job-tasks"), rt.taskProxy)
pages.PUT("/busi-group/:id/task/*url", rt.auth(), rt.user(), rt.perm("/job-tasks/put"), rt.bgrw(), rt.taskProxy)
pages.GET("/servers", rt.auth(), rt.admin(), rt.serversGet)
pages.GET("/server-clusters", rt.auth(), rt.admin(), rt.serverClustersGet)
pages.POST("/datasource/list", rt.auth(), rt.datasourceList)
pages.POST("/datasource/plugin/list", rt.auth(), rt.pluginList)
pages.POST("/datasource/upsert", rt.auth(), rt.admin(), rt.datasourceUpsert)
pages.POST("/datasource/desc", rt.auth(), rt.admin(), rt.datasourceGet)
pages.POST("/datasource/status/update", rt.auth(), rt.admin(), rt.datasourceUpdataStatus)
pages.DELETE("/datasource/", rt.auth(), rt.admin(), rt.datasourceDel)
pages.GET("/roles", rt.auth(), rt.admin(), rt.roleGets)
pages.POST("/roles", rt.auth(), rt.admin(), rt.roleAdd)
pages.PUT("/roles", rt.auth(), rt.admin(), rt.rolePut)
pages.DELETE("/role/:id", rt.auth(), rt.admin(), rt.roleDel)
pages.GET("/role/:id/ops", rt.auth(), rt.admin(), rt.operationOfRole)
pages.PUT("/role/:id/ops", rt.auth(), rt.admin(), rt.roleBindOperation)
pages.GET("operation", rt.operations)
pages.GET("/notify-tpls", rt.auth(), rt.admin(), rt.notifyTplGets)
pages.PUT("/notify-tpl/content", rt.auth(), rt.admin(), rt.notifyTplUpdateContent)
pages.PUT("/notify-tpl", rt.auth(), rt.admin(), rt.notifyTplUpdate)
pages.POST("/notify-tpl/preview", rt.auth(), rt.admin(), rt.notifyTplPreview)
pages.GET("/sso-configs", rt.auth(), rt.admin(), rt.ssoConfigGets)
pages.PUT("/sso-config", rt.auth(), rt.admin(), rt.ssoConfigUpdate)
pages.GET("/webhooks", rt.auth(), rt.admin(), rt.webhookGets)
pages.PUT("/webhooks", rt.auth(), rt.admin(), rt.webhookPuts)
pages.GET("/notify-script", rt.auth(), rt.admin(), rt.notifyScriptGet)
pages.PUT("/notify-script", rt.auth(), rt.admin(), rt.notifyScriptPut)
pages.GET("/notify-channel", rt.auth(), rt.admin(), rt.notifyChannelGets)
pages.PUT("/notify-channel", rt.auth(), rt.admin(), rt.notifyChannelPuts)
pages.GET("/notify-contact", rt.auth(), rt.admin(), rt.notifyContactGets)
pages.PUT("/notify-contact", rt.auth(), rt.admin(), rt.notifyContactPuts)
pages.GET("/notify-config", rt.auth(), rt.admin(), rt.notifyConfigGet)
pages.PUT("/notify-config", rt.auth(), rt.admin(), rt.notifyConfigPut)
}
if rt.HTTP.Service.Enable {
service := r.Group("/v1/n9e")
if len(rt.HTTP.Service.BasicAuth) > 0 {
service.Use(gin.BasicAuth(rt.HTTP.Service.BasicAuth))
}
{
service.Any("/prometheus/*url", rt.dsProxy)
service.POST("/users", rt.userAddPost)
service.GET("/users", rt.userFindAll)
service.GET("/targets", rt.targetGets)
service.GET("/targets/tags", rt.targetGetTags)
service.POST("/targets/tags", rt.targetBindTagsByService)
service.DELETE("/targets/tags", rt.targetUnbindTagsByService)
service.PUT("/targets/note", rt.targetUpdateNoteByService)
service.POST("/alert-rules", rt.alertRuleAddByService)
service.DELETE("/alert-rules", rt.alertRuleDelByService)
service.PUT("/alert-rule/:arid", rt.alertRulePutByService)
service.GET("/alert-rule/:arid", rt.alertRuleGet)
service.GET("/alert-rules", rt.alertRulesGetByService)
service.GET("/alert-mutes", rt.alertMuteGets)
service.POST("/alert-mutes", rt.alertMuteAddByService)
service.DELETE("/alert-mutes", rt.alertMuteDel)
service.GET("/alert-cur-events", rt.alertCurEventsList)
service.GET("/alert-his-events", rt.alertHisEventsList)
service.GET("/alert-his-event/:eid", rt.alertHisEventGet)
service.GET("/config/:id", rt.configGet)
service.GET("/configs", rt.configsGet)
service.PUT("/configs", rt.configsPut)
service.POST("/configs", rt.configsPost)
service.DELETE("/configs", rt.configsDel)
service.POST("/conf-prop/encrypt", rt.confPropEncrypt)
service.POST("/conf-prop/decrypt", rt.confPropDecrypt)
}
}
if rt.HTTP.Heartbeat.Enable {
heartbeat := r.Group("/v1/n9e")
{
if len(rt.HTTP.Heartbeat.BasicAuth) > 0 {
heartbeat.Use(gin.BasicAuth(rt.HTTP.Heartbeat.BasicAuth))
}
heartbeat.POST("/heartbeat", rt.heartbeat)
}
}
rt.configNoRoute(r)
}
// Render writes the uniform JSON envelope used by the datasource handlers:
// on success {"data": ..., "error": ""} (nil data is normalized to an empty
// object), otherwise {"error": {"message": msg}}. HTTP status is always 200.
func Render(c *gin.Context, data, msg interface{}) {
	if msg != nil {
		c.JSON(http.StatusOK, gin.H{"error": gin.H{"message": msg}})
		return
	}
	body := data
	if body == nil {
		body = struct{}{}
	}
	c.JSON(http.StatusOK, gin.H{"data": body, "error": ""})
}
// Dangerous writes an error envelope when v carries an error: a non-empty
// string or a non-nil error value. A nil v or empty string writes nothing.
// The optional code argument is accepted for call-site compatibility but is
// not used; the response status is always 200.
func Dangerous(c *gin.Context, v interface{}, code ...int) {
	if v == nil {
		return
	}
	switch val := v.(type) {
	case string:
		if val == "" {
			return
		}
		c.JSON(http.StatusOK, gin.H{"error": gin.H{"message": v}})
	case error:
		c.JSON(http.StatusOK, gin.H{"error": gin.H{"message": val.Error()}})
	}
}

View File

@@ -1,237 +0,0 @@
package router
import (
"net/http"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"github.com/toolkits/pkg/ginx"
)
// boardForm is the JSON request body shared by the dashboard create/update
// handlers.
type boardForm struct {
// Display name of the dashboard.
Name string `json:"name"`
// Ident is an optional slug used for stable links; empty means none.
Ident string `json:"ident"`
Tags string `json:"tags"`
// Configs is the serialized dashboard layout payload.
Configs string `json:"configs"`
// Public: 0 means the board requires authentication (see boardGet).
Public int `json:"public"`
}
// boardAdd creates a dashboard in busi-group :id owned by the current user;
// a non-empty configs payload is stored alongside the board record.
func (rt *Router) boardAdd(c *gin.Context) {
var f boardForm
ginx.BindJSON(c, &f)
me := c.MustGet("user").(*models.User)
board := &models.Board{
GroupId: ginx.UrlParamInt64(c, "id"),
Name: f.Name,
Ident: f.Ident,
Tags: f.Tags,
Configs: f.Configs,
CreateBy: me.Username,
UpdateBy: me.Username,
}
err := board.Add(rt.Ctx)
ginx.Dangerous(err)
if f.Configs != "" {
// The layout payload is persisted separately, keyed by the new board id.
ginx.Dangerous(models.BoardPayloadSave(rt.Ctx, board.Id, f.Configs))
}
ginx.NewRender(c).Data(board, nil)
}
// boardGet returns one dashboard looked up by numeric id or ident slug.
// Public boards (Public != 0) are served as-is; otherwise the auth/user
// middlewares are invoked inline and non-admin callers must have read
// access to the board's busi-group.
func (rt *Router) boardGet(c *gin.Context) {
// bid may be either the primary key or the ident string.
bid := ginx.UrlParamStr(c, "bid")
board, err := models.BoardGet(rt.Ctx, "id = ? or ident = ?", bid, bid)
ginx.Dangerous(err)
if board == nil {
ginx.Bomb(http.StatusNotFound, "No such dashboard")
}
if board.Public == 0 {
// Not public: run the auth middlewares manually — presumably this route
// can be registered without them for anonymous access; confirm at the
// route table.
rt.auth()(c)
rt.user()(c)
me := c.MustGet("user").(*models.User)
if !me.IsAdmin() {
// check permission
rt.bgroCheck(c, board.GroupId)
}
}
ginx.NewRender(c).Data(board, nil)
}
// boardPureGet returns a dashboard by numeric id with no auth or permission
// checks; responds 404 when it does not exist.
func (rt *Router) boardPureGet(c *gin.Context) {
	b, err := models.BoardGetByID(rt.Ctx, ginx.UrlParamInt64(c, "bid"))
	ginx.Dangerous(err)
	if b == nil {
		ginx.Bomb(http.StatusNotFound, "No such dashboard")
	}
	ginx.NewRender(c).Data(b, nil)
}
// bgrwCheck
// boardDel deletes the dashboards listed in the posted ids. Missing ids are
// skipped silently; non-admins need write access to each board's busi-group.
func (rt *Router) boardDel(c *gin.Context) {
var f idsForm
ginx.BindJSON(c, &f)
// Aborts with 400 when the ids list is empty.
f.Verify()
for i := 0; i < len(f.Ids); i++ {
bid := f.Ids[i]
board, err := models.BoardGet(rt.Ctx, "id = ?", bid)
ginx.Dangerous(err)
if board == nil {
continue
}
me := c.MustGet("user").(*models.User)
if !me.IsAdmin() {
// check permission
rt.bgrwCheck(c, board.GroupId)
}
ginx.Dangerous(board.Del(rt.Ctx))
}
ginx.NewRender(c).Message(nil)
}
// Board loads a dashboard by primary key, aborting the request with 404 when
// it does not exist and propagating query errors via ginx.Dangerous.
func (rt *Router) Board(id int64) *models.Board {
	board, err := models.BoardGet(rt.Ctx, "id=?", id)
	ginx.Dangerous(err)
	if board == nil {
		ginx.Bomb(http.StatusNotFound, "No such dashboard")
	}
	return board
}
// bgrwCheck
// boardPut updates a dashboard's name/ident/tags. Non-admins need write
// access to the board's busi-group; a duplicate ident aborts the update.
func (rt *Router) boardPut(c *gin.Context) {
var f boardForm
ginx.BindJSON(c, &f)
me := c.MustGet("user").(*models.User)
bo := rt.Board(ginx.UrlParamInt64(c, "bid"))
if !me.IsAdmin() {
// check permission
rt.bgrwCheck(c, bo.GroupId)
}
can, err := bo.CanRenameIdent(rt.Ctx, f.Ident)
ginx.Dangerous(err)
if !can {
// NOTE: duplicate ident is reported with HTTP 200 plus an error body.
ginx.Bomb(http.StatusOK, "Ident duplicate")
}
bo.Name = f.Name
bo.Ident = f.Ident
bo.Tags = f.Tags
bo.UpdateBy = me.Username
bo.UpdateAt = time.Now().Unix()
// Only the listed columns are written; configs is untouched here.
err = bo.Update(rt.Ctx, "name", "ident", "tags", "update_by", "update_at")
ginx.NewRender(c).Data(bo, err)
}
// bgrwCheck
// boardPutConfigs replaces a dashboard's layout payload; the board is looked
// up by id or ident. Non-admins need write access to its busi-group.
func (rt *Router) boardPutConfigs(c *gin.Context) {
var f boardForm
ginx.BindJSON(c, &f)
me := c.MustGet("user").(*models.User)
bid := ginx.UrlParamStr(c, "bid")
bo, err := models.BoardGet(rt.Ctx, "id = ? or ident = ?", bid, bid)
ginx.Dangerous(err)
if bo == nil {
ginx.Bomb(http.StatusNotFound, "No such dashboard")
}
// check permission
if !me.IsAdmin() {
rt.bgrwCheck(c, bo.GroupId)
}
bo.UpdateBy = me.Username
bo.UpdateAt = time.Now().Unix()
// Only audit columns live on the board row; the payload itself is stored
// separately via BoardPayloadSave below.
ginx.Dangerous(bo.Update(rt.Ctx, "update_by", "update_at"))
bo.Configs = f.Configs
ginx.Dangerous(models.BoardPayloadSave(rt.Ctx, bo.Id, f.Configs))
ginx.NewRender(c).Data(bo, nil)
}
// bgrwCheck
// boardPutPublic toggles a dashboard's public flag. Non-admins need write
// access to the board's busi-group.
func (rt *Router) boardPutPublic(c *gin.Context) {
var f boardForm
ginx.BindJSON(c, &f)
me := c.MustGet("user").(*models.User)
bo := rt.Board(ginx.UrlParamInt64(c, "bid"))
// check permission
if !me.IsAdmin() {
rt.bgrwCheck(c, bo.GroupId)
}
bo.Public = f.Public
bo.UpdateBy = me.Username
bo.UpdateAt = time.Now().Unix()
err := bo.Update(rt.Ctx, "public", "update_by", "update_at")
ginx.NewRender(c).Data(bo, err)
}
// boardGets lists the dashboards of busi-group :id, optionally filtered by
// the "query" search string.
func (rt *Router) boardGets(c *gin.Context) {
	groupId := ginx.UrlParamInt64(c, "id")
	search := ginx.QueryStr(c, "query", "")
	list, err := models.BoardGetsByGroupId(rt.Ctx, groupId, search)
	ginx.NewRender(c).Data(list, err)
}
// boardClone duplicates a dashboard (and its stored payload) into the same
// busi-group, appending " Copy" to the name. A non-empty ident is replaced
// with a fresh UUID so idents stay unique.
func (rt *Router) boardClone(c *gin.Context) {
me := c.MustGet("user").(*models.User)
bo := rt.Board(ginx.UrlParamInt64(c, "bid"))
newBoard := &models.Board{
Name: bo.Name + " Copy",
Tags: bo.Tags,
GroupId: bo.GroupId,
CreateBy: me.Username,
UpdateBy: me.Username,
}
if bo.Ident != "" {
newBoard.Ident = uuid.NewString()
}
ginx.Dangerous(newBoard.Add(rt.Ctx))
// clone payload
payload, err := models.BoardPayloadGet(rt.Ctx, bo.Id)
ginx.Dangerous(err)
if payload != "" {
ginx.Dangerous(models.BoardPayloadSave(rt.Ctx, newBoard.Id, payload))
}
ginx.NewRender(c).Message(nil)
}

View File

@@ -1,300 +0,0 @@
package router
import (
"encoding/json"
"fmt"
"net/http"
"path"
"strings"
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/file"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/runner"
)
// builtinCateFavoriteAdd marks a builtin integration category as a favorite
// of the current user; 400 when the category name is missing.
func (rt *Router) builtinCateFavoriteAdd(c *gin.Context) {
	var cate models.BuiltinCate
	ginx.BindJSON(c, &cate)
	if cate.Name == "" {
		ginx.Bomb(http.StatusBadRequest, "name is empty")
	}
	user := c.MustGet("user").(*models.User)
	cate.UserId = user.Id
	ginx.NewRender(c).Message(cate.Create(rt.Ctx))
}
// builtinCateFavoriteDel removes the named builtin category from the current
// user's favorites.
func (rt *Router) builtinCateFavoriteDel(c *gin.Context) {
	user := c.MustGet("user").(*models.User)
	name := ginx.UrlParamStr(c, "name")
	ginx.NewRender(c).Message(models.BuiltinCateDelete(rt.Ctx, name, user.Id))
}
// Payload describes one builtin dashboard file: its category directory,
// file name, display name, parsed configs and tags.
type Payload struct {
Cate string `json:"cate"`
Fname string `json:"fname"`
Name string `json:"name"`
Configs interface{} `json:"configs"`
Tags string `json:"tags"`
}
// BoardCate groups the builtin dashboards of one integration category,
// plus its icon URL and whether the current user favorited it.
type BoardCate struct {
Name string `json:"name"`
IconUrl string `json:"icon_url"`
Boards []Payload `json:"boards"`
Favorite bool `json:"favorite"`
}
// builtinBoardDetailGets reads one builtin dashboard JSON file, addressed by
// category directory and file name from the request body, and returns its
// parsed payload.
//
// NOTE(review): Cate and Fname come straight from the client and are joined
// into the file path unsanitized — a "../" component could escape the
// integrations directory; confirm and consider rejecting path separators.
func (rt *Router) builtinBoardDetailGets(c *gin.Context) {
var payload Payload
ginx.BindJSON(c, &payload)
fp := rt.Center.BuiltinIntegrationsDir
if fp == "" {
// Default to <cwd>/integrations when no dir is configured.
fp = path.Join(runner.Cwd, "integrations")
}
fn := fp + "/" + payload.Cate + "/dashboards/" + payload.Fname
content, err := file.ReadBytes(fn)
ginx.Dangerous(err)
// Unmarshal into the same struct: the file's fields overwrite the request's.
err = json.Unmarshal(content, &payload)
ginx.NewRender(c).Data(payload, err)
}
// builtinBoardCateGets walks the integrations directory and returns, per
// category, the builtin dashboards (metadata only — configs blanked), the
// category icon URL, and the current user's favorite flag. Unreadable or
// malformed dashboard files are logged and skipped.
func (rt *Router) builtinBoardCateGets(c *gin.Context) {
fp := rt.Center.BuiltinIntegrationsDir
if fp == "" {
fp = path.Join(runner.Cwd, "integrations")
}
me := c.MustGet("user").(*models.User)
// Favorites lookup failure is non-fatal: categories simply show unfavorited.
buildinFavoritesMap, err := models.BuiltinCateGetByUserId(rt.Ctx, me.Id)
if err != nil {
logger.Warningf("get builtin favorites fail: %v", err)
}
var boardCates []BoardCate
dirList, err := file.DirsUnder(fp)
ginx.Dangerous(err)
for _, dir := range dirList {
var boardCate BoardCate
boardCate.Name = dir
files, err := file.FilesUnder(fp + "/" + dir + "/dashboards")
ginx.Dangerous(err)
var boards []Payload
for _, f := range files {
fn := fp + "/" + dir + "/dashboards/" + f
content, err := file.ReadBytes(fn)
if err != nil {
logger.Warningf("add board fail: %v", err)
continue
}
var payload Payload
err = json.Unmarshal(content, &payload)
if err != nil {
logger.Warningf("add board:%s fail: %v", fn, err)
continue
}
payload.Cate = dir
payload.Fname = f
// Strip the (potentially large) configs from the listing; the detail
// endpoint serves the full content.
payload.Configs = ""
boards = append(boards, payload)
}
boardCate.Boards = boards
if _, ok := buildinFavoritesMap[dir]; ok {
boardCate.Favorite = true
}
// First file in the category's icon dir (if any) becomes the icon URL.
iconFiles, _ := file.FilesUnder(fp + "/" + dir + "/icon")
if len(iconFiles) > 0 {
boardCate.IconUrl = fmt.Sprintf("/api/n9e/integrations/icon/%s/%s", dir, iconFiles[0])
}
boardCates = append(boardCates, boardCate)
}
ginx.NewRender(c).Data(boardCates, nil)
}
// builtinBoardGets lists the names (file base names without the .json
// suffix) of every builtin dashboard under the integrations directory.
func (rt *Router) builtinBoardGets(c *gin.Context) {
	root := rt.Center.BuiltinIntegrationsDir
	if root == "" {
		root = path.Join(runner.Cwd, "integrations")
	}

	names := make([]string, 0)
	dirs, err := file.DirsUnder(root)
	ginx.Dangerous(err)
	for _, d := range dirs {
		files, err := file.FilesUnder(root + "/" + d + "/dashboards")
		ginx.Dangerous(err)
		for _, f := range files {
			// Only JSON dashboard definitions are exposed.
			if strings.HasSuffix(f, ".json") {
				names = append(names, strings.TrimSuffix(f, ".json"))
			}
		}
	}

	ginx.NewRender(c).Data(names, nil)
}
// AlertCate groups the builtin alert rules of one integration category,
// plus its icon URL and the current user's favorite flag.
type AlertCate struct {
Name string `json:"name"`
IconUrl string `json:"icon_url"`
AlertRules []models.AlertRule `json:"alert_rules"`
Favorite bool `json:"favorite"`
}
// builtinAlertCateGets walks the integrations directory and returns, per
// category, a flat list of its builtin alert rules, the category icon URL,
// and the current user's favorite flag. Unreadable or malformed alert files
// are logged and skipped.
func (rt *Router) builtinAlertCateGets(c *gin.Context) {
fp := rt.Center.BuiltinIntegrationsDir
if fp == "" {
fp = path.Join(runner.Cwd, "integrations")
}
me := c.MustGet("user").(*models.User)
// Favorites lookup failure is non-fatal.
buildinFavoritesMap, err := models.BuiltinCateGetByUserId(rt.Ctx, me.Id)
if err != nil {
logger.Warningf("get builtin favorites fail: %v", err)
}
var alertCates []AlertCate
dirList, err := file.DirsUnder(fp)
ginx.Dangerous(err)
for _, dir := range dirList {
var alertCate AlertCate
alertCate.Name = dir
files, err := file.FilesUnder(fp + "/" + dir + "/alerts")
ginx.Dangerous(err)
var alertRules []models.AlertRule
for _, f := range files {
fn := fp + "/" + dir + "/alerts/" + f
content, err := file.ReadBytes(fn)
if err != nil {
logger.Warningf("add board fail: %v", err)
continue
}
// Each alert file holds a JSON array of rules.
var ars []models.AlertRule
err = json.Unmarshal(content, &ars)
if err != nil {
logger.Warningf("add board:%s fail: %v", fn, err)
continue
}
alertRules = append(alertRules, ars...)
}
alertCate.AlertRules = alertRules
iconFiles, _ := file.FilesUnder(fp + "/" + dir + "/icon")
if len(iconFiles) > 0 {
alertCate.IconUrl = fmt.Sprintf("/api/n9e/integrations/icon/%s/%s", dir, iconFiles[0])
}
if _, ok := buildinFavoritesMap[dir]; ok {
alertCate.Favorite = true
}
alertCates = append(alertCates, alertCate)
}
ginx.NewRender(c).Data(alertCates, nil)
}
// builtinAlertRulesList is like AlertCate but keeps the rules grouped in a
// map keyed by source file name (without the .json suffix).
type builtinAlertRulesList struct {
Name string `json:"name"`
IconUrl string `json:"icon_url"`
AlertRules map[string][]models.AlertRule `json:"alert_rules"`
Favorite bool `json:"favorite"`
}
// builtinAlertRules walks the integrations directory and returns, per
// category, its builtin alert rules grouped by source file name, plus the
// icon URL and the current user's favorite flag. Unreadable or malformed
// files are logged and skipped.
func (rt *Router) builtinAlertRules(c *gin.Context) {
fp := rt.Center.BuiltinIntegrationsDir
if fp == "" {
fp = path.Join(runner.Cwd, "integrations")
}
me := c.MustGet("user").(*models.User)
// Favorites lookup failure is non-fatal.
buildinFavoritesMap, err := models.BuiltinCateGetByUserId(rt.Ctx, me.Id)
if err != nil {
logger.Warningf("get builtin favorites fail: %v", err)
}
var alertCates []builtinAlertRulesList
dirList, err := file.DirsUnder(fp)
ginx.Dangerous(err)
for _, dir := range dirList {
var alertCate builtinAlertRulesList
alertCate.Name = dir
files, err := file.FilesUnder(fp + "/" + dir + "/alerts")
ginx.Dangerous(err)
alertRules := make(map[string][]models.AlertRule)
for _, f := range files {
fn := fp + "/" + dir + "/alerts/" + f
content, err := file.ReadBytes(fn)
if err != nil {
logger.Warningf("add board fail: %v", err)
continue
}
// Each alert file holds a JSON array of rules.
var ars []models.AlertRule
err = json.Unmarshal(content, &ars)
if err != nil {
logger.Warningf("add board:%s fail: %v", fn, err)
continue
}
// Group by file base name.
alertRules[strings.TrimSuffix(f, ".json")] = ars
}
alertCate.AlertRules = alertRules
iconFiles, _ := file.FilesUnder(fp + "/" + dir + "/icon")
if len(iconFiles) > 0 {
alertCate.IconUrl = fmt.Sprintf("/api/n9e/integrations/icon/%s/%s", dir, iconFiles[0])
}
if _, ok := buildinFavoritesMap[dir]; ok {
alertCate.Favorite = true
}
alertCates = append(alertCates, alertCate)
}
ginx.NewRender(c).Data(alertCates, nil)
}
// builtinBoardGet scans every category directory for a dashboard file named
// <name>.json and returns the first match's raw content; 400 when none match.
//
// NOTE(review): name comes from the URL and is joined into the file path
// unsanitized — confirm it cannot contain path separators / "..".
func (rt *Router) builtinBoardGet(c *gin.Context) {
name := ginx.UrlParamStr(c, "name")
dirpath := rt.Center.BuiltinIntegrationsDir
if dirpath == "" {
dirpath = path.Join(runner.Cwd, "integrations")
}
dirList, err := file.DirsUnder(dirpath)
ginx.Dangerous(err)
for _, dir := range dirList {
jsonFile := dirpath + "/" + dir + "/dashboards/" + name + ".json"
if file.IsExist(jsonFile) {
body, err := file.ReadString(jsonFile)
ginx.NewRender(c).Data(body, err)
return
}
}
ginx.Bomb(http.StatusBadRequest, "%s not found", name)
}

View File

@@ -1,64 +0,0 @@
package router
import (
"encoding/json"
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
// notifyChannelsGets lists the configured notify channels as label/key
// pairs, omitting channels marked hidden. An empty stored config yields an
// empty list.
func (rt *Router) notifyChannelsGets(c *gin.Context) {
	var out []models.LabelAndKey

	cval, err := models.ConfigsGet(rt.Ctx, models.NOTIFYCHANNEL)
	ginx.Dangerous(err)
	if cval == "" {
		ginx.NewRender(c).Data(out, nil)
		return
	}

	var channels []models.NotifyChannel
	ginx.Dangerous(json.Unmarshal([]byte(cval), &channels))

	for _, ch := range channels {
		if ch.Hide {
			continue
		}
		out = append(out, models.LabelAndKey{Label: ch.Name, Key: ch.Ident})
	}

	ginx.NewRender(c).Data(out, nil)
}
// contactKeysGets lists the configured notify contact keys as label/key
// pairs, omitting contacts marked hidden. An empty stored config yields an
// empty list.
func (rt *Router) contactKeysGets(c *gin.Context) {
	var out []models.LabelAndKey

	cval, err := models.ConfigsGet(rt.Ctx, models.NOTIFYCONTACT)
	ginx.Dangerous(err)
	if cval == "" {
		ginx.NewRender(c).Data(out, nil)
		return
	}

	var contacts []models.NotifyContact
	ginx.Dangerous(json.Unmarshal([]byte(cval), &contacts))

	for _, ct := range contacts {
		if ct.Hide {
			continue
		}
		out = append(out, models.LabelAndKey{Label: ct.Name, Key: ct.Ident})
	}

	ginx.NewRender(c).Data(out, nil)
}

View File

@@ -1,49 +0,0 @@
package router
import (
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
// configsGet pages through config entries whose key starts with "prefix"
// (limit/offset taken from the query string, default page size 10).
func (rt *Router) configsGet(c *gin.Context) {
	limit := ginx.QueryInt(c, "limit", 10)
	prefix := ginx.QueryStr(c, "prefix", "")
	list, err := models.ConfigsGets(rt.Ctx, prefix, limit, ginx.Offset(c, limit))
	ginx.NewRender(c).Data(list, err)
}
// configGet returns a single config entry by its numeric id.
func (rt *Router) configGet(c *gin.Context) {
	cfg, err := models.ConfigGet(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	ginx.NewRender(c).Data(cfg, err)
}
// configsDel deletes the config entries whose ids are posted in the body.
// NOTE(review): unlike other idsForm consumers this does not call f.Verify(),
// so an empty ids list is passed straight through — confirm that is intended.
func (rt *Router) configsDel(c *gin.Context) {
	var f idsForm
	ginx.BindJSON(c, &f)
	ginx.NewRender(c).Message(models.ConfigsDel(rt.Ctx, f.Ids))
}
// configsPut updates each posted config entry in turn, aborting on the
// first failure.
func (rt *Router) configsPut(c *gin.Context) {
	var items []models.Configs
	ginx.BindJSON(c, &items)
	for i := range items {
		ginx.Dangerous(items[i].Update(rt.Ctx))
	}
	ginx.NewRender(c).Message(nil)
}
// configsPost creates each posted config entry in turn, aborting on the
// first failure.
func (rt *Router) configsPost(c *gin.Context) {
	var items []models.Configs
	ginx.BindJSON(c, &items)
	for i := range items {
		ginx.Dangerous(items[i].Add(rt.Ctx))
	}
	ginx.NewRender(c).Message(nil)
}

View File

@@ -1,63 +0,0 @@
package router
import (
"github.com/ccfos/nightingale/v6/pkg/secu"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
// confPropCrypto is the request body for the conf-prop encrypt/decrypt
// endpoints; Key must be 16, 24 or 32 bytes (the AES key sizes).
type confPropCrypto struct {
Data string `json:"data" binding:"required"`
Key string `json:"key" binding:"required"`
}
// confPropEncrypt encrypts f.Data with the key f.Key and responds with
// {src, key, encrypt}. The key must be 16, 24 or 32 bytes (AES-128/192/256).
// Responds 400 on a bad key length and 500 on encryption failure.
func (rt *Router) confPropEncrypt(c *gin.Context) {
	var f confPropCrypto
	ginx.BindJSON(c, &f)

	// AES accepts only 16/24/32-byte keys.
	switch len(f.Key) {
	case 16, 24, 32:
	default:
		c.String(400, "The key length should be 16, 24 or 32")
		return
	}

	s, err := secu.DealWithEncrypt(f.Data, f.Key)
	if err != nil {
		// Bug fix: without this return the handler also wrote the 200 JSON
		// body below, producing a second, conflicting response after the 500.
		c.String(500, err.Error())
		return
	}

	c.JSON(200, gin.H{
		"src":     f.Data,
		"key":     f.Key,
		"encrypt": s,
	})
}
// confPropDecrypt decrypts f.Data with the key f.Key and responds with
// {src, key, decrypt}. The key must be 16, 24 or 32 bytes (AES-128/192/256).
// Responds 400 on a bad key length and 500 on decryption failure.
func (rt *Router) confPropDecrypt(c *gin.Context) {
	var f confPropCrypto
	ginx.BindJSON(c, &f)

	// AES accepts only 16/24/32-byte keys.
	switch len(f.Key) {
	case 16, 24, 32:
	default:
		c.String(400, "The key length should be 16, 24 or 32")
		return
	}

	s, err := secu.DealWithDecrypt(f.Data, f.Key)
	if err != nil {
		// Bug fix: without this return the handler also wrote the 200 JSON
		// body below, producing a second, conflicting response after the 500.
		c.String(500, err.Error())
		return
	}

	c.JSON(200, gin.H{
		"src":     f.Data,
		"key":     f.Key,
		"decrypt": s,
	})
}

View File

@@ -1,19 +0,0 @@
package router
// ChartPure is a chart in an import/export document, without database ids.
type ChartPure struct {
// Configs is the serialized chart definition.
Configs string `json:"configs"`
// Weight orders charts within their group.
Weight int `json:"weight"`
}
// ChartGroupPure is a named, ordered group of charts.
type ChartGroupPure struct {
Name string `json:"name"`
Weight int `json:"weight"`
Charts []ChartPure `json:"charts"`
}
// DashboardPure is a full dashboard import/export document: dashboard-level
// configs plus its chart groups.
type DashboardPure struct {
Name string `json:"name"`
Tags string `json:"tags"`
Configs string `json:"configs"`
ChartGroups []ChartGroupPure `json:"chart_groups"`
}

View File

@@ -1,87 +0,0 @@
package router
import (
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
// pluginList renders the datasource plugin list from the center config.
func (rt *Router) pluginList(c *gin.Context) {
Render(c, rt.Center.Plugins, nil)
}
// listReq is the JSON filter body for the datasource list endpoint.
type listReq struct {
Name string `json:"name"`
Type string `json:"plugin_type"`
Category string `json:"category"`
}
// datasourceList returns datasources filtered by plugin type, category and
// name, all taken from the JSON body.
func (rt *Router) datasourceList(c *gin.Context) {
	var req listReq
	ginx.BindJSON(c, &req)
	list, err := models.GetDatasourcesGetsBy(rt.Ctx, req.Type, req.Category, req.Name, "")
	Render(c, list, err)
}
// datasourceUpsert creates (Id == 0) or updates a datasource. On create the
// name must be unique and status defaults to "enabled"; on update only the
// whitelisted columns are written. The operator is stamped as updated_by
// (and created_by on create).
func (rt *Router) datasourceUpsert(c *gin.Context) {
var req models.Datasource
ginx.BindJSON(c, &req)
username := Username(c)
req.UpdatedBy = username
var err error
var count int64
if req.Id == 0 {
req.CreatedBy = username
req.Status = "enabled"
// Reject duplicate names on create.
count, err = models.GetDatasourcesCountBy(rt.Ctx, "", "", req.Name)
if err != nil {
Render(c, nil, err)
return
}
if count > 0 {
Render(c, nil, "name already exists")
return
}
err = req.Add(rt.Ctx)
} else {
// NOTE(review): the update path does not re-check name uniqueness —
// confirm whether renames can collide.
err = req.Update(rt.Ctx, "name", "description", "cluster_name", "settings", "http", "auth", "status", "updated_by", "updated_at")
}
Render(c, nil, err)
}
// datasourceGet loads the datasource identified by the JSON body and renders
// the populated record back.
func (rt *Router) datasourceGet(c *gin.Context) {
	var ds models.Datasource
	ginx.BindJSON(c, &ds)
	// Get fills ds in place, so it must run before ds is rendered.
	err := ds.Get(rt.Ctx)
	Render(c, ds, err)
}
// datasourceUpdataStatus updates a datasource's status field, stamping the
// operator as updated_by. (Name typo is preserved: routes reference it.)
func (rt *Router) datasourceUpdataStatus(c *gin.Context) {
	var ds models.Datasource
	ginx.BindJSON(c, &ds)
	ds.UpdatedBy = Username(c)
	err := ds.Update(rt.Ctx, "status", "updated_by", "updated_at")
	Render(c, ds, err)
}
// datasourceDel deletes the datasources whose ids are posted as a JSON array.
func (rt *Router) datasourceDel(c *gin.Context) {
	var ids []int64
	ginx.BindJSON(c, &ids)
	Render(c, nil, models.DatasourceDel(rt.Ctx, ids))
}
// Username returns the username stored in the gin context; MustGet panics
// if it was not set (e.g. auth middleware missing).
func Username(c *gin.Context) string {
return c.MustGet("username").(string)
}

View File

@@ -1,121 +0,0 @@
package router
import (
"fmt"
"net/http"
"strconv"
"strings"
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/ibex"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
// defaultLimit is the fallback page size for list queries in this package —
// confirm exact consumers at call sites.
const defaultLimit = 300
// queryDatasourceIds parses the "datasource_ids" query parameter — int64 ids
// separated by commas and/or whitespace — into a slice.
//
// Fix: tokens that fail to parse are now skipped instead of being silently
// recorded as 0, which previously injected bogus zero ids into queries.
func queryDatasourceIds(c *gin.Context) []int64 {
	raw := ginx.QueryStr(c, "datasource_ids", "")
	// Normalize commas to spaces so Fields splits on both separators.
	tokens := strings.Fields(strings.ReplaceAll(raw, ",", " "))
	ids := make([]int64, 0, len(tokens))
	for _, tok := range tokens {
		if id, err := strconv.ParseInt(tok, 10, 64); err == nil {
			ids = append(ids, id)
		}
	}
	return ids
}
// idsForm carries a list of record ids for bulk operations.
type idsForm struct {
Ids []int64 `json:"ids"`
}
// Verify aborts the request with 400 when no ids were supplied.
func (f idsForm) Verify() {
if len(f.Ids) == 0 {
ginx.Bomb(http.StatusBadRequest, "ids empty")
}
}
// User fetches a user by id, aborting with 404 when absent and propagating
// query errors via ginx.Dangerous.
func User(ctx *ctx.Context, id int64) *models.User {
	u, err := models.UserGetById(ctx, id)
	ginx.Dangerous(err)
	if u == nil {
		ginx.Bomb(http.StatusNotFound, "No such user")
	}
	return u
}
// UserGroup fetches a user group by id, aborting with 404 when absent.
func UserGroup(ctx *ctx.Context, id int64) *models.UserGroup {
	g, err := models.UserGroupGetById(ctx, id)
	ginx.Dangerous(err)
	if g == nil {
		ginx.Bomb(http.StatusNotFound, "No such UserGroup")
	}
	return g
}
// BusiGroup fetches a business group by id, aborting with 404 when absent.
func BusiGroup(ctx *ctx.Context, id int64) *models.BusiGroup {
	bg, err := models.BusiGroupGetById(ctx, id)
	ginx.Dangerous(err)
	if bg == nil {
		ginx.Bomb(http.StatusNotFound, "No such BusiGroup")
	}
	return bg
}
// Dashboard fetches a (legacy) dashboard by id, aborting with 404 when absent.
func Dashboard(ctx *ctx.Context, id int64) *models.Dashboard {
	d, err := models.DashboardGet(ctx, "id=?", id)
	ginx.Dangerous(err)
	if d == nil {
		ginx.Bomb(http.StatusNotFound, "No such dashboard")
	}
	return d
}
// DoneIdsReply is the ibex response envelope carrying a list of task ids.
type DoneIdsReply struct {
Err string `json:"err"`
Dat struct {
List []int64 `json:"list"`
} `json:"dat"`
}
// TaskCreateReply is the ibex response envelope for task creation; Dat is
// the new task id.
type TaskCreateReply struct {
Err string `json:"err"`
Dat int64 `json:"dat"` // task.id
}
// TaskCreate submits a task payload to the ibex service and returns the new
// task id. A transport failure, or a non-empty err field in the response, is
// surfaced as an error.
func TaskCreate(v interface{}, ibexc aconf.Ibex) (int64, error) {
	var reply TaskCreateReply

	client := ibex.New(ibexc.Address, ibexc.BasicAuthUser, ibexc.BasicAuthPass, ibexc.Timeout)
	if err := client.Path("/ibex/v1/tasks").In(v).Out(&reply).POST(); err != nil {
		return 0, err
	}

	if reply.Err != "" {
		return 0, fmt.Errorf("response.err: %v", reply.Err)
	}
	return reply.Dat, nil
}

View File

@@ -1,41 +0,0 @@
package router
import (
"compress/gzip"
"encoding/json"
"io/ioutil"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
// heartbeat ingests an agent host-meta report (optionally gzip-compressed,
// per the Content-Encoding header), records the clock offset between server
// and agent, and caches the meta keyed by hostname.
// NOTE(review): ioutil.ReadAll is deprecated since Go 1.16 in favor of
// io.ReadAll — candidate for a follow-up cleanup.
func (rt *Router) heartbeat(c *gin.Context) {
var bs []byte
var err error
var r *gzip.Reader
var req models.HostMeta
if c.GetHeader("Content-Encoding") == "gzip" {
// Transparently decompress gzip-encoded bodies.
r, err = gzip.NewReader(c.Request.Body)
if err != nil {
c.String(400, err.Error())
return
}
defer r.Close()
bs, err = ioutil.ReadAll(r)
ginx.Dangerous(err)
} else {
defer c.Request.Body.Close()
bs, err = ioutil.ReadAll(c.Request.Body)
ginx.Dangerous(err)
}
err = json.Unmarshal(bs, &req)
ginx.Dangerous(err)
// Server clock minus agent clock, in milliseconds.
req.Offset = (time.Now().UnixMilli() - req.UnixTime)
rt.MetaSet.Set(req.Hostname, req)
ginx.NewRender(c).Message(nil)
}

View File

@@ -1,539 +0,0 @@
package router
import (
"fmt"
"net/http"
"strconv"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/cas"
"github.com/ccfos/nightingale/v6/pkg/ldapx"
"github.com/ccfos/nightingale/v6/pkg/oauth2x"
"github.com/ccfos/nightingale/v6/pkg/oidcx"
"github.com/pelletier/go-toml/v2"
"github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
)
// loginForm is the JSON credentials body for password login.
type loginForm struct {
Username string `json:"username" binding:"required"`
Password string `json:"password" binding:"required"`
}
// loginPost authenticates with username/password, falling back to LDAP when
// the local check fails and LDAP is enabled. On success it mints an
// access/refresh token pair, stores the session, and returns user + tokens.
func (rt *Router) loginPost(c *gin.Context) {
var f loginForm
ginx.BindJSON(c, &f)
user, err := models.PassLogin(rt.Ctx, f.Username, f.Password)
if err != nil {
// pass validate fail, try ldap
if rt.Sso.LDAP.Enable {
roles := strings.Join(rt.Sso.LDAP.DefaultRoles, " ")
user, err = models.LdapLogin(rt.Ctx, f.Username, f.Password, roles, rt.Sso.LDAP)
if err != nil {
logger.Debugf("ldap login failed: %v username: %s", err, f.Username)
ginx.NewRender(c).Message(err)
return
}
// LDAP users get the configured default roles.
user.RolesLst = rt.Sso.LDAP.DefaultRoles
} else {
ginx.NewRender(c).Message(err)
return
}
}
if user == nil {
// Theoretically impossible
ginx.NewRender(c).Message("Username or password invalid")
return
}
// JWT identity is "<id>-<username>"; parsed back in the auth middleware.
userIdentity := fmt.Sprintf("%d-%s", user.Id, user.Username)
ts, err := rt.createTokens(rt.HTTP.JWTAuth.SigningKey, userIdentity)
ginx.Dangerous(err)
ginx.Dangerous(rt.createAuth(c.Request.Context(), userIdentity, ts))
ginx.NewRender(c).Data(gin.H{
"user": user,
"access_token": ts.AccessToken,
"refresh_token": ts.RefreshToken,
}, nil)
}
// logoutPost invalidates the caller's session by deleting the stored tokens
// referenced in the request's JWT; 400 when the token cannot be parsed.
func (rt *Router) logoutPost(c *gin.Context) {
metadata, err := rt.extractTokenMetadata(c.Request)
if err != nil {
ginx.NewRender(c, http.StatusBadRequest).Message("failed to parse jwt token")
return
}
delErr := rt.deleteTokens(c.Request.Context(), metadata)
if delErr != nil {
ginx.NewRender(c).Message(http.StatusText(http.StatusInternalServerError))
return
}
ginx.NewRender(c).Message("")
}
// refreshForm carries the refresh token for the token-rotation endpoint.
type refreshForm struct {
RefreshToken string `json:"refresh_token" binding:"required"`
}
// refreshPost rotates a token pair: it validates the posted refresh JWT
// (HMAC-signed with the configured key), confirms the user still exists,
// revokes the old refresh+access sessions, and issues a fresh pair.
// Any validation failure responds 401 so the client returns to login.
func (rt *Router) refreshPost(c *gin.Context) {
var f refreshForm
ginx.BindJSON(c, &f)
// verify the token
token, err := jwt.Parse(f.RefreshToken, func(token *jwt.Token) (interface{}, error) {
// Refuse any non-HMAC alg to prevent signature-bypass tricks.
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected jwt signing method: %v", token.Header["alg"])
}
return []byte(rt.HTTP.JWTAuth.SigningKey), nil
})
// if there is an error, the token must have expired
if err != nil {
// redirect to login page
ginx.NewRender(c, http.StatusUnauthorized).Message("refresh token expired")
return
}
// Since token is valid, get the uuid:
claims, ok := token.Claims.(jwt.MapClaims) //the token claims should conform to MapClaims
if ok && token.Valid {
refreshUuid, ok := claims["refresh_uuid"].(string) //convert the interface to string
if !ok {
// Theoretically impossible
ginx.NewRender(c, http.StatusUnauthorized).Message("failed to parse refresh_uuid from jwt")
return
}
userIdentity, ok := claims["user_identity"].(string)
if !ok {
// Theoretically impossible
ginx.NewRender(c, http.StatusUnauthorized).Message("failed to parse user_identity from jwt")
return
}
// user_identity is "<id>-<username>"; the leading segment is the user id.
userid, err := strconv.ParseInt(strings.Split(userIdentity, "-")[0], 10, 64)
if err != nil {
ginx.NewRender(c, http.StatusUnauthorized).Message("failed to parse user_identity from jwt")
return
}
u, err := models.UserGetById(rt.Ctx, userid)
if err != nil {
ginx.NewRender(c, http.StatusInternalServerError).Message("failed to query user by id")
return
}
if u == nil {
// user already deleted
ginx.NewRender(c, http.StatusUnauthorized).Message("user already deleted")
return
}
// Delete the previous Refresh Token
err = rt.deleteAuth(c.Request.Context(), refreshUuid)
if err != nil {
ginx.NewRender(c, http.StatusUnauthorized).Message(http.StatusText(http.StatusInternalServerError))
return
}
// Delete previous Access Token; refresh_uuid is "<access_uuid>++...",
// so the access uuid is the part before "++". Best-effort: error ignored.
rt.deleteAuth(c.Request.Context(), strings.Split(refreshUuid, "++")[0])
// Create new pairs of refresh and access tokens
ts, err := rt.createTokens(rt.HTTP.JWTAuth.SigningKey, userIdentity)
ginx.Dangerous(err)
ginx.Dangerous(rt.createAuth(c.Request.Context(), userIdentity, ts))
ginx.NewRender(c).Data(gin.H{
"access_token": ts.AccessToken,
"refresh_token": ts.RefreshToken,
}, nil)
} else {
// redirect to login page
ginx.NewRender(c, http.StatusUnauthorized).Message("refresh token expired")
}
}
// loginRedirect resolves where the frontend should send the user: the
// original redirect target when already logged in, the OIDC authorize URL
// when OIDC is enabled, or empty when neither applies.
func (rt *Router) loginRedirect(c *gin.Context) {
redirect := ginx.QueryStr(c, "redirect", "/")
v, exists := c.Get("userid")
if exists {
userid := v.(int64)
user, err := models.UserGetById(rt.Ctx, userid)
ginx.Dangerous(err)
if user == nil {
ginx.Bomb(200, "user not found")
}
if user.Username != "" { // already login
ginx.NewRender(c).Data(redirect, nil)
return
}
}
if !rt.Sso.OIDC.Enable {
ginx.NewRender(c).Data("", nil)
return
}
// Build the OIDC authorize URL (state stored in Redis) and hand it back.
redirect, err := rt.Sso.OIDC.Authorize(rt.Redis, redirect)
ginx.Dangerous(err)
ginx.NewRender(c).Data(redirect, err)
}
// CallbackOutput is the SSO callback response: where to redirect, the
// resolved user, and the freshly minted token pair.
type CallbackOutput struct {
Redirect string `json:"redirect"`
User *models.User `json:"user"`
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
}
// loginCallback handles the OIDC authorization-code callback: it exchanges
// code/state for the identity, provisions or refreshes the local user
// record, issues a JWT token pair and returns the post-login redirect.
func (rt *Router) loginCallback(c *gin.Context) {
	code := ginx.QueryStr(c, "code", "")
	state := ginx.QueryStr(c, "state", "")

	ret, err := rt.Sso.OIDC.Callback(rt.Redis, c.Request.Context(), code, state)
	if err != nil {
		logger.Debugf("sso.callback() get ret %+v error %v", ret, err)
		ginx.NewRender(c).Data(CallbackOutput{}, err)
		return
	}

	user, err := models.UserGet(rt.Ctx, "username=?", ret.Username)
	ginx.Dangerous(err)

	if user != nil {
		if rt.Sso.OIDC.CoverAttributes {
			// only overwrite attributes the IdP actually returned
			if ret.Nickname != "" {
				user.Nickname = ret.Nickname
			}

			if ret.Email != "" {
				user.Email = ret.Email
			}

			if ret.Phone != "" {
				user.Phone = ret.Phone
			}

			user.UpdateAt = time.Now().Unix()
			// bugfix: the update error was silently discarded; surface it
			// the same way the CAS callback does
			ginx.Dangerous(user.Update(rt.Ctx, "email", "nickname", "phone", "update_at"))
		}
	} else {
		now := time.Now().Unix()
		user = &models.User{
			Username: ret.Username,
			Password: "******",
			Nickname: ret.Nickname,
			Phone:    ret.Phone,
			Email:    ret.Email,
			Portrait: "",
			Roles:    strings.Join(rt.Sso.OIDC.DefaultRoles, " "),
			RolesLst: rt.Sso.OIDC.DefaultRoles,
			Contacts: []byte("{}"),
			CreateAt: now,
			UpdateAt: now,
			CreateBy: "oidc",
			UpdateBy: "oidc",
		}

		// create user from oidc
		ginx.Dangerous(user.Add(rt.Ctx))
	}

	// set user login state
	userIdentity := fmt.Sprintf("%d-%s", user.Id, user.Username)

	ts, err := rt.createTokens(rt.HTTP.JWTAuth.SigningKey, userIdentity)
	ginx.Dangerous(err)
	ginx.Dangerous(rt.createAuth(c.Request.Context(), userIdentity, ts))

	// never bounce the user back to the login page itself
	redirect := "/"
	if ret.Redirect != "/login" {
		redirect = ret.Redirect
	}

	ginx.NewRender(c).Data(CallbackOutput{
		Redirect:     redirect,
		User:         user,
		AccessToken:  ts.AccessToken,
		RefreshToken: ts.RefreshToken,
	}, nil)
}
// RedirectOutput is the response of the CAS login-redirect endpoint: the
// authorize URL plus the anti-forgery state token the front-end must echo
// back on callback.
type RedirectOutput struct {
	Redirect string `json:"redirect"`
	State    string `json:"state"`
}
// loginRedirectCas is the CAS login entry point: an already-authenticated
// caller gets the requested redirect back; otherwise, when CAS is enabled,
// the authorize URL and a state token are returned.
func (rt *Router) loginRedirectCas(c *gin.Context) {
	redirect := ginx.QueryStr(c, "redirect", "/")

	if v, ok := c.Get("userid"); ok {
		user, err := models.UserGetById(rt.Ctx, v.(int64))
		ginx.Dangerous(err)
		if user == nil {
			ginx.Bomb(200, "user not found")
		}
		if user.Username != "" { // already login
			ginx.NewRender(c).Data(redirect, nil)
			return
		}
	}

	if !rt.Sso.CAS.Enable {
		logger.Error("cas is not enable")
		ginx.NewRender(c).Data("", nil)
		return
	}

	redirect, state, err := rt.Sso.CAS.Authorize(rt.Redis, redirect)
	ginx.Dangerous(err)

	out := RedirectOutput{
		Redirect: redirect,
		State:    state,
	}
	ginx.NewRender(c).Data(out, err)
}
// loginCallbackCas handles the CAS service-ticket callback: it validates
// the ticket, provisions or refreshes the local user record, issues a JWT
// token pair and returns the post-login redirect.
func (rt *Router) loginCallbackCas(c *gin.Context) {
	ticket := ginx.QueryStr(c, "ticket", "")
	state := ginx.QueryStr(c, "state", "")

	ret, err := rt.Sso.CAS.ValidateServiceTicket(c.Request.Context(), ticket, state, rt.Redis)
	if err != nil {
		logger.Errorf("ValidateServiceTicket: %s", err)
		ginx.NewRender(c).Data("", err)
		return
	}

	user, err := models.UserGet(rt.Ctx, "username=?", ret.Username)
	if err != nil {
		// logged for operators; the same err is re-raised just below
		logger.Errorf("UserGet: %s", err)
	}
	ginx.Dangerous(err)

	if user != nil {
		if rt.Sso.CAS.CoverAttributes {
			// only overwrite attributes CAS actually returned
			if ret.Nickname != "" {
				user.Nickname = ret.Nickname
			}
			if ret.Email != "" {
				user.Email = ret.Email
			}
			if ret.Phone != "" {
				user.Phone = ret.Phone
			}
			user.UpdateAt = time.Now().Unix()
			ginx.Dangerous(user.Update(rt.Ctx, "email", "nickname", "phone", "update_at"))
		}
	} else {
		// first CAS login: auto-provision with the configured default roles
		now := time.Now().Unix()
		user = &models.User{
			Username: ret.Username,
			Password: "******",
			Nickname: ret.Nickname,
			Portrait: "",
			Roles:    strings.Join(rt.Sso.CAS.DefaultRoles, " "),
			RolesLst: rt.Sso.CAS.DefaultRoles,
			Contacts: []byte("{}"),
			Phone:    ret.Phone,
			Email:    ret.Email,
			CreateAt: now,
			UpdateAt: now,
			CreateBy: "CAS",
			UpdateBy: "CAS",
		}
		// create user from cas
		ginx.Dangerous(user.Add(rt.Ctx))
	}

	// set user login state
	userIdentity := fmt.Sprintf("%d-%s", user.Id, user.Username)

	ts, err := rt.createTokens(rt.HTTP.JWTAuth.SigningKey, userIdentity)
	if err != nil {
		// logged for operators; the same err is re-raised just below
		logger.Errorf("createTokens: %s", err)
	}
	ginx.Dangerous(err)
	ginx.Dangerous(rt.createAuth(c.Request.Context(), userIdentity, ts))

	// never bounce the user back to the login page itself
	redirect := "/"
	if ret.Redirect != "/login" {
		redirect = ret.Redirect
	}

	ginx.NewRender(c).Data(CallbackOutput{
		Redirect:     redirect,
		User:         user,
		AccessToken:  ts.AccessToken,
		RefreshToken: ts.RefreshToken,
	}, nil)
}
// loginRedirectOAuth is the OAuth2 login entry point: an already-authenticated
// caller gets the requested redirect back unchanged; otherwise, when OAuth2
// is enabled, the provider's authorize URL is returned.
func (rt *Router) loginRedirectOAuth(c *gin.Context) {
	redirect := ginx.QueryStr(c, "redirect", "/")

	if v, ok := c.Get("userid"); ok {
		user, err := models.UserGetById(rt.Ctx, v.(int64))
		ginx.Dangerous(err)
		if user == nil {
			ginx.Bomb(200, "user not found")
		}
		if user.Username != "" { // already login
			ginx.NewRender(c).Data(redirect, nil)
			return
		}
	}

	if !rt.Sso.OAuth2.Enable {
		ginx.NewRender(c).Data("", nil)
		return
	}

	redirect, err := rt.Sso.OAuth2.Authorize(rt.Redis, redirect)
	ginx.Dangerous(err)
	ginx.NewRender(c).Data(redirect, err)
}
// loginCallbackOAuth handles the OAuth2 authorization-code callback: it
// exchanges code/state for the identity, provisions or refreshes the local
// user record, issues a JWT token pair and returns the post-login redirect.
func (rt *Router) loginCallbackOAuth(c *gin.Context) {
	code := ginx.QueryStr(c, "code", "")
	state := ginx.QueryStr(c, "state", "")

	ret, err := rt.Sso.OAuth2.Callback(rt.Redis, c.Request.Context(), code, state)
	if err != nil {
		logger.Debugf("sso.callback() get ret %+v error %v", ret, err)
		ginx.NewRender(c).Data(CallbackOutput{}, err)
		return
	}

	user, err := models.UserGet(rt.Ctx, "username=?", ret.Username)
	ginx.Dangerous(err)

	if user != nil {
		if rt.Sso.OAuth2.CoverAttributes {
			// only overwrite attributes the provider actually returned
			if ret.Nickname != "" {
				user.Nickname = ret.Nickname
			}

			if ret.Email != "" {
				user.Email = ret.Email
			}

			if ret.Phone != "" {
				user.Phone = ret.Phone
			}

			user.UpdateAt = time.Now().Unix()
			// bugfix: the update error was silently discarded; surface it
			// the same way the CAS callback does
			ginx.Dangerous(user.Update(rt.Ctx, "email", "nickname", "phone", "update_at"))
		}
	} else {
		now := time.Now().Unix()
		user = &models.User{
			Username: ret.Username,
			Password: "******",
			Nickname: ret.Nickname,
			Phone:    ret.Phone,
			Email:    ret.Email,
			Portrait: "",
			Roles:    strings.Join(rt.Sso.OAuth2.DefaultRoles, " "),
			RolesLst: rt.Sso.OAuth2.DefaultRoles,
			Contacts: []byte("{}"),
			CreateAt: now,
			UpdateAt: now,
			CreateBy: "oauth2",
			UpdateBy: "oauth2",
		}

		// create user from oauth2 (comment previously said "oidc")
		ginx.Dangerous(user.Add(rt.Ctx))
	}

	// set user login state
	userIdentity := fmt.Sprintf("%d-%s", user.Id, user.Username)

	ts, err := rt.createTokens(rt.HTTP.JWTAuth.SigningKey, userIdentity)
	ginx.Dangerous(err)
	ginx.Dangerous(rt.createAuth(c.Request.Context(), userIdentity, ts))

	// never bounce the user back to the login page itself
	redirect := "/"
	if ret.Redirect != "/login" {
		redirect = ret.Redirect
	}

	ginx.NewRender(c).Data(CallbackOutput{
		Redirect:     redirect,
		User:         user,
		AccessToken:  ts.AccessToken,
		RefreshToken: ts.RefreshToken,
	}, nil)
}
// SsoConfigOutput carries the display names the login page shows on its
// SSO buttons, one per configured provider.
type SsoConfigOutput struct {
	OidcDisplayName  string `json:"oidcDisplayName"`
	CasDisplayName   string `json:"casDisplayName"`
	OauthDisplayName string `json:"oauthDisplayName"`
}
// ssoConfigNameGet returns the configured display name of every SSO
// provider for the login page.
func (rt *Router) ssoConfigNameGet(c *gin.Context) {
	out := SsoConfigOutput{
		OidcDisplayName:  rt.Sso.OIDC.GetDisplayName(),
		CasDisplayName:   rt.Sso.CAS.GetDisplayName(),
		OauthDisplayName: rt.Sso.OAuth2.GetDisplayName(),
	}
	ginx.NewRender(c).Data(out, nil)
}
// ssoConfigGets returns the stored SSO provider configurations.
func (rt *Router) ssoConfigGets(c *gin.Context) {
	configs, err := models.SsoConfigGets(rt.Ctx)
	ginx.NewRender(c).Data(configs, err)
}
// ssoConfigUpdate persists an SSO provider's TOML configuration and
// hot-reloads the matching in-memory client (LDAP / OIDC / CAS / OAuth2)
// so the change takes effect without a restart.
func (rt *Router) ssoConfigUpdate(c *gin.Context) {
	var f models.SsoConfig
	ginx.BindJSON(c, &f)

	err := f.Update(rt.Ctx)
	ginx.Dangerous(err)

	// re-parse the just-saved TOML and swap the corresponding client
	switch f.Name {
	case "LDAP":
		var config ldapx.Config
		err := toml.Unmarshal([]byte(f.Content), &config)
		ginx.Dangerous(err)
		rt.Sso.LDAP.Reload(config)
	case "OIDC":
		var config oidcx.Config
		err := toml.Unmarshal([]byte(f.Content), &config)
		ginx.Dangerous(err)
		// OIDC reload can itself fail (provider discovery), hence the
		// extra error check compared to the other providers
		err = rt.Sso.OIDC.Reload(config)
		ginx.Dangerous(err)
	case "CAS":
		var config cas.Config
		err := toml.Unmarshal([]byte(f.Content), &config)
		ginx.Dangerous(err)
		rt.Sso.CAS.Reload(config)
	case "OAuth2":
		var config oauth2x.Config
		err := toml.Unmarshal([]byte(f.Content), &config)
		ginx.Dangerous(err)
		rt.Sso.OAuth2.Reload(config)
	}

	ginx.NewRender(c).Message(nil)
}

View File

@@ -1,105 +0,0 @@
package router
import (
"net/http"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
// alertMuteGetsByBG returns every mute rule of one business group; the
// front-end does its own searching and paging.
func (rt *Router) alertMuteGetsByBG(c *gin.Context) {
	groupId := ginx.UrlParamInt64(c, "id")
	mutes, err := models.AlertMuteGetsByBG(rt.Ctx, groupId)
	ginx.NewRender(c).Data(mutes, err)
}
// alertMuteGets lists mute rules filtered by product identifiers,
// business group and a free-text query.
func (rt *Router) alertMuteGets(c *gin.Context) {
	prods := strings.Fields(ginx.QueryStr(c, "prods", ""))
	groupId := ginx.QueryInt64(c, "bgid", 0)
	keyword := ginx.QueryStr(c, "query", "")

	mutes, err := models.AlertMuteGets(rt.Ctx, prods, groupId, keyword)
	ginx.NewRender(c).Data(mutes, err)
}
// alertMuteAdd creates a mute rule inside the business group named in the
// URL, stamping the caller as its creator.
func (rt *Router) alertMuteAdd(c *gin.Context) {
	var mute models.AlertMute
	ginx.BindJSON(c, &mute)

	mute.CreateBy = c.MustGet("username").(string)
	mute.GroupId = ginx.UrlParamInt64(c, "id")

	ginx.NewRender(c).Message(mute.Add(rt.Ctx))
}
// alertMuteAddByService creates a mute rule via the service API; unlike
// alertMuteAdd no creator or group is stamped from the request context.
func (rt *Router) alertMuteAddByService(c *gin.Context) {
	var mute models.AlertMute
	ginx.BindJSON(c, &mute)
	ginx.NewRender(c).Message(mute.Add(rt.Ctx))
}
// alertMuteDel deletes the mute rules whose ids are listed in the body.
func (rt *Router) alertMuteDel(c *gin.Context) {
	var form idsForm
	ginx.BindJSON(c, &form)
	form.Verify()

	ginx.NewRender(c).Message(models.AlertMuteDel(rt.Ctx, form.Ids))
}
// alertMutePutByFE updates one mute rule from the front-end after checking
// the record exists and the caller has write permission on its business
// group; the caller is stamped as updater.
func (rt *Router) alertMutePutByFE(c *gin.Context) {
	var f models.AlertMute
	ginx.BindJSON(c, &f)

	amid := ginx.UrlParamInt64(c, "amid")
	am, err := models.AlertMuteGetById(rt.Ctx, amid)
	ginx.Dangerous(err)

	if am == nil {
		ginx.NewRender(c, http.StatusNotFound).Message("No such AlertMute")
		return
	}

	// permission check: caller must have read-write access to the rule's
	// business group (bgrwCheck aborts the request otherwise)
	rt.bgrwCheck(c, am.GroupId)

	f.UpdateBy = c.MustGet("username").(string)
	ginx.NewRender(c).Message(am.Update(rt.Ctx, f))
}
// alertMuteFieldForm is the payload of the bulk field-update endpoint:
// the mute-rule ids to touch and the column/value pairs to set on each.
type alertMuteFieldForm struct {
	Ids    []int64                `json:"ids"`
	Fields map[string]interface{} `json:"fields"`
}
// alertMutePutFields applies the same set of field updates to every mute
// rule listed in the form, skipping ids that no longer exist.
func (rt *Router) alertMutePutFields(c *gin.Context) {
	var form alertMuteFieldForm
	ginx.BindJSON(c, &form)

	if len(form.Fields) == 0 {
		ginx.Bomb(http.StatusBadRequest, "fields empty")
	}

	// stamp the audit columns on every updated record
	form.Fields["update_by"] = c.MustGet("username").(string)
	form.Fields["update_at"] = time.Now().Unix()

	for _, id := range form.Ids {
		mute, err := models.AlertMuteGetById(rt.Ctx, id)
		ginx.Dangerous(err)

		if mute == nil {
			continue // deleted concurrently; nothing to update
		}

		mute.FE2DB()
		ginx.Dangerous(mute.UpdateFieldsMap(rt.Ctx, form.Fields))
	}

	ginx.NewRender(c).Message(nil)
}

View File

@@ -1,206 +0,0 @@
package router
import (
"encoding/json"
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/alert/sender"
"github.com/ccfos/nightingale/v6/models"
"github.com/pelletier/go-toml/v2"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
// webhookGets returns the globally configured webhooks; an empty list when
// nothing has been stored yet.
func (rt *Router) webhookGets(c *gin.Context) {
	var webhooks []models.Webhook

	stored, err := models.ConfigsGet(rt.Ctx, models.WEBHOOKKEY)
	ginx.Dangerous(err)

	if stored == "" {
		ginx.NewRender(c).Data(webhooks, nil)
		return
	}

	unmarshalErr := json.Unmarshal([]byte(stored), &webhooks)
	ginx.NewRender(c).Data(webhooks, unmarshalErr)
}
// webhookPuts replaces the global webhook list, flattening each HeaderMap
// into the flat [key, value, ...] slice the storage format uses.
func (rt *Router) webhookPuts(c *gin.Context) {
	var webhooks []models.Webhook
	ginx.BindJSON(c, &webhooks)

	for i := range webhooks {
		for k, v := range webhooks[i].HeaderMap {
			webhooks[i].Headers = append(webhooks[i].Headers, k, v)
		}
	}

	data, err := json.Marshal(webhooks)
	ginx.Dangerous(err)

	ginx.NewRender(c).Message(models.ConfigsSet(rt.Ctx, models.WEBHOOKKEY, string(data)))
}
// notifyScriptGet returns the stored notification script; a zero value
// when nothing has been stored yet.
func (rt *Router) notifyScriptGet(c *gin.Context) {
	var script models.NotifyScript

	stored, err := models.ConfigsGet(rt.Ctx, models.NOTIFYSCRIPT)
	ginx.Dangerous(err)

	if stored == "" {
		ginx.NewRender(c).Data(script, nil)
		return
	}

	unmarshalErr := json.Unmarshal([]byte(stored), &script)
	ginx.NewRender(c).Data(script, unmarshalErr)
}
// notifyScriptPut stores the notification script as JSON.
func (rt *Router) notifyScriptPut(c *gin.Context) {
	var script models.NotifyScript
	ginx.BindJSON(c, &script)

	payload, err := json.Marshal(script)
	ginx.Dangerous(err)

	ginx.NewRender(c).Message(models.ConfigsSet(rt.Ctx, models.NOTIFYSCRIPT, string(payload)))
}
// notifyChannelGets returns the configured notification channels; an
// empty list when nothing has been stored yet.
func (rt *Router) notifyChannelGets(c *gin.Context) {
	var channels []models.NotifyChannel

	stored, err := models.ConfigsGet(rt.Ctx, models.NOTIFYCHANNEL)
	ginx.Dangerous(err)

	if stored == "" {
		ginx.NewRender(c).Data(channels, nil)
		return
	}

	unmarshalErr := json.Unmarshal([]byte(stored), &channels)
	ginx.NewRender(c).Data(channels, unmarshalErr)
}
// notifyChannelPuts replaces the notification channel list after checking
// that every built-in channel ident is still present (built-ins may be
// edited but not removed).
func (rt *Router) notifyChannelPuts(c *gin.Context) {
	var notifyChannels []models.NotifyChannel
	ginx.BindJSON(c, &notifyChannels)

	present := make(map[string]struct{}, len(notifyChannels))
	for _, ch := range notifyChannels {
		present[ch.Ident] = struct{}{}
	}

	builtins := []string{models.Dingtalk, models.Wecom, models.Feishu, models.Mm, models.Telegram, models.Email}
	for _, ident := range builtins {
		if _, ok := present[ident]; !ok {
			ginx.Bomb(200, "channel %s ident can not modify", ident)
		}
	}

	data, err := json.Marshal(notifyChannels)
	ginx.Dangerous(err)

	ginx.NewRender(c).Message(models.ConfigsSet(rt.Ctx, models.NOTIFYCHANNEL, string(data)))
}
// notifyContactGets returns the configured contact channels; an empty
// list when nothing has been stored yet.
func (rt *Router) notifyContactGets(c *gin.Context) {
	var contacts []models.NotifyContact

	stored, err := models.ConfigsGet(rt.Ctx, models.NOTIFYCONTACT)
	ginx.Dangerous(err)

	if stored == "" {
		ginx.NewRender(c).Data(contacts, nil)
		return
	}

	unmarshalErr := json.Unmarshal([]byte(stored), &contacts)
	ginx.NewRender(c).Data(contacts, unmarshalErr)
}
// notifyContactPuts replaces the contact channel list after checking that
// every built-in contact key is still present (built-ins may be edited
// but not removed).
func (rt *Router) notifyContactPuts(c *gin.Context) {
	var notifyContacts []models.NotifyContact
	ginx.BindJSON(c, &notifyContacts)

	present := make(map[string]struct{}, len(notifyContacts))
	for _, contact := range notifyContacts {
		present[contact.Ident] = struct{}{}
	}

	builtins := []string{models.DingtalkKey, models.WecomKey, models.FeishuKey, models.MmKey, models.TelegramKey}
	for _, key := range builtins {
		if _, ok := present[key]; !ok {
			ginx.Bomb(200, "contact %s ident can not modify", key)
		}
	}

	data, err := json.Marshal(notifyContacts)
	ginx.Dangerous(err)

	ginx.NewRender(c).Message(models.ConfigsSet(rt.Ctx, models.NOTIFYCONTACT, string(data)))
}
// DefaultSMTP is the SMTP settings template (TOML) that notifyConfigGet
// returns when no SMTP configuration has been stored yet.
const DefaultSMTP = `
Host = ""
Port = 994
User = "username"
Pass = "password"
From = "username@163.com"
InsecureSkipVerify = true
Batch = 5
`

// DefaultIbex is the ibex task-executor settings template (TOML) that
// notifyConfigGet returns when no ibex configuration has been stored yet.
const DefaultIbex = `
Address = "http://127.0.0.1:10090"
BasicAuthUser = "ibex"
BasicAuthPass = "ibex"
Timeout = 3000
`
// notifyConfigGet returns the raw config value stored under ckey, falling
// back to the built-in SMTP/ibex templates when nothing is stored yet.
func (rt *Router) notifyConfigGet(c *gin.Context) {
	ckey := ginx.QueryStr(c, "ckey")

	content, err := models.ConfigsGet(rt.Ctx, ckey)
	if content == "" {
		switch ckey {
		case models.SMTP:
			content = DefaultSMTP
		case models.IBEX:
			content = DefaultIbex
		}
	}

	ginx.NewRender(c).Data(content, err)
}
// notifyConfigPut validates and stores an SMTP or ibex configuration; on
// an SMTP change the mail sender is restarted with the new settings.
func (rt *Router) notifyConfigPut(c *gin.Context) {
	var f models.Configs
	ginx.BindJSON(c, &f)

	var smtp aconf.SMTPConfig

	// validate the payload BEFORE persisting it; previously an SMTP config
	// with an empty host/port was saved first and only rejected afterwards,
	// leaving the invalid value in the configs table
	switch f.Ckey {
	case models.SMTP:
		ginx.Dangerous(toml.Unmarshal([]byte(f.Cval), &smtp))
		if smtp.Host == "" || smtp.Port == 0 {
			ginx.Bomb(200, "smtp host or port can not be empty")
		}
	case models.IBEX:
		var ibex aconf.Ibex
		ginx.Dangerous(toml.Unmarshal([]byte(f.Cval), &ibex))
	default:
		ginx.Bomb(200, "key %s can not modify", f.Ckey)
	}

	if err := models.ConfigsSet(rt.Ctx, f.Ckey, f.Cval); err != nil {
		ginx.Bomb(200, err.Error())
	}

	if f.Ckey == models.SMTP {
		// restart the email sender so the new settings take effect
		// (smtp was already parsed and validated above — no second parse)
		go sender.RestartEmailSender(smtp)
	}

	ginx.NewRender(c).Message(nil)
}

View File

@@ -1,58 +0,0 @@
package router
import (
"bytes"
"encoding/json"
"html/template"
"github.com/ccfos/nightingale/v6/center/cconf"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/tplx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
// notifyTplGets returns all notification templates.
func (rt *Router) notifyTplGets(c *gin.Context) {
	tpls, err := models.NotifyTplGets(rt.Ctx)
	ginx.NewRender(c).Data(tpls, err)
}
// notifyTplUpdateContent updates only the content of a notification template.
func (rt *Router) notifyTplUpdateContent(c *gin.Context) {
	var tpl models.NotifyTpl
	ginx.BindJSON(c, &tpl)
	ginx.NewRender(c).Message(tpl.UpdateContent(rt.Ctx))
}
// notifyTplUpdate updates a notification template record.
func (rt *Router) notifyTplUpdate(c *gin.Context) {
	var tpl models.NotifyTpl
	ginx.BindJSON(c, &tpl)
	ginx.NewRender(c).Message(tpl.Update(rt.Ctx))
}
// notifyTplPreview renders the submitted template against the built-in
// example alert event; template execution errors are returned as the
// preview text rather than as an HTTP error.
func (rt *Router) notifyTplPreview(c *gin.Context) {
	var event models.AlertCurEvent
	if err := json.Unmarshal([]byte(cconf.EVENT_EXAMPLE), &event); err != nil {
		ginx.NewRender(c).Message(err.Error())
		return
	}

	var f models.NotifyTpl
	ginx.BindJSON(c, &f)

	tpl, err := template.New(f.Channel).Funcs(tplx.TemplateFuncMap).Parse(f.Content)
	ginx.Dangerous(err)

	var rendered bytes.Buffer
	preview := ""
	if execErr := tpl.Execute(&rendered, event); execErr != nil {
		preview = execErr.Error()
	} else {
		preview = rendered.String()
	}

	ginx.NewRender(c).Data(preview, nil)
}

View File

@@ -1,157 +0,0 @@
package router
import (
"context"
"net"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"time"
pkgprom "github.com/ccfos/nightingale/v6/pkg/prom"
"github.com/gin-gonic/gin"
"github.com/prometheus/common/model"
"github.com/toolkits/pkg/ginx"
)
// queryFormItem is one range query in a batch request; Start/End are unix
// seconds and Step is the resolution in seconds.
type queryFormItem struct {
	Start int64  `json:"start" binding:"required"`
	End   int64  `json:"end" binding:"required"`
	Step  int64  `json:"step" binding:"required"`
	Query string `json:"query" binding:"required"`
}

// batchQueryForm is the payload of promBatchQueryRange: several range
// queries executed against a single datasource.
type batchQueryForm struct {
	DatasourceId int64           `json:"datasource_id" binding:"required"`
	Queries      []queryFormItem `json:"queries" binding:"required"`
}
// promBatchQueryRange executes each range query in the form against the
// requested datasource and returns the results in submission order.
func (rt *Router) promBatchQueryRange(c *gin.Context) {
	var form batchQueryForm
	ginx.Dangerous(c.BindJSON(&form))

	cli := rt.PromClients.GetCli(form.DatasourceId)

	var results []model.Value
	for _, q := range form.Queries {
		window := pkgprom.Range{
			Start: time.Unix(q.Start, 0),
			End:   time.Unix(q.End, 0),
			Step:  time.Duration(q.Step) * time.Second,
		}

		val, _, err := cli.QueryRange(context.Background(), q.Query, window)
		ginx.Dangerous(err)
		results = append(results, val)
	}

	ginx.NewRender(c).Data(results, nil)
}
// batchInstantForm is the payload of promBatchQueryInstant: several
// instant queries executed against a single datasource.
type batchInstantForm struct {
	DatasourceId int64             `json:"datasource_id" binding:"required"`
	Queries      []InstantFormItem `json:"queries" binding:"required"`
}

// InstantFormItem is one instant query; Time is the evaluation timestamp
// in unix seconds.
type InstantFormItem struct {
	Time  int64  `json:"time" binding:"required"`
	Query string `json:"query" binding:"required"`
}
// promBatchQueryInstant executes each instant query in the form against
// the requested datasource and returns the results in submission order.
func (rt *Router) promBatchQueryInstant(c *gin.Context) {
	var form batchInstantForm
	ginx.Dangerous(c.BindJSON(&form))

	cli := rt.PromClients.GetCli(form.DatasourceId)

	var results []model.Value
	for _, q := range form.Queries {
		val, _, err := cli.Query(context.Background(), q.Query, time.Unix(q.Time, 0))
		ginx.Dangerous(err)
		results = append(results, val)
	}

	ginx.NewRender(c).Data(results, nil)
}
// dsProxy reverse-proxies an arbitrary request to the datasource named by
// the :id URL param, rewriting the path, merging query strings and
// injecting the datasource's basic auth and extra headers.
func (rt *Router) dsProxy(c *gin.Context) {
	dsId := ginx.UrlParamInt64(c, "id")
	ds := rt.DatasourceCache.GetById(dsId)
	if ds == nil {
		c.String(http.StatusBadRequest, "no such datasource")
		return
	}

	target, err := url.Parse(ds.HTTPJson.Url)
	if err != nil {
		c.String(http.StatusInternalServerError, "invalid url: %s", ds.HTTPJson.Url)
		return
	}

	director := func(req *http.Request) {
		req.URL.Scheme = target.Scheme
		req.URL.Host = target.Host
		req.Host = target.Host
		req.Header.Set("Host", target.Host)

		// fe request e.g. /api/n9e/proxy/:id/*
		// strip the first five path segments and graft the remainder onto
		// the datasource's base path
		arr := strings.Split(req.URL.Path, "/")
		if len(arr) < 6 {
			// NOTE(review): writing through the gin context inside the
			// Director does not abort the proxied request — confirm this
			// short path is acceptable / cannot produce a double response
			c.String(http.StatusBadRequest, "invalid url path")
			return
		}
		req.URL.Path = strings.TrimRight(target.Path, "/") + "/" + strings.Join(arr[5:], "/")

		// merge the datasource's fixed query string with the caller's
		if target.RawQuery == "" || req.URL.RawQuery == "" {
			req.URL.RawQuery = target.RawQuery + req.URL.RawQuery
		} else {
			req.URL.RawQuery = target.RawQuery + "&" + req.URL.RawQuery
		}

		// explicitly blank User-Agent so Go's default is not appended
		if _, ok := req.Header["User-Agent"]; !ok {
			req.Header.Set("User-Agent", "")
		}

		if ds.AuthJson.BasicAuthUser != "" {
			req.SetBasicAuth(ds.AuthJson.BasicAuthUser, ds.AuthJson.BasicAuthPassword)
		}

		headerCount := len(ds.HTTPJson.Headers)
		if headerCount > 0 {
			for key, value := range ds.HTTPJson.Headers {
				req.Header.Set(key, value)
				if key == "Host" {
					// a Host override must be set on the request itself,
					// not only in the header map
					req.Host = value
				}
			}
		}
	}

	errFunc := func(w http.ResponseWriter, r *http.Request, err error) {
		http.Error(w, err.Error(), http.StatusBadGateway)
	}

	// timeouts are taken from the datasource's HTTP settings (milliseconds)
	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout: time.Duration(ds.HTTPJson.DialTimeout) * time.Millisecond,
		}).DialContext,
		ResponseHeaderTimeout: time.Duration(ds.HTTPJson.Timeout) * time.Millisecond,
		MaxIdleConnsPerHost:   ds.HTTPJson.MaxIdleConnsPerHost,
	}

	proxy := &httputil.ReverseProxy{
		Director:     director,
		Transport:    transport,
		ErrorHandler: errFunc,
	}

	proxy.ServeHTTP(c.Writer, c.Request)
}

View File

@@ -1,85 +0,0 @@
package router
import (
"net/http"
"strings"
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
// rolesGets returns every role defined in the system.
func (rt *Router) rolesGets(c *gin.Context) {
	roles, err := models.RoleGetsAll(rt.Ctx)
	ginx.NewRender(c).Data(roles, err)
}
// permsGets returns the operations permitted to the calling user, derived
// from the roles attached to the request context.
func (rt *Router) permsGets(c *gin.Context) {
	me := c.MustGet("user").(*models.User)
	ops, err := models.OperationsOfRole(rt.Ctx, strings.Fields(me.Roles))
	ginx.NewRender(c).Data(ops, err)
}
// roleAdd creates a new role.
func (rt *Router) roleAdd(c *gin.Context) {
	var role models.Role
	ginx.BindJSON(c, &role)
	ginx.NewRender(c).Message(role.Add(rt.Ctx))
}
// rolePut updates a role's name and note.
//
// Rejected cases: unknown role id, any modification of the built-in Admin
// role, and renaming to a name already used by another role.
func (rt *Router) rolePut(c *gin.Context) {
	var f models.Role
	ginx.BindJSON(c, &f)

	oldRule, err := models.RoleGet(rt.Ctx, "id=?", f.Id)
	ginx.Dangerous(err)

	if oldRule == nil {
		ginx.Bomb(http.StatusOK, "role not found")
	}

	// the built-in Admin role is immutable
	if oldRule.Name == "Admin" {
		ginx.Bomb(http.StatusOK, "admin role can not be modified")
	}

	if oldRule.Name != f.Name {
		// name changed, check duplication
		num, err := models.RoleCount(rt.Ctx, "name=? and id<>?", f.Name, oldRule.Id)
		ginx.Dangerous(err)
		if num > 0 {
			ginx.Bomb(http.StatusOK, "role name already exists")
		}
	}

	// only name and note are updatable through this endpoint
	oldRule.Name = f.Name
	oldRule.Note = f.Note

	ginx.NewRender(c).Message(oldRule.Update(rt.Ctx, "name", "note"))
}
// roleDel deletes the role named by the :id URL param. Deleting a
// non-existent role is a no-op; the built-in Admin role cannot be deleted.
func (rt *Router) roleDel(c *gin.Context) {
	id := ginx.UrlParamInt64(c, "id")

	target, err := models.RoleGet(rt.Ctx, "id=?", id)
	ginx.Dangerous(err)

	// bugfix: the nil check must come before accessing target.Name —
	// previously a request for a non-existent role dereferenced a nil
	// pointer and panicked
	if target == nil {
		ginx.NewRender(c).Message(nil)
		return
	}

	if target.Name == "Admin" {
		ginx.Bomb(http.StatusOK, "admin role can not be modified")
	}

	ginx.NewRender(c).Message(target.Del(rt.Ctx))
}
// roleGets lists every role (same data as rolesGets; kept for the
// role-management page's route).
func (rt *Router) roleGets(c *gin.Context) {
	roles, err := models.RoleGetsAll(rt.Ctx)
	ginx.NewRender(c).Data(roles, err)
}

View File

@@ -1,43 +0,0 @@
package router
import (
"net/http"
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
// operationOfRole returns the operations bound to the role named by the
// :id URL param.
func (rt *Router) operationOfRole(c *gin.Context) {
	roleId := ginx.UrlParamInt64(c, "id")

	role, err := models.RoleGet(rt.Ctx, "id=?", roleId)
	ginx.Dangerous(err)
	if role == nil {
		ginx.Bomb(http.StatusOK, "role not found")
	}

	ops, err := models.OperationsOfRole(rt.Ctx, []string{role.Name})
	ginx.NewRender(c).Data(ops, err)
}
// roleBindOperation replaces the operation bindings of the role named by
// the :id URL param; the Admin role's bindings are immutable.
func (rt *Router) roleBindOperation(c *gin.Context) {
	roleId := ginx.UrlParamInt64(c, "id")

	role, err := models.RoleGet(rt.Ctx, "id=?", roleId)
	ginx.Dangerous(err)
	if role == nil {
		ginx.Bomb(http.StatusOK, "role not found")
	}
	if role.Name == "Admin" {
		ginx.Bomb(http.StatusOK, "admin role can not be modified")
	}

	var ops []string
	ginx.BindJSON(c, &ops)

	ginx.NewRender(c).Message(models.RoleOperationBind(rt.Ctx, role.Name, ops))
}
// operations returns the full catalogue of known operations.
func (rt *Router) operations(c *gin.Context) {
	render := ginx.NewRender(c)
	render.Data(rt.Operations.Ops, nil)
}

View File

@@ -1,18 +0,0 @@
package router
import (
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
// serversGet lists all registered alerting engine instances.
func (rt *Router) serversGet(c *gin.Context) {
	engines, err := models.AlertingEngineGets(rt.Ctx, "")
	ginx.NewRender(c).Data(engines, err)
}
// serverClustersGet lists the clusters the alerting engines belong to.
func (rt *Router) serverClustersGet(c *gin.Context) {
	clusters, err := models.AlertingEngineGetsClusters(rt.Ctx, "")
	ginx.NewRender(c).Data(clusters, err)
}

View File

@@ -1,169 +0,0 @@
package sso
import (
"log"
"github.com/BurntSushi/toml"
"github.com/ccfos/nightingale/v6/center/cconf"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/cas"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/ldapx"
"github.com/ccfos/nightingale/v6/pkg/oauth2x"
"github.com/ccfos/nightingale/v6/pkg/oidcx"
"github.com/toolkits/pkg/logger"
)
// SsoClient bundles the per-protocol single-sign-on clients built by Init.
// A nil field means the corresponding protocol failed to initialize or is
// not configured.
type SsoClient struct {
	OIDC   *oidcx.SsoClient
	LDAP   *ldapx.SsoClient
	CAS    *cas.SsoClient
	OAuth2 *oauth2x.SsoClient
}
// LDAP is the default LDAP configuration template (TOML, disabled) seeded
// into the sso_config table on first start.
const LDAP = `
Enable = false
Host = 'ldap.example.org'
Port = 389
BaseDn = 'dc=example,dc=org'
BindUser = 'cn=manager,dc=example,dc=org'
BindPass = '*******'
AuthFilter = '(&(uid=%s))'
CoverAttributes = true
TLS = false
StartTLS = true
DefaultRoles = ['Standard']

[Attributes]
Nickname = 'cn'
Phone = 'mobile'
Email = 'mail'
`

// OAuth2 is the default OAuth2 configuration template (TOML, disabled)
// seeded into the sso_config table on first start.
const OAuth2 = `
Enable = false
DisplayName = 'OAuth2登录'
RedirectURL = 'http://127.0.0.1:18000/callback/oauth'
SsoAddr = 'https://sso.example.com/oauth2/authorize'
TokenAddr = 'https://sso.example.com/oauth2/token'
UserInfoAddr = 'https://api.example.com/api/v1/user/info'
TranTokenMethod = 'header'
ClientId = ''
ClientSecret = ''
CoverAttributes = true
DefaultRoles = ['Standard']
UserinfoIsArray = false
UserinfoPrefix = 'data'
Scopes = ['profile', 'email', 'phone']

[Attributes]
Username = 'username'
Nickname = 'nickname'
Phone = 'phone_number'
Email = 'email'
`

// CAS is the default CAS configuration template (TOML, disabled) seeded
// into the sso_config table on first start.
const CAS = `
Enable = false
SsoAddr = 'https://cas.example.com/cas/'
RedirectURL = 'http://127.0.0.1:18000/callback/cas'
DisplayName = 'CAS登录'
CoverAttributes = false
DefaultRoles = ['Standard']

[Attributes]
Nickname = 'nickname'
Phone = 'phone_number'
Email = 'email'
`

// OIDC is the default OIDC configuration template (TOML, disabled) seeded
// into the sso_config table on first start.
const OIDC = `
Enable = false
DisplayName = 'OIDC登录'
RedirectURL = 'http://n9e.com/callback'
SsoAddr = 'http://sso.example.org'
ClientId = ''
ClientSecret = ''
CoverAttributes = true
DefaultRoles = ['Standard']

[Attributes]
Nickname = 'nickname'
Phone = 'phone_number'
Email = 'email'
`
// Init seeds missing SSO provider rows with the default templates above,
// then builds one client per stored configuration. A malformed stored
// config aborts the process (log.Fatalln), except for OIDC where provider
// discovery failures are logged and leave the OIDC field nil.
//
// NOTE(review): the center parameter is not referenced in this body —
// confirm whether it is still needed.
func Init(center cconf.Center, ctx *ctx.Context) *SsoClient {
	ssoClient := new(SsoClient)

	// seed defaults for any provider that has no stored config yet
	m := make(map[string]string)
	m["LDAP"] = LDAP
	m["CAS"] = CAS
	m["OIDC"] = OIDC
	m["OAuth2"] = OAuth2

	for name, config := range m {
		count, err := models.SsoConfigCountByName(ctx, name)
		if err != nil {
			logger.Error(err)
			continue
		}

		if count > 0 {
			// already present: never overwrite an operator's config
			continue
		}

		ssoConfig := models.SsoConfig{
			Name:    name,
			Content: config,
		}

		err = ssoConfig.Create(ctx)
		if err != nil {
			log.Fatalln(err)
		}
	}

	// build the runtime clients from whatever is now stored
	configs, err := models.SsoConfigGets(ctx)
	if err != nil {
		log.Fatalln(err)
	}

	for _, cfg := range configs {
		switch cfg.Name {
		case "LDAP":
			var config ldapx.Config
			err := toml.Unmarshal([]byte(cfg.Content), &config)
			if err != nil {
				log.Fatalln("init ldap failed", err)
			}
			ssoClient.LDAP = ldapx.New(config)
		case "OIDC":
			var config oidcx.Config
			err := toml.Unmarshal([]byte(cfg.Content), &config)
			if err != nil {
				log.Fatalln("init oidc failed:", err)
			}
			// OIDC construction can fail at runtime (provider discovery);
			// keep booting with OIDC disabled instead of exiting
			oidcClient, err := oidcx.New(config)
			if err != nil {
				logger.Error("init oidc failed:", err)
			} else {
				ssoClient.OIDC = oidcClient
			}
		case "CAS":
			var config cas.Config
			err := toml.Unmarshal([]byte(cfg.Content), &config)
			if err != nil {
				log.Fatalln("init cas failed:", err)
			}
			ssoClient.CAS = cas.New(config)
		case "OAuth2":
			var config oauth2x.Config
			err := toml.Unmarshal([]byte(cfg.Content), &config)
			if err != nil {
				log.Fatalln("init oauth2 failed:", err)
			}
			ssoClient.OAuth2 = oauth2x.New(config)
		}
	}

	return ssoClient
}

View File

@@ -1,9 +0,0 @@
package cli
import (
"github.com/ccfos/nightingale/v6/cli/upgrade"
)
// Upgrade runs the v5 -> v6 database migration driven by the given v5
// webapi config file; thin wrapper around upgrade.Upgrade.
func Upgrade(configFile string) error {
	return upgrade.Upgrade(configFile)
}

View File

@@ -1,63 +0,0 @@
package upgrade
import (
"bytes"
"path"
"github.com/ccfos/nightingale/v6/pkg/cfg"
"github.com/ccfos/nightingale/v6/pkg/ormx"
"github.com/ccfos/nightingale/v6/pkg/tlsx"
"github.com/koding/multiconfig"
)
// Config is the subset of the v5 webapi configuration the migration needs:
// the database connection and the prometheus clusters to convert into
// datasource records.
type Config struct {
	DB       ormx.DBConfig
	Clusters []ClusterOptions
}

// ClusterOptions mirrors a v5 prometheus cluster entry. Headers is a flat
// [key, value, key, value, ...] list; Timeout/DialTimeout are in
// milliseconds (matching how Upgrade consumes them).
type ClusterOptions struct {
	Name string
	Prom string

	BasicAuthUser string
	BasicAuthPass string

	Headers []string

	Timeout     int64
	DialTimeout int64

	UseTLS bool
	tlsx.ClientConfig

	MaxIdleConnsPerHost int
}
func Parse(fpath string, configPtr interface{}) error {
var (
tBuf []byte
)
loaders := []multiconfig.Loader{
&multiconfig.TagLoader{},
&multiconfig.EnvironmentLoader{},
}
s := cfg.NewFileScanner()
s.Read(path.Join(fpath))
tBuf = append(tBuf, s.Data()...)
tBuf = append(tBuf, []byte("\n")...)
if s.Err() != nil {
return s.Err()
}
if len(tBuf) != 0 {
loaders = append(loaders, &multiconfig.TOMLLoader{Reader: bytes.NewReader(tBuf)})
}
m := multiconfig.DefaultLoader{
Loader: multiconfig.MultiLoader(loaders...),
Validator: multiconfig.MultiValidator(&multiconfig.RequiredValidator{}),
}
return m.Load(configPtr)
}

View File

@@ -1,19 +0,0 @@
# v5 升级 v6 手册
1. 解压 n9e 安装包
2. 导入 upgrade.sql 到 n9e_v5 数据库
```
mysql -h 127.0.0.1 -u root -p1234 < cli/upgrade/upgrade.sql
```
3. 执行 n9e-cli 完成数据库表结构升级, webapi.conf 为 v5 版本 n9e-webapi 正在使用的配置文件
```
./n9e-cli --upgrade --config webapi.conf
```
4. 修改 n9e 配置文件中的数据库为 n9e_v5,然后启动 n9e 进程
```
nohup ./n9e &> n9e.log &
```
5. n9e 监听的端口为 17000;如果想使用之前的端口,可以在配置文件中将端口改为 18000

View File

@@ -1,117 +0,0 @@
package upgrade
import (
"context"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/storage"
"github.com/toolkits/pkg/logger"
)
// Upgrade migrates a v5 database to the v6 schema: every configured
// prometheus cluster becomes a datasource record (idempotently), then the
// datasource references of rules, mutes, subscriptions, recording rules
// and events are rewritten against the resulting name->datasource map.
func Upgrade(configFile string) error {
	var config Config
	// bugfix: the parse error was previously ignored, letting the
	// migration proceed against a zero-value config
	if err := Parse(configFile, &config); err != nil {
		return err
	}

	db, err := storage.New(config.DB)
	if err != nil {
		return err
	}
	ctx := ctx.NewContext(context.Background(), db)

	for _, cluster := range config.Clusters {
		count, err := models.GetDatasourcesCountBy(ctx, "", "", cluster.Name)
		if err != nil {
			logger.Errorf("get datasource %s count error: %v", cluster.Name, err)
			continue
		}

		if count > 0 {
			// already migrated — keep the run idempotent
			continue
		}

		// v5 stores headers as a flat [key, value, ...] list
		header := make(map[string]string)
		headerCount := len(cluster.Headers)
		if headerCount > 0 && headerCount%2 == 0 {
			for i := 0; i < len(cluster.Headers); i += 2 {
				header[cluster.Headers[i]] = cluster.Headers[i+1]
			}
		}

		authJSON := models.Auth{
			BasicAuthUser:     cluster.BasicAuthUser,
			BasicAuthPassword: cluster.BasicAuthPass,
		}

		httpJSON := models.HTTP{
			Timeout:     cluster.Timeout,
			DialTimeout: cluster.DialTimeout,
			TLS: models.TLS{
				SkipTlsVerify: cluster.UseTLS,
			},
			MaxIdleConnsPerHost: cluster.MaxIdleConnsPerHost,
			Url:                 cluster.Prom,
			Headers:             header,
		}

		datasource := models.Datasource{
			PluginId:       1,
			PluginType:     "prometheus",
			PluginTypeName: "Prometheus Like",
			Name:           cluster.Name,
			HTTPJson:       httpJSON,
			AuthJson:       authJSON,
			ClusterName:    "default",
			Status:         "enabled",
		}

		if err = datasource.Add(ctx); err != nil {
			logger.Errorf("add datasource %s error: %v", cluster.Name, err)
		}
	}

	datasources, err := models.GetDatasources(ctx)
	if err != nil {
		return err
	}

	// index datasources by name for the per-table rewrites below
	m := make(map[string]models.Datasource, len(datasources))
	for i := 0; i < len(datasources); i++ {
		m[datasources[i].Name] = datasources[i]
	}

	// alert rule
	if err = models.AlertRuleUpgradeToV6(ctx, m); err != nil {
		return err
	}

	// alert mute
	if err = models.AlertMuteUpgradeToV6(ctx, m); err != nil {
		return err
	}

	// alert subscribe
	if err = models.AlertSubscribeUpgradeToV6(ctx, m); err != nil {
		return err
	}

	// recording rule
	if err = models.RecordingRuleUpgradeToV6(ctx, m); err != nil {
		return err
	}

	// alert cur event
	if err = models.AlertCurEventUpgradeToV6(ctx, m); err != nil {
		return err
	}

	// alert his event
	if err = models.AlertHisEventUpgradeToV6(ctx, m); err != nil {
		return err
	}

	return nil
}

View File

@@ -1,88 +0,0 @@
-- Migration script: upgrades an existing n9e v5 database in place with the
-- columns and tables introduced in v6 (datasources, built-in boards,
-- notify templates, SSO configuration).
use n9e_v5;

-- Grant the new explorer and built-in pages to the existing roles.
insert into `role_operation`(role_name, operation) values('Guest', '/log/explorer');
insert into `role_operation`(role_name, operation) values('Guest', '/trace/explorer');
insert into `role_operation`(role_name, operation) values('Standard', '/log/explorer');
insert into `role_operation`(role_name, operation) values('Standard', '/trace/explorer');
insert into `role_operation`(role_name, operation) values('Standard', '/alert-rules-built-in');
insert into `role_operation`(role_name, operation) values('Standard', '/dashboards-built-in');
insert into `role_operation`(role_name, operation) values('Standard', '/trace/dependencies');

-- Dashboards: flag built-in boards and allow hiding them from listings.
alter table `board` add built_in tinyint(1) not null default 0 comment '0:false 1:true';
alter table `board` add hide tinyint(1) not null default 0 comment '0:false 1:true';

-- Shared charts now reference a concrete datasource instead of a cluster name.
alter table `chart_share` add datasource_id bigint unsigned not null default 0;

-- Alert rules: multi-datasource support plus structured config/annotations.
alter table `alert_rule` add datasource_ids varchar(255) not null default '';
alter table `alert_rule` add rule_config text not null comment 'rule_config';
alter table `alert_rule` add annotations text not null comment 'annotations';

-- Mutes: multi-datasource support and periodic (recurring) mute windows.
alter table `alert_mute` add datasource_ids varchar(255) not null default '';
alter table `alert_mute` add periodic_mutes varchar(4096) not null default '';
alter table `alert_mute` add mute_time_type tinyint(1) not null default 0;

-- Subscriptions: multi-datasource support, product dimension and webhook overrides.
alter table `alert_subscribe` add datasource_ids varchar(255) not null default '';
alter table `alert_subscribe` add prod varchar(255) not null default '';
alter table `alert_subscribe` add webhooks text;
alter table `alert_subscribe` add redefine_webhooks tinyint(1) default 0;
alter table `alert_subscribe` add for_duration bigint not null default 0;

alter table `recording_rule` add datasource_ids varchar(255) default '';

-- Targets may have an empty cluster from now on.
alter table `target` modify cluster varchar(128) not null default '';

-- Events (current and historical) carry the datasource id and the rule
-- config/annotations snapshot taken at firing time.
alter table `alert_cur_event` add datasource_id bigint unsigned not null default 0;
alter table `alert_cur_event` add annotations text not null comment 'annotations';
alter table `alert_cur_event` add rule_config text not null comment 'rule_config';
alter table `alert_his_event` add datasource_id bigint unsigned not null default 0;
alter table `alert_his_event` add annotations text not null comment 'annotations';
alter table `alert_his_event` add rule_config text not null comment 'rule_config';
alter table `alerting_engines` add datasource_id bigint unsigned not null default 0;

-- New in v6: datasources are first-class records instead of config entries.
CREATE TABLE `datasource`
(
`id` int unsigned NOT NULL AUTO_INCREMENT,
`name` varchar(255) not null default '',
`description` varchar(255) not null default '',
`category` varchar(255) not null default '',
`plugin_id` int unsigned not null default 0,
`plugin_type` varchar(255) not null default '',
`plugin_type_name` varchar(255) not null default '',
`cluster_name` varchar(255) not null default '',
`settings` text not null,
`status` varchar(255) not null default '',
`http` varchar(4096) not null default '',
`auth` varchar(8192) not null default '',
`created_at` bigint not null default 0,
`created_by` varchar(64) not null default '',
`updated_at` bigint not null default 0,
`updated_by` varchar(64) not null default '',
PRIMARY KEY (`id`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;

-- User-defined categories for built-in alert rules / dashboards.
CREATE TABLE `builtin_cate` (
`id` bigint unsigned not null auto_increment,
`name` varchar(191) not null,
`user_id` bigint not null default 0,
PRIMARY KEY (`id`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;

-- Notification templates, one per channel (enforced by the unique key).
CREATE TABLE `notify_tpl` (
`id` bigint unsigned not null auto_increment,
`channel` varchar(32) not null,
`name` varchar(255) not null,
`content` text not null,
PRIMARY KEY (`id`),
UNIQUE KEY (`channel`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;

-- Single-sign-on provider configuration, one row per provider name.
CREATE TABLE `sso_config` (
`id` bigint unsigned not null auto_increment,
`name` varchar(255) not null,
`content` text not null,
PRIMARY KEY (`id`),
UNIQUE KEY (`name`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;

View File

@@ -1,69 +0,0 @@
package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"syscall"
"github.com/ccfos/nightingale/v6/alert"
"github.com/ccfos/nightingale/v6/pkg/osx"
"github.com/ccfos/nightingale/v6/pkg/version"
"github.com/toolkits/pkg/runner"
)
var (
showVersion = flag.Bool("version", false, "Show version.")
configDir = flag.String("configs", osx.GetEnv("N9E_CONFIGS", "etc"), "Specify configuration directory.(env:N9E_CONFIGS)")
cryptoKey = flag.String("crypto-key", "", "Specify the secret key for configuration file field encryption.")
)
// main is the entry point of the alert process: it parses flags, optionally
// prints the version, initializes the alert module, then blocks until a
// termination signal arrives, running the cleanup function before exiting.
func main() {
	flag.Parse()

	// -version: print the build version and exit immediately.
	if *showVersion {
		fmt.Println(version.Version)
		os.Exit(0)
	}

	printEnv()

	// Initialize returns a cleanup function that must run before exit.
	cleanFunc, err := alert.Initialize(*configDir, *cryptoKey)
	if err != nil {
		log.Fatalln("failed to initialize:", err)
	}

	// Exit code defaults to 1; only a graceful shutdown signal sets it to 0.
	code := 1
	sc := make(chan os.Signal, 1)
	signal.Notify(sc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)

EXIT:
	for {
		sig := <-sc
		fmt.Println("received signal:", sig.String())
		switch sig {
		case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT:
			code = 0
			break EXIT
		case syscall.SIGHUP:
			// SIGHUP is currently ignored; configuration reload is not implemented.
		default:
			break EXIT
		}
	}

	cleanFunc()
	fmt.Println("process exited")
	os.Exit(code)
}
// printEnv dumps basic runtime information (working directory, hostname,
// fd and vm limits) to stdout as a startup diagnostic.
func printEnv() {
	runner.Init()
	diagnostics := []struct {
		label string
		value interface{}
	}{
		{"runner.cwd:", runner.Cwd},
		{"runner.hostname:", runner.Hostname},
		{"runner.fd_limits:", runner.FdLimits()},
		{"runner.vm_limits:", runner.VMLimits()},
	}
	for _, d := range diagnostics {
		fmt.Println(d.label, d.value)
	}
}

View File

@@ -1,69 +0,0 @@
package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"syscall"
"github.com/ccfos/nightingale/v6/center"
"github.com/ccfos/nightingale/v6/pkg/osx"
"github.com/ccfos/nightingale/v6/pkg/version"
"github.com/toolkits/pkg/runner"
)
var (
showVersion = flag.Bool("version", false, "Show version.")
configDir = flag.String("configs", osx.GetEnv("N9E_CONFIGS", "etc"), "Specify configuration directory.(env:N9E_CONFIGS)")
cryptoKey = flag.String("crypto-key", "", "Specify the secret key for configuration file field encryption.")
)
// main is the entry point of the center process: it parses flags, optionally
// prints the version, initializes the center module, then blocks until a
// termination signal arrives, running the cleanup function before exiting.
func main() {
	flag.Parse()

	// -version: print the build version and exit immediately.
	if *showVersion {
		fmt.Println(version.Version)
		os.Exit(0)
	}

	printEnv()

	// Initialize returns a cleanup function that must run before exit.
	cleanFunc, err := center.Initialize(*configDir, *cryptoKey)
	if err != nil {
		log.Fatalln("failed to initialize:", err)
	}

	// Exit code defaults to 1; only a graceful shutdown signal sets it to 0.
	code := 1
	sc := make(chan os.Signal, 1)
	signal.Notify(sc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)

EXIT:
	for {
		sig := <-sc
		fmt.Println("received signal:", sig.String())
		switch sig {
		case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT:
			code = 0
			break EXIT
		case syscall.SIGHUP:
			// SIGHUP is currently ignored; configuration reload is not implemented.
		default:
			break EXIT
		}
	}

	cleanFunc()
	fmt.Println("process exited")
	os.Exit(code)
}
// printEnv dumps basic runtime information (working directory, hostname,
// fd and vm limits) to stdout as a startup diagnostic.
func printEnv() {
	runner.Init()
	diagnostics := []struct {
		label string
		value interface{}
	}{
		{"runner.cwd:", runner.Cwd},
		{"runner.hostname:", runner.Hostname},
		{"runner.fd_limits:", runner.FdLimits()},
		{"runner.vm_limits:", runner.VMLimits()},
	}
	for _, d := range diagnostics {
		fmt.Println(d.label, d.value)
	}
}

View File

@@ -1,40 +0,0 @@
package main
import (
"flag"
"fmt"
"os"
"github.com/ccfos/nightingale/v6/cli"
"github.com/ccfos/nightingale/v6/pkg/version"
)
var (
upgrade = flag.Bool("upgrade", false, "Upgrade the database.")
showVersion = flag.Bool("version", false, "Show version.")
configFile = flag.String("config", "", "Specify webapi.conf of v5.x version")
)
// main drives the n9e-cli command:
//
//	-version  print the build version and exit
//	-upgrade  migrate a v5.x database, using the webapi.conf passed via -config
//
// With no action flag it prints usage instead of exiting silently.
func main() {
	flag.Parse()

	if *showVersion {
		fmt.Println(version.Version)
		os.Exit(0)
	}

	if *upgrade {
		// -upgrade requires the path of the v5 webapi.conf file (not a directory).
		if *configFile == "" {
			fmt.Println("Please specify the configuration file.")
			os.Exit(1)
		}

		if err := cli.Upgrade(*configFile); err != nil {
			fmt.Println(err)
			os.Exit(1)
		}

		// Println (not Print) so the success message ends with a newline.
		fmt.Println("Upgrade successfully.")
		os.Exit(0)
	}

	// No action flag was given: show usage so the run is not a silent no-op.
	flag.Usage()
}

View File

@@ -1,69 +0,0 @@
package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"syscall"
"github.com/ccfos/nightingale/v6/pkg/osx"
"github.com/ccfos/nightingale/v6/pkg/version"
"github.com/ccfos/nightingale/v6/pushgw"
"github.com/toolkits/pkg/runner"
)
var (
showVersion = flag.Bool("version", false, "Show version.")
configDir = flag.String("configs", osx.GetEnv("N9E_CONFIGS", "etc"), "Specify configuration directory.(env:N9E_CONFIGS)")
cryptoKey = flag.String("crypto-key", "", "Specify the secret key for configuration file field encryption.")
)
// main is the entry point of the pushgw process: it parses flags, optionally
// prints the version, initializes the pushgw module, then blocks until a
// termination signal arrives, running the cleanup function before exiting.
func main() {
	flag.Parse()

	// -version: print the build version and exit immediately.
	if *showVersion {
		fmt.Println(version.Version)
		os.Exit(0)
	}

	printEnv()

	// Initialize returns a cleanup function that must run before exit.
	cleanFunc, err := pushgw.Initialize(*configDir, *cryptoKey)
	if err != nil {
		log.Fatalln("failed to initialize:", err)
	}

	// Exit code defaults to 1; only a graceful shutdown signal sets it to 0.
	code := 1
	sc := make(chan os.Signal, 1)
	signal.Notify(sc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)

EXIT:
	for {
		sig := <-sc
		fmt.Println("received signal:", sig.String())
		switch sig {
		case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT:
			code = 0
			break EXIT
		case syscall.SIGHUP:
			// SIGHUP is currently ignored; configuration reload is not implemented.
		default:
			break EXIT
		}
	}

	cleanFunc()
	fmt.Println("process exited")
	os.Exit(code)
}
// printEnv dumps basic runtime information (working directory, hostname,
// fd and vm limits) to stdout as a startup diagnostic.
func printEnv() {
	runner.Init()
	fmt.Println("runner.cwd:", runner.Cwd)
	fmt.Println("runner.hostname:", runner.Hostname)
	fmt.Println("runner.fd_limits:", runner.FdLimits())
	fmt.Println("runner.vm_limits:", runner.VMLimits())
}

View File

@@ -1,76 +0,0 @@
package conf
import (
"fmt"
"os"
"strings"
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/center/cconf"
"github.com/ccfos/nightingale/v6/pkg/cfg"
"github.com/ccfos/nightingale/v6/pkg/httpx"
"github.com/ccfos/nightingale/v6/pkg/logx"
"github.com/ccfos/nightingale/v6/pkg/ormx"
"github.com/ccfos/nightingale/v6/pushgw/pconf"
"github.com/ccfos/nightingale/v6/storage"
)
// ConfigType aggregates the configuration of every embedded module, loaded
// from the files under the configuration directory by InitConfig.
type ConfigType struct {
	Global GlobalConfig        // process-wide settings
	Log    logx.Config         // logging
	HTTP   httpx.Config        // HTTP server (port is reused for the heartbeat endpoint)
	DB     ormx.DBConfig       // relational database; DSN may be stored encrypted
	Redis  storage.RedisConfig // redis storage
	Pushgw pconf.Pushgw        // push gateway module
	Alert  aconf.Alert         // alert module (incl. heartbeat identity)
	Center cconf.Center        // center module
}

// GlobalConfig holds process-wide options.
type GlobalConfig struct {
	RunMode string // run mode, e.g. "release" or "debug"
}
// InitConfig loads every configuration file under configDir into a
// ConfigType, runs each module's PreCheck, decrypts encrypted fields using
// cryptoKey, and fills in the alert heartbeat identity when it is not set.
//
// It returns the populated config, or an error if loading or decryption fails.
func InitConfig(configDir, cryptoKey string) (*ConfigType, error) {
	var config = new(ConfigType)

	if err := cfg.LoadConfigByDir(configDir, config); err != nil {
		return nil, fmt.Errorf("failed to load configs of directory: %s error: %s", configDir, err)
	}

	// Normalize/validate module configs before any of them is used.
	config.Pushgw.PreCheck()
	config.Alert.PreCheck()
	config.Center.PreCheck()

	err := decryptConfig(config, cryptoKey)
	if err != nil {
		return nil, err
	}

	if config.Alert.Heartbeat.IP == "" {
		// Auto-detecting the outbound IP is error-prone in some environments,
		// so the hostname is used as a (reasonably unique) identity instead.
		hostname, err := os.Hostname()
		if err != nil {
			fmt.Println("failed to get hostname:", err)
			os.Exit(1)
		}

		if strings.Contains(hostname, "localhost") {
			fmt.Println("Warning! hostname contains substring localhost, setting a more unique hostname is recommended")
		}

		config.Alert.Heartbeat.IP = hostname
	}

	// The heartbeat endpoint advertises this process as <identity>:<http-port>.
	config.Alert.Heartbeat.Endpoint = fmt.Sprintf("%s:%d", config.Alert.Heartbeat.IP, config.HTTP.Port)

	return config, nil
}

View File

@@ -1,62 +0,0 @@
package conf
import (
"fmt"
"github.com/ccfos/nightingale/v6/pkg/secu"
)
// decryptConfig decrypts, in place, every secret stored in the loaded
// configuration: the database DSN, the basic-auth password maps of the four
// embedded HTTP endpoints, and each remote writer's basic-auth password.
// cryptoKey is the symmetric key used by secu.DealWithDecrypt.
func decryptConfig(config *ConfigType, cryptoKey string) error {
	decryptDsn, err := secu.DealWithDecrypt(config.DB.DSN, cryptoKey)
	if err != nil {
		return fmt.Errorf("failed to decrypt the db dsn: %s", err)
	}
	config.DB.DSN = decryptDsn

	// decryptAuthMap decrypts every password value of a basic-auth map in place.
	decryptAuthMap := func(m map[string]string) error {
		for k := range m {
			decryptPwd, err := secu.DealWithDecrypt(m[k], cryptoKey)
			if err != nil {
				return fmt.Errorf("failed to decrypt http basic auth password: %s", err)
			}
			m[k] = decryptPwd
		}
		return nil
	}

	// The four endpoint auth maps get identical treatment; iterate instead of
	// repeating the loop (maps are reference types, so mutation is in place).
	for _, m := range []map[string]string{
		config.HTTP.Alert.BasicAuth,
		config.HTTP.Pushgw.BasicAuth,
		config.HTTP.Heartbeat.BasicAuth,
		config.HTTP.Service.BasicAuth,
	} {
		if err := decryptAuthMap(m); err != nil {
			return err
		}
	}

	for i, v := range config.Pushgw.Writers {
		decryptWriterPwd, err := secu.DealWithDecrypt(v.BasicAuthPass, cryptoKey)
		if err != nil {
			return fmt.Errorf("failed to decrypt writer basic auth password: %s", err)
		}
		config.Pushgw.Writers[i].BasicAuthPass = decryptWriterPwd
	}

	return nil
}

View File

@@ -3,5 +3,3 @@
- [xiaoziv](https://github.com/xiaoziv)
- [tanxiao1990](https://github.com/tanxiao1990)
- [bbaobelief](https://github.com/bbaobelief)
- [freedomkk-qfeng](https://github.com/freedomkk-qfeng)
- [lsy1990](https://github.com/lsy1990)

View File

@@ -1,36 +1,29 @@
[夜莺监控](https://github.com/ccfos/nightingale "夜莺监控")是一款开源云原生监控系统,由滴滴设计开发,2020 年 3 月份开源之后,凭借其优秀的产品设计、灵活性架构和明确清晰的定位,夜莺监控快速发展为国内最活跃的企业级云原生监控方案。截止当前(具体指 2022 年 8 月),在 [Github](https://github.com/ccfos/nightingale "Github") 上已经迭代发布了 **70** 多个版本,获得了 **5K** 多个 Star,**80** 多位代码贡献者。快速的迭代,也让夜莺监控的用户群越来越大,涉及各行各业。
# 夜莺开源项目和社区治理架构(草案)
更进一步,夜莺监控于 2022 年 5 月 11 日,正式捐赠予中国计算机学会开源发展委员会 [CCF ODC](https://www.ccf.org.cn/kyfzwyh/ "CCF ODC"),为 CCF ODC 成立后接受捐赠的第一个开源项目。
## 社区架构
开源项目要更有生命力,离不开开放的治理架构和源源不断的开发者共同参与。夜莺监控项目加入 CCF 开源大家庭后,能在计算机学会的支持和带动下,进一步结合云原生、可观测、国产化等多个技术发展的需求,建立开放、中立的开源治理架构,打造更专业、有活力的开发者社区。
### 用户(User)
**今天,我们郑重发布夜莺监控开源社区治理架构,并公示相关的任命和社区荣誉,期待开源的道路上,一起同行。**
> 欢迎任何个人、公司以及组织,使用夜莺监控,并积极的反馈 bug、提交功能需求、以及相互帮助我们推荐使用 [github issue](https://github.com/ccfos/nightingale/issues) 来跟踪 bug 和管理需求。
# 夜莺监控开源社区架构
社区用户,可以通过在 **[Who is Using Nightingale](https://github.com/ccfos/nightingale/issues/897)** 登记您的使用情况,并分享您使用夜莺监控的经验,将会自动进入 **[END USERS](./end-users.md)** 列表,并获得社区的 **VIP Support**
### User|用户
### 贡献者(Contributer)
> 欢迎任何个人、公司以及组织,使用夜莺监控,并积极的反馈 bug、提交功能需求、以及相互帮助我们推荐使用 [Github Issue](https://github.com/ccfos/nightingale/issues "Github Issue") 来跟踪 bug 和管理需求。
> 欢迎每一位用户,包括但不限于以下列方式参与到夜莺开源社区并做出贡献:
社区用户,可以通过在 **[Who is Using Nightingale](https://github.com/ccfos/nightingale/issues/897 "Who is Using Nightingale")** 登记您的使用情况,并分享您使用夜莺监控的经验,将会自动进入 **[END USERS](https://github.com/ccfos/nightingale/blob/main/doc/end-users.md "END USERS")** 文件列表,并获得社区的 **VIP Support**
### Contributor|贡献者
> 欢迎每一位用户,包括但不限于以下方式参与到夜莺开源社区并做出贡献:
1. 在 [Github Issue](https://github.com/ccfos/nightingale/issues "Github Issue") 中积极参与讨论,参与社区活动;
1. 在 [github issue](https://github.com/ccfos/nightingale/issues) 中积极参与讨论,参与社区活动;
1. 提交代码补丁;
1. 翻译、修订、补充和完善[文档](https://n9e.github.io "文档")
1. 翻译、修订、补充和完善[文档](https://n9e.github.io)
1. 分享夜莺监控的使用经验,积极布道;
1. 提交建议 / 批评;
年度累计向 [CCFOS/NIGHTINGALE](https://github.com/ccfos/nightingale "CCFOS/NIGHTINGALE") 提交 **5** 个PR被合并或者因为其他贡献被**项目管委会**一致认可,将会自动进入到 **[ACTIVE CONTRIBUTORS](https://github.com/ccfos/nightingale/blob/main/doc/active-contributors.md "ACTIVE CONTRIBUTORS")** 列表,并获得夜莺开源社区颁发的证书,享有夜莺开源社区一定的权益和福利。
年度累计向 [CCFOS/NIGHTINGALE](https://github.com/ccfos/nightingale) 提交 **5** 个PR被合并或者因为其他贡献被**项目管委会**一致认可,将会自动进入到 **[ACTIVE CONTRIBUTORS](./active-contributors.md)** 列表,并获得 **[CCF ODC](https://www.ccf.org.cn/kyfzwyh/)** 颁发的电子证书,享有夜莺开源社区一定的权益和福利。
所有向 [CCFOS/NIGHTINGALE](https://github.com/ccfos/nightingale "CCFOS/NIGHTINGALE") 提交过PR被合并或者做出过重要贡献的 Contributor都会被永久记载于 [CONTRIBUTORS](https://github.com/ccfos/nightingale/blob/main/doc/contributors.md "CONTRIBUTORS") 列表。
### Committer|提交者
### 提交者(Committer)
> Committer 是指拥有 [CCFOS/NIGHTINGALE](https://github.com/ccfos/nightingale "CCFOS/NIGHTINGALE") 代码仓库写操作权限的贡献者。原则上 Committer 能够自主决策某个代码补丁是否可以合入到夜莺代码仓库,但是项目管委会拥有最终的决策权。
> Committer 是指拥有 [CCFOS/NIGHTINGALE](https://github.com/ccfos/nightingale) 代码仓库写操作权限的贡献者,他们拥有 ccf.org.cn 为后缀的邮箱地址(待上线)。原则上 Committer 能够自主决策某个代码补丁是否可以合入到夜莺代码仓库,但是项目管委会拥有最终的决策权。
Committer 承担以下一个或多个职责:
- 积极回应 Issues
@@ -38,43 +31,43 @@ Committer 承担以下一个或多个职责:
- 参加开发者例行会议,积极讨论项目规划和技术方案;
- 代表夜莺开源社区出席相关技术会议并做演讲;
Committer 记录并公示于 **[COMMITTERS](https://github.com/ccfos/nightingale/blob/main/doc/committers.md "COMMITTERS")** 列表,并获得夜莺开源社区颁发的证书,以及享有夜莺开源社区的各种权益和福利。
Committer 记录并公示于 **[COMMITTERS](./committers.md)** 列表,并获得 **[CCF ODC](https://www.ccf.org.cn/kyfzwyh/)** 颁发的电子证书,以及享有夜莺开源社区的各种权益和福利。
### PMC|项目管委会
### 项目管委会(PMC)
> PMC项目管委会作为一个实体,来管理和领导夜莺项目,为整个项目的发展全权负责。项目管委会相关内容记录并公示于文件[PMC](https://github.com/ccfos/nightingale/blob/main/doc/pmc.md "PMC").
> 项目管委会作为一个实体,来管理和领导夜莺项目,为整个项目的发展全权负责。项目管委会相关内容记录并公示于文件[PMC](./pmc.md).
- 项目管委会成员(PMC Member),从 Contributor 或者 Committer 中选举产生,他们拥有 [CCFOS/NIGHTINGALE](https://github.com/ccfos/nightingale "CCFOS/NIGHTINGALE") 代码仓库的写操作权限,拥有 Nightingale 社区相关事务的投票权、以及提名 Committer 候选人的权利。
- 项目管委会主席(PMC Chair)从项目管委会成员中投票产生。管委会主席是 **[CCF ODC](https://www.ccf.org.cn/kyfzwyh/ "CCF ODC")** 和项目管委会之间的沟通桥梁,履行特定的项目管理职责。
- 项目管委会成员(PMC Member),从 Contributor 或者 Committer 中选举产生,他们拥有 [CCFOS/NIGHTINGALE](https://github.com/ccfos/nightingale) 代码仓库的写操作权限,拥有 ccf.org.cn 为后缀的邮箱地址(待上线),拥有 Nightingale 社区相关事务的投票权、以及提名 Committer 候选人的权利。
- 项目管委会主席(PMC Chair) **[CCF ODC](https://www.ccf.org.cn/kyfzwyh/)** 从项目管委会成员中任命产生。管委会主席是 CCF ODC 和项目管委会之间的沟通桥梁,履行特定的项目管理职责。
## Communication|沟通机制
## 沟通机制(Communication)
1. 我们推荐使用邮件列表来反馈建议(待发布);
2. 我们推荐使用 [Github Issue](https://github.com/ccfos/nightingale/issues "Github Issue") 跟踪 bug 和管理需求;
3. 我们推荐使用 [Github Milestone](https://github.com/ccfos/nightingale/milestones "Github Milestone") 来管理项目进度和规划;
2. 我们推荐使用 [github issue](https://github.com/ccfos/nightingale/issues) 跟踪 bug 和管理需求;
3. 我们推荐使用 [github milestone](https://github.com/ccfos/nightingale/milestones) 来管理项目进度和规划;
4. 我们推荐使用腾讯会议来定期召开项目例会(会议 ID 待发布);
## Documentation|文档
1. 我们推荐使用 [Github Pages](https://n9e.github.io "Github Pages") 来沉淀文档;
2. 我们推荐使用 [Gitlink Wiki](https://www.gitlink.org.cn/ccfos/nightingale/wiki/faq "Gitlink Wiki") 来沉淀 FAQ
## 文档(Documentation)
1. 我们推荐使用 [github pages](https://n9e.github.io) 来沉淀文档;
2. 我们推荐使用 [gitlink wiki](https://www.gitlink.org.cn/ccfos/nightingale/wiki/faq) 来沉淀FAQ
## Operation|运营机制
## 运营机制(Operation)
1. 我们定期组织用户、贡献者、项目管委会成员之间的沟通会议,讨论项目开发的目标、方案、进度,以及讨论相关需求的合理性、优先级等议题;
2. 我们定期组织 meetup (线上&线下),创造良好的用户交流分享环境,并沉淀相关内容到文档站点;
3. 我们定期组织夜莺开发者大会,分享 [best user story](https://n9e.github.io/docs/prologue/share/ "best user story")、同步年度开发目标和计划、讨论新技术方向等;
3. 我们定期组织夜莺开发者大会,分享 best user story、同步年度开发目标和计划、讨论新技术方向等
## Philosophy|社区指导原则
## 社区指导原则(Philosophy)
>尊重、认可和记录每一位贡献者的工作。
**尊重、认可和记录每一位贡献者的工作。**
## 关于提问的原则
按照**尊重、认可、记录每一位贡献者的工作**原则,我们提倡**高效的提问**,这既是对开发者时间的尊重,也是对整个社区的知识沉淀的贡献:
1. 提问之前请先查阅 [FAQ](https://www.gitlink.org.cn/ccfos/nightingale/wiki/faq "FAQ")
2. 提问之前请先搜索 [Github Issues](https://github.com/ccfos/nightingale/issues "Github Issue")
3. 我们优先推荐通过提交 [Github Issue](https://github.com/ccfos/nightingale/issues "Github Issue") 来提问,如果[有问题点击这里](https://github.com/ccfos/nightingale/issues/new?assignees=&labels=kind%2Fbug&template=bug_report.yml "有问题点击这里") | [有需求建议点击这里](https://github.com/ccfos/nightingale/issues/new?assignees=&labels=kind%2Ffeature&template=enhancement.md "有需求建议点击这里")
1. 提问之前请先查阅 [FAQ](https://www.gitlink.org.cn/ccfos/nightingale/wiki/faq)
2. 提问之前请先搜索 [github issue](https://github.com/ccfos/nightingale/issues)
3. 我们优先推荐通过提交 github issue 来提问,如果[有问题点击这里](https://github.com/ccfos/nightingale/issues/new?assignees=&labels=kind%2Fbug&template=bug_report.yml) | [有需求建议点击这里](https://github.com/ccfos/nightingale/issues/new?assignees=&labels=kind%2Ffeature&template=enhancement.md)
最后,我们推荐你加入微信群,针对相关开放式问题,相互交流咨询 (请先加好友:[UlricGO](https://www.gitlink.org.cn/UlricQin/gist/tree/master/self.jpeg "UlricGO") 备注:夜莺加群+姓名+公司,交流群里会有开发者团队和专业、热心的群友回答问题)
最后,我们推荐你加入微信群,针对相关开放式问题,相互交流咨询 (请先加好友:[UlricGO](https://www.gitlink.org.cn/UlricQin/gist/tree/master/self.jpeg) 备注:夜莺加群+姓名+公司,交流群里会有开发者团队和专业、热心的群友回答问题)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 118 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 131 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 146 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 625 KiB

View File

@@ -1,7 +1,7 @@
### PMC Chair
## PMC Chair
- [laiwei](https://github.com/laiwei)
### PMC Co-Chair
- [UlricQin](https://github.com/UlricQin)
## PMC Member
### PMC Member
- [UlricQin](https://github.com/UlricQin)

View File

@@ -1,4 +1,5 @@
FROM python:3-slim
FROM python:2.7.8-slim
#FROM python:2
#FROM ubuntu:21.04
WORKDIR /app
@@ -8,6 +9,7 @@ RUN mkdir -p /app/pub && chmod +x /wait
ADD pub /app/pub/
RUN chmod +x n9e
EXPOSE 17000
EXPOSE 19000
EXPOSE 18000
CMD ["/app/n9e", "-h"]

View File

@@ -1,15 +1,14 @@
FROM --platform=$BUILDPLATFORM python:3-slim
FROM --platform=$BUILDPLATFORM python:2.7.8-slim
WORKDIR /app
ADD n9e /app
ADD etc /app
ADD integrations /app
ADD http://download.flashcat.cloud/wait /wait
RUN mkdir -p /app/pub && chmod +x /wait
ADD pub /app/pub/
RUN chmod +x n9e
EXPOSE 17000
EXPOSE 19000
EXPOSE 18000
CMD ["/app/n9e", "-h"]

View File

@@ -31,7 +31,7 @@ batch = 2000
chan_size = 10000
[[writers]]
url = "http://n9e:17000/prometheus/v1/write"
url = "http://nserver:19000/prometheus/v1/write"
# Basic auth username
basic_auth_user = ""
@@ -48,36 +48,4 @@ max_idle_conns_per_host = 100
enable = false
address = ":9100"
print_access = false
run_mode = "release"
[heartbeat]
enable = true
# report os version cpu.util mem.util metadata
url = "http://n9e:17000/v1/n9e/heartbeat"
# interval, unit: s
interval = 10
# Basic auth username
basic_auth_user = ""
# Basic auth password
basic_auth_pass = ""
## Optional headers
# headers = ["X-From", "categraf", "X-Xyz", "abc"]
# timeout settings, unit: ms
timeout = 5000
dial_timeout = 2500
max_idle_conns_per_host = 100
[ibex]
enable = true
## ibex flush interval
interval = "1000ms"
## n9e ibex server rpc address
servers = ["ibex:20090"]
## temp script dir
meta_dir = "./meta"
run_mode = "release"

View File

@@ -0,0 +1,35 @@
[logs]
## placeholder API key
api_key = "ef4ahfbwzwwtlwfpbertgq1i6mq0ab1q"
## whether log collection is enabled
enable = false
## address of the server that receives the logs
send_to = "127.0.0.1:17878"
## protocol used to send logs: http/tcp
send_type = "http"
## whether to compress the payload before sending
use_compress = false
## whether to send over ssl/tls
send_with_tls = false
## batch wait interval (presumably seconds — confirm against categraf docs)
batch_wait = 5
## directory where log offset state is stored
run_path = "/opt/categraf/run"
## maximum number of log files collected concurrently
open_files_limit = 100
## how often to scan directories for newly added log files
scan_period = 10
## frame size (unit unclear from here — confirm against categraf docs)
frame_size = 10
## collect logs from all containers
collect_container_all = true
## global processing rules
[[logs.Processing_rules]]
## a single log collection item
[[logs.items]]
## file/journald
type = "file"
## when type=file, path is required; when type=journald, port is required
path = "/opt/tomcat/logs/*.txt"
source = "tomcat"
service = "my_service"

View File

@@ -6,13 +6,13 @@ networks:
services:
mysql:
# platform: linux/x86_64
platform: linux/x86_64
image: "mysql:5.7"
container_name: mysql
hostname: mysql
restart: always
ports:
- "3406:3306"
- "3306:3306"
environment:
TZ: Asia/Shanghai
MYSQL_ROOT_PASSWORD: 1234
@@ -79,10 +79,10 @@ services:
command: >
sh -c "/wait && /app/ibex server"
n9e:
nwebapi:
image: flashcatcloud/nightingale:latest
container_name: n9e
hostname: n9e
container_name: nwebapi
hostname: nwebapi
restart: always
environment:
GIN_MODE: release
@@ -90,9 +90,8 @@ services:
WAIT_HOSTS: mysql:3306, redis:6379
volumes:
- ./n9eetc:/app/etc
- ./integrations:/app/integrations
ports:
- "17000:17000"
- "18000:18000"
networks:
- nightingale
depends_on:
@@ -106,7 +105,35 @@ services:
- prometheus:prometheus
- ibex:ibex
command: >
sh -c "/wait && /app/n9e"
sh -c "/wait && /app/n9e webapi"
nserver:
image: flashcatcloud/nightingale:latest
container_name: nserver
hostname: nserver
restart: always
environment:
GIN_MODE: release
TZ: Asia/Shanghai
WAIT_HOSTS: mysql:3306, redis:6379
volumes:
- ./n9eetc:/app/etc
ports:
- "19000:19000"
networks:
- nightingale
depends_on:
- mysql
- redis
- prometheus
- ibex
links:
- mysql:mysql
- redis:redis
- prometheus:prometheus
- ibex:ibex
command: >
sh -c "/wait && /app/n9e server"
categraf:
image: "flashcatcloud/categraf:latest"
@@ -122,13 +149,31 @@ services:
- ./categraf/conf:/etc/categraf/conf
- /:/hostfs
- /var/run/docker.sock:/var/run/docker.sock
# ports:
# - "9100:9100/tcp"
ports:
- "9100:9100/tcp"
networks:
- nightingale
depends_on:
- nserver
links:
- nserver:nserver
agentd:
image: ulric2019/ibex:0.3
container_name: agentd
hostname: agentd
restart: always
environment:
GIN_MODE: release
TZ: Asia/Shanghai
volumes:
- ./ibexetc:/app/etc
networks:
- nightingale
depends_on:
- n9e
- ibex
links:
- n9e:n9e
- ibex:ibex
command:
- "/app/ibex"
- "agentd"

View File

@@ -0,0 +1,38 @@
# debug, release
RunMode = "release"
# task meta storage dir
MetaDir = "./meta"
[HTTP]
Enable = true
# http listening address
Host = "0.0.0.0"
# http listening port
Port = 2090
# https cert file path
CertFile = ""
# https key file path
KeyFile = ""
# whether print access log
PrintAccessLog = true
# whether enable pprof
PProf = false
# http graceful shutdown timeout, unit: s
ShutdownTimeout = 30
# max content length: 64M
MaxContentLength = 67108864
# http server read timeout, unit: s
ReadTimeout = 20
# http server write timeout, unit: s
WriteTimeout = 40
# http server idle timeout, unit: s
IdleTimeout = 120
[Heartbeat]
# unit: ms
Interval = 1000
# rpc servers
Servers = ["ibex:20090"]
# $ip or $hostname or specified string
Host = "categraf01"

View File

@@ -1,8 +1,8 @@
set names utf8mb4;
drop database if exists n9e_v6;
create database n9e_v6;
use n9e_v6;
drop database if exists n9e_v5;
create database n9e_v5;
use n9e_v5;
CREATE TABLE `users` (
`id` bigint unsigned not null auto_increment,
@@ -41,12 +41,10 @@ CREATE TABLE `user_group` (
insert into user_group(id, name, create_at, create_by, update_at, update_by) values(1, 'demo-root-group', unix_timestamp(now()), 'root', unix_timestamp(now()), 'root');
CREATE TABLE `user_group_member` (
`id` bigint unsigned not null auto_increment,
`group_id` bigint unsigned not null,
`user_id` bigint unsigned not null,
KEY (`group_id`),
KEY (`user_id`),
PRIMARY KEY(`id`)
KEY (`user_id`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;
insert into user_group_member(group_id, user_id) values(1, 1);
@@ -72,32 +70,21 @@ insert into `role`(name, note) values('Standard', 'Ordinary user role');
insert into `role`(name, note) values('Guest', 'Readonly user role');
CREATE TABLE `role_operation`(
`id` bigint unsigned not null auto_increment,
`role_name` varchar(128) not null,
`operation` varchar(191) not null,
KEY (`role_name`),
KEY (`operation`),
PRIMARY KEY(`id`)
KEY (`operation`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;
-- Admin is special, who has no concrete operation but can do anything.
insert into `role_operation`(role_name, operation) values('Guest', '/metric/explorer');
insert into `role_operation`(role_name, operation) values('Guest', '/object/explorer');
insert into `role_operation`(role_name, operation) values('Guest', '/log/explorer');
insert into `role_operation`(role_name, operation) values('Guest', '/trace/explorer');
insert into `role_operation`(role_name, operation) values('Guest', '/help/version');
insert into `role_operation`(role_name, operation) values('Guest', '/help/contact');
insert into `role_operation`(role_name, operation) values('Standard', '/metric/explorer');
insert into `role_operation`(role_name, operation) values('Standard', '/object/explorer');
insert into `role_operation`(role_name, operation) values('Standard', '/log/explorer');
insert into `role_operation`(role_name, operation) values('Standard', '/trace/explorer');
insert into `role_operation`(role_name, operation) values('Standard', '/help/version');
insert into `role_operation`(role_name, operation) values('Standard', '/help/contact');
insert into `role_operation`(role_name, operation) values('Standard', '/alert-rules-built-in');
insert into `role_operation`(role_name, operation) values('Standard', '/dashboards-built-in');
insert into `role_operation`(role_name, operation) values('Standard', '/trace/dependencies');
insert into `role_operation`(role_name, operation) values('Standard', '/users');
insert into `role_operation`(role_name, operation) values('Standard', '/user-groups');
insert into `role_operation`(role_name, operation) values('Standard', '/user-groups/add');
@@ -174,18 +161,13 @@ CREATE TABLE `board` (
`id` bigint unsigned not null auto_increment,
`group_id` bigint not null default 0 comment 'busi group id',
`name` varchar(191) not null,
`ident` varchar(200) not null default '',
`tags` varchar(255) not null comment 'split by space',
`public` tinyint(1) not null default 0 comment '0:false 1:true',
`built_in` tinyint(1) not null default 0 comment '0:false 1:true',
`hide` tinyint(1) not null default 0 comment '0:false 1:true',
`create_at` bigint not null default 0,
`create_by` varchar(64) not null default '',
`update_at` bigint not null default 0,
`update_by` varchar(64) not null default '',
PRIMARY KEY (`id`),
UNIQUE KEY (`group_id`, `name`),
KEY(`ident`)
UNIQUE KEY (`group_id`, `name`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;
-- for dashboard new version
@@ -234,7 +216,6 @@ CREATE TABLE `chart` (
CREATE TABLE `chart_share` (
`id` bigint unsigned not null auto_increment,
`cluster` varchar(128) not null,
`dashboard_id` bigint unsigned not null,
`configs` text,
`create_at` bigint not null default 0,
`create_by` varchar(64) not null default '',
@@ -246,7 +227,6 @@ CREATE TABLE `alert_rule` (
`id` bigint unsigned not null auto_increment,
`group_id` bigint not null default 0 comment 'busi group id',
`cate` varchar(128) not null,
`datasource_ids` varchar(255) not null default '' comment 'datasource ids',
`cluster` varchar(128) not null,
`name` varchar(255) not null,
`note` varchar(1024) not null default '',
@@ -257,12 +237,11 @@ CREATE TABLE `alert_rule` (
`severity` tinyint(1) not null comment '1:Emergency 2:Warning 3:Notice',
`disabled` tinyint(1) not null comment '0:enabled 1:disabled',
`prom_for_duration` int not null comment 'prometheus for, unit:s',
`rule_config` text not null comment 'rule_config',
`prom_ql` text not null comment 'promql',
`prom_eval_interval` int not null comment 'evaluate interval',
`enable_stime` varchar(255) not null default '00:00',
`enable_etime` varchar(255) not null default '23:59',
`enable_days_of_week` varchar(255) not null default '' comment 'split by space: 0 1 2 3 4 5 6',
`enable_stime` char(5) not null default '00:00',
`enable_etime` char(5) not null default '23:59',
`enable_days_of_week` varchar(32) not null default '' comment 'split by space: 0 1 2 3 4 5 6',
`enable_in_bg` tinyint(1) not null default 0 comment '1: only this bg 0: global',
`notify_recovered` tinyint(1) not null comment 'whether notify when recovery',
`notify_channels` varchar(255) not null default '' comment 'split by space: sms voice email dingtalk wecom',
@@ -273,7 +252,6 @@ CREATE TABLE `alert_rule` (
`callbacks` varchar(255) not null default '' comment 'split by space: http://a.com/api/x http://a.com/api/y',
`runbook_url` varchar(255),
`append_tags` varchar(255) not null default '' comment 'split by space: service=n9e mod=api',
`annotations` text not null comment 'annotations',
`create_at` bigint not null default 0,
`create_by` varchar(64) not null default '',
`update_at` bigint not null default 0,
@@ -287,21 +265,14 @@ CREATE TABLE `alert_mute` (
`id` bigint unsigned not null auto_increment,
`group_id` bigint not null default 0 comment 'busi group id',
`prod` varchar(255) not null default '',
`note` varchar(1024) not null default '',
`cate` varchar(128) not null,
`cluster` varchar(128) not null,
`datasource_ids` varchar(255) not null default '' comment 'datasource ids',
`tags` varchar(4096) not null default '' comment 'json,map,tagkey->regexp|value',
`cause` varchar(255) not null default '',
`btime` bigint not null default 0 comment 'begin time',
`etime` bigint not null default 0 comment 'end time',
`disabled` tinyint(1) not null default 0 comment '0:enabled 1:disabled',
`mute_time_type` tinyint(1) not null default 0,
`periodic_mutes` varchar(4096) not null default '',
`create_at` bigint not null default 0,
`create_by` varchar(64) not null default '',
`update_at` bigint not null default 0,
`update_by` varchar(64) not null default '',
PRIMARY KEY (`id`),
KEY (`create_at`),
KEY (`group_id`)
@@ -309,12 +280,8 @@ CREATE TABLE `alert_mute` (
CREATE TABLE `alert_subscribe` (
`id` bigint unsigned not null auto_increment,
`name` varchar(255) not null default '',
`disabled` tinyint(1) not null default 0 comment '0:enabled 1:disabled',
`group_id` bigint not null default 0 comment 'busi group id',
`prod` varchar(255) not null default '',
`cate` varchar(128) not null,
`datasource_ids` varchar(255) not null default '' comment 'datasource ids',
`cluster` varchar(128) not null,
`rule_id` bigint not null default 0,
`tags` varchar(4096) not null default '' comment 'json,map,tagkey->regexp|value',
@@ -323,9 +290,6 @@ CREATE TABLE `alert_subscribe` (
`redefine_channels` tinyint(1) default 0 comment 'is redefine channels?',
`new_channels` varchar(255) not null default '' comment 'split by space: sms voice email dingtalk wecom',
`user_group_ids` varchar(250) not null comment 'split by space 1 34 5, notify cc to user_group_ids',
`webhooks` text not null,
`redefine_webhooks` tinyint(1) default 0,
`for_duration` bigint not null default 0,
`create_at` bigint not null default 0,
`create_by` varchar(64) not null default '',
`update_at` bigint not null default 0,
@@ -334,10 +298,11 @@ CREATE TABLE `alert_subscribe` (
KEY (`update_at`),
KEY (`group_id`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;
CREATE TABLE `target` (
`id` bigint unsigned not null auto_increment,
`group_id` bigint not null default 0 comment 'busi group id',
`cluster` varchar(128) not null comment 'append to alert event as field',
`ident` varchar(191) not null comment 'target id',
`note` varchar(255) not null default '' comment 'append to alert event as field',
`tags` varchar(512) not null default '' comment 'append to series data as tags, split by space, append external space at suffix',
@@ -347,8 +312,6 @@ CREATE TABLE `target` (
KEY (`group_id`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;
-- case1: target_idents; case2: target_tags
-- CREATE TABLE `collect_rule` (
-- `id` bigint unsigned not null auto_increment,
@@ -383,15 +346,14 @@ CREATE TABLE `metric_view` (
) ENGINE=InnoDB DEFAULT CHARSET = utf8mb4;
insert into metric_view(name, cate, configs) values('Host View', 0, '{"filters":[{"oper":"=","label":"__name__","value":"cpu_usage_idle"}],"dynamicLabels":[],"dimensionLabels":[{"label":"ident","value":""}]}');
CREATE TABLE `recording_rule` (
`id` bigint unsigned not null auto_increment,
`group_id` bigint not null default '0' comment 'group_id',
`datasource_id` bigint not null default 0 comment 'datasource id',
`cluster` varchar(128) not null,
`name` varchar(255) not null comment 'new metric name',
`note` varchar(255) not null comment 'rule note',
`disabled` tinyint(1) not null default 0 comment '0:enabled 1:disabled',
`disabled` tinyint(1) not null comment '0:enabled 1:disabled',
`prom_ql` varchar(8192) not null comment 'promql',
`prom_eval_interval` int not null comment 'evaluate interval',
`append_tags` varchar(255) default '' comment 'split by space: service=n9e mod=api',
@@ -422,7 +384,6 @@ insert into alert_aggr_view(name, rule, cate) values('By RuleName', 'field:rule_
CREATE TABLE `alert_cur_event` (
`id` bigint unsigned not null comment 'use alert_his_event.id',
`cate` varchar(128) not null,
`datasource_id` bigint not null default 0 comment 'datasource id',
`cluster` varchar(128) not null,
`group_id` bigint unsigned not null comment 'busi group id of rule',
`group_name` varchar(255) not null default '' comment 'busi group name',
@@ -448,8 +409,6 @@ CREATE TABLE `alert_cur_event` (
`first_trigger_time` bigint,
`trigger_time` bigint not null,
`trigger_value` varchar(255) not null,
`annotations` text not null comment 'annotations',
`rule_config` text not null comment 'annotations',
`tags` varchar(1024) not null default '' comment 'merge data_tags rule_tags, split by ,,',
PRIMARY KEY (`id`),
KEY (`hash`),
@@ -462,7 +421,6 @@ CREATE TABLE `alert_his_event` (
`id` bigint unsigned not null AUTO_INCREMENT,
`is_recovered` tinyint(1) not null,
`cate` varchar(128) not null,
`datasource_id` bigint not null default 0 comment 'datasource id',
`cluster` varchar(128) not null,
`group_id` bigint unsigned not null comment 'busi group id of rule',
`group_name` varchar(255) not null default '' comment 'busi group name',
@@ -490,8 +448,6 @@ CREATE TABLE `alert_his_event` (
`recover_time` bigint not null default 0,
`last_eval_time` bigint not null default 0 comment 'for time filter',
`tags` varchar(1024) not null default '' comment 'merge data_tags rule_tags, split by ,,',
`annotations` text not null comment 'annotations',
`rule_config` text not null comment 'annotations',
PRIMARY KEY (`id`),
KEY (`hash`),
KEY (`rule_id`),
@@ -554,55 +510,8 @@ CREATE TABLE `alerting_engines`
(
`id` int unsigned NOT NULL AUTO_INCREMENT,
`instance` varchar(128) not null default '' comment 'instance identification, e.g. 10.9.0.9:9090',
`datasource_id` bigint not null default 0 comment 'datasource id',
`cluster` varchar(128) not null default '' comment 'n9e-alert cluster',
`cluster` varchar(128) not null default '' comment 'target reader cluster',
`clock` bigint not null,
PRIMARY KEY (`id`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;
CREATE TABLE `datasource`
(
`id` int unsigned NOT NULL AUTO_INCREMENT,
`name` varchar(191) not null default '',
`description` varchar(255) not null default '',
`category` varchar(255) not null default '',
`plugin_id` int unsigned not null default 0,
`plugin_type` varchar(255) not null default '',
`plugin_type_name` varchar(255) not null default '',
`cluster_name` varchar(255) not null default '',
`settings` text not null,
`status` varchar(255) not null default '',
`http` varchar(4096) not null default '',
`auth` varchar(8192) not null default '',
`created_at` bigint not null default 0,
`created_by` varchar(64) not null default '',
`updated_at` bigint not null default 0,
`updated_by` varchar(64) not null default '',
UNIQUE KEY (`name`),
PRIMARY KEY (`id`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;
CREATE TABLE `builtin_cate` (
`id` bigint unsigned not null auto_increment,
`name` varchar(191) not null,
`user_id` bigint not null default 0,
PRIMARY KEY (`id`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;
CREATE TABLE `notify_tpl` (
`id` bigint unsigned not null auto_increment,
`channel` varchar(32) not null,
`name` varchar(255) not null,
`content` text not null,
PRIMARY KEY (`id`),
UNIQUE KEY (`channel`)
UNIQUE KEY (`instance`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;
CREATE TABLE `sso_config` (
`id` bigint unsigned not null auto_increment,
`name` varchar(191) not null,
`content` text not null,
PRIMARY KEY (`id`),
UNIQUE KEY (`name`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4;

View File

@@ -0,0 +1,594 @@
-- n9e version 5.8 for postgres
-- users: local account store for the n9e web UI. `username` is the login key
-- and, per the column comment below, must never be renamed.
CREATE TABLE users (
id bigserial,
username varchar(64) not null ,
nickname varchar(64) not null ,
password varchar(128) not null default '',
phone varchar(16) not null default '',
email varchar(64) not null default '',
portrait varchar(255) not null default '' ,
roles varchar(255) not null ,
contacts varchar(1024) ,
-- maintainer: smallint flag; exact semantics not shown in this file — confirm against application code.
maintainer smallint not null default 0,
create_at bigint not null default 0,
create_by varchar(64) not null default '',
update_at bigint not null default 0,
update_by varchar(64) not null default ''
) ;
ALTER TABLE users ADD CONSTRAINT users_pk PRIMARY KEY (id);
ALTER TABLE users ADD CONSTRAINT users_un UNIQUE (username);
COMMENT ON COLUMN users.username IS 'login name, cannot rename';
COMMENT ON COLUMN users.nickname IS 'display name, chinese name';
COMMENT ON COLUMN users.portrait IS 'portrait image url';
COMMENT ON COLUMN users.roles IS 'Admin | Standard | Guest, split by space';
COMMENT ON COLUMN users.contacts IS 'json e.g. {wecom:xx, dingtalk_robot_token:yy}';
-- Seed the built-in root admin. NOTE(review): the seed password 'root.2020'
-- looks like plaintext — confirm the application hashes it on first use.
insert into users(id, username, nickname, password, roles, create_at, create_by, update_at, update_by) values(1, 'root', '超管', 'root.2020', 'Admin', date_part('epoch',current_timestamp)::int, 'system', date_part('epoch',current_timestamp)::int, 'system');
CREATE TABLE user_group (
id bigserial,
name varchar(128) not null default '',
note varchar(255) not null default '',
create_at bigint not null default 0,
create_by varchar(64) not null default '',
update_at bigint not null default 0,
update_by varchar(64) not null default ''
) ;
ALTER TABLE user_group ADD CONSTRAINT user_group_pk PRIMARY KEY (id);
CREATE INDEX user_group_create_by_idx ON user_group (create_by);
CREATE INDEX user_group_update_at_idx ON user_group (update_at);
insert into user_group(id, name, create_at, create_by, update_at, update_by) values(1, 'demo-root-group', date_part('epoch',current_timestamp)::int, 'root', date_part('epoch',current_timestamp)::int, 'root');
CREATE TABLE user_group_member (
group_id bigint not null,
user_id bigint not null
) ;
CREATE INDEX user_group_member_group_id_idx ON user_group_member (group_id);
CREATE INDEX user_group_member_user_id_idx ON user_group_member (user_id);
insert into user_group_member(group_id, user_id) values(1, 1);
CREATE TABLE configs (
id bigserial,
ckey varchar(191) not null,
cval varchar(4096) not null default ''
) ;
ALTER TABLE configs ADD CONSTRAINT configs_pk PRIMARY KEY (id);
ALTER TABLE configs ADD CONSTRAINT configs_un UNIQUE (ckey);
CREATE TABLE role (
id bigserial,
name varchar(191) not null default '',
note varchar(255) not null default ''
) ;
ALTER TABLE role ADD CONSTRAINT role_pk PRIMARY KEY (id);
ALTER TABLE role ADD CONSTRAINT role_un UNIQUE ("name");
insert into role(name, note) values('Admin', 'Administrator role');
insert into role(name, note) values('Standard', 'Ordinary user role');
insert into role(name, note) values('Guest', 'Readonly user role');
CREATE TABLE role_operation(
role_name varchar(128) not null,
operation varchar(191) not null
) ;
CREATE INDEX role_operation_role_name_idx ON role_operation (role_name);
CREATE INDEX role_operation_operation_idx ON role_operation (operation);
-- Admin is special: it is granted no concrete operations here because it is allowed to do anything.
insert into role_operation(role_name, operation) values('Guest', '/metric/explorer');
insert into role_operation(role_name, operation) values('Guest', '/object/explorer');
insert into role_operation(role_name, operation) values('Guest', '/help/version');
insert into role_operation(role_name, operation) values('Guest', '/help/contact');
insert into role_operation(role_name, operation) values('Standard', '/metric/explorer');
insert into role_operation(role_name, operation) values('Standard', '/object/explorer');
insert into role_operation(role_name, operation) values('Standard', '/help/version');
insert into role_operation(role_name, operation) values('Standard', '/help/contact');
insert into role_operation(role_name, operation) values('Standard', '/users');
insert into role_operation(role_name, operation) values('Standard', '/user-groups');
insert into role_operation(role_name, operation) values('Standard', '/user-groups/add');
insert into role_operation(role_name, operation) values('Standard', '/user-groups/put');
insert into role_operation(role_name, operation) values('Standard', '/user-groups/del');
insert into role_operation(role_name, operation) values('Standard', '/busi-groups');
insert into role_operation(role_name, operation) values('Standard', '/busi-groups/add');
insert into role_operation(role_name, operation) values('Standard', '/busi-groups/put');
insert into role_operation(role_name, operation) values('Standard', '/busi-groups/del');
insert into role_operation(role_name, operation) values('Standard', '/targets');
insert into role_operation(role_name, operation) values('Standard', '/targets/add');
insert into role_operation(role_name, operation) values('Standard', '/targets/put');
insert into role_operation(role_name, operation) values('Standard', '/targets/del');
insert into role_operation(role_name, operation) values('Standard', '/dashboards');
insert into role_operation(role_name, operation) values('Standard', '/dashboards/add');
insert into role_operation(role_name, operation) values('Standard', '/dashboards/put');
insert into role_operation(role_name, operation) values('Standard', '/dashboards/del');
insert into role_operation(role_name, operation) values('Standard', '/alert-rules');
insert into role_operation(role_name, operation) values('Standard', '/alert-rules/add');
insert into role_operation(role_name, operation) values('Standard', '/alert-rules/put');
insert into role_operation(role_name, operation) values('Standard', '/alert-rules/del');
insert into role_operation(role_name, operation) values('Standard', '/alert-mutes');
insert into role_operation(role_name, operation) values('Standard', '/alert-mutes/add');
insert into role_operation(role_name, operation) values('Standard', '/alert-mutes/del');
insert into role_operation(role_name, operation) values('Standard', '/alert-subscribes');
insert into role_operation(role_name, operation) values('Standard', '/alert-subscribes/add');
insert into role_operation(role_name, operation) values('Standard', '/alert-subscribes/put');
insert into role_operation(role_name, operation) values('Standard', '/alert-subscribes/del');
insert into role_operation(role_name, operation) values('Standard', '/alert-cur-events');
insert into role_operation(role_name, operation) values('Standard', '/alert-cur-events/del');
insert into role_operation(role_name, operation) values('Standard', '/alert-his-events');
insert into role_operation(role_name, operation) values('Standard', '/job-tpls');
insert into role_operation(role_name, operation) values('Standard', '/job-tpls/add');
insert into role_operation(role_name, operation) values('Standard', '/job-tpls/put');
insert into role_operation(role_name, operation) values('Standard', '/job-tpls/del');
insert into role_operation(role_name, operation) values('Standard', '/job-tasks');
insert into role_operation(role_name, operation) values('Standard', '/job-tasks/add');
insert into role_operation(role_name, operation) values('Standard', '/job-tasks/put');
insert into role_operation(role_name, operation) values('Standard', '/recording-rules');
insert into role_operation(role_name, operation) values('Standard', '/recording-rules/add');
insert into role_operation(role_name, operation) values('Standard', '/recording-rules/put');
insert into role_operation(role_name, operation) values('Standard', '/recording-rules/del');
CREATE TABLE recording_rule (
id bigserial NOT NULL,
group_id bigint not null default 0,
cluster varchar(128) not null,
name varchar(255) not null,
note varchar(255) not null,
disabled smallint not null,
prom_ql varchar(8192) not null,
prom_eval_interval int not null,
append_tags varchar(255) default '',
create_at bigint default 0,
create_by varchar(64) default '',
update_at bigint default 0,
update_by varchar(64) default '',
CONSTRAINT recording_rule_pk PRIMARY KEY (id)
) ;
CREATE INDEX recording_rule_group_id_idx ON recording_rule (group_id);
CREATE INDEX recording_rule_update_at_idx ON recording_rule (update_at);
COMMENT ON COLUMN recording_rule.group_id IS 'group_id';
COMMENT ON COLUMN recording_rule.name IS 'new metric name';
COMMENT ON COLUMN recording_rule.note IS 'rule note';
COMMENT ON COLUMN recording_rule.disabled IS '0:enabled 1:disabled';
COMMENT ON COLUMN recording_rule.prom_ql IS 'promql';
COMMENT ON COLUMN recording_rule.prom_eval_interval IS 'evaluate interval';
COMMENT ON COLUMN recording_rule.append_tags IS 'split by space: service=n9e mod=api';
-- for alert_rule | collect_rule | mute | dashboard grouping
CREATE TABLE busi_group (
id bigserial,
name varchar(191) not null,
label_enable smallint not null default 0,
label_value varchar(191) not null default '' ,
create_at bigint not null default 0,
create_by varchar(64) not null default '',
update_at bigint not null default 0,
update_by varchar(64) not null default ''
) ;
ALTER TABLE busi_group ADD CONSTRAINT busi_group_pk PRIMARY KEY (id);
ALTER TABLE busi_group ADD CONSTRAINT busi_group_un UNIQUE ("name");
COMMENT ON COLUMN busi_group.label_value IS 'if label_enable: label_value can not be blank';
insert into busi_group(id, name, create_at, create_by, update_at, update_by) values(1, 'Default Busi Group', date_part('epoch',current_timestamp)::int, 'root', date_part('epoch',current_timestamp)::int, 'root');
CREATE TABLE busi_group_member (
id bigserial,
busi_group_id bigint not null ,
user_group_id bigint not null ,
perm_flag char(2) not null
) ;
ALTER TABLE busi_group_member ADD CONSTRAINT busi_group_member_pk PRIMARY KEY (id);
CREATE INDEX busi_group_member_busi_group_id_idx ON busi_group_member (busi_group_id);
CREATE INDEX busi_group_member_user_group_id_idx ON busi_group_member (user_group_id);
COMMENT ON COLUMN busi_group_member.busi_group_id IS 'busi group id';
COMMENT ON COLUMN busi_group_member.user_group_id IS 'user group id';
COMMENT ON COLUMN busi_group_member.perm_flag IS 'ro | rw';
insert into busi_group_member(busi_group_id, user_group_id, perm_flag) values(1, 1, 'rw');
-- for dashboard new version
CREATE TABLE board (
id bigserial not null ,
group_id bigint not null default 0 ,
name varchar(191) not null,
tags varchar(255) not null ,
create_at bigint not null default 0,
create_by varchar(64) not null default '',
update_at bigint not null default 0,
update_by varchar(64) not null default ''
) ;
ALTER TABLE board ADD CONSTRAINT board_pk PRIMARY KEY (id);
ALTER TABLE board ADD CONSTRAINT board_un UNIQUE (group_id,"name");
COMMENT ON COLUMN board.group_id IS 'busi group id';
COMMENT ON COLUMN board.tags IS 'split by space';
-- for dashboard new version
CREATE TABLE board_payload (
id bigint not null ,
payload text not null
);
ALTER TABLE board_payload ADD CONSTRAINT board_payload_un UNIQUE (id);
COMMENT ON COLUMN board_payload.id IS 'dashboard id';
-- deprecated
CREATE TABLE dashboard (
id bigserial,
group_id bigint not null default 0 ,
name varchar(191) not null,
tags varchar(255) not null ,
configs varchar(8192) ,
create_at bigint not null default 0,
create_by varchar(64) not null default '',
update_at bigint not null default 0,
update_by varchar(64) not null default ''
) ;
ALTER TABLE dashboard ADD CONSTRAINT dashboard_pk PRIMARY KEY (id);
-- deprecated
-- auto create the first subclass 'Default chart group' of dashboard
CREATE TABLE chart_group (
id bigserial,
dashboard_id bigint not null,
name varchar(255) not null,
weight int not null default 0
) ;
ALTER TABLE chart_group ADD CONSTRAINT chart_group_pk PRIMARY KEY (id);
-- deprecated
CREATE TABLE chart (
id bigserial,
group_id bigint not null ,
configs text,
weight int not null default 0
) ;
ALTER TABLE chart ADD CONSTRAINT chart_pk PRIMARY KEY (id);
CREATE TABLE chart_share (
id bigserial,
cluster varchar(128) not null,
configs text,
create_at bigint not null default 0,
create_by varchar(64) not null default ''
) ;
ALTER TABLE chart_share ADD CONSTRAINT chart_share_pk PRIMARY KEY (id);
CREATE INDEX chart_share_create_at_idx ON chart_share (create_at);
CREATE TABLE alert_rule (
id bigserial NOT NULL,
group_id int8 NOT NULL DEFAULT 0,
"cluster" varchar(128) NOT NULL,
"name" varchar(255) NOT NULL,
note varchar(1024) NOT NULL,
severity int2 NOT NULL,
disabled int2 NOT NULL,
prom_for_duration int4 NOT NULL,
prom_ql text NOT NULL,
prom_eval_interval int4 NOT NULL,
enable_stime bpchar(5) NOT NULL DEFAULT '00:00'::bpchar,
enable_etime bpchar(5) NOT NULL DEFAULT '23:59'::bpchar,
enable_days_of_week varchar(32) NOT NULL DEFAULT ''::character varying,
enable_in_bg int2 NOT NULL DEFAULT 0,
notify_recovered int2 NOT NULL,
notify_channels varchar(255) NOT NULL DEFAULT ''::character varying,
notify_groups varchar(255) NOT NULL DEFAULT ''::character varying,
notify_repeat_step int4 NOT NULL DEFAULT 0,
notify_max_number int4 not null default 0,
recover_duration int4 NOT NULL DEFAULT 0,
callbacks varchar(255) NOT NULL DEFAULT ''::character varying,
runbook_url varchar(255) NULL,
append_tags varchar(255) NOT NULL DEFAULT ''::character varying,
create_at int8 NOT NULL DEFAULT 0,
create_by varchar(64) NOT NULL DEFAULT ''::character varying,
update_at int8 NOT NULL DEFAULT 0,
update_by varchar(64) NOT NULL DEFAULT ''::character varying,
prod varchar(255) NOT NULL DEFAULT ''::character varying,
algorithm varchar(255) NOT NULL DEFAULT ''::character varying,
algo_params varchar(255) NULL,
delay int4 NOT NULL DEFAULT 0,
CONSTRAINT alert_rule_pk PRIMARY KEY (id)
);
CREATE INDEX alert_rule_group_id_idx ON alert_rule USING btree (group_id);
CREATE INDEX alert_rule_update_at_idx ON alert_rule USING btree (update_at);
COMMENT ON COLUMN alert_rule.group_id IS 'busi group id';
COMMENT ON COLUMN alert_rule.severity IS '0:Emergency 1:Warning 2:Notice';
COMMENT ON COLUMN alert_rule.disabled IS '0:enabled 1:disabled';
COMMENT ON COLUMN alert_rule.prom_for_duration IS 'prometheus for, unit:s';
COMMENT ON COLUMN alert_rule.prom_ql IS 'promql';
COMMENT ON COLUMN alert_rule.prom_eval_interval IS 'evaluate interval';
COMMENT ON COLUMN alert_rule.enable_stime IS '00:00';
COMMENT ON COLUMN alert_rule.enable_etime IS '23:59';
COMMENT ON COLUMN alert_rule.enable_days_of_week IS 'split by space: 0 1 2 3 4 5 6';
COMMENT ON COLUMN alert_rule.enable_in_bg IS '1: only this bg 0: global';
COMMENT ON COLUMN alert_rule.notify_recovered IS 'whether notify when recovery';
COMMENT ON COLUMN alert_rule.notify_channels IS 'split by space: sms voice email dingtalk wecom';
COMMENT ON COLUMN alert_rule.notify_groups IS 'split by space: 233 43';
COMMENT ON COLUMN alert_rule.notify_repeat_step IS 'unit: min';
COMMENT ON COLUMN alert_rule.recover_duration IS 'unit: s';
COMMENT ON COLUMN alert_rule.callbacks IS 'split by space: http://a.com/api/x http://a.com/api/y';
COMMENT ON COLUMN alert_rule.append_tags IS 'split by space: service=n9e mod=api';
-- alert_mute: suppresses alerts whose tags match, within the [btime, etime]
-- time window, scoped to a busi group / cluster.
CREATE TABLE alert_mute (
id bigserial,
group_id bigint not null default 0 ,
prod varchar(255) NOT NULL DEFAULT '' ,
cluster varchar(128) not null,
tags varchar(4096) not null default '' ,
cause varchar(255) not null default '',
btime bigint not null default 0 ,
etime bigint not null default 0 ,
create_at bigint not null default 0,
create_by varchar(64) not null default ''
) ;
ALTER TABLE alert_mute ADD CONSTRAINT alert_mute_pk PRIMARY KEY (id);
CREATE INDEX alert_mute_create_at_idx ON alert_mute (create_at);
CREATE INDEX alert_mute_group_id_idx ON alert_mute (group_id);
COMMENT ON COLUMN alert_mute.group_id IS 'busi group id';
COMMENT ON COLUMN alert_mute.tags IS 'json,map,tagkey->regexp|value';
COMMENT ON COLUMN alert_mute.btime IS 'begin time';
COMMENT ON COLUMN alert_mute.etime IS 'end time';
CREATE TABLE alert_subscribe (
id bigserial,
group_id bigint not null default 0 ,
cluster varchar(128) not null,
rule_id bigint not null default 0,
tags jsonb not null ,
redefine_severity smallint default 0 ,
new_severity smallint not null ,
redefine_channels smallint default 0 ,
new_channels varchar(255) not null default '' ,
user_group_ids varchar(250) not null ,
create_at bigint not null default 0,
create_by varchar(64) not null default '',
update_at bigint not null default 0,
update_by varchar(64) not null default ''
) ;
ALTER TABLE alert_subscribe ADD CONSTRAINT alert_subscribe_pk PRIMARY KEY (id);
CREATE INDEX alert_subscribe_group_id_idx ON alert_subscribe (group_id);
CREATE INDEX alert_subscribe_update_at_idx ON alert_subscribe (update_at);
COMMENT ON COLUMN alert_subscribe.group_id IS 'busi group id';
COMMENT ON COLUMN alert_subscribe.tags IS 'json,map,tagkey->regexp|value';
COMMENT ON COLUMN alert_subscribe.redefine_severity IS 'is redefine severity?';
COMMENT ON COLUMN alert_subscribe.new_severity IS '0:Emergency 1:Warning 2:Notice';
COMMENT ON COLUMN alert_subscribe.redefine_channels IS 'is redefine channels?';
COMMENT ON COLUMN alert_subscribe.new_channels IS 'split by space: sms voice email dingtalk wecom';
COMMENT ON COLUMN alert_subscribe.user_group_ids IS 'split by space 1 34 5, notify cc to user_group_ids';
CREATE TABLE target (
id bigserial,
group_id bigint not null default 0 ,
cluster varchar(128) not null ,
ident varchar(191) not null ,
note varchar(255) not null default '' ,
tags varchar(512) not null default '' ,
update_at bigint not null default 0
) ;
ALTER TABLE target ADD CONSTRAINT target_pk PRIMARY KEY (id);
ALTER TABLE target ADD CONSTRAINT target_un UNIQUE (ident);
CREATE INDEX target_group_id_idx ON target (group_id);
COMMENT ON COLUMN target.group_id IS 'busi group id';
COMMENT ON COLUMN target."cluster" IS 'append to alert event as field';
COMMENT ON COLUMN target.ident IS 'target id';
COMMENT ON COLUMN target.note IS 'append to alert event as field';
COMMENT ON COLUMN target.tags IS 'append to series data as tags, split by space, append external space at suffix';
CREATE TABLE metric_view (
id bigserial,
name varchar(191) not null default '',
cate smallint not null ,
configs varchar(8192) not null default '',
create_at bigint not null default 0,
create_by bigint not null default 0 ,
update_at bigint not null default 0
) ;
ALTER TABLE metric_view ADD CONSTRAINT metric_view_pk PRIMARY KEY (id);
CREATE INDEX metric_view_create_by_idx ON metric_view (create_by);
COMMENT ON COLUMN metric_view.cate IS '0: preset 1: custom';
COMMENT ON COLUMN metric_view.create_by IS 'user id';
insert into metric_view(name, cate, configs) values('Host View', 0, '{"filters":[{"oper":"=","label":"__name__","value":"cpu_usage_idle"}],"dynamicLabels":[],"dimensionLabels":[{"label":"ident","value":""}]}');
CREATE TABLE alert_aggr_view (
id bigserial,
name varchar(191) not null default '',
rule varchar(2048) not null default '',
cate smallint not null ,
create_at bigint not null default 0,
create_by bigint not null default 0 ,
update_at bigint not null default 0
) ;
ALTER TABLE alert_aggr_view ADD CONSTRAINT alert_aggr_view_pk PRIMARY KEY (id);
CREATE INDEX alert_aggr_view_create_by_idx ON alert_aggr_view (create_by);
COMMENT ON COLUMN alert_aggr_view.cate IS '0: preset 1: custom';
COMMENT ON COLUMN alert_aggr_view.create_by IS 'user id';
insert into alert_aggr_view(name, rule, cate) values('By BusiGroup, Severity', 'field:group_name::field:severity', 0);
insert into alert_aggr_view(name, rule, cate) values('By RuleName', 'field:rule_name', 0);
CREATE TABLE alert_cur_event (
id bigserial NOT NULL,
"cluster" varchar(128) NOT NULL,
group_id int8 NOT NULL,
group_name varchar(255) NOT NULL DEFAULT ''::character varying,
hash varchar(64) NOT NULL,
rule_id int8 NOT NULL,
rule_name varchar(255) NOT NULL,
rule_note varchar(2048) NOT NULL DEFAULT 'alert rule note'::character varying,
severity int2 NOT NULL,
prom_for_duration int4 NOT NULL,
prom_ql varchar(8192) NOT NULL,
prom_eval_interval int4 NOT NULL,
callbacks varchar(255) NOT NULL DEFAULT ''::character varying,
runbook_url varchar(255) NULL,
notify_recovered int2 NOT NULL,
notify_channels varchar(255) NOT NULL DEFAULT ''::character varying,
notify_groups varchar(255) NOT NULL DEFAULT ''::character varying,
notify_repeat_next int8 NOT NULL DEFAULT 0,
notify_cur_number int4 not null default 0,
target_ident varchar(191) NOT NULL DEFAULT ''::character varying,
target_note varchar(191) NOT NULL DEFAULT ''::character varying,
first_trigger_time int8,
trigger_time int8 NOT NULL,
trigger_value varchar(255) NOT NULL,
tags varchar(1024) NOT NULL DEFAULT ''::character varying,
rule_prod varchar(255) NOT NULL DEFAULT ''::character varying,
rule_algo varchar(255) NOT NULL DEFAULT ''::character varying,
CONSTRAINT alert_cur_event_pk PRIMARY KEY (id)
);
CREATE INDEX alert_cur_event_hash_idx ON alert_cur_event USING btree (hash);
CREATE INDEX alert_cur_event_notify_repeat_next_idx ON alert_cur_event USING btree (notify_repeat_next);
CREATE INDEX alert_cur_event_rule_id_idx ON alert_cur_event USING btree (rule_id);
CREATE INDEX alert_cur_event_trigger_time_idx ON alert_cur_event USING btree (trigger_time, group_id);
COMMENT ON COLUMN alert_cur_event.id IS 'use alert_his_event.id';
COMMENT ON COLUMN alert_cur_event.group_id IS 'busi group id of rule';
COMMENT ON COLUMN alert_cur_event.group_name IS 'busi group name';
COMMENT ON COLUMN alert_cur_event.hash IS 'rule_id + vector_pk';
COMMENT ON COLUMN alert_cur_event.rule_note IS 'alert rule note';
-- Fixed: the severity scale is 0-based everywhere else in this schema
-- (alert_rule.severity and alert_his_event.severity both document
-- '0:Emergency 1:Warning 2:Notice'); this comment previously said 1/2/3.
COMMENT ON COLUMN alert_cur_event.severity IS '0:Emergency 1:Warning 2:Notice';
COMMENT ON COLUMN alert_cur_event.prom_for_duration IS 'prometheus for, unit:s';
COMMENT ON COLUMN alert_cur_event.prom_ql IS 'promql';
COMMENT ON COLUMN alert_cur_event.prom_eval_interval IS 'evaluate interval';
COMMENT ON COLUMN alert_cur_event.callbacks IS 'split by space: http://a.com/api/x http://a.com/api/y';
COMMENT ON COLUMN alert_cur_event.notify_recovered IS 'whether notify when recovery';
COMMENT ON COLUMN alert_cur_event.notify_channels IS 'split by space: sms voice email dingtalk wecom';
COMMENT ON COLUMN alert_cur_event.notify_groups IS 'split by space: 233 43';
COMMENT ON COLUMN alert_cur_event.notify_repeat_next IS 'next timestamp to notify, get repeat settings from rule';
COMMENT ON COLUMN alert_cur_event.target_ident IS 'target ident, also in tags';
COMMENT ON COLUMN alert_cur_event.target_note IS 'target note';
COMMENT ON COLUMN alert_cur_event.tags IS 'merge data_tags rule_tags, split by ,,';
CREATE TABLE alert_his_event (
id bigserial NOT NULL,
is_recovered int2 NOT NULL,
"cluster" varchar(128) NOT NULL,
group_id int8 NOT NULL,
group_name varchar(255) NOT NULL DEFAULT ''::character varying,
hash varchar(64) NOT NULL,
rule_id int8 NOT NULL,
rule_name varchar(255) NOT NULL,
rule_note varchar(2048) NOT NULL DEFAULT 'alert rule note'::character varying,
severity int2 NOT NULL,
prom_for_duration int4 NOT NULL,
prom_ql varchar(8192) NOT NULL,
prom_eval_interval int4 NOT NULL,
callbacks varchar(255) NOT NULL DEFAULT ''::character varying,
runbook_url varchar(255) NULL,
notify_recovered int2 NOT NULL,
notify_channels varchar(255) NOT NULL DEFAULT ''::character varying,
notify_groups varchar(255) NOT NULL DEFAULT ''::character varying,
notify_cur_number int4 not null default 0,
target_ident varchar(191) NOT NULL DEFAULT ''::character varying,
target_note varchar(191) NOT NULL DEFAULT ''::character varying,
first_trigger_time int8,
trigger_time int8 NOT NULL,
trigger_value varchar(255) NOT NULL,
recover_time int8 NOT NULL DEFAULT 0,
last_eval_time int8 NOT NULL DEFAULT 0,
tags varchar(1024) NOT NULL DEFAULT ''::character varying,
rule_prod varchar(255) NOT NULL DEFAULT ''::character varying,
rule_algo varchar(255) NOT NULL DEFAULT ''::character varying,
CONSTRAINT alert_his_event_pk PRIMARY KEY (id)
);
CREATE INDEX alert_his_event_hash_idx ON alert_his_event USING btree (hash);
CREATE INDEX alert_his_event_rule_id_idx ON alert_his_event USING btree (rule_id);
CREATE INDEX alert_his_event_trigger_time_idx ON alert_his_event USING btree (trigger_time, group_id);
COMMENT ON COLUMN alert_his_event.group_id IS 'busi group id of rule';
COMMENT ON COLUMN alert_his_event.group_name IS 'busi group name';
COMMENT ON COLUMN alert_his_event.hash IS 'rule_id + vector_pk';
COMMENT ON COLUMN alert_his_event.rule_note IS 'alert rule note';
COMMENT ON COLUMN alert_his_event.severity IS '0:Emergency 1:Warning 2:Notice';
COMMENT ON COLUMN alert_his_event.prom_for_duration IS 'prometheus for, unit:s';
COMMENT ON COLUMN alert_his_event.prom_ql IS 'promql';
COMMENT ON COLUMN alert_his_event.prom_eval_interval IS 'evaluate interval';
COMMENT ON COLUMN alert_his_event.callbacks IS 'split by space: http://a.com/api/x http://a.com/api/y';
COMMENT ON COLUMN alert_his_event.notify_recovered IS 'whether notify when recovery';
COMMENT ON COLUMN alert_his_event.notify_channels IS 'split by space: sms voice email dingtalk wecom';
COMMENT ON COLUMN alert_his_event.notify_groups IS 'split by space: 233 43';
COMMENT ON COLUMN alert_his_event.target_ident IS 'target ident, also in tags';
COMMENT ON COLUMN alert_his_event.target_note IS 'target note';
COMMENT ON COLUMN alert_his_event.last_eval_time IS 'for time filter';
COMMENT ON COLUMN alert_his_event.tags IS 'merge data_tags rule_tags, split by ,,';
-- task_tpl: reusable job-task template (script + execution knobs), owned by a
-- busi group. Hosts it targets live in task_tpl_host (keyed by this table's id).
CREATE TABLE task_tpl
(
id serial,
group_id int not null ,
title varchar(255) not null default '',
account varchar(64) not null,
batch int not null default 0,
tolerance int not null default 0,
timeout int not null default 0,
pause varchar(255) not null default '',
script text not null,
args varchar(512) not null default '',
tags varchar(255) not null default '' ,
create_at bigint not null default 0,
create_by varchar(64) not null default '',
update_at bigint not null default 0,
update_by varchar(64) not null default ''
) ;
ALTER TABLE task_tpl ADD CONSTRAINT task_tpl_pk PRIMARY KEY (id);
CREATE INDEX task_tpl_group_id_idx ON task_tpl (group_id);
COMMENT ON COLUMN task_tpl.group_id IS 'busi group id';
COMMENT ON COLUMN task_tpl.tags IS 'split by space';
-- task_tpl_host: hosts bound to a task template — one row per (tpl id, host)
-- pair, so `id` (the task_tpl id) is intentionally NOT unique here.
CREATE TABLE task_tpl_host
(
ii serial,
id int not null ,
host varchar(128) not null
) ;
-- Fixed: the primary key must be the surrogate serial column `ii`, not `id`.
-- A PK on `id` would forbid a template from having more than one host, which
-- contradicts the composite lookup index (id, host) below.
ALTER TABLE task_tpl_host ADD CONSTRAINT task_tpl_host_pk PRIMARY KEY (ii);
CREATE INDEX task_tpl_host_id_idx ON task_tpl_host (id,host);
COMMENT ON COLUMN task_tpl_host.id IS 'task tpl id';
COMMENT ON COLUMN task_tpl_host.host IS 'ip or hostname';
CREATE TABLE task_record
(
id bigserial,
group_id bigint not null ,
ibex_address varchar(128) not null,
ibex_auth_user varchar(128) not null default '',
ibex_auth_pass varchar(128) not null default '',
title varchar(255) not null default '',
account varchar(64) not null,
batch int not null default 0,
tolerance int not null default 0,
timeout int not null default 0,
pause varchar(255) not null default '',
script text not null,
args varchar(512) not null default '',
create_at bigint not null default 0,
create_by varchar(64) not null default ''
) ;
ALTER TABLE task_record ADD CONSTRAINT task_record_pk PRIMARY KEY (id);
CREATE INDEX task_record_create_at_idx ON task_record (create_at,group_id);
CREATE INDEX task_record_create_by_idx ON task_record (create_by);
COMMENT ON COLUMN task_record.id IS 'ibex task id';
COMMENT ON COLUMN task_record.group_id IS 'busi group id';
-- alerting_engines: registry of n9e alerting-engine instances; one row per
-- instance (unique on `instance`), tagged with the cluster it reads targets for.
CREATE TABLE alerting_engines
(
id bigserial NOT NULL,
instance varchar(128) not null default '',
cluster varchar(128) not null default '',
-- clock: bigint heartbeat value — presumably epoch seconds updated by the
-- instance; confirm against the writer code.
clock bigint not null
) ;
ALTER TABLE alerting_engines ADD CONSTRAINT alerting_engines_pk PRIMARY KEY (id);
ALTER TABLE alerting_engines ADD CONSTRAINT alerting_engines_un UNIQUE (instance);
COMMENT ON COLUMN alerting_engines.instance IS 'instance identification, e.g. 10.9.0.9:9090';
COMMENT ON COLUMN alerting_engines.cluster IS 'target reader cluster';

View File

@@ -1,28 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="32px" height="32px" viewBox="0 0 32 32" version="1.1">
<!-- Generator: Sketch 54.1 (76490) - https://sketchapp.com -->
<title>icon / product-logo / 32x32px / elasticsearch / color</title>
<desc>Created with Sketch.</desc>
<defs>
<polygon id="path-1" points="0.6438 0.0005 27.479 0.0005 27.479 9.0005 0.6438 9.0005"/>
<polygon id="path-3" points="0.6437 0.0004 27.479 0.0004 27.479 9 0.6437 9"/>
</defs>
<g id="icon-/-product-logo-/-32x32px-/-elasticsearch-/-color" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="Group-9" transform="translate(1.000000, 0.000000)">
<path d="M0,16.0004 C0,17.3844 0.194,18.7194 0.524,20.0004 L20,20.0004 C22.209,20.0004 24,18.2094 24,16.0004 C24,13.7904 22.209,12.0004 20,12.0004 L0.524,12.0004 C0.194,13.2804 0,14.6164 0,16.0004" id="Fill-1" fill="#343741"/>
<g id="Group-5" transform="translate(1.000000, 0.000000)">
<mask id="mask-2" fill="white">
<use xlink:href="#path-1"/>
</mask>
<g id="Clip-4"/>
<path d="M25.9238,7.6615 C26.4828,7.1465 27.0028,6.5935 27.4798,6.0005 C24.5468,2.3455 20.0498,0.0005 14.9998,0.0005 C8.6788,0.0005 3.2388,3.6775 0.6438,9.0005 L22.5108,9.0005 C23.7768,9.0005 24.9938,8.5195 25.9238,7.6615" id="Fill-3" fill="#FEC514" mask="url(#mask-2)"/>
</g>
<g id="Group-8" transform="translate(1.000000, 23.000000)">
<mask id="mask-4" fill="white">
<use xlink:href="#path-3"/>
</mask>
<g id="Clip-7"/>
<path d="M22.5107,0.0004 L0.6437,0.0004 C3.2397,5.3224 8.6787,9.0004 14.9997,9.0004 C20.0497,9.0004 24.5467,6.6544 27.4797,3.0004 C27.0027,2.4064 26.4827,1.8534 25.9237,1.3384 C24.9937,0.4794 23.7767,0.0004 22.5107,0.0004" id="Fill-6" fill="#00BFB3" mask="url(#mask-4)"/>
</g>
</g>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 2.0 KiB

View File

@@ -1,112 +0,0 @@
{
"name": "http detect",
"tags": "",
"ident": "",
"configs": {
"version": "2.0.0",
"panels": [
{
"id": "0cd7c8aa-456c-4522-97ef-0b1710e7af8a",
"type": "row",
"name": "Default chart group",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 0,
"i": "0cd7c8aa-456c-4522-97ef-0b1710e7af8a"
},
"collapsed": true
},
{
"targets": [
{
"refId": "A",
"expr": "max(http_response_result_code) by (target)",
"legend": "UP?"
},
{
"expr": "max(http_response_response_code) by (target)",
"refId": "B",
"legend": "status code"
},
{
"expr": "max(http_response_response_time) by (target)",
"refId": "C",
"legend": "latency(s)"
},
{
"expr": "max(http_response_cert_expire_timestamp) by (target) - time()",
"refId": "D",
"legend": "cert expire"
}
],
"name": "URL Details",
"custom": {
"showHeader": true,
"calc": "lastNotNull",
"displayMode": "labelValuesToRows",
"aggrDimension": "target"
},
"options": {
"valueMappings": [],
"standardOptions": {}
},
"overrides": [
{
"properties": {
"valueMappings": [
{
"type": "special",
"match": {
"special": 0
},
"result": {
"text": "UP",
"color": "#417505"
}
},
{
"type": "range",
"match": {
"special": 1,
"from": 1
},
"result": {
"text": "DOWN",
"color": "#e90f0f"
}
}
],
"standardOptions": {}
},
"matcher": {
"value": "A"
}
},
{
"type": "special",
"matcher": {
"value": "D"
},
"properties": {
"standardOptions": {
"util": "humantimeSeconds"
}
}
}
],
"version": "2.0.0",
"type": "table",
"layout": {
"h": 15,
"w": 24,
"x": 0,
"y": 1,
"i": "3674dbfa-243a-49f6-baa5-b7f887c1afb0"
},
"id": "3674dbfa-243a-49f6-baa5-b7f887c1afb0"
}
]
}
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.4 KiB

View File

@@ -1,976 +0,0 @@
{
"name": "JMX",
"tags": "Prometheus JMX",
"ident": "",
"configs": {
"var": [
{
"name": "job",
"definition": "jmx_exporter"
},
{
"name": "instance",
"definition": "label_values(jmx_scrape_error,instance)"
}
],
"version": "2.0.0",
"panels": [
{
"id": "a26c5c3d-7b60-4746-bd1f-ca95581cf2fd",
"type": "row",
"name": "Basic Info",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 0,
"i": "a26c5c3d-7b60-4746-bd1f-ca95581cf2fd"
},
"collapsed": true
},
{
"targets": [
{
"refId": "A",
"expr": "up{job=\"$job\", instance=\"$instance\"}"
}
],
"name": "Status",
"custom": {
"textMode": "value",
"colorMode": "value",
"calc": "lastNotNull",
"colSpan": 1,
"textSize": {}
},
"options": {
"valueMappings": [
{
"type": "special",
"match": {
"special": 1
},
"result": {
"text": "UP",
"color": "#1eac02"
}
},
{
"type": "special",
"match": {
"special": 0
},
"result": {
"text": "DOWN",
"color": "#f00a0a"
}
}
],
"standardOptions": {}
},
"version": "2.0.0",
"type": "stat",
"layout": {
"h": 3,
"w": 6,
"x": 0,
"y": 1,
"i": "0721ee76-816b-469f-9c49-2bef94a9299e"
},
"id": "0721ee76-816b-469f-9c49-2bef94a9299e"
},
{
"targets": [
{
"refId": "A",
"expr": "time() - process_start_time_seconds{job=\"$job\",instance=\"$instance\"}"
}
],
"name": "Uptime",
"custom": {
"textMode": "value",
"colorMode": "value",
"calc": "lastNotNull",
"colSpan": 1,
"textSize": {}
},
"options": {
"standardOptions": {
"util": "humantimeSeconds"
}
},
"version": "2.0.0",
"type": "stat",
"layout": {
"h": 3,
"w": 6,
"x": 6,
"y": 1,
"i": "a55c40fc-dc25-4d2a-8e99-928e02c5ff5d"
},
"id": "a55c40fc-dc25-4d2a-8e99-928e02c5ff5d"
},
{
"targets": [
{
"refId": "A",
"expr": "os_available_processors{job=\"$job\",instance=\"$instance\"}"
}
],
"name": "Available CPUs",
"custom": {
"textMode": "value",
"colorMode": "value",
"calc": "lastNotNull",
"colSpan": 1,
"textSize": {}
},
"options": {
"standardOptions": {}
},
"version": "2.0.0",
"type": "stat",
"layout": {
"h": 3,
"w": 6,
"x": 12,
"y": 1,
"i": "60c3389c-808d-4412-b74b-cb762e89a8ad"
},
"id": "60c3389c-808d-4412-b74b-cb762e89a8ad"
},
{
"targets": [
{
"refId": "A",
"expr": "os_open_file_descriptor_count{job=\"$job\",instance=\"$instance\"}"
}
],
"name": "Open file descriptors",
"custom": {
"textMode": "value",
"colorMode": "value",
"calc": "lastNotNull",
"colSpan": 1,
"textSize": {}
},
"options": {
"standardOptions": {}
},
"version": "2.0.0",
"type": "stat",
"layout": {
"h": 3,
"w": 6,
"x": 18,
"y": 1,
"i": "1c9a8cca-3578-485e-837d-21618d383065"
},
"id": "1c9a8cca-3578-485e-837d-21618d383065"
},
{
"id": "705c90e0-e8b6-4f1c-b35c-c8a785009a20",
"type": "row",
"name": "JVM Memory",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 4,
"i": "705c90e0-e8b6-4f1c-b35c-c8a785009a20"
},
"collapsed": true
},
{
"targets": [
{
"refId": "A",
"expr": "jvm_memory_bytes_used{area=\"heap\",job=\"$job\",instance=\"$instance\"}",
"legend": "Used"
},
{
"expr": "jvm_memory_bytes_max{area=\"heap\",job=\"$job\",instance=\"$instance\"}",
"refId": "B",
"legend": "Max"
}
],
"name": "JVM Memory(heap)",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "bytesIEC",
"decimals": 1
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 0,
"y": 5,
"i": "5455e2f2-f6bb-4888-9d88-240d7e12cce2"
},
"id": "5455e2f2-f6bb-4888-9d88-240d7e12cce2"
},
{
"targets": [
{
"refId": "A",
"expr": "jvm_memory_bytes_used{area=\"nonheap\",job=\"$job\",instance=\"$instance\"}",
"legend": "Used"
},
{
"expr": "jvm_memory_bytes_max{area=\"nonheap\",job=\"$job\",instance=\"$instance\"}",
"refId": "B",
"legend": "Max"
}
],
"name": "JVM Memory(nonheap)",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "bytesIEC",
"decimals": 1
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 12,
"y": 5,
"i": "765b22a9-1ddc-4c08-8758-684e3c13252b"
},
"id": "765b22a9-1ddc-4c08-8758-684e3c13252b"
},
{
"id": "c43aa6f5-7252-400f-bb9f-8c96e436151c",
"type": "row",
"name": "Memory Pool",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 12,
"i": "c43aa6f5-7252-400f-bb9f-8c96e436151c"
},
"collapsed": true
},
{
"targets": [
{
"refId": "A",
"expr": "jvm_memory_pool_bytes_max{pool=\"CodeHeap 'non-nmethods'\", job=\"$job\",instance=\"$instance\"}",
"legend": "Max"
},
{
"expr": "jvm_memory_pool_bytes_used{pool=\"CodeHeap 'non-nmethods'\", job=\"$job\",instance=\"$instance\"}",
"refId": "B",
"legend": "Used"
},
{
"expr": "jvm_memory_pool_bytes_committed{pool=\"CodeHeap 'non-nmethods'\", job=\"$job\",instance=\"$instance\"}",
"refId": "C",
"legend": "Committed"
}
],
"name": "CodeHeap 'non-nmethods'",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "bytesIEC",
"decimals": 1
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 6,
"x": 0,
"y": 13,
"i": "5ab2434c-a905-43c1-a563-4cee2dc9dce9"
},
"id": "5ab2434c-a905-43c1-a563-4cee2dc9dce9"
},
{
"targets": [
{
"refId": "A",
"expr": "jvm_memory_pool_bytes_max{pool=\"CodeHeap 'profiled nmethods'\", job=\"$job\",instance=\"$instance\"}",
"legend": "Max"
},
{
"expr": "jvm_memory_pool_bytes_used{pool=\"CodeHeap 'profiled nmethods'\", job=\"$job\",instance=\"$instance\"}",
"refId": "B",
"legend": "Used"
},
{
"expr": "jvm_memory_pool_bytes_committed{pool=\"CodeHeap 'profiled nmethods'\", job=\"$job\",instance=\"$instance\"}",
"refId": "C",
"legend": "Committed"
}
],
"name": "CodeHeap 'profiled nmethods'",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "bytesIEC",
"decimals": 1
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 6,
"x": 6,
"y": 13,
"i": "bfe16d07-91ff-44e6-87bc-9d5d93d2ebd6"
},
"id": "bfe16d07-91ff-44e6-87bc-9d5d93d2ebd6"
},
{
"targets": [
{
"refId": "A",
"expr": "jvm_memory_pool_bytes_max{pool=\"CodeHeap 'non-profiled nmethods'\", job=\"$job\",instance=\"$instance\"}",
"legend": "Max"
},
{
"expr": "jvm_memory_pool_bytes_used{pool=\"CodeHeap 'non-profiled nmethods'\", job=\"$job\",instance=\"$instance\"}",
"refId": "B",
"legend": "Used"
},
{
"expr": "jvm_memory_pool_bytes_committed{pool=\"CodeHeap 'non-profiled nmethods'\", job=\"$job\",instance=\"$instance\"}",
"refId": "C",
"legend": "Committed"
}
],
"name": "CodeHeap 'non-profiled nmethods'",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "bytesIEC",
"decimals": 1
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 6,
"x": 12,
"y": 13,
"i": "18d10f97-5ab2-41c4-a3ad-09f2c7a03e1a"
},
"id": "18d10f97-5ab2-41c4-a3ad-09f2c7a03e1a"
},
{
"targets": [
{
"refId": "A",
"expr": "jvm_memory_pool_bytes_max{pool=\"G1 Eden Space\", job=\"$job\",instance=\"$instance\"}",
"legend": "Max"
},
{
"expr": "jvm_memory_pool_bytes_used{pool=\"G1 Eden Space\", job=\"$job\",instance=\"$instance\"}",
"refId": "B",
"legend": "Used"
},
{
"expr": "jvm_memory_pool_bytes_committed{pool=\"G1 Eden Space\", job=\"$job\",instance=\"$instance\"}",
"refId": "C",
"legend": "Committed"
}
],
"name": "G1 Eden Space",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "bytesIEC",
"decimals": 1
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 6,
"x": 18,
"y": 13,
"i": "314a3893-c1d4-4f85-bce0-33ecfda2f521"
},
"id": "314a3893-c1d4-4f85-bce0-33ecfda2f521"
},
{
"targets": [
{
"refId": "A",
"expr": "jvm_memory_pool_bytes_max{pool=\"Compressed Class Space\", job=\"$job\",instance=\"$instance\"}",
"legend": "Max"
},
{
"expr": "jvm_memory_pool_bytes_used{pool=\"Compressed Class Space\", job=\"$job\",instance=\"$instance\"}",
"refId": "B",
"legend": "Used"
},
{
"expr": "jvm_memory_pool_bytes_committed{pool=\"Compressed Class Space\", job=\"$job\",instance=\"$instance\"}",
"refId": "C",
"legend": "Committed"
}
],
"name": "Compressed Class Space",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "bytesIEC",
"decimals": 1
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 6,
"x": 0,
"y": 15,
"i": "1e5f03e7-af5d-447b-9c1b-23d81915e8df"
},
"id": "1e5f03e7-af5d-447b-9c1b-23d81915e8df"
},
{
"targets": [
{
"refId": "A",
"expr": "jvm_memory_pool_bytes_max{pool=\"G1 Survivor Space\", job=\"$job\",instance=\"$instance\"}",
"legend": "Max"
},
{
"expr": "jvm_memory_pool_bytes_used{pool=\"G1 Survivor Space\", job=\"$job\",instance=\"$instance\"}",
"refId": "B",
"legend": "Used"
},
{
"expr": "jvm_memory_pool_bytes_committed{pool=\"G1 Survivor Space\", job=\"$job\",instance=\"$instance\"}",
"refId": "C",
"legend": "Committed"
}
],
"name": "G1 Survivor Space",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "bytesIEC",
"decimals": 1
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 6,
"x": 6,
"y": 15,
"i": "86a68ff6-238c-4fc9-b77e-3b964e564500"
},
"id": "86a68ff6-238c-4fc9-b77e-3b964e564500"
},
{
"targets": [
{
"refId": "A",
"expr": "jvm_memory_pool_bytes_max{pool=\"G1 Old Gen\", job=\"$job\",instance=\"$instance\"}",
"legend": "Max"
},
{
"expr": "jvm_memory_pool_bytes_used{pool=\"G1 Old Gen\", job=\"$job\",instance=\"$instance\"}",
"refId": "B",
"legend": "Used"
},
{
"expr": "jvm_memory_pool_bytes_committed{pool=\"G1 Old Gen\", job=\"$job\",instance=\"$instance\"}",
"refId": "C",
"legend": "Committed"
}
],
"name": "G1 Old Gen",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "bytesIEC",
"decimals": 1
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 6,
"x": 12,
"y": 15,
"i": "595af7d1-e53c-43b5-8f62-ddb9b3a4ffcb"
},
"id": "595af7d1-e53c-43b5-8f62-ddb9b3a4ffcb"
},
{
"targets": [
{
"refId": "A",
"expr": "jvm_memory_pool_bytes_max{pool=\"Metaspace\", job=\"$job\",instance=\"$instance\"}",
"legend": "Max"
},
{
"expr": "jvm_memory_pool_bytes_used{pool=\"Metaspace\", job=\"$job\",instance=\"$instance\"}",
"refId": "B",
"legend": "Used"
},
{
"expr": "jvm_memory_pool_bytes_committed{pool=\"Metaspace\", job=\"$job\",instance=\"$instance\"}",
"refId": "C",
"legend": "Committed"
}
],
"name": "Metaspace",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "bytesIEC",
"decimals": 1
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 6,
"x": 18,
"y": 15,
"i": "380fdfcb-16a6-4131-abaa-a3911b7de6fa"
},
"id": "380fdfcb-16a6-4131-abaa-a3911b7de6fa"
},
{
"id": "0aaf3516-4938-41e3-b7cb-323de6de75d9",
"type": "row",
"name": "GC",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 22,
"i": "0aaf3516-4938-41e3-b7cb-323de6de75d9"
},
"collapsed": true
},
{
"targets": [
{
"refId": "A",
"expr": "increase(jvm_gc_collection_seconds_sum{job=\"$job\",instance=~\"$instance\"}[1m])"
}
],
"name": "过去一分钟GC耗时",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "none"
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 8,
"x": 0,
"y": 23,
"i": "5303bda0-47c2-4aca-bb12-1da512500f4a"
},
"id": "5303bda0-47c2-4aca-bb12-1da512500f4a"
},
{
"targets": [
{
"refId": "A",
"expr": "increase(jvm_gc_collection_seconds_count{job=\"$job\",instance=\"$instance\"}[1m])",
"legend": ""
}
],
"name": "过去一分钟GC次数",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "none"
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 8,
"x": 8,
"y": 23,
"i": "cf410459-b5df-4aca-a410-ecda091d6097"
},
"id": "cf410459-b5df-4aca-a410-ecda091d6097"
},
{
"targets": [
{
"refId": "A",
"expr": "increase(jvm_gc_collection_seconds_sum{job=\"$job\",instance=\"$instance\"}[1m])/increase(jvm_gc_collection_seconds_count{job=\"$job\",instance=\"$instance\"}[1m])",
"legend": ""
}
],
"name": "过去一分钟每次GC平均耗时",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "none"
},
"thresholds": {}
},
"custom": {
"drawStyle": "bars",
"stack": "off",
"fillOpacity": 1
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 8,
"x": 16,
"y": 23,
"i": "30feb928-b7c3-4e71-aeeb-cc10994b313c"
},
"id": "30feb928-b7c3-4e71-aeeb-cc10994b313c"
},
{
"id": "fd6d0772-40d7-4211-b9bb-601e35fb6431",
"type": "row",
"name": "Threads and Class loading",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 30,
"i": "fd6d0772-40d7-4211-b9bb-601e35fb6431"
},
"collapsed": true
},
{
"targets": [
{
"refId": "A",
"expr": "jvm_threads_current{job=\"$job\",instance=\"$instance\"}",
"legend": "current"
},
{
"expr": "jvm_threads_daemon{job=\"$job\",instance=\"$instance\"}",
"refId": "B",
"legend": "daemon"
},
{
"expr": "jvm_threads_deadlocked{job=\"$job\",instance=\"$instance\"}",
"refId": "C",
"legend": "deadlocked"
}
],
"name": "Threads",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 0,
"y": 31,
"i": "65c74a2b-5f01-4491-b45a-dffe4a9b678a"
},
"id": "65c74a2b-5f01-4491-b45a-dffe4a9b678a"
},
{
"targets": [
{
"refId": "A",
"expr": "jvm_classes_loaded{job=\"$job\", instance=\"$instance\"}"
}
],
"name": "Class loading",
"options": {
"tooltip": {
"mode": "all",
"sort": "none"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 12,
"y": 31,
"i": "2da16907-adf7-4561-9338-4254c89a311b"
},
"id": "2da16907-adf7-4561-9338-4254c89a311b"
},
{
"id": "12fe119e-54f0-4219-9846-ac982c1e9b4d",
"type": "row",
"name": "Physical memory",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 38,
"i": "12fe119e-54f0-4219-9846-ac982c1e9b4d"
},
"collapsed": true
},
{
"targets": [
{
"refId": "A",
"expr": "os_total_physical_memory_bytes{job=\"$job\",instance=\"$instance\"}",
"legend": "Total physical memory"
},
{
"expr": "os_committed_virtual_memory_bytes{job=\"$job\",instance=\"$instance\"}",
"refId": "B",
"legend": "Committed virtual memory"
},
{
"expr": "os_free_physical_memory_bytes{job=\"$job\",instance=\"$instance\"}",
"refId": "C",
"legend": "Free physical memory"
}
],
"name": "Physical memory",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "bytesIEC",
"decimals": 1
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 24,
"x": 0,
"y": 39,
"i": "5a859147-edfc-4dac-9457-8a928213bc00"
},
"id": "5a859147-edfc-4dac-9457-8a928213bc00"
}
]
}
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.6 KiB

View File

@@ -1,359 +0,0 @@
{
"name": "Kafka - exporter",
"tags": "Kafka Prometheus ",
"ident": "",
"configs": {
"var": [
{
"name": "instance",
"definition": "label_values(kafka_brokers, instance)"
},
{
"name": "job",
"definition": "label_values(kafka_brokers, job)"
}
],
"version": "2.0.0",
"panels": [
{
"id": "a3ac9979-6e3a-42ae-9d52-ebddb8960dc4",
"type": "row",
"name": "overview",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 0,
"i": "a3ac9979-6e3a-42ae-9d52-ebddb8960dc4"
},
"collapsed": true
},
{
"targets": [
{
"refId": "A",
"expr": "count(count by (topic) (kafka_topic_partitions))"
}
],
"name": "topics",
"custom": {
"textMode": "value",
"colorMode": "value",
"calc": "lastNotNull",
"colSpan": 1,
"textSize": {
"value": 50
}
},
"options": {
"standardOptions": {}
},
"version": "2.0.0",
"type": "stat",
"layout": {
"h": 3,
"w": 8,
"x": 8,
"y": 1,
"i": "ed68dc7b-4f01-4aef-ab10-20158aadfab7"
},
"id": "ed68dc7b-4f01-4aef-ab10-20158aadfab7"
},
{
"targets": [
{
"refId": "A",
"expr": "kafka_brokers"
}
],
"name": "brokers",
"custom": {
"textMode": "value",
"colorMode": "value",
"calc": "lastNotNull",
"colSpan": 1,
"textSize": {
"value": 50
}
},
"options": {
"standardOptions": {}
},
"version": "2.0.0",
"type": "stat",
"layout": {
"h": 3,
"w": 8,
"x": 0,
"y": 1,
"i": "3678c9d7-cb0a-4114-a0cd-7a06b976f6b8"
},
"id": "3678c9d7-cb0a-4114-a0cd-7a06b976f6b8"
},
{
"targets": [
{
"refId": "A",
"expr": "sum(kafka_topic_partitions)"
}
],
"name": "partitions",
"custom": {
"textMode": "value",
"colorMode": "value",
"calc": "lastNotNull",
"colSpan": 1,
"textSize": {
"value": 50
}
},
"options": {
"standardOptions": {}
},
"version": "2.0.0",
"type": "stat",
"layout": {
"h": 3,
"w": 8,
"x": 16,
"y": 1,
"i": "8adb0df0-13bc-452a-ac63-209ae3748d77"
},
"id": "8adb0df0-13bc-452a-ac63-209ae3748d77"
},
{
"id": "7071dc1f-9410-4899-9c43-206a11bfaab2",
"type": "row",
"name": "throughput",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 4,
"i": "7071dc1f-9410-4899-9c43-206a11bfaab2"
},
"collapsed": true
},
{
"targets": [
{
"expr": "sum(rate(kafka_topic_partition_current_offset{instance=\"$instance\"}[1m])) by (topic)"
}
],
"name": "Message in per second",
"options": {
"tooltip": {
"mode": "all",
"sort": "none"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 0,
"y": 5,
"i": "b68719ad-ba54-4326-a956-43acaef10e2e"
},
"id": "b68719ad-ba54-4326-a956-43acaef10e2e"
},
{
"targets": [
{
"expr": "sum(kafka_consumer_lag_millis{instance=\"$instance\"}) by (consumergroup, topic) ",
"legend": "{{consumergroup}} (topic: {{topic}})"
}
],
"name": "Latency by Consumer Group",
"options": {
"tooltip": {
"mode": "all",
"sort": "none"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "humantimeMilliseconds"
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 0,
"y": 7,
"i": "bfd08ec7-a539-4c5e-8499-4e5c437b97d7"
},
"id": "bfd08ec7-a539-4c5e-8499-4e5c437b97d7"
},
{
"targets": [
{
"expr": "sum(rate(kafka_consumergroup_current_offset{instance=\"$instance\"}[1m])) by (topic)"
}
],
"name": "Message consume per second",
"options": {
"tooltip": {
"mode": "all",
"sort": "none"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 12,
"y": 5,
"i": "9a42427a-0e01-432e-838d-a6baca6c42b2"
},
"id": "9a42427a-0e01-432e-838d-a6baca6c42b2"
},
{
"targets": [
{
"expr": "sum(kafka_topic_partition_current_offset{instance=\"$instance\"}) by (topic) - sum(kafka_consumergroup_current_offset{instance=\"$instance\"}) by (topic) ",
"legend": "{{consumergroup}} (topic: {{topic}})"
}
],
"name": "Lag by Consumer Group",
"options": {
"tooltip": {
"mode": "all",
"sort": "none"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 12,
"y": 7,
"i": "7324f196-467b-4590-ae47-d56be683a0c3"
},
"id": "7324f196-467b-4590-ae47-d56be683a0c3"
},
{
"id": "bd4d2d51-7b4d-4523-b586-0bf2b248d4d4",
"type": "row",
"name": "patition/replicate",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 14,
"i": "bd4d2d51-7b4d-4523-b586-0bf2b248d4d4"
},
"collapsed": true
},
{
"targets": [
{
"refId": "A",
"expr": "kafka_topic_partitions{instance=\"$instance\"}",
"legend": "{{topic}}"
}
],
"name": "Partitions per Topic",
"custom": {
"showHeader": true,
"calc": "lastNotNull",
"displayMode": "seriesToRows"
},
"options": {
"standardOptions": {}
},
"overrides": [
{}
],
"version": "2.0.0",
"type": "table",
"layout": {
"h": 7,
"w": 12,
"x": 0,
"y": 15,
"i": "04d1f6cc-40ec-4584-be17-a4d10cd5b6e9"
},
"id": "04d1f6cc-40ec-4584-be17-a4d10cd5b6e9"
},
{
"targets": [
{
"refId": "A",
"expr": "kafka_topic_partition_under_replicated_partition",
"legend": "{{topic}}-{{partition}}"
}
],
"name": "Under Replicated",
"description": "副本不同步预案\n1. Restart the Zookeeper leader.\n2. Restart the broker\\brokers that are not replicating some of the partitions.",
"custom": {
"showHeader": true,
"calc": "lastNotNull",
"displayMode": "seriesToRows"
},
"options": {
"standardOptions": {}
},
"overrides": [
{}
],
"version": "2.0.0",
"type": "table",
"layout": {
"h": 7,
"w": 12,
"x": 12,
"y": 15,
"i": "5b589c1c-fd35-4ce5-8b24-c0e05d307345"
},
"id": "5b589c1c-fd35-4ce5-8b24-c0e05d307345"
}
]
}
}

View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="64" height="64" viewBox="0 0 32 32" preserveAspectRatio="xMidYMid"><path d="M21.538 17.724a4.16 4.16 0 0 0-3.128 1.42l-1.96-1.388c.208-.573.328-1.188.328-1.832a5.35 5.35 0 0 0-.317-1.802l1.956-1.373a4.16 4.16 0 0 0 3.122 1.414 4.18 4.18 0 0 0 4.172-4.172 4.18 4.18 0 0 0-4.172-4.172 4.18 4.18 0 0 0-4.172 4.172c0 .412.062.8.174 1.185l-1.957 1.374c-.818-1.014-1.995-1.723-3.336-1.94V8.25a4.18 4.18 0 0 0 3.313-4.082A4.18 4.18 0 0 0 11.388 0a4.18 4.18 0 0 0-4.172 4.172c0 1.98 1.387 3.637 3.24 4.063v2.4C7.928 11.067 6 13.273 6 15.925c0 2.665 1.947 4.88 4.493 5.308v2.523c-1.87.4-3.276 2.08-3.276 4.072A4.18 4.18 0 0 0 11.388 32a4.18 4.18 0 0 0 4.172-4.172c0-1.993-1.405-3.66-3.276-4.072v-2.523c1.315-.22 2.47-.916 3.28-1.907l1.973 1.397a4.15 4.15 0 0 0-.171 1.173 4.18 4.18 0 0 0 4.172 4.172 4.18 4.18 0 0 0 4.172-4.172 4.18 4.18 0 0 0-4.172-4.172zm0-9.754c1.115 0 2.022.908 2.022 2.023s-.907 2.022-2.022 2.022-2.022-.907-2.022-2.022.907-2.023 2.022-2.023zM9.366 4.172c0-1.115.907-2.022 2.023-2.022s2.022.907 2.022 2.022-.907 2.022-2.022 2.022-2.023-.907-2.023-2.022zM13.41 27.83c0 1.115-.907 2.022-2.022 2.022s-2.023-.907-2.023-2.022.907-2.022 2.023-2.022 2.022.907 2.022 2.022zm-2.023-9.082c-1.556 0-2.82-1.265-2.82-2.82s1.265-2.82 2.82-2.82 2.82 1.265 2.82 2.82-1.265 2.82-2.82 2.82zm10.15 5.172c-1.115 0-2.022-.908-2.022-2.023s.907-2.022 2.022-2.022 2.022.907 2.022 2.022-.907 2.023-2.022 2.023z"/></svg>

Before

Width:  |  Height:  |  Size: 1.4 KiB

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

Binary file not shown.

Before

Width:  |  Height:  |  Size: 7.7 KiB

File diff suppressed because it is too large Load Diff

Binary file not shown.

Before

Width:  |  Height:  |  Size: 6.2 KiB

View File

@@ -1,838 +0,0 @@
{
"name": "MySQL Overview by categraf",
"tags": "Prometheus MySQL",
"ident": "",
"configs": {
"var": [
{
"name": "instance",
"definition": "label_values(mysql_global_status_uptime, instance)"
}
],
"version": "2.0.0",
"panels": [
{
"id": "fe0e2a5d-4e82-4eaf-b13a-6d98aa6b6860",
"type": "row",
"name": "Basic Info",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 0,
"i": "fe0e2a5d-4e82-4eaf-b13a-6d98aa6b6860"
},
"collapsed": true
},
{
"targets": [
{
"expr": "min(mysql_global_status_uptime{instance=~\"$instance\"})"
}
],
"name": "MySQL Uptime",
"custom": {
"textMode": "value",
"colorMode": "value",
"calc": "lastNotNull",
"colSpan": 1,
"textSize": {}
},
"options": {
"valueMappings": [
{
"type": "range",
"match": {
"to": 1800
},
"result": {
"color": "#ec7718"
}
},
{
"type": "range",
"match": {
"from": 1800
},
"result": {
"color": "#369603"
}
}
],
"standardOptions": {
"util": "humantimeSeconds"
}
},
"version": "2.0.0",
"type": "stat",
"layout": {
"h": 3,
"w": 6,
"x": 0,
"y": 1,
"i": "80079949-dbff-48fe-a1eb-54b646c30135"
},
"id": "80079949-dbff-48fe-a1eb-54b646c30135"
},
{
"targets": [
{
"expr": "rate(mysql_global_status_queries{instance=~\"$instance\"}[5m])"
}
],
"name": "Current QPS",
"description": "mysql_global_status_queries",
"custom": {
"textMode": "value",
"colorMode": "value",
"calc": "lastNotNull",
"colSpan": 1,
"textSize": {}
},
"options": {
"valueMappings": [
{
"type": "range",
"match": {
"to": 100
},
"result": {
"color": "#05a31f"
}
},
{
"type": "range",
"match": {
"from": 100
},
"result": {
"color": "#ea3939"
}
}
],
"standardOptions": {
"decimals": 2
}
},
"version": "2.0.0",
"type": "stat",
"layout": {
"h": 3,
"w": 6,
"x": 6,
"y": 1,
"i": "9fd6dd09-d131-4c0e-88ea-ed62c72baf97"
},
"id": "9fd6dd09-d131-4c0e-88ea-ed62c72baf97"
},
{
"targets": [
{
"expr": "avg(mysql_global_variables_innodb_buffer_pool_size{instance=~\"$instance\"})"
}
],
"name": "InnoDB Buffer Pool",
"description": "**InnoDB Buffer Pool Size**\n\nInnoDB maintains a storage area called the buffer pool for caching data and indexes in memory. Knowing how the InnoDB buffer pool works, and taking advantage of it to keep frequently accessed data in memory, is one of the most important aspects of MySQL tuning. The goal is to keep the working set in memory. In most cases, this should be between 60%-90% of available memory on a dedicated database host, but depends on many factors.",
"custom": {
"textMode": "value",
"colorMode": "value",
"calc": "lastNotNull",
"colSpan": 1,
"textSize": {}
},
"options": {
"standardOptions": {
"util": "bytesIEC"
}
},
"version": "2.0.0",
"type": "stat",
"layout": {
"h": 3,
"w": 6,
"x": 12,
"y": 1,
"i": "24913190-b86d-44b7-a8db-555351d9d3c2"
},
"id": "24913190-b86d-44b7-a8db-555351d9d3c2"
},
{
"targets": [
{
"expr": "sum(increase(mysql_global_status_table_locks_waited{instance=~\"$instance\"}[5m]))"
}
],
"name": "Table Locks Waited(5min)",
"description": "**Table Locks**\n\nMySQL takes a number of different locks for varying reasons. In this graph we see how many Table level locks MySQL has requested from the storage engine. In the case of InnoDB, many times the locks could actually be row locks as it only takes table level locks in a few specific cases.\n\nIt is most useful to compare Locks Immediate and Locks Waited. If Locks waited is rising, it means you have lock contention. Otherwise, Locks Immediate rising and falling is normal activity.",
"custom": {
"textMode": "value",
"colorMode": "value",
"calc": "lastNotNull",
"colSpan": 1,
"textSize": {}
},
"options": {
"valueMappings": [
{
"type": "range",
"match": {
"from": 1
},
"result": {
"color": "#e70d0d"
}
},
{
"type": "range",
"match": {
"to": 1
},
"result": {
"color": "#53b503"
}
}
],
"standardOptions": {}
},
"version": "2.0.0",
"type": "stat",
"layout": {
"h": 3,
"w": 6,
"x": 18,
"y": 1,
"i": "94a1e97e-2241-4e05-a9e9-a9b1e69d1070"
},
"id": "94a1e97e-2241-4e05-a9e9-a9b1e69d1070"
},
{
"id": "ca82d30f-8e0d-4caa-8a00-2ed9efe4ad85",
"type": "row",
"name": "Connections",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 4,
"i": "ca82d30f-8e0d-4caa-8a00-2ed9efe4ad85"
},
"collapsed": true
},
{
"targets": [
{
"expr": "sum(mysql_global_status_threads_connected{instance=~\"$instance\"})",
"legend": "Connections"
},
{
"expr": "sum(mysql_global_status_max_used_connections{instance=~\"$instance\"})",
"legend": "Max Used Connections"
},
{
"expr": "sum(mysql_global_variables_max_connections{instance=~\"$instance\"})",
"legend": "Max Connections"
},
{
"expr": "sum(rate(mysql_global_status_aborted_connects{instance=~\"$instance\"}[5m]))",
"legend": "Aborted Connections"
}
],
"name": "MySQL Connections",
"description": "**Max Connections** \n\nMax Connections is the maximum permitted number of simultaneous client connections. By default, this is 151. Increasing this value increases the number of file descriptors that mysqld requires. If the required number of descriptors are not available, the server reduces the value of Max Connections.\n\nmysqld actually permits Max Connections + 1 clients to connect. The extra connection is reserved for use by accounts that have the SUPER privilege, such as root.\n\nMax Used Connections is the maximum number of connections that have been in use simultaneously since the server started.\n\nConnections is the number of connection attempts (successful or not) to the MySQL server.",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 0,
"y": 5,
"i": "e2c85e72-0286-49bc-8ddb-5fba5f449b53"
},
"id": "e2c85e72-0286-49bc-8ddb-5fba5f449b53"
},
{
"targets": [
{
"expr": "sum(mysql_global_status_threads_connected{instance=~\"$instance\"})",
"legend": "Threads Connected"
},
{
"expr": "sum(mysql_global_status_threads_running{instance=~\"$instance\"})",
"legend": "Threads Running"
}
],
"name": "MySQL Client Thread Activity",
"description": "Threads Connected is the number of open connections, while Threads Running is the number of threads not sleeping.",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 12,
"y": 5,
"i": "fbd43ac2-159d-4e55-8bc6-800d1bbfbd59"
},
"id": "fbd43ac2-159d-4e55-8bc6-800d1bbfbd59"
},
{
"id": "cb81def4-ac63-4d42-b66e-440f9061794b",
"type": "row",
"name": "Query Performance",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 12,
"i": "cb81def4-ac63-4d42-b66e-440f9061794b"
},
"collapsed": true
},
{
"targets": [
{
"expr": "sum(rate(mysql_global_status_created_tmp_tables{instance=~\"$instance\"}[5m]))",
"legend": "Created Tmp Tables"
},
{
"expr": "sum(rate(mysql_global_status_created_tmp_disk_tables{instance=~\"$instance\"}[5m]))",
"legend": "Created Tmp Disk Tables"
},
{
"expr": "sum(rate(mysql_global_status_created_tmp_files{instance=~\"$instance\"}[5m]))",
"legend": "Created Tmp Files"
}
],
"name": "MySQL Temporary Objects",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.64,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 0,
"y": 13,
"i": "5fa65a30-a49b-457f-b46a-11d2029188bd"
},
"id": "5fa65a30-a49b-457f-b46a-11d2029188bd"
},
{
"targets": [
{
"expr": "sum(rate(mysql_global_status_select_full_join{ instance=~\"$instance\"}[5m]))",
"legend": "Select Full Join"
},
{
"expr": "sum(rate(mysql_global_status_select_full_range_join{ instance=~\"$instance\"}[5m]))",
"legend": "Select Full Range Join"
},
{
"expr": "sum(rate(mysql_global_status_select_range{ instance=~\"$instance\"}[5m]))",
"legend": "Select Range"
},
{
"expr": "sum(rate(mysql_global_status_select_range_check{ instance=~\"$instance\"}[5m]))",
"legend": "Select Range Check"
},
{
"expr": "sum(rate(mysql_global_status_select_scan{ instance=~\"$instance\"}[5m]))",
"legend": "Select Scan"
}
],
"name": "MySQL Select Types",
"description": "**MySQL Select Types**\n\nAs with most relational databases, selecting based on indexes is more efficient than scanning an entire table's data. Here we see the counters for selects not done with indexes.\n\n* ***Select Scan*** is how many queries caused full table scans, in which all the data in the table had to be read and either discarded or returned.\n* ***Select Range*** is how many queries used a range scan, which means MySQL scanned all rows in a given range.\n* ***Select Full Join*** is the number of joins that are not joined on an index, this is usually a huge performance hit.",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"stack": "off",
"lineInterpolation": "smooth",
"fillOpacity": 0.41
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 12,
"y": 13,
"i": "20efd251-6207-4cec-aa3b-4351e8e9b125"
},
"id": "20efd251-6207-4cec-aa3b-4351e8e9b125"
},
{
"targets": [
{
"expr": "sum(rate(mysql_global_status_sort_rows{instance=~\"$instance\"}[5m]))",
"legend": "Sort Rows"
},
{
"expr": "sum(rate(mysql_global_status_sort_range{instance=~\"$instance\"}[5m]))",
"legend": "Sort Range"
},
{
"expr": "sum(rate(mysql_global_status_sort_merge_passes{instance=~\"$instance\"}[5m]))",
"legend": "Sort Merge Passes"
},
{
"expr": "sum(rate(mysql_global_status_sort_scan{instance=~\"$instance\"}[5m]))",
"legend": "Sort Scan"
}
],
"name": "MySQL Sorts",
"description": "**MySQL Sorts**\n\nDue to a query's structure, order, or other requirements, MySQL sorts the rows before returning them. For example, if a table is ordered 1 to 10 but you want the results reversed, MySQL then has to sort the rows to return 10 to 1.\n\nThis graph also shows when sorts had to scan a whole table or a given range of a table in order to return the results and which could not have been sorted via an index.",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 0,
"y": 15,
"i": "a4d0c5fb-04e0-4627-8722-ae996d70e2aa"
},
"id": "a4d0c5fb-04e0-4627-8722-ae996d70e2aa"
},
{
"targets": [
{
"expr": "sum(rate(mysql_global_status_slow_queries{instance=~\"$instance\"}[5m]))",
"legend": "Slow Queries"
}
],
"name": "MySQL Slow Queries",
"description": "**MySQL Slow Queries**\n\nSlow queries are defined as queries being slower than the long_query_time setting. For example, if you have long_query_time set to 3, all queries that take longer than 3 seconds to complete will show on this graph.",
"options": {
"tooltip": {
"mode": "all",
"sort": "none"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "bars",
"stack": "off",
"fillOpacity": 0.81
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 12,
"y": 15,
"i": "2e13ada4-1128-440d-9360-028f16c3779b"
},
"id": "2e13ada4-1128-440d-9360-028f16c3779b"
},
{
"id": "c9df805c-8ae7-41d7-b28b-575f478fd9ce",
"type": "row",
"name": "Network",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 22,
"i": "c9df805c-8ae7-41d7-b28b-575f478fd9ce"
},
"collapsed": true
},
{
"targets": [
{
"expr": "sum(rate(mysql_global_status_bytes_received{instance=~\"$instance\"}[5m]))",
"legend": "Inbound"
},
{
"expr": "sum(rate(mysql_global_status_bytes_sent{instance=~\"$instance\"}[5m]))",
"legend": "Outbound"
}
],
"name": "MySQL Network Traffic",
"description": "**MySQL Network Traffic**\n\nHere we can see how much network traffic is generated by MySQL. Outbound is network traffic sent from MySQL and Inbound is network traffic MySQL has received.",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "bytesSI",
"decimals": 2
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 24,
"x": 0,
"y": 23,
"i": "6107714f-bedd-437c-b6e4-d6eb74db6d30"
},
"id": "6107714f-bedd-437c-b6e4-d6eb74db6d30"
},
{
"id": "00fd2b70-a133-4ad7-bd56-69a3c91ecf0c",
"type": "row",
"name": "Commands, Handlers",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 30,
"i": "00fd2b70-a133-4ad7-bd56-69a3c91ecf0c"
},
"collapsed": true
},
{
"targets": [
{
"expr": "topk(10, rate(mysql_global_status_commands_total{instance=~\"$instance\"}[5m])>0)",
"legend": "Com_{{command}}"
}
],
"name": "Top Command Counters",
"description": "**Top Command Counters**\n\nThe Com_{{xxx}} statement counter variables indicate the number of times each xxx statement has been executed. There is one status variable for each type of statement. For example, Com_delete and Com_update count [``DELETE``](https://dev.mysql.com/doc/refman/5.7/en/delete.html) and [``UPDATE``](https://dev.mysql.com/doc/refman/5.7/en/update.html) statements, respectively. Com_delete_multi and Com_update_multi are similar but apply to [``DELETE``](https://dev.mysql.com/doc/refman/5.7/en/delete.html) and [``UPDATE``](https://dev.mysql.com/doc/refman/5.7/en/update.html) statements that use multiple-table syntax.",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"decimals": 2
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.2,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 24,
"x": 0,
"y": 31,
"i": "f90ca2bc-0809-45f6-88b6-e258805def04"
},
"id": "f90ca2bc-0809-45f6-88b6-e258805def04"
},
{
"targets": [
{
"expr": "rate(mysql_global_status_handlers_total{instance=~\"$instance\", handler!~\"commit|rollback|savepoint.*|prepare\"}[5m])",
"legend": "{{handler}}"
}
],
"name": "MySQL Handlers",
"description": "**MySQL Handlers**\n\nHandler statistics are internal statistics on how MySQL is selecting, updating, inserting, and modifying rows, tables, and indexes.\n\nThis is in fact the layer between the Storage Engine and MySQL.\n\n* `read_rnd_next` is incremented when the server performs a full table scan and this is a counter you don't really want to see with a high value.\n* `read_key` is incremented when a read is done with an index.\n* `read_next` is incremented when the storage engine is asked to 'read the next index entry'. A high value means a lot of index scans are being done.",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"decimals": 3
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 0,
"y": 33,
"i": "74e1844d-a918-48fa-a29f-6535dc087dac"
},
"id": "74e1844d-a918-48fa-a29f-6535dc087dac"
},
{
"targets": [
{
"expr": "rate(mysql_global_status_handlers_total{instance=~\"$instance\", handler=~\"commit|rollback|savepoint.*|prepare\"}[5m])",
"legend": "{{handler}}"
}
],
"name": "MySQL Transaction Handlers",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 12,
"y": 33,
"i": "b2c3a13d-898f-407b-b6a9-db852072b12f"
},
"id": "b2c3a13d-898f-407b-b6a9-db852072b12f"
},
{
"id": "c32a02da-6c61-4b9e-9365-c0b56088fabc",
"type": "row",
"name": "Open Files",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 40,
"i": "c32a02da-6c61-4b9e-9365-c0b56088fabc"
},
"collapsed": true
},
{
"targets": [
{
"expr": "mysql_global_variables_open_files_limit{instance=~\"$instance\"}",
"legend": "Open Files Limit"
},
{
"expr": "mysql_global_status_open_files{instance=~\"$instance\"}",
"legend": "Open Files"
}
],
"name": "MySQL Open Files",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 24,
"x": 0,
"y": 41,
"i": "fc13eadb-890d-4184-ac16-943d54188db8"
},
"id": "fc13eadb-890d-4184-ac16-943d54188db8"
},
{
"id": "6f596e65-3e4b-4d9a-aad7-a32c8c7b8239",
"type": "row",
"name": "Table Openings",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 48,
"i": "6f596e65-3e4b-4d9a-aad7-a32c8c7b8239"
},
"collapsed": true
},
{
"targets": [
{
"expr": "rate(mysql_global_status_table_open_cache_hits{instance=~\"$instance\"}[5m])\n/\n(\nrate(mysql_global_status_table_open_cache_hits{instance=~\"$instance\"}[5m])\n+\nrate(mysql_global_status_table_open_cache_misses{instance=~\"$instance\"}[5m])\n)",
"legend": "Table Open Cache Hit Ratio"
}
],
"name": "Table Open Cache Hit Ratio Mysql 5.6.6+",
"description": "**MySQL Table Open Cache Status**\n\nThe recommendation is to set the `table_open_cache_instances` to a loose correlation to virtual CPUs, keeping in mind that more instances means the cache is split more times. If you have a cache set to 500 but it has 10 instances, each cache will only have 50 cached.\n\nThe `table_definition_cache` and `table_open_cache` can be left as default as they are auto-sized MySQL 5.6 and above (ie: do not set them to any value).",
"options": {
"tooltip": {
"mode": "all",
"sort": "none"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "percentUnit"
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 0,
"y": 49,
"i": "0b78fbb5-a0b4-4a1b-98b1-af15bc91779d"
},
"id": "0b78fbb5-a0b4-4a1b-98b1-af15bc91779d"
},
{
"targets": [
{
"expr": "mysql_global_status_open_tables{instance=~\"$instance\"}",
"legend": "Open Tables"
},
{
"expr": "mysql_global_variables_table_open_cache{instance=~\"$instance\"}",
"legend": "Table Open Cache"
}
],
"name": "MySQL Open Tables",
"description": "**MySQL Open Tables**\n\nThe recommendation is to set the `table_open_cache_instances` to a loose correlation to virtual CPUs, keeping in mind that more instances means the cache is split more times. If you have a cache set to 500 but it has 10 instances, each cache will only have 50 cached.\n\nThe `table_definition_cache` and `table_open_cache` can be left as default as they are auto-sized MySQL 5.6 and above (ie: do not set them to any value).",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 12,
"y": 49,
"i": "948ad10b-8b22-4d42-9e94-99ef09e12927"
},
"id": "948ad10b-8b22-4d42-9e94-99ef09e12927"
}
]
}
}

View File

@@ -1,838 +0,0 @@
{
"name": "MySQL Overview by exporter",
"tags": "Prometheus MySQL",
"ident": "",
"configs": {
"var": [
{
"name": "instance",
"definition": "label_values(mysql_global_status_uptime, instance)"
}
],
"version": "2.0.0",
"panels": [
{
"id": "a94506f9-879c-41d4-bf0a-0ce479352742",
"type": "row",
"name": "Basic Info",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 0,
"i": "a94506f9-879c-41d4-bf0a-0ce479352742"
},
"collapsed": true
},
{
"targets": [
{
"expr": "min(mysql_global_status_uptime{instance=~\"$instance\"})"
}
],
"name": "MySQL Uptime",
"custom": {
"textMode": "value",
"colorMode": "value",
"calc": "lastNotNull",
"colSpan": 1,
"textSize": {}
},
"options": {
"valueMappings": [
{
"type": "range",
"match": {
"to": 1800
},
"result": {
"color": "#ec7718"
}
},
{
"type": "range",
"match": {
"from": 1800
},
"result": {
"color": "#369603"
}
}
],
"standardOptions": {
"util": "humantimeSeconds"
}
},
"version": "2.0.0",
"type": "stat",
"layout": {
"h": 3,
"w": 6,
"x": 0,
"y": 1,
"i": "c1ed017a-86d8-4ba5-8e75-ce3be943eef9"
},
"id": "c1ed017a-86d8-4ba5-8e75-ce3be943eef9"
},
{
"targets": [
{
"expr": "rate(mysql_global_status_queries{instance=~\"$instance\"}[5m])"
}
],
"name": "Current QPS",
"description": "mysql_global_status_queries",
"custom": {
"textMode": "value",
"colorMode": "value",
"calc": "lastNotNull",
"colSpan": 1,
"textSize": {}
},
"options": {
"valueMappings": [
{
"type": "range",
"match": {
"to": 100
},
"result": {
"color": "#05a31f"
}
},
{
"type": "range",
"match": {
"from": 100
},
"result": {
"color": "#ea3939"
}
}
],
"standardOptions": {
"decimals": 2
}
},
"version": "2.0.0",
"type": "stat",
"layout": {
"h": 3,
"w": 6,
"x": 6,
"y": 1,
"i": "05b0a593-7328-4298-9b5c-af6bd6a34e52"
},
"id": "05b0a593-7328-4298-9b5c-af6bd6a34e52"
},
{
"targets": [
{
"expr": "avg(mysql_global_variables_innodb_buffer_pool_size{instance=~\"$instance\"})"
}
],
"name": "InnoDB Buffer Pool",
"description": "**InnoDB Buffer Pool Size**\n\nInnoDB maintains a storage area called the buffer pool for caching data and indexes in memory. Knowing how the InnoDB buffer pool works, and taking advantage of it to keep frequently accessed data in memory, is one of the most important aspects of MySQL tuning. The goal is to keep the working set in memory. In most cases, this should be between 60%-90% of available memory on a dedicated database host, but depends on many factors.",
"custom": {
"textMode": "value",
"colorMode": "value",
"calc": "lastNotNull",
"colSpan": 1,
"textSize": {}
},
"options": {
"standardOptions": {
"util": "bytesIEC"
}
},
"version": "2.0.0",
"type": "stat",
"layout": {
"h": 3,
"w": 6,
"x": 12,
"y": 1,
"i": "e5388f85-8970-4f64-83e1-e77d4025f1dd"
},
"id": "e5388f85-8970-4f64-83e1-e77d4025f1dd"
},
{
"targets": [
{
"expr": "sum(increase(mysql_global_status_table_locks_waited{instance=~\"$instance\"}[5m]))"
}
],
"name": "Table Locks Waited(5min)",
"description": "**Table Locks**\n\nMySQL takes a number of different locks for varying reasons. In this graph we see how many Table level locks MySQL has requested from the storage engine. In the case of InnoDB, many times the locks could actually be row locks as it only takes table level locks in a few specific cases.\n\nIt is most useful to compare Locks Immediate and Locks Waited. If Locks waited is rising, it means you have lock contention. Otherwise, Locks Immediate rising and falling is normal activity.",
"custom": {
"textMode": "value",
"colorMode": "value",
"calc": "lastNotNull",
"colSpan": 1,
"textSize": {}
},
"options": {
"valueMappings": [
{
"type": "range",
"match": {
"from": 1
},
"result": {
"color": "#e70d0d"
}
},
{
"type": "range",
"match": {
"to": 1
},
"result": {
"color": "#53b503"
}
}
],
"standardOptions": {}
},
"version": "2.0.0",
"type": "stat",
"layout": {
"h": 3,
"w": 6,
"x": 18,
"y": 1,
"i": "ab8a768e-98f3-4215-bfbf-ea838a12b45c"
},
"id": "ab8a768e-98f3-4215-bfbf-ea838a12b45c"
},
{
"id": "24a1be60-6b90-483a-af6f-48cc79830da1",
"type": "row",
"name": "Connections",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 4,
"i": "24a1be60-6b90-483a-af6f-48cc79830da1"
},
"collapsed": true
},
{
"targets": [
{
"expr": "sum(mysql_global_status_threads_connected{instance=~\"$instance\"})",
"legend": "Connections"
},
{
"expr": "sum(mysql_global_status_max_used_connections{instance=~\"$instance\"})",
"legend": "Max Used Connections"
},
{
"expr": "sum(mysql_global_variables_max_connections{instance=~\"$instance\"})",
"legend": "Max Connections"
},
{
"expr": "sum(rate(mysql_global_status_aborted_connects{instance=~\"$instance\"}[5m]))",
"legend": "Aborted Connections"
}
],
"name": "MySQL Connections",
"description": "**Max Connections** \n\nMax Connections is the maximum permitted number of simultaneous client connections. By default, this is 151. Increasing this value increases the number of file descriptors that mysqld requires. If the required number of descriptors are not available, the server reduces the value of Max Connections.\n\nmysqld actually permits Max Connections + 1 clients to connect. The extra connection is reserved for use by accounts that have the SUPER privilege, such as root.\n\nMax Used Connections is the maximum number of connections that have been in use simultaneously since the server started.\n\nConnections is the number of connection attempts (successful or not) to the MySQL server.",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 0,
"y": 5,
"i": "bb31cf5e-1a80-478c-b300-ee9975d14963"
},
"id": "bb31cf5e-1a80-478c-b300-ee9975d14963"
},
{
"targets": [
{
"expr": "sum(mysql_global_status_threads_connected{instance=~\"$instance\"})",
"legend": "Threads Connected"
},
{
"expr": "sum(mysql_global_status_threads_running{instance=~\"$instance\"})",
"legend": "Threads Running"
}
],
"name": "MySQL Client Thread Activity",
"description": "Threads Connected is the number of open connections, while Threads Running is the number of threads not sleeping.",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 12,
"y": 5,
"i": "c1083f59-1e46-442e-a7c3-f5d1fbb78751"
},
"id": "c1083f59-1e46-442e-a7c3-f5d1fbb78751"
},
{
"id": "e126f7dd-df38-4a43-846a-ea6188718de9",
"type": "row",
"name": "Query Performance",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 12,
"i": "e126f7dd-df38-4a43-846a-ea6188718de9"
},
"collapsed": true
},
{
"targets": [
{
"expr": "sum(rate(mysql_global_status_created_tmp_tables{instance=~\"$instance\"}[5m]))",
"legend": "Created Tmp Tables"
},
{
"expr": "sum(rate(mysql_global_status_created_tmp_disk_tables{instance=~\"$instance\"}[5m]))",
"legend": "Created Tmp Disk Tables"
},
{
"expr": "sum(rate(mysql_global_status_created_tmp_files{instance=~\"$instance\"}[5m]))",
"legend": "Created Tmp Files"
}
],
"name": "MySQL Temporary Objects",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.64,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 0,
"y": 13,
"i": "80f94d89-babe-4e38-a220-2490af80e091"
},
"id": "80f94d89-babe-4e38-a220-2490af80e091"
},
{
"targets": [
{
"expr": "sum(rate(mysql_global_status_select_full_join{ instance=~\"$instance\"}[5m]))",
"legend": "Select Full Join"
},
{
"expr": "sum(rate(mysql_global_status_select_full_range_join{ instance=~\"$instance\"}[5m]))",
"legend": "Select Full Range Join"
},
{
"expr": "sum(rate(mysql_global_status_select_range{ instance=~\"$instance\"}[5m]))",
"legend": "Select Range"
},
{
"expr": "sum(rate(mysql_global_status_select_range_check{ instance=~\"$instance\"}[5m]))",
"legend": "Select Range Check"
},
{
"expr": "sum(rate(mysql_global_status_select_scan{ instance=~\"$instance\"}[5m]))",
"legend": "Select Scan"
}
],
"name": "MySQL Select Types",
"description": "**MySQL Select Types**\n\nAs with most relational databases, selecting based on indexes is more efficient than scanning an entire table's data. Here we see the counters for selects not done with indexes.\n\n* ***Select Scan*** is how many queries caused full table scans, in which all the data in the table had to be read and either discarded or returned.\n* ***Select Range*** is how many queries used a range scan, which means MySQL scanned all rows in a given range.\n* ***Select Full Join*** is the number of joins that are not joined on an index, this is usually a huge performance hit.",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"stack": "off",
"lineInterpolation": "smooth",
"fillOpacity": 0.41
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 12,
"y": 13,
"i": "a03b6272-cd60-430c-8128-6bfc8da2938f"
},
"id": "a03b6272-cd60-430c-8128-6bfc8da2938f"
},
{
"targets": [
{
"expr": "sum(rate(mysql_global_status_sort_rows{instance=~\"$instance\"}[5m]))",
"legend": "Sort Rows"
},
{
"expr": "sum(rate(mysql_global_status_sort_range{instance=~\"$instance\"}[5m]))",
"legend": "Sort Range"
},
{
"expr": "sum(rate(mysql_global_status_sort_merge_passes{instance=~\"$instance\"}[5m]))",
"legend": "Sort Merge Passes"
},
{
"expr": "sum(rate(mysql_global_status_sort_scan{instance=~\"$instance\"}[5m]))",
"legend": "Sort Scan"
}
],
"name": "MySQL Sorts",
"description": "**MySQL Sorts**\n\nDue to a query's structure, order, or other requirements, MySQL sorts the rows before returning them. For example, if a table is ordered 1 to 10 but you want the results reversed, MySQL then has to sort the rows to return 10 to 1.\n\nThis graph also shows when sorts had to scan a whole table or a given range of a table in order to return the results and which could not have been sorted via an index.",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 0,
"y": 15,
"i": "d5fbfe0e-fc90-4f2a-b016-7a24a19c73d7"
},
"id": "d5fbfe0e-fc90-4f2a-b016-7a24a19c73d7"
},
{
"targets": [
{
"expr": "sum(rate(mysql_global_status_slow_queries{instance=~\"$instance\"}[5m]))",
"legend": "Slow Queries"
}
],
"name": "MySQL Slow Queries",
"description": "**MySQL Slow Queries**\n\nSlow queries are defined as queries being slower than the long_query_time setting. For example, if you have long_query_time set to 3, all queries that take longer than 3 seconds to complete will show on this graph.",
"options": {
"tooltip": {
"mode": "all",
"sort": "none"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "bars",
"stack": "off",
"fillOpacity": 0.81
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 12,
"y": 15,
"i": "51306ae6-e11a-4c08-a55c-3678676d5d8e"
},
"id": "51306ae6-e11a-4c08-a55c-3678676d5d8e"
},
{
"id": "867ae6c9-b4a4-4349-8e68-56ef9cebf8b4",
"type": "row",
"name": "Network",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 22,
"i": "867ae6c9-b4a4-4349-8e68-56ef9cebf8b4"
},
"collapsed": true
},
{
"targets": [
{
"expr": "sum(rate(mysql_global_status_bytes_received{instance=~\"$instance\"}[5m]))",
"legend": "Inbound"
},
{
"expr": "sum(rate(mysql_global_status_bytes_sent{instance=~\"$instance\"}[5m]))",
"legend": "Outbound"
}
],
"name": "MySQL Network Traffic",
"description": "**MySQL Network Traffic**\n\nHere we can see how much network traffic is generated by MySQL. Outbound is network traffic sent from MySQL and Inbound is network traffic MySQL has received.",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "bytesSI",
"decimals": 2
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 24,
"x": 0,
"y": 23,
"i": "392c15b2-d413-4201-9692-5277f7863c05"
},
"id": "392c15b2-d413-4201-9692-5277f7863c05"
},
{
"id": "e58cb79a-75f2-452f-bc55-b36ff93a60c4",
"type": "row",
"name": "Commands, Handlers",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 30,
"i": "e58cb79a-75f2-452f-bc55-b36ff93a60c4"
},
"collapsed": true
},
{
"targets": [
{
"expr": "topk(10, rate(mysql_global_status_commands_total{instance=~\"$instance\"}[5m])>0)",
"legend": "Com_{{command}}"
}
],
"name": "Top Command Counters",
"description": "**Top Command Counters**\n\nThe Com_{{xxx}} statement counter variables indicate the number of times each xxx statement has been executed. There is one status variable for each type of statement. For example, Com_delete and Com_update count [``DELETE``](https://dev.mysql.com/doc/refman/5.7/en/delete.html) and [``UPDATE``](https://dev.mysql.com/doc/refman/5.7/en/update.html) statements, respectively. Com_delete_multi and Com_update_multi are similar but apply to [``DELETE``](https://dev.mysql.com/doc/refman/5.7/en/delete.html) and [``UPDATE``](https://dev.mysql.com/doc/refman/5.7/en/update.html) statements that use multiple-table syntax.",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"decimals": 2
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.2,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 24,
"x": 0,
"y": 31,
"i": "df2f62e6-5a75-4cea-9268-3077348a6558"
},
"id": "df2f62e6-5a75-4cea-9268-3077348a6558"
},
{
"targets": [
{
"expr": "rate(mysql_global_status_handlers_total{instance=~\"$instance\", handler!~\"commit|rollback|savepoint.*|prepare\"}[5m])",
"legend": "{{handler}}"
}
],
"name": "MySQL Handlers",
"description": "**MySQL Handlers**\n\nHandler statistics are internal statistics on how MySQL is selecting, updating, inserting, and modifying rows, tables, and indexes.\n\nThis is in fact the layer between the Storage Engine and MySQL.\n\n* `read_rnd_next` is incremented when the server performs a full table scan and this is a counter you don't really want to see with a high value.\n* `read_key` is incremented when a read is done with an index.\n* `read_next` is incremented when the storage engine is asked to 'read the next index entry'. A high value means a lot of index scans are being done.",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"decimals": 3
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 0,
"y": 33,
"i": "34ba0da1-d6f0-4c35-8418-56a7506035c5"
},
"id": "34ba0da1-d6f0-4c35-8418-56a7506035c5"
},
{
"targets": [
{
"expr": "rate(mysql_global_status_handlers_total{instance=~\"$instance\", handler=~\"commit|rollback|savepoint.*|prepare\"}[5m])",
"legend": "{{handler}}"
}
],
"name": "MySQL Transaction Handlers",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 12,
"y": 33,
"i": "9e37aa84-a6b6-4730-9fa7-0dab9e596e36"
},
"id": "9e37aa84-a6b6-4730-9fa7-0dab9e596e36"
},
{
"id": "779fdf9a-fcf8-4454-91a4-608950d3fba1",
"type": "row",
"name": "Open Files",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 40,
"i": "779fdf9a-fcf8-4454-91a4-608950d3fba1"
},
"collapsed": true
},
{
"targets": [
{
"expr": "mysql_global_variables_open_files_limit{instance=~\"$instance\"}",
"legend": "Open Files Limit"
},
{
"expr": "mysql_global_status_innodb_num_open_files{instance=~\"$instance\"}",
"legend": "InnoDB Open Files"
}
],
"name": "MySQL Open Files",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 24,
"x": 0,
"y": 41,
"i": "ac797cf1-56f6-4cf7-a472-8a2facd84588"
},
"id": "ac797cf1-56f6-4cf7-a472-8a2facd84588"
},
{
"id": "292f69d6-1a6c-463e-8aaf-14715b447c1f",
"type": "row",
"name": "Table Openings",
"layout": {
"h": 1,
"w": 24,
"x": 0,
"y": 48,
"i": "292f69d6-1a6c-463e-8aaf-14715b447c1f"
},
"collapsed": true
},
{
"targets": [
{
"expr": "rate(mysql_global_status_table_open_cache_hits{instance=~\"$instance\"}[5m])\n/\n(\nrate(mysql_global_status_table_open_cache_hits{instance=~\"$instance\"}[5m])\n+\nrate(mysql_global_status_table_open_cache_misses{instance=~\"$instance\"}[5m])\n)",
"legend": "Table Open Cache Hit Ratio"
}
],
"name": "Table Open Cache Hit Ratio",
"description": "**MySQL Table Open Cache Status**\n\nThe recommendation is to set the `table_open_cache_instances` to a loose correlation to virtual CPUs, keeping in mind that more instances means the cache is split more times. If you have a cache set to 500 but it has 10 instances, each cache will only have 50 cached.\n\nThe `table_definition_cache` and `table_open_cache` can be left as default as they are auto-sized in MySQL 5.6 and above (i.e. do not set them to any value).",
"options": {
"tooltip": {
"mode": "all",
"sort": "none"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {
"util": "percentUnit"
},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 0,
"y": 49,
"i": "0139a750-1a56-45ee-9004-7a8ef15d34dd"
},
"id": "0139a750-1a56-45ee-9004-7a8ef15d34dd"
},
{
"targets": [
{
"expr": "mysql_global_status_open_tables{instance=~\"$instance\"}",
"legend": "Open Tables"
},
{
"expr": "mysql_global_variables_table_open_cache{instance=~\"$instance\"}",
"legend": "Table Open Cache"
}
],
"name": "MySQL Open Tables",
"description": "**MySQL Open Tables**\n\nThe recommendation is to set the `table_open_cache_instances` to a loose correlation to virtual CPUs, keeping in mind that more instances means the cache is split more times. If you have a cache set to 500 but it has 10 instances, each cache will only have 50 cached.\n\nThe `table_definition_cache` and `table_open_cache` can be left as default as they are auto-sized in MySQL 5.6 and above (i.e. do not set them to any value).",
"options": {
"tooltip": {
"mode": "all",
"sort": "desc"
},
"legend": {
"displayMode": "hidden"
},
"standardOptions": {},
"thresholds": {}
},
"custom": {
"drawStyle": "lines",
"lineInterpolation": "smooth",
"fillOpacity": 0.5,
"stack": "off"
},
"version": "2.0.0",
"type": "timeseries",
"layout": {
"h": 7,
"w": 12,
"x": 12,
"y": 49,
"i": "fba77c7e-9e40-4829-89b6-ed8bb2a7add7"
},
"id": "fba77c7e-9e40-4829-89b6-ed8bb2a7add7"
}
]
}
}

Some files were not shown because too many files have changed in this diff Show More