Compare commits


1 Commit

Author SHA1 Message Date
ning  15c2eadda6  refactor: optimize webhook send  2024-12-11 16:25:35 +08:00
505 changed files with 14494 additions and 71197 deletions


@@ -1,22 +0,0 @@
name: 'Issue Translator'
on:
issues:
types: [opened]
jobs:
translate:
runs-on: ubuntu-latest
permissions:
issues: write
contents: read
steps:
- name: Translate Issues
uses: usthe/issues-translate-action@v2.7
with:
# Whether to translate the issue title
IS_MODIFY_TITLE: true
# GitHub Token
BOT_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Custom translation note (optional)
# CUSTOM_BOT_NOTE: "Translation by bot"

.gitignore

@@ -58,10 +58,6 @@ _test
.idea
.index
.vscode
.issue
.issue/*
.cursor
.claude
.DS_Store
.cache-loader
.payload
@@ -69,7 +65,3 @@ queries.active
/n9e-*
n9e.sql
!/datasource
.env.json


@@ -1,41 +0,0 @@
# Configuration for typos tool
[files]
extend-exclude = [
# Ignore auto-generated easyjson files
"*_easyjson.go",
# Ignore binary files
"*.gz",
"*.tar",
"n9e",
"n9e-*"
]
[default.extend-identifiers]
# Didi is a company name (DiDi), not a typo
Didi = "Didi"
# datas is intentionally used as plural of data (slice variable)
datas = "datas"
# pendings is intentionally used as plural
pendings = "pendings"
pendingsUseByRecover = "pendingsUseByRecover"
pendingsUseByRecoverMap = "pendingsUseByRecoverMap"
# typs is intentionally used as shorthand for types (parameter name)
typs = "typs"
[default.extend-words]
# Some false positives
ba = "ba"
# Specific corrections for ambiguous typos
contigious = "contiguous"
onw = "own"
componet = "component"
Patten = "Pattern"
Requets = "Requests"
Mis = "Miss"
exporer = "exporter"
soruce = "source"
verison = "version"
Configations = "Configurations"
emmited = "emitted"
Utlization = "Utilization"
serie = "series"

LICENSE

@@ -1,201 +1,433 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright CCF ODC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

README.md

@@ -3,7 +3,7 @@
<img src="doc/img/Nightingale_L_V.png" alt="nightingale - cloud native monitoring" width="100" /></a>
</p>
<p align="center">
<b>Open-Source Alerting Expert</b>
<b>Open-Source Alert Management Expert: an Integrated Observability Platform</b>
</p>
<p align="center">
@@ -25,93 +25,83 @@
[English](./README.md) | [中文](./README_zh.md)
[English](./README_en.md) | [中文](./README.md)
## 🎯 What is Nightingale
## What is Nightingale (夜莺)
Nightingale is an open-source monitoring project that focuses on alerting. Similar to Grafana, Nightingale also connects with various existing data sources. However, while Grafana emphasizes visualization, Nightingale places greater emphasis on the alerting engine, as well as the processing and distribution of alarms.
Nightingale is an open-source, cloud-native observability and analysis tool. Built with an all-in-one design philosophy, it combines data collection, visualization, monitoring and alerting, and data analysis, integrates tightly with the cloud-native ecosystem, and provides out-of-the-box, enterprise-grade monitoring, analysis, and alerting capabilities. Nightingale released v1 on GitHub on March 20, 2020 and has since gone through more than 100 iterative releases.
> 💡 Nightingale has now officially launched the [MCP-Server](https://github.com/n9e/n9e-mcp-server/). This MCP Server enables AI assistants to interact with the Nightingale API using natural language, facilitating alert management, monitoring, and observability tasks.
>
> The Nightingale project was initially developed and open-sourced by DiDi.inc. On May 11, 2022, it was donated to the Open Source Development Committee of the China Computer Federation (CCF ODTC).
Nightingale was originally developed and open-sourced by DiDi and was donated to the CCF Open Source Development Committee (CCF ODC) on May 11, 2022, becoming the first open-source project the CCF ODC accepted after its founding. Nightingale's core development team is the same team that built Open-Falcon; counting from 2014 (when Open-Falcon was open-sourced), they have spent a decade focused solely on doing monitoring well.
![](https://n9e.github.io/img/global/arch-bg.png)
## 💡 How Nightingale Works
## Quick Start
- 👉 [Documentation](https://flashcat.cloud/docs/) | [Downloads](https://flashcat.cloud/download/nightingale/)
- ❤️ [Report a Bug](https://github.com/ccfos/nightingale/issues/new?assignees=&labels=&projects=&template=question.yml)
- For faster access, the documentation and download sites above are hosted on [FlashcatCloud](https://flashcat.cloud)
- 💡 The frontend and backend live in separate repositories; the frontend code is at [https://github.com/n9e/fe](https://github.com/n9e/fe)
Many users have already collected metrics and log data. In this case, you can connect your storage repositories (such as VictoriaMetrics, ElasticSearch, etc.) as data sources in Nightingale. This allows you to configure alerting rules and notification rules within Nightingale, enabling the generation and distribution of alarms.
## Features
![Nightingale Product Architecture](doc/img/readme/20240221152601.png)
- Multiple time-series databases: integrates with Prometheus, VictoriaMetrics, Thanos, Mimir, M3DB, TDengine, and more, enabling unified alert management.
- Professional alerting: built-in support for many kinds of alert rules, extensible to common notification channels, with alert muting/suppression/subscription/self-healing and alert event management.
- High-performance visualization engine: supports many chart styles, ships with numerous dashboard templates, can import Grafana templates, works out of the box, and uses a business-friendly open-source license.
- Common collectors supported: works with [Categraf](https://flashcat.cloud/product/categraf), Telegraf, Grafana-agent, Datadog-agent, and all kinds of exporters as collectors; there is no data it cannot monitor.
- 👀 Pairs seamlessly with [Flashduty](https://flashcat.cloud/product/flashcat-duty/) for alert aggregation and noise reduction, acknowledgment, escalation, on-call scheduling, and IM integration, ensuring no alert is missed, reducing interruptions, and enabling efficient collaboration.
Nightingale itself does not provide monitoring data collection capabilities. We recommend using [Categraf](https://github.com/flashcatcloud/categraf) as the collector, which integrates seamlessly with Nightingale.
[Categraf](https://github.com/flashcatcloud/categraf) can collect monitoring data from operating systems, network devices, various middleware, and databases. It pushes this data to Nightingale via the `Prometheus Remote Write` protocol. Nightingale then stores the monitoring data in a time-series database (such as Prometheus, VictoriaMetrics, etc.) and provides alerting and visualization capabilities.
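To make that data path concrete, below is a minimal Go sketch of a Prometheus Remote Write push like the one Categraf performs. It is illustrative only: the receiver URL (`http://127.0.0.1:17000/prometheus/v1/write`) and the sample metric and labels are assumptions, not values taken from this changeset; check your Nightingale server configuration for the actual ingestion endpoint.

```go
package main

import (
	"bytes"
	"log"
	"net/http"
	"time"

	"github.com/golang/snappy"
	"github.com/prometheus/prometheus/prompb"
)

func main() {
	// One time series with a single sample; the metric name and labels are made up.
	wr := &prompb.WriteRequest{
		Timeseries: []prompb.TimeSeries{{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "demo_cpu_idle"},
				{Name: "ident", Value: "host-01"},
			},
			Samples: []prompb.Sample{
				{Value: 93.5, Timestamp: time.Now().UnixMilli()},
			},
		}},
	}

	// Remote Write bodies are protobuf-encoded and snappy-compressed.
	raw, err := wr.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	body := snappy.Encode(nil, raw)

	// NOTE: the endpoint path below is an assumption for illustration.
	req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:17000/prometheus/v1/write", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/x-protobuf")
	req.Header.Set("Content-Encoding", "snappy")
	req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("remote write status:", resp.Status)
}
```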
## Screenshots
For certain edge data centers with poor network connectivity to the central Nightingale server, we offer a distributed deployment mode for the alerting engine. In this mode, even if the network is disconnected, the alerting functionality remains unaffected.
![Edge Deployment Mode](doc/img/readme/multi-region-arch.png)
You can switch the language and theme in the top-right corner of the page; English, Simplified Chinese, and Traditional Chinese are currently supported.
> In the above diagram, Data Center A has a good network with the central data center, so it uses the Nightingale process in the central data center as the alerting engine. Data Center B has a poor network with the central data center, so it deploys `n9e-edge` as the alerting engine to handle alerting for its own data sources.
![Language switch](https://download.flashcat.cloud/ulric/n9e-switch-i18n.png)
## 🔕 Alert Noise Reduction, Escalation, and Collaboration
Instant query: similar to the query/analysis page built into Prometheus for ad-hoc queries, with some UI improvements in Nightingale plus a set of built-in PromQL queries, so that users who are not fluent in PromQL can still query quickly.
Nightingale focuses on being an alerting engine, responsible for generating alarms and flexibly distributing them based on rules. It supports 20 built-in notification channels (such as phone calls, SMS, email, DingTalk, Slack, etc.).
![Instant query](https://download.flashcat.cloud/ulric/20240513103305.png)
If you have more advanced requirements, such as:
- Want to consolidate events from multiple monitoring systems into one platform for unified noise reduction, response handling, and data analysis.
- Want to support personnel scheduling, practice on-call culture, and support alert escalation (to avoid missing alerts) and collaborative handling.
You can also browse data directly through the metric view. With it, instant query is largely unnecessary: advanced users may still prefer instant query, while regular users can simply query through the metric view.
Then Nightingale is not suitable. It is recommended that you choose on-call products such as PagerDuty and FlashDuty. These products are simple and easy to use.
![Metric view](https://download.flashcat.cloud/ulric/20240513103530.png)
## 🗨️ Communication Channels
Nightingale ships with commonly used dashboards that can be imported and used directly. Grafana dashboards can also be imported, though only basic Grafana charts are compatible; if you are already used to Grafana, we recommend continuing to use Grafana for visualization and using Nightingale as an alerting engine.
- **Report Bugs:** It is highly recommended to submit issues via the [Nightingale GitHub Issue tracker](https://github.com/ccfos/nightingale/issues/new?assignees=&labels=kind%2Fbug&projects=&template=bug_report.yml).
- **Documentation:** For more information, we recommend thoroughly browsing the [Nightingale Documentation Site](https://n9e.github.io/).
![Built-in dashboards](https://download.flashcat.cloud/ulric/20240513103628.png)
## 🔑 Key Features
In addition to the built-in dashboards, many alert rules are also built in and ready to use out of the box.
![Nightingale Alerting rules](doc/img/readme/alerting-rules-en.png)
![Built-in alert rules](https://download.flashcat.cloud/ulric/20240513103825.png)
- Nightingale supports alerting rules, mute rules, subscription rules, and notification rules. It natively supports 20 types of notification media and allows customization of message templates.
- It supports event pipelines for Pipeline processing of alarms, facilitating automated integration with in-house systems. For example, it can append metadata to alarms or perform relabeling on events.
- It introduces the concept of business groups and a permission system to manage various rules in a categorized manner.
- Many databases and middleware come with built-in alert rules that can be directly imported and used. It also supports direct import of Prometheus alerting rules.
- It supports alerting self-healing, which automatically triggers a script to execute predefined logic after an alarm is generated—such as cleaning up disk space or capturing the current system state.
![Nightingale Alarm Dashboard](doc/img/readme/active-events-en.png)
- Nightingale archives historical alarms and supports multi-dimensional query and statistics.
- It supports flexible aggregation grouping, allowing a clear view of the distribution of alarms across the company.
## Architecture
![Nightingale Integration Center](doc/img/readme/integration-components-en.png)
The most common community use case is running Nightingale as an alerting engine, connecting it to multiple time-series databases for unified alert rule management, while Grafana remains the most popular choice for visualization. As an alerting engine, Nightingale's product architecture looks like this:
- Nightingale has built-in metric descriptions, dashboards, and alerting rules for common operating systems, middleware, and databases, which are contributed by the community with varying quality.
- It directly receives data via multiple protocols such as Remote Write, OpenTSDB, Datadog, and Falcon, and therefore integrates with various agents.
- It supports data sources like Prometheus, ElasticSearch, Loki, ClickHouse, MySQL, and Postgres, allowing alerting based on data from these sources.
- Nightingale can easily embed internal enterprise systems (e.g. Grafana, a CMDB) into its UI, and even supports configuring menu visibility for these embedded systems.
![Product architecture](https://download.flashcat.cloud/ulric/20240221152601.png)
![Nightingale dashboards](doc/img/readme/dashboard-en.png)
For edge data centers with a poor network link to the central Nightingale server, we also offer a deployment mode in which the alerting engine is pushed down to the edge data center; in this mode, alerting keeps working even if the network is cut off.
- Nightingale supports dashboard functionality, including common chart types, and comes with pre-built dashboards. The image above is a screenshot of one of these dashboards.
- If you are already accustomed to Grafana, it is recommended to continue using Grafana for visualization, as Grafana has deeper expertise in this area.
- For machine-related monitoring data collected by Categraf, it is advisable to use Nightingale's built-in dashboards for viewing. This is because Categraf's metric naming follows Telegraf's convention, which differs from that of Node Exporter.
- Due to Nightingale's concept of business groups (where machines can belong to different groups), there may be scenarios where you only want to view machines within the current business group on the dashboard. Thus, Nightingale's dashboards can be linked with business groups for interactive filtering.
![Edge deployment mode](https://download.flashcat.cloud/ulric/20240222102119.png)
## 🌟 Stargazers over time
## Communication Channels
- To report a bug, the preferred route is a [Nightingale GitHub Issue](https://github.com/ccfos/nightingale/issues/new?assignees=&labels=kind%2Fbug&projects=&template=bug_report.yml)
- We recommend browsing the [Nightingale documentation site](https://flashcat.cloud/docs/content/flashcat-monitor/nightingale-v7/introduction/) in full to learn more
- Follow the Nightingale WeChat official account `夜莺监控Nightingale` to get community news as soon as it is published
- For day-to-day questions:
- QQ group: 730841964
- [Join the WeChat group](https://download.flashcat.cloud/ulric/20241022141621.png); if the QR code has expired, contact me on WeChat (`picobyte`) to be added, with the note `夜莺互助群` (Nightingale mutual-help group)
## Stargazers over time
[![Stargazers over time](https://api.star-history.com/svg?repos=ccfos/nightingale&type=Date)](https://star-history.com/#ccfos/nightingale&Date)
## 🔥 Users
![User Logos](doc/img/readme/logos.png)
## 🤝 Community Co-Building
- ❇️ Please read the [Nightingale Open Source Project and Community Governance Draft](./doc/community-governance.md). We sincerely welcome every user, developer, company, and organization to use Nightingale, actively report bugs, submit feature requests, share best practices, and help build a professional and active open-source community.
- ❤️ Nightingale Contributors
## Community Co-Building
- ❇️ Please read the [Nightingale Open Source Project and Community Governance Draft](./doc/community-governance.md). We sincerely welcome every user, developer, company, and organization to use Nightingale, actively report bugs, submit feature requests, share best practices, and help build a professional, active open-source community.
- ❤️ Nightingale Contributors
<a href="https://github.com/ccfos/nightingale/graphs/contributors">
<img src="https://contrib.rocks/image?repo=ccfos/nightingale" />
</a>
## 📜 License
- [Apache License V2.0](https://github.com/ccfos/nightingale/blob/main/LICENSE)
## License
- [Apache License V2.0](https://github.com/didi/nightingale/blob/main/LICENSE)

README_en.md (new file)

@@ -0,0 +1,113 @@
<p align="center">
<a href="https://github.com/ccfos/nightingale">
<img src="doc/img/Nightingale_L_V.png" alt="nightingale - cloud native monitoring" width="100" /></a>
</p>
<p align="center">
<b>Open-source Alert Management Expert, an Integrated Observability Platform</b>
</p>
<p align="center">
<a href="https://flashcat.cloud/docs/">
<img alt="Docs" src="https://img.shields.io/badge/docs-get%20started-brightgreen"/></a>
<a href="https://hub.docker.com/u/flashcatcloud">
<img alt="Docker pulls" src="https://img.shields.io/docker/pulls/flashcatcloud/nightingale"/></a>
<a href="https://github.com/ccfos/nightingale/graphs/contributors">
<img alt="GitHub contributors" src="https://img.shields.io/github/contributors-anon/ccfos/nightingale"/></a>
<img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/ccfos/nightingale">
<img alt="GitHub forks" src="https://img.shields.io/github/forks/ccfos/nightingale">
<br/><img alt="GitHub Repo issues" src="https://img.shields.io/github/issues/ccfos/nightingale">
<img alt="GitHub Repo issues closed" src="https://img.shields.io/github/issues-closed/ccfos/nightingale">
<img alt="GitHub latest release" src="https://img.shields.io/github/v/release/ccfos/nightingale"/>
<img alt="License" src="https://img.shields.io/badge/license-Apache--2.0-blue"/>
<a href="https://n9e-talk.slack.com/">
<img alt="GitHub contributors" src="https://img.shields.io/badge/join%20slack-%23n9e-brightgreen.svg"/></a>
</p>
[English](./README_en.md) | [中文](./README.md)
## What is Nightingale
Nightingale aims to combine the advantages of Prometheus and Grafana. It manages alert rules and visualizes metrics, logs, and traces in a beautiful WebUI.
Originally developed and open-sourced by Didi, Nightingale was donated to the China Computer Federation Open Source Development Committee (CCF ODC) on May 11, 2022, becoming the first open-source project accepted by the CCF ODC after its establishment.
## Quick Start
- 👉 [Documentation](https://flashcat.cloud/docs/) | [Download](https://flashcat.cloud/download/nightingale/)
- ❤️ [Report a Bug](https://github.com/ccfos/nightingale/issues/new?assignees=&labels=&projects=&template=question.yml)
- For faster access, the above documentation and download sites are hosted on [FlashcatCloud](https://flashcat.cloud).
## Features
- **Integration with Multiple Time-Series Databases:** Supports integration with various time-series databases such as Prometheus, VictoriaMetrics, Thanos, Mimir, M3DB, and TDengine, enabling unified alert management.
- **Advanced Alerting Capabilities:** Comes with built-in support for multiple alerting rules, extensible to common notification channels. It also supports alert suppression, silencing, subscription, self-healing, and alert event management.
- **High-Performance Visualization Engine:** Offers various chart styles with numerous built-in dashboard templates and the ability to import Grafana templates. Ready to use with a business-friendly open-source license.
- **Support for Common Collectors:** Compatible with [Categraf](https://flashcat.cloud/product/categraf), Telegraf, Grafana-agent, Datadog-agent, and various exporters as collectors—there's no data that can't be monitored.
- **Seamless Integration with [Flashduty](https://flashcat.cloud/product/flashcat-duty/):** Enables alert aggregation, acknowledgment, escalation, scheduling, and IM integration, ensuring no alerts are missed, reducing unnecessary interruptions, and enhancing efficient collaboration.
## Screenshots
You can switch languages and themes in the top right corner. We now support English, Simplified Chinese, and Traditional Chinese.
![i18n switch](https://download.flashcat.cloud/ulric/n9e-switch-i18n.png)
### Instant Query
Similar to the built-in query analysis page in Prometheus, Nightingale offers an ad-hoc query feature with UI enhancements. It also provides built-in PromQL metrics, allowing users unfamiliar with PromQL to quickly perform queries.
![Instant Query](https://download.flashcat.cloud/ulric/20240513103305.png)
### Metric View
Alternatively, you can use the Metric View to access data. With this feature, Instant Query becomes less necessary, as it caters more to advanced users. Regular users can easily perform queries using the Metric View.
![Metric View](https://download.flashcat.cloud/ulric/20240513103530.png)
### Built-in Dashboards
Nightingale includes commonly used dashboards that can be imported and used directly. You can also import Grafana dashboards, although compatibility is limited to basic Grafana charts. If you're accustomed to Grafana, it's recommended to continue using it for visualization, with Nightingale serving as an alerting engine.
![Built-in Dashboards](https://download.flashcat.cloud/ulric/20240513103628.png)
### Built-in Alert Rules
In addition to the built-in dashboards, Nightingale also comes with numerous alert rules that are ready to use out of the box.
![Built-in Alert Rules](https://download.flashcat.cloud/ulric/20240513103825.png)
## Architecture
In most community scenarios, Nightingale is primarily used as an alert engine, integrating with multiple time-series databases to unify alert rule management. Grafana remains the preferred tool for visualization. As an alert engine, the product architecture of Nightingale is as follows:
![Product Architecture](https://download.flashcat.cloud/ulric/20240221152601.png)
For certain edge data centers with poor network connectivity to the central Nightingale server, we offer a distributed deployment mode for the alert engine. In this mode, even if the network is disconnected, the alerting functionality remains unaffected.
![Edge Deployment Mode](https://download.flashcat.cloud/ulric/20240222102119.png)
## Communication Channels
- **Report Bugs:** It is highly recommended to submit issues via the [Nightingale GitHub Issue tracker](https://github.com/ccfos/nightingale/issues/new?assignees=&labels=kind%2Fbug&projects=&template=bug_report.yml).
- **Documentation:** For more information, we recommend thoroughly browsing the [Nightingale Documentation Site](https://flashcat.cloud/docs/content/flashcat-monitor/nightingale-v7/introduction/).
## Stargazers over time
[![Stargazers over time](https://api.star-history.com/svg?repos=ccfos/nightingale&type=Date)](https://star-history.com/#ccfos/nightingale&Date)
## Community Co-Building
- ❇️ Please read the [Nightingale Open Source Project and Community Governance Draft](./doc/community-governance.md). We sincerely welcome every user, developer, company, and organization to use Nightingale, actively report bugs, submit feature requests, share best practices, and help build a professional and active open-source community.
- ❤️ Nightingale Contributors
<a href="https://github.com/ccfos/nightingale/graphs/contributors">
<img src="https://contrib.rocks/image?repo=ccfos/nightingale" />
</a>
## License
- [Apache License V2.0](https://github.com/didi/nightingale/blob/main/LICENSE)


@@ -1,123 +0,0 @@
<p align="center">
<a href="https://github.com/ccfos/nightingale">
<img src="doc/img/Nightingale_L_V.png" alt="nightingale - cloud native monitoring" width="100" /></a>
</p>
<p align="center">
<b>Open-Source Monitoring and Alert Management Expert</b>
</p>
<p align="center">
<a href="https://flashcat.cloud/docs/">
<img alt="Docs" src="https://img.shields.io/badge/docs-get%20started-brightgreen"/></a>
<a href="https://hub.docker.com/u/flashcatcloud">
<img alt="Docker pulls" src="https://img.shields.io/docker/pulls/flashcatcloud/nightingale"/></a>
<a href="https://github.com/ccfos/nightingale/graphs/contributors">
<img alt="GitHub contributors" src="https://img.shields.io/github/contributors-anon/ccfos/nightingale"/></a>
<img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/ccfos/nightingale">
<img alt="GitHub forks" src="https://img.shields.io/github/forks/ccfos/nightingale">
<br/><img alt="GitHub Repo issues" src="https://img.shields.io/github/issues/ccfos/nightingale">
<img alt="GitHub Repo issues closed" src="https://img.shields.io/github/issues-closed/ccfos/nightingale">
<img alt="GitHub latest release" src="https://img.shields.io/github/v/release/ccfos/nightingale"/>
<img alt="License" src="https://img.shields.io/badge/license-Apache--2.0-blue"/>
<a href="https://n9e-talk.slack.com/">
<img alt="GitHub contributors" src="https://img.shields.io/badge/join%20slack-%23n9e-brightgreen.svg"/></a>
</p>
[English](./README.md) | [中文](./README_zh.md)
## What is Nightingale
Nightingale is an open-source, cloud-native monitoring and alerting tool, and the first open-source project donated to and hosted by the China Computer Federation. With more than 12,000 stars on GitHub, it is widely followed and used. Its unified alerting engine can connect to data sources such as Prometheus, Elasticsearch, ClickHouse, Loki, and MySQL, providing comprehensive alert evaluation, rich event processing, and flexible alert routing and notification.
Nightingale focuses on monitoring and alerting. Like Grafana, it integrates with a variety of existing data sources, but whereas Grafana emphasizes visualization, Nightingale emphasizes the alerting engine and the processing and distribution of alert events.
> - 💡 Nightingale has officially released the [MCP-Server](https://github.com/n9e/n9e-mcp-server/), which lets AI assistants interact with the Nightingale API in natural language to handle alert management, monitoring, and observability tasks.
> - The Nightingale project was originally developed and open-sourced by DiDi, and on May 11, 2022 it was donated to the CCF Open Source Development Committee (CCF ODC), becoming the first open-source project the committee accepted after its founding.
![](https://n9e.github.io/img/global/arch-bg.png)
## How Nightingale Works
Many users already collect metrics and log data themselves. In that case, simply add your storage back ends (VictoriaMetrics, Elasticsearch, etc.) to Nightingale as data sources; you can then configure alert rules and notification rules in Nightingale to generate and dispatch alert events.
![Nightingale product architecture](doc/img/readme/20240221152601.png)
Nightingale itself does not collect monitoring data. We recommend [Categraf](https://github.com/flashcatcloud/categraf) as the collector; it integrates seamlessly with Nightingale.
[Categraf](https://github.com/flashcatcloud/categraf) can collect monitoring data from operating systems, network devices, all kinds of middleware, and databases, and push it to Nightingale via the Remote Write protocol. Nightingale then forwards the data to a time-series database (such as Prometheus or VictoriaMetrics) and provides alerting and visualization on top of it.
For edge data centers with a poor network link to the central Nightingale server, Nightingale also offers a deployment mode in which the alerting engine runs in the edge data center itself; in this mode, alerting keeps working even if the edge is cut off from the center.
![Edge deployment mode](doc/img/readme/20240222102119.png)
> In the diagram above, data center A has a good network link to the central data center, so the central Nightingale process serves as its alerting engine; data center B has a poor link to the central data center, so `n9e-edge` is deployed in data center B as the alerting engine to evaluate alerts against data center B's data sources.
## Alert Noise Reduction, Escalation, and Collaboration
Nightingale focuses on being an alerting engine: it generates alert events and dispatches them flexibly according to rules, with built-in support for 20 notification channels (phone calls, SMS, email, DingTalk, Feishu, WeCom, Slack, etc.).
If you have more advanced requirements, such as:
- Consolidating events from the company's multiple monitoring systems onto one platform for unified noise reduction, response handling, and data analysis
- Supporting on-call scheduling and an on-call culture, with alert acknowledgment, escalation (to avoid missed alerts), and collaborative handling
then Nightingale is not the right fit; we recommend an on-call product such as [FlashDuty](https://flashcat.cloud/product/flashcat-duty/), which is simple, easy to use, and offers a free plan.
## Resources & Communication Channels
- 📚 [Nightingale introduction slides](https://mp.weixin.qq.com/s/Mkwx_46xrltSq8NLqAIYow): helpful for understanding Nightingale's key features (the slide link is at the end of the article)
- 👉 [Documentation](https://flashcat.cloud/docs/): hosted on [FlashcatCloud](https://flashcat.cloud) for faster access
- ❤️ [Report a Bug](https://github.com/ccfos/nightingale/issues/new?assignees=&labels=&projects=&template=question.yml): a clear problem description, reproduction steps, and screenshots make it much easier to get an answer
- 💡 The frontend and backend live in separate repositories; the frontend code is at [https://github.com/n9e/fe](https://github.com/n9e/fe)
- 🎯 Follow [this official account](https://gitlink.org.cn/UlricQin) for more Nightingale news and knowledge
- 🌟 Add me on WeChat (`picobyte`, friend verification is disabled) to be pulled into the WeChat group, with the note `夜莺互助群`; if you already run Nightingale in production, contact me to join the senior monitoring-users group
## Key Features
![Nightingale alert rules](doc/img/readme/2025-05-23_18-43-37.png)
- Nightingale supports alert rules, mute rules, subscription rules, and notification rules, with 20 built-in notification channels and customizable message templates
- Supports event pipelines for processing alert events, making automated integration with in-house systems easy, e.g. attaching extra metadata to alert events or relabeling events
- Supports the concept of business groups and a permission system, so the various rules can be managed by category
- Many databases and middleware come with built-in alert rules that can be imported directly; Prometheus alert rules can also be imported directly
- Supports alert self-healing: after an alert fires, a script is triggered automatically to run predefined logic, such as cleaning up disk space or capturing the current system state
![Nightingale event dashboard](doc/img/readme/2025-05-30_08-49-28.png)
- Nightingale archives historical alert events and supports multi-dimensional querying and statistics
- Supports flexible aggregation and grouping, giving an at-a-glance view of how alert events are distributed across the company
![Nightingale integration center](doc/img/readme/2025-05-23_18-46-06.png)
- Nightingale ships with metric descriptions, dashboards, and alert rules for common operating systems, middleware, and databases; these are community contributions, so quality varies
- Nightingale directly ingests data via protocols such as Remote Write, OpenTSDB, Datadog, and Falcon, so it can work with all kinds of agents
- Nightingale supports data sources such as Prometheus, ElasticSearch, Loki, and TDEngine, and can alert on the data in them
- Nightingale can easily embed internal enterprise systems such as Grafana or a CMDB, and you can even configure menu visibility for these embedded systems
![Nightingale dashboards](doc/img/readme/2025-05-23_18-49-02.png)
- Nightingale supports dashboards with common chart types and ships with some built-in dashboards; the image above is a screenshot of one of them.
- If you are already used to Grafana, we recommend continuing to use Grafana for visualization; Grafana is more mature in that area.
- For machine-related monitoring data collected by Categraf, we recommend Nightingale's built-in dashboards, because Categraf follows Telegraf's metric naming convention, which differs from Node Exporter's
- Because Nightingale has the concept of business groups (machines can belong to different groups), you sometimes only want to see the machines of the current business group on a dashboard, so Nightingale's dashboards can be linked with business groups
## Stargazers over time
[![Stargazers over time](https://api.star-history.com/svg?repos=ccfos/nightingale&type=Date)](https://star-history.com/#ccfos/nightingale&Date)
## Trusted by Many Companies
![Nightingale users](doc/img/readme/logos.png)
## Community Co-Building
- ❇️ Please read the [Nightingale Open Source Project and Community Governance Draft](./doc/community-governance.md). We sincerely welcome every user, developer, company, and organization to use Nightingale, actively report bugs, submit feature requests, share best practices, and help build a professional, active open-source community.
- ❤️ Nightingale Contributors
<a href="https://github.com/ccfos/nightingale/graphs/contributors">
<img src="https://contrib.rocks/image?repo=ccfos/nightingale" />
</a>
## License
- [Apache License V2.0](https://github.com/ccfos/nightingale/blob/main/LICENSE)

File diff suppressed because it is too large.


@@ -1,546 +0,0 @@
package aiagent
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/ccfos/nightingale/v6/datasource"
"github.com/ccfos/nightingale/v6/dscache"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/prom"
"github.com/toolkits/pkg/logger"
)
const (
// ToolTypeBuiltin is the builtin tool type
ToolTypeBuiltin = "builtin"
)
// =============================================================================
// Datasource getter functions (injectable to make testing easier)
// =============================================================================
// PromClientGetter is the function type used to obtain a Prometheus client
type PromClientGetter func(dsId int64) prom.API
// SQLDatasourceGetter is the function type used to obtain a SQL datasource
type SQLDatasourceGetter func(dsType string, dsId int64) (datasource.Datasource, bool)
// By default the global cache is used; it can be replaced via SetPromClientGetter/SetSQLDatasourceGetter
var (
getPromClientFunc PromClientGetter = defaultGetPromClient
getSQLDatasourceFunc SQLDatasourceGetter = defaultGetSQLDatasource
)
// SetPromClientGetter sets the Prometheus client getter (used in tests)
func SetPromClientGetter(getter PromClientGetter) {
getPromClientFunc = getter
}
// SetSQLDatasourceGetter sets the SQL datasource getter (used in tests)
func SetSQLDatasourceGetter(getter SQLDatasourceGetter) {
getSQLDatasourceFunc = getter
}
// ResetDatasourceGetters restores the default datasource getters
func ResetDatasourceGetters() {
getPromClientFunc = defaultGetPromClient
getSQLDatasourceFunc = defaultGetSQLDatasource
}
func defaultGetPromClient(dsId int64) prom.API {
// Default: no PromClient available. Use SetPromClientGetter to inject.
return nil
}
func defaultGetSQLDatasource(dsType string, dsId int64) (datasource.Datasource, bool) {
return dscache.DsCache.Get(dsType, dsId)
}
// BuiltinToolHandler is the handler function of a builtin tool
type BuiltinToolHandler func(ctx context.Context, wfCtx *models.WorkflowContext, args map[string]interface{}) (string, error)
// BuiltinTool defines a builtin tool
type BuiltinTool struct {
Definition AgentTool
Handler BuiltinToolHandler
}
// builtinTools is the registry of builtin tools
var builtinTools = map[string]*BuiltinTool{
// Prometheus-related tools
"list_metrics": {
Definition: AgentTool{
Name: "list_metrics",
Description: "搜索 Prometheus 数据源的指标名称,支持关键词模糊匹配",
Type: ToolTypeBuiltin,
Parameters: []ToolParameter{
{Name: "keyword", Type: "string", Description: "搜索关键词,模糊匹配指标名", Required: false},
{Name: "limit", Type: "integer", Description: "返回数量限制默认30", Required: false},
},
},
Handler: listMetrics,
},
"get_metric_labels": {
Definition: AgentTool{
Name: "get_metric_labels",
Description: "获取 Prometheus 指标的所有标签键及其可选值",
Type: ToolTypeBuiltin,
Parameters: []ToolParameter{
{Name: "metric", Type: "string", Description: "指标名称", Required: true},
},
},
Handler: getMetricLabels,
},
// SQL datasource tools
"list_databases": {
Definition: AgentTool{
Name: "list_databases",
Description: "列出 SQL 数据源MySQL/Doris/ClickHouse/PostgreSQL中的所有数据库",
Type: ToolTypeBuiltin,
Parameters: []ToolParameter{},
},
Handler: listDatabases,
},
"list_tables": {
Definition: AgentTool{
Name: "list_tables",
Description: "列出指定数据库中的所有表",
Type: ToolTypeBuiltin,
Parameters: []ToolParameter{
{Name: "database", Type: "string", Description: "数据库名", Required: true},
},
},
Handler: listTables,
},
"describe_table": {
Definition: AgentTool{
Name: "describe_table",
Description: "获取表的字段结构(字段名、类型、注释)",
Type: ToolTypeBuiltin,
Parameters: []ToolParameter{
{Name: "database", Type: "string", Description: "数据库名", Required: true},
{Name: "table", Type: "string", Description: "表名", Required: true},
},
},
Handler: describeTable,
},
}
// GetBuiltinToolDef returns the definition of a builtin tool
func GetBuiltinToolDef(name string) (AgentTool, bool) {
if tool, ok := builtinTools[name]; ok {
return tool.Definition, true
}
return AgentTool{}, false
}
// GetBuiltinToolDefs returns the definitions of the named builtin tools
func GetBuiltinToolDefs(names []string) []AgentTool {
var defs []AgentTool
for _, name := range names {
if def, ok := GetBuiltinToolDef(name); ok {
defs = append(defs, def)
}
}
return defs
}
// GetAllBuiltinToolDefs returns all builtin tool definitions
func GetAllBuiltinToolDefs() []AgentTool {
defs := make([]AgentTool, 0, len(builtinTools))
for _, tool := range builtinTools {
defs = append(defs, tool.Definition)
}
return defs
}
// ExecuteBuiltinTool executes a builtin tool.
// Return values: result, handled, error.
// handled reports whether the tool is a builtin: true means it was handled here, false means it is not a builtin and the caller should keep looking.
func ExecuteBuiltinTool(ctx context.Context, name string, wfCtx *models.WorkflowContext, argsJSON string) (string, bool, error) {
tool, exists := builtinTools[name]
if !exists {
return "", false, nil
}
// Parse the arguments
var args map[string]interface{}
if argsJSON != "" {
if err := json.Unmarshal([]byte(argsJSON), &args); err != nil {
// If the arguments are not JSON, fall back to treating them as a plain string input
args = map[string]interface{}{"input": argsJSON}
}
}
if args == nil {
args = make(map[string]interface{})
}
result, err := tool.Handler(ctx, wfCtx, args)
return result, true, err
}
// getDatasourceId reads datasource_id from wfCtx.Inputs
func getDatasourceId(wfCtx *models.WorkflowContext) int64 {
if wfCtx == nil || wfCtx.Inputs == nil {
return 0
}
var dsId int64
if dsIdStr, ok := wfCtx.Inputs["datasource_id"]; ok {
fmt.Sscanf(dsIdStr, "%d", &dsId)
}
return dsId
}
// getDatasourceType reads datasource_type from wfCtx.Inputs
func getDatasourceType(wfCtx *models.WorkflowContext) string {
if wfCtx == nil || wfCtx.Inputs == nil {
return ""
}
return wfCtx.Inputs["datasource_type"]
}
// =============================================================================
// Prometheus tool implementations
// =============================================================================
// listMetrics lists Prometheus metric names
func listMetrics(ctx context.Context, wfCtx *models.WorkflowContext, args map[string]interface{}) (string, error) {
dsId := getDatasourceId(wfCtx)
if dsId == 0 {
return "", fmt.Errorf("datasource_id not found in inputs")
}
keyword, _ := args["keyword"].(string)
limit := 30
if l, ok := args["limit"].(float64); ok && l > 0 {
limit = int(l)
}
// Get a Prometheus client
client := getPromClientFunc(dsId)
if client == nil {
return "", fmt.Errorf("prometheus datasource not found: %d", dsId)
}
// Call LabelValues on __name__ to fetch all metric names
values, _, err := client.LabelValues(ctx, "__name__", nil)
if err != nil {
return "", fmt.Errorf("failed to get metrics: %v", err)
}
// Filter and apply the limit
result := make([]string, 0)
keyword = strings.ToLower(keyword)
for _, v := range values {
m := string(v)
if keyword == "" || strings.Contains(strings.ToLower(m), keyword) {
result = append(result, m)
if len(result) >= limit {
break
}
}
}
logger.Debugf("list_metrics: found %d metrics (keyword=%s, limit=%d)", len(result), keyword, limit)
bytes, _ := json.Marshal(result)
return string(bytes), nil
}
// getMetricLabels returns the labels of a metric
func getMetricLabels(ctx context.Context, wfCtx *models.WorkflowContext, args map[string]interface{}) (string, error) {
dsId := getDatasourceId(wfCtx)
if dsId == 0 {
return "", fmt.Errorf("datasource_id not found in inputs")
}
metric, ok := args["metric"].(string)
if !ok || metric == "" {
return "", fmt.Errorf("metric parameter is required")
}
client := getPromClientFunc(dsId)
if client == nil {
return "", fmt.Errorf("prometheus datasource not found: %d", dsId)
}
// Fetch all series of the metric via the Series API
endTime := time.Now()
startTime := endTime.Add(-1 * time.Hour)
series, _, err := client.Series(ctx, []string{metric}, startTime, endTime)
if err != nil {
return "", fmt.Errorf("failed to get metric series: %v", err)
}
// Aggregate label keys and values
labels := make(map[string][]string)
seen := make(map[string]map[string]bool)
for _, s := range series {
for k, v := range s {
key := string(k)
val := string(v)
if key == "__name__" {
continue
}
if seen[key] == nil {
seen[key] = make(map[string]bool)
}
if !seen[key][val] {
seen[key][val] = true
labels[key] = append(labels[key], val)
}
}
}
logger.Debugf("get_metric_labels: metric=%s, found %d labels", metric, len(labels))
bytes, _ := json.Marshal(labels)
return string(bytes), nil
}
// =============================================================================
// SQL datasource tool implementations
// =============================================================================
// SQLMetadataQuerier is the SQL metadata query interface
type SQLMetadataQuerier interface {
ListDatabases(ctx context.Context) ([]string, error)
ListTables(ctx context.Context, database string) ([]string, error)
DescribeTable(ctx context.Context, database, table string) ([]map[string]interface{}, error)
}
// listDatabases lists databases
func listDatabases(ctx context.Context, wfCtx *models.WorkflowContext, args map[string]interface{}) (string, error) {
dsId := getDatasourceId(wfCtx)
dsType := getDatasourceType(wfCtx)
if dsId == 0 {
return "", fmt.Errorf("datasource_id not found in inputs")
}
if dsType == "" {
return "", fmt.Errorf("datasource_type not found in inputs")
}
plug, exists := getSQLDatasourceFunc(dsType, dsId)
if !exists {
return "", fmt.Errorf("datasource not found: %s/%d", dsType, dsId)
}
// Build the query SQL
var sql string
switch dsType {
case "mysql", "doris":
sql = "SHOW DATABASES"
case "ck", "clickhouse":
sql = "SHOW DATABASES"
case "pgsql", "postgresql":
sql = "SELECT datname FROM pg_database WHERE datistemplate = false"
default:
return "", fmt.Errorf("unsupported datasource type for list_databases: %s", dsType)
}
// Run the query
query := map[string]interface{}{"sql": sql}
data, _, err := plug.QueryLog(ctx, query)
if err != nil {
return "", fmt.Errorf("failed to list databases: %v", err)
}
// Extract database names
databases := extractColumnValues(data, dsType, "database")
logger.Debugf("list_databases: dsType=%s, found %d databases", dsType, len(databases))
bytes, _ := json.Marshal(databases)
return string(bytes), nil
}
// listTables lists tables
func listTables(ctx context.Context, wfCtx *models.WorkflowContext, args map[string]interface{}) (string, error) {
dsId := getDatasourceId(wfCtx)
dsType := getDatasourceType(wfCtx)
if dsId == 0 {
return "", fmt.Errorf("datasource_id not found in inputs")
}
database, ok := args["database"].(string)
if !ok || database == "" {
return "", fmt.Errorf("database parameter is required")
}
plug, exists := getSQLDatasourceFunc(dsType, dsId)
if !exists {
return "", fmt.Errorf("datasource not found: %s/%d", dsType, dsId)
}
// Build the query SQL
var sql string
switch dsType {
case "mysql", "doris":
sql = fmt.Sprintf("SHOW TABLES FROM `%s`", database)
case "ck", "clickhouse":
sql = fmt.Sprintf("SHOW TABLES FROM `%s`", database)
case "pgsql", "postgresql":
sql = fmt.Sprintf("SELECT tablename FROM pg_tables WHERE schemaname = 'public'")
default:
return "", fmt.Errorf("unsupported datasource type for list_tables: %s", dsType)
}
// Run the query
query := map[string]interface{}{"sql": sql, "database": database}
data, _, err := plug.QueryLog(ctx, query)
if err != nil {
return "", fmt.Errorf("failed to list tables: %v", err)
}
// Extract table names
tables := extractColumnValues(data, dsType, "table")
logger.Debugf("list_tables: dsType=%s, database=%s, found %d tables", dsType, database, len(tables))
bytes, _ := json.Marshal(tables)
return string(bytes), nil
}
// describeTable returns the table schema
func describeTable(ctx context.Context, wfCtx *models.WorkflowContext, args map[string]interface{}) (string, error) {
dsId := getDatasourceId(wfCtx)
dsType := getDatasourceType(wfCtx)
if dsId == 0 {
return "", fmt.Errorf("datasource_id not found in inputs")
}
database, ok := args["database"].(string)
if !ok || database == "" {
return "", fmt.Errorf("database parameter is required")
}
table, ok := args["table"].(string)
if !ok || table == "" {
return "", fmt.Errorf("table parameter is required")
}
plug, exists := getSQLDatasourceFunc(dsType, dsId)
if !exists {
return "", fmt.Errorf("datasource not found: %s/%d", dsType, dsId)
}
// Build the query SQL
var sql string
switch dsType {
case "mysql", "doris":
sql = fmt.Sprintf("DESCRIBE `%s`.`%s`", database, table)
case "ck", "clickhouse":
sql = fmt.Sprintf("DESCRIBE TABLE `%s`.`%s`", database, table)
case "pgsql", "postgresql":
sql = fmt.Sprintf(`SELECT column_name as "Field", data_type as "Type", is_nullable as "Null", column_default as "Default" FROM information_schema.columns WHERE table_schema = 'public' AND table_name = '%s'`, table)
default:
return "", fmt.Errorf("unsupported datasource type for describe_table: %s", dsType)
}
// Execute the query
query := map[string]interface{}{"sql": sql, "database": database}
data, _, err := plug.QueryLog(ctx, query)
if err != nil {
return "", fmt.Errorf("failed to describe table: %v", err)
}
// Convert to the unified column structure
columns := convertToColumnInfo(data, dsType)
logger.Debugf("describe_table: dsType=%s, table=%s.%s, found %d columns", dsType, database, table, len(columns))
bytes, _ := json.Marshal(columns)
return string(bytes), nil
}
// ColumnInfo describes a single table column
type ColumnInfo struct {
Name string `json:"name"`
Type string `json:"type"`
Comment string `json:"comment,omitempty"`
}
// extractColumnValues extracts the values of one column from the query results
func extractColumnValues(data []interface{}, dsType string, columnType string) []string {
result := make([]string, 0)
for _, row := range data {
if rowMap, ok := row.(map[string]interface{}); ok {
// Try several possible column names; a candidate ending with "_"
// (e.g. MySQL's "Tables_in_<db>") is matched as a prefix.
var value string
for _, key := range getPossibleColumnNames(dsType, columnType) {
if key != "" && key[len(key)-1] == '_' {
for k, v := range rowMap {
if len(k) >= len(key) && k[:len(key)] == key {
if s, ok := v.(string); ok {
value = s
}
break
}
}
} else if v, ok := rowMap[key]; ok {
if s, ok := v.(string); ok {
value = s
}
}
if value != "" {
break
}
}
if value != "" {
result = append(result, value)
}
}
}
return result
}
// getPossibleColumnNames returns the candidate column names for the result set
func getPossibleColumnNames(dsType string, columnType string) []string {
switch columnType {
case "database":
return []string{"Database", "database", "datname", "name"}
case "table":
return []string{"Tables_in_", "table", "tablename", "name", "Name"}
default:
return []string{}
}
}
// convertToColumnInfo converts query results into the unified column info format
func convertToColumnInfo(data []interface{}, dsType string) []ColumnInfo {
result := make([]ColumnInfo, 0)
for _, row := range data {
if rowMap, ok := row.(map[string]interface{}); ok {
col := ColumnInfo{}
// Extract the column name
for _, key := range []string{"Field", "field", "column_name", "name"} {
if v, ok := rowMap[key]; ok {
if s, ok := v.(string); ok {
col.Name = s
break
}
}
}
// Extract the column type
for _, key := range []string{"Type", "type", "data_type"} {
if v, ok := rowMap[key]; ok {
if s, ok := v.(string); ok {
col.Type = s
break
}
}
}
// Extract the comment (optional)
for _, key := range []string{"Comment", "comment", "column_comment"} {
if v, ok := rowMap[key]; ok {
if s, ok := v.(string); ok {
col.Comment = s
break
}
}
}
if col.Name != "" {
result = append(result, col)
}
}
}
return result
}
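// Illustrative sketch (not part of the original file): the JSON shape that
// describe_table returns to the model. The column values below are made-up examples.
func exampleDescribeTableOutput() string {
cols := []ColumnInfo{
{Name: "id", Type: "bigint", Comment: "primary key"},
{Name: "created_at", Type: "datetime"},
}
bytes, _ := json.Marshal(cols)
// yields: [{"name":"id","type":"bigint","comment":"primary key"},{"name":"created_at","type":"datetime"}]
return string(bytes)
}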

View File

@@ -1,376 +0,0 @@
package llm
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
)
const (
DefaultClaudeURL = "https://api.anthropic.com/v1/messages"
ClaudeAPIVersion = "2023-06-01"
DefaultClaudeMaxTokens = 4096
)
// Claude implements the LLM interface for Anthropic Claude API
type Claude struct {
config *Config
client *http.Client
}
// NewClaude creates a new Claude provider
func NewClaude(cfg *Config, client *http.Client) (*Claude, error) {
if cfg.BaseURL == "" {
cfg.BaseURL = DefaultClaudeURL
}
return &Claude{
config: cfg,
client: client,
}, nil
}
func (c *Claude) Name() string {
return ProviderClaude
}
// Claude API request/response structures
type claudeRequest struct {
Model string `json:"model"`
Messages []claudeMessage `json:"messages"`
System string `json:"system,omitempty"`
MaxTokens int `json:"max_tokens"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
Stop []string `json:"stop_sequences,omitempty"`
Stream bool `json:"stream,omitempty"`
Tools []claudeTool `json:"tools,omitempty"`
}
type claudeMessage struct {
Role string `json:"role"`
Content []claudeContentBlock `json:"content"`
}
type claudeContentBlock struct {
Type string `json:"type"`
Text string `json:"text,omitempty"`
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Input any `json:"input,omitempty"`
ToolUseID string `json:"tool_use_id,omitempty"`
Content string `json:"content,omitempty"`
}
type claudeTool struct {
Name string `json:"name"`
Description string `json:"description"`
InputSchema map[string]interface{} `json:"input_schema"`
}
type claudeResponse struct {
ID string `json:"id"`
Type string `json:"type"`
Role string `json:"role"`
Content []claudeContentBlock `json:"content"`
Model string `json:"model"`
StopReason string `json:"stop_reason"`
StopSequence string `json:"stop_sequence,omitempty"`
Usage *struct {
InputTokens int `json:"input_tokens"`
OutputTokens int `json:"output_tokens"`
} `json:"usage,omitempty"`
Error *struct {
Type string `json:"type"`
Message string `json:"message"`
} `json:"error,omitempty"`
}
// Claude streaming event types
type claudeStreamEvent struct {
Type string `json:"type"`
Index int `json:"index,omitempty"`
ContentBlock *claudeContentBlock `json:"content_block,omitempty"`
Delta *claudeStreamDelta `json:"delta,omitempty"`
Message *claudeResponse `json:"message,omitempty"`
Usage *claudeStreamUsage `json:"usage,omitempty"`
}
type claudeStreamDelta struct {
Type string `json:"type"`
Text string `json:"text,omitempty"`
PartialJSON string `json:"partial_json,omitempty"`
StopReason string `json:"stop_reason,omitempty"`
}
type claudeStreamUsage struct {
OutputTokens int `json:"output_tokens"`
}
func (c *Claude) Generate(ctx context.Context, req *GenerateRequest) (*GenerateResponse, error) {
claudeReq := c.convertRequest(req)
claudeReq.Stream = false
respBody, err := c.doRequest(ctx, claudeReq)
if err != nil {
return nil, err
}
var claudeResp claudeResponse
if err := json.Unmarshal(respBody, &claudeResp); err != nil {
return nil, fmt.Errorf("failed to parse response: %w", err)
}
if claudeResp.Error != nil {
return nil, fmt.Errorf("Claude API error: %s", claudeResp.Error.Message)
}
return c.convertResponse(&claudeResp), nil
}
func (c *Claude) GenerateStream(ctx context.Context, req *GenerateRequest) (<-chan StreamChunk, error) {
claudeReq := c.convertRequest(req)
claudeReq.Stream = true
jsonData, err := json.Marshal(claudeReq)
if err != nil {
return nil, fmt.Errorf("failed to marshal request: %w", err)
}
httpReq, err := http.NewRequestWithContext(ctx, "POST", c.config.BaseURL, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
c.setHeaders(httpReq)
resp, err := c.client.Do(httpReq)
if err != nil {
return nil, fmt.Errorf("failed to send request: %w", err)
}
if resp.StatusCode >= 400 {
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
return nil, fmt.Errorf("Claude API error (status %d): %s", resp.StatusCode, string(body))
}
ch := make(chan StreamChunk, 100)
go c.streamResponse(ctx, resp, ch)
return ch, nil
}
func (c *Claude) streamResponse(ctx context.Context, resp *http.Response, ch chan<- StreamChunk) {
defer close(ch)
defer resp.Body.Close()
reader := bufio.NewReader(resp.Body)
var currentToolCall *ToolCall
for {
select {
case <-ctx.Done():
ch <- StreamChunk{Done: true, Error: ctx.Err()}
return
default:
}
line, err := reader.ReadString('\n')
if err != nil {
if err != io.EOF {
ch <- StreamChunk{Done: true, Error: err}
} else {
ch <- StreamChunk{Done: true}
}
return
}
line = strings.TrimSpace(line)
if line == "" || !strings.HasPrefix(line, "data: ") {
continue
}
data := strings.TrimPrefix(line, "data: ")
var event claudeStreamEvent
if err := json.Unmarshal([]byte(data), &event); err != nil {
continue
}
switch event.Type {
case "content_block_start":
if event.ContentBlock != nil && event.ContentBlock.Type == "tool_use" {
currentToolCall = &ToolCall{
ID: event.ContentBlock.ID,
Name: event.ContentBlock.Name,
}
}
case "content_block_delta":
if event.Delta != nil {
chunk := StreamChunk{}
switch event.Delta.Type {
case "text_delta":
chunk.Content = event.Delta.Text
case "input_json_delta":
if currentToolCall != nil {
currentToolCall.Arguments += event.Delta.PartialJSON
}
}
if chunk.Content != "" {
ch <- chunk
}
}
case "content_block_stop":
if currentToolCall != nil {
ch <- StreamChunk{
ToolCalls: []ToolCall{*currentToolCall},
}
currentToolCall = nil
}
case "message_delta":
if event.Delta != nil && event.Delta.StopReason != "" {
ch <- StreamChunk{
FinishReason: event.Delta.StopReason,
}
}
case "message_stop":
ch <- StreamChunk{Done: true}
return
case "error":
ch <- StreamChunk{Done: true, Error: fmt.Errorf("stream error")}
return
}
}
}
func (c *Claude) convertRequest(req *GenerateRequest) *claudeRequest {
claudeReq := &claudeRequest{
Model: c.config.Model,
MaxTokens: req.MaxTokens,
Temperature: req.Temperature,
TopP: req.TopP,
Stop: req.Stop,
}
if claudeReq.MaxTokens <= 0 {
claudeReq.MaxTokens = DefaultClaudeMaxTokens
}
// Extract system message and convert other messages
for _, msg := range req.Messages {
if msg.Role == RoleSystem {
claudeReq.System = msg.Content
continue
}
// Claude uses content blocks instead of plain strings
claudeMsg := claudeMessage{
Role: msg.Role,
Content: []claudeContentBlock{
{Type: "text", Text: msg.Content},
},
}
claudeReq.Messages = append(claudeReq.Messages, claudeMsg)
}
// Convert tools
for _, tool := range req.Tools {
claudeReq.Tools = append(claudeReq.Tools, claudeTool{
Name: tool.Name,
Description: tool.Description,
InputSchema: tool.Parameters,
})
}
return claudeReq
}
func (c *Claude) convertResponse(resp *claudeResponse) *GenerateResponse {
result := &GenerateResponse{
FinishReason: resp.StopReason,
}
// Extract text content and tool calls
var textParts []string
for _, block := range resp.Content {
switch block.Type {
case "text":
textParts = append(textParts, block.Text)
case "tool_use":
inputJSON, _ := json.Marshal(block.Input)
result.ToolCalls = append(result.ToolCalls, ToolCall{
ID: block.ID,
Name: block.Name,
Arguments: string(inputJSON),
})
}
}
result.Content = strings.Join(textParts, "")
if resp.Usage != nil {
result.Usage = &Usage{
PromptTokens: resp.Usage.InputTokens,
CompletionTokens: resp.Usage.OutputTokens,
TotalTokens: resp.Usage.InputTokens + resp.Usage.OutputTokens,
}
}
return result
}
func (c *Claude) doRequest(ctx context.Context, req *claudeRequest) ([]byte, error) {
jsonData, err := json.Marshal(req)
if err != nil {
return nil, fmt.Errorf("failed to marshal request: %w", err)
}
httpReq, err := http.NewRequestWithContext(ctx, "POST", c.config.BaseURL, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
c.setHeaders(httpReq)
resp, err := c.client.Do(httpReq)
if err != nil {
return nil, fmt.Errorf("failed to send request: %w", err)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response: %w", err)
}
if resp.StatusCode >= 400 {
return nil, fmt.Errorf("Claude API error (status %d): %s", resp.StatusCode, string(body))
}
return body, nil
}
func (c *Claude) setHeaders(req *http.Request) {
req.Header.Set("Content-Type", "application/json")
req.Header.Set("anthropic-version", ClaudeAPIVersion)
if c.config.APIKey != "" {
req.Header.Set("x-api-key", c.config.APIKey)
}
for k, v := range c.config.Headers {
req.Header.Set(k, v)
}
}

View File

@@ -1,376 +0,0 @@
package llm
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
)
const (
DefaultGeminiURL = "https://generativelanguage.googleapis.com/v1beta/models"
)
// Gemini implements the LLM interface for Google Gemini API
type Gemini struct {
config *Config
client *http.Client
}
// NewGemini creates a new Gemini provider
func NewGemini(cfg *Config, client *http.Client) (*Gemini, error) {
if cfg.BaseURL == "" {
cfg.BaseURL = DefaultGeminiURL
}
return &Gemini{
config: cfg,
client: client,
}, nil
}
func (g *Gemini) Name() string {
return ProviderGemini
}
// Gemini API request/response structures
type geminiRequest struct {
Contents []geminiContent `json:"contents"`
SystemInstruction *geminiContent `json:"systemInstruction,omitempty"`
Tools []geminiTool `json:"tools,omitempty"`
GenerationConfig *geminiGenerationConfig `json:"generationConfig,omitempty"`
}
type geminiContent struct {
Role string `json:"role,omitempty"`
Parts []geminiPart `json:"parts"`
}
type geminiPart struct {
Text string `json:"text,omitempty"`
FunctionCall *geminiFunctionCall `json:"functionCall,omitempty"`
FunctionResponse *geminiFunctionResponse `json:"functionResponse,omitempty"`
}
type geminiFunctionCall struct {
Name string `json:"name"`
Args map[string]interface{} `json:"args"`
}
type geminiFunctionResponse struct {
Name string `json:"name"`
Response map[string]interface{} `json:"response"`
}
type geminiTool struct {
FunctionDeclarations []geminiFunctionDeclaration `json:"functionDeclarations,omitempty"`
}
type geminiFunctionDeclaration struct {
Name string `json:"name"`
Description string `json:"description"`
Parameters map[string]interface{} `json:"parameters,omitempty"`
}
type geminiGenerationConfig struct {
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"topP,omitempty"`
MaxOutputTokens int `json:"maxOutputTokens,omitempty"`
StopSequences []string `json:"stopSequences,omitempty"`
}
type geminiResponse struct {
Candidates []struct {
Content geminiContent `json:"content"`
FinishReason string `json:"finishReason"`
SafetyRatings []struct {
Category string `json:"category"`
Probability string `json:"probability"`
} `json:"safetyRatings,omitempty"`
} `json:"candidates"`
UsageMetadata *struct {
PromptTokenCount int `json:"promptTokenCount"`
CandidatesTokenCount int `json:"candidatesTokenCount"`
TotalTokenCount int `json:"totalTokenCount"`
} `json:"usageMetadata,omitempty"`
Error *struct {
Code int `json:"code"`
Message string `json:"message"`
Status string `json:"status"`
} `json:"error,omitempty"`
}
func (g *Gemini) Generate(ctx context.Context, req *GenerateRequest) (*GenerateResponse, error) {
geminiReq := g.convertRequest(req)
url := g.buildURL(false)
respBody, err := g.doRequest(ctx, url, geminiReq)
if err != nil {
return nil, err
}
var geminiResp geminiResponse
if err := json.Unmarshal(respBody, &geminiResp); err != nil {
return nil, fmt.Errorf("failed to parse response: %w", err)
}
if geminiResp.Error != nil {
return nil, fmt.Errorf("Gemini API error: %s", geminiResp.Error.Message)
}
return g.convertResponse(&geminiResp), nil
}
func (g *Gemini) GenerateStream(ctx context.Context, req *GenerateRequest) (<-chan StreamChunk, error) {
geminiReq := g.convertRequest(req)
jsonData, err := json.Marshal(geminiReq)
if err != nil {
return nil, fmt.Errorf("failed to marshal request: %w", err)
}
url := g.buildURL(true)
httpReq, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
g.setHeaders(httpReq)
resp, err := g.client.Do(httpReq)
if err != nil {
return nil, fmt.Errorf("failed to send request: %w", err)
}
if resp.StatusCode >= 400 {
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
return nil, fmt.Errorf("Gemini API error (status %d): %s", resp.StatusCode, string(body))
}
ch := make(chan StreamChunk, 100)
go g.streamResponse(ctx, resp, ch)
return ch, nil
}
func (g *Gemini) streamResponse(ctx context.Context, resp *http.Response, ch chan<- StreamChunk) {
defer close(ch)
defer resp.Body.Close()
reader := bufio.NewReader(resp.Body)
var buffer strings.Builder
for {
select {
case <-ctx.Done():
ch <- StreamChunk{Done: true, Error: ctx.Err()}
return
default:
}
line, err := reader.ReadString('\n')
if err != nil {
if err != io.EOF {
ch <- StreamChunk{Done: true, Error: err}
} else {
ch <- StreamChunk{Done: true}
}
return
}
line = strings.TrimSpace(line)
// Gemini streams JSON objects, accumulate until we have a complete one
if line == "" {
continue
}
// Handle SSE format if present
if strings.HasPrefix(line, "data: ") {
line = strings.TrimPrefix(line, "data: ")
}
buffer.WriteString(line)
// Try to parse accumulated JSON
var geminiResp geminiResponse
if err := json.Unmarshal([]byte(buffer.String()), &geminiResp); err != nil {
// Not complete yet, continue accumulating
continue
}
// Reset buffer for next response
buffer.Reset()
if len(geminiResp.Candidates) > 0 {
candidate := geminiResp.Candidates[0]
chunk := StreamChunk{
FinishReason: candidate.FinishReason,
}
for _, part := range candidate.Content.Parts {
if part.Text != "" {
chunk.Content += part.Text
}
if part.FunctionCall != nil {
argsJSON, _ := json.Marshal(part.FunctionCall.Args)
chunk.ToolCalls = append(chunk.ToolCalls, ToolCall{
Name: part.FunctionCall.Name,
Arguments: string(argsJSON),
})
}
}
ch <- chunk
if candidate.FinishReason != "" && candidate.FinishReason != "STOP" {
ch <- StreamChunk{Done: true}
return
}
}
}
}
func (g *Gemini) convertRequest(req *GenerateRequest) *geminiRequest {
geminiReq := &geminiRequest{
GenerationConfig: &geminiGenerationConfig{
Temperature: req.Temperature,
TopP: req.TopP,
MaxOutputTokens: req.MaxTokens,
StopSequences: req.Stop,
},
}
// Convert messages
for _, msg := range req.Messages {
if msg.Role == RoleSystem {
geminiReq.SystemInstruction = &geminiContent{
Parts: []geminiPart{{Text: msg.Content}},
}
continue
}
// Map roles
role := msg.Role
if role == RoleAssistant {
role = "model"
}
geminiReq.Contents = append(geminiReq.Contents, geminiContent{
Role: role,
Parts: []geminiPart{{Text: msg.Content}},
})
}
// Convert tools
if len(req.Tools) > 0 {
var declarations []geminiFunctionDeclaration
for _, tool := range req.Tools {
declarations = append(declarations, geminiFunctionDeclaration{
Name: tool.Name,
Description: tool.Description,
Parameters: tool.Parameters,
})
}
geminiReq.Tools = []geminiTool{{FunctionDeclarations: declarations}}
}
return geminiReq
}
func (g *Gemini) convertResponse(resp *geminiResponse) *GenerateResponse {
result := &GenerateResponse{}
if len(resp.Candidates) > 0 {
candidate := resp.Candidates[0]
result.FinishReason = candidate.FinishReason
var textParts []string
for _, part := range candidate.Content.Parts {
if part.Text != "" {
textParts = append(textParts, part.Text)
}
if part.FunctionCall != nil {
argsJSON, _ := json.Marshal(part.FunctionCall.Args)
result.ToolCalls = append(result.ToolCalls, ToolCall{
Name: part.FunctionCall.Name,
Arguments: string(argsJSON),
})
}
}
result.Content = strings.Join(textParts, "")
}
if resp.UsageMetadata != nil {
result.Usage = &Usage{
PromptTokens: resp.UsageMetadata.PromptTokenCount,
CompletionTokens: resp.UsageMetadata.CandidatesTokenCount,
TotalTokens: resp.UsageMetadata.TotalTokenCount,
}
}
return result
}
func (g *Gemini) buildURL(stream bool) string {
action := "generateContent"
if stream {
action = "streamGenerateContent"
}
// Check if baseURL already contains the full path
if strings.Contains(g.config.BaseURL, ":generateContent") ||
strings.Contains(g.config.BaseURL, ":streamGenerateContent") {
return fmt.Sprintf("%s?key=%s", g.config.BaseURL, g.config.APIKey)
}
return fmt.Sprintf("%s/%s:%s?key=%s",
g.config.BaseURL,
g.config.Model,
action,
g.config.APIKey,
)
}
func (g *Gemini) doRequest(ctx context.Context, url string, req *geminiRequest) ([]byte, error) {
jsonData, err := json.Marshal(req)
if err != nil {
return nil, fmt.Errorf("failed to marshal request: %w", err)
}
httpReq, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
g.setHeaders(httpReq)
resp, err := g.client.Do(httpReq)
if err != nil {
return nil, fmt.Errorf("failed to send request: %w", err)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response: %w", err)
}
if resp.StatusCode >= 400 {
return nil, fmt.Errorf("Gemini API error (status %d): %s", resp.StatusCode, string(body))
}
return body, nil
}
func (g *Gemini) setHeaders(req *http.Request) {
req.Header.Set("Content-Type", "application/json")
for k, v := range g.config.Headers {
req.Header.Set(k, v)
}
}

View File

@@ -1,135 +0,0 @@
package llm
import (
"context"
"strings"
)
// Chat is a convenience function for simple chat completions
func Chat(ctx context.Context, llm LLM, messages []Message) (string, error) {
resp, err := llm.Generate(ctx, &GenerateRequest{
Messages: messages,
})
if err != nil {
return "", err
}
return resp.Content, nil
}
// ChatWithSystem is a convenience function for chat with a system prompt
func ChatWithSystem(ctx context.Context, llm LLM, systemPrompt string, userMessage string) (string, error) {
messages := []Message{
{Role: RoleSystem, Content: systemPrompt},
{Role: RoleUser, Content: userMessage},
}
return Chat(ctx, llm, messages)
}
// NewMessage creates a new message
func NewMessage(role, content string) Message {
return Message{Role: role, Content: content}
}
// SystemMessage creates a system message
func SystemMessage(content string) Message {
return Message{Role: RoleSystem, Content: content}
}
// UserMessage creates a user message
func UserMessage(content string) Message {
return Message{Role: RoleUser, Content: content}
}
// AssistantMessage creates an assistant message
func AssistantMessage(content string) Message {
return Message{Role: RoleAssistant, Content: content}
}
// DetectProvider attempts to detect the provider from the base URL
func DetectProvider(baseURL string) string {
baseURL = strings.ToLower(baseURL)
switch {
case strings.Contains(baseURL, "anthropic.com"):
return ProviderClaude
case strings.Contains(baseURL, "generativelanguage.googleapis.com"):
return ProviderGemini
case strings.Contains(baseURL, "aiplatform.googleapis.com"):
return ProviderVertex
case strings.Contains(baseURL, "bedrock"):
return ProviderBedrock
case strings.Contains(baseURL, "localhost:11434"):
return ProviderOllama
default:
// Default to OpenAI-compatible
return ProviderOpenAI
}
}
// DetectProviderFromModel attempts to detect the provider from the model name
func DetectProviderFromModel(model string) string {
model = strings.ToLower(model)
switch {
case strings.HasPrefix(model, "claude"):
return ProviderClaude
case strings.HasPrefix(model, "gemini"):
return ProviderGemini
case strings.HasPrefix(model, "gpt") || strings.HasPrefix(model, "o1") || strings.HasPrefix(model, "o3"):
return ProviderOpenAI
case strings.HasPrefix(model, "llama") || strings.HasPrefix(model, "mistral") || strings.HasPrefix(model, "qwen"):
return ProviderOllama
default:
return ProviderOpenAI
}
}
// BuildToolDefinition creates a tool definition with JSON schema parameters
func BuildToolDefinition(name, description string, properties map[string]interface{}, required []string) ToolDefinition {
params := map[string]interface{}{
"type": "object",
"properties": properties,
}
if len(required) > 0 {
params["required"] = required
}
return ToolDefinition{
Name: name,
Description: description,
Parameters: params,
}
}
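// Usage sketch (the tool name and parameters are illustrative assumptions, not
// part of this package): building a tool definition with a JSON-schema object.
var exampleQueryMetricTool = BuildToolDefinition(
"query_metric",
"Query a metric by PromQL expression",
map[string]interface{}{
"promql": map[string]interface{}{"type": "string", "description": "PromQL expression to evaluate"},
"range_seconds": map[string]interface{}{"type": "integer", "description": "lookback window in seconds"},
},
[]string{"promql"},
)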
// CollectStream collects all chunks from a stream into a single response
func CollectStream(ch <-chan StreamChunk) (*GenerateResponse, error) {
var content strings.Builder
var toolCalls []ToolCall
var finishReason string
var lastErr error
for chunk := range ch {
if chunk.Error != nil {
lastErr = chunk.Error
}
if chunk.Content != "" {
content.WriteString(chunk.Content)
}
if len(chunk.ToolCalls) > 0 {
toolCalls = append(toolCalls, chunk.ToolCalls...)
}
if chunk.FinishReason != "" {
finishReason = chunk.FinishReason
}
}
if lastErr != nil {
return nil, lastErr
}
return &GenerateResponse{
Content: content.String(),
ToolCalls: toolCalls,
FinishReason: finishReason,
}, nil
}
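// Usage sketch (assumed wiring; it relies only on the helpers defined above):
// stream a completion and collect the chunks into a single response.
func collectStreamExample(ctx context.Context, provider LLM) (string, error) {
ch, err := provider.GenerateStream(ctx, &GenerateRequest{
Messages: []Message{
SystemMessage("You are a monitoring assistant."),
UserMessage("Summarize this alert in one sentence."),
},
})
if err != nil {
return "", err
}
resp, err := CollectStream(ch)
if err != nil {
return "", err
}
return resp.Content, nil
}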

View File

@@ -1,193 +0,0 @@
// Package llm provides a unified interface for multiple LLM providers.
// Supports OpenAI-compatible APIs, Claude/Anthropic, and Gemini.
package llm
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"net/url"
"time"
)
// Provider types
const (
ProviderOpenAI = "openai" // OpenAI and compatible APIs (Azure, vLLM, etc.)
ProviderClaude = "claude" // Anthropic Claude
ProviderGemini = "gemini" // Google Gemini
ProviderOllama = "ollama" // Ollama local models
ProviderBedrock = "bedrock" // AWS Bedrock
ProviderVertex = "vertex" // Google Vertex AI
)
// Role constants
const (
RoleSystem = "system"
RoleUser = "user"
RoleAssistant = "assistant"
)
// Message represents a chat message
type Message struct {
Role string `json:"role"`
Content string `json:"content"`
}
// ToolCall represents a tool/function call from the LLM
type ToolCall struct {
ID string `json:"id"`
Name string `json:"name"`
Arguments string `json:"arguments"`
}
// ToolDefinition defines a tool that the LLM can call
type ToolDefinition struct {
Name string `json:"name"`
Description string `json:"description"`
Parameters map[string]interface{} `json:"parameters,omitempty"`
}
// GenerateRequest is the unified request for LLM generation
type GenerateRequest struct {
Messages []Message `json:"messages"`
Tools []ToolDefinition `json:"tools,omitempty"`
MaxTokens int `json:"max_tokens,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
Stop []string `json:"stop,omitempty"`
Stream bool `json:"stream,omitempty"`
}
// GenerateResponse is the unified response from LLM generation
type GenerateResponse struct {
Content string `json:"content"`
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
FinishReason string `json:"finish_reason"`
Usage *Usage `json:"usage,omitempty"`
}
// Usage represents token usage statistics
type Usage struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
TotalTokens int `json:"total_tokens"`
}
// StreamChunk represents a chunk in streaming response
type StreamChunk struct {
Content string `json:"content,omitempty"`
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
FinishReason string `json:"finish_reason,omitempty"`
Done bool `json:"done"`
Error error `json:"error,omitempty"`
}
// LLM is the unified interface for all LLM providers
type LLM interface {
// Name returns the provider name
Name() string
// Generate sends a request to the LLM and returns the response
Generate(ctx context.Context, req *GenerateRequest) (*GenerateResponse, error)
// GenerateStream sends a request and returns a channel for streaming responses
GenerateStream(ctx context.Context, req *GenerateRequest) (<-chan StreamChunk, error)
}
// Config is the configuration for creating an LLM provider
type Config struct {
// Provider type: openai, claude, gemini, ollama, bedrock, vertex
Provider string `json:"provider"`
// API endpoint URL
BaseURL string `json:"base_url,omitempty"`
// API key or token
APIKey string `json:"api_key,omitempty"`
// Model name (e.g., "gpt-4", "claude-3-opus", "gemini-pro")
Model string `json:"model"`
// Additional headers for API requests
Headers map[string]string `json:"headers,omitempty"`
// HTTP timeout in milliseconds
Timeout int `json:"timeout,omitempty"`
// Skip SSL verification (for self-signed certs)
SkipSSLVerify bool `json:"skip_ssl_verify,omitempty"`
// HTTP proxy URL
Proxy string `json:"proxy,omitempty"`
// Provider-specific options
Options map[string]interface{} `json:"options,omitempty"`
}
// DefaultConfig returns a config with default values
func DefaultConfig() *Config {
return &Config{
Provider: ProviderOpenAI,
Timeout: 60000,
}
}
// New creates an LLM instance based on the config
func New(cfg *Config) (LLM, error) {
if cfg == nil {
cfg = DefaultConfig()
}
// Create HTTP client
client := createHTTPClient(cfg)
switch cfg.Provider {
case ProviderOpenAI, "":
return NewOpenAI(cfg, client)
case ProviderClaude:
return NewClaude(cfg, client)
case ProviderGemini:
return NewGemini(cfg, client)
case ProviderOllama:
// Ollama uses OpenAI-compatible API
if cfg.BaseURL == "" {
cfg.BaseURL = "http://localhost:11434/v1"
}
return NewOpenAI(cfg, client)
default:
return nil, fmt.Errorf("unsupported LLM provider: %s", cfg.Provider)
}
}
// createHTTPClient creates an HTTP client with the given config
func createHTTPClient(cfg *Config) *http.Client {
transport := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: cfg.SkipSSLVerify,
},
}
if cfg.Proxy != "" {
if proxyURL, err := url.Parse(cfg.Proxy); err == nil {
transport.Proxy = http.ProxyURL(proxyURL)
}
}
timeout := cfg.Timeout
if timeout <= 0 {
timeout = 60000
}
return &http.Client{
Timeout: time.Duration(timeout) * time.Millisecond,
Transport: transport,
}
}
// ConvertMessages returns a copy of the message slice so providers can modify it without affecting the caller
func ConvertMessages(messages []Message) []Message {
result := make([]Message, len(messages))
copy(result, messages)
return result
}
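// Usage sketch (the API key and model name below are placeholders, not shipped
// defaults): create a provider through the factory and run a single completion.
func newAndGenerateExample(ctx context.Context) (string, error) {
provider, err := New(&Config{
Provider: ProviderOpenAI,
APIKey: "sk-...", // placeholder
Model: "gpt-4o-mini", // illustrative model name
Timeout: 30000,
})
if err != nil {
return "", err
}
resp, err := provider.Generate(ctx, &GenerateRequest{
Messages: []Message{
{Role: RoleSystem, Content: "You are a helpful assistant."},
{Role: RoleUser, Content: "Hello"},
},
MaxTokens: 256,
})
if err != nil {
return "", err
}
return resp.Content, nil
}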

View File

@@ -1,416 +0,0 @@
package llm
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
)
const (
DefaultOpenAIURL = "https://api.openai.com/v1/chat/completions"
// Retry configuration
maxRetries = 3
initialRetryWait = 5 * time.Second // initial wait of 5 seconds on rate limits
maxRetryWait = 60 * time.Second // wait at most 60 seconds between retries
)
// OpenAI implements the LLM interface for OpenAI and compatible APIs
type OpenAI struct {
config *Config
client *http.Client
}
// NewOpenAI creates a new OpenAI provider
func NewOpenAI(cfg *Config, client *http.Client) (*OpenAI, error) {
if cfg.BaseURL == "" {
cfg.BaseURL = DefaultOpenAIURL
}
return &OpenAI{
config: cfg,
client: client,
}, nil
}
func (o *OpenAI) Name() string {
return ProviderOpenAI
}
// OpenAI API request/response structures
type openAIRequest struct {
Model string `json:"model"`
Messages []openAIMessage `json:"messages"`
Tools []openAITool `json:"tools,omitempty"`
MaxTokens int `json:"max_tokens,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
Stop []string `json:"stop,omitempty"`
Stream bool `json:"stream,omitempty"`
}
type openAIMessage struct {
Role string `json:"role"`
Content string `json:"content,omitempty"`
ToolCalls []openAIToolCall `json:"tool_calls,omitempty"`
ToolCallID string `json:"tool_call_id,omitempty"`
}
type openAITool struct {
Type string `json:"type"`
Function openAIFunction `json:"function"`
}
type openAIFunction struct {
Name string `json:"name"`
Description string `json:"description"`
Parameters map[string]interface{} `json:"parameters,omitempty"`
}
type openAIToolCall struct {
ID string `json:"id"`
Type string `json:"type"`
Function struct {
Name string `json:"name"`
Arguments string `json:"arguments"`
} `json:"function"`
}
type openAIResponse struct {
ID string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
Model string `json:"model"`
Choices []struct {
Index int `json:"index"`
Message openAIMessage `json:"message"`
Delta openAIMessage `json:"delta"`
FinishReason string `json:"finish_reason"`
} `json:"choices"`
Usage *struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
TotalTokens int `json:"total_tokens"`
} `json:"usage,omitempty"`
Error *struct {
Message string `json:"message"`
Type string `json:"type"`
Code string `json:"code"`
} `json:"error,omitempty"`
}
func (o *OpenAI) Generate(ctx context.Context, req *GenerateRequest) (*GenerateResponse, error) {
// Convert to OpenAI format
openAIReq := o.convertRequest(req)
openAIReq.Stream = false
// Make request
respBody, err := o.doRequest(ctx, openAIReq)
if err != nil {
return nil, err
}
// Parse response
var openAIResp openAIResponse
if err := json.Unmarshal(respBody, &openAIResp); err != nil {
return nil, fmt.Errorf("failed to parse response: %w", err)
}
if openAIResp.Error != nil {
return nil, fmt.Errorf("OpenAI API error: %s", openAIResp.Error.Message)
}
if len(openAIResp.Choices) == 0 {
return nil, fmt.Errorf("no response from OpenAI")
}
// Convert to unified response
return o.convertResponse(&openAIResp), nil
}
// isRetryableStatus reports whether the HTTP status code is retryable
func isRetryableStatus(statusCode int) bool {
switch statusCode {
case 429, 500, 502, 503, 504:
return true
default:
return false
}
}
func (o *OpenAI) GenerateStream(ctx context.Context, req *GenerateRequest) (<-chan StreamChunk, error) {
// Convert to OpenAI format
openAIReq := o.convertRequest(req)
openAIReq.Stream = true
// Create request body
jsonData, err := json.Marshal(openAIReq)
if err != nil {
return nil, fmt.Errorf("failed to marshal request: %w", err)
}
var resp *http.Response
var lastErr error
retryWait := initialRetryWait
// Retry loop
for attempt := 0; attempt <= maxRetries; attempt++ {
if attempt > 0 {
// Wait before retrying
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-time.After(retryWait):
}
// Exponential backoff, capped at the maximum wait
retryWait *= 2
if retryWait > maxRetryWait {
retryWait = maxRetryWait
}
}
httpReq, err := http.NewRequestWithContext(ctx, "POST", o.config.BaseURL, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
o.setHeaders(httpReq)
// Make request
resp, err = o.client.Do(httpReq)
if err != nil {
lastErr = fmt.Errorf("failed to send request: %w", err)
continue // network error, retry
}
if resp.StatusCode >= 400 {
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
lastErr = fmt.Errorf("OpenAI API error (status %d): %s", resp.StatusCode, string(body))
// Check whether the status is retryable
if isRetryableStatus(resp.StatusCode) && attempt < maxRetries {
continue // retryable error, try again
}
return nil, lastErr
}
// Success, exit the retry loop
break
}
if resp == nil {
return nil, lastErr
}
// Create channel and start streaming
ch := make(chan StreamChunk, 100)
go o.streamResponse(ctx, resp, ch)
return ch, nil
}
func (o *OpenAI) streamResponse(ctx context.Context, resp *http.Response, ch chan<- StreamChunk) {
defer close(ch)
defer resp.Body.Close()
reader := bufio.NewReader(resp.Body)
for {
select {
case <-ctx.Done():
ch <- StreamChunk{Done: true, Error: ctx.Err()}
return
default:
}
line, err := reader.ReadString('\n')
if err != nil {
if err != io.EOF {
ch <- StreamChunk{Done: true, Error: err}
} else {
ch <- StreamChunk{Done: true}
}
return
}
line = strings.TrimSpace(line)
if line == "" {
continue
}
if !strings.HasPrefix(line, "data: ") {
continue
}
data := strings.TrimPrefix(line, "data: ")
if data == "[DONE]" {
ch <- StreamChunk{Done: true}
return
}
var streamResp openAIResponse
if err := json.Unmarshal([]byte(data), &streamResp); err != nil {
continue
}
if len(streamResp.Choices) > 0 {
delta := streamResp.Choices[0].Delta
chunk := StreamChunk{
Content: delta.Content,
FinishReason: streamResp.Choices[0].FinishReason,
}
// Handle tool calls in stream
if len(delta.ToolCalls) > 0 {
for _, tc := range delta.ToolCalls {
chunk.ToolCalls = append(chunk.ToolCalls, ToolCall{
ID: tc.ID,
Name: tc.Function.Name,
Arguments: tc.Function.Arguments,
})
}
}
ch <- chunk
}
}
}
func (o *OpenAI) convertRequest(req *GenerateRequest) *openAIRequest {
openAIReq := &openAIRequest{
Model: o.config.Model,
MaxTokens: req.MaxTokens,
Temperature: req.Temperature,
TopP: req.TopP,
Stop: req.Stop,
}
// Convert messages
for _, msg := range req.Messages {
openAIReq.Messages = append(openAIReq.Messages, openAIMessage{
Role: msg.Role,
Content: msg.Content,
})
}
// Convert tools
for _, tool := range req.Tools {
openAIReq.Tools = append(openAIReq.Tools, openAITool{
Type: "function",
Function: openAIFunction{
Name: tool.Name,
Description: tool.Description,
Parameters: tool.Parameters,
},
})
}
return openAIReq
}
func (o *OpenAI) convertResponse(resp *openAIResponse) *GenerateResponse {
result := &GenerateResponse{}
if len(resp.Choices) > 0 {
choice := resp.Choices[0]
result.Content = choice.Message.Content
result.FinishReason = choice.FinishReason
// Convert tool calls
for _, tc := range choice.Message.ToolCalls {
result.ToolCalls = append(result.ToolCalls, ToolCall{
ID: tc.ID,
Name: tc.Function.Name,
Arguments: tc.Function.Arguments,
})
}
}
if resp.Usage != nil {
result.Usage = &Usage{
PromptTokens: resp.Usage.PromptTokens,
CompletionTokens: resp.Usage.CompletionTokens,
TotalTokens: resp.Usage.TotalTokens,
}
}
return result
}
func (o *OpenAI) doRequest(ctx context.Context, req *openAIRequest) ([]byte, error) {
jsonData, err := json.Marshal(req)
if err != nil {
return nil, fmt.Errorf("failed to marshal request: %w", err)
}
var lastErr error
retryWait := initialRetryWait
// Retry loop
for attempt := 0; attempt <= maxRetries; attempt++ {
if attempt > 0 {
// Wait before retrying
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-time.After(retryWait):
}
// Exponential backoff
retryWait *= 2
if retryWait > maxRetryWait {
retryWait = maxRetryWait
}
}
httpReq, err := http.NewRequestWithContext(ctx, "POST", o.config.BaseURL, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
o.setHeaders(httpReq)
resp, err := o.client.Do(httpReq)
if err != nil {
lastErr = fmt.Errorf("failed to send request: %w", err)
continue // network error, retry
}
body, err := io.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
lastErr = fmt.Errorf("failed to read response: %w", err)
continue
}
if resp.StatusCode >= 400 {
lastErr = fmt.Errorf("OpenAI API error (status %d): %s", resp.StatusCode, string(body))
// Check whether the status is retryable
if isRetryableStatus(resp.StatusCode) && attempt < maxRetries {
continue
}
return nil, lastErr
}
return body, nil
}
return nil, lastErr
}
func (o *OpenAI) setHeaders(req *http.Request) {
req.Header.Set("Content-Type", "application/json")
if o.config.APIKey != "" {
req.Header.Set("Authorization", "Bearer "+o.config.APIKey)
}
for k, v := range o.config.Headers {
req.Header.Set(k, v)
}
}

View File

@@ -1,133 +0,0 @@
package llm
import (
"fmt"
"runtime"
"strings"
"time"
)
// ToolInfo describes a tool (used when building prompts)
type ToolInfo struct {
Name string
Description string
Parameters []ToolParamInfo
}
// ToolParamInfo describes a tool parameter
type ToolParamInfo struct {
Name string
Type string
Description string
Required bool
}
// PromptData holds data for prompt templates
type PromptData struct {
Platform string // operating system
Date string // current date
}
// BuildToolsSection builds the tool description section
func BuildToolsSection(tools []ToolInfo) string {
if len(tools) == 0 {
return ""
}
var sb strings.Builder
sb.WriteString("## Available Tools\n\n")
for _, tool := range tools {
sb.WriteString(fmt.Sprintf("### %s\n", tool.Name))
sb.WriteString(fmt.Sprintf("%s\n", tool.Description))
if len(tool.Parameters) > 0 {
sb.WriteString("Parameters:\n")
for _, param := range tool.Parameters {
required := ""
if param.Required {
required = " (required)"
}
sb.WriteString(fmt.Sprintf("- %s (%s)%s: %s\n", param.Name, param.Type, required, param.Description))
}
}
sb.WriteString("\n")
}
return sb.String()
}
// BuildToolsListBrief builds a brief tool list (used in Plan mode)
func BuildToolsListBrief(tools []ToolInfo) string {
if len(tools) == 0 {
return ""
}
var sb strings.Builder
sb.WriteString("## Available Tools\n\n")
for _, tool := range tools {
sb.WriteString(fmt.Sprintf("- **%s**: %s\n", tool.Name, tool.Description))
}
return sb.String()
}
// BuildEnvSection builds the environment info section
func BuildEnvSection() string {
var sb strings.Builder
sb.WriteString("## Environment\n\n")
sb.WriteString(fmt.Sprintf("- Platform: %s\n", runtime.GOOS))
sb.WriteString(fmt.Sprintf("- Date: %s\n", time.Now().Format("2006-01-02")))
return sb.String()
}
// BuildSkillsSection builds the skill guidance section
func BuildSkillsSection(skillContents []string) string {
if len(skillContents) == 0 {
return ""
}
var sb strings.Builder
sb.WriteString("## Skill Guidance\n\n")
if len(skillContents) == 1 {
sb.WriteString("The following skill has been loaded for you; follow the procedure it describes:\n\n")
sb.WriteString(skillContents[0])
sb.WriteString("\n\n")
} else {
sb.WriteString("The following skills have been loaded for you; refer to their procedures when creating the execution plan:\n\n")
for i, content := range skillContents {
sb.WriteString(fmt.Sprintf("### Skill %d\n\n", i+1))
sb.WriteString(content)
sb.WriteString("\n\n")
}
}
return sb.String()
}
// BuildPreviousFindingsSection builds the previous-findings section
func BuildPreviousFindingsSection(findings []string) string {
if len(findings) == 0 {
return ""
}
var sb strings.Builder
sb.WriteString("## Previous Findings\n\n")
for _, finding := range findings {
sb.WriteString(fmt.Sprintf("- %s\n", finding))
}
sb.WriteString("\n")
return sb.String()
}
// BuildCurrentStepSection builds the current-step section
func BuildCurrentStepSection(goal, approach string) string {
var sb strings.Builder
sb.WriteString("## Current Step\n\n")
sb.WriteString(fmt.Sprintf("**Goal**: %s\n", goal))
sb.WriteString(fmt.Sprintf("**Approach**: %s\n\n", approach))
return sb.String()
}
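// Composition sketch (assumed wiring, not from the original file): how the
// helpers above could be combined into a step-execution prompt.
func buildStepPromptExample(tools []ToolInfo, findings []string, goal, approach string) string {
var sb strings.Builder
sb.WriteString(BuildEnvSection())
sb.WriteString("\n")
sb.WriteString(BuildToolsSection(tools))
sb.WriteString(BuildPreviousFindingsSection(findings))
sb.WriteString(BuildCurrentStepSection(goal, approach))
return sb.String()
}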

View File

@@ -1,571 +0,0 @@
package aiagent
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"os/exec"
"strings"
"sync"
"time"
"github.com/modelcontextprotocol/go-sdk/mcp"
"github.com/toolkits/pkg/logger"
)
const (
// MCP transport types
MCPTransportStdio = "stdio" // stdin/stdout transport
MCPTransportSSE = "sse" // HTTP Server-Sent Events transport
// Default timeouts (milliseconds)
DefaultMCPTimeout = 30000 // 30 seconds
DefaultMCPConnectTimeout = 10000 // 10 seconds
)
// MCPConfig holds the MCP server configuration (used in AIAgentConfig)
type MCPConfig struct {
// List of MCP servers
Servers []MCPServerConfig `json:"servers"`
}
// MCPServerConfig configures a single MCP server
type MCPServerConfig struct {
// Server name (unique identifier)
Name string `json:"name"`
// Transport type: stdio or sse
Transport string `json:"transport"`
// === stdio transport settings ===
Command string `json:"command,omitempty"` // command used to start the server
Args []string `json:"args,omitempty"` // command arguments
Env map[string]string `json:"env,omitempty"` // environment variables (${VAR} is expanded from the system environment)
// === SSE transport settings ===
URL string `json:"url,omitempty"` // SSE server URL
Headers map[string]string `json:"headers,omitempty"` // request headers (${VAR} is expanded from the system environment)
SkipSSLVerify bool `json:"skip_ssl_verify,omitempty"` // skip SSL verification
// === Auth settings (SSE transport) ===
// Convenience auth settings; the matching header is set automatically
AuthType string `json:"auth_type,omitempty"` // auth type: bearer, api_key, basic
APIKey string `json:"api_key,omitempty"` // API key (${VAR} is expanded from the system environment)
Username string `json:"username,omitempty"` // Basic Auth username
Password string `json:"password,omitempty"` // Basic Auth password (supports ${VAR})
// Common settings
Timeout int `json:"timeout,omitempty"` // tool call timeout (milliseconds)
ConnectTimeout int `json:"connect_timeout,omitempty"` // connect timeout (milliseconds)
}
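// Illustrative configurations (assumptions, not shipped defaults): a stdio server
// launched as a subprocess, and an SSE server reached over HTTPS with a bearer token.
//
// {"name": "files", "transport": "stdio", "command": "npx",
//  "args": ["-y", "@modelcontextprotocol/server-filesystem", "/tmp"]}
//
// {"name": "remote", "transport": "sse", "url": "https://mcp.example.com/sse",
//  "auth_type": "bearer", "api_key": "${MCP_TOKEN}"}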
// MCPToolConfig configures an MCP tool (used in AgentTool)
type MCPToolConfig struct {
// MCP server name (references an entry in MCPConfig.Servers)
ServerName string `json:"server_name"`
// Tool name (as returned by the MCP server)
ToolName string `json:"tool_name"`
}
// MCPTool is the MCP tool definition (internal representation)
type MCPTool struct {
Name string `json:"name"`
Description string `json:"description,omitempty"`
InputSchema map[string]interface{} `json:"inputSchema,omitempty"`
}
// MCPToolsCallResult is the result of a tool call
type MCPToolsCallResult struct {
Content []MCPContent `json:"content"`
IsError bool `json:"isError,omitempty"`
}
// MCPContent is a piece of content returned by a tool
type MCPContent struct {
Type string `json:"type"`
Text string `json:"text,omitempty"`
Data string `json:"data,omitempty"`
MimeType string `json:"mimeType,omitempty"`
}
// MCPClient is an MCP client (based on the official go-sdk)
type MCPClient struct {
config *MCPServerConfig
// SDK client and session (stdio transport)
client *mcp.Client
session *mcp.ClientSession
// SSE transport (the SDK does not yet ship an SSE client, so a custom implementation is kept)
httpClient *http.Client
sseURL string
// Common
mu sync.Mutex
initialized bool
tools []MCPTool // cached tool list
}
// expandEnvVars expands environment variable references in the string
func expandEnvVars(s string) string {
return os.ExpandEnv(s)
}
// NewMCPClient creates an MCP client
func NewMCPClient(config *MCPServerConfig) (*MCPClient, error) {
client := &MCPClient{
config: config,
}
return client, nil
}
// Connect connects to the MCP server
func (c *MCPClient) Connect(ctx context.Context) error {
c.mu.Lock()
defer c.mu.Unlock()
if c.initialized {
return nil
}
var err error
switch c.config.Transport {
case MCPTransportStdio:
err = c.connectStdio(ctx)
case MCPTransportSSE:
err = c.connectSSE(ctx)
default:
return fmt.Errorf("unsupported MCP transport: %s", c.config.Transport)
}
if err != nil {
return err
}
c.initialized = true
return nil
}
// connectStdio connects via stdio (using the official SDK)
func (c *MCPClient) connectStdio(ctx context.Context) error {
if c.config.Command == "" {
return fmt.Errorf("stdio transport requires command")
}
// Prepare environment variables
env := os.Environ()
for k, v := range c.config.Env {
expandedValue := expandEnvVars(v)
env = append(env, fmt.Sprintf("%s=%s", k, expandedValue))
}
// Create the exec.Cmd
cmd := exec.CommandContext(ctx, c.config.Command, c.config.Args...)
cmd.Env = env
// Use the official SDK's CommandTransport
transport := &mcp.CommandTransport{
Command: cmd,
}
// Create the MCP client
c.client = mcp.NewClient(
&mcp.Implementation{
Name: "nightingale-aiagent",
Version: "1.0.0",
},
nil,
)
// Connect and initialize (Connect performs the initialize handshake automatically)
session, err := c.client.Connect(ctx, transport, nil)
if err != nil {
return fmt.Errorf("failed to connect MCP client: %v", err)
}
c.session = session
logger.Infof("MCP stdio server started: %s", c.config.Name)
return nil
}
// connectSSE connects via SSE (custom implementation kept; the SDK does not yet provide an SSE client)
func (c *MCPClient) connectSSE(ctx context.Context) error {
if c.config.URL == "" {
return fmt.Errorf("SSE transport requires URL")
}
// Create the HTTP client
transport := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: c.config.SkipSSLVerify},
}
timeout := c.config.ConnectTimeout
if timeout <= 0 {
timeout = DefaultMCPConnectTimeout
}
c.httpClient = &http.Client{
Timeout: time.Duration(timeout) * time.Millisecond,
Transport: transport,
}
c.sseURL = c.config.URL
logger.Infof("MCP SSE client configured: %s", c.config.Name)
return nil
}
// ListTools returns the tool list
func (c *MCPClient) ListTools(ctx context.Context) ([]MCPTool, error) {
c.mu.Lock()
if len(c.tools) > 0 {
tools := c.tools
c.mu.Unlock()
return tools, nil
}
c.mu.Unlock()
var tools []MCPTool
switch c.config.Transport {
case MCPTransportStdio:
// Use the official SDK
if c.session == nil {
return nil, fmt.Errorf("MCP session not initialized")
}
result, err := c.session.ListTools(ctx, nil)
if err != nil {
return nil, fmt.Errorf("failed to list tools: %v", err)
}
// Convert to the internal format
for _, tool := range result.Tools {
inputSchema := make(map[string]interface{})
if tool.InputSchema != nil {
// Convert the SDK's InputSchema into a map
schemaBytes, err := json.Marshal(tool.InputSchema)
if err == nil {
json.Unmarshal(schemaBytes, &inputSchema)
}
}
tools = append(tools, MCPTool{
Name: tool.Name,
Description: tool.Description,
InputSchema: inputSchema,
})
}
case MCPTransportSSE:
// Use the custom HTTP implementation
var err error
tools, err = c.listToolsSSE(ctx)
if err != nil {
return nil, err
}
}
c.mu.Lock()
c.tools = tools
c.mu.Unlock()
return tools, nil
}
// listToolsSSE fetches the tool list via SSE
func (c *MCPClient) listToolsSSE(ctx context.Context) ([]MCPTool, error) {
req := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"method": "tools/list",
}
resp, err := c.sendSSERequest(ctx, req)
if err != nil {
return nil, err
}
resultBytes, _ := json.Marshal(resp["result"])
var result struct {
Tools []MCPTool `json:"tools"`
}
if err := json.Unmarshal(resultBytes, &result); err != nil {
return nil, fmt.Errorf("failed to parse tools list: %v", err)
}
return result.Tools, nil
}
// CallTool invokes a tool
func (c *MCPClient) CallTool(ctx context.Context, name string, arguments map[string]interface{}) (*MCPToolsCallResult, error) {
switch c.config.Transport {
case MCPTransportStdio:
return c.callToolStdio(ctx, name, arguments)
case MCPTransportSSE:
return c.callToolSSE(ctx, name, arguments)
default:
return nil, fmt.Errorf("unsupported transport: %s", c.config.Transport)
}
}
// callToolStdio invokes a tool via stdio (using the official SDK)
func (c *MCPClient) callToolStdio(ctx context.Context, name string, arguments map[string]interface{}) (*MCPToolsCallResult, error) {
if c.session == nil {
return nil, fmt.Errorf("MCP session not initialized")
}
// Invoke the tool
result, err := c.session.CallTool(ctx, &mcp.CallToolParams{
Name: name,
Arguments: arguments,
})
if err != nil {
return nil, fmt.Errorf("tool call failed: %v", err)
}
// Convert the result
mcpResult := &MCPToolsCallResult{
IsError: result.IsError,
}
for _, content := range result.Content {
mc := MCPContent{}
// Extract the content based on its concrete type
switch c := content.(type) {
case *mcp.TextContent:
mc.Type = "text"
mc.Text = c.Text
case *mcp.ImageContent:
mc.Type = "image"
mc.Data = string(c.Data)
mc.MimeType = c.MIMEType
case *mcp.AudioContent:
mc.Type = "audio"
mc.Data = string(c.Data)
mc.MimeType = c.MIMEType
case *mcp.EmbeddedResource:
mc.Type = "resource"
if c.Resource != nil {
if c.Resource.Text != "" {
mc.Text = c.Resource.Text
} else if c.Resource.Blob != nil {
mc.Data = string(c.Resource.Blob)
}
mc.MimeType = c.Resource.MIMEType
}
case *mcp.ResourceLink:
mc.Type = "resource_link"
mc.Text = c.URI
default:
// Fall back to JSON marshaling to capture the content
if data, err := json.Marshal(content); err == nil {
mc.Type = "unknown"
mc.Text = string(data)
}
}
mcpResult.Content = append(mcpResult.Content, mc)
}
return mcpResult, nil
}
// callToolSSE invokes a tool via SSE
func (c *MCPClient) callToolSSE(ctx context.Context, name string, arguments map[string]interface{}) (*MCPToolsCallResult, error) {
req := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"method": "tools/call",
"params": map[string]interface{}{
"name": name,
"arguments": arguments,
},
}
resp, err := c.sendSSERequest(ctx, req)
if err != nil {
return nil, err
}
if errObj, ok := resp["error"].(map[string]interface{}); ok {
return nil, fmt.Errorf("MCP error: %v", errObj["message"])
}
resultBytes, _ := json.Marshal(resp["result"])
var result MCPToolsCallResult
if err := json.Unmarshal(resultBytes, &result); err != nil {
return nil, fmt.Errorf("failed to parse tool call result: %v", err)
}
return &result, nil
}
// setAuthHeaders sets the authentication request headers
func (c *MCPClient) setAuthHeaders(req *http.Request) {
cfg := c.config
if cfg.AuthType == "" && cfg.APIKey == "" {
return
}
apiKey := expandEnvVars(cfg.APIKey)
username := expandEnvVars(cfg.Username)
password := expandEnvVars(cfg.Password)
switch strings.ToLower(cfg.AuthType) {
case "bearer":
if apiKey != "" {
req.Header.Set("Authorization", "Bearer "+apiKey)
}
case "api_key", "apikey":
if apiKey != "" {
req.Header.Set("X-API-Key", apiKey)
}
case "basic":
if username != "" {
req.SetBasicAuth(username, password)
}
default:
if apiKey != "" {
req.Header.Set("Authorization", "Bearer "+apiKey)
}
}
}
// sendSSERequest sends a JSON-RPC request over HTTP
func (c *MCPClient) sendSSERequest(ctx context.Context, req map[string]interface{}) (map[string]interface{}, error) {
baseURL := c.sseURL
if !strings.HasSuffix(baseURL, "/") {
baseURL += "/"
}
postURL := baseURL + "message"
if _, err := url.Parse(postURL); err != nil {
return nil, fmt.Errorf("invalid URL: %v", err)
}
data, err := json.Marshal(req)
if err != nil {
return nil, fmt.Errorf("failed to marshal request: %v", err)
}
httpReq, err := http.NewRequestWithContext(ctx, "POST", postURL, bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("failed to create HTTP request: %v", err)
}
httpReq.Header.Set("Content-Type", "application/json")
c.setAuthHeaders(httpReq)
for k, v := range c.config.Headers {
httpReq.Header.Set(k, expandEnvVars(v))
}
resp, err := c.httpClient.Do(httpReq)
if err != nil {
return nil, fmt.Errorf("HTTP request failed: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode >= 400 {
body, _ := io.ReadAll(resp.Body)
return nil, fmt.Errorf("HTTP error %d: %s", resp.StatusCode, string(body))
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response: %v", err)
}
var result map[string]interface{}
if err := json.Unmarshal(body, &result); err != nil {
return nil, fmt.Errorf("failed to parse response: %v", err)
}
return result, nil
}
// Close closes the connection
func (c *MCPClient) Close() error {
c.mu.Lock()
defer c.mu.Unlock()
if c.session != nil {
c.session.Close()
c.session = nil
}
c.client = nil
c.initialized = false
logger.Infof("MCP client closed: %s", c.config.Name)
return nil
}
// MCPClientManager manages MCP clients
type MCPClientManager struct {
clients map[string]*MCPClient
mu sync.RWMutex
}
// NewMCPClientManager creates an MCP client manager
func NewMCPClientManager() *MCPClientManager {
return &MCPClientManager{
clients: make(map[string]*MCPClient),
}
}
// GetOrCreateClient returns an existing MCP client or creates and connects a new one
func (m *MCPClientManager) GetOrCreateClient(ctx context.Context, config *MCPServerConfig) (*MCPClient, error) {
m.mu.RLock()
client, ok := m.clients[config.Name]
m.mu.RUnlock()
if ok {
return client, nil
}
m.mu.Lock()
defer m.mu.Unlock()
// Check again (double-checked locking)
if client, ok := m.clients[config.Name]; ok {
return client, nil
}
// Create a new client
client, err := NewMCPClient(config)
if err != nil {
return nil, err
}
// Connect
if err := client.Connect(ctx); err != nil {
return nil, err
}
m.clients[config.Name] = client
return client, nil
}
// CloseAll closes all clients
func (m *MCPClientManager) CloseAll() {
m.mu.Lock()
defer m.mu.Unlock()
for name, client := range m.clients {
if err := client.Close(); err != nil {
logger.Warningf("Failed to close MCP client %s: %v", name, err)
}
}
m.clients = make(map[string]*MCPClient)
}
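// Usage sketch (the server command below is an assumption): resolve a client
// through the manager, list its tools, and invoke the first one.
func mcpManagerExample(ctx context.Context, mgr *MCPClientManager) error {
client, err := mgr.GetOrCreateClient(ctx, &MCPServerConfig{
Name: "files",
Transport: MCPTransportStdio,
Command: "npx",
Args: []string{"-y", "@modelcontextprotocol/server-filesystem", "/tmp"},
})
if err != nil {
return err
}
tools, err := client.ListTools(ctx)
if err != nil {
return err
}
if len(tools) == 0 {
return fmt.Errorf("no tools exposed by MCP server")
}
result, err := client.CallTool(ctx, tools[0].Name, map[string]interface{}{})
if err != nil {
return err
}
logger.Infof("MCP tool %s returned %d content blocks", tools[0].Name, len(result.Content))
return nil
}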

View File

@@ -1,30 +0,0 @@
package prompts
import (
_ "embed"
)
// System prompt for ReAct mode
//
//go:embed react_system.md
var ReactSystemPrompt string
// System prompt for the planning phase of Plan+ReAct mode
//
//go:embed plan_system.md
var PlanSystemPrompt string
// Step execution prompt
//
//go:embed step_execution.md
var StepExecutionPrompt string
// Synthesis prompt
//
//go:embed synthesis.md
var SynthesisPrompt string
// Default user prompt template
//
//go:embed user_default.md
var UserDefaultTemplate string

View File

@@ -1,65 +0,0 @@
You are an intelligent AI Agent capable of analyzing tasks, creating execution plans, and solving complex problems.
Your role is to understand user requests and create structured, actionable execution plans.
## Core Capabilities
- **Alert Analysis**: Analyze alerts, investigate root causes, correlate events
- **Data Analysis**: Analyze batch data, identify patterns, generate insights
- **SQL Generation**: Convert natural language to SQL queries
- **General Problem Solving**: Break down complex tasks into actionable steps
## Planning Principles
1. **Understand First**: Carefully analyze what the user is asking for
2. **Identify Key Areas**: Determine which domains, systems, or aspects are involved
3. **Create Logical Steps**: Order steps by priority or logical sequence
4. **Be Specific**: Each step should have a clear goal and concrete approach
5. **Reference Tools**: Consider available tools when designing your approach
## Response Format
You must respond in the following JSON format:
```json
{
"task_summary": "Brief summary of the input/request",
"goal": "The overall goal of this task",
"focus_areas": ["area1", "area2", "area3"],
"steps": [
{
"step_number": 1,
"goal": "What to accomplish in this step",
"approach": "How to accomplish it (which tools/methods to use)"
},
{
"step_number": 2,
"goal": "...",
"approach": "..."
}
]
}
```
## Focus Areas by Task Type
**Alert/Incident Analysis:**
- Network: latency, packet loss, DNS resolution
- Database: query performance, connections, locks, replication
- Application: error rates, response times, resource usage
- Infrastructure: CPU, memory, disk I/O, network throughput
**Batch Alert Analysis:**
- Pattern recognition: common labels, time correlation
- Aggregation: group by severity, source, category
- Trend analysis: frequency, escalation patterns
**SQL Generation:**
- Schema understanding: tables, columns, relationships
- Query optimization: indexes, join strategies
- Data validation: constraints, data types
**General Analysis:**
- Data collection: gather relevant information
- Processing: transform, filter, aggregate
- Output: format results appropriately

View File

@@ -1,42 +0,0 @@
You are an intelligent AI Agent capable of analyzing tasks, creating execution plans, and solving complex problems.
Your capabilities include but are not limited to:
- **Root Cause Analysis**: Analyze alerts, investigate incidents, identify root causes
- **Data Analysis**: Query and analyze metrics, logs, traces, and other data sources
- **SQL Generation**: Convert natural language queries to SQL statements
- **Information Synthesis**: Summarize and extract insights from complex data
- **Content Generation**: Generate titles, summaries, and structured reports
## Core Principles
1. **Systematic Analysis**: Gather sufficient information before making conclusions
2. **Evidence-Based**: Support conclusions with specific data from tool outputs
3. **Tool Efficiency**: Use tools wisely, avoid redundant calls
4. **Clear Communication**: Keep responses focused and actionable
5. **Adaptability**: Adjust your approach based on the task type
## Response Format
You must respond in the following format:
```
Thought: [Your reasoning about the current situation and what to do next]
Action: [The tool name to use, or 'Final Answer' if you have enough information]
Action Input: [The input to the action - for tools, provide JSON parameters; for Final Answer, provide your result]
```
## Task Guidelines
1. **Understand the request**: Carefully analyze what the user is asking for
2. **Choose appropriate tools**: Select tools that best fit the task requirements
3. **Iterate as needed**: Gather additional information if initial results are insufficient
4. **Validate results**: Verify your conclusions before providing the final answer
5. **Be concise**: Provide clear, well-structured responses
## Final Answer Requirements
Your Final Answer should:
- Directly address the user's request
- Be well-structured and easy to understand
- Include supporting evidence or reasoning when applicable
- Provide actionable recommendations if relevant

View File

@@ -1,35 +0,0 @@
You are an intelligent AI Agent executing a specific step as part of a larger execution plan.
## Your Task
Focus on completing the current step efficiently and thoroughly. Use the available tools to gather information, process data, or generate results as needed to achieve the step's goal.
## Response Format
Respond in this format:
```
Thought: [Your reasoning about what to do for this step]
Action: [Tool name or 'Step Complete' when done]
Action Input: [Tool parameters as JSON, or step summary for 'Step Complete']
```
## Step Execution Guidelines
1. **Stay Focused**: Only work on the current step's goal
2. **Be Thorough**: Gather enough information to achieve the goal
3. **Document Progress**: Note important findings in your thoughts
4. **Know When to Stop**: Complete the step when you have sufficient results
5. **Handle Failures**: If a tool fails, try alternatives or note the limitation
## When to Mark Step Complete
Mark the step as complete when:
- You have achieved the step's goal
- You have gathered sufficient information or generated the required output
- Further work would be outside the step's scope
Your step summary should include:
- Key results or findings relevant to the step's goal
- Tools used and their outputs
- Any limitations or issues encountered

View File

@@ -1,37 +0,0 @@
You are an intelligent AI Agent synthesizing results from multiple execution steps into a comprehensive final output.
## Your Task
Review all the results from the completed steps and provide a unified, well-structured response that addresses the original request.
## Response Guidelines
Based on the task type, structure your response appropriately:
**For Root Cause Analysis:**
- Summary of the root cause
- Supporting evidence from investigation
- Impact assessment
- Recommended actions
**For Data Analysis / SQL Generation:**
- Query results or generated SQL
- Key insights from the data
- Any caveats or limitations
**For Information Synthesis:**
- Structured summary of findings
- Key insights and patterns
- Relevant conclusions
**For Content Generation:**
- Generated content (title, summary, etc.)
- Alternative options if applicable
## Synthesis Principles
1. **Integrate Results**: Combine findings from all completed steps coherently
2. **Prioritize Relevance**: Focus on the most important information
3. **Be Structured**: Organize output in a clear, logical format
4. **Be Concise**: Avoid unnecessary verbosity while ensuring completeness
5. **Address the Request**: Ensure the final output directly answers the original task

View File

@@ -1,7 +0,0 @@
## Alert Information
{{.AlertContent}}
## Analysis Request
Please analyze this alert and identify the root cause. Provide evidence-based conclusions and actionable recommendations.

View File

@@ -1,573 +0,0 @@
package aiagent
import (
"bufio"
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/toolkits/pkg/logger"
"gopkg.in/yaml.v3"
)
const (
// SkillFileName is the main skill file name
SkillFileName = "SKILL.md"
// SkillToolsDir is the name of the skill tools directory
SkillToolsDir = "skill_tools"
// Default configuration
DefaultMaxSkills = 2
)
// SkillConfig is the skill configuration (used in AIAgentConfig).
// The skills directory path is set via the global config Plus.AIAgentSkillsPath.
type SkillConfig struct {
// Skill selection config. Priority: SkillNames > LLM selection > DefaultSkills
AutoSelect bool `json:"auto_select,omitempty"` // whether the LLM auto-selects skills (default true)
SkillNames []string `json:"skill_names,omitempty"` // explicitly specified skill names (manual mode)
MaxSkills int `json:"max_skills,omitempty"` // max number of skills the LLM may select (default 2)
DefaultSkills []string `json:"default_skills,omitempty"` // default skills (used when the LLM cannot select)
}
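// Illustrative example: a manual-selection config that pins two skills and
// keeps a fallback; the skill names here are hypothetical.
//
//	cfg := SkillConfig{
//		AutoSelect:    false,
//		SkillNames:    []string{"sql-generation", "log-analysis"},
//		DefaultSkills: []string{"general-analysis"},
//	}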
// SkillMetadata is skill metadata (Level 1 - always kept in memory)
type SkillMetadata struct {
// Core fields (consistent with Anthropic's official format)
Name string `yaml:"name" json:"name"`
Description string `yaml:"description" json:"description"`
// Optional extension fields
RecommendedTools []string `yaml:"recommended_tools,omitempty" json:"recommended_tools,omitempty"`
BuiltinTools []string `yaml:"builtin_tools,omitempty" json:"builtin_tools,omitempty"` // built-in tool list
// Internal fields
Path string `json:"-"` // skill directory path
LoadedAt time.Time `json:"-"` // load time
}
// SkillContent is skill content (Level 2 - loaded on match)
type SkillContent struct {
Metadata *SkillMetadata `json:"metadata"`
MainContent string `json:"main_content"` // SKILL.md body
}
// SkillTool is a skill-specific tool (Level 3 - loaded on demand)
type SkillTool struct {
Name string `yaml:"name" json:"name"` // tool name
Type string `yaml:"type" json:"type"` // handler type: annotation_qd, script, callback, etc.
Description string `yaml:"description" json:"description"` // tool description
Config map[string]interface{} `yaml:"config" json:"config"` // handler configuration
// Parameter definitions (optional)
Parameters []ToolParameter `yaml:"parameters,omitempty" json:"parameters,omitempty"`
}
// SkillResources holds a skill's extended resources (Level 3 - loaded on demand)
type SkillResources struct {
SkillTools map[string]*SkillTool `json:"skill_tools"` // tool name -> tool definition
References map[string]string `json:"references"` // referenced file contents
}
// SkillRegistry is the skill registry
type SkillRegistry struct {
skillsPath string // skills directory path
skills map[string]*SkillMetadata // name -> metadata
contentCache map[string]*SkillContent // name -> content (LRU cache)
toolsCache map[string]map[string]*SkillTool // skillName -> toolName -> tool
mu sync.RWMutex
}
// NewSkillRegistry creates a skill registry
func NewSkillRegistry(skillsPath string) *SkillRegistry {
registry := &SkillRegistry{
skillsPath: skillsPath,
skills: make(map[string]*SkillMetadata),
contentCache: make(map[string]*SkillContent),
toolsCache: make(map[string]map[string]*SkillTool),
}
// Initial load of all skill metadata
if err := registry.loadAllMetadata(); err != nil {
logger.Warningf("Failed to load skill metadata: %v", err)
}
return registry
}
// loadAllMetadata loads the metadata of all skills (Level 1)
func (r *SkillRegistry) loadAllMetadata() error {
if r.skillsPath == "" {
return nil
}
// Check whether the directory exists
if _, err := os.Stat(r.skillsPath); os.IsNotExist(err) {
logger.Debugf("Skills directory does not exist: %s", r.skillsPath)
return nil
}
// Walk the skills directory
entries, err := os.ReadDir(r.skillsPath)
if err != nil {
return fmt.Errorf("failed to read skills directory: %v", err)
}
r.mu.Lock()
defer r.mu.Unlock()
for _, entry := range entries {
if !entry.IsDir() {
continue
}
skillPath := filepath.Join(r.skillsPath, entry.Name())
skillFile := filepath.Join(skillPath, SkillFileName)
// Check whether SKILL.md exists
if _, err := os.Stat(skillFile); os.IsNotExist(err) {
continue
}
// Load the metadata
metadata, err := r.loadMetadataFromFile(skillFile)
if err != nil {
logger.Warningf("Failed to load skill metadata from %s: %v", skillFile, err)
continue
}
metadata.Path = skillPath
metadata.LoadedAt = time.Now()
r.skills[metadata.Name] = metadata
logger.Debugf("Loaded skill metadata: %s from %s", metadata.Name, skillPath)
}
logger.Infof("Loaded %d skills from %s", len(r.skills), r.skillsPath)
return nil
}
// loadMetadataFromFile loads metadata from a SKILL.md file
func (r *SkillRegistry) loadMetadataFromFile(filePath string) (*SkillMetadata, error) {
file, err := os.Open(filePath)
if err != nil {
return nil, fmt.Errorf("failed to open file: %v", err)
}
defer file.Close()
// Parse the YAML frontmatter
scanner := bufio.NewScanner(file)
var inFrontmatter bool
var frontmatterLines []string
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if line == "---" {
if !inFrontmatter {
inFrontmatter = true
continue
} else {
// end of frontmatter
break
}
}
if inFrontmatter {
frontmatterLines = append(frontmatterLines, line)
}
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("failed to scan file: %v", err)
}
if len(frontmatterLines) == 0 {
return nil, fmt.Errorf("no frontmatter found in %s", filePath)
}
// Parse the YAML
frontmatter := strings.Join(frontmatterLines, "\n")
var metadata SkillMetadata
if err := yaml.Unmarshal([]byte(frontmatter), &metadata); err != nil {
return nil, fmt.Errorf("failed to parse frontmatter: %v", err)
}
if metadata.Name == "" {
return nil, fmt.Errorf("skill name is required in frontmatter")
}
return &metadata, nil
}
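// For reference, the frontmatter this parser expects at the top of SKILL.md
// looks like the following (values illustrative; keys follow the yaml tags
// on SkillMetadata):
//
//	---
//	name: sql-generation
//	description: Convert natural language questions into SQL statements
//	recommended_tools: [query_database]
//	---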
// GetByName returns skill metadata by name
func (r *SkillRegistry) GetByName(name string) *SkillMetadata {
r.mu.RLock()
defer r.mu.RUnlock()
return r.skills[name]
}
// ListAll lists all skill metadata
func (r *SkillRegistry) ListAll() []*SkillMetadata {
r.mu.RLock()
defer r.mu.RUnlock()
result := make([]*SkillMetadata, 0, len(r.skills))
for _, metadata := range r.skills {
result = append(result, metadata)
}
return result
}
// LoadContent loads the skill content (Level 2)
func (r *SkillRegistry) LoadContent(metadata *SkillMetadata) (*SkillContent, error) {
if metadata == nil {
return nil, fmt.Errorf("metadata is nil")
}
// Check the cache
r.mu.RLock()
if cached, ok := r.contentCache[metadata.Name]; ok {
r.mu.RUnlock()
return cached, nil
}
r.mu.RUnlock()
// Load the content
skillFile := filepath.Join(metadata.Path, SkillFileName)
content, err := r.loadContentFromFile(skillFile)
if err != nil {
return nil, err
}
skillContent := &SkillContent{
Metadata: metadata,
MainContent: content,
}
// Cache it
r.mu.Lock()
r.contentCache[metadata.Name] = skillContent
r.mu.Unlock()
return skillContent, nil
}
// loadContentFromFile loads the body content from a SKILL.md file
func (r *SkillRegistry) loadContentFromFile(filePath string) (string, error) {
file, err := os.Open(filePath)
if err != nil {
return "", fmt.Errorf("failed to open file: %v", err)
}
defer file.Close()
scanner := bufio.NewScanner(file)
var inFrontmatter bool
var frontmatterEnded bool
var contentLines []string
for scanner.Scan() {
line := scanner.Text()
if strings.TrimSpace(line) == "---" {
if !inFrontmatter {
inFrontmatter = true
continue
} else {
frontmatterEnded = true
continue
}
}
if frontmatterEnded {
contentLines = append(contentLines, line)
}
}
if err := scanner.Err(); err != nil {
return "", fmt.Errorf("failed to scan file: %v", err)
}
return strings.TrimSpace(strings.Join(contentLines, "\n")), nil
}
// LoadSkillTool loads a single skill_tool (Level 3 - full configuration)
func (r *SkillRegistry) LoadSkillTool(skillName, toolName string) (*SkillTool, error) {
// Check the cache
r.mu.RLock()
if skillTools, ok := r.toolsCache[skillName]; ok {
if tool, ok := skillTools[toolName]; ok {
r.mu.RUnlock()
return tool, nil
}
}
r.mu.RUnlock()
// Fetch the skill metadata
metadata := r.GetByName(skillName)
if metadata == nil {
return nil, fmt.Errorf("skill '%s' not found", skillName)
}
// Load the tool
toolFile := filepath.Join(metadata.Path, SkillToolsDir, toolName+".yaml")
tool, err := r.loadToolFromFile(toolFile)
if err != nil {
return nil, err
}
// Cache it
r.mu.Lock()
if r.toolsCache[skillName] == nil {
r.toolsCache[skillName] = make(map[string]*SkillTool)
}
r.toolsCache[skillName][toolName] = tool
r.mu.Unlock()
return tool, nil
}
// LoadSkillToolDescription loads a skill_tool's description (lightweight: reads only name and description)
func (r *SkillRegistry) LoadSkillToolDescription(skillName, toolName string) (string, error) {
// Check the cache - if the full tool is already loaded, return its description directly
r.mu.RLock()
if skillTools, ok := r.toolsCache[skillName]; ok {
if tool, ok := skillTools[toolName]; ok {
r.mu.RUnlock()
return tool.Description, nil
}
}
r.mu.RUnlock()
// Fetch the skill metadata
metadata := r.GetByName(skillName)
if metadata == nil {
return "", fmt.Errorf("skill '%s' not found", skillName)
}
// Load only the description; don't cache the full tool, to preserve lazy loading
toolFile := filepath.Join(metadata.Path, SkillToolsDir, toolName+".yaml")
tool, err := r.loadToolFromFile(toolFile)
if err != nil {
return "", err
}
return tool.Description, nil
}
// LoadAllSkillToolDescriptions loads the descriptions of all skill_tools under a skill's directory
func (r *SkillRegistry) LoadAllSkillToolDescriptions(skillName string) (map[string]string, error) {
metadata := r.GetByName(skillName)
if metadata == nil {
return nil, fmt.Errorf("skill '%s' not found", skillName)
}
toolsDir := filepath.Join(metadata.Path, SkillToolsDir)
// Check whether the directory exists
if _, err := os.Stat(toolsDir); os.IsNotExist(err) {
return make(map[string]string), nil
}
// Walk the skill_tools directory
entries, err := os.ReadDir(toolsDir)
if err != nil {
return nil, fmt.Errorf("failed to read skill_tools directory: %v", err)
}
descriptions := make(map[string]string)
for _, entry := range entries {
if entry.IsDir() {
continue
}
// Only process .yaml/.yml files
name := entry.Name()
if !strings.HasSuffix(name, ".yaml") && !strings.HasSuffix(name, ".yml") {
continue
}
toolFile := filepath.Join(toolsDir, name)
tool, err := r.loadToolFromFile(toolFile)
if err != nil {
logger.Warningf("Failed to load skill tool %s: %v", toolFile, err)
continue
}
descriptions[tool.Name] = tool.Description
}
return descriptions, nil
}
// loadToolFromFile loads a tool definition from a file
func (r *SkillRegistry) loadToolFromFile(filePath string) (*SkillTool, error) {
data, err := os.ReadFile(filePath)
if err != nil {
return nil, fmt.Errorf("failed to read tool file: %v", err)
}
var tool SkillTool
if err := yaml.Unmarshal(data, &tool); err != nil {
return nil, fmt.Errorf("failed to parse tool file: %v", err)
}
return &tool, nil
}
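// A skill_tools/<name>.yaml file parsed here might look like the following
// (illustrative: the type values come from the SkillTool field comments, and
// the config keys are hypothetical since config is handler-specific):
//
//	name: query_database
//	type: script
//	description: Run a read-only query against the metrics store
//	config:
//	  command: ./scripts/query.sh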
// LoadReference loads a referenced file (Level 3)
func (r *SkillRegistry) LoadReference(metadata *SkillMetadata, refName string) (string, error) {
if metadata == nil {
return "", fmt.Errorf("metadata is nil")
}
refFile := filepath.Join(metadata.Path, refName)
data, err := os.ReadFile(refFile)
if err != nil {
return "", fmt.Errorf("failed to read reference file: %v", err)
}
return string(data), nil
}
// Reload reloads all skill metadata
func (r *SkillRegistry) Reload() error {
r.mu.Lock()
r.skills = make(map[string]*SkillMetadata)
r.contentCache = make(map[string]*SkillContent)
r.toolsCache = make(map[string]map[string]*SkillTool)
r.mu.Unlock()
return r.loadAllMetadata()
}
// SkillSelector is the skill selector interface
type SkillSelector interface {
// SelectMultiple lets the LLM pick the most suitable skills for the task (multiple allowed)
SelectMultiple(ctx context.Context, taskContext string, availableSkills []*SkillMetadata, maxSkills int) ([]*SkillMetadata, error)
}
// LLMSkillSelector is an LLM-based skill selector
type LLMSkillSelector struct {
llmCaller func(ctx context.Context, messages []ChatMessage) (string, error)
}
// NewLLMSkillSelector creates an LLM-based skill selector
func NewLLMSkillSelector(llmCaller func(ctx context.Context, messages []ChatMessage) (string, error)) *LLMSkillSelector {
return &LLMSkillSelector{
llmCaller: llmCaller,
}
}
// SelectMultiple selects skills via the LLM
func (s *LLMSkillSelector) SelectMultiple(ctx context.Context, taskContext string, availableSkills []*SkillMetadata, maxSkills int) ([]*SkillMetadata, error) {
if len(availableSkills) == 0 {
return nil, nil
}
if maxSkills <= 0 {
maxSkills = DefaultMaxSkills
}
// Build the prompt
systemPrompt := s.buildSelectionPrompt(availableSkills, maxSkills)
messages := []ChatMessage{
{Role: "system", Content: systemPrompt},
{Role: "user", Content: taskContext},
}
// Call the LLM
response, err := s.llmCaller(ctx, messages)
if err != nil {
return nil, fmt.Errorf("LLM call failed: %v", err)
}
// Parse the response
selectedNames := s.parseSelectionResponse(response)
if len(selectedNames) == 0 {
return nil, nil
}
// Cap the count
if len(selectedNames) > maxSkills {
selectedNames = selectedNames[:maxSkills]
}
// Map names back to SkillMetadata
skillMap := make(map[string]*SkillMetadata)
for _, skill := range availableSkills {
skillMap[skill.Name] = skill
}
var result []*SkillMetadata
for _, name := range selectedNames {
if skill, ok := skillMap[name]; ok {
result = append(result, skill)
}
}
return result, nil
}
// buildSelectionPrompt builds the skill selection prompt
func (s *LLMSkillSelector) buildSelectionPrompt(availableSkills []*SkillMetadata, maxSkills int) string {
var sb strings.Builder
sb.WriteString(fmt.Sprintf(`You are a skill selector. Based on the task context below, choose the most suitable skills (pick 1-%d).
## Available Skills
`, maxSkills))
for i, skill := range availableSkills {
sb.WriteString(fmt.Sprintf("%d. **%s**\n", i+1, skill.Name))
sb.WriteString(fmt.Sprintf(" %s\n\n", skill.Description))
}
sb.WriteString(`## Output Format
Return the selected skill names as a JSON array, for example:
` + "```json\n" + `["skill-name-1", "skill-name-2"]
` + "```" + `
## Selection Principles
1. Choose the skills most relevant to the task
2. If the task spans multiple domains, you may choose several skills
3. Prefer more specific, more specialized skills
4. If no skill fits, return an empty array []
Return the skill name array:`)
return sb.String()
}
// parseSelectionResponse parses the LLM's selection response
func (s *LLMSkillSelector) parseSelectionResponse(response string) []string {
// Try to extract from a JSON code block
response = strings.TrimSpace(response)
// Locate the JSON array
start := strings.Index(response, "[")
end := strings.LastIndex(response, "]")
if start < 0 || end <= start {
return nil
}
jsonStr := response[start : end+1]
var skillNames []string
if err := json.Unmarshal([]byte(jsonStr), &skillNames); err != nil {
logger.Warningf("Failed to parse skill selection response: %v", err)
return nil
}
return skillNames
}
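// Usage sketch (illustrative, with a stub LLM caller; registry and ctx are
// assumed to exist in the calling code):
//
//	selector := NewLLMSkillSelector(func(ctx context.Context, msgs []ChatMessage) (string, error) {
//		return `["sql-generation"]`, nil
//	})
//	skills, err := selector.SelectMultiple(ctx, "generate SQL for top error hosts", registry.ListAll(), 2)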

View File

@@ -4,8 +4,6 @@ import (
"context"
"fmt"
"github.com/ccfos/nightingale/v6/dscache"
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/alert/dispatch"
@@ -23,11 +21,12 @@ import (
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/httpx"
"github.com/ccfos/nightingale/v6/pkg/logx"
"github.com/ccfos/nightingale/v6/pkg/macros"
"github.com/ccfos/nightingale/v6/prom"
"github.com/ccfos/nightingale/v6/pushgw/pconf"
"github.com/ccfos/nightingale/v6/pushgw/writer"
"github.com/ccfos/nightingale/v6/storage"
"github.com/ccfos/nightingale/v6/tdengine"
"github.com/flashcatcloud/ibex/src/cmd/ibex"
)
@@ -64,22 +63,18 @@ func Initialize(configDir string, cryptoKey string) (func(), error) {
userGroupCache := memsto.NewUserGroupCache(ctx, syncStats)
taskTplsCache := memsto.NewTaskTplCache(ctx)
configCvalCache := memsto.NewCvalCache(ctx, syncStats)
notifyRuleCache := memsto.NewNotifyRuleCache(ctx, syncStats)
notifyChannelCache := memsto.NewNotifyChannelCache(ctx, syncStats)
messageTemplateCache := memsto.NewMessageTemplateCache(ctx, syncStats)
promClients := prom.NewPromClient(ctx)
dispatch.InitRegisterQueryFunc(promClients)
tdengineClients := tdengine.NewTdengineClient(ctx, config.Alert.Heartbeat)
externalProcessors := process.NewExternalProcessors()
macros.RegisterMacro(macros.MacroInVain)
dscache.Init(ctx, false)
Start(config.Alert, config.Pushgw, syncStats, alertStats, externalProcessors, targetCache, busiGroupCache, alertMuteCache, alertRuleCache, notifyConfigCache, taskTplsCache, dsCache, ctx, promClients, userCache, userGroupCache, notifyRuleCache, notifyChannelCache, messageTemplateCache, configCvalCache)
Start(config.Alert, config.Pushgw, syncStats, alertStats, externalProcessors, targetCache, busiGroupCache, alertMuteCache, alertRuleCache, notifyConfigCache, taskTplsCache, dsCache, ctx, promClients, tdengineClients, userCache, userGroupCache)
r := httpx.GinEngine(config.Global.RunMode, config.HTTP,
configCvalCache.PrintBodyPaths, configCvalCache.PrintAccessLog)
rt := router.New(config.HTTP, config.Alert, alertMuteCache, targetCache, busiGroupCache, alertStats, ctx, externalProcessors, config.Log.Dir)
rt := router.New(config.HTTP, config.Alert, alertMuteCache, targetCache, busiGroupCache, alertStats, ctx, externalProcessors)
if config.Ibex.Enable {
ibex.ServerStart(false, nil, redis, config.HTTP.APIForService.BasicAuth, config.Alert.Heartbeat, &config.CenterApi, r, nil, config.Ibex, config.HTTP.Port)
@@ -98,14 +93,12 @@ func Initialize(configDir string, cryptoKey string) (func(), error) {
func Start(alertc aconf.Alert, pushgwc pconf.Pushgw, syncStats *memsto.Stats, alertStats *astats.Stats, externalProcessors *process.ExternalProcessorsType, targetCache *memsto.TargetCacheType, busiGroupCache *memsto.BusiGroupCacheType,
alertMuteCache *memsto.AlertMuteCacheType, alertRuleCache *memsto.AlertRuleCacheType, notifyConfigCache *memsto.NotifyConfigCacheType, taskTplsCache *memsto.TaskTplCache, datasourceCache *memsto.DatasourceCacheType, ctx *ctx.Context,
promClients *prom.PromClientMap, userCache *memsto.UserCacheType, userGroupCache *memsto.UserGroupCacheType, notifyRuleCache *memsto.NotifyRuleCacheType, notifyChannelCache *memsto.NotifyChannelCacheType, messageTemplateCache *memsto.MessageTemplateCacheType, configCvalCache *memsto.CvalCache) {
promClients *prom.PromClientMap, tdendgineClients *tdengine.TdengineClientMap, userCache *memsto.UserCacheType, userGroupCache *memsto.UserGroupCacheType) {
alertSubscribeCache := memsto.NewAlertSubscribeCache(ctx, syncStats)
recordingRuleCache := memsto.NewRecordingRuleCache(ctx, syncStats)
targetsOfAlertRulesCache := memsto.NewTargetOfAlertRuleCache(ctx, alertc.Heartbeat.EngineName, syncStats)
go models.InitNotifyConfig(ctx, alertc.Alerting.TemplatesDir)
go models.InitNotifyChannel(ctx)
go models.InitMessageTemplate(ctx)
naming := naming.NewNaming(ctx, alertc.Heartbeat, alertStats)
@@ -113,20 +106,14 @@ func Start(alertc aconf.Alert, pushgwc pconf.Pushgw, syncStats *memsto.Stats, al
record.NewScheduler(alertc, recordingRuleCache, promClients, writers, alertStats, datasourceCache)
eval.NewScheduler(alertc, externalProcessors, alertRuleCache, targetCache, targetsOfAlertRulesCache,
busiGroupCache, alertMuteCache, datasourceCache, promClients, naming, ctx, alertStats)
busiGroupCache, alertMuteCache, datasourceCache, promClients, tdendgineClients, naming, ctx, alertStats)
eventProcessorCache := memsto.NewEventProcessorCache(ctx, syncStats)
dp := dispatch.NewDispatch(alertRuleCache, userCache, userGroupCache, alertSubscribeCache, targetCache, notifyConfigCache, taskTplsCache, notifyRuleCache, notifyChannelCache, messageTemplateCache, eventProcessorCache, configCvalCache, alertc.Alerting, ctx, alertStats)
consumer := dispatch.NewConsumer(alertc.Alerting, ctx, dp, promClients, alertMuteCache)
notifyRecordConsumer := sender.NewNotifyRecordConsumer(ctx)
dp := dispatch.NewDispatch(alertRuleCache, userCache, userGroupCache, alertSubscribeCache, targetCache, notifyConfigCache, taskTplsCache, alertc.Alerting, ctx, alertStats)
consumer := dispatch.NewConsumer(alertc.Alerting, ctx, dp, promClients)
go dp.ReloadTpls()
go consumer.LoopConsume()
go notifyRecordConsumer.LoopConsume()
go queue.ReportQueueSize(alertStats)
go sender.ReportNotifyRecordQueueSize(alertStats)
go sender.InitEmailSender(ctx, notifyConfigCache)
}

View File

@@ -17,16 +17,12 @@ type Stats struct {
CounterRuleEval *prometheus.CounterVec
CounterQueryDataErrorTotal *prometheus.CounterVec
CounterQueryDataTotal *prometheus.CounterVec
CounterVarFillingQuery *prometheus.CounterVec
CounterRecordEval *prometheus.CounterVec
CounterRecordEvalErrorTotal *prometheus.CounterVec
CounterMuteTotal *prometheus.CounterVec
CounterRuleEvalErrorTotal *prometheus.CounterVec
CounterHeartbeatErrorTotal *prometheus.CounterVec
CounterSubEventTotal *prometheus.CounterVec
GaugeQuerySeriesCount *prometheus.GaugeVec
GaugeRuleEvalDuration *prometheus.GaugeVec
GaugeNotifyRecordQueueSize prometheus.Gauge
}
func NewSyncStats() *Stats {
@@ -56,7 +52,7 @@ func NewSyncStats() *Stats {
Subsystem: subsystem,
Name: "query_data_total",
Help: "Number of rule eval query data.",
}, []string{"datasource", "rule_id"})
}, []string{"datasource"})
CounterRecordEval := prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
@@ -107,7 +103,7 @@ func NewSyncStats() *Stats {
Subsystem: subsystem,
Name: "mute_total",
Help: "Number of mute.",
}, []string{"group", "rule_id", "mute_rule_id", "datasource_id"})
}, []string{"group"})
CounterSubEventTotal := prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
@@ -123,34 +119,6 @@ func NewSyncStats() *Stats {
Help: "Number of heartbeat error.",
}, []string{})
GaugeQuerySeriesCount := prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "eval_query_series_count",
Help: "Number of series retrieved from data source after query.",
}, []string{"rule_id", "datasource_id", "ref"})
// Length of the notify record queue
GaugeNotifyRecordQueueSize := prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "notify_record_queue_size",
Help: "The size of notify record queue.",
})
GaugeRuleEvalDuration := prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "rule_eval_duration_ms",
Help: "Duration of rule eval in milliseconds.",
}, []string{"rule_id", "datasource_id"})
CounterVarFillingQuery := prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "var_filling_query_total",
Help: "Number of var filling query.",
}, []string{"rule_id", "datasource_id", "ref", "typ"})
prometheus.MustRegister(
CounterAlertsTotal,
GaugeAlertQueueSize,
@@ -165,10 +133,6 @@ func NewSyncStats() *Stats {
CounterRuleEvalErrorTotal,
CounterHeartbeatErrorTotal,
CounterSubEventTotal,
GaugeQuerySeriesCount,
GaugeRuleEvalDuration,
GaugeNotifyRecordQueueSize,
CounterVarFillingQuery,
)
return &Stats{
@@ -185,9 +149,5 @@ func NewSyncStats() *Stats {
CounterRuleEvalErrorTotal: CounterRuleEvalErrorTotal,
CounterHeartbeatErrorTotal: CounterHeartbeatErrorTotal,
CounterSubEventTotal: CounterSubEventTotal,
GaugeQuerySeriesCount: GaugeQuerySeriesCount,
GaugeRuleEvalDuration: GaugeRuleEvalDuration,
GaugeNotifyRecordQueueSize: GaugeNotifyRecordQueueSize,
CounterVarFillingQuery: CounterVarFillingQuery,
}
}

View File

@@ -1,7 +1,6 @@
package common
import (
"encoding/json"
"fmt"
"strings"
@@ -14,20 +13,6 @@ func RuleKey(datasourceId, id int64) string {
func MatchTags(eventTagsMap map[string]string, itags []models.TagFilter) bool {
for _, filter := range itags {
// target_group: "in" and "not in" get special handling first; on match, continue to the next filter, on mismatch the whole group fails
if filter.Key == "target_group" {
// the target field comes from event.JsonTagsAndValue()
v, ok := eventTagsMap["target"]
if !ok {
return false
}
if !targetGroupMatch(v, filter) {
return false
}
continue
}
// Ordinary tags follow the original logic
value, has := eventTagsMap[filter.Key]
if !has {
return false
@@ -50,9 +35,9 @@ func MatchGroupsName(groupName string, groupFilter []models.TagFilter) bool {
func matchTag(value string, filter models.TagFilter) bool {
switch filter.Func {
case "==":
return strings.TrimSpace(fmt.Sprintf("%v", filter.Value)) == strings.TrimSpace(value)
return strings.TrimSpace(filter.Value) == strings.TrimSpace(value)
case "!=":
return strings.TrimSpace(fmt.Sprintf("%v", filter.Value)) != strings.TrimSpace(value)
return strings.TrimSpace(filter.Value) != strings.TrimSpace(value)
case "in":
_, has := filter.Vset[value]
return has
@@ -64,65 +49,6 @@ func matchTag(value string, filter models.TagFilter) bool {
case "!~":
return !filter.Regexp.MatchString(value)
}
// unexpected func
// unexpect func
return false
}
// targetGroupMatch handles the special matching logic for target_group
func targetGroupMatch(value string, filter models.TagFilter) bool {
var valueMap map[string]interface{}
if err := json.Unmarshal([]byte(value), &valueMap); err != nil {
return false
}
switch filter.Func {
case "in", "not in":
// slice of float64-typed ids
filterValueIds, ok := filter.Value.([]interface{})
if !ok {
return false
}
filterValueIdsMap := make(map[float64]struct{})
for _, id := range filterValueIds {
filterValueIdsMap[id.(float64)] = struct{}{}
}
// slice of float64-typed groupIds
groupIds, ok := valueMap["group_ids"].([]interface{})
if !ok {
return false
}
// "in": return true as soon as any of groupIds appears in filterGroupIds
// "not in" is the opposite
found := false
for _, gid := range groupIds {
if _, found = filterValueIdsMap[gid.(float64)]; found {
break
}
}
if filter.Func == "in" {
return found
}
// filter.Func == "not in"
return !found
case "=~", "!~":
// one regex match is enough to count as matched
groupNames, ok := valueMap["group_names"].([]interface{})
if !ok {
return false
}
matched := false
for _, gname := range groupNames {
if filter.Regexp.MatchString(fmt.Sprintf("%v", gname)) {
matched = true
break
}
}
if filter.Func == "=~" {
return matched
}
// "!~": 只要有一个匹配就返回 false否则返回 true
return !matched
default:
return false
}
}

View File

@@ -8,8 +8,8 @@ import (
"time"
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/alert/queue"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/poster"
@@ -26,15 +26,10 @@ type Consumer struct {
alerting aconf.Alerting
ctx *ctx.Context
dispatch *Dispatch
promClients *prom.PromClientMap
alertMuteCache *memsto.AlertMuteCacheType
dispatch *Dispatch
promClients *prom.PromClientMap
}
type EventMuteHookFunc func(event *models.AlertCurEvent) bool
var EventMuteHook EventMuteHookFunc = func(event *models.AlertCurEvent) bool { return false }
func InitRegisterQueryFunc(promClients *prom.PromClientMap) {
tplx.RegisterQueryFunc(func(datasourceID int64, promql string) model.Value {
if promClients.IsNil(datasourceID) {
@@ -48,14 +43,12 @@ func InitRegisterQueryFunc(promClients *prom.PromClientMap) {
}
// Create a Consumer instance
func NewConsumer(alerting aconf.Alerting, ctx *ctx.Context, dispatch *Dispatch, promClients *prom.PromClientMap, alertMuteCache *memsto.AlertMuteCacheType) *Consumer {
func NewConsumer(alerting aconf.Alerting, ctx *ctx.Context, dispatch *Dispatch, promClients *prom.PromClientMap) *Consumer {
return &Consumer{
alerting: alerting,
ctx: ctx,
dispatch: dispatch,
promClients: promClients,
alertMuteCache: alertMuteCache,
}
}
@@ -98,12 +91,12 @@ func (e *Consumer) consumeOne(event *models.AlertCurEvent) {
e.dispatch.Astats.CounterAlertsTotal.WithLabelValues(event.Cluster, eventType, event.GroupName).Inc()
if err := event.ParseRule("rule_name"); err != nil {
logger.Warningf("alert_eval_%d datasource_%d failed to parse rule name: %v", event.RuleId, event.DatasourceId, err)
logger.Warningf("ruleid:%d failed to parse rule name: %v", event.RuleId, err)
event.RuleName = fmt.Sprintf("failed to parse rule name: %v", err)
}
if err := event.ParseRule("annotations"); err != nil {
logger.Warningf("alert_eval_%d datasource_%d failed to parse annotations: %v", event.RuleId, event.DatasourceId, err)
logger.Warningf("ruleid:%d failed to parse annotations: %v", event.RuleId, err)
event.Annotations = fmt.Sprintf("failed to parse annotations: %v", err)
event.AnnotationsJSON["error"] = event.Annotations
}
@@ -111,12 +104,16 @@ func (e *Consumer) consumeOne(event *models.AlertCurEvent) {
e.queryRecoveryVal(event)
if err := event.ParseRule("rule_note"); err != nil {
logger.Warningf("alert_eval_%d datasource_%d failed to parse rule note: %v", event.RuleId, event.DatasourceId, err)
logger.Warningf("ruleid:%d failed to parse rule note: %v", event.RuleId, err)
event.RuleNote = fmt.Sprintf("failed to parse rule note: %v", err)
}
e.persist(event)
if event.IsRecovered && event.NotifyRecovered == 0 {
return
}
e.dispatch.HandleEventNotify(event, false)
}
@@ -130,7 +127,7 @@ func (e *Consumer) persist(event *models.AlertCurEvent) {
var err error
event.Id, err = poster.PostByUrlsWithResp[int64](e.ctx, "/v1/n9e/event-persist", event)
if err != nil {
logger.Errorf("event:%s persist err:%v", event.Hash, err)
logger.Errorf("event:%+v persist err:%v", event, err)
e.dispatch.Astats.CounterRuleEvalErrorTotal.WithLabelValues(fmt.Sprintf("%v", event.DatasourceId), "persist_event", event.GroupName, fmt.Sprintf("%v", event.RuleId)).Inc()
}
return
@@ -138,7 +135,7 @@ func (e *Consumer) persist(event *models.AlertCurEvent) {
err := models.EventPersist(e.ctx, event)
if err != nil {
logger.Errorf("event:%s persist err:%v", event.Hash, err)
logger.Errorf("event%+v persist err:%v", event, err)
e.dispatch.Astats.CounterRuleEvalErrorTotal.WithLabelValues(fmt.Sprintf("%v", event.DatasourceId), "persist_event", event.GroupName, fmt.Sprintf("%v", event.RuleId)).Inc()
}
}
@@ -156,12 +153,12 @@ func (e *Consumer) queryRecoveryVal(event *models.AlertCurEvent) {
promql = strings.TrimSpace(promql)
if promql == "" {
logger.Warningf("alert_eval_%d datasource_%d promql is blank", event.RuleId, event.DatasourceId)
logger.Warningf("rule_eval:%s promql is blank", getKey(event))
return
}
if e.promClients.IsNil(event.DatasourceId) {
logger.Warningf("alert_eval_%d datasource_%d error reader client is nil", event.RuleId, event.DatasourceId)
logger.Warningf("rule_eval:%s error reader client is nil", getKey(event))
return
}
@@ -170,7 +167,7 @@ func (e *Consumer) queryRecoveryVal(event *models.AlertCurEvent) {
var warnings promsdk.Warnings
value, warnings, err := readerClient.Query(e.ctx.Ctx, promql, time.Now())
if err != nil {
logger.Errorf("alert_eval_%d datasource_%d promql:%s, error:%v", event.RuleId, event.DatasourceId, promql, err)
logger.Errorf("rule_eval:%s promql:%s, error:%v", getKey(event), promql, err)
event.AnnotationsJSON["recovery_promql_error"] = fmt.Sprintf("promql:%s error:%v", promql, err)
b, err := json.Marshal(event.AnnotationsJSON)
@@ -184,12 +181,12 @@ func (e *Consumer) queryRecoveryVal(event *models.AlertCurEvent) {
}
if len(warnings) > 0 {
logger.Errorf("alert_eval_%d datasource_%d promql:%s, warnings:%v", event.RuleId, event.DatasourceId, promql, warnings)
logger.Errorf("rule_eval:%s promql:%s, warnings:%v", getKey(event), promql, warnings)
}
anomalyPoints := models.ConvertAnomalyPoints(value)
if len(anomalyPoints) == 0 {
logger.Warningf("alert_eval_%d datasource_%d promql:%s, result is empty", event.RuleId, event.DatasourceId, promql)
logger.Warningf("rule_eval:%s promql:%s, result is empty", getKey(event), promql)
event.AnnotationsJSON["recovery_promql_error"] = fmt.Sprintf("promql:%s error:%s", promql, "result is empty")
} else {
event.AnnotationsJSON["recovery_value"] = fmt.Sprintf("%v", anomalyPoints[0].Value)
@@ -204,3 +201,6 @@ func (e *Consumer) queryRecoveryVal(event *models.AlertCurEvent) {
}
}
func getKey(event *models.AlertCurEvent) string {
return common.RuleKey(event.DatasourceId, event.RuleId)
}

View File

@@ -3,8 +3,6 @@ package dispatch
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"html/template"
"net/url"
"strconv"
@@ -15,8 +13,6 @@ import (
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/alert/pipeline"
"github.com/ccfos/nightingale/v6/alert/pipeline/engine"
"github.com/ccfos/nightingale/v6/alert/sender"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/models"
@@ -25,17 +21,6 @@ import (
"github.com/toolkits/pkg/logger"
)
var ShouldSkipNotify func(*ctx.Context, *models.AlertCurEvent, int64) bool
var SendByNotifyRule func(*ctx.Context, *memsto.UserCacheType, *memsto.UserGroupCacheType, *memsto.NotifyChannelCacheType, *memsto.CvalCache,
[]*models.AlertCurEvent, int64, *models.NotifyConfig, *models.NotifyChannelConfig, *models.MessageTemplate)
var EventProcessorCache *memsto.EventProcessorCacheType
func init() {
ShouldSkipNotify = shouldSkipNotify
SendByNotifyRule = SendNotifyRuleMessage
}
type Dispatch struct {
alertRuleCache *memsto.AlertRuleCacheType
userCache *memsto.UserCacheType
@@ -44,12 +29,6 @@ type Dispatch struct {
targetCache *memsto.TargetCacheType
notifyConfigCache *memsto.NotifyConfigCacheType
taskTplsCache *memsto.TaskTplCache
configCvalCache *memsto.CvalCache
notifyRuleCache *memsto.NotifyRuleCacheType
notifyChannelCache *memsto.NotifyChannelCacheType
messageTemplateCache *memsto.MessageTemplateCacheType
eventProcessorCache *memsto.EventProcessorCacheType
alerting aconf.Alerting
@@ -58,8 +37,9 @@ type Dispatch struct {
tpls map[string]*template.Template
ExtraSenders map[string]sender.Sender
BeforeSenderHook func(*models.AlertCurEvent) bool
ctx *ctx.Context
Astats *astats.Stats
ctx *ctx.Context
Astats *astats.Stats
RwLock sync.RWMutex
}
@@ -67,21 +47,15 @@ type Dispatch struct {
// Create a Notify instance
func NewDispatch(alertRuleCache *memsto.AlertRuleCacheType, userCache *memsto.UserCacheType, userGroupCache *memsto.UserGroupCacheType,
alertSubscribeCache *memsto.AlertSubscribeCacheType, targetCache *memsto.TargetCacheType, notifyConfigCache *memsto.NotifyConfigCacheType,
taskTplsCache *memsto.TaskTplCache, notifyRuleCache *memsto.NotifyRuleCacheType, notifyChannelCache *memsto.NotifyChannelCacheType,
messageTemplateCache *memsto.MessageTemplateCacheType, eventProcessorCache *memsto.EventProcessorCacheType, configCvalCache *memsto.CvalCache, alerting aconf.Alerting, c *ctx.Context, astats *astats.Stats) *Dispatch {
taskTplsCache *memsto.TaskTplCache, alerting aconf.Alerting, ctx *ctx.Context, astats *astats.Stats) *Dispatch {
notify := &Dispatch{
alertRuleCache: alertRuleCache,
userCache: userCache,
userGroupCache: userGroupCache,
alertSubscribeCache: alertSubscribeCache,
targetCache: targetCache,
notifyConfigCache: notifyConfigCache,
taskTplsCache: taskTplsCache,
notifyRuleCache: notifyRuleCache,
notifyChannelCache: notifyChannelCache,
messageTemplateCache: messageTemplateCache,
eventProcessorCache: eventProcessorCache,
configCvalCache: configCvalCache,
alertRuleCache: alertRuleCache,
userCache: userCache,
userGroupCache: userGroupCache,
alertSubscribeCache: alertSubscribeCache,
targetCache: targetCache,
notifyConfigCache: notifyConfigCache,
taskTplsCache: taskTplsCache,
alerting: alerting,
@@ -90,21 +64,14 @@ func NewDispatch(alertRuleCache *memsto.AlertRuleCacheType, userCache *memsto.Us
ExtraSenders: make(map[string]sender.Sender),
BeforeSenderHook: func(*models.AlertCurEvent) bool { return true },
ctx: c,
ctx: ctx,
Astats: astats,
}
pipeline.Init()
EventProcessorCache = eventProcessorCache
// Set the notify-record callback
notifyChannelCache.SetNotifyRecordFunc(sender.NotifyRecord)
return notify
}
func (e *Dispatch) ReloadTpls() error {
err := e.reloadTpls()
err := e.relaodTpls()
if err != nil {
logger.Errorf("failed to reload tpls: %v", err)
}
@@ -112,13 +79,13 @@ func (e *Dispatch) ReloadTpls() error {
duration := time.Duration(9000) * time.Millisecond
for {
time.Sleep(duration)
if err := e.reloadTpls(); err != nil {
if err := e.relaodTpls(); err != nil {
logger.Warning("failed to reload tpls:", err)
}
}
}
func (e *Dispatch) reloadTpls() error {
func (e *Dispatch) relaodTpls() error {
tmpTpls, err := models.ListTpls(e.ctx)
if err != nil {
return err
@@ -164,452 +131,17 @@ func (e *Dispatch) reloadTpls() error {
return nil
}
func (e *Dispatch) HandleEventWithNotifyRule(eventOrigin *models.AlertCurEvent) {
if len(eventOrigin.NotifyRuleIds) > 0 {
for _, notifyRuleId := range eventOrigin.NotifyRuleIds {
// Deep-copy the event so concurrent modifications don't conflict
eventCopy := eventOrigin.DeepCopy()
logger.Infof("notify rule ids: %v, event: %s", notifyRuleId, eventCopy.Hash)
notifyRule := e.notifyRuleCache.Get(notifyRuleId)
if notifyRule == nil {
continue
}
if !notifyRule.Enable {
continue
}
eventCopy.NotifyRuleId = notifyRuleId
eventCopy.NotifyRuleName = notifyRule.Name
eventCopy = HandleEventPipeline(notifyRule.PipelineConfigs, eventOrigin, eventCopy, e.eventProcessorCache, e.ctx, notifyRuleId, "notify_rule")
if ShouldSkipNotify(e.ctx, eventCopy, notifyRuleId) {
logger.Infof("notify_id: %d, event:%s, should skip notify", notifyRuleId, eventCopy.Hash)
continue
}
// notify
for i := range notifyRule.NotifyConfigs {
err := NotifyRuleMatchCheck(&notifyRule.NotifyConfigs[i], eventCopy)
if err != nil {
logger.Errorf("notify_id: %d, event:%s, channel_id:%d, template_id: %d, notify_config:%+v, err:%v", notifyRuleId, eventCopy.Hash, notifyRule.NotifyConfigs[i].ChannelID, notifyRule.NotifyConfigs[i].TemplateID, notifyRule.NotifyConfigs[i], err)
continue
}
notifyChannel := e.notifyChannelCache.Get(notifyRule.NotifyConfigs[i].ChannelID)
messageTemplate := e.messageTemplateCache.Get(notifyRule.NotifyConfigs[i].TemplateID)
if notifyChannel == nil {
sender.NotifyRecord(e.ctx, []*models.AlertCurEvent{eventCopy}, notifyRuleId, fmt.Sprintf("notify_channel_id:%d", notifyRule.NotifyConfigs[i].ChannelID), "", "", errors.New("notify_channel not found"))
logger.Warningf("notify_id: %d, event:%s, channel_id:%d, template_id: %d, notify_channel not found", notifyRuleId, eventCopy.Hash, notifyRule.NotifyConfigs[i].ChannelID, notifyRule.NotifyConfigs[i].TemplateID)
continue
}
if notifyChannel.RequestType != "flashduty" && notifyChannel.RequestType != "pagerduty" && messageTemplate == nil {
logger.Warningf("notify_id: %d, channel_name: %v, event:%s, template_id: %d, message_template not found", notifyRuleId, notifyChannel.Ident, eventCopy.Hash, notifyRule.NotifyConfigs[i].TemplateID)
sender.NotifyRecord(e.ctx, []*models.AlertCurEvent{eventCopy}, notifyRuleId, notifyChannel.Name, "", "", errors.New("message_template not found"))
continue
}
go SendByNotifyRule(e.ctx, e.userCache, e.userGroupCache, e.notifyChannelCache, e.configCvalCache, []*models.AlertCurEvent{eventCopy}, notifyRuleId, &notifyRule.NotifyConfigs[i], notifyChannel, messageTemplate)
}
}
}
}
func shouldSkipNotify(ctx *ctx.Context, event *models.AlertCurEvent, notifyRuleId int64) bool {
if event == nil {
// If eventCopy is nil, a processor dropped it; send no notification
return true
}
if event.IsRecovered && event.NotifyRecovered == 0 {
// If eventCopy is a recovery event and NotifyRecovered is 0, don't notify
return true
}
return false
}
func HandleEventPipeline(pipelineConfigs []models.PipelineConfig, eventOrigin, event *models.AlertCurEvent, eventProcessorCache *memsto.EventProcessorCacheType, ctx *ctx.Context, id int64, from string) *models.AlertCurEvent {
workflowEngine := engine.NewWorkflowEngine(ctx)
for _, pipelineConfig := range pipelineConfigs {
if !pipelineConfig.Enable {
continue
}
eventPipeline := eventProcessorCache.Get(pipelineConfig.PipelineId)
if eventPipeline == nil {
logger.Warningf("processor_by_%s_id:%d pipeline_id:%d, event pipeline not found, event: %s", from, id, pipelineConfig.PipelineId, event.Hash)
continue
}
if !PipelineApplicable(eventPipeline, event) {
logger.Debugf("processor_by_%s_id:%d pipeline_id:%d, event pipeline not applicable, event: %s", from, id, pipelineConfig.PipelineId, event.Hash)
continue
}
// Execute uniformly through the workflow engine (compatible with both linear and workflow modes)
triggerCtx := &models.WorkflowTriggerContext{
Mode: models.TriggerModeEvent,
TriggerBy: from + "_" + strconv.FormatInt(id, 10),
}
resultEvent, result, err := workflowEngine.Execute(eventPipeline, event, triggerCtx)
if err != nil {
logger.Errorf("processor_by_%s_id:%d pipeline_id:%d, pipeline execute error: %v", from, id, pipelineConfig.PipelineId, err)
continue
}
if resultEvent == nil {
logger.Infof("processor_by_%s_id:%d pipeline_id:%d, event dropped, event: %s", from, id, pipelineConfig.PipelineId, eventOrigin.Hash)
if from == "notify_rule" {
sender.NotifyRecord(ctx, []*models.AlertCurEvent{eventOrigin}, id, "", "", result.Message, fmt.Errorf("processor_by_%s_id:%d pipeline_id:%d, drop by pipeline", from, id, pipelineConfig.PipelineId))
}
return nil
}
event = resultEvent
logger.Infof("processor_by_%s_id:%d pipeline_id:%d, pipeline executed, status:%s, message:%s", from, id, pipelineConfig.PipelineId, result.Status, result.Message)
}
event.FE2DB()
event.FillTagsMap()
return event
}
func PipelineApplicable(pipeline *models.EventPipeline, event *models.AlertCurEvent) bool {
if pipeline == nil {
return true
}
if !pipeline.FilterEnable {
return true
}
tagMatch := true
if len(pipeline.LabelFilters) > 0 {
// Deep copy to avoid concurrent map writes on cached objects
labelFiltersCopy := make([]models.TagFilter, len(pipeline.LabelFilters))
copy(labelFiltersCopy, pipeline.LabelFilters)
for i := range labelFiltersCopy {
if labelFiltersCopy[i].Func == "" {
labelFiltersCopy[i].Func = labelFiltersCopy[i].Op
}
}
tagFilters, err := models.ParseTagFilter(labelFiltersCopy)
if err != nil {
logger.Errorf("pipeline applicable failed to parse tag filter: %v event:%s pipeline:%+v", err, event.Hash, pipeline)
return false
}
tagMatch = common.MatchTags(event.TagsMap, tagFilters)
}
attributesMatch := true
if len(pipeline.AttrFilters) > 0 {
// Deep copy to avoid concurrent map writes on cached objects
attrFiltersCopy := make([]models.TagFilter, len(pipeline.AttrFilters))
copy(attrFiltersCopy, pipeline.AttrFilters)
tagFilters, err := models.ParseTagFilter(attrFiltersCopy)
if err != nil {
logger.Errorf("pipeline applicable failed to parse tag filter: %v event:%s pipeline:%+v err:%v", tagFilters, event.Hash, pipeline, err)
return false
}
attributesMatch = common.MatchTags(event.JsonTagsAndValue(), tagFilters)
}
return tagMatch && attributesMatch
}
func NotifyRuleMatchCheck(notifyConfig *models.NotifyConfig, event *models.AlertCurEvent) error {
tm := time.Unix(event.TriggerTime, 0)
triggerTime := tm.Format("15:04")
triggerWeek := int(tm.Weekday())
timeMatch := false
if len(notifyConfig.TimeRanges) == 0 {
timeMatch = true
}
for j := range notifyConfig.TimeRanges {
if timeMatch {
break
}
enableStime := notifyConfig.TimeRanges[j].Start
enableEtime := notifyConfig.TimeRanges[j].End
enableDaysOfWeek := notifyConfig.TimeRanges[j].Week
length := len(enableDaysOfWeek)
// enableStime, enableEtime and enableDaysOfWeek always have the same length; iterating one of them is enough
for i := 0; i < length; i++ {
if enableDaysOfWeek[i] != triggerWeek {
continue
}
if enableStime < enableEtime {
if enableEtime == "23:59" {
// Special-case ranges like 02:00-23:59: treat them as closed intervals on both ends
if triggerTime < enableStime {
// mute, 即没生效
continue
}
} else {
// 02:00-04:00 or 02:00-24:00
if triggerTime < enableStime || triggerTime >= enableEtime {
// muted, i.e. not in effect
continue
}
}
} else if enableStime > enableEtime {
// 21:00-09:00
if triggerTime < enableStime && triggerTime >= enableEtime {
// muted, i.e. not in effect
continue
}
}
// Reaching here means the trigger time falls within one of the rule's active time ranges, i.e. not muted
timeMatch = true
break
}
}
if !timeMatch {
return fmt.Errorf("event time not match time filter")
}
severityMatch := false
for i := range notifyConfig.Severities {
if notifyConfig.Severities[i] == event.Severity {
severityMatch = true
}
}
if !severityMatch {
return fmt.Errorf("event severity not match severity filter")
}
tagMatch := true
if len(notifyConfig.LabelKeys) > 0 {
// Deep copy to avoid concurrent map writes on cached objects
labelKeysCopy := make([]models.TagFilter, len(notifyConfig.LabelKeys))
copy(labelKeysCopy, notifyConfig.LabelKeys)
for i := range labelKeysCopy {
if labelKeysCopy[i].Func == "" {
labelKeysCopy[i].Func = labelKeysCopy[i].Op
}
}
tagFilters, err := models.ParseTagFilter(labelKeysCopy)
if err != nil {
logger.Errorf("notify send failed to parse tag filter: %v event:%s notify_config:%+v", err, event.Hash, notifyConfig)
return fmt.Errorf("failed to parse tag filter: %v", err)
}
tagMatch = common.MatchTags(event.TagsMap, tagFilters)
}
if !tagMatch {
return fmt.Errorf("event tag not match tag filter")
}
attributesMatch := true
if len(notifyConfig.Attributes) > 0 {
// Deep copy to avoid concurrent map writes on cached objects
attributesCopy := make([]models.TagFilter, len(notifyConfig.Attributes))
copy(attributesCopy, notifyConfig.Attributes)
tagFilters, err := models.ParseTagFilter(attributesCopy)
if err != nil {
logger.Errorf("notify send failed to parse tag filter: %v event:%s notify_config:%+v err:%v", tagFilters, event.Hash, notifyConfig, err)
return fmt.Errorf("failed to parse tag filter: %v", err)
}
attributesMatch = common.MatchTags(event.JsonTagsAndValue(), tagFilters)
}
if !attributesMatch {
return fmt.Errorf("event attributes not match attributes filter")
}
logger.Infof("notify send timeMatch:%v severityMatch:%v tagMatch:%v attributesMatch:%v event:%s notify_config:%+v", timeMatch, severityMatch, tagMatch, attributesMatch, event.Hash, notifyConfig)
return nil
}
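// Illustrative example of the time-range matching above: with TimeRanges =
// [{Start: "21:00", End: "09:00", Week: [1,2,3,4,5]}], an event triggered on
// Tuesday at 23:30 matches the overnight range, while Tuesday at 12:00 does not.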
func GetNotifyConfigParams(notifyConfig *models.NotifyConfig, contactKey string, userCache *memsto.UserCacheType, userGroupCache *memsto.UserGroupCacheType) ([]string, []int64, []string, map[string]string) {
customParams := make(map[string]string)
var flashDutyChannelIDs []int64
var pagerDutyRoutingKeys []string
var userInfoParams models.CustomParams
for key, value := range notifyConfig.Params {
switch key {
case "user_ids", "user_group_ids", "ids":
if data, err := json.Marshal(value); err == nil {
var ids []int64
if json.Unmarshal(data, &ids) == nil {
if key == "user_ids" {
userInfoParams.UserIDs = ids
} else if key == "user_group_ids" {
userInfoParams.UserGroupIDs = ids
} else if key == "ids" {
flashDutyChannelIDs = ids
}
}
}
case "pagerduty_integration_keys", "pagerduty_integration_ids":
if key == "pagerduty_integration_ids" {
// Skip this field rather than processing the ids; it is only a marker for the frontend
continue
}
if data, err := json.Marshal(value); err == nil {
var keys []string
if json.Unmarshal(data, &keys) == nil {
pagerDutyRoutingKeys = keys
break
}
}
default:
// Avoid a panic from a bare value.(string); support multiple types and normalize them to strings
customParams[key] = value.(string)
}
}
if len(userInfoParams.UserIDs) == 0 && len(userInfoParams.UserGroupIDs) == 0 {
return []string{}, flashDutyChannelIDs, pagerDutyRoutingKeys, customParams
}
userIds := make([]int64, 0)
userIds = append(userIds, userInfoParams.UserIDs...)
if len(userInfoParams.UserGroupIDs) > 0 {
userGroups := userGroupCache.GetByUserGroupIds(userInfoParams.UserGroupIDs)
for _, userGroup := range userGroups {
userIds = append(userIds, userGroup.UserIds...)
}
}
users := userCache.GetByUserIds(userIds)
visited := make(map[int64]bool)
sendtos := make([]string, 0)
for _, user := range users {
if visited[user.Id] {
continue
}
var sendto string
if contactKey == "phone" {
sendto = user.Phone
} else if contactKey == "email" {
sendto = user.Email
} else {
sendto, _ = user.ExtractToken(contactKey)
}
if sendto == "" {
continue
}
sendtos = append(sendtos, sendto)
visited[user.Id] = true
}
return sendtos, flashDutyChannelIDs, pagerDutyRoutingKeys, customParams
}
func SendNotifyRuleMessage(ctx *ctx.Context, userCache *memsto.UserCacheType, userGroupCache *memsto.UserGroupCacheType, notifyChannelCache *memsto.NotifyChannelCacheType, configCvalCache *memsto.CvalCache,
events []*models.AlertCurEvent, notifyRuleId int64, notifyConfig *models.NotifyConfig, notifyChannel *models.NotifyChannelConfig, messageTemplate *models.MessageTemplate) {
if len(events) == 0 {
logger.Errorf("notify_id: %d events is empty", notifyRuleId)
return
}
siteInfo := configCvalCache.GetSiteInfo()
tplContent := make(map[string]interface{})
if notifyChannel.RequestType != "flashduty" {
tplContent = messageTemplate.RenderEvent(events, siteInfo.SiteUrl)
}
var contactKey string
if notifyChannel.ParamConfig != nil && notifyChannel.ParamConfig.UserInfo != nil {
contactKey = notifyChannel.ParamConfig.UserInfo.ContactKey
}
sendtos, flashDutyChannelIDs, pagerdutyRoutingKeys, customParams := GetNotifyConfigParams(notifyConfig, contactKey, userCache, userGroupCache)
switch notifyChannel.RequestType {
case "flashduty":
if len(flashDutyChannelIDs) == 0 {
flashDutyChannelIDs = []int64{0} // if no flashduty channel is configured, use 0 as a sentinel for SendFlashDuty so no channel_id param is passed to flashduty
}
for i := range flashDutyChannelIDs {
start := time.Now()
respBody, err := notifyChannel.SendFlashDuty(events, flashDutyChannelIDs[i], notifyChannelCache.GetHttpClient(notifyChannel.ID))
respBody = fmt.Sprintf("send_time: %s duration: %d ms %s", time.Now().Format("2006-01-02 15:04:05"), time.Since(start).Milliseconds(), respBody)
logger.Infof("duty_sender notify_id: %d, channel_name: %v, event:%s, IntegrationUrl: %v dutychannel_id: %v, respBody: %v, err: %v", notifyRuleId, notifyChannel.Name, events[0].Hash, notifyChannel.RequestConfig.FlashDutyRequestConfig.IntegrationUrl, flashDutyChannelIDs[i], respBody, err)
sender.NotifyRecord(ctx, events, notifyRuleId, notifyChannel.Name, strconv.FormatInt(flashDutyChannelIDs[i], 10), respBody, err)
}
case "pagerduty":
for _, routingKey := range pagerdutyRoutingKeys {
start := time.Now()
respBody, err := notifyChannel.SendPagerDuty(events, routingKey, siteInfo.SiteUrl, notifyChannelCache.GetHttpClient(notifyChannel.ID))
respBody = fmt.Sprintf("send_time: %s duration: %d ms %s", time.Now().Format("2006-01-02 15:04:05"), time.Since(start).Milliseconds(), respBody)
logger.Infof("pagerduty_sender notify_id: %d, channel_name: %v, event:%s, respBody: %v, err: %v", notifyRuleId, notifyChannel.Name, events[0].Hash, respBody, err)
sender.NotifyRecord(ctx, events, notifyRuleId, notifyChannel.Name, "", respBody, err)
}
case "http":
// Handle http notifications via the queue
// Create a notify task
task := &memsto.NotifyTask{
Events: events,
NotifyRuleId: notifyRuleId,
NotifyChannel: notifyChannel,
TplContent: tplContent,
CustomParams: customParams,
Sendtos: sendtos,
}
// Enqueue the task
success := notifyChannelCache.EnqueueNotifyTask(task)
if !success {
logger.Errorf("failed to enqueue notify task for channel %d, notify_id: %d", notifyChannel.ID, notifyRuleId)
// If enqueueing fails, record an error notification
sender.NotifyRecord(ctx, events, notifyRuleId, notifyChannel.Name, getSendTarget(customParams, sendtos), "", errors.New("failed to enqueue notify task, queue is full"))
}
case "smtp":
notifyChannel.SendEmail(notifyRuleId, events, tplContent, sendtos, notifyChannelCache.GetSmtpClient(notifyChannel.ID))
case "script":
start := time.Now()
target, res, err := notifyChannel.SendScript(events, tplContent, customParams, sendtos)
res = fmt.Sprintf("send_time: %s duration: %d ms %s", time.Now().Format("2006-01-02 15:04:05"), time.Since(start).Milliseconds(), res)
logger.Infof("script_sender notify_id: %d, channel_name: %v, event:%s, tplContent:%s, customParams:%v, target:%s, res:%s, err:%v", notifyRuleId, notifyChannel.Name, events[0].Hash, tplContent, customParams, target, res, err)
sender.NotifyRecord(ctx, events, notifyRuleId, notifyChannel.Name, target, res, err)
default:
logger.Warningf("notify_id: %d, channel_name: %v, event:%s send type not found", notifyRuleId, notifyChannel.Name, events[0].Hash)
}
}
func NeedBatchContacts(requestConfig *models.HTTPRequestConfig) bool {
b, _ := json.Marshal(requestConfig)
return strings.Contains(string(b), "$sendtos")
}
// HandleEventNotify is the main logic for handling an event
// event: alert/recovery event
// isSubscribe: whether this alert event was produced by a subscribe config
func (e *Dispatch) HandleEventNotify(event *models.AlertCurEvent, isSubscribe bool) {
go e.HandleEventWithNotifyRule(event)
if event.IsRecovered && event.NotifyRecovered == 0 {
rule := e.alertRuleCache.Get(event.RuleId)
if rule == nil {
return
}
rule := e.alertRuleCache.Get(event.RuleId)
if rule == nil {
if e.blockEventNotify(rule, event) {
logger.Infof("block event notify: rule_id:%d event:%+v", rule.Id, event)
return
}
@@ -640,6 +172,7 @@ func (e *Dispatch) HandleEventNotify(event *models.AlertCurEvent, isSubscribe bo
notifyTarget.AndMerge(handler(rule, event, notifyTarget, e))
}
// Handle event sending; a single goroutine handles all sends for one event
go e.Send(rule, event, notifyTarget, isSubscribe)
// If the event was not produced by a subscribe rule, process the subscribe rules for it
@@ -648,6 +181,25 @@ func (e *Dispatch) HandleEventNotify(event *models.AlertCurEvent, isSubscribe bo
}
}
func (e *Dispatch) blockEventNotify(rule *models.AlertRule, event *models.AlertCurEvent) bool {
ruleType := rule.GetRuleType()
// For host rules, first check whether the host has been deleted
if ruleType == models.HOST {
host, ok := e.targetCache.Get(event.TagsMap["ident"])
if !ok || host == nil {
return true
}
}
// For recovery notifications, check whether the rule config has changed
// if event.IsRecovered && event.RuleHash != rule.Hash() {
// return true
// }
return false
}
func (e *Dispatch) handleSubs(event *models.AlertCurEvent) {
// handle alert subscribes
subscribes := make([]*models.AlertSubscribe, 0)
@@ -679,10 +231,6 @@ func (e *Dispatch) handleSub(sub *models.AlertSubscribe, event models.AlertCurEv
return
}
if !sub.MatchCate(event.Cate) {
return
}
if !common.MatchTags(event.TagsMap, sub.ITags) {
return
}
@@ -734,7 +282,7 @@ func (e *Dispatch) Send(rule *models.AlertRule, event *models.AlertCurEvent, not
event = msgCtx.Events[0]
}
logger.Debugf("send to channel:%s event:%s users:%+v", channel, event.Hash, msgCtx.Users)
logger.Debugf("send to channel:%s event:%+v users:%+v", channel, event, msgCtx.Users)
s.Send(msgCtx)
}
}
@@ -821,11 +369,6 @@ func (e *Dispatch) HandleIbex(rule *models.AlertRule, event *models.AlertCurEven
}
json.Unmarshal([]byte(rule.RuleConfig), &ruleConfig)
if event.IsRecovered {
// Recovery events don't need the self-healing logic
return
}
for _, t := range ruleConfig.TaskTpls {
if t.TplId == 0 {
continue
@@ -833,12 +376,12 @@ func (e *Dispatch) HandleIbex(rule *models.AlertRule, event *models.AlertCurEven
if len(t.Host) == 0 {
sender.CallIbex(e.ctx, t.TplId, event.TargetIdent,
e.taskTplsCache, e.targetCache, e.userCache, event, "")
e.taskTplsCache, e.targetCache, e.userCache, event)
continue
}
for _, host := range t.Host {
sender.CallIbex(e.ctx, t.TplId, host,
e.taskTplsCache, e.targetCache, e.userCache, event, "")
e.taskTplsCache, e.targetCache, e.userCache, event)
}
}
}
@@ -904,22 +447,3 @@ func mapKeys(m map[int64]struct{}) []int64 {
}
return lst
}
func getSendTarget(customParams map[string]string, sendtos []string) string {
if len(customParams) == 0 {
return strings.Join(sendtos, ",")
}
values := make([]string, 0)
for _, value := range customParams {
runes := []rune(value)
if len(runes) <= 4 {
values = append(values, value)
} else {
maskedValue := string(runes[:len(runes)-4]) + "****"
values = append(values, maskedValue)
}
}
return strings.Join(values, ",")
}
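// Illustrative: with customParams = {"webhook_token": "abcd1234"} this returns
// "abcd****"; with no custom params it falls back to joining sendtos.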

View File

@@ -18,18 +18,16 @@ func LogEvent(event *models.AlertCurEvent, location string, err ...error) {
}
logger.Infof(
"alert_eval_%d event(%s %s) %s: sub_id:%d notify_rule_ids:%v cluster:%s %v%s@%d last_eval_time:%d %s",
event.RuleId,
"event(%s %s) %s: rule_id=%d sub_id:%d cluster:%s %v%s@%d %s",
event.Hash,
status,
location,
event.RuleId,
event.SubRuleId,
event.NotifyRuleIds,
event.Cluster,
event.TagsJSON,
event.TriggerValue,
event.TriggerTime,
event.LastEvalTime,
message,
)
}

View File

@@ -10,10 +10,11 @@ import (
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/alert/naming"
"github.com/ccfos/nightingale/v6/alert/process"
"github.com/ccfos/nightingale/v6/datasource/commons/eslike"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/prom"
"github.com/ccfos/nightingale/v6/tdengine"
"github.com/toolkits/pkg/logger"
)
@@ -32,7 +33,8 @@ type Scheduler struct {
alertMuteCache *memsto.AlertMuteCacheType
datasourceCache *memsto.DatasourceCacheType
promClients *prom.PromClientMap
promClients *prom.PromClientMap
tdengineClients *tdengine.TdengineClientMap
naming *naming.Naming
@@ -43,7 +45,7 @@ type Scheduler struct {
func NewScheduler(aconf aconf.Alert, externalProcessors *process.ExternalProcessorsType, arc *memsto.AlertRuleCacheType,
targetCache *memsto.TargetCacheType, toarc *memsto.TargetsOfAlertRuleCacheType,
busiGroupCache *memsto.BusiGroupCacheType, alertMuteCache *memsto.AlertMuteCacheType, datasourceCache *memsto.DatasourceCacheType,
promClients *prom.PromClientMap, naming *naming.Naming, ctx *ctx.Context, stats *astats.Stats) *Scheduler {
promClients *prom.PromClientMap, tdengineClients *tdengine.TdengineClientMap, naming *naming.Naming, ctx *ctx.Context, stats *astats.Stats) *Scheduler {
scheduler := &Scheduler{
aconf: aconf,
alertRules: make(map[string]*AlertRuleWorker),
@@ -57,13 +59,13 @@ func NewScheduler(aconf aconf.Alert, externalProcessors *process.ExternalProcess
alertMuteCache: alertMuteCache,
datasourceCache: datasourceCache,
promClients: promClients,
naming: naming,
promClients: promClients,
tdengineClients: tdengineClients,
naming: naming,
ctx: ctx,
stats: stats,
}
eslike.SetEsIndexPatternCacheType(memsto.NewEsIndexPatternCacheType(ctx))
go scheduler.LoopSyncRules(context.Background())
return scheduler
@@ -93,7 +95,7 @@ func (s *Scheduler) syncAlertRules() {
}
ruleType := rule.GetRuleType()
if rule.IsPrometheusRule() || rule.IsInnerRule() {
if rule.IsPrometheusRule() || rule.IsLokiRule() || rule.IsTdengineRule() {
datasourceIds := s.datasourceCache.GetIDsByDsCateAndQueries(rule.Cate, rule.DatasourceQueries)
for _, dsId := range datasourceIds {
if !naming.DatasourceHashRing.IsHit(strconv.FormatInt(dsId, 10), fmt.Sprintf("%d", rule.Id), s.aconf.Heartbeat.Endpoint) {
@@ -101,22 +103,22 @@ func (s *Scheduler) syncAlertRules() {
}
ds := s.datasourceCache.GetById(dsId)
if ds == nil {
logger.Debugf("alert_eval_%d datasource %d not found", rule.Id, dsId)
logger.Debugf("datasource %d not found", dsId)
continue
}
if ds.PluginType != ruleType {
logger.Debugf("alert_eval_%d datasource %d category is %s not %s", rule.Id, dsId, ds.PluginType, ruleType)
logger.Debugf("datasource %d category is %s not %s", dsId, ds.PluginType, ruleType)
continue
}
if ds.Status != "enabled" {
logger.Debugf("alert_eval_%d datasource %d status is %s", rule.Id, dsId, ds.Status)
logger.Debugf("datasource %d status is %s", dsId, ds.Status)
continue
}
processor := process.NewProcessor(s.aconf.Heartbeat.EngineName, rule, dsId, s.alertRuleCache, s.targetCache, s.targetsOfAlertRuleCache, s.busiGroupCache, s.alertMuteCache, s.datasourceCache, s.ctx, s.stats)
alertRule := NewAlertRuleWorker(rule, dsId, processor, s.promClients, s.ctx)
alertRule := NewAlertRuleWorker(rule, dsId, processor, s.promClients, s.tdengineClients, s.ctx)
alertRuleWorkers[alertRule.Hash()] = alertRule
}
} else if rule.IsHostRule() {
@@ -125,7 +127,7 @@ func (s *Scheduler) syncAlertRules() {
continue
}
processor := process.NewProcessor(s.aconf.Heartbeat.EngineName, rule, 0, s.alertRuleCache, s.targetCache, s.targetsOfAlertRuleCache, s.busiGroupCache, s.alertMuteCache, s.datasourceCache, s.ctx, s.stats)
alertRule := NewAlertRuleWorker(rule, 0, processor, s.promClients, s.ctx)
alertRule := NewAlertRuleWorker(rule, 0, processor, s.promClients, s.tdengineClients, s.ctx)
alertRuleWorkers[alertRule.Hash()] = alertRule
} else {
// If the rule does not alert through the prometheus engine, create it as an externalRule
@@ -134,12 +136,12 @@ func (s *Scheduler) syncAlertRules() {
for _, dsId := range dsIds {
ds := s.datasourceCache.GetById(dsId)
if ds == nil {
logger.Debugf("alert_eval_%d datasource %d not found", rule.Id, dsId)
logger.Debugf("datasource %d not found", dsId)
continue
}
if ds.Status != "enabled" {
logger.Debugf("alert_eval_%d datasource %d status is %s", rule.Id, dsId, ds.Status)
logger.Debugf("datasource %d status is %s", dsId, ds.Status)
continue
}
processor := process.NewProcessor(s.aconf.Heartbeat.EngineName, rule, dsId, s.alertRuleCache, s.targetCache, s.targetsOfAlertRuleCache, s.busiGroupCache, s.alertMuteCache, s.datasourceCache, s.ctx, s.stats)
@@ -151,7 +153,6 @@ func (s *Scheduler) syncAlertRules() {
for hash, rule := range alertRuleWorkers {
if _, has := s.alertRules[hash]; !has {
rule.Prepare()
time.Sleep(time.Duration(20) * time.Millisecond)
rule.Start()
s.alertRules[hash] = rule
}

File diff suppressed because it is too large

View File

@@ -1,7 +1,6 @@
package mute
import (
"slices"
"strconv"
"strings"
"time"
@@ -10,68 +9,40 @@ import (
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/models"
"github.com/pkg/errors"
"github.com/toolkits/pkg/logger"
)
func IsMuted(rule *models.AlertRule, event *models.AlertCurEvent, targetCache *memsto.TargetCacheType, alertMuteCache *memsto.AlertMuteCacheType) (bool, string, int64) {
func IsMuted(rule *models.AlertRule, event *models.AlertCurEvent, targetCache *memsto.TargetCacheType, alertMuteCache *memsto.AlertMuteCacheType) (bool, string) {
if rule.Disabled == 1 {
return true, "rule disabled", 0
return true, "rule disabled"
}
if TimeSpanMuteStrategy(rule, event) {
return true, "rule is not effective for period of time", 0
return true, "rule is not effective for period of time"
}
if IdentNotExistsMuteStrategy(rule, event, targetCache) {
return true, "ident not exists mute", 0
return true, "ident not exists mute"
}
if BgNotMatchMuteStrategy(rule, event, targetCache) {
return true, "bg not match mute", 0
return true, "bg not match mute"
}
hit, muteId := EventMuteStrategy(event, alertMuteCache)
if hit {
return true, "match mute rule", muteId
if EventMuteStrategy(event, alertMuteCache) {
return true, "match mute rule"
}
return false, "", 0
return false, ""
}
// TimeSpanMuteStrategy filters by the effective time span configured on the rule; if the alert falls outside that span, it does not fire, i.e. it is muted
// The time range is left-closed, right-open; the default range is 00:00-24:00
// If the rule configures a timezone, the check is done in that timezone; if the timezone is empty, the system timezone is used
func TimeSpanMuteStrategy(rule *models.AlertRule, event *models.AlertCurEvent) bool {
// Determine which timezone to use
var targetLoc *time.Location
var err error
timezone := rule.TimeZone
if timezone == "" {
// If the timezone is empty, use the system timezone (keeps the original behavior)
targetLoc = time.Local
} else {
// Load the timezone configured on the rule
targetLoc, err = time.LoadLocation(timezone)
if err != nil {
// If loading the timezone fails, log the error and use the system timezone
logger.Warningf("Failed to load timezone %s for rule %d, using system timezone: %v", timezone, rule.Id, err)
targetLoc = time.Local
}
}
// Convert the trigger time to the target timezone
tm := time.Unix(event.TriggerTime, 0).In(targetLoc)
tm := time.Unix(event.TriggerTime, 0)
triggerTime := tm.Format("15:04")
triggerWeek := strconv.Itoa(int(tm.Weekday()))
if rule.EnableDaysOfWeek == "" {
// If the rule has no effective time configured, it is effective all day by default
return false
}
enableStime := strings.Fields(rule.EnableStime)
enableEtime := strings.Fields(rule.EnableEtime)
enableDaysOfWeek := strings.Split(rule.EnableDaysOfWeek, ";")
@@ -122,7 +93,7 @@ func IdentNotExistsMuteStrategy(rule *models.AlertRule, event *models.AlertCurEv
// If this is a target_up alert and the ident no longer exists, filter it out directly
// This check is rather crude, but there is no better option for now
if !exists && strings.Contains(rule.PromQl, "target_up") {
logger.Debugf("alert_eval_%d [IdentNotExistsMuteStrategy] mute: cluster:%s ident:%s", rule.Id, event.Cluster, ident)
logger.Debugf("[%s] mute: rule_eval:%d cluster:%s ident:%s", "IdentNotExistsMuteStrategy", rule.Id, event.Cluster, ident)
return true
}
return false
@@ -144,57 +115,82 @@ func BgNotMatchMuteStrategy(rule *models.AlertRule, event *models.AlertCurEvent,
// For alert events that carry an ident, check whether the ident's busi group matches the rule's busi group
// If the alert rule is scoped to its own busi group only, machines in other busi groups must not produce alerts for this rule
if exists && !target.MatchGroupId(rule.GroupId) {
logger.Debugf("alert_eval_%d [BgNotMatchMuteStrategy] mute: cluster:%s", rule.Id, event.Cluster)
logger.Debugf("[%s] mute: rule_eval:%d cluster:%s", "BgNotMatchMuteStrategy", rule.Id, event.Cluster)
return true
}
return false
}
func EventMuteStrategy(event *models.AlertCurEvent, alertMuteCache *memsto.AlertMuteCacheType) (bool, int64) {
func EventMuteStrategy(event *models.AlertCurEvent, alertMuteCache *memsto.AlertMuteCacheType) bool {
mutes, has := alertMuteCache.Gets(event.GroupId)
if !has || len(mutes) == 0 {
return false, 0
return false
}
for i := 0; i < len(mutes); i++ {
matched, _ := MatchMute(event, mutes[i])
if matched {
return true, mutes[i].Id
if matchMute(event, mutes[i]) {
return true
}
}
return false, 0
return false
}
// MatchMute: if the optional clock argument is passed, use the time it represents; otherwise take TriggerTime from the event
func MatchMute(event *models.AlertCurEvent, mute *models.AlertMute, clock ...int64) (bool, error) {
// matchMute: if the optional clock argument is passed, use the time it represents; otherwise take TriggerTime from the event
func matchMute(event *models.AlertCurEvent, mute *models.AlertMute, clock ...int64) bool {
if mute.Disabled == 1 {
return false, errors.New("mute is disabled")
return false
}
ts := event.TriggerTime
if len(clock) > 0 {
ts = clock[0]
}
// If not global, check the matching datasource id
if len(mute.DatasourceIdsJson) != 0 && mute.DatasourceIdsJson[0] != 0 && event.DatasourceId != 0 {
if !slices.Contains(mute.DatasourceIdsJson, event.DatasourceId) {
return false, errors.New("datasource id not match")
idm := make(map[int64]struct{}, len(mute.DatasourceIdsJson))
for i := 0; i < len(mute.DatasourceIdsJson); i++ {
idm[mute.DatasourceIdsJson[i]] = struct{}{}
}
// Check whether event.datasourceId is contained in idm
if _, has := idm[event.DatasourceId]; !has {
return false
}
}
var matchTime bool
if mute.MuteTimeType == models.TimeRange {
if !mute.IsWithinTimeRange(event.TriggerTime) {
return false, errors.New("event trigger time not within mute time range")
if ts < mute.Btime || ts > mute.Etime {
return false
}
matchTime = true
} else if mute.MuteTimeType == models.Periodic {
ts := event.TriggerTime
if len(clock) > 0 {
ts = clock[0]
}
tm := time.Unix(event.TriggerTime, 0)
triggerTime := tm.Format("15:04")
triggerWeek := strconv.Itoa(int(tm.Weekday()))
if !mute.IsWithinPeriodicMute(ts) {
return false, errors.New("event trigger time not within periodic mute range")
for i := 0; i < len(mute.PeriodicMutesJson); i++ {
if strings.Contains(mute.PeriodicMutesJson[i].EnableDaysOfWeek, triggerWeek) {
if mute.PeriodicMutesJson[i].EnableStime == mute.PeriodicMutesJson[i].EnableEtime || (mute.PeriodicMutesJson[i].EnableStime == "00:00" && mute.PeriodicMutesJson[i].EnableEtime == "23:59") {
matchTime = true
break
} else if mute.PeriodicMutesJson[i].EnableStime < mute.PeriodicMutesJson[i].EnableEtime {
if triggerTime >= mute.PeriodicMutesJson[i].EnableStime && triggerTime < mute.PeriodicMutesJson[i].EnableEtime {
matchTime = true
break
}
} else {
if triggerTime >= mute.PeriodicMutesJson[i].EnableStime || triggerTime < mute.PeriodicMutesJson[i].EnableEtime {
matchTime = true
break
}
}
}
}
} else {
logger.Warningf("mute time type invalid, %d", mute.MuteTimeType)
return false, errors.New("mute time type invalid")
}
if !matchTime {
return false
}
var matchSeverity bool
@@ -210,14 +206,12 @@ func MatchMute(event *models.AlertCurEvent, mute *models.AlertMute, clock ...int
}
if !matchSeverity {
return false, errors.New("event severity not match mute severity")
return false
}
if len(mute.ITags) == 0 {
return true, nil
if mute.ITags == nil || len(mute.ITags) == 0 {
return true
}
if !common.MatchTags(event.TagsMap, mute.ITags) {
return false, errors.New("event tags not match mute tags")
}
return true, nil
return common.MatchTags(event.TagsMap, mute.ITags)
}
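
For reference, the periodic mute window above is matched by lexicographic comparison of "HH:MM" strings. A small, self-contained sketch of the same window semantics (the inWindow helper is hypothetical, not part of the codebase):

package main

import "fmt"

// inWindow reports whether t ("HH:MM") falls inside [start, end),
// treating start == end and 00:00-23:59 as "all day", and handling
// windows that wrap past midnight (start > end).
func inWindow(t, start, end string) bool {
	if start == end || (start == "00:00" && end == "23:59") {
		return true
	}
	if start < end {
		return t >= start && t < end
	}
	return t >= start || t < end
}

func main() {
	fmt.Println(inWindow("09:30", "09:00", "18:00")) // true
	fmt.Println(inWindow("02:00", "22:00", "06:00")) // true, window wraps midnight
	fmt.Println(inWindow("12:00", "00:00", "23:59")) // true, all-day window
}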

View File

@@ -115,7 +115,7 @@ func (n *Naming) heartbeat() error {
newDatasource[datasourceIds[i]] = struct{}{}
servers, err := n.ActiveServers(datasourceIds[i])
if err != nil {
logger.Warningf("heartbeat %d get active server err:%v", datasourceIds[i], err)
logger.Warningf("hearbeat %d get active server err:%v", datasourceIds[i], err)
n.astats.CounterHeartbeatErrorTotal.WithLabelValues().Inc()
continue
}
@@ -148,7 +148,7 @@ func (n *Naming) heartbeat() error {
servers, err := n.ActiveServersByEngineName()
if err != nil {
logger.Warningf("heartbeat %d get active server err:%v", HostDatasource, err)
logger.Warningf("hearbeat %d get active server err:%v", HostDatasource, err)
n.astats.CounterHeartbeatErrorTotal.WithLabelValues().Inc()
return nil
}

View File

@@ -1,380 +0,0 @@
package engine
import (
"fmt"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/google/uuid"
"github.com/toolkits/pkg/logger"
)
type WorkflowEngine struct {
ctx *ctx.Context
}
func NewWorkflowEngine(c *ctx.Context) *WorkflowEngine {
return &WorkflowEngine{ctx: c}
}
func (e *WorkflowEngine) Execute(pipeline *models.EventPipeline, event *models.AlertCurEvent, triggerCtx *models.WorkflowTriggerContext) (*models.AlertCurEvent, *models.WorkflowResult, error) {
startTime := time.Now()
wfCtx := e.initWorkflowContext(pipeline, event, triggerCtx)
nodes := pipeline.GetWorkflowNodes()
connections := pipeline.GetWorkflowConnections()
if len(nodes) == 0 {
return event, &models.WorkflowResult{
Event: event,
Status: models.ExecutionStatusSuccess,
Message: "no nodes to execute",
}, nil
}
nodeMap := make(map[string]*models.WorkflowNode)
for i := range nodes {
if nodes[i].RetryInterval == 0 {
nodes[i].RetryInterval = 1
}
if nodes[i].MaxRetries == 0 {
nodes[i].MaxRetries = 1
}
nodeMap[nodes[i].ID] = &nodes[i]
}
result := e.executeDAG(nodeMap, connections, wfCtx)
result.Event = wfCtx.Event
duration := time.Since(startTime).Milliseconds()
if triggerCtx != nil && triggerCtx.Mode != "" {
e.saveExecutionRecord(pipeline, wfCtx, result, triggerCtx, startTime.Unix(), duration)
}
return wfCtx.Event, result, nil
}
func (e *WorkflowEngine) initWorkflowContext(pipeline *models.EventPipeline, event *models.AlertCurEvent, triggerCtx *models.WorkflowTriggerContext) *models.WorkflowContext {
// Merge input parameters
inputs := pipeline.GetInputsMap()
if triggerCtx != nil && triggerCtx.InputsOverrides != nil {
for k, v := range triggerCtx.InputsOverrides {
inputs[k] = v
}
}
metadata := map[string]string{
"start_time": fmt.Sprintf("%d", time.Now().Unix()),
"pipeline_id": fmt.Sprintf("%d", pipeline.ID),
}
// Whether streaming output is enabled
stream := false
if triggerCtx != nil {
metadata["request_id"] = triggerCtx.RequestID
metadata["trigger_mode"] = triggerCtx.Mode
metadata["trigger_by"] = triggerCtx.TriggerBy
stream = triggerCtx.Stream
}
return &models.WorkflowContext{
Event: event,
Inputs: inputs,
Vars: make(map[string]interface{}), // initialize empty Vars, used to pass data between nodes
Metadata: metadata,
Stream: stream,
}
}
// executeDAG executes the DAG using Kahn's algorithm
func (e *WorkflowEngine) executeDAG(nodeMap map[string]*models.WorkflowNode, connections models.Connections, wfCtx *models.WorkflowContext) *models.WorkflowResult {
result := &models.WorkflowResult{
Status: models.ExecutionStatusSuccess,
NodeResults: make([]*models.NodeExecutionResult, 0),
Stream: wfCtx.Stream, // inherit the streaming-output setting from the context
}
// Compute the in-degree of each node
inDegree := make(map[string]int)
for nodeID := range nodeMap {
inDegree[nodeID] = 0
}
// Walk the connections to compute in-degrees
for _, nodeConns := range connections {
for _, targets := range nodeConns.Main {
for _, target := range targets {
inDegree[target.Node]++
}
}
}
// Find all nodes with in-degree 0 (start nodes)
queue := make([]string, 0)
for nodeID, degree := range inDegree {
if degree == 0 {
queue = append(queue, nodeID)
}
}
// If there is no start node, there is a circular dependency
if len(queue) == 0 && len(nodeMap) > 0 {
result.Status = models.ExecutionStatusFailed
result.Message = "workflow has circular dependency"
return result
}
// Track executed nodes
executed := make(map[string]bool)
// Track each node's branch selection result
branchResults := make(map[string]*int)
for len(queue) > 0 {
// Pop the node at the head of the queue
nodeID := queue[0]
queue = queue[1:]
// Skip nodes that have already been executed
if executed[nodeID] {
continue
}
node, exists := nodeMap[nodeID]
if !exists {
continue
}
// Execute the node
nodeResult, nodeOutput := e.executeNode(node, wfCtx)
result.NodeResults = append(result.NodeResults, nodeResult)
if nodeOutput != nil && nodeOutput.Stream && nodeOutput.StreamChan != nil {
// A streaming-output node is usually the last node
// Pass StreamChan straight to WorkflowResult without blocking
result.Stream = true
result.StreamChan = nodeOutput.StreamChan
result.Event = wfCtx.Event
result.Status = "streaming"
result.Message = fmt.Sprintf("streaming output from node: %s", node.Name)
// Mark the node status as streaming
nodeResult.Status = "streaming"
nodeResult.Message = "streaming in progress"
// Return immediately and let the API layer handle the streaming response
return result
}
executed[nodeID] = true
// Save the branch result
if nodeResult.BranchIndex != nil {
branchResults[nodeID] = nodeResult.BranchIndex
}
// Check the execution status
if nodeResult.Status == "failed" {
if !node.ContinueOnFail {
result.Status = models.ExecutionStatusFailed
result.ErrorNode = nodeID
result.Message = fmt.Sprintf("node %s failed: %s", node.Name, nodeResult.Error)
}
}
// Check whether the workflow was terminated
if nodeResult.Status == "terminated" {
result.Message = fmt.Sprintf("workflow terminated at node %s", node.Name)
return result
}
// Update the in-degree of successor nodes
if nodeConns, ok := connections[nodeID]; ok {
for outputIndex, targets := range nodeConns.Main {
// Check whether this branch should be followed
if !e.shouldFollowBranch(nodeID, outputIndex, branchResults) {
continue
}
for _, target := range targets {
inDegree[target.Node]--
if inDegree[target.Node] == 0 {
queue = append(queue, target.Node)
}
}
}
}
}
return result
}
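
The traversal above is a standard Kahn topological sort over the workflow DAG. Stripped of node execution, branching and streaming concerns, the core loop looks roughly like this (toy node and edge data, not the engine's actual types):

package main

import "fmt"

func main() {
	// Toy DAG: a -> b, a -> c, b -> d, c -> d.
	nodes := []string{"a", "b", "c", "d"}
	edges := map[string][]string{"a": {"b", "c"}, "b": {"d"}, "c": {"d"}}

	// Compute the in-degree of each node.
	inDegree := make(map[string]int, len(nodes))
	for _, n := range nodes {
		inDegree[n] = 0
	}
	for _, targets := range edges {
		for _, t := range targets {
			inDegree[t]++
		}
	}

	// Start from every node with in-degree 0; no such node means a cycle.
	var queue []string
	for _, n := range nodes {
		if inDegree[n] == 0 {
			queue = append(queue, n)
		}
	}

	for len(queue) > 0 {
		n := queue[0]
		queue = queue[1:]
		fmt.Println("execute", n)
		// Finishing a node unlocks its successors once all their predecessors have run.
		for _, t := range edges[n] {
			if inDegree[t]--; inDegree[t] == 0 {
				queue = append(queue, t)
			}
		}
	}
}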
// executeNode executes a single node
// Returns the node execution result and the node output (used for streaming-output detection)
func (e *WorkflowEngine) executeNode(node *models.WorkflowNode, wfCtx *models.WorkflowContext) (*models.NodeExecutionResult, *models.NodeOutput) {
startTime := time.Now()
nodeResult := &models.NodeExecutionResult{
NodeID: node.ID,
NodeName: node.Name,
NodeType: node.Type,
StartedAt: startTime.Unix(),
}
var nodeOutput *models.NodeOutput
// Skip disabled nodes
if node.Disabled {
nodeResult.Status = "skipped"
nodeResult.Message = "node is disabled"
nodeResult.FinishedAt = time.Now().Unix()
nodeResult.DurationMs = time.Since(startTime).Milliseconds()
return nodeResult, nil
}
// Get the processor
processor, err := models.GetProcessorByType(node.Type, node.Config)
if err != nil {
nodeResult.Status = "failed"
nodeResult.Error = fmt.Sprintf("failed to get processor: %v", err)
nodeResult.FinishedAt = time.Now().Unix()
nodeResult.DurationMs = time.Since(startTime).Milliseconds()
return nodeResult, nil
}
// Run the processor (with retries)
var retries int
maxRetries := node.MaxRetries
if !node.RetryOnFail {
maxRetries = 0
}
for retries <= maxRetries {
// Check whether this is a branch processor
if branchProcessor, ok := processor.(models.BranchProcessor); ok {
output, err := branchProcessor.ProcessWithBranch(e.ctx, wfCtx)
if err != nil {
if retries < maxRetries {
retries++
time.Sleep(time.Duration(node.RetryInterval) * time.Second)
continue
}
nodeResult.Status = "failed"
nodeResult.Error = err.Error()
} else {
nodeResult.Status = "success"
if output != nil {
nodeOutput = output
if output.WfCtx != nil {
wfCtx = output.WfCtx
}
nodeResult.Message = output.Message
nodeResult.BranchIndex = output.BranchIndex
if output.Terminate {
nodeResult.Status = "terminated"
}
}
}
break
}
// Regular processor
newWfCtx, msg, err := processor.Process(e.ctx, wfCtx)
if err != nil {
if retries < maxRetries {
retries++
time.Sleep(time.Duration(node.RetryInterval) * time.Second)
continue
}
nodeResult.Status = "failed"
nodeResult.Error = err.Error()
} else {
nodeResult.Status = "success"
nodeResult.Message = msg
if newWfCtx != nil {
wfCtx = newWfCtx
// Detect the streaming-output flag
if newWfCtx.Stream && newWfCtx.StreamChan != nil {
nodeOutput = &models.NodeOutput{
WfCtx: newWfCtx,
Message: msg,
Stream: true,
StreamChan: newWfCtx.StreamChan,
}
}
}
// If the event was dropped (nil returned or Event is nil), mark the node as terminated
if newWfCtx == nil || newWfCtx.Event == nil {
nodeResult.Status = "terminated"
nodeResult.Message = msg
}
}
break
}
nodeResult.FinishedAt = time.Now().Unix()
nodeResult.DurationMs = time.Since(startTime).Milliseconds()
logger.Infof("workflow: executed node %s (type=%s) status=%s msg=%s duration=%dms",
node.Name, node.Type, nodeResult.Status, nodeResult.Message, nodeResult.DurationMs)
return nodeResult, nodeOutput
}
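
The per-node retry handling above is a plain fixed-interval retry loop driven by MaxRetries and RetryInterval. A reduced, runnable sketch of the same pattern (the work closure is hypothetical):

package main

import (
	"errors"
	"fmt"
	"time"
)

// runWithRetry makes one attempt plus up to maxRetries retries,
// sleeping a fixed interval between attempts.
func runWithRetry(work func() error, maxRetries int, interval time.Duration) error {
	var err error
	for attempt := 0; attempt <= maxRetries; attempt++ {
		if err = work(); err == nil {
			return nil
		}
		if attempt < maxRetries {
			time.Sleep(interval)
		}
	}
	return err
}

func main() {
	calls := 0
	err := runWithRetry(func() error {
		calls++
		if calls < 3 {
			return errors.New("transient failure")
		}
		return nil
	}, 3, 10*time.Millisecond)
	fmt.Println(calls, err) // 3 <nil>
}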
// shouldFollowBranch decides whether a given branch should be followed
func (e *WorkflowEngine) shouldFollowBranch(nodeID string, outputIndex int, branchResults map[string]*int) bool {
branchIndex, hasBranch := branchResults[nodeID]
if !hasBranch {
// No branch result means this is not a branch node; only the first output is followed
return outputIndex == 0
}
if branchIndex == nil {
// branchIndex is nil: follow the default branch (usually the last one)
return true
}
// Only follow the selected branch
return outputIndex == *branchIndex
}
func (e *WorkflowEngine) saveExecutionRecord(pipeline *models.EventPipeline, wfCtx *models.WorkflowContext, result *models.WorkflowResult, triggerCtx *models.WorkflowTriggerContext, startTime int64, duration int64) {
executionID := triggerCtx.RequestID
if executionID == "" {
executionID = uuid.New().String()
}
execution := &models.EventPipelineExecution{
ID: executionID,
PipelineID: pipeline.ID,
PipelineName: pipeline.Name,
Mode: triggerCtx.Mode,
Status: result.Status,
ErrorMessage: result.Message,
ErrorNode: result.ErrorNode,
CreatedAt: startTime,
FinishedAt: time.Now().Unix(),
DurationMs: duration,
TriggerBy: triggerCtx.TriggerBy,
}
if wfCtx.Event != nil {
execution.EventID = wfCtx.Event.Id
}
if err := execution.SetNodeResults(result.NodeResults); err != nil {
logger.Errorf("workflow: failed to set node results: pipeline_id=%d, error=%v", pipeline.ID, err)
}
if err := execution.SetInputsSnapshot(wfCtx.Inputs); err != nil {
logger.Errorf("workflow: failed to set inputs snapshot: pipeline_id=%d, error=%v", pipeline.ID, err)
}
if err := models.CreateEventPipelineExecution(e.ctx, execution); err != nil {
logger.Errorf("workflow: failed to save execution record: pipeline_id=%d, error=%v", pipeline.ID, err)
}
}

View File

@@ -1,13 +0,0 @@
package pipeline
import (
_ "github.com/ccfos/nightingale/v6/alert/pipeline/processor/aisummary"
_ "github.com/ccfos/nightingale/v6/alert/pipeline/processor/callback"
_ "github.com/ccfos/nightingale/v6/alert/pipeline/processor/eventdrop"
_ "github.com/ccfos/nightingale/v6/alert/pipeline/processor/eventupdate"
_ "github.com/ccfos/nightingale/v6/alert/pipeline/processor/logic"
_ "github.com/ccfos/nightingale/v6/alert/pipeline/processor/relabel"
)
func Init() {
}
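
The blank imports above exist purely for their side effects: each processor package registers itself from an init() function, so importing this package is enough to populate the processor registry. A minimal sketch of that registration pattern (toy registry, not the real models.RegisterProcessor API):

package main

import "fmt"

// A tiny processor registry, analogous in spirit to models.RegisterProcessor.
var registry = map[string]func() string{}

func register(name string, factory func() string) { registry[name] = factory }

// In the real code each processor package calls the register function from
// its own init(), and the blank import "_" is what pulls that init() in.
func init() { register("callback", func() string { return "callback processor" }) }

func main() {
	fmt.Println(registry["callback"]()) // callback processor
}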

View File

@@ -1,246 +0,0 @@
package aisummary
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"text/template"
"time"
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/callback"
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/common"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/tplx"
)
const (
HTTP_STATUS_SUCCESS_MAX = 299
)
// AISummaryConfig is the configuration struct
type AISummaryConfig struct {
callback.HTTPConfig
ModelName string `json:"model_name"`
APIKey string `json:"api_key"`
PromptTemplate string `json:"prompt_template"`
CustomParams map[string]interface{} `json:"custom_params"`
}
type Message struct {
Role string `json:"role"`
Content string `json:"content"`
}
type ChatCompletionResponse struct {
Choices []struct {
Message struct {
Content string `json:"content"`
} `json:"message"`
} `json:"choices"`
}
func init() {
models.RegisterProcessor("ai_summary", &AISummaryConfig{})
}
func (c *AISummaryConfig) Init(settings interface{}) (models.Processor, error) {
result, err := common.InitProcessor[*AISummaryConfig](settings)
return result, err
}
func (c *AISummaryConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.WorkflowContext, string, error) {
event := wfCtx.Event
if c.Client == nil {
if err := c.initHTTPClient(); err != nil {
return wfCtx, "", fmt.Errorf("failed to initialize HTTP client: %v processor: %v", err, c)
}
}
// Prepare the alert event information
eventInfo, err := c.prepareEventInfo(wfCtx)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to prepare event info: %v processor: %v", err, c)
}
// Call the AI model to generate a summary
summary, err := c.generateAISummary(eventInfo)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to generate AI summary: %v processor: %v", err, c)
}
// Add the summary to the annotations field
if event.AnnotationsJSON == nil {
event.AnnotationsJSON = make(map[string]string)
}
event.AnnotationsJSON["ai_summary"] = summary
// Update the Annotations field
b, err := json.Marshal(event.AnnotationsJSON)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to marshal annotations: %v processor: %v", err, c)
}
event.Annotations = string(b)
return wfCtx, "", nil
}
func (c *AISummaryConfig) initHTTPClient() error {
transport := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: c.SkipSSLVerify},
}
if c.Proxy != "" {
proxyURL, err := url.Parse(c.Proxy)
if err != nil {
return fmt.Errorf("failed to parse proxy url: %v", err)
}
transport.Proxy = http.ProxyURL(proxyURL)
}
c.Client = &http.Client{
Timeout: time.Duration(c.Timeout) * time.Millisecond,
Transport: transport,
}
return nil
}
func (c *AISummaryConfig) prepareEventInfo(wfCtx *models.WorkflowContext) (string, error) {
var defs = []string{
"{{$event := .Event}}",
"{{$inputs := .Inputs}}",
}
text := strings.Join(append(defs, c.PromptTemplate), "")
t, err := template.New("prompt").Funcs(template.FuncMap(tplx.TemplateFuncMap)).Parse(text)
if err != nil {
return "", fmt.Errorf("failed to parse prompt template: %v", err)
}
var body bytes.Buffer
err = t.Execute(&body, wfCtx)
if err != nil {
return "", fmt.Errorf("failed to execute prompt template: %v", err)
}
return body.String(), nil
}
func (c *AISummaryConfig) generateAISummary(eventInfo string) (string, error) {
// Build the base request parameters
reqParams := map[string]interface{}{
"model": c.ModelName,
"messages": []Message{
{
Role: "user",
Content: eventInfo,
},
},
}
// Merge custom parameters
for k, v := range c.CustomParams {
converted, err := convertCustomParam(v)
if err != nil {
return "", fmt.Errorf("failed to convert custom param %s: %v", k, err)
}
reqParams[k] = converted
}
// Serialize the request body
jsonData, err := json.Marshal(reqParams)
if err != nil {
return "", fmt.Errorf("failed to marshal request body: %v", err)
}
// Create the HTTP request
req, err := http.NewRequest("POST", c.URL, bytes.NewBuffer(jsonData))
if err != nil {
return "", fmt.Errorf("failed to create request: %v", err)
}
// Set the request headers
req.Header.Set("Authorization", "Bearer "+c.APIKey)
req.Header.Set("Content-Type", "application/json")
for k, v := range c.Headers {
req.Header.Set(k, v)
}
// Send the request
resp, err := c.Client.Do(req)
if err != nil {
return "", fmt.Errorf("failed to send request: %v", err)
}
defer resp.Body.Close()
// Check the response status code
if resp.StatusCode > HTTP_STATUS_SUCCESS_MAX {
body, _ := io.ReadAll(resp.Body)
return "", fmt.Errorf("unexpected status code: %d, body: %s", resp.StatusCode, string(body))
}
// Read the response
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", fmt.Errorf("failed to read response body: %v", err)
}
// Parse the response
var chatResp ChatCompletionResponse
if err := json.Unmarshal(body, &chatResp); err != nil {
return "", fmt.Errorf("failed to unmarshal response: %v", err)
}
if len(chatResp.Choices) == 0 {
return "", fmt.Errorf("no response from AI model")
}
return chatResp.Choices[0].Message.Content, nil
}
// convertCustomParam converts parameters passed in from the frontend to their proper types
func convertCustomParam(value interface{}) (interface{}, error) {
if value == nil {
return nil, nil
}
// If it is a string, try converting it to other types
if str, ok := value.(string); ok {
// Try converting to a number
if f, err := strconv.ParseFloat(str, 64); err == nil {
// Check whether it is an integer
if f == float64(int64(f)) {
return int64(f), nil
}
return f, nil
}
// Try converting to a boolean
if b, err := strconv.ParseBool(str); err == nil {
return b, nil
}
// Try parsing as a JSON array
if strings.HasPrefix(strings.TrimSpace(str), "[") {
var arr []interface{}
if err := json.Unmarshal([]byte(str), &arr); err == nil {
return arr, nil
}
}
// Try parsing as a JSON object
if strings.HasPrefix(strings.TrimSpace(str), "{") {
var obj map[string]interface{}
if err := json.Unmarshal([]byte(str), &obj); err == nil {
return obj, nil
}
}
}
return value, nil
}
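
To make the conversion rules above concrete, here is a runnable sketch that mirrors the same decision ladder on a few illustrative inputs (the convert helper below is a simplified stand-in, not the original function):

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

// convert mirrors convertCustomParam above: strings that look like numbers,
// booleans, JSON arrays or JSON objects are converted, everything else is
// passed through unchanged.
func convert(v interface{}) interface{} {
	s, ok := v.(string)
	if !ok {
		return v
	}
	if f, err := strconv.ParseFloat(s, 64); err == nil {
		if f == float64(int64(f)) {
			return int64(f)
		}
		return f
	}
	if b, err := strconv.ParseBool(s); err == nil {
		return b
	}
	trimmed := strings.TrimSpace(s)
	if strings.HasPrefix(trimmed, "[") || strings.HasPrefix(trimmed, "{") {
		var out interface{}
		if err := json.Unmarshal([]byte(s), &out); err == nil {
			return out
		}
	}
	return s
}

func main() {
	for _, in := range []string{"2000", "0.7", "true", `["a","b"]`, "hello"} {
		fmt.Printf("%q -> %#v\n", in, convert(in))
	}
}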

View File

@@ -1,145 +0,0 @@
package aisummary
import (
"testing"
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/callback"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/stretchr/testify/assert"
)
func TestAISummaryConfig_Process(t *testing.T) {
// Create the test configuration
config := &AISummaryConfig{
HTTPConfig: callback.HTTPConfig{
URL: "https://generativelanguage.googleapis.com/v1beta/openai/chat/completions",
Timeout: 30000,
SkipSSLVerify: true,
Headers: map[string]string{
"Content-Type": "application/json",
},
},
ModelName: "gemini-2.0-flash",
APIKey: "*",
PromptTemplate: "Alert rule: {{$event.RuleName}}\nSeverity: {{$event.Severity}}",
CustomParams: map[string]interface{}{
"temperature": 0.7,
"max_tokens": 2000,
"top_p": 0.9,
},
}
// Create the test event
event := &models.AlertCurEvent{
RuleName: "Test Rule",
Severity: 1,
TagsMap: map[string]string{
"host": "test-host",
},
AnnotationsJSON: map[string]string{
"description": "Test alert",
},
}
// Create the WorkflowContext
wfCtx := &models.WorkflowContext{
Event: event,
Inputs: map[string]string{},
}
// Test template rendering
eventInfo, err := config.prepareEventInfo(wfCtx)
assert.NoError(t, err)
assert.Contains(t, eventInfo, "Test Rule")
assert.Contains(t, eventInfo, "1")
// Test configuration initialization
processor, err := config.Init(config)
assert.NoError(t, err)
assert.NotNil(t, processor)
// Test the Process function
result, _, err := processor.Process(&ctx.Context{}, wfCtx)
assert.NoError(t, err)
assert.NotNil(t, result)
assert.NotEmpty(t, result.Event.AnnotationsJSON["ai_summary"])
// Display the processing result
t.Log("\n=== Processing result ===")
t.Logf("Alert rule: %s", result.Event.RuleName)
t.Logf("Severity: %d", result.Event.Severity)
t.Logf("Tags: %v", result.Event.TagsMap)
t.Logf("Original annotation: %v", result.Event.AnnotationsJSON["description"])
t.Logf("AI summary: %s", result.Event.AnnotationsJSON["ai_summary"])
}
func TestConvertCustomParam(t *testing.T) {
tests := []struct {
name string
input interface{}
expected interface{}
hasError bool
}{
{
name: "nil value",
input: nil,
expected: nil,
hasError: false,
},
{
name: "string number to int64",
input: "123",
expected: int64(123),
hasError: false,
},
{
name: "string float to float64",
input: "123.45",
expected: 123.45,
hasError: false,
},
{
name: "string boolean to bool",
input: "true",
expected: true,
hasError: false,
},
{
name: "string false to bool",
input: "false",
expected: false,
hasError: false,
},
{
name: "JSON array string to slice",
input: `["a", "b", "c"]`,
expected: []interface{}{"a", "b", "c"},
hasError: false,
},
{
name: "JSON object string to map",
input: `{"key": "value", "num": 123}`,
expected: map[string]interface{}{"key": "value", "num": float64(123)},
hasError: false,
},
{
name: "plain string remains string",
input: "hello world",
expected: "hello world",
hasError: false,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
converted, err := convertCustomParam(test.input)
if test.hasError {
assert.Error(t, err)
return
}
assert.NoError(t, err)
assert.Equal(t, test.expected, converted)
})
}
}

View File

@@ -1,110 +0,0 @@
package callback
import (
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/common"
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/utils"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/toolkits/pkg/logger"
)
type HTTPConfig struct {
URL string `json:"url"`
Method string `json:"method,omitempty"`
Body string `json:"body,omitempty"`
Headers map[string]string `json:"header"`
AuthUsername string `json:"auth_username"`
AuthPassword string `json:"auth_password"`
Timeout int `json:"timeout"` // unit: ms
SkipSSLVerify bool `json:"skip_ssl_verify"`
Proxy string `json:"proxy"`
Client *http.Client `json:"-"`
}
// CallbackConfig
type CallbackConfig struct {
HTTPConfig
}
func init() {
models.RegisterProcessor("callback", &CallbackConfig{})
}
func (c *CallbackConfig) Init(settings interface{}) (models.Processor, error) {
result, err := common.InitProcessor[*CallbackConfig](settings)
return result, err
}
func (c *CallbackConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.WorkflowContext, string, error) {
event := wfCtx.Event
if c.Client == nil {
transport := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: c.SkipSSLVerify},
}
if c.Proxy != "" {
proxyURL, err := url.Parse(c.Proxy)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to parse proxy url: %v processor: %v", err, c)
} else {
transport.Proxy = http.ProxyURL(proxyURL)
}
}
c.Client = &http.Client{
Timeout: time.Duration(c.Timeout) * time.Millisecond,
Transport: transport,
}
}
headers := make(map[string]string)
headers["Content-Type"] = "application/json"
for k, v := range c.Headers {
headers[k] = v
}
url, err := utils.TplRender(wfCtx, c.URL)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to render url template: %v processor: %v", err, c)
}
body, err := json.Marshal(event)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to marshal event: %v processor: %v", err, c)
}
req, err := http.NewRequest("POST", url, strings.NewReader(string(body)))
if err != nil {
return wfCtx, "", fmt.Errorf("failed to create request: %v processor: %v", err, c)
}
for k, v := range headers {
req.Header.Set(k, v)
}
if c.AuthUsername != "" && c.AuthPassword != "" {
req.SetBasicAuth(c.AuthUsername, c.AuthPassword)
}
resp, err := c.Client.Do(req)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to send request: %v processor: %v", err, c)
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to read response body: %v processor: %v", err, c)
}
logger.Debugf("callback processor response body: %s", string(b))
return wfCtx, "callback success", nil
}

View File

@@ -1,24 +0,0 @@
package common
import (
"encoding/json"
)
// InitProcessor is a generic helper for initializing processors
// Generics are used to simplify the processor initialization logic
// T must be an implementation of the models.Processor interface
func InitProcessor[T any](settings interface{}) (T, error) {
var zero T
b, err := json.Marshal(settings)
if err != nil {
return zero, err
}
var result T
err = json.Unmarshal(b, &result)
if err != nil {
return zero, err
}
return result, nil
}
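
For context, the helper above simply round-trips an arbitrary settings value through JSON into a concrete config type. A minimal usage sketch with a hypothetical config struct:

package main

import (
	"encoding/json"
	"fmt"
)

type demoConfig struct {
	URL     string `json:"url"`
	Timeout int    `json:"timeout"`
}

// initProcessor mirrors common.InitProcessor: marshal the raw settings
// (typically a map decoded from the DB) and unmarshal them into T.
func initProcessor[T any](settings interface{}) (T, error) {
	var result T
	b, err := json.Marshal(settings)
	if err != nil {
		return result, err
	}
	err = json.Unmarshal(b, &result)
	return result, err
}

func main() {
	raw := map[string]interface{}{"url": "http://example.com/hook", "timeout": 3000}
	cfg, err := initProcessor[*demoConfig](raw)
	fmt.Println(cfg.URL, cfg.Timeout, err) // http://example.com/hook 3000 <nil>
}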

View File

@@ -1,63 +0,0 @@
package eventdrop
import (
"bytes"
"fmt"
"strings"
texttemplate "text/template"
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/common"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/tplx"
"github.com/toolkits/pkg/logger"
)
type EventDropConfig struct {
Content string `json:"content"`
}
func init() {
models.RegisterProcessor("event_drop", &EventDropConfig{})
}
func (c *EventDropConfig) Init(settings interface{}) (models.Processor, error) {
result, err := common.InitProcessor[*EventDropConfig](settings)
return result, err
}
func (c *EventDropConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.WorkflowContext, string, error) {
// This processor makes it possible to filter events with more flexible logic
// It can be used when label filtering and attribute filtering are not expressive enough
// If the template evaluates to true, the event is dropped
event := wfCtx.Event
var defs = []string{
"{{ $event := .Event }}",
"{{ $labels := .Event.TagsMap }}",
"{{ $value := .Event.TriggerValue }}",
"{{ $inputs := .Inputs }}",
}
text := strings.Join(append(defs, c.Content), "")
tpl, err := texttemplate.New("eventdrop").Funcs(tplx.TemplateFuncMap).Parse(text)
if err != nil {
return wfCtx, "", fmt.Errorf("processor failed to parse template: %v processor: %v", err, c)
}
var body bytes.Buffer
if err = tpl.Execute(&body, wfCtx); err != nil {
return wfCtx, "", fmt.Errorf("processor failed to execute template: %v processor: %v", err, c)
}
result := strings.TrimSpace(body.String())
logger.Infof("processor eventdrop result: %v", result)
if result == "true" {
wfCtx.Event = nil
logger.Infof("processor eventdrop drop event: %s", event.Hash)
return wfCtx, "drop event success", nil
}
return wfCtx, "drop event failed", nil
}
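
A concrete, hypothetical drop expression for the processor above: events whose env label equals "test" render "true" and are therefore dropped. The runnable sketch below reproduces the template wiring with plain maps instead of the real event type:

package main

import (
	"bytes"
	"fmt"
	"strings"
	"text/template"
)

func main() {
	// The processor prepends variable definitions before the user content.
	defs := []string{
		"{{ $event := .Event }}",
		"{{ $labels := .Event.TagsMap }}",
	}
	// Hypothetical drop condition: drop events coming from the test environment.
	content := `{{ if eq $labels.env "test" }}true{{ end }}`

	data := map[string]interface{}{
		"Event": map[string]interface{}{"TagsMap": map[string]string{"env": "test"}},
	}

	tpl := template.Must(template.New("eventdrop").Parse(strings.Join(append(defs, content), "")))
	var body bytes.Buffer
	if err := tpl.Execute(&body, data); err != nil {
		panic(err)
	}
	fmt.Println(strings.TrimSpace(body.String()) == "true") // true -> event would be dropped
}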

View File

@@ -1,97 +0,0 @@
package eventupdate
import (
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/callback"
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/common"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/toolkits/pkg/logger"
)
// EventUpdateConfig
type EventUpdateConfig struct {
callback.HTTPConfig
}
func init() {
models.RegisterProcessor("event_update", &EventUpdateConfig{})
}
func (c *EventUpdateConfig) Init(settings interface{}) (models.Processor, error) {
result, err := common.InitProcessor[*EventUpdateConfig](settings)
return result, err
}
func (c *EventUpdateConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.WorkflowContext, string, error) {
event := wfCtx.Event
if c.Client == nil {
transport := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: c.SkipSSLVerify},
}
if c.Proxy != "" {
proxyURL, err := url.Parse(c.Proxy)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to parse proxy url: %v processor: %v", err, c)
} else {
transport.Proxy = http.ProxyURL(proxyURL)
}
}
c.Client = &http.Client{
Timeout: time.Duration(c.Timeout) * time.Millisecond,
Transport: transport,
}
}
headers := make(map[string]string)
headers["Content-Type"] = "application/json"
for k, v := range c.Headers {
headers[k] = v
}
body, err := json.Marshal(event)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to marshal event: %v processor: %v", err, c)
}
req, err := http.NewRequest("POST", c.URL, strings.NewReader(string(body)))
if err != nil {
return wfCtx, "", fmt.Errorf("failed to create request: %v processor: %v", err, c)
}
for k, v := range headers {
req.Header.Set(k, v)
}
if c.AuthUsername != "" && c.AuthPassword != "" {
req.SetBasicAuth(c.AuthUsername, c.AuthPassword)
}
resp, err := c.Client.Do(req)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to send request: %v processor: %v", err, c)
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return nil, "", fmt.Errorf("failed to read response body: %v processor: %v", err, c)
}
logger.Debugf("event update processor response body: %s", string(b))
err = json.Unmarshal(b, &event)
if err != nil {
return wfCtx, "", fmt.Errorf("failed to unmarshal response body: %v processor: %v", err, c)
}
return wfCtx, "", nil
}

View File

@@ -1,197 +0,0 @@
package logic
import (
"bytes"
"fmt"
"strings"
"text/template"
alertCommon "github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/common"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/tplx"
)
// Condition mode constants
const (
ConditionModeExpression = "expression" // expression mode (default)
ConditionModeTags = "tags" // label/attribute mode
)
// IfConfig is the configuration for the If condition processor
type IfConfig struct {
// Condition mode: expression (template expression) or tags (labels/attributes)
Mode string `json:"mode,omitempty"`
// Expression mode configuration
// Condition expression (Go template syntax is supported)
// Example: {{ if eq .Severity 1 }}true{{ end }}
Condition string `json:"condition,omitempty"`
// Label/attribute mode configuration
LabelKeys []models.TagFilter `json:"label_keys,omitempty"` // applicable labels
Attributes []models.TagFilter `json:"attributes,omitempty"` // applicable attributes
// For internal use: the parsed filters
parsedLabelKeys []models.TagFilter `json:"-"`
parsedAttributes []models.TagFilter `json:"-"`
}
func init() {
models.RegisterProcessor("logic.if", &IfConfig{})
}
func (c *IfConfig) Init(settings interface{}) (models.Processor, error) {
result, err := common.InitProcessor[*IfConfig](settings)
if err != nil {
return nil, err
}
// Parse the label filters
if len(result.LabelKeys) > 0 {
// Deep copy to avoid concurrent map writes on cached objects
labelKeysCopy := make([]models.TagFilter, len(result.LabelKeys))
copy(labelKeysCopy, result.LabelKeys)
for i := range labelKeysCopy {
if labelKeysCopy[i].Func == "" {
labelKeysCopy[i].Func = labelKeysCopy[i].Op
}
}
result.parsedLabelKeys, err = models.ParseTagFilter(labelKeysCopy)
if err != nil {
return nil, fmt.Errorf("failed to parse label_keys: %v", err)
}
}
// Parse the attribute filters
if len(result.Attributes) > 0 {
// Deep copy to avoid concurrent map writes on cached objects
attributesCopy := make([]models.TagFilter, len(result.Attributes))
copy(attributesCopy, result.Attributes)
for i := range attributesCopy {
if attributesCopy[i].Func == "" {
attributesCopy[i].Func = attributesCopy[i].Op
}
}
result.parsedAttributes, err = models.ParseTagFilter(attributesCopy)
if err != nil {
return nil, fmt.Errorf("failed to parse attributes: %v", err)
}
}
return result, nil
}
// Process implements the Processor interface (kept for compatibility with the old mode)
func (c *IfConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.WorkflowContext, string, error) {
result, err := c.evaluateCondition(wfCtx)
if err != nil {
return wfCtx, "", fmt.Errorf("if processor: failed to evaluate condition: %v", err)
}
if result {
return wfCtx, "condition matched (true branch)", nil
}
return wfCtx, "condition not matched (false branch)", nil
}
// ProcessWithBranch implements the BranchProcessor interface
func (c *IfConfig) ProcessWithBranch(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.NodeOutput, error) {
result, err := c.evaluateCondition(wfCtx)
if err != nil {
return nil, fmt.Errorf("if processor: failed to evaluate condition: %v", err)
}
output := &models.NodeOutput{
WfCtx: wfCtx,
}
if result {
// Condition is true: take output 0 (the true branch)
branchIndex := 0
output.BranchIndex = &branchIndex
output.Message = "condition matched (true branch)"
} else {
// Condition is false: take output 1 (the false branch)
branchIndex := 1
output.BranchIndex = &branchIndex
output.Message = "condition not matched (false branch)"
}
return output, nil
}
// evaluateCondition evaluates the condition
func (c *IfConfig) evaluateCondition(wfCtx *models.WorkflowContext) (bool, error) {
mode := c.Mode
if mode == "" {
mode = ConditionModeExpression // default to expression mode
}
switch mode {
case ConditionModeTags:
return c.evaluateTagsCondition(wfCtx.Event)
default:
return c.evaluateExpressionCondition(wfCtx)
}
}
// evaluateExpressionCondition evaluates the expression condition
func (c *IfConfig) evaluateExpressionCondition(wfCtx *models.WorkflowContext) (bool, error) {
if c.Condition == "" {
return true, nil
}
// Build the template data
var defs = []string{
"{{ $event := .Event }}",
"{{ $labels := .Event.TagsMap }}",
"{{ $value := .Event.TriggerValue }}",
"{{ $inputs := .Inputs }}",
}
text := strings.Join(append(defs, c.Condition), "")
tpl, err := template.New("if_condition").Funcs(tplx.TemplateFuncMap).Parse(text)
if err != nil {
return false, err
}
var buf bytes.Buffer
if err = tpl.Execute(&buf, wfCtx); err != nil {
return false, err
}
result := strings.TrimSpace(strings.ToLower(buf.String()))
return result == "true" || result == "1", nil
}
// evaluateTagsCondition evaluates the label/attribute condition
func (c *IfConfig) evaluateTagsCondition(event *models.AlertCurEvent) (bool, error) {
// If no filter conditions are configured, return true by default
if len(c.parsedLabelKeys) == 0 && len(c.parsedAttributes) == 0 {
return true, nil
}
// Match labels (TagsMap)
if len(c.parsedLabelKeys) > 0 {
tagsMap := event.TagsMap
if tagsMap == nil {
tagsMap = make(map[string]string)
}
if !alertCommon.MatchTags(tagsMap, c.parsedLabelKeys) {
return false, nil
}
}
// Match attributes (JsonTagsAndValue - all JSON fields)
if len(c.parsedAttributes) > 0 {
attributesMap := event.JsonTagsAndValue()
if !alertCommon.MatchTags(attributesMap, c.parsedAttributes) {
return false, nil
}
}
return true, nil
}

View File

@@ -1,224 +0,0 @@
package logic
import (
"bytes"
"fmt"
"strings"
"text/template"
alertCommon "github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/common"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/tplx"
)
// SwitchCase defines a single Switch branch
type SwitchCase struct {
// Condition mode: expression (template expression) or tags (labels/attributes)
Mode string `json:"mode,omitempty"`
// Expression mode configuration
// Condition expression (Go template syntax is supported)
Condition string `json:"condition,omitempty"`
// Label/attribute mode configuration
LabelKeys []models.TagFilter `json:"label_keys,omitempty"` // applicable labels
Attributes []models.TagFilter `json:"attributes,omitempty"` // applicable attributes
// Branch name (optional, used in logs)
Name string `json:"name,omitempty"`
// For internal use: the parsed filters
parsedLabelKeys []models.TagFilter `json:"-"`
parsedAttributes []models.TagFilter `json:"-"`
}
// SwitchConfig is the configuration for the Switch multi-branch processor
type SwitchConfig struct {
// List of branch conditions
// Matched in order; the first branch that evaluates to true is selected
Cases []SwitchCase `json:"cases"`
// Whether multiple branches may match at the same time (default false: only the first match is taken)
AllowMultiple bool `json:"allow_multiple,omitempty"`
}
func init() {
models.RegisterProcessor("logic.switch", &SwitchConfig{})
}
func (c *SwitchConfig) Init(settings interface{}) (models.Processor, error) {
result, err := common.InitProcessor[*SwitchConfig](settings)
if err != nil {
return nil, err
}
// Parse the label and attribute filters of each case
for i := range result.Cases {
if len(result.Cases[i].LabelKeys) > 0 {
// Deep copy to avoid concurrent map writes on cached objects
labelKeysCopy := make([]models.TagFilter, len(result.Cases[i].LabelKeys))
copy(labelKeysCopy, result.Cases[i].LabelKeys)
for j := range labelKeysCopy {
if labelKeysCopy[j].Func == "" {
labelKeysCopy[j].Func = labelKeysCopy[j].Op
}
}
result.Cases[i].parsedLabelKeys, err = models.ParseTagFilter(labelKeysCopy)
if err != nil {
return nil, fmt.Errorf("failed to parse label_keys for case[%d]: %v", i, err)
}
}
if len(result.Cases[i].Attributes) > 0 {
// Deep copy to avoid concurrent map writes on cached objects
attributesCopy := make([]models.TagFilter, len(result.Cases[i].Attributes))
copy(attributesCopy, result.Cases[i].Attributes)
for j := range attributesCopy {
if attributesCopy[j].Func == "" {
attributesCopy[j].Func = attributesCopy[j].Op
}
}
result.Cases[i].parsedAttributes, err = models.ParseTagFilter(attributesCopy)
if err != nil {
return nil, fmt.Errorf("failed to parse attributes for case[%d]: %v", i, err)
}
}
}
return result, nil
}
// Process implements the Processor interface (kept for compatibility with the old mode)
func (c *SwitchConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.WorkflowContext, string, error) {
index, caseName, err := c.evaluateCases(wfCtx)
if err != nil {
return wfCtx, "", fmt.Errorf("switch processor: failed to evaluate cases: %v", err)
}
if index >= 0 {
if caseName != "" {
return wfCtx, fmt.Sprintf("matched case[%d]: %s", index, caseName), nil
}
return wfCtx, fmt.Sprintf("matched case[%d]", index), nil
}
// Take the default branch (the last output)
return wfCtx, "no case matched, using default branch", nil
}
// ProcessWithBranch implements the BranchProcessor interface
func (c *SwitchConfig) ProcessWithBranch(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.NodeOutput, error) {
index, caseName, err := c.evaluateCases(wfCtx)
if err != nil {
return nil, fmt.Errorf("switch processor: failed to evaluate cases: %v", err)
}
output := &models.NodeOutput{
WfCtx: wfCtx,
}
if index >= 0 {
output.BranchIndex = &index
if caseName != "" {
output.Message = fmt.Sprintf("matched case[%d]: %s", index, caseName)
} else {
output.Message = fmt.Sprintf("matched case[%d]", index)
}
} else {
// The default branch index equals the number of cases (i.e. the last output port)
defaultIndex := len(c.Cases)
output.BranchIndex = &defaultIndex
output.Message = "no case matched, using default branch"
}
return output, nil
}
// evaluateCases evaluates all branch conditions
// Returns the matched branch index and name, or -1 if nothing matched
func (c *SwitchConfig) evaluateCases(wfCtx *models.WorkflowContext) (int, string, error) {
for i := range c.Cases {
matched, err := c.evaluateCaseCondition(&c.Cases[i], wfCtx)
if err != nil {
return -1, "", fmt.Errorf("case[%d] evaluation error: %v", i, err)
}
if matched {
return i, c.Cases[i].Name, nil
}
}
return -1, "", nil
}
// evaluateCaseCondition evaluates a single branch condition
func (c *SwitchConfig) evaluateCaseCondition(caseItem *SwitchCase, wfCtx *models.WorkflowContext) (bool, error) {
mode := caseItem.Mode
if mode == "" {
mode = ConditionModeExpression // default to expression mode
}
switch mode {
case ConditionModeTags:
return c.evaluateTagsCondition(caseItem, wfCtx.Event)
default:
return c.evaluateExpressionCondition(caseItem.Condition, wfCtx)
}
}
// evaluateExpressionCondition evaluates the expression condition
func (c *SwitchConfig) evaluateExpressionCondition(condition string, wfCtx *models.WorkflowContext) (bool, error) {
if condition == "" {
return false, nil
}
var defs = []string{
"{{ $event := .Event }}",
"{{ $labels := .Event.TagsMap }}",
"{{ $value := .Event.TriggerValue }}",
"{{ $inputs := .Inputs }}",
}
text := strings.Join(append(defs, condition), "")
tpl, err := template.New("switch_condition").Funcs(tplx.TemplateFuncMap).Parse(text)
if err != nil {
return false, err
}
var buf bytes.Buffer
if err = tpl.Execute(&buf, wfCtx); err != nil {
return false, err
}
result := strings.TrimSpace(strings.ToLower(buf.String()))
return result == "true" || result == "1", nil
}
// evaluateTagsCondition evaluates the label/attribute condition
func (c *SwitchConfig) evaluateTagsCondition(caseItem *SwitchCase, event *models.AlertCurEvent) (bool, error) {
// If no filter conditions are configured, return false (no match) by default
if len(caseItem.parsedLabelKeys) == 0 && len(caseItem.parsedAttributes) == 0 {
return false, nil
}
// Match labels (TagsMap)
if len(caseItem.parsedLabelKeys) > 0 {
tagsMap := event.TagsMap
if tagsMap == nil {
tagsMap = make(map[string]string)
}
if !alertCommon.MatchTags(tagsMap, caseItem.parsedLabelKeys) {
return false, nil
}
}
// Match attributes (JsonTagsAndValue - all JSON fields)
if len(caseItem.parsedAttributes) > 0 {
attributesMap := event.JsonTagsAndValue()
if !alertCommon.MatchTags(attributesMap, caseItem.parsedAttributes) {
return false, nil
}
}
return true, nil
}

View File

@@ -1,107 +0,0 @@
package relabel
import (
"fmt"
"regexp"
"strings"
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/common"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pushgw/pconf"
"github.com/ccfos/nightingale/v6/pushgw/writer"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/prompb"
)
const (
REPLACE_DOT = "___"
)
// RelabelConfig
type RelabelConfig struct {
SourceLabels []string `json:"source_labels"`
Separator string `json:"separator"`
Regex string `json:"regex"`
RegexCompiled *regexp.Regexp
If string `json:"if"`
IfRegex *regexp.Regexp
Modulus uint64 `json:"modulus"`
TargetLabel string `json:"target_label"`
Replacement string `json:"replacement"`
Action string `json:"action"`
}
func init() {
models.RegisterProcessor("relabel", &RelabelConfig{})
}
func (r *RelabelConfig) Init(settings interface{}) (models.Processor, error) {
result, err := common.InitProcessor[*RelabelConfig](settings)
return result, err
}
func (r *RelabelConfig) Process(ctx *ctx.Context, wfCtx *models.WorkflowContext) (*models.WorkflowContext, string, error) {
sourceLabels := make([]model.LabelName, len(r.SourceLabels))
for i := range r.SourceLabels {
sourceLabels[i] = model.LabelName(strings.ReplaceAll(r.SourceLabels[i], ".", REPLACE_DOT))
}
relabelConfigs := []*pconf.RelabelConfig{
{
SourceLabels: sourceLabels,
Separator: r.Separator,
Regex: r.Regex,
RegexCompiled: r.RegexCompiled,
If: r.If,
IfRegex: r.IfRegex,
Modulus: r.Modulus,
TargetLabel: r.TargetLabel,
Replacement: r.Replacement,
Action: r.Action,
},
}
EventRelabel(wfCtx.Event, relabelConfigs)
return wfCtx, "", nil
}
func EventRelabel(event *models.AlertCurEvent, relabelConfigs []*pconf.RelabelConfig) {
labels := make([]prompb.Label, len(event.TagsJSON))
event.OriginalTagsJSON = make([]string, len(event.TagsJSON))
for i, tag := range event.TagsJSON {
label := strings.SplitN(tag, "=", 2)
if len(label) != 2 {
continue
}
event.OriginalTagsJSON[i] = tag
label[0] = strings.ReplaceAll(string(label[0]), ".", REPLACE_DOT)
labels[i] = prompb.Label{Name: label[0], Value: label[1]}
}
for i := 0; i < len(relabelConfigs); i++ {
if relabelConfigs[i].Replacement == "" {
relabelConfigs[i].Replacement = "$1"
}
if relabelConfigs[i].Separator == "" {
relabelConfigs[i].Separator = ";"
}
if relabelConfigs[i].Regex == "" {
relabelConfigs[i].Regex = "(.*)"
}
}
gotLabels := writer.Process(labels, relabelConfigs...)
event.TagsJSON = make([]string, len(gotLabels))
event.TagsMap = make(map[string]string, len(gotLabels))
for i, label := range gotLabels {
label.Name = strings.ReplaceAll(string(label.Name), REPLACE_DOT, ".")
event.TagsJSON[i] = fmt.Sprintf("%s=%s", label.Name, label.Value)
event.TagsMap[label.Name] = label.Value
}
event.Tags = strings.Join(event.TagsJSON, ",,")
}
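
The REPLACE_DOT substitution above exists because Prometheus label names cannot contain dots while event tag keys can, so dots are swapped out before relabeling and restored afterwards. A small sketch of that round trip (the tag value is hypothetical):

package main

import (
	"fmt"
	"strings"
)

const replaceDot = "___"

func main() {
	tag := "service.name=web"
	kv := strings.SplitN(tag, "=", 2)

	// Make the key a valid Prometheus label name before relabeling.
	labelName := strings.ReplaceAll(kv[0], ".", replaceDot)
	fmt.Println(labelName) // service___name

	// Restore the original key after relabeling.
	restored := strings.ReplaceAll(labelName, replaceDot, ".")
	fmt.Println(restored + "=" + kv[1]) // service.name=web
}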

View File

@@ -1,32 +0,0 @@
package utils
import (
"bytes"
"fmt"
"strings"
"text/template"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/tplx"
)
func TplRender(wfCtx *models.WorkflowContext, content string) (string, error) {
var defs = []string{
"{{ $event := .Event }}",
"{{ $labels := .Event.TagsMap }}",
"{{ $value := .Event.TriggerValue }}",
"{{ $inputs := .Inputs }}",
}
text := strings.Join(append(defs, content), "")
tpl, err := template.New("tpl").Funcs(tplx.TemplateFuncMap).Parse(text)
if err != nil {
return "", fmt.Errorf("failed to parse template: %v", err)
}
var body bytes.Buffer
if err = tpl.Execute(&body, wfCtx); err != nil {
return "", fmt.Errorf("failed to execute template: %v", err)
}
return strings.TrimSpace(body.String()), nil
}

View File

@@ -2,7 +2,6 @@ package process
import (
"bytes"
"encoding/json"
"fmt"
"html/template"
"sort"
@@ -14,18 +13,21 @@ import (
"github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/alert/dispatch"
"github.com/ccfos/nightingale/v6/alert/mute"
"github.com/ccfos/nightingale/v6/alert/pipeline/processor/relabel"
"github.com/ccfos/nightingale/v6/alert/queue"
"github.com/ccfos/nightingale/v6/memsto"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/tplx"
"github.com/ccfos/nightingale/v6/pushgw/writer"
"github.com/prometheus/prometheus/prompb"
"github.com/robfig/cron/v3"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/str"
)
type EventMuteHookFunc func(event *models.AlertCurEvent) bool
type ExternalProcessorsType struct {
ExternalLock sync.RWMutex
Processors map[string]*Processor
@@ -58,9 +60,11 @@ type Processor struct {
pendingsUseByRecover *AlertCurEventMap
inhibit bool
tagsMap map[string]string
tagsArr []string
groupName string
tagsMap map[string]string
tagsArr []string
target string
targetNote string
groupName string
alertRuleCache *memsto.AlertRuleCacheType
TargetCache *memsto.TargetCacheType
@@ -74,6 +78,7 @@ type Processor struct {
HandleFireEventHook HandleEventFunc
HandleRecoverEventHook HandleEventFunc
EventMuteHook EventMuteHookFunc
ScheduleEntry cron.Entry
PromEvalInterval int
@@ -118,6 +123,7 @@ func NewProcessor(engineName string, rule *models.AlertRule, datasourceId int64,
HandleFireEventHook: func(event *models.AlertCurEvent) {},
HandleRecoverEventHook: func(event *models.AlertCurEvent) {},
EventMuteHook: func(event *models.AlertCurEvent) bool { return false },
}
p.mayHandleGroup()
@@ -131,7 +137,7 @@ func (p *Processor) Handle(anomalyPoints []models.AnomalyPoint, from string, inh
p.inhibit = inhibit
cachedRule := p.alertRuleCache.Get(p.rule.Id)
if cachedRule == nil {
logger.Warningf("alert_eval_%d datasource_%d handle error: rule not found, maybe rule has been deleted, anomalyPoints:%+v", p.rule.Id, p.datasourceId, anomalyPoints)
logger.Errorf("rule not found %+v", anomalyPoints)
p.Stats.CounterRuleEvalErrorTotal.WithLabelValues(fmt.Sprintf("%v", p.DatasourceId()), "handle_event", p.BusiGroupCache.GetNameByBusiGroupId(p.rule.GroupId), fmt.Sprintf("%v", p.rule.Id)).Inc()
return
}
@@ -147,40 +153,19 @@ func (p *Processor) Handle(anomalyPoints []models.AnomalyPoint, from string, inh
eventsMap := make(map[string][]*models.AlertCurEvent)
for _, anomalyPoint := range anomalyPoints {
event := p.BuildEvent(anomalyPoint, from, now, ruleHash)
event.NotifyRuleIds = cachedRule.NotifyRuleIds
// Even if the event is muted it is essentially still firing, so always add it to alertingKeys here to prevent a firing event from auto-recovering
hash := event.Hash
alertingKeys[hash] = struct{}{}
// event processor
eventCopy := event.DeepCopy()
event = dispatch.HandleEventPipeline(cachedRule.PipelineConfigs, eventCopy, event, dispatch.EventProcessorCache, p.ctx, cachedRule.Id, "alert_rule")
if event == nil {
logger.Infof("alert_eval_%d datasource_%d is muted drop by pipeline event:%s", p.rule.Id, p.datasourceId, eventCopy.Hash)
continue
}
// event mute
isMuted, detail, muteId := mute.IsMuted(cachedRule, event, p.TargetCache, p.alertMuteCache)
isMuted, detail := mute.IsMuted(cachedRule, event, p.TargetCache, p.alertMuteCache)
if isMuted {
logger.Infof("alert_eval_%d datasource_%d is muted, detail:%s event:%s", p.rule.Id, p.datasourceId, detail, event.Hash)
p.Stats.CounterMuteTotal.WithLabelValues(
fmt.Sprintf("%v", event.GroupName),
fmt.Sprintf("%v", p.rule.Id),
fmt.Sprintf("%v", muteId),
fmt.Sprintf("%v", p.datasourceId),
).Inc()
p.Stats.CounterMuteTotal.WithLabelValues(event.GroupName).Inc()
logger.Debugf("rule_eval:%s event:%v is muted, detail:%s", p.Key(), event, detail)
continue
}
if dispatch.EventMuteHook(event) {
logger.Infof("alert_eval_%d datasource_%d is muted by hook event:%s", p.rule.Id, p.datasourceId, event.Hash)
p.Stats.CounterMuteTotal.WithLabelValues(
fmt.Sprintf("%v", event.GroupName),
fmt.Sprintf("%v", p.rule.Id),
fmt.Sprintf("%v", 0),
fmt.Sprintf("%v", p.datasourceId),
).Inc()
if p.EventMuteHook(event) {
p.Stats.CounterMuteTotal.WithLabelValues(event.GroupName).Inc()
logger.Debugf("rule_eval:%s event:%v is muted by hook", p.Key(), event)
continue
}
@@ -199,7 +184,7 @@ func (p *Processor) Handle(anomalyPoints []models.AnomalyPoint, from string, inh
func (p *Processor) BuildEvent(anomalyPoint models.AnomalyPoint, from string, now int64, ruleHash string) *models.AlertCurEvent {
p.fillTags(anomalyPoint)
p.mayHandleIdent()
hash := Hash(p.rule.Id, p.datasourceId, anomalyPoint)
ds := p.datasourceCache.GetById(p.datasourceId)
var dsName string
@@ -219,6 +204,8 @@ func (p *Processor) BuildEvent(anomalyPoint models.AnomalyPoint, from string, no
event.DatasourceId = p.datasourceId
event.Cluster = dsName
event.Hash = hash
event.TargetIdent = p.target
event.TargetNote = p.targetNote
event.TriggerValue = anomalyPoint.ReadableValue()
event.TriggerValues = anomalyPoint.Values
event.TriggerValuesJson = models.EventTriggerValues{ValuesWithUnit: anomalyPoint.ValuesUnit}
@@ -228,6 +215,7 @@ func (p *Processor) BuildEvent(anomalyPoint models.AnomalyPoint, from string, no
event.Callbacks = p.rule.Callbacks
event.CallbacksJSON = p.rule.CallbacksJSON
event.Annotations = p.rule.Annotations
event.AnnotationsJSON = make(map[string]string)
event.RuleConfig = p.rule.RuleConfig
event.RuleConfigJson = p.rule.RuleConfigJson
event.Severity = anomalyPoint.Severity
@@ -236,18 +224,13 @@ func (p *Processor) BuildEvent(anomalyPoint models.AnomalyPoint, from string, no
event.RecoverConfig = anomalyPoint.RecoverConfig
event.RuleHash = ruleHash
if anomalyPoint.TriggerType == models.TriggerTypeNodata {
event.TriggerValue = "nodata"
ruleConfig := models.RuleQuery{}
json.Unmarshal([]byte(p.rule.RuleConfig), &ruleConfig)
ruleConfig.TriggerType = anomalyPoint.TriggerType
b, _ := json.Marshal(ruleConfig)
event.RuleConfig = string(b)
}
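// Note on the block above: for nodata anomaly points the event carries the fixed
// trigger value "nodata", and the TriggerType is written back into RuleConfig so
// downstream consumers can tell the event came from a nodata check rather than a
// normal threshold hit.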
if err := json.Unmarshal([]byte(p.rule.Annotations), &event.AnnotationsJSON); err != nil {
event.AnnotationsJSON = make(map[string]string) // fall back to an empty map when parsing fails
logger.Warningf("alert_eval_%d datasource_%d unmarshal annotations json failed: %v", p.rule.Id, p.datasourceId, err)
if p.target != "" {
if pt, exist := p.TargetCache.Get(p.target); exist {
pt.GroupNames = p.BusiGroupCache.GetNamesByBusiGroupIds(pt.GroupIds)
event.Target = pt
} else {
logger.Infof("Target[ident: %s] doesn't exist in cache.", p.target)
}
}
if event.TriggerValues != "" && strings.Count(event.TriggerValues, "$") > 1 {
@@ -263,19 +246,6 @@ func (p *Processor) BuildEvent(anomalyPoint models.AnomalyPoint, from string, no
// Run relabel processing immediately after the event is generated
Relabel(p.rule, event)
// Placed after Relabel(p.rule, event) to handle the case where the ident label only appears after relabeling
p.mayHandleIdent(event)
if event.TargetIdent != "" {
if pt, exist := p.TargetCache.Get(event.TargetIdent); exist {
pt.GroupNames = p.BusiGroupCache.GetNamesByBusiGroupIds(pt.GroupIds)
event.Target = pt
} else {
logger.Infof("alert_eval_%d datasource_%d fill event target error, ident: %s doesn't exist in cache.", p.rule.Id, p.datasourceId, event.TargetIdent)
}
}
return event
}
@@ -284,15 +254,44 @@ func Relabel(rule *models.AlertRule, event *models.AlertCurEvent) {
return
}
// need to keep the original label
event.OriginalTags = event.Tags
event.OriginalTagsJSON = event.TagsJSON
if len(rule.EventRelabelConfig) == 0 {
return
}
relabel.EventRelabel(event, rule.EventRelabelConfig)
// need to keep the original label
event.OriginalTags = event.Tags
event.OriginalTagsJSON = make([]string, len(event.TagsJSON))
labels := make([]prompb.Label, len(event.TagsJSON))
for i, tag := range event.TagsJSON {
label := strings.SplitN(tag, "=", 2)
event.OriginalTagsJSON[i] = tag
labels[i] = prompb.Label{Name: label[0], Value: label[1]}
}
for i := 0; i < len(rule.EventRelabelConfig); i++ {
if rule.EventRelabelConfig[i].Replacement == "" {
rule.EventRelabelConfig[i].Replacement = "$1"
}
if rule.EventRelabelConfig[i].Separator == "" {
rule.EventRelabelConfig[i].Separator = ";"
}
if rule.EventRelabelConfig[i].Regex == "" {
rule.EventRelabelConfig[i].Regex = "(.*)"
}
}
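// At this point any empty Replacement/Separator/Regex has been filled with the
// Prometheus-style defaults ("$1", ";", "(.*)"), so writer.Process below can apply
// the relabel rules against the prompb labels built from the event tags.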
// relabel process
relabels := writer.Process(labels, rule.EventRelabelConfig...)
event.TagsJSON = make([]string, len(relabels))
event.TagsMap = make(map[string]string, len(relabels))
for i, label := range relabels {
event.TagsJSON[i] = fmt.Sprintf("%s=%s", label.Name, label.Value)
event.TagsMap[label.Name] = label.Value
}
event.Tags = strings.Join(event.TagsJSON, ",,")
}
func (p *Processor) HandleRecover(alertingKeys map[string]struct{}, now int64, inhibit bool) {
@@ -371,19 +370,19 @@ func (p *Processor) RecoverSingle(byRecover bool, hash string, now int64, value
lastPendingEvent, has := p.pendingsUseByRecover.Get(hash)
if !has {
// No anomaly point was ever produced, so there is nothing to recover
logger.Debugf("alert_eval_%d datasource_%d event:%s do not has pending event, not recover", p.rule.Id, p.datasourceId, event.Hash)
logger.Debugf("rule_eval:%s event:%v do not has pending event, not recover", p.Key(), event)
return
}
if now-lastPendingEvent.LastEvalTime < cachedRule.RecoverDuration {
logger.Debugf("alert_eval_%d datasource_%d event:%s not recover", p.rule.Id, p.datasourceId, event.Hash)
logger.Debugf("rule_eval:%s event:%v not recover", p.Key(), event)
return
}
}
// If a recover condition is configured, recovery cannot happen here; it must rely on a recoverPoint
if event.RecoverConfig.JudgeType != models.Origin && !byRecover {
logger.Debugf("alert_eval_%d datasource_%d event:%s not recover", p.rule.Id, p.datasourceId, event.Hash)
logger.Debugf("rule_eval:%s event:%v not recover", p.Key(), event)
return
}
@@ -412,8 +411,8 @@ func (p *Processor) RecoverSingle(byRecover bool, hash string, now int64, value
func (p *Processor) handleEvent(events []*models.AlertCurEvent) {
var fireEvents []*models.AlertCurEvent
// severity starts at the lowest priority, so it will be replaced once an event with a higher priority is encountered
severity := models.SeverityLowest
// severity starts at 4, so it will be replaced once an event with a higher priority is encountered
severity := 4
for _, event := range events {
if event == nil {
continue
@@ -434,18 +433,17 @@ func (p *Processor) handleEvent(events []*models.AlertCurEvent) {
continue
}
var preEvalTime int64 // evaluation time of the first pending event
var preTriggerTime int64 // trigger time of the first pending event
preEvent, has := p.pendings.Get(event.Hash)
if has {
p.pendings.UpdateLastEvalTime(event.Hash, event.LastEvalTime)
preEvalTime = preEvent.FirstEvalTime
preTriggerTime = preEvent.TriggerTime
} else {
event.FirstEvalTime = event.LastEvalTime
p.pendings.Set(event.Hash, event)
preEvalTime = event.FirstEvalTime
preTriggerTime = event.TriggerTime
}
if event.LastEvalTime-preEvalTime+int64(event.PromEvalInterval) >= int64(p.rule.PromForDuration) {
if event.LastEvalTime-preTriggerTime+int64(event.PromEvalInterval) >= int64(p.rule.PromForDuration) {
fireEvents = append(fireEvents, event)
if severity > event.Severity {
severity = event.Severity
@@ -460,7 +458,7 @@ func (p *Processor) handleEvent(events []*models.AlertCurEvent) {
func (p *Processor) inhibitEvent(events []*models.AlertCurEvent, highSeverity int) {
for _, event := range events {
if p.inhibit && event.Severity > highSeverity {
logger.Debugf("alert_eval_%d datasource_%d event:%s inhibit highSeverity:%d", p.rule.Id, p.datasourceId, event.Hash, highSeverity)
logger.Debugf("rule_eval:%s event:%+v inhibit highSeverity:%d", p.Key(), event, highSeverity)
continue
}
p.fireEvent(event)
@@ -474,18 +472,16 @@ func (p *Processor) fireEvent(event *models.AlertCurEvent) {
return
}
message := "unknown"
defer func() {
logger.Infof("alert_eval_%d datasource_%d event-hash-%s %s", p.rule.Id, p.datasourceId, event.Hash, message)
}()
logger.Debugf("rule_eval:%s event:%+v fire", p.Key(), event)
if fired, has := p.fires.Get(event.Hash); has {
p.fires.UpdateLastEvalTime(event.Hash, event.LastEvalTime)
event.FirstTriggerTime = fired.FirstTriggerTime
p.HandleFireEventHook(event)
if cachedRule.NotifyRepeatStep == 0 {
message = "stalled, rule.notify_repeat_step is 0, no need to repeat notify"
logger.Debugf("rule_eval:%s event:%+v repeat is zero nothing to do", p.Key(), event)
// Repeat notification is not wanted, so just return; nothing to do
// do not need to send alert again
return
}
@@ -494,26 +490,21 @@ func (p *Processor) fireEvent(event *models.AlertCurEvent) {
if cachedRule.NotifyMaxNumber == 0 {
// If the max send count is 0, there is no limit on the number of sends; just keep sending
event.NotifyCurNumber = fired.NotifyCurNumber + 1
message = fmt.Sprintf("fired, notify_repeat_step_matched(%d >= %d + %d * 60) notify_max_number_ignore(#%d / %d)", event.LastEvalTime, fired.LastSentTime, cachedRule.NotifyRepeatStep, event.NotifyCurNumber, cachedRule.NotifyMaxNumber)
p.pushEventToQueue(event)
} else {
// With a max send count configured, check how many notifications have already been sent and whether the limit has been reached
if fired.NotifyCurNumber >= cachedRule.NotifyMaxNumber {
message = fmt.Sprintf("stalled, notify_repeat_step_matched(%d >= %d + %d * 60) notify_max_number_not_matched(#%d / %d)", event.LastEvalTime, fired.LastSentTime, cachedRule.NotifyRepeatStep, fired.NotifyCurNumber, cachedRule.NotifyMaxNumber)
logger.Debugf("rule_eval:%s event:%+v reach max number", p.Key(), event)
return
} else {
event.NotifyCurNumber = fired.NotifyCurNumber + 1
message = fmt.Sprintf("fired, notify_repeat_step_matched(%d >= %d + %d * 60) notify_max_number_matched(#%d / %d)", event.LastEvalTime, fired.LastSentTime, cachedRule.NotifyRepeatStep, event.NotifyCurNumber, cachedRule.NotifyMaxNumber)
p.pushEventToQueue(event)
}
}
} else {
message = fmt.Sprintf("stalled, notify_repeat_step_not_matched(%d < %d + %d * 60)", event.LastEvalTime, fired.LastSentTime, cachedRule.NotifyRepeatStep)
}
} else {
event.NotifyCurNumber = 1
event.FirstTriggerTime = event.TriggerTime
message = fmt.Sprintf("fired, first_trigger_time: %d", event.FirstTriggerTime)
p.HandleFireEventHook(event)
p.pushEventToQueue(event)
}
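// Summarizing the branches above: a repeat notification is pushed only when
// LastEvalTime >= LastSentTime + NotifyRepeatStep*60 and, when NotifyMaxNumber > 0,
// only while the number of notifications already sent is below NotifyMaxNumber;
// the first trigger always notifies and records FirstTriggerTime.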
@@ -527,7 +518,7 @@ func (p *Processor) pushEventToQueue(e *models.AlertCurEvent) {
dispatch.LogEvent(e, "push_queue")
if !queue.EventQueue.PushFront(e) {
logger.Warningf("alert_eval_%d datasource_%d event_push_queue: queue is full, event:%s", p.rule.Id, p.datasourceId, e.Hash)
logger.Warningf("event_push_queue: queue is full, event:%+v", e)
p.Stats.CounterRuleEvalErrorTotal.WithLabelValues(fmt.Sprintf("%v", p.DatasourceId()), "push_event_queue", p.BusiGroupCache.GetNameByBusiGroupId(p.rule.GroupId), fmt.Sprintf("%v", p.rule.Id)).Inc()
}
}
@@ -538,7 +529,7 @@ func (p *Processor) RecoverAlertCurEventFromDb() {
curEvents, err := models.AlertCurEventGetByRuleIdAndDsId(p.ctx, p.rule.Id, p.datasourceId)
if err != nil {
logger.Errorf("alert_eval_%d datasource_%d recover event from db failed, err:%s", p.rule.Id, p.datasourceId, err)
logger.Errorf("recover event from db for rule:%s failed, err:%s", p.Key(), err)
p.Stats.CounterRuleEvalErrorTotal.WithLabelValues(fmt.Sprintf("%v", p.DatasourceId()), "get_recover_event", p.BusiGroupCache.GetNameByBusiGroupId(p.rule.GroupId), fmt.Sprintf("%v", p.rule.Id)).Inc()
p.fires = NewAlertCurEventMap(nil)
return
@@ -547,12 +538,6 @@ func (p *Processor) RecoverAlertCurEventFromDb() {
fireMap := make(map[string]*models.AlertCurEvent)
pendingsUseByRecoverMap := make(map[string]*models.AlertCurEvent)
for _, event := range curEvents {
alertRule := p.alertRuleCache.Get(event.RuleId)
if alertRule == nil {
continue
}
event.NotifyRuleIds = alertRule.NotifyRuleIds
if event.Cate == models.HOST {
target, exists := p.TargetCache.Get(event.TargetIdent)
if exists && target.EngineName != p.EngineName && !(p.ctx.IsCenter && target.EngineName == "") {
@@ -591,9 +576,7 @@ func (p *Processor) fillTags(anomalyPoint models.AnomalyPoint) {
}
// handle rule tags
tags := p.rule.AppendTagsJSON
tags = append(tags, "rulename="+p.rule.Name)
for _, tag := range tags {
for _, tag := range p.rule.AppendTagsJSON {
arr := strings.SplitN(tag, "=", 2)
var defs = []string{
@@ -619,25 +602,27 @@ func (p *Processor) fillTags(anomalyPoint models.AnomalyPoint) {
tagsMap[arr[0]] = body.String()
}
tagsMap["rulename"] = p.rule.Name
p.tagsMap = tagsMap
// handle tagsArr
p.tagsArr = labelMapToArr(tagsMap)
}
func (p *Processor) mayHandleIdent(event *models.AlertCurEvent) {
func (p *Processor) mayHandleIdent() {
// handle ident
if ident, has := event.TagsMap["ident"]; has {
if ident, has := p.tagsMap["ident"]; has {
if target, exists := p.TargetCache.Get(ident); exists {
event.TargetIdent = target.Ident
event.TargetNote = target.Note
p.target = target.Ident
p.targetNote = target.Note
} else {
event.TargetIdent = ident
event.TargetNote = ""
p.target = ident
p.targetNote = ""
}
} else {
event.TargetIdent = ""
event.TargetNote = ""
p.target = ""
p.targetNote = ""
}
}

View File

@@ -39,7 +39,7 @@ func NewRecordRuleContext(rule *models.RecordingRule, datasourceId int64, promCl
rule.CronPattern = fmt.Sprintf("@every %ds", rule.PromEvalInterval)
}
rrc.scheduler = cron.New(cron.WithSeconds(), cron.WithChain(cron.SkipIfStillRunning(cron.DefaultLogger)))
rrc.scheduler = cron.New(cron.WithSeconds())
_, err := rrc.scheduler.AddFunc(rule.CronPattern, func() {
rrc.Eval()
})
@@ -56,13 +56,12 @@ func (rrc *RecordRuleContext) Key() string {
}
func (rrc *RecordRuleContext) Hash() string {
return str.MD5(fmt.Sprintf("%d_%s_%s_%d_%s_%s",
return str.MD5(fmt.Sprintf("%d_%s_%s_%d_%s",
rrc.rule.Id,
rrc.rule.CronPattern,
rrc.rule.PromQl,
rrc.datasourceId,
rrc.rule.AppendTags,
rrc.rule.Name,
))
}

View File

@@ -22,11 +22,10 @@ type Router struct {
AlertStats *astats.Stats
Ctx *ctx.Context
ExternalProcessors *process.ExternalProcessorsType
LogDir string
}
func New(httpConfig httpx.Config, alert aconf.Alert, amc *memsto.AlertMuteCacheType, tc *memsto.TargetCacheType, bgc *memsto.BusiGroupCacheType,
astats *astats.Stats, ctx *ctx.Context, externalProcessors *process.ExternalProcessorsType, logDir string) *Router {
astats *astats.Stats, ctx *ctx.Context, externalProcessors *process.ExternalProcessorsType) *Router {
return &Router{
HTTP: httpConfig,
Alert: alert,
@@ -36,7 +35,6 @@ func New(httpConfig httpx.Config, alert aconf.Alert, amc *memsto.AlertMuteCacheT
AlertStats: astats,
Ctx: ctx,
ExternalProcessors: externalProcessors,
LogDir: logDir,
}
}
@@ -52,9 +50,6 @@ func (rt *Router) Config(r *gin.Engine) {
service.POST("/event", rt.pushEventToQueue)
service.POST("/event-persist", rt.eventPersist)
service.POST("/make-event", rt.makeEvent)
service.GET("/event-detail/:hash", rt.eventDetail)
service.GET("/alert-eval-detail/:id", rt.alertEvalDetail)
service.GET("/trace-logs/:traceid", rt.traceLogs)
}
func Render(c *gin.Context, data, msg interface{}) {

View File

@@ -1,28 +0,0 @@
package router
import (
"fmt"
"github.com/ccfos/nightingale/v6/pkg/loggrep"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
)
func (rt *Router) alertEvalDetail(c *gin.Context) {
id := ginx.UrlParamStr(c, "id")
if !loggrep.IsValidRuleID(id) {
ginx.Bomb(200, "invalid rule id format")
}
instance := fmt.Sprintf("%s:%d", rt.Alert.Heartbeat.IP, rt.HTTP.Port)
keyword := fmt.Sprintf("alert_eval_%s", id)
logs, err := loggrep.GrepLogDir(rt.LogDir, keyword)
ginx.Dangerous(err)
ginx.NewRender(c).Data(loggrep.EventDetailResp{
Logs: logs,
Instance: instance,
}, nil)
}

View File

@@ -13,9 +13,9 @@ import (
"github.com/ccfos/nightingale/v6/alert/queue"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/poster"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
)
@@ -25,7 +25,6 @@ func (rt *Router) pushEventToQueue(c *gin.Context) {
if event.RuleId == 0 {
ginx.Bomb(200, "event is illegal")
}
event.FE2DB()
event.TagsMap = make(map[string]string)
for i := 0; i < len(event.TagsJSON); i++ {
@@ -41,8 +40,8 @@ func (rt *Router) pushEventToQueue(c *gin.Context) {
event.TagsMap[arr[0]] = arr[1]
}
hit, _ := mute.EventMuteStrategy(event, rt.AlertMuteCache)
if hit {
if mute.EventMuteStrategy(event, rt.AlertMuteCache) {
logger.Infof("event_muted: rule_id=%d %s", event.RuleId, event.Hash)
ginx.NewRender(c).Message(nil)
return
@@ -75,7 +74,7 @@ func (rt *Router) pushEventToQueue(c *gin.Context) {
dispatch.LogEvent(event, "http_push_queue")
if !queue.EventQueue.PushFront(event) {
msg := fmt.Sprintf("event:%s push_queue err: queue is full", event.Hash)
msg := fmt.Sprintf("event:%+v push_queue err: queue is full", event)
ginx.Bomb(200, msg)
logger.Warningf(msg)
}
@@ -105,21 +104,21 @@ func (rt *Router) makeEvent(c *gin.Context) {
for i := 0; i < len(events); i++ {
node, err := naming.DatasourceHashRing.GetNode(strconv.FormatInt(events[i].DatasourceId, 10), fmt.Sprintf("%d", events[i].RuleId))
if err != nil {
logger.Warningf("event(rule_id=%d ds_id=%d) get node err:%v", events[i].RuleId, events[i].DatasourceId, err)
logger.Warningf("event:%+v get node err:%v", events[i], err)
ginx.Bomb(200, "event node not exists")
}
if node != rt.Alert.Heartbeat.Endpoint {
err := forwardEvent(events[i], node)
if err != nil {
logger.Warningf("event(rule_id=%d ds_id=%d) forward err:%v", events[i].RuleId, events[i].DatasourceId, err)
logger.Warningf("event:%+v forward err:%v", events[i], err)
ginx.Bomb(200, "event forward error")
}
continue
}
ruleWorker, exists := rt.ExternalProcessors.GetExternalAlertRule(events[i].DatasourceId, events[i].RuleId)
logger.Debugf("handle event(rule_id=%d ds_id=%d) exists:%v", events[i].RuleId, events[i].DatasourceId, exists)
logger.Debugf("handle event:%+v exists:%v", events[i], exists)
if !exists {
ginx.Bomb(200, "rule not exists")
}
@@ -143,6 +142,6 @@ func forwardEvent(event *eventForm, instance string) error {
if err != nil {
return err
}
logger.Infof("forward event: result=succ url=%s code=%d rule_id=%d response=%s", ur, code, event.RuleId, string(res))
logger.Infof("forward event: result=succ url=%s code=%d event:%v response=%s", ur, code, event, string(res))
return nil
}

View File

@@ -1,27 +0,0 @@
package router
import (
"fmt"
"github.com/ccfos/nightingale/v6/pkg/loggrep"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
)
func (rt *Router) eventDetail(c *gin.Context) {
hash := ginx.UrlParamStr(c, "hash")
if !loggrep.IsValidHash(hash) {
ginx.Bomb(200, "invalid hash format")
}
instance := fmt.Sprintf("%s:%d", rt.Alert.Heartbeat.IP, rt.HTTP.Port)
logs, err := loggrep.GrepLogDir(rt.LogDir, hash)
ginx.Dangerous(err)
ginx.NewRender(c).Data(loggrep.EventDetailResp{
Logs: logs,
Instance: instance,
}, nil)
}

View File

@@ -1,28 +0,0 @@
package router
import (
"fmt"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/ccfos/nightingale/v6/pkg/loggrep"
"github.com/gin-gonic/gin"
)
func (rt *Router) traceLogs(c *gin.Context) {
traceId := ginx.UrlParamStr(c, "traceid")
if !loggrep.IsValidTraceID(traceId) {
ginx.Bomb(200, "invalid trace id format")
}
instance := fmt.Sprintf("%s:%d", rt.Alert.Heartbeat.IP, rt.HTTP.Port)
keyword := "trace_id=" + traceId
logs, err := loggrep.GrepLatestLogFiles(rt.LogDir, keyword)
ginx.Dangerous(err)
ginx.NewRender(c).Data(loggrep.EventDetailResp{
Logs: logs,
Instance: instance,
}, nil)
}

View File

@@ -1,7 +1,6 @@
package sender
import (
"fmt"
"html/template"
"net/url"
"strings"
@@ -136,14 +135,14 @@ func (c *DefaultCallBacker) CallBack(ctx CallBackContext) {
func doSendAndRecord(ctx *ctx.Context, url, token string, body interface{}, channel string,
stats *astats.Stats, events []*models.AlertCurEvent) {
res, err := doSend(url, body, channel, stats)
NotifyRecord(ctx, events, 0, channel, token, res, err)
NotifyRecord(ctx, events, channel, token, res, err)
}
func NotifyRecord(ctx *ctx.Context, evts []*models.AlertCurEvent, notifyRuleID int64, channel, target, res string, err error) {
func NotifyRecord(ctx *ctx.Context, evts []*models.AlertCurEvent, channel, target, res string, err error) {
// A single notification may correspond to multiple events, and all of them need to be recorded
notis := make([]*models.NotificationRecord, 0, len(evts))
notis := make([]*models.NotificaitonRecord, 0, len(evts))
for _, evt := range evts {
noti := models.NewNotificationRecord(evt, notifyRuleID, channel, target)
noti := models.NewNotificationRecord(evt, channel, target)
if err != nil {
noti.SetStatus(models.NotiStatusFailure)
noti.SetDetails(err.Error())
@@ -154,26 +153,26 @@ func NotifyRecord(ctx *ctx.Context, evts []*models.AlertCurEvent, notifyRuleID i
}
if !ctx.IsCenter {
err := poster.PostByUrls(ctx, "/v1/n9e/notify-record", notis)
_, err := poster.PostByUrlsWithResp[[]int64](ctx, "/v1/n9e/notify-record", notis)
if err != nil {
logger.Errorf("add notis:%v failed, err: %v", notis, err)
}
return
}
PushNotifyRecords(notis)
if err := models.DB(ctx).CreateInBatches(notis, 100).Error; err != nil {
logger.Errorf("add notis:%v failed, err: %v", notis, err)
}
}
func doSend(url string, body interface{}, channel string, stats *astats.Stats) (string, error) {
stats.AlertNotifyTotal.WithLabelValues(channel).Inc()
start := time.Now()
res, code, err := poster.PostJSON(url, time.Second*5, body, 3)
res = []byte(fmt.Sprintf("duration: %d ms status_code:%d, response:%s", time.Since(start).Milliseconds(), code, string(res)))
if err != nil {
logger.Errorf("%s_sender: result=fail url=%s code=%d error=%v req:%v response=%s", channel, url, code, err, body, string(res))
stats.AlertNotifyErrorTotal.WithLabelValues(channel).Inc()
return string(res), err
return "", err
}
logger.Infof("%s_sender: result=succ url=%s code=%d req:%v response=%s", channel, url, code, body, string(res))
@@ -205,6 +204,6 @@ func PushCallbackEvent(ctx *ctx.Context, webhook *models.Webhook, event *models.
succ := queue.eventQueue.Push(event)
if !succ {
logger.Warningf("Write channel(%s) full, current channel size: %d event:%s", webhook.Url, queue.eventQueue.Len(), event.Hash)
logger.Warningf("Write channel(%s) full, current channel size: %d event:%v", webhook.Url, queue.eventQueue.Len(), event)
}
}

View File

@@ -141,7 +141,7 @@ func updateSmtp(ctx *ctx.Context, ncc *memsto.NotifyConfigCacheType) {
func startEmailSender(ctx *ctx.Context, smtp aconf.SMTPConfig) {
conf := smtp
if conf.Host == "" || conf.Port == 0 {
logger.Debug("SMTP configurations invalid")
logger.Warning("SMTP configurations invalid")
<-mailQuit
return
}
@@ -205,7 +205,7 @@ func startEmailSender(ctx *ctx.Context, smtp aconf.SMTPConfig) {
if err == nil {
msg = "ok"
}
NotifyRecord(ctx, m.events, 0, models.Email, to, msg, err)
NotifyRecord(ctx, m.events, models.Email, to, msg, err)
}
size++

View File

@@ -30,14 +30,12 @@ type IbexCallBacker struct {
func (c *IbexCallBacker) CallBack(ctx CallBackContext) {
if len(ctx.CallBackURL) == 0 || len(ctx.Events) == 0 {
logger.Warningf("event_callback_ibex: url or events is empty, url: %s", ctx.CallBackURL)
return
}
event := ctx.Events[0]
if event.IsRecovered {
logger.Infof("event_callback_ibex: event is recovered, event: %s", event.Hash)
return
}
@@ -45,9 +43,8 @@ func (c *IbexCallBacker) CallBack(ctx CallBackContext) {
}
func (c *IbexCallBacker) handleIbex(ctx *ctx.Context, url string, event *models.AlertCurEvent) {
logger.Infof("event_callback_ibex: url: %s, event: %s", url, event.Hash)
if imodels.DB() == nil && ctx.IsCenter {
logger.Warningf("event_callback_ibex: db is nil, event: %s", event.Hash)
logger.Warning("event_callback_ibex: db is nil")
return
}
@@ -66,53 +63,42 @@ func (c *IbexCallBacker) handleIbex(ctx *ctx.Context, url string, event *models.
id, err := strconv.ParseInt(idstr, 10, 64)
if err != nil {
logger.Errorf("event_callback_ibex: failed to parse url: %s event: %s", url, event.Hash)
logger.Errorf("event_callback_ibex: failed to parse url: %s", url)
return
}
if host == "" {
// If the user did not pass host in the callback url, parse it from the event
host = event.TargetIdent
if host == "" {
if ident, has := event.TagsMap["ident"]; has {
host = ident
}
}
}
if host == "" {
logger.Errorf("event_callback_ibex: failed to get host, id: %d, event: %s", id, event.Hash)
logger.Error("event_callback_ibex: failed to get host")
return
}
CallIbex(ctx, id, host, c.taskTplCache, c.targetCache, c.userCache, event, "")
CallIbex(ctx, id, host, c.taskTplCache, c.targetCache, c.userCache, event)
}
func CallIbex(ctx *ctx.Context, id int64, host string,
taskTplCache *memsto.TaskTplCache, targetCache *memsto.TargetCacheType,
userCache *memsto.UserCacheType, event *models.AlertCurEvent, args string) (int64, error) {
logger.Infof("event_callback_ibex: id: %d, host: %s, args: %s, event: %s", id, host, args, event.Hash)
userCache *memsto.UserCacheType, event *models.AlertCurEvent) {
tpl := taskTplCache.Get(id)
if tpl == nil {
err := fmt.Errorf("event_callback_ibex: no such tpl(%d), event: %s", id, event.Hash)
logger.Errorf("%s", err)
return 0, err
logger.Errorf("event_callback_ibex: no such tpl(%d)", id)
return
}
// check perm
// the (tpl.GroupId, host, account) triple is used to verify permission
can, err := CanDoIbex(tpl.UpdateBy, tpl, host, targetCache, userCache)
can, err := canDoIbex(tpl.UpdateBy, tpl, host, targetCache, userCache)
if err != nil {
err = fmt.Errorf("event_callback_ibex: check perm fail: %v, event: %s", err, event.Hash)
logger.Errorf("%s", err)
return 0, err
logger.Errorf("event_callback_ibex: check perm fail: %v", err)
return
}
if !can {
err = fmt.Errorf("event_callback_ibex: user(%s) no permission, event: %s", tpl.UpdateBy, event.Hash)
logger.Errorf("%s", err)
return 0, err
logger.Errorf("event_callback_ibex: user(%s) no permission", tpl.UpdateBy)
return
}
tagsMap := make(map[string]string)
@@ -136,16 +122,11 @@ func CallIbex(ctx *ctx.Context, id int64, host string,
tags, err := json.Marshal(tagsMap)
if err != nil {
err = fmt.Errorf("event_callback_ibex: failed to marshal tags to json: %v, event: %s", tagsMap, event.Hash)
logger.Errorf("%s", err)
return 0, err
logger.Errorf("event_callback_ibex: failed to marshal tags to json: %v", tagsMap)
return
}
// call ibex
taskArgs := tpl.Args
if args != "" {
taskArgs = args
}
in := models.TaskForm{
Title: tpl.Title + " FH: " + host,
Account: tpl.Account,
@@ -154,7 +135,7 @@ func CallIbex(ctx *ctx.Context, id int64, host string,
Timeout: tpl.Timeout,
Pause: tpl.Pause,
Script: tpl.Script,
Args: taskArgs,
Args: tpl.Args,
Stdin: string(tags),
Action: "start",
Creator: tpl.UpdateBy,
@@ -164,9 +145,8 @@ func CallIbex(ctx *ctx.Context, id int64, host string,
id, err = TaskAdd(in, tpl.UpdateBy, ctx.IsCenter)
if err != nil {
err = fmt.Errorf("event_callback_ibex: call ibex fail: %v, event: %s", err, event.Hash)
logger.Errorf("%s", err)
return 0, err
logger.Errorf("event_callback_ibex: call ibex fail: %v", err)
return
}
// write db
@@ -187,14 +167,11 @@ func CallIbex(ctx *ctx.Context, id int64, host string,
}
if err = record.Add(ctx); err != nil {
err = fmt.Errorf("event_callback_ibex: persist task_record fail: %v, event: %s", err, event.Hash)
logger.Errorf("%s", err)
return id, err
logger.Errorf("event_callback_ibex: persist task_record fail: %v", err)
}
return id, nil
}
func CanDoIbex(username string, tpl *models.TaskTpl, host string, targetCache *memsto.TargetCacheType, userCache *memsto.UserCacheType) (bool, error) {
func canDoIbex(username string, tpl *models.TaskTpl, host string, targetCache *memsto.TargetCacheType, userCache *memsto.UserCacheType) (bool, error) {
user := userCache.GetByUsername(username)
if user != nil && user.IsAdmin() {
return true, nil
@@ -210,7 +187,7 @@ func CanDoIbex(username string, tpl *models.TaskTpl, host string, targetCache *m
func TaskAdd(f models.TaskForm, authUser string, isCenter bool) (int64, error) {
if storage.Cache == nil {
logger.Warningf("event_callback_ibex: redis cache is nil, task: %+v", f)
logger.Warning("event_callback_ibex: redis cache is nil")
return 0, fmt.Errorf("redis cache is nil")
}

View File

@@ -74,7 +74,7 @@ func SendMM(ctx *ctx.Context, message MatterMostMessage, events []*models.AlertC
u, err := url.Parse(message.Tokens[i])
if err != nil {
logger.Errorf("mm_sender: failed to parse error=%v", err)
NotifyRecord(ctx, events, 0, channel, message.Tokens[i], "", err)
NotifyRecord(ctx, events, channel, message.Tokens[i], "", err)
continue
}

View File

@@ -1,75 +0,0 @@
package sender
import (
"errors"
"time"
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/toolkits/pkg/container/list"
"github.com/toolkits/pkg/logger"
)
// Notification record queue, max length 1,000,000
var NotifyRecordQueue = list.NewSafeListLimited(1000000)
// Report the notification record queue size every second
func ReportNotifyRecordQueueSize(stats *astats.Stats) {
for {
time.Sleep(time.Second)
stats.GaugeNotifyRecordQueueSize.Set(float64(NotifyRecordQueue.Len()))
}
}
// Push notification records onto the queue
// Returns an error if the queue is full
func PushNotifyRecords(records []*models.NotificationRecord) error {
for _, record := range records {
if ok := NotifyRecordQueue.PushFront(record); !ok {
logger.Warningf("notify record queue is full, record: %+v", record)
return errors.New("notify record queue is full")
}
}
return nil
}
type NotifyRecordConsumer struct {
ctx *ctx.Context
}
func NewNotifyRecordConsumer(ctx *ctx.Context) *NotifyRecordConsumer {
return &NotifyRecordConsumer{
ctx: ctx,
}
}
// Consume the notification record queue, checking every 100ms whether it is empty
func (c *NotifyRecordConsumer) LoopConsume() {
duration := time.Duration(100) * time.Millisecond
for {
// Wait regardless of whether the queue is empty
time.Sleep(duration)
inotis := NotifyRecordQueue.PopBackBy(100)
if len(inotis) == 0 {
continue
}
// Type conversion; otherwise CreateInBatches would fail
notis := make([]*models.NotificationRecord, 0, len(inotis))
for _, inoti := range inotis {
notis = append(notis, inoti.(*models.NotificationRecord))
}
c.consume(notis)
}
}
func (c *NotifyRecordConsumer) consume(notis []*models.NotificationRecord) {
if err := models.DB(c.ctx).CreateInBatches(notis, 100).Error; err != nil {
logger.Errorf("add notis:%v failed, err: %v", notis, err)
}
}
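// Taken together, this file implements a simple producer/consumer buffer for
// notification records: senders enqueue records via PushNotifyRecords onto the
// bounded SafeListLimited (returning an error once 1,000,000 entries are queued),
// and NotifyRecordConsumer.LoopConsume drains up to 100 records every 100ms and
// persists them with CreateInBatches.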

View File

@@ -6,7 +6,6 @@ import (
"os"
"os/exec"
"time"
"unicode/utf8"
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/models"
@@ -35,7 +34,7 @@ func alertingCallScript(ctx *ctx.Context, stdinBytes []byte, notifyScript models
channel := "script"
stats.AlertNotifyTotal.WithLabelValues(channel).Inc()
fpath := ".notify_script"
fpath := ".notify_scriptt"
if config.Type == 1 {
fpath = config.Content
} else {
@@ -79,7 +78,6 @@ func alertingCallScript(ctx *ctx.Context, stdinBytes []byte, notifyScript models
cmd.Stdout = &buf
cmd.Stderr = &buf
start := time.Now()
err := startCmd(cmd)
if err != nil {
logger.Errorf("event_script_notify_fail: run cmd err: %v", err)
@@ -87,26 +85,7 @@ func alertingCallScript(ctx *ctx.Context, stdinBytes []byte, notifyScript models
}
err, isTimeout := sys.WrapTimeout(cmd, time.Duration(config.Timeout)*time.Second)
res := buf.String()
res = fmt.Sprintf("send_time: %s duration: %d ms %s", time.Now().Format("2006-01-02 15:04:05"), time.Since(start).Milliseconds(), res)
// Truncate output that exceeds the length limit
if len(res) > 512 {
// Make sure truncation happens at a valid UTF-8 character boundary
validLen := 0
for i := 0; i < 512 && i < len(res); {
_, size := utf8.DecodeRuneInString(res[i:])
if i+size > 512 {
break
}
i += size
validLen = i
}
res = res[:validLen] + "..."
}
NotifyRecord(ctx, []*models.AlertCurEvent{event}, 0, channel, cmd.String(), res, buildErr(err, isTimeout))
NotifyRecord(ctx, []*models.AlertCurEvent{event}, channel, cmd.String(), "", buildErr(err, isTimeout))
if isTimeout {
if err == nil {
@@ -121,12 +100,12 @@ func alertingCallScript(ctx *ctx.Context, stdinBytes []byte, notifyScript models
}
if err != nil {
logger.Errorf("event_script_notify_fail: exec script %s occur error: %v, output: %s", fpath, err, res)
logger.Errorf("event_script_notify_fail: exec script %s occur error: %v, output: %s", fpath, err, buf.String())
stats.AlertNotifyErrorTotal.WithLabelValues(channel).Inc()
return
}
logger.Infof("event_script_notify_ok: exec %s output: %s", fpath, res)
logger.Infof("event_script_notify_ok: exec %s output: %s", fpath, buf.String())
}
func buildErr(err error, isTimeout bool) error {

View File

@@ -72,7 +72,7 @@ func SendTelegram(ctx *ctx.Context, message TelegramMessage, events []*models.Al
for i := 0; i < len(message.Tokens); i++ {
if !strings.Contains(message.Tokens[i], "/") && !strings.HasPrefix(message.Tokens[i], "https://") {
logger.Errorf("telegram_sender: result=fail invalid token=%s", message.Tokens[i])
NotifyRecord(ctx, events, 0, channel, message.Tokens[i], "", errors.New("invalid token"))
NotifyRecord(ctx, events, channel, message.Tokens[i], "", errors.New("invalid token"))
continue
}
var url string

View File

@@ -13,53 +13,10 @@ import (
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/poster"
"github.com/toolkits/pkg/logger"
)
// webhookClientCache caches http.Client instances to avoid creating a new Client for every request, which would leak connections
var webhookClientCache sync.Map // key: clientKey (string), value: *http.Client
// Webhooks with the same configuration reuse the same Client
func getWebhookClient(webhook *models.Webhook) *http.Client {
clientKey := webhook.Hash()
if client, ok := webhookClientCache.Load(clientKey); ok {
return client.(*http.Client)
}
// Create a new Client
transport := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: webhook.SkipVerify},
MaxIdleConns: 100,
MaxIdleConnsPerHost: 10,
IdleConnTimeout: 90 * time.Second,
}
if poster.UseProxy(webhook.Url) {
transport.Proxy = http.ProxyFromEnvironment
}
timeout := webhook.Timeout
if timeout <= 0 {
timeout = 10
}
newClient := &http.Client{
Timeout: time.Duration(timeout) * time.Second,
Transport: transport,
}
// Use LoadOrStore for concurrency safety and to avoid creating duplicate clients
actual, loaded := webhookClientCache.LoadOrStore(clientKey, newClient)
if loaded {
return actual.(*http.Client)
}
return newClient
}
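// Clients are keyed by webhook.Hash(), so webhooks with identical configuration
// share a single pooled http.Client; the LoadOrStore call above guarantees that
// concurrent callers racing on the same key end up sharing one instance rather
// than each keeping a freshly built client.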
func sendWebhook(webhook *models.Webhook, event interface{}, stats *astats.Stats) (bool, string, error) {
channel := "webhook"
if webhook.Type == models.RuleCallback {
@@ -72,7 +29,7 @@ func sendWebhook(webhook *models.Webhook, event interface{}, stats *astats.Stats
}
bs, err := json.Marshal(event)
if err != nil {
logger.Errorf("%s alertingWebhook failed to marshal event err:%v", channel, err)
logger.Errorf("%s alertingWebhook failed to marshal event:%+v err:%v", channel, event, err)
return false, "", err
}
@@ -80,7 +37,7 @@ func sendWebhook(webhook *models.Webhook, event interface{}, stats *astats.Stats
req, err := http.NewRequest("POST", conf.Url, bf)
if err != nil {
logger.Warningf("%s alertingWebhook failed to new request event:%s err:%v", channel, string(bs), err)
logger.Warningf("%s alertingWebhook failed to new reques event:%s err:%v", channel, string(bs), err)
return true, "", err
}
@@ -98,13 +55,25 @@ func sendWebhook(webhook *models.Webhook, event interface{}, stats *astats.Stats
req.Header.Set(conf.Headers[i], conf.Headers[i+1])
}
}
// Use the global Client cache to avoid creating a new Client for every request and leaking connections
client := getWebhookClient(conf)
insecureSkipVerify := false
if webhook != nil {
insecureSkipVerify = webhook.SkipVerify
}
if conf.Client == nil {
logger.Warningf("event_%s, event:%s, url: [%s], error: [%s]", channel, string(bs), conf.Url, "client is nil")
conf.Client = &http.Client{
Timeout: time.Duration(conf.Timeout) * time.Second,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipVerify},
},
}
}
stats.AlertNotifyTotal.WithLabelValues(channel).Inc()
var resp *http.Response
var body []byte
resp, err = client.Do(req)
resp, err = conf.Client.Do(req)
if err != nil {
stats.AlertNotifyErrorTotal.WithLabelValues(channel).Inc()
@@ -119,21 +88,19 @@ func sendWebhook(webhook *models.Webhook, event interface{}, stats *astats.Stats
if resp.StatusCode == 429 {
logger.Errorf("event_%s_fail, url: %s, response code: %d, body: %s event:%s", channel, conf.Url, resp.StatusCode, string(body), string(bs))
return true, fmt.Sprintf("status_code:%d, response:%s", resp.StatusCode, string(body)), fmt.Errorf("status code is 429")
return true, string(body), fmt.Errorf("status code is 429")
}
logger.Debugf("event_%s_succ, url: %s, response code: %d, body: %s event:%s", channel, conf.Url, resp.StatusCode, string(body), string(bs))
return false, fmt.Sprintf("status_code:%d, response:%s", resp.StatusCode, string(body)), nil
return false, string(body), nil
}
func SingleSendWebhooks(ctx *ctx.Context, webhooks map[string]*models.Webhook, event *models.AlertCurEvent, stats *astats.Stats) {
for _, conf := range webhooks {
retryCount := 0
for retryCount < 3 {
start := time.Now()
needRetry, res, err := sendWebhook(conf, event, stats)
res = fmt.Sprintf("send_time: %s duration: %d ms %s", time.Now().Format("2006-01-02 15:04:05"), time.Since(start).Milliseconds(), res)
NotifyRecord(ctx, []*models.AlertCurEvent{event}, 0, "webhook", conf.Url, res, err)
NotifyRecord(ctx, []*models.AlertCurEvent{event}, "webhook", conf.Url, res, err)
if !needRetry {
break
}
@@ -145,7 +112,7 @@ func SingleSendWebhooks(ctx *ctx.Context, webhooks map[string]*models.Webhook, e
func BatchSendWebhooks(ctx *ctx.Context, webhooks map[string]*models.Webhook, event *models.AlertCurEvent, stats *astats.Stats) {
for _, conf := range webhooks {
logger.Infof("push event:%s to queue:%v", event.Hash, conf)
logger.Infof("push event:%+v to queue:%v", event, conf)
PushEvent(ctx, conf, event, stats)
}
}
@@ -183,7 +150,7 @@ func PushEvent(ctx *ctx.Context, webhook *models.Webhook, event *models.AlertCur
succ := queue.eventQueue.Push(event)
if !succ {
stats.AlertNotifyErrorTotal.WithLabelValues("push_event_queue").Inc()
logger.Warningf("Write channel(%s) full, current channel size: %d event:%s", webhook.Url, queue.eventQueue.Len(), event.Hash)
logger.Warningf("Write channel(%s) full, current channel size: %d event:%v", webhook.Url, queue.eventQueue.Len(), event)
}
}
@@ -202,10 +169,8 @@ func StartConsumer(ctx *ctx.Context, queue *WebhookQueue, popSize int, webhook *
retryCount := 0
for retryCount < webhook.RetryCount {
start := time.Now()
needRetry, res, err := sendWebhook(webhook, events, stats)
res = fmt.Sprintf("send_time: %s duration: %d ms %s", time.Now().Format("2006-01-02 15:04:05"), time.Since(start).Milliseconds(), res)
go NotifyRecord(ctx, events, 0, "webhook", webhook.Url, res, err)
go NotifyRecord(ctx, events, "webhook", webhook.Url, res, err)
if !needRetry {
break
}

View File

@@ -1,32 +1,20 @@
package cconf
import (
"time"
"github.com/ccfos/nightingale/v6/pkg/httpx"
)
import "time"
type Center struct {
Plugins []Plugin
MetricsYamlFile string
OpsYamlFile string
BuiltinIntegrationsDir string
I18NHeaderKey string
MetricDesc MetricDescType
AnonymousAccess AnonymousAccess
UseFileAssets bool
FlashDuty FlashDuty
EventHistoryGroupView bool
CleanNotifyRecordDay int
CleanPipelineExecutionDay int
MigrateBusiGroupLabel bool
RSA httpx.RSAConfig
AIAgent AIAgent
}
type AIAgent struct {
Enable bool `toml:"Enable"`
SkillsPath string `toml:"SkillsPath"`
Plugins []Plugin
MetricsYamlFile string
OpsYamlFile string
BuiltinIntegrationsDir string
I18NHeaderKey string
MetricDesc MetricDescType
AnonymousAccess AnonymousAccess
UseFileAssets bool
FlashDuty FlashDuty
EventHistoryGroupView bool
CleanNotifyRecordDay int
MigrateBusiGroupLabel bool
}
type Plugin struct {

View File

@@ -15,25 +15,9 @@ type Operation struct {
}
type Ops struct {
Name string `yaml:"name" json:"name"`
Cname string `yaml:"cname" json:"cname"`
Ops []SingleOp `yaml:"ops" json:"ops"`
}
// SingleOp: Name is the op name, Cname is the display name, English by default
type SingleOp struct {
Name string `yaml:"name" json:"name"`
Cname string `yaml:"cname" json:"cname"`
}
func TransformNames(name []string, nameToName map[string]string) []string {
var ret []string
for _, n := range name {
if v, has := nameToName[n]; has {
ret = append(ret, v)
}
}
return ret
Name string `yaml:"name" json:"name"`
Cname string `yaml:"cname" json:"cname"`
Ops []string `yaml:"ops" json:"ops"`
}
func LoadOpsYaml(configDir string, opsYamlFile string) error {
@@ -55,8 +39,8 @@ func LoadOpsYaml(configDir string, opsYamlFile string) error {
return file.ReadYaml(fp, &Operations)
}
func GetAllOps(ops []Ops) []SingleOp {
var ret []SingleOp
func GetAllOps(ops []Ops) []string {
var ret []string
for _, op := range ops {
ret = append(ret, op.Ops...)
}
@@ -64,7 +48,7 @@ func GetAllOps(ops []Ops) []SingleOp {
}
func MergeOperationConf() error {
var opsBuiltIn Operation
opsBuiltIn := Operation{}
err := yaml.Unmarshal([]byte(builtInOps), &opsBuiltIn)
if err != nil {
return fmt.Errorf("cannot parse builtInOps: %s", err.Error())
@@ -85,229 +69,138 @@ func MergeOperationConf() error {
const (
builtInOps = `
ops:
- name: Infrastructure
cname: Infrastructure
- name: dashboards
cname: 仪表盘
ops:
- name: /targets
cname: Host - View
- name: /targets/put
cname: Host - Modify
- name: /targets/del
cname: Host - Delete
- name: /targets/bind
cname: Host - Bind Uncategorized
- "/dashboards"
- "/dashboards/add"
- "/dashboards/put"
- "/dashboards/del"
- "/embedded-dashboards/put"
- "/embedded-dashboards"
- "/public-dashboards"
- name: Explorer
cname: Explorer
- name: alert
cname: 告警规则
ops:
- name: /metric/explorer
cname: Metrics Explorer
- name: /object/explorer
cname: Quick View
- name: /metrics-built-in
cname: Built-in Metric - View
- name: /builtin-metrics/add
cname: Built-in Metric - Add
- name: /builtin-metrics/put
cname: Built-in Metric - Modify
- name: /builtin-metrics/del
cname: Built-in Metric - Delete
- name: /recording-rules
cname: Recording Rule - View
- name: /recording-rules/add
cname: Recording Rule - Add
- name: /recording-rules/put
cname: Recording Rule - Modify
- name: /recording-rules/del
cname: Recording Rule - Delete
- name: /log/explorer
cname: Logs Explorer
- name: /log/index-patterns # The frontend has a page for managing index patterns, so a permission point is needed to control it; this should later become a side drawer
cname: Index Pattern - View
- name: /log/index-patterns/add
cname: Index Pattern - Add
- name: /log/index-patterns/put
cname: Index Pattern - Modify
- name: /log/index-patterns/del
cname: Index Pattern - Delete
- name: /dashboards
cname: Dashboard - View
- name: /dashboards/add
cname: Dashboard - Add
- name: /dashboards/put
cname: Dashboard - Modify
- name: /dashboards/del
cname: Dashboard - Delete
- name: /public-dashboards
cname: Dashboard - View Public
- "/alert-rules"
- "/alert-rules/add"
- "/alert-rules/put"
- "/alert-rules/del"
- name: alerting
cname: Alerting
- name: alert-mutes
cname: 告警静默管理
ops:
- name: /alert-rules
cname: Alerting Rule - View
- name: /alert-rules/add
cname: Alerting Rule - Add
- name: /alert-rules/put
cname: Alerting Rule - Modify
- name: /alert-rules/del
cname: Alerting Rule - Delete
- name: /alert-mutes
cname: Mutting Rule - View
- name: /alert-mutes/add
cname: Mutting Rule - Add
- name: /alert-mutes/put
cname: Mutting Rule - Modify
- name: /alert-mutes/del
cname: Mutting Rule - Delete
- name: /alert-subscribes
cname: Subscribing Rule - View
- name: /alert-subscribes/add
cname: Subscribing Rule - Add
- name: /alert-subscribes/put
cname: Subscribing Rule - Modify
- name: /alert-subscribes/del
cname: Subscribing Rule - Delete
- name: /job-tpls
cname: Self-healing-Script - View
- name: /job-tpls/add
cname: Self-healing-Script - Add
- name: /job-tpls/put
cname: Self-healing-Script - Modify
- name: /job-tpls/del
cname: Self-healing-Script - Delete
- name: /job-tasks
cname: Self-healing-Job - View
- name: /job-tasks/add
cname: Self-healing-Job - Add
- name: /job-tasks/put
cname: Self-healing-Job - Modify
- name: /alert-cur-events
cname: Active Event - View
- name: /alert-cur-events/del
cname: Active Event - Delete
- name: /alert-his-events
cname: Historical Event - View
- name: Notification
cname: Notification
- "/alert-mutes"
- "/alert-mutes/add"
- "/alert-mutes/put"
- "/alert-mutes/del"
- name: alert-subscribes
cname: 告警订阅管理
ops:
- name: /notification-rules
cname: Notification Rule - View
- name: /notification-rules/add
cname: Notification Rule - Add
- name: /notification-rules/put
cname: Notification Rule - Modify
- name: /notification-rules/del
cname: Notification Rule - Delete
- name: /notification-channels
cname: Media Type - View
- name: /notification-channels/add
cname: Media Type - Add
- name: /notification-channels/put
cname: Media Type - Modify
- name: /notification-channels/del
cname: Media Type - Delete
- name: /notification-templates
cname: Message Template - View
- name: /notification-templates/add
cname: Message Template - Add
- name: /notification-templates/put
cname: Message Template - Modify
- name: /notification-templates/del
cname: Message Template - Delete
- name: /event-pipelines
cname: Event Pipeline - View
- name: /event-pipelines/add
cname: Event Pipeline - Add
- name: /event-pipelines/put
cname: Event Pipeline - Modify
- name: /event-pipelines/del
cname: Event Pipeline - Delete
- name: /help/notification-settings # Controls whether the legacy notification settings menu is shown
cname: Notification Settings - View
- name: /help/notification-tpls # Controls whether the legacy notification templates menu is shown
cname: Notification Templates - View
- "/alert-subscribes"
- "/alert-subscribes/add"
- "/alert-subscribes/put"
- "/alert-subscribes/del"
- name: Integrations
cname: Integrations
- name: alert-events
cname: 告警事件管理
ops:
- name: /datasources # Controls whether the datasource list menu is visible; only Admin can modify or delete datasources
cname: Data Source - View
- name: /components
cname: Component - View
- name: /components/add
cname: Component - Add
- name: /components/put
cname: Component - Modify
- name: /components/del
cname: Component - Delete
- name: /embedded-products
cname: Embedded Product - View
- name: /embedded-product/add
cname: Embedded Product - Add
- name: /embedded-product/put
cname: Embedded Product - Modify
- name: /embedded-product/delete
cname: Embedded Product - Delete
- "/alert-cur-events"
- "/alert-cur-events/del"
- "/alert-his-events"
- name: Organization
cname: Organization
- name: recording-rules
cname: 记录规则管理
ops:
- name: /users
cname: User - View
- name: /users/add
cname: User - Add
- name: /users/put
cname: User - Modify
- name: /users/del
cname: User - Delete
- name: /user-groups
cname: Team - View
- name: /user-groups/add
cname: Team - Add
- name: /user-groups/put
cname: Team - Modify
- name: /user-groups/del
cname: Team - Delete
- name: /busi-groups
cname: Business Group - View
- name: /busi-groups/add
cname: Business Group - Add
- name: /busi-groups/put
cname: Business Group - Modify
- name: /busi-groups/del
cname: Business Group - Delete
- name: /roles
cname: Role - View
- name: /roles/add
cname: Role - Add
- name: /roles/put
cname: Role - Modify
- name: /roles/del
cname: Role - Delete
- "/recording-rules"
- "/recording-rules/add"
- "/recording-rules/put"
- "/recording-rules/del"
- name: System Settings
cname: System Settings
- name: metric
cname: 时序指标
ops:
- name: /system/site-settings # Only controls whether the menu is shown; only Admin can modify or delete
cname: View Site Settings
- name: /system/variable-settings
cname: View Variable Settings
- name: /system/sso-settings
cname: View SSO Settings
- name: /system/alerting-engines
cname: View Alerting Engines
- name: /system/version
cname: View Product Version
- name: /ai-config/agents
cname: AI Config - Agents
- name: /ai-config/llm-configs
cname: AI Config - LLM Configs
- name: /ai-config/skills
cname: AI Config - Skills
- name: /ai-config/mcp-servers
cname: AI Config - MCP Servers
- "/metric/explorer"
- "/object/explorer"
- name: log
cname: 日志分析
ops:
- "/log/explorer"
- "/log/index-patterns"
- name: targets
cname: 基础设施
ops:
- "/targets"
- "/targets/add"
- "/targets/put"
- "/targets/del"
- "/targets/bind"
- name: job
cname: 任务管理
ops:
- "/job-tpls"
- "/job-tpls/add"
- "/job-tpls/put"
- "/job-tpls/del"
- "/job-tasks"
- "/job-tasks/add"
- "/job-tasks/put"
- "/ibex-settings"
- name: user
cname: 用户管理
ops:
- "/users"
- "/user-groups"
- "/user-groups/add"
- "/user-groups/put"
- "/user-groups/del"
- name: permissions
cname: 权限管理
ops:
- "/permissions"
- name: busi-groups
cname: 业务分组管理
ops:
- "/busi-groups"
- "/busi-groups/add"
- "/busi-groups/put"
- "/busi-groups/del"
- name: builtin-metrics
cname: 指标视图
ops:
- "/metrics-built-in"
- "/builtin-metrics/add"
- "/builtin-metrics/put"
- "/builtin-metrics/del"
- name: built-in-components
cname: 模版中心
ops:
- "/built-in-components"
- "/built-in-components/add"
- "/built-in-components/put"
- "/built-in-components/del"
- name: system
cname: 系统信息
ops:
- "/help/variable-configs"
- "/help/version"
- "/help/servers"
- "/help/source"
- "/help/sso"
- "/help/notification-tpls"
- "/help/notification-settings"
- "/help/migrate"
- "/site-settings"
`
)

View File

@@ -25,40 +25,4 @@ var Plugins = []Plugin{
Type: "tdengine",
TypeName: "TDengine",
},
{
Id: 5,
Category: "logging",
Type: "ck",
TypeName: "ClickHouse",
},
{
Id: 6,
Category: "timeseries",
Type: "mysql",
TypeName: "MySQL",
},
{
Id: 7,
Category: "timeseries",
Type: "pgsql",
TypeName: "PostgreSQL",
},
{
Id: 8,
Category: "logging",
Type: "doris",
TypeName: "Doris",
},
{
Id: 9,
Category: "logging",
Type: "opensearch",
TypeName: "OpenSearch",
},
{
Id: 10,
Category: "logging",
Type: "victorialogs",
TypeName: "VictoriaLogs",
},
}

View File

@@ -2,13 +2,8 @@ package center
import (
"context"
"encoding/json"
"fmt"
"github.com/ccfos/nightingale/v6/dscache"
"github.com/toolkits/pkg/logger"
"github.com/ccfos/nightingale/v6/alert"
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/alert/dispatch"
@@ -16,6 +11,7 @@ import (
alertrt "github.com/ccfos/nightingale/v6/alert/router"
"github.com/ccfos/nightingale/v6/center/cconf"
"github.com/ccfos/nightingale/v6/center/cconf/rsa"
"github.com/ccfos/nightingale/v6/center/cstats"
"github.com/ccfos/nightingale/v6/center/integration"
"github.com/ccfos/nightingale/v6/center/metas"
centerrt "github.com/ccfos/nightingale/v6/center/router"
@@ -31,13 +27,14 @@ import (
"github.com/ccfos/nightingale/v6/pkg/httpx"
"github.com/ccfos/nightingale/v6/pkg/i18nx"
"github.com/ccfos/nightingale/v6/pkg/logx"
"github.com/ccfos/nightingale/v6/pkg/macros"
"github.com/ccfos/nightingale/v6/pkg/version"
"github.com/ccfos/nightingale/v6/prom"
"github.com/ccfos/nightingale/v6/pushgw/idents"
pushgwrt "github.com/ccfos/nightingale/v6/pushgw/router"
"github.com/ccfos/nightingale/v6/pushgw/writer"
"github.com/ccfos/nightingale/v6/storage"
"github.com/ccfos/nightingale/v6/tdengine"
"github.com/flashcatcloud/ibex/src/cmd/ibex"
)
@@ -62,6 +59,7 @@ func Initialize(configDir string, cryptoKey string) (func(), error) {
}
i18nx.Init(configDir)
cstats.Init()
flashduty.Init(config.Center.FlashDuty)
db, err := storage.New(config.DB)
@@ -87,21 +85,11 @@ func Initialize(configDir string, cryptoKey string) (func(), error) {
}
metas := metas.New(redis)
idents := idents.New(ctx, redis, config.Pushgw)
idents := idents.New(ctx, redis)
syncStats := memsto.NewSyncStats()
alertStats := astats.NewSyncStats()
if config.Center.MigrateBusiGroupLabel || models.CanMigrateBg(ctx) {
models.MigrateBg(ctx, config.Pushgw.BusiGroupLabelKey)
}
if models.CanMigrateEP(ctx) {
models.MigrateEP(ctx)
}
// Initialize siteUrl; set a default value if it is empty
InitSiteUrl(ctx, config.Alert.Heartbeat.IP, config.HTTP.Port)
configCache := memsto.NewConfigCache(ctx, syncStats, config.HTTP.RSA.RSAPrivateKey, config.HTTP.RSA.RSAPassWord)
busiGroupCache := memsto.NewBusiGroupCache(ctx, syncStats)
targetCache := memsto.NewTargetCache(ctx, syncStats, redis)
@@ -113,35 +101,35 @@ func Initialize(configDir string, cryptoKey string) (func(), error) {
userGroupCache := memsto.NewUserGroupCache(ctx, syncStats)
taskTplCache := memsto.NewTaskTplCache(ctx)
configCvalCache := memsto.NewCvalCache(ctx, syncStats)
notifyRuleCache := memsto.NewNotifyRuleCache(ctx, syncStats)
notifyChannelCache := memsto.NewNotifyChannelCache(ctx, syncStats)
messageTemplateCache := memsto.NewMessageTemplateCache(ctx, syncStats)
userTokenCache := memsto.NewUserTokenCache(ctx, syncStats)
sso := sso.Init(config.Center, ctx, configCache)
promClients := prom.NewPromClient(ctx)
dispatch.InitRegisterQueryFunc(promClients)
externalProcessors := process.NewExternalProcessors()
tdengineClients := tdengine.NewTdengineClient(ctx, config.Alert.Heartbeat)
macros.RegisterMacro(macros.MacroInVain)
dscache.Init(ctx, false)
alert.Start(config.Alert, config.Pushgw, syncStats, alertStats, externalProcessors, targetCache, busiGroupCache, alertMuteCache, alertRuleCache, notifyConfigCache, taskTplCache, dsCache, ctx, promClients, userCache, userGroupCache, notifyRuleCache, notifyChannelCache, messageTemplateCache, configCvalCache)
externalProcessors := process.NewExternalProcessors()
alert.Start(config.Alert, config.Pushgw, syncStats, alertStats, externalProcessors, targetCache, busiGroupCache, alertMuteCache, alertRuleCache, notifyConfigCache, taskTplCache, dsCache, ctx, promClients, tdengineClients, userCache, userGroupCache)
writers := writer.NewWriters(config.Pushgw)
go version.GetGithubVersion()
go cron.CleanNotifyRecord(ctx, config.Center.CleanNotifyRecordDay)
go cron.CleanPipelineExecution(ctx, config.Center.CleanPipelineExecutionDay)
alertrtRouter := alertrt.New(config.HTTP, config.Alert, alertMuteCache, targetCache, busiGroupCache, alertStats, ctx, externalProcessors, config.Log.Dir)
alertrtRouter := alertrt.New(config.HTTP, config.Alert, alertMuteCache, targetCache, busiGroupCache, alertStats, ctx, externalProcessors)
centerRouter := centerrt.New(config.HTTP, config.Center, config.Alert, config.Ibex,
cconf.Operations, dsCache, notifyConfigCache, promClients,
redis, sso, ctx, metas, idents, targetCache, userCache, userGroupCache, userTokenCache, config.Log.Dir)
cconf.Operations, dsCache, notifyConfigCache, promClients, tdengineClients,
redis, sso, ctx, metas, idents, targetCache, userCache, userGroupCache)
pushgwRouter := pushgwrt.New(config.HTTP, config.Pushgw, config.Alert, targetCache, busiGroupCache, idents, metas, writers, ctx)
go func() {
if config.Center.MigrateBusiGroupLabel || models.CanMigrateBg(ctx) {
models.MigrateBg(ctx, pushgwRouter.Pushgw.BusiGroupLabelKey)
}
}()
r := httpx.GinEngine(config.Global.RunMode, config.HTTP, configCvalCache.PrintBodyPaths, configCvalCache.PrintAccessLog)
centerRouter.Config(r)
@@ -166,67 +154,3 @@ func Initialize(configDir string, cryptoKey string) (func(), error) {
httpClean()
}, nil
}
// InitSiteUrl initializes site_url in site_info; if it is empty, the server IP and port are used as the default value
func InitSiteUrl(ctx *ctx.Context, serverIP string, serverPort int) {
// Build the default SiteUrl
defaultSiteUrl := fmt.Sprintf("http://%s:%d", serverIP, serverPort)
// Fetch the existing site_info config
siteInfoStr, err := models.ConfigsGet(ctx, "site_info")
if err != nil {
logger.Errorf("failed to get site_info config: %v", err)
return
}
// If site_info does not exist, create a new one
if siteInfoStr == "" {
newSiteInfo := memsto.SiteInfo{
SiteUrl: defaultSiteUrl,
}
siteInfoBytes, err := json.Marshal(newSiteInfo)
if err != nil {
logger.Errorf("failed to marshal site_info: %v", err)
return
}
err = models.ConfigsSet(ctx, "site_info", string(siteInfoBytes))
if err != nil {
logger.Errorf("failed to set site_info: %v", err)
return
}
logger.Infof("initialized site_url with default value: %s", defaultSiteUrl)
return
}
// Check the site_url field in the existing site_info
var existingSiteInfo memsto.SiteInfo
err = json.Unmarshal([]byte(siteInfoStr), &existingSiteInfo)
if err != nil {
logger.Errorf("failed to unmarshal site_info: %v", err)
return
}
// If site_url already has a value, no initialization is needed
if existingSiteInfo.SiteUrl != "" {
return
}
// Set site_url
existingSiteInfo.SiteUrl = defaultSiteUrl
siteInfoBytes, err := json.Marshal(existingSiteInfo)
if err != nil {
logger.Errorf("failed to marshal updated site_info: %v", err)
return
}
err = models.ConfigsSet(ctx, "site_info", string(siteInfoBytes))
if err != nil {
logger.Errorf("failed to update site_info: %v", err)
return
}
logger.Infof("initialized site_url with default value: %s", defaultSiteUrl)
}

View File

@@ -6,49 +6,40 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
const (
namespace = "n9e"
subsystem = "center"
)
const Service = "n9e-center"
var (
uptime = prometheus.NewCounter(
labels = []string{"service", "code", "path", "method"}
uptime = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "uptime",
Help: "HTTP service uptime.",
},
Name: "uptime",
Help: "HTTP service uptime.",
}, []string{"service"},
)
RequestCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "http_request_count_total",
Help: "Total number of HTTP requests made.",
}, labels,
)
RequestDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Buckets: prometheus.DefBuckets,
Name: "http_request_duration_seconds",
Help: "HTTP request latencies in seconds.",
}, []string{"code", "path", "method"},
)
RedisOperationLatency = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "redis_operation_latency_seconds",
Help: "Histogram of latencies for Redis operations",
Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5},
},
[]string{"operation", "status"},
Buckets: []float64{.01, .1, 1, 10},
Name: "http_request_duration_seconds",
Help: "HTTP request latencies in seconds.",
}, labels,
)
)
func init() {
func Init() {
// Register the summary and the histogram with Prometheus's default registry.
prometheus.MustRegister(
uptime,
RequestCounter,
RequestDuration,
RedisOperationLatency,
)
go recordUptime()
@@ -57,6 +48,6 @@ func init() {
// recordUptime increases service uptime per second.
func recordUptime() {
for range time.Tick(time.Second) {
uptime.Inc()
uptime.WithLabelValues(Service).Inc()
}
}

View File

@@ -3,15 +3,11 @@ package integration
import (
"encoding/json"
"path"
"sort"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/pkg/errors"
"github.com/toolkits/pkg/container/set"
"github.com/toolkits/pkg/file"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/runner"
@@ -19,32 +15,13 @@ import (
const SYSTEM = "system"
var BuiltinPayloadInFile *BuiltinPayloadInFileType
type BuiltinPayloadInFileType struct {
Data map[uint64]map[string]map[string][]*models.BuiltinPayload // map[component_id]map[type]map[cate][]*models.BuiltinPayload
IndexData map[int64]*models.BuiltinPayload // map[uuid]payload
BuiltinMetrics map[string]*models.BuiltinMetric
}
func Init(ctx *ctx.Context, builtinIntegrationsDir string) {
BuiltinPayloadInFile = NewBuiltinPayloadInFileType()
err := models.InitBuiltinPayloads(ctx)
if err != nil {
logger.Warning("init old builtinPayloads fail ", err)
return
}
if res, err := models.ConfigsSelectByCkey(ctx, "disable_integration_init"); err != nil {
logger.Error("fail to get value 'disable_integration_init' from configs", err)
return
} else if len(res) != 0 {
logger.Info("disable_integration_init is set, skip integration init")
return
}
fp := builtinIntegrationsDir
if fp == "" {
fp = path.Join(runner.Cwd, "integrations")
@@ -124,13 +101,13 @@ func Init(ctx *ctx.Context, builtinIntegrationsDir string) {
component.ID = old.ID
}
// delete uuid is empty
// delete uuid is emtpy
err = models.DB(ctx).Exec("delete from builtin_payloads where uuid = 0 and type != 'collect' and (updated_by = 'system' or updated_by = '')").Error
if err != nil {
logger.Warning("delete builtin payloads fail ", err)
}
// delete builtin metrics uuid is empty
// delete builtin metrics uuid is emtpy
err = models.DB(ctx).Exec("delete from builtin_metrics where uuid = 0 and (updated_by = 'system' or updated_by = '')").Error
if err != nil {
logger.Warning("delete builtin metrics fail ", err)
@@ -161,10 +138,11 @@ func Init(ctx *ctx.Context, builtinIntegrationsDir string) {
}
newAlerts := []models.AlertRule{}
writeAlertFileFlag := false
for _, alert := range alerts {
if alert.UUID == 0 {
time.Sleep(time.Microsecond)
alert.UUID = time.Now().UnixMicro()
writeAlertFileFlag = true
alert.UUID = time.Now().UnixNano()
}
newAlerts = append(newAlerts, alert)
@@ -183,13 +161,47 @@ func Init(ctx *ctx.Context, builtinIntegrationsDir string) {
Tags: alert.AppendTags,
Content: string(content),
UUID: alert.UUID,
ID: alert.UUID,
CreatedBy: SYSTEM,
UpdatedBy: SYSTEM,
}
BuiltinPayloadInFile.AddBuiltinPayload(&builtinAlert)
old, err := models.BuiltinPayloadGet(ctx, "uuid = ?", alert.UUID)
if err != nil {
logger.Warning("get builtin alert fail ", builtinAlert, err)
continue
}
if old == nil {
err := builtinAlert.Add(ctx, SYSTEM)
if err != nil {
logger.Warning("add builtin alert fail ", builtinAlert, err)
}
continue
}
if old.UpdatedBy == SYSTEM {
old.ComponentID = component.ID
old.Content = string(content)
old.Name = alert.Name
old.Tags = alert.AppendTags
err = models.DB(ctx).Model(old).Select("*").Updates(old).Error
if err != nil {
logger.Warningf("update builtin alert:%+v fail %v", builtinAlert, err)
}
}
}
if writeAlertFileFlag {
bs, err = json.MarshalIndent(newAlerts, "", " ")
if err != nil {
logger.Warning("marshal builtin alerts fail ", newAlerts, err)
continue
}
_, err = file.WriteBytes(fp, bs)
if err != nil {
logger.Warning("write builtin alerts file fail ", f, err)
}
}
}
}
@@ -239,14 +251,34 @@ func Init(ctx *ctx.Context, builtinIntegrationsDir string) {
Cate: "",
Name: dashboard.Name,
Tags: dashboard.Tags,
Note: dashboard.Note,
Content: string(content),
UUID: dashboard.UUID,
ID: dashboard.UUID,
CreatedBy: SYSTEM,
UpdatedBy: SYSTEM,
}
BuiltinPayloadInFile.AddBuiltinPayload(&builtinDashboard)
old, err := models.BuiltinPayloadGet(ctx, "uuid = ?", dashboard.UUID)
if err != nil {
logger.Warning("get builtin alert fail ", builtinDashboard, err)
continue
}
if old == nil {
err := builtinDashboard.Add(ctx, SYSTEM)
if err != nil {
logger.Warning("add builtin alert fail ", builtinDashboard, err)
}
continue
}
if old.UpdatedBy == SYSTEM {
old.ComponentID = component.ID
old.Content = string(content)
old.Name = dashboard.Name
old.Tags = dashboard.Tags
err = models.DB(ctx).Model(old).Select("*").Updates(old).Error
if err != nil {
logger.Warningf("update builtin alert:%+v fail %v", builtinDashboard, err)
}
}
}
} else if err != nil {
logger.Warningf("read builtin component dash dir fail %s %v", component.Ident, err)
@@ -264,21 +296,64 @@ func Init(ctx *ctx.Context, builtinIntegrationsDir string) {
}
metrics := []models.BuiltinMetric{}
newMetrics := []models.BuiltinMetric{}
err = json.Unmarshal(bs, &metrics)
if err != nil {
logger.Warning("parse builtin component metrics file fail", f, err)
continue
}
writeMetricFileFlag := false
for _, metric := range metrics {
time.Sleep(time.Microsecond)
metric.UUID = time.Now().UnixMicro()
metric.ID = metric.UUID
metric.CreatedBy = SYSTEM
metric.UpdatedBy = SYSTEM
if metric.UUID == 0 {
writeMetricFileFlag = true
metric.UUID = time.Now().UnixNano()
}
newMetrics = append(newMetrics, metric)
BuiltinPayloadInFile.BuiltinMetrics[metric.Expression] = &metric
old, err := models.BuiltinMetricGet(ctx, "uuid = ?", metric.UUID)
if err != nil {
logger.Warning("get builtin metrics fail ", metric, err)
continue
}
if old == nil {
err := metric.Add(ctx, SYSTEM)
if err != nil {
logger.Warning("add builtin metrics fail ", metric, err)
}
continue
}
if old.UpdatedBy == SYSTEM {
old.Collector = metric.Collector
old.Typ = metric.Typ
old.Name = metric.Name
old.Unit = metric.Unit
old.Note = metric.Note
old.Lang = metric.Lang
old.Expression = metric.Expression
err = models.DB(ctx).Model(old).Select("*").Updates(old).Error
if err != nil {
logger.Warningf("update builtin metric:%+v fail %v", metric, err)
}
}
}
if writeMetricFileFlag {
bs, err = json.MarshalIndent(newMetrics, "", " ")
if err != nil {
logger.Warning("marshal builtin metrics fail ", newMetrics, err)
continue
}
_, err = file.WriteBytes(fp, bs)
if err != nil {
logger.Warning("write builtin metrics file fail ", f, err)
}
}
}
} else if err != nil {
logger.Warningf("read builtin component metrics dir fail %s %v", component.Ident, err)
@@ -292,7 +367,6 @@ type BuiltinBoard struct {
Name string `json:"name"`
Ident string `json:"ident"`
Tags string `json:"tags"`
Note string `json:"note"`
CreateAt int64 `json:"create_at"`
CreateBy string `json:"create_by"`
UpdateAt int64 `json:"update_at"`
@@ -305,346 +379,3 @@ type BuiltinBoard struct {
Hide int `json:"hide"` // 0: false, 1: true
UUID int64 `json:"uuid"`
}
func NewBuiltinPayloadInFileType() *BuiltinPayloadInFileType {
return &BuiltinPayloadInFileType{
Data: make(map[uint64]map[string]map[string][]*models.BuiltinPayload),
IndexData: make(map[int64]*models.BuiltinPayload),
BuiltinMetrics: make(map[string]*models.BuiltinMetric),
}
}
func (b *BuiltinPayloadInFileType) AddBuiltinPayload(bp *models.BuiltinPayload) {
if _, exists := b.Data[bp.ComponentID]; !exists {
b.Data[bp.ComponentID] = make(map[string]map[string][]*models.BuiltinPayload)
}
bpInType := b.Data[bp.ComponentID]
if _, exists := bpInType[bp.Type]; !exists {
bpInType[bp.Type] = make(map[string][]*models.BuiltinPayload)
}
bpInCate := bpInType[bp.Type]
if _, exists := bpInCate[bp.Cate]; !exists {
bpInCate[bp.Cate] = make([]*models.BuiltinPayload, 0)
}
bpInCate[bp.Cate] = append(bpInCate[bp.Cate], bp)
b.IndexData[bp.UUID] = bp
}
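
AddBuiltinPayload indexes every payload twice: under Data[component_id][type][cate] for listing and under IndexData[uuid] for direct lookup. A small in-package sketch of that flow (field values are hypothetical):

func exampleAddAndLookup() {
    cache := NewBuiltinPayloadInFileType()
    bp := &models.BuiltinPayload{
        ComponentID: 1,
        Type:        "alert",
        Cate:        "host",
        Name:        "cpu high",
        UUID:        1700000000000001,
    }
    cache.AddBuiltinPayload(bp)

    // Listed via the nested component/type/cate maps...
    listed, _ := cache.GetBuiltinPayload("alert", "host", "", 1)
    // ...and addressable directly by UUID through IndexData.
    direct := cache.IndexData[bp.UUID]
    _, _ = listed, direct
}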
func (b *BuiltinPayloadInFileType) GetComponentIdentByCate(typ, cate string) string {
for _, source := range b.Data {
if source == nil {
continue
}
typeMap, exists := source[typ]
if !exists {
continue
}
payloads, exists := typeMap[cate]
if !exists {
continue
}
if len(payloads) > 0 {
return payloads[0].Component
}
}
return ""
}
func (b *BuiltinPayloadInFileType) GetBuiltinPayload(typ, cate, query string, componentId uint64) ([]*models.BuiltinPayload, error) {
var result []*models.BuiltinPayload
source := b.Data[componentId]
if source == nil {
return nil, nil
}
typeMap, exists := source[typ]
if !exists {
return nil, nil
}
if cate != "" {
payloads, exists := typeMap[cate]
if !exists {
return nil, nil
}
result = append(result, filterByQuery(payloads, query)...)
} else {
for _, payloads := range typeMap {
result = append(result, filterByQuery(payloads, query)...)
}
}
if len(result) > 0 {
sort.Slice(result, func(i, j int) bool {
return result[i].Name < result[j].Name
})
}
return result, nil
}
func (b *BuiltinPayloadInFileType) GetBuiltinPayloadCates(typ string, componentId uint64) ([]string, error) {
var result []string
source := b.Data[componentId]
if source == nil {
return result, nil
}
typeData := source[typ]
if typeData == nil {
return result, nil
}
for cate := range typeData {
result = append(result, cate)
}
sort.Strings(result)
return result, nil
}
func filterByQuery(payloads []*models.BuiltinPayload, query string) []*models.BuiltinPayload {
if query == "" {
return payloads
}
queryLower := strings.ToLower(query)
var filtered []*models.BuiltinPayload
for _, p := range payloads {
if strings.Contains(strings.ToLower(p.Name), queryLower) || strings.Contains(strings.ToLower(p.Tags), queryLower) {
filtered = append(filtered, p)
}
}
return filtered
}
func (b *BuiltinPayloadInFileType) BuiltinMetricGets(metricsInDB []*models.BuiltinMetric, lang, collector, typ, query, unit string, limit, offset int) ([]*models.BuiltinMetric, int, error) {
var filteredMetrics []*models.BuiltinMetric
expressionSet := set.NewStringSet()
builtinMetricsByDB := convertBuiltinMetricByDB(metricsInDB)
builtinMetricsMap := make(map[string]*models.BuiltinMetric)
for expression, metric := range builtinMetricsByDB {
builtinMetricsMap[expression] = metric
}
for expression, metric := range b.BuiltinMetrics {
builtinMetricsMap[expression] = metric
}
for _, metric := range builtinMetricsMap {
if !applyFilter(metric, collector, typ, query, unit) {
continue
}
// Skip if expression is already in db cache
// NOTE: Ignore duplicate expressions. In particular, users on older versions may have created duplicate metrics,
// so metrics in ByFile with the same expression need to be overridden.
if expressionSet.Exists(metric.Expression) {
continue
}
// Add db expression in set.
expressionSet.Add(metric.Expression)
// Apply language
trans, err := getTranslationWithLanguage(metric, lang)
if err != nil {
logger.Errorf("Error getting translation for metric %s: %v", metric.Name, err)
continue // Skip if translation not found
}
metric.Name = trans.Name
metric.Note = trans.Note
filteredMetrics = append(filteredMetrics, metric)
}
// Sort metrics
sort.Slice(filteredMetrics, func(i, j int) bool {
if filteredMetrics[i].Collector != filteredMetrics[j].Collector {
return filteredMetrics[i].Collector < filteredMetrics[j].Collector
}
if filteredMetrics[i].Typ != filteredMetrics[j].Typ {
return filteredMetrics[i].Typ < filteredMetrics[j].Typ
}
return filteredMetrics[i].Expression < filteredMetrics[j].Expression
})
totalCount := len(filteredMetrics)
// Validate parameters
if offset < 0 {
offset = 0
}
if limit < 0 {
limit = 0
}
// Handle edge cases
if offset >= totalCount || limit == 0 {
return []*models.BuiltinMetric{}, totalCount, nil
}
// Apply pagination
end := offset + limit
if end > totalCount {
end = totalCount
}
return filteredMetrics[offset:end], totalCount, nil
}
func (b *BuiltinPayloadInFileType) BuiltinMetricTypes(lang, collector, query string) []string {
typeSet := set.NewStringSet()
for _, metric := range b.BuiltinMetrics {
if !applyFilter(metric, collector, "", query, "") {
continue
}
typeSet.Add(metric.Typ)
}
return typeSet.ToSlice()
}
func (b *BuiltinPayloadInFileType) BuiltinMetricCollectors(lang, typ, query string) []string {
collectorSet := set.NewStringSet()
for _, metric := range b.BuiltinMetrics {
if !applyFilter(metric, "", typ, query, "") {
continue
}
collectorSet.Add(metric.Collector)
}
return collectorSet.ToSlice()
}
func applyFilter(metric *models.BuiltinMetric, collector, typ, query, unit string) bool {
if collector != "" && collector != metric.Collector {
return false
}
if typ != "" && typ != metric.Typ {
return false
}
if unit != "" && !containsUnit(unit, metric.Unit) {
return false
}
if query != "" && !applyQueryFilter(metric, query) {
return false
}
return true
}
func containsUnit(unit, metricUnit string) bool {
us := strings.Split(unit, ",")
for _, u := range us {
if u == metricUnit {
return true
}
}
return false
}
func applyQueryFilter(metric *models.BuiltinMetric, query string) bool {
qs := strings.Split(query, " ")
for _, q := range qs {
if strings.HasPrefix(q, "-") {
q = strings.TrimPrefix(q, "-")
if strings.Contains(metric.Name, q) || strings.Contains(metric.Note, q) || strings.Contains(metric.Expression, q) {
return false
}
} else {
if !strings.Contains(metric.Name, q) && !strings.Contains(metric.Note, q) && !strings.Contains(metric.Expression, q) {
return false
}
}
}
return true
}
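
applyQueryFilter treats the query as space-separated terms matched against name, note and expression: every plain term must match, and a term prefixed with "-" excludes. A short in-package sketch (metric values are hypothetical):

func exampleQueryFilter() {
    m := &models.BuiltinMetric{
        Name:       "cpu_usage_active",
        Note:       "CPU active usage percent",
        Expression: "cpu_usage_active",
    }
    _ = applyQueryFilter(m, "cpu usage")   // true: both terms appear in name/note/expression
    _ = applyQueryFilter(m, "cpu -active") // false: "-active" excludes metrics containing "active"
    _ = applyQueryFilter(m, "mem")         // false: the term is not found anywhere
}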
func getTranslationWithLanguage(bm *models.BuiltinMetric, lang string) (*models.Translation, error) {
var defaultTranslation *models.Translation
for _, t := range bm.Translation {
if t.Lang == lang {
return &t, nil
}
if t.Lang == "en_US" {
defaultTranslation = &t
}
}
if defaultTranslation != nil {
return defaultTranslation, nil
}
return nil, errors.Errorf("translation not found for metric %s", bm.Name)
}
func convertBuiltinMetricByDB(metricsInDB []*models.BuiltinMetric) map[string]*models.BuiltinMetric {
builtinMetricsByDB := make(map[string]*models.BuiltinMetric)
builtinMetricsByDBList := make(map[string][]*models.BuiltinMetric)
for _, metric := range metricsInDB {
builtinMetrics, ok := builtinMetricsByDBList[metric.Expression]
if !ok {
builtinMetrics = []*models.BuiltinMetric{}
}
builtinMetrics = append(builtinMetrics, metric)
builtinMetricsByDBList[metric.Expression] = builtinMetrics
}
for expression, builtinMetrics := range builtinMetricsByDBList {
if len(builtinMetrics) == 0 {
continue
}
// NOTE: To stay compatible with metrics that users already created in older versions, and to converge metric edits onto a single record,
// we pick the metric record with the same expression and the smallest id as the primary Metric.
sort.Slice(builtinMetrics, func(i, j int) bool {
return builtinMetrics[i].ID < builtinMetrics[j].ID
})
currentBuiltinMetric := builtinMetrics[0]
// User has no customized translation, so we can merge it
if len(currentBuiltinMetric.Translation) == 0 {
translationMap := make(map[string]models.Translation)
for _, bm := range builtinMetrics {
for _, t := range getDefaultTranslation(bm) {
translationMap[t.Lang] = t
}
}
currentBuiltinMetric.Translation = make([]models.Translation, 0, len(translationMap))
for _, t := range translationMap {
currentBuiltinMetric.Translation = append(currentBuiltinMetric.Translation, t)
}
}
builtinMetricsByDB[expression] = currentBuiltinMetric
}
return builtinMetricsByDB
}
func getDefaultTranslation(bm *models.BuiltinMetric) []models.Translation {
if len(bm.Translation) != 0 {
return bm.Translation
}
return []models.Translation{{
Lang: bm.Lang,
Name: bm.Name,
Note: bm.Note,
}}
}

View File

@@ -6,7 +6,6 @@ import (
"sync"
"time"
"github.com/ccfos/nightingale/v6/center/cstats"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/storage"
@@ -116,23 +115,15 @@ func (s *Set) updateTargets(m map[string]models.HostMeta) error {
}
newMap[models.WrapIdent(ident)] = meta
}
start := time.Now()
err := storage.MSet(context.Background(), s.redis, newMap, 7*24*time.Hour)
err := storage.MSet(context.Background(), s.redis, newMap)
if err != nil {
cstats.RedisOperationLatency.WithLabelValues("mset_target_meta", "fail").Observe(time.Since(start).Seconds())
return err
} else {
cstats.RedisOperationLatency.WithLabelValues("mset_target_meta", "success").Observe(time.Since(start).Seconds())
}
if len(extendMap) > 0 {
err = storage.MSet(context.Background(), s.redis, extendMap, 7*24*time.Hour)
err = storage.MSet(context.Background(), s.redis, extendMap)
if err != nil {
cstats.RedisOperationLatency.WithLabelValues("mset_target_extend", "fail").Observe(time.Since(start).Seconds())
return err
} else {
cstats.RedisOperationLatency.WithLabelValues("mset_target_extend", "success").Observe(time.Since(start).Seconds())
}
}

View File

@@ -24,11 +24,11 @@ import (
"github.com/ccfos/nightingale/v6/prom"
"github.com/ccfos/nightingale/v6/pushgw/idents"
"github.com/ccfos/nightingale/v6/storage"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"gorm.io/gorm"
"github.com/ccfos/nightingale/v6/tdengine"
"github.com/gin-gonic/gin"
"github.com/rakyll/statik/fs"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/runner"
)
@@ -42,6 +42,7 @@ type Router struct {
DatasourceCache *memsto.DatasourceCacheType
NotifyConfigCache *memsto.NotifyConfigCacheType
PromClients *prom.PromClientMap
TdendgineClients *tdengine.TdengineClientMap
Redis storage.Redis
MetaSet *metas.Set
IdentSet *idents.Set
@@ -49,45 +50,43 @@ type Router struct {
Sso *sso.SsoClient
UserCache *memsto.UserCacheType
UserGroupCache *memsto.UserGroupCacheType
UserTokenCache *memsto.UserTokenCacheType
Ctx *ctx.Context
LogDir string
HeartbeatHook HeartbeatHookFunc
TargetDeleteHook models.TargetDeleteHookFunc
AlertRuleModifyHook AlertRuleModifyHookFunc
HeartbeatHook HeartbeatHookFunc
TargetDeleteHook models.TargetDeleteHookFunc
}
func New(httpConfig httpx.Config, center cconf.Center, alert aconf.Alert, ibex conf.Ibex,
operations cconf.Operation, ds *memsto.DatasourceCacheType, ncc *memsto.NotifyConfigCacheType,
pc *prom.PromClientMap, redis storage.Redis,
pc *prom.PromClientMap, tdendgineClients *tdengine.TdengineClientMap, redis storage.Redis,
sso *sso.SsoClient, ctx *ctx.Context, metaSet *metas.Set, idents *idents.Set,
tc *memsto.TargetCacheType, uc *memsto.UserCacheType, ugc *memsto.UserGroupCacheType, utc *memsto.UserTokenCacheType, logDir string) *Router {
tc *memsto.TargetCacheType, uc *memsto.UserCacheType, ugc *memsto.UserGroupCacheType) *Router {
return &Router{
HTTP: httpConfig,
Center: center,
Alert: alert,
Ibex: ibex,
Operations: operations,
DatasourceCache: ds,
NotifyConfigCache: ncc,
PromClients: pc,
Redis: redis,
MetaSet: metaSet,
IdentSet: idents,
TargetCache: tc,
Sso: sso,
UserCache: uc,
UserGroupCache: ugc,
UserTokenCache: utc,
Ctx: ctx,
LogDir: logDir,
HeartbeatHook: func(ident string) map[string]interface{} { return nil },
TargetDeleteHook: func(tx *gorm.DB, idents []string) error { return nil },
AlertRuleModifyHook: func(ar *models.AlertRule) {},
HTTP: httpConfig,
Center: center,
Alert: alert,
Ibex: ibex,
Operations: operations,
DatasourceCache: ds,
NotifyConfigCache: ncc,
PromClients: pc,
TdendgineClients: tdendgineClients,
Redis: redis,
MetaSet: metaSet,
IdentSet: idents,
TargetCache: tc,
Sso: sso,
UserCache: uc,
UserGroupCache: ugc,
Ctx: ctx,
HeartbeatHook: func(ident string) map[string]interface{} { return nil },
TargetDeleteHook: emptyDeleteHook,
}
}
func emptyDeleteHook(ctx *ctx.Context, idents []string) error {
return nil
}
func stat() gin.HandlerFunc {
return func(c *gin.Context) {
start := time.Now()
@@ -95,9 +94,10 @@ func stat() gin.HandlerFunc {
code := fmt.Sprintf("%d", c.Writer.Status())
method := c.Request.Method
labels := []string{code, c.FullPath(), method}
labels := []string{cstats.Service, code, c.FullPath(), method}
cstats.RequestDuration.WithLabelValues(labels...).Observe(time.Since(start).Seconds())
cstats.RequestCounter.WithLabelValues(labels...).Inc()
cstats.RequestDuration.WithLabelValues(labels...).Observe(float64(time.Since(start).Seconds()))
}
}
@@ -179,7 +179,6 @@ func (rt *Router) Config(r *gin.Engine) {
pages := r.Group(pagesPrefix)
{
pages.DELETE("/datasource/series", rt.auth(), rt.admin(), rt.deleteDatasourceSeries)
if rt.Center.AnonymousAccess.PromQuerier {
pages.Any("/proxy/:id/*url", rt.dsProxy)
pages.POST("/query-range-batch", rt.promBatchQueryRange)
@@ -188,24 +187,12 @@ func (rt *Router) Config(r *gin.Engine) {
pages.POST("/datasource/query", rt.datasourceQuery)
pages.POST("/ds-query", rt.QueryData)
pages.POST("/logs-query", rt.QueryLogV2)
pages.POST("/logs-query", rt.QueryLog)
pages.POST("/tdengine-databases", rt.tdengineDatabases)
pages.POST("/tdengine-tables", rt.tdengineTables)
pages.POST("/tdengine-columns", rt.tdengineColumns)
pages.POST("/log-query-batch", rt.QueryLogBatch)
// Database metadata endpoints
pages.POST("/db-databases", rt.ShowDatabases)
pages.POST("/db-tables", rt.ShowTables)
pages.POST("/db-desc-table", rt.DescribeTable)
// Elasticsearch-specific endpoints
pages.POST("/indices", rt.auth(), rt.user(), rt.QueryIndices)
pages.POST("/es-variable", rt.auth(), rt.user(), rt.QueryESVariable)
pages.POST("/fields", rt.auth(), rt.user(), rt.QueryFields)
pages.POST("/log-query", rt.auth(), rt.user(), rt.QueryLog)
} else {
pages.Any("/proxy/:id/*url", rt.auth(), rt.dsProxy)
pages.POST("/query-range-batch", rt.auth(), rt.promBatchQueryRange)
@@ -213,32 +200,14 @@ func (rt *Router) Config(r *gin.Engine) {
pages.GET("/datasource/brief", rt.auth(), rt.user(), rt.datasourceBriefs)
pages.POST("/datasource/query", rt.auth(), rt.user(), rt.datasourceQuery)
pages.POST("/ds-query", rt.auth(), rt.user(), rt.QueryData)
pages.POST("/logs-query", rt.auth(), rt.user(), rt.QueryLogV2)
pages.POST("/ds-query", rt.auth(), rt.QueryData)
pages.POST("/logs-query", rt.auth(), rt.QueryLog)
pages.POST("/tdengine-databases", rt.auth(), rt.tdengineDatabases)
pages.POST("/tdengine-tables", rt.auth(), rt.tdengineTables)
pages.POST("/tdengine-columns", rt.auth(), rt.tdengineColumns)
pages.POST("/log-query-batch", rt.auth(), rt.user(), rt.QueryLogBatch)
// Database metadata endpoints
pages.POST("/db-databases", rt.auth(), rt.user(), rt.ShowDatabases)
pages.POST("/db-tables", rt.auth(), rt.user(), rt.ShowTables)
pages.POST("/db-desc-table", rt.auth(), rt.user(), rt.DescribeTable)
// Elasticsearch-specific endpoints
pages.POST("/indices", rt.auth(), rt.user(), rt.QueryIndices)
pages.POST("/es-variable", rt.QueryESVariable)
pages.POST("/fields", rt.QueryFields)
pages.POST("/log-query", rt.QueryLog)
}
// OpenSearch-specific endpoints
pages.POST("/os-indices", rt.QueryOSIndices)
pages.POST("/os-variable", rt.QueryOSVariable)
pages.POST("/os-fields", rt.QueryOSFields)
pages.GET("/sql-template", rt.QuerySqlTemplate)
pages.POST("/auth/login", rt.jwtMock(), rt.loginPost)
pages.POST("/auth/logout", rt.jwtMock(), rt.auth(), rt.user(), rt.logoutPost)
@@ -252,13 +221,9 @@ func (rt *Router) Config(r *gin.Engine) {
pages.GET("/auth/redirect", rt.loginRedirect)
pages.GET("/auth/redirect/cas", rt.loginRedirectCas)
pages.GET("/auth/redirect/oauth", rt.loginRedirectOAuth)
pages.GET("/auth/redirect/dingtalk", rt.loginRedirectDingTalk)
pages.GET("/auth/redirect/feishu", rt.loginRedirectFeiShu)
pages.GET("/auth/callback", rt.loginCallback)
pages.GET("/auth/callback/cas", rt.loginCallbackCas)
pages.GET("/auth/callback/oauth", rt.loginCallbackOAuth)
pages.GET("/auth/callback/dingtalk", rt.loginCallbackDingTalk)
pages.GET("/auth/callback/feishu", rt.loginCallbackFeiShu)
pages.GET("/auth/perms", rt.allPerms)
pages.GET("/metrics/desc", rt.metricsDescGetFile)
@@ -266,22 +231,18 @@ func (rt *Router) Config(r *gin.Engine) {
pages.GET("/notify-channels", rt.notifyChannelsGets)
pages.GET("/contact-keys", rt.contactKeysGets)
pages.GET("/install-date", rt.installDateGet)
pages.GET("/self/perms", rt.auth(), rt.user(), rt.permsGets)
pages.GET("/self/profile", rt.auth(), rt.user(), rt.selfProfileGet)
pages.PUT("/self/profile", rt.auth(), rt.user(), rt.selfProfilePut)
pages.PUT("/self/password", rt.auth(), rt.user(), rt.selfPasswordPut)
pages.GET("/self/token", rt.auth(), rt.user(), rt.getToken)
pages.POST("/self/token", rt.auth(), rt.user(), rt.addToken)
pages.DELETE("/self/token/:id", rt.auth(), rt.user(), rt.deleteToken)
pages.GET("/users", rt.auth(), rt.user(), rt.perm("/users"), rt.userGets)
pages.POST("/users", rt.auth(), rt.user(), rt.perm("/users/add"), rt.userAddPost)
pages.POST("/users", rt.auth(), rt.admin(), rt.userAddPost)
pages.GET("/user/:id/profile", rt.auth(), rt.userProfileGet)
pages.PUT("/user/:id/profile", rt.auth(), rt.user(), rt.perm("/users/put"), rt.userProfilePut)
pages.PUT("/user/:id/password", rt.auth(), rt.user(), rt.perm("/users/put"), rt.userPasswordPut)
pages.DELETE("/user/:id", rt.auth(), rt.user(), rt.perm("/users/del"), rt.userDel)
pages.PUT("/user/:id/profile", rt.auth(), rt.admin(), rt.userProfilePut)
pages.PUT("/user/:id/password", rt.auth(), rt.admin(), rt.userPasswordPut)
pages.DELETE("/user/:id", rt.auth(), rt.admin(), rt.userDel)
pages.GET("/metric-views", rt.auth(), rt.metricViewGets)
pages.DELETE("/metric-views", rt.auth(), rt.user(), rt.metricViewDel)
@@ -322,7 +283,6 @@ func (rt *Router) Config(r *gin.Engine) {
pages.GET("/busi-groups/tags", rt.auth(), rt.user(), rt.busiGroupsGetTags)
pages.GET("/targets", rt.auth(), rt.user(), rt.targetGets)
pages.POST("/target-update", rt.auth(), rt.targetUpdate)
pages.GET("/target/extra-meta", rt.auth(), rt.user(), rt.targetExtendInfoByIdent)
pages.POST("/target/list", rt.auth(), rt.user(), rt.targetGetsByHostFilter)
pages.DELETE("/targets", rt.auth(), rt.user(), rt.perm("/targets/del"), rt.targetDel)
@@ -362,15 +322,9 @@ func (rt *Router) Config(r *gin.Engine) {
pages.GET("/share-charts", rt.chartShareGets)
pages.POST("/share-charts", rt.auth(), rt.chartShareAdd)
pages.POST("/dashboard-annotations", rt.auth(), rt.user(), rt.perm("/dashboards/put"), rt.dashAnnotationAdd)
pages.GET("/dashboard-annotations", rt.dashAnnotationGets)
pages.PUT("/dashboard-annotation/:id", rt.auth(), rt.user(), rt.perm("/dashboards/put"), rt.dashAnnotationPut)
pages.DELETE("/dashboard-annotation/:id", rt.auth(), rt.user(), rt.perm("/dashboards/del"), rt.dashAnnotationDel)
// pages.GET("/alert-rules/builtin/alerts-cates", rt.auth(), rt.user(), rt.builtinAlertCateGets)
// pages.GET("/alert-rules/builtin/list", rt.auth(), rt.user(), rt.builtinAlertRules)
pages.GET("/alert-rules/callbacks", rt.auth(), rt.user(), rt.alertRuleCallbacks)
pages.GET("/timezones", rt.auth(), rt.user(), rt.timezonesGet)
pages.GET("/busi-groups/alert-rules", rt.auth(), rt.user(), rt.perm("/alert-rules"), rt.alertRuleGetsByGids)
pages.GET("/busi-group/:id/alert-rules", rt.auth(), rt.user(), rt.perm("/alert-rules"), rt.alertRuleGets)
@@ -386,16 +340,13 @@ func (rt *Router) Config(r *gin.Engine) {
pages.PUT("/busi-group/alert-rule/validate", rt.auth(), rt.user(), rt.perm("/alert-rules/put"), rt.alertRuleValidation)
pages.POST("/relabel-test", rt.auth(), rt.user(), rt.relabelTest)
pages.POST("/busi-group/:id/alert-rules/clone", rt.auth(), rt.user(), rt.perm("/alert-rules/add"), rt.bgrw(), rt.cloneToMachine)
pages.POST("/busi-groups/alert-rules/clones", rt.auth(), rt.user(), rt.perm("/alert-rules/add"), rt.batchAlertRuleClone)
pages.POST("/busi-group/alert-rules/notify-tryrun", rt.auth(), rt.user(), rt.perm("/alert-rules/add"), rt.alertRuleNotifyTryRun)
pages.POST("/busi-group/alert-rules/enable-tryrun", rt.auth(), rt.user(), rt.perm("/alert-rules/add"), rt.alertRuleEnableTryRun)
pages.GET("/busi-groups/recording-rules", rt.auth(), rt.user(), rt.perm("/recording-rules"), rt.recordingRuleGetsByGids)
pages.GET("/busi-group/:id/recording-rules", rt.auth(), rt.user(), rt.perm("/recording-rules"), rt.recordingRuleGets)
pages.POST("/busi-group/:id/recording-rules", rt.auth(), rt.user(), rt.perm("/recording-rules/add"), rt.bgrw(), rt.recordingRuleAddByFE)
pages.DELETE("/busi-group/:id/recording-rules", rt.auth(), rt.user(), rt.perm("/recording-rules/del"), rt.bgrw(), rt.recordingRuleDel)
pages.PUT("/busi-group/:id/recording-rule/:rrid", rt.auth(), rt.user(), rt.perm("/recording-rules/put"), rt.bgrw(), rt.recordingRulePutByFE)
pages.GET("/recording-rule/:rrid", rt.auth(), rt.user(), rt.perm("/recording-rules"), rt.recordingRuleGet)
pages.PUT("/recording-rule/:rrid", rt.auth(), rt.user(), rt.perm("/recording-rules"), rt.recordingRulePutByFE)
pages.PUT("/busi-group/:id/recording-rules/fields", rt.auth(), rt.user(), rt.perm("/recording-rules/put"), rt.recordingRulePutFields)
pages.GET("/busi-groups/alert-mutes", rt.auth(), rt.user(), rt.perm("/alert-mutes"), rt.alertMuteGetsByGids)
@@ -406,7 +357,6 @@ func (rt *Router) Config(r *gin.Engine) {
pages.PUT("/busi-group/:id/alert-mute/:amid", rt.auth(), rt.user(), rt.perm("/alert-mutes/put"), rt.alertMutePutByFE)
pages.GET("/busi-group/:id/alert-mute/:amid", rt.auth(), rt.user(), rt.perm("/alert-mutes"), rt.alertMuteGet)
pages.PUT("/busi-group/:id/alert-mutes/fields", rt.auth(), rt.user(), rt.perm("/alert-mutes/put"), rt.bgrw(), rt.alertMutePutFields)
pages.POST("/alert-mute-tryrun", rt.auth(), rt.user(), rt.perm("/alert-mutes/add"), rt.alertMuteTryRun)
pages.GET("/busi-groups/alert-subscribes", rt.auth(), rt.user(), rt.perm("/alert-subscribes"), rt.alertSubscribeGetsByGids)
pages.GET("/busi-group/:id/alert-subscribes", rt.auth(), rt.user(), rt.perm("/alert-subscribes"), rt.bgro(), rt.alertSubscribeGets)
@@ -414,21 +364,22 @@ func (rt *Router) Config(r *gin.Engine) {
pages.POST("/busi-group/:id/alert-subscribes", rt.auth(), rt.user(), rt.perm("/alert-subscribes/add"), rt.bgrw(), rt.alertSubscribeAdd)
pages.PUT("/busi-group/:id/alert-subscribes", rt.auth(), rt.user(), rt.perm("/alert-subscribes/put"), rt.bgrw(), rt.alertSubscribePut)
pages.DELETE("/busi-group/:id/alert-subscribes", rt.auth(), rt.user(), rt.perm("/alert-subscribes/del"), rt.bgrw(), rt.alertSubscribeDel)
pages.POST("/alert-subscribe/alert-subscribes-tryrun", rt.auth(), rt.user(), rt.perm("/alert-subscribes/add"), rt.alertSubscribeTryRun)
pages.GET("/alert-cur-event/:eid", rt.alertCurEventGet)
pages.GET("/alert-his-event/:eid", rt.alertHisEventGet)
pages.GET("/event-notify-records/:eid", rt.notificationRecordList)
pages.GET("/event-detail/:hash", rt.eventDetailPage)
pages.GET("/alert-eval-detail/:id", rt.alertEvalDetailPage)
pages.GET("/trace-logs/:traceid", rt.traceLogsPage)
if rt.Center.AnonymousAccess.AlertDetail {
pages.GET("/alert-cur-event/:eid", rt.alertCurEventGet)
pages.GET("/alert-his-event/:eid", rt.alertHisEventGet)
pages.GET("/event-notify-records/:eid", rt.notificationRecordList)
} else {
pages.GET("/alert-cur-event/:eid", rt.auth(), rt.user(), rt.alertCurEventGet)
pages.GET("/alert-his-event/:eid", rt.auth(), rt.user(), rt.alertHisEventGet)
pages.GET("/event-notify-records/:eid", rt.auth(), rt.user(), rt.notificationRecordList)
}
// card logic
pages.GET("/alert-cur-events/list", rt.auth(), rt.user(), rt.alertCurEventsList)
pages.GET("/alert-cur-events/card", rt.auth(), rt.user(), rt.alertCurEventsCard)
pages.POST("/alert-cur-events/card/details", rt.auth(), rt.alertCurEventsCardDetails)
pages.GET("/alert-his-events/list", rt.auth(), rt.user(), rt.alertHisEventsList)
pages.DELETE("/alert-his-events", rt.auth(), rt.admin(), rt.alertHisEventsDelete)
pages.DELETE("/alert-cur-events", rt.auth(), rt.user(), rt.perm("/alert-cur-events/del"), rt.alertCurEventDel)
pages.GET("/alert-cur-events/stats", rt.auth(), rt.alertCurEventsStatistics)
@@ -460,13 +411,13 @@ func (rt *Router) Config(r *gin.Engine) {
pages.POST("/datasource/status/update", rt.auth(), rt.admin(), rt.datasourceUpdataStatus)
pages.DELETE("/datasource/", rt.auth(), rt.admin(), rt.datasourceDel)
pages.GET("/roles", rt.auth(), rt.user(), rt.roleGets)
pages.POST("/roles", rt.auth(), rt.user(), rt.perm("/roles/add"), rt.roleAdd)
pages.PUT("/roles", rt.auth(), rt.user(), rt.perm("/roles/put"), rt.rolePut)
pages.DELETE("/role/:id", rt.auth(), rt.user(), rt.perm("/roles/del"), rt.roleDel)
pages.GET("/roles", rt.auth(), rt.admin(), rt.roleGets)
pages.POST("/roles", rt.auth(), rt.admin(), rt.roleAdd)
pages.PUT("/roles", rt.auth(), rt.admin(), rt.rolePut)
pages.DELETE("/role/:id", rt.auth(), rt.admin(), rt.roleDel)
pages.GET("/role/:id/ops", rt.auth(), rt.user(), rt.perm("/roles"), rt.operationOfRole)
pages.PUT("/role/:id/ops", rt.auth(), rt.user(), rt.perm("/roles/put"), rt.roleBindOperation)
pages.GET("/role/:id/ops", rt.auth(), rt.admin(), rt.operationOfRole)
pages.PUT("/role/:id/ops", rt.auth(), rt.admin(), rt.roleBindOperation)
pages.GET("/operation", rt.operations)
pages.GET("/notify-tpls", rt.auth(), rt.user(), rt.notifyTplGets)
@@ -488,7 +439,7 @@ func (rt *Router) Config(r *gin.Engine) {
pages.GET("/notify-channel", rt.auth(), rt.user(), rt.perm("/help/notification-settings"), rt.notifyChannelGets)
pages.PUT("/notify-channel", rt.auth(), rt.admin(), rt.notifyChannelPuts)
pages.GET("/notify-contact", rt.auth(), rt.user(), rt.notifyContactGets)
pages.GET("/notify-contact", rt.auth(), rt.user(), rt.perm("/help/notification-settings"), rt.notifyContactGets)
pages.PUT("/notify-contact", rt.auth(), rt.admin(), rt.notifyContactPuts)
pages.GET("/notify-config", rt.auth(), rt.user(), rt.perm("/help/notification-settings"), rt.notifyConfigGet)
@@ -497,20 +448,13 @@ func (rt *Router) Config(r *gin.Engine) {
pages.GET("/es-index-pattern", rt.auth(), rt.esIndexPatternGet)
pages.GET("/es-index-pattern-list", rt.auth(), rt.esIndexPatternGetList)
pages.POST("/es-index-pattern", rt.auth(), rt.user(), rt.perm("/log/index-patterns/add"), rt.esIndexPatternAdd)
pages.PUT("/es-index-pattern", rt.auth(), rt.user(), rt.perm("/log/index-patterns/put"), rt.esIndexPatternPut)
pages.DELETE("/es-index-pattern", rt.auth(), rt.user(), rt.perm("/log/index-patterns/del"), rt.esIndexPatternDel)
pages.POST("/es-index-pattern", rt.auth(), rt.admin(), rt.esIndexPatternAdd)
pages.PUT("/es-index-pattern", rt.auth(), rt.admin(), rt.esIndexPatternPut)
pages.DELETE("/es-index-pattern", rt.auth(), rt.admin(), rt.esIndexPatternDel)
pages.GET("/embedded-dashboards", rt.auth(), rt.user(), rt.perm("/embedded-dashboards"), rt.embeddedDashboardsGet)
pages.PUT("/embedded-dashboards", rt.auth(), rt.user(), rt.perm("/embedded-dashboards/put"), rt.embeddedDashboardsPut)
// Get the embedded-product list
pages.GET("/embedded-product", rt.auth(), rt.user(), rt.embeddedProductGets)
pages.GET("/embedded-product/:id", rt.auth(), rt.user(), rt.embeddedProductGet)
pages.POST("/embedded-product", rt.auth(), rt.user(), rt.perm("/embedded-product/add"), rt.embeddedProductAdd)
pages.PUT("/embedded-product/:id", rt.auth(), rt.user(), rt.perm("/embedded-product/put"), rt.embeddedProductPut)
pages.DELETE("/embedded-product/:id", rt.auth(), rt.user(), rt.perm("/embedded-product/delete"), rt.embeddedProductDelete)
pages.GET("/user-variable-configs", rt.auth(), rt.user(), rt.perm("/help/variable-configs"), rt.userVariableConfigGets)
pages.POST("/user-variable-config", rt.auth(), rt.user(), rt.perm("/help/variable-configs"), rt.userVariableConfigAdd)
pages.PUT("/user-variable-config/:id", rt.auth(), rt.user(), rt.perm("/help/variable-configs"), rt.userVariableConfigPut)
@@ -520,128 +464,21 @@ func (rt *Router) Config(r *gin.Engine) {
pages.PUT("/config", rt.auth(), rt.admin(), rt.configPutByKey)
pages.GET("/site-info", rt.siteInfo)
// AI Config management
pages.GET("/ai-agents", rt.auth(), rt.admin(), rt.aiAgentGets)
pages.GET("/ai-agent/:id", rt.auth(), rt.admin(), rt.aiAgentGet)
pages.POST("/ai-agents", rt.auth(), rt.admin(), rt.aiAgentAdd)
pages.PUT("/ai-agent/:id", rt.auth(), rt.admin(), rt.aiAgentPut)
pages.DELETE("/ai-agent/:id", rt.auth(), rt.admin(), rt.aiAgentDel)
pages.GET("/ai-llm-configs", rt.auth(), rt.admin(), rt.aiLLMConfigGets)
pages.GET("/ai-llm-config/:id", rt.auth(), rt.admin(), rt.aiLLMConfigGet)
pages.POST("/ai-llm-configs", rt.auth(), rt.admin(), rt.aiLLMConfigAdd)
pages.PUT("/ai-llm-config/:id", rt.auth(), rt.admin(), rt.aiLLMConfigPut)
pages.DELETE("/ai-llm-config/:id", rt.auth(), rt.admin(), rt.aiLLMConfigDel)
pages.POST("/ai-llm-config/test", rt.auth(), rt.admin(), rt.aiLLMConfigTest)
pages.GET("/ai-skills", rt.auth(), rt.admin(), rt.aiSkillGets)
pages.GET("/ai-skill/:id", rt.auth(), rt.admin(), rt.aiSkillGet)
pages.POST("/ai-skills", rt.auth(), rt.admin(), rt.aiSkillAdd)
pages.PUT("/ai-skill/:id", rt.auth(), rt.admin(), rt.aiSkillPut)
pages.DELETE("/ai-skill/:id", rt.auth(), rt.admin(), rt.aiSkillDel)
pages.POST("/ai-skills/import", rt.auth(), rt.admin(), rt.aiSkillImport)
pages.POST("/ai-skill/:id/files", rt.auth(), rt.admin(), rt.aiSkillFileAdd)
pages.GET("/ai-skill-file/:fileId", rt.auth(), rt.admin(), rt.aiSkillFileGet)
pages.DELETE("/ai-skill-file/:fileId", rt.auth(), rt.admin(), rt.aiSkillFileDel)
pages.GET("/mcp-servers", rt.auth(), rt.admin(), rt.mcpServerGets)
pages.GET("/mcp-server/:id", rt.auth(), rt.admin(), rt.mcpServerGet)
pages.POST("/mcp-servers", rt.auth(), rt.admin(), rt.mcpServerAdd)
pages.PUT("/mcp-server/:id", rt.auth(), rt.admin(), rt.mcpServerPut)
pages.DELETE("/mcp-server/:id", rt.auth(), rt.admin(), rt.mcpServerDel)
pages.POST("/ai-agent/:id/test", rt.auth(), rt.admin(), rt.aiAgentTest)
pages.POST("/mcp-server/test", rt.auth(), rt.admin(), rt.mcpServerTest)
pages.GET("/mcp-server/:id/tools", rt.auth(), rt.admin(), rt.mcpServerTools)
// AI Conversations
pages.GET("/ai-conversations", rt.auth(), rt.user(), rt.aiConversationGets)
pages.POST("/ai-conversations", rt.auth(), rt.user(), rt.aiConversationAdd)
pages.GET("/ai-conversation/:id", rt.auth(), rt.user(), rt.aiConversationGet)
pages.PUT("/ai-conversation/:id", rt.auth(), rt.user(), rt.aiConversationPut)
pages.DELETE("/ai-conversation/:id", rt.auth(), rt.user(), rt.aiConversationDel)
pages.POST("/ai-conversation/:id/messages", rt.auth(), rt.user(), rt.aiConversationMessageAdd)
// AI chat (SSE), dispatches by action_key
pages.POST("/ai-chat", rt.auth(), rt.user(), rt.aiChat)
// source token related routes
pages.POST("/source-token", rt.auth(), rt.user(), rt.sourceTokenAdd)
// for admin api
pages.GET("/user/busi-groups", rt.auth(), rt.admin(), rt.userBusiGroupsGets)
pages.GET("/builtin-components", rt.auth(), rt.user(), rt.builtinComponentsGets)
pages.POST("/builtin-components", rt.auth(), rt.user(), rt.perm("/components/add"), rt.builtinComponentsAdd)
pages.PUT("/builtin-components", rt.auth(), rt.user(), rt.perm("/components/put"), rt.builtinComponentsPut)
pages.DELETE("/builtin-components", rt.auth(), rt.user(), rt.perm("/components/del"), rt.builtinComponentsDel)
pages.POST("/builtin-components", rt.auth(), rt.user(), rt.perm("/built-in-components/add"), rt.builtinComponentsAdd)
pages.PUT("/builtin-components", rt.auth(), rt.user(), rt.perm("/built-in-components/put"), rt.builtinComponentsPut)
pages.DELETE("/builtin-components", rt.auth(), rt.user(), rt.perm("/built-in-components/del"), rt.builtinComponentsDel)
pages.GET("/builtin-payloads", rt.auth(), rt.user(), rt.builtinPayloadsGets)
pages.GET("/builtin-payloads/cates", rt.auth(), rt.user(), rt.builtinPayloadcatesGet)
pages.POST("/builtin-payloads", rt.auth(), rt.user(), rt.perm("/components/add"), rt.builtinPayloadsAdd)
pages.PUT("/builtin-payloads", rt.auth(), rt.user(), rt.perm("/components/put"), rt.builtinPayloadsPut)
pages.DELETE("/builtin-payloads", rt.auth(), rt.user(), rt.perm("/components/del"), rt.builtinPayloadsDel)
pages.GET("/builtin-payload", rt.auth(), rt.user(), rt.builtinPayloadsGetByUUID)
pages.POST("/message-templates", rt.auth(), rt.user(), rt.perm("/notification-templates/add"), rt.messageTemplatesAdd)
pages.DELETE("/message-templates", rt.auth(), rt.user(), rt.perm("/notification-templates/del"), rt.messageTemplatesDel)
pages.PUT("/message-template/:id", rt.auth(), rt.user(), rt.perm("/notification-templates/put"), rt.messageTemplatePut)
pages.GET("/message-template/:id", rt.auth(), rt.user(), rt.perm("/notification-templates"), rt.messageTemplateGet)
pages.GET("/message-templates", rt.auth(), rt.user(), rt.messageTemplatesGet)
pages.POST("/events-message", rt.auth(), rt.user(), rt.eventsMessage)
pages.POST("/notify-rules", rt.auth(), rt.user(), rt.perm("/notification-rules/add"), rt.notifyRulesAdd)
pages.DELETE("/notify-rules", rt.auth(), rt.user(), rt.perm("/notification-rules/del"), rt.notifyRulesDel)
pages.PUT("/notify-rule/:id", rt.auth(), rt.user(), rt.perm("/notification-rules/put"), rt.notifyRulePut)
pages.GET("/notify-rule/:id", rt.auth(), rt.user(), rt.perm("/notification-rules"), rt.notifyRuleGet)
pages.GET("/notify-rules", rt.auth(), rt.user(), rt.perm("/notification-rules"), rt.notifyRulesGet)
pages.POST("/notify-rule/test", rt.auth(), rt.user(), rt.perm("/notification-rules"), rt.notifyTest)
pages.GET("/notify-rule/custom-params", rt.auth(), rt.user(), rt.perm("/notification-rules"), rt.notifyRuleCustomParamsGet)
pages.POST("/notify-rule/event-pipelines-tryrun", rt.auth(), rt.user(), rt.perm("/notification-rules/add"), rt.tryRunEventProcessorByNotifyRule)
pages.GET("/event-tagkeys", rt.auth(), rt.user(), rt.eventTagKeys)
pages.GET("/event-tagvalues", rt.auth(), rt.user(), rt.eventTagValues)
// Event pipeline related routes
pages.GET("/event-pipelines", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.eventPipelinesList)
pages.POST("/event-pipeline", rt.auth(), rt.user(), rt.perm("/event-pipelines/add"), rt.addEventPipeline)
pages.PUT("/event-pipeline", rt.auth(), rt.user(), rt.perm("/event-pipelines/put"), rt.updateEventPipeline)
pages.GET("/event-pipeline/:id", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.getEventPipeline)
pages.DELETE("/event-pipelines", rt.auth(), rt.user(), rt.perm("/event-pipelines/del"), rt.deleteEventPipelines)
pages.POST("/event-pipeline-tryrun", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.tryRunEventPipeline)
pages.POST("/event-processor-tryrun", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.tryRunEventProcessor)
// Trigger a workflow via API
pages.POST("/event-pipeline/:id/trigger", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.triggerEventPipelineByAPI)
// Execute a workflow as an SSE stream
pages.POST("/event-pipeline/:id/stream", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.streamEventPipeline)
// Event pipeline execution record routes
pages.GET("/event-pipeline-executions", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.listAllEventPipelineExecutions)
pages.GET("/event-pipeline/:id/executions", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.listEventPipelineExecutions)
pages.GET("/event-pipeline/:id/execution/:exec_id", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.getEventPipelineExecution)
pages.GET("/event-pipeline-execution/:exec_id", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.getEventPipelineExecution)
pages.GET("/event-pipeline/:id/execution-stats", rt.auth(), rt.user(), rt.perm("/event-pipelines"), rt.getEventPipelineExecutionStats)
pages.POST("/event-pipeline-executions/clean", rt.auth(), rt.user(), rt.admin(), rt.cleanEventPipelineExecutions)
pages.POST("/notify-channel-configs", rt.auth(), rt.user(), rt.perm("/notification-channels/add"), rt.notifyChannelsAdd)
pages.DELETE("/notify-channel-configs", rt.auth(), rt.user(), rt.perm("/notification-channels/del"), rt.notifyChannelsDel)
pages.PUT("/notify-channel-config/:id", rt.auth(), rt.user(), rt.perm("/notification-channels/put"), rt.notifyChannelPut)
pages.GET("/notify-channel-config/:id", rt.auth(), rt.user(), rt.perm("/notification-channels"), rt.notifyChannelGet)
pages.GET("/notify-channel-configs", rt.auth(), rt.user(), rt.perm("/notification-channels"), rt.notifyChannelsGet)
pages.GET("/simplified-notify-channel-configs", rt.notifyChannelsGetForNormalUser)
pages.GET("/flashduty-channel-list/:id", rt.auth(), rt.user(), rt.flashDutyNotifyChannelsGet)
pages.GET("/pagerduty-integration-key/:id/:service_id/:integration_id", rt.auth(), rt.user(), rt.pagerDutyIntegrationKeyGet)
pages.GET("/pagerduty-service-list/:id", rt.auth(), rt.user(), rt.pagerDutyNotifyServicesGet)
pages.GET("/notify-channel-config", rt.auth(), rt.user(), rt.notifyChannelGetBy)
pages.GET("/notify-channel-config/idents", rt.notifyChannelIdentsGet)
// saved view routes for persisting query conditions
pages.GET("/saved-views", rt.auth(), rt.user(), rt.savedViewGets)
pages.POST("/saved-views", rt.auth(), rt.user(), rt.savedViewAdd)
pages.PUT("/saved-view/:id", rt.auth(), rt.user(), rt.savedViewPut)
pages.DELETE("/saved-view/:id", rt.auth(), rt.user(), rt.savedViewDel)
pages.POST("/saved-view/:id/favorite", rt.auth(), rt.user(), rt.savedViewFavoriteAdd)
pages.DELETE("/saved-view/:id/favorite", rt.auth(), rt.user(), rt.savedViewFavoriteDel)
pages.POST("/builtin-payloads", rt.auth(), rt.user(), rt.perm("/built-in-components/add"), rt.builtinPayloadsAdd)
pages.GET("/builtin-payload/:id", rt.auth(), rt.user(), rt.perm("/built-in-components"), rt.builtinPayloadGet)
pages.PUT("/builtin-payloads", rt.auth(), rt.user(), rt.perm("/built-in-components/put"), rt.builtinPayloadsPut)
pages.DELETE("/builtin-payloads", rt.auth(), rt.user(), rt.perm("/built-in-components/del"), rt.builtinPayloadsDel)
pages.GET("/builtin-payload", rt.auth(), rt.user(), rt.builtinPayloadsGetByUUIDOrID)
}
r.GET("/api/n9e/versions", func(c *gin.Context) {
@@ -684,8 +521,6 @@ func (rt *Router) Config(r *gin.Engine) {
service.PUT("/targets/note", rt.targetUpdateNoteByService)
service.PUT("/targets/bgid", rt.targetUpdateBgidByService)
service.POST("/targets-of-host-query", rt.targetsOfHostQuery)
service.POST("/alert-rules", rt.alertRuleAddByService)
service.POST("/alert-rule-add", rt.alertRuleAddOneByService)
service.DELETE("/alert-rules", rt.alertRuleDelByService)
@@ -698,7 +533,6 @@ func (rt *Router) Config(r *gin.Engine) {
service.GET("/busi-groups", rt.busiGroupGetsByService)
service.GET("/datasources", rt.datasourceGetsByService)
service.GET("/datasource-rsa-config", rt.datasourceRsaConfigGet)
service.GET("/datasource-ids", rt.getDatasourceIds)
service.POST("/server-heartbeat", rt.serverHeartbeat)
service.GET("/servers-active", rt.serversActive)
@@ -706,7 +540,6 @@ func (rt *Router) Config(r *gin.Engine) {
service.GET("/recording-rules", rt.recordingRuleGetsByService)
service.GET("/alert-mutes", rt.alertMuteGets)
service.GET("/active-alert-mutes", rt.activeAlertMuteGets)
service.POST("/alert-mutes", rt.alertMuteAddByService)
service.DELETE("/alert-mutes", rt.alertMuteDel)
@@ -745,27 +578,6 @@ func (rt *Router) Config(r *gin.Engine) {
service.GET("/alert-cur-events-del-by-hash", rt.alertCurEventDelByHash)
service.POST("/center/heartbeat", rt.heartbeat)
service.GET("/es-index-pattern-list", rt.esIndexPatternGetList)
service.GET("/notify-rules", rt.notifyRulesGetByService)
service.GET("/notify-channels", rt.notifyChannelConfigGets)
service.GET("/message-templates", rt.messageTemplateGets)
service.GET("/event-pipelines", rt.eventPipelinesListByService)
service.POST("/event-pipeline/:id/trigger", rt.triggerEventPipelineByService)
service.POST("/event-pipeline/:id/stream", rt.streamEventPipelineByService)
service.POST("/event-pipeline-execution", rt.eventPipelineExecutionAdd)
// Endpoints for phone-number encrypted storage configuration
service.POST("/users/phone/encrypt", rt.usersPhoneEncrypt)
service.POST("/users/phone/decrypt", rt.usersPhoneDecrypt)
service.POST("/users/phone/refresh-encryption-config", rt.usersPhoneDecryptRefresh)
service.GET("/builtin-components", rt.builtinComponentsGets)
service.GET("/builtin-payloads", rt.builtinPayloadsGets)
}
}

View File

@@ -1,747 +0,0 @@
package router
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"path/filepath"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"gopkg.in/yaml.v3"
)
// ========================
// AI Agent handlers
// ========================
func (rt *Router) aiAgentGets(c *gin.Context) {
lst, err := models.AIAgentGets(rt.Ctx)
ginx.Dangerous(err)
ginx.NewRender(c).Data(lst, nil)
}
func (rt *Router) aiAgentGet(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.AIAgentGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "ai agent not found")
}
ginx.NewRender(c).Data(obj, nil)
}
func (rt *Router) aiAgentAdd(c *gin.Context) {
var obj models.AIAgent
ginx.BindJSON(c, &obj)
ginx.Dangerous(obj.Verify())
me := c.MustGet("user").(*models.User)
ginx.Dangerous(obj.Create(rt.Ctx, me.Username))
ginx.NewRender(c).Data(obj.Id, nil)
}
func (rt *Router) aiAgentPut(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.AIAgentGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "ai agent not found")
}
var ref models.AIAgent
ginx.BindJSON(c, &ref)
ginx.Dangerous(ref.Verify())
me := c.MustGet("user").(*models.User)
ginx.NewRender(c).Message(obj.Update(rt.Ctx, me.Username, ref))
}
func (rt *Router) aiAgentDel(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.AIAgentGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "ai agent not found")
}
ginx.NewRender(c).Message(obj.Delete(rt.Ctx))
}
// ========================
// AI Skill handlers
// ========================
func (rt *Router) aiSkillGets(c *gin.Context) {
search := ginx.QueryStr(c, "search", "")
lst, err := models.AISkillGets(rt.Ctx, search)
ginx.Dangerous(err)
ginx.NewRender(c).Data(lst, nil)
}
func (rt *Router) aiSkillGet(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.AISkillGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "ai skill not found")
}
// Include associated files (without content)
files, err := models.AISkillFileGets(rt.Ctx, id)
ginx.Dangerous(err)
obj.Files = files
ginx.NewRender(c).Data(obj, nil)
}
func (rt *Router) aiSkillAdd(c *gin.Context) {
var obj models.AISkill
ginx.BindJSON(c, &obj)
ginx.Dangerous(obj.Verify())
me := c.MustGet("user").(*models.User)
obj.CreatedBy = me.Username
obj.UpdatedBy = me.Username
ginx.Dangerous(obj.Create(rt.Ctx))
ginx.NewRender(c).Data(obj.Id, nil)
}
func (rt *Router) aiSkillPut(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.AISkillGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "ai skill not found")
}
var ref models.AISkill
ginx.BindJSON(c, &ref)
ginx.Dangerous(ref.Verify())
me := c.MustGet("user").(*models.User)
ref.UpdatedBy = me.Username
ginx.NewRender(c).Message(obj.Update(rt.Ctx, ref))
}
func (rt *Router) aiSkillDel(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.AISkillGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "ai skill not found")
}
// Cascade delete skill files
ginx.Dangerous(models.AISkillFileDeleteBySkillId(rt.Ctx, id))
ginx.NewRender(c).Message(obj.Delete(rt.Ctx))
}
func (rt *Router) aiSkillImport(c *gin.Context) {
file, header, err := c.Request.FormFile("file")
ginx.Dangerous(err)
defer file.Close()
ext := strings.ToLower(filepath.Ext(header.Filename))
if ext != ".md" {
ginx.Bomb(http.StatusBadRequest, "only .md files are supported")
}
content, err := io.ReadAll(file)
ginx.Dangerous(err)
meta, instructions := parseSkillMarkdown(string(content), header.Filename, ext)
me := c.MustGet("user").(*models.User)
skill := models.AISkill{
Name: meta.Name,
Description: meta.Description,
Instructions: instructions,
License: meta.License,
Compatibility: meta.Compatibility,
Metadata: meta.Metadata,
AllowedTools: meta.AllowedTools,
CreatedBy: me.Username,
UpdatedBy: me.Username,
}
ginx.Dangerous(skill.Create(rt.Ctx))
ginx.NewRender(c).Data(skill.Id, nil)
}
// parseSkillMarkdown parses a SKILL.md file with optional YAML frontmatter.
// Frontmatter format:
//
// ---
// name: my-skill
// description: what this skill does
// ---
// # Actual instructions content...
type skillFrontmatter struct {
Name string `yaml:"name"`
Description string `yaml:"description"`
License string `yaml:"license"`
Compatibility string `yaml:"compatibility"`
Metadata map[string]string `yaml:"metadata"`
AllowedTools string `yaml:"allowed-tools"`
}
func parseSkillMarkdown(content, filename, ext string) (meta skillFrontmatter, instructions string) {
text := strings.TrimSpace(content)
// Try to parse YAML frontmatter (between --- delimiters)
if strings.HasPrefix(text, "---") {
endIdx := strings.Index(text[3:], "\n---")
if endIdx >= 0 {
frontmatter := text[3 : 3+endIdx]
body := strings.TrimSpace(text[3+endIdx+4:]) // skip past closing ---
if yaml.Unmarshal([]byte(frontmatter), &meta) == nil && meta.Name != "" {
return meta, body
}
}
}
// No valid frontmatter, fallback: filename as name, entire content as instructions
meta.Name = strings.TrimSuffix(filename, ext)
return meta, content
}
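
A quick in-package sketch of what parseSkillMarkdown returns for a typical SKILL.md with frontmatter (the content below is illustrative):

func exampleParseSkill() {
    content := `---
name: restart-runbook
description: how to restart the service safely
---
# Steps
1. drain traffic
2. restart the process`

    meta, instructions := parseSkillMarkdown(content, "SKILL.md", ".md")
    // meta.Name == "restart-runbook", meta.Description comes from the frontmatter,
    // and instructions holds only the body after the closing "---".
    _, _ = meta, instructions
}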
// ========================
// AI Skill File handlers
// ========================
func (rt *Router) aiSkillFileAdd(c *gin.Context) {
skillId := ginx.UrlParamInt64(c, "id")
// Verify skill exists
skill, err := models.AISkillGetById(rt.Ctx, skillId)
ginx.Dangerous(err)
if skill == nil {
ginx.Bomb(http.StatusNotFound, "ai skill not found")
}
file, header, err := c.Request.FormFile("file")
ginx.Dangerous(err)
defer file.Close()
// Validate file extension
ext := strings.ToLower(filepath.Ext(header.Filename))
allowed := map[string]bool{".md": true, ".txt": true, ".json": true, ".yaml": true, ".yml": true, ".csv": true}
if !allowed[ext] {
ginx.Bomb(http.StatusBadRequest, "file type not allowed, only .md/.txt/.json/.yaml/.csv")
}
// Validate file size (2MB max)
if header.Size > 2*1024*1024 {
ginx.Bomb(http.StatusBadRequest, "file size exceeds 2MB limit")
}
content, err := io.ReadAll(file)
ginx.Dangerous(err)
me := c.MustGet("user").(*models.User)
skillFile := models.AISkillFile{
SkillId: skillId,
Name: header.Filename,
Content: string(content),
CreatedBy: me.Username,
}
ginx.Dangerous(skillFile.Create(rt.Ctx))
ginx.NewRender(c).Data(skillFile.Id, nil)
}
func (rt *Router) aiSkillFileGet(c *gin.Context) {
fileId := ginx.UrlParamInt64(c, "fileId")
obj, err := models.AISkillFileGetById(rt.Ctx, fileId)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "file not found")
}
ginx.NewRender(c).Data(obj, nil)
}
func (rt *Router) aiSkillFileDel(c *gin.Context) {
fileId := ginx.UrlParamInt64(c, "fileId")
obj, err := models.AISkillFileGetById(rt.Ctx, fileId)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "file not found")
}
ginx.NewRender(c).Message(obj.Delete(rt.Ctx))
}
// ========================
// MCP Server handlers
// ========================
func (rt *Router) mcpServerGets(c *gin.Context) {
lst, err := models.MCPServerGets(rt.Ctx)
ginx.Dangerous(err)
ginx.NewRender(c).Data(lst, nil)
}
func (rt *Router) mcpServerGet(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.MCPServerGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "mcp server not found")
}
ginx.NewRender(c).Data(obj, nil)
}
func (rt *Router) mcpServerAdd(c *gin.Context) {
var obj models.MCPServer
ginx.BindJSON(c, &obj)
ginx.Dangerous(obj.Verify())
me := c.MustGet("user").(*models.User)
obj.CreatedBy = me.Username
obj.UpdatedBy = me.Username
ginx.Dangerous(obj.Create(rt.Ctx))
ginx.NewRender(c).Data(obj.Id, nil)
}
func (rt *Router) mcpServerPut(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.MCPServerGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "mcp server not found")
}
var ref models.MCPServer
ginx.BindJSON(c, &ref)
ginx.Dangerous(ref.Verify())
me := c.MustGet("user").(*models.User)
ref.UpdatedBy = me.Username
ginx.NewRender(c).Message(obj.Update(rt.Ctx, ref))
}
func (rt *Router) mcpServerDel(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.MCPServerGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "mcp server not found")
}
ginx.NewRender(c).Message(obj.Delete(rt.Ctx))
}
// ========================
// AI LLM Config handlers
// ========================
func (rt *Router) aiLLMConfigGets(c *gin.Context) {
lst, err := models.AILLMConfigGets(rt.Ctx)
ginx.Dangerous(err)
ginx.NewRender(c).Data(lst, nil)
}
func (rt *Router) aiLLMConfigGet(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.AILLMConfigGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "ai llm config not found")
}
ginx.NewRender(c).Data(obj, nil)
}
func (rt *Router) aiLLMConfigAdd(c *gin.Context) {
var obj models.AILLMConfig
ginx.BindJSON(c, &obj)
ginx.Dangerous(obj.Verify())
me := c.MustGet("user").(*models.User)
ginx.Dangerous(obj.Create(rt.Ctx, me.Username))
ginx.NewRender(c).Data(obj.Id, nil)
}
func (rt *Router) aiLLMConfigPut(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.AILLMConfigGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "ai llm config not found")
}
var ref models.AILLMConfig
ginx.BindJSON(c, &ref)
ginx.Dangerous(ref.Verify())
me := c.MustGet("user").(*models.User)
ginx.NewRender(c).Message(obj.Update(rt.Ctx, me.Username, ref))
}
func (rt *Router) aiLLMConfigDel(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.AILLMConfigGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "ai llm config not found")
}
ginx.NewRender(c).Message(obj.Delete(rt.Ctx))
}
func (rt *Router) aiLLMConfigTest(c *gin.Context) {
var body struct {
APIType string `json:"api_type"`
APIURL string `json:"api_url"`
APIKey string `json:"api_key"`
Model string `json:"model"`
ExtraConfig models.LLMExtraConfig `json:"extra_config"`
}
ginx.BindJSON(c, &body)
if body.APIType == "" || body.APIURL == "" || body.APIKey == "" || body.Model == "" {
ginx.Bomb(http.StatusBadRequest, "api_type, api_url, api_key, model are required")
}
obj := &models.AILLMConfig{
APIType: body.APIType,
APIURL: body.APIURL,
APIKey: body.APIKey,
Model: body.Model,
ExtraConfig: body.ExtraConfig,
}
start := time.Now()
testErr := testAIAgent(obj)
durationMs := time.Since(start).Milliseconds()
result := gin.H{
"success": testErr == nil,
"duration_ms": durationMs,
}
ginx.NewRender(c).Data(result, testErr)
}
// ========================
// AI Agent test
// ========================
func (rt *Router) aiAgentTest(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
agent, err := models.AIAgentGetById(rt.Ctx, id)
ginx.Dangerous(err)
if agent == nil {
ginx.Bomb(http.StatusNotFound, "ai agent not found")
}
llmCfg, err := models.AILLMConfigGetById(rt.Ctx, agent.LLMConfigId)
ginx.Dangerous(err)
if llmCfg == nil {
ginx.Bomb(http.StatusBadRequest, "referenced LLM config not found")
}
start := time.Now()
testErr := testAIAgent(llmCfg)
durationMs := time.Since(start).Milliseconds()
result := gin.H{
"success": testErr == nil,
"duration_ms": durationMs,
}
if testErr != nil {
result["error"] = testErr.Error()
}
ginx.NewRender(c).Data(result, nil)
}
func testAIAgent(p *models.AILLMConfig) error {
extra := p.ExtraConfig
// Build HTTP client with ExtraConfig settings
timeout := 30 * time.Second
if extra.TimeoutSeconds > 0 {
timeout = time.Duration(extra.TimeoutSeconds) * time.Second
}
transport := &http.Transport{}
if extra.SkipTLSVerify {
transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
}
if extra.Proxy != "" {
if proxyURL, err := url.Parse(extra.Proxy); err == nil {
transport.Proxy = http.ProxyURL(proxyURL)
}
}
client := &http.Client{Timeout: timeout, Transport: transport}
var reqURL string
var reqBody []byte
hdrs := map[string]string{"Content-Type": "application/json"}
switch p.APIType {
case "openai":
base := strings.TrimRight(p.APIURL, "/")
if strings.HasSuffix(base, "/chat/completions") {
reqURL = base
} else {
reqURL = base + "/chat/completions"
}
reqBody, _ = json.Marshal(map[string]interface{}{
"model": p.Model,
"messages": []map[string]string{{"role": "user", "content": "Hi"}},
"max_tokens": 5,
})
hdrs["Authorization"] = "Bearer " + p.APIKey
case "claude":
reqURL = strings.TrimRight(p.APIURL, "/") + "/v1/messages"
reqBody, _ = json.Marshal(map[string]interface{}{
"model": p.Model,
"messages": []map[string]string{{"role": "user", "content": "Hi"}},
"max_tokens": 5,
})
hdrs["x-api-key"] = p.APIKey
hdrs["anthropic-version"] = "2023-06-01"
case "gemini":
reqURL = strings.TrimRight(p.APIURL, "/") + "/v1beta/models/" + p.Model + ":generateContent?key=" + p.APIKey
reqBody, _ = json.Marshal(map[string]interface{}{
"contents": []map[string]interface{}{
{"parts": []map[string]string{{"text": "Hi"}}},
},
})
default:
return fmt.Errorf("unsupported api_type: %s", p.APIType)
}
req, err := http.NewRequest("POST", reqURL, bytes.NewReader(reqBody))
if err != nil {
return err
}
for k, v := range hdrs {
req.Header.Set(k, v)
}
// Apply custom headers from ExtraConfig
for k, v := range extra.CustomHeaders {
req.Header.Set(k, v)
}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode >= 400 {
body, _ := io.ReadAll(resp.Body)
if len(body) > 500 {
body = body[:500]
}
return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
}
return nil
}
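// Illustrative sketch (placeholder URL, key and model): exercising the
// connectivity check above against an OpenAI-compatible endpoint. Only the
// field names come from the models.AILLMConfig usage in testAIAgent.
func exampleTestOpenAICompatibleLLM() error {
	cfg := &models.AILLMConfig{
		APIType: "openai",
		// "/chat/completions" is appended automatically when the base URL lacks it.
		APIURL: "https://api.example.com/v1",
		APIKey: "sk-placeholder",
		Model:  "gpt-4o-mini",
	}
	// nil means the provider answered the minimal "Hi" probe with a non-error status.
	return testAIAgent(cfg)
}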
// ========================
// MCP Server test & tools
// ========================
func (rt *Router) mcpServerTest(c *gin.Context) {
var body struct {
URL string `json:"url"`
Headers map[string]string `json:"headers"`
}
ginx.BindJSON(c, &body)
if body.URL == "" {
ginx.Bomb(http.StatusBadRequest, "url is required")
}
obj := &models.MCPServer{
URL: body.URL,
Headers: body.Headers,
}
start := time.Now()
tools, testErr := listMCPTools(obj)
durationMs := time.Since(start).Milliseconds()
result := gin.H{
"success": testErr == nil,
"duration_ms": durationMs,
"tool_count": len(tools),
}
if testErr != nil {
result["error"] = testErr.Error()
}
ginx.NewRender(c).Data(result, nil)
}
func (rt *Router) mcpServerTools(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.MCPServerGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "mcp server not found")
}
tools, err := listMCPTools(obj)
ginx.Dangerous(err)
ginx.NewRender(c).Data(tools, nil)
}
type mcpTool struct {
Name string `json:"name"`
Description string `json:"description"`
}
func listMCPTools(s *models.MCPServer) ([]mcpTool, error) {
client := &http.Client{Timeout: 30 * time.Second}
hdrs := s.Headers
// Step 1: Initialize
initResp, initSessionID, err := sendMCPRPC(client, s.URL, hdrs, "", 1, "initialize", map[string]interface{}{
"protocolVersion": "2024-11-05",
"capabilities": map[string]interface{}{},
"clientInfo": map[string]interface{}{"name": "nightingale", "version": "1.0.0"},
})
if err != nil {
return nil, fmt.Errorf("initialize: %v", err)
}
_ = initResp
// Send initialized notification
sendMCPRPC(client, s.URL, hdrs, initSessionID, 0, "notifications/initialized", map[string]interface{}{})
// Step 2: List tools
toolsResp, _, err := sendMCPRPC(client, s.URL, hdrs, initSessionID, 2, "tools/list", map[string]interface{}{})
if err != nil {
return nil, fmt.Errorf("tools/list: %v", err)
}
if toolsResp == nil || toolsResp.Result == nil {
return []mcpTool{}, nil
}
toolsRaw, ok := toolsResp.Result["tools"]
if !ok {
return []mcpTool{}, nil
}
toolsJSON, _ := json.Marshal(toolsRaw)
var tools []mcpTool
json.Unmarshal(toolsJSON, &tools)
return tools, nil
}
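// Illustrative sketch (placeholder URL and header): listing tools from a
// streamable-HTTP MCP server via the helper above, which runs the
// initialize -> notifications/initialized -> tools/list handshake internally.
func exampleListMCPTools() {
	srv := &models.MCPServer{
		URL:     "https://mcp.example.com/mcp",
		Headers: map[string]string{"Authorization": "Bearer placeholder"},
	}
	tools, err := listMCPTools(srv)
	if err != nil {
		fmt.Printf("tools/list failed: %v\n", err)
		return
	}
	for _, t := range tools {
		fmt.Printf("tool %s: %s\n", t.Name, t.Description)
	}
}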
type jsonRPCResponse struct {
JSONRPC string `json:"jsonrpc"`
ID interface{} `json:"id"`
Result map[string]interface{} `json:"result"`
Error *jsonRPCError `json:"error"`
}
type jsonRPCError struct {
Code int `json:"code"`
Message string `json:"message"`
}
func sendMCPRPC(client *http.Client, serverURL string, hdrs map[string]string, sessionID string, id int, method string, params interface{}) (*jsonRPCResponse, string, error) {
body := map[string]interface{}{
"jsonrpc": "2.0",
"method": method,
"params": params,
}
if id > 0 {
body["id"] = id
}
reqBody, _ := json.Marshal(body)
req, err := http.NewRequest("POST", serverURL, bytes.NewReader(reqBody))
if err != nil {
return nil, "", err
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json, text/event-stream")
if sessionID != "" {
req.Header.Set("Mcp-Session-Id", sessionID)
}
for k, v := range hdrs {
req.Header.Set(k, v)
}
resp, err := client.Do(req)
if err != nil {
return nil, "", err
}
defer resp.Body.Close()
newSessionID := resp.Header.Get("Mcp-Session-Id")
if newSessionID == "" {
newSessionID = sessionID
}
// Notification (no id) - no response body expected
if id <= 0 {
return nil, newSessionID, nil
}
if resp.StatusCode >= 400 {
respBody, _ := io.ReadAll(resp.Body)
if len(respBody) > 500 {
respBody = respBody[:500]
}
return nil, newSessionID, fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(respBody))
}
respBody, err := io.ReadAll(resp.Body)
if err != nil {
return nil, newSessionID, err
}
// Handle SSE response
contentType := resp.Header.Get("Content-Type")
if strings.Contains(contentType, "text/event-stream") {
for _, line := range strings.Split(string(respBody), "\n") {
if strings.HasPrefix(line, "data: ") {
data := strings.TrimPrefix(line, "data: ")
var rpcResp jsonRPCResponse
if json.Unmarshal([]byte(data), &rpcResp) == nil && (rpcResp.Result != nil || rpcResp.Error != nil) {
if rpcResp.Error != nil {
return &rpcResp, newSessionID, fmt.Errorf("RPC error %d: %s", rpcResp.Error.Code, rpcResp.Error.Message)
}
return &rpcResp, newSessionID, nil
}
}
}
return nil, newSessionID, fmt.Errorf("no valid JSON-RPC response in SSE stream")
}
// Handle JSON response
var rpcResp jsonRPCResponse
if err := json.Unmarshal(respBody, &rpcResp); err != nil {
if len(respBody) > 200 {
respBody = respBody[:200]
}
return nil, newSessionID, fmt.Errorf("invalid response: %s", string(respBody))
}
if rpcResp.Error != nil {
return &rpcResp, newSessionID, fmt.Errorf("RPC error %d: %s", rpcResp.Error.Code, rpcResp.Error.Message)
}
return &rpcResp, newSessionID, nil
}
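// For orientation, a sketch of the wire exchange sendMCPRPC performs (values
// are placeholders): the request is a JSON-RPC 2.0 envelope, the reply is
// either plain JSON or an SSE stream scanned for "data: " payloads.
//
//	POST <serverURL>
//	Content-Type: application/json
//	Accept: application/json, text/event-stream
//	Mcp-Session-Id: <session id from initialize, if any>
//
//	{"jsonrpc":"2.0","id":2,"method":"tools/list","params":{}}
//
//	data: {"jsonrpc":"2.0","id":2,"result":{"tools":[{"name":"list_metrics","description":"..."}]}}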

View File

@@ -1,114 +0,0 @@
package router
import (
"net/http"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
)
func (rt *Router) aiConversationGets(c *gin.Context) {
me := c.MustGet("user").(*models.User)
lst, err := models.AIConversationGetsByUserId(rt.Ctx, me.Id)
ginx.Dangerous(err)
ginx.NewRender(c).Data(lst, nil)
}
func (rt *Router) aiConversationAdd(c *gin.Context) {
var obj models.AIConversation
ginx.BindJSON(c, &obj)
me := c.MustGet("user").(*models.User)
obj.UserId = me.Id
ginx.Dangerous(obj.Verify())
ginx.Dangerous(obj.Create(rt.Ctx))
ginx.NewRender(c).Data(obj, nil)
}
func (rt *Router) aiConversationGet(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.AIConversationGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "conversation not found")
}
me := c.MustGet("user").(*models.User)
if obj.UserId != me.Id {
ginx.Bomb(http.StatusForbidden, "forbidden")
}
messages, err := models.AIConversationMessageGetsByConversationId(rt.Ctx, id)
ginx.Dangerous(err)
ginx.NewRender(c).Data(gin.H{
"conversation": obj,
"messages": messages,
}, nil)
}
func (rt *Router) aiConversationPut(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.AIConversationGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "conversation not found")
}
me := c.MustGet("user").(*models.User)
if obj.UserId != me.Id {
ginx.Bomb(http.StatusForbidden, "forbidden")
}
var body struct {
Title string `json:"title"`
}
ginx.BindJSON(c, &body)
ginx.NewRender(c).Message(obj.Update(rt.Ctx, body.Title))
}
func (rt *Router) aiConversationDel(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.AIConversationGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "conversation not found")
}
me := c.MustGet("user").(*models.User)
if obj.UserId != me.Id {
ginx.Bomb(http.StatusForbidden, "forbidden")
}
ginx.NewRender(c).Message(obj.Delete(rt.Ctx))
}
func (rt *Router) aiConversationMessageAdd(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
obj, err := models.AIConversationGetById(rt.Ctx, id)
ginx.Dangerous(err)
if obj == nil {
ginx.Bomb(http.StatusNotFound, "conversation not found")
}
me := c.MustGet("user").(*models.User)
if obj.UserId != me.Id {
ginx.Bomb(http.StatusForbidden, "forbidden")
}
var msgs []models.AIConversationMessage
ginx.BindJSON(c, &msgs)
for i := range msgs {
msgs[i].ConversationId = id
ginx.Dangerous(msgs[i].Create(rt.Ctx))
}
// Update conversation timestamp
obj.UpdateTime(rt.Ctx)
ginx.NewRender(c).Message(nil)
}

View File

@@ -1,345 +0,0 @@
package router
import (
"encoding/json"
"fmt"
"io"
"net/http"
"time"
"github.com/ccfos/nightingale/v6/aiagent"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/ccfos/nightingale/v6/pkg/prom"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/logger"
)
// AIChatRequest is the generic chat request dispatched by action_key.
type AIChatRequest struct {
ActionKey string `json:"action_key"` // e.g. "query_generator"
UserInput string `json:"user_input"`
History []aiagent.ChatMessage `json:"history,omitempty"`
Context map[string]interface{} `json:"context,omitempty"` // action-specific params
}
// actionHandler defines how each action_key is processed.
type actionHandler struct {
useCase string // maps to AIAgent.UseCase for finding the right agent config
validate func(req *AIChatRequest) error
selectTools func(req *AIChatRequest) []string
buildPrompt func(req *AIChatRequest) string
buildInputs func(req *AIChatRequest) map[string]string
}
var actionRegistry = map[string]*actionHandler{
"query_generator": {
useCase: "chat",
validate: validateQueryGenerator,
selectTools: selectQueryGeneratorTools,
buildPrompt: buildQueryGeneratorPrompt,
buildInputs: buildQueryGeneratorInputs,
},
}
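// Illustrative sketch: a second action_key would be registered with the same
// shape as "query_generator" above. The "alert_explainer" key, its validation
// and its prompt are hypothetical, shown only to make the registry contract concrete.
func registerExampleAlertExplainer() {
	actionRegistry["alert_explainer"] = &actionHandler{
		useCase: "chat",
		validate: func(req *AIChatRequest) error {
			if ctxStr(req.Context, "event_id") == "" {
				return fmt.Errorf("context.event_id is required")
			}
			return nil
		},
		buildPrompt: func(req *AIChatRequest) string {
			return "Explain the following alert event: " + req.UserInput
		},
		buildInputs: func(req *AIChatRequest) map[string]string {
			return map[string]string{"user_input": req.UserInput}
		},
	}
}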
// --- query_generator action ---
func ctxStr(ctx map[string]interface{}, key string) string {
if v, ok := ctx[key]; ok {
if s, ok := v.(string); ok {
return s
}
}
return ""
}
func ctxInt64(ctx map[string]interface{}, key string) int64 {
if v, ok := ctx[key]; ok {
switch n := v.(type) {
case float64:
return int64(n)
case int64:
return n
case json.Number:
i, _ := n.Int64()
return i
}
}
return 0
}
func validateQueryGenerator(req *AIChatRequest) error {
dsType := ctxStr(req.Context, "datasource_type")
dsID := ctxInt64(req.Context, "datasource_id")
if dsType == "" {
return fmt.Errorf("context.datasource_type is required")
}
if dsID == 0 {
return fmt.Errorf("context.datasource_id is required")
}
return nil
}
func selectQueryGeneratorTools(req *AIChatRequest) []string {
dsType := ctxStr(req.Context, "datasource_type")
switch dsType {
case "prometheus":
return []string{"list_metrics", "get_metric_labels"}
case "mysql", "doris", "ck", "clickhouse", "pgsql", "postgresql":
return []string{"list_databases", "list_tables", "describe_table"}
default:
return nil
}
}
func buildQueryGeneratorPrompt(req *AIChatRequest) string {
dsType := ctxStr(req.Context, "datasource_type")
dbName := ctxStr(req.Context, "database_name")
tableName := ctxStr(req.Context, "table_name")
switch dsType {
case "prometheus":
return fmt.Sprintf(`You are a PromQL expert. The user wants to query Prometheus metrics.
User request: %s
Please use the available tools to explore the metrics and generate the correct PromQL query.
- First use list_metrics to find relevant metrics
- Then use get_metric_labels to understand the label structure
- Finally provide the PromQL query as your Final Answer
Your Final Answer MUST be a valid JSON object with these fields:
{"query": "<the PromQL query>", "explanation": "<brief explanation in the user's language>"}`, req.UserInput)
default: // SQL-based datasources
dbContext := ""
if dbName != "" {
dbContext += fmt.Sprintf("\nTarget database: %s", dbName)
}
if tableName != "" {
dbContext += fmt.Sprintf("\nTarget table: %s", tableName)
}
return fmt.Sprintf(`You are a SQL expert for %s databases. The user wants to query data.
%s
User request: %s
Please use the available tools to explore the database schema and generate the correct SQL query.
- Use list_databases to see available databases
- Use list_tables to see tables in the target database
- Use describe_table to understand the table structure
- Finally provide the SQL query as your Final Answer
Your Final Answer MUST be a valid JSON object with these fields:
{"query": "<the SQL query>", "explanation": "<brief explanation in the user's language>"}`, dsType, dbContext, req.UserInput)
}
}
func buildQueryGeneratorInputs(req *AIChatRequest) map[string]string {
inputs := map[string]string{
"user_input": req.UserInput,
}
for _, key := range []string{"datasource_type", "datasource_id", "database_name", "table_name"} {
if v := ctxStr(req.Context, key); v != "" {
inputs[key] = v
}
}
// datasource_id may be a number in JSON
if inputs["datasource_id"] == "" {
if id := ctxInt64(req.Context, "datasource_id"); id > 0 {
inputs["datasource_id"] = fmt.Sprintf("%d", id)
}
}
return inputs
}
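// Illustrative sketch of a query_generator request body as the generic handler
// below expects it (the datasource id and the user text are placeholders):
//
//	{
//	  "action_key": "query_generator",
//	  "user_input": "p99 latency of the api service over the last hour",
//	  "context": {
//	    "datasource_type": "prometheus",
//	    "datasource_id": 1
//	  }
//	}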
// --- generic handler ---
func (rt *Router) aiChat(c *gin.Context) {
if !rt.Center.AIAgent.Enable {
ginx.Bomb(http.StatusServiceUnavailable, "AI Agent is not enabled")
return
}
var req AIChatRequest
ginx.BindJSON(c, &req)
if req.UserInput == "" {
ginx.Bomb(http.StatusBadRequest, "user_input is required")
return
}
if req.ActionKey == "" {
ginx.Bomb(http.StatusBadRequest, "action_key is required")
return
}
if req.Context == nil {
req.Context = make(map[string]interface{})
}
handler, ok := actionRegistry[req.ActionKey]
if !ok {
ginx.Bomb(http.StatusBadRequest, "unsupported action_key: %s", req.ActionKey)
return
}
logger.Infof("[AIChat] action=%s, user_input=%q", req.ActionKey, truncStr(req.UserInput, 100))
// Action-specific validation
if handler.validate != nil {
if err := handler.validate(&req); err != nil {
ginx.Bomb(http.StatusBadRequest, err.Error())
return
}
}
// Find AI agent by use_case
agent, err := models.AIAgentGetByUseCase(rt.Ctx, handler.useCase)
if err != nil || agent == nil {
ginx.Bomb(http.StatusBadRequest, "no AI agent configured for use_case=%s", handler.useCase)
return
}
// Resolve LLM config
llmCfg, err := models.AILLMConfigGetById(rt.Ctx, agent.LLMConfigId)
if err != nil || llmCfg == nil {
ginx.Bomb(http.StatusBadRequest, "referenced LLM config not found")
return
}
agent.LLMConfig = llmCfg
// Select tools
var tools []aiagent.AgentTool
if handler.selectTools != nil {
toolNames := handler.selectTools(&req)
if toolNames != nil {
tools = aiagent.GetBuiltinToolDefs(toolNames)
}
}
// Parse extra config
extraConfig := llmCfg.ExtraConfig
timeout := 120000
if extraConfig.TimeoutSeconds > 0 {
timeout = extraConfig.TimeoutSeconds * 1000
}
// Build prompt
userPrompt := ""
if handler.buildPrompt != nil {
userPrompt = handler.buildPrompt(&req)
}
// Build workflow inputs
inputs := map[string]string{"user_input": req.UserInput}
if handler.buildInputs != nil {
inputs = handler.buildInputs(&req)
}
// Create agent
agentCfg := aiagent.NewAgent(&aiagent.AIAgentConfig{
Provider: llmCfg.APIType,
LLMURL: llmCfg.APIURL,
Model: llmCfg.Model,
APIKey: llmCfg.APIKey,
Headers: extraConfig.CustomHeaders,
AgentMode: aiagent.AgentModeReAct,
Tools: tools,
Timeout: timeout,
Stream: true,
UserPromptTemplate: userPrompt,
SkipSSLVerify: extraConfig.SkipTLSVerify,
Proxy: extraConfig.Proxy,
Temperature: extraConfig.Temperature,
MaxTokens: extraConfig.MaxTokens,
})
// Inject PromClient getter
aiagent.SetPromClientGetter(func(dsId int64) prom.API {
return rt.PromClients.GetCli(dsId)
})
// Streaming setup
streamChan := make(chan *models.StreamChunk, 100)
wfCtx := &models.WorkflowContext{
Stream: true,
StreamChan: streamChan,
Inputs: inputs,
}
c.Header("Content-Type", "text/event-stream")
c.Header("Cache-Control", "no-cache")
c.Header("Connection", "keep-alive")
c.Header("X-Accel-Buffering", "no")
startTime := time.Now()
go func() {
defer func() {
if r := recover(); r != nil {
logger.Errorf("[AIChat] PANIC in agent goroutine: %v", r)
streamChan <- &models.StreamChunk{
Type: models.StreamTypeError,
Content: fmt.Sprintf("internal error: %v", r),
Done: true,
Timestamp: time.Now().UnixMilli(),
}
close(streamChan)
}
}()
_, _, err := agentCfg.Process(rt.Ctx, wfCtx)
if err != nil {
logger.Errorf("[AIChat] agent Process error: %v", err)
}
}()
// Stream SSE events
var accumulatedMessage string
c.Stream(func(w io.Writer) bool {
chunk, ok := <-streamChan
if !ok {
return false
}
data, _ := json.Marshal(chunk)
if chunk.Type == models.StreamTypeText || chunk.Type == models.StreamTypeThinking {
if chunk.Delta != "" {
accumulatedMessage += chunk.Delta
} else if chunk.Content != "" {
accumulatedMessage += chunk.Content
}
}
if chunk.Type == models.StreamTypeError {
fmt.Fprintf(w, "event: error\ndata: %s\n\n", data)
c.Writer.Flush()
return false
}
if chunk.Done || chunk.Type == models.StreamTypeDone {
doneData := map[string]interface{}{
"type": "done",
"duration_ms": time.Since(startTime).Milliseconds(),
"message": accumulatedMessage,
"response": chunk.Content,
}
finalData, _ := json.Marshal(doneData)
fmt.Fprintf(w, "event: done\ndata: %s\n\n", finalData)
c.Writer.Flush()
return false
}
fmt.Fprintf(w, "event: chunk\ndata: %s\n\n", data)
c.Writer.Flush()
return true
})
}
func truncStr(s string, maxLen int) string {
if len(s) <= maxLen {
return s
}
return s[:maxLen] + "..."
}
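// Illustrative sketch of the SSE exchange aiChat produces. The "done" frame keys
// ("type", "duration_ms", "message", "response") come from the handler above; the
// chunk payload shape depends on models.StreamChunk's JSON tags and is only assumed
// here. Clients read "chunk" events until an "error" or "done" frame arrives.
//
//	event: chunk
//	data: {<serialized models.StreamChunk>}
//
//	event: done
//	data: {"type":"done","duration_ms":2300,"message":"<accumulated text>","response":"<final content>"}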

View File

@@ -4,9 +4,9 @@ import (
"net/http"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
// no param

View File

@@ -1,55 +1,50 @@
package router
import (
"fmt"
"net/http"
"sort"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/ginx"
)
func getUserGroupIds(ctx *gin.Context, rt *Router, myGroups bool) ([]int64, error) {
if !myGroups {
return nil, nil
func parseAggrRules(c *gin.Context) []*models.AggrRule {
aggrRules := strings.Split(ginx.QueryStr(c, "rule", ""), "::") // e.g. field:group_name::field:severity::tagkey:ident
if len(aggrRules) == 0 {
ginx.Bomb(http.StatusBadRequest, "rule empty")
}
me := ctx.MustGet("user").(*models.User)
return models.MyGroupIds(rt.Ctx, me.Id)
rules := make([]*models.AggrRule, len(aggrRules))
for i := 0; i < len(aggrRules); i++ {
pair := strings.Split(aggrRules[i], ":")
if len(pair) != 2 {
ginx.Bomb(http.StatusBadRequest, "rule invalid")
}
if !(pair[0] == "field" || pair[0] == "tagkey") {
ginx.Bomb(http.StatusBadRequest, "rule invalid")
}
rules[i] = &models.AggrRule{
Type: pair[0],
Value: pair[1],
}
}
return rules
}
func (rt *Router) alertCurEventsCard(c *gin.Context) {
stime, etime := getTimeRange(c)
severity := strx.IdsInt64ForAPI(ginx.QueryStr(c, "severity", ""), ",")
severity := ginx.QueryInt(c, "severity", -1)
query := ginx.QueryStr(c, "query", "")
myGroups := ginx.QueryBool(c, "my_groups", false) // whether to only show the caller's own business groups; default false
var gids []int64
var err error
if myGroups {
gids, err = getUserGroupIds(c, rt, myGroups)
ginx.Dangerous(err)
if len(gids) == 0 {
gids = append(gids, -1)
}
}
viewId := ginx.QueryInt64(c, "view_id")
alertView, err := models.GetAlertAggrViewByViewID(rt.Ctx, viewId)
ginx.Dangerous(err)
if alertView == nil {
ginx.Bomb(http.StatusNotFound, "alert aggr view not found")
}
dsIds := queryDatasourceIds(c)
rules := parseAggrRules(c)
prod := ginx.QueryStr(c, "prods", "")
if prod == "" {
@@ -66,18 +61,17 @@ func (rt *Router) alertCurEventsCard(c *gin.Context) {
cates = strings.Split(cate, ",")
}
bgids, err := GetBusinessGroupIds(c, rt.Ctx, rt.Center.EventHistoryGroupView, myGroups)
bgids, err := GetBusinessGroupIds(c, rt.Ctx, rt.Center.EventHistoryGroupView)
ginx.Dangerous(err)
// fetch at most 50000 events; pulling more than that is not useful
list, err := models.AlertCurEventsGet(rt.Ctx, prods, bgids, stime, etime, severity, dsIds,
cates, 0, query, 50000, 0, []int64{})
cates, 0, query, 50000, 0)
ginx.Dangerous(err)
cardmap := make(map[string]*AlertCard)
for _, event := range list {
title, err := event.GenCardTitle(alertView.Rule)
ginx.Dangerous(err)
title := event.GenCardTitle(rules)
if _, has := cardmap[title]; has {
cardmap[title].Total++
cardmap[title].EventIds = append(cardmap[title].EventIds, event.Id)
@@ -92,10 +86,6 @@ func (rt *Router) alertCurEventsCard(c *gin.Context) {
Severity: event.Severity,
}
}
if cardmap[title].Severity < 1 {
cardmap[title].Severity = 3
}
}
titles := make([]string, 0, len(cardmap))
@@ -152,15 +142,11 @@ func (rt *Router) alertCurEventsGetByRid(c *gin.Context) {
// list view: fetch active alert events
func (rt *Router) alertCurEventsList(c *gin.Context) {
stime, etime := getTimeRange(c)
severity := strx.IdsInt64ForAPI(ginx.QueryStr(c, "severity", ""), ",")
severity := ginx.QueryInt(c, "severity", -1)
query := ginx.QueryStr(c, "query", "")
limit := ginx.QueryInt(c, "limit", 20)
myGroups := ginx.QueryBool(c, "my_groups", false) // whether to only show the caller's own business groups; default false
dsIds := queryDatasourceIds(c)
eventIds := strx.IdsInt64ForAPI(ginx.QueryStr(c, "event_ids", ""), ",")
prod := ginx.QueryStr(c, "prods", "")
if prod == "" {
prod = ginx.QueryStr(c, "rule_prods", "")
@@ -179,19 +165,18 @@ func (rt *Router) alertCurEventsList(c *gin.Context) {
ruleId := ginx.QueryInt64(c, "rid", 0)
bgids, err := GetBusinessGroupIds(c, rt.Ctx, rt.Center.EventHistoryGroupView, myGroups)
bgids, err := GetBusinessGroupIds(c, rt.Ctx, rt.Center.EventHistoryGroupView)
ginx.Dangerous(err)
total, err := models.AlertCurEventTotal(rt.Ctx, prods, bgids, stime, etime, severity, dsIds,
cates, ruleId, query, eventIds)
cates, ruleId, query)
ginx.Dangerous(err)
list, err := models.AlertCurEventsGet(rt.Ctx, prods, bgids, stime, etime, severity, dsIds,
cates, ruleId, query, limit, ginx.Offset(c, limit), eventIds)
cates, ruleId, query, limit, ginx.Offset(c, limit))
ginx.Dangerous(err)
cache := make(map[int64]*models.UserGroup)
for i := 0; i < len(list); i++ {
list[i].FillNotifyGroups(rt.Ctx, cache)
}
@@ -233,68 +218,24 @@ func (rt *Router) checkCurEventBusiGroupRWPermission(c *gin.Context, ids []int64
func (rt *Router) alertCurEventGet(c *gin.Context) {
eid := ginx.UrlParamInt64(c, "eid")
event, err := GetCurEventDetail(rt.Ctx, eid)
event, err := models.AlertCurEventGetById(rt.Ctx, eid)
ginx.Dangerous(err)
hasPermission := HasPermission(rt.Ctx, c, "event", fmt.Sprintf("%d", eid), rt.Center.AnonymousAccess.AlertDetail)
if !hasPermission {
rt.auth()(c)
rt.user()(c)
if event == nil {
ginx.Bomb(404, "No such active event")
}
if !rt.Center.AnonymousAccess.AlertDetail && rt.Center.EventHistoryGroupView {
rt.bgroCheck(c, event.GroupId)
}
ginx.NewRender(c).Data(event, err)
}
func GetCurEventDetail(ctx *ctx.Context, eid int64) (*models.AlertCurEvent, error) {
event, err := models.AlertCurEventGetById(ctx, eid)
if err != nil {
return nil, err
}
if event == nil {
return nil, fmt.Errorf("no such active event")
}
ruleConfig, needReset := models.FillRuleConfigTplName(ctx, event.RuleConfig)
ruleConfig, needReset := models.FillRuleConfigTplName(rt.Ctx, event.RuleConfig)
if needReset {
event.RuleConfigJson = ruleConfig
}
event.LastEvalTime = event.TriggerTime
event.NotifyVersion, err = GetEventNotifyVersion(ctx, event.RuleId, event.NotifyRuleIds)
ginx.Dangerous(err)
event.NotifyRules, err = GetEventNotifyRuleNames(ctx, event.NotifyRuleIds)
return event, err
}
func GetEventNotifyRuleNames(ctx *ctx.Context, notifyRuleIds []int64) ([]*models.EventNotifyRule, error) {
notifyRuleNames := make([]*models.EventNotifyRule, 0)
notifyRules, err := models.NotifyRulesGet(ctx, "id in ?", notifyRuleIds)
if err != nil {
return nil, err
}
for _, notifyRule := range notifyRules {
notifyRuleNames = append(notifyRuleNames, &models.EventNotifyRule{
Id: notifyRule.ID,
Name: notifyRule.Name,
})
}
return notifyRuleNames, nil
}
func GetEventNotifyVersion(ctx *ctx.Context, ruleId int64, notifyRuleIds []int64) (int, error) {
if len(notifyRuleIds) != 0 {
// if notify_rule_ids is non-empty, the new notification mechanism is in use
return 1, nil
}
rule, err := models.AlertRuleGetById(ctx, ruleId)
if err != nil {
return 0, err
}
return rule.NotifyVersion, nil
ginx.NewRender(c).Data(event, nil)
}
func (rt *Router) alertCurEventsStatistics(c *gin.Context) {
@@ -306,123 +247,3 @@ func (rt *Router) alertCurEventDelByHash(c *gin.Context) {
hash := ginx.QueryStr(c, "hash")
ginx.NewRender(c).Message(models.AlertCurEventDelByHash(rt.Ctx, hash))
}
func (rt *Router) eventTagKeys(c *gin.Context) {
// fetch active alert events from the last 24 hours
now := time.Now().Unix()
stime := now - 24*3600
etime := now
// get the business group IDs visible to the user
bgids, err := GetBusinessGroupIds(c, rt.Ctx, rt.Center.EventHistoryGroupView, false)
if err != nil {
logger.Warningf("failed to get business group ids: %v", err)
ginx.NewRender(c).Data([]string{"ident", "app", "service", "instance"}, nil)
return
}
// query active alert events, capping the count for performance
events, err := models.AlertCurEventsGet(rt.Ctx, []string{}, bgids, stime, etime, []int64{}, []int64{}, []string{}, 0, "", 200, 0, []int64{})
if err != nil {
logger.Warningf("failed to get current alert events: %v", err)
ginx.NewRender(c).Data([]string{"ident", "app", "service", "instance"}, nil)
return
}
// if no events were found, return the default tag keys
if len(events) == 0 {
ginx.NewRender(c).Data([]string{"ident", "app", "service", "instance"}, nil)
return
}
// collect all tag keys, de-duplicated
tagKeys := make(map[string]struct{})
for _, event := range events {
for key := range event.TagsMap {
tagKeys[key] = struct{}{}
}
}
// convert to a string slice
var result []string
for key := range tagKeys {
result = append(result, key)
}
// if no tag keys were collected, fall back to the defaults
if len(result) == 0 {
result = []string{"ident", "app", "service", "instance"}
}
ginx.NewRender(c).Data(result, nil)
}
func (rt *Router) eventTagValues(c *gin.Context) {
// get the tag key
tagKey := ginx.QueryStr(c, "key")
// fetch active alert events from the last 24 hours
now := time.Now().Unix()
stime := now - 24*3600
etime := now
// get the business group IDs visible to the user
bgids, err := GetBusinessGroupIds(c, rt.Ctx, rt.Center.EventHistoryGroupView, false)
if err != nil {
logger.Warningf("failed to get business group ids: %v", err)
ginx.NewRender(c).Data([]string{}, nil)
return
}
// query active alert events; fetch more data so the statistics are accurate
events, err := models.AlertCurEventsGet(rt.Ctx, []string{}, bgids, stime, etime, []int64{}, []int64{}, []string{}, 0, "", 1000, 0, []int64{})
if err != nil {
logger.Warningf("failed to get current alert events: %v", err)
ginx.NewRender(c).Data([]string{}, nil)
return
}
// if no events were found, return an empty array
if len(events) == 0 {
ginx.NewRender(c).Data([]string{}, nil)
return
}
// count occurrences of each tag value
valueCount := make(map[string]int)
for _, event := range events {
// TagsMap has already been populated in AlertCurEventsGet, use it directly
if value, exists := event.TagsMap[tagKey]; exists && value != "" {
valueCount[value]++
}
}
// convert to a slice and sort by occurrence count in descending order
type tagValue struct {
value string
count int
}
tagValues := make([]tagValue, 0, len(valueCount))
for value, count := range valueCount {
tagValues = append(tagValues, tagValue{value, count})
}
// sort by occurrence count, descending
sort.Slice(tagValues, func(i, j int) bool {
return tagValues[i].count > tagValues[j].count
})
// keep only the top 20 and convert to a string slice
limit := 20
if len(tagValues) < limit {
limit = len(tagValues)
}
result := make([]string, 0, limit)
for i := 0; i < limit; i++ {
result = append(result, tagValues[i].value)
}
ginx.NewRender(c).Data(result, nil)
}

View File

@@ -1,168 +0,0 @@
package router
import (
"encoding/json"
"fmt"
"io"
"net/http"
"sort"
"strconv"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/loggrep"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
)
// alertEvalDetailPage renders an HTML log viewer page for alert rule evaluation logs.
func (rt *Router) alertEvalDetailPage(c *gin.Context) {
id := ginx.UrlParamStr(c, "id")
if !loggrep.IsValidRuleID(id) {
c.String(http.StatusBadRequest, "invalid rule id format")
return
}
logs, instance, err := rt.getAlertEvalLogs(id)
if err != nil {
c.String(http.StatusInternalServerError, "Error: %v", err)
return
}
c.Header("Content-Type", "text/html; charset=utf-8")
err = loggrep.RenderAlertEvalHTML(c.Writer, loggrep.AlertEvalPageData{
RuleID: id,
Instance: instance,
Logs: logs,
Total: len(logs),
})
if err != nil {
c.String(http.StatusInternalServerError, "render error: %v", err)
}
}
// alertEvalDetailJSON returns JSON for alert rule evaluation logs.
func (rt *Router) alertEvalDetailJSON(c *gin.Context) {
id := ginx.UrlParamStr(c, "id")
if !loggrep.IsValidRuleID(id) {
ginx.Bomb(200, "invalid rule id format")
}
logs, instance, err := rt.getAlertEvalLogs(id)
ginx.Dangerous(err)
ginx.NewRender(c).Data(loggrep.EventDetailResp{
Logs: logs,
Instance: instance,
}, nil)
}
// getAlertEvalLogs resolves the target instance(s) and retrieves alert eval logs.
func (rt *Router) getAlertEvalLogs(id string) ([]string, string, error) {
ruleId, _ := strconv.ParseInt(id, 10, 64)
rule, err := models.AlertRuleGetById(rt.Ctx, ruleId)
if err != nil {
return nil, "", err
}
if rule == nil {
return nil, "", fmt.Errorf("no such alert rule")
}
instance := fmt.Sprintf("%s:%d", rt.Alert.Heartbeat.IP, rt.HTTP.Port)
keyword := fmt.Sprintf("alert_eval_%s", id)
// Get datasource IDs for this rule
dsIds := rt.DatasourceCache.GetIDsByDsCateAndQueries(rule.Cate, rule.DatasourceQueries)
if len(dsIds) == 0 {
// No datasources found (e.g. host rule), try local grep
logs, err := loggrep.GrepLogDir(rt.LogDir, keyword)
return logs, instance, err
}
// Find unique target nodes via hash ring, with DB fallback
nodeSet := make(map[string]struct{})
for _, dsId := range dsIds {
node, err := rt.getNodeForDatasource(dsId, id)
if err != nil {
continue
}
nodeSet[node] = struct{}{}
}
if len(nodeSet) == 0 {
// Hash ring not ready, grep locally
logs, err := loggrep.GrepLogDir(rt.LogDir, keyword)
return logs, instance, err
}
// Collect logs from all target nodes
var allLogs []string
var instances []string
for node := range nodeSet {
if node == instance {
logs, err := loggrep.GrepLogDir(rt.LogDir, keyword)
if err == nil {
allLogs = append(allLogs, logs...)
instances = append(instances, node)
}
} else {
logs, nodeAddr, err := rt.forwardAlertEvalDetail(node, id)
if err == nil {
allLogs = append(allLogs, logs...)
instances = append(instances, nodeAddr)
}
}
}
// Sort logs by timestamp descending
sort.Slice(allLogs, func(i, j int) bool {
return allLogs[i] > allLogs[j]
})
if len(allLogs) > loggrep.MaxLogLines {
allLogs = allLogs[:loggrep.MaxLogLines]
}
return allLogs, strings.Join(instances, ", "), nil
}
func (rt *Router) forwardAlertEvalDetail(node, id string) ([]string, string, error) {
url := fmt.Sprintf("http://%s/v1/n9e/alert-eval-detail/%s", node, id)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, node, err
}
for user, pass := range rt.HTTP.APIForService.BasicAuth {
req.SetBasicAuth(user, pass)
break
}
client := &http.Client{Timeout: 15 * time.Second}
resp, err := client.Do(req)
if err != nil {
return nil, node, fmt.Errorf("forward to %s failed: %v", node, err)
}
defer resp.Body.Close()
body, err := io.ReadAll(io.LimitReader(resp.Body, 10*1024*1024)) // 10MB limit
if err != nil {
return nil, node, err
}
var result struct {
Dat loggrep.EventDetailResp `json:"dat"`
Err string `json:"err"`
}
if err := json.Unmarshal(body, &result); err != nil {
return nil, node, err
}
if result.Err != "" {
return nil, node, fmt.Errorf("%s", result.Err)
}
return result.Dat.Logs, result.Dat.Instance, nil
}

View File

@@ -2,16 +2,14 @@ package router
import (
"fmt"
"net/http"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/ginx"
"golang.org/x/exp/slices"
)
@@ -58,15 +56,15 @@ func (rt *Router) alertHisEventsList(c *gin.Context) {
ruleId := ginx.QueryInt64(c, "rid", 0)
bgids, err := GetBusinessGroupIds(c, rt.Ctx, rt.Center.EventHistoryGroupView, false)
bgids, err := GetBusinessGroupIds(c, rt.Ctx, rt.Center.EventHistoryGroupView)
ginx.Dangerous(err)
total, err := models.AlertHisEventTotal(rt.Ctx, prods, bgids, stime, etime, severity,
recovered, dsIds, cates, ruleId, query, []int64{})
recovered, dsIds, cates, ruleId, query)
ginx.Dangerous(err)
list, err := models.AlertHisEventGets(rt.Ctx, prods, bgids, stime, etime, severity, recovered,
dsIds, cates, ruleId, query, limit, ginx.Offset(c, limit), []int64{})
dsIds, cates, ruleId, query, limit, ginx.Offset(c, limit))
ginx.Dangerous(err)
cache := make(map[int64]*models.UserGroup)
@@ -80,67 +78,16 @@ func (rt *Router) alertHisEventsList(c *gin.Context) {
}, nil)
}
type alertHisEventsDeleteForm struct {
Severities []int `json:"severities"`
Timestamp int64 `json:"timestamp" binding:"required"`
}
func (rt *Router) alertHisEventsDelete(c *gin.Context) {
var f alertHisEventsDeleteForm
ginx.BindJSON(c, &f)
// validation
if f.Timestamp == 0 {
ginx.Bomb(http.StatusBadRequest, "timestamp parameter is required")
return
}
user := c.MustGet("user").(*models.User)
// start a background cleanup task
go func() {
limit := 100
for {
n, err := models.AlertHisEventBatchDelete(rt.Ctx, f.Timestamp, f.Severities, limit)
if err != nil {
logger.Errorf("Failed to delete alert history events: operator=%s, timestamp=%d, severities=%v, error=%v",
user.Username, f.Timestamp, f.Severities, err)
break
}
logger.Debugf("Successfully deleted alert history events: operator=%s, timestamp=%d, severities=%v, deleted=%d",
user.Username, f.Timestamp, f.Severities, n)
if n < int64(limit) {
break // nothing left to delete
}
time.Sleep(100 * time.Millisecond) // avoid locking the table
}
}()
ginx.NewRender(c).Data("Alert history events deletion started", nil)
}
var TransferEventToCur func(*ctx.Context, *models.AlertHisEvent) *models.AlertCurEvent
func init() {
TransferEventToCur = transferEventToCur
}
func transferEventToCur(ctx *ctx.Context, event *models.AlertHisEvent) *models.AlertCurEvent {
cur := event.ToCur()
return cur
}
func (rt *Router) alertHisEventGet(c *gin.Context) {
eid := ginx.UrlParamInt64(c, "eid")
event, err := models.AlertHisEventGetById(rt.Ctx, eid)
ginx.Dangerous(err)
if event == nil {
ginx.Bomb(404, "No such alert event")
}
hasPermission := HasPermission(rt.Ctx, c, "event", fmt.Sprintf("%d", eid), rt.Center.AnonymousAccess.AlertDetail)
if !hasPermission {
rt.auth()(c)
rt.user()(c)
if !rt.Center.AnonymousAccess.AlertDetail && rt.Center.EventHistoryGroupView {
rt.bgroCheck(c, event.GroupId)
}
@@ -149,54 +96,46 @@ func (rt *Router) alertHisEventGet(c *gin.Context) {
event.RuleConfigJson = ruleConfig
}
event.NotifyVersion, err = GetEventNotifyVersion(rt.Ctx, event.RuleId, event.NotifyRuleIds)
ginx.Dangerous(err)
event.NotifyRules, err = GetEventNotifyRuleNames(rt.Ctx, event.NotifyRuleIds)
ginx.NewRender(c).Data(TransferEventToCur(rt.Ctx, event), err)
ginx.NewRender(c).Data(event, err)
}
func GetBusinessGroupIds(c *gin.Context, ctx *ctx.Context, onlySelfGroupView bool, myGroups bool) ([]int64, error) {
func GetBusinessGroupIds(c *gin.Context, ctx *ctx.Context, eventHistoryGroupView bool) ([]int64, error) {
bgid := ginx.QueryInt64(c, "bgid", 0)
var bgids []int64
if strings.HasPrefix(c.Request.URL.Path, "/v1") {
// for request paths starting with /v1, do not look up user info
if !eventHistoryGroupView || strings.HasPrefix(c.Request.URL.Path, "/v1") {
if bgid > 0 {
return []int64{bgid}, nil
}
return bgids, nil
}
user := c.MustGet("user").(*models.User)
if myGroups || (onlySelfGroupView && !user.IsAdmin()) {
// 1. "my business groups" is checked on the page, so query the groups the user belongs to
// 2. if onlySelfGroupView is true, only the user's own business groups may be queried
bussGroupIds, err := models.MyBusiGroupIds(ctx, user.Id)
if err != nil {
return nil, err
}
if len(bussGroupIds) == 0 {
// if the user belongs to no business group, return [0]; otherwise the query would return the entire alert history
return []int64{0}, nil
}
if user.IsAdmin() {
if bgid > 0 {
if !slices.Contains(bussGroupIds, bgid) && !user.IsAdmin() {
return nil, fmt.Errorf("business group ID not allowed")
}
return []int64{bgid}, nil
}
return bgids, nil
}
return bussGroupIds, nil
bussGroupIds, err := models.MyBusiGroupIds(ctx, user.Id)
if err != nil {
return nil, err
}
if len(bussGroupIds) == 0 {
// if the user belongs to no business group, return [0]; otherwise the query would return the entire alert history
return []int64{0}, nil
}
if bgid > 0 && !slices.Contains(bussGroupIds, bgid) {
return nil, fmt.Errorf("business group ID not allowed")
}
if bgid > 0 {
// a bgid filter was passed; it takes priority
return []int64{bgid}, nil
}
return bgids, nil
return bussGroupIds, nil
}

View File

@@ -11,22 +11,18 @@ import (
"gopkg.in/yaml.v2"
"github.com/ccfos/nightingale/v6/alert/mute"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/ccfos/nightingale/v6/pushgw/pconf"
"github.com/ccfos/nightingale/v6/pushgw/writer"
"github.com/gin-gonic/gin"
"github.com/jinzhu/copier"
"github.com/pkg/errors"
"github.com/prometheus/prometheus/prompb"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/i18n"
"github.com/toolkits/pkg/str"
)
type AlertRuleModifyHookFunc func(ar *models.AlertRule)
// Return all, front-end search and paging
func (rt *Router) alertRuleGets(c *gin.Context) {
busiGroupId := ginx.UrlParamInt64(c, "id")
@@ -35,13 +31,13 @@ func (rt *Router) alertRuleGets(c *gin.Context) {
cache := make(map[int64]*models.UserGroup)
for i := 0; i < len(ars); i++ {
ars[i].FillNotifyGroups(rt.Ctx, cache)
ars[i].FillSeverities()
}
models.FillUpdateByNicknames(rt.Ctx, ars)
}
ginx.NewRender(c).Data(ars, err)
}
func GetAlertCueEventTimeRange(c *gin.Context) (stime, etime int64) {
func getAlertCueEventTimeRange(c *gin.Context) (stime, etime int64) {
stime = ginx.QueryInt64(c, "stime", 0)
etime = ginx.QueryInt64(c, "etime", 0)
if etime == 0 {
@@ -54,7 +50,7 @@ func GetAlertCueEventTimeRange(c *gin.Context) (stime, etime int64) {
}
func (rt *Router) alertRuleGetsByGids(c *gin.Context) {
gids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "gids", ""), ",")
gids := str.IdsInt64(ginx.QueryStr(c, "gids", ""), ",")
if len(gids) > 0 {
for _, gid := range gids {
rt.bgroCheck(c, gid)
@@ -77,17 +73,20 @@ func (rt *Router) alertRuleGetsByGids(c *gin.Context) {
if err == nil {
cache := make(map[int64]*models.UserGroup)
rids := make([]int64, 0, len(ars))
names := make([]string, 0, len(ars))
for i := 0; i < len(ars); i++ {
ars[i].FillNotifyGroups(rt.Ctx, cache)
ars[i].FillSeverities()
if len(ars[i].DatasourceQueries) != 0 {
ars[i].DatasourceIdsJson = rt.DatasourceCache.GetIDsByDsCateAndQueries(ars[i].Cate, ars[i].DatasourceQueries)
}
rids = append(rids, ars[i].Id)
names = append(names, ars[i].UpdateBy)
}
stime, etime := GetAlertCueEventTimeRange(c)
stime, etime := getAlertCueEventTimeRange(c)
cnt := models.AlertCurEventCountByRuleId(rt.Ctx, rids, stime, etime)
if cnt != nil {
for i := 0; i < len(ars); i++ {
@@ -95,7 +94,14 @@ func (rt *Router) alertRuleGetsByGids(c *gin.Context) {
}
}
models.FillUpdateByNicknames(rt.Ctx, ars)
users := models.UserMapGet(rt.Ctx, "username in (?)", names)
if users != nil {
for i := 0; i < len(ars); i++ {
if user, exist := users[ars[i].UpdateBy]; exist {
ars[i].UpdateByNickname = user.Nickname
}
}
}
}
ginx.NewRender(c).Data(ars, err)
}
@@ -127,7 +133,6 @@ func (rt *Router) alertRulesGetByService(c *gin.Context) {
ars[i].DatasourceIdsJson = rt.DatasourceCache.GetIDsByDsCateAndQueries(ars[i].Cate, ars[i].DatasourceQueries)
}
}
models.FillUpdateByNicknames(rt.Ctx, ars)
}
ginx.NewRender(c).Data(ars, err)
}
@@ -150,120 +155,6 @@ func (rt *Router) alertRuleAddByFE(c *gin.Context) {
ginx.NewRender(c).Data(reterr, nil)
}
type AlertRuleTryRunForm struct {
EventId int64 `json:"event_id" binding:"required"`
AlertRuleConfig models.AlertRule `json:"config" binding:"required"`
}
func (rt *Router) alertRuleNotifyTryRun(c *gin.Context) {
// check notify channels of old version
var f AlertRuleTryRunForm
ginx.BindJSON(c, &f)
hisEvent, err := models.AlertHisEventGetById(rt.Ctx, f.EventId)
ginx.Dangerous(err)
if hisEvent == nil {
ginx.Bomb(http.StatusNotFound, "event not found")
}
curEvent := *hisEvent.ToCur()
curEvent.SetTagsMap()
if f.AlertRuleConfig.NotifyVersion == 1 {
for _, id := range f.AlertRuleConfig.NotifyRuleIds {
notifyRule, err := models.GetNotifyRule(rt.Ctx, id)
ginx.Dangerous(err)
for _, notifyConfig := range notifyRule.NotifyConfigs {
_, err = SendNotifyChannelMessage(rt.Ctx, rt.UserCache, rt.UserGroupCache, notifyConfig, []*models.AlertCurEvent{&curEvent})
ginx.Dangerous(err)
}
}
ginx.NewRender(c).Data("notification test ok", nil)
return
}
if len(f.AlertRuleConfig.NotifyChannelsJSON) == 0 {
ginx.Bomb(http.StatusOK, "no notify channels selected")
}
if len(f.AlertRuleConfig.NotifyGroupsJSON) == 0 {
ginx.Bomb(http.StatusOK, "no notify groups selected")
}
ancs := make([]string, 0, len(curEvent.NotifyChannelsJSON))
ugids := f.AlertRuleConfig.NotifyGroupsJSON
ngids := make([]int64, 0)
for i := 0; i < len(ugids); i++ {
if gid, err := strconv.ParseInt(ugids[i], 10, 64); err == nil {
ngids = append(ngids, gid)
}
}
userGroups := rt.UserGroupCache.GetByUserGroupIds(ngids)
uids := make([]int64, 0)
for i := range userGroups {
uids = append(uids, userGroups[i].UserIds...)
}
users := rt.UserCache.GetByUserIds(uids)
for _, NotifyChannels := range curEvent.NotifyChannelsJSON {
flag := true
// ignore non-default channels
switch NotifyChannels {
case models.Dingtalk, models.Wecom, models.Feishu, models.Mm,
models.Telegram, models.Email, models.FeishuCard:
// do nothing
default:
continue
}
// default channels
for ui := range users {
if _, b := users[ui].ExtractToken(NotifyChannels); b {
flag = false
break
}
}
if flag {
ancs = append(ancs, NotifyChannels)
}
}
if len(ancs) > 0 {
ginx.Dangerous(errors.New(fmt.Sprintf("All users are missing notify channel configurations. Please check for missing tokens (each channel should be configured with at least one user). %v", ancs)))
}
ginx.NewRender(c).Data("notification test ok", nil)
}
func (rt *Router) alertRuleEnableTryRun(c *gin.Context) {
// check notify channels of old version
var f AlertRuleTryRunForm
ginx.BindJSON(c, &f)
hisEvent, err := models.AlertHisEventGetById(rt.Ctx, f.EventId)
ginx.Dangerous(err)
if hisEvent == nil {
ginx.Bomb(http.StatusNotFound, "event not found")
}
curEvent := *hisEvent.ToCur()
curEvent.SetTagsMap()
if f.AlertRuleConfig.Disabled == 1 {
ginx.Bomb(http.StatusOK, "rule is disabled")
}
if mute.TimeSpanMuteStrategy(&f.AlertRuleConfig, &curEvent) {
ginx.Bomb(http.StatusOK, "event is not match for period of time")
}
if mute.BgNotMatchMuteStrategy(&f.AlertRuleConfig, &curEvent, rt.TargetCache) {
ginx.Bomb(http.StatusOK, "event target busi group not match rule busi group")
}
ginx.NewRender(c).Data("event is effective", nil)
}
func (rt *Router) alertRuleAddByImport(c *gin.Context) {
username := c.MustGet("username").(string)
@@ -281,15 +172,6 @@ func (rt *Router) alertRuleAddByImport(c *gin.Context) {
models.DataSourceQueryAll,
}
}
// normalize imported rules to the new notification rule configuration
lst[i].NotifyVersion = 1
lst[i].NotifyChannelsJSON = []string{}
lst[i].NotifyGroupsJSON = []string{}
lst[i].NotifyChannels = ""
lst[i].NotifyGroups = ""
lst[i].Callbacks = ""
lst[i].CallbacksJSON = []string{}
}
bgid := ginx.UrlParamInt64(c, "id")
@@ -308,52 +190,19 @@ func (rt *Router) alertRuleAddByImportPromRule(c *gin.Context) {
var f promRuleForm
ginx.Dangerous(c.BindJSON(&f))
// first try to parse the format that contains groups
var pr struct {
Groups []models.PromRuleGroup `yaml:"groups"`
}
err := yaml.Unmarshal([]byte(f.Payload), &pr)
var groups []models.PromRuleGroup
if err != nil || len(pr.Groups) == 0 {
// if parsing failed or there are no groups, try the rule-array format
var rules []models.PromRule
err = yaml.Unmarshal([]byte(f.Payload), &rules)
if err != nil {
// finally try the single-rule format
var singleRule models.PromRule
err = yaml.Unmarshal([]byte(f.Payload), &singleRule)
if err != nil {
ginx.Bomb(http.StatusBadRequest, "invalid yaml format. err: %v", err)
}
// verify the single rule is valid
if singleRule.Alert == "" && singleRule.Record == "" {
ginx.Bomb(http.StatusBadRequest, "input yaml is empty or invalid")
}
rules = []models.PromRule{singleRule}
}
// verify the rule array is not empty
if len(rules) == 0 {
ginx.Bomb(http.StatusBadRequest, "input yaml contains no rules")
}
// wrap the rule array in a group
groups = []models.PromRuleGroup{
{
Name: "imported_rules",
Rules: rules,
},
}
} else {
// use the parsed groups
groups = pr.Groups
if err != nil {
ginx.Bomb(http.StatusBadRequest, "invalid yaml format, please use the example format. err: %v", err)
}
lst := models.DealPromGroup(groups, f.DatasourceQueries, f.Disabled)
if len(pr.Groups) == 0 {
ginx.Bomb(http.StatusBadRequest, "input yaml is empty")
}
lst := models.DealPromGroup(pr.Groups, f.DatasourceQueries, f.Disabled)
username := c.MustGet("username").(string)
bgid := ginx.UrlParamInt64(c, "id")
ginx.NewRender(c).Data(rt.alertRuleAdd(lst, username, bgid, c.GetHeader("X-Language")), nil)
@@ -498,8 +347,8 @@ func (rt *Router) alertRulePutFields(c *gin.Context) {
ginx.Bomb(http.StatusBadRequest, "fields empty")
}
updateBy := c.MustGet("username").(string)
updateAt := time.Now().Unix()
f.Fields["update_by"] = c.MustGet("username").(string)
f.Fields["update_at"] = time.Now().Unix()
for i := 0; i < len(f.Ids); i++ {
ar, err := models.AlertRuleGetById(rt.Ctx, f.Ids[i])
@@ -516,6 +365,7 @@ func (rt *Router) alertRulePutFields(c *gin.Context) {
b, err := json.Marshal(originRule)
ginx.Dangerous(err)
ginx.Dangerous(ar.UpdateFieldsMap(rt.Ctx, map[string]interface{}{"rule_config": string(b)}))
continue
}
}
@@ -528,6 +378,7 @@ func (rt *Router) alertRulePutFields(c *gin.Context) {
b, err := json.Marshal(ar.AnnotationsJSON)
ginx.Dangerous(err)
ginx.Dangerous(ar.UpdateFieldsMap(rt.Ctx, map[string]interface{}{"annotations": string(b)}))
continue
}
}
@@ -540,6 +391,7 @@ func (rt *Router) alertRulePutFields(c *gin.Context) {
b, err := json.Marshal(ar.AnnotationsJSON)
ginx.Dangerous(err)
ginx.Dangerous(ar.UpdateFieldsMap(rt.Ctx, map[string]interface{}{"annotations": string(b)}))
continue
}
}
@@ -549,6 +401,7 @@ func (rt *Router) alertRulePutFields(c *gin.Context) {
callback := callbacks.(string)
if !strings.Contains(ar.Callbacks, callback) {
ginx.Dangerous(ar.UpdateFieldsMap(rt.Ctx, map[string]interface{}{"callbacks": ar.Callbacks + " " + callback}))
continue
}
}
}
@@ -558,6 +411,7 @@ func (rt *Router) alertRulePutFields(c *gin.Context) {
if callbacks, has := f.Fields["callbacks"]; has {
callback := callbacks.(string)
ginx.Dangerous(ar.UpdateFieldsMap(rt.Ctx, map[string]interface{}{"callbacks": strings.ReplaceAll(ar.Callbacks, callback, "")}))
continue
}
}
@@ -567,27 +421,13 @@ func (rt *Router) alertRulePutFields(c *gin.Context) {
bytes, err := json.Marshal(datasourceQueries)
ginx.Dangerous(err)
ginx.Dangerous(ar.UpdateFieldsMap(rt.Ctx, map[string]interface{}{"datasource_queries": bytes}))
continue
}
}
for k, v := range f.Fields {
// check whether v is one of the slice types
switch v.(type) {
case []interface{}, []int64, []int, []string:
// marshal the slice to a JSON string
bytes, err := json.Marshal(v)
ginx.Dangerous(err)
ginx.Dangerous(ar.UpdateColumn(rt.Ctx, k, string(bytes)))
default:
ginx.Dangerous(ar.UpdateColumn(rt.Ctx, k, v))
}
ginx.Dangerous(ar.UpdateColumn(rt.Ctx, k, v))
}
// update update_at and update_by in one place; the engine only re-pulls a rule after its update time changes
ginx.Dangerous(ar.UpdateFieldsMap(rt.Ctx, map[string]interface{}{
"update_by": updateBy,
"update_at": updateAt,
}))
}
ginx.NewRender(c).Message(nil)
@@ -611,7 +451,6 @@ func (rt *Router) alertRuleGet(c *gin.Context) {
err = ar.FillNotifyGroups(rt.Ctx, make(map[int64]*models.UserGroup))
ginx.Dangerous(err)
rt.AlertRuleModifyHook(ar)
ginx.NewRender(c).Data(ar, err)
}
@@ -836,74 +675,3 @@ func (rt *Router) cloneToMachine(c *gin.Context) {
ginx.NewRender(c).Data(reterr, models.InsertAlertRule(rt.Ctx, newRules))
}
type alertBatchCloneForm struct {
RuleIds []int64 `json:"rule_ids"`
Bgids []int64 `json:"bgids"`
}
// batch-clone alert rules
func (rt *Router) batchAlertRuleClone(c *gin.Context) {
me := c.MustGet("user").(*models.User)
var f alertBatchCloneForm
ginx.BindJSON(c, &f)
// verify write permission on the target bgids
for _, bgid := range f.Bgids {
rt.bgrwCheck(c, bgid)
}
reterr := make(map[string]string, len(f.RuleIds))
lang := c.GetHeader("X-Language")
for _, arid := range f.RuleIds {
ar, err := models.AlertRuleGetById(rt.Ctx, arid)
for _, bgid := range f.Bgids {
// handle the err from above here so each bgid pairs with its arid in the result
if err != nil {
reterr[fmt.Sprintf("%d-%d", arid, bgid)] = i18n.Sprintf(lang, err.Error())
continue
}
if ar == nil {
reterr[fmt.Sprintf("%d-%d", arid, bgid)] = i18n.Sprintf(lang, "alert rule not found")
continue
}
newAr := ar.Clone(me.Username, bgid)
err = newAr.Add(rt.Ctx)
if err != nil {
reterr[fmt.Sprintf("%d-%d", arid, bgid)] = i18n.Sprintf(lang, err.Error())
continue
}
}
}
ginx.NewRender(c).Data(reterr, nil)
}
func (rt *Router) timezonesGet(c *gin.Context) {
// return a list of common timezones, de-duplicated by UTC offset (one representative timezone per offset)
timezones := []string{
"Local",
"UTC",
"Asia/Shanghai", // UTC+8 (代表 Asia/Hong_Kong, Asia/Singapore 等)
"Asia/Tokyo", // UTC+9 (代表 Asia/Seoul 等)
"Asia/Dubai", // UTC+4
"Asia/Kolkata", // UTC+5:30
"Asia/Bangkok", // UTC+7 (代表 Asia/Jakarta 等)
"Europe/London", // UTC+0 (代表 UTC)
"Europe/Paris", // UTC+1 (代表 Europe/Berlin, Europe/Rome, Europe/Madrid 等)
"Europe/Moscow", // UTC+3
"America/New_York", // UTC-5 (代表 America/Toronto 等)
"America/Chicago", // UTC-6 (代表 America/Mexico_City 等)
"America/Denver", // UTC-7
"America/Los_Angeles", // UTC-8
"America/Sao_Paulo", // UTC-3
"Australia/Sydney", // UTC+10 (代表 Australia/Melbourne 等)
"Pacific/Auckland", // UTC+12
}
ginx.NewRender(c).Data(timezones, nil)
}

View File

@@ -2,17 +2,13 @@ package router
import (
"net/http"
"strconv"
"strings"
"time"
"github.com/ccfos/nightingale/v6/alert/common"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/i18n"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/str"
)
// Return all, front-end search and paging
@@ -30,13 +26,12 @@ func (rt *Router) alertSubscribeGets(c *gin.Context) {
ginx.Dangerous(lst[i].FillDatasourceIds(rt.Ctx))
ginx.Dangerous(lst[i].DB2FE())
}
models.FillUpdateByNicknames(rt.Ctx, lst)
ginx.NewRender(c).Data(lst, err)
}
func (rt *Router) alertSubscribeGetsByGids(c *gin.Context) {
gids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "gids", ""), ",")
gids := str.IdsInt64(ginx.QueryStr(c, "gids", ""), ",")
if len(gids) > 0 {
for _, gid := range gids {
rt.bgroCheck(c, gid)
@@ -67,7 +62,6 @@ func (rt *Router) alertSubscribeGetsByGids(c *gin.Context) {
ginx.Dangerous(lst[i].FillDatasourceIds(rt.Ctx))
ginx.Dangerous(lst[i].DB2FE())
}
models.FillUpdateByNicknames(rt.Ctx, lst)
ginx.NewRender(c).Data(lst, err)
}
@@ -110,148 +104,6 @@ func (rt *Router) alertSubscribeAdd(c *gin.Context) {
ginx.NewRender(c).Message(f.Add(rt.Ctx))
}
type SubscribeTryRunForm struct {
EventId int64 `json:"event_id" binding:"required"`
SubscribeConfig models.AlertSubscribe `json:"config" binding:"required"`
}
func (rt *Router) alertSubscribeTryRun(c *gin.Context) {
var f SubscribeTryRunForm
ginx.BindJSON(c, &f)
ginx.Dangerous(f.SubscribeConfig.Verify())
hisEvent, err := models.AlertHisEventGetById(rt.Ctx, f.EventId)
ginx.Dangerous(err)
if hisEvent == nil {
ginx.Bomb(http.StatusNotFound, "event not found")
}
curEvent := *hisEvent.ToCur()
curEvent.SetTagsMap()
lang := c.GetHeader("X-Language")
// check the match conditions first
if !f.SubscribeConfig.MatchCluster(curEvent.DatasourceId) {
ginx.Bomb(http.StatusBadRequest, i18n.Sprintf(lang, "event datasource not match"))
}
if len(f.SubscribeConfig.RuleIds) != 0 {
match := false
for _, rid := range f.SubscribeConfig.RuleIds {
if rid == curEvent.RuleId {
match = true
break
}
}
if !match {
ginx.Bomb(http.StatusBadRequest, i18n.Sprintf(lang, "event rule id not match"))
}
}
// match tags
f.SubscribeConfig.Parse()
if !common.MatchTags(curEvent.TagsMap, f.SubscribeConfig.ITags) {
ginx.Bomb(http.StatusBadRequest, i18n.Sprintf(lang, "event tags not match"))
}
// match group name
if !common.MatchGroupsName(curEvent.GroupName, f.SubscribeConfig.IBusiGroups) {
ginx.Bomb(http.StatusBadRequest, i18n.Sprintf(lang, "event group name not match"))
}
// check severity match
if len(f.SubscribeConfig.SeveritiesJson) != 0 {
match := false
for _, s := range f.SubscribeConfig.SeveritiesJson {
if s == curEvent.Severity || s == 0 {
match = true
break
}
}
if !match {
ginx.Bomb(http.StatusBadRequest, i18n.Sprintf(lang, "event severity not match"))
}
}
// new-style notify rules
if f.SubscribeConfig.NotifyVersion == 1 {
if len(f.SubscribeConfig.NotifyRuleIds) == 0 {
ginx.Bomb(http.StatusBadRequest, i18n.Sprintf(lang, "no notify rules selected"))
}
for _, id := range f.SubscribeConfig.NotifyRuleIds {
notifyRule, err := models.GetNotifyRule(rt.Ctx, id)
if err != nil {
ginx.Bomb(http.StatusNotFound, i18n.Sprintf(lang, "subscribe notify rule not found: %v", err))
}
for _, notifyConfig := range notifyRule.NotifyConfigs {
_, err = SendNotifyChannelMessage(rt.Ctx, rt.UserCache, rt.UserGroupCache, notifyConfig, []*models.AlertCurEvent{&curEvent})
if err != nil {
ginx.Bomb(http.StatusBadRequest, i18n.Sprintf(lang, "notify rule send error: %v", err))
}
}
}
ginx.NewRender(c).Data(i18n.Sprintf(lang, "event match subscribe and notification test ok"), nil)
return
}
// legacy notification path
f.SubscribeConfig.ModifyEvent(&curEvent)
if len(curEvent.NotifyChannelsJSON) == 0 {
ginx.Bomb(http.StatusBadRequest, i18n.Sprintf(lang, "no notify channels selected"))
}
if len(curEvent.NotifyGroupsJSON) == 0 {
ginx.Bomb(http.StatusOK, i18n.Sprintf(lang, "no notify groups selected"))
}
ancs := make([]string, 0, len(curEvent.NotifyChannelsJSON))
ugids := strings.Fields(f.SubscribeConfig.UserGroupIds)
ngids := make([]int64, 0)
for i := 0; i < len(ugids); i++ {
if gid, err := strconv.ParseInt(ugids[i], 10, 64); err == nil {
ngids = append(ngids, gid)
}
}
userGroups := rt.UserGroupCache.GetByUserGroupIds(ngids)
uids := make([]int64, 0)
for i := range userGroups {
uids = append(uids, userGroups[i].UserIds...)
}
users := rt.UserCache.GetByUserIds(uids)
for _, NotifyChannels := range curEvent.NotifyChannelsJSON {
flag := true
// ignore non-default channels
switch NotifyChannels {
case models.Dingtalk, models.Wecom, models.Feishu, models.Mm,
models.Telegram, models.Email, models.FeishuCard:
// do nothing
default:
continue
}
// default channels
for ui := range users {
if _, b := users[ui].ExtractToken(NotifyChannels); b {
flag = false
break
}
}
if flag {
ancs = append(ancs, NotifyChannels)
}
}
if len(ancs) > 0 {
ginx.Bomb(http.StatusBadRequest, i18n.Sprintf(lang, "all users missing notify channel configurations: %v", ancs))
}
ginx.NewRender(c).Data(i18n.Sprintf(lang, "event match subscribe and notify settings ok"), nil)
}
func (rt *Router) alertSubscribePut(c *gin.Context) {
var fs []models.AlertSubscribe
ginx.BindJSON(c, &fs)
@@ -289,8 +141,6 @@ func (rt *Router) alertSubscribePut(c *gin.Context) {
"extra_config",
"busi_groups",
"note",
"notify_rule_ids",
"notify_version",
))
}

View File

@@ -6,18 +6,17 @@ import (
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/i18n"
"github.com/toolkits/pkg/str"
)
type boardForm struct {
Name string `json:"name"`
Ident string `json:"ident"`
Tags string `json:"tags"`
Note string `json:"note"`
Configs string `json:"configs"`
Public int `json:"public"`
PublicCate int `json:"public_cate"`
@@ -35,7 +34,6 @@ func (rt *Router) boardAdd(c *gin.Context) {
Name: f.Name,
Ident: f.Ident,
Tags: f.Tags,
Note: f.Note,
Configs: f.Configs,
CreateBy: me.Username,
UpdateBy: me.Username,
@@ -53,14 +51,9 @@ func (rt *Router) boardAdd(c *gin.Context) {
func (rt *Router) boardGet(c *gin.Context) {
bid := ginx.UrlParamStr(c, "bid")
board, err := models.BoardGet(rt.Ctx, "ident = ?", bid)
board, err := models.BoardGet(rt.Ctx, "id = ? or ident = ?", bid, bid)
ginx.Dangerous(err)
if board == nil {
board, err = models.BoardGet(rt.Ctx, "id = ?", bid)
ginx.Dangerous(err)
}
if board == nil {
ginx.Bomb(http.StatusNotFound, "No such dashboard")
}
@@ -103,7 +96,7 @@ func (rt *Router) boardGet(c *gin.Context) {
// fetch multiple boards by the bids parameter
func (rt *Router) boardGetsByBids(c *gin.Context) {
bids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "bids", ""), ",")
bids := str.IdsInt64(ginx.QueryStr(c, "bids", ""), ",")
boards, err := models.BoardGetsByBids(rt.Ctx, bids)
ginx.Dangerous(err)
ginx.NewRender(c).Data(boards, err)
@@ -117,10 +110,6 @@ func (rt *Router) boardPureGet(c *gin.Context) {
ginx.Bomb(http.StatusNotFound, "No such dashboard")
}
// clear creator and updater info
board.CreateBy = ""
board.UpdateBy = ""
ginx.NewRender(c).Data(board, nil)
}
@@ -186,11 +175,10 @@ func (rt *Router) boardPut(c *gin.Context) {
bo.Name = f.Name
bo.Ident = f.Ident
bo.Tags = f.Tags
bo.Note = f.Note
bo.UpdateBy = me.Username
bo.UpdateAt = time.Now().Unix()
err = bo.Update(rt.Ctx, "name", "ident", "tags", "note", "update_by", "update_at")
err = bo.Update(rt.Ctx, "name", "ident", "tags", "update_by", "update_at")
ginx.NewRender(c).Data(bo, err)
}
@@ -260,9 +248,6 @@ func (rt *Router) boardGets(c *gin.Context) {
query := ginx.QueryStr(c, "query", "")
boards, err := models.BoardGetsByGroupId(rt.Ctx, bgid, query)
if err == nil {
models.FillUpdateByNicknames(rt.Ctx, boards)
}
ginx.NewRender(c).Data(boards, err)
}
@@ -276,14 +261,11 @@ func (rt *Router) publicBoardGets(c *gin.Context) {
ginx.Dangerous(err)
boards, err := models.BoardGets(rt.Ctx, "", "public=1 and (public_cate in (?) or id in (?))", []int64{0, 1}, boardIds)
if err == nil {
models.FillUpdateByNicknames(rt.Ctx, boards)
}
ginx.NewRender(c).Data(boards, err)
}
func (rt *Router) boardGetsByGids(c *gin.Context) {
gids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "gids", ""), ",")
gids := str.IdsInt64(ginx.QueryStr(c, "gids", ""), ",")
query := ginx.QueryStr(c, "query", "")
if len(gids) > 0 {
@@ -318,7 +300,6 @@ func (rt *Router) boardGetsByGids(c *gin.Context) {
boards[i].Bgids = ids
}
}
models.FillUpdateByNicknames(rt.Ctx, boards)
ginx.NewRender(c).Data(boards, err)
}

View File

@@ -8,10 +8,10 @@ import (
"strings"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/file"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/runner"
)

View File

@@ -5,9 +5,9 @@ import (
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"gorm.io/gorm"
)
@@ -36,9 +36,8 @@ func (rt *Router) builtinComponentsAdd(c *gin.Context) {
func (rt *Router) builtinComponentsGets(c *gin.Context) {
query := ginx.QueryStr(c, "query", "")
disabled := ginx.QueryInt(c, "disabled", -1)
bc, err := models.BuiltinComponentGets(rt.Ctx, query, disabled)
bc, err := models.BuiltinComponentGets(rt.Ctx, query)
ginx.Dangerous(err)
ginx.NewRender(c).Data(bc, nil)

View File

@@ -3,8 +3,8 @@ package router
import (
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/prom"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
func (rt *Router) metricFilterGets(c *gin.Context) {
@@ -27,8 +27,6 @@ func (rt *Router) metricFilterGets(c *gin.Context) {
}
}
models.FillUpdateByNicknames(rt.Ctx, arr)
ginx.NewRender(c).Data(arr, err)
}
@@ -59,7 +57,7 @@ func (rt *Router) metricFilterDel(c *gin.Context) {
ginx.Dangerous(err)
if !HasPerm(gids, old.GroupsPerm, true) {
ginx.NewRender(c).Message("forbidden")
ginx.NewRender(c).Message("no permission")
return
}
}
@@ -81,7 +79,7 @@ func (rt *Router) metricFilterPut(c *gin.Context) {
ginx.Dangerous(err)
if !HasPerm(gids, old.GroupsPerm, true) {
ginx.NewRender(c).Message("forbidden")
ginx.NewRender(c).Message("no permission")
return
}
}

View File

@@ -2,14 +2,12 @@ package router
import (
"net/http"
"sort"
"time"
"github.com/ccfos/nightingale/v6/center/integration"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/i18n"
)
@@ -31,7 +29,7 @@ func (rt *Router) builtinMetricsAdd(c *gin.Context) {
reterr := make(map[string]string)
for i := 0; i < count; i++ {
lst[i].Lang = lang
lst[i].UUID = time.Now().UnixMicro()
lst[i].UUID = time.Now().UnixNano()
if err := lst[i].Add(rt.Ctx, username); err != nil {
reterr[lst[i].Name] = i18n.Sprintf(c.GetHeader("X-Language"), err.Error())
}
@@ -50,12 +48,11 @@ func (rt *Router) builtinMetricsGets(c *gin.Context) {
lang = "zh_CN"
}
bmInDB, err := models.BuiltinMetricGets(rt.Ctx, "", collector, typ, query, unit)
bm, err := models.BuiltinMetricGets(rt.Ctx, lang, collector, typ, query, unit, limit, ginx.Offset(c, limit))
ginx.Dangerous(err)
bm, total, err := integration.BuiltinPayloadInFile.BuiltinMetricGets(bmInDB, lang, collector, typ, query, unit, limit, ginx.Offset(c, limit))
total, err := models.BuiltinMetricCount(rt.Ctx, lang, collector, typ, query, unit)
ginx.Dangerous(err)
ginx.NewRender(c).Data(gin.H{
"list": bm,
"total": total,
@@ -89,11 +86,15 @@ func (rt *Router) builtinMetricsDel(c *gin.Context) {
func (rt *Router) builtinMetricsDefaultTypes(c *gin.Context) {
lst := []string{
"Linux",
"Procstat",
"cAdvisor",
"Ping",
"MySQL",
"ClickHouse",
"Redis",
"Kafka",
"Elasticsearch",
"PostgreSQL",
"MongoDB",
"Memcached",
}
ginx.NewRender(c).Data(lst, nil)
}
@@ -103,26 +104,7 @@ func (rt *Router) builtinMetricsTypes(c *gin.Context) {
query := ginx.QueryStr(c, "query", "")
lang := c.GetHeader("X-Language")
metricTypeListInDB, err := models.BuiltinMetricTypes(rt.Ctx, lang, collector, query)
ginx.Dangerous(err)
metricTypeListInFile := integration.BuiltinPayloadInFile.BuiltinMetricTypes(lang, collector, query)
typeMap := make(map[string]struct{})
for _, metricType := range metricTypeListInDB {
typeMap[metricType] = struct{}{}
}
for _, metricType := range metricTypeListInFile {
typeMap[metricType] = struct{}{}
}
metricTypeList := make([]string, 0, len(typeMap))
for metricType := range typeMap {
metricTypeList = append(metricTypeList, metricType)
}
sort.Strings(metricTypeList)
ginx.NewRender(c).Data(metricTypeList, nil)
ginx.NewRender(c).Data(models.BuiltinMetricTypes(rt.Ctx, lang, collector, query))
}
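
Both builtinMetricsTypes and builtinMetricsCollectors above merge a database-backed list with a file-backed list by collecting everything into a set and sorting the result. A small sketch of that merge, under the assumption that plain string slices are all that is needed:

package sketch

import "sort"

// mergeSorted returns the sorted, de-duplicated union of the given lists,
// mirroring the set-merge used for metric types and collectors above.
func mergeSorted(lists ...[]string) []string {
    set := make(map[string]struct{})
    for _, list := range lists {
        for _, item := range list {
            set[item] = struct{}{}
        }
    }
    out := make([]string, 0, len(set))
    for item := range set {
        out = append(out, item)
    }
    sort.Strings(out)
    return out
}
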
func (rt *Router) builtinMetricsCollectors(c *gin.Context) {
@@ -130,24 +112,5 @@ func (rt *Router) builtinMetricsCollectors(c *gin.Context) {
query := ginx.QueryStr(c, "query", "")
lang := c.GetHeader("X-Language")
collectorListInDB, err := models.BuiltinMetricCollectors(rt.Ctx, lang, typ, query)
ginx.Dangerous(err)
collectorListInFile := integration.BuiltinPayloadInFile.BuiltinMetricCollectors(lang, typ, query)
collectorMap := make(map[string]struct{})
for _, collector := range collectorListInDB {
collectorMap[collector] = struct{}{}
}
for _, collector := range collectorListInFile {
collectorMap[collector] = struct{}{}
}
collectorList := make([]string, 0, len(collectorMap))
for collector := range collectorMap {
collectorList = append(collectorList, collector)
}
sort.Strings(collectorList)
ginx.NewRender(c).Data(collectorList, nil)
ginx.NewRender(c).Data(models.BuiltinMetricCollectors(rt.Ctx, lang, typ, query))
}

View File

@@ -7,10 +7,9 @@ import (
"time"
"github.com/BurntSushi/toml"
"github.com/ccfos/nightingale/v6/center/integration"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/i18n"
)
@@ -19,7 +18,6 @@ type Board struct {
Tags string `json:"tags"`
Configs interface{} `json:"configs"`
UUID int64 `json:"uuid"`
Note string `json:"note"`
}
func (rt *Router) builtinPayloadsAdd(c *gin.Context) {
@@ -130,7 +128,6 @@ func (rt *Router) builtinPayloadsAdd(c *gin.Context) {
Name: dashboard.Name,
Tags: dashboard.Tags,
UUID: dashboard.UUID,
Note: dashboard.Note,
Content: string(contentBytes),
CreatedBy: username,
UpdatedBy: username,
@@ -166,7 +163,6 @@ func (rt *Router) builtinPayloadsAdd(c *gin.Context) {
Name: dashboard.Name,
Tags: dashboard.Tags,
UUID: dashboard.UUID,
Note: dashboard.Note,
Content: string(contentBytes),
CreatedBy: username,
UpdatedBy: username,
@@ -196,26 +192,13 @@ func (rt *Router) builtinPayloadsAdd(c *gin.Context) {
func (rt *Router) builtinPayloadsGets(c *gin.Context) {
typ := ginx.QueryStr(c, "type", "")
if typ == "" {
ginx.Bomb(http.StatusBadRequest, "type is required")
return
}
ComponentID := ginx.QueryInt64(c, "component_id", 0)
cate := ginx.QueryStr(c, "cate", "")
query := ginx.QueryStr(c, "query", "")
lst, err := models.BuiltinPayloadGets(rt.Ctx, uint64(ComponentID), typ, cate, query)
ginx.Dangerous(err)
lstInFile, err := integration.BuiltinPayloadInFile.GetBuiltinPayload(typ, cate, query, uint64(ComponentID))
ginx.Dangerous(err)
if len(lstInFile) > 0 {
lst = append(lst, lstInFile...)
}
ginx.NewRender(c).Data(lst, nil)
ginx.NewRender(c).Data(lst, err)
}
func (rt *Router) builtinPayloadcatesGet(c *gin.Context) {
@@ -223,31 +206,21 @@ func (rt *Router) builtinPayloadcatesGet(c *gin.Context) {
ComponentID := ginx.QueryInt64(c, "component_id", 0)
cates, err := models.BuiltinPayloadCates(rt.Ctx, typ, uint64(ComponentID))
ginx.Dangerous(err)
ginx.NewRender(c).Data(cates, err)
}
catesInFile, err := integration.BuiltinPayloadInFile.GetBuiltinPayloadCates(typ, uint64(ComponentID))
ginx.Dangerous(err)
func (rt *Router) builtinPayloadGet(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
// Use a map to de-duplicate
cateMap := make(map[string]bool)
// Add categories from the database
for _, cate := range cates {
cateMap[cate] = true
bp, err := models.BuiltinPayloadGet(rt.Ctx, "id = ?", id)
if err != nil {
ginx.Bomb(http.StatusInternalServerError, err.Error())
}
if bp == nil {
ginx.Bomb(http.StatusNotFound, "builtin payload not found")
}
// Add categories from the files
for _, cate := range catesInFile {
cateMap[cate] = true
}
// Convert the de-duplicated result back to a slice
result := make([]string, 0, len(cateMap))
for cate := range cateMap {
result = append(result, cate)
}
ginx.NewRender(c).Data(result, nil)
ginx.NewRender(c).Data(bp, nil)
}
func (rt *Router) builtinPayloadsPut(c *gin.Context) {
@@ -278,7 +251,6 @@ func (rt *Router) builtinPayloadsPut(c *gin.Context) {
req.Name = dashboard.Name
req.Tags = dashboard.Tags
req.Note = dashboard.Note
} else if req.Type == "collect" {
c := make(map[string]interface{})
if _, err := toml.Decode(req.Content, &c); err != nil {
@@ -301,15 +273,14 @@ func (rt *Router) builtinPayloadsDel(c *gin.Context) {
ginx.NewRender(c).Message(models.BuiltinPayloadDels(rt.Ctx, req.Ids))
}
func (rt *Router) builtinPayloadsGetByUUID(c *gin.Context) {
uuid := ginx.QueryInt64(c, "uuid")
bp, err := models.BuiltinPayloadGet(rt.Ctx, "uuid = ?", uuid)
ginx.Dangerous(err)
if bp != nil {
ginx.NewRender(c).Data(bp, nil)
} else {
ginx.NewRender(c).Data(integration.BuiltinPayloadInFile.IndexData[uuid], nil)
func (rt *Router) builtinPayloadsGetByUUIDOrID(c *gin.Context) {
uuid := ginx.QueryInt64(c, "uuid", 0)
// Prefer uuid when provided
if uuid != 0 {
ginx.NewRender(c).Data(models.BuiltinPayloadGet(rt.Ctx, "uuid = ?", uuid))
return
}
id := ginx.QueryInt64(c, "id", 0)
ginx.NewRender(c).Data(models.BuiltinPayloadGet(rt.Ctx, "id = ?", id))
}

View File

@@ -4,11 +4,11 @@ import (
"net/http"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/str"
)
type busiGroupForm struct {
@@ -119,9 +119,6 @@ func (rt *Router) busiGroupGets(c *gin.Context) {
if len(lst) == 0 {
lst = []models.BusiGroup{}
}
if err == nil {
models.FillUpdateByNicknames(rt.Ctx, lst)
}
ginx.NewRender(c).Data(lst, err)
}
@@ -134,7 +131,7 @@ func (rt *Router) busiGroupGetsByService(c *gin.Context) {
// This endpoint is only called by the active-alerts page to fetch the number of active alerts per business group
func (rt *Router) busiGroupAlertingsGets(c *gin.Context) {
ids := ginx.QueryStr(c, "ids", "")
ret, err := models.AlertNumbers(rt.Ctx, strx.IdsInt64ForAPI(ids))
ret, err := models.AlertNumbers(rt.Ctx, str.IdsInt64(ids))
ginx.NewRender(c).Data(ret, err)
}
@@ -145,7 +142,7 @@ func (rt *Router) busiGroupGet(c *gin.Context) {
}
func (rt *Router) busiGroupsGetTags(c *gin.Context) {
bgids := strx.IdsInt64ForAPI(ginx.QueryStr(c, "gids", ""), ",")
bgids := str.IdsInt64(ginx.QueryStr(c, "gids", ""), ",")
targetIdents, err := models.TargetIndentsGetByBgids(rt.Ctx, bgids)
ginx.Dangerous(err)
tags, err := models.TargetGetTags(rt.Ctx, targetIdents, true, "busigroup")

View File

@@ -5,9 +5,9 @@ import (
"time"
"github.com/ccfos/nightingale/v6/storage"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
captcha "github.com/mojocn/base64Captcha"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
)

View File

@@ -4,15 +4,15 @@ import (
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/str"
)
func (rt *Router) chartShareGets(c *gin.Context) {
ids := ginx.QueryStr(c, "ids", "")
lst, err := models.ChartShareGetsByIds(rt.Ctx, strx.IdsInt64ForAPI(ids, ","))
lst, err := models.ChartShareGetsByIds(rt.Ctx, str.IdsInt64(ids, ","))
ginx.NewRender(c).Data(lst, err)
}

View File

@@ -4,9 +4,9 @@ import (
"encoding/json"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
func (rt *Router) notifyChannelsGets(c *gin.Context) {

View File

@@ -4,9 +4,9 @@ import (
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
const EMBEDDEDDASHBOARD = "embedded-dashboards"
@@ -15,9 +15,6 @@ func (rt *Router) configsGet(c *gin.Context) {
prefix := ginx.QueryStr(c, "prefix", "")
limit := ginx.QueryInt(c, "limit", 10)
configs, err := models.ConfigsGets(rt.Ctx, prefix, limit, ginx.Offset(c, limit))
if err == nil {
models.FillUpdateByNicknames(rt.Ctx, configs)
}
ginx.NewRender(c).Data(configs, err)
}

View File

@@ -2,9 +2,9 @@ package router
import (
"github.com/ccfos/nightingale/v6/pkg/secu"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
type confPropCrypto struct {

View File

@@ -1,99 +0,0 @@
package router
import (
"fmt"
"net/http"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
)
func checkAnnotationPermission(c *gin.Context, ctx *ctx.Context, dashboardId int64) {
dashboard, err := models.BoardGetByID(ctx, dashboardId)
if err != nil {
ginx.Bomb(http.StatusInternalServerError, "failed to get dashboard: %v", err)
}
if dashboard == nil {
ginx.Bomb(http.StatusNotFound, "dashboard not found")
}
bg := BusiGroup(ctx, dashboard.GroupId)
me := c.MustGet("user").(*models.User)
can, err := me.CanDoBusiGroup(ctx, bg, "rw")
ginx.Dangerous(err)
if !can {
ginx.Bomb(http.StatusForbidden, "forbidden")
}
}
func (rt *Router) dashAnnotationAdd(c *gin.Context) {
var f models.DashAnnotation
ginx.BindJSON(c, &f)
username := c.MustGet("username").(string)
now := time.Now().Unix()
checkAnnotationPermission(c, rt.Ctx, f.DashboardId)
f.CreateBy = username
f.CreateAt = now
f.UpdateBy = username
f.UpdateAt = now
ginx.NewRender(c).Data(f.Id, f.Add(rt.Ctx))
}
func (rt *Router) dashAnnotationGets(c *gin.Context) {
dashboardId := ginx.QueryInt64(c, "dashboard_id")
from := ginx.QueryInt64(c, "from")
to := ginx.QueryInt64(c, "to")
limit := ginx.QueryInt(c, "limit", 100)
lst, err := models.DashAnnotationGets(rt.Ctx, dashboardId, from, to, limit)
ginx.NewRender(c).Data(lst, err)
}
func (rt *Router) dashAnnotationPut(c *gin.Context) {
var f models.DashAnnotation
ginx.BindJSON(c, &f)
id := ginx.UrlParamInt64(c, "id")
annotation, err := getAnnotationById(rt.Ctx, id)
ginx.Dangerous(err)
checkAnnotationPermission(c, rt.Ctx, annotation.DashboardId)
f.Id = id
f.UpdateAt = time.Now().Unix()
f.UpdateBy = c.MustGet("username").(string)
ginx.NewRender(c).Message(f.Update(rt.Ctx))
}
func (rt *Router) dashAnnotationDel(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
annotation, err := getAnnotationById(rt.Ctx, id)
ginx.Dangerous(err)
checkAnnotationPermission(c, rt.Ctx, annotation.DashboardId)
ginx.NewRender(c).Message(models.DashAnnotationDel(rt.Ctx, id))
}
// Common helper extracted for fetching an annotation by id
func getAnnotationById(ctx *ctx.Context, id int64) (*models.DashAnnotation, error) {
annotation, err := models.DashAnnotationGet(ctx, "id=?", id)
if err != nil {
return nil, err
}
if annotation == nil {
return nil, fmt.Errorf("annotation not found")
}
return annotation, nil
}

View File

@@ -1,23 +1,17 @@
package router
import (
"context"
"crypto/tls"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/ccfos/nightingale/v6/datasource/opensearch"
"github.com/ccfos/nightingale/v6/dskit/clickhouse"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/i18n"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
)
@@ -53,41 +47,9 @@ func (rt *Router) datasourceList(c *gin.Context) {
func (rt *Router) datasourceGetsByService(c *gin.Context) {
typ := ginx.QueryStr(c, "typ", "")
lst, err := models.GetDatasourcesGetsBy(rt.Ctx, typ, "", "", "")
openRsa := rt.Center.RSA.OpenRSA
for _, item := range lst {
if err := item.Encrypt(openRsa, rt.HTTP.RSA.RSAPublicKey); err != nil {
logger.Errorf("datasource %+v encrypt failed: %v", item, err)
continue
}
}
ginx.NewRender(c).Data(lst, err)
}
func (rt *Router) datasourceRsaConfigGet(c *gin.Context) {
if rt.Center.RSA.OpenRSA {
publicKey := ""
privateKey := ""
if len(rt.HTTP.RSA.RSAPublicKey) > 0 {
publicKey = base64.StdEncoding.EncodeToString(rt.HTTP.RSA.RSAPublicKey)
}
if len(rt.HTTP.RSA.RSAPrivateKey) > 0 {
privateKey = base64.StdEncoding.EncodeToString(rt.HTTP.RSA.RSAPrivateKey)
}
logger.Debugf("OpenRSA=%v", rt.Center.RSA.OpenRSA)
ginx.NewRender(c).Data(models.RsaConfig{
OpenRSA: rt.Center.RSA.OpenRSA,
RSAPublicKey: publicKey,
RSAPrivateKey: privateKey,
RSAPassWord: rt.HTTP.RSA.RSAPassWord,
}, nil)
} else {
ginx.NewRender(c).Data(models.RsaConfig{
OpenRSA: rt.Center.RSA.OpenRSA,
}, nil)
}
}
func (rt *Router) datasourceBriefs(c *gin.Context) {
var dss []*models.Datasource
list, err := models.GetDatasourcesGetsBy(rt.Ctx, "", "", "", "")
@@ -95,21 +57,15 @@ func (rt *Router) datasourceBriefs(c *gin.Context) {
for _, item := range list {
item.AuthJson.BasicAuthPassword = ""
if item.PluginType == models.PROMETHEUS {
if item.PluginType != models.PROMETHEUS {
item.SettingsJson = nil
} else {
for k, v := range item.SettingsJson {
if strings.HasPrefix(k, "prometheus.") {
item.SettingsJson[strings.TrimPrefix(k, "prometheus.")] = v
delete(item.SettingsJson, k)
}
}
} else if item.PluginType == "cloudwatch" {
for k := range item.SettingsJson {
if !strings.Contains(k, "region") {
delete(item.SettingsJson, k)
}
}
} else {
item.SettingsJson = nil
}
dss = append(dss, item)
}
@@ -137,128 +93,11 @@ func (rt *Router) datasourceUpsert(c *gin.Context) {
var count int64
if !req.ForceSave {
if req.PluginType == models.PROMETHEUS || req.PluginType == models.LOKI || req.PluginType == models.TDENGINE {
err = DatasourceCheck(c, req)
if err != nil {
Dangerous(c, err)
return
}
}
}
for k, v := range req.SettingsJson {
if strings.Contains(k, "cluster_name") {
req.ClusterName = v.(string)
break
}
}
if req.PluginType == models.OPENSEARCH {
b, err := json.Marshal(req.SettingsJson)
err = DatasourceCheck(req)
if err != nil {
logger.Warningf("marshal settings fail: %v", err)
return
}
var os opensearch.OpenSearch
err = json.Unmarshal(b, &os)
if err != nil {
logger.Warningf("unmarshal settings fail: %v", err)
return
}
if len(os.Nodes) == 0 {
logger.Warningf("nodes empty, %+v", req)
return
}
req.HTTPJson = models.HTTP{
Timeout: os.Timeout,
Url: os.Nodes[0],
Headers: os.Headers,
TLS: models.TLS{
SkipTlsVerify: os.TLS.SkipTlsVerify,
},
}
req.AuthJson = models.Auth{
BasicAuth: os.Basic.Enable,
BasicAuthUser: os.Basic.Username,
BasicAuthPassword: os.Basic.Password,
}
}
if req.PluginType == models.CLICKHOUSE {
b, err := json.Marshal(req.SettingsJson)
if err != nil {
logger.Warningf("marshal clickhouse settings failed: %v", err)
Dangerous(c, err)
return
}
var ckConfig clickhouse.Clickhouse
err = json.Unmarshal(b, &ckConfig)
if err != nil {
logger.Warningf("unmarshal clickhouse settings failed: %v", err)
Dangerous(c, err)
return
}
// Check that the clickhouse node addresses do not start with http:// or https://
for _, addr := range ckConfig.Nodes {
if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") {
err = fmt.Errorf("clickhouse node address should not start with http:// or https:// : %s", addr)
logger.Warningf("clickhouse node address invalid: %v", err)
Dangerous(c, err)
return
}
}
// InitCli automatically detects and selects the HTTP or Native protocol
err = ckConfig.InitCli()
if err != nil {
logger.Warningf("clickhouse connection failed: %v", err)
Dangerous(c, err)
return
}
// Run SHOW DATABASES to verify connectivity
_, err = ckConfig.ShowDatabases(context.Background())
if err != nil {
logger.Warningf("clickhouse test query failed: %v", err)
Dangerous(c, err)
return
}
}
if req.PluginType == models.ELASTICSEARCH {
skipAuto := false
// If the user supplied a version (the version string exists and is non-empty), do not auto-detect it
if req.SettingsJson != nil {
if v, ok := req.SettingsJson["version"]; ok {
switch vv := v.(type) {
case string:
if strings.TrimSpace(vv) != "" {
skipAuto = true
}
default:
if strings.TrimSpace(fmt.Sprint(vv)) != "" {
skipAuto = true
}
}
}
}
if !skipAuto {
version, err := getElasticsearchVersion(req, 10*time.Second)
if err != nil {
logger.Warningf("failed to get elasticsearch version: %v", err)
} else {
if req.SettingsJson == nil {
req.SettingsJson = make(map[string]interface{})
}
req.SettingsJson["version"] = version
}
}
}
if req.Id == 0 {
@@ -276,14 +115,14 @@ func (rt *Router) datasourceUpsert(c *gin.Context) {
}
err = req.Add(rt.Ctx)
} else {
err = req.Update(rt.Ctx, "name", "identifier", "description", "cluster_name", "settings", "http", "auth", "updated_by", "updated_at", "is_default", "weight")
err = req.Update(rt.Ctx, "name", "description", "cluster_name", "settings", "http", "auth", "updated_by", "updated_at", "is_default")
}
Render(c, nil, err)
}
func DatasourceCheck(c *gin.Context, ds models.Datasource) error {
if ds.PluginType == models.PROMETHEUS || ds.PluginType == models.LOKI || ds.PluginType == models.TDENGINE {
func DatasourceCheck(ds models.Datasource) error {
if ds.PluginType != models.ELASTICSEARCH {
if ds.HTTPJson.Url == "" {
return fmt.Errorf("url is empty")
}
@@ -293,24 +132,19 @@ func DatasourceCheck(c *gin.Context, ds models.Datasource) error {
}
}
// Use the TLS config (supports mTLS)
tlsConfig, err := ds.HTTPJson.TLS.TLSConfig()
if err != nil {
return fmt.Errorf("failed to create TLS config: %v", err)
}
client := &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: ds.HTTPJson.TLS.SkipTlsVerify,
},
},
}
ds.HTTPJson.Url = strings.TrimRight(ds.HTTPJson.Url, "/")
var fullURL string
req, err := ds.HTTPJson.NewReq(&fullURL)
if err != nil {
logger.Errorf("Error creating request: %v", err)
return fmt.Errorf("request urls:%v failed: %v", ds.HTTPJson.GetUrls(), err)
return fmt.Errorf("request urls:%v failed", ds.HTTPJson.GetUrls())
}
if ds.PluginType == models.PROMETHEUS {
@@ -326,14 +160,14 @@ func DatasourceCheck(c *gin.Context, ds models.Datasource) error {
req, err = http.NewRequest("GET", fullURL, nil)
if err != nil {
logger.Errorf("Error creating request: %v", err)
return fmt.Errorf("request url:%s failed: %v", fullURL, err)
return fmt.Errorf("request url:%s failed", fullURL)
}
} else if ds.PluginType == models.TDENGINE {
fullURL = fmt.Sprintf("%s/rest/sql", ds.HTTPJson.Url)
req, err = http.NewRequest("POST", fullURL, strings.NewReader("show databases"))
if err != nil {
logger.Errorf("Error creating request: %v", err)
return fmt.Errorf("request url:%s failed: %v", fullURL, err)
return fmt.Errorf("request url:%s failed", fullURL)
}
}
@@ -345,11 +179,7 @@ func DatasourceCheck(c *gin.Context, ds models.Datasource) error {
req, err = http.NewRequest("GET", fullURL, nil)
if err != nil {
logger.Errorf("Error creating request: %v", err)
if !strings.Contains(ds.HTTPJson.Url, "/loki") {
lang := c.GetHeader("X-Language")
return fmt.Errorf(i18n.Sprintf(lang, "/loki suffix is miss, please add /loki to the url: %s", ds.HTTPJson.Url+"/loki"))
}
return fmt.Errorf("request url:%s failed: %v", fullURL, err)
return fmt.Errorf("request url:%s failed", fullURL)
}
}
@@ -364,16 +194,12 @@ func DatasourceCheck(c *gin.Context, ds models.Datasource) error {
resp, err := client.Do(req)
if err != nil {
logger.Errorf("Error making request: %v\n", err)
return fmt.Errorf("request url:%s failed: %v", fullURL, err)
return fmt.Errorf("request url:%s failed", fullURL)
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
logger.Errorf("Error making request: %v\n", resp.StatusCode)
if resp.StatusCode == 404 && ds.PluginType == models.LOKI && !strings.Contains(ds.HTTPJson.Url, "/loki") {
lang := c.GetHeader("X-Language")
return fmt.Errorf(i18n.Sprintf(lang, "/loki suffix is miss, please add /loki to the url: %s", ds.HTTPJson.Url+"/loki"))
}
body, _ := io.ReadAll(resp.Body)
return fmt.Errorf("request url:%s failed code:%d body:%s", fullURL, resp.StatusCode, string(body))
}
@@ -459,82 +285,3 @@ func (rt *Router) datasourceQuery(c *gin.Context) {
}
ginx.NewRender(c).Data(req, err)
}
// getElasticsearchVersion tries to fetch the version number from the given Elasticsearch datasource,
// iterating over all URLs until a version is obtained or every URL has failed.
func getElasticsearchVersion(ds models.Datasource, timeout time.Duration) (string, error) {
client := &http.Client{
Timeout: timeout,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: ds.HTTPJson.TLS.SkipTlsVerify,
},
},
}
urls := make([]string, 0)
if len(ds.HTTPJson.Urls) > 0 {
urls = append(urls, ds.HTTPJson.Urls...)
}
if ds.HTTPJson.Url != "" {
urls = append(urls, ds.HTTPJson.Url)
}
if len(urls) == 0 {
return "", fmt.Errorf("no url provided")
}
var lastErr error
for _, raw := range urls {
baseURL := strings.TrimRight(raw, "/") + "/"
req, err := http.NewRequest("GET", baseURL, nil)
if err != nil {
lastErr = err
continue
}
if ds.AuthJson.BasicAuthUser != "" {
req.SetBasicAuth(ds.AuthJson.BasicAuthUser, ds.AuthJson.BasicAuthPassword)
}
for k, v := range ds.HTTPJson.Headers {
req.Header.Set(k, v)
}
resp, err := client.Do(req)
if err != nil {
lastErr = err
continue
}
body, err := io.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
lastErr = err
continue
}
if resp.StatusCode != 200 {
lastErr = fmt.Errorf("request to %s failed with status: %d body:%s", baseURL, resp.StatusCode, string(body))
continue
}
var result map[string]interface{}
if err := json.Unmarshal(body, &result); err != nil {
lastErr = err
continue
}
if version, ok := result["version"].(map[string]interface{}); ok {
if number, ok := version["number"].(string); ok && number != "" {
return number, nil
}
}
lastErr = fmt.Errorf("version not found in response from %s", baseURL)
}
if lastErr != nil {
return "", lastErr
}
return "", fmt.Errorf("failed to get elasticsearch version")
}
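
getElasticsearchVersion above probes each configured URL with a GET on the root endpoint and pulls version.number out of the JSON reply. A minimal sketch of just the parsing step, assuming the standard Elasticsearch root response shape:

package sketch

import "encoding/json"

// parseESVersion extracts version.number from the body returned by the
// Elasticsearch root endpoint ("/"); it returns false if the field is absent.
func parseESVersion(body []byte) (string, bool) {
    var result struct {
        Version struct {
            Number string `json:"number"`
        } `json:"version"`
    }
    if err := json.Unmarshal(body, &result); err != nil {
        return "", false
    }
    return result.Version.Number, result.Version.Number != ""
}
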

View File

@@ -1,101 +0,0 @@
package router
import (
"context"
"github.com/ccfos/nightingale/v6/dscache"
"github.com/ccfos/nightingale/v6/dskit/types"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/ccfos/nightingale/v6/pkg/logx"
"github.com/gin-gonic/gin"
)
func (rt *Router) ShowDatabases(c *gin.Context) {
var f models.QueryParam
ginx.BindJSON(c, &f)
plug, exists := dscache.DsCache.Get(f.Cate, f.DatasourceId)
if !exists {
logx.Warningf(c.Request.Context(), "cluster:%d not exists", f.DatasourceId)
ginx.Bomb(200, "cluster not exists")
}
var databases []string
var err error
type DatabaseShower interface {
ShowDatabases(context.Context) ([]string, error)
}
switch plug.(type) {
case DatabaseShower:
databases, err = plug.(DatabaseShower).ShowDatabases(c.Request.Context())
ginx.Dangerous(err)
default:
ginx.Bomb(200, "datasource not exists")
}
if len(databases) == 0 {
databases = make([]string, 0)
}
ginx.NewRender(c).Data(databases, nil)
}
func (rt *Router) ShowTables(c *gin.Context) {
var f models.QueryParam
ginx.BindJSON(c, &f)
plug, exists := dscache.DsCache.Get(f.Cate, f.DatasourceId)
if !exists {
logx.Warningf(c.Request.Context(), "cluster:%d not exists", f.DatasourceId)
ginx.Bomb(200, "cluster not exists")
}
// Only a single argument is accepted
tables := make([]string, 0)
var err error
type TableShower interface {
ShowTables(ctx context.Context, database string) ([]string, error)
}
switch plug.(type) {
case TableShower:
if len(f.Queries) > 0 {
database, ok := f.Queries[0].(string)
if ok {
tables, err = plug.(TableShower).ShowTables(c.Request.Context(), database)
}
}
default:
ginx.Bomb(200, "datasource not exists")
}
ginx.NewRender(c).Data(tables, err)
}
func (rt *Router) DescribeTable(c *gin.Context) {
var f models.QueryParam
ginx.BindJSON(c, &f)
plug, exists := dscache.DsCache.Get(f.Cate, f.DatasourceId)
if !exists {
logx.Warningf(c.Request.Context(), "cluster:%d not exists", f.DatasourceId)
ginx.Bomb(200, "cluster not exists")
}
// Only a single argument is accepted
columns := make([]*types.ColumnProperty, 0)
var err error
type TableDescriber interface {
DescribeTable(context.Context, interface{}) ([]*types.ColumnProperty, error)
}
switch plug.(type) {
case TableDescriber:
client := plug.(TableDescriber)
if len(f.Queries) > 0 {
columns, err = client.DescribeTable(c.Request.Context(), f.Queries[0])
}
default:
ginx.Bomb(200, "datasource not exists")
}
ginx.NewRender(c).Data(columns, err)
}
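
The three removed handlers above follow the same pattern: look the plugin up in the datasource cache, then use a type assertion against a small capability interface (DatabaseShower, TableShower, TableDescriber) to decide whether the plugin can answer the request. A minimal sketch of that dispatch, with the cache lookup replaced by a plain interface{} parameter:

package sketch

import (
    "context"
    "fmt"
)

// DatabaseShower mirrors the optional capability the removed handler asserts.
type DatabaseShower interface {
    ShowDatabases(context.Context) ([]string, error)
}

// showDatabases returns the databases if the plugin supports listing them,
// and a descriptive error otherwise, as the handler above does with ginx.Bomb.
func showDatabases(ctx context.Context, plug interface{}) ([]string, error) {
    ds, ok := plug.(DatabaseShower)
    if !ok {
        return nil, fmt.Errorf("datasource does not support listing databases")
    }
    return ds.ShowDatabases(ctx)
}
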

View File

@@ -1,142 +0,0 @@
package router
import (
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
)
func (rt *Router) embeddedProductGets(c *gin.Context) {
products, err := models.EmbeddedProductGets(rt.Ctx)
ginx.Dangerous(err)
models.FillUpdateByNicknames(rt.Ctx, products)
// Get the list of group IDs the current user can access
me := c.MustGet("user").(*models.User)
if me.IsAdmin() {
ginx.NewRender(c).Data(products, err)
return
}
gids, err := models.MyGroupIds(rt.Ctx, me.Id)
bgSet := make(map[int64]struct{}, len(gids))
for _, id := range gids {
bgSet[id] = struct{}{}
}
// Keep public products and private product links the user has permission to access
var result []*models.EmbeddedProduct
for _, product := range products {
if !product.IsPrivate {
result = append(result, product)
continue
}
for _, tid := range product.TeamIDs {
if _, ok := bgSet[tid]; ok {
result = append(result, product)
break
}
}
}
ginx.NewRender(c).Data(result, err)
}
func (rt *Router) embeddedProductGet(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
if id <= 0 {
ginx.Bomb(400, "invalid id")
}
data, err := models.GetEmbeddedProductByID(rt.Ctx, id)
ginx.Dangerous(err)
me := c.MustGet("user").(*models.User)
hashPermission, err := hasEmbeddedProductAccess(rt.Ctx, me, data)
ginx.Dangerous(err)
if !hashPermission {
ginx.Bomb(403, "forbidden")
}
ginx.NewRender(c).Data(data, nil)
}
func (rt *Router) embeddedProductAdd(c *gin.Context) {
var eps []models.EmbeddedProduct
ginx.BindJSON(c, &eps)
me := c.MustGet("user").(*models.User)
for i := range eps {
eps[i].CreateBy = me.Nickname
eps[i].UpdateBy = me.Nickname
}
err := models.AddEmbeddedProduct(rt.Ctx, eps)
ginx.NewRender(c).Message(err)
}
func (rt *Router) embeddedProductPut(c *gin.Context) {
var ep models.EmbeddedProduct
id := ginx.UrlParamInt64(c, "id")
ginx.BindJSON(c, &ep)
if id <= 0 {
ginx.Bomb(400, "invalid id")
}
oldProduct, err := models.GetEmbeddedProductByID(rt.Ctx, id)
ginx.Dangerous(err)
me := c.MustGet("user").(*models.User)
now := time.Now().Unix()
oldProduct.Name = ep.Name
oldProduct.URL = ep.URL
oldProduct.IsPrivate = ep.IsPrivate
oldProduct.TeamIDs = ep.TeamIDs
oldProduct.UpdateBy = me.Username
oldProduct.UpdateAt = now
err = models.UpdateEmbeddedProduct(rt.Ctx, oldProduct)
ginx.NewRender(c).Message(err)
}
func (rt *Router) embeddedProductDelete(c *gin.Context) {
id := ginx.UrlParamInt64(c, "id")
if id <= 0 {
ginx.Bomb(400, "invalid id")
}
err := models.DeleteEmbeddedProduct(rt.Ctx, id)
ginx.NewRender(c).Message(err)
}
func hasEmbeddedProductAccess(ctx *ctx.Context, user *models.User, ep *models.EmbeddedProduct) (bool, error) {
if user.IsAdmin() || !ep.IsPrivate {
return true, nil
}
gids, err := models.MyGroupIds(ctx, user.Id)
if err != nil {
return false, err
}
groupSet := make(map[int64]struct{}, len(gids))
for _, gid := range gids {
groupSet[gid] = struct{}{}
}
for _, tid := range ep.TeamIDs {
if _, ok := groupSet[tid]; ok {
return true, nil
}
}
return false, nil
}
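
hasEmbeddedProductAccess above grants access when the user is an admin or the product is public, and otherwise checks whether any of the product's team IDs appears in the user's group IDs. A sketch of just the intersection test it relies on:

package sketch

// hasIntersection reports whether any team id is contained in the user's
// group ids — the core of the private-product permission check above.
func hasIntersection(groupIDs, teamIDs []int64) bool {
    set := make(map[int64]struct{}, len(groupIDs))
    for _, id := range groupIDs {
        set[id] = struct{}{}
    }
    for _, id := range teamIDs {
        if _, ok := set[id]; ok {
            return true
        }
    }
    return false
}
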

View File

@@ -1,77 +0,0 @@
package router
import (
"github.com/ccfos/nightingale/v6/datasource/es"
"github.com/ccfos/nightingale/v6/dscache"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/ccfos/nightingale/v6/pkg/logx"
"github.com/gin-gonic/gin"
)
type IndexReq struct {
Cate string `json:"cate"`
DatasourceId int64 `json:"datasource_id"`
Index string `json:"index"`
}
type FieldValueReq struct {
Cate string `json:"cate"`
DatasourceId int64 `json:"datasource_id"`
Index string `json:"index"`
Query FieldObj `json:"query"`
}
type FieldObj struct {
Find string `json:"find"`
Field string `json:"field"`
Query string `json:"query"`
}
func (rt *Router) QueryIndices(c *gin.Context) {
var f IndexReq
ginx.BindJSON(c, &f)
plug, exists := dscache.DsCache.Get(f.Cate, f.DatasourceId)
if !exists {
logx.Warningf(c.Request.Context(), "cluster:%d not exists", f.DatasourceId)
ginx.Bomb(200, "cluster not exists")
}
indices, err := plug.(*es.Elasticsearch).QueryIndices()
ginx.Dangerous(err)
ginx.NewRender(c).Data(indices, nil)
}
func (rt *Router) QueryFields(c *gin.Context) {
var f IndexReq
ginx.BindJSON(c, &f)
plug, exists := dscache.DsCache.Get(f.Cate, f.DatasourceId)
if !exists {
logx.Warningf(c.Request.Context(), "cluster:%d not exists", f.DatasourceId)
ginx.Bomb(200, "cluster not exists")
}
fields, err := plug.(*es.Elasticsearch).QueryFields([]string{f.Index})
ginx.Dangerous(err)
ginx.NewRender(c).Data(fields, nil)
}
func (rt *Router) QueryESVariable(c *gin.Context) {
var f FieldValueReq
ginx.BindJSON(c, &f)
plug, exists := dscache.DsCache.Get(f.Cate, f.DatasourceId)
if !exists {
logx.Warningf(c.Request.Context(), "cluster:%d not exists", f.DatasourceId)
ginx.Bomb(200, "cluster not exists")
}
fields, err := plug.(*es.Elasticsearch).QueryFieldValue([]string{f.Index}, f.Query.Field, f.Query.Query)
ginx.Dangerous(err)
ginx.NewRender(c).Data(fields, nil)
}

View File

@@ -5,8 +5,8 @@ import (
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
// Create an ES index pattern
@@ -69,10 +69,6 @@ func (rt *Router) esIndexPatternGetList(c *gin.Context) {
lst, err = models.EsIndexPatternGets(rt.Ctx, "")
}
if err == nil {
models.FillUpdateByNicknames(rt.Ctx, lst)
}
ginx.NewRender(c).Data(lst, err)
}

View File

@@ -1,149 +0,0 @@
package router
import (
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"time"
"github.com/ccfos/nightingale/v6/alert/naming"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/loggrep"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
)
// eventDetailPage renders an HTML log viewer page (for pages group).
func (rt *Router) eventDetailPage(c *gin.Context) {
hash := ginx.UrlParamStr(c, "hash")
if !loggrep.IsValidHash(hash) {
c.String(http.StatusBadRequest, "invalid hash format")
return
}
logs, instance, err := rt.getEventLogs(hash)
if err != nil {
c.String(http.StatusInternalServerError, "Error: %v", err)
return
}
c.Header("Content-Type", "text/html; charset=utf-8")
err = loggrep.RenderHTML(c.Writer, loggrep.PageData{
Hash: hash,
Instance: instance,
Logs: logs,
Total: len(logs),
})
if err != nil {
c.String(http.StatusInternalServerError, "render error: %v", err)
}
}
// eventDetailJSON returns JSON (for service group).
func (rt *Router) eventDetailJSON(c *gin.Context) {
hash := ginx.UrlParamStr(c, "hash")
if !loggrep.IsValidHash(hash) {
ginx.Bomb(200, "invalid hash format")
}
logs, instance, err := rt.getEventLogs(hash)
ginx.Dangerous(err)
ginx.NewRender(c).Data(loggrep.EventDetailResp{
Logs: logs,
Instance: instance,
}, nil)
}
// getNodeForDatasource returns the alert engine instance responsible for the given
// datasource and primary key. It first checks the local hashring, and falls back
// to querying the database for active instances if the hashring is empty
// (e.g. when the datasource belongs to another engine cluster).
func (rt *Router) getNodeForDatasource(datasourceId int64, pk string) (string, error) {
dsIdStr := strconv.FormatInt(datasourceId, 10)
node, err := naming.DatasourceHashRing.GetNode(dsIdStr, pk)
if err == nil {
return node, nil
}
// Hashring is empty for this datasource (likely belongs to another engine cluster).
// Query the DB for active instances.
servers, dbErr := models.AlertingEngineGetsInstances(rt.Ctx,
"datasource_id = ? and clock > ?",
datasourceId, time.Now().Unix()-30)
if dbErr != nil {
return "", dbErr
}
if len(servers) == 0 {
return "", fmt.Errorf("no active instances for datasource %d", datasourceId)
}
ring := naming.NewConsistentHashRing(int32(naming.NodeReplicas), servers)
return ring.Get(pk)
}
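
getNodeForDatasource above asks the local hashring first and, when that ring is empty, rebuilds a temporary ring from the active instances recorded in the database. A sketch of that fallback shape with the concrete ring and storage types abstracted away (the ring interface and callback signatures here are assumptions for illustration, not the actual naming package API):

package sketch

import "fmt"

// ring stands in for a consistent-hash ring exposing a Get(key) lookup.
type ring interface {
    Get(key string) (string, error)
}

// nodeForKey tries the local ring, then falls back to a ring rebuilt from
// the currently active instances, mirroring getNodeForDatasource above.
func nodeForKey(local ring, key string, listActive func() ([]string, error), build func([]string) ring) (string, error) {
    if node, err := local.Get(key); err == nil {
        return node, nil
    }
    servers, err := listActive()
    if err != nil {
        return "", err
    }
    if len(servers) == 0 {
        return "", fmt.Errorf("no active instances")
    }
    return build(servers).Get(key)
}
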
// getEventLogs resolves the target instance and retrieves logs.
func (rt *Router) getEventLogs(hash string) ([]string, string, error) {
event, err := models.AlertHisEventGetByHash(rt.Ctx, hash)
if err != nil {
return nil, "", err
}
if event == nil {
return nil, "", fmt.Errorf("no such alert event")
}
ruleId := strconv.FormatInt(event.RuleId, 10)
instance := fmt.Sprintf("%s:%d", rt.Alert.Heartbeat.IP, rt.HTTP.Port)
node, err := rt.getNodeForDatasource(event.DatasourceId, ruleId)
if err != nil || node == instance {
// hashring not ready or target is self, handle locally
logs, err := loggrep.GrepLogDir(rt.LogDir, hash)
return logs, instance, err
}
// forward to the target alert instance
return rt.forwardEventDetail(node, hash)
}
func (rt *Router) forwardEventDetail(node, hash string) ([]string, string, error) {
url := fmt.Sprintf("http://%s/v1/n9e/event-detail/%s", node, hash)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, node, err
}
for user, pass := range rt.HTTP.APIForService.BasicAuth {
req.SetBasicAuth(user, pass)
break
}
client := &http.Client{Timeout: 15 * time.Second}
resp, err := client.Do(req)
if err != nil {
return nil, node, fmt.Errorf("forward to %s failed: %v", node, err)
}
defer resp.Body.Close()
body, err := io.ReadAll(io.LimitReader(resp.Body, 10*1024*1024)) // 10MB limit
if err != nil {
return nil, node, err
}
var result struct {
Dat loggrep.EventDetailResp `json:"dat"`
Err string `json:"err"`
}
if err := json.Unmarshal(body, &result); err != nil {
return nil, node, err
}
if result.Err != "" {
return nil, node, fmt.Errorf("%s", result.Err)
}
return result.Dat.Logs, result.Dat.Instance, nil
}
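
forwardEventDetail above calls the peer's /v1/n9e/event-detail endpoint and unwraps the usual {"dat": ..., "err": ...} envelope. A small sketch of that unwrapping step on its own:

package sketch

import (
    "encoding/json"
    "fmt"
)

// decodeEnvelope unwraps a {"dat": ..., "err": ...} response body, decoding
// dat into out and turning a non-empty err field into a Go error.
func decodeEnvelope(body []byte, out interface{}) error {
    var env struct {
        Dat json.RawMessage `json:"dat"`
        Err string          `json:"err"`
    }
    if err := json.Unmarshal(body, &env); err != nil {
        return err
    }
    if env.Err != "" {
        return fmt.Errorf("%s", env.Err)
    }
    if out == nil || len(env.Dat) == 0 {
        return nil
    }
    return json.Unmarshal(env.Dat, out)
}
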

View File

@@ -1,639 +0,0 @@
package router
import (
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/ccfos/nightingale/v6/alert/pipeline/engine"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"github.com/toolkits/pkg/i18n"
"github.com/toolkits/pkg/logger"
)
// List event pipelines
func (rt *Router) eventPipelinesList(c *gin.Context) {
me := c.MustGet("user").(*models.User)
pipelines, err := models.ListEventPipelines(rt.Ctx)
ginx.Dangerous(err)
allTids := make([]int64, 0)
for _, pipeline := range pipelines {
allTids = append(allTids, pipeline.TeamIds...)
}
ugMap, err := models.UserGroupIdAndNameMap(rt.Ctx, allTids)
ginx.Dangerous(err)
for _, pipeline := range pipelines {
for _, tid := range pipeline.TeamIds {
pipeline.TeamNames = append(pipeline.TeamNames, ugMap[tid])
}
// Backward compatibility: auto-fill the workflow fields
pipeline.FillWorkflowFields()
}
models.FillUpdateByNicknames(rt.Ctx, pipelines)
gids, err := models.MyGroupIdsMap(rt.Ctx, me.Id)
ginx.Dangerous(err)
if me.IsAdmin() {
for _, pipeline := range pipelines {
if pipeline.TriggerMode == "" {
pipeline.TriggerMode = models.TriggerModeEvent
}
if pipeline.UseCase == "" {
pipeline.UseCase = models.UseCaseEventPipeline
}
}
ginx.NewRender(c).Data(pipelines, nil)
return
}
res := make([]*models.EventPipeline, 0)
for _, pipeline := range pipelines {
if pipeline.TriggerMode == "" {
pipeline.TriggerMode = models.TriggerModeEvent
}
if pipeline.UseCase == "" {
pipeline.UseCase = models.UseCaseEventPipeline
}
for _, tid := range pipeline.TeamIds {
if _, ok := gids[tid]; ok {
res = append(res, pipeline)
break
}
}
}
ginx.NewRender(c).Data(res, nil)
}
// Get the details of a single event pipeline
func (rt *Router) getEventPipeline(c *gin.Context) {
me := c.MustGet("user").(*models.User)
id := ginx.UrlParamInt64(c, "id")
pipeline, err := models.GetEventPipeline(rt.Ctx, id)
ginx.Dangerous(err)
ginx.Dangerous(me.CheckGroupPermission(rt.Ctx, pipeline.TeamIds))
err = pipeline.FillTeamNames(rt.Ctx)
ginx.Dangerous(err)
// Backward compatibility: auto-fill the workflow fields
pipeline.FillWorkflowFields()
if pipeline.TriggerMode == "" {
pipeline.TriggerMode = models.TriggerModeEvent
}
if pipeline.UseCase == "" {
pipeline.UseCase = models.UseCaseEventPipeline
}
ginx.NewRender(c).Data(pipeline, nil)
}
// Create an event pipeline
func (rt *Router) addEventPipeline(c *gin.Context) {
var pipeline models.EventPipeline
ginx.BindJSON(c, &pipeline)
user := c.MustGet("user").(*models.User)
now := time.Now().Unix()
pipeline.CreateBy = user.Username
pipeline.CreateAt = now
pipeline.UpdateAt = now
pipeline.UpdateBy = user.Username
err := pipeline.Verify()
if err != nil {
ginx.Bomb(http.StatusBadRequest, err.Error())
}
ginx.Dangerous(user.CheckGroupPermission(rt.Ctx, pipeline.TeamIds))
err = models.CreateEventPipeline(rt.Ctx, &pipeline)
ginx.NewRender(c).Message(err)
}
// Update an event pipeline
func (rt *Router) updateEventPipeline(c *gin.Context) {
var f models.EventPipeline
ginx.BindJSON(c, &f)
me := c.MustGet("user").(*models.User)
f.UpdateBy = me.Username
f.UpdateAt = time.Now().Unix()
pipeline, err := models.GetEventPipeline(rt.Ctx, f.ID)
if err != nil {
ginx.Bomb(http.StatusNotFound, "No such event pipeline")
}
ginx.Dangerous(me.CheckGroupPermission(rt.Ctx, pipeline.TeamIds))
ginx.NewRender(c).Message(pipeline.Update(rt.Ctx, &f))
}
// Delete event pipelines
func (rt *Router) deleteEventPipelines(c *gin.Context) {
var f struct {
Ids []int64 `json:"ids"`
}
ginx.BindJSON(c, &f)
if len(f.Ids) == 0 {
ginx.Bomb(http.StatusBadRequest, "ids required")
}
me := c.MustGet("user").(*models.User)
for _, id := range f.Ids {
pipeline, err := models.GetEventPipeline(rt.Ctx, id)
ginx.Dangerous(err)
ginx.Dangerous(me.CheckGroupPermission(rt.Ctx, pipeline.TeamIds))
}
err := models.DeleteEventPipelines(rt.Ctx, f.Ids)
ginx.NewRender(c).Message(err)
}
// Try-run an event pipeline
func (rt *Router) tryRunEventPipeline(c *gin.Context) {
var f struct {
EventId int64 `json:"event_id"`
PipelineConfig models.EventPipeline `json:"pipeline_config"`
InputVariables map[string]string `json:"input_variables,omitempty"`
}
ginx.BindJSON(c, &f)
hisEvent, err := models.AlertHisEventGetById(rt.Ctx, f.EventId)
if err != nil || hisEvent == nil {
ginx.Bomb(http.StatusBadRequest, "event not found")
}
event := hisEvent.ToCur()
lang := c.GetHeader("X-Language")
me := c.MustGet("user").(*models.User)
// Always execute via the workflow engine (compatible with both linear mode and workflow mode)
workflowEngine := engine.NewWorkflowEngine(rt.Ctx)
triggerCtx := &models.WorkflowTriggerContext{
Mode: models.TriggerModeAPI,
TriggerBy: me.Username,
InputsOverrides: f.InputVariables,
}
resultEvent, result, err := workflowEngine.Execute(&f.PipelineConfig, event, triggerCtx)
if err != nil {
ginx.Bomb(http.StatusBadRequest, "pipeline execute error: %v", err)
}
m := map[string]interface{}{
"event": resultEvent,
"result": i18n.Sprintf(lang, result.Message),
"status": result.Status,
"node_results": result.NodeResults,
}
if resultEvent == nil {
m["result"] = i18n.Sprintf(lang, "event is dropped")
}
ginx.NewRender(c).Data(m, nil)
}
// Try-run an event processor
func (rt *Router) tryRunEventProcessor(c *gin.Context) {
var f struct {
EventId int64 `json:"event_id"`
ProcessorConfig models.ProcessorConfig `json:"processor_config"`
}
ginx.BindJSON(c, &f)
hisEvent, err := models.AlertHisEventGetById(rt.Ctx, f.EventId)
if err != nil || hisEvent == nil {
ginx.Bomb(http.StatusBadRequest, "event not found")
}
event := hisEvent.ToCur()
processor, err := models.GetProcessorByType(f.ProcessorConfig.Typ, f.ProcessorConfig.Config)
if err != nil {
ginx.Bomb(200, "get processor err: %+v", err)
}
wfCtx := &models.WorkflowContext{
Event: event,
Vars: make(map[string]interface{}),
}
wfCtx, res, err := processor.Process(rt.Ctx, wfCtx)
if err != nil {
ginx.Bomb(200, "processor err: %+v", err)
}
lang := c.GetHeader("X-Language")
ginx.NewRender(c).Data(map[string]interface{}{
"event": wfCtx.Event,
"result": i18n.Sprintf(lang, res),
}, nil)
}
func (rt *Router) tryRunEventProcessorByNotifyRule(c *gin.Context) {
var f struct {
EventId int64 `json:"event_id"`
PipelineConfigs []models.PipelineConfig `json:"pipeline_configs"`
}
ginx.BindJSON(c, &f)
hisEvent, err := models.AlertHisEventGetById(rt.Ctx, f.EventId)
if err != nil || hisEvent == nil {
ginx.Bomb(http.StatusBadRequest, "event not found")
}
event := hisEvent.ToCur()
pids := make([]int64, 0)
for _, pc := range f.PipelineConfigs {
if pc.Enable {
pids = append(pids, pc.PipelineId)
}
}
pipelines, err := models.GetEventPipelinesByIds(rt.Ctx, pids)
if err != nil {
ginx.Bomb(http.StatusBadRequest, "processors not found")
}
wfCtx := &models.WorkflowContext{
Event: event,
Vars: make(map[string]interface{}),
}
for _, pl := range pipelines {
for _, p := range pl.ProcessorConfigs {
processor, err := models.GetProcessorByType(p.Typ, p.Config)
if err != nil {
ginx.Bomb(http.StatusBadRequest, "get processor: %+v err: %+v", p, err)
}
wfCtx, _, err = processor.Process(rt.Ctx, wfCtx)
if err != nil {
ginx.Bomb(http.StatusBadRequest, "processor: %+v err: %+v", p, err)
}
if wfCtx == nil || wfCtx.Event == nil {
lang := c.GetHeader("X-Language")
ginx.NewRender(c).Data(map[string]interface{}{
"event": nil,
"result": i18n.Sprintf(lang, "event is dropped"),
}, nil)
return
}
}
}
ginx.NewRender(c).Data(wfCtx.Event, nil)
}
func (rt *Router) eventPipelinesListByService(c *gin.Context) {
pipelines, err := models.ListEventPipelines(rt.Ctx)
ginx.NewRender(c).Data(pipelines, err)
}
type EventPipelineRequest struct {
// Event data (optional; an empty event is used if omitted)
Event *models.AlertCurEvent `json:"event,omitempty"`
// Input parameter overrides
InputsOverrides map[string]string `json:"inputs_overrides,omitempty"`
Username string `json:"username,omitempty"`
}
// executePipelineTrigger runs the shared logic for triggering a pipeline
func (rt *Router) executePipelineTrigger(pipeline *models.EventPipeline, req *EventPipelineRequest, triggerBy string) (string, error) {
// Prepare the event data
var event *models.AlertCurEvent
if req.Event != nil {
event = req.Event
} else {
// Create an empty event
event = &models.AlertCurEvent{
TriggerTime: time.Now().Unix(),
}
}
// Generate an execution ID
executionID := uuid.New().String()
// Build the trigger context
triggerCtx := &models.WorkflowTriggerContext{
Mode: models.TriggerModeAPI,
TriggerBy: triggerBy,
InputsOverrides: req.InputsOverrides,
RequestID: executionID,
}
// Execute the workflow asynchronously
go func() {
workflowEngine := engine.NewWorkflowEngine(rt.Ctx)
_, _, err := workflowEngine.Execute(pipeline, event, triggerCtx)
if err != nil {
logger.Errorf("async workflow execute error: pipeline_id=%d execution_id=%s err=%v",
pipeline.ID, executionID, err)
}
}()
return executionID, nil
}
// triggerEventPipelineByService triggers workflow execution via a service call
func (rt *Router) triggerEventPipelineByService(c *gin.Context) {
pipelineID := ginx.UrlParamInt64(c, "id")
var f EventPipelineRequest
ginx.BindJSON(c, &f)
// Fetch the pipeline
pipeline, err := models.GetEventPipeline(rt.Ctx, pipelineID)
if err != nil {
ginx.Bomb(http.StatusNotFound, "pipeline not found: %v", err)
}
executionID, err := rt.executePipelineTrigger(pipeline, &f, f.Username)
if err != nil {
ginx.Bomb(http.StatusBadRequest, "%v", err)
}
ginx.NewRender(c).Data(gin.H{
"execution_id": executionID,
"message": "workflow execution started",
}, nil)
}
// triggerEventPipelineByAPI triggers workflow execution via the API
func (rt *Router) triggerEventPipelineByAPI(c *gin.Context) {
pipelineID := ginx.UrlParamInt64(c, "id")
var f EventPipelineRequest
ginx.BindJSON(c, &f)
// Fetch the pipeline
pipeline, err := models.GetEventPipeline(rt.Ctx, pipelineID)
if err != nil {
ginx.Bomb(http.StatusNotFound, "pipeline not found: %v", err)
}
// Check permission
me := c.MustGet("user").(*models.User)
ginx.Dangerous(me.CheckGroupPermission(rt.Ctx, pipeline.TeamIds))
executionID, err := rt.executePipelineTrigger(pipeline, &f, me.Username)
if err != nil {
ginx.Bomb(http.StatusBadRequest, err.Error())
}
ginx.NewRender(c).Data(gin.H{
"execution_id": executionID,
"message": "workflow execution started",
}, nil)
}
func (rt *Router) listAllEventPipelineExecutions(c *gin.Context) {
pipelineId := ginx.QueryInt64(c, "pipeline_id", 0)
pipelineName := ginx.QueryStr(c, "pipeline_name", "")
mode := ginx.QueryStr(c, "mode", "")
status := ginx.QueryStr(c, "status", "")
limit := ginx.QueryInt(c, "limit", 20)
offset := ginx.QueryInt(c, "p", 1)
if limit <= 0 || limit > 1000 {
limit = 20
}
if offset <= 0 {
offset = 1
}
executions, total, err := models.ListAllEventPipelineExecutions(rt.Ctx, pipelineId, pipelineName, mode, status, limit, (offset-1)*limit)
ginx.Dangerous(err)
ginx.NewRender(c).Data(gin.H{
"list": executions,
"total": total,
}, nil)
}
func (rt *Router) listEventPipelineExecutions(c *gin.Context) {
pipelineID := ginx.UrlParamInt64(c, "id")
mode := ginx.QueryStr(c, "mode", "")
status := ginx.QueryStr(c, "status", "")
limit := ginx.QueryInt(c, "limit", 20)
offset := ginx.QueryInt(c, "p", 1)
if limit <= 0 || limit > 1000 {
limit = 20
}
if offset <= 0 {
offset = 1
}
executions, total, err := models.ListEventPipelineExecutions(rt.Ctx, pipelineID, mode, status, limit, (offset-1)*limit)
ginx.Dangerous(err)
ginx.NewRender(c).Data(gin.H{
"list": executions,
"total": total,
}, nil)
}
func (rt *Router) getEventPipelineExecution(c *gin.Context) {
execID := ginx.UrlParamStr(c, "exec_id")
detail, err := models.GetEventPipelineExecutionDetail(rt.Ctx, execID)
if err != nil {
ginx.Bomb(http.StatusNotFound, "execution not found: %v", err)
}
ginx.NewRender(c).Data(detail, nil)
}
func (rt *Router) getEventPipelineExecutionStats(c *gin.Context) {
pipelineID := ginx.UrlParamInt64(c, "id")
stats, err := models.GetEventPipelineExecutionStatistics(rt.Ctx, pipelineID)
ginx.Dangerous(err)
ginx.NewRender(c).Data(stats, nil)
}
func (rt *Router) cleanEventPipelineExecutions(c *gin.Context) {
var f struct {
BeforeDays int `json:"before_days"`
}
ginx.BindJSON(c, &f)
if f.BeforeDays <= 0 {
f.BeforeDays = 30
}
beforeTime := time.Now().AddDate(0, 0, -f.BeforeDays).Unix()
affected, err := models.DeleteEventPipelineExecutions(rt.Ctx, beforeTime)
ginx.Dangerous(err)
ginx.NewRender(c).Data(gin.H{
"deleted": affected,
}, nil)
}
func (rt *Router) streamEventPipeline(c *gin.Context) {
pipelineID := ginx.UrlParamInt64(c, "id")
var f EventPipelineRequest
ginx.BindJSON(c, &f)
pipeline, err := models.GetEventPipeline(rt.Ctx, pipelineID)
if err != nil {
ginx.Bomb(http.StatusNotFound, "pipeline not found: %v", err)
}
me := c.MustGet("user").(*models.User)
ginx.Dangerous(me.CheckGroupPermission(rt.Ctx, pipeline.TeamIds))
var event *models.AlertCurEvent
if f.Event != nil {
event = f.Event
} else {
event = &models.AlertCurEvent{
TriggerTime: time.Now().Unix(),
}
}
triggerCtx := &models.WorkflowTriggerContext{
Mode: models.TriggerModeAPI,
TriggerBy: me.Username,
InputsOverrides: f.InputsOverrides,
RequestID: uuid.New().String(),
Stream: true, // the streaming endpoint forces streaming output
}
workflowEngine := engine.NewWorkflowEngine(rt.Ctx)
_, result, err := workflowEngine.Execute(pipeline, event, triggerCtx)
if err != nil {
ginx.Bomb(http.StatusInternalServerError, "execute failed: %v", err)
}
if result.Stream && result.StreamChan != nil {
rt.handleStreamResponse(c, result, triggerCtx.RequestID)
return
}
ginx.NewRender(c).Data(result, nil)
}
func (rt *Router) handleStreamResponse(c *gin.Context, result *models.WorkflowResult, requestID string) {
// Set the SSE response headers
c.Header("Content-Type", "text/event-stream")
c.Header("Cache-Control", "no-cache")
c.Header("Connection", "keep-alive")
c.Header("X-Accel-Buffering", "no") // 禁用 nginx 缓冲
c.Header("X-Request-ID", requestID)
flusher, ok := c.Writer.(http.Flusher)
if !ok {
ginx.Bomb(http.StatusInternalServerError, "streaming not supported")
return
}
// Send the initial connection-established message
initData := fmt.Sprintf(`{"type":"connected","request_id":"%s","timestamp":%d}`, requestID, time.Now().UnixMilli())
fmt.Fprintf(c.Writer, "data: %s\n\n", initData)
flusher.Flush()
// Read from the channel and emit SSE events
timeout := time.After(30 * time.Minute) // maximum streaming duration
for {
select {
case chunk, ok := <-result.StreamChan:
if !ok {
// channel closed, end the stream
return
}
data, err := json.Marshal(chunk)
if err != nil {
logger.Errorf("stream: failed to marshal chunk: %v", err)
continue
}
fmt.Fprintf(c.Writer, "data: %s\n\n", data)
flusher.Flush()
if chunk.Done {
return
}
case <-c.Request.Context().Done():
// client disconnected
logger.Infof("stream: client disconnected, request_id=%s", requestID)
return
case <-timeout:
logger.Errorf("stream: timeout, request_id=%s", requestID)
return
}
}
}
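
handleStreamResponse above writes Server-Sent Events: each chunk goes out as a "data: <json>" line followed by a blank line, with an explicit Flush after every write. A minimal client-side sketch that consumes such a stream, using only the standard library (the URL is a placeholder to be adapted to the deployment):

package sketch

import (
    "bufio"
    "fmt"
    "net/http"
    "strings"
)

// readSSE prints every data payload from an SSE stream until the server
// closes the connection or an error occurs.
func readSSE(url string) error {
    resp, err := http.Get(url)
    if err != nil {
        return err
    }
    defer resp.Body.Close()

    scanner := bufio.NewScanner(resp.Body)
    for scanner.Scan() {
        line := scanner.Text()
        if strings.HasPrefix(line, "data: ") {
            fmt.Println(strings.TrimPrefix(line, "data: "))
        }
    }
    return scanner.Err()
}
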
func (rt *Router) streamEventPipelineByService(c *gin.Context) {
pipelineID := ginx.UrlParamInt64(c, "id")
var f EventPipelineRequest
ginx.BindJSON(c, &f)
pipeline, err := models.GetEventPipeline(rt.Ctx, pipelineID)
if err != nil {
ginx.Bomb(http.StatusNotFound, "pipeline not found: %v", err)
}
var event *models.AlertCurEvent
if f.Event != nil {
event = f.Event
} else {
event = &models.AlertCurEvent{
TriggerTime: time.Now().Unix(),
}
}
triggerCtx := &models.WorkflowTriggerContext{
Mode: models.TriggerModeAPI,
TriggerBy: f.Username,
InputsOverrides: f.InputsOverrides,
RequestID: uuid.New().String(),
Stream: true, // the streaming endpoint forces streaming output
}
workflowEngine := engine.NewWorkflowEngine(rt.Ctx)
_, result, err := workflowEngine.Execute(pipeline, event, triggerCtx)
if err != nil {
ginx.Bomb(http.StatusInternalServerError, "execute failed: %v", err)
}
// Check whether this is a streaming response
if result.Stream && result.StreamChan != nil {
rt.handleStreamResponse(c, result, triggerCtx.RequestID)
return
}
ginx.NewRender(c).Data(result, nil)
}
// eventPipelineExecutionAdd receives pipeline execution records synced from edge nodes
func (rt *Router) eventPipelineExecutionAdd(c *gin.Context) {
var execution models.EventPipelineExecution
ginx.BindJSON(c, &execution)
if execution.ID == "" {
ginx.Bomb(http.StatusBadRequest, "id is required")
}
if execution.PipelineID <= 0 {
ginx.Bomb(http.StatusBadRequest, "pipeline_id is required")
}
ginx.NewRender(c).Message(models.DB(rt.Ctx).Create(&execution).Error)
}

View File

@@ -7,9 +7,9 @@ import (
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
const defaultLimit = 300
@@ -36,14 +36,6 @@ func (rt *Router) statistic(c *gin.Context) {
model = models.User{}
case "user_group":
model = models.UserGroup{}
case "notify_rule":
model = models.NotifyRule{}
case "notify_channel":
model = models.NotifyChannel{}
case "event_pipeline":
statistics, err = models.EventPipelineStatistics(rt.Ctx)
ginx.NewRender(c).Data(statistics, err)
return
case "datasource":
// datasource update_at is different from others
statistics, err = models.DatasourceStatistics(rt.Ctx)
@@ -57,10 +49,6 @@ func (rt *Router) statistic(c *gin.Context) {
statistics, err = models.ConfigCvalStatistics(rt.Ctx)
ginx.NewRender(c).Data(statistics, err)
return
case "message_template":
statistics, err = models.MessageTemplateStatistics(rt.Ctx)
ginx.NewRender(c).Data(statistics, err)
return
default:
ginx.Bomb(http.StatusBadRequest, "invalid name")
}
@@ -128,12 +116,6 @@ func UserGroup(ctx *ctx.Context, id int64) *models.UserGroup {
ginx.Bomb(http.StatusNotFound, "No such UserGroup")
}
bgids, err := models.BusiGroupIds(ctx, []int64{id})
ginx.Dangerous(err)
obj.BusiGroups, err = models.BusiGroupGetByIds(ctx, bgids)
ginx.Dangerous(err)
return obj
}
@@ -179,38 +161,3 @@ func Username(c *gin.Context) string {
}
return username
}
func HasPermission(ctx *ctx.Context, c *gin.Context, sourceType, sourceId string, isAnonymousAccess bool) bool {
if sourceType == "event" && isAnonymousAccess {
return true
}
// Try to read the __token parameter from the request
token := ginx.QueryStr(c, "__token", "")
// If a __token parameter is present, validate it
if token != "" {
return ValidateSourceToken(ctx, sourceType, sourceId, token)
}
return false
}
func ValidateSourceToken(ctx *ctx.Context, sourceType, sourceId, token string) bool {
if token == "" {
return false
}
// Fetch the source token record by source type, source ID, and token
sourceToken, err := models.GetSourceTokenBySource(ctx, sourceType, sourceId, token)
if err != nil {
return false
}
// Check whether the token has expired
if sourceToken.IsExpired() {
return false
}
return true
}
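
HasPermission and ValidateSourceToken above let anonymous visitors open a shared resource when the link carries a valid, unexpired __token query parameter. A sketch of how such a link could be assembled (the path layout here is a hypothetical example for illustration, not an actual n9e route):

package sketch

import (
    "net/url"
    "path"
)

// shareURL appends /share/<sourceType>/<sourceID> to base and attaches the
// __token query parameter that ValidateSourceToken checks.
func shareURL(base, sourceType, sourceID, token string) (string, error) {
    u, err := url.Parse(base)
    if err != nil {
        return "", err
    }
    u.Path = path.Join(u.Path, "share", sourceType, sourceID)
    q := u.Query()
    q.Set("__token", token)
    u.RawQuery = q.Encode()
    return u.String(), nil
}
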

View File

@@ -15,9 +15,9 @@ import (
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pushgw/idents"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
)

View File

@@ -2,29 +2,23 @@ package router
import (
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/cas"
"github.com/ccfos/nightingale/v6/pkg/dingtalk"
"github.com/ccfos/nightingale/v6/pkg/feishu"
"github.com/ccfos/nightingale/v6/pkg/ldapx"
"github.com/ccfos/nightingale/v6/pkg/logx"
"github.com/ccfos/nightingale/v6/pkg/oauth2x"
"github.com/ccfos/nightingale/v6/pkg/oidcx"
"github.com/ccfos/nightingale/v6/pkg/secu"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
"github.com/pelletier/go-toml/v2"
"github.com/pkg/errors"
"gorm.io/gorm"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
)
type loginForm struct {
@@ -37,9 +31,7 @@ type loginForm struct {
func (rt *Router) loginPost(c *gin.Context) {
var f loginForm
ginx.BindJSON(c, &f)
rctx := c.Request.Context()
logx.Infof(rctx, "username:%s login from:%s", f.Username, c.ClientIP())
logger.Infof("username:%s login from:%s", f.Username, c.ClientIP())
if rt.HTTP.ShowCaptcha.Enable {
if !CaptchaVerify(f.Captchaid, f.Verifyvalue) {
@@ -52,25 +44,23 @@ func (rt *Router) loginPost(c *gin.Context) {
if rt.HTTP.RSA.OpenRSA {
decPassWord, err := secu.Decrypt(f.Password, rt.HTTP.RSA.RSAPrivateKey, rt.HTTP.RSA.RSAPassWord)
if err != nil {
logx.Errorf(rctx, "RSA Decrypt failed: %v username: %s", err, f.Username)
logger.Errorf("RSA Decrypt failed: %v username: %s", err, f.Username)
ginx.NewRender(c).Message(err)
return
}
authPassWord = decPassWord
}
reqCtx := rt.Ctx.WithContext(rctx)
var user *models.User
var err error
lc := rt.Sso.LDAP.Copy()
if lc.Enable {
user, err = ldapx.LdapLogin(reqCtx, f.Username, authPassWord, lc.DefaultRoles, lc.DefaultTeams, lc)
user, err = ldapx.LdapLogin(rt.Ctx, f.Username, authPassWord, lc.DefaultRoles, lc.DefaultTeams, lc)
if err != nil {
logx.Debugf(rctx, "ldap login failed: %v username: %s", err, f.Username)
logger.Debugf("ldap login failed: %v username: %s", err, f.Username)
var errLoginInN9e error
// fall back to n9e's local login as the minimum guarantee
if user, errLoginInN9e = models.PassLogin(reqCtx, rt.Redis, f.Username, authPassWord); errLoginInN9e != nil {
if user, errLoginInN9e = models.PassLogin(rt.Ctx, rt.Redis, f.Username, authPassWord); errLoginInN9e != nil {
ginx.NewRender(c).Message("ldap login failed: %v; n9e login failed: %v", err, errLoginInN9e)
return
}
@@ -78,7 +68,7 @@ func (rt *Router) loginPost(c *gin.Context) {
user.RolesLst = strings.Fields(user.Roles)
}
} else {
user, err = models.PassLogin(reqCtx, rt.Redis, f.Username, authPassWord)
user, err = models.PassLogin(rt.Ctx, rt.Redis, f.Username, authPassWord)
ginx.Dangerous(err)
}
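The login hunk above tries LDAP first when it is enabled and falls back to the local n9e password check if LDAP rejects the user. A minimal sketch of that fallback pattern, with placeholder ldapLogin/localLogin functions standing in for ldapx.LdapLogin and models.PassLogin:

```go
package main

import (
	"errors"
	"fmt"
)

type user struct{ Username string }

// ldapLogin stands in for ldapx.LdapLogin; here it always rejects.
func ldapLogin(username, pass string) (*user, error) {
	return nil, errors.New("ldap: invalid credentials")
}

// localLogin stands in for models.PassLogin against the local database.
func localLogin(username, pass string) (*user, error) {
	if username == "root" && pass == "secret" {
		return &user{Username: username}, nil
	}
	return nil, errors.New("local: invalid credentials")
}

// loginWithFallback: LDAP first when enabled, local login as the guarantee.
func loginWithFallback(ldapEnabled bool, username, pass string) (*user, error) {
	if ldapEnabled {
		if u, err := ldapLogin(username, pass); err == nil {
			return u, nil
		}
		// LDAP failed: use the local database as the minimum guarantee.
	}
	return localLogin(username, pass)
}

func main() {
	u, err := loginWithFallback(true, "root", "secret")
	fmt.Println(u, err)
}
```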
@@ -102,8 +92,7 @@ func (rt *Router) loginPost(c *gin.Context) {
}
func (rt *Router) logoutPost(c *gin.Context) {
rctx := c.Request.Context()
logx.Infof(rctx, "username:%s logout from:%s", c.GetString("username"), c.ClientIP())
logger.Infof("username:%s logout from:%s", c.GetString("username"), c.ClientIP())
metadata, err := rt.extractTokenMetadata(c.Request)
if err != nil {
ginx.NewRender(c, http.StatusBadRequest).Message("failed to parse jwt token")
@@ -118,20 +107,9 @@ func (rt *Router) logoutPost(c *gin.Context) {
var logoutAddr string
user := c.MustGet("user").(*models.User)
// Fetch the user's id_token
idToken, err := rt.fetchIdToken(c.Request.Context(), user.Id)
if err != nil {
logx.Debugf(rctx, "fetch id_token failed: %v, user_id: %d", err, user.Id)
idToken = "" // 如果获取失败,使用空字符串
}
// Delete the id_token
rt.deleteIdToken(c.Request.Context(), user.Id)
switch user.Belong {
case "oidc":
logoutAddr = rt.Sso.OIDC.GetSsoLogoutAddr(idToken)
logoutAddr = rt.Sso.OIDC.GetSsoLogoutAddr()
case "cas":
logoutAddr = rt.Sso.CAS.GetSsoLogoutAddr()
case "oauth2":
@@ -174,13 +152,6 @@ func (rt *Router) refreshPost(c *gin.Context) {
return
}
// Check whether this refresh token still exists in redis
val, err := rt.fetchAuth(c.Request.Context(), refreshUuid)
if err != nil || val == "" {
ginx.NewRender(c, http.StatusUnauthorized).Message("refresh token expired")
return
}
userIdentity, ok := claims["user_identity"].(string)
if !ok {
// Theoretically impossible
@@ -221,14 +192,6 @@ func (rt *Router) refreshPost(c *gin.Context) {
ginx.Dangerous(err)
ginx.Dangerous(rt.createAuth(c.Request.Context(), userIdentity, ts))
// Extend the id_token's expiration so it stays aligned with the new refresh token's lifetime
// Note: this does not fetch a new id_token, it only extends the TTL of the existing id_token in Redis
if idToken, err := rt.fetchIdToken(c.Request.Context(), userid); err == nil && idToken != "" {
if err := rt.saveIdToken(c.Request.Context(), userid, idToken); err != nil {
logx.Debugf(c.Request.Context(), "refresh id_token ttl failed: %v, user_id: %d", err, userid)
}
}
ginx.NewRender(c).Data(gin.H{
"access_token": ts.AccessToken,
"refresh_token": ts.RefreshToken,
@@ -276,13 +239,12 @@ type CallbackOutput struct {
}
func (rt *Router) loginCallback(c *gin.Context) {
rctx := c.Request.Context()
code := ginx.QueryStr(c, "code", "")
state := ginx.QueryStr(c, "state", "")
ret, err := rt.Sso.OIDC.Callback(rt.Redis, rctx, code, state)
ret, err := rt.Sso.OIDC.Callback(rt.Redis, c.Request.Context(), code, state)
if err != nil {
logx.Errorf(rctx, "sso_callback fail. code:%s, state:%s, get ret: %+v. error: %v", code, state, ret, err)
logger.Errorf("sso_callback fail. code:%s, state:%s, get ret: %+v. error: %v", code, state, ret, err)
ginx.NewRender(c).Data(CallbackOutput{}, err)
return
}
@@ -305,7 +267,7 @@ func (rt *Router) loginCallback(c *gin.Context) {
for _, gid := range rt.Sso.OIDC.DefaultTeams {
err = models.UserGroupMemberAdd(rt.Ctx, gid, user.Id)
if err != nil {
logx.Errorf(rctx, "user:%v UserGroupMemberAdd: %s", user, err)
logger.Errorf("user:%v UserGroupMemberAdd: %s", user, err)
}
}
}
@@ -315,14 +277,7 @@ func (rt *Router) loginCallback(c *gin.Context) {
userIdentity := fmt.Sprintf("%d-%s", user.Id, user.Username)
ts, err := rt.createTokens(rt.HTTP.JWTAuth.SigningKey, userIdentity)
ginx.Dangerous(err)
ginx.Dangerous(rt.createAuth(rctx, userIdentity, ts))
// 保存 id_token 到 Redis用于登出时使用
if ret.IdToken != "" {
if err := rt.saveIdToken(rctx, user.Id, ret.IdToken); err != nil {
logx.Errorf(rctx, "save id_token failed: %v, user_id: %d", err, user.Id)
}
}
ginx.Dangerous(rt.createAuth(c.Request.Context(), userIdentity, ts))
redirect := "/"
if ret.Redirect != "/login" {
@@ -361,7 +316,7 @@ func (rt *Router) loginRedirectCas(c *gin.Context) {
}
if !rt.Sso.CAS.Enable {
logx.Errorf(c.Request.Context(), "cas is not enable")
logger.Error("cas is not enable")
ginx.NewRender(c).Data("", nil)
return
}
@@ -376,18 +331,17 @@ func (rt *Router) loginRedirectCas(c *gin.Context) {
}
func (rt *Router) loginCallbackCas(c *gin.Context) {
rctx := c.Request.Context()
ticket := ginx.QueryStr(c, "ticket", "")
state := ginx.QueryStr(c, "state", "")
ret, err := rt.Sso.CAS.ValidateServiceTicket(rctx, ticket, state, rt.Redis)
ret, err := rt.Sso.CAS.ValidateServiceTicket(c.Request.Context(), ticket, state, rt.Redis)
if err != nil {
logx.Errorf(rctx, "ValidateServiceTicket: %s", err)
logger.Errorf("ValidateServiceTicket: %s", err)
ginx.NewRender(c).Data("", err)
return
}
user, err := models.UserGet(rt.Ctx, "username=?", ret.Username)
if err != nil {
logx.Errorf(rctx, "UserGet: %s", err)
logger.Errorf("UserGet: %s", err)
}
ginx.Dangerous(err)
if user != nil {
@@ -406,10 +360,10 @@ func (rt *Router) loginCallbackCas(c *gin.Context) {
userIdentity := fmt.Sprintf("%d-%s", user.Id, user.Username)
ts, err := rt.createTokens(rt.HTTP.JWTAuth.SigningKey, userIdentity)
if err != nil {
logx.Errorf(rctx, "createTokens: %s", err)
logger.Errorf("createTokens: %s", err)
}
ginx.Dangerous(err)
ginx.Dangerous(rt.createAuth(rctx, userIdentity, ts))
ginx.Dangerous(rt.createAuth(c.Request.Context(), userIdentity, ts))
redirect := "/"
if ret.Redirect != "/login" {
@@ -452,180 +406,13 @@ func (rt *Router) loginRedirectOAuth(c *gin.Context) {
ginx.NewRender(c).Data(redirect, err)
}
func (rt *Router) loginRedirectDingTalk(c *gin.Context) {
redirect := ginx.QueryStr(c, "redirect", "/")
v, exists := c.Get("userid")
if exists {
userid := v.(int64)
user, err := models.UserGetById(rt.Ctx, userid)
ginx.Dangerous(err)
if user == nil {
ginx.Bomb(200, "user not found")
}
if user.Username != "" { // already login
ginx.NewRender(c).Data(redirect, nil)
return
}
}
if !rt.Sso.DingTalk.Enable {
ginx.NewRender(c).Data("", nil)
return
}
redirect, err := rt.Sso.DingTalk.Authorize(rt.Redis, redirect)
ginx.Dangerous(err)
ginx.NewRender(c).Data(redirect, err)
}
func (rt *Router) loginCallbackDingTalk(c *gin.Context) {
rctx := c.Request.Context()
code := ginx.QueryStr(c, "code", "")
state := ginx.QueryStr(c, "state", "")
ret, err := rt.Sso.DingTalk.Callback(rt.Redis, rctx, code, state)
if err != nil {
logx.Errorf(rctx, "sso_callback DingTalk fail. code:%s, state:%s, get ret: %+v. error: %v", code, state, ret, err)
ginx.NewRender(c).Data(CallbackOutput{}, err)
return
}
user, err := models.UserGet(rt.Ctx, "username=?", ret.Username)
ginx.Dangerous(err)
if user != nil {
if rt.Sso.DingTalk.DingTalkConfig.CoverAttributes {
updatedFields := user.UpdateSsoFields(dingtalk.SsoTypeName, ret.Nickname, ret.Phone, ret.Email)
ginx.Dangerous(user.Update(rt.Ctx, "update_at", updatedFields...))
}
} else {
user = new(models.User)
user.FullSsoFields(dingtalk.SsoTypeName, ret.Username, ret.Nickname, ret.Phone, ret.Email, rt.Sso.DingTalk.DingTalkConfig.DefaultRoles)
// create user from dingtalk
ginx.Dangerous(user.Add(rt.Ctx))
}
// set user login state
userIdentity := fmt.Sprintf("%d-%s", user.Id, user.Username)
ts, err := rt.createTokens(rt.HTTP.JWTAuth.SigningKey, userIdentity)
ginx.Dangerous(err)
ginx.Dangerous(rt.createAuth(c.Request.Context(), userIdentity, ts))
redirect := "/"
if ret.Redirect != "/login" {
redirect = ret.Redirect
}
ginx.NewRender(c).Data(CallbackOutput{
Redirect: redirect,
User: user,
AccessToken: ts.AccessToken,
RefreshToken: ts.RefreshToken,
}, nil)
}
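The DingTalk and FeiShu callbacks removed in this file share one pattern: look the user up by username, overwrite SSO-derived fields when CoverAttributes is set, otherwise create the user, then issue tokens. A compact sketch of the create-or-update step (types and storage are stand-ins, not the real models API):

```go
package main

import "fmt"

type ssoProfile struct{ Username, Nickname, Phone, Email string }

type user struct{ Username, Nickname, Phone, Email string }

// users stands in for the user table.
var users = map[string]*user{}

// upsertSsoUser updates an existing user's SSO fields when coverAttributes
// is set, or creates the user from the profile otherwise.
func upsertSsoUser(p ssoProfile, coverAttributes bool) *user {
	if u, ok := users[p.Username]; ok {
		if coverAttributes {
			u.Nickname, u.Phone, u.Email = p.Nickname, p.Phone, p.Email
		}
		return u
	}
	u := &user{Username: p.Username, Nickname: p.Nickname, Phone: p.Phone, Email: p.Email}
	users[p.Username] = u
	return u
}

func main() {
	u := upsertSsoUser(ssoProfile{Username: "alice", Nickname: "Alice"}, false)
	fmt.Println(u.Username, u.Nickname)
}
```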
func (rt *Router) loginRedirectFeiShu(c *gin.Context) {
redirect := ginx.QueryStr(c, "redirect", "/")
v, exists := c.Get("userid")
if exists {
userid := v.(int64)
user, err := models.UserGetById(rt.Ctx, userid)
ginx.Dangerous(err)
if user == nil {
ginx.Bomb(200, "user not found")
}
if user.Username != "" { // already login
ginx.NewRender(c).Data(redirect, nil)
return
}
}
if rt.Sso.FeiShu == nil || !rt.Sso.FeiShu.Enable {
ginx.NewRender(c).Data("", nil)
return
}
redirect, err := rt.Sso.FeiShu.Authorize(rt.Redis, redirect)
ginx.Dangerous(err)
ginx.NewRender(c).Data(redirect, err)
}
func (rt *Router) loginCallbackFeiShu(c *gin.Context) {
rctx := c.Request.Context()
code := ginx.QueryStr(c, "code", "")
state := ginx.QueryStr(c, "state", "")
ret, err := rt.Sso.FeiShu.Callback(rt.Redis, rctx, code, state)
if err != nil {
logx.Errorf(rctx, "sso_callback FeiShu fail. code:%s, state:%s, get ret: %+v. error: %v", code, state, ret, err)
ginx.NewRender(c).Data(CallbackOutput{}, err)
return
}
user, err := models.UserGet(rt.Ctx, "username=?", ret.Username)
ginx.Dangerous(err)
if user != nil {
if rt.Sso.FeiShu != nil && rt.Sso.FeiShu.FeiShuConfig != nil && rt.Sso.FeiShu.FeiShuConfig.CoverAttributes {
updatedFields := user.UpdateSsoFields(feishu.SsoTypeName, ret.Nickname, ret.Phone, ret.Email)
ginx.Dangerous(user.Update(rt.Ctx, "update_at", updatedFields...))
}
} else {
user = new(models.User)
defaultRoles := []string{}
defaultUserGroups := []int64{}
if rt.Sso.FeiShu != nil && rt.Sso.FeiShu.FeiShuConfig != nil {
defaultRoles = rt.Sso.FeiShu.FeiShuConfig.DefaultRoles
defaultUserGroups = rt.Sso.FeiShu.FeiShuConfig.DefaultUserGroups
}
user.FullSsoFields(feishu.SsoTypeName, ret.Username, ret.Nickname, ret.Phone, ret.Email, defaultRoles)
ginx.Dangerous(user.Add(rt.Ctx))
if len(defaultUserGroups) > 0 {
err = user.AddToUserGroups(rt.Ctx, defaultUserGroups)
if err != nil {
logx.Errorf(rctx, "sso feishu add user group error %v %v", ret, err)
}
}
}
// set user login state
userIdentity := fmt.Sprintf("%d-%s", user.Id, user.Username)
ts, err := rt.createTokens(rt.HTTP.JWTAuth.SigningKey, userIdentity)
ginx.Dangerous(err)
ginx.Dangerous(rt.createAuth(c.Request.Context(), userIdentity, ts))
redirect := "/"
if ret.Redirect != "/login" {
redirect = ret.Redirect
}
ginx.NewRender(c).Data(CallbackOutput{
Redirect: redirect,
User: user,
AccessToken: ts.AccessToken,
RefreshToken: ts.RefreshToken,
}, nil)
}
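The FeiShu callback additionally adds a newly created user to the configured default user groups. A tiny sketch of that post-create step (the membership map stands in for the user-group tables):

```go
package main

import "fmt"

// memberships stands in for the user_group_member table: group id -> user ids.
var memberships = map[int64][]int64{}

// addToUserGroups attaches a user to every configured default group.
func addToUserGroups(userID int64, groupIDs []int64) {
	for _, gid := range groupIDs {
		memberships[gid] = append(memberships[gid], userID)
	}
}

func main() {
	addToUserGroups(7, []int64{1, 2})
	fmt.Println(memberships)
}
```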
func (rt *Router) loginCallbackOAuth(c *gin.Context) {
rctx := c.Request.Context()
code := ginx.QueryStr(c, "code", "")
state := ginx.QueryStr(c, "state", "")
ret, err := rt.Sso.OAuth2.Callback(rt.Redis, rctx, code, state)
ret, err := rt.Sso.OAuth2.Callback(rt.Redis, c.Request.Context(), code, state)
if err != nil {
logx.Debugf(rctx, "sso.callback() get ret %+v error %v", ret, err)
logger.Debugf("sso.callback() get ret %+v error %v", ret, err)
ginx.NewRender(c).Data(CallbackOutput{}, err)
return
}
@@ -665,15 +452,13 @@ func (rt *Router) loginCallbackOAuth(c *gin.Context) {
}
type SsoConfigOutput struct {
OidcDisplayName string `json:"oidcDisplayName"`
CasDisplayName string `json:"casDisplayName"`
OauthDisplayName string `json:"oauthDisplayName"`
DingTalkDisplayName string `json:"dingTalkDisplayName"`
FeiShuDisplayName string `json:"feishuDisplayName"`
OidcDisplayName string `json:"oidcDisplayName"`
CasDisplayName string `json:"casDisplayName"`
OauthDisplayName string `json:"oauthDisplayName"`
}
func (rt *Router) ssoConfigNameGet(c *gin.Context) {
var oidcDisplayName, casDisplayName, oauthDisplayName, dingTalkDisplayName, feiShuDisplayName string
var oidcDisplayName, casDisplayName, oauthDisplayName string
if rt.Sso.OIDC != nil {
oidcDisplayName = rt.Sso.OIDC.GetDisplayName()
}
@@ -686,117 +471,23 @@ func (rt *Router) ssoConfigNameGet(c *gin.Context) {
oauthDisplayName = rt.Sso.OAuth2.GetDisplayName()
}
if rt.Sso.DingTalk != nil {
dingTalkDisplayName = rt.Sso.DingTalk.GetDisplayName()
}
if rt.Sso.FeiShu != nil {
feiShuDisplayName = rt.Sso.FeiShu.GetDisplayName()
}
ginx.NewRender(c).Data(SsoConfigOutput{
OidcDisplayName: oidcDisplayName,
CasDisplayName: casDisplayName,
OauthDisplayName: oauthDisplayName,
DingTalkDisplayName: dingTalkDisplayName,
FeiShuDisplayName: feiShuDisplayName,
OidcDisplayName: oidcDisplayName,
CasDisplayName: casDisplayName,
OauthDisplayName: oauthDisplayName,
}, nil)
}
func (rt *Router) ssoConfigGets(c *gin.Context) {
var ssoConfigs []models.SsoConfig
lst, err := models.SsoConfigGets(rt.Ctx)
ginx.Dangerous(err)
if len(lst) == 0 {
ginx.NewRender(c).Data(ssoConfigs, nil)
return
}
// TODO: dingTalkExist is kept for compatibility with the current frontend config; once SSO is unified later, default entries will no longer be pre-populated
dingTalkExist := false
feiShuExist := false
for _, config := range lst {
var ssoReqConfig models.SsoConfig
ssoReqConfig.Id = config.Id
ssoReqConfig.Name = config.Name
ssoReqConfig.UpdateAt = config.UpdateAt
switch config.Name {
case dingtalk.SsoTypeName:
dingTalkExist = true
err := json.Unmarshal([]byte(config.Content), &ssoReqConfig.SettingJson)
ginx.Dangerous(err)
case feishu.SsoTypeName:
feiShuExist = true
err := json.Unmarshal([]byte(config.Content), &ssoReqConfig.SettingJson)
ginx.Dangerous(err)
default:
ssoReqConfig.Content = config.Content
}
ssoConfigs = append(ssoConfigs, ssoReqConfig)
}
// TODO: dingTalkExist is kept for compatibility with the current frontend config; once SSO is unified later, default entries will no longer be pre-populated
if !dingTalkExist {
var ssoConfig models.SsoConfig
ssoConfig.Name = dingtalk.SsoTypeName
ssoConfigs = append(ssoConfigs, ssoConfig)
}
if !feiShuExist {
var ssoConfig models.SsoConfig
ssoConfig.Name = feishu.SsoTypeName
ssoConfigs = append(ssoConfigs, ssoConfig)
}
ginx.NewRender(c).Data(ssoConfigs, nil)
ginx.NewRender(c).Data(models.SsoConfigGets(rt.Ctx))
}
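The removed ssoConfigGets body decodes Content into SettingJson for the JSON-based providers (DingTalk, FeiShu) while leaving other providers' raw TOML untouched. A hedged sketch of that split (the struct shape is an assumption modeled on models.SsoConfig):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// ssoConfig mirrors the idea of the stored record: Content holds the raw
// stored text, SettingJson the decoded form for JSON-based providers.
type ssoConfig struct {
	Name        string
	Content     string
	SettingJson map[string]interface{}
}

func decodeForAPI(c ssoConfig) (ssoConfig, error) {
	switch c.Name {
	case "dingtalk", "feishu":
		// JSON-based providers are decoded so the frontend edits fields,
		// not a raw blob.
		if err := json.Unmarshal([]byte(c.Content), &c.SettingJson); err != nil {
			return c, err
		}
		c.Content = ""
	default:
		// LDAP/OIDC/CAS/OAuth2 keep their raw TOML content as-is.
	}
	return c, nil
}

func main() {
	out, err := decodeForAPI(ssoConfig{Name: "dingtalk", Content: `{"app_key":"xxx"}`})
	fmt.Println(out.SettingJson, err)
}
```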
func (rt *Router) ssoConfigUpdate(c *gin.Context) {
var f models.SsoConfig
var ssoConfig models.SsoConfig
ginx.BindJSON(c, &ssoConfig)
ginx.BindJSON(c, &f)
switch ssoConfig.Name {
case dingtalk.SsoTypeName:
f.Name = ssoConfig.Name
setting, err := json.Marshal(ssoConfig.SettingJson)
ginx.Dangerous(err)
f.Content = string(setting)
f.UpdateAt = time.Now().Unix()
sso, err := f.Query(rt.Ctx)
if !errors.Is(err, gorm.ErrRecordNotFound) {
ginx.Dangerous(err)
}
if errors.Is(err, gorm.ErrRecordNotFound) {
err = f.Create(rt.Ctx)
} else {
f.Id = sso.Id
err = f.Update(rt.Ctx)
}
ginx.Dangerous(err)
case feishu.SsoTypeName:
f.Name = ssoConfig.Name
setting, err := json.Marshal(ssoConfig.SettingJson)
ginx.Dangerous(err)
f.Content = string(setting)
f.UpdateAt = time.Now().Unix()
sso, err := f.Query(rt.Ctx)
if !errors.Is(err, gorm.ErrRecordNotFound) {
ginx.Dangerous(err)
}
if errors.Is(err, gorm.ErrRecordNotFound) {
err = f.Create(rt.Ctx)
} else {
f.Id = sso.Id
err = f.Update(rt.Ctx)
}
ginx.Dangerous(err)
default:
f.Id = ssoConfig.Id
f.Name = ssoConfig.Name
f.Content = ssoConfig.Content
err := f.Update(rt.Ctx)
ginx.Dangerous(err)
}
err := f.Update(rt.Ctx)
ginx.Dangerous(err)
switch f.Name {
case "LDAP":
@@ -820,22 +511,6 @@ func (rt *Router) ssoConfigUpdate(c *gin.Context) {
err := toml.Unmarshal([]byte(f.Content), &config)
ginx.Dangerous(err)
rt.Sso.OAuth2.Reload(config)
case dingtalk.SsoTypeName:
var config dingtalk.Config
err := json.Unmarshal([]byte(f.Content), &config)
ginx.Dangerous(err)
if rt.Sso.DingTalk == nil {
rt.Sso.DingTalk = dingtalk.New(config)
}
rt.Sso.DingTalk.Reload(config)
case feishu.SsoTypeName:
var config feishu.Config
err := json.Unmarshal([]byte(f.Content), &config)
ginx.Dangerous(err)
if rt.Sso.FeiShu == nil {
rt.Sso.FeiShu = feishu.New(config)
}
rt.Sso.FeiShu.Reload(config)
}
ginx.NewRender(c).Message(nil)
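After persisting the updated record, ssoConfigUpdate reloads the matching in-memory SSO client so the change takes effect without a restart. A small sketch of that persist-then-reload idea (the Provider interface is illustrative; the real clients are ldapx, oidcx, cas, oauth2x, and so on):

```go
package main

import "fmt"

// Provider is an illustrative stand-in for the per-protocol SSO clients,
// each of which exposes a Reload that rebuilds the client from new config.
type Provider interface {
	Reload(content string) error
}

type oidcProvider struct{ issuer string }

func (p *oidcProvider) Reload(content string) error {
	p.issuer = content // real code parses TOML/JSON and rebuilds the client
	return nil
}

// applyConfig persists first, then reloads, so the running process picks up
// the new settings immediately.
func applyConfig(p Provider, persist func(string) error, content string) error {
	if err := persist(content); err != nil {
		return err
	}
	return p.Reload(content)
}

func main() {
	p := &oidcProvider{}
	err := applyConfig(p, func(string) error { return nil }, "https://issuer.example.com")
	fmt.Println(p.issuer, err)
}
```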

Some files were not shown because too many files have changed in this diff.