Compare commits

...

19 Commits

Author SHA1 Message Date
ning
cd44d131aa refactor: optimize drop sample 2026-01-23 11:58:02 +08:00
ning
42ba6c8738 brain fix get datasource 2026-01-20 20:45:12 +08:00
jie210
cd395f659c feat: sso support feishu (#3045) 2026-01-16 19:23:36 +08:00
ning
a16c87c734 refactor: update trigger value 2026-01-14 16:14:21 +08:00
ning
24f3d85b29 fix: query data 2025-12-30 19:25:07 +08:00
ning
e046b57f8b update ds perm check 2025-12-30 16:48:07 +08:00
ning
4b054c279c fix: datasource delete 2025-12-30 16:29:35 +08:00
ning
cf2f442c36 refactor: doris datasource conf 2025-12-29 20:52:58 +08:00
pioneerlfn
f0e948acef fix: doris exec sql timeout unit: s -> ms (#3020) 2025-12-29 16:08:21 +08:00
pioneerlfn
e1fa53e48c refactor: doris update row check, add interval, offset, showIndexes (#3014) 2025-12-26 16:15:11 +08:00
ning
adeb6dbf0b fix: search view api 2025-12-26 14:51:16 +08:00
ning
027e3ece1e refactor: save view check name 2025-12-26 14:51:06 +08:00
Yening Qin
1637d6db73 feat: support search view save (#3009) 2025-12-26 14:50:55 +08:00
ning
8ff0c1eeb9 fix: webhook connection leak 2025-12-17 18:05:18 +08:00
ning
b5917bdf8e fix: webhook connection leak 2025-12-17 18:01:52 +08:00
pioneerlfn
05063bdcca fix: doris macro/time (#2990) 2025-12-11 12:29:07 +08:00
ning
98de2f5b30 refactor: builtin tpl add 2025-12-08 16:54:31 +08:00
ning
e4f0a6560d Merge branch 'release-20' of github.com:ccfos/nightingale into release-20 2025-12-04 20:15:06 +08:00
ning
b065a1fea7 fix: event concurrent map writes 2025-12-04 20:13:05 +08:00
29 changed files with 1244 additions and 67 deletions

View File

@@ -282,13 +282,16 @@ func PipelineApplicable(pipeline *models.EventPipeline, event *models.AlertCurEv
tagMatch := true
if len(pipeline.LabelFilters) > 0 {
for i := range pipeline.LabelFilters {
if pipeline.LabelFilters[i].Func == "" {
pipeline.LabelFilters[i].Func = pipeline.LabelFilters[i].Op
// Deep copy to avoid concurrent map writes on cached objects
labelFiltersCopy := make([]models.TagFilter, len(pipeline.LabelFilters))
copy(labelFiltersCopy, pipeline.LabelFilters)
for i := range labelFiltersCopy {
if labelFiltersCopy[i].Func == "" {
labelFiltersCopy[i].Func = labelFiltersCopy[i].Op
}
}
tagFilters, err := models.ParseTagFilter(pipeline.LabelFilters)
tagFilters, err := models.ParseTagFilter(labelFiltersCopy)
if err != nil {
logger.Errorf("pipeline applicable failed to parse tag filter: %v event:%+v pipeline:%+v", err, event, pipeline)
return false
@@ -298,7 +301,11 @@ func PipelineApplicable(pipeline *models.EventPipeline, event *models.AlertCurEv
attributesMatch := true
if len(pipeline.AttrFilters) > 0 {
tagFilters, err := models.ParseTagFilter(pipeline.AttrFilters)
// Deep copy to avoid concurrent map writes on cached objects
attrFiltersCopy := make([]models.TagFilter, len(pipeline.AttrFilters))
copy(attrFiltersCopy, pipeline.AttrFilters)
tagFilters, err := models.ParseTagFilter(attrFiltersCopy)
if err != nil {
logger.Errorf("pipeline applicable failed to parse tag filter: %v event:%+v pipeline:%+v err:%v", tagFilters, event, pipeline, err)
return false
@@ -379,13 +386,16 @@ func NotifyRuleMatchCheck(notifyConfig *models.NotifyConfig, event *models.Alert
tagMatch := true
if len(notifyConfig.LabelKeys) > 0 {
for i := range notifyConfig.LabelKeys {
if notifyConfig.LabelKeys[i].Func == "" {
notifyConfig.LabelKeys[i].Func = notifyConfig.LabelKeys[i].Op
// Deep copy to avoid concurrent map writes on cached objects
labelKeysCopy := make([]models.TagFilter, len(notifyConfig.LabelKeys))
copy(labelKeysCopy, notifyConfig.LabelKeys)
for i := range labelKeysCopy {
if labelKeysCopy[i].Func == "" {
labelKeysCopy[i].Func = labelKeysCopy[i].Op
}
}
tagFilters, err := models.ParseTagFilter(notifyConfig.LabelKeys)
tagFilters, err := models.ParseTagFilter(labelKeysCopy)
if err != nil {
logger.Errorf("notify send failed to parse tag filter: %v event:%+v notify_config:%+v", err, event, notifyConfig)
return fmt.Errorf("failed to parse tag filter: %v", err)
@@ -399,7 +409,11 @@ func NotifyRuleMatchCheck(notifyConfig *models.NotifyConfig, event *models.Alert
attributesMatch := true
if len(notifyConfig.Attributes) > 0 {
tagFilters, err := models.ParseTagFilter(notifyConfig.Attributes)
// Deep copy to avoid concurrent map writes on cached objects
attributesCopy := make([]models.TagFilter, len(notifyConfig.Attributes))
copy(attributesCopy, notifyConfig.Attributes)
tagFilters, err := models.ParseTagFilter(attributesCopy)
if err != nil {
logger.Errorf("notify send failed to parse tag filter: %v event:%+v notify_config:%+v err:%v", tagFilters, event, notifyConfig, err)
return fmt.Errorf("failed to parse tag filter: %v", err)

View File

@@ -1602,11 +1602,15 @@ func (arw *AlertRuleWorker) GetAnomalyPoint(rule *models.AlertRule, dsId int64)
continue
}
switch v.(type) {
case float64:
values += fmt.Sprintf("%s:%.3f ", k, v)
case string:
values += fmt.Sprintf("%s:%s ", k, v)
if u, exists := valuesUnitMap[k]; exists { // 配置了单位,优先用配置了单位的值
values += fmt.Sprintf("%s:%s ", k, u.Text)
} else {
switch v.(type) {
case float64:
values += fmt.Sprintf("%s:%.3f ", k, v)
case string:
values += fmt.Sprintf("%s:%s ", k, v)
}
}
}

View File

@@ -13,10 +13,53 @@ import (
"github.com/ccfos/nightingale/v6/alert/astats"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/poster"
"github.com/toolkits/pkg/logger"
)
// webhookClientCache 缓存 http.Client避免每次请求都创建新的 Client 导致连接泄露
var webhookClientCache sync.Map // key: clientKey (string), value: *http.Client
// 相同配置的 webhook 会复用同一个 Client
// getWebhookClient returns a shared *http.Client for the given webhook
// configuration, creating and caching one on first use. Clients are keyed
// by webhook.Hash(), so identical configurations reuse the same transport
// instead of leaking connections by building a new client per request.
func getWebhookClient(webhook *models.Webhook) *http.Client {
	key := webhook.Hash()

	// Fast path: a client for this configuration already exists.
	if cached, ok := webhookClientCache.Load(key); ok {
		return cached.(*http.Client)
	}

	// Slow path: build a candidate client for this configuration.
	timeoutSec := webhook.Timeout
	if timeoutSec <= 0 {
		timeoutSec = 10 // default request timeout, seconds
	}

	tr := &http.Transport{
		TLSClientConfig:     &tls.Config{InsecureSkipVerify: webhook.SkipVerify},
		MaxIdleConns:        100,
		MaxIdleConnsPerHost: 10,
		IdleConnTimeout:     90 * time.Second,
	}
	if poster.UseProxy(webhook.Url) {
		tr.Proxy = http.ProxyFromEnvironment
	}

	candidate := &http.Client{
		Timeout:   time.Duration(timeoutSec) * time.Second,
		Transport: tr,
	}

	// LoadOrStore guarantees exactly one client per key under concurrency;
	// if another goroutine won the race, discard ours and use the winner.
	if winner, raced := webhookClientCache.LoadOrStore(key, candidate); raced {
		return winner.(*http.Client)
	}
	return candidate
}
func sendWebhook(webhook *models.Webhook, event interface{}, stats *astats.Stats) (bool, string, error) {
channel := "webhook"
if webhook.Type == models.RuleCallback {
@@ -55,25 +98,13 @@ func sendWebhook(webhook *models.Webhook, event interface{}, stats *astats.Stats
req.Header.Set(conf.Headers[i], conf.Headers[i+1])
}
}
insecureSkipVerify := false
if webhook != nil {
insecureSkipVerify = webhook.SkipVerify
}
if conf.Client == nil {
logger.Warningf("event_%s, event:%s, url: [%s], error: [%s]", channel, string(bs), conf.Url, "client is nil")
conf.Client = &http.Client{
Timeout: time.Duration(conf.Timeout) * time.Second,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipVerify},
},
}
}
// 使用全局 Client 缓存,避免每次请求都创建新的 Client 导致连接泄露
client := getWebhookClient(conf)
stats.AlertNotifyTotal.WithLabelValues(channel).Inc()
var resp *http.Response
var body []byte
resp, err = conf.Client.Do(req)
resp, err = client.Do(req)
if err != nil {
stats.AlertNotifyErrorTotal.WithLabelValues(channel).Inc()

View File

@@ -211,8 +211,8 @@ func (rt *Router) Config(r *gin.Engine) {
pages.GET("/datasource/brief", rt.auth(), rt.user(), rt.datasourceBriefs)
pages.POST("/datasource/query", rt.auth(), rt.user(), rt.datasourceQuery)
pages.POST("/ds-query", rt.auth(), rt.QueryData)
pages.POST("/logs-query", rt.auth(), rt.QueryLogV2)
pages.POST("/ds-query", rt.auth(), rt.user(), rt.QueryData)
pages.POST("/logs-query", rt.auth(), rt.user(), rt.QueryLogV2)
pages.POST("/tdengine-databases", rt.auth(), rt.tdengineDatabases)
pages.POST("/tdengine-tables", rt.auth(), rt.tdengineTables)
@@ -251,10 +251,12 @@ func (rt *Router) Config(r *gin.Engine) {
pages.GET("/auth/redirect/cas", rt.loginRedirectCas)
pages.GET("/auth/redirect/oauth", rt.loginRedirectOAuth)
pages.GET("/auth/redirect/dingtalk", rt.loginRedirectDingTalk)
pages.GET("/auth/redirect/feishu", rt.loginRedirectFeiShu)
pages.GET("/auth/callback", rt.loginCallback)
pages.GET("/auth/callback/cas", rt.loginCallbackCas)
pages.GET("/auth/callback/oauth", rt.loginCallbackOAuth)
pages.GET("/auth/callback/dingtalk", rt.loginCallbackDingTalk)
pages.GET("/auth/callback/feishu", rt.loginCallbackFeiShu)
pages.GET("/auth/perms", rt.allPerms)
pages.GET("/metrics/desc", rt.metricsDescGetFile)
@@ -569,6 +571,14 @@ func (rt *Router) Config(r *gin.Engine) {
pages.GET("/pagerduty-service-list/:id", rt.auth(), rt.user(), rt.pagerDutyNotifyServicesGet)
pages.GET("/notify-channel-config", rt.auth(), rt.user(), rt.notifyChannelGetBy)
pages.GET("/notify-channel-config/idents", rt.notifyChannelIdentsGet)
// saved view 查询条件保存相关路由
pages.GET("/saved-views", rt.auth(), rt.user(), rt.savedViewGets)
pages.POST("/saved-views", rt.auth(), rt.user(), rt.savedViewAdd)
pages.PUT("/saved-view/:id", rt.auth(), rt.user(), rt.savedViewPut)
pages.DELETE("/saved-view/:id", rt.auth(), rt.user(), rt.savedViewDel)
pages.POST("/saved-view/:id/favorite", rt.auth(), rt.user(), rt.savedViewFavoriteAdd)
pages.DELETE("/saved-view/:id/favorite", rt.auth(), rt.user(), rt.savedViewFavoriteDel)
}
r.GET("/api/n9e/versions", func(c *gin.Context) {

View File

@@ -12,6 +12,7 @@ import (
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/cas"
"github.com/ccfos/nightingale/v6/pkg/dingtalk"
"github.com/ccfos/nightingale/v6/pkg/feishu"
"github.com/ccfos/nightingale/v6/pkg/ldapx"
"github.com/ccfos/nightingale/v6/pkg/oauth2x"
"github.com/ccfos/nightingale/v6/pkg/oidcx"
@@ -519,6 +520,85 @@ func (rt *Router) loginCallbackDingTalk(c *gin.Context) {
}
// loginRedirectFeiShu returns the FeiShu SSO authorize URL for the frontend
// to redirect to. If the request already carries a logged-in user, the
// original redirect target is returned instead; if FeiShu SSO is not
// configured or disabled, an empty string is returned.
func (rt *Router) loginRedirectFeiShu(c *gin.Context) {
	redirect := ginx.QueryStr(c, "redirect", "/")

	// "userid" is read from the gin context — presumably set by an auth
	// middleware for already-authenticated sessions (TODO confirm).
	v, exists := c.Get("userid")
	if exists {
		userid := v.(int64)
		user, err := models.UserGetById(rt.Ctx, userid)
		ginx.Dangerous(err)
		if user == nil {
			ginx.Bomb(200, "user not found")
		}
		if user.Username != "" { // already login
			// Session is valid: skip SSO and send the caller straight on.
			ginx.NewRender(c).Data(redirect, nil)
			return
		}
	}

	// FeiShu SSO not configured or disabled: nothing to redirect to.
	if rt.Sso.FeiShu == nil || !rt.Sso.FeiShu.Enable {
		ginx.NewRender(c).Data("", nil)
		return
	}

	// Authorize stores state (via Redis) and yields the provider URL.
	redirect, err := rt.Sso.FeiShu.Authorize(rt.Redis, redirect)
	ginx.Dangerous(err)

	ginx.NewRender(c).Data(redirect, err)
}
// loginCallbackFeiShu handles the FeiShu OAuth callback: it exchanges the
// code/state for the SSO user profile, creates or updates the matching
// local user, and issues JWT tokens to establish the login session.
func (rt *Router) loginCallbackFeiShu(c *gin.Context) {
	code := ginx.QueryStr(c, "code", "")
	state := ginx.QueryStr(c, "state", "")

	// Guard against callbacks arriving while FeiShu SSO is not configured;
	// without this, rt.Sso.FeiShu.Callback below would panic on nil.
	// (The original only checked for nil after the dereference.)
	if rt.Sso.FeiShu == nil {
		ginx.NewRender(c).Data(CallbackOutput{}, fmt.Errorf("feishu sso not enabled"))
		return
	}

	ret, err := rt.Sso.FeiShu.Callback(rt.Redis, c.Request.Context(), code, state)
	if err != nil {
		logger.Errorf("sso_callback FeiShu fail. code:%s, state:%s, get ret: %+v. error: %v", code, state, ret, err)
		ginx.NewRender(c).Data(CallbackOutput{}, err)
		return
	}

	user, err := models.UserGet(rt.Ctx, "username=?", ret.Username)
	ginx.Dangerous(err)

	if user != nil {
		// Existing user: optionally overwrite profile fields from SSO.
		if rt.Sso.FeiShu.FeiShuConfig != nil && rt.Sso.FeiShu.FeiShuConfig.CoverAttributes {
			updatedFields := user.UpdateSsoFields(feishu.SsoTypeName, ret.Nickname, ret.Phone, ret.Email)
			ginx.Dangerous(user.Update(rt.Ctx, "update_at", updatedFields...))
		}
	} else {
		// First login: create the user with the configured default roles.
		user = new(models.User)
		defaultRoles := []string{}
		if rt.Sso.FeiShu.FeiShuConfig != nil {
			defaultRoles = rt.Sso.FeiShu.FeiShuConfig.DefaultRoles
		}
		user.FullSsoFields(feishu.SsoTypeName, ret.Username, ret.Nickname, ret.Phone, ret.Email, defaultRoles)
		// create user from feishu
		ginx.Dangerous(user.Add(rt.Ctx))
	}

	// set user login state: issue and persist JWT tokens.
	userIdentity := fmt.Sprintf("%d-%s", user.Id, user.Username)
	ts, err := rt.createTokens(rt.HTTP.JWTAuth.SigningKey, userIdentity)
	ginx.Dangerous(err)
	ginx.Dangerous(rt.createAuth(c.Request.Context(), userIdentity, ts))

	// Never bounce the user back to the login page itself.
	redirect := "/"
	if ret.Redirect != "/login" {
		redirect = ret.Redirect
	}

	ginx.NewRender(c).Data(CallbackOutput{
		Redirect:     redirect,
		User:         user,
		AccessToken:  ts.AccessToken,
		RefreshToken: ts.RefreshToken,
	}, nil)
}
func (rt *Router) loginCallbackOAuth(c *gin.Context) {
code := ginx.QueryStr(c, "code", "")
state := ginx.QueryStr(c, "state", "")
@@ -569,10 +649,11 @@ type SsoConfigOutput struct {
CasDisplayName string `json:"casDisplayName"`
OauthDisplayName string `json:"oauthDisplayName"`
DingTalkDisplayName string `json:"dingTalkDisplayName"`
FeiShuDisplayName string `json:"feishuDisplayName"`
}
func (rt *Router) ssoConfigNameGet(c *gin.Context) {
var oidcDisplayName, casDisplayName, oauthDisplayName, dingTalkDisplayName string
var oidcDisplayName, casDisplayName, oauthDisplayName, dingTalkDisplayName, feiShuDisplayName string
if rt.Sso.OIDC != nil {
oidcDisplayName = rt.Sso.OIDC.GetDisplayName()
}
@@ -589,11 +670,16 @@ func (rt *Router) ssoConfigNameGet(c *gin.Context) {
dingTalkDisplayName = rt.Sso.DingTalk.GetDisplayName()
}
if rt.Sso.FeiShu != nil {
feiShuDisplayName = rt.Sso.FeiShu.GetDisplayName()
}
ginx.NewRender(c).Data(SsoConfigOutput{
OidcDisplayName: oidcDisplayName,
CasDisplayName: casDisplayName,
OauthDisplayName: oauthDisplayName,
DingTalkDisplayName: dingTalkDisplayName,
FeiShuDisplayName: feiShuDisplayName,
}, nil)
}
@@ -608,6 +694,7 @@ func (rt *Router) ssoConfigGets(c *gin.Context) {
// TODO: dingTalkExist 为了兼容当前前端配置, 后期单点登陆统一调整后不在预先设置默认内容
dingTalkExist := false
feiShuExist := false
for _, config := range lst {
var ssoReqConfig models.SsoConfig
ssoReqConfig.Id = config.Id
@@ -618,6 +705,10 @@ func (rt *Router) ssoConfigGets(c *gin.Context) {
dingTalkExist = true
err := json.Unmarshal([]byte(config.Content), &ssoReqConfig.SettingJson)
ginx.Dangerous(err)
case feishu.SsoTypeName:
feiShuExist = true
err := json.Unmarshal([]byte(config.Content), &ssoReqConfig.SettingJson)
ginx.Dangerous(err)
default:
ssoReqConfig.Content = config.Content
}
@@ -630,6 +721,11 @@ func (rt *Router) ssoConfigGets(c *gin.Context) {
ssoConfig.Name = dingtalk.SsoTypeName
ssoConfigs = append(ssoConfigs, ssoConfig)
}
if !feiShuExist {
var ssoConfig models.SsoConfig
ssoConfig.Name = feishu.SsoTypeName
ssoConfigs = append(ssoConfigs, ssoConfig)
}
ginx.NewRender(c).Data(ssoConfigs, nil)
}
@@ -657,6 +753,23 @@ func (rt *Router) ssoConfigUpdate(c *gin.Context) {
err = f.Update(rt.Ctx)
}
ginx.Dangerous(err)
case feishu.SsoTypeName:
f.Name = ssoConfig.Name
setting, err := json.Marshal(ssoConfig.SettingJson)
ginx.Dangerous(err)
f.Content = string(setting)
f.UpdateAt = time.Now().Unix()
sso, err := f.Query(rt.Ctx)
if !errors.Is(err, gorm.ErrRecordNotFound) {
ginx.Dangerous(err)
}
if errors.Is(err, gorm.ErrRecordNotFound) {
err = f.Create(rt.Ctx)
} else {
f.Id = sso.Id
err = f.Update(rt.Ctx)
}
ginx.Dangerous(err)
default:
f.Id = ssoConfig.Id
f.Name = ssoConfig.Name
@@ -695,6 +808,14 @@ func (rt *Router) ssoConfigUpdate(c *gin.Context) {
rt.Sso.DingTalk = dingtalk.New(config)
}
rt.Sso.DingTalk.Reload(config)
case feishu.SsoTypeName:
var config feishu.Config
err := json.Unmarshal([]byte(f.Content), &config)
ginx.Dangerous(err)
if rt.Sso.FeiShu == nil {
rt.Sso.FeiShu = feishu.New(config)
}
rt.Sso.FeiShu.Reload(config)
}
ginx.NewRender(c).Message(nil)

View File

@@ -1,18 +1,23 @@
package router
import (
"context"
"fmt"
"sort"
"sync"
"github.com/ccfos/nightingale/v6/dscache"
"github.com/ccfos/nightingale/v6/dskit/doris"
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
)
func CheckDsPerm(c *gin.Context, dsId int64, cate string, q interface{}) bool {
// CheckDsPermFunc decides whether the current request may run query q
// against datasource dsId of category cate.
type CheckDsPermFunc func(c *gin.Context, dsId int64, cate string, q interface{}) bool

// CheckDsPerm is the pluggable datasource-permission hook used by the query
// handlers. The default implementation allows everything.
var CheckDsPerm CheckDsPermFunc = func(c *gin.Context, dsId int64, cate string, q interface{}) bool {
	// TODO: later, decide based on cate whether a permission check is required
	return true
}
@@ -107,10 +112,13 @@ func (rt *Router) QueryLogBatch(c *gin.Context) {
}
func QueryDataConcurrently(anonymousAccess bool, ctx *gin.Context, f models.QueryParam) ([]models.DataResp, error) {
var resp []models.DataResp
var mu sync.Mutex
var wg sync.WaitGroup
var errs []error
var (
resp []models.DataResp
mu sync.Mutex
wg sync.WaitGroup
errs []error
rCtx = ctx.Request.Context()
)
for _, q := range f.Queries {
if !anonymousAccess && !CheckDsPerm(ctx, f.DatasourceId, f.Cate, q) {
@@ -122,12 +130,17 @@ func QueryDataConcurrently(anonymousAccess bool, ctx *gin.Context, f models.Quer
logger.Warningf("cluster:%d not exists", f.DatasourceId)
return nil, fmt.Errorf("cluster not exists")
}
vCtx := rCtx
if f.Cate == models.DORIS {
vCtx = context.WithValue(vCtx, doris.NoNeedCheckMaxRow, true)
}
wg.Add(1)
go func(query interface{}) {
defer wg.Done()
data, err := plug.QueryData(ctx.Request.Context(), query)
data, err := plug.QueryData(vCtx, query)
if err != nil {
logger.Warningf("query data error: req:%+v err:%v", query, err)
mu.Lock()

View File

@@ -0,0 +1,144 @@
package router
import (
"net/http"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/slice"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
)
// savedViewGets lists the saved views visible to the current user for a
// given page, with the user's favorite views ordered before the rest.
func (rt *Router) savedViewGets(c *gin.Context) {
	page := ginx.QueryStr(c, "page", "")
	me := c.MustGet("user").(*models.User)

	lst, err := models.SavedViewGets(rt.Ctx, page)
	if err != nil {
		ginx.NewRender(c).Data(nil, err)
		return
	}

	userGids, err := models.MyGroupIds(rt.Ctx, me.Id)
	if err != nil {
		ginx.NewRender(c).Data(nil, err)
		return
	}

	favoriteMap, err := models.SavedViewFavoriteGetByUserId(rt.Ctx, me.Id)
	if err != nil {
		ginx.NewRender(c).Data(nil, err)
		return
	}

	favorites := make([]models.SavedView, 0)
	others := make([]models.SavedView, 0)
	for _, v := range lst {
		// Visible when: created by me, fully public (PublicCate == 2),
		// or group-shared (PublicCate == 1) with an overlapping group.
		ownedByMe := v.CreateBy == me.Username
		public := v.PublicCate == 2
		shared := v.PublicCate == 1 && slice.HaveIntersection[int64](userGids, v.Gids)
		if !ownedByMe && !public && !shared {
			continue
		}

		v.IsFavorite = favoriteMap[v.Id]
		if v.IsFavorite {
			favorites = append(favorites, v)
		} else {
			others = append(others, v)
		}
	}

	// Favorites come first, each group keeping its original order.
	ginx.NewRender(c).Data(append(favorites, others...), nil)
}
// savedViewAdd creates a new saved view owned by the current user and
// returns the new record's id.
func (rt *Router) savedViewAdd(c *gin.Context) {
	var view models.SavedView
	ginx.BindJSON(c, &view)

	me := c.MustGet("user").(*models.User)

	// Never trust a client-supplied id; ownership is the current user.
	view.Id = 0
	view.CreateBy = me.Username
	view.UpdateBy = me.Username

	err := models.SavedViewAdd(rt.Ctx, &view)
	ginx.NewRender(c).Data(view.Id, err)
}
// savedViewPut updates an existing saved view. Only the creator or an
// administrator may modify it.
func (rt *Router) savedViewPut(c *gin.Context) {
	id := ginx.UrlParamInt64(c, "id")

	view, err := models.SavedViewGetById(rt.Ctx, id)
	if err != nil {
		ginx.NewRender(c).Data(nil, err)
		return
	}
	if view == nil {
		ginx.NewRender(c, http.StatusNotFound).Message("saved view not found")
		return
	}

	me := c.MustGet("user").(*models.User)
	allowed := view.CreateBy == me.Username || me.IsAdmin()
	if !allowed {
		ginx.NewRender(c, http.StatusForbidden).Message("forbidden")
		return
	}

	var in models.SavedView
	ginx.BindJSON(c, &in)

	// Only these fields are client-editable; ownership fields are kept.
	view.Name = in.Name
	view.Filter = in.Filter
	view.PublicCate = in.PublicCate
	view.Gids = in.Gids

	err = models.SavedViewUpdate(rt.Ctx, view, me.Username)
	ginx.NewRender(c).Message(err)
}
// savedViewDel deletes a saved view. Only the creator or an administrator
// may delete it.
func (rt *Router) savedViewDel(c *gin.Context) {
	id := ginx.UrlParamInt64(c, "id")

	view, err := models.SavedViewGetById(rt.Ctx, id)
	if err != nil {
		ginx.NewRender(c).Data(nil, err)
		return
	}
	if view == nil {
		ginx.NewRender(c, http.StatusNotFound).Message("saved view not found")
		return
	}

	me := c.MustGet("user").(*models.User)
	allowed := view.CreateBy == me.Username || me.IsAdmin()
	if !allowed {
		ginx.NewRender(c, http.StatusForbidden).Message("forbidden")
		return
	}

	err = models.SavedViewDel(rt.Ctx, id)
	ginx.NewRender(c).Message(err)
}
// savedViewFavoriteAdd marks the given saved view as a favorite of the
// current user.
func (rt *Router) savedViewFavoriteAdd(c *gin.Context) {
	viewId := ginx.UrlParamInt64(c, "id")
	user := c.MustGet("user").(*models.User)
	ginx.NewRender(c).Message(models.UserViewFavoriteAdd(rt.Ctx, viewId, user.Id))
}
// savedViewFavoriteDel removes the given saved view from the current
// user's favorites.
func (rt *Router) savedViewFavoriteDel(c *gin.Context) {
	viewId := ginx.UrlParamInt64(c, "id")
	user := c.MustGet("user").(*models.User)
	ginx.NewRender(c).Message(models.UserViewFavoriteDel(rt.Ctx, viewId, user.Id))
}

View File

@@ -12,6 +12,7 @@ import (
"github.com/ccfos/nightingale/v6/pkg/cas"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/dingtalk"
"github.com/ccfos/nightingale/v6/pkg/feishu"
"github.com/ccfos/nightingale/v6/pkg/ldapx"
"github.com/ccfos/nightingale/v6/pkg/oauth2x"
"github.com/ccfos/nightingale/v6/pkg/oidcx"
@@ -27,6 +28,7 @@ type SsoClient struct {
CAS *cas.SsoClient
OAuth2 *oauth2x.SsoClient
DingTalk *dingtalk.SsoClient
FeiShu *feishu.SsoClient
LastUpdateTime int64
configCache *memsto.ConfigCache
configLastUpdateTime int64
@@ -203,6 +205,13 @@ func Init(center cconf.Center, ctx *ctx.Context, configCache *memsto.ConfigCache
log.Fatalf("init %s failed: %s", dingtalk.SsoTypeName, err)
}
ssoClient.DingTalk = dingtalk.New(config)
case feishu.SsoTypeName:
var config feishu.Config
err := json.Unmarshal([]byte(cfg.Content), &config)
if err != nil {
log.Fatalf("init %s failed: %s", feishu.SsoTypeName, err)
}
ssoClient.FeiShu = feishu.New(config)
}
}
@@ -291,6 +300,22 @@ func (s *SsoClient) reload(ctx *ctx.Context) error {
s.DingTalk = nil
}
if feiShuConfig, ok := ssoConfigMap[feishu.SsoTypeName]; ok {
var config feishu.Config
err := json.Unmarshal([]byte(feiShuConfig.Content), &config)
if err != nil {
logger.Warningf("reload %s failed: %s", feishu.SsoTypeName, err)
} else {
if s.FeiShu != nil {
s.FeiShu.Reload(config)
} else {
s.FeiShu = feishu.New(config)
}
}
} else {
s.FeiShu = nil
}
s.LastUpdateTime = lastUpdateTime
s.configLastUpdateTime = lastCacheUpdateTime
return nil

View File

@@ -4,12 +4,13 @@ import (
"context"
"fmt"
"strings"
"time"
"github.com/ccfos/nightingale/v6/datasource"
"github.com/ccfos/nightingale/v6/dskit/doris"
"github.com/ccfos/nightingale/v6/dskit/types"
"github.com/ccfos/nightingale/v6/pkg/macros"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/macros"
"github.com/mitchellh/mapstructure"
"github.com/toolkits/pkg/logger"
@@ -38,6 +39,8 @@ type QueryParam struct {
To int64 `json:"to" mapstructure:"to"`
TimeField string `json:"time_field" mapstructure:"time_field"`
TimeFormat string `json:"time_format" mapstructure:"time_format"`
Interval int64 `json:"interval" mapstructure:"interval"` // 查询时间间隔(秒)
Offset int `json:"offset" mapstructure:"offset"` // 延迟计算不在使用通用配置delay
}
func (d *Doris) InitClient() error {
@@ -146,13 +149,46 @@ func (d *Doris) QueryData(ctx context.Context, query interface{}) ([]models.Data
return nil, fmt.Errorf("valueKey is required")
}
items, err := d.QueryTimeseries(context.TODO(), &doris.QueryParam{
// 设置默认 interval
if dorisQueryParam.Interval == 0 {
dorisQueryParam.Interval = 60
}
// 计算时间范围
now := time.Now().Unix()
var start, end int64
if dorisQueryParam.To != 0 && dorisQueryParam.From != 0 {
end = dorisQueryParam.To
start = dorisQueryParam.From
} else {
end = now
start = end - dorisQueryParam.Interval
}
if dorisQueryParam.Offset != 0 {
end -= int64(dorisQueryParam.Offset)
start -= int64(dorisQueryParam.Offset)
}
dorisQueryParam.From = start
dorisQueryParam.To = end
if strings.Contains(dorisQueryParam.SQL, "$__") {
var err error
dorisQueryParam.SQL, err = macros.Macro(dorisQueryParam.SQL, dorisQueryParam.From, dorisQueryParam.To)
if err != nil {
return nil, err
}
}
items, err := d.QueryTimeseries(ctx, &doris.QueryParam{
Database: dorisQueryParam.Database,
Sql: dorisQueryParam.SQL,
Keys: types.Keys{
ValueKey: dorisQueryParam.Keys.ValueKey,
LabelKey: dorisQueryParam.Keys.LabelKey,
TimeKey: dorisQueryParam.Keys.TimeKey,
Offset: dorisQueryParam.Offset,
},
})
if err != nil {
@@ -180,6 +216,18 @@ func (d *Doris) QueryLog(ctx context.Context, query interface{}) ([]interface{},
return nil, 0, err
}
// 记录规则预览场景下只传了interval, 没有传From和To
now := time.Now().Unix()
if dorisQueryParam.To == 0 && dorisQueryParam.From == 0 && dorisQueryParam.Interval != 0 {
dorisQueryParam.To = now
dorisQueryParam.From = now - dorisQueryParam.Interval
}
if dorisQueryParam.Offset != 0 {
dorisQueryParam.To -= int64(dorisQueryParam.Offset)
dorisQueryParam.From -= int64(dorisQueryParam.Offset)
}
if strings.Contains(dorisQueryParam.SQL, "$__") {
var err error
dorisQueryParam.SQL, err = macros.Macro(dorisQueryParam.SQL, dorisQueryParam.From, dorisQueryParam.To)

View File

@@ -57,3 +57,29 @@ func (cs *Cache) Get(cate string, dsId int64) (datasource.Datasource, bool) {
return cs.datas[cate][dsId], true
}
// Delete removes datasource dsId of category cate from the cache.
// It is a no-op when the category has no cached entries at all.
func (cs *Cache) Delete(cate string, dsId int64) {
	cs.mutex.Lock()
	defer cs.mutex.Unlock()

	byId, ok := cs.datas[cate]
	if !ok {
		return
	}

	delete(byId, dsId)
	logger.Debugf("delete plugin:%s %d from cache", cate, dsId)
}
// GetAllIds 返回缓存中所有数据源的 ID按类型分组
func (cs *Cache) GetAllIds() map[string][]int64 {
cs.mutex.RLock()
defer cs.mutex.RUnlock()
result := make(map[string][]int64)
for cate, dsMap := range cs.datas {
ids := make([]int64, 0, len(dsMap))
for dsId := range dsMap {
ids = append(ids, dsId)
}
result[cate] = ids
}
return result
}

View File

@@ -172,7 +172,10 @@ func esN9eToDatasourceInfo(ds *datasource.DatasourceInfo, item models.Datasource
}
func PutDatasources(items []datasource.DatasourceInfo) {
// 记录当前有效的数据源 ID按类型分组
validIds := make(map[string]map[int64]struct{})
ids := make([]int64, 0)
for _, item := range items {
if item.Type == "prometheus" {
continue
@@ -201,6 +204,12 @@ func PutDatasources(items []datasource.DatasourceInfo) {
}
ids = append(ids, item.Id)
// 记录有效的数据源 ID
if _, ok := validIds[typ]; !ok {
validIds[typ] = make(map[int64]struct{})
}
validIds[typ][item.Id] = struct{}{}
// 异步初始化 client 不然数据源同步的会很慢
go func() {
defer func() {
@@ -212,5 +221,19 @@ func PutDatasources(items []datasource.DatasourceInfo) {
}()
}
// 删除 items 中不存在但 DsCache 中存在的数据源
cachedIds := DsCache.GetAllIds()
for cate, dsIds := range cachedIds {
for _, dsId := range dsIds {
if _, ok := validIds[cate]; !ok {
// 该类型在 items 中完全不存在,删除缓存中的所有该类型数据源
DsCache.Delete(cate, dsId)
} else if _, ok := validIds[cate][dsId]; !ok {
// 该数据源 ID 在 items 中不存在,删除
DsCache.Delete(cate, dsId)
}
}
}
logger.Debugf("get plugin by type success Ids:%v", ids)
}

View File

@@ -18,13 +18,21 @@ import (
"github.com/mitchellh/mapstructure"
)
const (
ShowIndexFieldIndexType = "index_type"
ShowIndexFieldColumnName = "column_name"
ShowIndexKeyName = "key_name"
SQLShowIndex = "SHOW INDEX FROM "
)
// Doris struct to hold connection details and the connection object
type Doris struct {
Addr string `json:"doris.addr" mapstructure:"doris.addr"` // fe mysql endpoint
FeAddr string `json:"doris.fe_addr" mapstructure:"doris.fe_addr"` // fe http endpoint
User string `json:"doris.user" mapstructure:"doris.user"` //
Password string `json:"doris.password" mapstructure:"doris.password"` //
Timeout int `json:"doris.timeout" mapstructure:"doris.timeout"`
Timeout int `json:"doris.timeout" mapstructure:"doris.timeout"` // ms
MaxIdleConns int `json:"doris.max_idle_conns" mapstructure:"doris.max_idle_conns"`
MaxOpenConns int `json:"doris.max_open_conns" mapstructure:"doris.max_open_conns"`
ConnMaxLifetime int `json:"doris.conn_max_lifetime" mapstructure:"doris.conn_max_lifetime"`
@@ -63,7 +71,7 @@ func (d *Doris) NewConn(ctx context.Context, database string) (*sql.DB, error) {
// Set default values similar to postgres implementation
if d.Timeout == 0 {
d.Timeout = 60
d.Timeout = 60000
}
if d.MaxIdleConns == 0 {
d.MaxIdleConns = 10
@@ -119,7 +127,7 @@ func (d *Doris) createTimeoutContext(ctx context.Context) (context.Context, cont
if timeout == 0 {
timeout = 60
}
return context.WithTimeout(ctx, time.Duration(timeout)*time.Second)
return context.WithTimeout(ctx, time.Duration(timeout)*time.Millisecond)
}
// ShowDatabases lists all databases in Doris
@@ -312,6 +320,88 @@ func (d *Doris) DescTable(ctx context.Context, database, table string) ([]*types
return columns, nil
}
// TableIndexInfo describes one index entry of a Doris table as reported
// by "SHOW INDEX".
type TableIndexInfo struct {
	ColumnName string `json:"column_name"` // indexed column
	IndexName  string `json:"index_name"`  // key name of the index
	IndexType  string `json:"index_type"`  // index type string reported by Doris
}
// ShowIndexes queries all index information of the given table via
// "SHOW INDEX FROM `database`.`table`" and returns one TableIndexInfo per
// row that has a non-empty column name.
func (d *Doris) ShowIndexes(ctx context.Context, database, table string) ([]TableIndexInfo, error) {
	if database == "" || table == "" {
		return nil, fmt.Errorf("database and table names cannot be empty")
	}

	// Bound the whole operation by the configured query timeout.
	tCtx, cancel := d.createTimeoutContext(ctx)
	defer cancel()

	db, err := d.NewConn(tCtx, database)
	if err != nil {
		return nil, err
	}

	querySQL := fmt.Sprintf("%s `%s`.`%s`", SQLShowIndex, database, table)
	rows, err := db.QueryContext(tCtx, querySQL)
	if err != nil {
		return nil, fmt.Errorf("failed to query indexes: %w", err)
	}
	defer rows.Close()

	columns, err := rows.Columns()
	if err != nil {
		return nil, fmt.Errorf("failed to get columns: %w", err)
	}
	count := len(columns)

	// Pre-map the indexes of the columns we care about; -1 means absent.
	// Matching is case-insensitive because column-name casing may vary.
	colIdx := map[string]int{
		ShowIndexKeyName:         -1,
		ShowIndexFieldColumnName: -1,
		ShowIndexFieldIndexType:  -1,
	}
	for i, col := range columns {
		lCol := strings.ToLower(col)
		if lCol == ShowIndexKeyName || lCol == ShowIndexFieldColumnName || lCol == ShowIndexFieldIndexType {
			colIdx[lCol] = i
		}
	}

	var result []TableIndexInfo
	for rows.Next() {
		// sql.RawBytes accepts any column type and converts to string,
		// avoiding complex per-type assertions.
		scanArgs := make([]interface{}, count)
		values := make([]sql.RawBytes, count)
		for i := range values {
			scanArgs[i] = &values[i]
		}
		if err = rows.Scan(scanArgs...); err != nil {
			return nil, err
		}

		info := TableIndexInfo{}
		if i := colIdx[ShowIndexFieldColumnName]; i != -1 && i < count {
			info.ColumnName = string(values[i])
		}
		if i := colIdx[ShowIndexKeyName]; i != -1 && i < count {
			info.IndexName = string(values[i])
		}
		if i := colIdx[ShowIndexFieldIndexType]; i != -1 && i < count {
			info.IndexType = string(values[i])
		}

		// Rows without a column name are skipped.
		if info.ColumnName != "" {
			result = append(result, info)
		}
	}

	if err = rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating rows: %w", err)
	}

	return result, nil
}
// SelectRows selects rows from a specified table in Doris based on a given query with MaxQueryRows check
func (d *Doris) SelectRows(ctx context.Context, database, table, query string) ([]map[string]interface{}, error) {
sql := fmt.Sprintf("SELECT * FROM %s.%s", database, table)

View File

@@ -10,13 +10,14 @@ const (
TimeseriesAggregationTimestamp = "__ts__"
)
// QueryLogs 查询日志
// TODO: 待测试, MAP/ARRAY/STRUCT/JSON 等类型能否处理
func (d *Doris) QueryLogs(ctx context.Context, query *QueryParam) ([]map[string]interface{}, error) {
// 等同于 Query()
return d.Query(ctx, query)
return d.Query(ctx, query, true)
}
// 本质是查询时序数据, 取第一组, SQL由上层封装, 不再做复杂的解析和截断
// QueryHistogram 本质是查询时序数据, 取第一组, SQL由上层封装, 不再做复杂的解析和截断
func (d *Doris) QueryHistogram(ctx context.Context, query *QueryParam) ([][]float64, error) {
values, err := d.QueryTimeseries(ctx, query)
if err != nil {

View File

@@ -15,6 +15,10 @@ const (
TimeFieldFormatDateTime = "datetime"
)
type noNeedCheckMaxRowKey struct{}
var NoNeedCheckMaxRow = noNeedCheckMaxRowKey{}
// 不再拼接SQL, 完全信赖用户的输入
type QueryParam struct {
Database string `json:"database"`
@@ -39,7 +43,7 @@ var (
)
// Query executes a given SQL query in Doris and returns the results with MaxQueryRows check
func (d *Doris) Query(ctx context.Context, query *QueryParam) ([]map[string]interface{}, error) {
func (d *Doris) Query(ctx context.Context, query *QueryParam, checkMaxRow bool) ([]map[string]interface{}, error) {
// 校验SQL的合法性, 过滤掉 write请求
sqlItem := strings.Split(strings.ToUpper(query.Sql), " ")
for _, item := range sqlItem {
@@ -48,10 +52,12 @@ func (d *Doris) Query(ctx context.Context, query *QueryParam) ([]map[string]inte
}
}
// 检查查询结果行数
err := d.CheckMaxQueryRows(ctx, query.Database, query.Sql)
if err != nil {
return nil, err
if checkMaxRow {
// 检查查询结果行数
err := d.CheckMaxQueryRows(ctx, query.Database, query.Sql)
if err != nil {
return nil, err
}
}
rows, err := d.ExecQuery(ctx, query.Database, query.Sql)
@@ -63,8 +69,12 @@ func (d *Doris) Query(ctx context.Context, query *QueryParam) ([]map[string]inte
// QueryTimeseries executes a time series data query using the given parameters with MaxQueryRows check
func (d *Doris) QueryTimeseries(ctx context.Context, query *QueryParam) ([]types.MetricValues, error) {
// 使用 Query 方法执行查询Query方法内部已包含MaxQueryRows检查
rows, err := d.Query(ctx, query)
// 默认需要检查,除非调用方声明不需要检查
checkMaxRow := true
if noCheck, ok := ctx.Value(NoNeedCheckMaxRow).(bool); ok && noCheck {
checkMaxRow = false
}
rows, err := d.Query(ctx, query, checkMaxRow)
if err != nil {
return nil, err
}

View File

@@ -158,7 +158,10 @@ func FormatMetricValues(keys types.Keys, rows []map[string]interface{}, ignoreDe
}
if !exists {
ts = float64(time.Now().Unix()) // Default to current time if not specified
// Default to current time if not specified
// 大多数情况下offset为空
// 对于记录规则延迟计算的情况,统计值的时间戳需要有偏移,以便跟统计值对应
ts = float64(time.Now().Unix()) - float64(keys.Offset)
}
valuePair := []float64{ts, value}
@@ -257,6 +260,14 @@ func parseTimeFromString(str, format string) (time.Time, error) {
if parsedTime, err := time.Parse(time.RFC3339, str); err == nil {
return parsedTime, nil
}
if parsedTime, err := time.Parse(time.DateTime, str); err == nil {
return parsedTime, nil
}
if parsedTime, err := time.Parse("2006-01-02 15:04:05.000000", str); err == nil {
return parsedTime, nil
}
if parsedTime, err := time.Parse(time.RFC3339Nano, str); err == nil {
return parsedTime, nil
}

View File

@@ -48,4 +48,5 @@ type Keys struct {
LabelKey string `json:"labelKey" mapstructure:"labelKey"` // 多个用空格分隔
TimeKey string `json:"timeKey" mapstructure:"timeKey"`
TimeFormat string `json:"timeFormat" mapstructure:"timeFormat"` // not used anymore
Offset int `json:"offset" mapstructure:"offset"`
}

1
go.mod
View File

@@ -101,6 +101,7 @@ require (
github.com/jcmturner/gofork v1.7.6 // indirect
github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/larksuite/oapi-sdk-go/v3 v3.5.1 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect

3
go.sum
View File

@@ -243,6 +243,7 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -315,6 +316,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/larksuite/oapi-sdk-go/v3 v3.5.1 h1:gX4dz92YU70inuIX+ug+PBe64eHToIN9rHB4Vupv5Eg=
github.com/larksuite/oapi-sdk-go/v3 v3.5.1/go.mod h1:ZEplY+kwuIrj/nqw5uSCINNATcH3KdxSN7y+UxYY5fI=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=

View File

@@ -46,6 +46,12 @@ func NewAnomalyPoint(key string, labels map[string]string, ts int64, value float
}
func (v *AnomalyPoint) ReadableValue() string {
if len(v.ValuesUnit) > 0 {
for _, unit := range v.ValuesUnit { // 配置了单位,优先用配置了单位的值
return unit.Text
}
}
ret := fmt.Sprintf("%.5f", v.Value)
ret = strings.TrimRight(ret, "0")
return strings.TrimRight(ret, ".")

View File

@@ -76,6 +76,7 @@ func (bc *BuiltinComponent) Add(ctx *ctx.Context, username string) error {
bc.CreatedAt = now
bc.UpdatedAt = now
bc.CreatedBy = username
bc.UpdatedBy = username
return Insert(ctx, bc)
}

View File

@@ -448,7 +448,8 @@ func (ds *Datasource) Encrypt(openRsa bool, publicKeyData []byte) error {
// Decrypt 用于 edge 将从中心同步的数据源解密,中心不可调用
func (ds *Datasource) Decrypt() error {
if rsaConfig == nil {
return errors.New("rsa config is nil")
logger.Debugf("datasource %s rsa config is nil", ds.Name)
return nil
}
if !rsaConfig.OpenRSA {

View File

@@ -68,7 +68,8 @@ func MigrateTables(db *gorm.DB) error {
&Board{}, &BoardBusigroup{}, &Users{}, &SsoConfig{}, &models.BuiltinMetric{},
&models.MetricFilter{}, &models.NotificationRecord{}, &models.TargetBusiGroup{},
&models.UserToken{}, &models.DashAnnotation{}, MessageTemplate{}, NotifyRule{}, NotifyChannelConfig{}, &EsIndexPatternMigrate{},
&models.EventPipeline{}, &models.EmbeddedProduct{}, &models.SourceToken{}}
&models.EventPipeline{}, &models.EmbeddedProduct{}, &models.SourceToken{},
&models.SavedView{}, &models.UserViewFavorite{}}
if isPostgres(db) {
dts = append(dts, &models.PostgresBuiltinComponent{})

View File

@@ -31,7 +31,8 @@ type Webhook struct {
RetryCount int `json:"retry_count"`
RetryInterval int `json:"retry_interval"`
Batch int `json:"batch"`
Client *http.Client `json:"-"`
Client *http.Client `json:"-"`
}
func (w *Webhook) Hash() string {

174
models/saved_view.go Normal file
View File

@@ -0,0 +1,174 @@
package models
import (
"errors"
"strings"
"time"
"github.com/ccfos/nightingale/v6/pkg/ctx"
)
// Sentinel errors returned by the saved-view model functions. API handlers
// match on these to translate them into localized responses (see the i18n
// table elsewhere in this change).
var (
	ErrSavedViewNameEmpty     = errors.New("saved view name is blank")
	ErrSavedViewPageEmpty     = errors.New("saved view page is blank")
	ErrSavedViewNotFound      = errors.New("saved view not found")
	ErrSavedViewNameDuplicate = errors.New("saved view name already exists in this page")
)
// SavedView is a persisted filter/view for a UI page. Visibility is driven
// by PublicCate: 0 = private to the creator, 1 = shared with the business
// groups listed in Gids, 2 = public to everyone.
type SavedView struct {
	Id         int64   `json:"id" gorm:"primaryKey;autoIncrement"`
	Name       string  `json:"name" gorm:"type:varchar(255);not null"`
	Page       string  `json:"page" gorm:"type:varchar(64);not null;index"` // UI page this view belongs to
	Filter     string  `json:"filter" gorm:"type:text"`                     // serialized filter payload; opaque to this model
	PublicCate int     `json:"public_cate" gorm:"default:0"`                // 0: self, 1: team, 2: all
	Gids       []int64 `json:"gids" gorm:"column:gids;type:text;serializer:json"` // business group ids used when PublicCate is 1 (team)
	CreateAt   int64   `json:"create_at" gorm:"type:bigint;not null;default:0"`   // unix seconds
	CreateBy   string  `json:"create_by" gorm:"type:varchar(64);index"`
	UpdateAt   int64   `json:"update_at" gorm:"type:bigint;not null;default:0"` // unix seconds
	UpdateBy   string  `json:"update_by" gorm:"type:varchar(64)"`
	// IsFavorite is populated at query time (from user_view_favorite) and is
	// never persisted.
	IsFavorite bool `json:"is_favorite" gorm:"-"`
}
// TableName tells gorm which table backs SavedView.
func (SavedView) TableName() string {
	return "saved_view"
}
// Verify normalizes and validates the view in place: Name is trimmed of
// surrounding whitespace and must be non-empty, and Page must be non-empty.
func (sv *SavedView) Verify() error {
	sv.Name = strings.TrimSpace(sv.Name)

	switch {
	case sv.Name == "":
		return ErrSavedViewNameEmpty
	case sv.Page == "":
		return ErrSavedViewPageEmpty
	default:
		return nil
	}
}
// SavedViewCheckDuplicateName returns ErrSavedViewNameDuplicate when another
// fully public (public_cate = 2) view with the same name already exists on
// the given page. A positive excludeId is excluded from the lookup, which
// lets updates skip the record being edited.
func SavedViewCheckDuplicateName(c *ctx.Context, page, name string, excludeId int64) error {
	q := DB(c).Model(&SavedView{}).Where("page = ? AND name = ? AND public_cate = 2", page, name)
	if excludeId > 0 {
		q = q.Where("id != ?", excludeId)
	}

	var n int64
	if err := q.Count(&n).Error; err != nil {
		return err
	}
	if n > 0 {
		return ErrSavedViewNameDuplicate
	}
	return nil
}
// SavedViewAdd validates sv, enforces name uniqueness among fully public
// views on the same page, stamps the create/update times and inserts it.
func SavedViewAdd(c *ctx.Context, sv *SavedView) error {
	if err := sv.Verify(); err != nil {
		return err
	}

	// only fully public views (PublicCate == 2) share one name namespace per page
	if sv.PublicCate == 2 {
		if err := SavedViewCheckDuplicateName(c, sv.Page, sv.Name, 0); err != nil {
			return err
		}
	}

	ts := time.Now().Unix()
	sv.CreateAt, sv.UpdateAt = ts, ts

	return Insert(c, sv)
}
// SavedViewUpdate validates sv and persists the mutable columns (name,
// filter, public_cate, gids) plus the audit fields. username is recorded as
// the updater.
func SavedViewUpdate(c *ctx.Context, sv *SavedView, username string) error {
	if err := sv.Verify(); err != nil {
		return err
	}

	// fully public views (PublicCate == 2) must keep a unique name per page;
	// exclude this record itself from the duplicate lookup
	if sv.PublicCate == 2 {
		if err := SavedViewCheckDuplicateName(c, sv.Page, sv.Name, sv.Id); err != nil {
			return err
		}
	}

	sv.UpdateBy = username
	sv.UpdateAt = time.Now().Unix()

	return DB(c).Model(sv).Select("name", "filter", "public_cate", "gids", "update_at", "update_by").Updates(sv).Error
}
// SavedViewDel removes the view with the given id together with its favorite
// associations. The favorites are removed first so no favorite row can point
// at a missing view.
// NOTE(review): the two deletes are not wrapped in a transaction, so a
// failure of the second delete leaves the favorites already gone — confirm
// whether that is acceptable for this call site.
func SavedViewDel(c *ctx.Context, id int64) error {
	db := DB(c)

	if err := db.Where("view_id = ?", id).Delete(&UserViewFavorite{}).Error; err != nil {
		return err
	}

	return db.Where("id = ?", id).Delete(&SavedView{}).Error
}
// SavedViewGetById loads a single view by primary key. It returns gorm's
// record-not-found error when no row matches.
func SavedViewGetById(c *ctx.Context, id int64) (*SavedView, error) {
	sv := new(SavedView)
	if err := DB(c).Where("id = ?", id).First(sv).Error; err != nil {
		return nil, err
	}
	return sv, nil
}
// SavedViewGets returns every view for the given page, most recently updated
// first. Visibility filtering is the caller's responsibility.
func SavedViewGets(c *ctx.Context, page string) ([]SavedView, error) {
	var views []SavedView
	err := DB(c).Where("page = ?", page).Order("update_at DESC").Find(&views).Error
	if err != nil {
		return nil, err
	}
	return views, nil
}
// SavedViewFavoriteGetByUserId returns the set of view ids the user has
// favorited, as a map keyed by view id (every present key maps to true).
func SavedViewFavoriteGetByUserId(c *ctx.Context, userId int64) (map[int64]bool, error) {
	var rows []UserViewFavorite
	if err := DB(c).Where("user_id = ?", userId).Find(&rows).Error; err != nil {
		return nil, err
	}

	favored := make(map[int64]bool, len(rows))
	for i := range rows {
		favored[rows[i].ViewId] = true
	}
	return favored, nil
}
// UserViewFavorite is the association row marking that a user has favorited
// a saved view.
type UserViewFavorite struct {
	Id       int64 `json:"id" gorm:"primaryKey;autoIncrement"`
	ViewId   int64 `json:"view_id" gorm:"index"` // saved_view.id
	UserId   int64 `json:"user_id" gorm:"index"`
	CreateAt int64 `json:"create_at"` // unix seconds when the favorite was added
}
// TableName tells gorm which table backs UserViewFavorite.
func (UserViewFavorite) TableName() string {
	return "user_view_favorite"
}
// UserViewFavoriteAdd marks view viewId as a favorite of user userId.
// It returns ErrSavedViewNotFound when the view does not exist, and is
// idempotent: favoriting an already-favorited view succeeds without writing.
func UserViewFavoriteAdd(c *ctx.Context, viewId, userId int64) error {
	db := DB(c)

	// the view must exist before a favorite may reference it
	var n int64
	if err := db.Model(&SavedView{}).Where("id = ?", viewId).Count(&n).Error; err != nil {
		return err
	}
	if n == 0 {
		return ErrSavedViewNotFound
	}

	// already favorited? treat as success
	if err := db.Model(&UserViewFavorite{}).Where("view_id = ? AND user_id = ?", viewId, userId).Count(&n).Error; err != nil {
		return err
	}
	if n > 0 {
		return nil
	}

	return db.Create(&UserViewFavorite{
		ViewId:   viewId,
		UserId:   userId,
		CreateAt: time.Now().Unix(),
	}).Error
}
// UserViewFavoriteDel removes the (viewId, userId) favorite association.
// Deleting a favorite that does not exist is not an error.
func UserViewFavoriteDel(c *ctx.Context, viewId, userId int64) error {
	return DB(c).Where("view_id = ? AND user_id = ?", viewId, userId).Delete(&UserViewFavorite{}).Error
}

345
pkg/feishu/feishu.go Normal file
View File

@@ -0,0 +1,345 @@
package feishu
import (
"bytes"
"context"
"fmt"
"net/url"
"strings"
"sync"
"time"
"github.com/ccfos/nightingale/v6/storage"
"github.com/google/uuid"
"github.com/pkg/errors"
"github.com/toolkits/pkg/logger"
lark "github.com/larksuite/oapi-sdk-go/v3"
larkcore "github.com/larksuite/oapi-sdk-go/v3/core"
larkauthen "github.com/larksuite/oapi-sdk-go/v3/service/authen/v1"
larkcontact "github.com/larksuite/oapi-sdk-go/v3/service/contact/v3"
)
// defaultAuthURL is the Feishu OAuth authorization endpoint used when no
// custom AuthURL is configured.
const defaultAuthURL = "https://accounts.feishu.cn/open-apis/authen/v1/authorize"

// SsoTypeName identifies this SSO provider type.
const SsoTypeName = "feishu"
// SsoClient is the Feishu (Lark) single-sign-on provider. Reload can replace
// the configuration and SDK client at runtime, so reads and writes of the
// fields are guarded by the embedded RWMutex.
type SsoClient struct {
	Enable       bool
	FeiShuConfig *Config `json:"-"`
	Ctx          context.Context // NOTE(review): stored context is not referenced in this file — confirm it is needed
	client       *lark.Client    // SDK v3 client; rebuilt by Reload
	sync.RWMutex
}
// Config holds the Feishu SSO settings.
type Config struct {
	Enable          bool     `json:"enable"`
	AuthURL         string   `json:"auth_url"`     // optional override of the authorization endpoint (defaults to defaultAuthURL)
	DisplayName     string   `json:"display_name"` // label shown on the login page
	AppID           string   `json:"app_id"`
	AppSecret       string   `json:"app_secret"`
	RedirectURL     string   `json:"redirect_url"`     // OAuth callback URL registered with Feishu; required
	UsernameField   string   `json:"username_field"`   // which profile field becomes the username: name, email, phone
	FeiShuEndpoint  string   `json:"feishu_endpoint"`  // Feishu API endpoint, defaults to open.feishu.cn
	Proxy           string   `json:"proxy"`            // NOTE(review): not referenced in this file — confirm it is honored elsewhere
	CoverAttributes bool     `json:"cover_attributes"` // overwrite existing user attributes on login
	DefaultRoles    []string `json:"default_roles"`    // roles granted to users created via this SSO
}
// CallbackOutput carries the result of a completed Feishu OAuth callback
// back to the login handler.
//
// Fix: Phone and Email were tagged `yaml:` while every sibling field uses
// `json:` — a copy-paste slip. With no json tag, encoding/json falls back to
// the exported field names "Phone"/"Email", so tagging them explicitly with
// those same names keeps the JSON output byte-identical.
type CallbackOutput struct {
	Redirect    string `json:"redirect"` // URL to send the browser to after login
	Msg         string `json:"msg"`
	AccessToken string `json:"accessToken"` // user access token issued by Feishu
	Username    string `json:"Username"`    // value of the field selected by Config.UsernameField
	Nickname    string `json:"Nickname"`
	Phone       string `json:"Phone"`
	Email       string `json:"Email"`
}
// wrapStateKey namespaces an OAuth state token for storage in redis.
func wrapStateKey(key string) string {
	const prefix = "n9e_feishu_oauth_"
	return prefix + key
}
// createClient builds a Feishu SDK client (v3) from the config.
//
// NOTE(review): a non-empty FeiShuEndpoint is applied by mutating the
// package-level lark.FeishuBaseUrl, which affects every lark client in the
// process, not just this one — confirm this is intended, or use a per-client
// option if the SDK offers one.
// The error result is always nil as written; it is kept for signature
// stability.
func (c *Config) createClient() (*lark.Client, error) {
	opts := []lark.ClientOptionFunc{
		lark.WithLogLevel(larkcore.LogLevelInfo),
		lark.WithEnableTokenCache(true), // enable the SDK's internal token cache
	}
	if c.FeiShuEndpoint != "" {
		lark.FeishuBaseUrl = c.FeiShuEndpoint
	}
	// build the v3 client
	client := lark.NewClient(
		c.AppID,
		c.AppSecret,
		opts...,
	)
	return client, nil
}
// New builds an SsoClient from cf. A disabled config yields an inert client
// whose Enable flag is false.
func New(cf Config) *SsoClient {
	s := &SsoClient{}
	if cf.Enable {
		s.Reload(cf)
	}
	return s
}
// AuthCodeURL builds the Feishu authorization URL the browser is redirected
// to at the start of the OAuth flow. state is the opaque CSRF token that
// Feishu echoes back on the callback.
//
// It returns an error when no RedirectURL is configured, since Feishu cannot
// call back without one.
func (s *SsoClient) AuthCodeURL(state string) (string, error) {
	// Fix: validate before doing any work — the original built the query
	// string (including redirect_uri) and only then rejected an empty
	// RedirectURL.
	if s.FeiShuConfig.RedirectURL == "" {
		return "", errors.New("FeiShu OAuth RedirectURL is empty")
	}

	feishuAuthURL := defaultAuthURL
	if s.FeiShuConfig.AuthURL != "" {
		feishuAuthURL = s.FeiShuConfig.AuthURL
	}

	v := url.Values{
		"app_id": {s.FeiShuConfig.AppID},
		"state":  {state},
	}
	v.Set("redirect_uri", s.FeiShuConfig.RedirectURL)

	var buf bytes.Buffer
	buf.WriteString(feishuAuthURL)
	// append with '&' when the configured URL already carries a query string
	if strings.Contains(feishuAuthURL, "?") {
		buf.WriteByte('&')
	} else {
		buf.WriteByte('?')
	}
	buf.WriteString(v.Encode())

	return buf.String(), nil
}
// GetUserToken exchanges an OAuth authorization code for the user's access
// token and user_id via the SDK v3 authen service.
// It returns (accessToken, userID, error); both strings are validated to be
// non-empty before returning success.
func (s *SsoClient) GetUserToken(code string) (string, string, error) {
	if s.client == nil {
		return "", "", errors.New("feishu client is not initialized")
	}

	ctx := context.Background()

	// build the access-token request for the authorization_code grant
	req := larkauthen.NewCreateAccessTokenReqBuilder().
		Body(larkauthen.NewCreateAccessTokenReqBodyBuilder().
			GrantType("authorization_code").
			Code(code).
			Build()).
		Build()

	resp, err := s.client.Authen.AccessToken.Create(ctx, req)
	if err != nil {
		return "", "", fmt.Errorf("feishu get access token error: %w", err)
	}

	// transport succeeded; check the API-level response code
	if !resp.Success() {
		return "", "", fmt.Errorf("feishu api error: code=%d, msg=%s", resp.Code, resp.Msg)
	}
	if resp.Data == nil {
		return "", "", errors.New("feishu api returned empty data")
	}

	// both fields are optional pointers in the SDK; dereference defensively
	userID := ""
	if resp.Data.UserId != nil {
		userID = *resp.Data.UserId
	}
	if userID == "" {
		return "", "", errors.New("feishu api returned empty user_id")
	}

	accessToken := ""
	if resp.Data.AccessToken != nil {
		accessToken = *resp.Data.AccessToken
	}
	if accessToken == "" {
		return "", "", errors.New("feishu api returned empty access_token")
	}

	return accessToken, userID, nil
}
// GetUserInfo fetches the user's profile by user_id via the SDK v3 contact
// service.
// Note: the SDK manages its own (tenant) token internally, so no access
// token parameter is needed here.
func (s *SsoClient) GetUserInfo(userID string) (*larkcontact.GetUserRespData, error) {
	if s.client == nil {
		return nil, errors.New("feishu client is not initialized")
	}

	ctx := context.Background()

	// request the user detail, keyed by user_id
	req := larkcontact.NewGetUserReqBuilder().
		UserId(userID).
		UserIdType(larkcontact.UserIdTypeUserId).
		Build()

	resp, err := s.client.Contact.User.Get(ctx, req)
	if err != nil {
		return nil, fmt.Errorf("feishu get user detail error: %w", err)
	}

	// transport succeeded; check the API-level response code
	if !resp.Success() {
		return nil, fmt.Errorf("feishu api error: code=%d, msg=%s", resp.Code, resp.Msg)
	}
	if resp.Data == nil || resp.Data.User == nil {
		return nil, errors.New("feishu api returned empty user data")
	}

	return resp.Data, nil
}
// Reload swaps in a new Feishu SSO configuration under the write lock and,
// when the config is enabled and carries credentials, rebuilds the SDK
// client. On client-creation failure the previous client is left in place
// and the error is logged.
func (s *SsoClient) Reload(feishuConfig Config) {
	s.Lock()
	defer s.Unlock()

	s.Enable = feishuConfig.Enable
	s.FeiShuConfig = &feishuConfig

	if !feishuConfig.Enable || feishuConfig.AppID == "" || feishuConfig.AppSecret == "" {
		return
	}

	client, err := feishuConfig.createClient()
	if err != nil {
		logger.Errorf("create feishu client error: %v", err)
		return
	}
	s.client = client
}
// GetDisplayName returns the login-page label for this provider, or the
// empty string when the provider is disabled.
func (s *SsoClient) GetDisplayName() string {
	s.RLock()
	defer s.RUnlock()

	if s.Enable {
		return s.FeiShuConfig.DisplayName
	}
	return ""
}
// Authorize starts the OAuth flow: it stores the post-login redirect in
// redis under a fresh state token (5 minute TTL) and returns the Feishu
// authorization URL to send the browser to.
func (s *SsoClient) Authorize(redis storage.Redis, redirect string) (string, error) {
	state := uuid.New().String()

	if err := redis.Set(context.Background(), wrapStateKey(state), redirect, 300*time.Second).Err(); err != nil {
		return "", err
	}

	s.RLock()
	defer s.RUnlock()
	return s.AuthCodeURL(state)
}
// Callback completes the OAuth flow: it exchanges code for the user's access
// token and profile, resolves the post-login redirect stored under state,
// and maps the profile onto a CallbackOutput using Config.UsernameField
// (name, phone, or email/default) to choose the username.
//
// Fix: deleteRedirect was previously called unconditionally — with a nil
// redis (the fetch above is guarded by redis != nil, so nil is clearly a
// supported input) that dereferenced nil and panicked. The delete now lives
// inside the same guard.
func (s *SsoClient) Callback(redis storage.Redis, ctx context.Context, code, state string) (*CallbackOutput, error) {
	// exchange the code for the user's access token and user_id
	accessToken, userID, err := s.GetUserToken(code)
	if err != nil {
		return nil, fmt.Errorf("feishu GetUserToken error: %s", err)
	}

	// fetch the user's profile
	userData, err := s.GetUserInfo(userID)
	if err != nil {
		return nil, fmt.Errorf("feishu GetUserInfo error: %s", err)
	}

	// resolve and clean up the redirect stored by Authorize; failures here
	// only cost the redirect, not the login
	redirect := ""
	if redis != nil {
		redirect, err = fetchRedirect(redis, ctx, state)
		if err != nil {
			logger.Errorf("get redirect err:%v code:%s state:%s", err, code, state)
		}
		if err := deleteRedirect(redis, ctx, state); err != nil {
			logger.Errorf("delete redirect err:%v code:%s state:%s", err, code, state)
		}
	}
	if redirect == "" {
		redirect = "/"
	}

	var callbackOutput CallbackOutput
	if userData == nil || userData.User == nil {
		return nil, fmt.Errorf("feishu GetUserInfo failed, user data is nil")
	}
	user := userData.User
	logger.Debugf("feishu get user info userID %s result %+v", userID, user)

	// the SDK exposes profile fields as optional pointers; flatten them
	username := ""
	if user.UserId != nil {
		username = *user.UserId
	}
	if username == "" {
		return nil, errors.New("feishu user_id is empty")
	}

	nickname := ""
	if user.Name != nil {
		nickname = *user.Name
	}

	phone := ""
	if user.Mobile != nil {
		phone = *user.Mobile
	}

	email := ""
	if user.Email != nil {
		email = *user.Email
	}
	// fall back to the enterprise email when the personal one is absent
	if email == "" && user.EnterpriseEmail != nil {
		email = *user.EnterpriseEmail
	}

	callbackOutput.Redirect = redirect
	callbackOutput.AccessToken = accessToken

	// pick the username according to configuration; the chosen field must
	// be present
	switch s.FeiShuConfig.UsernameField {
	case "name":
		if nickname == "" {
			return nil, errors.New("feishu user name is empty")
		}
		callbackOutput.Username = nickname
	case "phone":
		if phone == "" {
			return nil, errors.New("feishu user phone is empty")
		}
		callbackOutput.Username = phone
	default:
		if email == "" {
			return nil, errors.New("feishu user email is empty")
		}
		callbackOutput.Username = email
	}
	callbackOutput.Nickname = nickname
	callbackOutput.Email = email
	callbackOutput.Phone = phone

	return &callbackOutput, nil
}
// fetchRedirect reads the post-login redirect stored for this state token by
// Authorize.
func fetchRedirect(redis storage.Redis, ctx context.Context, state string) (string, error) {
	return redis.Get(ctx, wrapStateKey(state)).Result()
}
// deleteRedirect removes the stored redirect for this state token so it
// cannot be replayed.
func deleteRedirect(redis storage.Redis, ctx context.Context, state string) error {
	return redis.Del(ctx, wrapStateKey(state)).Err()
}

View File

@@ -201,6 +201,11 @@ var I18N = `{
"Some recovery scripts still in the BusiGroup": "业务组中仍有自愈脚本",
"Some target busigroups still in the BusiGroup": "业务组中仍有监控对象",
"saved view not found": "保存的视图不存在",
"saved view name is blank": "视图名称不能为空",
"saved view page is blank": "视图页面不能为空",
"saved view name already exists in this page": "该页面下已存在同名的公开视图",
"---------zh_CN--------": "---------zh_CN--------"
},
"zh_HK": {
@@ -405,6 +410,11 @@ var I18N = `{
"Some recovery scripts still in the BusiGroup": "業務組中仍有自愈腳本",
"Some target busigroups still in the BusiGroup": "業務組中仍有監控對象",
"saved view not found": "保存的視圖不存在",
"saved view name is blank": "視圖名稱不能為空",
"saved view page is blank": "視圖頁面不能為空",
"saved view name already exists in this page": "該頁面下已存在同名的公開視圖",
"---------zh_HK--------": "---------zh_HK--------"
},
"ja_JP": {
@@ -606,6 +616,11 @@ var I18N = `{
"Some recovery scripts still in the BusiGroup": "ビジネスグループにまだ自己回復スクリプトがあります",
"Some target busigroups still in the BusiGroup": "ビジネスグループにまだ監視対象があります",
"saved view not found": "保存されたビューが見つかりません",
"saved view name is blank": "ビュー名を空にすることはできません",
"saved view page is blank": "ビューページを空にすることはできません",
"saved view name already exists in this page": "このページには同名の公開ビューが既に存在します",
"---------ja_JP--------": "---------ja_JP--------"
},
"ru_RU": {
@@ -807,6 +822,11 @@ var I18N = `{
"Some recovery scripts still in the BusiGroup": "В бизнес-группе еще есть скрипты самоисцеления",
"Some target busigroups still in the BusiGroup": "В бизнес-группе еще есть объекты мониторинга",
"saved view not found": "Сохраненный вид не найден",
"saved view name is blank": "Название вида не может быть пустым",
"saved view page is blank": "Страница вида не может быть пустой",
"saved view name already exists in this page": "На этой странице уже существует публичный вид с таким названием",
"---------ru_RU--------": "---------ru_RU--------"
}
}`

View File

@@ -35,6 +35,12 @@ type Pushgw struct {
WriterOpt WriterGlobalOpt
Writers []WriterOptions
KafkaWriters []KafkaWriterOptions
// 预处理的字段,用于快速匹配只有 __name__ 的 DropSample 规则
// key: metric name, value: struct{}
DropMetricNames map[string]struct{}
// 包含多个标签的复杂 DropSample 规则
DropSampleComplex []map[string]string
}
type WriterGlobalOpt struct {

View File

@@ -109,21 +109,30 @@ func (rt *Router) debugSample(remoteAddr string, v *prompb.TimeSeries) {
}
func (rt *Router) DropSample(v *prompb.TimeSeries) bool {
filters := rt.Pushgw.DropSample
if len(filters) == 0 {
// 快速路径:检查仅 __name__ 的过滤器 O(1)
if len(rt.dropByNameOnly) > 0 {
for i := 0; i < len(v.Labels); i++ {
if v.Labels[i].Name == "__name__" {
if _, ok := rt.dropByNameOnly[v.Labels[i].Value]; ok {
return true
}
break // __name__ 只会出现一次,找到后直接跳出
}
}
}
// 慢速路径:处理复杂的多条件过滤器
if len(rt.dropComplex) == 0 {
return false
}
labelMap := make(map[string]string)
// 只有复杂过滤器存在时才创建 labelMap
labelMap := make(map[string]string, len(v.Labels))
for i := 0; i < len(v.Labels); i++ {
labelMap[v.Labels[i].Name] = v.Labels[i].Value
}
for _, filter := range filters {
if len(filter) == 0 {
continue
}
for _, filter := range rt.dropComplex {
if matchSample(filter, labelMap) {
return true
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/prometheus/prometheus/prompb"
"github.com/toolkits/pkg/logger"
"github.com/ccfos/nightingale/v6/alert/aconf"
"github.com/ccfos/nightingale/v6/center/metas"
@@ -32,7 +33,11 @@ type Router struct {
Writers *writer.WritersType
Ctx *ctx.Context
HandleTS HandleTSFunc
HeartbeatApi string
HeartbeatApi string
// 预编译的 DropSample 过滤器
dropByNameOnly map[string]struct{} // 仅 __name__ 条件的快速匹配
dropComplex []map[string]string // 多条件的复杂匹配
}
func stat() gin.HandlerFunc {
@@ -51,7 +56,7 @@ func stat() gin.HandlerFunc {
func New(httpConfig httpx.Config, pushgw pconf.Pushgw, aconf aconf.Alert, tc *memsto.TargetCacheType, bg *memsto.BusiGroupCacheType,
idents *idents.Set, metas *metas.Set,
writers *writer.WritersType, ctx *ctx.Context) *Router {
return &Router{
rt := &Router{
HTTP: httpConfig,
Pushgw: pushgw,
Aconf: aconf,
@@ -63,6 +68,38 @@ func New(httpConfig httpx.Config, pushgw pconf.Pushgw, aconf aconf.Alert, tc *me
MetaSet: metas,
HandleTS: func(pt *prompb.TimeSeries) *prompb.TimeSeries { return pt },
}
// 预编译 DropSample 过滤器
rt.initDropSampleFilters()
return rt
}
// initDropSampleFilters pre-compiles the configured DropSample filters.
// Filters whose only condition is __name__ are promoted into a set for O(1)
// lookup on the hot ingest path; every other filter is kept as-is for
// label-by-label matching.
func (rt *Router) initDropSampleFilters() {
	rt.dropByNameOnly = make(map[string]struct{})
	rt.dropComplex = make([]map[string]string, 0)

	for _, f := range rt.Pushgw.DropSample {
		if len(f) == 0 {
			// an empty filter matches nothing; drop it
			continue
		}
		// single-condition __name__ filters go to the fast set
		if name, ok := f["__name__"]; ok && len(f) == 1 {
			rt.dropByNameOnly[name] = struct{}{}
			continue
		}
		rt.dropComplex = append(rt.dropComplex, f)
	}

	logger.Infof("DropSample filters initialized: %d name-only, %d complex",
		len(rt.dropByNameOnly), len(rt.dropComplex))
}
func (rt *Router) Config(r *gin.Engine) {