Compare commits

...

31 Commits
main ... dev21

Author SHA1 Message Date
huangjie
b02ddeec7b feishu config add defaultUserGroup (#3076) 2026-02-10 14:19:58 +08:00
liufuniu
d5528541c3 fix:doris datasource equal (#3072) 2026-02-05 17:31:37 +08:00
liufuniu
33ec277ac1 fix:doris NewWriteConn (#3069) 2026-02-05 12:08:12 +08:00
liufuniu
a63d6a1e49 fix:doris conn user (#3068) 2026-02-05 10:59:16 +08:00
ning
84a179c4f4 Merge branch 'main' of github.com:ccfos/nightingale into dev21 2026-02-04 16:09:15 +08:00
liufuniu
f27bbb4a51 refactor: doris datasource add write user (#3064) 2026-02-03 16:16:35 +08:00
ning
b0fbca21b8 Merge branch 'main' of github.com:ccfos/nightingale into dev21 2026-02-02 16:29:59 +08:00
ning
0c71eeac2a merge main 2026-02-02 14:48:10 +08:00
huangjie
48820a6bd5 feishu userid (#3057)
Co-authored-by: jie210 <huangjie@flashcat.com>
2026-01-26 20:02:10 +08:00
ning
52421f2477 refactor: update doris check max rows 2026-01-21 16:03:18 +08:00
ning
a9ab02e1ad refactor: update doris check max rows 2026-01-21 14:34:39 +08:00
ning
e5acc9199b Merge branch 'update-workflow' of github.com:ccfos/nightingale into dev21 2026-01-16 15:30:03 +08:00
ning
ec7fbf313b update workflow 2026-01-16 15:29:46 +08:00
ning
1180066df3 Merge branch 'update-workflow' of github.com:ccfos/nightingale into dev21 2026-01-16 15:20:06 +08:00
ning
b3ee1e56ad update callback 2026-01-16 15:15:48 +08:00
Yening Qin
0b71d1ef82 Update workflow (#3041) 2026-01-16 14:25:50 +08:00
Yening Qin
2934dab4c7 Revert "Update workflow (#3038)" (#3040)
This reverts commit ff1aa83b8c.
2026-01-16 14:25:09 +08:00
ning
d908240912 update workflow 2026-01-16 14:21:50 +08:00
Yening Qin
ff1aa83b8c Update workflow (#3038) 2026-01-16 14:04:57 +08:00
ning
d54bcdd722 update workflow 2026-01-16 14:04:14 +08:00
ning
81b5ce20ae Merge branch 'main' of github.com:ccfos/nightingale into dev21 2026-01-14 19:25:38 +08:00
ning
806b3effe9 update workflow 2026-01-14 19:23:15 +08:00
jie210
cd0b529b69 feishu email update (#3037)
Co-authored-by: jie210 <huangjie@flashcat.com>
2026-01-14 14:06:11 +08:00
jie210
9e99e4a63a sso add feishu (#3035) 2026-01-13 16:00:57 +08:00
ning
6b25a4ce90 Merge branch 'main' of github.com:ccfos/nightingale into dev21 2026-01-12 17:03:37 +08:00
ning
1a50d22573 Merge branch 'dev21' of github.com:ccfos/nightingale into dev21 2026-01-12 17:03:01 +08:00
ning
46083d741d fix: query data 2025-12-30 19:21:16 +08:00
ning
3eeb705b39 update ds perm check 2025-12-30 16:51:10 +08:00
ning
8d87e69ee7 fix: datasource delete 2025-12-30 16:50:10 +08:00
pioneerlfn
3da85d8e28 fix: doris exec sql timeout unit: s -> ms 2025-12-29 14:27:42 +08:00
pioneerlfn
b50410b88a refactor: update doris query 2025-12-26 16:32:57 +08:00
9 changed files with 166 additions and 79 deletions

View File

@@ -571,12 +571,19 @@ func (rt *Router) loginCallbackFeiShu(c *gin.Context) {
} else {
user = new(models.User)
defaultRoles := []string{}
defaultUserGroups := []int64{}
if rt.Sso.FeiShu != nil && rt.Sso.FeiShu.FeiShuConfig != nil {
defaultRoles = rt.Sso.FeiShu.FeiShuConfig.DefaultRoles
defaultUserGroups = rt.Sso.FeiShu.FeiShuConfig.DefaultUserGroups
}
user.FullSsoFields(feishu.SsoTypeName, ret.Username, ret.Nickname, ret.Phone, ret.Email, defaultRoles)
// create user from feishu
ginx.Dangerous(user.Add(rt.Ctx))
if len(defaultUserGroups) > 0 {
ginx.Dangerous(user.UpdateUserGroup(rt.Ctx, defaultUserGroups))
}
}
// set user login state

View File

@@ -1,13 +1,16 @@
package router
import (
"context"
"fmt"
"sort"
"sync"
"github.com/ccfos/nightingale/v6/alert/eval"
"github.com/ccfos/nightingale/v6/dscache"
"github.com/ccfos/nightingale/v6/dskit/doris"
"github.com/ccfos/nightingale/v6/models"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/ginx"
"github.com/toolkits/pkg/logger"
@@ -117,10 +120,13 @@ func (rt *Router) QueryLogBatch(c *gin.Context) {
}
func QueryDataConcurrently(anonymousAccess bool, ctx *gin.Context, f models.QueryParam) ([]models.DataResp, error) {
var resp []models.DataResp
var mu sync.Mutex
var wg sync.WaitGroup
var errs []error
var (
resp []models.DataResp
mu sync.Mutex
wg sync.WaitGroup
errs []error
rCtx = ctx.Request.Context()
)
for _, q := range f.Queries {
if !anonymousAccess && !CheckDsPerm(ctx, f.DatasourceId, f.Cate, q) {
@@ -132,12 +138,17 @@ func QueryDataConcurrently(anonymousAccess bool, ctx *gin.Context, f models.Quer
logger.Warningf("cluster:%d not exists", f.DatasourceId)
return nil, fmt.Errorf("cluster not exists")
}
vCtx := rCtx
if f.Cate == models.DORIS {
vCtx = context.WithValue(vCtx, doris.NoNeedCheckMaxRow, true)
}
wg.Add(1)
go func(query interface{}) {
defer wg.Done()
data, err := plug.QueryData(ctx.Request.Context(), query)
data, err := plug.QueryData(vCtx, query)
if err != nil {
logger.Warningf("query data error: req:%+v err:%v", query, err)
mu.Lock()

View File

@@ -79,52 +79,19 @@ func (d *Doris) Equal(p datasource.Datasource) bool {
return false
}
// only compare first shard
if d.Addr != newest.Addr {
return false
}
if d.User != newest.User {
return false
}
if d.Password != newest.Password {
return false
}
if d.EnableWrite != newest.EnableWrite {
return false
}
if d.FeAddr != newest.FeAddr {
return false
}
if d.MaxQueryRows != newest.MaxQueryRows {
return false
}
if d.Timeout != newest.Timeout {
return false
}
if d.MaxIdleConns != newest.MaxIdleConns {
return false
}
if d.MaxOpenConns != newest.MaxOpenConns {
return false
}
if d.ConnMaxLifetime != newest.ConnMaxLifetime {
return false
}
if d.ClusterName != newest.ClusterName {
return false
}
return true
return d.Addr == newest.Addr &&
d.FeAddr == newest.FeAddr &&
d.User == newest.User &&
d.Password == newest.Password &&
d.EnableWrite == newest.EnableWrite &&
d.UserWrite == newest.UserWrite &&
d.PasswordWrite == newest.PasswordWrite &&
d.MaxQueryRows == newest.MaxQueryRows &&
d.Timeout == newest.Timeout &&
d.MaxIdleConns == newest.MaxIdleConns &&
d.MaxOpenConns == newest.MaxOpenConns &&
d.ConnMaxLifetime == newest.ConnMaxLifetime &&
d.ClusterName == newest.ClusterName
}
func (d *Doris) MakeLogQuery(ctx context.Context, query interface{}, eventTags []string, start, end int64) (interface{}, error) {
@@ -181,7 +148,7 @@ func (d *Doris) QueryData(ctx context.Context, query interface{}) ([]models.Data
}
}
items, err := d.QueryTimeseries(context.TODO(), &doris.QueryParam{
items, err := d.QueryTimeseries(ctx, &doris.QueryParam{
Database: dorisQueryParam.Database,
Sql: dorisQueryParam.SQL,
Keys: types.Keys{

View File

@@ -39,6 +39,9 @@ type Doris struct {
MaxQueryRows int `json:"doris.max_query_rows" mapstructure:"doris.max_query_rows"`
ClusterName string `json:"doris.cluster_name" mapstructure:"doris.cluster_name"`
EnableWrite bool `json:"doris.enable_write" mapstructure:"doris.enable_write"`
// 写用户,用来区分读写用户,减少数据源
UserWrite string `json:"doris.user_write" mapstructure:"doris.user_write"`
PasswordWrite string `json:"doris.password_write" mapstructure:"doris.password_write"`
}
// NewDorisWithSettings initializes a new Doris instance with the given settings
@@ -88,13 +91,13 @@ func (d *Doris) NewConn(ctx context.Context, database string) (*sql.DB, error) {
var keys []string
keys = append(keys, d.Addr)
keys = append(keys, d.Password, d.User)
keys = append(keys, d.User, d.Password)
if len(database) > 0 {
keys = append(keys, database)
}
cachedkey := strings.Join(keys, ":")
cachedKey := strings.Join(keys, ":")
// cache conn with database
conn, ok := pool.PoolClient.Load(cachedkey)
conn, ok := pool.PoolClient.Load(cachedKey)
if ok {
return conn.(*sql.DB), nil
}
@@ -102,7 +105,7 @@ func (d *Doris) NewConn(ctx context.Context, database string) (*sql.DB, error) {
var err error
defer func() {
if db != nil && err == nil {
pool.PoolClient.Store(cachedkey, db)
pool.PoolClient.Store(cachedKey, db)
}
}()
@@ -121,6 +124,79 @@ func (d *Doris) NewConn(ctx context.Context, database string) (*sql.DB, error) {
return db, nil
}
// NewWriteConn establishes a new connection to Doris for write operations.
// When EnableWrite is true and UserWrite is configured, it uses the write user
// credentials; otherwise it reuses the read connection from NewConn.
func (d *Doris) NewWriteConn(ctx context.Context, database string) (*sql.DB, error) {
	// If write user is not configured, reuse the read connection
	if !d.EnableWrite || len(d.UserWrite) == 0 {
		return d.NewConn(ctx, database)
	}
	if len(d.Addr) == 0 {
		return nil, errors.New("empty fe-node addr")
	}
	// Set default values similar to postgres implementation.
	// NOTE(review): these defaults mutate the receiver; presumably NewConn
	// applies the same defaults, so whichever path runs first wins — confirm.
	if d.Timeout == 0 {
		d.Timeout = 60000 // presumably milliseconds — TODO confirm unit
	}
	if d.MaxIdleConns == 0 {
		d.MaxIdleConns = 10
	}
	if d.MaxOpenConns == 0 {
		d.MaxOpenConns = 100
	}
	if d.ConnMaxLifetime == 0 {
		d.ConnMaxLifetime = 14400 // seconds (used below with time.Second)
	}
	if d.MaxQueryRows == 0 {
		// Set for consistency with the read path; not read anywhere in this
		// function — TODO confirm it is needed here.
		d.MaxQueryRows = 500
	}
	// Use write user credentials
	user := d.UserWrite
	password := d.PasswordWrite
	// Cache key is addr:user:password[:database], so write connections are
	// cached separately from read connections (different user/password).
	var keys []string
	keys = append(keys, d.Addr)
	keys = append(keys, user, password)
	if len(database) > 0 {
		keys = append(keys, database)
	}
	cachedKey := strings.Join(keys, ":")
	// cache conn with database
	conn, ok := pool.PoolClient.Load(cachedKey)
	if ok {
		return conn.(*sql.DB), nil
	}
	var db *sql.DB
	var err error
	// Store the new connection in the shared pool only on success.
	defer func() {
		if db != nil && err == nil {
			pool.PoolClient.Store(cachedKey, db)
		}
	}()
	// Simplified connection logic for Doris using MySQL driver
	dsn := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8", user, password, d.Addr, database)
	db, err = sql.Open("mysql", dsn)
	if err != nil {
		return nil, err
	}
	// Set connection pool configuration for write connections
	// Use more conservative values since write operations are typically less frequent
	writeMaxIdleConns := max(d.MaxIdleConns/5, 2)
	writeMaxOpenConns := max(d.MaxOpenConns/10, 5)
	db.SetMaxIdleConns(writeMaxIdleConns)
	db.SetMaxOpenConns(writeMaxOpenConns)
	db.SetConnMaxLifetime(time.Duration(d.ConnMaxLifetime) * time.Second)
	return db, nil
}
// createTimeoutContext creates a context with timeout based on Doris configuration
func (d *Doris) createTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc) {
timeout := d.Timeout
@@ -472,7 +548,7 @@ func (d *Doris) ExecContext(ctx context.Context, database string, sql string) er
timeoutCtx, cancel := d.createTimeoutContext(ctx)
defer cancel()
db, err := d.NewConn(timeoutCtx, database)
db, err := d.NewWriteConn(timeoutCtx, database)
if err != nil {
return err
}

View File

@@ -10,13 +10,14 @@ const (
TimeseriesAggregationTimestamp = "__ts__"
)
// QueryLogs 查询日志
// TODO: 待测试, MAP/ARRAY/STRUCT/JSON 等类型能否处理
func (d *Doris) QueryLogs(ctx context.Context, query *QueryParam) ([]map[string]interface{}, error) {
// 等同于 Query()
return d.Query(ctx, query)
return d.Query(ctx, query, true)
}
// 本质是查询时序数据, 取第一组, SQL由上层封装, 不再做复杂的解析和截断
// QueryHistogram 本质是查询时序数据, 取第一组, SQL由上层封装, 不再做复杂的解析和截断
func (d *Doris) QueryHistogram(ctx context.Context, query *QueryParam) ([][]float64, error) {
values, err := d.QueryTimeseries(ctx, query)
if err != nil {

View File

@@ -15,6 +15,10 @@ const (
TimeFieldFormatDateTime = "datetime"
)
type noNeedCheckMaxRowKey struct{}
var NoNeedCheckMaxRow = noNeedCheckMaxRowKey{}
// 不再拼接SQL, 完全信赖用户的输入
type QueryParam struct {
Database string `json:"database"`
@@ -39,7 +43,7 @@ var (
)
// Query executes a given SQL query in Doris and returns the results with MaxQueryRows check
func (d *Doris) Query(ctx context.Context, query *QueryParam) ([]map[string]interface{}, error) {
func (d *Doris) Query(ctx context.Context, query *QueryParam, checkMaxRow bool) ([]map[string]interface{}, error) {
// 校验SQL的合法性, 过滤掉 write请求
sqlItem := strings.Split(strings.ToUpper(query.Sql), " ")
for _, item := range sqlItem {
@@ -48,10 +52,12 @@ func (d *Doris) Query(ctx context.Context, query *QueryParam) ([]map[string]inte
}
}
// 检查查询结果行数
err := d.CheckMaxQueryRows(ctx, query.Database, query.Sql)
if err != nil {
return nil, err
if checkMaxRow {
// 检查查询结果行数
err := d.CheckMaxQueryRows(ctx, query.Database, query.Sql)
if err != nil {
return nil, err
}
}
rows, err := d.ExecQuery(ctx, query.Database, query.Sql)
@@ -63,8 +69,12 @@ func (d *Doris) Query(ctx context.Context, query *QueryParam) ([]map[string]inte
// QueryTimeseries executes a time series data query using the given parameters with MaxQueryRows check
func (d *Doris) QueryTimeseries(ctx context.Context, query *QueryParam) ([]types.MetricValues, error) {
// 使用 Query 方法执行查询Query方法内部已包含MaxQueryRows检查
rows, err := d.Query(ctx, query)
// 默认需要检查,除非调用方声明不需要检查
checkMaxRow := true
if noCheck, ok := ctx.Value(NoNeedCheckMaxRow).(bool); ok && noCheck {
checkMaxRow = false
}
rows, err := d.Query(ctx, query, checkMaxRow)
if err != nil {
return nil, err
}

View File

@@ -6,7 +6,7 @@ import (
"fmt"
"github.com/ccfos/nightingale/v6/pkg/ctx"
"github.com/ccfos/nightingale/v6/pkg/poster"
"github.com/ccfos/nightingale/v6/pkg/poster"
"gorm.io/gorm"
)

View File

@@ -315,6 +315,18 @@ func (u *User) UpdatePassword(ctx *ctx.Context, password, updateBy string) error
}).Error
}
// UpdateUserGroup adds the user to each of the given user groups.
// It stops at the first failure and returns that error; memberships
// created before the failure are not rolled back.
func (u *User) UpdateUserGroup(ctx *ctx.Context, userGroupIds []int64) error {
	for _, groupId := range userGroupIds {
		if err := UserGroupMemberAdd(ctx, groupId, u.Id); err != nil {
			return err
		}
	}
	return nil
}
func UpdateUserLastActiveTime(ctx *ctx.Context, userId int64, lastActiveTime int64) error {
return DB(ctx).Model(&User{}).Where("id = ?", userId).Updates(map[string]interface{}{
"last_active_time": lastActiveTime,

View File

@@ -32,17 +32,18 @@ type SsoClient struct {
}
type Config struct {
Enable bool `json:"enable"`
AuthURL string `json:"auth_url"`
DisplayName string `json:"display_name"`
AppID string `json:"app_id"`
AppSecret string `json:"app_secret"`
RedirectURL string `json:"redirect_url"`
UsernameField string `json:"username_field"` // name, email, phone
FeiShuEndpoint string `json:"feishu_endpoint"` // 飞书API端点默认为 open.feishu.cn
Proxy string `json:"proxy"`
CoverAttributes bool `json:"cover_attributes"`
DefaultRoles []string `json:"default_roles"`
Enable bool `json:"enable"`
AuthURL string `json:"auth_url"`
DisplayName string `json:"display_name"`
AppID string `json:"app_id"`
AppSecret string `json:"app_secret"`
RedirectURL string `json:"redirect_url"`
UsernameField string `json:"username_field"` // name, email, phone
FeiShuEndpoint string `json:"feishu_endpoint"` // 飞书API端点默认为 open.feishu.cn
Proxy string `json:"proxy"`
CoverAttributes bool `json:"cover_attributes"`
DefaultRoles []string `json:"default_roles"`
DefaultUserGroups []int64 `json:"default_user_groups"`
}
type CallbackOutput struct {
@@ -312,6 +313,8 @@ func (s *SsoClient) Callback(redis storage.Redis, ctx context.Context, code, sta
// 根据UsernameField配置确定username
switch s.FeiShuConfig.UsernameField {
case "userid":
callbackOutput.Username = username
case "name":
if nickname == "" {
return nil, errors.New("feishu user name is empty")