Compare commits

...

16 Commits

Author SHA1 Message Date
ning
f5fb52024b update doc api 2026-03-13 12:36:53 +08:00
ning
04e9cd08da update ai config 2026-03-12 16:57:39 +08:00
ning
1310b8a522 update ai config 2026-03-12 15:42:29 +08:00
ning
0d105e1f9d update mcp 2026-03-11 19:21:53 +08:00
ning
77bca17970 refactor: optimize llm config 2026-03-11 16:43:16 +08:00
ning
3fb5f446be update agent model 2026-03-11 15:36:41 +08:00
ning
f2384cc12b update chat api 2026-03-10 17:04:40 +08:00
ning
72e16b25f3 refactor: update llm config 2026-03-09 20:19:14 +08:00
ning
59c85a8efb update add skill 2026-03-06 22:03:29 +08:00
ning
f50f05ae01 update ai talk 2026-03-06 16:04:04 +08:00
ning
ef6676d3d6 update ai agent 2026-03-06 14:48:47 +08:00
ning
eacf1b650a optimize talk 2026-03-05 19:49:57 +08:00
ning
7566b9b690 add llm 2026-03-05 18:49:00 +08:00
yuansheng
5e01e8e021 refactor: alert rule support Local in timezones
Made-with: Cursor
2026-03-04 15:22:35 +08:00
ning
61c7bbd0d8 fix: doris timeout 2026-03-04 11:29:31 +08:00
ning
303ef3476e fix: doris timeout 2026-03-04 11:16:40 +08:00
40 changed files with 10882 additions and 5 deletions

3338
aiagent/ai_agent.go Normal file

File diff suppressed because it is too large Load Diff

546
aiagent/builtin_tools.go Normal file
View File

@@ -0,0 +1,546 @@
package aiagent
import (
	"context"
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/ccfos/nightingale/v6/datasource"
	"github.com/ccfos/nightingale/v6/dscache"
	"github.com/ccfos/nightingale/v6/models"
	"github.com/ccfos/nightingale/v6/pkg/prom"

	"github.com/toolkits/pkg/logger"
)
const (
	// ToolTypeBuiltin marks tools implemented natively in this package,
	// used as the AgentTool.Type value for every entry in builtinTools.
	ToolTypeBuiltin = "builtin"
)

// =============================================================================
// Datasource lookup indirection (injectable for testing)
// =============================================================================

// PromClientGetter resolves a Prometheus client from a datasource id.
type PromClientGetter func(dsId int64) prom.API

// SQLDatasourceGetter resolves a SQL datasource plugin from its type and id;
// the bool reports whether it was found.
type SQLDatasourceGetter func(dsType string, dsId int64) (datasource.Datasource, bool)

// The default getters read the global cache; tests may swap them via
// SetPromClientGetter / SetSQLDatasourceGetter and restore them with
// ResetDatasourceGetters.
var (
	getPromClientFunc    PromClientGetter    = defaultGetPromClient
	getSQLDatasourceFunc SQLDatasourceGetter = defaultGetSQLDatasource
)

// SetPromClientGetter replaces the Prometheus client lookup (test seam).
func SetPromClientGetter(getter PromClientGetter) {
	getPromClientFunc = getter
}

// SetSQLDatasourceGetter replaces the SQL datasource lookup (test seam).
func SetSQLDatasourceGetter(getter SQLDatasourceGetter) {
	getSQLDatasourceFunc = getter
}

// ResetDatasourceGetters restores both default, cache-backed getters.
func ResetDatasourceGetters() {
	getPromClientFunc = defaultGetPromClient
	getSQLDatasourceFunc = defaultGetSQLDatasource
}

// defaultGetPromClient returns nil: no Prometheus client is available unless
// one is injected via SetPromClientGetter.
func defaultGetPromClient(dsId int64) prom.API {
	return nil
}

// defaultGetSQLDatasource resolves SQL datasources from the global cache.
func defaultGetSQLDatasource(dsType string, dsId int64) (datasource.Datasource, bool) {
	return dscache.DsCache.Get(dsType, dsId)
}

// BuiltinToolHandler executes one builtin tool invocation and returns its
// string result (typically JSON).
type BuiltinToolHandler func(ctx context.Context, wfCtx *models.WorkflowContext, args map[string]interface{}) (string, error)

// BuiltinTool pairs a tool's public definition with its implementation.
type BuiltinTool struct {
	Definition AgentTool
	Handler    BuiltinToolHandler
}
// builtinTools is the registry of all builtin tools, keyed by tool name.
// Descriptions are user-facing strings shown to the model/UI.
var builtinTools = map[string]*BuiltinTool{
	// Prometheus tools
	"list_metrics": {
		Definition: AgentTool{
			Name:        "list_metrics",
			Description: "搜索 Prometheus 数据源的指标名称,支持关键词模糊匹配",
			Type:        ToolTypeBuiltin,
			Parameters: []ToolParameter{
				{Name: "keyword", Type: "string", Description: "搜索关键词,模糊匹配指标名", Required: false},
				{Name: "limit", Type: "integer", Description: "返回数量限制默认30", Required: false},
			},
		},
		Handler: listMetrics,
	},
	"get_metric_labels": {
		Definition: AgentTool{
			Name:        "get_metric_labels",
			Description: "获取 Prometheus 指标的所有标签键及其可选值",
			Type:        ToolTypeBuiltin,
			Parameters: []ToolParameter{
				{Name: "metric", Type: "string", Description: "指标名称", Required: true},
			},
		},
		Handler: getMetricLabels,
	},
	// SQL datasource tools
	"list_databases": {
		Definition: AgentTool{
			Name:        "list_databases",
			Description: "列出 SQL 数据源MySQL/Doris/ClickHouse/PostgreSQL中的所有数据库",
			Type:        ToolTypeBuiltin,
			Parameters:  []ToolParameter{},
		},
		Handler: listDatabases,
	},
	"list_tables": {
		Definition: AgentTool{
			Name:        "list_tables",
			Description: "列出指定数据库中的所有表",
			Type:        ToolTypeBuiltin,
			Parameters: []ToolParameter{
				{Name: "database", Type: "string", Description: "数据库名", Required: true},
			},
		},
		Handler: listTables,
	},
	"describe_table": {
		Definition: AgentTool{
			Name:        "describe_table",
			Description: "获取表的字段结构(字段名、类型、注释)",
			Type:        ToolTypeBuiltin,
			Parameters: []ToolParameter{
				{Name: "database", Type: "string", Description: "数据库名", Required: true},
				{Name: "table", Type: "string", Description: "表名", Required: true},
			},
		},
		Handler: describeTable,
	},
}
// GetBuiltinToolDef looks up a single builtin tool definition by name; the
// bool reports whether the tool is registered.
func GetBuiltinToolDef(name string) (AgentTool, bool) {
	tool, ok := builtinTools[name]
	if !ok {
		return AgentTool{}, false
	}
	return tool.Definition, true
}
// GetBuiltinToolDefs resolves the given names to builtin tool definitions,
// silently skipping names that are not registered.
func GetBuiltinToolDefs(names []string) []AgentTool {
	var defs []AgentTool
	for _, name := range names {
		def, ok := GetBuiltinToolDef(name)
		if !ok {
			continue
		}
		defs = append(defs, def)
	}
	return defs
}
// GetAllBuiltinToolDefs returns the definitions of every registered builtin
// tool. Order is unspecified (map iteration order is random).
func GetAllBuiltinToolDefs() []AgentTool {
	all := make([]AgentTool, 0, len(builtinTools))
	for _, entry := range builtinTools {
		all = append(all, entry.Definition)
	}
	return all
}
// ExecuteBuiltinTool runs the named builtin tool with JSON-encoded arguments.
// Returns (result, handled, err): handled is false when name is not a builtin
// tool, so the caller can fall through to other tool registries.
func ExecuteBuiltinTool(ctx context.Context, name string, wfCtx *models.WorkflowContext, argsJSON string) (string, bool, error) {
	tool, ok := builtinTools[name]
	if !ok {
		return "", false, nil
	}

	var args map[string]interface{}
	if argsJSON != "" {
		if err := json.Unmarshal([]byte(argsJSON), &args); err != nil {
			// Not valid JSON: fall back to treating the raw payload as a
			// single "input" string argument.
			args = map[string]interface{}{"input": argsJSON}
		}
	}
	// Handlers always receive a non-nil map (argsJSON may have been "" or "null").
	if args == nil {
		args = map[string]interface{}{}
	}

	result, err := tool.Handler(ctx, wfCtx, args)
	return result, true, err
}
// getDatasourceId reads the "datasource_id" entry from wfCtx.Inputs.
// Returns 0 when the context, the inputs map, or the entry is absent, or
// when the value is not a valid base-10 integer.
func getDatasourceId(wfCtx *models.WorkflowContext) int64 {
	if wfCtx == nil || wfCtx.Inputs == nil {
		return 0
	}
	dsIdStr, ok := wfCtx.Inputs["datasource_id"]
	if !ok {
		return 0
	}
	// strconv.ParseInt makes failure explicit; the previous fmt.Sscanf
	// silently ignored its error and accepted trailing garbage ("12abc" -> 12).
	dsId, err := strconv.ParseInt(strings.TrimSpace(dsIdStr), 10, 64)
	if err != nil {
		return 0
	}
	return dsId
}
// getDatasourceType reads the "datasource_type" entry from wfCtx.Inputs,
// returning "" when the context or the inputs map is absent.
func getDatasourceType(wfCtx *models.WorkflowContext) string {
	if wfCtx != nil && wfCtx.Inputs != nil {
		return wfCtx.Inputs["datasource_type"]
	}
	return ""
}
// =============================================================================
// Prometheus tools
// =============================================================================

// listMetrics returns Prometheus metric names from the datasource named in
// wfCtx.Inputs, optionally filtered by a case-insensitive keyword, as a JSON
// array of strings.
func listMetrics(ctx context.Context, wfCtx *models.WorkflowContext, args map[string]interface{}) (string, error) {
	dsId := getDatasourceId(wfCtx)
	if dsId == 0 {
		return "", fmt.Errorf("datasource_id not found in inputs")
	}

	keyword, _ := args["keyword"].(string)
	// JSON numbers decode as float64; default to 30 results.
	limit := 30
	if n, ok := args["limit"].(float64); ok && n > 0 {
		limit = int(n)
	}

	client := getPromClientFunc(dsId)
	if client == nil {
		return "", fmt.Errorf("prometheus datasource not found: %d", dsId)
	}

	// Every metric name is a value of the __name__ label.
	values, _, err := client.LabelValues(ctx, "__name__", nil)
	if err != nil {
		return "", fmt.Errorf("failed to get metrics: %v", err)
	}

	keyword = strings.ToLower(keyword)
	matched := make([]string, 0)
	for _, v := range values {
		name := string(v)
		if keyword != "" && !strings.Contains(strings.ToLower(name), keyword) {
			continue
		}
		matched = append(matched, name)
		if len(matched) >= limit {
			break
		}
	}

	logger.Debugf("list_metrics: found %d metrics (keyword=%s, limit=%d)", len(matched), keyword, limit)
	bytes, _ := json.Marshal(matched)
	return string(bytes), nil
}
// getMetricLabels returns, as a JSON object mapping label name -> list of
// observed values, the labels of one metric over a fixed 1-hour lookback.
// Required arg: "metric". The __name__ label itself is excluded.
func getMetricLabels(ctx context.Context, wfCtx *models.WorkflowContext, args map[string]interface{}) (string, error) {
	dsId := getDatasourceId(wfCtx)
	if dsId == 0 {
		return "", fmt.Errorf("datasource_id not found in inputs")
	}
	metric, ok := args["metric"].(string)
	if !ok || metric == "" {
		return "", fmt.Errorf("metric parameter is required")
	}
	client := getPromClientFunc(dsId)
	if client == nil {
		return "", fmt.Errorf("prometheus datasource not found: %d", dsId)
	}
	// Fetch all series for the metric in the last hour; labels present only
	// outside this window will be missed.
	endTime := time.Now()
	startTime := endTime.Add(-1 * time.Hour)
	series, _, err := client.Series(ctx, []string{metric}, startTime, endTime)
	if err != nil {
		return "", fmt.Errorf("failed to get metric series: %v", err)
	}
	// Deduplicate label values across all returned series.
	labels := make(map[string][]string)
	seen := make(map[string]map[string]bool)
	for _, s := range series {
		for k, v := range s {
			key := string(k)
			val := string(v)
			if key == "__name__" {
				continue
			}
			if seen[key] == nil {
				seen[key] = make(map[string]bool)
			}
			if !seen[key][val] {
				seen[key][val] = true
				labels[key] = append(labels[key], val)
			}
		}
	}
	logger.Debugf("get_metric_labels: metric=%s, found %d labels", metric, len(labels))
	bytes, _ := json.Marshal(labels)
	return string(bytes), nil
}
// =============================================================================
// SQL datasource tools
// =============================================================================

// SQLMetadataQuerier describes the metadata operations the SQL tools rely on.
// NOTE(review): nothing visible in this file implements or consumes this
// interface — confirm it is used elsewhere, or consider removing it.
type SQLMetadataQuerier interface {
	ListDatabases(ctx context.Context) ([]string, error)
	ListTables(ctx context.Context, database string) ([]string, error)
	DescribeTable(ctx context.Context, database, table string) ([]map[string]interface{}, error)
}
// listDatabases lists all databases of the SQL datasource identified by
// wfCtx.Inputs (datasource_id / datasource_type), as a JSON array of strings.
func listDatabases(ctx context.Context, wfCtx *models.WorkflowContext, args map[string]interface{}) (string, error) {
	dsId := getDatasourceId(wfCtx)
	dsType := getDatasourceType(wfCtx)
	if dsId == 0 {
		return "", fmt.Errorf("datasource_id not found in inputs")
	}
	if dsType == "" {
		return "", fmt.Errorf("datasource_type not found in inputs")
	}

	plug, exists := getSQLDatasourceFunc(dsType, dsId)
	if !exists {
		return "", fmt.Errorf("datasource not found: %s/%d", dsType, dsId)
	}

	// Pick the dialect-appropriate statement.
	var sql string
	switch dsType {
	case "mysql", "doris", "ck", "clickhouse":
		sql = "SHOW DATABASES"
	case "pgsql", "postgresql":
		sql = "SELECT datname FROM pg_database WHERE datistemplate = false"
	default:
		return "", fmt.Errorf("unsupported datasource type for list_databases: %s", dsType)
	}

	query := map[string]interface{}{"sql": sql}
	data, _, err := plug.QueryLog(ctx, query)
	if err != nil {
		return "", fmt.Errorf("failed to list databases: %v", err)
	}

	databases := extractColumnValues(data, dsType, "database")
	logger.Debugf("list_databases: dsType=%s, found %d databases", dsType, len(databases))
	bytes, _ := json.Marshal(databases)
	return string(bytes), nil
}
// listTables lists the tables of one database in the SQL datasource named in
// wfCtx.Inputs, returned as a JSON array of strings.
// Required arg: "database".
func listTables(ctx context.Context, wfCtx *models.WorkflowContext, args map[string]interface{}) (string, error) {
	dsId := getDatasourceId(wfCtx)
	dsType := getDatasourceType(wfCtx)
	if dsId == 0 {
		return "", fmt.Errorf("datasource_id not found in inputs")
	}
	// Validate dsType up front, consistent with listDatabases; previously an
	// empty type only surfaced as a confusing "datasource not found" error.
	if dsType == "" {
		return "", fmt.Errorf("datasource_type not found in inputs")
	}

	database, ok := args["database"].(string)
	if !ok || database == "" {
		return "", fmt.Errorf("database parameter is required")
	}

	plug, exists := getSQLDatasourceFunc(dsType, dsId)
	if !exists {
		return "", fmt.Errorf("datasource not found: %s/%d", dsType, dsId)
	}

	var sql string
	switch dsType {
	case "mysql", "doris", "ck", "clickhouse":
		// NOTE(review): database comes from a tool-call argument and is
		// interpolated into SQL; backtick quoting does not escape backticks
		// inside the identifier itself.
		sql = fmt.Sprintf("SHOW TABLES FROM `%s`", database)
	case "pgsql", "postgresql":
		// Constant statement — no formatting directives needed (the previous
		// fmt.Sprintf with no verbs is flagged by vet/staticcheck).
		// NOTE(review): only the public schema is listed and the database
		// argument is not used in the statement for PostgreSQL — confirm
		// this is intended.
		sql = "SELECT tablename FROM pg_tables WHERE schemaname = 'public'"
	default:
		return "", fmt.Errorf("unsupported datasource type for list_tables: %s", dsType)
	}

	query := map[string]interface{}{"sql": sql, "database": database}
	data, _, err := plug.QueryLog(ctx, query)
	if err != nil {
		return "", fmt.Errorf("failed to list tables: %v", err)
	}

	tables := extractColumnValues(data, dsType, "table")
	logger.Debugf("list_tables: dsType=%s, database=%s, found %d tables", dsType, database, len(tables))
	bytes, _ := json.Marshal(tables)
	return string(bytes), nil
}
// describeTable returns the column structure (name, type, comment) of one
// table as a JSON array of ColumnInfo objects.
// Required args: "database", "table".
func describeTable(ctx context.Context, wfCtx *models.WorkflowContext, args map[string]interface{}) (string, error) {
	dsId := getDatasourceId(wfCtx)
	dsType := getDatasourceType(wfCtx)
	if dsId == 0 {
		return "", fmt.Errorf("datasource_id not found in inputs")
	}
	// Validate dsType up front, consistent with listDatabases; previously an
	// empty type only surfaced as a confusing "datasource not found" error.
	if dsType == "" {
		return "", fmt.Errorf("datasource_type not found in inputs")
	}

	database, ok := args["database"].(string)
	if !ok || database == "" {
		return "", fmt.Errorf("database parameter is required")
	}
	table, ok := args["table"].(string)
	if !ok || table == "" {
		return "", fmt.Errorf("table parameter is required")
	}

	plug, exists := getSQLDatasourceFunc(dsType, dsId)
	if !exists {
		return "", fmt.Errorf("datasource not found: %s/%d", dsType, dsId)
	}

	// NOTE(review): database/table come from tool-call arguments and are
	// interpolated into SQL; the quoting below does not escape quotes or
	// backticks inside the identifiers themselves — consider validating
	// identifier characters upstream.
	var sql string
	switch dsType {
	case "mysql", "doris":
		sql = fmt.Sprintf("DESCRIBE `%s`.`%s`", database, table)
	case "ck", "clickhouse":
		sql = fmt.Sprintf("DESCRIBE TABLE `%s`.`%s`", database, table)
	case "pgsql", "postgresql":
		sql = fmt.Sprintf(`SELECT column_name as "Field", data_type as "Type", is_nullable as "Null", column_default as "Default" FROM information_schema.columns WHERE table_schema = 'public' AND table_name = '%s'`, table)
	default:
		return "", fmt.Errorf("unsupported datasource type for describe_table: %s", dsType)
	}

	query := map[string]interface{}{"sql": sql, "database": database}
	data, _, err := plug.QueryLog(ctx, query)
	if err != nil {
		return "", fmt.Errorf("failed to describe table: %v", err)
	}

	columns := convertToColumnInfo(data, dsType)
	logger.Debugf("describe_table: dsType=%s, table=%s.%s, found %d columns", dsType, database, table, len(columns))
	bytes, _ := json.Marshal(columns)
	return string(bytes), nil
}
// ColumnInfo is the normalized per-column schema record produced by
// describe_table, independent of the underlying SQL dialect.
type ColumnInfo struct {
	Name    string `json:"name"`              // column name
	Type    string `json:"type"`              // dialect-native type string
	Comment string `json:"comment,omitempty"` // column comment, when the dialect provides one
}
// extractColumnValues pulls one logical column (database or table name) out
// of a generic query result: a slice of rows, each a map from result-column
// name to value. Rows that are not maps or carry no matching string value
// are skipped.
func extractColumnValues(data []interface{}, dsType string, columnType string) []string {
	result := make([]string, 0)
	for _, row := range data {
		rowMap, ok := row.(map[string]interface{})
		if !ok {
			continue
		}
		var value string
		for _, key := range getPossibleColumnNames(dsType, columnType) {
			if v, ok := rowMap[key]; ok {
				if s, ok := v.(string); ok {
					value = s
					break
				}
			}
		}
		// MySQL/Doris "SHOW TABLES FROM db" names its result column
		// "Tables_in_<db>", which can never match an exact key lookup, so
		// fall back to a prefix match. (Previously the literal key
		// "Tables_in_" was looked up and table listings came back empty.)
		if value == "" && columnType == "table" {
			for key, v := range rowMap {
				if !strings.HasPrefix(key, "Tables_in_") {
					continue
				}
				if s, ok := v.(string); ok {
					value = s
					break
				}
			}
		}
		if value != "" {
			result = append(result, value)
		}
	}
	return result
}

// getPossibleColumnNames lists candidate result-column names per logical
// column type, covering the dialects used by the tools above.
func getPossibleColumnNames(dsType string, columnType string) []string {
	switch columnType {
	case "database":
		return []string{"Database", "database", "datname", "name"}
	case "table":
		// MySQL's "Tables_in_<db>" is handled as a prefix match in
		// extractColumnValues, not listed here.
		return []string{"table", "tablename", "name", "Name"}
	default:
		return []string{}
	}
}
// convertToColumnInfo normalizes a DESCRIBE-style query result into
// []ColumnInfo, tolerating the result-column name variations of the
// supported dialects. Rows without a recognizable field name are dropped.
func convertToColumnInfo(data []interface{}, dsType string) []ColumnInfo {
	// firstString returns the first string value found under any of the keys.
	firstString := func(rowMap map[string]interface{}, keys ...string) string {
		for _, key := range keys {
			if v, ok := rowMap[key]; ok {
				if s, ok := v.(string); ok {
					return s
				}
			}
		}
		return ""
	}

	result := make([]ColumnInfo, 0)
	for _, row := range data {
		rowMap, ok := row.(map[string]interface{})
		if !ok {
			continue
		}
		col := ColumnInfo{
			Name:    firstString(rowMap, "Field", "field", "column_name", "name"),
			Type:    firstString(rowMap, "Type", "type", "data_type"),
			Comment: firstString(rowMap, "Comment", "comment", "column_comment"),
		}
		if col.Name != "" {
			result = append(result, col)
		}
	}
	return result
}

376
aiagent/llm/claude.go Normal file
View File

@@ -0,0 +1,376 @@
package llm
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
)
const (
	// DefaultClaudeURL is the public Anthropic Messages endpoint.
	DefaultClaudeURL = "https://api.anthropic.com/v1/messages"
	// ClaudeAPIVersion is the API version pinned via the anthropic-version header.
	ClaudeAPIVersion = "2023-06-01"
	// DefaultClaudeMaxTokens is applied when the request leaves MaxTokens
	// unset (see convertRequest).
	DefaultClaudeMaxTokens = 4096
)

// Claude implements the LLM interface for the Anthropic Claude API.
type Claude struct {
	config *Config      // provider configuration: endpoint, model, key, headers
	client *http.Client // HTTP client used for all requests
}
// NewClaude creates a Claude provider. If cfg.BaseURL is empty it is set to
// the public Anthropic endpoint. If client is nil, http.DefaultClient is used
// so the first request does not panic on a nil client.
func NewClaude(cfg *Config, client *http.Client) (*Claude, error) {
	if cfg.BaseURL == "" {
		cfg.BaseURL = DefaultClaudeURL
	}
	if client == nil {
		client = http.DefaultClient
	}
	return &Claude{
		config: cfg,
		client: client,
	}, nil
}
// Name returns the provider identifier for this implementation.
func (c *Claude) Name() string {
	return ProviderClaude
}
// Request/response wire types for the Anthropic Messages API.

// claudeRequest is the POST body for /v1/messages.
type claudeRequest struct {
	Model       string          `json:"model"`
	Messages    []claudeMessage `json:"messages"`
	System      string          `json:"system,omitempty"`
	MaxTokens   int             `json:"max_tokens"`
	Temperature float64         `json:"temperature,omitempty"`
	TopP        float64         `json:"top_p,omitempty"`
	Stop        []string        `json:"stop_sequences,omitempty"`
	Stream      bool            `json:"stream,omitempty"`
	Tools       []claudeTool    `json:"tools,omitempty"`
}

// claudeMessage is one conversation turn; content is a list of typed blocks.
type claudeMessage struct {
	Role    string               `json:"role"`
	Content []claudeContentBlock `json:"content"`
}

// claudeContentBlock is a tagged union: Type selects which fields are
// meaningful ("text" -> Text; "tool_use" -> ID/Name/Input; tool results ->
// ToolUseID/Content).
type claudeContentBlock struct {
	Type      string `json:"type"`
	Text      string `json:"text,omitempty"`
	ID        string `json:"id,omitempty"`
	Name      string `json:"name,omitempty"`
	Input     any    `json:"input,omitempty"`
	ToolUseID string `json:"tool_use_id,omitempty"`
	Content   string `json:"content,omitempty"`
}

// claudeTool declares a callable tool with a JSON Schema for its input.
type claudeTool struct {
	Name        string                 `json:"name"`
	Description string                 `json:"description"`
	InputSchema map[string]interface{} `json:"input_schema"`
}

// claudeResponse is the non-streaming reply; Error is populated instead of
// Content on API-level failures.
type claudeResponse struct {
	ID           string               `json:"id"`
	Type         string               `json:"type"`
	Role         string               `json:"role"`
	Content      []claudeContentBlock `json:"content"`
	Model        string               `json:"model"`
	StopReason   string               `json:"stop_reason"`
	StopSequence string               `json:"stop_sequence,omitempty"`
	Usage        *struct {
		InputTokens  int `json:"input_tokens"`
		OutputTokens int `json:"output_tokens"`
	} `json:"usage,omitempty"`
	Error *struct {
		Type    string `json:"type"`
		Message string `json:"message"`
	} `json:"error,omitempty"`
}

// claudeStreamEvent is one SSE event from the streaming endpoint; which
// pointer fields are set depends on Type.
type claudeStreamEvent struct {
	Type         string              `json:"type"`
	Index        int                 `json:"index,omitempty"`
	ContentBlock *claudeContentBlock `json:"content_block,omitempty"`
	Delta        *claudeStreamDelta  `json:"delta,omitempty"`
	Message      *claudeResponse     `json:"message,omitempty"`
	Usage        *claudeStreamUsage  `json:"usage,omitempty"`
}

// claudeStreamDelta carries incremental text or partial tool-call JSON.
type claudeStreamDelta struct {
	Type        string `json:"type"`
	Text        string `json:"text,omitempty"`
	PartialJSON string `json:"partial_json,omitempty"`
	StopReason  string `json:"stop_reason,omitempty"`
}

// claudeStreamUsage reports output-token usage on stream events.
type claudeStreamUsage struct {
	OutputTokens int `json:"output_tokens"`
}
// Generate performs one non-streaming completion against the Claude Messages
// API and converts the reply to the provider-neutral shape.
func (c *Claude) Generate(ctx context.Context, req *GenerateRequest) (*GenerateResponse, error) {
	body := c.convertRequest(req)
	body.Stream = false

	raw, err := c.doRequest(ctx, body)
	if err != nil {
		return nil, err
	}

	var parsed claudeResponse
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return nil, fmt.Errorf("failed to parse response: %w", err)
	}
	if parsed.Error != nil {
		return nil, fmt.Errorf("Claude API error: %s", parsed.Error.Message)
	}
	return c.convertResponse(&parsed), nil
}
// GenerateStream starts a streaming completion. Chunks arrive on the returned
// channel, which is closed by the reader goroutine after a final Done chunk.
func (c *Claude) GenerateStream(ctx context.Context, req *GenerateRequest) (<-chan StreamChunk, error) {
	body := c.convertRequest(req)
	body.Stream = true

	payload, err := json.Marshal(body)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}

	httpReq, err := http.NewRequestWithContext(ctx, "POST", c.config.BaseURL, bytes.NewBuffer(payload))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	c.setHeaders(httpReq)

	resp, err := c.client.Do(httpReq)
	if err != nil {
		return nil, fmt.Errorf("failed to send request: %w", err)
	}
	// Error responses are consumed and closed here; on success the body is
	// owned (and closed) by streamResponse.
	if resp.StatusCode >= 400 {
		defer resp.Body.Close()
		msg, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("Claude API error (status %d): %s", resp.StatusCode, string(msg))
	}

	ch := make(chan StreamChunk, 100)
	go c.streamResponse(ctx, resp, ch)
	return ch, nil
}
// streamResponse reads Claude's SSE stream, translating Anthropic event types
// into StreamChunks. It owns resp.Body and ch: both are closed before
// returning, and every exit path sends exactly one Done chunk.
func (c *Claude) streamResponse(ctx context.Context, resp *http.Response, ch chan<- StreamChunk) {
	defer close(ch)
	defer resp.Body.Close()
	reader := bufio.NewReader(resp.Body)
	// currentToolCall accumulates a tool_use block across input_json_delta
	// events until its content_block_stop event arrives.
	var currentToolCall *ToolCall
	for {
		// Bail out promptly if the caller cancels the context.
		select {
		case <-ctx.Done():
			ch <- StreamChunk{Done: true, Error: ctx.Err()}
			return
		default:
		}
		line, err := reader.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				ch <- StreamChunk{Done: true, Error: err}
			} else {
				// EOF without a message_stop event: treated as a clean end.
				ch <- StreamChunk{Done: true}
			}
			return
		}
		line = strings.TrimSpace(line)
		// Only "data: ..." SSE lines carry payloads; skip blanks and other fields.
		if line == "" || !strings.HasPrefix(line, "data: ") {
			continue
		}
		data := strings.TrimPrefix(line, "data: ")
		var event claudeStreamEvent
		if err := json.Unmarshal([]byte(data), &event); err != nil {
			// Malformed event: skip it rather than abort the whole stream.
			continue
		}
		switch event.Type {
		case "content_block_start":
			// Begin accumulating a tool call; its JSON args arrive in deltas.
			if event.ContentBlock != nil && event.ContentBlock.Type == "tool_use" {
				currentToolCall = &ToolCall{
					ID:   event.ContentBlock.ID,
					Name: event.ContentBlock.Name,
				}
			}
		case "content_block_delta":
			if event.Delta != nil {
				chunk := StreamChunk{}
				switch event.Delta.Type {
				case "text_delta":
					chunk.Content = event.Delta.Text
				case "input_json_delta":
					// Partial JSON for the in-flight tool call's arguments.
					if currentToolCall != nil {
						currentToolCall.Arguments += event.Delta.PartialJSON
					}
				}
				if chunk.Content != "" {
					ch <- chunk
				}
			}
		case "content_block_stop":
			// Tool-call arguments are complete: emit it as a single chunk.
			if currentToolCall != nil {
				ch <- StreamChunk{
					ToolCalls: []ToolCall{*currentToolCall},
				}
				currentToolCall = nil
			}
		case "message_delta":
			if event.Delta != nil && event.Delta.StopReason != "" {
				ch <- StreamChunk{
					FinishReason: event.Delta.StopReason,
				}
			}
		case "message_stop":
			ch <- StreamChunk{Done: true}
			return
		case "error":
			// NOTE(review): the event's error detail is discarded here;
			// surfacing the payload text would aid debugging.
			ch <- StreamChunk{Done: true, Error: fmt.Errorf("stream error")}
			return
		}
	}
}
// convertRequest maps the provider-neutral request onto Claude's Messages API
// shape. System messages are lifted into the top-level "system" field (if
// several are present, the last one wins); all other messages become single
// text content blocks.
// NOTE(review): every non-system message is flattened to a text block here —
// confirm callers never rely on structured tool_result content.
func (c *Claude) convertRequest(req *GenerateRequest) *claudeRequest {
	claudeReq := &claudeRequest{
		Model:       c.config.Model,
		MaxTokens:   req.MaxTokens,
		Temperature: req.Temperature,
		TopP:        req.TopP,
		Stop:        req.Stop,
	}
	// Apply the default when the caller left MaxTokens unset or non-positive.
	if claudeReq.MaxTokens <= 0 {
		claudeReq.MaxTokens = DefaultClaudeMaxTokens
	}
	for _, msg := range req.Messages {
		if msg.Role == RoleSystem {
			claudeReq.System = msg.Content
			continue
		}
		// Claude expects content blocks rather than bare strings.
		claudeMsg := claudeMessage{
			Role: msg.Role,
			Content: []claudeContentBlock{
				{Type: "text", Text: msg.Content},
			},
		}
		claudeReq.Messages = append(claudeReq.Messages, claudeMsg)
	}
	// Tool definitions map directly onto Claude's input_schema format.
	for _, tool := range req.Tools {
		claudeReq.Tools = append(claudeReq.Tools, claudeTool{
			Name:        tool.Name,
			Description: tool.Description,
			InputSchema: tool.Parameters,
		})
	}
	return claudeReq
}
// convertResponse flattens Claude content blocks into the provider-neutral
// response: text blocks are concatenated, tool_use blocks become ToolCalls.
func (c *Claude) convertResponse(resp *claudeResponse) *GenerateResponse {
	out := &GenerateResponse{FinishReason: resp.StopReason}

	var text strings.Builder
	for _, block := range resp.Content {
		switch block.Type {
		case "text":
			text.WriteString(block.Text)
		case "tool_use":
			argsJSON, _ := json.Marshal(block.Input)
			out.ToolCalls = append(out.ToolCalls, ToolCall{
				ID:        block.ID,
				Name:      block.Name,
				Arguments: string(argsJSON),
			})
		}
	}
	out.Content = text.String()

	if resp.Usage != nil {
		out.Usage = &Usage{
			PromptTokens:     resp.Usage.InputTokens,
			CompletionTokens: resp.Usage.OutputTokens,
			TotalTokens:      resp.Usage.InputTokens + resp.Usage.OutputTokens,
		}
	}
	return out
}
// doRequest POSTs the JSON-encoded request to the configured endpoint and
// returns the raw response body, turning transport failures and HTTP >= 400
// statuses into errors.
func (c *Claude) doRequest(ctx context.Context, req *claudeRequest) ([]byte, error) {
	payload, err := json.Marshal(req)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}

	httpReq, err := http.NewRequestWithContext(ctx, "POST", c.config.BaseURL, bytes.NewBuffer(payload))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	c.setHeaders(httpReq)

	resp, err := c.client.Do(httpReq)
	if err != nil {
		return nil, fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()

	// Read the body before the status check so error payloads can be
	// included in the message.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}
	if resp.StatusCode >= 400 {
		return nil, fmt.Errorf("Claude API error (status %d): %s", resp.StatusCode, string(body))
	}
	return body, nil
}
// setHeaders applies the JSON content type, the pinned anthropic-version,
// the x-api-key credential (when configured), and any user-configured extra
// headers. Extras are applied last, so they can override the defaults.
func (c *Claude) setHeaders(req *http.Request) {
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("anthropic-version", ClaudeAPIVersion)
	if c.config.APIKey != "" {
		req.Header.Set("x-api-key", c.config.APIKey)
	}
	for k, v := range c.config.Headers {
		req.Header.Set(k, v)
	}
}

376
aiagent/llm/gemini.go Normal file
View File

@@ -0,0 +1,376 @@
package llm
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
)
const (
	// DefaultGeminiURL is the model-collection root of the Generative
	// Language REST API; buildURL appends "<model>:<action>".
	DefaultGeminiURL = "https://generativelanguage.googleapis.com/v1beta/models"
)

// Gemini implements the LLM interface for the Google Gemini API.
type Gemini struct {
	config *Config      // provider configuration: endpoint, model, key, headers
	client *http.Client // HTTP client used for all requests
}
// NewGemini creates a Gemini provider. If cfg.BaseURL is empty it is set to
// the public Generative Language endpoint. If client is nil,
// http.DefaultClient is used so the first request does not panic on a nil
// client (consistent with NewClaude).
func NewGemini(cfg *Config, client *http.Client) (*Gemini, error) {
	if cfg.BaseURL == "" {
		cfg.BaseURL = DefaultGeminiURL
	}
	if client == nil {
		client = http.DefaultClient
	}
	return &Gemini{
		config: cfg,
		client: client,
	}, nil
}
// Name returns the provider identifier for this implementation.
func (g *Gemini) Name() string {
	return ProviderGemini
}
// Request/response wire types for the Gemini generateContent API.

// geminiRequest is the POST body for (stream)generateContent.
type geminiRequest struct {
	Contents          []geminiContent         `json:"contents"`
	SystemInstruction *geminiContent          `json:"systemInstruction,omitempty"`
	Tools             []geminiTool            `json:"tools,omitempty"`
	GenerationConfig  *geminiGenerationConfig `json:"generationConfig,omitempty"`
}

// geminiContent is one conversation turn ("user" or "model") made of parts.
type geminiContent struct {
	Role  string       `json:"role,omitempty"`
	Parts []geminiPart `json:"parts"`
}

// geminiPart is a union: one of Text, FunctionCall, or FunctionResponse is
// expected to be set per part.
type geminiPart struct {
	Text             string                  `json:"text,omitempty"`
	FunctionCall     *geminiFunctionCall     `json:"functionCall,omitempty"`
	FunctionResponse *geminiFunctionResponse `json:"functionResponse,omitempty"`
}

// geminiFunctionCall is a model-requested tool invocation.
type geminiFunctionCall struct {
	Name string                 `json:"name"`
	Args map[string]interface{} `json:"args"`
}

// geminiFunctionResponse returns a tool's result to the model.
type geminiFunctionResponse struct {
	Name     string                 `json:"name"`
	Response map[string]interface{} `json:"response"`
}

// geminiTool wraps a set of function declarations.
type geminiTool struct {
	FunctionDeclarations []geminiFunctionDeclaration `json:"functionDeclarations,omitempty"`
}

// geminiFunctionDeclaration declares one callable function with a JSON
// Schema for its parameters.
type geminiFunctionDeclaration struct {
	Name        string                 `json:"name"`
	Description string                 `json:"description"`
	Parameters  map[string]interface{} `json:"parameters,omitempty"`
}

// geminiGenerationConfig carries sampling and length controls.
type geminiGenerationConfig struct {
	Temperature     float64  `json:"temperature,omitempty"`
	TopP            float64  `json:"topP,omitempty"`
	MaxOutputTokens int      `json:"maxOutputTokens,omitempty"`
	StopSequences   []string `json:"stopSequences,omitempty"`
}

// geminiResponse is the API reply; Error is populated instead of Candidates
// on API-level failures.
type geminiResponse struct {
	Candidates []struct {
		Content       geminiContent `json:"content"`
		FinishReason  string        `json:"finishReason"`
		SafetyRatings []struct {
			Category    string `json:"category"`
			Probability string `json:"probability"`
		} `json:"safetyRatings,omitempty"`
	} `json:"candidates"`
	UsageMetadata *struct {
		PromptTokenCount     int `json:"promptTokenCount"`
		CandidatesTokenCount int `json:"candidatesTokenCount"`
		TotalTokenCount      int `json:"totalTokenCount"`
	} `json:"usageMetadata,omitempty"`
	Error *struct {
		Code    int    `json:"code"`
		Message string `json:"message"`
		Status  string `json:"status"`
	} `json:"error,omitempty"`
}
// Generate performs one non-streaming generateContent call and converts the
// reply to the provider-neutral shape.
func (g *Gemini) Generate(ctx context.Context, req *GenerateRequest) (*GenerateResponse, error) {
	body := g.convertRequest(req)

	raw, err := g.doRequest(ctx, g.buildURL(false), body)
	if err != nil {
		return nil, err
	}

	var parsed geminiResponse
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return nil, fmt.Errorf("failed to parse response: %w", err)
	}
	if parsed.Error != nil {
		return nil, fmt.Errorf("Gemini API error: %s", parsed.Error.Message)
	}
	return g.convertResponse(&parsed), nil
}
// GenerateStream starts a streaming completion against the
// streamGenerateContent endpoint. Chunks arrive on the returned channel,
// which is closed by the reader goroutine after a final Done chunk.
func (g *Gemini) GenerateStream(ctx context.Context, req *GenerateRequest) (<-chan StreamChunk, error) {
	geminiReq := g.convertRequest(req)
	jsonData, err := json.Marshal(geminiReq)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	url := g.buildURL(true)
	httpReq, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	g.setHeaders(httpReq)
	resp, err := g.client.Do(httpReq)
	if err != nil {
		return nil, fmt.Errorf("failed to send request: %w", err)
	}
	// Error responses are consumed and closed here; on success the body is
	// owned (and closed) by streamResponse.
	if resp.StatusCode >= 400 {
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		return nil, fmt.Errorf("Gemini API error (status %d): %s", resp.StatusCode, string(body))
	}
	ch := make(chan StreamChunk, 100)
	go g.streamResponse(ctx, resp, ch)
	return ch, nil
}
// streamResponse reads Gemini's streaming output line by line, accumulating
// text until a complete JSON object parses, then emitting its first
// candidate as a chunk. Both bare JSON lines and SSE "data: ..." framing are
// handled. It owns resp.Body and ch: both are closed before returning.
// NOTE(review): the REST endpoint streams a JSON *array* of responses unless
// alt=sse is requested; the buffer-until-parse approach may never parse
// array framing ("[", ",", "]" lines) — confirm against the endpoint in use.
func (g *Gemini) streamResponse(ctx context.Context, resp *http.Response, ch chan<- StreamChunk) {
	defer close(ch)
	defer resp.Body.Close()
	reader := bufio.NewReader(resp.Body)
	var buffer strings.Builder
	for {
		// Bail out promptly if the caller cancels the context.
		select {
		case <-ctx.Done():
			ch <- StreamChunk{Done: true, Error: ctx.Err()}
			return
		case <-ctx.Done():
			ch <- StreamChunk{Done: true, Error: ctx.Err()}
			return
		default:
		}
		line, err := reader.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				ch <- StreamChunk{Done: true, Error: err}
			} else {
				// Normal termination: the server closed the stream.
				ch <- StreamChunk{Done: true}
			}
			return
		}
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}
		// Strip SSE framing when the server uses it.
		if strings.HasPrefix(line, "data: ") {
			line = strings.TrimPrefix(line, "data: ")
		}
		buffer.WriteString(line)
		// Keep accumulating until the buffer parses as one full response.
		var geminiResp geminiResponse
		if err := json.Unmarshal([]byte(buffer.String()), &geminiResp); err != nil {
			continue
		}
		buffer.Reset()
		if len(geminiResp.Candidates) > 0 {
			// Only the first candidate is surfaced.
			candidate := geminiResp.Candidates[0]
			chunk := StreamChunk{
				FinishReason: candidate.FinishReason,
			}
			for _, part := range candidate.Content.Parts {
				if part.Text != "" {
					chunk.Content += part.Text
				}
				if part.FunctionCall != nil {
					argsJSON, _ := json.Marshal(part.FunctionCall.Args)
					chunk.ToolCalls = append(chunk.ToolCalls, ToolCall{
						Name:      part.FunctionCall.Name,
						Arguments: string(argsJSON),
					})
				}
			}
			ch <- chunk
			// A non-STOP finish reason (e.g. safety/length) ends the stream
			// early; normal termination is signaled by reader EOF above.
			if candidate.FinishReason != "" && candidate.FinishReason != "STOP" {
				ch <- StreamChunk{Done: true}
				return
			}
		}
	}
}
// convertRequest maps the provider-neutral request onto the Gemini schema:
// system messages become systemInstruction, the assistant role becomes
// "model", and tools become function declarations.
func (g *Gemini) convertRequest(req *GenerateRequest) *geminiRequest {
	out := &geminiRequest{
		GenerationConfig: &geminiGenerationConfig{
			Temperature:     req.Temperature,
			TopP:            req.TopP,
			MaxOutputTokens: req.MaxTokens,
			StopSequences:   req.Stop,
		},
	}

	for _, msg := range req.Messages {
		if msg.Role == RoleSystem {
			// If several system messages are present, the last one wins.
			out.SystemInstruction = &geminiContent{
				Parts: []geminiPart{{Text: msg.Content}},
			}
			continue
		}
		role := msg.Role
		if role == RoleAssistant {
			role = "model"
		}
		out.Contents = append(out.Contents, geminiContent{
			Role:  role,
			Parts: []geminiPart{{Text: msg.Content}},
		})
	}

	if len(req.Tools) > 0 {
		decls := make([]geminiFunctionDeclaration, 0, len(req.Tools))
		for _, tool := range req.Tools {
			decls = append(decls, geminiFunctionDeclaration{
				Name:        tool.Name,
				Description: tool.Description,
				Parameters:  tool.Parameters,
			})
		}
		out.Tools = []geminiTool{{FunctionDeclarations: decls}}
	}
	return out
}
// convertResponse flattens the first candidate into the provider-neutral
// response: text parts are concatenated, function calls become ToolCalls.
func (g *Gemini) convertResponse(resp *geminiResponse) *GenerateResponse {
	out := &GenerateResponse{}

	if len(resp.Candidates) > 0 {
		candidate := resp.Candidates[0]
		out.FinishReason = candidate.FinishReason

		var text strings.Builder
		for _, part := range candidate.Content.Parts {
			if part.Text != "" {
				text.WriteString(part.Text)
			}
			if part.FunctionCall != nil {
				argsJSON, _ := json.Marshal(part.FunctionCall.Args)
				out.ToolCalls = append(out.ToolCalls, ToolCall{
					Name:      part.FunctionCall.Name,
					Arguments: string(argsJSON),
				})
			}
		}
		out.Content = text.String()
	}

	if resp.UsageMetadata != nil {
		out.Usage = &Usage{
			PromptTokens:     resp.UsageMetadata.PromptTokenCount,
			CompletionTokens: resp.UsageMetadata.CandidatesTokenCount,
			TotalTokens:      resp.UsageMetadata.TotalTokenCount,
		}
	}
	return out
}
// buildURL assembles the REST endpoint for generateContent or
// streamGenerateContent. The API key travels as a query parameter, so avoid
// logging the resulting URL.
func (g *Gemini) buildURL(stream bool) string {
	action := "generateContent"
	if stream {
		action = "streamGenerateContent"
	}
	base := g.config.BaseURL
	// A fully-specified BaseURL may pin the wrong action for this call.
	// Swap the action instead of using the URL verbatim — previously a
	// BaseURL ending in ":generateContent" sent streaming requests to the
	// non-streaming endpoint (and vice versa).
	if strings.Contains(base, ":streamGenerateContent") {
		base = strings.Replace(base, ":streamGenerateContent", ":"+action, 1)
		return fmt.Sprintf("%s?key=%s", base, g.config.APIKey)
	}
	if strings.Contains(base, ":generateContent") {
		base = strings.Replace(base, ":generateContent", ":"+action, 1)
		return fmt.Sprintf("%s?key=%s", base, g.config.APIKey)
	}
	return fmt.Sprintf("%s/%s:%s?key=%s",
		base,
		g.config.Model,
		action,
		g.config.APIKey,
	)
}
// doRequest marshals req, POSTs it to url and returns the raw response
// body. Any HTTP status >= 400 becomes an error that includes the
// response payload for debugging.
func (g *Gemini) doRequest(ctx context.Context, url string, req *geminiRequest) ([]byte, error) {
	payload, err := json.Marshal(req)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	httpReq, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(payload))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	g.setHeaders(httpReq)
	resp, err := g.client.Do(httpReq)
	if err != nil {
		return nil, fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()
	body, readErr := io.ReadAll(resp.Body)
	if readErr != nil {
		return nil, fmt.Errorf("failed to read response: %w", readErr)
	}
	if resp.StatusCode >= 400 {
		return nil, fmt.Errorf("Gemini API error (status %d): %s", resp.StatusCode, string(body))
	}
	return body, nil
}
// setHeaders applies the JSON content type plus any user-configured
// headers to req (the API key travels in the URL, not a header).
func (g *Gemini) setHeaders(req *http.Request) {
	req.Header.Set("Content-Type", "application/json")
	for name, value := range g.config.Headers {
		req.Header.Set(name, value)
	}
}

135
aiagent/llm/helper.go Normal file
View File

@@ -0,0 +1,135 @@
package llm
import (
"context"
"strings"
)
// Chat is a convenience wrapper for one-shot chat completions: it sends
// the given messages and returns only the text content of the reply.
func Chat(ctx context.Context, llm LLM, messages []Message) (string, error) {
	req := &GenerateRequest{Messages: messages}
	resp, err := llm.Generate(ctx, req)
	if err != nil {
		return "", err
	}
	return resp.Content, nil
}
// ChatWithSystem is a convenience wrapper that pairs a system prompt
// with a single user message and returns the reply text.
func ChatWithSystem(ctx context.Context, llm LLM, systemPrompt string, userMessage string) (string, error) {
	return Chat(ctx, llm, []Message{
		{Role: RoleSystem, Content: systemPrompt},
		{Role: RoleUser, Content: userMessage},
	})
}
// NewMessage creates a message with the given role and content.
func NewMessage(role, content string) Message {
	return Message{Role: role, Content: content}
}

// SystemMessage creates a message with the system role.
func SystemMessage(content string) Message {
	return NewMessage(RoleSystem, content)
}

// UserMessage creates a message with the user role.
func UserMessage(content string) Message {
	return NewMessage(RoleUser, content)
}

// AssistantMessage creates a message with the assistant role.
func AssistantMessage(content string) Message {
	return NewMessage(RoleAssistant, content)
}
// DetectProvider guesses the provider from the base URL, falling back
// to the OpenAI-compatible provider when no known host matches.
func DetectProvider(baseURL string) string {
	u := strings.ToLower(baseURL)
	probes := []struct {
		marker   string
		provider string
	}{
		{"anthropic.com", ProviderClaude},
		{"generativelanguage.googleapis.com", ProviderGemini},
		{"aiplatform.googleapis.com", ProviderVertex},
		{"bedrock", ProviderBedrock},
		{"localhost:11434", ProviderOllama},
	}
	for _, p := range probes {
		if strings.Contains(u, p.marker) {
			return p.provider
		}
	}
	// Default to OpenAI-compatible.
	return ProviderOpenAI
}
// DetectProviderFromModel guesses the provider from the model name,
// falling back to OpenAI when no known prefix matches.
func DetectProviderFromModel(model string) string {
	m := strings.ToLower(model)
	hasAny := func(prefixes ...string) bool {
		for _, p := range prefixes {
			if strings.HasPrefix(m, p) {
				return true
			}
		}
		return false
	}
	switch {
	case hasAny("claude"):
		return ProviderClaude
	case hasAny("gemini"):
		return ProviderGemini
	case hasAny("gpt", "o1", "o3"):
		return ProviderOpenAI
	case hasAny("llama", "mistral", "qwen"):
		return ProviderOllama
	default:
		return ProviderOpenAI
	}
}
// BuildToolDefinition assembles a ToolDefinition whose Parameters field
// is a JSON-schema object built from the given properties and required
// key names.
func BuildToolDefinition(name, description string, properties map[string]interface{}, required []string) ToolDefinition {
	schema := map[string]interface{}{
		"type":       "object",
		"properties": properties,
	}
	if len(required) > 0 {
		schema["required"] = required
	}
	td := ToolDefinition{
		Name:        name,
		Description: description,
		Parameters:  schema,
	}
	return td
}
// CollectStream drains a streaming channel and folds every chunk into a
// single GenerateResponse. If any chunk carried an error, the last such
// error is returned instead of the (possibly partial) result.
func CollectStream(ch <-chan StreamChunk) (*GenerateResponse, error) {
	var (
		text         strings.Builder
		calls        []ToolCall
		finishReason string
		streamErr    error
	)
	for chunk := range ch {
		if chunk.Error != nil {
			streamErr = chunk.Error
		}
		// WriteString on "" and append of an empty slice are no-ops, so
		// the guards the original had are unnecessary.
		text.WriteString(chunk.Content)
		calls = append(calls, chunk.ToolCalls...)
		if chunk.FinishReason != "" {
			finishReason = chunk.FinishReason
		}
	}
	if streamErr != nil {
		return nil, streamErr
	}
	return &GenerateResponse{
		Content:      text.String(),
		ToolCalls:    calls,
		FinishReason: finishReason,
	}, nil
}

193
aiagent/llm/llm.go Normal file
View File

@@ -0,0 +1,193 @@
// Package llm provides a unified interface for multiple LLM providers.
// Supports OpenAI-compatible APIs, Claude/Anthropic, and Gemini.
package llm
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"net/url"
"time"
)
// Provider identifiers accepted by New.
const (
	ProviderOpenAI  = "openai"  // OpenAI and compatible APIs (Azure, vLLM, etc.)
	ProviderClaude  = "claude"  // Anthropic Claude
	ProviderGemini  = "gemini"  // Google Gemini
	ProviderOllama  = "ollama"  // Ollama local models
	ProviderBedrock = "bedrock" // AWS Bedrock
	ProviderVertex  = "vertex"  // Google Vertex AI
)

// Role constants for chat messages.
const (
	RoleSystem    = "system"
	RoleUser      = "user"
	RoleAssistant = "assistant"
)

// Message represents a chat message.
type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

// ToolCall represents a tool/function call from the LLM.
type ToolCall struct {
	ID        string `json:"id"`
	Name      string `json:"name"`
	Arguments string `json:"arguments"` // raw JSON-encoded argument object
}

// ToolDefinition defines a tool that the LLM can call.
type ToolDefinition struct {
	Name        string                 `json:"name"`
	Description string                 `json:"description"`
	Parameters  map[string]interface{} `json:"parameters,omitempty"` // JSON-schema object
}

// GenerateRequest is the unified request for LLM generation.
type GenerateRequest struct {
	Messages    []Message        `json:"messages"`
	Tools       []ToolDefinition `json:"tools,omitempty"`
	MaxTokens   int              `json:"max_tokens,omitempty"`
	Temperature float64          `json:"temperature,omitempty"`
	TopP        float64          `json:"top_p,omitempty"`
	Stop        []string         `json:"stop,omitempty"`
	Stream      bool             `json:"stream,omitempty"`
}

// GenerateResponse is the unified response from LLM generation.
type GenerateResponse struct {
	Content      string     `json:"content"`
	ToolCalls    []ToolCall `json:"tool_calls,omitempty"`
	FinishReason string     `json:"finish_reason"`
	Usage        *Usage     `json:"usage,omitempty"` // nil when the provider omits usage data
}

// Usage represents token usage statistics.
type Usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

// StreamChunk represents a chunk in a streaming response. The final
// chunk has Done set and may carry a terminal Error.
type StreamChunk struct {
	Content      string     `json:"content,omitempty"`
	ToolCalls    []ToolCall `json:"tool_calls,omitempty"`
	FinishReason string     `json:"finish_reason,omitempty"`
	Done         bool       `json:"done"`
	Error        error      `json:"error,omitempty"`
}

// LLM is the unified interface implemented by all providers.
type LLM interface {
	// Name returns the provider name.
	Name() string
	// Generate sends a request to the LLM and returns the response.
	Generate(ctx context.Context, req *GenerateRequest) (*GenerateResponse, error)
	// GenerateStream sends a request and returns a channel for streaming responses.
	GenerateStream(ctx context.Context, req *GenerateRequest) (<-chan StreamChunk, error)
}

// Config is the configuration for creating an LLM provider.
type Config struct {
	// Provider type: openai, claude, gemini, ollama, bedrock, vertex.
	Provider string `json:"provider"`
	// API endpoint URL.
	BaseURL string `json:"base_url,omitempty"`
	// API key or token.
	APIKey string `json:"api_key,omitempty"`
	// Model name (e.g., "gpt-4", "claude-3-opus", "gemini-pro").
	Model string `json:"model"`
	// Additional headers for API requests.
	Headers map[string]string `json:"headers,omitempty"`
	// HTTP timeout in milliseconds.
	Timeout int `json:"timeout,omitempty"`
	// Skip SSL verification (for self-signed certs).
	SkipSSLVerify bool `json:"skip_ssl_verify,omitempty"`
	// HTTP proxy URL.
	Proxy string `json:"proxy,omitempty"`
	// Provider-specific options.
	Options map[string]interface{} `json:"options,omitempty"`
}
// DefaultConfig returns a Config pre-populated with the OpenAI provider
// and a 60-second HTTP timeout (expressed in milliseconds).
func DefaultConfig() *Config {
	cfg := &Config{}
	cfg.Provider = ProviderOpenAI
	cfg.Timeout = 60000
	return cfg
}
// New builds an LLM client for the provider named in cfg. A nil cfg is
// replaced with DefaultConfig(); an empty provider falls back to the
// OpenAI-compatible implementation.
func New(cfg *Config) (LLM, error) {
	if cfg == nil {
		cfg = DefaultConfig()
	}
	// One HTTP client, shared by whichever provider is constructed.
	httpClient := createHTTPClient(cfg)
	switch cfg.Provider {
	case ProviderOpenAI, "":
		return NewOpenAI(cfg, httpClient)
	case ProviderClaude:
		return NewClaude(cfg, httpClient)
	case ProviderGemini:
		return NewGemini(cfg, httpClient)
	case ProviderOllama:
		// Ollama speaks the OpenAI-compatible protocol; point it at the
		// local daemon when no endpoint was configured.
		if cfg.BaseURL == "" {
			cfg.BaseURL = "http://localhost:11434/v1"
		}
		return NewOpenAI(cfg, httpClient)
	}
	return nil, fmt.Errorf("unsupported LLM provider: %s", cfg.Provider)
}
// createHTTPClient builds an *http.Client honoring the config's proxy,
// TLS-verification and timeout settings. The timeout is given in
// milliseconds and defaults to 60s when unset or non-positive.
func createHTTPClient(cfg *Config) *http.Client {
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: cfg.SkipSSLVerify,
		},
	}
	if cfg.Proxy != "" {
		// An unparsable proxy URL is silently ignored.
		proxyURL, err := url.Parse(cfg.Proxy)
		if err == nil {
			tr.Proxy = http.ProxyURL(proxyURL)
		}
	}
	ms := cfg.Timeout
	if ms <= 0 {
		ms = 60000
	}
	return &http.Client{
		Timeout:   time.Duration(ms) * time.Millisecond,
		Transport: tr,
	}
}
// ConvertMessages returns a shallow copy of messages so providers can
// rearrange the slice without mutating the caller's copy.
func ConvertMessages(messages []Message) []Message {
	out := make([]Message, len(messages))
	copy(out, messages)
	return out
}

416
aiagent/llm/openai.go Normal file
View File

@@ -0,0 +1,416 @@
package llm
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
)
const (
	// DefaultOpenAIURL is used when no BaseURL is configured.
	DefaultOpenAIURL = "https://api.openai.com/v1/chat/completions"
	// Retry settings for transient API failures.
	maxRetries       = 3
	initialRetryWait = 5 * time.Second  // initial wait after a rate limit
	maxRetryWait     = 60 * time.Second // upper bound for exponential backoff
)
// OpenAI implements the LLM interface for OpenAI and compatible APIs.
type OpenAI struct {
	config *Config      // provider configuration (endpoint, key, model)
	client *http.Client // shared HTTP client with timeout/proxy applied
}

// NewOpenAI creates a new OpenAI provider. An empty BaseURL falls back
// to the official chat-completions endpoint.
func NewOpenAI(cfg *Config, client *http.Client) (*OpenAI, error) {
	if cfg.BaseURL == "" {
		cfg.BaseURL = DefaultOpenAIURL
	}
	return &OpenAI{
		config: cfg,
		client: client,
	}, nil
}

// Name returns the provider identifier ("openai").
func (o *OpenAI) Name() string {
	return ProviderOpenAI
}
// OpenAI API request/response structures (chat-completions wire format).

// openAIRequest is the request body for /chat/completions.
type openAIRequest struct {
	Model       string          `json:"model"`
	Messages    []openAIMessage `json:"messages"`
	Tools       []openAITool    `json:"tools,omitempty"`
	MaxTokens   int             `json:"max_tokens,omitempty"`
	Temperature float64         `json:"temperature,omitempty"`
	TopP        float64         `json:"top_p,omitempty"`
	Stop        []string        `json:"stop,omitempty"`
	Stream      bool            `json:"stream,omitempty"`
}

// openAIMessage is one chat message on the wire.
type openAIMessage struct {
	Role       string           `json:"role"`
	Content    string           `json:"content,omitempty"`
	ToolCalls  []openAIToolCall `json:"tool_calls,omitempty"`
	ToolCallID string           `json:"tool_call_id,omitempty"`
}

// openAITool wraps a function declaration (Type is always "function").
type openAITool struct {
	Type     string         `json:"type"`
	Function openAIFunction `json:"function"`
}

// openAIFunction describes a callable function and its JSON-schema parameters.
type openAIFunction struct {
	Name        string                 `json:"name"`
	Description string                 `json:"description"`
	Parameters  map[string]interface{} `json:"parameters,omitempty"`
}

// openAIToolCall is a tool invocation emitted by the model.
type openAIToolCall struct {
	ID       string `json:"id"`
	Type     string `json:"type"`
	Function struct {
		Name      string `json:"name"`
		Arguments string `json:"arguments"`
	} `json:"function"`
}

// openAIResponse covers both non-streaming responses (Message) and
// streaming deltas (Delta); Error is populated on API-level failures.
type openAIResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Created int64  `json:"created"`
	Model   string `json:"model"`
	Choices []struct {
		Index        int           `json:"index"`
		Message      openAIMessage `json:"message"`
		Delta        openAIMessage `json:"delta"`
		FinishReason string        `json:"finish_reason"`
	} `json:"choices"`
	Usage *struct {
		PromptTokens     int `json:"prompt_tokens"`
		CompletionTokens int `json:"completion_tokens"`
		TotalTokens      int `json:"total_tokens"`
	} `json:"usage,omitempty"`
	Error *struct {
		Message string `json:"message"`
		Type    string `json:"type"`
		Code    string `json:"code"`
	} `json:"error,omitempty"`
}
// Generate performs a non-streaming chat completion and maps the
// provider response onto the unified GenerateResponse type.
func (o *OpenAI) Generate(ctx context.Context, req *GenerateRequest) (*GenerateResponse, error) {
	apiReq := o.convertRequest(req)
	apiReq.Stream = false

	raw, err := o.doRequest(ctx, apiReq)
	if err != nil {
		return nil, err
	}

	var apiResp openAIResponse
	if err := json.Unmarshal(raw, &apiResp); err != nil {
		return nil, fmt.Errorf("failed to parse response: %w", err)
	}
	if apiResp.Error != nil {
		return nil, fmt.Errorf("OpenAI API error: %s", apiResp.Error.Message)
	}
	if len(apiResp.Choices) == 0 {
		return nil, fmt.Errorf("no response from OpenAI")
	}
	return o.convertResponse(&apiResp), nil
}
// isRetryableStatus reports whether an HTTP status code indicates a
// transient condition (rate limiting or a server-side failure) that is
// worth retrying.
func isRetryableStatus(statusCode int) bool {
	if statusCode == http.StatusTooManyRequests { // 429
		return true
	}
	switch statusCode {
	case http.StatusInternalServerError, // 500
		http.StatusBadGateway,         // 502
		http.StatusServiceUnavailable, // 503
		http.StatusGatewayTimeout:     // 504
		return true
	}
	return false
}
// GenerateStream starts a streaming chat completion. Transient failures
// (network errors and retryable HTTP statuses) are retried with
// exponential backoff before the response body is handed to a goroutine
// that emits StreamChunk values on the returned channel.
func (o *OpenAI) GenerateStream(ctx context.Context, req *GenerateRequest) (<-chan StreamChunk, error) {
	// Convert to OpenAI format and force streaming mode.
	openAIReq := o.convertRequest(req)
	openAIReq.Stream = true
	// Create request body.
	jsonData, err := json.Marshal(openAIReq)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	var resp *http.Response
	var lastErr error
	retryWait := initialRetryWait
	// Retry loop: up to maxRetries additional attempts.
	for attempt := 0; attempt <= maxRetries; attempt++ {
		if attempt > 0 {
			// Wait before retrying; abort early if the context ends.
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			case <-time.After(retryWait):
			}
			// Exponential backoff, capped at maxRetryWait.
			retryWait *= 2
			if retryWait > maxRetryWait {
				retryWait = maxRetryWait
			}
		}
		httpReq, err := http.NewRequestWithContext(ctx, "POST", o.config.BaseURL, bytes.NewBuffer(jsonData))
		if err != nil {
			return nil, fmt.Errorf("failed to create request: %w", err)
		}
		o.setHeaders(httpReq)
		// Make request.
		resp, err = o.client.Do(httpReq)
		if err != nil {
			lastErr = fmt.Errorf("failed to send request: %w", err)
			continue // network error: retry
		}
		if resp.StatusCode >= 400 {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			lastErr = fmt.Errorf("OpenAI API error (status %d): %s", resp.StatusCode, string(body))
			// Retry only on transient statuses (429 / retryable 5xx).
			if isRetryableStatus(resp.StatusCode) && attempt < maxRetries {
				continue
			}
			return nil, lastErr
		}
		// Success: stop retrying.
		break
	}
	// resp is nil when every attempt failed with a network error.
	if resp == nil {
		return nil, lastErr
	}
	// Create the output channel and stream the body in the background;
	// streamResponse owns (and closes) both resp.Body and ch.
	ch := make(chan StreamChunk, 100)
	go o.streamResponse(ctx, resp, ch)
	return ch, nil
}
// streamResponse reads the SSE stream from resp line by line, decodes
// each "data: {...}" payload and forwards it on ch as a StreamChunk.
// It always closes ch and the response body; a final chunk with
// Done=true (carrying the terminal error, if any) is sent before
// returning.
func (o *OpenAI) streamResponse(ctx context.Context, resp *http.Response, ch chan<- StreamChunk) {
	defer close(ch)
	defer resp.Body.Close()
	reader := bufio.NewReader(resp.Body)
	for {
		// Bail out promptly if the caller cancelled the context.
		select {
		case <-ctx.Done():
			ch <- StreamChunk{Done: true, Error: ctx.Err()}
			return
		default:
		}
		line, err := reader.ReadString('\n')
		if err != nil {
			// EOF ends the stream cleanly; any other error is terminal.
			if err != io.EOF {
				ch <- StreamChunk{Done: true, Error: err}
			} else {
				ch <- StreamChunk{Done: true}
			}
			return
		}
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}
		// Only SSE data lines are relevant; skip other fields/comments.
		if !strings.HasPrefix(line, "data: ") {
			continue
		}
		data := strings.TrimPrefix(line, "data: ")
		// "[DONE]" is OpenAI's end-of-stream sentinel.
		if data == "[DONE]" {
			ch <- StreamChunk{Done: true}
			return
		}
		var streamResp openAIResponse
		if err := json.Unmarshal([]byte(data), &streamResp); err != nil {
			// Skip malformed chunks rather than aborting the stream.
			continue
		}
		if len(streamResp.Choices) > 0 {
			delta := streamResp.Choices[0].Delta
			chunk := StreamChunk{
				Content:      delta.Content,
				FinishReason: streamResp.Choices[0].FinishReason,
			}
			// Handle tool calls carried in the delta.
			if len(delta.ToolCalls) > 0 {
				for _, tc := range delta.ToolCalls {
					chunk.ToolCalls = append(chunk.ToolCalls, ToolCall{
						ID:        tc.ID,
						Name:      tc.Function.Name,
						Arguments: tc.Function.Arguments,
					})
				}
			}
			ch <- chunk
		}
	}
}
// convertRequest maps the unified GenerateRequest onto the OpenAI wire
// format, filling in the configured model name.
func (o *OpenAI) convertRequest(req *GenerateRequest) *openAIRequest {
	out := &openAIRequest{
		Model:       o.config.Model,
		MaxTokens:   req.MaxTokens,
		Temperature: req.Temperature,
		TopP:        req.TopP,
		Stop:        req.Stop,
	}
	// Messages map one-to-one.
	for _, m := range req.Messages {
		out.Messages = append(out.Messages, openAIMessage{
			Role:    m.Role,
			Content: m.Content,
		})
	}
	// Each tool becomes a "function"-typed tool declaration.
	for _, t := range req.Tools {
		fn := openAIFunction{
			Name:        t.Name,
			Description: t.Description,
			Parameters:  t.Parameters,
		}
		out.Tools = append(out.Tools, openAITool{Type: "function", Function: fn})
	}
	return out
}
// convertResponse maps the OpenAI wire response onto the unified
// GenerateResponse type, using only the first choice.
func (o *OpenAI) convertResponse(resp *openAIResponse) *GenerateResponse {
	out := &GenerateResponse{}
	if len(resp.Choices) > 0 {
		first := resp.Choices[0]
		out.Content = first.Message.Content
		out.FinishReason = first.FinishReason
		for _, tc := range first.Message.ToolCalls {
			out.ToolCalls = append(out.ToolCalls, ToolCall{
				ID:        tc.ID,
				Name:      tc.Function.Name,
				Arguments: tc.Function.Arguments,
			})
		}
	}
	if u := resp.Usage; u != nil {
		out.Usage = &Usage{
			PromptTokens:     u.PromptTokens,
			CompletionTokens: u.CompletionTokens,
			TotalTokens:      u.TotalTokens,
		}
	}
	return out
}
// doRequest marshals req, POSTs it to the configured BaseURL and
// returns the raw response body. Transient failures (network errors,
// unreadable bodies and retryable HTTP statuses) are retried with
// exponential backoff capped at maxRetryWait.
func (o *OpenAI) doRequest(ctx context.Context, req *openAIRequest) ([]byte, error) {
	jsonData, err := json.Marshal(req)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	var lastErr error
	retryWait := initialRetryWait
	// Retry loop: up to maxRetries additional attempts.
	for attempt := 0; attempt <= maxRetries; attempt++ {
		if attempt > 0 {
			// Wait before retrying; abort early if the context ends.
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			case <-time.After(retryWait):
			}
			// Exponential backoff, capped at maxRetryWait.
			retryWait *= 2
			if retryWait > maxRetryWait {
				retryWait = maxRetryWait
			}
		}
		httpReq, err := http.NewRequestWithContext(ctx, "POST", o.config.BaseURL, bytes.NewBuffer(jsonData))
		if err != nil {
			return nil, fmt.Errorf("failed to create request: %w", err)
		}
		o.setHeaders(httpReq)
		resp, err := o.client.Do(httpReq)
		if err != nil {
			lastErr = fmt.Errorf("failed to send request: %w", err)
			continue // network error: retry
		}
		body, err := io.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			lastErr = fmt.Errorf("failed to read response: %w", err)
			continue
		}
		if resp.StatusCode >= 400 {
			lastErr = fmt.Errorf("OpenAI API error (status %d): %s", resp.StatusCode, string(body))
			// Retry only on transient statuses (429 / retryable 5xx).
			if isRetryableStatus(resp.StatusCode) && attempt < maxRetries {
				continue
			}
			return nil, lastErr
		}
		return body, nil
	}
	return nil, lastErr
}
// setHeaders applies the JSON content type, bearer authorization (when
// an API key is configured) and any user-supplied headers to req.
func (o *OpenAI) setHeaders(req *http.Request) {
	req.Header.Set("Content-Type", "application/json")
	if key := o.config.APIKey; key != "" {
		req.Header.Set("Authorization", "Bearer "+key)
	}
	for name, value := range o.config.Headers {
		req.Header.Set(name, value)
	}
}

133
aiagent/llm/prompt.go Normal file
View File

@@ -0,0 +1,133 @@
package llm
import (
"fmt"
"runtime"
"strings"
"time"
)
// ToolInfo describes a tool for prompt construction.
type ToolInfo struct {
	Name        string
	Description string
	Parameters  []ToolParamInfo
}

// ToolParamInfo describes a single tool parameter.
type ToolParamInfo struct {
	Name        string
	Type        string
	Description string
	Required    bool
}

// PromptData carries template data for prompt rendering.
type PromptData struct {
	Platform string // operating system
	Date     string // current date
}
// BuildToolsSection renders a detailed markdown section describing each
// tool and its parameters; it returns "" when no tools are given.
func BuildToolsSection(tools []ToolInfo) string {
	if len(tools) == 0 {
		return ""
	}
	var b strings.Builder
	b.WriteString("## Available Tools\n\n")
	for _, t := range tools {
		b.WriteString(fmt.Sprintf("### %s\n", t.Name))
		b.WriteString(fmt.Sprintf("%s\n", t.Description))
		if len(t.Parameters) > 0 {
			b.WriteString("Parameters:\n")
			for _, p := range t.Parameters {
				suffix := ""
				if p.Required {
					suffix = " (required)"
				}
				b.WriteString(fmt.Sprintf("- %s (%s)%s: %s\n", p.Name, p.Type, suffix, p.Description))
			}
		}
		b.WriteString("\n")
	}
	return b.String()
}
// BuildToolsListBrief renders a compact one-line-per-tool list (used in
// Plan mode); it returns "" when no tools are given.
func BuildToolsListBrief(tools []ToolInfo) string {
	if len(tools) == 0 {
		return ""
	}
	var b strings.Builder
	b.WriteString("## Available Tools\n\n")
	for _, t := range tools {
		fmt.Fprintf(&b, "- **%s**: %s\n", t.Name, t.Description)
	}
	return b.String()
}
// BuildEnvSection 构建环境信息段落
func BuildEnvSection() string {
var sb strings.Builder
sb.WriteString("## Environment\n\n")
sb.WriteString(fmt.Sprintf("- Platform: %s\n", runtime.GOOS))
sb.WriteString(fmt.Sprintf("- Date: %s\n", time.Now().Format("2006-01-02")))
return sb.String()
}
// BuildSkillsSection 构建技能指导段落
func BuildSkillsSection(skillContents []string) string {
if len(skillContents) == 0 {
return ""
}
var sb strings.Builder
sb.WriteString("## 专项技能指导\n\n")
if len(skillContents) == 1 {
sb.WriteString("你已被加载以下专项技能,请参考技能中的流程:\n\n")
sb.WriteString(skillContents[0])
sb.WriteString("\n\n")
} else {
sb.WriteString("你已被加载以下专项技能,请参考技能中的流程来制定执行计划:\n\n")
for i, content := range skillContents {
sb.WriteString(fmt.Sprintf("### 技能 %d\n\n", i+1))
sb.WriteString(content)
sb.WriteString("\n\n")
}
}
return sb.String()
}
// BuildPreviousFindingsSection renders prior findings as a markdown
// bullet list; it returns "" when there are none.
func BuildPreviousFindingsSection(findings []string) string {
	if len(findings) == 0 {
		return ""
	}
	var b strings.Builder
	b.WriteString("## Previous Findings\n\n")
	for _, finding := range findings {
		b.WriteString("- ")
		b.WriteString(finding)
		b.WriteString("\n")
	}
	b.WriteString("\n")
	return b.String()
}
// BuildCurrentStepSection renders the goal/approach block for the step
// currently being executed.
func BuildCurrentStepSection(goal, approach string) string {
	return "## Current Step\n\n" +
		fmt.Sprintf("**Goal**: %s\n", goal) +
		fmt.Sprintf("**Approach**: %s\n\n", approach)
}

571
aiagent/mcp.go Normal file
View File

@@ -0,0 +1,571 @@
package aiagent
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"os/exec"
"strings"
"sync"
"time"
"github.com/modelcontextprotocol/go-sdk/mcp"
"github.com/toolkits/pkg/logger"
)
const (
	// MCP transport types.
	MCPTransportStdio = "stdio" // standard input/output transport
	MCPTransportSSE   = "sse"   // HTTP Server-Sent Events transport
	// Default timeouts, in milliseconds.
	DefaultMCPTimeout        = 30000 // 30s for tool calls
	DefaultMCPConnectTimeout = 10000 // 10s for establishing a connection
)
// MCPConfig is the MCP server configuration (used in AIAgentConfig).
type MCPConfig struct {
	// List of MCP servers.
	Servers []MCPServerConfig `json:"servers"`
}

// MCPServerConfig configures a single MCP server.
type MCPServerConfig struct {
	// Server name (unique identifier).
	Name string `json:"name"`
	// Transport type: stdio or sse.
	Transport string `json:"transport"`
	// === stdio transport settings ===
	Command string            `json:"command,omitempty"` // launch command
	Args    []string          `json:"args,omitempty"`    // command arguments
	Env     map[string]string `json:"env,omitempty"`     // environment variables (${VAR} expands from the process env)
	// === SSE transport settings ===
	URL           string            `json:"url,omitempty"`             // SSE server URL
	Headers       map[string]string `json:"headers,omitempty"`         // request headers (${VAR} expands from the process env)
	SkipSSLVerify bool              `json:"skip_ssl_verify,omitempty"` // skip TLS certificate verification
	// === auth settings (SSE transport) ===
	// Convenience auth settings; the matching header is set automatically.
	AuthType string `json:"auth_type,omitempty"` // auth type: bearer, api_key, basic
	APIKey   string `json:"api_key,omitempty"`   // API key (${VAR} expands from the process env)
	Username string `json:"username,omitempty"`  // basic-auth username
	Password string `json:"password,omitempty"`  // basic-auth password (supports ${VAR})
	// Common settings.
	Timeout        int `json:"timeout,omitempty"`         // tool-call timeout (ms)
	ConnectTimeout int `json:"connect_timeout,omitempty"` // connect timeout (ms)
}

// MCPToolConfig references an MCP tool (used in AgentTool).
type MCPToolConfig struct {
	// MCP server name (refers to an entry in MCPConfig.Servers).
	ServerName string `json:"server_name"`
	// Tool name as reported by the MCP server.
	ToolName string `json:"tool_name"`
}

// MCPTool is the internal representation of an MCP tool definition.
type MCPTool struct {
	Name        string                 `json:"name"`
	Description string                 `json:"description,omitempty"`
	InputSchema map[string]interface{} `json:"inputSchema,omitempty"`
}

// MCPToolsCallResult is the result of a tool invocation.
type MCPToolsCallResult struct {
	Content []MCPContent `json:"content"`
	IsError bool         `json:"isError,omitempty"`
}

// MCPContent is one piece of content returned by a tool.
type MCPContent struct {
	Type     string `json:"type"`
	Text     string `json:"text,omitempty"`
	Data     string `json:"data,omitempty"`
	MimeType string `json:"mimeType,omitempty"`
}

// MCPClient is an MCP client built on the official go-sdk.
type MCPClient struct {
	config *MCPServerConfig
	// SDK client and session (stdio transport).
	client  *mcp.Client
	session *mcp.ClientSession
	// SSE transport (the SDK has no SSE client yet, so a custom
	// implementation is kept).
	httpClient *http.Client
	sseURL     string
	// Shared state.
	mu          sync.Mutex
	initialized bool
	tools       []MCPTool // cached tool list
}
// expandEnvVars expands ${VAR} and $VAR references in s from the
// process environment (delegates to os.ExpandEnv; unset variables
// expand to the empty string).
func expandEnvVars(s string) string {
	return os.ExpandEnv(s)
}
// NewMCPClient constructs an MCP client for the given server config.
// The connection itself is established lazily via Connect.
func NewMCPClient(config *MCPServerConfig) (*MCPClient, error) {
	c := &MCPClient{config: config}
	return c, nil
}
// Connect establishes the connection to the MCP server according to the
// configured transport. It is idempotent: an already-initialized client
// returns immediately.
func (c *MCPClient) Connect(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.initialized {
		return nil
	}
	switch c.config.Transport {
	case MCPTransportStdio:
		if err := c.connectStdio(ctx); err != nil {
			return err
		}
	case MCPTransportSSE:
		if err := c.connectSSE(ctx); err != nil {
			return err
		}
	default:
		return fmt.Errorf("unsupported MCP transport: %s", c.config.Transport)
	}
	c.initialized = true
	return nil
}
// connectStdio launches the configured command and connects over stdio
// using the official SDK's CommandTransport. The SDK performs the MCP
// "initialize" handshake as part of Connect.
func (c *MCPClient) connectStdio(ctx context.Context) error {
	if c.config.Command == "" {
		return fmt.Errorf("stdio transport requires command")
	}
	// Build the child-process environment: inherit the current process
	// env and append configured variables (values may use ${VAR}).
	env := os.Environ()
	for k, v := range c.config.Env {
		expandedValue := expandEnvVars(v)
		env = append(env, fmt.Sprintf("%s=%s", k, expandedValue))
	}
	// Create the exec.Cmd for the MCP server process.
	cmd := exec.CommandContext(ctx, c.config.Command, c.config.Args...)
	cmd.Env = env
	// Use the official SDK's stdio CommandTransport.
	transport := &mcp.CommandTransport{
		Command: cmd,
	}
	// Create the MCP client.
	c.client = mcp.NewClient(
		&mcp.Implementation{
			Name:    "nightingale-aiagent",
			Version: "1.0.0",
		},
		nil,
	)
	// Connect and initialize (Connect performs the initialize handshake).
	session, err := c.client.Connect(ctx, transport, nil)
	if err != nil {
		return fmt.Errorf("failed to connect MCP client: %v", err)
	}
	c.session = session
	logger.Infof("MCP stdio server started: %s", c.config.Name)
	return nil
}
// connectSSE prepares the HTTP client used for the SSE transport (the
// SDK has no SSE client yet, so requests go through the custom JSON-RPC
// implementation). No network traffic happens here.
func (c *MCPClient) connectSSE(ctx context.Context) error {
	if c.config.URL == "" {
		return fmt.Errorf("SSE transport requires URL")
	}
	connectTimeout := c.config.ConnectTimeout
	if connectTimeout <= 0 {
		connectTimeout = DefaultMCPConnectTimeout
	}
	c.httpClient = &http.Client{
		Timeout: time.Duration(connectTimeout) * time.Millisecond,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: c.config.SkipSSLVerify},
		},
	}
	c.sseURL = c.config.URL
	logger.Infof("MCP SSE client configured: %s", c.config.Name)
	return nil
}
// ListTools returns the server's tool list, caching the first
// successful result for subsequent calls.
//
// NOTE(review): an unknown transport falls through the switch and
// returns (nil, nil) — callers see an empty tool list, not an error.
func (c *MCPClient) ListTools(ctx context.Context) ([]MCPTool, error) {
	// Serve from the cache when a previous call already fetched the list.
	c.mu.Lock()
	if len(c.tools) > 0 {
		tools := c.tools
		c.mu.Unlock()
		return tools, nil
	}
	c.mu.Unlock()
	var tools []MCPTool
	switch c.config.Transport {
	case MCPTransportStdio:
		// Use the official SDK session.
		if c.session == nil {
			return nil, fmt.Errorf("MCP session not initialized")
		}
		result, err := c.session.ListTools(ctx, nil)
		if err != nil {
			return nil, fmt.Errorf("failed to list tools: %v", err)
		}
		// Convert SDK tools to the internal representation.
		for _, tool := range result.Tools {
			inputSchema := make(map[string]interface{})
			if tool.InputSchema != nil {
				// Round-trip the SDK's InputSchema through JSON to get a
				// plain map; a failed round trip leaves the schema empty.
				schemaBytes, err := json.Marshal(tool.InputSchema)
				if err == nil {
					json.Unmarshal(schemaBytes, &inputSchema)
				}
			}
			tools = append(tools, MCPTool{
				Name:        tool.Name,
				Description: tool.Description,
				InputSchema: inputSchema,
			})
		}
	case MCPTransportSSE:
		// Use the custom HTTP/JSON-RPC implementation.
		var err error
		tools, err = c.listToolsSSE(ctx)
		if err != nil {
			return nil, err
		}
	}
	// Cache the result for future calls.
	c.mu.Lock()
	c.tools = tools
	c.mu.Unlock()
	return tools, nil
}
// listToolsSSE fetches the tool list over the SSE transport via a
// JSON-RPC "tools/list" request.
func (c *MCPClient) listToolsSSE(ctx context.Context) ([]MCPTool, error) {
	req := map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      1,
		"method":  "tools/list",
	}
	resp, err := c.sendSSERequest(ctx, req)
	if err != nil {
		return nil, err
	}
	// Surface JSON-RPC level errors instead of silently returning an
	// empty tool list (mirrors the handling in callToolSSE).
	if errObj, ok := resp["error"].(map[string]interface{}); ok {
		return nil, fmt.Errorf("MCP error: %v", errObj["message"])
	}
	resultBytes, _ := json.Marshal(resp["result"])
	var result struct {
		Tools []MCPTool `json:"tools"`
	}
	if err := json.Unmarshal(resultBytes, &result); err != nil {
		return nil, fmt.Errorf("failed to parse tools list: %v", err)
	}
	return result.Tools, nil
}
// CallTool invokes a named tool with the given arguments, dispatching
// on the configured transport.
func (c *MCPClient) CallTool(ctx context.Context, name string, arguments map[string]interface{}) (*MCPToolsCallResult, error) {
	if c.config.Transport == MCPTransportStdio {
		return c.callToolStdio(ctx, name, arguments)
	}
	if c.config.Transport == MCPTransportSSE {
		return c.callToolSSE(ctx, name, arguments)
	}
	return nil, fmt.Errorf("unsupported transport: %s", c.config.Transport)
}
// callToolStdio invokes a tool through the SDK session (stdio
// transport) and converts the SDK result content into MCPContent
// values.
func (c *MCPClient) callToolStdio(ctx context.Context, name string, arguments map[string]interface{}) (*MCPToolsCallResult, error) {
	if c.session == nil {
		return nil, fmt.Errorf("MCP session not initialized")
	}
	// Invoke the tool.
	result, err := c.session.CallTool(ctx, &mcp.CallToolParams{
		Name:      name,
		Arguments: arguments,
	})
	if err != nil {
		return nil, fmt.Errorf("tool call failed: %v", err)
	}
	// Convert the SDK result into the internal representation.
	mcpResult := &MCPToolsCallResult{
		IsError: result.IsError,
	}
	for _, content := range result.Content {
		mc := MCPContent{}
		// Extract by concrete content type. Note: the switch variable c
		// shadows the method receiver inside the cases.
		switch c := content.(type) {
		case *mcp.TextContent:
			mc.Type = "text"
			mc.Text = c.Text
		case *mcp.ImageContent:
			mc.Type = "image"
			mc.Data = string(c.Data)
			mc.MimeType = c.MIMEType
		case *mcp.AudioContent:
			mc.Type = "audio"
			mc.Data = string(c.Data)
			mc.MimeType = c.MIMEType
		case *mcp.EmbeddedResource:
			mc.Type = "resource"
			if c.Resource != nil {
				if c.Resource.Text != "" {
					mc.Text = c.Resource.Text
				} else if c.Resource.Blob != nil {
					mc.Data = string(c.Resource.Blob)
				}
				mc.MimeType = c.Resource.MIMEType
			}
		case *mcp.ResourceLink:
			mc.Type = "resource_link"
			mc.Text = c.URI
		default:
			// Unknown content type: fall back to its JSON encoding.
			if data, err := json.Marshal(content); err == nil {
				mc.Type = "unknown"
				mc.Text = string(data)
			}
		}
		mcpResult.Content = append(mcpResult.Content, mc)
	}
	return mcpResult, nil
}
// callToolSSE invokes a tool over the SSE transport via a JSON-RPC
// "tools/call" request, surfacing JSON-RPC errors as Go errors.
func (c *MCPClient) callToolSSE(ctx context.Context, name string, arguments map[string]interface{}) (*MCPToolsCallResult, error) {
	rpc := map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      1,
		"method":  "tools/call",
		"params": map[string]interface{}{
			"name":      name,
			"arguments": arguments,
		},
	}
	resp, err := c.sendSSERequest(ctx, rpc)
	if err != nil {
		return nil, err
	}
	if errObj, ok := resp["error"].(map[string]interface{}); ok {
		return nil, fmt.Errorf("MCP error: %v", errObj["message"])
	}
	raw, _ := json.Marshal(resp["result"])
	result := &MCPToolsCallResult{}
	if err := json.Unmarshal(raw, result); err != nil {
		return nil, fmt.Errorf("failed to parse tool call result: %v", err)
	}
	return result, nil
}
// setAuthHeaders applies the configured authentication scheme to req.
// Credential values may reference environment variables via ${VAR}.
// An empty or unrecognized auth type with an API key falls back to a
// bearer token.
func (c *MCPClient) setAuthHeaders(req *http.Request) {
	cfg := c.config
	if cfg.AuthType == "" && cfg.APIKey == "" {
		return // nothing configured
	}
	apiKey := expandEnvVars(cfg.APIKey)
	switch strings.ToLower(cfg.AuthType) {
	case "api_key", "apikey":
		if apiKey != "" {
			req.Header.Set("X-API-Key", apiKey)
		}
	case "basic":
		if user := expandEnvVars(cfg.Username); user != "" {
			req.SetBasicAuth(user, expandEnvVars(cfg.Password))
		}
	default: // "bearer" and any unknown type use a bearer token
		if apiKey != "" {
			req.Header.Set("Authorization", "Bearer "+apiKey)
		}
	}
}
// sendSSERequest POSTs a JSON-RPC payload to the server's "message"
// endpoint and decodes the JSON reply. User-configured headers are
// applied last, so they can override the auth headers.
func (c *MCPClient) sendSSERequest(ctx context.Context, req map[string]interface{}) (map[string]interface{}, error) {
	endpoint := c.sseURL
	if !strings.HasSuffix(endpoint, "/") {
		endpoint += "/"
	}
	endpoint += "message"
	if _, err := url.Parse(endpoint); err != nil {
		return nil, fmt.Errorf("invalid URL: %v", err)
	}
	payload, err := json.Marshal(req)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %v", err)
	}
	httpReq, err := http.NewRequestWithContext(ctx, "POST", endpoint, bytes.NewBuffer(payload))
	if err != nil {
		return nil, fmt.Errorf("failed to create HTTP request: %v", err)
	}
	httpReq.Header.Set("Content-Type", "application/json")
	c.setAuthHeaders(httpReq)
	for name, value := range c.config.Headers {
		httpReq.Header.Set(name, expandEnvVars(value))
	}
	resp, err := c.httpClient.Do(httpReq)
	if err != nil {
		return nil, fmt.Errorf("HTTP request failed: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("HTTP error %d: %s", resp.StatusCode, string(body))
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %v", err)
	}
	var result map[string]interface{}
	if err := json.Unmarshal(body, &result); err != nil {
		return nil, fmt.Errorf("failed to parse response: %v", err)
	}
	return result, nil
}
// Close shuts down the MCP session and resets all connection state so a
// later Connect starts from a clean slate.
//
// Fix: previously the cached tool list and the SSE HTTP client survived
// Close, so a client that was closed and reconnected could serve stale
// tool metadata from the old session.
func (c *MCPClient) Close() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.session != nil {
		c.session.Close()
		c.session = nil
	}
	c.client = nil
	c.httpClient = nil
	c.tools = nil // drop the cached tool list so a reconnect re-fetches it
	c.initialized = false
	logger.Infof("MCP client closed: %s", c.config.Name)
	return nil
}
// MCPClientManager owns one connected MCPClient per configured server name.
type MCPClientManager struct {
	clients map[string]*MCPClient // server name -> connected client
	mu      sync.RWMutex          // guards clients
}
// NewMCPClientManager returns an empty manager ready to create clients on demand.
func NewMCPClientManager() *MCPClientManager {
	m := new(MCPClientManager)
	m.clients = make(map[string]*MCPClient)
	return m
}
// GetOrCreateClient returns the cached client for config.Name, creating and
// connecting a new one on first use. Creation is serialized with
// double-checked locking so concurrent callers share a single client.
//
// Note: the write lock is held across Connect, so a slow connection blocks
// other GetOrCreateClient calls until it completes or fails.
func (m *MCPClientManager) GetOrCreateClient(ctx context.Context, config *MCPServerConfig) (*MCPClient, error) {
	// Fast path: client already exists.
	m.mu.RLock()
	client, ok := m.clients[config.Name]
	m.mu.RUnlock()
	if ok {
		return client, nil
	}

	m.mu.Lock()
	defer m.mu.Unlock()

	// Re-check under the write lock (double-checked locking).
	if client, ok := m.clients[config.Name]; ok {
		return client, nil
	}

	client, err := NewMCPClient(config)
	if err != nil {
		return nil, err
	}

	if err := client.Connect(ctx); err != nil {
		// Release whatever the constructor acquired; otherwise a failed
		// connection leaks the partially initialized client.
		_ = client.Close()
		return nil, err
	}

	m.clients[config.Name] = client
	return client, nil
}
// CloseAll closes every managed client and resets the pool to empty.
// Close failures are logged and do not stop the remaining clients from
// being closed.
func (m *MCPClientManager) CloseAll() {
	m.mu.Lock()
	defer m.mu.Unlock()

	for name, cli := range m.clients {
		if cerr := cli.Close(); cerr != nil {
			logger.Warningf("Failed to close MCP client %s: %v", name, cerr)
		}
	}
	m.clients = make(map[string]*MCPClient)
}

30
aiagent/prompts/embed.go Normal file
View File

@@ -0,0 +1,30 @@
package prompts
import (
_ "embed"
)
// ReactSystemPrompt is the system prompt for ReAct mode.
//
//go:embed react_system.md
var ReactSystemPrompt string

// PlanSystemPrompt is the system prompt for the planning phase of
// Plan+ReAct mode.
//
//go:embed plan_system.md
var PlanSystemPrompt string

// StepExecutionPrompt guides the execution of a single plan step.
//
//go:embed step_execution.md
var StepExecutionPrompt string

// SynthesisPrompt guides synthesis of step results into a final answer.
//
//go:embed synthesis.md
var SynthesisPrompt string

// UserDefaultTemplate is the default template for the user prompt.
//
//go:embed user_default.md
var UserDefaultTemplate string

View File

@@ -0,0 +1,65 @@
You are an intelligent AI Agent capable of analyzing tasks, creating execution plans, and solving complex problems.
Your role is to understand user requests and create structured, actionable execution plans.
## Core Capabilities
- **Alert Analysis**: Analyze alerts, investigate root causes, correlate events
- **Data Analysis**: Analyze batch data, identify patterns, generate insights
- **SQL Generation**: Convert natural language to SQL queries
- **General Problem Solving**: Break down complex tasks into actionable steps
## Planning Principles
1. **Understand First**: Carefully analyze what the user is asking for
2. **Identify Key Areas**: Determine which domains, systems, or aspects are involved
3. **Create Logical Steps**: Order steps by priority or logical sequence
4. **Be Specific**: Each step should have a clear goal and concrete approach
5. **Reference Tools**: Consider available tools when designing your approach
## Response Format
You must respond in the following JSON format:
```json
{
"task_summary": "Brief summary of the input/request",
"goal": "The overall goal of this task",
"focus_areas": ["area1", "area2", "area3"],
"steps": [
{
"step_number": 1,
"goal": "What to accomplish in this step",
"approach": "How to accomplish it (which tools/methods to use)"
},
{
"step_number": 2,
"goal": "...",
"approach": "..."
}
]
}
```
## Focus Areas by Task Type
**Alert/Incident Analysis:**
- Network: latency, packet loss, DNS resolution
- Database: query performance, connections, locks, replication
- Application: error rates, response times, resource usage
- Infrastructure: CPU, memory, disk I/O, network throughput
**Batch Alert Analysis:**
- Pattern recognition: common labels, time correlation
- Aggregation: group by severity, source, category
- Trend analysis: frequency, escalation patterns
**SQL Generation:**
- Schema understanding: tables, columns, relationships
- Query optimization: indexes, join strategies
- Data validation: constraints, data types
**General Analysis:**
- Data collection: gather relevant information
- Processing: transform, filter, aggregate
- Output: format results appropriately

View File

@@ -0,0 +1,42 @@
You are an intelligent AI Agent capable of analyzing tasks, creating execution plans, and solving complex problems.
Your capabilities include but are not limited to:
- **Root Cause Analysis**: Analyze alerts, investigate incidents, identify root causes
- **Data Analysis**: Query and analyze metrics, logs, traces, and other data sources
- **SQL Generation**: Convert natural language queries to SQL statements
- **Information Synthesis**: Summarize and extract insights from complex data
- **Content Generation**: Generate titles, summaries, and structured reports
## Core Principles
1. **Systematic Analysis**: Gather sufficient information before making conclusions
2. **Evidence-Based**: Support conclusions with specific data from tool outputs
3. **Tool Efficiency**: Use tools wisely, avoid redundant calls
4. **Clear Communication**: Keep responses focused and actionable
5. **Adaptability**: Adjust your approach based on the task type
## Response Format
You must respond in the following format:
```
Thought: [Your reasoning about the current situation and what to do next]
Action: [The tool name to use, or 'Final Answer' if you have enough information]
Action Input: [The input to the action - for tools, provide JSON parameters; for Final Answer, provide your result]
```
## Task Guidelines
1. **Understand the request**: Carefully analyze what the user is asking for
2. **Choose appropriate tools**: Select tools that best fit the task requirements
3. **Iterate as needed**: Gather additional information if initial results are insufficient
4. **Validate results**: Verify your conclusions before providing the final answer
5. **Be concise**: Provide clear, well-structured responses
## Final Answer Requirements
Your Final Answer should:
- Directly address the user's request
- Be well-structured and easy to understand
- Include supporting evidence or reasoning when applicable
- Provide actionable recommendations if relevant

View File

@@ -0,0 +1,35 @@
You are an intelligent AI Agent executing a specific step as part of a larger execution plan.
## Your Task
Focus on completing the current step efficiently and thoroughly. Use the available tools to gather information, process data, or generate results as needed to achieve the step's goal.
## Response Format
Respond in this format:
```
Thought: [Your reasoning about what to do for this step]
Action: [Tool name or 'Step Complete' when done]
Action Input: [Tool parameters as JSON, or step summary for 'Step Complete']
```
## Step Execution Guidelines
1. **Stay Focused**: Only work on the current step's goal
2. **Be Thorough**: Gather enough information to achieve the goal
3. **Document Progress**: Note important findings in your thoughts
4. **Know When to Stop**: Complete the step when you have sufficient results
5. **Handle Failures**: If a tool fails, try alternatives or note the limitation
## When to Mark Step Complete
Mark the step as complete when:
- You have achieved the step's goal
- You have gathered sufficient information or generated the required output
- Further work would be outside the step's scope
Your step summary should include:
- Key results or findings relevant to the step's goal
- Tools used and their outputs
- Any limitations or issues encountered

View File

@@ -0,0 +1,37 @@
You are an intelligent AI Agent synthesizing results from multiple execution steps into a comprehensive final output.
## Your Task
Review all the results from the completed steps and provide a unified, well-structured response that addresses the original request.
## Response Guidelines
Based on the task type, structure your response appropriately:
**For Root Cause Analysis:**
- Summary of the root cause
- Supporting evidence from investigation
- Impact assessment
- Recommended actions
**For Data Analysis / SQL Generation:**
- Query results or generated SQL
- Key insights from the data
- Any caveats or limitations
**For Information Synthesis:**
- Structured summary of findings
- Key insights and patterns
- Relevant conclusions
**For Content Generation:**
- Generated content (title, summary, etc.)
- Alternative options if applicable
## Synthesis Principles
1. **Integrate Results**: Combine findings from all completed steps coherently
2. **Prioritize Relevance**: Focus on the most important information
3. **Be Structured**: Organize output in a clear, logical format
4. **Be Concise**: Avoid unnecessary verbosity while ensuring completeness
5. **Address the Request**: Ensure the final output directly answers the original task

View File

@@ -0,0 +1,7 @@
## Alert Information
{{.AlertContent}}
## Analysis Request
Please analyze this alert and identify the root cause. Provide evidence-based conclusions and actionable recommendations.

573
aiagent/skill.go Normal file
View File

@@ -0,0 +1,573 @@
package aiagent
import (
"bufio"
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/toolkits/pkg/logger"
"gopkg.in/yaml.v3"
)
const (
	// SkillFileName is the main definition file inside each skill directory.
	SkillFileName = "SKILL.md"
	// SkillToolsDir is the subdirectory holding per-skill tool definitions.
	SkillToolsDir = "skill_tools"
	// DefaultMaxSkills is the default cap on how many skills the LLM may select.
	DefaultMaxSkills = 2
)
// SkillConfig configures skill selection for an agent (used in AIAgentConfig).
// The skills directory itself comes from the global Plus.AIAgentSkillsPath.
// Selection priority: SkillNames > LLM auto-selection > DefaultSkills.
type SkillConfig struct {
	AutoSelect    bool     `json:"auto_select,omitempty"`    // let the LLM pick skills (default true)
	SkillNames    []string `json:"skill_names,omitempty"`    // explicit skill list (manual mode)
	MaxSkills     int      `json:"max_skills,omitempty"`     // max skills the LLM may pick (default 2)
	DefaultSkills []string `json:"default_skills,omitempty"` // fallback when the LLM cannot choose
}

// SkillMetadata is the Level 1 view of a skill: always kept in memory.
type SkillMetadata struct {
	// Core fields (aligned with Anthropic's official skill format).
	Name        string `yaml:"name" json:"name"`
	Description string `yaml:"description" json:"description"`
	// Optional extension fields.
	RecommendedTools []string `yaml:"recommended_tools,omitempty" json:"recommended_tools,omitempty"`
	BuiltinTools     []string `yaml:"builtin_tools,omitempty" json:"builtin_tools,omitempty"` // builtin tool names
	// Internal bookkeeping (not serialized).
	Path     string    `json:"-"` // skill directory path
	LoadedAt time.Time `json:"-"` // when the metadata was loaded
}

// SkillContent is the Level 2 view: metadata plus the SKILL.md body,
// loaded when a skill is matched.
type SkillContent struct {
	Metadata    *SkillMetadata `json:"metadata"`
	MainContent string         `json:"main_content"` // SKILL.md body (frontmatter stripped)
}

// SkillTool is a skill-scoped tool definition (Level 3, loaded on demand).
type SkillTool struct {
	Name        string                 `yaml:"name" json:"name"`               // tool name
	Type        string                 `yaml:"type" json:"type"`               // handler type: annotation_qd, script, callback, ...
	Description string                 `yaml:"description" json:"description"` // tool description
	Config      map[string]interface{} `yaml:"config" json:"config"`           // handler configuration
	// Optional parameter definitions.
	Parameters []ToolParameter `yaml:"parameters,omitempty" json:"parameters,omitempty"`
}

// SkillResources bundles a skill's extended resources (Level 3, on demand).
type SkillResources struct {
	SkillTools map[string]*SkillTool `json:"skill_tools"` // tool name -> definition
	References map[string]string     `json:"references"`  // reference file name -> content
}

// SkillRegistry indexes skills on disk and caches their content and tools.
type SkillRegistry struct {
	skillsPath   string                           // root directory containing skills
	skills       map[string]*SkillMetadata        // name -> metadata
	contentCache map[string]*SkillContent         // name -> content cache
	toolsCache   map[string]map[string]*SkillTool // skillName -> toolName -> tool
	mu           sync.RWMutex                     // guards all maps above
}
// NewSkillRegistry builds a registry rooted at skillsPath and eagerly loads
// metadata for every skill found there. Load failures are logged, not fatal.
func NewSkillRegistry(skillsPath string) *SkillRegistry {
	r := &SkillRegistry{
		skillsPath:   skillsPath,
		skills:       map[string]*SkillMetadata{},
		contentCache: map[string]*SkillContent{},
		toolsCache:   map[string]map[string]*SkillTool{},
	}
	if err := r.loadAllMetadata(); err != nil {
		logger.Warningf("Failed to load skill metadata: %v", err)
	}
	return r
}
// loadAllMetadata scans the skills directory and loads frontmatter metadata
// (Level 1) for every subdirectory containing a SKILL.md. A missing or empty
// directory is not an error; individual load failures are logged and skipped.
func (r *SkillRegistry) loadAllMetadata() error {
	if r.skillsPath == "" {
		return nil
	}
	// Nothing to do if the directory does not exist yet.
	if _, err := os.Stat(r.skillsPath); os.IsNotExist(err) {
		logger.Debugf("Skills directory does not exist: %s", r.skillsPath)
		return nil
	}
	// Walk the top-level entries of the skills directory.
	entries, err := os.ReadDir(r.skillsPath)
	if err != nil {
		return fmt.Errorf("failed to read skills directory: %v", err)
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	for _, entry := range entries {
		if !entry.IsDir() {
			continue
		}
		skillPath := filepath.Join(r.skillsPath, entry.Name())
		skillFile := filepath.Join(skillPath, SkillFileName)
		// Only directories containing a SKILL.md count as skills.
		if _, err := os.Stat(skillFile); os.IsNotExist(err) {
			continue
		}
		// Parse the YAML frontmatter into metadata.
		metadata, err := r.loadMetadataFromFile(skillFile)
		if err != nil {
			logger.Warningf("Failed to load skill metadata from %s: %v", skillFile, err)
			continue
		}
		metadata.Path = skillPath
		metadata.LoadedAt = time.Now()
		// A later directory declaring a duplicate name overwrites the earlier one.
		r.skills[metadata.Name] = metadata
		logger.Debugf("Loaded skill metadata: %s from %s", metadata.Name, skillPath)
	}
	logger.Infof("Loaded %d skills from %s", len(r.skills), r.skillsPath)
	return nil
}
// loadMetadataFromFile parses the YAML frontmatter (between the first pair of
// "---" lines) of a SKILL.md file into SkillMetadata. The "name" field is
// mandatory.
//
// Frontmatter lines are kept verbatim: trimming leading whitespace (as the
// previous version did) corrupts indentation-sensitive YAML such as nested
// maps and block scalars. Only delimiter detection tolerates surrounding
// whitespace, matching loadContentFromFile.
func (r *SkillRegistry) loadMetadataFromFile(filePath string) (*SkillMetadata, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return nil, fmt.Errorf("failed to open file: %v", err)
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	var inFrontmatter bool
	var frontmatterLines []string
	for scanner.Scan() {
		raw := scanner.Text()
		if strings.TrimSpace(raw) == "---" {
			if !inFrontmatter {
				inFrontmatter = true
				continue
			}
			// Closing delimiter: frontmatter is complete.
			break
		}
		if inFrontmatter {
			frontmatterLines = append(frontmatterLines, raw)
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("failed to scan file: %v", err)
	}
	if len(frontmatterLines) == 0 {
		return nil, fmt.Errorf("no frontmatter found in %s", filePath)
	}

	var metadata SkillMetadata
	if err := yaml.Unmarshal([]byte(strings.Join(frontmatterLines, "\n")), &metadata); err != nil {
		return nil, fmt.Errorf("failed to parse frontmatter: %v", err)
	}
	if metadata.Name == "" {
		return nil, fmt.Errorf("skill name is required in frontmatter")
	}
	return &metadata, nil
}
// GetByName returns the metadata for the named skill, or nil when unknown.
func (r *SkillRegistry) GetByName(name string) *SkillMetadata {
	r.mu.RLock()
	md := r.skills[name]
	r.mu.RUnlock()
	return md
}
// ListAll returns the metadata of every registered skill, in map iteration
// order (i.e. unspecified).
func (r *SkillRegistry) ListAll() []*SkillMetadata {
	r.mu.RLock()
	defer r.mu.RUnlock()

	all := make([]*SkillMetadata, 0, len(r.skills))
	for _, md := range r.skills {
		all = append(all, md)
	}
	return all
}
// LoadContent returns the Level 2 content (SKILL.md body) for a skill,
// reading it from disk on first access and caching it afterwards.
// Two goroutines may both miss the cache and read the file concurrently;
// the duplicate cache write is harmless since both parse the same file.
func (r *SkillRegistry) LoadContent(metadata *SkillMetadata) (*SkillContent, error) {
	if metadata == nil {
		return nil, fmt.Errorf("metadata is nil")
	}
	// Fast path: serve from the content cache.
	r.mu.RLock()
	if cached, ok := r.contentCache[metadata.Name]; ok {
		r.mu.RUnlock()
		return cached, nil
	}
	r.mu.RUnlock()
	// Slow path: read SKILL.md from the skill directory.
	skillFile := filepath.Join(metadata.Path, SkillFileName)
	content, err := r.loadContentFromFile(skillFile)
	if err != nil {
		return nil, err
	}
	skillContent := &SkillContent{
		Metadata:    metadata,
		MainContent: content,
	}
	// Cache for subsequent calls.
	r.mu.Lock()
	r.contentCache[metadata.Name] = skillContent
	r.mu.Unlock()
	return skillContent, nil
}
// loadContentFromFile returns the body of a SKILL.md file: everything after
// the closing "---" of the YAML frontmatter, with surrounding whitespace
// trimmed. A file without a complete frontmatter pair yields an empty
// string, since the body only begins once the second "---" is seen. Any
// further "---" lines inside the body are dropped as well.
func (r *SkillRegistry) loadContentFromFile(filePath string) (string, error) {
	f, err := os.Open(filePath)
	if err != nil {
		return "", fmt.Errorf("failed to open file: %v", err)
	}
	defer f.Close()

	var (
		sc        = bufio.NewScanner(f)
		seenOpen  bool
		seenClose bool
		body      []string
	)
	for sc.Scan() {
		text := sc.Text()
		if strings.TrimSpace(text) == "---" {
			if !seenOpen {
				seenOpen = true
			} else {
				seenClose = true
			}
			continue
		}
		if seenClose {
			body = append(body, text)
		}
	}
	if err := sc.Err(); err != nil {
		return "", fmt.Errorf("failed to scan file: %v", err)
	}
	return strings.TrimSpace(strings.Join(body, "\n")), nil
}
// LoadSkillTool loads the full configuration of a single skill tool
// (Level 3 in the progressive-disclosure scheme), caching the result.
func (r *SkillRegistry) LoadSkillTool(skillName, toolName string) (*SkillTool, error) {
	// Serve from the cache when possible.
	r.mu.RLock()
	if skillTools, ok := r.toolsCache[skillName]; ok {
		if tool, ok := skillTools[toolName]; ok {
			r.mu.RUnlock()
			return tool, nil
		}
	}
	r.mu.RUnlock()
	// Resolve the skill directory.
	metadata := r.GetByName(skillName)
	if metadata == nil {
		return nil, fmt.Errorf("skill '%s' not found", skillName)
	}
	// Load the definition from <skill>/skill_tools/<tool>.yaml.
	toolFile := filepath.Join(metadata.Path, SkillToolsDir, toolName+".yaml")
	tool, err := r.loadToolFromFile(toolFile)
	if err != nil {
		return nil, err
	}
	// Cache for later lookups. Concurrent loaders may race here; last writer
	// wins, which is harmless since both parsed the same file.
	r.mu.Lock()
	if r.toolsCache[skillName] == nil {
		r.toolsCache[skillName] = make(map[string]*SkillTool)
	}
	r.toolsCache[skillName][toolName] = tool
	r.mu.Unlock()
	return tool, nil
}
// LoadSkillToolDescription returns only the description of a skill tool.
// If the tool is already in the full-tool cache its description is served
// from there; otherwise the YAML file is parsed once WITHOUT populating the
// cache, preserving the lazy loading of full tool configurations.
func (r *SkillRegistry) LoadSkillToolDescription(skillName, toolName string) (string, error) {
	r.mu.RLock()
	if tools, ok := r.toolsCache[skillName]; ok {
		if cached, ok := tools[toolName]; ok {
			r.mu.RUnlock()
			return cached.Description, nil
		}
	}
	r.mu.RUnlock()

	md := r.GetByName(skillName)
	if md == nil {
		return "", fmt.Errorf("skill '%s' not found", skillName)
	}

	tool, err := r.loadToolFromFile(filepath.Join(md.Path, SkillToolsDir, toolName+".yaml"))
	if err != nil {
		return "", err
	}
	return tool.Description, nil
}
// LoadAllSkillToolDescriptions returns toolName -> description for every
// YAML tool file under the skill's skill_tools directory. A missing
// directory yields an empty map; unreadable files are logged and skipped.
func (r *SkillRegistry) LoadAllSkillToolDescriptions(skillName string) (map[string]string, error) {
	metadata := r.GetByName(skillName)
	if metadata == nil {
		return nil, fmt.Errorf("skill '%s' not found", skillName)
	}
	toolsDir := filepath.Join(metadata.Path, SkillToolsDir)
	// A skill without a skill_tools directory simply has no tools.
	if _, err := os.Stat(toolsDir); os.IsNotExist(err) {
		return make(map[string]string), nil
	}
	entries, err := os.ReadDir(toolsDir)
	if err != nil {
		return nil, fmt.Errorf("failed to read skill_tools directory: %v", err)
	}
	descriptions := make(map[string]string)
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		// Only .yaml/.yml files are tool definitions.
		name := entry.Name()
		if !strings.HasSuffix(name, ".yaml") && !strings.HasSuffix(name, ".yml") {
			continue
		}
		toolFile := filepath.Join(toolsDir, name)
		tool, err := r.loadToolFromFile(toolFile)
		if err != nil {
			logger.Warningf("Failed to load skill tool %s: %v", toolFile, err)
			continue
		}
		// Keyed by the tool's declared name, not the filename.
		descriptions[tool.Name] = tool.Description
	}
	return descriptions, nil
}
// loadToolFromFile reads and unmarshals a single skill tool YAML definition.
func (r *SkillRegistry) loadToolFromFile(filePath string) (*SkillTool, error) {
	raw, err := os.ReadFile(filePath)
	if err != nil {
		return nil, fmt.Errorf("failed to read tool file: %v", err)
	}
	tool := new(SkillTool)
	if err := yaml.Unmarshal(raw, tool); err != nil {
		return nil, fmt.Errorf("failed to parse tool file: %v", err)
	}
	return tool, nil
}
// LoadReference reads an auxiliary reference file from the skill directory.
//
// refName is treated as a path relative to the skill directory; names that
// resolve outside of it (e.g. "../../etc/passwd") are rejected, since
// refName may originate from LLM output or other untrusted input.
func (r *SkillRegistry) LoadReference(metadata *SkillMetadata, refName string) (string, error) {
	if metadata == nil {
		return "", fmt.Errorf("metadata is nil")
	}

	base := filepath.Clean(metadata.Path)
	refFile := filepath.Join(base, refName) // Join cleans the joined path
	// Containment check: the resolved path must stay inside the skill dir.
	if refFile != base && !strings.HasPrefix(refFile, base+string(filepath.Separator)) {
		return "", fmt.Errorf("reference file '%s' is outside the skill directory", refName)
	}

	data, err := os.ReadFile(refFile)
	if err != nil {
		return "", fmt.Errorf("failed to read reference file: %v", err)
	}
	return string(data), nil
}
// Reload drops all cached metadata, content, and tools, then rescans the
// skills directory from disk.
func (r *SkillRegistry) Reload() error {
	r.mu.Lock()
	r.skills = map[string]*SkillMetadata{}
	r.contentCache = map[string]*SkillContent{}
	r.toolsCache = map[string]map[string]*SkillTool{}
	r.mu.Unlock()
	return r.loadAllMetadata()
}
// SkillSelector selects the most relevant skills for a task.
type SkillSelector interface {
	// SelectMultiple asks the LLM to pick up to maxSkills skills that best
	// match the task context.
	SelectMultiple(ctx context.Context, taskContext string, availableSkills []*SkillMetadata, maxSkills int) ([]*SkillMetadata, error)
}

// LLMSkillSelector implements SkillSelector by delegating the choice to an LLM.
type LLMSkillSelector struct {
	llmCaller func(ctx context.Context, messages []ChatMessage) (string, error) // LLM chat entry point
}
// NewLLMSkillSelector wraps an LLM chat function in a skill selector.
func NewLLMSkillSelector(llmCaller func(ctx context.Context, messages []ChatMessage) (string, error)) *LLMSkillSelector {
	s := new(LLMSkillSelector)
	s.llmCaller = llmCaller
	return s
}
// SelectMultiple asks the LLM to choose up to maxSkills of availableSkills
// for the given task context. It returns nil (no error) when there are no
// candidates or the LLM selects nothing; names the LLM invents that do not
// match any available skill are silently dropped.
func (s *LLMSkillSelector) SelectMultiple(ctx context.Context, taskContext string, availableSkills []*SkillMetadata, maxSkills int) ([]*SkillMetadata, error) {
	if len(availableSkills) == 0 {
		return nil, nil
	}
	if maxSkills <= 0 {
		maxSkills = DefaultMaxSkills
	}
	// System message lists the candidates; user message carries the task.
	systemPrompt := s.buildSelectionPrompt(availableSkills, maxSkills)
	messages := []ChatMessage{
		{Role: "system", Content: systemPrompt},
		{Role: "user", Content: taskContext},
	}
	// Invoke the LLM.
	response, err := s.llmCaller(ctx, messages)
	if err != nil {
		return nil, fmt.Errorf("LLM call failed: %v", err)
	}
	// Extract the JSON array of skill names from the response text.
	selectedNames := s.parseSelectionResponse(response)
	if len(selectedNames) == 0 {
		return nil, nil
	}
	// Enforce the cap even if the LLM over-selected.
	if len(selectedNames) > maxSkills {
		selectedNames = selectedNames[:maxSkills]
	}
	// Map names back to metadata, keeping the LLM's ordering.
	skillMap := make(map[string]*SkillMetadata)
	for _, skill := range availableSkills {
		skillMap[skill.Name] = skill
	}
	var result []*SkillMetadata
	for _, name := range selectedNames {
		if skill, ok := skillMap[name]; ok {
			result = append(result, skill)
		}
	}
	return result, nil
}
// buildSelectionPrompt renders the system prompt for skill selection: it
// enumerates the available skills with their descriptions and instructs the
// model to answer with a JSON array of at most maxSkills skill names.
// The prompt text is runtime content consumed by the LLM and is kept as-is.
func (s *LLMSkillSelector) buildSelectionPrompt(availableSkills []*SkillMetadata, maxSkills int) string {
	var sb strings.Builder
	sb.WriteString(fmt.Sprintf(`你是一个技能选择器。根据以下任务上下文,选择最合适的技能(可选择 1-%d 个)。
## 可用技能
`, maxSkills))
	// One numbered entry per candidate skill: name plus description.
	for i, skill := range availableSkills {
		sb.WriteString(fmt.Sprintf("%d. **%s**\n", i+1, skill.Name))
		sb.WriteString(fmt.Sprintf("   %s\n\n", skill.Description))
	}
	sb.WriteString(`## 输出格式
请以 JSON 数组格式返回选中的技能名称,例如:
` + "```json\n" + `["skill-name-1", "skill-name-2"]
` + "```" + `
## 选择原则
1. 选择与任务最相关的技能
2. 如果任务涉及多个领域,可以选择多个技能
3. 优先选择更具体、更专业的技能
4. 如果没有合适的技能,返回空数组 []
请返回技能名称数组:`)
	return sb.String()
}
// parseSelectionResponse extracts a JSON string array of skill names from an
// LLM response, tolerating surrounding prose or code fences by taking the
// span from the first '[' to the last ']'. Returns nil when no parseable
// array is found (a parse failure is logged).
func (s *LLMSkillSelector) parseSelectionResponse(response string) []string {
	trimmed := strings.TrimSpace(response)

	start := strings.Index(trimmed, "[")
	stop := strings.LastIndex(trimmed, "]")
	if start < 0 || stop <= start {
		return nil
	}

	var names []string
	if err := json.Unmarshal([]byte(trimmed[start:stop+1]), &names); err != nil {
		logger.Warningf("Failed to parse skill selection response: %v", err)
		return nil
	}
	return names
}

View File

@@ -21,6 +21,12 @@ type Center struct {
CleanPipelineExecutionDay int
MigrateBusiGroupLabel bool
RSA httpx.RSAConfig
AIAgent AIAgent
}
// AIAgent is the feature toggle for the AI agent and the filesystem
// location of its skill definitions.
type AIAgent struct {
	Enable bool `toml:"Enable"` // enable the AI agent feature
	SkillsPath string `toml:"SkillsPath"` // directory containing skill definitions
}
type Plugin struct {

View File

@@ -300,6 +300,14 @@ ops:
cname: View Alerting Engines
- name: /system/version
cname: View Product Version
- name: /ai-config/agents
cname: AI Config - Agents
- name: /ai-config/llm-configs
cname: AI Config - LLM Configs
- name: /ai-config/skills
cname: AI Config - Skills
- name: /ai-config/mcp-servers
cname: AI Config - MCP Servers
`
)

View File

@@ -520,6 +520,50 @@ func (rt *Router) Config(r *gin.Engine) {
pages.PUT("/config", rt.auth(), rt.admin(), rt.configPutByKey)
pages.GET("/site-info", rt.siteInfo)
// AI Config management
pages.GET("/ai-agents", rt.auth(), rt.admin(), rt.aiAgentGets)
pages.GET("/ai-agent/:id", rt.auth(), rt.admin(), rt.aiAgentGet)
pages.POST("/ai-agents", rt.auth(), rt.admin(), rt.aiAgentAdd)
pages.PUT("/ai-agent/:id", rt.auth(), rt.admin(), rt.aiAgentPut)
pages.DELETE("/ai-agent/:id", rt.auth(), rt.admin(), rt.aiAgentDel)
pages.GET("/ai-llm-configs", rt.auth(), rt.admin(), rt.aiLLMConfigGets)
pages.GET("/ai-llm-config/:id", rt.auth(), rt.admin(), rt.aiLLMConfigGet)
pages.POST("/ai-llm-configs", rt.auth(), rt.admin(), rt.aiLLMConfigAdd)
pages.PUT("/ai-llm-config/:id", rt.auth(), rt.admin(), rt.aiLLMConfigPut)
pages.DELETE("/ai-llm-config/:id", rt.auth(), rt.admin(), rt.aiLLMConfigDel)
pages.POST("/ai-llm-config/test", rt.auth(), rt.admin(), rt.aiLLMConfigTest)
pages.GET("/ai-skills", rt.auth(), rt.admin(), rt.aiSkillGets)
pages.GET("/ai-skill/:id", rt.auth(), rt.admin(), rt.aiSkillGet)
pages.POST("/ai-skills", rt.auth(), rt.admin(), rt.aiSkillAdd)
pages.PUT("/ai-skill/:id", rt.auth(), rt.admin(), rt.aiSkillPut)
pages.DELETE("/ai-skill/:id", rt.auth(), rt.admin(), rt.aiSkillDel)
pages.POST("/ai-skills/import", rt.auth(), rt.admin(), rt.aiSkillImport)
pages.POST("/ai-skill/:id/files", rt.auth(), rt.admin(), rt.aiSkillFileAdd)
pages.GET("/ai-skill-file/:fileId", rt.auth(), rt.admin(), rt.aiSkillFileGet)
pages.DELETE("/ai-skill-file/:fileId", rt.auth(), rt.admin(), rt.aiSkillFileDel)
pages.GET("/mcp-servers", rt.auth(), rt.admin(), rt.mcpServerGets)
pages.GET("/mcp-server/:id", rt.auth(), rt.admin(), rt.mcpServerGet)
pages.POST("/mcp-servers", rt.auth(), rt.admin(), rt.mcpServerAdd)
pages.PUT("/mcp-server/:id", rt.auth(), rt.admin(), rt.mcpServerPut)
pages.DELETE("/mcp-server/:id", rt.auth(), rt.admin(), rt.mcpServerDel)
pages.POST("/ai-agent/:id/test", rt.auth(), rt.admin(), rt.aiAgentTest)
pages.POST("/mcp-server/test", rt.auth(), rt.admin(), rt.mcpServerTest)
pages.GET("/mcp-server/:id/tools", rt.auth(), rt.admin(), rt.mcpServerTools)
// AI Conversations
pages.GET("/ai-conversations", rt.auth(), rt.user(), rt.aiConversationGets)
pages.POST("/ai-conversations", rt.auth(), rt.user(), rt.aiConversationAdd)
pages.GET("/ai-conversation/:id", rt.auth(), rt.user(), rt.aiConversationGet)
pages.PUT("/ai-conversation/:id", rt.auth(), rt.user(), rt.aiConversationPut)
pages.DELETE("/ai-conversation/:id", rt.auth(), rt.user(), rt.aiConversationDel)
pages.POST("/ai-conversation/:id/messages", rt.auth(), rt.user(), rt.aiConversationMessageAdd)
// AI chat (SSE), dispatches by action_key
pages.POST("/ai-chat", rt.auth(), rt.user(), rt.aiChat)
// source token 相关路由
pages.POST("/source-token", rt.auth(), rt.user(), rt.sourceTokenAdd)

View File

@@ -0,0 +1,747 @@
package router
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"path/filepath"
"strings"
"time"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"gopkg.in/yaml.v3"
)
// ========================
// AI Agent handlers
// ========================
// aiAgentGets lists all AI agents.
func (rt *Router) aiAgentGets(c *gin.Context) {
	agents, err := models.AIAgentGets(rt.Ctx)
	ginx.Dangerous(err)
	ginx.NewRender(c).Data(agents, nil)
}
// aiAgentGet returns a single AI agent by id, responding 404 when absent.
func (rt *Router) aiAgentGet(c *gin.Context) {
	agent, err := models.AIAgentGetById(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	ginx.Dangerous(err)
	if agent == nil {
		ginx.Bomb(http.StatusNotFound, "ai agent not found")
	}
	ginx.NewRender(c).Data(agent, nil)
}
// aiAgentAdd validates and creates a new AI agent, recording the creator.
func (rt *Router) aiAgentAdd(c *gin.Context) {
	var obj models.AIAgent
	ginx.BindJSON(c, &obj)
	ginx.Dangerous(obj.Verify())
	me := c.MustGet("user").(*models.User)
	ginx.Dangerous(obj.Create(rt.Ctx, me.Username))
	// Respond with the id assigned on insert.
	ginx.NewRender(c).Data(obj.Id, nil)
}
// aiAgentPut validates an incoming payload and applies it to an existing
// AI agent, responding 404 when the agent does not exist.
func (rt *Router) aiAgentPut(c *gin.Context) {
	current, err := models.AIAgentGetById(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	ginx.Dangerous(err)
	if current == nil {
		ginx.Bomb(http.StatusNotFound, "ai agent not found")
	}

	var incoming models.AIAgent
	ginx.BindJSON(c, &incoming)
	ginx.Dangerous(incoming.Verify())

	me := c.MustGet("user").(*models.User)
	ginx.NewRender(c).Message(current.Update(rt.Ctx, me.Username, incoming))
}
// aiAgentDel deletes an AI agent by id, responding 404 when absent.
func (rt *Router) aiAgentDel(c *gin.Context) {
	agent, err := models.AIAgentGetById(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	ginx.Dangerous(err)
	if agent == nil {
		ginx.Bomb(http.StatusNotFound, "ai agent not found")
	}
	ginx.NewRender(c).Message(agent.Delete(rt.Ctx))
}
// ========================
// AI Skill handlers
// ========================
// aiSkillGets lists AI skills, optionally filtered by the "search" query.
func (rt *Router) aiSkillGets(c *gin.Context) {
	skills, err := models.AISkillGets(rt.Ctx, ginx.QueryStr(c, "search", ""))
	ginx.Dangerous(err)
	ginx.NewRender(c).Data(skills, nil)
}
// aiSkillGet returns a skill by id together with its attached file records,
// responding 404 when the skill does not exist.
func (rt *Router) aiSkillGet(c *gin.Context) {
	id := ginx.UrlParamInt64(c, "id")
	obj, err := models.AISkillGetById(rt.Ctx, id)
	ginx.Dangerous(err)
	if obj == nil {
		ginx.Bomb(http.StatusNotFound, "ai skill not found")
	}
	// Include associated files (without content).
	files, err := models.AISkillFileGets(rt.Ctx, id)
	ginx.Dangerous(err)
	obj.Files = files
	ginx.NewRender(c).Data(obj, nil)
}
// aiSkillAdd validates and creates a new AI skill owned by the current user.
func (rt *Router) aiSkillAdd(c *gin.Context) {
	var skill models.AISkill
	ginx.BindJSON(c, &skill)
	ginx.Dangerous(skill.Verify())

	me := c.MustGet("user").(*models.User)
	skill.CreatedBy, skill.UpdatedBy = me.Username, me.Username
	ginx.Dangerous(skill.Create(rt.Ctx))
	ginx.NewRender(c).Data(skill.Id, nil)
}
// aiSkillPut validates an incoming payload and applies it to an existing
// skill, responding 404 when the skill does not exist.
func (rt *Router) aiSkillPut(c *gin.Context) {
	current, err := models.AISkillGetById(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	ginx.Dangerous(err)
	if current == nil {
		ginx.Bomb(http.StatusNotFound, "ai skill not found")
	}

	var incoming models.AISkill
	ginx.BindJSON(c, &incoming)
	ginx.Dangerous(incoming.Verify())

	me := c.MustGet("user").(*models.User)
	incoming.UpdatedBy = me.Username
	ginx.NewRender(c).Message(current.Update(rt.Ctx, incoming))
}
// aiSkillDel deletes a skill and all of its attached files.
func (rt *Router) aiSkillDel(c *gin.Context) {
	id := ginx.UrlParamInt64(c, "id")
	obj, err := models.AISkillGetById(rt.Ctx, id)
	ginx.Dangerous(err)
	if obj == nil {
		ginx.Bomb(http.StatusNotFound, "ai skill not found")
	}
	// Cascade delete skill files.
	// NOTE(review): not transactional — if the skill delete below fails, the
	// files are already gone; consider wrapping both in one transaction.
	ginx.Dangerous(models.AISkillFileDeleteBySkillId(rt.Ctx, id))
	ginx.NewRender(c).Message(obj.Delete(rt.Ctx))
}
// aiSkillImport creates a skill from an uploaded SKILL.md file. YAML
// frontmatter, when present and valid, supplies the skill fields; otherwise
// the filename becomes the skill name and the whole file its instructions.
func (rt *Router) aiSkillImport(c *gin.Context) {
	file, header, err := c.Request.FormFile("file")
	ginx.Dangerous(err)
	defer file.Close()
	ext := strings.ToLower(filepath.Ext(header.Filename))
	if ext != ".md" {
		ginx.Bomb(http.StatusBadRequest, "only .md files are supported")
	}
	// NOTE(review): the upload is read without an explicit size cap here,
	// unlike aiSkillFileAdd — confirm a request body limit is configured.
	content, err := io.ReadAll(file)
	ginx.Dangerous(err)
	meta, instructions := parseSkillMarkdown(string(content), header.Filename, ext)
	me := c.MustGet("user").(*models.User)
	skill := models.AISkill{
		Name:          meta.Name,
		Description:   meta.Description,
		Instructions:  instructions,
		License:       meta.License,
		Compatibility: meta.Compatibility,
		Metadata:      meta.Metadata,
		AllowedTools:  meta.AllowedTools,
		CreatedBy:     me.Username,
		UpdatedBy:     me.Username,
	}
	ginx.Dangerous(skill.Create(rt.Ctx))
	ginx.NewRender(c).Data(skill.Id, nil)
}
// skillFrontmatter mirrors the optional YAML frontmatter of a SKILL.md file,
// consumed by parseSkillMarkdown. Frontmatter format:
//
//	---
//	name: my-skill
//	description: what this skill does
//	---
//	# Actual instructions content...
type skillFrontmatter struct {
	Name          string            `yaml:"name"`
	Description   string            `yaml:"description"`
	License       string            `yaml:"license"`
	Compatibility string            `yaml:"compatibility"`
	Metadata      map[string]string `yaml:"metadata"`
	AllowedTools  string            `yaml:"allowed-tools"`
}
// parseSkillMarkdown splits a SKILL.md document into its YAML frontmatter
// and instruction body. When no valid frontmatter is present — or it parses
// but lacks a name — the filename (minus extension) becomes the skill name
// and the entire original content is treated as the instructions.
func parseSkillMarkdown(content, filename, ext string) (meta skillFrontmatter, instructions string) {
	text := strings.TrimSpace(content)

	if strings.HasPrefix(text, "---") {
		if endIdx := strings.Index(text[3:], "\n---"); endIdx >= 0 {
			head := text[3 : 3+endIdx]
			body := strings.TrimSpace(text[3+endIdx+4:]) // 4 = len("\n---")
			if yaml.Unmarshal([]byte(head), &meta) == nil && meta.Name != "" {
				return meta, body
			}
		}
	}

	// Fallback: derive the name from the filename.
	meta.Name = strings.TrimSuffix(filename, ext)
	return meta, content
}
// ========================
// AI Skill File handlers
// ========================
// aiSkillFileAdd uploads an auxiliary file and attaches it to a skill.
// Only a small set of text formats is accepted, capped at 2MB.
func (rt *Router) aiSkillFileAdd(c *gin.Context) {
	skillId := ginx.UrlParamInt64(c, "id")

	// The parent skill must exist before accepting an attachment.
	skill, err := models.AISkillGetById(rt.Ctx, skillId)
	ginx.Dangerous(err)
	if skill == nil {
		ginx.Bomb(http.StatusNotFound, "ai skill not found")
	}

	file, header, err := c.Request.FormFile("file")
	ginx.Dangerous(err)
	defer file.Close()

	// Whitelist of accepted extensions.
	switch strings.ToLower(filepath.Ext(header.Filename)) {
	case ".md", ".txt", ".json", ".yaml", ".yml", ".csv":
		// accepted
	default:
		ginx.Bomb(http.StatusBadRequest, "file type not allowed, only .md/.txt/.json/.yaml/.csv")
	}

	// Reject oversized uploads (2MB max).
	if header.Size > 2*1024*1024 {
		ginx.Bomb(http.StatusBadRequest, "file size exceeds 2MB limit")
	}

	content, err := io.ReadAll(file)
	ginx.Dangerous(err)

	me := c.MustGet("user").(*models.User)
	rec := models.AISkillFile{
		SkillId:   skillId,
		Name:      header.Filename,
		Content:   string(content),
		CreatedBy: me.Username,
	}
	ginx.Dangerous(rec.Create(rt.Ctx))
	ginx.NewRender(c).Data(rec.Id, nil)
}
// aiSkillFileGet returns a single skill attachment by its file id,
// or 404 when it does not exist.
func (rt *Router) aiSkillFileGet(c *gin.Context) {
	skillFile, err := models.AISkillFileGetById(rt.Ctx, ginx.UrlParamInt64(c, "fileId"))
	ginx.Dangerous(err)
	if skillFile == nil {
		ginx.Bomb(http.StatusNotFound, "file not found")
	}
	ginx.NewRender(c).Data(skillFile, nil)
}
// aiSkillFileDel removes a skill attachment by its file id,
// or 404 when it does not exist.
func (rt *Router) aiSkillFileDel(c *gin.Context) {
	skillFile, err := models.AISkillFileGetById(rt.Ctx, ginx.UrlParamInt64(c, "fileId"))
	ginx.Dangerous(err)
	if skillFile == nil {
		ginx.Bomb(http.StatusNotFound, "file not found")
	}
	ginx.NewRender(c).Message(skillFile.Delete(rt.Ctx))
}
// ========================
// MCP Server handlers
// ========================
// mcpServerGets lists all configured MCP servers.
func (rt *Router) mcpServerGets(c *gin.Context) {
	servers, err := models.MCPServerGets(rt.Ctx)
	ginx.Dangerous(err)
	ginx.NewRender(c).Data(servers, nil)
}
// mcpServerGet returns one MCP server by id, or 404 when absent.
func (rt *Router) mcpServerGet(c *gin.Context) {
	server, err := models.MCPServerGetById(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	ginx.Dangerous(err)
	if server == nil {
		ginx.Bomb(http.StatusNotFound, "mcp server not found")
	}
	ginx.NewRender(c).Data(server, nil)
}
// mcpServerAdd creates a new MCP server record and responds with its id.
func (rt *Router) mcpServerAdd(c *gin.Context) {
	var server models.MCPServer
	ginx.BindJSON(c, &server)
	ginx.Dangerous(server.Verify())

	// Stamp audit fields from the session user.
	me := c.MustGet("user").(*models.User)
	server.CreatedBy = me.Username
	server.UpdatedBy = me.Username

	ginx.Dangerous(server.Create(rt.Ctx))
	ginx.NewRender(c).Data(server.Id, nil)
}
// mcpServerPut updates an existing MCP server with the posted fields,
// or 404 when it does not exist.
func (rt *Router) mcpServerPut(c *gin.Context) {
	server, err := models.MCPServerGetById(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	ginx.Dangerous(err)
	if server == nil {
		ginx.Bomb(http.StatusNotFound, "mcp server not found")
	}

	var incoming models.MCPServer
	ginx.BindJSON(c, &incoming)
	ginx.Dangerous(incoming.Verify())

	// Only the updater audit field is refreshed here.
	me := c.MustGet("user").(*models.User)
	incoming.UpdatedBy = me.Username
	ginx.NewRender(c).Message(server.Update(rt.Ctx, incoming))
}
// mcpServerDel deletes one MCP server by id, or 404 when absent.
func (rt *Router) mcpServerDel(c *gin.Context) {
	server, err := models.MCPServerGetById(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	ginx.Dangerous(err)
	if server == nil {
		ginx.Bomb(http.StatusNotFound, "mcp server not found")
	}
	ginx.NewRender(c).Message(server.Delete(rt.Ctx))
}
// ========================
// AI LLM Config handlers
// ========================
// aiLLMConfigGets lists all LLM configurations.
func (rt *Router) aiLLMConfigGets(c *gin.Context) {
	configs, err := models.AILLMConfigGets(rt.Ctx)
	ginx.Dangerous(err)
	ginx.NewRender(c).Data(configs, nil)
}
// aiLLMConfigGet returns one LLM configuration by id, or 404 when absent.
func (rt *Router) aiLLMConfigGet(c *gin.Context) {
	cfg, err := models.AILLMConfigGetById(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	ginx.Dangerous(err)
	if cfg == nil {
		ginx.Bomb(http.StatusNotFound, "ai llm config not found")
	}
	ginx.NewRender(c).Data(cfg, nil)
}
// aiLLMConfigAdd creates a new LLM configuration and responds with its id.
// The creator username is passed down to Create for audit stamping.
func (rt *Router) aiLLMConfigAdd(c *gin.Context) {
	var cfg models.AILLMConfig
	ginx.BindJSON(c, &cfg)
	ginx.Dangerous(cfg.Verify())

	me := c.MustGet("user").(*models.User)
	ginx.Dangerous(cfg.Create(rt.Ctx, me.Username))
	ginx.NewRender(c).Data(cfg.Id, nil)
}
// aiLLMConfigPut updates an existing LLM configuration with the posted
// fields, or 404 when it does not exist.
func (rt *Router) aiLLMConfigPut(c *gin.Context) {
	cfg, err := models.AILLMConfigGetById(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	ginx.Dangerous(err)
	if cfg == nil {
		ginx.Bomb(http.StatusNotFound, "ai llm config not found")
	}

	var incoming models.AILLMConfig
	ginx.BindJSON(c, &incoming)
	ginx.Dangerous(incoming.Verify())

	me := c.MustGet("user").(*models.User)
	ginx.NewRender(c).Message(cfg.Update(rt.Ctx, me.Username, incoming))
}
// aiLLMConfigDel deletes one LLM configuration by id, or 404 when absent.
func (rt *Router) aiLLMConfigDel(c *gin.Context) {
	cfg, err := models.AILLMConfigGetById(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	ginx.Dangerous(err)
	if cfg == nil {
		ginx.Bomb(http.StatusNotFound, "ai llm config not found")
	}
	ginx.NewRender(c).Message(cfg.Delete(rt.Ctx))
}
// aiLLMConfigTest checks connectivity to an LLM endpoint using parameters
// supplied in the request body (no stored config needed). Success and
// latency go in dat; the failure detail travels in the render error.
func (rt *Router) aiLLMConfigTest(c *gin.Context) {
	var body struct {
		APIType     string                `json:"api_type"`
		APIURL      string                `json:"api_url"`
		APIKey      string                `json:"api_key"`
		Model       string                `json:"model"`
		ExtraConfig models.LLMExtraConfig `json:"extra_config"`
	}
	ginx.BindJSON(c, &body)
	if body.APIType == "" || body.APIURL == "" || body.APIKey == "" || body.Model == "" {
		ginx.Bomb(http.StatusBadRequest, "api_type, api_url, api_key, model are required")
	}

	// Build a transient config solely for the probe; nothing is persisted.
	candidate := &models.AILLMConfig{
		APIType:     body.APIType,
		APIURL:      body.APIURL,
		APIKey:      body.APIKey,
		Model:       body.Model,
		ExtraConfig: body.ExtraConfig,
	}

	begin := time.Now()
	testErr := testAIAgent(candidate)
	ginx.NewRender(c).Data(gin.H{
		"success":     testErr == nil,
		"duration_ms": time.Since(begin).Milliseconds(),
	}, testErr)
}
// ========================
// AI Agent test
// ========================
// aiAgentTest checks connectivity of the LLM config referenced by an
// agent. Unlike aiLLMConfigTest, the failure detail is embedded in
// dat["error"] and the render error is always nil.
func (rt *Router) aiAgentTest(c *gin.Context) {
	agent, err := models.AIAgentGetById(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	ginx.Dangerous(err)
	if agent == nil {
		ginx.Bomb(http.StatusNotFound, "ai agent not found")
	}

	llmCfg, err := models.AILLMConfigGetById(rt.Ctx, agent.LLMConfigId)
	ginx.Dangerous(err)
	if llmCfg == nil {
		ginx.Bomb(http.StatusBadRequest, "referenced LLM config not found")
	}

	begin := time.Now()
	testErr := testAIAgent(llmCfg)

	result := gin.H{
		"success":     testErr == nil,
		"duration_ms": time.Since(begin).Milliseconds(),
	}
	if testErr != nil {
		result["error"] = testErr.Error()
	}
	ginx.NewRender(c).Data(result, nil)
}
// testAIAgent sends a minimal "Hi" completion request (max_tokens=5) to the
// configured LLM endpoint to verify connectivity and credentials.
// Supported api_type values: "openai", "claude", "gemini"; anything else
// returns an error. Returns nil when the HTTP status is below 400,
// otherwise an error carrying at most 500 bytes of the response body.
func testAIAgent(p *models.AILLMConfig) error {
	extra := p.ExtraConfig
	// Build HTTP client with ExtraConfig settings
	timeout := 30 * time.Second // default when TimeoutSeconds is unset
	if extra.TimeoutSeconds > 0 {
		timeout = time.Duration(extra.TimeoutSeconds) * time.Second
	}
	transport := &http.Transport{}
	if extra.SkipTLSVerify {
		transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
	}
	if extra.Proxy != "" {
		// Best-effort: an unparsable proxy URL is silently ignored.
		if proxyURL, err := url.Parse(extra.Proxy); err == nil {
			transport.Proxy = http.ProxyURL(proxyURL)
		}
	}
	client := &http.Client{Timeout: timeout, Transport: transport}
	var reqURL string
	var reqBody []byte
	hdrs := map[string]string{"Content-Type": "application/json"}
	switch p.APIType {
	case "openai":
		// Accept either a base URL or a full .../chat/completions URL.
		base := strings.TrimRight(p.APIURL, "/")
		if strings.HasSuffix(base, "/chat/completions") {
			reqURL = base
		} else {
			reqURL = base + "/chat/completions"
		}
		reqBody, _ = json.Marshal(map[string]interface{}{
			"model":      p.Model,
			"messages":   []map[string]string{{"role": "user", "content": "Hi"}},
			"max_tokens": 5,
		})
		hdrs["Authorization"] = "Bearer " + p.APIKey
	case "claude":
		reqURL = strings.TrimRight(p.APIURL, "/") + "/v1/messages"
		reqBody, _ = json.Marshal(map[string]interface{}{
			"model":      p.Model,
			"messages":   []map[string]string{{"role": "user", "content": "Hi"}},
			"max_tokens": 5,
		})
		hdrs["x-api-key"] = p.APIKey
		hdrs["anthropic-version"] = "2023-06-01"
	case "gemini":
		// NOTE(review): the API key rides in the query string here, so it may
		// end up in proxy/access logs — confirm this is acceptable.
		reqURL = strings.TrimRight(p.APIURL, "/") + "/v1beta/models/" + p.Model + ":generateContent?key=" + p.APIKey
		reqBody, _ = json.Marshal(map[string]interface{}{
			"contents": []map[string]interface{}{
				{"parts": []map[string]string{{"text": "Hi"}}},
			},
		})
	default:
		return fmt.Errorf("unsupported api_type: %s", p.APIType)
	}
	req, err := http.NewRequest("POST", reqURL, bytes.NewReader(reqBody))
	if err != nil {
		return err
	}
	for k, v := range hdrs {
		req.Header.Set(k, v)
	}
	// Apply custom headers from ExtraConfig (these can override the defaults above)
	for k, v := range extra.CustomHeaders {
		req.Header.Set(k, v)
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		// Truncate the error body so the returned message stays bounded.
		body, _ := io.ReadAll(resp.Body)
		if len(body) > 500 {
			body = body[:500]
		}
		return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body))
	}
	return nil
}
// ========================
// MCP Server test & tools
// ========================
// mcpServerTest probes an MCP server using url/headers from the request
// body, reporting success, latency, and the number of tools advertised.
func (rt *Router) mcpServerTest(c *gin.Context) {
	var body struct {
		URL     string            `json:"url"`
		Headers map[string]string `json:"headers"`
	}
	ginx.BindJSON(c, &body)
	if body.URL == "" {
		ginx.Bomb(http.StatusBadRequest, "url is required")
	}

	// Transient server object used only for the probe; nothing is persisted.
	candidate := &models.MCPServer{
		URL:     body.URL,
		Headers: body.Headers,
	}

	begin := time.Now()
	tools, testErr := listMCPTools(candidate)

	result := gin.H{
		"success":     testErr == nil,
		"duration_ms": time.Since(begin).Milliseconds(),
		"tool_count":  len(tools),
	}
	if testErr != nil {
		result["error"] = testErr.Error()
	}
	ginx.NewRender(c).Data(result, nil)
}
// mcpServerTools lists the tools exposed by a stored MCP server,
// or 404 when the server does not exist.
func (rt *Router) mcpServerTools(c *gin.Context) {
	server, err := models.MCPServerGetById(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	ginx.Dangerous(err)
	if server == nil {
		ginx.Bomb(http.StatusNotFound, "mcp server not found")
	}

	tools, err := listMCPTools(server)
	ginx.Dangerous(err)
	ginx.NewRender(c).Data(tools, nil)
}
// mcpTool is the subset of an MCP tool definition surfaced to callers:
// only the tool name and its human-readable description are kept.
type mcpTool struct {
	Name        string `json:"name"`
	Description string `json:"description"`
}
// listMCPTools performs the minimal MCP handshake (initialize followed by
// the notifications/initialized notification) against s.URL, then calls
// tools/list and returns the advertised tools. A missing result is treated
// as an empty tool set, not an error.
func listMCPTools(s *models.MCPServer) ([]mcpTool, error) {
	client := &http.Client{Timeout: 30 * time.Second}
	hdrs := s.Headers
	// Step 1: Initialize. The server may hand back a session id that must
	// accompany every subsequent request.
	initResp, initSessionID, err := sendMCPRPC(client, s.URL, hdrs, "", 1, "initialize", map[string]interface{}{
		"protocolVersion": "2024-11-05",
		"capabilities":    map[string]interface{}{},
		"clientInfo":      map[string]interface{}{"name": "nightingale", "version": "1.0.0"},
	})
	if err != nil {
		return nil, fmt.Errorf("initialize: %v", err)
	}
	_ = initResp // init result capabilities are not needed here
	// Send initialized notification (fire-and-forget; error deliberately ignored)
	sendMCPRPC(client, s.URL, hdrs, initSessionID, 0, "notifications/initialized", map[string]interface{}{})
	// Step 2: List tools
	toolsResp, _, err := sendMCPRPC(client, s.URL, hdrs, initSessionID, 2, "tools/list", map[string]interface{}{})
	if err != nil {
		return nil, fmt.Errorf("tools/list: %v", err)
	}
	if toolsResp == nil || toolsResp.Result == nil {
		return []mcpTool{}, nil
	}
	toolsRaw, ok := toolsResp.Result["tools"]
	if !ok {
		return []mcpTool{}, nil
	}
	// Round-trip through JSON to coerce the loosely-typed payload into
	// []mcpTool; an unmarshal failure simply yields an empty slice.
	toolsJSON, _ := json.Marshal(toolsRaw)
	var tools []mcpTool
	json.Unmarshal(toolsJSON, &tools)
	return tools, nil
}
// jsonRPCResponse models a JSON-RPC 2.0 response envelope as returned by
// an MCP server. Exactly one of Result/Error is expected to be populated.
type jsonRPCResponse struct {
	JSONRPC string                 `json:"jsonrpc"`
	ID      interface{}            `json:"id"`
	Result  map[string]interface{} `json:"result"`
	Error   *jsonRPCError          `json:"error"` // non-nil when the call failed
}
// jsonRPCError is the error object of a JSON-RPC 2.0 response.
type jsonRPCError struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}
// sendMCPRPC issues a single JSON-RPC 2.0 call (or a notification when
// id <= 0) to an MCP server over HTTP, handling both plain JSON and SSE
// ("text/event-stream") response framing.
//
// sessionID, when non-empty, is sent as the Mcp-Session-Id header. The
// returned session id is the server-provided one when present, otherwise
// the one passed in. For notifications no response body is read.
func sendMCPRPC(client *http.Client, serverURL string, hdrs map[string]string, sessionID string, id int, method string, params interface{}) (*jsonRPCResponse, string, error) {
	body := map[string]interface{}{
		"jsonrpc": "2.0",
		"method":  method,
		"params":  params,
	}
	// JSON-RPC: requests carry an id, notifications do not.
	if id > 0 {
		body["id"] = id
	}
	reqBody, _ := json.Marshal(body)
	req, err := http.NewRequest("POST", serverURL, bytes.NewReader(reqBody))
	if err != nil {
		return nil, "", err
	}
	req.Header.Set("Content-Type", "application/json")
	// Some MCP servers answer in SSE framing, hence the dual Accept.
	req.Header.Set("Accept", "application/json, text/event-stream")
	if sessionID != "" {
		req.Header.Set("Mcp-Session-Id", sessionID)
	}
	// Caller-supplied headers (e.g. auth) can override the defaults above.
	for k, v := range hdrs {
		req.Header.Set(k, v)
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, "", err
	}
	defer resp.Body.Close()
	// Propagate a server-assigned session id, falling back to the caller's.
	newSessionID := resp.Header.Get("Mcp-Session-Id")
	if newSessionID == "" {
		newSessionID = sessionID
	}
	// Notification (no id) - no response body expected
	if id <= 0 {
		return nil, newSessionID, nil
	}
	if resp.StatusCode >= 400 {
		// Bound the error payload to 500 bytes.
		respBody, _ := io.ReadAll(resp.Body)
		if len(respBody) > 500 {
			respBody = respBody[:500]
		}
		return nil, newSessionID, fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(respBody))
	}
	respBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, newSessionID, err
	}
	// Handle SSE response: scan "data: " lines for the first decodable
	// JSON-RPC payload that carries a result or an error.
	contentType := resp.Header.Get("Content-Type")
	if strings.Contains(contentType, "text/event-stream") {
		for _, line := range strings.Split(string(respBody), "\n") {
			if strings.HasPrefix(line, "data: ") {
				data := strings.TrimPrefix(line, "data: ")
				var rpcResp jsonRPCResponse
				if json.Unmarshal([]byte(data), &rpcResp) == nil && (rpcResp.Result != nil || rpcResp.Error != nil) {
					if rpcResp.Error != nil {
						return &rpcResp, newSessionID, fmt.Errorf("RPC error %d: %s", rpcResp.Error.Code, rpcResp.Error.Message)
					}
					return &rpcResp, newSessionID, nil
				}
			}
		}
		return nil, newSessionID, fmt.Errorf("no valid JSON-RPC response in SSE stream")
	}
	// Handle JSON response
	var rpcResp jsonRPCResponse
	if err := json.Unmarshal(respBody, &rpcResp); err != nil {
		// Undecodable body: surface at most 200 bytes for diagnostics.
		if len(respBody) > 200 {
			respBody = respBody[:200]
		}
		return nil, newSessionID, fmt.Errorf("invalid response: %s", string(respBody))
	}
	if rpcResp.Error != nil {
		return &rpcResp, newSessionID, fmt.Errorf("RPC error %d: %s", rpcResp.Error.Code, rpcResp.Error.Message)
	}
	return &rpcResp, newSessionID, nil
}

View File

@@ -0,0 +1,114 @@
package router
import (
"net/http"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
)
// aiConversationGets lists the current user's AI conversations.
func (rt *Router) aiConversationGets(c *gin.Context) {
	me := c.MustGet("user").(*models.User)
	conversations, err := models.AIConversationGetsByUserId(rt.Ctx, me.Id)
	ginx.Dangerous(err)
	ginx.NewRender(c).Data(conversations, nil)
}
// aiConversationAdd creates a conversation owned by the current user and
// responds with the full created object.
func (rt *Router) aiConversationAdd(c *gin.Context) {
	var conv models.AIConversation
	ginx.BindJSON(c, &conv)

	// Ownership always comes from the session, never from the payload.
	me := c.MustGet("user").(*models.User)
	conv.UserId = me.Id

	ginx.Dangerous(conv.Verify())
	ginx.Dangerous(conv.Create(rt.Ctx))
	ginx.NewRender(c).Data(conv, nil)
}
// aiConversationGet returns a conversation together with its messages.
// Only the owner may read it; others receive 403.
func (rt *Router) aiConversationGet(c *gin.Context) {
	id := ginx.UrlParamInt64(c, "id")
	conv, err := models.AIConversationGetById(rt.Ctx, id)
	ginx.Dangerous(err)
	if conv == nil {
		ginx.Bomb(http.StatusNotFound, "conversation not found")
	}

	if me := c.MustGet("user").(*models.User); conv.UserId != me.Id {
		ginx.Bomb(http.StatusForbidden, "forbidden")
	}

	messages, err := models.AIConversationMessageGetsByConversationId(rt.Ctx, id)
	ginx.Dangerous(err)
	ginx.NewRender(c).Data(gin.H{
		"conversation": conv,
		"messages":     messages,
	}, nil)
}
// aiConversationPut renames a conversation. Only the owner may update it.
func (rt *Router) aiConversationPut(c *gin.Context) {
	conv, err := models.AIConversationGetById(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	ginx.Dangerous(err)
	if conv == nil {
		ginx.Bomb(http.StatusNotFound, "conversation not found")
	}

	if me := c.MustGet("user").(*models.User); conv.UserId != me.Id {
		ginx.Bomb(http.StatusForbidden, "forbidden")
	}

	// Only the title is updatable through this endpoint.
	var body struct {
		Title string `json:"title"`
	}
	ginx.BindJSON(c, &body)
	ginx.NewRender(c).Message(conv.Update(rt.Ctx, body.Title))
}
// aiConversationDel deletes a conversation. Only the owner may delete it.
func (rt *Router) aiConversationDel(c *gin.Context) {
	conv, err := models.AIConversationGetById(rt.Ctx, ginx.UrlParamInt64(c, "id"))
	ginx.Dangerous(err)
	if conv == nil {
		ginx.Bomb(http.StatusNotFound, "conversation not found")
	}
	if me := c.MustGet("user").(*models.User); conv.UserId != me.Id {
		ginx.Bomb(http.StatusForbidden, "forbidden")
	}
	ginx.NewRender(c).Message(conv.Delete(rt.Ctx))
}
// aiConversationMessageAdd appends a batch of messages to a conversation
// owned by the current user, then bumps the conversation's update time.
// NOTE(review): messages are inserted one by one without a transaction; a
// mid-batch failure aborts the request but keeps earlier rows — confirm
// this partial-write behavior is acceptable.
func (rt *Router) aiConversationMessageAdd(c *gin.Context) {
	id := ginx.UrlParamInt64(c, "id")
	obj, err := models.AIConversationGetById(rt.Ctx, id)
	ginx.Dangerous(err)
	if obj == nil {
		ginx.Bomb(http.StatusNotFound, "conversation not found")
	}
	// Only the owner may write to the conversation.
	me := c.MustGet("user").(*models.User)
	if obj.UserId != me.Id {
		ginx.Bomb(http.StatusForbidden, "forbidden")
	}
	var msgs []models.AIConversationMessage
	ginx.BindJSON(c, &msgs)
	for i := range msgs {
		// Force the path id; the payload cannot redirect messages elsewhere.
		msgs[i].ConversationId = id
		ginx.Dangerous(msgs[i].Create(rt.Ctx))
	}
	// Update conversation timestamp (best-effort; return value ignored)
	obj.UpdateTime(rt.Ctx)
	ginx.NewRender(c).Message(nil)
}

View File

@@ -0,0 +1,345 @@
package router
import (
"encoding/json"
"fmt"
"io"
"net/http"
"time"
"github.com/ccfos/nightingale/v6/aiagent"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/ccfos/nightingale/v6/pkg/prom"
"github.com/gin-gonic/gin"
"github.com/toolkits/pkg/logger"
)
// AIChatRequest is the generic chat request dispatched by action_key.
type AIChatRequest struct {
	ActionKey string                 `json:"action_key"` // e.g. "query_generator"; selects an entry in actionRegistry
	UserInput string                 `json:"user_input"` // the user's natural-language request
	History   []aiagent.ChatMessage  `json:"history,omitempty"` // prior turns (not referenced by the visible handlers)
	Context   map[string]interface{} `json:"context,omitempty"` // action-specific params
}
// actionHandler defines how each action_key is processed. All function
// hooks are optional; aiChat skips any that are nil.
type actionHandler struct {
	useCase     string // maps to AIAgent.UseCase for finding the right agent config
	validate    func(req *AIChatRequest) error             // request validation, run before agent lookup
	selectTools func(req *AIChatRequest) []string          // builtin tool names to expose to the agent
	buildPrompt func(req *AIChatRequest) string            // renders the user prompt template
	buildInputs func(req *AIChatRequest) map[string]string // builds the workflow inputs map
}
// actionRegistry maps each supported action_key to its processing hooks.
// Supporting a new AI action means registering a new entry here.
var actionRegistry = map[string]*actionHandler{
	"query_generator": {
		useCase:     "chat",
		validate:    validateQueryGenerator,
		selectTools: selectQueryGeneratorTools,
		buildPrompt: buildQueryGeneratorPrompt,
		buildInputs: buildQueryGeneratorInputs,
	},
}
// --- query_generator action ---
// ctxStr returns ctx[key] when it holds a string; otherwise "".
func ctxStr(ctx map[string]interface{}, key string) string {
	s, _ := ctx[key].(string)
	return s
}
// ctxInt64 coerces ctx[key] to int64, accepting float64 (the default JSON
// number decoding), int64, and json.Number. Returns 0 when the key is
// absent or holds any other type.
func ctxInt64(ctx map[string]interface{}, key string) int64 {
	v, ok := ctx[key]
	if !ok {
		return 0
	}
	switch n := v.(type) {
	case float64:
		return int64(n)
	case int64:
		return n
	case json.Number:
		parsed, _ := n.Int64()
		return parsed
	default:
		return 0
	}
}
// validateQueryGenerator ensures the request context names a datasource
// type and carries a non-zero datasource id.
func validateQueryGenerator(req *AIChatRequest) error {
	if ctxStr(req.Context, "datasource_type") == "" {
		return fmt.Errorf("context.datasource_type is required")
	}
	if ctxInt64(req.Context, "datasource_id") == 0 {
		return fmt.Errorf("context.datasource_id is required")
	}
	return nil
}
// selectQueryGeneratorTools picks the builtin tool set matching the
// datasource family: metric exploration for prometheus, schema exploration
// for SQL engines, and nil for anything else.
func selectQueryGeneratorTools(req *AIChatRequest) []string {
	switch ctxStr(req.Context, "datasource_type") {
	case "prometheus":
		return []string{"list_metrics", "get_metric_labels"}
	case "mysql", "doris", "ck", "clickhouse", "pgsql", "postgresql":
		return []string{"list_databases", "list_tables", "describe_table"}
	}
	return nil
}
// buildQueryGeneratorPrompt renders the LLM prompt for the query_generator
// action. Prometheus datasources get a PromQL-oriented prompt; every other
// type gets a SQL prompt parameterized by the datasource type plus optional
// database/table hints. Both prompts instruct the model to emit a JSON
// Final Answer with "query" and "explanation" fields.
func buildQueryGeneratorPrompt(req *AIChatRequest) string {
	dsType := ctxStr(req.Context, "datasource_type")
	dbName := ctxStr(req.Context, "database_name")
	tableName := ctxStr(req.Context, "table_name")
	switch dsType {
	case "prometheus":
		return fmt.Sprintf(`You are a PromQL expert. The user wants to query Prometheus metrics.
User request: %s
Please use the available tools to explore the metrics and generate the correct PromQL query.
- First use list_metrics to find relevant metrics
- Then use get_metric_labels to understand the label structure
- Finally provide the PromQL query as your Final Answer
Your Final Answer MUST be a valid JSON object with these fields:
{"query": "<the PromQL query>", "explanation": "<brief explanation in the user's language>"}`, req.UserInput)
	default: // SQL-based datasources
		// Optional scoping hints appended to the prompt when provided.
		dbContext := ""
		if dbName != "" {
			dbContext += fmt.Sprintf("\nTarget database: %s", dbName)
		}
		if tableName != "" {
			dbContext += fmt.Sprintf("\nTarget table: %s", tableName)
		}
		return fmt.Sprintf(`You are a SQL expert for %s databases. The user wants to query data.
%s
User request: %s
Please use the available tools to explore the database schema and generate the correct SQL query.
- Use list_databases to see available databases
- Use list_tables to see tables in the target database
- Use describe_table to understand the table structure
- Finally provide the SQL query as your Final Answer
Your Final Answer MUST be a valid JSON object with these fields:
{"query": "<the SQL query>", "explanation": "<brief explanation in the user's language>"}`, dsType, dbContext, req.UserInput)
	}
}
// buildQueryGeneratorInputs flattens the relevant context keys into the
// workflow inputs map, always carrying the raw user input.
func buildQueryGeneratorInputs(req *AIChatRequest) map[string]string {
	inputs := map[string]string{"user_input": req.UserInput}
	for _, key := range []string{"datasource_type", "datasource_id", "database_name", "table_name"} {
		if v := ctxStr(req.Context, key); v != "" {
			inputs[key] = v
		}
	}
	// datasource_id usually arrives as a JSON number rather than a string;
	// fall back to the numeric coercion when the string lookup found nothing.
	if inputs["datasource_id"] == "" {
		if id := ctxInt64(req.Context, "datasource_id"); id > 0 {
			inputs["datasource_id"] = fmt.Sprintf("%d", id)
		}
	}
	return inputs
}
// --- generic handler ---
// aiChat is the generic SSE chat endpoint. It dispatches the request by
// action_key through actionRegistry, resolves the agent and its LLM
// config, assembles tools/prompt/inputs, runs the agent in a background
// goroutine, and streams its chunks to the client as server-sent events.
func (rt *Router) aiChat(c *gin.Context) {
	if !rt.Center.AIAgent.Enable {
		ginx.Bomb(http.StatusServiceUnavailable, "AI Agent is not enabled")
		return
	}
	var req AIChatRequest
	ginx.BindJSON(c, &req)
	if req.UserInput == "" {
		ginx.Bomb(http.StatusBadRequest, "user_input is required")
		return
	}
	if req.ActionKey == "" {
		ginx.Bomb(http.StatusBadRequest, "action_key is required")
		return
	}
	// Normalize so action hooks can index Context without nil checks.
	if req.Context == nil {
		req.Context = make(map[string]interface{})
	}
	handler, ok := actionRegistry[req.ActionKey]
	if !ok {
		ginx.Bomb(http.StatusBadRequest, "unsupported action_key: %s", req.ActionKey)
		return
	}
	logger.Infof("[AIChat] action=%s, user_input=%q", req.ActionKey, truncStr(req.UserInput, 100))
	// Action-specific validation
	if handler.validate != nil {
		if err := handler.validate(&req); err != nil {
			ginx.Bomb(http.StatusBadRequest, err.Error())
			return
		}
	}
	// Find AI agent by use_case
	agent, err := models.AIAgentGetByUseCase(rt.Ctx, handler.useCase)
	if err != nil || agent == nil {
		ginx.Bomb(http.StatusBadRequest, "no AI agent configured for use_case=%s", handler.useCase)
		return
	}
	// Resolve LLM config
	llmCfg, err := models.AILLMConfigGetById(rt.Ctx, agent.LLMConfigId)
	if err != nil || llmCfg == nil {
		ginx.Bomb(http.StatusBadRequest, "referenced LLM config not found")
		return
	}
	agent.LLMConfig = llmCfg
	// Select builtin tools for this action (may be none)
	var tools []aiagent.AgentTool
	if handler.selectTools != nil {
		toolNames := handler.selectTools(&req)
		if toolNames != nil {
			tools = aiagent.GetBuiltinToolDefs(toolNames)
		}
	}
	// Parse extra config; the agent timeout is in milliseconds (default 120s).
	extraConfig := llmCfg.ExtraConfig
	timeout := 120000
	if extraConfig.TimeoutSeconds > 0 {
		timeout = extraConfig.TimeoutSeconds * 1000
	}
	// Build prompt
	userPrompt := ""
	if handler.buildPrompt != nil {
		userPrompt = handler.buildPrompt(&req)
	}
	// Build workflow inputs (action hook replaces the default entirely)
	inputs := map[string]string{"user_input": req.UserInput}
	if handler.buildInputs != nil {
		inputs = handler.buildInputs(&req)
	}
	// Create agent
	agentCfg := aiagent.NewAgent(&aiagent.AIAgentConfig{
		Provider:           llmCfg.APIType,
		LLMURL:             llmCfg.APIURL,
		Model:              llmCfg.Model,
		APIKey:             llmCfg.APIKey,
		Headers:            extraConfig.CustomHeaders,
		AgentMode:          aiagent.AgentModeReAct,
		Tools:              tools,
		Timeout:            timeout,
		Stream:             true,
		UserPromptTemplate: userPrompt,
		SkipSSLVerify:      extraConfig.SkipTLSVerify,
		Proxy:              extraConfig.Proxy,
		Temperature:        extraConfig.Temperature,
		MaxTokens:          extraConfig.MaxTokens,
	})
	// Inject PromClient getter so builtin prometheus tools can reach datasources
	aiagent.SetPromClientGetter(func(dsId int64) prom.API {
		return rt.PromClients.GetCli(dsId)
	})
	// Streaming setup: the agent goroutine produces chunks, the SSE loop consumes.
	streamChan := make(chan *models.StreamChunk, 100)
	wfCtx := &models.WorkflowContext{
		Stream:     true,
		StreamChan: streamChan,
		Inputs:     inputs,
	}
	// Standard SSE headers; X-Accel-Buffering disables nginx response buffering.
	c.Header("Content-Type", "text/event-stream")
	c.Header("Cache-Control", "no-cache")
	c.Header("Connection", "keep-alive")
	c.Header("X-Accel-Buffering", "no")
	startTime := time.Now()
	go func() {
		// Recover panics so the SSE loop still receives a terminal error chunk.
		defer func() {
			if r := recover(); r != nil {
				logger.Errorf("[AIChat] PANIC in agent goroutine: %v", r)
				streamChan <- &models.StreamChunk{
					Type:      models.StreamTypeError,
					Content:   fmt.Sprintf("internal error: %v", r),
					Done:      true,
					Timestamp: time.Now().UnixMilli(),
				}
				close(streamChan)
			}
		}()
		// NOTE(review): on the non-panic path the channel is presumably closed
		// inside Process — confirm, otherwise the SSE loop could block forever.
		_, _, err := agentCfg.Process(rt.Ctx, wfCtx)
		if err != nil {
			logger.Errorf("[AIChat] agent Process error: %v", err)
		}
	}()
	// Stream SSE events until the channel closes or a terminal chunk arrives.
	var accumulatedMessage string
	c.Stream(func(w io.Writer) bool {
		chunk, ok := <-streamChan
		if !ok {
			return false
		}
		data, _ := json.Marshal(chunk)
		// Accumulate visible text/thinking output for the final "done" payload.
		if chunk.Type == models.StreamTypeText || chunk.Type == models.StreamTypeThinking {
			if chunk.Delta != "" {
				accumulatedMessage += chunk.Delta
			} else if chunk.Content != "" {
				accumulatedMessage += chunk.Content
			}
		}
		if chunk.Type == models.StreamTypeError {
			fmt.Fprintf(w, "event: error\ndata: %s\n\n", data)
			c.Writer.Flush()
			return false
		}
		if chunk.Done || chunk.Type == models.StreamTypeDone {
			doneData := map[string]interface{}{
				"type":        "done",
				"duration_ms": time.Since(startTime).Milliseconds(),
				"message":     accumulatedMessage,
				"response":    chunk.Content,
			}
			finalData, _ := json.Marshal(doneData)
			fmt.Fprintf(w, "event: done\ndata: %s\n\n", finalData)
			c.Writer.Flush()
			return false
		}
		fmt.Fprintf(w, "event: chunk\ndata: %s\n\n", data)
		c.Writer.Flush()
		return true
	})
}
// truncStr shortens s for logging, appending "..." when it exceeds maxLen
// bytes. The cut position is moved back to the nearest rune boundary so the
// result is always valid UTF-8 (the previous byte slice could split a
// multi-byte character in half).
func truncStr(s string, maxLen int) string {
	if len(s) <= maxLen {
		return s
	}
	cut := maxLen
	// Back up over UTF-8 continuation bytes (0b10xxxxxx) so a rune is
	// never split.
	for cut > 0 && s[cut]&0xC0 == 0x80 {
		cut--
	}
	return s[:cut] + "..."
}

View File

@@ -13,10 +13,10 @@ import (
"github.com/ccfos/nightingale/v6/alert/mute"
"github.com/ccfos/nightingale/v6/models"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/ccfos/nightingale/v6/pkg/strx"
"github.com/ccfos/nightingale/v6/pushgw/pconf"
"github.com/ccfos/nightingale/v6/pushgw/writer"
"github.com/ccfos/nightingale/v6/pkg/ginx"
"github.com/gin-gonic/gin"
"github.com/jinzhu/copier"
@@ -886,6 +886,7 @@ func (rt *Router) batchAlertRuleClone(c *gin.Context) {
func (rt *Router) timezonesGet(c *gin.Context) {
// 返回常用时区列表(按时差去重,每个时差只保留一个代表性时区)
timezones := []string{
"Local",
"UTC",
"Asia/Shanghai", // UTC+8 (代表 Asia/Hong_Kong, Asia/Singapore 等)
"Asia/Tokyo", // UTC+9 (代表 Asia/Seoul 等)

251
doc/api/ai-agent.md Normal file
View File

@@ -0,0 +1,251 @@
# AI Agent API
所有接口需要管理员权限(`auth` + `admin`)。
## 数据结构
### AIAgent
| 字段 | 类型 | 必填 | 说明 |
|------|------|------|------|
| id | int64 | - | 主键,自增 |
| name | string | 是 | Agent 名称 |
| description | string | 否 | 描述 |
| use_case | string | 否 | 用途场景,如 `chat` |
| llm_config_id | int64 | 是 | 关联的 LLM 配置 ID |
| skill_ids | int64[] | 否 | 关联的 Skill ID 列表 |
| mcp_server_ids | int64[] | 否 | 关联的 MCP Server ID 列表 |
| enabled | int | 否 | 是否启用,默认 1 |
| created_at | int64 | - | 创建时间Unix 时间戳) |
| created_by | string | - | 创建人 |
| updated_at | int64 | - | 更新时间Unix 时间戳) |
| updated_by | string | - | 更新人 |
| llm_config | object | - | 运行时字段,关联的 LLM 配置对象(不存储) |
---
## 获取 Agent 列表
```
GET /api/n9e/ai-agents
```
### 响应
```json
{
"dat": [
{
"id": 1,
"name": "chat-agent",
"description": "AI 对话 Agent",
"use_case": "chat",
"llm_config_id": 1,
"skill_ids": [1, 2],
"mcp_server_ids": [1],
"enabled": 1,
"created_at": 1710000000,
"created_by": "admin",
"updated_at": 1710000000,
"updated_by": "admin"
}
],
"err": ""
}
```
---
## 获取 Agent 详情
```
GET /api/n9e/ai-agent/:id
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| id | int64 | Agent ID |
### 响应
```json
{
"dat": {
"id": 1,
"name": "chat-agent",
"description": "AI 对话 Agent",
"use_case": "chat",
"llm_config_id": 1,
"skill_ids": [1, 2],
"mcp_server_ids": [1],
"enabled": 1,
"created_at": 1710000000,
"created_by": "admin",
"updated_at": 1710000000,
"updated_by": "admin"
},
"err": ""
}
```
### 错误
- `404` Agent 不存在
---
## 创建 Agent
```
POST /api/n9e/ai-agents
```
### 请求体
```json
{
"name": "chat-agent",
"description": "AI 对话 Agent",
"use_case": "chat",
"llm_config_id": 1,
"skill_ids": [1, 2],
"mcp_server_ids": [1],
"enabled": 1
}
```
### 校验规则
- `name` 必填
- `llm_config_id` 必填,且大于 0
### 响应
```json
{
"dat": 1,
"err": ""
}
```
返回新创建的 Agent ID。
---
## 更新 Agent
```
PUT /api/n9e/ai-agent/:id
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| id | int64 | Agent ID |
### 请求体
```json
{
"name": "chat-agent-v2",
"description": "更新后的描述",
"use_case": "chat",
"llm_config_id": 2,
"skill_ids": [1, 3],
"mcp_server_ids": [],
"enabled": 1
}
```
### 校验规则
同创建接口。
### 响应
```json
{
"dat": "",
"err": ""
}
```
### 错误
- `404` Agent 不存在
---
## 删除 Agent
```
DELETE /api/n9e/ai-agent/:id
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| id | int64 | Agent ID |
### 响应
```json
{
"dat": "",
"err": ""
}
```
### 错误
- `404` Agent 不存在
---
## 测试 Agent
通过 Agent 关联的 LLM 配置发送测试请求,验证连通性。
```
POST /api/n9e/ai-agent/:id/test
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| id | int64 | Agent ID |
### 响应
```json
{
"dat": {
"success": true,
"duration_ms": 1234
},
"err": ""
}
```
失败时:
```json
{
"dat": {
"success": false,
"duration_ms": 5000,
"error": "HTTP 401: Unauthorized"
},
"err": ""
}
```
### 错误
- `404` Agent 不存在
- `400` 关联的 LLM 配置不存在

290
doc/api/ai-llm-config.md Normal file
View File

@@ -0,0 +1,290 @@
# AI LLM Config API
所有接口需要管理员权限(`auth` + `admin`)。
## 数据结构
### AILLMConfig
| 字段 | 类型 | 必填 | 说明 |
|------|------|------|------|
| id | int64 | - | 主键,自增 |
| name | string | 是 | 配置名称 |
| description | string | 否 | 描述 |
| api_type | string | 是 | 提供商类型:`openai``claude``gemini` |
| api_url | string | 是 | API 地址 |
| api_key | string | 是 | API 密钥 |
| model | string | 是 | 模型名称 |
| extra_config | object | 否 | 高级配置,见 LLMExtraConfig |
| enabled | int | 否 | 是否启用,默认 1 |
| created_at | int64 | - | 创建时间Unix 时间戳) |
| created_by | string | - | 创建人 |
| updated_at | int64 | - | 更新时间Unix 时间戳) |
| updated_by | string | - | 更新人 |
### LLMExtraConfig
| 字段 | 类型 | 说明 |
|------|------|------|
| timeout_seconds | int | 请求超时时间(秒),默认 30 |
| skip_tls_verify | bool | 跳过 TLS 证书校验 |
| proxy | string | HTTP 代理地址 |
| custom_headers | map[string]string | 自定义请求头 |
| custom_params | map[string]any | 自定义请求参数 |
| temperature | float64 | 生成温度(可选) |
| max_tokens | int | 最大输出 Token 数(可选) |
| context_length | int | 上下文窗口大小(可选) |
---
## 获取 LLM 配置列表
```
GET /api/n9e/ai-llm-configs
```
### 响应
```json
{
"dat": [
{
"id": 1,
"name": "gpt-4o",
"description": "OpenAI GPT-4o",
"api_type": "openai",
"api_url": "https://api.openai.com",
"api_key": "sk-xxx",
"model": "gpt-4o",
"extra_config": {
"temperature": 0.7,
"max_tokens": 4096
},
"enabled": 1,
"created_at": 1710000000,
"created_by": "admin",
"updated_at": 1710000000,
"updated_by": "admin"
}
],
"err": ""
}
```
---
## 获取 LLM 配置详情
```
GET /api/n9e/ai-llm-config/:id
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| id | int64 | LLM 配置 ID |
### 响应
```json
{
"dat": {
"id": 1,
"name": "gpt-4o",
"description": "OpenAI GPT-4o",
"api_type": "openai",
"api_url": "https://api.openai.com",
"api_key": "sk-xxx",
"model": "gpt-4o",
"extra_config": {
"temperature": 0.7,
"max_tokens": 4096
},
"enabled": 1,
"created_at": 1710000000,
"created_by": "admin",
"updated_at": 1710000000,
"updated_by": "admin"
},
"err": ""
}
```
### 错误
- `404` LLM 配置不存在
---
## 创建 LLM 配置
```
POST /api/n9e/ai-llm-configs
```
### 请求体
```json
{
"name": "gpt-4o",
"description": "OpenAI GPT-4o",
"api_type": "openai",
"api_url": "https://api.openai.com",
"api_key": "sk-xxx",
"model": "gpt-4o",
"extra_config": {
"timeout_seconds": 60,
"temperature": 0.7,
"max_tokens": 4096,
"custom_headers": {
"X-Custom": "value"
}
},
"enabled": 1
}
```
### 校验规则
- `name``api_type``api_url``api_key``model` 均为必填
### 响应
```json
{
"dat": 1,
"err": ""
}
```
返回新创建的配置 ID。
---
## 更新 LLM 配置
```
PUT /api/n9e/ai-llm-config/:id
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| id | int64 | LLM 配置 ID |
### 请求体
同创建接口。**注意:如果 `api_key` 为空,则保留原值不更新。**
### 校验规则
同创建接口。
### 响应
```json
{
"dat": "",
"err": ""
}
```
### 错误
- `404` LLM 配置不存在
---
## 删除 LLM 配置
```
DELETE /api/n9e/ai-llm-config/:id
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| id | int64 | LLM 配置 ID |
### 响应
```json
{
"dat": "",
"err": ""
}
```
### 错误
- `404` LLM 配置不存在
---
## 测试 LLM 连接
无需先创建配置,直接传入连接参数进行连通性测试。
```
POST /api/n9e/ai-llm-config/test
```
### 请求体
```json
{
"api_type": "openai",
"api_url": "https://api.openai.com",
"api_key": "sk-xxx",
"model": "gpt-4o",
"extra_config": {
"timeout_seconds": 30,
"skip_tls_verify": false,
"proxy": "",
"custom_headers": {}
}
}
```
### 校验规则
- `api_type``api_url``api_key``model` 均为必填
### 测试行为
根据 `api_type` 向对应的 API 发送一个最小请求(内容为 "Hi",`max_tokens=5`):
| api_type | 请求地址 | 认证方式 |
|----------|---------|---------|
| openai | `{api_url}/chat/completions` | `Authorization: Bearer {api_key}` |
| claude | `{api_url}/v1/messages` | `x-api-key: {api_key}` |
| gemini | `{api_url}/v1beta/models/{model}:generateContent?key={api_key}` | URL 参数 |
### 响应
成功:
```json
{
"dat": {
"success": true,
"duration_ms": 856
},
"err": ""
}
```
失败:
```json
{
"dat": {
"success": false,
"duration_ms": 5000
},
"err": "HTTP 401: {\"error\": \"invalid api key\"}"
}
```

292
doc/api/ai-mcp-server.md Normal file
View File

@@ -0,0 +1,292 @@
# MCP Server API
所有接口需要管理员权限(`auth` + `admin`)。
## 数据结构
### MCPServer
| 字段 | 类型 | 必填 | 说明 |
|------|------|------|------|
| id | int64 | - | 主键,自增 |
| name | string | 是 | 名称 |
| url | string | 是 | MCP Server 地址 |
| headers | map[string]string | 否 | 自定义 HTTP 请求头,用于认证等 |
| description | string | 否 | 描述 |
| enabled | int | 否 | 是否启用,默认 1 |
| created_at | int64 | - | 创建时间Unix 时间戳) |
| created_by | string | - | 创建人 |
| updated_at | int64 | - | 更新时间Unix 时间戳) |
| updated_by | string | - | 更新人 |
---
## 获取 MCP Server 列表
```
GET /api/n9e/mcp-servers
```
### 响应
```json
{
"dat": [
{
"id": 1,
"name": "my-mcp-server",
"url": "https://mcp.example.com/sse",
"headers": {
"Authorization": "Bearer xxx"
},
"description": "示例 MCP Server",
"enabled": 1,
"created_at": 1710000000,
"created_by": "admin",
"updated_at": 1710000000,
"updated_by": "admin"
}
],
"err": ""
}
```
---
## 获取 MCP Server 详情
```
GET /api/n9e/mcp-server/:id
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| id | int64 | MCP Server ID |
### 响应
```json
{
"dat": {
"id": 1,
"name": "my-mcp-server",
"url": "https://mcp.example.com/sse",
"headers": {
"Authorization": "Bearer xxx"
},
"description": "示例 MCP Server",
"enabled": 1,
"created_at": 1710000000,
"created_by": "admin",
"updated_at": 1710000000,
"updated_by": "admin"
},
"err": ""
}
```
### 错误
- `404` MCP Server 不存在
---
## 创建 MCP Server
```
POST /api/n9e/mcp-servers
```
### 请求体
```json
{
"name": "my-mcp-server",
"url": "https://mcp.example.com/sse",
"headers": {
"Authorization": "Bearer xxx"
},
"description": "示例 MCP Server",
"enabled": 1
}
```
### 校验规则
- `name` 必填(自动 trim
- `url` 必填(自动 trim
### 响应
```json
{
"dat": 1,
"err": ""
}
```
返回新创建的 MCP Server ID。
---
## 更新 MCP Server
```
PUT /api/n9e/mcp-server/:id
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| id | int64 | MCP Server ID |
### 请求体
同创建接口。
### 可更新字段
`name``url``headers``description``enabled`
### 响应
```json
{
"dat": "",
"err": ""
}
```
### 错误
- `404` MCP Server 不存在
---
## 删除 MCP Server
```
DELETE /api/n9e/mcp-server/:id
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| id | int64 | MCP Server ID |
### 响应
```json
{
"dat": "",
"err": ""
}
```
### 错误
- `404` MCP Server 不存在
---
## 测试 MCP Server 连接
无需先创建,直接传入连接参数进行连通性测试。通过 MCP 协议初始化握手并获取工具列表来验证连通性。
```
POST /api/n9e/mcp-server/test
```
### 请求体
```json
{
"url": "https://mcp.example.com/sse",
"headers": {
"Authorization": "Bearer xxx"
}
}
```
### 校验规则
- `url` 必填
### 测试行为
1. 发送 `initialize` 请求(协议版本 `2024-11-05`
2. 发送 `notifications/initialized` 通知
3. 发送 `tools/list` 请求获取工具列表
支持 JSON 和 SSE`text/event-stream`)两种响应格式。
### 响应
成功:
```json
{
"dat": {
"success": true,
"duration_ms": 320,
"tool_count": 5
},
"err": ""
}
```
失败:
```json
{
"dat": {
"success": false,
"duration_ms": 5000,
"tool_count": 0,
"error": "initialize: HTTP 403: Forbidden"
},
"err": ""
}
```
---
## 获取 MCP Server 工具列表
获取已创建的 MCP Server 提供的工具列表。
```
GET /api/n9e/mcp-server/:id/tools
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| id | int64 | MCP Server ID |
### 响应
```json
{
"dat": [
{
"name": "query_database",
"description": "Execute a SQL query against the database"
},
{
"name": "search_logs",
"description": "Search through application logs"
}
],
"err": ""
}
```
### 错误
- `404` MCP Server 不存在

409
doc/api/ai-skill.md Normal file
View File

@@ -0,0 +1,409 @@
# AI Skill API
所有接口需要管理员权限(`auth` + `admin`)。
## 数据结构
### AISkill
| 字段 | 类型 | 必填 | 说明 |
|------|------|------|------|
| id | int64 | - | 主键,自增 |
| name | string | 是 | Skill 名称 |
| description | string | 否 | 描述,建议说明用途和触发场景 |
| instructions | string | 是 | 提示词指令,支持 Markdown |
| license | string | 否 | 许可证,如 `MIT``Apache-2.0` |
| compatibility | string | 否 | 兼容性说明,如环境依赖、网络需求等 |
| metadata | map[string]string | 否 | 扩展元数据,如 `{"author": "org", "version": "1.0"}` |
| allowed_tools | string | 否 | 预授权工具列表,空格分隔,如 `Bash(git:*) Read` |
| enabled | int | 否 | 是否启用,默认 1 |
| created_at | int64 | - | 创建时间Unix 时间戳) |
| created_by | string | - | 创建人 |
| updated_at | int64 | - | 更新时间Unix 时间戳) |
| updated_by | string | - | 更新人 |
| files | AISkillFile[] | - | 关联的资源文件列表(仅详情接口返回,不含 content |
> `license`、`compatibility`、`metadata`、`allowed_tools` 字段参考 [Agent Skills Specification](https://agentskills.io/specification)。
### AISkillFile
| 字段 | 类型 | 说明 |
|------|------|------|
| id | int64 | 主键,自增 |
| skill_id | int64 | 关联的 Skill ID |
| name | string | 文件名 |
| content | string | 文件内容(仅文件详情接口返回) |
| size | int64 | 文件大小(字节),创建时自动计算 |
| created_at | int64 | 创建时间Unix 时间戳) |
| created_by | string | 创建人 |
---
## 获取 Skill 列表
```
GET /api/n9e/ai-skills
```
### 查询参数
| 参数 | 类型 | 说明 |
|------|------|------|
| search | string | 可选,按 name 或 description 模糊搜索 |
### 响应
```json
{
"dat": [
{
"id": 1,
"name": "query-generator",
"description": "生成 PromQL/SQL 查询语句",
"instructions": "# Query Generator\n...",
"license": "Apache-2.0",
"compatibility": "Requires network access",
"metadata": {
"author": "nightingale",
"version": "1.0"
},
"allowed_tools": "Bash(git:*) Read",
"enabled": 1,
"created_at": 1710000000,
"created_by": "admin",
"updated_at": 1710000000,
"updated_by": "admin"
}
],
"err": ""
}
```
> 列表接口不返回 `files` 字段。
---
## 获取 Skill 详情
```
GET /api/n9e/ai-skill/:id
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| id | int64 | Skill ID |
### 响应
返回 Skill 完整信息,并包含关联的资源文件列表(不含文件 content
```json
{
"dat": {
"id": 1,
"name": "query-generator",
"description": "生成 PromQL/SQL 查询语句",
"instructions": "# Query Generator\n...",
"license": "Apache-2.0",
"compatibility": "Requires network access",
"metadata": {
"author": "nightingale",
"version": "1.0"
},
"allowed_tools": "Bash(git:*) Read",
"enabled": 1,
"created_at": 1710000000,
"created_by": "admin",
"updated_at": 1710000000,
"updated_by": "admin",
"files": [
{
"id": 10,
"skill_id": 1,
"name": "reference.md",
"size": 2048,
"created_at": 1710000000,
"created_by": "admin"
}
]
},
"err": ""
}
```
### 错误
- `404` Skill 不存在
---
## 创建 Skill
```
POST /api/n9e/ai-skills
```
### 请求体
```json
{
"name": "query-generator",
"description": "生成 PromQL/SQL 查询语句",
"instructions": "# Query Generator\n根据用户输入生成查询语句...",
"license": "Apache-2.0",
"compatibility": "Requires network access",
"metadata": {
"author": "nightingale",
"version": "1.0"
},
"allowed_tools": "Bash(git:*) Read",
"enabled": 1
}
```
### 校验规则
- `name` 必填(自动 trim
- `instructions` 必填(自动 trim
### 响应
```json
{
"dat": 1,
"err": ""
}
```
返回新创建的 Skill ID。
---
## 更新 Skill
```
PUT /api/n9e/ai-skill/:id
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| id | int64 | Skill ID |
### 请求体
同创建接口。
### 可更新字段
`name``description``instructions``license``compatibility``metadata``allowed_tools``enabled`
### 响应
```json
{
"dat": "",
"err": ""
}
```
### 错误
- `404` Skill 不存在
---
## 删除 Skill
删除 Skill 时会级联删除关联的所有资源文件。
```
DELETE /api/n9e/ai-skill/:id
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| id | int64 | Skill ID |
### 响应
```json
{
"dat": "",
"err": ""
}
```
### 错误
- `404` Skill 不存在
---
## 导入 Skill
`.md` 文件导入 Skill支持 YAML frontmatter 格式。
```
POST /api/n9e/ai-skills/import
```
### 请求格式
`multipart/form-data`
| 字段 | 类型 | 说明 |
|------|------|------|
| file | file | `.md` 文件 |
### 文件格式
支持标准的 YAML frontmatter + Markdown body
```markdown
---
name: my-skill
description: 技能描述
license: MIT
compatibility: Requires git, docker
metadata:
author: my-org
version: "1.0"
allowed-tools: Bash(git:*) Read
---
# Skill 指令内容
这里是 instructions 部分...
```
- 如果文件包含有效的 frontmatter则从中提取 `name``description``license``compatibility``metadata``allowed-tools`
- 如果没有 frontmatter则以文件名作为 `name`,全部内容作为 `instructions`
### 响应
```json
{
"dat": 1,
"err": ""
}
```
返回新创建的 Skill ID。
### 错误
- `400` 仅支持 `.md` 文件
---
## 上传 Skill 资源文件
```
POST /api/n9e/ai-skill/:id/files
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| id | int64 | Skill ID |
### 请求格式
`multipart/form-data`
| 字段 | 类型 | 说明 |
|------|------|------|
| file | file | 资源文件 |
### 限制
- 允许的文件类型:`.md``.txt``.json``.yaml``.yml``.csv`
- 单文件最大 2MB
- 每个 Skill 最多 20 个资源文件
### 响应
```json
{
"dat": 10,
"err": ""
}
```
返回新创建的文件 ID。
### 错误
- `404` Skill 不存在
- `400` 文件类型不支持 / 文件超过 2MB / 文件数量超过 20
---
## 获取资源文件详情
获取单个资源文件的完整内容。
```
GET /api/n9e/ai-skill-file/:fileId
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| fileId | int64 | 文件 ID |
### 响应
```json
{
"dat": {
"id": 10,
"skill_id": 1,
"name": "reference.md",
"content": "# Reference\n文件完整内容...",
"size": 2048,
"created_at": 1710000000,
"created_by": "admin"
},
"err": ""
}
```
### 错误
- `404` 文件不存在
---
## 删除资源文件
```
DELETE /api/n9e/ai-skill-file/:fileId
```
### 路径参数
| 参数 | 类型 | 说明 |
|------|------|------|
| fileId | int64 | 文件 ID |
### 响应
```json
{
"dat": "",
"err": ""
}
```
### 错误
- `404` 文件不存在

View File

@@ -201,7 +201,7 @@ func (d *Doris) NewWriteConn(ctx context.Context, database string) (*sql.DB, err
func (d *Doris) createTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc) {
timeout := d.Timeout
if timeout == 0 {
timeout = 60
timeout = 60000
}
return context.WithTimeout(ctx, time.Duration(timeout)*time.Millisecond)
}

View File

@@ -0,0 +1,691 @@
---
name: ops-troubleshooting
description: This skill should be used when the user asks to "troubleshoot", "diagnose", "debug alert", "investigate incident", "故障定位", "告警排查", "问题诊断", "排障", "查告警", "分析告警", "根因分析", "灭火图", "北极星", "tracing", "链路追踪", "日志分析", or discusses monitoring/alerting/observability issues in Flashcat platform (n9e-plus, fc-insight).
version: 5.0.0
---
# Flashcat 故障定位专家 (SRE Troubleshooting Expert)
你是一位拥有 10 年以上经验的资深 SRE专门负责使用 Flashcat 可观测性平台进行故障定位和根因分析。
---
## 核心原则
1. **证据链驱动**: 每个推断都要有页面数据支撑
2. **按需查询**: 根据分析需要逐步查询,不盲目拉取所有数据
3. **最小权限**: 只查询必要的数据,避免敏感信息泄露
4. **时间线优先**: 关注故障发生的时序关系,寻找根因
5. **定位直接原因**: 不追求 100% 覆盖根因,聚焦于定位直接原因和止损依据
6. **聚焦故障的时间**:如果跳到其他页面,没找到线索,重新回到故障链接的页面分析
---
## 数据获取方式Playwright 访问页面
使用 Playwright MCP 工具直接访问 Flashcat Web 页面,可视化查看数据。
**适用场景**
- 查看灭火图全局视图,识别飘红/飘黄卡片
- 浏览北极星业务线指标
- 查看仪表盘和图表趋势
- 分析 Trace 瀑布图
- 需要截图作为证据
- 探索性分析,不确定需要哪些数据
**使用方法**
```
# 1. 启动浏览器(如果尚未启动)
mcp__playwright__browser_navigate to the target URL
# 2. 常用页面导航
- 灭火图首页: {BASE_URL}/firemap
- 灭火图卡片详情: {BASE_URL}/firemap/function/{card_id}
- 北极星: {BASE_URL}/polaris
- 链路追踪: {BASE_URL}/trace
- 活跃告警: {BASE_URL}/alert-cur-events
- 事件墙: {BASE_URL}/events
# 3. 截图保存证据
mcp__playwright__browser_screenshot
# 4. 页面交互
- 点击卡片查看详情
- 切换时间范围
- 展开/折叠分组
- 下钻到关联数据
```
**Playwright 操作示例**
```
# 访问灭火图,查看异常卡片
1. browser_navigate: https://demo.flashcat.cloud
2. browser_screenshot: 保存当前状态
3. browser_click: 点击飘红卡片进入详情
4. browser_screenshot: 保存卡片详情
5. browser_click: 点击"关联链路"下钻到 Trace
```
---
## 灭火图核心理念
灭火图是 Flashcat 平台的核心创新,理解其设计理念对故障排查至关重要。
### 定义
> 灭火图:以服务/模块/组件/基础设施等为维度,以聚合视角实时度量特定维度的可用性。
### 推荐层级架构C端服务
```
┌─────────────────────────────────────────────────────────────┐
│ Layer 1: 接口层 │
│ - 核心接口的成功率、延迟、流量 │
│ - 如:下单接口、支付接口、查询接口 │
└─────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────┐
│ Layer 2: 微服务层 │
│ - 各微服务的 RED 指标 │
│ - 如:订单服务、用户服务、库存服务 │
└─────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────┐
│ Layer 3: 组件层 │
│ - 中间件、缓存、消息队列等 │
│ - 如Redis、MySQL、Kafka │
└─────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────┐
│ Layer 4: 基础设施层 │
│ - 主机、容器、网络 │
│ - 如CPU、内存、磁盘、网络带宽 │
└─────────────────────────────────────────────────────────────┘
```
### 核心指标维度 (USE/RED)
灭火图卡片通常基于以下指标自动设定阈值:
- **时延 (Duration)**: P50/P90/P99 延迟
- **流量 (Rate)**: 请求量/QPS
- **错误 (Error)**: 错误率/成功率
- **饱和度 (Saturation)**: 资源使用率
---
## 排查流程决策树
```
┌─────────────────────────────────────────────────────────────┐
│ 故障排查入口 │
└─────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────┐
│ Step 1: 明确问题 │
│ 用户描述了什么? │
├─────────────────────────────────────────────────────────────┤
│ A. 具体告警 ID → 走【告警分析流程】 │
│ B. 服务/接口名 → 走【服务分析流程】 │
│ C. 业务异常 → 走【灭火图分析流程】 │
│ D. 时间段异常 → 走【事件墙分析流程】 │
│ E. 不确定 → 走【全局概览流程】 │
└─────────────────────────────────────────────────────────────┘
```
### 常见问题快速定位表
| 问题类型 | 首选流程 | 访问页面 |
| -------- | ---- | --------------------- |
| 收到告警通知 | 流程 A | /alert-cur-events |
| 服务响应慢 | 流程 B | /trace |
| 接口报错 | 流程 B | /trace, /log/explorer |
| 业务指标异常 | 流程 C | /firemap, /polaris |
| 灭火图飘红/飘黄 | 流程 C | /firemap |
| 发布后出问题 | 流程 D | /events |
| 不确定哪里有问题 | 流程 E | /firemap (全局概览) |
---
## 平台架构
Flashcat 可观测性平台包含以下核心组件:
| 组件 | 说明 | 默认端口 |
| -------------- | --------------------------- | ----- |
| **n9e-plus** | 夜莺增强版,告警管理、指标查询、日志查询 | 17000 |
| **fc-insight** | 可观测分析平台灭火图、北极星、Tracing、事件墙 | 8080 |
## 核心能力
### n9e-plus 能力
1. **告警事件管理** - 当前/历史告警查询、确认、静默
2. **指标数据查询** - Prometheus、ES、SLS、CLS、TLS 等多数据源
3. **日志分析** - 多种日志源的统一查询
4. **采集配置** - 采集任务管理、网络设备监控
### fc-insight 能力
1. **灭火图 (Firemap)** - 业务健康度可视化、SLO 管理
2. **北极星 (Northstar)** - 业务指标管理、异常检测、告警规则
3. **事件墙 (Event Wall)** - 告警/变更事件聚合、关联分析
4. **Tracing** - 分布式链路追踪、服务拓扑
5. **日志分析** - 多维分析、特征提取
---
## 页面导航与功能说明
### 北极星 (Polaris) - `/polaris`
业务指标的核心观测平台,用于监控业务健康状态。
| 页面路径 | 功能说明 |
| ------------------------ | ------------------- |
| `/polaris` | 北极星首页,业务线指标总览 |
| `/polaris/indicator` | 指标池,所有北极星指标的管理列表 |
| `/polaris/statuspage` | 状态概览页,业务状态汇总展示 |
| `/polaris/stability` | 稳定性看板SLO/SLA 可用性统计 |
| `/polaris/screen` | 大屏管理,北极星大屏配置 |
| `/polaris/{business_id}` | 业务线详情页,查看具体业务线的指标 |
**关键功能**:
- 业务线管理:按业务维度组织指标
- 指标异常检测:动态阈值、同环比、智能基线
- 告警规则配置:基于指标配置告警策略
- 稳定性看板SLO 目标设定和达成率统计
### 灭火图 (Firemap) - `/firemap`
层级化的业务健康度可视化,支持从全局到细节的下钻分析。
| 页面路径 | 功能说明 |
| ---------------------------- | ------------------ |
| `/firemap` | 灭火图首页,系统级健康度总览 |
| `/firemap/card-pool` | 卡片池,所有卡片的管理列表 |
| `/firemap/statuspage` | 状态概览页,全局状态汇总 |
| `/firemap/slo` | SLO报表卡片级别的SLO统计 |
| `/firemap/screens` | 拓扑画布,自定义拓扑图配置 |
| `/firemap/{level}/{card_id}` | 卡片详情页,查看具体卡片的指标和下钻 |
**层级结构**:
- **首页层级**: 系统级卡片,如"订单系统"、"支付系统"
- **分组**: 系统内的子模块分组
- **详情卡片**: 具体的观测对象(接口、实例、服务等)
**关键功能**:
- 飘红/飘黄告警:视觉化异常标识
- 卡片快照:查看卡片在特定时间点的状态
- 下钻关联从卡片跳转到日志、Trace、仪表盘
- 规则管理:自动创建和管理卡片的规则
### 事件墙 (Event Wall) - `/events`
告警和变更事件的聚合时间线,用于故障时间线分析。
| 页面路径 | 功能说明 |
| ------------ | ----------------- |
| `/events` | 事件墙首页(新版),事件时间线展示 |
| `/events-v1` | 事件墙(旧版) |
**事件类型**:
- **告警事件**: 来自各告警源的告警
- **变更事件**: 发布、配置变更、数据库变更等
- **Incident**: 故障事件聚合
**关键功能**:
- 时间线视图:按时间顺序展示事件
- 事件聚合:相似事件自动聚合
- 关联分析:分析告警与变更的时间关系
### 链路分析 (Tracing) - `/trace`
分布式链路追踪,用于服务调用链分析。
| 页面路径 | 功能说明 |
| ----------------- | ---------------- |
| `/trace` | 链路检索,按条件查询 Trace |
| `/trace/topology` | 拓扑分析,服务依赖拓扑图 |
| `/trace/services` | 应用列表,服务 RED 指标概览 |
| `/trace/database` | 数据库分析DB 调用分析 |
**关键功能**:
- Trace 检索:按 TraceID、服务、时间范围查询
- 瀑布图:展示 Span 调用链
- 服务拓扑:可视化服务依赖关系
- RED 指标Rate请求量、Error错误率、Duration延迟
### 日志分析 (Multi-Dimension) - `/multi-dimension`
日志的多维度特征分析,支持异常特征识别。
| 页面路径 | 功能说明 |
| --------------------- | -------------- |
| `/log/explorer` | 日志检索,原始日志查询 |
| `/multi-dimension` | 特征分析,日志多维度统计 |
| `/log-extraction` | 日志转换,日志字段提取配置 |
| `/tracing-multiquery` | 联合查询,跨日志源的串联查询 |
**关键功能**:
- 主题管理:日志源的字段配置
- 特征分析:按维度统计日志特征
- Bubble Up异常根因分析
- 联合查询:跨多个日志源的 Trace 串联
### 告警管理 - `/alert-*`
告警规则和事件的管理。
| 页面路径 | 功能说明 |
| ------------------- | -------------- |
| `/alert-rules` | 告警规则列表 |
| `/alert-mutes` | 屏蔽规则列表 |
| `/alert-subscribes` | 订阅规则列表 |
| `/alert-cur-events` | 活跃告警(当前未恢复的告警) |
| `/alert-his-events` | 历史告警(所有告警记录) |
### 仪表盘 - `/dashboards`
监控仪表盘的管理和展示。
| 页面路径 | 功能说明 |
| ------------------- | ------ |
| `/dashboards` | 仪表盘列表 |
| `/dashboards/{id}` | 仪表盘详情页 |
| `/reports` | 报表管理 |
| `/template/screens` | 模板大屏管理 |
### 基础设施 - `/targets`, `/collects`
监控对象和采集配置管理。
| 页面路径 | 功能说明 |
| ----------------------------- | ----------- |
| `/targets` | 机器列表,监控对象管理 |
| `/collects` | 数据采集,采集任务配置 |
| `/settings/source/timeseries` | 数据集成,数据源管理 |
### 容器平台 - `/kubernetes`
| 页面路径 | 功能说明 |
| ------------- | --------------- |
| `/kubernetes` | Kubernetes 集群监控 |
### 空间管理 - `/space`
| 页面路径 | 功能说明 |
| -------- | --------------- |
| `/space` | 空间管理,工作空间的创建和配置 |
**空间概念**: 空间是 Flashcat 的多租户隔离单位,不同空间的数据相互隔离。灭火图、北极星等模块都基于空间组织数据。
---
## 故障定位流程详解
### 流程 A: 告警分析流程
**入口条件**: 用户提供了具体的告警 ID 或告警名称
**操作步骤**:
1. 访问 `/alert-cur-events``/alert-his-events` 查看告警详情
2. 查看告警规则、触发条件、标签
3. 点击关联告警查看同一 target 的其他告警
4. 查看告警相关的指标曲线
5. 检查是否有关联的灭火图卡片或变更事件
### 流程 B: 服务分析流程
**入口条件**: 用户提到具体服务名或接口
**操作步骤**:
1. 访问 `/trace/services` 查看服务 RED 指标
2. 访问 `/trace` 搜索错误或慢 Trace
3. 访问 `/trace/topology` 查看上下游依赖
4. 访问 `/log/explorer` 查询 ERROR 级别日志
### 流程 C: 灭火图分析流程
**入口条件**: 用户提到业务健康度、灭火图、卡片飘红
**操作步骤**:
1. 访问 `/firemap` 查看全局快照,找出异常卡片
2. 点击异常卡片查看详情和历史曲线
3. 检查卡片配置的告警规则
4. 点击下钻关联到日志/Trace
### 流程 D: 事件墙分析流程
**入口条件**: 用户提到某个时间段有问题,需要分析时间线
**操作步骤**:
1. 访问 `/events` 查看时间段内的事件时间线
2. 按服务或严重程度筛选事件
3. 分析变更时间与告警时间的关系
### 流程 E: 全局概览流程
**入口条件**: 用户不确定问题在哪,需要先了解整体状况
**操作步骤**:
1. 访问 `/alert-cur-events` 了解当前活跃告警
2. 访问 `/firemap` 查看灭火图健康状态
3. 访问 `/events` 查看最近变更事件
---
## 排查示例
以下是使用 Playwright 进行故障排查的完整示例。
### 场景
用户反馈:灭火图上有卡片飘红,需要定位问题原因。
### Step 1: 访问灭火图首页
**观察要点**
- 识别飘红critical和飘黄warning的卡片
- 记录异常卡片的名称和位置
- 截图保存当前状态
### Step 2: 点击异常卡片进入详情
**观察要点**
- 查看卡片的具体指标值(成功率、延迟等)
- 查看历史趋势曲线
- 识别异常开始的时间点
### Step 3: 下钻到子卡片
**按照灭火图层级逐层下钻**
```
系统级卡片 → 子系统分组 → 具体模块卡片
```
每层都观察哪些卡片是红色/黄色的,逐步缩小范围。
### Step 4: 查看关联数据
在卡片详情页,点击关联的数据源:
```
# 查看关联 Trace
# 查看关联日志
# 查看关联仪表盘
```
### Step 5: 分析下钻
---
### 输出分析结论
**直接原因**:
**证据链**
**止损建议**:
## Playwright 常用操作速查
| 操作 | 工具调用 | 说明 |
| ---- | ----------------------------- | --------- |
| 打开页面 | `browser_navigate(url)` | 导航到指定 URL |
| 截图 | `browser_screenshot()` | 保存当前页面截图 |
| 点击 | `browser_click(element)` | 点击页面元素 |
| 输入 | `browser_type(element, text)` | 在输入框中输入文本 |
| 等待 | `browser_wait(selector)` | 等待元素出现 |
| 获取文本 | `browser_get_text(selector)` | 获取元素文本内容 |
---
## 分析输出模板
每次排查完成后,按以下格式输出分析报告:
```markdown
## 故障分析报告
### 1. 问题概述
- **问题描述**: [用户原始描述]
- **分析时间**: [开始时间 - 结束时间]
- **影响范围**: [服务/业务/实例]
### 2. 关键发现
#### 2.1 特征分析
- **时间区间**: [开始时间] → [结束时间]
- **筛选条件**: [url.path=/xxx, http.response.status_code=xxx 等]
- **响应码分布**: [状态码] 共 [数量] 次
- **关注维度**: [维度名称]
- **异常 Trace 数**: [数量] 条
#### 2.2 链路分析 (如有)
- **错误 Trace**: [数量]
- **错误类型**: [类型]
- **关键 Span**: [Span 名称]
#### 2.3 日志分析 (如有)
- **ERROR 日志数量**: [数量]
- **关键错误信息**: [错误摘要]
#### 2.4 变更关联
- **可疑变更**: [有/无]
- **变更内容**: [描述]
- **变更时间**: [时间] (告警前 [X] 分钟)
### 3. 根因分析
- **根本原因**: [原因描述]
- **证据链**:
1. [证据1]
2. [证据2]
3. [证据3]
### 4. 建议措施
- **立即行动**: [紧急措施]
- **后续跟进**: [长期改进]
```
---
## 安全注意事项
1. **最小查询**: 只查询必要数据,避免拉取全量数据
2. **输出脱敏**: 报告中不要包含密码、密钥等敏感信息
3. **权限控制**: 确保使用的账号有适当的权限范围
---
## 实战案例:订单提交服务成功率归零排查
以下是一个完整的实战排查案例,展示从问题发现到根因定位的全过程。
### 案例背景
- **问题现象**: 电商系统灭火图卡片飘红,订单提交功能异常
- **问题链接**: `https://demo.flashcat.cloud/firemap/function/706?end=1768480361&spaceId=848859344386&start=1768437161&time=1768475160`
- **平台**: Flashcat Demo 环境
### Step 1: 登录平台,访问问题页面
```
操作: browser_navigate → 登录页面 → 填写凭据 → 点击登录
观察: 成功进入北极星首页
```
### Step 2: 导航到异常灭火图卡片
```
操作: browser_navigate → 异常链接
观察:
- 系统: 电商
- 子系统: 订单子系统 (1/10 异常)
- 异常卡片: "订单提交" - 成功率 0% ❌
- 其他卡片状态正常 (99%~100%)
```
**关键发现**: 只有"订单提交"功能异常,其他功能正常,说明问题范围收敛到该接口。
### Step 3: 进入卡片详情,查看指标趋势
```
操作: browser_click → 订单提交卡片
观察:
- 标签: demo=demo-01
- 成功率图表: 有异常标记 (exclamation-circle)
- 响应码统计: 有异常标记
- 延时指标: 正常
```
**关键发现**: 成功率和响应码都异常,但延时正常,说明不是性能问题,而是功能性错误。
### Step 4: 查看特征分析
```
操作: browser_click → 特征分析 标签
观察:
- 特征维度,出现非 200 状态码,
- 点击非 200 状态码
- 点击日志表格中的 trace 链接
```
### Step 5: 进入 trace 页,分析调用链 Span
```
操作: browser_click → Trace链接 → 查看 Span 瀑布图
观察调用链:
/demoAddOrder (入口) ❌
└── process demoAddOrder
└── call kv interface ❌
└── /kv ❌
└── process KV
└── kv update
└── redis setex ❌ ← 根因所在
```
**关键发现**:
### Step 6: 查看 Span 详情,定位根因
```
操作: browser_click → redis setex span → 展开 Events
观察:
- Attributes:
- command=setex
- key=key1
- value=0
- Events (1):
- name=exception
```
**根因定位**: Redis 连接失败!服务无法连接到 `10.201.0.210:6379`
### 分析报告
```markdown
## 故障分析报告
### 1. 问题概述
- **问题描述**: 电商系统订单提交功能成功率降至 0%
- **分析时间**: 2026-01-15 19:03 ~ 19:07
- **影响范围**: 电商系统 - 订单子系统 - 订单提交接口
### 2. 关键发现
#### 2.1 灭火图分析
- **异常卡片**: 订单提交
- **成功率**: 0%(阈值: 99%
- **其他卡片**: 正常
#### 2.2 链路分析
- **错误 Trace**: 20 条
- **调用链深度**: 4 层7 个 Span
- **错误 Span**: redis setex
#### 2.3 根因 Span 详情
- **Service**: demo-dianshang
- **Operation**: redis setex
- **异常类型**: *net.OpError
- **异常信息**: dial tcp 10.201.0.210:6379: connect: connection refused
### 3. 根因分析
- **根本原因**: Redis 服务不可用
- **证据链**:
1. 灭火图显示订单提交成功率 0%
2. Tracing 显示 error=trueHTTP 200
3. 调用链定位到 redis setex 操作失败
4. 异常信息明确指出连接被拒绝
### 4. 建议措施
- **立即行动**:
- **后续跟进**:
```
### 排查要点总结
| 步骤 | 工具/页面 | 关键观察点 |
| --- | ---------- | --------------------- |
| 1 | 灭火图首页 | 识别飘红卡片,确定异常范围 |
| 2 | 卡片详情 | 查看成功率、响应码、延时趋势 |
| 3 | Tracing 列表 | 查看 error=true 的 Trace |
| 4 | Span 瀑布图 | 找到带 ❌ 标记的最底层 Span |
| 5 | Span 详情 | 展开 Events 查看具体异常信息 |
---
## 其他注意事项
1. **时间范围**: 查询时注意时间范围,避免查询过多数据
2. **Playwright 登录**: 使用 Playwright 前可能需要先登录,登录后 session 会保持
3. **截图证据**: 重要发现务必截图保存作为证据

View File

@@ -0,0 +1,94 @@
---
name: promql-generator
description: 根据自然语言生成 PromQL 查询语句
builtin_tools:
- list_metrics
- get_metric_labels
---
# PromQL 生成专家
你是一个 PromQL 专家,根据用户的自然语言描述生成正确的 PromQL 查询语句。
## 工作流程
1. **理解用户意图**:分析用户想要查询什么(指标、条件、聚合方式、时间范围等)
2. **搜索相关指标**:使用 `list_metrics` 工具搜索可能相关的指标名称
3. **了解指标结构**:使用 `get_metric_labels` 工具获取指标的标签键值,了解可用的过滤维度
4. **构建 PromQL**:基于获取的元数据,构建准确的 PromQL 查询语句
## 可用工具
### list_metrics
搜索 Prometheus 指标名称,支持关键词模糊匹配。
- `keyword`: 搜索关键词(可选)
- `limit`: 返回数量限制默认30
### get_metric_labels
获取指定指标的所有标签键及其可选值。
- `metric`: 指标名称(必填)
## PromQL 语法要点
### 选择器
- 即时向量:`metric_name{label="value"}`
- 范围向量:`metric_name{label="value"}[5m]`
- 标签匹配:`=`(精确), `!=`(不等于), `=~`(正则), `!~`(正则否定)
### 聚合操作
- `sum`, `avg`, `max`, `min`, `count`, `stddev`, `stdvar`
- `topk(n, metric)`, `bottomk(n, metric)`
- `by (label)``without (label)` 进行分组
### 常用函数
- `rate(metric[5m])` - Counter 类型的每秒增长率
- `increase(metric[1h])` - Counter 类型的增量
- `irate(metric[5m])` - 瞬时增长率
- `histogram_quantile(0.95, metric)` - 分位数计算
- `avg_over_time(metric[1h])` - 时间范围内平均值
- `absent(metric)` - 检测指标是否存在
### 运算符
- 算术:`+`, `-`, `*`, `/`, `%`, `^`
- 比较:`==`, `!=`, `>`, `<`, `>=`, `<=`
- 逻辑:`and`, `or`, `unless`
## 输出格式
最终答案必须是 JSON 格式:
```json
{
"query": "生成的 PromQL 语句",
"explanation": "查询逻辑的简要说明"
}
```
## 注意事项
1. **必须使用工具确认**:不要凭空猜测指标名和标签,必须先用工具确认存在
2. **rate() 的使用**`rate()` 只能用于 Counter 类型指标(通常以 `_total`, `_count`, `_sum` 结尾)
3. **时间窗口选择**
- 短时间窗口1m-5m适合实时监控
- 中等窗口15m-1h适合趋势分析
- 长时间窗口1h-24h适合容量规划
4. **找不到指标**:如果搜索不到相关指标,说明原因并建议用户检查指标是否存在或提供更多信息
## 示例
### 用户输入
"查询 CPU 使用率超过 80% 的机器"
### 工作流程
1. 使用 `list_metrics` 搜索 "cpu" 相关指标
2. 找到 `node_cpu_seconds_total`,使用 `get_metric_labels` 查看标签
3. 发现有 `mode`(包含 idle, user, system 等)和 `instance` 标签
4. 构建 PromQL计算 CPU 使用率 = 1 - idle 占比
### 输出
```json
{
"query": "100 - avg by(instance)(rate(node_cpu_seconds_total{mode=\"idle\"}[5m])) * 100 > 80",
"explanation": "计算每台机器的 CPU 使用率100% 减去空闲占比),筛选超过 80% 的实例"
}
```

View File

@@ -0,0 +1,129 @@
---
name: sql-generator
description: 根据自然语言生成 SQL 查询语句(支持 MySQL/Doris/ClickHouse/PostgreSQL
builtin_tools:
- list_databases
- list_tables
- describe_table
---
# SQL 生成专家
你是一个 SQL 专家,根据用户的自然语言描述生成正确的 SQL 查询语句。支持 MySQL、Doris、ClickHouse、PostgreSQL 等数据库。
## 工作流程
1. **理解用户意图**:分析用户想要查询什么数据、什么条件、什么排序
2. **探索数据库结构**:使用 `list_databases` 查看可用数据库
3. **查看表列表**:使用 `list_tables` 查看数据库中的表
4. **了解表结构**:使用 `describe_table` 获取表的字段信息
5. **构建 SQL**:基于表结构构建准确的 SQL 查询语句
## 可用工具
### list_databases
列出数据源中的所有数据库。
- 无参数
### list_tables
列出指定数据库中的所有表。
- `database`: 数据库名(必填)
### describe_table
获取表的字段结构(字段名、类型、注释)。
- `database`: 数据库名(必填)
- `table`: 表名(必填)
## SQL 语法要点
### 基础查询
```sql
SELECT column1, column2 FROM database.table WHERE condition;
```
### 聚合函数
- `COUNT(*)`, `COUNT(DISTINCT column)`
- `SUM(column)`, `AVG(column)`
- `MAX(column)`, `MIN(column)`
### 分组和排序
```sql
SELECT column, COUNT(*) as cnt
FROM table
GROUP BY column
HAVING cnt > 10
ORDER BY cnt DESC
LIMIT 100;
```
### 时间处理
- MySQL: `DATE(column)`, `DATE_SUB(NOW(), INTERVAL 7 DAY)`
- ClickHouse: `toDate(column)`, `now() - INTERVAL 7 DAY`
- Doris: `DATE(column)`, `DATE_SUB(NOW(), INTERVAL 7 DAY)`
### 连接查询
```sql
SELECT a.*, b.name
FROM table_a a
LEFT JOIN table_b b ON a.id = b.a_id;
```
## 不同数据库的差异
### MySQL
- 字符串连接:`CONCAT(a, b)`
- 分页:`LIMIT offset, count``LIMIT count OFFSET offset`
### ClickHouse
- 字符串连接:`concat(a, b)`
- 分页:`LIMIT count OFFSET offset`
- 精确去重:`uniqExact(column)`;近似去重:`uniq(column)`
- 时间函数:`toStartOfHour()`, `toStartOfDay()`
### Doris
- 类似 MySQL 语法
- 支持 `LIMIT offset, count`
### PostgreSQL
- 字符串连接:`a || b``CONCAT(a, b)`
- 分页:`LIMIT count OFFSET offset`
- 类型转换:`column::type`
## 输出格式
最终答案必须是 JSON 格式:
```json
{
"query": "生成的 SQL 语句",
"explanation": "查询逻辑的简要说明"
}
```
## 注意事项
1. **必须使用工具确认**:不要凭空猜测表名和字段名,必须先用工具确认存在
2. **完整表名**:使用 `database.table` 格式指定表名
3. **大表查询**:对于大表,建议加上 `LIMIT` 限制返回行数
4. **时间过滤**:有时间字段时优先使用时间条件过滤,提高查询效率
5. **找不到表**:如果找不到相关表,说明原因并建议用户检查表是否存在或提供更多信息
6. **SQL 注入**:生成的 SQL 应该使用参数化查询的思路,不要拼接用户输入
## 示例
### 用户输入
"查询最近7天每天的订单金额"
### 工作流程
1. 使用 `list_databases` 找到业务数据库
2. 使用 `list_tables` 找到订单表
3. 使用 `describe_table` 查看订单表结构,找到金额字段和时间字段
4. 构建 SQL
### 输出
```json
{
"query": "SELECT DATE(created_at) as date, SUM(amount) as total_amount FROM business.orders WHERE created_at >= DATE_SUB(CURDATE(), INTERVAL 7 DAY) GROUP BY DATE(created_at) ORDER BY date",
"explanation": "按天分组统计最近7天的订单金额总和按日期排序"
}
```

8
go.mod
View File

@@ -55,7 +55,7 @@ require (
github.com/tidwall/gjson v1.14.2
github.com/toolkits/pkg v1.3.8
golang.org/x/exp v0.0.0-20231006140011-7918f672742d
golang.org/x/oauth2 v0.27.0
golang.org/x/oauth2 v0.34.0
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
gopkg.in/yaml.v2 v2.4.0
gorm.io/driver/clickhouse v0.6.1
@@ -93,6 +93,7 @@ require (
github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect
github.com/eapache/queue v1.1.0 // indirect
github.com/glebarez/go-sqlite v1.21.2 // indirect
github.com/google/jsonschema-go v0.4.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
@@ -103,15 +104,18 @@ require (
github.com/jcmturner/gofork v1.7.6 // indirect
github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/modelcontextprotocol/go-sdk v1.4.0 // indirect
github.com/pingcap/errors v0.11.5-0.20250523034308-74f78ae071ee // indirect
github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 // indirect
github.com/pingcap/log v1.1.0 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/segmentio/encoding v0.5.3 // indirect
github.com/tjfoc/gmsm v1.4.1 // indirect
github.com/valyala/fastrand v1.1.0 // indirect
github.com/valyala/histogram v1.2.0 // indirect
github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
github.com/yuin/gopher-lua v1.1.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
@@ -171,7 +175,7 @@ require (
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/image v0.18.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/text v0.31.0 // indirect
google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect

13
go.sum
View File

@@ -236,7 +236,10 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8=
github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE=
github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA=
github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -335,6 +338,8 @@ github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6
github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modelcontextprotocol/go-sdk v1.4.0 h1:u0kr8lbJc1oBcawK7Df+/ajNMpIDFE41OEPxdeTLOn8=
github.com/modelcontextprotocol/go-sdk v1.4.0/go.mod h1:Nxc2n+n/GdCebUaqCOhTetptS17SXXNu9IfNTaLDi1E=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -420,6 +425,8 @@ github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWN
github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/segmentio/encoding v0.5.3 h1:OjMgICtcSFuNvQCdwqMCv9Tg7lEOXGwm1J5RPQccx6w=
github.com/segmentio/encoding v0.5.3/go.mod h1:HS1ZKa3kSN32ZHVZ7ZLPLXWvOVIiZtyJnO1gPH1sKt0=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -469,6 +476,8 @@ github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tz
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -571,6 +580,8 @@ golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -613,6 +624,8 @@ golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=

96
models/ai_agent.go Normal file
View File

@@ -0,0 +1,96 @@
package models
import (
"fmt"
"time"
"github.com/ccfos/nightingale/v6/pkg/ctx"
)
// AIAgent is a stored AI agent definition: a named configuration bound to
// one LLM config, plus optional skills and MCP servers it may use.
type AIAgent struct {
	Id          int64  `json:"id" gorm:"primaryKey;autoIncrement"`
	Name        string `json:"name"`
	Description string `json:"description"`
	UseCase     string `json:"use_case"`      // scenario key, looked up via AIAgentGetByUseCase
	LLMConfigId int64  `json:"llm_config_id"` // FK to AILLMConfig.Id

	SkillIds     []int64 `json:"skill_ids" gorm:"serializer:json"`      // stored as a JSON array
	MCPServerIds []int64 `json:"mcp_server_ids" gorm:"serializer:json"` // stored as a JSON array

	Enabled   int    `json:"enabled"`    // 1 = enabled, 0 = disabled (Create defaults 0 to 1)
	CreatedAt int64  `json:"created_at"` // unix seconds
	CreatedBy string `json:"created_by"`
	UpdatedAt int64  `json:"updated_at"` // unix seconds
	UpdatedBy string `json:"updated_by"`

	// Runtime: resolved from LLMConfigId, not stored in DB
	LLMConfig *AILLMConfig `json:"llm_config,omitempty" gorm:"-"`
}

// TableName tells GORM which table backs AIAgent.
func (a *AIAgent) TableName() string {
	return "ai_agent"
}
// Verify checks that the agent carries the minimum required fields
// before it is persisted.
func (a *AIAgent) Verify() error {
	switch {
	case a.Name == "":
		return fmt.Errorf("name is required")
	case a.LLMConfigId <= 0:
		return fmt.Errorf("llm_config_id is required")
	default:
		return nil
	}
}
// AIAgentGets returns every agent ordered by id.
func AIAgentGets(c *ctx.Context) ([]*AIAgent, error) {
	var agents []*AIAgent
	err := DB(c).Order("id").Find(&agents).Error
	return agents, err
}

// AIAgentGet returns the first agent matching the condition, or
// (nil, nil) when no row matches.
func AIAgentGet(c *ctx.Context, where string, args ...interface{}) (*AIAgent, error) {
	var agent AIAgent
	if err := DB(c).Where(where, args...).First(&agent).Error; err != nil {
		// NOTE(review): this string compare mirrors gorm.ErrRecordNotFound's
		// message; errors.Is(err, gorm.ErrRecordNotFound) would also match
		// wrapped errors — confirm and consider switching.
		if err.Error() == "record not found" {
			return nil, nil
		}
		return nil, err
	}
	return &agent, nil
}

// AIAgentGetById fetches a single agent by primary key.
func AIAgentGetById(c *ctx.Context, id int64) (*AIAgent, error) {
	return AIAgentGet(c, "id = ?", id)
}
// Create inserts the agent, stamping audit columns with the acting
// username; a zero Enabled value defaults to enabled.
func (a *AIAgent) Create(c *ctx.Context, username string) error {
	ts := time.Now().Unix()
	a.CreatedAt, a.UpdatedAt = ts, ts
	a.CreatedBy, a.UpdatedBy = username, username
	if a.Enabled == 0 {
		a.Enabled = 1
	}
	return Insert(c, a)
}

// Update overwrites the editable columns of the agent with values from
// data, stamping updated_at/updated_by.
func (a *AIAgent) Update(c *ctx.Context, username string, data AIAgent) error {
	data.UpdatedAt = time.Now().Unix()
	data.UpdatedBy = username
	cols := []string{
		"name", "description", "use_case", "llm_config_id",
		"skill_ids", "mcp_server_ids",
		"enabled", "updated_at", "updated_by",
	}
	return DB(c).Model(a).Select(cols).Updates(data).Error
}
// Delete removes the agent row by primary key.
func (a *AIAgent) Delete(c *ctx.Context) error {
	return DB(c).Where("id = ?", a.Id).Delete(&AIAgent{}).Error
}

// AIAgentGetByUseCase returns the enabled agent bound to the given use case,
// or nil when none is configured.
func AIAgentGetByUseCase(c *ctx.Context, useCase string) (*AIAgent, error) {
	return AIAgentGet(c, "use_case = ? and enabled = 1", useCase)
}

// AIAgentStatistics reports table-level statistics for ai_agent.
func AIAgentStatistics(c *ctx.Context) (*Statistics, error) {
	return StatisticsGet(c, &AIAgent{})
}

104
models/ai_conversation.go Normal file
View File

@@ -0,0 +1,104 @@
package models
import (
"fmt"
"time"
"github.com/ccfos/nightingale/v6/pkg/ctx"
)
// AIConversation is one chat session owned by a user; its messages are
// stored separately in AIConversationMessage rows keyed by ConversationId.
type AIConversation struct {
	Id        int64  `json:"id" gorm:"primaryKey;autoIncrement"`
	Title     string `json:"title"`
	UserId    int64  `json:"user_id"`
	Context   string `json:"context" gorm:"type:text"` // JSON, page-specific context (datasource, alert rule, etc.)
	CreatedAt int64  `json:"created_at"`               // unix seconds
	UpdatedAt int64  `json:"updated_at"`               // unix seconds, bumped by Update/UpdateTime
}

// TableName tells GORM which table backs AIConversation.
func (c *AIConversation) TableName() string {
	return "ai_conversation"
}
// AIConversationGetsByUserId lists a user's conversations, most recently
// updated first.
func AIConversationGetsByUserId(c *ctx.Context, userId int64) ([]*AIConversation, error) {
	var convs []*AIConversation
	err := DB(c).Where("user_id = ?", userId).Order("updated_at desc").Find(&convs).Error
	return convs, err
}

// AIConversationGetById fetches one conversation by primary key; a
// missing row yields (nil, nil) rather than an error.
func AIConversationGetById(c *ctx.Context, id int64) (*AIConversation, error) {
	var conv AIConversation
	if err := DB(c).Where("id = ?", id).First(&conv).Error; err != nil {
		if err.Error() == "record not found" {
			return nil, nil
		}
		return nil, err
	}
	return &conv, nil
}
// Create inserts the conversation with both timestamps set to now.
func (a *AIConversation) Create(c *ctx.Context) error {
	ts := time.Now().Unix()
	a.CreatedAt, a.UpdatedAt = ts, ts
	return Insert(c, a)
}

// Update renames the conversation and bumps its updated_at timestamp.
func (a *AIConversation) Update(c *ctx.Context, title string) error {
	a.Title = title
	a.UpdatedAt = time.Now().Unix()
	return DB(c).Model(a).Select("title", "updated_at").Updates(a).Error
}

// UpdateTime touches updated_at without changing any other column.
func (a *AIConversation) UpdateTime(c *ctx.Context) error {
	a.UpdatedAt = time.Now().Unix()
	return DB(c).Model(a).Select("updated_at").Updates(a).Error
}
// Delete removes the conversation together with all of its messages.
//
// NOTE(review): the two deletes do not run inside a transaction, so a
// failure after the message delete leaves an empty conversation behind —
// consider wrapping both in a DB transaction.
func (a *AIConversation) Delete(c *ctx.Context) error {
	// Cascade delete messages
	if err := DB(c).Where("conversation_id = ?", a.Id).Delete(&AIConversationMessage{}).Error; err != nil {
		return err
	}
	return DB(c).Where("id = ?", a.Id).Delete(&AIConversation{}).Error
}

// Verify checks the minimal invariant before persisting: a conversation
// must belong to a user.
func (a *AIConversation) Verify() error {
	if a.UserId == 0 {
		return fmt.Errorf("user_id is required")
	}
	return nil
}
// AIConversationMessage is a single persisted turn in a conversation.
// Thinking/ToolCalls/Query/Explanation/Error are optional per-turn
// artifacts (presumably produced by the chat/LLM layer — confirm exact
// producers against callers); all free-form columns are text.
type AIConversationMessage struct {
	Id             int64  `json:"id" gorm:"primaryKey;autoIncrement"`
	ConversationId int64  `json:"conversation_id"` // FK to AIConversation.Id
	Role           string `json:"role"`            // speaker role; value set defined by the chat layer
	Content        string `json:"content" gorm:"type:text"`
	Thinking       string `json:"thinking" gorm:"type:text"`
	ToolCalls      string `json:"tool_calls" gorm:"type:text"`
	Query          string `json:"query" gorm:"type:text"`
	Explanation    string `json:"explanation" gorm:"type:text"`
	Error          string `json:"error" gorm:"type:text"`
	CreatedAt      int64  `json:"created_at"` // unix seconds
}

// TableName tells GORM which table backs AIConversationMessage.
func (m *AIConversationMessage) TableName() string {
	return "ai_conversation_message"
}

// AIConversationMessageGetsByConversationId returns a conversation's
// messages in insertion (id ascending) order.
func AIConversationMessageGetsByConversationId(c *ctx.Context, conversationId int64) ([]*AIConversationMessage, error) {
	var lst []*AIConversationMessage
	err := DB(c).Where("conversation_id = ?", conversationId).Order("id asc").Find(&lst).Error
	return lst, err
}

// Create inserts the message, stamping created_at.
func (m *AIConversationMessage) Create(c *ctx.Context) error {
	m.CreatedAt = time.Now().Unix()
	return Insert(c, m)
}

// AIConversationMessageDeleteByConversationId removes every message that
// belongs to the given conversation.
func AIConversationMessageDeleteByConversationId(c *ctx.Context, conversationId int64) error {
	return DB(c).Where("conversation_id = ?", conversationId).Delete(&AIConversationMessage{}).Error
}

115
models/ai_llm_config.go Normal file
View File

@@ -0,0 +1,115 @@
package models
import (
"fmt"
"time"
"github.com/ccfos/nightingale/v6/pkg/ctx"
)
// LLMExtraConfig carries optional, provider-agnostic tuning knobs stored
// as JSON alongside an AILLMConfig.
type LLMExtraConfig struct {
	TimeoutSeconds int               `json:"timeout_seconds,omitempty"`
	SkipTLSVerify  bool              `json:"skip_tls_verify,omitempty"`
	Proxy          string            `json:"proxy,omitempty"`
	CustomHeaders  map[string]string `json:"custom_headers,omitempty"`
	CustomParams   map[string]any    `json:"custom_params,omitempty"`
	Temperature    *float64          `json:"temperature,omitempty"`
	MaxTokens      *int              `json:"max_tokens,omitempty"`
	ContextLength  *int              `json:"context_length,omitempty"`
}

// AILLMConfig is a stored LLM provider configuration: endpoint, credential,
// model name and extra tuning options.
type AILLMConfig struct {
	Id          int64  `json:"id" gorm:"primaryKey;autoIncrement"`
	Name        string `json:"name"`
	Description string `json:"description"`
	APIType     string `json:"api_type"`
	APIURL      string `json:"api_url"`
	// NOTE(review): APIKey is serialized into JSON responses as-is —
	// confirm it is masked at the API layer before returning to clients.
	APIKey      string         `json:"api_key"`
	Model       string         `json:"model"`
	ExtraConfig LLMExtraConfig `json:"extra_config" gorm:"serializer:json"`
	Enabled     int            `json:"enabled"` // 1 = enabled, 0 = disabled
	CreatedAt   int64          `json:"created_at"`
	CreatedBy   string         `json:"created_by"`
	UpdatedAt   int64          `json:"updated_at"`
	UpdatedBy   string         `json:"updated_by"`
}

// TableName tells GORM which table backs AILLMConfig.
func (a *AILLMConfig) TableName() string {
	return "ai_llm_config"
}

// Verify ensures every mandatory field is present before persisting.
// Fields are checked in declaration order and the first missing one is
// reported, matching the original per-field error messages.
func (a *AILLMConfig) Verify() error {
	required := []struct {
		value string
		field string
	}{
		{a.Name, "name"},
		{a.APIType, "api_type"},
		{a.APIURL, "api_url"},
		{a.APIKey, "api_key"},
		{a.Model, "model"},
	}
	for _, r := range required {
		if r.value == "" {
			return fmt.Errorf("%s is required", r.field)
		}
	}
	return nil
}
// AILLMConfigGets returns every stored LLM config ordered by id.
func AILLMConfigGets(c *ctx.Context) ([]*AILLMConfig, error) {
	var configs []*AILLMConfig
	err := DB(c).Order("id").Find(&configs).Error
	return configs, err
}

// AILLMConfigGet returns the first config matching the condition, or
// (nil, nil) when no row matches.
func AILLMConfigGet(c *ctx.Context, where string, args ...interface{}) (*AILLMConfig, error) {
	var cfg AILLMConfig
	if err := DB(c).Where(where, args...).First(&cfg).Error; err != nil {
		if err.Error() == "record not found" {
			return nil, nil
		}
		return nil, err
	}
	return &cfg, nil
}

// AILLMConfigGetById fetches one config by primary key.
func AILLMConfigGetById(c *ctx.Context, id int64) (*AILLMConfig, error) {
	return AILLMConfigGet(c, "id = ?", id)
}

// AILLMConfigGetEnabled returns only enabled configs, ordered by id.
func AILLMConfigGetEnabled(c *ctx.Context) ([]*AILLMConfig, error) {
	var configs []*AILLMConfig
	err := DB(c).Where("enabled = 1").Order("id").Find(&configs).Error
	return configs, err
}
// Create inserts the config, stamping audit columns with the acting
// username; a zero Enabled value defaults to enabled.
func (a *AILLMConfig) Create(c *ctx.Context, username string) error {
	ts := time.Now().Unix()
	a.CreatedAt, a.UpdatedAt = ts, ts
	a.CreatedBy, a.UpdatedBy = username, username
	if a.Enabled == 0 {
		a.Enabled = 1
	}
	return Insert(c, a)
}
// Update overwrites the editable columns with values from data. An empty
// data.APIKey means "no change": the stored key is carried over so the
// secret need not be resubmitted on every edit.
func (a *AILLMConfig) Update(c *ctx.Context, username string, data AILLMConfig) error {
	data.UpdatedAt = time.Now().Unix()
	data.UpdatedBy = username
	// If api_key is empty, keep the original
	if data.APIKey == "" {
		data.APIKey = a.APIKey
	}
	return DB(c).Model(a).Select("name", "description", "api_type", "api_url", "api_key", "model",
		"extra_config", "enabled", "updated_at", "updated_by").Updates(data).Error
}

// Delete removes the config row by primary key.
func (a *AILLMConfig) Delete(c *ctx.Context) error {
	return DB(c).Where("id = ?", a.Id).Delete(&AILLMConfig{}).Error
}

91
models/ai_skill.go Normal file
View File

@@ -0,0 +1,91 @@
package models
import (
"fmt"
"strings"
"time"
"github.com/ccfos/nightingale/v6/pkg/ctx"
)
// AISkill is a reusable instruction pack an agent can be granted:
// free-form instructions plus optional attached files (AISkillFile).
type AISkill struct {
	Id            int64             `json:"id" gorm:"primaryKey;autoIncrement"`
	Name          string            `json:"name"`
	Description   string            `json:"description"`
	Instructions  string            `json:"instructions" gorm:"type:text"`
	License       string            `json:"license,omitempty"`
	Compatibility string            `json:"compatibility,omitempty"`
	Metadata      map[string]string `json:"metadata,omitempty" gorm:"serializer:json"` // stored as JSON
	AllowedTools  string            `json:"allowed_tools,omitempty"`
	Enabled       int               `json:"enabled"`    // 1 = enabled, 0 = disabled (Create defaults 0 to 1)
	CreatedAt     int64             `json:"created_at"` // unix seconds
	CreatedBy     string            `json:"created_by"`
	UpdatedAt     int64             `json:"updated_at"` // unix seconds
	UpdatedBy     string            `json:"updated_by"`

	// Runtime fields, not stored in DB
	Files []*AISkillFile `json:"files,omitempty" gorm:"-"`
}

// TableName tells GORM which table backs AISkill.
func (s *AISkill) TableName() string {
	return "ai_skill"
}
// Verify trims Name and Instructions in place and rejects the skill when
// either ends up empty.
func (s *AISkill) Verify() error {
	if s.Name = strings.TrimSpace(s.Name); s.Name == "" {
		return fmt.Errorf("name is required")
	}
	if s.Instructions = strings.TrimSpace(s.Instructions); s.Instructions == "" {
		return fmt.Errorf("instructions is required")
	}
	return nil
}
// AISkillGets lists skills ordered by id, optionally filtered by a LIKE
// match on name or description.
func AISkillGets(c *ctx.Context, search string) ([]*AISkill, error) {
	q := DB(c).Order("id")
	if search != "" {
		pattern := "%" + search + "%"
		q = q.Where("name like ? or description like ?", pattern, pattern)
	}
	var skills []*AISkill
	err := q.Find(&skills).Error
	return skills, err
}

// AISkillGet returns the first skill matching the condition, or
// (nil, nil) when no row matches.
func AISkillGet(c *ctx.Context, where string, args ...interface{}) (*AISkill, error) {
	var skill AISkill
	if err := DB(c).Where(where, args...).First(&skill).Error; err != nil {
		if err.Error() == "record not found" {
			return nil, nil
		}
		return nil, err
	}
	return &skill, nil
}

// AISkillGetById fetches one skill by primary key.
func AISkillGetById(c *ctx.Context, id int64) (*AISkill, error) {
	return AISkillGet(c, "id = ?", id)
}
// Create inserts the skill with both timestamps set to now; a zero
// Enabled value defaults to enabled.
func (s *AISkill) Create(c *ctx.Context) error {
	ts := time.Now().Unix()
	s.CreatedAt, s.UpdatedAt = ts, ts
	if s.Enabled == 0 {
		s.Enabled = 1
	}
	return Insert(c, s)
}
// Update overwrites the editable columns with values from ref.
//
// NOTE(review): "updated_by" is in the column list but this method never
// sets ref.UpdatedBy (unlike AIAgent.Update, which takes a username) —
// confirm callers populate it before calling.
func (s *AISkill) Update(c *ctx.Context, ref AISkill) error {
	ref.UpdatedAt = time.Now().Unix()
	return DB(c).Model(s).Select("name", "description", "instructions",
		"license", "compatibility", "metadata", "allowed_tools",
		"enabled", "updated_at", "updated_by").Updates(ref).Error
}

// Delete removes the skill row by primary key.
func (s *AISkill) Delete(c *ctx.Context) error {
	return DB(c).Where("id = ?", s.Id).Delete(&AISkill{}).Error
}

72
models/ai_skill_file.go Normal file
View File

@@ -0,0 +1,72 @@
package models
import (
"fmt"
"time"
"github.com/ccfos/nightingale/v6/pkg/ctx"
)
// AISkillFile is an attachment stored inline with a skill; Content holds
// the whole file body (mediumtext) and Size its byte length.
type AISkillFile struct {
	Id        int64  `json:"id" gorm:"primaryKey;autoIncrement"`
	SkillId   int64  `json:"skill_id"` // FK to AISkill.Id
	Name      string `json:"name"`
	Content   string `json:"content" gorm:"type:mediumtext"`
	Size      int64  `json:"size"`       // len(Content) in bytes, derived in Create
	CreatedAt int64  `json:"created_at"` // unix seconds
	CreatedBy string `json:"created_by"`
}

// TableName tells GORM which table backs AISkillFile.
func (f *AISkillFile) TableName() string {
	return "ai_skill_file"
}
// AISkillFileGets lists a skill's files without the (potentially large)
// Content column; use AISkillFileGetContents when the bodies are needed.
func AISkillFileGets(c *ctx.Context, skillId int64) ([]*AISkillFile, error) {
	var files []*AISkillFile
	err := DB(c).
		Select("id, skill_id, name, size, created_at, created_by").
		Where("skill_id = ?", skillId).
		Order("id").
		Find(&files).Error
	return files, err
}

// AISkillFileGet returns the first file matching the condition, or
// (nil, nil) when no row matches.
func AISkillFileGet(c *ctx.Context, where string, args ...interface{}) (*AISkillFile, error) {
	var file AISkillFile
	if err := DB(c).Where(where, args...).First(&file).Error; err != nil {
		if err.Error() == "record not found" {
			return nil, nil
		}
		return nil, err
	}
	return &file, nil
}

// AISkillFileGetById fetches one file (including Content) by primary key.
func AISkillFileGetById(c *ctx.Context, id int64) (*AISkillFile, error) {
	return AISkillFileGet(c, "id = ?", id)
}
// Create stores the file after enforcing the per-skill attachment limit.
// Size and CreatedAt are derived here; callers fill SkillId, Name,
// Content and CreatedBy.
func (f *AISkillFile) Create(c *ctx.Context) error {
	// Check file count limit per skill (max 20). The count error was
	// previously discarded, which made a failed query read as count==0
	// and silently bypass the limit — now it aborts the insert.
	var count int64
	if err := DB(c).Model(&AISkillFile{}).Where("skill_id = ?", f.SkillId).Count(&count).Error; err != nil {
		return err
	}
	if count >= 20 {
		return fmt.Errorf("max 20 files per skill")
	}
	f.Size = int64(len(f.Content))
	f.CreatedAt = time.Now().Unix()
	return Insert(c, f)
}
// Delete removes the file row by primary key.
func (f *AISkillFile) Delete(c *ctx.Context) error {
	return DB(c).Where("id = ?", f.Id).Delete(&AISkillFile{}).Error
}

// AISkillFileDeleteBySkillId removes every file belonging to a skill.
func AISkillFileDeleteBySkillId(c *ctx.Context, skillId int64) error {
	return DB(c).Where("skill_id = ?", skillId).Delete(&AISkillFile{}).Error
}

// AISkillFileGetContents returns a skill's files with all columns,
// including Content (unlike AISkillFileGets, which omits it).
func AISkillFileGetContents(c *ctx.Context, skillId int64) ([]*AISkillFile, error) {
	var lst []*AISkillFile
	err := DB(c).Where("skill_id = ?", skillId).Find(&lst).Error
	return lst, err
}

86
models/mcp_server.go Normal file
View File

@@ -0,0 +1,86 @@
package models
import (
"fmt"
"strings"
"time"
"github.com/ccfos/nightingale/v6/pkg/ctx"
)
// MCPServer is a registered MCP endpoint an agent may call, identified by
// URL plus optional per-request headers.
type MCPServer struct {
	Id          int64             `json:"id" gorm:"primaryKey;autoIncrement"`
	Name        string            `json:"name"`
	URL         string            `json:"url"`
	Headers     map[string]string `json:"headers" gorm:"serializer:json"`
	Description string            `json:"description" gorm:"type:text"`
	Enabled     int               `json:"enabled"` // 1 = enabled, 0 = disabled
	CreatedAt   int64             `json:"created_at"`
	CreatedBy   string            `json:"created_by"`
	UpdatedAt   int64             `json:"updated_at"`
	UpdatedBy   string            `json:"updated_by"`
}

// TableName tells GORM which table backs MCPServer.
func (s *MCPServer) TableName() string {
	return "mcp_server"
}

// Verify trims Name and URL in place and rejects the record when either
// ends up empty.
func (s *MCPServer) Verify() error {
	if s.Name = strings.TrimSpace(s.Name); s.Name == "" {
		return fmt.Errorf("name is required")
	}
	if s.URL = strings.TrimSpace(s.URL); s.URL == "" {
		return fmt.Errorf("url is required")
	}
	return nil
}
// MCPServerGets returns every registered server ordered by id.
func MCPServerGets(c *ctx.Context) ([]*MCPServer, error) {
	var servers []*MCPServer
	err := DB(c).Order("id").Find(&servers).Error
	return servers, err
}

// MCPServerGet returns the first server matching the condition, or
// (nil, nil) when no row matches.
func MCPServerGet(c *ctx.Context, where string, args ...interface{}) (*MCPServer, error) {
	var srv MCPServer
	if err := DB(c).Where(where, args...).First(&srv).Error; err != nil {
		if err.Error() == "record not found" {
			return nil, nil
		}
		return nil, err
	}
	return &srv, nil
}

// MCPServerGetById fetches one server by primary key.
func MCPServerGetById(c *ctx.Context, id int64) (*MCPServer, error) {
	return MCPServerGet(c, "id = ?", id)
}
// Create inserts the server with both timestamps set to now; a zero
// Enabled value defaults to enabled.
func (s *MCPServer) Create(c *ctx.Context) error {
	now := time.Now().Unix()
	s.CreatedAt = now
	s.UpdatedAt = now
	if s.Enabled == 0 {
		s.Enabled = 1
	}
	return Insert(c, s)
}

// Update overwrites the editable columns with values from ref.
//
// NOTE(review): "updated_by" is selected but never set here — the caller
// must populate ref.UpdatedBy. Confirm callers do so.
func (s *MCPServer) Update(c *ctx.Context, ref MCPServer) error {
	ref.UpdatedAt = time.Now().Unix()
	return DB(c).Model(s).Select("name", "url", "headers", "description",
		"enabled", "updated_at", "updated_by").Updates(ref).Error
}

// Delete removes the server row by primary key.
func (s *MCPServer) Delete(c *ctx.Context) error {
	return DB(c).Where("id = ?", s.Id).Delete(&MCPServer{}).Error
}

// MCPServerGetEnabled returns only enabled servers, ordered by id.
func MCPServerGetEnabled(c *ctx.Context) ([]*MCPServer, error) {
	var lst []*MCPServer
	err := DB(c).Where("enabled = 1").Order("id").Find(&lst).Error
	return lst, err
}

View File

@@ -69,7 +69,9 @@ func MigrateTables(db *gorm.DB) error {
&models.MetricFilter{}, &models.NotificationRecord{}, &models.TargetBusiGroup{},
&models.UserToken{}, &models.DashAnnotation{}, MessageTemplate{}, NotifyRule{}, NotifyChannelConfig{}, &EsIndexPatternMigrate{},
&models.EventPipeline{}, &models.EventPipelineExecution{}, &models.EmbeddedProduct{}, &models.SourceToken{},
&models.SavedView{}, &models.UserViewFavorite{}}
&models.SavedView{}, &models.UserViewFavorite{},
&models.AILLMConfig{}, &models.AIAgent{}, &models.AISkill{}, &models.AISkillFile{}, &models.MCPServer{},
&models.AIConversation{}, &models.AIConversationMessage{}}
if isPostgres(db) {
dts = append(dts, &models.PostgresBuiltinComponent{})