feat: manually mirror opencoze's code from bytedance

Change-Id: I09a73aadda978ad9511264a756b2ce51f5761adf
This commit is contained in:
fanlv
2025-07-20 17:36:12 +08:00
commit 890153324f
14811 changed files with 1923430 additions and 0 deletions

View File

@@ -0,0 +1,64 @@
/*
* Copyright 2025 coze-dev Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package service
import (
"context"
einoCompose "github.com/cloudwego/eino/compose"
"github.com/cloudwego/eino/schema"
"github.com/coze-dev/coze-studio/backend/domain/workflow"
"github.com/coze-dev/coze-studio/backend/domain/workflow/entity"
"github.com/coze-dev/coze-studio/backend/domain/workflow/entity/vo"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/execute"
)
// asToolImpl exposes workflows as model-invocable tools, backed by the
// workflow repository.
type asToolImpl struct {
	repo workflow.Repository
}
// WithMessagePipe returns a compose option together with the stream reader
// on which workflow messages produced under that option are delivered.
func (a *asToolImpl) WithMessagePipe() (einoCompose.Option, *schema.StreamReader[*entity.Message]) {
	opt, reader := execute.WithMessagePipe()
	return opt, reader
}
// WithExecuteConfig wraps the given execute config as a tools-node compose
// option so it reaches the tool invocation of the workflow.
func (a *asToolImpl) WithExecuteConfig(cfg vo.ExecuteConfig) einoCompose.Option {
	toolOpt := einoCompose.WithToolOption(execute.WithExecuteConfig(cfg))
	return einoCompose.WithToolsNodeOption(toolOpt)
}
// WithResumeToolWorkflow builds a compose option that resumes an interrupted
// tool workflow, identified by resumingEvent, with the supplied resume data.
// allInterruptEvents carries every outstanding interrupt event by ID.
func (a *asToolImpl) WithResumeToolWorkflow(resumingEvent *entity.ToolInterruptEvent, resumeData string,
	allInterruptEvents map[string]*entity.ToolInterruptEvent) einoCompose.Option {
	resumeReq := &entity.ResumeRequest{
		ExecuteID:  resumingEvent.ExecuteID,
		EventID:    resumingEvent.ID,
		ResumeData: resumeData,
	}
	toolOpt := einoCompose.WithToolOption(execute.WithResume(resumeReq, allInterruptEvents))
	return einoCompose.WithToolsNodeOption(toolOpt)
}
// WorkflowAsModelTool converts each workflow selected by the given get
// policies into a model-invocable tool. It fails fast: the first conversion
// error aborts the whole batch.
func (a *asToolImpl) WorkflowAsModelTool(ctx context.Context, policies []*vo.GetPolicy) (tools []workflow.ToolFromWorkflow, err error) {
	// The loop variable is a *vo.GetPolicy, not an ID; named accordingly
	// (the original called it `id`, which was misleading).
	for _, policy := range policies {
		t, err := a.repo.WorkflowAsTool(ctx, *policy, vo.WorkflowToolConfig{})
		if err != nil {
			return nil, err
		}
		tools = append(tools, t)
	}
	return tools, nil
}

View File

@@ -0,0 +1,938 @@
/*
* Copyright 2025 coze-dev Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package service
import (
"context"
"errors"
"fmt"
"time"
einoCompose "github.com/cloudwego/eino/compose"
"github.com/cloudwego/eino/schema"
"github.com/coze-dev/coze-studio/backend/domain/workflow"
"github.com/coze-dev/coze-studio/backend/domain/workflow/entity"
"github.com/coze-dev/coze-studio/backend/domain/workflow/entity/vo"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/canvas/adaptor"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/compose"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/execute"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/nodes"
"github.com/coze-dev/coze-studio/backend/pkg/errorx"
"github.com/coze-dev/coze-studio/backend/pkg/lang/ptr"
"github.com/coze-dev/coze-studio/backend/pkg/lang/slices"
"github.com/coze-dev/coze-studio/backend/pkg/logs"
"github.com/coze-dev/coze-studio/backend/pkg/sonic"
"github.com/coze-dev/coze-studio/backend/types/errno"
)
// executableImpl groups workflow execution capabilities (run, resume, cancel,
// query) on top of the workflow repository.
// NOTE(review): the methods below are declared on *impl, not *executableImpl —
// confirm how the two types relate elsewhere in this package.
type executableImpl struct {
	repo workflow.Repository
}
// SyncExecute runs the specified workflow to completion (or interruption) and
// returns the resulting execution record together with the workflow's
// terminate plan. Inputs are converted to the workflow's declared types before
// the run; the raw input and final output are persisted as JSON strings.
func (i *impl) SyncExecute(ctx context.Context, config vo.ExecuteConfig, input map[string]any) (*entity.WorkflowExecution, vo.TerminatePlan, error) {
	var (
		err      error
		wfEntity *entity.Workflow
	)
	// Load the workflow (meta + canvas) according to the locator in config.
	wfEntity, err = i.Get(ctx, &vo.GetPolicy{
		ID:       config.ID,
		QType:    config.From,
		MetaOnly: false,
		Version:  config.Version,
		CommitID: config.CommitID,
	})
	if err != nil {
		return nil, "", err
	}
	// Application-owned workflows executed in release mode must have the
	// requested version released for this connector.
	isApplicationWorkflow := wfEntity.AppID != nil
	if isApplicationWorkflow && config.Mode == vo.ExecuteModeRelease {
		err = i.checkApplicationWorkflowReleaseVersion(ctx, *wfEntity.AppID, config.ConnectorID, config.ID, config.Version)
		if err != nil {
			return nil, "", err
		}
	}
	// Deserialize the canvas JSON and compile it into an executable workflow.
	c := &vo.Canvas{}
	if err = sonic.UnmarshalString(wfEntity.Canvas, c); err != nil {
		return nil, "", fmt.Errorf("failed to unmarshal canvas: %w", err)
	}
	workflowSC, err := adaptor.CanvasToWorkflowSchema(ctx, c)
	if err != nil {
		return nil, "", fmt.Errorf("failed to convert canvas to workflow schema: %w", err)
	}
	var wfOpts []compose.WorkflowOption
	wfOpts = append(wfOpts, compose.WithIDAsName(wfEntity.ID))
	// Enforce the configured per-workflow node-count limit, if any.
	if s := execute.GetStaticConfig(); s != nil && s.MaxNodeCountPerWorkflow > 0 {
		wfOpts = append(wfOpts, compose.WithMaxNodeCount(s.MaxNodeCountPerWorkflow))
	}
	wf, err := compose.NewWorkflow(ctx, workflowSC, wfOpts...)
	if err != nil {
		return nil, "", fmt.Errorf("failed to create workflow: %w", err)
	}
	// Inherit the owning app ID when the caller did not specify one.
	// NOTE(review): unlike AsyncExecute/StreamExecute, this method does not
	// copy wfEntity.CommitID into config — confirm whether that is intended.
	if wfEntity.AppID != nil && config.AppID == nil {
		config.AppID = wfEntity.AppID
	}
	var cOpts []nodes.ConvertOption
	if config.InputFailFast {
		cOpts = append(cOpts, nodes.FailFast())
	}
	// Convert raw inputs to the workflow's declared input types; warnings are
	// logged, and FailFast turns conversion problems into hard errors.
	convertedInput, ws, err := nodes.ConvertInputs(ctx, input, wf.Inputs(), cOpts...)
	if err != nil {
		return nil, "", err
	} else if ws != nil {
		logs.CtxWarnf(ctx, "convert inputs warnings: %v", *ws)
	}
	// The persisted input string is the raw (pre-conversion) input.
	inStr, err := sonic.MarshalString(input)
	if err != nil {
		return nil, "", err
	}
	cancelCtx, executeID, opts, lastEventChan, err := compose.NewWorkflowRunner(wfEntity.GetBasic(), workflowSC, config,
		compose.WithInput(inStr)).Prepare(ctx)
	if err != nil {
		return nil, "", err
	}
	startTime := time.Now()
	out, err := wf.SyncRun(cancelCtx, convertedInput, opts...)
	if err != nil {
		// Interrupt "errors" are expected and handled via lastEventChan below;
		// anything else is a genuine failure and is wrapped with debug info.
		if _, ok := einoCompose.ExtractInterruptInfo(err); !ok {
			var wfe vo.WorkflowError
			if errors.As(err, &wfe) {
				return nil, "", wfe.AppendDebug(executeID, wfEntity.SpaceID, wfEntity.ID)
			} else {
				return nil, "", vo.WrapWithDebug(errno.ErrWorkflowExecuteFail, err, executeID, wfEntity.SpaceID, wfEntity.ID, errorx.KV("cause", err.Error()))
			}
		}
	}
	// Wait for the terminal event (success / interrupt / failure / cancel).
	lastEvent := <-lastEventChan
	updateTime := time.Now()
	var outStr string
	if wf.TerminatePlan() == vo.ReturnVariables {
		outStr, err = sonic.MarshalString(out)
		if err != nil {
			return nil, "", err
		}
	} else {
		// Answer-style workflows place their text under the "output" key.
		// Use a checked assertion: on the interrupt path `out` may be nil,
		// and the unchecked form `out["output"].(string)` would panic.
		outStr, _ = out["output"].(string)
	}
	var status entity.WorkflowExecuteStatus
	switch lastEvent.Type {
	case execute.WorkflowSuccess:
		status = entity.WorkflowSuccess
	case execute.WorkflowInterrupt:
		status = entity.WorkflowInterrupted
	case execute.WorkflowFailed:
		status = entity.WorkflowFailed
	case execute.WorkflowCancel:
		status = entity.WorkflowCancel
	}
	var failReason *string
	if lastEvent.Err != nil {
		failReason = ptr.Of(lastEvent.Err.Error())
	}
	return &entity.WorkflowExecution{
		ID:            executeID,
		WorkflowID:    wfEntity.ID,
		Version:       wfEntity.GetVersion(),
		SpaceID:       wfEntity.SpaceID,
		ExecuteConfig: config,
		CreatedAt:     startTime,
		NodeCount:     workflowSC.NodeCount(),
		Status:        status,
		Duration:      lastEvent.Duration,
		Input:         ptr.Of(inStr),
		Output:        ptr.Of(outStr),
		ErrorCode:     ptr.Of("-1"),
		FailReason:    failReason,
		TokenInfo: &entity.TokenUsage{
			InputTokens:  lastEvent.GetInputTokens(),
			OutputTokens: lastEvent.GetOutputTokens(),
		},
		UpdatedAt:       ptr.Of(updateTime),
		RootExecutionID: executeID,
		InterruptEvents: lastEvent.InterruptEvents,
	}, wf.TerminatePlan(), nil
}
// AsyncExecute executes the specified workflow asynchronously, returning the execution ID.
// Intermediate results are not emitted on the fly.
// The caller is expected to poll the execution status using the GetExecution method and the returned execution ID.
func (i *impl) AsyncExecute(ctx context.Context, config vo.ExecuteConfig, input map[string]any) (int64, error) {
	var (
		err      error
		wfEntity *entity.Workflow
	)
	// Load the workflow (meta + canvas) according to the locator in config.
	wfEntity, err = i.Get(ctx, &vo.GetPolicy{
		ID:       config.ID,
		QType:    config.From,
		MetaOnly: false,
		Version:  config.Version,
		CommitID: config.CommitID,
	})
	if err != nil {
		return 0, err
	}
	// Application-owned workflows executed in release mode must have the
	// requested version released for this connector.
	isApplicationWorkflow := wfEntity.AppID != nil
	if isApplicationWorkflow && config.Mode == vo.ExecuteModeRelease {
		err = i.checkApplicationWorkflowReleaseVersion(ctx, *wfEntity.AppID, config.ConnectorID, config.ID, config.Version)
		if err != nil {
			return 0, err
		}
	}
	// Deserialize the canvas JSON and compile it into an executable workflow.
	c := &vo.Canvas{}
	if err = sonic.UnmarshalString(wfEntity.Canvas, c); err != nil {
		return 0, fmt.Errorf("failed to unmarshal canvas: %w", err)
	}
	workflowSC, err := adaptor.CanvasToWorkflowSchema(ctx, c)
	if err != nil {
		return 0, fmt.Errorf("failed to convert canvas to workflow schema: %w", err)
	}
	var wfOpts []compose.WorkflowOption
	wfOpts = append(wfOpts, compose.WithIDAsName(wfEntity.ID))
	// Enforce the configured per-workflow node-count limit, if any.
	if s := execute.GetStaticConfig(); s != nil && s.MaxNodeCountPerWorkflow > 0 {
		wfOpts = append(wfOpts, compose.WithMaxNodeCount(s.MaxNodeCountPerWorkflow))
	}
	wf, err := compose.NewWorkflow(ctx, workflowSC, wfOpts...)
	if err != nil {
		return 0, fmt.Errorf("failed to create workflow: %w", err)
	}
	// Inherit the owning app ID when the caller did not specify one.
	if wfEntity.AppID != nil && config.AppID == nil {
		config.AppID = wfEntity.AppID
	}
	config.CommitID = wfEntity.CommitID
	var cOpts []nodes.ConvertOption
	if config.InputFailFast {
		cOpts = append(cOpts, nodes.FailFast())
	}
	// Convert raw inputs to the workflow's declared input types; warnings are
	// logged, and FailFast turns conversion problems into hard errors.
	convertedInput, ws, err := nodes.ConvertInputs(ctx, input, wf.Inputs(), cOpts...)
	if err != nil {
		return 0, err
	} else if ws != nil {
		logs.CtxWarnf(ctx, "convert inputs warnings: %v", *ws)
	}
	// The persisted input string is the raw (pre-conversion) input.
	inStr, err := sonic.MarshalString(input)
	if err != nil {
		return 0, err
	}
	cancelCtx, executeID, opts, _, err := compose.NewWorkflowRunner(wfEntity.GetBasic(), workflowSC, config,
		compose.WithInput(inStr)).Prepare(ctx)
	if err != nil {
		return 0, err
	}
	// For debug runs, remember the latest execution ID per (workflow, user);
	// failures here are logged but intentionally do not abort the run.
	if config.Mode == vo.ExecuteModeDebug {
		if err = i.repo.SetTestRunLatestExeID(ctx, wfEntity.ID, config.Operator, executeID); err != nil {
			logs.CtxErrorf(ctx, "failed to set test run latest exe id: %v", err)
		}
	}
	wf.AsyncRun(cancelCtx, convertedInput, opts...)
	return executeID, nil
}
// AsyncExecuteNode asynchronously runs a single node of the specified workflow
// (node-debug mode), returning the execution ID. The caller polls the status
// via GetExecution.
func (i *impl) AsyncExecuteNode(ctx context.Context, nodeID string, config vo.ExecuteConfig, input map[string]any) (int64, error) {
	var (
		err      error
		wfEntity *entity.Workflow
	)
	// Load the workflow (meta + canvas) according to the locator in config.
	wfEntity, err = i.Get(ctx, &vo.GetPolicy{
		ID:       config.ID,
		QType:    config.From,
		MetaOnly: false,
		Version:  config.Version,
	})
	if err != nil {
		return 0, err
	}
	// Application-owned workflows executed in release mode must have the
	// requested version released for this connector.
	isApplicationWorkflow := wfEntity.AppID != nil
	if isApplicationWorkflow && config.Mode == vo.ExecuteModeRelease {
		err = i.checkApplicationWorkflowReleaseVersion(ctx, *wfEntity.AppID, config.ConnectorID, config.ID, config.Version)
		if err != nil {
			return 0, err
		}
	}
	// Deserialize the canvas and build a schema containing just the target node.
	c := &vo.Canvas{}
	if err = sonic.UnmarshalString(wfEntity.Canvas, c); err != nil {
		return 0, fmt.Errorf("failed to unmarshal canvas: %w", err)
	}
	workflowSC, err := adaptor.WorkflowSchemaFromNode(ctx, c, nodeID)
	if err != nil {
		return 0, fmt.Errorf("failed to convert canvas to workflow schema: %w", err)
	}
	wf, err := compose.NewWorkflowFromNode(ctx, workflowSC, vo.NodeKey(nodeID), einoCompose.WithGraphName(fmt.Sprintf("%d", wfEntity.ID)))
	if err != nil {
		return 0, fmt.Errorf("failed to create workflow: %w", err)
	}
	var cOpts []nodes.ConvertOption
	if config.InputFailFast {
		cOpts = append(cOpts, nodes.FailFast())
	}
	// Convert raw inputs to the node's declared input types; warnings are
	// logged, and FailFast turns conversion problems into hard errors.
	convertedInput, ws, err := nodes.ConvertInputs(ctx, input, wf.Inputs(), cOpts...)
	if err != nil {
		return 0, err
	} else if ws != nil {
		logs.CtxWarnf(ctx, "convert inputs warnings: %v", *ws)
	}
	// Inherit the owning app ID when the caller did not specify one.
	if wfEntity.AppID != nil && config.AppID == nil {
		config.AppID = wfEntity.AppID
	}
	config.CommitID = wfEntity.CommitID
	// The persisted input string is the raw (pre-conversion) input.
	inStr, err := sonic.MarshalString(input)
	if err != nil {
		return 0, err
	}
	cancelCtx, executeID, opts, _, err := compose.NewWorkflowRunner(wfEntity.GetBasic(), workflowSC, config,
		compose.WithInput(inStr)).Prepare(ctx)
	if err != nil {
		return 0, err
	}
	// For node-debug runs, remember the latest execution ID per
	// (workflow, node, user); failures are logged but do not abort the run.
	if config.Mode == vo.ExecuteModeNodeDebug {
		if err = i.repo.SetNodeDebugLatestExeID(ctx, wfEntity.ID, nodeID, config.Operator, executeID); err != nil {
			logs.CtxErrorf(ctx, "failed to set node debug latest exe id: %v", err)
		}
	}
	wf.AsyncRun(cancelCtx, convertedInput, opts...)
	return executeID, nil
}
// StreamExecute executes the specified workflow, returning a stream of execution events.
// The caller is expected to receive from the returned stream immediately.
func (i *impl) StreamExecute(ctx context.Context, config vo.ExecuteConfig, input map[string]any) (*schema.StreamReader[*entity.Message], error) {
	var (
		err      error
		wfEntity *entity.Workflow
		ws       *nodes.ConversionWarnings
	)
	// Load the workflow (meta + canvas) according to the locator in config.
	wfEntity, err = i.Get(ctx, &vo.GetPolicy{
		ID:       config.ID,
		QType:    config.From,
		MetaOnly: false,
		Version:  config.Version,
		CommitID: config.CommitID,
	})
	if err != nil {
		return nil, err
	}
	// Application-owned workflows executed in release mode must have the
	// requested version released for this connector.
	isApplicationWorkflow := wfEntity.AppID != nil
	if isApplicationWorkflow && config.Mode == vo.ExecuteModeRelease {
		err = i.checkApplicationWorkflowReleaseVersion(ctx, *wfEntity.AppID, config.ConnectorID, config.ID, config.Version)
		if err != nil {
			return nil, err
		}
	}
	// Deserialize the canvas JSON and compile it into an executable workflow.
	c := &vo.Canvas{}
	if err = sonic.UnmarshalString(wfEntity.Canvas, c); err != nil {
		return nil, fmt.Errorf("failed to unmarshal canvas: %w", err)
	}
	workflowSC, err := adaptor.CanvasToWorkflowSchema(ctx, c)
	if err != nil {
		return nil, fmt.Errorf("failed to convert canvas to workflow schema: %w", err)
	}
	var wfOpts []compose.WorkflowOption
	wfOpts = append(wfOpts, compose.WithIDAsName(wfEntity.ID))
	// Enforce the configured per-workflow node-count limit, if any.
	if s := execute.GetStaticConfig(); s != nil && s.MaxNodeCountPerWorkflow > 0 {
		wfOpts = append(wfOpts, compose.WithMaxNodeCount(s.MaxNodeCountPerWorkflow))
	}
	wf, err := compose.NewWorkflow(ctx, workflowSC, wfOpts...)
	if err != nil {
		return nil, fmt.Errorf("failed to create workflow: %w", err)
	}
	// Inherit the owning app ID when the caller did not specify one.
	if wfEntity.AppID != nil && config.AppID == nil {
		config.AppID = wfEntity.AppID
	}
	config.CommitID = wfEntity.CommitID
	var cOpts []nodes.ConvertOption
	if config.InputFailFast {
		cOpts = append(cOpts, nodes.FailFast())
	}
	// Convert raw inputs in place; warnings are logged, FailFast makes
	// conversion problems hard errors.
	input, ws, err = nodes.ConvertInputs(ctx, input, wf.Inputs(), cOpts...)
	if err != nil {
		return nil, err
	} else if ws != nil {
		logs.CtxWarnf(ctx, "convert inputs warnings: %v", *ws)
	}
	// NOTE: the persisted input string is the converted input here, unlike
	// SyncExecute/AsyncExecute which persist the raw input.
	inStr, err := sonic.MarshalString(input)
	if err != nil {
		return nil, err
	}
	// Events are pushed through this pipe; the reader end is returned to the caller.
	sr, sw := schema.Pipe[*entity.Message](10)
	cancelCtx, executeID, opts, _, err := compose.NewWorkflowRunner(wfEntity.GetBasic(), workflowSC, config,
		compose.WithInput(inStr), compose.WithStreamWriter(sw)).Prepare(ctx)
	if err != nil {
		return nil, err
	}
	// The execution ID is deliberately unused here; presumably consumers learn
	// it from the message stream — TODO confirm.
	_ = executeID
	wf.AsyncRun(cancelCtx, input, opts...)
	return sr, nil
}
// GetExecution returns the execution record for wfExe.ID, optionally including
// per-node executions. If the record is not persisted yet, a synthetic entry
// with status WorkflowRunning is returned instead of an error.
func (i *impl) GetExecution(ctx context.Context, wfExe *entity.WorkflowExecution, includeNodes bool) (*entity.WorkflowExecution, error) {
	wfExeID := wfExe.ID
	wfID := wfExe.WorkflowID
	version := wfExe.Version
	rootExeID := wfExe.RootExecutionID
	wfExeEntity, found, err := i.repo.GetWorkflowExecution(ctx, wfExeID)
	if err != nil {
		return nil, err
	}
	if !found {
		// Not in storage yet: report it as running, echoing back identifiers.
		return &entity.WorkflowExecution{
			ID:              wfExeID,
			WorkflowID:      wfID,
			Version:         version,
			RootExecutionID: rootExeID,
			Status:          entity.WorkflowRunning,
		}, nil
	}
	interruptEvent, found, err := i.repo.GetFirstInterruptEvent(ctx, wfExeID)
	if err != nil {
		return nil, fmt.Errorf("failed to find interrupt events: %v", err)
	}
	if found {
		// if we are currently interrupted, return this interrupt event,
		// otherwise only return this event if it's the current resuming event
		if wfExeEntity.Status == entity.WorkflowInterrupted ||
			(wfExeEntity.CurrentResumingEventID != nil && *wfExeEntity.CurrentResumingEventID == interruptEvent.ID) {
			wfExeEntity.InterruptEvents = []*entity.InterruptEvent{interruptEvent}
		}
	}
	if !includeNodes {
		return wfExeEntity, nil
	}
	// query the node executions for the root execution
	nodeExecs, err := i.repo.GetNodeExecutionsByWfExeID(ctx, wfExeID)
	if err != nil {
		return nil, fmt.Errorf("failed to find node executions: %v", err)
	}
	// Inner executions of composite nodes are grouped by node ID and index so
	// they can be merged into one aggregate execution per composite node.
	nodeGroups := make(map[string]map[int]*entity.NodeExecution)
	nodeGroupMaxIndex := make(map[string]int)
	var nodeIDSet map[string]struct{}
	// NOTE: the loop variable shadows the method receiver `i`; the receiver is
	// not referenced inside the loop, so this is harmless.
	for i := range nodeExecs {
		nodeExec := nodeExecs[i]
		if nodeExec.ParentNodeID != nil {
			if nodeIDSet == nil {
				// Lazily build the set of node IDs present in this execution.
				nodeIDSet = slices.ToMap(nodeExecs, func(e *entity.NodeExecution) (string, struct{}) {
					return e.NodeID, struct{}{}
				})
			}
			// Group only when the parent node itself belongs to this execution.
			if _, ok := nodeIDSet[*nodeExec.ParentNodeID]; ok {
				if _, ok := nodeGroups[nodeExec.NodeID]; !ok {
					nodeGroups[nodeExec.NodeID] = make(map[int]*entity.NodeExecution)
				}
				nodeGroups[nodeExec.NodeID][nodeExec.Index] = nodeExecs[i]
				if nodeExec.Index > nodeGroupMaxIndex[nodeExec.NodeID] {
					nodeGroupMaxIndex[nodeExec.NodeID] = nodeExec.Index
				}
				continue
			}
		}
		wfExeEntity.NodeExecutions = append(wfExeEntity.NodeExecutions, nodeExec)
	}
	// Collapse each composite group into a single merged node execution.
	for nodeID, nodeExes := range nodeGroups {
		groupNodeExe := mergeCompositeInnerNodes(nodeExes, nodeGroupMaxIndex[nodeID])
		wfExeEntity.NodeExecutions = append(wfExeEntity.NodeExecutions, groupNodeExe)
	}
	return wfExeEntity, nil
}
// GetNodeExecution returns the node execution for (exeID, nodeID). For a batch
// node being node-debugged, it additionally returns a merged view of the
// generated inner executions as the second result; otherwise the second result
// is nil.
func (i *impl) GetNodeExecution(ctx context.Context, exeID int64, nodeID string) (*entity.NodeExecution, *entity.NodeExecution, error) {
	nodeExe, found, err := i.repo.GetNodeExecution(ctx, exeID, nodeID)
	if err != nil {
		return nil, nil, err
	}
	if !found {
		return nil, nil, fmt.Errorf("try getting node exe for exeID : %d, nodeID : %s, but not found", exeID, nodeID)
	}
	// Only batch nodes have inner executions worth merging.
	if nodeExe.NodeType != entity.NodeTypeBatch {
		return nodeExe, nil, nil
	}
	wfExe, found, err := i.repo.GetWorkflowExecution(ctx, exeID)
	if err != nil {
		return nil, nil, err
	}
	if !found {
		return nil, nil, fmt.Errorf("try getting workflow exe for exeID : %d, but not found", exeID)
	}
	// The merged inner view applies only to node-debug executions.
	if wfExe.Mode != vo.ExecuteModeNodeDebug {
		return nodeExe, nil, nil
	}
	// when node debugging a node with batch mode, we need to query the inner node executions and return it together
	innerNodeExecs, err := i.repo.GetNodeExecutionByParent(ctx, exeID, nodeExe.NodeID)
	if err != nil {
		return nil, nil, err
	}
	for i := range innerNodeExecs {
		innerNodeID := innerNodeExecs[i].NodeID
		if !vo.IsGeneratedNodeForBatchMode(innerNodeID, nodeExe.NodeID) {
			// inner node is not generated, means this is normal batch, not node in batch mode
			return nodeExe, nil, nil
		}
	}
	// Index the inner executions and track the highest index so the merged
	// result can be sized correctly.
	var (
		maxIndex  int
		index2Exe = make(map[int]*entity.NodeExecution)
	)
	for i := range innerNodeExecs {
		index2Exe[innerNodeExecs[i].Index] = innerNodeExecs[i]
		if innerNodeExecs[i].Index > maxIndex {
			maxIndex = innerNodeExecs[i].Index
		}
	}
	return nodeExe, mergeCompositeInnerNodes(index2Exe, maxIndex), nil
}
// GetLatestTestRunInput returns the entry-node execution of the user's most
// recent test run for the given workflow. Lookup failures are logged and
// reported as "not found" (false) rather than surfaced as errors.
func (i *impl) GetLatestTestRunInput(ctx context.Context, wfID int64, userID int64) (*entity.NodeExecution, bool, error) {
	latestExeID, lookupErr := i.repo.GetTestRunLatestExeID(ctx, wfID, userID)
	switch {
	case lookupErr != nil:
		logs.CtxErrorf(ctx, "[GetLatestTestRunInput] failed to get node execution from redis, wfID: %d, err: %v", wfID, lookupErr)
		return nil, false, nil
	case latestExeID == 0:
		// No test run recorded for this (workflow, user) pair.
		return nil, false, nil
	}
	entryExe, _, execErr := i.GetNodeExecution(ctx, latestExeID, entity.EntryNodeKey)
	if execErr != nil {
		logs.CtxErrorf(ctx, "[GetLatestTestRunInput] failed to get node execution, exeID: %d, err: %v", latestExeID, execErr)
		return nil, false, nil
	}
	return entryExe, true, nil
}
// GetLatestNodeDebugInput returns the node execution (and, for batch-mode
// nodes, the merged inner execution) of the user's most recent node-debug run.
// Lookup failures are logged and reported as "not found" (false) rather than
// surfaced as errors.
func (i *impl) GetLatestNodeDebugInput(ctx context.Context, wfID int64, nodeID string, userID int64) (
	*entity.NodeExecution, *entity.NodeExecution, bool, error) {
	latestExeID, lookupErr := i.repo.GetNodeDebugLatestExeID(ctx, wfID, nodeID, userID)
	switch {
	case lookupErr != nil:
		logs.CtxErrorf(ctx, "[GetLatestNodeDebugInput] failed to get node execution from redis, wfID: %d, nodeID: %s, err: %v",
			wfID, nodeID, lookupErr)
		return nil, nil, false, nil
	case latestExeID == 0:
		// No node-debug run recorded for this (workflow, node, user) triple.
		return nil, nil, false, nil
	}
	nodeExe, innerExe, execErr := i.GetNodeExecution(ctx, latestExeID, nodeID)
	if execErr != nil {
		logs.CtxErrorf(ctx, "[GetLatestNodeDebugInput] failed to get node execution, exeID: %d, nodeID: %s, err: %v",
			latestExeID, nodeID, execErr)
		return nil, nil, false, nil
	}
	return nodeExe, innerExe, true, nil
}
// mergeCompositeInnerNodes collapses the per-index inner executions of a
// composite node (e.g. batch) into a single aggregate NodeExecution:
// duration is the maximum across indexes, token counts are summed, and the
// per-index executions are exposed via IndexedExecutions (length maxIndex+1,
// with nil gaps for missing indexes). Returns nil for an empty map.
func mergeCompositeInnerNodes(nodeExes map[int]*entity.NodeExecution, maxIndex int) *entity.NodeExecution {
	// Guard: the original dereferenced groupNodeExe unconditionally, which
	// would nil-panic on an empty group.
	if len(nodeExes) == 0 {
		return nil
	}
	// Seed identifying fields from an arbitrary member (map iteration order is
	// random; these fields are taken from whichever member comes first).
	var groupNodeExe *entity.NodeExecution
	for _, v := range nodeExes {
		groupNodeExe = &entity.NodeExecution{
			ID:           v.ID,
			ExecuteID:    v.ExecuteID,
			NodeID:       v.NodeID,
			NodeName:     v.NodeName,
			NodeType:     v.NodeType,
			ParentNodeID: v.ParentNodeID,
		}
		break
	}
	var (
		duration  time.Duration
		tokenInfo *entity.TokenUsage
		status    = entity.NodeSuccess
	)
	groupNodeExe.IndexedExecutions = make([]*entity.NodeExecution, maxIndex+1)
	for index, ne := range nodeExes {
		// Aggregate duration is the longest single index, not the sum.
		duration = max(duration, ne.Duration)
		if ne.TokenInfo != nil {
			if tokenInfo == nil {
				tokenInfo = &entity.TokenUsage{}
			}
			tokenInfo.InputTokens += ne.TokenInfo.InputTokens
			tokenInfo.OutputTokens += ne.TokenInfo.OutputTokens
		}
		// A failed or running member downgrades the aggregate status. Because
		// map iteration order is random, when both occur the last one seen
		// wins — preserved from the original implementation.
		if ne.Status == entity.NodeFailed {
			status = entity.NodeFailed
		} else if ne.Status == entity.NodeRunning {
			status = entity.NodeRunning
		}
		groupNodeExe.IndexedExecutions[index] = nodeExes[index]
	}
	groupNodeExe.Duration = duration
	groupNodeExe.TokenInfo = tokenInfo
	groupNodeExe.Status = status
	return groupNodeExe
}
// AsyncResume resumes a workflow execution asynchronously, using the passed in executionID and eventID.
// Intermediate results during the resuming run are not emitted on the fly.
// Caller is expected to poll the execution status using the GetExecution method.
func (i *impl) AsyncResume(ctx context.Context, req *entity.ResumeRequest, config vo.ExecuteConfig) error {
	wfExe, found, err := i.repo.GetWorkflowExecution(ctx, req.ExecuteID)
	if err != nil {
		return err
	}
	if !found {
		return fmt.Errorf("workflow execution does not exist, id: %d", req.ExecuteID)
	}
	// Only root executions in interrupted state can be resumed.
	if wfExe.RootExecutionID != wfExe.ID {
		return fmt.Errorf("only root workflow can be resumed")
	}
	if wfExe.Status != entity.WorkflowInterrupted {
		return fmt.Errorf("workflow execution %d is not interrupted, status is %v, cannot resume", req.ExecuteID, wfExe.Status)
	}
	// An empty version means the execution ran against the draft.
	var from vo.Locator
	if wfExe.Version == "" {
		from = vo.FromDraft
	} else {
		from = vo.FromSpecificVersion
	}
	// Reload the exact workflow revision the interrupted run was built from.
	wfEntity, err := i.Get(ctx, &vo.GetPolicy{
		ID:       wfExe.WorkflowID,
		QType:    from,
		Version:  wfExe.Version,
		CommitID: wfExe.CommitID,
	})
	if err != nil {
		return err
	}
	var canvas vo.Canvas
	err = sonic.UnmarshalString(wfEntity.Canvas, &canvas)
	if err != nil {
		return err
	}
	// Carry over identity/config from the interrupted execution so the resume
	// runs under the same context.
	config.From = from
	config.Version = wfExe.Version
	config.AppID = wfExe.AppID
	config.AgentID = wfExe.AgentID
	config.CommitID = wfExe.CommitID
	if config.ConnectorID == 0 {
		config.ConnectorID = wfExe.ConnectorID
	}
	// Node-debug executions are rebuilt from the single debugged node rather
	// than the full canvas.
	if wfExe.Mode == vo.ExecuteModeNodeDebug {
		nodeExes, err := i.repo.GetNodeExecutionsByWfExeID(ctx, wfExe.ID)
		if err != nil {
			return err
		}
		if len(nodeExes) == 0 {
			return fmt.Errorf("during node debug resume, no node execution found for workflow execution %d", wfExe.ID)
		}
		// The debugged node is the one without a parent.
		var nodeID string
		for _, ne := range nodeExes {
			if ne.ParentNodeID == nil {
				nodeID = ne.NodeID
				break
			}
		}
		workflowSC, err := adaptor.WorkflowSchemaFromNode(ctx, &canvas, nodeID)
		if err != nil {
			return fmt.Errorf("failed to convert canvas to workflow schema: %w", err)
		}
		wf, err := compose.NewWorkflowFromNode(ctx, workflowSC, vo.NodeKey(nodeID),
			einoCompose.WithGraphName(fmt.Sprintf("%d", wfExe.WorkflowID)))
		if err != nil {
			return fmt.Errorf("failed to create workflow: %w", err)
		}
		config.Mode = vo.ExecuteModeNodeDebug
		cancelCtx, _, opts, _, err := compose.NewWorkflowRunner(
			wfEntity.GetBasic(), workflowSC, config, compose.WithResumeReq(req)).Prepare(ctx)
		if err != nil {
			return err
		}
		// Resume runs take no fresh input; state is restored via the resume request.
		wf.AsyncRun(cancelCtx, nil, opts...)
		return nil
	}
	// Regular resume: rebuild the full workflow from the canvas.
	workflowSC, err := adaptor.CanvasToWorkflowSchema(ctx, &canvas)
	if err != nil {
		return fmt.Errorf("failed to convert canvas to workflow schema: %w", err)
	}
	var wfOpts []compose.WorkflowOption
	wfOpts = append(wfOpts, compose.WithIDAsName(wfExe.WorkflowID))
	// Enforce the configured per-workflow node-count limit, if any.
	if s := execute.GetStaticConfig(); s != nil && s.MaxNodeCountPerWorkflow > 0 {
		wfOpts = append(wfOpts, compose.WithMaxNodeCount(s.MaxNodeCountPerWorkflow))
	}
	wf, err := compose.NewWorkflow(ctx, workflowSC, wfOpts...)
	if err != nil {
		return fmt.Errorf("failed to create workflow: %w", err)
	}
	cancelCtx, _, opts, _, err := compose.NewWorkflowRunner(
		wfEntity.GetBasic(), workflowSC, config, compose.WithResumeReq(req)).Prepare(ctx)
	if err != nil {
		return err
	}
	// Resume runs take no fresh input; state is restored via the resume request.
	wf.AsyncRun(cancelCtx, nil, opts...)
	return nil
}
// StreamResume resumes a workflow execution, using the passed in executionID and eventID.
// Intermediate results during the resuming run are emitted using the returned StreamReader.
// Caller is expected to poll the execution status using the GetExecution method.
func (i *impl) StreamResume(ctx context.Context, req *entity.ResumeRequest, config vo.ExecuteConfig) (
	*schema.StreamReader[*entity.Message], error) {
	// must get the interrupt event
	// generate the state modifier
	wfExe, found, err := i.repo.GetWorkflowExecution(ctx, req.ExecuteID)
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, fmt.Errorf("workflow execution does not exist, id: %d", req.ExecuteID)
	}
	// Only root executions in interrupted state can be resumed.
	if wfExe.RootExecutionID != wfExe.ID {
		return nil, fmt.Errorf("only root workflow can be resumed")
	}
	if wfExe.Status != entity.WorkflowInterrupted {
		return nil, fmt.Errorf("workflow execution %d is not interrupted, status is %v, cannot resume", req.ExecuteID, wfExe.Status)
	}
	// An empty version means the execution ran against the draft.
	var from vo.Locator
	if wfExe.Version == "" {
		from = vo.FromDraft
	} else {
		from = vo.FromSpecificVersion
	}
	// Reload the exact workflow revision the interrupted run was built from.
	wfEntity, err := i.Get(ctx, &vo.GetPolicy{
		ID:       wfExe.WorkflowID,
		QType:    from,
		Version:  wfExe.Version,
		CommitID: wfExe.CommitID,
	})
	if err != nil {
		return nil, err
	}
	var canvas vo.Canvas
	err = sonic.UnmarshalString(wfEntity.Canvas, &canvas)
	if err != nil {
		return nil, err
	}
	workflowSC, err := adaptor.CanvasToWorkflowSchema(ctx, &canvas)
	if err != nil {
		return nil, fmt.Errorf("failed to convert canvas to workflow schema: %w", err)
	}
	var wfOpts []compose.WorkflowOption
	wfOpts = append(wfOpts, compose.WithIDAsName(wfExe.WorkflowID))
	// Enforce the configured per-workflow node-count limit, if any.
	if s := execute.GetStaticConfig(); s != nil && s.MaxNodeCountPerWorkflow > 0 {
		wfOpts = append(wfOpts, compose.WithMaxNodeCount(s.MaxNodeCountPerWorkflow))
	}
	wf, err := compose.NewWorkflow(ctx, workflowSC, wfOpts...)
	if err != nil {
		return nil, fmt.Errorf("failed to create workflow: %w", err)
	}
	// Carry over identity/config from the interrupted execution so the resume
	// runs under the same context.
	config.From = from
	config.Version = wfExe.Version
	config.AppID = wfExe.AppID
	config.AgentID = wfExe.AgentID
	config.CommitID = wfExe.CommitID
	if config.ConnectorID == 0 {
		config.ConnectorID = wfExe.ConnectorID
	}
	// Events are pushed through this pipe; the reader end is returned to the caller.
	sr, sw := schema.Pipe[*entity.Message](10)
	cancelCtx, _, opts, _, err := compose.NewWorkflowRunner(wfEntity.GetBasic(), workflowSC, config,
		compose.WithResumeReq(req), compose.WithStreamWriter(sw)).Prepare(ctx)
	if err != nil {
		return nil, err
	}
	// Resume runs take no fresh input; state is restored via the resume request.
	wf.AsyncRun(cancelCtx, nil, opts...)
	return sr, nil
}
// Cancel cancels the root workflow execution wfExeID, verifying it belongs to
// (wfID, spaceID). Already-terminal executions are a no-op. Interrupted
// executions are moved to canceled directly; running ones are signaled via a
// cancel flag and finalized by the execution event handling.
func (i *impl) Cancel(ctx context.Context, wfExeID int64, wfID, spaceID int64) error {
	wfExe, found, err := i.repo.GetWorkflowExecution(ctx, wfExeID)
	if err != nil {
		return err
	}
	if !found {
		return fmt.Errorf("workflow execution does not exist, wfExeID: %d", wfExeID)
	}
	// Ownership check: the execution must match the caller's workflow and space.
	if wfExe.WorkflowID != wfID || wfExe.SpaceID != spaceID {
		return fmt.Errorf("workflow execution id mismatch, wfExeID: %d, wfID: %d, spaceID: %d", wfExeID, wfID, spaceID)
	}
	if wfExe.Status != entity.WorkflowRunning && wfExe.Status != entity.WorkflowInterrupted {
		// already reached terminal state, no need to cancel
		return nil
	}
	if wfExe.ID != wfExe.RootExecutionID {
		return fmt.Errorf("can only cancel root execute ID")
	}
	wfExec := &entity.WorkflowExecution{
		ID:     wfExe.ID,
		Status: entity.WorkflowCancel,
	}
	var (
		updatedRows   int64
		currentStatus entity.WorkflowExecuteStatus
	)
	// Attempt a conditional update: interrupted -> canceled. updatedRows == 0
	// means the execution was not in the interrupted state at update time.
	if updatedRows, currentStatus, err = i.repo.UpdateWorkflowExecution(ctx, wfExec, []entity.WorkflowExecuteStatus{entity.WorkflowInterrupted}); err != nil {
		return fmt.Errorf("failed to save workflow execution to canceled while interrupted: %v", err)
	} else if updatedRows == 0 {
		if currentStatus != entity.WorkflowRunning {
			// already terminal state, try cancel all nodes just in case
			return i.repo.CancelAllRunningNodes(ctx, wfExe.ID)
		} else {
			// current running, let the execution time event handle do the actual updating status to cancel
		}
	} else if err = i.repo.CancelAllRunningNodes(ctx, wfExe.ID); err != nil { // we updated the workflow from interrupted to cancel, so we need to cancel all interrupting nodes
		return fmt.Errorf("failed to update all running nodes to cancel: %v", err)
	}
	// emit cancel signal just in case the execution is running
	return i.repo.SetWorkflowCancelFlag(ctx, wfExeID)
}
// checkApplicationWorkflowReleaseVersion verifies that the given workflow
// version has been released for the connector within the application,
// returning ErrWorkflowSpecifiedVersionNotFound when it has not.
func (i *impl) checkApplicationWorkflowReleaseVersion(ctx context.Context, appID, connectorID, workflowID int64, version string) error {
	ok, err := i.repo.IsApplicationConnectorWorkflowVersion(ctx, connectorID, workflowID, version)
	if err != nil {
		return err
	}
	if !ok {
		// Message fixed: the original read "applcaition id ... not have version"
		// (typo plus missing space after the comma).
		return vo.WrapError(errno.ErrWorkflowSpecifiedVersionNotFound, fmt.Errorf("application id %v, workflow id %v, connector id %v does not have version %v", appID, workflowID, connectorID, version))
	}
	return nil
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,356 @@
/*
* Copyright 2025 coze-dev Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package service
import (
"context"
"fmt"
"strconv"
"strings"
cloudworkflow "github.com/coze-dev/coze-studio/backend/api/model/ocean/cloud/workflow"
"github.com/coze-dev/coze-studio/backend/domain/workflow/crossdomain/variable"
"github.com/coze-dev/coze-studio/backend/domain/workflow/entity"
"github.com/coze-dev/coze-studio/backend/domain/workflow/entity/vo"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/canvas/adaptor"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/canvas/validate"
"github.com/coze-dev/coze-studio/backend/pkg/lang/slices"
"github.com/coze-dev/coze-studio/backend/pkg/sonic"
"github.com/coze-dev/coze-studio/backend/types/errno"
)
// validateWorkflowTree unmarshals and validates a workflow canvas, running the
// structural checks in order (connectivity, cycles, nesting, variable refs,
// global variables, sub-workflow terminate plan). It stops at the first check
// that reports issues and returns them; a nil/empty result means the canvas
// passed every check.
func validateWorkflowTree(ctx context.Context, config vo.ValidateTreeConfig) ([]*validate.Issue, error) {
	c := &vo.Canvas{}
	// Fixed: the original passed &c (a **vo.Canvas) here, inconsistent with
	// every other UnmarshalString call site in this package.
	if err := sonic.UnmarshalString(config.CanvasSchema, c); err != nil {
		return nil, vo.WrapError(errno.ErrSerializationDeserializationFail,
			fmt.Errorf("failed to unmarshal canvas schema: %w", err))
	}
	// Nodes unreachable from the graph are irrelevant to validation.
	c.Nodes, c.Edges = adaptor.PruneIsolatedNodes(c.Nodes, c.Edges, nil)
	validator, err := validate.NewCanvasValidator(ctx, &validate.Config{
		Canvas:              c,
		AppID:               config.AppID,
		AgentID:             config.AgentID,
		VariablesMetaGetter: variable.GetVariablesMetaGetter(),
	})
	if err != nil {
		return nil, fmt.Errorf("failed to new canvas validate : %w", err)
	}
	// The checks run in a fixed order; the first one that yields issues
	// short-circuits the rest (identical to the original unrolled sequence).
	checks := []struct {
		run  func(context.Context) ([]*validate.Issue, error)
		desc string
	}{
		{validator.ValidateConnections, "check connectivity"},
		{validator.DetectCycles, "check loops"},
		{validator.ValidateNestedFlows, "check nested batch or recurse"},
		{validator.CheckRefVariable, "check ref variable"},
		{validator.CheckGlobalVariables, "check global variables"},
		{validator.CheckSubWorkFlowTerminatePlanType, "check sub workflow terminate plan type"},
	}
	for _, chk := range checks {
		issues, err := chk.run(ctx)
		if err != nil {
			return nil, fmt.Errorf("failed to %s: %w", chk.desc, err)
		}
		if len(issues) > 0 {
			return issues, nil
		}
	}
	return nil, nil
}
// convertToValidationError maps a single validate.Issue onto the API-facing
// cloudworkflow.ValidateErrorData shape. Node errors take precedence over
// path errors when both are somehow present.
func convertToValidationError(issue *validate.Issue) *cloudworkflow.ValidateErrorData {
	out := &cloudworkflow.ValidateErrorData{Message: issue.Message}
	switch {
	case issue.NodeErr != nil:
		out.Type = cloudworkflow.ValidateErrorType_BotValidateNodeErr
		out.NodeError = &cloudworkflow.NodeError{NodeID: issue.NodeErr.NodeID}
	case issue.PathErr != nil:
		out.Type = cloudworkflow.ValidateErrorType_BotValidatePathErr
		out.PathError = &cloudworkflow.PathError{
			Start: issue.PathErr.StartNode,
			End:   issue.PathErr.EndNode,
		}
	}
	return out
}
// toValidateErrorData converts a slice of validation issues into the
// corresponding API error payloads, preserving order.
func toValidateErrorData(issues []*validate.Issue) []*cloudworkflow.ValidateErrorData {
	out := make([]*cloudworkflow.ValidateErrorData, len(issues))
	for i, issue := range issues {
		out[i] = convertToValidationError(issue)
	}
	return out
}
// toValidateIssue bundles the issue messages of one workflow, identified by
// id and name, into a vo.ValidateIssue.
func toValidateIssue(id int64, name string, issues []*validate.Issue) *vo.ValidateIssue {
	// A nil slice is kept when there are no issues, matching the previous
	// append-only behavior.
	var messages []string
	for _, issue := range issues {
		messages = append(messages, issue.Message)
	}
	return &vo.ValidateIssue{
		WorkflowID:    id,
		WorkflowName:  name,
		IssueMessages: messages,
	}
}
// version is a parsed semantic-style version number ("vMAJOR.MINOR.PATCH").
type version struct {
	// Prefix holds the leading tag of the version string.
	// NOTE(review): parseVersion in this file never populates it — confirm
	// whether any other caller sets/reads it.
	Prefix string
	Major  int
	Minor  int
	Patch  int
}
// parseVersion parses a "vMAJOR.MINOR.PATCH" string into a version value.
// Any parse failure is wrapped with errno.ErrInvalidVersionName via the
// deferred hook before being returned.
func parseVersion(versionString string) (_ version, err error) {
	defer func() {
		if err != nil {
			err = vo.WrapError(errno.ErrInvalidVersionName, err)
		}
	}()
	if !strings.HasPrefix(versionString, "v") {
		return version{}, fmt.Errorf("invalid prefix format: %s", versionString)
	}
	trimmed := strings.TrimPrefix(versionString, "v")
	parts := strings.Split(trimmed, ".")
	if len(parts) != 3 {
		return version{}, fmt.Errorf("invalid version format: %s", trimmed)
	}
	// Convert the three numeric components in one pass; the label feeds the
	// error message for whichever component fails.
	var nums [3]int
	for i, label := range []string{"major", "minor", "patch"} {
		n, convErr := strconv.Atoi(parts[i])
		if convErr != nil {
			return version{}, fmt.Errorf("invalid %s version: %s", label, parts[i])
		}
		nums[i] = n
	}
	return version{Major: nums[0], Minor: nums[1], Patch: nums[2]}, nil
}
// isIncremental reports whether next is strictly greater than prev under
// major > minor > patch precedence (i.e. a valid version bump).
func isIncremental(prev version, next version) bool {
	switch {
	case next.Major != prev.Major:
		return next.Major > prev.Major
	case next.Minor != prev.Minor:
		return next.Minor > prev.Minor
	default:
		return next.Patch > prev.Patch
	}
}
// replaceRelatedWorkflowOrPluginInWorkflowNodes walks nodes (recursing into
// nested Blocks) and rewrites every sub-workflow, plugin API and LLM
// function-call reference according to the relatedWorkflows / relatedPlugins
// remap tables. Nodes whose referenced ID is absent from the tables are left
// untouched.
func replaceRelatedWorkflowOrPluginInWorkflowNodes(nodes []*vo.Node, relatedWorkflows map[int64]entity.IDVersionPair, relatedPlugins map[int64]vo.PluginEntity) error {
	for _, node := range nodes {
		// node.Type is a single value, so the three cases are exclusive.
		switch node.Type {
		case vo.BlockTypeBotSubWorkflow:
			if err := replaceSubWorkflowRef(node, relatedWorkflows); err != nil {
				return err
			}
		case vo.BlockTypeBotAPI:
			if err := replacePluginAPIRef(node, relatedPlugins); err != nil {
				return err
			}
		case vo.BlockTypeBotLLM:
			if err := replaceLLMFCRefs(node, relatedWorkflows, relatedPlugins); err != nil {
				return err
			}
		}
		// Composite nodes (loop/batch bodies, etc.) carry children in Blocks.
		if len(node.Blocks) > 0 {
			if err := replaceRelatedWorkflowOrPluginInWorkflowNodes(node.Blocks, relatedWorkflows, relatedPlugins); err != nil {
				return err
			}
		}
	}
	return nil
}

// replaceSubWorkflowRef remaps the workflow ID/version of a sub-workflow node.
func replaceSubWorkflowRef(node *vo.Node, relatedWorkflows map[int64]entity.IDVersionPair) error {
	workflowID, err := strconv.ParseInt(node.Data.Inputs.WorkflowID, 10, 64)
	if err != nil {
		return err
	}
	if wf, ok := relatedWorkflows[workflowID]; ok {
		node.Data.Inputs.WorkflowID = strconv.FormatInt(wf.ID, 10)
		node.Data.Inputs.WorkflowVersion = wf.Version
	}
	return nil
}

// replacePluginAPIRef remaps the pluginID/pluginVersion params of a plugin
// API node.
func replacePluginAPIRef(node *vo.Node, relatedPlugins map[int64]vo.PluginEntity) error {
	apiParams := slices.ToMap(node.Data.Inputs.APIParams, func(e *vo.Param) (string, *vo.Param) {
		return e.Name, e
	})
	pluginIDParam, ok := apiParams["pluginID"]
	if !ok {
		return fmt.Errorf("plugin id param is not found")
	}
	// The previous code used an unchecked .(string) assertion here, which
	// panics on malformed input; fail with an error instead.
	idStr, ok := pluginIDParam.Input.Value.Content.(string)
	if !ok {
		return fmt.Errorf("plugin id param content is not a string")
	}
	pID, err := strconv.ParseInt(idStr, 10, 64)
	if err != nil {
		return err
	}
	pluginVersionParam, ok := apiParams["pluginVersion"]
	if !ok {
		return fmt.Errorf("plugin version param is not found")
	}
	if refPlugin, ok := relatedPlugins[pID]; ok {
		// Write the ID back as a decimal string so the param keeps the
		// content type it is read with (the string assertion above). The
		// previous code stored a raw int64 here, which would break a second
		// pass over the same canvas.
		pluginIDParam.Input.Value.Content = strconv.FormatInt(refPlugin.PluginID, 10)
		if refPlugin.PluginVersion != nil {
			pluginVersionParam.Input.Value.Content = *refPlugin.PluginVersion
		}
	}
	return nil
}

// replaceLLMFCRefs remaps the workflow and plugin lists inside an LLM node's
// function-call configuration.
func replaceLLMFCRefs(node *vo.Node, relatedWorkflows map[int64]entity.IDVersionPair, relatedPlugins map[int64]vo.PluginEntity) error {
	fc := node.Data.Inputs.FCParam
	if fc == nil {
		return nil
	}
	if fc.WorkflowFCParam != nil {
		for idx := range fc.WorkflowFCParam.WorkflowList {
			wf := fc.WorkflowFCParam.WorkflowList[idx]
			workflowID, err := strconv.ParseInt(wf.WorkflowID, 10, 64)
			if err != nil {
				return err
			}
			if refWf, ok := relatedWorkflows[workflowID]; ok {
				wf.WorkflowID = strconv.FormatInt(refWf.ID, 10)
				wf.WorkflowVersion = refWf.Version
			}
		}
	}
	if fc.PluginFCParam != nil {
		for idx := range fc.PluginFCParam.PluginList {
			pl := fc.PluginFCParam.PluginList[idx]
			pluginID, err := strconv.ParseInt(pl.PluginID, 10, 64)
			if err != nil {
				return err
			}
			if refPlugin, ok := relatedPlugins[pluginID]; ok {
				pl.PluginID = strconv.FormatInt(refPlugin.PluginID, 10)
				if refPlugin.PluginVersion != nil {
					pl.PluginVersion = *refPlugin.PluginVersion
				}
			}
		}
	}
	return nil
}
// blockTypeByNodeType is the entity.NodeType -> vo.BlockType mapping used by
// entityNodeTypeToBlockType. Initialized once at package load.
var blockTypeByNodeType = map[entity.NodeType]vo.BlockType{
	entity.NodeTypeEntry:                      vo.BlockTypeBotStart,
	entity.NodeTypeExit:                       vo.BlockTypeBotEnd,
	entity.NodeTypeLLM:                        vo.BlockTypeBotLLM,
	entity.NodeTypePlugin:                     vo.BlockTypeBotAPI,
	entity.NodeTypeCodeRunner:                 vo.BlockTypeBotCode,
	entity.NodeTypeKnowledgeRetriever:         vo.BlockTypeBotDataset,
	entity.NodeTypeSelector:                   vo.BlockTypeCondition,
	entity.NodeTypeSubWorkflow:                vo.BlockTypeBotSubWorkflow,
	entity.NodeTypeDatabaseCustomSQL:          vo.BlockTypeDatabase,
	entity.NodeTypeOutputEmitter:              vo.BlockTypeBotMessage,
	entity.NodeTypeTextProcessor:              vo.BlockTypeBotText,
	entity.NodeTypeQuestionAnswer:             vo.BlockTypeQuestion,
	entity.NodeTypeBreak:                      vo.BlockTypeBotBreak,
	entity.NodeTypeVariableAssigner:           vo.BlockTypeBotAssignVariable,
	entity.NodeTypeVariableAssignerWithinLoop: vo.BlockTypeBotLoopSetVariable,
	entity.NodeTypeLoop:                       vo.BlockTypeBotLoop,
	entity.NodeTypeIntentDetector:             vo.BlockTypeBotIntent,
	entity.NodeTypeKnowledgeIndexer:           vo.BlockTypeBotDatasetWrite,
	entity.NodeTypeBatch:                      vo.BlockTypeBotBatch,
	entity.NodeTypeContinue:                   vo.BlockTypeBotContinue,
	entity.NodeTypeInputReceiver:              vo.BlockTypeBotInput,
	entity.NodeTypeDatabaseUpdate:             vo.BlockTypeDatabaseUpdate,
	entity.NodeTypeDatabaseQuery:              vo.BlockTypeDatabaseSelect,
	entity.NodeTypeDatabaseDelete:             vo.BlockTypeDatabaseDelete,
	entity.NodeTypeHTTPRequester:              vo.BlockTypeBotHttp,
	entity.NodeTypeDatabaseInsert:             vo.BlockTypeDatabaseInsert,
	entity.NodeTypeVariableAggregator:         vo.BlockTypeBotVariableMerge,
	entity.NodeTypeJsonSerialization:          vo.BlockTypeJsonSerialization,
	entity.NodeTypeJsonDeserialization:        vo.BlockTypeJsonDeserialization,
	entity.NodeTypeKnowledgeDeleter:           vo.BlockTypeBotDatasetDelete,
}

// entityNodeTypeToBlockType converts an entity.NodeType to the corresponding
// vo.BlockType, or returns an ErrSchemaConversionFail-wrapped error for an
// unknown node type.
func entityNodeTypeToBlockType(nodeType entity.NodeType) (vo.BlockType, error) {
	if blockType, ok := blockTypeByNodeType[nodeType]; ok {
		return blockType, nil
	}
	return "", vo.WrapError(errno.ErrSchemaConversionFail,
		fmt.Errorf("cannot map entity node type '%s' to a workflow.NodeTemplateType", nodeType))
}