feat: manually mirror opencoze's code from bytedance

Change-Id: I09a73aadda978ad9511264a756b2ce51f5761adf
fanlv
2025-07-20 17:36:12 +08:00
commit 890153324f
14811 changed files with 1923430 additions and 0 deletions


@@ -0,0 +1,63 @@
/*
* Copyright 2025 coze-dev Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package repo
import (
"context"
"fmt"
"time"
"github.com/redis/go-redis/v9"
"github.com/coze-dev/coze-studio/backend/domain/workflow/entity/vo"
"github.com/coze-dev/coze-studio/backend/types/errno"
)
type cancelSignalStoreImpl struct {
redis *redis.Client
}
const workflowExecutionCancelStatusKey = "workflow:cancel:status:%d"
func (c *cancelSignalStoreImpl) SetWorkflowCancelFlag(ctx context.Context, wfExeID int64) (err error) {
statusKey := fmt.Sprintf(workflowExecutionCancelStatusKey, wfExeID)
// Define a reasonable expiration for the status key, e.g., 24 hours
expiration := 24 * time.Hour
// set a key in Redis to mark this execution as cancelled
err = c.redis.Set(ctx, statusKey, "cancelled", expiration).Err()
if err != nil {
return vo.WrapError(errno.ErrRedisError,
fmt.Errorf("failed to set workflow cancel status for wfExeID %d: %w", wfExeID, err))
}
return nil
}
func (c *cancelSignalStoreImpl) GetWorkflowCancelFlag(ctx context.Context, wfExeID int64) (bool, error) {
// Construct Redis key for workflow cancellation status
key := fmt.Sprintf(workflowExecutionCancelStatusKey, wfExeID)
// Check if the key exists in Redis
count, err := c.redis.Exists(ctx, key).Result()
if err != nil {
return false, vo.WrapError(errno.ErrRedisError, fmt.Errorf("failed to check cancellation status in Redis: %w", err))
}
// If key exists (count == 1), return true; otherwise return false
return count == 1, nil
}
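
For reference, a minimal usage sketch of the cancel-flag store above. It assumes the same repo package and imports; the constructor newCancelSignalStore and the caller/engine split are illustrative, not part of the mirrored code.

// Sketch only: newCancelSignalStore is a hypothetical constructor for illustration.
func newCancelSignalStore(rdb *redis.Client) *cancelSignalStoreImpl {
    return &cancelSignalStoreImpl{redis: rdb}
}

// Cooperative cancellation: the caller sets the flag, the engine polls it
// (e.g. before starting the next node) and stops scheduling work once it is set.
func exampleCancel(ctx context.Context, rdb *redis.Client, wfExeID int64) (bool, error) {
    store := newCancelSignalStore(rdb)
    // Caller side: request cancellation of a running execution.
    if err := store.SetWorkflowCancelFlag(ctx, wfExeID); err != nil {
        return false, err
    }
    // Engine side: check whether this execution has been cancelled.
    return store.GetWorkflowCancelFlag(ctx, wfExeID)
}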


@@ -0,0 +1,22 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
const TableNameConnectorWorkflowVersion = "connector_workflow_version"
// ConnectorWorkflowVersion mapped from table <connector_workflow_version>
type ConnectorWorkflowVersion struct {
ID int64 `gorm:"column:id;primaryKey;autoIncrement:true;comment:id" json:"id"` // id
AppID int64 `gorm:"column:app_id;not null;comment:app id" json:"app_id"` // app id
ConnectorID int64 `gorm:"column:connector_id;not null;comment:connector id" json:"connector_id"` // connector id
WorkflowID int64 `gorm:"column:workflow_id;not null;comment:workflow id" json:"workflow_id"` // workflow id
Version string `gorm:"column:version;not null;comment:version" json:"version"` // version
CreatedAt int64 `gorm:"column:created_at;not null;autoCreateTime:milli;comment:create time in millisecond" json:"created_at"` // create time in millisecond
}
// TableName ConnectorWorkflowVersion's table name
func (*ConnectorWorkflowVersion) TableName() string {
return TableNameConnectorWorkflowVersion
}


@@ -0,0 +1,37 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
const TableNameNodeExecution = "node_execution"
// NodeExecution is the node execution record table, recording the status of every node in each workflow execution
type NodeExecution struct {
ID int64 `gorm:"column:id;primaryKey;comment:node execution id" json:"id"` // node execution id
ExecuteID int64 `gorm:"column:execute_id;not null;comment:the workflow execute id this node execution belongs to" json:"execute_id"` // the workflow execute id this node execution belongs to
NodeID string `gorm:"column:node_id;not null;comment:node key" json:"node_id"` // node key
NodeName string `gorm:"column:node_name;not null;comment:name of the node" json:"node_name"` // name of the node
NodeType string `gorm:"column:node_type;not null;comment:the type of the node, in string" json:"node_type"` // the type of the node, in string
CreatedAt int64 `gorm:"column:created_at;not null;autoCreateTime:milli;comment:create time in millisecond" json:"created_at"` // create time in millisecond
Status int32 `gorm:"column:status;not null;comment:1=waiting 2=running 3=success 4=fail" json:"status"` // 1=waiting 2=running 3=success 4=fail
Duration int64 `gorm:"column:duration;comment:execution duration in millisecond" json:"duration"` // execution duration in millisecond
Input string `gorm:"column:input;comment:actual input of the node" json:"input"` // actual input of the node
Output string `gorm:"column:output;comment:actual output of the node" json:"output"` // actual output of the node
RawOutput string `gorm:"column:raw_output;comment:the original output of the node" json:"raw_output"` // the original output of the node
ErrorInfo string `gorm:"column:error_info;comment:error info" json:"error_info"` // error info
ErrorLevel string `gorm:"column:error_level;comment:level of the error" json:"error_level"` // level of the error
InputTokens int64 `gorm:"column:input_tokens;comment:number of input tokens" json:"input_tokens"` // number of input tokens
OutputTokens int64 `gorm:"column:output_tokens;comment:number of output tokens" json:"output_tokens"` // number of output tokens
UpdatedAt int64 `gorm:"column:updated_at;autoUpdateTime:milli;comment:update time in millisecond" json:"updated_at"` // update time in millisecond
CompositeNodeIndex int64 `gorm:"column:composite_node_index;comment:loop or batch's execution index" json:"composite_node_index"` // loop or batch's execution index
CompositeNodeItems string `gorm:"column:composite_node_items;comment:the items extracted from parent composite node for this index" json:"composite_node_items"` // the items extracted from parent composite node for this index
ParentNodeID string `gorm:"column:parent_node_id;comment:when as inner node for loop or batch, this is the parent node's key" json:"parent_node_id"` // when as inner node for loop or batch, this is the parent node's key
SubExecuteID int64 `gorm:"column:sub_execute_id;comment:if this node is sub_workflow, the exe id of the sub workflow" json:"sub_execute_id"` // if this node is sub_workflow, the exe id of the sub workflow
Extra string `gorm:"column:extra;comment:extra info" json:"extra"` // extra info
}
// TableName NodeExecution's table name
func (*NodeExecution) TableName() string {
return TableNameNodeExecution
}
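
The status column is a bare int32; a caller-side mapping such as the following keeps the magic numbers out of business code. The constant names are hypothetical, only the numeric values come from the column comment above.

// Hypothetical constants mirroring the node_execution.status column comment.
const (
    NodeStatusWaiting int32 = 1 // 1=waiting
    NodeStatusRunning int32 = 2 // 2=running
    NodeStatusSuccess int32 = 3 // 3=success
    NodeStatusFail    int32 = 4 // 4=fail
)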


@@ -0,0 +1,29 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
import (
"gorm.io/gorm"
)
const TableNameWorkflowDraft = "workflow_draft"
// WorkflowDraft is the workflow canvas draft table, recording the latest draft canvas of each workflow
type WorkflowDraft struct {
ID int64 `gorm:"column:id;primaryKey;comment:workflow ID" json:"id"` // workflow ID
Canvas string `gorm:"column:canvas;not null;comment:前端 schema" json:"canvas"` // frontend schema
InputParams string `gorm:"column:input_params;comment: 入参 schema" json:"input_params"` // input parameter schema
OutputParams string `gorm:"column:output_params;comment: 出参 schema" json:"output_params"` // output parameter schema
TestRunSuccess bool `gorm:"column:test_run_success;not null;comment:0 未运行, 1 运行成功" json:"test_run_success"` // 0 = not test run yet, 1 = test run succeeded
Modified bool `gorm:"column:modified;not null;comment:0 未被修改, 1 已被修改" json:"modified"` // 0 = unmodified, 1 = modified
UpdatedAt int64 `gorm:"column:updated_at;autoUpdateTime:milli" json:"updated_at"`
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at" json:"deleted_at"`
CommitID string `gorm:"column:commit_id;not null;comment:used to uniquely identify a draft snapshot" json:"commit_id"` // used to uniquely identify a draft snapshot
}
// TableName WorkflowDraft's table name
func (*WorkflowDraft) TableName() string {
return TableNameWorkflowDraft
}


@@ -0,0 +1,43 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
const TableNameWorkflowExecution = "workflow_execution"
// WorkflowExecution is the workflow execution record table, recording the status of each workflow run
type WorkflowExecution struct {
ID int64 `gorm:"column:id;primaryKey;comment:execute id" json:"id"` // execute id
WorkflowID int64 `gorm:"column:workflow_id;not null;comment:workflow_id" json:"workflow_id"` // workflow_id
Version string `gorm:"column:version;comment:workflow version. empty if is draft" json:"version"` // workflow version. empty if is draft
SpaceID int64 `gorm:"column:space_id;not null;comment:the space id the workflow belongs to" json:"space_id"` // the space id the workflow belongs to
Mode int32 `gorm:"column:mode;not null;comment:the execution mode: 1. debug run 2. release run 3. node debug" json:"mode"` // the execution mode: 1. debug run 2. release run 3. node debug
OperatorID int64 `gorm:"column:operator_id;not null;comment:the user id that runs this workflow" json:"operator_id"` // the user id that runs this workflow
ConnectorID int64 `gorm:"column:connector_id;comment:the connector on which this execution happened" json:"connector_id"` // the connector on which this execution happened
ConnectorUID string `gorm:"column:connector_uid;comment:user id of the connector" json:"connector_uid"` // user id of the connector
CreatedAt int64 `gorm:"column:created_at;not null;autoCreateTime:milli;comment:create time in millisecond" json:"created_at"` // create time in millisecond
LogID string `gorm:"column:log_id;comment:log id" json:"log_id"` // log id
Status int32 `gorm:"column:status;comment:1=running 2=success 3=fail 4=interrupted" json:"status"` // 1=running 2=success 3=fail 4=interrupted
Duration int64 `gorm:"column:duration;comment:execution duration in millisecond" json:"duration"` // execution duration in millisecond
Input string `gorm:"column:input;comment:actual input of this execution" json:"input"` // actual input of this execution
Output string `gorm:"column:output;comment:the actual output of this execution" json:"output"` // the actual output of this execution
ErrorCode string `gorm:"column:error_code;comment:error code if any" json:"error_code"` // error code if any
FailReason string `gorm:"column:fail_reason;comment:the reason for failure" json:"fail_reason"` // the reason for failure
InputTokens int64 `gorm:"column:input_tokens;comment:number of input tokens" json:"input_tokens"` // number of input tokens
OutputTokens int64 `gorm:"column:output_tokens;comment:number of output tokens" json:"output_tokens"` // number of output tokens
UpdatedAt int64 `gorm:"column:updated_at;autoUpdateTime:milli;comment:update time in millisecond" json:"updated_at"` // update time in millisecond
RootExecutionID int64 `gorm:"column:root_execution_id;comment:the top level execution id. Null if this is the root" json:"root_execution_id"` // the top level execution id. Null if this is the root
ParentNodeID string `gorm:"column:parent_node_id;comment:the node key for the sub_workflow node that executes this workflow" json:"parent_node_id"` // the node key for the sub_workflow node that executes this workflow
AppID int64 `gorm:"column:app_id;comment:app id this workflow execution belongs to" json:"app_id"` // app id this workflow execution belongs to
NodeCount int32 `gorm:"column:node_count;comment:the total node count of the workflow" json:"node_count"` // the total node count of the workflow
ResumeEventID int64 `gorm:"column:resume_event_id;comment:the current event ID which is resuming" json:"resume_event_id"` // the current event ID which is resuming
AgentID int64 `gorm:"column:agent_id;comment:the agent that this execution binds to" json:"agent_id"` // the agent that this execution binds to
SyncPattern int32 `gorm:"column:sync_pattern;comment:the sync pattern 1. sync 2. async 3. stream" json:"sync_pattern"` // the sync pattern 1. sync 2. async 3. stream
CommitID string `gorm:"column:commit_id;comment:draft commit id this execution belongs to" json:"commit_id"` // draft commit id this execution belongs to
}
// TableName WorkflowExecution's table name
func (*WorkflowExecution) TableName() string {
return TableNameWorkflowExecution
}
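
A rough sketch of recording the start of a run with plain GORM against this model (the generated query package further down offers a typed alternative). The function, parameter list, and JSON payload are placeholders; it assumes an initialized *gorm.DB and the model import shown later.

// Sketch: insert a workflow_execution row for a debug run.
func recordDebugRun(ctx context.Context, db *gorm.DB, execID, workflowID, spaceID, operatorID int64) error {
    exec := &model.WorkflowExecution{
        ID:         execID, // typically issued by an ID generator
        WorkflowID: workflowID,
        SpaceID:    spaceID,
        Mode:       1, // 1 = debug run, per the column comment
        OperatorID: operatorID,
        Status:     1, // 1 = running
        Input:      `{"query":"hello"}`,
    }
    return db.WithContext(ctx).Create(exec).Error
}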


@@ -0,0 +1,39 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
import (
"gorm.io/gorm"
)
const TableNameWorkflowMeta = "workflow_meta"
// WorkflowMeta is the workflow metadata table, recording the basic meta information of each workflow
type WorkflowMeta struct {
ID int64 `gorm:"column:id;primaryKey;comment:workflow id" json:"id"` // workflow id
Name string `gorm:"column:name;not null;comment:workflow name" json:"name"` // workflow name
Description string `gorm:"column:description;not null;comment:workflow description" json:"description"` // workflow description
IconURI string `gorm:"column:icon_uri;not null;comment:icon uri" json:"icon_uri"` // icon uri
Status int32 `gorm:"column:status;not null;comment:0:未发布过, 1:已发布过" json:"status"` // 0: never published, 1: published
ContentType int32 `gorm:"column:content_type;not null;comment:0用户 1官方" json:"content_type"` // 0 = user, 1 = official
Mode int32 `gorm:"column:mode;not null;comment:0:workflow, 3:chat_flow" json:"mode"` // 0:workflow, 3:chat_flow
CreatedAt int64 `gorm:"column:created_at;not null;autoCreateTime:milli;comment:create time in millisecond" json:"created_at"` // create time in millisecond
UpdatedAt int64 `gorm:"column:updated_at;autoUpdateTime:milli;comment:update time in millisecond" json:"updated_at"` // update time in millisecond
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;comment:delete time in millisecond" json:"deleted_at"` // delete time in millisecond
CreatorID int64 `gorm:"column:creator_id;not null;comment:user id for creator" json:"creator_id"` // user id for creator
Tag int32 `gorm:"column:tag;comment:template tag: Tag: 1=All, 2=Hot, 3=Information, 4=Music, 5=Picture, 6=UtilityTool, 7=Life, 8=Traval, 9=Network, 10=System, 11=Movie, 12=Office, 13=Shopping, 14=Education, 15=Health, 16=Social, 17=Entertainment, 18=Finance, 100=Hidden" json:"tag"` // template tag: Tag: 1=All, 2=Hot, 3=Information, 4=Music, 5=Picture, 6=UtilityTool, 7=Life, 8=Traval, 9=Network, 10=System, 11=Movie, 12=Office, 13=Shopping, 14=Education, 15=Health, 16=Social, 17=Entertainment, 18=Finance, 100=Hidden
AuthorID int64 `gorm:"column:author_id;not null;comment:原作者用户 ID" json:"author_id"` // original author user ID
SpaceID int64 `gorm:"column:space_id;not null;comment: 空间 ID" json:"space_id"` // space ID
UpdaterID int64 `gorm:"column:updater_id;comment: 更新元信息的用户 ID" json:"updater_id"` // user ID that updated the meta info
SourceID int64 `gorm:"column:source_id;comment: 复制来源的 workflow ID" json:"source_id"` // workflow ID this was copied from
AppID int64 `gorm:"column:app_id;comment:应用 ID" json:"app_id"` // application ID
LatestVersion string `gorm:"column:latest_version;comment:the version of the most recent publish" json:"latest_version"` // the version of the most recent publish
LatestVersionTs int64 `gorm:"column:latest_version_ts;comment:create time of latest version" json:"latest_version_ts"` // create time of latest version
}
// TableName WorkflowMeta's table name
func (*WorkflowMeta) TableName() string {
return TableNameWorkflowMeta
}
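
Because DeletedAt is a gorm.DeletedAt, GORM applies soft-delete semantics to this table automatically. A hedged sketch of what that means for reads; the function and variables are illustrative.

// Sketch: list workflows in a space. The gorm.DeletedAt field makes GORM add
// "deleted_at IS NULL" to this query; use db.Unscoped() to include deleted rows.
func listWorkflows(ctx context.Context, db *gorm.DB, spaceID int64) ([]*model.WorkflowMeta, error) {
    var metas []*model.WorkflowMeta
    err := db.WithContext(ctx).Where("space_id = ?", spaceID).Find(&metas).Error
    return metas, err
}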


@@ -0,0 +1,28 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
import (
"gorm.io/gorm"
)
const TableNameWorkflowReference = "workflow_reference"
// WorkflowReference is the workflow reference table, recording direct references between workflows and other entities
type WorkflowReference struct {
ID int64 `gorm:"column:id;primaryKey;comment:workflow id" json:"id"` // workflow id
ReferredID int64 `gorm:"column:referred_id;not null;comment:the id of the workflow that is referred by other entities" json:"referred_id"` // the id of the workflow that is referred by other entities
ReferringID int64 `gorm:"column:referring_id;not null;comment:the entity id that refers this workflow" json:"referring_id"` // the entity id that refers this workflow
ReferType int32 `gorm:"column:refer_type;not null;comment:1 subworkflow 2 tool" json:"refer_type"` // 1 subworkflow 2 tool
ReferringBizType int32 `gorm:"column:referring_biz_type;not null;comment:the biz type the referring entity belongs to: 1. workflow 2. agent" json:"referring_biz_type"` // the biz type the referring entity belongs to: 1. workflow 2. agent
CreatedAt int64 `gorm:"column:created_at;not null;autoCreateTime:milli;comment:create time in millisecond" json:"created_at"` // create time in millisecond
Status int32 `gorm:"column:status;not null;comment:whether this reference currently takes effect. 0: disabled 1: enabled" json:"status"` // whether this reference currently takes effect. 0: disabled 1: enabled
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at" json:"deleted_at"`
}
// TableName WorkflowReference's table name
func (*WorkflowReference) TableName() string {
return TableNameWorkflowReference
}


@@ -0,0 +1,23 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
const TableNameWorkflowSnapshot = "workflow_snapshot"
// WorkflowSnapshot snapshot for executed workflow draft
type WorkflowSnapshot struct {
WorkflowID int64 `gorm:"column:workflow_id;not null;comment:workflow id this snapshot belongs to" json:"workflow_id"` // workflow id this snapshot belongs to
CommitID string `gorm:"column:commit_id;not null;comment:the commit id of the workflow draft" json:"commit_id"` // the commit id of the workflow draft
Canvas string `gorm:"column:canvas;not null;comment:frontend schema for this snapshot" json:"canvas"` // frontend schema for this snapshot
InputParams string `gorm:"column:input_params;comment:input parameter info" json:"input_params"` // input parameter info
OutputParams string `gorm:"column:output_params;comment:output parameter info" json:"output_params"` // output parameter info
CreatedAt int64 `gorm:"column:created_at;not null;autoCreateTime:milli" json:"created_at"`
ID int64 `gorm:"column:id;primaryKey;autoIncrement:true;comment:ID" json:"id"` // ID
}
// TableName WorkflowSnapshot's table name
func (*WorkflowSnapshot) TableName() string {
return TableNameWorkflowSnapshot
}


@@ -0,0 +1,31 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package model
import (
"gorm.io/gorm"
)
const TableNameWorkflowVersion = "workflow_version"
// WorkflowVersion is the workflow canvas version table, recording the canvas of each published version
type WorkflowVersion struct {
ID int64 `gorm:"column:id;primaryKey;autoIncrement:true;comment:ID" json:"id"` // ID
WorkflowID int64 `gorm:"column:workflow_id;not null;comment:workflow id" json:"workflow_id"` // workflow id
Version string `gorm:"column:version;not null;comment:发布版本" json:"version"` // published version
VersionDescription string `gorm:"column:version_description;not null;comment:版本描述" json:"version_description"` // version description
Canvas string `gorm:"column:canvas;not null;comment:前端 schema" json:"canvas"` // frontend schema
InputParams string `gorm:"column:input_params" json:"input_params"`
OutputParams string `gorm:"column:output_params" json:"output_params"`
CreatorID int64 `gorm:"column:creator_id;not null;comment:发布用户 ID" json:"creator_id"` // publisher user ID
CreatedAt int64 `gorm:"column:created_at;not null;autoCreateTime:milli;comment:创建时间毫秒时间戳" json:"created_at"` // create time, millisecond timestamp
DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;comment:删除毫秒时间戳" json:"deleted_at"` // delete time, millisecond timestamp
CommitID string `gorm:"column:commit_id;not null;comment:the commit id corresponding to this version" json:"commit_id"` // the commit id corresponding to this version
}
// TableName WorkflowVersion's table name
func (*WorkflowVersion) TableName() string {
return TableNameWorkflowVersion
}


@@ -0,0 +1,400 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/repo/dal/model"
)
func newConnectorWorkflowVersion(db *gorm.DB, opts ...gen.DOOption) connectorWorkflowVersion {
_connectorWorkflowVersion := connectorWorkflowVersion{}
_connectorWorkflowVersion.connectorWorkflowVersionDo.UseDB(db, opts...)
_connectorWorkflowVersion.connectorWorkflowVersionDo.UseModel(&model.ConnectorWorkflowVersion{})
tableName := _connectorWorkflowVersion.connectorWorkflowVersionDo.TableName()
_connectorWorkflowVersion.ALL = field.NewAsterisk(tableName)
_connectorWorkflowVersion.ID = field.NewInt64(tableName, "id")
_connectorWorkflowVersion.AppID = field.NewInt64(tableName, "app_id")
_connectorWorkflowVersion.ConnectorID = field.NewInt64(tableName, "connector_id")
_connectorWorkflowVersion.WorkflowID = field.NewInt64(tableName, "workflow_id")
_connectorWorkflowVersion.Version = field.NewString(tableName, "version")
_connectorWorkflowVersion.CreatedAt = field.NewInt64(tableName, "created_at")
_connectorWorkflowVersion.fillFieldMap()
return _connectorWorkflowVersion
}
type connectorWorkflowVersion struct {
connectorWorkflowVersionDo
ALL field.Asterisk
ID field.Int64 // id
AppID field.Int64 // app id
ConnectorID field.Int64 // connector id
WorkflowID field.Int64 // workflow id
Version field.String // version
CreatedAt field.Int64 // create time in millisecond
fieldMap map[string]field.Expr
}
func (c connectorWorkflowVersion) Table(newTableName string) *connectorWorkflowVersion {
c.connectorWorkflowVersionDo.UseTable(newTableName)
return c.updateTableName(newTableName)
}
func (c connectorWorkflowVersion) As(alias string) *connectorWorkflowVersion {
c.connectorWorkflowVersionDo.DO = *(c.connectorWorkflowVersionDo.As(alias).(*gen.DO))
return c.updateTableName(alias)
}
func (c *connectorWorkflowVersion) updateTableName(table string) *connectorWorkflowVersion {
c.ALL = field.NewAsterisk(table)
c.ID = field.NewInt64(table, "id")
c.AppID = field.NewInt64(table, "app_id")
c.ConnectorID = field.NewInt64(table, "connector_id")
c.WorkflowID = field.NewInt64(table, "workflow_id")
c.Version = field.NewString(table, "version")
c.CreatedAt = field.NewInt64(table, "created_at")
c.fillFieldMap()
return c
}
func (c *connectorWorkflowVersion) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := c.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (c *connectorWorkflowVersion) fillFieldMap() {
c.fieldMap = make(map[string]field.Expr, 6)
c.fieldMap["id"] = c.ID
c.fieldMap["app_id"] = c.AppID
c.fieldMap["connector_id"] = c.ConnectorID
c.fieldMap["workflow_id"] = c.WorkflowID
c.fieldMap["version"] = c.Version
c.fieldMap["created_at"] = c.CreatedAt
}
func (c connectorWorkflowVersion) clone(db *gorm.DB) connectorWorkflowVersion {
c.connectorWorkflowVersionDo.ReplaceConnPool(db.Statement.ConnPool)
return c
}
func (c connectorWorkflowVersion) replaceDB(db *gorm.DB) connectorWorkflowVersion {
c.connectorWorkflowVersionDo.ReplaceDB(db)
return c
}
type connectorWorkflowVersionDo struct{ gen.DO }
type IConnectorWorkflowVersionDo interface {
gen.SubQuery
Debug() IConnectorWorkflowVersionDo
WithContext(ctx context.Context) IConnectorWorkflowVersionDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() IConnectorWorkflowVersionDo
WriteDB() IConnectorWorkflowVersionDo
As(alias string) gen.Dao
Session(config *gorm.Session) IConnectorWorkflowVersionDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) IConnectorWorkflowVersionDo
Not(conds ...gen.Condition) IConnectorWorkflowVersionDo
Or(conds ...gen.Condition) IConnectorWorkflowVersionDo
Select(conds ...field.Expr) IConnectorWorkflowVersionDo
Where(conds ...gen.Condition) IConnectorWorkflowVersionDo
Order(conds ...field.Expr) IConnectorWorkflowVersionDo
Distinct(cols ...field.Expr) IConnectorWorkflowVersionDo
Omit(cols ...field.Expr) IConnectorWorkflowVersionDo
Join(table schema.Tabler, on ...field.Expr) IConnectorWorkflowVersionDo
LeftJoin(table schema.Tabler, on ...field.Expr) IConnectorWorkflowVersionDo
RightJoin(table schema.Tabler, on ...field.Expr) IConnectorWorkflowVersionDo
Group(cols ...field.Expr) IConnectorWorkflowVersionDo
Having(conds ...gen.Condition) IConnectorWorkflowVersionDo
Limit(limit int) IConnectorWorkflowVersionDo
Offset(offset int) IConnectorWorkflowVersionDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) IConnectorWorkflowVersionDo
Unscoped() IConnectorWorkflowVersionDo
Create(values ...*model.ConnectorWorkflowVersion) error
CreateInBatches(values []*model.ConnectorWorkflowVersion, batchSize int) error
Save(values ...*model.ConnectorWorkflowVersion) error
First() (*model.ConnectorWorkflowVersion, error)
Take() (*model.ConnectorWorkflowVersion, error)
Last() (*model.ConnectorWorkflowVersion, error)
Find() ([]*model.ConnectorWorkflowVersion, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ConnectorWorkflowVersion, err error)
FindInBatches(result *[]*model.ConnectorWorkflowVersion, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.ConnectorWorkflowVersion) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) IConnectorWorkflowVersionDo
Assign(attrs ...field.AssignExpr) IConnectorWorkflowVersionDo
Joins(fields ...field.RelationField) IConnectorWorkflowVersionDo
Preload(fields ...field.RelationField) IConnectorWorkflowVersionDo
FirstOrInit() (*model.ConnectorWorkflowVersion, error)
FirstOrCreate() (*model.ConnectorWorkflowVersion, error)
FindByPage(offset int, limit int) (result []*model.ConnectorWorkflowVersion, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) IConnectorWorkflowVersionDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (c connectorWorkflowVersionDo) Debug() IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Debug())
}
func (c connectorWorkflowVersionDo) WithContext(ctx context.Context) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.WithContext(ctx))
}
func (c connectorWorkflowVersionDo) ReadDB() IConnectorWorkflowVersionDo {
return c.Clauses(dbresolver.Read)
}
func (c connectorWorkflowVersionDo) WriteDB() IConnectorWorkflowVersionDo {
return c.Clauses(dbresolver.Write)
}
func (c connectorWorkflowVersionDo) Session(config *gorm.Session) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Session(config))
}
func (c connectorWorkflowVersionDo) Clauses(conds ...clause.Expression) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Clauses(conds...))
}
func (c connectorWorkflowVersionDo) Returning(value interface{}, columns ...string) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Returning(value, columns...))
}
func (c connectorWorkflowVersionDo) Not(conds ...gen.Condition) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Not(conds...))
}
func (c connectorWorkflowVersionDo) Or(conds ...gen.Condition) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Or(conds...))
}
func (c connectorWorkflowVersionDo) Select(conds ...field.Expr) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Select(conds...))
}
func (c connectorWorkflowVersionDo) Where(conds ...gen.Condition) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Where(conds...))
}
func (c connectorWorkflowVersionDo) Order(conds ...field.Expr) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Order(conds...))
}
func (c connectorWorkflowVersionDo) Distinct(cols ...field.Expr) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Distinct(cols...))
}
func (c connectorWorkflowVersionDo) Omit(cols ...field.Expr) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Omit(cols...))
}
func (c connectorWorkflowVersionDo) Join(table schema.Tabler, on ...field.Expr) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Join(table, on...))
}
func (c connectorWorkflowVersionDo) LeftJoin(table schema.Tabler, on ...field.Expr) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.LeftJoin(table, on...))
}
func (c connectorWorkflowVersionDo) RightJoin(table schema.Tabler, on ...field.Expr) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.RightJoin(table, on...))
}
func (c connectorWorkflowVersionDo) Group(cols ...field.Expr) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Group(cols...))
}
func (c connectorWorkflowVersionDo) Having(conds ...gen.Condition) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Having(conds...))
}
func (c connectorWorkflowVersionDo) Limit(limit int) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Limit(limit))
}
func (c connectorWorkflowVersionDo) Offset(offset int) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Offset(offset))
}
func (c connectorWorkflowVersionDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Scopes(funcs...))
}
func (c connectorWorkflowVersionDo) Unscoped() IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Unscoped())
}
func (c connectorWorkflowVersionDo) Create(values ...*model.ConnectorWorkflowVersion) error {
if len(values) == 0 {
return nil
}
return c.DO.Create(values)
}
func (c connectorWorkflowVersionDo) CreateInBatches(values []*model.ConnectorWorkflowVersion, batchSize int) error {
return c.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (c connectorWorkflowVersionDo) Save(values ...*model.ConnectorWorkflowVersion) error {
if len(values) == 0 {
return nil
}
return c.DO.Save(values)
}
func (c connectorWorkflowVersionDo) First() (*model.ConnectorWorkflowVersion, error) {
if result, err := c.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.ConnectorWorkflowVersion), nil
}
}
func (c connectorWorkflowVersionDo) Take() (*model.ConnectorWorkflowVersion, error) {
if result, err := c.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.ConnectorWorkflowVersion), nil
}
}
func (c connectorWorkflowVersionDo) Last() (*model.ConnectorWorkflowVersion, error) {
if result, err := c.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.ConnectorWorkflowVersion), nil
}
}
func (c connectorWorkflowVersionDo) Find() ([]*model.ConnectorWorkflowVersion, error) {
result, err := c.DO.Find()
return result.([]*model.ConnectorWorkflowVersion), err
}
func (c connectorWorkflowVersionDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ConnectorWorkflowVersion, err error) {
buf := make([]*model.ConnectorWorkflowVersion, 0, batchSize)
err = c.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (c connectorWorkflowVersionDo) FindInBatches(result *[]*model.ConnectorWorkflowVersion, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return c.DO.FindInBatches(result, batchSize, fc)
}
func (c connectorWorkflowVersionDo) Attrs(attrs ...field.AssignExpr) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Attrs(attrs...))
}
func (c connectorWorkflowVersionDo) Assign(attrs ...field.AssignExpr) IConnectorWorkflowVersionDo {
return c.withDO(c.DO.Assign(attrs...))
}
func (c connectorWorkflowVersionDo) Joins(fields ...field.RelationField) IConnectorWorkflowVersionDo {
for _, _f := range fields {
c = *c.withDO(c.DO.Joins(_f))
}
return &c
}
func (c connectorWorkflowVersionDo) Preload(fields ...field.RelationField) IConnectorWorkflowVersionDo {
for _, _f := range fields {
c = *c.withDO(c.DO.Preload(_f))
}
return &c
}
func (c connectorWorkflowVersionDo) FirstOrInit() (*model.ConnectorWorkflowVersion, error) {
if result, err := c.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.ConnectorWorkflowVersion), nil
}
}
func (c connectorWorkflowVersionDo) FirstOrCreate() (*model.ConnectorWorkflowVersion, error) {
if result, err := c.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.ConnectorWorkflowVersion), nil
}
}
func (c connectorWorkflowVersionDo) FindByPage(offset int, limit int) (result []*model.ConnectorWorkflowVersion, count int64, err error) {
result, err = c.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = c.Offset(-1).Limit(-1).Count()
return
}
func (c connectorWorkflowVersionDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = c.Count()
if err != nil {
return
}
err = c.Offset(offset).Limit(limit).Scan(result)
return
}
func (c connectorWorkflowVersionDo) Scan(result interface{}) (err error) {
return c.DO.Scan(result)
}
func (c connectorWorkflowVersionDo) Delete(models ...*model.ConnectorWorkflowVersion) (result gen.ResultInfo, err error) {
return c.DO.Delete(models)
}
func (c *connectorWorkflowVersionDo) withDO(do gen.Dao) *connectorWorkflowVersionDo {
c.DO = *do.(*gen.DO)
return c
}
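
A sketch of how the generated DAO above is typically consumed from outside the package: fetch the most recently published version of a workflow on one connector. The function name and parameters are illustrative; q is a *query.Query obtained from Use(db), or the package-level Q after SetDefault (see the query bootstrap file further down).

func publishedVersion(ctx context.Context, q *query.Query, connectorID, workflowID int64) (string, error) {
    cwv := q.ConnectorWorkflowVersion
    row, err := cwv.WithContext(ctx).
        Where(cwv.ConnectorID.Eq(connectorID), cwv.WorkflowID.Eq(workflowID)).
        Order(cwv.CreatedAt.Desc()).
        First()
    if err != nil {
        return "", err
    }
    return row.Version, nil
}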


@@ -0,0 +1,159 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"database/sql"
"gorm.io/gorm"
"gorm.io/gen"
"gorm.io/plugin/dbresolver"
)
var (
Q = new(Query)
ConnectorWorkflowVersion *connectorWorkflowVersion
NodeExecution *nodeExecution
WorkflowDraft *workflowDraft
WorkflowExecution *workflowExecution
WorkflowMeta *workflowMeta
WorkflowReference *workflowReference
WorkflowSnapshot *workflowSnapshot
WorkflowVersion *workflowVersion
)
func SetDefault(db *gorm.DB, opts ...gen.DOOption) {
*Q = *Use(db, opts...)
ConnectorWorkflowVersion = &Q.ConnectorWorkflowVersion
NodeExecution = &Q.NodeExecution
WorkflowDraft = &Q.WorkflowDraft
WorkflowExecution = &Q.WorkflowExecution
WorkflowMeta = &Q.WorkflowMeta
WorkflowReference = &Q.WorkflowReference
WorkflowSnapshot = &Q.WorkflowSnapshot
WorkflowVersion = &Q.WorkflowVersion
}
func Use(db *gorm.DB, opts ...gen.DOOption) *Query {
return &Query{
db: db,
ConnectorWorkflowVersion: newConnectorWorkflowVersion(db, opts...),
NodeExecution: newNodeExecution(db, opts...),
WorkflowDraft: newWorkflowDraft(db, opts...),
WorkflowExecution: newWorkflowExecution(db, opts...),
WorkflowMeta: newWorkflowMeta(db, opts...),
WorkflowReference: newWorkflowReference(db, opts...),
WorkflowSnapshot: newWorkflowSnapshot(db, opts...),
WorkflowVersion: newWorkflowVersion(db, opts...),
}
}
type Query struct {
db *gorm.DB
ConnectorWorkflowVersion connectorWorkflowVersion
NodeExecution nodeExecution
WorkflowDraft workflowDraft
WorkflowExecution workflowExecution
WorkflowMeta workflowMeta
WorkflowReference workflowReference
WorkflowSnapshot workflowSnapshot
WorkflowVersion workflowVersion
}
func (q *Query) Available() bool { return q.db != nil }
func (q *Query) clone(db *gorm.DB) *Query {
return &Query{
db: db,
ConnectorWorkflowVersion: q.ConnectorWorkflowVersion.clone(db),
NodeExecution: q.NodeExecution.clone(db),
WorkflowDraft: q.WorkflowDraft.clone(db),
WorkflowExecution: q.WorkflowExecution.clone(db),
WorkflowMeta: q.WorkflowMeta.clone(db),
WorkflowReference: q.WorkflowReference.clone(db),
WorkflowSnapshot: q.WorkflowSnapshot.clone(db),
WorkflowVersion: q.WorkflowVersion.clone(db),
}
}
func (q *Query) ReadDB() *Query {
return q.ReplaceDB(q.db.Clauses(dbresolver.Read))
}
func (q *Query) WriteDB() *Query {
return q.ReplaceDB(q.db.Clauses(dbresolver.Write))
}
func (q *Query) ReplaceDB(db *gorm.DB) *Query {
return &Query{
db: db,
ConnectorWorkflowVersion: q.ConnectorWorkflowVersion.replaceDB(db),
NodeExecution: q.NodeExecution.replaceDB(db),
WorkflowDraft: q.WorkflowDraft.replaceDB(db),
WorkflowExecution: q.WorkflowExecution.replaceDB(db),
WorkflowMeta: q.WorkflowMeta.replaceDB(db),
WorkflowReference: q.WorkflowReference.replaceDB(db),
WorkflowSnapshot: q.WorkflowSnapshot.replaceDB(db),
WorkflowVersion: q.WorkflowVersion.replaceDB(db),
}
}
type queryCtx struct {
ConnectorWorkflowVersion IConnectorWorkflowVersionDo
NodeExecution INodeExecutionDo
WorkflowDraft IWorkflowDraftDo
WorkflowExecution IWorkflowExecutionDo
WorkflowMeta IWorkflowMetaDo
WorkflowReference IWorkflowReferenceDo
WorkflowSnapshot IWorkflowSnapshotDo
WorkflowVersion IWorkflowVersionDo
}
func (q *Query) WithContext(ctx context.Context) *queryCtx {
return &queryCtx{
ConnectorWorkflowVersion: q.ConnectorWorkflowVersion.WithContext(ctx),
NodeExecution: q.NodeExecution.WithContext(ctx),
WorkflowDraft: q.WorkflowDraft.WithContext(ctx),
WorkflowExecution: q.WorkflowExecution.WithContext(ctx),
WorkflowMeta: q.WorkflowMeta.WithContext(ctx),
WorkflowReference: q.WorkflowReference.WithContext(ctx),
WorkflowSnapshot: q.WorkflowSnapshot.WithContext(ctx),
WorkflowVersion: q.WorkflowVersion.WithContext(ctx),
}
}
func (q *Query) Transaction(fc func(tx *Query) error, opts ...*sql.TxOptions) error {
return q.db.Transaction(func(tx *gorm.DB) error { return fc(q.clone(tx)) }, opts...)
}
func (q *Query) Begin(opts ...*sql.TxOptions) *QueryTx {
tx := q.db.Begin(opts...)
return &QueryTx{Query: q.clone(tx), Error: tx.Error}
}
type QueryTx struct {
*Query
Error error
}
func (q *QueryTx) Commit() error {
return q.db.Commit().Error
}
func (q *QueryTx) Rollback() error {
return q.db.Rollback().Error
}
func (q *QueryTx) SavePoint(name string) error {
return q.db.SavePoint(name).Error
}
func (q *QueryTx) RollbackTo(name string) error {
return q.db.RollbackTo(name).Error
}
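
A sketch of wiring the generated package at startup and grouping two related writes in one transaction; the surrounding functions are illustrative, while SetDefault, Q, Transaction, and the WorkflowVersion/ConnectorWorkflowVersion DAOs are the ones wired up above.

// Sketch: call SetDefault once at startup so the package-level DAOs are usable.
func setupQuery(db *gorm.DB) {
    query.SetDefault(db)
}

// Sketch: commit (or roll back) a version row and its connector binding together.
func publishAtomically(ctx context.Context, wv *model.WorkflowVersion, cwv *model.ConnectorWorkflowVersion) error {
    return query.Q.Transaction(func(tx *query.Query) error {
        if err := tx.WorkflowVersion.WithContext(ctx).Create(wv); err != nil {
            return err
        }
        return tx.ConnectorWorkflowVersion.WithContext(ctx).Create(cwv)
    })
}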


@@ -0,0 +1,461 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/repo/dal/model"
)
func newNodeExecution(db *gorm.DB, opts ...gen.DOOption) nodeExecution {
_nodeExecution := nodeExecution{}
_nodeExecution.nodeExecutionDo.UseDB(db, opts...)
_nodeExecution.nodeExecutionDo.UseModel(&model.NodeExecution{})
tableName := _nodeExecution.nodeExecutionDo.TableName()
_nodeExecution.ALL = field.NewAsterisk(tableName)
_nodeExecution.ID = field.NewInt64(tableName, "id")
_nodeExecution.ExecuteID = field.NewInt64(tableName, "execute_id")
_nodeExecution.NodeID = field.NewString(tableName, "node_id")
_nodeExecution.NodeName = field.NewString(tableName, "node_name")
_nodeExecution.NodeType = field.NewString(tableName, "node_type")
_nodeExecution.CreatedAt = field.NewInt64(tableName, "created_at")
_nodeExecution.Status = field.NewInt32(tableName, "status")
_nodeExecution.Duration = field.NewInt64(tableName, "duration")
_nodeExecution.Input = field.NewString(tableName, "input")
_nodeExecution.Output = field.NewString(tableName, "output")
_nodeExecution.RawOutput = field.NewString(tableName, "raw_output")
_nodeExecution.ErrorInfo = field.NewString(tableName, "error_info")
_nodeExecution.ErrorLevel = field.NewString(tableName, "error_level")
_nodeExecution.InputTokens = field.NewInt64(tableName, "input_tokens")
_nodeExecution.OutputTokens = field.NewInt64(tableName, "output_tokens")
_nodeExecution.UpdatedAt = field.NewInt64(tableName, "updated_at")
_nodeExecution.CompositeNodeIndex = field.NewInt64(tableName, "composite_node_index")
_nodeExecution.CompositeNodeItems = field.NewString(tableName, "composite_node_items")
_nodeExecution.ParentNodeID = field.NewString(tableName, "parent_node_id")
_nodeExecution.SubExecuteID = field.NewInt64(tableName, "sub_execute_id")
_nodeExecution.Extra = field.NewString(tableName, "extra")
_nodeExecution.fillFieldMap()
return _nodeExecution
}
// nodeExecution is the node execution record table, recording the status of every node in each workflow execution
type nodeExecution struct {
nodeExecutionDo
ALL field.Asterisk
ID field.Int64 // node execution id
ExecuteID field.Int64 // the workflow execute id this node execution belongs to
NodeID field.String // node key
NodeName field.String // name of the node
NodeType field.String // the type of the node, in string
CreatedAt field.Int64 // create time in millisecond
Status field.Int32 // 1=waiting 2=running 3=success 4=fail
Duration field.Int64 // execution duration in millisecond
Input field.String // actual input of the node
Output field.String // actual output of the node
RawOutput field.String // the original output of the node
ErrorInfo field.String // error info
ErrorLevel field.String // level of the error
InputTokens field.Int64 // number of input tokens
OutputTokens field.Int64 // number of output tokens
UpdatedAt field.Int64 // update time in millisecond
CompositeNodeIndex field.Int64 // loop or batch's execution index
CompositeNodeItems field.String // the items extracted from parent composite node for this index
ParentNodeID field.String // when as inner node for loop or batch, this is the parent node's key
SubExecuteID field.Int64 // if this node is sub_workflow, the exe id of the sub workflow
Extra field.String // extra info
fieldMap map[string]field.Expr
}
func (n nodeExecution) Table(newTableName string) *nodeExecution {
n.nodeExecutionDo.UseTable(newTableName)
return n.updateTableName(newTableName)
}
func (n nodeExecution) As(alias string) *nodeExecution {
n.nodeExecutionDo.DO = *(n.nodeExecutionDo.As(alias).(*gen.DO))
return n.updateTableName(alias)
}
func (n *nodeExecution) updateTableName(table string) *nodeExecution {
n.ALL = field.NewAsterisk(table)
n.ID = field.NewInt64(table, "id")
n.ExecuteID = field.NewInt64(table, "execute_id")
n.NodeID = field.NewString(table, "node_id")
n.NodeName = field.NewString(table, "node_name")
n.NodeType = field.NewString(table, "node_type")
n.CreatedAt = field.NewInt64(table, "created_at")
n.Status = field.NewInt32(table, "status")
n.Duration = field.NewInt64(table, "duration")
n.Input = field.NewString(table, "input")
n.Output = field.NewString(table, "output")
n.RawOutput = field.NewString(table, "raw_output")
n.ErrorInfo = field.NewString(table, "error_info")
n.ErrorLevel = field.NewString(table, "error_level")
n.InputTokens = field.NewInt64(table, "input_tokens")
n.OutputTokens = field.NewInt64(table, "output_tokens")
n.UpdatedAt = field.NewInt64(table, "updated_at")
n.CompositeNodeIndex = field.NewInt64(table, "composite_node_index")
n.CompositeNodeItems = field.NewString(table, "composite_node_items")
n.ParentNodeID = field.NewString(table, "parent_node_id")
n.SubExecuteID = field.NewInt64(table, "sub_execute_id")
n.Extra = field.NewString(table, "extra")
n.fillFieldMap()
return n
}
func (n *nodeExecution) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := n.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (n *nodeExecution) fillFieldMap() {
n.fieldMap = make(map[string]field.Expr, 21)
n.fieldMap["id"] = n.ID
n.fieldMap["execute_id"] = n.ExecuteID
n.fieldMap["node_id"] = n.NodeID
n.fieldMap["node_name"] = n.NodeName
n.fieldMap["node_type"] = n.NodeType
n.fieldMap["created_at"] = n.CreatedAt
n.fieldMap["status"] = n.Status
n.fieldMap["duration"] = n.Duration
n.fieldMap["input"] = n.Input
n.fieldMap["output"] = n.Output
n.fieldMap["raw_output"] = n.RawOutput
n.fieldMap["error_info"] = n.ErrorInfo
n.fieldMap["error_level"] = n.ErrorLevel
n.fieldMap["input_tokens"] = n.InputTokens
n.fieldMap["output_tokens"] = n.OutputTokens
n.fieldMap["updated_at"] = n.UpdatedAt
n.fieldMap["composite_node_index"] = n.CompositeNodeIndex
n.fieldMap["composite_node_items"] = n.CompositeNodeItems
n.fieldMap["parent_node_id"] = n.ParentNodeID
n.fieldMap["sub_execute_id"] = n.SubExecuteID
n.fieldMap["extra"] = n.Extra
}
func (n nodeExecution) clone(db *gorm.DB) nodeExecution {
n.nodeExecutionDo.ReplaceConnPool(db.Statement.ConnPool)
return n
}
func (n nodeExecution) replaceDB(db *gorm.DB) nodeExecution {
n.nodeExecutionDo.ReplaceDB(db)
return n
}
type nodeExecutionDo struct{ gen.DO }
type INodeExecutionDo interface {
gen.SubQuery
Debug() INodeExecutionDo
WithContext(ctx context.Context) INodeExecutionDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() INodeExecutionDo
WriteDB() INodeExecutionDo
As(alias string) gen.Dao
Session(config *gorm.Session) INodeExecutionDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) INodeExecutionDo
Not(conds ...gen.Condition) INodeExecutionDo
Or(conds ...gen.Condition) INodeExecutionDo
Select(conds ...field.Expr) INodeExecutionDo
Where(conds ...gen.Condition) INodeExecutionDo
Order(conds ...field.Expr) INodeExecutionDo
Distinct(cols ...field.Expr) INodeExecutionDo
Omit(cols ...field.Expr) INodeExecutionDo
Join(table schema.Tabler, on ...field.Expr) INodeExecutionDo
LeftJoin(table schema.Tabler, on ...field.Expr) INodeExecutionDo
RightJoin(table schema.Tabler, on ...field.Expr) INodeExecutionDo
Group(cols ...field.Expr) INodeExecutionDo
Having(conds ...gen.Condition) INodeExecutionDo
Limit(limit int) INodeExecutionDo
Offset(offset int) INodeExecutionDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) INodeExecutionDo
Unscoped() INodeExecutionDo
Create(values ...*model.NodeExecution) error
CreateInBatches(values []*model.NodeExecution, batchSize int) error
Save(values ...*model.NodeExecution) error
First() (*model.NodeExecution, error)
Take() (*model.NodeExecution, error)
Last() (*model.NodeExecution, error)
Find() ([]*model.NodeExecution, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.NodeExecution, err error)
FindInBatches(result *[]*model.NodeExecution, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.NodeExecution) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) INodeExecutionDo
Assign(attrs ...field.AssignExpr) INodeExecutionDo
Joins(fields ...field.RelationField) INodeExecutionDo
Preload(fields ...field.RelationField) INodeExecutionDo
FirstOrInit() (*model.NodeExecution, error)
FirstOrCreate() (*model.NodeExecution, error)
FindByPage(offset int, limit int) (result []*model.NodeExecution, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) INodeExecutionDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (n nodeExecutionDo) Debug() INodeExecutionDo {
return n.withDO(n.DO.Debug())
}
func (n nodeExecutionDo) WithContext(ctx context.Context) INodeExecutionDo {
return n.withDO(n.DO.WithContext(ctx))
}
func (n nodeExecutionDo) ReadDB() INodeExecutionDo {
return n.Clauses(dbresolver.Read)
}
func (n nodeExecutionDo) WriteDB() INodeExecutionDo {
return n.Clauses(dbresolver.Write)
}
func (n nodeExecutionDo) Session(config *gorm.Session) INodeExecutionDo {
return n.withDO(n.DO.Session(config))
}
func (n nodeExecutionDo) Clauses(conds ...clause.Expression) INodeExecutionDo {
return n.withDO(n.DO.Clauses(conds...))
}
func (n nodeExecutionDo) Returning(value interface{}, columns ...string) INodeExecutionDo {
return n.withDO(n.DO.Returning(value, columns...))
}
func (n nodeExecutionDo) Not(conds ...gen.Condition) INodeExecutionDo {
return n.withDO(n.DO.Not(conds...))
}
func (n nodeExecutionDo) Or(conds ...gen.Condition) INodeExecutionDo {
return n.withDO(n.DO.Or(conds...))
}
func (n nodeExecutionDo) Select(conds ...field.Expr) INodeExecutionDo {
return n.withDO(n.DO.Select(conds...))
}
func (n nodeExecutionDo) Where(conds ...gen.Condition) INodeExecutionDo {
return n.withDO(n.DO.Where(conds...))
}
func (n nodeExecutionDo) Order(conds ...field.Expr) INodeExecutionDo {
return n.withDO(n.DO.Order(conds...))
}
func (n nodeExecutionDo) Distinct(cols ...field.Expr) INodeExecutionDo {
return n.withDO(n.DO.Distinct(cols...))
}
func (n nodeExecutionDo) Omit(cols ...field.Expr) INodeExecutionDo {
return n.withDO(n.DO.Omit(cols...))
}
func (n nodeExecutionDo) Join(table schema.Tabler, on ...field.Expr) INodeExecutionDo {
return n.withDO(n.DO.Join(table, on...))
}
func (n nodeExecutionDo) LeftJoin(table schema.Tabler, on ...field.Expr) INodeExecutionDo {
return n.withDO(n.DO.LeftJoin(table, on...))
}
func (n nodeExecutionDo) RightJoin(table schema.Tabler, on ...field.Expr) INodeExecutionDo {
return n.withDO(n.DO.RightJoin(table, on...))
}
func (n nodeExecutionDo) Group(cols ...field.Expr) INodeExecutionDo {
return n.withDO(n.DO.Group(cols...))
}
func (n nodeExecutionDo) Having(conds ...gen.Condition) INodeExecutionDo {
return n.withDO(n.DO.Having(conds...))
}
func (n nodeExecutionDo) Limit(limit int) INodeExecutionDo {
return n.withDO(n.DO.Limit(limit))
}
func (n nodeExecutionDo) Offset(offset int) INodeExecutionDo {
return n.withDO(n.DO.Offset(offset))
}
func (n nodeExecutionDo) Scopes(funcs ...func(gen.Dao) gen.Dao) INodeExecutionDo {
return n.withDO(n.DO.Scopes(funcs...))
}
func (n nodeExecutionDo) Unscoped() INodeExecutionDo {
return n.withDO(n.DO.Unscoped())
}
func (n nodeExecutionDo) Create(values ...*model.NodeExecution) error {
if len(values) == 0 {
return nil
}
return n.DO.Create(values)
}
func (n nodeExecutionDo) CreateInBatches(values []*model.NodeExecution, batchSize int) error {
return n.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (n nodeExecutionDo) Save(values ...*model.NodeExecution) error {
if len(values) == 0 {
return nil
}
return n.DO.Save(values)
}
func (n nodeExecutionDo) First() (*model.NodeExecution, error) {
if result, err := n.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.NodeExecution), nil
}
}
func (n nodeExecutionDo) Take() (*model.NodeExecution, error) {
if result, err := n.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.NodeExecution), nil
}
}
func (n nodeExecutionDo) Last() (*model.NodeExecution, error) {
if result, err := n.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.NodeExecution), nil
}
}
func (n nodeExecutionDo) Find() ([]*model.NodeExecution, error) {
result, err := n.DO.Find()
return result.([]*model.NodeExecution), err
}
func (n nodeExecutionDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.NodeExecution, err error) {
buf := make([]*model.NodeExecution, 0, batchSize)
err = n.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (n nodeExecutionDo) FindInBatches(result *[]*model.NodeExecution, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return n.DO.FindInBatches(result, batchSize, fc)
}
func (n nodeExecutionDo) Attrs(attrs ...field.AssignExpr) INodeExecutionDo {
return n.withDO(n.DO.Attrs(attrs...))
}
func (n nodeExecutionDo) Assign(attrs ...field.AssignExpr) INodeExecutionDo {
return n.withDO(n.DO.Assign(attrs...))
}
func (n nodeExecutionDo) Joins(fields ...field.RelationField) INodeExecutionDo {
for _, _f := range fields {
n = *n.withDO(n.DO.Joins(_f))
}
return &n
}
func (n nodeExecutionDo) Preload(fields ...field.RelationField) INodeExecutionDo {
for _, _f := range fields {
n = *n.withDO(n.DO.Preload(_f))
}
return &n
}
func (n nodeExecutionDo) FirstOrInit() (*model.NodeExecution, error) {
if result, err := n.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.NodeExecution), nil
}
}
func (n nodeExecutionDo) FirstOrCreate() (*model.NodeExecution, error) {
if result, err := n.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.NodeExecution), nil
}
}
func (n nodeExecutionDo) FindByPage(offset int, limit int) (result []*model.NodeExecution, count int64, err error) {
result, err = n.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = n.Offset(-1).Limit(-1).Count()
return
}
func (n nodeExecutionDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = n.Count()
if err != nil {
return
}
err = n.Offset(offset).Limit(limit).Scan(result)
return
}
func (n nodeExecutionDo) Scan(result interface{}) (err error) {
return n.DO.Scan(result)
}
func (n nodeExecutionDo) Delete(models ...*model.NodeExecution) (result gen.ResultInfo, err error) {
return n.DO.Delete(models)
}
func (n *nodeExecutionDo) withDO(do gen.Dao) *nodeExecutionDo {
n.DO = *do.(*gen.DO)
return n
}
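
A sketch of paging through the node records of a single run with this DAO; the function name and the page arithmetic are illustrative.

// Sketch: list node executions of one run, newest first, one page at a time.
// FindByPage returns the page slice plus the total row count for the filter.
func listNodeRuns(ctx context.Context, q *query.Query, wfExeID int64, page, pageSize int) ([]*model.NodeExecution, int64, error) {
    ne := q.NodeExecution
    return ne.WithContext(ctx).
        Where(ne.ExecuteID.Eq(wfExeID)).
        Order(ne.CreatedAt.Desc()).
        FindByPage((page-1)*pageSize, pageSize)
}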


@@ -0,0 +1,413 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/repo/dal/model"
)
func newWorkflowDraft(db *gorm.DB, opts ...gen.DOOption) workflowDraft {
_workflowDraft := workflowDraft{}
_workflowDraft.workflowDraftDo.UseDB(db, opts...)
_workflowDraft.workflowDraftDo.UseModel(&model.WorkflowDraft{})
tableName := _workflowDraft.workflowDraftDo.TableName()
_workflowDraft.ALL = field.NewAsterisk(tableName)
_workflowDraft.ID = field.NewInt64(tableName, "id")
_workflowDraft.Canvas = field.NewString(tableName, "canvas")
_workflowDraft.InputParams = field.NewString(tableName, "input_params")
_workflowDraft.OutputParams = field.NewString(tableName, "output_params")
_workflowDraft.TestRunSuccess = field.NewBool(tableName, "test_run_success")
_workflowDraft.Modified = field.NewBool(tableName, "modified")
_workflowDraft.UpdatedAt = field.NewInt64(tableName, "updated_at")
_workflowDraft.DeletedAt = field.NewField(tableName, "deleted_at")
_workflowDraft.CommitID = field.NewString(tableName, "commit_id")
_workflowDraft.fillFieldMap()
return _workflowDraft
}
// workflowDraft is the workflow canvas draft table, recording the latest draft canvas of each workflow
type workflowDraft struct {
workflowDraftDo
ALL field.Asterisk
ID field.Int64 // workflow ID
Canvas field.String // frontend schema
InputParams field.String // input parameter schema
OutputParams field.String // output parameter schema
TestRunSuccess field.Bool // 0 = not test run yet, 1 = test run succeeded
Modified field.Bool // 0 = unmodified, 1 = modified
UpdatedAt field.Int64
DeletedAt field.Field
CommitID field.String // used to uniquely identify a draft snapshot
fieldMap map[string]field.Expr
}
func (w workflowDraft) Table(newTableName string) *workflowDraft {
w.workflowDraftDo.UseTable(newTableName)
return w.updateTableName(newTableName)
}
func (w workflowDraft) As(alias string) *workflowDraft {
w.workflowDraftDo.DO = *(w.workflowDraftDo.As(alias).(*gen.DO))
return w.updateTableName(alias)
}
func (w *workflowDraft) updateTableName(table string) *workflowDraft {
w.ALL = field.NewAsterisk(table)
w.ID = field.NewInt64(table, "id")
w.Canvas = field.NewString(table, "canvas")
w.InputParams = field.NewString(table, "input_params")
w.OutputParams = field.NewString(table, "output_params")
w.TestRunSuccess = field.NewBool(table, "test_run_success")
w.Modified = field.NewBool(table, "modified")
w.UpdatedAt = field.NewInt64(table, "updated_at")
w.DeletedAt = field.NewField(table, "deleted_at")
w.CommitID = field.NewString(table, "commit_id")
w.fillFieldMap()
return w
}
func (w *workflowDraft) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := w.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (w *workflowDraft) fillFieldMap() {
w.fieldMap = make(map[string]field.Expr, 9)
w.fieldMap["id"] = w.ID
w.fieldMap["canvas"] = w.Canvas
w.fieldMap["input_params"] = w.InputParams
w.fieldMap["output_params"] = w.OutputParams
w.fieldMap["test_run_success"] = w.TestRunSuccess
w.fieldMap["modified"] = w.Modified
w.fieldMap["updated_at"] = w.UpdatedAt
w.fieldMap["deleted_at"] = w.DeletedAt
w.fieldMap["commit_id"] = w.CommitID
}
func (w workflowDraft) clone(db *gorm.DB) workflowDraft {
w.workflowDraftDo.ReplaceConnPool(db.Statement.ConnPool)
return w
}
func (w workflowDraft) replaceDB(db *gorm.DB) workflowDraft {
w.workflowDraftDo.ReplaceDB(db)
return w
}
type workflowDraftDo struct{ gen.DO }
type IWorkflowDraftDo interface {
gen.SubQuery
Debug() IWorkflowDraftDo
WithContext(ctx context.Context) IWorkflowDraftDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() IWorkflowDraftDo
WriteDB() IWorkflowDraftDo
As(alias string) gen.Dao
Session(config *gorm.Session) IWorkflowDraftDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) IWorkflowDraftDo
Not(conds ...gen.Condition) IWorkflowDraftDo
Or(conds ...gen.Condition) IWorkflowDraftDo
Select(conds ...field.Expr) IWorkflowDraftDo
Where(conds ...gen.Condition) IWorkflowDraftDo
Order(conds ...field.Expr) IWorkflowDraftDo
Distinct(cols ...field.Expr) IWorkflowDraftDo
Omit(cols ...field.Expr) IWorkflowDraftDo
Join(table schema.Tabler, on ...field.Expr) IWorkflowDraftDo
LeftJoin(table schema.Tabler, on ...field.Expr) IWorkflowDraftDo
RightJoin(table schema.Tabler, on ...field.Expr) IWorkflowDraftDo
Group(cols ...field.Expr) IWorkflowDraftDo
Having(conds ...gen.Condition) IWorkflowDraftDo
Limit(limit int) IWorkflowDraftDo
Offset(offset int) IWorkflowDraftDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) IWorkflowDraftDo
Unscoped() IWorkflowDraftDo
Create(values ...*model.WorkflowDraft) error
CreateInBatches(values []*model.WorkflowDraft, batchSize int) error
Save(values ...*model.WorkflowDraft) error
First() (*model.WorkflowDraft, error)
Take() (*model.WorkflowDraft, error)
Last() (*model.WorkflowDraft, error)
Find() ([]*model.WorkflowDraft, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.WorkflowDraft, err error)
FindInBatches(result *[]*model.WorkflowDraft, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.WorkflowDraft) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) IWorkflowDraftDo
Assign(attrs ...field.AssignExpr) IWorkflowDraftDo
Joins(fields ...field.RelationField) IWorkflowDraftDo
Preload(fields ...field.RelationField) IWorkflowDraftDo
FirstOrInit() (*model.WorkflowDraft, error)
FirstOrCreate() (*model.WorkflowDraft, error)
FindByPage(offset int, limit int) (result []*model.WorkflowDraft, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) IWorkflowDraftDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (w workflowDraftDo) Debug() IWorkflowDraftDo {
return w.withDO(w.DO.Debug())
}
func (w workflowDraftDo) WithContext(ctx context.Context) IWorkflowDraftDo {
return w.withDO(w.DO.WithContext(ctx))
}
func (w workflowDraftDo) ReadDB() IWorkflowDraftDo {
return w.Clauses(dbresolver.Read)
}
func (w workflowDraftDo) WriteDB() IWorkflowDraftDo {
return w.Clauses(dbresolver.Write)
}
func (w workflowDraftDo) Session(config *gorm.Session) IWorkflowDraftDo {
return w.withDO(w.DO.Session(config))
}
func (w workflowDraftDo) Clauses(conds ...clause.Expression) IWorkflowDraftDo {
return w.withDO(w.DO.Clauses(conds...))
}
func (w workflowDraftDo) Returning(value interface{}, columns ...string) IWorkflowDraftDo {
return w.withDO(w.DO.Returning(value, columns...))
}
func (w workflowDraftDo) Not(conds ...gen.Condition) IWorkflowDraftDo {
return w.withDO(w.DO.Not(conds...))
}
func (w workflowDraftDo) Or(conds ...gen.Condition) IWorkflowDraftDo {
return w.withDO(w.DO.Or(conds...))
}
func (w workflowDraftDo) Select(conds ...field.Expr) IWorkflowDraftDo {
return w.withDO(w.DO.Select(conds...))
}
func (w workflowDraftDo) Where(conds ...gen.Condition) IWorkflowDraftDo {
return w.withDO(w.DO.Where(conds...))
}
func (w workflowDraftDo) Order(conds ...field.Expr) IWorkflowDraftDo {
return w.withDO(w.DO.Order(conds...))
}
func (w workflowDraftDo) Distinct(cols ...field.Expr) IWorkflowDraftDo {
return w.withDO(w.DO.Distinct(cols...))
}
func (w workflowDraftDo) Omit(cols ...field.Expr) IWorkflowDraftDo {
return w.withDO(w.DO.Omit(cols...))
}
func (w workflowDraftDo) Join(table schema.Tabler, on ...field.Expr) IWorkflowDraftDo {
return w.withDO(w.DO.Join(table, on...))
}
func (w workflowDraftDo) LeftJoin(table schema.Tabler, on ...field.Expr) IWorkflowDraftDo {
return w.withDO(w.DO.LeftJoin(table, on...))
}
func (w workflowDraftDo) RightJoin(table schema.Tabler, on ...field.Expr) IWorkflowDraftDo {
return w.withDO(w.DO.RightJoin(table, on...))
}
func (w workflowDraftDo) Group(cols ...field.Expr) IWorkflowDraftDo {
return w.withDO(w.DO.Group(cols...))
}
func (w workflowDraftDo) Having(conds ...gen.Condition) IWorkflowDraftDo {
return w.withDO(w.DO.Having(conds...))
}
func (w workflowDraftDo) Limit(limit int) IWorkflowDraftDo {
return w.withDO(w.DO.Limit(limit))
}
func (w workflowDraftDo) Offset(offset int) IWorkflowDraftDo {
return w.withDO(w.DO.Offset(offset))
}
func (w workflowDraftDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IWorkflowDraftDo {
return w.withDO(w.DO.Scopes(funcs...))
}
func (w workflowDraftDo) Unscoped() IWorkflowDraftDo {
return w.withDO(w.DO.Unscoped())
}
func (w workflowDraftDo) Create(values ...*model.WorkflowDraft) error {
if len(values) == 0 {
return nil
}
return w.DO.Create(values)
}
func (w workflowDraftDo) CreateInBatches(values []*model.WorkflowDraft, batchSize int) error {
return w.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different from GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (w workflowDraftDo) Save(values ...*model.WorkflowDraft) error {
if len(values) == 0 {
return nil
}
return w.DO.Save(values)
}
func (w workflowDraftDo) First() (*model.WorkflowDraft, error) {
if result, err := w.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowDraft), nil
}
}
func (w workflowDraftDo) Take() (*model.WorkflowDraft, error) {
if result, err := w.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowDraft), nil
}
}
func (w workflowDraftDo) Last() (*model.WorkflowDraft, error) {
if result, err := w.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowDraft), nil
}
}
func (w workflowDraftDo) Find() ([]*model.WorkflowDraft, error) {
result, err := w.DO.Find()
return result.([]*model.WorkflowDraft), err
}
func (w workflowDraftDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.WorkflowDraft, err error) {
buf := make([]*model.WorkflowDraft, 0, batchSize)
err = w.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (w workflowDraftDo) FindInBatches(result *[]*model.WorkflowDraft, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return w.DO.FindInBatches(result, batchSize, fc)
}
func (w workflowDraftDo) Attrs(attrs ...field.AssignExpr) IWorkflowDraftDo {
return w.withDO(w.DO.Attrs(attrs...))
}
func (w workflowDraftDo) Assign(attrs ...field.AssignExpr) IWorkflowDraftDo {
return w.withDO(w.DO.Assign(attrs...))
}
func (w workflowDraftDo) Joins(fields ...field.RelationField) IWorkflowDraftDo {
for _, _f := range fields {
w = *w.withDO(w.DO.Joins(_f))
}
return &w
}
func (w workflowDraftDo) Preload(fields ...field.RelationField) IWorkflowDraftDo {
for _, _f := range fields {
w = *w.withDO(w.DO.Preload(_f))
}
return &w
}
func (w workflowDraftDo) FirstOrInit() (*model.WorkflowDraft, error) {
if result, err := w.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowDraft), nil
}
}
func (w workflowDraftDo) FirstOrCreate() (*model.WorkflowDraft, error) {
if result, err := w.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowDraft), nil
}
}
func (w workflowDraftDo) FindByPage(offset int, limit int) (result []*model.WorkflowDraft, count int64, err error) {
result, err = w.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = w.Offset(-1).Limit(-1).Count()
return
}
func (w workflowDraftDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = w.Count()
if err != nil {
return
}
err = w.Offset(offset).Limit(limit).Scan(result)
return
}
func (w workflowDraftDo) Scan(result interface{}) (err error) {
return w.DO.Scan(result)
}
func (w workflowDraftDo) Delete(models ...*model.WorkflowDraft) (result gen.ResultInfo, err error) {
return w.DO.Delete(models)
}
func (w *workflowDraftDo) withDO(do gen.Dao) *workflowDraftDo {
w.DO = *do.(*gen.DO)
return w
}
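Per the comment above, Save is an upsert (equivalent to db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)) rather than GORM's usual save-by-primary-key. A minimal sketch of creating-or-updating a draft, written as if inside this query package; the model.WorkflowDraft field names (ID, Canvas, CommitID) are assumed from the column definitions above:

// Hypothetical upsert helper for workflow drafts.
func upsertDraft(ctx context.Context, db *gorm.DB, wfID int64, canvas, commitID string) error {
	wd := newWorkflowDraft(db)
	draft := &model.WorkflowDraft{
		ID:       wfID,     // primary key: the workflow ID
		Canvas:   canvas,   // frontend schema
		CommitID: commitID, // identifies this draft snapshot
	}
	// Save issues INSERT ... ON CONFLICT UPDATE ALL, so it covers both the
	// first draft of a workflow and later draft updates.
	return wd.WithContext(ctx).Save(draft)
}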

View File

@@ -0,0 +1,485 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/repo/dal/model"
)
func newWorkflowExecution(db *gorm.DB, opts ...gen.DOOption) workflowExecution {
_workflowExecution := workflowExecution{}
_workflowExecution.workflowExecutionDo.UseDB(db, opts...)
_workflowExecution.workflowExecutionDo.UseModel(&model.WorkflowExecution{})
tableName := _workflowExecution.workflowExecutionDo.TableName()
_workflowExecution.ALL = field.NewAsterisk(tableName)
_workflowExecution.ID = field.NewInt64(tableName, "id")
_workflowExecution.WorkflowID = field.NewInt64(tableName, "workflow_id")
_workflowExecution.Version = field.NewString(tableName, "version")
_workflowExecution.SpaceID = field.NewInt64(tableName, "space_id")
_workflowExecution.Mode = field.NewInt32(tableName, "mode")
_workflowExecution.OperatorID = field.NewInt64(tableName, "operator_id")
_workflowExecution.ConnectorID = field.NewInt64(tableName, "connector_id")
_workflowExecution.ConnectorUID = field.NewString(tableName, "connector_uid")
_workflowExecution.CreatedAt = field.NewInt64(tableName, "created_at")
_workflowExecution.LogID = field.NewString(tableName, "log_id")
_workflowExecution.Status = field.NewInt32(tableName, "status")
_workflowExecution.Duration = field.NewInt64(tableName, "duration")
_workflowExecution.Input = field.NewString(tableName, "input")
_workflowExecution.Output = field.NewString(tableName, "output")
_workflowExecution.ErrorCode = field.NewString(tableName, "error_code")
_workflowExecution.FailReason = field.NewString(tableName, "fail_reason")
_workflowExecution.InputTokens = field.NewInt64(tableName, "input_tokens")
_workflowExecution.OutputTokens = field.NewInt64(tableName, "output_tokens")
_workflowExecution.UpdatedAt = field.NewInt64(tableName, "updated_at")
_workflowExecution.RootExecutionID = field.NewInt64(tableName, "root_execution_id")
_workflowExecution.ParentNodeID = field.NewString(tableName, "parent_node_id")
_workflowExecution.AppID = field.NewInt64(tableName, "app_id")
_workflowExecution.NodeCount = field.NewInt32(tableName, "node_count")
_workflowExecution.ResumeEventID = field.NewInt64(tableName, "resume_event_id")
_workflowExecution.AgentID = field.NewInt64(tableName, "agent_id")
_workflowExecution.SyncPattern = field.NewInt32(tableName, "sync_pattern")
_workflowExecution.CommitID = field.NewString(tableName, "commit_id")
_workflowExecution.fillFieldMap()
return _workflowExecution
}
// workflowExecution workflow execution record table, recording the status of each workflow execution
type workflowExecution struct {
workflowExecutionDo
ALL field.Asterisk
ID field.Int64 // execute id
WorkflowID field.Int64 // workflow_id
Version field.String // workflow version; empty if it is a draft
SpaceID field.Int64 // the space id the workflow belongs to
Mode field.Int32 // the execution mode: 1. debug run 2. release run 3. node debug
OperatorID field.Int64 // the user id that runs this workflow
ConnectorID field.Int64 // the connector on which this execution happened
ConnectorUID field.String // user id of the connector
CreatedAt field.Int64 // create time in millisecond
LogID field.String // log id
Status field.Int32 // 1=running 2=success 3=fail 4=interrupted
Duration field.Int64 // execution duration in millisecond
Input field.String // actual input of this execution
Output field.String // the actual output of this execution
ErrorCode field.String // error code if any
FailReason field.String // the reason for failure
InputTokens field.Int64 // number of input tokens
OutputTokens field.Int64 // number of output tokens
UpdatedAt field.Int64 // update time in millisecond
RootExecutionID field.Int64 // the top level execution id. Null if this is the root
ParentNodeID field.String // the node key for the sub_workflow node that executes this workflow
AppID field.Int64 // app id this workflow execution belongs to
NodeCount field.Int32 // the total node count of the workflow
ResumeEventID field.Int64 // the current event ID which is resuming
AgentID field.Int64 // the agent that this execution binds to
SyncPattern field.Int32 // the sync pattern 1. sync 2. async 3. stream
CommitID field.String // draft commit id this execution belongs to
fieldMap map[string]field.Expr
}
func (w workflowExecution) Table(newTableName string) *workflowExecution {
w.workflowExecutionDo.UseTable(newTableName)
return w.updateTableName(newTableName)
}
func (w workflowExecution) As(alias string) *workflowExecution {
w.workflowExecutionDo.DO = *(w.workflowExecutionDo.As(alias).(*gen.DO))
return w.updateTableName(alias)
}
func (w *workflowExecution) updateTableName(table string) *workflowExecution {
w.ALL = field.NewAsterisk(table)
w.ID = field.NewInt64(table, "id")
w.WorkflowID = field.NewInt64(table, "workflow_id")
w.Version = field.NewString(table, "version")
w.SpaceID = field.NewInt64(table, "space_id")
w.Mode = field.NewInt32(table, "mode")
w.OperatorID = field.NewInt64(table, "operator_id")
w.ConnectorID = field.NewInt64(table, "connector_id")
w.ConnectorUID = field.NewString(table, "connector_uid")
w.CreatedAt = field.NewInt64(table, "created_at")
w.LogID = field.NewString(table, "log_id")
w.Status = field.NewInt32(table, "status")
w.Duration = field.NewInt64(table, "duration")
w.Input = field.NewString(table, "input")
w.Output = field.NewString(table, "output")
w.ErrorCode = field.NewString(table, "error_code")
w.FailReason = field.NewString(table, "fail_reason")
w.InputTokens = field.NewInt64(table, "input_tokens")
w.OutputTokens = field.NewInt64(table, "output_tokens")
w.UpdatedAt = field.NewInt64(table, "updated_at")
w.RootExecutionID = field.NewInt64(table, "root_execution_id")
w.ParentNodeID = field.NewString(table, "parent_node_id")
w.AppID = field.NewInt64(table, "app_id")
w.NodeCount = field.NewInt32(table, "node_count")
w.ResumeEventID = field.NewInt64(table, "resume_event_id")
w.AgentID = field.NewInt64(table, "agent_id")
w.SyncPattern = field.NewInt32(table, "sync_pattern")
w.CommitID = field.NewString(table, "commit_id")
w.fillFieldMap()
return w
}
func (w *workflowExecution) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := w.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (w *workflowExecution) fillFieldMap() {
w.fieldMap = make(map[string]field.Expr, 27)
w.fieldMap["id"] = w.ID
w.fieldMap["workflow_id"] = w.WorkflowID
w.fieldMap["version"] = w.Version
w.fieldMap["space_id"] = w.SpaceID
w.fieldMap["mode"] = w.Mode
w.fieldMap["operator_id"] = w.OperatorID
w.fieldMap["connector_id"] = w.ConnectorID
w.fieldMap["connector_uid"] = w.ConnectorUID
w.fieldMap["created_at"] = w.CreatedAt
w.fieldMap["log_id"] = w.LogID
w.fieldMap["status"] = w.Status
w.fieldMap["duration"] = w.Duration
w.fieldMap["input"] = w.Input
w.fieldMap["output"] = w.Output
w.fieldMap["error_code"] = w.ErrorCode
w.fieldMap["fail_reason"] = w.FailReason
w.fieldMap["input_tokens"] = w.InputTokens
w.fieldMap["output_tokens"] = w.OutputTokens
w.fieldMap["updated_at"] = w.UpdatedAt
w.fieldMap["root_execution_id"] = w.RootExecutionID
w.fieldMap["parent_node_id"] = w.ParentNodeID
w.fieldMap["app_id"] = w.AppID
w.fieldMap["node_count"] = w.NodeCount
w.fieldMap["resume_event_id"] = w.ResumeEventID
w.fieldMap["agent_id"] = w.AgentID
w.fieldMap["sync_pattern"] = w.SyncPattern
w.fieldMap["commit_id"] = w.CommitID
}
func (w workflowExecution) clone(db *gorm.DB) workflowExecution {
w.workflowExecutionDo.ReplaceConnPool(db.Statement.ConnPool)
return w
}
func (w workflowExecution) replaceDB(db *gorm.DB) workflowExecution {
w.workflowExecutionDo.ReplaceDB(db)
return w
}
type workflowExecutionDo struct{ gen.DO }
type IWorkflowExecutionDo interface {
gen.SubQuery
Debug() IWorkflowExecutionDo
WithContext(ctx context.Context) IWorkflowExecutionDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() IWorkflowExecutionDo
WriteDB() IWorkflowExecutionDo
As(alias string) gen.Dao
Session(config *gorm.Session) IWorkflowExecutionDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) IWorkflowExecutionDo
Not(conds ...gen.Condition) IWorkflowExecutionDo
Or(conds ...gen.Condition) IWorkflowExecutionDo
Select(conds ...field.Expr) IWorkflowExecutionDo
Where(conds ...gen.Condition) IWorkflowExecutionDo
Order(conds ...field.Expr) IWorkflowExecutionDo
Distinct(cols ...field.Expr) IWorkflowExecutionDo
Omit(cols ...field.Expr) IWorkflowExecutionDo
Join(table schema.Tabler, on ...field.Expr) IWorkflowExecutionDo
LeftJoin(table schema.Tabler, on ...field.Expr) IWorkflowExecutionDo
RightJoin(table schema.Tabler, on ...field.Expr) IWorkflowExecutionDo
Group(cols ...field.Expr) IWorkflowExecutionDo
Having(conds ...gen.Condition) IWorkflowExecutionDo
Limit(limit int) IWorkflowExecutionDo
Offset(offset int) IWorkflowExecutionDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) IWorkflowExecutionDo
Unscoped() IWorkflowExecutionDo
Create(values ...*model.WorkflowExecution) error
CreateInBatches(values []*model.WorkflowExecution, batchSize int) error
Save(values ...*model.WorkflowExecution) error
First() (*model.WorkflowExecution, error)
Take() (*model.WorkflowExecution, error)
Last() (*model.WorkflowExecution, error)
Find() ([]*model.WorkflowExecution, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.WorkflowExecution, err error)
FindInBatches(result *[]*model.WorkflowExecution, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.WorkflowExecution) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) IWorkflowExecutionDo
Assign(attrs ...field.AssignExpr) IWorkflowExecutionDo
Joins(fields ...field.RelationField) IWorkflowExecutionDo
Preload(fields ...field.RelationField) IWorkflowExecutionDo
FirstOrInit() (*model.WorkflowExecution, error)
FirstOrCreate() (*model.WorkflowExecution, error)
FindByPage(offset int, limit int) (result []*model.WorkflowExecution, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) IWorkflowExecutionDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (w workflowExecutionDo) Debug() IWorkflowExecutionDo {
return w.withDO(w.DO.Debug())
}
func (w workflowExecutionDo) WithContext(ctx context.Context) IWorkflowExecutionDo {
return w.withDO(w.DO.WithContext(ctx))
}
func (w workflowExecutionDo) ReadDB() IWorkflowExecutionDo {
return w.Clauses(dbresolver.Read)
}
func (w workflowExecutionDo) WriteDB() IWorkflowExecutionDo {
return w.Clauses(dbresolver.Write)
}
func (w workflowExecutionDo) Session(config *gorm.Session) IWorkflowExecutionDo {
return w.withDO(w.DO.Session(config))
}
func (w workflowExecutionDo) Clauses(conds ...clause.Expression) IWorkflowExecutionDo {
return w.withDO(w.DO.Clauses(conds...))
}
func (w workflowExecutionDo) Returning(value interface{}, columns ...string) IWorkflowExecutionDo {
return w.withDO(w.DO.Returning(value, columns...))
}
func (w workflowExecutionDo) Not(conds ...gen.Condition) IWorkflowExecutionDo {
return w.withDO(w.DO.Not(conds...))
}
func (w workflowExecutionDo) Or(conds ...gen.Condition) IWorkflowExecutionDo {
return w.withDO(w.DO.Or(conds...))
}
func (w workflowExecutionDo) Select(conds ...field.Expr) IWorkflowExecutionDo {
return w.withDO(w.DO.Select(conds...))
}
func (w workflowExecutionDo) Where(conds ...gen.Condition) IWorkflowExecutionDo {
return w.withDO(w.DO.Where(conds...))
}
func (w workflowExecutionDo) Order(conds ...field.Expr) IWorkflowExecutionDo {
return w.withDO(w.DO.Order(conds...))
}
func (w workflowExecutionDo) Distinct(cols ...field.Expr) IWorkflowExecutionDo {
return w.withDO(w.DO.Distinct(cols...))
}
func (w workflowExecutionDo) Omit(cols ...field.Expr) IWorkflowExecutionDo {
return w.withDO(w.DO.Omit(cols...))
}
func (w workflowExecutionDo) Join(table schema.Tabler, on ...field.Expr) IWorkflowExecutionDo {
return w.withDO(w.DO.Join(table, on...))
}
func (w workflowExecutionDo) LeftJoin(table schema.Tabler, on ...field.Expr) IWorkflowExecutionDo {
return w.withDO(w.DO.LeftJoin(table, on...))
}
func (w workflowExecutionDo) RightJoin(table schema.Tabler, on ...field.Expr) IWorkflowExecutionDo {
return w.withDO(w.DO.RightJoin(table, on...))
}
func (w workflowExecutionDo) Group(cols ...field.Expr) IWorkflowExecutionDo {
return w.withDO(w.DO.Group(cols...))
}
func (w workflowExecutionDo) Having(conds ...gen.Condition) IWorkflowExecutionDo {
return w.withDO(w.DO.Having(conds...))
}
func (w workflowExecutionDo) Limit(limit int) IWorkflowExecutionDo {
return w.withDO(w.DO.Limit(limit))
}
func (w workflowExecutionDo) Offset(offset int) IWorkflowExecutionDo {
return w.withDO(w.DO.Offset(offset))
}
func (w workflowExecutionDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IWorkflowExecutionDo {
return w.withDO(w.DO.Scopes(funcs...))
}
func (w workflowExecutionDo) Unscoped() IWorkflowExecutionDo {
return w.withDO(w.DO.Unscoped())
}
func (w workflowExecutionDo) Create(values ...*model.WorkflowExecution) error {
if len(values) == 0 {
return nil
}
return w.DO.Create(values)
}
func (w workflowExecutionDo) CreateInBatches(values []*model.WorkflowExecution, batchSize int) error {
return w.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different from GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (w workflowExecutionDo) Save(values ...*model.WorkflowExecution) error {
if len(values) == 0 {
return nil
}
return w.DO.Save(values)
}
func (w workflowExecutionDo) First() (*model.WorkflowExecution, error) {
if result, err := w.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowExecution), nil
}
}
func (w workflowExecutionDo) Take() (*model.WorkflowExecution, error) {
if result, err := w.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowExecution), nil
}
}
func (w workflowExecutionDo) Last() (*model.WorkflowExecution, error) {
if result, err := w.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowExecution), nil
}
}
func (w workflowExecutionDo) Find() ([]*model.WorkflowExecution, error) {
result, err := w.DO.Find()
return result.([]*model.WorkflowExecution), err
}
func (w workflowExecutionDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.WorkflowExecution, err error) {
buf := make([]*model.WorkflowExecution, 0, batchSize)
err = w.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (w workflowExecutionDo) FindInBatches(result *[]*model.WorkflowExecution, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return w.DO.FindInBatches(result, batchSize, fc)
}
func (w workflowExecutionDo) Attrs(attrs ...field.AssignExpr) IWorkflowExecutionDo {
return w.withDO(w.DO.Attrs(attrs...))
}
func (w workflowExecutionDo) Assign(attrs ...field.AssignExpr) IWorkflowExecutionDo {
return w.withDO(w.DO.Assign(attrs...))
}
func (w workflowExecutionDo) Joins(fields ...field.RelationField) IWorkflowExecutionDo {
for _, _f := range fields {
w = *w.withDO(w.DO.Joins(_f))
}
return &w
}
func (w workflowExecutionDo) Preload(fields ...field.RelationField) IWorkflowExecutionDo {
for _, _f := range fields {
w = *w.withDO(w.DO.Preload(_f))
}
return &w
}
func (w workflowExecutionDo) FirstOrInit() (*model.WorkflowExecution, error) {
if result, err := w.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowExecution), nil
}
}
func (w workflowExecutionDo) FirstOrCreate() (*model.WorkflowExecution, error) {
if result, err := w.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowExecution), nil
}
}
func (w workflowExecutionDo) FindByPage(offset int, limit int) (result []*model.WorkflowExecution, count int64, err error) {
result, err = w.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = w.Offset(-1).Limit(-1).Count()
return
}
func (w workflowExecutionDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = w.Count()
if err != nil {
return
}
err = w.Offset(offset).Limit(limit).Scan(result)
return
}
func (w workflowExecutionDo) Scan(result interface{}) (err error) {
return w.DO.Scan(result)
}
func (w workflowExecutionDo) Delete(models ...*model.WorkflowExecution) (result gen.ResultInfo, err error) {
return w.DO.Delete(models)
}
func (w *workflowExecutionDo) withDO(do gen.Dao) *workflowExecutionDo {
w.DO = *do.(*gen.DO)
return w
}
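A minimal read-path sketch against the execution table above: list the most recent running executions of one workflow. It is written as if inside this query package; the Eq/Desc helpers come from gorm.io/gen/field, and status 1 follows the column comment (1=running):

// Hypothetical query helper.
func recentRunningExecutions(ctx context.Context, db *gorm.DB, wfID int64, limit int) ([]*model.WorkflowExecution, error) {
	we := newWorkflowExecution(db)
	return we.WithContext(ctx).
		Where(we.WorkflowID.Eq(wfID), we.Status.Eq(1)). // 1 = running per the column comment
		Order(we.CreatedAt.Desc()).                     // newest first
		Limit(limit).
		Find()
}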

View File

@@ -0,0 +1,453 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/repo/dal/model"
)
func newWorkflowMeta(db *gorm.DB, opts ...gen.DOOption) workflowMeta {
_workflowMeta := workflowMeta{}
_workflowMeta.workflowMetaDo.UseDB(db, opts...)
_workflowMeta.workflowMetaDo.UseModel(&model.WorkflowMeta{})
tableName := _workflowMeta.workflowMetaDo.TableName()
_workflowMeta.ALL = field.NewAsterisk(tableName)
_workflowMeta.ID = field.NewInt64(tableName, "id")
_workflowMeta.Name = field.NewString(tableName, "name")
_workflowMeta.Description = field.NewString(tableName, "description")
_workflowMeta.IconURI = field.NewString(tableName, "icon_uri")
_workflowMeta.Status = field.NewInt32(tableName, "status")
_workflowMeta.ContentType = field.NewInt32(tableName, "content_type")
_workflowMeta.Mode = field.NewInt32(tableName, "mode")
_workflowMeta.CreatedAt = field.NewInt64(tableName, "created_at")
_workflowMeta.UpdatedAt = field.NewInt64(tableName, "updated_at")
_workflowMeta.DeletedAt = field.NewField(tableName, "deleted_at")
_workflowMeta.CreatorID = field.NewInt64(tableName, "creator_id")
_workflowMeta.Tag = field.NewInt32(tableName, "tag")
_workflowMeta.AuthorID = field.NewInt64(tableName, "author_id")
_workflowMeta.SpaceID = field.NewInt64(tableName, "space_id")
_workflowMeta.UpdaterID = field.NewInt64(tableName, "updater_id")
_workflowMeta.SourceID = field.NewInt64(tableName, "source_id")
_workflowMeta.AppID = field.NewInt64(tableName, "app_id")
_workflowMeta.LatestVersion = field.NewString(tableName, "latest_version")
_workflowMeta.LatestVersionTs = field.NewInt64(tableName, "latest_version_ts")
_workflowMeta.fillFieldMap()
return _workflowMeta
}
// workflowMeta workflow meta-info table, recording the basic meta information of a workflow
type workflowMeta struct {
workflowMetaDo
ALL field.Asterisk
ID field.Int64 // workflow id
Name field.String // workflow name
Description field.String // workflow description
IconURI field.String // icon uri
Status field.Int32 // 0: never published, 1: has been published
ContentType field.Int32 // 0: user-created, 1: official
Mode field.Int32 // 0:workflow, 3:chat_flow
CreatedAt field.Int64 // create time in millisecond
UpdatedAt field.Int64 // update time in millisecond
DeletedAt field.Field // delete time in millisecond
CreatorID field.Int64 // user id for creator
Tag field.Int32 // template tag: Tag: 1=All, 2=Hot, 3=Information, 4=Music, 5=Picture, 6=UtilityTool, 7=Life, 8=Travel, 9=Network, 10=System, 11=Movie, 12=Office, 13=Shopping, 14=Education, 15=Health, 16=Social, 17=Entertainment, 18=Finance, 100=Hidden
AuthorID field.Int64 // user ID of the original author
SpaceID field.Int64 // space ID
UpdaterID field.Int64 // user ID of the last user who updated the meta info
SourceID field.Int64 // ID of the source workflow this one was copied from
AppID field.Int64 // app ID
LatestVersion field.String // the version of the most recent publish
LatestVersionTs field.Int64 // create time of latest version
fieldMap map[string]field.Expr
}
func (w workflowMeta) Table(newTableName string) *workflowMeta {
w.workflowMetaDo.UseTable(newTableName)
return w.updateTableName(newTableName)
}
func (w workflowMeta) As(alias string) *workflowMeta {
w.workflowMetaDo.DO = *(w.workflowMetaDo.As(alias).(*gen.DO))
return w.updateTableName(alias)
}
func (w *workflowMeta) updateTableName(table string) *workflowMeta {
w.ALL = field.NewAsterisk(table)
w.ID = field.NewInt64(table, "id")
w.Name = field.NewString(table, "name")
w.Description = field.NewString(table, "description")
w.IconURI = field.NewString(table, "icon_uri")
w.Status = field.NewInt32(table, "status")
w.ContentType = field.NewInt32(table, "content_type")
w.Mode = field.NewInt32(table, "mode")
w.CreatedAt = field.NewInt64(table, "created_at")
w.UpdatedAt = field.NewInt64(table, "updated_at")
w.DeletedAt = field.NewField(table, "deleted_at")
w.CreatorID = field.NewInt64(table, "creator_id")
w.Tag = field.NewInt32(table, "tag")
w.AuthorID = field.NewInt64(table, "author_id")
w.SpaceID = field.NewInt64(table, "space_id")
w.UpdaterID = field.NewInt64(table, "updater_id")
w.SourceID = field.NewInt64(table, "source_id")
w.AppID = field.NewInt64(table, "app_id")
w.LatestVersion = field.NewString(table, "latest_version")
w.LatestVersionTs = field.NewInt64(table, "latest_version_ts")
w.fillFieldMap()
return w
}
func (w *workflowMeta) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := w.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (w *workflowMeta) fillFieldMap() {
w.fieldMap = make(map[string]field.Expr, 19)
w.fieldMap["id"] = w.ID
w.fieldMap["name"] = w.Name
w.fieldMap["description"] = w.Description
w.fieldMap["icon_uri"] = w.IconURI
w.fieldMap["status"] = w.Status
w.fieldMap["content_type"] = w.ContentType
w.fieldMap["mode"] = w.Mode
w.fieldMap["created_at"] = w.CreatedAt
w.fieldMap["updated_at"] = w.UpdatedAt
w.fieldMap["deleted_at"] = w.DeletedAt
w.fieldMap["creator_id"] = w.CreatorID
w.fieldMap["tag"] = w.Tag
w.fieldMap["author_id"] = w.AuthorID
w.fieldMap["space_id"] = w.SpaceID
w.fieldMap["updater_id"] = w.UpdaterID
w.fieldMap["source_id"] = w.SourceID
w.fieldMap["app_id"] = w.AppID
w.fieldMap["latest_version"] = w.LatestVersion
w.fieldMap["latest_version_ts"] = w.LatestVersionTs
}
func (w workflowMeta) clone(db *gorm.DB) workflowMeta {
w.workflowMetaDo.ReplaceConnPool(db.Statement.ConnPool)
return w
}
func (w workflowMeta) replaceDB(db *gorm.DB) workflowMeta {
w.workflowMetaDo.ReplaceDB(db)
return w
}
type workflowMetaDo struct{ gen.DO }
type IWorkflowMetaDo interface {
gen.SubQuery
Debug() IWorkflowMetaDo
WithContext(ctx context.Context) IWorkflowMetaDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() IWorkflowMetaDo
WriteDB() IWorkflowMetaDo
As(alias string) gen.Dao
Session(config *gorm.Session) IWorkflowMetaDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) IWorkflowMetaDo
Not(conds ...gen.Condition) IWorkflowMetaDo
Or(conds ...gen.Condition) IWorkflowMetaDo
Select(conds ...field.Expr) IWorkflowMetaDo
Where(conds ...gen.Condition) IWorkflowMetaDo
Order(conds ...field.Expr) IWorkflowMetaDo
Distinct(cols ...field.Expr) IWorkflowMetaDo
Omit(cols ...field.Expr) IWorkflowMetaDo
Join(table schema.Tabler, on ...field.Expr) IWorkflowMetaDo
LeftJoin(table schema.Tabler, on ...field.Expr) IWorkflowMetaDo
RightJoin(table schema.Tabler, on ...field.Expr) IWorkflowMetaDo
Group(cols ...field.Expr) IWorkflowMetaDo
Having(conds ...gen.Condition) IWorkflowMetaDo
Limit(limit int) IWorkflowMetaDo
Offset(offset int) IWorkflowMetaDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) IWorkflowMetaDo
Unscoped() IWorkflowMetaDo
Create(values ...*model.WorkflowMeta) error
CreateInBatches(values []*model.WorkflowMeta, batchSize int) error
Save(values ...*model.WorkflowMeta) error
First() (*model.WorkflowMeta, error)
Take() (*model.WorkflowMeta, error)
Last() (*model.WorkflowMeta, error)
Find() ([]*model.WorkflowMeta, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.WorkflowMeta, err error)
FindInBatches(result *[]*model.WorkflowMeta, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.WorkflowMeta) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) IWorkflowMetaDo
Assign(attrs ...field.AssignExpr) IWorkflowMetaDo
Joins(fields ...field.RelationField) IWorkflowMetaDo
Preload(fields ...field.RelationField) IWorkflowMetaDo
FirstOrInit() (*model.WorkflowMeta, error)
FirstOrCreate() (*model.WorkflowMeta, error)
FindByPage(offset int, limit int) (result []*model.WorkflowMeta, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) IWorkflowMetaDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (w workflowMetaDo) Debug() IWorkflowMetaDo {
return w.withDO(w.DO.Debug())
}
func (w workflowMetaDo) WithContext(ctx context.Context) IWorkflowMetaDo {
return w.withDO(w.DO.WithContext(ctx))
}
func (w workflowMetaDo) ReadDB() IWorkflowMetaDo {
return w.Clauses(dbresolver.Read)
}
func (w workflowMetaDo) WriteDB() IWorkflowMetaDo {
return w.Clauses(dbresolver.Write)
}
func (w workflowMetaDo) Session(config *gorm.Session) IWorkflowMetaDo {
return w.withDO(w.DO.Session(config))
}
func (w workflowMetaDo) Clauses(conds ...clause.Expression) IWorkflowMetaDo {
return w.withDO(w.DO.Clauses(conds...))
}
func (w workflowMetaDo) Returning(value interface{}, columns ...string) IWorkflowMetaDo {
return w.withDO(w.DO.Returning(value, columns...))
}
func (w workflowMetaDo) Not(conds ...gen.Condition) IWorkflowMetaDo {
return w.withDO(w.DO.Not(conds...))
}
func (w workflowMetaDo) Or(conds ...gen.Condition) IWorkflowMetaDo {
return w.withDO(w.DO.Or(conds...))
}
func (w workflowMetaDo) Select(conds ...field.Expr) IWorkflowMetaDo {
return w.withDO(w.DO.Select(conds...))
}
func (w workflowMetaDo) Where(conds ...gen.Condition) IWorkflowMetaDo {
return w.withDO(w.DO.Where(conds...))
}
func (w workflowMetaDo) Order(conds ...field.Expr) IWorkflowMetaDo {
return w.withDO(w.DO.Order(conds...))
}
func (w workflowMetaDo) Distinct(cols ...field.Expr) IWorkflowMetaDo {
return w.withDO(w.DO.Distinct(cols...))
}
func (w workflowMetaDo) Omit(cols ...field.Expr) IWorkflowMetaDo {
return w.withDO(w.DO.Omit(cols...))
}
func (w workflowMetaDo) Join(table schema.Tabler, on ...field.Expr) IWorkflowMetaDo {
return w.withDO(w.DO.Join(table, on...))
}
func (w workflowMetaDo) LeftJoin(table schema.Tabler, on ...field.Expr) IWorkflowMetaDo {
return w.withDO(w.DO.LeftJoin(table, on...))
}
func (w workflowMetaDo) RightJoin(table schema.Tabler, on ...field.Expr) IWorkflowMetaDo {
return w.withDO(w.DO.RightJoin(table, on...))
}
func (w workflowMetaDo) Group(cols ...field.Expr) IWorkflowMetaDo {
return w.withDO(w.DO.Group(cols...))
}
func (w workflowMetaDo) Having(conds ...gen.Condition) IWorkflowMetaDo {
return w.withDO(w.DO.Having(conds...))
}
func (w workflowMetaDo) Limit(limit int) IWorkflowMetaDo {
return w.withDO(w.DO.Limit(limit))
}
func (w workflowMetaDo) Offset(offset int) IWorkflowMetaDo {
return w.withDO(w.DO.Offset(offset))
}
func (w workflowMetaDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IWorkflowMetaDo {
return w.withDO(w.DO.Scopes(funcs...))
}
func (w workflowMetaDo) Unscoped() IWorkflowMetaDo {
return w.withDO(w.DO.Unscoped())
}
func (w workflowMetaDo) Create(values ...*model.WorkflowMeta) error {
if len(values) == 0 {
return nil
}
return w.DO.Create(values)
}
func (w workflowMetaDo) CreateInBatches(values []*model.WorkflowMeta, batchSize int) error {
return w.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different from GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (w workflowMetaDo) Save(values ...*model.WorkflowMeta) error {
if len(values) == 0 {
return nil
}
return w.DO.Save(values)
}
func (w workflowMetaDo) First() (*model.WorkflowMeta, error) {
if result, err := w.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowMeta), nil
}
}
func (w workflowMetaDo) Take() (*model.WorkflowMeta, error) {
if result, err := w.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowMeta), nil
}
}
func (w workflowMetaDo) Last() (*model.WorkflowMeta, error) {
if result, err := w.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowMeta), nil
}
}
func (w workflowMetaDo) Find() ([]*model.WorkflowMeta, error) {
result, err := w.DO.Find()
return result.([]*model.WorkflowMeta), err
}
func (w workflowMetaDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.WorkflowMeta, err error) {
buf := make([]*model.WorkflowMeta, 0, batchSize)
err = w.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (w workflowMetaDo) FindInBatches(result *[]*model.WorkflowMeta, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return w.DO.FindInBatches(result, batchSize, fc)
}
func (w workflowMetaDo) Attrs(attrs ...field.AssignExpr) IWorkflowMetaDo {
return w.withDO(w.DO.Attrs(attrs...))
}
func (w workflowMetaDo) Assign(attrs ...field.AssignExpr) IWorkflowMetaDo {
return w.withDO(w.DO.Assign(attrs...))
}
func (w workflowMetaDo) Joins(fields ...field.RelationField) IWorkflowMetaDo {
for _, _f := range fields {
w = *w.withDO(w.DO.Joins(_f))
}
return &w
}
func (w workflowMetaDo) Preload(fields ...field.RelationField) IWorkflowMetaDo {
for _, _f := range fields {
w = *w.withDO(w.DO.Preload(_f))
}
return &w
}
func (w workflowMetaDo) FirstOrInit() (*model.WorkflowMeta, error) {
if result, err := w.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowMeta), nil
}
}
func (w workflowMetaDo) FirstOrCreate() (*model.WorkflowMeta, error) {
if result, err := w.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowMeta), nil
}
}
func (w workflowMetaDo) FindByPage(offset int, limit int) (result []*model.WorkflowMeta, count int64, err error) {
result, err = w.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = w.Offset(-1).Limit(-1).Count()
return
}
func (w workflowMetaDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = w.Count()
if err != nil {
return
}
err = w.Offset(offset).Limit(limit).Scan(result)
return
}
func (w workflowMetaDo) Scan(result interface{}) (err error) {
return w.DO.Scan(result)
}
func (w workflowMetaDo) Delete(models ...*model.WorkflowMeta) (result gen.ResultInfo, err error) {
return w.DO.Delete(models)
}
func (w *workflowMetaDo) withDO(do gen.Dao) *workflowMetaDo {
w.DO = *do.(*gen.DO)
return w
}
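For bulk reads over the meta table, the generated FindInBatch above pages through rows in fixed-size batches and collects them into a single slice. A minimal sketch, written as if inside this query package; the space filter and the batch size of 200 are illustrative assumptions:

// Hypothetical batch loader for all published workflows of a space
// (status 1 = has been published, per the column comment).
func loadPublishedMetas(ctx context.Context, db *gorm.DB, spaceID int64) ([]*model.WorkflowMeta, error) {
	wm := newWorkflowMeta(db)
	return wm.WithContext(ctx).
		Where(wm.SpaceID.Eq(spaceID), wm.Status.Eq(1)).
		FindInBatch(200, func(tx gen.Dao, batch int) error {
			// Each batch of up to 200 rows is appended to the returned slice
			// by the generated wrapper; per-batch work could go here.
			return nil
		})
}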

View File

@@ -0,0 +1,409 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/repo/dal/model"
)
func newWorkflowReference(db *gorm.DB, opts ...gen.DOOption) workflowReference {
_workflowReference := workflowReference{}
_workflowReference.workflowReferenceDo.UseDB(db, opts...)
_workflowReference.workflowReferenceDo.UseModel(&model.WorkflowReference{})
tableName := _workflowReference.workflowReferenceDo.TableName()
_workflowReference.ALL = field.NewAsterisk(tableName)
_workflowReference.ID = field.NewInt64(tableName, "id")
_workflowReference.ReferredID = field.NewInt64(tableName, "referred_id")
_workflowReference.ReferringID = field.NewInt64(tableName, "referring_id")
_workflowReference.ReferType = field.NewInt32(tableName, "refer_type")
_workflowReference.ReferringBizType = field.NewInt32(tableName, "referring_biz_type")
_workflowReference.CreatedAt = field.NewInt64(tableName, "created_at")
_workflowReference.Status = field.NewInt32(tableName, "status")
_workflowReference.DeletedAt = field.NewField(tableName, "deleted_at")
_workflowReference.fillFieldMap()
return _workflowReference
}
// workflowReference workflow reference table, recording direct reference relationships between workflows
type workflowReference struct {
workflowReferenceDo
ALL field.Asterisk
ID field.Int64 // workflow id
ReferredID field.Int64 // the id of the workflow that is referred by other entities
ReferringID field.Int64 // the entity id that refers this workflow
ReferType field.Int32 // 1 subworkflow 2 tool
ReferringBizType field.Int32 // the biz type the referring entity belongs to: 1. workflow 2. agent
CreatedAt field.Int64 // create time in millisecond
Status field.Int32 // whether this reference currently takes effect. 0: disabled 1: enabled
DeletedAt field.Field
fieldMap map[string]field.Expr
}
func (w workflowReference) Table(newTableName string) *workflowReference {
w.workflowReferenceDo.UseTable(newTableName)
return w.updateTableName(newTableName)
}
func (w workflowReference) As(alias string) *workflowReference {
w.workflowReferenceDo.DO = *(w.workflowReferenceDo.As(alias).(*gen.DO))
return w.updateTableName(alias)
}
func (w *workflowReference) updateTableName(table string) *workflowReference {
w.ALL = field.NewAsterisk(table)
w.ID = field.NewInt64(table, "id")
w.ReferredID = field.NewInt64(table, "referred_id")
w.ReferringID = field.NewInt64(table, "referring_id")
w.ReferType = field.NewInt32(table, "refer_type")
w.ReferringBizType = field.NewInt32(table, "referring_biz_type")
w.CreatedAt = field.NewInt64(table, "created_at")
w.Status = field.NewInt32(table, "status")
w.DeletedAt = field.NewField(table, "deleted_at")
w.fillFieldMap()
return w
}
func (w *workflowReference) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := w.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (w *workflowReference) fillFieldMap() {
w.fieldMap = make(map[string]field.Expr, 8)
w.fieldMap["id"] = w.ID
w.fieldMap["referred_id"] = w.ReferredID
w.fieldMap["referring_id"] = w.ReferringID
w.fieldMap["refer_type"] = w.ReferType
w.fieldMap["referring_biz_type"] = w.ReferringBizType
w.fieldMap["created_at"] = w.CreatedAt
w.fieldMap["status"] = w.Status
w.fieldMap["deleted_at"] = w.DeletedAt
}
func (w workflowReference) clone(db *gorm.DB) workflowReference {
w.workflowReferenceDo.ReplaceConnPool(db.Statement.ConnPool)
return w
}
func (w workflowReference) replaceDB(db *gorm.DB) workflowReference {
w.workflowReferenceDo.ReplaceDB(db)
return w
}
type workflowReferenceDo struct{ gen.DO }
type IWorkflowReferenceDo interface {
gen.SubQuery
Debug() IWorkflowReferenceDo
WithContext(ctx context.Context) IWorkflowReferenceDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() IWorkflowReferenceDo
WriteDB() IWorkflowReferenceDo
As(alias string) gen.Dao
Session(config *gorm.Session) IWorkflowReferenceDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) IWorkflowReferenceDo
Not(conds ...gen.Condition) IWorkflowReferenceDo
Or(conds ...gen.Condition) IWorkflowReferenceDo
Select(conds ...field.Expr) IWorkflowReferenceDo
Where(conds ...gen.Condition) IWorkflowReferenceDo
Order(conds ...field.Expr) IWorkflowReferenceDo
Distinct(cols ...field.Expr) IWorkflowReferenceDo
Omit(cols ...field.Expr) IWorkflowReferenceDo
Join(table schema.Tabler, on ...field.Expr) IWorkflowReferenceDo
LeftJoin(table schema.Tabler, on ...field.Expr) IWorkflowReferenceDo
RightJoin(table schema.Tabler, on ...field.Expr) IWorkflowReferenceDo
Group(cols ...field.Expr) IWorkflowReferenceDo
Having(conds ...gen.Condition) IWorkflowReferenceDo
Limit(limit int) IWorkflowReferenceDo
Offset(offset int) IWorkflowReferenceDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) IWorkflowReferenceDo
Unscoped() IWorkflowReferenceDo
Create(values ...*model.WorkflowReference) error
CreateInBatches(values []*model.WorkflowReference, batchSize int) error
Save(values ...*model.WorkflowReference) error
First() (*model.WorkflowReference, error)
Take() (*model.WorkflowReference, error)
Last() (*model.WorkflowReference, error)
Find() ([]*model.WorkflowReference, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.WorkflowReference, err error)
FindInBatches(result *[]*model.WorkflowReference, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.WorkflowReference) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) IWorkflowReferenceDo
Assign(attrs ...field.AssignExpr) IWorkflowReferenceDo
Joins(fields ...field.RelationField) IWorkflowReferenceDo
Preload(fields ...field.RelationField) IWorkflowReferenceDo
FirstOrInit() (*model.WorkflowReference, error)
FirstOrCreate() (*model.WorkflowReference, error)
FindByPage(offset int, limit int) (result []*model.WorkflowReference, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) IWorkflowReferenceDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (w workflowReferenceDo) Debug() IWorkflowReferenceDo {
return w.withDO(w.DO.Debug())
}
func (w workflowReferenceDo) WithContext(ctx context.Context) IWorkflowReferenceDo {
return w.withDO(w.DO.WithContext(ctx))
}
func (w workflowReferenceDo) ReadDB() IWorkflowReferenceDo {
return w.Clauses(dbresolver.Read)
}
func (w workflowReferenceDo) WriteDB() IWorkflowReferenceDo {
return w.Clauses(dbresolver.Write)
}
func (w workflowReferenceDo) Session(config *gorm.Session) IWorkflowReferenceDo {
return w.withDO(w.DO.Session(config))
}
func (w workflowReferenceDo) Clauses(conds ...clause.Expression) IWorkflowReferenceDo {
return w.withDO(w.DO.Clauses(conds...))
}
func (w workflowReferenceDo) Returning(value interface{}, columns ...string) IWorkflowReferenceDo {
return w.withDO(w.DO.Returning(value, columns...))
}
func (w workflowReferenceDo) Not(conds ...gen.Condition) IWorkflowReferenceDo {
return w.withDO(w.DO.Not(conds...))
}
func (w workflowReferenceDo) Or(conds ...gen.Condition) IWorkflowReferenceDo {
return w.withDO(w.DO.Or(conds...))
}
func (w workflowReferenceDo) Select(conds ...field.Expr) IWorkflowReferenceDo {
return w.withDO(w.DO.Select(conds...))
}
func (w workflowReferenceDo) Where(conds ...gen.Condition) IWorkflowReferenceDo {
return w.withDO(w.DO.Where(conds...))
}
func (w workflowReferenceDo) Order(conds ...field.Expr) IWorkflowReferenceDo {
return w.withDO(w.DO.Order(conds...))
}
func (w workflowReferenceDo) Distinct(cols ...field.Expr) IWorkflowReferenceDo {
return w.withDO(w.DO.Distinct(cols...))
}
func (w workflowReferenceDo) Omit(cols ...field.Expr) IWorkflowReferenceDo {
return w.withDO(w.DO.Omit(cols...))
}
func (w workflowReferenceDo) Join(table schema.Tabler, on ...field.Expr) IWorkflowReferenceDo {
return w.withDO(w.DO.Join(table, on...))
}
func (w workflowReferenceDo) LeftJoin(table schema.Tabler, on ...field.Expr) IWorkflowReferenceDo {
return w.withDO(w.DO.LeftJoin(table, on...))
}
func (w workflowReferenceDo) RightJoin(table schema.Tabler, on ...field.Expr) IWorkflowReferenceDo {
return w.withDO(w.DO.RightJoin(table, on...))
}
func (w workflowReferenceDo) Group(cols ...field.Expr) IWorkflowReferenceDo {
return w.withDO(w.DO.Group(cols...))
}
func (w workflowReferenceDo) Having(conds ...gen.Condition) IWorkflowReferenceDo {
return w.withDO(w.DO.Having(conds...))
}
func (w workflowReferenceDo) Limit(limit int) IWorkflowReferenceDo {
return w.withDO(w.DO.Limit(limit))
}
func (w workflowReferenceDo) Offset(offset int) IWorkflowReferenceDo {
return w.withDO(w.DO.Offset(offset))
}
func (w workflowReferenceDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IWorkflowReferenceDo {
return w.withDO(w.DO.Scopes(funcs...))
}
func (w workflowReferenceDo) Unscoped() IWorkflowReferenceDo {
return w.withDO(w.DO.Unscoped())
}
func (w workflowReferenceDo) Create(values ...*model.WorkflowReference) error {
if len(values) == 0 {
return nil
}
return w.DO.Create(values)
}
func (w workflowReferenceDo) CreateInBatches(values []*model.WorkflowReference, batchSize int) error {
return w.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different from GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (w workflowReferenceDo) Save(values ...*model.WorkflowReference) error {
if len(values) == 0 {
return nil
}
return w.DO.Save(values)
}
func (w workflowReferenceDo) First() (*model.WorkflowReference, error) {
if result, err := w.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowReference), nil
}
}
func (w workflowReferenceDo) Take() (*model.WorkflowReference, error) {
if result, err := w.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowReference), nil
}
}
func (w workflowReferenceDo) Last() (*model.WorkflowReference, error) {
if result, err := w.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowReference), nil
}
}
func (w workflowReferenceDo) Find() ([]*model.WorkflowReference, error) {
result, err := w.DO.Find()
return result.([]*model.WorkflowReference), err
}
func (w workflowReferenceDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.WorkflowReference, err error) {
buf := make([]*model.WorkflowReference, 0, batchSize)
err = w.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (w workflowReferenceDo) FindInBatches(result *[]*model.WorkflowReference, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return w.DO.FindInBatches(result, batchSize, fc)
}
func (w workflowReferenceDo) Attrs(attrs ...field.AssignExpr) IWorkflowReferenceDo {
return w.withDO(w.DO.Attrs(attrs...))
}
func (w workflowReferenceDo) Assign(attrs ...field.AssignExpr) IWorkflowReferenceDo {
return w.withDO(w.DO.Assign(attrs...))
}
func (w workflowReferenceDo) Joins(fields ...field.RelationField) IWorkflowReferenceDo {
for _, _f := range fields {
w = *w.withDO(w.DO.Joins(_f))
}
return &w
}
func (w workflowReferenceDo) Preload(fields ...field.RelationField) IWorkflowReferenceDo {
for _, _f := range fields {
w = *w.withDO(w.DO.Preload(_f))
}
return &w
}
func (w workflowReferenceDo) FirstOrInit() (*model.WorkflowReference, error) {
if result, err := w.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowReference), nil
}
}
func (w workflowReferenceDo) FirstOrCreate() (*model.WorkflowReference, error) {
if result, err := w.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowReference), nil
}
}
func (w workflowReferenceDo) FindByPage(offset int, limit int) (result []*model.WorkflowReference, count int64, err error) {
result, err = w.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = w.Offset(-1).Limit(-1).Count()
return
}
func (w workflowReferenceDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = w.Count()
if err != nil {
return
}
err = w.Offset(offset).Limit(limit).Scan(result)
return
}
func (w workflowReferenceDo) Scan(result interface{}) (err error) {
return w.DO.Scan(result)
}
func (w workflowReferenceDo) Delete(models ...*model.WorkflowReference) (result gen.ResultInfo, err error) {
return w.DO.Delete(models)
}
func (w *workflowReferenceDo) withDO(do gen.Dao) *workflowReferenceDo {
w.DO = *do.(*gen.DO)
return w
}
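A small sketch of a typical check against the reference table above: counting enabled references to a workflow (status 1 = enabled, per the column comment) before, say, allowing it to be deleted. Written as if inside this query package; the delete-guard use case itself is an assumption:

// Hypothetical guard: reports whether the workflow is still referenced.
func isReferenced(ctx context.Context, db *gorm.DB, workflowID int64) (bool, error) {
	wr := newWorkflowReference(db)
	n, err := wr.WithContext(ctx).
		Where(wr.ReferredID.Eq(workflowID), wr.Status.Eq(1)). // enabled references only
		Count()
	if err != nil {
		return false, err
	}
	return n > 0, nil
}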

View File

@@ -0,0 +1,405 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/repo/dal/model"
)
func newWorkflowSnapshot(db *gorm.DB, opts ...gen.DOOption) workflowSnapshot {
_workflowSnapshot := workflowSnapshot{}
_workflowSnapshot.workflowSnapshotDo.UseDB(db, opts...)
_workflowSnapshot.workflowSnapshotDo.UseModel(&model.WorkflowSnapshot{})
tableName := _workflowSnapshot.workflowSnapshotDo.TableName()
_workflowSnapshot.ALL = field.NewAsterisk(tableName)
_workflowSnapshot.WorkflowID = field.NewInt64(tableName, "workflow_id")
_workflowSnapshot.CommitID = field.NewString(tableName, "commit_id")
_workflowSnapshot.Canvas = field.NewString(tableName, "canvas")
_workflowSnapshot.InputParams = field.NewString(tableName, "input_params")
_workflowSnapshot.OutputParams = field.NewString(tableName, "output_params")
_workflowSnapshot.CreatedAt = field.NewInt64(tableName, "created_at")
_workflowSnapshot.ID = field.NewInt64(tableName, "id")
_workflowSnapshot.fillFieldMap()
return _workflowSnapshot
}
// workflowSnapshot snapshot for executed workflow draft
type workflowSnapshot struct {
workflowSnapshotDo
ALL field.Asterisk
WorkflowID field.Int64 // workflow id this snapshot belongs to
CommitID field.String // the commit id of the workflow draft
Canvas field.String // frontend schema for this snapshot
InputParams field.String // input parameter info
OutputParams field.String // output parameter info
CreatedAt field.Int64
ID field.Int64 // ID
fieldMap map[string]field.Expr
}
func (w workflowSnapshot) Table(newTableName string) *workflowSnapshot {
w.workflowSnapshotDo.UseTable(newTableName)
return w.updateTableName(newTableName)
}
func (w workflowSnapshot) As(alias string) *workflowSnapshot {
w.workflowSnapshotDo.DO = *(w.workflowSnapshotDo.As(alias).(*gen.DO))
return w.updateTableName(alias)
}
func (w *workflowSnapshot) updateTableName(table string) *workflowSnapshot {
w.ALL = field.NewAsterisk(table)
w.WorkflowID = field.NewInt64(table, "workflow_id")
w.CommitID = field.NewString(table, "commit_id")
w.Canvas = field.NewString(table, "canvas")
w.InputParams = field.NewString(table, "input_params")
w.OutputParams = field.NewString(table, "output_params")
w.CreatedAt = field.NewInt64(table, "created_at")
w.ID = field.NewInt64(table, "id")
w.fillFieldMap()
return w
}
func (w *workflowSnapshot) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := w.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (w *workflowSnapshot) fillFieldMap() {
w.fieldMap = make(map[string]field.Expr, 7)
w.fieldMap["workflow_id"] = w.WorkflowID
w.fieldMap["commit_id"] = w.CommitID
w.fieldMap["canvas"] = w.Canvas
w.fieldMap["input_params"] = w.InputParams
w.fieldMap["output_params"] = w.OutputParams
w.fieldMap["created_at"] = w.CreatedAt
w.fieldMap["id"] = w.ID
}
func (w workflowSnapshot) clone(db *gorm.DB) workflowSnapshot {
w.workflowSnapshotDo.ReplaceConnPool(db.Statement.ConnPool)
return w
}
func (w workflowSnapshot) replaceDB(db *gorm.DB) workflowSnapshot {
w.workflowSnapshotDo.ReplaceDB(db)
return w
}
type workflowSnapshotDo struct{ gen.DO }
type IWorkflowSnapshotDo interface {
gen.SubQuery
Debug() IWorkflowSnapshotDo
WithContext(ctx context.Context) IWorkflowSnapshotDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() IWorkflowSnapshotDo
WriteDB() IWorkflowSnapshotDo
As(alias string) gen.Dao
Session(config *gorm.Session) IWorkflowSnapshotDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) IWorkflowSnapshotDo
Not(conds ...gen.Condition) IWorkflowSnapshotDo
Or(conds ...gen.Condition) IWorkflowSnapshotDo
Select(conds ...field.Expr) IWorkflowSnapshotDo
Where(conds ...gen.Condition) IWorkflowSnapshotDo
Order(conds ...field.Expr) IWorkflowSnapshotDo
Distinct(cols ...field.Expr) IWorkflowSnapshotDo
Omit(cols ...field.Expr) IWorkflowSnapshotDo
Join(table schema.Tabler, on ...field.Expr) IWorkflowSnapshotDo
LeftJoin(table schema.Tabler, on ...field.Expr) IWorkflowSnapshotDo
RightJoin(table schema.Tabler, on ...field.Expr) IWorkflowSnapshotDo
Group(cols ...field.Expr) IWorkflowSnapshotDo
Having(conds ...gen.Condition) IWorkflowSnapshotDo
Limit(limit int) IWorkflowSnapshotDo
Offset(offset int) IWorkflowSnapshotDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) IWorkflowSnapshotDo
Unscoped() IWorkflowSnapshotDo
Create(values ...*model.WorkflowSnapshot) error
CreateInBatches(values []*model.WorkflowSnapshot, batchSize int) error
Save(values ...*model.WorkflowSnapshot) error
First() (*model.WorkflowSnapshot, error)
Take() (*model.WorkflowSnapshot, error)
Last() (*model.WorkflowSnapshot, error)
Find() ([]*model.WorkflowSnapshot, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.WorkflowSnapshot, err error)
FindInBatches(result *[]*model.WorkflowSnapshot, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.WorkflowSnapshot) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) IWorkflowSnapshotDo
Assign(attrs ...field.AssignExpr) IWorkflowSnapshotDo
Joins(fields ...field.RelationField) IWorkflowSnapshotDo
Preload(fields ...field.RelationField) IWorkflowSnapshotDo
FirstOrInit() (*model.WorkflowSnapshot, error)
FirstOrCreate() (*model.WorkflowSnapshot, error)
FindByPage(offset int, limit int) (result []*model.WorkflowSnapshot, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) IWorkflowSnapshotDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (w workflowSnapshotDo) Debug() IWorkflowSnapshotDo {
return w.withDO(w.DO.Debug())
}
func (w workflowSnapshotDo) WithContext(ctx context.Context) IWorkflowSnapshotDo {
return w.withDO(w.DO.WithContext(ctx))
}
func (w workflowSnapshotDo) ReadDB() IWorkflowSnapshotDo {
return w.Clauses(dbresolver.Read)
}
func (w workflowSnapshotDo) WriteDB() IWorkflowSnapshotDo {
return w.Clauses(dbresolver.Write)
}
func (w workflowSnapshotDo) Session(config *gorm.Session) IWorkflowSnapshotDo {
return w.withDO(w.DO.Session(config))
}
func (w workflowSnapshotDo) Clauses(conds ...clause.Expression) IWorkflowSnapshotDo {
return w.withDO(w.DO.Clauses(conds...))
}
func (w workflowSnapshotDo) Returning(value interface{}, columns ...string) IWorkflowSnapshotDo {
return w.withDO(w.DO.Returning(value, columns...))
}
func (w workflowSnapshotDo) Not(conds ...gen.Condition) IWorkflowSnapshotDo {
return w.withDO(w.DO.Not(conds...))
}
func (w workflowSnapshotDo) Or(conds ...gen.Condition) IWorkflowSnapshotDo {
return w.withDO(w.DO.Or(conds...))
}
func (w workflowSnapshotDo) Select(conds ...field.Expr) IWorkflowSnapshotDo {
return w.withDO(w.DO.Select(conds...))
}
func (w workflowSnapshotDo) Where(conds ...gen.Condition) IWorkflowSnapshotDo {
return w.withDO(w.DO.Where(conds...))
}
func (w workflowSnapshotDo) Order(conds ...field.Expr) IWorkflowSnapshotDo {
return w.withDO(w.DO.Order(conds...))
}
func (w workflowSnapshotDo) Distinct(cols ...field.Expr) IWorkflowSnapshotDo {
return w.withDO(w.DO.Distinct(cols...))
}
func (w workflowSnapshotDo) Omit(cols ...field.Expr) IWorkflowSnapshotDo {
return w.withDO(w.DO.Omit(cols...))
}
func (w workflowSnapshotDo) Join(table schema.Tabler, on ...field.Expr) IWorkflowSnapshotDo {
return w.withDO(w.DO.Join(table, on...))
}
func (w workflowSnapshotDo) LeftJoin(table schema.Tabler, on ...field.Expr) IWorkflowSnapshotDo {
return w.withDO(w.DO.LeftJoin(table, on...))
}
func (w workflowSnapshotDo) RightJoin(table schema.Tabler, on ...field.Expr) IWorkflowSnapshotDo {
return w.withDO(w.DO.RightJoin(table, on...))
}
func (w workflowSnapshotDo) Group(cols ...field.Expr) IWorkflowSnapshotDo {
return w.withDO(w.DO.Group(cols...))
}
func (w workflowSnapshotDo) Having(conds ...gen.Condition) IWorkflowSnapshotDo {
return w.withDO(w.DO.Having(conds...))
}
func (w workflowSnapshotDo) Limit(limit int) IWorkflowSnapshotDo {
return w.withDO(w.DO.Limit(limit))
}
func (w workflowSnapshotDo) Offset(offset int) IWorkflowSnapshotDo {
return w.withDO(w.DO.Offset(offset))
}
func (w workflowSnapshotDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IWorkflowSnapshotDo {
return w.withDO(w.DO.Scopes(funcs...))
}
func (w workflowSnapshotDo) Unscoped() IWorkflowSnapshotDo {
return w.withDO(w.DO.Unscoped())
}
func (w workflowSnapshotDo) Create(values ...*model.WorkflowSnapshot) error {
if len(values) == 0 {
return nil
}
return w.DO.Create(values)
}
func (w workflowSnapshotDo) CreateInBatches(values []*model.WorkflowSnapshot, batchSize int) error {
return w.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (w workflowSnapshotDo) Save(values ...*model.WorkflowSnapshot) error {
if len(values) == 0 {
return nil
}
return w.DO.Save(values)
}
func (w workflowSnapshotDo) First() (*model.WorkflowSnapshot, error) {
if result, err := w.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowSnapshot), nil
}
}
func (w workflowSnapshotDo) Take() (*model.WorkflowSnapshot, error) {
if result, err := w.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowSnapshot), nil
}
}
func (w workflowSnapshotDo) Last() (*model.WorkflowSnapshot, error) {
if result, err := w.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowSnapshot), nil
}
}
func (w workflowSnapshotDo) Find() ([]*model.WorkflowSnapshot, error) {
result, err := w.DO.Find()
return result.([]*model.WorkflowSnapshot), err
}
func (w workflowSnapshotDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.WorkflowSnapshot, err error) {
buf := make([]*model.WorkflowSnapshot, 0, batchSize)
err = w.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (w workflowSnapshotDo) FindInBatches(result *[]*model.WorkflowSnapshot, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return w.DO.FindInBatches(result, batchSize, fc)
}
func (w workflowSnapshotDo) Attrs(attrs ...field.AssignExpr) IWorkflowSnapshotDo {
return w.withDO(w.DO.Attrs(attrs...))
}
func (w workflowSnapshotDo) Assign(attrs ...field.AssignExpr) IWorkflowSnapshotDo {
return w.withDO(w.DO.Assign(attrs...))
}
func (w workflowSnapshotDo) Joins(fields ...field.RelationField) IWorkflowSnapshotDo {
for _, _f := range fields {
w = *w.withDO(w.DO.Joins(_f))
}
return &w
}
func (w workflowSnapshotDo) Preload(fields ...field.RelationField) IWorkflowSnapshotDo {
for _, _f := range fields {
w = *w.withDO(w.DO.Preload(_f))
}
return &w
}
func (w workflowSnapshotDo) FirstOrInit() (*model.WorkflowSnapshot, error) {
if result, err := w.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowSnapshot), nil
}
}
func (w workflowSnapshotDo) FirstOrCreate() (*model.WorkflowSnapshot, error) {
if result, err := w.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowSnapshot), nil
}
}
func (w workflowSnapshotDo) FindByPage(offset int, limit int) (result []*model.WorkflowSnapshot, count int64, err error) {
result, err = w.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = w.Offset(-1).Limit(-1).Count()
return
}
func (w workflowSnapshotDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = w.Count()
if err != nil {
return
}
err = w.Offset(offset).Limit(limit).Scan(result)
return
}
func (w workflowSnapshotDo) Scan(result interface{}) (err error) {
return w.DO.Scan(result)
}
func (w workflowSnapshotDo) Delete(models ...*model.WorkflowSnapshot) (result gen.ResultInfo, err error) {
return w.DO.Delete(models)
}
func (w *workflowSnapshotDo) withDO(do gen.Dao) *workflowSnapshotDo {
w.DO = *do.(*gen.DO)
return w
}

View File

@@ -0,0 +1,421 @@
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
// Code generated by gorm.io/gen. DO NOT EDIT.
package query
import (
"context"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"gorm.io/gen"
"gorm.io/gen/field"
"gorm.io/plugin/dbresolver"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/repo/dal/model"
)
func newWorkflowVersion(db *gorm.DB, opts ...gen.DOOption) workflowVersion {
_workflowVersion := workflowVersion{}
_workflowVersion.workflowVersionDo.UseDB(db, opts...)
_workflowVersion.workflowVersionDo.UseModel(&model.WorkflowVersion{})
tableName := _workflowVersion.workflowVersionDo.TableName()
_workflowVersion.ALL = field.NewAsterisk(tableName)
_workflowVersion.ID = field.NewInt64(tableName, "id")
_workflowVersion.WorkflowID = field.NewInt64(tableName, "workflow_id")
_workflowVersion.Version = field.NewString(tableName, "version")
_workflowVersion.VersionDescription = field.NewString(tableName, "version_description")
_workflowVersion.Canvas = field.NewString(tableName, "canvas")
_workflowVersion.InputParams = field.NewString(tableName, "input_params")
_workflowVersion.OutputParams = field.NewString(tableName, "output_params")
_workflowVersion.CreatorID = field.NewInt64(tableName, "creator_id")
_workflowVersion.CreatedAt = field.NewInt64(tableName, "created_at")
_workflowVersion.DeletedAt = field.NewField(tableName, "deleted_at")
_workflowVersion.CommitID = field.NewString(tableName, "commit_id")
_workflowVersion.fillFieldMap()
return _workflowVersion
}
// workflowVersion workflow canvas version table, recording the canvas information of each published version
type workflowVersion struct {
workflowVersionDo
ALL field.Asterisk
ID field.Int64 // ID
WorkflowID field.Int64 // workflow id
	Version            field.String // published version
	VersionDescription field.String // version description
	Canvas             field.String // frontend schema
InputParams field.String
OutputParams field.String
	CreatorID          field.Int64  // publisher user ID
	CreatedAt          field.Int64  // create time in milliseconds
	DeletedAt          field.Field  // delete time in milliseconds
CommitID field.String // the commit id corresponding to this version
fieldMap map[string]field.Expr
}
func (w workflowVersion) Table(newTableName string) *workflowVersion {
w.workflowVersionDo.UseTable(newTableName)
return w.updateTableName(newTableName)
}
func (w workflowVersion) As(alias string) *workflowVersion {
w.workflowVersionDo.DO = *(w.workflowVersionDo.As(alias).(*gen.DO))
return w.updateTableName(alias)
}
func (w *workflowVersion) updateTableName(table string) *workflowVersion {
w.ALL = field.NewAsterisk(table)
w.ID = field.NewInt64(table, "id")
w.WorkflowID = field.NewInt64(table, "workflow_id")
w.Version = field.NewString(table, "version")
w.VersionDescription = field.NewString(table, "version_description")
w.Canvas = field.NewString(table, "canvas")
w.InputParams = field.NewString(table, "input_params")
w.OutputParams = field.NewString(table, "output_params")
w.CreatorID = field.NewInt64(table, "creator_id")
w.CreatedAt = field.NewInt64(table, "created_at")
w.DeletedAt = field.NewField(table, "deleted_at")
w.CommitID = field.NewString(table, "commit_id")
w.fillFieldMap()
return w
}
func (w *workflowVersion) GetFieldByName(fieldName string) (field.OrderExpr, bool) {
_f, ok := w.fieldMap[fieldName]
if !ok || _f == nil {
return nil, false
}
_oe, ok := _f.(field.OrderExpr)
return _oe, ok
}
func (w *workflowVersion) fillFieldMap() {
w.fieldMap = make(map[string]field.Expr, 11)
w.fieldMap["id"] = w.ID
w.fieldMap["workflow_id"] = w.WorkflowID
w.fieldMap["version"] = w.Version
w.fieldMap["version_description"] = w.VersionDescription
w.fieldMap["canvas"] = w.Canvas
w.fieldMap["input_params"] = w.InputParams
w.fieldMap["output_params"] = w.OutputParams
w.fieldMap["creator_id"] = w.CreatorID
w.fieldMap["created_at"] = w.CreatedAt
w.fieldMap["deleted_at"] = w.DeletedAt
w.fieldMap["commit_id"] = w.CommitID
}
func (w workflowVersion) clone(db *gorm.DB) workflowVersion {
w.workflowVersionDo.ReplaceConnPool(db.Statement.ConnPool)
return w
}
func (w workflowVersion) replaceDB(db *gorm.DB) workflowVersion {
w.workflowVersionDo.ReplaceDB(db)
return w
}
type workflowVersionDo struct{ gen.DO }
type IWorkflowVersionDo interface {
gen.SubQuery
Debug() IWorkflowVersionDo
WithContext(ctx context.Context) IWorkflowVersionDo
WithResult(fc func(tx gen.Dao)) gen.ResultInfo
ReplaceDB(db *gorm.DB)
ReadDB() IWorkflowVersionDo
WriteDB() IWorkflowVersionDo
As(alias string) gen.Dao
Session(config *gorm.Session) IWorkflowVersionDo
Columns(cols ...field.Expr) gen.Columns
Clauses(conds ...clause.Expression) IWorkflowVersionDo
Not(conds ...gen.Condition) IWorkflowVersionDo
Or(conds ...gen.Condition) IWorkflowVersionDo
Select(conds ...field.Expr) IWorkflowVersionDo
Where(conds ...gen.Condition) IWorkflowVersionDo
Order(conds ...field.Expr) IWorkflowVersionDo
Distinct(cols ...field.Expr) IWorkflowVersionDo
Omit(cols ...field.Expr) IWorkflowVersionDo
Join(table schema.Tabler, on ...field.Expr) IWorkflowVersionDo
LeftJoin(table schema.Tabler, on ...field.Expr) IWorkflowVersionDo
RightJoin(table schema.Tabler, on ...field.Expr) IWorkflowVersionDo
Group(cols ...field.Expr) IWorkflowVersionDo
Having(conds ...gen.Condition) IWorkflowVersionDo
Limit(limit int) IWorkflowVersionDo
Offset(offset int) IWorkflowVersionDo
Count() (count int64, err error)
Scopes(funcs ...func(gen.Dao) gen.Dao) IWorkflowVersionDo
Unscoped() IWorkflowVersionDo
Create(values ...*model.WorkflowVersion) error
CreateInBatches(values []*model.WorkflowVersion, batchSize int) error
Save(values ...*model.WorkflowVersion) error
First() (*model.WorkflowVersion, error)
Take() (*model.WorkflowVersion, error)
Last() (*model.WorkflowVersion, error)
Find() ([]*model.WorkflowVersion, error)
FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.WorkflowVersion, err error)
FindInBatches(result *[]*model.WorkflowVersion, batchSize int, fc func(tx gen.Dao, batch int) error) error
Pluck(column field.Expr, dest interface{}) error
Delete(...*model.WorkflowVersion) (info gen.ResultInfo, err error)
Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
Updates(value interface{}) (info gen.ResultInfo, err error)
UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error)
UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error)
UpdateColumns(value interface{}) (info gen.ResultInfo, err error)
UpdateFrom(q gen.SubQuery) gen.Dao
Attrs(attrs ...field.AssignExpr) IWorkflowVersionDo
Assign(attrs ...field.AssignExpr) IWorkflowVersionDo
Joins(fields ...field.RelationField) IWorkflowVersionDo
Preload(fields ...field.RelationField) IWorkflowVersionDo
FirstOrInit() (*model.WorkflowVersion, error)
FirstOrCreate() (*model.WorkflowVersion, error)
FindByPage(offset int, limit int) (result []*model.WorkflowVersion, count int64, err error)
ScanByPage(result interface{}, offset int, limit int) (count int64, err error)
Scan(result interface{}) (err error)
Returning(value interface{}, columns ...string) IWorkflowVersionDo
UnderlyingDB() *gorm.DB
schema.Tabler
}
func (w workflowVersionDo) Debug() IWorkflowVersionDo {
return w.withDO(w.DO.Debug())
}
func (w workflowVersionDo) WithContext(ctx context.Context) IWorkflowVersionDo {
return w.withDO(w.DO.WithContext(ctx))
}
func (w workflowVersionDo) ReadDB() IWorkflowVersionDo {
return w.Clauses(dbresolver.Read)
}
func (w workflowVersionDo) WriteDB() IWorkflowVersionDo {
return w.Clauses(dbresolver.Write)
}
func (w workflowVersionDo) Session(config *gorm.Session) IWorkflowVersionDo {
return w.withDO(w.DO.Session(config))
}
func (w workflowVersionDo) Clauses(conds ...clause.Expression) IWorkflowVersionDo {
return w.withDO(w.DO.Clauses(conds...))
}
func (w workflowVersionDo) Returning(value interface{}, columns ...string) IWorkflowVersionDo {
return w.withDO(w.DO.Returning(value, columns...))
}
func (w workflowVersionDo) Not(conds ...gen.Condition) IWorkflowVersionDo {
return w.withDO(w.DO.Not(conds...))
}
func (w workflowVersionDo) Or(conds ...gen.Condition) IWorkflowVersionDo {
return w.withDO(w.DO.Or(conds...))
}
func (w workflowVersionDo) Select(conds ...field.Expr) IWorkflowVersionDo {
return w.withDO(w.DO.Select(conds...))
}
func (w workflowVersionDo) Where(conds ...gen.Condition) IWorkflowVersionDo {
return w.withDO(w.DO.Where(conds...))
}
func (w workflowVersionDo) Order(conds ...field.Expr) IWorkflowVersionDo {
return w.withDO(w.DO.Order(conds...))
}
func (w workflowVersionDo) Distinct(cols ...field.Expr) IWorkflowVersionDo {
return w.withDO(w.DO.Distinct(cols...))
}
func (w workflowVersionDo) Omit(cols ...field.Expr) IWorkflowVersionDo {
return w.withDO(w.DO.Omit(cols...))
}
func (w workflowVersionDo) Join(table schema.Tabler, on ...field.Expr) IWorkflowVersionDo {
return w.withDO(w.DO.Join(table, on...))
}
func (w workflowVersionDo) LeftJoin(table schema.Tabler, on ...field.Expr) IWorkflowVersionDo {
return w.withDO(w.DO.LeftJoin(table, on...))
}
func (w workflowVersionDo) RightJoin(table schema.Tabler, on ...field.Expr) IWorkflowVersionDo {
return w.withDO(w.DO.RightJoin(table, on...))
}
func (w workflowVersionDo) Group(cols ...field.Expr) IWorkflowVersionDo {
return w.withDO(w.DO.Group(cols...))
}
func (w workflowVersionDo) Having(conds ...gen.Condition) IWorkflowVersionDo {
return w.withDO(w.DO.Having(conds...))
}
func (w workflowVersionDo) Limit(limit int) IWorkflowVersionDo {
return w.withDO(w.DO.Limit(limit))
}
func (w workflowVersionDo) Offset(offset int) IWorkflowVersionDo {
return w.withDO(w.DO.Offset(offset))
}
func (w workflowVersionDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IWorkflowVersionDo {
return w.withDO(w.DO.Scopes(funcs...))
}
func (w workflowVersionDo) Unscoped() IWorkflowVersionDo {
return w.withDO(w.DO.Unscoped())
}
func (w workflowVersionDo) Create(values ...*model.WorkflowVersion) error {
if len(values) == 0 {
return nil
}
return w.DO.Create(values)
}
func (w workflowVersionDo) CreateInBatches(values []*model.WorkflowVersion, batchSize int) error {
return w.DO.CreateInBatches(values, batchSize)
}
// Save : !!! underlying implementation is different with GORM
// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values)
func (w workflowVersionDo) Save(values ...*model.WorkflowVersion) error {
if len(values) == 0 {
return nil
}
return w.DO.Save(values)
}
func (w workflowVersionDo) First() (*model.WorkflowVersion, error) {
if result, err := w.DO.First(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowVersion), nil
}
}
func (w workflowVersionDo) Take() (*model.WorkflowVersion, error) {
if result, err := w.DO.Take(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowVersion), nil
}
}
func (w workflowVersionDo) Last() (*model.WorkflowVersion, error) {
if result, err := w.DO.Last(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowVersion), nil
}
}
func (w workflowVersionDo) Find() ([]*model.WorkflowVersion, error) {
result, err := w.DO.Find()
return result.([]*model.WorkflowVersion), err
}
func (w workflowVersionDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.WorkflowVersion, err error) {
buf := make([]*model.WorkflowVersion, 0, batchSize)
err = w.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error {
defer func() { results = append(results, buf...) }()
return fc(tx, batch)
})
return results, err
}
func (w workflowVersionDo) FindInBatches(result *[]*model.WorkflowVersion, batchSize int, fc func(tx gen.Dao, batch int) error) error {
return w.DO.FindInBatches(result, batchSize, fc)
}
func (w workflowVersionDo) Attrs(attrs ...field.AssignExpr) IWorkflowVersionDo {
return w.withDO(w.DO.Attrs(attrs...))
}
func (w workflowVersionDo) Assign(attrs ...field.AssignExpr) IWorkflowVersionDo {
return w.withDO(w.DO.Assign(attrs...))
}
func (w workflowVersionDo) Joins(fields ...field.RelationField) IWorkflowVersionDo {
for _, _f := range fields {
w = *w.withDO(w.DO.Joins(_f))
}
return &w
}
func (w workflowVersionDo) Preload(fields ...field.RelationField) IWorkflowVersionDo {
for _, _f := range fields {
w = *w.withDO(w.DO.Preload(_f))
}
return &w
}
func (w workflowVersionDo) FirstOrInit() (*model.WorkflowVersion, error) {
if result, err := w.DO.FirstOrInit(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowVersion), nil
}
}
func (w workflowVersionDo) FirstOrCreate() (*model.WorkflowVersion, error) {
if result, err := w.DO.FirstOrCreate(); err != nil {
return nil, err
} else {
return result.(*model.WorkflowVersion), nil
}
}
func (w workflowVersionDo) FindByPage(offset int, limit int) (result []*model.WorkflowVersion, count int64, err error) {
result, err = w.Offset(offset).Limit(limit).Find()
if err != nil {
return
}
if size := len(result); 0 < limit && 0 < size && size < limit {
count = int64(size + offset)
return
}
count, err = w.Offset(-1).Limit(-1).Count()
return
}
func (w workflowVersionDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) {
count, err = w.Count()
if err != nil {
return
}
err = w.Offset(offset).Limit(limit).Scan(result)
return
}
func (w workflowVersionDo) Scan(result interface{}) (err error) {
return w.DO.Scan(result)
}
func (w workflowVersionDo) Delete(models ...*model.WorkflowVersion) (result gen.ResultInfo, err error) {
return w.DO.Delete(models)
}
func (w *workflowVersionDo) withDO(do gen.Dao) *workflowVersionDo {
w.DO = *do.(*gen.DO)
return w
}

View File

@@ -0,0 +1,561 @@
/*
* Copyright 2025 coze-dev Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package repo
import (
"context"
"errors"
"fmt"
"strconv"
"time"
"github.com/redis/go-redis/v9"
"gorm.io/gorm"
"github.com/coze-dev/coze-studio/backend/domain/workflow/entity"
"github.com/coze-dev/coze-studio/backend/domain/workflow/entity/vo"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/repo/dal/model"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/repo/dal/query"
"github.com/coze-dev/coze-studio/backend/pkg/lang/ptr"
"github.com/coze-dev/coze-studio/backend/pkg/lang/slices"
"github.com/coze-dev/coze-studio/backend/pkg/lang/ternary"
"github.com/coze-dev/coze-studio/backend/pkg/logs"
"github.com/coze-dev/coze-studio/backend/pkg/sonic"
"github.com/coze-dev/coze-studio/backend/types/errno"
)
type executeHistoryStoreImpl struct {
query *query.Query
redis *redis.Client
}
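// CreateWorkflowExecution persists a new workflow execution row in the running state.
// For sub-workflow executions (ParentNodeID set), the insert and the back-fill of the
// parent node execution's sub_execute_id are performed inside a single transaction.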
func (e *executeHistoryStoreImpl) CreateWorkflowExecution(ctx context.Context, execution *entity.WorkflowExecution) (err error) {
defer func() {
if err != nil {
err = vo.WrapIfNeeded(errno.ErrDatabaseError, err)
}
}()
	var mode int32
	switch execution.Mode {
	case vo.ExecuteModeDebug:
		mode = 1
	case vo.ExecuteModeRelease:
		mode = 2
	case vo.ExecuteModeNodeDebug:
		mode = 3
	}
var syncPattern int32
switch execution.SyncPattern {
case vo.SyncPatternSync:
syncPattern = 1
case vo.SyncPatternAsync:
syncPattern = 2
case vo.SyncPatternStream:
syncPattern = 3
default:
}
wfExec := &model.WorkflowExecution{
ID: execution.ID,
WorkflowID: execution.WorkflowID,
Version: execution.Version,
SpaceID: execution.SpaceID,
Mode: mode,
OperatorID: execution.Operator,
Status: int32(entity.WorkflowRunning),
Input: ptr.FromOrDefault(execution.Input, ""),
RootExecutionID: execution.RootExecutionID,
ParentNodeID: ptr.FromOrDefault(execution.ParentNodeID, ""),
AppID: ptr.FromOrDefault(execution.AppID, 0),
AgentID: ptr.FromOrDefault(execution.AgentID, 0),
ConnectorID: execution.ConnectorID,
ConnectorUID: execution.ConnectorUID,
NodeCount: execution.NodeCount,
SyncPattern: syncPattern,
CommitID: execution.CommitID,
LogID: execution.LogID,
}
if execution.ParentNodeID == nil {
return e.query.WorkflowExecution.WithContext(ctx).Create(wfExec)
}
	return e.query.Transaction(func(tx *query.Query) error {
		if err := tx.WorkflowExecution.WithContext(ctx).Create(wfExec); err != nil {
			return err
		}
		// update the parent node execution's sub execute id within the same transaction
		if _, err := tx.NodeExecution.WithContext(ctx).Where(tx.NodeExecution.ID.Eq(*execution.ParentNodeExecuteID)).
			UpdateColumn(tx.NodeExecution.SubExecuteID, wfExec.ID); err != nil {
			return err
		}
		return nil
	})
}
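// UpdateWorkflowExecution writes the terminal fields of an execution (status, output,
// duration, error info, token usage), but only while the stored status is one of
// allowedStatus. When no row matches, the current status is returned so the caller can
// tell why the update was skipped. An illustrative call (identifiers are hypothetical):
//
//	rows, status, err := store.UpdateWorkflowExecution(ctx, exec,
//		[]entity.WorkflowExecuteStatus{entity.WorkflowRunning})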
func (e *executeHistoryStoreImpl) UpdateWorkflowExecution(ctx context.Context, execution *entity.WorkflowExecution,
allowedStatus []entity.WorkflowExecuteStatus) (_ int64, _ entity.WorkflowExecuteStatus, err error) {
defer func() {
if err != nil {
err = vo.WrapIfNeeded(errno.ErrDatabaseError, err)
}
}()
// Use map[string]any to explicitly specify fields for update
updateMap := map[string]any{
"status": int32(execution.Status),
"output": ptr.FromOrDefault(execution.Output, ""),
"duration": execution.Duration.Milliseconds(),
"error_code": ptr.FromOrDefault(execution.ErrorCode, ""),
"fail_reason": ptr.FromOrDefault(execution.FailReason, ""),
"resume_event_id": ptr.FromOrDefault(execution.CurrentResumingEventID, 0),
}
if execution.TokenInfo != nil {
updateMap["input_tokens"] = execution.TokenInfo.InputTokens
updateMap["output_tokens"] = execution.TokenInfo.OutputTokens
}
statuses := slices.Transform(allowedStatus, func(e entity.WorkflowExecuteStatus) int32 {
return int32(e)
})
info, err := e.query.WorkflowExecution.WithContext(ctx).Where(e.query.WorkflowExecution.ID.Eq(execution.ID),
e.query.WorkflowExecution.Status.In(statuses...)).Updates(updateMap)
if err != nil {
return 0, 0, fmt.Errorf("failed to update workflow execution: %w", err)
}
if info.RowsAffected == 0 {
wfExe, found, err := e.GetWorkflowExecution(ctx, execution.ID)
if err != nil {
return 0, 0, err
}
if !found {
return 0, 0, fmt.Errorf("workflow execution not found for ID %d", execution.ID)
}
return 0, wfExe.Status, nil
}
return info.RowsAffected, execution.Status, nil
}
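// TryLockWorkflowExecution claims a resume attempt through a conditional UPDATE: the row
// is only modified while it is still interrupted and not already claimed by another
// resume event (resume_event_id = 0). It reports whether the lock was obtained and, when
// it was not, the status the execution currently has.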
func (e *executeHistoryStoreImpl) TryLockWorkflowExecution(ctx context.Context, wfExeID, resumingEventID int64) (
_ bool, _ entity.WorkflowExecuteStatus, err error) {
defer func() {
if err != nil {
err = vo.WrapIfNeeded(errno.ErrDatabaseError, err)
}
}()
	// Conditionally update workflow_execution: set resume_event_id = resumingEventID and status = running,
	// but only where id = wfExeID, resume_event_id = 0 and status = interrupted.
result, err := e.query.WorkflowExecution.WithContext(ctx).
Where(e.query.WorkflowExecution.ID.Eq(wfExeID)).
Where(e.query.WorkflowExecution.ResumeEventID.Eq(0)).
Where(e.query.WorkflowExecution.Status.Eq(int32(entity.WorkflowInterrupted))).
Updates(map[string]interface{}{
"resume_event_id": resumingEventID,
"status": int32(entity.WorkflowRunning),
})
if err != nil {
return false, 0, fmt.Errorf("update workflow execution lock failed: %w", err)
}
// If no rows were updated, the lock attempt failed
if result.RowsAffected == 0 {
wfExe, found, err := e.GetWorkflowExecution(ctx, wfExeID)
if err != nil {
return false, 0, err
}
if !found {
return false, 0, fmt.Errorf("workflow execution not found for ID %d", wfExeID)
}
return false, wfExe.Status, nil
}
return true, entity.WorkflowInterrupted, nil
}
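// GetWorkflowExecution loads a single execution row and converts it into the domain
// entity; the second return value reports whether the row exists.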
func (e *executeHistoryStoreImpl) GetWorkflowExecution(ctx context.Context, id int64) (*entity.WorkflowExecution, bool, error) {
rootExes, err := e.query.WorkflowExecution.WithContext(ctx).
Where(e.query.WorkflowExecution.ID.Eq(id)).
Find()
if err != nil {
return nil, false, vo.WrapError(errno.ErrDatabaseError, fmt.Errorf("failed to find workflow execution: %v", err))
}
if len(rootExes) == 0 {
return nil, false, nil
}
rootExe := rootExes[0]
var exeMode vo.ExecuteMode
if rootExe.Mode == 1 {
exeMode = vo.ExecuteModeDebug
} else if rootExe.Mode == 2 {
exeMode = vo.ExecuteModeRelease
} else {
exeMode = vo.ExecuteModeNodeDebug
}
var syncPattern vo.SyncPattern
switch rootExe.SyncPattern {
case 1:
syncPattern = vo.SyncPatternSync
case 2:
syncPattern = vo.SyncPatternAsync
case 3:
syncPattern = vo.SyncPatternStream
default:
}
exe := &entity.WorkflowExecution{
ID: rootExe.ID,
WorkflowID: rootExe.WorkflowID,
Version: rootExe.Version,
SpaceID: rootExe.SpaceID,
ExecuteConfig: vo.ExecuteConfig{
Operator: rootExe.OperatorID,
Mode: exeMode,
AppID: ternary.IFElse(rootExe.AppID > 0, ptr.Of(rootExe.AppID), nil),
AgentID: ternary.IFElse(rootExe.AgentID > 0, ptr.Of(rootExe.AgentID), nil),
ConnectorID: rootExe.ConnectorID,
ConnectorUID: rootExe.ConnectorUID,
SyncPattern: syncPattern,
},
CreatedAt: time.UnixMilli(rootExe.CreatedAt),
LogID: rootExe.LogID,
NodeCount: rootExe.NodeCount,
Status: entity.WorkflowExecuteStatus(rootExe.Status),
Duration: time.Duration(rootExe.Duration) * time.Millisecond,
Input: &rootExe.Input,
Output: &rootExe.Output,
ErrorCode: &rootExe.ErrorCode,
FailReason: &rootExe.FailReason,
TokenInfo: &entity.TokenUsage{
InputTokens: rootExe.InputTokens,
OutputTokens: rootExe.OutputTokens,
},
UpdatedAt: ternary.IFElse(rootExe.UpdatedAt > 0, ptr.Of(time.UnixMilli(rootExe.UpdatedAt)), nil),
ParentNodeID: ptr.Of(rootExe.ParentNodeID),
ParentNodeExecuteID: nil, // keep it nil here, query parent node execution separately
NodeExecutions: nil, // keep it nil here, query node executions separately
RootExecutionID: rootExe.RootExecutionID,
CurrentResumingEventID: ternary.IFElse(rootExe.ResumeEventID == 0, nil, ptr.Of(rootExe.ResumeEventID)),
CommitID: rootExe.CommitID,
}
return exe, true, nil
}
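// CreateNodeExecution inserts a node execution row in the running state; the optional
// Extra payload is JSON-encoded before being stored.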
func (e *executeHistoryStoreImpl) CreateNodeExecution(ctx context.Context, execution *entity.NodeExecution) error {
nodeExec := &model.NodeExecution{
ID: execution.ID,
ExecuteID: execution.ExecuteID,
NodeID: execution.NodeID,
NodeName: execution.NodeName,
NodeType: string(execution.NodeType),
Status: int32(entity.NodeRunning),
Input: ptr.FromOrDefault(execution.Input, ""),
CompositeNodeIndex: int64(execution.Index),
CompositeNodeItems: ptr.FromOrDefault(execution.Items, ""),
ParentNodeID: ptr.FromOrDefault(execution.ParentNodeID, ""),
}
if execution.Extra != nil {
m, err := sonic.MarshalString(execution.Extra)
if err != nil {
return vo.WrapError(errno.ErrSerializationDeserializationFail,
fmt.Errorf("failed to marshal extra: %w", err))
}
nodeExec.Extra = m
}
return e.query.NodeExecution.WithContext(ctx).Create(nodeExec)
}
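// UpdateNodeExecutionStreaming buffers the latest streamed output of a running node in
// Redis (see nodeExecOutputKey) instead of rewriting MySQL on every chunk; the buffered
// value is merged back when running nodes are listed.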
func (e *executeHistoryStoreImpl) UpdateNodeExecutionStreaming(ctx context.Context, execution *entity.NodeExecution) error {
if execution.Output == nil {
return nil
}
key := fmt.Sprintf(nodeExecOutputKey, execution.ID)
if err := e.redis.Set(ctx, key, execution.Output, nodeExecDataExpiry).Err(); err != nil {
return vo.WrapError(errno.ErrRedisError, err)
}
return nil
}
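// UpdateNodeExecution persists the final state of a node execution: status, inputs and
// outputs, duration, token usage, error details and, for sub-workflow nodes, the linked
// sub execution id.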
func (e *executeHistoryStoreImpl) UpdateNodeExecution(ctx context.Context, execution *entity.NodeExecution) (err error) {
defer func() {
if err != nil {
err = vo.WrapIfNeeded(errno.ErrDatabaseError, err)
}
}()
nodeExec := &model.NodeExecution{
Status: int32(execution.Status),
Input: ptr.FromOrDefault(execution.Input, ""),
Output: ptr.FromOrDefault(execution.Output, ""),
RawOutput: ptr.FromOrDefault(execution.RawOutput, ""),
Duration: execution.Duration.Milliseconds(),
ErrorInfo: ptr.FromOrDefault(execution.ErrorInfo, ""),
ErrorLevel: ptr.FromOrDefault(execution.ErrorLevel, ""),
}
if execution.TokenInfo != nil {
nodeExec.InputTokens = execution.TokenInfo.InputTokens
nodeExec.OutputTokens = execution.TokenInfo.OutputTokens
}
if execution.Extra != nil {
m, err := sonic.MarshalString(execution.Extra)
if err != nil {
return fmt.Errorf("failed to marshal extra: %w", err)
}
nodeExec.Extra = m
}
if execution.SubWorkflowExecution != nil {
nodeExec.SubExecuteID = execution.SubWorkflowExecution.ID
}
_, err = e.query.NodeExecution.WithContext(ctx).Where(e.query.NodeExecution.ID.Eq(execution.ID)).Updates(nodeExec)
if err != nil {
return fmt.Errorf("failed to update node execution: %w", err)
}
return nil
}
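// CancelAllRunningNodes marks every still-running node of the execution as failed with a
// cancellation error, then moves all executions sharing this root execution to the
// cancelled status.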
func (e *executeHistoryStoreImpl) CancelAllRunningNodes(ctx context.Context, wfExeID int64) (err error) {
defer func() {
if err != nil {
err = vo.WrapIfNeeded(errno.ErrDatabaseError, err)
}
}()
_, err = e.query.NodeExecution.WithContext(ctx).
Where(e.query.NodeExecution.ExecuteID.Eq(wfExeID),
e.query.NodeExecution.Status.In(int32(entity.NodeRunning))).
Updates(map[string]interface{}{
"error_info": "workflow cancel by user",
"error_level": vo.LevelCancel,
"status": int32(entity.NodeFailed),
})
if err != nil {
return fmt.Errorf("failed to cancel running nodes: %w", err)
}
_, err = e.query.WorkflowExecution.WithContext(ctx).
Where(e.query.WorkflowExecution.RootExecutionID.Eq(wfExeID)).
Updates(map[string]interface{}{
"status": int32(entity.WorkflowCancel),
"fail_reason": "workflow cancel by user",
"error_code": strconv.Itoa(errno.ErrWorkflowCanceledByUser),
})
if err != nil {
return fmt.Errorf("failed to cancel workflow execution: %w", err)
}
return nil
}
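// convertNodeExecution maps a node_execution row onto the domain entity, converting
// millisecond timestamps into time values and leaving optional fields nil when the row
// holds zero values.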
func convertNodeExecution(nodeExec *model.NodeExecution) *entity.NodeExecution {
nodeExeEntity := &entity.NodeExecution{
ID: nodeExec.ID,
ExecuteID: nodeExec.ExecuteID,
NodeID: nodeExec.NodeID,
NodeName: nodeExec.NodeName,
NodeType: entity.NodeType(nodeExec.NodeType),
CreatedAt: time.UnixMilli(nodeExec.CreatedAt),
Status: entity.NodeExecuteStatus(nodeExec.Status),
Duration: time.Duration(nodeExec.Duration) * time.Millisecond,
Input: &nodeExec.Input,
Output: &nodeExec.Output,
RawOutput: &nodeExec.RawOutput,
ErrorInfo: &nodeExec.ErrorInfo,
ErrorLevel: &nodeExec.ErrorLevel,
TokenInfo: &entity.TokenUsage{InputTokens: nodeExec.InputTokens, OutputTokens: nodeExec.OutputTokens},
ParentNodeID: ternary.IFElse(nodeExec.ParentNodeID != "", ptr.Of(nodeExec.ParentNodeID), nil),
Index: int(nodeExec.CompositeNodeIndex),
Items: ternary.IFElse(nodeExec.CompositeNodeItems != "", ptr.Of(nodeExec.CompositeNodeItems), nil),
SubWorkflowExecution: ternary.IFElse(nodeExec.SubExecuteID > 0, &entity.WorkflowExecution{ID: nodeExec.SubExecuteID}, nil),
}
if nodeExec.UpdatedAt > 0 {
nodeExeEntity.UpdatedAt = ptr.Of(time.UnixMilli(nodeExec.UpdatedAt))
}
if nodeExec.SubExecuteID > 0 {
nodeExeEntity.SubWorkflowExecution = &entity.WorkflowExecution{
ID: nodeExec.SubExecuteID,
}
}
if len(nodeExec.Extra) > 0 {
var extra entity.NodeExtra
if err := sonic.UnmarshalString(nodeExec.Extra, &extra); err != nil {
logs.Errorf("failed to unmarshal extra: %v", err)
} else {
nodeExeEntity.Extra = &extra
}
}
return nodeExeEntity
}
func (e *executeHistoryStoreImpl) GetNodeExecutionsByWfExeID(ctx context.Context, wfExeID int64) (result []*entity.NodeExecution, err error) {
nodeExecs, err := e.query.NodeExecution.WithContext(ctx).
Where(e.query.NodeExecution.ExecuteID.Eq(wfExeID)).
Find()
if err != nil {
return nil, vo.WrapError(errno.ErrDatabaseError, fmt.Errorf("failed to find node executions: %v", err))
}
for _, nodeExec := range nodeExecs {
nodeExeEntity := convertNodeExecution(nodeExec)
// For nodes that are currently running and support streaming, their complete information needs to be retrieved from Redis.
if nodeExeEntity.Status == entity.NodeRunning {
meta := entity.NodeMetaByNodeType(nodeExeEntity.NodeType)
if meta.ExecutableMeta.IncrementalOutput {
if err := e.loadNodeExecutionFromRedis(ctx, nodeExeEntity); err != nil {
logs.CtxErrorf(ctx, "failed to load node execution from redis: %v", err)
}
}
}
result = append(result, nodeExeEntity)
}
return result, nil
}
func (e *executeHistoryStoreImpl) loadNodeExecutionFromRedis(ctx context.Context, nodeExeEntity *entity.NodeExecution) error {
key := fmt.Sprintf(nodeExecOutputKey, nodeExeEntity.ID)
result, err := e.redis.Get(ctx, key).Result()
if err != nil {
if errors.Is(err, redis.Nil) {
return nil
}
return vo.WrapError(errno.ErrRedisError, err)
}
if result != "" {
nodeExeEntity.Output = &result
}
return nil
}
func (e *executeHistoryStoreImpl) GetNodeExecution(ctx context.Context, wfExeID int64, nodeID string) (*entity.NodeExecution, bool, error) {
nodeExec, err := e.query.NodeExecution.WithContext(ctx).
Where(e.query.NodeExecution.ExecuteID.Eq(wfExeID), e.query.NodeExecution.NodeID.Eq(nodeID)).
First()
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, false, nil
}
return nil, false, vo.WrapError(errno.ErrDatabaseError, fmt.Errorf("failed to find node executions: %w", err))
}
nodeExeEntity := convertNodeExecution(nodeExec)
return nodeExeEntity, true, nil
}
func (e *executeHistoryStoreImpl) GetNodeExecutionByParent(ctx context.Context, wfExeID int64, parentNodeID string) (
[]*entity.NodeExecution, error) {
nodeExecs, err := e.query.NodeExecution.WithContext(ctx).
Where(e.query.NodeExecution.ExecuteID.Eq(wfExeID), e.query.NodeExecution.ParentNodeID.Eq(parentNodeID)).
Find()
if err != nil {
return nil, vo.WrapError(errno.ErrDatabaseError, fmt.Errorf("failed to find node executions: %w", err))
}
var result []*entity.NodeExecution
for _, nodeExec := range nodeExecs {
nodeExeEntity := convertNodeExecution(nodeExec)
result = append(result, nodeExeEntity)
}
return result, nil
}
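// Redis key patterns and TTLs used by the execute-history store; the %d and %s
// placeholders are filled with workflow, node, user and execution identifiers via
// fmt.Sprintf.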
const (
testRunLastExeKey = "test_run_last_exe_id:%d:%d"
nodeDebugLastExeKey = "node_debug_last_exe_id:%d:%s:%d"
nodeExecDataExpiry = 24 * time.Hour // keep it for 24 hours
nodeExecOutputKey = "wf:node_exec:output:%d"
)
func (e *executeHistoryStoreImpl) SetTestRunLatestExeID(ctx context.Context, wfID int64, uID int64, exeID int64) error {
key := fmt.Sprintf(testRunLastExeKey, wfID, uID)
err := e.redis.Set(ctx, key, exeID, 7*24*time.Hour).Err()
if err != nil {
return vo.WrapError(errno.ErrRedisError, err)
}
return nil
}
func (e *executeHistoryStoreImpl) GetTestRunLatestExeID(ctx context.Context, wfID int64, uID int64) (int64, error) {
key := fmt.Sprintf(testRunLastExeKey, wfID, uID)
exeIDStr, err := e.redis.Get(ctx, key).Result()
if err != nil {
if errors.Is(err, redis.Nil) {
return 0, nil
}
return 0, vo.WrapError(errno.ErrRedisError, err)
}
exeID, err := strconv.ParseInt(exeIDStr, 10, 64)
if err != nil {
return 0, err
}
return exeID, nil
}
func (e *executeHistoryStoreImpl) SetNodeDebugLatestExeID(ctx context.Context, wfID int64, nodeID string, uID int64, exeID int64) error {
key := fmt.Sprintf(nodeDebugLastExeKey, wfID, nodeID, uID)
err := e.redis.Set(ctx, key, exeID, 7*24*time.Hour).Err()
if err != nil {
return vo.WrapError(errno.ErrRedisError, err)
}
return nil
}
func (e *executeHistoryStoreImpl) GetNodeDebugLatestExeID(ctx context.Context, wfID int64, nodeID string, uID int64) (int64, error) {
key := fmt.Sprintf(nodeDebugLastExeKey, wfID, nodeID, uID)
exeIDStr, err := e.redis.Get(ctx, key).Result()
if err != nil {
if errors.Is(err, redis.Nil) {
return 0, nil
}
return 0, vo.WrapError(errno.ErrRedisError, err)
}
exeID, err := strconv.ParseInt(exeIDStr, 10, 64)
if err != nil {
return 0, err
}
return exeID, nil
}

View File

@@ -0,0 +1,119 @@
/*
* Copyright 2025 coze-dev Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package repo
import (
"context"
"fmt"
"regexp"
"testing"
"time"
"github.com/DATA-DOG/go-sqlmock"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"github.com/coze-dev/coze-studio/backend/domain/workflow/entity"
"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/repo/dal/query"
)
type ExecuteHistoryStoreSuite struct {
suite.Suite
db *gorm.DB
redis *redis.Client
mock sqlmock.Sqlmock
store *executeHistoryStoreImpl
}
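// SetupTest wires the store against in-memory stand-ins: miniredis for the Redis client
// and go-sqlmock behind a GORM MySQL dialector, so the suite runs without external
// services.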
func (s *ExecuteHistoryStoreSuite) SetupTest() {
var err error
mr, err := miniredis.Run()
assert.NoError(s.T(), err)
s.redis = redis.NewClient(&redis.Options{Addr: mr.Addr()})
mockDB, mock, err := sqlmock.New()
assert.NoError(s.T(), err)
s.mock = mock
dialector := mysql.New(mysql.Config{
Conn: mockDB,
SkipInitializeWithVersion: true,
})
s.db, err = gorm.Open(dialector, &gorm.Config{})
assert.NoError(s.T(), err)
s.store = &executeHistoryStoreImpl{
query: query.Use(s.db),
redis: s.redis,
}
}
func (s *ExecuteHistoryStoreSuite) TestNodeExecutionStreaming() {
ctx := context.Background()
wfExeID := int64(1)
nodeExecID := int64(12345)
nodeExecution := &entity.NodeExecution{
ID: nodeExecID,
ExecuteID: wfExeID,
NodeID: "54321",
NodeName: "Test Node",
NodeType: entity.NodeTypeOutputEmitter,
Status: entity.NodeRunning,
}
// 1. CreateNodeExecution
s.mock.ExpectBegin()
s.mock.ExpectExec(regexp.QuoteMeta(
"INSERT INTO `node_execution` (`execute_id`,`node_id`,`node_name`,`node_type`,`created_at`,`status`,`duration`,`input`,`output`,`raw_output`,`error_info`,`error_level`,`input_tokens`,`output_tokens`,`updated_at`,`composite_node_index`,`composite_node_items`,`parent_node_id`,`sub_execute_id`,`extra`,`id`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")).
WithArgs(nodeExecution.ExecuteID, nodeExecution.NodeID, nodeExecution.NodeName, string(nodeExecution.NodeType), sqlmock.AnyArg(), int32(entity.NodeRunning), int64(0), "", "", "", "", "", int64(0), int64(0), sqlmock.AnyArg(), int64(0), "", "", int64(0), "", nodeExecution.ID).
WillReturnResult(sqlmock.NewResult(1, 1))
s.mock.ExpectCommit()
err := s.store.CreateNodeExecution(ctx, nodeExecution)
assert.NoError(s.T(), err)
// 2. UpdateNodeExecutionStreaming
streamingOutput := "streaming output"
nodeExecution.Output = &streamingOutput
err = s.store.UpdateNodeExecutionStreaming(ctx, nodeExecution)
assert.NoError(s.T(), err)
val, err := s.redis.Get(ctx, fmt.Sprintf("wf:node_exec:output:%d", nodeExecID)).Result()
assert.NoError(s.T(), err)
assert.Equal(s.T(), streamingOutput, val)
// 3. GetNodeExecutionsByWfExeID
rows := sqlmock.NewRows([]string{"id", "execute_id", "node_id", "node_name", "node_type", "status", "created_at"}).
AddRow(nodeExecution.ID, nodeExecution.ExecuteID, nodeExecution.NodeID, nodeExecution.NodeName, string(nodeExecution.NodeType), int32(entity.NodeRunning), time.Now().UnixMilli())
s.mock.ExpectQuery(regexp.QuoteMeta(
"SELECT * FROM `node_execution` WHERE `node_execution`.`execute_id` = ?")).
WithArgs(wfExeID).
WillReturnRows(rows)
execs, err := s.store.GetNodeExecutionsByWfExeID(ctx, wfExeID)
assert.NoError(s.T(), err)
assert.Len(s.T(), execs, 1)
assert.Equal(s.T(), streamingOutput, *execs[0].Output)
}
func TestExecuteHistoryStore(t *testing.T) {
suite.Run(t, new(ExecuteHistoryStoreSuite))
}

View File

@@ -0,0 +1,249 @@
/*
* Copyright 2025 coze-dev Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package repo
import (
"context"
"errors"
"fmt"
"time"
"github.com/redis/go-redis/v9"
"github.com/coze-dev/coze-studio/backend/domain/workflow/entity"
"github.com/coze-dev/coze-studio/backend/domain/workflow/entity/vo"
"github.com/coze-dev/coze-studio/backend/pkg/sonic"
"github.com/coze-dev/coze-studio/backend/types/errno"
)
type interruptEventStoreImpl struct {
redis *redis.Client
}
const (
// interruptEventListKeyPattern stores events as a list (e.g., "interrupt_event_list:{wfExeID}")
interruptEventListKeyPattern = "interrupt_event_list:%d"
interruptEventTTL = 24 * time.Hour // Example: expire after 24 hours
previousResumedEventKeyPattern = "previous_resumed_event:%d"
)
// SaveInterruptEvents saves multiple interrupt events to the end of a Redis list.
func (i *interruptEventStoreImpl) SaveInterruptEvents(ctx context.Context, wfExeID int64, events []*entity.InterruptEvent) (err error) {
if len(events) == 0 {
return nil
}
defer func() {
if err != nil {
err = vo.WrapIfNeeded(errno.ErrRedisError, err)
}
}()
listKey := fmt.Sprintf(interruptEventListKeyPattern, wfExeID)
previousResumedEventKey := fmt.Sprintf(previousResumedEventKeyPattern, wfExeID)
currentEvents, err := i.ListInterruptEvents(ctx, wfExeID)
if err != nil {
return err
}
for _, currentE := range currentEvents {
if len(events) == 0 {
break
}
j := len(events)
for i := 0; i < j; i++ {
if events[i].ID == currentE.ID {
events = append(events[:i], events[i+1:]...)
i--
j--
}
}
}
if len(events) == 0 {
return nil
}
previousEventStr, err := i.redis.Get(ctx, previousResumedEventKey).Result()
if err != nil {
if !errors.Is(err, redis.Nil) {
return fmt.Errorf("failed to get previous resumed event for wfExeID %d: %w", wfExeID, err)
}
}
var previousEvent *entity.InterruptEvent
if previousEventStr != "" {
err = sonic.UnmarshalString(previousEventStr, &previousEvent)
if err != nil {
return vo.WrapError(errno.ErrSerializationDeserializationFail,
fmt.Errorf("failed to unmarshal previous resumed event (wfExeID %d) from JSON: %w", wfExeID, err))
}
}
var topPriorityEvent *entity.InterruptEvent
if previousEvent != nil {
for i := range events {
if previousEvent.NodeKey == events[i].NodeKey {
topPriorityEvent = events[i]
events = append(events[:i], events[i+1:]...)
break
}
}
}
pipe := i.redis.Pipeline()
eventJSONs := make([]interface{}, 0, len(events))
for _, event := range events {
eventJSON, err := sonic.MarshalString(event)
if err != nil {
return vo.WrapError(errno.ErrSerializationDeserializationFail,
fmt.Errorf("failed to marshal interrupt event %d to JSON: %w", event.ID, err))
}
eventJSONs = append(eventJSONs, eventJSON)
}
if topPriorityEvent != nil {
topPriorityEventJSON, err := sonic.MarshalString(topPriorityEvent)
if err != nil {
return vo.WrapError(errno.ErrSerializationDeserializationFail,
fmt.Errorf("failed to marshal top priority interrupt event %d to JSON: %w", topPriorityEvent.ID, err))
}
pipe.LPush(ctx, listKey, topPriorityEventJSON)
}
if len(eventJSONs) > 0 {
pipe.RPush(ctx, listKey, eventJSONs...)
}
pipe.Expire(ctx, listKey, interruptEventTTL)
_, err = pipe.Exec(ctx) // ignore_security_alert SQL_INJECTION
if err != nil {
return fmt.Errorf("failed to save interrupt events to Redis list: %w", err)
}
return nil
}
// GetFirstInterruptEvent retrieves the first interrupt event from the list without removing it.
func (i *interruptEventStoreImpl) GetFirstInterruptEvent(ctx context.Context, wfExeID int64) (
_ *entity.InterruptEvent, _ bool, err error) {
defer func() {
if err != nil {
err = vo.WrapIfNeeded(errno.ErrRedisError, err)
}
}()
listKey := fmt.Sprintf(interruptEventListKeyPattern, wfExeID)
eventJSON, err := i.redis.LIndex(ctx, listKey, 0).Result()
if err != nil {
if errors.Is(err, redis.Nil) {
return nil, false, nil // List is empty or key does not exist
}
return nil, false, fmt.Errorf("failed to get first interrupt event from Redis list for wfExeID %d: %w", wfExeID, err)
}
var event entity.InterruptEvent
err = sonic.UnmarshalString(eventJSON, &event)
if err != nil {
return nil, false, vo.WrapError(errno.ErrSerializationDeserializationFail,
fmt.Errorf("failed to unmarshal first interrupt event (wfExeID %d) from JSON: %w", wfExeID, err))
}
return &event, true, nil
}
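// UpdateFirstInterruptEvent overwrites the event at the head of the list and records it
// as the previously resumed event, so that a later SaveInterruptEvents call can push a
// re-interrupt of the same node back to the front of the queue.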
func (i *interruptEventStoreImpl) UpdateFirstInterruptEvent(ctx context.Context, wfExeID int64, event *entity.InterruptEvent) (err error) {
defer func() {
if err != nil {
err = vo.WrapIfNeeded(errno.ErrRedisError, err)
}
}()
listKey := fmt.Sprintf(interruptEventListKeyPattern, wfExeID)
eventJSON, err := sonic.MarshalString(event)
if err != nil {
return vo.WrapError(errno.ErrSerializationDeserializationFail,
fmt.Errorf("failed to marshal interrupt event %d to JSON: %w", event.ID, err))
}
err = i.redis.LSet(ctx, listKey, 0, eventJSON).Err()
if err != nil {
return fmt.Errorf("failed to update first interrupt event in Redis list for wfExeID %d: %w", wfExeID, err)
}
previousResumedEventKey := fmt.Sprintf(previousResumedEventKeyPattern, wfExeID)
err = i.redis.Set(ctx, previousResumedEventKey, eventJSON, interruptEventTTL).Err()
if err != nil {
return fmt.Errorf("failed to set previous resumed event for wfExeID %d: %w", wfExeID, err)
}
return nil
}
// PopFirstInterruptEvent retrieves and removes the first interrupt event from the list.
func (i *interruptEventStoreImpl) PopFirstInterruptEvent(ctx context.Context, wfExeID int64) (*entity.InterruptEvent, bool, error) {
listKey := fmt.Sprintf(interruptEventListKeyPattern, wfExeID)
eventJSON, err := i.redis.LPop(ctx, listKey).Result()
if err != nil {
if errors.Is(err, redis.Nil) {
return nil, false, nil // List is empty or key does not exist
}
return nil, false, vo.WrapError(errno.ErrRedisError,
fmt.Errorf("failed to pop first interrupt event from Redis list for wfExeID %d: %w", wfExeID, err))
}
var event entity.InterruptEvent
err = sonic.UnmarshalString(eventJSON, &event)
if err != nil {
// If unmarshalling fails, the event is already popped.
// Consider if you need to re-queue or handle this scenario.
return nil, true, vo.WrapError(errno.ErrSerializationDeserializationFail,
fmt.Errorf("failed to unmarshal popped interrupt event (wfExeID %d) from JSON: %w", wfExeID, err))
}
return &event, true, nil
}
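// ListInterruptEvents returns every pending interrupt event for the execution in queue
// order without consuming them.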
func (i *interruptEventStoreImpl) ListInterruptEvents(ctx context.Context, wfExeID int64) ([]*entity.InterruptEvent, error) {
listKey := fmt.Sprintf(interruptEventListKeyPattern, wfExeID)
eventJSONs, err := i.redis.LRange(ctx, listKey, 0, -1).Result()
if err != nil {
if errors.Is(err, redis.Nil) {
return nil, nil // List is empty or key does not exist
}
return nil, vo.WrapError(errno.ErrRedisError,
fmt.Errorf("failed to get all interrupt events from Redis list for wfExeID %d: %w", wfExeID, err))
}
var events []*entity.InterruptEvent
for _, s := range eventJSONs {
var event entity.InterruptEvent
err = sonic.UnmarshalString(s, &event)
if err != nil {
return nil, vo.WrapError(errno.ErrSerializationDeserializationFail,
fmt.Errorf("failed to unmarshal first interrupt event (wfExeID %d) from JSON: %w", wfExeID, err))
}
events = append(events, &event)
}
return events, nil
}

File diff suppressed because it is too large Load Diff