fix: replace workflow's hardcoded chat model for knowledge recall (#233)
This commit is contained in:
parent
5013a9ed53
commit
b9d03b148c
@@ -239,7 +239,7 @@ func newWfTestRunner(t *testing.T) *wfTestRunner {
 	mockTos := storageMock.NewMockStorage(ctrl)
 	mockTos.EXPECT().GetObjectUrl(gomock.Any(), gomock.Any(), gomock.Any()).Return("", nil).AnyTimes()
-	workflowRepo := service.NewWorkflowRepository(mockIDGen, db, redisClient, mockTos, cpStore)
+	workflowRepo := service.NewWorkflowRepository(mockIDGen, db, redisClient, mockTos, cpStore, nil)
 	mockey.Mock(appworkflow.GetWorkflowDomainSVC).Return(service.NewWorkflowService(workflowRepo)).Build()
 	mockey.Mock(workflow2.GetRepository).Return(workflowRepo).Build()

@@ -190,8 +190,11 @@ func initPrimaryServices(ctx context.Context, basicServices *basicServices) (*pr
 		return nil, err
 	}
 
-	workflowDomainSVC := workflow.InitService(
+	workflowDomainSVC, err := workflow.InitService(ctx,
 		basicServices.toWorkflowServiceComponents(pluginSVC, memorySVC, knowledgeSVC))
+	if err != nil {
+		return nil, err
+	}
 
 	shortcutSVC := shortcutcmd.InitService(basicServices.infra.DB, basicServices.infra.IDGenSVC)

@@ -0,0 +1,108 @@
+/*
+ * Copyright 2025 coze-dev Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package internal
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strconv"
+
+	ao "github.com/cloudwego/eino-ext/components/model/ark"
+	"github.com/cloudwego/eino-ext/components/model/deepseek"
+	"github.com/cloudwego/eino-ext/components/model/gemini"
+	"github.com/cloudwego/eino-ext/components/model/ollama"
+	mo "github.com/cloudwego/eino-ext/components/model/openai"
+	"github.com/cloudwego/eino-ext/components/model/qwen"
+	"github.com/coze-dev/coze-studio/backend/infra/contract/chatmodel"
+	"google.golang.org/genai"
+)
+
+func GetBuiltinChatModel(ctx context.Context, envPrefix string) (bcm chatmodel.BaseChatModel, configured bool, err error) {
+	getEnv := func(key string) string {
+		if val := os.Getenv(envPrefix + key); val != "" {
+			return val
+		}
+		return os.Getenv(key)
+	}
+
+	switch getEnv("BUILTIN_CM_TYPE") {
+	case "openai":
+		byAzure, _ := strconv.ParseBool(getEnv("BUILTIN_CM_OPENAI_BY_AZURE"))
+		bcm, err = mo.NewChatModel(ctx, &mo.ChatModelConfig{
+			APIKey:  getEnv("BUILTIN_CM_OPENAI_API_KEY"),
+			ByAzure: byAzure,
+			BaseURL: getEnv("BUILTIN_CM_OPENAI_BASE_URL"),
+			Model:   getEnv("BUILTIN_CM_OPENAI_MODEL"),
+		})
+	case "ark":
+		bcm, err = ao.NewChatModel(ctx, &ao.ChatModelConfig{
+			APIKey:  getEnv("BUILTIN_CM_ARK_API_KEY"),
+			Model:   getEnv("BUILTIN_CM_ARK_MODEL"),
+			BaseURL: getEnv("BUILTIN_CM_ARK_BASE_URL"),
+		})
+	case "deepseek":
+		bcm, err = deepseek.NewChatModel(ctx, &deepseek.ChatModelConfig{
+			APIKey:  getEnv("BUILTIN_CM_DEEPSEEK_API_KEY"),
+			BaseURL: getEnv("BUILTIN_CM_DEEPSEEK_BASE_URL"),
+			Model:   getEnv("BUILTIN_CM_DEEPSEEK_MODEL"),
+		})
+	case "ollama":
+		bcm, err = ollama.NewChatModel(ctx, &ollama.ChatModelConfig{
+			BaseURL: getEnv("BUILTIN_CM_OLLAMA_BASE_URL"),
+			Model:   getEnv("BUILTIN_CM_OLLAMA_MODEL"),
+		})
+	case "qwen":
+		bcm, err = qwen.NewChatModel(ctx, &qwen.ChatModelConfig{
+			APIKey:  getEnv("BUILTIN_CM_QWEN_API_KEY"),
+			BaseURL: getEnv("BUILTIN_CM_QWEN_BASE_URL"),
+			Model:   getEnv("BUILTIN_CM_QWEN_MODEL"),
+		})
+	case "gemini":
+		backend, convErr := strconv.ParseInt(getEnv("BUILTIN_CM_GEMINI_BACKEND"), 10, 64)
+		if convErr != nil {
+			return nil, false, convErr
+		}
+		c, clientErr := genai.NewClient(ctx, &genai.ClientConfig{
+			APIKey:   getEnv("BUILTIN_CM_GEMINI_API_KEY"),
+			Backend:  genai.Backend(backend),
+			Project:  getEnv("BUILTIN_CM_GEMINI_PROJECT"),
+			Location: getEnv("BUILTIN_CM_GEMINI_LOCATION"),
+			HTTPOptions: genai.HTTPOptions{
+				BaseURL: getEnv("BUILTIN_CM_GEMINI_BASE_URL"),
+			},
+		})
+		if clientErr != nil {
+			return nil, false, clientErr
+		}
+		bcm, err = gemini.NewChatModel(ctx, &gemini.Config{
+			Client: c,
+			Model:  getEnv("BUILTIN_CM_GEMINI_MODEL"),
+		})
+	default:
+		// accept builtin chat model not configured
+	}
+
+	if err != nil {
+		return nil, false, fmt.Errorf("knowledge init openai chat mode failed, %w", err)
+	}
+	if bcm != nil {
+		configured = true
+	}
+
+	return
+}

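For illustration only (not part of this commit): the envPrefix fallback in GetBuiltinChatModel is what lets each consumer override the shared BUILTIN_CM_* defaults without duplicating the whole configuration. A minimal runnable sketch of the same resolution rule, using only the standard library:

package main

import (
	"fmt"
	"os"
)

// resolve mirrors the getEnv closure in GetBuiltinChatModel: a prefixed
// variable wins, otherwise the unprefixed shared default is used.
func resolve(prefix, key string) string {
	if val := os.Getenv(prefix + key); val != "" {
		return val
	}
	return os.Getenv(key)
}

func main() {
	os.Setenv("BUILTIN_CM_TYPE", "ark")        // shared default for all consumers
	os.Setenv("WKR_BUILTIN_CM_TYPE", "openai") // override for workflow knowledge recall only

	fmt.Println(resolve("WKR_", "BUILTIN_CM_TYPE"))    // openai: prefixed value wins
	fmt.Println(resolve("NL2SQL_", "BUILTIN_CM_TYPE")) // ark: falls back to the shared default
}
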
@@ -28,25 +28,18 @@ import (
 	"github.com/cloudwego/eino-ext/components/embedding/ark"
 	ollamaEmb "github.com/cloudwego/eino-ext/components/embedding/ollama"
 	"github.com/cloudwego/eino-ext/components/embedding/openai"
-	ao "github.com/cloudwego/eino-ext/components/model/ark"
-	"github.com/cloudwego/eino-ext/components/model/deepseek"
-	"github.com/cloudwego/eino-ext/components/model/gemini"
-	"github.com/cloudwego/eino-ext/components/model/ollama"
-	mo "github.com/cloudwego/eino-ext/components/model/openai"
-	"github.com/cloudwego/eino-ext/components/model/qwen"
 	"github.com/cloudwego/eino/components/prompt"
 	"github.com/cloudwego/eino/schema"
+	"github.com/coze-dev/coze-studio/backend/application/internal"
 	"github.com/coze-dev/coze-studio/backend/infra/impl/embedding/http"
 	"github.com/milvus-io/milvus/client/v2/milvusclient"
 	"github.com/volcengine/volc-sdk-golang/service/vikingdb"
 	"github.com/volcengine/volc-sdk-golang/service/visual"
-	"google.golang.org/genai"
 	"gorm.io/gorm"
 
 	"github.com/coze-dev/coze-studio/backend/application/search"
 	knowledgeImpl "github.com/coze-dev/coze-studio/backend/domain/knowledge/service"
 	"github.com/coze-dev/coze-studio/backend/infra/contract/cache"
-	"github.com/coze-dev/coze-studio/backend/infra/contract/chatmodel"
 	"github.com/coze-dev/coze-studio/backend/infra/contract/document/nl2sql"
 	"github.com/coze-dev/coze-studio/backend/infra/contract/document/ocr"
 	"github.com/coze-dev/coze-studio/backend/infra/contract/document/searchstore"

@@ -131,7 +124,7 @@ func InitService(c *ServiceComponents) (*KnowledgeApplicationService, error) {
 	}
 
 	var rewriter messages2query.MessagesToQuery
-	if rewriterChatModel, _, err := getBuiltinChatModel(ctx, "M2Q_"); err != nil {
+	if rewriterChatModel, _, err := internal.GetBuiltinChatModel(ctx, "M2Q_"); err != nil {
 		return nil, err
 	} else {
 		filePath := filepath.Join(root, "resources/conf/prompt/messages_to_query_template_jinja2.json")

@@ -146,7 +139,7 @@ func InitService(c *ServiceComponents) (*KnowledgeApplicationService, error) {
 	}
 
 	var n2s nl2sql.NL2SQL
-	if n2sChatModel, _, err := getBuiltinChatModel(ctx, "NL2SQL_"); err != nil {
+	if n2sChatModel, _, err := internal.GetBuiltinChatModel(ctx, "NL2SQL_"); err != nil {
 		return nil, err
 	} else {
 		filePath := filepath.Join(root, "resources/conf/prompt/nl2sql_template_jinja2.json")

@@ -160,7 +153,7 @@ func InitService(c *ServiceComponents) (*KnowledgeApplicationService, error) {
 		}
 	}
 
-	imageAnnoChatModel, configured, err := getBuiltinChatModel(ctx, "IA_")
+	imageAnnoChatModel, configured, err := internal.GetBuiltinChatModel(ctx, "IA_")
 	if err != nil {
 		return nil, err
 	}

@@ -372,7 +365,6 @@ func getEmbedding(ctx context.Context) (embedding.Embedder, error) {
 			return nil, fmt.Errorf("init ollama embedding failed, err=%w", err)
 		}
 
-
 	case "http":
 		var (
 			httpEmbeddingBaseURL = os.Getenv("HTTP_EMBEDDING_ADDR")

@@ -394,81 +386,6 @@ func getEmbedding(ctx context.Context) (embedding.Embedder, error) {
 	return emb, nil
 }
 
-func getBuiltinChatModel(ctx context.Context, envPrefix string) (bcm chatmodel.BaseChatModel, configured bool, err error) {
-	getEnv := func(key string) string {
-		if val := os.Getenv(envPrefix + key); val != "" {
-			return val
-		}
-		return os.Getenv(key)
-	}
-
-	switch getEnv("BUILTIN_CM_TYPE") {
-	case "openai":
-		byAzure, _ := strconv.ParseBool(getEnv("BUILTIN_CM_OPENAI_BY_AZURE"))
-		bcm, err = mo.NewChatModel(ctx, &mo.ChatModelConfig{
-			APIKey:  getEnv("BUILTIN_CM_OPENAI_API_KEY"),
-			ByAzure: byAzure,
-			BaseURL: getEnv("BUILTIN_CM_OPENAI_BASE_URL"),
-			Model:   getEnv("BUILTIN_CM_OPENAI_MODEL"),
-		})
-	case "ark":
-		bcm, err = ao.NewChatModel(ctx, &ao.ChatModelConfig{
-			APIKey:  getEnv("BUILTIN_CM_ARK_API_KEY"),
-			Model:   getEnv("BUILTIN_CM_ARK_MODEL"),
-			BaseURL: getEnv("BUILTIN_CM_ARK_BASE_URL"),
-		})
-	case "deepseek":
-		bcm, err = deepseek.NewChatModel(ctx, &deepseek.ChatModelConfig{
-			APIKey:  getEnv("BUILTIN_CM_DEEPSEEK_API_KEY"),
-			BaseURL: getEnv("BUILTIN_CM_DEEPSEEK_BASE_URL"),
-			Model:   getEnv("BUILTIN_CM_DEEPSEEK_MODEL"),
-		})
-	case "ollama":
-		bcm, err = ollama.NewChatModel(ctx, &ollama.ChatModelConfig{
-			BaseURL: getEnv("BUILTIN_CM_OLLAMA_BASE_URL"),
-			Model:   getEnv("BUILTIN_CM_OLLAMA_MODEL"),
-		})
-	case "qwen":
-		bcm, err = qwen.NewChatModel(ctx, &qwen.ChatModelConfig{
-			APIKey:  getEnv("BUILTIN_CM_QWEN_API_KEY"),
-			BaseURL: getEnv("BUILTIN_CM_QWEN_BASE_URL"),
-			Model:   getEnv("BUILTIN_CM_QWEN_MODEL"),
-		})
-	case "gemini":
-		backend, convErr := strconv.ParseInt(getEnv("BUILTIN_CM_GEMINI_BACKEND"), 10, 64)
-		if convErr != nil {
-			return nil, false, convErr
-		}
-		c, clientErr := genai.NewClient(ctx, &genai.ClientConfig{
-			APIKey:   getEnv("BUILTIN_CM_GEMINI_API_KEY"),
-			Backend:  genai.Backend(backend),
-			Project:  getEnv("BUILTIN_CM_GEMINI_PROJECT"),
-			Location: getEnv("BUILTIN_CM_GEMINI_LOCATION"),
-			HTTPOptions: genai.HTTPOptions{
-				BaseURL: getEnv("BUILTIN_CM_GEMINI_BASE_URL"),
-			},
-		})
-		if clientErr != nil {
-			return nil, false, clientErr
-		}
-		bcm, err = gemini.NewChatModel(ctx, &gemini.Config{
-			Client: c,
-			Model:  getEnv("BUILTIN_CM_GEMINI_MODEL"),
-		})
-	default:
-		// accept builtin chat model not configured
-	}
-
-	if err != nil {
-		return nil, false, fmt.Errorf("knowledge init openai chat mode failed, %w", err)
-	}
-	if bcm != nil {
-		configured = true
-	}
-
-	return
-}
-
 func readJinja2PromptTemplate(jsonFilePath string) (prompt.ChatTemplate, error) {
 	b, err := os.ReadFile(jsonFilePath)
 	if err != nil {

@@ -17,7 +17,11 @@
 package workflow
 
 import (
+	"context"
+
 	"github.com/cloudwego/eino/compose"
+	"github.com/coze-dev/coze-studio/backend/application/internal"
+	"github.com/coze-dev/coze-studio/backend/pkg/logs"
 	"github.com/redis/go-redis/v9"
 	"gorm.io/gorm"

@@ -64,9 +68,16 @@ type ServiceComponents struct {
 	CodeRunner coderunner.Runner
 }
 
-func InitService(components *ServiceComponents) *ApplicationService {
+func InitService(ctx context.Context, components *ServiceComponents) (*ApplicationService, error) {
+	bcm, ok, err := internal.GetBuiltinChatModel(ctx, "WKR_")
+	if err != nil {
+		return nil, err
+	}
+	if !ok {
+		logs.CtxWarnf(ctx, "workflow builtin chat model for knowledge recall not configured")
+	}
 	workflowRepo := service.NewWorkflowRepository(components.IDGen, components.DB, components.Cache,
-		components.Tos, components.CPStore)
+		components.Tos, components.CPStore, bcm)
 	workflow.SetRepository(workflowRepo)
 
 	workflowDomainSVC := service.NewWorkflowService(workflowRepo)

@@ -84,5 +95,5 @@ func InitService(components *ServiceComponents) *ApplicationService {
 	SVC.TosClient = components.Tos
 	SVC.IDGenerator = components.IDGen
 
-	return SVC
+	return SVC, err
 }

@@ -19,6 +19,7 @@ package workflow
 import (
 	"context"
 
+	"github.com/cloudwego/eino/components/model"
 	"github.com/cloudwego/eino/compose"
 
 	"github.com/coze-dev/coze-studio/backend/api/model/ocean/cloud/workflow"

@@ -96,6 +97,8 @@ type Repository interface {
 
 	compose.CheckPointStore
 	idgen.IDGenerator
+
+	GetKnowledgeRecallChatModel() model.BaseChatModel
 }
 
 var repositorySingleton Repository

@@ -102,7 +102,7 @@ func TestQuestionAnswer(t *testing.T) {
 	mockTos := storageMock.NewMockStorage(ctrl)
 	mockTos.EXPECT().GetObjectUrl(gomock.Any(), gomock.Any(), gomock.Any()).Return("", nil).AnyTimes()
 	repo := repo2.NewRepository(mockIDGen, db, redisClient, mockTos,
-		checkpoint.NewRedisStore(redisClient))
+		checkpoint.NewRedisStore(redisClient), nil)
 	mockey.Mock(workflow.GetRepository).Return(repo).Build()
 
 	t.Run("answer directly, no structured output", func(t *testing.T) {

@@ -223,9 +223,9 @@ func (s *NodeSchema) ToLLMConfig(ctx context.Context) (*llm.Config, error) {
 	}
 
 	if fcParams.KnowledgeFCParam != nil && len(fcParams.KnowledgeFCParam.KnowledgeList) > 0 {
-		kwChatModel, err := knowledgeRecallChatModel(ctx)
-		if err != nil {
-			return nil, err
+		kwChatModel := workflow2.GetRepository().GetKnowledgeRecallChatModel()
+		if kwChatModel == nil {
+			return nil, fmt.Errorf("workflow builtin chat model for knowledge recall not configured")
 		}
 		knowledgeOperator := crossknowledge.GetKnowledgeOperator()
 		setting := fcParams.KnowledgeFCParam.GlobalSetting

@@ -650,15 +650,3 @@ func totRetrievalSearchType(s int64) (crossknowledge.SearchType, error) {
 		return "", fmt.Errorf("invalid retrieval search type %v", s)
 	}
 }
-
-// knowledgeRecallChatModel the chat model used by the knowledge base recall in the LLM node, not the user-configured model
-func knowledgeRecallChatModel(ctx context.Context) (einomodel.BaseChatModel, error) {
-	defaultChatModelParma := &model.LLMParams{
-		ModelName:   "豆包·1.5·Pro·32k",
-		ModelType:   1,
-		Temperature: ptr.Of(0.5),
-		MaxTokens:   4096,
-	}
-	m, _, err := model.GetManager().GetModel(ctx, defaultChatModelParma)
-	return m, err
-}

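For illustration only (not part of this commit): the tests in this diff pass nil as the repository's chat model, so any test that reaches the knowledge-recall branch in ToLLMConfig would now hit the "not configured" error. A sketch of a test double that could be passed to NewRepository instead, assuming eino's model.BaseChatModel exposes Generate and Stream and that schema.AssistantMessage and schema.Pipe behave as in current eino releases:

package example

import (
	"context"

	"github.com/cloudwego/eino/components/model"
	"github.com/cloudwego/eino/schema"
)

// stubChatModel is a hypothetical test double for the knowledge-recall
// model; it answers every request with a canned assistant message.
type stubChatModel struct{}

func (stubChatModel) Generate(ctx context.Context, in []*schema.Message, opts ...model.Option) (*schema.Message, error) {
	return schema.AssistantMessage("stub recall answer", nil), nil
}

func (stubChatModel) Stream(ctx context.Context, in []*schema.Message, opts ...model.Option) (*schema.StreamReader[*schema.Message], error) {
	sr, sw := schema.Pipe[*schema.Message](1)
	sw.Send(schema.AssistantMessage("stub recall answer", nil), nil)
	sw.Close()
	return sr, nil
}

Passing stubChatModel{} where these tests pass nil would let a test drive the knowledge-recall path without a configured provider.
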
@@ -41,6 +41,7 @@ import (
 	"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/execute"
 	"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/repo/dal/model"
 	"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/repo/dal/query"
+	cm "github.com/coze-dev/coze-studio/backend/infra/contract/chatmodel"
 	"github.com/coze-dev/coze-studio/backend/infra/contract/idgen"
 	"github.com/coze-dev/coze-studio/backend/infra/contract/storage"
 	"github.com/coze-dev/coze-studio/backend/pkg/errorx"

@@ -66,10 +67,11 @@ type RepositoryImpl struct {
 	workflow.InterruptEventStore
 	workflow.CancelSignalStore
 	workflow.ExecuteHistoryStore
+	builtinModel cm.BaseChatModel
 }
 
 func NewRepository(idgen idgen.IDGenerator, db *gorm.DB, redis *redis.Client, tos storage.Storage,
-	cpStore einoCompose.CheckPointStore) workflow.Repository {
+	cpStore einoCompose.CheckPointStore, chatModel cm.BaseChatModel) workflow.Repository {
 	return &RepositoryImpl{
 		IDGenerator: idgen,
 		query:       query.Use(db),

@@ -86,6 +88,7 @@ func NewRepository(idgen idgen.IDGenerator, db *gorm.DB, redis *redis.Client, to
 			query: query.Use(db),
 			redis: redis,
 		},
+		builtinModel: chatModel,
 	}
 }

@@ -1583,6 +1586,10 @@ func (r *RepositoryImpl) BatchCreateConnectorWorkflowVersion(ctx context.Context
 	return nil
 }
 
+func (r *RepositoryImpl) GetKnowledgeRecallChatModel() cm.BaseChatModel {
+	return r.builtinModel
+}
+
 func filterDisabledAPIParameters(parametersCfg []*workflow3.APIParameter, m map[string]any) map[string]any {
 	result := make(map[string]any, len(m))
 	responseParameterMap := slices.ToMap(parametersCfg, func(p *workflow3.APIParameter) (string, *workflow3.APIParameter) {

@@ -39,6 +39,7 @@ import (
 	"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/canvas/adaptor"
 	"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/compose"
 	"github.com/coze-dev/coze-studio/backend/domain/workflow/internal/repo"
+	"github.com/coze-dev/coze-studio/backend/infra/contract/chatmodel"
 	"github.com/coze-dev/coze-studio/backend/infra/contract/idgen"
 	"github.com/coze-dev/coze-studio/backend/infra/contract/storage"
 	"github.com/coze-dev/coze-studio/backend/pkg/lang/ptr"

@@ -67,8 +68,8 @@ func NewWorkflowService(repo workflow.Repository) workflow.Service {
 }
 
 func NewWorkflowRepository(idgen idgen.IDGenerator, db *gorm.DB, redis *redis.Client, tos storage.Storage,
-	cpStore einoCompose.CheckPointStore) workflow.Repository {
-	return repo.NewRepository(idgen, db, redis, tos, cpStore)
+	cpStore einoCompose.CheckPointStore, chatModel chatmodel.BaseChatModel) workflow.Repository {
+	return repo.NewRepository(idgen, db, redis, tos, cpStore, chatModel)
 }
 
 func (i *impl) ListNodeMeta(ctx context.Context, nodeTypes map[entity.NodeType]bool) (map[string][]*entity.NodeTypeMeta, []entity.Category, error) {

@@ -13,6 +13,7 @@ import (
 	context "context"
 	reflect "reflect"
 
+	model "github.com/cloudwego/eino/components/model"
 	compose "github.com/cloudwego/eino/compose"
 	schema "github.com/cloudwego/eino/schema"
 	workflow "github.com/coze-dev/coze-studio/backend/api/model/ocean/cloud/workflow"

@@ -26,7 +27,6 @@ import (
 type MockService struct {
 	ctrl     *gomock.Controller
 	recorder *MockServiceMockRecorder
-	isgomock struct{}
 }
 
 // MockServiceMockRecorder is the mock recorder for MockService.

@@ -364,17 +364,17 @@ func (mr *MockServiceMockRecorder) ReleaseApplicationWorkflows(ctx, appID, confi
 }
 
 // Save mocks base method.
-func (m *MockService) Save(ctx context.Context, id int64, arg2 string) error {
+func (m *MockService) Save(ctx context.Context, id int64, schema string) error {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "Save", ctx, id, arg2)
+	ret := m.ctrl.Call(m, "Save", ctx, id, schema)
 	ret0, _ := ret[0].(error)
 	return ret0
 }
 
 // Save indicates an expected call of Save.
-func (mr *MockServiceMockRecorder) Save(ctx, id, arg2 any) *gomock.Call {
+func (mr *MockServiceMockRecorder) Save(ctx, id, schema any) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Save", reflect.TypeOf((*MockService)(nil).Save), ctx, id, arg2)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Save", reflect.TypeOf((*MockService)(nil).Save), ctx, id, schema)
 }
 
 // StreamExecute mocks base method.

@@ -528,7 +528,6 @@ func (mr *MockServiceMockRecorder) WorkflowAsModelTool(ctx, policies any) *gomoc
 type MockRepository struct {
 	ctrl     *gomock.Controller
 	recorder *MockRepositoryMockRecorder
-	isgomock struct{}
 }
 
 // MockRepositoryMockRecorder is the mock recorder for MockRepository.

@@ -548,6 +547,20 @@ func (m *MockRepository) EXPECT() *MockRepositoryMockRecorder {
 	return m.recorder
 }
 
+// BatchCreateConnectorWorkflowVersion mocks base method.
+func (m *MockRepository) BatchCreateConnectorWorkflowVersion(ctx context.Context, appID, connectorID int64, workflowIDs []int64, version string) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "BatchCreateConnectorWorkflowVersion", ctx, appID, connectorID, workflowIDs, version)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// BatchCreateConnectorWorkflowVersion indicates an expected call of BatchCreateConnectorWorkflowVersion.
+func (mr *MockRepositoryMockRecorder) BatchCreateConnectorWorkflowVersion(ctx, appID, connectorID, workflowIDs, version any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchCreateConnectorWorkflowVersion", reflect.TypeOf((*MockRepository)(nil).BatchCreateConnectorWorkflowVersion), ctx, appID, connectorID, workflowIDs, version)
+}
+
 // CancelAllRunningNodes mocks base method.
 func (m *MockRepository) CancelAllRunningNodes(ctx context.Context, wfExeID int64) error {
 	m.ctrl.T.Helper()

@@ -784,6 +797,20 @@ func (mr *MockRepositoryMockRecorder) GetFirstInterruptEvent(ctx, wfExeID any) *
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFirstInterruptEvent", reflect.TypeOf((*MockRepository)(nil).GetFirstInterruptEvent), ctx, wfExeID)
 }
 
+// GetKnowledgeRecallChatModel mocks base method.
+func (m *MockRepository) GetKnowledgeRecallChatModel() model.BaseChatModel {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetKnowledgeRecallChatModel")
+	ret0, _ := ret[0].(model.BaseChatModel)
+	return ret0
+}
+
+// GetKnowledgeRecallChatModel indicates an expected call of GetKnowledgeRecallChatModel.
+func (mr *MockRepositoryMockRecorder) GetKnowledgeRecallChatModel() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetKnowledgeRecallChatModel", reflect.TypeOf((*MockRepository)(nil).GetKnowledgeRecallChatModel))
+}
+
 // GetLatestVersion mocks base method.
 func (m *MockRepository) GetLatestVersion(ctx context.Context, id int64) (*vo.VersionInfo, error) {
 	m.ctrl.T.Helper()

@@ -936,6 +963,21 @@ func (mr *MockRepositoryMockRecorder) GetWorkflowExecution(ctx, id any) *gomock.
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecution", reflect.TypeOf((*MockRepository)(nil).GetWorkflowExecution), ctx, id)
 }
 
+// IsApplicationConnectorWorkflowVersion mocks base method.
+func (m *MockRepository) IsApplicationConnectorWorkflowVersion(ctx context.Context, connectorID, workflowID int64, version string) (bool, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "IsApplicationConnectorWorkflowVersion", ctx, connectorID, workflowID, version)
+	ret0, _ := ret[0].(bool)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// IsApplicationConnectorWorkflowVersion indicates an expected call of IsApplicationConnectorWorkflowVersion.
+func (mr *MockRepositoryMockRecorder) IsApplicationConnectorWorkflowVersion(ctx, connectorID, workflowID, version any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsApplicationConnectorWorkflowVersion", reflect.TypeOf((*MockRepository)(nil).IsApplicationConnectorWorkflowVersion), ctx, connectorID, workflowID, version)
+}
+
 // ListInterruptEvents mocks base method.
 func (m *MockRepository) ListInterruptEvents(ctx context.Context, wfExeID int64) ([]*entity.InterruptEvent, error) {
 	m.ctrl.T.Helper()

@@ -1172,6 +1214,20 @@ func (mr *MockRepositoryMockRecorder) UpdateNodeExecution(ctx, execution any) *g
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNodeExecution", reflect.TypeOf((*MockRepository)(nil).UpdateNodeExecution), ctx, execution)
 }
 
+// UpdateNodeExecutionStreaming mocks base method.
+func (m *MockRepository) UpdateNodeExecutionStreaming(ctx context.Context, execution *entity.NodeExecution) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "UpdateNodeExecutionStreaming", ctx, execution)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// UpdateNodeExecutionStreaming indicates an expected call of UpdateNodeExecutionStreaming.
+func (mr *MockRepositoryMockRecorder) UpdateNodeExecutionStreaming(ctx, execution any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNodeExecutionStreaming", reflect.TypeOf((*MockRepository)(nil).UpdateNodeExecutionStreaming), ctx, execution)
+}
+
 // UpdateWorkflowDraftTestRunSuccess mocks base method.
 func (m *MockRepository) UpdateWorkflowDraftTestRunSuccess(ctx context.Context, id int64) error {
 	m.ctrl.T.Helper()

@@ -101,7 +101,7 @@ export EMBEDDING_TYPE="ark"
 export OPENAI_EMBEDDING_BASE_URL=""     # (string) OpenAI base_url
 export OPENAI_EMBEDDING_MODEL=""        # (string) OpenAI embedding model
 export OPENAI_EMBEDDING_API_KEY=""      # (string) OpenAI api_key
-export OPENAI_EMBEDDING_BY_AZURE=true   # (bool) OpenAI by_azure
+export OPENAI_EMBEDDING_BY_AZURE=false  # (bool) OpenAI by_azure
 export OPENAI_EMBEDDING_API_VERSION=""  # OpenAI azure api version
 export OPENAI_EMBEDDING_DIMS=1024       # (int) embedding vector dimensions
 export OPENAI_EMBEDDING_REQUEST_DIMS=1024

@@ -140,17 +140,18 @@ export MODEL_ID_0="" # model name for connection
 export MODEL_API_KEY_0=""  # model api key
 export MODEL_BASE_URL_0="" # model base url
 
-# Model for knowledge nl2sql, messages2query (rewrite), image annotation
+# Model for knowledge nl2sql, messages2query (rewrite), image annotation, workflow knowledge recall
 # add prefix to assign specific model, downgrade to default config when prefix is not configured:
 # 1. nl2sql: NL2SQL_ (e.g. NL2SQL_BUILTIN_CM_TYPE)
 # 2. messages2query: M2Q_ (e.g. M2Q_BUILTIN_CM_TYPE)
 # 3. image annotation: IA_ (e.g. IA_BUILTIN_CM_TYPE)
+# 4. workflow knowledge recall: WKR_ (e.g. WKR_BUILTIN_CM_TYPE)
 # supported chat model type: openai / ark / deepseek / ollama / qwen / gemini
 export BUILTIN_CM_TYPE="ark"
 # type openai
 export BUILTIN_CM_OPENAI_BASE_URL=""
 export BUILTIN_CM_OPENAI_API_KEY=""
-export BUILTIN_CM_OPENAI_BY_AZURE=true
+export BUILTIN_CM_OPENAI_BY_AZURE=false
 export BUILTIN_CM_OPENAI_MODEL=""
 
 # type ark

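For illustration only (not part of this commit): an .env fragment showing the new WKR_ override in practice. Every BUILTIN_CM_* key can carry the prefix; unprefixed values remain the shared default for all four consumers. The key values below are placeholders, not real configuration:

# shared default: nl2sql, messages2query, image annotation and workflow
# knowledge recall all use ark unless a prefixed override is present
export BUILTIN_CM_TYPE="ark"
export BUILTIN_CM_ARK_API_KEY="..."
export BUILTIN_CM_ARK_MODEL="..."

# hypothetical override: workflow knowledge recall alone switches to openai
export WKR_BUILTIN_CM_TYPE="openai"
export WKR_BUILTIN_CM_OPENAI_API_KEY="..."
export WKR_BUILTIN_CM_OPENAI_MODEL="..."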