diff --git a/backend/application/knowledge/init.go b/backend/application/knowledge/init.go
index c7ea4b32..fe3f2153 100644
--- a/backend/application/knowledge/init.go
+++ b/backend/application/knowledge/init.go
@@ -172,7 +172,13 @@ func getVectorStore(ctx context.Context) (searchstore.Manager, error) {
 		defer cancel()
 
 		milvusAddr := os.Getenv("MILVUS_ADDR")
-		mc, err := milvusclient.New(cctx, &milvusclient.ClientConfig{Address: milvusAddr})
+		user := os.Getenv("MILVUS_USER")
+		password := os.Getenv("MILVUS_PASSWORD")
+		mc, err := milvusclient.New(cctx, &milvusclient.ClientConfig{
+			Address:  milvusAddr,
+			Username: user,
+			Password: password,
+		})
 		if err != nil {
 			return nil, fmt.Errorf("init milvus client failed, err=%w", err)
 		}
diff --git a/docker/.env.example b/docker/.env.example
index a9bdaca8..ea652d0b 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -86,6 +86,8 @@ export RMQ_SECRET_KEY=""
 export VECTOR_STORE_TYPE="milvus"
 # milvus vector store
 export MILVUS_ADDR="milvus:19530"
+export MILVUS_USER=""
+export MILVUS_PASSWORD=""
 # vikingdb vector store for Volcengine
 export VIKING_DB_HOST=""
 export VIKING_DB_REGION=""
@@ -98,10 +100,17 @@ export VIKING_DB_MODEL_NAME="" # if vikingdb model name is not set, you need to
 # The Embedding model relied on by knowledge base vectorization does not need to be configured
 # if the vector database comes with built-in Embedding functionality (such as VikingDB). Currently,
 # Coze Studio supports four access methods: openai, ark, ollama, and custom http. Users can simply choose one of them when using
-# embedding type: openai / ark / ollama / http
+# embedding type: ark / openai / ollama / http
 export EMBEDDING_TYPE="ark"
 export EMBEDDING_MAX_BATCH_SIZE=100
 
+# ark embedding by volcengine / byteplus
+export ARK_EMBEDDING_BASE_URL="" # (string, required) Ark embedding base_url
+export ARK_EMBEDDING_MODEL="" # (string, required) Ark embedding model
+export ARK_EMBEDDING_API_KEY="" # (string, required) Ark embedding api_key
+export ARK_EMBEDDING_DIMS="2048" # (int, required) Ark embedding dimensions
+export ARK_EMBEDDING_API_TYPE="" # (string, optional) Ark embedding api type, should be "text_api" / "multi_modal_api". Default "text_api".
+
 # openai embedding
 export OPENAI_EMBEDDING_BASE_URL="" # (string, required) OpenAI embedding base_url
 export OPENAI_EMBEDDING_MODEL="" # (string, required) OpenAI embedding model
@@ -111,13 +120,6 @@ export OPENAI_EMBEDDING_API_VERSION="" # (string, optional) OpenAI embedding
 export OPENAI_EMBEDDING_DIMS=1024 # (int, required) OpenAI embedding dimensions
 export OPENAI_EMBEDDING_REQUEST_DIMS=1024 # (int, optional) OpenAI embedding dimensions in requests, need to be empty if api doesn't support specifying dimensions.
 
-# ark embedding by volcengine / byteplus
-export ARK_EMBEDDING_MODEL="" # (string, required) Ark embedding model
-export ARK_EMBEDDING_API_KEY="" # (string, required) Ark embedding api_key
-export ARK_EMBEDDING_DIMS="2048" # (int, required) Ark embedding dimensions
-export ARK_EMBEDDING_BASE_URL="" # (string, required) Ark embedding base_url
-export ARK_EMBEDDING_API_TYPE="" # (string, optional) Ark embedding api type, should be "text_api" / "multi_modal_api". Default "text_api".
-
 # ollama embedding
 export OLLAMA_EMBEDDING_BASE_URL="" # (string, required) Ollama embedding base_url
 export OLLAMA_EMBEDDING_MODEL="" # (string, required) Ollama embedding model