feat: integrate gemini embedding (#783)
This commit is contained in:
@@ -101,7 +101,7 @@ export VIKING_DB_MODEL_NAME="" # if vikingdb model name is not set, you need to
|
||||
# The Embedding model relied on by knowledge base vectorization does not need to be configured
|
||||
# if the vector database comes with built-in Embedding functionality (such as VikingDB). Currently,
|
||||
# Coze Studio supports five access methods: openai, ark, ollama, gemini, and custom http. Users can simply choose one of them when using
|
||||
# embedding type: openai / ark / ollama / http
|
||||
# embedding type: openai / ark / ollama / http / gemini
|
||||
export EMBEDDING_TYPE="ark"
|
||||
export EMBEDDING_MAX_BATCH_SIZE=100
|
||||
|
||||
@@ -126,6 +126,15 @@ export OLLAMA_EMBEDDING_BASE_URL="" # (string, required) Ollama embedding base_u
|
||||
export OLLAMA_EMBEDDING_MODEL="" # (string, required) Ollama embedding model
|
||||
export OLLAMA_EMBEDDING_DIMS="" # (int, required) Ollama embedding dimensions
|
||||
|
||||
# gemini embedding
|
||||
export GEMINI_EMBEDDING_BASE_URL="" # (string, required) Gemini embedding base_url
|
||||
export GEMINI_EMBEDDING_MODEL="gemini-embedding-001" # (string, required) Gemini embedding model.
|
||||
export GEMINI_EMBEDDING_API_KEY="" # (string, required) Gemini embedding api_key
|
||||
export GEMINI_EMBEDDING_DIMS=2048 # (int, required) Gemini embedding dimensions
|
||||
export GEMINI_EMBEDDING_BACKEND="1" # (string, required) Gemini embedding backend, should be "1" for BackendGeminiAPI / "2" for BackendVertexAI.
|
||||
export GEMINI_EMBEDDING_PROJECT="" # (string, optional) Gemini embedding project
|
||||
export GEMINI_EMBEDDING_LOCATION="" # (string, optional) Gemini embedding location
|
||||
|
||||
# http embedding
|
||||
export HTTP_EMBEDDING_ADDR="" # (string, required) http embedding address
|
||||
export HTTP_EMBEDDING_DIMS=1024 # (int, required) http embedding dimensions
|
||||
|
||||
@@ -99,7 +99,7 @@ export VIKING_DB_MODEL_NAME="" # if vikingdb model name is not set, you need to
|
||||
# The Embedding model relied on by knowledge base vectorization does not need to be configured
|
||||
# if the vector database comes with built-in Embedding functionality (such as VikingDB). Currently,
|
||||
# Coze Studio supports five access methods: openai, ark, ollama, gemini, and custom http. Users can simply choose one of them when using
|
||||
# embedding type: ark / openai / ollama / http
|
||||
# embedding type: ark / openai / ollama / gemini / http
|
||||
export EMBEDDING_TYPE="ark"
|
||||
export EMBEDDING_MAX_BATCH_SIZE=100
|
||||
|
||||
@@ -124,6 +124,15 @@ export OLLAMA_EMBEDDING_BASE_URL="" # (string, required) Ollama embedding base_u
|
||||
export OLLAMA_EMBEDDING_MODEL="" # (string, required) Ollama embedding model
|
||||
export OLLAMA_EMBEDDING_DIMS="" # (int, required) Ollama embedding dimensions
|
||||
|
||||
# gemini embedding
|
||||
export GEMINI_EMBEDDING_BASE_URL="" # (string, required) Gemini embedding base_url
|
||||
export GEMINI_EMBEDDING_MODEL="gemini-embedding-001" # (string, required) Gemini embedding model.
|
||||
export GEMINI_EMBEDDING_API_KEY="" # (string, required) Gemini embedding api_key
|
||||
export GEMINI_EMBEDDING_DIMS=2048 # (int, required) Gemini embedding dimensions
|
||||
export GEMINI_EMBEDDING_BACKEND="1" # (string, required) Gemini embedding backend, should be "1" for BackendGeminiAPI / "2" for BackendVertexAI.
|
||||
export GEMINI_EMBEDDING_PROJECT="" # (string, optional) Gemini embedding project
|
||||
export GEMINI_EMBEDDING_LOCATION="" # (string, optional) Gemini embedding location
|
||||
|
||||
# http embedding
|
||||
export HTTP_EMBEDDING_ADDR="" # (string, required) http embedding address
|
||||
export HTTP_EMBEDDING_DIMS=1024 # (int, required) http embedding dimensions
|
||||
|
||||
Reference in New Issue
Block a user