chore: replace all cn comments to en version by volc api (#313)

This commit is contained in:
tecvan
2025-07-31 15:18:11 +08:00
committed by GitHub
parent 91d6cdb430
commit 5abc63fba6
254 changed files with 5899 additions and 5844 deletions

View File

@@ -34,52 +34,52 @@ type Config struct {
// ParsingStrategy for document parse before indexing
type ParsingStrategy struct {
// Doc
ExtractImage bool `json:"extract_image"` // 提取图片元素
ExtractTable bool `json:"extract_table"` // 提取表格元素
ImageOCR bool `json:"image_ocr"` // 图片 ocr
FilterPages []int `json:"filter_pages"` // 页过滤, 第一页=1
ExtractImage bool `json:"extract_image"` // Extract image elements
ExtractTable bool `json:"extract_table"` // Extract table elements
ImageOCR bool `json:"image_ocr"` // Image OCR
FilterPages []int `json:"filter_pages"` // Page filter, first page = 1
// Sheet
SheetID *int `json:"sheet_id"` // xlsx sheet id
HeaderLine int `json:"header_line"` // 表头行
DataStartLine int `json:"data_start_line"` // 数据起始行
RowsCount int `json:"rows_count"` // 读取数据行数
IsAppend bool `json:"-"` // 行插入
Columns []*document.Column `json:"-"` // sheet 对齐表头
IgnoreColumnTypeErr bool `json:"-"` // true 时忽略 column type value 未对齐的问题,此时 value 为空
HeaderLine int `json:"header_line"` // Header row
DataStartLine int `json:"data_start_line"` // Data start row
RowsCount int `json:"rows_count"` // Number of data rows to read
IsAppend bool `json:"-"` // Append rows
Columns []*document.Column `json:"-"` // Header columns to align the sheet against
IgnoreColumnTypeErr bool `json:"-"` // When true, ignore column type/value mismatches and leave the value empty
// Image
ImageAnnotationType ImageAnnotationType `json:"image_annotation_type"` // 图片内容标注类型
ImageAnnotationType ImageAnnotationType `json:"image_annotation_type"` // Image content annotation type
}
type ChunkingStrategy struct {
ChunkType ChunkType `json:"chunk_type"`
// custom config
ChunkSize int64 `json:"chunk_size"` // 分段最大长度
Separator string `json:"separator"` // 分段标识符
Overlap int64 `json:"overlap"` // 分段重叠比例
ChunkSize int64 `json:"chunk_size"` // Maximum chunk length
Separator string `json:"separator"` // Chunk separator
Overlap int64 `json:"overlap"` // Chunk overlap ratio
TrimSpace bool `json:"trim_space"`
TrimURLAndEmail bool `json:"trim_url_and_email"`
// leveled config
MaxDepth int64 `json:"max_depth"` // 按层级分段时的最大层级
SaveTitle bool `json:"save_title"` // 保留层级标题
MaxDepth int64 `json:"max_depth"` // Maximum level when chunking by level
SaveTitle bool `json:"save_title"` // Preserve hierarchical titles
}
type ChunkType int64
const (
ChunkTypeDefault ChunkType = 0 // 自动分片
ChunkTypeCustom ChunkType = 1 // 自定义规则分片
ChunkTypeLeveled ChunkType = 2 // 层级分片
ChunkTypeDefault ChunkType = 0 // Automatic chunking
ChunkTypeCustom ChunkType = 1 // Custom-rule chunking
ChunkTypeLeveled ChunkType = 2 // Hierarchical chunking
)
type ImageAnnotationType int64
const (
ImageAnnotationTypeModel ImageAnnotationType = 0 // 模型自动标注
ImageAnnotationTypeManual ImageAnnotationType = 1 // 人工标注
ImageAnnotationTypeModel ImageAnnotationType = 0 // Automatic annotation by model
ImageAnnotationTypeManual ImageAnnotationType = 1 // Manual annotation
)
type FileExtension string
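For reference, a minimal sketch (not part of this diff) of how a caller in the same package might populate the two strategies for custom chunking; the values are illustrative, not repository defaults.

func exampleStrategies() (*ParsingStrategy, *ChunkingStrategy) {
	ps := &ParsingStrategy{
		ExtractImage: true,
		ExtractTable: true,
		FilterPages:  []int{1}, // first page = 1
	}
	cs := &ChunkingStrategy{
		ChunkType: ChunkTypeCustom, // chunk by custom rules
		ChunkSize: 800,             // maximum chunk length
		Separator: "\n\n",          // chunk separator
		Overlap:   10,              // overlap ratio
		TrimSpace: true,
	}
	return ps, cs
}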

View File

@@ -33,7 +33,7 @@ type Request struct {
}
type Response struct {
SortedData []*Data // 高分在前
SortedData []*Data // Highest scores first
TokenUsage *int64
}

View File

@@ -64,7 +64,7 @@ const (
type FieldName = string
// 内置 field name
// Built-in field name
const (
FieldID FieldName = "id" // int64
FieldCreatorID FieldName = "creator_id" // int64

View File

@@ -25,15 +25,15 @@ import (
type IndexerOptions struct {
PartitionKey *string
Partition *string // 存储分片映射
Partition *string // Storage partition mapping
IndexingFields []string
ProgressBar progressbar.ProgressBar
}
type RetrieverOptions struct {
MultiMatch *MultiMatch // field 查询
MultiMatch *MultiMatch // Multi-field query
PartitionKey *string
Partitions []string // 查询分片映射
Partitions []string // Partition mappings to query
}
type MultiMatch struct {

View File

@@ -36,7 +36,7 @@ type Column struct {
Description string
Nullable bool
IsPrimary bool
Sequence int // 排序编号
Sequence int // Sort order number
}
type TableColumnType int

View File

@@ -41,9 +41,9 @@ type SecurityToken struct {
}
type ResourceURL struct {
// REQUIRED; 结果图访问精简地址,与默认地址相比缺少 Bucket 部分。
// REQUIRED; Compact access URL of the result image; compared with the default URL, it omits the Bucket part.
CompactURL string `json:"CompactURL"`
// REQUIRED; 结果图访问默认地址。
// REQUIRED; Default access URL of the result image.
URL string `json:"URL"`
}
@@ -55,7 +55,7 @@ type UploadResult struct {
type Result struct {
Uri string `json:"Uri"`
UriStatus int `json:"UriStatus"` // 2000表示上传成功
UriStatus int `json:"UriStatus"` // 2000 means the upload was successful.
}
type FileInfo struct {

View File

@@ -47,7 +47,7 @@ const (
)
// Deprecated
type Scenario int64 // 模型实体使用场景
type Scenario int64 // Model entity usage scenarios
type Modal string
@@ -62,10 +62,10 @@ const (
type ModelStatus int64
const (
StatusDefault ModelStatus = 0 // 未配置时的默认状态,表现等同 StatusInUse
StatusInUse ModelStatus = 1 // 应用中,可使用可新建
StatusPending ModelStatus = 5 // 待下线,可使用不可新建
StatusDeleted ModelStatus = 10 // 已下线,不可使用不可新建
StatusDefault ModelStatus = 0 // Default state when not configured, equivalent to StatusInUse
StatusInUse ModelStatus = 1 // In use; can be used and newly created
StatusPending ModelStatus = 5 // Pending offline; can be used but not newly created
StatusDeleted ModelStatus = 10 // Offline; cannot be used or newly created
)
type Widget string

View File

@@ -123,10 +123,10 @@ func (p *Parameter) GetString(tp DefaultType) (string, error) {
}
type ModelMeta struct {
Protocol chatmodel.Protocol `yaml:"protocol"` // 模型通信协议
Capability *Capability `yaml:"capability"` // 模型能力
ConnConfig *chatmodel.Config `yaml:"conn_config"` // 模型连接配置
Status ModelStatus `yaml:"status"` // 模型状态
Protocol chatmodel.Protocol `yaml:"protocol"` // Model communication protocol
Capability *Capability `yaml:"capability"` // Model capabilities
ConnConfig *chatmodel.Config `yaml:"conn_config"` // Model connection configuration
Status ModelStatus `yaml:"status"` // Model status
}
type DefaultValue map[DefaultType]string

View File

@@ -38,7 +38,7 @@ const (
NormalKey IndexType = "KEY"
)
// AlterTableAction 定义修改表的动作类型
// AlterTableAction defines the type of action to modify a table
type AlterTableAction string
const (
@@ -79,8 +79,8 @@ const (
type SortDirection string
const (
SortDirectionAsc SortDirection = "ASC" // 升序
SortDirectionDesc SortDirection = "DESC" // 降序
SortDirectionAsc SortDirection = "ASC" // ascending order
SortDirectionDesc SortDirection = "DESC" // descending order
)
type SQLType int32

View File

@@ -17,12 +17,12 @@
package entity
type Column struct {
Name string // 保证唯一性
Name string // Must be unique
DataType DataType
Length *int
NotNull bool
DefaultValue *string
AutoIncrement bool // 表示该列是否为自动递增
AutoIncrement bool // Indicates whether the column is automatically incremented
Comment *string
}
@@ -34,12 +34,12 @@ type Index struct {
type TableOption struct {
Collate *string
AutoIncrement *int64 // 设置表的自动递增初始值
AutoIncrement *int64 // Initial auto-increment value of the table
Comment *string
}
type Table struct {
Name string // 保证唯一性
Name string // Must be unique
Columns []*Column
Indexes []*Index
Options *TableOption

View File

@@ -38,17 +38,17 @@ type RDB interface {
ExecuteSQL(ctx context.Context, req *ExecuteSQLRequest) (*ExecuteSQLResponse, error)
}
// CreateTableRequest 创建表请求
// CreateTableRequest is the request to create a table
type CreateTableRequest struct {
Table *entity.Table
}
// CreateTableResponse 创建表响应
// CreateTableResponse is the response to creating a table
type CreateTableResponse struct {
Table *entity.Table
}
// AlterTableOperation 修改表操作
// AlterTableOperation is a single operation that alters a table
type AlterTableOperation struct {
Action entity.AlterTableAction
Column *entity.Column
@@ -58,64 +58,64 @@ type AlterTableOperation struct {
NewTableName *string
}
// AlterTableRequest 修改表请求
// AlterTableRequest is the request to alter a table
type AlterTableRequest struct {
TableName string
Operations []*AlterTableOperation
}
// AlterTableResponse 修改表响应
// AlterTableResponse is the response to altering a table
type AlterTableResponse struct {
Table *entity.Table
}
// DropTableRequest 删除表请求
// DropTableRequest is the request to drop a table
type DropTableRequest struct {
TableName string
IfExists bool
}
// DropTableResponse 删除表响应
// DropTableResponse is the response to dropping a table
type DropTableResponse struct {
Success bool
}
// GetTableRequest 获取表信息请求
// GetTableRequest is the request to get table information
type GetTableRequest struct {
TableName string
}
// GetTableResponse 获取表信息响应
// GetTableResponse is the response to getting table information
type GetTableResponse struct {
Table *entity.Table
}
// InsertDataRequest 插入数据请求
// InsertDataRequest is the request to insert data
type InsertDataRequest struct {
TableName string
Data []map[string]interface{}
}
// InsertDataResponse 插入数据响应
// InsertDataResponse is the response to inserting data
type InsertDataResponse struct {
AffectedRows int64
}
// Condition 定义查询条件
// Condition defines a single query condition
type Condition struct {
Field string
Operator entity.Operator
Value interface{}
}
// ComplexCondition 复杂条件
// ComplexCondition is a complex query condition
type ComplexCondition struct {
Conditions []*Condition
NestedConditions []*ComplexCondition // Conditions互斥 example: WHERE (age >= 18 AND status = 'active') OR (age >= 21 AND status = 'pending')
NestedConditions []*ComplexCondition // Mutually exclusive with Conditions; example: WHERE (age >= 18 AND status = 'active') OR (age >= 21 AND status = 'pending')
Operator entity.LogicalOperator
}
// UpdateDataRequest 更新数据请求
// UpdateDataRequest is the request to update data
type UpdateDataRequest struct {
TableName string
Data map[string]interface{}
@@ -123,67 +123,67 @@ type UpdateDataRequest struct {
Limit *int
}
// UpdateDataResponse 更新数据响应
// UpdateDataResponse is the response to updating data
type UpdateDataResponse struct {
AffectedRows int64
}
// DeleteDataRequest 删除数据请求
// DeleteDataRequest is the request to delete data
type DeleteDataRequest struct {
TableName string
Where *ComplexCondition
Limit *int
}
// DeleteDataResponse 删除数据响应
// DeleteDataResponse is the response to deleting data
type DeleteDataResponse struct {
AffectedRows int64
}
type OrderBy struct {
Field string // 排序字段
Direction entity.SortDirection // 排序方向
Field string // sort field
Direction entity.SortDirection // sort direction
}
// SelectDataRequest 查询数据请求
// SelectDataRequest is the request to query data
type SelectDataRequest struct {
TableName string
Fields []string // 要查询的字段,如果为空则查询所有字段
Fields []string // Fields to query; if empty, all fields are queried
Where *ComplexCondition
OrderBy []*OrderBy // 排序条件
Limit *int // 限制返回行数
Offset *int // 偏移量
OrderBy []*OrderBy // sort condition
Limit *int // Limit the number of rows returned
Offset *int // Offset
}
// SelectDataResponse 查询数据响应
// SelectDataResponse is the response to querying data
type SelectDataResponse struct {
ResultSet *entity.ResultSet
Total int64 // 符合条件的总记录数(不考虑分页)
Total int64 // Total number of matching records (ignoring pagination)
}
type UpsertDataRequest struct {
TableName string
Data []map[string]interface{} // 要更新或插入的数据
Keys []string // 用于标识唯一记录的列名,为空的话默认使用主键
Data []map[string]interface{} // Data to be updated or inserted
Keys []string // Column names used to identify a unique record; defaults to the primary key when empty
}
type UpsertDataResponse struct {
AffectedRows int64 // 受影响的行数
InsertedRows int64 // 新插入的行数
UpdatedRows int64 // 更新的行数
UnchangedRows int64 // 不变的行数(没有插入或更新的行数)
AffectedRows int64 // Number of rows affected
InsertedRows int64 // Number of newly inserted rows
UpdatedRows int64 // Number of updated rows
UnchangedRows int64 // Number of unchanged rows (neither inserted nor updated)
}
// ExecuteSQLRequest 执行SQL请求
// ExecuteSQLRequest is the request to execute SQL
type ExecuteSQLRequest struct {
SQL string
Params []interface{} // 用于参数化查询
Params []interface{} // For parameterized queries
// SQLType indicates the type of SQL: parameterized or raw SQL. It takes effect if OperateType is 0.
SQLType entity.SQLType
}
// ExecuteSQLResponse 执行SQL响应
// ExecuteSQLResponse is the response to executing SQL
type ExecuteSQLResponse struct {
ResultSet *entity.ResultSet
}
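For orientation, a hedged sketch of how the nested-condition example from the ComplexCondition comment might be expressed with these request types. The entity operator and logical-operator constant names below are assumed for illustration and should be checked against the entity package.

func exampleSelect() *SelectDataRequest {
	adults := &ComplexCondition{
		Operator: entity.LogicalOperatorAnd, // hypothetical constant name
		Conditions: []*Condition{
			{Field: "age", Operator: entity.OperatorGreaterEqual, Value: 18},   // hypothetical name
			{Field: "status", Operator: entity.OperatorEqual, Value: "active"}, // hypothetical name
		},
	}
	pending := &ComplexCondition{
		Operator: entity.LogicalOperatorAnd,
		Conditions: []*Condition{
			{Field: "age", Operator: entity.OperatorGreaterEqual, Value: 21},
			{Field: "status", Operator: entity.OperatorEqual, Value: "pending"},
		},
	}
	limit := 100
	return &SelectDataRequest{
		TableName: "users",
		Fields:    []string{"id", "age", "status"}, // empty would mean all fields
		Where: &ComplexCondition{
			Operator:         entity.LogicalOperatorOr, // hypothetical constant name
			NestedConditions: []*ComplexCondition{adults, pending},
		},
		OrderBy: []*OrderBy{{Field: "id", Direction: entity.SortDirectionAsc}},
		Limit:   &limit,
	}
}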

View File

@@ -32,16 +32,16 @@ func New() *redis.Client {
Addr: addr, // Redis地址
DB: 0, // 默认数据库
Password: password,
// 连接池配置
PoolSize: 100, // 最大连接数建议设置为CPU核心数*10
MinIdleConns: 10, // 最小空闲连接
MaxIdleConns: 30, // 最大空闲连接
ConnMaxIdleTime: 5 * time.Minute, // 空闲连接超时时间
// connection pool configuration
PoolSize: 100, // Maximum number of connections (recommended to set to CPU cores * 10)
MinIdleConns: 10, // Minimum idle connections
MaxIdleConns: 30, // Maximum idle connections
ConnMaxIdleTime: 5 * time.Minute, // Idle connection timeout
// 超时配置
DialTimeout: 5 * time.Second, // 连接建立超时
ReadTimeout: 3 * time.Second, // 读操作超时
WriteTimeout: 3 * time.Second, // 写操作超时
// timeout configuration
DialTimeout: 5 * time.Second, // Connection establishment timeout
ReadTimeout: 3 * time.Second, // Read operation timeout
WriteTimeout: 3 * time.Second, // Write operation timeout
})
return rdb

View File

@@ -62,7 +62,7 @@ func assertValAs(typ document.TableColumnType, val string) (*document.ColumnData
ValTime: ptr.Of(emptyTime),
}, nil
}
// 支持时间戳和时间字符串
// Supports timestamp and time string
i, err := strconv.ParseInt(val, 10, 64)
if err == nil {
t := time.Unix(i, 0)
@@ -152,7 +152,7 @@ func assertValAsForce(typ document.TableColumnType, val string, nullable bool) *
}
func assertVal(val string) document.ColumnData {
// TODO: 先不处理 image
// TODO: skip handling images for now
if val == "" {
return document.ColumnData{
Type: document.TableColumnTypeUnknown,

View File

@@ -75,7 +75,7 @@ func parseByRowIterator(iter rowIterator, config *contract.Config, opts ...parse
var rowData []*document.ColumnData
for j := range row {
colSchema, found := rev[j]
if !found { // 列裁剪
if !found { // column pruning
continue
}

View File

@@ -51,7 +51,7 @@ func parseJSON(config *contract.Config) parseFn {
}
} else {
for k := range rawSlices[0] {
// init 取首个 json item 中 key 的随机顺序
// init: take the keys of the first json item in their (random) map iteration order
header = append(header, k)
}
}

View File

@@ -163,7 +163,7 @@ func parseByPython(config *contract.Config, storage storage.Storage, ocr ocr.OCR
}
docs = append(docs, doc)
} else {
// TODO: 这里有点问题img label 可能被较短的 chunk size 截断
// TODO: potential issue here: the img label may be truncated by a short chunk size
result.Content[i+1].Content = label + result.Content[i+1].Content
}
case contentTypeTable:

View File

@@ -35,7 +35,7 @@ const imgSrcFormat = `<img src="" data-tos-key="%s">`
func createSecret(uid int64, fileType string) string {
num := 10
input := fmt.Sprintf("upload_%d_Ma*9)fhi_%d_gou_%s_rand_%d", uid, time.Now().Unix(), fileType, rand.Intn(100000))
// 做md5取前20,// mapIntToBase62 把数字映射到 Base62
// MD5 the input and take the first 20 characters; mapIntToBase62 maps the number to Base62
hash := sha256.Sum256([]byte(fmt.Sprintf("%s", input)))
hashString := base64.StdEncoding.EncodeToString(hash[:])
if len(hashString) > num {

View File

@@ -27,11 +27,11 @@ package vikingdb
// resp, err := r.Rerank(context.Background(), &rerank.Request{
// Data: [][]*knowledge.RetrieveSlice{
// {
// {Slice: &entity.Slice{PlainText: "吉尼斯世界纪录网站数据显示蓝鲸是目前已知世界上最大的动物体长可达30米相当于一架波音737飞机的长度"}},
// {Slice: &entity.Slice{PlainText: "一头成年雌性弓头鲸可以长到22米长而一头雄性鲸鱼可以长到18米长"}},
// {Slice: &entity.Slice{PlainText: "According to the Guinness World Records website, the blue whale is currently the largest animal known in the world, with a body length of up to 30 meters, which is equivalent to the length of a Boeing 737 aircraft"}},
// {Slice: &entity.Slice{PlainText: "An adult female bowhead whale can grow to 22 meters long, while a male whale can grow to 18 meters long"}},
// },
// },
// Query: "世界上最大的鲸鱼是什么?",
// Query: "What is the largest whale in the world?"
// TopN: nil,
// })
// assert.NoError(t, err)
@@ -39,8 +39,8 @@ package vikingdb
// for _, item := range resp.Sorted {
// fmt.Println(item.Slice.PlainText, item.Score)
// }
// // 吉尼斯世界纪录网站数据显示蓝鲸是目前已知世界上最大的动物体长可达30米相当于一架波音737飞机的长度 0.6209664529733573
// // 一头成年雌性弓头鲸可以长到22米长而一头雄性鲸鱼可以长到18米长 0.4269785303456468
// // According to the Guinness World Records website, the blue whale is the largest known animal in the world, with a body length of up to 30 meters, equivalent to the length of a Boeing 737 aircraft 0.6209664529733573
// // An adult female bowhead whale can grow up to 22 meters long, while a male whale can grow up to 18 meters long 0.4269785303456468
//
// fmt.Println(resp.TokenUsage)
// // 95

View File

@@ -272,7 +272,7 @@ func (m *milvusManager) convertFields(fields []*searchstore.Field) ([]*mentity.F
if f.Type != searchstore.FieldTypeText {
return nil, fmt.Errorf("[convertFields] milvus only support text field indexing, field=%s, type=%d", f.Name, f.Type)
}
// indexing 时只有 content 存储原文
// When indexing, only the content field stores the original text
if f.Name == searchstore.FieldTextContent {
resp = append(resp, mentity.NewField().
WithName(f.Name).

View File

@@ -112,7 +112,7 @@ func (v *vkSearchStore) Retrieve(ctx context.Context, query string, opts ...retr
return nil, fmt.Errorf("[Retrieve] vikingdb failed to build filter, %w", err)
}
if filter != nil {
// 不支持跨 partition 召回,使用 filter 替代
// Cross-partition recall is not supported, use filter instead
searchOpts = searchOpts.SetFilter(filter)
}

View File

@@ -26,14 +26,14 @@ const (
)
type Options struct {
absRepoRoot string // yaml文件root前缀的绝对路径
useJson bool // 需要bind json文件不指定默认bind yaml
absRepoRoot string // Absolute path to root prefix of yaml file
useJson bool // Bind a json file; if not specified, yaml is bound by default
groups []string
}
type OptFunc func(o *Options)
// WithAbsRepoRoot 传入自定义指定读取绝对路径/xx下的config.<xx>.(yaml,json),例如/opt/tiger/xxx
// WithAbsRepoRoot specifies a custom absolute path /xx from which to read config.<xx>.(yaml,json), for example /opt/tiger/xxx
func WithAbsRepoRoot(absRepoRoot string) OptFunc {
return func(o *Options) {
if len(absRepoRoot) > 0 {
@@ -42,7 +42,7 @@ func WithAbsRepoRoot(absRepoRoot string) OptFunc {
}
}
// WithUseJSONType 需要查找xx.json结尾的文
// WithUseJSONType looks for files ending in xx.json
func WithUseJSONType(useJson bool) OptFunc {
return func(o *Options) {
o.useJson = useJson
@@ -66,7 +66,7 @@ func loadOpts(opts ...OptFunc) *Options {
var configers sync.Map // key: abs path, value: configer
// New 可传入local_config_dir指定读取自定义的绝对路径文件 `<local_config_dir>/config.<env>.<region>.<cluster>.yaml`
// New can take local_config_dir to read a custom absolute-path file `<local_config_dir>/config.<env>.<region>.<cluster>.yaml`
func getOrCreateConf(opts ...OptFunc) (configer, error) {
options := loadOpts(opts...)
if options.absRepoRoot == "" {
@@ -102,7 +102,7 @@ func getOrCreateConf(opts ...OptFunc) (configer, error) {
return nil, err
}
} else {
cfg, err = NewConfYaml(dir, options.groups) // 默认使用yaml
cfg, err = NewConfYaml(dir, options.groups) // Use yaml by default
if err != nil {
return nil, err
}
@@ -113,8 +113,8 @@ func getOrCreateConf(opts ...OptFunc) (configer, error) {
return cfg, nil
}
// JSONBind 不传dir值按默认路径优先级读取/opt/tiger/flowdevops/confcenter/psm/p.s.m/config.<env>.<region>.<cluster>.json
// 可使用WithAbsRepoRoot 传入自定义指定读取
// JSONBind, when no dir is passed, reads /opt/tiger/flowdevops/confcenter/psm/p.s.m/config.<env>.<region>.<cluster>.json following the default path priority
// A custom read location can be specified with WithAbsRepoRoot
func JSONBind(structPtr interface{}, opts ...OptFunc) error {
opts = append(opts, WithUseJSONType(true))
conf, err := getOrCreateConf(opts...)
@@ -124,8 +124,8 @@ func JSONBind(structPtr interface{}, opts ...OptFunc) error {
return bindAndValidate(structPtr, conf)
}
// YAMLBind 不传dir值按默认路径按优先级读取/opt/tiger/flowdevops/confcenter/psm/p.s.m/config.<env>.<region>.<cluster>.yaml
// 可使用WithAbsRepoRoot 传入自定义指定读取
// YAMLBind, when no dir is passed, reads /opt/tiger/flowdevops/confcenter/psm/p.s.m/config.<env>.<region>.<cluster>.yaml following the default path priority
// A custom read location can be specified with WithAbsRepoRoot
func YAMLBind(structPtr interface{}, opts ...OptFunc) error {
conf, err := getOrCreateConf(opts...)
if err != nil {
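The hunk above ends inside YAMLBind; for reference, a hedged usage sketch of the binding API documented in the comments above. AppConf and the path are illustrative, not taken from this repository.

type AppConf struct {
	Port int    `yaml:"port"`
	Name string `yaml:"name"`
}

func loadAppConf() (*AppConf, error) {
	var c AppConf
	// Reads config.<env>.<region>.<cluster>.yaml under the given root.
	if err := YAMLBind(&c, WithAbsRepoRoot("/opt/tiger/xxx")); err != nil {
		return nil, err
	}
	return &c, nil
}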

View File

@@ -267,12 +267,12 @@ func (c *es7Client) query2ESQuery(q *Query) map[string]any {
base = map[string]any{}
}
// 若没有 BoolQuery直接返回 base query
// If there is no BoolQuery, return the base query directly
if q.Bool == nil {
return base
}
// 如果有 BoolQuery把 base 作为 BoolQuery 的一部分(或为空)
// If there is a BoolQuery, fold base into it (base may be empty)
boolQuery := map[string]any{}
appendBool := func(key string, queries []Query) {
@@ -296,7 +296,7 @@ func (c *es7Client) query2ESQuery(q *Query) map[string]any {
appendBool("must_not", q.Bool.MustNot)
appendBool("should", q.Bool.Should)
// 如果 base 不是空,作为一个 filter 附加进去
// If base is not empty, append it as a filter
if len(base) > 0 {
if _, ok := boolQuery["filter"]; !ok {
boolQuery["filter"] = []map[string]any{}

View File

@@ -132,7 +132,7 @@ func (c *es8Client) query2ESQuery(q *Query) *types.Query {
Wildcard: map[string]types.WildcardQuery{
q.KV.Key: {
Value: ptr.Of(fmt.Sprintf("*%s*", q.KV.Value)),
CaseInsensitive: ptr.Of(true), // 忽略大小写
CaseInsensitive: ptr.Of(true), // Ignore case
},
},
}

View File

@@ -38,7 +38,7 @@ type consumerImpl struct {
func RegisterConsumer(broker string, topic, groupID string, handler eventbus.ConsumerHandler, opts ...eventbus.ConsumerOpt) error {
config := sarama.NewConfig()
config.Consumer.Offsets.Initial = sarama.OffsetOldest // 从最早消息开始消费
config.Consumer.Offsets.Initial = sarama.OffsetOldest // Start consuming from the earliest message
config.Consumer.Group.Session.Timeout = 30 * time.Second
o := &eventbus.ConsumerOption{}
@@ -102,7 +102,7 @@ func (c *consumerImpl) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sara
continue
}
sess.MarkMessage(msg, "") // TODO: 消费策略可以配置
sess.MarkMessage(msg, "") // TODO: Consumer policies can be configured
}
return nil
}

View File

@@ -70,7 +70,7 @@ func RegisterConsumer(nameServer, topic, group string, consumerHandler eventbus.
return nil
}
// 自定义 Handler处理收到的每条消息
// Custom Handler that processes each received message
type MessageHandler struct {
Topic string
Group string

View File

@@ -90,7 +90,7 @@ func RegisterConsumer(nameServer, topic, group string, consumerHandler eventbus.
err = consumerHandler.HandleMessage(ctx, msg)
if err != nil {
logs.CtxErrorf(ctx, "[Subscribe] handle msg failed, topic : %s , group : %s, err: %v \n", msg.Topic, msg.Group, err)
return consumer.ConsumeRetryLater, err // TODO: 策略可以可以配置
return consumer.ConsumeRetryLater, err // TODO: the policy could be made configurable
}
fmt.Printf("subscribe callback: %v \n", msgArr[i])

View File

@@ -34,7 +34,7 @@ const (
type IDGenerator = idgen.IDGenerator
func New(client *redis.Client) (idgen.IDGenerator, error) {
// 初始化代码。
// Initialization code.
return &idGenImpl{
cli: client,
}, nil
@@ -60,7 +60,7 @@ func (i *idGenImpl) GenMultiIDs(ctx context.Context, counts int) ([]int64, error
leftNum := int64(counts)
lastMs := int64(0)
ids := make([]int64, 0, counts)
svrID := int64(0) // 一个 server id 全部是 0
svrID := int64(0) // Single server: the server id is always 0
for idx := int64(0); leftNum > 0 && idx < maxTimeAddrTimes; idx++ {
ms := maxInt64(i.GetIDTimeMs(), lastMs)
@@ -131,12 +131,12 @@ func (i *idGenImpl) GetIDTimeMs() int64 {
}
func (i *idGenImpl) Expire(ctx context.Context, key string) {
// 暂时忽略错误
// Temporarily ignore errors
_, _ = i.cli.Expire(ctx, key, counterKeyExpirationTime).Result()
}
func genIDKey(space string, svrID int64, ms int64) string {
// 此 Key 的格式一旦确定,便不能再变化
// Once the format of this key is determined, it cannot be changed
return fmt.Sprintf("id_generator:%v:%v:%v", space, svrID, ms)
}
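Illustrative only: a key produced by genIDKey for a hypothetical space and timestamp, showing the fixed format noted above.

func exampleIDKey() string {
	// For space "order", server id 0 and millisecond 1700000000000 this returns
	// "id_generator:order:0:1700000000000".
	return genIDKey("order", 0, 1700000000000)
}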

View File

@@ -63,7 +63,7 @@ func (v *veImageX) GetUploadAuth(ctx context.Context, opt ...imagex.UploadAuthOp
}
func (v *veImageX) GetUploadAuthWithExpire(ctx context.Context, expire time.Duration, opt ...imagex.UploadAuthOpt) (*imagex.SecurityToken, error) {
// opt UploadAuthOption
// Convert opt into an UploadAuthOption
option := &imagex.UploadAuthOption{}
for _, o := range opt {
o(option)

View File

@@ -550,7 +550,7 @@ func (m *mysqlService) UpsertData(ctx context.Context, req *rdb.UpsertDataReques
}
}
// ON DUPLICATE KEY UPDATE部分
// ON DUPLICATE KEY UPDATE clause
updateClauses := make([]string, 0, len(fields))
for _, field := range fields {
isKey := false
@@ -622,7 +622,7 @@ func (m *mysqlService) getTablePrimaryKeys(ctx context.Context, tableName string
return primaryKeys, nil
}
// calculateInsertedUpdated 函数保持不变
// calculateInsertedUpdated function remains unchanged
func calculateInsertedUpdated(affectedRows int64, batchSize int) (int64, int64, int64) {
updated := int64(0)
inserted := affectedRows
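The hunk above ends inside calculateInsertedUpdated; as background, a hedged sketch of the arithmetic such a split can rely on. With MySQL's INSERT ... ON DUPLICATE KEY UPDATE, each inserted row counts as 1 affected row and each updated row as 2, so under the simplifying assumption that every row in the batch is either inserted or updated, the split follows from two equations. The repository's function also reports unchanged rows, which this sketch omits.

func splitAffectedRows(affectedRows int64, batchSize int) (inserted, updated int64) {
	// inserted + updated   = batchSize
	// inserted + 2*updated = affectedRows
	updated = affectedRows - int64(batchSize)
	inserted = int64(batchSize) - updated
	return inserted, updated
}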

View File

@@ -64,7 +64,7 @@ func getTosClient(ctx context.Context, ak, sk, bucketName, endpoint, region stri
bucketName: bucketName,
}
// 创建存储桶
// Create bucket
err = t.CheckAndCreateBucket(ctx)
if err != nil {
return nil, err
@@ -82,14 +82,14 @@ func New(ctx context.Context, ak, sk, bucketName, endpoint, region string) (stor
}
func (t *tosClient) test() {
// 测试上传
// test upload
objectKey := fmt.Sprintf("test-%s.txt", time.Now().Format("20060102150405"))
err := t.PutObject(context.Background(), objectKey, []byte("hello world"))
if err != nil {
logs.CtxErrorf(context.Background(), "PutObject failed, objectKey: %s, err: %v", objectKey, err)
}
// 测试下载
// test download
content, err := t.GetObject(context.Background(), objectKey)
if err != nil {
logs.CtxErrorf(context.Background(), "GetObject failed, objectKey: %s, err: %v", objectKey, err)
@@ -97,7 +97,7 @@ func (t *tosClient) test() {
logs.CtxInfof(context.Background(), "GetObject content: %s", string(content))
// 测试获取URL
// Test Get URL
url, err := t.GetObjectUrl(context.Background(), objectKey)
if err != nil {
logs.CtxErrorf(context.Background(), "GetObjectUrl failed, objectKey: %s, err: %v", objectKey, err)
@@ -105,7 +105,7 @@ func (t *tosClient) test() {
logs.CtxInfof(context.Background(), "GetObjectUrl url: %s", url)
// 测试删除
// test delete
err = t.DeleteObject(context.Background(), objectKey)
if err != nil {
logs.CtxErrorf(context.Background(), "DeleteObject failed, objectKey: %s, err: %v", objectKey, err)
@@ -127,7 +127,7 @@ func (t *tosClient) CheckAndCreateBucket(ctx context.Context) error {
}
if serverErr.StatusCode == http.StatusNotFound {
// 存储桶不存在
// Bucket does not exist
logs.CtxInfof(ctx, "Bucket not found.")
resp, err := client.CreateBucketV2(context.Background(), &tos.CreateBucketV2Input{
Bucket: bucketName,
@@ -163,7 +163,7 @@ func (t *tosClient) GetObject(ctx context.Context, objectKey string) ([]byte, er
client := t.client
bucketName := t.bucketName
// 下载数据到内存
// Download data to memory
getOutput, err := client.GetObjectV2(ctx, &tos.GetObjectV2Input{
Bucket: bucketName,
Key: objectKey,
@@ -188,7 +188,7 @@ func (t *tosClient) DeleteObject(ctx context.Context, objectKey string) error {
client := t.client
bucketName := t.bucketName
// 删除存储桶中指定对象
// Delete the specified object in the bucket
_, err := client.DeleteObjectV2(ctx, &tos.DeleteObjectV2Input{
Bucket: bucketName,
Key: objectKey,