chore: replace all cn comments of fe to en version by volc api (#320)

This commit is contained in:
tecvan
2025-07-31 10:32:15 +08:00
committed by GitHub
parent 716ec0cba8
commit 71f6245a01
2960 changed files with 15545 additions and 15545 deletions

View File

@@ -117,7 +117,7 @@ function findReasoningContent(
}
/**
* output 属性排序,保证 reasoning content 在最下面
* Output attribute sort to ensure that the reasoning content is at the bottom
*/
export const sortOutputs = (
value: ViewVariableTreeNode[] | undefined,
@@ -144,7 +144,7 @@ export const sortOutputs = (
};
/**
* 根据模型类型获取输出
* Get output based on model type
* @param modelType
* @param outputs
* @param isBatch
@@ -174,7 +174,7 @@ export function getOutputs({
}
/**
* 初始化时格式化推理内容为只读
* Format inference content as read-only during initialization
* @param outputs
* @param isBatch
* @returns
@@ -196,7 +196,7 @@ export function formatReasoningContentOnInit({
let newOutputs: ViewVariableTreeNode[] | undefined = outputs;
if (modelType && modelsService.isCoTModel(modelType)) {
// 后端返回的没有readonly字段需要前端处理, 取第一个类型是stringreasoning_content
// The backend response carries no readonly field, so the frontend must add it: take the first reasoning_content whose type is string
const reasoningContent = findReasoningContent(
outputs,
isBatch,
@@ -206,7 +206,7 @@ export function formatReasoningContentOnInit({
reasoningContent.readonly = true;
reasoningContent.readonlyTooltip = readonlyTooltip;
} else {
// 存量数据兼容,如果是推理模型,则添加推理内容字段
// The existing data is compatible, if it is an inference model, add the inference content field
newOutputs = addReasoningContent(outputs, isBatch);
}
}
@@ -215,7 +215,7 @@ export function formatReasoningContentOnInit({
}
/**
* 提交时格式化推理内容移除readonly
* Formats the reasoning content on submit: removes the readonly flag
* @param outputs
* @param isBatch
* @returns
@@ -245,19 +245,19 @@ export function formatReasoningContentOnSubmit(
}
/**
 * Removes the readonly reasoning_content entries from the outputs.
 */
export const omitSystemReasoningContent = (
  value: ViewVariableTreeNodeWithReadonly[] | undefined,
  isBatch?: boolean,
) => {
  // Batch mode: the readonly reasoning_content lives inside each node's children.
  if (isBatch) {
    return value?.map(node => ({
      ...node,
      children: node?.children?.filter(
        child => !isSystemReasoningContent(child),
      ),
    }));
  }
  // Single mode: filter the readonly reasoning_content out of the top level.
  return value?.filter(node => !isSystemReasoningContent(node));
};

View File

@@ -17,7 +17,7 @@
import { useForm } from '@flowgram-adapter/free-layout-editor';
/**
* 获取模型type
* Get model type
*/
export function useModelType() {
const form = useForm();

View File

@@ -91,7 +91,7 @@ import {
import styles from './index.module.less';
/** 默认会话轮数 */
/** Default session rounds */
const DEFAULT_CHAT_ROUND = 3;
const Render = ({ form }: FormRenderProps<FormData>) => {
@@ -384,7 +384,7 @@ export const LLM_FORM_META: FormMetaV2<FormData> = {
const models = modelsService.getModels();
let llmParam = get(value, 'inputs.llmParam');
// 初次拖入画布时:从后端返回值里,解析出来默认值。
// When first dragged into the canvas: Parse out the default value from the backend return value.
if (!llmParam) {
llmParam = getDefaultLLMParams(models);
}
@@ -412,14 +412,14 @@ export const LLM_FORM_META: FormMetaV2<FormData> = {
? [{ name: 'input', input: { type: ValueExpressionType.REF } }]
: inputParameters,
chatHistorySetting: {
// 是否开启会话历史
// Whether to open session history
enableChatHistory:
get(
llmParam.find(item => item.name === 'enableChatHistory'),
'input.value.content',
) || false,
// 会话轮数,默认为 3 轮
// Number of session rounds, the default is 3 rounds
chatHistoryRound: Number(
get(
llmParam.find(item => item.name === 'chatHistoryRound'),
@@ -438,8 +438,8 @@ export const LLM_FORM_META: FormMetaV2<FormData> = {
modelType: model.modelType as number,
}),
// model 会根据 llmParam 重新填充值,此时也会将之前的 chatHistoryRound 也填充上去
// 由于在 submit 时会重新添加一个 chatHistoryRound这里先忽略掉避免出现问题
// The model will re-fill the value according to llmParam, and the previous chatHistoryRound will also be filled at this time.
// Since a chatHistoryRound will be re-added when submitting, it will be ignored here to avoid problems.
model: omit(model, ['chatHistoryRound']),
$$prompt_decorator$$: {
prompt,
@@ -453,7 +453,7 @@ export const LLM_FORM_META: FormMetaV2<FormData> = {
fcParam: formatFcParamOnInit(get(value, 'inputs.fcParam')),
};
// 获取后端下发 version 信息
// Get the version information sent by the backend
const schema = JSON.parse(
playgroundContext.globalState.info?.schema_json || '{}',
);
@@ -462,7 +462,7 @@ export const LLM_FORM_META: FormMetaV2<FormData> = {
parseInt(curNode?.data?.version) >= parseInt(NEW_NODE_DEFAULT_VERSION)
? curNode?.data?.version
: NEW_NODE_DEFAULT_VERSION;
// LLM 节点订正需求 新增节点默认为 3
// LLM node revision requirement: newly added nodes default to version 3
set(initValue, 'version', versionFromBackend);
return initValue;
@@ -488,7 +488,7 @@ export const LLM_FORM_META: FormMetaV2<FormData> = {
const enableChatHistory = BlockInput.createBoolean(
'enableChatHistory',
// 工作流没有会话历史,需要设置成 false会话流按照实际勾选的来
// Workflows have no chat history, so this must be false; chatflows follow the user's actual checkbox state.
globalState.isChatflow
? Boolean(
get(
@@ -523,8 +523,8 @@ export const LLM_FORM_META: FormMetaV2<FormData> = {
},
outputs: formatReasoningContentOnSubmit(value.outputs, isBatch),
/**
* - LLM 节点 format优化」需求,将 outputs 内容整合到 prompt 中限制输出格式,后端需要标志位区分逻辑,版本为 2
* - LLM 节点订正需求 兜底逻辑」,版本为 3
* - "LLM node format optimization" requirement, integrate the output content into the prompt to limit the output format, the backend needs flag distinction logic, version 2
* - "LLM node revised requirements fallback logic", version 3
*/
version: NEW_NODE_DEFAULT_VERSION,

View File

@@ -25,7 +25,7 @@ import { type NodeTestMeta } from '@/test-run-kit';
export const test: NodeTestMeta = {
generateRelatedContext(node, context) {
const { isInProject, isChatflow } = context;
/** 不在会话流LLM 节点无需关联环境 */
/** Not in session flow, LLM nodes do not need to be associated with the environment */
const formData = node
.getData(FlowNodeFormData)
.formModel.getFormItemValueByPath('/');

View File

@@ -36,7 +36,7 @@ interface FunctionCallParamDTO extends BoundSkills {
type FunctionCallParamVO = BoundSkills;
/**
* fc参数后端转前端
* FC parameter backend to frontend
* @param fcParamDTO
* @returns
*/
@@ -64,7 +64,7 @@ export function formatFcParamOnInit(fcParamDTO?: FunctionCallParamDTO) {
}
/**
* fc参数前端转后端
* FC parameter front-end to back-end
* @param fcParamVO
* @returns
*/

View File

@@ -106,7 +106,7 @@ export const Skills: FC<SkillsProps> = props => {
});
}
// 表单的onChange 值传递是异步,所以这里延迟下
// The onChange value passing of the form is asynchronous, so there is a delay here
setTimeout(() => {
refetch();
}, 10);

View File

@@ -71,7 +71,7 @@ export const SkillModal: FC<SkillModalProps> = props => {
SkillKnowledgeSiderCategory.Library,
);
// plugin 添加弹窗
// Plugin Add pop-up window
const pluginModalFrom = projectId
? From.ProjectWorkflow
: From.WorkflowAddNode;

View File

@@ -41,10 +41,10 @@ export type WorkflowFCSetting = FCWorkflowSetting;
export interface BoundWorkflowItem {
plugin_id: string;
workflow_id: string;
// 如果是project 填project version资源库填plugin version
// If it is a project, fill in the project version, and fill in the plugin version in the resource library.
plugin_version: string;
workflow_version: string;
// 如果是project 就填true资源库 false
// Fill in true if it is project, false for resource library
is_draft: boolean;
fc_setting?: WorkflowFCSetting;
}
@@ -53,9 +53,9 @@ export interface BoundPluginItem {
plugin_id: string;
api_id: string;
api_name: string;
// 如果是project 填project version资源库填plugin version
// If it is a project, fill in the project version, and fill in the plugin version in the resource library.
plugin_version: string;
// 如果是project 就填true资源库 false
// Fill in true if it is project, false for resource library
is_draft: boolean;
fc_setting?: PluginFCSetting;
}

View File

@@ -21,7 +21,7 @@ import { WorkflowModelsService } from '@/services';
import { useModelType } from '../hooks/use-model-type';
/**
* 判断模型是不是支持技能
* Determines whether the model supports skills
*/
export function useModelSkillDisabled() {
const modelType = useModelType();

View File

@@ -41,13 +41,13 @@ import {
// sub_type: 1,
// location: 1, // Path = 1, Query = 2, Body = 3, Header = 4,
// is_required: false,
// local_default: '', // 默认值
// local_disable: false, // 是否启用
// local_default : '', // default
// local_disable: false,//enabled
// assist_type: 1, //DEFAULT = 1, IMAGE = 2, DOC = 3,CODE = 4,PPT = 5, TXT = 6, EXCEL = 7, AUDIO = 8, ZIP = 9,VIDEO = 10,
// },
// ],
// local_default: '', // 默认值
// local_disable: false, // 是否启用
// local_default : '', // default
// local_disable: false,//enabled
// assist_type: 1, //DEFAULT = 1, IMAGE = 2, DOC = 3,CODE = 4,PPT = 5, TXT = 6, EXCEL = 7, AUDIO = 8, ZIP = 9,VIDEO = 10,
// },
// ],
@@ -65,18 +65,18 @@ import {
// sub_type: 1,
// location: 1, // Path = 1, Query = 2, Body = 3, Header = 4,
// is_required: false,
// local_default: '', // 默认值
// local_disable: false, // 是否启用
// local_default : '', // default
// local_disable: false,//enabled
// assist_type: 1, //DEFAULT = 1, IMAGE = 2, DOC = 3,CODE = 4,PPT = 5, TXT = 6, EXCEL = 7, AUDIO = 8, ZIP = 9,VIDEO = 10,
// },
// ],
// local_default: '', // 默认值
// local_disable: false, // 是否启用
// local_default : '', // default
// local_disable: false,//enabled
// assist_type: 1, //DEFAULT = 1, IMAGE = 2, DOC = 3,CODE = 4,PPT = 5, TXT = 6, EXCEL = 7, AUDIO = 8, ZIP = 9,VIDEO = 10,
// },
// ],
// response_style: {
// mode: 1, // Raw = 0, // 原始输出 Card = 1, // 渲染成卡片 Template = 2, // 包含变量的模板内容用jinja2渲染 TODO
// mode: 1, // Raw = 0: raw output; Card = 1: render as a card; Template = 2: template content with variables, rendered with jinja2 (TODO)
// },
// },
// workflow_fc_setting: {
@@ -94,13 +94,13 @@ import {
// sub_type: 1,
// location: 1, // Path = 1, Query = 2, Body = 3, Header = 4,
// is_required: false,
// local_default: '', // 默认值
// local_disable: false, // 是否启用
// local_default : '', // default
// local_disable: false,//enabled
// assist_type: 1, //DEFAULT = 1, IMAGE = 2, DOC = 3,CODE = 4,PPT = 5, TXT = 6, EXCEL = 7, AUDIO = 8, ZIP = 9,VIDEO = 10,
// },
// ],
// local_default: '', // 默认值
// local_disable: false, // 是否启用
// local_default : '', // default
// local_disable: false,//enabled
// assist_type: 1, //DEFAULT = 1, IMAGE = 2, DOC = 3,CODE = 4,PPT = 5, TXT = 6, EXCEL = 7, AUDIO = 8, ZIP = 9,VIDEO = 10,
// },
// ],
@@ -118,30 +118,30 @@ import {
// sub_type: 1,
// location: 1, // Path = 1, Query = 2, Body = 3, Header = 4,
// is_required: false,
// local_default: '', // 默认值
// local_disable: false, // 是否启用
// local_default : '', // default
// local_disable: false,//enabled
// assist_type: 1, //DEFAULT = 1, IMAGE = 2, DOC = 3,CODE = 4,PPT = 5, TXT = 6, EXCEL = 7, AUDIO = 8, ZIP = 9,VIDEO = 10,
// },
// ],
// local_default: '', // 默认值
// local_disable: false, // 是否启用
// local_default : '', // default
// local_disable: false,//enabled
// assist_type: 1, //DEFAULT = 1, IMAGE = 2, DOC = 3,CODE = 4,PPT = 5, TXT = 6, EXCEL = 7, AUDIO = 8, ZIP = 9,VIDEO = 10,
// },
// ],
// response_style: {
// mode: 1, // Raw = 0, // 原始输出 Card = 1, // 渲染成卡片 Template = 2, // 包含变量的模板内容用jinja2渲染 TODO
// mode: 1, // Raw = 0: raw output; Card = 1: render as a card; Template = 2: template content with variables, rendered with jinja2 (TODO)
// },
// },
// dataset_fc_setting: {
// top_k: 5, // 召回数量
// min_score: 0.46, // 召回的最小相似度阈值
// auto: true, // 是否自动召回
// search_mode: 1, // 搜索策略
// no_recall_reply_mode: 1, // 无召回回复mode默认0
// top_k: 5, // number of recalled items
// min_score: 0.46, // minimum similarity threshold for recall
// auto: true, // whether to recall automatically
// search_mode: 1, // search strategy
// no_recall_reply_mode: 1, // reply mode when nothing is recalled; defaults to 0
// no_recall_reply_customize_prompt:
// '抱歉,您的问题超出了我的知识范围,并且无法在当前阶段回答', // 无召回回复时自定义promptNoRecallReplyMode=1时生效
// show_source: true, // 是否展示来源
// show_source_mode: 1, // 来源展示方式 默认值0 卡片列表方式
// 'Sorry, your question is beyond my knowledge and cannot be answered at this stage', // custom prompt when nothing is recalled; takes effect when NoRecallReplyMode = 1
// show_source: true, // whether to show the source
// show_source_mode: 1, // source display style; defaults to 0 (card list)
// },
// }

View File

@@ -25,7 +25,7 @@ import {
import { PromiseLimiter } from '@/utils/promise-limiter';
// 限制并发因为同一个流程上可能会有很多LLM节点同时请求
// Limit concurrency: many LLM nodes in the same workflow may send requests simultaneously.
const CONCURRENCY = 3;
const limiter = new PromiseLimiter(CONCURRENCY, true);

View File

@@ -17,15 +17,15 @@
import { type BoundSkills } from './types';
/**
 * Determines from the projectId whether the skill refers to a draft.
 * Plugins from the resource library have project_id === '0'.
 *
 * @param projectId - owning project id; undefined, '' or '0' means "not a project draft"
 * @returns true only for a non-empty projectId other than '0'
 */
export function isDraftByProjectId(projectId?: string): boolean {
  // Redundant `? true : false` removed; coerce the truthy check directly.
  return !!projectId && projectId !== '0';
}
/**
* 技能是否为空
* Is the skill empty?
* @param value
* @returns
*/
@@ -38,7 +38,7 @@ export function isSkillsEmpty(value: BoundSkills) {
}
/**
* 获取技能查询参数
* Get skill query parameters
* @param fcParam
* @returns
*/

View File

@@ -36,7 +36,7 @@ export const UserPrompt = ({ field, fieldState }) => {
const isUserPromptRequired = curModel?.is_up_required ?? false;
useEffect(() => {
// TODO: 临时方案,待节点引擎提供新 api 后替换
// TODO: Temporary solution, replaced after the node engine provides a new API
field._fieldModel.validate();
}, [isUserPromptRequired]);

View File

@@ -45,7 +45,7 @@ const getDefaultModels = (modelMeta: Model): InputValueDTO[] => {
modelMeta?.model_params?.forEach(p => {
const k = camelCase(p.name) as string;
const { type } = p;
// 优先取平衡,自定义兜底
// Prefer the Balance preset's default; fall back to the Customize value.
const defaultValue =
p.default_val[GenerationDiversity.Balance] ??
p.default_val[GenerationDiversity.Customize];
@@ -79,8 +79,8 @@ export const getDefaultLLMParams = (models: Model[]) => {
export const reviseLLMParamPair = (d: InputValueDTO): [string, unknown] => {
let k = d?.name || '';
// TODO 前端不依赖这个字段,确认后端无依赖后,可删除
// 兼容一个历史悠久的拼写错误
// TODO: the frontend does not rely on this field; once the backend is confirmed not to either, it can be deleted.
// Compatible with a long-standing spelling error ('modleName').
if (k === 'modleName') {
k = 'modelName';
}
@@ -112,7 +112,7 @@ export const modelItemToBlockInput = (
// eslint-disable-next-line @typescript-eslint/naming-convention
let _k = k;
// TODO 前端不依赖这个字段,确认后端无依赖后,可删除
// TODO: the frontend does not rely on this field; once the backend is confirmed not to either, it can be deleted.
if (_k === 'modelName') {
_k = 'modleName';
}
@@ -145,7 +145,7 @@ export const addSKillFromLibrary = (
plugin_id: detail?.plugin_id as string,
api_id: detail?.api_id as string,
api_name: detail?.name as string,
plugin_version: '', // 和 @张友松 确认 不传版本
plugin_version: '', // confirmed with @Zhang Yousong: do not send the version
is_draft: isDraftByProjectId(detail?.project_id),
});
@@ -162,7 +162,7 @@ export const addSKillFromLibrary = (
plugin_id: detail?.plugin_id as string,
workflow_id: detail?.workflow_id as string,
plugin_version: '',
workflow_version: '', // 和 @张友松 确认 不传版本
workflow_version: '', // confirmed with @Zhang Yousong: do not send the version
is_draft: isDraftByProjectId(detail?.project_id),
});

View File

@@ -24,7 +24,7 @@ import { isVisionEqual, isVisionInput } from '../vision';
export const llmInputNameValidator = ({ value, formValues, name }) => {
const validatorRule = nameValidationRule;
/** 命名校验 */
/** name check */
if (!validatorRule.test(value)) {
return I18n.t('workflow_detail_node_error_format');
}
@@ -43,13 +43,13 @@ export const llmInputNameValidator = ({ value, formValues, name }) => {
item => item.name === value && isVisionEqual(item, inputValue),
);
// 都是输入或者视觉理解的场景直接返回重名
// If both entries are plain inputs, or both are vision inputs, report a duplicate name directly.
if (sameVisionInputs.length > 1) {
return I18n.t('workflow_detail_node_input_duplicated');
}
// 输入和视觉理解参数重名的场景,返回不能和视觉理解参数重名
// 视觉理解参数和输入重名,返回不能和输入重名
// If an input shares its name with a vision parameter, report that it cannot duplicate a vision parameter's name;
// if a vision parameter shares its name with an input, report that it cannot duplicate an input's name.
const differentVisionInputs = inputValues.filter(
item => item.name === value && !isVisionEqual(item, inputValue),
);

View File

@@ -22,11 +22,11 @@ import { type Validate } from '@flowgram-adapter/free-layout-editor';
import { omitSystemReasoningContent, REASONING_CONTENT_NAME } from '../cot';
/** 变量命名校验规则 */
/** Variable Naming Validation Rules */
const outputTreeValidationRule =
/^(?!.*\b(true|false|and|AND|or|OR|not|NOT|null|nil|If|Switch)\b)[a-zA-Z_][a-zA-Z_$0-9]*$/;
/** 校验逻辑 */
/** check logic */
// eslint-disable-next-line @typescript-eslint/naming-convention
const OutputTreeMetaSchema = z.lazy(() =>
z
@@ -57,26 +57,26 @@ const OutputTreeMetaSchema = z.lazy(() =>
);
const omitErrorBody = (value, isBatch) => {
  // Predicate shared by both branches: keep everything except errorBody.
  const keep = item => item?.name !== 'errorBody';
  // Batch mode: errorBody entries live inside each node's children.
  if (isBatch) {
    return value?.map(node => ({
      ...node,
      children: node?.children?.filter(keep),
    }));
  }
  // Single mode: errorBody entries sit at the top level.
  return value?.filter(keep);
};
export const llmOutputTreeMetaValidator: Validate = ({ value, formValues }) => {
/**
* 判断错误异常处理是否打开,如果打开需要过滤掉 errorBody 后做校验
* Determine whether the error exception handling is turned on. If it is turned on, you need to filter out the errorBody and check it.
*/
const { settingOnErrorIsOpen = false } = (get(formValues, 'settingOnError') ??
{}) as { settingOnErrorIsOpen?: boolean };
/**
* 根据 batch 数据判断,当前是否处于批处理状态
* According to the batch data, whether it is currently in the batch state
*/
const batchValue = get(formValues, 'batchMode');
const isBatch = batchValue === 'batch';

View File

@@ -38,7 +38,7 @@ interface VisionInputFieldProps {
}
/**
* 输入字段
* input field
*/
export const VisionInputField: FC<VisionInputFieldProps> = ({
readonly,

View File

@@ -40,7 +40,7 @@ interface VisionNameFieldProps {
}
/**
* 输入名称字段
* Input name field
*/
export const VisionNameField: FC<VisionNameFieldProps> = ({
inputField,

View File

@@ -33,7 +33,7 @@ interface VisionProps {
}
/**
* 输入值字段
* input value field
* @returns */
export const VisionValueField: FC<VisionProps> = ({ enabledTypes, name }) => {
const disabledTypes = ViewVariableType.getComplement([
@@ -57,7 +57,7 @@ export const VisionValueField: FC<VisionProps> = ({ enabledTypes, name }) => {
onChange={v => {
const expression = v as ValueExpression;
if (!expression) {
// 默认值需要带raw meta不然无法区分是不是视觉理解
// The default value needs to be accompanied by raw meta, otherwise it is impossible to distinguish whether it is visual understanding or not.
childInputField?.onChange(DEFUALT_VISION_INPUT);
return;
}

View File

@@ -39,7 +39,7 @@ interface VisionProps {
}
/**
* 视觉理解配置
* Visual understanding configuration
*/
export const Vision: FC<VisionProps> = () => {
const enabledTypes = useModelEnabledTypes();

View File

@@ -22,7 +22,7 @@ import { WorkflowModelsService } from '@/services';
import { useModelType } from '../../hooks/use-model-type';
/**
* 模型支持的数据类型
* Data types supported by the model
*/
export function useModelEnabledTypes() {
const modelType = useModelType();

View File

@@ -19,7 +19,7 @@ import { type InputValueVO } from '@coze-workflow/base';
import { isVisionInput } from './is-vision-input';
/**
* 判断是否是相同的输入类型
* Determine if they are the same input type
* @param value1
* @param value2
* @returns

View File

@@ -17,7 +17,7 @@
import { type InputValueVO } from '@coze-workflow/base';
/**
 * Tells whether the given input value is a vision (visual understanding) input.
 */
export const isVisionInput = (value: InputValueVO): boolean =>
  Boolean(value?.input?.rawMeta?.isVision);