id: 2007 # Model entity ID; data with the same ID will not be overwritten
name: Gemini-2.5-Flash
description: test gemini description
meta:
  id: 107
default_parameters:
  -
    name: temperature
    label:
      zh: 生成随机性
      en: Temperature
    desc:
      zh: '- **temperature**: 调高温度会使得模型的输出更多样性和创新性,反之,降低温度会使输出内容更加遵循指令要求但减少多样性。建议不要与“Top p”同时调整。'
      en: '**Temperature**:\n\n- When you increase this value, the model outputs more diverse and innovative content; when you decrease it, the model outputs less diverse content that strictly follows the given instructions.\n- It is recommended not to adjust this value with \"Top p\" at the same time.'
    type: float
    min: '0'
    max: '1'
    precision: 1
    default_val:
      default_val: '1.0'
      creative: '1'
      balance: '0.8'
      precise: '0.3'
    style:
      widget: slider
      label:
        zh: 生成多样性
        en: Generation diversity
  -
    name: max_tokens
    label:
      zh: 最大回复长度
      en: Response max length
    desc:
      zh: '控制模型输出的Tokens 长度上限。通常 100 Tokens 约等于 150 个中文汉字。'
      en: 'You can specify the maximum length of the tokens output through this value. Typically, 100 tokens are approximately equal to 150 Chinese characters.'
    type: int
    min: '1'
    max: '4096'
    precision: 0
    default_val:
      default_val: '4096'
    style:
      widget: slider
      label:
        zh: 输入及输出设置
        en: Input and output settings
  -
    name: top_p
    label:
      zh: Top P
      en: Top P
    desc:
      zh: '- **Top p 为累计概率**: 模型在生成输出时会从概率最高的词汇开始选择,直到这些词汇的总概率累积达到Top p 值。这样可以限制模型只选择这些高概率的词汇,从而控制输出内容的多样性。建议不要与“生成随机性”同时调整。'
      en: '**Top P**:\n\n- An alternative to sampling with temperature, where only tokens within the top p probability mass are considered. For example, 0.1 means only the top 10% probability mass tokens are considered.\n- We recommend altering this or temperature, but not both.'
    type: float
    min: '0'
    max: '1'
    precision: 2
    default_val:
      default_val: '0.7'
    style:
      widget: slider
      label:
        zh: 生成多样性
        en: Generation diversity
  -
    name: response_format
    label:
      zh: 输出格式
      en: Response format
    desc:
      zh: '- **文本**: 使用普通文本格式回复\n- **JSON**: 将引导模型使用JSON格式输出'
      en: '**Response Format**:\n\n- **Text**: Uses plain text format for replies\n- **JSON**: Guides the model to output in JSON format'
    type: int
    precision: 0
    default_val:
      default_val: '0'
    options:
      -
        value: '0'
        label: 'Text'
      -
        value: '2'
        label: 'JSON'
    style:
      widget: radio_buttons
      label:
        zh: 输入及输出设置
        en: Input and output settings
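
# Illustrative sketch only, not part of the original configuration:
# the commented-out entry below shows how a further parameter could follow the
# same schema as the entries above (name / label / desc / type / min / max /
# precision / default_val / style). The parameter name "frequency_penalty" and
# all of its values are assumptions; enable it only if the target model
# actually exposes such a parameter.
#   -
#     name: frequency_penalty
#     label:
#       zh: 频率惩罚
#       en: Frequency penalty
#     desc:
#       zh: '正值会根据词汇在已生成文本中出现的频率进行惩罚,降低模型逐字重复的可能性。'
#       en: 'Positive values penalize tokens based on how frequently they already appear in the generated text, reducing verbatim repetition.'
#     type: float
#     min: '-2'
#     max: '2'
#     precision: 1
#     default_val:
#       default_val: '0'
#     style:
#       widget: slider
#       label:
#         zh: 生成多样性
#         en: Generation diversity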