---
# Model registry entry: Doubao-1.5-Thinking-Pro (Volcengine Ark protocol).
# Defines display metadata, user-tunable generation parameters, and the
# connector configuration consumed by the runtime.
id: 60040
name: Doubao-1.5-Thinking-Pro
icon_uri: default_icon/doubao_v2.png
icon_url: ""
description:
    zh: doubao-1.5 全新深度思考模型,在数学、编程、科学推理等专业领域及创意写作等通用任务中表现突出,在 AIME 2024、Codeforces、GPQA 等多项权威基准上达到或接近业界第一梯队水平。
    en: doubao-1.5 is a brand-new deep thinking model that excels in specialized fields such as mathematics, programming, scientific reasoning, as well as general tasks like creative writing. It achieves or approaches the industry’s top-tier level on multiple authoritative benchmarks including AIME 2024, Codeforces, and GPQA.
default_parameters:
    - name: temperature
      label:
        zh: 生成随机性
        en: Temperature
      desc:
        zh: '- **temperature**: 调高温度会使得模型的输出更多样性和创新性,反之,降低温度会使输出内容更加遵循指令要求但减少多样性。建议不要与“Top p”同时调整。'
        en: '**Temperature**:\n\n- When you increase this value, the model outputs more diverse and innovative content; when you decrease it, the model outputs less diverse content that strictly follows the given instructions.\n- It is recommended not to adjust this value with \"Top p\" at the same time.'
      type: float
      min: "0"
      max: "1"
      default_val:
        balance: "0.8"
        creative: "1"
        default_val: "1.0"
        precise: "0.3"
      precision: 1
      options: []
      style:
        widget: slider
        label:
            zh: 生成多样性
            en: Generation diversity
    - name: max_tokens
      label:
        zh: 最大回复长度
        en: Response max length
      desc:
        zh: 控制模型输出的Tokens 长度上限。通常 100 Tokens 约等于 150 个中文汉字。
        en: You can specify the maximum length of the tokens output through this value. Typically, 100 tokens are approximately equal to 150 Chinese characters.
      type: int
      min: "1"
      max: "4096"
      default_val:
        default_val: "4096"
      options: []
      style:
        widget: slider
        label:
            zh: 输入及输出设置
            en: Input and output settings
    - name: top_p
      label:
        zh: Top P
        en: Top P
      desc:
        zh: '- **Top p 为累计概率**: 模型在生成输出时会从概率最高的词汇开始选择,直到这些词汇的总概率累积达到Top p 值。这样可以限制模型只选择这些高概率的词汇,从而控制输出内容的多样性。建议不要与“生成随机性”同时调整。'
        en: '**Top P**:\n\n- An alternative to sampling with temperature, where only tokens within the top p probability mass are considered. For example, 0.1 means only the top 10% probability mass tokens are considered.\n- We recommend altering this or temperature, but not both.'
      type: float
      min: "0"
      max: "1"
      default_val:
        default_val: "0.7"
      precision: 2
      options: []
      style:
        widget: slider
        label:
            zh: 生成多样性
            en: Generation diversity
    - name: response_format
      label:
        zh: 输出格式
        en: Response format
      desc:
        zh: '- **JSON**: 将引导模型使用JSON格式输出'
        en: '**Response Format**:\n\n- **JSON**: Uses JSON format for replies'
      type: int
      min: ""
      max: ""
      default_val:
        default_val: "0"
      options:
        - label: Text
          value: "0"
        - label: JSON
          value: "1"
      style:
        widget: radio_buttons
        label:
            zh: 输入及输出设置
            en: Input and output settings
meta:
    protocol: ark
    capability:
        function_call: true
        input_modal:
            - text
            - image
        input_tokens: 96000
        json_mode: true
        max_tokens: 128000
        output_modal:
            - text
        output_tokens: 16000
        prefix_caching: false
        reasoning: true
        prefill_response: false
    conn_config:
        base_url: "https://ark.cn-beijing.volces.com/api/v3/"
        api_key: ""
        # Duration string; quoted so reformatters cannot retype it. "0s" means no timeout override.
        timeout: "0s"
        model: "" # model_id / endpoint_id
        temperature: 0.1
        frequency_penalty: 0
        presence_penalty: 0
        max_tokens: 4096
        top_p: 0.7
        top_k: 0
        stop: []
        ark:
            region: ""
            access_key: ""
            secret_key: ""
            retry_times: null
            custom_header: {}
        custom: {}
    status: 0