Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
166 changes: 166 additions & 0 deletions openapi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3317,6 +3317,87 @@ paths:
description: Invalid request parameters.
'404':
description: Fine-tune ID not found.
/fine-tunes/models/supported:
  get:
    tags: ['Fine-tuning']
    summary: List supported models
    description: List models supported for fine-tuning, or check if a specific model is supported.
    x-codeSamples:
      - lang: Shell
        label: cURL (list all)
        source: |
          curl "https://api.together.xyz/v1/fine-tunes/models/supported" \
            -H "Authorization: Bearer $TOGETHER_API_KEY"
      - lang: Shell
        label: cURL (check specific model)
        source: |
          curl "https://api.together.xyz/v1/fine-tunes/models/supported?model_name=meta-llama/Meta-Llama-3.1-8B-Instruct-Reference" \
            -H "Authorization: Bearer $TOGETHER_API_KEY"
    parameters:
      - in: query
        name: model_name
        schema:
          type: string
        description: Optional model name to check support for. If omitted, returns all supported models.
        required: false
    responses:
      '200':
        description: Supported models or support status for a specific model.
        content:
          application/json:
            schema:
              # Response shape depends on whether model_name was supplied:
              # without it, a list of models; with it, a boolean support flag.
              oneOf:
                - type: object
                  required:
                    - models
                  properties:
                    models:
                      type: array
                      items:
                        type: string
                      description: List of supported model names.
                - type: object
                  required:
                    - supported
                  properties:
                    supported:
                      type: boolean
                      description: Whether the specified model is supported.
/fine-tunes/models/limits:
  get:
    tags: ['Fine-tuning']
    summary: Get model limits
    description: Get model limits for a specific fine-tuning model.
    # NOTE(review): per PR discussion, responses are deliberately not CDN-cached
    # because limits can change server-side; this endpoint is the source of
    # truth for sequence lengths etc. — confirm with the backend team if caching
    # is ever added.
    x-codeSamples:
      - lang: Shell
        label: cURL
        source: |
          curl "https://api.together.xyz/v1/fine-tunes/models/limits?model_name=meta-llama/Meta-Llama-3.1-8B-Instruct-Reference" \
            -H "Authorization: Bearer $TOGETHER_API_KEY"
    parameters:
      - in: query
        name: model_name
        schema:
          type: string
        description: The model name to get limits for.
        required: true
    responses:
      '200':
        description: Model limits.
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/FineTuneModelLimits'
      '404':
        description: Model not found or not supported for fine-tuning.
        content:
          application/json:
            schema:
              type: object
              properties:
                message:
                  type: string
                  description: Error message explaining the model is not available.
/rerank:
post:
tags: ['Rerank']
Expand Down Expand Up @@ -10605,6 +10686,91 @@ components:
message:
type: string
description: Message indicating the result of the deletion
FineTuneModelLimits:
  type: object
  description: Model limits for fine-tuning.
  # NOTE(review): the required set mirrors the fields that are NOT `omitempty`
  # in the backend Go struct (per PR discussion) — keep in sync with that
  # struct; full_training and lora_training are intentionally optional.
  required:
    - model_name
    - max_num_epochs
    - max_num_evals
    - max_learning_rate
    - min_learning_rate
    - supports_vision
    - supports_tools
    - supports_reasoning
    - merge_output_lora
  properties:
    model_name:
      type: string
      description: The name of the model.
    full_training:
      type: object
      description: Limits for full training.
      required:
        - max_batch_size
        - max_batch_size_dpo
        - min_batch_size
      properties:
        max_batch_size:
          type: integer
          description: Maximum batch size for SFT full training.
        max_batch_size_dpo:
          type: integer
          description: Maximum batch size for DPO full training.
        min_batch_size:
          type: integer
          description: Minimum batch size for full training.
    lora_training:
      type: object
      description: Limits for LoRA training.
      required:
        - max_batch_size
        - max_batch_size_dpo
        - min_batch_size
        - max_rank
        - target_modules
      properties:
        max_batch_size:
          type: integer
          description: Maximum batch size for SFT LoRA training.
        max_batch_size_dpo:
          type: integer
          description: Maximum batch size for DPO LoRA training.
        min_batch_size:
          type: integer
          description: Minimum batch size for LoRA training.
        max_rank:
          type: integer
          description: Maximum LoRA rank.
        target_modules:
          type: array
          items:
            type: string
          description: Available target modules for LoRA.
    max_num_epochs:
      type: integer
      description: Maximum number of training epochs.
    max_num_evals:
      type: integer
      description: Maximum number of evaluations.
    max_learning_rate:
      type: number
      description: Maximum learning rate.
    min_learning_rate:
      type: number
      description: Minimum learning rate.
    supports_vision:
      type: boolean
      description: Whether the model supports vision/multimodal inputs.
    supports_tools:
      type: boolean
      description: Whether the model supports tool/function calling.
    supports_reasoning:
      type: boolean
      description: Whether the model supports reasoning.
    merge_output_lora:
      type: boolean
      description: Whether to merge the output LoRA.
FinetuneJobStatus:
type: string
enum:
Expand Down