Skip to content

Commit 28032d8

Browse files
authored
Merge pull request #218 from togethercomputer/conner/ft-limits-api
Add FT models supported and limits
2 parents 8d7897b + 07044f6 commit 28032d8

1 file changed

Lines changed: 166 additions & 0 deletions

File tree

openapi.yaml

Lines changed: 166 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3317,6 +3317,87 @@ paths:
33173317
description: Invalid request parameters.
33183318
'404':
33193319
description: Fine-tune ID not found.
3320+
/fine-tunes/models/supported:
3321+
get:
3322+
tags: ['Fine-tuning']
3323+
summary: List supported models
3324+
description: List models supported for fine-tuning, or check if a specific model is supported.
3325+
x-codeSamples:
3326+
- lang: Shell
3327+
label: cURL (list all)
3328+
source: |
3329+
curl "https://api.together.xyz/v1/fine-tunes/models/supported" \
3330+
-H "Authorization: Bearer $TOGETHER_API_KEY"
3331+
- lang: Shell
3332+
label: cURL (check specific model)
3333+
source: |
3334+
curl "https://api.together.xyz/v1/fine-tunes/models/supported?model_name=meta-llama/Meta-Llama-3.1-8B-Instruct-Reference" \
3335+
-H "Authorization: Bearer $TOGETHER_API_KEY"
3336+
parameters:
3337+
- in: query
3338+
name: model_name
3339+
schema:
3340+
type: string
3341+
description: Optional model name to check support for. If omitted, returns all supported models.
3342+
required: false
3343+
responses:
3344+
'200':
3345+
description: Supported models or support status for a specific model.
3346+
content:
3347+
application/json:
3348+
schema:
3349+
oneOf:
3350+
- type: object
3351+
required:
3352+
- models
3353+
properties:
3354+
models:
3355+
type: array
3356+
items:
3357+
type: string
3358+
description: List of supported model names.
3359+
- type: object
3360+
required:
3361+
- supported
3362+
properties:
3363+
supported:
3364+
type: boolean
3365+
description: Whether the specified model is supported.
3366+
/fine-tunes/models/limits:
3367+
get:
3368+
tags: ['Fine-tuning']
3369+
summary: Get model limits
3370+
description: Get model limits for a specific fine-tuning model.
3371+
x-codeSamples:
3372+
- lang: Shell
3373+
label: cURL
3374+
source: |
3375+
curl "https://api.together.xyz/v1/fine-tunes/models/limits?model_name=meta-llama/Meta-Llama-3.1-8B-Instruct-Reference" \
3376+
-H "Authorization: Bearer $TOGETHER_API_KEY"
3377+
parameters:
3378+
- in: query
3379+
name: model_name
3380+
schema:
3381+
type: string
3382+
description: The model name to get limits for.
3383+
required: true
3384+
responses:
3385+
'200':
3386+
description: Model limits.
3387+
content:
3388+
application/json:
3389+
schema:
3390+
$ref: '#/components/schemas/FineTuneModelLimits'
3391+
'404':
3392+
description: Model not found or not supported for fine-tuning.
3393+
content:
3394+
application/json:
3395+
schema:
3396+
type: object
3397+
properties:
3398+
message:
3399+
type: string
3400+
description: Error message explaining that the model is not available.
33203401
/rerank:
33213402
post:
33223403
tags: ['Rerank']
@@ -10605,6 +10686,91 @@ components:
1060510686
message:
1060610687
type: string
1060710688
description: Message indicating the result of the deletion
10689+
FineTuneModelLimits:
10690+
type: object
10691+
description: Model limits for fine-tuning.
10692+
required:
10693+
- model_name
10694+
- max_num_epochs
10695+
- max_num_evals
10696+
- max_learning_rate
10697+
- min_learning_rate
10698+
- supports_vision
10699+
- supports_tools
10700+
- supports_reasoning
10701+
- merge_output_lora
10702+
properties:
10703+
model_name:
10704+
type: string
10705+
description: The name of the model.
10706+
full_training:
10707+
type: object
10708+
description: Limits for full training.
10709+
required:
10710+
- max_batch_size
10711+
- max_batch_size_dpo
10712+
- min_batch_size
10713+
properties:
10714+
max_batch_size:
10715+
type: integer
10716+
description: Maximum batch size for SFT full training.
10717+
max_batch_size_dpo:
10718+
type: integer
10719+
description: Maximum batch size for DPO full training.
10720+
min_batch_size:
10721+
type: integer
10722+
description: Minimum batch size for full training.
10723+
lora_training:
10724+
type: object
10725+
description: Limits for LoRA training.
10726+
required:
10727+
- max_batch_size
10728+
- max_batch_size_dpo
10729+
- min_batch_size
10730+
- max_rank
10731+
- target_modules
10732+
properties:
10733+
max_batch_size:
10734+
type: integer
10735+
description: Maximum batch size for SFT LoRA training.
10736+
max_batch_size_dpo:
10737+
type: integer
10738+
description: Maximum batch size for DPO LoRA training.
10739+
min_batch_size:
10740+
type: integer
10741+
description: Minimum batch size for LoRA training.
10742+
max_rank:
10743+
type: integer
10744+
description: Maximum LoRA rank.
10745+
target_modules:
10746+
type: array
10747+
items:
10748+
type: string
10749+
description: Available target modules for LoRA.
10750+
max_num_epochs:
10751+
type: integer
10752+
description: Maximum number of training epochs.
10753+
max_num_evals:
10754+
type: integer
10755+
description: Maximum number of evaluations.
10756+
max_learning_rate:
10757+
type: number
10758+
description: Maximum learning rate.
10759+
min_learning_rate:
10760+
type: number
10761+
description: Minimum learning rate.
10762+
supports_vision:
10763+
type: boolean
10764+
description: Whether the model supports vision/multimodal inputs.
10765+
supports_tools:
10766+
type: boolean
10767+
description: Whether the model supports tool/function calling.
10768+
supports_reasoning:
10769+
type: boolean
10770+
description: Whether the model supports reasoning.
10771+
merge_output_lora:
10772+
type: boolean
10773+
description: Whether to merge the output LoRA adapter into the base model.
1060810774
FinetuneJobStatus:
1060910775
type: string
1061010776
enum:

0 commit comments

Comments
 (0)