curl --request POST \
  --url https://api.fireworks.ai/v1/accounts/{account_id}/supervisedFineTuningJobs \
  --header 'Authorization: Bearer <token>' \
  --header 'Content-Type: application/json' \
  --data '
{
  "dataset": "<string>",
  "displayName": "<string>",
  "outputModel": "<string>",
  "baseModel": "<string>",
  "warmStartFrom": "<string>",
  "jinjaTemplate": "<string>",
  "earlyStop": true,
  "epochs": 123,
  "learningRate": 123,
  "maxContextLength": 123,
  "loraRank": 123,
  "wandbConfig": {
    "enabled": true,
    "apiKey": "<string>",
    "project": "<string>",
    "entity": "<string>",
    "runId": "<string>"
  },
  "evaluationDataset": "<string>",
  "isTurbo": true,
  "evalAutoCarveout": true,
  "region": "REGION_UNSPECIFIED",
  "nodes": 123,
  "batchSize": 123,
  "mtpEnabled": true,
  "mtpNumDraftTokens": 123,
  "mtpFreezeBaseModel": true,
  "hiddenStatesGenConfig": {
    "deployedModel": "<string>",
    "maxWorkers": 123,
    "maxTokens": 123,
    "inputOffset": 123,
    "inputLimit": 123,
    "maxContextLen": 123,
    "regenerateAssistant": true,
    "outputActivations": true,
    "apiKey": "<string>"
  },
  "metricsFileSignedUrl": "<string>",
  "gradientAccumulationSteps": 123,
  "learningRateWarmupSteps": 123
}
'

A successful response returns the created supervised fine-tuning job:

{
"dataset": "<string>",
"name": "<string>",
"displayName": "<string>",
"createTime": "2023-11-07T05:31:56Z",
"completedTime": "2023-11-07T05:31:56Z",
"state": "JOB_STATE_UNSPECIFIED",
"status": {
"code": "OK",
"message": "<string>"
},
"createdBy": "<string>",
"outputModel": "<string>",
"baseModel": "<string>",
"warmStartFrom": "<string>",
"jinjaTemplate": "<string>",
"earlyStop": true,
"epochs": 123,
"learningRate": 123,
"maxContextLength": 123,
"loraRank": 123,
"wandbConfig": {
"enabled": true,
"apiKey": "<string>",
"project": "<string>",
"entity": "<string>",
"runId": "<string>",
"url": "<string>"
},
"evaluationDataset": "<string>",
"isTurbo": true,
"evalAutoCarveout": true,
"region": "REGION_UNSPECIFIED",
"updateTime": "2023-11-07T05:31:56Z",
"nodes": 123,
"batchSize": 123,
"mtpEnabled": true,
"mtpNumDraftTokens": 123,
"mtpFreezeBaseModel": true,
"hiddenStatesGenConfig": {
"deployedModel": "<string>",
"maxWorkers": 123,
"maxTokens": 123,
"inputOffset": 123,
"inputLimit": 123,
"maxContextLen": 123,
"regenerateAssistant": true,
"outputActivations": true,
"apiKey": "<string>"
},
"metricsFileSignedUrl": "<string>",
"gradientAccumulationSteps": 123,
"learningRateWarmupSteps": 123,
"estimatedCost": {
"currencyCode": "<string>",
"units": "<string>",
"nanos": 123
}
}curl --request POST \
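
The state field reports where the job is in its lifecycle, and the status object carries an error code and message when something goes wrong. The sketch below is a rough illustration of reading those two fields from a job object shaped like the response above; the grouping of states into completed, ended, and in-progress is inferred from the state names and is not defined by this reference.

def report(job: dict) -> None:
    """Print a short progress summary for a supervised fine-tuning job response."""
    state = job.get("state", "JOB_STATE_UNSPECIFIED")
    if state == "JOB_STATE_COMPLETED":
        print(f"{job.get('name')} finished; fine-tuned model: {job.get('outputModel')}")
    elif state in ("JOB_STATE_FAILED", "JOB_STATE_CANCELLED", "JOB_STATE_EXPIRED"):
        # status.code is the error code; status.message is a developer-facing explanation.
        status = job.get("status", {})
        print(f"{job.get('name')} ended in {state}: {status.get('code')} {status.get('message')}")
    else:
        print(f"{job.get('name')} is in progress (state={state})")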
Request parameters:

Authorization (header): Bearer authentication using your Fireworks API key. Format: Bearer <API_KEY>
account_id (path): The account ID.
Job ID (optional): ID of the supervised fine-tuning job; a random UUID will be generated if not specified.
dataset: The name of the dataset used for training.
outputModel: The model ID to be assigned to the resulting fine-tuned model. If not specified, the job ID will be used.
baseModel: The name of the base model to be fine-tuned. Only one of 'base_model' or 'warm_start_from' should be specified.
warmStartFrom: The PEFT addon model in Fireworks format to be fine-tuned from. Only one of 'base_model' or 'warm_start_from' should be specified.
earlyStop: Whether to stop training early if the validation loss does not improve.
epochs: The number of epochs to train for.
learningRate: The learning rate used for training.
maxContextLength: The maximum context length to use with the model.
loraRank: The rank of the LoRA layers.
wandbConfig: The Weights & Biases team/user account for logging training progress.
wandbConfig.enabled: Whether to enable wandb logging.
wandbConfig.apiKey: The API key for the wandb service.
wandbConfig.project: The project name for the wandb service.
wandbConfig.entity: The entity name for the wandb service.
wandbConfig.runId: The run ID for the wandb service.
evaluationDataset: The name of a separate dataset to use for evaluation.
isTurbo: Whether to run the fine-tuning job in turbo mode.
evalAutoCarveout: Whether to auto-carve the dataset for eval.
region: The region where the fine-tuning job is located. One of REGION_UNSPECIFIED, US_IOWA_1, US_VIRGINIA_1, US_ILLINOIS_1, AP_TOKYO_1, US_ARIZONA_1, US_TEXAS_1, US_ILLINOIS_2, EU_FRANKFURT_1, US_TEXAS_2, EU_ICELAND_1, EU_ICELAND_2, US_WASHINGTON_1, US_WASHINGTON_2, US_WASHINGTON_3, AP_TOKYO_2, US_CALIFORNIA_1, US_UTAH_1, US_TEXAS_3, US_GEORGIA_1, US_GEORGIA_2, US_WASHINGTON_4, US_GEORGIA_3.
nodes: The number of nodes to use for the fine-tuning job.
hiddenStatesGenConfig: Config for generating a dataset with hidden states for training.
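
For programmatic use, the same request can be issued from Python with the requests library. This is a minimal sketch; the account ID, the FIREWORKS_API_KEY environment variable, the dataset and base model names, and the hyperparameter values are placeholders chosen for illustration, not values taken from this reference.

import os
import requests

ACCOUNT_ID = "my-account"                    # placeholder account ID
API_KEY = os.environ["FIREWORKS_API_KEY"]    # assumed env var holding your API key

url = f"https://api.fireworks.ai/v1/accounts/{ACCOUNT_ID}/supervisedFineTuningJobs"
headers = {
    "Authorization": f"Bearer {API_KEY}",
    "Content-Type": "application/json",
}
body = {
    "dataset": "my-training-dataset",        # placeholder dataset name
    "displayName": "example-sft-job",
    # Specify exactly one of baseModel or warmStartFrom.
    "baseModel": "accounts/fireworks/models/<base-model>",   # placeholder model name
    "epochs": 1,
    "learningRate": 1e-4,
    "loraRank": 8,
    "evalAutoCarveout": True,
}

resp = requests.post(url, headers=headers, json=body)
resp.raise_for_status()
job = resp.json()
print(job.get("name"), job.get("state"))

The returned object has the shape of the response example above, so it can be passed directly to the report helper sketched earlier.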
A successful response includes the following fields:

dataset: The name of the dataset used for training.
state: JobState represents the state an asynchronous job can be in. One of JOB_STATE_UNSPECIFIED, JOB_STATE_CREATING, JOB_STATE_RUNNING, JOB_STATE_COMPLETED, JOB_STATE_FAILED, JOB_STATE_CANCELLED, JOB_STATE_DELETING, JOB_STATE_WRITING_RESULTS, JOB_STATE_VALIDATING, JOB_STATE_DELETING_CLEANING_UP, JOB_STATE_PENDING, JOB_STATE_EXPIRED, JOB_STATE_RE_QUEUEING, JOB_STATE_CREATING_INPUT_DATASET, JOB_STATE_IDLE, JOB_STATE_CANCELLING, JOB_STATE_EARLY_STOPPED, JOB_STATE_PAUSED.
status.code: The status code. One of OK, CANCELLED, UNKNOWN, INVALID_ARGUMENT, DEADLINE_EXCEEDED, NOT_FOUND, ALREADY_EXISTS, PERMISSION_DENIED, UNAUTHENTICATED, RESOURCE_EXHAUSTED, FAILED_PRECONDITION, ABORTED, OUT_OF_RANGE, UNIMPLEMENTED, INTERNAL, UNAVAILABLE, DATA_LOSS.
status.message: A developer-facing error message in English.
createdBy: The email address of the user who initiated this fine-tuning job.
outputModel: The model ID to be assigned to the resulting fine-tuned model. If not specified, the job ID will be used.
baseModel: The name of the base model to be fine-tuned. Only one of 'base_model' or 'warm_start_from' should be specified.
warmStartFrom: The PEFT addon model in Fireworks format to be fine-tuned from. Only one of 'base_model' or 'warm_start_from' should be specified.
earlyStop: Whether to stop training early if the validation loss does not improve.
epochs: The number of epochs to train for.
learningRate: The learning rate used for training.
maxContextLength: The maximum context length to use with the model.
loraRank: The rank of the LoRA layers.
wandbConfig: The Weights & Biases team/user account for logging training progress.
wandbConfig.enabled: Whether to enable wandb logging.
wandbConfig.apiKey: The API key for the wandb service.
wandbConfig.project: The project name for the wandb service.
wandbConfig.entity: The entity name for the wandb service.
wandbConfig.runId: The run ID for the wandb service.
wandbConfig.url: The URL for the wandb service.
evaluationDataset: The name of a separate dataset to use for evaluation.
isTurbo: Whether to run the fine-tuning job in turbo mode.
evalAutoCarveout: Whether to auto-carve the dataset for eval.
region: The region where the fine-tuning job is located. One of REGION_UNSPECIFIED, US_IOWA_1, US_VIRGINIA_1, US_ILLINOIS_1, AP_TOKYO_1, US_ARIZONA_1, US_TEXAS_1, US_ILLINOIS_2, EU_FRANKFURT_1, US_TEXAS_2, EU_ICELAND_1, EU_ICELAND_2, US_WASHINGTON_1, US_WASHINGTON_2, US_WASHINGTON_3, AP_TOKYO_2, US_CALIFORNIA_1, US_UTAH_1, US_TEXAS_3, US_GEORGIA_1, US_GEORGIA_2, US_WASHINGTON_4, US_GEORGIA_3.
updateTime: The update time for the supervised fine-tuning job.
nodes: The number of nodes to use for the fine-tuning job.
hiddenStatesGenConfig: Config for generating a dataset with hidden states for training.
estimatedCost: The estimated cost of the job.
estimatedCost.currencyCode: The three-letter currency code defined in ISO 4217.
estimatedCost.units: The whole units of the amount. For example, if currencyCode is "USD", then 1 unit is one US dollar.
estimatedCost.nanos: The number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If units is positive, nanos must be positive or zero; if units is zero, nanos can be positive, zero, or negative; if units is negative, nanos must be negative or zero. For example, $-1.75 is represented as units=-1 and nanos=-750,000,000.
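
Because estimatedCost splits the amount into whole units and nano (10^-9) units, combining them into a single decimal value is a common step before displaying or aggregating costs. A minimal sketch in Python, assuming the object shape shown in the response example (the sample values are the $-1.75 case from the field description):

from decimal import Decimal

def money_to_decimal(money: dict) -> Decimal:
    """Combine whole units and nano (10^-9) units into one decimal amount."""
    units = Decimal(money.get("units") or "0")   # units is serialized as a string
    nanos = Decimal(money.get("nanos") or 0)
    return units + nanos / Decimal(1_000_000_000)

cost = {"currencyCode": "USD", "units": "-1", "nanos": -750_000_000}
print(money_to_decimal(cost), cost["currencyCode"])   # -1.75 USD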