torch.optim.SGDOptions
/**
 * Configuration options for the SGD (stochastic gradient descent) optimizer,
 * mirroring the parameters of `torch.optim.SGD`.
 */
export interface SGDOptions {
  /** Learning rate (required). */
  lr: number;
  /** Momentum factor. @defaultValue 0 */
  momentum?: number;
  /** Weight decay (L2 penalty). @defaultValue 0 */
  weight_decay?: number;
  /** Dampening for momentum. @defaultValue 0 */
  dampening?: number;
  /** Enables Nesterov momentum. @defaultValue false */
  nesterov?: boolean;
  /** Maximize the objective with respect to params. @defaultValue false */
  maximize?: boolean;
  /** Whether to use a differentiable learning rate. @defaultValue false */
  differentiable?: boolean;
  /** Whether to use fused implementation. @defaultValue false */
  fused?: boolean;
}
lr (number) – Learning rate (required)
momentum (number, optional) – Momentum factor (default: 0)
weight_decay (number, optional) – Weight decay (L2 penalty) (default: 0)
dampening (number, optional) – Dampening for momentum (default: 0)
nesterov (boolean, optional) – Enables Nesterov momentum (default: false)
maximize (boolean, optional) – Maximize the objective with respect to params (default: false)
differentiable (boolean, optional) – Whether to use a differentiable learning rate (default: false)
fused (boolean, optional) – Whether to use fused implementation (default: false)
Configuration options accepted by the SGD (stochastic gradient descent) optimizer.