torch.optim.AdadeltaOptions

Options for the Adadelta optimizer.
export interface AdadeltaOptions {
  /** Coefficient that scales the computed delta before it is applied to the parameters (default: 1.0) */
  lr?: number;
  /** Decay coefficient used for computing the running average of squared gradients (default: 0.9) */
  rho?: number;
  /** Term added to the denominator to improve numerical stability (default: 1e-6) */
  eps?: number;
  /** Weight decay (L2 penalty) (default: 0) */
  weight_decay?: number;
  /** Whether to use the foreach (multi-tensor) implementation (default: false) */
  foreach?: boolean;
  /** Maximize the objective with respect to params instead of minimizing (default: false) */
  maximize?: boolean;
  /** Whether autograd should occur through the optimizer step, making it differentiable (default: false) */
  differentiable?: boolean;
}
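For reference, these options map onto Adadelta's update rule (Zeiler, 2012), with lr applied as a final scaling of the delta, as in PyTorch:

E[g^2]_t = \rho \, E[g^2]_{t-1} + (1 - \rho) \, g_t^2
\Delta x_t = - \frac{\sqrt{E[\Delta x^2]_{t-1} + \epsilon}}{\sqrt{E[g^2]_t + \epsilon}} \, g_t
E[\Delta x^2]_t = \rho \, E[\Delta x^2]_{t-1} + (1 - \rho) \, \Delta x_t^2
x_{t+1} = x_t + \mathrm{lr} \cdot \Delta x_t

Here rho is the decay coefficient \rho, eps is \epsilon, and g_t is the gradient at step t.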
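A minimal usage sketch follows. It assumes the binding mirrors PyTorch's optimizer API (a torch.optim.Adadelta constructor taking a parameter list plus this options object); the import path, nn.Linear, and parameters() are assumptions, not confirmed by this page.

import * as torch from "torch"; // import path is an assumption

// Hypothetical model; assumes the binding exposes nn.Linear and parameters().
const model = new torch.nn.Linear(4, 1);

// Every field is optional; omitted fields fall back to the defaults above.
const opts: torch.optim.AdadeltaOptions = {
  lr: 1.0,            // scales each computed delta
  rho: 0.9,           // decay of the squared-gradient running average
  eps: 1e-6,          // numerical-stability term in the denominator
  weight_decay: 1e-4, // L2 penalty
};

const optimizer = new torch.optim.Adadelta(model.parameters(), opts);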