torch.optim.AdagradOptions
export interface AdagradOptions {
/** Learning rate (default: 1e-2) */
lr?: number;
/** Learning rate decay (default: 0) */
lr_decay?: number;
/** Weight decay (L2 penalty) (default: 0) */
weight_decay?: number;
/** Initial value for the accumulator of squared gradients (default: 0) */
initial_accumulator_value?: number;
/** Term added to the denominator to improve numerical stability (default: 1e-10) */
eps?: number;
/** Whether to use the foreach (multi-tensor) implementation (default: false) */
foreach?: boolean;
/** Maximize the objective with respect to params (default: false) */
maximize?: boolean;
/** Whether autograd should occur through the optimizer step in training (default: false) */
differentiable?: boolean;
}

- lr (number, optional) – Learning rate (default: 1e-2)
- lr_decay (number, optional) – Learning rate decay (default: 0)
- weight_decay (number, optional) – Weight decay (L2 penalty) (default: 0)
- initial_accumulator_value (number, optional) – Initial value for the accumulator of squared gradients (default: 0)
- eps (number, optional) – Term added to the denominator to improve numerical stability (default: 1e-10)
- foreach (boolean, optional) – Whether to use the foreach (multi-tensor) implementation (default: false)
- maximize (boolean, optional) – Maximize the objective with respect to params (default: false)
- differentiable (boolean, optional) – Whether autograd should occur through the optimizer step in training (default: false)
Options for the Adagrad optimizer.
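
A minimal usage sketch follows. The option fields and their defaults come from the interface above; the Adagrad constructor call at the end is a hypothetical illustration and may not match this library's actual API.

// A minimal sketch, assuming the AdagradOptions interface above is in scope.
// The `Adagrad` constructor shown at the end is hypothetical and only
// illustrates how such an options object is typically consumed.
const options: AdagradOptions = {
  lr: 1e-2,                     // step size
  lr_decay: 0,                  // no learning-rate decay
  weight_decay: 1e-4,           // L2 penalty on the parameters
  initial_accumulator_value: 0, // starting value of the squared-gradient accumulator
  eps: 1e-10,                   // denominator term for numerical stability
  maximize: false,              // minimize the objective (the default)
};

// Hypothetical usage, assuming an Adagrad class that takes (params, options):
// const optimizer = new Adagrad(model.parameters(), options);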