torch.optim.SparseAdamOptions
export interface SparseAdamOptions {
/** Learning rate (default: 1e-3) */
lr?: number;
/** Coefficients for computing running averages of gradient and its square (default: [0.9, 0.999]) */
betas?: [number, number];
/** Term added to the denominator to improve numerical stability (default: 1e-8) */
eps?: number;
/** Maximize the objective with respect to params (default: false) */
maximize?: boolean;
}

lr (number, optional) – Learning rate (default: 1e-3)
betas ([number, number], optional) – Coefficients for computing running averages of gradient and its square (default: [0.9, 0.999])
eps (number, optional) – Term added to the denominator to improve numerical stability (default: 1e-8)
maximize (boolean, optional) – Maximize the objective with respect to params (default: false)
Options for the SparseAdam optimizer.
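
Below is a minimal usage sketch. It assumes the package is imported as torch and that a torch.optim.SparseAdam class exists whose constructor takes the parameters to optimize followed by a SparseAdamOptions object, mirroring PyTorch's torch.optim.SparseAdam; the import path, the model stand-in, and the constructor signature are assumptions, not confirmed by this page.

import * as torch from "torch"; // assumed package name

// Stand-in for a model; a parameters() method returning the tensors
// to optimize is an assumption about the library's API.
declare const model: { parameters(): unknown[] };

// All fields are optional; omitted fields fall back to the defaults
// listed above (lr: 1e-3, betas: [0.9, 0.999], eps: 1e-8, maximize: false).
const options: torch.optim.SparseAdamOptions = {
  lr: 1e-2,
  betas: [0.9, 0.999],
};

// Assumed constructor signature, mirroring PyTorch's SparseAdam(params, ...).
const optimizer = new torch.optim.SparseAdam(model.parameters(), options);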