torch.nn.functional.EmbeddingFunctionalOptions
export interface EmbeddingFunctionalOptions {
/** If specified, the entries at padding_idx do not contribute to the gradient; therefore, the embedding vector at padding_idx is not updated during training, i.e. it remains as a fixed "pad". */
padding_idx?: number;
/** If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm. */
max_norm?: number;
/** The p of the p-norm to compute for the max_norm option. (default: 2.0) */
norm_type?: number;
/** If true, this will scale gradients by the inverse of frequency of the words in the mini-batch. (default: false) */
scale_grad_by_freq?: boolean;
/** If true, gradient w.r.t. weight matrix will be a sparse tensor. (default: false) */
sparse?: boolean;
}padding_idx(number)optional- – If specified, the entries at padding_idx do not contribute to the gradient; therefore, the embedding vector at padding_idx is not updated during training, i.e. it remains as a fixed "pad".
max_norm (number, optional) – If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm.
norm_type (number, optional) – The p of the p-norm to compute for the max_norm option. Default: 2.0.
scale_grad_by_freq (boolean, optional) – If true, this will scale gradients by the inverse of the frequency of the words in the mini-batch. Default: false.
sparse (boolean, optional) – If true, the gradient w.r.t. the weight matrix will be a sparse tensor. Default: false.
Options for embedding functional operation.