Skip to main content
torch.js has not been released yet.
torch.js logotorch.js logotorch.js
PlaygroundContact
Login
Documentation
IntroductionType SafetyTensor ExpressionsTensor IndexingEinsumEinopsAutogradTraining a ModelProfiling & MemoryPyTorch MigrationBest PracticesRuntimesPerformancePyTorch CompatibilityBenchmarksDType Coverage
adaptive_avg_pool1dadaptive_avg_pool2dadaptive_avg_pool3dadaptive_max_pool1dadaptive_max_pool1d_with_indicesadaptive_max_pool2dadaptive_max_pool2d_with_indicesadaptive_max_pool3dadaptive_max_pool3d_with_indicesAdaptiveMaxPoolFunctionalOptionsaffine_gridAffineGridFunctionalOptionsalpha_dropoutAlphaDropoutFunctionalOptionsavg_pool1davg_pool2davg_pool3dAvgPool1dFunctionalOptionsAvgPool2dFunctionalOptionsAvgPool3dFunctionalOptionsbatch_normBatchNormFunctionalOptionsbinary_cross_entropybinary_cross_entropy_with_logitsBinaryCrossEntropyFunctionalOptionsBinaryCrossEntropyWithLogitsFunctionalOptionsCeluFunctionalOptionschannel_shuffleconv_transpose1dconv_transpose2dconv_transpose3dconv1dConv1dFunctionalOptionsconv2dConv2dFunctionalOptionsconv3dConv3dFunctionalOptionsConvTranspose1dFunctionalOptionsConvTranspose2dFunctionalOptionsConvTranspose3dFunctionalOptionscosine_embedding_losscosine_similarityCosineEmbeddingLossFunctionalOptionsCosineSimilarityFunctionalOptionscross_entropyCrossEntropyFunctionalOptionsctc_lossCTCLossOptionsdropoutdropout1ddropout2ddropout3dDropoutFunctionalOptionsEluFunctionalOptionsembeddingembedding_bagEmbeddingBagFunctionalOptionsEmbeddingFunctionalOptionsfeature_alpha_dropoutfoldFoldFunctionalOptionsfractional_max_pool2dfractional_max_pool2d_with_indicesfractional_max_pool3dfractional_max_pool3d_with_indicesFractionalMaxPoolFunctionalOptionsgaussian_nll_lossGluFunctionalOptionsgrid_sampleGridSampleFunctionalOptionsgroup_normgrouped_mmGroupedMMFunctionalOptionsGroupNormFunctionalOptionsHardshrinkFunctionalOptionsHardtanhFunctionalOptionshinge_embedding_lossHingeEmbeddingLossFunctionalOptionshuber_lossHuberLossFunctionalOptionsinstance_normInstanceNormFunctionalOptionsinterpolateInterpolateFunctionalOptionskl_divKlDivFunctionalOptionsKLDivOptionsl1_lossL1LossFunctionalOptionslayer_normLayerNormFunctionalOptionsLeakyReluFunctionalOptionslinearlocal_response_normLocalResponseNormFunctionalOptionslog_softmaxlp_pool1dlp_pool2dlp_pool3dLPPoolFunctionalOpt
ionsmargin_ranking_lossMarginRankingLossFunctionalOptionsmax_pool1dmax_pool1d_with_indicesmax_pool2dmax_pool2d_with_indicesmax_pool3dmax_pool3d_with_indicesmax_unpool1dmax_unpool2dmax_unpool3dMaxPool1dFunctionalOptionsMaxPool2dFunctionalOptionsMaxPool3dFunctionalOptionsMaxUnpoolFunctionalOptionsmse_lossMseLossFunctionalOptionsmulti_head_attention_forwardmulti_margin_lossMultiHeadAttentionFunctionalOptionsmultilabel_margin_lossmultilabel_soft_margin_lossnll_lossNllLossFunctionalOptionsnormalizeNormalizeFunctionalOptionsone_hotpadPadFunctionalOptionspairwise_distancePairwiseDistanceFunctionalOptionspdistPdistFunctionalOptionspixel_shufflepixel_unshufflepoisson_nll_lossPoolWithIndicesResultReluFunctionalOptionsrms_normRmsNormFunctionalOptionsRreluFunctionalOptionsscaled_grouped_mmscaled_mmScaledDotProductAttentionFunctionalOptionsScaledGroupedMMFunctionalOptionsScaledMMFunctionalOptionssmooth_l1_lossSmoothL1LossFunctionalOptionssoft_margin_lossSoftMarginLossFunctionalOptionsSoftmaxOptionsSoftminFunctionalOptionsSoftplusFunctionalOptionsSoftshrinkFunctionalOptionstriplet_margin_losstriplet_margin_with_distance_lossTripletMarginLossFunctionalOptionsunfoldUnfoldFunctionalOptionsupsampleupsample_bilinearupsample_nearestUpsampleBilinearOptionsUpsampleNearestOptionsUpsampleOptions
ActivationOptionsAdaptiveAvgPool1dAdaptiveAvgPool2dAdaptiveAvgPool3dAdaptiveLogSoftmaxOptionsAdaptiveLogSoftmaxWithLossAdaptiveMaxPool1dAdaptiveMaxPool1dOptionsAdaptiveMaxPool2dAdaptiveMaxPool2dOptionsAdaptiveMaxPool3dAdaptiveMaxPool3dOptionsadd_moduleAlphaDropoutappendappendapplyAvgPool1dAvgPool1dOptionsAvgPool2dAvgPool2dOptionsAvgPool3dAvgPool3dOptionsBackwardHookBackwardPreHookBatchNorm1dBatchNorm2dBatchNorm3dBatchNormOptionsBCELossBCEWithLogitsLossBilinearBilinearOptionsBufferBufferOptionsBufferRegistrationHookbufferscallCELUCELUOptionsChannelShufflechildrenCircularPad1dCircularPad2dCircularPad3dclearConstantPad1dConstantPad2dConstantPad3dConv1dConv2dConv3dConvOptionsConvTranspose1dConvTranspose2dConvTranspose3dConvTransposeOptionsCosineEmbeddingLossCosineEmbeddingLossOptionsCosineSimilarityCosineSimilarityOptionscreatecreateCrossEntropyLossCTCLossdecodedecodedeleteDropoutDropout1dDropout2dDropout3dDropoutOptionsELUELUOptionsEmbeddingEmbeddingBagEmbeddingBagForwardOptionsEmbeddingBagFromPretrainedOptionsEmbeddingBagOptionsEmbeddingFromPretrainedOptionsEmbeddingOptionsencodeencodeentriesentriesevalextendFeatureAlphaDropoutFlattenFlattenOptionsFoldFoldOptionsforwardforwardforwardforwardforwardforwardforwardforwardforwardforwardforwardforwardforward_with_targetForwardHookForwardPreHookFractionalMaxPool2dFractionalMaxPool3dFractionalMaxPoolOptionsfrom_pretrainedfrom_pretrainedGaussianNLLLossGELUGELUOptionsgenerate_square_subsequent_maskgetgetgetgetgetget_bufferget_parameterget_submoduleGLUGLUOptionsGroupNormGroupNormOptionsGRUGRUCellHardshrinkHardshrinkOptionsHardsigmoidHardswishHardtanhHardtanhOptionshashasHingeEmbeddingLossHingeEmbeddingLossOptionsHuberLossHuberLossOptionsIdentityInstanceNorm1dInstanceNorm2dInstanceNorm3dInstanceNormOptionsis_uninitialized_bufferis_uninitialized_parameteriterator]iterator]iterator]iterator]keyskeysKLDivLossL1LossL1LossOptionsLayerNormLayerNormOptionsLazyBatchNorm1dLazyBatchNorm2dLazyBatchNorm3dLazyConv1dLazyConv2dLazyConv3dLazyCon
vOptionsLazyConvTranspose1dLazyConvTranspose2dLazyConvTranspose3dLazyConvTransposeOptionsLazyInstanceNorm1dLazyInstanceNorm2dLazyInstanceNorm3dLazyLinearLeakyReLULeakyReLUOptionsLinearLinearOptionsload_state_dictload_state_dictLocalResponseNormLocalResponseNormOptionslog_probLogSigmoidLogSoftmaxLogSoftmaxOptionsLPPool1dLPPool1dOptionsLPPool2dLPPool2dOptionsLPPool3dLPPool3dOptionsLSTMLSTMCellLSTMCellOptionsMarginRankingLossMarginRankingLossOptionsmaterializematerializematerialize_uninitializedmaterialize_uninitializedMaxPool1dMaxPool1dOptionsMaxPool2dMaxPool2dOptionsMaxPool3dMaxPool3dOptionsMaxUnpool1dMaxUnpool1dOptionsMaxUnpool2dMaxUnpool2dOptionsMaxUnpool3dMaxUnpool3dOptionsMishModuleModuleBuffersModuleChildrenModuleDictModuleDictOptionsModuleListModuleListOptionsModuleParametersModuleRegistrationHookmodulesMSELossMSELossOptionsmultihead_attnMultiheadAttentionMultiheadAttentionOptionsMultiheadAttnOptionsMultiLabelMarginLossMultiLabelMarginLossOptionsMultiLabelSoftMarginLossMultiMarginLossnamed_buffersnamed_childrennamed_modulesnamed_parametersNamedModulesOptionsNamedRecurseOptionsNLLLossnum_parametersNumParametersOptionsPairwiseDistancePairwiseDistanceOptionsParameterParameterDictParameterDictOptionsParameterListParameterListOptionsParameterOptionsParameterRegistrationHookparametersPixelShufflePixelUnshufflePoissonNLLLosspoppopPReLUPReLUOptionsRecurseOptionsReflectionPad1dReflectionPad2dReflectionPad3dregister_backward_hookregister_bufferregister_forward_hookregister_forward_pre_hookregister_full_backward_hookregister_full_backward_pre_hookregister_module_backward_hookregister_module_buffer_registration_hookregister_module_forward_hookregister_module_forward_pre_hookregister_module_full_backward_hookregister_module_full_backward_pre_hookregister_module_module_registration_hookregister_module_parameter_registration_hookregister_parameterReLUReLU6RemovableHandleremoveReplicationPad1dReplicationPad2dReplicationPad3dRMSNormRMSNormOptionsRNNRNNBaseRNNBaseOptionsRNNCellR
NNCellOptionsRReLURReLUOptionsrunrunSELUSequentialsetsetsetSigmoidSiLUSmoothL1LossSmoothL1LossOptionsSoftMarginLossSoftMarginLossOptionsSoftmaxSoftmax2dSoftmaxOptionsSoftminSoftminOptionsSoftplusSoftplusOptionsSoftshrinkSoftshrinkOptionsSoftsignstate_dictstate_dictStateDictOptionsstepSyncBatchNormTanhTanhshrinkThresholdThresholdOptionstotototrainTrainOptionsTransformerTransformerDecoderTransformerDecoderDecodeOptionsTransformerDecoderLayerTransformerDecoderLayerDecodeOptionsTransformerDecoderLayerOptionsTransformerDecoderOptionsTransformerEncoderTransformerEncoderEncodeOptionsTransformerEncoderLayerTransformerEncoderLayerEncodeOptionsTransformerEncoderLayerOptionsTransformerEncoderOptionsTransformerOptionsTransformerRunOptionsTripletMarginLossTripletMarginWithDistanceLossUnflattenUnfoldUnfoldOptionsUninitializedBufferUninitializedOptionsUninitializedParameterupdateUpsampleUpsamplingBilinear2dUpsamplingNearest2dvaluesvalueszero_gradZeroPad1dZeroPad2dZeroPad3d
absacosacoshAdaptivePool1dShapeAdaptivePool2dShapeaddaddbmmAddbmmOptionsaddcdivAddcdivOptionsaddcmulAddcmulOptionsaddmmAddmmOptionsaddmvAddmvOptionsaddrAddrOptionsadjointallallcloseAllcloseOptionsAlphaBetaOptionsamaxaminaminmaxAminmaxOptionsangleanyapplyOutarangeare_deterministic_algorithms_enabledargmaxargminargsortargwhereas_stridedas_tensorasinasinhAssertNoShapeErrorAssertNotErrorAsStridedOptionsAtat_error_index_out_of_boundsatanatan2atanhatleast_1datleast_2datleast_3dAtShapeautocast_decrement_nestingautocast_increment_nestingautograd_gradient_mismatch_errorautograd_not_registered_errorAutogradConfigAutogradDeviceAutogradDTypeAutogradEntryAutogradHandleAutogradHandleImplAxesRecordBackwardFnbaddbmmBaddbmmOptionsbartlett_windowBaseKernelConfigbatch_dimensions_do_not_match_errorbernoulliBernoulliOptionsBinaryBackwardFnBinaryBroadcastResultBinaryDTypeBinaryKernelConfigCPUBinaryKernelCPUBinaryOpConfigBinaryOpNamesBinaryOpSchemaBinaryOptionsbincountBincountOptionsbitwise_andbitwise_left_shiftbitwise_notbitwise_orbitwise_right_shiftbitwise_xorblackman_windowblock_diagbmmBooleanDTypeRulebroadcast_error_incompatible_dimensionsbroadcast_shapesbroadcast_tensorsbroadcast_toBroadcastShapeBroadcastShapeRulebroadcastShapesbucketizeBucketizeOptionsBufferUsagebuildEinopsErrorbuildErrorMessagecanBroadcastTocartesian_prodcatCatOptionsCatShapeCauchyOptionscdistCdistOptionsceilceluCeluFunctionalOptionschain_matmulCheckShapeErrorCholeskyShapechunkchunk_error_dim_out_of_rangeChunkOptionsclampClampOptionsclear_autocast_cacheclearEinopsCacheclearEinsumCacheclonecolumn_stackcombinationsCombinationsOptionscompiled_with_cxx11_abicomplexconjconj_physicalcontiguousConv1dShapeConv2dShapeConv3dShapeConvTranspose2dShapecopysigncorrcoefcoscoshcount_nonzeroCountNonzeroOptionscovcoverage_reportcoverageReportCoverageReportCovOptionsCPUForwardFnCPUKernelConfigCPUKernelEntryCPUOnlyResultCPUTensorDatacreateCumExtremeResultcreateTorchCreationOpSchemaCumExtremeResultcummaxcummincumprodCumShapecumsumcumul
ative_trapezoidCumulativeOptionsCumulativeOptionsWithDimdeg2raddetachDeterministicOptionsDetShapeDevicedevice_error_requiresDeviceBufferDeviceCapabilitiesDeviceCheckedResultDeviceConfigDeviceContextDeviceEntryDeviceHandleDeviceInputDeviceOptionsDeviceRegistryDeviceTypediagdiag_embedDiagEmbedOptionsdiagflatDiagflatOptionsDiagFlatOptionsdiagonal_scatterDiagonalOptionsDiagonalScatterOptionsDiagOptionsDiagShapediffDiffOptionsdigammadimension_error_out_of_rangeDispatchConfigdistDistOptionsdivdotDotShapeRuleDoubleDoubleDimdropoutDropoutFunctionalOptionsdsplitdstackDTypedtype_already_registered_errordtype_components_mismatch_errordtype_not_found_errorDTypeComponentsDTypeConfigDTypeCoverageReportDTypeDisplayConfigDTypeEntryDTypeHandleDTypeHandleImplDTypeInfoDTypeRegistryDTypeRuleDTypeSerializationConfigDynamicShapeEigShapeeinops_error_ambiguous_decompositioneinops_error_anonymous_in_outputeinops_error_dimension_mismatcheinops_error_invalid_patterneinops_error_reduce_undefined_outputeinops_error_repeat_missing_sizeeinops_error_undefined_axiseinsumeinsum_error_dimension_mismatcheinsum_error_index_out_of_rangeeinsum_error_invalid_equationeinsum_error_invalid_sublist_elementeinsum_error_operand_count_mismatcheinsum_error_subscript_rank_mismatcheinsum_error_unknown_output_indexEinsumOptionsEinsumOutputShapeEllipsiseluelu_EluFunctionalOptionsembedding_bag_error_requires_2d_inputemptyempty_cacheempty_likeeqequalerferfcerfinvexpexp2expandexpand_asexpand_error_incompatibleExpandShapeexpm1ExponentialOptionseyeEyeOptionsfftFFTOptionsfindKernelWithPredicatefindSimilarPatternsflattenFlattenOptionsFlattenShapeflipflip_error_dim_out_of_rangefliplrFlipShapeflipudfloat_powerFloatDTypeRulefloorfloor_dividefmaxfminfmodformatEquationErrorformatShapefracfrexpfrombufferfullfull_likefunction_already_registered_errorFunctionConfigFunctionEntryFunctionHandlegathergather_error_dim_out_of_rangeGatherShapegcdgegeluGeometricOptionsget_autocast_cpu_dtypeget_autocast_gpu_dtypeget_autocast_ipu_dtypeget_au
tocast_xla_dtypeget_default_deviceget_default_dtypeget_deterministic_debug_modeget_device_configget_device_contextget_device_moduleget_dtype_infoget_file_pathget_float32_matmul_precisionget_num_interop_threadsget_num_threadsget_op_infoget_printoptionsget_real_dtypeget_rng_stategetAutogradgetDTypegetEinopsCacheSizegetEinsumCacheSizegetFunctiongetKernelgetMethodgetOpInfoGetOpKindGetOpSchemagetScalarKernelgluGluFunctionalOptionsGradContextGradFnGradientsForgtHalfHalfDimhamming_windowhann_windowhardshrinkhardsigmoidhardswishhardtanhhardtanh_HardtanhFunctionalOptionshas_autogradhas_devicehas_dtypehas_kernelhasAutogradhasDTypehasFunctionhasKernelhasMethodhasScalarKernelHasShapeErrorheavisidehistcHistcOptionshistogramHistogramOptionsHistogramResulthsplithstackhypoti0IdentityShapeifftimagindex_addindex_copyindex_fillindex_putindex_reduceindex_selectindex_select_error_dim_out_of_rangeIndexPutOptionsIndexSelectShapeIndexSpecIndicesOptionsIndicesSpecinitialize_deviceInputsForInsertDiminvalid_config_errorinverseInverseShapeirfftis_anomaly_check_nan_enabledis_anomaly_enabledis_autocast_cache_enabledis_autocast_cpu_enabledis_autocast_ipu_enabledis_autocast_xla_enabledis_complexis_complex_dtypeis_cpu_only_modeis_deterministic_algorithms_warn_only_enabledis_floating_pointis_floating_point_dtypeis_inference_mode_enabledis_nonzerois_tensoris_warn_always_enabledis_webgpu_availableIs2DIsAtLeast1DIsBinaryOpIsBinaryOpNameiscloseIscloseOptionsisfiniteisinisinfisnanisneginfisposinfisrealIsReductionOpIsReductionOpNameIsRegistryErrorIsShapeErroristftISTFTOptionsIsUnaryOpIsUnaryOpNameitem_error_not_scalarItemResultkaiser_windowKaiserWindowOptionskernel_not_registered_errorkernel_signature_mismatch_errorKernelConfigKernelConfigWebGPUKernelEntryKernelHandleKernelInfoKernelPredicateKernelRegistryKernelWebGPUkronkthvalueKthvalueOptionslcmldexpleleaky_reluleaky_relu_LeakyReluFunctionalOptionslerplevenshteinDistancelgammalinalg_error_not_square_matrixlinalg_error_requires_2dlinalg_error_requires_at
_least_2dlinearlinspacelist_custom_deviceslist_custom_dtypeslist_deviceslist_dtypeslist_functionslist_kernelslist_methodslist_opslistCustomDTypeslistDTypeslistFunctionslistKernelsListKernelsOptionslistMethodslistOpsListOpsOptionsloglog_softmaxlog10log1plog2logaddexplogaddexp2logcumsumexplogical_andlogical_notlogical_orlogical_xorLogitOptionsLogNormalOptionsLogOptionslogsigmoidlogspacelogsumexpLogsumexpOptionsltLUShapeLuSolveOptionsmasked_fillmasked_selectmasked_select_asyncMaskSpecmatmulmatmul_error_inner_dimensions_do_not_matchMatmul2DShapeMatmulShapeMatmulShapeRuleMatrixTransposeShapemaxmaximummeanmedianmemory_statsmemory_summarymeshgridmethod_already_registered_errormethod_dtype_not_supported_errorMethodConfigMethodEntryMethodHandleminminimummishmmMMShapeRulemodemovedimmsortmulmultinomialmultinomial_asyncMultinomialAsyncOptionsMultinomialOptionsMultiplyBymvMVShapeRulenan_to_numnanmeannanmediannanquantileNanReductionOptionsnansumNanToNumOptionsnarrownarrow_copynarrow_error_length_exceeds_boundsnarrow_error_start_out_of_boundsNarrowShapeneneedsBroadcastnegNegativeDimnextafternonzeroNonzeroOptionsnormnormalNormalOptionsNormOptionsnumelonesones_likeop_kind_mismatch_errorop_not_found_errorOpCoverageEntryOpInfoOpKindOpNameOpSchemaOpSchemasouterOuterShapepackPackShapepermutepermute_error_dimension_count_mismatchPermuteShapepoissonpolarPool1dShapePool2dShapePool3dShapepositivepowpreluPrintOptionsprodprofiler_allow_cudagraph_cupti_lazy_reinit_cuda12promote_typesPromoteDTypeRulePutOptionsquantileQuantileOptionsrad2degrandrand_likerandintrandint_likeRandintLikeOptionsRandintOptionsrandnrandn_likeRandomLikeOptionsRandomOptionsrandpermRangeSpecRankravelrealrearrangeRearrangeOptionsRearrangeShapereciprocalreduceReduceOperationReduceOptionsReduceShapeReductionKernelConfigCPUReductionKernelCPUReductionOpNamesReductionOpSchemaReductionOptionsReductionShapeRuleregister_backwardregister_deviceregister_dtyperegister_forwardregister_functionregister_methodregister_scalar_forwardregis
terAutogradRegisterBackwardOptionsregisterBinaryOpregisterDTypeRegisterDTypeOptionsRegisteredDTyperegisterFunctionRegisterFunctionOptionsregisterKernelRegisterKernelOptionsregisterMethodRegisterMethodOptionsregisterScalarKernelregisterUnaryOpregistration_failed_errorrelurelu_relu6ReluFunctionalOptionsremainderRemoveDimrepeatrepeat_interleaveRepeatInterleaveOptionsRepeatOptionsRepeatShapeReplaceDimrequireWebGPUreset_peak_memory_statsreshapeReshapeShaperesult_typerfftrollRollOptionsrot90Rot90Optionsroundrrelurrelu_RreluFunctionalOptionsrsqrtSafeExpandShapeSameDTypeRuleSameShapeRuleSaveForBackwardScalarCPUForwardFnScalarCPUKernelConfigScalarKernelEntryScalarKernelHandleScalarWebGPUKernelConfigScaleDimscatterscatter_addscatter_add_scatter_error_dim_out_of_rangescatter_reducescatter_reduce_ScatterReduceOptionsScatterShapesearchsortedSearchSortedOptionsselectselect_error_index_out_of_boundsselect_scatterSelectShapeseluset_default_deviceset_default_tensor_typeset_deterministic_debug_modeset_float32_matmul_precisionset_printoptionsset_warn_alwaysSetupContextFnShapeShapeCheckedResultShapedTensorShapeErrorMessageShapeOpSchemaShapeRulesigmoidsignsignbitsilusinsincsinhSizeOptionsslice_error_out_of_boundsslice_scatterSliceOptionsSliceScatterOptionsSliceShapeSliceSpecsoftmaxsoftmax_error_dim_out_of_rangeSoftmaxShapesoftminSoftminFunctionalOptionssoftplusSoftplusFunctionalOptionssoftshrinksoftsignsortSortOptionssplitsplit_error_dim_out_of_rangeSplitOptionssqrtsquaresqueezeSqueezeOptionsSqueezeShapestackStackOptionsStackShapestdstd_meanStdVarMeanOptionsStdVarOptionsstftSTFTOptionsStrideOptionssubSublistSublistElementSubscriptIndexsumSVDShapeswapaxessym_floatsym_intsym_notttaketake_along_dimTakeAlongDimOptionstantanhtanhshrinktensortensor_splitTensorCreatorTensorDatatensordotTensordotOptionsTensorLikeTensorMetaTensorOptionsTensorStoragethresholdthreshold_tileTileShapeToOptionstopkTopkOptionsTorchtraceTraceShapetransposetranspose_dims_error_out_of_rangetranspose_error_requires_2d_ten
sorTransposeDimsShapeTransposeDimsShapeCheckedTransposeShapetrapezoidTrapezoidOptionsTriangularOptionstriltril_indicesTriOptionsTripletriutriu_indicestrue_dividetruncTupleOfLengthTypedArrayTypedArrayForTypedStorageTypeOptionsUnaryBackwardFnUnaryDTypeUnaryKernelConfigCPUUnaryKernelCPUUnaryOpConfigUnaryOpFnUnaryOpNamesUnaryOpParamsUnaryOpSchemaUnaryOptionsunbindunbind_error_dim_out_of_rangeUnbindOptionsunflattenUniformOptionsuniqueunique_consecutiveUniqueConsecutiveOptionsUniqueOptionsunpackUnpackShapeunravel_indexunregister_deviceunsqueezeUnsqueezeOptionsUnsqueezeShapeuse_deterministic_algorithmsValidateBatchedSquareMatrixValidateChunkDimValidatedEinsumShapevalidateDeviceValidateDeviceValidatedRearrangeShapeValidatedReduceShapeValidatedRepeatShapevalidateDTypeValidateEinsumValidateOperandCountValidateRanksValidateScalarValidateSplitDimValidateSquareMatrixValidateUnbindDimValueOptionsvar_var_meanvdotviewview_as_complexview_as_realvmapvsplitvstackWebGPUKernelConfigWebGPUOnlyResultWebGPUTensorDatawhereWindowOptionsxlogyzeroszeros_like
torch.js· 2026
LegalTerms of UsePrivacy Policy
/
/
  1. docs
  2. torch.js
  3. torch
  4. nn
  5. functional
  6. group_norm

torch.nn.functional.group_norm

function group_norm<S extends Shape, D extends DType = DType, Dev extends DeviceType = DeviceType>(input: Tensor<S, D, Dev>, num_groups: number, options?: GroupNormFunctionalOptions): Tensor<S, D, Dev>

function group_norm<S extends Shape, D extends DType = DType, Dev extends DeviceType = DeviceType>(input: Tensor<S, D, Dev>, num_groups: number, weight: Tensor | undefined, bias: Tensor | undefined, eps: number, options?: GroupNormFunctionalOptions): Tensor<S, D, Dev>

Group Normalization: divides channels into groups and normalizes each group independently.

Applies group normalization where channels are split into num_groups disjoint groups, and normalization is computed independently over each group. A middle ground between layer normalization (normalizes all features together) and instance normalization (one group per channel). Particularly effective for tasks with small batch sizes or variable batch sizes. Essential for:

  • Small batch size training (batch norm unstable with small batches)
  • Object detection and semantic segmentation (standard in ResNets)
  • Video understanding (temporal batch dimension variability)
  • Adversarial training (batch norm statistics problematic)
  • Fine-grained image recognition with small batches
  • Models where batch normalization causes instability
  • Distributed training with per-GPU batch size = 1

How Group Norm works:

  1. Split C channels into G groups of size C/G each
  2. For each sample in batch: compute mean and variance per group
  3. Normalize each group independently (z-score)
  4. Apply learnable affine transform (weight and bias)

Group Norm variants (by num_groups):

  • num_groups = 1: Equivalent to LayerNorm (one group = all channels)
  • num_groups = C (channels): Equivalent to InstanceNorm (one group per channel)
  • 1 < num_groups < C: Balance between layer norm and instance norm
  • Typical: num_groups = 32 for ResNet-style architectures

When to use GroupNorm:

  • Small batch size (batch_size < 8)
  • Variable batch sizes in training
  • Replacing batch norm for stability
  • Object detection/segmentation networks
  • When batch statistics are unreliable
  • Video/sequence models with temporal variability

Comparison with alternatives:

  • Batch Norm: Normalizes over batch; requires large batch for stability
  • Layer Norm: Single group over all channels; computationally efficient
  • Instance Norm: One group per channel; too aggressive for some tasks
  • Group Norm: Middle ground; best empirically for small batches
$$
\begin{aligned}
\text{GroupNorm}(x) &= \gamma \cdot \frac{x - \mu_g}{\sqrt{\sigma_g^2 + \epsilon}} + \beta \\
&\text{where } \mu_g, \sigma_g \text{ are computed per group } g \text{ for each sample} \\
&\text{Channels split into } G \text{ groups: } [C_1, C_2, \ldots, C_G] \text{ where } C_i = C / G \\
&\text{Special cases: } G = 1 \rightarrow \text{LayerNorm}, \quad G = C \rightarrow \text{InstanceNorm}
\end{aligned}
$$
  • Batch-size independent: Statistics computed per group, not per batch
  • Small batch friendly: Works well with batch_size=1 (unlike BatchNorm)
  • Flexible grouping: num_groups controls group size (balance between extremes)
  • Learnable parameters: weight and bias trained like LayerNorm
  • ResNet standard: Standard in modern object detection and segmentation models
  • Per-sample normalization: Each sample has independent group statistics
  • Channel divisibility: num_groups must divide num_channels exactly
  • num_groups constraint: num_groups must divide num_channels (e.g., 256 channels → 32 groups OK)
  • Minimum 2D: Requires at least 2D input (batch, channels required)
  • Small groups: Very small groups (1-2 channels) may hurt performance
  • Different from batch norm: Statistics computed differently; direct replacement may need tuning

Parameters

inputTensor<S, D, Dev>
Input tensor [batch, channels, spatial...]. Minimum 2D required. Example: [N, C, H, W] for images, [N, C, length] for 1D signals
num_groupsnumber
Number of groups to divide channels into. Must divide num_channels. Example: num_groups=32 for ResNet with 256 channels → 8 channels per group
optionsGroupNormFunctionalOptionsoptional

Returns

Tensor<S, D, Dev>– Normalized tensor with same shape as input

Examples

// Standard GroupNorm with 32 groups (ResNet-style)
const batch = 4, channels = 256, height = 28, width = 28;
const x = torch.randn([batch, channels, height, width]);  // [4, 256, 28, 28]
const normalized = torch.nn.functional.group_norm(x, 32);  // 32 groups, 8 channels/group
// Each sample has 32 independent normalizations over spatial dims + channel groups
// Object detection with small batch size (GroupNorm standard)
class ResNetBackbone extends torch.nn.Module {
  private layer1: torch.nn.Sequential;
  private gn1: torch.Tensor;

  constructor() {
    super();
    this.layer1 = conv_block_3x3(64);  // Conv layer
    this.gn1 = torch.ones([64]);  // GroupNorm weight for 64 channels
  }

  forward(x: torch.Tensor): torch.Tensor {
    x = this.layer1.forward(x);  // [N, 64, H, W]
    x = torch.nn.functional.group_norm(x, 32, this.gn1);  // 2 channels per group
    return x;
  }
}
// Small batch training: where batch norm fails
const batch_size = 2;  // Too small for reliable batch norm
const x = torch.randn([batch_size, 512, 14, 14]);

// Batch norm would be unstable; use group norm instead
const weight = torch.ones([512]);
const bias = torch.zeros([512]);
const normalized = torch.nn.functional.group_norm(x, 32, weight, bias);
// Stable normalization even with batch_size=2
// Varying batch size in distributed training
const batch_per_gpu = 1;  // Each GPU has batch=1 (small!)
const x = torch.randn([batch_per_gpu, 64, 32, 32]);

// GroupNorm handles this gracefully (BatchNorm would fail)
const output = torch.nn.functional.group_norm(x, 8);  // 8 groups for 64 channels
// Per-group statistics computed independently per sample
// Comparing num_groups effect
const x = torch.randn([4, 256, 28, 28]);
const weight = torch.ones([256]);

const g1 = torch.nn.functional.group_norm(x, 1, weight);    // LayerNorm (single group)
const g32 = torch.nn.functional.group_norm(x, 32, weight);  // 8 channels/group
const g256 = torch.nn.functional.group_norm(x, 256, weight); // InstanceNorm (1 channel/group)

// Different num_groups produce different normalization statistics
// g32 is typically best for small batches

See Also

  • PyTorch torch.nn.functional.group_norm
  • torch.nn.functional.layer_norm - Single group (all channels together)
  • torch.nn.functional.instance_norm - One group per channel (equivalent to num_groups = C)
  • torch.nn.functional.batch_norm - Normalizes over batch (requires large batches)
  • torch.nn.GroupNorm - Module version with automatic parameter management
Previous
GridSampleFunctionalOptions
Next
grouped_mm