torch.nn.MSELossOptions
Mean Squared Error (MSE) loss: standard loss for regression tasks.
Measures the average squared difference between predicted and target values. Fundamental loss function for:
- Regression (predicting continuous values: prices, temperatures, distances)
- Denoising (image/audio denoising, removing noise from clean signal)
- Reconstruction (autoencoders, generative models)
- Pixel-wise predictions (depth estimation, semantic segmentation)
- Object detection (bounding box regression, coordinate prediction)
- Time series forecasting (predicting future values)
Because the differences are squared, large errors are penalized disproportionately, which makes MSE sensitive to outliers. It is differentiable everywhere, enabling efficient gradient-based optimization.
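For reference, with the default 'mean' reduction the loss is the average of squared element-wise differences. A plain-TypeScript sketch of that arithmetic (no library calls; the function name is illustrative):

// Illustrative reference only: MSE over plain number arrays, matching the 'mean' reduction
function mseReference(preds: number[], targets: number[]): number {
  const squaredErrors = preds.map((p, i) => (p - targets[i]) ** 2);
  return squaredErrors.reduce((sum, e) => sum + e, 0) / squaredErrors.length;
}
// mseReference([100, 150, 200, 250], [95, 155, 210, 245]) === 43.75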
Definition
export interface MSELossOptions {
  /** How to reduce loss across batch (default: 'mean') */
  reduction?: Reduction;
}

reduction (Reduction, optional) – How to reduce loss across the batch (default: 'mean')
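A minimal construction sketch, assuming the constructor accepts an MSELossOptions object (omit it to use the default 'mean' reduction):

const criterion = new torch.nn.MSELoss({ reduction: 'sum' }); // sum per-element errors instead of averaging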
Examples
// Simple regression: predicting house prices
const mse_loss = new torch.nn.MSELoss();
// Model predictions (continuous values)
const predictions = torch.tensor([100, 150, 200, 250], { dtype: 'float32' });
// True target values
const targets = torch.tensor([95, 155, 210, 245], { dtype: 'float32' });
// Compute loss
const loss = mse_loss.forward(predictions, targets); // scalar value
// loss = mean([(100-95)², (150-155)², (200-210)², (250-245)²])
// = mean([25, 25, 100, 25]) = 43.75

// Batch regression with neural network
class RegressionNet extends torch.nn.Module {
  fc1: torch.nn.Linear;
  fc2: torch.nn.Linear;
  fc3: torch.nn.Linear;

  constructor() {
    super();
    this.fc1 = new torch.nn.Linear(10, 64);
    this.fc2 = new torch.nn.Linear(64, 32);
    this.fc3 = new torch.nn.Linear(32, 1); // Single output for regression
  }

  forward(x: torch.Tensor): torch.Tensor {
    x = this.fc1.forward(x);
    x = torch.nn.functional.relu(x);
    x = this.fc2.forward(x);
    x = torch.nn.functional.relu(x);
    x = this.fc3.forward(x);
    return x;
  }
}
const model = new RegressionNet();
const mse = new torch.nn.MSELoss();
// Training batch
const batch_x = torch.randn([32, 10]);
const batch_y = torch.randn([32, 1]);
const predictions = model.forward(batch_x); // [32, 1]
const loss = mse.forward(predictions, batch_y); // scalar
// loss.backward(); // Compute gradients
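A hedged sketch of completing one optimization step with this loss; torch.optim.SGD, model.parameters(), zeroGrad(), and step() are assumed names not documented on this page:

// Hypothetical optimizer API: class name, constructor signature, and method names are assumptions
const optimizer = new torch.optim.SGD(model.parameters(), { lr: 0.01 });
const step_predictions = model.forward(batch_x);           // [32, 1]
const step_loss = mse.forward(step_predictions, batch_y);  // scalar
optimizer.zeroGrad();  // clear accumulated gradients (assumed method name)
step_loss.backward();  // backpropagate the loss
optimizer.step();      // apply gradient update (assumed method name)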

// Image denoising: predicting clean image from noisy input
const mse = new torch.nn.MSELoss();
// Model output (denoised prediction)
const denoised = torch.randn([1, 3, 256, 256]);
// Clean target image
const clean_image = torch.randn([1, 3, 256, 256]);
// Pixel-wise reconstruction loss
const loss = mse.forward(denoised, clean_image);
// Loss encourages denoised image to match clean image exactly

// Using different reduction modes
const mse_none = new torch.nn.MSELoss({ reduction: 'none' });
const mse_mean = new torch.nn.MSELoss({ reduction: 'mean' });
const mse_sum = new torch.nn.MSELoss({ reduction: 'sum' });
const preds = torch.tensor([1, 2, 3, 4]);
const targets = torch.tensor([1, 2, 4, 4]); // Error on third element
const loss_none = mse_none.forward(preds, targets); // [0, 0, 1, 0] - per-element
const loss_mean = mse_mean.forward(preds, targets); // 0.25 - averaged
const loss_sum = mse_sum.forward(preds, targets); // 1.0 - summed
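The 'none' reduction is useful when per-element losses need custom handling, such as weighting or masking before reducing manually. A hedged sketch, assuming elementwise mul() and mean() methods exist on Tensor (not confirmed by this page):

// Hypothetical tensor methods: mul() and mean() are assumptions
const per_element = mse_none.forward(preds, targets);             // [0, 0, 1, 0]
const weights = torch.tensor([1, 1, 2, 1], { dtype: 'float32' }); // up-weight the third element
const weighted_loss = per_element.mul(weights).mean();            // manual weighted reduction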