Hello — I need to set a different weight for each instance in a training batch when using BCEWithLogitsLoss, but I found it only supports a single weight shared by all instances in a batch. Is there any way to deal with this, or should support for it be added? Below is my test code.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class TwoLayerNet(torch.nn.Module):
    """A minimal MLP: Linear -> ReLU -> Linear.

    D_in, H and D_out are the input, hidden and output widths.
    """

    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet, self).__init__()
        self.linear1 = nn.Linear(D_in, H)
        self.linear2 = nn.Linear(H, D_out)

    def forward(self, x):
        # clamp(min=0) is elementwise max(x, 0), i.e. a ReLU.
        hidden = self.linear1(x).clamp(min=0)
        return self.linear2(hidden)
# Smoke-test script: train the two-layer net for 10 SGD steps, masking out
# one output column per step via the BCEWithLogitsLoss `weight` argument.
#
# NOTE(review): `weight` is multiplied into the element-wise loss and is
# broadcast against the (N, D_out) loss matrix, so a full-size
# `mask = torch.ones(N, D_out)` (per-instance weights) should work as
# well -- confirm against the BCEWithLogitsLoss docs for your version.
N, D_in, H, D_out = 64, 1000, 100, 10  # batch, input, hidden, output sizes

# Plain tensors: torch.autograd.Variable has been a deprecated no-op
# since PyTorch 0.4 -- tensors carry requires_grad themselves.
x = torch.randn(N, D_in, requires_grad=True)
# NOTE(review): BCE targets are normally probabilities in [0, 1]; randn
# produces values outside that range -- acceptable for a smoke test only.
y = torch.randn(N, D_out)

model = TwoLayerNet(D_in, H, D_out)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)

for t in range(10):
    y_pred = model(x)

    # Zero out the loss contribution of output column t.
    mask = torch.ones(D_out)
    mask[t] = 0
    # reduction='mean' is the modern spelling of size_average=True
    # (and is the default).
    criterion = nn.BCEWithLogitsLoss(weight=mask, reduction='mean')
    loss = criterion(y_pred, y)
    # loss.item() replaces loss.data[0], which raises on 0-dim tensors
    # in PyTorch >= 0.4.
    print(t, loss.item())

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()