Grokking-Deep-Learning
Grokking-Deep-Learning copied to clipboard
Chapter 15 small issue in train
`bs` is not defined inside `train` and should probably be `batch_size`.
def train(model, input_data, target_data, batch_size=500, iterations=5):
    """Train *model* on (input_data, target_data) with SGD on an MSE loss.

    Args:
        model: network exposing get_parameters(), forward(), and weight.data
            (an embedding-style weight indexed by token id).
        input_data: training inputs, sliceable into contiguous mini-batches.
        target_data: training targets, aligned index-for-index with input_data.
        batch_size: number of examples per mini-batch.
        iterations: number of full passes over the batched data.

    Returns:
        The trained model (trained in place).
    """
    criterion = MSELoss()
    optim = SGD(parameters=model.get_parameters(), alpha=0.01)
    # Integer division: any trailing partial batch is dropped.
    n_batches = int(len(input_data) / batch_size)
    for epoch in range(iterations):
        epoch_loss = 0
        for b_i in range(n_batches):
            # padding token should stay at 0
            model.weight.data[w2i['<unk>']] *= 0
            # BUG FIX: the original referenced an undefined name `bs`;
            # the intended variable is the `batch_size` parameter.
            batch_in = Tensor(
                input_data[b_i * batch_size:(b_i + 1) * batch_size],
                autograd=True)
            batch_tgt = Tensor(
                target_data[b_i * batch_size:(b_i + 1) * batch_size],
                autograd=True)
            pred = model.forward(batch_in).sum(1).sigmoid()
            loss = criterion.forward(pred, batch_tgt)
            loss.backward()
            optim.step()
            epoch_loss += loss.data[0] / batch_size
            # \r keeps the running loss on a single console line.
            sys.stdout.write("\r\tLoss:" + str(epoch_loss / (b_i + 1)))
        print()
    return model
I hit the same issue, and the function works after changing `bs` to `batch_size`.
Created pr here : https://github.com/iamtrask/Grokking-Deep-Learning/pull/44