@@ -239,11 +239,11 @@ def forward(self, inputs):
 optimizer = optim.SGD(model.parameters(), lr=0.001)

 for epoch in range(10):
-    total_loss = torch.Tensor([0])
+    total_loss = 0
     for context, target in trigrams:

         # Step 1. Prepare the inputs to be passed to the model (i.e, turn the words
-        # into integer indices and wrap them in variables)
+        # into integer indices and wrap them in tensors)
         context_idxs = torch.tensor([word_to_ix[w] for w in context], dtype=torch.long)

         # Step 2. Recall that torch *accumulates* gradients. Before passing in a
@@ -256,7 +256,7 @@ def forward(self, inputs):
         log_probs = model(context_idxs)

         # Step 4. Compute your loss function. (Again, Torch wants the target
-        # word wrapped in a variable)
+        # word wrapped in a tensor)
         loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))

         # Step 5. Do the backward pass and update the gradient
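Since the hunks above show only the changed lines, here is a minimal sketch of how the full training loop reads after switching total_loss from torch.Tensor([0]) to a plain Python number. The accumulation line (total_loss += loss.item()) is not visible in this diff and is assumed here; .item() pulls out a Python float, so the running total does not keep a reference to the autograd graph.

losses = []
for epoch in range(10):
    total_loss = 0
    for context, target in trigrams:
        # Step 1. Prepare the inputs: integer word indices wrapped in a tensor.
        context_idxs = torch.tensor([word_to_ix[w] for w in context], dtype=torch.long)

        # Step 2. Zero out gradients accumulated from the previous iteration.
        model.zero_grad()

        # Step 3. Forward pass: log probabilities over the next word.
        log_probs = model(context_idxs)

        # Step 4. Compute the loss; the target word index also goes in a tensor.
        loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))

        # Step 5. Backward pass and parameter update.
        loss.backward()
        optimizer.step()

        # Assumed accumulation step: .item() detaches the scalar from the graph.
        total_loss += loss.item()
    losses.append(total_loss)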