30 | 30 |
31 | 31 | To prevent tracking history (and using memory), you can also wrap the code block |
32 | 32 | in ``with torch.no_grad():``. This can be particularly helpful when evaluating a |
33 | | -model because the model may have trainable parameters with `requires_grad=True`, |
34 | | -but for which we don't need the gradients. |
| 33 | +model because the model may have trainable parameters with |
| 34 | +``requires_grad=True``, but for which we don't need the gradients. |
35 | 35 |
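For instance, a minimal sketch of the evaluation case described above (the ``nn.Linear`` model and random inputs are illustrative, not part of the tutorial):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)          # its weight and bias have requires_grad=True
inputs = torch.randn(8, 4)
with torch.no_grad():
    outputs = model(inputs)      # forward pass without recording history
print(outputs.requires_grad)     # False -- no gradients can flow back through outputs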
36 | 36 | There’s one more class which is very important for autograd |
37 | 37 | implementation - a ``Function``. |
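To make that concrete, every tensor produced by an operation keeps a ``.grad_fn`` attribute pointing at the ``Function`` that created it (a small sketch; the exact ``Backward`` class names may vary by PyTorch version):

import torch

a = torch.ones(2, 2, requires_grad=True)
b = a + 2
print(b.grad_fn)   # e.g. <AddBackward0 ...>, the Function that produced b
print(a.grad_fn)   # None, because a was created directly by the user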
52 | 52 | import torch |
53 | 53 |
54 | 54 | ############################################################### |
55 | | -# Create a tensor and set requires_grad=True to track computation with it |
| 55 | +# Create a tensor and set ``requires_grad=True`` to track computation with it |
56 | 56 | x = torch.ones(2, 2, requires_grad=True) |
57 | 57 | print(x) |
58 | 58 |
59 | 59 | ############################################################### |
60 | | -# Do an operation of tensor: |
| 60 | +# Do a tensor operation: |
61 | 61 | y = x + 2 |
62 | 62 | print(y) |
63 | 63 |
66 | 66 | print(y.grad_fn) |
67 | 67 |
68 | 68 | ############################################################### |
69 | | -# Do more operations on y |
| 69 | +# Do more operations on ``y`` |
70 | 70 | z = y * y * 3 |
71 | 71 | out = z.mean() |
72 | 72 |
86 | 86 | ############################################################### |
87 | 87 | # Gradients |
88 | 88 | # --------- |
89 | | -# Let's backprop now |
| 89 | +# Let's backprop now. |
90 | 90 | # Because ``out`` contains a single scalar, ``out.backward()`` is |
91 | 91 | # equivalent to ``out.backward(torch.tensor(1.))``. |
92 | 92 |
93 | 93 | out.backward() |
94 | 94 |
95 | 95 | ############################################################### |
96 | | -# print gradients d(out)/dx |
| 96 | +# Print gradients d(out)/dx |
97 | 97 | # |
98 | 98 |
99 | 99 | print(x.grad) |
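As a quick check of the printed values (a short derivation, not part of the commit): with ``x`` a 2x2 tensor of ones, ``out = (1/4) * sum(3 * (x_i + 2)^2)``, so ``d(out)/dx_i = 1.5 * (x_i + 2) = 4.5`` at ``x_i = 1``; ``x.grad`` should therefore print a 2x2 tensor filled with 4.5.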
172 | 172 | ############################################################### |
173 | 173 | # You can also stop autograd from tracking history on Tensors |
174 | 174 | # with ``.requires_grad=True`` by wrapping the code block in |
175 | | -# ``with torch.no_grad()``: |
| 175 | +# ``with torch.no_grad():`` |
176 | 176 | print(x.requires_grad) |
177 | 177 | print((x ** 2).requires_grad) |
178 | 178 |
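The hunk ends just before the ``with`` block itself; in the surrounding tutorial code it continues roughly as below (a sketch assuming the same ``x`` as above):

with torch.no_grad():
    print((x ** 2).requires_grad)   # False -- no history is tracked inside the block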