4444# - ``PIL``, ``PIL.Image``, ``matplotlib.pyplot`` (load and display
4545# images)
4646# - ``torchvision.transforms`` (transform PIL images into tensors)
47- # - ``torchvision.models`` (train or load pre-trained models)
47+ # - ``torchvision.models`` (train or load pretrained models)
4848# - ``copy`` (to deep copy the models; system package)
4949
5050from __future__ import print_function
8484# torch library are trained with tensor values ranging from 0 to 1. If you
8585# try to feed the networks with 0 to 255 tensor images, then the activated
8686# feature maps will be unable to sense the intended content and style.
87- # However, pre-trained networks from the Caffe library are trained with 0
87+ # However, pretrained networks from the Caffe library are trained with 0
8888# to 255 tensor images.
8989#
9090#
9696# with name ``images`` in your current working directory.
9797
9898# desired size of the output image
99- imsize = 512 if torch .cuda .is_available () else 128 # use small size if no gpu
99+ imsize = 512 if torch .cuda .is_available () else 128 # use small size if no GPU
100100
101101loader = transforms .Compose ([
102102 transforms .Resize (imsize ), # scale imported image
@@ -220,7 +220,7 @@ def gram_matrix(input):
220220 # b=number of feature maps
221221 # (c,d)=dimensions of a f. map (N=c*d)
222222
223- features = input .view (a * b , c * d ) # resise F_XL into \hat F_XL
223+ features = input .view (a * b , c * d ) # resize F_XL into \hat F_XL
224224
225225 G = torch .mm (features , features .t ()) # compute the gram product
226226
@@ -251,7 +251,7 @@ def forward(self, input):
251251# Importing the Model
252252# -------------------
253253#
254- # Now we need to import a pre-trained neural network. We will use a 19
254+ # Now we need to import a pretrained neural network. We will use a 19
255255# layer VGG network like the one used in the paper.
256256#
257257# PyTorch’s implementation of VGG is a module divided into two child
@@ -277,7 +277,7 @@ def forward(self, input):
277277cnn_normalization_std = torch .tensor ([0.229 , 0.224 , 0.225 ]).to (device )
278278
279279# create a module to normalize input image so we can easily put it in a
280- # nn.Sequential
280+ # `` nn.Sequential``
281281class Normalization (nn .Module ):
282282 def __init__ (self , mean , std ):
283283 super (Normalization , self ).__init__ ()
@@ -288,14 +288,14 @@ def __init__(self, mean, std):
288288 self .std = torch .tensor (std ).view (- 1 , 1 , 1 )
289289
290290 def forward (self , img ):
291- # normalize img
291+ # normalize `` img``
292292 return (img - self .mean ) / self .std
293293
294294
295295######################################################################
296296# A ``Sequential`` module contains an ordered list of child modules. For
297- # instance, ``vgg19.features`` contains a sequence (Conv2d, ReLU, MaxPool2d,
298- # Conv2d, ReLU…) aligned in the right order of depth. We need to add our
297+ # instance, ``vgg19.features`` contains a sequence (`` Conv2d``, `` ReLU``, `` MaxPool2d`` ,
298+ # `` Conv2d``, `` ReLU`` …) aligned in the right order of depth. We need to add our
299299# content loss and style loss layers immediately after the convolution
300300# layer they are detecting. To do this we must create a new ``Sequential``
301301# module that has content loss and style loss modules correctly inserted.
@@ -312,12 +312,12 @@ def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
312312 # normalization module
313313 normalization = Normalization (normalization_mean , normalization_std ).to (device )
314314
315- # just in order to have an iterable access to or list of content/syle
315+  # just in order to have an iterable access to our list of content/style
316316 # losses
317317 content_losses = []
318318 style_losses = []
319319
320- # assuming that cnn is a nn.Sequential, so we make a new nn.Sequential
320+ # assuming that `` cnn`` is a `` nn.Sequential`` , so we make a new `` nn.Sequential``
321321 # to put in modules that are supposed to be activated sequentially
322322 model = nn .Sequential (normalization )
323323
@@ -328,8 +328,8 @@ def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
328328 name = 'conv_{}' .format (i )
329329 elif isinstance (layer , nn .ReLU ):
330330 name = 'relu_{}' .format (i )
331- # The in-place version doesn't play very nicely with the ContentLoss
332- # and StyleLoss we insert below. So we replace with out-of-place
331+ # The in-place version doesn't play very nicely with the `` ContentLoss``
332+ # and `` StyleLoss`` we insert below. So we replace with out-of-place
333333 # ones here.
334334 layer = nn .ReLU (inplace = False )
335335 elif isinstance (layer , nn .MaxPool2d ):
@@ -371,8 +371,11 @@ def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
371371#
372372
373373input_img = content_img .clone ()
374- # if you want to use white noise instead uncomment the below line:
375- # input_img = torch.randn(content_img.data.size(), device=device)
374+ # if you want to use white noise instead, use the following code:
375+ #
376+ # ::
377+ #
378+ # input_img = torch.randn(content_img.data.size(), device=device)
376379
377380# add the original input image to the figure:
378381plt .figure ()
@@ -385,7 +388,7 @@ def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
385388#
386389# As Leon Gatys, the author of the algorithm, suggested `here <https://discuss.pytorch.org/t/pytorch-tutorial-for-neural-transfert-of-artistic-style/336/20?u=alexis-jacq>`__, we will use
387390# L-BFGS algorithm to run our gradient descent. Unlike training a network,
388- # we want to train the input image in order to minimise the content/style
391+ # we want to train the input image in order to minimize the content/style
389392# losses. We will create a PyTorch L-BFGS optimizer ``optim.LBFGS`` and pass
390393# our image to it as the tensor to optimize.
391394#
@@ -400,7 +403,7 @@ def get_input_optimizer(input_img):
400403# Finally, we must define a function that performs the neural transfer. For
401404# each iteration of the networks, it is fed an updated input and computes
402405# new losses. We will run the ``backward`` methods of each loss module to
403- # dynamicaly compute their gradients. The optimizer requires a “closure”
406+ # dynamically compute their gradients. The optimizer requires a “closure”
404407# function, which reevaluates the module and returns the loss.
405408#
406409# We still have one final constraint to address. The network may try to
0 commit comments