Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit 5dc364f

Browse files
authored
Merge pull request #309 from MatthewInkawhich/rc1
Added hosted model download link and fixed model saving
2 parents 27f6b35 + ae949d6 commit 5dc364f

1 file changed

Lines changed: 13 additions & 12 deletions

File tree

beginner_source/deploy_seq2seq_hybrid_frontend_tutorial.py

Lines changed: 13 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -117,9 +117,8 @@
117117
import unicodedata
118118
import numpy as np
119119

120+
device = torch.device("cpu")
120121

121-
USE_CUDA = torch.cuda.is_available()
122-
device = torch.device("cuda" if USE_CUDA else "cpu")
123122

124123
MAX_LENGTH = 10 # Maximum sentence length
125124

@@ -677,7 +676,7 @@ def evaluateExample(sentence, encoder, decoder, searcher, voc):
677676
#
678677
# To load the hosted model:
679678
#
680-
# 1) Download the model `here <>`__.
679+
# 1) Download the model `here <https://download.pytorch.org/models/tutorials/4000_checkpoint.tar>`__.
681680
#
682681
# 2) Set the ``loadFilename`` variable to the path to the downloaded
683682
# checkpoint file.
@@ -728,18 +727,19 @@ def evaluateExample(sentence, encoder, decoder, searcher, voc):
728727
dropout = 0.1
729728
batch_size = 64
730729

730+
# If you're loading your own model
731731
# Set checkpoint to load from
732732
checkpoint_iter = 4000
733-
loadFilename = os.path.join(save_dir, model_name, corpus_name,
734-
'{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size),
735-
'{}_checkpoint.tar'.format(checkpoint_iter))
733+
# loadFilename = os.path.join(save_dir, model_name, corpus_name,
734+
# '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size),
735+
# '{}_checkpoint.tar'.format(checkpoint_iter))
736736

737+
# If you're loading the hosted model
738+
loadFilename = '4000_checkpoint.tar'
737739

738740
# Load model
739-
# If loading on same machine the model was trained on
740-
checkpoint = torch.load(loadFilename)
741-
# If loading a model trained on GPU to CPU
742-
#checkpoint = torch.load(loadFilename, map_location=torch.device('cpu'))
741+
# Force CPU device options (to match tensors in this tutorial)
742+
checkpoint = torch.load(loadFilename, map_location=torch.device('cpu'))
743743
encoder_sd = checkpoint['en']
744744
decoder_sd = checkpoint['de']
745745
encoder_optimizer_sd = checkpoint['en_opt']
@@ -874,7 +874,8 @@ def evaluateExample(sentence, encoder, decoder, searcher, voc):
874874
# will serialize it for use in a non-Python deployment environment. To do
875875
# this, we can simply save our ``scripted_searcher`` module, as this is
876876
# the user-facing interface for running inference against the chatbot
877-
# model.
877+
# model. When saving a Script module, use script_module.save(PATH) instead
878+
# of torch.save(model, PATH).
878879
#
879880

880-
torch.save(scripted_searcher.state_dict(), "scripted_chatbot.pth")
881+
scripted_searcher.save("scripted_chatbot.pth")

0 commit comments

Comments (0)