Commit 1abb722

Upgrade dynamic RNN for TensorFlow 1.0
1 parent 352ecdf commit 1abb722

File tree: 1 file changed, 5 insertions(+), 5 deletions(-)

examples/3_NeuralNetworks/dynamic_rnn.py

Lines changed: 5 additions & 5 deletions
@@ -119,14 +119,14 @@ def dynamicRNN(x, seqlen, weights, biases):
     # Reshaping to (n_steps*batch_size, n_input)
     x = tf.reshape(x, [-1, 1])
     # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
-    x = tf.split(0, seq_max_len, x)
+    x = tf.split(axis=0, num_or_size_splits=seq_max_len, value=x)
 
     # Define a lstm cell with tensorflow
-    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden)
+    lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden)
 
     # Get lstm cell output, providing 'sequence_length' will perform dynamic
     # calculation.
-    outputs, states = tf.nn.rnn(lstm_cell, x, dtype=tf.float32,
+    outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x, dtype=tf.float32,
                                 sequence_length=seqlen)
 
     # When performing dynamic calculation, we must retrieve the last
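
TensorFlow 1.0 reorders tf.split's arguments (the value to split now comes last) and moves the RNN cells and the static unrolling helper from tf.nn into tf.contrib.rnn. A minimal sketch of the upgraded calls in isolation follows; the placeholder shapes and sizes here are illustrative assumptions, not values taken from the example file:

    import tensorflow as tf

    seq_max_len, n_hidden = 20, 64  # assumed sizes, for illustration only
    x = tf.placeholder(tf.float32, [None, seq_max_len, 1])
    seqlen = tf.placeholder(tf.int32, [None])

    # Permute to time-major, flatten, then split into a list of per-step tensors.
    x_t = tf.transpose(x, [1, 0, 2])  # (n_steps, batch_size, n_input)
    x_t = tf.reshape(x_t, [-1, 1])    # (n_steps*batch_size, n_input)
    # The TF 1.0 signature is tf.split(value, num_or_size_splits, axis);
    # spelling out the keywords keeps the migrated call unambiguous.
    x_list = tf.split(axis=0, num_or_size_splits=seq_max_len, value=x_t)

    # Cells and the static unrolling helper now live under tf.contrib.rnn.
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden)
    outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x_list, dtype=tf.float32,
                                                sequence_length=seqlen)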
@@ -138,7 +138,7 @@ def dynamicRNN(x, seqlen, weights, biases):
 
     # 'outputs' is a list of output at every timestep, we pack them in a Tensor
     # and change back dimension to [batch_size, n_step, n_input]
-    outputs = tf.pack(outputs)
+    outputs = tf.stack(outputs)
     outputs = tf.transpose(outputs, [1, 0, 2])
 
     # Hack to build the indexing and retrieve the right output.
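
tf.pack was simply renamed to tf.stack in TensorFlow 1.0, with unchanged behavior. Continuing the sketch above, one way to stack the per-step outputs and then gather each sequence's last valid output; the indexing below is an assumed reconstruction of the "hack" the comment refers to, not the file's exact code:

    # 'outputs' is a list of n_steps tensors of shape (batch_size, n_hidden).
    outputs = tf.stack(outputs)                 # (n_steps, batch_size, n_hidden)
    outputs = tf.transpose(outputs, [1, 0, 2])  # (batch_size, n_steps, n_hidden)

    # Flatten to (batch_size*n_steps, n_hidden) and gather row seqlen-1 of
    # each sample by its flat index.
    batch_size = tf.shape(outputs)[0]
    index = tf.range(0, batch_size) * seq_max_len + (seqlen - 1)
    last_outputs = tf.gather(tf.reshape(outputs, [-1, n_hidden]), index)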
@@ -154,7 +154,7 @@ def dynamicRNN(x, seqlen, weights, biases):
 pred = dynamicRNN(x, seqlen, weights, biases)
 
 # Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
 optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
 
 # Evaluate model
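
TensorFlow 1.0 also makes labels and logits keyword-only in tf.nn.softmax_cross_entropy_with_logits (a sentinel argument rejects positional calls), which is why the keywords must be spelled out above. A sketch of the loss and optimizer wiring under assumed hyperparameters; the weight/bias names mirror the example's variables but the values are illustrative:

    n_classes, learning_rate = 2, 0.01  # assumed values, for illustration only
    weights = {'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))}
    biases = {'out': tf.Variable(tf.random_normal([n_classes]))}
    y = tf.placeholder(tf.float32, [None, n_classes])

    pred = tf.matmul(last_outputs, weights['out']) + biases['out']
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
    optimizer = tf.train.GradientDescentOptimizer(
        learning_rate=learning_rate).minimize(cost)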
