@@ -119,14 +119,14 @@ def dynamicRNN(x, seqlen, weights, biases):
    # Reshaping to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, 1])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
-    x = tf.split(0, seq_max_len, x)
+    x = tf.split(axis=0, num_or_size_splits=seq_max_len, value=x)

    # Define a lstm cell with tensorflow
-    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden)
+    lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden)

    # Get lstm cell output, providing 'sequence_length' will perform dynamic
    # calculation.
-    outputs, states = tf.nn.rnn(lstm_cell, x, dtype=tf.float32,
+    outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x, dtype=tf.float32,
                                sequence_length=seqlen)

    # When performing dynamic calculation, we must retrieve the last
@@ -138,7 +138,7 @@ def dynamicRNN(x, seqlen, weights, biases):

    # 'outputs' is a list of output at every timestep, we pack them in a Tensor
    # and change back dimension to [batch_size, n_step, n_input]
-    outputs = tf.pack(outputs)
+    outputs = tf.stack(outputs)
    outputs = tf.transpose(outputs, [1, 0, 2])

    # Hack to build the indexing and retrieve the right output.
@@ -154,7 +154,7 @@ def dynamicRNN(x, seqlen, weights, biases):
pred = dynamicRNN(x, seqlen, weights, biases)

# Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
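For context, here is a minimal, self-contained sketch (not part of this patch) showing how the renamed TF 1.x symbols used above fit together; the sizes `n_steps`, `n_input`, `n_hidden`, and `n_classes` are illustrative placeholders rather than the example's actual hyperparameters.

```python
# Illustrative sketch of the post-rename TF 1.x API surface used in this diff:
# keyword-argument tf.split, tf.contrib.rnn cells, tf.stack, and logits=/labels=.
import tensorflow as tf

# Hypothetical sizes chosen for the sketch only.
n_steps, n_input, n_hidden, n_classes = 20, 1, 64, 2

x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
seqlen = tf.placeholder(tf.int32, [None])

# (batch, steps, input) -> list of `n_steps` tensors of shape (batch, input),
# the layout static_rnn expects.
x_t = tf.transpose(x, [1, 0, 2])
x_t = tf.reshape(x_t, [-1, n_input])
x_list = tf.split(axis=0, num_or_size_splits=n_steps, value=x_t)

# BasicLSTMCell and the static RNN runner live under tf.contrib.rnn in TF 1.x.
cell = tf.contrib.rnn.BasicLSTMCell(n_hidden)
outputs, states = tf.contrib.rnn.static_rnn(cell, x_list, dtype=tf.float32,
                                            sequence_length=seqlen)

# tf.pack was renamed to tf.stack; then gather each sequence's output at its
# last valid timestep (the same indexing trick as in the example).
outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])  # [batch, steps, hidden]
batch_size = tf.shape(outputs)[0]
index = tf.range(0, batch_size) * n_steps + (seqlen - 1)
last = tf.gather(tf.reshape(outputs, [-1, n_hidden]), index)

W = tf.Variable(tf.random_normal([n_hidden, n_classes]))
b = tf.Variable(tf.random_normal([n_classes]))
pred = tf.matmul(last, W) + b

# logits and labels are passed as keyword arguments after the API change.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
```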