Commit 56f893c

typo fix and correction for state processor output shape
1 parent c90ebaf

4 files changed: 6 additions, 6 deletions

4 files changed

+6
-6
lines changed

DQN/Deep Q Learning Solution.ipynb

Lines changed: 2 additions & 2 deletions
@@ -79,7 +79,7 @@
 " state: A [210, 160, 3] Atari RGB State\n",
 "\n",
 " Returns:\n",
-" A processed [84, 84, 1] state representing grayscale values.\n",
+" A processed [84, 84] state representing grayscale values.\n",
 " \"\"\"\n",
 " return sess.run(self.output, { self.input_state: state })"
 ]
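
The docstring correction reflects that the processing graph ends by squeezing out the singleton channel axis, so the tensor returned by process() is two-dimensional. A minimal sketch of such a processor, assuming standard TF 1.x image ops (the crop offsets below are illustrative, not taken from this commit):

import tensorflow as tf

class StateProcessor:
    """Maps a [210, 160, 3] RGB Atari frame to an [84, 84] grayscale array."""
    def __init__(self):
        with tf.variable_scope("state_processor"):
            self.input_state = tf.placeholder(shape=[210, 160, 3], dtype=tf.uint8)
            # [210, 160, 3] -> [210, 160, 1]
            self.output = tf.image.rgb_to_grayscale(self.input_state)
            # Crop the playing field (offsets illustrative): -> [160, 160, 1]
            self.output = tf.image.crop_to_bounding_box(self.output, 34, 0, 160, 160)
            # Downsample: -> [84, 84, 1]
            self.output = tf.image.resize_images(
                self.output, [84, 84],
                method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
            # Drop the channel axis: -> [84, 84], matching the corrected docstring
            self.output = tf.squeeze(self.output)

    def process(self, sess, state):
        return sess.run(self.output, {self.input_state: state})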
@@ -144,7 +144,7 @@
 " gather_indices = tf.range(batch_size) * tf.shape(self.predictions)[1] + self.actions_pl\n",
 " self.action_predictions = tf.gather(tf.reshape(self.predictions, [-1]), gather_indices)\n",
 "\n",
-" # Calcualte the loss\n",
+" # Calculate the loss\n",
 " self.losses = tf.squared_difference(self.y_pl, self.action_predictions)\n",
 " self.loss = tf.reduce_mean(self.losses)\n",
 "\n",

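For context on the lines around the typo fix: predictions has shape [batch_size, num_actions], and flattening it row-major places element (i, a) at index i * num_actions + a, which is exactly what gather_indices computes so that tf.gather pulls out the Q-value of the action actually taken in each batch row. A small self-contained check of that arithmetic (values are illustrative):

import numpy as np

predictions = np.array([[0.1, 0.9, 0.3],   # Q-values, batch of 2, 3 actions
                        [0.5, 0.2, 0.8]])
actions = np.array([1, 2])                 # action taken in each batch row

batch_size, num_actions = predictions.shape
# Row-major flattening puts element (i, a) at index i * num_actions + a.
gather_indices = np.arange(batch_size) * num_actions + actions
action_predictions = predictions.reshape(-1)[gather_indices]
print(action_predictions)  # [0.9 0.8]
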
DQN/Deep Q Learning.ipynb

Lines changed: 1 addition & 1 deletion
@@ -137,7 +137,7 @@
 " gather_indices = tf.range(batch_size) * tf.shape(self.predictions)[1] + self.actions_pl\n",
 " self.action_predictions = tf.gather(tf.reshape(self.predictions, [-1]), gather_indices)\n",
 "\n",
-" # Calcualte the loss\n",
+" # Calculate the loss\n",
 " self.losses = tf.squared_difference(self.y_pl, self.action_predictions)\n",
 " self.loss = tf.reduce_mean(self.losses)\n",
 "\n",

DQN/Double DQN Solution.ipynb

Lines changed: 1 addition & 1 deletion
@@ -73,7 +73,7 @@
 " state: A [210, 160, 3] Atari RGB State\n",
 "\n",
 " Returns:\n",
-" A processed [84, 84, 1] state representing grayscale values.\n",
+" A processed [84, 84] state representing grayscale values.\n",
 " \"\"\"\n",
 " return sess.run(self.output, { self.input_state: state })"
 ]

DQN/dqn.py

Lines changed: 2 additions & 2 deletions
@@ -39,7 +39,7 @@ def process(self, sess, state):
             state: A [210, 160, 3] Atari RGB State

         Returns:
-            A processed [84, 84, 1] state representing grayscale values.
+            A processed [84, 84] state representing grayscale values.
         """
         return sess.run(self.output, { self.input_state: state })

@@ -95,7 +95,7 @@ def _build_model(self):
         gather_indices = tf.range(batch_size) * tf.shape(self.predictions)[1] + self.actions_pl
         self.action_predictions = tf.gather(tf.reshape(self.predictions, [-1]), gather_indices)

-        # Calcualte the loss
+        # Calculate the loss
         self.losses = tf.squared_difference(self.y_pl, self.action_predictions)
         self.loss = tf.reduce_mean(self.losses)
