Skip to content

Commit 2594eaa

Browse files
committed
update solution
1 parent 4115ba9 commit 2594eaa

File tree

1 file changed

+22
-25
lines changed

1 file changed

+22
-25
lines changed

LeNet-Lab-Solution.ipynb

Lines changed: 22 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
"cell_type": "markdown",
1414
"metadata": {},
1515
"source": [
16-
"### Setup\n",
16+
"## Setup\n",
1717
"The `EPOCH` and `BATCH_SIZE` values affect the training speed and model accuracy.\n",
1818
"\n",
1919
"You do not need to modify this section."
@@ -43,7 +43,7 @@
4343
"cell_type": "markdown",
4444
"metadata": {},
4545
"source": [
46-
"## Solution: Implement LeNet-5 Moidel\n",
46+
"## SOLUTION: Implement LeNet-5\n",
4747
"Implement the [LeNet-5](http://yann.lecun.com/exdb/lenet/) neural network architecture.\n",
4848
"\n",
4949
"This is the only cell you need to edit.\n",
@@ -91,49 +91,46 @@
9191
" # Add 2 rows/columns on each side for height and width dimensions.\n",
9292
" x = tf.pad(x, [[0, 0], [2, 2], [2, 2], [0, 0]], mode=\"CONSTANT\")\n",
9393
" \n",
94-
" \n",
95-
"\n",
96-
" \n",
97-
"\n",
98-
" \n",
99-
" # TODO: Convolution Layer 1. Input = 32x32x3. Output = 28x28x6.\n",
94+
" # SOLUTION: Convolution Layer 1. Input = 32x32x3. Output = 28x28x6.\n",
10095
" conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6)))\n",
10196
" conv1_b = tf.Variable(tf.zeros(6))\n",
102-
" conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b\n",
97+
" conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b\n",
10398
"\n",
104-
" # TODO: Activation 1.\n",
99+
" # SOLUTION: Activation 1.\n",
105100
" conv1 = tf.nn.relu(conv1)\n",
106101
"\n",
107-
" # TODO: Pooling Layer 1. Input = 28x28x6. Output = 14x14x6.\n",
102+
" # SOLUTION: Pooling Layer 1. Input = 28x28x6. Output = 14x14x6.\n",
108103
" conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n",
109104
"\n",
110-
" # TODO: Convolution Layer 2. Output = 10x10x16.\n",
105+
" # SOLUTION: Convolution Layer 2. Output = 10x10x16.\n",
111106
" conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16)))\n",
112107
" conv2_b = tf.Variable(tf.zeros(16))\n",
113-
" conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b\n",
108+
" conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b\n",
114109
" \n",
115-
" # TODO: Activation 2.\n",
110+
" # SOLUTION: Activation 2.\n",
116111
" conv2 = tf.nn.relu(conv2)\n",
117112
"\n",
118-
" # TODO: Pooling Layer 2. Input = 10x10x16. Output = 5x5x16.\n",
113+
" # SOLUTION: Pooling Layer 2. Input = 10x10x16. Output = 5x5x16.\n",
119114
" conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n",
120115
"\n",
121-
" # TODO: Flatten Layer.\n",
116+
" # SOLUTION: Flatten Layer.\n",
122117
" fc1 = flatten(conv2)\n",
123118
" \n",
124-
" # TODO: Fully Connected Layer 1. Input = 5x5x16. Output = 120.\n",
119+
" # SOLUTION: Fully Connected Layer 1. Input = 5x5x16. Output = 120.\n",
125120
" fc1_shape = (fc1.get_shape().as_list()[-1], 120)\n",
126-
" fc1_W = tf.Variable(tf.truncated_normal(shape=(fc1_shape)))\n",
127-
" fc1_b = tf.Variable(tf.zeros(120))\n",
128-
" fc1 = tf.matmul(fc1, fc1_W) + fc1_b\n",
121+
" fc1_W = tf.Variable(tf.truncated_normal(shape=(fc1_shape)))\n",
122+
" fc1_b = tf.Variable(tf.zeros(120))\n",
123+
" fc1 = tf.matmul(fc1, fc1_W) + fc1_b\n",
129124
" \n",
130-
" # TODO: Activation 3.\n",
125+
" # SOLUTION: Activation 3.\n",
131126
" fc1 = tf.nn.relu(fc1)\n",
132127
"\n",
133-
" # TODO: Fully Connected Layer 2. Input = 120. Output = 10.\n",
134-
" fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 10)))\n",
135-
" fc2_b = tf.Variable(tf.zeros(10))\n",
136-
" return tf.matmul(fc1, fc2_W) + fc2_b"
128+
" # SOLUTION: Fully Connected Layer 2. Input = 120. Output = 10.\n",
129+
" fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 10)))\n",
130+
" fc2_b = tf.Variable(tf.zeros(10))\n",
131+
" logits = tf.matmul(fc1, fc2_W) + fc2_b\n",
132+
" \n",
133+
" return logits"
137134
]
138135
},
139136
{

0 commit comments

Comments (0)