import tinyflow as tf
from tinyflow.datasets import get_cifar10
import numpy as np

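# training schedule: 10 epochs of 600 mini-batches, 100 images each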
num_epoch = 10
num_batch = 600
batch_size = 100

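# conv_factory: convolution -> batch normalization -> ReLU, the basic
# building block used by every residual unit below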
def conv_factory(x, filter_size, in_filters, out_filters):
    x = tf.nn.conv2d(x, num_filter=out_filters,
                     ksize=[1, filter_size, filter_size, 1], padding='SAME')
    x = tf.nn.batch_normalization(x)
    x = tf.nn.relu(x)
    return x

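# residual_factory: a two-convolution residual unit; the shortcut is the
# identity when the channel counts match, and a 1x1 projection otherwise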
def residual_factory(x, in_filters, out_filters):
    conv1 = conv_factory(x, 3, in_filters, out_filters)
    conv2 = conv_factory(conv1, 3, out_filters, out_filters)
    if in_filters == out_filters:
        shortcut = x
    else:
        shortcut = conv_factory(x, 1, in_filters, out_filters)
    return tf.nn.relu(shortcut + conv2)

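# resnet: three stages of n residual units at 16, 32, and 64 filters;
# the first unit of each stage widens the channels, the rest keep them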
def resnet(x, n, in_filters, out_filters):
    for stage_filters in [16, 32, 64]:
        for i in range(n):
            x = residual_factory(x, in_filters, stage_filters)
            in_filters = stage_filters
    return x


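# Build the network: a 5x5 stem convolution with tanh activation, the
# residual stack, 4x4 average pooling with stride 2, a second 5x5
# convolution, and a 10-way linear classifier on the flattened features.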
x = tf.placeholder(tf.float32)
conv1 = tf.nn.conv2d(x, num_filter=16, ksize=[1, 5, 5, 1], padding='SAME')
tanh1 = tf.tanh(conv1)
res = resnet(tanh1, 1, 16, 64)
pool1 = tf.nn.avg_pool(res, ksize=[1, 4, 4, 1], strides=[1, 2, 2, 1],
                       padding='SAME', data_format='NCHW')
conv2 = tf.nn.conv2d(pool1, num_filter=16, ksize=[1, 5, 5, 1])
flatten = tf.nn.flatten_layer(conv2)
fc1 = tf.nn.linear(flatten, num_hidden=10, name="fc1")

# define the loss (mean sparse softmax cross-entropy against integer
# labels) and the Adam training step
label = tf.placeholder(tf.float32)
cross_entropy = tf.nn.mean_sparse_softmax_cross_entropy_with_logits(fc1, label)
train_step = tf.train.AdamOptimizer(0.0005).minimize(cross_entropy)

sess = tf.Session(config='gpu')

# Automatic variable shape inference API: infers each variable's shape
# and initializes the weights with normal(0, stdev) noise.
known_shape = {x: [batch_size, 3, 32, 32], label: [batch_size]}
stdev = 0.01
init_step = []
for v, name, shape in tf.infer_variable_shapes(
        cross_entropy, feed_dict=known_shape):
    init_step.append(tf.assign(v, tf.normal(shape, stdev)))
    print("shape[%s]=%s" % (name, str(shape)))
sess.run(init_step)
sess.run(tf.initialize_all_variables())

# get the CIFAR-10 dataset
cifar = get_cifar10()

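# training loop: run one optimizer step per mini-batch and report the
# mean cross-entropy for each epoch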
for epoch in range(num_epoch):
    sum_loss = 0.0
    for i in range(num_batch):
        batch_xs, batch_ys = cifar.train.next_batch(batch_size)
        loss, _ = sess.run([cross_entropy, train_step],
                           feed_dict={x: batch_xs, label: batch_ys})
        sum_loss += loss
    print("epoch[%d] cross_entropy=%g" % (epoch, sum_loss / num_batch))

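# evaluation: accuracy is the mean of elementwise argmax/label matches
# over the test set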
correct_prediction = tf.equal(tf.argmax(fc1, 1), label)
accuracy = tf.reduce_mean(correct_prediction)
print(sess.run(accuracy, feed_dict={x: cifar.test.images,
                                    label: cifar.test.labels}))