|
| 1 | +{ |
| 2 | + "cells": [ |
| 3 | + { |
| 4 | + "cell_type": "markdown", |
| 5 | + "metadata": {}, |
| 6 | + "source": [ |
| 7 | + "### 1 (2) Neural networks as function approximators.\n", |
| 8 | + "Design a feed-forward neural network to approximate the 1-dimensional function given in Fig. 1 on the following page. The output should match exactly. \n", |
| 9 | + "\n", |
| 10 | + "* How many hidden layers do you need? \n", |
| 11 | + "* How many units are there within each layer? Show the hidden layers, units, connections, weights, and biases.\n", |
| 12 | + "\n", |
| 13 | + "Use the ReLU nonlinearity for every unit. Every possible path from input to output must pass\n", |
| 14 | + "through the same number of layers. This means each layer should have the form:\n", |
| 15 | + "\n", |
| 16 | + "$Y_i = \\sigma(W_{i}Y_{i-1}^{T} + \\beta_i) $\n", |
| 17 | + "\n", |
| 18 | + "where\n", |
| 19 | + "\n", |
| 20 | + "$Y_i \\in \\mathbb{R}^{d_ix1}$\n", |
| 21 | + " output of the ith layer\n", |
| 22 | + " \n", |
| 23 | + "$W_i \\in \\mathbb{R}^{d_i x d_{i-1}}$\n", |
| 24 | + "\n", |
| 25 | + "weight matrix for that layer\n", |
| 26 | + "Y0 is x \n", |
| 27 | + "\n", |
| 28 | + "ReLU is defined as :\n", |
| 29 | + "\n", |
| 30 | + "$\\sigma(x) = \n", |
| 31 | + " \\begin{cases} \n", |
| 32 | + " x & \\quad x>=0\\\\\n", |
| 33 | + " 0 & \\quad \\text{otherwise}\\\\\n", |
| 34 | + " \\end{cases}\n", |
| 35 | + "$\n", |
| 36 | + " \n", |
| 37 | + "Writing out the figure as bunch of equations in form of ReLU units:\n", |
| 38 | + "\n", |
| 39 | + "$f(x) = \n", |
| 40 | + " \\begin{cases} \n", |
| 41 | + " 0*x & \\quad 0<=x<=1\\\\\n", |
| 42 | + " 2*x - 2 & \\quad 1<=x<=2\\\\\n", |
| 43 | + " 1/3*x + 4/3 & \\quad 2<=x<=5\\\\\n", |
| 44 | + " 2*x-7 & \\quad 5<=x<=6\\\\\n", |
| 45 | + " 15-5/3*x & \\quad 6<=x<=9\\\\\n", |
| 46 | + " 0*x & \\quad 9<=x<=10\\\\\n", |
| 47 | + " \\end{cases}\n", |
| 48 | + "$\n", |
| 49 | + "\n", |
| 50 | + "Rewriting the above equation in terms of the ReLU units:\n", |
| 51 | + "\n", |
| 52 | + "$f(x) = \n", |
| 53 | + " \\begin{cases} \n", |
| 54 | + " \\sigma(0*x) & \\text{This is not required??} \\\\\n", |
| 55 | + " \\sigma(2*x - 2) - \\sigma(2*x - 4) \\\\\n", |
| 56 | + " \\sigma(1/3*x - 2/3) - \\sigma(1/3*x - 5/3) \\\\\n", |
| 57 | + " \\sigma(2*x - 10) - \\sigma(2*x - 12) \\\\\n", |
| 58 | + " -\\sigma(5/3*x - 10) + \\sigma(5/3*x - 15) \\\\\n", |
| 59 | + " \\end{cases}\n", |
| 60 | + "$\n", |
| 61 | + "\n", |
| 62 | + "Now writing the weight and the bias vector for the Neural network. This will have :\n", |
| 63 | + "\n", |
| 64 | + "1 input layer with 1 neuron for input x\n", |
| 65 | + "1 hidden layer full-connected with 8 neurons\n", |
| 66 | + "1 output layer with 1 neuron for output y\n", |
| 67 | + "\n", |
| 68 | + "$$ W_{(input)(hidden)} = [0, 2, 2, 1/3, 1/3, 2, 2, 5/3, 5/3]$$\n", |
| 69 | + "\n", |
| 70 | + "$$ W_{(hidden)(output)}^T = [1, 1, -1, 1, -1, 1, -1, -1, 1]$$\n", |
| 71 | + "\n", |
| 72 | + "$$ Bias_{(input)(hidden)} = [0, -2, -4, -2/3, -5/3, -10, -12, -10, -15]$$" |
| 73 | + ] |
| 74 | + }, |
| 75 | + { |
| 76 | + "cell_type": "code", |
| 77 | + "execution_count": 38, |
| 78 | + "metadata": { |
| 79 | + "collapsed": false |
| 80 | + }, |
| 81 | + "outputs": [ |
| 82 | + { |
| 83 | + "name": "stdout", |
| 84 | + "output_type": "stream", |
| 85 | + "text": [ |
| 86 | + " (1, 8) (1, 100)\n" |
| 87 | + ] |
| 88 | + } |
| 100 | + ], |
| 101 | + "source": [ |
| 102 | + "import numpy as np\n", |
| 103 | + "import tensorflow as tf\n", |
| 104 | + "\n", |
| 105 | + "W01 = np.array([[2,2,1/3,1/3,2,2,5/3,5/3]], dtype=np.float32) # 1x8\n", |
| 106 | + "b = np.array([-2.0,-4.0,-2/3,-5/3,-10.0,-12,-10,-15], dtype=np.float32)\n", |
| 107 | + "W12 = np.array([1,-1,1,-1,1,-1,-1,1],dtype=np.float32)\n", |
| 108 | + "x = np.array([np.linspace(0.0, 10.0, 100)]) # 1x100\n", |
| 109 | + "print W01.shape, x.shape\n", |
| 110 | + "#print tf.shape(x) \n", |
| 111 | + "tmp1 = np.dot(np.transpose(x), W01)\n", |
| 112 | + "y1 = tf.nn.relu(tmp1 + b) # add is with broadcasting\n", |
| 113 | + "y = tf.matmul(y1*W12)" |
| 114 | + ] |
| 115 | + }, |
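| 116 | + { |
| 117 | + "cell_type": "markdown", |
| 118 | + "metadata": {}, |
| 119 | + "source": [ |
| 120 | + "A quick, illustrative check of the cell above (not part of the required answer): using `np`, `tf`, `x`, and `y` from that cell, evaluate the network on the sampled inputs and compare it with the piecewise definition of $f$, reconstructed by linear interpolation through its breakpoints. This assumes the TensorFlow 1.x-style `tf.Session` API available in the Python 2 environment of this notebook." |
| 121 | + ] |
| 122 | + }, |
| 123 | + { |
| 124 | + "cell_type": "code", |
| 125 | + "execution_count": null, |
| 126 | + "metadata": { |
| 127 | + "collapsed": false |
| 128 | + }, |
| 129 | + "outputs": [], |
| 130 | + "source": [ |
| 131 | + "# Evaluate the network output y from the previous cell and compare it with\n", |
| 132 | + "# the piecewise definition of f, reconstructed via np.interp over its breakpoints.\n", |
| 133 | + "xs = np.array([0.0, 1.0, 2.0, 5.0, 6.0, 9.0, 10.0])\n", |
| 134 | + "ys = np.array([0.0, 0.0, 2.0, 3.0, 5.0, 0.0, 0.0])\n", |
| 135 | + "f_true = np.interp(x.ravel(), xs, ys)\n", |
| 136 | + "\n", |
| 137 | + "with tf.Session() as sess:\n", |
| 138 | + "    y_net = sess.run(y).ravel()\n", |
| 139 | + "\n", |
| 140 | + "print np.allclose(y_net, f_true, atol=1e-5) # expect True: the network reproduces f" |
| 141 | + ] |
| 142 | + }, |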
| 116 | + { |
| 117 | + "cell_type": "code", |
| 118 | + "execution_count": null, |
| 119 | + "metadata": { |
| 120 | + "collapsed": true |
| 121 | + }, |
| 122 | + "outputs": [], |
| 123 | + "source": [] |
| 124 | + } |
| 125 | + ], |
| 126 | + "metadata": { |
| 127 | + "kernelspec": { |
| 128 | + "display_name": "Python 2", |
| 129 | + "language": "python", |
| 130 | + "name": "python2" |
| 131 | + }, |
| 132 | + "language_info": { |
| 133 | + "codemirror_mode": { |
| 134 | + "name": "ipython", |
| 135 | + "version": 2 |
| 136 | + }, |
| 137 | + "file_extension": ".py", |
| 138 | + "mimetype": "text/x-python", |
| 139 | + "name": "python", |
| 140 | + "nbconvert_exporter": "python", |
| 141 | + "pygments_lexer": "ipython2", |
| 142 | + "version": "2.7.10" |
| 143 | + } |
| 144 | + }, |
| 145 | + "nbformat": 4, |
| 146 | + "nbformat_minor": 0 |
| 147 | +} |