1+ """ Bi-directional Recurrent Neural Network.
2+ A Bi-directional Recurrent Neural Network (LSTM) implementation example using
3+ TensorFlow library. This example is using the MNIST database of handwritten
4+ digits (http://yann.lecun.com/exdb/mnist/)
5+ Links:
6+ [Long Short Term Memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)
7+ [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
8+ Author: Aymeric Damien
9+ Project: https://github.com/aymericdamien/TensorFlow-Examples/
10+ """

from __future__ import print_function

import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

'''
To classify images using a bidirectional recurrent neural network, we consider
every image row as a sequence of pixels. Because the MNIST image shape is 28*28px,
we will then handle 28 sequences of 28 steps for every sample.
'''
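# Note: each MNIST image arrives as a flat 784-value vector; it is reshaped
# below to (timesteps, num_input) = (28, 28), so image row t becomes the input
# fed to the network at time step t.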

# Training Parameters
learning_rate = 0.001
training_steps = 10000
batch_size = 128
display_step = 200

# Network Parameters
num_input = 28  # MNIST data input (img shape: 28*28)
timesteps = 28  # timesteps
num_hidden = 128  # hidden layer num of features
num_classes = 10  # MNIST total classes (0-9 digits)

# tf Graph input
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])

# Define weights
weights = {
    # Hidden layer weights => 2*n_hidden because of forward + backward cells
    'out': tf.Variable(tf.random_normal([2 * num_hidden, num_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([num_classes]))
}
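# The forward and backward LSTM outputs are concatenated at every time step,
# so the final linear layer sees a 2*num_hidden-dimensional feature vector.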


def BiRNN(x, weights, biases):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, timesteps, n_input)
    # Required shape: 'timesteps' tensors list of shape (batch_size, num_input)

    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, num_input)
    x = tf.unstack(x, timesteps, 1)

    # Define lstm cells with tensorflow
    # Forward direction cell
    lstm_fw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
    # Backward direction cell
    lstm_bw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)

    # Get lstm cell output
    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                                     dtype=tf.float32)
    except Exception:  # Old TensorFlow version only returns outputs not states
        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                               dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']


logits = BiRNN(X, weights, biases)
prediction = tf.nn.softmax(logits)

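# Note: the loss below takes the raw logits; tf.nn.softmax_cross_entropy_with_logits
# applies the softmax internally for numerical stability, so `prediction` above is
# used only for evaluating accuracy.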
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:

    # Run the initializer
    sess.run(init)

    for step in range(1, training_steps + 1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 seq of 28 elements
        batch_x = batch_x.reshape((batch_size, timesteps, num_input))
        # Run optimization op (backprop)
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # Calculate batch loss and accuracy
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
                                                                 Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " + \
                  "{:.4f}".format(loss) + ", Training Accuracy= " + \
                  "{:.3f}".format(acc))

    print("Optimization Finished!")

    # Calculate accuracy for 128 mnist test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))
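    # The -1 lets NumPy infer the leading dimension (test_len here) from the
    # flat (test_len, 784) image array.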
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", \
        sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))