
Commit 10adc7a (parent f5ad673)

Remove profiling code

7 files changed: +73 -61 lines

examples/keras-perf/cnn.py

Lines changed: 8 additions & 7 deletions

@@ -46,10 +46,11 @@
 model.add(tf.keras.layers.Dense(config.dense_layer_size, activation='relu'))
 model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))

-run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
-run_metadata = tf.RunMetadata()
+# optional profiling setup...
+# run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
+# run_metadata = tf.RunMetadata()
+# options=run_options, run_metadata=run_metadata,
 model.compile(loss='categorical_crossentropy', optimizer='adam',
-              options=run_options, run_metadata=run_metadata,
               metrics=['accuracy'])
 # log the number of total parameters
 config.total_params = model.count_params()
@@ -61,10 +62,10 @@
         tf.keras.callbacks.TensorBoard(log_dir=wandb.run.dir)])
 model.save('cnn.h5')

-# Write performance profile
-tl = timeline.Timeline(run_metadata.step_stats)
-with open('profile.json', 'w') as f:
-    f.write(tl.generate_chrome_trace_format())
+# optional profiling setup continued
+# tl = timeline.Timeline(run_metadata.step_stats)
+# with open('profile.json', 'w') as f:
+#     f.write(tl.generate_chrome_trace_format())

 # Convert to TensorFlow Lite model.
 converter = tf.lite.TFLiteConverter.from_keras_model_file('cnn.h5')
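
For anyone who wants the profiling path back, here is a minimal, self-contained sketch of the flow the commented-out lines describe. It assumes TensorFlow 1.x (tf.RunOptions, tf.RunMetadata, and the timeline module do not exist in TF 2.x); the tiny model and random data are illustrative stand-ins, not part of this repo.

# Minimal sketch: Chrome-trace profiling of a Keras model, per the lines
# commented out above. Assumes TensorFlow 1.x; the toy model and random
# data below are placeholders for this script's CNN and dataset.
import numpy as np
import tensorflow as tf
from tensorflow.python.client import timeline

run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()

model = tf.keras.Sequential(
    [tf.keras.layers.Dense(10, activation='softmax', input_shape=(784,))])
model.compile(loss='categorical_crossentropy', optimizer='adam',
              options=run_options, run_metadata=run_metadata,
              metrics=['accuracy'])

# One short run is enough to populate run_metadata.step_stats.
x = np.random.rand(256, 784).astype('float32')
y = tf.keras.utils.to_categorical(np.random.randint(0, 10, 256), 10)
model.fit(x, y, epochs=1, batch_size=64)

# Dump a Chrome trace; open profile.json at chrome://tracing to inspect it.
tl = timeline.Timeline(run_metadata.step_stats)
with open('profile.json', 'w') as f:
    f.write(tl.generate_chrome_trace_format())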

examples/lstm/imdb-classifier/imdb-bow.py

Lines changed: 2 additions & 2 deletions

@@ -1,4 +1,4 @@
-import imdb
+import util
 import numpy as np
 import tensorflow as tf
 from tensorflow.keras.preprocessing import text
@@ -8,7 +8,7 @@
 config = wandb.config
 config.vocab_size = 1000

-(X_train, y_train), (X_test, y_test) = imdb.load_imdb()
+(X_train, y_train), (X_test, y_test) = util.load_imdb()

 tokenizer = text.Tokenizer(num_words=config.vocab_size)
 tokenizer.fit_on_texts(X_train)

examples/lstm/imdb-classifier/imdb-cnn.py

Lines changed: 3 additions & 9 deletions

@@ -1,11 +1,10 @@
 import wandb
-import imdb
 import numpy as np
 import tensorflow as tf
 from tensorflow.keras.preprocessing import text, sequence
 from tensorflow.python.client import device_lib
 from tensorflow.keras.layers import LSTM, GRU, CuDNNLSTM, CuDNNGRU
-
+from tensorflow.keras.datasets import imdb

 # set parameters:
 wandb.init()
@@ -19,12 +18,7 @@
 config.hidden_dims = 250
 config.epochs = 10

-(X_train, y_train), (X_test, y_test) = imdb.load_imdb()
-print("Tokenizing text")
-tokenizer = text.Tokenizer(num_words=config.vocab_size)
-tokenizer.fit_on_texts(X_train)
-X_train = tokenizer.texts_to_sequences(X_train)
-X_test = tokenizer.texts_to_sequences(X_test)
+(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=config.vocab_size)

 X_train = sequence.pad_sequences(X_train, maxlen=config.maxlen)
 X_test = sequence.pad_sequences(X_test, maxlen=config.maxlen)
@@ -60,4 +54,4 @@
 model.fit(X_train, y_train,
           batch_size=config.batch_size,
           epochs=config.epochs,
-          validation_data=(X_test, y_test), callbacks=[wandb.keras.WandbCallback()])
+          validation_data=(X_test, y_test), callbacks=[wandb.keras.WandbCallback(save_model=False)])
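
Worth noting about the change above: tensorflow.keras.datasets.imdb.load_data returns reviews already encoded as lists of integer word indices (capped at num_words) with binary labels, so the manual Tokenizer pass is no longer needed and only padding remains. A quick sanity check:

# Quick check of what the built-in Keras IMDB loader returns: integer
# word-index sequences and 0/1 labels, so no Tokenizer step is needed.
from tensorflow.keras.datasets import imdb

(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=1000)
print(type(X_train[0]), X_train[0][:10])  # list of ints, e.g. [1, 14, 22, ...]
print(y_train[:5])                        # array of 0/1 sentiment labels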

examples/lstm/imdb-classifier/imdb-embedding.py

Lines changed: 4 additions & 3 deletions

@@ -5,7 +5,8 @@
 from tensorflow.keras.preprocessing import text, sequence
 from tensorflow.python.client import device_lib
 from tensorflow.keras.layers import LSTM, GRU, CuDNNLSTM, CuDNNGRU
-import imdb
+from tensorflow.keras.datasets import imdb
+import util
 import os

 # set parameters:
@@ -20,7 +21,7 @@
 config.hidden_dims = 100
 config.epochs = 10

-(X_train, y_train), (X_test, y_test) = imdb.load_imdb()
+(X_train, y_train), (X_test, y_test) = util.load_imdb()

 if not os.path.exists("glove.6B.100d.txt"):
     print("Downloading glove embeddings...")
@@ -75,4 +76,4 @@
 model.fit(X_train, y_train,
           batch_size=config.batch_size,
           epochs=config.epochs,
-          validation_data=(X_test, y_test), callbacks=[wandb.keras.WandbCallback(input_type="time")])
+          validation_data=(X_test, y_test), callbacks=[wandb.keras.WandbCallback(save_model=False)])
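
The code that consumes glove.6B.100d.txt sits outside these hunks, so here is a hedged sketch of how such a file is typically parsed into a Keras embedding matrix. The tokenizer, vocab size, and sample texts are illustrative placeholders, not taken from this script:

# Hypothetical sketch: parsing a GloVe file (one "word v1 ... v100" per line)
# into an embedding matrix indexed by a Tokenizer's word_index. The tiny
# corpus and vocab_size here are placeholders, not this script's values.
import numpy as np
from tensorflow.keras.preprocessing import text

vocab_size, embedding_dim = 1000, 100

tokenizer = text.Tokenizer(num_words=vocab_size)
tokenizer.fit_on_texts(["a great movie", "a terrible movie"])

embeddings = {}
with open("glove.6B.100d.txt") as f:
    for line in f:
        values = line.split()
        embeddings[values[0]] = np.asarray(values[1:], dtype='float32')

# Row i holds the vector for the word with index i; unknown words stay zero.
embedding_matrix = np.zeros((vocab_size, embedding_dim))
for word, i in tokenizer.word_index.items():
    if i < vocab_size and word in embeddings:
        embedding_matrix[i] = embeddings[word]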

examples/lstm/imdb-classifier/imdb-lstm.py

Lines changed: 9 additions & 4 deletions

@@ -1,10 +1,10 @@
 import wandb
-import imdb
 import numpy as np
 import tensorflow as tf
 from tensorflow.keras.layers import LSTM, GRU, CuDNNLSTM, CuDNNGRU
 from tensorflow.python.client import device_lib
 from tensorflow.keras.preprocessing import text, sequence
+from tensorflow.keras.datasets import imdb

 # set parameters:
 wandb.init()
@@ -19,12 +19,17 @@
 config.epochs = 10

 # Load and tokenize input
-(X_train, y_train), (X_test, y_test) = imdb.load_imdb()
-print("Tokenizing text")
+(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=config.vocab_size)
+
+# Example of manual data loading
+"""
+import util
+(X_train, y_train), (X_test, y_test) = util.load_imdb()
 tokenizer = text.Tokenizer(num_words=config.vocab_size)
 tokenizer.fit_on_texts(X_train)
 X_train = tokenizer.texts_to_sequences(X_train)
 X_test = tokenizer.texts_to_sequences(X_test)
+"""

 # Ensure all input is the same size
 X_train = sequence.pad_sequences(
@@ -51,6 +56,6 @@
 model.fit(X_train, y_train,
           batch_size=config.batch_size,
           epochs=config.epochs,
-          validation_data=(X_test, y_test), callbacks=[wandb.keras.WandbCallback()])
+          validation_data=(X_test, y_test), callbacks=[wandb.keras.WandbCallback(save_model=False)])

 model.save("sentiment.h5")

examples/lstm/imdb-classifier/imdb.py

Lines changed: 0 additions & 36 deletions
This file was deleted.

examples/lstm/imdb-classifier/util.py

Lines changed: 47 additions & 0 deletions

@@ -0,0 +1,47 @@
+import os
+import subprocess
+import numpy as np
+
+
+def load_imdb():
+    if not os.path.exists("./aclImdb"):
+        print("Downloading imdb dataset...")
+        subprocess.check_output(
+            "curl -SL http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz | tar xz", shell=True)
+
+    if not os.path.exists('./aclImdb/cache.npz'):
+        X_train = []
+        y_train = []
+
+        path = './aclImdb/train/pos/'
+        X_train.extend([open(path + f).read()
+                        for f in os.listdir(path) if f.endswith('.txt')])
+        y_train.extend([1 for _ in range(12500)])
+
+        path = './aclImdb/train/neg/'
+        X_train.extend([open(path + f).read()
+                        for f in os.listdir(path) if f.endswith('.txt')])
+        y_train.extend([0 for _ in range(12500)])
+
+        X_test = []
+        y_test = []
+
+        path = './aclImdb/test/pos/'
+        X_test.extend([open(path + f).read()
+                       for f in os.listdir(path) if f.endswith('.txt')])
+        y_test.extend([1 for _ in range(12500)])
+
+        path = './aclImdb/test/neg/'
+        X_test.extend([open(path + f).read()
+                       for f in os.listdir(path) if f.endswith('.txt')])
+        y_test.extend([0 for _ in range(12500)])
+
+        np.savez('./aclImdb/cache.npz', X_train=X_train, y_train=y_train,
+                 X_test=X_test, y_test=y_test)
+
+    cached = np.load('./aclImdb/cache.npz')
+    return (cached['X_train'], cached['y_train']), (cached['X_test'], cached['y_test'])
+
+
+if __name__ == '__main__':
+    load_imdb()
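
Since util.load_imdb() returns raw review strings rather than integer sequences, callers still need a Tokenizer before padding. A usage sketch mirroring the commented-out block in imdb-lstm.py above; the vocab size and maxlen are illustrative placeholders:

# Usage sketch for util.load_imdb(): it yields raw review strings, so
# tokenize before padding. The vocab size and maxlen here are placeholders.
from tensorflow.keras.preprocessing import text, sequence
import util

(X_train, y_train), (X_test, y_test) = util.load_imdb()
tokenizer = text.Tokenizer(num_words=1000)
tokenizer.fit_on_texts(X_train)
X_train = sequence.pad_sequences(tokenizer.texts_to_sequences(X_train), maxlen=300)
X_test = sequence.pad_sequences(tokenizer.texts_to_sequences(X_test), maxlen=300)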
