
Commit b05848a

Adding back examples
1 parent aea5a47 commit b05848a

219 files changed: +565,674 lines added, 0 removed


examples/amazon-reviews/amazon-bow.py

Lines changed: 61 additions & 0 deletions
@@ -0,0 +1,61 @@
from keras.preprocessing import sequence
from keras.preprocessing import text
import amazon
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding, LSTM
from keras.layers import Conv1D, Flatten
import wandb
from wandb.keras import WandbCallback

# Initialize a W&B run and record the hyperparameters on wandb.config.
wandb.init()
config = wandb.config
config.vocab_size = 1000
config.maxlen = 1000
config.batch_size = 32
config.embedding_dims = 50
config.filters = 250
config.kernel_size = 3
config.hidden_dims = 250
config.epochs = 10

# Load the Amazon video-game reviews; train on the short summaries only.
(train_summary, train_review_text, train_labels), (test_summary, test_review_text, test_labels) = amazon.load_amazon()

(X_train, y_train), (X_test, y_test) = (train_summary, train_labels), (test_summary, test_labels)
print("Review", X_train[0])
print("Label", y_train[0])

# Map each summary to a sequence of word indices, then pad to a fixed length.
tokenizer = text.Tokenizer(num_words=config.vocab_size)
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_sequences(X_train)
X_test = tokenizer.texts_to_sequences(X_test)

X_train = sequence.pad_sequences(X_train, maxlen=config.maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=config.maxlen)
print(X_train.shape)
print("After pre-processing", X_train[0])

# Embedding -> 1D convolution -> dense classifier with a sigmoid output
# for binary sentiment (positive vs. negative).
model = Sequential()
model.add(Embedding(config.vocab_size,
                    config.embedding_dims,
                    input_length=config.maxlen))
model.add(Dropout(0.5))
model.add(Conv1D(config.filters,
                 config.kernel_size,
                 padding='valid',
                 activation='relu'))
model.add(Flatten())
model.add(Dense(config.hidden_dims, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Note: batch_size and epochs are hard-coded here rather than read from
# config.batch_size / config.epochs.
model.fit(X_train, train_labels, batch_size=100, epochs=10, validation_data=(X_test, test_labels),
          callbacks=[WandbCallback()])
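
For reference, here is a minimal sketch (not part of the commit) of what the tokenizer-plus-padding step in amazon-bow.py produces on toy input; it assumes the same keras.preprocessing APIs used above:

# Hypothetical toy example of the preprocessing used in amazon-bow.py:
# texts_to_sequences maps words to integer indices, and pad_sequences
# left-pads every sequence to a fixed length so the Embedding layer
# sees uniform input shapes.
from keras.preprocessing import sequence, text

toy_summaries = ["great game loved it", "broken on arrival", "great value"]
tok = text.Tokenizer(num_words=1000)
tok.fit_on_texts(toy_summaries)
seqs = tok.texts_to_sequences(toy_summaries)   # lists of word indices, one per summary
padded = sequence.pad_sequences(seqs, maxlen=10)
print(padded.shape)   # (3, 10): three reviews, each padded to length 10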

examples/amazon-reviews/amazon.py

Lines changed: 41 additions & 0 deletions
@@ -0,0 +1,41 @@
import os
import json

def load_amazon():
    filename = 'reviews_Video_Games_5.json'
    train_summary = []
    train_review_text = []
    train_labels = []

    test_summary = []
    test_review_text = []
    test_labels = []

    with open(filename, 'r') as f:
        for (i, line) in enumerate(f):
            data = json.loads(line)

            # Map star ratings to binary labels: 4-5 stars -> positive (1),
            # 1-2 stars -> negative (0); neutral 3-star reviews are skipped.
            if data['overall'] == 3:
                continue  # was `next`, a no-op expression; `continue` actually skips the review
            elif data['overall'] == 4 or data['overall'] == 5:
                label = 1
            elif data['overall'] == 1 or data['overall'] == 2:
                label = 0
            else:
                raise Exception("Unexpected value " + str(data['overall']))

            summary = data['summary']
            review_text = data['reviewText']

            # Roughly a 90/10 train/test split based on the line index.
            if (i % 10 == 0):
                test_summary.append(summary)
                test_review_text.append(review_text)
                test_labels.append(label)
            else:
                train_summary.append(summary)
                train_review_text.append(review_text)
                train_labels.append(label)

    return (train_summary, train_review_text, train_labels), (test_summary, test_review_text, test_labels)

load_amazon()
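
A quick way to sanity-check load_amazon() without downloading the full 5-core dump is to point it at a tiny synthetic file. This sketch is not part of the commit; it assumes amazon.py is on the import path and it writes reviews_Video_Games_5.json in the current directory, so it would clobber a real copy of that file if one is present:

# Hypothetical smoke test for amazon.load_amazon() on synthetic data.
import json

rows = [
    {"overall": 5, "summary": "Great game", "reviewText": "Loved it"},        # i=0 -> test split
    {"overall": 1, "summary": "Broken", "reviewText": "Refund please"},       # i=1 -> train split
    {"overall": 3, "summary": "Meh", "reviewText": "Neutral, gets skipped"},  # i=2 -> dropped
]
with open("reviews_Video_Games_5.json", "w") as f:   # overwrites any real file of this name!
    for row in rows:
        f.write(json.dumps(row) + "\n")

import amazon  # importing also runs the module-level load_amazon() call above
(train_s, train_t, train_y), (test_s, test_t, test_y) = amazon.load_amazon()
print(train_y, test_y)  # expected: [0] [1] -- the 3-star review is dropped
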
Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
wget http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Video_Games_5.json.gz
gunzip reviews_Video_Games_5.json.gz
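
If wget is not available, roughly the same download-and-decompress step can be done from Python (a sketch, not part of the commit):

# Hypothetical Python equivalent of the wget + gunzip step above.
import gzip
import shutil
import urllib.request

url = "http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Video_Games_5.json.gz"
urllib.request.urlretrieve(url, "reviews_Video_Games_5.json.gz")

# Decompress to the JSON-lines file that amazon.load_amazon() expects.
with gzip.open("reviews_Video_Games_5.json.gz", "rb") as src, \
        open("reviews_Video_Games_5.json", "wb") as dst:
    shutil.copyfileobj(src, dst)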

examples/keras-audio/audio.ipynb

Lines changed: 251 additions & 0 deletions
Large diffs are not rendered by default.
