Skip to content

Commit 5106fdf

Browse files
committed
shuttle train data
1 parent e1a52f1 commit 5106fdf

File tree

5 files changed

+281
-136
lines changed

5 files changed

+281
-136
lines changed

KLH.py

Lines changed: 14 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -7,29 +7,30 @@ class Train_Args():
77
name_prefix = ""
88
mic_path = "data/KLHdata/mic/"
99
# model-4 mean square std and epochs = 30 batchsize = 100
10-
model_save_path = "./data/KLHdata/model-0.2-30"
10+
model_save_path = "./data/KLHdata/model-0.1-3-relu-7-layer-max-softmax/"
11+
#training_message_path = "./data/KLHdata/model-0.3-30/"
1112
# model-3 changed the loss function
1213
# model_save_path = "./data/19Sdata/model-3"
1314
# model_save_path = "./data/19Sdata/model-2"
1415
# model_save_path = "./model"
1516
positive1_box_path = "data/KLHdata/positive/"
1617
negative1_box_path = "data/KLHdata/negative/"
1718
positive1_mic_start_num = 1
18-
positive1_mic_end_num = 100
19+
positive1_mic_end_num = 50
1920
negative1_mic_start_num = 1
20-
negative1_mic_end_num = 100
21-
do_train_again = 0
21+
negative1_mic_end_num = 50
22+
do_train_again = False
2223
num_positive1 = 800
2324
num_negative1 = 800
2425
num_positive2 = 800
2526
num_negative2 = 800
2627

2728
positive2_box_path = "data/19Sdata/sel_positive/"
2829
negative2_box_path = "data/19Sdata/sel_negative/"
29-
positive2_mic_start_num = 30051
30-
positive2_mic_end_num = 30090
31-
negative2_mic_start_num = 30051
32-
negative2_mic_end_num = 30100
30+
positive2_mic_start_num = 1
31+
positive2_mic_end_num = 50
32+
negative2_mic_start_num = 1
33+
negative2_mic_end_num = 50
3334

3435
rotation_angel = 90
3536
rotation_n = 4
@@ -51,7 +52,7 @@ class Train_Args():
5152

5253
SIL_poolingsize = 2
5354

54-
alpha = 0.2
55+
alpha = 0.001
5556
batch_size = 500
5657
num_epochs = 30
5758
decay_rate = 0.96
@@ -66,11 +67,11 @@ class Predict_Args():
6667
data_path = "data/KLHdata/mic/"
6768
result_path = "./data/KLHdata/result/"
6869
#model_save_path = "./data/19Sdata/model"
69-
model_save_path = "./data/KLHdata/model"
70+
model_save_path = "./data/KLHdata/model-0.1-3-relu-7-layer-max-softmax"
7071
#model_save_path = "data/19Sdata/model/"
7172
boxsize = 272
72-
start_mic_num = 78
73-
end_mic_num = 78
73+
start_mic_num = 47
74+
end_mic_num = 47
7475
dim_x = 2048
7576
dim_y = 2048
7677
scan_step = 20
@@ -98,7 +99,7 @@ class Predict_Args():
9899

99100
SIL_poolingsize = 2
100101

101-
alpha = 0.01
102+
alpha = 0.001
102103
batch_size = 50
103104
num_epochs = 20
104105
decay_rate = 0.96

model.py

Lines changed: 92 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -9,97 +9,149 @@ def __init__(self,args):
99
self.args = args
1010
self.variables_deepem()
1111
self.build_model()
12+
#self.one_layer()
1213

14+
15+
def one_layer(self):
16+
w1 = tf.Variable(tf.truncated_normal([self.args.FL_kernelsize, self.args.FL_kernelsize, 1, self.args.FL_feature_map], stddev = 4))
17+
b1 = tf.Variable(tf.truncated_normal([self.args.FL_feature_map],stddev = 0.5))
18+
19+
w2 = [1, self.args.SL_poolingsize, self.args.SL_poolingsize, 1]
20+
21+
input_map_size = self.args.boxsize
22+
C1_map_size = input_map_size - self.args.FL_kernelsize + 1
23+
S2_map_size = C1_map_size // self.args.SL_poolingsize
24+
25+
fully_para_num = int(self.args.FL_feature_map * S2_map_size * S2_map_size)
26+
hidden_neurons = 2
27+
print("type(fully_para_num):",type(fully_para_num))
28+
print("type(hidden_neurons):",type(hidden_neurons))
29+
# output layer
30+
# todo: decide the second parameter of the fully connected layer (currently 2)
31+
w3 = tf.Variable(tf.truncated_normal([fully_para_num, hidden_neurons], stddev = 4))
32+
b3 = tf.Variable(tf.truncated_normal([hidden_neurons],stddev = 0.5))
33+
34+
self.X = tf.placeholder(tf.float32, shape = [None, self.args.boxsize, self.args.boxsize, 1])
35+
self.Y = tf.placeholder(tf.float32, shape = [None,2])
36+
self.global_step = tf.Variable(0, name='global_step',trainable=False)
37+
38+
layer1_conv = tf.nn.conv2d(self.X, w1, [1, 1, 1, 1], padding = 'VALID')
39+
layer1_actv = tf.sigmoid(layer1_conv + b1)
40+
#layer1_actv = tf.nn.relu(layer1_conv + self.variables['b1'])
41+
print("layer 1 shape is: ",layer1_conv.shape)
42+
43+
print("w2 = " , w2)
44+
layer2_pool = tf.nn.avg_pool(layer1_actv, w2,w2, padding = 'VALID')
45+
print("layer 2 shape is: ",layer2_pool.shape)
46+
47+
layer3_input = tf.reshape(layer2_pool,[-1,fully_para_num])
48+
self.l3_input = layer3_input
49+
50+
print("layer 3 shape is: ",layer3_input.shape)
51+
print("w3 shape is: ",w3.shape)
52+
53+
self.logits = tf.matmul(layer3_input, w3) + b3
54+
55+
if not self.args.is_training:
56+
return
57+
#self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels = self.Y))
58+
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels = self.Y))
59+
#self.loss = tf.sqrt(tf.reduce_mean(tf.square(self.Y - self.logits)))
60+
#self.lr = tf.maximum(1e-5,tf.train.exponential_decay(self.args.alpha, self.global_step, self.args.decay_step, self.args.decay_rate, staircase=True))
61+
self.lr = self.args.alpha
62+
self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
63+
64+
1365
def variables_deepem(self):
1466
# first convolutional layer
15-
w1 = tf.Variable(tf.truncated_normal([self.args.FL_kernelsize, self.args.FL_kernelsize, 1, self.args.FL_feature_map], stddev = 0.1))
16-
b1 = tf.Variable(tf.zeros([self.args.FL_feature_map]))
67+
w1 = tf.Variable(tf.truncated_normal([self.args.FL_kernelsize, self.args.FL_kernelsize, 1, self.args.FL_feature_map], stddev = 4))
68+
b1 = tf.Variable(tf.truncated_normal([self.args.FL_feature_map],stddev = 0.5))
69+
#b1 = tf.Variable(tf.zeros([self.args.FL_feature_map]))
1770

1871
# second pooling layer
1972
w2 = [1, self.args.SL_poolingsize, self.args.SL_poolingsize, 1]
2073

2174
# third convolutional layer
22-
w3 = tf.Variable(tf.truncated_normal([self.args.TL_kernelsize, self.args.TL_kernelsize, self.args.FL_feature_map, self.args.TL_feature_map], stddev = 0.1))
23-
b3 = tf.Variable(tf.zeros([self.args.TL_feature_map]))
75+
w3 = tf.Variable(tf.truncated_normal([self.args.TL_kernelsize, self.args.TL_kernelsize, self.args.FL_feature_map, self.args.TL_feature_map], stddev = 4))
76+
b3 = tf.Variable(tf.truncated_normal([self.args.TL_feature_map],stddev = 0.5))
77+
#b3 = tf.Variable(tf.zeros([self.args.TL_feature_map]))
2478

2579
# fourth pooling layer
2680
w4 = [1, self.args.FOL_poolingsize, self.args.FOL_poolingsize, 1]
2781

2882
# fifth convolutional layer
29-
w5 = tf.Variable(tf.truncated_normal([self.args.FIL_kernelsize, self.args.FIL_kernelsize, self.args.TL_feature_map, self.args.FIL_feature_map], stddev = 0.1))
30-
b5 = tf.Variable(tf.zeros([self.args.FIL_feature_map]))
83+
w5 = tf.Variable(tf.truncated_normal([self.args.FIL_kernelsize, self.args.FIL_kernelsize, self.args.TL_feature_map, self.args.FIL_feature_map], stddev = 4))
84+
b5 = tf.Variable(tf.truncated_normal([self.args.FIL_feature_map],stddev = 0.5))
85+
#b5 = tf.Variable(tf.zeros([self.args.FIL_feature_map]))
3186

3287
# sixth pooling layer
3388
w6 = [1, self.args.SIL_poolingsize, self.args.SIL_poolingsize, 1]
3489

3590
input_map_size = self.args.boxsize
3691
C1_map_size = input_map_size - self.args.FL_kernelsize + 1
37-
S2_map_size = C1_map_size / self.args.SL_poolingsize
92+
S2_map_size = C1_map_size // self.args.SL_poolingsize
3893
C3_map_size = S2_map_size - self.args.TL_kernelsize + 1
39-
S4_map_size = C3_map_size / self.args.FOL_poolingsize
94+
S4_map_size = C3_map_size // self.args.FOL_poolingsize
4095
C5_map_size = S4_map_size - self.args.FIL_kernelsize + 1
41-
S6_map_size = C5_map_size / self.args.SIL_poolingsize
42-
fully_para_num = self.args.FIL_feature_map * S6_map_size * S6_map_size
43-
hidden_neurons = 1
44-
96+
S6_map_size = C5_map_size // self.args.SIL_poolingsize
97+
fully_para_num = int(self.args.FIL_feature_map * S6_map_size * S6_map_size)
98+
hidden_neurons = 2
99+
print("type(fully_para_num):",type(fully_para_num))
100+
print("type(hidden_neurons):",type(hidden_neurons))
45101
# output layer
46102
# todo: decide the second parameter of the fully connected layer (currently 2)
47-
w7 = tf.Variable(tf.truncated_normal([fully_para_num, hidden_neurons], stddev = 0.1))
48-
b7 = tf.Variable(tf.zeros([hidden_neurons]))
103+
w7 = tf.Variable(tf.truncated_normal([fully_para_num, hidden_neurons], stddev = 4))
104+
b7 = tf.Variable(tf.truncated_normal([hidden_neurons],stddev = 0.5))
105+
#b7 = tf.Variable(tf.zeros([hidden_neurons]))
49106

50107
self.variables = {
51108
'w1': w1, 'w2': w2, 'w3': w3, 'w4': w4, 'w5': w5, 'w6': w6, 'w7': w7,
52109
'b1': b1, 'b3': b3, 'b5': b5, 'b7': b7, 'fully_para_num': fully_para_num
53110
}
54111

55-
def build_model(self):
56-
# if self.args.is_training:
57-
# self.X = tf.placeholder(tf.float32, shape = [self.args.batch_size, self.args.boxsize, self.args.boxsize, 1])
58-
# self.Y = tf.placeholder(tf.float32, shape = [self.args.batch_size,1])
59-
# else:
60-
# self.X = tf.placeholder(tf.float32, shape = [self.args.rotation_n, self.args.boxsize, self.args.boxsize, 1])
61-
# self.Y = tf.placeholder(tf.float32, shape = [self.args.rotation_n,1])
62-
112+
def build_model(self):
63113
self.X = tf.placeholder(tf.float32, shape = [None, self.args.boxsize, self.args.boxsize, 1])
64-
self.Y = tf.placeholder(tf.float32, shape = [None,1])
114+
self.Y = tf.placeholder(tf.float32, shape = [None,2])
65115
self.global_step = tf.Variable(0, name='global_step',trainable=False)
66116

67117
layer1_conv = tf.nn.conv2d(self.X, self.variables['w1'], [1, 1, 1, 1], padding = 'VALID')
68-
layer1_actv = tf.sigmoid(layer1_conv + self.variables['b1'])
69-
print "layer 1 shape is: ",layer1_conv.shape
118+
layer1_actv = tf.nn.relu(layer1_conv + self.variables['b1'])
119+
#layer1_actv = tf.nn.relu(layer1_conv + self.variables['b1'])
120+
print("layer 1 shape is: ",layer1_conv.shape)
70121

71-
print "w2 = " , self.variables['w2']
72-
layer2_pool = tf.nn.avg_pool(layer1_actv, self.variables['w2'],self.variables['w2'], padding = 'VALID')
73-
print "layer 2 shape is: ",layer2_pool.shape
122+
print("w2 = " , self.variables['w2'])
123+
layer2_pool = tf.nn.max_pool(layer1_actv, self.variables['w2'],self.variables['w2'], padding = 'VALID')
124+
print("layer 2 shape is: ",layer2_pool.shape)
74125

75126
layer3_conv = tf.nn.conv2d(layer2_pool, self.variables['w3'], [1, 1, 1, 1], padding = 'VALID')
76-
layer3_actv = tf.sigmoid(layer3_conv + self.variables['b3'])
77-
print "layer 3 shape is: ",layer3_conv.shape
127+
layer3_actv = tf.nn.relu(layer3_conv + self.variables['b3'])
128+
print("layer 3 shape is: ",layer3_conv.shape)
78129

79-
layer4_pool = tf.nn.avg_pool(layer3_actv, self.variables['w4'], self.variables['w4'], padding = 'VALID')
80-
print "layer 4 shape is: ",layer4_pool.shape
130+
layer4_pool = tf.nn.max_pool(layer3_actv, self.variables['w4'], self.variables['w4'], padding = 'VALID')
131+
print("layer 4 shape is: ",layer4_pool.shape)
81132

82133
layer5_conv = tf.nn.conv2d(layer4_pool, self.variables['w5'], [1, 1, 1, 1], padding = 'VALID')
83-
layer5_actv = tf.sigmoid(layer5_conv + self.variables['b5'])
84-
print "layer 5 shape is: ",layer5_conv.shape
134+
layer5_actv = tf.nn.relu(layer5_conv + self.variables['b5'])
135+
print("layer 5 shape is: ",layer5_conv.shape)
85136

86-
layer6_pool = tf.nn.avg_pool(layer5_actv, self.variables['w6'], self.variables['w6'], padding = 'VALID')
137+
layer6_pool = tf.nn.max_pool(layer5_actv, self.variables['w6'], self.variables['w6'], padding = 'VALID')
87138

88139
# layer6_flatten = tf.contrib.layers.flatten(layer6_pool)
89140
# flatten the output of layer6
90-
print "layer 6 shape is: ",layer6_pool.shape
141+
print("layer 6 shape is: ",layer6_pool.shape)
91142
layer7_input = tf.reshape(layer6_pool,[-1,self.variables['fully_para_num']])
92143
self.l7_input = layer7_input
93144

94-
print "layer 7 shape is: ",layer7_input.shape
95-
print "w7 shape is: ",self.variables['w7'].shape
145+
print("layer 7 shape is: ",layer7_input.shape)
146+
print("w7 shape is: ",self.variables['w7'].shape)
96147

97148
self.logits = tf.matmul(layer7_input, self.variables['w7']) + self.variables['b7']
98149

99150
if not self.args.is_training:
100151
return
101-
#self.cost_func = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels = self.Y))
102-
self.loss = tf.sqrt(tf.reduce_mean(tf.square(self.Y - self.logits)))
152+
#self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels = self.Y))
153+
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels = self.Y))
154+
#self.loss = tf.sqrt(tf.reduce_mean(tf.square(self.Y - self.logits)))
103155
#self.lr = tf.maximum(1e-5,tf.train.exponential_decay(self.args.alpha, self.global_step, self.args.decay_step, self.args.decay_rate, staircase=True))
104156
self.lr = self.args.alpha
105157
self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(self.loss)

predict.py

Lines changed: 39 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77
from utils import load_predict
88
from utils import sub_img
99
from utils import mapstd
10-
from utils import rotate_map
1110
from model import deepEM
1211
from KLH import Predict_Args
1312

@@ -24,16 +23,17 @@ def predict():
2423
# print ("length of test_x: %d"% len(test_x))
2524
# print ("length of test_index: %d"% len(test_index))
2625
checkpoint_dir = args.model_save_path
27-
28-
with tf.Session() as sess:
26+
gpu_config = tf.ConfigProto()
27+
gpu_config.gpu_options.allow_growth = True
28+
with tf.Session(config = gpu_config) as sess:
2929
saver = tf.train.Saver(tf.global_variables())
3030
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
3131
if ckpt and ckpt.model_checkpoint_path:
3232
saver.restore(sess, ckpt.model_checkpoint_path)
3333
else:
3434
print('Restore model failed!')
3535

36-
if not os.path.exists(args.result_path):
36+
if not os.path.exists(args.result_path):
3737
os.mkdir(args.result_path)
3838

3939
for num in range(args.start_mic_num, args.end_mic_num + 1):
@@ -47,44 +47,58 @@ def predict():
4747
output = open(output_name, 'w')
4848

4949
# result = []
50-
x_step_num = (args.dim_x - args.boxsize) / args.scan_step
51-
y_step_num = (args.dim_y - args.boxsize) / args.scan_step
52-
for i in xrange(x_step_num):
53-
for j in xrange(y_step_num):
50+
count = 0
51+
x_step_num = (args.dim_x - args.boxsize) // args.scan_step
52+
y_step_num = (args.dim_y - args.boxsize) // args.scan_step
53+
for i in range(x_step_num):
54+
for j in range(y_step_num):
5455
test_x = []
5556
x = i*args.scan_step
5657
y = j*args.scan_step
58+
y_ = y
59+
y = int(args.dim_y -y - args.boxsize)
5760
img = sub_img(mrc.data,x, y, args.boxsize)
58-
print "img: " , img
61+
# print("img: " , img)
5962
stddev = np.std(img)
60-
print("the stddev of image %d is %.5f" % (num, stddev))
63+
# print("the stddev of image %d is %.5f" % (num, stddev))
6164
if stddev <= args.min_std or stddev >= args.max_std:
6265
continue
6366

6467
img = mapstd(img)
65-
print "img: " , img
68+
# print("img: " , img)
6669
test_x.append(img)
6770

68-
rotate_map(img) # rotate 90
71+
img = np.rot90(img)
6972
test_x.append(img)
70-
71-
rotate_map(img) # rotate 180
73+
74+
img = np.rot90(img)
7275
test_x.append(img)
73-
74-
rotate_map(img) # rotate 270
76+
77+
img = np.rot90(img)
7578
test_x.append(img)
76-
test_x = np.reshape(test_x,[4,args.boxsize,args.boxsize,1])
7779

78-
pred = sess.run(deepem.logits,feed_dict={deepem.X: test_x})
79-
#pred = sess.run(tf.nn.sigmoid(deepem.logits),feed_dict={deepem.X: test_x})
80-
print "pred is ", pred
81-
avg = pred.mean()
80+
# print("test_x: " , test_x)
81+
82+
# print("img.shape: ",img.shape)
83+
84+
85+
test_x = np.asarray(test_x).reshape(4,args.boxsize,args.boxsize,1)
86+
87+
#pred = sess.run(deepem.logits,feed_dict={deepem.X: test_x})
88+
pred = sess.run(tf.nn.softmax(deepem.logits),feed_dict={deepem.X: test_x})
89+
# print("pred is %s", pred)
90+
#avg = pred.mean()
91+
# print("avg is %s" % avg)
8292
# result.append([x,y,avg,stddev])
83-
print num,".mrc x = ",x,", y = ",y,", avgpred = ", avg
84-
if avg >= 0.5:
85-
output.write(str(x)+'\t'+str(y)+'\t'+args.boxsize+'\t'+args.boxsize)
86-
print num," ",x," ", y," ",args.boxsize," ",args.boxsize
93+
prob = np.mean(pred,0)
94+
print("%d .mrc x = %d, y = %d, pre = %.5f" %(num,x,y_,prob[0]))
95+
if prob[0] > 0.9999:
96+
#print("%d .mrc x = %d, y = %d, pre = %s, avgpred = %s" %(num,x,y,pred[0], avg))
97+
output.write(str(x)+'\t'+str(y_)+'\t'+str(args.boxsize)+'\t'+str(args.boxsize) +'\n')
98+
print(num," ",x," ",y," ",prob[0])
99+
count += 1
87100

101+
print("%d particles in this mrc!" % count)
88102
mrc.close()
89103
output.close
90104

0 commit comments

Comments
 (0)