 from __future__ import print_function
 import paddle.fluid as fluid
 from utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_data
+from utility import check_gpu
 import paddle.fluid.profiler as profiler
 from crnn_ctc_model import ctc_train_net
 from attention_model import attention_train_net
@@ -67,7 +68,9 @@ def train(args):
         cycle=args.total_step > 0,
         model=args.model)
     test_reader = data_reader.test(
-        test_images_dir=args.test_images, test_list_file=args.test_list, model=args.model)
+        test_images_dir=args.test_images,
+        test_list_file=args.test_list,
+        model=args.model)

     # prepare environment
     place = fluid.CPUPlace()
@@ -115,8 +118,8 @@ def test(iter_num):
         for data in test_reader():
             exe.run(inference_program, feed=get_feeder_data(data, place))
         _, test_seq_error = error_evaluator.eval(exe)
-        print("\nTime: %s; Iter[%d]; Test seq error: %s.\n" % (
-            time.time(), iter_num, str(test_seq_error[0])))
+        print("\nTime: %s; Iter[%d]; Test seq error: %s.\n" %
+              (time.time(), iter_num, str(test_seq_error[0])))

         #Note: The following logs are special for CE monitoring.
         #Other situations do not need to care about these logs.
@@ -155,10 +158,10 @@ def save_model(args, exe, iter_num):
             iter_num += 1
             # training log
             if iter_num % args.log_period == 0:
-                print("\nTime: %s; Iter[%d]; Avg loss: %.3f; Avg seq err: %.3f" % (
-                    time.time(), iter_num,
-                    total_loss / (args.log_period * args.batch_size),
-                    total_seq_error / (args.log_period * args.batch_size)))
+                print("\nTime: %s; Iter[%d]; Avg loss: %.3f; Avg seq err: %.3f"
+                      % (time.time(), iter_num,
+                         total_loss / (args.log_period * args.batch_size),
+                         total_seq_error / (args.log_period * args.batch_size)))
                 print("kpis train_cost %f" % (total_loss / (args.log_period *
                                               args.batch_size)))
                 print("kpis train_acc %f" % (
@@ -203,6 +206,7 @@ def save_model(args, exe, iter_num):
 def main():
     args = parser.parse_args()
     print_arguments(args)
+    check_gpu(args.use_gpu)
     if args.profile:
         if args.use_gpu:
             with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof:
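The hunk cuts off inside main(), so the rest of the profiling branch is not shown. As a hedged, self-contained sketch of how the fluid profiler context managers seen above are typically used (run_with_profiling and run_training are hypothetical names, not from this file):

import paddle.fluid.profiler as profiler

def run_with_profiling(use_gpu, run_training):
    if use_gpu:
        # Writes CUDA kernel timings collected under nvprof to a CSV file.
        with profiler.cuda_profiler("cuda_profiler.txt", 'csv'):
            run_training()
    else:
        # Host-side operator profiler; 'total' sorts operators by total time spent.
        with profiler.profiler("CPU", sorted_key='total'):
            run_training()

Since check_gpu(args.use_gpu) now runs before this branch, the GPU path is only attempted when the installed PaddlePaddle can actually use CUDA.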