@@ -86,9 +86,9 @@ def __init__(self, num_outputs, reuse=False, trainable=True):
     self.losses = - (tf.log(self.picked_action_probs) * self.targets + 0.01 * self.entropy)
     self.loss = tf.reduce_sum(self.losses, name="loss")

-    tf.scalar_summary(self.loss.op.name, self.loss)
-    tf.scalar_summary(self.entropy_mean.op.name, self.entropy_mean)
-    tf.histogram_summary(self.entropy.op.name, self.entropy)
+    tf.summary.scalar(self.loss.op.name, self.loss)
+    tf.summary.scalar(self.entropy_mean.op.name, self.entropy_mean)
+    tf.summary.histogram(self.entropy.op.name, self.entropy)

     if trainable:
       # self.optimizer = tf.train.AdamOptimizer(1e-4)
@@ -103,7 +103,7 @@ def __init__(self, num_outputs, reuse=False, trainable=True):
     summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
     sumaries = [s for s in summary_ops if "policy_net" in s.name or "shared" in s.name]
     sumaries = [s for s in summary_ops if var_scope_name in s.name]
-    self.summaries = tf.merge_summary(sumaries)
+    self.summaries = tf.summary.merge(sumaries)


 class ValueEstimator():
@@ -146,15 +146,15 @@ def __init__(self, reuse=False, trainable=True):

     # Summaries
     prefix = tf.get_variable_scope().name
-    tf.scalar_summary(self.loss.name, self.loss)
-    tf.scalar_summary("{}/max_value".format(prefix), tf.reduce_max(self.logits))
-    tf.scalar_summary("{}/min_value".format(prefix), tf.reduce_min(self.logits))
-    tf.scalar_summary("{}/mean_value".format(prefix), tf.reduce_mean(self.logits))
-    tf.scalar_summary("{}/reward_max".format(prefix), tf.reduce_max(self.targets))
-    tf.scalar_summary("{}/reward_min".format(prefix), tf.reduce_min(self.targets))
-    tf.scalar_summary("{}/reward_mean".format(prefix), tf.reduce_mean(self.targets))
-    tf.histogram_summary("{}/reward_targets".format(prefix), self.targets)
-    tf.histogram_summary("{}/values".format(prefix), self.logits)
+    tf.summary.scalar(self.loss.name, self.loss)
+    tf.summary.scalar("{}/max_value".format(prefix), tf.reduce_max(self.logits))
+    tf.summary.scalar("{}/min_value".format(prefix), tf.reduce_min(self.logits))
+    tf.summary.scalar("{}/mean_value".format(prefix), tf.reduce_mean(self.logits))
+    tf.summary.scalar("{}/reward_max".format(prefix), tf.reduce_max(self.targets))
+    tf.summary.scalar("{}/reward_min".format(prefix), tf.reduce_min(self.targets))
+    tf.summary.scalar("{}/reward_mean".format(prefix), tf.reduce_mean(self.targets))
+    tf.summary.histogram("{}/reward_targets".format(prefix), self.targets)
+    tf.summary.histogram("{}/values".format(prefix), self.logits)

     if trainable:
       # self.optimizer = tf.train.AdamOptimizer(1e-4)
@@ -168,4 +168,4 @@ def __init__(self, reuse=False, trainable=True):
     summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
     sumaries = [s for s in summary_ops if "policy_net" in s.name or "shared" in s.name]
     sumaries = [s for s in summary_ops if var_scope_name in s.name]
-    self.summaries = tf.merge_summary(sumaries)
+    self.summaries = tf.summary.merge(sumaries)
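For context, the renames in this diff track the TensorFlow 1.0 reorganization that moved the standalone summary ops (tf.scalar_summary, tf.histogram_summary, tf.merge_summary) under the tf.summary module; the call signatures are otherwise unchanged. Below is a minimal, self-contained sketch of the replacement API. The toy graph, the x placeholder, and the "logs/" directory are illustrative assumptions, not code from this PR.

import tensorflow as tf

# Illustrative graph: "x" and this loss are placeholders, not part of the PR.
x = tf.placeholder(tf.float32, [None], name="x")
loss = tf.reduce_mean(tf.square(x))

# TF >= 1.0 spellings of the ops renamed in this diff:
tf.summary.scalar("loss", loss)          # was tf.scalar_summary(...)
tf.summary.histogram("x_values", x)      # was tf.histogram_summary(...)

# Merge an explicit list of summary ops, as the estimators above do.
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
merged = tf.summary.merge(summary_ops)   # was tf.merge_summary(...)

with tf.Session() as sess:
  # The logdir "logs/" is a placeholder; any writable path works.
  writer = tf.summary.FileWriter("logs/", sess.graph)
  summary_str = sess.run(merged, feed_dict={x: [1.0, 2.0, 3.0]})
  writer.add_summary(summary_str, global_step=0)
  writer.close()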