@@ -2,14 +2,14 @@ syntax = "proto3";
 
 package tensorboard;
 
+import "tensorboard/compat/proto/coordination_config.proto";
 import "tensorboard/compat/proto/cost_graph.proto";
 import "tensorboard/compat/proto/graph.proto";
 import "tensorboard/compat/proto/step_stats.proto";
 import "tensorboard/compat/proto/cluster.proto";
 import "tensorboard/compat/proto/debug.proto";
 import "tensorboard/compat/proto/rewriter_config.proto";
 import "tensorboard/compat/proto/rpc_options.proto";
-import "tensorboard/compat/proto/coordination_config.proto";
 
 option cc_enable_arenas = true;
 option java_outer_classname = "ConfigProtos";
@@ -77,6 +77,11 @@ message GPUOptions {
   // name "/device:GPU:<id>") are also called "TF GPU id"s. Please
   // refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
   // for more information.
+  // 3. The visible_device_list is also used for PluggableDevice. And
+  // different types of PluggableDevices share this field. In that case,
+  // the pluggable_device_type is used to distinguish them, making the
+  // visible_device_list a list of <pluggable_device_type>:<device_index>,
+  // e.g. "PluggableDeviceA:0,PluggableDeviceA:1,PluggableDeviceB:0".
   string visible_device_list = 5;
 
   // In the event polling loop sleep this many microseconds between
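
For context, the comment added in this hunk extends visible_device_list beyond plain GPU ids. Below is a minimal sketch of populating the field from Python, assuming the tf.compat.v1 session-config API; the PluggableDeviceA/PluggableDeviceB type names are the placeholder examples from the comment, not real device types:

import tensorflow as tf

# Plain GPU ids: platform GPUs 0 and 2 become TF GPU ids 0 and 1.
gpu_options = tf.compat.v1.GPUOptions(visible_device_list="0,2")

# PluggableDevice-style entries, each <pluggable_device_type>:<device_index>
# per the new comment (type names here are illustrative placeholders).
pluggable_options = tf.compat.v1.GPUOptions(
    visible_device_list="PluggableDeviceA:0,PluggableDeviceA:1,PluggableDeviceB:0")

config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
session = tf.compat.v1.Session(config=config)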
@@ -306,7 +311,7 @@ message GPUOptions {
 
   // Everything inside experimental is subject to change and is not subject
   // to API stability guarantees in
-  // https://www.tensorflow.org/guide/version_compat.
+  // https://www.tensorflow.org/guide/versions.
   Experimental experimental = 9;
 }
 
@@ -578,7 +583,7 @@ message ConfigProto {
 
   // Everything inside Experimental is subject to change and is not subject
   // to API stability guarantees in
-  // https://www.tensorflow.org/guide/version_compat.
+  // https://www.tensorflow.org/guide/versions.
   message Experimental {
     // Task name for group resolution.
     string collective_group_leader = 1;
@@ -750,7 +755,7 @@ message ConfigProto {
     bool disable_functional_ops_lowering = 21;
 
     // Provides a hint to XLA auto clustering to prefer forming a single large
-    // cluster that encompases most of the graph.
+    // cluster that encompasses most of the graph.
     bool xla_prefer_single_graph_cluster = 22;
 
     // Distributed coordination service configurations.
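
Since this hunk touches the XLA clustering hint, here is a sketch of setting it from Python, assuming a TensorFlow build recent enough to carry field 22 in ConfigProto.Experimental:

import tensorflow as tf

config = tf.compat.v1.ConfigProto()
# Hint XLA auto clustering toward one large cluster covering most of the graph.
config.experimental.xla_prefer_single_graph_cluster = True
session = tf.compat.v1.Session(config=config)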
@@ -983,3 +988,28 @@ message CallableOptions {
 
   // Next: 9
 }
+
+message BatchingOptions {
+  // Number of scheduling threads for processing batches of work. Determines
+  // the number of batches processed in parallel. This should be roughly in line
+  // with the number of TPU cores available.
+  int32 num_batch_threads = 1;
+
+  // The maximum allowed batch size. Can be larger than allowed_batch_sizes to
+  // utilize large batch splitting.
+  int32 max_batch_size = 2;
+
+  // Maximum number of microseconds to wait before outputting an incomplete
+  // batch.
+  int32 batch_timeout_micros = 3;
+
+  // Optional list of allowed batch sizes. If left empty, does nothing.
+  // Otherwise, supplies a list of batch sizes, causing the op to pad batches up
+  // to one of those sizes. The entries must increase monotonically, and the
+  // final entry must be equal or less than the max_batch_size.
+  repeated int32 allowed_batch_sizes = 4;
+
+  // Maximum number of batches enqueued for processing before requests are
+  // failed fast.
+  int32 max_enqueued_batches = 5;
+}
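
A sketch of building the new BatchingOptions message from Python, assuming the regenerated tensorboard.compat.proto.config_pb2 bindings expose it like the other messages in this file; all field values are illustrative, not recommendations:

from tensorboard.compat.proto import config_pb2

batching = config_pb2.BatchingOptions(
    num_batch_threads=8,        # roughly the number of TPU cores available
    max_batch_size=64,          # may exceed allowed_batch_sizes for splitting
    batch_timeout_micros=1000,  # emit an incomplete batch after at most 1 ms
    max_enqueued_batches=100,   # fail fast once this many batches are queued
)
# Entries must increase monotonically; the final entry must not exceed
# max_batch_size. Batches are padded up to the nearest listed size.
batching.allowed_batch_sizes.extend([8, 16, 32, 64])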