Thanks to visit codestin.com
Credit goes to github.com

Skip to content

Commit 032db2a

Browse files
committed
Start of function ops
Signed-off-by: Ryan Nett <[email protected]>
1 parent 51a9f5e commit 032db2a

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

49 files changed

+6865
-1
lines changed
Lines changed: 353 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,353 @@
1+
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License.
14+
=======================================================================*/
15+
16+
// This class has been generated, DO NOT EDIT!
17+
18+
package org.tensorflow.op.core;
19+
20+
import java.util.Arrays;
21+
import java.util.Iterator;
22+
import java.util.List;
23+
import org.tensorflow.ConcreteFunction;
24+
import org.tensorflow.Operand;
25+
import org.tensorflow.Operation;
26+
import org.tensorflow.OperationBuilder;
27+
import org.tensorflow.Output;
28+
import org.tensorflow.op.Operands;
29+
import org.tensorflow.op.RawOp;
30+
import org.tensorflow.op.Scope;
31+
import org.tensorflow.op.annotation.Endpoint;
32+
import org.tensorflow.op.annotation.Operator;
33+
import org.tensorflow.types.family.TType;
34+
35+
/**
36+
* Batches all the inputs tensors to the computation done by the function.
37+
* So, for example, in the following code
38+
* <pre>
39+
*
40+
* # This input will be captured.
41+
* y = tf.placeholder_with_default(1.0, shape=[])
42+
*
43+
* {@literal @}tf.Defun(tf.float32)
44+
* def computation(a):
45+
* return tf.matmul(a, a) + y
46+
*
47+
* b = gen_batch_ops.batch_function(
48+
* f=computation
49+
* in_tensors=[a],
50+
* captured_tensors=computation.captured_inputs,
51+
* Tout=[o.type for o in computation.definition.signature.output_arg],
52+
* num_batch_threads=1,
53+
* max_batch_size=10,
54+
* batch_timeout_micros=100000, # 100ms
55+
* allowed_batch_sizes=[3, 10],
56+
* batching_queue=&quot;&quot;)
57+
* </pre>
58+
* <p>If more than one session.run call is simultaneously trying to compute {@code b}
59+
* the values of {@code a} will be gathered, non-deterministically concatenated
60+
* along the first axis, and only one thread will run the computation.
61+
* <p>Assumes that all arguments of the function are Tensors which will be batched
62+
* along their first dimension.
63+
* <p>Arguments that are captured, are not batched. The session.run call which does
64+
* the concatenation, will use the values of the captured tensors available to it.
65+
* Therefore, typical uses of captured tensors should involve values which remain
66+
* unchanged across session.run calls. Inference is a good example of this.
67+
* <p>SparseTensor is not supported. The return value of the decorated function
68+
* must be a Tensor or a list/tuple of Tensors.
69+
*/
70+
@Operator
71+
public final class BatchFunction extends RawOp implements Iterable<Operand<TType>> {
72+
/**
73+
* The name of this op, as known by TensorFlow core engine
74+
*/
75+
public static final String OP_NAME = "BatchFunction";
76+
77+
private List<Output<?>> outTensors;
78+
79+
@SuppressWarnings("unchecked")
80+
private BatchFunction(Operation operation) {
81+
super(operation);
82+
int outputIdx = 0;
83+
int outTensorsLength = operation.outputListLength("out_tensors");
84+
outTensors = Arrays.asList(operation.outputList(outputIdx, outTensorsLength));
85+
outputIdx += outTensorsLength;
86+
}
87+
88+
/**
89+
* Factory method to create a class wrapping a new BatchFunction operation.
90+
*
91+
* @param scope current scope
92+
* @param inTensors The tensors to be batched.
93+
* @param capturedTensors The tensors which are captured in the function, and don't need
94+
* to be batched.
95+
* @param f the value of the f property
96+
* @param numBatchThreads Number of scheduling threads for processing batches of work.
97+
* Determines the number of batches processed in parallel.
98+
* @param maxBatchSize Batch sizes will never be bigger than this.
99+
* @param batchTimeoutMicros Maximum number of microseconds to wait before outputting
100+
* an incomplete batch.
101+
* @param Tout the types of the output tensors.
102+
* @param options carries optional attribute values
103+
* @return a new instance of BatchFunction
104+
*/
105+
@Endpoint(
106+
describeByClass = true
107+
)
108+
public static BatchFunction create(Scope scope, Iterable<Operand<?>> inTensors,
109+
Iterable<Operand<?>> capturedTensors, ConcreteFunction f, Long numBatchThreads,
110+
Long maxBatchSize, Long batchTimeoutMicros, List<Class<? extends TType>> Tout,
111+
Options... options) {
112+
OperationBuilder opBuilder = scope.env().opBuilder("BatchFunction", scope.makeOpName("BatchFunction"));
113+
opBuilder.addInputList(Operands.asOutputs(inTensors));
114+
opBuilder.addInputList(Operands.asOutputs(capturedTensors));
115+
opBuilder = scope.apply(opBuilder);
116+
opBuilder.setAttr("f", f);
117+
opBuilder.setAttr("num_batch_threads", numBatchThreads);
118+
opBuilder.setAttr("max_batch_size", maxBatchSize);
119+
opBuilder.setAttr("batch_timeout_micros", batchTimeoutMicros);
120+
opBuilder.setAttr("Tout", Operands.toDataTypes(Tout));
121+
if (options != null) {
122+
for (Options opts : options) {
123+
if (opts.maxEnqueuedBatches != null) {
124+
opBuilder.setAttr("max_enqueued_batches", opts.maxEnqueuedBatches);
125+
}
126+
if (opts.allowedBatchSizes != null) {
127+
long[] allowedBatchSizesArray = new long[opts.allowedBatchSizes.size()];
128+
for (int i = 0 ; i < allowedBatchSizesArray.length ; i++) {
129+
allowedBatchSizesArray[i] = opts.allowedBatchSizes.get(i);
130+
}
131+
opBuilder.setAttr("allowed_batch_sizes", allowedBatchSizesArray);
132+
}
133+
if (opts.container != null) {
134+
opBuilder.setAttr("container", opts.container);
135+
}
136+
if (opts.sharedName != null) {
137+
opBuilder.setAttr("shared_name", opts.sharedName);
138+
}
139+
if (opts.batchingQueue != null) {
140+
opBuilder.setAttr("batching_queue", opts.batchingQueue);
141+
}
142+
if (opts.enableLargeBatchSplitting != null) {
143+
opBuilder.setAttr("enable_large_batch_splitting", opts.enableLargeBatchSplitting);
144+
}
145+
}
146+
}
147+
return new BatchFunction(opBuilder.build());
148+
}
149+
150+
/**
151+
* Sets the maxEnqueuedBatches option.
152+
*
153+
* @param maxEnqueuedBatches Maximum number of batches enqueued. Default: 10.
154+
* @return this Options instance.
155+
*/
156+
public static Options maxEnqueuedBatches(Long maxEnqueuedBatches) {
157+
return new Options().maxEnqueuedBatches(maxEnqueuedBatches);
158+
}
159+
160+
/**
161+
* Sets the allowedBatchSizes option.
162+
*
163+
* @param allowedBatchSizes Optional list of allowed batch sizes. If left empty, does
164+
* nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
165+
* batches up to one of those sizes. The entries must increase monotonically.
166+
* If enable_large_batch_splitting is false (i.e., large-input-split is not
167+
* enabled) the final entry must equal max_batch_size.
168+
* @return this Options instance.
169+
*/
170+
public static Options allowedBatchSizes(List<Long> allowedBatchSizes) {
171+
return new Options().allowedBatchSizes(allowedBatchSizes);
172+
}
173+
174+
/**
175+
* Sets the allowedBatchSizes option.
176+
*
177+
* @param allowedBatchSizes Optional list of allowed batch sizes. If left empty, does
178+
* nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
179+
* batches up to one of those sizes. The entries must increase monotonically.
180+
* If enable_large_batch_splitting is false (i.e., large-input-split is not
181+
* enabled) the final entry must equal max_batch_size.
182+
* @return this Options instance.
183+
*/
184+
public static Options allowedBatchSizes(Long[] allowedBatchSizes) {
185+
return new Options().allowedBatchSizes(allowedBatchSizes);
186+
}
187+
188+
/**
189+
* Sets the container option.
190+
*
191+
* @param container Controls the scope of sharing of this batch.
192+
* @return this Options instance.
193+
*/
194+
public static Options container(String container) {
195+
return new Options().container(container);
196+
}
197+
198+
/**
199+
* Sets the sharedName option.
200+
*
201+
* @param sharedName Concurrently running instances of batch in the same device with the
202+
* same container and shared_name will batch their elements together. If left
203+
* empty, the op name will be used as the shared name.
204+
* @return this Options instance.
205+
*/
206+
public static Options sharedName(String sharedName) {
207+
return new Options().sharedName(sharedName);
208+
}
209+
210+
/**
211+
* Sets the batchingQueue option.
212+
*
213+
* @param batchingQueue the batchingQueue option
214+
* @return this Options instance.
215+
*/
216+
public static Options batchingQueue(String batchingQueue) {
217+
return new Options().batchingQueue(batchingQueue);
218+
}
219+
220+
/**
221+
* Sets the enableLargeBatchSplitting option.
222+
*
223+
* @param enableLargeBatchSplitting input with a large size (i.e., larger than the largest value of
224+
* {@code allowed_batch_sizes}) will be splitted into multiple batches with batch size.
225+
* @return this Options instance.
226+
*/
227+
public static Options enableLargeBatchSplitting(Boolean enableLargeBatchSplitting) {
228+
return new Options().enableLargeBatchSplitting(enableLargeBatchSplitting);
229+
}
230+
231+
/**
232+
* Gets outTensors.
233+
* The output tensors.
234+
* @return outTensors.
235+
*/
236+
public List<Output<?>> outTensors() {
237+
return outTensors;
238+
}
239+
240+
@Override
241+
@SuppressWarnings({"rawtypes", "unchecked"})
242+
public Iterator<Operand<TType>> iterator() {
243+
return (Iterator) outTensors.iterator();
244+
}
245+
246+
/**
247+
* Optional attributes for {@link org.tensorflow.op.core.BatchFunction}
248+
*/
249+
public static class Options {
250+
private Long maxEnqueuedBatches;
251+
252+
private List<Long> allowedBatchSizes;
253+
254+
private String container;
255+
256+
private String sharedName;
257+
258+
private String batchingQueue;
259+
260+
private Boolean enableLargeBatchSplitting;
261+
262+
private Options() {
263+
}
264+
265+
/**
266+
* Sets the maxEnqueuedBatches option.
267+
*
268+
* @param maxEnqueuedBatches Maximum number of batches enqueued. Default: 10.
269+
* @return this Options instance.
270+
*/
271+
public Options maxEnqueuedBatches(Long maxEnqueuedBatches) {
272+
this.maxEnqueuedBatches = maxEnqueuedBatches;
273+
return this;
274+
}
275+
276+
/**
277+
* Sets the allowedBatchSizes option.
278+
*
279+
* @param allowedBatchSizes Optional list of allowed batch sizes. If left empty, does
280+
* nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
281+
* batches up to one of those sizes. The entries must increase monotonically.
282+
* If enable_large_batch_splitting is false (i.e., large-input-split is not
283+
* enabled) the final entry must equal max_batch_size.
284+
* @return this Options instance.
285+
*/
286+
public Options allowedBatchSizes(List<Long> allowedBatchSizes) {
287+
this.allowedBatchSizes = allowedBatchSizes;
288+
return this;
289+
}
290+
291+
/**
292+
* Sets the allowedBatchSizes option.
293+
*
294+
* @param allowedBatchSizes Optional list of allowed batch sizes. If left empty, does
295+
* nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
296+
* batches up to one of those sizes. The entries must increase monotonically.
297+
* If enable_large_batch_splitting is false (i.e., large-input-split is not
298+
* enabled) the final entry must equal max_batch_size.
299+
* @return this Options instance.
300+
*/
301+
public Options allowedBatchSizes(Long... allowedBatchSizes) {
302+
this.allowedBatchSizes = Arrays.asList(allowedBatchSizes);
303+
return this;
304+
}
305+
306+
/**
307+
* Sets the container option.
308+
*
309+
* @param container Controls the scope of sharing of this batch.
310+
* @return this Options instance.
311+
*/
312+
public Options container(String container) {
313+
this.container = container;
314+
return this;
315+
}
316+
317+
/**
318+
* Sets the sharedName option.
319+
*
320+
* @param sharedName Concurrently running instances of batch in the same device with the
321+
* same container and shared_name will batch their elements together. If left
322+
* empty, the op name will be used as the shared name.
323+
* @return this Options instance.
324+
*/
325+
public Options sharedName(String sharedName) {
326+
this.sharedName = sharedName;
327+
return this;
328+
}
329+
330+
/**
331+
* Sets the batchingQueue option.
332+
*
333+
* @param batchingQueue the batchingQueue option
334+
* @return this Options instance.
335+
*/
336+
public Options batchingQueue(String batchingQueue) {
337+
this.batchingQueue = batchingQueue;
338+
return this;
339+
}
340+
341+
/**
342+
* Sets the enableLargeBatchSplitting option.
343+
*
344+
* @param enableLargeBatchSplitting input with a large size (i.e., larger than the largest value of
345+
* {@code allowed_batch_sizes}) will be splitted into multiple batches with batch size.
346+
* @return this Options instance.
347+
*/
348+
public Options enableLargeBatchSplitting(Boolean enableLargeBatchSplitting) {
349+
this.enableLargeBatchSplitting = enableLargeBatchSplitting;
350+
return this;
351+
}
352+
}
353+
}

0 commit comments

Comments
 (0)