Tensor Tiling Library
 
TTL_double_scheme.h
/*
 * TTL_double_scheme.h
 *
 * Copyright (c) 2023 Mobileye
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// This file presumes that the following headers have already been included;
// that is not done here for path reasons.
// #include "TTL_core.h"
// #include "TTL_import_export.h"
// #include TTL_IMPORT_EXPORT_INCLUDE_H

/**
 * @brief Wait for the previous import operation to complete before beginning an
 * import of the next tile.
 *
 * @param db A TTL_import_double_buffering_t describing the attributes of the
 * transfer
 * @param next_tile A description of the tile to begin importing.
 */
static inline TTL_int_void_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_import_double_const_void_tensor_buffering_t *const db, const TTL_tile_t next_tile) {
    // For performance, compute everything possible before waiting for the
    // previous operations to finish.
    const TTL_layout_t int_layout = TTL_create_layout(next_tile.shape.width, next_tile.shape.height);
    const TTL_int_void_sub_tensor_t import_to = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], next_tile.shape, int_layout, db->common.ext_tensor_in, next_tile.offset);
    const TTL_const_ext_void_tensor_t import_from = TTL_create_const_ext_tensor(
        db->common.ext_tensor_in.base, next_tile.shape, db->common.ext_tensor_in.layout,
        next_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(next_tile) == false) {
        TTL_import_sub_tensor(import_to, import_from, db->event);
    }

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);

    const TTL_layout_t prev_int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_int_void_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, prev_int_layout,
        db->common.ext_tensor_in, db->prev_tile.offset);

    db->prev_tile = next_tile;

    return result;
}

/**
 * @brief Wait for the previous export operation to complete before beginning an
 * export of the next tile.
 *
 * @param db A TTL_export_double_buffering_t describing the attributes of the
 * transfer
 * @param tile_current A description of the tile to begin exporting.
 */
static inline TTL_int_void_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_export_double_const_void_tensor_buffering_t *const db, const TTL_tile_t tile_current) {
    const TTL_layout_t int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_const_int_void_tensor_t export_from = TTL_create_const_int_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, int_layout, db->common.ext_tensor_in.elem_size);
    const TTL_ext_void_tensor_t export_to = TTL_create_ext_tensor(
        db->common.ext_tensor_in.base, db->prev_tile.shape, db->common.ext_tensor_in.layout,
        db->prev_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(db->prev_tile) == false)
        TTL_export(*TTL_to_void_tensor(TTL_to_const_tensor(&export_from)), *TTL_to_void_tensor(&export_to), db->event);

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);
    const TTL_layout_t curr_int_layout = TTL_create_layout(tile_current.shape.width, tile_current.shape.height);
    const TTL_int_void_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], tile_current.shape, curr_int_layout,
        db->common.ext_tensor_in, tile_current.offset);
    db->prev_tile = tile_current;

    return result;
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_import_double_const_void_tensor_buffering_t *import_double_buffering) {
    (void)import_double_buffering;
    // Nothing to do.
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_export_double_const_void_tensor_buffering_t *export_double_buffering) {
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
}
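
A typical import-side use of this scheme pairs one TTL_step_buffering call per loop iteration with a final TTL_finish_buffering. The sketch below is illustrative only: it assumes import_db was initialised by the scheme's start function (not shown) so that the first tile's import is already in flight, and number_of_tiles, next_tile_of() and process() are hypothetical application helpers.

    // Minimal sketch of a double-buffered import loop (assumptions noted above).
    for (int i = 1; i <= number_of_tiles; ++i) {
        // Queue the import of the following tile (an empty tile once the last real
        // tile has been queued) and receive the sub-tensor of the tile queued on
        // the previous iteration, whose data is now resident in local memory.
        const TTL_tile_t next = (i < number_of_tiles) ? next_tile_of(i) : TTL_create_empty_tile();
        const TTL_int_void_sub_tensor_t current = TTL_step_buffering(&import_db, next);

        process(current);  // hypothetical per-tile compute
    }

    TTL_finish_buffering(&import_db);  // a no-op for imports, kept for API symmetry
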
/**
 * @brief Wait for the previous import operation to complete before beginning an
 * import of the next tile.
 *
 * @param db A TTL_import_double_buffering_t describing the attributes of the
 * transfer
 * @param next_tile A description of the tile to begin importing.
 */
static inline TTL_int_char_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_import_double_const_char_tensor_buffering_t *const db, const TTL_tile_t next_tile) {
    // For performance, compute everything possible before waiting for the
    // previous operations to finish.
    const TTL_layout_t int_layout = TTL_create_layout(next_tile.shape.width, next_tile.shape.height);
    const TTL_int_char_sub_tensor_t import_to = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], next_tile.shape, int_layout, db->common.ext_tensor_in, next_tile.offset);
    const TTL_const_ext_char_tensor_t import_from = TTL_create_const_ext_tensor(
        db->common.ext_tensor_in.base, next_tile.shape, db->common.ext_tensor_in.layout,
        next_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(next_tile) == false) {
        TTL_import_sub_tensor(import_to, import_from, db->event);
    }

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);

    const TTL_layout_t prev_int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_int_char_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, prev_int_layout,
        db->common.ext_tensor_in, db->prev_tile.offset);

    db->prev_tile = next_tile;

    return result;
}

/**
 * @brief Wait for the previous export operation to complete before beginning an
 * export of the next tile.
 *
 * @param db A TTL_export_double_buffering_t describing the attributes of the
 * transfer
 * @param tile_current A description of the tile to begin exporting.
 */
static inline TTL_int_char_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_export_double_const_char_tensor_buffering_t *const db, const TTL_tile_t tile_current) {
    const TTL_layout_t int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_const_int_char_tensor_t export_from = TTL_create_const_int_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, int_layout, db->common.ext_tensor_in.elem_size);
    const TTL_ext_char_tensor_t export_to = TTL_create_ext_tensor(
        db->common.ext_tensor_in.base, db->prev_tile.shape, db->common.ext_tensor_in.layout,
        db->prev_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(db->prev_tile) == false)
        TTL_export(*TTL_to_void_tensor(TTL_to_const_tensor(&export_from)), *TTL_to_void_tensor(&export_to), db->event);

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);
    const TTL_layout_t curr_int_layout = TTL_create_layout(tile_current.shape.width, tile_current.shape.height);
    const TTL_int_char_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], tile_current.shape, curr_int_layout,
        db->common.ext_tensor_in, tile_current.offset);
    db->prev_tile = tile_current;

    return result;
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_import_double_const_char_tensor_buffering_t *import_double_buffering) {
    (void)import_double_buffering;
    // Nothing to do.
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_export_double_const_char_tensor_buffering_t *export_double_buffering) {
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
}

/**
 * @brief Wait for the previous import operation to complete before beginning an
 * import of the next tile.
 *
 * @param db A TTL_import_double_buffering_t describing the attributes of the
 * transfer
 * @param next_tile A description of the tile to begin importing.
 */
static inline TTL_int_uchar_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_import_double_const_uchar_tensor_buffering_t *const db, const TTL_tile_t next_tile) {
    // For performance, compute everything possible before waiting for the
    // previous operations to finish.
    const TTL_layout_t int_layout = TTL_create_layout(next_tile.shape.width, next_tile.shape.height);
    const TTL_int_uchar_sub_tensor_t import_to = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], next_tile.shape, int_layout, db->common.ext_tensor_in, next_tile.offset);
    const TTL_const_ext_uchar_tensor_t import_from = TTL_create_const_ext_tensor(
        db->common.ext_tensor_in.base, next_tile.shape, db->common.ext_tensor_in.layout,
        next_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(next_tile) == false) {
        TTL_import_sub_tensor(import_to, import_from, db->event);
    }

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);

    const TTL_layout_t prev_int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_int_uchar_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, prev_int_layout,
        db->common.ext_tensor_in, db->prev_tile.offset);

    db->prev_tile = next_tile;

    return result;
}

/**
 * @brief Wait for the previous export operation to complete before beginning an
 * export of the next tile.
 *
 * @param db A TTL_export_double_buffering_t describing the attributes of the
 * transfer
 * @param tile_current A description of the tile to begin exporting.
 */
static inline TTL_int_uchar_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_export_double_const_uchar_tensor_buffering_t *const db, const TTL_tile_t tile_current) {
    const TTL_layout_t int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_const_int_uchar_tensor_t export_from = TTL_create_const_int_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, int_layout, db->common.ext_tensor_in.elem_size);
    const TTL_ext_uchar_tensor_t export_to = TTL_create_ext_tensor(
        db->common.ext_tensor_in.base, db->prev_tile.shape, db->common.ext_tensor_in.layout,
        db->prev_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(db->prev_tile) == false)
        TTL_export(*TTL_to_void_tensor(TTL_to_const_tensor(&export_from)), *TTL_to_void_tensor(&export_to), db->event);

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);
    const TTL_layout_t curr_int_layout = TTL_create_layout(tile_current.shape.width, tile_current.shape.height);
    const TTL_int_uchar_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], tile_current.shape, curr_int_layout,
        db->common.ext_tensor_in, tile_current.offset);
    db->prev_tile = tile_current;

    return result;
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_import_double_const_uchar_tensor_buffering_t *import_double_buffering) {
    (void)import_double_buffering;
    // Nothing to do.
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_export_double_const_uchar_tensor_buffering_t *export_double_buffering) {
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
}

/**
 * @brief Wait for the previous import operation to complete before beginning an
 * import of the next tile.
 *
 * @param db A TTL_import_double_buffering_t describing the attributes of the
 * transfer
 * @param next_tile A description of the tile to begin importing.
 */
static inline TTL_int_int_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_import_double_const_int_tensor_buffering_t *const db, const TTL_tile_t next_tile) {
    // For performance, compute everything possible before waiting for the
    // previous operations to finish.
    const TTL_layout_t int_layout = TTL_create_layout(next_tile.shape.width, next_tile.shape.height);
    const TTL_int_int_sub_tensor_t import_to = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], next_tile.shape, int_layout, db->common.ext_tensor_in, next_tile.offset);
    const TTL_const_ext_int_tensor_t import_from = TTL_create_const_ext_tensor(
        db->common.ext_tensor_in.base, next_tile.shape, db->common.ext_tensor_in.layout,
        next_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(next_tile) == false) {
        TTL_import_sub_tensor(import_to, import_from, db->event);
    }

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);

    const TTL_layout_t prev_int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_int_int_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, prev_int_layout,
        db->common.ext_tensor_in, db->prev_tile.offset);

    db->prev_tile = next_tile;

    return result;
}

/**
 * @brief Wait for the previous export operation to complete before beginning an
 * export of the next tile.
 *
 * @param db A TTL_export_double_buffering_t describing the attributes of the
 * transfer
 * @param tile_current A description of the tile to begin exporting.
 */
static inline TTL_int_int_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_export_double_const_int_tensor_buffering_t *const db, const TTL_tile_t tile_current) {
    const TTL_layout_t int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_const_int_int_tensor_t export_from = TTL_create_const_int_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, int_layout, db->common.ext_tensor_in.elem_size);
    const TTL_ext_int_tensor_t export_to = TTL_create_ext_tensor(
        db->common.ext_tensor_in.base, db->prev_tile.shape, db->common.ext_tensor_in.layout,
        db->prev_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(db->prev_tile) == false)
        TTL_export(*TTL_to_void_tensor(TTL_to_const_tensor(&export_from)), *TTL_to_void_tensor(&export_to), db->event);

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);
    const TTL_layout_t curr_int_layout = TTL_create_layout(tile_current.shape.width, tile_current.shape.height);
    const TTL_int_int_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], tile_current.shape, curr_int_layout,
        db->common.ext_tensor_in, tile_current.offset);
    db->prev_tile = tile_current;

    return result;
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_import_double_const_int_tensor_buffering_t *import_double_buffering) {
    (void)import_double_buffering;
    // Nothing to do.
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_export_double_const_int_tensor_buffering_t *export_double_buffering) {
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
}

/**
 * @brief Wait for the previous import operation to complete before beginning an
 * import of the next tile.
 *
 * @param db A TTL_import_double_buffering_t describing the attributes of the
 * transfer
 * @param next_tile A description of the tile to begin importing.
 */
static inline TTL_int_uint_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_import_double_const_uint_tensor_buffering_t *const db, const TTL_tile_t next_tile) {
    // For performance, compute everything possible before waiting for the
    // previous operations to finish.
    const TTL_layout_t int_layout = TTL_create_layout(next_tile.shape.width, next_tile.shape.height);
    const TTL_int_uint_sub_tensor_t import_to = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], next_tile.shape, int_layout, db->common.ext_tensor_in, next_tile.offset);
    const TTL_const_ext_uint_tensor_t import_from = TTL_create_const_ext_tensor(
        db->common.ext_tensor_in.base, next_tile.shape, db->common.ext_tensor_in.layout,
        next_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(next_tile) == false) {
        TTL_import_sub_tensor(import_to, import_from, db->event);
    }

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);

    const TTL_layout_t prev_int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_int_uint_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, prev_int_layout,
        db->common.ext_tensor_in, db->prev_tile.offset);

    db->prev_tile = next_tile;

    return result;
}

/**
 * @brief Wait for the previous export operation to complete before beginning an
 * export of the next tile.
 *
 * @param db A TTL_export_double_buffering_t describing the attributes of the
 * transfer
 * @param tile_current A description of the tile to begin exporting.
 */
static inline TTL_int_uint_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_export_double_const_uint_tensor_buffering_t *const db, const TTL_tile_t tile_current) {
    const TTL_layout_t int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_const_int_uint_tensor_t export_from = TTL_create_const_int_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, int_layout, db->common.ext_tensor_in.elem_size);
    const TTL_ext_uint_tensor_t export_to = TTL_create_ext_tensor(
        db->common.ext_tensor_in.base, db->prev_tile.shape, db->common.ext_tensor_in.layout,
        db->prev_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(db->prev_tile) == false)
        TTL_export(*TTL_to_void_tensor(TTL_to_const_tensor(&export_from)), *TTL_to_void_tensor(&export_to), db->event);

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);
    const TTL_layout_t curr_int_layout = TTL_create_layout(tile_current.shape.width, tile_current.shape.height);
    const TTL_int_uint_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], tile_current.shape, curr_int_layout,
        db->common.ext_tensor_in, tile_current.offset);
    db->prev_tile = tile_current;

    return result;
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_import_double_const_uint_tensor_buffering_t *import_double_buffering) {
    (void)import_double_buffering;
    // Nothing to do.
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_export_double_const_uint_tensor_buffering_t *export_double_buffering) {
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
}

/**
 * @brief Wait for the previous import operation to complete before beginning an
 * import of the next tile.
 *
 * @param db A TTL_import_double_buffering_t describing the attributes of the
 * transfer
 * @param next_tile A description of the tile to begin importing.
 */
static inline TTL_int_short_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_import_double_const_short_tensor_buffering_t *const db, const TTL_tile_t next_tile) {
    // For performance, compute everything possible before waiting for the
    // previous operations to finish.
    const TTL_layout_t int_layout = TTL_create_layout(next_tile.shape.width, next_tile.shape.height);
    const TTL_int_short_sub_tensor_t import_to = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], next_tile.shape, int_layout, db->common.ext_tensor_in, next_tile.offset);
    const TTL_const_ext_short_tensor_t import_from = TTL_create_const_ext_tensor(
        db->common.ext_tensor_in.base, next_tile.shape, db->common.ext_tensor_in.layout,
        next_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(next_tile) == false) {
        TTL_import_sub_tensor(import_to, import_from, db->event);
    }

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);

    const TTL_layout_t prev_int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_int_short_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, prev_int_layout,
        db->common.ext_tensor_in, db->prev_tile.offset);

    db->prev_tile = next_tile;

    return result;
}

/**
 * @brief Wait for the previous export operation to complete before beginning an
 * export of the next tile.
 *
 * @param db A TTL_export_double_buffering_t describing the attributes of the
 * transfer
 * @param tile_current A description of the tile to begin exporting.
 */
static inline TTL_int_short_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_export_double_const_short_tensor_buffering_t *const db, const TTL_tile_t tile_current) {
    const TTL_layout_t int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_const_int_short_tensor_t export_from = TTL_create_const_int_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, int_layout, db->common.ext_tensor_in.elem_size);
    const TTL_ext_short_tensor_t export_to = TTL_create_ext_tensor(
        db->common.ext_tensor_in.base, db->prev_tile.shape, db->common.ext_tensor_in.layout,
        db->prev_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(db->prev_tile) == false)
        TTL_export(*TTL_to_void_tensor(TTL_to_const_tensor(&export_from)), *TTL_to_void_tensor(&export_to), db->event);

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);
    const TTL_layout_t curr_int_layout = TTL_create_layout(tile_current.shape.width, tile_current.shape.height);
    const TTL_int_short_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], tile_current.shape, curr_int_layout,
        db->common.ext_tensor_in, tile_current.offset);
    db->prev_tile = tile_current;

    return result;
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_import_double_const_short_tensor_buffering_t *import_double_buffering) {
    (void)import_double_buffering;
    // Nothing to do.
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_export_double_const_short_tensor_buffering_t *export_double_buffering) {
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
}

/**
 * @brief Wait for the previous import operation to complete before beginning an
 * import of the next tile.
 *
 * @param db A TTL_import_double_buffering_t describing the attributes of the
 * transfer
 * @param next_tile A description of the tile to begin importing.
 */
static inline TTL_int_ushort_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_import_double_const_ushort_tensor_buffering_t *const db, const TTL_tile_t next_tile) {
    // For performance, compute everything possible before waiting for the
    // previous operations to finish.
    const TTL_layout_t int_layout = TTL_create_layout(next_tile.shape.width, next_tile.shape.height);
    const TTL_int_ushort_sub_tensor_t import_to = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], next_tile.shape, int_layout, db->common.ext_tensor_in, next_tile.offset);
    const TTL_const_ext_ushort_tensor_t import_from = TTL_create_const_ext_tensor(
        db->common.ext_tensor_in.base, next_tile.shape, db->common.ext_tensor_in.layout,
        next_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(next_tile) == false) {
        TTL_import_sub_tensor(import_to, import_from, db->event);
    }

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);

    const TTL_layout_t prev_int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_int_ushort_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, prev_int_layout,
        db->common.ext_tensor_in, db->prev_tile.offset);

    db->prev_tile = next_tile;

    return result;
}

/**
 * @brief Wait for the previous export operation to complete before beginning an
 * export of the next tile.
 *
 * @param db A TTL_export_double_buffering_t describing the attributes of the
 * transfer
 * @param tile_current A description of the tile to begin exporting.
 */
static inline TTL_int_ushort_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_export_double_const_ushort_tensor_buffering_t *const db, const TTL_tile_t tile_current) {
    const TTL_layout_t int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_const_int_ushort_tensor_t export_from = TTL_create_const_int_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, int_layout, db->common.ext_tensor_in.elem_size);
    const TTL_ext_ushort_tensor_t export_to = TTL_create_ext_tensor(
        db->common.ext_tensor_in.base, db->prev_tile.shape, db->common.ext_tensor_in.layout,
        db->prev_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(db->prev_tile) == false)
        TTL_export(*TTL_to_void_tensor(TTL_to_const_tensor(&export_from)), *TTL_to_void_tensor(&export_to), db->event);

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);
    const TTL_layout_t curr_int_layout = TTL_create_layout(tile_current.shape.width, tile_current.shape.height);
    const TTL_int_ushort_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], tile_current.shape, curr_int_layout,
        db->common.ext_tensor_in, tile_current.offset);
    db->prev_tile = tile_current;

    return result;
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_import_double_const_ushort_tensor_buffering_t *import_double_buffering) {
    (void)import_double_buffering;
    // Nothing to do.
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_export_double_const_ushort_tensor_buffering_t *export_double_buffering) {
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
}

/**
 * @brief Wait for the previous import operation to complete before beginning an
 * import of the next tile.
 *
 * @param db A TTL_import_double_buffering_t describing the attributes of the
 * transfer
 * @param next_tile A description of the tile to begin importing.
 */
static inline TTL_int_long_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_import_double_const_long_tensor_buffering_t *const db, const TTL_tile_t next_tile) {
    // For performance, compute everything possible before waiting for the
    // previous operations to finish.
    const TTL_layout_t int_layout = TTL_create_layout(next_tile.shape.width, next_tile.shape.height);
    const TTL_int_long_sub_tensor_t import_to = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], next_tile.shape, int_layout, db->common.ext_tensor_in, next_tile.offset);
    const TTL_const_ext_long_tensor_t import_from = TTL_create_const_ext_tensor(
        db->common.ext_tensor_in.base, next_tile.shape, db->common.ext_tensor_in.layout,
        next_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(next_tile) == false) {
        TTL_import_sub_tensor(import_to, import_from, db->event);
    }

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);

    const TTL_layout_t prev_int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_int_long_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, prev_int_layout,
        db->common.ext_tensor_in, db->prev_tile.offset);

    db->prev_tile = next_tile;

    return result;
}

/**
 * @brief Wait for the previous export operation to complete before beginning an
 * export of the next tile.
 *
 * @param db A TTL_export_double_buffering_t describing the attributes of the
 * transfer
 * @param tile_current A description of the tile to begin exporting.
 */
static inline TTL_int_long_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_export_double_const_long_tensor_buffering_t *const db, const TTL_tile_t tile_current) {
    const TTL_layout_t int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_const_int_long_tensor_t export_from = TTL_create_const_int_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, int_layout, db->common.ext_tensor_in.elem_size);
    const TTL_ext_long_tensor_t export_to = TTL_create_ext_tensor(
        db->common.ext_tensor_in.base, db->prev_tile.shape, db->common.ext_tensor_in.layout,
        db->prev_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(db->prev_tile) == false)
        TTL_export(*TTL_to_void_tensor(TTL_to_const_tensor(&export_from)), *TTL_to_void_tensor(&export_to), db->event);

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);
    const TTL_layout_t curr_int_layout = TTL_create_layout(tile_current.shape.width, tile_current.shape.height);
    const TTL_int_long_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], tile_current.shape, curr_int_layout,
        db->common.ext_tensor_in, tile_current.offset);
    db->prev_tile = tile_current;

    return result;
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_import_double_const_long_tensor_buffering_t *import_double_buffering) {
    (void)import_double_buffering;
    // Nothing to do.
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_export_double_const_long_tensor_buffering_t *export_double_buffering) {
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
}

/**
 * @brief Wait for the previous import operation to complete before beginning an
 * import of the next tile.
 *
 * @param db A TTL_import_double_buffering_t describing the attributes of the
 * transfer
 * @param next_tile A description of the tile to begin importing.
 */
static inline TTL_int_ulong_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_import_double_const_ulong_tensor_buffering_t *const db, const TTL_tile_t next_tile) {
    // For performance, compute everything possible before waiting for the
    // previous operations to finish.
    const TTL_layout_t int_layout = TTL_create_layout(next_tile.shape.width, next_tile.shape.height);
    const TTL_int_ulong_sub_tensor_t import_to = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], next_tile.shape, int_layout, db->common.ext_tensor_in, next_tile.offset);
    const TTL_const_ext_ulong_tensor_t import_from = TTL_create_const_ext_tensor(
        db->common.ext_tensor_in.base, next_tile.shape, db->common.ext_tensor_in.layout,
        next_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(next_tile) == false) {
        TTL_import_sub_tensor(import_to, import_from, db->event);
    }

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);

    const TTL_layout_t prev_int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_int_ulong_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, prev_int_layout,
        db->common.ext_tensor_in, db->prev_tile.offset);

    db->prev_tile = next_tile;

    return result;
}

/**
 * @brief Wait for the previous export operation to complete before beginning an
 * export of the next tile.
 *
 * @param db A TTL_export_double_buffering_t describing the attributes of the
 * transfer
 * @param tile_current A description of the tile to begin exporting.
 */
static inline TTL_int_ulong_sub_tensor_t __attribute__((overloadable)) TTL_step_buffering(
    TTL_export_double_const_ulong_tensor_buffering_t *const db, const TTL_tile_t tile_current) {
    const TTL_layout_t int_layout = TTL_create_layout(db->prev_tile.shape.width, db->prev_tile.shape.height);
    const TTL_const_int_ulong_tensor_t export_from = TTL_create_const_int_tensor(
        db->common.int_base[db->common.index], db->prev_tile.shape, int_layout, db->common.ext_tensor_in.elem_size);
    const TTL_ext_ulong_tensor_t export_to = TTL_create_ext_tensor(
        db->common.ext_tensor_in.base, db->prev_tile.shape, db->common.ext_tensor_in.layout,
        db->prev_tile.offset, db->common.ext_tensor_in.elem_size);

    TTL_wait(1, db->event);

    if (TTL_tile_empty(db->prev_tile) == false)
        TTL_export(*TTL_to_void_tensor(TTL_to_const_tensor(&export_from)), *TTL_to_void_tensor(&export_to), db->event);

    db->common.index = (db->common.index + 1) % 2;  // TTL_ARRAYSIZE(db->common.int_base);
    const TTL_layout_t curr_int_layout = TTL_create_layout(tile_current.shape.width, tile_current.shape.height);
    const TTL_int_ulong_sub_tensor_t result = TTL_create_int_sub_tensor(
        db->common.int_base[db->common.index], tile_current.shape, curr_int_layout,
        db->common.ext_tensor_in, tile_current.offset);
    db->prev_tile = tile_current;

    return result;
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_import_double_const_ulong_tensor_buffering_t *import_double_buffering) {
    (void)import_double_buffering;
    // Nothing to do.
}

static inline void __attribute__((overloadable)) TTL_finish_buffering(
    TTL_export_double_const_ulong_tensor_buffering_t *export_double_buffering) {
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
    TTL_step_buffering(export_double_buffering, TTL_create_empty_tile());
}
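
On the export side the pattern mirrors the import loop: each TTL_step_buffering call hands back the local sub-tensor to fill for the current tile and queues the export of the tile written on the previous iteration, and TTL_finish_buffering's two empty-tile steps drain the pipeline (the first queues the export of the final real tile, the second waits for that export). A minimal sketch, again with the initialisation of export_db, number_of_tiles, tile_of() and produce_into() as hypothetical placeholders:

    for (int i = 0; i < number_of_tiles; ++i) {
        const TTL_tile_t tile = tile_of(i);  // hypothetical tiler lookup

        // Returns the local buffer to write tile 'i' into and queues the export
        // of the tile produced on the previous iteration.
        const TTL_int_void_sub_tensor_t to_fill = TTL_step_buffering(&export_db, tile);
        produce_into(to_fill);
    }

    // Flush: two empty-tile steps queue the last export and then wait for it.
    TTL_finish_buffering(&export_db);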