208 tile_current_import.
shape,
210 tile_current_import.
offset,
214 tile_current_import.
shape,
217 tile_current_import.
offset);
225 &(*duplex_buffering->
events)[0]);
230 &(*duplex_buffering->
events)[1]);
235 tile_current_export.
shape,
237 tile_current_export.
offset,
241 tile_current_export.
shape,
244 tile_current_export.
offset);
465 tile_current_import.
shape,
467 tile_current_import.
offset,
471 tile_current_import.
shape,
474 tile_current_import.
offset);
482 &(*duplex_buffering->
events)[0]);
487 &(*duplex_buffering->
events)[1]);
492 tile_current_export.
shape,
494 tile_current_export.
offset,
498 tile_current_export.
shape,
501 tile_current_export.
offset);
722 tile_current_import.
shape,
724 tile_current_import.
offset,
728 tile_current_import.
shape,
731 tile_current_import.
offset);
739 &(*duplex_buffering->
events)[0]);
744 &(*duplex_buffering->
events)[1]);
750 tile_current_export.
shape,
752 tile_current_export.
offset,
756 tile_current_export.
shape,
759 tile_current_export.
offset);
980 tile_current_import.
shape,
982 tile_current_import.
offset,
986 tile_current_import.
shape,
989 tile_current_import.
offset);
997 &(*duplex_buffering->
events)[0]);
1002 &(*duplex_buffering->
events)[1]);
1007 tile_current_export.
shape,
1009 tile_current_export.
offset,
1013 tile_current_export.
shape,
1016 tile_current_export.
offset);
1237 tile_current_import.
shape,
1239 tile_current_import.
offset,
1243 tile_current_import.
shape,
1246 tile_current_import.
offset);
1254 &(*duplex_buffering->
events)[0]);
1259 &(*duplex_buffering->
events)[1]);
1264 tile_current_export.
shape,
1266 tile_current_export.
offset,
1270 tile_current_export.
shape,
1273 tile_current_export.
offset);
1494 tile_current_import.
shape,
1496 tile_current_import.
offset,
1500 tile_current_import.
shape,
1503 tile_current_import.
offset);
1511 &(*duplex_buffering->
events)[0]);
1516 &(*duplex_buffering->
events)[1]);
1522 tile_current_export.
shape,
1524 tile_current_export.
offset,
1528 tile_current_export.
shape,
1531 tile_current_export.
offset);
1752 tile_current_import.
shape,
1754 tile_current_import.
offset,
1758 tile_current_import.
shape,
1761 tile_current_import.
offset);
1769 &(*duplex_buffering->
events)[0]);
1774 &(*duplex_buffering->
events)[1]);
1780 tile_current_export.
shape,
1782 tile_current_export.
offset,
1786 tile_current_export.
shape,
1789 tile_current_export.
offset);
2010 tile_current_import.
shape,
2012 tile_current_import.
offset,
2016 tile_current_import.
shape,
2019 tile_current_import.
offset);
2027 &(*duplex_buffering->
events)[0]);
2032 &(*duplex_buffering->
events)[1]);
2037 tile_current_export.
shape,
2039 tile_current_export.
offset,
2043 tile_current_export.
shape,
2046 tile_current_export.
offset);
2267 tile_current_import.
shape,
2269 tile_current_import.
offset,
2273 tile_current_import.
shape,
2276 tile_current_import.
offset);
2284 &(*duplex_buffering->
events)[0]);
2289 &(*duplex_buffering->
events)[1]);
2295 tile_current_export.
shape,
2297 tile_current_export.
offset,
2301 tile_current_export.
shape,
2304 tile_current_export.
offset);
static TTL_io_void_tensor_t TTL_step_buffering(TTL_duplex_const_void_tensor_buffering_t *const duplex_buffering, TTL_tile_t tile_next_import, TTL_tile_t tile_current_export)
static void TTL_finish_buffering(TTL_duplex_const_void_tensor_buffering_t *const duplex_buffering)
static TTL_duplex_const_void_tensor_buffering_t TTL_start_duplex_buffering(TTL_ext_void_tensor_t ext_tensor_in, __local void *int_base_in, TTL_ext_void_tensor_t ext_tensor_out, __local void *int_base_out, TTL_event_t(*events)[2], TTL_tile_t first_tile)
Create a TTL_DUPLEX_BUFFERING_TYPE and begin the buffering process.
static TTL_ext_void_tensor_t TTL_create_empty_ext_tensor(__global void *unused)
static TTL_ext_void_tensor_t TTL_create_ext_tensor(__global void *base, const TTL_shape_t shape, const TTL_layout_t layout, const TTL_offset_t offset, const TTL_dim_t elem_size)
const and non-const tensor creation functions.
static TTL_const_int_void_tensor_t TTL_create_empty_const_int_tensor(__local void *unused)
static const TTL_const_ext_void_tensor_t * TTL_to_const_tensor(const TTL_ext_void_tensor_t *const tensor)
static TTL_int_void_sub_tensor_t TTL_create_int_sub_tensor(__local void *base, const TTL_shape_t shape, const TTL_layout_t layout, const TTL_dim_t elem_size, const TTL_offset_t offset, const TTL_shape_t origin_shape, TTL_offset_t origin_offset)
const and non-const sub tensor creation functions.
static bool TTL_const_int_tensor_empty(TTL_const_int_void_tensor_t tensor)
static const TTL_ext_void_tensor_t * TTL_to_void_tensor(const TTL_ext_void_tensor_t *tensor)
static const TTL_ext_void_sub_tensor_t * TTL_to_void_sub_tensor(const TTL_ext_void_sub_tensor_t *tensor)
static TTL_const_ext_void_tensor_t TTL_create_const_ext_tensor(__global const void *base, const TTL_shape_t shape, const TTL_layout_t layout, const TTL_offset_t offset, const TTL_dim_t elem_size)
Create an internal tensor via TTL_create_int_tensor_impl.
static TTL_io_void_tensor_t TTL_create_io_tensors(TTL_int_void_sub_tensor_t imported_to, TTL_int_void_sub_tensor_t to_export_from)
Create a TTL_io_tensors_t from a pair of tensors.
static TTL_layout_t TTL_create_layout(void)
Create a 1D Description of a Tensor layout in memory.
static int TTL_tile_empty(TTL_tile_t tile)
Check if the tile passed is empty.
static TTL_tile_t TTL_create_empty_tile()
Create an empty tile. Empty means it has all dimensions set to zero.
static void TTL_import_sub_tensor(const TTL_int_void_sub_tensor_t internal_sub_tensor, const TTL_const_ext_void_tensor_t const_external_tensor, TTL_event_t *event)
Implementation of TTL_import_sub_tensor.
static void TTL_export(const TTL_const_int_void_tensor_t internal_tensor, const TTL_ext_void_tensor_t external_tensor, TTL_event_t *event)
Export the internal tensor to the external tensor returning when complete.
event_t TTL_event_t
TTL_event_t is a pseudonym for OpenCL event_t.
#define __global
The OpenCL __global namespace is not supported in C.
#define __local
The OpenCL __local namespace is not supported in C.
unsigned char uchar
OpenCL supports uchar, which is not part of C, so provide the same in c.
unsigned long ulong
OpenCL supports ulong so provide the same in c.
unsigned int uint
OpenCL supports uint so provide the same in c.
unsigned short ushort
OpenCL supports ushort so provide the same in c.
static void TTL_wait(const int num_events, TTL_event_t *const events)
Data required to perform duplex buffer pipelining.
struct TTL_duplex_const_char_tensor_buffering_t::@064104237117137030117227234105063000235053070337 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
TTL_ext_char_tensor_t ext_tensor_in
TTL_ext_char_tensor_t ext_tensor_out
__local char * int_base[2]
struct TTL_duplex_const_char_tensor_buffering_t::@014370002005176330316356021025342266164365046356 common
The information that is common to all pipeline schemes.
TTL_const_int_char_tensor_t to_export_from
TTL_ext_char_tensor_t to_export_to
Data required to perform duplex buffer pipelining.
__local int * int_base[2]
TTL_ext_int_tensor_t ext_tensor_in
TTL_const_int_int_tensor_t to_export_from
struct TTL_duplex_const_int_tensor_buffering_t::@113001270154235351227363303241326346012173053057 common
The information that is common to all pipeline schemes.
TTL_ext_int_tensor_t ext_tensor_out
struct TTL_duplex_const_int_tensor_buffering_t::@071005333241035110075205003246037135223123217036 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
TTL_ext_int_tensor_t to_export_to
Data required to perform duplex buffer pipelining.
TTL_const_int_long_tensor_t to_export_from
TTL_ext_long_tensor_t ext_tensor_in
TTL_ext_long_tensor_t to_export_to
__local long * int_base[2]
struct TTL_duplex_const_long_tensor_buffering_t::@227070227032076302161007067253066031170102114367 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
TTL_ext_long_tensor_t ext_tensor_out
struct TTL_duplex_const_long_tensor_buffering_t::@036044252256072122062226156361073353275230253054 common
The information that is common to all pipeline schemes.
Data required to perform duplex buffer pipelining.
TTL_ext_short_tensor_t ext_tensor_in
TTL_ext_short_tensor_t to_export_to
struct TTL_duplex_const_short_tensor_buffering_t::@024114216310342034132013367354036153175252073102 common
The information that is common to all pipeline schemes.
__local short * int_base[2]
struct TTL_duplex_const_short_tensor_buffering_t::@264127032051024102251273212160044224162003153127 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
TTL_const_int_short_tensor_t to_export_from
TTL_ext_short_tensor_t ext_tensor_out
Data required to perform duplex buffer pipelining.
TTL_ext_uchar_tensor_t to_export_to
TTL_ext_uchar_tensor_t ext_tensor_out
TTL_ext_uchar_tensor_t ext_tensor_in
struct TTL_duplex_const_uchar_tensor_buffering_t::@354204013312214235270256355316357054356270012252 common
The information that is common to all pipeline schemes.
__local uchar * int_base[2]
TTL_const_int_uchar_tensor_t to_export_from
struct TTL_duplex_const_uchar_tensor_buffering_t::@301074276244055064044151350374040042161001165123 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
Data required to perform duplex buffer pipelining.
__local uint * int_base[2]
TTL_ext_uint_tensor_t ext_tensor_in
struct TTL_duplex_const_uint_tensor_buffering_t::@103023100132272271317246162104057304263260357164 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
struct TTL_duplex_const_uint_tensor_buffering_t::@321372255121242324312262334377057314147273272231 common
The information that is common to all pipeline schemes.
TTL_ext_uint_tensor_t to_export_to
TTL_const_int_uint_tensor_t to_export_from
TTL_ext_uint_tensor_t ext_tensor_out
Data required to perform duplex buffer pipelining.
TTL_ext_ulong_tensor_t to_export_to
TTL_const_int_ulong_tensor_t to_export_from
struct TTL_duplex_const_ulong_tensor_buffering_t::@043337237103076030355350231302054342062327342307 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
TTL_ext_ulong_tensor_t ext_tensor_out
struct TTL_duplex_const_ulong_tensor_buffering_t::@377050367376170127244301220122253317167004362016 common
The information that is common to all pipeline schemes.
TTL_ext_ulong_tensor_t ext_tensor_in
__local ulong * int_base[2]
Data required to perform duplex buffer pipelining.
TTL_const_int_ushort_tensor_t to_export_from
TTL_ext_ushort_tensor_t to_export_to
struct TTL_duplex_const_ushort_tensor_buffering_t::@242202362352162060271227351266124343164075120060 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
__local ushort * int_base[2]
TTL_ext_ushort_tensor_t ext_tensor_out
TTL_ext_ushort_tensor_t ext_tensor_in
struct TTL_duplex_const_ushort_tensor_buffering_t::@050015212130100113116344316351156367204324277022 common
The information that is common to all pipeline schemes.
Data required to perform duplex buffer pipelining.
TTL_ext_void_tensor_t to_export_to
__local void * int_base[2]
TTL_ext_void_tensor_t ext_tensor_out
struct TTL_duplex_const_void_tensor_buffering_t::@010107256062204117376046335161267266024366237051 common
The information that is common to all pipeline schemes.
TTL_const_int_void_tensor_t to_export_from
struct TTL_duplex_const_void_tensor_buffering_t::@001051164243227154221035355330234222317337335265 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
TTL_ext_void_tensor_t ext_tensor_in
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const sub tensors in the appropriate address space
TTL_int_char_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_int_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_long_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_short_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_uchar_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_uint_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_ulong_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_ushort_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_void_tensor_t tensor
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Description of a Tensor layout in memory.
TTL_dim_t width
Number of elements along dimension x.
TTL_dim_t height
Number of rows along dimension y.