208 tile_current_import.
shape,
210 tile_current_import.
offset,
214 tile_current_import.
shape,
217 tile_current_import.
offset);
225 &(*duplex_buffering->
events)[0]);
230 &(*duplex_buffering->
events)[1]);
235 tile_current_export.
shape,
237 tile_current_export.
offset,
241 tile_current_export.
shape,
244 tile_current_export.
offset);
465 tile_current_import.
shape,
467 tile_current_import.
offset,
471 tile_current_import.
shape,
474 tile_current_import.
offset);
482 &(*duplex_buffering->
events)[0]);
487 &(*duplex_buffering->
events)[1]);
492 tile_current_export.
shape,
494 tile_current_export.
offset,
498 tile_current_export.
shape,
501 tile_current_export.
offset);
722 tile_current_import.
shape,
724 tile_current_import.
offset,
728 tile_current_import.
shape,
731 tile_current_import.
offset);
739 &(*duplex_buffering->
events)[0]);
744 &(*duplex_buffering->
events)[1]);
750 tile_current_export.
shape,
752 tile_current_export.
offset,
756 tile_current_export.
shape,
759 tile_current_export.
offset);
980 tile_current_import.
shape,
982 tile_current_import.
offset,
986 tile_current_import.
shape,
989 tile_current_import.
offset);
997 &(*duplex_buffering->
events)[0]);
1002 &(*duplex_buffering->
events)[1]);
1007 tile_current_export.
shape,
1009 tile_current_export.
offset,
1013 tile_current_export.
shape,
1016 tile_current_export.
offset);
1237 tile_current_import.
shape,
1239 tile_current_import.
offset,
1243 tile_current_import.
shape,
1246 tile_current_import.
offset);
1254 &(*duplex_buffering->
events)[0]);
1259 &(*duplex_buffering->
events)[1]);
1264 tile_current_export.
shape,
1266 tile_current_export.
offset,
1270 tile_current_export.
shape,
1273 tile_current_export.
offset);
1494 tile_current_import.
shape,
1496 tile_current_import.
offset,
1500 tile_current_import.
shape,
1503 tile_current_import.
offset);
1511 &(*duplex_buffering->
events)[0]);
1516 &(*duplex_buffering->
events)[1]);
1522 tile_current_export.
shape,
1524 tile_current_export.
offset,
1528 tile_current_export.
shape,
1531 tile_current_export.
offset);
1752 tile_current_import.
shape,
1754 tile_current_import.
offset,
1758 tile_current_import.
shape,
1761 tile_current_import.
offset);
1769 &(*duplex_buffering->
events)[0]);
1774 &(*duplex_buffering->
events)[1]);
1780 tile_current_export.
shape,
1782 tile_current_export.
offset,
1786 tile_current_export.
shape,
1789 tile_current_export.
offset);
2010 tile_current_import.
shape,
2012 tile_current_import.
offset,
2016 tile_current_import.
shape,
2019 tile_current_import.
offset);
2027 &(*duplex_buffering->
events)[0]);
2032 &(*duplex_buffering->
events)[1]);
2037 tile_current_export.
shape,
2039 tile_current_export.
offset,
2043 tile_current_export.
shape,
2046 tile_current_export.
offset);
2267 tile_current_import.
shape,
2269 tile_current_import.
offset,
2273 tile_current_import.
shape,
2276 tile_current_import.
offset);
2284 &(*duplex_buffering->
events)[0]);
2289 &(*duplex_buffering->
events)[1]);
2295 tile_current_export.
shape,
2297 tile_current_export.
offset,
2301 tile_current_export.
shape,
2304 tile_current_export.
offset);
static int TTL_tile_empty(TTL_tile_t tile)
Check if the tile passed is empty.
static TTL_tile_t TTL_create_empty_tile()
Create an empty tile. Empty means it has all dimensions set to zero.
event_t TTL_event_t
TTL_event_t is an alias for the OpenCL event_t type.
#define __global
The OpenCL __global address space qualifier is not supported in C, so it is defined to expand to nothing.
#define __local
The OpenCL __local address space qualifier is not supported in C, so it is defined to expand to nothing.
unsigned char uchar
OpenCL supports uchar, so the same type is provided in C.
unsigned long ulong
OpenCL supports ulong, so the same type is provided in C.
unsigned int uint
OpenCL supports uint, so the same type is provided in C.
unsigned short ushort
OpenCL supports ushort, so the same type is provided in C.
static void TTL_wait(const int num_events, TTL_event_t *const events)
static TTL_ext_void_tensor_t TTL_create_empty_ext_tensor(__global void *unused)
static TTL_ext_void_tensor_t TTL_create_ext_tensor(__global void *base, const TTL_shape_t shape, const TTL_layout_t layout, const TTL_offset_t offset, const TTL_dim_t elem_size)
const and non-const tensor creation functions.
static TTL_const_int_void_tensor_t TTL_create_empty_const_int_tensor(__local void *unused)
static const TTL_const_ext_void_tensor_t * TTL_to_const_tensor(const TTL_ext_void_tensor_t *const tensor)
static TTL_int_void_sub_tensor_t TTL_create_int_sub_tensor(__local void *base, const TTL_shape_t shape, const TTL_layout_t layout, const TTL_dim_t elem_size, const TTL_offset_t offset, const TTL_shape_t origin_shape, TTL_offset_t origin_offset)
const and non-const sub tensor creation functions.
static bool TTL_const_int_tensor_empty(TTL_const_int_void_tensor_t tensor)
static const TTL_ext_void_tensor_t * TTL_to_void_tensor(const TTL_ext_void_tensor_t *tensor)
static const TTL_ext_void_sub_tensor_t * TTL_to_void_sub_tensor(const TTL_ext_void_sub_tensor_t *tensor)
static TTL_const_ext_void_tensor_t TTL_create_const_ext_tensor(__global const void *base, const TTL_shape_t shape, const TTL_layout_t layout, const TTL_offset_t offset, const TTL_dim_t elem_size)
Create a tensor via TTL_create_int_tensor_impl.
static TTL_layout_t TTL_create_layout(void)
Create a 1D Description of a Tensor layout in memory.
static void TTL_import_sub_tensor(const TTL_int_void_sub_tensor_t internal_sub_tensor, const TTL_const_ext_void_tensor_t const_external_tensor, TTL_event_t *event)
Implementation of TTL_import_sub_tensor.
static void TTL_export(const TTL_const_int_void_tensor_t internal_tensor, const TTL_ext_void_tensor_t external_tensor, TTL_event_t *event)
Export the internal tensor to the external tensor, returning when complete.
static TTL_io_void_tensor_t TTL_step_buffering(TTL_duplex_const_void_tensor_buffering_t *const duplex_buffering, TTL_tile_t tile_next_import, TTL_tile_t tile_current_export)
static void TTL_finish_buffering(TTL_duplex_const_void_tensor_buffering_t *const duplex_buffering)
static TTL_duplex_const_void_tensor_buffering_t TTL_start_duplex_buffering(TTL_ext_void_tensor_t ext_tensor_in, __local void *int_base_in, TTL_ext_void_tensor_t ext_tensor_out, __local void *int_base_out, TTL_event_t(*events)[2], TTL_tile_t first_tile)
Create a TTL_DUPLEX_BUFFERING_TYPE and begin the buffering process.
static TTL_io_void_tensor_t TTL_create_io_tensors(TTL_int_void_sub_tensor_t imported_to, TTL_int_void_sub_tensor_t to_export_from)
Create a TTL_io_tensors_t from a pair of tensors.
Data required to perform duplex buffer pipelining.
TTL_ext_char_tensor_t ext_tensor_in
TTL_ext_char_tensor_t ext_tensor_out
__local char * int_base[2]
struct TTL_duplex_const_char_tensor_buffering_t::@312336245141017307151106262305266046224352236304 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
TTL_const_int_char_tensor_t to_export_from
TTL_ext_char_tensor_t to_export_to
struct TTL_duplex_const_char_tensor_buffering_t::@245341322112234032013261207342004241006352011135 common
The information that is common to all pipeline schemes.
Data required to perform duplex buffer pipelining.
__local int * int_base[2]
TTL_ext_int_tensor_t ext_tensor_in
TTL_const_int_int_tensor_t to_export_from
struct TTL_duplex_const_int_tensor_buffering_t::@116277316214163343364014260036133346034027220211 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
TTL_ext_int_tensor_t ext_tensor_out
struct TTL_duplex_const_int_tensor_buffering_t::@031321325003017241265247300222255264315331130252 common
The information that is common to all pipeline schemes.
TTL_ext_int_tensor_t to_export_to
Data required to perform duplex buffer pipelining.
TTL_const_int_long_tensor_t to_export_from
TTL_ext_long_tensor_t ext_tensor_in
TTL_ext_long_tensor_t to_export_to
struct TTL_duplex_const_long_tensor_buffering_t::@221205170375214144022317366166043265052152204367 common
The information that is common to all pipeline schemes.
__local long * int_base[2]
struct TTL_duplex_const_long_tensor_buffering_t::@223266115357270112337123312124102216016115241150 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
TTL_ext_long_tensor_t ext_tensor_out
Data required to perform duplex buffer pipelining.
TTL_ext_short_tensor_t ext_tensor_in
TTL_ext_short_tensor_t to_export_to
__local short * int_base[2]
struct TTL_duplex_const_short_tensor_buffering_t::@031117032142060271314225205226100011031155213012 common
The information that is common to all pipeline schemes.
struct TTL_duplex_const_short_tensor_buffering_t::@305005375365367365347353320116135040305054120071 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
TTL_const_int_short_tensor_t to_export_from
TTL_ext_short_tensor_t ext_tensor_out
Data required to perform duplex buffer pipelining.
TTL_ext_uchar_tensor_t to_export_to
TTL_ext_uchar_tensor_t ext_tensor_out
TTL_ext_uchar_tensor_t ext_tensor_in
struct TTL_duplex_const_uchar_tensor_buffering_t::@374075167261217125143156144124016260216104207155 common
The information that is common to all pipeline schemes.
__local uchar * int_base[2]
TTL_const_int_uchar_tensor_t to_export_from
struct TTL_duplex_const_uchar_tensor_buffering_t::@171067352313165204145131272276137230141144106114 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
Data required to perform duplex buffer pipelining.
__local uint * int_base[2]
TTL_ext_uint_tensor_t ext_tensor_in
struct TTL_duplex_const_uint_tensor_buffering_t::@264164370043316050136307275215132337220117222030 common
The information that is common to all pipeline schemes.
TTL_ext_uint_tensor_t to_export_to
struct TTL_duplex_const_uint_tensor_buffering_t::@314044264022165160140333154000234013010035000255 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
TTL_const_int_uint_tensor_t to_export_from
TTL_ext_uint_tensor_t ext_tensor_out
Data required to perform duplex buffer pipelining.
TTL_ext_ulong_tensor_t to_export_to
TTL_const_int_ulong_tensor_t to_export_from
TTL_ext_ulong_tensor_t ext_tensor_out
struct TTL_duplex_const_ulong_tensor_buffering_t::@215357065262240261023047264364005000002326270252 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
TTL_ext_ulong_tensor_t ext_tensor_in
struct TTL_duplex_const_ulong_tensor_buffering_t::@041036332263005324072266141315327247142350001176 common
The information that is common to all pipeline schemes.
__local ulong * int_base[2]
Data required to perform duplex buffer pipelining.
TTL_const_int_ushort_tensor_t to_export_from
struct TTL_duplex_const_ushort_tensor_buffering_t::@075121152345321007207064066154035107000222121371 common
The information that is common to all pipeline schemes.
TTL_ext_ushort_tensor_t to_export_to
__local ushort * int_base[2]
TTL_ext_ushort_tensor_t ext_tensor_out
TTL_ext_ushort_tensor_t ext_tensor_in
struct TTL_duplex_const_ushort_tensor_buffering_t::@264222272105300110370077135311164066262005000203 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
Data required to perform duplex buffer pipelining.
struct TTL_duplex_const_void_tensor_buffering_t::@075250261337271136226144050104370310367152033173 common
The information that is common to all pipeline schemes.
TTL_ext_void_tensor_t to_export_to
__local void * int_base[2]
TTL_ext_void_tensor_t ext_tensor_out
TTL_const_int_void_tensor_t to_export_from
struct TTL_duplex_const_void_tensor_buffering_t::@050373116174146317217155137130151305330353001362 prev_out_tensors
Store of the buffers used for the previous import/export cycles.
TTL_ext_void_tensor_t ext_tensor_in
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const tensors in the appropriate address space
const and non-const sub tensors in the appropriate address space
TTL_int_char_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_int_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_long_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_short_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_uchar_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_uint_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_ulong_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_ushort_tensor_t tensor
const and non-const sub tensors in the appropriate address space
TTL_int_void_tensor_t tensor
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Describes a pair of internal Tensors after an operation.
Description of a Tensor layout in memory.
TTL_dim_t width
Number of elements along dimension x.
TTL_dim_t height
Number of rows along dimension y.