/*
 * This file contains low-level loops for copying and byte-swapping
 * strided data.
 *
 * Copyright (c) 2010 by Mark Wiebe (mwwiebe@gmail.com)
 * The University of British Columbia
 *
 * See LICENSE.txt for the license.
 */

#define PY_SSIZE_T_CLEAN
#include "Python.h"

#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
#include <numpy/arrayobject.h>
#include <numpy/npy_cpu.h>
#include <numpy/halffloat.h>

#include "lowlevel_strided_loops.h"
#include "array_assign.h"

/*
 * The x86 platform works with unaligned access, but the C standard
 * allows the compiler to assume all data is aligned to its size. The
 * compiler can therefore vectorize, peeling the loop only by the size
 * of the type; if the data is not aligned to this size, one ends up
 * with data that is not correctly aligned for SSE instructions
 * (16 bytes).
 * So this flag can only be enabled if autovectorization is disabled.
 */
#if NPY_CPU_HAVE_UNALIGNED_ACCESS
#  define NPY_USE_UNALIGNED_ACCESS 0
#else
#  define NPY_USE_UNALIGNED_ACCESS 0
#endif

#define _NPY_NOP1(x) (x)
#define _NPY_NOP2(x) (x)
#define _NPY_NOP4(x) (x)
#define _NPY_NOP8(x) (x)

#define _NPY_SWAP2(x) npy_bswap2(x)

#define _NPY_SWAP4(x) npy_bswap4(x)

#define _NPY_SWAP_PAIR4(x) (((((npy_uint32)x)&0xffu) << 8) | \
                       ((((npy_uint32)x)&0xff00u) >> 8) | \
                       ((((npy_uint32)x)&0xff0000u) << 8) | \
                       ((((npy_uint32)x)&0xff000000u) >> 8))

#define _NPY_SWAP8(x) npy_bswap8(x)

#define _NPY_SWAP_PAIR8(x) (((((npy_uint64)x)&0xffULL) << 24) | \
                       ((((npy_uint64)x)&0xff00ULL) << 8) | \
                       ((((npy_uint64)x)&0xff0000ULL) >> 8) | \
                       ((((npy_uint64)x)&0xff000000ULL) >> 24) | \
                       ((((npy_uint64)x)&0xff00000000ULL) << 24) | \
                       ((((npy_uint64)x)&0xff0000000000ULL) << 8) | \
                       ((((npy_uint64)x)&0xff000000000000ULL) >> 8) | \
                       ((((npy_uint64)x)&0xff00000000000000ULL) >> 24))
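
/*
 * The _NPY_SWAP_PAIR* macros byte-swap the two halves of a value
 * independently: _NPY_SWAP_PAIR4 byte-reverses each 16-bit half of a
 * 32-bit value, and _NPY_SWAP_PAIR8 byte-reverses each 32-bit half of
 * a 64-bit value. This is the byte-order conversion needed for complex
 * types, where the real and imaginary parts must each be swapped
 * separately rather than reversing the element as a whole.
 */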

#define _NPY_SWAP_INPLACE2(x) npy_bswap2_unaligned(x)

#define _NPY_SWAP_INPLACE4(x) npy_bswap4_unaligned(x)

#define _NPY_SWAP_INPLACE8(x) npy_bswap8_unaligned(x)

#define _NPY_SWAP_INPLACE16(x) { \
        char a = (x)[0]; (x)[0] = (x)[15]; (x)[15] = a; \
        a = (x)[1]; (x)[1] = (x)[14]; (x)[14] = a; \
        a = (x)[2]; (x)[2] = (x)[13]; (x)[13] = a; \
        a = (x)[3]; (x)[3] = (x)[12]; (x)[12] = a; \
        a = (x)[4]; (x)[4] = (x)[11]; (x)[11] = a; \
        a = (x)[5]; (x)[5] = (x)[10]; (x)[10] = a; \
        a = (x)[6]; (x)[6] = (x)[9]; (x)[9] = a; \
        a = (x)[7]; (x)[7] = (x)[8]; (x)[8] = a; \
        }

/************* STRIDED COPYING/SWAPPING SPECIALIZED FUNCTIONS *************/
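
/*
 * A note on the templating below: this is a NumPy ".c.src" file, and
 * each begin/end repeat block is expanded at build time into one copy
 * per combination of the parameters it lists, with each @param@
 * reference replaced by the corresponding value. The three nested
 * repeats that follow generate one specialized loop per element size,
 * per src/dst contiguity combination, and per alignment/swap variant,
 * producing names such as _aligned_swap_strided_to_contig_size4.
 */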

/**begin repeat
 * #elsize = 1, 2, 4, 8, 16#
 * #elsize_half = 0, 1, 2, 4, 8#
 * #type = npy_uint8, npy_uint16, npy_uint32, npy_uint64, npy_uint64#
 */
/**begin repeat1
 * #oper = strided_to_strided, strided_to_contig,
 *         contig_to_strided, contig_to_contig#
 * #src_contig = 0, 0, 1, 1#
 * #dst_contig = 0, 1, 0, 1#
 */
/**begin repeat2
 * #swap = _NPY_NOP, _NPY_NOP, _NPY_SWAP_INPLACE, _NPY_SWAP,
 *         _NPY_SWAP_INPLACE, _NPY_SWAP_PAIR#
 * #prefix = , _aligned, _swap, _aligned_swap, _swap_pair, _aligned_swap_pair#
 * #is_aligned = 0, 1, 0, 1, 0, 1#
 * #minelsize = 1, 1, 2, 2, 4, 4#
 * #is_swap = 0, 0, 1, 1, 2, 2#
 */

#if (@elsize@ >= @minelsize@) && \
    (@elsize@ > 1 || @is_aligned@) && \
    (!NPY_USE_UNALIGNED_ACCESS || @is_aligned@)


#if @is_swap@ || @src_contig@ == 0 || @dst_contig@ == 0
/*
 * Unrolling gains about 20-50% if the copy can be done in one mov
 * instruction; if not, it can decrease performance.
 * Tested to improve performance on Intel Xeon 5x/7x, Core2Duo and
 * AMD Phenom X4.
 */
static int
#if @is_aligned@ && @is_swap@ == 0 && @elsize@ <= NPY_SIZEOF_INTP
    NPY_GCC_UNROLL_LOOPS
#endif
@prefix@_@oper@_size@elsize@(char *dst, npy_intp dst_stride,
                        char *src, npy_intp src_stride,
                        npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
                        NpyAuxData *NPY_UNUSED(data))
{
#if @is_aligned@
    /* sanity check */
    assert(N == 0 || npy_is_aligned(dst, _UINT_ALIGN(@type@)));
    assert(N == 0 || npy_is_aligned(src, _UINT_ALIGN(@type@)));
#endif
    /*printf("fn @prefix@_@oper@_size@elsize@\n");*/
    while (N > 0) {
#if @is_aligned@

        /* aligned copy and swap */
#  if @elsize@ != 16
        (*((@type@ *)dst)) = @swap@@elsize@(*((@type@ *)src));
#  else
#    if @is_swap@ == 0
        (*((npy_uint64 *)dst)) = (*((npy_uint64 *)src));
        (*((npy_uint64 *)dst + 1)) = (*((npy_uint64 *)src + 1));
#    elif @is_swap@ == 1
        (*((npy_uint64 *)dst)) = _NPY_SWAP8(*((npy_uint64 *)src + 1));
        (*((npy_uint64 *)dst + 1)) = _NPY_SWAP8(*((npy_uint64 *)src));
#    elif @is_swap@ == 2
        (*((npy_uint64 *)dst)) = _NPY_SWAP8(*((npy_uint64 *)src));
        (*((npy_uint64 *)dst + 1)) = _NPY_SWAP8(*((npy_uint64 *)src + 1));
#    endif
#  endif

#else

        /* unaligned copy and swap */
        memmove(dst, src, @elsize@);
#  if @is_swap@ == 1
        @swap@@elsize@(dst);
#  elif @is_swap@ == 2
        @swap@@elsize_half@(dst);
        @swap@@elsize_half@(dst + @elsize_half@);
#  endif

#endif

#if @dst_contig@
        dst += @elsize@;
#else
        dst += dst_stride;
#endif

#if @src_contig@
        src += @elsize@;
#else
        src += src_stride;
#endif

        --N;
    }
    return 0;
}
#endif


/*
 * Specialized copy and swap for source stride 0. Interestingly,
 * unrolling here, as above, is only marginally profitable for small
 * types and detrimental for >= 8-byte moves on x86, but it profits
 * from the vectorization enabled with -O3.
 */
#if (@src_contig@ == 0) && @is_aligned@
static NPY_GCC_OPT_3 int
@prefix@_@oper@_size@elsize@_srcstride0(char *dst,
                        npy_intp dst_stride,
                        char *src, npy_intp NPY_UNUSED(src_stride),
                        npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
                        NpyAuxData *NPY_UNUSED(data))
{
#if @elsize@ != 16
#  if !(@elsize@ == 1 && @dst_contig@)
    @type@ temp;
#  endif
#else
    npy_uint64 temp0, temp1;
#endif
    if (N == 0) {
        return 0;
    }
#if @is_aligned@ && @elsize@ != 16
    /* sanity check */
    assert(N == 0 || npy_is_aligned(dst, _UINT_ALIGN(@type@)));
    assert(N == 0 || npy_is_aligned(src, _UINT_ALIGN(@type@)));
#endif
#if @elsize@ == 1 && @dst_contig@
    memset(dst, *src, N);
#else

#  if @elsize@ != 16
    temp = @swap@@elsize@(*((@type@ *)src));
#  else
#    if @is_swap@ == 0
        temp0 = (*((npy_uint64 *)src));
        temp1 = (*((npy_uint64 *)src + 1));
#    elif @is_swap@ == 1
        temp0 = _NPY_SWAP8(*((npy_uint64 *)src + 1));
        temp1 = _NPY_SWAP8(*((npy_uint64 *)src));
#    elif @is_swap@ == 2
        temp0 = _NPY_SWAP8(*((npy_uint64 *)src));
        temp1 = _NPY_SWAP8(*((npy_uint64 *)src + 1));
#    endif
#  endif

    while (N > 0) {
#  if @elsize@ != 16
        *((@type@ *)dst) = temp;
#  else
        *((npy_uint64 *)dst) = temp0;
        *((npy_uint64 *)dst + 1) = temp1;
#  endif
#  if @dst_contig@
        dst += @elsize@;
#  else
        dst += dst_stride;
#  endif
        --N;
    }
#endif/* @elsize@ == 1 && @dst_contig@ -- else */
    return 0;
}
#endif/* (@src_contig@ == 0) && @is_aligned@ */

#endif/* @elsize@ >= @minelsize@ */

/**end repeat2**/
/**end repeat1**/
/**end repeat**/
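
/*
 * For orientation, the variant generated by the repeats above for
 * @prefix@ = _aligned, @oper@ = strided_to_strided and @elsize@ = 2
 * reduces to the following sketch (the _NPY_NOP2 "swap" drops out;
 * asserts and the unrolling attribute are omitted):
 *
 *     static int
 *     _aligned_strided_to_strided_size2(char *dst, npy_intp dst_stride,
 *                             char *src, npy_intp src_stride,
 *                             npy_intp N, npy_intp src_itemsize,
 *                             NpyAuxData *data)
 *     {
 *         while (N > 0) {
 *             *((npy_uint16 *)dst) = *((npy_uint16 *)src);
 *             dst += dst_stride;
 *             src += src_stride;
 *             --N;
 *         }
 *         return 0;
 *     }
 */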

static int
_strided_to_strided(char *dst, npy_intp dst_stride,
                        char *src, npy_intp src_stride,
                        npy_intp N, npy_intp src_itemsize,
                        NpyAuxData *NPY_UNUSED(data))
{
    while (N > 0) {
        memmove(dst, src, src_itemsize);
        dst += dst_stride;
        src += src_stride;
        --N;
    }
    return 0;
}
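
/*
 * Note that the generic fallbacks here use memmove rather than memcpy:
 * memmove is well-defined even when the source and destination element
 * ranges overlap, which memcpy is not.
 */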

static int
_swap_strided_to_strided(char *dst, npy_intp dst_stride,
                        char *src, npy_intp src_stride,
                        npy_intp N, npy_intp src_itemsize,
                        NpyAuxData *NPY_UNUSED(data))
{
    char *a, *b, c;

    while (N > 0) {
        memmove(dst, src, src_itemsize);
        /* general in-place swap */
        a = dst;
        b = dst + src_itemsize - 1;
        while (a < b) {
            c = *a;
            *a = *b;
            *b = c;
            ++a; --b;
        }
        dst += dst_stride;
        src += src_stride;
        --N;
    }
    return 0;
}

static int
_swap_pair_strided_to_strided(char *dst, npy_intp dst_stride,
                        char *src, npy_intp src_stride,
                        npy_intp N, npy_intp src_itemsize,
                        NpyAuxData *NPY_UNUSED(data))
{
    char *a, *b, c;
    npy_intp itemsize_half = src_itemsize / 2;

    while (N > 0) {
        memmove(dst, src, src_itemsize);
        /* in-place swap of the first half */
        a = dst;
        b = dst + itemsize_half - 1;
        while (a < b) {
            c = *a;
            *a = *b;
            *b = c;
            ++a; --b;
        }
        /* in-place swap of the second half */
        a = dst + itemsize_half;
        b = dst + 2*itemsize_half - 1;
        while (a < b) {
            c = *a;
            *a = *b;
            *b = c;
            ++a; --b;
        }
        dst += dst_stride;
        src += src_stride;
        --N;
    }
    return 0;
}

static int
_contig_to_contig(char *dst, npy_intp NPY_UNUSED(dst_stride),
                        char *src, npy_intp NPY_UNUSED(src_stride),
                        npy_intp N, npy_intp src_itemsize,
                        NpyAuxData *NPY_UNUSED(data))
{
    memmove(dst, src, src_itemsize*N);
    return 0;
}


NPY_NO_EXPORT PyArray_StridedUnaryOp *
PyArray_GetStridedCopyFn(int aligned, npy_intp src_stride,
                         npy_intp dst_stride, npy_intp itemsize)
{
/*
 * Skip the "unaligned" versions on CPUs which support unaligned
 * memory accesses.
 */
#if !NPY_USE_UNALIGNED_ACCESS
    if (aligned) {
#endif/*!NPY_USE_UNALIGNED_ACCESS*/

        /* contiguous dst */
        if (itemsize != 0 && dst_stride == itemsize) {
            /* constant src */
            if (src_stride == 0) {
                switch (itemsize) {
/**begin repeat
 * #elsize = 1, 2, 4, 8, 16#
 */
                    case @elsize@:
                        return
                          &_aligned_strided_to_contig_size@elsize@_srcstride0;
/**end repeat**/
                }
            }
            /* contiguous src */
            else if (src_stride == itemsize) {
                return &_contig_to_contig;
            }
            /* general src */
            else {
                switch (itemsize) {
/**begin repeat
 * #elsize = 1, 2, 4, 8, 16#
 */
                    case @elsize@:
                        return &_aligned_strided_to_contig_size@elsize@;
/**end repeat**/
                }
            }

            return &_strided_to_strided;
        }
        /* general dst */
        else {
            /* constant src */
            if (src_stride == 0) {
                switch (itemsize) {
/**begin repeat
 * #elsize = 1, 2, 4, 8, 16#
 */
                    case @elsize@:
                        return
                          &_aligned_strided_to_strided_size@elsize@_srcstride0;
/**end repeat**/
                }
            }
            /* contiguous src */
            else if (src_stride == itemsize) {
                switch (itemsize) {
/**begin repeat
 * #elsize = 1, 2, 4, 8, 16#
 */
                    case @elsize@:
                        return &_aligned_contig_to_strided_size@elsize@;
/**end repeat**/
                }

                return &_strided_to_strided;
            }
            else {
                switch (itemsize) {
/**begin repeat
 * #elsize = 1, 2, 4, 8, 16#
 */
                    case @elsize@:
                        return &_aligned_strided_to_strided_size@elsize@;
/**end repeat**/
                }
            }
        }

#if !NPY_USE_UNALIGNED_ACCESS
    }
    else {
        if (itemsize != 0) {
            if (dst_stride == itemsize) {
                /* contiguous dst */
                if (src_stride == itemsize) {
                    /* contiguous src, dst */
                    return &_contig_to_contig;
                }
                else {
                    /* general src */
                    switch (itemsize) {
                        case 1:
                            return &_aligned_strided_to_contig_size1;
/**begin repeat
 * #elsize = 2, 4, 8, 16#
 */
                        case @elsize@:
                            return &_strided_to_contig_size@elsize@;
/**end repeat**/
                    }
                }

                return &_strided_to_strided;
            }
            else if (src_stride == itemsize) {
                /* contiguous src, general dst */
                switch (itemsize) {
                    case 1:
                        return &_aligned_contig_to_strided_size1;
/**begin repeat
 * #elsize = 2, 4, 8, 16#
 */
                    case @elsize@:
                        return &_contig_to_strided_size@elsize@;
/**end repeat**/
                }

                return &_strided_to_strided;
            }
        }
        else {
            /* general src, dst */
            switch (itemsize) {
                case 1:
                    return &_aligned_strided_to_strided_size1;
/**begin repeat
 * #elsize = 2, 4, 8, 16#
 */
                case @elsize@:
                    return &_strided_to_strided_size@elsize@;
/**end repeat**/
            }
        }
    }
#endif/*!NPY_USE_UNALIGNED_ACCESS*/

    return &_strided_to_strided;
}
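
/*
 * Usage sketch (illustrative, not a call site from this file): look the
 * loop up once, then apply it to a whole run of elements:
 *
 *     PyArray_StridedUnaryOp *op = PyArray_GetStridedCopyFn(
 *             aligned, src_stride, dst_stride, itemsize);
 *     op(dst, dst_stride, src, src_stride, N, itemsize, NULL);
 *
 * The size-specialized loops ignore the trailing src_itemsize and
 * auxiliary-data arguments; the generic _strided_to_strided fallback
 * reads src_itemsize for its memmove.
 */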

/*
 * PyArray_GetStridedCopySwapFn and PyArray_GetStridedCopySwapPairFn are
 * nearly identical, so a repeat can generate both of them.
 */
/**begin repeat
 * #function = PyArray_GetStridedCopySwapFn, PyArray_GetStridedCopySwapPairFn#
 * #tag = , _pair#
 * #not_pair = 1, 0#
 */

NPY_NO_EXPORT PyArray_StridedUnaryOp *
@function@(int aligned, npy_intp src_stride,
                             npy_intp dst_stride, npy_intp itemsize)
{
/*
 * Skip the "unaligned" versions on CPUs which support unaligned
 * memory accesses.
 */
#if !NPY_USE_UNALIGNED_ACCESS
    if (aligned) {
#endif/*!NPY_USE_UNALIGNED_ACCESS*/

        /* contiguous dst */
        if (itemsize != 0 && dst_stride == itemsize) {
            /* constant src */
            if (src_stride == 0) {
                switch (itemsize) {
/**begin repeat1
 * #elsize = 2, 4, 8, 16#
 */
#if @not_pair@ || @elsize@ > 2
                case @elsize@:
                    return
                 &_aligned_swap@tag@_strided_to_contig_size@elsize@_srcstride0;
#endif
/**end repeat1**/
                }
            }
            /* contiguous src */
            else if (src_stride == itemsize) {
                switch (itemsize) {
/**begin repeat1
 * #elsize = 2, 4, 8, 16#
 */
#if @not_pair@ || @elsize@ > 2
                case @elsize@:
                    return &_aligned_swap@tag@_contig_to_contig_size@elsize@;
#endif
/**end repeat1**/
                }
            }
            /* general src */
            else {
                switch (itemsize) {
/**begin repeat1
 * #elsize = 2, 4, 8, 16#
 */
#if @not_pair@ || @elsize@ > 2
                case @elsize@:
                    return &_aligned_swap@tag@_strided_to_contig_size@elsize@;
#endif
/**end repeat1**/
                }
            }
        }
        /* general dst */
        else {
            /* constant src */
            if (src_stride == 0) {
                switch (itemsize) {
/**begin repeat1
 * #elsize = 2, 4, 8, 16#
 */
#if @not_pair@ || @elsize@ > 2
                case @elsize@:
                    return
                &_aligned_swap@tag@_strided_to_strided_size@elsize@_srcstride0;
#endif
/**end repeat1**/
                }
            }
            /* contiguous src */
            else if (src_stride == itemsize) {
                switch (itemsize) {
/**begin repeat1
 * #elsize = 2, 4, 8, 16#
 */
#if @not_pair@ || @elsize@ > 2
                case @elsize@:
                    return &_aligned_swap@tag@_contig_to_strided_size@elsize@;
#endif
/**end repeat1**/
                }

                return &_swap@tag@_strided_to_strided;
            }
            else {
                switch (itemsize) {
/**begin repeat1
 * #elsize = 2, 4, 8, 16#
 */
#if @not_pair@ || @elsize@ > 2
                case @elsize@:
                    return &_aligned_swap@tag@_strided_to_strided_size@elsize@;
#endif
/**end repeat1**/
                }
            }
        }

#if !NPY_USE_UNALIGNED_ACCESS
    }
    else {
        /* contiguous dst */
        if (itemsize != 0 && dst_stride == itemsize) {
            /* contiguous src */
            if (src_stride == itemsize) {
                switch (itemsize) {
/**begin repeat1
 * #elsize = 2, 4, 8, 16#
 */
#if @not_pair@ || @elsize@ > 2
                case @elsize@:
                    return &_swap@tag@_contig_to_contig_size@elsize@;
#endif
/**end repeat1**/
                }
            }
            /* general src */
            else {
                switch (itemsize) {
/**begin repeat1
 * #elsize = 2, 4, 8, 16#
 */
#if @not_pair@ || @elsize@ > 2
                    case @elsize@:
                        return &_swap@tag@_strided_to_contig_size@elsize@;
#endif
/**end repeat1**/
                }
            }

            return &_swap@tag@_strided_to_strided;
        }
        /* general dst */
        else {
            /* contiguous src */
            if (itemsize != 0 && src_stride == itemsize) {
                switch (itemsize) {
/**begin repeat1
 * #elsize = 2, 4, 8, 16#
 */
#if @not_pair@ || @elsize@ > 2
                case @elsize@:
                    return &_swap@tag@_contig_to_strided_size@elsize@;
#endif
/**end repeat1**/
                }

                return &_swap@tag@_strided_to_strided;
            }
            /* general src */
            else {
                switch (itemsize) {
/**begin repeat1
 * #elsize = 2, 4, 8, 16#
 */
#if @not_pair@ || @elsize@ > 2
                case @elsize@:
                    return &_swap@tag@_strided_to_strided_size@elsize@;
#endif
/**end repeat1**/
                }
            }
        }
    }
#endif/*!NPY_USE_UNALIGNED_ACCESS*/

    return &_swap@tag@_strided_to_strided;
}

/**end repeat**/

/************* STRIDED CASTING SPECIALIZED FUNCTIONS *************/

/**begin repeat
 *
 * #NAME1 = BOOL,
 *          UBYTE, USHORT, UINT, ULONG, ULONGLONG,
 *          BYTE, SHORT, INT, LONG, LONGLONG,
 *          HALF, FLOAT, DOUBLE, LONGDOUBLE,
 *          CFLOAT, CDOUBLE, CLONGDOUBLE#
 * #name1 = bool,
 *          ubyte, ushort, uint, ulong, ulonglong,
 *          byte, short, int, long, longlong,
 *          half, float, double, longdouble,
 *          cfloat, cdouble, clongdouble#
 * #type1 = npy_bool,
 *          npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
 *          npy_byte, npy_short, npy_int, npy_long, npy_longlong,
 *          npy_half, npy_float, npy_double, npy_longdouble,
 *          npy_cfloat, npy_cdouble, npy_clongdouble#
 * #rtype1 = npy_bool,
 *           npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
 *           npy_byte, npy_short, npy_int, npy_long, npy_longlong,
 *           npy_half, npy_float, npy_double, npy_longdouble,
 *           npy_float, npy_double, npy_longdouble#
 * #is_bool1 = 1, 0*17#
 * #is_half1 = 0*11, 1, 0*6#
 * #is_float1 = 0*12, 1, 0, 0, 1, 0, 0#
 * #is_double1 = 0*13, 1, 0, 0, 1, 0#
 * #is_complex1 = 0*15, 1*3#
 */

/**begin repeat1
 *
 * #NAME2 = BOOL,
 *          UBYTE, USHORT, UINT, ULONG, ULONGLONG,
 *          BYTE, SHORT, INT, LONG, LONGLONG,
 *          HALF, FLOAT, DOUBLE, LONGDOUBLE,
 *          CFLOAT, CDOUBLE, CLONGDOUBLE#
 * #name2 = bool,
 *          ubyte, ushort, uint, ulong, ulonglong,
 *          byte, short, int, long, longlong,
 *          half, float, double, longdouble,
 *          cfloat, cdouble, clongdouble#
 * #type2 = npy_bool,
 *          npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
 *          npy_byte, npy_short, npy_int, npy_long, npy_longlong,
 *          npy_half, npy_float, npy_double, npy_longdouble,
 *          npy_cfloat, npy_cdouble, npy_clongdouble#
 * #rtype2 = npy_bool,
 *          npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
 *          npy_byte, npy_short, npy_int, npy_long, npy_longlong,
 *          npy_half, npy_float, npy_double, npy_longdouble,
 *          npy_float, npy_double, npy_longdouble#
 * #is_bool2 = 1, 0*17#
 * #is_half2 = 0*11, 1, 0*6#
 * #is_float2 = 0*12, 1, 0, 0, 1, 0, 0#
 * #is_double2 = 0*13, 1, 0, 0, 1, 0#
 * #is_complex2 = 0*15, 1*3#
 */

/**begin repeat2
 * #prefix = _aligned,,_aligned_contig,_contig#
 * #aligned = 1,0,1,0#
 * #contig = 0,0,1,1#
 */

#if !(NPY_USE_UNALIGNED_ACCESS && !@aligned@)

/* For half types, don't use actual double/float types in conversion */
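/*
 * npy_halfbits_to_floatbits, npy_halfbits_to_doublebits and
 * npy_floatbits_to_halfbits (from numpy/halffloat.h) operate directly
 * on the IEEE bit patterns as unsigned integers, which is why _TYPE1
 * and _TYPE2 are defined below as npy_uint32/npy_uint64 when half is
 * paired with float or double: those conversions happen purely on bit
 * patterns, without going through floating-point arithmetic.
 */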
#if @is_half1@ || @is_half2@

#  if @is_float1@
#    define _TYPE1 npy_uint32
#  elif @is_double1@
#    define _TYPE1 npy_uint64
#  else
#    define _TYPE1 @rtype1@
#  endif

#  if @is_float2@
#    define _TYPE2 npy_uint32
#  elif @is_double2@
#    define _TYPE2 npy_uint64
#  else
#    define _TYPE2 @rtype2@
#  endif

#else

#define _TYPE1 @rtype1@
#define _TYPE2 @rtype2@

#endif

/* Determine an appropriate casting conversion function */
#if @is_half1@

#  if @is_float2@
#    define _CONVERT_FN(x) npy_halfbits_to_floatbits(x)
#  elif @is_double2@
#    define _CONVERT_FN(x) npy_halfbits_to_doublebits(x)
#  elif @is_half2@
#    define _CONVERT_FN(x) (x)
#  elif @is_bool2@
#    define _CONVERT_FN(x) ((npy_bool)!npy_half_iszero(x))
#  else
#    define _CONVERT_FN(x) ((_TYPE2)npy_half_to_float(x))
#  endif

#elif @is_half2@

#  if @is_float1@
#    define _CONVERT_FN(x) npy_floatbits_to_halfbits(x)
#  elif @is_double1@
#    define _CONVERT_FN(x) npy_doublebits_to_halfbits(x)
#  else
#    define _CONVERT_FN(x) npy_float_to_half((float)x)
#  endif

#else

#  if @is_bool2@ || @is_bool1@
#    define _CONVERT_FN(x) ((npy_bool)(x != 0))
#  else
#    define _CONVERT_FN(x) ((_TYPE2)x)
#  endif

#endif
795

796
static NPY_GCC_OPT_3 int
797 1
@prefix@_cast_@name1@_to_@name2@(
798
                        char *dst, npy_intp dst_stride,
799
                        char *src, npy_intp src_stride,
800
                        npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
801
                        NpyAuxData *NPY_UNUSED(data))
802
{
803
#if @is_complex1@
804
    _TYPE1 src_value[2];
805
#elif !@aligned@
806
    _TYPE1 src_value;
807
#endif
808
#if @is_complex2@
809
    _TYPE2 dst_value[2];
810
#elif !@aligned@
811
    _TYPE2 dst_value;
812
#endif
813

814
#if @aligned@
815
   /* sanity check */
816
    assert(N == 0 || npy_is_aligned(src, _ALIGN(_TYPE1)));
817
    assert(N == 0 || npy_is_aligned(dst, _ALIGN(_TYPE2)));
818
#endif
819

820
    /*printf("@prefix@_cast_@name1@_to_@name2@\n");*/
821

822 1
    while (N--) {
823
#if @aligned@
824
#  if @is_complex1@
825 1
        src_value[0] = ((_TYPE1 *)src)[0];
826 1
        src_value[1] = ((_TYPE1 *)src)[1];
827
#  endif
828
#else
829 1
        memmove(&src_value, src, sizeof(src_value));
830
#endif
831

832
/* Do the cast */
833
#if @is_complex1@
834
#  if @is_complex2@
835 1
    dst_value[0] = _CONVERT_FN(src_value[0]);
836 1
    dst_value[1] = _CONVERT_FN(src_value[1]);
837
#  elif !@aligned@
838
#    if @is_bool2@
839 1
       dst_value = _CONVERT_FN(src_value[0]) || _CONVERT_FN(src_value[1]);
840
#    else
841 1
       dst_value = _CONVERT_FN(src_value[0]);
842
#    endif
843
#  else
844
#    if @is_bool2@
845 1
       *(_TYPE2 *)dst = _CONVERT_FN(src_value[0]) || _CONVERT_FN(src_value[1]);
846
#    else
847 1
       *(_TYPE2 *)dst = _CONVERT_FN(src_value[0]);
848
#    endif
849
#  endif
850
#else
851
#  if @is_complex2@
852
#    if !@aligned@
853 1
    dst_value[0] = _CONVERT_FN(src_value);
854
#    else
855 1
    dst_value[0] = _CONVERT_FN(*(_TYPE1 *)src);
856
#    endif
857 1
    dst_value[1] = 0;
858
#  elif !@aligned@
859 1
    dst_value = _CONVERT_FN(src_value);
860
#  else
861 1
    *(_TYPE2 *)dst = _CONVERT_FN(*(_TYPE1 *)src);
862
#  endif
863
#endif
864

865
#if @aligned@
866
#  if @is_complex2@
867 1
        ((_TYPE2 *)dst)[0] = dst_value[0];
868 1
        ((_TYPE2 *)dst)[1] = dst_value[1];
869
#  endif
870
#else
871 1
        memmove(dst, &dst_value, sizeof(dst_value));
872
#endif
873

874
#if @contig@
875 1
        dst += sizeof(@type2@);
876 1
        src += sizeof(@type1@);
877
#else
878 1
        dst += dst_stride;
879 1
        src += src_stride;
880
#endif
881
    }
882 1
    return 0;
883
}

#undef _CONVERT_FN
#undef _TYPE2
#undef _TYPE1

#endif

/**end repeat2**/

/**end repeat1**/

/**end repeat**/
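
/*
 * For orientation, the expansion of the template above for
 * _aligned_cast_float_to_double reduces to the following sketch
 * (asserts omitted):
 *
 *     static NPY_GCC_OPT_3 int
 *     _aligned_cast_float_to_double(
 *                             char *dst, npy_intp dst_stride,
 *                             char *src, npy_intp src_stride,
 *                             npy_intp N, npy_intp src_itemsize,
 *                             NpyAuxData *data)
 *     {
 *         while (N--) {
 *             *(npy_double *)dst = (npy_double)*(npy_float *)src;
 *             dst += dst_stride;
 *             src += src_stride;
 *         }
 *         return 0;
 *     }
 */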

NPY_NO_EXPORT PyArray_StridedUnaryOp *
PyArray_GetStridedNumericCastFn(int aligned, npy_intp src_stride,
                             npy_intp dst_stride,
                             int src_type_num, int dst_type_num)
{
    switch (src_type_num) {
/**begin repeat
 *
 * #NAME1 = BOOL,
 *          UBYTE, USHORT, UINT, ULONG, ULONGLONG,
 *          BYTE, SHORT, INT, LONG, LONGLONG,
 *          HALF, FLOAT, DOUBLE, LONGDOUBLE,
 *          CFLOAT, CDOUBLE, CLONGDOUBLE#
 * #name1 = bool,
 *          ubyte, ushort, uint, ulong, ulonglong,
 *          byte, short, int, long, longlong,
 *          half, float, double, longdouble,
 *          cfloat, cdouble, clongdouble#
 * #type1 = npy_bool,
 *          npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
 *          npy_byte, npy_short, npy_int, npy_long, npy_longlong,
 *          npy_half, npy_float, npy_double, npy_longdouble,
 *          npy_cfloat, npy_cdouble, npy_clongdouble#
 */

        case NPY_@NAME1@:
            /*printf("test fn %d - second %d\n", NPY_@NAME1@, dst_type_num);*/
            switch (dst_type_num) {
/**begin repeat1
 *
 * #NAME2 = BOOL,
 *          UBYTE, USHORT, UINT, ULONG, ULONGLONG,
 *          BYTE, SHORT, INT, LONG, LONGLONG,
 *          HALF, FLOAT, DOUBLE, LONGDOUBLE,
 *          CFLOAT, CDOUBLE, CLONGDOUBLE#
 * #name2 = bool,
 *          ubyte, ushort, uint, ulong, ulonglong,
 *          byte, short, int, long, longlong,
 *          half, float, double, longdouble,
 *          cfloat, cdouble, clongdouble#
 * #type2 = npy_bool,
 *          npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
 *          npy_byte, npy_short, npy_int, npy_long, npy_longlong,
 *          npy_half, npy_float, npy_double, npy_longdouble,
 *          npy_cfloat, npy_cdouble, npy_clongdouble#
 */

                case NPY_@NAME2@:
                    /*printf("ret fn %d %d\n", NPY_@NAME1@, NPY_@NAME2@);*/
#  if NPY_USE_UNALIGNED_ACCESS
                    if (src_stride == sizeof(@type1@) &&
                                dst_stride == sizeof(@type2@)) {
                        return &_aligned_contig_cast_@name1@_to_@name2@;
                    }
                    else {
                        return &_aligned_cast_@name1@_to_@name2@;
                    }
#  else
                    if (src_stride == sizeof(@type1@) &&
                                dst_stride == sizeof(@type2@)) {
                        return aligned ?
                                    &_aligned_contig_cast_@name1@_to_@name2@ :
                                    &_contig_cast_@name1@_to_@name2@;
                    }
                    else {
                        return aligned ? &_aligned_cast_@name1@_to_@name2@ :
                                         &_cast_@name1@_to_@name2@;
                    }
#  endif

/**end repeat1**/
            }
            /*printf("switched test fn %d - second %d\n", NPY_@NAME1@, dst_type_num);*/

/**end repeat**/
    }

    return NULL;
}
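
/*
 * Usage sketch (illustrative): casting N floats to doubles with
 * arbitrary strides, assuming suitably aligned pointers:
 *
 *     PyArray_StridedUnaryOp *cast = PyArray_GetStridedNumericCastFn(
 *             1, src_stride, dst_stride, NPY_FLOAT, NPY_DOUBLE);
 *     if (cast != NULL) {
 *         cast(dst, dst_stride, src, src_stride, N,
 *              sizeof(npy_float), NULL);
 *     }
 *
 * NULL is returned for any type combination not covered by the
 * numeric switch above (e.g. object, string or datetime dtypes).
 */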


/****************** PRIMITIVE FLAT TO/FROM NDIM FUNCTIONS ******************/

/* See documentation of arguments in lowlevel_strided_loops.h */
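/*
 * A note on the return convention shared by the three transfer
 * functions below: as used here, they return the number of elements
 * that still remain once the n-dimensional block is exhausted, 0 if
 * the full requested count was transferred, or -1 if the underlying
 * transfer function reported an error.
 */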
NPY_NO_EXPORT npy_intp
PyArray_TransferNDimToStrided(npy_intp ndim,
                char *dst, npy_intp dst_stride,
                char *src, npy_intp const *src_strides, npy_intp src_strides_inc,
                npy_intp const *coords, npy_intp coords_inc,
                npy_intp const *shape, npy_intp shape_inc,
                npy_intp count, npy_intp src_itemsize,
                PyArray_StridedUnaryOp *stransfer,
                NpyAuxData *data)
{
    npy_intp i, M, N, coord0, shape0, src_stride0, coord1, shape1, src_stride1;

    /* Finish off dimension 0 */
    coord0 = coords[0];
    shape0 = shape[0];
    src_stride0 = src_strides[0];
    N = shape0 - coord0;
    if (N >= count) {
        return stransfer(dst, dst_stride, src, src_stride0,
                         count, src_itemsize, data);
    }
    int res = stransfer(dst, dst_stride, src, src_stride0,
                        N, src_itemsize, data);
    if (res < 0) {
        return -1;
    }
    count -= N;

    /* If it's 1-dimensional, there's no more to copy */
    if (ndim == 1) {
        return count;
    }

    /* Adjust the src and dst pointers */
    coord1 = (coords + coords_inc)[0];
    shape1 = (shape + shape_inc)[0];
    src_stride1 = (src_strides + src_strides_inc)[0];
    src = src - coord0*src_stride0 + src_stride1;
    dst += N*dst_stride;

    /* Finish off dimension 1 */
    M = (shape1 - coord1 - 1);
    N = shape0*M;
    for (i = 0; i < M; ++i) {
        if (shape0 >= count) {
            return stransfer(dst, dst_stride, src, src_stride0,
                             count, src_itemsize, data);
        }
        else {
            res = stransfer(dst, dst_stride, src, src_stride0,
                            shape0, src_itemsize, data);
            if (res < 0) {
                return -1;
            }
        }
        count -= shape0;
        src += src_stride1;
        dst += shape0*dst_stride;
    }

    /* If it's 2-dimensional, there's no more to copy */
    if (ndim == 2) {
        return count;
    }

    /* General-case loop for everything else */
    else {
        /* Iteration structure for dimensions 2 and up */
        struct {
            npy_intp coord, shape, src_stride;
        } it[NPY_MAXDIMS];

        /* Copy the coordinates and shape */
        coords += 2*coords_inc;
        shape += 2*shape_inc;
        src_strides += 2*src_strides_inc;
        for (i = 0; i < ndim-2; ++i) {
            it[i].coord = coords[0];
            it[i].shape = shape[0];
            it[i].src_stride = src_strides[0];
            coords += coords_inc;
            shape += shape_inc;
            src_strides += src_strides_inc;
        }

        for (;;) {
            /* Adjust the src pointer from the dimension 0 and 1 loop */
            src = src - shape1*src_stride1;

            /* Increment to the next coordinate */
            for (i = 0; i < ndim-2; ++i) {
                src += it[i].src_stride;
                if (++it[i].coord >= it[i].shape) {
                    it[i].coord = 0;
                    src -= it[i].src_stride*it[i].shape;
                }
                else {
                    break;
                }
            }
            /* If the last dimension rolled over, we're done */
            if (i == ndim-2) {
                return count;
            }

            /* A loop for dimensions 0 and 1 */
            for (i = 0; i < shape1; ++i) {
                if (shape0 >= count) {
                    return stransfer(dst, dst_stride, src, src_stride0,
                                     count, src_itemsize, data);
                }
                else {
                    res = stransfer(dst, dst_stride, src, src_stride0,
                                    shape0, src_itemsize, data);
                    if (res < 0) {
                        return -1;
                    }
                }
                count -= shape0;
                src += src_stride1;
                dst += shape0*dst_stride;
            }
        }
    }
}

/* See documentation of arguments in lowlevel_strided_loops.h */
NPY_NO_EXPORT npy_intp
PyArray_TransferStridedToNDim(npy_intp ndim,
                char *dst, npy_intp const *dst_strides, npy_intp dst_strides_inc,
                char *src, npy_intp src_stride,
                npy_intp const *coords, npy_intp coords_inc,
                npy_intp const *shape, npy_intp shape_inc,
                npy_intp count, npy_intp src_itemsize,
                PyArray_StridedUnaryOp *stransfer,
                NpyAuxData *data)
{
    npy_intp i, M, N, coord0, shape0, dst_stride0, coord1, shape1, dst_stride1;

    /* Finish off dimension 0 */
    coord0 = coords[0];
    shape0 = shape[0];
    dst_stride0 = dst_strides[0];
    N = shape0 - coord0;
    if (N >= count) {
        return stransfer(dst, dst_stride0, src, src_stride,
                         count, src_itemsize, data);
    }
    int res = stransfer(dst, dst_stride0, src, src_stride,
                        N, src_itemsize, data);
    if (res < 0) {
        return -1;
    }
    count -= N;

    /* If it's 1-dimensional, there's no more to copy */
    if (ndim == 1) {
        return count;
    }

    /* Adjust the src and dst pointers */
    coord1 = (coords + coords_inc)[0];
    shape1 = (shape + shape_inc)[0];
    dst_stride1 = (dst_strides + dst_strides_inc)[0];
    dst = dst - coord0*dst_stride0 + dst_stride1;
    src += N*src_stride;

    /* Finish off dimension 1 */
    M = (shape1 - coord1 - 1);
    N = shape0*M;
    for (i = 0; i < M; ++i) {
        if (shape0 >= count) {
            return stransfer(dst, dst_stride0, src, src_stride,
                             count, src_itemsize, data);
        }
        else {
            res = stransfer(dst, dst_stride0, src, src_stride,
                            shape0, src_itemsize, data);
            if (res < 0) {
                return -1;
            }
        }
        count -= shape0;
        dst += dst_stride1;
        src += shape0*src_stride;
    }

    /* If it's 2-dimensional, there's no more to copy */
    if (ndim == 2) {
        return count;
    }

    /* General-case loop for everything else */
    else {
        /* Iteration structure for dimensions 2 and up */
        struct {
            npy_intp coord, shape, dst_stride;
        } it[NPY_MAXDIMS];

        /* Copy the coordinates and shape */
        coords += 2*coords_inc;
        shape += 2*shape_inc;
        dst_strides += 2*dst_strides_inc;
        for (i = 0; i < ndim-2; ++i) {
            it[i].coord = coords[0];
            it[i].shape = shape[0];
            it[i].dst_stride = dst_strides[0];
            coords += coords_inc;
            shape += shape_inc;
            dst_strides += dst_strides_inc;
        }

        for (;;) {
            /* Adjust the dst pointer from the dimension 0 and 1 loop */
            dst = dst - shape1*dst_stride1;

            /* Increment to the next coordinate */
            for (i = 0; i < ndim-2; ++i) {
                dst += it[i].dst_stride;
                if (++it[i].coord >= it[i].shape) {
                    it[i].coord = 0;
                    dst -= it[i].dst_stride*it[i].shape;
                }
                else {
                    break;
                }
            }
            /* If the last dimension rolled over, we're done */
            if (i == ndim-2) {
                return count;
            }

            /* A loop for dimensions 0 and 1 */
            for (i = 0; i < shape1; ++i) {
                if (shape0 >= count) {
                    return stransfer(dst, dst_stride0, src, src_stride,
                                     count, src_itemsize, data);
                }
                else {
                    res = stransfer(dst, dst_stride0, src, src_stride,
                                    shape0, src_itemsize, data);
                    if (res < 0) {
                        return -1;
                    }
                }
                count -= shape0;
                dst += dst_stride1;
                src += shape0*src_stride;
            }
        }
    }
}

/* See documentation of arguments in lowlevel_strided_loops.h */
NPY_NO_EXPORT npy_intp
PyArray_TransferMaskedStridedToNDim(npy_intp ndim,
                char *dst, npy_intp const *dst_strides, npy_intp dst_strides_inc,
                char *src, npy_intp src_stride,
                npy_uint8 *mask, npy_intp mask_stride,
                npy_intp const *coords, npy_intp coords_inc,
                npy_intp const *shape, npy_intp shape_inc,
                npy_intp count, npy_intp src_itemsize,
                PyArray_MaskedStridedUnaryOp *stransfer,
                NpyAuxData *data)
{
    npy_intp i, M, N, coord0, shape0, dst_stride0, coord1, shape1, dst_stride1;

    /* Finish off dimension 0 */
    coord0 = coords[0];
    shape0 = shape[0];
    dst_stride0 = dst_strides[0];
    N = shape0 - coord0;
    if (N >= count) {
        return stransfer(
                dst, dst_stride0, src, src_stride,
                mask, mask_stride,
                count, src_itemsize, data);
    }
    int res = stransfer(
            dst, dst_stride0, src, src_stride,
            mask, mask_stride,
            N, src_itemsize, data);
    if (res < 0) {
        return -1;
    }
    count -= N;

    /* If it's 1-dimensional, there's no more to copy */
    if (ndim == 1) {
        return count;
    }

    /* Adjust the src and dst pointers */
    coord1 = (coords + coords_inc)[0];
    shape1 = (shape + shape_inc)[0];
    dst_stride1 = (dst_strides + dst_strides_inc)[0];
    dst = dst - coord0*dst_stride0 + dst_stride1;
    src += N*src_stride;
    mask += N*mask_stride;

    /* Finish off dimension 1 */
    M = (shape1 - coord1 - 1);
    N = shape0*M;
    for (i = 0; i < M; ++i) {
        if (shape0 >= count) {
            return stransfer(
                    dst, dst_stride0, src, src_stride,
                    mask, mask_stride,
                    count, src_itemsize, data);
        }
        else {
            res = stransfer(
                    dst, dst_stride0, src, src_stride,
                    mask, mask_stride,
                    shape0, src_itemsize, data);
            if (res < 0) {
                return -1;
            }
        }
        count -= shape0;
        dst += dst_stride1;
        src += shape0*src_stride;
        mask += shape0*mask_stride;
    }

    /* If it's 2-dimensional, there's no more to copy */
    if (ndim == 2) {
        return count;
    }

    /* General-case loop for everything else */
    else {
        /* Iteration structure for dimensions 2 and up */
        struct {
            npy_intp coord, shape, dst_stride;
        } it[NPY_MAXDIMS];

        /* Copy the coordinates and shape */
        coords += 2*coords_inc;
        shape += 2*shape_inc;
        dst_strides += 2*dst_strides_inc;
        for (i = 0; i < ndim-2; ++i) {
            it[i].coord = coords[0];
            it[i].shape = shape[0];
            it[i].dst_stride = dst_strides[0];
            coords += coords_inc;
            shape += shape_inc;
            dst_strides += dst_strides_inc;
        }

        for (;;) {
            /* Adjust the dst pointer from the dimension 0 and 1 loop */
            dst = dst - shape1*dst_stride1;

            /* Increment to the next coordinate */
            for (i = 0; i < ndim-2; ++i) {
                dst += it[i].dst_stride;
                if (++it[i].coord >= it[i].shape) {
                    it[i].coord = 0;
                    dst -= it[i].dst_stride*it[i].shape;
                }
                else {
                    break;
                }
            }
            /* If the last dimension rolled over, we're done */
            if (i == ndim-2) {
                return count;
            }

            /* A loop for dimensions 0 and 1 */
            for (i = 0; i < shape1; ++i) {
                if (shape0 >= count) {
                    return stransfer(
                            dst, dst_stride0, src, src_stride,
                            mask, mask_stride,
                            count, src_itemsize, data);
                }
                else {
                    res = stransfer(
                            dst, dst_stride0, src, src_stride,
                            mask, mask_stride,
                            shape0, src_itemsize, data);
                    if (res < 0) {
                        return -1;
                    }
                }
                count -= shape0;
                dst += dst_stride1;
                src += shape0*src_stride;
                mask += shape0*mask_stride;
            }
        }
    }
}


/***************************************************************************/
/****************** MapIter (Advanced indexing) Get/Set ********************/
/***************************************************************************/
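
/*
 * The repeat below generates both mapiter_set and mapiter_get from a
 * single body; the isget parameter selects the direction of the copy
 * between the indexed array and the result/value array.
 */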

/**begin repeat
 * #name = set, get#
 * #isget = 0, 1#
 */

/*
 * Advanced indexing iteration of arrays when there is a single indexing
 * array, which has the same memory order as the value array, and both
 * can be trivially iterated (single stride, aligned, no casting necessary).
 */
NPY_NO_EXPORT int
mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
                       PyArrayObject *result)
{
    char *base_ptr, *ind_ptr, *result_ptr;
    npy_intp self_stride, ind_stride, result_stride;
    npy_intp fancy_dim = PyArray_DIM(self, 0);

    npy_intp itersize;

    int is_aligned = IsUintAligned(self) && IsUintAligned(result);
    int needs_api = PyDataType_REFCHK(PyArray_DESCR(self));

    PyArray_CopySwapFunc *copyswap = PyArray_DESCR(self)->f->copyswap;
    NPY_BEGIN_THREADS_DEF;

    base_ptr = PyArray_BYTES(self);
    self_stride = PyArray_STRIDE(self, 0);

    PyArray_PREPARE_TRIVIAL_PAIR_ITERATION(ind, result, itersize,
                                           ind_ptr, result_ptr,
                                           ind_stride, result_stride)

    if (!needs_api) {
        NPY_BEGIN_THREADS_THRESHOLDED(PyArray_SIZE(ind));
    }
#if !@isget@
    /* Check the indices beforehand */
    while (itersize--) {
        npy_intp indval = *((npy_intp*)ind_ptr);
        if (check_and_adjust_index(&indval, fancy_dim, 0, _save) < 0) {
            return -1;
        }
        ind_ptr += ind_stride;
    }

    /*
     * Reset ind_ptr and itersize; due to broadcasting, itersize is
     * always the size of ind.
     */
    ind_ptr = PyArray_BYTES(ind);
    itersize = PyArray_SIZE(ind);
#endif

    /* Optimization for aligned types that do not need the api */
    switch ((is_aligned && !needs_api) ? PyArray_ITEMSIZE(self) : 0) {

/**begin repeat1
 * #elsize = 1, 2, 4, 8, 0#
 * #copytype = npy_uint8, npy_uint16, npy_uint32, npy_uint64, 0#
 */

#if @elsize@
    case @elsize@:
#else
    default:
#endif
        while (itersize--) {
            char *self_ptr;
            npy_intp indval = *((npy_intp*)ind_ptr);
            assert(npy_is_aligned(ind_ptr, _UINT_ALIGN(npy_intp)));
#if @isget@
            if (check_and_adjust_index(&indval, fancy_dim, 0, _save) < 0) {
                return -1;
            }
#else
            if (indval < 0) {
                indval += fancy_dim;
            }
#endif
            self_ptr = base_ptr + indval * self_stride;

#if @isget@
#if @elsize@
            assert(npy_is_aligned(result_ptr, _UINT_ALIGN(@copytype@)));
            assert(npy_is_aligned(self_ptr, _UINT_ALIGN(@copytype@)));
            *(@copytype@ *)result_ptr = *(@copytype@ *)self_ptr;
#else
            copyswap(result_ptr, self_ptr, 0, self);
#endif

#else /* !@isget@ */
#if @elsize@
            assert(npy_is_aligned(result_ptr, _UINT_ALIGN(@copytype@)));
            assert(npy_is_aligned(self_ptr, _UINT_ALIGN(@copytype@)));
            *(@copytype@ *)self_ptr = *(@copytype@ *)result_ptr;
#else
            copyswap(self_ptr, result_ptr, 0, self);
#endif
#endif

            ind_ptr += ind_stride;
            result_ptr += result_stride;
        }
        break;

/**end repeat1**/
    }

    NPY_END_THREADS;

    return 0;
}


/*
 * General advanced indexing iteration.
 */
NPY_NO_EXPORT int
mapiter_@name@(PyArrayMapIterObject *mit)
{
    npy_intp *counter, count;
    int i, is_aligned;

    /* Cached mit info */
    int numiter = mit->numiter;
    int needs_api = mit->needs_api;
    /* Constant information */
    npy_intp fancy_dims[NPY_MAXDIMS];
    npy_intp fancy_strides[NPY_MAXDIMS];
#if @isget@
    int iteraxis;
#endif

    char *baseoffset = mit->baseoffset;
    char **outer_ptrs = mit->outer_ptrs;
    npy_intp *outer_strides = mit->outer_strides;
    PyArrayObject *array = mit->array;

    /* Fill constant information */
#if @isget@
    iteraxis = mit->iteraxes[0];
#endif
    for (i = 0; i < numiter; i++) {
        fancy_dims[i] = mit->fancy_dims[i];
        fancy_strides[i] = mit->fancy_strides[i];
    }

    /*
     * Alignment information (swapping is never needed, since we buffer);
     * we could also check that extra_op is buffered, but it should
     * rarely matter.
     */

    is_aligned = IsUintAligned(array) && IsUintAligned(mit->extra_op);

    if (mit->size == 0) {
        return 0;
    }

    if (mit->subspace_iter == NULL) {
        /*
         * Item-by-item copy situation; the operand is buffered,
         * so use copyswap.
         */
        PyArray_CopySwapFunc *copyswap = PyArray_DESCR(array)->f->copyswap;

        /* We have only one iterator handling everything */
        counter = NpyIter_GetInnerLoopSizePtr(mit->outer);

        /************ Optimized inner loops without subspace *************/

/**begin repeat1
 * #one_iter = 1, 0#
 * #numiter = 1, numiter#
 */

#if @one_iter@
        if (numiter == 1) {
#else
        else {
#endif
            NPY_BEGIN_THREADS_DEF;
            if (!needs_api) {
                NPY_BEGIN_THREADS;
            }

            /* Optimization for aligned types that do not need the api */
            switch ((is_aligned && !needs_api) ? PyArray_ITEMSIZE(array) : 0) {

/**begin repeat2
 * #elsize = 1, 2, 4, 8, 0#
 * #copytype = npy_uint8, npy_uint16, npy_uint32, npy_uint64, 0#
 */

#if @elsize@
            case @elsize@:
#else
            default:
#endif
                /* Outer iteration (safe because mit->size != 0) */
                do {
#if !@isget@
                    /*
                     * When the API is needed, the casting might fail
                     * TODO: (only if buffering is enabled).
                     */
                    if (needs_api && PyErr_Occurred()) {
                        return -1;
                    }
#endif
                    count = *counter;
                    while (count--) {
                        char *self_ptr = baseoffset;
                        for (i = 0; i < @numiter@; i++) {
                            npy_intp indval = *((npy_intp*)outer_ptrs[i]);
                            assert(npy_is_aligned(outer_ptrs[i],
                                                  _UINT_ALIGN(npy_intp)));

#if @isget@ && @one_iter@
                            if (check_and_adjust_index(&indval, fancy_dims[i],
                                                       iteraxis, _save) < 0) {
                                return -1;
                            }
#else
                            if (indval < 0) {
                                indval += fancy_dims[i];
                            }
#endif
                            self_ptr += indval * fancy_strides[i];

                            /* advance indexing arrays */
                            outer_ptrs[i] += outer_strides[i];
                        }

#if @isget@
#if @elsize@
                        assert(npy_is_aligned(outer_ptrs[i],
                                              _UINT_ALIGN(@copytype@)));
                        assert(npy_is_aligned(self_ptr,
                                              _UINT_ALIGN(@copytype@)));
                        *(@copytype@ *)(outer_ptrs[i]) = *(@copytype@ *)self_ptr;
#else
                        copyswap(outer_ptrs[i], self_ptr, 0, array);
#endif
#else /* !@isget@ */
#if @elsize@
                        assert(npy_is_aligned(outer_ptrs[i],
                                              _UINT_ALIGN(@copytype@)));
                        assert(npy_is_aligned(self_ptr,
                                              _UINT_ALIGN(@copytype@)));
                        *(@copytype@ *)self_ptr = *(@copytype@ *)(outer_ptrs[i]);
#else
                        copyswap(self_ptr, outer_ptrs[i], 0, array);
#endif
#endif
                        /* advance extra operand */
                        outer_ptrs[i] += outer_strides[i];
                    }
                } while (mit->outer_next(mit->outer));

                break;

/**end repeat2**/
            }
            NPY_END_THREADS;
        }
/**end repeat1**/
    }

    /******************* Nested Iteration Situation *******************/
    else {
        char *subspace_baseptrs[2];
        char **subspace_ptrs = mit->subspace_ptrs;
        npy_intp *subspace_strides = mit->subspace_strides;
        int is_subiter_trivial = 0; /* has three states */
        npy_intp reset_offsets[2] = {0, 0};

        /* Use strided transfer functions for the inner loop */
        PyArray_StridedUnaryOp *stransfer = NULL;
        NpyAuxData *transferdata = NULL;
        npy_intp fixed_strides[2];

#if @isget@
        npy_intp src_itemsize = PyArray_ITEMSIZE(array);
#else
        npy_intp src_itemsize = PyArray_ITEMSIZE(mit->extra_op);
#endif

        /*
         * Get a dtype transfer function; since there are no
         * buffers, this is safe.
         */
        NpyIter_GetInnerFixedStrideArray(mit->subspace_iter, fixed_strides);

        if (PyArray_GetDTypeTransferFunction(is_aligned,
#if @isget@
                        fixed_strides[0], fixed_strides[1],
                        PyArray_DESCR(array), PyArray_DESCR(mit->extra_op),
#else
                        fixed_strides[1], fixed_strides[0],
                        PyArray_DESCR(mit->extra_op), PyArray_DESCR(array),
#endif
                        0,
                        &stransfer, &transferdata,
                        &needs_api) != NPY_SUCCEED) {
            return -1;
        }

        counter = NpyIter_GetInnerLoopSizePtr(mit->subspace_iter);
        if (*counter == PyArray_SIZE(mit->subspace)) {
            /*
             * The subspace is trivially iterable;
             * manipulate pointers to avoid expensive resetting.
             */
            is_subiter_trivial = 1;
        }
/**begin repeat1
 * #one_iter = 1, 0#
 * #numiter = 1, numiter#
 */

#if @one_iter@
        if (numiter == 1) {
#else
        else {
#endif
            NPY_BEGIN_THREADS_DEF;
            if (!needs_api) {
                NPY_BEGIN_THREADS;
            }

            /* Outer iteration (safe because mit->size != 0) */
            do {
                char *self_ptr = baseoffset;
                for (i = 0; i < @numiter@; i++) {
                    npy_intp indval = *((npy_intp*)outer_ptrs[i]);

#if @isget@ && @one_iter@
                    if (check_and_adjust_index(&indval, fancy_dims[i],
                                               iteraxis, _save) < 0) {
                        NPY_AUXDATA_FREE(transferdata);
                        return -1;
                    }
#else
                    if (indval < 0) {
                        indval += fancy_dims[i];
                    }
#endif

                    self_ptr += indval * fancy_strides[i];
                }

                /*
                 * Resetting is slow, so try to avoid resetting
                 * if the subspace iteration is trivial.
                 * Watch out: reset_offsets are kept outside of the loop,
                 * assuming the subspaces of different external iterations
                 * share the same structure.
                 */
                if (is_subiter_trivial <= 1) {
                    /* slower resetting: first iteration or non-trivial subspace */

                    char *errmsg = NULL;
                    subspace_baseptrs[0] = self_ptr;
                    subspace_baseptrs[1] = mit->extra_op_ptrs[0];

                    /* (can't really fail, since no buffering is necessary) */
                    if (!NpyIter_ResetBasePointers(mit->subspace_iter,
                                                   subspace_baseptrs,
                                                   &errmsg)) {
                        NPY_END_THREADS;
                        PyErr_SetString(PyExc_ValueError, errmsg);
                        NPY_AUXDATA_FREE(transferdata);
                        return -1;
                    }
                    if (is_subiter_trivial != 0) {
                        /* reset_offsets are nonzero for negative strides */
                        reset_offsets[0] = subspace_ptrs[0] - self_ptr;
                        reset_offsets[1] = subspace_ptrs[1] - mit->extra_op_ptrs[0];

                        /* use the faster adjustment further on */
                        is_subiter_trivial++;
                    }
                }
                else {
                    /*
                     * Faster resetting if the subspace iteration is trivial:
                     * reset_offsets are zero for positive strides, and for
                     * negative strides this shifts the pointer to the last
                     * item.
                     */
                    subspace_ptrs[0] = self_ptr + reset_offsets[0];
                    subspace_ptrs[1] = mit->extra_op_ptrs[0] + reset_offsets[1];
                }

#if !@isget@
                /*
                 * When the API is needed, the casting might fail
                 * TODO: Could only check if casting is unsafe, or even just
                 *       not at all...
                 */
                if (needs_api && PyErr_Occurred()) {
                    NPY_AUXDATA_FREE(transferdata);
                    return -1;
                }
#endif

                do {

#if @isget@
                    if (NPY_UNLIKELY(stransfer(
                            subspace_ptrs[1], subspace_strides[1],
                            subspace_ptrs[0], subspace_strides[0],
                            *counter, src_itemsize, transferdata) < 0)) {
                        NPY_END_THREADS;
                        NPY_AUXDATA_FREE(transferdata);
                        return -1;
                    }
#else
                    if (NPY_UNLIKELY(stransfer(
                            subspace_ptrs[0], subspace_strides[0],
                            subspace_ptrs[1], subspace_strides[1],
                            *counter, src_itemsize, transferdata) < 0)) {
                        NPY_END_THREADS;
                        NPY_AUXDATA_FREE(transferdata);
                        return -1;
                    }
#endif
                } while (mit->subspace_next(mit->subspace_iter));

                mit->extra_op_next(mit->extra_op_iter);
            } while (mit->outer_next(mit->outer));
            NPY_END_THREADS;
        }
/**end repeat1**/

        NPY_AUXDATA_FREE(transferdata);
    }
    return 0;
}

/**end repeat**/
