1
|
|
#define PY_SSIZE_T_CLEAN
|
2
|
|
#include <Python.h>
|
3
|
|
#include "structmember.h"
|
4
|
|
|
5
|
|
/*#include <stdio.h>*/
|
6
|
|
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
|
7
|
|
#define _MULTIARRAYMODULE
|
8
|
|
#include "numpy/arrayobject.h"
|
9
|
|
#include "arrayobject.h"
|
10
|
|
|
11
|
|
#include "npy_config.h"
|
12
|
|
|
13
|
|
#include "npy_pycompat.h"
|
14
|
|
#include "npy_import.h"
|
15
|
|
|
16
|
|
#include "common.h"
|
17
|
|
#include "ctors.h"
|
18
|
|
#include "descriptor.h"
|
19
|
|
#include "iterators.h"
|
20
|
|
#include "mapping.h"
|
21
|
|
#include "lowlevel_strided_loops.h"
|
22
|
|
#include "item_selection.h"
|
23
|
|
#include "mem_overlap.h"
|
24
|
|
#include "array_assign.h"
|
25
|
|
#include "array_coercion.h"
|
26
|
|
|
27
|
|
|
28
|
|
#define HAS_INTEGER 1
|
29
|
|
#define HAS_NEWAXIS 2
|
30
|
|
#define HAS_SLICE 4
|
31
|
|
#define HAS_ELLIPSIS 8
|
32
|
|
/* HAS_FANCY can be mixed with HAS_0D_BOOL, be careful when to use & or == */
|
33
|
|
#define HAS_FANCY 16
|
34
|
|
#define HAS_BOOL 32
|
35
|
|
/* NOTE: Only set if it is neither fancy nor purely integer index! */
|
36
|
|
#define HAS_SCALAR_ARRAY 64
|
37
|
|
/*
|
38
|
|
* Indicate that this is a fancy index that comes from a 0d boolean.
|
39
|
|
* This means that the index does not operate along a real axis. The
|
40
|
|
* corresponding index type is just HAS_FANCY.
|
41
|
|
*/
|
42
|
|
#define HAS_0D_BOOL (HAS_FANCY | 128)
|
43
|
|
|
44
|
|
|
45
|
|
static int
|
46
|
|
_nonzero_indices(PyObject *myBool, PyArrayObject **arrays);
|
47
|
|
|
48
|
|
/******************************************************************************
|
49
|
|
*** IMPLEMENT MAPPING PROTOCOL ***
|
50
|
|
*****************************************************************************/
|
51
|
|
|
52
|
|
NPY_NO_EXPORT Py_ssize_t
|
53
|
1
|
array_length(PyArrayObject *self)
|
54
|
|
{
|
55
|
1
|
if (PyArray_NDIM(self) != 0) {
|
56
|
1
|
return PyArray_DIMS(self)[0];
|
57
|
|
} else {
|
58
|
0
|
PyErr_SetString(PyExc_TypeError, "len() of unsized object");
|
59
|
0
|
return -1;
|
60
|
|
}
|
61
|
|
}
|
62
|
|
|
63
|
|
|
64
|
|
/* -------------------------------------------------------------- */
|
65
|
|
|
66
|
|
/*NUMPY_API
|
67
|
|
*
|
68
|
|
*/
|
69
|
|
NPY_NO_EXPORT void
PyArray_MapIterSwapAxes(PyArrayMapIterObject *mit, PyArrayObject **ret, int getmap)
{
    PyObject *new;
    int n1, n2, n3, val, bnd;
    int i;
    PyArray_Dims permute;
    npy_intp d[NPY_MAXDIMS];
    PyArrayObject *arr;

    /* permute uses the stack buffer `d`; its length is the iterator ndim */
    permute.ptr = d;
    permute.len = mit->nd;

    /*
     * arr might not have the right number of dimensions
     * and need to be reshaped first by pre-pending ones
     */
    arr = *ret;
    if (PyArray_NDIM(arr) != mit->nd) {
        /* copy existing dims to the tail of the new shape ... */
        for (i = 1; i <= PyArray_NDIM(arr); i++) {
            permute.ptr[mit->nd-i] = PyArray_DIMS(arr)[PyArray_NDIM(arr)-i];
        }
        /* ... and fill the leading entries with ones */
        for (i = 0; i < mit->nd-PyArray_NDIM(arr); i++) {
            permute.ptr[i] = 1;
        }
        new = PyArray_Newshape(arr, &permute, NPY_ANYORDER);
        /* *ret is overwritten even when reshape failed (new == NULL) */
        Py_DECREF(arr);
        *ret = (PyArrayObject *)new;
        if (new == NULL) {
            return;
        }
    }

    /*
     * Setting and getting need to have different permutations.
     * On the get we are permuting the returned object, but on
     * setting we are permuting the object-to-be-set.
     * The set permutation is the inverse of the get permutation.
     */

    /*
     * For getting the array the tuple for transpose is
     * (n1,...,n1+n2-1,0,...,n1-1,n1+n2,...,n3-1)
     * n1 is the number of dimensions of the broadcast index array
     * n2 is the number of dimensions skipped at the start
     * n3 is the number of dimensions of the result
     */

    /*
     * For setting the array the tuple for transpose is
     * (n2,...,n1+n2-1,0,...,n2-1,n1+n2,...n3-1)
     */
    n1 = mit->nd_fancy;
    n2 = mit->consec;  /* axes to insert at */
    n3 = mit->nd;

    /* use n1 as the boundary if getting but n2 if setting */
    bnd = getmap ? n1 : n2;
    /* first run: axes bnd..n1+n2-1 move to the front */
    val = bnd;
    i = 0;
    while (val < n1 + n2) {
        permute.ptr[i++] = val++;
    }
    /* then the axes before the boundary */
    val = 0;
    while (val < bnd) {
        permute.ptr[i++] = val++;
    }
    /* remaining trailing axes keep their relative order */
    val = n1 + n2;
    while (val < n3) {
        permute.ptr[i++] = val++;
    }
    new = PyArray_Transpose(*ret, &permute);
    /* as above, *ret becomes NULL if the transpose failed */
    Py_DECREF(*ret);
    *ret = (PyArrayObject *)new;
}
|
144
|
|
|
145
|
|
static NPY_INLINE void
|
146
|
|
multi_DECREF(PyObject **objects, npy_intp n)
|
147
|
|
{
|
148
|
|
npy_intp i;
|
149
|
1
|
for (i = 0; i < n; i++) {
|
150
|
1
|
Py_DECREF(objects[i]);
|
151
|
|
}
|
152
|
|
}
|
153
|
|
|
154
|
|
/**
|
155
|
|
* Unpack a tuple into an array of new references. Returns the number of objects
|
156
|
|
* unpacked.
|
157
|
|
*
|
158
|
|
* Useful if a tuple is being iterated over multiple times, or for a code path
|
159
|
|
* that doesn't always want the overhead of allocating a tuple.
|
160
|
|
*/
|
161
|
|
static NPY_INLINE npy_intp
|
162
|
1
|
unpack_tuple(PyTupleObject *index, PyObject **result, npy_intp result_n)
|
163
|
|
{
|
164
|
|
npy_intp n, i;
|
165
|
1
|
n = PyTuple_GET_SIZE(index);
|
166
|
1
|
if (n > result_n) {
|
167
|
0
|
PyErr_SetString(PyExc_IndexError,
|
168
|
|
"too many indices for array");
|
169
|
0
|
return -1;
|
170
|
|
}
|
171
|
1
|
for (i = 0; i < n; i++) {
|
172
|
1
|
result[i] = PyTuple_GET_ITEM(index, i);
|
173
|
1
|
Py_INCREF(result[i]);
|
174
|
|
}
|
175
|
|
return n;
|
176
|
|
}
|
177
|
|
|
178
|
|
/* Unpack a single scalar index, taking a new reference to match unpack_tuple */
|
179
|
|
static NPY_INLINE npy_intp
|
180
|
|
unpack_scalar(PyObject *index, PyObject **result, npy_intp NPY_UNUSED(result_n))
|
181
|
|
{
|
182
|
1
|
Py_INCREF(index);
|
183
|
1
|
result[0] = index;
|
184
|
|
return 1;
|
185
|
|
}
|
186
|
|
|
187
|
|
/**
|
188
|
|
* Turn an index argument into a c-array of `PyObject *`s, one for each index.
|
189
|
|
*
|
190
|
|
* When a scalar is passed, this is written directly to the buffer. When a
|
191
|
|
* tuple is passed, the tuple elements are unpacked into the buffer.
|
192
|
|
*
|
193
|
|
* When some other sequence is passed, this implements the following section
|
194
|
|
* from the advanced indexing docs to decide whether to unpack or just write
|
195
|
|
* one element:
|
196
|
|
*
|
197
|
|
* > In order to remain backward compatible with a common usage in Numeric,
|
198
|
|
* > basic slicing is also initiated if the selection object is any non-ndarray
|
199
|
|
* > sequence (such as a list) containing slice objects, the Ellipsis object,
|
200
|
|
* > or the newaxis object, but not for integer arrays or other embedded
|
201
|
|
* > sequences.
|
202
|
|
*
|
203
|
|
* It might be worth deprecating this behaviour (gh-4434), in which case the
|
204
|
|
* entire function should become a simple check of PyTuple_Check.
|
205
|
|
*
|
206
|
|
* @param index The index object, which may or may not be a tuple. This is
|
207
|
|
* a borrowed reference.
|
208
|
|
* @param result An empty buffer of PyObject* to write each index component
|
209
|
|
* to. The references written are new.
|
210
|
|
* @param result_n The length of the result buffer
|
211
|
|
*
|
212
|
|
* @returns The number of items in `result`, or -1 if an error occurred.
|
213
|
|
* The entries in `result` at and beyond this index should be
|
214
|
|
* assumed to contain garbage, even if they were initialized
|
215
|
|
* to NULL, so are not safe to Py_XDECREF. Use multi_DECREF to
|
216
|
|
* dispose of them.
|
217
|
|
*/
|
218
|
|
NPY_NO_EXPORT npy_intp
unpack_indices(PyObject *index, PyObject **result, npy_intp result_n)
{
    npy_intp n, i;
    npy_bool commit_to_unpack;

    /* Fast route for passing a tuple */
    if (PyTuple_CheckExact(index)) {
        return unpack_tuple((PyTupleObject *)index, result, result_n);
    }

    /* Obvious single-entry cases */
    if (0  /* to aid macros below */
            || PyLong_CheckExact(index)
            || index == Py_None
            || PySlice_Check(index)
            || PyArray_Check(index)
            || !PySequence_Check(index)
            || PyUnicode_Check(index)) {

        return unpack_scalar(index, result, result_n);
    }

    /*
     * Passing a tuple subclass - coerce to the base type. This incurs an
     * allocation, but doesn't need to be a fast path anyway
     */
    if (PyTuple_Check(index)) {
        PyTupleObject *tup = (PyTupleObject *) PySequence_Tuple(index);
        if (tup == NULL) {
            return -1;
        }
        n = unpack_tuple(tup, result, result_n);
        Py_DECREF(tup);
        return n;
    }

    /*
     * At this point, we're left with a non-tuple, non-array, sequence:
     * typically, a list. We use some somewhat-arbitrary heuristics from here
     * onwards to decided whether to treat that list as a single index, or a
     * list of indices.
     */

    /* if len fails, treat like a scalar */
    n = PySequence_Size(index);
    if (n < 0) {
        PyErr_Clear();
        return unpack_scalar(index, result, result_n);
    }

    /*
     * Backwards compatibility only takes effect for short sequences - otherwise
     * we treat it like any other scalar.
     *
     * Sequences < NPY_MAXDIMS with any slice objects
     * or newaxis, Ellipsis or other arrays or sequences
     * embedded, are considered equivalent to an indexing
     * tuple. (`a[[[1,2], [3,4]]] == a[[1,2], [3,4]]`)
     */
    if (n >= NPY_MAXDIMS) {
        return unpack_scalar(index, result, result_n);
    }

    /* In case we change result_n elsewhere */
    assert(n <= result_n);

    /*
     * Some other type of short sequence - assume we should unpack it like a
     * tuple, and then decide whether that was actually necessary.
     */
    commit_to_unpack = 0;
    for (i = 0; i < n; i++) {
        /* each fetched item is stored immediately so `fail` can clean up */
        PyObject *tmp_obj = result[i] = PySequence_GetItem(index, i);

        if (commit_to_unpack) {
            /* propagate errors */
            if (tmp_obj == NULL) {
                goto fail;
            }
        }
        else {
            /*
             * if getitem fails (unusual) before we've committed, then stop
             * unpacking
             */
            if (tmp_obj == NULL) {
                PyErr_Clear();
                break;
            }

            /* decide if we should treat this sequence like a tuple */
            if (PyArray_Check(tmp_obj)
                    || PySequence_Check(tmp_obj)
                    || PySlice_Check(tmp_obj)
                    || tmp_obj == Py_Ellipsis
                    || tmp_obj == Py_None) {
                if (DEPRECATE_FUTUREWARNING(
                        "Using a non-tuple sequence for multidimensional "
                        "indexing is deprecated; use `arr[tuple(seq)]` "
                        "instead of `arr[seq]`. In the future this will be "
                        "interpreted as an array index, `arr[np.array(seq)]`, "
                        "which will result either in an error or a different "
                        "result.") < 0) {
                    i++;  /* since loop update doesn't run */
                    goto fail;
                }
                commit_to_unpack = 1;
            }
        }
    }

    /* unpacking was the right thing to do, and we already did it */
    if (commit_to_unpack) {
        return n;
    }
    /* got to the end, never found an indication that we should have unpacked */
    else {
        /* we partially filled result, so empty it first */
        multi_DECREF(result, i);
        return unpack_scalar(index, result, result_n);
    }

fail:
    /* `i` counts exactly the references held in `result` at this point */
    multi_DECREF(result, i);
    return -1;
}
|
345
|
|
|
346
|
|
/**
|
347
|
|
* Prepare an npy_index_object from the python slicing object.
|
348
|
|
*
|
349
|
|
* This function handles all index preparations with the exception
|
350
|
|
* of field access. It fills the array of index_info structs correctly.
|
351
|
|
* It already handles the boolean array special case for fancy indexing,
|
352
|
|
* i.e. if the index type is boolean, it is exactly one matching boolean
|
353
|
|
* array. If the index type is fancy, the boolean array is already
|
354
|
|
* converted to integer arrays. There is (as before) no checking of the
|
355
|
|
* boolean dimension.
|
356
|
|
*
|
357
|
|
* Checks everything but the bounds.
|
358
|
|
*
|
359
|
|
* @param the array being indexed
|
360
|
|
* @param the index object
|
361
|
|
* @param index info struct being filled (size of NPY_MAXDIMS * 2 + 1)
|
362
|
|
* @param number of indices found
|
363
|
|
* @param dimension of the indexing result
|
364
|
|
* @param dimension of the fancy/advanced indices part
|
365
|
|
* @param whether to allow the boolean special case
|
366
|
|
*
|
367
|
|
* @returns the index_type or -1 on failure and fills the number of indices.
|
368
|
|
*/
|
369
|
|
NPY_NO_EXPORT int
prepare_index(PyArrayObject *self, PyObject *index,
              npy_index_info *indices,
              int *num, int *ndim, int *out_fancy_ndim, int allow_boolean)
{
    int new_ndim, fancy_ndim, used_ndim, index_ndim;
    int curr_idx, get_idx;

    int i;
    npy_intp n;

    PyObject *obj = NULL;
    PyArrayObject *arr;

    int index_type = 0;
    int ellipsis_pos = -1;

    /*
     * The choice of only unpacking `2*NPY_MAXDIMS` items is historic.
     * The longest "reasonable" index that produces a result of <= 32 dimensions
     * is `(0,)*np.MAXDIMS + (None,)*np.MAXDIMS`. Longer indices can exist, but
     * are uncommon.
     */
    PyObject *raw_indices[NPY_MAXDIMS*2];

    /* raw_indices holds new references; released via multi_DECREF below */
    index_ndim = unpack_indices(index, raw_indices, NPY_MAXDIMS*2);
    if (index_ndim == -1) {
        return -1;
    }

    /*
     * Parse all indices into the `indices` array of index_info structs
     */
    used_ndim = 0;      /* axes of `self` consumed so far */
    new_ndim = 0;       /* non-fancy axes of the result so far */
    fancy_ndim = 0;     /* broadcast ndim of the fancy-index part */
    get_idx = 0;        /* read cursor into raw_indices */
    curr_idx = 0;       /* write cursor into indices */

    while (get_idx < index_ndim) {
        if (curr_idx > NPY_MAXDIMS * 2) {
            PyErr_SetString(PyExc_IndexError,
                            "too many indices for array");
            goto failed_building_indices;
        }

        obj = raw_indices[get_idx++];

        /**** Try the cascade of possible indices ****/

        /* Index is an ellipsis (`...`) */
        if (obj == Py_Ellipsis) {
            /* At most one ellipsis in an index */
            if (index_type & HAS_ELLIPSIS) {
                PyErr_Format(PyExc_IndexError,
                    "an index can only have a single ellipsis ('...')");
                goto failed_building_indices;
            }
            index_type |= HAS_ELLIPSIS;
            indices[curr_idx].type = HAS_ELLIPSIS;
            indices[curr_idx].object = NULL;
            /* number of slices it is worth, won't update if it is 0: */
            indices[curr_idx].value = 0;

            ellipsis_pos = curr_idx;
            /* the used and new ndim will be found later */
            used_ndim += 0;
            new_ndim += 0;
            curr_idx += 1;
            continue;
        }

        /* Index is np.newaxis/None */
        else if (obj == Py_None) {
            index_type |= HAS_NEWAXIS;

            indices[curr_idx].type = HAS_NEWAXIS;
            indices[curr_idx].object = NULL;

            used_ndim += 0;
            new_ndim += 1;
            curr_idx += 1;
            continue;
        }

        /* Index is a slice object. */
        else if (PySlice_Check(obj)) {
            index_type |= HAS_SLICE;

            Py_INCREF(obj);
            indices[curr_idx].object = obj;
            indices[curr_idx].type = HAS_SLICE;
            used_ndim += 1;
            new_ndim += 1;
            curr_idx += 1;
            continue;
        }

        /*
         * Special case to allow 0-d boolean indexing with scalars.
         * Should be removed after boolean as integer deprecation.
         * Since this is always an error if it was not a boolean, we can
         * allow the 0-d special case before the rest.
         */
        else if (PyArray_NDIM(self) != 0) {
            /*
             * Single integer index, there are two cases here.
             * It could be an array, a 0-d array is handled
             * a bit weird however, so need to special case it.
             *
             * Check for integers first, purely for performance
             */
            if (PyLong_CheckExact(obj) || !PyArray_Check(obj)) {
                npy_intp ind = PyArray_PyIntAsIntp(obj);

                if (error_converting(ind)) {
                    /* not integer-like; fall through to the array path */
                    PyErr_Clear();
                }
                else {
                    index_type |= HAS_INTEGER;
                    indices[curr_idx].object = NULL;
                    indices[curr_idx].value = ind;
                    indices[curr_idx].type = HAS_INTEGER;
                    used_ndim += 1;
                    new_ndim += 0;
                    curr_idx += 1;
                    continue;
                }
            }
        }

        /*
         * At this point, we must have an index array (or array-like).
         * It might still be a (purely) bool special case, a 0-d integer
         * array (an array scalar) or something invalid.
         */

        if (!PyArray_Check(obj)) {
            PyArrayObject *tmp_arr;
            tmp_arr = (PyArrayObject *)PyArray_FROM_O(obj);
            if (tmp_arr == NULL) {
                /* TODO: Should maybe replace the error here? */
                goto failed_building_indices;
            }

            /*
             * For example an empty list can be cast to an integer array,
             * however it will default to a float one.
             */
            if (PyArray_SIZE(tmp_arr) == 0) {
                PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP);

                arr = (PyArrayObject *)PyArray_FromArray(tmp_arr, indtype,
                                                         NPY_ARRAY_FORCECAST);
                Py_DECREF(tmp_arr);
                if (arr == NULL) {
                    goto failed_building_indices;
                }
            }
            else {
                arr = tmp_arr;
            }
        }
        else {
            Py_INCREF(obj);
            arr = (PyArrayObject *)obj;
        }

        /* Check if the array is valid and fill the information */
        if (PyArray_ISBOOL(arr)) {
            /*
             * There are two types of boolean indices (which are equivalent,
             * for the most part though). A single boolean index of matching
             * shape is a boolean index. If this is not the case, it is
             * instead expanded into (multiple) integer array indices.
             */
            PyArrayObject *nonzero_result[NPY_MAXDIMS];

            if ((index_ndim == 1) && allow_boolean) {
                /*
                 * If shapes match exactly, this can be optimized as a single
                 * boolean index. When the dimensions are identical but the shapes are not,
                 * this is always an error. The check ensures that these errors are raised
                 * and match those of the generic path.
                 */
                if ((PyArray_NDIM(arr) == PyArray_NDIM(self))
                        && PyArray_CompareLists(PyArray_DIMS(arr),
                                                PyArray_DIMS(self),
                                                PyArray_NDIM(arr))) {

                    /* note: index_type is assigned, not or-ed, here */
                    index_type = HAS_BOOL;
                    indices[curr_idx].type = HAS_BOOL;
                    indices[curr_idx].object = (PyObject *)arr;

                    /* keep track anyway, just to be complete */
                    used_ndim = PyArray_NDIM(self);
                    fancy_ndim = PyArray_NDIM(self);
                    curr_idx += 1;
                    break;
                }
            }

            if (PyArray_NDIM(arr) == 0) {
                /*
                 * This can actually be well defined. A new axis is added,
                 * but at the same time no axis is "used". So if we have True,
                 * we add a new axis (a bit like with np.newaxis). If it is
                 * False, we add a new axis, but this axis has 0 entries.
                 */

                index_type |= HAS_FANCY;
                indices[curr_idx].type = HAS_0D_BOOL;

                /* TODO: This can't fail, right? Is there a faster way? */
                if (PyObject_IsTrue((PyObject *)arr)) {
                    n = 1;
                }
                else {
                    n = 0;
                }
                indices[curr_idx].value = n;
                indices[curr_idx].object = PyArray_Zeros(1, &n,
                                            PyArray_DescrFromType(NPY_INTP), 0);
                Py_DECREF(arr);

                if (indices[curr_idx].object == NULL) {
                    goto failed_building_indices;
                }

                used_ndim += 0;
                if (fancy_ndim < 1) {
                    fancy_ndim = 1;
                }
                curr_idx += 1;
                continue;
            }

            /* Convert the boolean array into multiple integer ones */
            n = _nonzero_indices((PyObject *)arr, nonzero_result);

            if (n < 0) {
                Py_DECREF(arr);
                goto failed_building_indices;
            }

            /* Check that we will not run out of indices to store new ones */
            if (curr_idx + n >= NPY_MAXDIMS * 2) {
                PyErr_SetString(PyExc_IndexError,
                                "too many indices for array");
                /* nonzero_result refs are not yet owned by `indices` */
                for (i=0; i < n; i++) {
                    Py_DECREF(nonzero_result[i]);
                }
                Py_DECREF(arr);
                goto failed_building_indices;
            }

            /* Add the arrays from the nonzero result to the index */
            index_type |= HAS_FANCY;
            for (i=0; i < n; i++) {
                indices[curr_idx].type = HAS_FANCY;
                indices[curr_idx].value = PyArray_DIM(arr, i);
                indices[curr_idx].object = (PyObject *)nonzero_result[i];

                used_ndim += 1;
                curr_idx += 1;
            }
            Py_DECREF(arr);

            /* All added indices have 1 dimension */
            if (fancy_ndim < 1) {
                fancy_ndim = 1;
            }
            continue;
        }

        /* Normal case of an integer array */
        else if (PyArray_ISINTEGER(arr)) {
            if (PyArray_NDIM(arr) == 0) {
                /*
                 * A 0-d integer array is an array scalar and can
                 * be dealt with the HAS_SCALAR_ARRAY flag.
                 * We could handle 0-d arrays early on, but this makes
                 * sure that array-likes or odder arrays are always
                 * handled right.
                 */
                npy_intp ind = PyArray_PyIntAsIntp((PyObject *)arr);

                Py_DECREF(arr);
                if (error_converting(ind)) {
                    goto failed_building_indices;
                }
                else {
                    index_type |= (HAS_INTEGER | HAS_SCALAR_ARRAY);
                    indices[curr_idx].object = NULL;
                    indices[curr_idx].value = ind;
                    indices[curr_idx].type = HAS_INTEGER;
                    used_ndim += 1;
                    new_ndim += 0;
                    curr_idx += 1;
                    continue;
                }
            }

            index_type |= HAS_FANCY;
            indices[curr_idx].type = HAS_FANCY;
            indices[curr_idx].value = -1;
            indices[curr_idx].object = (PyObject *)arr;

            used_ndim += 1;
            if (fancy_ndim < PyArray_NDIM(arr)) {
                fancy_ndim = PyArray_NDIM(arr);
            }
            curr_idx += 1;
            continue;
        }

        /*
         * The array does not have a valid type.
         */
        if ((PyObject *)arr == obj) {
            /* The input was an array already */
            PyErr_SetString(PyExc_IndexError,
                "arrays used as indices must be of integer (or boolean) type");
        }
        else {
            /* The input was not an array, so give a general error message */
            PyErr_SetString(PyExc_IndexError,
                    "only integers, slices (`:`), ellipsis (`...`), "
                    "numpy.newaxis (`None`) and integer or boolean "
                    "arrays are valid indices");
        }
        Py_DECREF(arr);
        goto failed_building_indices;
    }

    /*
     * Compare dimension of the index to the real ndim. this is
     * to find the ellipsis value or append an ellipsis if necessary.
     */
    if (used_ndim < PyArray_NDIM(self)) {
        if (index_type & HAS_ELLIPSIS) {
            indices[ellipsis_pos].value = PyArray_NDIM(self) - used_ndim;
            used_ndim = PyArray_NDIM(self);
            new_ndim += indices[ellipsis_pos].value;
        }
        else {
            /*
             * There is no ellipsis yet, but it is not a full index
             * so we append an ellipsis to the end.
             */
            index_type |= HAS_ELLIPSIS;
            indices[curr_idx].object = NULL;
            indices[curr_idx].type = HAS_ELLIPSIS;
            indices[curr_idx].value = PyArray_NDIM(self) - used_ndim;
            ellipsis_pos = curr_idx;

            used_ndim = PyArray_NDIM(self);
            new_ndim += indices[curr_idx].value;
            curr_idx += 1;
        }
    }
    else if (used_ndim > PyArray_NDIM(self)) {
        PyErr_Format(PyExc_IndexError,
                     "too many indices for array: "
                     "array is %d-dimensional, but %d were indexed",
                     PyArray_NDIM(self),
                     used_ndim);
        goto failed_building_indices;
    }
    else if (index_ndim == 0) {
        /*
         * 0-d index into 0-d array, i.e. array[()]
         * We consider this an integer index. Which means it will return
         * the scalar.
         * This makes sense, because then array[...] gives
         * an array and array[()] gives the scalar.
         */
        used_ndim = 0;
        index_type = HAS_INTEGER;
    }

    /* HAS_SCALAR_ARRAY requires cleaning up the index_type */
    if (index_type & HAS_SCALAR_ARRAY) {
        /* clear as info is unnecessary and makes life harder later */
        if (index_type & HAS_FANCY) {
            index_type -= HAS_SCALAR_ARRAY;
        }
        /* A full integer index sees array scalars as part of itself */
        else if (index_type == (HAS_INTEGER | HAS_SCALAR_ARRAY)) {
            index_type -= HAS_SCALAR_ARRAY;
        }
    }

    /*
     * At this point indices are all set correctly, no bounds checking
     * has been made and the new array may still have more dimensions
     * than is possible and boolean indexing arrays may have an incorrect shape.
     *
     * Check this now so we do not have to worry about it later.
     * It can happen for fancy indexing or with newaxis.
     * This means broadcasting errors in the case of too many dimensions
     * take less priority.
     */
    if (index_type & (HAS_NEWAXIS | HAS_FANCY)) {
        if (new_ndim + fancy_ndim > NPY_MAXDIMS) {
            PyErr_Format(PyExc_IndexError,
                         "number of dimensions must be within [0, %d], "
                         "indexing result would have %d",
                         NPY_MAXDIMS, (new_ndim + fancy_ndim));
            goto failed_building_indices;
        }

        /*
         * If we had a fancy index, we may have had a boolean array index.
         * So check if this had the correct shape now that we can find out
         * which axes it acts on.
         */
        used_ndim = 0;
        for (i = 0; i < curr_idx; i++) {
            /* value > 0 marks entries that came from a boolean expansion */
            if ((indices[i].type == HAS_FANCY) && indices[i].value > 0) {
                if (indices[i].value != PyArray_DIM(self, used_ndim)) {
                    char err_msg[174];

                    PyOS_snprintf(err_msg, sizeof(err_msg),
                        "boolean index did not match indexed array along "
                        "dimension %d; dimension is %" NPY_INTP_FMT
                        " but corresponding boolean dimension is %" NPY_INTP_FMT,
                        used_ndim, PyArray_DIM(self, used_ndim),
                        indices[i].value);
                    PyErr_SetString(PyExc_IndexError, err_msg);
                    goto failed_building_indices;
                }
            }

            if (indices[i].type == HAS_ELLIPSIS) {
                used_ndim += indices[i].value;
            }
            else if ((indices[i].type == HAS_NEWAXIS) ||
                     (indices[i].type == HAS_0D_BOOL)) {
                used_ndim += 0;
            }
            else {
                used_ndim += 1;
            }
        }
    }

    *num = curr_idx;
    *ndim = new_ndim + fancy_ndim;
    *out_fancy_ndim = fancy_ndim;

    multi_DECREF(raw_indices, index_ndim);

    return index_type;

  failed_building_indices:
    /* only the first curr_idx entries of `indices` hold references */
    for (i=0; i < curr_idx; i++) {
        Py_XDECREF(indices[i].object);
    }
    multi_DECREF(raw_indices, index_ndim);
    return -1;
}
|
831
|
|
|
832
|
|
|
833
|
|
/**
|
834
|
|
* Check if self has memory overlap with one of the index arrays, or with extra_op.
|
835
|
|
*
|
836
|
|
* @returns 1 if memory overlap found, 0 if not.
|
837
|
|
*/
|
838
|
|
NPY_NO_EXPORT int
|
839
|
1
|
index_has_memory_overlap(PyArrayObject *self,
|
840
|
|
int index_type, npy_index_info *indices, int num,
|
841
|
|
PyObject *extra_op)
|
842
|
|
{
|
843
|
|
int i;
|
844
|
|
|
845
|
1
|
if (index_type & (HAS_FANCY | HAS_BOOL)) {
|
846
|
1
|
for (i = 0; i < num; ++i) {
|
847
|
1
|
if (indices[i].object != NULL &&
|
848
|
1
|
PyArray_Check(indices[i].object) &&
|
849
|
1
|
solve_may_share_memory(self,
|
850
|
1
|
(PyArrayObject *)indices[i].object,
|
851
|
|
1) != 0) {
|
852
|
|
return 1;
|
853
|
|
}
|
854
|
|
}
|
855
|
|
}
|
856
|
|
|
857
|
1
|
if (extra_op != NULL && PyArray_Check(extra_op) &&
|
858
|
1
|
solve_may_share_memory(self, (PyArrayObject *)extra_op, 1) != 0) {
|
859
|
|
return 1;
|
860
|
|
}
|
861
|
|
|
862
|
|
return 0;
|
863
|
|
}
|
864
|
|
|
865
|
|
|
866
|
|
/**
|
867
|
|
* Get pointer for an integer index.
|
868
|
|
*
|
869
|
|
* For a purely integer index, set ptr to the memory address.
|
870
|
|
* Returns 0 on success, -1 on failure.
|
871
|
|
* The caller must ensure that the index is a full integer
|
872
|
|
* one.
|
873
|
|
*
|
874
|
|
* @param Array being indexed
|
875
|
|
* @param result pointer
|
876
|
|
* @param parsed index information
|
877
|
|
* @param number of indices
|
878
|
|
*
|
879
|
|
* @return 0 on success -1 on failure
|
880
|
|
*/
|
881
|
|
static int
|
882
|
1
|
get_item_pointer(PyArrayObject *self, char **ptr,
|
883
|
|
npy_index_info *indices, int index_num) {
|
884
|
|
int i;
|
885
|
1
|
*ptr = PyArray_BYTES(self);
|
886
|
1
|
for (i=0; i < index_num; i++) {
|
887
|
1
|
if ((check_and_adjust_index(&(indices[i].value),
|
888
|
1
|
PyArray_DIMS(self)[i], i, NULL)) < 0) {
|
889
|
|
return -1;
|
890
|
|
}
|
891
|
1
|
*ptr += PyArray_STRIDE(self, i) * indices[i].value;
|
892
|
|
}
|
893
|
|
return 0;
|
894
|
|
}
|
895
|
|
|
896
|
|
|
897
|
|
/**
 * Get view into an array using all non-array indices.
 *
 * For any index, get a view of the subspace into the original
 * array. If there are no fancy indices, this is the result of
 * the indexing operation.
 * Ensure_array allows to fetch a safe subspace view for advanced
 * indexing.
 *
 * @param self Array being indexed
 * @param view Resulting array (new reference)
 * @param indices Parsed index information
 * @param index_num Number of indices
 * @param ensure_array Whether the result must be a base-class ndarray
 *        (nonzero) or may inherit the type from self (zero)
 *
 * @return 0 on success -1 on failure
 */
static int
get_view_from_index(PyArrayObject *self, PyArrayObject **view,
                    npy_index_info *indices, int index_num, int ensure_array) {
    npy_intp new_strides[NPY_MAXDIMS];
    npy_intp new_shape[NPY_MAXDIMS];
    int i, j;
    /* new_dim walks the view's dimensions, orig_dim walks self's */
    int new_dim = 0;
    int orig_dim = 0;
    char *data_ptr = PyArray_BYTES(self);

    /* for slice parsing */
    npy_intp start, stop, step, n_steps;

    for (i=0; i < index_num; i++) {
        switch (indices[i].type) {
            case HAS_INTEGER:
                /* Integer index: offset the data pointer, drop the axis. */
                if ((check_and_adjust_index(&indices[i].value,
                                PyArray_DIMS(self)[orig_dim], orig_dim,
                                NULL)) < 0) {
                    return -1;
                }
                data_ptr += PyArray_STRIDE(self, orig_dim) * indices[i].value;

                new_dim += 0;
                orig_dim += 1;
                break;
            case HAS_ELLIPSIS:
                /*
                 * Ellipsis: indices[i].value holds the number of axes it
                 * expands to; copy each one through unchanged.
                 */
                for (j=0; j < indices[i].value; j++) {
                    new_strides[new_dim] = PyArray_STRIDE(self, orig_dim);
                    new_shape[new_dim] = PyArray_DIMS(self)[orig_dim];
                    new_dim += 1;
                    orig_dim += 1;
                }
                break;
            case HAS_SLICE:
                if (PySlice_GetIndicesEx(indices[i].object,
                                         PyArray_DIMS(self)[orig_dim],
                                         &start, &stop, &step, &n_steps) < 0) {
                    return -1;
                }
                if (n_steps <= 0) {
                    /* TODO: Always points to start then, could change that */
                    n_steps = 0;
                    step = 1;
                    start = 0;
                }

                /* Slice: offset to start, scale the stride by step. */
                data_ptr += PyArray_STRIDE(self, orig_dim) * start;
                new_strides[new_dim] = PyArray_STRIDE(self, orig_dim) * step;
                new_shape[new_dim] = n_steps;
                new_dim += 1;
                orig_dim += 1;
                break;
            case HAS_NEWAXIS:
                /* np.newaxis: insert a length-1 axis, consume no axis. */
                new_strides[new_dim] = 0;
                new_shape[new_dim] = 1;
                new_dim += 1;
                break;
            /* Fancy and 0-d boolean indices are ignored here */
            case HAS_0D_BOOL:
                /* Does not operate along a real axis, nothing to do. */
                break;
            default:
                /*
                 * Remaining (fancy) index types consume an original axis
                 * but contribute no dimension to this subspace view.
                 */
                new_dim += 0;
                orig_dim += 1;
                break;
        }
    }

    /* Create the new view and set the base array */
    Py_INCREF(PyArray_DESCR(self));
    *view = (PyArrayObject *)PyArray_NewFromDescrAndBase(
            ensure_array ? &PyArray_Type : Py_TYPE(self),
            PyArray_DESCR(self),
            new_dim, new_shape, new_strides, data_ptr,
            PyArray_FLAGS(self),
            ensure_array ? NULL : (PyObject *)self,
            (PyObject *)self);
    if (*view == NULL) {
        return -1;
    }

    return 0;
}
|
997
|
|
|
998
|
|
|
999
|
|
/*
 * Implements boolean indexing. This produces a one-dimensional
 * array which picks out all of the elements of 'self' for which
 * the corresponding element of 'op' is True.
 *
 * This operation is somewhat unfortunate, because to produce
 * a one-dimensional output array, it has to choose a particular
 * iteration order, in the case of NumPy that is always C order even
 * though this function allows different choices.
 *
 * Returns a new reference on success, NULL (with an exception set)
 * on failure.
 */
NPY_NO_EXPORT PyArrayObject *
array_boolean_subscript(PyArrayObject *self,
                        PyArrayObject *bmask, NPY_ORDER order)
{
    npy_intp size, itemsize;
    char *ret_data;
    PyArray_Descr *dtype;
    PyArrayObject *ret;
    int needs_api = 0;

    /* Number of True entries == length of the 1-d result. */
    size = count_boolean_trues(PyArray_NDIM(bmask), PyArray_DATA(bmask),
                               PyArray_DIMS(bmask), PyArray_STRIDES(bmask));

    /* Allocate the output of the boolean indexing */
    dtype = PyArray_DESCR(self);
    Py_INCREF(dtype);
    ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &size,
                                                NULL, NULL, 0, NULL);
    if (ret == NULL) {
        return NULL;
    }

    itemsize = dtype->elsize;
    ret_data = PyArray_DATA(ret);

    /* Create an iterator for the data */
    if (size > 0) {
        NpyIter *iter;
        PyArrayObject *op[2] = {self, bmask};
        npy_uint32 flags, op_flags[2];
        npy_intp fixed_strides[3];
        PyArray_StridedUnaryOp *stransfer = NULL;
        NpyAuxData *transferdata = NULL;

        NpyIter_IterNextFunc *iternext;
        npy_intp innersize, *innerstrides;
        char **dataptrs;

        npy_intp self_stride, bmask_stride, subloopsize;
        char *self_data;
        char *bmask_data;
        NPY_BEGIN_THREADS_DEF;

        /* Set up the iterator */
        flags = NPY_ITER_EXTERNAL_LOOP | NPY_ITER_REFS_OK;
        op_flags[0] = NPY_ITER_READONLY | NPY_ITER_NO_BROADCAST;
        op_flags[1] = NPY_ITER_READONLY;

        iter = NpyIter_MultiNew(2, op, flags, order, NPY_NO_CASTING,
                                op_flags, NULL);
        if (iter == NULL) {
            Py_DECREF(ret);
            return NULL;
        }

        /* Get a dtype transfer function (self dtype -> same dtype copy) */
        NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
        if (PyArray_GetDTypeTransferFunction(
                        IsUintAligned(self) && IsAligned(self),
                        fixed_strides[0], itemsize,
                        dtype, dtype,
                        0,
                        &stransfer, &transferdata,
                        &needs_api) != NPY_SUCCEED) {
            Py_DECREF(ret);
            NpyIter_Deallocate(iter);
            return NULL;
        }

        /* Get the values needed for the inner loop */
        iternext = NpyIter_GetIterNext(iter, NULL);
        if (iternext == NULL) {
            Py_DECREF(ret);
            NpyIter_Deallocate(iter);
            NPY_AUXDATA_FREE(transferdata);
            return NULL;
        }

        NPY_BEGIN_THREADS_NDITER(iter);

        innerstrides = NpyIter_GetInnerStrideArray(iter);
        dataptrs = NpyIter_GetDataPtrArray(iter);

        self_stride = innerstrides[0];
        bmask_stride = innerstrides[1];
        int res = 0;
        do {
            innersize = *NpyIter_GetInnerLoopSizePtr(iter);
            self_data = dataptrs[0];
            bmask_data = dataptrs[1];

            /*
             * Alternate between runs of False (skipped) and runs of
             * True (copied into the output) within each inner loop.
             */
            while (innersize > 0) {
                /* Skip masked values */
                bmask_data = npy_memchr(bmask_data, 0, bmask_stride,
                                        innersize, &subloopsize, 1);
                innersize -= subloopsize;
                self_data += subloopsize * self_stride;
                /* Process unmasked values */
                bmask_data = npy_memchr(bmask_data, 0, bmask_stride, innersize,
                                        &subloopsize, 0);
                res = stransfer(ret_data, itemsize, self_data, self_stride,
                                subloopsize, itemsize, transferdata);
                if (res < 0) {
                    break;
                }
                innersize -= subloopsize;
                self_data += subloopsize * self_stride;
                ret_data += subloopsize * itemsize;
            }
        } while (iternext(iter));

        NPY_END_THREADS;

        if (!NpyIter_Deallocate(iter)) {
            res = -1;
        }
        NPY_AUXDATA_FREE(transferdata);
        if (res < 0) {
            /* Should be practically impossible, since there is no cast */
            Py_DECREF(ret);
            return NULL;
        }
    }

    /* For subclasses, wrap the base-class result in the subclass type. */
    if (!PyArray_CheckExact(self)) {
        PyArrayObject *tmp = ret;

        Py_INCREF(dtype);
        ret = (PyArrayObject *)PyArray_NewFromDescrAndBase(
                Py_TYPE(self), dtype,
                1, &size, PyArray_STRIDES(ret), PyArray_BYTES(ret),
                PyArray_FLAGS(self), (PyObject *)self, (PyObject *)tmp);

        Py_DECREF(tmp);
        if (ret == NULL) {
            return NULL;
        }
    }

    return ret;
}
|
1150
|
|
|
1151
|
|
/*
 * Implements boolean indexing assignment. This takes the one-dimensional
 * array 'v' and assigns its values to all of the elements of 'self' for which
 * the corresponding element of 'op' is True.
 *
 * This operation is somewhat unfortunate, because to match up with
 * a one-dimensional output array, it has to choose a particular
 * iteration order, in the case of NumPy that is always C order even
 * though this function allows different choices.
 *
 * Returns 0 on success, -1 on failure.
 */
NPY_NO_EXPORT int
array_assign_boolean_subscript(PyArrayObject *self,
                    PyArrayObject *bmask, PyArrayObject *v, NPY_ORDER order)
{
    npy_intp size, src_itemsize, v_stride;
    char *v_data;
    int needs_api = 0;
    npy_intp bmask_size;

    /* The mask must have a boolean dtype. */
    if (PyArray_DESCR(bmask)->type_num != NPY_BOOL) {
        PyErr_SetString(PyExc_TypeError,
                "NumPy boolean array indexing assignment "
                "requires a boolean index");
        return -1;
    }

    /* The value array must be 0-d or 1-d. */
    if (PyArray_NDIM(v) > 1) {
        PyErr_Format(PyExc_TypeError,
                "NumPy boolean array indexing assignment "
                "requires a 0 or 1-dimensional input, input "
                "has %d dimensions", PyArray_NDIM(v));
        return -1;
    }

    if (PyArray_NDIM(bmask) != PyArray_NDIM(self)) {
        PyErr_SetString(PyExc_ValueError,
                "The boolean mask assignment indexing array "
                "must have the same number of dimensions as "
                "the array being indexed");
        return -1;
    }

    /* Number of destination elements == True count (after broadcast). */
    size = count_boolean_trues(PyArray_NDIM(bmask), PyArray_DATA(bmask),
                               PyArray_DIMS(bmask), PyArray_STRIDES(bmask));
    /* Correction factor for broadcasting 'bmask' to 'self' */
    bmask_size = PyArray_SIZE(bmask);
    if (bmask_size > 0) {
        size *= PyArray_SIZE(self) / bmask_size;
    }

    /* Tweak the strides for 0-dim and broadcasting cases */
    if (PyArray_NDIM(v) > 0 && PyArray_DIMS(v)[0] != 1) {
        if (size != PyArray_DIMS(v)[0]) {
            PyErr_Format(PyExc_ValueError,
                    "NumPy boolean array indexing assignment "
                    "cannot assign %" NPY_INTP_FMT " input values to "
                    "the %" NPY_INTP_FMT " output values where the mask is true",
                    PyArray_DIMS(v)[0], size);
            return -1;
        }
        v_stride = PyArray_STRIDES(v)[0];
    }
    else {
        /* 0-d or length-1 'v': broadcast the single value. */
        v_stride = 0;
    }

    src_itemsize = PyArray_DESCR(v)->elsize;
    v_data = PyArray_DATA(v);

    /* Create an iterator for the data */
    int res = 0;
    if (size > 0) {
        NpyIter *iter;
        PyArrayObject *op[2] = {self, bmask};
        npy_uint32 flags, op_flags[2];
        npy_intp fixed_strides[3];

        NpyIter_IterNextFunc *iternext;
        npy_intp innersize, *innerstrides;
        char **dataptrs;

        PyArray_StridedUnaryOp *stransfer = NULL;
        NpyAuxData *transferdata = NULL;
        npy_intp self_stride, bmask_stride, subloopsize;
        char *self_data;
        char *bmask_data;
        NPY_BEGIN_THREADS_DEF;

        /* Set up the iterator */
        flags = NPY_ITER_EXTERNAL_LOOP | NPY_ITER_REFS_OK;
        op_flags[0] = NPY_ITER_WRITEONLY | NPY_ITER_NO_BROADCAST;
        op_flags[1] = NPY_ITER_READONLY;

        iter = NpyIter_MultiNew(2, op, flags, order, NPY_NO_CASTING,
                                op_flags, NULL);
        if (iter == NULL) {
            return -1;
        }

        /* Get the values needed for the inner loop */
        iternext = NpyIter_GetIterNext(iter, NULL);
        if (iternext == NULL) {
            NpyIter_Deallocate(iter);
            return -1;
        }

        innerstrides = NpyIter_GetInnerStrideArray(iter);
        dataptrs = NpyIter_GetDataPtrArray(iter);

        self_stride = innerstrides[0];
        bmask_stride = innerstrides[1];

        /* Get a dtype transfer function (v dtype -> self dtype) */
        NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
        if (PyArray_GetDTypeTransferFunction(
                        IsUintAligned(self) && IsAligned(self) &&
                        IsUintAligned(v) && IsAligned(v),
                        v_stride, fixed_strides[0],
                        PyArray_DESCR(v), PyArray_DESCR(self),
                        0,
                        &stransfer, &transferdata,
                        &needs_api) != NPY_SUCCEED) {
            NpyIter_Deallocate(iter);
            return -1;
        }

        /* Only release the GIL if the transfer loop never needs it. */
        if (!needs_api) {
            NPY_BEGIN_THREADS_NDITER(iter);
        }

        do {
            innersize = *NpyIter_GetInnerLoopSizePtr(iter);
            self_data = dataptrs[0];
            bmask_data = dataptrs[1];

            /*
             * Alternate between runs of False (skipped) and runs of
             * True (assigned from 'v') within each inner loop.
             */
            while (innersize > 0) {
                /* Skip masked values */
                bmask_data = npy_memchr(bmask_data, 0, bmask_stride,
                                        innersize, &subloopsize, 1);
                innersize -= subloopsize;
                self_data += subloopsize * self_stride;
                /* Process unmasked values */
                bmask_data = npy_memchr(bmask_data, 0, bmask_stride, innersize,
                                        &subloopsize, 0);
                res = stransfer(self_data, self_stride, v_data, v_stride,
                                subloopsize, src_itemsize, transferdata);
                if (res < 0) {
                    break;
                }
                innersize -= subloopsize;
                self_data += subloopsize * self_stride;
                v_data += subloopsize * v_stride;
            }
        } while (iternext(iter));

        if (!needs_api) {
            NPY_END_THREADS;
        }

        NPY_AUXDATA_FREE(transferdata);
        if (!NpyIter_Deallocate(iter)) {
            res = -1;
        }
    }

    return res;
}
|
1320
|
|
|
1321
|
|
|
1322
|
|
/*
|
1323
|
|
* C-level integer indexing always returning an array and never a scalar.
|
1324
|
|
* Works also for subclasses, but it will not be called on one from the
|
1325
|
|
* Python API.
|
1326
|
|
*
|
1327
|
|
* This function does not accept negative indices because it is called by
|
1328
|
|
* PySequence_GetItem (through array_item) and that converts them to
|
1329
|
|
* positive indices.
|
1330
|
|
*/
|
1331
|
|
NPY_NO_EXPORT PyObject *
|
1332
|
1
|
array_item_asarray(PyArrayObject *self, npy_intp i)
|
1333
|
|
{
|
1334
|
|
npy_index_info indices[2];
|
1335
|
|
PyObject *result;
|
1336
|
|
|
1337
|
1
|
if (PyArray_NDIM(self) == 0) {
|
1338
|
1
|
PyErr_SetString(PyExc_IndexError,
|
1339
|
|
"too many indices for array");
|
1340
|
1
|
return NULL;
|
1341
|
|
}
|
1342
|
1
|
if (i < 0) {
|
1343
|
|
/* This is an error, but undo PySequence_GetItem fix for message */
|
1344
|
1
|
i -= PyArray_DIM(self, 0);
|
1345
|
|
}
|
1346
|
|
|
1347
|
1
|
indices[0].value = i;
|
1348
|
1
|
indices[0].type = HAS_INTEGER;
|
1349
|
1
|
indices[1].value = PyArray_NDIM(self) - 1;
|
1350
|
1
|
indices[1].type = HAS_ELLIPSIS;
|
1351
|
1
|
if (get_view_from_index(self, (PyArrayObject **)&result,
|
1352
|
|
indices, 2, 0) < 0) {
|
1353
|
|
return NULL;
|
1354
|
|
}
|
1355
|
1
|
return result;
|
1356
|
|
}
|
1357
|
|
|
1358
|
|
|
1359
|
|
/*
|
1360
|
|
* Python C-Api level item subscription (implementation for PySequence_GetItem)
|
1361
|
|
*
|
1362
|
|
* Negative indices are not accepted because PySequence_GetItem converts
|
1363
|
|
* them to positive indices before calling this.
|
1364
|
|
*/
|
1365
|
|
NPY_NO_EXPORT PyObject *
|
1366
|
1
|
array_item(PyArrayObject *self, Py_ssize_t i)
|
1367
|
|
{
|
1368
|
1
|
if (PyArray_NDIM(self) == 1) {
|
1369
|
|
char *item;
|
1370
|
|
npy_index_info index;
|
1371
|
|
|
1372
|
1
|
if (i < 0) {
|
1373
|
|
/* This is an error, but undo PySequence_GetItem fix for message */
|
1374
|
1
|
i -= PyArray_DIM(self, 0);
|
1375
|
|
}
|
1376
|
|
|
1377
|
1
|
index.value = i;
|
1378
|
1
|
index.type = HAS_INTEGER;
|
1379
|
1
|
if (get_item_pointer(self, &item, &index, 1) < 0) {
|
1380
|
|
return NULL;
|
1381
|
|
}
|
1382
|
1
|
return PyArray_Scalar(item, PyArray_DESCR(self), (PyObject *)self);
|
1383
|
|
}
|
1384
|
|
else {
|
1385
|
1
|
return array_item_asarray(self, i);
|
1386
|
|
}
|
1387
|
|
}
|
1388
|
|
|
1389
|
|
|
1390
|
|
/* make sure subscript always returns an array object */
|
1391
|
|
NPY_NO_EXPORT PyObject *
|
1392
|
1
|
array_subscript_asarray(PyArrayObject *self, PyObject *op)
|
1393
|
|
{
|
1394
|
1
|
return PyArray_EnsureAnyArray(array_subscript(self, op));
|
1395
|
|
}
|
1396
|
|
|
1397
|
|
/*
|
1398
|
|
* Attempts to subscript an array using a field name or list of field names.
|
1399
|
|
*
|
1400
|
|
* ret = 0, view != NULL: view points to the requested fields of arr
|
1401
|
|
* ret = 0, view == NULL: an error occurred
|
1402
|
|
* ret = -1, view == NULL: unrecognized input, this is not a field index.
|
1403
|
|
*/
|
1404
|
|
NPY_NO_EXPORT int
|
1405
|
1
|
_get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
|
1406
|
|
{
|
1407
|
1
|
*view = NULL;
|
1408
|
|
|
1409
|
|
/* first check for a single field name */
|
1410
|
1
|
if (PyUnicode_Check(ind)) {
|
1411
|
|
PyObject *tup;
|
1412
|
|
PyArray_Descr *fieldtype;
|
1413
|
|
npy_intp offset;
|
1414
|
|
|
1415
|
|
/* get the field offset and dtype */
|
1416
|
1
|
tup = PyDict_GetItemWithError(PyArray_DESCR(arr)->fields, ind);
|
1417
|
1
|
if (tup == NULL && PyErr_Occurred()) {
|
1418
|
|
return 0;
|
1419
|
|
}
|
1420
|
1
|
else if (tup == NULL){
|
1421
|
1
|
PyObject *errmsg = PyUnicode_FromString("no field of name ");
|
1422
|
1
|
PyUString_Concat(&errmsg, ind);
|
1423
|
1
|
PyErr_SetObject(PyExc_ValueError, errmsg);
|
1424
|
1
|
Py_DECREF(errmsg);
|
1425
|
|
return 0;
|
1426
|
|
}
|
1427
|
1
|
if (_unpack_field(tup, &fieldtype, &offset) < 0) {
|
1428
|
|
return 0;
|
1429
|
|
}
|
1430
|
|
|
1431
|
|
/* view the array at the new offset+dtype */
|
1432
|
1
|
Py_INCREF(fieldtype);
|
1433
|
1
|
*view = (PyArrayObject*)PyArray_NewFromDescr_int(
|
1434
|
1
|
Py_TYPE(arr),
|
1435
|
|
fieldtype,
|
1436
|
|
PyArray_NDIM(arr),
|
1437
|
1
|
PyArray_SHAPE(arr),
|
1438
|
1
|
PyArray_STRIDES(arr),
|
1439
|
1
|
PyArray_BYTES(arr) + offset,
|
1440
|
|
PyArray_FLAGS(arr),
|
1441
|
|
(PyObject *)arr, (PyObject *)arr,
|
1442
|
|
0, 1);
|
1443
|
|
if (*view == NULL) {
|
1444
|
|
return 0;
|
1445
|
|
}
|
1446
|
|
return 0;
|
1447
|
|
}
|
1448
|
|
|
1449
|
|
/* next check for a list of field names */
|
1450
|
1
|
else if (PySequence_Check(ind) && !PyTuple_Check(ind)) {
|
1451
|
|
npy_intp seqlen, i;
|
1452
|
|
PyArray_Descr *view_dtype;
|
1453
|
|
|
1454
|
1
|
seqlen = PySequence_Size(ind);
|
1455
|
|
|
1456
|
|
/* quit if have a fake sequence-like, which errors on len()*/
|
1457
|
1
|
if (seqlen == -1) {
|
1458
|
0
|
PyErr_Clear();
|
1459
|
0
|
return -1;
|
1460
|
|
}
|
1461
|
|
/* 0-len list is handled elsewhere as an integer index */
|
1462
|
1
|
if (seqlen == 0) {
|
1463
|
|
return -1;
|
1464
|
|
}
|
1465
|
|
|
1466
|
|
/* check the items are strings */
|
1467
|
1
|
for (i = 0; i < seqlen; i++) {
|
1468
|
|
npy_bool is_string;
|
1469
|
1
|
PyObject *item = PySequence_GetItem(ind, i);
|
1470
|
1
|
if (item == NULL) {
|
1471
|
1
|
PyErr_Clear();
|
1472
|
1
|
return -1;
|
1473
|
|
}
|
1474
|
1
|
is_string = PyUnicode_Check(item);
|
1475
|
1
|
Py_DECREF(item);
|
1476
|
1
|
if (!is_string) {
|
1477
|
|
return -1;
|
1478
|
|
}
|
1479
|
|
}
|
1480
|
|
|
1481
|
|
/* Call into the dtype subscript */
|
1482
|
1
|
view_dtype = arraydescr_field_subset_view(PyArray_DESCR(arr), ind);
|
1483
|
1
|
if (view_dtype == NULL) {
|
1484
|
|
return 0;
|
1485
|
|
}
|
1486
|
|
|
1487
|
1
|
*view = (PyArrayObject*)PyArray_NewFromDescr_int(
|
1488
|
1
|
Py_TYPE(arr),
|
1489
|
|
view_dtype,
|
1490
|
|
PyArray_NDIM(arr),
|
1491
|
1
|
PyArray_SHAPE(arr),
|
1492
|
|
PyArray_STRIDES(arr),
|
1493
|
|
PyArray_DATA(arr),
|
1494
|
|
PyArray_FLAGS(arr),
|
1495
|
|
(PyObject *)arr, (PyObject *)arr,
|
1496
|
|
0, 1);
|
1497
|
|
|
1498
|
|
if (*view == NULL) {
|
1499
|
|
return 0;
|
1500
|
|
}
|
1501
|
|
|
1502
|
|
return 0;
|
1503
|
|
}
|
1504
|
|
return -1;
|
1505
|
|
}
|
1506
|
|
|
1507
|
|
/*
 * General function for indexing a NumPy array with a Python object.
 * Returns a new reference (scalar, view, or copy depending on the index
 * type), or NULL with an exception set.
 */
NPY_NO_EXPORT PyObject *
array_subscript(PyArrayObject *self, PyObject *op)
{
    int index_type;
    int index_num;
    int i, ndim, fancy_ndim;
    /*
     * Index info array. We can have twice as many indices as dimensions
     * (because of None). The + 1 is to not need to check as much.
     */
    npy_index_info indices[NPY_MAXDIMS * 2 + 1];

    PyArrayObject *view = NULL;
    PyObject *result = NULL;

    PyArrayMapIterObject * mit = NULL;

    /* return fields if op is a string index */
    if (PyDataType_HASFIELDS(PyArray_DESCR(self))) {
        PyArrayObject *view;
        int ret = _get_field_view(self, op, &view);
        /* ret == 0 means op was recognized as a field index. */
        if (ret == 0){
            if (view == NULL) {
                return NULL;
            }
            return (PyObject*)view;
        }
    }

    /* Prepare the indices */
    index_type = prepare_index(self, op, indices, &index_num,
                               &ndim, &fancy_ndim, 1);

    if (index_type < 0) {
        return NULL;
    }
    /* Full integer index */
    else if (index_type == HAS_INTEGER) {
        char *item;
        if (get_item_pointer(self, &item, indices, index_num) < 0) {
            goto finish;
        }
        result = (PyObject *) PyArray_Scalar(item, PyArray_DESCR(self),
                                             (PyObject *)self);
        /* Because the index is full integer, we do not need to decref */
        return result;
    }
    /* Single boolean array */
    else if (index_type == HAS_BOOL) {
        result = (PyObject *)array_boolean_subscript(self,
                                    (PyArrayObject *)indices[0].object,
                                    NPY_CORDER);
        goto finish;
    }
    /* If it is only a single ellipsis, just return a view */
    else if (index_type == HAS_ELLIPSIS) {
        /*
         * TODO: Should this be a view or not? The only reason not would be
         *       optimization (i.e. of array[...] += 1) I think.
         *       Before, it was just self for a single ellipsis.
         */
        result = PyArray_View(self, NULL, NULL);
        /* A single ellipsis, so no need to decref */
        return result;
    }
    /*
     * View based indexing.
     * There are two cases here. First we need to create a simple view,
     * second we need to create a (possibly invalid) view for the
     * subspace to the fancy index. This procedure is identical.
     */
    else if (index_type & (HAS_SLICE | HAS_NEWAXIS |
                           HAS_ELLIPSIS | HAS_INTEGER)) {
        if (get_view_from_index(self, &view, indices, index_num,
                                (index_type & HAS_FANCY)) < 0) {
            goto finish;
        }

        /*
         * There is a scalar array, so we need to force a copy to simulate
         * fancy indexing.
         */
        if (index_type & HAS_SCALAR_ARRAY) {
            result = PyArray_NewCopy(view, NPY_KEEPORDER);
            goto finish;
        }
    }

    /* If there is no fancy indexing, we have the result */
    if (!(index_type & HAS_FANCY)) {
        result = (PyObject *)view;
        Py_INCREF(result);
        goto finish;
    }

    /*
     * Special case for very simple 1-d fancy indexing, which however
     * is quite common. This saves not only a lot of setup time in the
     * iterator, but also is faster (must be exactly fancy because
     * we don't support 0-d booleans here)
     */
    if (index_type == HAS_FANCY && index_num == 1) {
        /* The array being indexed has one dimension and it is a fancy index */
        PyArrayObject *ind = (PyArrayObject*)indices[0].object;

        /* Check if the index is simple enough */
        if (PyArray_TRIVIALLY_ITERABLE(ind) &&
                /* Check if the type is equivalent to INTP */
                PyArray_ITEMSIZE(ind) == sizeof(npy_intp) &&
                PyArray_DESCR(ind)->kind == 'i' &&
                IsUintAligned(ind) &&
                PyDataType_ISNOTSWAPPED(PyArray_DESCR(ind))) {

            Py_INCREF(PyArray_DESCR(self));
            result = PyArray_NewFromDescr(&PyArray_Type,
                                          PyArray_DESCR(self),
                                          PyArray_NDIM(ind),
                                          PyArray_SHAPE(ind),
                                          NULL, NULL,
                                          /* Same order as indices */
                                          PyArray_ISFORTRAN(ind) ?
                                              NPY_ARRAY_F_CONTIGUOUS : 0,
                                          NULL);
            if (result == NULL) {
                goto finish;
            }

            if (mapiter_trivial_get(self, ind, (PyArrayObject *)result) < 0) {
                Py_DECREF(result);
                result = NULL;
                goto finish;
            }

            goto wrap_out_array;
        }
    }

    /* fancy indexing has to be used. And view is the subspace. */
    mit = (PyArrayMapIterObject *)PyArray_MapIterNew(indices, index_num,
                                                     index_type,
                                                     ndim, fancy_ndim,
                                                     self, view, 0,
                                                     NPY_ITER_READONLY,
                                                     NPY_ITER_WRITEONLY,
                                                     NULL, PyArray_DESCR(self));
    if (mit == NULL) {
        goto finish;
    }

    if (mit->numiter > 1 || mit->size == 0) {
        /*
         * If it is one, the inner loop checks indices, otherwise
         * check indices beforehand, because it is much faster if
         * broadcasting occurs and most likely no big overhead.
         * The inner loop optimization skips index checks for size == 0 though.
         */
        if (PyArray_MapIterCheckIndices(mit) < 0) {
            goto finish;
        }
    }

    /* Reset the outer iterator */
    if (NpyIter_Reset(mit->outer, NULL) < 0) {
        goto finish;
    }

    if (mapiter_get(mit) < 0) {
        goto finish;
    }

    result = (PyObject *)mit->extra_op;
    Py_INCREF(result);

    /* Undo the axis reordering the map iterator may have applied. */
    if (mit->consec) {
        PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&result, 1);
    }

  wrap_out_array:
    if (!PyArray_CheckExact(self)) {
        /*
         * Need to create a new array as if the old one never existed.
         */
        PyArrayObject *tmp_arr = (PyArrayObject *)result;

        Py_INCREF(PyArray_DESCR(tmp_arr));
        result = PyArray_NewFromDescrAndBase(
                Py_TYPE(self),
                PyArray_DESCR(tmp_arr),
                PyArray_NDIM(tmp_arr),
                PyArray_SHAPE(tmp_arr),
                PyArray_STRIDES(tmp_arr),
                PyArray_BYTES(tmp_arr),
                PyArray_FLAGS(tmp_arr),
                (PyObject *)self, (PyObject *)tmp_arr);
        Py_DECREF(tmp_arr);
        if (result == NULL) {
            goto finish;
        }
    }

  finish:
    Py_XDECREF(mit);
    Py_XDECREF(view);
    /* Clean up indices */
    for (i=0; i < index_num; i++) {
        Py_XDECREF(indices[i].object);
    }
    return result;
}
|
1724
|
|
|
1725
|
|
|
1726
|
|
/*
|
1727
|
|
* Python C-Api level item assignment (implementation for PySequence_SetItem)
|
1728
|
|
*
|
1729
|
|
* Negative indices are not accepted because PySequence_SetItem converts
|
1730
|
|
* them to positive indices before calling this.
|
1731
|
|
*/
|
1732
|
|
NPY_NO_EXPORT int
|
1733
|
1
|
array_assign_item(PyArrayObject *self, Py_ssize_t i, PyObject *op)
|
1734
|
|
{
|
1735
|
|
npy_index_info indices[2];
|
1736
|
|
|
1737
|
1
|
if (op == NULL) {
|
1738
|
1
|
PyErr_SetString(PyExc_ValueError,
|
1739
|
|
"cannot delete array elements");
|
1740
|
1
|
return -1;
|
1741
|
|
}
|
1742
|
1
|
if (PyArray_FailUnlessWriteable(self, "assignment destination") < 0) {
|
1743
|
|
return -1;
|
1744
|
|
}
|
1745
|
1
|
if (PyArray_NDIM(self) == 0) {
|
1746
|
1
|
PyErr_SetString(PyExc_IndexError,
|
1747
|
|
"too many indices for array");
|
1748
|
1
|
return -1;
|
1749
|
|
}
|
1750
|
|
|
1751
|
1
|
if (i < 0) {
|
1752
|
|
/* This is an error, but undo PySequence_SetItem fix for message */
|
1753
|
1
|
i -= PyArray_DIM(self, 0);
|
1754
|
|
}
|
1755
|
|
|
1756
|
1
|
indices[0].value = i;
|
1757
|
1
|
indices[0].type = HAS_INTEGER;
|
1758
|
1
|
if (PyArray_NDIM(self) == 1) {
|
1759
|
|
char *item;
|
1760
|
1
|
if (get_item_pointer(self, &item, indices, 1) < 0) {
|
1761
|
1
|
return -1;
|
1762
|
|
}
|
1763
|
1
|
if (PyArray_Pack(PyArray_DESCR(self), item, op) < 0) {
|
1764
|
|
return -1;
|
1765
|
|
}
|
1766
|
|
}
|
1767
|
|
else {
|
1768
|
|
PyArrayObject *view;
|
1769
|
|
|
1770
|
1
|
indices[1].value = PyArray_NDIM(self) - 1;
|
1771
|
1
|
indices[1].type = HAS_ELLIPSIS;
|
1772
|
1
|
if (get_view_from_index(self, &view, indices, 2, 0) < 0) {
|
1773
|
1
|
return -1;
|
1774
|
|
}
|
1775
|
1
|
if (PyArray_CopyObject(view, op) < 0) {
|
1776
|
0
|
Py_DECREF(view);
|
1777
|
|
return -1;
|
1778
|
|
}
|
1779
|
1
|
Py_DECREF(view);
|
1780
|
|
}
|
1781
|
|
return 0;
|
1782
|
|
}
|
1783
|
|
|
1784
|
|
|
1785
|
|
/*
 * General assignment with python indexing objects.
 *
 * Implements `self[ind] = op` (the mp_ass_subscript slot). The work is
 * dispatched on the index classification returned by prepare_index():
 *   - structured-dtype field access (via _get_field_view),
 *   - a full integer index (single-item store via PyArray_Pack),
 *   - a single boolean-array index (array_assign_boolean_subscript),
 *   - pure view-based indexing (slice/ellipsis/newaxis/integer mix),
 *   - fancy (advanced) indexing through a PyArrayMapIterObject.
 *
 * Returns 0 on success, -1 with an exception set on failure.
 * Item deletion (op == NULL) is rejected.
 */
static int
array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op)
{
    int index_type;
    int index_num;
    int i, ndim, fancy_ndim;
    PyArray_Descr *descr = PyArray_DESCR(self);
    /* View of `self` selected by the non-fancy part of the index (if any) */
    PyArrayObject *view = NULL;
    /* Array version of `op` (the values being assigned) */
    PyArrayObject *tmp_arr = NULL;
    npy_index_info indices[NPY_MAXDIMS * 2 + 1];

    PyArrayMapIterObject *mit = NULL;

    /* op == NULL signals `del self[ind]`, which ndarray does not support */
    if (op == NULL) {
        PyErr_SetString(PyExc_ValueError,
                        "cannot delete array elements");
        return -1;
    }
    if (PyArray_FailUnlessWriteable(self, "assignment destination") < 0) {
        return -1;
    }

    /* field access */
    if (PyDataType_HASFIELDS(PyArray_DESCR(self))){
        /* NOTE: shadows the outer `view` on purpose; this branch returns */
        PyArrayObject *view;
        int ret = _get_field_view(self, ind, &view);
        if (ret == 0){
            if (view == NULL) {
                return -1;
            }
            if (PyArray_CopyObject(view, op) < 0) {
                Py_DECREF(view);
                return -1;
            }
            Py_DECREF(view);
            return 0;
        }
        /* ret != 0: the index was not a field spec, fall through */
    }

    /* Prepare the indices */
    index_type = prepare_index(self, ind, indices, &index_num,
                               &ndim, &fancy_ndim, 1);

    if (index_type < 0) {
        return -1;
    }

    /* Full integer index: store a single element and return early */
    if (index_type == HAS_INTEGER) {
        char *item;
        if (get_item_pointer(self, &item, indices, index_num) < 0) {
            return -1;
        }
        if (PyArray_Pack(PyArray_DESCR(self), item, op) < 0) {
            return -1;
        }
        /* integers do not store objects in indices */
        return 0;
    }

    /* Single boolean array */
    if (index_type == HAS_BOOL) {
        if (!PyArray_Check(op)) {
            /* Coerce the values to an array of self's dtype */
            Py_INCREF(PyArray_DESCR(self));
            tmp_arr = (PyArrayObject *)PyArray_FromAny(op,
                                                   PyArray_DESCR(self), 0, 0,
                                                   NPY_ARRAY_FORCECAST, NULL);
            if (tmp_arr == NULL) {
                goto fail;
            }
        }
        else {
            Py_INCREF(op);
            tmp_arr = (PyArrayObject *)op;
        }

        if (array_assign_boolean_subscript(self,
                                           (PyArrayObject *)indices[0].object,
                                           tmp_arr, NPY_CORDER) < 0) {
            goto fail;
        }
        goto success;
    }


    /*
     * Single ellipsis index, no need to create a new view.
     * Note that here, we do *not* go through self.__getitem__ for subclasses
     * (defchar array failed then, due to uninitialized values...)
     */
    else if (index_type == HAS_ELLIPSIS) {
        if ((PyObject *)self == op) {
            /*
             * CopyObject does not handle this case gracefully and
             * there is nothing to do. Removing the special case
             * will cause segfaults, though it is unclear what exactly
             * happens.
             */
            return 0;
        }
        /* we can just use self, but incref for error handling */
        Py_INCREF((PyObject *)self);
        view = self;
    }

    /*
     * WARNING: There is a huge special case here. If this is not a
     *          base class array, we have to get the view through its
     *          very own index machinery.
     *          Many subclasses should probably call __setitem__
     *          with a base class ndarray view to avoid this.
     */
    else if (!(index_type & (HAS_FANCY | HAS_SCALAR_ARRAY))
                && !PyArray_CheckExact(self)) {
        view = (PyArrayObject *)PyObject_GetItem((PyObject *)self, ind);
        if (view == NULL) {
            goto fail;
        }
        if (!PyArray_Check(view)) {
            PyErr_SetString(PyExc_RuntimeError,
                            "Getitem not returning array");
            goto fail;
        }
    }

    /*
     * View based indexing.
     * There are two cases here. First we need to create a simple view,
     * second we need to create a (possibly invalid) view for the
     * subspace to the fancy index. This procedure is identical.
     */
    else if (index_type & (HAS_SLICE | HAS_NEWAXIS |
                           HAS_ELLIPSIS | HAS_INTEGER)) {
        if (get_view_from_index(self, &view, indices, index_num,
                                (index_type & HAS_FANCY)) < 0) {
            goto fail;
        }
    }
    else {
        /* Purely fancy index: no subspace view needed */
        view = NULL;
    }

    /* If there is no fancy indexing, we have the array to assign to */
    if (!(index_type & HAS_FANCY)) {
        if (PyArray_CopyObject(view, op) < 0) {
            goto fail;
        }
        goto success;
    }

    if (!PyArray_Check(op)) {
        /*
         * If the array is of object converting the values to an array
         * might not be legal even though normal assignment works.
         * So allocate a temporary array of the right size and use the
         * normal assignment to handle this case.
         */
        if (PyDataType_REFCHK(descr) && PySequence_Check(op)) {
            /* tmp_arr == NULL tells MapIterNew to allocate it for us */
            tmp_arr = NULL;
        }
        else {
            /* There is nothing fancy possible, so just make an array */
            Py_INCREF(descr);
            tmp_arr = (PyArrayObject *)PyArray_FromAny(op, descr, 0, 0,
                                                    NPY_ARRAY_FORCECAST, NULL);
            if (tmp_arr == NULL) {
                goto fail;
            }
        }
    }
    else {
        Py_INCREF(op);
        tmp_arr = (PyArrayObject *)op;
    }

    /*
     * Special case for very simple 1-d fancy indexing, which however
     * is quite common. This saves not only a lot of setup time in the
     * iterator, but also is faster (must be exactly fancy because
     * we don't support 0-d booleans here)
     */
    if (index_type == HAS_FANCY &&
            index_num == 1 && tmp_arr) {
        /* The array being indexed has one dimension and it is a fancy index */
        /* NOTE: shadows the `ind` parameter with the index array */
        PyArrayObject *ind = (PyArrayObject*)indices[0].object;

        /* Check if the type is equivalent */
        if (PyArray_EquivTypes(PyArray_DESCR(self),
                                   PyArray_DESCR(tmp_arr)) &&
                /*
                 * Either they are equivalent, or the values must
                 * be a scalar
                 */
                (PyArray_EQUIVALENTLY_ITERABLE(ind, tmp_arr,
                                               PyArray_TRIVIALLY_ITERABLE_OP_READ,
                                               PyArray_TRIVIALLY_ITERABLE_OP_READ) ||
                 (PyArray_NDIM(tmp_arr) == 0 &&
                        PyArray_TRIVIALLY_ITERABLE(ind))) &&
                /* Check if the type is equivalent to INTP */
                PyArray_ITEMSIZE(ind) == sizeof(npy_intp) &&
                PyArray_DESCR(ind)->kind == 'i' &&
                IsUintAligned(ind) &&
                PyDataType_ISNOTSWAPPED(PyArray_DESCR(ind))) {

            /* trivial_set checks the index for us */
            if (mapiter_trivial_set(self, ind, tmp_arr) < 0) {
                goto fail;
            }
            goto success;
        }
    }

    /*
     * NOTE: If tmp_arr was not allocated yet, mit should
     *       handle the allocation.
     *       The NPY_ITER_READWRITE is necessary for automatic
     *       allocation. Readwrite would not allow broadcasting
     *       correctly, but such an operand always has the full
     *       size anyway.
     */
    mit = (PyArrayMapIterObject *)PyArray_MapIterNew(indices,
                                         index_num, index_type,
                                         ndim, fancy_ndim, self,
                                         view, 0,
                                         NPY_ITER_WRITEONLY,
                                         ((tmp_arr == NULL) ?
                                              NPY_ITER_READWRITE :
                                              NPY_ITER_READONLY),
                                         tmp_arr, descr);

    if (mit == NULL) {
        goto fail;
    }

    if (tmp_arr == NULL) {
        /* Fill extra op, need to swap first */
        tmp_arr = mit->extra_op;
        Py_INCREF(tmp_arr);
        if (mit->consec) {
            PyArray_MapIterSwapAxes(mit, &tmp_arr, 1);
            if (tmp_arr == NULL) {
                goto fail;
            }
        }
        if (PyArray_CopyObject(tmp_arr, op) < 0) {
            goto fail;
        }
    }

    /* Can now reset the outer iterator (delayed bufalloc) */
    if (NpyIter_Reset(mit->outer, NULL) < 0) {
        goto fail;
    }

    if (PyArray_MapIterCheckIndices(mit) < 0) {
        goto fail;
    }

    /*
     * Could add a casting check, but apparently most assignments do
     * not care about safe casting.
     */

    if (mapiter_set(mit) < 0) {
        goto fail;
    }

    Py_DECREF(mit);
    goto success;

    /* Clean up temporary variables and indices */
  fail:
    Py_XDECREF((PyObject *)view);
    Py_XDECREF((PyObject *)tmp_arr);
    Py_XDECREF((PyObject *)mit);
    for (i=0; i < index_num; i++) {
        Py_XDECREF(indices[i].object);
    }
    return -1;

  success:
    Py_XDECREF((PyObject *)view);
    Py_XDECREF((PyObject *)tmp_arr);
    for (i=0; i < index_num; i++) {
        Py_XDECREF(indices[i].object);
    }
    return 0;
}
|
2076
|
|
|
2077
|
|
|
2078
|
|
/* Mapping-protocol slots for ndarray: len(), __getitem__, __setitem__. */
NPY_NO_EXPORT PyMappingMethods array_as_mapping = {
    .mp_length = (lenfunc)array_length,
    .mp_subscript = (binaryfunc)array_subscript,
    .mp_ass_subscript = (objobjargproc)array_assign_subscript,
};
|
2083
|
|
|
2084
|
|
/****************** End of Mapping Protocol ******************************/
|
2085
|
|
|
2086
|
|
/*********************** Subscript Array Iterator *************************
|
2087
|
|
* *
|
2088
|
|
* This object handles subscript behavior for array objects. *
|
2089
|
|
* It is an iterator object with a next method *
|
2090
|
|
* It abstracts the n-dimensional mapping behavior to make the looping *
|
2091
|
|
* code more understandable (maybe) *
|
2092
|
|
* and so that indexing can be set up ahead of time *
|
2093
|
|
*/
|
2094
|
|
|
2095
|
|
/*
 * This function takes a Boolean array and constructs index objects and
 * iterators as if nonzero(Bool) had been called
 *
 * On success, arrays[0..nd-1] are filled with freshly allocated 1-d
 * NPY_INTP arrays of length `count` (one coordinate array per input
 * dimension) and the number of dimensions `nd` is returned.
 * Returns -1 with an exception set on failure.
 *
 * Must not be called on a 0-d array.
 */
static int
_nonzero_indices(PyObject *myBool, PyArrayObject **arrays)
{
    PyArray_Descr *typecode;
    PyArrayObject *ba = NULL, *new = NULL;
    int nd, j;
    npy_intp size, i, count;
    npy_bool *ptr;
    /* Running coordinate and per-axis upper bounds for the index walk */
    npy_intp coords[NPY_MAXDIMS], dims_m1[NPY_MAXDIMS];
    /* Write cursors into the output coordinate arrays */
    npy_intp *dptr[NPY_MAXDIMS];
    static npy_intp one = 1;
    NPY_BEGIN_THREADS_DEF;

    /* Coerce input to a C-contiguous boolean array */
    typecode=PyArray_DescrFromType(NPY_BOOL);
    ba = (PyArrayObject *)PyArray_FromAny(myBool, typecode, 0, 0,
                                          NPY_ARRAY_CARRAY, NULL);
    if (ba == NULL) {
        return -1;
    }
    nd = PyArray_NDIM(ba);

    /* Pre-clear outputs so the fail path can Py_XDECREF safely */
    for (j = 0; j < nd; j++) {
        arrays[j] = NULL;
    }
    size = PyArray_SIZE(ba);
    ptr = (npy_bool *)PyArray_DATA(ba);

    /*
     * pre-determine how many nonzero entries there are,
     * ignore dimensionality of input as its a CARRAY
     */
    count = count_boolean_trues(1, (char*)ptr, &size, &one);

    /* create count-sized index arrays for each dimension */
    for (j = 0; j < nd; j++) {
        new = (PyArrayObject *)PyArray_NewFromDescr(
            &PyArray_Type, PyArray_DescrFromType(NPY_INTP),
            1, &count, NULL, NULL,
            0, NULL);
        if (new == NULL) {
            goto fail;
        }
        arrays[j] = new;

        dptr[j] = (npy_intp *)PyArray_DATA(new);
        coords[j] = 0;
        dims_m1[j] = PyArray_DIMS(ba)[j]-1;
    }
    if (count == 0) {
        goto finish;
    }

    /*
     * Loop through the Boolean array and copy coordinates
     * for non-zero entries
     */
    NPY_BEGIN_THREADS_THRESHOLDED(size);
    for (i = 0; i < size; i++) {
        if (*(ptr++)) {
            /* True entry: record the current coordinate on every axis */
            for (j = 0; j < nd; j++) {
                *(dptr[j]++) = coords[j];
            }
        }
        /* Borrowed from ITER_NEXT macro: odometer-style coordinate advance */
        for (j = nd - 1; j >= 0; j--) {
            if (coords[j] < dims_m1[j]) {
                coords[j]++;
                break;
            }
            else {
                coords[j] = 0;
            }
        }
    }
    NPY_END_THREADS;

 finish:
    Py_DECREF(ba);
    return nd;

 fail:
    for (j = 0; j < nd; j++) {
        Py_XDECREF(arrays[j]);
    }
    Py_XDECREF(ba);
    return -1;
}
|
2188
|
|
|
2189
|
|
|
2190
|
|
/*
 * Reset the map iterator to the beginning.
 *
 * Re-reads the first set of fancy-index values from the outer iterator,
 * recomputes mit->dataptr from baseoffset + sum(index * stride), and
 * resets the subspace iterator (when present) onto that base pointer.
 * No-op when the iteration is empty (mit->size == 0).
 */
NPY_NO_EXPORT void
PyArray_MapIterReset(PyArrayMapIterObject *mit)
{
    npy_intp indval;
    /* [0] = indexed-array base, [1] = extra operand base (if any) */
    char *baseptrs[2];
    int i;

    if (mit->size == 0) {
        return;
    }

    NpyIter_Reset(mit->outer, NULL);
    if (mit->extra_op_iter) {
        NpyIter_Reset(mit->extra_op_iter, NULL);

        baseptrs[1] = mit->extra_op_ptrs[0];
    }

    baseptrs[0] = mit->baseoffset;

    /* Apply the first fancy index of each index array to the base pointer */
    for (i = 0; i < mit->numiter; i++) {
        indval = *((npy_intp*)mit->outer_ptrs[i]);
        if (indval < 0) {
            /* negative indices wrap around the indexed dimension */
            indval += mit->fancy_dims[i];
        }
        baseptrs[0] += indval * mit->fancy_strides[i];
    }
    mit->dataptr = baseptrs[0];

    if (mit->subspace_iter) {
        NpyIter_ResetBasePointers(mit->subspace_iter, baseptrs, NULL);
        mit->iter_count = *NpyIter_GetInnerLoopSizePtr(mit->subspace_iter);
    }
    else {
        mit->iter_count = *NpyIter_GetInnerLoopSizePtr(mit->outer);
    }

    return;
}
|
2230
|
|
|
2231
|
|
|
2232
|
|
/*NUMPY_API
|
2233
|
|
* This function needs to update the state of the map iterator
|
2234
|
|
* and point mit->dataptr to the memory-location of the next object
|
2235
|
|
*
|
2236
|
|
* Note that this function never handles an extra operand but provides
|
2237
|
|
* compatibility for an old (exposed) API.
|
2238
|
|
*/
|
2239
|
|
NPY_NO_EXPORT void
PyArray_MapIterNext(PyArrayMapIterObject *mit)
{
    int i;
    char *baseptr;
    npy_intp indval;

    /*
     * Two top-level modes: with a subspace iterator (fancy indices select
     * a sub-array each) or without (fancy indices select single elements).
     * In both, mit->iter_count counts down the current inner loop; when it
     * runs out we advance the iterator one buffer/outer step.
     */
    if (mit->subspace_iter) {
        if (--mit->iter_count > 0) {
            /* Still inside the subspace inner loop: just step the pointer */
            mit->subspace_ptrs[0] += mit->subspace_strides[0];
            mit->dataptr = mit->subspace_ptrs[0];
            return;
        }
        else if (mit->subspace_next(mit->subspace_iter)) {
            /* Next inner loop of the same subspace */
            mit->iter_count = *NpyIter_GetInnerLoopSizePtr(mit->subspace_iter);
            mit->dataptr = mit->subspace_ptrs[0];
        }
        else {
            /* Subspace exhausted: advance the outer (fancy) iterator */
            if (!mit->outer_next(mit->outer)) {
                return;
            }

            /* Recompute the subspace base from the new fancy indices */
            baseptr = mit->baseoffset;

            for (i = 0; i < mit->numiter; i++) {
                indval = *((npy_intp*)mit->outer_ptrs[i]);
                if (indval < 0) {
                    /* negative indices wrap around the indexed dimension */
                    indval += mit->fancy_dims[i];
                }
                baseptr += indval * mit->fancy_strides[i];
            }
            NpyIter_ResetBasePointers(mit->subspace_iter, &baseptr, NULL);
            mit->iter_count = *NpyIter_GetInnerLoopSizePtr(mit->subspace_iter);

            mit->dataptr = mit->subspace_ptrs[0];
        }
    }
    else {
        if (--mit->iter_count > 0) {
            /* Step each fancy-index pointer and recompute the data pointer */
            baseptr = mit->baseoffset;

            for (i = 0; i < mit->numiter; i++) {
                mit->outer_ptrs[i] += mit->outer_strides[i];

                indval = *((npy_intp*)mit->outer_ptrs[i]);
                if (indval < 0) {
                    indval += mit->fancy_dims[i];
                }
                baseptr += indval * mit->fancy_strides[i];
            }

            mit->dataptr = baseptr;
            return;
        }
        else {
            /* Inner loop exhausted: fetch the next outer buffer */
            if (!mit->outer_next(mit->outer)) {
                return;
            }
            mit->iter_count = *NpyIter_GetInnerLoopSizePtr(mit->outer);
            baseptr = mit->baseoffset;

            for (i = 0; i < mit->numiter; i++) {
                indval = *((npy_intp*)mit->outer_ptrs[i]);
                if (indval < 0) {
                    indval += mit->fancy_dims[i];
                }
                baseptr += indval * mit->fancy_strides[i];
            }

            mit->dataptr = baseptr;
        }
    }
}
|
2312
|
|
|
2313
|
|
|
2314
|
|
/**
|
2315
|
|
* Fill information about the iterator. The MapIterObject does not
|
2316
|
|
* need to have any information set for this function to work.
|
2317
|
|
* (PyArray_MapIterSwapAxes requires also nd and nd_fancy info)
|
2318
|
|
*
|
2319
|
|
* Sets the following information:
|
2320
|
|
* * mit->consec: The axis where the fancy indices need transposing to.
|
2321
|
|
* * mit->iteraxes: The axis which the fancy index corresponds to.
|
2322
|
|
* * mit-> fancy_dims: the dimension of `arr` along the indexed dimension
|
2323
|
|
* for each fancy index.
|
2324
|
|
* * mit->fancy_strides: the strides for the dimension being indexed
|
2325
|
|
* by each fancy index.
|
2326
|
|
* * mit->dimensions: Broadcast dimension of the fancy indices and
|
2327
|
|
* the subspace iteration dimension.
|
2328
|
|
*
|
2329
|
|
* @param MapIterObject
|
2330
|
|
* @param The parsed indices object
|
2331
|
|
* @param Number of indices
|
2332
|
|
* @param The array that is being iterated
|
2333
|
|
*
|
2334
|
|
* @return 0 on success -1 on failure
|
2335
|
|
*/
|
2336
|
|
static int
|
2337
|
1
|
mapiter_fill_info(PyArrayMapIterObject *mit, npy_index_info *indices,
|
2338
|
|
int index_num, PyArrayObject *arr)
|
2339
|
|
{
|
2340
|
1
|
int j = 0, i;
|
2341
|
1
|
int curr_dim = 0;
|
2342
|
|
/* dimension of index result (up to first fancy index) */
|
2343
|
1
|
int result_dim = 0;
|
2344
|
|
/* -1 init; 0 found fancy; 1 fancy stopped; 2 found not consecutive fancy */
|
2345
|
1
|
int consec_status = -1;
|
2346
|
|
int axis, broadcast_axis;
|
2347
|
|
npy_intp dimension;
|
2348
|
|
PyObject *errmsg, *tmp;
|
2349
|
|
|
2350
|
1
|
for (i = 0; i < mit->nd_fancy; i++) {
|
2351
|
1
|
mit->dimensions[i] = 1;
|
2352
|
|
}
|
2353
|
|
|
2354
|
1
|
mit->consec = 0;
|
2355
|
1
|
for (i = 0; i < index_num; i++) {
|
2356
|
|
/* integer and fancy indexes are transposed together */
|
2357
|
1
|
if (indices[i].type & (HAS_FANCY | HAS_INTEGER)) {
|
2358
|
|
/* there was no previous fancy index, so set consec */
|
2359
|
1
|
if (consec_status == -1) {
|
2360
|
1
|
mit->consec = result_dim;
|
2361
|
1
|
consec_status = 0;
|
2362
|
|
}
|
2363
|
|
/* there was already a non-fancy index after a fancy one */
|
2364
|
1
|
else if (consec_status == 1) {
|
2365
|
1
|
consec_status = 2;
|
2366
|
1
|
mit->consec = 0;
|
2367
|
|
}
|
2368
|
|
}
|
2369
|
|
else {
|
2370
|
|
/* consec_status == 0 means there was a fancy index before */
|
2371
|
1
|
if (consec_status == 0) {
|
2372
|
1
|
consec_status = 1;
|
2373
|
|
}
|
2374
|
|
}
|
2375
|
|
|
2376
|
|
/* (iterating) fancy index, store the iterator */
|
2377
|
1
|
if (indices[i].type == HAS_FANCY) {
|
2378
|
1
|
mit->fancy_strides[j] = PyArray_STRIDE(arr, curr_dim);
|
2379
|
1
|
mit->fancy_dims[j] = PyArray_DIM(arr, curr_dim);
|
2380
|
1
|
mit->iteraxes[j++] = curr_dim++;
|
2381
|
|
|
2382
|
|
/* Check broadcasting */
|
2383
|
1
|
broadcast_axis = mit->nd_fancy;
|
2384
|
|
/* Fill from back, we know how many dims there are */
|
2385
|
1
|
for (axis = PyArray_NDIM((PyArrayObject *)indices[i].object) - 1;
|
2386
|
1
|
axis >= 0; axis--) {
|
2387
|
1
|
broadcast_axis--;
|
2388
|
1
|
dimension = PyArray_DIM((PyArrayObject *)indices[i].object, axis);
|
2389
|
|
|
2390
|
|
/* If it is 1, we can broadcast */
|
2391
|
1
|
if (dimension != 1) {
|
2392
|
1
|
if (dimension != mit->dimensions[broadcast_axis]) {
|
2393
|
1
|
if (mit->dimensions[broadcast_axis] != 1) {
|
2394
|
|
goto broadcast_error;
|
2395
|
|
}
|
2396
|
1
|
mit->dimensions[broadcast_axis] = dimension;
|
2397
|
|
}
|
2398
|
|
}
|
2399
|
|
}
|
2400
|
|
}
|
2401
|
1
|
else if (indices[i].type == HAS_0D_BOOL) {
|
2402
|
1
|
mit->fancy_strides[j] = 0;
|
2403
|
1
|
mit->fancy_dims[j] = 1;
|
2404
|
|
/* Does not exist */
|
2405
|
1
|
mit->iteraxes[j++] = -1;
|
2406
|
1
|
if ((indices[i].value == 0) &&
|
2407
|
1
|
(mit->dimensions[mit->nd_fancy - 1]) > 1) {
|
2408
|
|
goto broadcast_error;
|
2409
|
|
}
|
2410
|
1
|
mit->dimensions[mit->nd_fancy-1] *= indices[i].value;
|
2411
|
|
}
|
2412
|
|
|
2413
|
|
/* advance curr_dim for non-fancy indices */
|
2414
|
1
|
else if (indices[i].type == HAS_ELLIPSIS) {
|
2415
|
1
|
curr_dim += (int)indices[i].value;
|
2416
|
1
|
result_dim += (int)indices[i].value;
|
2417
|
|
}
|
2418
|
1
|
else if (indices[i].type != HAS_NEWAXIS){
|
2419
|
1
|
curr_dim += 1;
|
2420
|
1
|
result_dim += 1;
|
2421
|
|
}
|
2422
|
|
else {
|
2423
|
1
|
result_dim += 1;
|
2424
|
|
}
|
2425
|
|
}
|
2426
|
|
|
2427
|
|
/* Fill dimension of subspace */
|
2428
|
1
|
if (mit->subspace) {
|
2429
|
1
|
for (i = 0; i < PyArray_NDIM(mit->subspace); i++) {
|
2430
|
1
|
mit->dimensions[mit->nd_fancy + i] = PyArray_DIM(mit->subspace, i);
|
2431
|
|
}
|
2432
|
|
}
|
2433
|
|
|
2434
|
|
return 0;
|
2435
|
|
|
2436
|
1
|
broadcast_error:
|
2437
|
|
/*
|
2438
|
|
* Attempt to set a meaningful exception. Could also find out
|
2439
|
|
* if a boolean index was converted.
|
2440
|
|
*/
|
2441
|
1
|
errmsg = PyUnicode_FromString("shape mismatch: indexing arrays could not "
|
2442
|
|
"be broadcast together with shapes ");
|
2443
|
1
|
if (errmsg == NULL) {
|
2444
|
|
return -1;
|
2445
|
|
}
|
2446
|
|
|
2447
|
1
|
for (i = 0; i < index_num; i++) {
|
2448
|
1
|
if (!(indices[i].type & HAS_FANCY)) {
|
2449
|
1
|
continue;
|
2450
|
|
}
|
2451
|
1
|
tmp = convert_shape_to_string(
|
2452
|
1
|
PyArray_NDIM((PyArrayObject *)indices[i].object),
|
2453
|
1
|
PyArray_SHAPE((PyArrayObject *)indices[i].object),
|
2454
|
|
" ");
|
2455
|
1
|
if (tmp == NULL) {
|
2456
|
|
return -1;
|
2457
|
|
}
|
2458
|
1
|
PyUString_ConcatAndDel(&errmsg, tmp);
|
2459
|
1
|
if (errmsg == NULL) {
|
2460
|
|
return -1;
|
2461
|
|
}
|
2462
|
|
}
|
2463
|
|
|
2464
|
1
|
PyErr_SetObject(PyExc_IndexError, errmsg);
|
2465
|
1
|
Py_DECREF(errmsg);
|
2466
|
|
return -1;
|
2467
|
|
}
|
2468
|
|
|
2469
|
|
|
2470
|
|
/*
 * Check whether the fancy indices are out of bounds.
 * Returns 0 on success and -1 on failure.
 * (Gets operands from the outer iterator, but iterates them independently)
 *
 * Each index array is validated against the dimension it indexes
 * (mit->fancy_dims / mit->iteraxes). A fast path handles trivially
 * iterable native-intp arrays directly; everything else goes through a
 * buffered NpyIter with casting to intp. The GIL is released around the
 * hot loops via the _save/NPY_BEGIN_THREADS machinery, and
 * check_and_adjust_index() reacquires it before raising.
 */
NPY_NO_EXPORT int
PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit)
{
    PyArrayObject *op;
    NpyIter *op_iter;
    NpyIter_IterNextFunc *op_iternext;
    npy_intp outer_dim, indval;
    int outer_axis;
    npy_intp itersize, *iterstride;
    char **iterptr;
    PyArray_Descr *intp_type;
    int i;
    NPY_BEGIN_THREADS_DEF;

    if (NpyIter_GetIterSize(mit->outer) == 0) {
        /*
         * When the outer iteration is empty, the indices broadcast to an
         * empty shape, and in this case we do not check if there are out
         * of bounds indices.
         * The code below does use the indices without broadcasting since
         * broadcasting only repeats values.
         */
        return 0;
    }

    intp_type = PyArray_DescrFromType(NPY_INTP);

    NPY_BEGIN_THREADS;

    for (i=0; i < mit->numiter; i++) {
        op = NpyIter_GetOperandArray(mit->outer)[i];

        outer_dim = mit->fancy_dims[i];
        outer_axis = mit->iteraxes[i];

        /* See if it is possible to just trivially iterate the array */
        if (PyArray_TRIVIALLY_ITERABLE(op) &&
                /* Check if the type is equivalent to INTP */
                PyArray_ITEMSIZE(op) == sizeof(npy_intp) &&
                PyArray_DESCR(op)->kind == 'i' &&
                IsUintAligned(op) &&
                PyDataType_ISNOTSWAPPED(PyArray_DESCR(op))) {
            char *data;
            npy_intp stride;
            /* release GIL if it was taken by nditer below */
            if (_save == NULL) {
                NPY_BEGIN_THREADS;
            }

            PyArray_PREPARE_TRIVIAL_ITERATION(op, itersize, data, stride);

            while (itersize--) {
                indval = *((npy_intp*)data);
                /* check_and_adjust_index reacquires the GIL via _save */
                if (check_and_adjust_index(&indval,
                                           outer_dim, outer_axis, _save) < 0) {
                    Py_DECREF(intp_type);
                    goto indexing_error;
                }
                data += stride;
            }
            /* GIL retake at end of function or if nditer path required */
            continue;
        }

        /* Use NpyIter if the trivial iteration is not possible */
        NPY_END_THREADS;
        op_iter = NpyIter_New(op,
                        NPY_ITER_BUFFERED | NPY_ITER_NBO | NPY_ITER_ALIGNED |
                        NPY_ITER_EXTERNAL_LOOP | NPY_ITER_GROWINNER |
                        NPY_ITER_READONLY | NPY_ITER_ZEROSIZE_OK,
                        NPY_KEEPORDER, NPY_SAME_KIND_CASTING, intp_type);

        if (op_iter == NULL) {
            Py_DECREF(intp_type);
            return -1;
        }
        if (NpyIter_GetIterSize(op_iter) == 0) {
            NpyIter_Deallocate(op_iter);
            continue;
        }

        op_iternext = NpyIter_GetIterNext(op_iter, NULL);
        if (op_iternext == NULL) {
            Py_DECREF(intp_type);
            NpyIter_Deallocate(op_iter);
            return -1;
        }

        NPY_BEGIN_THREADS_NDITER(op_iter);
        iterptr = NpyIter_GetDataPtrArray(op_iter);
        iterstride = NpyIter_GetInnerStrideArray(op_iter);
        do {
            itersize = *NpyIter_GetInnerLoopSizePtr(op_iter);
            while (itersize--) {
                indval = *((npy_intp*)*iterptr);
                if (check_and_adjust_index(&indval,
                                           outer_dim, outer_axis, _save) < 0) {
                    Py_DECREF(intp_type);
                    NpyIter_Deallocate(op_iter);
                    goto indexing_error;
                }
                *iterptr += *iterstride;
            }
        } while (op_iternext(op_iter));

        NPY_END_THREADS;
        NpyIter_Deallocate(op_iter);
    }

    NPY_END_THREADS;
    Py_DECREF(intp_type);
    return 0;

indexing_error:

    if (mit->size == 0) {
        /* Empty result: downgrade the IndexError to a DeprecationWarning */
        PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL;
        PyErr_Fetch(&err_type, &err_value, &err_traceback);
        /* 2020-05-27, NumPy 1.20 */
        if (DEPRECATE(
                "Out of bound index found. This was previously ignored "
                "when the indexing result contained no elements. "
                "In the future the index error will be raised. This error "
                "occurs either due to an empty slice, or if an array has zero "
                "elements even before indexing.\n"
                "(Use `warnings.simplefilter('error')` to turn this "
                "DeprecationWarning into an error and get more details on "
                "the invalid index.)") < 0) {
            /* Warning escalated to an error: chain the original IndexError */
            npy_PyErr_ChainExceptions(err_type, err_value, err_traceback);
            return -1;
        }
        Py_DECREF(err_type);
        Py_DECREF(err_value);
        Py_XDECREF(err_traceback);
        return 0;
    }

    return -1;
}
|
2614
|
|
|
2615
|
|
|
2616
|
|
/*
|
2617
|
|
* Create new mapiter.
|
2618
|
|
*
|
2619
|
|
* NOTE: The outer iteration (and subspace if requested buffered) is
|
2620
|
|
* created with DELAY_BUFALLOC. It must be reset before usage!
|
2621
|
|
*
|
2622
|
|
* @param Index information filled by prepare_index.
|
2623
|
|
* @param Number of indices (gotten through prepare_index).
|
2624
|
|
* @param Kind of index (gotten through preprare_index).
|
2625
|
|
* @param NpyIter flags for an extra array. If 0 assume that there is no
|
2626
|
|
* extra operand. NPY_ITER_ALLOCATE can make sense here.
|
2627
|
|
* @param Array being indexed
|
2628
|
|
* @param subspace (result of getting view for the indices)
|
2629
|
|
* @param Subspace iterator flags can be used to enable buffering.
|
2630
|
|
* NOTE: When no subspace is necessary, the extra operand will
|
2631
|
|
* always be buffered! Buffering the subspace when not
|
2632
|
|
* necessary is very slow when the subspace is small.
|
2633
|
|
* @param Subspace operand flags (should just be 0 normally)
|
2634
|
|
* @param Operand iteration flags for the extra operand, this must not be
|
2635
|
|
* 0 if an extra operand should be used, otherwise it must be 0.
|
2636
|
|
* Should be at least READONLY, WRITEONLY or READWRITE.
|
2637
|
|
* @param Extra operand. For getmap, this would be the result, for setmap
|
2638
|
|
* this would be the arrays to get from.
|
2639
|
|
* Can be NULL, and will be allocated in that case. However,
|
2640
|
|
* it matches the mapiter iteration, so you have to call
|
2641
|
|
* MapIterSwapAxes(mit, &extra_op, 1) on it.
|
2642
|
|
* The operand has no effect on the shape.
|
2643
|
|
* @param Dtype for the extra operand, borrows the reference and must not
|
2644
|
|
* be NULL (if extra_op_flags is not 0).
|
2645
|
|
*
|
2646
|
|
* @return A new MapIter (PyObject *) or NULL.
|
2647
|
|
*/
|
2648
|
|
NPY_NO_EXPORT PyObject *
|
2649
|
1
|
PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
|
2650
|
|
int ndim, int fancy_ndim,
|
2651
|
|
PyArrayObject *arr, PyArrayObject *subspace,
|
2652
|
|
npy_uint32 subspace_iter_flags, npy_uint32 subspace_flags,
|
2653
|
|
npy_uint32 extra_op_flags, PyArrayObject *extra_op,
|
2654
|
|
PyArray_Descr *extra_op_dtype)
|
2655
|
|
{
|
2656
|
|
PyObject *errmsg, *tmp;
|
2657
|
|
/* For shape reporting on error */
|
2658
|
1
|
PyArrayObject *original_extra_op = extra_op;
|
2659
|
|
|
2660
|
|
PyArrayObject *index_arrays[NPY_MAXDIMS];
|
2661
|
|
PyArray_Descr *intp_descr;
|
2662
|
|
PyArray_Descr *dtypes[NPY_MAXDIMS]; /* borrowed references */
|
2663
|
|
|
2664
|
|
npy_uint32 op_flags[NPY_MAXDIMS];
|
2665
|
|
npy_uint32 outer_flags;
|
2666
|
|
|
2667
|
|
PyArrayMapIterObject *mit;
|
2668
|
|
|
2669
|
|
int single_op_axis[NPY_MAXDIMS];
|
2670
|
1
|
int *op_axes[NPY_MAXDIMS] = {NULL};
|
2671
|
1
|
int i, j, dummy_array = 0;
|
2672
|
|
int nops;
|
2673
|
|
int uses_subspace;
|
2674
|
|
|
2675
|
1
|
intp_descr = PyArray_DescrFromType(NPY_INTP);
|
2676
|
1
|
if (intp_descr == NULL) {
|
2677
|
|
return NULL;
|
2678
|
|
}
|
2679
|
|
|
2680
|
|
/* create new MapIter object */
|
2681
|
1
|
mit = (PyArrayMapIterObject *)PyArray_malloc(sizeof(PyArrayMapIterObject));
|
2682
|
1
|
if (mit == NULL) {
|
2683
|
0
|
Py_DECREF(intp_descr);
|
2684
|
|
return NULL;
|
2685
|
|
}
|
2686
|
|
/* set all attributes of mapiter to zero */
|
2687
|
1
|
memset(mit, 0, sizeof(PyArrayMapIterObject));
|
2688
|
1
|
PyObject_Init((PyObject *)mit, &PyArrayMapIter_Type);
|
2689
|
|
|
2690
|
1
|
Py_INCREF(arr);
|
2691
|
1
|
mit->array = arr;
|
2692
|
1
|
Py_XINCREF(subspace);
|
2693
|
1
|
mit->subspace = subspace;
|
2694
|
|
|
2695
|
|
/*
|
2696
|
|
* The subspace, the part of the array which is not indexed by
|
2697
|
|
* arrays, needs to be iterated when the size of the subspace
|
2698
|
|
* is larger than 1. If it is one, it has only an effect on the
|
2699
|
|
* result shape. (Optimizes for example np.newaxis usage)
|
2700
|
|
*/
|
2701
|
1
|
if ((subspace == NULL) || PyArray_SIZE(subspace) == 1) {
|
2702
|
|
uses_subspace = 0;
|
2703
|
|
}
|
2704
|
|
else {
|
2705
|
|
uses_subspace = 1;
|
2706
|
|
}
|
2707
|
|
|
2708
|
|
/* Fill basic information about the mapiter */
|
2709
|
1
|
mit->nd = ndim;
|
2710
|
1
|
mit->nd_fancy = fancy_ndim;
|
2711
|
1
|
if (mapiter_fill_info(mit, indices, index_num, arr) < 0) {
|
2712
|
1
|
Py_DECREF(mit);
|
2713
|
1
|
Py_DECREF(intp_descr);
|
2714
|
|
return NULL;
|
2715
|
|
}
|
2716
|
|
|
2717
|
|
/*
|
2718
|
|
* Set iteration information of the indexing arrays.
|
2719
|
|
*/
|
2720
|
1
|
for (i=0; i < index_num; i++) {
|
2721
|
1
|
if (indices[i].type & HAS_FANCY) {
|
2722
|
1
|
index_arrays[mit->numiter] = (PyArrayObject *)indices[i].object;
|
2723
|
1
|
dtypes[mit->numiter] = intp_descr;
|
2724
|
|
|
2725
|
1
|
op_flags[mit->numiter] = (NPY_ITER_NBO |
|
2726
|
|
NPY_ITER_ALIGNED |
|
2727
|
|
NPY_ITER_READONLY);
|
2728
|
1
|
mit->numiter += 1;
|
2729
|
|
}
|
2730
|
|
}
|
2731
|
|
|
2732
|
1
|
if (mit->numiter == 0) {
|
2733
|
|
/*
|
2734
|
|
* For MapIterArray, it is possible that there is no fancy index.
|
2735
|
|
* To support this case, add a dummy iterator.
|
2736
|
|
* Since it is 0-d its transpose, etc. does not matter.
|
2737
|
|
*/
|
2738
|
|
|
2739
|
|
/* signal necessity to decref... */
|
2740
|
1
|
dummy_array = 1;
|
2741
|
|
|
2742
|
1
|
index_arrays[0] = (PyArrayObject *)PyArray_Zeros(0, NULL,
|
2743
|
|
PyArray_DescrFromType(NPY_INTP), 0);
|
2744
|
1
|
if (index_arrays[0] == NULL) {
|
2745
|
0
|
Py_DECREF(mit);
|
2746
|
0
|
Py_DECREF(intp_descr);
|
2747
|
|
return NULL;
|
2748
|
|
}
|
2749
|
1
|
dtypes[0] = intp_descr;
|
2750
|
1
|
op_flags[0] = NPY_ITER_NBO | NPY_ITER_ALIGNED | NPY_ITER_READONLY;
|
2751
|
|
|
2752
|
1
|
mit->fancy_dims[0] = 1;
|
2753
|
1
|
mit->numiter = 1;
|
2754
|
|
}
|
2755
|
|
|
2756
|
|
/*
|
2757
|
|
* Now there are two general cases how extra_op is used:
|
2758
|
|
* 1. No subspace iteration is necessary, so the extra_op can
|
2759
|
|
* be included into the index iterator (it will be buffered)
|
2760
|
|
* 2. Subspace iteration is necessary, so the extra op is iterated
|
2761
|
|
* independently, and the iteration order is fixed at C (could
|
2762
|
|
* also use Fortran order if the array is Fortran order).
|
2763
|
|
* In this case the subspace iterator is not buffered.
|
2764
|
|
*
|
2765
|
|
* If subspace iteration is necessary and an extra_op was given,
|
2766
|
|
* it may also be necessary to transpose the extra_op (or signal
|
2767
|
|
* the transposing to the advanced iterator).
|
2768
|
|
*/
|
2769
|
|
|
2770
|
1
|
if (extra_op != NULL) {
|
2771
|
|
/*
|
2772
|
|
* If we have an extra_op given, need to prepare it.
|
2773
|
|
* 1. Subclasses might mess with the shape, so need a baseclass
|
2774
|
|
* 2. Need to make sure the shape is compatible
|
2775
|
|
* 3. May need to remove leading 1s and transpose dimensions.
|
2776
|
|
* Normal assignments allow broadcasting away leading 1s, but
|
2777
|
|
* the transposing code does not like this.
|
2778
|
|
*/
|
2779
|
1
|
if (!PyArray_CheckExact(extra_op)) {
|
2780
|
1
|
extra_op = (PyArrayObject *)PyArray_View(extra_op, NULL,
|
2781
|
|
&PyArray_Type);
|
2782
|
1
|
if (extra_op == NULL) {
|
2783
|
|
goto fail;
|
2784
|
|
}
|
2785
|
|
}
|
2786
|
|
else {
|
2787
|
1
|
Py_INCREF(extra_op);
|
2788
|
|
}
|
2789
|
|
|
2790
|
1
|
if (PyArray_NDIM(extra_op) > mit->nd) {
|
2791
|
|
/*
|
2792
|
|
* Usual assignments allow removal of leading one dimensions.
|
2793
|
|
* (or equivalently adding of one dimensions to the array being
|
2794
|
|
* assigned to). To implement this, reshape the array.
|
2795
|
|
*/
|
2796
|
|
PyArrayObject *tmp_arr;
|
2797
|
|
PyArray_Dims permute;
|
2798
|
|
|
2799
|
1
|
permute.len = mit->nd;
|
2800
|
1
|
permute.ptr = &PyArray_DIMS(extra_op)[
|
2801
|
1
|
PyArray_NDIM(extra_op) - mit->nd];
|
2802
|
1
|
tmp_arr = (PyArrayObject*)PyArray_Newshape(extra_op, &permute,
|
2803
|
|
NPY_CORDER);
|
2804
|
1
|
if (tmp_arr == NULL) {
|
2805
|
|
goto broadcast_error;
|
2806
|
|
}
|
2807
|
1
|
Py_DECREF(extra_op);
|
2808
|
1
|
extra_op = tmp_arr;
|
2809
|
|
}
|
2810
|
|
|
2811
|
|
/*
|
2812
|
|
* If dimensions need to be prepended (and no swapaxis is needed),
|
2813
|
|
* use op_axes after extra_op is allocated for sure.
|
2814
|
|
*/
|
2815
|
1
|
if (mit->consec) {
|
2816
|
1
|
PyArray_MapIterSwapAxes(mit, &extra_op, 0);
|
2817
|
1
|
if (extra_op == NULL) {
|
2818
|
|
goto fail;
|
2819
|
|
}
|
2820
|
|
}
|
2821
|
|
|
2822
|
1
|
if (subspace && !uses_subspace) {
|
2823
|
|
/*
|
2824
|
|
* We are not using the subspace, so its size is 1.
|
2825
|
|
* All dimensions of the extra_op corresponding to the
|
2826
|
|
* subspace must be equal to 1.
|
2827
|
|
*/
|
2828
|
1
|
if (PyArray_NDIM(subspace) <= PyArray_NDIM(extra_op)) {
|
2829
|
|
j = PyArray_NDIM(subspace);
|
2830
|
|
}
|
2831
|
|
else {
|
2832
|
1
|
j = PyArray_NDIM(extra_op);
|
2833
|
|
}
|
2834
|
1
|
for (i = 1; i < j + 1; i++) {
|
2835
|
1
|
if (PyArray_DIM(extra_op, PyArray_NDIM(extra_op) - i) != 1) {
|
2836
|
|
goto broadcast_error;
|
2837
|
|
}
|
2838
|
|
}
|
2839
|
|
}
|
2840
|
|
}
|
2841
|
|
|
2842
|
|
/*
|
2843
|
|
* If subspace is not NULL, NpyIter cannot allocate extra_op for us.
|
2844
|
|
* This is a bit of a kludge. A dummy iterator is created to find
|
2845
|
|
* the correct output shape and stride permutation.
|
2846
|
|
* TODO: This can at least partially be replaced, since the shape
|
2847
|
|
* is found for broadcasting errors.
|
2848
|
|
*/
|
2849
|
1
|
else if (extra_op_flags && (subspace != NULL)) {
|
2850
|
|
npy_uint32 tmp_op_flags[NPY_MAXDIMS];
|
2851
|
|
|
2852
|
|
NpyIter *tmp_iter;
|
2853
|
|
npy_intp stride;
|
2854
|
|
npy_intp strides[NPY_MAXDIMS];
|
2855
|
|
npy_stride_sort_item strideperm[NPY_MAXDIMS];
|
2856
|
|
|
2857
|
1
|
for (i=0; i < mit->numiter; i++) {
|
2858
|
1
|
tmp_op_flags[i] = NPY_ITER_READONLY;
|
2859
|
|
}
|
2860
|
|
|
2861
|
1
|
Py_INCREF(extra_op_dtype);
|
2862
|
1
|
mit->extra_op_dtype = extra_op_dtype;
|
2863
|
|
|
2864
|
1
|
if (PyArray_SIZE(subspace) == 1) {
|
2865
|
|
/* Create an iterator, just to broadcast the arrays?! */
|
2866
|
1
|
tmp_iter = NpyIter_MultiNew(mit->numiter, index_arrays,
|
2867
|
|
NPY_ITER_ZEROSIZE_OK |
|
2868
|
|
NPY_ITER_REFS_OK |
|
2869
|
|
NPY_ITER_MULTI_INDEX |
|
2870
|
|
NPY_ITER_DONT_NEGATE_STRIDES,
|
2871
|
|
NPY_KEEPORDER,
|
2872
|
|
NPY_UNSAFE_CASTING,
|
2873
|
|
tmp_op_flags, NULL);
|
2874
|
1
|
if (tmp_iter == NULL) {
|
2875
|
|
goto fail;
|
2876
|
|
}
|
2877
|
|
|
2878
|
|
/*
|
2879
|
|
* nditer allows itemsize with npy_intp type, so it works
|
2880
|
|
* here, but it would *not* work directly, since elsize
|
2881
|
|
* is limited to int.
|
2882
|
|
*/
|
2883
|
1
|
if (!NpyIter_CreateCompatibleStrides(tmp_iter,
|
2884
|
1
|
extra_op_dtype->elsize * PyArray_SIZE(subspace),
|
2885
|
|
strides)) {
|
2886
|
0
|
PyErr_SetString(PyExc_ValueError,
|
2887
|
|
"internal error: failed to find output array strides");
|
2888
|
0
|
goto fail;
|
2889
|
|
}
|
2890
|
1
|
NpyIter_Deallocate(tmp_iter);
|
2891
|
|
}
|
2892
|
|
else {
|
2893
|
|
/* Just use C-order strides (TODO: allow also F-order) */
|
2894
|
1
|
stride = extra_op_dtype->elsize * PyArray_SIZE(subspace);
|
2895
|
1
|
for (i=mit->nd_fancy - 1; i >= 0; i--) {
|
2896
|
1
|
strides[i] = stride;
|
2897
|
1
|
stride *= mit->dimensions[i];
|
2898
|
|
}
|
2899
|
|
}
|
2900
|
|
|
2901
|
|
/* shape is set, and strides is set up to mit->nd, set rest */
|
2902
|
1
|
PyArray_CreateSortedStridePerm(PyArray_NDIM(subspace),
|
2903
|
1
|
PyArray_STRIDES(subspace), strideperm);
|
2904
|
1
|
stride = extra_op_dtype->elsize;
|
2905
|
1
|
for (i=PyArray_NDIM(subspace) - 1; i >= 0; i--) {
|
2906
|
1
|
strides[mit->nd_fancy + strideperm[i].perm] = stride;
|
2907
|
1
|
stride *= PyArray_DIM(subspace, (int)strideperm[i].perm);
|
2908
|
|
}
|
2909
|
|
|
2910
|
|
/*
|
2911
|
|
* Allocate new array. Note: Always base class, because
|
2912
|
|
* subclasses might mess with the shape.
|
2913
|
|
*/
|
2914
|
1
|
Py_INCREF(extra_op_dtype);
|
2915
|
1
|
extra_op = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
|
2916
|
|
extra_op_dtype,
|
2917
|
1
|
mit->nd_fancy + PyArray_NDIM(subspace),
|
2918
|
1
|
mit->dimensions, strides,
|
2919
|
|
NULL, 0, NULL);
|
2920
|
1
|
if (extra_op == NULL) {
|
2921
|
|
goto fail;
|
2922
|
|
}
|
2923
|
|
}
|
2924
|
|
|
2925
|
|
/*
|
2926
|
|
* The extra op is now either allocated, can be allocated by
|
2927
|
|
* NpyIter (no subspace) or is not used at all.
|
2928
|
|
*
|
2929
|
|
* Need to set the axis remapping for the extra_op. This needs
|
2930
|
|
* to cause ignoring of subspace dimensions and prepending -1
|
2931
|
|
* for broadcasting.
|
2932
|
|
*/
|
2933
|
1
|
if (extra_op) {
|
2934
|
1
|
for (j=0; j < mit->nd - PyArray_NDIM(extra_op); j++) {
|
2935
|
1
|
single_op_axis[j] = -1;
|
2936
|
|
}
|
2937
|
1
|
for (i=0; i < PyArray_NDIM(extra_op); i++) {
|
2938
|
|
/* (also fills subspace dimensions; those entries are used later by the subspace iterator) */
|
2939
|
1
|
single_op_axis[j++] = i;
|
2940
|
|
}
|
2941
|
|
}
|
2942
|
|
|
2943
|
|
/*
|
2944
|
|
* NOTE: If for some reason someone wishes to use REDUCE_OK, be
|
2945
|
|
* careful and fix the error message replacement at the end.
|
2946
|
|
*/
|
2947
|
1
|
outer_flags = NPY_ITER_ZEROSIZE_OK |
|
2948
|
|
NPY_ITER_REFS_OK |
|
2949
|
|
NPY_ITER_BUFFERED |
|
2950
|
|
NPY_ITER_DELAY_BUFALLOC |
|
2951
|
|
NPY_ITER_GROWINNER;
|
2952
|
|
|
2953
|
|
/*
|
2954
|
|
* For a single 1-d operand, guarantee iteration order
|
2955
|
|
* (scipy used this). Note that subspace may be used.
|
2956
|
|
*/
|
2957
|
1
|
if ((mit->numiter == 1) && (PyArray_NDIM(index_arrays[0]) == 1)) {
|
2958
|
1
|
outer_flags |= NPY_ITER_DONT_NEGATE_STRIDES;
|
2959
|
|
}
|
2960
|
|
|
2961
|
|
/* If external array is iterated, and no subspace is needed */
|
2962
|
1
|
nops = mit->numiter;
|
2963
|
1
|
if (extra_op_flags && !uses_subspace) {
|
2964
|
|
/*
|
2965
|
|
* NOTE: This small limitation should practically not matter.
|
2966
|
|
* (replaces npyiter error)
|
2967
|
|
*/
|
2968
|
1
|
if (mit->numiter > NPY_MAXDIMS - 1) {
|
2969
|
1
|
PyErr_Format(PyExc_IndexError,
|
2970
|
|
"when no subspace is given, the number of index "
|
2971
|
|
"arrays cannot be above %d, but %d index arrays found",
|
2972
|
|
NPY_MAXDIMS - 1, mit->numiter);
|
2973
|
1
|
goto fail;
|
2974
|
|
}
|
2975
|
|
|
2976
|
1
|
nops += 1;
|
2977
|
1
|
index_arrays[mit->numiter] = extra_op;
|
2978
|
|
|
2979
|
1
|
dtypes[mit->numiter] = extra_op_dtype;
|
2980
|
1
|
op_flags[mit->numiter] = (extra_op_flags |
|
2981
|
1
|
NPY_ITER_ALLOCATE |
|
2982
|
|
NPY_ITER_NO_SUBTYPE);
|
2983
|
|
|
2984
|
1
|
if (extra_op) {
|
2985
|
|
/* Use the axis remapping */
|
2986
|
1
|
op_axes[mit->numiter] = single_op_axis;
|
2987
|
1
|
mit->outer = NpyIter_AdvancedNew(nops, index_arrays, outer_flags,
|
2988
|
|
NPY_KEEPORDER, NPY_UNSAFE_CASTING, op_flags, dtypes,
|
2989
|
1
|
mit->nd_fancy, op_axes, mit->dimensions, 0);
|
2990
|
|
}
|
2991
|
|
else {
|
2992
|
1
|
mit->outer = NpyIter_MultiNew(nops, index_arrays, outer_flags,
|
2993
|
|
NPY_KEEPORDER, NPY_UNSAFE_CASTING, op_flags, dtypes);
|
2994
|
|
}
|
2995
|
|
|
2996
|
|
}
|
2997
|
|
else {
|
2998
|
|
/* TODO: Maybe add test for the CORDER, and maybe also allow F */
|
2999
|
1
|
mit->outer = NpyIter_MultiNew(nops, index_arrays, outer_flags,
|
3000
|
|
NPY_CORDER, NPY_UNSAFE_CASTING, op_flags, dtypes);
|
3001
|
|
}
|
3002
|
|
|
3003
|
|
/* NpyIter cleanup and information: */
|
3004
|
1
|
if (dummy_array) {
|
3005
|
1
|
Py_DECREF(index_arrays[0]);
|
3006
|
|
}
|
3007
|
1
|
if (mit->outer == NULL) {
|
3008
|
|
goto fail;
|
3009
|
|
}
|
3010
|
1
|
if (!uses_subspace) {
|
3011
|
1
|
NpyIter_EnableExternalLoop(mit->outer);
|
3012
|
|
}
|
3013
|
|
|
3014
|
1
|
mit->outer_next = NpyIter_GetIterNext(mit->outer, NULL);
|
3015
|
1
|
if (mit->outer_next == NULL) {
|
3016
|
|
goto fail;
|
3017
|
|
}
|
3018
|
1
|
mit->outer_ptrs = NpyIter_GetDataPtrArray(mit->outer);
|
3019
|
1
|
if (!uses_subspace) {
|
3020
|
1
|
mit->outer_strides = NpyIter_GetInnerStrideArray(mit->outer);
|
3021
|
|
}
|
3022
|
1
|
if (NpyIter_IterationNeedsAPI(mit->outer)) {
|
3023
|
1
|
mit->needs_api = 1;
|
3024
|
|
/* We may be doing a cast for the buffer, and that may have failed */
|
3025
|
1
|
if (PyErr_Occurred()) {
|
3026
|
|
goto fail;
|
3027
|
|
}
|
3028
|
|
}
|
3029
|
|
|
3030
|
|
/* Get the allocated extra_op */
|
3031
|
1
|
if (extra_op_flags) {
|
3032
|
1
|
if (extra_op == NULL) {
|
3033
|
1
|
mit->extra_op = NpyIter_GetOperandArray(mit->outer)[mit->numiter];
|
3034
|
|
}
|
3035
|
|
else {
|
3036
|
1
|
mit->extra_op = extra_op;
|
3037
|
|
}
|
3038
|
1
|
Py_INCREF(mit->extra_op);
|
3039
|
|
}
|
3040
|
|
|
3041
|
|
/*
|
3042
|
|
* If extra_op is being tracked but subspace is used, we need
|
3043
|
|
* to create a dedicated iterator for the outer iteration of
|
3044
|
|
* the extra operand.
|
3045
|
|
*/
|
3046
|
1
|
if (extra_op_flags && uses_subspace) {
|
3047
|
1
|
op_axes[0] = single_op_axis;
|
3048
|
1
|
mit->extra_op_iter = NpyIter_AdvancedNew(1, &extra_op,
|
3049
|
|
NPY_ITER_ZEROSIZE_OK |
|
3050
|
|
NPY_ITER_REFS_OK |
|
3051
|
|
NPY_ITER_GROWINNER,
|
3052
|
|
NPY_CORDER,
|
3053
|
|
NPY_NO_CASTING,
|
3054
|
|
&extra_op_flags,
|
3055
|
|
NULL,
|
3056
|
|
mit->nd_fancy, op_axes,
|
3057
|
1
|
mit->dimensions, 0);
|
3058
|
|
|
3059
|
1
|
if (mit->extra_op_iter == NULL) {
|
3060
|
|
goto fail;
|
3061
|
|
}
|
3062
|
|
|
3063
|
1
|
mit->extra_op_next = NpyIter_GetIterNext(mit->extra_op_iter, NULL);
|
3064
|
1
|
if (mit->extra_op_next == NULL) {
|
3065
|
|
goto fail;
|
3066
|
|
}
|
3067
|
1
|
mit->extra_op_ptrs = NpyIter_GetDataPtrArray(mit->extra_op_iter);
|
3068
|
|
}
|
3069
|
|
|
3070
|
|
/* Get the full dimension information */
|
3071
|
1
|
if (subspace != NULL) {
|
3072
|
1
|
mit->baseoffset = PyArray_BYTES(subspace);
|
3073
|
|
}
|
3074
|
|
else {
|
3075
|
1
|
mit->baseoffset = PyArray_BYTES(arr);
|
3076
|
|
}
|
3077
|
|
|
3078
|
|
/* Calculate total size of the MapIter */
|
3079
|
1
|
mit->size = PyArray_OverflowMultiplyList(mit->dimensions, mit->nd);
|
3080
|
1
|
if (mit->size < 0) {
|
3081
|
0
|
PyErr_SetString(PyExc_ValueError,
|
3082
|
|
"advanced indexing operation result is too large");
|
3083
|
0
|
goto fail;
|
3084
|
|
}
|
3085
|
|
|
3086
|
|
/* Can now return early if no subspace is being used */
|
3087
|
1
|
if (!uses_subspace) {
|
3088
|
1
|
Py_XDECREF(extra_op);
|
3089
|
1
|
Py_DECREF(intp_descr);
|
3090
|
|
return (PyObject *)mit;
|
3091
|
|
}
|
3092
|
|
|
3093
|
|
/* Fill in the last bit of mapiter information needed */
|
3094
|
|
|
3095
|
|
/*
|
3096
|
|
* Now just need to create the correct subspace iterator.
|
3097
|
|
*/
|
3098
|
1
|
index_arrays[0] = subspace;
|
3099
|
1
|
dtypes[0] = NULL;
|
3100
|
1
|
op_flags[0] = subspace_flags;
|
3101
|
1
|
op_axes[0] = NULL;
|
3102
|
|
|
3103
|
1
|
if (extra_op_flags) {
|
3104
|
|
/* We should iterate the extra_op as well */
|
3105
|
1
|
nops = 2;
|
3106
|
1
|
index_arrays[1] = extra_op;
|
3107
|
|
|
3108
|
1
|
op_axes[1] = &single_op_axis[mit->nd_fancy];
|
3109
|
|
|
3110
|
|
/*
|
3111
|
|
* Buffering is never used here, but in case someone plugs it in
|
3112
|
|
* somewhere else, set the type correctly then.
|
3113
|
|
*/
|
3114
|
1
|
if ((subspace_iter_flags & NPY_ITER_BUFFERED)) {
|
3115
|
0
|
dtypes[1] = extra_op_dtype;
|
3116
|
|
}
|
3117
|
|
else {
|
3118
|
1
|
dtypes[1] = NULL;
|
3119
|
|
}
|
3120
|
1
|
op_flags[1] = extra_op_flags;
|
3121
|
|
}
|
3122
|
|
else {
|
3123
|
|
nops = 1;
|
3124
|
|
}
|
3125
|
|
|
3126
|
1
|
mit->subspace_iter = NpyIter_AdvancedNew(nops, index_arrays,
|
3127
|
|
NPY_ITER_ZEROSIZE_OK |
|
3128
|
|
NPY_ITER_REFS_OK |
|
3129
|
|
NPY_ITER_GROWINNER |
|
3130
|
|
NPY_ITER_EXTERNAL_LOOP |
|
3131
|
|
NPY_ITER_DELAY_BUFALLOC |
|
3132
|
|
subspace_iter_flags,
|
3133
|
|
(nops == 1 ? NPY_CORDER : NPY_KEEPORDER),
|
3134
|
|
NPY_UNSAFE_CASTING,
|
3135
|
|
op_flags, dtypes,
|
3136
|
|
PyArray_NDIM(subspace), op_axes,
|
3137
|
1
|
&mit->dimensions[mit->nd_fancy], 0);
|
3138
|
|
|
3139
|
1
|
if (mit->subspace_iter == NULL) {
|
3140
|
|
goto fail;
|
3141
|
|
}
|
3142
|
|
|
3143
|
1
|
mit->subspace_next = NpyIter_GetIterNext(mit->subspace_iter, NULL);
|
3144
|
1
|
if (mit->subspace_next == NULL) {
|
3145
|
|
goto fail;
|
3146
|
|
}
|
3147
|
1
|
mit->subspace_ptrs = NpyIter_GetDataPtrArray(mit->subspace_iter);
|
3148
|
1
|
mit->subspace_strides = NpyIter_GetInnerStrideArray(mit->subspace_iter);
|
3149
|
|
|
3150
|
1
|
if (NpyIter_IterationNeedsAPI(mit->outer)) {
|
3151
|
0
|
mit->needs_api = 1;
|
3152
|
|
/*
|
3153
|
|
* NOTE: In this case, need to call PyErr_Occurred() after
|
3154
|
|
* basepointer resetting (buffer allocation)
|
3155
|
|
*/
|
3156
|
|
}
|
3157
|
|
|
3158
|
1
|
Py_XDECREF(extra_op);
|
3159
|
1
|
Py_DECREF(intp_descr);
|
3160
|
|
return (PyObject *)mit;
|
3161
|
|
|
31 |