#ifndef _NPY_PRIVATE_COMMON_H_
#define _NPY_PRIVATE_COMMON_H_
#include "structmember.h"
#include <numpy/npy_common.h>
#include <numpy/ndarraytypes.h>
#include <limits.h>
#include "npy_import.h"

#define error_converting(x) (((x) == -1) && PyErr_Occurred())

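/*
 * Usage sketch for error_converting() (editorial illustration, not part of
 * the original header). CPython conversion helpers such as PyLong_AsLong()
 * return -1 both for a genuine value of -1 and on failure, so the pending
 * exception must be checked as well:
 *
 *     long val = PyLong_AsLong(obj);
 *     if (error_converting(val)) {
 *         return -1;           (an exception is already set)
 *     }
 */
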
#ifdef NPY_ALLOW_THREADS
#define NPY_BEGIN_THREADS_NDITER(iter) \
        do { \
            if (!NpyIter_IterationNeedsAPI(iter)) { \
                NPY_BEGIN_THREADS_THRESHOLDED(NpyIter_GetIterSize(iter)); \
            } \
        } while(0)
#else
#define NPY_BEGIN_THREADS_NDITER(iter)
#endif

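/*
 * Usage sketch for NPY_BEGIN_THREADS_NDITER (editorial illustration, not part
 * of the original header; `iter` and the loop body are hypothetical). The
 * macro releases the GIL only when the iterator does not need the Python API
 * and the iteration is large enough to be worth it:
 *
 *     NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
 *     NPY_BEGIN_THREADS_DEF;
 *     NPY_BEGIN_THREADS_NDITER(iter);
 *     do {
 *         (process the current inner loop data)
 *     } while (iternext(iter));
 *     NPY_END_THREADS;
 */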

NPY_NO_EXPORT PyArray_Descr *
PyArray_DTypeFromObjectStringDiscovery(
        PyObject *obj, PyArray_Descr *last_dtype, int string_type);

/*
 * Recursively examines the object to determine an appropriate dtype
 * to use for converting to an ndarray.
 *
 * 'obj' is the object to be converted to an ndarray.
 *
 * 'maxdims' is the maximum recursion depth.
 *
 * 'out_dtype' should be either NULL or a minimal starting dtype when
 * the function is called. It is updated with the results of type
 * promotion. This dtype does not get updated when processing NA objects.
 *
 * Returns 0 on success, -1 on failure.
 */
NPY_NO_EXPORT int
PyArray_DTypeFromObject(PyObject *obj, int maxdims,
                        PyArray_Descr **out_dtype);

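/*
 * Usage sketch for PyArray_DTypeFromObject (editorial illustration, not part
 * of the original header; `obj` is hypothetical). Passing a NULL starting
 * dtype lets the function discover the dtype from scratch:
 *
 *     PyArray_Descr *dtype = NULL;
 *     if (PyArray_DTypeFromObject(obj, NPY_MAXDIMS, &dtype) < 0) {
 *         return NULL;
 *     }
 *     (use dtype, which holds the promoted result, then release it)
 *     Py_XDECREF(dtype);
 */
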
NPY_NO_EXPORT int
PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
                              PyArray_Descr **out_dtype, int string_status);

/*
 * Returns NULL without setting an exception if no scalar is matched, a
 * new dtype reference otherwise.
 */
NPY_NO_EXPORT PyArray_Descr *
_array_find_python_scalar_type(PyObject *op);

NPY_NO_EXPORT PyArray_Descr *
_array_typedescr_fromstr(char const *str);

NPY_NO_EXPORT char *
index2ptr(PyArrayObject *mp, npy_intp i);

NPY_NO_EXPORT int
_zerofill(PyArrayObject *ret);

NPY_NO_EXPORT npy_bool
_IsWriteable(PyArrayObject *ap);

NPY_NO_EXPORT PyObject *
convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending);

/*
 * Sets ValueError with "matrices not aligned" message for np.dot and friends
 * when a.shape[i] should match b.shape[j], but doesn't.
 */
NPY_NO_EXPORT void
dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j);

/**
 * unpack tuple of dtype->fields (descr, offset, title[not-needed])
 *
 * @param "value" should be the tuple.
 *
 * @return "descr" will be set to the field's dtype
 * @return "offset" will be set to the field's offset
 *
 * returns -1 on failure, 0 on success.
 */
NPY_NO_EXPORT int
_unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset);

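/*
 * Usage sketch for _unpack_field (editorial illustration, not part of the
 * original header; `dtype` is a hypothetical structured PyArray_Descr):
 * iterate over the fields dictionary and unpack each (descr, offset[, title])
 * tuple.
 *
 *     PyObject *key, *value;
 *     Py_ssize_t pos = 0;
 *     while (PyDict_Next(dtype->fields, &pos, &key, &value)) {
 *         PyArray_Descr *field_descr;
 *         npy_intp field_offset;
 *         if (_unpack_field(value, &field_descr, &field_offset) < 0) {
 *             return -1;
 *         }
 *         (use field_descr and field_offset)
 *     }
 */
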
/*
 * check whether arrays with datatype dtype might have object fields. This will
 * only happen for structured dtypes (which may have hidden objects even if the
 * HASOBJECT flag is false), object dtypes, or subarray dtypes whose base type
 * is either of these.
 */
NPY_NO_EXPORT int
_may_have_objects(PyArray_Descr *dtype);

/*
 * Returns -1 and sets an exception if *index is an invalid index for
 * an array of size max_item, otherwise adjusts it in place to be
 * 0 <= *index < max_item, and returns 0.
 * 'axis' should be the array axis that is being indexed over, if known. If
 * unknown, use -1.
 * If _save is NULL, it is assumed the GIL is held.
 * If _save is not NULL, it is assumed the GIL is not held and it
 * is re-acquired in the case of an error.
 */
static NPY_INLINE int
check_and_adjust_index(npy_intp *index, npy_intp max_item, int axis,
                       PyThreadState * _save)
{
    /* Check that index is valid, taking into account negative indices */
    if (NPY_UNLIKELY((*index < -max_item) || (*index >= max_item))) {
        NPY_END_THREADS;
        /* Try to be as clear as possible about what went wrong. */
        if (axis >= 0) {
            PyErr_Format(PyExc_IndexError,
                         "index %"NPY_INTP_FMT" is out of bounds "
                         "for axis %d with size %"NPY_INTP_FMT,
                         *index, axis, max_item);
        } else {
            PyErr_Format(PyExc_IndexError,
                         "index %"NPY_INTP_FMT" is out of bounds "
                         "for size %"NPY_INTP_FMT, *index, max_item);
        }
        return -1;
    }
    /* adjust negative indices */
    if (*index < 0) {
        *index += max_item;
    }
    return 0;
}

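/*
 * Usage sketch for check_and_adjust_index (editorial illustration, not part
 * of the original header; `arr` is hypothetical). With the GIL held, pass
 * NULL for _save; a negative index is adjusted in place:
 *
 *     npy_intp i = -1;
 *     if (check_and_adjust_index(&i, PyArray_DIM(arr, 0), 0, NULL) < 0) {
 *         return -1;           (IndexError is already set)
 *     }
 *     (i now equals PyArray_DIM(arr, 0) - 1)
 */
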
/*
 * Returns -1 and sets an exception if *axis is an invalid axis for
 * an array of dimension ndim, otherwise adjusts it in place to be
 * 0 <= *axis < ndim, and returns 0.
 *
 * msg_prefix: borrowed reference, a string to prepend to the message
 */
static NPY_INLINE int
check_and_adjust_axis_msg(int *axis, int ndim, PyObject *msg_prefix)
{
    /* Check that axis is valid, taking into account negative axes */
    if (NPY_UNLIKELY((*axis < -ndim) || (*axis >= ndim))) {
        /*
         * Load the AxisError exception type, if we don't already have it,
         * via npy_cache_import (from npy_import.h).
         */
        static PyObject *AxisError_cls = NULL;
        PyObject *exc;

        npy_cache_import("numpy.core._exceptions", "AxisError", &AxisError_cls);
        if (AxisError_cls == NULL) {
            return -1;
        }

        /* Invoke the AxisError constructor */
        exc = PyObject_CallFunction(AxisError_cls, "iiO",
                                    *axis, ndim, msg_prefix);
        if (exc == NULL) {
            return -1;
        }
        PyErr_SetObject(AxisError_cls, exc);
        Py_DECREF(exc);

        return -1;
    }
    /* adjust negative axes */
    if (*axis < 0) {
        *axis += ndim;
    }
    return 0;
}

static NPY_INLINE int
check_and_adjust_axis(int *axis, int ndim)
{
    return check_and_adjust_axis_msg(axis, ndim, Py_None);
}

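/*
 * Usage sketch for check_and_adjust_axis (editorial illustration, not part of
 * the original header; `arr` is hypothetical). Normalizes a possibly negative
 * axis argument and raises AxisError when it is out of range:
 *
 *     int axis = -1;
 *     if (check_and_adjust_axis(&axis, PyArray_NDIM(arr)) < 0) {
 *         return NULL;         (AxisError is already set)
 *     }
 *     (axis now equals PyArray_NDIM(arr) - 1)
 */
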
/* used for some alignment checks */
#define _ALIGN(type) offsetof(struct {char c; type v;}, v)
#define _UINT_ALIGN(type) npy_uint_alignment(sizeof(type))
/*
 * Disable harmless compiler warning "4116: unnamed type definition in
 * parentheses" which is caused by the _ALIGN macro.
 */
#if defined(_MSC_VER)
#pragma warning(disable:4116)
#endif

/*
 * return true if pointer is aligned to 'alignment'
 */
static NPY_INLINE int
npy_is_aligned(const void * p, const npy_uintp alignment)
{
    /*
     * Assumes alignment is a power of two, as required by the C standard.
     * Assumes cast from pointer to uintp gives a sensible representation we
     * can use bitwise & on (not required by C standard, but used by glibc).
     * This test is faster than a direct modulo.
     * Note alignment value of 0 is allowed and returns False.
     */
    return ((npy_uintp)(p) & ((alignment) - 1)) == 0;
}

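/*
 * Worked example for npy_is_aligned (editorial illustration, not part of the
 * original header; `arr` is hypothetical):
 *
 *     npy_is_aligned((void *)0x1004, 8):  0x1004 & 0x7 == 0x4  ->  returns 0
 *     npy_is_aligned((void *)0x1008, 8):  0x1008 & 0x7 == 0x0  ->  returns 1
 *
 * A typical call checks array data against the alignment of an element type:
 *
 *     int aligned = npy_is_aligned(PyArray_DATA(arr), _ALIGN(npy_uint32));
 */
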
/* Get equivalent "uint" alignment given an itemsize, for use in copy code */
static NPY_INLINE int
npy_uint_alignment(int itemsize)
{
    npy_uintp alignment = 0; /* return value of 0 means unaligned */

    switch(itemsize){
        case 1:
            return 1;
        case 2:
            alignment = _ALIGN(npy_uint16);
            break;
        case 4:
            alignment = _ALIGN(npy_uint32);
            break;
        case 8:
            alignment = _ALIGN(npy_uint64);
            break;
        case 16:
            /*
             * 16 byte types are copied using 2 uint64 assignments.
             * See the strided copy function in lowlevel_strided_loops.c.
             */
            alignment = _ALIGN(npy_uint64);
            break;
        default:
            break;
    }

    return alignment;
}

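/*
 * Usage sketch for npy_uint_alignment (editorial illustration, not part of
 * the original header; `itemsize`, `src`, and `dst` are hypothetical). Copy
 * code can pick an unsigned-integer copy loop only when both pointers are
 * aligned for the equivalent uint type of the itemsize:
 *
 *     npy_uintp align = npy_uint_alignment(itemsize);
 *     if (align != 0 &&
 *             npy_is_aligned(src, align) && npy_is_aligned(dst, align)) {
 *         (copy elements as npy_uint16/32/64 values)
 *     }
 *     else {
 *         (fall back to a memcpy-based loop)
 *     }
 */
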
/*
 * memchr with stride and invert argument
 * intended for small searches where a call out to libc memchr is costly.
 * stride must be a multiple of size.
 * compared to memchr it returns one stride past end instead of NULL if needle
 * is not found.
 */
static NPY_INLINE char *
npy_memchr(char * haystack, char needle,
           npy_intp stride, npy_intp size, npy_intp * psubloopsize, int invert)
{
    char * p = haystack;
    npy_intp subloopsize = 0;

    if (!invert) {
        /*
         * this is usually the path to determine elements to process,
         * performance less important here.
         * memchr has large setup cost if 0 byte is close to start.
         */
        while (subloopsize < size && *p != needle) {
            subloopsize++;
            p += stride;
        }
    }
    else {
        /* usually find elements to skip path */
        if (NPY_CPU_HAVE_UNALIGNED_ACCESS && needle == 0 && stride == 1) {
            /* iterate until last multiple of 4 */
            char * block_end = haystack + size - (size % sizeof(unsigned int));
            while (p < block_end) {
                unsigned int v = *(unsigned int*)p;
                if (v != 0) {
                    break;
                }
                p += sizeof(unsigned int);
            }
            /* handle rest */
            subloopsize = (p - haystack);
        }
        while (subloopsize < size && *p == needle) {
            subloopsize++;
            p += stride;
        }
    }

    *psubloopsize = subloopsize;

    return p;
}

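/*
 * Usage sketch for npy_memchr (editorial illustration, not part of the
 * original header; `mask` and `n` are hypothetical). Given a byte mask,
 * first count how many leading zero elements can be skipped (invert = 1),
 * then count the run of non-zero elements to process (invert = 0):
 *
 *     npy_intp skipped, run;
 *     char *p = npy_memchr(mask, 0, 1, n, &skipped, 1);
 *     npy_memchr(p, 0, 1, n - skipped, &run, 0);
 */
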
#include "ucsnarrow.h"

/*
 * Make a new empty array, of the passed size, of a type that takes the
 * priority of ap1 and ap2 into account.
 *
 * If `out` is non-NULL, memory overlap is checked with ap1 and ap2, and an
 * updateifcopy temporary array may be returned. If `result` is non-NULL, the
 * output array to be returned (`out` if non-NULL and the newly allocated array
 * otherwise) is incref'd and put to *result.
 */
NPY_NO_EXPORT PyArrayObject *
new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out,
                  int nd, npy_intp dimensions[], int typenum, PyArrayObject **result);


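/*
 * Usage sketch for new_array_for_sum (editorial illustration, not part of the
 * original header), as it might be called from a dot-like function; all
 * variables are hypothetical:
 *
 *     PyArrayObject *out_buf, *result;
 *     out_buf = new_array_for_sum(ap1, ap2, out, nd, dimensions, typenum,
 *                                 &result);
 *     if (out_buf == NULL) {
 *         return NULL;
 *     }
 *     (fill out_buf, then return result)
 */
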
/*
 * Used to indicate a broadcast axis, see also `npyiter_get_op_axis` in
 * `nditer_constr.c`. This may become the preferred API for reduction axes,
 * so we should consider making this public either as a macro or a function
 * (so that the way we flag the axis can be changed).
 */
#define NPY_ITER_REDUCTION_AXIS(axis) (axis + (1 << (NPY_BITSOF_INT - 2)))

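/*
 * Worked example for NPY_ITER_REDUCTION_AXIS (editorial illustration, not
 * part of the original header). With a 32-bit int, the flag bit is 1 << 30,
 * so axis 2 is encoded as
 *
 *     NPY_ITER_REDUCTION_AXIS(2) == 2 + (1 << 30) == 0x40000002
 *
 * and `npyiter_get_op_axis` in nditer_constr.c strips the flag again to
 * recover the plain axis number.
 */
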
#endif