TRAVIS_OS_NAME=linux
<<<<<< ENV
benchmarks/ndslice/binarization.d
benchmarks/ndslice/convolution.d
benchmarks/ndslice/dot_product.d
benchmarks/ndslice/euclidean_distance.d
dub.json
examples/data/stop_words
examples/data/trndocs.dat
examples/data/words
examples/lda_hoffman_sparse.d
examples/means_of_columns.d
examples/median_filter.d
index.d
meson.build
source/mir/glas/l1.d
source/mir/glas/l2.d
source/mir/glas/package.d
source/mir/model/lda/hoffman.d
source/mir/sparse/blas/axpy.d
source/mir/sparse/blas/dot.d
source/mir/sparse/blas/gemm.d
source/mir/sparse/blas/gemv.d
source/mir/sparse/blas/package.d
source/mir/sparse/package.d
subprojects/mir-algorithm.wrap
subprojects/mir-core.wrap
subprojects/mir-linux-kernel.wrap
subprojects/mir-random.wrap
<<<<<< network
# path=./source-mir-sparse-blas-axpy.lst
|/**
|License: $(LINK2 http://boost.org/LICENSE_1_0.txt, Boost License 1.0).
|
|Authors: Ilya Yaroshenko
|*/
|module mir.sparse.blas.axpy;
|
|import std.traits;
|import mir.ndslice.slice;
|import mir.sparse;
|import mir.series;
|
|/++
|Constant times a vector plus a vector.
|
|Params:
| x = sparse vector
| y = dense vector
| alpha = scalar
|Returns:
| `y = alpha * x + y`
|+/
|void axpy(
| CR,
| V1 : Series!(I1, T1),
| I1, T1, V2)
|(in CR alpha, V1 x, V2 y)
| if (isDynamicArray!V2 || isSlice!V2)
|in
|{
28| if (x.index.length)
27| assert(x.index[$-1] < y.length);
|}
|body
|{
| import mir.internal.utility;
|
321| foreach (size_t i; 0 .. x.index.length)
| {
79| auto j = x.index[i];
79| y[j] = alpha * x.value[i] + y[j];
| }
|}
|
|///
|unittest
|{
| import mir.series;
1| auto x = series([0, 3, 5, 9, 10], [1.0, 3, 4, 9, 13]);
1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
1| axpy(2.0, x, y);
1| assert(y == [2.0, 1.0, 2, 9, 4, 13, 6, 7, 8, 27, 36, 11, 12]);
|}
|
|unittest
|{
| import mir.series;
1| auto x = series([0, 3, 5, 9, 10], [1.0, 3, 4, 9, 13]);
1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
1| axpy(2.0, x, y.sliced);
1| assert(y == [2.0, 1.0, 2, 9, 4, 13, 6, 7, 8, 27, 36, 11, 12]);
|}
|
|unittest
|{
1| auto x = series([0, 3, 5, 9, 10], [1.0, 3, 4, 9, 13]);
1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
1| axpy(2.0, x, y.slicedField);
1| assert(y == [2.0, 1.0, 2, 9, 4, 13, 6, 7, 8, 27, 36, 11, 12]);
|}
source/mir/sparse/blas/axpy.d is 100% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-sorting.lst
|/++
|This is a submodule of $(MREF mir,ndslice).
|
|Note:
| The combination of
| $(SUBREF topology, pairwise) with lambda `"a <= b"` (`"a < b"`) and $(SUBREF algorithm, all) can be used
| to check if an ndslice is sorted (strictly monotonic).
| $(SUBREF topology, iota) can be used to make an index.
| $(SUBREF topology, map) and $(SUBREF topology, zip) can be used to create a Schwartzian transform.
| See also the examples in the module.
|
|
|See_also: $(SUBREF topology, flattened), `isSorted` and `isStrictlyMonotonic`.
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Andrei Alexandrescu (Phobos), Ilya Yaroshenko (API, rework, Mir adaptation)
|
|Macros:
| SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|+/
|module mir.ndslice.sorting;
|
|/// Check if an ndslice is sorted or strictly monotonic.
|@safe pure version(mir_test) unittest
|{
| import mir.algorithm.iteration: all;
| import mir.ndslice.slice: sliced;
| import mir.ndslice.sorting: sort;
| import mir.ndslice.topology: pairwise;
|
| auto arr = [1, 1, 2].sliced;
|
| assert(arr.pairwise!"a <= b".all);
| assert(!arr.pairwise!"a < b".all);
|
| arr = [4, 3, 2, 1].sliced;
|
| assert(!arr.pairwise!"a <= b".all);
| assert(!arr.pairwise!"a < b".all);
|
| sort(arr);
|
| assert(arr.pairwise!"a <= b".all);
| assert(arr.pairwise!"a < b".all);
|}
|
|/// Create index
|version(mir_test) unittest
|{
| import mir.algorithm.iteration: all;
| import mir.ndslice.allocation: slice;
| import mir.ndslice.slice: sliced;
| import mir.ndslice.sorting: sort;
| import mir.ndslice.topology: iota, pairwise;
|
| auto arr = [4, 2, 3, 1].sliced;
|
| auto index = arr.length.iota.slice;
| index.sort!((a, b) => arr[a] < arr[b]);
|
| assert(arr[index].pairwise!"a <= b".all);
|}
|
|/// Schwartzian transform
|version(mir_test) unittest
|{
| import mir.algorithm.iteration: all;
| import mir.ndslice.allocation: slice;
| import mir.ndslice.slice: sliced;
| import mir.ndslice.sorting: sort;
| import mir.ndslice.topology: zip, map, pairwise;
|
| alias transform = (a) => (a - 3) ^^ 2;
|
| auto arr = [4, 2, 3, 1].sliced;
|
| arr.map!transform.slice.zip(arr).sort!((l, r) => l.a < r.a);
|
| assert(arr.map!transform.pairwise!"a <= b".all);
|}
|
|import mir.ndslice.slice;
|import mir.math.common: optmath;
|
|@optmath:
|
|@safe pure version(mir_test) unittest
|{
| import mir.algorithm.iteration: all;
| import mir.ndslice.topology: pairwise;
|
| auto a = [1, 2, 3].sliced;
| assert(a[0 .. 0].pairwise!"a <= b".all);
| assert(a[0 .. 1].pairwise!"a <= b".all);
| assert(a.pairwise!"a <= b".all);
| auto b = [1, 3, 2].sliced;
| assert(!b.pairwise!"a <= b".all);
|
| // ignores duplicates
| auto c = [1, 1, 2].sliced;
| assert(c.pairwise!"a <= b".all);
|}
|
|@safe pure version(mir_test) unittest
|{
| import mir.algorithm.iteration: all;
| import mir.ndslice.topology: pairwise;
|
| assert([1, 2, 3][0 .. 0].sliced.pairwise!"a < b".all);
| assert([1, 2, 3][0 .. 1].sliced.pairwise!"a < b".all);
| assert([1, 2, 3].sliced.pairwise!"a < b".all);
| assert(![1, 3, 2].sliced.pairwise!"a < b".all);
| assert(![1, 1, 2].sliced.pairwise!"a < b".all);
|}
|
|
|/++
|Sorts ndslice, array, or series.
|
|See_also: $(SUBREF topology, flattened).
|+/
|template sort(alias less = "a < b")
|{
| import mir.functional: naryFun;
| import mir.series: Series;
| static if (__traits(isSame, naryFun!less, less))
| {
|@optmath:
| /++
| Sort n-dimensional slice.
| +/
| Slice!(Iterator, N, kind) sort(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| {
| if (false) // break safety
| {
| import mir.utility : swapStars;
| auto elem = typeof(*slice._iterator).init;
| elem = elem;
| auto l = less(elem, elem);
| }
| import mir.ndslice.topology: flattened;
| if (slice.anyEmpty)
| return slice;
| .quickSortImpl!less(slice.flattened);
| return slice;
| }
|
| /++
| Sort for arrays
| +/
| T[] sort(T)(T[] ar)
| {
| return .sort!less(ar.sliced).field;
| }
|
| /++
| Sort for one-dimensional Series.
| +/
| Series!(IndexIterator, Iterator, N, kind)
| sort(IndexIterator, Iterator, size_t N, SliceKind kind)
| (Series!(IndexIterator, Iterator, N, kind) series)
| if (N == 1)
| {
| import mir.ndslice.sorting: sort;
| import mir.ndslice.topology: zip;
| with(series)
| index.zip(data).sort!((a, b) => less(a.a, b.a));
| return series;
| }
|
| /++
| Sort for n-dimensional Series.
| +/
| Series!(IndexIterator, Iterator, N, kind)
| sort(
| IndexIterator,
| Iterator,
| size_t N,
| SliceKind kind,
| SortIndexIterator,
| DataIterator,
| )
| (
| Series!(IndexIterator, Iterator, N, kind) series,
| Slice!SortIndexIterator indexBuffer,
| Slice!DataIterator dataBuffer,
| )
| {
| import mir.algorithm.iteration: each;
| import mir.ndslice.sorting: sort;
| import mir.ndslice.topology: iota, zip, ipack, evertPack;
|
| assert(indexBuffer.length == series.length);
| assert(dataBuffer.length == series.length);
| indexBuffer[] = indexBuffer.length.iota!(typeof(indexBuffer.front));
| series.index.zip(indexBuffer).sort!((a, b) => less(a.a, b.a));
| series.data.ipack!1.evertPack.each!((sl){
| {
| assert(sl.shape == dataBuffer.shape);
| dataBuffer[] = sl[indexBuffer];
| sl[] = dataBuffer;
| }});
| return series;
| }
| }
| else
| alias sort = .sort!(naryFun!less);
|}
|
|///
|@safe pure version(mir_test) unittest
|{
| import mir.algorithm.iteration: all;
| import mir.ndslice.slice;
| import mir.ndslice.sorting: sort;
| import mir.ndslice.topology: pairwise;
|
| int[10] arr = [7,1,3,2,9,0,5,4,8,6];
|
| auto data = arr[].sliced(arr.length);
| data.sort();
| assert(data.pairwise!"a <= b".all);
|}
|
|/// one-dimensional series
|pure version(mir_test) unittest
|{
| import mir.series;
|
| auto index = [4, 2, 1, 3, 0].sliced;
| auto data = [5.6, 3.4, 2.1, 7.8, 0.1].sliced;
| auto series = index.series(data);
| series.sort;
| assert(series.index == [0, 1, 2, 3, 4]);
| assert(series.data == [0.1, 2.1, 3.4, 7.8, 5.6]);
| /// initial index and data are the same
| assert(index.iterator is series.index.iterator);
| assert(data.iterator is series.data.iterator);
|
| foreach(obs; series)
| {
| static assert(is(typeof(obs) == Observation!(int, double)));
| }
|}
|
|/// two-dimensional series
|pure version(mir_test) unittest
|{
| import mir.series;
| import mir.ndslice.allocation: uninitSlice;
|
| auto index = [4, 2, 3, 1].sliced;
| auto data =
| [2.1, 3.4,
| 5.6, 7.8,
| 3.9, 9.0,
| 4.0, 2.0].sliced(4, 2);
| auto series = index.series(data);
|
| series.sort(
| uninitSlice!size_t(series.length), // index buffer
| uninitSlice!double(series.length), // data buffer
| );
|
| assert(series.index == [1, 2, 3, 4]);
| assert(series.data ==
| [[4.0, 2.0],
| [5.6, 7.8],
| [3.9, 9.0],
| [2.1, 3.4]]);
| /// initial index and data are the same
| assert(index.iterator is series.index.iterator);
| assert(data.iterator is series.data.iterator);
|}
|
|void quickSortImpl(alias less, Iterator)(Slice!Iterator slice) @trusted
|{
| import mir.utility : swap, swapStars;
|
| enum max_depth = 64;
| enum naive_est = 1024 / slice.Element!0.sizeof;
| enum size_t naive = 32 > naive_est ? 32 : naive_est;
| //enum size_t naive = 1;
| static assert(naive >= 1);
|
| for(;;)
| {
| auto l = slice._iterator;
| auto r = l;
| r += slice.length;
|
| static if (naive > 1)
| {
| if (slice.length <= naive || __ctfe)
| {
| auto p = r;
| --p;
| while(p != l)
| {
| --p;
| //static if (is(typeof(() nothrow
| // {
| // auto t = slice[0]; if (less(t, slice[0])) slice[0] = slice[0];
| // })))
| //{
| auto d = p;
| import mir.functional: unref;
| auto temp = unref(*d);
| auto c = d;
| ++c;
| if (less(*c, temp))
| {
| do
| {
| d[0] = *c;
| ++d;
| ++c;
| }
| while (c != r && less(*c, temp));
| d[0] = temp;
| }
| //}
| //else
| //{
| // auto d = p;
| // auto c = d;
| // ++c;
| // while (less(*c, *d))
| // {
| // swap(*d, *c);
| // d = c;
| // ++c;
| // if (c == maxJ) break;
| // }
| //}
| }
| return;
| }
| }
| else
| {
| if(slice.length <= 1)
| return;
| }
|
| // partition
| auto lessI = l;
| --r;
| auto pivotIdx = l + slice.length / 2;
| setPivot!less(slice.length, l, pivotIdx, r);
| import mir.functional: unref;
| auto pivot = unref(*pivotIdx);
| --lessI;
| auto greaterI = r;
| swapStars(pivotIdx, greaterI);
|
| outer: for (;;)
| {
| do ++lessI;
| while (less(*lessI, pivot));
| assert(lessI <= greaterI, "sort: invalid comparison function.");
| for (;;)
| {
| if (greaterI == lessI)
| break outer;
| --greaterI;
| if (!less(pivot, *greaterI))
| break;
| }
| assert(lessI <= greaterI, "sort: invalid comparison function.");
| if (lessI == greaterI)
| break;
| swapStars(lessI, greaterI);
| }
|
| swapStars(r, lessI);
|
| ptrdiff_t len = lessI - l;
| auto tail = slice[len + 1 .. $];
| slice = slice[0 .. len];
| if (tail.length > slice.length)
| swap(slice, tail);
| quickSortImpl!less(tail);
| }
|}
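|
|// A minimal sketch (not part of the original module): `quickSortImpl` sorts a
|// one-dimensional slice in place; the public `sort` above is the intended entry
|// point and forwards a flattened slice here.
|version(mir_test)
|unittest
|{
| import mir.algorithm.iteration: all;
| import mir.ndslice.slice: sliced;
| import mir.ndslice.topology: pairwise;
|
| auto a = [3, 1, 2, 5, 4].sliced;
| quickSortImpl!((a, b) => a < b)(a);
| assert(a.pairwise!"a <= b".all);
|}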
|
|void setPivot(alias less, Iterator)(size_t length, ref Iterator l, ref Iterator mid, ref Iterator r) @trusted
|{
| if (length < 512)
| {
| if (length >= 32)
| medianOf!less(l, mid, r);
| return;
| }
| auto quarter = length >> 2;
| auto b = mid - quarter;
| auto e = mid + quarter;
| medianOf!less(l, e, mid, b, r);
|}
|
|void medianOf(alias less, bool leanRight = false, Iterator)
| (ref Iterator a, ref Iterator b) @trusted
|{
| import mir.utility : swapStars;
|
| if (less(*b, *a)) {
| swapStars(a, b);
| }
| assert(!less(*b, *a));
|}
|
|void medianOf(alias less, bool leanRight = false, Iterator)
| (ref Iterator a, ref Iterator b, ref Iterator c) @trusted
|{
| import mir.utility : swapStars;
|
| if (less(*c, *a)) // c < a
| {
| if (less(*a, *b)) // c < a < b
| {
| swapStars(a, b);
| swapStars(a, c);
| }
| else // c < a, b <= a
| {
| swapStars(a, c);
| if (less(*b, *a)) swapStars(a, b);
| }
| }
| else // a <= c
| {
| if (less(*b, *a)) // b < a <= c
| {
| swapStars(a, b);
| }
| else // a <= c, a <= b
| {
| if (less(*c, *b)) swapStars(b, c);
| }
| }
| assert(!less(*b, *a));
| assert(!less(*c, *b));
|}
|
|void medianOf(alias less, bool leanRight = false, Iterator)
| (ref Iterator a, ref Iterator b, ref Iterator c, ref Iterator d) @trusted
|{
| import mir.utility: swapStars;
|
| static if (!leanRight)
| {
| // Eliminate the rightmost from the competition
| if (less(*d, *c)) swapStars(c, d); // c <= d
| if (less(*d, *b)) swapStars(b, d); // b <= d
| medianOf!less(a, b, c);
| }
| else
| {
| // Eliminate the leftmost from the competition
| if (less(*b, *a)) swapStars(a, b); // a <= b
| if (less(*c, *a)) swapStars(a, c); // a <= c
| medianOf!less(b, c, d);
| }
|}
|
|void medianOf(alias less, bool leanRight = false, Iterator)
| (ref Iterator a, ref Iterator b, ref Iterator c, ref Iterator d, ref Iterator e) @trusted
|{
| import mir.utility: swapStars; // Credit: Teppo Niinimäki
|
| version(unittest) scope(success)
| {
| assert(!less(*c, *a));
| assert(!less(*c, *b));
| assert(!less(*d, *c));
| assert(!less(*e, *c));
| }
|
| if (less(*c, *a)) swapStars(a, c);
| if (less(*d, *b)) swapStars(b, d);
| if (less(*d, *c))
| {
| swapStars(c, d);
| swapStars(a, b);
| }
| if (less(*e, *b)) swapStars(b, e);
| if (less(*e, *c))
| {
| swapStars(c, e);
| if (less(*c, *a)) swapStars(a, c);
| }
| else
| {
| if (less(*c, *b)) swapStars(b, c);
| }
|}
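|
|// A minimal sketch (not part of the original module): after the three-iterator
|// `medianOf`, the median of the referenced elements sits at the middle iterator.
|version(mir_test)
|@trusted pure nothrow
|unittest
|{
| import mir.ndslice.slice: sliced;
|
| auto arr = [3, 1, 2].sliced;
| auto a = arr._iterator;
| auto b = a + 1;
| auto c = a + 2;
| medianOf!((x, y) => x < y)(a, b, c);
| assert(arr[1] == 2);
|}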
|
|
|/++
|Returns: `true` if a sorted array contains the value.
|
|Params:
| test = strict ordering symmetric predicate
|
|For non-symmetric predicates, use a structure with two `opCall`s or an alias of two global functions
|that correspond to the `(array[i], value)` and `(value, array[i])` cases.
|
|See_also: $(LREF transitionIndex).
|+/
|template assumeSortedContains(alias test = "a < b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!test, test))
| {
|@optmath:
| /++
| Params:
| slice = sorted one-dimensional slice or array.
| v = value to test with. It is passed as the second argument.
| +/
| bool assumeSortedContains(Iterator, SliceKind kind, V)
| (auto ref Slice!(Iterator, 1, kind) slice, auto ref scope const V v)
| {
| auto ti = transitionIndex!test(slice, v);
| return ti < slice.length && !test(v, slice[ti]);
| }
|
| /// ditto
| bool assumeSortedContains(T, V)(scope T[] ar, auto ref scope const V v)
| {
| return .assumeSortedContains!test(ar.sliced, v);
| }
| }
| else
| alias assumeSortedContains = .assumeSortedContains!(naryFun!test);
|}
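|
|// A minimal usage sketch (not part of the original module), in the spirit of the
|// `assumeSortedEqualIndex` example below; it assumes the default predicate `a < b`.
|version(mir_test)
|@safe pure unittest
|{
| // sorted: a < b
| auto a = [0, 1, 2, 3, 4, 6];
|
| assert(a.assumeSortedContains(2));
| assert(!a.assumeSortedContains(5));
|}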
|
|/++
|Returns: the smallest index of a sorted array whose element matches the value
| according to the predicate, or `-1` if the array doesn't contain a matching element.
|
|Params:
| test = strict ordering symmetric predicate.
|
|For non-symmetric predicates, use a structure with two `opCall`s or an alias of two global functions
|that correspond to the `(array[i], value)` and `(value, array[i])` cases.
|
|See_also: $(LREF transitionIndex).
|+/
|template assumeSortedEqualIndex(alias test = "a < b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!test, test))
| {
|@optmath:
| /++
| Params:
| slice = sorted one-dimensional slice or array.
| v = value to test with. It is passed as the second argument.
| +/
| sizediff_t assumeSortedEqualIndex(Iterator, SliceKind kind, V)
| (auto ref Slice!(Iterator, 1, kind) slice, auto ref scope const V v)
| {
| auto ti = transitionIndex!test(slice, v);
| return ti < slice.length && !test(v, slice[ti]) ? ti : -1;
| }
|
| /// ditto
| sizediff_t assumeSortedEqualIndex(T, V)(scope T[] ar, auto ref scope const V v)
| {
| return .assumeSortedEqualIndex!test(ar.sliced, v);
| }
| }
| else
| alias assumeSortedEqualIndex = .assumeSortedEqualIndex!(naryFun!test);
|}
|
|///
|version(mir_test)
|@safe pure unittest
|{
| // sorted: a < b
| auto a = [0, 1, 2, 3, 4, 6];
|
| assert(a.assumeSortedEqualIndex(2) == 2);
| assert(a.assumeSortedEqualIndex(5) == -1);
|
| // non-strict predicates such as "a <= b" don't work
| assert(a.assumeSortedEqualIndex!"a <= b"(2) == -1);
|}
|
|/++
|Computes the transition index using binary search.
|It is a low-level API for computing the lower and upper bounds of a sorted array.
|
|Params:
| test = ordering predicate for `(array[i], value)` pairs.
|
|See_also: $(LREF assumeSortedEqualIndex).
|+/
|template transitionIndex(alias test = "a < b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!test, test))
| {
|@optmath:
| /++
| Params:
| slice = sorted one-dimensional slice or array.
| v = value to test with. It is passed as the second argument.
| +/
| size_t transitionIndex(Iterator, SliceKind kind, V)
| (auto ref Slice!(Iterator, 1, kind) slice, auto ref scope const V v)
| {
| size_t first = 0, count = slice.length;
| while (count > 0)
| {
| immutable step = count / 2, it = first + step;
| if (test(slice[it], v))
| {
| first = it + 1;
| count -= step + 1;
| }
| else
| {
| count = step;
| }
| }
| return first;
| }
|
| /// ditto
| size_t transitionIndex(T, V)(scope T[] ar, auto ref scope const V v)
| {
| return .transitionIndex!test(ar.sliced, v);
| }
|
| }
| else
| alias transitionIndex = .transitionIndex!(naryFun!test);
|}
|
|///
|version(mir_test)
|@safe pure unittest
|{
| // sorted: a < b
| auto a = [0, 1, 2, 3, 4, 6];
|
| auto i = a.transitionIndex(2);
| assert(i == 2);
| auto lowerBound = a[0 .. i];
|
| auto j = a.transitionIndex!"a <= b"(2);
| assert(j == 3);
| auto upperBound = a[j .. $];
|
| assert(a.transitionIndex(a[$-1]) == a.length - 1);
| assert(a.transitionIndex!"a <= b"(a[$-1]) == a.length);
|}
|
|/++
|Computes an index for `r` based on the comparison `less`. The
|index is a sorted array of indices into the original
|range. This technique is similar to sorting, but it is more flexible
|because (1) it allows "sorting" of immutable collections, (2) allows
|binary search even if the original collection does not offer random
|access, (3) allows multiple indices, each on a different predicate,
|and (4) may be faster when dealing with large objects. However, using
|an index may also be slower under certain circumstances due to the
|extra indirection, and is always larger than a sorting-based solution
|because it needs space for the index in addition to the original
|collection. The complexity is the same as `sort`'s.
|Params:
| less = The comparison to use.
| r = The slice/array to index.
|Returns: Index slice/array.
|+/
|Slice!(I*) makeIndex(I = size_t, alias less = "a < b", Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) r)
|{
| import mir.functional: naryFun;
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
| return r
| .length
| .iota!I
| .slice
| .sort!((a, b) => naryFun!less(r[a], r[b]));
|}
|
|///
|I[] makeIndex(I = size_t, alias less = "a < b", T)(scope T[] r)
|{
| return .makeIndex!(I, less)(r.sliced).field;
|}
|
|///
|version(mir_test)
|@system unittest
|{
| import mir.algorithm.iteration: all;
| import mir.ndslice.topology: indexed, pairwise;
|
| immutable arr = [ 2, 3, 1, 5, 0 ];
| auto index = arr.makeIndex;
|
| assert(arr.indexed(index).pairwise!"a < b".all);
|}
|
|/++
|Partitions `slice` around `pivot` using comparison function `less`, algorithm
|akin to $(LINK2 https://en.wikipedia.org/wiki/Quicksort#Hoare_partition_scheme,
|Hoare partition). Specifically, permutes elements of `slice` and returns
|an index `k < slice.length` such that:
|
|$(UL
|
|$(LI `slice[pivot]` is swapped to `slice[k]`)
|
|
|$(LI All elements `e` in subrange `slice[0 .. k]` satisfy `!less(slice[k], e)`
|(i.e. `slice[k]` is greater than or equal to each element to its left according
|to predicate `less`))
|
|$(LI All elements `e` in subrange `slice[k .. $]` satisfy
|`!less(e, slice[k])` (i.e. `slice[k]` is less than or equal to each element to
|its right according to predicate `less`)))
|
|If `slice` contains equivalent elements, multiple permutations of `slice` may
|satisfy these constraints. In such cases, `pivotPartition` attempts to
|distribute equivalent elements fairly to the left and right of `k` such that `k`
|stays close to `slice.length / 2`.
|
|Params:
| less = The predicate used for comparison
|
|Returns:
| The new position of the pivot
|
|See_Also:
| $(HTTP jgrcs.info/index.php/jgrcs/article/view/142, Engineering of a Quicksort
| Partitioning Algorithm), D. Abhyankar, Journal of Global Research in Computer
| Science, February 2011. $(HTTPS youtube.com/watch?v=AxnotgLql0k, ACCU 2016
| Keynote), Andrei Alexandrescu.
|+/
|@trusted
|template pivotPartition(alias less = "a < b")
|{
| import mir.functional: naryFun;
|
| static if (__traits(isSame, naryFun!less, less))
| {
| /++
| Params:
| slice = slice being partitioned
| pivot = The index of the pivot for partitioning, must be less than
| `slice.length` or `0` if `slice.length` is `0`
| +/
| size_t pivotPartition(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice,
| size_t pivot)
| {
| assert(pivot < slice.elementCount || slice.elementCount == 0 && pivot == 0, "pivotPartition: pivot must be less than the elementCount of the slice or the slice must be empty and pivot zero");
|
| if (slice.elementCount <= 1) return 0;
|
| import mir.ndslice.topology: flattened;
|
| auto flattenedSlice = slice.flattened;
| auto frontI = flattenedSlice._iterator;
| auto lastI = frontI + flattenedSlice.length - 1;
| auto pivotI = frontI + pivot;
| pivotPartitionImpl!less(frontI, lastI, pivotI);
| return pivotI - frontI;
| }
| } else {
| alias pivotPartition = .pivotPartition!(naryFun!less);
| }
|}
|
|/// pivotPartition with 1-dimensional Slice
|version(mir_test)
|@safe pure nothrow
|unittest
|{
| import mir.ndslice.slice: sliced;
| import mir.algorithm.iteration: all;
|
| auto x = [5, 3, 2, 6, 4, 1, 3, 7].sliced;
| size_t pivot = pivotPartition(x, x.length / 2);
|
| assert(x[0 .. pivot].all!(a => a <= x[pivot]));
| assert(x[pivot .. $].all!(a => a >= x[pivot]));
|}
|
|/// pivotPartition with 2-dimensional Slice
|version(mir_test)
|@safe pure
|unittest
|{
| import mir.ndslice.fuse: fuse;
| import mir.ndslice.topology: flattened;
| import mir.algorithm.iteration: all;
|
| auto x = [
| [5, 3, 2, 6],
| [4, 1, 3, 7]
| ].fuse;
|
| size_t pivot = pivotPartition(x, x.elementCount / 2);
|
| auto xFlattened = x.flattened;
| assert(xFlattened[0 .. pivot].all!(a => a <= xFlattened[pivot]));
| assert(xFlattened[pivot .. $].all!(a => a >= xFlattened[pivot]));
|}
|
|version(mir_test)
|@safe
|unittest
|{
| void test(alias less)()
| {
| import mir.ndslice.slice: sliced;
| import mir.algorithm.iteration: all, equal;
|
| Slice!(int*) x;
| size_t pivot;
|
| x = [-9, -4, -2, -2, 9].sliced;
| pivot = pivotPartition!less(x, x.length / 2);
|
| assert(x[0 .. pivot].all!(a => a <= x[pivot]));
| assert(x[pivot .. $].all!(a => a >= x[pivot]));
|
| x = [9, 2, 8, -5, 5, 4, -8, -4, 9].sliced;
| pivot = pivotPartition!less(x, x.length / 2);
|
| assert(x[0 .. pivot].all!(a => a <= x[pivot]));
| assert(x[pivot .. $].all!(a => a >= x[pivot]));
|
| x = [ 42 ].sliced;
| pivot = pivotPartition!less(x, x.length / 2);
|
| assert(pivot == 0);
| assert(x.equal([ 42 ]));
|
| x = [ 43, 42 ].sliced;
| pivot = pivotPartition!less(x, 0);
| assert(pivot == 1);
| assert(x.equal([ 42, 43 ]));
|
| x = [ 43, 42 ].sliced;
| pivot = pivotPartition!less(x, 1);
|
| assert(pivot == 0);
| assert(x.equal([ 42, 43 ]));
|
| x = [ 42, 42 ].sliced;
| pivot = pivotPartition!less(x, 0);
|
| assert(pivot == 0 || pivot == 1);
| assert(x.equal([ 42, 42 ]));
|
| pivot = pivotPartition!less(x, 1);
|
| assert(pivot == 0 || pivot == 1);
| assert(x.equal([ 42, 42 ]));
| }
| test!"a < b";
| static bool myLess(int a, int b)
| {
| static bool bogus;
| if (bogus) throw new Exception(""); // just to make it no-nothrow
| return a < b;
| }
| test!myLess;
|}
|
|@trusted
|template pivotPartitionImpl(alias less)
|{
| void pivotPartitionImpl(Iterator)
| (ref Iterator frontI,
| ref Iterator lastI,
| ref Iterator pivotI)
| {
| assert(pivotI <= lastI && pivotI >= frontI, "pivotPartition: pivot must be less than the length of slice or slice must be empty and pivot zero");
|
| if (frontI == lastI) return;
|
| import mir.utility: swapStars;
|
| // Pivot at the front
| swapStars(pivotI, frontI);
|
| // Fork implementation depending on nothrow copy, assignment, and
| // comparison. If all of these are nothrow, use the specialized
| // implementation discussed at
| // https://youtube.com/watch?v=AxnotgLql0k.
| static if (is(typeof(
| () nothrow { auto x = frontI; x = frontI; return less(*x, *x); }
| )))
| {
| // Plant the pivot in the end as well as a sentinel
| auto loI = frontI;
| auto hiI = lastI;
| auto save = *hiI;
| *hiI = *frontI; // Vacancy is in r[$ - 1] now
|
| // Start process
| for (;;)
| {
| // Loop invariant
| version(mir_test)
| {
| // this used to import std.algorithm.all, but we want to
| // save imports when unittests are enabled if possible.
| size_t len = lastI - frontI + 1;
| foreach (x; 0 .. (loI - frontI))
| assert(!less(*frontI, frontI[x]), "pivotPartition: *frontI must not be less than frontI[x]");
| foreach (x; (hiI - frontI + 1) .. len)
| assert(!less(frontI[x], *frontI), "pivotPartition: frontI[x] must not be less than *frontI");
| }
| do ++loI; while (less(*loI, *frontI));
| *(hiI) = *(loI);
| // Vacancy is now in slice[lo]
| do --hiI; while (less(*frontI, *hiI));
| if (loI >= hiI) break;
| *(loI) = *(hiI);
| // Vacancy is now in slice[hi]
| }
| // Fixup
| assert(loI - hiI <= 2, "pivotPartition: Following compare not possible");
| assert(!less(*frontI, *hiI), "pivotPartition: *hiI must not be less than *frontI");
| if (loI - hiI == 2)
| {
| assert(!less(hiI[1], *frontI), "pivotPartition: *(hiI + 1) must not be less than *frontI");
| *(loI) = hiI[1];
| --loI;
| }
| *loI = save;
| if (less(*frontI, save)) --loI;
| assert(!less(*frontI, *loI), "pivotPartition: *frontI must not be less than *loI");
| } else {
| auto loI = frontI;
| ++loI;
| auto hiI = lastI;
|
| loop: for (;; loI++, hiI--)
| {
| for (;; ++loI)
| {
| if (loI > hiI) break loop;
| if (!less(*loI, *frontI)) break;
| }
| // found the left bound: !less(*loI, *frontI)
| assert(loI <= hiI, "pivotPartition: loI must be less or equal than hiI");
| for (;; --hiI)
| {
| if (loI >= hiI) break loop;
| if (!less(*frontI, *hiI)) break;
| }
| // found the right bound: !less(*frontI, *hiI), swap & make progress
| assert(!less(*loI, *hiI), "pivotPartition: *lowI must not be less than *hiI");
| swapStars(loI, hiI);
| }
| --loI;
| }
|
| swapStars(loI, frontI);
| pivotI = loI;
| }
|}
|
|version(mir_test)
|@safe pure nothrow
|unittest {
| import mir.ndslice.sorting: partitionAt;
| import mir.ndslice.allocation: rcslice;
| auto x = rcslice!double(4);
| x[0] = 3;
| x[1] = 2;
| x[2] = 1;
| x[3] = 0;
| partitionAt!("a > b")(x, 2);
|}
|
|
|version(mir_test)
|@trusted pure nothrow
|unittest
|{
| import mir.ndslice.slice: sliced;
| import mir.algorithm.iteration: all;
|
| auto x = [5, 3, 2, 6, 4, 1, 3, 7].sliced;
| auto frontI = x._iterator;
| auto lastI = x._iterator + x.length - 1;
| auto pivotI = frontI + x.length / 2;
| alias less = (a, b) => (a < b);
| pivotPartitionImpl!less(frontI, lastI, pivotI);
| size_t pivot = pivotI - frontI;
|
| assert(x[0 .. pivot].all!(a => a <= x[pivot]));
| assert(x[pivot .. $].all!(a => a >= x[pivot]));
|}
|
|/++
|Partitions `slice`, such that all elements `e1` from `slice[0]` to `slice[nth]`
|satisfy `!less(slice[nth], e1)`, and all elements `e2` from `slice[nth]` to
|`slice[slice.length]` satisfy `!less(e2, slice[nth])`. This effectively reorders
|`slice` such that `slice[nth]` refers to the element that would fall there if
|the range were fully sorted. Performs an expected $(BIGOH slice.length)
|evaluations of `less` and `swap`, with a worst case of $(BIGOH slice.length^^2).
|
|This function implements the [Fast, Deterministic Selection](https://erdani.com/research/sea2017.pdf)
|algorithm, the same algorithm used by the [`topN`](https://dlang.org/library/std/algorithm/sorting/top_n.html)
|function in D's standard library (as of version `2.092.0`).
|
|Params:
| less = The predicate to sort by.
|
|See_Also:
| $(LREF pivotPartition), https://erdani.com/research/sea2017.pdf
|
|+/
|template partitionAt(alias less = "a < b")
|{
| import mir.functional: naryFun;
|
| static if (__traits(isSame, naryFun!less, less))
| {
| /++
| Params:
| slice = n-dimensional slice
| nth = The index of the element that should be in sorted position after the
| function is finished.
| +/
| void partitionAt(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice, size_t nth) @trusted nothrow @nogc
| {
| import mir.qualifier: lightScope;
| import core.lifetime: move;
| import mir.ndslice.topology: flattened;
|
| assert(slice.elementCount > 0, "partitionAt: slice must have elementCount greater than 0");
| assert(nth >= 0, "partitionAt: nth must be greater than or equal to zero");
| assert(nth < slice.elementCount, "partitionAt: nth must be less than the elementCount of the slice");
|
| bool useSampling = true;
| auto flattenedSlice = slice.move.flattened;
| auto frontI = flattenedSlice._iterator.lightScope;
| auto lastI = frontI + (flattenedSlice.length - 1);
| partitionAtImpl!less(frontI, lastI, nth, useSampling);
| }
| }
| else
| alias partitionAt = .partitionAt!(naryFun!less);
|}
|
|/// Partition 1-dimensional slice at nth
|version(mir_test)
|@safe pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
|
| size_t nth = 2;
| auto x = [3, 1, 5, 2, 0].sliced;
| x.partitionAt(nth);
| assert(x[nth] == 2);
|}
|
|/// Partition 2-dimensional slice
|version(mir_test)
|@safe pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
|
| size_t nth = 4;
| auto x = [3, 1, 5, 2, 0, 7].sliced(3, 2);
| x.partitionAt(nth);
| assert(x[2, 0] == 5);
|}
|
|/// Can supply alternate ordering function
|version(mir_test)
|@safe pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
|
| size_t nth = 2;
| auto x = [3, 1, 5, 2, 0].sliced;
| x.partitionAt!("a > b")(nth);
| assert(x[nth] == 2);
|}
|
|version(unittest) {
| template checkTopNAll(alias less = "a < b")
| {
| import mir.functional: naryFun;
| import mir.ndslice.slice: SliceKind, Slice;
|
| static if (__traits(isSame, naryFun!less, less))
| {
| @safe pure nothrow
| static bool checkTopNAll
| (Iterator, SliceKind kind)(
| Slice!(Iterator, 1, kind) x)
| {
| auto x_sorted = x.dup;
| x_sorted.sort!less;
|
| bool result = true;
|
| foreach (nth; 0 .. x.length)
| {
| auto x_i = x.dup;
| x_i.partitionAt!less(nth);
| if (x_i[nth] != x_sorted[nth]) {
| result = false;
| break;
| }
| }
| return result;
| }
| } else {
| alias checkTopNAll = .checkTopNAll!(naryFun!less);
| }
| }
|}
|
|version(mir_test)
|@safe pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
|
| assert(checkTopNAll([2, 2].sliced));
|
| assert(checkTopNAll([3, 1, 5, 2, 0].sliced));
| assert(checkTopNAll([3, 1, 5, 0, 2].sliced));
| assert(checkTopNAll([0, 0, 4, 3, 3].sliced));
| assert(checkTopNAll([5, 1, 5, 1, 5].sliced));
| assert(checkTopNAll([2, 2, 0, 0, 0].sliced));
|
| assert(checkTopNAll([ 2, 12, 10, 8, 1, 20, 19, 1, 2, 7].sliced));
| assert(checkTopNAll([ 4, 18, 16, 0, 15, 6, 2, 17, 10, 16].sliced));
| assert(checkTopNAll([ 7, 5, 9, 4, 4, 2, 12, 20, 15, 15].sliced));
|
| assert(checkTopNAll([17, 87, 58, 50, 34, 98, 25, 77, 88, 79].sliced));
|
| assert(checkTopNAll([ 6, 7, 10, 25, 5, 10, 9, 0, 2, 15, 7, 9, 11, 8, 13, 18, 17, 13, 25, 22].sliced));
| assert(checkTopNAll([21, 3, 11, 22, 24, 12, 14, 12, 15, 15, 1, 3, 12, 15, 25, 19, 9, 16, 16, 19].sliced));
| assert(checkTopNAll([22, 6, 18, 0, 1, 8, 13, 13, 16, 19, 23, 17, 4, 6, 12, 24, 15, 20, 11, 17].sliced));
| assert(checkTopNAll([19, 23, 14, 5, 12, 3, 13, 7, 25, 25, 24, 9, 21, 25, 12, 22, 15, 22, 7, 11].sliced));
| assert(checkTopNAll([ 0, 2, 7, 16, 2, 20, 1, 11, 17, 5, 22, 17, 25, 13, 14, 5, 22, 21, 24, 14].sliced));
|}
|
|private @trusted pure nothrow @nogc
|void partitionAtImpl(alias less, Iterator)(
| Iterator loI,
| Iterator hiI,
| size_t n,
| bool useSampling)
|{
| assert(loI <= hiI, "partitionAtImpl: frontI must be less than or equal to lastI");
|
| import mir.utility: swapStars;
| import mir.functional: reverseArgs;
|
| Iterator pivotI;
| size_t len;
|
| for (;;) {
| len = hiI - loI + 1;
|
| if (len <= 1) {
| break;
| }
|
| if (n == 0) {
| pivotI = loI;
| foreach (i; 1 .. len) {
| if (less(loI[i], *pivotI)) {
| pivotI = loI + i;
| }
| }
| swapStars(loI + n, pivotI);
| break;
| }
|
| if (n + 1 == len) {
| pivotI = loI;
| foreach (i; 1 .. len) {
| if (reverseArgs!less(loI[i], *pivotI)) {
| pivotI = loI + i;
| }
| }
| swapStars(loI + n, pivotI);
| break;
| }
|
| if (len <= 12) {
| pivotI = loI + len / 2;
| pivotPartitionImpl!less(loI, hiI, pivotI);
| } else if (n * 16 <= (len - 1) * 7) {
| pivotI = partitionAtPartitionOffMedian!(less, false)(loI, hiI, n, useSampling);
| // Quality check
| if (useSampling)
| {
| auto pivot = pivotI - loI;
| if (pivot < n)
| {
| if (pivot * 4 < len)
| {
| useSampling = false;
| }
| }
| else if ((len - pivot) * 8 < len * 3)
| {
| useSampling = false;
| }
| }
| } else if (n * 16 >= (len - 1) * 9) {
| pivotI = partitionAtPartitionOffMedian!(less, true)(loI, hiI, n, useSampling);
| // Quality check
| if (useSampling)
| {
| auto pivot = pivotI - loI;
| if (pivot < n)
| {
| if (pivot * 8 < len * 3)
| {
| useSampling = false;
| }
| }
| else if ((len - pivot) * 4 < len)
| {
| useSampling = false;
| }
| }
| } else {
| pivotI = partitionAtPartition!less(loI, hiI, n, useSampling);
| // Quality check
| if (useSampling) {
| auto pivot = pivotI - loI;
| if (pivot * 9 < len * 2 || pivot * 9 > len * 7)
| {
| // Failed - abort sampling going forward
| useSampling = false;
| }
| }
| }
|
| if (n < (pivotI - loI)) {
| hiI = pivotI - 1;
| } else if (n > (pivotI - loI)) {
| n -= (pivotI - loI + 1);
| loI = pivotI;
| ++loI;
| } else {
| break;
| }
| }
|}
|
|version(mir_test)
|@trusted pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
|
| size_t nth = 2;
| auto x = [3, 1, 5, 2, 0].sliced;
| auto frontI = x._iterator;
| auto lastI = frontI + x.elementCount - 1;
| partitionAtImpl!((a, b) => (a < b))(frontI, lastI, 1, true);
| assert(x[nth] == 2);
|}
|
|version(mir_test)
|@trusted pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
|
| size_t nth = 4;
| auto x = [3, 1, 5, 2, 0, 7].sliced(3, 2);
| auto frontI = x._iterator;
| auto lastI = frontI + x.elementCount - 1;
| partitionAtImpl!((a, b) => (a < b))(frontI, lastI, nth, true);
| assert(x[2, 0] == 5);
|}
|
|version(mir_test)
|@trusted pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
|
| size_t nth = 1;
| auto x = [0, 0, 4, 3, 3].sliced;
| auto frontI = x._iterator;
| auto lastI = frontI + x.elementCount - 1;
| partitionAtImpl!((a, b) => (a < b))(frontI, lastI, nth, true);
| assert(x[nth] == 0);
|}
|
|version(mir_test)
|@trusted pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
|
| size_t nth = 2;
| auto x = [0, 0, 4, 3, 3].sliced;
| auto frontI = x._iterator;
| auto lastI = frontI + x.elementCount - 1;
| partitionAtImpl!((a, b) => (a < b))(frontI, lastI, nth, true);
| assert(x[nth] == 3);
|}
|
|version(mir_test)
|@trusted pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
|
| size_t nth = 3;
| auto x = [0, 0, 4, 3, 3].sliced;
| auto frontI = x._iterator;
| auto lastI = frontI + x.elementCount - 1;
| partitionAtImpl!((a, b) => (a < b))(frontI, lastI, nth, true);
| assert(x[nth] == 3);
|}
|
|version(mir_test)
|@trusted pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
|
| size_t nth = 4;
| auto x = [ 2, 12, 10, 8, 1, 20, 19, 1, 2, 7].sliced;
| auto frontI = x._iterator;
| auto lastI = frontI + x.elementCount - 1;
| partitionAtImpl!((a, b) => (a < b))(frontI, lastI, nth, true);
| assert(x[nth] == 7);
|}
|
|version(mir_test)
|@trusted pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
|
| size_t nth = 5;
| auto x = [ 2, 12, 10, 8, 1, 20, 19, 1, 2, 7].sliced;
| auto frontI = x._iterator;
| auto lastI = frontI + x.elementCount - 1;
| partitionAtImpl!((a, b) => (a < b))(frontI, lastI, nth, true);
| assert(x[nth] == 8);
|}
|
|version(mir_test)
|@trusted pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
|
| size_t nth = 6;
| auto x = [ 2, 12, 10, 8, 1, 20, 19, 1, 2, 7].sliced;
| auto frontI = x._iterator;
| auto lastI = frontI + x.elementCount - 1;
| partitionAtImpl!((a, b) => (a < b))(frontI, lastI, nth, true);
| assert(x[nth] == 10);
|}
|
|private @trusted pure nothrow @nogc
|Iterator partitionAtPartition(alias less, Iterator)(
| ref Iterator frontI,
| ref Iterator lastI,
| size_t n,
| bool useSampling)
|{
| size_t len = lastI - frontI + 1;
|
| assert(len >= 9 && n < len, "partitionAtPartition: length must be at least 9 and n must be less than the length");
|
| size_t ninth = len / 9;
| size_t pivot = ninth / 2;
| // Position subrange r[loI .. hiI] to have length equal to ninth and its upper
| // median r[loI .. hiI][$ / 2] in exactly the same place as the upper median
| // of the entire range r[$ / 2]. This is to improve behavior for searching
| // the median in already sorted ranges.
| auto loI = frontI;
| loI += len / 2 - pivot;
| auto hiI = loI;
| hiI += ninth;
|
| // We have either one straggler on the left, one on the right, or none.
| assert(loI - frontI <= lastI - hiI + 1 || lastI - hiI <= loI - frontI + 1, "partitionAtPartition: straggler check failed for loI, len, hiI");
| assert(loI - frontI >= ninth * 4, "partitionAtPartition: loI - frontI >= ninth * 4");
| assert(lastI - hiI >= ninth * 4, "partitionAtPartition: lastI - hiI >= ninth * 4");
|
| // Partition in groups of 3, and the mid tertile again in groups of 3
| if (!useSampling) {
| auto loI_ = loI;
| loI_ -= ninth;
| auto hiI_ = hiI;
| hiI_ += ninth;
| p3!(less, Iterator)(frontI, lastI, loI_, hiI_);
| }
| p3!(less, Iterator)(frontI, lastI, loI, hiI);
|
| // Get the median of medians of medians
| // Map the full interval of n to the full interval of the ninth
| pivot = (n * (ninth - 1)) / (len - 1);
| if (hiI > loI) {
| auto hiI_minus = hiI;
| --hiI_minus;
| partitionAtImpl!less(loI, hiI_minus, pivot, useSampling);
| }
|
| auto pivotI = loI;
| pivotI += pivot;
|
| return expandPartition!less(frontI, lastI, loI, pivotI, hiI);
|}
|
|version(mir_test)
|@trusted pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
|
| auto x = [ 6, 7, 10, 25, 5, 10, 9, 0, 2, 15, 7, 9, 11, 8, 13, 18, 17, 13, 25, 22].sliced;
| auto x_sort = x.dup;
| x_sort = x_sort.sort;
| auto frontI = x._iterator;
| auto lastI = frontI + x.length - 1;
| size_t n = x.length / 2;
| partitionAtPartition!((a, b) => (a < b))(frontI, lastI, n, true);
| assert(x[n - 1] == x_sort[n - 1]);
|}
|
|private @trusted pure nothrow @nogc
|Iterator partitionAtPartitionOffMedian(alias less, bool leanRight, Iterator)(
| ref Iterator frontI,
| ref Iterator lastI,
| size_t n,
| bool useSampling)
|{
| size_t len = lastI - frontI + 1;
|
| assert(len >= 12, "partitionAtPartitionOffMedian: len must be greater than 11");
| assert(n < len, "partitionAtPartitionOffMedian: n must be less than len");
| auto _4 = len / 4;
| auto leftLimitI = frontI;
| static if (leanRight)
| leftLimitI += 2 * _4;
| else
| leftLimitI += _4;
| // Partition in groups of 4, and the left quartile again in groups of 3
| if (!useSampling)
| {
| auto leftLimit_plus_4 = leftLimitI;
| leftLimit_plus_4 += _4;
| p4!(less, leanRight)(frontI, lastI, leftLimitI, leftLimit_plus_4);
| }
| auto _12 = _4 / 3;
| auto loI = leftLimitI;
| loI += _12;
| auto hiI = loI;
| hiI += _12;
| p3!less(frontI, lastI, loI, hiI);
|
| // Get the median of medians of medians
| // Map the full interval of n to the full interval of the ninth
| auto pivot = (n * (_12 - 1)) / (len - 1);
| if (hiI > loI) {
| auto hiI_minus = hiI;
| --hiI_minus;
| partitionAtImpl!less(loI, hiI_minus, pivot, useSampling);
| }
| auto pivotI = loI;
| pivotI += pivot;
| return expandPartition!less(frontI, lastI, loI, pivotI, hiI);
|}
|
|version(mir_test)
|@trusted pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
| import mir.algorithm.iteration: equal;
|
| auto x = [ 6, 7, 10, 25, 5, 10, 9, 0, 2, 15, 7, 9, 11, 8, 13, 18, 17, 13, 25, 22].sliced;
| auto frontI = x._iterator;
| auto lastI = frontI + x.length - 1;
| partitionAtPartitionOffMedian!((a, b) => (a < b), false)(frontI, lastI, 5, true);
| assert(x.equal([6, 7, 8, 9, 5, 0, 2, 7, 9, 15, 10, 25, 11, 10, 13, 18, 17, 13, 25, 22]));
|}
|
|version(mir_test)
|@trusted pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
| import mir.algorithm.iteration: equal;
|
| auto x = [ 6, 7, 10, 25, 5, 10, 9, 0, 2, 15, 7, 9, 11, 8, 13, 18, 17, 13, 25, 22].sliced;
| auto frontI = x._iterator;
| auto lastI = frontI + x.length - 1;
| partitionAtPartitionOffMedian!((a, b) => (a < b), true)(frontI, lastI, 15, true);
| assert(x.equal([6, 7, 8, 7, 5, 2, 9, 0, 9, 15, 25, 10, 11, 10, 13, 18, 17, 13, 25, 22]));
|}
|
|private @trusted
|void p3(alias less, Iterator)(
| Iterator frontI,
| Iterator lastI,
| Iterator loI,
| Iterator hiI)
|{
| assert(loI <= hiI && hiI <= lastI, "p3: loI must be less than or equal to hiI and hiI must be less than or equal to lastI");
| immutable diffI = hiI - loI;
| Iterator lo_loI;
| Iterator hi_loI;
| for (; loI < hiI; ++loI)
| {
| lo_loI = loI;
| lo_loI -= diffI;
| hi_loI = loI;
| hi_loI += diffI;
| assert(lo_loI >= frontI, "p3: lo_loI must be greater than or equal to frontI");
| assert(hi_loI <= lastI, "p3: hi_loI must be less than or equal to lastI");
| medianOf!less(lo_loI, loI, hi_loI);
| }
|}
|
|version(mir_test)
|@trusted pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
| import mir.algorithm.iteration: equal;
|
| auto x = [3, 4, 0, 5, 2, 1].sliced;
| auto frontI = x._iterator;
| auto lastI = frontI + x.length - 1;
| auto loI = frontI + 2;
| auto hiI = frontI + 4;
| p3!((a, b) => (a < b))(frontI, lastI, loI, hiI);
| assert(x.equal([0, 1, 2, 4, 3, 5]));
|}
|
|private @trusted
|template p4(alias less, bool leanRight)
|{
| void p4(Iterator)(
| Iterator frontI,
| Iterator lastI,
| Iterator loI,
| Iterator hiI)
| {
| assert(loI <= hiI && hiI <= lastI, "p4: loI must be less than or equal to hiI and hiI must be less than or equal to lastI");
|
| immutable diffI = hiI - loI;
| immutable diffI2 = diffI * 2;
|
| Iterator lo_loI;
| Iterator hi_loI;
|
| static if (leanRight)
| Iterator lo2_loI;
| else
| Iterator hi2_loI;
|
| for (; loI < hiI; ++loI)
| {
| lo_loI = loI - diffI;
| hi_loI = loI + diffI;
|
| assert(lo_loI >= frontI, "p4: lo_loI must be greater than or equal to frontI");
| assert(hi_loI <= lastI, "p4: hi_loI must be less than or equal to lastI");
|
| static if (leanRight) {
| lo2_loI = loI - diffI2;
| assert(lo2_loI >= frontI, "lo2_loI must be greater than or equal to frontI");
| medianOf!(less, leanRight)(lo2_loI, lo_loI, loI, hi_loI);
| } else {
| hi2_loI = loI + diffI2;
| assert(hi2_loI <= lastI, "hi2_loI must be less than or equal to lastI");
| medianOf!(less, leanRight)(lo_loI, loI, hi_loI, hi2_loI);
| }
| }
| }
|}
|
|version(mir_test)
|@trusted pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
| import mir.algorithm.iteration: equal;
|
| auto x = [3, 4, 0, 7, 2, 6, 5, 1, 4].sliced;
| auto frontI = x._iterator;
| auto lastI = frontI + x.length - 1;
| auto loI = frontI + 3;
| auto hiI = frontI + 5;
| p4!((a, b) => (a < b), false)(frontI, lastI, loI, hiI);
| assert(x.equal([3, 1, 0, 4, 2, 6, 4, 7, 5]));
|}
|
|version(mir_test)
|@trusted pure nothrow
|unittest {
| import mir.ndslice.slice: sliced;
| import mir.algorithm.iteration: equal;
|
| auto x = [3, 4, 0, 8, 2, 7, 5, 1, 4, 3].sliced;
| auto frontI = x._iterator;
| auto lastI = frontI + x.length - 1;
| auto loI = frontI + 4;
| auto hiI = frontI + 6;
| p4!((a, b) => (a < b), true)(frontI, lastI, loI, hiI);
| assert(x.equal([0, 4, 2, 1, 3, 7, 5, 8, 4, 3]));
|}
|
|private @trusted
|template expandPartition(alias less)
|{
| Iterator expandPartition(Iterator)(
| ref Iterator frontI,
| ref Iterator lastI,
| ref Iterator loI,
| ref Iterator pivotI,
| ref Iterator hiI)
| {
| import mir.algorithm.iteration: all;
|
| assert(frontI <= loI, "expandPartition: frontI must be less than or equal to loI");
| assert(loI <= pivotI, "expandPartition: loI must be less than or equal pivotI");
| assert(pivotI < hiI, "expandPartition: pivotI must be less than hiI");
| assert(hiI <= lastI, "expandPartition: hiI must be less than or equal to lastI");
|
| foreach(x; loI .. (pivotI + 1))
| assert(!less(*pivotI, *x), "expandPartition: loI .. (pivotI + 1) failed test");
| foreach(x; (pivotI + 1) .. hiI)
| assert(!less(*x, *pivotI), "expandPartition: (pivotI + 1) .. hiI failed test");
|
| import mir.utility: swapStars;
| import mir.algorithm.iteration: all;
| // We work with closed intervals!
| --hiI;
|
| auto leftI = frontI;
| auto rightI = lastI;
| loop: for (;; ++leftI, --rightI)
| {
| for (;; ++leftI)
| {
| if (leftI == loI) break loop;
| if (!less(*leftI, *pivotI)) break;
| }
| for (;; --rightI)
| {
| if (rightI == hiI) break loop;
| if (!less(*pivotI, *rightI)) break;
| }
| swapStars(leftI, rightI);
| }
|
| foreach(x; loI .. (pivotI + 1))
| assert(!less(*pivotI, *x), "expandPartition: loI .. (pivotI + 1) failed less than test");
| foreach(x; (pivotI + 1) .. (hiI + 1))
| assert(!less(*x, *pivotI), "expandPartition: (pivotI + 1) .. (hiI + 1) failed less than test");
| foreach(x; frontI .. leftI)
| assert(!less(*pivotI, *x), "expandPartition: frontI .. leftI failed less than test");
| foreach(x; (rightI + 1) .. (lastI + 1))
| assert(!less(*x, *pivotI), "expandPartition: (rightI + 1) .. (lastI + 1) failed less than test");
|
| auto oldPivotI = pivotI;
|
| if (leftI < loI)
| {
| // First loop: spend r[loI .. pivot]
| for (; loI < pivotI; ++leftI)
| {
| if (leftI == loI) goto done;
| if (!less(*oldPivotI, *leftI)) continue;
| --pivotI;
| assert(!less(*oldPivotI, *pivotI), "expandPartition: less check failed");
| swapStars(leftI, pivotI);
| }
| // Second loop: make leftI and pivot meet
| for (;; ++leftI)
| {
| if (leftI == pivotI) goto done;
| if (!less(*oldPivotI, *leftI)) continue;
| for (;;)
| {
| if (leftI == pivotI) goto done;
| --pivotI;
| if (less(*pivotI, *oldPivotI))
| {
| swapStars(leftI, pivotI);
| break;
| }
| }
| }
| }
|
| // First loop: spend r[pivotI .. hiI]
| for (; hiI != pivotI; --rightI)
| {
| if (rightI == hiI) goto done;
| if (!less(*rightI, *oldPivotI)) continue;
| ++pivotI;
| assert(!less(*pivotI, *oldPivotI), "expandPartition: less check failed");
| swapStars(rightI, pivotI);
| }
| // Second loop: make rightI and pivotI meet
| for (; rightI > pivotI; --rightI)
| {
| if (!less(*rightI, *oldPivotI)) continue;
| while (rightI > pivotI)
| {
| ++pivotI;
| if (less(*oldPivotI, *pivotI))
| {
| swapStars(rightI, pivotI);
| break;
| }
| }
| }
|
| done:
| swapStars(oldPivotI, pivotI);
|
|
| foreach(x; frontI .. (pivotI + 1))
| assert(!less(*pivotI, *x), "expandPartition: frontI .. (pivotI + 1) failed test");
| foreach(x; (pivotI + 1) .. (lastI + 1))
| assert(!less(*x, *pivotI), "expandPartition: (pivotI + 1) .. (lastI + 1) failed test");
| return pivotI;
| }
|}
|
|version(mir_test)
|@trusted pure nothrow
|unittest
|{
| import mir.ndslice.slice: sliced;
|
| auto a = [ 10, 5, 3, 4, 8, 11, 13, 3, 9, 4, 10 ].sliced;
| auto frontI = a._iterator;
| auto lastI = frontI + a.length - 1;
| auto loI = frontI + 4;
| auto pivotI = frontI + 5;
| auto hiI = frontI + 6;
| assert(expandPartition!((a, b) => a < b)(frontI, lastI, loI, pivotI, hiI) == (frontI + 9));
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/sorting.d has no code
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-ndfield.lst
|/++
|This is a submodule of $(MREF mir,ndslice).
|
|An NdField is a type with an `opIndex(size_t[N] index...)` primitive.
|An ndslice can be created on top of an NdField using $(SUBREF slice, slicedNdField).
|
|$(BOOKTABLE $(H2 NdFields),
|$(TR $(TH NdField Name) $(TH Used By))
|$(T2 Cartesian, $(SUBREF topology, cartesian))
|$(T2 Kronecker, $(SUBREF topology, kronecker))
|)
|
|See_also: $(SUBREF concatenation, concatenation).
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko
|
|Macros:
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|module mir.ndslice.ndfield;
|
|import mir.qualifier;
|import mir.internal.utility;
|import mir.ndslice.internal;
|import mir.ndslice.slice;
|import mir.primitives;
|import std.meta;
|
|private template _indices(NdFields...)
|{
| static if (NdFields.length == 0)
| enum _indices = "";
| else
| {
| alias Next = NdFields[0 .. $ - 1];
| enum i = Next.length;
| enum _indices = ._indices!Next ~
| "_fields[" ~ i.stringof ~ "][" ~ _indices_range!([staticMap!(DimensionCount, Next)].sum, DimensionCount!(NdFields[$ - 1])) ~ "], ";
| }
|}
|
|private template _indices_range(size_t begin, size_t count)
|{
| static if (count == 0)
| enum _indices_range = "";
| else
| {
| enum next = count - 1;
| enum elem = begin + next;
| enum _indices_range = ._indices_range!(begin, next) ~ "indices[" ~ elem.stringof ~ "], ";
| }
|}
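|
|// Illustration (added note, not part of the original source): for a Cartesian of
|// two one-dimensional fields, `_indices` expands to a string along the lines of
|// `_fields[0][indices[0]], _fields[1][indices[1]], `, which `Cartesian.opIndex`
|// below mixes into `refTuple(...)`.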
|
|///
|struct Cartesian(NdFields...)
| if (NdFields.length > 1)
|{
| ///
| NdFields _fields;
|
| package(mir) enum size_t M(size_t f) = [staticMap!(DimensionCount, NdFields[0..f])].sum;
| package(mir) enum size_t N = M!(NdFields.length);
|
| ///
| auto lightConst()() const @property
| {
| import std.format;
| import mir.ndslice.topology: iota;
| return mixin("Cartesian!(staticMap!(LightConstOf, NdFields))(%(_fields[%s].lightConst,%)].lightConst)".format(_fields.length.iota));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| import std.format;
| import mir.ndslice.topology: iota;
| return mixin("Cartesian!(staticMap!(LightImmutableOf, NdFields))(%(_fields[%s].lightImmutable,%)].lightImmutable)".format(_fields.length.iota));
| }
|
| ///
| size_t length(size_t d = 0)() @safe scope const @property
| {
| foreach(f, NdField; NdFields)
| static if (M!f <= d && M!(f + 1) > d)
| {
| static if (d - M!f)
| return _fields[f].length!(d - M!f);
| else
| return _fields[f].length;
| }
| }
|
| ///
| size_t[N] shape()() @safe scope const @property
| {
| typeof(return) ret;
| foreach(f, NdField; NdFields)
| {
| static if (hasShape!NdField)
| {
| auto s = _fields[f].shape;
| foreach(j; Iota!(s.length))
| ret[M!f + j] = s[j];
| }
| else
| {
| ret[M!f] = _fields[f].length;
| }
| }
| return ret;
| }
|
| ///
| size_t elementCount()() @safe scope const @property
| {
| size_t ret = 1;
| foreach (f, NdField; NdFields)
| ret *= _fields[f].elementCount;
| return ret;
| }
|
| ///
| auto opIndex(size_t[N] indices...)
| {
| import mir.functional : refTuple;
| return mixin("refTuple(" ~ _indices!(NdFields) ~ ")");
| }
|}
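|
|// A usage sketch (not part of the original source): a Cartesian ndField is
|// normally wrapped into an ndslice with $(SUBREF slice, slicedNdField), as the
|// module header describes; each element is a `refTuple` with one member per field.
|version(mir_test) unittest
|{
| import mir.ndslice.slice: sliced, slicedNdField;
|
| auto a = [10, 20].sliced;
| auto b = [ 1,  2,  3].sliced;
| auto field = Cartesian!(typeof(a), typeof(b))(a, b);
| auto c = field.slicedNdField(2, 3);
| // c[i, j] pairs a[i] with b[j]
| assert(c[1, 2].a == 20);
| assert(c[1, 2].b == 3);
|}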
|
|private template _kr_indices(size_t n)
|{
| static if (n == 0)
| enum _kr_indices = "";
| else
| {
| enum i = n - 1;
| enum _kr_indices = ._kr_indices!i ~ "_fields[" ~ i.stringof ~ "][ind[" ~ i.stringof ~ "]], ";
| }
|}
|
|///
|struct Kronecker(alias fun, NdFields...)
| if (NdFields.length > 1 && allSatisfy!(templateOr!(hasShape, hasLength), NdFields[1 .. $]))
|{
| ///
| NdFields _fields;
|
| ///
| auto lightConst()() const @property
| {
| import std.format;
| import mir.ndslice.topology: iota;
| return mixin("Kronecker!(fun, staticMap!(LightConstOf, NdFields))(%(_fields[%s].lightConst,%)].lightConst)".format(_fields.length.iota));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| import std.format;
| import mir.ndslice.topology: iota;
| return mixin("Kronecker!(fun, staticMap!(LightImmutableOf, NdFields))(%(_fields[%s].lightImmutable,%)].lightImmutable)".format(_fields.length.iota));
| }
|
| private enum N = DimensionCount!(NdFields[$-1]);
|
| ///
| size_t length(size_t d = 0)() scope const @property
| {
| static if (d == 0)
| {
| size_t ret = 1;
| foreach (f, NdField; NdFields)
| ret *= _fields[f].length;
| }
| else
| {
| size_t ret = 1;
| foreach (f, NdField; NdFields)
| ret *= _fields[f].length!d;
| }
| return ret;
| }
|
|
| ///
| size_t[N] shape()() scope const @property
| {
| static if (N > 1)
| {
| size_t[N] ret = 1;
| foreach (f, NdField; NdFields)
| {
| auto s = _fields[f].shape;
| foreach(i; Iota!N)
| ret[i] *= s[i];
| }
| return ret;
| }
| else
| {
| size_t[1] ret = 1;
| foreach (f, NdField; NdFields)
| ret[0] *= _fields[f].length;
| return ret;
| }
| }
|
| ///
| size_t elementCount()() scope const @property
| {
| size_t ret = 1;
| foreach (f, NdField; NdFields)
| ret *= _fields[f].elementCount;
| return ret;
| }
|
| ///
| auto ref opIndex()(size_t[N] indices...)
| {
| static if (N > 1)
| size_t[N][NdFields.length] ind;
| else
| size_t[NdFields.length] ind;
| foreach_reverse (f, NdField; NdFields)
| {
| static if (f)
| {
| static if (hasShape!(NdFields[f]))
| {
| auto s = _fields[f].shape;
| }
| else
| {
| size_t[1] s;
| s[0] = _fields[f].length;
| }
| static if (N > 1)
| {
| foreach(i; Iota!N)
| {
| ind[f][i] = indices[i] % s[i];
| indices[i] /= s[i];
| }
| }
| else
| {
| ind[f] = indices[0] % s[0];
| indices[0] /= s[0];
| }
| }
| else
| {
| static if (N > 1)
| {
| foreach(i; Iota!N)
| ind[f][i] = indices[i];
| }
| else
| {
| ind[f] = indices[0];
| }
| }
| }
| return mixin("fun(" ~ _kr_indices!(ind.length) ~ ")");
| }
|}
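|
|// A usage sketch (not part of the original source): building a Kronecker product
|// of two matrices by wrapping this ndField with $(SUBREF slice, slicedNdField),
|// as the module header describes.
|version(mir_test) unittest
|{
| import mir.ndslice.fuse: fuse;
| import mir.ndslice.slice: slicedNdField;
|
| auto a = [[1, 2], [3, 4]].fuse;
| auto b = [[0, 1], [1, 0]].fuse;
| auto field = Kronecker!((x, y) => x * y, typeof(a), typeof(b))(a, b);
| // result[i, j] = a[i / 2, j / 2] * b[i % 2, j % 2]
| auto c = field.slicedNdField(4, 4);
| assert(c == [
|     [0, 1, 0, 2],
|     [1, 0, 2, 0],
|     [0, 3, 0, 4],
|     [3, 0, 4, 0]]);
|}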
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/ndfield.d has no code
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-random-2.2.15-mir-random-source-mir-random-engine-mersenne_twister.lst
|/++
|The Mersenne Twister generator.
|
|Copyright: Copyright Andrei Alexandrescu 2008 - 2009, Ilya Yaroshenko 2016-.
|License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
|Authors: $(HTTP erdani.org, Andrei Alexandrescu) Ilya Yaroshenko (rework)
|+/
|module mir.random.engine.mersenne_twister;
|
|import std.traits;
|
|/++
|The $(LUCKY Mersenne Twister) generator.
|+/
|struct MersenneTwisterEngine(UIntType, size_t w, size_t n, size_t m, size_t r,
| UIntType a, size_t u, UIntType d, size_t s,
| UIntType b, size_t t,
| UIntType c, size_t l, UIntType f)
| if (isUnsigned!UIntType)
|{
| ///
| enum isRandomEngine = true;
|
| static assert(0 < w && w <= UIntType.sizeof * 8);
| static assert(1 <= m && m <= n);
| static assert(0 <= r && 0 <= u && 0 <= s && 0 <= t && 0 <= l);
| static assert(r <= w && u <= w && s <= w && t <= w && l <= w);
| static assert(0 <= a && 0 <= b && 0 <= c);
|
| @disable this();
| @disable this(this);
|
| /// Largest generated value.
| enum UIntType max = UIntType.max >> (UIntType.sizeof * 8u - w);
| static assert(a <= max && b <= max && c <= max && f <= max);
|
| private enum UIntType lowerMask = (cast(UIntType) 1u << r) - 1;
| private enum UIntType upperMask = ~lowerMask & max;
|
| /**
| Parameters for the generator.
| */
| enum size_t wordSize = w;
| enum size_t stateSize = n; /// ditto
| enum size_t shiftSize = m; /// ditto
| enum size_t maskBits = r; /// ditto
| enum UIntType xorMask = a; /// ditto
| enum size_t temperingU = u; /// ditto
| enum UIntType temperingD = d; /// ditto
| enum size_t temperingS = s; /// ditto
| enum UIntType temperingB = b; /// ditto
| enum size_t temperingT = t; /// ditto
| enum UIntType temperingC = c; /// ditto
| enum size_t temperingL = l; /// ditto
| enum UIntType initializationMultiplier = f; /// ditto
|
|
| /// The default seed value.
| enum UIntType defaultSeed = 5489;
|
| /++
| Current reversed payload index; its initial value equals `n-1`.
| +/
| size_t index = void;
|
| private UIntType _z = void;
|
| /++
| Reversed(!) payload.
| +/
| UIntType[n] data = void;
|
| /*
| * Marker indicating it's safe to construct from void
| * (i.e. the constructor doesn't depend on the struct
| * being in an initially valid state).
| * Non-public because we don't want to commit to this
| * design.
| */
| package enum bool _isVoidInitOkay = true;
|
| /++
| Constructs a MersenneTwisterEngine object.
| +/
0000000| this(UIntType value) @safe pure nothrow @nogc
| {
| static if (max == UIntType.max)
0000000| data[$-1] = value;
| else
| data[$-1] = value & max;
0000000| foreach_reverse (size_t i, ref e; data[0 .. $-1])
| {
0000000| e = f * (data[i + 1] ^ (data[i + 1] >> (w - 2))) + cast(UIntType)(n - (i + 1));
| static if (max != UIntType.max)
| e &= max;
| }
0000000| index = n-1;
0000000| opCall();
| }
|
| /++
| Constructs a MersenneTwisterEngine object.
|
| Note that `MersenneTwisterEngine([123])` will not result in
| the same initial state as `MersenneTwisterEngine(123)`.
| +/
| this()(scope const(UIntType)[] array) @safe pure nothrow @nogc
| {
| static if (is(UIntType == uint))
| {
| enum UIntType f2 = 1664525u;
| enum UIntType f3 = 1566083941u;
| }
| else static if (is(UIntType == ulong))
| {
| enum UIntType f2 = 3935559000370003845uL;
| enum UIntType f3 = 2862933555777941757uL;
| }
| else
| static assert(0, "init by slice only supported if UIntType is uint or ulong!");
|
| data[$-1] = cast(UIntType) (19650218u & max);
| foreach_reverse (size_t i, ref e; data[0 .. $-1])
| {
| e = f * (data[i + 1] ^ (data[i + 1] >> (w - 2))) + cast(UIntType)(n - (i + 1));
| static if (max != UIntType.max)
| e &= max;
| }
| index = n-1;
| if (array.length == 0)
| {
| opCall();
| return;
| }
|
| size_t final_mix_index = void;
|
| if (array.length >= n)
| {
| size_t j = 0;
| //Handle all but tail.
| while (array.length - j >= n - 1)
| {
| foreach_reverse (i, ref e; data[0 .. $-1])
| {
| e = (e ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2))
| + array[j] + cast(UIntType) j;
| static if (max != UIntType.max)
| e &= max;
| ++j;
| }
| data[$ - 1] = data[0];
| }
| //Handle tail.
| size_t i = n - 2;
| while (j < array.length)
| {
| data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2))
| + array[j] + cast(UIntType) j;
| static if (max != UIntType.max)
| data[i] &= max;
| ++j;
| --i;
| }
| //Set the index for use by the next pass.
| final_mix_index = i;
| }
| else
| {
| size_t i = n - 2;
| //Handle all but tail.
| while (i >= array.length)
| {
| foreach (j; 0 .. array.length)
| {
| data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2))
| + array[j] + cast(UIntType) j;
| static if (max != UIntType.max)
| data[i] &= max;
| --i;
| }
| }
| //Handle tail.
| size_t j = 0;
| while (i != cast(size_t) -1)
| {
| data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2))
| + array[j] + cast(UIntType) j;
| static if (max != UIntType.max)
| data[i] &= max;
| ++j;
| --i;
| }
| data[$ - 1] = data[0];
| i = n - 2;
| data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2))
| + array[j] + cast(UIntType) j;
| static if (max != UIntType.max)
| data[i] &= max;
| //Set the index for use by the next pass.
| final_mix_index = n - 2;
| }
|
| foreach_reverse (i, ref e; data[0 .. final_mix_index])
| {
| e = (e ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f3))
| - cast(UIntType)(n - (i + 1));
| static if (max != UIntType.max)
| e &= max;
| }
| foreach_reverse (i, ref e; data[final_mix_index .. n-1])
| {
| e = (e ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f3))
| - cast(UIntType)(n - (i + 1));
| static if (max != UIntType.max)
| e &= max;
| }
| data[$-1] = (cast(UIntType)1) << ((UIntType.sizeof * 8) - 1); /* MSB is 1; assuring non-zero initial array */
| opCall();
| }
|
| /++
| Advances the generator.
| +/
| UIntType opCall() @safe pure nothrow @nogc
| {
| // This function blends two nominally independent
| // processes: (i) calculation of the next random
| // variate from the cached previous `data` entry
| // `_z`, and (ii) updating `data[index]` and `_z`
| // and advancing the `index` value to the next in
| // sequence.
| //
| // By interweaving the steps involved in these
| // procedures, rather than performing each of
| // them separately in sequence, the variables
| // are kept 'hot' in CPU registers, allowing
| // for significantly faster performance.
0000000| sizediff_t index = this.index;
0000000| sizediff_t next = index - 1;
0000000| if(next < 0)
0000000| next = n - 1;
0000000| auto z = _z;
0000000| sizediff_t conj = index - m;
0000000| if(conj < 0)
0000000| conj = index - m + n;
| static if (d == UIntType.max)
0000000| z ^= (z >> u);
| else
0000000| z ^= (z >> u) & d;
0000000| auto q = data[index] & upperMask;
0000000| auto p = data[next] & lowerMask;
0000000| z ^= (z << s) & b;
0000000| auto y = q | p;
0000000| auto x = y >> 1;
0000000| z ^= (z << t) & c;
0000000| if (y & 1)
0000000| x ^= a;
0000000| auto e = data[conj] ^ x;
0000000| z ^= (z >> l);
0000000| _z = data[index] = e;
0000000| this.index = next;
0000000| return z;
| }
|}
|
|/++
|A $(D MersenneTwisterEngine) instantiated with the parameters of the
|original engine $(HTTP en.wikipedia.org/wiki/Mersenne_Twister,
|MT19937), generating uniformly-distributed 32-bit numbers with a
|period of 2 to the power of 19937 minus 1.
|
|This is recommended for random number generation on 32-bit systems
|unless memory is severely restricted, in which case a
|$(REF_ALTTEXT Xorshift, Xorshift, mir, random, engine, xorshift)
|would be the generator of choice.
|+/
|alias Mt19937 = MersenneTwisterEngine!(uint, 32, 624, 397, 31,
| 0x9908b0df, 11, 0xffffffff, 7,
| 0x9d2c5680, 15,
| 0xefc60000, 18, 1812433253);
|
|///
|@safe version(mir_random_test) unittest
|{
| import mir.random.engine;
|
| // bit-masking by generator maximum is necessary
| // to handle 64-bit `unpredictableSeed`
| auto gen = Mt19937(unpredictableSeed & Mt19937.max);
| auto n = gen();
|
| import std.traits;
| static assert(is(ReturnType!gen == uint));
|}
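|
|// Illustrative sketch, not part of the library's test suite: as noted in the
|// constructor documentation above, seeding with a one-element array does not
|// produce the same initial state as seeding with the scalar value.
|@safe version(mir_random_test) unittest
|{
|    auto scalarSeeded = Mt19937(123u);
|    immutable uint[1] arraySeed = [123u];
|    auto arraySeeded = Mt19937(arraySeed);
|    // the internal payloads differ, so the generators are in different states
|    assert(scalarSeeded.data != arraySeeded.data);
|}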
|
|/++
|A $(D MersenneTwisterEngine) instantiated with the parameters of the
|original engine $(HTTP en.wikipedia.org/wiki/Mersenne_Twister,
|MT19937), generating uniformly-distributed 64-bit numbers with a
|period of 2 to the power of 19937 minus 1.
|
|This is recommended for random number generation on 64-bit systems
|unless memory is severely restricted, in which case a
|$(REF_ALTTEXT Xorshift, Xorshift, mir, random, engine, xorshift)
|would be the generator of choice.
|+/
|alias Mt19937_64 = MersenneTwisterEngine!(ulong, 64, 312, 156, 31,
| 0xb5026f5aa96619e9, 29, 0x5555555555555555, 17,
| 0x71d67fffeda60000, 37,
| 0xfff7eee000000000, 43, 6364136223846793005);
|
|///
|@safe version(mir_random_test) unittest
|{
| import mir.random.engine;
|
| auto gen = Mt19937_64(unpredictableSeed);
| auto n = gen();
|
| import std.traits;
| static assert(is(ReturnType!gen == ulong));
|}
|
|@safe nothrow version(mir_random_test) unittest
|{
| import mir.random.engine;
|
| static assert(isSaturatedRandomEngine!Mt19937);
| static assert(isSaturatedRandomEngine!Mt19937_64);
| auto gen = Mt19937(Mt19937.defaultSeed);
| foreach(_; 0 .. 9999)
| gen();
| assert(gen() == 4123659995);
|
| auto gen64 = Mt19937_64(Mt19937_64.defaultSeed);
| foreach(_; 0 .. 9999)
| gen64();
| assert(gen64() == 9981545732273789042uL);
|}
|
|version(mir_random_test) unittest
|{
| enum val = [1341017984, 62051482162767];
| alias MT(UIntType, uint w) = MersenneTwisterEngine!(UIntType, w, 624, 397, 31,
| 0x9908b0df, 11, 0xffffffff, 7,
| 0x9d2c5680, 15,
| 0xefc60000, 18, 1812433253);
|
| import std.meta: AliasSeq;
| foreach (i, R; AliasSeq!(MT!(ulong, 32), MT!(ulong, 48)))
| {
| static if (R.wordSize == 48) static assert(R.max == 0xFFFFFFFFFFFF);
| auto a = R(R.defaultSeed);
| foreach(_; 0..999)
| a();
| assert(val[i] == a());
| }
|}
|
|@safe nothrow @nogc version(mir_random_test) unittest
|{
| //Verify that seeding with an array gives the same result as the reference
| //implementation.
|
| //32-bit: www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/mt19937ar.tgz
| immutable uint[4] seed32 = [0x123u, 0x234u, 0x345u, 0x456u];
| auto gen32 = Mt19937(seed32);
| foreach(_; 0..999)
| gen32();
| assert(3460025646u == gen32());
|
| //64-bit: www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/mt19937-64.tgz
| immutable ulong[4] seed64 = [0x12345uL, 0x23456uL, 0x34567uL, 0x45678uL];
| auto gen64 = Mt19937_64(seed64);
| foreach(_; 0..999)
| gen64();
| assert(994412663058993407uL == gen64());
|}
../../../.dub/packages/mir-random-2.2.15/mir-random/source/mir/random/engine/mersenne_twister.d is 0% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-filling.lst
|/++
|This is a submodule of $(MREF mir,ndslice).
|
|Initialisation routines.
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko
|
|Macros:
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|module mir.ndslice.filling;
|
|import mir.ndslice.slice: Slice, SliceKind;
|
|/++
|Fills a matrix with the terms of a geometric progression in each row.
|Params:
| matrix = `m × n` matrix to fill
| vec = vector of progression coefficients of length `m`
|See_also: $(LINK2 https://en.wikipedia.org/wiki/Vandermonde_matrix, Vandermonde matrix)
|+/
|void fillVandermonde(F, SliceKind matrixKind, SliceKind kind)(Slice!(F*, 2, matrixKind) matrix, Slice!(const(F)*, 1, kind) vec)
|in {
| assert(matrix.length == vec.length);
|}
|do {
| import mir.conv: to;
|
| foreach (v; matrix)
| {
| F a = vec.front;
| vec.popFront;
| F x = to!F(1);
| foreach (ref e; v)
| {
| e = x;
| x *= a;
| }
| }
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice: sliced;
| import mir.ndslice.allocation: uninitSlice;
| auto x = [1.0, 2, 3, 4, 5].sliced;
| auto v = uninitSlice!double(x.length, x.length);
| v.fillVandermonde(x);
| assert(v ==
| [[ 1.0, 1, 1, 1, 1],
| [ 1.0, 2, 4, 8, 16],
| [ 1.0, 3, 9, 27, 81],
| [ 1.0, 4, 16, 64, 256],
| [ 1.0, 5, 25, 125, 625]]);
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/filling.d has no code
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-internal.lst
|module mir.ndslice.internal;
|
|import mir.internal.utility : isFloatingPoint, Iota;
|import mir.math.common: optmath;
|import mir.ndslice.iterator: IotaIterator;
|import mir.ndslice.slice;
|import mir.primitives;
|import std.meta;
|import std.traits;
|
|@optmath:
|
|template ConstIfPointer(T)
|{
| static if (isPointer!T)
| alias ConstIfPointer = const(PointerTarget!T)*;
| else
| alias ConstIfPointer = T;
|}
|
|public import mir.utility: _expect;
|
|struct RightOp(string op, T)
|{
| T value;
|
| auto lightConst()() const @property
| {
| import mir.qualifier;
| return RightOp!(op, LightConstOf!T)(value.lightConst);
| }
|
| auto lightImmutable()() immutable @property
| {
| import mir.qualifier;
| return RightOp!(op, LightImmutableOf!T)(value.lightImmutable);
| }
|
| this()(ref T v) { value = v; }
| this()(T v) { value = v; }
| auto ref opCall(F)(auto ref F right)
| {
| static if (op == "^^" && isNumeric!T && isFloatingPoint!F)
| {
| import mir.math.common: pow;
| return pow(value, right);
| }
| else
| {
| return mixin("value " ~ op ~ " right");
| }
| }
|}
|
|struct LeftOp(string op, T)
|{
| T value;
|
| auto lightConst()() const @property
| {
| import mir.qualifier;
| return LeftOp!(op, LightConstOf!T)(value.lightConst);
| }
|
| auto lightImmutable()() immutable @property
| {
| import mir.qualifier;
| return LeftOp!(op, LightImmutableOf!T)(value.lightImmutable);
| }
|
| this()(ref T v) { value = v; }
| this()(T v) { value = v; }
| auto ref opCall(F)(auto ref F left)
| {
| static if (op == "^^" && isFloatingPoint!T && isNumeric!F)
| {
| import mir.math.common: pow;
| return pow(left, value);
| }
| else
| {
| return mixin("left " ~ op ~ " value");
| }
| }
|}
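|
|// Illustrative sketch, not from the library: `RightOp` keeps its stored value
|// as the left operand of `op`, while `LeftOp` keeps it as the right operand.
|version(mir_test) unittest
|{
|    auto r = RightOp!("-", int)(10);
|    assert(r(4) == 6);   // 10 - 4
|    auto l = LeftOp!("-", int)(10);
|    assert(l(4) == -6);  // 4 - 10
|}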
|
|private template _prod(size_t len)
| if (len)
|{
| static if (len == 1)
| enum _prod = "elems[0]";
| else
| {
| enum i = len - 1;
| enum _prod = ._prod!i ~ " * elems[" ~ i.stringof ~ "]";
| }
|}
|
|auto product(Elems...)(auto ref Elems elems)
|{
| return mixin(_prod!(Elems.length));
|}
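|
|// Illustrative sketch, not from the library: `product` multiplies all of its
|// arguments via the `_prod` mixin string above.
|version(mir_test) unittest
|{
|    assert(product(2, 3, 4) == 24);
|    assert(product(5) == 5);
|}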
|
|
|template _iotaArgs(size_t length, string prefix, string suffix)
|{
| static if (length)
| {
| enum i = length - 1;
| enum _iotaArgs = _iotaArgs!(i, prefix, suffix) ~ prefix ~ i.stringof ~ suffix;
| }
| else
| enum _iotaArgs = "";
|}
|
|alias _IteratorOf(T : Slice!(Iterator, N, kind), Iterator, size_t N, SliceKind kind) = Iterator;
|
|E maxElem(E)(E[] arr...)
|{
| auto ret = Unqual!E.min;
| foreach(e; arr)
| if (e > ret)
| ret = e;
| return ret;
|}
|
|E minElem(E)(E[] arr...)
|{
| auto ret = Unqual!E.max;
| foreach(e; arr)
| if (e < ret)
| ret = e;
| return ret;
|}
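|
|// Illustrative sketch, not from the library: both helpers scan their typesafe
|// variadic arguments and return the extreme element.
|version(mir_test) unittest
|{
|    assert(maxElem(1, 5, 3) == 5);
|    assert(minElem(4, 2, 9) == 2);
|}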
|
|size_t sum()(size_t[] packs)
|{
| size_t s;
| foreach(pack; packs)
| s += pack;
| return s;
|}
|
|
|size_t[] reverse()(size_t[] ar)
|{
| foreach(i, e; ar[0..$/2])
| {
| ar[i] = ar[$ - i - 1];
| ar[$ - i - 1] = e;
| }
| return ar;
|}
|
|enum indexError(size_t pos, size_t N) =
| "index at position " ~ pos.stringof
| ~ " from the range [0 .." ~ N.stringof ~ ")"
| ~ " must be less than corresponding length.";
|
|enum string tailErrorMessage(
| string fun = __FUNCTION__,
| string pfun = __PRETTY_FUNCTION__) =
|"
|- - -
|Error in function
|" ~ fun ~ "
|- - -
|Function prototype
|" ~ pfun ~ "
|_____";
|
|mixin template DimensionsCountCTError()
|{
| static assert(Dimensions.length <= N,
| "Dimensions list length = " ~ Dimensions.length.stringof
| ~ " should be less than or equal to N = " ~ N.stringof
| ~ tailErrorMessage!());
|}
|
|enum DimensionsCountRTError = q{
| assert(dimensions.length <= N,
| "Dimensions list length should be less than or equal to N = " ~ N.stringof
| ~ tailErrorMessage!());
|};
|
|mixin template DimensionCTError()
|{
| static assert(dimension >= 0,
| "dimension = " ~ dimension.stringof ~ " at position "
| ~ i.stringof ~ " should be greater than or equal to 0"
| ~ tailErrorMessage!());
| static assert(dimension < N,
| "dimension = " ~ dimension.stringof ~ " at position "
| ~ i.stringof ~ " should be less than N = " ~ N.stringof
| ~ tailErrorMessage!());
| static assert(dimension < slice.S,
| "dimension = " ~ dimension.stringof ~ " at position "
| ~ i.stringof ~ " should be less than " ~ (slice.S).stringof ~ ". "
| ~ "`universal` and `canonical` from `mir.ndslice.topology` can be used to relax slice kind."
| ~ tailErrorMessage!());
|}
|
|enum DimensionRTError = q{
| static if (isSigned!(typeof(dimension)))
| assert(dimension >= 0, "dimension should be greater than or equal to 0"
| ~ tailErrorMessage!());
| assert(dimension < N, "dimension should be less than N = " ~ N.stringof
| ~ tailErrorMessage!());
| assert(dimension < slice.S,
| "dimension should be less than " ~ slice.S.stringof ~ ". "
| ~ "`universal` and `canonical` from `mir.ndslice.topology` can be used to relax slice kind."
| ~ tailErrorMessage!());
|};
|
|private alias IncFront(Seq...) = AliasSeq!(Seq[0] + 1, Seq[1 .. $]);
|
|private alias DecFront(Seq...) = AliasSeq!(Seq[0] - 1, Seq[1 .. $]);
|
|private enum bool isNotZero(alias t) = t != 0;
|
|alias NSeqEvert(Seq...) = Filter!(isNotZero, DecFront!(Reverse!(IncFront!Seq)));
|
|//alias Parts(Seq...) = DecAll!(IncFront!Seq);
|
|alias Snowball(Seq...) = AliasSeq!(size_t.init, SnowballImpl!(size_t.init, Seq));
|
|private template SnowballImpl(size_t val, Seq...)
|{
| static if (Seq.length == 0)
| alias SnowballImpl = AliasSeq!();
| else
| alias SnowballImpl = AliasSeq!(Seq[0] + val, SnowballImpl!(Seq[0] + val, Seq[1 .. $]));
|}
|
|private template DecAll(Seq...)
|{
| static if (Seq.length == 0)
| alias DecAll = AliasSeq!();
| else
| alias DecAll = AliasSeq!(Seq[0] - 1, DecAll!(Seq[1 .. $]));
|}
|
|//template SliceFromSeq(Range, Seq...)
|//{
|// static if (Seq.length == 0)
|// alias SliceFromSeq = Range;
|// else
|// {
|// import mir.ndslice.slice : Slice;
|// alias SliceFromSeq = SliceFromSeq!(Slice!(Seq[$ - 1], Range), Seq[0 .. $ - 1]);
|// }
|//}
|
|template DynamicArrayDimensionsCount(T)
|{
| static if (isDynamicArray!T)
| enum size_t DynamicArrayDimensionsCount = 1 + DynamicArrayDimensionsCount!(typeof(T.init[0]));
| else
| enum size_t DynamicArrayDimensionsCount = 0;
|}
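|
|// Illustrative sketch, not from the library: the count is the nesting depth
|// of a dynamic-array type and 0 for non-array types.
|version(mir_test) unittest
|{
|    static assert(DynamicArrayDimensionsCount!(int[][]) == 2);
|    static assert(DynamicArrayDimensionsCount!int == 0);
|}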
|
|bool isPermutation(size_t N)(auto ref const scope size_t[N] perm)
|{
| int[N] mask;
| return isValidPartialPermutationImpl(perm, mask);
|}
|
|version(mir_test) unittest
|{
| assert(isPermutation([0, 1]));
| // all numbers 0..N-1 need to be part of the permutation
| assert(!isPermutation([1, 2]));
| assert(!isPermutation([0, 2]));
| // duplicates are not allowed
| assert(!isPermutation([0, 1, 1]));
|
| size_t[0] emptyArr;
| // empty permutations are not allowed either
| assert(!isPermutation(emptyArr));
|}
|
|bool isValidPartialPermutation(size_t N)(in size_t[] perm)
|{
| int[N] mask;
| return isValidPartialPermutationImpl(perm, mask);
|}
|
|private bool isValidPartialPermutationImpl(size_t N)(in size_t[] perm, ref int[N] mask)
|{
| if (perm.length == 0)
| return false;
| foreach (j; perm)
| {
| if (j >= N)
| return false;
| if (mask[j]) //duplicate
| return false;
| mask[j] = true;
| }
| return true;
|}
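|
|// Illustrative sketch, not from the library: a partial permutation may omit
|// indices, but entries must lie in [0, N) and must not repeat.
|version(mir_test) unittest
|{
|    assert(isValidPartialPermutation!4([0, 2]));
|    assert(!isValidPartialPermutation!4([0, 0])); // duplicate
|    assert(!isValidPartialPermutation!2([3]));    // out of range
|}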
|
|template ShiftNegativeWith(size_t N)
|{
| enum ShiftNegativeWith(sizediff_t i) = i < 0 ? i + N : i;
|}
|
|enum toSize_t(size_t i) = i;
|enum toSizediff_t(sizediff_t i) = i;
|enum isSize_t(alias i) = is(typeof(i) == size_t);
|enum isSizediff_t(alias i) = is(typeof(i) == sizediff_t);
|enum isIndex(I) = is(I : size_t);
|template is_Slice(S)
|{
| static if (is(S : Slice!(IotaIterator!I), I))
| enum is_Slice = __traits(isIntegral, I);
| else
| enum is_Slice = false;
|}
|
|alias Repeat(size_t N : 0, T...) = AliasSeq!();
|
|private enum isReference(P) =
| hasIndirections!P
| || isFunctionPointer!P
| || is(P == interface);
|
|alias ImplicitlyUnqual(T) = Select!(isImplicitlyConvertible!(T, Unqual!T), Unqual!T, T);
|alias ImplicitlyUnqual(T : T*) = T*;
|
|size_t lengthsProduct(size_t N)(auto ref const scope size_t[N] lengths)
|{
0000000| size_t length = lengths[0];
| foreach (i; Iota!(1, N))
0000000| length *= lengths[i];
0000000| return length;
|}
|
|pure nothrow version(mir_test) unittest
|{
| const size_t[3] lengths = [3, 4, 5];
| assert(lengthsProduct(lengths) == 60);
| assert(lengthsProduct([3, 4, 5]) == 60);
|}
|
|package(mir) template strideOf(args...)
|{
| static if (args.length == 0)
| enum strideOf = args;
| else
| {
| @optmath @property auto ref ls()()
| {
| import mir.ndslice.topology: stride;
| return stride(args[0]);
| }
| alias strideOf = AliasSeq!(ls, strideOf!(args[1..$]));
| }
|}
|
|package(mir) template frontOf(args...)
|{
| static if (args.length == 0)
| enum frontOf = args;
| else
| {
| @optmath @property auto ref ls()()
| {
| return args[0].front;
| }
| alias frontOf = AliasSeq!(ls, frontOf!(args[1..$]));
| }
|}
|
|package(mir) template backOf(args...)
|{
| static if (args.length == 0)
| enum backOf = args;
| else
| {
| @optmath @property auto ref ls()()
| {
| return args[0].back;
| }
| alias backOf = AliasSeq!(ls, backOf!(args[1..$]));
| }
|}
|
|package(mir) template frontOfD(size_t dimension, args...)
|{
| static if (args.length == 0)
| enum frontOfD = args;
| else
| {
| @optmath @property auto ref ls()()
| {
| return args[0].front!dimension;
| }
| alias frontOfD = AliasSeq!(ls, frontOfD!(dimension, args[1..$]));
| }
|}
|
|package(mir) template backOfD(size_t dimension, args...)
|{
| static if (args.length == 0)
| enum backOfD = args;
| else
| {
| @optmath @property auto ref ls()()
| {
| return args[0].back!dimension;
| }
| alias backOfD = AliasSeq!(ls, backOfD!(dimension, args[1..$]));
| }
|}
|
|package(mir) template frontOfDim(size_t dim, args...)
|{
| static if (args.length == 0)
| enum frontOfDim = args;
| else
| {
| alias arg = args[0];
| @optmath @property auto ref ls()
| {
| return arg.front!dim;
| }
| alias frontOfDim = AliasSeq!(ls, frontOfDim!(dim, args[1..$]));
| }
|}
|
|package(mir) template selectFrontOf(alias input, args...)
|{
| static if (args.length == 0)
| enum selectFrontOf = args;
| else
| {
| alias arg = args[0];
| @optmath @property auto ref ls()()
| {
| return arg.lightScope.selectFront!0(input);
| }
| alias selectFrontOf = AliasSeq!(ls, selectFrontOf!(input, args[1..$]));
| }
|}
|
|package(mir) template selectBackOf(alias input, args...)
|{
| static if (args.length == 0)
| enum selectBackOf = args;
| else
| {
| alias arg = args[0];
| @optmath @property auto ref ls()()
| {
| return arg.selectBack!0(input);
| }
| alias selectBackOf = AliasSeq!(ls, selectBackOf!(input, args[1..$]));
| }
|}
|
|package(mir) template frontSelectFrontOf(alias input, args...)
|{
| static if (args.length == 0)
| enum frontSelectFrontOf = args;
| else
| {
| alias arg = args[0];
| @optmath @property auto ref ls()()
| {
| return arg.lightScope.front.selectFront!0(input);
| }
| alias frontSelectFrontOf = AliasSeq!(ls, frontSelectFrontOf!(input, args[1..$]));
| }
|}
|
|package(mir) template frontSelectBackOf(alias input, args...)
|{
| static if (args.length == 0)
| enum frontSelectBackOf = args;
| else
| {
| alias arg = args[0];
| @optmath @property auto ref ls()()
| {
| return arg.lightScope.front.selectBack!0(input);
| }
| alias frontSelectBackOf = AliasSeq!(ls, frontSelectBackOf!(input, args[1..$]));
| }
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/internal.d is 0% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-core-1.1.51-mir-core-source-mir-bitop.lst
|/++
|This module contains a collection of bit-level operations.
|
|Authors: Ilya Yaroshenko, Phobos & LDC Authors (original Phobos unittests, docs, conventions).
|+/
|module mir.bitop;
|
|version(LDC)
| import ldc.intrinsics;
|version(GNU)
| import gcc.builtins;
|
|import mir.math.common: fastmath;
|
|/// Right shift value for bit index to get element's index (5 for `uint`).
|enum uint bitElemShift(T : ubyte) = 3;
|/// ditto
|enum uint bitElemShift(T : byte) = 3;
|/// ditto
|enum uint bitElemShift(T : ushort) = 4;
|/// ditto
|enum uint bitElemShift(T : short) = 4;
|/// ditto
|enum uint bitElemShift(T : uint) = 5;
|/// ditto
|enum uint bitElemShift(T : int) = 5;
|/// ditto
|enum uint bitElemShift(T : ulong) = 6;
|/// ditto
|enum uint bitElemShift(T : long) = 6;
|static if (is(ucent))
|/// ditto
|enum uint bitElemShift(T : ucent) = 7;
|/// ditto
|static if (is(cent))
|enum uint bitElemShift(T : cent) = 7;
|
|/// Bit mask for bit index to get element's bit shift (31 for uint).
|enum uint bitShiftMask(T : ubyte) = 7;
|/// ditto
|enum uint bitShiftMask(T : byte) = 7;
|/// ditto
|enum uint bitShiftMask(T : ushort) = 15;
|/// ditto
|enum uint bitShiftMask(T : short) = 15;
|/// ditto
|enum uint bitShiftMask(T : uint) = 31;
|/// ditto
|enum uint bitShiftMask(T : int) = 31;
|/// ditto
|enum uint bitShiftMask(T : ulong) = 63;
|/// ditto
|enum uint bitShiftMask(T : long) = 63;
|static if (is(ucent))
|/// ditto
|enum uint bitShiftMask(T : ucent) = 127;
|static if (is(cent))
|/// ditto
|enum uint bitShiftMask(T : cent) = 127;
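|
|// Illustrative sketch, not from the library's test suite: a bit index splits
|// into an element index and an in-element shift using the constants above,
|// e.g. bit 37 of a `uint` array lives in element 1 at shift 5.
|version(mir_core_test) unittest
|{
|    enum size_t bitnum = 37;
|    static assert((bitnum >> bitElemShift!uint) == 1);
|    static assert((bitnum & bitShiftMask!uint) == 5);
|}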
|
|// no effect on this function, but better for optimization of other @fastmath code that uses this
|@fastmath:
|
|
|/++
|Returns the smallest number of trailing (low-order) bits of `value` that
|contain `popcnt` set bits.
|+/
|T nTrailingBitsToCount(T)(in T value, in T popcnt)
| if (__traits(isUnsigned, T))
|{
| import std.traits;
| import mir.internal.utility: Iota;
| alias S = Signed!(CommonType!(int, T));
| S mask = S(-1) << T.sizeof * 4;
| foreach_reverse (s; Iota!(bitElemShift!T - 1))
| {{
| enum shift = 1 << s;
| if (S(popcnt) > S(ctpop(cast(T)(value & ~mask))))
| mask <<= shift;
| else
| mask >>= shift;
| }}
| return cttz(cast(T)mask) + (S(popcnt) != ctpop(cast(T)(value & ~mask)));
|}
|
|///
|version(mir_core_test) unittest
|{
| assert(nTrailingBitsToCount(0xF0u, 3u) == 7);
| assert(nTrailingBitsToCount(0xE00u, 3u) == 12);
|
| foreach(uint i; 1 .. 32)
| assert(nTrailingBitsToCount(uint.max, i) == i);
|}
|
|/++
|Returns the smallest number of leading (high-order) bits of `value` that
|contain `popcnt` set bits.
|+/
|T nLeadingBitsToCount(T)(in T value, in T popcnt)
| if (__traits(isUnsigned, T))
|{
| import std.traits;
| import mir.internal.utility: Iota;
| alias S = Signed!(CommonType!(int, T));
| S mask = S(-1) << T.sizeof * 4;
| foreach_reverse (s; Iota!(bitElemShift!T - 1))
| {{
| enum shift = 1 << s;
| if (S(popcnt) > S(ctpop(cast(T)(value & mask))))
| mask >>= shift;
| else
| mask <<= shift;
| }}
| return ctlz(cast(T)~mask) + (S(popcnt) != ctpop(cast(T)(value & mask)));
|}
|
|///
|version(mir_core_test) unittest
|{
| assert(nLeadingBitsToCount(0xF0u, 3u) == 32 - 5);
| assert(nLeadingBitsToCount(0x700u, 3u) == 32 - 8);
|
| foreach(uint i; 1 .. 32)
| assert(nLeadingBitsToCount(uint.max, i) == i);
|}
|
|/++
|Tests the bit.
|Returns:
| A non-zero value if the bit was set, and a zero
| if it was clear.
|+/
|auto bt(Field, T = typeof(Field.init[size_t.init]))(auto ref Field p, size_t bitnum)
| if (__traits(isUnsigned, T))
|{
| auto index = bitnum >> bitElemShift!T;
| auto mask = T(1) << (bitnum & bitShiftMask!T);
| return p[index] & mask;
|}
|
|///
|@system pure version(mir_core_test) unittest
|{
| size_t[2] array;
|
| array[0] = 2;
| array[1] = 0x100;
|
| assert(bt(array.ptr, 1));
| assert(array[0] == 2);
| assert(array[1] == 0x100);
|}
|
|/++
|Tests and assigns the bit.
|Returns:
| A non-zero value if the bit was set, and a zero if it was clear.
|+/
|auto bta(Field, T = typeof(Field.init[size_t.init]))(auto ref Field p, size_t bitnum, bool value)
| if (__traits(isUnsigned, T))
|{
| auto index = bitnum >> bitElemShift!T;
| auto shift = bitnum & bitShiftMask!T;
| auto mask = T(1) << shift;
| static if (__traits(compiles, &p[size_t.init]))
| {
| auto qp = &p[index];
| auto q = *qp;
| auto ret = q & mask;
| *qp = cast(T)((q & ~mask) ^ (T(value) << shift));
| }
| else
| {
| auto q = p[index];
| auto ret = q & mask;
| p[index] = cast(T)((q & ~mask) ^ (T(value) << shift));
| }
| return ret;
|}
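|
|// Illustrative sketch, not from the library's test suite: assign a bit and
|// inspect the returned previous state.
|@system pure version(mir_core_test) unittest
|{
|    size_t[2] array;
|    array[0] = 2;
|
|    assert(bta(array.ptr, 0, true) == 0);  // bit 0 was clear before
|    assert(array[0] == 3);
|    assert(bta(array.ptr, 1, false) != 0); // bit 1 was set before
|    assert(array[0] == 1);
|}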
|
|/++
|Tests and complements the bit.
|Returns:
| A non-zero value if the bit was set, and a zero if it was clear.
|+/
|auto btc(Field, T = typeof(Field.init[size_t.init]))(auto ref Field p, size_t bitnum)
| if (__traits(isUnsigned, T))
|{
| auto index = bitnum >> bitElemShift!T;
| auto mask = T(1) << (bitnum & bitShiftMask!T);
| static if (__traits(compiles, &p[size_t.init]))
| {
| auto qp = &p[index];
| auto q = *qp;
| auto ret = q & mask;
| *qp = cast(T)(q ^ mask);
| }
| else
| {
| auto q = p[index];
| auto ret = q & mask;
| p[index] = cast(T)(q ^ mask);
| }
| return ret;
|}
|
|/++
|Tests and resets (sets to 0) the bit.
|Returns:
| A non-zero value if the bit was set, and a zero if it was clear.
|+/
|auto btr(Field, T = typeof(Field.init[size_t.init]))(auto ref Field p, size_t bitnum)
| if (__traits(isUnsigned, T))
|{
| auto index = bitnum >> bitElemShift!T;
| auto mask = T(1) << (bitnum & bitShiftMask!T);
| static if (__traits(compiles, &p[size_t.init]))
| {
| auto qp = &p[index];
| auto q = *qp;
| auto ret = q & mask;
| *qp = cast(T)(q & ~mask);
| }
| else
| {
| auto q = p[index];
| auto ret = q & mask;
| p[index] = cast(T)(q & ~mask);
| }
| return ret;
|}
|
|/++
|Tests and sets the bit.
|Params:
|p = a non-NULL field / pointer to an array of unsigned integers.
|bitnum = a bit number, starting with bit 0 of p[0],
|and progressing. It addresses bits like the expression:
|---
|p[index / (T.sizeof*8)] & (1 << (index & ((T.sizeof*8) - 1)))
|---
|Returns:
| A non-zero value if the bit was set, and a zero if it was clear.
|+/
|auto bts(Field, T = typeof(Field.init[size_t.init]))(auto ref Field p, size_t bitnum)
| if (__traits(isUnsigned, T))
|{
| auto index = bitnum >> bitElemShift!T;
| auto mask = T(1) << (bitnum & bitShiftMask!T);
| static if (__traits(compiles, &p[size_t.init]))
| {
| auto qp = &p[index];
| auto q = *qp;
| auto ret = q & mask;
| *qp = cast(T)(q | mask);
| }
| else
| {
| auto q = p[index];
| auto ret = q & mask;
| p[index] = cast(T)(q | mask);
| }
| return ret;
|}
|
|///
|@system pure version(mir_core_test) unittest
|{
| size_t[2] array;
|
| array[0] = 2;
| array[1] = 0x100;
|
| assert(btc(array.ptr, 35) == 0);
| if (size_t.sizeof == 8)
| {
| assert(array[0] == 0x8_0000_0002);
| assert(array[1] == 0x100);
| }
| else
| {
| assert(array[0] == 2);
| assert(array[1] == 0x108);
| }
|
| assert(btc(array.ptr, 35));
| assert(array[0] == 2);
| assert(array[1] == 0x100);
|
| assert(bts(array.ptr, 35) == 0);
| if (size_t.sizeof == 8)
| {
| assert(array[0] == 0x8_0000_0002);
| assert(array[1] == 0x100);
| }
| else
| {
| assert(array[0] == 2);
| assert(array[1] == 0x108);
| }
|
| assert(btr(array.ptr, 35));
| assert(array[0] == 2);
| assert(array[1] == 0x100);
|}
|
|/// The 'ctpop' family of intrinsics counts the number of bits set in a value.
|T ctpop(T)(in T src)
| if (__traits(isUnsigned, T))
|{
| version(LDC) if (!__ctfe)
| return llvm_ctpop(src);
| version(GNU) if (!__ctfe)
| {
| static if (T.sizeof < __builtin_clong.sizeof)
| return cast(T) __builtin_popcount(src);
| else static if (T.sizeof <= __builtin_clong.sizeof)
| return cast(T) __builtin_popcountl(src);
| else
| return cast(T) __builtin_popcountll(src);
| }
| import core.bitop: popcnt;
| return cast(T) popcnt(src);
|}
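|
|// Illustrative sketch, not from the library's test suite: population count.
|version(mir_core_test) unittest
|{
|    assert(ctpop(0b1011_0000u) == 3);
|    assert(ctpop(ulong.max) == 64);
|}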
|
|/++
|The 'ctlz' family of intrinsic functions counts the number of leading zeros in a variable.
|Result is undefined if the argument is zero.
|+/
|T ctlz(T)(in T src)
| if (__traits(isUnsigned, T))
|{
| version(LDC) if (!__ctfe)
| return llvm_ctlz(src, true);
| version(GNU) if (!__ctfe)
| {
| // Do not zero-extend when counting leading zeroes.
| static if (T.sizeof < __builtin_clong.sizeof && T.sizeof >= uint.sizeof)
| return cast(T) __builtin_clz(src);
| else static if (T.sizeof == __builtin_clong.sizeof)
| return cast(T) __builtin_clzl(src);
| else static if (T.sizeof > __builtin_clong.sizeof)
| return cast(T) __builtin_clzll(src);
| }
| import core.bitop: bsr;
0000000| return cast(T)(T.sizeof * 8 - 1 - bsr(src));
|}
|
|///
|version (mir_core_test) @nogc nothrow pure @safe version(mir_core_test) unittest
|{
| assert(ctlz(cast(ubyte) 0b0011_1111) == 2);
| assert(ctlz(cast(ushort) 0b0000_0001_1111_1111) == 7);
|}
|
|/++
|The 'ctlzp' family of intrinsic functions counts the number of leading zeros in a variable.
|Result is properly defined if the argument is zero.
|+/
|T ctlzp(T)(in T src)
| if (__traits(isUnsigned, T))
|{
| version(LDC) if (!__ctfe)
| return llvm_ctlz(src, false);
| return src ? ctlz(src) : T.sizeof * 8;
|}
|
|///
|version (mir_core_test) @nogc nothrow pure @safe version(mir_core_test) unittest
|{
| assert(ctlzp(cast(ubyte) 0b0000_0000) == 8);
| assert(ctlzp(cast(ubyte) 0b0011_1111) == 2);
| assert(ctlzp(cast(ushort) 0b0000_0001_1111_1111) == 7);
| assert(ctlzp(cast(ushort) 0) == 16);
| assert(ctlzp(cast(ulong) 0) == 64);
|}
|
|/++
|The 'cttz' family of intrinsic functions counts the number of trailing zeros.
|Result is undefined if the argument is zero.
|+/
|T cttz(T)(in T src)
| if (__traits(isUnsigned, T))
|{
| version(LDC) if (!__ctfe)
| return llvm_cttz(src, true);
| version(GNU) if (!__ctfe)
| {
| static if (T.sizeof <__builtin_clong.sizeof)
| return cast(T) __builtin_ctz(src);
| else static if (T.sizeof <=__builtin_clong.sizeof)
| return cast(T) __builtin_ctzl(src);
| else
| return cast(T) __builtin_ctzll(src);
| }
| import core.bitop: bsf;
| return cast(T) bsf(src);
|}
|
|///
|version (mir_core_test) @nogc nothrow pure @safe version(mir_core_test) unittest
|{
| assert(cttzp(cast(ubyte) 0b11111100) == 2);
| assert(cttzp(cast(ushort) 0b1111111110000000) == 7);
|}
|
|/++
|The 'cttz' family of intrinsic functions counts the number of trailing zeros.
|Result is properly defined if the argument is zero.
|+/
|T cttzp(T)(in T src)
| if (__traits(isUnsigned, T))
|{
| version(LDC) if (!__ctfe)
| return llvm_cttz(src, false);
| return src ? cttz(src) : T.sizeof * 8;
|}
|
|///
|version (mir_core_test) @nogc nothrow pure @safe version(mir_core_test) unittest
|{
| assert(cttzp(cast(ubyte) 0b0000_0000) == 8);
| assert(cttzp(cast(ubyte) 0b11111100) == 2);
| assert(cttzp(cast(ushort) 0b1111111110000000) == 7);
| assert(cttzp(cast(ushort) 0) == 16);
| assert(cttzp(cast(ulong) 0) == 64);
|}
../../../.dub/packages/mir-core-1.1.51/mir-core/source/mir/bitop.d is 0% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-random-2.2.15-mir-random-source-mir-random-engine-package.lst
|/++
|$(SCRIPT inhibitQuickIndex = 1;)
|Uniform random engines.
|
|$(B Sections:)
| $(LINK2 #Convenience, Convenience)
|• $(LINK2 #Entropy, Entropy)
|• $(LINK2 #ThreadLocal, Thread-Local)
|• $(LINK2 #Traits, Traits)
|• $(LINK2 #CInterface, C Interface)
|
|$(BOOKTABLE
|
|$(LEADINGROW Convenience)
|$(TR
| $(RROW Random, Default random number _engine))
| $(RROW rne, Per-thread uniquely-seeded instance of default `Random`. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS).)
|
|$(LEADINGROW Entropy)
|$(TR
| $(RROW unpredictableSeed, Seed of `size_t` using system entropy. May use `unpredictableSeed!UIntType` for unsigned integers of different sizes.)
| $(RROW genRandomNonBlocking, Fills a buffer with system entropy, returning number of bytes copied or negative number on error)
| $(RROW genRandomBlocking, Fills a buffer with system entropy, possibly waiting if the system believes it has insufficient entropy. Returns 0 on success.))
|
|$(LEADINGROW Thread-Local (when $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS) enabled))
|$(TR
| $(TR $(TDNW $(LREF threadLocal)`!(Engine)`) $(TD Per-thread uniquely-seeded instance of any specified `Engine`. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS).))
| $(TR $(TDNW $(LREF threadLocalPtr)`!(Engine)`) $(TD `@safe` pointer to `threadLocal!Engine`. Always initializes before return. $(I Warning: do not share between threads!)))
| $(TR $(TDNW $(LREF threadLocalInitialized)`!(Engine)`) $(TD Explicitly manipulate "is seeded" flag for thread-local instance. Not needed by most library users.))
| $(TR $(TDNW $(LREF setThreadLocalSeed)`!(Engine, A...)`) $(TD Initialize thread-local `Engine` with a known seed rather than a random seed.))
| )
|
|$(LEADINGROW Traits)
|$(TR
| $(RROW EngineReturnType, Get return type of random number _engine's `opCall()`)
| $(RROW isRandomEngine, Check if is random number _engine)
| $(RROW isSaturatedRandomEngine, Check if random number _engine `G` such that `G.max == EngineReturnType!(G).max`)
| $(RROW preferHighBits, Are the high bits of the _engine's output known to have better statistical properties than the low bits?))
|
|$(LEADINGROW C Interface)
| $(RROW mir_random_engine_ctor, Perform any necessary setup. Automatically called by DRuntime.)
| $(RROW mir_random_engine_dtor, Release any resources. Automatically called by DRuntime.)
| $(RROW mir_random_genRandomNonBlocking, External name for $(LREF genRandomNonBlocking))
| $(RROW mir_random_genRandomBlocking, External name for $(LREF genRandomBlocking))
|)
|
|Copyright: Ilya Yaroshenko 2016-.
|License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
|Authors: Ilya Yaroshenko
|
|Macros:
| T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
| RROW = $(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|module mir.random.engine;
|
|version (OSX)
| version = Darwin;
|else version (iOS)
| version = Darwin;
|else version (TVOS)
| version = Darwin;
|else version (WatchOS)
| version = Darwin;
|
|// A secure arc4random implementation that uses some modern algorithm rather
|// than ARC4 may be used synonymously with non-blocking system entropy.
|version (CRuntime_Bionic)
| version = SecureARC4Random; // ChaCha20
|version (Darwin)
| version = SecureARC4Random; // AES
|version (OpenBSD)
| version = SecureARC4Random; // ChaCha20
|version (NetBSD)
| version = SecureARC4Random; // ChaCha20
|
|// A legacy arc4random should not be used when cryptographic security
|// is required but may be used for `unpredictableSeed`.
|version (CRuntime_UClibc)
| version = LegacyARC4Random; // ARC4
|version (FreeBSD)
| version = LegacyARC4Random; // ARC4
|version (DragonFlyBSD)
| version = LegacyARC4Random; // ARC4
|version (BSD)
| version = LegacyARC4Random; // Unknown implementation
|
|version (SecureARC4Random)
| version = AnyARC4Random;
|version (LegacyARC4Random)
| version = AnyARC4Random;
|
|version (D_betterC)
| private enum bool THREAD_LOCAL_STORAGE_AVAILABLE = false;
|else
| private enum bool THREAD_LOCAL_STORAGE_AVAILABLE = __traits(compiles, { static size_t x = 0; });
|
|import std.traits;
|
|import mir.random.engine.mersenne_twister;
|
|/++
|Like `std.traits.ReturnType!T` but it works even if
|T.opCall is a function template.
|+/
|template EngineReturnType(T)
|{
| import std.traits : ReturnType;
| static if (is(ReturnType!T))
| alias EngineReturnType = ReturnType!T;
| else
| alias EngineReturnType = typeof(T.init());
|}
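|
|// Illustrative sketch, not from the library's test suite: the Mersenne Twister
|// aliases imported above expose their output types through this trait.
|version(mir_random_test) unittest
|{
|    static assert(is(EngineReturnType!Mt19937 == uint));
|    static assert(is(EngineReturnType!Mt19937_64 == ulong));
|}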
|
|/++
|Test if T is a random engine.
|A type should define `enum isRandomEngine = true;` to be a random engine.
|+/
|template isRandomEngine(T)
|{
| static if (is(typeof(T.isRandomEngine) : bool) && is(typeof(T.init())))
| {
| private alias R = typeof(T.init());
| static if (T.isRandomEngine && isUnsigned!R)
| enum isRandomEngine = is(typeof({
| enum max = T.max;
| static assert(is(typeof(T.max) == R));
| }));
| else enum isRandomEngine = false;
| }
| else enum isRandomEngine = false;
|}
|
|/++
|Test if T is a saturated random-bit generator.
|A random number generator is saturated if `T.max == ReturnType!T.max`.
|A type should define `enum isRandomEngine = true;` to be a random engine.
|+/
|template isSaturatedRandomEngine(T)
|{
| static if (isRandomEngine!T)
| enum isSaturatedRandomEngine = T.max == EngineReturnType!T.max;
| else
| enum isSaturatedRandomEngine = false;
|}
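|
|// Illustrative sketch with a hypothetical engine, not part of the library:
|// a struct only needs `enum isRandomEngine = true`, an unsigned `opCall`,
|// and a matching `max` to satisfy the traits above.
|version(mir_random_test) unittest
|{
|    static struct SomeEngine
|    {
|        enum isRandomEngine = true;
|        enum uint max = uint.max;
|        uint state = 1u;
|        uint opCall() { return state = state * 747796405u + 1u; }
|    }
|    static assert(isRandomEngine!SomeEngine);
|    static assert(isSaturatedRandomEngine!SomeEngine);
|}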
|
|/++
|Are the high bits of the engine's output known to have
|better statistical properties than the low bits of the
|output? This property is set by checking the value of
|an optional enum named `preferHighBits`. If the property
|is missing it is treated as false.
|
|This should be specified as true for:
|
|- linear congruential generators with power-of-2 modulus
|- xorshift+ family
|- xorshift* family
|- in principle any generator whose final operation is something like
|multiplication or addition in which the high bits depend on the low bits
|but the low bits are unaffected by the high bits.
|
|+/
|template preferHighBits(G)
| if (isSaturatedRandomEngine!G)
|{
| static if (__traits(compiles, { enum bool e = G.preferHighBits; }))
| private enum bool preferHighBits = G.preferHighBits;
| else
| private enum bool preferHighBits = false;
|}
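|
|// Illustrative sketch, not from the library's test suite: Mt19937 declares no
|// `preferHighBits` member (see its source above), so the trait reports false.
|version(mir_random_test) unittest
|{
|    static assert(!preferHighBits!Mt19937);
|}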
|
|/*
| * Marker indicating it's safe to construct from void
| * (i.e. the constructor doesn't depend on the struct
| * being in an initially valid state).
| * Either checks an explicit flag `_isVoidInitOkay`
| * or tests to make sure that the structure contains
| * nothing that looks like a pointer or an index into
| * an array. Also ensures that there is not an elaborate
| * destructor since it could be called when the struct
| * is in an invalid state.
| * Non-public because we don't want to commit to this
| * design.
| */
|package template _isVoidInitOkay(G) if (isRandomEngine!G && is(G == struct))
|{
| static if (is(typeof(G._isVoidInitOkay) : bool))
| enum bool _isVoidInitOkay = G._isVoidInitOkay;
| else static if (!hasNested!G && !hasElaborateDestructor!G)
| {
| import std.meta : allSatisfy;
| static if (allSatisfy!(isScalarType, FieldTypeTuple!G))
| //All members are scalars.
| enum bool _isVoidInitOkay = true;
| else static if (FieldTypeTuple!(G).length == 1 && isStaticArray!(FieldTypeTuple!(G)[0]))
| //Only has one member which is a static array of scalars.
| enum bool _isVoidInitOkay = isScalarType!(typeof(FieldTypeTuple!(G)[0].init[0]));
| else
| enum bool _isVoidInitOkay = false;
| }
| else
| enum bool _isVoidInitOkay = false;
|}
|@nogc nothrow pure @safe version(mir_random_test)
|{
| import mir.random.engine.mersenne_twister: Mt19937, Mt19937_64;
| //Ensure that this property is set for the Mersenne Twister,
| //whose internal state is huge enough for this to potentially
| //matter:
| static assert(_isVoidInitOkay!Mt19937);
| static assert(_isVoidInitOkay!Mt19937_64);
| //Check that the property is set for a moderately-sized PRNG.
| import mir.random.engine.xorshift: Xorshift1024StarPhi;
| static assert(_isVoidInitOkay!Xorshift1024StarPhi);
| //Check that PRNGs not explicitly marked as void-init safe
| //can be inferred as such if they only have scalar fields.
| import mir.random.engine.pcg: pcg32, pcg32_oneseq;
| import mir.random.engine.splitmix: SplitMix64;
| static assert(_isVoidInitOkay!pcg32);
| static assert(_isVoidInitOkay!pcg32_oneseq);
| static assert(_isVoidInitOkay!SplitMix64);
| //Check that PRNGs not explicitly marked as void-init safe
| //can be inferred as such if their only field is a static
| //array of scalars.
| import mir.random.engine.xorshift: Xorshift128, Xoroshiro128Plus;
| static assert(_isVoidInitOkay!Xorshift128);
| static assert(_isVoidInitOkay!Xoroshiro128Plus);
|}
|
|version (D_Ddoc)
|{
| /++
| A "good" seed for initializing random number engines. Initializing
| with $(D_PARAM unpredictableSeed) makes engines generate different
| random number sequences every run.
|
| Returns:
| A single unsigned integer seed value, different on each successive call
| +/
| pragma(inline, true)
| @property size_t unpredictableSeed() @trusted nothrow @nogc
| {
| return unpredictableSeed!size_t;
| }
|}
|
|/// ditto
|pragma(inline, true)
|@property T unpredictableSeed(T = size_t)() @trusted nothrow @nogc
| if (isUnsigned!T)
|{
| import mir.utility: _expect;
0000000| T seed = void;
| version (AnyARC4Random)
| {
| // If we just need 32 bits it's faster to call arc4random()
| // than arc4random_buf(&seed, seed.sizeof).
| static if (T.sizeof <= uint.sizeof)
| seed = cast(T) arc4random();
| else
| arc4random_buf(&seed, seed.sizeof);
| }
0000000| else if (_expect(genRandomNonBlocking(&seed, seed.sizeof) != T.sizeof, false))
| {
| // fallback to old time/thread-based implementation in case of errors
0000000| seed = cast(T) fallbackSeed();
| }
0000000| return seed;
|}
|
|// Old name of `unpredictableSeedOf!T`. Undocumented but
|// defined so existing code using mir.random won't break.
|deprecated("Use unpredictableSeed!T instead of unpredictableSeedOf!T")
|public alias unpredictableSeedOf(T) = unpredictableSeed!T;
|
|version (mir_random_test) @nogc nothrow @safe unittest
|{
| // Check unpredictableSeed syntax works with or without parentheses.
| auto a = unpredictableSeed;
| auto b = unpredictableSeed!uint;
| auto c = unpredictableSeed!ulong;
| static assert(is(typeof(a) == size_t));
| static assert(is(typeof(b) == uint));
| static assert(is(typeof(c) == ulong));
|
| auto d = unpredictableSeed();
| auto f = unpredictableSeed!uint();
| auto g = unpredictableSeed!ulong();
| static assert(is(typeof(d) == size_t));
| static assert(is(typeof(f) == uint));
| static assert(is(typeof(g) == ulong));
|}
|
|// Is llvm_readcyclecounter supported on this platform?
|// We need to whitelist platforms where it is known to work because if it
|// isn't supported it will compile but always return 0.
|// https://llvm.org/docs/LangRef.html#llvm-readcyclecounter-intrinsic
|version(LDC)
|{
| // The only architectures the documentation says are supported are
| // x86 and Alpha. x86 uses RDTSC and Alpha uses RPCC.
| version(X86_64) version = LLVMReadCycleCounter;
| // Do *not* support 32-bit x86 because some x86 processors don't
| // support `rdtsc` and because on x86 (but not x86-64) Linux
| // `prctl` can disable a process's ability to use `rdtsc`.
| else version(Alpha) version = LLVMReadCycleCounter;
|}
|
|
|pragma(inline, false)
|private ulong fallbackSeed()()
|{
| // fallback to old time/thread-based implementation in case of errors
| version(LLVMReadCycleCounter)
| {
| import ldc.intrinsics : llvm_readcyclecounter;
| ulong ticks = llvm_readcyclecounter();
| }
| else version(D_InlineAsm_X86_64)
| {
| // RDTSC takes around 22 clock cycles.
| ulong ticks = void;
| asm @nogc nothrow
| {
| rdtsc;
| shl RDX, 32;
| xor RDX, RAX;
| mov ticks, RDX;
| }
| }
| //else version(D_InlineAsm_X86)
| //{
| // // We don't use `rdtsc` with version(D_InlineAsm_X86) because
| // // some x86 processors don't support `rdtsc` and because on
| // // x86 (but not x86-64) Linux `prctl` can disable a process's
| // // ability to use `rdtsc`.
| // static assert(0);
| //}
| else version(Windows)
| {
| import core.sys.windows.winbase : QueryPerformanceCounter;
| ulong ticks = void;
| QueryPerformanceCounter(cast(long*)&ticks);
| }
| else version(Darwin)
| {
| import core.time : mach_absolute_time;
| ulong ticks = mach_absolute_time();
| }
| else version(Posix)
| {
| import core.sys.posix.time : clock_gettime, CLOCK_MONOTONIC, timespec;
0000000| timespec ts = void;
0000000| const tserr = clock_gettime(CLOCK_MONOTONIC, &ts);
| // Should never fail. The only allowed error codes are
| // EINVAL if the 1st argument is an invalid clock ID and
| // EFAULT if the 2nd argument is an invalid address.
0000000| assert(tserr == 0, "Call to clock_gettime failed.");
0000000| ulong ticks = (cast(ulong) ts.tv_sec << 32) ^ ts.tv_nsec;
| }
| version(Posix)
| {
| import core.sys.posix.unistd : getpid;
| import core.sys.posix.pthread : pthread_self;
0000000| auto pid = cast(uint) getpid;
0000000| auto tid = cast(uint) pthread_self();
| }
| else
| version(Windows)
| {
| import core.sys.windows.winbase : GetCurrentProcessId, GetCurrentThreadId;
| auto pid = cast(uint) GetCurrentProcessId;
| auto tid = cast(uint) GetCurrentThreadId;
| }
0000000| ulong k = ((cast(ulong)pid << 32) ^ tid) + ticks;
0000000| k ^= k >> 33;
0000000| k *= 0xff51afd7ed558ccd;
0000000| k ^= k >> 33;
0000000| k *= 0xc4ceb9fe1a85ec53;
0000000| k ^= k >> 33;
0000000| return k;
|}
|
|///
|@safe version(mir_random_test) unittest
|{
| auto rnd = Random(unpredictableSeed);
| auto n = rnd();
| static assert(is(typeof(n) == size_t));
|}
|
|/++
|The "default", "favorite", "suggested" random number generator type on
|the current platform. It is an alias for one of the
|generators. You may want to use it if (1) you need to generate some
|nice random numbers, and (2) you don't care for the minutiae of the
|method being used.
|+/
|static if (is(size_t == uint))
| alias Random = Mt19937;
|else
| alias Random = Mt19937_64;
|
|///
|version(mir_random_test) unittest
|{
| import std.traits;
| static assert(isSaturatedRandomEngine!Random);
| static assert(is(EngineReturnType!Random == size_t));
|}
|
|static if (THREAD_LOCAL_STORAGE_AVAILABLE)
|{
| /++
| Thread-local instance of the default $(LREF Random) allocated and seeded independently
| for each thread. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS).
| +/
| alias rne = threadLocal!Random;
| ///
| @nogc nothrow @safe version(mir_random_test) unittest
| {
| import mir.random;
| import std.complex;
|
| auto c = complex(rne.rand!real, rne.rand!real);
|
| int[10] array;
| foreach (ref e; array)
| e = rne.rand!int;
| auto picked = array[rne.randIndex(array.length)];
| }
|
| private static struct TL(Engine)
| if (isSaturatedRandomEngine!Engine && is(Engine == struct))
| {
| static bool initialized;
| static if (_isVoidInitOkay!Engine)
| static Engine engine = void;
| else static if (__traits(compiles, { Engine defaultConstructed; }))
| static Engine engine;
| else
| static Engine engine = Engine.init;
|
| static if (is(ucent) && is(typeof((ucent t) => Engine(t))))
| alias seed_t = ucent;
| else static if (is(typeof((ulong t) => Engine(t))))
| alias seed_t = ulong;
| else static if (is(typeof((uint t) => Engine(t))))
| alias seed_t = uint;
| else
| alias seed_t = EngineReturnType!Engine;
|
| pragma(inline, false) // Usually called only once per thread.
| private static void reseed()
| {
0000000| engine.__ctor(unpredictableSeed!(seed_t));
0000000| initialized = true;
| }
| }
| /++
| `threadLocal!Engine` returns a reference to a thread-local instance of
| the specified random number generator allocated and seeded uniquely
| for each thread. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS).
|
| `threadLocalPtr!Engine` is a pointer to the area of thread-local
| storage used by `threadLocal!Engine`. This function is provided because
| the compiler can infer it is `@safe`, unlike `&(threadLocal!Engine)`.
| Like `threadLocal!Engine` this function will auto-initialize the engine.
| $(I Do not share pointers returned by threadLocalPtr between
| threads!)
|
| `threadLocalInitialized!Engine` is a low-level way to explicitly change
| the "initialized" flag used by `threadLocal!Engine` to determine whether
| the Engine needs to be seeded. Setting this to `false` gives a way of
| forcing the next call to `threadLocal!Engine` to reseed. In general this
| is unnecessary but there are some specialized use cases where users have
| requested this ability.
| +/
| @property ref Engine threadLocal(Engine)()
| if (isSaturatedRandomEngine!Engine && is(Engine == struct))
| {
| version (DigitalMars)
| pragma(inline);//DMD may fail to inline this.
| else
| pragma(inline, true);
| import mir.utility: _expect;
0000000| if (_expect(!TL!Engine.initialized, false))
| {
0000000| TL!Engine.reseed();
| }
0000000| return TL!Engine.engine;
| }
| /// ditto
| @property Engine* threadLocalPtr(Engine)()
| if (isSaturatedRandomEngine!Engine && is(Engine == struct))
| {
| version (DigitalMars)
| pragma(inline);//DMD may fail to inline this.
| else
| pragma(inline, true);
| import mir.utility: _expect;
| if (_expect(!TL!Engine.initialized, false))
| {
| TL!Engine.reseed();
| }
| return &TL!Engine.engine;
| }
| /// ditto
| @property ref bool threadLocalInitialized(Engine)()
| if (isSaturatedRandomEngine!Engine && is(Engine == struct))
| {
| version (DigitalMars)
| pragma(inline);//DMD may fail to inline this.
| else
| pragma(inline, true);
| return TL!Engine.initialized;
| }
| ///
| @nogc nothrow @safe version(mir_random_test) unittest
| {
| import mir.random;
| import mir.random.engine.xorshift;
|
| alias gen = threadLocal!Xorshift1024StarPhi;
| double x = gen.rand!double;
| size_t i = gen.randIndex(100u);
| ulong a = gen.rand!ulong;
| }
| ///
| @nogc nothrow @safe version(mir_random_test) unittest
| {
| import mir.random;
| //If you need a pointer to the engine, getting it like this is @safe:
| Random* ptr = threadLocalPtr!Random;
| }
| ///
| @nogc nothrow @safe version(mir_random_test) unittest
| {
| import mir.random;
| import mir.random.engine.xorshift;
| //If you need to mark the engine as uninitialized to force a reseed,
| //you can do it like this:
| threadLocalInitialized!Xorshift1024StarPhi = false;
| }
| ///
| @nogc nothrow @safe version(mir_random_test) unittest
| {
| import mir.random;
| import mir.random.engine.mersenne_twister;
| //You can mark the engine as already initialized to skip
| //automatic seeding then initialize it yourself, for instance
| //if you want to use a known seed rather than a random one.
| threadLocalInitialized!Mt19937 = true;
| immutable uint[4] customSeed = [0x123, 0x234, 0x345, 0x456];
| threadLocal!Mt19937.__ctor(customSeed);
| foreach(_; 0..999)
| threadLocal!Mt19937.rand!uint;
| assert(3460025646u == threadLocal!Mt19937.rand!uint);
| }
| ///
| @nogc nothrow @safe version(mir_random_test) unittest
| {
| import mir.random;
| import mir.random.engine.xorshift;
|
| alias gen = threadLocal!Xorshift1024StarPhi;
|
| //If you want to you can call the generator's opCall instead of using
| //rand!T but it is somewhat clunky because of the ambiguity of
| //@property syntax: () looks like optional function parentheses.
| static assert(!__traits(compiles, {ulong x0 = gen();}));//<-- Won't work
| static assert(is(typeof(gen()) == Xorshift1024StarPhi));//<-- because the type is this.
| ulong x1 = gen.opCall();//<-- This works though.
| ulong x2 = gen()();//<-- This also works.
|
| //But instead of any of those you should really just use gen.rand!T.
| ulong x3 = gen.rand!ulong;
| }
|// ///
|// @nogc nothrow pure @safe version(mir_random_test) unittest
|// {
|// //If you want something like Phobos std.random.rndGen and
|// //don't care about the specific algorithm you can do this:
|// alias rndGen = threadLocal!Random;
|// }
|
| @nogc nothrow @system version(mir_random_test) unittest
| {
| //Verify that the same instance is returned every time per thread.
| import mir.random;
| import mir.random.engine.xorshift;
|
| Xorshift1024StarPhi* addr = &(threadLocal!Xorshift1024StarPhi());
| Xorshift1024StarPhi* sameAddr = &(threadLocal!Xorshift1024StarPhi());
| assert(addr is sameAddr);
| assert(sameAddr is threadLocalPtr!Xorshift1024StarPhi);
| }
|
| /++
| Sets or resets the _seed of `threadLocal!Engine` using the given arguments.
| It is not necessary to call this except if you wish to ensure the
| PRNG uses a known _seed.
| +/
| void setThreadLocalSeed(Engine, A...)(auto ref A seed)
| if (isSaturatedRandomEngine!Engine && is(Engine == struct)
| && A.length >= 1 && is(typeof((ref A a) => Engine(a))))
| {
| TL!Engine.initialized = true;
| TL!Engine.engine.__ctor(seed);
| }
| ///
| @nogc nothrow @system version(mir_random_test) unittest
| {
| import mir.random;
|
| alias rnd = threadLocal!Random;
|
| setThreadLocalSeed!Random(123);
| immutable float x = rnd.rand!float;
|
| assert(x != rnd.rand!float);
|
| setThreadLocalSeed!Random(123);
| immutable float y = rnd.rand!float;
|
| assert(x == y);
| }
|}
|else
|{
| static assert(!THREAD_LOCAL_STORAGE_AVAILABLE);
|
| @property ref Random rne()()
| {
| static assert(0, "Thread-local storage not available!");
| }
|
| template threadLocal(T)
| {
| static assert(0, "Thread-local storage not available!");
| }
|
| template threadLocalPtr(T)
| {
| static assert(0, "Thread-local storage not available!");
| }
|
| template threadLocalInitialized(T)
| {
| static assert(0, "Thread-local storage not available!");
| }
|
| template setThreadLocalSeed(T, A...)
| {
| static assert(0, "Thread-local storage not available!");
| }
|}
|
|version(linux)
|{
| import mir.linux._asm.unistd;
| enum bool LINUX_NR_GETRANDOM = (__traits(compiles, {enum e = NR_getrandom;}));
| //If the syscall constant is missing on X86_64 or X86, there is a problem with the library.
| static if (!LINUX_NR_GETRANDOM)
| {
| version (X86_64)
| static assert(0, "Missing linux syscall constants!");
| version (X86)
| static assert(0, "Missing linux syscall constants!");
| }
|}
|else
| enum bool LINUX_NR_GETRANDOM = false;
|
|static if (LINUX_NR_GETRANDOM)
|{
| // getrandom was introduced in Linux 3.17
| private __gshared bool getRandomFailedENOSYS = false;
|
| private extern(C) int syscall(size_t ident, size_t n, size_t arg1, size_t arg2) @nogc nothrow;
|
| /*
| * Flags for getrandom(2)
| *
| * GRND_NONBLOCK Don't block and return EAGAIN instead
| * GRND_RANDOM Use the /dev/random pool instead of /dev/urandom
| */
| private enum GRND_NONBLOCK = 0x0001;
| private enum GRND_RANDOM = 0x0002;
|
| private enum GETRANDOM = NR_getrandom;
|
| /*
| http://man7.org/linux/man-pages/man2/getrandom.2.html
| If the urandom source has been initialized, reads of up to 256 bytes
| will always return as many bytes as requested and will not be
| interrupted by signals. No such guarantees apply for larger buffer
| sizes.
| */
| private ptrdiff_t genRandomImplSysBlocking()(scope void* ptr, size_t len) @nogc nothrow @system
| {
0000000| while (len > 0)
| {
0000000| auto res = syscall(GETRANDOM, cast(size_t) ptr, len, 0);
0000000| if (res >= 0)
| {
0000000| len -= res;
0000000| ptr += res;
| }
| else
| {
0000000| return res;
| }
| }
0000000| return 0;
| }
|
| /*
| * If the GRND_NONBLOCK flag is set, then
| * getrandom() does not block in these cases, but instead
| * immediately returns -1 with errno set to EAGAIN.
| */
| private ptrdiff_t genRandomImplSysNonBlocking()(scope void* ptr, size_t len) @nogc nothrow @system
| {
0000000| return syscall(GETRANDOM, cast(size_t) ptr, len, GRND_NONBLOCK);
| }
|}
|
|version(AnyARC4Random)
|extern(C) private @nogc nothrow
|{
| void arc4random_buf(scope void* buf, size_t nbytes) @system;
| uint arc4random() @trusted;
|}
|
|version(Darwin)
|{
| //On Darwin /dev/random is identical to /dev/urandom (neither blocks
| //when there is low system entropy) so there is no point mucking
| //about with file descriptors. Just use arc4random_buf for both.
|}
|else version(Posix)
|{
| import core.stdc.stdio : fclose, feof, ferror, fopen, fread;
| alias IOType = typeof(fopen("a", "b"));
| private __gshared IOType fdRandom;
| version (SecureARC4Random)
| {
| //Don't need /dev/urandom if we have arc4random_buf.
| }
| else
| private __gshared IOType fdURandom;
|
|
| /* The /dev/random device is a legacy interface which dates back to a
| time where the cryptographic primitives used in the implementation of
| /dev/urandom were not widely trusted. It will return random bytes
| only within the estimated number of bits of fresh noise in the
| entropy pool, blocking if necessary. /dev/random is suitable for
| applications that need high quality randomness, and can afford
| indeterminate delays.
|
| When the entropy pool is empty, reads from /dev/random will block
| until additional environmental noise is gathered.
| */
| private ptrdiff_t genRandomImplFileBlocking()(scope void* ptr, size_t len) @nogc nothrow @system
| {
0000000| if (fdRandom is null)
| {
0000000| fdRandom = fopen("/dev/random", "r");
0000000| if (fdRandom is null)
0000000| return -1;
| }
|
0000000| while (len > 0)
| {
0000000| auto res = fread(ptr, 1, len, fdRandom);
0000000| len -= res;
0000000| ptr += res;
| // check for possible permanent errors
0000000| if (len != 0)
| {
0000000| if (fdRandom.ferror)
0000000| return -1;
|
0000000| if (fdRandom.feof)
0000000| return -1;
| }
| }
|
0000000| return 0;
| }
|}
|
|version (SecureARC4Random)
|{
| //Don't need /dev/urandom if we have arc4random_buf.
|}
|else version(Posix)
|{
| /**
| When read, the /dev/urandom device returns random bytes using a
| pseudorandom number generator seeded from the entropy pool. Reads
| from this device do not block (i.e., the CPU is not yielded), but can
| incur an appreciable delay when requesting large amounts of data.
| When read during early boot time, /dev/urandom may return data prior
| to the entropy pool being initialized.
| */
| private ptrdiff_t genRandomImplFileNonBlocking()(scope void* ptr, size_t len) @nogc nothrow @system
| {
0000000| if (fdURandom is null)
| {
0000000| fdURandom = fopen("/dev/urandom", "r");
0000000| if (fdURandom is null)
0000000| return -1;
| }
|
0000000| auto res = fread(ptr, 1, len, fdURandom);
| // check for possible errors
0000000| if (res != len)
| {
0000000| if (fdURandom.ferror)
0000000| return -1;
|
0000000| if (fdURandom.feof)
0000000| return -1;
| }
0000000| return res;
| }
|}
|
|version(Windows)
|{
| // the wincrypt headers in druntime are broken for x64!
| private alias ULONG_PTR = size_t; // uint in druntime
| private alias BOOL = bool;
| private alias DWORD = uint;
| private alias LPCWSTR = wchar*;
| private alias PBYTE = ubyte*;
| private alias HCRYPTPROV = ULONG_PTR;
| private alias LPCSTR = const(char)*;
|
| private extern(Windows) BOOL CryptGenRandom(HCRYPTPROV, DWORD, PBYTE) @nogc @safe nothrow;
| private extern(Windows) BOOL CryptAcquireContextA(HCRYPTPROV*, LPCSTR, LPCSTR, DWORD, DWORD) @nogc nothrow;
| private extern(Windows) BOOL CryptAcquireContextW(HCRYPTPROV*, LPCWSTR, LPCWSTR, DWORD, DWORD) @nogc nothrow;
| private extern(Windows) BOOL CryptReleaseContext(HCRYPTPROV, ULONG_PTR) @nogc nothrow;
|
| private __gshared ULONG_PTR hProvider;
|
| private auto initGetRandom()() @nogc @trusted nothrow
| {
| import core.sys.windows.winbase : GetLastError;
| import core.sys.windows.winerror : NTE_BAD_KEYSET;
| import core.sys.windows.wincrypt : PROV_RSA_FULL, CRYPT_NEWKEYSET, CRYPT_VERIFYCONTEXT, CRYPT_SILENT;
|
| // https://msdn.microsoft.com/en-us/library/windows/desktop/aa379886(v=vs.85).aspx
| // For performance reasons, we recommend that you set the pszContainer
| // parameter to NULL and the dwFlags parameter to CRYPT_VERIFYCONTEXT
| // in all situations where you do not require a persisted key.
| // CRYPT_SILENT is intended for use with applications for which the UI cannot be displayed by the CSP.
| if (!CryptAcquireContextW(&hProvider, null, null, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT | CRYPT_SILENT))
| {
| if (GetLastError() == NTE_BAD_KEYSET)
| {
| // Attempt to create default container
| if (!CryptAcquireContextA(&hProvider, null, null, PROV_RSA_FULL, CRYPT_NEWKEYSET | CRYPT_SILENT))
| return 1;
| }
| else
| {
| return 1;
| }
| }
|
| return 0;
| }
|}
|
|/++
|Constructs the mir random seed generators.
|This constructor needs to be called once $(I before)
|other calls in `mir.random.engine`.
|
|Automatically called by DRuntime.
|+/
|extern(C) void mir_random_engine_ctor() @system nothrow @nogc
|{
| version(Windows)
| {
| if (hProvider == 0)
| initGetRandom;
| }
|}
|
|/++
|Destructs the mir random seed generators.
|
|Automatically called by DRuntime.
|+/
|extern(C) void mir_random_engine_dtor() @system nothrow @nogc
|{
| version(Windows)
| {
| if (hProvider > 0)
| CryptReleaseContext(hProvider, 0);
| }
| else
| version(Darwin)
| {
|
| }
| else
| version(Posix)
| {
0000000| if (fdRandom !is null)
0000000| fdRandom.fclose;
|
| version (SecureARC4Random)
| {
| //Don't need /dev/urandom if we have arc4random_buf.
| }
0000000| else if (fdURandom !is null)
0000000| fdURandom.fclose;
| }
|}
|
|
|version(D_BetterC)
|{
| pragma(crt_constructor)
| extern(C) void mir_random_engine_ctor_() @system nothrow @nogc
| {
| mir_random_engine_ctor();
| }
|
| pragma(crt_destructor)
| extern(C) void mir_random_engine_dtor_() @system nothrow @nogc
| {
| mir_random_engine_dtor();
| }
|}
|else
|{
| /// Automatically calls the extern(C) module constructor
| shared static this()
| {
1| mir_random_engine_ctor();
| }
|
| /// Automatically calls the extern(C) module destructor
| shared static ~this()
| {
0000000| mir_random_engine_dtor();
| }
|}
|
|/++
|Fills a buffer with random data.
|If not enough entropy has been gathered, it will block.
|
|Note that on Mac OS X this method will never block.
|
|Params:
| ptr = pointer to the buffer to fill
| len = length of the buffer (in bytes)
|
|Returns:
| A non-zero integer if an error occurred.
|+/
|extern(C) ptrdiff_t mir_random_genRandomBlocking(scope void* ptr, size_t len) @nogc nothrow @system
|{
| version(Windows)
| {
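| // CryptGenRandom takes the length as a DWORD: on 32-bit targets the whole request
| // fits in a single call, otherwise it is issued in DWORD-sized chunks.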
| static if (DWORD.max >= size_t.max)
| while(!CryptGenRandom(hProvider, len, cast(PBYTE) ptr)) {}
| else
| while (len != 0)
| {
| import mir.utility : min;
| const n = min(DWORD.max, len);
| if (CryptGenRandom(hProvider, cast(DWORD) n, cast(PBYTE) ptr))
| {
| len -= n;
| ptr += n; // advance so the next chunk does not overwrite this one
| }
| }
| return 0;
| }
| else version (Darwin)
| {
| arc4random_buf(ptr, len);
| return 0;
| }
| else
| {
| static if (LINUX_NR_GETRANDOM)
0000000| if (!getRandomFailedENOSYS) // harmless data race
| {
| import core.stdc.errno;
0000000| ptrdiff_t result = genRandomImplSysBlocking(ptr, len);
0000000| if (result >= 0)
0000000| return result;
0000000| if (errno != ENOSYS)
0000000| return result;
0000000| getRandomFailedENOSYS = true; // harmless data race
| }
0000000| return genRandomImplFileBlocking(ptr, len);
| }
|}
|
|/// ditto
|alias genRandomBlocking = mir_random_genRandomBlocking;
|
|/// ditto
|ptrdiff_t genRandomBlocking()(scope ubyte[] buffer) @nogc nothrow @trusted
|{
| pragma(inline, true);
| return mir_random_genRandomBlocking(buffer.ptr, buffer.length);
|}
|
|///
|@safe nothrow version(mir_random_test) unittest
|{
| ubyte[] buf = new ubyte[10];
| genRandomBlocking(buf);
|
| int sum;
| foreach (b; buf)
| sum += b;
|
| assert(sum > 0, "Only zero points generated");
|}
|
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| ubyte[10] buf;
| genRandomBlocking(buf);
|
| int sum;
| foreach (b; buf)
| sum += b;
|
| assert(sum > 0, "Only zero points generated");
|}
|
|/++
|Fills a buffer with random data.
|If not enough entropy has been gathered, it won't block.
|Hence the error code should be inspected.
|
|On Linux >= 3.17 genRandomNonBlocking is guaranteed to succeed for 256 bytes and
|fewer.
|
|On Mac OS X, OpenBSD, and NetBSD genRandomNonBlocking is guaranteed to
|succeed for any number of bytes.
|
|Params:
| buffer = the buffer to fill
| len = length of the buffer (in bytes)
|
|Returns:
| The number of bytes filled, or a negative number if an error occurred
|+/
|extern(C) size_t mir_random_genRandomNonBlocking(scope void* ptr, size_t len) @nogc nothrow @system
|{
| version(Windows)
| {
| static if (DWORD.max < size_t.max)
| if (len > DWORD.max)
| len = DWORD.max;
| if (!CryptGenRandom(hProvider, cast(DWORD) len, cast(PBYTE) ptr))
| return -1;
| return len;
| }
| else version(SecureARC4Random)
| {
| arc4random_buf(ptr, len);
| return len;
| }
| else
| {
| static if (LINUX_NR_GETRANDOM)
0000000| if (!getRandomFailedENOSYS) // harmless data race
| {
| import core.stdc.errno;
0000000| ptrdiff_t result = genRandomImplSysNonBlocking(ptr, len);
0000000| if (result >= 0)
0000000| return result;
0000000| if (errno != ENOSYS)
0000000| return result;
0000000| getRandomFailedENOSYS = true; // harmless data race
| }
0000000| return genRandomImplFileNonBlocking(ptr, len);
| }
|}
|/// ditto
|alias genRandomNonBlocking = mir_random_genRandomNonBlocking;
|/// ditto
|size_t genRandomNonBlocking()(scope ubyte[] buffer) @nogc nothrow @trusted
|{
| pragma(inline, true);
| return mir_random_genRandomNonBlocking(buffer.ptr, buffer.length);
|}
|
|///
|@safe nothrow version(mir_random_test) unittest
|{
| ubyte[] buf = new ubyte[10];
| genRandomNonBlocking(buf);
|
| int sum;
| foreach (b; buf)
| sum += b;
|
| assert(sum > 0, "Only zero points generated");
|}
|
|@nogc nothrow @safe
|version(mir_random_test) unittest
|{
| ubyte[10] buf;
| genRandomNonBlocking(buf);
|
| int sum;
| foreach (b; buf)
| sum += b;
|
| assert(sum > 0, "Only zero points generated");
|}
../../../.dub/packages/mir-random-2.2.15/mir-random/source/mir/random/engine/package.d is 1% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-traits.lst
|/++
|$(H2 Multidimensional traits)
|
|This is a submodule of $(MREF mir,ndslice).
|
|$(BOOKTABLE $(H2 Function),
|$(TR $(TH Function Name) $(TH Description))
|
|$(T2 isVector, Test if type is a one-dimensional slice.)
|$(T2 isMatrix, Test if type is a two-dimensional slice.)
|$(T2 isContiguousSlice, Test if type is a contiguous slice.)
|$(T2 isCanonicalSlice, Test if type is a canonical slice.)
|$(T2 isUniversalSlice, Test if type is a universal slice.)
|$(T2 isContiguousVector, Test if type is a contiguous one-dimensional slice.)
|$(T2 isUniversalVector, Test if type is a universal one-dimensional slice.)
|$(T2 isContiguousMatrix, Test if type is a contiguous two-dimensional slice.)
|$(T2 isCanonicalMatrix, Test if type is a canonical two-dimensional slice.)
|$(T2 isUniversalMatrix, Test if type is a universal two-dimensional slice.)
|$(T2 isIterator, Test if type is a random access iterator.)
|)
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: John Hall
|
|
|Macros:
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|
|module mir.ndslice.traits;
|
|import mir.ndslice.slice : Slice, SliceKind, Contiguous, Universal, Canonical;
|
|/// Test if type is a one-dimensional slice.
|enum bool isVector(T) = is(T : Slice!(Iterator, 1, kind), SliceKind kind, Iterator);
|
|/// Test if type is a two-dimensional slice.
|enum bool isMatrix(T) = is(T : Slice!(Iterator, 2, kind), SliceKind kind, Iterator);
|
|/// Test if type is a contiguous slice.
|enum bool isContiguousSlice(T) = is(T : Slice!(Iterator, N, Contiguous), Iterator, size_t N);
|
|/// Test if type is a canonical slice.
|enum bool isCanonicalSlice(T) = is(T : Slice!(Iterator, N, Canonical), Iterator, size_t N);
|
|/// Test if type is a universal slice.
|enum bool isUniversalSlice(T) = is(T : Slice!(Iterator, N, Universal), Iterator, size_t N);
|
|/// Test if type is a contiguous one-dimensional slice.
|enum bool isContiguousVector(T) = is(T : Slice!(Iterator, 1, Contiguous), Iterator);
|
|/// Test if type is a universal one-dimensional slice.
|enum bool isUniversalVector(T) = is(T : Slice!(Iterator, 1, Universal), Iterator);
|
|/// Test if type is a contiguous two-dimensional slice.
|enum bool isContiguousMatrix(T) = is(T : Slice!(Iterator, 2, Contiguous), Iterator);
|
|/// Test if type is a canonical two-dimensional slice.
|enum bool isCanonicalMatrix(T) = is(T : Slice!(Iterator, 2, Canonical), Iterator);
|
|/// Test if type is a universal two-dimensional slice.
|enum bool isUniversalMatrix(T) = is(T : Slice!(Iterator, 2, Universal), Iterator);
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.slice : Slice;
|
| alias S1 = Slice!(int*);
| static assert(isContiguousVector!S1);
| static assert(!isUniversalVector!S1);
|
| static assert(!isContiguousMatrix!S1);
| static assert(!isCanonicalMatrix!S1);
| static assert(!isUniversalMatrix!S1);
|
| static assert(isVector!S1);
| static assert(!isMatrix!S1);
|
| static assert(isContiguousSlice!S1);
| static assert(!isCanonicalSlice!S1);
| static assert(!isUniversalSlice!S1);
|}
|
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| alias S2 = Slice!(float*, 1, Universal);
| static assert(!isContiguousVector!S2);
| static assert(isUniversalVector!S2);
|
| static assert(!isContiguousMatrix!S2);
| static assert(!isCanonicalMatrix!S2);
| static assert(!isUniversalMatrix!S2);
|
| static assert(isVector!S2);
| static assert(!isMatrix!S2);
|
| static assert(!isContiguousSlice!S2);
| static assert(!isCanonicalSlice!S2);
| static assert(isUniversalSlice!S2);
|}
|
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| alias S3 = Slice!(byte*, 2);
| static assert(!isContiguousVector!S3);
| static assert(!isUniversalVector!S3);
|
| static assert(isContiguousMatrix!S3);
| static assert(!isCanonicalMatrix!S3);
| static assert(!isUniversalMatrix!S3);
|
| static assert(!isVector!S3);
| static assert(isMatrix!S3);
|
| static assert(isContiguousSlice!S3);
| static assert(!isCanonicalSlice!S3);
| static assert(!isUniversalSlice!S3);
|}
|
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| alias S4 = Slice!(int*, 2, Canonical);
| static assert(!isContiguousVector!S4);
| static assert(!isUniversalVector!S4);
|
| static assert(!isContiguousMatrix!S4);
| static assert(isCanonicalMatrix!S4);
| static assert(!isUniversalMatrix!S4);
|
| static assert(!isVector!S4);
| static assert(isMatrix!S4);
|
| static assert(!isContiguousSlice!S4);
| static assert(isCanonicalSlice!S4);
| static assert(!isUniversalSlice!S4);
|}
|
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| alias S5 = Slice!(int*, 2, Universal);
| static assert(!isContiguousVector!S5);
| static assert(!isUniversalVector!S5);
|
| static assert(!isContiguousMatrix!S5);
| static assert(!isCanonicalMatrix!S5);
| static assert(isUniversalMatrix!S5);
|
| static assert(!isVector!S5);
| static assert(isMatrix!S5);
|
| static assert(!isContiguousSlice!S5);
| static assert(!isCanonicalSlice!S5);
| static assert(isUniversalSlice!S5);
|}
|
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| alias S6 = Slice!(int*, 3);
|
| static assert(!isContiguousVector!S6);
| static assert(!isUniversalVector!S6);
|
| static assert(!isContiguousMatrix!S6);
| static assert(!isCanonicalMatrix!S6);
| static assert(!isUniversalMatrix!S6);
|
| static assert(!isVector!S6);
| static assert(!isMatrix!S6);
|
| static assert(isContiguousSlice!S6);
| static assert(!isCanonicalSlice!S6);
| static assert(!isUniversalSlice!S6);
|}
|
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| alias S7 = Slice!(int*, 3, Canonical);
|
| static assert(!isContiguousVector!S7);
| static assert(!isUniversalVector!S7);
|
| static assert(!isContiguousMatrix!S7);
| static assert(!isCanonicalMatrix!S7);
| static assert(!isUniversalMatrix!S7);
|
| static assert(!isVector!S7);
| static assert(!isMatrix!S7);
|
| static assert(!isContiguousSlice!S7);
| static assert(isCanonicalSlice!S7);
| static assert(!isUniversalSlice!S7);
|}
|
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| alias S8 = Slice!(int*, 3, Universal);
|
| static assert(!isContiguousVector!S8);
| static assert(!isUniversalVector!S8);
|
| static assert(!isContiguousMatrix!S8);
| static assert(!isCanonicalMatrix!S8);
| static assert(!isUniversalMatrix!S8);
|
| static assert(!isVector!S8);
| static assert(!isMatrix!S8);
|
| static assert(!isContiguousSlice!S8);
| static assert(!isCanonicalSlice!S8);
| static assert(isUniversalSlice!S8);
|}
|
|///
|template isIterator(T)
|{
| enum isIterator = __traits(compiles, (T a, T b)
| {
| sizediff_t diff = a - b;
| ++a;
| ++b;
| --a;
| --b;
| void foo(V)(auto ref V v)
| {
|
| }
| foo(a[sizediff_t(3)]);
| auto c = a + sizediff_t(3);
| auto d = a - sizediff_t(3);
| a += sizediff_t(3);
| a -= sizediff_t(3);
| foo(*a);
| });
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/traits.d has no code
<<<<<< EOF
# path=./source-mir-sparse-blas-package.lst
|/**
|License: $(LINK2 http://boost.org/LICENSE_1_0.txt, Boost License 1.0).
|
|Authors: Ilya Yaroshenko
|*/
|module mir.sparse.blas;
|
|public import mir.sparse.blas.dot;
|public import mir.sparse.blas.axpy;
|public import mir.sparse.blas.gemv;
|public import mir.sparse.blas.gemm;
source/mir/sparse/blas/package.d has no code
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-allocation.lst
|/++
|This is a submodule of $(MREF mir,ndslice).
|
|It contains allocation utilities.
|
|
|$(BOOKTABLE $(H2 Common utilities),
|$(T2 shape, Returns a shape of a common n-dimensional array. )
|)
|
|$(BOOKTABLE $(H2 GC Allocation utilities),
|$(TR $(TH Function Name) $(TH Description))
|$(T2 slice, Allocates a slice using GC.)
|$(T2 bitSlice, GC-Allocates a bitwise packed n-dimensional boolean slice.)
|$(T2 ndarray, Allocates a common n-dimensional array from a slice. )
|$(T2 uninitSlice, Allocates an uninitialized slice using GC. )
|)
|
|$(BOOKTABLE $(H2 Ref counted allocation utilities),
|$(T2 rcslice, Allocates an n-dimensional reference-counted (thread-safe) slice.)
|$(T2 bitRcslice, Allocates a bitwise packed n-dimensional reference-counted (thread-safe) boolean slice.)
|$(T2 mininitRcslice, Allocates a minimally initialized n-dimensional reference-counted (thread-safe) slice.)
|)
|
|$(BOOKTABLE $(H2 Custom allocation utilities),
|$(TR $(TH Function Name) $(TH Description))
|$(T2 makeNdarray, Allocates a common n-dimensional array from a slice using an allocator. )
|$(T2 makeSlice, Allocates a slice using an allocator. )
|$(T2 makeUninitSlice, Allocates an uninitialized slice using an allocator. )
|)
|
|$(BOOKTABLE $(H2 CRuntime allocation utilities),
|$(TR $(TH Function Name) $(TH Description))
|$(T2 stdcSlice, Allocates a slice copy using `core.stdc.stdlib.malloc`)
|$(T2 stdcUninitSlice, Allocates an uninitialized slice using `core.stdc.stdlib.malloc`.)
|$(T2 stdcFreeSlice, Frees memory using `core.stdc.stdlib.free`)
|)
|
|$(BOOKTABLE $(H2 Aligned allocation utilities),
|$(TR $(TH Function Name) $(TH Description))
|$(T2 uninitAlignedSlice, Allocates an uninitialized aligned slice using GC. )
|$(T2 stdcUninitAlignedSlice, Allocates an uninitialized aligned slice using CRuntime.)
|$(T2 stdcFreeAlignedSlice, Frees memory using CRuntime)
|)
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko
|
|Macros:
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|module mir.ndslice.allocation;
|
|import mir.math.common: optmath;
|import mir.ndslice.concatenation;
|import mir.ndslice.field: BitField;
|import mir.ndslice.internal;
|import mir.ndslice.iterator: FieldIterator;
|import mir.ndslice.slice;
|import mir.rc.array;
|import std.traits;
|import std.meta: staticMap;
|
|@optmath:
|
|/++
|Allocates an n-dimensional reference-counted (thread-safe) slice.
|Params:
| lengths = List of lengths for each dimension.
| init = Value to initialize with (optional).
| slice = Slice to copy shape and data from (optional).
|Returns:
| n-dimensional slice
|+/
|Slice!(RCI!T, N)
| rcslice(T, size_t N)(size_t[N] lengths...)
|{
| immutable len = lengths.lengthsProduct;
| auto _lengths = lengths;
| return typeof(return)(_lengths, RCI!T(RCArray!T(len)));
|}
|
|/// ditto
|Slice!(RCI!T, N)
| rcslice(T, size_t N)(size_t[N] lengths, T init)
|{
| auto ret = (()@trusted => mininitRcslice!T(lengths))();
| ret.lightScope.field[] = init;
| return ret;
|}
|
|/// ditto
|auto rcslice(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
|{
| import mir.conv: emplaceRef;
| alias E = slice.DeepElement;
|
| auto result = (() @trusted => slice.shape.mininitRcslice!(Unqual!E))();
|
| import mir.algorithm.iteration: each;
| each!(emplaceRef!E)(result.lightScope, slice.lightScope);
|
| return *(() @trusted => cast(Slice!(RCI!E, N)*) &result)();
|}
|
|/// ditto
|auto rcslice(T)(T[] array)
|{
| return rcslice(array.sliced);
|}
|
|/// ditto
|auto rcslice(T, I)(I[] array)
| if (!isImplicitlyConvertible!(I[], T[]))
|{
| import mir.ndslice.topology: as;
| return rcslice(array.sliced.as!T);
|}
|
|///
|version(mir_test)
|@safe pure nothrow @nogc unittest
|{
| import mir.ndslice.slice: Slice;
| import mir.rc.array: RCI;
| auto tensor = rcslice!int(5, 6, 7);
| assert(tensor.length == 5);
| assert(tensor.elementCount == 5 * 6 * 7);
| static assert(is(typeof(tensor) == Slice!(RCI!int, 3)));
|
| // creates duplicate using `rcslice`
| auto dup = tensor.rcslice;
| assert(dup == tensor);
|}
|
|///
|version(mir_test)
|@safe pure nothrow @nogc unittest
|{
| import mir.ndslice.slice: Slice;
| import mir.rc.array: RCI;
| auto tensor = rcslice([2, 3], 5);
| assert(tensor.elementCount == 2 * 3);
| assert(tensor[1, 1] == 5);
|
| import mir.rc.array;
| static assert(is(typeof(tensor) == Slice!(RCI!int, 2)));
|}
|
|/// ditto
|auto rcslice(size_t dim, Slices...)(Concatenation!(dim, Slices) concatenation)
|{
| alias T = Unqual!(concatenation.DeepElement);
| auto ret = (()@trusted => mininitRcslice!T(concatenation.shape))();
| ret.lightScope.opIndexAssign(concatenation);
| return ret;
|}
|
|///
|version(mir_test)
|@safe pure nothrow @nogc unittest
|{
| import mir.ndslice.slice: Slice;
| import mir.ndslice.topology : iota;
| import mir.ndslice.concatenation;
| auto tensor = concatenation([2, 3].iota, [3].iota(6)).rcslice;
| assert(tensor == [3, 3].iota);
|
| static assert(is(typeof(tensor) == Slice!(RCI!ptrdiff_t, 2)));
|}
|
|/++
|Allocates an n-dimensional reference-counted (thread-safe) slice without memory initialisation.
|Params:
| lengths = List of lengths for each dimension.
|Returns:
| n-dimensional slice
|+/
|Slice!(RCI!T, N)
| uninitRCslice(T, size_t N)(size_t[N] lengths...)
|{
| immutable len = lengths.lengthsProduct;
| auto _lengths = lengths;
| return typeof(return)(_lengths, RCI!T(RCArray!T(len, false)));
|}
|
|///
|version(mir_test)
|@safe pure nothrow @nogc unittest
|{
| import mir.ndslice.slice: Slice;
| import mir.rc.array: RCI;
| auto tensor = uninitRCslice!int(5, 6, 7);
| tensor[] = 1;
| assert(tensor.length == 5);
| assert(tensor.elementCount == 5 * 6 * 7);
| static assert(is(typeof(tensor) == Slice!(RCI!int, 3)));
|}
|
|/++
|Allocates a bitwise packed n-dimensional reference-counted (thread-safe) boolean slice.
|Params:
| lengths = List of lengths for each dimension.
|Returns:
| n-dimensional bitwise rcslice
|See_also: $(SUBREF topology, bitwise).
|+/
|Slice!(FieldIterator!(BitField!(RCI!size_t)), N) bitRcslice(size_t N)(size_t[N] lengths...)
|{
| import mir.ndslice.topology: bitwise;
| enum elen = size_t.sizeof * 8;
| immutable len = lengths.lengthsProduct;
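| // number of size_t words needed to hold `len` bits, rounded up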
| immutable dlen = (len / elen + (len % elen != 0));
| return RCArray!size_t(dlen).asSlice.bitwise[0 .. len].sliced(lengths);
|}
|
|/// 1D
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| auto bitarray = 100.bitRcslice; // allocates 16 bytes total (plus RC context)
| assert(bitarray.shape == cast(size_t[1])[100]);
| assert(bitarray[72] == false);
| bitarray[72] = true;
| assert(bitarray[72] == true);
|}
|
|/// 2D
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| auto bitmatrix = bitRcslice(20, 6); // allocates 16 bytes total (plus RC context)
| assert(bitmatrix.shape == cast(size_t[2])[20, 6]);
| assert(bitmatrix[3, 4] == false);
| bitmatrix[3, 4] = true;
| assert(bitmatrix[3, 4] == true);
|}
|
|/++
|Allocates a minimally initialized n-dimensional reference-counted (thread-safe) slice.
|Params:
| lengths = list of lengths for each dimension
|Returns:
| contiguous minimally initialized n-dimensional reference-counted (thread-safe) slice
|+/
|Slice!(RCI!T, N) mininitRcslice(T, size_t N)(size_t[N] lengths...)
|{
| immutable len = lengths.lengthsProduct;
| auto _lengths = lengths;
| return Slice!(RCI!T, N)(_lengths, RCI!T(mininitRcarray!T(len)));
|}
|
|///
|version(mir_test)
|pure nothrow @nogc unittest
|{
| import mir.ndslice.slice: Slice;
| import mir.rc.array: RCI;
| auto tensor = mininitRcslice!int(5, 6, 7);
| assert(tensor.length == 5);
| assert(tensor.elementCount == 5 * 6 * 7);
| static assert(is(typeof(tensor) == Slice!(RCI!int, 3)));
|}
|
|private alias Pointer(T) = T*;
|private alias Pointers(Args...) = staticMap!(Pointer, Args);
|
|/++
|GC-Allocates an n-dimensional slice.
|+/
|template slice(Args...)
| if (Args.length)
|{
| ///
| alias LabelTypes = Args[1 .. $];
| ///
| alias T = Args[0];
|
| /++
| Params:
| lengths = List of lengths for each dimension.
| init = Value to initialize with (optional).
| Returns:
| initialized n-dimensional slice
| +/
| Slice!(T*, N, Contiguous, Pointers!LabelTypes)
| slice(size_t N)(size_t[N] lengths...)
| if (N >= LabelTypes.length)
| {
| auto shape = lengths; // DMD variadic bug workaround
| immutable len = shape.lengthsProduct;
| auto ret = typeof(return)(shape, len == 0 ? null : (()@trusted=>new T[len].ptr)());
| foreach (i, L; LabelTypes) // static
| ret._labels[i] = (()@trusted=>new L[shape[i]].ptr)();
| return ret;
| }
|
| /// ditto
| Slice!(T*, N, Contiguous, Pointers!LabelTypes)
| slice(size_t N)(size_t[N] lengths, T init)
| if (N >= LabelTypes.length)
| {
| import mir.conv: emplaceRef;
| import std.array : uninitializedArray;
| immutable len = lengths.lengthsProduct;
| auto arr = uninitializedArray!(Unqual!T[])(len);
| foreach (ref e; arr)
| emplaceRef(e, init);
| auto ret = typeof(return)(lengths, len == 0 ? null : (()@trusted=>cast(T*)arr.ptr)());
| foreach (i, L; LabelTypes) // static
| ret._labels[i] = (()@trusted=>new L[lengths[i]].ptr)();
| return ret;
| }
|}
|
|///
|version(mir_test)
|@safe pure nothrow unittest
|{
| import mir.ndslice.slice: Slice;
| auto tensor = slice!int(5, 6, 7);
| assert(tensor.length == 5);
| assert(tensor.length!1 == 6);
| assert(tensor.elementCount == 5 * 6 * 7);
| static assert(is(typeof(tensor) == Slice!(int*, 3)));
|}
|
|/// 2D DataFrame example
|version(mir_test)
|@safe pure unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.allocation: slice;
| import mir.date: Date;
|
| auto dataframe = slice!(double, Date, string)(4, 3);
| assert(dataframe.length == 4);
| assert(dataframe.length!1 == 3);
| assert(dataframe.elementCount == 4 * 3);
|
| static assert(is(typeof(dataframe) ==
| Slice!(double*, 2, Contiguous, Date*, string*)));
|
| // Dataframe labels are contiguous 1-dimensional slices.
|
| // Fill row labels
| dataframe.label[] = [
| Date(2019, 1, 24),
| Date(2019, 2, 2),
| Date(2019, 2, 4),
| Date(2019, 2, 5),
| ];
|
| assert(dataframe.label!0[2] == Date(2019, 2, 4));
|
| // Fill column labels
| dataframe.label!1[] = ["income", "outcome", "balance"];
|
| assert(dataframe.label!1[2] == "balance");
|
| // Change label element
| dataframe.label!1[2] = "total";
| assert(dataframe.label!1[2] == "total");
|
| // Attach a newly allocated label
| dataframe.label!1 = ["Income", "Outcome", "Balance"].sliced;
|
| assert(dataframe.label!1[2] == "Balance");
|}
|
|/++
|GC-Allocates an n-dimensional slice.
|Params:
| lengths = List of lengths for each dimension.
| init = Value to initialize with (optional).
|Returns:
| initialized n-dimensional slice
|+/
|Slice!(T*, N)
| slice(size_t N, T)(size_t[N] lengths, T init)
|{
| return .slice!T(lengths, init);
|}
|
|// TODO: make it a dataframe compatible. This function performs copy.
|/// ditto
|auto slice(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
|{
| if (__ctfe)
| {
| import mir.ndslice.topology: flattened;
| import mir.array.allocation: array;
| return slice.flattened.array.sliced(slice.shape);
| }
| else
| {
| import mir.conv: emplaceRef;
| alias E = slice.DeepElement;
|
| auto result = (() @trusted => slice.shape.uninitSlice!(Unqual!E))();
|
| import mir.algorithm.iteration: each;
| each!(emplaceRef!E)(result, slice);
|
| return (() @trusted => cast(Slice!(E*, N)) result)();
| }
|}
|
|///
|version(mir_test)
|@safe pure nothrow unittest
|{
| auto tensor = slice([2, 3], 5);
| assert(tensor.elementCount == 2 * 3);
| assert(tensor[1, 1] == 5);
|
| // creates duplicate using `slice`
| auto dup = tensor.slice;
| assert(dup == tensor);
|}
|
|/// ditto
|auto slice(size_t dim, Slices...)(Concatenation!(dim, Slices) concatenation)
|{
| alias T = Unqual!(concatenation.DeepElement);
| static if (hasElaborateAssign!T)
| alias fun = .slice;
| else
| alias fun = .uninitSlice;
| auto ret = (()@trusted => fun!T(concatenation.shape))();
| ret.opIndexAssign(concatenation);
| return ret;
|}
|
|///
|version(mir_test)
|@safe pure nothrow unittest
|{
| import mir.ndslice.slice: Slice;
| import mir.ndslice.topology : iota;
| import mir.ndslice.concatenation;
| auto tensor = concatenation([2, 3].iota, [3].iota(6)).slice;
| assert(tensor == [3, 3].iota);
|
| static assert(is(typeof(tensor) == Slice!(ptrdiff_t*, 2)));
|}
|
|/++
|GC-Allocates a bitwise packed n-dimensional boolean slice.
|Params:
| lengths = List of lengths for each dimension.
|Returns:
| n-dimensional bitwise slice
|See_also: $(SUBREF topology, bitwise).
|+/
|Slice!(FieldIterator!(BitField!(size_t*)), N) bitSlice(size_t N)(size_t[N] lengths...)
|{
| import mir.ndslice.topology: bitwise;
| enum elen = size_t.sizeof * 8;
| immutable len = lengths.lengthsProduct;
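| // number of size_t words needed to hold `len` bits, rounded up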
| immutable dlen = (len / elen + (len % elen != 0));
| return new size_t[dlen].sliced.bitwise[0 .. len].sliced(lengths);
|}
|
|/// 1D
|@safe pure version(mir_test) unittest
|{
| auto bitarray = bitSlice(100); // allocates 16 bytes total
| assert(bitarray.shape == [100]);
| assert(bitarray[72] == false);
| bitarray[72] = true;
| assert(bitarray[72] == true);
|}
|
|/// 2D
|@safe pure version(mir_test) unittest
|{
| auto bitmatrix = bitSlice(20, 6); // allocates 16 bytes total
| assert(bitmatrix.shape == [20, 6]);
| assert(bitmatrix[3, 4] == false);
| bitmatrix[3, 4] = true;
| assert(bitmatrix[3, 4] == true);
|}
|
|/++
|GC-Allocates an uninitialized n-dimensional slice.
|Params:
| lengths = list of lengths for each dimension
|Returns:
| contiguous uninitialized n-dimensional slice
|+/
|auto uninitSlice(T, size_t N)(size_t[N] lengths...)
|{
| immutable len = lengths.lengthsProduct;
| import std.array : uninitializedArray;
| auto arr = uninitializedArray!(T[])(len);
| version (mir_secure_memory)
| {()@trusted{
| (cast(ubyte[])arr)[] = 0;
| }();}
| return arr.sliced(lengths);
|}
|
|///
|version(mir_test)
|@safe pure nothrow unittest
|{
| import mir.ndslice.slice: Slice;
| auto tensor = uninitSlice!int(5, 6, 7);
| assert(tensor.length == 5);
| assert(tensor.elementCount == 5 * 6 * 7);
| static assert(is(typeof(tensor) == Slice!(int*, 3)));
|}
|
|/++
|GC-Allocates an uninitialized aligned n-dimensional slice.
|Params:
| lengths = list of lengths for each dimension
| alignment = memory alignment (bytes)
|Returns:
| contiguous uninitialized n-dimensional slice
|+/
|auto uninitAlignedSlice(T, size_t N)(size_t[N] lengths, uint alignment) @system
|{
| immutable len = lengths.lengthsProduct;
| import std.array : uninitializedArray;
| assert((alignment != 0) && ((alignment & (alignment - 1)) == 0), "'alignment' must be a power of two");
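| // The code relies on GC allocations being at least 16-byte aligned, so no extra
| // padding is reserved for alignment <= 16; larger alignments over-allocate by
| // alignment - 1 bytes and round the base pointer up to the requested boundary.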
| size_t offset = alignment <= 16 ? 0 : alignment - 1;
| void* basePtr = uninitializedArray!(byte[])(len * T.sizeof + offset).ptr;
| T* alignedPtr = cast(T*)((cast(size_t)(basePtr) + offset) & ~offset);
| return alignedPtr.sliced(lengths);
|}
|
|///
|version(mir_test)
|@system pure nothrow unittest
|{
| import mir.ndslice.slice: Slice;
| auto tensor = uninitAlignedSlice!double([5, 6, 7], 64);
| tensor[] = 0;
| assert(tensor.length == 5);
| assert(tensor.elementCount == 5 * 6 * 7);
| assert(cast(size_t)(tensor.ptr) % 64 == 0);
| static assert(is(typeof(tensor) == Slice!(double*, 3)));
|}
|
|/++
|Allocates an array through a specified allocator and creates an n-dimensional slice over it.
|See also $(MREF std, experimental, allocator).
|Params:
| alloc = allocator
| lengths = list of lengths for each dimension
| init = default value for array initialization
| slice = slice to copy shape and data from
|Returns:
| n-dimensional slice over memory allocated by `alloc`
|Note:
| `makeSlice` always returns slice with mutable elements
|+/
|auto makeSlice(Allocator, size_t N, Iterator)(auto ref Allocator alloc, Slice!(N, Iterator) slice)
|{
| alias T = Unqual!(slice.DeepElement);
| return makeSlice!(T)(alloc, slice);
|}
|
|/// ditto
|Slice!(T*, N)
|makeSlice(T, Allocator, size_t N)(auto ref Allocator alloc, size_t[N] lengths...)
|{
| import std.experimental.allocator : makeArray;
| return alloc.makeArray!T(lengths.lengthsProduct).sliced(lengths);
|}
|
|/// ditto
|Slice!(T*, N)
|makeSlice(T, Allocator, size_t N)(auto ref Allocator alloc, size_t[N] lengths, T init)
|{
| import std.experimental.allocator : makeArray;
| immutable len = lengths.lengthsProduct;
| auto array = alloc.makeArray!T(len, init);
| return array.sliced(lengths);
|}
|
|/// ditto
|auto makeSlice(Allocator, Iterator, size_t N, SliceKind kind)
| (auto ref Allocator allocator, Slice!(Iterator, N, kind) slice)
|{
| import mir.conv: emplaceRef;
| alias E = slice.DeepElement;
|
| auto result = allocator.makeUninitSlice!(Unqual!E)(slice.shape);
|
| import mir.algorithm.iteration: each;
| each!(emplaceRef!E)(result, slice);
|
| return cast(Slice!(E*, N)) result;
|}
|
|/// Initialization with default value
|version(mir_test)
|@nogc unittest
|{
| import std.experimental.allocator;
| import std.experimental.allocator.mallocator;
| import mir.algorithm.iteration: all;
| import mir.ndslice.topology: map;
|
| auto sl = Mallocator.instance.makeSlice([2, 3, 4], 10);
| auto ar = sl.field;
| assert(sl.all!"a == 10");
|
| auto sl2 = Mallocator.instance.makeSlice(sl.map!"a * 2");
| auto ar2 = sl2.field;
| assert(sl2.all!"a == 20");
|
| Mallocator.instance.dispose(ar);
| Mallocator.instance.dispose(ar2);
|}
|
|version(mir_test)
|@nogc unittest
|{
| import std.experimental.allocator;
| import std.experimental.allocator.mallocator;
|
| // cast to your own type
| auto sl = makeSlice!double(Mallocator.instance, [2, 3, 4], 10);
| auto ar = sl.field;
| assert(sl[1, 1, 1] == 10.0);
| Mallocator.instance.dispose(ar);
|}
|
|/++
|Allocates an uninitialized array through a specified allocator and creates an n-dimensional slice over it.
|See also $(MREF std, experimental, allocator).
|Params:
| alloc = allocator
| lengths = list of lengths for each dimension
|Returns:
| uninitialized n-dimensional slice over memory allocated by `alloc`
|+/
|Slice!(T*, N)
|makeUninitSlice(T, Allocator, size_t N)(auto ref Allocator alloc, size_t[N] lengths...)
| if (N)
|{
| if (immutable len = lengths.lengthsProduct)
| {
| auto mem = alloc.allocate(len * T.sizeof);
| if (mem.length == 0) assert(0);
| auto array = () @trusted { return cast(T[]) mem; }();
| version (mir_secure_memory)
| {() @trusted {
| (cast(ubyte[])array)[] = 0;
| }();}
| return array.sliced(lengths);
| }
| else
| {
| return T[].init.sliced(lengths);
| }
|}
|
|///
|version(mir_test)
|@system @nogc unittest
|{
| import std.experimental.allocator;
| import std.experimental.allocator.mallocator;
|
| auto sl = makeUninitSlice!int(Mallocator.instance, 2, 3, 4);
| auto ar = sl.field;
| assert(ar.ptr is sl.iterator);
| assert(ar.length == 24);
| assert(sl.elementCount == 24);
|
| Mallocator.instance.dispose(ar);
|}
|
|/++
|Allocates a common n-dimensional array from a slice.
|Params:
| slice = slice
|Returns:
| multidimensional D array
|+/
|auto ndarray(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
|{
| import mir.array.allocation : array;
| static if (slice.N == 1)
| {
| return array(slice);
| }
| else
| {
| import mir.ndslice.topology: ipack, map;
| return array(slice.ipack!1.map!(a => .ndarray(a)));
| }
|}
|
|///
|version(mir_test)
|@safe pure nothrow unittest
|{
| import mir.ndslice.topology : iota;
| auto slice = iota(3, 4);
| auto m = slice.ndarray;
| static assert(is(typeof(m) == sizediff_t[][])); // sizediff_t is long for 64 bit platforms
| assert(m == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]);
|}
|
|/++
|Allocates a common n-dimensional array using data from a slice.
|Params:
| alloc = allocator (optional)
| slice = slice
|Returns:
| multidimensional D array
|+/
|auto makeNdarray(T, Allocator, Iterator, size_t N, SliceKind kind)(auto ref Allocator alloc, Slice!(Iterator, N, kind) slice)
|{
| import std.experimental.allocator : makeArray;
| static if (slice.N == 1)
| {
| return makeArray!T(alloc, slice);
| }
| else
| {
| alias E = typeof(makeNdarray!T(alloc, slice[0]));
| auto ret = makeArray!E(alloc, slice.length);
| foreach (i, ref e; ret)
| e = .makeNdarray!T(alloc, slice[i]);
| return ret;
| }
|}
|
|///
|version(mir_test)
|@nogc unittest
|{
| import std.experimental.allocator;
| import std.experimental.allocator.mallocator;
| import mir.ndslice.topology : iota;
|
| auto slice = iota(3, 4);
| auto m = Mallocator.instance.makeNdarray!long(slice);
|
| static assert(is(typeof(m) == long[][]));
|
| static immutable ar = [[0L, 1, 2, 3], [4L, 5, 6, 7], [8L, 9, 10, 11]];
| assert(m == ar);
|
| foreach (ref row; m)
| Mallocator.instance.dispose(row);
| Mallocator.instance.dispose(m);
|}
|
|/++
|Shape of a common n-dimensional array.
|Params:
| array = common n-dimensional array
| err = error flag passed by reference
|Returns:
| static array of dimensions, of type `size_t[n]`
|+/
|auto shape(T)(T[] array, ref int err)
|{
| static if (isDynamicArray!T)
| {
| size_t[1 + typeof(shape(T.init, err)).length] ret;
|
| if (array.length)
| {
| ret[0] = array.length;
| ret[1..$] = shape(array[0], err);
| if (err)
| goto L;
| foreach (ar; array)
| {
| if (shape(ar, err) != ret[1..$])
| err = 1;
| if (err)
| goto L;
| }
| }
| }
| else
| {
| size_t[1] ret;
| ret[0] = array.length;
| }
| err = 0;
|L:
| return ret;
|}
|
|///
|version(mir_test)
|@safe pure unittest
|{
| int err;
| size_t[2] shape = [[1, 2, 3], [4, 5, 6]].shape(err);
| assert(err == 0);
| assert(shape == [2, 3]);
|
| [[1, 2], [4, 5, 6]].shape(err);
| assert(err == 1);
|}
|
|/// Slice from ndarray
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation: slice, shape;
| int err;
| auto array = [[1, 2, 3], [4, 5, 6]];
| auto s = array.shape(err).slice!int;
| s[] = [[1, 2, 3], [4, 5, 6]];
| assert(s == array);
|}
|
|version(mir_test)
|@safe pure unittest
|{
| int err;
| size_t[2] shape = (int[][]).init.shape(err);
| assert(shape[0] == 0);
| assert(shape[1] == 0);
|}
|
|version(mir_test)
|nothrow unittest
|{
| import mir.ndslice.allocation;
| import mir.ndslice.topology : iota;
|
| auto sl = iota([0, 0], 1);
|
| assert(sl.empty!0);
| assert(sl.empty!1);
|
| auto gcsl1 = sl.slice;
| auto gcsl2 = slice!double(0, 0);
|
| import std.experimental.allocator;
| import std.experimental.allocator.mallocator;
|
| auto sl2 = makeSlice!double(Mallocator.instance, 0, 0);
|
| Mallocator.instance.dispose(sl2.field);
|}
|
|/++
|Allocates an uninitialized array using `core.stdc.stdlib.malloc` and creates an n-dimensional slice over it.
|Params:
| lengths = list of lengths for each dimension
|Returns:
| contiguous uninitialized n-dimensional slice
|See_also:
| $(LREF stdcSlice), $(LREF stdcFreeSlice)
|+/
|Slice!(T*, N) stdcUninitSlice(T, size_t N)(size_t[N] lengths...)
|{
| import core.stdc.stdlib: malloc;
| immutable len = lengths.lengthsProduct;
| auto p = malloc(len * T.sizeof);
| if (p is null) assert(0);
| version (mir_secure_memory)
| {
| (cast(ubyte*)p)[0 .. len * T.sizeof] = 0;
| }
| auto ptr = len ? cast(T*) p : null;
| return ptr.sliced(lengths);
|}
|
|/++
|Allocates a copy of a slice using `core.stdc.stdlib.malloc`.
|Params:
| slice = n-dimensional slice
|Returns:
| contiguous n-dimensional slice
|See_also:
| $(LREF stdcUninitSlice), $(LREF stdcFreeSlice)
|+/
|auto stdcSlice(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
|{
| alias E = slice.DeepElement;
| alias T = Unqual!E;
| static assert (!hasElaborateAssign!T, "stdcSlice is not implemented for slices that have elaborate assign");
| auto ret = stdcUninitSlice!T(slice.shape);
|
| import mir.conv: emplaceRef;
| import mir.algorithm.iteration: each;
| each!(emplaceRef!E)(ret, slice);
| return ret;
|}
|
|/++
|Frees memory using `core.stdc.stdlib.free`.
|Params:
| slice = n-dimensional slice
|See_also:
| $(LREF stdcSlice), $(LREF stdcUninitSlice)
|+/
|void stdcFreeSlice(T, size_t N)(Slice!(T*, N) slice)
|{
| import core.stdc.stdlib: free;
| version (mir_secure_memory)
| {
| (cast(ubyte[])slice.field)[] = 0;
| }
| slice._iterator.free;
|}
|
|///
|version(mir_test)
|unittest
|{
| import mir.ndslice.topology: iota;
|
| auto i = iota(3, 4);
| auto s = i.stdcSlice;
| auto t = s.shape.stdcUninitSlice!size_t;
|
| t[] = s;
|
| assert(t == i);
|
| s.stdcFreeSlice;
| t.stdcFreeSlice;
|}
|
|/++
|Allocates an uninitialized aligned array using `core.stdc.stdlib.malloc` and creates an n-dimensional slice over it.
|Params:
| lengths = list of lengths for each dimension
| alignment = memory alignment (bytes)
|Returns:
| contiguous uninitialized n-dimensional slice
|+/
|auto stdcUninitAlignedSlice(T, size_t N)(size_t[N] lengths, uint alignment) @system
|{
| immutable len = lengths.lengthsProduct;
| import mir.internal.memory: alignedAllocate;
| auto arr = (cast(T*)alignedAllocate(len * T.sizeof, alignment))[0 .. len];
| version (mir_secure_memory)
| {
| (cast(ubyte[])arr)[] = 0;
| }
| return arr.sliced(lengths);
|}
|
|///
|version(mir_test)
|@system pure nothrow unittest
|{
| auto tensor = stdcUninitAlignedSlice!double([5, 6, 7], 64);
| assert(tensor.length == 5);
| assert(tensor.elementCount == 5 * 6 * 7);
| assert(cast(size_t)(tensor.ptr) % 64 == 0);
| static assert(is(typeof(tensor) == Slice!(double*, 3)));
| stdcFreeAlignedSlice(tensor);
|}
|
|/++
|Frees aligned memory allocated by CRuntime.
|Params:
| slice = n-dimensional slice
|See_also:
| $(LREF stdcSlice), $(LREF stdcUninitSlice)
|+/
|void stdcFreeAlignedSlice(T, size_t N)(Slice!(T*, N) slice)
|{
| import mir.internal.memory: alignedFree;
| version (mir_secure_memory)
| {
| (cast(ubyte[])slice.field)[] = 0;
| }
| slice._iterator.alignedFree;
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/allocation.d has no code
<<<<<< EOF
# path=./source-mir-sparse-package.lst
|/++
|$(H2 Sparse Tensors)
|
|License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
|
|Authors: Ilya Yaroshenko
|+/
|module mir.sparse;
|
|import std.traits;
|import std.meta;
|
|import mir.ndslice.slice;
|public import mir.ndslice.field: SparseField;
|public import mir.ndslice.iterator: ChopIterator, FieldIterator;
|public import mir.series: isSeries, Series, mir_series, series;
|public import mir.ndslice.slice: CoordinateValue, Slice, mir_slice;
|public import mir.ndslice.topology: chopped;
|
|//TODO: replace with `static foreach`
|private template Iota(size_t i, size_t j)
|{
| static assert(i <= j, "Iota: i should be less than or equal to j");
| static if (i == j)
| alias Iota = AliasSeq!();
| else
| alias Iota = AliasSeq!(i, Iota!(i + 1, j));
|}
|
|/++
|Sparse tensors represented in Dictionary of Keys (DOK) format.
|
|Params:
| N = dimension count
| lengths = list of dimension lengths
|Returns:
| `N`-dimensional slice composed of indices
|See_also: $(LREF Sparse)
|+/
|Sparse!(T, N) sparse(T, size_t N)(size_t[N] lengths...)
|{
12| T[size_t] table;
12| table[0] = 0;
12| table.remove(0);
12| assert(table !is null);
12| with (typeof(return)) return FieldIterator!(SparseField!T)(0, SparseField!T(table)).sliced(lengths);
|}
|
|///
|pure unittest
|{
1| auto slice = sparse!double(2, 3);
1| slice[0][] = 1;
1| slice[0, 1] = 2;
1| --slice[0, 0];
1| slice[1, 2] += 4;
|
1| assert(slice == [[0, 2, 1], [0, 0, 4]]);
|
| import std.range.primitives: isRandomAccessRange;
| static assert(isRandomAccessRange!(Sparse!(double, 2)));
|
| import mir.ndslice.slice: Slice, DeepElementType;
| static assert(is(Sparse!(double, 2) : Slice!(FieldIterator!(SparseField!double), 2)));
| static assert(is(DeepElementType!(Sparse!(double, 2)) == double));
|}
|
|/++
|Returns unsorted forward range of (coordinate, value) pairs.
|
|Params:
| slice = sparse slice with pure structure. Operations that change the structure of the slice are not allowed.
|+/
|auto byCoordinateValue(size_t N, T)(Slice!(FieldIterator!(SparseField!T), N) slice)
|{
| struct ByCoordinateValue
| {
| private sizediff_t[N-1] _strides;
| mixin _sparse_range_methods!(typeof(slice._iterator._field._table.byKeyValue()));
|
| auto front() @property
| {S:
5| assert(!_range.empty);
5| auto iv = _range.front;
5| size_t index = iv.key;
10| if (!(_l <= index && index < _r))
| {
0000000| _range.popFront;
0000000| goto S;
| }
5| CoordinateValue!(T, N) ret;
| foreach (i; Iota!(0, N - 1))
| {
5| ret.index[i] = index / _strides[i];
5| index %= _strides[i];
| }
5| ret.index[N - 1] = index;
5| ret.value = iv.value;
5| return ret;
| }
| }
1| size_t l = slice._iterator._index;
1| size_t r = l + slice.elementCount;
1| size_t length = slice._iterator._field._table.byKey.countInInterval(l, r);
1| return ByCoordinateValue(slice.strides[0..N-1], length, l, r, slice._iterator._field._table.byKeyValue);
|}
|
|///
|pure unittest
|{
| import mir.array.allocation: array;
| import mir.ndslice.sorting: sort;
| alias CV = CoordinateValue!(double, 2);
|
1| auto slice = sparse!double(3, 3);
1| slice[] = [[0, 2, 1], [0, 0, 4], [6, 7, 0]];
1| assert(slice.byCoordinateValue.array.sort() == [
| CV([0, 1], 2),
| CV([0, 2], 1),
| CV([1, 2], 4),
| CV([2, 0], 6),
| CV([2, 1], 7)]);
|}
|
|/++
|Returns unsorted forward range of coordinates.
|Params:
| slice = sparse slice with pure structure. Operations that change the structure of the slice are not allowed.
|+/
|auto byCoordinate(T, size_t N)(Slice!(FieldIterator!(SparseField!T), N) slice)
|{
| struct ByCoordinate
| {
| private sizediff_t[N-1] _strides;
| mixin _sparse_range_methods!(typeof(slice._iterator._field._table.byKey()));
|
| auto front() @property
| {S:
5| assert(!_range.empty);
5| size_t index = _range.front;
10| if (!(_l <= index && index < _r))
| {
0000000| _range.popFront;
0000000| goto S;
| }
5| size_t[N] ret;
| foreach (i; Iota!(0, N - 1))
| {
5| ret[i] = index / _strides[i];
5| index %= _strides[i];
| }
5| ret[N - 1] = index;
5| return ret;
| }
| }
1| size_t l = slice._iterator._index;
1| size_t r = l + slice.elementCount;
1| size_t length = slice._iterator._field._table.byKey.countInInterval(l, r);
1| return ByCoordinate(slice.strides[0 .. N - 1], length, l, r, slice._iterator._field._table.byKey);
|}
|
|///
|pure unittest
|{
| import mir.array.allocation: array;
| import mir.ndslice.sorting: sort;
|
1| auto slice = sparse!double(3, 3);
1| slice[] = [[0, 2, 1], [0, 0, 4], [6, 7, 0]];
1| assert(slice.byCoordinate.array.sort() == [
| [0, 1],
| [0, 2],
| [1, 2],
| [2, 0],
| [2, 1]]);
|}
|
|/++
|Returns unsorted forward range of values.
|Params:
| slice = sparse slice with pure structure. Operations that change the structure of the slice are not allowed.
|+/
|auto onlyByValue(T, size_t N)(Slice!(FieldIterator!(SparseField!T), N) slice)
|{
| struct ByValue
| {
| mixin _sparse_range_methods!(typeof(slice._iterator._field._table.byKeyValue()));
|
| auto front() @property
| {S:
5| assert(!_range.empty);
5| auto iv = _range.front;
5| size_t index = iv.key;
10| if (!(_l <= index && index < _r))
| {
0000000| _range.popFront;
0000000| goto S;
| }
5| return iv.value;
| }
| }
1| size_t l = slice._iterator._index;
1| size_t r = l + slice.elementCount;
1| size_t length = slice._iterator._field._table.byKey.countInInterval(l, r);
1| return ByValue(length, l, r, slice._iterator._field._table.byKeyValue);
|}
|
|///
|pure unittest
|{
| import mir.array.allocation: array;
| import mir.ndslice.sorting: sort;
|
1| auto slice = sparse!double(3, 3);
1| slice[] = [[0, 2, 1], [0, 0, 4], [6, 7, 0]];
1| assert(slice.onlyByValue.array.sort() == [1, 2, 4, 6, 7]);
|}
|
|pragma(inline, false)
|private size_t countInInterval(Range)(Range range, size_t l, size_t r)
|{
3| size_t count;
51| foreach(ref i; range)
30| if (l <= i && i < r)
15| count++;
3| return count;
|}
|
|private mixin template _sparse_range_methods(Range)
|{
| private size_t _length, _l, _r;
| private Range _range;
|
| void popFront()
| {
15| assert(!_range.empty);
15| _range.popFront;
15| _length--;
| }
|
| bool empty() const @property
| {
0000000| return _length == 0;
| }
|
| auto save() @property
| {
0000000| auto ret = this;
0000000| ret._range = ret._range.save;
0000000| return ret;
| }
|
| size_t length() const @property
| {
3| return _length;
| }
|}
|
|/++
|Returns compressed tensor.
|Note: allocates using GC.
|+/
|auto compress(I = uint, J = size_t, SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) slice)
| if (N > 1)
|{
8| return compressWithType!(DeepElementType!(Slice!(Iterator, N, kind)), I, J)(slice);
|}
|
|/// Sparse tensor compression
|unittest
|{
1| auto sparse = sparse!double(5, 3);
1| sparse[] =
| [[0, 2, 1],
| [0, 0, 4],
| [0, 0, 0],
| [6, 0, 9],
| [0, 0, 5]];
|
1| auto crs = sparse.compressWithType!double;
| // assert(crs.iterator._field == CompressedField!(double, uint, uint)(
| // 3,
| // [2, 1, 4, 6, 9, 5],
| // [1, 2, 2, 0, 2, 2],
| // [0, 2, 3, 3, 5, 6]));
|}
|
|/// Sparse tensor compression
|unittest
|{
1| auto sparse = sparse!double(5, 8);
1| sparse[] =
| [[0, 2, 0, 0, 0, 0, 0, 1],
| [0, 0, 0, 0, 0, 0, 0, 4],
| [0, 0, 0, 0, 0, 0, 0, 0],
| [6, 0, 0, 0, 0, 0, 0, 9],
| [0, 0, 0, 0, 0, 0, 0, 5]];
|
1| auto crs = sparse.compressWithType!double;
| // assert(crs.iterator._field == CompressedField!(double, uint, uint)(
| // 8,
| // [2, 1, 4, 6, 9, 5],
| // [1, 7, 7, 0, 7, 7],
| // [0, 2, 3, 3, 5, 6]));
|}
|
|/// Dense tensor compression
|unittest
|{
| import mir.ndslice.allocation: slice;
|
1| auto sl = slice!double(5, 3);
1| sl[] =
| [[0, 2, 1],
| [0, 0, 4],
| [0, 0, 0],
| [6, 0, 9],
| [0, 0, 5]];
|
1| auto crs = sl.compressWithType!double;
|
| // assert(crs.iterator._field == CompressedField!(double, uint, uint)(
| // 3,
| // [2, 1, 4, 6, 9, 5],
| // [1, 2, 2, 0, 2, 2],
| // [0, 2, 3, 3, 5, 6]));
|}
|
|/// Dense tensor compression
|unittest
|{
| import mir.ndslice.allocation: slice;
|
1| auto sl = slice!double(5, 8);
1| sl[] =
| [[0, 2, 0, 0, 0, 0, 0, 1],
| [0, 0, 0, 0, 0, 0, 0, 4],
| [0, 0, 0, 0, 0, 0, 0, 0],
| [6, 0, 0, 0, 0, 0, 0, 9],
| [0, 0, 0, 0, 0, 0, 0, 5]];
|
1| auto crs = sl.compress;
| // assert(crs.iterator._field == CompressedField!(double, uint, uint)(
| // 8,
| // [2, 1, 4, 6, 9, 5],
| // [1, 7, 7, 0, 7, 7],
| // [0, 2, 3, 3, 5, 6]));
|}
|
|/++
|Returns compressed tensor with different element type.
|Note: allocates using GC.
|+/
|Slice!(ChopIterator!(J*, Series!(I*, V*)), N - 1)
| compressWithType(V, I = uint, J = size_t, T, size_t N)
| (Slice!(FieldIterator!(SparseField!T), N) slice)
| if (is(T : V) && N > 1 && isUnsigned!I)
|{
| import mir.array.allocation: array;
| import mir.ndslice.sorting: sort;
| import mir.ndslice.topology: iota;
8| auto compressedData = slice
| .iterator
| ._field
| ._table
| .series!(size_t, T, I, V);
8| auto pointers = new J[slice.shape[0 .. N - 1].iota.elementCount + 1];
16| size_t k = 1, shift;
8| pointers[0] = 0;
8| pointers[1] = 0;
8| const rowLength = slice.length!(N - 1);
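| // Walk the flat element indices (the loop assumes they are ascending); each is
| // rewritten in place as a column index within its row while the running CRS
| // pointer of that row is incremented. Rows without elements inherit the previous
| // pointer value.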
233| if(rowLength) foreach (ref index; compressedData.index.field)
| {
| for(;;)
| {
90| sizediff_t newIndex = index - shift;
90| if (newIndex >= rowLength)
| {
23| pointers[k + 1] = pointers[k];
23| shift += rowLength;
23| k++;
23| continue;
| }
67| index = cast(I)newIndex;
67| pointers[k] = cast(J) (pointers[k] + 1);
67| break;
| }
|
| }
8| pointers[k + 1 .. $] = pointers[k];
8| return compressedData.chopped(pointers);
|}
|
|
|/// ditto
|Slice!(ChopIterator!(J*, Series!(I*, V*)), N - 1)
| compressWithType(V, I = uint, J = size_t, Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if (!is(Iterator : FieldIterator!(SparseField!ST), ST) && is(DeepElementType!(Slice!(Iterator, N, kind)) : V) && N > 1 && isUnsigned!I)
|{
| import std.array: appender;
| import mir.ndslice.topology: pack, flattened;
4| auto vapp = appender!(V[]);
4| auto iapp = appender!(I[]);
4| auto psl = slice.pack!1;
4| auto count = psl.elementCount;
4| auto pointers = new J[count + 1];
|
4| pointers[0] = 0;
4| auto elems = psl.flattened;
4| size_t j = 0;
72| foreach (ref pointer; pointers[1 .. $])
| {
20| auto row = elems.front;
20| elems.popFront;
20| size_t i;
445| foreach (e; row)
| {
135| if (e)
| {
24| vapp.put(e);
24| iapp.put(cast(I)i);
24| j++;
| }
135| i++;
| }
20| pointer = cast(J)j;
| }
4| return iapp.data.series(vapp.data).chopped(pointers);
|}
|
|
|/++
|Re-compresses a compressed tensor. Makes all values, indices, and pointers contiguous in memory.
|
|The sparse slice is iterated twice: the first time to get the length of each sparse row, the second time to copy the data.
|
|Note: allocates using GC.
|+/
|Slice!(ChopIterator!(J*, Series!(I*, V*)), N)
| recompress
| (V, I = uint, J = size_t, Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) sparseSlice)
| if (isSeries!(DeepElementType!(Slice!(Iterator, N, kind))))
|{
| import mir.algorithm.iteration: each;
| import mir.conv: to, emplaceRef;
| import mir.ndslice.allocation: uninitSlice;
| import mir.ndslice.topology: pack, flattened, as, member, zip;
|
| size_t count = sparseSlice.elementCount;
| size_t length;
| auto pointers = uninitSlice!J(count + 1);
| pointers.front = 0;
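| // first pass: accumulate per-row element counts into a running sum, producing
| // the CRS pointer array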
| sparseSlice
| .member!"data"
| .member!"elementCount"
| .each!((len, ref ptr) {ptr = length += len;})(pointers[1 .. $]);
|
| auto i = uninitSlice!I(length);
| auto v = uninitSlice!V(length);
|
| auto ret = i.series(v).chopped(pointers);
|
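| // second pass: copy each row's indices and values into the new contiguous storage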
| sparseSlice
| .each!((a, b) {
| b.index[] = a.index.as!I;
| b.value.each!(emplaceRef!V)(a.value.as!V);
| })(ret);
|
| return ret;
|}
|
|///
|unittest
|{
| import mir.ndslice.topology: universal;
| import mir.ndslice.allocation: slice;
|
1| auto sl = slice!double(5, 8);
1| sl[] =
| [[0, 2, 0, 0, 0, 0, 0, 1],
| [0, 0, 0, 0, 0, 0, 0, 4],
| [0, 0, 0, 0, 0, 0, 0, 0],
| [6, 0, 0, 0, 0, 0, 0, 9],
| [0, 0, 0, 0, 0, 0, 0, 5]];
|
1| auto crs = sl.compress;
| // assert(crs.iterator._field == CompressedField!(double, uint, uint)(
| // 8,
| // [2, 1, 4, 6, 9, 5],
| // [1, 7, 7, 0, 7, 7],
| // [0, 2, 3, 3, 5, 6]));
|
| import mir.ndslice.dynamic: reversed;
1| auto rec = crs.reversed.recompress!real;
1| auto rev = sl.universal.reversed.compressWithType!real;
1| assert(rev.structure == rec.structure);
| // assert(rev.iterator._field.values == rec.iterator._field.values);
| // assert(rev.iterator._field.indeces == rec.iterator._field.indeces);
| // assert(rev.iterator._field.pointers == rec.iterator._field.pointers);
|}
|
|/++
|Sparse Slice in Dictionary of Keys (DOK) format.
|+/
|alias Sparse(T, size_t N = 1) = Slice!(FieldIterator!(SparseField!T), N);
|
|///
|alias CompressedVector(T, I = uint) = Series!(T*, I*);
|
|///
|alias CompressedMatrix(T, I = uint, J = size_t) = Slice!(ChopIterator!(J*, Series!(T*, I*)));
|
|///
|alias CompressedTensor(T, size_t N, I = uint, J = size_t) = Slice!(ChopIterator!(J*, Series!(T*, I*)), N - 1);
|
|///ditto
|alias CompressedTensor(T, size_t N : 1, I = uint) = Series!(I*, T*);
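|
|// (Added illustration, a minimal sketch.) The aliases above are plain names for the
|// compressed-storage slice types; with the default index types, `CompressedTensor`
|// expands as checked below.
|unittest
|{
| static assert(is(CompressedTensor!(double, 2)
| == Slice!(ChopIterator!(size_t*, Series!(double*, uint*)), 1)));
|}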
source/mir/sparse/package.d is 92% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-mutation.lst
|/++
|$(H2 Multidimensional mutation algorithms)
|
|This is a submodule of $(MREF mir,ndslice).
|
|$(BOOKTABLE $(H2 Function),
|$(TR $(TH Function Name) $(TH Description))
|)
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko
|
|Macros:
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|module mir.ndslice.mutation;
|
|import mir.ndslice.slice: Slice, SliceKind;
|
|/++
|Copies n-dimensional minor.
|+/
|void copyMinor(size_t N, IteratorFrom, SliceKind KindFrom, IteratorTo, SliceKind KindTo, IndexIterator)(
| Slice!(IteratorFrom, N, KindFrom) from,
| Slice!(IteratorTo, N, KindTo) to,
| Slice!IndexIterator[N] indices...
|)
|in {
| import mir.internal.utility: Iota;
| static foreach (i; Iota!N)
| assert(indices[i].length == to.length!i);
|}
|do {
| static if (N == 1)
| to[] = from[indices[0]];
| else
| foreach (i; 0 .. indices[0].length)
| {
| copyMinor!(N - 1)(from[indices[0][i]], to[i], indices[1 .. N]);
| }
|}
|
|///
|version(mir_test)
|@safe pure nothrow
|unittest
|{
| import mir.ndslice;
| // 0 1 2 3
| // 4 5 6 7
| // 8 9 10 11
| auto a = iota!int(3, 4);
| auto b = slice!int(2, 2);
| copyMinor(a, b, [2, 1].sliced, [0, 3].sliced);
| assert(b == [[8, 11], [4, 7]]);
|}
|
|/++
|Reverses data in the 1D slice.
|+/
|void reverseInPlace(Iterator)(Slice!Iterator slice)
|{
| import mir.utility : swap;
| foreach (i; 0 .. slice.length / 2)
| swap(slice[i], slice[$ - (i + 1)]);
|}
|
|///
|version(mir_test)
|@safe pure nothrow
|unittest
|{
| import mir.ndslice;
| auto s = 5.iota.slice;
| s.reverseInPlace;
| assert(s == [4, 3, 2, 1, 0]);
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/mutation.d has no code
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-chunks.lst
|/++
|This is a submodule of $(MREF mir,ndslice).
|
|The module contains the $(LREF _chunks) routine.
|The $(LREF Chunks) structure is a multidimensional random access range with slicing.
|
|$(SUBREF slice, slicedField), $(SUBREF slice, slicedNdField) can be used to construct ndslice view on top of $(LREF Chunks).
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko
|
|Macros:
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|module mir.ndslice.chunks;
|
|import mir.internal.utility;
|import mir.math.common: optmath;
|import mir.ndslice.internal;
|import mir.ndslice.iterator: IotaIterator;
|import mir.ndslice.slice;
|
|import std.meta;
|import std.traits;
|
|/++
|Creates $(LREF Chunks).
|
|Params:
| Dimensions = compile time list of dimensions to chunk
|
|See_also: $(SUBREF topology, blocks) $(SUBREF fuse, fuseCells)
|+/
|template chunks(Dimensions...)
| if (Dimensions.length)
|{
| static if (allSatisfy!(isSize_t, Dimensions))
| /++
| Params:
| slice = Slice to chunk.
| chunkLengths = Chunk shape. It must not have a zero length.
| Returns: $(LREF Chunks).
| +/
| Chunks!([Dimensions], Iterator, N, kind == Contiguous && [Dimensions] != [0] ? Canonical : kind)
| chunks(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice, size_t[Dimensions.length] chunkLengths...)
| {
| static if (kindOf!(typeof(typeof(return).init._slice)) != kind)
| {
| import mir.ndslice.topology: canonical;
| auto p = slice.canonical;
| }
| else
| {
| alias p = slice;
| }
| auto ret = typeof(return)(chunkLengths, p);
| foreach (i; Iota!(Dimensions.length))
| ret._norm!i;
| return ret;
| }
| else
| alias chunks = .chunks!(staticMap!(toSize_t, Dimensions));
|}
|
|/// ditto
|Chunks!([0], Iterator, N, kind) chunks(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice, size_t chunkLength)
|{
| return .chunks!0(slice, chunkLength);
|}
|
|
|/// 1Dx1D
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| import mir.ndslice.chunks: chunks, isChunks;
| import mir.ndslice.topology: iota;
|
| // 0 1 2 3 4 5 6 7 8 9 10
| auto sl = iota(11);
| // 0 1 2 | 3 4 5 | 6 7 8 | 9 10
| auto ch = sl.chunks(3);
|
| static assert(isChunks!(typeof(ch)) == [0]); // isChunks returns dimension indices
|
| assert(ch.length == 4);
| assert(ch.shape == cast(size_t[1])[4]);
|
| // 0 1 2
| assert(ch.front == iota([3], 0));
| ch.popFront;
|
| // 3 4 5
| assert(ch.front == iota([3], 3));
| assert(ch.length == 3);
|
| // 9 10
| assert(ch[$ - 1] == ch.back);
| assert(ch.back == iota([2], 9));
|
| ch.popBack;
| assert(ch.back == iota([3], 6));
|
| assert(ch[$ - 1 .. $].length == 1);
| assert(ch[$ .. $].length == 0);
| assert(ch[0 .. 0].empty);
|
| import std.range.primitives: isRandomAccessRange;
| static assert(isRandomAccessRange!(typeof(ch)));
|}
|
|/// 2Dx2D
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.chunks: chunks, isChunks;
| import mir.ndslice.topology: iota;
|
| // 0 1 2 3 4 5 6 7 8 9
| // 10 11 12 13 14 15 16 17 18 19
| // 20 21 22 23 24 25 26 27 28 29
| // 30 31 32 33 34 35 36 37 38 39
| // 40 41 42 43 44 45 46 47 48 49
| // 50 51 52 53 54 55 56 57 58 59
| // 60 61 62 63 64 65 66 67 68 69
| // 70 71 72 73 74 75 76 77 78 79
| // 80 81 82 83 84 85 86 87 88 89
| // 90 91 92 93 94 95 96 97 98 99
| // 100 101 102 103 104 105 106 107 108 109
| auto sl = iota(11, 10); // 11 x 10 matrix
|
| // ---------------- ---------------- --------
| // | 0 1 2 3 | | 4 5 6 7 | | 8 9 |
| // | 10 11 12 13 | | 14 15 16 17 | | 18 19 |
| // | 20 21 22 23 | | 24 25 26 27 | | 28 29 |
| // |----------------| |----------------| |--------|
| // | 30 31 32 33 | | 34 35 36 37 | | 38 39 |
| // | 40 41 42 43 | | 44 45 46 47 | | 48 49 |
| // | 50 51 52 53 | | 54 55 56 57 | | 58 59 |
| // |----------------| |----------------| |--------|
| // | 60 61 62 63 | | 64 65 66 67 | | 68 69 |
| // | 70 71 72 73 | | 74 75 76 77 | | 78 79 |
| // | 80 81 82 83 | | 84 85 86 87 | | 88 89 |
| // |----------------| |----------------| |--------|
| // | 90 91 92 93 | | 94 95 96 97 | | 98 99 |
| // |100 101 102 103 | |104 105 106 107 | |108 109 |
| // ---------------- ---------------- --------
| // Chunk columns first, then rows.
| auto ch = sl.chunks!(1, 0)(4, 3);
|
| assert(ch.shape == [3, 4]);
| assert(ch.slice == sl);
| assert(ch.front.slice == sl[0 .. $, 0 .. 4]);
|
| assert(ch.front.front == sl[0 .. 3, 0 .. 4]);
|
| assert(ch.front!0[1] == sl[3 .. 6, 0 .. 4]);
| assert(ch.front!1[1] == sl[0 .. 3, 4 .. 8]);
|
| assert (ch[$ - 1, $ - 1] == [[98, 99], [108, 109]]);
|
| static assert(isChunks!(typeof(ch)) == [1, 0]); // isChunks returns dimension indices
|
| assert(ch.length == 3);
| assert(ch.length!1 == 4);
|
| ch.popFront;
| assert(ch.front.front == sl[0 .. 3, 4 .. 8]);
| ch.popFront!1;
| assert(ch.front.front == sl[3 .. 6, 4 .. 8]);
|
| assert(ch.back.slice == sl[3 .. $, 8 .. $]);
| ch.popBack;
| assert(ch.back.slice == sl[3 .. $, 4 .. 8]);
|
| import std.range.primitives: isRandomAccessRange;
| static assert(isRandomAccessRange!(typeof(ch)));
|}
|
|/// 1Dx2D
|version(mir_test) unittest
|{
| import mir.ndslice.chunks: chunks, isChunks;
| import mir.ndslice.topology: iota;
|
| // 0 1 2 3 4 5 6 7 8 9
| // 10 11 12 13 14 15 16 17 18 19
| // 20 21 22 23 24 25 26 27 28 29
| // 30 31 32 33 34 35 36 37 38 39
| auto sl = iota(4, 10); // 4 x 10 matrix
|
| // ---------------- ---------------- --------
| // | 0 1 2 3 | | 4 5 6 7 | | 8 9 |
| // | 10 11 12 13 | | 14 15 16 17 | | 18 19 |
| // | 20 21 22 23 | | 24 25 26 27 | | 28 29 |
| // | 30 31 32 33 | | 34 35 36 37 | | 38 39 |
| // ---------------- ---------------- --------
| // Chunk columns
| auto ch = sl.chunks!1(4);
|
| assert(ch.slice == sl);
| assert(ch.front == sl[0 .. $, 0 .. 4]);
|
| assert(ch.back == sl[0 .. $, 8 .. $]);
|
| import std.range.primitives: isRandomAccessRange;
| static assert(isRandomAccessRange!(typeof(ch)));
|}
|
|// conversion to ndslice
|version(mir_test) unittest
|{
| import mir.ndslice.slice : slicedField;
| import mir.ndslice.chunks: chunks;
| import mir.ndslice.topology: iota, map;
| import mir.math.sum: sum;
|
| // 0 1 2 3 4 5 6 7 8 9 10
| auto sl = iota(11);
| // 0 1 2 | 3 4 5 | 6 7 8 | 9 10
| auto ch = sl.chunks(3);
| // 3 | 12 | 21 | 19
| auto s = ch.slicedField.map!sum;
| assert(s == [3, 12, 21, 19]);
|}
|
|/++
|+/
|struct Chunks(size_t[] dimensions, Iterator, size_t N = 1, SliceKind kind = Contiguous)
|{
|@optmath:
|
| /++
| Chunk shape.
| +/
| size_t[dimensions.length] chunkLengths()() @property { return _chunkLengths; }
| /// ditto
| size_t[dimensions.length] _chunkLengths;
|
| ///
| auto lightConst()() const @property
| {
| import mir.qualifier;
| return Chunks!(dimensions, LightConstOf!Iterator, N, kind)(_chunkLengths, _slice.lightConst);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| import mir.qualifier;
| return Chunks!(dimensions, LightImmutableOf!Iterator, N, kind)(_chunkLengths, _slice.lightImmutable);
| }
|
| alias DeepElement = Slice!(Iterator, N, kind);
|
| /++
| Underlying ndslice.
| It always corresponds to the current chunks state.
| Its shape is equal to the concatenation of all the chunks.
| +/
| Slice!(Iterator, N, kind) slice()() @property { return _slice; }
| ///
| Slice!(Iterator, N, kind) _slice;
|
| private auto _norm(size_t dimensionIndex = 0)() @property
| {
| assert(_chunkLengths[dimensionIndex]);
| enum dimension = dimensions[dimensionIndex];
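| // if fewer elements than a full chunk remain (and the slice is not empty), shrink the chunk length to fit them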
| if (_expect(_slice._lengths[dimension] < _chunkLengths[dimensionIndex], false) && _slice._lengths[dimension])
| _chunkLengths[dimensionIndex] = _slice._lengths[dimension];
| }
|
| private auto _wrap(size_t dimensionIndex, S)(ref S ret)
| {
| static if (dimensions.length == 1)
| {
| return ret;
| }
| else
| {
| size_t[dimensions.length - 1] rcl;
| foreach (i, j; AliasSeq!(Iota!dimensionIndex, Iota!(dimensionIndex + 1, dimensions.length)))
| rcl[i] = _chunkLengths[j];
| enum newDims = dimensions[0 .. dimensionIndex] ~ dimensions[dimensionIndex + 1 .. $];
| return .Chunks!(newDims, Iterator, N, typeof(ret).kind)(rcl, ret);
| }
| }
|
| private ref size_t sliceLength(size_t dimensionIndex)() @property
| {
| enum dimension = dimensions[dimensionIndex];
| return _slice._lengths[dimension];
| }
|
| /// ndslice-like primitives
| bool empty(size_t dimensionIndex = 0)() const @property
| if (dimensionIndex < dimensions.length)
| {
| enum dimension = dimensions[dimensionIndex];
| return _slice.empty!(dimension);
| }
|
| ///
| size_t[dimensions.length] shape()() const @property
| {
| typeof(return) ret;
| foreach(dimensionIndex; Iota!(ret.length))
| {
| enum dimension = dimensions[dimensionIndex];
| auto l = _slice._lengths[dimension];
| auto n = _chunkLengths[dimensionIndex];
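| // ceiling division: the number of chunks along this dimension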
| ret[dimensionIndex] = l / n + (l % n != 0);
| }
| return ret;
| }
|
| /// ditto
| size_t length(size_t dimensionIndex = 0)() const @property
| if (dimensionIndex < dimensions.length)
| {
| enum dimension = dimensions[dimensionIndex];
| auto l = _slice._lengths[dimension];
| auto n = _chunkLengths[dimensionIndex];
| return l / n + (l % n != 0);
| }
|
| /// ditto
| auto front(size_t dimensionIndex = 0)() @property
| if (dimensionIndex < dimensions.length)
| {
| enum dimension = dimensions[dimensionIndex];
| assert(_chunkLengths[dimensionIndex] <= _slice._lengths[dimension]);
| auto ret = _slice.selectFront!dimension(_chunkLengths[dimensionIndex]);
| return _wrap!dimensionIndex(ret);
| }
|
| ///
| auto back(size_t dimensionIndex = 0)() @property
| if (dimensionIndex < dimensions.length)
| {
| assert(!empty!dimensionIndex);
| enum dimension = dimensions[dimensionIndex];
| auto l = _slice._lengths[dimension];
| auto n = _chunkLengths[dimensionIndex];
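| // rshift is the length of the trailing (possibly partial) chunk: l % n, or n when l divides evenly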
| auto rshift = l % n;
| rshift = !rshift ? n : rshift;
| auto len = _slice._lengths[dimension];
| auto ret = _slice.select!dimension(len - rshift, len);
| return _wrap!dimensionIndex(ret);
| }
|
| /// ditto
| void popFront(size_t dimensionIndex = 0)()
| if (dimensionIndex < dimensions.length)
| {
| enum dimension = dimensions[dimensionIndex];
| assert(!empty!dimensionIndex);
| _slice.popFrontExactly!dimension(_chunkLengths[dimensionIndex]);
| _norm!dimensionIndex;
| }
|
| /// ditto
| void popBack(size_t dimensionIndex = 0)()
| if (dimensionIndex < dimensions.length)
| {
| assert(!empty!dimensionIndex);
| enum dimension = dimensions[dimensionIndex];
| auto l = _slice._lengths[dimension];
| auto n = _chunkLengths[dimensionIndex];
| auto rshift = l % n;
| rshift = !rshift ? n : rshift;
| _slice.popBackExactly!dimension(rshift);
| _norm!dimensionIndex;
| }
|
| /// ditto
| Slice!(IotaIterator!size_t) opSlice(size_t dimensionIndex)(size_t i, size_t j) const
| if (dimensionIndex < dimensions.length)
| in
| {
| assert(i <= j,
| "Chunks.opSlice!" ~ dimensionIndex.stringof ~ ": the left opSlice boundary must be less than or equal to the right bound.");
| enum errorMsg = ": the right opSlice boundary must be less than or equal to the length of the given dimensionIndex.";
| assert(j <= length!dimensionIndex,
| "Chunks.opSlice!" ~ dimensionIndex.stringof ~ errorMsg);
| }
| do
| {
| return typeof(return)(j - i, typeof(return).Iterator(i));
| }
|
| /// ditto
| ChunksSlice!() opSlice(size_t dimensionIndex)(size_t i, ChunksDollar!() j) const
| if (dimensionIndex < dimensions.length)
| in
| {
| assert(i <= j,
| "Chunks.opSlice!" ~ dimensionIndex.stringof ~ ": the left opSlice boundary must be less than or equal to the right bound.");
| enum errorMsg = ": the right opSlice boundary must be less than or equal to the length of the given dimensionIndex.";
| assert(j <= length!dimensionIndex,
| "Chunks.opSlice!" ~ dimensionIndex.stringof ~ errorMsg);
| }
| do
| {
| return typeof(return)(i, j);
| }
|
| /// ditto
| ChunksDollar!() opDollar(size_t dimensionIndex)() @property
| {
| enum dimension = dimensions[dimensionIndex];
| return ChunksDollar!()(_slice._lengths[dimension], _chunkLengths[dimensionIndex]);
| }
|
| /// ditto
| auto opIndex(Slices...)(Slices slices)
| if (Slices.length <= dimensions.length)
| {
| static if (slices.length == 0)
| {
| return this;
| }
| else
| {
| alias slice = slices[0];
| alias S = Slices[0];
| static if (isIndex!S)
| {
| auto next = this.select!0(slice);
| }
| else
| static if (is_Slice!S)
| {
| auto i = slice._iterator._index;
| auto j = i + slice._lengths[0];
| auto next = this.select!0(i, j);
| }
| else
| {
| auto next = this.select!0(slice.i, slice.j);
| }
| static if (slices.length > 1)
| {
| return next[slices[1 .. $]];
| }
| else
| {
| return next;
| }
| }
| }
|
| /// ditto
| auto opIndex()(size_t[dimensions.length] index)
| {
| auto next = this.select!0(index[0]);
| static if (dimensions.length == 1)
| {
| return next;
| }
| else
| {
| return next[index[1 .. $]];
| }
| }
|
| /// ditto
| auto save()() @property
| {
| return this;
| }
|
| ///
| auto select(size_t dimensionIndex = 0)(size_t index) @property
| if (dimensionIndex < dimensions.length)
| {
| enum dimension = dimensions[dimensionIndex];
| auto chl = _chunkLengths[dimensionIndex];
| auto shiftL = chl * index;
| assert(shiftL <= _slice._lengths[dimension]);
| auto shiftR = shiftL + chl;
| if (_expect(shiftR > _slice._lengths[dimension], false))
| {
| shiftR = _slice._lengths[dimension];
| }
| auto ret = _slice.select!dimension(shiftL, shiftR);
| return _wrap!dimensionIndex(ret);
| }
|
| /// ditto
| auto select(size_t dimensionIndex = 0)(size_t i, size_t j) @property
| if (dimensionIndex < dimensions.length)
| {
| assert(i <= j);
| enum dimension = dimensions[dimensionIndex];
| auto chl = _chunkLengths[dimensionIndex];
| auto shiftL = chl * i;
| auto shiftR = chl * j;
| assert(shiftL <= _slice._lengths[dimension]);
| assert(shiftR <= _slice._lengths[dimension]);
| if (_expect(shiftR > _slice._lengths[dimension], false))
| {
| shiftR = _slice._lengths[dimension];
| if (_expect(shiftL > _slice._lengths[dimension], false))
| {
| shiftL = _slice._lengths[dimension];
| }
| }
| auto ret = _slice.select!dimension(shiftL, shiftR);
| import std.meta: aliasSeqOf;
| return ret.chunks!(aliasSeqOf!dimensions)(_chunkLengths);
| }
|
| // undocumented
| auto select(size_t dimensionIndex = 0)(ChunksSlice!() sl) @property
| if (dimensionIndex < dimensions.length)
| {
| enum dimension = dimensions[dimensionIndex];
| assert(sl.i <= _slice._lengths[dimension]);
| assert(sl.j.chunkLength == _chunkLengths[dimensionIndex]);
| assert(sl.j.length == _slice._lengths[dimension]);
|
| auto chl = _chunkLengths[dimensionIndex];
| auto len = sl.i * chl;
| assert(len <= _slice._lengths[dimension]);
| if (_expect(len > _slice._lengths[dimension], false))
| len = _slice._lengths[dimension];
| auto ret = _slice.selectBack!dimension(len);
| import std.meta: aliasSeqOf;
| return ret.chunks!(aliasSeqOf!dimensions)(_chunkLengths);
| }
|}
|
|// undocumented
|struct ChunksSlice()
|{
| size_t i;
| ChunksDollar!() j;
|}
|
|// undocumented
|struct ChunksDollar()
|{
| size_t length;
| size_t chunkLength;
| size_t value()() @property
| {
| return length / chunkLength + (length % chunkLength != 0);
| }
| alias value this;
|}
|
|/++
|Checks if T is $(LREF Chunks) type.
|Returns:
| array of dimension indices.
|+/
|template isChunks(T)
|{
| static if (is(T : Chunks!(dimensions, Iterator, N, kind), size_t[] dimensions, Iterator, size_t N, SliceKind kind))
| enum isChunks = dimensions;
| else
| enum isChunks = size_t[].init;
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.chunks: chunks, isChunks;
| import mir.ndslice.topology: iota;
|
| static assert(isChunks!int == null);
| static assert(isChunks!(typeof(iota(20, 30).chunks!(1, 0)(3, 7))) == [1, 0]);
|}
|
|/++
|Evaluates `popFront!dimmensionIndex` for multiple $(LREF Chunks) structures at once.
|For the given dimension, all chunks structures must have the same chunk lengths and the same underlying slice lengths.
|
|Params:
| dimmensionIndex = index of the dimension to advance
| master = the first chunks structure
| followers = following chunks structures
|+/
|void popFrontTuple(size_t dimmensionIndex = 0, Master, Followers...)(ref Master master, ref Followers followers)
| if (isChunks!Master && allSatisfy!(isChunks, Followers))
|in
|{
| foreach (ref follower; followers)
| {
| assert(follower.sliceLength!dimmensionIndex == master.sliceLength!dimmensionIndex);
| assert(follower._chunkLengths[dimmensionIndex] == master._chunkLengths[dimmensionIndex]);
| }
|}
|do
|{
| master._slice.popFrontExactly!(isChunks!Master[dimmensionIndex])(master._chunkLengths[dimmensionIndex]);
| foreach (i, ref follower; followers)
| {
| follower._slice.popFrontExactly!(isChunks!(Followers[i])[dimmensionIndex])(master._chunkLengths[dimmensionIndex]);
| // hint for optimizer
| follower.sliceLength!dimmensionIndex = master.sliceLength!dimmensionIndex;
| }
| if (_expect(master.sliceLength!dimmensionIndex < master._chunkLengths[dimmensionIndex], false) && master.sliceLength!dimmensionIndex)
| {
| master._chunkLengths[dimmensionIndex] = master.sliceLength!dimmensionIndex;
| foreach(ref follower; followers)
| {
| follower._chunkLengths[dimmensionIndex] = master._chunkLengths[dimmensionIndex];
| }
| }
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.chunks: chunks;
| import mir.ndslice.topology: iota;
|
| auto a = iota(10, 20).chunks!(0, 1)(3, 7);
| auto b = iota(20, 10).chunks!(1, 0)(3, 7);
|
| auto as = a;
| auto bs = b;
|
| as.popFront;
| bs.popFront;
|
| popFrontTuple(a, b);
|
| assert(as.slice == a.slice);
| assert(bs.slice == b.slice);
|
| assert(as.chunkLengths == a.chunkLengths);
| assert(bs.chunkLengths == b.chunkLengths);
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/chunks.d has no code
<<<<<< EOF
# path=./source-mir-model-lda-hoffman.lst
|/**
|
|$(H3 Online variational Bayes for latent Dirichlet allocation)
|
|References:
| Hoffman, Matthew D., Blei, David M. and Bach, Francis R..
| "Online Learning for Latent Dirichlet Allocation.."
| Paper presented at the meeting of the NIPS, 2010.
|
|License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0).
|Copyright: 2016-, Ilya Yaroshenko
|Authors: Ilya Yaroshenko
|*/
|module mir.model.lda.hoffman;
|
|import std.traits;
|
|/++
|Batch variational Bayes for LDA with mini-batches.
|+/
|struct LdaHoffman(F)
| if (isFloatingPoint!F)
|{
| import std.parallelism;
| import mir.ndslice.iterator: FieldIterator;
| import mir.ndslice.topology: iota;
|
| import mir.ndslice.slice;
| import mir.ndslice.allocation: slice;
|
| import mir.math.common;
| import mir.sparse;
|
| private alias Vector = Slice!(F*);
| private alias Matrix = Slice!(F*, 2);
|
| private size_t D;
| private F alpha;
| private F eta;
| private F kappa;
| private F _tau;
| private F eps;
|
| private Matrix _lambda; // [k, w]
| private Matrix _beta; // [k, w]
|
| private TaskPool tp;
|
| private F[][] _lambdaTemp;
|
| @disable this();
| @disable this(this);
|
| /++
| Params:
| K = theme count
| W = dictionary size
| D = approximate total number of documents in a collection.
| alpha = Dirichlet document-topic prior (0.1)
| eta = Dirichlet word-topic prior (0.1)
| tau0 = tau0 ≧ 0 slows down the early iterations of the algorithm.
| kappa = `kappa belongs to $(LPAREN)0.5, 1]`, controls the rate at which old values of lambda are forgotten.
| `lambda = (1 - rho(tau)) lambda + rho lambda', rho(tau) = (tau0 + tau)^(-kappa)`. Use `kappa = 0` for Batch variational Bayes LDA.
| eps = Stop iterations if `||lambda - lambda'||_l1 < s * eps`, where `s` is a documents count in a batch.
| tp = task pool
| +/
0000000| this(size_t K, size_t W, size_t D, F alpha, F eta, F tau0, F kappa, F eps = 1e-5, TaskPool tp = taskPool())
| {
| import mir.random;
|
0000000| this.D = D;
0000000| this.alpha = alpha;
0000000| this.eta = eta;
0000000| this._tau = tau0;
0000000| this.kappa = kappa;
0000000| this.eps = eps;
0000000| this.tp = tp;
|
0000000| _lambda = slice!F(K, W);
0000000| _beta = slice!F(K, W);
0000000| _lambdaTemp = new F[][](tp.size + 1, W);
|
| import std.math: fabs;
0000000| auto gen = Random(unpredictableSeed);
0000000| foreach (r; _lambda)
0000000| foreach (ref e; r)
0000000| e = (gen.rand!F.fabs + 0.9) / 1.901;
|
0000000| updateBeta();
| }
|
| ///
| void updateBeta()
| {
0000000| foreach (i; tp.parallel(lambda.length.iota))
0000000| unparameterize(lambda[i], beta[i]);
| }
|
| /++
| Posterior over the topics
| +/
| Slice!(F*, 2) beta() @property
| {
0000000| return _beta;
| }
|
| /++
| Parameterized posterior over the topics.
| +/
| Slice!(F*, 2) lambda() @property
| {
0000000| return _lambda;
| }
|
| /++
| Count of already seen documents.
| Slows down the iterations of the algorithm.
| +/
| F tau() const @property
| {
0000000| return _tau;
| }
|
| /// ditto
| void tau(F v) @property
| {
0000000| _tau = v;
| }
|
| /++
| Accepts a mini-batch and performs multiple E-step iterations for each document and a single M-step.
|
| This implementation is optimized for sparse documents,
| which contain far fewer unique words than the dictionary.
|
| Params:
| n = mini-batch, a collection of compressed documents.
| maxIterations = maximal number of E-step iterations for a single document in a batch.
| +/
| size_t putBatch(SliceKind kind, C, I, J)(Slice!(ChopIterator!(J*, Series!(I*, C*)), 1, kind) n, size_t maxIterations)
| {
| return putBatchImpl(n.recompress!F, maxIterations);
| }
|
| private size_t putBatchImpl(Slice!(ChopIterator!(size_t*, Series!(uint*, F*))) n, size_t maxIterations)
| {
| import std.math: isFinite;
| import mir.sparse.blas.dot;
| import mir.sparse.blas.gemv;
| import mir.ndslice.dynamic: transposed;
| import mir.ndslice.topology: universal;
| import mir.internal.utility;
|
| immutable S = n.length;
| immutable K = _lambda.length!0;
| immutable W = _lambda.length!1;
| _tau += S;
| auto theta = slice!F(S, K);
| auto nsave = saveN(n);
|
| immutable rho = pow!F(F(tau), -kappa);
| auto thetat = theta.universal.transposed;
| auto _gamma = slice!F(tp.size + 1, K);
| shared size_t ret;
| // E step
| foreach (d; tp.parallel(S.iota))
| {
| auto gamma = _gamma[tp.workerIndex];
| gamma[] = 1;
| auto nd = n[d];
| auto thetad = theta[d];
| for (size_t c; ;c++)
| {
| unparameterize(gamma, thetad);
|
| selectiveGemv!"/"(_beta.universal.transposed, thetad, nd);
| F sum = 0;
| {
| auto beta = _beta;
| auto th = thetad;
| foreach (ref g; gamma)
| {
| if (!th.front.isFinite)
| th.front = F.max;
| auto value = dot(nd, beta.front) * th.front + alpha;
| sum += fabs(value - g);
| g = value;
| beta.popFront;
| th.popFront;
| }
| }
| if (c < maxIterations && sum > eps * K)
| {
| nd.value[] = nsave[d].value;
| continue;
| }
| import core.atomic;
| ret.atomicOp!"+="(c);
| break;
| }
| }
| // M step
| foreach (k; tp.parallel(K.iota))
| {
| auto lambdaTemp = _lambdaTemp[tp.workerIndex];
| gemtv!F(F(1), n, thetat[k], F(0), lambdaTemp.sliced);
| import mir.algorithm.iteration: each;
| each!((ref l, bk, lt) {l = (1 - rho) * l +
| rho * (eta + (F(D) / F(S)) * bk * lt);})(_lambda[k], _beta[k],lambdaTemp.sliced);
| unparameterize(_lambda[k], _beta[k]);
| }
| return ret;
| }
|
| private auto saveN(Slice!(ChopIterator!(size_t*, Series!(uint*, F*))) n)
| {
| import mir.series: series;
| import mir.ndslice.topology: chopped, universal;
0000000| return n.iterator._sliceable.index
| .series(n.iterator._sliceable.value.dup)
| .chopped(n.iterator._iterator.sliced(n.length + 1));
| }
|
| private static void unparameterize(Vector param, Vector posterior)
| {
0000000| assert(param.structure == posterior.structure);
| import mir.ndslice.topology: zip;
| import mir.math.func.expdigamma;
| import mir.math.sum: sum;
0000000| immutable c = 1 / expDigamma(sum(param));
0000000| foreach (e; zip(param, posterior))
0000000| e.b = c * expDigamma(e.a);
| }
|}
|
|unittest
|{
| alias ff = LdaHoffman!double;
|}
source/mir/model/lda/hoffman.d is 0% covered
<<<<<< EOF
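source/mir/model/lda/hoffman.d reports 0% coverage above; the following is only a usage sketch of the documented API (it assumes the `sparse`/`compress` helpers from `mir.sparse`, and every numeric value is illustrative rather than taken from the source):

    import mir.model.lda.hoffman;
    import mir.sparse;

    enum K = 2;    // topic count
    enum W = 6;    // dictionary size
    enum D = 100;  // approximate number of documents in the collection
    // alpha = 0.1, eta = 0.1, tau0 = 1.0, kappa = 0.5 (illustrative priors)
    auto lda = LdaHoffman!double(K, W, D, 0.1, 0.1, 1.0, 0.5);

    // A mini-batch of 3 documents as a documents-by-words count matrix.
    auto batch = sparse!double(3, W);
    batch[0, 0] = 2.0; batch[0, 3] = 1.0;
    batch[1, 1] = 4.0;
    batch[2, 2] = 1.0; batch[2, 5] = 3.0;

    // At most 100 E-step iterations per document, followed by a single M-step.
    lda.putBatch(batch.compress, 100);
    auto topics = lda.beta; // K x W posterior over the topics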
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-iterator.lst
|/++
|This is a submodule of $(MREF mir,ndslice).
|
|An iterator is a type with pointer-like behavior.
|An ndslice can be created on top of an iterator using $(SUBREF slice, sliced).
|
|$(BOOKTABLE $(H2 Iterators),
|$(TR $(TH Iterator Name) $(TH Used By))
|$(T2 BytegroupIterator, $(SUBREF topology, bytegroup).)
|$(T2 CachedIterator, $(SUBREF topology, cached), $(SUBREF topology, cachedGC).)
|$(T2 ChopIterator, $(SUBREF topology, chopped))
|$(T2 FieldIterator, $(SUBREF slice, slicedField), $(SUBREF topology, bitwise), $(SUBREF topology, ndiota), and others.)
|$(T2 FlattenedIterator, $(SUBREF topology, flattened))
|$(T2 IndexIterator, $(SUBREF topology, indexed))
|$(T2 IotaIterator, $(SUBREF topology, iota))
|$(T2 MapIterator, $(SUBREF topology, map))
|$(T2 MemberIterator, $(SUBREF topology, member))
|$(T2 NeighboursIterator, $(SUBREF topology, withNeighboursSum))
|$(T2 RetroIterator, $(SUBREF topology, retro))
|$(T2 SliceIterator, $(SUBREF topology, map) in composition with $(LREF MapIterator) for packed slices.)
|$(T2 SlideIterator, $(SUBREF topology, diff), $(SUBREF topology, pairwise), and $(SUBREF topology, slide).)
|$(T2 StairsIterator, $(SUBREF topology, stairs))
|$(T2 StrideIterator, $(SUBREF topology, stride))
|$(T2 SubSliceIterator, $(SUBREF topology, subSlices))
|$(T2 TripletIterator, $(SUBREF topology, triplets))
|$(T2 ZipIterator, $(SUBREF topology, zip))
|)
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko
|
|Macros:
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|module mir.ndslice.iterator;
|
|import mir.internal.utility: Iota;
|import mir.math.common: optmath;
|import mir.ndslice.field;
|import mir.ndslice.internal;
|import mir.ndslice.slice: SliceKind, Slice, Universal, Canonical, Contiguous, isSlice;
|import mir.qualifier;
|import mir.conv;
|import std.traits;
|
|private static immutable assumeZeroShiftExceptionMsg = "*.assumeFieldsHaveZeroShift: shift is not zero!";
|version(D_Exceptions)
| private static immutable assumeZeroShiftException = new Exception(assumeZeroShiftExceptionMsg);
|
|@optmath:
|
|enum std_ops = q{
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| { mixin(op ~ "_iterator;"); }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "-" || op == "+")
| { mixin("_iterator " ~ op ~ "= index;"); }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return this._iterator - right._iterator; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._iterator == right._iterator; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!Iterator)
| return this._iterator - right._iterator;
| else
| return this._iterator.opCmp(right._iterator);
| }
|};
|
|/++
|Step counter.
|
|`IotaIterator` is used by $(SUBREF topology, iota).
|+/
|struct IotaIterator(I)
| if (isIntegral!I || isPointer!I)
|{
|@optmath:
|
| ///
| I _index;
|
| static if (isPointer!I)
| ///
| auto lightConst()() const @property
| {
| static if (isIntegral!I)
| return IotaIterator!I(_index);
| else
| return IotaIterator!(LightConstOf!I)(_index);
| }
|
| static if (isPointer!I)
| ///
| auto lightImmutable()() immutable @property
| {
| static if (isIntegral!I)
| return IotaIterator!I(_index);
| else
| return IotaIterator!(LightImmutableOf!I)(_index);
| }
|
|pure:
|
| I opUnary(string op : "*")()
0000000| { return _index; }
|
| void opUnary(string op)()
| if (op == "--" || op == "++")
| { mixin(op ~ `_index;`); }
|
| I opIndex()(ptrdiff_t index) const
0000000| { return cast(I)(_index + index); }
|
| void opOpAssign(string op)(ptrdiff_t index)
| if (op == `+` || op == `-`)
| { mixin(`_index ` ~ op ~ `= index;`); }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(const typeof(this) right) const
| { return cast(ptrdiff_t)(this._index - right._index); }
|
| bool opEquals()(const typeof(this) right) const
0000000| { return this._index == right._index; }
|
| auto opCmp()(const typeof(this) right) const
0000000| { return this._index - right._index; }
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| IotaIterator!int iota;
| assert(*iota == 0);
|
| // iteration
| ++iota;
| assert(*iota == 1);
|
| assert(iota[2] == 3);
| assert(iota[-1] == 0);
|
| --iota;
| assert(*iota == 0);
|
| // opBinary
| assert(*(iota + 2) == 2);
| assert(*(iota - 3) == -3);
| assert((iota - 3) - iota == -3);
|
| // construction
| assert(*IotaIterator!int(3) == 3);
| assert(iota - 1 < iota);
|}
|
|///
|pure nothrow @nogc version(mir_test) unittest
|{
| int[32] data;
| auto iota = IotaIterator!(int*)(data.ptr);
| assert(*iota == data.ptr);
|
| // iteration
| ++iota;
| assert(*iota == 1 + data.ptr);
|
| assert(iota[2] == 3 + data.ptr);
| assert(iota[-1] == 0 + data.ptr);
|
| --iota;
| assert(*iota == 0 + data.ptr);
|
| // opBinary
| assert(*(iota + 2) == 2 + data.ptr);
| assert(*(iota - 3) == -3 + data.ptr);
| assert((iota - 3) - iota == -3);
|
| // construction
| assert(*IotaIterator!(int*)(data.ptr) == data.ptr);
| assert(iota - 1 < iota);
|}
|
|auto RetroIterator__map(Iterator, alias fun)(ref RetroIterator!Iterator it)
|{
| auto iterator = it._iterator._mapIterator!fun;
| return RetroIterator!(typeof(iterator))(iterator);
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.topology;
| import mir.ndslice.allocation;
| auto v = iota(9).retro.map!(a => a).slice;
| uint r;
| auto w = iota(9).retro.map!(a => a).map!(a => a * r).slice;
|}
|
|/++
|Reverses the direction of an iterator.
|
|`RetroIterator` is used by $(SUBREF topology, retro).
|+/
|struct RetroIterator(Iterator)
|{
|@optmath:
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return RetroIterator!(LightConstOf!Iterator)(.lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return RetroIterator!(LightImmutableOf!Iterator)(.lightImmutable(_iterator));
| }
|
| ///
| static alias __map(alias fun) = RetroIterator__map!(Iterator, fun);
|
| auto ref opUnary(string op : "*")()
| { return *_iterator; }
|
| void opUnary(string op : "--")()
| { ++_iterator; }
|
| void opUnary(string op : "++")() pure
| { --_iterator; }
|
| auto ref opIndex()(ptrdiff_t index)
| { return _iterator[-index]; }
|
| void opOpAssign(string op : "-")(ptrdiff_t index) scope
| { _iterator += index; }
|
| void opOpAssign(string op : "+")(ptrdiff_t index) scope
| { _iterator -= index; }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return right._iterator - this._iterator; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return right._iterator == this._iterator; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!Iterator)
| return right._iterator - this._iterator;
| else
| return right._iterator.opCmp(this._iterator);
| }
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| IotaIterator!int iota;
| RetroIterator!(IotaIterator!int) retro;
|
| ++iota;
| --retro;
| assert(*retro == *iota);
|
| --iota;
| ++retro;
| assert(*retro == *iota);
|
| assert(retro[-7] == iota[7]);
|
| iota += 100;
| retro -= 100;
| assert(*retro == *iota);
|
| iota -= 100;
| retro += 100;
| assert(*retro == *iota);
|
| assert(*(retro + 10) == *(iota - 10));
|
| assert(retro - 1 < retro);
|
| assert((retro - 5) - retro == -5);
|
| iota = IotaIterator!int(3);
| retro = RetroIterator!(IotaIterator!int)(iota);
| assert(*retro == *iota);
|}
|
|auto StrideIterator__map(Iterator, alias fun)(StrideIterator!Iterator it)
|{
| auto iterator = it._iterator._mapIterator!fun;
| return StrideIterator!(typeof(iterator))(it._stride, iterator);
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.topology;
| import mir.ndslice.allocation;
| auto v = iota([3], 0, 3).map!(a => a).slice;
| uint r;
| auto w = iota([3], 0, 3).map!(a => a).map!(a => a * r).slice;
|}
|
|/++
|Iterates an iterator with a fixed stride.
|
|`StrideIterator` is used by $(SUBREF topology, stride).
|+/
|struct StrideIterator(Iterator)
|{
|@optmath:
| ///
| ptrdiff_t _stride;
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return StrideIterator!(LightConstOf!Iterator)(_stride, .lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return StrideIterator!(LightImmutableOf!Iterator)(_stride, .lightImmutable(_iterator));
| }
|
| ///
| static alias __map(alias fun) = StrideIterator__map!(Iterator, fun);
|
| auto ref opUnary(string op : "*")()
| { return *_iterator; }
|
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| { mixin("_iterator " ~ op[0] ~ "= _stride;"); }
|
| auto ref opIndex()(ptrdiff_t index)
| { return _iterator[index * _stride]; }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "-" || op == "+")
| { mixin("_iterator " ~ op ~ "= index * _stride;"); }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return (this._iterator - right._iterator) / _stride; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._iterator == right._iterator; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!Iterator)
| ptrdiff_t ret = this._iterator - right._iterator;
| else
| ptrdiff_t ret = this._iterator.opCmp(right._iterator);
| return _stride >= 0 ? ret : -ret;
| }
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| IotaIterator!int iota;
| StrideIterator!(IotaIterator!int) stride;
| stride._stride = -3;
|
| iota -= stride._stride;
| --stride;
| assert(*stride == *iota);
|
| iota += stride._stride;
| ++stride;
| assert(*stride == *iota);
|
| assert(stride[7] == iota[7 * stride._stride]);
|
| iota -= 100 * stride._stride;
| stride -= 100;
| assert(*stride == *iota);
|
| iota += 100 * stride._stride;
| stride += 100;
| assert(*stride == *iota);
|
| assert(*(stride + 10) == *(iota + 10 * stride._stride));
|
| assert(stride - 1 < stride);
|
| assert((stride - 5) - stride == -5);
|
| iota = IotaIterator!int(3);
| stride = StrideIterator!(IotaIterator!int)(3, iota);
| assert(*stride == *iota);
|}
|
|auto StrideIterator__map(Iterator, size_t factor, alias fun)(StrideIterator!(Iterator, factor) it)
|{
| auto iterator = it._iterator._mapIterator!fun;
| return StrideIterator!(typeof(iterator), factor)(iterator);
|}
|
|/++
|Iterates an iterator with a fixed stride.
|
|`StrideIterator` is used by $(SUBREF topology, stride).
|+/
|struct StrideIterator(Iterator, ptrdiff_t factor)
|{
|@optmath:
| ///
| enum _stride = factor;
|
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return StrideIterator!(LightConstOf!Iterator, _stride)(.lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return StrideIterator!(LightImmutableOf!Iterator, _stride)(.lightImmutable(_iterator));
| }
|
| ///
| static alias __map(alias fun) = StrideIterator__map!(Iterator, _stride, fun);
|
| auto ref opUnary(string op : "*")()
| { return *_iterator; }
|
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| { mixin("_iterator " ~ op[0] ~ "= _stride;"); }
|
| auto ref opIndex()(ptrdiff_t index)
| { return _iterator[index * _stride]; }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "-" || op == "+")
| { mixin("_iterator " ~ op ~ "= index * _stride;"); }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return (this._iterator - right._iterator) / _stride; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._iterator == right._iterator; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!Iterator)
| ptrdiff_t ret = this._iterator - right._iterator;
| else
| ptrdiff_t ret = this._iterator.opCmp(right._iterator);
| return _stride >= 0 ? ret : -ret;
| }
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| IotaIterator!int iota;
| StrideIterator!(IotaIterator!int, -3) stride;
|
| iota -= stride._stride;
| --stride;
| assert(*stride == *iota);
|
| iota += stride._stride;
| ++stride;
| assert(*stride == *iota);
|
| assert(stride[7] == iota[7 * stride._stride]);
|
| iota -= 100 * stride._stride;
| stride -= 100;
| assert(*stride == *iota);
|
| iota += 100 * stride._stride;
| stride += 100;
| assert(*stride == *iota);
|
| assert(*(stride + 10) == *(iota + 10 * stride._stride));
|
| assert(stride - 1 < stride);
|
| assert((stride - 5) - stride == -5);
|}
|
|package template _zip_types(Iterators...)
|{
| alias AliasSeq(T...) = T;
| static if (Iterators.length)
| {
| enum i = Iterators.length - 1;
| alias T = typeof(Iterators[i].init[sizediff_t.init]);
| static if (__traits(compiles, &Iterators[i].init[sizediff_t.init]))
| {
| import mir.functional: Ref;
| alias _zip_types = AliasSeq!(_zip_types!(Iterators[0 .. i]), Ref!T);
| }
| else
| alias _zip_types = AliasSeq!(_zip_types!(Iterators[0 .. i]), T);
| }
| else
| alias _zip_types = AliasSeq!();
|}
|
|package template _zip_fronts(Iterators...)
|{
| static if (Iterators.length)
| {
| enum i = Iterators.length - 1;
| static if (__traits(compiles, &Iterators[i].init[sizediff_t.init]))
| enum _zip_fronts = _zip_fronts!(Iterators[0 .. i]) ~ "_ref(*_iterators[" ~ i.stringof ~ "]), ";
| else
| enum _zip_fronts = _zip_fronts!(Iterators[0 .. i]) ~ "*_iterators[" ~ i.stringof ~ "], ";
| }
| else
| enum _zip_fronts = "";
|}
|
|package template _zip_index(Iterators...)
|{
| static if (Iterators.length)
| {
| enum i = Iterators.length - 1;
| static if (__traits(compiles, &Iterators[i].init[sizediff_t.init]))
| enum _zip_index = _zip_index!(Iterators[0 .. i]) ~ "_ref(_iterators[" ~ i.stringof ~ "][index]), ";
| else
| enum _zip_index = _zip_index!(Iterators[0 .. i]) ~ "_iterators[" ~ i.stringof ~ "][index], ";
| }
| else
| enum _zip_index = "";
|}
|
|/++
|Iterates multiple iterators in lockstep.
|
|`ZipIterator` is used by $(SUBREF topology, zip).
|+/
|struct ZipIterator(Iterators...)
| if (Iterators.length > 1)
|{
|@optmath:
| import std.traits: ConstOf, ImmutableOf;
| import std.meta: staticMap;
| import mir.functional: RefTuple, Ref, _ref;
| ///
| Iterators _iterators;
|
| ///
| auto lightConst()() const @property
| {
| import std.format;
| import mir.ndslice.topology: iota;
| import std.meta: staticMap;
| alias Ret = ZipIterator!(staticMap!(LightConstOf, Iterators));
| enum ret = "Ret(%(.lightConst(_iterators[%s]),%)]))".format(_iterators.length.iota);
| return mixin(ret);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| import std.format;
| import mir.ndslice.topology: iota;
| import std.meta: staticMap;
| alias Ret = ZipIterator!(staticMap!(LightImmutableOf, Iterators));
| enum ret = "Ret(%(.lightImmutable(_iterators[%s]),%)]))".format(_iterators.length.iota);
| return mixin(ret);
| }
|
| auto opUnary(string op : "*")()
| { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_fronts!Iterators ~ ")"); }
|
|
| auto opUnary(string op : "*")() const
| { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_fronts!Iterators ~ ")"); }
|
| auto opUnary(string op : "*")() immutable
| { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_fronts!Iterators ~ ")"); }
|
| void opUnary(string op)() scope
| if (op == "++" || op == "--")
| {
| foreach (ref _iterator; _iterators)
| mixin(op ~ `_iterator;`);
| }
|
| auto opIndex()(ptrdiff_t index)
| { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_index!Iterators ~ ")"); }
|
| auto opIndexAssign(Types...)(RefTuple!(Types) value, ptrdiff_t index)
| if (Types.length == Iterators.length)
| {
| foreach(i, ref val; value.expand)
| {
| import mir.functional: unref;
| _iterators[i][index] = unref(val);
| }
| return opIndex(index);
| }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "+" || op == "-")
| {
| foreach (ref _iterator; _iterators)
| mixin(`_iterator ` ~ op ~ `= index;`);
| }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return this._iterators[0] - right._iterators[0]; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._iterators[0] == right._iterators[0]; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!(Iterators[0]))
| return this._iterators[0] - right._iterators[0];
| else
| return this._iterators[0].opCmp(right._iterators[0]);
| }
|
| import std.meta: anySatisfy;
| static if (anySatisfy!(hasZeroShiftFieldMember, Iterators))
| /// Defined if at least one of `Iterators` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| import std.meta: staticMap;
| alias _fields = _iterators;
| return mixin("ZipField!(staticMap!(ZeroShiftField, Iterators))(" ~ applyAssumeZeroShift!Iterators ~ ")");
| }
|}
|
|///
|pure nothrow @nogc version(mir_test) unittest
|{
| import mir.ndslice.traits: isIterator;
|
| double[10] data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
| alias ItA = IotaIterator!int;
| alias ItB = double*;
| alias ItZ = ZipIterator!(ItA, ItB);
| auto zip = ItZ(ItA(3), data.ptr);
| assert((*zip).a == 3);
| assert((*zip).b == 1);
|
| // iteration
| ++zip;
| assert((*zip).a == 3 + 1);
| assert((*zip).b == 1 + 1);
| assert(&(*zip).b() == data.ptr + 1);
|
| assert(zip[4].a == 3 + 5);
| assert(zip[4].b == 1 + 5);
| assert(&zip[4].b() == data.ptr + 5);
|
| --zip;
| assert((*zip).a == 3);
| assert((*zip).b == 1);
|
| assert((*(zip + 2)).a == 3 + 2);
| assert((*(zip - 3)).a == 3 + -3);
| assert((*(zip + 2)).b == 1 + 2);
| assert((*(zip + 3 - 3)).b == 1);
| assert((zip - 3).opBinary!"-"(zip) == -3);
|
| assert(zip == zip);
| assert(zip - 1 < zip);
|
| static assert(isIterator!(ZipIterator!(double*, int*)));
| static assert(isIterator!(ZipIterator!(immutable(double)*, immutable(int)*)));
|}
|
|///
|struct CachedIterator(Iterator, CacheIterator, FlagIterator)
|{
| ///
| Iterator _iterator;
| ///
| CacheIterator _caches;
| ///
| FlagIterator _flags;
|
|@optmath:
|
| ///
| auto lightScope()() scope @property
| {
| return CachedIterator!(LightScopeOf!Iterator, LightScopeOf!CacheIterator, LightScopeOf!FlagIterator)(
| .lightScope(_iterator),
| .lightScope(_caches),
| .lightScope(_flags),
| );
| }
|
| ///
| auto lightScope()() scope const @property
| {
| return lightConst.lightScope;
| }
|
| ///
| auto lightScope()() scope immutable @property
| {
| return lightImmutable.lightScope;
| }
|
| ///
| auto lightConst()() const @property
| {
| return CachedIterator!(LightConstOf!Iterator, CacheIterator, FlagIterator)(
| .lightConst(_iterator),
| *cast(CacheIterator*)&_caches,
| *cast(FlagIterator*)&_flags,
| );
| }
|
| ///
| auto lightImmutable()() immutable @property @trusted
| {
| return CachedIterator!(LightImmutableOf!Iterator, CacheIterator, FlagIterator)(
| .lightImmutable(_iterator),
| *cast(CacheIterator*)&_caches,
| *cast(FlagIterator*)&_flags,
| );
| }
|
| private alias T = typeof(Iterator.init[0]);
| private alias UT = Unqual!T;
|
| auto opUnary(string op : "*")()
| {
| if (_expect(!*_flags, false))
| {
| _flags[0] = true;
| emplaceRef!T(*cast(UT*)&*_caches, *_iterator);
| }
| return *_caches;
| }
|
| auto opIndex()(ptrdiff_t index)
| {
| if (_expect(!_flags[index], false))
| {
| _flags[index] = true;
| emplaceRef!T(*cast(UT*)&(_caches[index]), _iterator[index]);
| }
| return _caches[index];
| }
|
| auto ref opIndexAssign(T)(auto ref T val, ptrdiff_t index)
| {
| _flags[index] = true;
| return _caches[index] = val;
| }
|
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| {
| mixin(op ~ "_iterator;");
| mixin(op ~ "_caches;");
| mixin(op ~ "_flags;");
| }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "-" || op == "+")
| {
| mixin("_iterator" ~ op ~ "= index;");
| mixin("_caches" ~ op ~ "= index;");
| mixin("_flags" ~ op ~ "= index;");
| }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return this._iterator - right._iterator; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._iterator == right._iterator; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!Iterator)
| return this._iterator - right._iterator;
| else
| return this._iterator.opCmp(right._iterator);
| }
|}
|
|private enum map_primitives = q{
|
| import mir.functional: RefTuple, unref;
|
| auto ref opUnary(string op : "*")()
| {
| static if (is(typeof(*_iterator) : RefTuple!T, T...))
| {
| auto t = *_iterator;
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(*_iterator);
| }
|
| auto ref opIndex(ptrdiff_t index) scope
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(_iterator[index]);
| }
|
| static if (!__traits(compiles, &opIndex(ptrdiff_t.init)))
| {
| auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ") = value");
| }
| else
| return _fun(_iterator[index]) = value;
| }
|
| auto ref opIndexUnary(string op)(ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin(op ~ "_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return mixin(op ~ "_fun(_iterator[index])");
| }
|
| auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")" ~ op ~ "= value");
| }
| else
| return mixin("_fun(_iterator[index])" ~ op ~ "= value");
| }
| }
|};
|
|/++
|`VmapIterator` is used by $(SUBREF topology, map).
|+/
|struct VmapIterator(Iterator, Fun)
|{
|@optmath:
|
| ///
| Iterator _iterator;
| ///
| Fun _fun;
|
| ///
| auto lightConst()() const @property
| {
| return VmapIterator!(LightConstOf!Iterator, LightConstOf!Fun)(.lightConst(_iterator), .lightConst(_fun));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return VmapIterator!(LightImmutableOf!Iterator, LightImmutableOf!Fun)(.lightImmutable(_iterator), .lightImmutable(_fun));
| }
|
| import mir.functional: RefTuple, unref;
|
| auto ref opUnary(string op : "*")()
| {
| static if (is(typeof(*_iterator) : RefTuple!T, T...))
| {
| auto t = *_iterator;
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(*_iterator);
| }
|
| auto ref opIndex(ptrdiff_t index) scope
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(_iterator[index]);
| }
|
| static if (!__traits(compiles, &opIndex(ptrdiff_t.init)))
| {
| auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ") = value");
| }
| else
| return _fun(_iterator[index]) = value;
| }
|
| auto ref opIndexUnary(string op)(ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin(op ~ "_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return mixin(op ~ "_fun(_iterator[index])");
| }
|
| auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")" ~ op ~ "= value");
| }
| else
| return mixin("_fun(_iterator[index])" ~ op ~ "= value");
| }
| }
|
| mixin(std_ops);
|
| static if (hasZeroShiftFieldMember!Iterator)
| ///
| auto assumeFieldsHaveZeroShift() @property
| {
| return _vmapField(_iterator.assumeFieldsHaveZeroShift, _fun);
| }
|}
|
|auto MapIterator__map(Iterator, alias fun0, alias fun)(ref MapIterator!(Iterator, fun0) it)
|{
| return MapIterator!(Iterator, fun)(it._iterator);
|}
|
|/++
|`MapIterator` is used by $(SUBREF topology, map).
|+/
|struct MapIterator(Iterator, alias _fun)
|{
|@optmath:
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return MapIterator!(LightConstOf!Iterator, _fun)(.lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return MapIterator!(LightImmutableOf!Iterator, _fun)(.lightImmutable(_iterator));
| }
|
| import mir.functional: pipe;
| ///
| static alias __map(alias fun1) = MapIterator__map!(Iterator, _fun, pipe!(_fun, fun1));
|
| import mir.functional: RefTuple, unref;
|
| auto ref opUnary(string op : "*")()
| {
| static if (is(typeof(*_iterator) : RefTuple!T, T...))
| {
| auto t = *_iterator;
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(*_iterator);
| }
|
| auto ref opIndex(ptrdiff_t index) scope
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(_iterator[index]);
| }
|
| static if (!__traits(compiles, &opIndex(ptrdiff_t.init)))
| {
| auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ") = value");
| }
| else
| return _fun(_iterator[index]) = value;
| }
|
| auto ref opIndexUnary(string op)(ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin(op ~ "_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return mixin(op ~ "_fun(_iterator[index])");
| }
|
| auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")" ~ op ~ "= value");
| }
| else
| return mixin("_fun(_iterator[index])" ~ op ~ "= value");
| }
| }
|
| mixin(std_ops);
|
| static if (hasZeroShiftFieldMember!Iterator)
| ///
| auto assumeFieldsHaveZeroShift() @property
| {
| return _mapField!_fun(_iterator.assumeFieldsHaveZeroShift);
| }
|}
|
|/+
|Creates a mapped iterator. Uses `__map` if possible.
|+/
|auto _mapIterator(alias fun, Iterator)(Iterator iterator)
|{
| import core.lifetime: move;
| static if (__traits(hasMember, Iterator, "__map"))
| {
| static if (is(Iterator : MapIterator!(Iter0, fun0), Iter0, alias fun0)
| && !__traits(compiles, Iterator.__map!fun(iterator)))
| {
| // https://github.com/libmir/mir-algorithm/issues/111
| debug(mir) pragma(msg, __FUNCTION__~" not coalescing chained map calls into a single lambda, possibly because of multiple embedded context pointers");
| return MapIterator!(Iterator, fun)(move(iterator));
| }
| else
| return Iterator.__map!fun(iterator);
| }
| else
| return MapIterator!(Iterator, fun)(move(iterator));
|}
|
|
|/+
|Creates a mapped iterator. Uses `__vmap` if possible.
|+/
|auto _vmapIterator(Iterator, Fun)(Iterator iterator, Fun fun)
|{
| static if (__traits(hasMember, Iterator, "__vmap"))
| return Iterator.__vmap(iterator, fun);
| else
| return MapIterator!(Iterator, fun)(iterator);
|}
|
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| // https://github.com/libmir/mir-algorithm/issues/111
| import mir.ndslice.topology : iota, map;
| import mir.functional : pipe;
|
| static auto foo(T)(T x)
| {
| return x.map!(a => a + 1);
| }
|
| static auto bar(T)(T x)
| {
| return foo(x).map!(a => a + 2);
| }
|
| auto data = iota(5);
| auto result = iota([5], 3);
|
| auto x = data.map!(a => a + 1).map!(a => a + 2);
| assert(x == result);
|
| auto y = bar(data);
| assert(y == result);
|}
|
|/++
|`NeighboursIterator` is used by $(SUBREF topology, withNeighboursSum).
|+/
|struct NeighboursIterator(Iterator, size_t N, alias _fun, bool around)
|{
| import std.meta: AliasSeq;
|@optmath:
| ///
| Iterator _iterator;
| static if (N)
| Iterator[2][N] _neighbours;
| else alias _neighbours = AliasSeq!();
|
| ///
| auto lightConst()() const @property
| {
| LightConstOf!Iterator[2][N] neighbours;
| foreach (i; 0 .. N)
| {
| neighbours[i][0] = .lightConst(_neighbours[i][0]);
| neighbours[i][1] = .lightConst(_neighbours[i][1]);
| }
| return NeighboursIterator!(LightConstOf!Iterator, N, _fun, around)(.lightConst(_iterator), neighbours);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| LightImmutableOf!Iterator[2][N] neighbours;
| foreach (i; 0 .. N)
| {
| neighbours[i][0] = .lightImmutable(_neighbours[i][0]);
| neighbours[i][1] = .lightImmutable(_neighbours[i][1]);
| }
| return NeighboursIterator!(LightImmutableOf!Iterator, N, _fun, around)(.lightImmutable(_iterator), neighbours);
| }
|
| import mir.functional: RefTuple, _ref;
|
| private alias RA = Unqual!(typeof(_fun(_iterator[-1], _iterator[+1])));
| private alias Result = RefTuple!(_zip_types!Iterator, RA);
|
| auto ref opUnary(string op : "*")()
| {
| return opIndex(0);
| }
|
| auto ref opIndex(ptrdiff_t index) scope
| {
| static if (around)
| RA result = _fun(_iterator[index - 1], _iterator[index + 1]);
|
| foreach (i; Iota!N)
| {
| static if (i == 0 && !around)
| RA result = _fun(_neighbours[i][0][index], _neighbours[i][1][index]);
| else
| result = _fun(result, _fun(_neighbours[i][0][index], _neighbours[i][1][index]));
| }
| static if (__traits(compiles, &_iterator[index]))
| return Result(_ref(_iterator[index]), result);
| else
| return Result(_iterator[index], result);
| }
|
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| {
| mixin(op ~ "_iterator;");
| foreach (i; Iota!N)
| {
| mixin(op ~ "_neighbours[i][0];");
| mixin(op ~ "_neighbours[i][1];");
| }
| }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "-" || op == "+")
| {
|
| mixin("_iterator " ~ op ~ "= index;");
| foreach (i; Iota!N)
| {
| mixin("_neighbours[i][0] " ~ op ~ "= index;");
| mixin("_neighbours[i][1] " ~ op ~ "= index;");
| }
| }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return this._iterator - right._iterator; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._iterator == right._iterator; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!Iterator)
| return this._iterator - right._iterator;
| else
| return this._iterator.opCmp(right._iterator);
| }
|}
|
|/++
|`MemberIterator` is used by $(SUBREF topology, member).
|+/
|struct MemberIterator(Iterator, string member)
|{
|@optmath:
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return MemberIterator!(LightConstOf!Iterator, member)(.lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return MemberIterator!(LightImmutableOf!Iterator, member)(.lightImmutable(_iterator));
| }
|
| auto ref opUnary(string op : "*")()
| {
| return __traits(getMember, *_iterator, member);
| }
|
| auto ref opIndex()(ptrdiff_t index)
| {
| return __traits(getMember, _iterator[index], member);
| }
|
| static if (!__traits(compiles, &opIndex(ptrdiff_t.init)))
| {
| auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope
| {
| return __traits(getMember, _iterator[index], member) = value;
| }
|
| auto ref opIndexUnary(string op)(ptrdiff_t index)
| {
| return mixin(op ~ "__traits(getMember, _iterator[index], member)");
| }
|
| auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index)
| {
| return mixin("__traits(getMember, _iterator[index], member)" ~ op ~ "= value");
| }
| }
|
| mixin(std_ops);
|}
|
|/++
|`BytegroupIterator` is used by $(SUBREF topology, Bytegroup) and $(SUBREF topology, bytegroup).
|+/
|struct BytegroupIterator(Iterator, size_t count, DestinationType)
| if (count)
|{
|@optmath:
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return BytegroupIterator!(LightConstOf!Iterator, count, DestinationType)(.lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return BytegroupIterator!(LightImmutableOf!Iterator, count, DestinationType)(.lightImmutable(_iterator));
| }
|
| package(mir) alias Byte = Unqual!(typeof(_iterator[0]));
|
| version(LittleEndian)
| private enum BE = false;
| else
| private enum BE = true;
|
| private union U
| {
| DestinationType value;
| static if (DestinationType.sizeof > Byte[count].sizeof && BE && isScalarType!DestinationType)
| {
| struct
| {
| ubyte[DestinationType.sizeof - Byte[count].sizeof] shiftPayload;
| Byte[count] bytes;
| }
| }
| else
| {
| Byte[count] bytes;
| }
| }
|
| DestinationType opUnary(string op : "*")()
| {
| U ret = { value: DestinationType.init };
| foreach (i; Iota!count)
| ret.bytes[i] = _iterator[i];
| return ret.value;
| }
|
| DestinationType opIndex()(ptrdiff_t index)
| {
| return *(this + index);
| }
|
| DestinationType opIndexAssign(T)(T val, ptrdiff_t index) scope
| {
| auto it = this + index;
| U ret = { value: val };
| foreach (i; Iota!count)
| it._iterator[i] = ret.bytes[i];
| return ret.value;
| }
|
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| { mixin("_iterator " ~ op[0] ~ "= count;"); }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "-" || op == "+")
| { mixin("_iterator " ~ op ~ "= index * count;"); }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return (this._iterator - right._iterator) / count; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._iterator == right._iterator; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!Iterator)
| return this._iterator - right._iterator;
| else
| return this._iterator.opCmp(right._iterator);
| }
|}
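|
|// Added sketch (not part of the original module): the union-based regrouping performed
|// by `BytegroupIterator`, constructed directly here instead of through
|// $(SUBREF topology, bytegroup); byte order and layout are assumptions checked only
|// on little-endian targets.
|version(mir_test) unittest
|{
|    ubyte[4] data = [0x01, 0x02, 0x03, 0x04];
|    auto it = BytegroupIterator!(ubyte*, 2, ushort)(data.ptr);
|    version (LittleEndian)
|    {
|        assert(*it == 0x0201);              // bytes 0 and 1 reinterpreted as one ushort
|        assert(it[1] == 0x0403);            // next group of two bytes
|        it[1] = cast(ushort) 0xAABB;        // writing stores the bytes back
|        assert(data[2] == 0xBB && data[3] == 0xAA);
|    }
|}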
|
|auto SlideIterator__map(Iterator, size_t params, alias fun0, alias fun)(SlideIterator!(Iterator, params, fun0) it)
|{
| return SlideIterator!(Iterator, params, fun)(it._iterator);
|}
|
|/++
|`SlideIterator` is used by $(SUBREF topology, diff) and $(SUBREF topology, slide).
|+/
|struct SlideIterator(Iterator, size_t params, alias fun)
| if (params > 1)
|{
|@optmath:
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return SlideIterator!(LightConstOf!Iterator, params, fun)(.lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return SlideIterator!(LightImmutableOf!Iterator, params, fun)(.lightImmutable(_iterator));
| }
|
| import mir.functional: pipe;
| ///
| static alias __map(alias fun1) = SlideIterator__map!(Iterator, params, fun, pipe!(fun, fun1));
|
| auto ref opUnary(string op : "*")()
| {
| return mixin("fun(" ~ _iotaArgs!(params, "_iterator[", "], ") ~ ")");
| }
|
| auto ref opIndex()(ptrdiff_t index)
| {
| return mixin("fun(" ~ _iotaArgs!(params, "_iterator[index + ", "], ") ~ ")");
| }
|
| mixin(std_ops);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.functional: naryFun;
| auto data = [1, 3, 8, 18];
| auto diff = SlideIterator!(int*, 2, naryFun!"b - a")(data.ptr);
| assert(*diff == 2);
| assert(diff[1] == 5);
| assert(diff[2] == 10);
|}
|
|auto IndexIterator__map(Iterator, Field, alias fun)(ref IndexIterator!(Iterator, Field) it)
|{
| auto field = it._field._mapField!fun;
| return IndexIterator!(Iterator, typeof(field))(it._iterator, field);
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.topology;
| import mir.ndslice.allocation;
| import mir.ndslice.slice;
| auto indices = [4, 3, 1, 2, 0, 4].sliced;
| auto v = iota(5).indexed(indices).map!(a => a).slice;
| uint r;
| auto w = iota(5).indexed(indices).map!(a => a).map!(a => a * r).slice;
|}
|
|/++
|Iterates a field using an iterator.
|
|`IndexIterator` is used by $(SUBREF topology, indexed).
|+/
|struct IndexIterator(Iterator, Field)
|{
| import mir.functional: RefTuple, unref;
|
|@optmath:
| ///
| Iterator _iterator;
| ///
| Field _field;
|
| ///
| auto lightConst()() const @property
| {
| return IndexIterator!(LightConstOf!Iterator, LightConstOf!Field)(.lightConst(_iterator), .lightConst(_field));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return IndexIterator!(LightImmutableOf!Iterator, LightImmutableOf!Field)(.lightImmutable(_iterator), _field.lightImmutable);
| }
|
| ///
| static alias __map(alias fun) = IndexIterator__map!(Iterator, Field, fun);
|
| auto ref opUnary(string op : "*")()
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = *_iterator;
| return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]");
| }
| else
| return _field[*_iterator];
| }
|
| auto ref opIndex()(ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]");
| }
| else
| return _field[_iterator[index]];
| }
|
| static if (!__traits(compiles, &opIndex(ptrdiff_t.init)))
| {
| auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "] = value");
| }
| else
| return _field[_iterator[index]] = value;
| }
|
| auto ref opIndexUnary(string op)(ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin(op ~ "_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]");
| }
| else
| return mixin(op ~ "_field[_iterator[index]]");
| }
|
| auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]" ~ op ~ "= value");
| }
| else
| return mixin("_field[_iterator[index]]" ~ op ~ "= value");
| }
| }
|
| mixin(std_ops);
|}
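|
|// Added sketch (not part of the original module): `indexed`, which builds an
|// `IndexIterator`, viewed as a gather view over another slice; write-through is
|// shown under the assumption that the source elements are addressable.
|version(mir_test) unittest
|{
|    import mir.ndslice.slice: sliced;
|    import mir.ndslice.topology: indexed;
|    auto source = [10, 20, 30, 40, 50].sliced;
|    auto picks = [4, 0, 2].sliced;
|    auto view = source.indexed(picks);
|    assert(view == [50, 10, 30]);
|    view[1] = 11;                       // writes through the index to `source[0]`
|    assert(source[0] == 11);
|}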
|
|/++
|Iterates chunks in a sliceable using an iterator composed of indices.
|
|Definition:
|----
|auto index = iterator[i];
|auto elem = sliceable[index[0] .. index[1]];
|----
|+/
|struct SubSliceIterator(Iterator, Sliceable)
|{
|@optmath:
| ///
| Iterator _iterator;
| ///
| Sliceable _sliceable;
|
| ///
| auto lightConst()() const @property
| {
| return SubSliceIterator!(LightConstOf!Iterator, LightConstOf!Sliceable)(.lightConst(_iterator), _sliceable.lightConst);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return SubSliceIterator!(LightImmutableOf!Iterator, LightImmutableOf!Sliceable)(.lightImmutable(_iterator), _sliceable.lightImmutable);
| }
|
| auto ref opUnary(string op : "*")()
| {
| auto i = *_iterator;
| return _sliceable[i[0] .. i[1]];
| }
|
| auto ref opIndex()(ptrdiff_t index)
| {
| auto i = _iterator[index];
| return _sliceable[i[0] .. i[1]];
| }
|
| mixin(std_ops);
|}
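|
|// Added sketch (not part of the original module): constructing a `SubSliceIterator`
|// directly from an array of index pairs, following the definition above.
|version(mir_test) unittest
|{
|    size_t[2][] pairs = [[0, 2], [2, 5]];
|    auto data = [10, 20, 30, 40, 50];
|    auto it = SubSliceIterator!(size_t[2]*, int[])(pairs.ptr, data);
|    assert(*it == [10, 20]);            // data[pairs[0][0] .. pairs[0][1]]
|    assert(it[1] == [30, 40, 50]);      // data[pairs[1][0] .. pairs[1][1]]
|}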
|
|/++
|Iterates chunks in a sliceable using an iterator composed of indices stored consecutively.
|
|Definition:
|----
|auto elem = _sliceable[_iterator[index] .. _iterator[index + 1]];
|----
|+/
|struct ChopIterator(Iterator, Sliceable)
|{
|@optmath:
| ///
| Iterator _iterator;
| ///
| Sliceable _sliceable;
|
| ///
| auto lightConst()() const @property
| {
| return ChopIterator!(LightConstOf!Iterator, LightConstOf!Sliceable)(.lightConst(_iterator), _sliceable.lightConst);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return ChopIterator!(LightImmutableOf!Iterator, LightImmutableOf!Sliceable)(.lightImmutable(_iterator), _sliceable.lightImmutable);
| }
|
| auto ref opUnary(string op : "*")()
| {
| return _sliceable[*_iterator .. _iterator[1]];
| }
|
| auto ref opIndex()(ptrdiff_t index)
| {
| return _sliceable[_iterator[index] .. _iterator[index + 1]];
| }
|
| mixin(std_ops);
|}
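|
|// Added sketch (not part of the original module): a `ChopIterator` over a monotone
|// array of split positions, following the definition above.
|version(mir_test) unittest
|{
|    size_t[] splits = [0, 2, 5];
|    auto data = [1, 2, 3, 4, 5];
|    auto it = ChopIterator!(size_t*, int[])(splits.ptr, data);
|    assert(*it == [1, 2]);              // data[splits[0] .. splits[1]]
|    assert(it[1] == [3, 4, 5]);         // data[splits[1] .. splits[2]]
|}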
|
|/++
|Iterates on top of another iterator and returns a slice
|as a multidimensional window at the current position.
|
|`SliceIterator` is used by $(SUBREF topology, map) for packed slices.
|+/
|struct SliceIterator(Iterator, size_t N = 1, SliceKind kind = Contiguous)
|{
|@optmath:
| ///
| alias Element = Slice!(Iterator, N, kind);
| ///
| Element._Structure _structure;
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return SliceIterator!(LightConstOf!Iterator, N, kind)(_structure, .lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return SliceIterator!(LightImmutableOf!Iterator, N, kind)(_structure, .lightImmutable(_iterator));
| }
|
| auto opUnary(string op : "*")()
| {
| return Element(_structure, _iterator);
| }
|
| auto opIndex()(ptrdiff_t index)
| {
| return Element(_structure, _iterator + index);
| }
|
| mixin(std_ops);
|}
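|
|// Added sketch (not part of the original module): `pack` produces a slice whose
|// elements are slices; those elements are delivered by a `SliceIterator` positioned
|// at the current window.
|version(mir_test) unittest
|{
|    import mir.ndslice.topology: iota, pack;
|    auto rows = iota(2, 3).pack!1;      // outer 1D slice of 1D row slices
|    assert(rows[0] == [0, 1, 2]);
|    assert(rows[1] == [3, 4, 5]);
|}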
|
|public auto FieldIterator__map(Field, alias fun)(FieldIterator!(Field) it)
|{
| import mir.ndslice.field: _mapField;
| auto field = it._field._mapField!fun;
| return FieldIterator!(typeof(field))(it._index, field);
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.topology;
| import mir.ndslice.allocation;
| auto v = ndiota(3, 3).map!(a => a).slice;
| uint r;
| auto w = ndiota(3, 3).map!(a => a).map!(a => a[0] * r).slice;
|}
|
|/++
|Creates an iterator on top of a field.
|
|`FieldIterator` is used by $(SUBREF slice, slicedField), $(SUBREF topology, bitwise), $(SUBREF topology, ndiota), and others.
|+/
|struct FieldIterator(Field)
|{
|@optmath:
| ///
| ptrdiff_t _index;
| ///
| Field _field;
|
| ///
| auto lightConst()() const @property
| {
0000000| return FieldIterator!(LightConstOf!Field)(_index, .lightConst(_field));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return FieldIterator!(LightImmutableOf!Field)(_index, .lightImmutable(_field));
| }
|
| ///
| static alias __map(alias fun) = FieldIterator__map!(Field, fun);
|
| ///
| Slice!(IotaIterator!size_t) opSlice(size_t dimension)(size_t i, size_t j) scope const
| {
| assert(i <= j);
| return typeof(return)(j - i, typeof(return).Iterator(i));
| }
|
| /++
| Returns:
| `_field[_index + sl.i .. _index + sl.j]`.
| +/
| auto opIndex()(Slice!(IotaIterator!size_t) sl)
| {
| auto idx = _index + sl._iterator._index;
| return _field[idx .. idx + sl.length];
| }
|
| auto ref opUnary(string op : "*")()
0000000| { return _field[_index]; }
|
| void opUnary(string op)() scope
| if (op == "++" || op == "--")
| { mixin(op ~ `_index;`); }
|
| auto ref opIndex()(ptrdiff_t index)
0000000| { return _field[_index + index]; }
|
| static if (!__traits(compiles, &_field[_index]))
| {
| auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index)
| { return _field[_index + index] = value; }
|
| auto ref opIndexUnary(string op)(ptrdiff_t index)
| { mixin (`return ` ~ op ~ `_field[_index + index];`); }
|
| auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index)
| { mixin (`return _field[_index + index] ` ~ op ~ `= value;`); }
| }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "+" || op == "-")
| { mixin(`_index ` ~ op ~ `= index;`); }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return this._index - right._index; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
0000000| { return this._index == right._index; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
0000000| { return this._index - right._index; }
|
| ///
| auto assumeFieldsHaveZeroShift() @property
| {
0000000| if (_expect(_index != 0, false))
| {
| version (D_Exceptions)
0000000| throw assumeZeroShiftException;
| else
| assert(0, assumeZeroShiftExceptionMsg);
| }
| static if (hasZeroShiftFieldMember!Field)
| return _field.assumeFieldsHaveZeroShift;
| else
0000000| return _field;
| }
|}
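|
|// Added sketch (not part of the original module): `ndiota` builds on a `FieldIterator`
|// over an index field; each element is the multidimensional index at that position.
|version(mir_test) unittest
|{
|    import mir.ndslice.topology: ndiota;
|    auto grid = ndiota(2, 3);
|    size_t[2] first = [0, 0];
|    size_t[2] last = [1, 2];
|    assert(grid[0, 0] == first);
|    assert(grid[1, 2] == last);
|}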
|
|auto FlattenedIterator__map(Iterator, size_t N, SliceKind kind, alias fun)(FlattenedIterator!(Iterator, N, kind) it)
|{
| import mir.ndslice.topology: map;
| auto slice = it._slice.map!fun;
| return FlattenedIterator!(TemplateArgsOf!(typeof(slice)))(it._indices, slice);
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.topology;
| import mir.ndslice.allocation;
| auto v = iota(3, 3).universal.flattened.map!(a => a).slice;
| uint r;
| auto w = iota(3, 3).universal.flattened.map!(a => a).map!(a => a * r).slice;
|}
|
|/++
|Creates an iterator on top of all elements in a slice.
|
|`FlattenedIterator` is used by $(SUBREF topology, flattened) for non-contiguous slices.
|+/
|struct FlattenedIterator(Iterator, size_t N, SliceKind kind)
| if (N > 1 && (kind == Universal || kind == Canonical))
|{
|@optmath:
| ///
| ptrdiff_t[N] _indices;
| ///
| Slice!(Iterator, N, kind) _slice;
|
| ///
| auto lightConst()() const @property
| {
| return FlattenedIterator!(LightConstOf!Iterator, N, kind)(_indices, _slice.lightConst);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return FlattenedIterator!(LightImmutableOf!Iterator, N, kind)(_indices, _slice.lightImmutable);
| }
|
| ///
| static alias __map(alias fun) = FlattenedIterator__map!(Iterator, N, kind, fun);
|
| private ptrdiff_t getShift()(ptrdiff_t n)
| {
| ptrdiff_t _shift;
| n += _indices[$ - 1];
| foreach_reverse (i; Iota!(1, N))
| {
| immutable v = n / ptrdiff_t(_slice._lengths[i]);
| n %= ptrdiff_t(_slice._lengths[i]);
| static if (i == _slice.S)
| _shift += (n - _indices[i]);
| else
| _shift += (n - _indices[i]) * _slice._strides[i];
| n = _indices[i - 1] + v;
| }
| _shift += (n - _indices[0]) * _slice._strides[0];
| return _shift;
| }
|
| auto ref opUnary(string op : "*")()
| {
| return *_slice._iterator;
| }
|
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| {
| foreach_reverse (i; Iota!N)
| {
| static if (i == _slice.S)
| mixin(op ~ `_slice._iterator;`);
| else
| mixin(`_slice._iterator ` ~ op[0] ~ `= _slice._strides[i];`);
| mixin (op ~ `_indices[i];`);
| static if (i)
| {
| static if (op == "++")
| {
| if (_indices[i] < _slice._lengths[i])
| return;
| static if (i == _slice.S)
| _slice._iterator -= _slice._lengths[i];
| else
| _slice._iterator -= _slice._lengths[i] * _slice._strides[i];
| _indices[i] = 0;
| }
| else
| {
| if (_indices[i] >= 0)
| return;
| static if (i == _slice.S)
| _slice._iterator += _slice._lengths[i];
| else
| _slice._iterator += _slice._lengths[i] * _slice._strides[i];
| _indices[i] = _slice._lengths[i] - 1;
| }
| }
| }
| }
|
| auto ref opIndex()(ptrdiff_t index)
| {
| return _slice._iterator[getShift(index)];
| }
|
| static if (isMutable!(_slice.DeepElement) && !_slice.hasAccessByRef)
| ///
| auto ref opIndexAssign(E)(scope ref E elem, size_t index) scope return
| {
| return _slice._iterator[getShift(index)] = elem;
| }
|
| void opOpAssign(string op : "+")(ptrdiff_t n) scope
| {
| ptrdiff_t _shift;
| n += _indices[$ - 1];
| foreach_reverse (i; Iota!(1, N))
| {
| immutable v = n / ptrdiff_t(_slice._lengths[i]);
| n %= ptrdiff_t(_slice._lengths[i]);
| static if (i == _slice.S)
| _shift += (n - _indices[i]);
| else
| _shift += (n - _indices[i]) * _slice._strides[i];
| _indices[i] = n;
| n = _indices[i - 1] + v;
| }
| _shift += (n - _indices[0]) * _slice._strides[0];
| _indices[0] = n;
| foreach_reverse (i; Iota!(1, N))
| {
| if (_indices[i] >= 0)
| break;
| _indices[i] += _slice._lengths[i];
| _indices[i - 1]--;
| }
| _slice._iterator += _shift;
| }
|
| void opOpAssign(string op : "-")(ptrdiff_t n) scope
| { this += -n; }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| {
| ptrdiff_t ret = this._indices[0] - right._indices[0];
| foreach (i; Iota!(1, N))
| {
| ret *= _slice._lengths[i];
| ret += this._indices[i] - right._indices[i];
| }
| return ret;
| }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| {
| foreach_reverse (i; Iota!N)
| if (this._indices[i] != right._indices[i])
| return false;
| return true;
| }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| foreach (i; Iota!(N - 1))
| if (auto ret = this._indices[i] - right._indices[i])
| return ret;
| return this._indices[$ - 1] - right._indices[$ - 1];
| }
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.topology;
| import mir.ndslice.slice;
|
| auto it0 = iota(3, 4).universal.flattened._iterator;
| auto it1 = it0;
| assert(it0 == it1);
| it0 += 5;
| assert(it0 > it1);
| it0 -= 5;
| assert(*it0 == *it1);
| assert(it0 == it1);
| it0 += 5;
| it0 += 7;
| it0 -= 9;
| assert(it0 > it1);
| it1 += 3;
| assert(*it0 == *it1);
| assert(it0 == it1);
| assert(it0 <= it1);
| assert(it0 >= it1);
|
| ++it0;
| ++it0;
| ++it0;
| ++it0;
| ++it0;
| ++it0;
| ++it0;
| ++it0;
| ++it0;
|
| assert(it0 - it1 == 9);
| assert(it1 - it0 == -9);
|
| ++it0;
|
| assert(it0 - it1 == 10);
| assert(it1 - it0 == -10);
|
| --it0;
|
| assert(it0 - it1 == 9);
| assert(it1 - it0 == -9);
| assert(it0[-9] == *it1);
| assert(*it0 == it1[9]);
|
| --it0;
| --it0;
| --it0;
| --it0;
| --it0;
| --it0;
| --it0;
| --it0;
| --it0;
| assert(*it0 == *it1);
| assert(it0 == it1);
| assert(it0 <= it1);
| assert(it0 >= it1);
|}
|
|/++
|`StairsIterator` is used by $(SUBREF topology, stairs).
|+/
|struct StairsIterator(Iterator, string direction)
| if (direction == "+" || direction == "-")
|{
| ///
| size_t _length;
|
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return StairsIterator!(LightConstOf!Iterator, direction)(_length, .lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return StairsIterator!(LightImmutableOf!Iterator, direction)(_length, .lightImmutable(_iterator));
| }
|
|@optmath:
|
| ///
| Slice!Iterator opUnary(string op : "*")()
| {
| import mir.ndslice.slice: sliced;
| return _iterator.sliced(_length);
| }
|
| ///
| Slice!Iterator opIndex()(ptrdiff_t index)
| {
| import mir.ndslice.slice: sliced;
| static if (direction == "+")
| {
| auto newLength = _length + index;
| auto shift = ptrdiff_t(_length + newLength - 1) * index / 2;
| }
| else
| {
| auto newLength = _length - index;
| auto shift = ptrdiff_t(_length + newLength + 1) * index / 2;
| }
| assert(ptrdiff_t(newLength) >= 0);
| return (_iterator + shift).sliced(newLength);
| }
|
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| {
| static if (op == "++")
| {
| _iterator += _length;
| static if (direction == "+")
| ++_length;
| else
| --_length;
| }
| else
| {
| assert(_length);
| static if (direction == "+")
| --_length;
| else
| ++_length;
| _iterator -= _length;
| }
| }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "-" || op == "+")
| {
| static if (op == direction)
| auto newLength = _length + index;
| else
| auto newLength = _length - index;
| static if (direction == "+")
| auto shift = ptrdiff_t(_length + newLength - 1) * index / 2;
| else
| auto shift = ptrdiff_t(_length + newLength + 1) * index / 2;
| assert(ptrdiff_t(newLength) >= 0);
| _length = newLength;
| static if (op == "+")
| _iterator += shift;
| else
| _iterator -= shift;
| }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| {
| static if (direction == "+")
| return this._length - right._length;
| else
| return right._length - this._length;
| }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._length == right._length; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| { return this - right; }
|}
|
|///
|version(mir_test) unittest
|{
| // 0
| // 1 2
| // 3 4 5
| // 6 7 8 9
| // 10 11 12 13 14
| auto it = StairsIterator!(IotaIterator!size_t, "+")(1, IotaIterator!size_t());
| assert(*it == [0]);
| assert(it[4] == [10, 11, 12, 13, 14]);
| assert(*(it + 4) == [10, 11, 12, 13, 14]);
| ++it;
| assert(*it == [1, 2]);
| it += 3;
| assert(*it == [10, 11, 12, 13, 14]);
| assert(it[-3] == [1, 2]);
| assert(*(it - 3) == [1, 2]);
| assert(it + 1 > it);
| assert(it + 1 - 1 == it);
| assert(it - 3 - it == -3);
| --it;
| assert(*it == [6, 7, 8, 9]);
|}
|
|///
|version(mir_test) unittest
|{
| // [0, 1, 2, 3, 4],
| // [5, 6, 7, 8],
| // [9, 10, 11],
| // [12, 13],
| // [14]]);
|
| auto it = StairsIterator!(IotaIterator!size_t, "-")(5, IotaIterator!size_t());
| assert(*it == [0, 1, 2, 3, 4]);
| assert(it[4] == [14]);
| assert(*(it + 4) == [14]);
| ++it;
| assert(*it == [5, 6, 7, 8]);
| it += 3;
| assert(*it == [14]);
| assert(it[-3] == [5, 6, 7, 8]);
| assert(*(it - 3) == [5, 6, 7, 8]);
| assert(it + 1 > it);
| assert(it + 1 - 1 == it);
| assert(it - 3 - it == -3);
| --it;
| assert(*it == [12, 13]);
|}
|
|/++
|Element type of $(LREF TripletIterator).
|+/
|struct Triplet(Iterator, SliceKind kind = Contiguous)
|{
|@optmath:
| ///
| size_t _iterator;
| ///
| Slice!(Iterator, 1, kind) _slice;
|
| ///
| auto lightConst()() const @property
| {
| return Triplet!(LightConstOf!Iterator, kind)(_iterator, _slice.lightConst);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return Triplet!(LightImmutableOf!Iterator, kind)(_iterator, _slice.lightImmutable);
| }
|
| @property
| {
| ///
| auto ref center()
| {
| assert(_iterator < _slice.length);
| return _slice[_iterator];
| }
|
| ///
| Slice!(Iterator, 1, kind) left()
| {
| assert(_iterator < _slice.length);
| return _slice[0 .. _iterator];
| }
|
| ///
| Slice!(Iterator, 1, kind) right()
| {
| assert(_iterator < _slice.length);
| return _slice[_iterator + 1 .. $];
| }
| }
|}
|
|/++
|Iterates over triplet positions in a slice.
|
|`TripletIterator` is used by $(SUBREF topology, triplets).
|+/
|struct TripletIterator(Iterator, SliceKind kind = Contiguous)
|{
|@optmath:
|
| ///
| size_t _iterator;
| ///
| Slice!(Iterator, 1, kind) _slice;
|
| ///
| auto lightConst()() const @property
| {
| return TripletIterator!(LightConstOf!Iterator, kind)(_iterator, _slice.lightConst);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return TripletIterator!(LightImmutableOf!Iterator, kind)(_iterator, _slice.lightImmutable);
| }
|
| ///
| Triplet!(Iterator, kind) opUnary(string op : "*")()
| {
| return typeof(return)(_iterator, _slice);
| }
|
| ///
| Triplet!(Iterator, kind) opIndex()(ptrdiff_t index)
| {
| return typeof(return)(_iterator + index, _slice);
| }
|
| mixin(std_ops);
|}
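|
|// Added sketch (not part of the original module): the `triplets` topology built on
|// `TripletIterator`; each element splits the underlying vector into `left`, `center`,
|// and `right` parts.
|version(mir_test) unittest
|{
|    import mir.ndslice.slice: sliced;
|    import mir.ndslice.topology: triplets;
|    auto t = [1, 2, 3, 4].sliced.triplets;
|    assert(t[2].left == [1, 2]);
|    assert(t[2].center == 3);
|    assert(t[2].right == [4]);
|}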
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/iterator.d is 0% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-topology.lst
|/++
|This is a submodule of $(MREF mir,ndslice).
|
|Selectors create new views and iteration patterns over the same data, without copying.
|
|$(BOOKTABLE $(H2 Sequence Selectors),
|$(TR $(TH Function Name) $(TH Description))
|
|$(T2 cycle, Cycle repeats a 1-dimensional field/range/array/slice in a fixed-length 1-dimensional slice)
|$(T2 iota, Contiguous Slice with initial flattened (contiguous) index.)
|$(T2 linspace, Evenly spaced numbers over a specified interval.)
|$(T2 magic, Magic square.)
|$(T2 ndiota, Contiguous Slice with initial multidimensional index.)
|$(T2 repeat, Slice with identical values)
|)
|
|$(BOOKTABLE $(H2 Shape Selectors),
|$(TR $(TH Function Name) $(TH Description))
|
|$(T2 blocks, n-dimensional slice composed of n-dimensional non-overlapping blocks. If the slice has two dimensions, it is a block matrix.)
|$(T2 diagonal, 1-dimensional slice composed of diagonal elements)
|$(T2 dropBorders, Drops borders for all dimensions.)
|$(T2 reshape, New slice view with changed dimensions)
|$(T2 squeeze, New slice view of an n-dimensional slice with a dimension removed)
|$(T2 unsqueeze, New slice view of an n-dimensional slice with a dimension added)
|$(T2 windows, n-dimensional slice of n-dimensional overlapping windows. If the slice has two dimensions, it is a sliding window.)
|
|)
|
|
|$(BOOKTABLE $(H2 Subspace Selectors),
|$(TR $(TH Function Name) $(TH Description))
|
|$(T2 alongDim , Returns a slice that can be iterated along dimension.)
|$(T2 byDim , Returns a slice that can be iterated by dimension.)
|$(T2 pack , Returns a slice of slices by packing the last dimensions.)
|$(T2 ipack , Returns a slice of slices by packing the trailing dimensions; the first dimensions form the outer pack.)
|$(T2 unpack , Merges the two top dimension packs. See also $(SUBREF fuse, fuse).)
|$(T2 evertPack, Reverses dimension packs.)
|
|)
|
|$(BOOKTABLE $(H2 SliceKind Selectors),
|$(TR $(TH Function Name) $(TH Description))
|
|$(T2 universal, Converts a slice to universal $(SUBREF slice, SliceKind).)
|$(T2 canonical, Converts a slice to canonical $(SUBREF slice, SliceKind).)
|$(T2 assumeCanonical, Converts a slice to canonical $(SUBREF slice, SliceKind). Performs only `assert` checks.)
|$(T2 assumeContiguous, Converts a slice to contiguous $(SUBREF slice, SliceKind). Performs only `assert` checks.)
|$(T2 assumeHypercube, Helps the compiler use optimisations related to the shape form. Performs only `assert` checks.)
|$(T2 assumeSameShape, Helps the compiler use optimisations related to the shape form. Performs only `assert` checks.)
|
|)
|
|$(BOOKTABLE $(H2 Products),
|$(TR $(TH Function Name) $(TH Description))
|
|$(T2 cartesian, Cartesian product.)
|$(T2 kronecker, Kronecker product.)
|
|)
|
|$(BOOKTABLE $(H2 Representation Selectors),
|$(TR $(TH Function Name) $(TH Description))
|
|$(T2 as, Convenience function that creates a lazy view,
|where each element of the original slice is converted to a type `T`.)
|$(T2 bitpack, Bitpack slice over an unsigned integral slice.)
|$(T2 bitwise, Bitwise slice over an unsigned integral slice.)
|$(T2 bytegroup, Groups existing slice into fixed length chunks and uses them as data store for destination type.)
|$(T2 cached, Random access cache. It is useful in combination with $(LREF map) and $(LREF vmap).)
|$(T2 cachedGC, Random access cache auto-allocated in the GC heap. It is useful in combination with $(LREF map) and $(LREF vmap).)
|$(T2 diff, Differences between vector elements.)
|$(T2 flattened, Contiguous 1-dimensional slice of all elements of a slice.)
|$(T2 map, Multidimensional functional map.)
|$(T2 member, Field (element's member) projection.)
|$(T2 orthogonalReduceField, Functional deep-element wise reduce of a slice composed of fields or iterators.)
|$(T2 pairwise, Pairwise map for vectors.)
|$(T2 pairwiseMapSubSlices, Maps pairwise index pairs to subslices.)
|$(T2 retro, Reverses order of iteration for all dimensions.)
|$(T2 slide, Lazy convolution for tensors.)
|$(T2 slideAlong, Lazy convolution for tensors.)
|$(T2 stairs, Two functions to pack, unpack, and iterate triangular and symmetric matrix storage.)
|$(T2 stride, Strides 1-dimensional slice.)
|$(T2 subSlices, Maps index pairs to subslices.)
|$(T2 triplets, Constructs a lazy view of triplets with `left`, `center`, and `right` members. The topology is useful for Math and Physics.)
|$(T2 unzip, Selects a slice from a zipped slice.)
|$(T2 withNeighboursSum, Zip view of elements packed with sum of their neighbours.)
|$(T2 zip, Zips slices into a slice of refTuples.)
|)
|
|Subspace selectors serve to generalize and combine other selectors easily.
|For a slice of type `Slice!(Iterator, N, kind)`, `slice.pack!K` creates a slice of
|`K`-dimensional slices by packing the last `K` dimensions of the top dimension pack,
|and the element type of $(LREF flattened) is then `Slice!(Iterator, K)`.
|Another way to use $(LREF pack) is transposition of dimension packs using
|$(LREF evertPack).
|Examples of use of subspace selectors are available for selectors,
|$(SUBREF slice, Slice.shape), and $(SUBREF slice, Slice.elementCount).
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko, John Michael Hall, Shigeki Karita (original numir code)
|
|Sponsors: Part of this work has been sponsored by $(LINK2 http://symmetryinvestments.com, Symmetry Investments) and Kaleidic Associates.
|
|Macros:
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|T4=$(TR $(TDNW $(LREF $1)) $(TD $2) $(TD $3) $(TD $4))
|+/
|module mir.ndslice.topology;
|
|import mir.internal.utility;
|import mir.math.common: optmath;
|import mir.ndslice.field;
|import mir.ndslice.internal;
|import mir.ndslice.iterator;
|import mir.ndslice.ndfield;
|import mir.ndslice.slice;
|import mir.primitives;
|import mir.qualifier;
|import mir.utility: min;
|import std.meta: AliasSeq, allSatisfy, staticMap, templateOr, Repeat;
|
|private immutable choppedExceptionMsg = "bounds passed to chopped are out of sliceable bounds.";
|version (D_Exceptions) private immutable choppedException = new Exception(choppedExceptionMsg);
|
|@optmath:
|
|/++
|Converts a slice to universal kind.
|
|Params:
| slice = a slice
|Returns:
| universal slice
|See_also:
| $(LREF canonical),
| $(LREF assumeCanonical),
| $(LREF assumeContiguous).
|+/
|auto universal(Iterator, size_t N, SliceKind kind, Labels...)(Slice!(Iterator, N, kind, Labels) slice)
|{
| import core.lifetime: move;
|
| static if (kind == Universal)
| {
| return slice;
| }
| else
| static if (is(Iterator : RetroIterator!It, It))
| {
| return slice.move.retro.universal.retro;
| }
| else
| {
| alias Ret = Slice!(Iterator, N, Universal, Labels);
| size_t[Ret.N] lengths;
| auto strides = sizediff_t[Ret.S].init;
| foreach (i; Iota!(slice.N))
| lengths[i] = slice._lengths[i];
| static if (kind == Canonical)
| {
| foreach (i; Iota!(slice.S))
| strides[i] = slice._strides[i];
| strides[$-1] = 1;
| }
| else
| {
| ptrdiff_t ball = 1;
| foreach_reverse (i; Iota!(Ret.S))
| {
| strides[i] = ball;
| static if (i)
| ball *= slice._lengths[i];
| }
| }
| return Ret(lengths, strides, slice._iterator.move, slice._labels);
| }
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto slice = iota(2, 3).universal;
| assert(slice == [[0, 1, 2], [3, 4, 5]]);
| assert(slice._lengths == [2, 3]);
| assert(slice._strides == [3, 1]);
|}
|
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto slice = iota(2, 3).canonical.universal;
| assert(slice == [[0, 1, 2], [3, 4, 5]]);
| assert(slice._lengths == [2, 3]);
| assert(slice._strides == [3, 1]);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.allocation: slice;
|
| auto dataframe = slice!(double, int, string)(2, 3);
| dataframe.label[] = [1, 2];
| dataframe.label!1[] = ["Label1", "Label2", "Label3"];
|
| auto universaldf = dataframe.universal;
| assert(universaldf._lengths == [2, 3]);
| assert(universaldf._strides == [3, 1]);
|
| assert(is(typeof(universaldf) ==
| Slice!(double*, 2, Universal, int*, string*)));
| assert(universaldf.label!0[0] == 1);
| assert(universaldf.label!1[1] == "Label2");
|}
|
|/++
|Converts a slice to canonical kind.
|
|Params:
| slice = contiguous or canonical slice
|Returns:
| canonical slice
|See_also:
| $(LREF universal),
| $(LREF assumeCanonical),
| $(LREF assumeContiguous).
|+/
|Slice!(Iterator, N, N == 1 ? Contiguous : Canonical, Labels)
| canonical
| (Iterator, size_t N, SliceKind kind, Labels...)
| (Slice!(Iterator, N, kind, Labels) slice)
| if (kind == Contiguous || kind == Canonical)
|{
| import core.lifetime: move;
|
| static if (kind == Canonical || N == 1)
| return slice;
| else
| {
| alias Ret = typeof(return);
| size_t[Ret.N] lengths;
| auto strides = sizediff_t[Ret.S].init;
| foreach (i; Iota!(slice.N))
| lengths[i] = slice._lengths[i];
| ptrdiff_t ball = 1;
| foreach_reverse (i; Iota!(Ret.S))
| {
| ball *= slice._lengths[i + 1];
| strides[i] = ball;
| }
| return Ret(lengths, strides, slice._iterator.move, slice._labels);
| }
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto slice = iota(2, 3).canonical;
| assert(slice == [[0, 1, 2], [3, 4, 5]]);
| assert(slice._lengths == [2, 3]);
| assert(slice._strides == [3]);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.allocation: slice;
|
| auto dataframe = slice!(double, int, string)(2, 3);
| dataframe.label[] = [1, 2];
| dataframe.label!1[] = ["Label1", "Label2", "Label3"];
|
| auto canonicaldf = dataframe.canonical;
| assert(canonicaldf._lengths == [2, 3]);
| assert(canonicaldf._strides == [3]);
|
| assert(is(typeof(canonicaldf) ==
| Slice!(double*, 2, Canonical, int*, string*)));
| assert(canonicaldf.label!0[0] == 1);
| assert(canonicaldf.label!1[1] == "Label2");
|}
|
|/++
|Converts a slice to canonical kind (unsafe).
|
|Params:
| slice = a slice
|Returns:
| canonical slice
|See_also:
| $(LREF universal),
| $(LREF canonical),
| $(LREF assumeContiguous).
|+/
|Slice!(Iterator, N, Canonical, Labels)
| assumeCanonical
| (Iterator, size_t N, SliceKind kind, Labels...)
| (Slice!(Iterator, N, kind, Labels) slice)
|{
| static if (kind == Contiguous)
| return slice.canonical;
| else
| static if (kind == Canonical)
| return slice;
| else
| {
| import mir.utility: swap;
| assert(slice._lengths[N - 1] <= 1 || slice._strides[N - 1] == 1);
| typeof(return) ret;
| ret._lengths = slice._lengths;
| ret._strides = slice._strides[0 .. $ - 1];
| swap(ret._iterator, slice._iterator);
| foreach(i, _; Labels)
| swap(ret._labels[i], slice._labels[i]);
| return ret;
| }
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto slice = iota(2, 3).universal.assumeCanonical;
| assert(slice == [[0, 1, 2], [3, 4, 5]]);
| assert(slice._lengths == [2, 3]);
| assert(slice._strides == [3]);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.allocation: slice;
|
| auto dataframe = slice!(double, int, string)(2, 3);
| dataframe.label[] = [1, 2];
| dataframe.label!1[] = ["Label1", "Label2", "Label3"];
|
| auto assmcanonicaldf = dataframe.assumeCanonical;
| assert(assmcanonicaldf._lengths == [2, 3]);
| assert(assmcanonicaldf._strides == [3]);
|
| assert(is(typeof(assmcanonicaldf) ==
| Slice!(double*, 2, Canonical, int*, string*)));
| assert(assmcanonicaldf.label!0[0] == 1);
| assert(assmcanonicaldf.label!1[1] == "Label2");
|}
|
|/++
|Converts a slice to contiguous kind (unsafe).
|
|Params:
| slice = a slice
|Returns:
| contiguous slice
|See_also:
| $(LREF universal),
| $(LREF canonical),
| $(LREF assumeCanonical).
|+/
|Slice!(Iterator, N, Contiguous, Labels)
| assumeContiguous
| (Iterator, size_t N, SliceKind kind, Labels...)
| (Slice!(Iterator, N, kind, Labels) slice)
|{
| static if (kind == Contiguous)
| return slice;
| else
| {
| import mir.utility: swap;
| typeof(return) ret;
| ret._lengths = slice._lengths;
| swap(ret._iterator, slice._iterator);
| foreach(i, _; Labels)
| swap(ret._labels[i], slice._labels[i]);
| return ret;
| }
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto slice = iota(2, 3).universal.assumeContiguous;
| assert(slice == [[0, 1, 2], [3, 4, 5]]);
| assert(slice._lengths == [2, 3]);
| static assert(slice._strides.length == 0);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.allocation: slice;
|
| auto dataframe = slice!(double, int, string)(2, 3);
| dataframe.label[] = [1, 2];
| dataframe.label!1[] = ["Label1", "Label2", "Label3"];
|
| auto assmcontdf = dataframe.canonical.assumeContiguous;
| assert(assmcontdf._lengths == [2, 3]);
| static assert(assmcontdf._strides.length == 0);
|
| assert(is(typeof(assmcontdf) ==
| Slice!(double*, 2, Contiguous, int*, string*)));
| assert(assmcontdf.label!0[0] == 1);
| assert(assmcontdf.label!1[1] == "Label2");
|}
|
|/++
|Helps the compiler to use optimisations related to the shape form
|+/
|void assumeHypercube
| (Iterator, size_t N, SliceKind kind, Labels...)
| (ref scope Slice!(Iterator, N, kind, Labels) slice)
|{
| foreach (i; Iota!(1, N))
| {
| assert(slice._lengths[i] == slice._lengths[0]);
| slice._lengths[i] = slice._lengths[0];
| }
|}
|
|///
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| auto b = iota(5, 5);
|
| assumeHypercube(b);
|
| assert(b == iota(5, 5));
|}
|
|/++
|Helps the compiler to use optimisations related to the shape form
|+/
|void assumeSameShape(T...)
| (ref scope T slices)
| if (allSatisfy!(isSlice, T))
|{
| foreach (i; Iota!(1, T.length))
| {
| assert(slices[i]._lengths == slices[0]._lengths);
| slices[i]._lengths = slices[0]._lengths;
| }
|}
|
|///
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| auto a = iota(5, 5);
| auto b = iota(5, 5);
|
| assumeHypercube(a); // first use this one, if applicable
| assumeSameShape(a, b); //
|
| assert(a == iota(5, 5));
| assert(b == iota(5, 5));
|}
|
|/++
|+/
|auto assumeFieldsHaveZeroShift(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if (__traits(hasMember, Iterator, "assumeFieldsHaveZeroShift"))
|{
| return slice._iterator.assumeFieldsHaveZeroShift.slicedField(slice._lengths);
|}
|
|/++
|Creates a packed slice, i.e. slice of slices.
|Packs the last `P` dimensions.
|The function does not allocate any data.
|
|Params:
| P = size of dimension pack
| slice = a slice to pack
|Returns:
| `slice.pack!P` returns a slice of `P`-dimensional slices; see the function declaration for the exact return type
|See_also: $(LREF ipack)
|+/
|Slice!(SliceIterator!(Iterator, P, P == 1 && kind == Canonical ? Contiguous : kind), N - P, Universal)
|pack(size_t P, Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| if (P && P < N)
|{
| import core.lifetime: move;
| return slice.move.ipack!(N - P);
|}
|
|///
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice : sliced, Slice;
|
| auto a = iota(3, 4, 5, 6);
| auto b = a.pack!2;
|
| static immutable res1 = [3, 4];
| static immutable res2 = [5, 6];
| assert(b.shape == res1);
| assert(b[0, 0].shape == res2);
| assert(a == b.unpack);
| assert(a.pack!2 == b);
| static assert(is(typeof(b) == typeof(a.pack!2)));
|}
|
|/++
|Creates a packed slice, i.e. slice of slices.
|Packs the last `N - P` dimensions.
|The function does not allocate any data.
|
|Params:
| P = size of dimension pack
| slice = a slice to pack
|See_also: $(LREF pack)
|+/
|Slice!(SliceIterator!(Iterator, N - P, N - P == 1 && kind == Canonical ? Contiguous : kind), P, Universal)
|ipack(size_t P, Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| if (P && P < N)
|{
| import core.lifetime: move;
| alias Ret = typeof(return);
| alias It = Ret.Iterator;
| alias EN = It.Element.N;
| alias ES = It.Element.S;
| auto sl = slice.move.universal;
| static if (It.Element.kind == Contiguous)
| return Ret(
| cast( size_t[P]) sl._lengths[0 .. P],
| cast(ptrdiff_t[P]) sl._strides[0 .. P],
| It(
| cast(size_t[EN]) sl._lengths[P .. $],
| sl._iterator.move));
| else
| return Ret(
| cast( size_t[P]) sl._lengths[0 .. P],
| cast(ptrdiff_t[P]) sl._strides[0 .. P],
| It(
| cast( size_t[EN]) sl._lengths[P .. $],
| cast(ptrdiff_t[ES]) sl._strides[P .. $ - (It.Element.kind == Canonical)],
| sl._iterator.move));
|}
|
|///
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice : sliced, Slice;
|
| auto a = iota(3, 4, 5, 6);
| auto b = a.ipack!2;
|
| static immutable res1 = [3, 4];
| static immutable res2 = [5, 6];
| assert(b.shape == res1);
| assert(b[0, 0].shape == res2);
| assert(a.ipack!2 == b);
| static assert(is(typeof(b) == typeof(a.ipack!2)));
|}
|
|/++
|Unpacks a packed slice.
|
|The function does not allocate any data.
|
|Params:
| slice = packed slice
|Returns:
| unpacked slice that is a view of the same data.
|
|See_also: $(LREF pack), $(LREF evertPack)
|+/
|Slice!(Iterator, N + M, min(innerKind, Canonical))
| unpack(Iterator, size_t M, SliceKind innerKind, size_t N, SliceKind outerKind)
| (Slice!(SliceIterator!(Iterator, M, innerKind), N, outerKind) slice)
|{
| alias Ret = typeof(return);
| size_t[N + M] lengths;
| auto strides = sizediff_t[Ret.S].init;
| auto outerStrides = slice.strides;
| auto innerStrides = Slice!(Iterator, M, innerKind)(
| slice._iterator._structure,
| slice._iterator._iterator,
| ).strides;
| foreach(i; Iota!N)
| lengths[i] = slice._lengths[i];
| foreach(i; Iota!N)
| strides[i] = outerStrides[i];
| foreach(i; Iota!M)
| lengths[N + i] = slice._iterator._structure[0][i];
| foreach(i; Iota!(Ret.S - N))
| strides[N + i] = innerStrides[i];
| return Ret(lengths, strides, slice._iterator._iterator);
|}
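|
|// Added sketch (not part of the original module): `unpack` merges the outer and inner
|// dimension packs back into a plain slice over the same data.
|@safe @nogc pure nothrow version(mir_test) unittest
|{
|    auto a = iota(2, 3, 4);
|    auto b = a.pack!1;                  // 2D slice of 1D slices
|    auto c = b.unpack;                  // plain 3D slice again
|    assert(c.shape == a.shape);
|    assert(c == a);
|}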
|
|/++
|Reverses the order of dimension packs.
|This function is used in a functional pipeline with other selectors.
|
|Params:
| slice = packed slice
|Returns:
| packed slice
|
|See_also: $(LREF pack), $(LREF unpack)
|+/
|Slice!(SliceIterator!(Iterator, N, outerKind), M, innerKind)
|evertPack(Iterator, size_t M, SliceKind innerKind, size_t N, SliceKind outerKind)
| (Slice!(SliceIterator!(Iterator, M, innerKind), N, outerKind) slice)
|{
| import core.lifetime: move;
| return typeof(return)(
| slice._iterator._structure,
| typeof(return).Iterator(
| slice._structure,
| slice._iterator._iterator.move));
|}
|
|///
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.dynamic : transposed;
| auto slice = iota(3, 4, 5, 6, 7, 8, 9, 10, 11).universal;
| assert(slice
| .pack!2
| .evertPack
| .unpack
| == slice.transposed!(
| slice.shape.length-2,
| slice.shape.length-1));
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice: sliced;
| import mir.ndslice.allocation: slice;
| static assert(is(typeof(
| slice!int(6)
| .sliced(1,2,3)
| .pack!1
| .evertPack
| )
| == Slice!(SliceIterator!(int*, 2, Universal), 1)));
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| auto a = iota(3, 4, 5, 6, 7, 8, 9, 10, 11);
| auto b = a.pack!2.unpack;
| static assert(is(typeof(a.canonical) == typeof(b)));
| assert(a == b);
|}
|
|/++
|Returns a slice, the elements of which are equal to the initial flattened index value.
|
|Params:
| N = dimension count
| lengths = list of dimension lengths
| start = value of the first element in a slice (optional for integer `I`)
| stride = value of the stride between elements (optional)
|Returns:
| n-dimensional slice composed of indices
|See_also: $(LREF ndiota)
|+/
|Slice!(IotaIterator!I, N)
|iota
| (I = sizediff_t, size_t N)(size_t[N] lengths...)
| if (__traits(isIntegral, I))
|{
| import mir.ndslice.slice : sliced;
| return IotaIterator!I(I.init).sliced(lengths);
|}
|
|///ditto
|Slice!(IotaIterator!sizediff_t, N)
|iota
| (size_t N)(size_t[N] lengths, sizediff_t start)
|{
| import mir.ndslice.slice : sliced;
| return IotaIterator!sizediff_t(start).sliced(lengths);
|}
|
|///ditto
|Slice!(StrideIterator!(IotaIterator!sizediff_t), N)
|iota
| (size_t N)(size_t[N] lengths, sizediff_t start, size_t stride)
|{
| import mir.ndslice.slice : sliced;
| return StrideIterator!(IotaIterator!sizediff_t)(stride, IotaIterator!sizediff_t(start)).sliced(lengths);
|}
|
|///ditto
|template iota(I)
| if (__traits(isIntegral, I))
|{
| ///
| Slice!(IotaIterator!I, N)
| iota
| (size_t N)(size_t[N] lengths, I start)
| if (__traits(isIntegral, I))
| {
| import mir.ndslice.slice : sliced;
| return IotaIterator!I(start).sliced(lengths);
| }
|
| ///ditto
| Slice!(StrideIterator!(IotaIterator!I), N)
| iota
| (size_t N)(size_t[N] lengths, I start, size_t stride)
| if (__traits(isIntegral, I))
| {
| import mir.ndslice.slice : sliced;
| return StrideIterator!(IotaIterator!I)(stride, IotaIterator!I(start)).sliced(lengths);
| }
|}
|
|///ditto
|Slice!(IotaIterator!I, N)
|iota
| (I, size_t N)(size_t[N] lengths, I start)
| if (is(I P : P*))
|{
| import mir.ndslice.slice : sliced;
| return IotaIterator!I(start).sliced(lengths);
|}
|
|///ditto
|Slice!(StrideIterator!(IotaIterator!I), N)
|iota
| (I, size_t N)(size_t[N] lengths, I start, size_t stride)
| if (is(I P : P*))
|{
| import mir.ndslice.slice : sliced;
| return StrideIterator!(IotaIterator!I)(stride, IotaIterator!I(start)).sliced(lengths);
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| auto slice = iota(2, 3);
| static immutable array =
| [[0, 1, 2],
| [3, 4, 5]];
|
| assert(slice == array);
|
| static assert(is(DeepElementType!(typeof(slice)) == sizediff_t));
|}
|
|///
|pure nothrow @nogc
|version(mir_test) unittest
|{
| int[6] data;
| auto slice = iota([2, 3], data.ptr);
| assert(slice[0, 0] == data.ptr);
| assert(slice[0, 1] == data.ptr + 1);
| assert(slice[1, 0] == data.ptr + 3);
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| auto im = iota([10, 5], 100);
| assert(im[2, 1] == 111); // 100 + 2 * 5 + 1
|
| //slicing works correctly
| auto cm = im[1 .. $, 3 .. $];
| assert(cm[2, 1] == 119); // 119 = 100 + (1 + 2) * 5 + (3 + 1)
|}
|
|/// `iota` with step
|@safe pure nothrow version(mir_test) unittest
|{
| auto sl = iota([2, 3], 10, 10);
|
| assert(sl == [[10, 20, 30],
| [40, 50, 60]]);
|}
|
|/++
|Returns a 1-dimensional slice over the main diagonal of an n-dimensional slice.
|`diagonal` can be generalized with other selectors such as
|$(LREF blocks) (diagonal blocks) and $(LREF windows) (multi-diagonal slice).
|
|Params:
| slice = input slice
|Returns:
| 1-dimensional slice composed of diagonal elements
|See_also: $(LREF antidiagonal)
|+/
|Slice!(Iterator, 1, N == 1 ? kind : Universal)
| diagonal
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
|{
| static if (N == 1)
| {
| return slice;
| }
| else
| {
| alias Ret = typeof(return);
| size_t[Ret.N] lengths;
| auto strides = sizediff_t[Ret.S].init;
| lengths[0] = slice._lengths[0];
| foreach (i; Iota!(1, N))
| if (lengths[0] > slice._lengths[i])
| lengths[0] = slice._lengths[i];
| foreach (i; Iota!(1, Ret.N))
| lengths[i] = slice._lengths[i + N - 1];
| auto rstrides = slice.strides;
| strides[0] = rstrides[0];
| foreach (i; Iota!(1, N))
| strides[0] += rstrides[i];
| foreach (i; Iota!(1, Ret.S))
| strides[i] = rstrides[i + N - 1];
| return Ret(lengths, strides, slice._iterator);
| }
|}
|
|/// Matrix, main diagonal
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| // -------
| // | 0 1 2 |
| // | 3 4 5 |
| // -------
| //->
| // | 0 4 |
| static immutable d = [0, 4];
| assert(iota(2, 3).diagonal == d);
|}
|
|/// Non-square matrix
|@safe pure nothrow version(mir_test) unittest
|{
| // -------
| // | 0 1 |
| // | 2 3 |
| // | 4 5 |
| // -------
| //->
| // | 0 3 |
|
| assert(iota(3, 2).diagonal == iota([2], 0, 3));
|}
|
|/// Loop through diagonal
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.allocation;
|
| auto slice = slice!int(3, 3);
| int i;
| foreach (ref e; slice.diagonal)
| e = ++i;
| assert(slice == [
| [1, 0, 0],
| [0, 2, 0],
| [0, 0, 3]]);
|}
|
|/// Matrix, subdiagonal
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| // -------
| // | 0 1 2 |
| // | 3 4 5 |
| // -------
| //->
| // | 1 5 |
| static immutable d = [1, 5];
| auto a = iota(2, 3).canonical;
| a.popFront!1;
| assert(a.diagonal == d);
|}
|
|/// 3D, main diagonal
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| // -----------
| // | 0 1 2 |
| // | 3 4 5 |
| // - - - - - -
| // | 6 7 8 |
| // | 9 10 11 |
| // -----------
| //->
| // | 0 10 |
| static immutable d = [0, 10];
| assert(iota(2, 2, 3).diagonal == d);
|}
|
|/// 3D, subdiagonal
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| // -----------
| // | 0 1 2 |
| // | 3 4 5 |
| // - - - - - -
| // | 6 7 8 |
| // | 9 10 11 |
| // -----------
| //->
| // | 1 11 |
| static immutable d = [1, 11];
| auto a = iota(2, 2, 3).canonical;
| a.popFront!2;
| assert(a.diagonal == d);
|}
|
|/// 3D, diagonal plain
|@nogc @safe pure nothrow
|version(mir_test) unittest
|{
| // -----------
| // | 0 1 2 |
| // | 3 4 5 |
| // | 6 7 8 |
| // - - - - - -
| // | 9 10 11 |
| // | 12 13 14 |
| // | 15 16 17 |
| // - - - - - -
| // | 18 19 20 |
| // | 21 22 23 |
| // | 24 25 26 |
| // -----------
| //->
| // -----------
| // | 0 4 8 |
| // | 9 13 17 |
| // | 18 22 26 |
| // -----------
|
| static immutable d =
| [[ 0, 4, 8],
| [ 9, 13, 17],
| [18, 22, 26]];
|
| auto slice = iota(3, 3, 3)
| .pack!2
| .evertPack
| .diagonal
| .evertPack;
|
| assert(slice == d);
|}
|
|/++
|Returns a 1-dimensional slice over the main antidiagonal of a 2-dimensional slice.
|`antidiagonal` can be generalized with other selectors such as
|$(LREF blocks) (diagonal blocks) and $(LREF windows) (multi-diagonal slice).
|
|It runs from the top right corner to the bottom left corner.
|
|Pseudo_code:
|------
|auto antidiagonal = slice.dropToHypercube.reversed!1.diagonal;
|------
|
|Params:
| slice = input slice
|Returns:
| 1-dimensional slice composed of antidiagonal elements.
|See_also: $(LREF diagonal)
|+/
|Slice!(Iterator, 1, Universal)
| antidiagonal
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if (N == 2)
|{
| import mir.ndslice.dynamic : dropToHypercube, reversed;
| return slice.dropToHypercube.reversed!1.diagonal;
|}
|
|///
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| // -----
| // | 0 1 |
| // | 2 3 |
| // -----
| //->
| // | 1 2 |
| static immutable c = [1, 2];
| import std.stdio;
| assert(iota(2, 2).antidiagonal == c);
|}
|
|///
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| // -------
| // | 0 1 2 |
| // | 3 4 5 |
| // -------
| //->
| // | 1 3 |
| static immutable d = [1, 3];
| assert(iota(2, 3).antidiagonal == d);
|}
|
|/++
|Returns an n-dimensional slice of n-dimensional non-overlapping blocks.
|`blocks` can be generalized with other selectors.
|For example, `blocks` in combination with $(LREF diagonal) can be used to get a slice of diagonal blocks.
|For overlapped blocks, combine $(LREF windows) with $(SUBREF dynamic, strided).
|
|Params:
| N = dimension count
| slice = slice to be split into blocks
| rlengths_ = dimensions of block, residual blocks are ignored
|Returns:
| packed `N`-dimensional slice composed of `N`-dimensional slices
|
|See_also: $(SUBREF chunks, ._chunks)
|+/
|Slice!(SliceIterator!(Iterator, N, N == 1 ? Universal : min(kind, Canonical)), N, Universal)
| blocks
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice, size_t[N] rlengths_...)
|in
|{
| foreach (i, length; rlengths_)
| assert(length > 0, "length of dimension = " ~ i.stringof ~ " must be positive"
| ~ tailErrorMessage!());
|}
|do
|{
| size_t[N] lengths;
| size_t[N] rlengths = rlengths_;
| sizediff_t[N] strides;
| foreach (dimension; Iota!N)
| lengths[dimension] = slice._lengths[dimension] / rlengths[dimension];
| auto rstrides = slice.strides;
| foreach (i; Iota!N)
| {
| strides[i] = rstrides[i];
| if (lengths[i]) //do not remove `if (...)`
| strides[i] *= rlengths[i];
| }
| return typeof(return)(
| lengths,
| strides,
| typeof(return).Iterator(
| rlengths,
| rstrides[0 .. typeof(return).DeepElement.S],
| slice._iterator));
|}
|
|///
|pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.allocation;
| auto slice = slice!int(5, 8);
| auto blocks = slice.blocks(2, 3);
| int i;
| foreach (blocksRaw; blocks)
| foreach (block; blocksRaw)
| block[] = ++i;
|
| assert(blocks ==
| [[[[1, 1, 1], [1, 1, 1]],
| [[2, 2, 2], [2, 2, 2]]],
| [[[3, 3, 3], [3, 3, 3]],
| [[4, 4, 4], [4, 4, 4]]]]);
|
| assert( slice ==
| [[1, 1, 1, 2, 2, 2, 0, 0],
| [1, 1, 1, 2, 2, 2, 0, 0],
|
| [3, 3, 3, 4, 4, 4, 0, 0],
| [3, 3, 3, 4, 4, 4, 0, 0],
|
| [0, 0, 0, 0, 0, 0, 0, 0]]);
|}
|
|/// Diagonal blocks
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.allocation;
| auto slice = slice!int(5, 8);
| auto blocks = slice.blocks(2, 3);
| auto diagonalBlocks = blocks.diagonal.unpack;
|
| diagonalBlocks[0][] = 1;
| diagonalBlocks[1][] = 2;
|
| assert(diagonalBlocks ==
| [[[1, 1, 1], [1, 1, 1]],
| [[2, 2, 2], [2, 2, 2]]]);
|
| assert(blocks ==
| [[[[1, 1, 1], [1, 1, 1]],
| [[0, 0, 0], [0, 0, 0]]],
| [[[0, 0, 0], [0, 0, 0]],
| [[2, 2, 2], [2, 2, 2]]]]);
|
| assert(slice ==
| [[1, 1, 1, 0, 0, 0, 0, 0],
| [1, 1, 1, 0, 0, 0, 0, 0],
|
| [0, 0, 0, 2, 2, 2, 0, 0],
| [0, 0, 0, 2, 2, 2, 0, 0],
|
| [0, 0, 0, 0, 0, 0, 0, 0]]);
|}
|
|/// Matrix divided into vertical blocks
|@safe pure version(mir_test) unittest
|{
| import mir.ndslice.allocation;
| import mir.ndslice.slice;
| auto slice = slice!int(5, 13);
| auto blocks = slice
| .pack!1
| .evertPack
| .blocks(3)
| .unpack;
|
| int i;
| foreach (block; blocks)
| block[] = ++i;
|
| assert(slice ==
| [[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0],
| [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0],
| [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0],
| [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0],
| [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0]]);
|}
|
|/++
|Returns an n-dimensional slice of n-dimensional overlapping windows.
|`windows` can be generalized with other selectors.
|For example, `windows` in combination with $(LREF diagonal) can be used to get a multi-diagonal slice.
|
|Params:
| N = dimension count
| slice = slice to be iterated
| rlengths = dimensions of windows
|Returns:
| packed `N`-dimensional slice composed of `N`-dimensional slices
|+/
|Slice!(SliceIterator!(Iterator, N, N == 1 ? kind : min(kind, Canonical)), N, Universal)
| windows
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice, size_t[N] rlengths...)
|in
|{
| foreach (i, length; rlengths)
| assert(length > 0, "length of dimension = " ~ i.stringof ~ " must be positive"
| ~ tailErrorMessage!());
|}
|do
|{
| size_t[N] rls = rlengths;
| size_t[N] lengths;
| foreach (dimension; Iota!N)
| lengths[dimension] = slice._lengths[dimension] >= rls[dimension] ?
| slice._lengths[dimension] - rls[dimension] + 1 : 0;
| auto rstrides = slice.strides;
| static if (typeof(return).DeepElement.S)
| return typeof(return)(
| lengths,
| rstrides,
| typeof(return).Iterator(
| rls,
| rstrides[0 .. typeof(return).DeepElement.S],
| slice._iterator));
| else
| return typeof(return)(
| lengths,
| rstrides,
| typeof(return).Iterator(
| rls,
| slice._iterator));
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation;
| import mir.ndslice.slice;
| auto slice = slice!int(5, 8);
| auto windows = slice.windows(2, 3);
|
| int i;
| foreach (windowsRaw; windows)
| foreach (window; windowsRaw)
| ++window[];
|
| assert(slice ==
| [[1, 2, 3, 3, 3, 3, 2, 1],
|
| [2, 4, 6, 6, 6, 6, 4, 2],
| [2, 4, 6, 6, 6, 6, 4, 2],
| [2, 4, 6, 6, 6, 6, 4, 2],
|
| [1, 2, 3, 3, 3, 3, 2, 1]]);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation;
| import mir.ndslice.slice;
| auto slice = slice!int(5, 8);
| auto windows = slice.windows(2, 3);
| windows[1, 2][] = 1;
| windows[1, 2][0, 1] += 1;
| windows.unpack[1, 2, 0, 1] += 1;
|
| assert(slice ==
| [[0, 0, 0, 0, 0, 0, 0, 0],
|
| [0, 0, 1, 3, 1, 0, 0, 0],
| [0, 0, 1, 1, 1, 0, 0, 0],
|
| [0, 0, 0, 0, 0, 0, 0, 0],
| [0, 0, 0, 0, 0, 0, 0, 0]]);
|}
|
|/// Multi-diagonal matrix
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation;
| import mir.ndslice.slice;
| auto slice = slice!int(8, 8);
| auto windows = slice.windows(3, 3);
|
| auto multidiagonal = windows
| .diagonal
| .unpack;
| foreach (window; multidiagonal)
| window[] += 1;
|
| assert(slice ==
| [[ 1, 1, 1, 0, 0, 0, 0, 0],
| [ 1, 2, 2, 1, 0, 0, 0, 0],
| [ 1, 2, 3, 2, 1, 0, 0, 0],
| [0, 1, 2, 3, 2, 1, 0, 0],
| [0, 0, 1, 2, 3, 2, 1, 0],
| [0, 0, 0, 1, 2, 3, 2, 1],
| [0, 0, 0, 0, 1, 2, 2, 1],
| [0, 0, 0, 0, 0, 1, 1, 1]]);
|}
|
|/// Sliding window over matrix columns
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation;
| import mir.ndslice.slice;
| auto slice = slice!int(5, 8);
| auto windows = slice
| .pack!1
| .evertPack
| .windows(3)
| .unpack;
|
| foreach (window; windows)
| window[] += 1;
|
| assert(slice ==
| [[1, 2, 3, 3, 3, 3, 2, 1],
| [1, 2, 3, 3, 3, 3, 2, 1],
| [1, 2, 3, 3, 3, 3, 2, 1],
| [1, 2, 3, 3, 3, 3, 2, 1],
| [1, 2, 3, 3, 3, 3, 2, 1]]);
|}
|
|/// Overlapping blocks using windows
|@safe pure nothrow version(mir_test) unittest
|{
| // ----------------
| // | 0 1 2 3 4 |
| // | 5 6 7 8 9 |
| // | 10 11 12 13 14 |
| // | 15 16 17 18 19 |
| // | 20 21 22 23 24 |
| // ----------------
| //->
| // ---------------------
| // | 0 1 2 | 2 3 4 |
| // | 5 6 7 | 7 8 9 |
| // | 10 11 12 | 12 13 14 |
| // | - - - - - - - - - - |
| // | 10 11 12 | 12 13 14 |
| // | 15 16 17 | 17 18 19 |
| // | 20 21 22 | 22 23 24 |
| // ---------------------
|
| import mir.ndslice.slice;
| import mir.ndslice.dynamic : strided;
|
| auto overlappingBlocks = iota(5, 5)
| .windows(3, 3)
| .universal
| .strided!(0, 1)(2, 2);
|
| assert(overlappingBlocks ==
| [[[[ 0, 1, 2], [ 5, 6, 7], [10, 11, 12]],
| [[ 2, 3, 4], [ 7, 8, 9], [12, 13, 14]]],
| [[[10, 11, 12], [15, 16, 17], [20, 21, 22]],
| [[12, 13, 14], [17, 18, 19], [22, 23, 24]]]]);
|}
|
|version(mir_test) unittest
|{
| auto w = iota(9, 9).windows(3, 3);
| assert(w.front == w[0]);
|}
|
|/++
|Error codes for $(LREF reshape).
|+/
|enum ReshapeError
|{
| /// No error
| none,
| /// Slice must not be empty
| empty,
| /// Total element count should be the same
| total,
| /// Structure is incompatible with new shape
| incompatible,
|}
|
|/++
|Returns a new slice for the same data with different dimensions.
|
|Params:
| slice = slice to be reshaped
| rlengths = list of new dimensions. One of the lengths can be set to `-1`.
|        In this case, the corresponding length is inferred from the total element count.
| err = $(LREF ReshapeError) code
|Returns:
| reshaped slice
|+/
|Slice!(Iterator, M, kind) reshape
| (Iterator, size_t N, SliceKind kind, size_t M)
| (Slice!(Iterator, N, kind) slice, ptrdiff_t[M] rlengths, ref int err)
|{
| static if (kind == Canonical)
| {
| auto r = slice.universal.reshape(rlengths, err);
| assert(err || r._strides[$-1] == 1);
| r._strides[$-1] = 1;
| return r.assumeCanonical;
| }
| else
| {
| alias Ret = typeof(return);
| auto structure = Ret._Structure.init;
| alias lengths = structure[0];
| foreach (i; Iota!M)
| lengths[i] = rlengths[i];
|
| /// Code size optimization
| immutable size_t eco = slice.elementCount;
| size_t ecn = lengths[0 .. rlengths.length].iota.elementCount;
| if (eco == 0)
| {
| err = ReshapeError.empty;
| goto R;
| }
| foreach (i; Iota!M)
| if (lengths[i] == -1)
| {
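|                // `lengths[i]` equals `size_t.max` here, so `ecn` holds minus the
|                // product of the other lengths; negate it, infer the missing length
|                // from the total element count, and restore the full product for the
|                // check below.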
| ecn = -ecn;
| lengths[i] = eco / ecn;
| ecn *= lengths[i];
| break;
| }
| if (eco != ecn)
| {
| err = ReshapeError.total;
| goto R;
| }
| static if (kind == Universal)
| {
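|            // Group consecutive old and new dimensions whose element counts match,
|            // check that the old strides are dense within each group, and compute
|            // the new strides; a non-dense group cannot be reshaped in place.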
| for (size_t oi, ni, oj, nj; oi < N && ni < M; oi = oj, ni = nj)
| {
| size_t op = slice._lengths[oj++];
| size_t np = lengths[nj++];
|
| for (;;)
| {
| if (op < np)
| op *= slice._lengths[oj++];
| if (op > np)
| np *= lengths[nj++];
| if (op == np)
| break;
| }
| while (oj < N && slice._lengths[oj] == 1) oj++;
| while (nj < M && lengths[nj] == 1) nj++;
|
| for (size_t l = oi, r = oi + 1; r < oj; r++)
| if (slice._lengths[r] != 1)
| {
| if (slice._strides[l] != slice._lengths[r] * slice._strides[r])
| {
| err = ReshapeError.incompatible;
| goto R;
| }
| l = r;
| }
| assert((oi == N) == (ni == M));
|
| structure[1][nj - 1] = slice._strides[oj - 1];
| foreach_reverse (i; ni .. nj - 1)
| structure[1][i] = lengths[i + 1] * structure[1][i + 1];
| }
| }
| foreach (i; Iota!(M, Ret.N))
| lengths[i] = slice._lengths[i + N - M];
| static if (M < Ret.S)
| foreach (i; Iota!(M, Ret.S))
| structure[1][i] = slice._strides[i + N - M];
| err = 0;
| return Ret(structure, slice._iterator);
| R:
| return Ret(structure, slice._iterator.init);
| }
|}
|
|///
|@safe nothrow pure
|version(mir_test) unittest
|{
| import mir.ndslice.dynamic : allReversed;
| int err;
| auto slice = iota(3, 4)
| .universal
| .allReversed
| .reshape([-1, 3], err);
| assert(err == 0);
| assert(slice ==
| [[11, 10, 9],
| [ 8, 7, 6],
| [ 5, 4, 3],
| [ 2, 1, 0]]);
|}
|
|/// Reshaping with memory allocation
|@safe pure version(mir_test) unittest
|{
| import mir.ndslice.slice: sliced;
| import mir.ndslice.allocation: slice;
| import mir.ndslice.dynamic : reversed;
|
| auto reshape2(S, size_t M)(S sl, ptrdiff_t[M] lengths)
| {
| int err;
| // Tries to reshape without allocation
| auto ret = sl.reshape(lengths, err);
| if (!err)
| return ret;
| if (err == ReshapeError.incompatible)
| // allocates, flattens, reshapes with `sliced`, converts to universal kind
| return sl.slice.flattened.sliced(cast(size_t[M])lengths).universal;
|        throw new Exception("total element count is different or equals zero");
| }
|
| auto sl = iota!int(3, 4)
| .slice
| .universal
| .reversed!0;
|
| assert(reshape2(sl, [4, 3]) ==
| [[ 8, 9, 10],
| [11, 4, 5],
| [ 6, 7, 0],
| [ 1, 2, 3]]);
|}
|
|nothrow @safe pure version(mir_test) unittest
|{
| import mir.ndslice.dynamic : allReversed;
| auto slice = iota(1, 1, 3, 2, 1, 2, 1).universal.allReversed;
| int err;
| assert(slice.reshape([1, -1, 1, 1, 3, 1], err) ==
| [[[[[[11], [10], [9]]]],
| [[[[ 8], [ 7], [6]]]],
| [[[[ 5], [ 4], [3]]]],
| [[[[ 2], [ 1], [0]]]]]]);
| assert(err == 0);
|}
|
|// Issue 15919
|nothrow @nogc @safe pure
|version(mir_test) unittest
|{
| int err;
| assert(iota(3, 4, 5, 6, 7).pack!2.reshape([4, 3, 5], err)[0, 0, 0].shape == cast(size_t[2])[6, 7]);
| assert(err == 0);
|}
|
|nothrow @nogc @safe pure version(mir_test) unittest
|{
| import mir.ndslice.slice;
|
| int err;
| auto e = iota(1);
| // resize to the wrong dimension
| auto s = e.reshape([2], err);
| assert(err == ReshapeError.total);
| e.popFront;
| // test with an empty slice
| e.reshape([1], err);
| assert(err == ReshapeError.empty);
|}
|
|nothrow @nogc @safe pure
|version(mir_test) unittest
|{
| auto pElements = iota(3, 4, 5, 6, 7)
| .pack!2
| .flattened;
| assert(pElements[0][0] == iota(7));
| assert(pElements[$-1][$-1] == iota([7], 2513));
|}
|
|/++
|A contiguous 1-dimensional slice of all elements of a slice.
|`flattened` iterates existing data.
|The order of elements is preserved.
|
|`flattened` can be generalized with other selectors.
|
|Params:
| slice = slice to be iterated
|Returns:
| contiguous 1-dimensional slice of elements of the `slice`
|+/
|Slice!(FlattenedIterator!(Iterator, N, kind))
| flattened
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if (N != 1 && kind != Contiguous)
|{
| import core.lifetime: move;
| size_t[typeof(return).N] lengths;
| sizediff_t[typeof(return)._iterator._indices.length] indices;
| lengths[0] = slice.elementCount;
| return typeof(return)(lengths, FlattenedIterator!(Iterator, N, kind)(indices, slice.move));
|}
|
|/// ditto
|Slice!Iterator
| flattened
| (Iterator, size_t N)
| (Slice!(Iterator, N) slice)
|{
| static if (N == 1)
| {
| return slice;
| }
| else
| {
| import core.lifetime: move;
0000000| size_t[typeof(return).N] lengths;
0000000| lengths[0] = slice.elementCount;
0000000| return typeof(return)(lengths, slice._iterator.move);
| }
|}
|
|/// ditto
|Slice!(StrideIterator!Iterator)
| flattened
| (Iterator)
| (Slice!(Iterator, 1, Universal) slice)
|{
| import core.lifetime: move;
| return slice.move.hideStride;
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| auto sl1 = iota(2, 3).slice.universal.pack!1.flattened;
| auto sl2 = iota(2, 3).slice.canonical.pack!1.flattened;
| auto sl3 = iota(2, 3).slice.pack!1.flattened;
|}
|
|/// Regular slice
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| assert(iota(4, 5).flattened == iota(20));
| assert(iota(4, 5).canonical.flattened == iota(20));
| assert(iota(4, 5).universal.flattened == iota(20));
|}
|
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| assert(iota(4).flattened == iota(4));
| assert(iota(4).canonical.flattened == iota(4));
| assert(iota(4).universal.flattened == iota(4));
|}
|
|/// Packed slice
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.dynamic;
| assert(iota(3, 4, 5, 6, 7).pack!2.flattened[1] == iota([6, 7], 6 * 7));
|}
|
|/// Properties
|@safe pure nothrow version(mir_test) unittest
|{
| auto elems = iota(3, 4).universal.flattened;
|
| elems.popFrontExactly(2);
| assert(elems.front == 2);
|    /// `_indices` is available only for canonical and universal ndslices.
| assert(elems._iterator._indices == [0, 2]);
|
| elems.popBackExactly(2);
| assert(elems.back == 9);
| assert(elems.length == 8);
|}
|
|/// Index property
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice;
| auto slice = new long[20].sliced(5, 4);
|
| for (auto elems = slice.universal.flattened; !elems.empty; elems.popFront)
| {
| ptrdiff_t[2] index = elems._iterator._indices;
| elems.front = index[0] * 10 + index[1] * 3;
| }
| assert(slice ==
| [[ 0, 3, 6, 9],
| [10, 13, 16, 19],
| [20, 23, 26, 29],
| [30, 33, 36, 39],
| [40, 43, 46, 49]]);
|}
|
|@safe pure nothrow version(mir_test) unittest
|{
| auto elems = iota(3, 4).universal.flattened;
| assert(elems.front == 0);
| assert(elems.save[1] == 1);
|}
|
|/++
|Random access and slicing
|+/
|nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.slice : sliced;
|
| auto elems = iota(4, 5).slice.flattened;
|
| elems = elems[11 .. $ - 2];
|
| assert(elems.length == 7);
| assert(elems.front == 11);
| assert(elems.back == 17);
|
| foreach (i; 0 .. 7)
| assert(elems[i] == i + 11);
|
| // assign an element
| elems[2 .. 6] = -1;
| assert(elems[2 .. 6] == repeat(-1, 4));
|
| // assign an array
| static ar = [-1, -2, -3, -4];
| elems[2 .. 6] = ar;
| assert(elems[2 .. 6] == ar);
|
| // assign a slice
| ar[] *= 2;
| auto sl = ar.sliced(ar.length);
| elems[2 .. 6] = sl;
| assert(elems[2 .. 6] == sl);
|}
|
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.dynamic : allReversed;
|
| auto slice = iota(3, 4, 5);
|
| foreach (ref e; slice.universal.flattened.retro)
| {
| //...
| }
|
| foreach_reverse (ref e; slice.universal.flattened)
| {
| //...
| }
|
| foreach (ref e; slice.universal.allReversed.flattened)
| {
| //...
| }
|}
|
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import std.range.primitives : isRandomAccessRange, hasSlicing;
| auto elems = iota(4, 5).flattened;
| static assert(isRandomAccessRange!(typeof(elems)));
| static assert(hasSlicing!(typeof(elems)));
|}
|
|// Checks strides
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.dynamic;
| import std.range.primitives : isRandomAccessRange;
| auto elems = iota(4, 5).universal.everted.flattened;
| static assert(isRandomAccessRange!(typeof(elems)));
|
| elems = elems[11 .. $ - 2];
| auto elems2 = elems;
| foreach (i; 0 .. 7)
| {
| assert(elems[i] == elems2.front);
| elems2.popFront;
| }
|}
|
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.dynamic;
| import std.range.primitives : isRandomAccessRange, hasLength;
|
| auto range = (3 * 4 * 5 * 6 * 7).iota;
| auto slice0 = range.sliced(3, 4, 5, 6, 7).universal;
| auto slice1 = slice0.transposed!(2, 1).pack!2;
| auto elems0 = slice0.flattened;
| auto elems1 = slice1.flattened;
|
| foreach (S; AliasSeq!(typeof(elems0), typeof(elems1)))
| {
| static assert(isRandomAccessRange!S);
| static assert(hasLength!S);
| }
|
| assert(elems0.length == slice0.elementCount);
| assert(elems1.length == 5 * 4 * 3);
|
| auto elems2 = elems1;
| foreach (q; slice1)
| foreach (w; q)
| foreach (e; w)
| {
| assert(!elems2.empty);
| assert(e == elems2.front);
| elems2.popFront;
| }
| assert(elems2.empty);
|
| elems0.popFront();
| elems0.popFrontExactly(slice0.elementCount - 14);
| assert(elems0.length == 13);
| assert(elems0 == range[slice0.elementCount - 13 .. slice0.elementCount]);
|
| foreach (elem; elems0) {}
|}
|
|// Issue 15549
|version(mir_test) unittest
|{
| import std.range.primitives;
| import mir.ndslice.allocation;
| alias A = typeof(iota(1, 2, 3, 4).pack!1);
| static assert(isRandomAccessRange!A);
| static assert(hasLength!A);
| static assert(hasSlicing!A);
| alias B = typeof(slice!int(1, 2, 3, 4).pack!3);
| static assert(isRandomAccessRange!B);
| static assert(hasLength!B);
| static assert(hasSlicing!B);
|}
|
|// Issue 16010
|version(mir_test) unittest
|{
| auto s = iota(3, 4).flattened;
| foreach (_; 0 .. s.length)
| s = s[1 .. $];
|}
|
|/++
|Returns a slice whose elements are equal to the initial multidimensional index value.
|For a flattened (contiguous) index, see $(LREF iota).
|
|Params:
| N = dimension count
| lengths = list of dimension lengths
|Returns:
| `N`-dimensional slice composed of indices
|See_also: $(LREF iota)
|+/
|Slice!(FieldIterator!(ndIotaField!N), N)
| ndiota
| (size_t N)
| (size_t[N] lengths...)
| if (N)
|{
| return FieldIterator!(ndIotaField!N)(0, ndIotaField!N(lengths[1 .. $])).sliced(lengths);
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| auto slice = ndiota(2, 3);
| static immutable array =
| [[[0, 0], [0, 1], [0, 2]],
| [[1, 0], [1, 1], [1, 2]]];
|
| assert(slice == array);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| auto im = ndiota(7, 9);
|
| assert(im[2, 1] == [2, 1]);
|
| //slicing works correctly
| auto cm = im[1 .. $, 4 .. $];
| assert(cm[2, 1] == [3, 5]);
|}
|
|version(mir_test) unittest
|{
| auto r = ndiota(1);
| auto d = r.front;
| r.popFront;
| import std.range.primitives;
| static assert(isRandomAccessRange!(typeof(r)));
|}
|
|/++
|Evenly spaced numbers over a specified interval.
|
|Params:
|    T = floating point or complex number type
|    lengths = list of dimension lengths. Each length must be greater than 1.
| intervals = list of [start, end] pairs.
|Returns:
| `n`-dimensional grid of evenly spaced numbers over specified intervals.
|See_also: $(LREF)
|+/
|auto linspace(T, size_t N)(size_t[N] lengths, T[2][N] intervals...)
| if (N && (isFloatingPoint!T || isComplex!T))
|{
0000000| Repeat!(N, LinspaceField!T) fields;
| foreach(i; Iota!N)
| {
0000000|        assert(lengths[i] > 1, "linspace: all lengths must be greater than 1.");
0000000| fields[i] = LinspaceField!T(lengths[i], intervals[i][0], intervals[i][1]);
| }
| static if (N == 1)
0000000| return slicedField(fields);
| else
| return cartesian(fields);
|}
|
|// example from readme
|version(mir_test) unittest
|{
| import mir.ndslice;
| // import std.stdio: writefln;
|
| enum fmt = "%(%(%.2f %)\n%)\n";
|
| auto a = magic(5).as!float;
| // writefln(fmt, a);
|
| auto b = linspace!float([5, 5], [1f, 2f], [0f, 1f]).map!"a * a + b";
| // writefln(fmt, b);
|
| auto c = slice!float(5, 5);
| c[] = transposed(a + b / 2);
|}
|
|/// 1D
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto s = linspace!double([5], [1.0, 2.0]);
| assert(s == [1.0, 1.25, 1.5, 1.75, 2.0]);
|
| // reverse order
| assert(linspace!double([5], [2.0, 1.0]) == s.retro);
|
| // remove endpoint
| s.popBack;
| assert(s == [1.0, 1.25, 1.5, 1.75]);
|}
|
|/// 2D
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.functional: refTuple;
|
| auto s = linspace!double([5, 3], [1.0, 2.0], [0.0, 1.0]);
|
| assert(s == [
| [refTuple(1.00, 0.00), refTuple(1.00, 0.5), refTuple(1.00, 1.0)],
| [refTuple(1.25, 0.00), refTuple(1.25, 0.5), refTuple(1.25, 1.0)],
| [refTuple(1.50, 0.00), refTuple(1.50, 0.5), refTuple(1.50, 1.0)],
| [refTuple(1.75, 0.00), refTuple(1.75, 0.5), refTuple(1.75, 1.0)],
| [refTuple(2.00, 0.00), refTuple(2.00, 0.5), refTuple(2.00, 1.0)],
| ]);
|
| assert(s.map!"a * b" == [
| [0.0, 0.500, 1.00],
| [0.0, 0.625, 1.25],
| [0.0, 0.750, 1.50],
| [0.0, 0.875, 1.75],
| [0.0, 1.000, 2.00],
| ]);
|}
|
|/// Complex numbers
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto s = linspace!cdouble([3], [1.0 + 0i, 2.0 + 4i]);
| assert(s == [1.0 + 0i, 1.5 + 2i, 2.0 + 4i]);
|}
|
|/++
|Returns a slice with identical elements.
|The returned slice stores only a single value.
|Params:
| lengths = list of dimension lengths
|Returns:
| `n`-dimensional slice composed of identical values, where `n` is dimension count.
|+/
|Slice!(FieldIterator!(RepeatField!T), M, Universal)
| repeat(T, size_t M)(T value, size_t[M] lengths...) @trusted
| if (M && !isSlice!T)
|{
| size_t[M] ls = lengths;
| return typeof(return)(
| ls,
| sizediff_t[M].init,
| typeof(return).Iterator(0, RepeatField!T(cast(RepeatField!T.UT) value)));
|}
|
|/// ditto
|Slice!(SliceIterator!(Iterator, N, kind), M, Universal)
| repeat
| (SliceKind kind, size_t N, Iterator, size_t M)
| (Slice!(Iterator, N, kind) slice, size_t[M] lengths...)
| if (M)
|{
| import core.lifetime: move;
| size_t[M] ls = lengths;
| return typeof(return)(
| ls,
| sizediff_t[M].init,
| typeof(return).Iterator(
| slice._structure,
| move(slice._iterator)));
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto sl = iota(3).repeat(4);
| assert(sl == [[0, 1, 2],
| [0, 1, 2],
| [0, 1, 2],
| [0, 1, 2]]);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.dynamic : transposed;
|
| auto sl = iota(3)
| .repeat(4)
| .unpack
| .universal
| .transposed;
|
| assert(sl == [[0, 0, 0, 0],
| [1, 1, 1, 1],
| [2, 2, 2, 2]]);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation;
|
| auto sl = iota([3], 6).slice;
| auto slC = sl.repeat(2, 3);
| sl[1] = 4;
| assert(slC == [[[6, 4, 8],
| [6, 4, 8],
| [6, 4, 8]],
| [[6, 4, 8],
| [6, 4, 8],
| [6, 4, 8]]]);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| auto sl = repeat(4.0, 2, 3);
| assert(sl == [[4.0, 4.0, 4.0],
| [4.0, 4.0, 4.0]]);
|
| static assert(is(DeepElementType!(typeof(sl)) == double));
|
| sl[1, 1] = 3;
| assert(sl == [[3.0, 3.0, 3.0],
| [3.0, 3.0, 3.0]]);
|}
|
|/++
|Cycle repeats a 1-dimensional field/range/array/slice in a fixed-length 1-dimensional slice.
|+/
|auto cycle(Field)(Field field, size_t loopLength, size_t length)
| if (!isSlice!Field && !is(Field : T[], T))
|{
| return CycleField!Field(loopLength, field).slicedField(length);
|}
|
|/// ditto
|auto cycle(size_t loopLength, Field)(Field field, size_t length)
| if (!isSlice!Field && !is(Field : T[], T))
|{
| static assert(loopLength);
| return CycleField!(Field, loopLength)(field).slicedField(length);
|}
|
|/// ditto
|auto cycle(Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) slice, size_t length)
|{
| assert(slice.length);
| static if (kind == Universal)
| return slice.hideStride.cycle(length);
| else
| return CycleField!Iterator(slice._lengths[0], slice._iterator).slicedField(length);
|}
|
|/// ditto
|auto cycle(size_t loopLength, Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) slice, size_t length)
|{
| static assert(loopLength);
| assert(loopLength <= slice.length);
| static if (kind == Universal)
| return slice.hideStride.cycle!loopLength(length);
| else
| return CycleField!(Iterator, loopLength)(slice._iterator).slicedField(length);
|}
|
|/// ditto
|auto cycle(T)(T[] array, size_t length)
|{
| return cycle(array.sliced, length);
|}
|
|/// ditto
|auto cycle(size_t loopLength, T)(T[] array, size_t length)
|{
| return cycle!loopLength(array.sliced, length);
|}
|
|/// ditto
|auto cycle(size_t loopLength, T)(T withAsSlice, size_t length)
| if (hasAsSlice!T)
|{
| return cycle!loopLength(withAsSlice.asSlice, length);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| auto slice = iota(3);
| assert(slice.cycle(7) == [0, 1, 2, 0, 1, 2, 0]);
| assert(slice.cycle!2(7) == [0, 1, 0, 1, 0, 1, 0]);
| assert([0, 1, 2].cycle(7) == [0, 1, 2, 0, 1, 2, 0]);
| assert([4, 3, 2, 1].cycle!4(7) == [4, 3, 2, 1, 4, 3, 2]);
|}
|
|/++
|Strides 1-dimensional slice.
|Params:
| slice = 1-dimensional unpacked slice.
| factor = positive stride size.
|Returns:
| Contiguous slice with strided iterator.
|See_also: $(SUBREF dynamic, strided)
|+/
|auto stride
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice, ptrdiff_t factor)
| if (N == 1)
|in
|{
| assert (factor > 0, "factor must be positive.");
|}
|do
|{
| static if (kind == Contiguous)
| return slice.universal.stride(factor);
| else
| {
| import mir.ndslice.dynamic: strided;
| return slice.strided!0(factor).hideStride;
| }
|}
|
|///ditto
|template stride(size_t factor = 2)
| if (factor > 1)
|{
| auto stride
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| {
| import core.lifetime: move;
| static if (N > 1)
| {
| return stride(slice.move.ipack!1.map!(.stride!factor));
| }
| else
| static if (kind == Contiguous)
| {
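|            // ceiling division: a trailing partial step still yields one element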
| immutable rem = slice._lengths[0] % factor;
| slice._lengths[0] /= factor;
| if (rem)
| slice._lengths[0]++;
| return Slice!(StrideIterator!(Iterator, factor), 1, kind)(slice._structure, StrideIterator!(Iterator, factor)(move(slice._iterator)));
| }
| else
| {
| return .stride(slice.move, factor);
| }
| }
|
| /// ditto
| auto stride(T)(T[] array)
| {
| return stride(array.sliced);
| }
|
| /// ditto
| auto stride(T)(T withAsSlice)
| if (hasAsSlice!T)
| {
| return stride(withAsSlice.asSlice);
| }
|}
|
|/// ditto
|auto stride(T)(T[] array, ptrdiff_t factor)
|{
| return stride(array.sliced, factor);
|}
|
|/// ditto
|auto stride(T)(T withAsSlice, ptrdiff_t factor)
| if (hasAsSlice!T)
|{
| return stride(withAsSlice.asSlice, factor);
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| auto slice = iota(6);
| static immutable str = [0, 2, 4];
| assert(slice.stride(2) == str); // runtime factor
| assert(slice.stride!2 == str); // compile time factor
| assert(slice.stride == str); // default compile time factor is 2
| assert(slice.universal.stride(2) == str);
|}
|
|/// ND-compile time
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| auto slice = iota(4, 6);
| static immutable str = [[0, 2, 4], [12, 14, 16]];
| assert(slice.stride!2 == str); // compile time factor
| assert(slice.stride == str); // default compile time factor is 2
|}
|
|/++
|Reverses order of iteration for all dimensions.
|Params:
| slice = slice, range, or array.
|Returns:
| Slice/range with reversed order of iteration for all dimensions.
|See_also: $(SUBREF dynamic, reversed), $(SUBREF dynamic, allReversed).
|+/
|auto retro
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| @trusted
|{
| import core.lifetime: move;
| static if (kind == Contiguous || kind == Canonical)
| {
| size_t[slice.N] lengths;
| foreach (i; Iota!(slice.N))
| lengths[i] = slice._lengths[i];
| static if (slice.S)
| {
| sizediff_t[slice.S] strides;
| foreach (i; Iota!(slice.S))
| strides[i] = slice._strides[i];
| alias structure = AliasSeq!(lengths, strides);
| }
| else
| {
| alias structure = lengths;
| }
| static if (is(Iterator : RetroIterator!It, It))
| {
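|            // `retro` of an already reversed slice unwraps back to the original iterator type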
| alias Ret = Slice!(It, N, kind);
| slice._iterator._iterator -= slice.lastIndex;
| return Ret(structure, slice._iterator._iterator.move);
| }
| else
| {
| alias Ret = Slice!(RetroIterator!Iterator, N, kind);
| slice._iterator += slice.lastIndex;
| return Ret(structure, RetroIterator!Iterator(slice._iterator.move));
| }
| }
| else
| {
| import mir.ndslice.dynamic: allReversed;
| return slice.move.allReversed;
| }
|}
|
|/// ditto
|auto retro(T)(T[] array)
|{
| return retro(array.sliced);
|}
|
|/// ditto
|auto retro(T)(T withAsSlice)
| if (hasAsSlice!T)
|{
| return retro(withAsSlice.asSlice);
|}
|
|/// ditto
|auto retro(Range)(Range r)
| if (!hasAsSlice!Range && !isSlice!Range && !is(Range : T[], T))
|{
| import std.traits: Unqual;
|
| static if (is(Unqual!Range == Range))
| {
| import core.lifetime: move;
| static if (is(Range : RetroRange!R, R))
| {
| return move(r._source);
| }
| else
| {
| return RetroRange!Range(move(r));
| }
| }
| else
| {
| return .retro!(Unqual!Range)(r);
| }
|}
|
|/// ditto
|struct RetroRange(Range)
|{
| import mir.primitives: hasLength;
|
| ///
| Range _source;
|
| private enum hasAccessByRef = __traits(compiles, &_source.front);
|
| @property
| {
| bool empty()() const { return _source.empty; }
| static if (hasLength!Range)
| auto length()() const { return _source.length; }
| auto ref front()() { return _source.back; }
| auto ref back()() { return _source.front; }
| static if (__traits(hasMember, Range, "save"))
| auto save()() { return RetroRange(_source.save); }
| alias opDollar = length;
|
| static if (!hasAccessByRef)
| {
| import std.traits: ForeachType;
|
|            void front()(ForeachType!Range val)
| {
| import mir.functional: forward;
| _source.back = forward!val;
| }
|
|            void back()(ForeachType!Range val)
| {
| import mir.functional: forward;
| _source.front = forward!val;
| }
| }
| }
|
| void popFront()() { _source.popBack(); }
| void popBack()() { _source.popFront(); }
|
| static if (is(typeof(_source.moveBack())))
| auto moveFront()() { return _source.moveBack(); }
|
| static if (is(typeof(_source.moveFront())))
| auto moveBack()() { return _source.moveFront(); }
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| auto slice = iota(2, 3);
| static immutable reversed = [[5, 4, 3], [2, 1, 0]];
| assert(slice.retro == reversed);
| assert(slice.canonical.retro == reversed);
| assert(slice.universal.retro == reversed);
|
| static assert(is(typeof(slice.retro.retro) == typeof(slice)));
| static assert(is(typeof(slice.canonical.retro.retro) == typeof(slice.canonical)));
| static assert(is(typeof(slice.universal.retro) == typeof(slice.universal)));
|}
|
|/// Ranges
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| import mir.algorithm.iteration: equal;
| import std.range: std_iota = iota;
|
| assert(std_iota(4).retro.equal(iota(4).retro));
| static assert(is(typeof(std_iota(4).retro.retro) == typeof(std_iota(4))));
|}
|
|/++
|Bitwise slice over an integral slice.
|Params:
|    slice = a contiguous or canonical slice on top of an integral iterator.
|Returns: A bitwise slice.
|+/
|auto bitwise
| (Iterator, size_t N, SliceKind kind, I = typeof(Iterator.init[size_t.init]))
| (Slice!(Iterator, N, kind) slice)
| if (__traits(isIntegral, I) && (kind != Universal || N == 1))
|{
| import core.lifetime: move;
| static if (kind == Universal)
| {
| return slice.move.flattened.bitwise;
| }
| else
| {
| static if (is(Iterator : FieldIterator!Field, Field))
| {
| enum simplified = true;
| alias It = FieldIterator!(BitField!Field);
| }
| else
| {
| enum simplified = false;
| alias It = FieldIterator!(BitField!Iterator);
| }
| alias Ret = Slice!(It, N, kind);
| auto structure_ = Ret._Structure.init;
| foreach(i; Iota!(Ret.N))
| structure_[0][i] = slice._lengths[i];
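|        // the last dimension expands: each integer contributes `I.sizeof * 8` bits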
| structure_[0][$ - 1] *= I.sizeof * 8;
| foreach(i; Iota!(Ret.S))
| structure_[1][i] = slice._strides[i];
| static if (simplified)
| return Ret(structure_, It(slice._iterator._index * I.sizeof * 8, BitField!Field(slice._iterator._field.move)));
| else
| return Ret(structure_, It(0, BitField!Iterator(slice._iterator.move)));
| }
|}
|
|/// ditto
|auto bitwise(T)(T[] array)
|{
| return bitwise(array.sliced);
|}
|
|/// ditto
|auto bitwise(T)(T withAsSlice)
| if (hasAsSlice!T)
|{
| return bitwise(withAsSlice.asSlice);
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| size_t[10] data;
| auto bits = data[].bitwise;
| assert(bits.length == data.length * size_t.sizeof * 8);
| bits[111] = true;
| assert(bits[111]);
|
| bits.popFront;
| assert(bits[110]);
| bits[] = true;
| bits[110] = false;
| bits = bits[10 .. $];
| assert(bits[100] == false);
|}
|
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| size_t[10] data;
| auto slice = FieldIterator!(size_t[])(0, data[]).sliced(10);
| slice.popFrontExactly(2);
| auto bits_normal = data[].sliced.bitwise;
| auto bits = slice.bitwise;
| assert(bits.length == (data.length - 2) * size_t.sizeof * 8);
| bits[111] = true;
| assert(bits[111]);
| assert(bits_normal[111 + size_t.sizeof * 2 * 8]);
| auto ubits = slice.universal.bitwise;
| assert(bits.map!"~a" == bits.map!"!a");
| static assert (is(typeof(bits.map!"~a") == typeof(bits.map!"!a")));
| assert(bits.map!"~a" == bits.map!"!!!a");
| static assert (!is(typeof(bits.map!"~a") == typeof(bits.map!"!!!a")));
| assert(bits == ubits);
|
| bits.popFront;
| assert(bits[110]);
| bits[] = true;
| bits[110] = false;
| bits = bits[10 .. $];
| assert(bits[100] == false);
|}
|
|/++
|Bitwise field over an integral field.
|Params:
| field = an integral field.
|Returns: A bitwise field.
|+/
|auto bitwiseField(Field, I = typeof(Field.init[size_t.init]))(Field field)
| if (__traits(isUnsigned, I))
|{
| import core.lifetime: move;
| return BitField!(Field, I)(field.move);
|}
|
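|// Usage sketch added for illustration (not part of the upstream module): wraps an
|// array of unsigned words into a bit field and views it as a 1D slice with
|// `slicedField`; bit order within a word follows `BitField`.
|version(mir_test) unittest
|{
|    size_t[2] data;
|    auto bits = data[]
|        .bitwiseField
|        .slicedField(data.length * size_t.sizeof * 8);
|    bits[5] = true;        // sets one bit through the bit field
|    assert(bits[5]);
|    assert(data[0] != 0);  // the bit lands in the first word
|}
|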
|/++
|Bitpack slice over an integral slice.
|
|Bitpack is used to represent an unsigned integer slice with a smaller number of bits per element than the underlying integer type provides.
|
|Params:
|    pack = count of bits used for each packed value.
|    slice = a contiguous or canonical slice on top of an integral iterator.
|Returns: A bitpack slice.
|+/
|auto bitpack
| (size_t pack, Iterator, size_t N, SliceKind kind, I = typeof(Iterator.init[size_t.init]))
| (Slice!(Iterator, N, kind) slice)
| if (__traits(isIntegral, I) && (kind == Contiguous || kind == Canonical) && pack > 1)
|{
| import core.lifetime: move;
| static if (is(Iterator : FieldIterator!Field, Field) && I.sizeof * 8 % pack == 0)
| {
| enum simplified = true;
| alias It = FieldIterator!(BitpackField!(Field, pack));
| }
| else
| {
| enum simplified = false;
| alias It = FieldIterator!(BitpackField!(Iterator, pack));
| }
| alias Ret = Slice!(It, N, kind);
| auto structure = Ret._Structure.init;
| foreach(i; Iota!(Ret.N))
| structure[0][i] = slice._lengths[i];
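|    // rescale the last dimension: each packed value takes `pack` of the `I.sizeof * 8` bits per integer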
| structure[0][$ - 1] *= I.sizeof * 8;
| structure[0][$ - 1] /= pack;
| foreach(i; Iota!(Ret.S))
| structure[1][i] = slice._strides[i];
| static if (simplified)
| return Ret(structure, It(slice._iterator._index * I.sizeof * 8 / pack, BitpackField!(Field, pack)(slice._iterator._field.move)));
| else
| return Ret(structure, It(0, BitpackField!(Iterator, pack)(slice._iterator.move)));
|}
|
|/// ditto
|auto bitpack(size_t pack, T)(T[] array)
|{
| return bitpack!pack(array.sliced);
|}
|
|/// ditto
|auto bitpack(size_t pack, T)(T withAsSlice)
| if (hasAsSlice!T)
|{
| return bitpack!pack(withAsSlice.asSlice);
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| size_t[10] data;
| // creates a packed unsigned integer slice with max allowed value equal to `2^^6 - 1 == 63`.
| auto packs = data[].bitpack!6;
| assert(packs.length == data.length * size_t.sizeof * 8 / 6);
| packs[$ - 1] = 24;
| assert(packs[$ - 1] == 24);
|
| packs.popFront;
| assert(packs[$ - 1] == 24);
|}
|
|/++
|Bytegroup slice over an integral slice.
|
|Groups an existing slice into fixed-length chunks and uses them as the data store for the destination type.
|
|Correctly handles scalar types on both little-endian and big-endian platforms.
|
|Params:
| group = count of iterator items used to store the destination type.
| DestinationType = deep element type of the result slice.
| slice = a contiguous or canonical slice.
|Returns: A bytegroup slice.
|+/
|Slice!(BytegroupIterator!(Iterator, group, DestinationType), N, kind)
|bytegroup
| (size_t group, DestinationType, Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if ((kind == Contiguous || kind == Canonical) && group)
|{
| import core.lifetime: move;
| auto structure = slice._structure;
| structure[0][$ - 1] /= group;
| return typeof(return)(structure, BytegroupIterator!(Iterator, group, DestinationType)(slice._iterator.move));
|}
|
|/// ditto
|auto bytegroup(size_t pack, DestinationType, T)(T[] array)
|{
| return bytegroup!(pack, DestinationType)(array.sliced);
|}
|
|/// ditto
|auto bytegroup(size_t pack, DestinationType, T)(T withAsSlice)
| if (hasAsSlice!T)
|{
| return bytegroup!(pack, DestinationType)(withAsSlice.asSlice);
|}
|
|/// 24 bit integers
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.slice : DeepElementType, sliced;
|
| ubyte[20] data;
|    // views each group of 3 bytes as a single 24-bit integer value.
| auto int24ar = data[].bytegroup!(3, int); // 24 bit integers
| assert(int24ar.length == data.length / 3);
|
| enum checkInt = ((1 << 20) - 1);
|
| int24ar[3] = checkInt;
| assert(int24ar[3] == checkInt);
|
| int24ar.popFront;
| assert(int24ar[2] == checkInt);
|
| static assert(is(DeepElementType!(typeof(int24ar)) == int));
|}
|
|/// 48 bit integers
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.slice : DeepElementType, sliced;
| ushort[20] data;
|    // views each group of 3 ushorts as a single 48-bit integer value.
| auto int48ar = data[].sliced.bytegroup!(3, long); // 48 bit integers
| assert(int48ar.length == data.length / 3);
|
| enum checkInt = ((1L << 44) - 1);
|
| int48ar[3] = checkInt;
| assert(int48ar[3] == checkInt);
|
| int48ar.popFront;
| assert(int48ar[2] == checkInt);
|
| static assert(is(DeepElementType!(typeof(int48ar)) == long));
|}
|
|/++
|Implements the homonym function (also known as `transform`) present
|in many languages of functional flavor. The call `map!(fun)(slice)`
|returns a slice whose elements are obtained by applying `fun`
|to each element of `slice`. The original slices are
|not changed. Evaluation is done lazily.
|
|Note:
| $(SUBREF dynamic, transposed) and
| $(SUBREF topology, pack) can be used to specify dimensions.
|Params:
| fun = One or more functions.
|See_Also:
| $(LREF cached), $(LREF vmap), $(LREF indexed),
| $(LREF pairwise), $(LREF subSlices), $(LREF slide), $(LREF zip),
| $(HTTP en.wikipedia.org/wiki/Map_(higher-order_function), Map (higher-order function))
|+/
|template map(fun...)
| if (fun.length)
|{
| import mir.functional: adjoin, naryFun, pipe;
| static if (fun.length == 1)
| {
| static if (__traits(isSame, naryFun!(fun[0]), fun[0]))
| {
| alias f = fun[0];
| @optmath:
| /++
| Params:
| slice = An ndslice, array, or an input range.
| Returns:
| ndslice or an input range with each fun applied to all the elements. If there is more than one
| fun, the element type will be `Tuple` containing one element for each fun.
| +/
| auto map(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| {
| import core.lifetime: move;
| alias MIterator = typeof(_mapIterator!f(slice._iterator));
| import mir.ndslice.traits: isIterator;
| alias testIter = typeof(MIterator.init[0]);
| static assert(isIterator!MIterator, "mir.ndslice.map: probably the lambda function contains a compile time bug.");
| return Slice!(MIterator, N, kind)(slice._structure, _mapIterator!f(slice._iterator.move));
| }
|
| /// ditto
| auto map(T)(T[] array)
| {
| return map(array.sliced);
| }
|
| /// ditto
| auto map(T)(T withAsSlice)
| if (hasAsSlice!T)
| {
| return map(withAsSlice.asSlice);
| }
|
| /// ditto
| auto map(Range)(Range r)
| if (!hasAsSlice!Range && !isSlice!Range && !is(Range : T[], T))
| {
| import core.lifetime: forward;
| import std.range.primitives: isInputRange;
| static assert (isInputRange!Range, "map can work with ndslice, array, or an input range.");
| return MapRange!(f, ImplicitlyUnqual!Range)(forward!r);
| }
| }
| else alias map = .map!(staticMap!(naryFun, fun));
| }
| else alias map = .map!(adjoin!fun);
|}
|
|/// ditto
|struct MapRange(alias fun, Range)
|{
| import std.range.primitives;
|
| Range _input;
|
| static if (isInfinite!Range)
| {
| enum bool empty = false;
| }
| else
| {
| bool empty() @property
| {
| return _input.empty;
| }
| }
|
| void popFront()
| {
| assert(!empty, "Attempting to popFront an empty map.");
| _input.popFront();
| }
|
| auto ref front() @property
| {
| assert(!empty, "Attempting to fetch the front of an empty map.");
| return fun(_input.front);
| }
|
| static if (isBidirectionalRange!Range)
| auto ref back()() @property
| {
| assert(!empty, "Attempting to fetch the back of an empty map.");
| return fun(_input.back);
| }
|
| static if (isBidirectionalRange!Range)
| void popBack()()
| {
| assert(!empty, "Attempting to popBack an empty map.");
| _input.popBack();
| }
|
| static if (hasLength!Range)
| auto length() @property
| {
| return _input.length;
| }
|
| static if (isForwardRange!Range)
| auto save()() @property
| {
| return typeof(this)(_input.save);
| }
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| auto s = iota(2, 3).map!(a => a * 3);
| assert(s == [[ 0, 3, 6],
| [ 9, 12, 15]]);
|}
|
|/// String lambdas
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| assert(iota(2, 3).map!"a * 2" == [[0, 2, 4], [6, 8, 10]]);
|}
|
|/// Input ranges
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.algorithm.iteration: filter, equal;
| assert (6.iota.filter!"a % 2".map!"a * 10".equal([10, 30, 50]));
|}
|
|/// Packed tensors
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, windows;
| import mir.math.sum: sum;
|
| // iota windows map sums ( reduce!"a + b" )
| // --------------
| // ------- | --- --- | ------
| // | 0 1 2 | => || 0 1 || 1 2 || => | 8 12 |
| // | 3 4 5 | || 3 4 || 4 5 || ------
| // ------- | --- --- |
| // --------------
| auto s = iota(2, 3)
| .windows(2, 2)
| .map!sum;
|
| assert(s == [[8, 12]]);
|}
|
|/// Zipped tensors
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, zip;
|
| // 0 1 2
| // 3 4 5
| auto sl1 = iota(2, 3);
| // 1 2 3
| // 4 5 6
| auto sl2 = iota([2, 3], 1);
|
| auto z = zip(sl1, sl2);
|
| assert(zip(sl1, sl2).map!"a + b" == sl1 + sl2);
| assert(zip(sl1, sl2).map!((a, b) => a + b) == sl1 + sl2);
|}
|
|/++
|Multiple functions can be passed to `map`.
|In that case, the element type of `map` is a refTuple containing
|one element for each function.
|+/
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| auto sl = iota(2, 3);
| auto s = sl.map!("a + a", "a * a");
|
| auto sums = [[0, 2, 4], [6, 8, 10]];
| auto products = [[0, 1, 4], [9, 16, 25]];
|
| assert(s.map!"a[0]" == sl + sl);
| assert(s.map!"a[1]" == sl * sl);
|}
|
|/++
|`map` can be aliased to a symbol and be used separately:
|+/
|pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| alias halfs = map!"double(a) / 2";
| assert(halfs(iota(2, 3)) == [[0.0, 0.5, 1], [1.5, 2, 2.5]]);
|}
|
|/++
|Type normalization
|+/
|version(mir_test) unittest
|{
| import mir.functional : pipe;
| import mir.ndslice.topology : iota;
| auto a = iota(2, 3).map!"a + 10".map!(pipe!("a * 2", "a + 1"));
| auto b = iota(2, 3).map!(pipe!("a + 10", "a * 2", "a + 1"));
| assert(a == b);
| static assert(is(typeof(a) == typeof(b)));
|}
|
|/// Use map with byDim/alongDim to apply functions to each dimension
|version(mir_test)
|@safe pure
|unittest
|{
| import mir.ndslice.topology: byDim, alongDim;
| import mir.ndslice.fuse: fuse;
| import mir.math.stat: mean;
| import mir.algorithm.iteration: all;
| import mir.math.common: approxEqual;
|
| auto x = [
| [0.0, 1.0, 1.5, 2.0, 3.5, 4.25],
| [2.0, 7.5, 5.0, 1.0, 1.5, 0.0]
| ].fuse;
|
| // Use byDim/alongDim with map to compute mean of row/column.
| assert(x.byDim!0.map!mean.all!approxEqual([12.25 / 6, 17.0 / 6]));
| assert(x.byDim!1.map!mean.all!approxEqual([1, 4.25, 3.25, 1.5, 2.5, 2.125]));
| assert(x.alongDim!1.map!mean.all!approxEqual([12.25 / 6, 17.0 / 6]));
| assert(x.alongDim!0.map!mean.all!approxEqual([1, 4.25, 3.25, 1.5, 2.5, 2.125]));
|}
|
|/++
|Use map with a lambda and with byDim/alongDim; the result may need to be allocated.
|This example uses fuse, which allocates. Note: fuse!1 transposes the result.
|+/
|version(mir_test)
|@safe pure
|unittest {
| import mir.ndslice.topology: iota, byDim, alongDim, map;
| import mir.ndslice.fuse: fuse;
| import mir.ndslice.slice: sliced;
|
| auto x = [1, 2, 3].sliced;
| auto y = [1, 2].sliced;
|
| auto s1 = iota(2, 3).byDim!0.map!(a => a * x).fuse;
| assert(s1 == [[ 0, 2, 6],
| [ 3, 8, 15]]);
| auto s2 = iota(2, 3).byDim!1.map!(a => a * y).fuse!1;
| assert(s2 == [[ 0, 1, 2],
| [ 6, 8, 10]]);
| auto s3 = iota(2, 3).alongDim!1.map!(a => a * x).fuse;
|    assert(s3 == [[ 0, 2,  6],
| [ 3, 8, 15]]);
| auto s4 = iota(2, 3).alongDim!0.map!(a => a * y).fuse!1;
|    assert(s4 == [[ 0, 1,  2],
| [ 6, 8, 10]]);
|}
|
|///
|pure version(mir_test) unittest
|{
| import mir.algorithm.iteration: reduce;
| import mir.math.common: fmax;
| import mir.math.stat: mean;
| import mir.math.sum;
| /// Returns maximal column average.
| auto maxAvg(S)(S matrix) {
| return reduce!fmax(0.0, matrix.alongDim!1.map!mean);
| }
| // 1 2
| // 3 4
| auto matrix = iota([2, 2], 1);
| assert(maxAvg(matrix) == 3.5);
|}
|
|/++
|Implements the homonym function (also known as `transform`) present
|in many languages of functional flavor. The call `slice.vmap(fun)`
|returns a slice whose elements are obtained by applying `fun`
|to each element of `slice`. The original slices are
|not changed. Evaluation is done lazily.
|
|Note:
| $(SUBREF dynamic, transposed) and
| $(SUBREF topology, pack) can be used to specify dimensions.
|Params:
| slice = ndslice
| callable = callable object, structure, delegate, or function pointer.
|See_Also:
| $(LREF cached), $(LREF map), $(LREF indexed),
| $(LREF pairwise), $(LREF subSlices), $(LREF slide), $(LREF zip),
| $(HTTP en.wikipedia.org/wiki/Map_(higher-order_function), Map (higher-order function))
|+/
|@optmath auto vmap(Iterator, size_t N, SliceKind kind, Callable)
| (
| Slice!(Iterator, N, kind) slice,
| Callable callable,
| )
|{
| import core.lifetime: move;
| alias It = VmapIterator!(Iterator, Callable);
| return Slice!(It, N, kind)(slice._structure, It(slice._iterator.move, callable.move));
|}
|
|/// ditto
|auto vmap(T, Callable)(T[] array, Callable callable)
|{
| import core.lifetime: move;
| return vmap(array.sliced, callable.move);
|}
|
|/// ditto
|auto vmap(T, Callable)(T withAsSlice, Callable callable)
| if (hasAsSlice!T)
|{
| import core.lifetime: move;
| return vmap(withAsSlice.asSlice, callable.move);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| static struct Mul {
| double factor; this(double f) { factor = f; }
| auto opCall(long x) const {return x * factor; }
| auto lightConst()() const @property { return Mul(factor); }
| }
|
| auto callable = Mul(3);
| auto s = iota(2, 3).vmap(callable);
|
| assert(s == [[ 0, 3, 6],
| [ 9, 12, 15]]);
|}
|
|/// Packed tensors.
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.math.sum: sum;
| import mir.ndslice.topology : iota, windows;
|
| // iota windows vmap scaled sums
| // --------------
| // ------- | --- --- | -----
| // | 0 1 2 | => || 0 1 || 1 2 || => | 4 6 |
| // | 3 4 5 | || 3 4 || 4 5 || -----
| // ------- | --- --- |
| // --------------
|
| struct Callable
| {
| double factor;
| this(double f) {factor = f;}
| auto opCall(S)(S x) { return x.sum * factor; }
|
| auto lightConst()() const @property { return Callable(factor); }
| auto lightImmutable()() immutable @property { return Callable(factor); }
| }
|
| auto callable = Callable(0.5);
|
| auto s = iota(2, 3)
| .windows(2, 2)
| .vmap(callable);
|
| assert(s == [[4, 6]]);
|}
|
|/// Zipped tensors
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, zip;
|
| struct Callable
| {
| double factor;
| this(double f) {factor = f;}
| auto opCall(S, T)(S x, T y) { return x + y * factor; }
|
| auto lightConst()() const { return Callable(factor); }
| auto lightImmutable()() immutable { return Callable(factor); }
| }
|
| auto callable = Callable(10);
|
| // 0 1 2
| // 3 4 5
| auto sl1 = iota(2, 3);
| // 1 2 3
| // 4 5 6
| auto sl2 = iota([2, 3], 1);
|
| auto z = zip(sl1, sl2);
|
| assert(zip(sl1, sl2).vmap(callable) ==
| [[10, 21, 32],
| [43, 54, 65]]);
|}
|
|// TODO
|/+
|Multiple functions can be passed to `vmap`.
|In that case, the element type of `vmap` is a refTuple containing
|one element for each function.
|+/
|@safe pure nothrow
|version(none) version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| auto s = iota(2, 3).vmap!("a + a", "a * a");
|
| auto sums = [[0, 2, 4], [6, 8, 10]];
| auto products = [[0, 1, 4], [9, 16, 25]];
|
| foreach (i; 0..s.length!0)
| foreach (j; 0..s.length!1)
| {
| auto values = s[i, j];
| assert(values.a == sums[i][j]);
| assert(values.b == products[i][j]);
| }
|}
|
|/// Use vmap with byDim/alongDim to apply a callable to each dimension
|version(mir_test)
|@safe pure
|unittest
|{
| import mir.ndslice.fuse: fuse;
| import mir.math.stat: mean;
| import mir.algorithm.iteration: all;
| import mir.math.common: approxEqual;
|
| auto x = [
| [0.0, 1.0, 1.5, 2.0, 3.5, 4.25],
| [2.0, 7.5, 5.0, 1.0, 1.5, 0.0]
| ].fuse;
|
| static struct Callable
| {
| double factor;
| this(double f) {factor = f;}
| auto opCall(U)(U x) const {return x.mean + factor; }
| auto lightConst()() const @property { return Callable(factor); }
| }
|
| auto callable = Callable(0.0);
|
|    // Use byDim/alongDim with vmap to apply the callable to each row/column.
| assert(x.byDim!0.vmap(callable).all!approxEqual([12.25 / 6, 17.0 / 6]));
| assert(x.byDim!1.vmap(callable).all!approxEqual([1, 4.25, 3.25, 1.5, 2.5, 2.125]));
| assert(x.alongDim!1.vmap(callable).all!approxEqual([12.25 / 6, 17.0 / 6]));
| assert(x.alongDim!0.vmap(callable).all!approxEqual([1, 4.25, 3.25, 1.5, 2.5, 2.125]));
|}
|
|/++
|Use vmap with a callable and with byDim/alongDim; the result may need to be allocated.
|This example uses fuse, which allocates. Note: fuse!1 transposes the result.
|+/
|version(mir_test)
|@safe pure
|unittest {
| import mir.ndslice.topology: iota, alongDim, map;
| import mir.ndslice.fuse: fuse;
| import mir.ndslice.slice: sliced;
|
| static struct Mul(T)
| {
| T factor;
| this(T f) { factor = f; }
| auto opCall(U)(U x) {return x * factor; }
| auto lightConst()() const @property { return Mul!(typeof(factor.lightConst))(factor.lightConst); }
| }
|
| auto a = [1, 2, 3].sliced;
| auto b = [1, 2].sliced;
| auto A = Mul!(typeof(a))(a);
| auto B = Mul!(typeof(b))(b);
|
| auto x = [
| [0, 1, 2],
| [3, 4, 5]
| ].fuse;
|
| auto s1 = x.byDim!0.vmap(A).fuse;
| assert(s1 == [[ 0, 2, 6],
| [ 3, 8, 15]]);
| auto s2 = x.byDim!1.vmap(B).fuse!1;
| assert(s2 == [[ 0, 1, 2],
| [ 6, 8, 10]]);
| auto s3 = x.alongDim!1.vmap(A).fuse;
|    assert(s3 == [[ 0, 2,  6],
| [ 3, 8, 15]]);
| auto s4 = x.alongDim!0.vmap(B).fuse!1;
|    assert(s4 == [[ 0, 1,  2],
| [ 6, 8, 10]]);
|}
|
|private auto hideStride
| (Iterator, SliceKind kind)
| (Slice!(Iterator, 1, kind) slice)
|{
| import core.lifetime: move;
| static if (kind == Universal)
| return Slice!(StrideIterator!Iterator)(
| slice._lengths,
| StrideIterator!Iterator(slice._strides[0], move(slice._iterator)));
| else
| return slice;
|}
|
|private auto unhideStride
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
|{
| static if (is(Iterator : StrideIterator!It, It))
| {
| import core.lifetime: move;
| static if (kind == Universal)
| {
|            alias Ret = Slice!(It, N, Universal);
| auto strides = slice._strides;
| foreach(i; Iota!(Ret.S))
| strides[i] = slice._strides[i] * slice._iterator._stride;
| return Slice!(It, N, Universal)(slice._lengths, strides, slice._iterator._iterator.move);
| }
| else
| return slice.move.universal.unhideStride;
| }
| else
| return slice;
|}
|
|/++
|Creates a random access cache for lazily computed elements.
|Params:
| original = original ndslice
| caches = cached values
|    flags = array composed of flags that indicate whether values are already computed
|Returns:
| ndslice, which is internally composed of three ndslices: `original`, allocated cache and allocated bit-ndslice.
|See_also: $(LREF cachedGC), $(LREF map), $(LREF vmap), $(LREF indexed)
|+/
|Slice!(CachedIterator!(Iterator, CacheIterator, FlagIterator), N, kind)
| cached(Iterator, SliceKind kind, size_t N, CacheIterator, FlagIterator)(
| Slice!(Iterator, N, kind) original,
| Slice!(CacheIterator, N, kind) caches,
| Slice!(FlagIterator, N, kind) flags,
| )
|{
| assert(original.shape == caches.shape, "caches.shape should be equal to original.shape");
| assert(original.shape == flags.shape, "flags.shape should be equal to original.shape");
| return typeof(return)(
| original._structure,
| IteratorOf!(typeof(return))(
| original._iterator,
| caches._iterator,
| flags._iterator,
| ));
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology: cached, iota, map;
| import mir.ndslice.allocation: bitSlice, uninitSlice;
|
| int[] funCalls;
|
| auto v = 5.iota!int
| .map!((i) {
| funCalls ~= i;
| return 2 ^^ i;
| });
| auto flags = v.length.bitSlice;
| auto cache = v.length.uninitSlice!int;
| // cached lazy slice: 1 2 4 8 16
| auto sl = v.cached(cache, flags);
|
| assert(funCalls == []);
| assert(sl[1] == 2); // remember result
| assert(funCalls == [1]);
| assert(sl[1] == 2); // reuse result
| assert(funCalls == [1]);
|
| assert(sl[0] == 1);
| assert(funCalls == [1, 0]);
| funCalls = [];
|
| // set values directly
| sl[1 .. 3] = 5;
| assert(sl[1] == 5);
| assert(sl[2] == 5);
| // no function calls
| assert(funCalls == []);
|}
|
|/// Cache of immutable elements
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.slice: DeepElementType;
| import mir.ndslice.topology: cached, iota, map, as;
| import mir.ndslice.allocation: bitSlice, uninitSlice;
|
| int[] funCalls;
|
| auto v = 5.iota!int
| .map!((i) {
| funCalls ~= i;
| return 2 ^^ i;
| })
| .as!(immutable int);
| auto flags = v.length.bitSlice;
| auto cache = v.length.uninitSlice!(immutable int);
|
| // cached lazy slice: 1 2 4 8 16
| auto sl = v.cached(cache, flags);
|
| static assert(is(DeepElementType!(typeof(sl)) == immutable int));
|
| assert(funCalls == []);
| assert(sl[1] == 2); // remember result
| assert(funCalls == [1]);
| assert(sl[1] == 2); // reuse result
| assert(funCalls == [1]);
|
| assert(sl[0] == 1);
| assert(funCalls == [1, 0]);
|}
|
|/++
|Creates a random access cache for lazily computed elements.
|Params:
| original = ND Contiguous or 1D Universal ndslice.
|Returns:
| ndslice, which is internally composed of three ndslices: `original`, allocated cache and allocated bit-ndslice.
|See_also: $(LREF cached), $(LREF map), $(LREF vmap), $(LREF indexed)
|+/
|Slice!(CachedIterator!(Iterator, typeof(Iterator.init[0])*, FieldIterator!(BitField!(size_t*))), N)
| cachedGC(Iterator, size_t N)(Slice!(Iterator, N) original) @trusted
|{
| import std.traits: hasElaborateAssign, Unqual;
| import mir.ndslice.allocation: bitSlice, slice, uninitSlice;
| alias C = typeof(Iterator.init[0]);
| alias UC = Unqual!C;
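|    // caches of types with elaborate assignment need initialized storage; plain types can stay uninitialized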
| static if (hasElaborateAssign!UC)
| alias newSlice = slice;
| else
| alias newSlice = uninitSlice;
| return typeof(return)(
| original._structure,
| IteratorOf!(typeof(return))(
| original._iterator,
| newSlice!C(original._lengths)._iterator,
| original._lengths.bitSlice._iterator,
| ));
|}
|
|/// ditto
|auto cachedGC(Iterator)(Slice!(Iterator, 1, Universal) from)
|{
| return from.flattened.cachedGC;
|}
|
|/// ditto
|auto cachedGC(T)(T withAsSlice)
| if (hasAsSlice!T)
|{
| return cachedGC(withAsSlice.asSlice);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology: cachedGC, iota, map;
|
| int[] funCalls;
|
| // cached lazy slice: 1 2 4 8 16
| auto sl = 5.iota!int
| .map!((i) {
| funCalls ~= i;
| return 2 ^^ i;
| })
| .cachedGC;
|
| assert(funCalls == []);
| assert(sl[1] == 2); // remember result
| assert(funCalls == [1]);
| assert(sl[1] == 2); // reuse result
| assert(funCalls == [1]);
|
| assert(sl[0] == 1);
| assert(funCalls == [1, 0]);
| funCalls = [];
|
| // set values directly
| sl[1 .. 3] = 5;
| assert(sl[1] == 5);
| assert(sl[2] == 5);
| // no function calls
| assert(funCalls == []);
|}
|
|/// Cache of immutable elements
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.slice: DeepElementType;
| import mir.ndslice.topology: cachedGC, iota, map, as;
|
| int[] funCalls;
|
| // cached lazy slice: 1 2 4 8 16
| auto sl = 5.iota!int
| .map!((i) {
| funCalls ~= i;
| return 2 ^^ i;
| })
| .as!(immutable int)
| .cachedGC;
|
| static assert(is(DeepElementType!(typeof(sl)) == immutable int));
|
| assert(funCalls == []);
| assert(sl[1] == 2); // remember result
| assert(funCalls == [1]);
| assert(sl[1] == 2); // reuse result
| assert(funCalls == [1]);
|
| assert(sl[0] == 1);
| assert(funCalls == [1, 0]);
|}
|
|/++
|Convenience function that creates a lazy view,
|where each element of the original slice is converted to the type `T`.
|It uses $(LREF map) and $(REF_ALTTEXT $(TT to), to, mir,conv)$(NBSP)
|composition under the hood.
|Params:
| slice = a slice to create a view on.
|Returns:
| A lazy slice with elements converted to the type `T`.
|See_also: $(LREF map), $(LREF vmap)
|+/
|template as(T)
|{
| ///
| @optmath auto as(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| static if (is(slice.DeepElement == T))
| return slice;
| else
| static if (is(Iterator : T*))
| return slice.toConst;
| else
| {
| import core.lifetime: move;
| import mir.conv: to;
| return map!(to!T)(slice.move);
| }
| }
|
| /// ditto
| auto as(S)(S[] array)
| {
| return as(array.sliced);
| }
|
| /// ditto
| auto as(S)(S withAsSlice)
| if (hasAsSlice!S)
| {
| return as(withAsSlice.asSlice);
| }
|
| /// ditto
| auto as(Range)(Range r)
| if (!hasAsSlice!Range && !isSlice!Range && !is(Range : T[], T))
| {
| static if (is(ForeachType!Range == T))
| return r;
| else
| {
| import core.lifetime: move;
| import mir.conv: to;
| return map!(to!T)(r.move);
| }
| }
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : diagonal, as;
|
| auto matrix = slice!double([2, 2], 0);
|    auto intMatrixView = matrix.as!int;
|    assert(intMatrixView ==
|        [[0, 0],
|         [0, 0]]);
|
|    matrix.diagonal[] = 1;
|    assert(intMatrixView ==
|        [[1, 0],
|         [0, 1]]);
|
|    /// allocate a new slice composed of ints
|    Slice!(int*, 2) intMatrix = intMatrixView.slice;
|}
|
|/// Special behavior for pointers to constant data.
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.slice : Contiguous, Slice;
|
| Slice!(double*, 2) matrix = slice!double([2, 2], 0);
| Slice!(const(double)*, 2) const_matrix = matrix.as!(const double);
|}
|
|/// Ranges
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.algorithm.iteration: filter, equal;
| assert(5.iota.filter!"a % 2".as!double.map!"a / 2".equal([0.5, 1.5]));
|}
|
|/++
|Takes a field `source` and a slice `indices`, and creates a view of source as if its elements were reordered according to indices.
|`indices` may include only a subset of the elements of `source` and may also repeat elements.
|
|Params:
|    source = a field, the source of data. `source` must be an array or a pointer, or have an `opIndex` primitive. The full random access range API is not required.
| indices = a slice, source of indices.
|Returns:
| n-dimensional slice with the same kind, shape and strides.
|
|See_also: `indexed` is similar to $(LREF vmap), but a field (`[]`) is used instead of a function (`()`), and order of arguments is reversed.
|+/
|Slice!(IndexIterator!(Iterator, Field), N, kind)
| indexed(Field, Iterator, size_t N, SliceKind kind)
| (Field source, Slice!(Iterator, N, kind) indices)
|{
| import core.lifetime: move;
| return typeof(return)(
| indices._structure,
| IndexIterator!(Iterator, Field)(
| indices._iterator.move,
| source));
|}
|
|/// ditto
|auto indexed(Field, S)(Field source, S[] indices)
|{
| return indexed(source, indices.sliced);
|}
|
|/// ditto
|auto indexed(Field, S)(Field source, S indices)
| if (hasAsSlice!S)
|{
| return indexed(source, indices.asSlice);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| auto source = [1, 2, 3, 4, 5];
| auto indices = [4, 3, 1, 2, 0, 4];
| auto ind = source.indexed(indices);
| assert(ind == [5, 4, 2, 3, 1, 5]);
|
| assert(ind.retro == source.indexed(indices.retro));
|
| ind[3] += 10; // for index 2
| // 0 1 2 3 4
| assert(source == [1, 2, 13, 4, 5]);
|}
|
|/++
|Maps index pairs to subslices.
|Params:
| sliceable = pointer, array, ndslice, series, or something sliceable with `[a .. b]`.
| slices = ndslice composed of index pairs.
|Returns:
| ndslice composed of subslices.
|See_also: $(LREF chopped), $(LREF pairwise).
|+/
|Slice!(SubSliceIterator!(Iterator, Sliceable), N, kind)
| subSlices(Iterator, size_t N, SliceKind kind, Sliceable)(
| Sliceable sliceable,
| Slice!(Iterator, N, kind) slices,
| )
|{
| import core.lifetime: move;
| return typeof(return)(
| slices._structure,
| SubSliceIterator!(Iterator, Sliceable)(slices._iterator.move, sliceable.move)
| );
|}
|
|/// ditto
|auto subSlices(S, Sliceable)(Sliceable sliceable, S[] slices)
|{
| return subSlices(sliceable, slices.sliced);
|}
|
|/// ditto
|auto subSlices(S, Sliceable)(Sliceable sliceable, S slices)
| if (hasAsSlice!S)
|{
| return subSlices(sliceable, slices.asSlice);
|}
|
|///
|@safe pure version(mir_test) unittest
|{
| import mir.functional: staticArray;
| auto subs =[
| staticArray(2, 4),
| staticArray(2, 10),
| ];
| auto sliceable = 10.iota;
|
| auto r = sliceable.subSlices(subs);
| assert(r == [
| iota([4 - 2], 2),
| iota([10 - 2], 2),
| ]);
|}
|
|/++
|Maps index pairs to subslices.
|Params:
|    bounds = ndslice composed of consecutive (`a_i <= a_(i+1)`) pairwise index bounds.
| sliceable = pointer, array, ndslice, series, or something sliceable with `[a_i .. a_(i+1)]`.
|Returns:
| ndslice composed of subslices.
|See_also: $(LREF pairwise), $(LREF subSlices).
|+/
|Slice!(ChopIterator!(Iterator, Sliceable)) chopped(Iterator, Sliceable)(
| Sliceable sliceable,
| Slice!Iterator bounds,
| )
|in
|{
| debug(mir)
| foreach(b; bounds.pairwise!"a <= b")
| assert(b);
|}
|do {
| import core.lifetime: move;
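|    // `k` index bounds define `k - 1` consecutive subslices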
| sizediff_t length = bounds._lengths[0] <= 1 ? 0 : bounds._lengths[0] - 1;
| static if (hasLength!Sliceable)
| {
| if (length && bounds[length - 1] > sliceable.length)
| {
| version (D_Exceptions)
| throw choppedException;
| else
| assert(0, choppedExceptionMsg);
| }
| }
|
| return typeof(return)([size_t(length)], ChopIterator!(Iterator, Sliceable)(bounds._iterator.move, sliceable.move));
|}
|
|/// ditto
|auto chopped(S, Sliceable)(Sliceable sliceable, S[] bounds)
|{
| return chopped(sliceable, bounds.sliced);
|}
|
|/// ditto
|auto chopped(S, Sliceable)(Sliceable sliceable, S bounds)
| if (hasAsSlice!S)
|{
| return chopped(sliceable, bounds.asSlice);
|}
|
|///
|@safe pure version(mir_test) unittest
|{
| import mir.functional: staticArray;
| import mir.ndslice.slice : sliced;
| auto pairwiseIndexes = [2, 4, 10].sliced;
| auto sliceable = 10.iota;
|
| auto r = sliceable.chopped(pairwiseIndexes);
| assert(r == [
| iota([4 - 2], 2),
| iota([10 - 4], 4),
| ]);
|}
|
|/++
|Groups slices into a slice of refTuples. The slices must have identical strides or be 1-dimensional.
|Params:
| sameStrides = if `true`, assumes that all slices have the same strides.
| slices = list of slices
|Returns:
| n-dimensional slice of refTuple elements
|See_also: $(SUBREF slice, Slice.strides).
|+/
|template zip(bool sameStrides = false)
|{
| /++
| Groups slices into a slice of refTuples. The slices must have identical strides or be 1-dimensional.
| Params:
| slices = list of slices
| Returns:
| n-dimensional slice of refTuple elements
| See_also: $(SUBREF slice, Slice.strides).
| +/
| @optmath
| auto zip(Slices...)(Slices slices)
| if (Slices.length > 1 && allSatisfy!(isConvertibleToSlice, Slices))
| {
| static if (allSatisfy!(isSlice, Slices))
| {
| enum N = Slices[0].N;
| foreach(i, S; Slices[1 .. $])
| {
| static assert(S.N == N, "zip: all Slices must have the same dimension count");
| assert(slices[i + 1]._lengths == slices[0]._lengths, "zip: all slices must have the same lengths");
| static if (sameStrides)
| assert(slices[i + 1].strides == slices[0].strides, "zip: all slices must have the same strides when unpacked");
| }
| static if (!sameStrides && minElem(staticMap!(kindOf, Slices)) != Contiguous)
| {
| static assert(N == 1, "zip: cannot zip canonical and universal multidimensional slices if `sameStrides` is false");
| mixin(`return .zip(` ~ _iotaArgs!(Slices.length, "slices[", "].hideStride, ") ~`);`);
| }
| else
| {
| enum kind = maxElem(staticMap!(kindOf, Slices));
| alias Iterator = ZipIterator!(staticMap!(_IteratorOf, Slices));
| alias Ret = Slice!(Iterator, N, kind);
| auto structure = Ret._Structure.init;
| structure[0] = slices[0]._lengths;
| foreach (i; Iota!(Ret.S))
| structure[1][i] = slices[0]._strides[i];
| return Ret(structure, mixin("Iterator(" ~ _iotaArgs!(Slices.length, "slices[", "]._iterator, ") ~ ")"));
| }
| }
| else
| {
| return .zip(toSlices!slices);
| }
| }
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : flattened, iota;
|
| auto alpha = iota!int(4, 3);
| auto beta = slice!int(4, 3).universal;
|
| auto m = zip!true(alpha, beta);
| foreach (r; m)
| foreach (e; r)
| e.b = e.a;
| assert(alpha == beta);
|
| beta[] = 0;
| foreach (e; m.flattened)
| e.b = cast(int)e.a;
| assert(alpha == beta);
|}
|
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : flattened, iota;
|
| auto alpha = iota!int(4).universal;
| auto beta = new int[4];
|
| auto m = zip(alpha, beta);
| foreach (e; m)
| e.b = e.a;
| assert(alpha == beta);
|}
|
|/++
|Selects a slice from a zipped slice.
|Params:
| name = name of a slice to unzip.
| slice = zipped slice
|Returns:
| unzipped slice
|+/
|auto unzip
| (char name, size_t N, SliceKind kind, Iterators...)
| (Slice!(ZipIterator!Iterators, N, kind) slice)
|{
| import core.lifetime: move;
| enum size_t i = name - 'a';
| static assert(i < Iterators.length, `unzip: constraint: size_t(name - 'a') < Iterators.length`);
| return Slice!(Iterators[i], N, kind)(slice._structure, slice._iterator._iterators[i].move).unhideStride;
|}
|
|/// ditto
|auto unzip
| (char name, size_t N, SliceKind kind, Iterators...)
| (ref Slice!(ZipIterator!Iterators, N, kind) slice)
|{
| enum size_t i = name - 'a';
| static assert(i < Iterators.length, `unzip: constraint: size_t(name - 'a') < Iterators.length`);
| return Slice!(Iterators[i], N, kind)(slice._structure, slice._iterator._iterators[i]).unhideStride;
|}
|
|///
|pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : iota;
|
| auto alpha = iota!int(4, 3);
| auto beta = iota!int([4, 3], 1).slice;
|
| auto m = zip(alpha, beta);
|
| static assert(is(typeof(unzip!'a'(m)) == typeof(alpha)));
| static assert(is(typeof(unzip!'b'(m)) == typeof(beta)));
|
| assert(m.unzip!'a' == alpha);
| assert(m.unzip!'b' == beta);
|}
|
|private enum TotalDim(NdFields...) = [staticMap!(DimensionCount, NdFields)].sum;
|
|private template applyInner(alias fun, size_t N)
|{
| static if (N == 0)
| alias applyInner = fun;
| else
| {
| import mir.functional: pipe;
| alias applyInner = pipe!(zip!true, map!(.applyInner!(fun, N - 1)));
| }
|}
|
|/++
|Lazy convolution for tensors.
|
|Suitable for advanced convolution algorithms.
|
|Params:
| params = convolution window length.
| fun = one dimensional convolution function with `params` arity.
| SDimensions = dimensions to perform lazy convolution along. Negative dimensions are supported.
|See_also: $(LREF slide), $(LREF pairwise), $(LREF diff).
|+/
|template slideAlong(size_t params, alias fun, SDimensions...)
| if (params <= 'z' - 'a' + 1 && SDimensions.length > 0)
|{
| import mir.functional: naryFun;
|
| static if (allSatisfy!(isSizediff_t, SDimensions) && params > 1 && __traits(isSame, naryFun!fun, fun))
| {
| @optmath:
| /++
| Params: slice = ndslice or array
| Returns: lazy convolution result
| +/
| auto slideAlong(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| {
| import core.lifetime: move;
| static if (N > 1 && kind == Contiguous)
| {
| return slideAlong(slice.move.canonical);
| }
| else
| static if (N == 1 && kind == Universal)
| {
| return slideAlong(slice.move.flattened);
| }
| else
| {
| alias Dimensions = staticMap!(ShiftNegativeWith!N, SDimensions);
| enum dimension = Dimensions[$ - 1];
| size_t len = slice._lengths[dimension] - (params - 1);
| if (sizediff_t(len) <= 0) // overflow
| len = 0;
| slice._lengths[dimension] = len;
| static if (dimension + 1 == N || kind == Universal)
| {
| alias I = SlideIterator!(Iterator, params, fun);
| auto ret = Slice!(I, N, kind)(slice._structure, I(move(slice._iterator)));
| }
| else
| {
| alias Z = ZipIterator!(Repeat!(params, Iterator));
| Z z;
| foreach_reverse (p; Iota!(1, params))
| z._iterators[p] = slice._iterator + slice._strides[dimension] * p;
| z._iterators[0] = move(slice._iterator);
| alias M = MapIterator!(Z, fun);
| auto ret = Slice!(M, N, kind)(slice._structure, M(move(z)));
| }
| static if (Dimensions.length == 1)
| {
| return ret;
| }
| else
| {
| return .slideAlong!(params, fun, Dimensions[0 .. $ - 1])(ret);
| }
| }
| }
|
| /// ditto
| auto slideAlong(S)(S[] slice)
| {
| return slideAlong(slice.sliced);
| }
|
| /// ditto
| auto slideAlong(S)(S slice)
| if (hasAsSlice!S)
| {
| return slideAlong(slice.asSlice);
| }
| }
| else
| static if (params == 1)
| alias slideAlong = .map!(naryFun!fun);
| else alias slideAlong = .slideAlong!(params, naryFun!fun, staticMap!(toSizediff_t, SDimensions));
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| auto data = [4, 5].iota;
|
| alias scaled = a => a * 0.25;
|
| auto v = data.slideAlong!(3, "a + 2 * b + c", 0).map!scaled;
| auto h = data.slideAlong!(3, "a + 2 * b + c", 1).map!scaled;
|
| assert(v == [4, 5].iota[1 .. $ - 1, 0 .. $]);
| assert(h == [4, 5].iota[0 .. $, 1 .. $ - 1]);
|}
|
|/++
|Lazy convolution for tensors.
|
|Suitable for simple convolution algorithms.
|
|Params:
| params = window length.
| fun = one dimensional convolution function with `params` arity.
|See_also: $(LREF slideAlong), $(LREF withNeighboursSum), $(LREF pairwise), $(LREF diff).
|+/
|template slide(size_t params, alias fun)
| if (params <= 'z' - 'a' + 1)
|{
| import mir.functional: naryFun;
|
| static if (params > 1 && __traits(isSame, naryFun!fun, fun))
| {
| @optmath:
| /++
| Params: slice = ndslice or array
| Returns: lazy convolution result
| +/
| auto slide(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| {
| import core.lifetime: move;
| return slice.move.slideAlong!(params, fun, Iota!N);
| }
|
| /// ditto
| auto slide(S)(S[] slice)
| {
| return slide(slice.sliced);
| }
|
| /// ditto
| auto slide(S)(S slice)
| if (hasAsSlice!S)
| {
| return slide(slice.asSlice);
| }
| }
| else
| static if (params == 1)
| alias slide = .map!(naryFun!fun);
| else alias slide = .slide!(params, naryFun!fun);
|}
|
|///
|version(mir_test) unittest
|{
| auto data = 10.iota;
| auto sw = data.slide!(3, "a + 2 * b + c");
|
| import mir.utility: max;
| assert(sw.length == max(0, cast(ptrdiff_t)data.length - 3 + 1));
| assert(sw == sw.length.iota.map!"(a + 1) * 4");
| assert(sw == [4, 8, 12, 16, 20, 24, 28, 32]);
|}
|
|/++
|ND-use case
|+/
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| auto data = [4, 5].iota;
|
| enum factor = 1.0 / 4 ^^ data.shape.length;
| alias scaled = a => a * factor;
|
| auto sw = data.slide!(3, "a + 2 * b + c").map!scaled;
|
| assert(sw == [4, 5].iota[1 .. $ - 1, 1 .. $ - 1]);
|}
|
|/++
|Pairwise map for tensors.
|
|The computation is performed on request, when the element is accessed.
|
|Params:
| fun = function to accumulate
| lag = an integer indicating which lag to use
|Returns: lazy ndslice composed of `fun(a_n, a_{n+1})` values (for a larger `lag`, `fun(a_n, ..., a_{n+lag})`).
|
|See_also: $(LREF slide), $(LREF slideAlong), $(LREF subSlices).
|+/
|alias pairwise(alias fun, size_t lag = 1) = slide!(lag + 1, fun);
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| assert([2, 4, 3, -1].sliced.pairwise!"a + b" == [6, 7, 2]);
|}
|
|/// N-dimensional
|@safe pure nothrow
|version(mir_test) unittest
|{
| // performs pairwise along each dimension
| // 0 1 2 3
| // 4 5 6 7
| // 8 9 10 11
| assert([3, 4].iota.pairwise!"a + b" == [[10, 14, 18], [26, 30, 34]]);
|}
|
|/++
|Differences between tensor elements.
|
|The computation is performed on request, when the element is accessed.
|
|Params:
| lag = an integer indicating which lag to use
|Returns: lazy differences.
|
|See_also: $(LREF slide), $(LREF pairwise).
|+/
|alias diff(size_t lag = 1) = pairwise!(('a' + lag) ~ " - a", lag);
|
|///
|version(mir_test) unittest
|{
| assert([2, 4, 3, -1].sliced.diff == [2, -1, -4]);
|}
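|
|// Illustrative sketch (added for clarity, not from the library sources),
|// assuming the documented lagged difference `a_{n+lag} - a_n`.
|version(mir_test) unittest
|{
|    assert([1, 2, 4, 8, 16].sliced.diff!2 == [3, 6, 12]);
|}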
|
|/// N-dimensional
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| // 0 1 2 3
| // 4 5 6 7 =>
| // 8 9 10 11
|
| // 1 1 1
| // 1 1 1 =>
| // 1 1 1
|
| // 0 0 0
| // 0 0 0
|
| assert([3, 4].iota.diff == repeat(0, [2, 3]));
|}
|
|/// packed slices
|version(mir_test) unittest
|{
| // 0 1 2 3
| // 4 5 6 7
| // 8 9 10 11
| auto s = iota(3, 4);
| assert(s.byDim!0.diff == [
| [4, 4, 4, 4],
| [4, 4, 4, 4]]);
| assert(s.byDim!1.diff == [
| [1, 1, 1],
| [1, 1, 1],
| [1, 1, 1]]);
|}
|
|/++
|Drops borders for all dimensions.
|
|Params:
| slice = ndslice
|Returns:
| Tensor with stripped borders
|See_also:
| $(LREF universal),
| $(LREF assumeCanonical),
| $(LREF assumeContiguous).
|+/
|Slice!(Iterator, N, N > 1 && kind == Contiguous ? Canonical : kind, Labels)
| dropBorders
| (Iterator, size_t N, SliceKind kind, Labels...)
| (Slice!(Iterator, N, kind, Labels) slice)
|{
| static if (N > 1 && kind == Contiguous)
| {
| import core.lifetime: move;
| auto ret = slice.move.canonical;
| }
| else
| {
| alias ret = slice;
| }
| ret.popFrontAll;
| ret.popBackAll;
| return ret;
|}
|
|///
|version(mir_test) unittest
|{
| assert([4, 5].iota.dropBorders == [[6, 7, 8], [11, 12, 13]]);
|}
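|
|// Illustrative sketch (added for clarity, not from the library sources):
|// dropping the borders is equivalent to slicing away the first and last index
|// along every dimension.
|version(mir_test) unittest
|{
|    assert([4, 5].iota.dropBorders == [4, 5].iota[1 .. $ - 1, 1 .. $ - 1]);
|}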
|
|/++
|Lazy zip view of elements packed with sum of their neighbours.
|
|Params:
| fun = neighbours accumulation function.
|See_also: $(LREF slide), $(LREF slideAlong).
|+/
|template withNeighboursSum(alias fun = "a + b")
|{
| import mir.functional: naryFun;
|
| static if (__traits(isSame, naryFun!fun, fun))
| {
| @optmath:
| /++
| Params:
| slice = ndslice or array
| Returns:
| Lazy zip view of elements packed with sum of their neighbours.
| +/
| auto withNeighboursSum(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| {
| import core.lifetime: move;
| static if (N > 1 && kind == Contiguous)
| {
| return withNeighboursSum(slice.move.canonical);
| }
| else
| static if (N == 1 && kind == Universal)
| {
| return withNeighboursSum(slice.move.flattened);
| }
| else
| {
| enum around = kind != Universal;
| alias Z = NeighboursIterator!(Iterator, N - around, fun, around);
|
| size_t shift;
| foreach (dimension; Iota!N)
| {
| slice._lengths[dimension] -= 2;
| if (sizediff_t(slice._lengths[dimension]) <= 0) // overflow
| slice._lengths[dimension] = 0;
| shift += slice._stride!dimension;
| }
|
| Z z;
| z._iterator = move(slice._iterator);
| z._iterator += shift;
| foreach (dimension; Iota!(N - around))
| {
| z._neighbours[dimension][0] = z._iterator - slice._strides[dimension];
| z._neighbours[dimension][1] = z._iterator + slice._strides[dimension];
| }
| return Slice!(Z, N, kind)(slice._structure, move(z));
| }
| }
|
| /// ditto
| auto withNeighboursSum(S)(S[] slice)
| {
| return withNeighboursSum(slice.sliced);
| }
|
| /// ditto
| auto withNeighboursSum(S)(S slice)
| if (hasAsSlice!S)
| {
| return withNeighboursSum(slice.asSlice);
| }
| }
| else alias withNeighboursSum = .withNeighboursSum!(naryFun!fun);
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.algorithm.iteration: all;
|
| auto wn = [4, 5].iota.withNeighboursSum;
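|    // Added comment: each element of `wn` packs the original value (`a[0]`,
|    // or `a` in the string lambdas below) with the sum of its neighbours
|    // (`a[1]` / `b`); for iota the four neighbours of an interior element sum
|    // to four times its value.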
| assert(wn.all!"a[0] == a[1] * 0.25");
| assert(wn.map!"a" == wn.map!"b * 0.25");
|}
|
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.algorithm.iteration: all;
|
| auto wn = [4, 5].iota.withNeighboursSum.universal;
| assert(wn.all!"a[0] == a[1] * 0.25");
| assert(wn.map!"a" == wn.map!"b * 0.25");
|}
|
|/++
|Cartesian product.
|
|Constructs a lazy Cartesian product $(SUBREF slice, Slice) without memory allocation.
|
|Params:
| fields = list of fields with lengths or ndFields with shapes
|Returns: $(SUBREF ndfield, Cartesian)`!NdFields(fields).`$(SUBREF slice, slicedNdField)`;`
|+/
|auto cartesian(NdFields...)(NdFields fields)
| if (NdFields.length > 1 && allSatisfy!(templateOr!(hasShape, hasLength), NdFields))
|{
| return Cartesian!NdFields(fields).slicedNdField;
|}
|
|/// 1D x 1D
|version(mir_test) unittest
|{
| auto a = [10, 20, 30];
| auto b = [ 1, 2, 3];
|
| auto c = cartesian(a, b)
| .map!"a + b";
|
| assert(c == [
| [11, 12, 13],
| [21, 22, 23],
| [31, 32, 33]]);
|}
|
|/// 1D x 2D
|version(mir_test) unittest
|{
| auto a = [10, 20, 30];
| auto b = iota([2, 3], 1);
|
| auto c = cartesian(a, b)
| .map!"a + b";
|
| assert(c.shape == [3, 2, 3]);
|
| assert(c == [
| [
| [11, 12, 13],
| [14, 15, 16],
| ],
| [
| [21, 22, 23],
| [24, 25, 26],
| ],
| [
| [31, 32, 33],
| [34, 35, 36],
| ]]);
|}
|
|/// 1D x 1D x 1D
|version(mir_test) unittest
|{
| auto u = [100, 200];
| auto v = [10, 20, 30];
| auto w = [1, 2];
|
| auto c = cartesian(u, v, w)
| .map!"a + b + c";
|
| assert(c.shape == [2, 3, 2]);
|
| assert(c == [
| [
| [111, 112],
| [121, 122],
| [131, 132],
| ],
| [
| [211, 212],
| [221, 222],
| [231, 232],
| ]]);
|}
|
|/++
|$(LINK2 https://en.wikipedia.org/wiki/Kronecker_product, Kronecker product).
|
|Constructs a lazy Kronecker product $(SUBREF slice, Slice) without memory allocation.
|+/
|template kronecker(alias fun = product)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun))
|
| /++
| Params:
| fields = list of either fields with lengths or ndFields with shapes.
| All ndFields must have the same dimension count.
| Returns:
| $(SUBREF ndfield, Kronecker)`!(fun, NdFields)(fields).`$(SUBREF slice, slicedNdField)
| +/
| @optmath auto kronecker(NdFields...)(NdFields fields)
| if (allSatisfy!(hasShape, NdFields) || allSatisfy!(hasLength, NdFields))
| {
| return Kronecker!(fun, NdFields)(fields).slicedNdField;
| }
| else
| alias kronecker = .kronecker!(naryFun!fun);
|}
|
|/// 2D
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.slice : sliced;
|
| // eye
| auto a = slice!double([4, 4], 0);
| a.diagonal[] = 1;
|
| auto b = [ 1, -1,
| -1, 1].sliced(2, 2);
|
| auto c = kronecker(a, b);
|
| assert(c == [
| [ 1, -1, 0, 0, 0, 0, 0, 0],
| [-1, 1, 0, 0, 0, 0, 0, 0],
| [ 0, 0, 1, -1, 0, 0, 0, 0],
| [ 0, 0, -1, 1, 0, 0, 0, 0],
| [ 0, 0, 0, 0, 1, -1, 0, 0],
| [ 0, 0, 0, 0, -1, 1, 0, 0],
| [ 0, 0, 0, 0, 0, 0, 1, -1],
| [ 0, 0, 0, 0, 0, 0, -1, 1]]);
|}
|
|/// 1D
|version(mir_test) unittest
|{
| auto a = iota([3], 1);
|
| auto b = [ 1, -1];
|
| auto c = kronecker(a, b);
|
| assert(c == [1, -1, 2, -2, 3, -3]);
|}
|
|/// 2D with 3 arguments
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.slice : sliced;
|
| auto a = [ 1, 2,
| 3, 4].sliced(2, 2);
|
| auto b = [ 1, 0,
| 0, 1].sliced(2, 2);
|
| auto c = [ 1, -1,
| -1, 1].sliced(2, 2);
|
| auto d = kronecker(a, b, c);
|
| assert(d == [
| [ 1, -1, 0, 0, 2, -2, 0, 0],
| [-1, 1, 0, 0, -2, 2, 0, 0],
| [ 0, 0, 1, -1, 0, 0, 2, -2],
| [ 0, 0, -1, 1, 0, 0, -2, 2],
| [ 3, -3, 0, 0, 4, -4, 0, 0],
| [-3, 3, 0, 0, -4, 4, 0, 0],
| [ 0, 0, 3, -3, 0, 0, 4, -4],
| [ 0, 0, -3, 3, 0, 0, -4, 4]]);
|}
|
|/++
|$(HTTPS en.wikipedia.org/wiki/Magic_square, Magic square).
|Params:
| length = square matrix length.
|Returns:
| Lazy magic matrix.
|+/
|auto magic(size_t length)
|{
0000000| assert(length > 0);
| static if (is(size_t == ulong))
| assert(length <= uint.max);
| else
0000000| assert(length <= ushort.max);
| import mir.ndslice.field: MagicField;
0000000| return MagicField(length).slicedField(length, length);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.math.sum;
| import mir.ndslice: slice, magic, byDim, map, as, repeat, diagonal, antidiagonal;
|
| bool isMagic(S)(S matrix)
| {
| auto n = matrix.length;
| auto c = n * (n * n + 1) / 2; // magic number
| return // check shape
| matrix.length!0 > 0 && matrix.length!0 == matrix.length!1
| && // each row sum should equal magic number
| matrix.byDim!0.map!sum == c.repeat(n)
| && // each column sum should equal magic number
| matrix.byDim!1.map!sum == c.repeat(n)
| && // diagonal sum should equal magic number
| matrix.diagonal.sum == c
| && // antidiagonal sum should equal magic number
| matrix.antidiagonal.sum == c;
| }
|
| assert(isMagic(magic(1)));
| assert(!isMagic(magic(2))); // 2x2 magic square does not exist
| foreach(n; 3 .. 24)
| assert(isMagic(magic(n)));
| assert(isMagic(magic(3).as!double.slice));
|}
|
|/++
|Chops 1D input slice into n chunks with ascending or descending lengths.
|
|`stairs` can be used to pack and unpack symmetric and triangular matrix storage.
|
|Note: `stairs` is defined for 1D (packed) input and 2D (general) input.
| This part of the documentation is for 1D input.
|
|Params:
| type = $(UL
| $(LI `"-"` for stairs with lengths `n, n-1, ..., 1`.)
| $(LI `"+"` for stairs with lengths `1, 2, ..., n`;)
| )
| slice = input slice with length equal to `n * (n + 1) / 2`
| n = stairs count
|Returns:
| 1D contiguous slice composed of 1D contiguous slices.
|
|See_also: $(LREF triplets) $(LREF ._stairs.2)
|+/
|Slice!(StairsIterator!(Iterator, type)) stairs(string type, Iterator)(Slice!Iterator slice, size_t n)
| if (type == "+" || type == "-")
|{
| assert(slice.length == (n + 1) * n / 2, "stairs: slice length must be equal to n * (n + 1) / 2, where n is stairs count.");
| static if (type == "+")
| size_t length = 1;
| else
| size_t length = n;
| return StairsIterator!(Iterator, type)(length, slice._iterator).sliced(n);
|}
|
|/// ditto
|Slice!(StairsIterator!(S*, type)) stairs(string type, S)(S[] slice, size_t n)
| if (type == "+" || type == "-")
|{
| return stairs!type(slice.sliced, n);
|}
|
|/// ditto
|auto stairs(string type, S)(S slice, size_t n)
| if (hasAsSlice!S && (type == "+" || type == "-"))
|{
| return stairs!type(slice.asSlice, n);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.topology: iota, stairs;
|
| auto pck = 15.iota;
| auto inc = pck.stairs!"+"(5);
| auto dec = pck.stairs!"-"(5);
|
| assert(inc == [
| [0],
| [1, 2],
| [3, 4, 5],
| [6, 7, 8, 9],
| [10, 11, 12, 13, 14]]);
| assert(inc[1 .. $][2] == [6, 7, 8, 9]);
|
| assert(dec == [
| [0, 1, 2, 3, 4],
| [5, 6, 7, 8],
| [9, 10, 11],
| [12, 13],
| [14]]);
| assert(dec[1 .. $][2] == [12, 13]);
|
| static assert(is(typeof(inc.front) == typeof(pck)));
| static assert(is(typeof(dec.front) == typeof(pck)));
|}
|
|/++
|Slice composed of rows of lower or upper triangular matrix.
|
|`stairs` can be used to pack and unpack symmetric and triangular matrix storage.
|
|Note: `stairs` is defined for 1D (packed) input and 2D (general) input.
| This part of the documentation is for 2D input.
|
|Params:
| type = $(UL
| $(LI `"+"` for stairs with lengths `1, 2, ..., n`, lower matrix;)
| $(LI `"-"` for stairs with lengths `n, n-1, ..., 1`, upper matrix.)
| )
| slice = input square matrix
|Returns:
| 1D slice composed of 1D contiguous slices.
|
|See_also: $(LREF _stairs) $(SUBREF dynamic, transposed), $(LREF universal)
|+/
|auto stairs(string type, Iterator, SliceKind kind)(Slice!(Iterator, 2, kind) slice)
| if (type == "+" || type == "-")
|{
| assert(slice.length!0 == slice.length!1, "stairs: input slice must be a square matrix.");
| static if (type == "+")
| {
| return slice
| .pack!1
| .map!"a"
| .zip([slice.length].iota!size_t(1))
| .map!"a[0 .. b]";
| }
| else
| {
| return slice
| .pack!1
| .map!"a"
| .zip([slice.length].iota!size_t)
| .map!"a[b .. $]";
| }
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.topology: iota, as, stairs;
|
| auto gen = [3, 3].iota.as!double;
| auto inc = gen.stairs!"+";
| auto dec = gen.stairs!"-";
|
| assert(inc == [
| [0],
| [3, 4],
| [6, 7, 8]]);
|
| assert(dec == [
| [0, 1, 2],
| [4, 5],
| [8]]);
|
| static assert(is(typeof(inc.front) == typeof(gen.front)));
| static assert(is(typeof(dec.front) == typeof(gen.front)));
|
| /////////////////////////////////////////
| // Pack lower and upper matrix parts
| auto n = gen.length;
| auto m = n * (n + 1) / 2;
| // allocate memory
| import mir.ndslice.allocation: uninitSlice;
| auto lowerData = m.uninitSlice!double;
| auto upperData = m.uninitSlice!double;
| // construct packed stairs
| auto lower = lowerData.stairs!"+"(n);
| auto upper = upperData.stairs!"-"(n);
| // copy data
| import mir.algorithm.iteration: each;
| each!"a[] = b"(lower, inc);
| each!"a[] = b"(upper, dec);
|
| assert(&lower[0][0] is &lowerData[0]);
| assert(&upper[0][0] is &upperData[0]);
|
| assert(lowerData == [0, 3, 4, 6, 7, 8]);
| assert(upperData == [0, 1, 2, 4, 5, 8]);
|}
|
|/++
|Returns a slice that can be iterated along dimension. Transposes other dimensions on top and then packs them.
|
|Combines $(LREF byDim) and $(LREF evertPack).
|
|Params:
| SDimensions = dimensions to iterate along; a list of length d, `1 <= d < n`. Negative dimensions are supported.
|Returns:
| `(n-d)`-dimensional slice composed of d-dimensional slices
|See_also:
| $(LREF byDim),
| $(LREF iota),
| $(SUBREF allocation, slice),
| $(LREF ipack),
| $(SUBREF dynamic, transposed).
|+/
|template alongDim(SDimensions...)
| if (SDimensions.length > 0)
|{
| static if (allSatisfy!(isSizediff_t, SDimensions))
| {
| /++
| Params:
| slice = input n-dimensional slice, n > d
| Returns:
| `(n-d)`-dimensional slice composed of d-dimensional slices
| +/
| @optmath auto alongDim(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if (N > SDimensions.length)
| {
| import core.lifetime: move;
| return slice.move.byDim!SDimensions.evertPack;
| }
| }
| else
| {
| alias alongDim = .alongDim!(staticMap!(toSizediff_t, SDimensions));
| }
|}
|
|/// 2-dimensional slice support
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice;
|
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto slice = iota(3, 4);
| //->
| // | 3 |
| //->
| // | 4 |
| size_t[1] shape3 = [3];
| size_t[1] shape4 = [4];
|
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto x = slice.alongDim!(-1); // -1 is the last dimension index, the same as 1 for this case.
| static assert(is(typeof(x) == Slice!(SliceIterator!(IotaIterator!sizediff_t), 1, Universal)));
|
| assert(x.shape == shape3);
| assert(x.front.shape == shape4);
| assert(x.front == iota(4));
| x.popFront;
| assert(x.front == iota([4], 4));
|
| // ---------
| // | 0 4 8 |
| // | 1 5 9 |
| // | 2 6 10 |
| // | 3 7 11 |
| // ---------
| auto y = slice.alongDim!0; // alongDim!(-2) is the same for matrices.
| static assert(is(typeof(y) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 1, Universal))));
|
| assert(y.shape == shape4);
| assert(y.front.shape == shape3);
| assert(y.front == iota([3], 0, 4));
| y.popFront;
| assert(y.front == iota([3], 1, 4));
|}
|
|/// 3-dimensional slice support, N-dimensional also supported
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice;
|
| // ----------------
| // | 0 1 2 3 4 |
| // | 5 6 7 8 9 |
| // | 10 11 12 13 14 |
| // | 15 16 17 18 19 |
| // - - - - - - - -
| // | 20 21 22 23 24 |
| // | 25 26 27 28 29 |
| // | 30 31 32 33 34 |
| // | 35 36 37 38 39 |
| // - - - - - - - -
| // | 40 41 42 43 44 |
| // | 45 46 47 48 49 |
| // | 50 51 52 53 54 |
| // | 55 56 57 58 59 |
| // ----------------
| auto slice = iota(3, 4, 5);
|
| size_t[2] shape45 = [4, 5];
| size_t[2] shape35 = [3, 5];
| size_t[2] shape34 = [3, 4];
| size_t[2] shape54 = [5, 4];
| size_t[1] shape3 = [3];
| size_t[1] shape4 = [4];
| size_t[1] shape5 = [5];
|
| // ----------
| // | 0 20 40 |
| // | 5 25 45 |
| // | 10 30 50 |
| // | 15 35 55 |
| // - - - - -
| // | 1 21 41 |
| // | 6 26 46 |
| // | 11 31 51 |
| // | 16 36 56 |
| // - - - - -
| // | 2 22 42 |
| // | 7 27 47 |
| // | 12 32 52 |
| // | 17 37 57 |
| // - - - - -
| // | 3 23 43 |
| // | 8 28 48 |
| // | 13 33 53 |
| // | 18 38 58 |
| // - - - - -
| // | 4 24 44 |
| // | 9 29 49 |
| // | 14 34 54 |
| // | 19 39 59 |
| // ----------
| auto a = slice.alongDim!0.transposed;
| static assert(is(typeof(a) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 1, Universal), 2, Universal)));
|
| assert(a.shape == shape54);
| assert(a.front.shape == shape4);
| assert(a.front.unpack == iota([3, 4], 0, 5).universal.transposed);
| a.popFront;
| assert(a.front.front == iota([3], 1, 20));
|
| // ----------------
| // | 0 1 2 3 4 |
| // | 5 6 7 8 9 |
| // | 10 11 12 13 14 |
| // | 15 16 17 18 19 |
| // - - - - - - - -
| // | 20 21 22 23 24 |
| // | 25 26 27 28 29 |
| // | 30 31 32 33 34 |
| // | 35 36 37 38 39 |
| // - - - - - - - -
| // | 40 41 42 43 44 |
| // | 45 46 47 48 49 |
| // | 50 51 52 53 54 |
| // | 55 56 57 58 59 |
| // ----------------
| auto x = slice.alongDim!(1, 2);
| static assert(is(typeof(x) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 2), 1, Universal)));
|
| assert(x.shape == shape3);
| assert(x.front.shape == shape45);
| assert(x.front == iota([4, 5]));
| x.popFront;
| assert(x.front == iota([4, 5], (4 * 5)));
|
| // ----------------
| // | 0 1 2 3 4 |
| // | 20 21 22 23 24 |
| // | 40 41 42 43 44 |
| // - - - - - - - -
| // | 5 6 7 8 9 |
| // | 25 26 27 28 29 |
| // | 45 46 47 48 49 |
| // - - - - - - - -
| // | 10 11 12 13 14 |
| // | 30 31 32 33 34 |
| // | 50 51 52 53 54 |
| // - - - - - - - -
| // | 15 16 17 18 19 |
| // | 35 36 37 38 39 |
| // | 55 56 57 58 59 |
| // ----------------
| auto y = slice.alongDim!(0, 2);
| static assert(is(typeof(y) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 2, Canonical), 1, Universal)));
|
| assert(y.shape == shape4);
| assert(y.front.shape == shape35);
| int err;
| assert(y.front == slice.universal.strided!1(4).reshape([3, -1], err));
| y.popFront;
| assert(y.front.front == iota([5], 5));
|
| // -------------
| // | 0 5 10 15 |
| // | 20 25 30 35 |
| // | 40 45 50 55 |
| // - - - - - - -
| // | 1 6 11 16 |
| // | 21 26 31 36 |
| // | 41 46 51 56 |
| // - - - - - - -
| // | 2 7 12 17 |
| // | 22 27 32 37 |
| // | 42 47 52 57 |
| // - - - - - - -
| // | 3 8 13 18 |
| // | 23 28 33 38 |
| // | 43 48 53 58 |
| // - - - - - - -
| // | 4 9 14 19 |
| // | 24 29 34 39 |
| // | 44 49 54 59 |
| // -------------
| auto z = slice.alongDim!(0, 1);
| static assert(is(typeof(z) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 2, Universal))));
|
| assert(z.shape == shape5);
| assert(z.front.shape == shape34);
| assert(z.front == iota([3, 4], 0, 5));
| z.popFront;
| assert(z.front.front == iota([4], 1, 5));
|}
|
|/// Use alongDim to calculate column mean/row mean of 2-dimensional slice
|version(mir_test)
|@safe pure
|unittest
|{
| import mir.ndslice.topology: alongDim;
| import mir.ndslice.fuse: fuse;
| import mir.math.stat: mean;
| import mir.algorithm.iteration: all;
| import mir.math.common: approxEqual;
|
| auto x = [
| [0.0, 1.0, 1.5, 2.0, 3.5, 4.25],
| [2.0, 7.5, 5.0, 1.0, 1.5, 0.0]
| ].fuse;
|
| // Use alongDim with map to compute mean of row/column.
| assert(x.alongDim!1.map!mean.all!approxEqual([12.25 / 6, 17.0 / 6]));
| assert(x.alongDim!0.map!mean.all!approxEqual([1, 4.25, 3.25, 1.5, 2.5, 2.125]));
|
| // FIXME
| // Without using map, computes the mean of the whole slice
| // assert(x.alongDim!1.mean == x.sliced.mean);
| // assert(x.alongDim!0.mean == x.sliced.mean);
|}
|
|/++
|Use alongDim and map with a lambda; the result may need to be allocated. This
|example uses fuse, which allocates. Note: fuse!1 transposes the result.
|+/
|version(mir_test)
|@safe pure
|unittest {
| import mir.ndslice.topology: iota, alongDim, map;
| import mir.ndslice.fuse: fuse;
| import mir.ndslice.slice: sliced;
|
| auto x = [1, 2, 3].sliced;
| auto y = [1, 2].sliced;
|
| auto s1 = iota(2, 3).alongDim!1.map!(a => a * x).fuse;
| assert(s1 == [[ 0, 2, 6],
| [ 3, 8, 15]]);
| auto s2 = iota(2, 3).alongDim!0.map!(a => a * y).fuse!1;
| assert(s2 == [[ 0, 1, 2],
| [ 6, 8, 10]]);
|}
|
|/++
|Returns a slice that can be iterated by dimension. Transposes dimensions on top and then packs them.
|
|Combines $(SUBREF dynamic, transposed), $(LREF ipack), and SliceKind Selectors.
|
|Params:
| SDimensions = dimensions to perform iteration on; a list of length d, `1 <= d <= n`. Negative dimensions are supported.
|Returns:
| d-dimensional slice composed of `(n-d)`-dimensional slices
|See_also:
| $(LREF alongDim),
| $(SUBREF allocation, slice),
| $(LREF ipack),
| $(SUBREF dynamic, transposed).
|+/
|template byDim(SDimensions...)
| if (SDimensions.length > 0)
|{
| static if (allSatisfy!(isSizediff_t, SDimensions))
| {
| /++
| Params:
| slice = input n-dimensional slice, n >= d
| Returns:
| d-dimensional slice composed of `(n-d)`-dimensional slices
| +/
| @optmath auto byDim(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if (N >= SDimensions.length)
| {
|
| alias Dimensions = staticMap!(ShiftNegativeWith!N, SDimensions);
|
| mixin DimensionsCountCTError;
|
| static if (N == 1)
| {
| return slice;
| }
| else
| {
| import core.lifetime: move;
| import mir.ndslice.dynamic: transposed;
| import mir.algorithm.iteration: all;
|
| auto trans = slice
| .move
| .transposed!Dimensions;
| static if (Dimensions.length == N)
| {
| return trans;
| }
| else
| {
| auto ret = trans.move.ipack!(Dimensions.length);
| static if ((kind == Contiguous || kind == Canonical && N - Dimensions.length == 1) && [Dimensions].all!(a => a < Dimensions.length))
| {
| return ret
| .move
| .evertPack
| .assumeContiguous
| .evertPack;
| }
| else
| static if (kind == Canonical && [Dimensions].all!(a => a < N - 1))
| {
| return ret
| .move
| .evertPack
| .assumeCanonical
| .evertPack;
| }
| else
| static if ((kind == Contiguous || kind == Canonical && Dimensions.length == 1) && [Dimensions] == [Iota!(N - Dimensions.length, N)])
| {
| return ret.assumeContiguous;
| }
| else
| static if ((kind == Contiguous || kind == Canonical) && Dimensions[$-1] == N - 1)
| {
| return ret.assumeCanonical;
| }
| else
| {
| return ret;
| }
| }
| }
| }
| }
| else
| {
| alias byDim = .byDim!(staticMap!(toSizediff_t, SDimensions));
| }
|}
|
|/// 2-dimensional slice support
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice;
|
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto slice = iota(3, 4);
| //->
| // | 3 |
| //->
| // | 4 |
| size_t[1] shape3 = [3];
| size_t[1] shape4 = [4];
|
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto x = slice.byDim!0; // byDim!(-2) is the same for matrices.
| static assert(is(typeof(x) == Slice!(SliceIterator!(IotaIterator!sizediff_t), 1, Universal)));
|
| assert(x.shape == shape3);
| assert(x.front.shape == shape4);
| assert(x.front == iota(4));
| x.popFront;
| assert(x.front == iota([4], 4));
|
| // ---------
| // | 0 4 8 |
| // | 1 5 9 |
| // | 2 6 10 |
| // | 3 7 11 |
| // ---------
| auto y = slice.byDim!(-1); // -1 is the last dimension index, the same as 1 for this case.
| static assert(is(typeof(y) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 1, Universal))));
|
| assert(y.shape == shape4);
| assert(y.front.shape == shape3);
| assert(y.front == iota([3], 0, 4));
| y.popFront;
| assert(y.front == iota([3], 1, 4));
|}
|
|/// 3-dimensional slice support, N-dimensional also supported
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice;
|
| // ----------------
| // | 0 1 2 3 4 |
| // | 5 6 7 8 9 |
| // | 10 11 12 13 14 |
| // | 15 16 17 18 19 |
| // - - - - - - - -
| // | 20 21 22 23 24 |
| // | 25 26 27 28 29 |
| // | 30 31 32 33 34 |
| // | 35 36 37 38 39 |
| // - - - - - - - -
| // | 40 41 42 43 44 |
| // | 45 46 47 48 49 |
| // | 50 51 52 53 54 |
| // | 55 56 57 58 59 |
| // ----------------
| auto slice = iota(3, 4, 5);
|
| size_t[2] shape45 = [4, 5];
| size_t[2] shape35 = [3, 5];
| size_t[2] shape34 = [3, 4];
| size_t[2] shape54 = [5, 4];
| size_t[1] shape3 = [3];
| size_t[1] shape4 = [4];
| size_t[1] shape5 = [5];
|
| // ----------------
| // | 0 1 2 3 4 |
| // | 5 6 7 8 9 |
| // | 10 11 12 13 14 |
| // | 15 16 17 18 19 |
| // - - - - - - - -
| // | 20 21 22 23 24 |
| // | 25 26 27 28 29 |
| // | 30 31 32 33 34 |
| // | 35 36 37 38 39 |
| // - - - - - - - -
| // | 40 41 42 43 44 |
| // | 45 46 47 48 49 |
| // | 50 51 52 53 54 |
| // | 55 56 57 58 59 |
| // ----------------
| auto x = slice.byDim!0;
| static assert(is(typeof(x) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 2), 1, Universal)));
|
| assert(x.shape == shape3);
| assert(x.front.shape == shape45);
| assert(x.front == iota([4, 5]));
| x.popFront;
| assert(x.front == iota([4, 5], (4 * 5)));
|
| // ----------------
| // | 0 1 2 3 4 |
| // | 20 21 22 23 24 |
| // | 40 41 42 43 44 |
| // - - - - - - - -
| // | 5 6 7 8 9 |
| // | 25 26 27 28 29 |
| // | 45 46 47 48 49 |
| // - - - - - - - -
| // | 10 11 12 13 14 |
| // | 30 31 32 33 34 |
| // | 50 51 52 53 54 |
| // - - - - - - - -
| // | 15 16 17 18 19 |
| // | 35 36 37 38 39 |
| // | 55 56 57 58 59 |
| // ----------------
| auto y = slice.byDim!1;
| static assert(is(typeof(y) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 2, Canonical), 1, Universal)));
|
| assert(y.shape == shape4);
| assert(y.front.shape == shape35);
| int err;
| assert(y.front == slice.universal.strided!1(4).reshape([3, -1], err));
| y.popFront;
| assert(y.front.front == iota([5], 5));
|
| // -------------
| // | 0 5 10 15 |
| // | 20 25 30 35 |
| // | 40 45 50 55 |
| // - - - - - - -
| // | 1 6 11 16 |
| // | 21 26 31 36 |
| // | 41 46 51 56 |
| // - - - - - - -
| // | 2 7 12 17 |
| // | 22 27 32 37 |
| // | 42 47 52 57 |
| // - - - - - - -
| // | 3 8 13 18 |
| // | 23 28 33 38 |
| // | 43 48 53 58 |
| // - - - - - - -
| // | 4 9 14 19 |
| // | 24 29 34 39 |
| // | 44 49 54 59 |
| // -------------
| auto z = slice.byDim!2;
| static assert(is(typeof(z) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 2, Universal))));
|
| assert(z.shape == shape5);
| assert(z.front.shape == shape34);
| assert(z.front == iota([3, 4], 0, 5));
| z.popFront;
| assert(z.front.front == iota([4], 1, 5));
|
| // ----------
| // | 0 20 40 |
| // | 5 25 45 |
| // | 10 30 50 |
| // | 15 35 55 |
| // - - - - -
| // | 1 21 41 |
| // | 6 26 46 |
| // | 11 31 51 |
| // | 16 36 56 |
| // - - - - -
| // | 2 22 42 |
| // | 7 27 47 |
| // | 12 32 52 |
| // | 17 37 57 |
| // - - - - -
| // | 3 23 43 |
| // | 8 28 48 |
| // | 13 33 53 |
| // | 18 38 58 |
| // - - - - -
| // | 4 24 44 |
| // | 9 29 49 |
| // | 14 34 54 |
| // | 19 39 59 |
| // ----------
| auto a = slice.byDim!(2, 1);
| static assert(is(typeof(a) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 1, Universal), 2, Universal)));
|
| assert(a.shape == shape54);
| assert(a.front.shape == shape4);
| assert(a.front.unpack == iota([3, 4], 0, 5).universal.transposed);
| a.popFront;
| assert(a.front.front == iota([3], 1, 20));
|}
|
|/// Use byDim to calculate column mean/row mean of 2-dimensional slice
|version(mir_test)
|@safe pure
|unittest
|{
| import mir.ndslice.topology: byDim;
| import mir.ndslice.fuse: fuse;
| import mir.math.stat: mean;
| import mir.algorithm.iteration: all;
| import mir.math.common: approxEqual;
|
| auto x = [
| [0.0, 1.0, 1.5, 2.0, 3.5, 4.25],
| [2.0, 7.5, 5.0, 1.0, 1.5, 0.0]
| ].fuse;
|
| // Use byDim with map to compute mean of row/column.
| assert(x.byDim!0.map!mean.all!approxEqual([12.25 / 6, 17.0 / 6]));
| assert(x.byDim!1.map!mean.all!approxEqual([1, 4.25, 3.25, 1.5, 2.5, 2.125]));
|
| // FIXME
| // Without using map, computes the mean of the whole slice
| // assert(x.byDim!0.mean == x.sliced.mean);
| // assert(x.byDim!1.mean == x.sliced.mean);
|}
|
|/++
|Use byDim and map with a lambda; the result may need to be allocated. This
|example uses fuse, which allocates. Note: fuse!1 transposes the result.
|+/
|version(mir_test)
|@safe pure
|unittest {
| import mir.ndslice.topology: iota, byDim, map;
| import mir.ndslice.fuse: fuse;
| import mir.ndslice.slice: sliced;
|
| auto x = [1, 2, 3].sliced;
| auto y = [1, 2].sliced;
|
| auto s1 = iota(2, 3).byDim!0.map!(a => a * x).fuse;
| assert(s1 == [[ 0, 2, 6],
| [ 3, 8, 15]]);
| auto s2 = iota(2, 3).byDim!1.map!(a => a * y).fuse!1;
| assert(s2 == [[ 0, 1, 2],
| [ 6, 8, 10]]);
|}
|
|// Ensure works on canonical
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, canonical;
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto slice = iota(3, 4).canonical;
| //->
| // | 3 |
| //->
| // | 4 |
| size_t[1] shape3 = [3];
| size_t[1] shape4 = [4];
|
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto x = slice.byDim!0;
| static assert(is(typeof(x) == Slice!(SliceIterator!(IotaIterator!sizediff_t), 1, Universal)));
|
| assert(x.shape == shape3);
| assert(x.front.shape == shape4);
| assert(x.front == iota(4));
| x.popFront;
| assert(x.front == iota([4], 4));
|
| // ---------
| // | 0 4 8 |
| // | 1 5 9 |
| // | 2 6 10 |
| // | 3 7 11 |
| // ---------
| auto y = slice.byDim!1;
| static assert(is(typeof(y) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 1, Universal))));
|
| assert(y.shape == shape4);
| assert(y.front.shape == shape3);
| assert(y.front == iota([3], 0, 4));
| y.popFront;
| assert(y.front == iota([3], 1, 4));
|}
|
|// Ensure works on universal
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, universal;
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto slice = iota(3, 4).universal;
| //->
| // | 3 |
| //->
| // | 4 |
| size_t[1] shape3 = [3];
| size_t[1] shape4 = [4];
|
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto x = slice.byDim!0;
| static assert(is(typeof(x) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 1, Universal), 1, Universal)));
|
| assert(x.shape == shape3);
| assert(x.front.shape == shape4);
| assert(x.front == iota(4));
| x.popFront;
| assert(x.front == iota([4], 4));
|
| // ---------
| // | 0 4 8 |
| // | 1 5 9 |
| // | 2 6 10 |
| // | 3 7 11 |
| // ---------
| auto y = slice.byDim!1;
| static assert(is(typeof(y) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 1, Universal), 1, Universal)));
|
| assert(y.shape == shape4);
| assert(y.front.shape == shape3);
| assert(y.front == iota([3], 0, 4));
| y.popFront;
| assert(y.front == iota([3], 1, 4));
|}
|
|// 1-dimensional slice support
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| // -------
| // | 0 1 2 |
| // -------
| auto slice = iota(3);
| auto x = slice.byDim!0;
| static assert (is(typeof(x) == typeof(slice)));
| assert(x == slice);
|}
|
|/++
|Constructs a new view of an n-dimensional slice with dimension `axis` removed.
|
|Throws:
| `AssertError` if the length of the corresponding dimension doesn't equal 1.
|Params:
| axis = dimension to remove; its length must equal 1
| slice = n-dimensional slice
|Returns:
| new view of a slice with dimension removed
|See_also: $(LREF unsqueeze), $(LREF iota).
|+/
|template squeeze(sizediff_t axis = 0)
|{
| Slice!(Iterator, N - 1, kind != Canonical ? kind : ((axis == N - 1 || axis == -1) ? Universal : (N == 2 ? Contiguous : kind)))
| squeeze(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if (-sizediff_t(N) <= axis && axis < sizediff_t(N) && N > 1)
| in {
| assert(slice._lengths[axis < 0 ? N + axis : axis] == 1);
| }
| do {
| import mir.utility: swap;
| enum sizediff_t a = axis < 0 ? N + axis : axis;
| typeof(return) ret;
| foreach (i; Iota!(0, a))
| ret._lengths[i] = slice._lengths[i];
| foreach (i; Iota!(a + 1, N))
| ret._lengths[i - 1] = slice._lengths[i];
| static if (kind == Universal)
| {
| foreach (i; Iota!(0, a))
| ret._strides[i] = slice._strides[i];
| foreach (i; Iota!(a + 1, N))
| ret._strides[i - 1] = slice._strides[i];
| }
| else
| static if (kind == Canonical)
| {
| static if (a == N - 1)
| {
| foreach (i; Iota!(0, N - 1))
| ret._strides[i] = slice._strides[i];
| }
| else
| {
| foreach (i; Iota!(0, a))
| ret._strides[i] = slice._strides[i];
| foreach (i; Iota!(a + 1, N - 1))
| ret._strides[i - 1] = slice._strides[i];
| }
| }
| swap(ret._iterator, slice._iterator);
| return ret;
| }
|}
|
|///
|unittest
|{
| import mir.ndslice.topology : iota;
| import mir.ndslice.allocation : slice;
|
| // [[0, 1, 2]] -> [0, 1, 2]
| assert([1, 3].iota.squeeze == [3].iota);
| // [[0], [1], [2]] -> [0, 1, 2]
| assert([3, 1].iota.squeeze!1 == [3].iota);
| assert([3, 1].iota.squeeze!(-1) == [3].iota);
|
| assert([1, 3].iota.canonical.squeeze == [3].iota);
| assert([3, 1].iota.canonical.squeeze!1 == [3].iota);
| assert([3, 1].iota.canonical.squeeze!(-1) == [3].iota);
|
| assert([1, 3].iota.universal.squeeze == [3].iota);
| assert([3, 1].iota.universal.squeeze!1 == [3].iota);
| assert([3, 1].iota.universal.squeeze!(-1) == [3].iota);
|
| assert([1, 3, 4].iota.squeeze == [3, 4].iota);
| assert([3, 1, 4].iota.squeeze!1 == [3, 4].iota);
| assert([3, 4, 1].iota.squeeze!(-1) == [3, 4].iota);
|
| assert([1, 3, 4].iota.canonical.squeeze == [3, 4].iota);
| assert([3, 1, 4].iota.canonical.squeeze!1 == [3, 4].iota);
| assert([3, 4, 1].iota.canonical.squeeze!(-1) == [3, 4].iota);
|
| assert([1, 3, 4].iota.universal.squeeze == [3, 4].iota);
| assert([3, 1, 4].iota.universal.squeeze!1 == [3, 4].iota);
| assert([3, 4, 1].iota.universal.squeeze!(-1) == [3, 4].iota);
|}
|
|/++
|Constructs a view of an n-dimensional slice with a dimension added at `axis`. Used
|to unsqueeze a squeezed slice.
|
|Params:
| slice = n-dimensional slice
| axis = dimension to be unsqueezed (add a new dimension); the default value is 0, the first dimension
|Returns:
| unsqueezed (n+1)-dimensional slice of the same slice kind
|See_also: $(LREF squeeze), $(LREF iota).
|+/
|Slice!(Iterator, N + 1, kind) unsqueeze(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice, sizediff_t axis)
|in {
| assert(-sizediff_t(N + 1) <= axis && axis <= sizediff_t(N));
|}
|do {
| import mir.utility: swap;
| typeof(return) ret;
| auto a = axis < 0 ? axis + N + 1 : axis;
| foreach (i; 0 .. a)
| ret._lengths[i] = slice._lengths[i];
| ret._lengths[a] = 1;
| foreach (i; a .. N)
| ret._lengths[i + 1] = slice._lengths[i];
| static if (kind == Universal)
| {
| foreach (i; 0 .. a)
| ret._strides[i] = slice._strides[i];
| foreach (i; a .. N)
| ret._strides[i + 1] = slice._strides[i];
| }
| else
| static if (kind == Canonical)
| {
| if (a == N)
| {
| foreach (i; Iota!(0, N - 1))
| ret._strides[i] = slice._strides[i];
| ret._strides[N - 1] = 1;
| }
| else
| {
| foreach (i; 0 .. a)
| ret._strides[i] = slice._strides[i];
| foreach (i; a .. N - 1)
| ret._strides[i + 1] = slice._strides[i];
| }
| }
| swap(ret._iterator, slice._iterator);
| return ret;
|}
|
|/// ditto
|template unsqueeze(sizediff_t axis = 0)
|{
| Slice!(Iterator, N + 1, kind) unsqueeze(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| in {
| assert(-sizediff_t(N + 1) <= axis && axis <= sizediff_t(N));
| }
| do {
| import mir.utility: swap;
| typeof(return) ret;
| enum a = axis < 0 ? axis + N + 1 : axis;
| foreach (i; Iota!a)
| ret._lengths[i] = slice._lengths[i];
| ret._lengths[a] = 1;
| foreach (i; Iota!(a, N))
| ret._lengths[i + 1] = slice._lengths[i];
| static if (kind == Universal)
| {
| foreach (i; Iota!a)
| ret._strides[i] = slice._strides[i];
| foreach (i; Iota!(a, N))
| ret._strides[i + 1] = slice._strides[i];
| }
| else
| static if (kind == Canonical)
| {
| static if (a == N)
| {
| foreach (i; Iota!(0, N - 1))
| ret._strides[i] = slice._strides[i];
| ret._strides[N - 1] = 1;
| }
| else
| {
| foreach (i; Iota!(0, a))
| ret._strides[i] = slice._strides[i];
| foreach (i; Iota!(a, N - 1))
| ret._strides[i + 1] = slice._strides[i];
| }
| }
| swap(ret._iterator, slice._iterator);
| return ret;
| }
|}
|
|///
|version (mir_test)
|@safe pure nothrow @nogc
|unittest
|{
| // [0, 1, 2] -> [[0, 1, 2]]
| assert([3].iota.unsqueeze == [1, 3].iota);
|
| assert([3].iota.universal.unsqueeze == [1, 3].iota);
| assert([3, 4].iota.unsqueeze == [1, 3, 4].iota);
| assert([3, 4].iota.canonical.unsqueeze == [1, 3, 4].iota);
| assert([3, 4].iota.universal.unsqueeze == [1, 3, 4].iota);
|
| // [0, 1, 2] -> [[0], [1], [2]]
| assert([3].iota.unsqueeze(-1) == [3, 1].iota);
| assert([3].iota.unsqueeze!(-1) == [3, 1].iota);
|
| assert([3].iota.universal.unsqueeze(-1) == [3, 1].iota);
| assert([3].iota.universal.unsqueeze!(-1) == [3, 1].iota);
| assert([3, 4].iota.unsqueeze(-1) == [3, 4, 1].iota);
| assert([3, 4].iota.unsqueeze!(-1) == [3, 4, 1].iota);
| assert([3, 4].iota.canonical.unsqueeze(-1) == [3, 4, 1].iota);
| assert([3, 4].iota.canonical.unsqueeze!(-1) == [3, 4, 1].iota);
| assert([3, 4].iota.universal.unsqueeze(-1) == [3, 4, 1].iota);
| assert([3, 4].iota.universal.unsqueeze!(-1) == [3, 4, 1].iota);
|}
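|
|// Illustrative sketch (added for clarity, not from the library sources):
|// unsqueeze followed by squeeze along the same axis restores the original
|// shape.
|version (mir_test)
|unittest
|{
|    assert([3, 4].iota.unsqueeze(1).squeeze!1 == [3, 4].iota);
|}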
|
|/++
|Field (element's member) projection.
|
|Params:
| name = element's member name
|Returns:
| lazy n-dimensional slice of the same shape
|See_also:
| $(LREF map)
|+/
|
|template member(string name)
| if (name.length)
|{
| /++
| Params:
| slice = n-dimensional slice composed of structs, classes or unions
| Returns:
| lazy n-dimensional slice of the same shape
| +/
| Slice!(MemberIterator!(Iterator, name), N, kind) member(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| return typeof(return)(slice._structure, MemberIterator!(Iterator, name)(slice._iterator));
| }
|
| /// ditto
| Slice!(MemberIterator!(T*, name)) member(T)(T[] array)
| {
| return member(array.sliced);
| }
|
| /// ditto
| auto member(T)(T withAsSlice)
| if (hasAsSlice!T)
| {
| return member(withAsSlice.asSlice);
| }
|}
|
|///
|version(mir_test)
|@safe pure unittest
|{
| // struct, union or class
| struct S
| {
| // Property support
| // A getter must always be defined.
| double _x;
| double x() @property
| {
| return _x;
| }
| void x(double x) @property
| {
| _x = x;
| }
|
| /// Field support
| double y;
|
| /// Zero argument function support
| double f()
| {
| return _x * 2;
| }
| }
|
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto matrix = slice!S(2, 3);
| matrix.member!"x"[] = [2, 3].iota;
| matrix.member!"y"[] = matrix.member!"f";
| assert(matrix.member!"y" == [2, 3].iota * 2);
|}
|
|/++
|Functional deep-element wise reduce of a slice composed of fields or iterators.
|+/
|template orthogonalReduceField(alias fun)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun))
| {
| @optmath:
| /++
| Params:
| slice = non-empty input slice composed of fields or iterators.
| Returns:
| a lazy field; each of its elements is the reduction of the elements at the same index across all of the iterators.
| +/
| OrthogonalReduceField!(Iterator, fun, I) orthogonalReduceField(I, Iterator)(I initialValue, Slice!Iterator slice)
| {
| return typeof(return)(slice, initialValue);
| }
|
| /// ditto
| OrthogonalReduceField!(T*, fun, I) orthogonalReduceField(I, T)(I initialValue, T[] array)
| {
| return orthogonalReduceField(initialValue, array.sliced);
| }
|
| /// ditto
| auto orthogonalReduceField(I, T)(I initialValue, T withAsSlice)
| if (hasAsSlice!T)
| {
| return orthogonalReduceField(initialValue, withAsSlice.asSlice);
| }
| }
| else alias orthogonalReduceField = .orthogonalReduceField!(naryFun!fun);
|}
|
|/// bit array operations
|version(mir_test)
|unittest
|{
| import mir.ndslice.slice: slicedField;
| import mir.ndslice.allocation: bitSlice;
| import mir.ndslice.dynamic: strided;
| import mir.ndslice.topology: iota, orthogonalReduceField;
| auto len = 100;
| auto a = len.bitSlice;
| auto b = len.bitSlice;
| auto c = len.bitSlice;
| a[len.iota.strided!0(7)][] = true;
| b[len.iota.strided!0(11)][] = true;
| c[len.iota.strided!0(13)][] = true;
|
| // this is valid since the bit slices above are original slices over allocated memory.
| auto and =
| orthogonalReduceField!"a & b"(size_t.max, [
| a.iterator._field._field, // get raw data pointers
| b.iterator._field._field,
| c.iterator._field._field,
| ]) // operation on size_t
| .bitwiseField
| .slicedField(len);
|
| assert(and == (a & b & c));
|}
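|
|// Illustrative sketch (added for clarity, not from the library sources),
|// assuming only the documented semantics: element i of the result is the
|// reduction over element i of every passed iterator, seeded with the initial
|// value.
|version(mir_test)
|unittest
|{
|    import mir.ndslice.slice: slicedField;
|    auto a = [1.0, 2, 3];
|    auto b = [4.0, 5, 6];
|    // element-wise sum of both arrays through their raw pointers
|    auto sum = orthogonalReduceField!"a + b"(0.0, [a.ptr, b.ptr]).slicedField(3);
|    assert(sum == [5.0, 7, 9]);
|}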
|
|/++
|Constructs a lazy view of triplets with `left`, `center`, and `right` members.
|
|Returns: Slice of the same length composed of $(SUBREF iterator, Triplet) triplets.
|The `center` member has the type of a slice element.
|The `left` and `right` members have the same type as the slice.
|
|The module contains the special function $(LREF collapse) to handle
|the left and right sides of triplets in one expression.
|
|Params:
| slice = a slice or an array to iterate over
|
|Example:
|------
|triplets(eeeeee) =>
|
|||c|lllll|
||r|c|llll|
||rr|c|lll|
||rrr|c|ll|
||rrrr|c|l|
||rrrrr|c||
|------
|
|See_also: $(LREF stairs).
|+/
|Slice!(TripletIterator!(Iterator, kind)) triplets(Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) slice)
|{
| return typeof(return)(slice.length, typeof(return).Iterator(0, slice));
|}
|
|/// ditto
|Slice!(TripletIterator!(T*)) triplets(T)(scope return T[] slice)
|{
| return .triplets(slice.sliced);
|}
|
|/// ditto
|auto triplets(S)(S slice)
| if (hasAsSlice!S)
|{
| return .triplets(slice.asSlice);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.topology: triplets, member, iota;
|
| auto a = [4, 5, 2, 8];
| auto h = a.triplets;
|
| assert(h[1].center == 5);
| assert(h[1].left == [4]);
| assert(h[1].right == [2, 8]);
|
| h[1].center = 9;
| assert(a[1] == 9);
|
| assert(h.member!"center" == a);
|
| // `triplets` topology can be used with iota to index a slice
| auto s = a.sliced;
| auto w = s.length.iota.triplets[1];
|
| assert(&s[w.center] == &a[1]);
| assert(s[w.left].field is a[0 .. 1]);
| assert(s[w.right].field is a[2 .. $]);
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/topology.d is 0% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-fuse.lst
|/++
|This is a submodule of $(MREF mir,ndslice).
|
|Allocation routines that construct ndslices from ndranges.
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko
|
|See_also: $(SUBMODULE concatenation) submodule.
|
|Macros:
|SUBMODULE = $(MREF_ALTTEXT $1, mir, ndslice, $1)
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|module mir.ndslice.fuse;
|
|import mir.internal.utility;
|import mir.ndslice.slice;
|import mir.primitives;
|import mir.qualifier;
|import std.meta;
|import std.traits;
|
|/++
|Fuses ndrange `r` into GC-allocated ($(LREF fuse)) or RC-allocated ($(LREF rcfuse)) ndslice.
|Can be used to join rows or columns into a matrix.
|
|Params:
| Dimensions = (optional) indices of dimensions to be brought to the first position
|Returns:
| ndslice
|+/
|alias fuse(Dimensions...) = fuseImpl!(false, void, Dimensions);
|/// ditto
|alias rcfuse(Dimensions...) = fuseImpl!(true, void, Dimensions);
|
|///
|@safe pure version(mir_test) unittest
|{
| import mir.ndslice.fuse;
| import mir.ndslice.slice : Contiguous, Slice;
| import mir.ndslice.topology: iota;
| import mir.rc.array: RCI;
|
| enum ror = [
| [0, 1, 2, 3],
| [4, 5, 6, 7],
| [8, 9,10,11]];
|
| // 0 1 2 3
| // 4 5 6 7
| // 8 9 10 11
| auto matrix = ror.fuse;
|
| auto rcmatrix = ror.rcfuse; // nogc version
|
| assert(matrix == [3, 4].iota);
| assert(rcmatrix == [3, 4].iota);
| static assert(ror.fuse == [3, 4].iota); // CTFE-able
|
| // matrix is contiguous
| static assert(is(typeof(matrix) == Slice!(int*, 2)));
| static assert(is(typeof(rcmatrix) == Slice!(RCI!int, 2)));
|}
|
|/// Transposed
|@safe pure version(mir_test) unittest
|{
| import mir.ndslice.fuse;
| import mir.ndslice.topology: iota;
| import mir.ndslice.dynamic: transposed;
| import mir.ndslice.slice : Contiguous, Slice;
|
| enum ror = [
| [0, 1, 2, 3],
| [4, 5, 6, 7],
| [8, 9,10,11]];
|
| // 0 4 8
| // 1 5 9
| // 2 6 10
| // 3 7 11
|
| // `!1` brings the dimension with index 1 to the front (index 0).
| auto matrix = ror.fuse!1;
|
| assert(matrix == [3, 4].iota.transposed!1);
| // TODO: CTFE
| // static assert(ror.fuse!1 == [3, 4].iota.transposed!1); // CTFE-able
| // matrix is contiguous
| static assert(is(typeof(matrix) == Slice!(int*, 2)));
|}
|
|/// 3D
|@safe pure version(mir_test) unittest
|{
| import mir.ndslice.fuse;
| import mir.ndslice.topology: iota;
| import mir.ndslice.dynamic: transposed;
|
| auto ror =
| [[[ 0, 1, 2, 3],
| [ 4, 5, 6, 7]],
| [[ 8, 9,10,11],
| [12,13,14,15]]];
|
| auto nd = [2, 2, 4].iota;
|
| assert(ror.fuse == nd);
| assert(ror.fuse!2 == nd.transposed!2);
| assert(ror.fuse!(1, 2) == nd.transposed!(1, 2));
| assert(ror.fuse!(2, 1) == nd.transposed!(2, 1));
|}
|
|/// Work with RC Arrays of RC Arrays
|@safe pure version(mir_test) unittest
|{
| import mir.ndslice.fuse;
| import mir.ndslice.slice;
| import mir.ndslice.topology: map;
| import mir.rc.array;
|
| Slice!(const(double)*, 2) conv(RCArray!(const RCArray!(const double)) a)
| {
| return a[].map!"a[]".fuse;
| }
|}
|
|/++
|Fuses ndrange `r` into GC-allocated ($(LREF fuseAs)) or RC-allocated ($(LREF rcfuseAs)) ndslice.
|Can be used to join rows or columns into a matrix.
|
|Params:
| T = output type of ndslice elements
| Dimensions = (optional) indices of dimensions to be brought to the first position
|Returns:
| ndslice
|+/
|alias fuseAs(T, Dimensions...) = fuseImpl!(false, T, Dimensions);
|/// ditto
|alias rcfuseAs(T, Dimensions...) = fuseImpl!(true, T, Dimensions);
|
|///
|@safe pure version(mir_test) unittest
|{
| import mir.ndslice.fuse;
| import mir.ndslice.slice : Contiguous, Slice;
| import mir.ndslice.topology: iota;
| import mir.rc.array: RCI;
|
| enum ror = [
| [0, 1, 2, 3],
| [4, 5, 6, 7],
| [8, 9,10,11]];
|
| // 0 1 2 3
| // 4 5 6 7
| // 8 9 10 11
| auto matrix = ror.fuseAs!double;
|
| auto rcmatrix = ror.rcfuseAs!double; // nogc version
|
| assert(matrix == [3, 4].iota);
| assert(rcmatrix == [3, 4].iota);
| static assert(ror.fuseAs!double == [3, 4].iota); // CTFE-able
|
| // matrix is contiguous
| static assert(is(typeof(matrix) == Slice!(double*, 2)));
| static assert(is(typeof(rcmatrix) == Slice!(RCI!double, 2)));
|}
|
|///
|template fuseImpl(bool RC, T_, Dimensions...)
|{
| import mir.ndslice.internal: isSize_t, toSize_t;
| static if (allSatisfy!(isSize_t, Dimensions))
| /++
| Params:
| r = parallelotope (ndrange) with length/shape and input range primitives.
| +/
| auto fuseImpl(NDRange)(NDRange r)
| if (hasShape!NDRange)
| {
| import mir.conv: emplaceRef;
| import mir.algorithm.iteration: each;
| import mir.ndslice.allocation;
| auto shape = fuseShape(r);
| static if (is(T_ == void))
| alias T = FuseElementType!NDRange;
| else
| alias T = T_;
| alias UT = Unqual!T;
| static if (RC)
| {
| import mir.rc.array: RCI;
| alias R = Slice!(RCI!T, fuseDimensionCount!NDRange);
| Slice!(RCI!UT, fuseDimensionCount!NDRange) ret;
| }
| else
| {
| alias R = Slice!(T*, fuseDimensionCount!NDRange);
| Slice!(UT*, fuseDimensionCount!NDRange) ret;
| }
| static if (Dimensions.length)
| {
| import mir.ndslice.topology: iota;
| import mir.ndslice.dynamic: transposed, completeTranspose;
| enum perm = completeTranspose!(shape.length)([Dimensions]);
| size_t[shape.length] shapep;
| foreach(i; Iota!(shape.length))
| shapep[i] = shape[perm[i]];
| // enum iperm = perm.length.iota[completeTranspose!(shape.length)([Dimensions])[].sliced].slice;
| alias InverseDimensions = aliasSeqOf!(
| (size_t[] perm){
| auto ar = new size_t[perm.length];
| ar.sliced[perm.sliced] = perm.length.iota;
| return ar;
| }(perm)
| );
| static if (RC)
| {
| ret = shapep.uninitRcslice!UT;
| ret.lightScope.transposed!InverseDimensions.each!(emplaceRef!T)(r);
| }
| else
| {
| if (__ctfe)
| {
| ret = shapep.slice!UT;
| ret.transposed!InverseDimensions.each!"a = b"(r);
| }
| else
| {
| ret = shapep.uninitSlice!UT;
| ret.transposed!InverseDimensions.each!(emplaceRef!T)(r);
| }
|
| }
| }
| else
| {
| static if (RC)
| {
| ret = shape.uninitRCslice!UT;
| ret.lightScope.each!(emplaceRef!T)(r);
| }
| else
| {
| if (__ctfe)
| {
| ret = shape.slice!UT;
| ret.each!"a = b"(r);
| }
| else
| {
| ret = shape.uninitSlice!UT;
| ret.each!(emplaceRef!T)(r);
| }
| }
| }
| static if (RC)
| {
| import core.lifetime: move;
| return move(*(() @trusted => cast(R*)&ret)());
| }
| else
| {
| return *(() @trusted => cast(R*)&ret)();
| }
| }
| else
| alias fuseImpl = .fuseImpl!(RC, T_, staticMap!(toSize_t, Dimensions));
|}
|
|private template fuseDimensionCount(R)
|{
| static if (is(typeof(R.init.shape) : size_t[N], size_t N) && (isDynamicArray!R || __traits(hasMember, R, "front")))
| {
| import mir.ndslice.topology: repeat;
| enum size_t fuseDimensionCount = N + fuseDimensionCount!(DeepElementType!R);
| }
| else
| enum size_t fuseDimensionCount = 0;
|}
|
|private static immutable shapeExceptionMsg = "fuseShape Exception: elements have different shapes/lengths";
|
|version(D_Exceptions)
| static immutable shapeException = new Exception(shapeExceptionMsg);
|
|/+
|TODO docs
|+/
|size_t[fuseDimensionCount!Range] fuseShape(Range)(Range r)
| if (hasShape!Range)
|{
| // auto outerShape = r.shape;
| enum N = r.shape.length;
| enum RN = typeof(return).length;
| enum M = RN - N;
| static if (M == 0)
| {
| return r.shape;
| }
| else
| {
| import mir.ndslice.topology: repeat;
| typeof(return) ret;
| ret[0 .. N] = r.shape;
| if (!ret[0 .. N].anyEmptyShape)
| {
| ret[N .. $] = fuseShape(mixin("r" ~ ".front".repeat(N).fuseCells.field));
| import mir.algorithm.iteration: all;
| if (!all!((a) => cast(size_t[M]) ret[N .. $] == .fuseShape(a))(r))
| {
| version (D_Exceptions)
| throw shapeException;
| else
| assert(0, shapeExceptionMsg);
| }
| }
| return ret;
| }
|}
|
|private template FuseElementType(NDRange)
|{
| import mir.ndslice.topology: repeat;
| alias FuseElementType = typeof(mixin("NDRange.init" ~ ".front".repeat(fuseDimensionCount!NDRange).fuseCells.field));
|}
|
|/++
|Fuses `cells` into GC-allocated ndslice.
|
|Params:
| cells = ndrange of ndcells; both the ndrange and the ndcells should have `shape` and multidimensional input range primitives (`front!d`, `empty!d`, `popFront!d`).
|Returns: ndslice composed of fused cells.
|See_also: $(SUBREF chunks, chunks)
|+/
|auto fuseCells(S)(S cells)
|{
| alias T = DeepElementType!(DeepElementType!S);
| alias UT = Unqual!T;
| if (__ctfe)
| {
| import mir.ndslice.allocation: slice;
| auto ret = cells.fuseCellsShape.slice!UT;
| ret.fuseCellsAssign!"a = b" = cells;
| static if (is(T == immutable))
| return (() @trusted => cast(immutable) ret)()[];
| else
| static if (is(T == const))
| return (() @trusted => cast(const) ret)()[];
| else
| return ret;
| }
| else
| {
| import mir.ndslice.allocation: uninitSlice;
| import mir.conv;
| auto ret = cells.fuseCellsShape.uninitSlice!UT;
| ret.fuseCellsAssign!(emplaceRef!T) = cells;
| alias R = Slice!(T*, ret.N);
| return R(ret._structure, (() @trusted => cast(T*)ret._iterator)());
| }
|}
|
|/// 1D
|@safe pure version(mir_test) unittest
|{
| import mir.ndslice.topology: iota;
| enum ar = [[0, 1], [], [2, 3, 4, 5], [6], [7, 8, 9]];
| static assert ([[0, 1], [], [2, 3, 4, 5], [6], [7, 8, 9]].fuseCells == 10.iota);
| assert (ar.fuseCells == 10.iota);
|}
|
|/// 2D
|@safe pure version(mir_test) unittest
|{
| import mir.ndslice.topology: iota;
| import mir.ndslice.chunks;
|
| auto sl = iota(11, 17);
| assert(sl.chunks!(0, 1)(3, 4).fuseCells == sl);
|}
|
|/+
|TODO docs
|+/
|auto fuseCellsAssign(alias fun = "a = b", Iterator, size_t N, SliceKind kind, S)(Slice!(Iterator, N, kind) to, S cells)
|{
| assert(to.shape == cells.fuseCellsShape, "'cells.fuseCellsShape' should be equal to 'to.shape'");
|
| if (cells.anyEmpty)
| goto R;
|
| import mir.functional: naryFun;
| import mir.ndslice.topology: canonical;
| static if (kind == Contiguous)
| fuseCellsEmplaceImpl!(naryFun!fun, 0, N)(to.canonical, cells);
| else
| fuseCellsEmplaceImpl!(naryFun!fun, 0, N)(to, cells);
| R: return to;
|}
|
|/+
|TODO docs
|+/
|size_t[S.init.shape.length] fuseCellsShape(S)(S cells) @property
|{
| typeof(return) ret;
| enum N = ret.length;
| static if (N == 1)
| {
| foreach (ref e; cells)
| ret[0] += e.length;
| }
| else
| {
| import mir.ndslice.topology: repeat;
| enum expr = "e" ~ ".front".repeat(N).fuseCells.field;
| foreach (i; Iota!N)
| for (auto e = cells.save; !e.empty!i; e.popFront!i)
| ret[i] += mixin(expr).length!i;
| }
| return ret;
|}
|
|private auto fuseCellsEmplaceImpl(alias fun, size_t i, size_t M, Iterator, size_t N, SliceKind kind, S)(Slice!(Iterator, N, kind) to, S cells)
|{
| do
| {
| auto from = cells.front;
| static if (M == 1)
| {
| auto n = from.length!i;
| }
| else
| {
| import mir.ndslice.topology: repeat;
| enum expr = "from" ~ ".front".repeat(N - 1 - i).fuseCells.field;
| auto n = mixin(expr).length!i;
| }
| assert (to.length!i >= n);
| static if (i + 1 == M)
| {
| import mir.algorithm.iteration: each;
| each!fun(to.selectFront!i(n), from);
| }
| else
| {
| .fuseCellsEmplaceImpl!(fun, i + 1, N)(to.selectFront!i(n), from);
| }
| to.popFrontExactly!i(n);
| cells.popFront;
| }
| while(!cells.empty);
| return to;
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/fuse.d has no code
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-array-allocation.lst
|/**
|Functions and types that manipulate built-in arrays and associative arrays.
|
|This module provides all kinds of functions to create, manipulate or convert arrays:
|
|$(SCRIPT inhibitQuickIndex = 1;)
|$(BOOKTABLE ,
|$(TR $(TH Function Name) $(TH Description)
|)
| $(TR $(TD $(LREF _array))
| $(TD Returns a copy of the input in a newly allocated dynamic _array.
| ))
|)
|
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|
|Authors: $(HTTP erdani.org, Andrei Alexandrescu) and Jonathan M Davis
|
|Source: $(PHOBOSSRC std/_array.d)
|*/
|module mir.array.allocation;
|
|import mir.functional;
|import mir.primitives;
|
|import std.traits;
|import std.range.primitives: isInfinite, isInputRange, ElementType;
|
|/**
| * Allocates an array and initializes it with copies of the elements
| * of range $(D r).
| *
| * Narrow strings are handled as a special case in an overload.
| *
| * Params:
| * r = range (or aggregate with $(D opApply) function) whose elements are copied into the allocated array
| * Returns:
| * allocated and initialized array
| */
|auto array(Range)(Range r)
|if ((isInputRange!Range || isIterable!Range) && !isInfinite!Range && !isStaticArray!Range || isPointer!Range && (isInputRange!(PointerTarget!Range) || isIterable!(PointerTarget!Range)))
|{
| static if (isIterable!Range)
| alias E = ForeachType!Range;
| else
| static if (isPointer!Range && isIterable!(PointerTarget!Range))
| alias E = ForeachType!(PointerTarget!Range);
| else
| alias E = ElementType!Range;
|
| if (__ctfe)
| {
| // Compile-time version to avoid memcpy calls.
| // Also used to infer attributes of array().
| E[] result;
| static if (isInputRange!Range)
| for (; !r.empty; r.popFront)
| result ~= r.front;
| else
| static if (isPointer!Range)
| foreach (e; *r)
| result ~= e;
| else
| foreach (e; r)
| result ~= e;
| return result;
| }
|
| import mir.primitives: hasLength;
|
| static if (hasLength!Range)
| {
| auto length = r.length;
| if (length == 0)
| return null;
|
| import mir.conv : emplaceRef;
| import std.array: uninitializedArray;
|
| auto result = (() @trusted => uninitializedArray!(Unqual!E[])(length))();
|
| static if (isInputRange!Range)
| {
| foreach(ref e; result)
| {
| emplaceRef!E(e, r.front);
| r.popFront;
| }
| }
| else
| static if (isPointer!Range)
| {
| auto it = result;
| foreach(ref f; *r)
| {
| emplaceRef!E(it[0], f);
| it = it[1 .. $];
| }
| }
| else
| {
| auto it = result;
| foreach (f; r)
| {
| import mir.functional: forward;
| emplaceRef!E(it[0], forward!f);
| it = it[1 .. $];
| }
| }
|
| return (() @trusted => cast(E[]) result)();
| }
| else
| {
| import mir.appender: ScopedBuffer;
| ScopedBuffer!(Unqual!E) a;
| static if (isInputRange!Range)
| for (; !r.empty; r.popFront)
| a.put(r.front);
| else
| static if (isPointer!Range)
| {
| foreach (e; *r)
| a.put(forward!e);
| }
| else
| {
| foreach (e; r)
| a.put(forward!e);
| }
|
| return (() @trusted {
| import std.array: uninitializedArray;
| auto ret = uninitializedArray!(Unqual!E[])(a.length);
| a.moveDataAndEmplaceTo(ret);
| return ret;
| })();
| }
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| auto a = array([1, 2, 3, 4, 5][]);
| assert(a == [ 1, 2, 3, 4, 5 ]);
|}
|
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.algorithm.iteration : equal;
| struct Foo
| {
| int a;
| }
| auto a = array([Foo(1), Foo(2), Foo(3), Foo(4), Foo(5)][]);
| assert(equal(a, [Foo(1), Foo(2), Foo(3), Foo(4), Foo(5)]));
|}
|
|@safe pure nothrow version(mir_test) unittest
|{
| struct MyRange
| {
| enum front = 123;
| enum empty = true;
| void popFront() {}
| }
|
| auto arr = (new MyRange).array;
| assert(arr.empty);
|}
|
|@system pure nothrow version(mir_test) unittest
|{
| immutable int[] a = [1, 2, 3, 4];
| auto b = (&a).array;
| assert(b == a);
|}
|
|@system version(mir_test) unittest
|{
| import mir.algorithm.iteration : equal;
| struct Foo
| {
| int a;
| void opAssign(Foo)
| {
| assert(0);
| }
| auto opEquals(Foo foo)
| {
| return a == foo.a;
| }
| }
| auto a = array([Foo(1), Foo(2), Foo(3), Foo(4), Foo(5)][]);
| assert(equal(a, [Foo(1), Foo(2), Foo(3), Foo(4), Foo(5)]));
|}
|
|@safe version(mir_test) unittest
|{
| // Issue 12315
| static struct Bug12315 { immutable int i; }
| enum bug12315 = [Bug12315(123456789)].array();
| static assert(bug12315[0].i == 123456789);
|}
|
|@safe version(mir_test) unittest
|{
| import mir.ndslice.topology: repeat;
| static struct S{int* p;}
| auto a = array(immutable(S).init.repeat(5));
| assert(a.length == 5);
|}
|
|///
|@safe version(mir_test) unittest
|{
| assert("Hello D".array == "Hello D");
| assert("Hello D"w.array == "Hello D"w);
| assert("Hello D"d.array == "Hello D"d);
|}
|
|@system version(mir_test) unittest
|{
| // @system due to array!string
| import std.conv : to;
|
| static struct TestArray { int x; string toString() @safe { return to!string(x); } }
|
| static struct OpAssign
| {
| uint num;
| this(uint num) { this.num = num; }
|
| // Templating opAssign to make sure the bugs with opAssign being
| // templated are fixed.
| void opAssign(T)(T rhs) { this.num = rhs.num; }
| }
|
| static struct OpApply
| {
| int opApply(scope int delegate(ref int) dg)
| {
| int res;
| foreach (i; 0 .. 10)
| {
| res = dg(i);
| if (res) break;
| }
|
| return res;
| }
| }
|
| auto a = array([1, 2, 3, 4, 5][]);
| assert(a == [ 1, 2, 3, 4, 5 ]);
|
| auto b = array([TestArray(1), TestArray(2)][]);
| assert(b == [TestArray(1), TestArray(2)]);
|
| class C
| {
| int x;
| this(int y) { x = y; }
| override string toString() const @safe { return to!string(x); }
| }
| auto c = array([new C(1), new C(2)][]);
| assert(c[0].x == 1);
| assert(c[1].x == 2);
|
| auto d = array([1.0, 2.2, 3][]);
| assert(is(typeof(d) == double[]));
| assert(d == [1.0, 2.2, 3]);
|
| auto e = [OpAssign(1), OpAssign(2)];
| auto f = array(e);
| assert(e == f);
|
| assert(array(OpApply.init) == [0,1,2,3,4,5,6,7,8,9]);
| assert(array("ABC") == "ABC");
| assert(array("ABC".dup) == "ABC");
|}
|
|//Bug# 8233
|@safe version(mir_test) unittest
|{
| assert(array("hello world"d) == "hello world"d);
| immutable a = [1, 2, 3, 4, 5];
| assert(array(a) == a);
| const b = a;
| assert(array(b) == a);
|
| //To verify that the opAssign branch doesn't get screwed up by using Unqual.
| //EDIT: array no longer calls opAssign.
| struct S
| {
| ref S opAssign(S)(const ref S rhs)
| {
| assert(0);
| }
|
| int i;
| }
|
| alias AliasSeq(T...) = T;
| foreach (T; AliasSeq!(S, const S, immutable S))
| {
| auto arr = [T(1), T(2), T(3), T(4)];
| assert(array(arr) == arr);
| }
|}
|
|@safe version(mir_test) unittest
|{
| //9824
| static struct S
| {
| @disable void opAssign(S);
| int i;
| }
| auto arr = [S(0), S(1), S(2)];
| arr.array;
|}
|
|// Bugzilla 10220
|@safe version(mir_test) unittest
|{
| import mir.algorithm.iteration : equal;
| import std.exception;
| import mir.ndslice.topology: repeat;
|
| static struct S
| {
| int val;
|
| @disable this();
| this(int v) { val = v; }
| }
| static immutable r = S(1).repeat(2).array();
| assert(equal(r, [S(1), S(1)]));
|}
|
|@safe version(mir_test) unittest
|{
| // Infinite ranges are rejected:
| static assert(!is(typeof(
| repeat(1).array()
| )));
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/array/allocation.d has no code
<<<<<< EOF
# path=./source-mir-sparse-blas-gemm.lst
|/++
|License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0).
|Copyright: Copyright © 2016-, Ilya Yaroshenko
|Authors: Ilya Yaroshenko
|+/
|module mir.sparse.blas.gemm;
|
|import std.traits;
|import mir.ndslice.slice;
|import mir.ndslice.iterator;
|import mir.ndslice.allocation: slice;
|import mir.sparse;
|import mir.series;
|
|/++
|General matrix-matrix multiplication.
|
|Params:
| alpha = scalar
| a = sparse matrix (CSR format)
| b = dense matrix
| beta = scalar
| c = dense matrix
|Returns:
| `c = alpha * a × b + beta * c` if beta does not equal 0 and `c = alpha * a × b` otherwise.
|+/
|void gemm(
| CR,
| CL,
| SliceKind kind1, T1, I1, J1, SliceKind kind2, Iterator2, SliceKind kind3, Iterator3)
|(
| in CR alpha,
| Slice!(ChopIterator!(J1*, Series!(I1*, T1*)), 1, kind1) a,
| Slice!(Iterator2, 2, kind2) b,
| in CL beta,
| Slice!(Iterator3, 2, kind3) c)
|in
|{
1| assert(a.length!0 == c.length!0);
1| assert(b.length!1 == c.length!1);
|}
|body
|{
| import mir.ndslice.topology: universal;
| import mir.ndslice.dynamic: transposed;
1| auto ct = c.universal.transposed;
14| foreach (x; b.universal.transposed)
| {
| import mir.sparse.blas.gemv: gemv;
4| gemv(alpha, a, x, beta, ct.front);
4| ct.popFront;
| }
|}
|
|///
|unittest
|{
| import mir.ndslice;
| import mir.sparse;
|
1| auto sp = sparse!int(3, 5);
1| sp[] =
| [[-5, 1, 7, 7, -4],
| [-1, -5, 6, 3, -3],
| [-5, -2, -3, 6, 0]];
|
1| auto a = sp.compress;
|
1| auto b = slice!double(5, 4);
1| b[] =
| [[-5.0, -3, 3, 1],
| [4.0, 3, 6, 4],
| [-4.0, -2, -2, 2],
| [-1.0, 9, 4, 8],
| [9.0, 8, 3, -2]];
|
1| auto c = slice!double(3, 4);
|
1| gemm(1.0, a, b, 0, c);
|
1| assert(c ==
| [[-42.0, 35, -7, 77],
| [-69.0, -21, -42, 21],
| [23.0, 69, 3, 29]]);
|}
|
|
|/++
|General matrix-matrix multiplication with transformation.
|
|Params:
| alpha = scalar
| a = sparse matrix (CSR format)
| b = dense matrix
| beta = scalar
| c = dense matrix
|Returns:
| `c = alpha * aᵀ × b + beta * c` if beta does not equal 0 and `c = alpha * aᵀ × b` otherwise.
|+/
|void gemtm(
| CR,
| CL,
| SliceKind kind1, T1, I1, J1, SliceKind kind2, Iterator2, SliceKind kind3, Iterator3)
|(
| in CR alpha,
| Slice!(ChopIterator!(J1*, Series!(I1*, T1*)), 1, kind1) a,
| Slice!(Iterator2, 2, kind2) b,
| in CL beta,
| Slice!(Iterator3, 2, kind3) c)
|in
|{
1| assert(a.length!0 == b.length!0);
1| assert(b.length!1 == c.length!1);
|}
|body
|{
| import mir.ndslice.topology: universal;
| import mir.ndslice.dynamic: transposed;
1| auto ct = c.universal.transposed;
14| foreach (x; b.universal.transposed)
| {
| import mir.sparse.blas.gemv: gemtv;
4| gemtv(alpha, a, x, beta, ct.front);
4| ct.popFront;
| }
|}
|
|
|///
|unittest
|{
| import mir.ndslice;
| import mir.sparse;
|
1| auto sp = sparse!int(5, 3);
1| sp[] =
| [[-5, -1, -5],
| [1, -5, -2],
| [7, 6, -3],
| [7, 3, 6],
| [-4, -3, 0]];
|
1| auto a = sp.compress;
|
1| auto b = slice!double(5, 4);
1| b[] =
| [[-5.0, -3, 3, 1],
| [4.0, 3, 6, 4],
| [-4.0, -2, -2, 2],
| [-1.0, 9, 4, 8],
| [9.0, 8, 3, -2]];
|
1| auto c = slice!double(3, 4);
|
1| gemtm(1.0, a, b, 0, c);
|
1| assert(c ==
| [[-42.0, 35, -7, 77],
| [-69.0, -21, -42, 21],
| [23.0, 69, 3, 29]]);
|}
|
|/++
|Selective general matrix multiplication with selector sparse matrix.
|Params:
| a = dense matrix
| b = dense matrix
| c = sparse matrix (CSR format)
|Returns:
| `c[available indexes] = (a × b)[available indexes]`.
|+/
|void selectiveGemm(string op = "", SliceKind kind1, SliceKind kind2, SliceKind kind3, T, T3, I3, J3)
|(Slice!(T*, 2, kind1) a, Slice!(T*, 2, kind2) b, Slice!(ChopIterator!(J3*, Series!(I3*, T3*)), 1, kind3) c)
|in
|{
1| assert(a.length!1 == b.length!0);
1| assert(c.length!0 == a.length!0);
11| foreach (r; c)
3| if (r.index.length)
2| assert(r.index[$-1] < b.length!1);
|}
|body
|{
| import mir.ndslice.topology: universal;
| import mir.ndslice.dynamic: transposed;
| import mir.sparse.blas.gemv: selectiveGemv;
|
1| auto bt = b.universal.transposed;
11| foreach (r; c)
| {
3| selectiveGemv!op(bt, a.front, r);
3| a.popFront;
| }
|}
|
|///
|unittest
|{
| import mir.ndslice;
| import mir.sparse;
|
1| auto a = slice!double(3, 5);
1| a[] =
| [[-5, 1, 7, 7, -4],
| [-1, -5, 6, 3, -3],
| [-5, -2, -3, 6, 0]];
|
1| auto b = slice!double(5, 4);
1| b[] =
| [[-5.0, -3, 3, 1],
| [4.0, 3, 6, 4],
| [-4.0, -2, -2, 2],
| [-1.0, 9, 4, 8],
| [9.0, 8, 3, -2]];
|
| // a * b ==
| // [[-42.0, 35, -7, 77],
| // [-69.0, -21, -42, 21],
| // [23.0, 69, 3, 29]]);
|
1| auto cs = sparse!double(3, 4);
1| cs[0, 2] = 1;
1| cs[0, 1] = 3;
1| cs[2, 3] = 2;
|
1| auto c = cs.compress;
|
1| selectiveGemm!"*"(a, b, c);
1| assert(c.length == 3);
1| assert(c[0].index == [1, 2]);
1| assert(c[0].value == [105, -7]);
1| assert(c[1].empty);
1| assert(c[2].index == [3]);
1| assert(c[2].value == [58]);
|}
source/mir/sparse/blas/gemm.d is 100% covered
<<<<<< EOF
# path=./-tmp-dub_test_root_2eb57dcf_aec6_4649_bcc2_5e6e30c33284.lst
|module dub_test_root;
|import std.typetuple;
|static import mir.glas.l1;
|static import mir.glas.l2;
|static import mir.model.lda.hoffman;
|static import mir.sparse.blas.axpy;
|static import mir.sparse.blas.dot;
|static import mir.sparse.blas.gemm;
|static import mir.sparse.blas.gemv;
|alias allModules = TypeTuple!(mir.glas.l1, mir.glas.l2, mir.model.lda.hoffman, mir.sparse.blas.axpy, mir.sparse.blas.dot, mir.sparse.blas.gemm, mir.sparse.blas.gemv);
|
| import std.stdio;
| import core.runtime;
|
0000000| void main() { writeln("All unit tests have been run successfully."); }
| shared static this() {
| version (Have_tested) {
| import tested;
| import core.runtime;
| import std.exception;
| Runtime.moduleUnitTester = () => true;
| enforce(runUnitTests!allModules(new ConsoleTestResultWriter), "Unit tests failed.");
| }
| }
|
/tmp/dub_test_root_2eb57dcf_aec6_4649_bcc2_5e6e30c33284.d is 0% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-concatenation.lst
|/++
|This is a submodule of $(MREF mir, ndslice).
|
|The module contains the $(LREF ._concatenation) routine.
|It constructs a $(LREF Concatenation) structure that can be
|assigned to an ndslice of the same shape with `[] = ` or `[] op= `.
|
|$(SUBREF slice, slicedNdField) can be used to construct ndslice view on top of $(LREF Concatenation).
|
|$(SUBREF allocation, slice) has special overload for $(LREF Concatenation) that can be used to allocate new ndslice.
|
|$(BOOKTABLE $(H2 Concatenation constructors),
|$(TR $(TH Function Name) $(TH Description))
|$(T2 ._concatenation, Creates a $(LREF Concatenation) view of multiple slices.)
|$(T2 pad, Pads with a constant value.)
|$(T2 padEdge, Pads with the edge values of slice.)
|$(T2 padSymmetric, Pads with the reflection of the slice mirrored along the edge of the slice.)
|$(T2 padWrap, Pads with the wrap of the slice along the axis. The first values are used to pad the end and the end values are used to pad the beginning.)
|)
|
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko
|
|See_also: $(SUBMODULE fuse) submodule.
|
|Macros:
|SUBMODULE = $(MREF_ALTTEXT $1, mir, ndslice, $1)
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|module mir.ndslice.concatenation;
|
|import std.traits;
|import std.meta;
|
|import mir.internal.utility;
|import mir.math.common: optmath;
|import mir.ndslice.internal;
|import mir.ndslice.slice;
|import mir.primitives;
|
|@optmath:
|
|private template _expose(size_t maxN, size_t dim)
|{
| static @optmath auto _expose(S)(S s)
| {
| static if (s.N == maxN)
| {
| return s;
| }
| else
| {
| static assert(s.shape.length == s.N, "Cannot create concatenation for packed slice of smaller dimension.");
| import mir.ndslice.topology: repeat, unpack;
| auto r = s.repeat(1).unpack;
| static if (dim)
| {
| import mir.ndslice.dynamic: transposed;
| return r.transposed!(Iota!(1, dim + 1));
| }
| else
| {
| return r;
| }
| }
| }
|}
|
|private template _Expose(size_t maxN, size_t dim)
|{
| alias _expose = ._expose!(maxN, dim);
| alias _Expose(S) = ReturnType!(_expose!S);
|}
|
|
|/++
|Creates a $(LREF Concatenation) view of multiple slices.
|
|Can be used in combination with itself, $(LREF until), $(SUBREF allocation, slice),
|and $(SUBREF slice, Slice) assignment.
|
|Params:
| slices = tuple of slices and/or concatenations.
|
|Returns: $(LREF Concatenation).
|+/
|auto concatenation(size_t dim = 0, Slices...)(Slices slices)
|{
| static if (allSatisfy!(templateOr!(isSlice, isConcatenation), Slices))
| {
| import mir.algorithm.iteration: reduce;
| import mir.utility: min, max;
| enum NOf(S) = S.N;
| enum NArray = [staticMap!(NOf, Slices)];
| enum minN = size_t.max.reduce!min(NArray);
| enum maxN = size_t.min.reduce!max(NArray);
| static if (minN == maxN)
| {
| import core.lifetime: forward;
| return Concatenation!(dim, Slices)(forward!slices);
| }
| else
| {
| import core.lifetime: move;
| static assert(minN + 1 == maxN);
| alias S = staticMap!(_Expose!(maxN, dim), Slices);
| Concatenation!(dim, S) ret;
| foreach (i, ref e; ret._slices)
| e = _expose!(maxN, dim)(move(slices[i]));
| return ret;
| }
| }
| else
| {
| import core.lifetime: forward;
| return .concatenation(toSlices!(forward!slices));
| }
|}
|
|/// Concatenation of slices with different dimensions.
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: repeat, iota;
|
| // 0 0 0
| auto vector = size_t.init.repeat([3]);
|
| // 1 2 3
| // 4 5 6
| auto matrix = iota([2, 3], 1);
|
| assert(concatenation(vector, matrix).slice == [
| [0, 0, 0],
| [1, 2, 3],
| [4, 5, 6],
| ]);
|
| vector.popFront;
| assert(concatenation!1(vector, matrix).slice == [
| [0, 1, 2, 3],
| [0, 4, 5, 6],
| ]);
|}
|
|/// Multidimensional
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
| import mir.ndslice.slice : slicedNdField;
|
| // 0, 1, 2
| // 3, 4, 5
| auto a = iota(2, 3);
| // 0, 1
| // 2, 3
| auto b = iota(2, 2);
| // 0, 1, 2, 3, 4
| auto c = iota(1, 5);
|
| // 0, 1, 2, 0, 1
| // 3, 4, 5, 2, 3
| //
| // 0, 1, 2, 3, 4
| // construction phase
| auto s = concatenation(concatenation!1(a, b), c);
|
| // allocation phase
| auto d = s.slice;
| assert(d == [
| [0, 1, 2, 0, 1],
| [3, 4, 5, 2, 3],
| [0, 1, 2, 3, 4],
| ]);
|
| // optimal fragmentation for output/writing/buffering
| auto testData = [
| [0, 1, 2], [0, 1],
| [3, 4, 5], [2, 3],
| [0, 1, 2, 3, 4],
| ];
| size_t i;
| s.forEachFragment!((fragment) {
| pragma(inline, false); //reduces template bloat
| assert(fragment == testData[i++]);
| });
| assert(i == testData.length);
|
| // lazy ndslice view
| assert(s.slicedNdField == d);
|}
|
|/// 1D
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
| import mir.ndslice.slice : slicedNdField;
|
| size_t i;
| auto a = 3.iota;
| auto b = iota([6], a.length);
| auto s = concatenation(a, b);
| assert(s.length == a.length + b.length);
| // fast iteration with until
| s.until!((elem){ assert(elem == i++); return false; });
| // allocation with slice
| assert(s.slice == s.length.iota);
| // 1D or multidimensional assignment
| auto d = slice!double(s.length);
| d[] = s;
| assert(d == s.length.iota);
| d.opIndexOpAssign!"+"(s);
| assert(d == iota([s.length], 0, 2));
|
| // lazy ndslice view
| assert(s.slicedNdField == s.length.iota);
|}
|
|///
|enum bool isConcatenation(T) = is(T : Concatenation!(dim, Slices), size_t dim, Slices...);
|///
|enum size_t concatenationDimension(T : Concatenation!(dim, Slices), size_t dim, Slices...) = dim;
|
|///
|struct Concatenation(size_t dim, Slices...)
| if (Slices.length > 1)
|{
| @optmath:
|
|
| /// Slices and sub-concatenations
| Slices _slices;
|
| package enum N = typeof(Slices[0].shape).length;
|
| static assert(dim < N);
|
| alias DeepElement = CommonType!(staticMap!(DeepElementType, Slices));
|
| ///
| auto lightConst()() const @property
| {
| import std.format;
| import mir.qualifier;
| import mir.ndslice.topology: iota;
| return mixin("Concatenation!(dim, staticMap!(LightConstOf, Slices))(%(_slices[%s].lightConst,%)].lightConst)".format(_slices.length.iota));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| import std.format;
| import mir.ndslice.topology: iota;
| import mir.qualifier;
| return mixin("Concatenation!(dim, staticMap!(LightImmutableOf, Slices))(%(_slices[%s].lightImmutable,%)].lightImmutable)".format(_slices.length.iota));
| }
|
| /// Length primitive
| size_t length(size_t d = 0)() const @property
| {
| static if (d == dim)
| {
| size_t length;
| foreach(ref slice; _slices)
| length += slice.length!d;
| return length;
| }
| else
| {
| return _slices[0].length!d;
| }
| }
|
| /// Total element count in the concatenation.
| size_t elementCount()() const @property
| {
| size_t count = 1;
| foreach(i; Iota!N)
| count *= length!i;
| return count;
| }
|
| deprecated("use elementCount instead")
| alias elementsCount = elementCount;
|
| /// Shape of the concatenation.
| size_t[N] shape()() const @property
| {
| typeof(return) ret;
| foreach(i; Iota!N)
| ret[i] = length!i;
| return ret;
| }
|
| /// Multidimensional input range primitives
| bool empty(size_t d = 0)() const @property
| {
| static if (d == dim)
| {
| foreach(ref slice; _slices)
| if (!slice.empty!d)
| return false;
| return true;
| }
| else
| {
| return _slices[0].empty!d;
| }
| }
|
| /// ditto
| void popFront(size_t d = 0)()
| {
| static if (d == dim)
| {
| foreach(i, ref slice; _slices)
| {
| static if (i != Slices.length - 1)
| if (slice.empty!d)
| continue;
| return slice.popFront!d;
| }
| }
| else
| {
| foreach_reverse (ref slice; _slices)
| slice.popFront!d;
| }
| }
|
| /// ditto
| auto front(size_t d = 0)()
| {
| static if (d == dim)
| {
| foreach(i, ref slice; _slices)
| {
| static if (i != Slices.length - 1)
| if (slice.empty!d)
| continue;
| return slice.front!d;
| }
| }
| else
| {
| import mir.ndslice.internal: frontOfDim;
| enum elemDim = d < dim ? dim - 1 : dim;
| return concatenation!elemDim(frontOfDim!(d, _slices));
| }
| }
|
| /// Simplest multidimensional random access primitive
| auto opIndex()(size_t[N] indices...)
| {
| foreach(i, ref slice; _slices[0 .. $-1])
| {
| ptrdiff_t diff = indices[dim] - slice.length!dim;
| if (diff < 0)
| return slice[indices];
| indices[dim] = diff;
| }
| assert(indices[dim] < _slices[$-1].length!dim);
| return _slices[$-1][indices];
| }
|}
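|
|// Illustrative sketch, not part of the original covered source: `opIndex`
|// shifts the index across the seam between the concatenated parts.
|version(mir_test) unittest
|{
| import mir.ndslice.topology: iota;
|
| auto a = iota(3, 3);
| auto c = concatenation(a[0 .. 2], a[2 .. 3]);
|
| assert(c[1, 2] == 5); // row 1 still belongs to the first part
| assert(c[2, 0] == 6); // row 2 is shifted into the second part
|}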
|
|
|/++
|Performs `fun(st.front!d)`.
|
|This function is useful when `st.front!d` does not have a common type and fails to compile.
|
|Can be used instead of $(LREF .Concatenation.front)
|+/
|auto applyFront(size_t d = 0, alias fun, size_t dim, Slices...)(Concatenation!(dim, Slices) st)
|{
| static if (d == dim)
| {
| foreach(i, ref slice; st._slices)
| {
| static if (i != Slices.length - 1)
| if (slice.empty!d)
| continue;
| return fun(slice.front!d);
| }
| }
| else
| {
| import mir.ndslice.internal: frontOfDim;
| enum elemDim = d < dim ? dim - 1 : dim;
| auto slices = st._slices;
| return fun(concatenation!elemDim(frontOfDim!(d, slices)));
| }
|}
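|
|// Illustrative sketch, not part of the original covered source: the two parts
|// have different slice types, so `c.front` would not compile; `applyFront`
|// applies the function to whichever part currently holds the front.
|version(mir_test) unittest
|{
| import mir.ndslice.topology: iota, universal;
|
| auto a = iota(2, 3);
| auto b = a.universal; // same elements, different slice type
| auto c = concatenation(a, b);
|
| assert(c.applyFront!(0, r => r[2]) == 2);
|}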
|
|/++
|Pads with a constant value.
|
|Params:
| direction = padding direction.
| Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`.
| s = $(SUBREF slice, Slice) or ndField
| value = initial value for padding
| lengths = list of lengths
|
|Returns: $(LREF Concatenation)
|
|See_also: $(LREF ._concatenation) examples.
|+/
|auto pad(string direction = "both", S, T, size_t N)(S s, T value, size_t[N] lengths...)
| if (hasShape!S && N == typeof(S.shape).length)
|{
| return .pad!([Iota!N], [Repeat!(N, direction)])(s, value, lengths);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([3], 1)
| .pad(0, [2])
| .slice;
|
| assert(pad == [0, 0, 1, 2, 3, 0, 0]);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([2, 2], 1)
| .pad(0, [2, 1])
| .slice;
|
| assert(pad == [
| [0, 0, 0, 0],
| [0, 0, 0, 0],
|
| [0, 1, 2, 0],
| [0, 3, 4, 0],
|
| [0, 0, 0, 0],
| [0, 0, 0, 0]]);
|}
|
|/++
|Pads with a constant value.
|
|Params:
| dimensions = dimensions to pad.
| directions = padding directions.
| Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`.
|
|Returns: $(LREF Concatenation)
|
|See_also: $(LREF ._concatenation) examples.
|+/
|template pad(size_t[] dimensions, string[] directions)
| if (dimensions.length && dimensions.length == directions.length)
|{
| @optmath:
|
| /++
| Params:
| s = $(SUBREF slice, Slice) or ndField
| value = initial value for padding
| lengths = list of lengths
| Returns: $(LREF Concatenation)
| See_also: $(LREF ._concatenation) examples.
| +/
| auto pad(S, T)(S s, T value, size_t[dimensions.length] lengths...)
| {
| import mir.ndslice.topology: repeat;
|
| enum d = dimensions[$ - 1];
| enum q = directions[$ - 1];
| enum N = typeof(S.shape).length;
|
| size_t[N] len;
| auto _len = s.shape;
| foreach(i; Iota!(len.length))
| static if (i != d)
| len[i] = _len[i];
| else
| len[i] = lengths[$ - 1];
|
| auto p = repeat(value, len);
| static if (q == "both")
| auto r = concatenation!d(p, s, p);
| else
| static if (q == "pre")
| auto r = concatenation!d(p, s);
| else
| static if (q == "post")
| auto r = concatenation!d(s, p);
| else
| static assert(0, `allowed directions are "both", "pre", and "post"`);
|
| static if (dimensions.length == 1)
| return r;
| else
| return .pad!(dimensions[0 .. $ - 1], directions[0 .. $ - 1])(r, value, lengths[0 .. $ -1]);
| }
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([2, 2], 1)
| .pad!([1], ["pre"])(0, [2])
| .slice;
|
| assert(pad == [
| [0, 0, 1, 2],
| [0, 0, 3, 4]]);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([2, 2], 1)
| .pad!([0, 1], ["both", "post"])(0, [2, 1])
| .slice;
|
| assert(pad == [
| [0, 0, 0],
| [0, 0, 0],
|
| [1, 2, 0],
| [3, 4, 0],
|
| [0, 0, 0],
| [0, 0, 0]]);
|}
|
|/++
|Pads with the wrap of the slice along the axis. The first values are used to pad the end and the end values are used to pad the beginning.
|
|Params:
| direction = padding direction.
| Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`.
| s = $(SUBREF slice, Slice)
| lengths = list of lengths for each dimension. Each length must be less than or equal to the corresponding slice length.
|Returns: $(LREF Concatenation)
|See_also: $(LREF ._concatenation) examples.
|+/
|auto padWrap(string direction = "both", Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) s, size_t[N] lengths...)
|{
| return .padWrap!([Iota!N], [Repeat!(N, direction)])(s, lengths);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([3], 1)
| .padWrap([2])
| .slice;
|
| assert(pad == [2, 3, 1, 2, 3, 1, 2]);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([2, 2], 1)
| .padWrap([2, 1])
| .slice;
|
| assert(pad == [
| [2, 1, 2, 1],
| [4, 3, 4, 3],
|
| [2, 1, 2, 1],
| [4, 3, 4, 3],
|
| [2, 1, 2, 1],
| [4, 3, 4, 3]]);
|}
|
|/++
|Pads with the wrap of the slice along the axis. The first values are used to pad the end and the end values are used to pad the beginning.
|
|Params:
| dimensions = dimensions to pad.
| directions = padding directions.
| Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`.
|
|Returns: $(LREF Concatenation)
|
|See_also: $(LREF ._concatenation) examples.
|+/
|template padWrap(size_t[] dimensions, string[] directions)
| if (dimensions.length && dimensions.length == directions.length)
|{
| @optmath:
|
| /++
| Params:
| s = $(SUBREF slice, Slice)
| lengths = list of lengths for each dimension. Each length must be less than or equal to the corresponding slice length.
| Returns: $(LREF Concatenation)
| See_also: $(LREF ._concatenation) examples.
| +/
| auto padWrap(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) s, size_t[dimensions.length] lengths...)
| {
| enum d = dimensions[$ - 1];
| enum q = directions[$ - 1];
|
| static if (d == 0 || kind != Contiguous)
| {
| alias _s = s;
| }
| else
| {
| import mir.ndslice.topology: canonical;
| auto _s = s.canonical;
| }
|
| assert(lengths[$ - 1] <= s.length!d);
|
| static if (dimensions.length != 1)
| alias next = .padWrap!(dimensions[0 .. $ - 1], directions[0 .. $ - 1]);
|
| static if (q == "pre" || q == "both")
| {
| auto _pre = _s;
| _pre.popFrontExactly!d(s.length!d - lengths[$ - 1]);
| static if (dimensions.length == 1)
| alias pre = _pre;
| else
| auto pre = next(_pre, lengths[0 .. $ - 1]);
| }
|
| static if (q == "post" || q == "both")
| {
| auto _post = _s;
| _post.popBackExactly!d(s.length!d - lengths[$ - 1]);
| static if (dimensions.length == 1)
| alias post = _post;
| else
| auto post = next(_post, lengths[0 .. $ - 1]);
| }
|
| static if (dimensions.length == 1)
| alias r = s;
| else
| auto r = next(s, lengths[0 .. $ - 1]);
|
| static if (q == "both")
| return concatenation!d(pre, r, post);
| else
| static if (q == "pre")
| return concatenation!d(pre, r);
| else
| static if (q == "post")
| return concatenation!d(r, post);
| else
| static assert(0, `allowed directions are "both", "pre", and "post"`);
| }
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([2, 3], 1)
| .padWrap!([1], ["pre"])([1])
| .slice;
|
| assert(pad == [
| [3, 1, 2, 3],
| [6, 4, 5, 6]]);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([2, 2], 1)
| .padWrap!([0, 1], ["both", "post"])([2, 1])
| .slice;
|
| assert(pad == [
| [1, 2, 1],
| [3, 4, 3],
|
| [1, 2, 1],
| [3, 4, 3],
|
| [1, 2, 1],
| [3, 4, 3]]);
|}
|
|/++
|Pads with the reflection of the slice mirrored along the edge of the slice.
|
|Params:
| direction = padding direction.
| Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`.
| s = $(SUBREF slice, Slice)
| lengths = list of lengths for each dimension. Each length must be less than or equal to the corresponding slice length.
|Returns: $(LREF Concatenation)
|See_also: $(LREF ._concatenation) examples.
|+/
|auto padSymmetric(string direction = "both", Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) s, size_t[N] lengths...)
|{
| return .padSymmetric!([Iota!N], [Repeat!(N, direction)])(s, lengths);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([3], 1)
| .padSymmetric([2])
| .slice;
|
| assert(pad == [2, 1, 1, 2, 3, 3, 2]);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([2, 2], 1)
| .padSymmetric([2, 1])
| .slice;
|
| assert(pad == [
| [3, 3, 4, 4],
| [1, 1, 2, 2],
|
| [1, 1, 2, 2],
| [3, 3, 4, 4],
|
| [3, 3, 4, 4],
| [1, 1, 2, 2]]);
|}
|
|/++
|Pads with the reflection of the slice mirrored along the edge of the slice.
|
|Params:
| dimensions = dimensions to pad.
| directions = padding directions.
| Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`.
|
|Returns: $(LREF Concatenation)
|
|See_also: $(LREF ._concatenation) examples.
|+/
|template padSymmetric(size_t[] dimensions, string[] directions)
| if (dimensions.length && dimensions.length == directions.length)
|{
| @optmath:
|
| /++
| Params:
| s = $(SUBREF slice, Slice)
| lengths = list of lengths for each dimension. Each length must be less than or equal to the corresponding slice length.
| Returns: $(LREF Concatenation)
| See_also: $(LREF ._concatenation) examples.
| +/
| auto padSymmetric(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) s, size_t[dimensions.length] lengths...)
| {
| enum d = dimensions[$ - 1];
| enum q = directions[$ - 1];
| import mir.ndslice.dynamic: reversed;
|
|
| static if (kind == Contiguous)
| {
| import mir.ndslice.topology: canonical;
| auto __s = s.canonical;
| }
| else
| {
| alias __s = s;
| }
|
| static if (kind == Universal || d != N - 1)
| {
| auto _s = __s.reversed!d;
| }
| else
| static if (N == 1)
| {
| import mir.ndslice.topology: retro;
| auto _s = s.retro;
| }
| else
| {
| import mir.ndslice.topology: retro;
| auto _s = __s.retro.reversed!(Iota!d, Iota!(d + 1, N));
| }
|
| assert(lengths[$ - 1] <= s.length!d);
|
| static if (dimensions.length != 1)
| alias next = .padSymmetric!(dimensions[0 .. $ - 1], directions[0 .. $ - 1]);
|
| static if (q == "pre" || q == "both")
| {
| auto _pre = _s;
| _pre.popFrontExactly!d(s.length!d - lengths[$ - 1]);
| static if (dimensions.length == 1)
| alias pre = _pre;
| else
| auto pre = next(_pre, lengths[0 .. $ - 1]);
| }
|
| static if (q == "post" || q == "both")
| {
| auto _post = _s;
| _post.popBackExactly!d(s.length!d - lengths[$ - 1]);
| static if (dimensions.length == 1)
| alias post = _post;
| else
| auto post = next(_post, lengths[0 .. $ - 1]);
| }
|
| static if (dimensions.length == 1)
| alias r = s;
| else
| auto r = next(s, lengths[0 .. $ - 1]);
|
| static if (q == "both")
| return concatenation!d(pre, r, post);
| else
| static if (q == "pre")
| return concatenation!d(pre, r);
| else
| static if (q == "post")
| return concatenation!d(r, post);
| else
| static assert(0, `allowed directions are "both", "pre", and "post"`);
| }
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([2, 3], 1)
| .padSymmetric!([1], ["pre"])([2])
| .slice;
|
| assert(pad == [
| [2, 1, 1, 2, 3],
| [5, 4, 4, 5, 6]]);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([2, 2], 1)
| .padSymmetric!([0, 1], ["both", "post"])([2, 1])
| .slice;
|
| assert(pad == [
| [3, 4, 4],
| [1, 2, 2],
|
| [1, 2, 2],
| [3, 4, 4],
|
| [3, 4, 4],
| [1, 2, 2]]);
|}
|
|/++
|Pads with the edge values of slice.
|
|Params:
| direction = padding direction.
| Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`.
| s = $(SUBREF slice, Slice)
| lengths = list of lengths for each dimension.
|Returns: $(LREF Concatenation)
|See_also: $(LREF ._concatenation) examples.
|+/
|auto padEdge(string direction = "both", Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) s, size_t[N] lengths...)
|{
| return .padEdge!([Iota!N], [Repeat!(N, direction)])(s, lengths);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([3], 1)
| .padEdge([2])
| .slice;
|
| assert(pad == [1, 1, 1, 2, 3, 3, 3]);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([2, 2], 1)
| .padEdge([2, 1])
| .slice;
|
| assert(pad == [
| [1, 1, 2, 2],
| [1, 1, 2, 2],
|
| [1, 1, 2, 2],
| [3, 3, 4, 4],
|
| [3, 3, 4, 4],
| [3, 3, 4, 4]]);
|}
|
|/++
|Pads with the edge values of slice.
|
|Params:
| dimensions = dimensions to pad.
| directions = padding directions.
| Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`.
|
|Returns: $(LREF Concatenation)
|
|See_also: $(LREF ._concatenation) examples.
|+/
|template padEdge(size_t[] dimensions, string[] directions)
| if (dimensions.length && dimensions.length == directions.length)
|{
| @optmath:
|
| /++
| Params:
| s = $(SUBREF slice, Slice)
| lengths = list of lengths for each dimension.
| Returns: $(LREF Concatenation)
| See_also: $(LREF ._concatenation) examples.
| +/
| auto padEdge(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) s, size_t[dimensions.length] lengths...)
| {
| enum d = dimensions[$ - 1];
| enum q = directions[$ - 1];
|
| static if (kind == Universal)
| {
| alias _s = s;
| }
| else
| static if (d != N - 1)
| {
| import mir.ndslice.topology: canonical;
| auto _s = s.canonical;
| }
| else
| {
| import mir.ndslice.topology: universal;
| auto _s = s.universal;
| }
|
| static if (dimensions.length != 1)
| alias next = .padEdge!(dimensions[0 .. $ - 1], directions[0 .. $ - 1]);
|
| static if (q == "pre" || q == "both")
| {
| auto _pre = _s;
| _pre._strides[d] = 0;
| _pre._lengths[d] = lengths[$ - 1];
| static if (dimensions.length == 1)
| alias pre = _pre;
| else
| auto pre = next(_pre, lengths[0 .. $ - 1]);
|
| }
|
| static if (q == "post" || q == "both")
| {
| auto _post = _s;
| _post._iterator += _post.backIndex!d;
| _post._strides[d] = 0;
| _post._lengths[d] = lengths[$ - 1];
| static if (dimensions.length == 1)
| alias post = _post;
| else
| auto post = next(_post, lengths[0 .. $ - 1]);
| }
|
| static if (dimensions.length == 1)
| alias r = s;
| else
| auto r = next( s, lengths[0 .. $ - 1]);
|
| static if (q == "both")
| return concatenation!d(pre, r, post);
| else
| static if (q == "pre")
| return concatenation!d(pre, r);
| else
| static if (q == "post")
| return concatenation!d(r, post);
| else
| static assert(0, `allowed directions are "both", "pre", and "post"`);
| }
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([2, 3], 1)
| .padEdge!([0], ["pre"])([2])
| .slice;
|
| assert(pad == [
| [1, 2, 3],
| [1, 2, 3],
|
| [1, 2, 3],
| [4, 5, 6]]);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto pad = iota([2, 2], 1)
| .padEdge!([0, 1], ["both", "post"])([2, 1])
| .slice;
|
| assert(pad == [
| [1, 2, 2],
| [1, 2, 2],
|
| [1, 2, 2],
| [3, 4, 4],
|
| [3, 4, 4],
| [3, 4, 4]]);
|}
|
|/++
|Iterates 1D fragments in $(SUBREF slice, Slice) or $(LREF Concatenation) in optimal for buffering way.
|
|See_also: $(LREF ._concatenation) examples.
|+/
|template forEachFragment(alias pred)
|{
| @optmath:
|
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| {
| /++
| Specialization for slices
| Params:
| sl = $(SUBREF slice, Slice)
| +/
| void forEachFragment(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) sl)
| {
| static if (N == 1)
| {
| pred(sl);
| }
| else
| static if (kind == Contiguous)
| {
| import mir.ndslice.topology: flattened;
| pred(sl.flattened);
| }
| else
| {
| if (!sl.empty) do
| {
| .forEachFragment!pred(sl.front);
| sl.popFront;
| }
| while(!sl.empty);
| }
| }
|
| /++
| Specialization for concatenations
| Params:
| st = $(LREF Concatenation)
| +/
| void forEachFragment(size_t dim, Slices...)(Concatenation!(dim, Slices) st)
| {
| static if (dim == 0)
| {
| foreach (i, ref slice; st._slices)
| .forEachFragment!pred(slice);
| }
| else
| {
| if (!st.empty) do
| {
| st.applyFront!(0, .forEachFragment!pred);
| st.popFront;
| }
| while(!st.empty);
| }
| }
| }
| else
| alias forEachFragment = .forEachFragment!(naryFun!pred);
|}
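|
|// Illustrative sketch, not part of the original covered source: for a
|// concatenation along dimension 1 the fragments are the row pieces of each
|// part, visited in buffering-friendly order.
|version(mir_test) unittest
|{
| import mir.ndslice.topology: iota;
|
| // 0 1 | 4 5 6
| // 2 3 | 7 8 9
| auto s = concatenation!1(iota(2, 2), iota([2, 3], 4));
|
| auto testData = [[0, 1], [4, 5, 6], [2, 3], [7, 8, 9]];
| size_t i;
| s.forEachFragment!((fragment) {
| assert(fragment == testData[i++]);
| });
| assert(i == testData.length);
|}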
|
|/++
|Iterates elements in $(SUBREF slice, Slice) or $(LREF Concatenation)
|until pred returns true.
|
|Returns: false if pred returned false for all elements and true otherwise.
|
|See_also: $(LREF ._concatenation) examples.
|+/
|template until(alias pred)
|{
| @optmath:
|
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| {
| /++
| Specialization for slices
| Params:
| sl = $(SUBREF slice, Slice)
| +/
| bool until(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) sl)
| {
| static if (N == 1)
| {
| pragma(inline, false);
| alias f = pred;
| }
| else
| alias f = .until!pred;
| if (!sl.empty) do
| {
| if (f(sl.front))
| return true;
| sl.popFront;
| }
| while(!sl.empty);
| return false;
| }
|
| /++
| Specialization for concatenations
| Params:
| st = $(LREF Concatenation)
| +/
| bool until(size_t dim, Slices...)(Concatenation!(dim, Slices) st)
| {
| static if (dim == 0)
| {
| foreach (i, ref slice; st._slices)
| {
| if (.until!pred(slice))
| return true;
| }
| }
| else
| {
| if (!st.empty) do
| {
| if (st.applyFront!(0, .until!pred))
| return true;
| st.popFront;
| }
| while(!st.empty);
| }
| return false;
| }
| }
| else
| alias until = .until!(naryFun!pred);
|}
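|
|// Illustrative sketch, not part of the original covered source: `until` visits
|// the elements in fragment order and stops at the first match.
|version(mir_test) unittest
|{
| import mir.ndslice.topology: iota;
|
| auto s = concatenation(3.iota, iota([6], 3));
| assert(s.until!(a => a >= 5)); // found, iteration stops early
| assert(!s.until!(a => a >= 100)); // no element matches
|}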
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/concatenation.d has no code
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-core-1.1.51-mir-core-source-mir-functional.lst
|/++
|Functions that manipulate other functions.
|This module provides functions for compile time function composition. These
|functions are helpful when constructing predicates for the algorithms in
|$(MREF mir, ndslice).
|$(BOOKTABLE $(H2 Functions),
|$(TR $(TH Function Name) $(TH Description))
| $(TR $(TD $(LREF naryFun))
| $(TD Create a unary, binary, or N-ary function from a string. Most often
| used when defining algorithms on ranges and slices.
| ))
| $(TR $(TD $(LREF pipe))
| $(TD Join a couple of functions into one that executes the original
| functions one after the other, using one function's result for the next
| function's argument.
| ))
| $(TR $(TD $(LREF not))
| $(TD Creates a function that negates another.
| ))
| $(TR $(TD $(LREF reverseArgs))
| $(TD Predicate that reverses the order of its arguments.
| ))
| $(TR $(TD $(LREF forward))
| $(TD Forwards function arguments while preserving ref-ness.
| ))
| $(TR $(TD $(LREF refTuple))
| $(TD Creates a $(LREF RefTuple) structure.
| ))
| $(TR $(TD $(LREF unref))
| $(TD Removes $(LREF Ref) shell.
| ))
| $(TR $(TD $(LREF __ref))
| $(TD Creates a $(LREF Ref) structure.
| ))
|)
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Authors: Ilya Yaroshenko, $(HTTP erdani.org, Andrei Alexandrescu (some original code from std.functional))
|
|Macros:
|NDSLICE = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|+/
|module mir.functional;
|
|private enum isRef(T) = is(T : Ref!T0, T0);
|
|import mir.math.common: optmath;
|
|public import core.lifetime : forward;
|
|@optmath:
|
|/++
|Constructs static array.
|+/
|T[N] staticArray(T, size_t N)(T[N] a...)
|{
| return a;
|}
|
|/++
|Simple wrapper that holds a pointer.
|It is used as a workaround to return multiple auto ref values.
|+/
|struct Ref(T)
| if (!isRef!T)
|{
| @optmath:
|
| @disable this();
| ///
| this(ref T value) @trusted
| {
| __ptr = &value;
| }
| ///
| T* __ptr;
| ///
| ref inout(T) __value() inout @property { return *__ptr; }
| ///
| alias __value this;
| ///
| bool opEquals(scope Ref!T rhs) const scope
| {
| return __value == rhs.__value;
| }
|
| static if (__traits(hasMember, T, "toHash") || __traits(isScalar, T))
| ///
| size_t toHash() const
| {
| return hashOf(__value);
| }
|}
|
|/// Creates $(LREF Ref) wrapper.
|Ref!T _ref(T)(ref T value)
|{
| return Ref!T(value);
|}
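|
|// Illustrative sketch, not part of the original covered source: a `Ref` reads
|// and writes through the stored pointer.
|version(mir_core_test) unittest
|{
| int x = 3;
| auto r = _ref(x);
| x = 5;
| assert(r == 5); // reads the current value of `x`
| r = 7; // writes through the pointer
| assert(x == 7);
|}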
|
|private mixin template _RefTupleMixin(T...)
| if (T.length <= 26)
|{
| static if (T.length)
| {
| enum i = T.length - 1;
| static if (isRef!(T[i]))
| mixin(`@optmath @property ref ` ~ cast(char)('a' + i) ~ `() { return *expand[` ~ i.stringof ~ `].__ptr; }` );
| else
| mixin(`alias ` ~ cast(char)('a' + i) ~ ` = expand[` ~ i.stringof ~ `];`);
| mixin ._RefTupleMixin!(T[0 .. $-1]);
| }
|}
|
|/++
|Simplified tuple structure. Some fields may be of type $(LREF Ref).
|Ref stores a pointer to a value.
|+/
|struct RefTuple(T...)
|{
| @optmath:
| T expand;
| alias expand this;
| mixin _RefTupleMixin!T;
|}
|
|/// Removes $(LREF Ref) shell.
|alias Unref(V : Ref!T, T) = T;
|/// ditto
|template Unref(V : RefTuple!T, T...)
|{
| import std.meta: staticMap;
| alias Unref = RefTuple!(staticMap!(.Unref, T));
|}
|
|/// ditto
|alias Unref(V) = V;
|
|/++
|Returns: a $(LREF RefTuple) structure.
|+/
|RefTuple!Args refTuple(Args...)(auto ref Args args)
|{
| return RefTuple!Args(args);
|}
|
|/// Removes $(LREF Ref) shell.
|ref T unref(V : Ref!T, T)(scope return V value)
|{
| return *value.__ptr;
|}
|
|/// ditto
|Unref!(RefTuple!T) unref(V : RefTuple!T, T...)(V value)
|{
| typeof(return) ret;
| foreach(i, ref elem; ret.expand)
| elem = unref(value.expand[i]);
| return ret;
|}
|
|/// ditto
|ref V unref(V)(scope return ref V value)
|{
| return value;
|}
|
|/// ditto
|V unref(V)(V value)
|{
| import std.traits: hasElaborateAssign;
| static if (hasElaborateAssign!V)
| {
| import core.lifetime: move;
| return move(value);
| }
| else
| return value;
|}
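|
|// Illustrative sketch, not part of the original covered source: `refTuple`
|// keeps `Ref` fields as views, and `unref` copies the referenced values out.
|version(mir_core_test) unittest
|{
| int x = 1;
| auto t = refTuple(_ref(x), 2.0);
| t.a = 5; // `a` is a view of `x`
| assert(x == 5);
|
| auto u = t.unref;
| static assert(is(typeof(u) == RefTuple!(int, double)));
| assert(u.a == 5 && u.b == 2.0);
|}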
|
|private string joinStrings()(string[] strs)
|{
| if (strs.length)
| {
| auto ret = strs[0];
| foreach(s; strs[1 .. $])
| ret ~= s;
| return ret;
| }
| return null;
|}
|
|/++
|Takes multiple functions and adjoins them together. The result is a
|$(LREF RefTuple) with one element per passed-in function. Upon
|invocation, the returned tuple is the adjoined results of all
|functions.
|Note: In the special case where only a single function is provided
|(`fun.length == 1`), adjoin simply aliases to the single passed function
|(`fun[0]`).
|+/
|template adjoin(fun...) if (fun.length && fun.length <= 26)
|{
| static if (fun.length != 1)
| {
| import std.meta: staticMap, Filter;
| static if (Filter!(_needNary, fun).length == 0)
| {
| ///
| @optmath auto adjoin(Args...)(auto ref Args args)
| {
| template _adjoin(size_t i)
| {
| static if (__traits(compiles, &fun[i](forward!args)))
| enum _adjoin = "Ref!(typeof(fun[" ~ i.stringof ~ "](forward!args)))(fun[" ~ i.stringof ~ "](forward!args)), ";
| else
| enum _adjoin = "fun[" ~ i.stringof ~ "](forward!args), ";
| }
|
| import mir.internal.utility;
| mixin("return refTuple(" ~ [staticMap!(_adjoin, Iota!(fun.length))].joinStrings ~ ");");
| }
| }
| else alias adjoin = .adjoin!(staticMap!(naryFun, fun));
| }
| else alias adjoin = naryFun!(fun[0]);
|}
|
|///
|@safe version(mir_core_test) unittest
|{
| static bool f1(int a) { return a != 0; }
| static int f2(int a) { return a / 2; }
| auto x = adjoin!(f1, f2)(5);
| assert(is(typeof(x) == RefTuple!(bool, int)));
| assert(x.a == true && x.b == 2);
|}
|
|@safe version(mir_core_test) unittest
|{
| static bool F1(int a) { return a != 0; }
| auto x1 = adjoin!(F1)(5);
| static int F2(int a) { return a / 2; }
| auto x2 = adjoin!(F1, F2)(5);
| assert(is(typeof(x2) == RefTuple!(bool, int)));
| assert(x2.a && x2.b == 2);
| auto x3 = adjoin!(F1, F2, F2)(5);
| assert(is(typeof(x3) == RefTuple!(bool, int, int)));
| assert(x3.a && x3.b == 2 && x3.c == 2);
|
| bool F4(int a) { return a != x1; }
| alias eff4 = adjoin!(F4);
| static struct S
| {
| bool delegate(int) @safe store;
| int fun() { return 42 + store(5); }
| }
| S s;
| s.store = (int a) { return eff4(a); };
| auto x4 = s.fun();
| assert(x4 == 43);
|}
|
|//@safe
|version(mir_core_test) unittest
|{
| import std.meta: staticMap;
| alias funs = staticMap!(naryFun, "a", "a * 2", "a * 3", "a * a", "-a");
| alias afun = adjoin!funs;
| int a = 5, b = 5;
| assert(afun(a) == refTuple(Ref!int(a), 10, 15, 25, -5));
| assert(afun(a) == refTuple(Ref!int(b), 10, 15, 25, -5));
|
| static class C{}
| alias IC = immutable(C);
| IC foo(){return typeof(return).init;}
| RefTuple!(IC, IC, IC, IC) ret1 = adjoin!(foo, foo, foo, foo)();
|
| static struct S{int* p;}
| alias IS = immutable(S);
| IS bar(){return typeof(return).init;}
| enum RefTuple!(IS, IS, IS, IS) ret2 = adjoin!(bar, bar, bar, bar)();
|}
|
|private template needOpCallAlias(alias fun)
|{
| /* Determine whether or not naryFun needs to alias to fun or
| * fun.opCall. Basically, fun is a function object if fun(...) compiles. We
| * want is(naryFun!fun) to be true if fun is
| * any function object. There are 4 possible cases:
| *
| * 1) fun is the type of a function object with static opCall;
| * 2) fun is an instance of a function object with static opCall;
| * 3) fun is the type of a function object with non-static opCall;
| * 4) fun is an instance of a function object with non-static opCall.
| *
| * In case (1), is(naryFun!fun) should compile, but does not if naryFun
| * aliases itself to fun, because typeof(fun) is an error when fun itself
| * is a type. So it must be aliased to fun.opCall instead. All other cases
| * should be aliased to fun directly.
| */
| static if (is(typeof(fun.opCall) == function))
| {
| import std.traits: Parameters;
| enum needOpCallAlias = !is(typeof(fun)) && __traits(compiles, () {
| return fun(Parameters!fun.init);
| });
| }
| else
| enum needOpCallAlias = false;
|}
|
|private template _naryAliases(size_t n)
| if (n <= 26)
|{
| static if (n == 0)
| enum _naryAliases = "";
| else
| {
| enum i = n - 1;
| enum _naryAliases = _naryAliases!i ~ "alias " ~ cast(char)('a' + i) ~ " = args[" ~ i.stringof ~ "];\n";
| }
|}
|
|/++
|Aliases itself to a set of functions.
|
|Transforms strings representing an expression into an N-ary function. The
|strings must use symbol names `a`, `b`, ..., `z` as the parameters.
|If `functions[i]` is not a string, `naryFun` aliases itself away to `functions[i]`.
|+/
|template naryFun(functions...)
| if (functions.length >= 1)
|{
| static foreach (fun; functions)
| {
| static if (is(typeof(fun) : string))
| {
| import mir.math.common;
| /// Specialization for string lambdas
| @optmath auto ref naryFun(Args...)(auto ref Args args)
| if (args.length <= 26)
| {
| mixin(_naryAliases!(Args.length));
| return mixin(fun);
| }
| }
| else static if (needOpCallAlias!fun)
| alias naryFun = fun.opCall;
| else
| alias naryFun = fun;
| }
|}
|
|///
|@safe version(mir_core_test) unittest
|{
| // Strings are compiled into functions:
| alias isEven = naryFun!("(a & 1) == 0");
| assert(isEven(2) && !isEven(1));
|}
|
|///
|@safe version(mir_core_test) unittest
|{
| alias less = naryFun!("a < b");
| assert(less(1, 2) && !less(2, 1));
| alias greater = naryFun!("a > b");
| assert(!greater("1", "2") && greater("2", "1"));
|}
|
|/// `naryFun` accepts up to 26 arguments.
|@safe version(mir_core_test) unittest
|{
| assert(naryFun!("a * b + c")(2, 3, 4) == 10);
|}
|
|/// `naryFun` can return by reference.
|version(mir_core_test) unittest
|{
| int a;
| assert(&naryFun!("a")(a) == &a);
|}
|
|/// `args` parameter tuple
|version(mir_core_test) unittest
|{
| assert(naryFun!("args[0] + args[1]")(2, 3) == 5);
|}
|
|/// Multiple functions
|@safe pure nothrow @nogc
|version(mir_core_test) unittest
|{
| alias fun = naryFun!(
| (uint a) => a,
| (ulong a) => a * 2,
| a => a * 3,
| );
|
| int a = 10;
| long b = 10;
| float c = 10;
|
| assert(fun(a) == 10);
| assert(fun(b) == 20);
| assert(fun(c) == 30);
|}
|
|@safe version(mir_core_test) unittest
|{
| static int f1(int a) { return a + 1; }
| static assert(is(typeof(naryFun!(f1)(1)) == int));
| assert(naryFun!(f1)(41) == 42);
| int f2(int a) { return a + 1; }
| static assert(is(typeof(naryFun!(f2)(1)) == int));
| assert(naryFun!(f2)(41) == 42);
| assert(naryFun!("a + 1")(41) == 42);
|
| int num = 41;
| assert(naryFun!"a + 1"(num) == 42);
|
| // Issue 9906
| struct Seen
| {
| static bool opCall(int n) { return true; }
| }
| static assert(needOpCallAlias!Seen);
| static assert(is(typeof(naryFun!Seen(1))));
| assert(naryFun!Seen(1));
|
| Seen s;
| static assert(!needOpCallAlias!s);
| static assert(is(typeof(naryFun!s(1))));
| assert(naryFun!s(1));
|
| struct FuncObj
| {
| bool opCall(int n) { return true; }
| }
| FuncObj fo;
| static assert(!needOpCallAlias!fo);
| static assert(is(typeof(naryFun!fo)));
| assert(naryFun!fo(1));
|
| // Function object with non-static opCall can only be called with an
| // instance, not with merely the type.
| static assert(!is(typeof(naryFun!FuncObj)));
|}
|
|@safe version(mir_core_test) unittest
|{
| static int f1(int a, string b) { return a + 1; }
| static assert(is(typeof(naryFun!(f1)(1, "2")) == int));
| assert(naryFun!(f1)(41, "a") == 42);
| string f2(int a, string b) { return b ~ "2"; }
| static assert(is(typeof(naryFun!(f2)(1, "1")) == string));
| assert(naryFun!(f2)(1, "4") == "42");
| assert(naryFun!("a + b")(41, 1) == 42);
| //@@BUG
| //assert(naryFun!("return a + b;")(41, 1) == 42);
|
| // Issue 9906
| struct Seen
| {
| static bool opCall(int x, int y) { return true; }
| }
| static assert(is(typeof(naryFun!Seen)));
| assert(naryFun!Seen(1,1));
|
| struct FuncObj
| {
| bool opCall(int x, int y) { return true; }
| }
| FuncObj fo;
| static assert(!needOpCallAlias!fo);
| static assert(is(typeof(naryFun!fo)));
| assert(naryFun!fo(1,1));
|
| // Function object with non-static opCall can only be called with an
| // instance, not with merely the type.
| static assert(!is(typeof(naryFun!FuncObj)));
|}
|
|
|/++
|N-ary predicate that reverses the order of arguments, e.g., given
|`pred(a, b, c)`, returns `pred(c, b, a)`.
|+/
|template reverseArgs(alias fun)
|{
| import std.meta: Reverse;
| ///
| @optmath auto ref reverseArgs(Args...)(auto ref Args args)
| if (is(typeof(fun(Reverse!args))))
| {
| return fun(Reverse!args);
| }
|
|}
|
|///
|@safe version(mir_core_test) unittest
|{
| int abc(int a, int b, int c) { return a * b + c; }
| alias cba = reverseArgs!abc;
| assert(abc(91, 17, 32) == cba(32, 17, 91));
|}
|
|@safe version(mir_core_test) unittest
|{
| int a(int a) { return a * 2; }
| alias _a = reverseArgs!a;
| assert(a(2) == _a(2));
|}
|
|@safe version(mir_core_test) unittest
|{
| int b() { return 4; }
| alias _b = reverseArgs!b;
| assert(b() == _b());
|}
|
|@safe version(mir_core_test) unittest
|{
| alias gt = reverseArgs!(naryFun!("a < b"));
| assert(gt(2, 1) && !gt(1, 1));
| int x = 42;
| bool xyz(int a, int b) { return a * x < b / x; }
| auto foo = &xyz;
| foo(4, 5);
| alias zyx = reverseArgs!(foo);
| assert(zyx(5, 4) == foo(4, 5));
|}
|
|/++
|Negates predicate `pred`.
|+/
|template not(alias pred)
|{
| static if (!is(typeof(pred) : string) && !needOpCallAlias!pred)
| ///
| @optmath bool not(T...)(auto ref T args)
| {
| return !pred(args);
| }
| else
| alias not = .not!(naryFun!pred);
|}
|
|///
|@safe version(mir_core_test) unittest
|{
| import std.algorithm.searching : find;
| import std.uni : isWhite;
| string a = " Hello, world!";
| assert(find!(not!isWhite)(a) == "Hello, world!");
|}
|
|@safe version(mir_core_test) unittest
|{
| assert(not!"a != 5"(5));
| assert(not!"a != b"(5, 5));
|
| assert(not!(() => false)());
| assert(not!(a => a != 5)(5));
| assert(not!((a, b) => a != b)(5, 5));
| assert(not!((a, b, c) => a * b * c != 125 )(5, 5, 5));
|}
|
|private template _pipe(size_t n)
|{
| static if (n)
| {
| enum i = n - 1;
| enum _pipe = "f[" ~ i.stringof ~ "](" ~ ._pipe!i ~ ")";
| }
| else
| enum _pipe = "args";
|}
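|
|// Illustration: `_pipe!2` expands to a call chain equivalent to `f[1](f[0](args))`,
|// i.e. the first listed function is applied first and its result feeds the next one.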
|
|private template _unpipe(alias fun)
|{
| import std.traits: TemplateArgsOf, TemplateOf;
| static if (__traits(compiles, TemplateOf!fun))
| static if (__traits(isSame, TemplateOf!fun, .pipe))
| alias _unpipe = TemplateArgsOf!fun;
| else
| alias _unpipe = fun;
| else
| alias _unpipe = fun;
|
|}
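|
|// Illustration: `_unpipe` unwraps nested `pipe!(...)` instantiations so that, for
|// example, `pipe!(pipe!(a, b), pipe!(b, a))` ends up as the same symbol as
|// `pipe!(a, b, b, a)` (see the "Template bloat reduction" test below).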
|
|private enum _needNary(alias fun) = is(typeof(fun) : string) || needOpCallAlias!fun;
|
|/++
|Composes passed-in functions `fun[0], fun[1], ...` returning a
|function `f(x)` that in turn returns
|`...(fun[1](fun[0](x)))...`. Each function can be a regular
|function, a delegate, a lambda, or a string.
|+/
|template pipe(fun...)
|{
| static if (fun.length != 1)
| {
| import std.meta: staticMap, Filter;
| alias f = staticMap!(_unpipe, fun);
| static if (f.length == fun.length && Filter!(_needNary, f).length == 0)
| {
| ///
| @optmath auto ref pipe(Args...)(auto ref Args args)
| {
| return mixin (_pipe!(fun.length));
| }
| }
| else alias pipe = .pipe!(staticMap!(naryFun, f));
| }
| else alias pipe = naryFun!(fun[0]);
|}
|
|///
|@safe version(mir_core_test) unittest
|{
| assert(pipe!("a + b", a => a * 10)(2, 3) == 50);
|}
|
|/// `pipe` can return by reference.
|version(mir_core_test) unittest
|{
| int a;
| assert(&pipe!("a", "a")(a) == &a);
|}
|
|/// Template bloat reduction
|version(mir_core_test) unittest
|{
| enum a = "a * 2";
| alias b = e => e + 2;
|
| alias p0 = pipe!(pipe!(a, b), pipe!(b, a));
| alias p1 = pipe!(a, b, b, a);
|
| static assert(__traits(isSame, p0, p1));
|}
|
|@safe version(mir_core_test) unittest
|{
| import std.algorithm.comparison : equal;
| import std.algorithm.iteration : map;
| import std.array : split;
| import std.conv : to;
|
| // First split a string in whitespace-separated tokens and then
| // convert each token into an integer
| assert(pipe!(split, map!(to!(int)))("1 2 3").equal([1, 2, 3]));
|}
|
|
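|// Helper produced by `aliasCall` (below): wraps a value and forwards `opCall`
|// to the wrapped value's method `methodName`, optionally with `TemplateArgs`.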
|struct AliasCall(T, string methodName, TemplateArgs...)
|{
| T __this;
| alias __this this;
|
| ///
| auto lightConst()() const @property
| {
| import mir.qualifier;
| return AliasCall!(LightConstOf!T, methodName, TemplateArgs)(__this.lightConst);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| import mir.qualifier;
| return AliasCall!(LightImmutableOf!T, methodName, TemplateArgs)(__this.lightImmutable);
| }
|
| this()(auto ref T value)
| {
| __this = value;
| }
| auto ref opCall(Args...)(auto ref Args args)
| {
| import std.traits: TemplateArgsOf;
| mixin("return __this." ~ methodName ~ (TemplateArgs.length ? "!TemplateArgs" : "") ~ "(forward!args);");
| }
|}
|
|/++
|Replaces call operator (`opCall`) for the value using its method.
|The function is designed to be used with $(NDSLICE, topology, vmap) or $(NDSLICE, topology, map).
|Params:
| methodName = name of the methods to use for opCall and opIndex
| TemplateArgs = template arguments
|+/
|template aliasCall(string methodName, TemplateArgs...)
|{
| /++
| Params:
| value = the value to wrap
| Returns:
| wrapped value with implemented opCall and opIndex methods
| +/
| AliasCall!(T, methodName, TemplateArgs) aliasCall(T)(T value) @property
| {
| return typeof(return)(value);
| }
|
| /// ditto
| ref AliasCall!(T, methodName, TemplateArgs) aliasCall(T)(return ref T value) @property @trusted
| {
| return *cast(typeof(return)*) &value;
| }
|}
|
|///
|@safe pure nothrow version(mir_core_test) unittest
|{
| static struct S
| {
| auto lightConst()() const @property { return S(); }
|
| auto fun(size_t ct_param = 1)(size_t rt_param) const
| {
| return rt_param + ct_param;
| }
| }
|
| S s;
|
| auto sfun = aliasCall!"fun"(s);
| assert(sfun(3) == 4);
|
| auto sfun10 = aliasCall!("fun", 10)(s); // uses fun!10
| assert(sfun10(3) == 13);
|}
|
|/++
|Applies `Template` to `Args` recursively, nesting `N` instantiations (see the example below).
|+/
|template recurseTemplatePipe(alias Template, size_t N, Args...)
|{
| static if (N == 0)
| alias recurseTemplatePipe = Args;
| else
| {
| alias recurseTemplatePipe = Template!(.recurseTemplatePipe!(Template, N - 1, Args));
| }
|}
|
|///
|@safe version(mir_core_test) unittest
|{
| // import mir.ndslice.topology: map;
| alias map(alias fun) = a => a; // some template
| static assert (__traits(isSame, recurseTemplatePipe!(map, 2, "a * 2"), map!(map!"a * 2")));
|}
|
|/++
|+/
|template selfAndRecurseTemplatePipe(alias Template, size_t N, Args...)
|{
| static if (N == 0)
| alias selfAndRecurseTemplatePipe = Args;
| else
| {
| alias selfAndRecurseTemplatePipe = Template!(.selfAndRecurseTemplatePipe!(Template, N - 1, Args));
| }
|}
|
|///
|@safe version(mir_core_test) unittest
|{
| // import mir.ndslice.topology: map;
| alias map(alias fun) = a => a; // some template
| static assert (__traits(isSame, selfAndRecurseTemplatePipe!(map, 2, "a * 2"), map!(pipe!("a * 2", map!"a * 2"))));
|}
|
|/++
|+/
|template selfTemplatePipe(alias Template, size_t N, Args...)
|{
| static if (N == 0)
| alias selfTemplatePipe = Args;
| else
| {
| alias selfTemplatePipe = Template!(.selfTemplatePipe!(Template, N - 1, Args));
| }
|}
|
|///
|@safe version(mir_core_test) unittest
|{
| // import mir.ndslice.topology: map;
| alias map(alias fun) = a => a; // some template
| static assert (__traits(isSame, selfTemplatePipe!(map, 2, "a * 2"), map!(pipe!("a * 2", map!"a * 2"))));
|}
../../../.dub/packages/mir-core-1.1.51/mir-core/source/mir/functional.d has no code
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-math-sum.lst
|/++
|This module contains summation algorithms.
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|
|Authors: Ilya Yaroshenko
|
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|+/
|module mir.math.sum;
|
|///
|version(mir_test)
|unittest
|{
| import mir.ndslice.slice: sliced;
| import mir.ndslice.topology: map;
| auto ar = [1, 1e100, 1, -1e100].sliced.map!"a * 10_000";
| const r = 20_000;
| assert(r == ar.sum!"kbn");
| assert(r == ar.sum!"kb2");
| assert(r == ar.sum!"precise");
|}
|
|///
|version(mir_test)
|unittest
|{
| import mir.ndslice.slice: sliced, slicedField;
| import mir.ndslice.topology: map, iota, retro;
| import mir.ndslice.concatenation: concatenation;
| import mir.math.common;
| auto ar = 1000
| .iota
| .map!(n => 1.7L.pow(n+1) - 1.7L.pow(n))
| ;
| real d = 1.7L.pow(1000);
| assert(sum!"precise"(concatenation(ar, [-d].sliced).slicedField) == -1);
| assert(sum!"precise"(ar.retro, -d) == -1);
|}
|
|/++
|`Naive`, `Pairwise` and `Kahan` algorithms can be used for user defined types.
|+/
|version(mir_test)
|unittest
|{
| import mir.internal.utility: isFloatingPoint;
| static struct Quaternion(F)
| if (isFloatingPoint!F)
| {
| F[4] rijk;
|
| /// + and - operator overloading
| Quaternion opBinary(string op)(auto ref const Quaternion rhs) const
| if (op == "+" || op == "-")
| {
| Quaternion ret ;
| foreach (i, ref e; ret.rijk)
| mixin("e = rijk[i] "~op~" rhs.rijk[i];");
| return ret;
| }
|
| /// += and -= operator overloading
| Quaternion opOpAssign(string op)(auto ref const Quaternion rhs)
| if (op == "+" || op == "-")
| {
| foreach (i, ref e; rijk)
| mixin("e "~op~"= rhs.rijk[i];");
| return this;
| }
|
| ///constructor with single FP argument
| this(F f)
| {
| rijk[] = f;
| }
|
|        ///assignment with single FP argument
| void opAssign(F f)
| {
| rijk[] = f;
| }
| }
|
| Quaternion!double q, p, r;
| q.rijk = [0, 1, 2, 4];
| p.rijk = [3, 4, 5, 9];
| r.rijk = [3, 5, 7, 13];
|
| assert(r == [p, q].sum!"naive");
| assert(r == [p, q].sum!"pairwise");
| assert(r == [p, q].sum!"kahan");
|}
|
|/++
|All summation algorithms available for complex numbers.
|+/
|version(mir_test)
|unittest
|{
| cdouble[] ar = [1.0 + 2i, 2 + 3i, 3 + 4i, 4 + 5i];
| cdouble r = 10 + 14i;
| assert(r == ar.sum!"fast");
| assert(r == ar.sum!"naive");
| assert(r == ar.sum!"pairwise");
| assert(r == ar.sum!"kahan");
| version(LDC) // DMD Internal error: backend/cgxmm.c 628
| {
| assert(r == ar.sum!"kbn");
| assert(r == ar.sum!"kb2");
| }
| assert(r == ar.sum!"precise");
|}
|
|///
|version(mir_test)
|@safe pure nothrow unittest
|{
| import mir.ndslice.topology: repeat, iota;
|
| //simple integral summation
| assert(sum([ 1, 2, 3, 4]) == 10);
|
| //with initial value
| assert(sum([ 1, 2, 3, 4], 5) == 15);
|
| //with integral promotion
| assert(sum([false, true, true, false, true]) == 3);
| assert(sum(ubyte.max.repeat(100)) == 25_500);
|
| //The result may overflow
| assert(uint.max.repeat(3).sum == 4_294_967_293U );
| //But a seed can be used to change the summation primitive
| assert(uint.max.repeat(3).sum(ulong.init) == 12_884_901_885UL);
|
| //Floating point summation
| assert(sum([1.0, 2.0, 3.0, 4.0]) == 10);
|
| //Type overriding
| static assert(is(typeof(sum!double([1F, 2F, 3F, 4F])) == double));
| static assert(is(typeof(sum!double([1F, 2F, 3F, 4F], 5F)) == double));
| assert(sum([1F, 2, 3, 4]) == 10);
| assert(sum([1F, 2, 3, 4], 5F) == 15);
|
| //Force pair-wise floating point summation on large integers
| import mir.math : approxEqual;
| assert(iota!long([4096], uint.max / 2).sum(0.0)
| .approxEqual((uint.max / 2) * 4096.0 + 4096.0 * 4096.0 / 2));
|}
|
|/// Precise summation
|version(mir_test)
|nothrow @nogc unittest
|{
| import mir.ndslice.topology: iota, map;
| import core.stdc.tgmath: pow;
| assert(iota(1000).map!(n => 1.7L.pow(real(n)+1) - 1.7L.pow(real(n)))
| .sum!"precise" == -1 + 1.7L.pow(1000.0L));
|}
|
|/// Precise summation with output range
|version(mir_test)
|nothrow @nogc unittest
|{
| import mir.ndslice.topology: iota, map;
| import mir.math.common;
| auto r = iota(1000).map!(n => 1.7L.pow(n+1) - 1.7L.pow(n));
| Summator!(real, Summation.precise) s = 0.0;
| s.put(r);
| s -= 1.7L.pow(1000);
| assert(s.sum == -1);
|}
|
|/// Precise summation with output range
|version(mir_test)
|nothrow @nogc unittest
|{
| import mir.math.common;
| float M = 2.0f ^^ (float.max_exp-1);
| double N = 2.0 ^^ (float.max_exp-1);
| auto s = Summator!(float, Summation.precise)(0);
| s += M;
| s += M;
| assert(float.infinity == s.sum); //infinity
| auto e = cast(Summator!(double, Summation.precise)) s;
| assert(e.sum < double.infinity);
| assert(N+N == e.sum()); //finite number
|}
|
|/// Moving mean
|version(mir_test)
|@safe pure nothrow @nogc
|unittest
|{
| import mir.internal.utility: isFloatingPoint;
| import mir.math.sum;
| import mir.ndslice.topology: linspace;
| import mir.rc.array: rcarray;
|
| struct MovingAverage(T)
| if (isFloatingPoint!T)
| {
| import mir.math.stat: MeanAccumulator;
|
| MeanAccumulator!(T, Summation.precise) meanAccumulator;
| double[] circularBuffer;
| size_t frontIndex;
|
| @disable this(this);
|
| auto avg() @property const
| {
| return meanAccumulator.mean;
| }
|
| this(double[] buffer)
| {
| assert(buffer.length);
| circularBuffer = buffer;
| meanAccumulator.put(buffer);
| }
|
| ///operation without rounding
| void put(T x)
| {
| import mir.utility: swap;
| meanAccumulator.summator += x;
| swap(circularBuffer[frontIndex++], x);
| frontIndex = frontIndex == circularBuffer.length ? 0 : frontIndex;
| meanAccumulator.summator -= x;
| }
| }
|
| /// ma always keeps precise average of last 1000 elements
| auto x = linspace!double([1000], [0.0, 999]).rcarray;
| auto ma = MovingAverage!double(x[]);
| assert(ma.avg == (1000 * 999 / 2) / 1000.0);
| /// move by 10 elements
| foreach(e; linspace!double([10], [1000.0, 1009.0]))
| ma.put(e);
| assert(ma.avg == (1010 * 1009 / 2 - 10 * 9 / 2) / 1000.0);
|}
|
|/// Arbitrary sum
|version(mir_test)
|@safe pure nothrow
|unittest
|{
| assert(sum(1, 2, 3, 4) == 10);
| assert(sum!float(1, 2, 3, 4) == 10f);
| assert(sum(1f, 2, 3, 4) == 10f);
| assert(sum(1.0 + 2i, 2 + 3i, 3 + 4i, 4 + 5i) == (10 + 14i));
|}
|
|version(X86)
| version = X86_Any;
|version(X86_64)
| version = X86_Any;
|
|/++
|SIMD Vectors
|Bugs: ICE 1662 (dmd only)
|+/
|version(LDC)
|version(X86_Any)
|version(mir_test)
|unittest
|{
| import core.simd;
| import std.meta : AliasSeq;
| double2 a = 1, b = 2, c = 3, d = 6;
| with(Summation)
| {
| foreach (algo; AliasSeq!(naive, fast, pairwise, kahan))
| {
| assert([a, b, c].sum!algo.array == d.array);
| assert([a, b].sum!algo(c).array == d.array);
| }
| }
|}
|
|import std.traits;
|private alias AliasSeq(T...) = T;
|import mir.internal.utility: Iota, isComplex;
|import mir.math.common: fabs;
|
|private alias isNaN = x => x != x;
|private alias isFinite = x => x.fabs < x.infinity;
|private alias isInfinity = x => x.fabs == x.infinity;
|
|
|private template chainSeq(size_t n)
|{
| static if (n)
| alias chainSeq = AliasSeq!(n, chainSeq!(n / 2));
| else
| alias chainSeq = AliasSeq!();
|}
|
|/++
|Summation algorithms.
|+/
|enum Summation
|{
| /++
| Performs `pairwise` summation for floating point based types and `fast` summation for integral based types.
| +/
| appropriate,
|
| /++
| $(WEB en.wikipedia.org/wiki/Pairwise_summation, Pairwise summation) algorithm.
| +/
| pairwise,
|
| /++
| Precise summation algorithm.
| The value of the sum is rounded to the nearest representable
| floating-point number using the $(LUCKY round-half-to-even rule).
|    The result can differ from the exact value on `X86`: `nextDown(prior) <= result && result <= nextUp(prior)`.
|    The current implementation re-establishes special value semantics across iterations (i.e. handling ±inf).
|
| References: $(LINK2 http://www.cs.cmu.edu/afs/cs/project/quake/public/papers/robust-arithmetic.ps,
| "Adaptive Precision Floating-Point Arithmetic and Fast Robust Geometric Predicates", Jonathan Richard Shewchuk),
| $(LINK2 http://bugs.python.org/file10357/msum4.py, Mark Dickinson's post at bugs.python.org).
| +/
|
| /+
|    Precise summation function modeled on msum() by Raymond Hettinger,
|    enhanced with the exact partials sum and roundoff from Mark
|    Dickinson's post at bugs.python.org (see the links above).
| See those links for more details, proofs and other references.
| IEEE 754R floating point semantics are assumed.
| +/
| precise,
|
| /++
| $(WEB en.wikipedia.org/wiki/Kahan_summation, Kahan summation) algorithm.
| +/
| /+
| ---------------------
| s := x[1]
| c := 0
| FOR k := 2 TO n DO
| y := x[k] - c
| t := s + y
| c := (t - s) - y
| s := t
| END DO
| ---------------------
| +/
| kahan,
|
| /++
|    $(LUCKY Kahan-Babuška-Neumaier summation algorithm). `KBN` gives more accurate results than `Kahan`.
| +/
| /+
| ---------------------
| s := x[1]
| c := 0
| FOR i := 2 TO n DO
| t := s + x[i]
| IF ABS(s) >= ABS(x[i]) THEN
| c := c + ((s-t)+x[i])
| ELSE
| c := c + ((x[i]-t)+s)
| END IF
| s := t
| END DO
| s := s + c
| ---------------------
| +/
| kbn,
|
| /++
|    $(LUCKY Generalized Kahan-Babuška summation algorithm), order 2. `KB2` gives more accurate results than `Kahan` and `KBN`.
| +/
| /+
| ---------------------
| s := 0 ; cs := 0 ; ccs := 0
| FOR j := 1 TO n DO
| t := s + x[i]
| IF ABS(s) >= ABS(x[i]) THEN
| c := (s-t) + x[i]
| ELSE
| c := (x[i]-t) + s
| END IF
| s := t
| t := cs + c
| IF ABS(cs) >= ABS(c) THEN
| cc := (cs-t) + c
| ELSE
| cc := (c-t) + cs
| END IF
| cs := t
| ccs := ccs + cc
| END FOR
| RETURN s+cs+ccs
| ---------------------
| +/
| kb2,
|
| /++
| Naive algorithm (one by one).
| +/
| naive,
|
| /++
| SIMD optimized summation algorithm.
| +/
| fast,
|}
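|
|// A minimal sketch (an illustrative addition, not one of the module's own tests)
|// contrasting `naive` and `precise` summation; the `precise` expectation matches the
|// [1e16, 1., 1e-16] entry in the test table near the end of this module.
|version(mir_test)
|unittest
|{
|    double[] data = [1e16, 1.0, 1e-16];
|    assert(data.sum!"naive" == 1e16);                       // the small terms are lost to rounding
|    assert(data.sum!"precise" == 10_000_000_000_000_002.0); // correctly rounded result
|}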
|
|/++
|Output range for summation.
|+/
|struct Summator(T, Summation summation)
| if (isMutable!T)
|{
| static if (is(T == class) || is(T == interface) || hasElaborateAssign!T)
| static assert (summation == Summation.naive,
| "Classes, interfaces, and structures with "
|            ~ "elaborate assignment support only naive summation.");
|
| static if (summation == Summation.fast)
| {
| version (LDC)
| {
| import ldc.attributes: fastmath;
| alias attr = fastmath;
| }
| else
| {
| alias attr = AliasSeq!();
| }
| }
| else
| {
| alias attr = AliasSeq!();
| }
|
| @attr:
|
| static if (summation == Summation.pairwise)
| private enum bool fastPairwise =
| is(F == float) ||
| is(F == double) ||
| is(F == cfloat) ||
| is(F == cdouble) ||
| is(F : __vector(W[N]), W, size_t N);
| //false;
|
| alias F = T;
|
| static if (summation == Summation.precise)
| {
| import std.internal.scopebuffer;
| import mir.appender;
| import mir.math.ieee: signbit;
| private:
| enum F M = (cast(F)(2)) ^^ (T.max_exp - 1);
| ScopedBuffer!(F, 16) partials;
| //sum for NaN and infinity.
| F s = summationInitValue!F;
| //Overflow Degree. Count of 2^^F.max_exp minus count of -(2^^F.max_exp)
| sizediff_t o;
|
|
| /++
| Compute the sum of a list of nonoverlapping floats.
| On input, partials is a list of nonzero, nonspecial,
| nonoverlapping floats, strictly increasing in magnitude, but
| possibly not all having the same sign.
| On output, the sum of partials gives the error in the returned
| result, which is correctly rounded (using the round-half-to-even
| rule).
| Two floating point values x and y are non-overlapping if the least significant nonzero
| bit of x is more significant than the most significant nonzero bit of y, or vice-versa.
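|        For instance, `1.0` and `2.0 ^^ -60` are non-overlapping,
|        while `1.0` and `1.5` overlap in the `2 ^^ 0` bit.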
| +/
| static F partialsReduce(F s, in F[] partials)
| in
| {
| debug(numeric) assert(!partials.length || .isFinite(s));
| }
| do
| {
| bool _break;
| foreach_reverse (i, y; partials)
| {
| s = partialsReducePred(s, y, i ? partials[i-1] : 0, _break);
| if (_break)
| break;
| debug(numeric) assert(.isFinite(s));
| }
| return s;
| }
|
| static F partialsReducePred(F s, F y, F z, out bool _break)
| out(result)
| {
| debug(numeric) assert(.isFinite(result));
| }
| do
| {
| F x = s;
| s = x + y;
| F d = s - x;
| F l = y - d;
| debug(numeric)
| {
| assert(.isFinite(x));
| assert(.isFinite(y));
| assert(.isFinite(s));
| assert(fabs(y) < fabs(x));
| }
| if (l)
| {
| //Make half-even rounding work across multiple partials.
| //Needed so that sum([1e-16, 1, 1e16]) will round-up the last
| //digit to two instead of down to zero (the 1e-16 makes the 1
| //slightly closer to two). Can guarantee commutativity.
| if (z && !signbit(l * z))
| {
| l *= 2;
| x = s + l;
| F t = x - s;
| if (l == t)
| s = x;
| }
| _break = true;
| }
| return s;
| }
|
| //Returns corresponding infinity if is overflow and 0 otherwise.
| F overflow()() const
| {
| if (o == 0)
| return 0;
| if (partials.length && (o == -1 || o == 1) && signbit(o * partials.data[$-1]))
| {
| // problem case: decide whether result is representable
| F x = o * M;
| F y = partials.data[$-1] / 2;
| F h = x + y;
| F d = h - x;
| F l = (y - d) * 2;
| y = h * 2;
| d = h + l;
| F t = d - h;
| version(X86)
| {
| if (!.isInfinity(cast(T)y) || !.isInfinity(sum()))
| return 0;
| }
| else
| {
| if (!.isInfinity(cast(T)y) ||
| ((partials.length > 1 && !signbit(l * partials.data[$-2])) && t == l))
| return 0;
| }
| }
| return F.infinity * o;
| }
| }
| else
| static if (summation == Summation.kb2)
| {
| F s = summationInitValue!F;
| F cs = summationInitValue!F;
| F ccs = summationInitValue!F;
| }
| else
| static if (summation == Summation.kbn)
| {
| F s = summationInitValue!F;
| F c = summationInitValue!F;
| }
| else
| static if (summation == Summation.kahan)
| {
| F s = summationInitValue!F;
| F c = summationInitValue!F;
|        F y = summationInitValue!F; // do not declare in the loop/put (the algorithm can be used for matrices, etc.)
| F t = summationInitValue!F; // ditto
| }
| else
| static if (summation == Summation.pairwise)
| {
| package size_t counter;
| size_t index;
| static if (fastPairwise)
| {
|            enum registersCount = 16;
| F[size_t.sizeof * 8] partials;
| }
| else
| {
| F[size_t.sizeof * 8] partials;
| }
| }
| else
| static if (summation == Summation.naive)
| {
| F s = summationInitValue!F;
| }
| else
| static if (summation == Summation.fast)
| {
| F s = summationInitValue!F;
| }
| else
| static assert(0, "Unsupported summation type for std.numeric.Summator.");
|
|
|public:
|
| ///
| this()(T n)
| {
| static if (summation == Summation.precise)
| {
| s = 0.0;
| o = 0;
| if (n) put(n);
| }
| else
| static if (summation == Summation.kb2)
| {
| s = n;
| static if (isComplex!T)
| {
| cs = 0 + 0fi;
| ccs = 0 + 0fi;
| }
| else
| {
| cs = 0.0;
| ccs = 0.0;
| }
| }
| else
| static if (summation == Summation.kbn)
| {
| s = n;
| static if (isComplex!T)
| c = 0 + 0fi;
| else
| c = 0.0;
| }
| else
| static if (summation == Summation.kahan)
| {
| s = n;
| static if (isComplex!T)
| c = 0 + 0fi;
| else
| c = 0.0;
| }
| else
| static if (summation == Summation.pairwise)
| {
| counter = index = 1;
| partials[0] = n;
| }
| else
| static if (summation == Summation.naive)
| {
| s = n;
| }
| else
| static if (summation == Summation.fast)
| {
| s = n;
| }
| else
| static assert(0);
| }
|
| ///Adds `n` to the internal partial sums.
| void put(N)(N n)
| if (__traits(compiles, {T a = n; a = n; a += n;}))
| {
| static if (isCompesatorAlgorithm!summation)
| F x = n;
| static if (summation == Summation.precise)
| {
| if (.isFinite(x))
| {
| size_t i;
| auto partials_data = partials.data;
| foreach (y; partials_data[])
| {
| F h = x + y;
| if (.isInfinity(cast(T)h))
| {
| if (fabs(x) < fabs(y))
| {
| F t = x; x = y; y = t;
| }
| //h == -F.infinity
| if (signbit(h))
| {
| x += M;
| x += M;
| o--;
| }
| //h == +F.infinity
| else
| {
| x -= M;
| x -= M;
| o++;
| }
| debug(numeric) assert(x.isFinite);
| h = x + y;
| }
| debug(numeric) assert(h.isFinite);
| F l;
| if (fabs(x) < fabs(y))
| {
| F t = h - y;
| l = x - t;
| }
| else
| {
| F t = h - x;
| l = y - t;
| }
| debug(numeric) assert(l.isFinite);
| if (l)
| {
| partials_data[i++] = l;
| }
| x = h;
| }
| partials.shrinkTo(i);
| if (x)
| {
| partials.put(x);
| }
| }
| else
| {
| s += x;
| }
| }
| else
| static if (summation == Summation.kb2)
| {
| static if (isFloatingPoint!F)
| {
| F t = s + x;
| F c = 0;
| if (fabs(s) >= fabs(x))
| {
| F d = s - t;
| c = d + x;
| }
| else
| {
| F d = x - t;
| c = d + s;
| }
| s = t;
| t = cs + c;
| if (fabs(cs) >= fabs(c))
| {
| F d = cs - t;
| d += c;
| ccs += d;
| }
| else
| {
| F d = c - t;
| d += cs;
| ccs += d;
| }
| cs = t;
| }
| else
| {
| F t = s + x;
| if (fabs(s.re) < fabs(x.re))
| {
| auto s_re = s.re;
| auto x_re = x.re;
| s = x_re + s.im * 1fi;
| x = s_re + x.im * 1fi;
| }
| if (fabs(s.im) < fabs(x.im))
| {
| auto s_im = s.im;
| auto x_im = x.im;
| s = s.re + x_im * 1fi;
| x = x.re + s_im * 1fi;
| }
| F c = (s-t)+x;
| s = t;
| if (fabs(cs.re) < fabs(c.re))
| {
| auto c_re = c.re;
| auto cs_re = cs.re;
| c = cs_re + c.im * 1fi;
| cs = c_re + cs.im * 1fi;
| }
| if (fabs(cs.im) < fabs(c.im))
| {
| auto c_im = c.im;
| auto cs_im = cs.im;
| c = c.re + cs_im * 1fi;
| cs = cs.re + c_im * 1fi;
| }
| F d = cs - t;
| d += c;
| ccs += d;
| cs = t;
| }
| }
| else
| static if (summation == Summation.kbn)
| {
| static if (isFloatingPoint!F)
| {
| F t = s + x;
| if (fabs(s) >= fabs(x))
| {
| F d = s - t;
| d += x;
| c += d;
| }
| else
| {
| F d = x - t;
| d += s;
| c += d;
| }
| s = t;
| }
| else
| {
| F t = s + x;
| if (fabs(s.re) < fabs(x.re))
| {
| auto s_re = s.re;
| auto x_re = x.re;
| s = x_re + s.im * 1fi;
| x = s_re + x.im * 1fi;
| }
| if (fabs(s.im) < fabs(x.im))
| {
| auto s_im = s.im;
| auto x_im = x.im;
| s = s.re + x_im * 1fi;
| x = x.re + s_im * 1fi;
| }
| F d = s - t;
| d += x;
| c += d;
| s = t;
| }
| }
| else
| static if (summation == Summation.kahan)
| {
| y = x - c;
| t = s + y;
| c = t - s;
| c -= y;
| s = t;
| }
| else
| static if (summation == Summation.pairwise)
| {
| import mir.bitop: cttz;
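|            // Pairwise (binary-tree) order: the number of trailing zero bits of the
|            // incremented counter equals the number of completed tree levels, so that
|            // many adjacent partials are folded together before the next element is stored.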
| ++counter;
| partials[index] = n;
| foreach (_; 0 .. cttz(counter))
| {
| immutable newIndex = index - 1;
| partials[newIndex] += partials[index];
| index = newIndex;
| }
| ++index;
| }
| else
| static if (summation == Summation.naive)
| {
| s += n;
| }
| else
| static if (summation == Summation.fast)
| {
| s += n;
| }
| else
| static assert(0);
| }
|
| ///ditto
| void put(Range)(Range r)
| if (isIterable!Range)
| {
| static if (summation == Summation.pairwise && fastPairwise && isDynamicArray!Range)
| {
| F[registersCount] v;
| foreach (i, n; chainSeq!registersCount)
| {
| if (r.length >= n * 2) do
| {
| foreach (j; Iota!n)
| v[j] = cast(F) r[j];
| foreach (j; Iota!n)
| v[j] += cast(F) r[n + j];
| foreach (m; chainSeq!(n / 2))
| foreach (j; Iota!m)
| v[j] += v[m + j];
| put(v[0]);
| r = r[n * 2 .. $];
| }
| while (!i && r.length >= n * 2);
| }
| if (r.length)
| {
| put(cast(F) r[0]);
| r = r[1 .. $];
| }
| assert(r.length == 0);
| }
| else
| static if (summation == Summation.fast)
| {
| static if (isComplex!T)
| F s0 = 0 + 0fi;
| else
| F s0 = 0;
| foreach (ref elem; r)
| s0 += elem;
| s += s0;
| }
| else
| {
| foreach (ref elem; r)
| put(elem);
| }
| }
|
| import mir.ndslice.slice;
|
| /// ditto
| void put(Range: Slice!(Iterator, N, kind), Iterator, size_t N, SliceKind kind)(Range r)
| {
| static if (N > 1 && kind == Contiguous)
| {
| import mir.ndslice.topology: flattened;
| this.put(r.flattened);
| }
| else
| static if (isPointer!Iterator && kind == Contiguous)
| {
| this.put(r.field);
| }
| else
| static if (summation == Summation.fast && N == 1)
| {
| static if (isComplex!T)
| F s0 = 0 + 0fi;
| else
| F s0 = 0;
| import mir.algorithm.iteration: reduce;
| s0 = s0.reduce!"a + b"(r);
| s += s0;
| }
| else
| {
| foreach(elem; r)
| this.put(elem);
| }
| }
|
| /+
| Adds `x` to the internal partial sums.
| This operation doesn't re-establish special
| value semantics across iterations (i.e. handling ±inf).
| Preconditions: `isFinite(x)`.
| +/
| version(none)
| static if (summation == Summation.precise)
| package void unsafePut()(F x)
| in {
| assert(.isFinite(x));
| }
| do {
| size_t i;
| foreach (y; partials.data[])
| {
| F h = x + y;
| debug(numeric) assert(.isFinite(h));
| F l;
| if (fabs(x) < fabs(y))
| {
| F t = h - y;
| l = x - t;
| }
| else
| {
| F t = h - x;
| l = y - t;
| }
| debug(numeric) assert(.isFinite(l));
| if (l)
| {
| partials.data[i++] = l;
| }
| x = h;
| }
| partials.length = i;
| if (x)
| {
| partials.put(x);
| }
| }
|
| ///Returns the value of the sum.
| T sum()() scope const
| {
| /++
| Returns the value of the sum, rounded to the nearest representable
| floating-point number using the round-half-to-even rule.
|        The result can differ from the exact value on `X86`: `nextDown(prior) <= result && result <= nextUp(prior)`.
| +/
| static if (summation == Summation.precise)
| {
| debug(mir_sum)
| {
| foreach (y; partials.data[])
| {
| assert(y);
| assert(y.isFinite);
| }
| //TODO: Add Non-Overlapping check to std.math
| import mir.ndslice.slice: sliced;
| import mir.ndslice.sorting: isSorted;
| import mir.ndslice.topology: map;
| assert(partials.data[].sliced.map!fabs.isSorted);
| }
|
| if (s)
| return s;
| auto parts = partials.data[];
| F y = 0.0;
| //pick last
| if (parts.length)
| {
| y = parts[$-1];
| parts = parts[0..$-1];
| }
| if (o)
| {
| immutable F of = o;
| if (y && (o == -1 || o == 1) && signbit(of * y))
| {
| // problem case: decide whether result is representable
| y /= 2;
| F x = of * M;
| immutable F h = x + y;
| F t = h - x;
| F l = (y - t) * 2;
| y = h * 2;
| if (.isInfinity(cast(T)y))
| {
| // overflow, except in edge case...
| x = h + l;
| t = x - h;
| y = parts.length && t == l && !signbit(l*parts[$-1]) ?
| x * 2 :
| F.infinity * of;
| parts = null;
| }
| else if (l)
| {
| bool _break;
| y = partialsReducePred(y, l, parts.length ? parts[$-1] : 0, _break);
| if (_break)
| parts = null;
| }
| }
| else
| {
| y = F.infinity * of;
| parts = null;
| }
| }
| return partialsReduce(y, parts);
| }
| else
| static if (summation == Summation.kb2)
| {
| return s + (cs + ccs);
| }
| else
| static if (summation == Summation.kbn)
| {
| return s + c;
| }
| else
| static if (summation == Summation.kahan)
| {
| return s;
| }
| else
| static if (summation == Summation.pairwise)
| {
| F s = summationInitValue!T;
| assert((counter == 0) == (index == 0));
| foreach_reverse (ref e; partials[0 .. index])
| {
| static if (is(F : __vector(W[N]), W, size_t N))
| s += cast(Unqual!F) e; //DMD bug workaround
| else
| s += e;
| }
| return s;
| }
| else
| static if (summation == Summation.naive)
| {
| return s;
| }
| else
| static if (summation == Summation.fast)
| {
| return s;
| }
| else
| static assert(0);
| }
|
| version(none)
| static if (summation == Summation.precise)
| F partialsSum()() const
| {
| debug(numeric) partialsDebug;
| auto parts = partials.data[];
| F y = 0.0;
| //pick last
| if (parts.length)
| {
| y = parts[$-1];
| parts = parts[0..$-1];
| }
| return partialsReduce(y, parts);
| }
|
| ///Returns `Summator` with extended internal partial sums.
| C opCast(C : Summator!(P, _summation), P, Summation _summation)() const
| if (
| _summation == summation &&
| isMutable!C &&
| P.max_exp >= T.max_exp &&
| P.mant_dig >= T.mant_dig
| )
| {
| static if (is(P == T))
| return this;
| else
| static if (summation == Summation.precise)
| {
| auto ret = typeof(return).init;
| ret.s = s;
| ret.o = o;
| foreach (p; partials.data[])
| {
| ret.partials.put(p);
| }
| enum exp_diff = P.max_exp / T.max_exp;
| static if (exp_diff)
| {
| if (ret.o)
| {
| immutable f = ret.o / exp_diff;
| immutable t = cast(int)(ret.o % exp_diff);
| ret.o = f;
| ret.put((P(2) ^^ T.max_exp) * t);
| }
| }
| return ret;
| }
| else
| static if (summation == Summation.kb2)
| {
| auto ret = typeof(return).init;
| ret.s = s;
| ret.cs = cs;
| ret.ccs = ccs;
| return ret;
| }
| else
| static if (summation == Summation.kbn)
| {
| auto ret = typeof(return).init;
| ret.s = s;
| ret.c = c;
| return ret;
| }
| else
| static if (summation == Summation.kahan)
| {
| auto ret = typeof(return).init;
| ret.s = s;
| ret.c = c;
| return ret;
| }
| else
| static if (summation == Summation.pairwise)
| {
| auto ret = typeof(return).init;
| ret.counter = counter;
| ret.index = index;
| foreach (i; 0 .. index)
| ret.partials[i] = partials[i];
| return ret;
| }
| else
| static if (summation == Summation.naive)
| {
| auto ret = typeof(return).init;
| ret.s = s;
| return ret;
| }
| else
| static if (summation == Summation.fast)
| {
| auto ret = typeof(return).init;
| ret.s = s;
| return ret;
| }
| else
| static assert(0);
| }
|
| /++
| `cast(C)` operator overloading. Returns `cast(C)sum()`.
| See also: `cast`
| +/
| C opCast(C)() const if (is(Unqual!C == T))
| {
| return cast(C)sum();
| }
|
| ///Operator overloading.
| // opAssign should initialize partials.
| void opAssign(T rhs)
| {
| static if (summation == Summation.precise)
| {
| partials.reset;
| s = 0.0;
| o = 0;
| if (rhs) put(rhs);
| }
| else
| static if (summation == Summation.kb2)
| {
| s = rhs;
| static if (isComplex!T)
| {
| cs = 0 + 0fi;
| ccs = 0 + 0fi;
| }
| else
| {
| cs = 0.0;
| ccs = 0.0;
| }
| }
| else
| static if (summation == Summation.kbn)
| {
| s = rhs;
| static if (isComplex!T)
| c = 0 + 0fi;
| else
| c = 0.0;
| }
| else
| static if (summation == Summation.kahan)
| {
| s = rhs;
| static if (isComplex!T)
| c = 0 + 0fi;
| else
| c = 0.0;
| }
| else
| static if (summation == Summation.pairwise)
| {
| counter = 1;
| index = 1;
| partials[0] = rhs;
| }
| else
| static if (summation == Summation.naive)
| {
| s = rhs;
| }
| else
| static if (summation == Summation.fast)
| {
| s = rhs;
| }
| else
| static assert(0);
| }
|
| ///ditto
| void opOpAssign(string op : "+")(T rhs)
| {
| put(rhs);
| }
|
| ///ditto
| void opOpAssign(string op : "+")(ref const Summator rhs)
| {
| static if (summation == Summation.precise)
| {
| s += rhs.s;
| o += rhs.o;
| foreach (f; rhs.partials.data[])
| put(f);
| }
| else
| static if (summation == Summation.kb2)
| {
| put(rhs.ccs);
| put(rhs.cs);
| put(rhs.s);
| }
| else
| static if (summation == Summation.kbn)
| {
| put(rhs.c);
| put(rhs.s);
| }
| else
| static if (summation == Summation.kahan)
| {
| put(rhs.s);
| }
| else
| static if (summation == Summation.pairwise)
| {
| foreach_reverse (e; rhs.partials[0 .. rhs.index])
| put(e);
| counter -= rhs.index;
| counter += rhs.counter;
| }
| else
| static if (summation == Summation.naive)
| {
| put(rhs.s);
| }
| else
| static if (summation == Summation.fast)
| {
| put(rhs.s);
| }
| else
| static assert(0);
| }
|
| ///ditto
| void opOpAssign(string op : "-")(T rhs)
| {
| static if (summation == Summation.precise)
| {
| put(-rhs);
| }
| else
| static if (summation == Summation.kb2)
| {
| put(-rhs);
| }
| else
| static if (summation == Summation.kbn)
| {
| put(-rhs);
| }
| else
| static if (summation == Summation.kahan)
| {
| y = 0.0;
| y -= rhs;
| y -= c;
| t = s + y;
| c = t - s;
| c -= y;
| s = t;
| }
| else
| static if (summation == Summation.pairwise)
| {
| put(-rhs);
| }
| else
| static if (summation == Summation.naive)
| {
| s -= rhs;
| }
| else
| static if (summation == Summation.fast)
| {
| s -= rhs;
| }
| else
| static assert(0);
| }
|
| ///ditto
| void opOpAssign(string op : "-")(ref const Summator rhs)
| {
| static if (summation == Summation.precise)
| {
| s -= rhs.s;
| o -= rhs.o;
| foreach (f; rhs.partials.data[])
| put(-f);
| }
| else
| static if (summation == Summation.kb2)
| {
| put(-rhs.ccs);
| put(-rhs.cs);
| put(-rhs.s);
| }
| else
| static if (summation == Summation.kbn)
| {
| put(-rhs.c);
| put(-rhs.s);
| }
| else
| static if (summation == Summation.kahan)
| {
| this -= rhs.s;
| }
| else
| static if (summation == Summation.pairwise)
| {
| foreach_reverse (e; rhs.partials[0 .. rhs.index])
| put(-e);
| counter -= rhs.index;
| counter += rhs.counter;
| }
| else
| static if (summation == Summation.naive)
| {
| s -= rhs.s;
| }
| else
| static if (summation == Summation.fast)
| {
| s -= rhs.s;
| }
| else
| static assert(0);
| }
|
| ///
|
| version(mir_test)
| @nogc nothrow unittest
| {
| import mir.math.common;
| import mir.ndslice.topology: iota, map;
| auto r1 = iota(500).map!(a => 1.7L.pow(a+1) - 1.7L.pow(a));
| auto r2 = iota([500], 500).map!(a => 1.7L.pow(a+1) - 1.7L.pow(a));
| Summator!(real, Summation.precise) s1 = 0, s2 = 0.0;
| foreach (e; r1) s1 += e;
| foreach (e; r2) s2 -= e;
| s1 -= s2;
| s1 -= 1.7L.pow(1000);
| assert(s1.sum == -1);
| }
|
|
| version(mir_test)
| @nogc nothrow unittest
| {
| with(Summation)
| foreach (summation; AliasSeq!(kahan, kbn, kb2, precise, pairwise))
| foreach (T; AliasSeq!(float, double, real))
| {
| Summator!(T, summation) sum = 1;
| sum += 3;
| assert(sum.sum == 4);
| sum -= 10;
| assert(sum.sum == -6);
| Summator!(T, summation) sum2 = 3;
| sum -= sum2;
| assert(sum.sum == -9);
| sum2 = 100;
| sum += 100;
| assert(sum.sum == 91);
| auto sum3 = cast(Summator!(real, summation))sum;
| assert(sum3.sum == 91);
| sum = sum2;
| }
| }
|
|
| version(mir_test)
| @nogc nothrow unittest
| {
| import mir.math.common: approxEqual;
| with(Summation)
| foreach (summation; AliasSeq!(naive, fast))
| foreach (T; AliasSeq!(float, double, real))
| {
| Summator!(T, summation) sum = 1;
| sum += 3.5;
| assert(sum.sum.approxEqual(4.5));
| sum = 2;
| assert(sum.sum == 2);
| sum -= 4;
| assert(sum.sum.approxEqual(-2));
| }
| }
|
| static if (summation == Summation.precise)
| {
| ///Returns `true` if current sum is a NaN.
| bool isNaN()() const
| {
| return .isNaN(s);
| }
|
| ///Returns `true` if current sum is finite (not infinite or NaN).
| bool isFinite()() const
| {
| if (s)
| return false;
| return !overflow;
| }
|
| ///Returns `true` if current sum is ±∞.
| bool isInfinity()() const
| {
| return .isInfinity(s) || overflow();
| }
| }
| else static if (isFloatingPoint!F)
| {
| ///Returns `true` if current sum is a NaN.
| bool isNaN()() const
| {
| return .isNaN(sum());
| }
|
| ///Returns `true` if current sum is finite (not infinite or NaN).
| bool isFinite()() const
| {
| return .isFinite(sum());
| }
|
| ///Returns `true` if current sum is ±∞.
| bool isInfinity()() const
| {
| return .isInfinity(sum());
| }
| }
| else
| {
| //User defined types
| }
|}
|
|version(mir_test)
|unittest
|{
| import mir.functional: RefTuple, refTuple;
| import mir.ndslice.topology: map, iota, retro;
| import mir.array.allocation: array;
| import std.math: isInfinity, isFinite, isNaN;
|
| Summator!(double, Summation.precise) summator = 0.0;
|
| enum double M = (cast(double)2) ^^ (double.max_exp - 1);
| RefTuple!(double[], double)[] tests = [
| refTuple(new double[0], 0.0),
| refTuple([0.0], 0.0),
| refTuple([1e100, 1.0, -1e100, 1e-100, 1e50, -1, -1e50], 1e-100),
| refTuple([1e308, 1e308, -1e308], 1e308),
| refTuple([-1e308, 1e308, 1e308], 1e308),
| refTuple([1e308, -1e308, 1e308], 1e308),
| refTuple([M, M, -2.0^^1000], 1.7976930277114552e+308),
| refTuple([M, M, M, M, -M, -M, -M], 8.9884656743115795e+307),
| refTuple([2.0^^53, -0.5, -2.0^^-54], 2.0^^53-1.0),
| refTuple([2.0^^53, 1.0, 2.0^^-100], 2.0^^53+2.0),
| refTuple([2.0^^53+10.0, 1.0, 2.0^^-100], 2.0^^53+12.0),
| refTuple([2.0^^53-4.0, 0.5, 2.0^^-54], 2.0^^53-3.0),
| refTuple([M-2.0^^970, -1, M], 1.7976931348623157e+308),
| refTuple([double.max, double.max*2.^^-54], double.max),
| refTuple([double.max, double.max*2.^^-53], double.infinity),
| refTuple(iota([1000], 1).map!(a => 1.0/a).array , 7.4854708605503451),
| refTuple(iota([1000], 1).map!(a => (-1.0)^^a/a).array, -0.69264743055982025), //0.693147180559945309417232121458176568075500134360255254120680...
| refTuple(iota([1000], 1).map!(a => 1.0/a).retro.array , 7.4854708605503451),
| refTuple(iota([1000], 1).map!(a => (-1.0)^^a/a).retro.array, -0.69264743055982025),
| refTuple([double.infinity, -double.infinity, double.nan], double.nan),
| refTuple([double.nan, double.infinity, -double.infinity], double.nan),
| refTuple([double.infinity, double.nan, double.infinity], double.nan),
| refTuple([double.infinity, double.infinity], double.infinity),
| refTuple([double.infinity, -double.infinity], double.nan),
| refTuple([-double.infinity, 1e308, 1e308, -double.infinity], -double.infinity),
| refTuple([M-2.0^^970, 0.0, M], double.infinity),
| refTuple([M-2.0^^970, 1.0, M], double.infinity),
| refTuple([M, M], double.infinity),
| refTuple([M, M, -1], double.infinity),
| refTuple([M, M, M, M, -M, -M], double.infinity),
| refTuple([M, M, M, M, -M, M], double.infinity),
| refTuple([-M, -M, -M, -M], -double.infinity),
| refTuple([M, M, -2.^^971], double.max),
| refTuple([M, M, -2.^^970], double.infinity),
| refTuple([-2.^^970, M, M, -0X0.0000000000001P-0 * 2.^^-1022], double.max),
| refTuple([M, M, -2.^^970, 0X0.0000000000001P-0 * 2.^^-1022], double.infinity),
| refTuple([-M, 2.^^971, -M], -double.max),
| refTuple([-M, -M, 2.^^970], -double.infinity),
| refTuple([-M, -M, 2.^^970, 0X0.0000000000001P-0 * 2.^^-1022], -double.max),
| refTuple([-0X0.0000000000001P-0 * 2.^^-1022, -M, -M, 2.^^970], -double.infinity),
| refTuple([2.^^930, -2.^^980, M, M, M, -M], 1.7976931348622137e+308),
| refTuple([M, M, -1e307], 1.6976931348623159e+308),
| refTuple([1e16, 1., 1e-16], 10_000_000_000_000_002.0),
| ];
| foreach (i, test; tests)
| {
| summator = 0.0;
| foreach (t; test.a) summator.put(t);
| auto r = test.b;
| auto s = summator.sum;
| assert(summator.isNaN() == r.isNaN());
| assert(summator.isFinite() == r.isFinite());
| assert(summator.isInfinity() == r.isInfinity());
| assert(s == r || s.isNaN && r.isNaN);
| }
|}
|
|/++
|Sums elements of `r`, which must be a finite
|iterable.
|
|A seed may be passed to `sum`. Not only will this seed be used as an initial
|value, but its type will be used if it is not specified.
|
|Note that these specialized summing algorithms execute more primitive operations
|than vanilla summation. Therefore, if in certain cases maximum speed is required
|at expense of precision, one can use $(LREF Summation.fast).
|
|Returns:
| The sum of all the elements in the range r.
|+/
|template sum(F, Summation summation = Summation.appropriate)
| if (isMutable!F)
|{
| ///
| template sum(Range)
| if (isIterable!Range)
| {
| import core.lifetime: move;
|
| ///
| F sum(Range r)
| {
| static if (isComplex!F && summation == Summation.precise)
| {
| return sum(r, summationInitValue!F);
| }
| else
| {
| Summator!(F, ResolveSummationType!(summation, Range, sumType!Range)) sum;
| sum.put(r.move);
| return sum.sum;
| }
| }
|
| ///
| F sum(Range r, F seed)
| {
| static if (isComplex!F && summation == Summation.precise)
| {
| alias T = typeof(F.init.re);
| auto sumRe = Summator!(T, Summation.precise)(seed.re);
| auto sumIm = Summator!(T, Summation.precise)(seed.im);
| import mir.ndslice.slice: isSlice;
| static if (isSlice!Range)
| {
| import mir.algorithm.iteration: each;
| r.each!((auto ref elem)
| {
| sumRe.put(elem.re);
| sumIm.put(elem.im);
| });
| }
| else
| {
| foreach (ref elem; r)
| {
| sumRe.put(elem.re);
| sumIm.put(elem.im);
| }
| }
| return sumRe.sum + sumIm.sum * 1fi;
| }
| else
| {
| auto sum = Summator!(F, ResolveSummationType!(summation, Range, F))(seed);
| sum.put(r.move);
| return sum.sum;
| }
| }
| }
|
| ///
| F sum(scope const F[] r...)
| {
| static if (isComplex!F && summation == Summation.precise)
| {
| return sum(r, summationInitValue!F);
| }
| else
| {
| Summator!(F, ResolveSummationType!(summation, const(F)[], F)) sum;
| sum.put(r);
| return sum.sum;
| }
| }
|}
|
|///ditto
|template sum(Summation summation = Summation.appropriate)
|{
| ///
| sumType!Range sum(Range)(Range r)
| if (isIterable!Range)
| {
| import core.lifetime: move;
| alias F = typeof(return);
| return .sum!(F, ResolveSummationType!(summation, Range, F))(r.move);
| }
|
| ///
| F sum(Range, F)(Range r, F seed)
| if (isIterable!Range)
| {
| import core.lifetime: move;
| return .sum!(F, ResolveSummationType!(summation, Range, F))(r.move, seed);
| }
|
| ///
| sumType!T sum(T)(scope const T[] ar...)
| {
| alias F = typeof(return);
| return .sum!(F, ResolveSummationType!(summation, F[], F))(ar);
| }
|}
|
|///ditto
|template sum(F, string summation)
| if (isMutable!F)
|{
| mixin("alias sum = .sum!(F, Summation." ~ summation ~ ");");
|}
|
|///ditto
|template sum(string summation)
|{
| mixin("alias sum = .sum!(Summation." ~ summation ~ ");");
|}
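|
|// A sketch (an illustrative addition, not one of the module's own tests) showing how a
|// seed both contributes to the total and selects the accumulator type, as documented above.
|version(mir_test)
|@safe pure nothrow unittest
|{
|    // uint elements, but the ulong seed drives a ulong accumulator, so no wrap-around
|    assert(sum([uint.max, uint.max], 0UL) == 2UL * uint.max);
|    // a floating-point seed forces floating-point summation of integers
|    static assert(is(typeof(sum([1, 2, 3], 0.0)) == double));
|}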
|
|
|
|version(mir_test)
|@safe pure nothrow unittest
|{
| static assert(is(typeof(sum([cast( byte)1])) == int));
| static assert(is(typeof(sum([cast(ubyte)1])) == int));
| static assert(is(typeof(sum([ 1, 2, 3, 4])) == int));
| static assert(is(typeof(sum([ 1U, 2U, 3U, 4U])) == uint));
| static assert(is(typeof(sum([ 1L, 2L, 3L, 4L])) == long));
| static assert(is(typeof(sum([1UL, 2UL, 3UL, 4UL])) == ulong));
|
| int[] empty;
| assert(sum(empty) == 0);
| assert(sum([42]) == 42);
| assert(sum([42, 43]) == 42 + 43);
| assert(sum([42, 43, 44]) == 42 + 43 + 44);
| assert(sum([42, 43, 44, 45]) == 42 + 43 + 44 + 45);
|}
|
|
|version(mir_test)
|@safe pure nothrow unittest
|{
| static assert(is(typeof(sum([1.0, 2.0, 3.0, 4.0])) == double));
| static assert(is(typeof(sum!double([ 1F, 2F, 3F, 4F])) == double));
| const(float[]) a = [1F, 2F, 3F, 4F];
| static assert(is(typeof(sum!double(a)) == double));
| const(float)[] b = [1F, 2F, 3F, 4F];
|    static assert(is(typeof(sum!double(b)) == double));
|
| double[] empty;
| assert(sum(empty) == 0);
| assert(sum([42.]) == 42);
| assert(sum([42., 43.]) == 42 + 43);
| assert(sum([42., 43., 44.]) == 42 + 43 + 44);
| assert(sum([42., 43., 44., 45.5]) == 42 + 43 + 44 + 45.5);
|}
|
|version(mir_test)
|@safe pure nothrow unittest
|{
| import mir.ndslice.topology: iota;
| assert(iota(2, 3).sum == 15);
|}
|
|version(mir_test)
|@safe pure nothrow unittest
|{
| import std.container;
| static assert(is(typeof(sum!double(SList!float()[])) == double));
| static assert(is(typeof(sum(SList!double()[])) == double));
| static assert(is(typeof(sum(SList!real()[])) == real));
|
| assert(sum(SList!double()[]) == 0);
| assert(sum(SList!double(1)[]) == 1);
| assert(sum(SList!double(1, 2)[]) == 1 + 2);
| assert(sum(SList!double(1, 2, 3)[]) == 1 + 2 + 3);
| assert(sum(SList!double(1, 2, 3, 4)[]) == 10);
|}
|
|
|version(mir_test)
|pure nothrow unittest // 12434
|{
| import mir.ndslice.slice: sliced;
| import mir.ndslice.topology: map;
| immutable a = [10, 20];
| auto s = a.sliced;
| auto s1 = sum(a); // Error
| auto s2 = s.map!(x => x).sum; // Error
|}
|
|version(mir_test)
|unittest
|{
| import std.bigint;
| import mir.ndslice.topology: repeat;
|
| auto a = BigInt("1_000_000_000_000_000_000").repeat(10);
| auto b = (ulong.max/2).repeat(10);
| auto sa = a.sum();
| auto sb = b.sum(BigInt(0)); //reduce ulongs into bigint
| assert(sa == BigInt("10_000_000_000_000_000_000"));
| assert(sb == (BigInt(ulong.max/2) * 10));
|}
|
|version(mir_test)
|unittest
|{
| with(Summation)
| foreach (F; AliasSeq!(float, double, real))
| {
| F[] ar = [1, 2, 3, 4];
| F r = 10;
| assert(r == ar.sum!fast());
| assert(r == ar.sum!pairwise());
| assert(r == ar.sum!kahan());
| assert(r == ar.sum!kbn());
| assert(r == ar.sum!kb2());
| }
|}
|
|version(mir_test)
|unittest
|{
| assert(sum(1) == 1);
| assert(sum(1, 2, 3) == 6);
| assert(sum(1.0, 2.0, 3.0) == 6);
| assert(sum(1.0 + 1i, 2.0 + 2i, 3.0 + 3i) == (6 + 6i));
|}
|
|version(mir_test)
|unittest
|{
| assert(sum!float(1) == 1f);
| assert(sum!float(1, 2, 3) == 6f);
| assert(sum!float(1.0, 2.0, 3.0) == 6f);
| assert(sum!cfloat(1.0 + 1i, 2.0 + 2i, 3.0 + 3i) == (6f + 6i));
|}
|
|version(LDC)
|version(X86_Any)
|version(mir_test)
|unittest
|{
| import core.simd;
| static if (__traits(compiles, double2.init + double2.init))
| {
|
| alias S = Summation;
| alias sums = AliasSeq!(S.kahan, S.pairwise, S.naive, S.fast);
|
| double2[] ar = [double2([1.0, 2]), double2([2, 3]), double2([3, 4]), double2([4, 6])];
| double2 c = double2([10, 15]);
|
| foreach (sumType; sums)
| {
| double2 s = ar.sum!(sumType);
| assert(s.array == c.array);
| }
| }
|}
|
|version(LDC)
|version(X86_Any)
|version(mir_test)
|unittest
|{
| import core.simd;
| import mir.ndslice.topology: iota, as;
|
| alias S = Summation;
| alias sums = AliasSeq!(S.kahan, S.pairwise, S.naive, S.fast, S.precise,
| S.kbn, S.kb2);
|
| int[2] ns = [9, 101];
|
| foreach (n; ns)
| {
| foreach (sumType; sums)
| {
| auto ar = iota(n).as!double;
|            double c = n * (n - 1) / 2; // Gauss: 0 + 1 + ... + (n - 1)
| double s = ar.sum!(sumType);
| assert(s == c);
| }
| }
|}
|
|package(mir)
|template ResolveSummationType(Summation summation, Range, F)
|{
| static if (summation == Summation.appropriate)
| {
| static if (isSummable!(Range, F))
| enum ResolveSummationType = Summation.pairwise;
| else
| static if (is(F == class) || is(F == struct) || is(F == interface))
| enum ResolveSummationType = Summation.naive;
| else
| enum ResolveSummationType = Summation.fast;
| }
| else
| {
| enum ResolveSummationType = summation;
| }
|}
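|
|// Illustration: with `Summation.appropriate`, summable floating-point element types
|// resolve to `pairwise`, aggregate element types such as `BigInt` resolve to `naive`,
|// and other element types (integers in particular) resolve to `fast`.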
|
|private T summationInitValue(T)()
|{
| static if (__traits(compiles, {T a = 0.0;}))
| {
| T a = 0.0;
| return a;
| }
| else
| static if (__traits(compiles, {T a = 0;}))
| {
| T a = 0;
| return a;
| }
| else
| static if (__traits(compiles, {T a = 0 + 0fi;}))
| {
| T a = 0 + 0fi;
| return a;
| }
| else
| {
| return T.init;
| }
|}
|
|package(mir)
|template elementType(T)
|{
| import mir.ndslice.slice: isSlice, DeepElementType;
| import std.traits: Unqual, ForeachType;
|
| static if (isIterable!T) {
| static if (isSlice!T)
| alias elementType = Unqual!(DeepElementType!(T.This));
| else
| alias elementType = Unqual!(ForeachType!T);
| } else {
| alias elementType = Unqual!T;
| }
|}
|
|package(mir)
|template sumType(Range)
|{
| alias T = elementType!Range;
|
| static if (__traits(compiles, {
| auto a = T.init + T.init;
| a += T.init;
| }))
| alias sumType = typeof(T.init + T.init);
| else
| static assert(0, "sumType: Can't sum elements of type " ~ T.stringof);
|}
|
|/++
|+/
|template fillCollapseSums(Summation summation, alias combineParts, combineElements...)
|{
| import mir.ndslice.slice: Slice, SliceKind;
| /++
| +/
| auto ref fillCollapseSums(Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) data) @property
| {
| import mir.algorithm.iteration;
| import mir.functional: naryFun;
| import mir.ndslice.topology: iota, triplets;
| foreach (triplet; data.length.iota.triplets) with(triplet)
| {
| auto ref ce(size_t i)()
| {
| static if (summation == Summation.fast)
| {
| return
| sum!summation(naryFun!(combineElements[i])(center, left )) +
| sum!summation(naryFun!(combineElements[i])(center, right));
| }
| else
| {
| Summator!summation summator = 0;
| summator.put(naryFun!(combineElements[i])(center, left));
| summator.put(naryFun!(combineElements[i])(center, right));
| return summator.sum;
| }
| }
| alias sums = staticMap!(ce, Iota!(combineElements.length));
| data[center] = naryFun!combineParts(center, sums);
| }
| }
|}
|
|package:
|
|template isSummable(F)
|{
| enum bool isSummable =
| __traits(compiles,
| {
| F a = 0.1, b, c;
| b = 2.3;
| c = a + b;
| c = a - b;
| a += b;
| a -= b;
| });
|}
|
|template isSummable(Range, F)
|{
| enum bool isSummable =
| isIterable!Range &&
| isImplicitlyConvertible!(sumType!Range, F) &&
| isSummable!F;
|}
|
|version(mir_test)
|unittest
|{
| import mir.ndslice.topology: iota;
| static assert(isSummable!(typeof(iota([size_t.init])), double));
|}
|
|private enum bool isCompesatorAlgorithm(Summation summation) =
| summation == Summation.precise
| || summation == Summation.kb2
| || summation == Summation.kbn
| || summation == Summation.kahan;
|
|
|version(mir_test)
|unittest
|{
| import mir.ndslice;
|
| auto p = iota([2, 3, 4, 5]);
| auto a = p.as!double;
| auto b = a.flattened;
| auto c = a.slice;
| auto d = c.flattened;
| auto s = p.flattened.sum;
|
| assert(a.sum == s);
| assert(b.sum == s);
| assert(c.sum == s);
| assert(d.sum == s);
|
| assert(a.canonical.sum == s);
| assert(b.canonical.sum == s);
| assert(c.canonical.sum == s);
| assert(d.canonical.sum == s);
|
| assert(a.universal.transposed!3.sum == s);
| assert(b.universal.sum == s);
| assert(c.universal.transposed!3.sum == s);
| assert(d.universal.sum == s);
|
| assert(a.sum!"fast" == s);
| assert(b.sum!"fast" == s);
| assert(c.sum!(float, "fast") == s);
| assert(d.sum!"fast" == s);
|
| assert(a.canonical.sum!"fast" == s);
| assert(b.canonical.sum!"fast" == s);
| assert(c.canonical.sum!"fast" == s);
| assert(d.canonical.sum!"fast" == s);
|
| assert(a.universal.transposed!3.sum!"fast" == s);
| assert(b.universal.sum!"fast" == s);
| assert(c.universal.transposed!3.sum!"fast" == s);
| assert(d.universal.sum!"fast" == s);
|
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/math/sum.d has no code
<<<<<< EOF
# path=./source-mir-sparse-blas-dot.lst
|/**
|License: $(LINK2 http://boost.org/LICENSE_1_0.txt, Boost License 1.0).
|
|Authors: Ilya Yaroshenko
|*/
|module mir.sparse.blas.dot;
|
|import std.traits;
|import mir.ndslice.slice;
|import mir.sparse;
|import mir.series;
|
|/++
|Dot product of two vectors
|
|Params:
| x = sparse vector
| y = sparse vector
|Returns:
| scalar `xᵀ × y`
|+/
|Unqual!(CommonType!(T1, T2)) dot(
| V1 : Series!(I1*, T1*),
| V2 : Series!(I2*, T2*),
| T1, T2, I1, I2)
|(V1 x, V2 y)
|{
1| return dot!(typeof(return))(x, y);
|}
|
|/// ditto
|D dot(
| D,
| V1 : Series!(I1*, T1*),
| V2 : Series!(I2*, T2*),
| T1, T2, I1, I2)
|(V1 x, V2 y)
|{
|
2| typeof(return) s = 0;
|
2| uint done = 2;
2| Unqual!I1 ai0 = void;
2| Unqual!I2 bi0 = void;
|
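|    // Sorted-index merge: each series is advanced past the other's current index;
|    // once neither series advances across two consecutive checks (`done` reaches 0),
|    // the indices match and the product is accumulated at label L.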
4| if (x.length && y.length) for (;;)
| {
8| bi0 = y.index[0];
8| if (x.index[0] < bi0)
| {
| do
| {
4| x.popFront;
4| if (x.length == 0)
| {
0000000| break;
| }
| }
4| while (x.index[0] < bi0);
4| done = 2;
| }
8| if (--done == 0)
| {
2| goto L;
| }
6| ai0 = x.index[0];
6| if (y.index[0] < ai0)
| {
| do
| {
4| y.popFront;
4| if (y.length == 0)
| {
0000000| break;
| }
| }
4| while (y.index[0] < ai0);
4| done = 2;
| }
6| if (--done == 0)
| {
2| goto L;
| }
4| continue;
| L:
4| s = x.value[0] * y.value[0] + s;
4| x.popFront;
4| if (x.length == 0)
| {
0000000| break;
| }
4| y.popFront;
4| if (y.length == 0)
| {
2| break;
| }
| }
|
2| return s;
|}
|
|///
|unittest
|{
| import mir.series;
|
1| auto x = series([0u, 3, 5, 9, 100], [1, 3, 4, 9, 10]);
1| auto y = series([1u, 3, 4, 9], [1, 10, 100, 1000]);
| // x = [1, 0, 0, 3, 0, 4, 0, 0, 0, 9, ... ,10]
| // y = [0, 1, 0, 10, 0, 0, 0, 0, 0, 1000]
1| assert(dot(x, y) == 9030);
1| assert(dot!double(x, y) == 9030);
|}
|
|/++
|Dot product of two vectors.
|Params:
| x = sparse vector
| y = dense vector
|Returns:
| scalar `xᵀ × y`
|+/
|Unqual!(CommonType!(T1, ForeachType!V2)) dot(
| V1 : Series!(I1*, T1*),
| T1, I1, V2)
|(V1 x, V2 y)
| if (isDynamicArray!V2 || isSlice!V2)
|{
21| return dot!(typeof(return))(x, y);
|}
|
|///ditto
|D dot(
| D,
| V1 : Series!(I1*, T1*),
| T1, I1, V2)
|(V1 x, V2 y)
| if (isDynamicArray!V2 || isSlice!V2)
|in
|{
21| if (x.length)
21| assert(x.index[$-1] < y.length);
|}
|body
|{
|
| import mir.internal.utility;
|
| alias T2 = ForeachType!V2;
|
| alias F = Unqual!(CommonType!(T1, T2));
21| F s = 0;
324| foreach (size_t i; 0 .. x.index.length)
| {
87| s = y[x.index[i]] * x.value[i] + s;
| }
|
21| return s;
|}
|
|///
|unittest
|{
| import mir.series;
|
1| auto x = [0u, 3, 5, 9, 10].series([1.0, 3, 4, 9, 13]);
1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
| // x: [1, 0, 0, 3, 0, 4, 0, 0, 0, 9, 13, 0, 0, 0]
| // y: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
1| auto r = 0 + 3 * 3 + 4 * 5 + 9 * 9 + 13 * 10;
1| assert(dot(x, y) == r);
1| assert(dot(x, y.sliced) == r);
1| assert(dot(x, y.slicedField) == r);
|}
source/mir/sparse/blas/dot.d is 94% covered
<<<<<< EOF
# path=./source-mir-glas-l2.lst
|/++
|$(H2 Level 2)
|
|$(SCRIPT inhibitQuickIndex = 1;)
|
|This is a submodule of $(MREF mir,glas).
|
|The Level 2 BLAS perform matrix-vector operations.
|
|Note: GLAS is single-threaded for now.
|
|$(BOOKTABLE $(H2 Matrix-vector operations),
|
|$(TR $(TH Function Name) $(TH Description))
|$(T2 gemv, general matrix-vector multiplication, $(RED partially optimized))
|)
|
|License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0).
|Copyright: Copyright © 2016-, Ilya Yaroshenko
|Authors: Ilya Yaroshenko
|
|Macros:
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|SUBMODULE = $(MREF_ALTTEXT $1, mir, glas, $1)
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, glas, $1)$(NBSP)
|NDSLICEREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|+/
|module mir.glas.l2;
|
|import std.traits;
|import std.meta;
|
|import mir.math.common;
|import mir.internal.utility;
|import mir.ndslice.slice;
|
|import mir.glas.l1;
|
|import mir.math.common: fastmath;
|
|@fastmath:
|
|/++
|$(RED DRAFT)
|Performs general matrix-vector multiplication.
|
|Pseudo_code: `y := alpha A × x + beta y`.
|
|Params:
| alpha = scalar
| asl = `m ⨉ n` matrix
| xsl = `n ⨉ 1` vector
| beta = scalar. When `beta` is supplied as zero then the vector `ysl` need not be set on input.
| ysl = `m ⨉ 1` vector
|
|Note:
| GLAS does not require transposition parameters.
| Use $(NDSLICEREF iteration, transposed)
| to perform zero cost `Slice` transposition.
|
|BLAS: SGEMV, DGEMV (CGEMV and ZGEMV are not implemented yet)
|+/
|nothrow @nogc @system
|void gemv(A, B, C,
| SliceKind kindA,
| SliceKind kindB,
| SliceKind kindC,
| )
|(
| C alpha,
| Slice!(const(A)*, 2, kindA) asl,
| Slice!(const(B)*, 1, kindB) xsl,
| C beta,
| Slice!(C*, 1, kindC) ysl,
|)
| if (allSatisfy!(isNumeric, A, B, C))
|in
|{
1| assert(asl.length!0 == ysl.length, "constraint: asl.length!0 == ysl.length");
1| assert(asl.length!1 == xsl.length, "constraint: asl.length!1 == xsl.length");
|}
|body
|{
| import mir.ndslice.dynamic: reversed;
| static assert(is(Unqual!C == C), msgWrongType);
1| if (ysl.empty)
0000000| return;
1| if (beta == 0)
| {
1| ysl[] = 0;
| }
| else
0000000|    if (beta != 1)
| {
0000000| ysl[] *= beta;
| }
1| if (xsl.empty)
0000000| return;
| do
| {
3| ysl.front += alpha * dot(asl.front, xsl);
3| asl.popFront;
3| ysl.popFront;
| }
3| while (ysl.length);
|}
|
|///
|unittest
|{
| import mir.ndslice;
|
1| auto a = slice!double(3, 5);
1| a[] =
| [[-5, 1, 7, 7, -4],
| [-1, -5, 6, 3, -3],
| [-5, -2, -3, 6, 0]];
|
1| auto b = slice!double(5);
1| b[] =
| [-5.0,
| 4.0,
| -4.0,
| -1.0,
| 9.0];
|
1| auto c = slice!double(3);
|
1| gemv!(double, double, double)(1.0, a, b, 0.0, c);
|
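|    // e.g. the first row: (-5)*(-5) + 1*4 + 7*(-4) + 7*(-1) + (-4)*9 == -42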
1| assert(c ==
| [-42.0,
| -69.0,
| 23.0]);
|}
source/mir/glas/l2.d is 80% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-algorithm-iteration.lst
|// Written in the D programming language.
|/**
|This module contains generic _iteration algorithms.
|$(SCRIPT inhibitQuickIndex = 1;)
|
|$(BOOKTABLE $(H2 Function),
|$(TR $(TH Function Name) $(TH Description))
|$(T2 all, Checks if all elements satisfy a predicate.)
|$(T2 any, Checks if at least one element satisfies a predicate.)
|$(T2 cmp, Compares two slices.)
|$(T2 count, Counts elements in slices according to a predicate.)
|$(T2 each, Iterates elements.)
|$(T2 eachLower, Iterates lower triangle of matrix.)
|$(T2 eachOnBorder, Iterates elements on tensor borders and corners.)
|$(T2 eachUploPair, Iterates upper and lower pairs of elements in square matrix.)
|$(T2 eachUpper, Iterates upper triangle of matrix.)
|$(T2 equal, Compares two slices for equality.)
|$(T2 filter, Filters elements in a range or an ndslice.)
|$(T2 find, Finds backward index.)
|$(T2 findIndex, Finds index.)
|$(T2 fold, Accumulates all elements (different parameter order than `reduce`).)
|$(T2 isSymmetric, Checks if the matrix is symmetric.)
|$(T2 maxIndex, Finds index of the maximum.)
|$(T2 maxPos, Finds backward index of the maximum.)
|$(T2 minIndex, Finds index of the minimum.)
|$(T2 minmaxIndex, Finds indices of the minimum and the maximum.)
|$(T2 minmaxPos, Finds backward indices of the minimum and the maximum.)
|$(T2 minPos, Finds backward index of the minimum.)
|$(T2 nBitsToCount, Count bits until set bit count is reached.)
|$(T2 reduce, Accumulates all elements.)
|$(T2 Chequer, Chequer color selector to work with $(LREF each).)
|$(T2 uniq, Iterates over the unique elements in a range or an ndslice, which is assumed sorted.)
|)
|
|The transform function is represented by $(NDSLICEREF topology, map).
|
|All of these algorithms can mutate slice elements in place when the supplied function takes its arguments by `ref`.
|Note that string lambdas in Mir are `auto ref` functions.
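|
|For example (mirroring the `each` unittest later in this module), a string lambda can
|mutate elements in place:
|--------
|import mir.algorithm.iteration: each;
|
|auto a = [1, 2, 3];
|a.each!"a *= 2"; // `a` binds by reference, so the array is updated in place
|assert(a == [2, 4, 6]);
|--------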
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko (Mir & BetterC rework), John Michael Hall, Andrei Alexandrescu (original Phobos code)
|
|Source: $(PHOBOSSRC std/algorithm/_iteration.d)
|Macros:
| NDSLICEREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
| T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
| */
|module mir.algorithm.iteration;
|
|import mir.functional: naryFun;
|import mir.internal.utility;
|import mir.math.common: optmath;
|import mir.ndslice.field: BitField;
|import mir.ndslice.internal;
|import mir.ndslice.iterator: FieldIterator, RetroIterator;
|import mir.ndslice.slice;
|import mir.primitives;
|import mir.qualifier;
|import std.meta;
|import std.range.primitives: isInputRange, isBidirectionalRange, isInfinite, isForwardRange, ElementType;
|import std.traits;
|
|/++
|Chequer color selector to work with $(LREF each)
|+/
|enum Chequer : bool
|{
| /// Main diagonal color
| black,
| /// First sub-diagonal color
| red,
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| auto s = [5, 4].slice!int;
|
| Chequer.black.each!"a = 1"(s);
| assert(s == [
| [1, 0, 1, 0],
| [0, 1, 0, 1],
| [1, 0, 1, 0],
| [0, 1, 0, 1],
| [1, 0, 1, 0],
| ]);
|
| Chequer.red.each!((ref b) => b = 2)(s);
| assert(s == [
| [1, 2, 1, 2],
| [2, 1, 2, 1],
| [1, 2, 1, 2],
| [2, 1, 2, 1],
| [1, 2, 1, 2],
| ]);
|
|}
|
|@optmath:
|
|/+
|Bitslice representation for accelerated bitwise algorithms.
|A 1-dimensional contiguous bitslice can be split into three parts: head bits, body chunks, and tail bits.
|
|A bitslice can have head bits because slicing means its first bit may not be aligned with the start of a body chunk.
|+/
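|/+
|Worked example of the split (illustrative only, assuming 64-bit chunks, i.e. `I = ulong`):
|a bitslice that starts at absolute bit 70 with length 200 splits into
|    headLength = 64 - (70 % 64)   = 58 bits,
|    bodyChunks = (200 - 58) / 64  = 2  whole chunks,
|    tailLength = (200 - 58) % 64  = 14 bits,
|which matches the constructor logic below.
|+/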
|private struct BitSliceAccelerator(Field, I = typeof(Field.init[size_t.init]))
| if (__traits(isUnsigned, I))
|{
| import mir.bitop;
| import mir.qualifier: lightConst;
| import mir.ndslice.traits: isIterator;
| import mir.ndslice.iterator: FieldIterator;
| import mir.ndslice.field: BitField;
|
| ///
| alias U = typeof(I + 1u);
| /// body bits chunks
| static if (isIterator!Field)
| Slice!Field bodyChunks;
| else
| Slice!(FieldIterator!Field) bodyChunks;
| /// head length
| int headLength;
| /// tail length
| int tailLength;
|
|@optmath:
|
| this(Slice!(FieldIterator!(BitField!(Field, I))) slice)
| {
| enum mask = bitShiftMask!I;
| enum shift = bitElemShift!I;
| size_t length = slice.length;
| size_t index = slice._iterator._index;
| if (auto hlen = index & mask)
| {
| auto l = I.sizeof * 8 - hlen;
| if (l > length)
| {
| // central problem
| headLength = -cast(int) length;
| tailLength = cast(int) hlen;
| goto F;
| }
| else
| {
| headLength = cast(uint) l;
| length -= l;
| index += l;
| }
| }
| tailLength = cast(int) (length & mask);
| F:
| length >>= shift;
| index >>= shift;
| bodyChunks._lengths[0] = length;
| static if (isIterator!Field)
| {
| bodyChunks._iterator = slice._iterator._field._field;
| bodyChunks._iterator += index;
| }
| else
| {
| bodyChunks._iterator._index = index;
| bodyChunks._iterator._field = slice._iterator._field._field;
| }
| }
|
|scope const:
|
| bool isCentralProblem()
| {
| return headLength < 0;
| }
|
| U centralBits()
| {
| assert(isCentralProblem);
| return *bodyChunks._iterator.lightConst >>> tailLength;
| }
|
| uint centralLength()
| {
| assert(isCentralProblem);
| return -headLength;
| }
|
| /// head bits (last `headLength` bits are valid).
| U headBits()
| {
| assert(!isCentralProblem);
| if (headLength == 0)
| return U.init;
| static if (isIterator!Field)
| return bodyChunks._iterator.lightConst[-1];
| else
| return bodyChunks._iterator._field.lightConst[bodyChunks._iterator._index - 1];
| }
|
| /// tail bits (first `tailLength` bits are valid).
| U tailBits()
| {
| assert(!isCentralProblem);
| if (tailLength == 0)
| return U.init;
| static if (isIterator!Field)
| return bodyChunks._iterator.lightConst[bodyChunks.length];
| else
| return bodyChunks._iterator._field.lightConst[bodyChunks._iterator._index + bodyChunks.length];
| }
|
| U negCentralMask()
| {
| return U.max << centralLength;
| }
|
| U negHeadMask()
| {
| return U.max << headLength;
| }
|
| U negTailMask()
| {
| return U.max << tailLength;
| }
|
| U negCentralMaskS()
| {
| return U.max >> centralLength;
| }
|
| U negHeadMaskS()
| {
| return U.max >> headLength;
| }
|
| U negTailMaskS()
| {
| return U.max >> tailLength;
| }
|
| U centralBitsWithRemainingZeros()
| {
| return centralBits & ~negCentralMask;
| }
|
| U centralBitsWithRemainingZerosS()
| {
| return centralBits << (U.sizeof * 8 - centralLength);
| }
|
| U headBitsWithRemainingZeros()
| {
| return headBits >>> (I.sizeof * 8 - headLength);
| }
|
| U headBitsWithRemainingZerosS()
| {
| static if (U.sizeof > I.sizeof)
| return (headBits << (U.sizeof - I.sizeof) * 8) & ~negTailMaskS;
| else
| return headBits & ~negTailMaskS;
| }
|
| U tailBitsWithRemainingZeros()
| {
| return tailBits & ~negTailMask;
| }
|
| U tailBitsWithRemainingZerosS()
| {
| return tailBits << (U.sizeof * 8 - tailLength);
| }
|
| U centralBitsWithRemainingOnes()
| {
| return centralBits | negCentralMask;
| }
|
| U centralBitsWithRemainingOnesS()
| {
| return centralBitsWithRemainingZerosS | negCentralMaskS;
| }
|
| U headBitsWithRemainingOnes()
| {
| return headBitsWithRemainingZeros | negHeadMask;
| }
|
| U headBitsWithRemainingOnesS()
| {
| return headBitsWithRemainingZerosS | negHeadMaskS;
| }
|
| U tailBitsWithRemainingOnes()
| {
| return tailBits | negTailMask;
| }
|
| U tailBitsWithRemainingOnesS()
| {
| return tailBitsWithRemainingZerosS | negTailMaskS;
| }
|
| size_t ctpop()
| {
| import mir.bitop: ctpop;
| if (isCentralProblem)
| return centralBitsWithRemainingZeros.ctpop;
| size_t ret;
| if (headLength)
| ret = cast(size_t) headBitsWithRemainingZeros.ctpop;
| if (bodyChunks.length)
| {
| auto bc = bodyChunks.lightConst;
| do
| {
| ret += cast(size_t) bc.front.ctpop;
| bc.popFront;
| }
| while(bc.length);
| }
| if (tailBits)
| ret += cast(size_t) tailBitsWithRemainingZeros.ctpop;
| return ret;
| }
|
| bool any()
| {
| if (isCentralProblem)
| return centralBitsWithRemainingZeros != 0;
| if (headBitsWithRemainingZeros != 0)
| return true;
| if (bodyChunks.length)
| {
| auto bc = bodyChunks.lightConst;
| do
| {
| if (bc.front != 0)
| return true;
| bc.popFront;
| }
| while(bc.length);
| }
| if (tailBitsWithRemainingZeros != 0)
| return true;
| return false;
| }
|
| bool all()
| {
| if (isCentralProblem)
| return centralBitsWithRemainingOnes != U.max;
| size_t ret;
| if (headBitsWithRemainingOnes != U.max)
| return false;
| if (bodyChunks.length)
| {
| auto bc = bodyChunks.lightConst;
| do
| {
| if (bc.front != I.max)
| return false;
| bc.popFront;
| }
| while(bc.length);
| }
| if (tailBitsWithRemainingOnes != U.max)
| return false;
| return true;
| }
|
| size_t cttz()
| {
| U v;
| size_t ret;
| if (isCentralProblem)
| {
| v = centralBitsWithRemainingOnes;
| if (v)
| goto R;
| ret = centralLength;
| goto L;
| }
| v = headBitsWithRemainingOnes;
| if (v)
| goto R;
| ret = headLength;
| if (bodyChunks.length)
| {
| auto bc = bodyChunks.lightConst;
| do
| {
| v = bc.front;
| if (v)
| goto R;
| ret += I.sizeof * 8;
| bc.popFront;
| }
| while(bc.length);
| }
| v = tailBitsWithRemainingOnes;
| if (v)
| goto R;
| ret += tailLength;
| goto L;
| R:
| ret += v.cttz;
| L:
| return ret;
| }
|
| size_t ctlz()
| {
| U v;
| size_t ret;
| if (isCentralProblem)
| {
| v = centralBitsWithRemainingOnes;
| if (v)
| goto R;
| ret = centralLength;
| goto L;
| }
| v = tailBitsWithRemainingOnesS;
| if (v)
| goto R;
| ret = tailLength;
| if (bodyChunks.length)
| {
| auto bc = bodyChunks.lightConst;
| do
| {
| v = bc.back;
| if (v)
| goto R;
| ret += I.sizeof * 8;
| bc.popBack;
| }
| while(bc.length);
| }
| v = headBitsWithRemainingOnesS;
| if (v)
| goto R;
| ret += headLength;
| goto L;
| R:
| ret += v.ctlz;
| L:
| return ret;
| }
|
| sizediff_t nBitsToCount(size_t count)
| {
| size_t ret;
| if (count == 0)
| return count;
| U v, cnt;
| if (isCentralProblem)
| {
| v = centralBitsWithRemainingZeros;
| goto E;
| }
| v = headBitsWithRemainingZeros;
| cnt = v.ctpop;
| if (cnt >= count)
| goto R;
| ret += headLength;
| count -= cast(size_t) cnt;
| if (bodyChunks.length)
| {
| auto bc = bodyChunks.lightConst;
| do
| {
| v = bc.front;
| cnt = v.ctpop;
| if (cnt >= count)
| goto R;
| ret += I.sizeof * 8;
| count -= cast(size_t) cnt;
| bc.popFront;
| }
| while(bc.length);
| }
| v = tailBitsWithRemainingZeros;
| E:
| cnt = v.ctpop;
| if (cnt >= count)
| goto R;
| return -1;
| R:
| return ret + v.nTrailingBitsToCount(count);
| }
|
| sizediff_t retroNBitsToCount(size_t count)
| {
| if (count == 0)
| return count;
| size_t ret;
| U v, cnt;
| if (isCentralProblem)
| {
| v = centralBitsWithRemainingZerosS;
| goto E;
| }
| v = tailBitsWithRemainingZerosS;
| cnt = v.ctpop;
| if (cnt >= count)
| goto R;
| ret += tailLength;
| count -= cast(size_t) cnt;
| if (bodyChunks.length)
| {
| auto bc = bodyChunks.lightConst;
| do
| {
| v = bc.back;
| cnt = v.ctpop;
| if (cnt >= count)
| goto R;
| ret += I.sizeof * 8;
| count -= cast(size_t) cnt;
| bc.popBack;
| }
| while(bc.length);
| }
| v = headBitsWithRemainingZerosS;
| E:
| cnt = v.ctpop;
| if (cnt >= count)
| goto R;
| return -1;
| R:
| return ret + v.nLeadingBitsToCount(count);
| }
|}
|
|/++
|Count bits until set bit count is reached. Works with ndslices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation).
|Returns: bit count if set bit count is reached or `-1` otherwise.
|+/
|sizediff_t nBitsToCount(Field, I)(Slice!(FieldIterator!(BitField!(Field, I))) bitSlice, size_t count)
|{
| return BitSliceAccelerator!(Field, I)(bitSlice).nBitsToCount(count);
|}
|
|///ditto
|sizediff_t nBitsToCount(Field, I)(Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))) bitSlice, size_t count)
|{
| import mir.ndslice.topology: retro;
| return BitSliceAccelerator!(Field, I)(bitSlice.retro).retroNBitsToCount(count);
|}
|
|///
|pure unittest
|{
| import mir.ndslice.allocation: bitSlice;
| import mir.ndslice.topology: retro;
| auto s = bitSlice(1000);
| s[50] = true;
| s[100] = true;
| s[200] = true;
| s[300] = true;
| s[400] = true;
| assert(s.nBitsToCount(4) == 301);
| assert(s.retro.nBitsToCount(4) == 900);
|}
|
|private void checkShapesMatch(
| string fun = __FUNCTION__,
| string pfun = __PRETTY_FUNCTION__,
| Slices...)
| (scope ref const Slices slices)
| if (Slices.length > 1)
|{
| enum msgShape = "all slices must have the same shape" ~ tailErrorMessage!(fun, pfun);
| enum N = slices[0].shape.length;
| foreach (i, Slice; Slices)
| {
| static if (i == 0)
0000000| continue;
| else
| static if (slices[i].shape.length == N)
0000000| assert(slices[i].shape == slices[0].shape, msgShape);
| else
| {
| import mir.ndslice.fuse: fuseShape;
| static assert(slices[i].fuseShape.length >= N);
| assert(cast(size_t[N])slices[i].fuseShape[0 .. N] == slices[0].shape, msgShape);
| }
| }
|}
|
|
|package(mir) template allFlattened(args...)
|{
| static if (args.length)
| {
| alias arg = args[0];
| @optmath @property ls()()
| {
| import mir.ndslice.topology: flattened;
0000000| return flattened(arg);
| }
| alias allFlattened = AliasSeq!(ls, allFlattened!(args[1..$]));
| }
| else
| alias allFlattened = AliasSeq!();
|}
|
|private template areAllContiguousSlices(Slices...)
|{
| import mir.ndslice.traits: isContiguousSlice;
| static if (allSatisfy!(isContiguousSlice, Slices))
| enum areAllContiguousSlices = Slices[0].N > 1 && areAllContiguousSlicesImpl!(Slices[0].N, Slices[1 .. $]);
| else
| enum areAllContiguousSlices = false;
|}
|
|private template areAllContiguousSlicesImpl(size_t N, Slices...)
|{
| static if (Slices.length == 0)
| enum areAllContiguousSlicesImpl = true;
| else
| enum areAllContiguousSlicesImpl = Slices[0].N == N && areAllContiguousSlicesImpl!(N, Slices[1 .. $]);
|}
|
|version(LDC) {}
|else version(GNU) {}
|else version (Windows) {}
|else version (X86_64)
|{
| //Compiling with DMD for x86-64 for Linux & OS X with optimizations enabled,
| //"Tensor mutation on-the-fly" unittest was failing. Disabling inlining
| //caused it to succeed.
| //TODO: Rework so this is unnecessary!
| version = Mir_disable_inlining_in_reduce;
|}
|
|version(Mir_disable_inlining_in_reduce)
|{
| private enum Mir_disable_inlining_in_reduce = true;
|
| private template _naryAliases(size_t n)
| {
| static if (n == 0)
| enum _naryAliases = "";
| else
| {
| enum i = n - 1;
| enum _naryAliases = _naryAliases!i ~ "alias " ~ cast(char)('a' + i) ~ " = args[" ~ i.stringof ~ "];\n";
| }
| }
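|    // For instance, `_naryAliases!2` yields "alias a = args[0];\nalias b = args[1];\n",
|    // which `nonInlinedNaryFun` mixes in so that a string lambda sees its arguments
|    // as `a` and `b`.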
|
| private template nonInlinedNaryFun(alias fun)
| {
| import mir.math.common : optmath;
| static if (is(typeof(fun) : string))
| {
| /// Specialization for string lambdas
| @optmath auto ref nonInlinedNaryFun(Args...)(auto ref Args args)
| if (args.length <= 26)
| {
| pragma(inline,false);
| mixin(_naryAliases!(Args.length));
| return mixin(fun);
| }
| }
| else static if (is(typeof(fun.opCall) == function))
| {
| @optmath auto ref nonInlinedNaryFun(Args...)(auto ref Args args)
| if (is(typeof(fun.opCall(args))))
| {
| pragma(inline,false);
| return fun.opCall(args);
| }
| }
| else
| {
| @optmath auto ref nonInlinedNaryFun(Args...)(auto ref Args args)
| if (is(typeof(fun(args))))
| {
| pragma(inline,false);
| return fun(args);
| }
| }
| }
|}
|else
|{
| private enum Mir_disable_inlining_in_reduce = false;
|}
|
|S reduceImpl(alias fun, S, Slices...)(S seed, scope Slices slices)
|{
| do
| {
| static if (DimensionCount!(Slices[0]) == 1)
| seed = fun(seed, frontOf!slices);
| else
| seed = .reduceImpl!fun(seed, frontOf!slices);
| foreach_reverse(ref slice; slices)
| slice.popFront;
| }
| while(!slices[0].empty);
| return seed;
|}
|
|/++
|Implements the homonym function (also known as `accumulate`,
|`compress`, `inject`, or `fold`) present in various programming
|languages of functional flavor. The call `reduce!(fun)(seed, slice1, ..., sliceN)`
|first assigns `seed` to an internal variable `result`,
|also called the accumulator. Then, for each set of elements `x1, ..., xN` in
|`slice1, ..., sliceN`, `result = fun(result, x1, ..., xN)` gets evaluated. Finally,
|`result` is returned.
|
|`reduce` allows iterating multiple slices in lockstep.
|
|Note:
| $(NDSLICEREF topology, pack) can be used to specify dimensions.
|Params:
| fun = A function.
|See_Also:
| $(HTTP llvm.org/docs/LangRef.html#fast-math-flags, LLVM IR: Fast Math Flags)
|
| $(HTTP en.wikipedia.org/wiki/Fold_(higher-order_function), Fold (higher-order function))
|+/
|template reduce(alias fun)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun)
| && !Mir_disable_inlining_in_reduce)
| /++
| Params:
| seed = An initial accumulation value.
|        slices = One or more slices, ranges, and arrays.
| Returns:
| the accumulated `result`
| +/
| @optmath auto reduce(S, Slices...)(S seed, scope Slices slices)
| if (Slices.length)
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| static if (areAllContiguousSlices!Slices)
| {
| import mir.ndslice.topology: flattened;
| return .reduce!fun(seed, allFlattened!(allLightScope!slices));
| }
| else
| {
| if (slices[0].anyEmpty)
| return cast(Unqual!S) seed;
| static if (is(S : Unqual!S))
| alias UT = Unqual!S;
| else
| alias UT = S;
| return reduceImpl!(fun, UT, Slices)(seed, allLightScope!slices);
| }
| }
| else version(Mir_disable_inlining_in_reduce)
| //As above, but with inlining disabled.
| @optmath auto reduce(S, Slices...)(S seed, scope Slices slices)
| if (Slices.length)
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| static if (areAllContiguousSlices!Slices)
| {
| import mir.ndslice.topology: flattened;
| return .reduce!fun(seed, allFlattened!(allLightScope!slices));
| }
| else
| {
| if (slices[0].anyEmpty)
| return cast(Unqual!S) seed;
| static if (is(S : Unqual!S))
| alias UT = Unqual!S;
| else
| alias UT = S;
| return reduceImpl!(nonInlinedNaryFun!fun, UT, Slices)(seed, allLightScope!slices);
| }
| }
| else
| alias reduce = .reduce!(naryFun!fun);
|}
|
|/// Ranges and arrays
|version(mir_test)
|unittest
|{
| auto ar = [1, 2, 3];
| auto s = 0.reduce!"a + b"(ar);
| assert (s == 6);
|}
|
|/// Single slice
|version(mir_test)
|unittest
|{
| import mir.ndslice.topology : iota;
|
| //| 0 1 2 | => 3 |
| //| 3 4 5 | => 12 | => 15
| auto sl = iota(2, 3);
|
| // sum of all element in the slice
| auto res = size_t(0).reduce!"a + b"(sl);
|
| assert(res == 15);
|}
|
|/// Multiple slices, dot product
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| auto a = iota([2, 3], 0).as!double.slice;
| //| 1 2 3 |
| //| 4 5 6 |
| auto b = iota([2, 3], 1).as!double.slice;
|
| alias dot = reduce!"a + b * c";
| auto res = dot(0.0, a, b);
|
| // check the result:
| import mir.ndslice.topology : flattened;
| import std.numeric : dotProduct;
| assert(res == dotProduct(a.flattened, b.flattened));
|}
|
|/// Zipped slices, dot product
|pure
|version(mir_test) unittest
|{
| import std.typecons : Yes;
| import std.numeric : dotProduct;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota, zip, universal;
| import mir.math.common : optmath;
|
| static @optmath T fmuladd(T, Z)(const T a, Z z)
| {
| return a + z.a * z.b;
| }
|
| // 0 1 2
| // 3 4 5
| auto sl1 = iota(2, 3).as!double.slice.universal;
| // 1 2 3
| // 4 5 6
| auto sl2 = iota([2, 3], 1).as!double.slice;
|
| // slices must have the same strides for `zip!true`.
| assert(sl1.strides == sl2.strides);
|
| auto z = zip!true(sl1, sl2);
|
| auto dot = reduce!fmuladd(0.0, z);
|
| assert(dot == dotProduct(iota(6), iota([6], 1)));
|}
|
|/// Tensor mutation on-the-fly
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
| import mir.math.common : optmath;
|
| static @optmath T fun(T)(const T a, ref T b)
| {
| return a + b++;
| }
|
| //| 0 1 2 |
| //| 3 4 5 |
| auto sl = iota(2, 3).as!double.slice;
|
| auto res = reduce!fun(double(0), sl);
|
| assert(res == 15);
|
| //| 1 2 3 |
| //| 4 5 6 |
| assert(sl == iota([2, 3], 1));
|}
|
|/++
|Packed slices.
|
|Computes minimum value of maximum values for each row.
|+/
|version(mir_test)
|unittest
|{
| import mir.math.common;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.dynamic : transposed;
| import mir.ndslice.topology : as, iota, pack, map, universal;
|
| alias maxVal = (a) => reduce!fmax(-double.infinity, a);
| alias minVal = (a) => reduce!fmin(double.infinity, a);
| alias minimaxVal = (a) => minVal(a.pack!1.map!maxVal);
|
| auto sl = iota(2, 3).as!double.slice;
|
| // Vectorized computation: row stride equals 1.
| //| 0 1 2 | => | 2 |
| //| 3 4 5 | => | 5 | => 2
| auto res = minimaxVal(sl);
| assert(res == 2);
|
| // Common computation: row stride does not equal 1.
| //| 0 1 2 | | 0 3 | => | 3 |
| //| 3 4 5 | => | 1 4 | => | 4 |
| // | 2 5 | => | 5 | => 3
| auto resT = minimaxVal(sl.universal.transposed);
| assert(resT == 3);
|}
|
|/// Dlang Range API support.
|version(mir_test)
|unittest
|{
| import mir.algorithm.iteration: each;
| import std.range: phobos_iota = iota;
|
| int s;
| // 0 1 2 3
| 4.phobos_iota.each!(i => s += i);
| assert(s == 6);
|}
|
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| auto a = reduce!"a + b"(size_t(7), iota([0, 1], 1));
| assert(a == 7);
|}
|
|void eachImpl(alias fun, Slices...)(scope Slices slices)
|{
| foreach(ref slice; slices)
| assert(!slice.empty);
| do
| {
| static if (DimensionCount!(Slices[0]) == 1)
| fun(frontOf!slices);
| else
| .eachImpl!fun(frontOf!slices);
| foreach_reverse(i; Iota!(Slices.length))
| slices[i].popFront;
| }
| while(!slices[0].empty);
|}
|
|void chequerEachImpl(alias fun, Slices...)(Chequer color, scope Slices slices)
|{
| foreach(ref slice; slices)
| assert(!slice.empty);
| static if (DimensionCount!(Slices[0]) == 1)
| {
| if (color)
| {
| foreach_reverse(i; Iota!(Slices.length))
| slices[i].popFront;
| if (slices[0].empty)
| return;
| }
| eachImpl!fun(strideOf!slices);
| }
| else
| {
| do
| {
| .chequerEachImpl!fun(color, frontOf!slices);
| color = cast(Chequer)!color;
| foreach_reverse(i; Iota!(Slices.length))
| slices[i].popFront;
| }
| while(!slices[0].empty);
| }
|}
|
|/++
|The call `eachOnBorder!(fun)(slice1, ..., sliceN)`
|evaluates `fun` for each set of elements `x1, ..., xN` on
|the borders of `slice1, ..., sliceN` respectively.
|
|`eachOnBorder` allows iterating multiple slices in lockstep.
|
|Params:
| fun = A function.
|Note:
| $(NDSLICEREF dynamic, transposed) and
| $(NDSLICEREF topology, pack) can be used to specify dimensions.
|+/
|template eachOnBorder(alias fun)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun))
| /++
| Params:
| slices = One or more slices.
| +/
| @optmath void eachOnBorder(Slices...)(Slices slices)
| if (allSatisfy!(isSlice, Slices))
| {
| import mir.ndslice.traits: isContiguousSlice;
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| if (!slices[0].anyEmpty)
| {
| alias N = DimensionCount!(Slices[0]);
| static if (N == 1)
| {
| fun(frontOf!slices);
| if (slices[0].length > 1)
| fun(backOf!slices);
| }
| else
| static if (anySatisfy!(isContiguousSlice, Slices))
| {
| import mir.ndslice.topology: canonical;
| template f(size_t i)
| {
| static if (isContiguousSlice!(Slices[i]))
| auto f () { return canonical(slices[i]); }
| else
| alias f = slices[i];
| }
| eachOnBorder(staticMap!(f, Iota!(Slices.length)));
| }
| else
| {
| foreach (dimension; Iota!N)
| {
| eachImpl!fun(frontOfD!(dimension, slices));
| foreach_reverse(ref slice; slices)
| slice.popFront!dimension;
| if (slices[0].empty!dimension)
| return;
| eachImpl!fun(backOfD!(dimension, slices));
| foreach_reverse(ref slice; slices)
| slice.popBack!dimension;
| if (slices[0].empty!dimension)
| return;
| }
| }
| }
| }
| else
| alias eachOnBorder = .eachOnBorder!(naryFun!fun);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : repeat, iota;
|
| auto sl = [3, 4].iota.slice;
| auto zeros = repeat(0, [3, 4]);
|
| sl.eachOnBorder!"a = b"(zeros);
|
| assert(sl ==
| [[0, 0, 0 ,0],
| [0, 5, 6, 0],
| [0, 0, 0 ,0]]);
|
| sl.eachOnBorder!"a = 1";
| sl[0].eachOnBorder!"a = 2";
|
| assert(sl ==
| [[2, 1, 1, 2],
| [1, 5, 6, 1],
| [1, 1, 1 ,1]]);
|}
|
|/++
|The call `each!(fun)(slice1, ..., sliceN)`
|evaluates `fun` for each set of elements `x1, ..., xN` in
|`slice1, ..., sliceN` respectively.
|
|`each` allows iterating multiple slices in lockstep.
|Params:
| fun = A function.
|Note:
| $(NDSLICEREF dynamic, transposed) and
| $(NDSLICEREF topology, pack) can be used to specify dimensions.
|See_Also:
|    This is functionally similar to $(LREF reduce) but has no seed.
|+/
|template each(alias fun)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun))
| {
| /++
| Params:
| slices = One or more slices, ranges, and arrays.
| +/
| @optmath auto each(Slices...)(scope Slices slices)
| if (Slices.length && !is(Slices[0] : Chequer))
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| static if (areAllContiguousSlices!Slices)
| {
| import mir.ndslice.topology: flattened;
| .each!fun(allFlattened!(allLightScope!slices));
| }
| else
| {
| if (slices[0].anyEmpty)
| return;
| eachImpl!fun(allLightScope!slices);
| }
| }
|
| /++
| Iterates elements of selected $(LREF Chequer) color.
| Params:
| color = $(LREF Chequer).
| slices = One or more slices.
| +/
| @optmath auto each(Slices...)(Chequer color, scope Slices slices)
| if (Slices.length && allSatisfy!(isSlice, Slices))
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| if (slices[0].anyEmpty)
| return;
| chequerEachImpl!fun(color, allLightScope!slices);
| }
| }
| else
| alias each = .each!(naryFun!fun);
|}
|
|/// Ranges and arrays
|version(mir_test)
|unittest
|{
| auto ar = [1, 2, 3];
| ar.each!"a *= 2";
| assert (ar == [2, 4, 6]);
|}
|
|/// Single slice, multiply-add
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| auto sl = iota(2, 3).as!double.slice;
|
| sl.each!((ref a) { a = a * 10 + 5; });
|
| assert(sl ==
| [[ 5, 15, 25],
| [35, 45, 55]]);
|}
|
|/// Swap two slices
|version(mir_test)
|unittest
|{
| import mir.utility : swap;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| auto a = iota([2, 3], 0).as!double.slice;
| //| 10 11 12 |
| //| 13 14 15 |
| auto b = iota([2, 3], 10).as!double.slice;
|
| each!swap(a, b);
|
| assert(a == iota([2, 3], 10));
| assert(b == iota([2, 3], 0));
|}
|
|/// Swap two zipped slices
|version(mir_test)
|unittest
|{
| import mir.utility : swap;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, zip, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| auto a = iota([2, 3], 0).as!double.slice;
| //| 10 11 12 |
| //| 13 14 15 |
| auto b = iota([2, 3], 10).as!double.slice;
|
| auto z = zip(a, b);
|
| z.each!(z => swap(z.a, z.b));
|
| assert(a == iota([2, 3], 10));
| assert(b == iota([2, 3], 0));
|}
|
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| size_t i;
| iota(0, 2).each!((a){i++;});
| assert(i == 0);
|}
|
|/++
|The call `eachUploPair!(fun)(matrix)`
|evaluates `fun` for each pair (`matrix[j, i]`, `matrix[i, j]`),
|for i < j (default) or i <= j (if includeDiagonal is true).
|
|Params:
| fun = A function.
| includeDiagonal = true if applying function to diagonal,
| false (default) otherwise.
|+/
|template eachUploPair(alias fun, bool includeDiagonal = false)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun))
| {
| /++
| Params:
| matrix = Square matrix.
| +/
| auto eachUploPair(Iterator, SliceKind kind)(Slice!(Iterator, 2, kind) matrix)
| in
| {
| assert(matrix.length!0 == matrix.length!1, "matrix must be square.");
| }
| do
| {
| static if (kind == Contiguous)
| {
| import mir.ndslice.topology: canonical;
| .eachUploPair!(fun, includeDiagonal)(matrix.canonical);
| }
| else
| {
| static if (includeDiagonal == true)
| {
| if (matrix.length) do
| {
| eachImpl!fun(matrix.lightScope.front!0, matrix.lightScope.front!1);
| matrix.popFront!1;
| matrix.popFront!0;
| // hint for optimizer
| matrix._lengths[1] = matrix._lengths[0];
| }
| while (matrix.length);
| }
| else
| {
| if (matrix.length) for(;;)
| {
| assert(!matrix.empty!0);
| assert(!matrix.empty!1);
| auto l = matrix.lightScope.front!1;
| auto u = matrix.lightScope.front!0;
| matrix.popFront!1;
| matrix.popFront!0;
| l.popFront;
| u.popFront;
| // hint for optimizer
| matrix._lengths[1] = matrix._lengths[0] = l._lengths[0] = u._lengths[0];
| if (u.length == 0)
| break;
| eachImpl!fun(u, l);
| }
| }
| }
| }
| }
| else
| {
| alias eachUploPair = .eachUploPair!(naryFun!fun, includeDiagonal);
| }
|}
|
|/// Transpose matrix in place.
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota, universal;
| import mir.ndslice.dynamic: transposed;
| import mir.utility: swap;
|
| auto m = iota(4, 4).slice;
|
| m.eachUploPair!swap;
|
| assert(m == iota(4, 4).universal.transposed);
|}
|
|/// Reflect Upper matrix part to lower part.
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota, universal;
| import mir.ndslice.dynamic: transposed;
| import mir.utility: swap;
|
| // 0 1 2
| // 3 4 5
| // 6 7 8
| auto m = iota(3, 3).slice;
|
| m.eachUploPair!((u, ref l) { l = u; });
|
| assert(m == [
| [0, 1, 2],
| [1, 4, 5],
| [2, 5, 8]]);
|}
|
|/// Fill lower triangle and diagonal with zeroes.
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| // 1 2 3
| // 4 5 6
| // 7 8 9
| auto m = iota([3, 3], 1).slice;
|
| m.eachUploPair!((u, ref l) { l = 0; }, true);
|
| assert(m == [
| [0, 2, 3],
| [0, 0, 6],
| [0, 0, 0]]);
|}
|
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| // 0 1 2
| // 3 4 5
| // 6 7 8
| auto m = iota(3, 3).slice;
| m.eachUploPair!((u, ref l) { l = l + 1; }, true);
| assert(m == [
| [1, 1, 2],
| [4, 5, 5],
| [7, 8, 9]]);
|}
|
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| // 0 1 2
| // 3 4 5
| // 6 7 8
| auto m = iota(3, 3).slice;
| m.eachUploPair!((u, ref l) { l = l + 1; }, false);
|
| assert(m == [
| [0, 1, 2],
| [4, 4, 5],
| [7, 8, 8]]);
|}
|
|/++
|Checks if the matrix is symmetric.
|+/
|template isSymmetric(alias fun = "a == b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun))
| /++
| Params:
| matrix = 2D ndslice.
| +/
| bool isSymmetric(Iterator, SliceKind kind)(Slice!(Iterator, 2, kind) matrix)
| {
| static if (kind == Contiguous)
| {
| import mir.ndslice.topology: canonical;
| return .isSymmetric!fun(matrix.canonical);
| }
| else
| {
| if (matrix.length!0 != matrix.length!1)
| return false;
| if (matrix.length) do
| {
| if (!allImpl!fun(matrix.lightScope.front!0, matrix.lightScope.front!1))
| {
| return false;
| }
| matrix.popFront!1;
| matrix.popFront!0;
| matrix._lengths[1] = matrix._lengths[0];
| }
| while (matrix.length);
| return true;
| }
| }
| else
| alias isSymmetric = .isSymmetric!(naryFun!fun);
|}
|
|///
|version(mir_test)
|unittest
|{
| import mir.ndslice.topology: iota;
| assert(iota(2, 2).isSymmetric == false);
|
| assert(
| [1, 2,
| 2, 3].sliced(2, 2).isSymmetric == true);
|}
|
|bool minPosImpl(alias fun, Iterator, size_t N, SliceKind kind)(scope ref size_t[N] backwardIndex, scope ref Iterator iterator, Slice!(Iterator, N, kind) slice)
|{
| bool found;
| do
| {
| static if (slice.shape.length == 1)
| {
| if (fun(*slice._iterator, *iterator))
| {
| backwardIndex[0] = slice.length;
| iterator = slice._iterator;
| found = true;
| }
| }
| else
| {
| if (minPosImpl!(fun, LightScopeOf!Iterator, N - 1, kind)(backwardIndex[1 .. $], iterator, lightScope(slice).front))
| {
| backwardIndex[0] = slice.length;
| found = true;
| }
| }
| slice.popFront;
| }
| while(!slice.empty);
| return found;
|}
|
|bool[2] minmaxPosImpl(alias fun, Iterator, size_t N, SliceKind kind)(scope ref size_t[2][N] backwardIndex, scope ref Iterator[2] iterator, Slice!(Iterator, N, kind) slice)
|{
| bool[2] found;
| do
| {
| static if (slice.shape.length == 1)
| {
| if (fun(*slice._iterator, *iterator[0]))
| {
| backwardIndex[0][0] = slice.length;
| iterator[0] = slice._iterator;
| found[0] = true;
| }
| else
| if (fun(*iterator[1], *slice._iterator))
| {
| backwardIndex[0][1] = slice.length;
| iterator[1] = slice._iterator;
| found[1] = true;
| }
| }
| else
| {
| auto r = minmaxPosImpl!(fun, LightScopeOf!Iterator, N - 1, kind)(backwardIndex[1 .. $], iterator, lightScope(slice).front);
| if (r[0])
| {
| backwardIndex[0][0] = slice.length;
| }
| if (r[1])
| {
| backwardIndex[0][1] = slice.length;
| }
| }
| slice.popFront;
| }
| while(!slice.empty);
| return found;
|}
|
|/++
|Finds positions (ndslices) such that
|`position[0].first` is the minimal and `position[1].first` is the maximal element in the slice.
|
|Each position is a sub-ndslice of the same dimension anchored at the bottom-right (last) corner of the slice.
|
|Params:
| pred = A predicate.
|
|See_also:
| $(LREF minmaxIndex),
| $(LREF minPos),
| $(LREF maxPos),
| $(NDSLICEREF slice, Slice.backward).
|+/
|template minmaxPos(alias pred = "a < b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slice = ndslice.
| Returns:
| 2 subslices with minimal and maximal `first` elements.
| +/
| @optmath Slice!(Iterator, N, kind == Contiguous && N > 1 ? Canonical : kind)[2]
| minmaxPos(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| typeof(return) pret;
| if (!slice.anyEmpty)
| {
| size_t[2][N] ret;
| auto scopeSlice = lightScope(slice);
| auto it = scopeSlice._iterator;
| LightScopeOf!Iterator[2] iterator = [it, it];
| minmaxPosImpl!(pred, LightScopeOf!Iterator, N, kind)(ret, iterator, scopeSlice);
| foreach (i; Iota!N)
| {
| pret[0]._lengths[i] = ret[i][0];
| pret[1]._lengths[i] = ret[i][1];
| }
| pret[0]._iterator = slice._iterator + (iterator[0] - scopeSlice._iterator);
| pret[1]._iterator = slice._iterator + (iterator[1] - scopeSlice._iterator);
| }
| auto strides = slice.strides;
| foreach(i; Iota!(0, pret[0].S))
| {
| pret[0]._strides[i] = strides[i];
| pret[1]._strides[i] = strides[i];
| }
| return pret;
| }
| else
| alias minmaxPos = .minmaxPos!(naryFun!pred);
|}
|
|///
|version(mir_test)
|unittest
|{
| auto s = [
| 2, 6, 4, -3,
| 0, -4, -3, 3,
| -3, -2, 7, 2,
| ].sliced(3, 4);
|
| auto pos = s.minmaxPos;
|
| assert(pos[0] == s[$ - 2 .. $, $ - 3 .. $]);
| assert(pos[1] == s[$ - 1 .. $, $ - 2 .. $]);
|
| assert(pos[0].first == -4);
| assert(s.backward(pos[0].shape) == -4);
| assert(pos[1].first == 7);
| assert(s.backward(pos[1].shape) == 7);
|}
|
|/++
|Finds indices such that
|`slice[indices[0]]` is the minimal and `slice[indices[1]]` is the maximal element in the slice.
|
|Params:
| pred = A predicate.
|
|See_also:
| $(LREF minmaxIndex),
| $(LREF minPos),
| $(LREF maxPos),
| $(REF Slice.backward, mir,ndslice,slice).
|+/
|template minmaxIndex(alias pred = "a < b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slice = ndslice.
| Returns:
|        Indices of the minimal and the maximal elements; each index equals `size_t.max` if the slice is empty.
| +/
| @optmath size_t[N][2] minmaxIndex(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| typeof(return) pret = size_t.max;
| if (!slice.anyEmpty)
| {
| auto shape = slice.shape;
| size_t[2][N] ret;
| foreach (i; Iota!N)
| {
| ret[i][1] = ret[i][0] = shape[i];
| }
| auto scopeSlice = lightScope(slice);
| auto it = scopeSlice._iterator;
| LightScopeOf!Iterator[2] iterator = [it, it];
| minmaxPosImpl!(pred, LightScopeOf!Iterator, N, kind)(ret, iterator, scopeSlice);
| foreach (i; Iota!N)
| {
| pret[0][i] = slice._lengths[i] - ret[i][0];
| pret[1][i] = slice._lengths[i] - ret[i][1];
| }
| }
| return pret;
| }
| else
| alias minmaxIndex = .minmaxIndex!(naryFun!pred);
|}
|
|///
|version(mir_test)
|unittest
|{
| auto s = [
| 2, 6, 4, -3,
| 0, -4, -3, 3,
| -3, -2, 7, 8,
| ].sliced(3, 4);
|
| auto indices = s.minmaxIndex;
|
| assert(indices == [[1, 1], [2, 3]]);
| assert(s[indices[0]] == -4);
| assert(s[indices[1]] == 8);
|}
|
|/++
|Finds a backward index such that
|`slice.backward(index)` is minimal(maximal).
|
|Params:
| pred = A predicate.
|
|See_also:
| $(LREF minIndex),
| $(LREF maxPos),
| $(LREF maxIndex),
| $(REF Slice.backward, mir,ndslice,slice).
|+/
|template minPos(alias pred = "a < b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slice = ndslice.
| Returns:
|        Sub-ndslice (position) whose `first` element is minimal (maximal).
|        The position is empty if the slice is empty.
| +/
| @optmath Slice!(Iterator, N, kind == Contiguous && N > 1 ? Canonical : kind)
| minPos(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| typeof(return) ret;
| auto iterator = slice.lightScope._iterator;
| if (!slice.anyEmpty)
| {
| minPosImpl!(pred, LightScopeOf!Iterator, N, kind)(ret._lengths, iterator, lightScope(slice));
| ret._iterator = slice._iterator + (iterator - slice.lightScope._iterator);
| }
| auto strides = slice.strides;
| foreach(i; Iota!(0, ret.S))
| {
| ret._strides[i] = strides[i];
| }
| return ret;
| }
| else
| alias minPos = .minPos!(naryFun!pred);
|}
|
|/// ditto
|template maxPos(alias pred = "a < b")
|{
| import mir.functional: naryFun, reverseArgs;
| alias maxPos = minPos!(reverseArgs!(naryFun!pred));
|}
|
|///
|version(mir_test)
|unittest
|{
| auto s = [
| 2, 6, 4, -3,
| 0, -4, -3, 3,
| -3, -2, 7, 2,
| ].sliced(3, 4);
|
| auto pos = s.minPos;
|
| assert(pos == s[$ - 2 .. $, $ - 3 .. $]);
| assert(pos.first == -4);
| assert(s.backward(pos.shape) == -4);
|
| pos = s.maxPos;
|
| assert(pos == s[$ - 1 .. $, $ - 2 .. $]);
| assert(pos.first == 7);
| assert(s.backward(pos.shape) == 7);
|}
|
|/++
|Finds an index such that
|`slice[index]` is minimal(maximal).
|
|Params:
| pred = A predicate.
|
|See_also:
| $(LREF minIndex),
| $(LREF maxPos),
| $(LREF maxIndex).
|+/
|template minIndex(alias pred = "a < b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slice = ndslice.
| Returns:
| Multidimensional index such that element is minimal(maximal).
| Index elements equal to `size_t.max`, if slice is empty.
| +/
| @optmath size_t[N] minIndex(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| size_t[N] ret = size_t.max;
| if (!slice.anyEmpty)
| {
| ret = slice.shape;
| auto scopeSlice = lightScope(slice);
| auto iterator = scopeSlice._iterator;
| minPosImpl!(pred, LightScopeOf!Iterator, N, kind)(ret, iterator, scopeSlice);
| foreach (i; Iota!N)
| ret[i] = slice._lengths[i] - ret[i];
| }
| return ret;
| }
| else
| alias minIndex = .minIndex!(naryFun!pred);
|}
|
|/// ditto
|template maxIndex(alias pred = "a < b")
|{
| import mir.functional: naryFun, reverseArgs;
| alias maxIndex = minIndex!(reverseArgs!(naryFun!pred));
|}
|
|///
|version(mir_test)
|unittest
|{
| auto s = [
| 2, 6, 4, -3,
| 0, -4, -3, 3,
| -3, -2, 7, 8,
| ].sliced(3, 4);
|
| auto index = s.minIndex;
|
| assert(index == [1, 1]);
| assert(s[index] == -4);
|
| index = s.maxIndex;
|
| assert(index == [2, 3]);
| assert(s[index] == 8);
|}
|
|///
|version(mir_test)
|unittest
|{
| auto s = [
| -8, 6, 4, -3,
| 0, -4, -3, 3,
| -3, -2, 7, 8,
| ].sliced(3, 4);
|
| auto index = s.minIndex;
|
| assert(index == [0, 0]);
| assert(s[index] == -8);
|}
|
|version(mir_test)
|unittest
|{
| auto s = [
| 0, 1, 2, 3,
| 4, 5, 6, 7,
| 8, 9, 10, 11
| ].sliced(3, 4);
|
| auto index = s.minIndex;
| assert(index == [0, 0]);
| assert(s[index] == 0);
|
| index = s.maxIndex;
| assert(index == [2, 3]);
| assert(s[index] == 11);
|}
|
|bool findImpl(alias fun, size_t N, Slices...)(scope ref size_t[N] backwardIndex, Slices slices)
| if (Slices.length)
|{
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I))
| {
| auto cnt = BitSliceAccelerator!(Field, I)(slices[0]).cttz;
|        if (cnt == -1)
| return false;
| backwardIndex[0] = slices[0].length - cnt;
| }
| else
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I))
| {
| import mir.ndslice.topology: retro;
| auto cnt = BitSliceAccelerator!(Field, I)(slices[0].retro).ctlz;
|        if (cnt == -1)
| return false;
| backwardIndex[0] = slices[0].length - cnt;
| }
| else
| {
| do
| {
| static if (DimensionCount!(Slices[0]) == 1)
| {
| if (fun(frontOf!slices))
| {
| backwardIndex[0] = slices[0].length;
| return true;
| }
| }
| else
| {
| if (findImpl!fun(backwardIndex[1 .. $], frontOf!slices))
| {
| backwardIndex[0] = slices[0].length;
| return true;
| }
| }
| foreach_reverse(ref slice; slices)
| slice.popFront;
| }
| while(!slices[0].empty);
| return false;
| }
|}
|
|/++
|Finds an index such that
|`pred(slices[0][index], ..., slices[$-1][index])` is `true`.
|
|Params:
| pred = A predicate.
|
|See_also:
| $(LREF find),
| $(LREF any).
|Optimization:
| `findIndex!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation).
|+/
|template findIndex(alias pred)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slices = One or more slices.
| Returns:
| Multidimensional index such that the predicate is true.
| Index equals `size_t.max`, if the predicate evaluates `false` for all indices.
| Constraints:
| All slices must have the same shape.
| +/
| @optmath Select!(DimensionCount!(Slices[0]) > 1, size_t[DimensionCount!(Slices[0])], size_t) findIndex(Slices...)(Slices slices)
| if (Slices.length)
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| size_t[DimensionCount!(Slices[0])] ret = -1;
| auto lengths = slices[0].shape;
| if (!slices[0].anyEmpty && findImpl!pred(ret, allLightScope!slices))
| foreach (i; Iota!(DimensionCount!(Slices[0])))
| ret[i] = lengths[i] - ret[i];
| static if (DimensionCount!(Slices[0]) > 1)
| return ret;
| else
| return ret[0];
| }
| else
| alias findIndex = .findIndex!(naryFun!pred);
|}
|
|/// Ranges and arrays
|version(mir_test)
|unittest
|{
| import std.range : iota;
|    // 0 1 2 3 4
| auto sl = iota(5);
| size_t index = sl.findIndex!"a == 3";
|
| assert(index == 3);
| assert(sl[index] == 3);
|
| assert(sl.findIndex!(a => a == 8) == size_t.max);
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3);
| size_t[2] index = sl.findIndex!(a => a == 3);
|
| assert(sl[index] == 3);
|
| index = sl.findIndex!"a == 6";
| assert(index[0] == size_t.max);
| assert(index[1] == size_t.max);
|}
|
|/++
|Finds a backward index such that
|`pred(slices[0].backward(index), ..., slices[$-1].backward(index))` is `true`.
|
|Params:
| pred = A predicate.
|
|Optimization:
| To check if any element was found
| use the last dimension (row index).
| This will slightly optimize the code.
|--------
|if (backwardIndex)
|{
| auto elem1 = slice1.backward(backwardIndex);
| //...
| auto elemK = sliceK.backward(backwardIndex);
|}
|else
|{
| // not found
|}
|--------
|
|See_also:
| $(LREF findIndex),
| $(LREF any),
| $(REF Slice.backward, mir,ndslice,slice).
|
|Optimization:
| `find!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation).
|+/
|template find(alias pred)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slices = One or more slices.
| Returns:
| Multidimensional backward index such that the predicate is true.
| Backward index equals zeros, if the predicate evaluates `false` for all indices.
| Constraints:
| All slices must have the same shape.
| +/
| @optmath Select!(DimensionCount!(Slices[0]) > 1, size_t[DimensionCount!(Slices[0])], size_t) find(Slices...)(auto ref Slices slices)
| if (Slices.length && allSatisfy!(hasShape, Slices))
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| size_t[DimensionCount!(Slices[0])] ret;
| if (!slices[0].anyEmpty)
| findImpl!pred(ret, allLightScope!slices);
| static if (DimensionCount!(Slices[0]) > 1)
| return ret;
| else
| return ret[0];
| }
| else
| alias find = .find!(naryFun!pred);
|}
|
|/// Ranges and arrays
|version(mir_test)
|unittest
|{
| import std.range : iota;
|
| auto sl = iota(10);
| size_t index = sl.find!"a == 3";
|
| assert(sl[$ - index] == 3);
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3);
| size_t[2] bi = sl.find!"a == 3";
| assert(sl.backward(bi) == 3);
| assert(sl[$ - bi[0], $ - bi[1]] == 3);
|
| bi = sl.find!"a == 6";
| assert(bi[0] == 0);
| assert(bi[1] == 0);
|}
|
|/// Multiple slices
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| // 0 1 2
| // 3 4 5
| auto a = iota(2, 3);
| // 10 11 12
| // 13 14 15
| auto b = iota([2, 3], 10);
|
| size_t[2] bi = find!((a, b) => a * b == 39)(a, b);
| assert(a.backward(bi) == 3);
| assert(b.backward(bi) == 13);
|}
|
|/// Zipped slices
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, zip;
|
| // 0 1 2
| // 3 4 5
| auto a = iota(2, 3);
| // 10 11 12
| // 13 14 15
| auto b = iota([2, 3], 10);
|
| size_t[2] bi = zip!true(a, b).find!"a.a * a.b == 39";
|
| assert(a.backward(bi) == 3);
| assert(b.backward(bi) == 13);
|}
|
|/// Mutation on-the-fly
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3).as!double.slice;
|
| static bool pred(T)(ref T a)
| {
| if (a == 5)
| return true;
| a = 8;
| return false;
| }
|
| size_t[2] bi = sl.find!pred;
|
| assert(bi == [1, 1]);
| assert(sl.backward(bi) == 5);
|
| // sl was changed
| assert(sl == [[8, 8, 8],
| [8, 8, 5]]);
|}
|
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| size_t i;
| size_t[2] bi = iota(2, 0).find!((elem){i++; return true;});
| assert(i == 0);
| assert(bi == [0, 0]);
|}
|
|size_t anyImpl(alias fun, Slices...)(scope Slices slices)
| if (Slices.length)
|{
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I))
| {
| return BitSliceAccelerator!(Field, I)(slices[0]).any;
| }
| else
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I))
| {
| // pragma(msg, S);
| import mir.ndslice.topology: retro;
| return .anyImpl!fun(lightScope(slices[0]).retro);
| }
| else
| {
| do
| {
| static if (DimensionCount!(Slices[0]) == 1)
| {
| if (fun(frontOf!slices))
| return true;
| }
| else
| {
| if (anyImpl!fun(frontOf!slices))
| return true;
| }
| foreach_reverse(ref slice; slices)
| slice.popFront;
| }
| while(!slices[0].empty);
| return false;
| }
|}
|
|/++
|Like $(LREF find), but only returns whether or not the search was successful.
|
|Params:
| pred = The predicate.
|Optimization:
| `any!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation).
|+/
|template any(alias pred = "a")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slices = One or more slices, ranges, and arrays.
| Returns:
| `true` if the search was successful and `false` otherwise.
| Constraints:
| All slices must have the same shape.
| +/
| @optmath bool any(Slices...)(scope Slices slices)
| if ((Slices.length == 1 || !__traits(isSame, pred, "a")) && Slices.length)
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| static if (areAllContiguousSlices!Slices)
| {
| import mir.ndslice.topology: flattened;
| return .any!pred(allFlattened!(allLightScope!slices));
| }
| else
| {
| return !slices[0].anyEmpty && anyImpl!pred(allLightScope!slices);
| }
| }
| else
| alias any = .any!(naryFun!pred);
|}
|
|/// Ranges and arrays
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import std.range : iota;
| // 0 1 2 3 4 5
| auto r = iota(6);
|
| assert(r.any!"a == 3");
| assert(!r.any!"a == 6");
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3);
|
| assert(sl.any!"a == 3");
| assert(!sl.any!"a == 6");
|}
|
|/// Multiple slices
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| // 0 1 2
| // 3 4 5
| auto a = iota(2, 3);
| // 10 11 12
| // 13 14 15
| auto b = iota([2, 3], 10);
|
| assert(any!((a, b) => a * b == 39)(a, b));
|}
|
|/// Zipped slices
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, zip;
|
| // 0 1 2
| // 3 4 5
| auto a = iota(2, 3);
| // 10 11 12
| // 13 14 15
| auto b = iota([2, 3], 10);
|
| // slices must have the same strides
|
| assert(zip!true(a, b).any!"a.a * a.b == 39");
|}
|
|/// Mutation on-the-fly
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3).as!double.slice;
|
| static bool pred(T)(ref T a)
| {
| if (a == 5)
| return true;
| a = 8;
| return false;
| }
|
| assert(sl.any!pred);
|
| // sl was changed
| assert(sl == [[8, 8, 8],
| [8, 8, 5]]);
|}
|
|size_t allImpl(alias fun, Slices...)(scope Slices slices)
| if (Slices.length)
|{
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I))
| {
| return BitSliceAccelerator!(LightScopeOf!Field, I)(lightScope(slices[0])).all;
| }
| else
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I))
| {
| // pragma(msg, S);
| import mir.ndslice.topology: retro;
| return .allImpl!fun(lightScope(slices[0]).retro);
| }
| else
| {
| do
| {
| static if (DimensionCount!(Slices[0]) == 1)
| {
0000000| if (!fun(frontOf!slices))
0000000| return false;
| }
| else
| {
| if (!allImpl!fun(frontOf!slices))
| return false;
| }
0000000| foreach_reverse(ref slice; slices)
0000000| slice.popFront;
| }
0000000| while(!slices[0].empty);
0000000| return true;
| }
|}
|
|/++
|Checks if all of the elements verify `pred`.
|
|Params:
| pred = The predicate.
|Optimization:
| `all!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation).
|+/
|template all(alias pred = "a")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slices = One or more slices.
| Returns:
|        `true` if all of the elements verify `pred` and `false` otherwise.
| Constraints:
| All slices must have the same shape.
| +/
| @optmath bool all(Slices...)(scope Slices slices)
| if ((Slices.length == 1 || !__traits(isSame, pred, "a")) && Slices.length)
| {
| static if (Slices.length > 1)
0000000| slices.checkShapesMatch;
| static if (areAllContiguousSlices!Slices)
| {
| import mir.ndslice.topology: flattened;
0000000| return .all!pred(allFlattened!(allLightScope!slices));
| }
| else
| {
0000000| return slices[0].anyEmpty || allImpl!pred(allLightScope!slices);
| }
| }
| else
| alias all = .all!(naryFun!pred);
|}
|
|/// Ranges and arrays
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import std.range : iota;
| // 0 1 2 3 4 5
| auto r = iota(6);
|
| assert(r.all!"a < 6");
| assert(!r.all!"a < 5");
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3);
|
| assert(sl.all!"a < 6");
| assert(!sl.all!"a < 5");
|}
|
|/// Multiple slices
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3);
|
| assert(all!"a - b == 0"(sl, sl));
|}
|
|/// Zipped slices
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, zip;
|
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3);
|
|
| assert(zip!true(sl, sl).all!"a.a - a.b == 0");
|}
|
|/// Mutation on-the-fly
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3).as!double.slice;
|
| static bool pred(T)(ref T a)
| {
| if (a < 4)
| {
| a = 8;
| return true;
| }
| return false;
| }
|
| assert(!sl.all!pred);
|
| // sl was changed
| assert(sl == [[8, 8, 8],
| [8, 4, 5]]);
|}
|
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| size_t i;
| assert(iota(2, 0).all!((elem){i++; return true;}));
| assert(i == 0);
|}
|
|/++
|Counts elements in slices according to `fun`.
|Params:
| fun = A predicate.
|
|Optimization:
| `count!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation).
|+/
|template count(alias fun)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun))
| /++
| Params:
| slices = One or more slices, ranges, and arrays.
|
|    Returns: The number of elements for which `fun` evaluates to true.
|
| Constraints:
| All slices must have the same shape.
| +/
| @optmath size_t count(Slices...)(scope Slices slices)
| if (Slices.length)
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| static if (__traits(isSame, fun, naryFun!"true"))
| {
| return slices[0].elementCount;
| }
| else
| static if (areAllContiguousSlices!Slices)
| {
| import mir.ndslice.topology: flattened;
| return .count!fun(allFlattened!(allLightScope!slices));
| }
| else
| {
| if (slices[0].anyEmpty)
| return 0;
| return countImpl!(fun)(allLightScope!slices);
| }
| }
| else
| alias count = .count!(naryFun!fun);
|
|}
|
|/// Ranges and arrays
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import std.range : iota;
| // 0 1 2 3 4 5
| auto r = iota(6);
|
| assert(r.count!"true" == 6);
| assert(r.count!"a" == 5);
| assert(r.count!"a % 2" == 3);
|}
|
|/// Single slice
|version(mir_test)
|unittest
|{
| import mir.ndslice.topology : iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| auto sl = iota(2, 3);
|
| assert(sl.count!"true" == 6);
| assert(sl.count!"a" == 5);
| assert(sl.count!"a % 2" == 3);
|}
|
|/// Accelerated set bit count
|version(mir_test)
|unittest
|{
| import mir.ndslice.topology: retro, iota, bitwise;
| import mir.ndslice.allocation: slice;
|
| //| 0 1 2 |
| //| 3 4 5 |
| auto sl = iota!size_t(2, 3).bitwise;
|
| assert(sl.count!"true" == 6 * size_t.sizeof * 8);
|
| assert(sl.slice.count!"a" == 7);
|
| // accelerated
| assert(sl.count!"a" == 7);
| assert(sl.retro.count!"a" == 7);
|
| auto sl2 = iota!ubyte([6], 128).bitwise;
| // accelerated
| assert(sl2.count!"a" == 13);
| assert(sl2[4 .. $].count!"a" == 13);
| assert(sl2[4 .. $ - 1].count!"a" == 12);
| assert(sl2[4 .. $ - 1].count!"a" == 12);
| assert(sl2[41 .. $ - 1].count!"a" == 1);
|}
|
|unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: bitwise, assumeFieldsHaveZeroShift;
| auto sl = slice!uint([6]).bitwise;
| auto slb = slice!ubyte([6]).bitwise;
| slb[4] = true;
| auto d = slb[4];
| auto c = assumeFieldsHaveZeroShift(slb & ~slb);
| // pragma(msg, typeof(c));
| assert(!sl.any);
| assert((~sl).all);
| // pragma(msg, typeof(~slb));
| // pragma(msg, typeof(~slb));
| // assert(sl.findIndex);
|}
|
|/++
|Compares two or more slices for equality, as defined by predicate `pred`.
|
|See_also: $(NDSLICEREF slice, Slice.opEquals)
|
|Params:
| pred = The predicate.
|+/
|template equal(alias pred = "a == b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| {
| /++
| Params:
| slices = Two or more ndslices, ranges, and arrays.
|
| Returns:
| `true` if all corresponding elements verify `pred` and `false` otherwise.
| +/
| bool equal(Slices...)(scope Slices slices)
| if (Slices.length >= 2)
| {
| import mir.internal.utility;
| static if (allSatisfy!(hasShape, Slices))
| {
0000000| auto shape0 = slices[0].shape;
| enum N = DimensionCount!(Slices[0]);
0000000| foreach (ref slice; slices[1 .. $])
| {
0000000| if (slice.shape != shape0)
0000000| goto False;
| }
0000000| return all!pred(allLightScope!slices);
| }
| else
| {
| for(;;)
| {
| auto empty = slices[0].empty;
| foreach (ref slice; slices[1 .. $])
| {
| if (slice.empty != empty)
| goto False;
| }
| if (empty)
| return true;
| if (!pred(frontOf!slices))
| goto False;
| foreach (ref slice; slices)
| slice.popFront;
| }
| }
0000000| False: return false;
| }
| }
| else
| alias equal = .equal!(naryFun!pred);
|}
|
|/// Ranges and arrays
|@safe pure nothrow
|version(mir_test) unittest
|{
| import std.range : iota;
| auto r = iota(6);
| assert(r.equal([0, 1, 2, 3, 4, 5]));
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : iota;
|
| // 0 1 2
| // 3 4 5
| auto sl1 = iota(2, 3);
| // 1 2 3
| // 4 5 6
| auto sl2 = iota([2, 3], 1);
|
| assert(equal(sl1, sl1));
| assert(sl1 == sl1); //can also use opEquals for two Slices
| assert(equal!"2 * a == b + c"(sl1, sl1, sl1));
|
| assert(equal!"a < b"(sl1, sl2));
|
| assert(!equal(sl1[0 .. $ - 1], sl1));
| assert(!equal(sl1[0 .. $, 0 .. $ - 1], sl1));
|}
|
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.math.common: approxEqual;
| import mir.ndslice.allocation: rcslice;
| import mir.ndslice.topology: as, iota;
|
| auto x = 5.iota.as!double.rcslice;
| auto y = x.rcslice;
|
| assert(equal(x, y));
| assert(equal!approxEqual(x, y));
|}
|
|ptrdiff_t cmpImpl(alias pred, A, B)
| (scope A sl1, scope B sl2)
| if (DimensionCount!A == DimensionCount!B)
|{
| for (;;)
| {
| static if (DimensionCount!A == 1)
| {
| import mir.functional : naryFun;
| if (naryFun!pred(sl1.front, sl2.front))
| return -1;
| if (naryFun!pred(sl2.front, sl1.front))
| return 1;
| }
| else
| {
| if (auto res = .cmpImpl!pred(sl1.front, sl2.front))
| return res;
| }
| sl1.popFront;
| if (sl1.empty)
| return -cast(ptrdiff_t)(sl2.length > 1);
| sl2.popFront;
| if (sl2.empty)
| return 1;
| }
|}
|
|/++
|Performs three-way recursive lexicographical comparison on two slices according to predicate `pred`.
|Iterating `sl1` and `sl2` in lockstep, `cmp` compares each `N-1` dimensional element `e1` of `sl1`
|with the corresponding element `e2` in `sl2` recursively.
|If one of the slices has been exhausted, `cmp` returns a negative value if `sl1` has fewer elements than `sl2`,
|a positive value if `sl1` has more elements than `sl2`,
|and `0` if the ranges have the same number of elements.
|
|Params:
| pred = The predicate.
|+/
|template cmp(alias pred = "a < b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| sl1 = First slice, range, or array.
| sl2 = Second slice, range, or array.
|
| Returns:
| `0` if both ranges compare equal.
| Negative value if the first differing element of `sl1` is less than the corresponding
| element of `sl2` according to `pred`.
| Positive value if the first differing element of `sl2` is less than the corresponding
| element of `sl1` according to `pred`.
| +/
| auto cmp(A, B)
| (scope A sl1, scope B sl2)
| if (DimensionCount!A == DimensionCount!B)
| {
| auto b = sl2.anyEmpty;
| if (sl1.anyEmpty)
| {
| if (!b)
| return -1;
| auto sh1 = sl1.shape;
| auto sh2 = sl2.shape;
| foreach (i; Iota!(DimensionCount!A))
| if (sh1[i] != sh2[i])
| return sh1[i] > sh2[i] ? 1 : -1;
| return 0;
| }
| if (b)
| return 1;
| return cmpImpl!pred(lightScope(sl1), lightScope(sl2));
| }
| else
| alias cmp = .cmp!(naryFun!pred);
|}
|
|/// Ranges and arrays
|@safe pure nothrow
|version(mir_test) unittest
|{
| import std.range : iota;
|
| // 0 1 2 3 4 5
| auto r1 = iota(0, 6);
| // 1 2 3 4 5 6
| auto r2 = iota(1, 7);
|
| assert(cmp(r1, r1) == 0);
| assert(cmp(r1, r2) < 0);
| assert(cmp!"a >= b"(r1, r2) > 0);
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| // 0 1 2
| // 3 4 5
| auto sl1 = iota(2, 3);
| // 1 2 3
| // 4 5 6
| auto sl2 = iota([2, 3], 1);
|
| assert(cmp(sl1, sl1) == 0);
| assert(cmp(sl1, sl2) < 0);
| assert(cmp!"a >= b"(sl1, sl2) > 0);
|}
|
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| auto sl1 = iota(2, 3);
| auto sl2 = iota([2, 3], 1);
|
| assert(cmp(sl1[0 .. $ - 1], sl1) < 0);
| assert(cmp(sl1, sl1[0 .. $, 0 .. $ - 1]) > 0);
|
| assert(cmp(sl1[0 .. $ - 2], sl1) < 0);
| assert(cmp(sl1, sl1[0 .. $, 0 .. $ - 3]) > 0);
| assert(cmp(sl1[0 .. $, 0 .. $ - 3], sl1[0 .. $, 0 .. $ - 3]) == 0);
| assert(cmp(sl1[0 .. $, 0 .. $ - 3], sl1[0 .. $ - 1, 0 .. $ - 3]) > 0);
| assert(cmp(sl1[0 .. $ - 1, 0 .. $ - 3], sl1[0 .. $, 0 .. $ - 3]) < 0);
|}
|
|size_t countImpl(alias fun, Slices...)(scope Slices slices)
|{
| size_t ret;
| alias S = Slices[0];
| import mir.functional: naryFun;
| import mir.ndslice.iterator: FieldIterator, RetroIterator;
| import mir.ndslice.field: BitField;
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I))
| {
| ret = BitSliceAccelerator!(Field, I)(slices[0]).ctpop;
| }
| else
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I))
| {
| // pragma(msg, S);
| import mir.ndslice.topology: retro;
| ret = .countImpl!fun(lightScope(slices[0]).retro);
| }
| else
| do
| {
| static if (DimensionCount!(Slices[0]) == 1)
| {
| if(fun(frontOf!slices))
| ret++;
| }
| else
| ret += .countImpl!fun(frontOf!slices);
| foreach_reverse(ref slice; slices)
| slice.popFront;
| }
| while(!slices[0].empty);
| return ret;
|}
|
|/++
|Returns: max length across all dimensions.
|+/
|size_t maxLength(S)(auto ref scope S s)
| if (hasShape!S)
|{
| auto shape = s.shape;
| size_t length = 0;
| foreach(i; Iota!(shape.length))
| if (shape[i] > length)
| length = shape[i];
| return length;
|}
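|
|// Editor's note: a minimal illustrative sketch, not part of the original
|// coverage run. It shows `maxLength` picking the largest extent of a shape.
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
|    import mir.ndslice.topology : iota;
|
|    assert(iota(2, 5).maxLength == 5);
|    assert(iota(7).maxLength == 7);
|}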
|
|/++
|The call `eachLower!(fun)(slice1, ..., sliceN)` evaluates `fun` on the lower
|triangle in `slice1, ..., sliceN` respectively.
|
|`eachLower` allows iterating multiple slices in lockstep.
|
|Params:
| fun = A function
|See_Also:
| This is functionally similar to $(LREF each).
|+/
|template eachLower(alias fun)
|{
| import mir.functional : naryFun;
|
| static if (__traits(isSame, naryFun!fun, fun))
| {
| /++
| Params:
| inputs = One or more two-dimensional slices and an optional
| integer, `k`.
|
| The value `k` determines which diagonals will have the function
| applied:
| For k = 0, the function is also applied to the main diagonal.
| For k = 1 (default), only the non-main diagonals below the main
| diagonal will have the function applied.
| For k > 1, fewer diagonals below the main diagonal will have the
| function applied.
| For k < 0, more diagonals above the main diagonal will have the
| function applied.
| +/
| void eachLower(Inputs...)(scope Inputs inputs)
| if (((Inputs.length > 1) &&
| (isIntegral!(Inputs[$ - 1]))) ||
| (Inputs.length))
| {
| import mir.ndslice.traits : isMatrix;
|
| size_t val;
|
| static if ((Inputs.length > 1) && (isIntegral!(Inputs[$ - 1])))
| {
| immutable(sizediff_t) k = inputs[$ - 1];
| alias Slices = Inputs[0..($ - 1)];
| alias slices = inputs[0..($ - 1)];
| }
| else
| {
| enum sizediff_t k = 1;
| alias Slices = Inputs;
| alias slices = inputs;
| }
|
| static assert (allSatisfy!(isMatrix, Slices),
| "eachLower: Every slice input must be a two-dimensional slice");
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| if (slices[0].anyEmpty)
| return;
|
| foreach(ref slice; slices)
| assert(!slice.empty);
|
| immutable(size_t) m = slices[0].length!0;
| immutable(size_t) n = slices[0].length!1;
|
| if ((n + k) < m)
| {
| val = m - (n + k);
| .eachImpl!fun(selectBackOf!(val, slices));
| }
|
| size_t i;
|
| if (k > 0)
| {
| foreach(ref slice; slices)
| slice.popFrontExactly!0(k);
| i = k;
| }
|
| do
| {
| val = i - k + 1;
| .eachImpl!fun(frontSelectFrontOf!(val, slices));
|
| foreach(ref slice; slices)
| slice.popFront!0;
| i++;
| } while ((i < (n + k)) && (i < m));
| }
| }
| else
| {
| alias eachLower = .eachLower!(naryFun!fun);
| }
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota, canonical, universal;
| alias AliasSeq(T...) = T;
|
| pure nothrow
| void test(alias func)()
| {
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = func(iota([3, 3], 1).slice);
| m.eachLower!"a = 0"(0);
| assert(m == [
| [0, 2, 3],
| [0, 0, 6],
| [0, 0, 0]]);
| }
|
| @safe pure nothrow @nogc
| T identity(T)(T x)
| {
| return x;
| }
|
| alias kinds = AliasSeq!(identity, canonical, universal);
| test!(kinds[0]);
| test!(kinds[1]);
| test!(kinds[2]);
|}
|
|///
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachLower!"a = 0";
| assert(m == [
| [1, 2, 3],
| [0, 5, 6],
| [0, 0, 9]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachLower!"a = 0"(-1);
| assert(m == [
| [0, 0, 3],
| [0, 0, 0],
| [0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachLower!"a = 0"(2);
| assert(m == [
| [1, 2, 3],
| [4, 5, 6],
| [0, 8, 9]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachLower!"a = 0"(-2);
| assert(m == [
| [0, 0, 0],
| [0, 0, 0],
| [0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachLower!"a = 0"(0);
| assert(m == [
| [0, 2, 3, 4],
| [0, 0, 7, 8],
| [0, 0, 0, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachLower!"a = 0";
| assert(m == [
| [1, 2, 3, 4],
| [0, 6, 7, 8],
| [0, 0, 11, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachLower!"a = 0"(-1);
| assert(m == [
| [0, 0, 3, 4],
| [0, 0, 0, 8],
| [0, 0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachLower!"a = 0"(2);
| assert(m == [
| [1, 2, 3, 4],
| [5, 6, 7, 8],
| [0, 10, 11, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachLower!"a = 0"(-2);
| assert(m == [
| [0, 0, 0, 4],
| [0, 0, 0, 0],
| [0, 0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachLower!"a = 0"(0);
| assert(m == [
| [0, 2, 3],
| [0, 0, 6],
| [0, 0, 0],
| [0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachLower!"a = 0";
| assert(m == [
| [1, 2, 3],
| [0, 5, 6],
| [0, 0, 9],
| [0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachLower!"a = 0"(-1);
| assert(m == [
| [0, 0, 3],
| [0, 0, 0],
| [0, 0, 0],
| [0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachLower!"a = 0"(2);
| assert(m == [
| [1, 2, 3],
| [4, 5, 6],
| [0, 8, 9],
| [0, 0, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachLower!"a = 0"(-2);
| assert(m == [
| [0, 0, 0],
| [0, 0, 0],
| [0, 0, 0],
| [0, 0, 0]]);
|}
|
|/// Swap two slices
|pure nothrow
|version(mir_test) unittest
|{
| import mir.utility : swap;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| //| 6 7 8 |
| auto a = iota([3, 3]).as!double.slice;
| //| 10 11 12 |
| //| 13 14 15 |
| //| 16 17 18 |
| auto b = iota([3, 3], 10).as!double.slice;
|
| eachLower!swap(a, b);
|
| assert(a == [
| [ 0, 1, 2],
| [13, 4, 5],
| [16, 17, 8]]);
| assert(b == [
| [10, 11, 12],
| [ 3, 14, 15],
| [ 6, 7, 18]]);
|}
|
|/// Swap two zipped slices
|pure nothrow
|version(mir_test) unittest
|{
| import mir.utility : swap;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, zip, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| //| 6 7 8 |
| auto a = iota([3, 3]).as!double.slice;
| //| 10 11 12 |
| //| 13 14 15 |
| //| 16 17 18 |
| auto b = iota([3, 3], 10).as!double.slice;
|
| auto z = zip(a, b);
|
| z.eachLower!(z => swap(z.a, z.b));
|
| assert(a == [
| [ 0, 1, 2],
| [13, 4, 5],
| [16, 17, 8]]);
| assert(b == [
| [10, 11, 12],
| [ 3, 14, 15],
| [ 6, 7, 18]]);
|}
|
|/++
|The call `eachUpper!(fun)(slice1, ..., sliceN)` evaluates `fun` on the upper
|triangle in `slice1, ..., sliceN`, respectively.
|
|`eachUpper` allows iterating multiple slices in lockstep.
|
|Params:
| fun = A function
|See_Also:
| This is functionally similar to $(LREF each).
|+/
|template eachUpper(alias fun)
|{
| import mir.functional: naryFun;
|
| static if (__traits(isSame, naryFun!fun, fun))
| {
| /++
| Params:
| inputs = One or more two-dimensional slices and an optional
| integer, `k`.
|
| The value `k` determines which diagonals will have the function
| applied:
| For k = 0, the function is also applied to the main diagonal.
| For k = 1 (default), only the non-main diagonals above the main
| diagonal will have the function applied.
| For k > 1, fewer diagonals above the main diagonal will have the
| function applied.
| For k < 0, more diagonals below the main diagonal will have the
| function applied.
| +/
| void eachUpper(Inputs...)(scope Inputs inputs)
| if (((Inputs.length > 1) &&
| (isIntegral!(Inputs[$ - 1]))) ||
| (Inputs.length))
| {
| import mir.ndslice.traits : isMatrix;
|
| size_t val;
|
| static if ((Inputs.length > 1) && (isIntegral!(Inputs[$ - 1])))
| {
| immutable(sizediff_t) k = inputs[$ - 1];
| alias Slices = Inputs[0..($ - 1)];
| alias slices = inputs[0..($ - 1)];
| }
| else
| {
| enum sizediff_t k = 1;
| alias Slices = Inputs;
| alias slices = inputs;
| }
|
| static assert (allSatisfy!(isMatrix, Slices),
| "eachUpper: Every slice input must be a two-dimensional slice");
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| if (slices[0].anyEmpty)
| return;
|
| foreach(ref slice; slices)
| assert(!slice.empty);
|
| immutable(size_t) m = slices[0].length!0;
| immutable(size_t) n = slices[0].length!1;
|
| size_t i;
|
| if (k < 0)
| {
| val = -k;
| .eachImpl!fun(selectFrontOf!(val, slices));
|
| foreach(ref slice; slices)
| slice.popFrontExactly!0(-k);
| i = -k;
| }
|
| do
| {
| val = (n - k) - i;
| .eachImpl!fun(frontSelectBackOf!(val, slices));
|
| foreach(ref slice; slices)
| slice.popFront;
| i++;
| } while ((i < (n - k)) && (i < m));
| }
| }
| else
| {
| alias eachUpper = .eachUpper!(naryFun!fun);
| }
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota, canonical, universal;
|
| pure nothrow
| void test(alias func)()
| {
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = func(iota([3, 3], 1).slice);
| m.eachUpper!"a = 0"(0);
| assert(m == [
| [0, 0, 0],
| [4, 0, 0],
| [7, 8, 0]]);
| }
|
| @safe pure nothrow @nogc
| T identity(T)(T x)
| {
| return x;
| }
|
| alias kinds = AliasSeq!(identity, canonical, universal);
| test!(kinds[0]);
| test!(kinds[1]);
| test!(kinds[2]);
|}
|
|///
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachUpper!"a = 0";
| assert(m == [
| [1, 0, 0],
| [4, 5, 0],
| [7, 8, 9]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachUpper!"a = 0"(-1);
| assert(m == [
| [0, 0, 0],
| [0, 0, 0],
| [7, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachUpper!"a = 0"(2);
| assert(m == [
| [1, 2, 0],
| [4, 5, 6],
| [7, 8, 9]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachUpper!"a = 0"(-2);
| assert(m == [
| [0, 0, 0],
| [0, 0, 0],
| [0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachUpper!"a = 0"(0);
| assert(m == [
| [0, 0, 0, 0],
| [5, 0, 0, 0],
| [9, 10, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachUpper!"a = 0";
| assert(m == [
| [1, 0, 0, 0],
| [5, 6, 0, 0],
| [9, 10, 11, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachUpper!"a = 0"(-1);
| assert(m == [
| [0, 0, 0, 0],
| [0, 0, 0, 0],
| [9, 0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachUpper!"a = 0"(2);
| assert(m == [
| [1, 2, 0, 0],
| [5, 6, 7, 0],
| [9, 10, 11, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachUpper!"a = 0"(-2);
| assert(m == [
| [0, 0, 0, 0],
| [0, 0, 0, 0],
| [0, 0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachUpper!"a = 0"(0);
| assert(m == [
| [0, 0, 0],
| [4, 0, 0],
| [7, 8, 0],
| [10, 11, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachUpper!"a = 0";
| assert(m == [
| [1, 0, 0],
| [4, 5, 0],
| [7, 8, 9],
| [10, 11, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachUpper!"a = 0"(-1);
| assert(m == [
| [0, 0, 0],
| [0, 0, 0],
| [7, 0, 0],
| [10, 11, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachUpper!"a = 0"(2);
| assert(m == [
| [1, 2, 0],
| [4, 5, 6],
| [7, 8, 9],
| [10, 11, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachUpper!"a = 0"(-2);
| assert(m == [
| [0, 0, 0],
| [0, 0, 0],
| [0, 0, 0],
| [10, 0, 0]]);
|}
|
|/// Swap two slices
|pure nothrow
|version(mir_test) unittest
|{
| import mir.utility : swap;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| //| 6 7 8 |
| auto a = iota([3, 3]).as!double.slice;
| //| 10 11 12 |
| //| 13 14 15 |
| //| 16 17 18 |
| auto b = iota([3, 3], 10).as!double.slice;
|
| eachUpper!swap(a, b);
|
| assert(a == [
| [0, 11, 12],
| [3, 4, 15],
| [6, 7, 8]]);
| assert(b == [
| [10, 1, 2],
| [13, 14, 5],
| [16, 17, 18]]);
|}
|
|/// Swap two zipped slices
|pure nothrow
|version(mir_test) unittest
|{
| import mir.utility : swap;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, zip, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| //| 6 7 8 |
| auto a = iota([3, 3]).as!double.slice;
| //| 10 11 12 |
| //| 13 14 15 |
| //| 16 17 18 |
| auto b = iota([3, 3], 10).as!double.slice;
|
| auto z = zip(a, b);
|
| z.eachUpper!(z => swap(z.a, z.b));
|
| assert(a == [
| [0, 11, 12],
| [3, 4, 15],
| [6, 7, 8]]);
| assert(b == [
| [10, 1, 2],
| [13, 14, 5],
| [16, 17, 18]]);
|}
|
|// uniq
|/**
|Lazily iterates unique consecutive elements of the given range (functionality
|akin to the $(HTTP wikipedia.org/wiki/_Uniq, _uniq) system
|utility). Equivalence of elements is assessed by using the predicate
|$(D pred), by default $(D "a == b"). The predicate is passed to
|$(REF naryFun, mir,functional), and can either accept a string, or any callable
|that can be executed via $(D pred(element, element)). If the given range is
|bidirectional, $(D uniq) also yields a
|bidirectional range.
|Params:
| pred = Predicate for determining equivalence between range elements.
|*/
|template uniq(alias pred = "a == b")
|{
| static if (__traits(isSame, naryFun!pred, pred))
| {
| /++
| Params:
| r = An input range of elements to filter.
| Returns:
| An input range of
| consecutively unique elements in the original range. If `r` is also a
| forward range or bidirectional range, the returned range will be likewise.
| +/
| Uniq!(naryFun!pred, Range) uniq(Range)(Range r)
| if (isInputRange!Range && !isSlice!Range)
| {
| import core.lifetime: move;
| return typeof(return)(r.move);
| }
|
| /// ditto
| auto uniq(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| import mir.ndslice.topology: flattened;
| import core.lifetime: move;
| auto r = slice.move.flattened;
| return Uniq!(pred, typeof(r))(move(r));
| }
| }
| else
| alias uniq = .uniq!(naryFun!pred);
|}
|
|///
|@safe version(mir_test) unittest
|{
| int[] arr = [ 1, 2, 2, 2, 2, 3, 4, 4, 4, 5 ];
| assert(equal(uniq(arr), [ 1, 2, 3, 4, 5 ]));
|
| import std.algorithm.mutation : copy;
| // Filter duplicates in-place using copy
| arr.length -= arr.uniq.copy(arr).length;
| assert(arr == [ 1, 2, 3, 4, 5 ]);
|
| // Note that uniqueness is only determined consecutively; duplicated
| // elements separated by an intervening different element will not be
| // eliminated:
| assert(equal(uniq([ 1, 1, 2, 1, 1, 3, 1]), [1, 2, 1, 3, 1]));
|}
|
|/// N-dimensional case
|version(mir_test)
|@safe pure unittest
|{
| import mir.ndslice.fuse;
| import mir.ndslice.topology: byDim, map, iota;
|
| auto matrix = [ [1, 2, 2], [2, 2, 3], [4, 4, 4] ].fuse;
|
| assert(matrix.uniq.equal([ 1, 2, 3, 4 ]));
|
| // unique elements for each row
| assert(matrix.byDim!0.map!uniq.equal!equal([ [1, 2], [2, 3], [4] ]));
|}
|
|/++
|Authors: $(HTTP erdani.com, Andrei Alexandrescu) (original Phobos code), Ilya Yaroshenko (betterC rework)
|+/
|struct Uniq(alias pred, Range)
|{
| Range _input;
|
| ref opSlice() inout
| {
| return this;
| }
|
| void popFront() scope
| {
| assert(!empty, "Attempting to popFront an empty uniq.");
| auto last = _input.front;
| do
| {
| _input.popFront();
| }
| while (!_input.empty && pred(last, _input.front));
| }
|
| auto ref front() @property
| {
| assert(!empty, "Attempting to fetch the front of an empty uniq.");
| return _input.front;
| }
|
| static if (isBidirectionalRange!Range)
| {
| void popBack() scope
| {
| assert(!empty, "Attempting to popBack an empty uniq.");
| auto last = _input.back;
| do
| {
| _input.popBack();
| }
| while (!_input.empty && pred(last, _input.back));
| }
|
| auto ref back() scope return @property
| {
| assert(!empty, "Attempting to fetch the back of an empty uniq.");
| return _input.back;
| }
| }
|
| static if (isInfinite!Range)
| {
| enum bool empty = false; // Propagate infiniteness.
| }
| else
| {
| @property bool empty() const { return _input.empty; }
| }
|
| static if (isForwardRange!Range)
| {
| @property typeof(this) save() scope return
| {
| return typeof(this)(_input.save);
| }
| }
|}
|
|version(none)
|@safe version(mir_test) unittest
|{
| import std.internal.test.dummyrange;
| import std.range;
|
| int[] arr = [ 1, 2, 2, 2, 2, 3, 4, 4, 4, 5 ];
| auto r = uniq(arr);
| static assert(isForwardRange!(typeof(r)));
|
| assert(equal(r, [ 1, 2, 3, 4, 5 ][]));
| assert(equal(retro(r), retro([ 1, 2, 3, 4, 5 ][])));
|
| foreach (DummyType; AllDummyRanges)
| {
| DummyType d;
| auto u = uniq(d);
| assert(equal(u, [1,2,3,4,5,6,7,8,9,10]));
|
| static assert(d.rt == RangeType.Input || isForwardRange!(typeof(u)));
|
| static if (d.rt >= RangeType.Bidirectional)
| {
| assert(equal(retro(u), [10,9,8,7,6,5,4,3,2,1]));
| }
| }
|}
|
|@safe version(mir_test) unittest // https://issues.dlang.org/show_bug.cgi?id=17264
|{
| const(int)[] var = [0, 1, 1, 2];
| assert(var.uniq.equal([0, 1, 2]));
|}
|
|@safe version(mir_test) unittest {
| import mir.ndslice.allocation;
| import mir.math.common: approxEqual;
| auto x = rcslice!double(2);
| auto y = rcslice!double(2);
| x[] = [2, 3];
| y[] = [2, 3];
| assert(equal!approxEqual(x,y));
|}
|
|/++
|Implements the higher order filter function. The predicate is passed to
|`mir.functional.naryFun`, and can either accept a string, or any callable
|that can be executed via `pred(element)`.
|Params:
| pred = Function to apply to each element of range
|Returns:
| `filter!(pred)(range)` returns a new range containing only elements `x` in `range` for
| which `pred(x)` returns `true`.
|See_Also:
| $(HTTP en.wikipedia.org/wiki/Filter_(higher-order_function), Filter (higher-order function))
|Note:
| $(RED User and library code MUST call the `empty` method before each call to `front` and/or `popFront`.)
|+/
|template filter(alias pred = "a")
|{
| static if (__traits(isSame, naryFun!pred, pred))
| {
| /++
| Params:
| r = An input range of elements to filter.
| Returns:
| A new range containing only elements `x` in `r` for which `pred(x)` returns `true`.
| +/
| Filter!(naryFun!pred, Range) filter(Range)(Range r)
| if (isInputRange!Range && !isSlice!Range)
| {
| import core.lifetime: move;
| return typeof(return)(r.move);
| }
|
| /// ditto
| auto filter(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| import mir.ndslice.topology: flattened;
| import core.lifetime: move;
| auto r = slice.move.flattened;
| return Filter!(pred, typeof(r))(move(r));
| }
| }
| else
| alias filter = .filter!(naryFun!pred);
|}
|
|/// ditto
|struct Filter(alias pred, Range)
|{
| Range _input;
| version(assert) bool _freshEmpty;
|
| ref opSlice() inout
| {
| return this;
| }
|
| void popFront() scope
| {
| assert(!_input.empty, "Attempting to popFront an empty Filter.");
| version(assert) assert(_freshEmpty, "Attempting to pop the front of a Filter without calling '.empty' method ahead.");
| version(assert) _freshEmpty = false;
| _input.popFront;
| }
|
| auto ref front() @property
| {
| assert(!_input.empty, "Attempting to fetch the front of an empty Filter.");
| version(assert) assert(_freshEmpty, "Attempting to fetch the front of a Filter without calling '.empty' method ahead.");
| return _input.front;
| }
|
| bool empty() @property
| {
| version(assert) _freshEmpty = true;
| for (;;)
| {
| if (auto r = _input.empty)
| return true;
| if (pred(_input.front))
| return false;
| _input.popFront;
| }
| }
|
| static if (isForwardRange!Range)
| {
| @property typeof(this) save() scope return
| {
| return typeof(this)(_input.save);
| }
| }
|}
|
|///
|version(mir_test)
|@safe pure nothrow unittest
|{
| int[] arr = [ 0, 1, 2, 3, 4, 5 ];
|
| // Filter below 3
| auto small = filter!(a => a < 3)(arr);
| assert(equal(small, [ 0, 1, 2 ]));
|
| // Filter again, but with Uniform Function Call Syntax (UFCS)
| auto sum = arr.filter!(a => a < 3);
| assert(equal(sum, [ 0, 1, 2 ]));
|
| // Filter with the default predicate
| auto nonZeros = arr.filter;
| assert(equal(nonZeros, [ 1, 2, 3, 4, 5 ]));
|
| // In combination with concatenation() to span multiple ranges
| import mir.ndslice.concatenation;
|
| int[] a = [ 3, -2, 400 ];
| int[] b = [ 100, -101, 102 ];
| auto r = concatenation(a, b).filter!(a => a > 0);
| assert(equal(r, [ 3, 400, 100, 102 ]));
|
| // Mixing convertible types is fair game, too
| double[] c = [ 2.5, 3.0 ];
| auto r1 = concatenation(c, a, b).filter!(a => cast(int) a != a);
| assert(equal(r1, [ 2.5 ]));
|}
|
|/// N-dimensional filtering
|version(mir_test)
|@safe pure unittest
|{
| import mir.ndslice.fuse;
| import mir.ndslice.topology: byDim, map;
|
| auto matrix =
| [[ 3, -2, 400 ],
| [ 100, -101, 102 ]].fuse;
|
| alias filterPositive = filter!"a > 0";
|
| // filter all elements in the matrix
| auto r = filterPositive(matrix);
| assert(equal(r, [ 3, 400, 100, 102 ]));
|
| // filter all elements for each row
| auto rr = matrix.byDim!0.map!filterPositive;
| assert(equal!equal(rr, [ [3, 400], [100, 102] ]));
|
| // filter all elements for each column
| auto rc = matrix.byDim!1.map!filterPositive;
| assert(equal!equal(rc, [ [3, 100], [], [400, 102] ]));
|}
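|
|// Editor's note: a minimal illustrative sketch, not part of the original
|// coverage run. It demonstrates the Note above: when a `Filter` is iterated
|// by hand, `empty` must be queried before every `front`/`popFront` use,
|// because `empty` is what advances the underlying range to the next match.
|version(mir_test)
|@safe pure nothrow unittest
|{
|    auto r = filter!"a % 2"([1, 2, 3, 4, 5]);
|    int[3] odds;
|    size_t n;
|    while (!r.empty)    // required before `front` / `popFront`
|    {
|        odds[n++] = r.front;
|        r.popFront;
|    }
|    assert(odds == [1, 3, 5]);
|}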
|
|/++
|Implements the homonym function (also known as `accumulate`, $(D
|compress), `inject`, or `foldl`) present in various programming
|languages of functional flavor. The call `fold!(fun)(slice, seed)`
|first assigns `seed` to an internal variable `result`,
|also called the accumulator. Then, for each element `x` in $(D
|slice), `result = fun(result, x)` gets evaluated. Finally, $(D
|result) is returned.
|
|Params:
| fun = the predicate function to apply to the elements
|
|See_Also:
| $(HTTP en.wikipedia.org/wiki/Fold_(higher-order_function), Fold (higher-order function))
| $(LREF sum) is similar to `fold!((a, b) => a + b)`, but offers
| precise summing of floating point numbers.
| This is functionally equivalent to $(LREF reduce) with the argument order
| reversed.
|+/
|template fold(alias fun)
|{
| /++
| Params:
| slice = A slice, range, or array.
| seed = An initial accumulation value.
| Returns:
| the accumulated result
| +/
| @optmath auto fold(Slice, S)(scope Slice slice, S seed)
| {
| import core.lifetime: move;
| return reduce!fun(seed, slice.move);
| }
|}
|
|///
|version(mir_test)
|@safe pure nothrow
|unittest
|{
| import mir.ndslice.slice: sliced;
| import mir.ndslice.topology: map;
|
| auto arr = [1, 2, 3, 4, 5].sliced;
|
| // Sum all elements
| assert(arr.fold!((a, b) => a + b)(0) == 15);
| assert(arr.fold!((a, b) => a + b)(6) == 21);
|
| // Can be used in a UFCS chain
| assert(arr.map!(a => a + 1).fold!((a, b) => a + b)(0) == 20);
|
| // Return the last element of any range
| assert(arr.fold!((a, b) => b)(0) == 5);
|}
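|
|// Editor's note: a minimal illustrative sketch, not part of the original
|// coverage run. As noted in See_Also, `fold!fun(slice, seed)` is
|// `reduce!fun(seed, slice)` with the argument order reversed.
|version(mir_test)
|@safe pure nothrow
|unittest
|{
|    import mir.ndslice.slice: sliced;
|
|    auto arr = [1, 2, 3, 4, 5].sliced;
|    assert(arr.fold!((a, b) => a + b)(0) == reduce!((a, b) => a + b)(0, arr));
|}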
|
|/// Works for matrices
|version(mir_test)
|@safe pure
|unittest
|{
| import mir.ndslice.fuse: fuse;
|
| auto arr = [
| [1, 2, 3],
| [4, 5, 6]
| ].fuse;
|
| assert(arr.fold!((a, b) => a + b)(0) == 21);
|}
|
|version(mir_test)
|@safe pure nothrow
|unittest
|{
| import mir.ndslice.topology: map;
|
| int[] arr = [1, 2, 3, 4, 5];
|
| // Sum all elements
| assert(arr.fold!((a, b) => a + b)(0) == 15);
| assert(arr.fold!((a, b) => a + b)(6) == 21);
|
| // Can be used in a UFCS chain
| assert(arr.map!(a => a + 1).fold!((a, b) => a + b)(0) == 20);
|
| // Return the last element of any range
| assert(arr.fold!((a, b) => b)(0) == 5);
|}
|
|version(mir_test)
|@safe pure nothrow
|unittest
|{
| int[] arr = [1];
| static assert(!is(typeof(arr.fold!()(0))));
| static assert(!is(typeof(arr.fold!(a => a)(0))));
| static assert(is(typeof(arr.fold!((a, b) => a)(0))));
| assert(arr.length == 1);
|}
|
|unittest
|{
| import mir.rc.array: RCArray;
| import mir.algorithm.iteration: minmaxPos, minPos, maxPos, minmaxIndex, minIndex, maxIndex;
|
| static immutable a = [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
|
| auto x = RCArray!double(12);
| foreach(i, ref e; x)
| e = a[i];
| auto y = x.asSlice;
| auto z0 = y.minmaxPos;
| auto z1 = y.minPos;
| auto z2 = y.maxPos;
| auto z3 = y.minmaxIndex;
| auto z4 = y.minIndex;
| auto z5 = y.maxIndex;
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/algorithm/iteration.d is 0% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-core-1.1.51-mir-core-source-mir-utility.lst
|/++
|Generic utilities.
|
|$(BOOKTABLE Cheat Sheet,
|$(TR $(TH Function Name) $(TH Description))
|$(T2 swap, Swaps two values.)
|$(T2 extMul, Extended unsigned multiplications.)
|$(T2 min, Minimum value.)
|$(T2 max, Maximum value.)
|)
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Authors: Ilya Yaroshenko, $(HTTP erdani.com, Andrei Alexandrescu) (original std.* modules),
|Macros:
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|module mir.utility;
|
|import std.traits;
|
|import mir.math.common: optmath;
|
|version(LDC)
|pragma(LDC_inline_ir) R inlineIR(string s, R, P...)(P) @safe pure nothrow @nogc;
|
|@optmath:
|
|version(LDC)
|{
| ///
| public import ldc.intrinsics: _expect = llvm_expect;
|}
|else version(GNU)
|{
| import gcc.builtins: __builtin_expect, __builtin_clong;
|
| ///
| T _expect(T)(in T val, in T expected_val) if (__traits(isIntegral, T))
| {
| static if (T.sizeof <= __builtin_clong.sizeof)
| return cast(T) __builtin_expect(val, expected_val);
| else
| return val;
| }
|}
|else
|{
| ///
| T _expect(T)(in T val, in T expected_val) if (__traits(isIntegral, T))
| {
| return val;
| }
|}
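|
|// Editor's note: a minimal illustrative sketch, not part of the original
|// coverage run. `_expect` is only a branch-prediction hint: it returns `val`
|// unchanged and merely advises the optimizer which value is likely.
|version(mir_core_test) unittest
|{
|    int x = 1;
|    if (_expect(x, 1) == 1) // hint: `x` is expected to equal 1
|        x = 2;
|    assert(x == 2);
|}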
|
|public import std.algorithm.mutation: swap;
|
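|// Editor's note: descriptive comment added for clarity; not part of the
|// original listing. `swapStars` swaps the values the two iterators point to:
|// it uses `swap(*i1, *i2)` when that compiles, and otherwise falls back to an
|// element copy through `unref` and indexed assignment for iterators whose
|// dereference cannot be handed to `swap` directly.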
|void swapStars(I1, I2)(auto ref I1 i1, auto ref I2 i2)
|{
| static if (__traits(compiles, swap(*i1, *i2)))
| {
| swap(*i1, *i2);
| }
| else
| {
| import mir.functional: unref;
| auto e = unref(*i1);
| i1[0] = *i2;
| i2[0] = e;
| }
|}
|
|/++
|Iterates the passed arguments and returns the minimum value.
|Params: args = The values to select the minimum from. At least two arguments
| must be passed, and they must be comparable with `<`.
|Returns: The minimum of the passed-in values.
|+/
|auto min(T...)(T args)
| if (T.length >= 2)
|{
| //Get "a"
| static if (T.length <= 2)
| alias a = args[0];
| else
| auto a = min(args[0 .. ($+1)/2]);
| alias T0 = typeof(a);
|
| //Get "b"
| static if (T.length <= 3)
| alias b = args[$-1];
| else
| auto b = min(args[($+1)/2 .. $]);
| alias T1 = typeof(b);
|
| static assert (is(typeof(a < b)), "Invalid arguments: Cannot compare types " ~ T0.stringof ~ " and " ~ T1.stringof ~ ".");
|
| static if ((isFloatingPoint!T0 && isNumeric!T1) || (isFloatingPoint!T1 && isNumeric!T0))
| {
| import mir.math.common: fmin;
| return fmin(a, b);
| }
| else
| {
| static if (isIntegral!T0 && isIntegral!T1)
| static assert(isSigned!T0 == isSigned!T1,
| "mir.utility.min is not defined for signed + unsigned pairs because of security reasons."
| ~ "Please unify type or use a Phobos analog.");
| //Do the "min" proper with a and b
| return a < b ? a : b;
| }
|}
|
|@safe version(mir_core_test) unittest
|{
| int a = 5;
| short b = 6;
| double c = 2;
| auto d = min(a, b);
| static assert(is(typeof(d) == int));
| assert(d == 5);
| auto e = min(a, b, c);
| static assert(is(typeof(e) == double));
| assert(e == 2);
|}
|
|/++
|`min` is not defined for arguments of mixed signedness because of security reasons.
|Please unify type or use a Phobos analog.
|+/
|version(mir_core_test) unittest
|{
| int a = -10;
| uint b = 10;
| static assert(!is(typeof(min(a, b))));
|}
|
|
|/++
|Iterates the passed arguments and returns the maximum value.
|Params: args = The values to select the maximum from. At least two arguments
| must be passed, and they must be comparable with `<`.
|Returns: The maximum of the passed-in values.
|+/
|auto max(T...)(T args)
| if (T.length >= 2)
|{
| //Get "a"
| static if (T.length <= 2)
| alias a = args[0];
| else
| auto a = max(args[0 .. ($+1)/2]);
| alias T0 = typeof(a);
|
| //Get "b"
| static if (T.length <= 3)
| alias b = args[$-1];
| else
| auto b = max(args[($+1)/2 .. $]);
| alias T1 = typeof(b);
|
| static assert (is(typeof(a < b)), "Invalid arguments: Cannot compare types " ~ T0.stringof ~ " and " ~ T1.stringof ~ ".");
|
| static if ((isFloatingPoint!T0 && isNumeric!T1) || (isFloatingPoint!T1 && isNumeric!T0))
| {
| import mir.math.common: fmax;
| return fmax(a, b);
| }
| else
| {
| static if (isIntegral!T0 && isIntegral!T1)
| static assert(isSigned!T0 == isSigned!T1,
| "mir.utility.max is not defined for signed + unsigned pairs because of security reasons."
| ~ "Please unify type or use a Phobos analog.");
| //Do the "max" proper with a and b
| return a > b ? a : b;
| }
|}
|
|///
|@safe version(mir_core_test) unittest
|{
| int a = 5;
| short b = 6;
| double c = 2;
| auto d = max(a, b);
| static assert(is(typeof(d) == int));
| assert(d == 6);
| auto e = max(a, b, c);
| static assert(is(typeof(e) == double));
| assert(e == 6);
|}
|
|/++
|`max` is not defined for arguments of mixed signedness because of security reasons.
|Please unify type or use a Phobos analog.
|+/
|version(mir_core_test) unittest
|{
| int a = -10;
| uint b = 10;
| static assert(!is(typeof(max(a, b))));
|}
|
|/++
|Return type for $(LREF extMul);
|
|The payload order of `low` and `high` parts depends on the endianness.
|+/
|struct ExtMulResult(I)
| if (isUnsigned!I)
|{
| version (LittleEndian)
| {
| /// Lower I.sizeof * 8 bits
| I low;
| /// Higher I.sizeof * 8 bits
| I high;
| }
| else
| {
| /// Higher I.sizeof * 8 bits
| I high;
| /// Lower I.sizeof * 8 bits
| I low;
| }
|
| T opCast(T : ulong)()
| {
| static if (is(I == ulong))
| {
| return cast(T)low;
| }
| else
| {
| return cast(T)(low | (ulong(high) << (I.sizeof * 8)));
| }
| }
|}
|
|/++
|Extended unsigned multiplications.
|Performs U x U multiplication and returns $(LREF ExtMulResult)!U that contains the extended result.
|Params:
| a = unsigned integer
| b = unsigned integer
|Returns:
| 128bit result if U is ulong or 256bit result if U is ucent.
|Optimization:
| Algorithm is optimized for LDC (LLVM IR, any target) and for DMD (X86_64).
|+/
|ExtMulResult!U extMul(U)(in U a, in U b) @nogc nothrow pure @trusted
| if(isUnsigned!U)
|{
| static if (is(U == ulong))
| alias H = uint;
| else // ucent
| alias H = ulong;
|
| enum hbc = H.sizeof * 8;
|
| static if (U.sizeof < 4)
| {
| auto ret = uint(a) * b;
| version (LittleEndian)
| return typeof(return)(cast(U) ret, cast(U)(ret >>> (U.sizeof * 8)));
| else
| return typeof(return)(cast(U)(ret >>> (U.sizeof * 8)), cast(U) ret);
| }
| else
| static if (is(U == uint))
| {
| auto ret = ulong(a) * b;
| version (LittleEndian)
| return typeof(return)(cast(uint) ret, cast(uint)(ret >>> 32));
| else
| return typeof(return)(cast(uint)(ret >>> 32), cast(uint) ret);
| }
| else
| static if (is(U == ulong) && __traits(compiles, ucent.init))
| {
| auto ret = ucent(a) * b;
| version (LittleEndian)
| return typeof(return)(cast(ulong) ret, cast(ulong)(ret >>> 64));
| else
| return typeof(return)(cast(ulong)(ret >>> 64), cast(ulong) ret);
| }
| else
| {
| if (!__ctfe)
| {
| static if (size_t.sizeof == 4)
| {
| // https://github.com/ldc-developers/ldc/issues/2391
| }
| else
| version(LDC)
| {
| // LLVM IR by n8sh
| pragma(inline, true);
| static if (is(U == ulong))
| {
| auto r = inlineIR!(`
| %a = zext i64 %0 to i128
| %b = zext i64 %1 to i128
| %m = mul i128 %a, %b
| %n = lshr i128 %m, 64
| %h = trunc i128 %n to i64
| %l = trunc i128 %m to i64
| %agg1 = insertvalue [2 x i64] undef, i64 %l, 0
| %agg2 = insertvalue [2 x i64] %agg1, i64 %h, 1
| ret [2 x i64] %agg2`, ulong[2])(a, b);
| version (LittleEndian)
| return ExtMulResult!U(r[0], r[1]);
| else
| return ExtMulResult!U(r[1], r[0]);
| }
| else
| static if (false)
| {
| auto r = inlineIR!(`
| %a = zext i128 %0 to i256
| %b = zext i128 %1 to i256
| %m = mul i256 %a, %b
| %n = lshr i256 %m, 128
| %h = trunc i256 %n to i128
| %l = trunc i256 %m to i128
| %agg1 = insertvalue [2 x i128] undef, i128 %l, 0
| %agg2 = insertvalue [2 x i128] %agg1, i128 %h, 1
| ret [2 x i128] %agg2`, ucent[2])(a, b);
| version (LittleEndian)
| return ExtMulResult!U(r[0], r[1]);
| else
| return ExtMulResult!U(r[1], r[0]);
| }
| }
| else
| version(D_InlineAsm_X86_64)
| {
| static if (is(U == ulong))
| {
| version(Windows)
| {
| ulong[2] r = extMul_X86_64(a, b);
| return ExtMulResult!ulong(r[0], r[1]);
| }
| else
| {
| return extMul_X86_64(a, b);
| }
| }
| }
| }
|
| U al = cast(H)a;
| U ah = a >>> hbc;
| U bl = cast(H)b;
| U bh = b >>> hbc;
|
| U p0 = al * bl;
| U p1 = al * bh;
| U p2 = ah * bl;
| U p3 = ah * bh;
|
| H cy = cast(H)(((p0 >>> hbc) + cast(H)p1 + cast(H)p2) >>> hbc);
| U lo = p0 + (p1 << hbc) + (p2 << hbc);
| U hi = p3 + (p1 >>> hbc) + (p2 >>> hbc) + cy;
|
| version(LittleEndian)
| return typeof(return)(lo, hi);
| else
| return typeof(return)(hi, lo);
| }
|}
|
|/// 64bit x 64bit -> 128bit
|version(mir_core_test) unittest
|{
| immutable a = 0x93_8d_28_00_0f_50_a5_56;
| immutable b = 0x54_c3_2f_e8_cc_a5_97_10;
| enum c = extMul(a, b); // Compile time algorithm
| assert(extMul(a, b) == c); // Fast runtime algorithm
| static assert(c.high == 0x30_da_d1_42_95_4a_50_78);
| static assert(c.low == 0x27_9b_4b_b4_9e_fe_0f_60);
|}
|
|/// 32bit x 32bit -> 64bit
|version(mir_core_test) unittest
|{
| immutable a = 0x0f_50_a5_56;
| immutable b = 0xcc_a5_97_10;
| static assert(cast(ulong)extMul(a, b) == ulong(a) * b);
|}
|
|///
|version(mir_core_test) unittest
|{
| immutable ushort a = 0xa5_56;
| immutable ushort b = 0x97_10;
| static assert(cast(uint)extMul(a, b) == a * b);
|}
|
|///
|version(mir_core_test) unittest
|{
| immutable ubyte a = 0x56;
| immutable ubyte b = 0x10;
| static assert(cast(ushort)extMul(a, b) == a * b);
|}
|
|version(D_InlineAsm_X86_64)
|{
| version(Windows)
| private ulong[2] extMul_X86_64()(ulong a, ulong b)
| {
| asm @safe pure nothrow @nogc
| {
| naked;
| mov RAX, RCX;
| mul RDX;
| ret;
| }
| }
| else
| private ExtMulResult!ulong extMul_X86_64()(ulong a, ulong b)
| {
| asm @safe pure nothrow @nogc
| {
| naked;
| mov RAX, RDI;
| mul RSI;
| ret;
| }
| }
|}
|
|version(LDC) {} else version(D_InlineAsm_X86_64)
|@nogc nothrow pure @safe version(mir_core_test) unittest
|{
| immutable a = 0x93_8d_28_00_0f_50_a5_56;
| immutable b = 0x54_c3_2f_e8_cc_a5_97_10;
|
| version(Windows)
| {
| immutable ulong[2] r = extMul_X86_64(a, b);
| immutable ExtMulResult!ulong c = ExtMulResult!ulong(r[0], r[1]);
| }
| else
| {
| immutable ExtMulResult!ulong c = extMul_X86_64(a, b);
| }
|
| assert(c.high == 0x30_da_d1_42_95_4a_50_78);
| assert(c.low == 0x27_9b_4b_b4_9e_fe_0f_60);
|}
|
|// draft
|// https://www.codeproject.com/Tips/785014/UInt-Division-Modulus
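|// Editor's note: descriptive comments added for clarity; not part of the
|// original listing. The routine divides the 128-bit value `u1 * 2^64 + u0`
|// by the 64-bit divisor `v`, stores the remainder in `r`, and returns the
|// 64-bit quotient. Following the scheme in the article linked above, it
|// first normalizes the divisor and then forms the quotient in two 32-bit
|// halves; it assumes `v != 0` and that the quotient fits into 64 bits.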
|private ulong divmod128by64(const ulong u1, const ulong u0, ulong v, out ulong r)
|{
0000000| const ulong b = 1L << 32;
0000000| ulong un1, un0, vn1, vn0, q1, q0, un32, un21, un10, rhat, left, right;
|
| import mir.bitop;
|
0000000| auto s = ctlz(v);
0000000| v <<= s;
0000000| vn1 = v >> 32;
0000000| vn0 = v & 0xffffffff;
|
0000000| un32 = (u1 << s) | (u0 >> (64 - s));
0000000| un10 = u0 << s;
|
0000000| un1 = un10 >> 32;
0000000| un0 = un10 & 0xffffffff;
|
0000000| q1 = un32 / vn1;
0000000| rhat = un32 % vn1;
|
0000000| left = q1 * vn0;
0000000| right = (rhat << 32) + un1;
|
0000000| while ((q1 >= b) || (left > right))
| {
0000000| --q1;
0000000| rhat += vn1;
0000000| if (rhat >= b)
0000000| break;
0000000| left -= vn0;
0000000| right = (rhat << 32) | un1;
| }
|
0000000| un21 = (un32 << 32) + (un1 - (q1 * v));
|
0000000| q0 = un21 / vn1;
0000000| rhat = un21 % vn1;
|
0000000| left = q0 * vn0;
0000000| right = (rhat << 32) | un0;
|
0000000| while ((q0 >= b) || (left > right))
| {
0000000| --q0;
0000000| rhat += vn1;
0000000| if (rhat >= b)
0000000| break;
0000000| left -= vn0;
0000000| right = (rhat << 32) | un0;
| }
|
0000000| r = ((un21 << 32) + (un0 - (q0 * v))) >> s;
0000000| return (q1 << 32) | q0;
|}
|
|/++
|Simple sort algorithm useful for CTFE code.
|+/
|template simpleSort(alias cmp = "a < b")
|{
| ///
| T[] simpleSort(T)(return T[] array)
| {
| size_t i = 1;
| while (i < array.length)
| {
| size_t j = i;
| import mir.functional: naryFun;
| while (j > 0 && !naryFun!cmp(array[j - 1], array[j]))
| {
| swap(array[j - 1], array[j]);
| j--;
| }
| i++;
| }
| return array;
| }
|}
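|
|// Editor's note: a minimal illustrative sketch, not part of the original
|// coverage run. `simpleSort` is an insertion sort meant for small arrays
|// and CTFE-friendly code.
|version(mir_core_test) unittest
|{
|    assert(simpleSort([3, 1, 2]) == [1, 2, 3]);
|    assert(simpleSort!"a > b"([3, 1, 2]) == [3, 2, 1]);
|}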
../../../.dub/packages/mir-core-1.1.51/mir-core/source/mir/utility.d is 0% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-package.lst
|/+
|## Guide for Slice/BLAS contributors
|
|1. Make sure functions are
| a. inlined(!),
| b. `@nogc`,
| c. `nothrow`,
| d. `pure`.
| For this reason, it is preferable to use _simple_ `assert`s with messages
| that can be computed at compile time.
| The goals are:
| 1. to reduce executable size for _any_ compilation mode
| 2. to reduce template bloat in object files
| 3. to reduce compilation time
| 4. to allow users to write extern C bindings for code libraries on `Slice` type.
|
|2. `std.format`, `std.string`, and `std.conv` should not be used in error
| message formatting. Use plain string concatenation instead, e.g. `"Use " ~ Concatenation.stringof`.
|
|3. `mixin template`s may be used for pretty error message formatting.
|
|4. `Exception`s/`enforce`s should not be used to check indices and lengths.
| Exceptions are only allowed for algorithms where validation of input data is
| too complicated for the user. `reshape` function is a good example of a case
| where Exceptions are required.
| If a function might throw an exception, an example with exception handling should be added.
|
|5. For simple checks like matrix transposition, compile time flags should not be used.
| It is much better to opt for runtime matrix transposition.
| Furthermore, Slice type provides runtime matrix transposition out of the box.
|
|6. _Fortran_VS_C_ flags should not be used. They are about notation,
| but not about the algorithm itself. For math world users,
| a corresponding code example might be included in the documentation.
| `transposed` / `everted` can be used in cache-friendly codes.
|
|7. Compile time evaluation should not be used to produce dummy types like `IdentityMatrix`.
|
|8. Memory allocation and algorithm logic should be separated whenever possible.
|
|9. CTFE version(mir_test) unittests should be added to new functions.
|+/
|
|/**
|$(H1 Multidimensional Random Access Ranges)
|
|The package provides a multidimensional array implementation.
|It would be well suited to creating machine learning and image
|processing algorithms, but should also be general enough for use anywhere with
|homogeneously-typed multidimensional data.
|In addition, it includes various functions for iteration, accessing, and manipulation.
|
|Quick_Start:
|$(SUBREF slice, sliced) is a function designed to create
|a multidimensional view over a range.
|Multidimensional view is presented by $(SUBREF slice, Slice) type.
|
|------
|import mir.ndslice;
|
|auto matrix = slice!double(3, 4);
|matrix[] = 0;
|matrix.diagonal[] = 1;
|
|auto row = matrix[2];
|row[3] = 6;
|assert(matrix[2, 3] == 6); // D & C index order
|------
|
|Note:
|In many examples $(REF iota, mir,_ndslice,topology) is used
|instead of a regular array, which makes it
|possible to carry out tests without memory allocation.
|
|$(SCRIPT inhibitQuickIndex = 1;)
|
|$(DIVC quickindex,
|$(BOOKTABLE,
|
|$(TR $(TH Submodule) $(TH Declarations))
|
|$(TR $(TDNW $(SUBMODULE slice) $(BR)
| $(SMALL $(SUBREF slice, Slice) structure
| $(BR) Basic constructors))
| $(TD
| $(SUBREF slice, Canonical)
| $(SUBREF slice, Contiguous)
| $(SUBREF slice, DeepElementType)
| $(SUBREF slice, isSlice)
| $(SUBREF slice, kindOf)
| $(SUBREF slice, Slice)
| $(SUBREF slice, sliced)
| $(SUBREF slice, slicedField)
| $(SUBREF slice, slicedNdField)
| $(SUBREF slice, SliceKind)
| $(SUBREF slice, Structure)
| $(SUBREF slice, Universal)
| )
|)
|
|$(TR $(TDNW $(SUBMODULE allocation) $(BR)
| $(SMALL Allocation utilities))
| $(TD
| $(SUBREF allocation, bitRcslice)
| $(SUBREF allocation, bitSlice)
| $(SUBREF allocation, makeNdarray)
| $(SUBREF allocation, makeSlice)
| $(SUBREF allocation, makeUninitSlice)
| $(SUBREF allocation, mininitRcslice)
| $(SUBREF allocation, ndarray)
| $(SUBREF allocation, rcslice)
| $(SUBREF allocation, shape)
| $(SUBREF allocation, slice)
| $(SUBREF allocation, stdcFreeAlignedSlice)
| $(SUBREF allocation, stdcFreeSlice)
| $(SUBREF allocation, stdcSlice)
| $(SUBREF allocation, stdcUninitAlignedSlice)
| $(SUBREF allocation, stdcUninitSlice)
| $(SUBREF allocation, uninitAlignedSlice)
| $(SUBREF allocation, uninitSlice)
| )
|)
|
|$(TR $(TDNW $(SUBMODULE topology) $(BR)
| $(SMALL Subspace manipulations
| $(BR) Advanced constructors
| $(BR) SliceKind conversion utilities))
| $(TD
| $(SUBREF topology, alongDim)
| $(SUBREF topology, as)
| $(SUBREF topology, assumeCanonical)
| $(SUBREF topology, assumeContiguous)
| $(SUBREF topology, assumeHypercube)
| $(SUBREF topology, assumeSameShape)
| $(SUBREF topology, bitpack)
| $(SUBREF topology, bitwise)
| $(SUBREF topology, blocks)
| $(SUBREF topology, byDim)
| $(SUBREF topology, bytegroup)
| $(SUBREF topology, cached)
| $(SUBREF topology, cachedGC)
| $(SUBREF topology, canonical)
| $(SUBREF topology, cartesian)
| $(SUBREF topology, chopped)
| $(SUBREF topology, cycle)
| $(SUBREF topology, diagonal)
| $(SUBREF topology, diff)
| $(SUBREF topology, dropBorders)
| $(SUBREF topology, evertPack)
| $(SUBREF topology, flattened)
| $(SUBREF topology, indexed)
| $(SUBREF topology, iota)
| $(SUBREF topology, ipack)
| $(SUBREF topology, kronecker)
| $(SUBREF topology, linspace)
| $(SUBREF topology, magic)
| $(SUBREF topology, map)
| $(SUBREF topology, member)
| $(SUBREF topology, ndiota)
| $(SUBREF topology, orthogonalReduceField)
| $(SUBREF topology, pack)
| $(SUBREF topology, pairwise)
| $(SUBREF topology, repeat)
| $(SUBREF topology, reshape)
| $(SUBREF topology, ReshapeError)
| $(SUBREF topology, retro)
| $(SUBREF topology, slide)
| $(SUBREF topology, slideAlong)
| $(SUBREF topology, squeeze)
| $(SUBREF topology, stairs)
| $(SUBREF topology, stride)
| $(SUBREF topology, subSlices)
| $(SUBREF topology, triplets)
| $(SUBREF topology, universal)
| $(SUBREF topology, unsqueeze)
| $(SUBREF topology, unzip)
| $(SUBREF topology, vmap)
| $(SUBREF topology, windows)
| $(SUBREF topology, zip)
| )
|)
|
|$(TR $(TDNW $(SUBMODULE filling) $(BR)
| $(SMALL Specialized initialisation routines))
| $(TD
| $(SUBREF filling, fillVandermonde)
| )
|)
|
|$(TR $(TDNW $(SUBMODULE fuse) $(BR)
| $(SMALL Data fusing (stacking)
| $(BR) See also $(SUBMODULE concatenation) submodule.
| ))
| $(TD
| $(SUBREF fuse, fuse)
| $(SUBREF fuse, fuseAs)
| $(SUBREF fuse, rcfuse)
| $(SUBREF fuse, rcfuseAs)
| $(SUBREF fuse, fuseCells)
| )
|)
|
|$(TR $(TDNW $(SUBMODULE concatenation) $(BR)
| $(SMALL Concatenation, padding, and algorithms
| $(BR) See also $(SUBMODULE fuse) submodule.
| ))
| $(TD
| $(SUBREF concatenation, forEachFragment)
| $(SUBREF concatenation, isConcatenation)
| $(SUBREF concatenation, pad)
| $(SUBREF concatenation, padEdge)
| $(SUBREF concatenation, padWrap)
| $(SUBREF concatenation, padSymmetric)
| $(SUBREF concatenation, concatenation)
| $(SUBREF concatenation, Concatenation)
| $(SUBREF concatenation, concatenationDimension)
| $(SUBREF concatenation, until)
| )
|)
|
|$(TR $(TDNW $(SUBMODULE dynamic)
| $(BR) $(SMALL Dynamic dimension manipulators))
| $(TD
| $(SUBREF dynamic, allReversed)
| $(SUBREF dynamic, dropToHypercube)
| $(SUBREF dynamic, everted)
| $(SUBREF dynamic, normalizeStructure)
| $(SUBREF dynamic, reversed)
| $(SUBREF dynamic, rotated)
| $(SUBREF dynamic, strided)
| $(SUBREF dynamic, swapped)
| $(SUBREF dynamic, transposed)
| )
|)
|
|$(TR $(TDNW $(SUBMODULE sorting)
| $(BR) $(SMALL Sorting utilities))
| $(TD
| $(SUBREF sorting, sort)
| Examples for `isSorted`, `isStrictlyMonotonic`, `makeIndex`, and `schwartzSort`.
| )
|)
|
|$(TR $(TDNW $(SUBMODULE mutation)
| $(BR) $(SMALL Mutation utilities))
| $(TD
| $(SUBREF mutation, copyMinor)
| $(SUBREF mutation, reverseInPlace)
| )
|)
|
|$(TR $(TDNW $(SUBMODULE iterator)
| $(BR) $(SMALL Declarations))
| $(TD
| $(SUBREF iterator, BytegroupIterator)
| $(SUBREF iterator, CachedIterator)
| $(SUBREF iterator, ChopIterator)
| $(SUBREF iterator, FieldIterator)
| $(SUBREF iterator, FlattenedIterator)
| $(SUBREF iterator, IndexIterator)
| $(SUBREF iterator, IotaIterator)
| $(SUBREF iterator, MapIterator)
| $(SUBREF iterator, MemberIterator)
| $(SUBREF iterator, RetroIterator)
| $(SUBREF iterator, SliceIterator)
| $(SUBREF iterator, SlideIterator)
| $(SUBREF iterator, StairsIterator)
| $(SUBREF iterator, StrideIterator)
| $(SUBREF iterator, SubSliceIterator)
| $(SUBREF iterator, Triplet)
| $(SUBREF iterator, TripletIterator)
| $(SUBREF iterator, ZipIterator)
| )
|)
|
|$(TR $(TDNW $(SUBMODULE field)
| $(BR) $(SMALL Declarations))
| $(TD
| $(SUBREF field, BitField)
| $(SUBREF field, BitpackField)
| $(SUBREF field, CycleField)
| $(SUBREF field, LinspaceField)
| $(SUBREF field, MagicField)
| $(SUBREF field, MapField)
| $(SUBREF field, ndIotaField)
| $(SUBREF field, OrthogonalReduceField)
| $(SUBREF field, RepeatField)
| )
|)
|
|$(TR $(TDNW $(SUBMODULE ndfield)
| $(BR) $(SMALL Declarations))
| $(TD
| $(SUBREF ndfield, Cartesian)
| $(SUBREF ndfield, Kronecker)
| )
|)
|
|$(TR $(TDNW $(SUBMODULE chunks)
| $(BR) $(SMALL Declarations))
| $(TD
| $(SUBREF field, chunks)
| $(SUBREF field, Chunks)
| $(SUBREF field, isChunks)
| $(SUBREF field, popFrontTuple)
| )
|)
|
|$(TR $(TDNW $(SUBMODULE traits)
| $(BR) $(SMALL Declarations))
| $(TD
| $(SUBREF traits, isIterator)
| $(SUBREF traits, isVector)
| $(SUBREF traits, isMatrix)
| $(SUBREF traits, isContiguousSlice)
| $(SUBREF traits, isCanonicalSlice)
| $(SUBREF traits, isUniversalSlice)
| $(SUBREF traits, isContiguousVector)
| $(SUBREF traits, isUniversalVector)
| $(SUBREF traits, isContiguousMatrix)
| $(SUBREF traits, isCanonicalMatrix)
| $(SUBREF traits, isUniversalMatrix)
| )
|)
|
|))
|
|$(H2 Example: Image Processing)
|
|A median filter is implemented as an example. The function
|`movingWindowByChannel` can also be used with other filters that use a sliding
|window as the argument, in particular with convolution matrices such as the
|$(LINK2 https://en.wikipedia.org/wiki/Sobel_operator, Sobel operator).
|
|`movingWindowByChannel` iterates over an image in sliding-window mode.
|Each window is passed to a `filter`, which computes the value of the
|pixel that corresponds to that window.
|
|This function does not handle border cases, where a window overlaps the
|image only partially. It can still be used for such computations, though:
|first create an enlarged image whose borders are reflected copies of the
|original edges (see `padEdge` and `padSymmetric` in the
|$(SUBMODULE concatenation) submodule), then apply the function to the
|enlarged image.
|
|Note: You can find the example at
|$(LINK2 https://github.com/libmir/mir/blob/master/examples/median_filter.d, GitHub).
|
|-------
|/++
|Params:
|    filter = unary function; a 2D window is the argument
|    image = image of shape `(h, w, c)`,
|        where `c` is the number of channels in the image
|    nr = number of rows in the window
|    nc = number of columns in the window
|
|Returns:
|    image of shape `(h - nr + 1, w - nc + 1, c)`,
|    where `c` is the number of channels in the image.
| Dense data layout is guaranteed.
|+/
|Slice!(ubyte*, 3) movingWindowByChannel(alias filter)
|    (Slice!(ubyte*, 3, Universal) image, size_t nr, size_t nc)
|{
| // 0. 3D
| // The last dimension represents the color channel.
| return image
| // 1. 2D composed of 1D
| // Packs the last dimension.
| .pack!1
| // 2. 2D composed of 2D composed of 1D
| // Splits image into overlapping windows.
| .windows(nr, nc)
| // 3. 5D
| // Unpacks the windows.
| .unpack
|        // 4. 5D
|        // Brings the color channel dimension to the third position.
|        .transposed!(0, 1, 4)
|        // 5. 3D composed of 2D
|        // Packs the last two dimensions.
|        .pack!2
| // 2D to pixel lazy conversion.
| .map!filter
| // Creates the new image. The only memory allocation in this function.
| .slice;
|}
|-------
|
|A function that computes the median of a window is also needed.
|
|-------
|/++
|
|Params:
|    sl = input 2D window
|    buf = buffer with length no less than the number of elements in `sl`
|Returns:
|    median value over the window `sl`
|+/
|T median(Iterator, T)(Slice!(Iterator, 2, Universal) sl, T[] buf)
|{
| import std.algorithm.sorting : topN;
| // copy sl to the buffer
| auto retPtr = reduce!(
| (ptr, elem) { *ptr = elem; return ptr + 1;} )(buf.ptr, sl);
| auto n = retPtr - buf.ptr;
| buf[0 .. n].topN(n / 2);
| return buf[n / 2];
|}
|-------
|
|The `main` function:
|
|-------
|void main(string[] args)
|{
| import std.conv : to;
| import std.getopt : getopt, defaultGetoptPrinter;
| import std.path : stripExtension;
|
| uint nr, nc, def = 3;
| auto helpInformation = args.getopt(
| "nr", "number of rows in window, default value is " ~ def.to!string, &nr,
| "nc", "number of columns in window, default value is equal to nr", &nc);
| if (helpInformation.helpWanted)
| {
| defaultGetoptPrinter(
| "Usage: median-filter [] []\noptions:",
| helpInformation.options);
| return;
| }
| if (!nr) nr = def;
| if (!nc) nc = nr;
|
| auto buf = new ubyte[nr * nc];
|
| foreach (name; args[1 .. $])
| {
| import imageformats; // can be found at code.dlang.org
|
| IFImage image = read_image(name);
|
| auto ret = image.pixels
| .sliced(cast(size_t)image.h, cast(size_t)image.w, cast(size_t)image.c)
| .movingWindowByChannel
| !(window => median(window, buf))
| (nr, nc);
|
| write_image(
| name.stripExtension ~ "_filtered.png",
| ret.length!1,
| ret.length!0,
| (&ret[0, 0, 0])[0 .. ret.elementCount]);
| }
|}
|-------
|
|This program works both with color and grayscale images.
|
|-------
|$ median-filter --help
|Usage: median-filter [] []
|options:
| --nr number of rows in window, default value is 3
|          --nc number of columns in window, default value is equal to nr
|-h --help This help information.
|-------
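|
|The opening paragraph notes that the same driver works with convolution
|kernels as well. The following sketch is an addition to this text, not part
|of the original example; `sobelX` and `rgbImage` are illustrative names. The
|window is indexed as an ordinary 2D slice.
|
|-------
|// Horizontal Sobel kernel written as a filter for `movingWindowByChannel`.
|ubyte sobelX(W)(W w) // `w` is a 3x3 window
|{
|    // Sobel x-kernel weights: [-1 0 1; -2 0 2; -1 0 1]
|    auto gx = -cast(int) w[0, 0] + w[0, 2]
|              - 2 * w[1, 0] + 2 * w[1, 2]
|              -     w[2, 0] +     w[2, 2];
|    // clamp the response to the ubyte range
|    return cast(ubyte) (gx < 0 ? 0 : gx > 255 ? 255 : gx);
|}
|
|// usage, analogous to the median call above:
|// auto edges = rgbImage.movingWindowByChannel!sobelX(3, 3);
|-------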
|
|$(H2 Compared with `numpy.ndarray`)
|
|numpy is undoubtedly one of the most effective software packages that has
|facilitated the work of many engineers and scientists. However, because of
|the way Python is implemented, a programmer who needs functionality that is
|not already provided by numpy may find that the built-in functions are not
|enough, and that pure Python implementations of the missing pieces run very
|slowly. numpy can be extended, but doing so is laborious: even the most basic
|functions that access `ndarray` data directly must be written in C to reach
|reasonable performance.
|
|While working with `ndslice`, on the other hand, an engineer has access to
|the whole of the standard D library, so the functions they create can be as
|efficient as if they were written in C.
|
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko
|Acknowledgements: John Loughran Colvin
|
|Macros:
|SUBMODULE = $(MREF_ALTTEXT $1, mir, ndslice, $1)
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|TDNW2 = $0 |
|*/
|module mir.ndslice;
|
|public import mir.algorithm.iteration;
|public import mir.ndslice.allocation;
|public import mir.ndslice.chunks;
|public import mir.ndslice.concatenation;
|public import mir.ndslice.dynamic;
|public import mir.ndslice.field;
|public import mir.ndslice.filling;
|public import mir.ndslice.fuse;
|public import mir.ndslice.iterator;
|public import mir.ndslice.mutation;
|public import mir.ndslice.ndfield;
|public import mir.ndslice.slice;
|public import mir.ndslice.topology;
|public import mir.ndslice.traits;
|
|
|version(mir_test) unittest
|{
| auto matrix = new double[12].sliced(3, 4);
| matrix[] = 0;
| matrix.diagonal[] = 1;
|
| auto row = matrix[2];
| row[3] = 6;
| assert(matrix[2, 3] == 6); // D & C index order
| //assert(matrix(3, 2) == 6); // Math & Fortran index order
|}
|
|// relaxed example
|version(mir_test) unittest
|{
| import mir.qualifier;
|
| static Slice!(ubyte*, 3) movingWindowByChannel
| (Slice!(ubyte*, 3, Universal) image, size_t nr, size_t nc, ubyte delegate(LightConstOf!(Slice!(ubyte*, 2, Universal))) filter)
| {
| return image
| .pack!1
| .windows(nr, nc)
| .unpack
| .unpack
| .transposed!(0, 1, 4)
| .pack!2
| .map!filter
| .slice;
| }
|
| static T median(Iterator, T)(Slice!(Iterator, 2, Universal) sl, T[] buf)
| {
| import std.algorithm.sorting : topN;
| // copy sl to the buffer
| auto retPtr = reduce!(
| (ptr, elem) {
| *ptr = elem;
| return ptr + 1;
| } )(buf.ptr, sl);
| auto n = retPtr - buf.ptr;
| buf[0 .. n].topN(n / 2);
| return buf[n / 2];
| }
|
| import std.conv : to;
| import std.getopt : getopt, defaultGetoptPrinter;
| import std.path : stripExtension;
|
| auto args = ["std"];
| uint nr, nc, def = 3;
| auto helpInformation = args.getopt(
| "nr", "number of rows in window, default value is " ~ def.to!string, &nr,
|        "nc", "number of columns in window, default value is equal to nr", &nc);
| if (helpInformation.helpWanted)
| {
| defaultGetoptPrinter(
| "Usage: median-filter [] []\noptions:",
| helpInformation.options);
| return;
| }
| if (!nr) nr = def;
| if (!nc) nc = nr;
|
| auto buf = new ubyte[nr * nc];
|
| foreach (name; args[1 .. $])
| {
| auto ret =
| movingWindowByChannel
| (new ubyte[300].sliced(10, 10, 3).universal, nr, nc, window => median(window, buf));
| }
|}
|
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| immutable r = 1000.iota;
|
| auto t0 = r.sliced(1000);
| assert(t0.front == 0);
| assert(t0.back == 999);
| assert(t0[9] == 9);
|
| auto t1 = t0[10 .. 20];
| assert(t1.front == 10);
| assert(t1.back == 19);
| assert(t1[9] == 19);
|
| t1.popFront();
| assert(t1.front == 11);
| t1.popFront();
| assert(t1.front == 12);
|
| t1.popBack();
| assert(t1.back == 18);
| t1.popBack();
| assert(t1.back == 17);
|
| assert(t1 == iota([6], 12));
|}
|
|pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| import mir.array.allocation : array;
| auto r = 1000.iota.array;
|
| auto t0 = r.sliced(1000);
| assert(t0.length == 1000);
| assert(t0.front == 0);
| assert(t0.back == 999);
| assert(t0[9] == 9);
|
| auto t1 = t0[10 .. 20];
| assert(t1.front == 10);
| assert(t1.back == 19);
| assert(t1[9] == 19);
|
| t1.popFront();
| assert(t1.front == 11);
| t1.popFront();
| assert(t1.front == 12);
|
| t1.popBack();
| assert(t1.back == 18);
| t1.popBack();
| assert(t1.back == 17);
|
| assert(t1 == iota([6], 12));
|
| t1.front = 13;
| assert(t1.front == 13);
| t1.front++;
| assert(t1.front == 14);
| t1.front += 2;
| assert(t1.front == 16);
| t1.front = 12;
| assert((t1.front = 12) == 12);
|
| t1.back = 13;
| assert(t1.back == 13);
| t1.back++;
| assert(t1.back == 14);
| t1.back += 2;
| assert(t1.back == 16);
| t1.back = 12;
| assert((t1.back = 12) == 12);
|
| t1[3] = 13;
| assert(t1[3] == 13);
| t1[3]++;
| assert(t1[3] == 14);
| t1[3] += 2;
| assert(t1[3] == 16);
| t1[3] = 12;
| assert((t1[3] = 12) == 12);
|
| t1[3 .. 5] = 100;
| assert(t1[2] != 100);
| assert(t1[3] == 100);
| assert(t1[4] == 100);
| assert(t1[5] != 100);
|
| t1[3 .. 5] += 100;
| assert(t1[2] < 100);
| assert(t1[3] == 200);
| assert(t1[4] == 200);
| assert(t1[5] < 100);
|
| --t1[3 .. 5];
|
| assert(t1[2] < 100);
| assert(t1[3] == 199);
| assert(t1[4] == 199);
| assert(t1[5] < 100);
|
| --t1[];
| assert(t1[3] == 198);
| assert(t1[4] == 198);
|
| t1[] += 2;
| assert(t1[3] == 200);
| assert(t1[4] == 200);
|
| t1[].opIndexOpAssign!"*"(t1);
| assert(t1[3] == 40000);
| assert(t1[4] == 40000);
|
|
| assert(&t1[$ - 1] is &(t1.back()));
|}
|
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import std.range : iota;
| auto r = (10_000L * 2 * 3 * 4).iota;
|
| auto t0 = r.slicedField(10, 20, 30, 40);
| assert(t0.length == 10);
| assert(t0.length!0 == 10);
| assert(t0.length!1 == 20);
| assert(t0.length!2 == 30);
| assert(t0.length!3 == 40);
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto tensor = new int[3 * 4 * 8].sliced(3, 4, 8);
| assert(&(tensor.back.back.back()) is &tensor[2, 3, 7]);
| assert(&(tensor.front.front.front()) is &tensor[0, 0, 0]);
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto slice = new int[24].sliced(2, 3, 4);
| auto r0 = slice.pack!1[1, 2];
| slice.pack!1[1, 2][] = 4;
| auto r1 = slice[1, 2];
| assert(slice[1, 2, 3] == 4);
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto ar = new int[3 * 8 * 9];
|
| auto tensor = ar.sliced(3, 8, 9);
| tensor[0, 1, 2] = 4;
| tensor[0, 1, 2]++;
| assert(tensor[0, 1, 2] == 5);
| tensor[0, 1, 2]--;
| assert(tensor[0, 1, 2] == 4);
| tensor[0, 1, 2] += 2;
| assert(tensor[0, 1, 2] == 6);
|
| auto matrix = tensor[0 .. $, 1, 0 .. $];
| matrix[] = 10;
| assert(tensor[0, 1, 2] == 10);
| assert(matrix[0, 2] == tensor[0, 1, 2]);
| assert(&matrix[0, 2] is &tensor[0, 1, 2]);
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/package.d has no code
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-field.lst
|/++
|This is a submodule of $(MREF mir,ndslice).
|
|Field is a type with `opIndex()(ptrdiff_t index)` primitive.
|An iterator can be created on top of a field using $(SUBREF iterator, FieldIterator).
|An ndslice can be created on top of a field using $(SUBREF slice, slicedField).
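|
|For example (an illustrative sketch, not part of this module), a minimal
|hand-written field and an ndslice view over it may look as follows:
|-------
|// hypothetical field: element i equals i * i
|static struct SquaredField
|{
|    size_t opIndex()(ptrdiff_t index) const { return cast(size_t)(index * index); }
|}
|
|import mir.ndslice.slice: slicedField;
|auto matrix = SquaredField().slicedField(3, 4); // 3 x 4 row-major view
|assert(matrix[2, 3] == 11 * 11);                // flat index 2 * 4 + 3 == 11
|-------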
|
|$(BOOKTABLE $(H2 Fields),
|$(TR $(TH Field Name) $(TH Used By))
|$(T2 BitField, $(SUBREF topology, bitwise))
|$(T2 BitpackField, $(SUBREF topology, bitpack))
|$(T2 CycleField, $(SUBREF topology, cycle) (2 kinds))
|$(T2 LinspaceField, $(SUBREF topology, linspace))
|$(T2 MagicField, $(SUBREF topology, magic))
|$(T2 MapField, $(SUBREF topology, map) and $(SUBREF topology, mapField))
|$(T2 ndIotaField, $(SUBREF topology, ndiota))
|$(T2 OrthogonalReduceField, $(SUBREF topology, orthogonalReduceField))
|$(T2 RepeatField, $(SUBREF topology, repeat))
|$(T2 SparseField, Used for mutable DOK sparse matrices)
|)
|
|
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko
|
|Macros:
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|module mir.ndslice.field;
|
|import mir.internal.utility: Iota;
|import mir.math.common: optmath;
|import mir.ndslice.internal;
|import mir.qualifier;
|
|@optmath:
|
|package template ZeroShiftField(T)
|{
| static if (hasZeroShiftFieldMember!T)
| alias ZeroShiftField = typeof(T.init.assumeFieldsHaveZeroShift());
| else
| alias ZeroShiftField = T;
|}
|
|package enum hasZeroShiftFieldMember(T) = __traits(hasMember, T, "assumeFieldsHaveZeroShift");
|
|package auto applyAssumeZeroShift(Types...)()
|{
| import mir.ndslice.topology;
| string str;
| foreach(i, T; Types)
| static if (hasZeroShiftFieldMember!T)
| str ~= "_fields[" ~ i.stringof ~ "].assumeFieldsHaveZeroShift, ";
| else
| str ~= "_fields[" ~ i.stringof ~ "], ";
| return str;
|}
|
|auto MapField__map(Field, alias fun, alias fun1)(ref MapField!(Field, fun) f)
|{
| import core.lifetime: move;
| import mir.functional: pipe;
| return MapField!(Field, pipe!(fun, fun1))(move(f._field));
|}
|
|
|/++
|`MapField` is used by $(SUBREF topology, map).
|+/
|struct MapField(Field, alias _fun)
|{
|@optmath:
| ///
| Field _field;
|
| ///
| auto lightConst()() const @property
| {
| return MapField!(LightConstOf!Field, _fun)(.lightConst(_field));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return MapField!(LightImmutableOf!Field, _fun)(.lightImmutable(_field));
| }
|
| /++
| User defined constructor used by $(LREF mapField).
| +/
| static alias __map(alias fun1) = MapField__map!(Field, _fun, fun1);
|
| auto ref opIndex(T...)(auto ref T index)
| {
| import mir.functional: RefTuple, unref;
| static if (is(typeof(_field[index]) : RefTuple!K, K...))
| {
| auto t = _field[index];
| return mixin("_fun(" ~ _iotaArgs!(K.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(_field[index]);
| }
|
| static if (__traits(hasMember, Field, "length"))
| auto length() const @property
| {
| return _field.length;
| }
|
| static if (__traits(hasMember, Field, "shape"))
| auto shape() const @property
| {
| return _field.shape;
| }
|
| static if (__traits(hasMember, Field, "elementCount"))
| auto elementCount() const @property
| {
| return _field.elementCount;
| }
|
| static if (hasZeroShiftFieldMember!Field)
| /// Defined if `Field` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| return _mapField!_fun(_field.assumeFieldsHaveZeroShift);
| }
|}
|
|/++
|`VmapField` is used by $(SUBREF topology, map).
|+/
|struct VmapField(Field, Fun)
|{
|@optmath:
| ///
| Field _field;
| ///
| Fun _fun;
|
| ///
| auto lightConst()() const @property
| {
| return VmapField!(LightConstOf!Field, _fun)(.lightConst(_field));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return VmapField!(LightImmutableOf!Field, _fun)(.lightImmutable(_field));
| }
|
| auto ref opIndex(T...)(auto ref T index)
| {
| import mir.functional: RefTuple, unref;
| static if (is(typeof(_field[index]) : RefTuple!K, K...))
| {
| auto t = _field[index];
| return mixin("_fun(" ~ _iotaArgs!(K.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(_field[index]);
| }
|
| static if (__traits(hasMember, Field, "length"))
| auto length() const @property
| {
| return _field.length;
| }
|
| static if (__traits(hasMember, Field, "shape"))
| auto shape() const @property
| {
| return _field.shape;
| }
|
| static if (__traits(hasMember, Field, "elementCount"))
| auto elementCount()const @property
| {
| return _field.elementCount;
| }
|
| static if (hasZeroShiftFieldMember!Field)
| /// Defined if `Field` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| return _vmapField(_field.assumeFieldsHaveZeroShift, _fun);
| }
|}
|
|/+
|Creates a mapped field. Uses `__map` if possible.
|+/
|auto _mapField(alias fun, Field)(Field field)
|{
| import mir.functional: naryFun;
| static if ((
| __traits(isSame, fun, naryFun!"a|b") ||
| __traits(isSame, fun, naryFun!"a^b") ||
| __traits(isSame, fun, naryFun!"a&b") ||
| __traits(isSame, fun, naryFun!"a | b") ||
| __traits(isSame, fun, naryFun!"a ^ b") ||
| __traits(isSame, fun, naryFun!"a & b")) &&
| is(Field : ZipField!(BitField!(LeftField, I), BitField!(RightField, I)), LeftField, RightField, I))
| {
| import mir.ndslice.topology: bitwiseField;
| auto f = ZipField!(LeftField, RightField)(field._fields[0]._field, field._fields[1]._field)._mapField!fun;
| return f.bitwiseField!(typeof(f), I);
| }
| else
| static if (__traits(hasMember, Field, "__map"))
| return Field.__map!fun(field);
| else
| return MapField!(Field, fun)(field);
|}
|
|/+
|Creates a mapped field. Uses `__vmap` if possible.
|+/
|auto _vmapField(Field, Fun)(Field field, Fun fun)
|{
| static if (__traits(hasMember, Field, "__vmap"))
| return Field.__vmap(field, fun);
| else
| return VmapField!(Field, Fun)(field, fun);
|}
|
|/++
|Iterates multiple fields in lockstep.
|
|`ZipField` is used by $(SUBREF topology, zipFields).
|+/
|struct ZipField(Fields...)
| if (Fields.length > 1)
|{
|@optmath:
| import mir.functional: RefTuple, Ref, _ref;
| import std.meta: anySatisfy;
|
| ///
| Fields _fields;
|
| ///
| auto lightConst()() const @property
| {
| import std.format;
| import mir.ndslice.topology: iota;
| import std.meta: staticMap;
| return mixin("ZipField!(staticMap!(LightConstOf, Fields))(%(_fields[%s].lightConst,%)].lightConst)".format(_fields.length.iota));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| import std.format;
| import mir.ndslice.topology: iota;
| import std.meta: staticMap;
| return mixin("ZipField!(staticMap!(LightImmutableOf, Fields))(%(_fields[%s].lightImmutable,%)].lightImmutable)".format(_fields.length.iota));
| }
|
| auto opIndex()(ptrdiff_t index)
| {
| alias Iterators = Fields;
| alias _iterators = _fields;
| import mir.ndslice.iterator: _zip_types, _zip_index;
| return mixin("RefTuple!(_zip_types!Fields)(" ~ _zip_index!Fields ~ ")");
| }
|
| auto opIndexAssign(Types...)(RefTuple!(Types) value, ptrdiff_t index)
| if (Types.length == Fields.length)
| {
| foreach(i, ref val; value.expand)
| {
| _fields[i][index] = val;
| }
| return opIndex(index);
| }
|
| static if (anySatisfy!(hasZeroShiftFieldMember, Fields))
| /// Defined if at least one of `Fields` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| import std.meta: staticMap;
| return mixin("ZipField!(staticMap!(ZeroShiftField, Fields))(" ~ applyAssumeZeroShift!Fields ~ ")");
| }
|}
|
|/++
|`RepeatField` is used by $(SUBREF topology, repeat).
|+/
|struct RepeatField(T)
|{
| import std.traits: Unqual;
|
|@optmath:
| alias UT = Unqual!T;
|
| ///
| UT _value;
|
| ///
| auto lightConst()() const @property @trusted
| {
| return RepeatField!(const T)(cast(UT) _value);
| }
|
| ///
| auto lightImmutable()() immutable @property @trusted
| {
| return RepeatField!(immutable T)(cast(UT) _value);
| }
|
| auto ref T opIndex()(ptrdiff_t) @trusted
| { return cast(T) _value; }
|}
|
|/++
|`BitField` is used by $(SUBREF topology, bitwise).
|+/
|struct BitField(Field, I = typeof(cast()Field.init[size_t.init]))
| if (__traits(isUnsigned, I))
|{
|@optmath:
| import mir.bitop: ctlz;
| package(mir) alias E = I;
| package(mir) enum shift = ctlz(I.sizeof) + 3;
|
| ///
| Field _field;
|
| /// optimization for bitwise operations
| auto __vmap(Fun : LeftOp!(op, bool), string op)(Fun fun)
| if (op == "|" || op == "&" || op == "^")
| {
| import mir.ndslice.topology: bitwiseField;
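|        // (Added note) `I(0) - fun.value` turns the bool operand into an
|        // all-zeros or all-ones word, so the bitwise op can be applied to
|        // whole words of the underlying field and the result reinterpreted
|        // as bits again via `bitwiseField`.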
| return _vmapField(_field, RightOp!(op, I)(I(0) - fun.value)).bitwiseField;
| }
|
| /// ditto
| auto __vmap(Fun : RightOp!(op, bool), string op)(Fun fun)
| if (op == "|" || op == "&" || op == "^")
| {
| import mir.ndslice.topology: bitwiseField;
| return _vmapField(_field, RightOp!(op, I)(I(0) - fun.value)).bitwiseField;
| }
|
| /// ditto
| auto __vmap(Fun)(Fun fun)
| {
| return VmapField!(typeof(this), Fun)(this, fun);
| }
|
| /// ditto
| alias __map(alias fun) = BitField__map!(Field, I, fun);
|
| ///
| auto lightConst()() const @property
| {
| return BitField!(LightConstOf!Field, I)(mir.qualifier.lightConst(_field));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return BitField!(LightImmutableOf!Field, I)(mir.qualifier.lightImmutable(_field));
| }
|
| bool opIndex()(size_t index)
| {
| import mir.bitop: bt;
| return bt!(Field, I)(_field, index) != 0;
| }
|
| bool opIndexAssign()(bool value, size_t index)
| {
| import mir.bitop: bta;
| bta!(Field, I)(_field, index, value);
| return value;
| }
|
| static if (hasZeroShiftFieldMember!Field)
| /// Defined if `Field` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| return BitField!(ZeroShiftField!Field, I)(_field.assumeFieldsHaveZeroShift);
| }
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.iterator: FieldIterator;
| ushort[10] data;
| auto f = FieldIterator!(BitField!(ushort*))(0, BitField!(ushort*)(data.ptr));
| f[123] = true;
| f++;
| assert(f[122]);
|}
|
|auto BitField__map(Field, I, alias fun)(BitField!(Field, I) field)
|{
| import core.lifetime: move;
| import mir.functional: naryFun;
| static if (__traits(isSame, fun, naryFun!"~a") || __traits(isSame, fun, naryFun!"!a"))
| {
| import mir.ndslice.topology: bitwiseField;
| auto f = _mapField!(naryFun!"~a")(move(field._field));
| return f.bitwiseField!(typeof(f), I);
| }
| else
| {
| return MapField!(BitField!(Field, I), fun)(move(field));
| }
|}
|
|/++
|`BitpackField` is used by $(SUBREF topology, bitpack).
|+/
|struct BitpackField(Field, uint pack, I = typeof(cast()Field.init[size_t.init]))
| if (__traits(isUnsigned, I))
|{
| //static assert();
|@optmath:
| package(mir) alias E = I;
| package(mir) enum mask = (I(1) << pack) - 1;
| package(mir) enum bits = I.sizeof * 8;
|
| ///
| Field _field;
|
| ///
| auto lightConst()() const @property
| {
| return BitpackField!(LightConstOf!Field, pack)(.lightConst(_field));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return BitpackField!(LightImmutableOf!Field, pack)(.lightImmutable(_field));
| }
|
| I opIndex()(size_t index)
| {
| index *= pack;
| size_t start = index % bits;
| index /= bits;
| auto ret = (_field[index] >>> start) & mask;
| static if (bits % pack)
| {
| sizediff_t end = start - (bits - pack);
| if (end > 0)
| ret ^= cast(I)(_field[index + 1] << (bits - end)) >>> (bits - pack);
| }
| return cast(I) ret;
| }
|
| I opIndexAssign()(I value, size_t index)
| {
| import std.traits: Unsigned;
| assert(cast(Unsigned!I)value <= mask);
| index *= pack;
| size_t start = index % bits;
| index /= bits;
| _field[index] = cast(I)((_field[index] & ~(mask << start)) ^ (value << start));
| static if (bits % pack)
| {
| sizediff_t end = start - (bits - pack);
| if (end > 0)
| _field[index + 1] = cast(I)((_field[index + 1] & ~((I(1) << end) - 1)) ^ (value >>> (pack - end)));
| }
| return value;
| }
|
| static if (hasZeroShiftFieldMember!Field)
| /// Defined if `Field` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| return BitpackField!(ZeroShiftField!Field, pack, I)(_field.assumeFieldsHaveZeroShift);
| }
|}
|
|///
|unittest
|{
| import mir.ndslice.iterator: FieldIterator;
| ushort[10] data;
| auto f = FieldIterator!(BitpackField!(ushort*, 6))(0, BitpackField!(ushort*, 6)(data.ptr));
| f[0] = cast(ushort) 31;
| f[1] = cast(ushort) 13;
| f[2] = cast(ushort) 8;
| f[3] = cast(ushort) 43;
| f[4] = cast(ushort) 28;
| f[5] = cast(ushort) 63;
| f[6] = cast(ushort) 39;
| f[7] = cast(ushort) 23;
| f[8] = cast(ushort) 44;
|
| assert(f[0] == 31);
| assert(f[1] == 13);
| assert(f[2] == 8);
| assert(f[3] == 43);
| assert(f[4] == 28);
| assert(f[5] == 63);
| assert(f[6] == 39);
| assert(f[7] == 23);
| assert(f[8] == 44);
| assert(f[9] == 0);
| assert(f[10] == 0);
| assert(f[11] == 0);
|}
|
|unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.topology;
| import mir.ndslice.sorting;
| uint[2] data;
| auto packed = data[].sliced.bitpack!18;
| assert(packed.length == 3);
| packed[0] = 5;
| packed[1] = 3;
| packed[2] = 2;
| packed.sort;
| assert(packed[0] == 2);
| assert(packed[1] == 3);
| assert(packed[2] == 5);
|}
|
|///
|struct OrthogonalReduceField(FieldsIterator, alias fun, T)
|{
| import mir.ndslice.slice: Slice;
|
|@optmath:
| /// non empty slice
|
| Slice!FieldsIterator _fields;
|
| ///
| T _initialValue;
|
| ///
| auto lightConst()() const @property
| {
| auto fields = _fields.lightConst;
| return OrthogonalReduceField!(fields.Iterator, fun, T)(fields, _initialValue);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| auto fields = _fields.lightImmutable;
| return OrthogonalReduceField!(fields.Iterator, fun, T)(fields, _initialValue);
| }
|
| /// `r = fun(r, fields[i][index]);` reduction by `i`
| auto opIndex()(size_t index)
| {
| import std.traits: Unqual;
| auto fields = _fields;
| T r = _initialValue;
| if (!fields.empty) do
| {
| r = cast(T) fun(r, fields.front[index]);
| fields.popFront;
| }
| while(!fields.empty);
| return r;
| }
|}
|
|///
|struct CycleField(Field)
|{
| import mir.ndslice.slice: Slice;
|
|@optmath:
| /// Cycle length
| size_t _length;
| ///
| Field _field;
|
| ///
| auto lightConst()() const @property
| {
| auto field = .lightConst(_field);
| return CycleField!(typeof(field))(_length, field);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| auto field = .lightImmutable(_field);
| return CycleField!(typeof(field))(_length, field);
| }
|
| ///
| auto ref opIndex()(size_t index)
| {
| return _field[index % _length];
| }
|
| ///
| static if (!__traits(compiles, &opIndex(size_t.init)))
| {
| auto ref opIndexAssign(T)(auto ref T value, size_t index)
| {
| return _field[index % _length] = value;
| }
| }
|
| static if (hasZeroShiftFieldMember!Field)
| /// Defined if `Field` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| return CycleField!(ZeroShiftField!Field)(_length, _field.assumeFieldsHaveZeroShift);
| }
|}
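|
|// Added sketch (not part of the original module): indexing wraps around
|// every `_length` elements of the underlying field.
|version(mir_test) unittest
|{
|    auto f = CycleField!(int[])(3, [10, 20, 30, 40]);
|    assert(f[0] == 10 && f[1] == 20 && f[2] == 30);
|    assert(f[3] == 10); // 3 % 3 == 0
|    assert(f[4] == 20); // 4 % 3 == 1
|}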
|
|///
|struct CycleField(Field, size_t length)
|{
| import mir.ndslice.slice: Slice;
|
|@optmath:
| /// Cycle length
| enum _length = length;
| ///
| Field _field;
|
| ///
| auto lightConst()() const @property
| {
| auto field = .lightConst(_field);
| return CycleField!(typeof(field), _length)(field);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| auto field = .lightImmutable(_field);
| return CycleField!(typeof(field), _length)(field);
| }
|
| ///
| auto ref opIndex()(size_t index)
| {
| return _field[index % _length];
| }
|
| ///
| static if (!__traits(compiles, &opIndex(size_t.init)))
| {
| auto ref opIndexAssign(T)(auto ref T value, size_t index)
| {
| return _field[index % _length] = value;
| }
| }
|
| static if (hasZeroShiftFieldMember!Field)
| /// Defined if `Field` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| return CycleField!(ZeroShiftField!Field, _length)(_field.assumeFieldsHaveZeroShift);
| }
|}
|
|/++
|`ndIotaField` is used by $(SUBREF topology, ndiota).
|+/
|struct ndIotaField(size_t N)
| if (N)
|{
|@optmath:
| ///
| size_t[N - 1] _lengths;
|
| ///
| auto lightConst()() const @property
| {
| return ndIotaField!N(_lengths);
| }
|
| ///
| auto lightImmutable()() const @property
| {
| return ndIotaField!N(_lengths);
| }
|
| ///
| size_t[N] opIndex()(size_t index) const
| {
| size_t[N] indices;
| foreach_reverse (i; Iota!(N - 1))
| {
| indices[i + 1] = index % _lengths[i];
| index /= _lengths[i];
| }
| indices[0] = index;
| return indices;
| }
|}
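|
|// Added sketch (not part of the original module): the field unravels a flat
|// index into row-major multidimensional indices.
|version(mir_test) unittest
|{
|    size_t[2] tailLengths = [3, 4]; // lengths of the last N - 1 dimensions
|    auto f = ndIotaField!3(tailLengths);
|    auto a = f[11];
|    assert(a[0] == 0 && a[1] == 2 && a[2] == 3); // 11 == 0 * 12 + 2 * 4 + 3
|    auto b = f[23];
|    assert(b[0] == 1 && b[1] == 2 && b[2] == 3); // 23 == 1 * 12 + 2 * 4 + 3
|}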
|
|/++
|`LinspaceField` is used by $(SUBREF topology, linspace).
|+/
|struct LinspaceField(T)
|{
| ///
| size_t _length;
|
| ///
| T _start = cast(T) 0, _stop = cast(T) 0;
|
| ///
| auto lightConst()() scope const @property
| {
0000000| return LinspaceField!T(_length, _start, _stop);
| }
|
| ///
| auto lightImmutable()() scope const @property
| {
| return LinspaceField!T(_length, _start, _stop);
| }
|
| // no fastmath
| ///
| T opIndex()(sizediff_t index) scope const
| {
0000000| sizediff_t d = _length - 1;
0000000| auto v = typeof(T.init.re)(d - index);
0000000| auto w = typeof(T.init.re)(index);
0000000| v /= d;
0000000| w /= d;
0000000| auto a = v * _start;
0000000| auto b = w * _stop;
0000000| return a + b;
| }
|
|@optmath:
|
| ///
| size_t length(size_t dimension = 0)() scope const @property
| if (dimension == 0)
| {
0000000| return _length;
| }
|
| ///
| size_t[1] shape()() scope const @property @nogc
| {
| return [_length];
| }
|}
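|
|// Added sketch (not part of the original module): opIndex interpolates
|// linearly, i.e. f[i] == ((n - 1 - i) * _start + i * _stop) / (n - 1).
|version(mir_test) unittest
|{
|    auto f = LinspaceField!double(5, 0.0, 1.0);
|    assert(f.length == 5);
|    assert(f[0] == 0.0);
|    assert(f[2] == 0.5);
|    assert(f[4] == 1.0);
|}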
|
|/++
|Magic square field.
|+/
|struct MagicField
|{
|@optmath:
|@safe pure nothrow @nogc:
|
| /++
| Magic Square size.
| +/
| size_t _n;
|
|scope const:
|
| ///
| MagicField lightConst()() @property
| {
0000000| return this;
| }
|
| ///
| MagicField lightImmutable()() @property
| {
| return this;
| }
|
| ///
| size_t length(size_t dimension = 0)() @property
| if(dimension <= 2)
| {
0000000| return _n * _n;
| }
|
| ///
| size_t[1] shape() @property
| {
0000000| return [_n * _n];
| }
|
| ///
| size_t opIndex(size_t index)
| {
| pragma(inline, false);
0000000| auto d = index / _n;
0000000| auto m = index % _n;
0000000| if (_n & 1)
| {
| //d = _n - 1 - d; // MATLAB synchronization
| //index = d * _n + m; // ditto
0000000| auto r = (index + 1 - d + (_n - 3) / 2) % _n;
0000000| auto c = (_n * _n - index + 2 * d) % _n;
0000000| return r * _n + c + 1;
| }
| else
0000000| if ((_n & 2) == 0)
| {
0000000| auto a = (d + 1) & 2;
0000000| auto b = (m + 1) & 2;
0000000| return a != b ? index + 1: _n * _n - index;
| }
| else
| {
0000000| auto n = _n / 2 ;
0000000| size_t shift;
0000000| ptrdiff_t q;
0000000| ptrdiff_t p = m - n;
0000000| if (p >= 0)
| {
0000000| m = p;
0000000| shift = n * n;
0000000| auto mul = m <= n / 2 + 1;
0000000| q = d - n;
0000000| if (q >= 0)
| {
0000000| d = q;
0000000| mul = !mul;
| }
0000000| if (mul)
| {
0000000| shift *= 2;
| }
| }
| else
| {
0000000| auto mul = m < n / 2;
0000000| q = d - n;
0000000| if (q >= 0)
| {
0000000| d = q;
0000000| mul = !mul;
| }
0000000| if (d == n / 2 && (m == 0 || m == n / 2))
| {
0000000| mul = !mul;
| }
0000000| if (mul)
| {
0000000| shift = n * n * 3;
| }
| }
0000000| index = d * n + m;
0000000| auto r = (index + 1 - d + (n - 3) / 2) % n;
0000000| auto c = (n * n - index + 2 * d) % n;
0000000| return r * n + c + 1 + shift;
| }
| }
|}
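|
|// Added sketch (not part of the original module): for n = 3 the field
|// enumerates the classic 3x3 magic square 4 9 2 / 3 5 7 / 8 1 6 in row-major
|// order, so every row sums to the magic constant 15.
|version(mir_test) unittest
|{
|    auto f = MagicField(3);
|    assert(f.length == 9);
|    assert(f[0] == 4 && f[1] == 9 && f[2] == 2);
|    assert(f[3] + f[4] + f[5] == 15);
|    assert(f[6] + f[7] + f[8] == 15);
|}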
|
|/++
|`SparseField` is used to represent sparse ndarrays in mutable DOK (dictionary of keys) format.
|+/
|struct SparseField(T)
|{
| ///
| T[size_t] _table;
|
| ///
| auto lightConst()() const @trusted
| {
| return SparseField!(const T)(cast(const(T)[size_t])_table);
| }
|
| ///
| auto lightImmutable()() immutable @trusted
| {
| return SparseField!(immutable T)(cast(immutable(T)[size_t])_table);
| }
|
| ///
| T opIndex()(size_t index)
| {
| import std.traits: isScalarType;
| static if (isScalarType!T)
| return _table.get(index, cast(T)0);
| else
| return _table.get(index, null);
| }
|
| ///
| T opIndexAssign()(T value, size_t index)
| {
| import std.traits: isScalarType;
| static if (isScalarType!T)
| {
| if (value != 0)
| _table[index] = value;
| else
| _table.remove(index);
| }
| else
| {
| if (value !is null)
| _table[index] = value;
| else
| _table.remove(index);
| }
| return value;
| }
|
| ///
| T opIndexUnary(string op)(size_t index)
| if (op == `++` || op == `--`)
| {
| import std.traits: isScalarType;
| mixin (`auto value = ` ~ op ~ `_table[index];`);
| static if (isScalarType!T)
| {
| if (value == 0)
| _table.remove(index);
| }
| else
| {
| if (value is null)
| _table.remove(index);
| }
| return value;
| }
|
| ///
| T opIndexOpAssign(string op)(T value, size_t index)
| if (op == `+` || op == `-`)
| {
| import std.traits: isScalarType;
| mixin (`value = _table[index] ` ~ op ~ `= value;`); // this works
| static if (isScalarType!T)
| {
| if (value == 0)
| _table.remove(index);
| }
| else
| {
| if (value is null)
| _table.remove(index);
| }
| return value;
| }
|}
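|
|// Added sketch (not part of the original module): DOK behaviour, i.e. zero
|// values are not stored, and assigning zero removes the key from the table.
|version(mir_test) unittest
|{
|    SparseField!double f;
|    assert(f[7] == 0);          // missing keys read as zero
|    f[7] = 3.0;
|    assert(f[7] == 3.0);
|    assert(f._table.length == 1);
|    f[7] = 0;                   // assigning zero erases the entry
|    assert(f._table.length == 0);
|}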
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/field.d is 0% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-random-2.2.15-mir-random-source-mir-random-package.lst
|/++
|$(SCRIPT inhibitQuickIndex = 1;)
|
|Basic API to construct non-uniform random number generators and stochastic algorithms.
|Non-uniform and uniform random variables can be found in `mir.random.variable`.
|
|$(TABLE $(H2 Generation functions),
|$(TR $(TH Function Name) $(TH Description))
|$(T2 rand, Generates real, integral, boolean, and enumerated uniformly distributed values.)
|$(T2 randIndex, Generates uniformly distributed index.)
|$(T2 randGeometric, Generates geometric distribution with `p = 1/2`.)
|$(T2 randExponential2, Generates scaled Exponential distribution.)
|)
|
|$(TABLE $(H2 Phobos Compatibility),
|$(TR $(TH Template Name) $(TH Description))
|$(T2 PhobosRandom, Extends a Mir random number engine to meet Phobos `std.random` interface)
|$(T2 isPhobosUniformRNG, Tests if type is a Phobos-style uniform RNG)
|)
|
|Publicly includes `mir.random.engine`.
|
|Authors: Ilya Yaroshenko, Nathan Sashihara
|Copyright: Copyright, Ilya Yaroshenko 2016-.
|License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
|Macros:
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, random, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|
|+/
|module mir.random;
|
|import std.traits;
|import mir.bitop: cttz;
|import mir.math.common: log2;
|
|public import mir.random.engine;
|
|version (LDC)
|{
| import ldc.intrinsics: llvm_expect;
| // LDC 1.8.0 supports llvm_expect in CTFE.
| private template _ctfeExpect(string expr, string expected)
| {
| static if (__traits(compiles, { enum a = llvm_expect(123, 456); static assert(a == 123); }))
| private enum _ctfeExpect = "llvm_expect("~expr~","~expected~")";
| else
| private enum _ctfeExpect = expr;
| }
|}
|else version (GNU)
|{
| import gcc.builtins: __builtin_expect;
| private enum _ctfeExpect(string expr, string expected) = `__builtin_expect(`~expr~`,`~expected~`)`;
|}
|else
|{
| private enum _ctfeExpect(string expr, string expected) = expr;
|}
|
|/++
|Params:
| gen = saturated random number generator
|Returns:
| Uniformly distributed integer for interval `[T.min .. T.max]`.
|+/
|T rand(T, G)(scope ref G gen)
| if (isSaturatedRandomEngine!G && isIntegral!T && !is(T == enum))
|{
| alias R = EngineReturnType!G;
| enum P = T.sizeof / R.sizeof;
| static if (P > 1)
| {
| _Uab!(R[P],T) u = void;
| version(LittleEndian)
| foreach (ref e; u.asArray)
| e = gen();
| else
| foreach_reverse (ref e; u.asArray)
| e = gen();
| return u.asInteger;
| }
| else static if (preferHighBits!G && P == 0)
| {
| version(LDC) pragma(inline, true);
| return cast(T) (gen() >>> ((R.sizeof - T.sizeof) * 8));
| }
| else
| {
| version(LDC) pragma(inline, true);
| return cast(T) gen();
| }
|}
|
|/// ditto
|T rand(T, G)(scope G* gen)
| if (isSaturatedRandomEngine!G && isIntegral!T && !is(T == enum))
|{
| return rand!(T, G)(*gen);
|}
|
|/// ditto
|T rand(T)()
| if (isIntegral!T && !is(T == enum))
|{
| return rand!T(rne);
|}
|
|///
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| auto s = rand!short;
| auto n = rand!ulong;
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| auto s = gen.rand!short;
| auto n = gen.rand!ulong;
|}
|
|/++
|Params:
| gen = saturated random number generator
|Returns:
| Uniformly distributed boolean.
|+/
|bool rand(T : bool, G)(scope ref G gen)
| if (isSaturatedRandomEngine!G)
|{
| import std.traits : Signed;
| return 0 > cast(Signed!(EngineReturnType!G)) gen();
|}
|
|/// ditto
|bool rand(T : bool, G)(scope G* gen)
| if (isSaturatedRandomEngine!G)
|{
| return rand!(T, G)(*gen);
|}
|
|/// ditto
|bool rand(T : bool)()
|{
| return rand!T(rne);
|}
|
|///
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| auto s = rand!bool;
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| auto s = gen.rand!bool;
|}
|
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| //Coverage. Impure because uses thread-local.
| Random* gen = threadLocalPtr!Random;
| auto s = gen.rand!bool;
|}
|
|private alias Iota(size_t j) = Iota!(0, j);
|
|private template Iota(size_t i, size_t j)
|{
| import std.meta;
| static assert(i <= j, "Iota: i should be less than or equal to j");
| static if (i == j)
| alias Iota = AliasSeq!();
| else
| alias Iota = AliasSeq!(i, Iota!(i + 1, j));
|}
|
|/+
|Returns pseudo-random integer with the low `bitsWanted` bits set to
|random values and the remaining high bits all 0.
|+/
|private T _randBits(T, uint bitsWanted, G)(scope ref G gen)
|if (bitsWanted >= 0 && bitsWanted <= T.sizeof * 8
| && (is(T == uint) || is(T == ulong) || is(T == size_t)))
|{
| static if (EngineReturnType!G.sizeof >= T.sizeof)
| auto bits = gen();
| else
| auto bits = gen.rand!T;
| static if (preferHighBits!G)
| {
| enum rshift = (typeof(bits).sizeof * 8) - bitsWanted;
| return cast(T) (bits >>> rshift);
| }
| else
| {
| enum mask = (typeof(bits)(1) << bitsWanted) - 1;
| return cast(T) (bits & typeof(bits)(mask));
| }
|}
|
|/++
|Params:
| gen = saturated random number generator
|Returns:
| Uniformly distributed enumeration.
|+/
|T rand(T, G)(scope ref G gen)
| if (isSaturatedRandomEngine!G && is(T == enum))
|{
| static if (is(T : long))
| enum tiny = [EnumMembers!T] == [Iota!(EnumMembers!T.length)];
| else
| enum tiny = false;
| enum n = [EnumMembers!T].length;
| // If `gen` produces 32 bits or fewer at a time and we have fewer
| // than 2^^32 elements, use a `uint` index.
| static if (n <= uint.max && EngineReturnType!G.max <= uint.max)
| alias IndexType = uint;
| else
| alias IndexType = size_t;
|
| static if ((n & (n - 1)) == 0)
| {
| // Optimized case: power of 2.
| import core.bitop : bsr;
| enum bitsWanted = bsr(n);
| IndexType index = _randBits!(IndexType, bitsWanted)(gen);
| }
| else
| {
| // General case.
| IndexType index = gen.randIndex!IndexType(n);
| }
|
| static if (tiny)
| {
| return cast(T) index;
| }
| else
| {
| static immutable T[EnumMembers!T.length] members = [EnumMembers!T];
| return members[index];
| }
|}
|
|/// ditto
|T rand(T, G)(scope G* gen)
| if (isSaturatedRandomEngine!G && is(T == enum))
|{
| return rand!(T, G)(*gen);
|}
|
|/// ditto
|T rand(T)()
| if (is(T == enum))
|{
| return .rand!T(rne);
|}
|
|///
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| enum A { a, b, c }
| auto e = rand!A;
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| enum A { a, b, c }
| auto e = gen.rand!A;
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| enum A : dchar { a, b, c }
| auto e = gen.rand!A;
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| enum A : string { a = "a", b = "b", c = "c" }
| auto e = gen.rand!A;
|}
|
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| //Coverage. Impure because uses thread-local.
| Random* gen = threadLocalPtr!Random;
| enum A : dchar { a, b, c, d }
| auto e = gen.rand!A;
|}
|
|private static union _U
|{
| real r;
| struct
| {
| version(LittleEndian)
| {
| ulong m;
| ushort e;
| }
| else
| {
| ushort e;
| align(2)
| ulong m;
| }
| }
|}
|
|private static union _Uab(A,B) if (A.sizeof == B.sizeof && !is(Unqual!A == Unqual!B))
|{
| A a;
| B b;
|
| private import std.traits: isArray, isIntegral, isFloatingPoint;
|
| static if (isArray!A && !isArray!B)
| alias asArray = a;
| static if (isArray!B && !isArray!A)
| alias asArray = b;
|
| static if (isIntegral!A && !isIntegral!B)
| alias asInteger = a;
| static if (isIntegral!B && !isIntegral!A)
| alias asInteger = b;
|
| static if (isFloatingPoint!A && !isFloatingPoint!B)
| alias asFloatingPoint = a;
| static if (isFloatingPoint!B && !isFloatingPoint!A)
| alias asFloatingPoint = b;
|}
|
|/++
|Params:
| gen = saturated random number generator
|    boundExp = bound exponent (optional). `boundExp` must be less than or equal to `T.max_exp`.
|Returns:
| Uniformly distributed real for interval `(-2^^boundExp , 2^^boundExp)`.
|Note: `fabs` can be used to get a value from positive interval `[0, 2^^boundExp$(RPAREN)`.
|+/
|T rand(T, G)(scope ref G gen, sizediff_t boundExp = 0)
| if (isSaturatedRandomEngine!G && isFloatingPoint!T)
|{
| assert(boundExp <= T.max_exp);
| static if (T.mant_dig == float.mant_dig)
| {
| enum W = T.sizeof * 8 - T.mant_dig;//8
| _Uab!(int,float) u = void;
| u.asInteger = gen.rand!uint;
| enum uint EXPMASK = 0x7F80_0000;
| boundExp -= T.min_exp - 1;
| size_t exp = EXPMASK & u.asInteger;
| exp = boundExp - (exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W);
| u.asInteger &= ~EXPMASK;
| if(cast(sizediff_t)exp < 0)
| {
| exp = -cast(sizediff_t)exp;
| uint m = u.asInteger & int.max;
| if(exp >= T.mant_dig)
| m = 0;
| else
| m >>= cast(uint)exp;
| u.asInteger = (u.asInteger & ~int.max) ^ m;
| exp = 0;
| }
| u.asInteger = cast(uint)(exp << (T.mant_dig - 1)) ^ u.asInteger;
| return u.asFloatingPoint;
| }
| else
| static if (T.mant_dig == double.mant_dig)
| {
| enum W = T.sizeof * 8 - T.mant_dig; //11
| _Uab!(long,double) u = void;
| u.asInteger = gen.rand!ulong;
| enum ulong EXPMASK = 0x7FF0_0000_0000_0000;
| boundExp -= T.min_exp - 1;
| ulong exp = EXPMASK & u.asInteger;
| exp = ulong(boundExp) - (exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W);
| u.asInteger &= ~EXPMASK;
| if(cast(long)exp < 0)
| {
| exp = -cast(sizediff_t)exp;
| ulong m = u.asInteger & long.max;
| if(exp >= T.mant_dig)
| m = 0;
| else
| m >>= cast(uint)exp;
| u.asInteger = (u.asInteger & ~long.max) ^ m;
| exp = 0;
| }
| u.asInteger = (exp << (T.mant_dig - 1)) ^ u.asInteger;
| return u.asFloatingPoint;
| }
| else
| static if (T.mant_dig == 64)
| {
| enum W = 15;
| auto d = gen.rand!uint;
| auto m = gen.rand!ulong;
| enum uint EXPMASK = 0x7FFF;
| boundExp -= T.min_exp - 1;
| size_t exp = EXPMASK & d;
| exp = boundExp - (exp ? cttz(exp) : gen.randGeometric + W);
| if (cast(sizediff_t)exp > 0)
| m |= ~long.max;
| else
| {
| m &= long.max;
| exp = -cast(sizediff_t)exp;
| if(exp >= T.mant_dig)
| m = 0;
| else
| m >>= cast(uint)exp;
| exp = 0;
| }
| d = cast(uint) exp ^ (d & ~EXPMASK);
| _U ret = void;
| ret.e = cast(ushort)d;
| ret.m = m;
| return ret.r;
| }
| /// TODO: quadruple
| else static assert(0);
|}
|
|/// ditto
|T rand(T, G)(scope G* gen, sizediff_t boundExp = 0)
| if (isSaturatedRandomEngine!G && isFloatingPoint!T)
|{
| return rand!(T, G)(*gen, boundExp);
|}
|
|/// ditto
|T rand(T)(sizediff_t boundExp = 0)
| if (isFloatingPoint!T)
|{
| return rand!T(rne, boundExp);
|}
|
|
|///
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| import mir.math.common: fabs;
|
| auto a = rand!float;
| assert(-1 < a && a < +1);
|
| auto b = rand!double(4);
| assert(-16 < b && b < +16);
|
| auto c = rand!double(-2);
| assert(-0.25 < c && c < +0.25);
|
| auto d = rand!real.fabs;
| assert(0.0L <= d && d < 1.0L);
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.math.common: fabs;
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
|
| auto a = gen.rand!float;
| assert(-1 < a && a < +1);
|
| auto b = gen.rand!double(4);
| assert(-16 < b && b < +16);
|
| auto c = gen.rand!double(-2);
| assert(-0.25 < c && c < +0.25);
|
| auto d = gen.rand!real.fabs;
| assert(0.0L <= d && d < 1.0L);
|}
|
|/// Subnormal numbers
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| auto x = gen.rand!double(double.min_exp-1);
| assert(-double.min_normal < x && x < double.min_normal);
|}
|
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| //Coverage. Impure because uses thread-local.
| import mir.math.common: fabs;
| import std.meta: AliasSeq;
|
| auto a = rne.rand!float;
| assert(-1 < a && a < +1);
|
| auto b = rne.rand!double(4);
| assert(-16 < b && b < +16);
|
| auto c = rne.rand!double(-2);
| assert(-0.25 < c && c < +0.25);
|
| auto d = rne.rand!real.fabs;
| assert(0.0L <= d && d < 1.0L);
|
| foreach(T; AliasSeq!(float, double, real))
| {
| auto f = rne.rand!T(T.min_exp-1);
| assert(f.fabs < T.min_normal, T.stringof);
| }
|}
|
|/++
|Params:
| gen = uniform random number generator
|    m = positive modulus
|Returns:
| Uniformly distributed integer for interval `[0 .. m$(RPAREN)`.
|+/
|T randIndex(T, G)(scope ref G gen, T _m)
| if(isSaturatedRandomEngine!G && isUnsigned!T)
|{
| immutable m = _m + 0u;
| static if (EngineReturnType!G.sizeof >= T.sizeof * 2)
| alias MaybeR = EngineReturnType!G;
| else static if (uint.sizeof >= T.sizeof * 2)
| alias MaybeR = uint;
| else static if (ulong.sizeof >= T.sizeof * 2)
| alias MaybeR = ulong;
| else static if (is(ucent) && __traits(compiles, {static assert(ucent.sizeof >= T.sizeof * 2);}))
| mixin ("alias MaybeR = ucent;");
| else
| alias MaybeR = void;
|
| static if (!is(MaybeR == void))
| {
| alias R = MaybeR;
| static assert(R.sizeof >= T.sizeof * 2);
| //Use Daniel Lemire's fast alternative to modulo reduction:
| //https://lemire.me/blog/2016/06/30/fast-random-shuffling/
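|        // (Added note) The double-width product `randombits * m` is split in
|        // two: its high bits become the returned index and its low bits
|        // (`leftover`) drive the rejection loop below, which removes the
|        // slight bias of a plain multiply-shift reduction.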
| R randombits = cast(R) gen.rand!T;
| R multiresult = randombits * m;
| T leftover = cast(T) multiresult;
| if (mixin(_ctfeExpect!(`leftover < m`, `false`)))
| {
| immutable threshold = -m % m ;
| while (leftover < threshold)
| {
| randombits = cast(R) gen.rand!T;
| multiresult = randombits * m;
| leftover = cast(T) multiresult;
| }
| }
| enum finalshift = T.sizeof * 8;
| return cast(T) (multiresult >>> finalshift);
| }
| else
| {
| import mir.utility : extMul;
| //Use Daniel Lemire's fast alternative to modulo reduction:
| //https://lemire.me/blog/2016/06/30/fast-random-shuffling/
| auto u = extMul!T(gen.rand!T, m);
| if (mixin(_ctfeExpect!(`u.low < m`, `false`)))
| {
| immutable T threshold = -m % m;
| while (u.low < threshold)
| {
| u = extMul!T(gen.rand!T, m);
| }
| }
| return u.high;
| }
|}
|
|/// ditto
|T randIndex(T, G)(scope G* gen, T m)
| if(isSaturatedRandomEngine!G && isUnsigned!T)
|{
| return randIndex!(T, G)(*gen, m);
|}
|
|/// ditto
|T randIndex(T)(T m)
| if(isUnsigned!T)
|{
| return randIndex!T(rne, m);
|}
|
|///
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| auto s = randIndex(100u);
| auto n = randIndex!ulong(-100);
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random;
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| auto s = gen.randIndex!uint(100);
| auto n = gen.randIndex!ulong(-100);
|}
|
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| //CTFE check.
| import std.meta : AliasSeq;
| import mir.random.engine.xoshiro : Xoroshiro128Plus;
| foreach (IntType; AliasSeq!(ubyte,ushort,uint,ulong))
| {
| enum IntType e = (){auto g = Xoroshiro128Plus(1); return g.randIndex!IntType(100);}();
| auto gen = Xoroshiro128Plus(1);
| assert(e == gen.randIndex!IntType(100));
| }
|}
|
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| //Test production of ulong from ulong generator.
| import mir.random.engine.xoshiro;
| auto gen = Xoroshiro128Plus(1);
| enum ulong limit = 10;
| enum count = 10;
| ulong[limit] buckets;
| foreach (_; 0 .. count)
| {
| ulong x = gen.randIndex!ulong(limit);
| assert(x < limit);
| buckets[cast(size_t) x] += 1;
| }
| foreach (i, x; buckets)
| assert(x != count, "All values were the same!");
|}
|
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| //Coverage. Impure because uses thread-local.
| Random* gen = threadLocalPtr!Random;
| auto s = gen.randIndex!uint(100);
| auto n = gen.randIndex!ulong(-100);
|}
|
|/++
| Returns: `n >= 0` such that `P(n) := 1 / (2^^(n + 1))`.
|+/
|size_t randGeometric(G)(scope ref G gen)
| if(isSaturatedRandomEngine!G)
|{
| alias R = EngineReturnType!G;
| static if (R.sizeof >= size_t.sizeof)
| alias T = size_t;
| else
| alias T = R;
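|    // (Added note) Each random bit is an independent fair coin, so the number
|    // of trailing zero bits before the lowest set bit is geometrically
|    // distributed; an all-zero word adds `T.sizeof * 8` failed trials to the
|    // count and the search continues with the next word.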
| for(size_t count = 0;; count += T.sizeof * 8)
| if(auto val = gen.rand!T())
| return count + cttz(val);
|}
|
|/// ditto
|size_t randGeometric(G)(scope G* gen)
| if(isSaturatedRandomEngine!G)
|{
| return randGeometric!(G)(*gen);
|}
|
|/// ditto
|size_t randGeometric()()
|{
| return randGeometric(rne);
|}
|
|///
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| size_t s = randGeometric;
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xoshiro;
| auto gen = Xoroshiro128Plus(1);
|
| size_t s = gen.randGeometric;
|}
|
|/++
|Params:
| gen = saturated random number generator
|Returns:
| `X ~ Exp(1) / log(2)`.
|+/
|T randExponential2(T, G)(scope ref G gen)
| if (isSaturatedRandomEngine!G && isFloatingPoint!T)
|{
| enum W = T.sizeof * 8 - T.mant_dig - 1 - bool(T.mant_dig == 64);
| static if (is(T == float))
| {
| _Uab!(uint,float) u = void;
| u.asInteger = gen.rand!uint;
| enum uint EXPMASK = 0xFF80_0000;
| auto exp = EXPMASK & u.asInteger;
| u.asInteger &= ~EXPMASK;
| u.asInteger ^= 0x3F000000; // 0.5
| auto y = exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W;
| auto x = u.asFloatingPoint;
| }
| else
| static if (is(T == double))
| {
| _Uab!(ulong,double) u = void;
| u.asInteger = gen.rand!ulong;
| enum ulong EXPMASK = 0xFFF0_0000_0000_0000;
| auto exp = EXPMASK & u.asInteger;
| u.asInteger &= ~EXPMASK;
| u.asInteger ^= 0x3FE0000000000000; // 0.5
| auto y = exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W;
| auto x = u.asFloatingPoint;
| }
| else
| static if (T.mant_dig == 64)
| {
| _U ret = void;
| ret.e = 0x3FFE;
| ret.m = gen.rand!ulong | ~long.max;
| auto y = gen.randGeometric;
| auto x = ret.r;
| }
| /// TODO: quadruple
| else static assert(0);
|
| if (x == 0.5f)
| return y;
| else
| return -log2(x) + y;
|}
|
|/// ditto
|T randExponential2(T, G)(scope G* gen)
| if (isSaturatedRandomEngine!G && isFloatingPoint!T)
|{
| return randExponential2!(T, G)(*gen);
|}
|
|/// ditto
|T randExponential2(T)()
| if (isFloatingPoint!T)
|{
| return randExponential2!T(rne);
|}
|
|///
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| auto v = randExponential2!double;
|}
|
|///
|@nogc nothrow @safe pure version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| auto v = gen.randExponential2!double();
|}
|
|/++
|$(LINK2 https://dlang.org/phobos/std_random.html#.isUniformRNG,
|Tests if T is a Phobos-style uniform RNG.)
|+/
|template isPhobosUniformRNG(T)
|{
| import std.random: isUniformRNG;
| enum bool isPhobosUniformRNG = isUniformRNG!T;
|}
|
|/++
|Extends a Mir-style random number generator to also be a Phobos-style
|uniform RNG. If `Engine` is already a Phobos-style uniform RNG,
|`PhobosRandom` is just an alias for `Engine`.
|+/
|struct PhobosRandom(Engine) if (isRandomEngine!Engine && !isPhobosUniformRNG!Engine)//Doesn't need to be saturated.
|{
| alias Uint = EngineReturnType!Engine;
| private Engine _engine;
| private Uint _front;
|
| /// Default constructor and copy constructor are disabled.
| @disable this();
| /// ditto
| @disable this(this);
|
| /// Forward constructor arguments to `Engine`.
| this(A...)(auto ref A args)
| if (is(typeof(Engine(args))))
| {
| _engine = Engine(args);
| _front = _engine.opCall();
| }
|
| /// Phobos-style random interface.
| enum bool isUniformRandom = true;
| /// ditto
| enum Uint min = Uint.min;//Always normalized.
| /// ditto
| enum Uint max = Engine.max;//Might not be saturated.
| /// ditto
| enum bool empty = false;
| /// ditto
| @property Uint front()() const { return _front; }
| /// ditto
| void popFront()() { _front = _engine.opCall(); }
| /// ditto
| void seed(A...)(auto ref A args) if (is(typeof(Engine(args))))
| {
| _engine.__ctor(args);
| _front = _engine.opCall();
| }
|
| /// Retain support for Mir-style random interface.
| enum bool isRandomEngine = true;
| /// ditto
| enum bool preferHighBits = .preferHighBits!Engine;
| /// ditto
| Uint opCall()()
| {
| Uint result = _front;
| _front = _engine.opCall();
| return result;
| }
|
| ///
| @property ref inout(Engine) engine()() inout @nogc nothrow pure @safe
| {
| return _engine;
| }
|}
|
|/// ditto
|template PhobosRandom(Engine) if (isRandomEngine!Engine && isPhobosUniformRNG!Engine)
|{
| alias PhobosRandom = Engine;
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift: Xorshift1024StarPhi;
| import std.random: isSeedable, isPhobosUniformRNG = isUniformRNG;
|
| alias RNG = PhobosRandom!Xorshift1024StarPhi;
|
| //Phobos interface
| static assert(isPhobosUniformRNG!(RNG, ulong));
| static assert(isSeedable!(RNG, ulong));
| //Mir interface
| static assert(isSaturatedRandomEngine!RNG);
| static assert(is(EngineReturnType!RNG == ulong));
|
| auto gen = Xorshift1024StarPhi(1);
| auto rng = RNG(1);
| assert(gen() == rng.front);
| rng.popFront();
| assert(gen() == rng.front);
| rng.popFront();
| assert(gen() == rng());
|
| gen.__ctor(1);
| rng.seed(1);
| assert(gen() == rng());
|}
../../../.dub/packages/mir-random-2.2.15/mir-random/source/mir/random/package.d has no code
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-rc-array.lst
|/++
|$(H1 Thread-safe reference-counted arrays and iterators).
|+/
|module mir.rc.array;
|
|import mir.primitives: hasLength;
|import mir.qualifier;
|import mir.rc.context;
|import mir.type_info;
|import std.traits;
|
|package static immutable allocationExcMsg = "mir_rcarray: out of memory error.";
|
|version (D_Exceptions)
|{
| import core.exception: OutOfMemoryError;
| package static immutable allocationError = new OutOfMemoryError(allocationExcMsg);
|}
|
|/++
|Thread-safe reference-counting array.
|
|The implementation never adds roots into the GC.
|+/
|struct mir_rcarray(T)
|{
| ///
| package T* _payload;
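|    // (Added note) The reference-counting context is allocated immediately in
|    // front of the payload, so `(cast(mir_rc_context*)_payload)[-1]` below and
|    // `ctx + 1` in the constructor refer to parts of the same allocation.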
| package ref mir_rc_context context() inout scope return pure nothrow @nogc @trusted @property
| {
| assert(_payload);
| return (cast(mir_rc_context*)_payload)[-1];
| }
| package void _reset() { _payload = null; }
|
| package alias ThisTemplate = .mir_rcarray;
| package alias _thisPtr = _payload;
|
| ///
| alias serdeKeysProxy = T;
|
| ///
| void proxySwap(ref typeof(this) rhs) pure nothrow @nogc @safe
| {
| auto t = this._payload;
| this._payload = rhs._payload;
| rhs._payload = t;
| }
|
| ///
| this(typeof(null))
| {
| }
|
| ///
| mixin CommonRCImpl;
|
| ///
| ~this() nothrow
| {
| static if (hasElaborateDestructor!T || hasDestructor!T)
| {
| if (false) // break @safe and pure attributes
| {
| Unqual!T* object;
| (*object).__xdtor;
| }
| }
| if (this)
| {
| (() @trusted { mir_rc_decrease_counter(context); })();
| }
| }
|
| ///
| size_t length() @trusted scope pure nothrow @nogc const @property
| {
| return _payload !is null ? context.length : 0;
| }
|
| ///
| inout(T)* ptr() @system scope inout
| {
| return _payload;
| }
|
| ///
| ref opIndex(size_t i) @trusted scope inout
| {
| assert(_payload);
| assert(i < context.length);
| return _payload[i];
| }
|
| ///
| inout(T)[] opIndex() @trusted scope inout
| {
| return _payload !is null ? _payload[0 .. context.length] : null;
| }
|
| ///
| size_t opDollar(size_t pos : 0)() @trusted scope pure nothrow @nogc const
| {
| return length;
| }
|
| ///
| auto asSlice() @property
| {
| import mir.ndslice.slice: mir_slice;
| alias It = mir_rci!T;
| return mir_slice!It([length], It(this));
| }
|
| ///
| auto asSlice() const @property
| {
| import mir.ndslice.slice: mir_slice;
| alias It = mir_rci!(const T);
| return mir_slice!It([length], It(this.lightConst));
| }
|
| ///
| auto asSlice() immutable @property
| {
| import mir.ndslice.slice: mir_slice;
| alias It = mir_rci!(immutable T);
| return mir_slice!It([length], It(this.lightImmutable));
| }
|
| ///
| auto moveToSlice() @property
| {
| import core.lifetime: move;
| import mir.ndslice.slice: mir_slice;
| alias It = mir_rci!T;
| return mir_slice!It([length], It(move(this)));
| }
|
| /++
| Params:
| length = array length
| initialize = Flag, don't initialize memory with default value if `false`.
| deallocate = Flag, never deallocates memory if `false`.
| +/
| this(size_t length, bool initialize = true, bool deallocate = true) @trusted @nogc
| {
| if (length == 0)
| return;
| Unqual!T[] ar;
| () @trusted {
| static if (is(T == class) || is(T == interface))
| auto ctx = mir_rc_create(mir_get_type_info!T, length, mir_get_payload_ptr!T, initialize, deallocate);
| else
| auto ctx = mir_rc_create(mir_get_type_info!T, length, mir_get_payload_ptr!T, initialize, deallocate);
| if (!ctx)
| {
| version(D_Exceptions)
| throw allocationError;
| else
| assert(0, allocationExcMsg);
| }
| _payload = cast(T*)(ctx + 1);
| ar = cast(Unqual!T[])_payload[0 .. length];
| } ();
| if (initialize || hasElaborateAssign!(Unqual!T))
| {
| import mir.conv: uninitializedFillDefault;
| uninitializedFillDefault(ar);
| }
| }
|
| static if (isImplicitlyConvertible!(const T, T))
| static if (isImplicitlyConvertible!(const Unqual!T, T))
| package alias V = const Unqual!T;
| else
| package alias V = const T;
| else
| package alias V = T;
|
| static if (is(T == const) || is(T == immutable))
| this(return ref scope const typeof(this) rhs) @trusted pure nothrow @nogc
| {
| if (rhs)
| {
| this._payload = cast(typeof(this._payload))rhs._payload;
| mir_rc_increase_counter(context);
| }
| }
|
| static if (is(T == immutable))
| this(return ref scope const typeof(this) rhs) immutable @trusted pure nothrow @nogc
| {
| if (rhs)
| {
| this._payload = cast(typeof(this._payload))rhs._payload;
| mir_rc_increase_counter(context);
| }
| }
|
| static if (is(T == immutable))
| this(return ref scope const typeof(this) rhs) const @trusted pure nothrow @nogc
| {
| if (rhs)
| {
| this._payload = cast(typeof(this._payload))rhs._payload;
| mir_rc_increase_counter(context);
| }
| }
|
| this(return ref scope inout typeof(this) rhs) inout @trusted pure nothrow @nogc
| {
| if (rhs)
| {
| this._payload = rhs._payload;
| mir_rc_increase_counter(context);
| }
| }
|
| ///
| ref opAssign(typeof(null)) return @trusted // pure nothrow @nogc
| {
| this = typeof(this).init;
| return this;
| }
|
| ///
| ref opAssign(return typeof(this) rhs) return @trusted // pure nothrow @nogc
| {
| this.proxySwap(rhs);
| return this;
| }
|
| ///
| ref opAssign(Q)(return ThisTemplate!Q rhs) return @trusted // pure nothrow @nogc
| if (isImplicitlyConvertible!(Q*, T*))
| {
| this.proxySwap(*()@trusted{return cast(typeof(this)*)&rhs;}());
| return this;
| }
|}
|
|/// ditto
|alias RCArray = mir_rcarray;
|
|///
|version(mir_test)
|@safe pure @nogc nothrow
|unittest
|{
| auto a = RCArray!double(10);
| foreach(i, ref e; a)
| e = i;
| auto b = a;
| assert(b[$ - 1] == 9);
| foreach(i, ref e; b)
| assert(e == i);
| b[4] = 100;
| assert(a[4] == 100);
|
| import mir.ndslice.slice;
|
| auto s = a.asSlice; // as RC random access range (ndslice)
| static assert(is(typeof(s) == Slice!(RCI!double)));
| static assert(is(typeof(s) == mir_slice!(mir_rci!double)));
|
| auto r = a[]; // scope array
| static assert(is(typeof(r) == double[]));
|
| auto fs = r.sliced; // scope fast random access range (ndslice)
| static assert(is(typeof(fs) == Slice!(double*)));
|}
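|
|/// `moveToSlice` (a minimal sketch; assumes the move transfers ownership without
|/// touching the reference counter, leaving the source array empty).
|version(mir_test)
|@nogc nothrow
|unittest
|{
| auto a = rcarray!double(1.0, 2, 3);
| auto s = a.moveToSlice; // ownership moves into the ndslice
| assert(s.length == 3);
| assert(s[0] == 1);
| assert(a.length == 0); // assumption: the source array is reset by the move
|}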
|
|package template LikeArray(Range)
|{
| static if (__traits(identifier, Range) == "mir_slice")
| {
| import mir.ndslice.slice;
| enum LikeArray = is(Range : Slice!(T*, N, kind), T, size_t N, SliceKind kind);
| }
| else
| {
| enum LikeArray = false;
| }
|}
|
|///
|auto rcarray(T = void, Range)(ref Range range)
| if (is(T == void) && !is(Range == LightScopeOf!Range))
|{
| return .rcarray(range.lightScope);
|}
|
|/// ditto
|auto rcarray(T = void, Range)(Range range)
| if (is(T == void) && isIterable!Range && is(Range == LightScopeOf!Range) && !isArray!Range)
|{
| static if (LikeArray!Range)
| {
| return .rcarray(range.field);
| }
| else
| {
| return .rcarray!(ForeachType!Range)(range);
| }
|}
|
|/// ditto
|RCArray!V rcarray(T = void, V)(V[] values...)
| if (is(T == void) && hasIndirections!V)
|{
| return .rcarray(values, true);
|}
|
|/// ditto
|RCArray!V rcarray(T = void, V)(scope V[] values...)
| if (is(T == void) && !hasIndirections!V)
|{
| return .rcarray(values, true);
|}
|
|/// ditto
|RCArray!V rcarray(T = void, V)(V[] values, bool deallocate)
| if (is(T == void) && hasIndirections!V)
|{
| return .rcarray!V(values, deallocate);
|}
|
|/// ditto
|RCArray!V rcarray(T = void, V)(scope V[] values, bool deallocate)
| if (is(T == void) && !hasIndirections!V)
|{
| return .rcarray!V(values, deallocate);
|}
|
|/// ditto
|template rcarray(T)
| if(!is(T == E[], E) && !is(T == void))
|{
| import std.range.primitives: isInputRange, isInfinite;
|
| ///
| auto rcarray(Range)(ref Range range)
| if (!is(Range == LightScopeOf!Range))
| {
| return .rcarray!T(range.lightScope);
| }
|
| /// ditto
| auto rcarray(Range)(Range range)
| if ((isInputRange!Range || isIterable!Range) && !isInfinite!Range && !isArray!Range || isPointer!Range && (isInputRange!(PointerTarget!Range) || isIterable!(PointerTarget!Range)))
| {
| static if (LikeArray!Range)
| {
| return .rcarray!T(range.field);
| }
| else static if (hasLength!Range)
| {
| import mir.conv: emplaceRef;
| auto ret = RCArray!T(range.length, false);
| size_t i;
| static if (isInputRange!Range)
| for (; !range.empty; range.popFront)
| ret[i++].emplaceRef!T(range.front);
| else
| static if (isPointer!Range)
| foreach (e; *range)
| ret[i++].emplaceRef!T(e);
| else
| foreach (e; range)
| ret[i++].emplaceRef!T(e);
| return ret;
| }
| else
| {
| import mir.appender: ScopedBuffer;
| import mir.conv: emplaceRef;
| ScopedBuffer!T a;
| static if (isInputRange!Range)
| for (; !range.empty; range.popFront)
| a.put(range.front);
| else
| static if (isPointer!Range)
| foreach (e; *range)
| a.put(e);
| else
| foreach (e; range)
| a.put(e);
| scope values = a.data;
| auto ret = RCArray!T(values.length, false);
| ()@trusted {
| a.moveDataAndEmplaceTo(ret[]);
| }();
| return ret;
| }
| }
|
| /// ditto
| RCArray!T rcarray(V)(V[] values...)
| if (hasIndirections!V)
| {
| return .rcarray!T(values, true);
| }
|
| /// ditto
| RCArray!T rcarray(V)(scope V[] values...)
| if (!hasIndirections!V)
| {
| return .rcarray!T(values, true);
| }
|
| /// ditto
| RCArray!T rcarray(V)(V[] values, bool deallocate)
| if (hasIndirections!V)
| {
| auto ret = mir_rcarray!T(values.length, false, deallocate);
| static if (!hasElaborateAssign!(Unqual!T) && is(Unqual!V == Unqual!T))
| {
| ()@trusted {
| import core.stdc.string: memcpy;
| memcpy(cast(void*)ret.ptr, cast(const void*)values.ptr, values.length * T.sizeof);
| }();
| }
| else
| {
| import mir.conv: emplaceRef;
| auto lhs = ret[];
| foreach (i, ref e; values)
| lhs[i].emplaceRef!T(e);
| }
| return ret;
| }
|
| /// ditto
| RCArray!T rcarray(V)(scope V[] values, bool deallocate)
| if (!hasIndirections!V)
| {
| auto ret = mir_rcarray!T(values.length, false, deallocate);
| static if (!hasElaborateAssign!(Unqual!T) && is(Unqual!V == Unqual!T))
| {
| ()@trusted {
| import core.stdc.string: memcpy;
| memcpy(cast(void*)ret.ptr, cast(const void*)values.ptr, values.length * T.sizeof);
| }();
| }
| else
| {
| import mir.conv: emplaceRef;
| auto lhs = ret[];
| foreach (i, ref e; values)
| lhs[i].emplaceRef!T(e);
| }
| return ret;
| }
|}
|
|///
|version(mir_test)
|@safe pure @nogc nothrow
|unittest
|{
| RCArray!double a = rcarray!double(1.0, 2, 5, 3);
| assert(a[0] == 1);
| assert(a[$ - 1] == 3);
|
| auto s = rcarray!char("hello!");
| assert(s[0] == 'h');
| assert(s[$ - 1] == '!');
|
| alias rcstring = rcarray!(immutable char);
| auto r = rcstring("string");
| assert(r[0] == 's');
| assert(r[$ - 1] == 'g');
|}
|
|/// With Input Ranges
|version(mir_test)
|@safe pure @nogc nothrow
|unittest
|{
| import mir.algorithm.iteration: filter;
| static immutable numbers = [3, 2, 5, 2, 3, 7, 3];
| static immutable filtered = [5.0, 7];
| auto result = numbers.filter!"a > 3".rcarray!(immutable double);
| static assert(is(typeof(result) == RCArray!(immutable double)));
| assert (result[] == filtered);
|}
|
|/++
|Params:
| length = array length
| deallocate = if `false`, the memory is never deallocated.
|Returns: minimally initialized rcarray.
|+/
|RCArray!T mininitRcarray(T)(size_t length, bool deallocate = true)
|{
| return RCArray!T(length, false, deallocate);
|}
|
|///
|@safe pure nothrow @nogc unittest
|{
| auto a = mininitRcarray!double(5);
| assert(a.length == 5);
| assert(a._counter == 1);
| a[][] = 0; // a.opIndex()[] = 0;
|}
|
|/++
|Thread safe reference counting iterator.
|+/
|struct mir_rci(T)
|{
| import mir.ndslice.slice: Slice;
| import mir.ndslice.iterator: IotaIterator;
|
| ///
| T* _iterator;
|
| ///
| RCArray!T _array;
|
| ///
| this(RCArray!T array)
| {
| this._iterator = (()@trusted => array.ptr)();
| this._array.proxySwap(array);
| }
|
| ///
| this(T* _iterator, RCArray!T array)
| {
| this._iterator = _iterator;
| this._array.proxySwap(array);
| }
|
| ///
| inout(T)* lightScope()() scope return inout @property @trusted
| {
| debug
| {
| assert(_array._payload <= _iterator);
| assert(_iterator is null || _iterator <= _array._payload + _array.length);
| }
| return _iterator;
| }
|
| ///
| ref opAssign(typeof(null)) scope return nothrow
| {
| pragma(inline, true);
| _iterator = null;
| _array = null;
| return this;
| }
|
| ///
| ref opAssign(return typeof(this) rhs) scope return @trusted
| {
| _iterator = rhs._iterator;
| _array.proxySwap(rhs._array);
| return this;
| }
|
| ///
| ref opAssign(Q)(return mir_rci!Q rhs) scope return nothrow
| if (isImplicitlyConvertible!(Q*, T*))
| {
| import core.lifetime: move;
| _iterator = rhs._iterator;
| _array = move(rhs._array);
| return this;
| }
|
| ///
| mir_rci!(const T) lightConst()() scope return const nothrow @property
| { return typeof(return)(_iterator, _array.lightConst); }
|
| ///
| mir_rci!(immutable T) lightImmutable()() scope return immutable nothrow @property
| { return typeof(return)(_iterator, _array.lightImmutable); }
|
| ///
| ref inout(T) opUnary(string op : "*")() inout scope return
| {
| debug
| {
| assert(_iterator);
| assert(_array._payload);
| assert(_array._payload <= _iterator);
| assert(_iterator <= _array._payload + _array.length);
| }
| return *_iterator;
| }
|
| ///
| ref inout(T) opIndex(ptrdiff_t index) inout scope return @trusted
| {
| debug
| {
| assert(_iterator);
| assert(_array._payload);
| assert(_array._payload <= _iterator + index);
| assert(_iterator + index <= _array._payload + _array.length);
| }
| return _iterator[index];
| }
|
| /// Returns: slice type of `Slice!(IotaIterator!size_t)`
| Slice!(IotaIterator!size_t) opSlice(size_t dimension)(size_t i, size_t j) @safe scope const
| if (dimension == 0)
| in
| {
| assert(i <= j, "RCI!T.opSlice!0: the left opSlice boundary must be less than or equal to the right bound.");
| }
| do
| {
| return typeof(return)(j - i, typeof(return).Iterator(i));
| }
|
| /// Returns: ndslice on top of the refcounted iterator
| auto opIndex(Slice!(IotaIterator!size_t) slice)
| {
| import core.lifetime: move;
| auto it = this;
| it += slice._iterator._index;
| return Slice!(RCI!T)(slice.length, it.move);
| }
|
| /// ditto
| auto opIndex(Slice!(IotaIterator!size_t) slice) const
| {
| import core.lifetime: move;
| auto it = lightConst;
| it += slice._iterator._index;
| return Slice!(RCI!(const T))(slice.length, it.move);
| }
|
| ///
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| { mixin(op ~ "_iterator;"); }
|
| ///
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "-" || op == "+")
| { mixin("_iterator " ~ op ~ "= index;"); }
|
| ///
| mir_rci!T opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| { return mir_rci!T(_iterator + index, _array); }
|
| ///
| mir_rci!(const T) opBinary(string op)(ptrdiff_t index) const
| if (op == "+" || op == "-")
| { return typeof(return)(_iterator + index, _array.lightConst); }
|
| ///
| mir_rci!(immutable T) opBinary(string op)(ptrdiff_t index) immutable
| if (op == "+" || op == "-")
| { return typeof(return)(_iterator + index, _array.lightImmutable); }
|
| ///
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return this._iterator - right._iterator; }
|
| ///
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._iterator == right._iterator; }
|
| ///
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| { return this._iterator - right._iterator; }
|}
|
|/// ditto
|alias RCI = mir_rci;
|
|///
|version(mir_test)
|@safe @nogc unittest
|{
|
| import mir.ndslice.traits: isIterator;
| import mir.ndslice.slice;
| import mir.rc.array;
| auto slice = mir_rcarray!double(10).asSlice;
| static assert(isIterator!(RCI!double));
| static assert(is(typeof(slice) == Slice!(RCI!double)));
| auto matrix = slice.sliced(2, 5);
| static assert(is(typeof(matrix) == Slice!(RCI!double, 2)));
| slice[7] = 44;
| assert(matrix[1, 2] == 44);
|}
|
|///
|version(mir_test)
|@safe @nogc unittest
|{
| import mir.ndslice.slice;
| import mir.rc.array;
|
| alias rcvec = Slice!(RCI!double);
|
| RCI!double a, b;
| a = b;
|
| RCI!(const double) ca, cb;
| ca = cb;
| ca = cast(const) cb;
|
| void foo(scope ref rcvec x, scope ref rcvec y)
| {
| x[] = y[];
| x[1] = y[1];
| x[1 .. $] += y[1 .. $];
| x = x.save;
| }
|}
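|
|/// Iterator arithmetic (a minimal sketch; assumes `mir_rci` behaves like a pointer
|/// that keeps its payload alive through the embedded `RCArray`).
|version(mir_test)
|@nogc nothrow
|unittest
|{
| import mir.rc.array;
| auto a = rcarray!int(10, 20, 30);
| auto it = RCI!int(a); // points at the first element
| assert(it[0] == 10);
| ++it; // advances like a pointer
| assert(it[0] == 20);
| auto it2 = it + 1; // the copy holds its own counted reference
| assert(it2[0] == 30);
| assert(it2 - it == 1);
|}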
|
|version(mir_test)
|@safe @nogc unittest
|{
| import mir.ndslice;
| import mir.rc.array;
| import mir.series;
|
| @safe void bar(ref const mir_rcarray!(const double) a, ref mir_rcarray!(const double) b)
| {
| b = a;
| }
|
| @safe void bari(ref immutable mir_rcarray!(immutable double) a, ref mir_rcarray!(immutable double) b)
| {
| b = a;
| }
|
| @safe void foo(ref const RCI!(const double) a, ref RCI!(const double) b)
| {
| b = a;
| }
|
| @safe void fooi(ref immutable RCI!(immutable double) a, ref RCI!(immutable double) b)
| {
| b = a;
| }
|
| struct S
| {
| uint i;
| @safe pure:
| ~this() {}
| }
|
| @safe void goo(ref const Series!(RCI!(const double), RCI!(const S)) a, ref Series!(RCI!(const double), RCI!(const S)) b)
| {
| b = a;
| }
|
| @safe void gooi(ref immutable Series!(RCI!(immutable double), RCI!(const S)) a, ref Series!(RCI!(immutable double), RCI!(const S)) b)
| {
| b = a;
| }
|
| struct C
| {
| Series!(RCI!(const S), RCI!(const S)) a;
| Series!(RCI!(const S), RCI!(const S)) b;
| }
|
| C a, b;
| a = b;
| a = cast(const) b;
|}
|
|version(mir_test)
|unittest
|{
| import mir.ndslice.slice: Slice;
| static RCArray!int foo() @safe
| {
| auto ret = RCArray!int(10);
| return ret;
| }
|
|
| static Slice!(RCI!int) bat() @safe
| {
| auto ret = RCArray!int(10);
| return ret.asSlice;
| }
|
| static Slice!(RCI!int) bar() @safe
| {
| auto ret = RCArray!int(10);
| auto d = ret.asSlice;
| return d;
| }
|}
|
|version(mir_test)
|@safe unittest
|{
| import core.stdc.stdio;
|
| struct S
| {
| uint s;
| this(this) @nogc nothrow @safe
| {
| // () @trusted {
| // puts("this(this)\n");
| // } ();
| }
|
| ~this() nothrow @nogc @safe
| {
| // () @trusted {
| // if (s)
| // puts("~this()\n");
| // else
| // puts("~this() - zero\n");
| // } ();
| }
| }
|
| struct C
| {
| S s;
| }
|
| S[1] d = [S(1)];
| auto r = rcarray(d);
|}
|
|version(mir_test)
|unittest
|{
| import mir.small_string;
| alias S = SmallString!32u;
| auto ars = [S("123"), S("422")];
| alias R = mir_rcarray!S;
| auto rc = ars.rcarray!S;
|
| RCArray!int value = null;
| value = null;
|}
|
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/rc/array.d has no code
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-series.lst
|/++
|$(H1 Index-series)
|
|This module contains the $(LREF Series) data structure with special iteration and indexing methods.
|It is aimed at constructing index- and time-series using Mir and Phobos algorithms.
|
|Public_imports: $(MREF mir,ndslice,slice).
|
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko
|
|Macros:
|NDSLICE = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|module mir.series;
|
|public import mir.ndslice.slice;
|public import mir.ndslice.sorting: sort;
|import mir.ndslice.iterator: IotaIterator;
|import mir.ndslice.sorting: transitionIndex;
|import mir.qualifier;
|import std.traits;
|
|/++
|See_also: $(LREF unionSeries), $(LREF troykaSeries), $(LREF troykaGalop).
|+/
|@safe version(mir_test) unittest
|{
| import mir.ndslice;
| import mir.series;
|
| import mir.array.allocation: array;
| import mir.algorithm.setops: multiwayUnion;
|
| import mir.date: Date;
| import core.lifetime: move;
| import std.exception: collectExceptionMsg;
|
| //////////////////////////////////////
| // Constructs two time-series.
| //////////////////////////////////////
| auto index0 = [
| Date(2017, 01, 01),
| Date(2017, 03, 01),
| Date(2017, 04, 01)];
|
| auto data0 = [1.0, 3, 4];
| auto series0 = index0.series(data0);
|
| auto index1 = [
| Date(2017, 01, 01),
| Date(2017, 02, 01),
| Date(2017, 05, 01)];
|
| auto data1 = [10.0, 20, 50];
| auto series1 = index1.series(data1);
|
| //////////////////////////////////////
| // asSlice method
| //////////////////////////////////////
| assert(series0
| .asSlice
| // ref qualifier is optional
| .map!((ref key, ref value) => key.yearMonthDay.month == value)
| .all);
|
| //////////////////////////////////////
| // get* methods
| //////////////////////////////////////
|
| auto refDate = Date(2017, 03, 01);
| auto missingDate = Date(2016, 03, 01);
|
| // default value
| double defaultValue = 100;
| assert(series0.get(refDate, defaultValue) == 3);
| assert(series0.get(missingDate, defaultValue) == defaultValue);
|
| // Exceptions handlers
| assert(series0.get(refDate) == 3);
| assert(series0.get(refDate, new Exception("My exception msg")) == 3);
| assert(series0.getVerbose(refDate) == 3);
| assert(series0.getExtraVerbose(refDate, "My exception msg") == 3);
|
| assert(collectExceptionMsg!Exception(
| series0.get(missingDate)
| ) == "Series double[date]: Missing required key");
|
| assert(collectExceptionMsg!Exception(
| series0.get(missingDate, new Exception("My exception msg"))
| ) == "My exception msg");
|
| assert(collectExceptionMsg!Exception(
| series0.getVerbose(missingDate)
| ) == "Series double[date]: Missing 2016-03-01 key");
|
| assert(collectExceptionMsg!Exception(
| series0.getExtraVerbose(missingDate, "My exception msg")
| ) == "My exception msg. Series double[date]: Missing 2016-03-01 key");
|
| // assign with get*
| series0.get(refDate) = 100;
| assert(series0.get(refDate) == 100);
| series0.get(refDate) = 3;
|
| // tryGet
| double val;
| assert(series0.tryGet(refDate, val));
| assert(val == 3);
| assert(!series0.tryGet(missingDate, val));
| assert(val == 3); // val was not changed
|
| //////////////////////////////////////
| // Merges multiple series into one.
| // Allocates using GC.
| // Makes exactly two allocations per merge:
| // one for index/time and one for data.
| //////////////////////////////////////
| auto m0 = unionSeries(series0, series1);
| auto m1 = unionSeries(series1, series0); // order matters
|
| assert(m0.index == [
| Date(2017, 01, 01),
| Date(2017, 02, 01),
| Date(2017, 03, 01),
| Date(2017, 04, 01),
| Date(2017, 05, 01)]);
|
| assert(m0.index == m1.index);
| assert(m0.data == [ 1, 20, 3, 4, 50]);
| assert(m1.data == [10, 20, 3, 4, 50]);
|
| //////////////////////////////////////
| // Joins two time-series into a one with two columns.
| //////////////////////////////////////
| auto u = [index0, index1].multiwayUnion;
| auto index = u.move.array;
| auto data = slice!double([index.length, 2], 0); // initialized to 0 value
| auto series = index.series(data);
|
| series[0 .. $, 0][] = series0; // fill first column
| series[0 .. $, 1][] = series1; // fill second column
|
| assert(data == [
| [1, 10],
| [0, 20],
| [3, 0],
| [4, 0],
| [0, 50]]);
|}
|
|///
|unittest
|{
|
| import mir.series;
|
| double[int] map;
| map[1] = 4.0;
| map[2] = 5.0;
| map[4] = 6.0;
| map[5] = 10.0;
| map[10] = 11.0;
|
| const s = series(map);
|
| double value;
| int key;
| assert(s.tryGet(2, value) && value == 5.0);
| assert(!s.tryGet(8, value));
|
| assert(s.tryGetNext(2, value) && value == 5.0);
| assert(s.tryGetPrev(2, value) && value == 5.0);
| assert(s.tryGetNext(8, value) && value == 11.0);
| assert(s.tryGetPrev(8, value) && value == 10.0);
| assert(!s.tryGetFirst(8, 9, value));
| assert(s.tryGetFirst(2, 10, value) && value == 5.0);
| assert(s.tryGetLast(2, 10, value) && value == 11.0);
| assert(s.tryGetLast(2, 8, value) && value == 10.0);
|
| key = 2; assert(s.tryGetNextUpdateKey(key, value) && key == 2 && value == 5.0);
| key = 2; assert(s.tryGetPrevUpdateKey(key, value) && key == 2 && value == 5.0);
| key = 8; assert(s.tryGetNextUpdateKey(key, value) && key == 10 && value == 11.0);
| key = 8; assert(s.tryGetPrevUpdateKey(key, value) && key == 5 && value == 10.0);
| key = 2; assert(s.tryGetFirstUpdateLower(key, 10, value) && key == 2 && value == 5.0);
| key = 10; assert(s.tryGetLastUpdateKey(2, key, value) && key == 10 && value == 11.0);
| key = 8; assert(s.tryGetLastUpdateKey(2, key, value) && key == 5 && value == 10.0);
|}
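|
|/// Sorting a series by its index (a minimal sketch; assumes the module's `sort`
|/// overload for 1D series reorders index and data in place).
|version(mir_test) unittest
|{
| import mir.series;
|
| auto index = [4, 2, 1, 3, 0];
| auto data = [5.6, 3.4, 2.1, 7.8, 0.1];
| auto series = index.series(data);
| series.sort;
| assert(series.index == [0, 1, 2, 3, 4]);
| assert(series.data == [0.1, 2.1, 3.4, 5.6, 7.8]);
|}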
|
|import mir.ndslice.slice;
|import mir.ndslice.internal: is_Slice, isIndex;
|import mir.math.common: optmath;
|
|import std.meta;
|
|@optmath:
|
|/++
|Plain index/time observation data structure.
|Observations are used as the return type for indexing $(LREF Series).
|+/
|struct mir_observation(Index, Data)
|{
| /// Date, date-time, time, or index.
| Index index;
| /// An alias for time-series index.
| alias time = index;
| /// An alias for key-value representation.
| alias key = index;
| /// Value or ndslice.
| Data data;
| /// An alias for key-value representation.
| alias value = data;
|}
|
|/// ditto
|alias Observation = mir_observation;
|
|/// Convenient function for $(LREF Observation) construction.
|auto observation(Index, Data)(Index index, Data data)
|{
| return mir_observation!(Index, Data)(index, data);
|}
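|
|/// A minimal usage sketch for $(LREF observation); the values below are only illustrative.
|version(mir_test) unittest
|{
| auto obs = observation(2017, 3.0);
| assert(obs.index == 2017);
| assert(obs.key == 2017); // `key` and `time` alias `index`
| assert(obs.data == 3.0);
| assert(obs.value == 3.0); // `value` aliases `data`
|}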
|
|/++
|Convenient alias for a 1D Contiguous $(LREF Series).
|+/
|alias SeriesMap(K, V) = mir_series!(K*, V*);
|
|///
|version(mir_test) unittest
|{
| import std.traits;
| import mir.series;
|
| static assert (is(SeriesMap!(string, double) == Series!(string*, double*)));
|
| /// LHS, RHS
| static assert (isAssignable!(SeriesMap!(string, double), SeriesMap!(string, double)));
| static assert (isAssignable!(SeriesMap!(string, double), typeof(null)));
|
| static assert (isAssignable!(SeriesMap!(const string, double), SeriesMap!(string, double)));
| static assert (isAssignable!(SeriesMap!(string, const double), SeriesMap!(string, double)));
| static assert (isAssignable!(SeriesMap!(const string, const double), SeriesMap!(string, double)));
|
| static assert (isAssignable!(SeriesMap!(immutable string, double), SeriesMap!(immutable string, double)));
| static assert (isAssignable!(SeriesMap!(immutable string, const double), SeriesMap!(immutable string, double)));
| static assert (isAssignable!(SeriesMap!(const string, const double), SeriesMap!(immutable string, double)));
| static assert (isAssignable!(SeriesMap!(string, immutable double), SeriesMap!(string, immutable double)));
| static assert (isAssignable!(SeriesMap!(const string, immutable double), SeriesMap!(string, immutable double)));
| static assert (isAssignable!(SeriesMap!(const string, const double), SeriesMap!(string, immutable double)));
| // etc
|}
|
|/++
|Plain index series data structure.
|
|`*.index[i]`/`*.key[i]`/`*.time[i]` corresponds to `*.data[i]`/`*.value[i]`.
|
|Index is assumed to be sorted.
|$(LREF sort) can be used to normalise a series.
|+/
|struct mir_series(IndexIterator_, Iterator_, size_t N_ = 1, SliceKind kind_ = Contiguous)
|{
| private enum doUnittest = is(typeof(this) == Series!(int*, double*));
|
| ///
| alias IndexIterator = IndexIterator_;
|
| ///
| alias Iterator = Iterator_;
|
| ///
| enum size_t N = N_;
|
| ///
| enum SliceKind kind = kind_;
|
| ///
| Slice!(Iterator, N, kind) _data;
|
| ///
| IndexIterator _index;
|
| /// Index / Key / Time type aliases
| alias Index = typeof(this.front.index);
| /// ditto
| alias Key = Index;
| /// ditto
| alias Time = Index;
| /// Data / Value type aliases
| alias Data = typeof(this.front.data);
| /// ditto
| alias Value = Data;
|
| /// An alias for time-series index.
| alias time = index;
| /// An alias for key-value representation.
| alias key = index;
| /// An alias for key-value representation.
| alias value = data;
|
| private enum defaultMsg() = "Series " ~ Unqual!(this.Data).stringof ~ "[" ~ Unqual!(this.Index).stringof ~ "]: Missing";
| private static immutable defaultExc() = new Exception(defaultMsg!() ~ " required key");
|
|@optmath:
|
| ///
| this()(Slice!IndexIterator index, Slice!(Iterator, N, kind) data)
| {
| assert(index.length == data.length, "Series constructor: index and data lengths must be equal.");
| _data = data;
| _index = index._iterator;
| }
|
|
| /// Construct from null
| this(typeof(null))
| {
| _data = _data.init;
| _index = _index.init;
| }
|
| ///
| bool opEquals(RIndexIterator, RIterator, size_t RN, SliceKind rkind)(Series!(RIndexIterator, RIterator, RN, rkind) rhs) const
| {
| return this.lightScopeIndex == rhs.lightScopeIndex && this._data.lightScope == rhs._data.lightScope;
| }
|
| /++
| The index of the series is assumed to be sorted.
|
| `IndexIterator` is an iterator over dates, date-times, times, numbers, or user-defined types with `opCmp` defined.
| For example, `Date*`, `DateTime*`, `immutable(long)*`, `mir.ndslice.iterator.IotaIterator`.
| +/
| auto index()() @property @trusted
| {
| return _index.sliced(_data._lengths[0]);
| }
|
| /// ditto
| auto index()() @property @trusted const
| {
| return _index.lightConst.sliced(_data._lengths[0]);
| }
|
| /// ditto
| auto index()() @property @trusted immutable
| {
| return _index.lightImmutable.sliced(_data._lengths[0]);
| }
|
| private auto lightScopeIndex()() @property @trusted
| {
| return .lightScope(_index).sliced(_data._lengths[0]);
| }
|
| private auto lightScopeIndex()() @property @trusted const
| {
| return .lightScope(_index).sliced(_data._lengths[0]);
| }
|
| private auto lightScopeIndex()() @property @trusted immutable
| {
| return .lightScope(_index).sliced(_data._lengths[0]);
| }
|
| /++
| Data is any ndslice with only one constraint:
| `data` and `index` lengths must be equal.
| +/
| auto data()() @property @trusted
| {
| return _data;
| }
|
| /// ditto
| auto data()() @property @trusted const
| {
| return _data[];
| }
|
| /// ditto
| auto data()() @property @trusted immutable
| {
| return _data[];
| }
|
| ///
| typeof(this) opBinary(string op : "~")(typeof(this) rhs)
| {
| return unionSeries(this.lightScope, rhs.lightScope);
| }
|
| /// ditto
| auto opBinary(string op : "~")(const typeof(this) rhs) const
| {
| return unionSeries(this.lightScope, rhs.lightScope);
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.date: Date;
|
| //////////////////////////////////////
| // Constructs two time-series.
| //////////////////////////////////////
| auto index0 = [1,3,4];
| auto data0 = [1.0, 3, 4];
| auto series0 = index0.series(data0);
|
| auto index1 = [1,2,5];
| auto data1 = [10.0, 20, 50];
| auto series1 = index1.series(data1);
|
| //////////////////////////////////////
| // Merges multiple series into one.
| //////////////////////////////////////
| // Order matters.
| // The first series has higher priority.
| auto m0 = series0 ~ series1;
| auto m1 = series1 ~ series0;
|
| assert(m0.index == m1.index);
| assert(m0.data == [ 1, 20, 3, 4, 50]);
| assert(m1.data == [10, 20, 3, 4, 50]);
| }
|
| static if (doUnittest)
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.date: Date;
|
| //////////////////////////////////////
| // Constructs two time-series.
| //////////////////////////////////////
| auto index0 = [1,3,4];
| auto data0 = [1.0, 3, 4];
| auto series0 = index0.series(data0);
|
| auto index1 = [1,2,5];
| auto data1 = [10.0, 20, 50];
| const series1 = index1.series(data1);
|
| //////////////////////////////////////
| // Merges multiple series into one.
| //////////////////////////////////////
| // Order matters.
| // The first series has higher priority.
| auto m0 = series0 ~ series1;
| auto m1 = series1 ~ series0;
|
| assert(m0.index == m1.index);
| assert(m0.data == [ 1, 20, 3, 4, 50]);
| assert(m1.data == [10, 20, 3, 4, 50]);
| }
|
| /++
| Special `[] =` index-assign operator for index-series.
| Assigns data from `r` over the intersection of the two indexes.
| If an index in `r` is not present in the index of this series, no assignment takes place for that element.
| Both this series and `r` are assumed to be sorted.
|
| Params:
| r = rvalue index-series
| +/
| void opIndexAssign(IndexIterator_, Iterator_, size_t N_, SliceKind kind_)
| (Series!(IndexIterator_, Iterator_, N_, kind_) r)
| {
| opIndexOpAssign!("", IndexIterator_, Iterator_, N_, kind_)(r);
| }
|
| static if (doUnittest)
| ///
| version(mir_test) unittest
| {
| auto index = [1, 2, 3, 4];
| auto data = [10.0, 10, 10, 10];
| auto series = index.series(data);
|
| auto rindex = [0, 2, 4, 5];
| auto rdata = [1.0, 2, 3, 4];
| auto rseries = rindex.series(rdata);
|
| // series[] = rseries;
| series[] = rseries;
| assert(series.data == [10, 2, 10, 3]);
| }
|
| /++
| Special `[] op=` index-op-assign operator for index-series.
| Op-assigns data from `rSeries` over the intersection of the two indexes.
| If an index in `rSeries` is not present in the index of this series, no op-assign takes place for that element.
| Both this series and `rSeries` are assumed to be sorted.
|
| Params:
| rSeries = rvalue index-series
| +/
| void opIndexOpAssign(string op, IndexIterator_, Iterator_, size_t N_, SliceKind kind_)
| (auto ref Series!(IndexIterator_, Iterator_, N_, kind_) rSeries)
| {
| auto l = this.lightScope;
| auto r = rSeries.lightScope;
| if (r.empty)
| return;
| if (l.empty)
| return;
| Unqual!(typeof(*r._index)) rf = *r._index;
| Unqual!(typeof(*l._index)) lf = *l._index;
| goto Begin;
| R:
| r.popFront;
| if (r.empty)
| goto End;
| rf = *r._index;
| Begin:
| if (lf > rf)
| goto R;
| if (lf < rf)
| goto L;
| E:
| static if (N != 1)
| mixin("l.data.front[] " ~ op ~ "= r.data.front;");
| else
| mixin("l.data.front " ~ op ~ "= r.data.front;");
|
| r.popFront;
| if (r.empty)
| goto End;
| rf = *r._index;
| L:
| l.popFront;
| if (l.empty)
| goto End;
| lf = *l._index;
|
| if (lf < rf)
| goto L;
| if (lf == rf)
| goto E;
| goto R;
| End:
| }
|
| static if (doUnittest)
| ///
| version(mir_test) unittest
| {
| auto index = [1, 2, 3, 4];
| auto data = [10.0, 10, 10, 10];
| auto series = index.series(data);
|
| auto rindex = [0, 2, 4, 5];
| auto rdata = [1.0, 2, 3, 4];
| auto rseries = rindex.series(rdata);
|
| series[] += rseries;
| assert(series.data == [10, 12, 10, 13]);
| }
|
| /++
| This function performs a binary search over the sorted index to find the largest
| left subrange on which `t < key` is true for all `t`.
| +/
| auto lowerBound(Index)(auto ref scope const Index key)
| {
| return opIndex(opSlice(0, lightScopeIndex.transitionIndex(key)));
| }
|
| /// ditto
| auto lowerBound(Index)(auto ref scope const Index key) const
| {
| return opIndex(opSlice(0, lightScopeIndex.transitionIndex(key)));
| }
|
|
| /++
| This function performs a binary search over the sorted index to find the largest
| right subrange on which `t > key` is true for all `t`.
| +/
| auto upperBound(Index)(auto ref scope const Index key)
| {
| return opIndex(opSlice(lightScopeIndex.transitionIndex!"a <= b"(key), length));
| }
|
| /// ditto
| auto upperBound(Index)(auto ref scope const Index key) const
| {
| return opIndex(opSlice(lightScopeIndex.transitionIndex!"a <= b"(key), length));
| }
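|
| static if (doUnittest)
| /// `lowerBound` and `upperBound` (a minimal sketch over an integer index)
| version(mir_test) unittest
| {
| auto index = [1, 2, 3, 4];
| auto data = [10.0, 20, 30, 40];
| auto series = index.series(data);
|
| // keys strictly less than 3
| assert(series.lowerBound(3).index == [1, 2]);
| // keys strictly greater than 3
| assert(series.upperBound(3).index == [4]);
| }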
|
| /**
| Gets data for the index.
| Params:
| key = index
| _default = default value, returned if the series does not contain the index.
| Returns:
| data that corresponds to the index or default value.
| */
| ref get(Index, Value)(auto ref scope const Index key, return ref Value _default) @trusted
| if (!is(Value : const(Exception)))
| {
| size_t idx = lightScopeIndex.transitionIndex(key);
| return idx < _data._lengths[0] && _index[idx] == key ? _data[idx] : _default;
| }
|
| /// ditto
| ref get(Index, Value)(auto ref scope const Index key, return ref Value _default) const
| if (!is(Value : const(Exception)))
| {
| return this.lightScope.get(key, _default);
| }
|
| /// ditto
| ref get(Index, Value)(auto ref scope const Index key, return ref Value _default) immutable
| if (!is(Value : const(Exception)))
| {
| return this.lightScope.get(key, _default);
| }
|
| auto get(Index, Value)(auto ref scope const Index key, Value _default) @trusted
| if (!is(Value : const(Exception)))
| {
| size_t idx = lightScopeIndex.transitionIndex(key);
| return idx < _data._lengths[0] && _index[idx] == key ? _data[idx] : _default;
| }
|
| /// ditto
| auto get(Index, Value)(auto ref scope const Index key, Value _default) const
| if (!is(Value : const(Exception)))
| {
| import core.lifetime: forward;
| return this.lightScope.get(key, forward!_default);
| }
|
| /// ditto
| auto get(Index, Value)(auto ref scope const Index key, Value _default) immutable
| if (!is(Value : const(Exception)))
| {
| import core.lifetime: forward;
| return this.lightScope.get(key, forward!_default);
| }
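|
| static if (doUnittest)
| /// `get` with a default value (a minimal sketch; the key 5 below is absent on purpose)
| version(mir_test) unittest
| {
| auto series = [1, 2, 3].series([10.0, 20, 30]);
|
| double def = -1;
| assert(series.get(2, def) == 20);
| assert(series.get(5, def) == -1); // missing key falls back to the default
| }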
|
| /**
| Gets data for the index.
| Params:
| key = index
| exc = (lazy, optional) exception to throw if the series does not contain the index.
| Returns: data that corresponds to the index.
| Throws:
| Exception if the series does not contain the index.
| See_also: $(LREF Series.getVerbose), $(LREF Series.tryGet)
| */
| auto ref get(Index)(auto ref scope const Index key) @trusted
| {
| size_t idx = lightScopeIndex.transitionIndex(key);
| if (idx < _data._lengths[0] && _index[idx] == key)
| {
| return _data[idx];
| }
| throw defaultExc!();
| }
|
| /// ditto
| auto ref get(Index)(auto ref scope const Index key, lazy const Exception exc) @trusted
| {
| size_t idx = lightScopeIndex.transitionIndex(key);
| if (idx < _data._lengths[0] && _index[idx] == key)
| {
| return _data[idx];
| }
| throw exc;
| }
|
| /// ditto
| auto ref get(Index)(auto ref scope const Index key) const
| {
| return this.lightScope.get(key);
| }
|
| /// ditto
| auto ref get(Index)(