TRAVIS_OS_NAME=osx <<<<<< ENV .dscanner.ini .gitmodules benchmarks/ndslice/binarization.d benchmarks/ndslice/convolution.d benchmarks/ndslice/dot_product.d benchmarks/ndslice/euclidean_distance.d doc/Makefile doc/artwork doc/custom.css doc/custom.ddoc doc/dlang.org doc/gen_modlist.d dub.json examples/data/stop_words examples/data/trndocs.dat examples/data/words examples/lda_hoffman_sparse.d examples/means_of_columns.d examples/median_filter.d index.d source/mir/glas/l1.d source/mir/glas/l2.d source/mir/glas/package.d source/mir/model/lda/hoffman.d source/mir/sparse/blas/axpy.d source/mir/sparse/blas/dot.d source/mir/sparse/blas/gemm.d source/mir/sparse/blas/gemv.d source/mir/sparse/blas/package.d source/mir/sparse/package.d <<<<<< network # path=-var-folders-nz-vv4_9tw56nv9k3tkvyszvwg80000gn-T-dub_test_root_4cd38736_032e_4af3_82f5_f2e5cfaf92e8.lst |module dub_test_root; |import std.typetuple; |static import mir.glas.l1; |static import mir.glas.l2; |static import mir.model.lda.hoffman; |static import mir.sparse.blas.axpy; |static import mir.sparse.blas.dot; |static import mir.sparse.blas.gemm; |static import mir.sparse.blas.gemv; |alias allModules = TypeTuple!(mir.glas.l1, mir.glas.l2, mir.model.lda.hoffman, mir.sparse.blas.axpy, mir.sparse.blas.dot, mir.sparse.blas.gemm, mir.sparse.blas.gemv); | | import std.stdio; | import core.runtime; | 1| void main() { writeln("All unit tests have been run successfully."); } | shared static this() { | version (Have_tested) { | import tested; | import core.runtime; | import std.exception; | Runtime.moduleUnitTester = () => true; | //runUnitTests!app(new JsonTestResultWriter("results.json")); | enforce(runUnitTests!allModules(new ConsoleTestResultWriter), "Unit tests failed."); | } | } | /var/folders/nz/vv4_9tw56nv9k3tkvyszvwg80000gn/T/dub_test_root_4cd38736_032e_4af3_82f5_f2e5cfaf92e8.d is 100% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.4.17-mir-algorithm-source-mir-algorithm-iteration.lst |// Written in the D programming language. |/** |This module contains generic _iteration algorithms. |$(SCRIPT inhibitQuickIndex = 1;) | |$(BOOKTABLE $(H2 Function), |$(TR $(TH Function Name) $(TH Description)) |$(T2 all, Checks if all elements satisfy to a predicate.) |$(T2 any, Checks if at least one element satisfy to a predicate.) |$(T2 cmp, Compares two slices.) |$(T2 count, Counts elements in a slices according to a predicate.) |$(T2 each, Iterates all elements.) |$(T2 eachLower, Iterates lower triangle of matrix.) |$(T2 eachUploPair, Iterates upper and lower pairs of elements in square matrix.) |$(T2 eachUpper, Iterates upper triangle of matrix.) |$(T2 equal, Compares two slices for equality.) |$(T2 find, Finds backward index.) |$(T2 findIndex, Finds index.) |$(T2 isSymmetric, Checks if the matrix is symmetric.) |$(T2 maxIndex, Finds index of the maximum.) |$(T2 maxPos, Finds backward index of the maximum.) |$(T2 minIndex, Finds index of the minimum.) |$(T2 minmaxIndex, Finds indexes of the minimum and the maximum.) |$(T2 minmaxPos, Finds backward indexes of the minimum and the maximum.) |$(T2 minPos, Finds backward index of the minimum.) |$(T2 nBitsToCount, Сount bits until set bit count is reached.) |$(T2 reduce, Accumulates all elements.) |$(T2 uniq, Iterates over the unique elements in a range, which is assumed sorted.) |) | |All operators are suitable to change slices using `ref` argument qualification in a function declaration. |Note, that string lambdas in Mir are `auto ref` functions. 
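|
|A minimal usage sketch, mirroring the documented `reduce` and `each` unittests
|further down in this module (no new API is introduced here):
|-------
|import mir.algorithm.iteration: each, reduce;
|import mir.ndslice.topology: iota;
|
|auto sl = iota(2, 3);                       // | 0 1 2 |
|                                            // | 3 4 5 |
|assert(size_t(0).reduce!"a + b"(sl) == 15); // fold all elements
|
|auto ar = [1, 2, 3];
|ar.each!"a *= 2";                           // mutate elements in place
|assert(ar == [2, 4, 6]);
|-------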
| |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-2018, Ilya Yaroshenko, 2018-, Mir community |Authors: Ilya Yaroshenko, John Michael Hall, Andrei Alexandrescu (original Phobos code) | |Copyright: Andrei Alexandrescu 2008-. Ilya Yaroshenko 2017- |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Authors: , Ilya Yaroshenko (Mir & BetterC rework). |Source: $(PHOBOSSRC std/algorithm/_iteration.d) |Macros: | NDSLICEREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) | T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) | */ |module mir.algorithm.iteration; | |import mir.primitives; |import mir.functional: naryFun; |import mir.internal.utility; |import mir.math.common: optmath; |import mir.ndslice.field: BitField; |import mir.ndslice.internal; |import mir.ndslice.iterator: FieldIterator, RetroIterator; |import mir.ndslice.slice; |import mir.primitives; |import std.meta; |import std.range.primitives: isInputRange, isBidirectionalRange, isInfinite, isForwardRange, ElementType; |import std.traits; | |@optmath: | | |/+ |Bitslice representation for accelerated bitwise algorithm. |1-dimensional contiguousitslice can be split into three chunks: head bits, body chunks, and tail bits. | |Bitslice can have head bits because it has slicing and the zero bit may not be aligned to the zero of a body chunk. |+/ |private struct BitSliceAccelerator(Field, I = typeof(Field.init[size_t.init])) | if (__traits(isUnsigned, I)) |{ | import mir.bitop; | import mir.qualifier: lightConst; | import mir.ndslice.traits: isIterator; | import mir.ndslice.iterator: FieldIterator; | import mir.ndslice.field: BitField; | | /// | alias U = typeof(I + 1u); | /// body bits chunks | static if (isIterator!Field) | Slice!Field bodyChunks; | else | Slice!(FieldIterator!Field) bodyChunks; | /// head length | int headLength; | /// tail length | int tailLength; | |@optmath: | | this(Slice!(FieldIterator!(BitField!(Field, I))) slice) | { | enum mask = bitShiftMask!I; | enum shift = bitElemShift!I; | size_t length = slice.length; | size_t index = slice._iterator._index; | if (auto hlen = index & mask) | { | auto l = I.sizeof * 8 - hlen; | if (l > length) | { | // central problem | headLength = -cast(int) length; | tailLength = cast(int) hlen; | goto F; | } | else | { | headLength = cast(uint) l; | length -= l; | index += l; | } | } | tailLength = cast(int) (length & mask); | F: | length >>= shift; | index >>= shift; | bodyChunks._lengths[0] = length; | static if (isIterator!Field) | { | bodyChunks._iterator = slice._iterator._field._field; | bodyChunks._iterator += index; | } | else | { | bodyChunks._iterator._index = index; | bodyChunks._iterator._field = slice._iterator._field._field; | } | } | |scope const: | | bool isCentralProblem() | { | return headLength < 0; | } | | U centralBits() | { | assert(isCentralProblem); | return *bodyChunks._iterator.lightConst >>> tailLength; | } | | uint centralLength() | { | assert(isCentralProblem); | return -headLength; | } | | /// head bits (last `headLength` bits are valid). | U headBits() | { | assert(!isCentralProblem); | if (headLength == 0) | return U.init; | static if (isIterator!Field) | return bodyChunks._iterator.lightConst[-1]; | else | return bodyChunks._iterator._field.lightConst[bodyChunks._iterator._index - 1]; | } | | /// tail bits (first `tailLength` bits are valid). 
| U tailBits() | { | assert(!isCentralProblem); | if (tailLength == 0) | return U.init; | static if (isIterator!Field) | return bodyChunks._iterator.lightConst[bodyChunks.length]; | else | return bodyChunks._iterator._field.lightConst[bodyChunks._iterator._index + bodyChunks.length]; | } | | U negCentralMask() | { | return U.max << centralLength; | } | | U negHeadMask() | { | return U.max << headLength; | } | | U negTailMask() | { | return U.max << tailLength; | } | | U negCentralMaskS() | { | return U.max >> centralLength; | } | | U negHeadMaskS() | { | return U.max >> headLength; | } | | U negTailMaskS() | { | return U.max >> tailLength; | } | | U centralBitsWithRemainingZeros() | { | return centralBits & ~negCentralMask; | } | | U centralBitsWithRemainingZerosS() | { | return centralBits << (U.sizeof * 8 - centralLength); | } | | U headBitsWithRemainingZeros() | { | return headBits >>> (I.sizeof * 8 - headLength); | } | | U headBitsWithRemainingZerosS() | { | static if (U.sizeof > I.sizeof) | return (headBits << (U.sizeof - I.sizeof) * 8) & ~negTailMaskS; | else | return headBits & ~negTailMaskS; | } | | U tailBitsWithRemainingZeros() | { | return tailBits & ~negTailMask; | } | | U tailBitsWithRemainingZerosS() | { | return tailBits << (U.sizeof * 8 - tailLength); | } | | U centralBitsWithRemainingOnes() | { | return centralBits | negCentralMask; | } | | U centralBitsWithRemainingOnesS() | { | return centralBitsWithRemainingZerosS | negCentralMaskS; | } | | U headBitsWithRemainingOnes() | { | return headBitsWithRemainingZeros | negHeadMask; | } | | U headBitsWithRemainingOnesS() | { | return headBitsWithRemainingZerosS | negHeadMaskS; | } | | U tailBitsWithRemainingOnes() | { | return tailBits | negTailMask; | } | | U tailBitsWithRemainingOnesS() | { | return tailBitsWithRemainingZerosS | negTailMaskS; | } | | size_t ctpop() | { | import mir.bitop: ctpop; | if (isCentralProblem) | return centralBitsWithRemainingZeros.ctpop; | size_t ret; | if (headLength) | ret = cast(size_t) headBitsWithRemainingZeros.ctpop; | if (bodyChunks.length) | { | auto bc = bodyChunks.lightConst; | do | { | ret += cast(size_t) bc.front.ctpop; | bc.popFront; | } | while(bc.length); | } | if (tailBits) | ret += cast(size_t) tailBitsWithRemainingZeros.ctpop; | return ret; | } | | bool any() | { | if (isCentralProblem) | return centralBitsWithRemainingZeros != 0; | if (headBitsWithRemainingZeros != 0) | return true; | if (bodyChunks.length) | { | auto bc = bodyChunks.lightConst; | do | { | if (bc.front != 0) | return true; | bc.popFront; | } | while(bc.length); | } | if (tailBitsWithRemainingZeros != 0) | return true; | return false; | } | | bool all() | { | if (isCentralProblem) | return centralBitsWithRemainingOnes != U.max; | size_t ret; | if (headBitsWithRemainingOnes != U.max) | return false; | if (bodyChunks.length) | { | auto bc = bodyChunks.lightConst; | do | { | if (bc.front != I.max) | return false; | bc.popFront; | } | while(bc.length); | } | if (tailBitsWithRemainingOnes != U.max) | return false; | return true; | } | | size_t cttz() | { | U v; | size_t ret; | if (isCentralProblem) | { | v = centralBitsWithRemainingOnes; | if (v) | goto R; | ret = centralLength; | goto L; | } | v = headBitsWithRemainingOnes; | if (v) | goto R; | ret = headLength; | if (bodyChunks.length) | { | auto bc = bodyChunks.lightConst; | do | { | v = bc.front; | if (v) | goto R; | ret += I.sizeof * 8; | bc.popFront; | } | while(bc.length); | } | v = tailBitsWithRemainingOnes; | if (v) | goto R; | ret += tailLength; | goto L; | R: 
| ret += v.cttz; | L: | return ret; | } | | size_t ctlz() | { | U v; | size_t ret; | if (isCentralProblem) | { | v = centralBitsWithRemainingOnes; | if (v) | goto R; | ret = centralLength; | goto L; | } | v = tailBitsWithRemainingOnesS; | if (v) | goto R; | ret = tailLength; | if (bodyChunks.length) | { | auto bc = bodyChunks.lightConst; | do | { | v = bc.back; | if (v) | goto R; | ret += I.sizeof * 8; | bc.popBack; | } | while(bc.length); | } | v = headBitsWithRemainingOnesS; | if (v) | goto R; | ret += headLength; | goto L; | R: | ret += v.ctlz; | L: | return ret; | } | | sizediff_t nBitsToCount(size_t count) | { | size_t ret; | if (count == 0) | return count; | U v, cnt; | if (isCentralProblem) | { | v = centralBitsWithRemainingZeros; | goto E; | } | v = headBitsWithRemainingZeros; | cnt = v.ctpop; | if (cnt >= count) | goto R; | ret += headLength; | count -= cast(size_t) cnt; | if (bodyChunks.length) | { | auto bc = bodyChunks.lightConst; | do | { | v = bc.front; | cnt = v.ctpop; | if (cnt >= count) | goto R; | ret += I.sizeof * 8; | count -= cast(size_t) cnt; | bc.popFront; | } | while(bc.length); | } | v = tailBitsWithRemainingZeros; | E: | cnt = v.ctpop; | if (cnt >= count) | goto R; | return -1; | R: | return ret + v.nTrailingBitsToCount(count); | } | | sizediff_t retroNBitsToCount(size_t count) | { | if (count == 0) | return count; | size_t ret; | U v, cnt; | if (isCentralProblem) | { | v = centralBitsWithRemainingZerosS; | goto E; | } | v = tailBitsWithRemainingZerosS; | cnt = v.ctpop; | if (cnt >= count) | goto R; | ret += tailLength; | count -= cast(size_t) cnt; | if (bodyChunks.length) | { | auto bc = bodyChunks.lightConst; | do | { | v = bc.back; | cnt = v.ctpop; | if (cnt >= count) | goto R; | ret += I.sizeof * 8; | count -= cast(size_t) cnt; | bc.popBack; | } | while(bc.length); | } | v = headBitsWithRemainingZerosS; | E: | cnt = v.ctpop; | if (cnt >= count) | goto R; | return -1; | R: | return ret + v.nLeadingBitsToCount(count); | } |} | |/++ |Сount bits until set bit count is reached. Works with ndslices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation). |Returns: bit count if set bit count is reached or `-1` otherwise. |+/ |sizediff_t nBitsToCount(Field, I)(scope Slice!(FieldIterator!(BitField!(Field, I))) bitSlice, size_t count) |{ | return BitSliceAccelerator!(Field, I)(bitSlice).nBitsToCount(count); |} | |///ditto |sizediff_t nBitsToCount(Field, I)(scope Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))) bitSlice, size_t count) |{ | import mir.ndslice.topology: retro; | return BitSliceAccelerator!(Field, I)(bitSlice.retro).retroNBitsToCount(count); |} | |/// |pure unittest |{ | import mir.ndslice.allocation: bitSlice; | import mir.ndslice.topology: retro; | auto s = bitSlice(1000); | s[50] = true; | s[100] = true; | s[200] = true; | s[300] = true; | s[400] = true; | assert(s.nBitsToCount(4) == 301); | assert(s.retro.nBitsToCount(4) == 900); |} | |private void checkShapesMatch( | string fun = __FUNCTION__, | string pfun = __PRETTY_FUNCTION__, | Slices...) 
| (scope ref const Slices slices) | if (Slices.length > 1) |{ | enum msg = "all arguments must be slices" ~ tailErrorMessage!(fun, pfun); | enum msgShape = "all slices must have the same shape" ~ tailErrorMessage!(fun, pfun); | enum N = slices[0].shape.length; | foreach (i, Slice; Slices) | { | static if (i == 0) 0000000| continue; | else | static if (slices[i].shape.length == N) 0000000| assert(slices[i].shape == slices[0].shape, msgShape); | else | { | import mir.ndslice.fuse: fuseShape; | static assert(slices[i].fuseShape.length >= N); | assert(slices[i].fuseShape[0 .. N] == slices[0].shape, msgShape); | } | } |} | |template frontOf(size_t N) |{ | static if (N == 0) | enum frontOf = ""; | else | { | enum i = N - 1; | enum frontOf = frontOf!i ~ "slices[" ~ i.stringof ~ "].front, "; | } |} | |template allFlattened(size_t N) | if (N) |{ | enum i = N - 1; | static if (i) | enum allFlattened = .allFlattened!i ~ ("slices[" ~ i.stringof ~ "].flattened, "); | else | enum allFlattened = "slices[" ~ i.stringof ~ "].flattened, "; |} | |private template areAllContiguousSlices(Slices...) |{ | import mir.ndslice.traits: isContiguousSlice; | static if (allSatisfy!(isContiguousSlice, Slices)) | enum areAllContiguousSlices = Slices[0].N > 1; | else | enum areAllContiguousSlices = false; |} | |version(LDC) {} |else version(GNU) {} |else version (Windows) {} |else version (X86_64) |{ | //Compiling with DMD for x86-64 for Linux & OS X with optimizations enabled, | //"Tensor mutation on-the-fly" unittest was failing. Disabling inlining | //caused it to succeed. | //TODO: Rework so this is unnecessary! | version = Mir_disable_inlining_in_reduce; |} | |version(Mir_disable_inlining_in_reduce) |{ | private enum Mir_disable_inlining_in_reduce = true; | | private template _naryAliases(size_t n) | { | static if (n == 0) | enum _naryAliases = ""; | else | { | enum i = n - 1; | enum _naryAliases = _naryAliases!i ~ "alias " ~ cast(char)('a' + i) ~ " = args[" ~ i.stringof ~ "];\n"; | } | } | | private template nonInlinedNaryFun(alias fun) | { | import mir.math.common : optmath; | static if (is(typeof(fun) : string)) | { | /// Specialization for string lambdas | @optmath auto ref nonInlinedNaryFun(Args...)(auto ref Args args) | if (args.length <= 26) | { | pragma(inline,false); | mixin(_naryAliases!(Args.length)); | return mixin(fun); | } | } | else static if (is(typeof(fun.opCall) == function)) | { | @optmath auto ref nonInlinedNaryFun(Args...)(auto ref Args args) | if (is(typeof(fun.opCall(args)))) | { | pragma(inline,false); | return fun.opCall(args); | } | } | else | { | @optmath auto ref nonInlinedNaryFun(Args...)(auto ref Args args) | if (is(typeof(fun(args)))) | { | pragma(inline,false); | return fun(args); | } | } | } |} |else |{ | private enum Mir_disable_inlining_in_reduce = false; |} | |S reduceImpl(alias fun, S, Slices...)(S seed, scope Slices slices) |{ | do | { | static if (DimensionCount!(Slices[0]) == 1) | seed = mixin("fun(seed, " ~ frontOf!(Slices.length) ~ ")"); | else | seed = mixin(".reduceImpl!fun(seed," ~ frontOf!(Slices.length) ~ ")"); | foreach_reverse(ref slice; slices) | slice.popFront; | } | while(!slices[0].empty); | return seed; |} | |/++ |Implements the homonym function (also known as `accumulate`, |`compress`, `inject`, or `fold`) present in various programming |languages of functional flavor. The call `reduce!(fun)(seed, slice1, ..., sliceN)` |first assigns `seed` to an internal variable `result`, |also called the accumulator. 
Then, for each set of element `x1, ..., xN` in |`slice1, ..., sliceN`, `result = fun(result, x1, ..., xN)` gets evaluated. Finally, |`result` is returned. | |`reduce` allows to iterate multiple slices in the lockstep. | |Note: | $(NDSLICEREF topology, pack) can be used to specify dimensions. |Params: | fun = A function. |See_Also: | $(HTTP llvm.org/docs/LangRef.html#fast-math-flags, LLVM IR: Fast Math Flags) | | $(HTTP en.wikipedia.org/wiki/Fold_(higher-order_function), Fold (higher-order function)) |+/ |template reduce(alias fun) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!fun, fun) | && !Mir_disable_inlining_in_reduce) | /++ | Params: | seed = An initial accumulation value. | slices = One or more slices, range, and arrays. | Returns: | the accumulated `result` | +/ | @optmath auto reduce(S, Slices...)(S seed, scope Slices slices) | if (Slices.length) | { | static if (Slices.length > 1) | slices.checkShapesMatch; | static if (areAllContiguousSlices!Slices) | { | import mir.ndslice.topology: flattened; | return mixin(`.reduce!fun(seed, ` ~ allFlattened!(Slices.length) ~`)`); | } | else | { | if (slices[0].anyEmpty) | return cast(Unqual!S) seed; | static if (is(S : Unqual!S)) | alias UT = Unqual!S; | else | alias UT = S; | return reduceImpl!(fun, UT, Slices)(seed, slices); | } | } | else version(Mir_disable_inlining_in_reduce) | //As above, but with inlining disabled. | @optmath auto reduce(S, Slices...)(S seed, scope Slices slices) | if (Slices.length) | { | static if (Slices.length > 1) | slices.checkShapesMatch; | static if (areAllContiguousSlices!Slices) | { | import mir.ndslice.topology: flattened; | return mixin(`.reduce!fun(seed, ` ~ allFlattened!(Slices.length) ~`)`); | } | else | { | if (slices[0].anyEmpty) | return cast(Unqual!S) seed; | static if (is(S : Unqual!S)) | alias UT = Unqual!S; | else | alias UT = S; | return reduceImpl!(nonInlinedNaryFun!fun, UT, Slices)(seed, slices); | } | } | else | alias reduce = .reduce!(naryFun!fun); |} | |/// Ranges and arrays |version(mir_test) |unittest |{ | auto ar = [1, 2, 3]; | auto s = 0.reduce!"a + b"(ar); | assert (s == 6); |} | |/// Single slice |version(mir_test) |unittest |{ | import mir.ndslice.topology : iota; | | //| 0 1 2 | => 3 | | //| 3 4 5 | => 12 | => 15 | auto sl = iota(2, 3); | | // sum of all element in the slice | auto res = size_t(0).reduce!"a + b"(sl); | | assert(res == 15); |} | |/// Multiple slices, dot product |version(mir_test) |unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | //| 0 1 2 | | //| 3 4 5 | | auto a = iota([2, 3], 0).as!double.slice; | //| 1 2 3 | | //| 4 5 6 | | auto b = iota([2, 3], 1).as!double.slice; | | alias dot = reduce!"a + b * c"; | auto res = dot(0.0, a, b); | | // check the result: | import mir.ndslice.topology : flattened; | import std.numeric : dotProduct; | assert(res == dotProduct(a.flattened, b.flattened)); |} | |/// Zipped slices, dot product |pure |version(mir_test) unittest |{ | import std.typecons : Yes; | import std.numeric : dotProduct; | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota, zip, universal; | import mir.math.common : optmath; | | static @optmath T fmuladd(T, Z)(const T a, Z z) | { | return a + z.a * z.b; | } | | // 0 1 2 | // 3 4 5 | auto sl1 = iota(2, 3).as!double.slice.universal; | // 1 2 3 | // 4 5 6 | auto sl2 = iota([2, 3], 1).as!double.slice; | | // slices must have the same strides for `zip!true`. 
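|    // (both operands here are freshly allocated dense 2x3 matrices,
|    //  so their row-major strides coincide and the assertion holds)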
| assert(sl1.strides == sl2.strides); | | auto z = zip!true(sl1, sl2); | | auto dot = reduce!fmuladd(0.0, z); | | assert(dot == dotProduct(iota(6), iota([6], 1))); |} | |/// Tensor mutation on-the-fly |version(mir_test) |unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | import mir.math.common : optmath; | | static @optmath T fun(T)(const T a, ref T b) | { | return a + b++; | } | | //| 0 1 2 | | //| 3 4 5 | | auto sl = iota(2, 3).as!double.slice; | | auto res = reduce!fun(double(0), sl); | | assert(res == 15); | | //| 1 2 3 | | //| 4 5 6 | | assert(sl == iota([2, 3], 1)); |} | |/++ |Packed slices. | |Computes minimum value of maximum values for each row. |+/ |version(mir_test) |unittest |{ | import mir.math.common; | import mir.ndslice.allocation : slice; | import mir.ndslice.dynamic : transposed; | import mir.ndslice.topology : as, iota, pack, map, universal; | | alias maxVal = (a) => reduce!fmax(-double.infinity, a); | alias minVal = (a) => reduce!fmin(double.infinity, a); | alias minimaxVal = (a) => minVal(a.pack!1.map!maxVal); | | auto sl = iota(2, 3).as!double.slice; | | // Vectorized computation: row stride equals 1. | //| 0 1 2 | => | 2 | | //| 3 4 5 | => | 5 | => 2 | auto res = minimaxVal(sl); | assert(res == 2); | | // Common computation: row stride does not equal 1. | //| 0 1 2 | | 0 3 | => | 3 | | //| 3 4 5 | => | 1 4 | => | 4 | | // | 2 5 | => | 5 | => 3 | auto resT = minimaxVal(sl.universal.transposed); | assert(resT == 3); |} | |/// Dlang Range API support. |version(mir_test) |unittest |{ | import mir.algorithm.iteration: each; | import std.range: phobos_iota = iota; | | int s; | // 0 1 2 3 | 4.phobos_iota.each!(i => s += i); | assert(s == 6); |} | |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | auto a = reduce!"a + b"(size_t(7), iota([0, 1], 1)); | assert(a == 7); |} | |void eachImpl(alias fun, Slices...)(scope Slices slices) |{ | foreach(ref slice; slices) | assert(!slice.empty); | do | { | static if (DimensionCount!(Slices[0]) == 1) | mixin("fun(" ~ frontOf!(Slices.length) ~ ");"); | else | mixin(".eachImpl!fun(" ~ frontOf!(Slices.length) ~ ");"); | foreach_reverse(i; Iota!(Slices.length)) | slices[i].popFront; | } | while(!slices[0].empty); |} | |/++ |The call `each!(fun)(slice1, ..., sliceN)` |evaluates `fun` for each set of elements `x1, ..., xN` in |`slice1, ..., sliceN` respectively. | |`each` allows to iterate multiple slices in the lockstep. |Params: | fun = A function. |Note: | $(NDSLICEREF dynamic, transposed) and | $(NDSLICEREF topology, pack) can be used to specify dimensions. |See_Also: | This is functionally similar to $(LREF reduce) but has not seed. |+/ |template each(alias fun) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!fun, fun)) | /++ | Params: | slices = One or more slices, ranges, and arrays. 
| +/ | @optmath auto each(Slices...)(scope Slices slices) | if (Slices.length) | { | static if (Slices.length > 1) | slices.checkShapesMatch; | static if (areAllContiguousSlices!Slices) | { | import mir.ndslice.topology: flattened; | mixin(`.each!fun(` ~ allFlattened!(Slices.length) ~`);`); | } | else | { | if (slices[0].anyEmpty) | return; | eachImpl!fun(slices); | } | } | else | alias each = .each!(naryFun!fun); |} | |/// Ranges and arrays |version(mir_test) |unittest |{ | auto ar = [1, 2, 3]; | ar.each!"a *= 2"; | assert (ar == [2, 4, 6]); |} | |/// Single slice, multiply-add |version(mir_test) |unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | //| 0 1 2 | | //| 3 4 5 | | auto sl = iota(2, 3).as!double.slice; | | sl.each!((ref a) { a = a * 10 + 5; }); | | assert(sl == | [[ 5, 15, 25], | [35, 45, 55]]); |} | |/// Swap two slices |version(mir_test) |unittest |{ | import mir.utility : swap; | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | //| 0 1 2 | | //| 3 4 5 | | auto a = iota([2, 3], 0).as!double.slice; | //| 10 11 12 | | //| 13 14 15 | | auto b = iota([2, 3], 10).as!double.slice; | | each!swap(a, b); | | assert(a == iota([2, 3], 10)); | assert(b == iota([2, 3], 0)); |} | |/// Swap two zipped slices |version(mir_test) |unittest |{ | import mir.utility : swap; | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, zip, iota; | | //| 0 1 2 | | //| 3 4 5 | | auto a = iota([2, 3], 0).as!double.slice; | //| 10 11 12 | | //| 13 14 15 | | auto b = iota([2, 3], 10).as!double.slice; | | auto z = zip(a, b); | | z.each!(z => swap(z.a, z.b)); | | assert(a == iota([2, 3], 10)); | assert(b == iota([2, 3], 0)); |} | |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | size_t i; | iota(0, 2).each!((a){i++;}); | assert(i == 0); |} | |/++ |The call `eachUploPair!(fun)(matrix)` |evaluates `fun` for each pair (`matrix[j, i]`, `matrix[i, j]`), |for i <= j (default) or i < j (if includeDiagonal is false). | |Params: | fun = A function. | includeDiagonal = true if applying function to diagonal, | false (default) otherwise. |+/ |template eachUploPair(alias fun, bool includeDiagonal = false) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!fun, fun)) | { | /++ | Params: | matrix = Square matrix. | +/ | auto eachUploPair(Iterator, SliceKind kind)(scope Slice!(Iterator, 2, kind) matrix) | in | { | assert(matrix.length!0 == matrix.length!1, "matrix must be square."); | } | body | { | static if (kind == Contiguous) | { | import mir.ndslice.topology: canonical; | .eachUploPair!(fun, includeDiagonal)(matrix.canonical); | } | else | { | static if (includeDiagonal == true) | { | if (matrix.length) do | { | eachImpl!fun(matrix.front!0, matrix.front!1); | matrix.popFront!1; | matrix.popFront!0; | // hint for optimizer | matrix._lengths[1] = matrix._lengths[0]; | } | while (matrix.length); | } | else | { | if (matrix.length) for(;;) | { | assert(!matrix.empty!0); | assert(!matrix.empty!1); | auto l = matrix.front!1; | auto u = matrix.front!0; | matrix.popFront!1; | matrix.popFront!0; | l.popFront; | u.popFront; | // hint for optimizer | matrix._lengths[1] = matrix._lengths[0] = l._lengths[0] = u._lengths[0]; | if (u.length == 0) | break; | eachImpl!fun(u, l); | } | } | } | } | } | else | { | alias eachUploPair = .eachUploPair!(naryFun!fun, includeDiagonal); | } |} | |/// Transpose matrix in place. 
|version(mir_test) |unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota, universal; | import mir.ndslice.dynamic: transposed; | import mir.utility: swap; | | auto m = iota(4, 4).slice; | | m.eachUploPair!swap; | | assert(m == iota(4, 4).universal.transposed); |} | |/// Reflect Upper matrix part to lower part. |version(mir_test) |unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota, universal; | import mir.ndslice.dynamic: transposed; | import mir.utility: swap; | | // 0 1 2 | // 3 4 5 | // 6 7 8 | auto m = iota(3, 3).slice; | | m.eachUploPair!((u, ref l) { l = u; }); | | assert(m == [ | [0, 1, 2], | [1, 4, 5], | [2, 5, 8]]); |} | |/// Fill lower triangle and diagonal with zeroes. |version(mir_test) |unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | // 1 2 3 | // 4 5 6 | // 7 8 9 | auto m = iota([3, 3], 1).slice; | | m.eachUploPair!((u, ref l) { l = 0; }, true); | | assert(m == [ | [0, 2, 3], | [0, 0, 6], | [0, 0, 0]]); |} | |version(mir_test) |unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | // 0 1 2 | // 3 4 5 | // 6 7 8 | auto m = iota(3, 3).slice; | m.eachUploPair!((u, ref l) { l = l + 1; }, true); | assert(m == [ | [1, 1, 2], | [4, 5, 5], | [7, 8, 9]]); |} | |version(mir_test) |unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | // 0 1 2 | // 3 4 5 | // 6 7 8 | auto m = iota(3, 3).slice; | m.eachUploPair!((u, ref l) { l = l + 1; }, false); | | assert(m == [ | [0, 1, 2], | [4, 4, 5], | [7, 8, 8]]); |} | |/++ |Checks if the matrix is symmetric. |+/ |template isSymmetric(alias fun = "a == b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!fun, fun)) | /++ | Params: | matrix = 2D ndslice. | +/ | bool isSymmetric(Iterator, SliceKind kind)(Slice!(Iterator, 2, kind) matrix) | { | static if (kind == Contiguous) | { | import mir.ndslice.topology: canonical; | return .isSymmetric!fun(matrix.canonical); | } | else | { | if (matrix.length!0 != matrix.length!1) | return false; | if (matrix.length) do | { | if (!allImpl!fun(matrix.front!0, matrix.front!1)) | { | return false; | } | matrix.popFront!1; | matrix.popFront!0; | matrix._lengths[1] = matrix._lengths[0]; | } | while (matrix.length); | return true; | } | } | else | alias isSymmetric = .isSymmetric!(naryFun!fun); |} | |/// |version(mir_test) |unittest |{ | import mir.ndslice.topology: iota; | assert(iota(2, 2).isSymmetric == false); | | assert( | [1, 2, | 2, 3].sliced(2, 2).isSymmetric == true); |} | |bool minPosImpl(alias fun, Iterator, size_t N, SliceKind kind)(scope ref size_t[N] backwardIndex, scope ref Iterator iterator, Slice!(Iterator, N, kind) slice) |{ | bool found; | do | { | static if (slice.shape.length == 1) | { | if (fun(*slice._iterator, *iterator)) | { | backwardIndex[0] = slice.length; | iterator = slice._iterator; | found = true; | } | } | else | { | if (minPosImpl!(fun, Iterator, N - 1, kind)(backwardIndex[1 .. 
$], iterator, slice.front)) | { | backwardIndex[0] = slice.length; | found = true; | } | } | slice.popFront; | } | while(!slice.empty); | return found; |} | |bool[2] minmaxPosImpl(alias fun, Iterator, size_t N, SliceKind kind)(scope ref size_t[2][N] backwardIndex, scope ref Iterator[2] iterator, Slice!(Iterator, N, kind) slice) |{ | bool[2] found; | do | { | static if (slice.shape.length == 1) | { | if (fun(*slice._iterator, *iterator[0])) | { | backwardIndex[0][0] = slice.length; | iterator[0] = slice._iterator; | found[0] = true; | } | else | if (fun(*iterator[1], *slice._iterator)) | { | backwardIndex[0][1] = slice.length; | iterator[1] = slice._iterator; | found[1] = true; | } | } | else | { | auto r = minmaxPosImpl!(fun, Iterator, N - 1, kind)(backwardIndex[1 .. $], iterator, slice.front); | if (r[0]) | { | backwardIndex[0][0] = slice.length; | } | if (r[1]) | { | backwardIndex[0][1] = slice.length; | } | } | slice.popFront; | } | while(!slice.empty); | return found; |} | |/++ |Finds a positions (ndslices) such that |`position[0].first` is minimal and `position[1].first` is maximal elements in the slice. | |Position is sub-ndslice of the same dimension in the right-$(RPAREN)down-$(RPAREN)etc$(LPAREN)$(LPAREN) corner. | |Params: | pred = A predicate. | |See_also: | $(LREF minmaxIndex), | $(LREF minPos), | $(LREF maxPos), | $(NDSLICEREF slice, Slice.backward). |+/ |template minmaxPos(alias pred = "a < b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slice = ndslice. | Returns: | 2 subslices with minimal and maximal `first` elements. | +/ | @optmath Slice!(Iterator, N, kind == Contiguous && N > 1 ? Canonical : kind)[2] | minmaxPos(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | { | typeof(return) pret; | if (!slice.anyEmpty) | { | size_t[2][N] ret; | auto it = slice._iterator; | Iterator[2] iterator = [it, it]; | minmaxPosImpl!(pred, Iterator, N, kind)(ret, iterator, slice); | foreach (i; Iota!N) | { | pret[0]._lengths[i] = ret[i][0]; | pret[1]._lengths[i] = ret[i][1]; | } | pret[0]._iterator = iterator[0]; | pret[1]._iterator = iterator[1]; | } | auto strides = slice.strides; | foreach(i; Iota!(0, pret[0].S)) | { | pret[0]._strides[i] = strides[i]; | pret[1]._strides[i] = strides[i]; | } | return pret; | } | else | alias minmaxPos = .minmaxPos!(naryFun!pred); |} | |/// |version(mir_test) |unittest |{ | auto s = [ | 2, 6, 4, -3, | 0, -4, -3, 3, | -3, -2, 7, 2, | ].sliced(3, 4); | | auto pos = s.minmaxPos; | | assert(pos[0] == s[$ - 2 .. $, $ - 3 .. $]); | assert(pos[1] == s[$ - 1 .. $, $ - 2 .. $]); | | assert(pos[0].first == -4); | assert(s.backward(pos[0].shape) == -4); | assert(pos[1].first == 7); | assert(s.backward(pos[1].shape) == 7); |} | |/++ |Finds a backward indexes such that |`slice[indexes[0]]` is minimal and `slice[indexes[1]]` is maximal elements in the slice. | |Params: | pred = A predicate. | |See_also: | $(LREF minmaxIndex), | $(LREF minPos), | $(LREF maxPos), | $(REF Slice.backward, mir,ndslice,slice). |+/ |template minmaxIndex(alias pred = "a < b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slice = ndslice. | Returns: | Subslice with minimal (maximal) `first` element. 
| +/ | @optmath size_t[N][2] minmaxIndex(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | { | typeof(return) pret = size_t.max; | if (!slice.anyEmpty) | { | auto shape = slice.shape; | size_t[2][N] ret; | foreach (i; Iota!N) | { | ret[i][1] = ret[i][0] = shape[i]; | } | auto it = slice._iterator; | Iterator[2] iterator = [it, it]; | minmaxPosImpl!(pred, Iterator, N, kind)(ret, iterator, slice); | foreach (i; Iota!N) | { | pret[0][i] = slice._lengths[i] - ret[i][0]; | pret[1][i] = slice._lengths[i] - ret[i][1]; | } | } | return pret; | } | else | alias minmaxIndex = .minmaxIndex!(naryFun!pred); |} | |/// |version(mir_test) |unittest |{ | auto s = [ | 2, 6, 4, -3, | 0, -4, -3, 3, | -3, -2, 7, 8, | ].sliced(3, 4); | | auto indexes = s.minmaxIndex; | | assert(indexes == [[1, 1], [2, 3]]); | assert(s[indexes[0]] == -4); | assert(s[indexes[1]] == 8); |} | |/++ |Finds a backward index such that |`slice.backward(index)` is minimal(maximal). | |Params: | pred = A predicate. | |See_also: | $(LREF minIndex), | $(LREF maxPos), | $(LREF maxIndex), | $(REF Slice.backward, mir,ndslice,slice). |+/ |template minPos(alias pred = "a < b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slice = ndslice. | Returns: | Multidimensional backward index such that element is minimal(maximal). | Backward index equals zeros, if slice is empty. | +/ | @optmath Slice!(Iterator, N, kind == Contiguous && N > 1 ? Canonical : kind) | minPos(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | { | typeof(return) ret = { _iterator : slice._iterator }; | if (!slice.anyEmpty) | { | minPosImpl!(pred, Iterator, N, kind)(ret._lengths, ret._iterator, slice); | } | auto strides = slice.strides; | foreach(i; Iota!(0, ret.S)) | { | ret._strides[i] = strides[i]; | } | return ret; | } | else | alias minPos = .minPos!(naryFun!pred); |} | |/// ditto |template maxPos(alias pred = "a < b") |{ | import mir.functional: naryFun, reverseArgs; | alias maxPos = minPos!(reverseArgs!(naryFun!pred)); |} | |/// |version(mir_test) |unittest |{ | auto s = [ | 2, 6, 4, -3, | 0, -4, -3, 3, | -3, -2, 7, 2, | ].sliced(3, 4); | | auto pos = s.minPos; | | assert(pos == s[$ - 2 .. $, $ - 3 .. $]); | assert(pos.first == -4); | assert(s.backward(pos.shape) == -4); | | pos = s.maxPos; | | assert(pos == s[$ - 1 .. $, $ - 2 .. $]); | assert(pos.first == 7); | assert(s.backward(pos.shape) == 7); |} | |/++ |Finds an index such that |`slice[index]` is minimal(maximal). | |Params: | pred = A predicate. | |See_also: | $(LREF minIndex), | $(LREF maxPos), | $(LREF maxIndex). |+/ |template minIndex(alias pred = "a < b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slice = ndslice. | Returns: | Multidimensional index such that element is minimal(maximal). | Index elements equal to `size_t.max`, if slice is empty. 
| +/ | @optmath size_t[N] minIndex(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | { | size_t[N] ret = size_t.max; | if (!slice.anyEmpty) | { | ret = slice.shape; | minPosImpl!(pred, Iterator, N, kind)(ret, slice._iterator, slice); | foreach (i; Iota!N) | ret[i] = slice._lengths[i] - ret[i]; | } | return ret; | } | else | alias minIndex = .minIndex!(naryFun!pred); |} | |/// ditto |template maxIndex(alias pred = "a < b") |{ | import mir.functional: naryFun, reverseArgs; | alias maxIndex = minIndex!(reverseArgs!(naryFun!pred)); |} | |/// |version(mir_test) |unittest |{ | auto s = [ | 2, 6, 4, -3, | 0, -4, -3, 3, | -3, -2, 7, 8, | ].sliced(3, 4); | | auto index = s.minIndex; | | assert(index == [1, 1]); | assert(s[index] == -4); | | index = s.maxIndex; | | assert(index == [2, 3]); | assert(s[index] == 8); |} | |/// |version(mir_test) |unittest |{ | auto s = [ | -8, 6, 4, -3, | 0, -4, -3, 3, | -3, -2, 7, 8, | ].sliced(3, 4); | | auto index = s.minIndex; | | assert(index == [0, 0]); | assert(s[index] == -8); |} | |version(mir_test) |unittest |{ | auto s = [ | 0, 1, 2, 3, | 4, 5, 6, 7, | 8, 9, 10, 11 | ].sliced(3, 4); | | auto index = s.minIndex; | assert(index == [0, 0]); | assert(s[index] == 0); | | index = s.maxIndex; | assert(index == [2, 3]); | assert(s[index] == 11); |} | |bool findImpl(alias fun, size_t N, Slices...)(scope ref size_t[N] backwardIndex, Slices slices) | if (Slices.length) |{ | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I)) | { | auto cnt = BitSliceAccelerator!(Field, I)(slices[0]).cttz; | if (cnt = -1) | return false; | backwardIndex[0] = slices[0].length - cnt; | } | else | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I)) | { | import mir.ndslice.topology: retro; | auto cnt = BitSliceAccelerator!(Field, I)(slices[0].retro).ctlz; | if (cnt = -1) | return false; | backwardIndex[0] = slices[0].length - cnt; | } | else | { | do | { | static if (DimensionCount!(Slices[0]) == 1) | { | if (mixin("fun(" ~ frontOf!(Slices.length) ~ ")")) | { | backwardIndex[0] = slices[0].length; | return true; | } | } | else | { | if (mixin("findImpl!fun(backwardIndex[1 .. $], " ~ frontOf!(Slices.length) ~ ")")) | { | backwardIndex[0] = slices[0].length; | return true; | } | } | foreach_reverse(ref slice; slices) | slice.popFront; | } | while(!slices[0].empty); | return false; | } |} | |/++ |Finds an index such that |`pred(slices[0][index], ..., slices[$-1][index])` is `true`. | |Params: | pred = A predicate. | |See_also: | $(LREF find), | $(LREF any). |Optimization: | `findIndex!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation). |+/ |template findIndex(alias pred) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slices = One or more slices. | Returns: | Multidimensional index such that the predicate is true. | Index equals `size_t.max`, if the predicate evaluates `false` for all indexes. | Constraints: | All slices must have the same shape. 
| +/ | @optmath Select!(DimensionCount!(Slices[0]) > 1, size_t[DimensionCount!(Slices[0])], size_t) findIndex(Slices...)(Slices slices) | if (Slices.length) | { | static if (Slices.length > 1) | slices.checkShapesMatch; | size_t[DimensionCount!(Slices[0])] ret = -1; | auto lengths = slices[0].shape; | if (!slices[0].anyEmpty && findImpl!pred(ret, slices)) | foreach (i; Iota!(DimensionCount!(Slices[0]))) | ret[i] = lengths[i] - ret[i]; | static if (DimensionCount!(Slices[0]) > 1) | return ret; | else | return ret[0]; | } | else | alias findIndex = .findIndex!(naryFun!pred); |} | |/// Ranges and arrays |version(mir_test) |unittest |{ | import std.range : iota; | // 0 1 2 3 4 5 | auto sl = iota(5); | size_t index = sl.findIndex!"a == 3"; | | assert(index == 3); | assert(sl[index] == 3); | | assert(sl.findIndex!(a => a == 8) == size_t.max); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3); | size_t[2] index = sl.findIndex!(a => a == 3); | | assert(sl[index] == 3); | | index = sl.findIndex!"a == 6"; | assert(index[0] == size_t.max); | assert(index[1] == size_t.max); |} | |/++ |Finds a backward index such that |`pred(slices[0].backward(index), ..., slices[$-1].backward(index))` is `true`. | |Params: | pred = A predicate. | |Optimization: | To check if any element was found | use the last dimension (row index). | This will slightly optimize the code. |-------- |// $-1 instead of 0 |if (backwardIndex[$-1]) |{ | auto elem1 = slice1.backward(backwardIndex); | //... | auto elemK = sliceK.backward(backwardIndex); |} |else |{ | // not found |} |-------- | |See_also: | $(LREF findIndex), | $(LREF any), | $(REF Slice.backward, mir,ndslice,slice). | |Optimization: | `find!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation). |+/ |template find(alias pred) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slices = One or more slices. | Returns: | Multidimensional backward index such that the predicate is true. | Backward index equals zeros, if the predicate evaluates `false` for all indexes. | Constraints: | All slices must have the same shape. 
| +/ | @optmath Select!(DimensionCount!(Slices[0]) > 1, size_t[DimensionCount!(Slices[0])], size_t) find(Slices...)(auto ref Slices slices) | if (Slices.length && allSatisfy!(hasShape, Slices)) | { | static if (Slices.length > 1) | slices.checkShapesMatch; | size_t[DimensionCount!(Slices[0])] ret; | if (!slices[0].anyEmpty) | findImpl!pred(ret, slices); | static if (DimensionCount!(Slices[0]) > 1) | return ret; | else | return ret[0]; | } | else | alias find = .find!(naryFun!pred); |} | |/// Ranges and arrays |version(mir_test) |unittest |{ | import std.range : iota; | | auto sl = iota(10); | size_t index = sl.find!"a == 3"; | | assert(sl[$ - index] == 3); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3); | size_t[2] bi = sl.find!"a == 3"; | assert(sl.backward(bi) == 3); | assert(sl[$ - bi[0], $ - bi[1]] == 3); | | bi = sl.find!"a == 6"; | assert(bi[0] == 0); | assert(bi[1] == 0); |} | |/// Multiple slices |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | // 0 1 2 | // 3 4 5 | auto a = iota(2, 3); | // 10 11 12 | // 13 14 15 | auto b = iota([2, 3], 10); | | size_t[2] bi = find!((a, b) => a * b == 39)(a, b); | assert(a.backward(bi) == 3); | assert(b.backward(bi) == 13); |} | |/// Zipped slices |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, zip; | | // 0 1 2 | // 3 4 5 | auto a = iota(2, 3); | // 10 11 12 | // 13 14 15 | auto b = iota([2, 3], 10); | | size_t[2] bi = zip!true(a, b).find!"a.a * a.b == 39"; | | assert(a.backward(bi) == 3); | assert(b.backward(bi) == 13); |} | |/// Mutation on-the-fly |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3).as!double.slice; | | static bool pred(T)(ref T a) | { | if (a == 5) | return true; | a = 8; | return false; | } | | size_t[2] bi = sl.find!pred; | | assert(bi == [1, 1]); | assert(sl.backward(bi) == 5); | | // sl was changed | assert(sl == [[8, 8, 8], | [8, 8, 5]]); |} | |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | size_t i; | size_t[2] bi = iota(2, 0).find!((elem){i++; return true;}); | assert(i == 0); | assert(bi == [0, 0]); |} | |size_t anyImpl(alias fun, Slices...)(scope Slices slices) | if (Slices.length) |{ | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I)) | { | return BitSliceAccelerator!(Field, I)(slices[0]).any; | } | else | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I)) | { | // pragma(msg, S); | import mir.ndslice.topology: retro; | return .anyImpl!fun(slices[0].retro); | } | else | { | do | { | static if (DimensionCount!(Slices[0]) == 1) | { | if (mixin("fun(" ~ frontOf!(Slices.length) ~ ")")) | return true; | } | else | { | if (mixin("anyImpl!fun(" ~ frontOf!(Slices.length) ~ ")")) | return true; | } | foreach_reverse(ref slice; slices) | slice.popFront; | } | while(!slices[0].empty); | return false; | } |} | |/++ |Like $(LREF find), but only returns whether or not the search was successful. | |Params: | pred = The predicate. |Optimization: | `any!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation). 
|+/ |template any(alias pred = "a") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slices = One or more slices, ranges, and arrays. | Returns: | `true` if the search was successful and `false` otherwise. | Constraints: | All slices must have the same shape. | +/ | @optmath bool any(Slices...)(scope Slices slices) | if ((Slices.length == 1 || !__traits(isSame, pred, "a")) && Slices.length) | { | static if (Slices.length > 1) | slices.checkShapesMatch; | static if (areAllContiguousSlices!Slices) | { | import mir.ndslice.topology: flattened; | return mixin(`.any!pred(` ~ allFlattened!(Slices.length) ~`)`); | } | else | { | return !slices[0].anyEmpty && anyImpl!pred(slices); | } | } | else | alias any = .any!(naryFun!pred); |} | |/// Ranges and arrays |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import std.range : iota; | // 0 1 2 3 4 5 | auto r = iota(6); | | assert(r.any!"a == 3"); | assert(!r.any!"a == 6"); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3); | | assert(sl.any!"a == 3"); | assert(!sl.any!"a == 6"); |} | |/// Multiple slices |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | // 0 1 2 | // 3 4 5 | auto a = iota(2, 3); | // 10 11 12 | // 13 14 15 | auto b = iota([2, 3], 10); | | assert(any!((a, b) => a * b == 39)(a, b)); |} | |/// Zipped slices |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, zip; | | // 0 1 2 | // 3 4 5 | auto a = iota(2, 3); | // 10 11 12 | // 13 14 15 | auto b = iota([2, 3], 10); | | // slices must have the same strides | | assert(zip!true(a, b).any!"a.a * a.b == 39"); |} | |/// Mutation on-the-fly |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3).as!double.slice; | | static bool pred(T)(ref T a) | { | if (a == 5) | return true; | a = 8; | return false; | } | | assert(sl.any!pred); | | // sl was changed | assert(sl == [[8, 8, 8], | [8, 8, 5]]); |} | |size_t allImpl(alias fun, Slices...)(scope Slices slices) | if (Slices.length) |{ | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I)) | { | return BitSliceAccelerator!(Field, I)(slices[0]).all; | } | else | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I)) | { | // pragma(msg, S); | import mir.ndslice.topology: retro; | return .allImpl!fun(slices[0].retro); | } | else | { | do | { | static if (DimensionCount!(Slices[0]) == 1) | { 0000000| if (!mixin("fun(" ~ frontOf!(Slices.length) ~ ")")) 0000000| return false; | } | else | { | if (!mixin("allImpl!fun(" ~ frontOf!(Slices.length) ~ ")")) | return false; | } 0000000| foreach_reverse(ref slice; slices) 0000000| slice.popFront; | } 0000000| while(!slices[0].empty); 0000000| return true; | } |} | |/++ |Checks if all of the elements verify `pred`. | |Params: | pred = The predicate. |Optimization: | `all!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation). |+/ |template all(alias pred = "a") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slices = One or more slices. 
| Returns: | `true` all of the elements verify `pred` and `false` otherwise. | Constraints: | All slices must have the same shape. | +/ | @optmath bool all(Slices...)(scope Slices slices) | if ((Slices.length == 1 || !__traits(isSame, pred, "a")) && Slices.length) | { | static if (Slices.length > 1) 0000000| slices.checkShapesMatch; | static if (areAllContiguousSlices!Slices) | { | import mir.ndslice.topology: flattened; 0000000| return mixin(`.all!pred(` ~ allFlattened!(Slices.length) ~`)`); | } | else | { 0000000| return slices[0].anyEmpty || allImpl!pred(slices); | } | } | else | alias all = .all!(naryFun!pred); |} | |/// Ranges and arrays |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import std.range : iota; | // 0 1 2 3 4 5 | auto r = iota(6); | | assert(r.all!"a < 6"); | assert(!r.all!"a < 5"); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3); | | assert(sl.all!"a < 6"); | assert(!sl.all!"a < 5"); |} | |/// Multiple slices |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3); | | assert(all!"a - b == 0"(sl, sl)); |} | |/// Zipped slices |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, zip; | | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3); | | | assert(zip!true(sl, sl).all!"a.a - a.b == 0"); |} | |/// Mutation on-the-fly |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3).as!double.slice; | | static bool pred(T)(ref T a) | { | if (a < 4) | { | a = 8; | return true; | } | return false; | } | | assert(!sl.all!pred); | | // sl was changed | assert(sl == [[8, 8, 8], | [8, 4, 5]]); |} | |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | size_t i; | assert(iota(2, 0).all!((elem){i++; return true;})); | assert(i == 0); |} | |/++ |Counts elements in slices according to the `fun`. |Params: | fun = A predicate. | |Optimization: | `count!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation). |+/ |template count(alias fun) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!fun, fun)) | /++ | Params: | slices = One or more slices, ranges, and arrays. | | Returns: The number elements according to the `fun`. | | Constraints: | All slices must have the same shape. 
| +/ | @optmath size_t count(Slices...)(scope Slices slices) | if (Slices.length) | { | static if (Slices.length > 1) | slices.checkShapesMatch; | static if (__traits(isSame, fun, naryFun!"true")) | { | return slices[0].elementCount; | } | else | static if (areAllContiguousSlices!Slices) | { | import mir.ndslice.topology: flattened; | return mixin(`.count!fun(` ~ allFlattened!(Slices.length) ~`)`); | } | else | { | if (slices[0].anyEmpty) | return 0; | return countImpl!(fun, Slices)(slices); | } | } | else | alias count = .count!(naryFun!fun); | |} | |/// Ranges and arrays |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import std.range : iota; | // 0 1 2 3 4 5 | auto r = iota(6); | | assert(r.count!"true" == 6); | assert(r.count!"a" == 5); | assert(r.count!"a % 2" == 3); |} | |/// Single slice |version(mir_test) |unittest |{ | import mir.ndslice.topology : iota; | | //| 0 1 2 | | //| 3 4 5 | | auto sl = iota(2, 3); | | assert(sl.count!"true" == 6); | assert(sl.count!"a" == 5); | assert(sl.count!"a % 2" == 3); |} | |/// Accelerated set bit count |version(mir_test) |unittest |{ | import mir.ndslice.topology: retro, iota, bitwise; | import mir.ndslice.allocation: slice; | | //| 0 1 2 | | //| 3 4 5 | | auto sl = iota!size_t(2, 3).bitwise; | | assert(sl.count!"true" == 6 * size_t.sizeof * 8); | | assert(sl.slice.count!"a" == 7); | | // accelerated | assert(sl.count!"a" == 7); | assert(sl.retro.count!"a" == 7); | | auto sl2 = iota!ubyte([6], 128).bitwise; | // accelerated | assert(sl2.count!"a" == 13); | assert(sl2[4 .. $].count!"a" == 13); | assert(sl2[4 .. $ - 1].count!"a" == 12); | assert(sl2[4 .. $ - 1].count!"a" == 12); | assert(sl2[41 .. $ - 1].count!"a" == 1); |} | |unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: bitwise, assumeFieldsHaveZeroShift; | auto sl = slice!uint([6]).bitwise; | auto slb = slice!ubyte([6]).bitwise; | slb[4] = true; | auto d = slb[4]; | auto c = assumeFieldsHaveZeroShift(slb & ~slb); | // pragma(msg, typeof(c)); | assert(!sl.any); | assert((~sl).all); | // pragma(msg, typeof(~slb)); | // pragma(msg, typeof(~slb)); | // assert(sl.findIndex); |} | |/++ |Compares two or more slices for equality, as defined by predicate `pred`. | |See_also: $(NDSLICEREF slice, Slice.opEquals) | |Params: | pred = The predicate. |+/ |template equal(alias pred = "a == b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slices = Two or more slices, slices, ranges, and arrays. | | Returns: | `true` any of the elements verify `pred` and `false` otherwise. 
| +/ | bool equal(Slices...)(scope Slices slices) | if (Slices.length >= 2) | { | enum msg = "all arguments must be slices" ~ tailErrorMessage!(); | enum msgShape = "all slices must have the same dimension count" ~ tailErrorMessage!(); | import mir.internal.utility; | foreach (i, Slice; Slices) | { | // static assert (isSlice!Slice, msg); | static if (i) | { | static assert (DimensionCount!(Slices[i]) == DimensionCount!(Slices[0])); | foreach (j; Iota!(DimensionCount!(Slices[0]))) 0000000| if (slices[i].shape[j] != slices[0].shape[j]) 0000000| goto False; | } | } 0000000| return all!pred(slices); 0000000| False: return false; | } | else | alias equal = .equal!(naryFun!pred); |} | |/// Ranges and arrays |@safe pure nothrow |version(mir_test) unittest |{ | import std.range : iota; | auto r = iota(6); | assert(r.equal([0, 1, 2, 3, 4, 5])); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : iota; | | // 0 1 2 | // 3 4 5 | auto sl1 = iota(2, 3); | // 1 2 3 | // 4 5 6 | auto sl2 = iota([2, 3], 1); | | assert(equal(sl1, sl1)); | assert(sl1 == sl1); //can also use opEquals for two Slices | assert(equal!"2 * a == b + c"(sl1, sl1, sl1)); | | assert(equal!"a < b"(sl1, sl2)); | | assert(!equal(sl1[0 .. $ - 1], sl1)); | assert(!equal(sl1[0 .. $, 0 .. $ - 1], sl1)); |} | |ptrdiff_t cmpImpl(alias pred, A, B) | (scope A sl1, scope B sl2) | if (DimensionCount!A == DimensionCount!B) |{ | for (;;) | { | static if (DimensionCount!A == 1) | { | import mir.functional : naryFun; | if (naryFun!pred(sl1.front, sl2.front)) | return -1; | if (naryFun!pred(sl2.front, sl1.front)) | return 1; | } | else | { | if (auto res = .cmpImpl!pred(sl1.front, sl2.front)) | return res; | } | sl1.popFront; | if (sl1.empty) | return -cast(ptrdiff_t)(sl2.length > 1); | sl2.popFront; | if (sl2.empty) | return 1; | } |} | |/++ |Performs three-way recursive lexicographical comparison on two slices according to predicate `pred`. |Iterating `sl1` and `sl2` in lockstep, `cmp` compares each `N-1` dimensional element `e1` of `sl1` |with the corresponding element `e2` in `sl2` recursively. |If one of the slices has been finished,`cmp` returns a negative value if `sl1` has fewer elements than `sl2`, |a positive value if `sl1` has more elements than `sl2`, |and `0` if the ranges have the same number of elements. | |Params: | pred = The predicate. |+/ |template cmp(alias pred = "a < b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | sl1 = First slice, range, or array. | sl2 = Second slice, range, or array. | | Returns: | `0` if both ranges compare equal. | Negative value if the first differing element of `sl1` is less than the corresponding | element of `sl2` according to `pred`. | Positive value if the first differing element of `sl2` is less than the corresponding | element of `sl1` according to `pred`. 
| +/ | ptrdiff_t cmp(A, B) | (scope A sl1, scope B sl2) | if (DimensionCount!A == DimensionCount!B) | { | auto b = sl2.anyEmpty; | if (sl1.anyEmpty) | { | if (!b) | return -1; | auto sh1 = sl1.shape; | auto sh2 = sl2.shape; | foreach (i; Iota!(DimensionCount!A)) | if (ptrdiff_t ret = sh1[i] - sh2[i]) | return ret; | return 0; | } | if (b) | return 1; | return cmpImpl!pred(sl1, sl2); | } | else | alias cmp = .cmp!(naryFun!pred); |} | |/// Ranges and arrays |@safe pure nothrow |version(mir_test) unittest |{ | import std.range : iota; | | // 0 1 2 3 4 5 | auto r1 = iota(0, 6); | // 1 2 3 4 5 6 | auto r2 = iota(1, 7); | | assert(cmp(r1, r1) == 0); | assert(cmp(r1, r2) < 0); | assert(cmp!"a >= b"(r1, r2) > 0); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | // 0 1 2 | // 3 4 5 | auto sl1 = iota(2, 3); | // 1 2 3 | // 4 5 6 | auto sl2 = iota([2, 3], 1); | | assert(cmp(sl1, sl1) == 0); | assert(cmp(sl1, sl2) < 0); | assert(cmp!"a >= b"(sl1, sl2) > 0); |} | |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | auto sl1 = iota(2, 3); | auto sl2 = iota([2, 3], 1); | | assert(cmp(sl1[0 .. $ - 1], sl1) < 0); | assert(cmp(sl1, sl1[0 .. $, 0 .. $ - 1]) > 0); | | assert(cmp(sl1[0 .. $ - 2], sl1) < 0); | assert(cmp(sl1, sl1[0 .. $, 0 .. $ - 3]) > 0); | assert(cmp(sl1[0 .. $, 0 .. $ - 3], sl1[0 .. $, 0 .. $ - 3]) == 0); | assert(cmp(sl1[0 .. $, 0 .. $ - 3], sl1[0 .. $ - 1, 0 .. $ - 3]) > 0); | assert(cmp(sl1[0 .. $ - 1, 0 .. $ - 3], sl1[0 .. $, 0 .. $ - 3]) < 0); |} | |size_t countImpl(alias fun, Slices...)(scope Slices slices) |{ | size_t ret; | alias S = Slices[0]; | import mir.functional: naryFun; | import mir.ndslice.iterator: FieldIterator, RetroIterator; | import mir.ndslice.field: BitField; | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I)) | { | ret = BitSliceAccelerator!(Field, I)(slices[0]).ctpop; | } | else | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I)) | { | // pragma(msg, S); | import mir.ndslice.topology: retro; | ret = .countImpl!fun(slices[0].retro); | } | else | do | { | static if (DimensionCount!(Slices[0]) == 1) | { | if(mixin("fun(" ~ frontOf!(Slices.length) ~ ")")) | ret++; | } | else | ret += mixin(".countImpl!fun(" ~ frontOf!(Slices.length) ~ ")"); | foreach_reverse(ref slice; slices) | slice.popFront; | } | while(!slices[0].empty); | return ret; |} | |private template selectBackOf(size_t N, string input) |{ | static if (N == 0) | enum selectBackOf = ""; | else | { | enum i = N - 1; | enum selectBackOf = selectBackOf!(i, input) ~ | "slices[" ~ i.stringof ~ "].selectBack!0(" ~ input ~ "), "; | } |} | |private template frontSelectFrontOf(size_t N, string input) |{ | static if (N == 0) | enum frontSelectFrontOf = ""; | else | { | enum i = N - 1; | enum frontSelectFrontOf = frontSelectFrontOf!(i, input) ~ | "slices[" ~ i.stringof ~ "].front!0.selectFront!0(" ~ input ~ "), "; | } |} | |/++ |Returns: max length across all dimensions. |+/ |size_t maxLength(S)(auto ref scope S s) | if (hasShape!S) |{ | auto shape = s.shape; | size_t length = 0; | foreach(i; Iota!(shape.length)) | if (shape[i] > length) | length = shape[i]; | return length; |} | |/++ |The call `eachLower!(fun)(slice1, ..., sliceN)` evaluates `fun` on the lower |triangle in `slice1, ..., sliceN` respectively. | |`eachLower` allows iterating multiple slices in the lockstep. 
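|
|A minimal sketch, mirroring the documented unittest below: zero the strictly
|lower triangle of a square matrix (the default `k = 1` keeps the main diagonal).
|-------
|import mir.ndslice.allocation: slice;
|import mir.ndslice.topology: iota;
|
|auto m = iota([3, 3], 1).slice;
|m.eachLower!"a = 0";
|assert(m == [[1, 2, 3],
|             [0, 5, 6],
|             [0, 0, 9]]);
|-------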
| |Params: | fun = A function |See_Also: | This is functionally similar to $(LREF each). |+/ |template eachLower(alias fun) |{ | import mir.functional : naryFun; | | static if (__traits(isSame, naryFun!fun, fun)) | { | /++ | Params: | inputs = One or more two-dimensional slices and an optional | integer, `k`. | | The value `k` determines which diagonals will have the function | applied: | For k = 0, the function is also applied to the main diagonal | For k = 1 (default), only the non-main diagonals below the main | diagonal will have the function applied. | For k > 1, fewer diagonals below the main diagonal will have the | function applied. | For k < 0, more diagonals above the main diagonal will have the | function applied. | +/ | void eachLower(Inputs...)(scope Inputs inputs) | if (((Inputs.length > 1) && | (isIntegral!(Inputs[$ - 1]))) || | (Inputs.length)) | { | import mir.ndslice.traits : isMatrix; | | size_t val; | | static if ((Inputs.length > 1) && (isIntegral!(Inputs[$ - 1]))) | { | immutable(sizediff_t) k = inputs[$ - 1]; | alias Slices = Inputs[0..($ - 1)]; | alias slices = inputs[0..($ - 1)]; | } | else | { | enum sizediff_t k = 1; | alias Slices = Inputs; | alias slices = inputs; | } | | static assert (allSatisfy!(isMatrix, Slices), | "eachLower: Every slice input must be a two-dimensional slice"); | static if (Slices.length > 1) | slices.checkShapesMatch; | if (slices[0].anyEmpty) | return; | | foreach(ref slice; slices) | assert(!slice.empty); | | immutable(size_t) m = slices[0].length!0; | immutable(size_t) n = slices[0].length!1; | | if ((n + k) < m) | { | val = m - (n + k); | mixin(".eachImpl!fun(" ~ selectBackOf!(Slices.length, "val") ~ ");"); | } | | size_t i; | | if (k > 0) | { | foreach(ref slice; slices) | slice.popFrontExactly!0(k); | i = k; | } | | do | { | val = i - k + 1; | mixin(".eachImpl!fun(" ~ frontSelectFrontOf!(Slices.length, "val") ~ ");"); | | foreach(ref slice; slices) | slice.popFront!0; | i++; | } while ((i < (n + k)) && (i < m)); | } | } | else | { | alias eachLower = .eachLower!(naryFun!fun); | } |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota, canonical, universal; | alias AliasSeq(T...) 
= T; | | pure nothrow | void test(alias func)() | { | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = func(iota([3, 3], 1).slice); | m.eachLower!"a = 0"(0); | assert(m == [ | [0, 2, 3], | [0, 0, 6], | [0, 0, 0]]); | } | | @safe pure nothrow @nogc | T identity(T)(T x) | { | return x; | } | | alias kinds = AliasSeq!(identity, canonical, universal); | test!(kinds[0]); | test!(kinds[1]); | test!(kinds[2]); |} | |/// |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | m.eachLower!"a = 0"; | assert(m == [ | [1, 2, 3], | [0, 5, 6], | [0, 0, 9]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | m.eachLower!"a = 0"(-1); | assert(m == [ | [0, 0, 3], | [0, 0, 0], | [0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | m.eachLower!"a = 0"(2); | assert(m == [ | [1, 2, 3], | [4, 5, 6], | [0, 8, 9]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | m.eachLower!"a = 0"(-2); | assert(m == [ | [0, 0, 0], | [0, 0, 0], | [0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachLower!"a = 0"(0); | assert(m == [ | [0, 2, 3, 4], | [0, 0, 7, 8], | [0, 0, 0, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachLower!"a = 0"; | assert(m == [ | [1, 2, 3, 4], | [0, 6, 7, 8], | [0, 0, 11, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachLower!"a = 0"(-1); | assert(m == [ | [0, 0, 3, 4], | [0, 0, 0, 8], | [0, 0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachLower!"a = 0"(2); | assert(m == [ | [1, 2, 3, 4], | [5, 6, 7, 8], | [0, 10, 11, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachLower!"a = 0"(-2); | assert(m == [ | [0, 0, 0, 4], | [0, 0, 0, 0], | [0, 0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachLower!"a = 0"(0); | assert(m == [ | [0, 2, 3], | [0, 0, 6], | [0, 0, 0], | [0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 
2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachLower!"a = 0"; | assert(m == [ | [1, 2, 3], | [0, 5, 6], | [0, 0, 9], | [0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachLower!"a = 0"(-1); | assert(m == [ | [0, 0, 3], | [0, 0, 0], | [0, 0, 0], | [0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachLower!"a = 0"(2); | assert(m == [ | [1, 2, 3], | [4, 5, 6], | [0, 8, 9], | [0, 0, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachLower!"a = 0"(-2); | assert(m == [ | [0, 0, 0], | [0, 0, 0], | [0, 0, 0], | [0, 0, 0]]); |} | |/// Swap two slices |pure nothrow |version(mir_test) unittest |{ | import mir.utility : swap; | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | //| 0 1 2 | | //| 3 4 5 | | //| 6 7 8 | | auto a = iota([3, 3]).as!double.slice; | //| 10 11 12 | | //| 13 14 15 | | //| 16 17 18 | | auto b = iota([3, 3], 10).as!double.slice; | | eachLower!swap(a, b); | | assert(a == [ | [ 0, 1, 2], | [13, 4, 5], | [16, 17, 8]]); | assert(b == [ | [10, 11, 12], | [ 3, 14, 15], | [ 6, 7, 18]]); |} | |/// Swap two zipped slices |pure nothrow |version(mir_test) unittest |{ | import mir.utility : swap; | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, zip, iota; | | //| 0 1 2 | | //| 3 4 5 | | //| 6 7 8 | | auto a = iota([3, 3]).as!double.slice; | //| 10 11 12 | | //| 13 14 15 | | //| 16 17 18 | | auto b = iota([3, 3], 10).as!double.slice; | | auto z = zip(a, b); | | z.eachLower!(z => swap(z.a, z.b)); | | assert(a == [ | [ 0, 1, 2], | [13, 4, 5], | [16, 17, 8]]); | assert(b == [ | [10, 11, 12], | [ 3, 14, 15], | [ 6, 7, 18]]); |} | |private template frontSelectBackOf(size_t N, string input) |{ | static if (N == 0) | enum frontSelectBackOf = ""; | else | { | enum i = N - 1; | enum frontSelectBackOf = frontSelectBackOf!(i, input) ~ | "slices[" ~ i.stringof ~ "].front.selectBack!0(" ~ input ~ "), "; | } |} | |private template selectFrontOf(size_t N, string input) |{ | static if (N == 0) | enum selectFrontOf = ""; | else | { | enum i = N - 1; | enum selectFrontOf = selectFrontOf!(i, input) ~ | "slices[" ~ i.stringof ~ "].selectFront!0(" ~ input ~ "), "; | } |} | |/++ |The call `eachUpper!(fun)(slice1, ..., sliceN)` evaluates `fun` on the upper |triangle in `slice1, ..., sliceN`, respectively. | |`eachUpper` allows iterating multiple slices in the lockstep. | |Params: | fun = A function |See_Also: | This is functionally similar to $(LREF each). |+/ |template eachUpper(alias fun) |{ | import mir.functional: naryFun; | | static if (__traits(isSame, naryFun!fun, fun)) | { | /++ | Params: | inputs = One or more two-dimensional slices and an optional | integer, `k`. | | The value `k` determines which diagonals will have the function | applied: | For k = 0, the function is also applied to the main diagonal | For k = 1 (default), only the non-main diagonals above the main | diagonal will have the function applied. 
| For k > 1, fewer diagonals below the main diagonal will have the | function applied. | For k < 0, more diagonals above the main diagonal will have the | function applied. | +/ | void eachUpper(Inputs...)(scope Inputs inputs) | if (((Inputs.length > 1) && | (isIntegral!(Inputs[$ - 1]))) || | (Inputs.length)) | { | import mir.ndslice.traits : isMatrix; | | size_t val; | | static if ((Inputs.length > 1) && (isIntegral!(Inputs[$ - 1]))) | { | immutable(sizediff_t) k = inputs[$ - 1]; | alias Slices = Inputs[0..($ - 1)]; | alias slices = inputs[0..($ - 1)]; | } | else | { | enum sizediff_t k = 1; | alias Slices = Inputs; | alias slices = inputs; | } | | static assert (allSatisfy!(isMatrix, Slices), | "eachUpper: Every slice input must be a two-dimensional slice"); | static if (Slices.length > 1) | slices.checkShapesMatch; | if (slices[0].anyEmpty) | return; | | foreach(ref slice; slices) | assert(!slice.empty); | | immutable(size_t) m = slices[0].length!0; | immutable(size_t) n = slices[0].length!1; | | size_t i; | | if (k < 0) | { | val = -k; | mixin(".eachImpl!fun(" ~ selectFrontOf!(Slices.length, "val") ~ ");"); | | foreach(ref slice; slices) | slice.popFrontExactly!0(-k); | i = -k; | } | | do | { | val = (n - k) - i; | mixin(".eachImpl!fun(" ~ frontSelectBackOf!(Slices.length, "val") ~ ");"); | | foreach(ref slice; slices) | slice.popFront; | i++; | } while ((i < (n - k)) && (i < m)); | } | } | else | { | alias eachUpper = .eachUpper!(naryFun!fun); | } |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota, canonical, universal; | | pure nothrow | void test(alias func)() | { | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = func(iota([3, 3], 1).slice); | m.eachUpper!"a = 0"(0); | assert(m == [ | [0, 0, 0], | [4, 0, 0], | [7, 8, 0]]); | } | | @safe pure nothrow @nogc | T identity(T)(T x) | { | return x; | } | | alias kinds = AliasSeq!(identity, canonical, universal); | test!(kinds[0]); | test!(kinds[1]); | test!(kinds[2]); |} | |/// |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | m.eachUpper!"a = 0"; | assert(m == [ | [1, 0, 0], | [4, 5, 0], | [7, 8, 9]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | m.eachUpper!"a = 0"(-1); | assert(m == [ | [0, 0, 0], | [0, 0, 0], | [7, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | m.eachUpper!"a = 0"(2); | assert(m == [ | [1, 2, 0], | [4, 5, 6], | [7, 8, 9]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | m.eachUpper!"a = 0"(-2); | assert(m == [ | [0, 0, 0], | [0, 0, 0], | [0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachUpper!"a = 0"(0); | assert(m == [ | [0, 0, 0, 0], | [5, 0, 0, 0], | [9, 10, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import 
mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachUpper!"a = 0"; | assert(m == [ | [1, 0, 0, 0], | [5, 6, 0, 0], | [9, 10, 11, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachUpper!"a = 0"(-1); | assert(m == [ | [0, 0, 0, 0], | [0, 0, 0, 0], | [9, 0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachUpper!"a = 0"(2); | assert(m == [ | [1, 2, 0, 0], | [5, 6, 7, 0], | [9, 10, 11, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachUpper!"a = 0"(-2); | assert(m == [ | [0, 0, 0, 0], | [0, 0, 0, 0], | [0, 0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachUpper!"a = 0"(0); | assert(m == [ | [0, 0, 0], | [4, 0, 0], | [7, 8, 0], | [10, 11, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachUpper!"a = 0"; | assert(m == [ | [1, 0, 0], | [4, 5, 0], | [7, 8, 9], | [10, 11, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachUpper!"a = 0"(-1); | assert(m == [ | [0, 0, 0], | [0, 0, 0], | [7, 0, 0], | [10, 11, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachUpper!"a = 0"(2); | assert(m == [ | [1, 2, 0], | [4, 5, 6], | [7, 8, 9], | [10, 11, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachUpper!"a = 0"(-2); | assert(m == [ | [0, 0, 0], | [0, 0, 0], | [0, 0, 0], | [10, 0, 0]]); |} | |/// Swap two slices |pure nothrow |version(mir_test) unittest |{ | import mir.utility : swap; | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | //| 0 1 2 | | //| 3 4 5 | | //| 6 7 8 | | auto a = iota([3, 3]).as!double.slice; | //| 10 11 12 | | //| 13 14 15 | | //| 16 17 18 | | auto b = iota([3, 3], 10).as!double.slice; | | eachUpper!swap(a, b); | | assert(a == [ | [0, 11, 12], | [3, 4, 15], | [6, 7, 8]]); | assert(b == [ | [10, 1, 2], | [13, 14, 5], | [16, 17, 18]]); |} | |/// Swap two zipped slices |pure nothrow |version(mir_test) unittest |{ | import mir.utility : swap; | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, zip, iota; | | //| 0 1 2 | | //| 3 4 5 | | //| 6 7 8 | | auto a = iota([3, 
3]).as!double.slice; | //| 10 11 12 | | //| 13 14 15 | | //| 16 17 18 | | auto b = iota([3, 3], 10).as!double.slice; | | auto z = zip(a, b); | | z.eachUpper!(z => swap(z.a, z.b)); | | assert(a == [ | [0, 11, 12], | [3, 4, 15], | [6, 7, 8]]); | assert(b == [ | [10, 1, 2], | [13, 14, 5], | [16, 17, 18]]); |} | |// uniq |/** |Lazily iterates unique consecutive elements of the given range (functionality |akin to the $(HTTP wikipedia.org/wiki/_Uniq, _uniq) system |utility). Equivalence of elements is assessed by using the predicate |$(D pred), by default $(D "a == b"). The predicate is passed to |$(REF nary, mir,functional), and can either accept a string, or any callable |that can be executed via $(D pred(element, element)). If the given range is |bidirectional, $(D uniq) also yields a |`std,range,primitives`. |Params: | pred = Predicate for determining equivalence between range elements. | r = An input range of elements to filter. |Returns: | An input range of | consecutively unique elements in the original range. If `r` is also a | forward range or bidirectional range, the returned range will be likewise. |*/ |Uniq!(naryFun!pred, Range) uniq(alias pred = "a == b", Range)(auto ref Range r) |if (isInputRange!Range && is(typeof(naryFun!pred(r.front, r.front)) == bool)) |{ | return typeof(return)(r); |} | |/// |@safe version(mir_test) unittest |{ | import std.algorithm.comparison : equal; | import std.algorithm.mutation : copy; | | int[] arr = [ 1, 2, 2, 2, 2, 3, 4, 4, 4, 5 ]; | assert(equal(uniq(arr), [ 1, 2, 3, 4, 5 ][])); | | // Filter duplicates in-place using copy | arr.length -= arr.uniq().copy(arr).length; | assert(arr == [ 1, 2, 3, 4, 5 ]); | | // Note that uniqueness is only determined consecutively; duplicated | // elements separated by an intervening different element will not be | // eliminated: | assert(equal(uniq([ 1, 1, 2, 1, 1, 3, 1]), [1, 2, 1, 3, 1])); |} | |/++ |Authros: $(HTTP erdani.com, Andrei Alexandrescu) (original Phobos code), Ilya Yaroshenko (betterC rework) |+/ |struct Uniq(alias pred, Range) |{ | Range _input; | | // this()(auto ref Range input) | // { | // alias AliasSeq(T...) = T; | // import mir.functional: forward; | // AliasSeq!_input = forward!input; | // } | | ref opSlice() inout | { | return this; | } | | void popFront() scope | { | assert(!empty, "Attempting to popFront an empty uniq."); | auto last = _input.front; | do | { | _input.popFront(); | } | while (!_input.empty && pred(last, _input.front)); | } | | @property ElementType!Range front() | { | assert(!empty, "Attempting to fetch the front of an empty uniq."); | return _input.front; | } | | static if (isBidirectionalRange!Range) | { | void popBack() scope | { | assert(!empty, "Attempting to popBack an empty uniq."); | auto last = _input.back; | do | { | _input.popBack(); | } | while (!_input.empty && pred(last, _input.back)); | } | | @property ElementType!Range back() scope return | { | assert(!empty, "Attempting to fetch the back of an empty uniq."); | return _input.back; | } | } | | static if (isInfinite!Range) | { | enum bool empty = false; // Propagate infiniteness. 
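|        // Declaring `empty` as a manifest constant `false` (rather than a
|        // runtime property) is what lets `std.range.primitives.isInfinite`
|        // report `true` for `Uniq` over an infinite range; the runtime
|        // `_input.empty` forward in the `else` branch below is only used
|        // for finite ranges.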
| } | else | { | @property bool empty() const { return _input.empty; } | } | | static if (isForwardRange!Range) | { | @property typeof(this) save() scope return { | return typeof(this)(_input.save); | } | } |} | |version(none) |@safe version(mir_test) unittest |{ | import std.algorithm.comparison : equal; | import std.internal.test.dummyrange; | import std.range; | | int[] arr = [ 1, 2, 2, 2, 2, 3, 4, 4, 4, 5 ]; | auto r = uniq(arr); | static assert(isForwardRange!(typeof(r))); | | assert(equal(r, [ 1, 2, 3, 4, 5 ][])); | assert(equal(retro(r), retro([ 1, 2, 3, 4, 5 ][]))); | | foreach (DummyType; AllDummyRanges) | { | DummyType d; | auto u = uniq(d); | assert(equal(u, [1,2,3,4,5,6,7,8,9,10])); | | static assert(d.rt == RangeType.Input || isForwardRange!(typeof(u))); | | static if (d.rt >= RangeType.Bidirectional) | { | assert(equal(retro(u), [10,9,8,7,6,5,4,3,2,1])); | } | } |} | |@safe version(mir_test) unittest // https://issues.dlang.org/show_bug.cgi?id=17264 |{ | import std.algorithm.comparison : equal; | | const(int)[] var = [0, 1, 1, 2]; | assert(var.uniq.equal([0, 1, 2])); |} ../../../.dub/packages/mir-algorithm-3.4.17/mir-algorithm/source/mir/algorithm/iteration.d is 0% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.4.17-mir-algorithm-source-mir-array-allocation.lst |/** |Functions and types that manipulate built-in arrays and associative arrays. | |This module provides all kinds of functions to create, manipulate or convert arrays: | |$(SCRIPT inhibitQuickIndex = 1;) |$(BOOKTABLE , |$(TR $(TH Function Name) $(TH Description) |) | $(TR $(TD $(LREF _array)) | $(TD Returns a copy of the input in a newly allocated dynamic _array. | )) |) | |Copyright: Copyright Andrei Alexandrescu 2008-, Jonathan M Davis 2011-, and Ilya Yaroshenko (Mir rework) 2018- | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). | |Authors: $(HTTP erdani.org, Andrei Alexandrescu) and Jonathan M Davis | |Source: $(PHOBOSSRC std/_array.d) |*/ |module mir.array.allocation; | |import mir.functional; |import mir.primitives; | |import std.traits; |import std.range.primitives: isInfinite, isInputRange, ElementType; | |/** | * Allocates an array and initializes it with copies of the elements | * of range $(D r). | * | * Narrow strings are handled as a special case in an overload. | * | * Params: | * r = range (or aggregate with $(D opApply) function) whose elements are copied into the allocated array | * Returns: | * allocated and initialized array | */ |auto array(Range)(Range r) |if ((isInputRange!Range || isIterable!Range) && !isInfinite!Range && !isStaticArray!Range || isPointer!Range && isIterable!(PointerTarget!Range)) |{ | static if (isIterable!Range) | alias E = ForeachType!Range; | else | static if (isPointer!Range && isIterable!(PointerTarget!Range)) | alias E = ForeachType!(PointerTarget!Range); | else | alias E = ElementType!Range; | | if (__ctfe) | { | // Compile-time version to avoid memcpy calls. | // Also used to infer attributes of array(). 
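|        // Builds the result by plain appending: `~=` is CTFE-friendly,
|        // while the uninitialized-array + emplace fast path below is not.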
| E[] result; | static if (isInputRange!Range) | for (; !r.empty; r.popFront) | result ~= r.front; | else | static if (isPointer!Range) | foreach (e; *r) | result ~= e; | else | foreach (e; r) | result ~= e; | return result; | } | | import mir.primitives: hasLength; | | static if (hasLength!Range) | { | auto length = r.length; | if (length == 0) | return null; | | import mir.conv : emplaceRef; | import std.array: uninitializedArray; | | auto result = (() @trusted => uninitializedArray!(Unqual!E[])(length))(); | | static if (isInputRange!Range) | { | foreach(ref e; result) | { | emplaceRef!E(e, r.front); | r.popFront; | } | } | else | static if (isPointer!Range) | { | auto it = result; | foreach(ref f; *r) | { | emplaceRef!E(it[0], f); | it = it[1 .. $]; | } | } | else | { | auto it = result; | foreach(ref f; r) | { | emplaceRef!E(it[0], f); | it = it[1 .. $]; | } | } | | return (() @trusted => cast(E[]) result)(); | } | else | { | import std.array: appender; | auto a = appender!(E[])(); | static if (isInputRange!Range) | for (; !r.empty; r.popFront) | a.put(r.front); | else | static if (isPointer!Range) | foreach (e; *r) | a.put(e); | else | foreach (e; r) | a.put(e); | return a.data; | } |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto a = array([1, 2, 3, 4, 5][]); | assert(a == [ 1, 2, 3, 4, 5 ]); |} | |@safe pure nothrow version(mir_test) unittest |{ | import mir.algorithm.iteration : equal; | struct Foo | { | int a; | } | auto a = array([Foo(1), Foo(2), Foo(3), Foo(4), Foo(5)][]); | assert(equal(a, [Foo(1), Foo(2), Foo(3), Foo(4), Foo(5)])); |} | |@safe pure nothrow version(mir_test) unittest |{ | struct MyRange | { | enum front = 123; | enum empty = true; | void popFront() {} | } | | auto arr = (new MyRange).array; | assert(arr.empty); |} | |@system pure nothrow version(mir_test) unittest |{ | immutable int[] a = [1, 2, 3, 4]; | auto b = (&a).array; | assert(b == a); |} | |@system version(mir_test) unittest |{ | import mir.algorithm.iteration : equal; | struct Foo | { | int a; | void opAssign(Foo) | { | assert(0); | } | auto opEquals(Foo foo) | { | return a == foo.a; | } | } | auto a = array([Foo(1), Foo(2), Foo(3), Foo(4), Foo(5)][]); | assert(equal(a, [Foo(1), Foo(2), Foo(3), Foo(4), Foo(5)])); |} | |@safe version(mir_test) unittest |{ | // Issue 12315 | static struct Bug12315 { immutable int i; } | enum bug12315 = [Bug12315(123456789)].array(); | static assert(bug12315[0].i == 123456789); |} | |@safe version(mir_test) unittest |{ | import mir.ndslice.topology: repeat; | static struct S{int* p;} | auto a = array(immutable(S).init.repeat(5)); | assert(a.length == 5); |} | |/// |@safe version(mir_test) unittest |{ | assert("Hello D".array == "Hello D"); | assert("Hello D"w.array == "Hello D"w); | assert("Hello D"d.array == "Hello D"d); |} | |@system version(mir_test) unittest |{ | // @system due to array!string | import std.conv : to; | | static struct TestArray { int x; string toString() @safe { return to!string(x); } } | | static struct OpAssign | { | uint num; | this(uint num) { this.num = num; } | | // Templating opAssign to make sure the bugs with opAssign being | // templated are fixed. | void opAssign(T)(T rhs) { this.num = rhs.num; } | } | | static struct OpApply | { | int opApply(scope int delegate(ref int) dg) | { | int res; | foreach (i; 0 .. 
10) | { | res = dg(i); | if (res) break; | } | | return res; | } | } | | auto a = array([1, 2, 3, 4, 5][]); | assert(a == [ 1, 2, 3, 4, 5 ]); | | auto b = array([TestArray(1), TestArray(2)][]); | assert(b == [TestArray(1), TestArray(2)]); | | class C | { | int x; | this(int y) { x = y; } | override string toString() const @safe { return to!string(x); } | } | auto c = array([new C(1), new C(2)][]); | assert(c[0].x == 1); | assert(c[1].x == 2); | | auto d = array([1.0, 2.2, 3][]); | assert(is(typeof(d) == double[])); | assert(d == [1.0, 2.2, 3]); | | auto e = [OpAssign(1), OpAssign(2)]; | auto f = array(e); | assert(e == f); | | assert(array(OpApply.init) == [0,1,2,3,4,5,6,7,8,9]); | assert(array("ABC") == "ABC"); | assert(array("ABC".dup) == "ABC"); |} | |//Bug# 8233 |@safe version(mir_test) unittest |{ | assert(array("hello world"d) == "hello world"d); | immutable a = [1, 2, 3, 4, 5]; | assert(array(a) == a); | const b = a; | assert(array(b) == a); | | //To verify that the opAssign branch doesn't get screwed up by using Unqual. | //EDIT: array no longer calls opAssign. | struct S | { | ref S opAssign(S)(const ref S rhs) | { | assert(0); | } | | int i; | } | | alias AliasSeq(T...) = T; | foreach (T; AliasSeq!(S, const S, immutable S)) | { | auto arr = [T(1), T(2), T(3), T(4)]; | assert(array(arr) == arr); | } |} | |@safe version(mir_test) unittest |{ | //9824 | static struct S | { | @disable void opAssign(S); | int i; | } | auto arr = [S(0), S(1), S(2)]; | arr.array; |} | |// Bugzilla 10220 |@safe version(mir_test) unittest |{ | import mir.algorithm.iteration : equal; | import std.exception; | import mir.ndslice.topology: repeat; | | static struct S | { | int val; | | @disable this(); | this(int v) { val = v; } | } | static immutable r = S(1).repeat(2).array(); | assert(equal(r, [S(1), S(1)])); |} | |@safe version(mir_test) unittest |{ | //Turn down infinity: | static assert(!is(typeof( | repeat(1).array() | ))); |} ../../../.dub/packages/mir-algorithm-3.4.17/mir-algorithm/source/mir/array/allocation.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.4.17-mir-algorithm-source-mir-ndslice-allocation.lst |/++ |This is a submodule of $(MREF mir,ndslice). | |It contains allocation utilities. | | |$(BOOKTABLE $(H2 Common utilities), |$(T2 shape, Returns a shape of a common n-dimensional array. ) |) | |$(BOOKTABLE $(H2 GC Allocation utilities), |$(TR $(TH Function Name) $(TH Description)) |$(T2 slice, Allocates a slice using GC.) |$(T2 bitSlice, GC-Allocates a bitwise packed n-dimensional boolean slice.) |$(T2 ndarray, Allocates a common n-dimensional array from a slice. ) |$(T2 uninitSlice, Allocates an uninitialized slice using GC. ) |) | |$(BOOKTABLE $(H2 Ref counted allocation utilities), |$(T2 rcslice, Allocates an an n-dimensional reference-counted (thread-safe) slice.) |$(T2 bitRcslice, Allocates a bitwise packed n-dimensional reference-counted (thread-safe) boolean slice.) |$(T2 mininitRcslice, Allocates a minimally initialized n-dimensional reference-counted (thread-safe) slice.) |) | |$(BOOKTABLE $(H2 Custom allocation utilities), |$(TR $(TH Function Name) $(TH Description)) |$(T2 makeNdarray, Allocates a common n-dimensional array from a slice using an allocator. ) |$(T2 makeSlice, Allocates a slice using an allocator. ) |$(T2 makeUninitSlice, Allocates an uninitialized slice using an allocator. 
) |) | |$(BOOKTABLE $(H2 CRuntime allocation utilities), |$(TR $(TH Function Name) $(TH Description)) |$(T2 stdcSlice, Allocates a slice copy using `core.stdc.stdlib.malloc`) |$(T2 stdcUninitSlice, Allocates an uninitialized slice using `core.stdc.stdlib.malloc`.) |$(T2 stdcFreeSlice, Frees memory using `core.stdc.stdlib.free`) |) | |$(BOOKTABLE $(H2 Aligned allocation utilities), |$(TR $(TH Function Name) $(TH Description)) |$(T2 uninitAlignedSlice, Allocates an uninitialized aligned slice using GC. ) |$(T2 stdcUninitAlignedSlice, Allocates an uninitialized aligned slice using CRuntime.) |$(T2 stdcFreeAlignedSlice, Frees memory using CRuntime) |) | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.ndslice.allocation; | |import mir.math.common: optmath; |import mir.ndslice.concatenation; |import mir.ndslice.field: BitField; |import mir.ndslice.internal; |import mir.ndslice.iterator: FieldIterator; |import mir.ndslice.slice; |import mir.rc.array; |import std.traits; | |@optmath: | |/++ |Allocates an an n-dimensional reference-counted (thread-safe) slice. |Params: | lengths = List of lengths for each dimension. | init = Value to initialize with (optional). | slice = Slice to copy shape and data from (optional). |Returns: | n-dimensional slice |+/ |Slice!(RCI!T, N) | rcslice(T, size_t N)(size_t[N] lengths...) |{ | immutable len = lengths.lengthsProduct; | auto _lengths = lengths; | return typeof(return)(_lengths, RCI!T(RCArray!T(len))); |} | |/// ditto |Slice!(RCI!T, N) | rcslice(T, size_t N)(size_t[N] lengths, T init) |{ | auto ret = (()@trusted => mininitRcslice!T(lengths))(); | ret.lightScope.field[] = init; | static if (__VERSION__ >= 2085) import core.lifetime: move; else import std.algorithm.mutation: move; | return move(ret); |} | |/// ditto |auto rcslice(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) |{ | import mir.conv: emplaceRef; | alias E = slice.DeepElement; | | auto result = (() @trusted => slice.shape.mininitRcslice!(Unqual!E))(); | | import mir.algorithm.iteration: each; | each!(emplaceRef!E)(result.lightScope, slice.lightScope); | | static if (__VERSION__ >= 2085) import core.lifetime: move; else import std.algorithm.mutation: move; | return move(*(() @trusted => cast(Slice!(RCI!E, N)*) &result)()); |} | |/// |version(mir_test) |@safe pure nothrow @nogc unittest |{ | import mir.ndslice.slice: Slice; | import mir.rc.array: RCI; | auto tensor = rcslice!int(5, 6, 7); | assert(tensor.length == 5); | assert(tensor.elementCount == 5 * 6 * 7); | static assert(is(typeof(tensor) == Slice!(RCI!int, 3))); | | // creates duplicate using `rcslice` | auto dup = tensor.rcslice; | assert(dup == tensor); |} | |/// |version(mir_test) |@safe pure nothrow @nogc unittest |{ | import mir.ndslice.slice: Slice; | import mir.rc.array: RCI; | auto tensor = rcslice([2, 3], 5); | assert(tensor.elementCount == 2 * 3); | assert(tensor[1, 1] == 5); | | import mir.rc.array; | static assert(is(typeof(tensor) == Slice!(RCI!int, 2))); |} | |/// ditto |auto rcslice(size_t dim, Slices...)(Concatenation!(dim, Slices) concatenation) |{ | alias T = Unqual!(concatenation.DeepElement); | auto ret = (()@trusted => mininitRcslice!T(concatenation.shape))(); | static if (__VERSION__ >= 2085) import core.lifetime: move; else import std.algorithm.mutation: move; | 
ret.lightScope.opIndexAssign(concatenation); | return ret; |} | |/// |version(mir_test) |@safe pure nothrow @nogc unittest |{ | import mir.ndslice.slice: Slice; | import mir.ndslice.topology : iota; | import mir.ndslice.concatenation; | auto tensor = concatenation([2, 3].iota, [3].iota(6)).rcslice; | assert(tensor == [3, 3].iota); | | static assert(is(typeof(tensor) == Slice!(RCI!ptrdiff_t, 2))); |} | |/++ |Allocates a bitwise packed n-dimensional reference-counted (thread-safe) boolean slice. |Params: | lengths = List of lengths for each dimension. |Returns: | n-dimensional bitwise rcslice |See_also: $(SUBREF topology, bitwise). |+/ |Slice!(FieldIterator!(BitField!(RCI!size_t)), N) bitRcslice(size_t N)(size_t[N] lengths...) |{ | import mir.ndslice.topology: bitwise; | enum elen = size_t.sizeof * 8; | immutable len = lengths.lengthsProduct; | immutable dlen = (len / elen + (len % elen != 0)); | return RCArray!size_t(dlen).asSlice.bitwise[0 .. len].sliced(lengths); |} | |/// 1D |@safe pure nothrow @nogc |version(mir_test) unittest |{ | auto bitarray = 100.bitRcslice; // allocates 16 bytes total (plus RC context) | assert(bitarray.shape == cast(size_t[1])[100]); | assert(bitarray[72] == false); | bitarray[72] = true; | assert(bitarray[72] == true); |} | |/// 2D |@safe pure nothrow @nogc |version(mir_test) unittest |{ | auto bitmatrix = bitRcslice(20, 6); // allocates 16 bytes total (plus RC context) | assert(bitmatrix.shape == cast(size_t[2])[20, 6]); | assert(bitmatrix[3, 4] == false); | bitmatrix[3, 4] = true; | assert(bitmatrix[3, 4] == true); |} | |/++ |Allocates a minimally initialized n-dimensional reference-counted (thread-safe) slice. |Params: | lengths = list of lengths for each dimension |Returns: | contiguous minimally initialized n-dimensional reference-counted (thread-safe) slice |+/ |Slice!(RCI!T, N) mininitRcslice(T, size_t N)(size_t[N] lengths...) |{ | immutable len = lengths.lengthsProduct; | auto _lengths = lengths; | return Slice!(RCI!T, N)(_lengths, RCI!T(mininitRcarray!T(len))); |} | |/// |version(mir_test) |pure nothrow @nogc unittest |{ | import mir.ndslice.slice: Slice; | import mir.rc.array: RCI; | auto tensor = mininitRcslice!int(5, 6, 7); | assert(tensor.length == 5); | assert(tensor.elementCount == 5 * 6 * 7); | static assert(is(typeof(tensor) == Slice!(RCI!int, 3))); |} | |/++ |GC-Allocates an an n-dimensional slice. |Params: | lengths = List of lengths for each dimension. | init = Value to initialize with (optional). | slice = Slice to copy shape and data from (optional). |Returns: | n-dimensional slice |+/ |Slice!(T*, N) | slice(T, size_t N)(size_t[N] lengths...) 
|{ | immutable len = lengths.lengthsProduct; | return new T[len].sliced(lengths); |} | |/// ditto |Slice!(T*, N) | slice(T, size_t N)(size_t[N] lengths, T init) |{ | immutable len = lengths.lengthsProduct; | static if (!hasElaborateAssign!T) | { | import std.array : uninitializedArray; | auto arr = uninitializedArray!(Unqual!T[])(len); | } | else | { | auto arr = new Unqual!T[len]; | } | arr[] = init; | auto ret = .sliced(cast(T[])arr, lengths); | return ret; |} | |/// ditto |auto slice(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) |{ | if (__ctfe) | { | import mir.ndslice.topology: flattened; | import mir.array.allocation: array; | return slice.flattened.array.sliced(slice.shape); | } | else | { | import mir.conv: emplaceRef; | alias E = slice.DeepElement; | | auto result = (() @trusted => slice.shape.uninitSlice!(Unqual!E))(); | | import mir.algorithm.iteration: each; | each!(emplaceRef!E)(result, slice); | | return (() @trusted => cast(Slice!(E*, N)) result)(); | } |} | |/// |version(mir_test) |@safe pure nothrow unittest |{ | import mir.ndslice.slice: Slice; | auto tensor = slice!int(5, 6, 7); | assert(tensor.length == 5); | assert(tensor.elementCount == 5 * 6 * 7); | static assert(is(typeof(tensor) == Slice!(int*, 3))); | | // creates duplicate using `slice` | auto dup = tensor.slice; | assert(dup == tensor); |} | |/// |version(mir_test) |@safe pure nothrow unittest |{ | auto tensor = slice([2, 3], 5); | assert(tensor.elementCount == 2 * 3); | assert(tensor[1, 1] == 5); |} | |/// ditto |auto slice(size_t dim, Slices...)(Concatenation!(dim, Slices) concatenation) |{ | alias T = Unqual!(concatenation.DeepElement); | static if (hasElaborateAssign!T) | alias fun = .slice; | else | alias fun = .uninitSlice; | auto ret = (()@trusted => fun!T(concatenation.shape))(); | ret.opIndexAssign(concatenation); | return ret; |} | |/// |version(mir_test) |@safe pure nothrow unittest |{ | import mir.ndslice.slice: Slice; | import mir.ndslice.topology : iota; | import mir.ndslice.concatenation; | auto tensor = concatenation([2, 3].iota, [3].iota(6)).slice; | assert(tensor == [3, 3].iota); | | static assert(is(typeof(tensor) == Slice!(ptrdiff_t*, 2))); |} | |/++ |GC-Allocates a bitwise packed n-dimensional boolean slice. |Params: | lengths = List of lengths for each dimension. |Returns: | n-dimensional bitwise slice |See_also: $(SUBREF topology, bitwise). |+/ |Slice!(FieldIterator!(BitField!(size_t*)), N) bitSlice(size_t N)(size_t[N] lengths...) |{ | import mir.ndslice.topology: bitwise; | enum elen = size_t.sizeof * 8; | immutable len = lengths.lengthsProduct; | immutable dlen = (len / elen + (len % elen != 0)); | return new size_t[dlen].sliced.bitwise[0 .. len].sliced(lengths); |} | |/// 1D |@safe pure version(mir_test) unittest |{ | auto bitarray = bitSlice(100); // allocates 16 bytes total | assert(bitarray.shape == [100]); | assert(bitarray[72] == false); | bitarray[72] = true; | assert(bitarray[72] == true); |} | |/// 2D |@safe pure version(mir_test) unittest |{ | auto bitmatrix = bitSlice(20, 6); // allocates 16 bytes total | assert(bitmatrix.shape == [20, 6]); | assert(bitmatrix[3, 4] == false); | bitmatrix[3, 4] = true; | assert(bitmatrix[3, 4] == true); |} | |/++ |GC-Allocates an uninitialized n-dimensional slice. |Params: | lengths = list of lengths for each dimension |Returns: | contiguous uninitialized n-dimensional slice |+/ |auto uninitSlice(T, size_t N)(size_t[N] lengths...) 
|{ | immutable len = lengths.lengthsProduct; | import std.array : uninitializedArray; | auto arr = uninitializedArray!(T[])(len); | return arr.sliced(lengths); |} | |/// |version(mir_test) |@safe pure nothrow unittest |{ | import mir.ndslice.slice: Slice; | auto tensor = uninitSlice!int(5, 6, 7); | assert(tensor.length == 5); | assert(tensor.elementCount == 5 * 6 * 7); | static assert(is(typeof(tensor) == Slice!(int*, 3))); |} | |/++ |GC-Allocates an uninitialized aligned an n-dimensional slice. |Params: | lengths = list of lengths for each dimension | alignment = memory alignment (bytes) |Returns: | contiguous uninitialized n-dimensional slice |+/ |auto uninitAlignedSlice(T, size_t N)(size_t[N] lengths, uint alignment) @system |{ | immutable len = lengths.lengthsProduct; | import std.array : uninitializedArray; | assert((alignment != 0) && ((alignment & (alignment - 1)) == 0), "'alignment' must be a power of two"); | size_t offset = alignment <= 16 ? 0 : alignment - 1; | void* basePtr = uninitializedArray!(byte[])(len * T.sizeof + offset).ptr; | T* alignedPtr = cast(T*)((cast(size_t)(basePtr) + offset) & ~offset); | return alignedPtr.sliced(lengths); |} | |/// |version(mir_test) |@system pure nothrow unittest |{ | import mir.ndslice.slice: Slice; | auto tensor = uninitAlignedSlice!double([5, 6, 7], 64); | tensor[] = 0; | assert(tensor.length == 5); | assert(tensor.elementCount == 5 * 6 * 7); | assert(cast(size_t)(tensor.ptr) % 64 == 0); | static assert(is(typeof(tensor) == Slice!(double*, 3))); |} | |/++ |Allocates an array through a specified allocator and creates an n-dimensional slice over it. |See also $(MREF std, experimental, allocator). |Params: | alloc = allocator | lengths = list of lengths for each dimension | init = default value for array initialization | slice = slice to copy shape and data from |Returns: | a structure with fields `array` and `slice` |Note: | `makeSlice` always returns slice with mutable elements |+/ |auto makeSlice(Allocator, size_t N, Iterator)(auto ref Allocator alloc, Slice!(N, Iterator) slice) |{ | alias T = Unqual!(slice.DeepElement); | return makeSlice!(T)(alloc, slice); |} | |/// ditto |Slice!(T*, N) |makeSlice(T, Allocator, size_t N)(auto ref Allocator alloc, size_t[N] lengths...) 
|{ | import std.experimental.allocator : makeArray; | return alloc.makeArray!T(lengths.lengthsProduct).sliced(lengths); |} | |/// ditto |Slice!(T*, N) |makeSlice(T, Allocator, size_t N)(auto ref Allocator alloc, size_t[N] lengths, T init) |{ | import std.experimental.allocator : makeArray; | immutable len = lengths.lengthsProduct; | auto array = alloc.makeArray!T(len, init); | return array.sliced(lengths); |} | |/// ditto |auto makeSlice(Allocator, Iterator, size_t N, SliceKind kind) | (auto ref Allocator allocator, Slice!(Iterator, N, kind) slice) |{ | import mir.conv: emplaceRef; | alias E = slice.DeepElement; | | auto result = allocator.makeUninitSlice!(Unqual!E)(slice.shape); | | import mir.algorithm.iteration: each; | each!(emplaceRef!E)(result, slice); | | return cast(Slice!(E*, N)) result; |} | |/// Initialization with default value |version(mir_test) |@nogc unittest |{ | import std.experimental.allocator; | import std.experimental.allocator.mallocator; | import mir.algorithm.iteration: all; | import mir.ndslice.topology: map; | | auto sl = Mallocator.instance.makeSlice([2, 3, 4], 10); | auto ar = sl.field; | assert(sl.all!"a == 10"); | | auto sl2 = Mallocator.instance.makeSlice(sl.map!"a * 2"); | auto ar2 = sl2.field; | assert(sl2.all!"a == 20"); | | Mallocator.instance.dispose(ar); | Mallocator.instance.dispose(ar2); |} | |version(mir_test) |@nogc unittest |{ | import std.experimental.allocator; | import std.experimental.allocator.mallocator; | | // cast to your own type | auto sl = makeSlice!double(Mallocator.instance, [2, 3, 4], 10); | auto ar = sl.field; | assert(sl[1, 1, 1] == 10.0); | Mallocator.instance.dispose(ar); |} | |/++ |Allocates an uninitialized array through a specified allocator and creates an n-dimensional slice over it. |See also $(MREF std, experimental, allocator). |Params: | alloc = allocator | lengths = list of lengths for each dimension |Returns: | a structure with fields `array` and `slice` |+/ |Slice!(T*, N) |makeUninitSlice(T, Allocator, size_t N)(auto ref Allocator alloc, size_t[N] lengths...) | if (N) |{ | if (immutable len = lengths.lengthsProduct) | { | auto mem = alloc.allocate(len * T.sizeof); | if (mem.length == 0) assert(0); | auto array = () @trusted { return cast(T[]) mem; }(); | return array.sliced(lengths); | } | else | { | return T[].init.sliced(lengths); | } |} | |/// |version(mir_test) |@system @nogc unittest |{ | import std.experimental.allocator; | import std.experimental.allocator.mallocator; | | auto sl = makeUninitSlice!int(Mallocator.instance, 2, 3, 4); | auto ar = sl.field; | assert(ar.ptr is sl.iterator); | assert(ar.length == 24); | assert(sl.elementCount == 24); | | Mallocator.instance.dispose(ar); |} | |/++ |Allocates a common n-dimensional array from a slice. |Params: | slice = slice |Returns: | multidimensional D array |+/ |auto ndarray(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) |{ | import mir.array.allocation : array; | static if (slice.N == 1) | { | return array(slice); | } | else | { | import mir.ndslice.topology: ipack, map; | return array(slice.ipack!1.map!(a => .ndarray(a))); | } |} | |/// |version(mir_test) |@safe pure nothrow unittest |{ | import mir.ndslice.topology : iota; | auto slice = iota(3, 4); | auto m = slice.ndarray; | static assert(is(typeof(m) == sizediff_t[][])); // sizediff_t is long for 64 bit platforms | assert(m == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]); |} | |/++ |Allocates a common n-dimensional array using data from a slice. 
|Params: | alloc = allocator (optional) | slice = slice |Returns: | multidimensional D array |+/ |auto makeNdarray(T, Allocator, Iterator, size_t N, SliceKind kind)(auto ref Allocator alloc, Slice!(Iterator, N, kind) slice) |{ | import std.experimental.allocator : makeArray; | static if (slice.N == 1) | { | return makeArray!T(alloc, slice); | } | else | { | alias E = typeof(makeNdarray!T(alloc, slice[0])); | auto ret = makeArray!E(alloc, slice.length); | foreach (i, ref e; ret) | e = .makeNdarray!T(alloc, slice[i]); | return ret; | } |} | |/// |version(mir_test) |@nogc unittest |{ | import std.experimental.allocator; | import std.experimental.allocator.mallocator; | import mir.ndslice.topology : iota; | | auto slice = iota(3, 4); | auto m = Mallocator.instance.makeNdarray!long(slice); | | static assert(is(typeof(m) == long[][])); | | static immutable ar = [[0L, 1, 2, 3], [4L, 5, 6, 7], [8L, 9, 10, 11]]; | assert(m == ar); | | foreach (ref row; m) | Mallocator.instance.dispose(row); | Mallocator.instance.dispose(m); |} | |/++ |Shape of a common n-dimensional array. |Params: | array = common n-dimensional array | err = error flag passed by reference |Returns: | static array of dimensions type of `size_t[n]` |+/ |auto shape(T)(T[] array, ref int err) |{ | static if (isDynamicArray!T) | { | size_t[1 + typeof(shape(T.init, err)).length] ret; | | if (array.length) | { | ret[0] = array.length; | ret[1..$] = shape(array[0], err); | if (err) | goto L; | foreach (ar; array) | { | if (shape(ar, err) != ret[1..$]) | err = 1; | if (err) | goto L; | } | } | } | else | { | size_t[1] ret; | ret[0] = array.length; | } | err = 0; |L: | return ret; |} | |/// |version(mir_test) |@safe pure unittest |{ | int err; | size_t[2] shape = [[1, 2, 3], [4, 5, 6]].shape(err); | assert(err == 0); | assert(shape == [2, 3]); | | [[1, 2], [4, 5, 6]].shape(err); | assert(err == 1); |} | |/// Slice from ndarray |version(mir_test) |unittest |{ | import mir.ndslice.allocation: slice, shape; | int err; | auto array = [[1, 2, 3], [4, 5, 6]]; | auto s = array.shape(err).slice!int; | s[] = [[1, 2, 3], [4, 5, 6]]; | assert(s == array); |} | |version(mir_test) |@safe pure unittest |{ | int err; | size_t[2] shape = (int[][]).init.shape(err); | assert(shape[0] == 0); | assert(shape[1] == 0); |} | |version(mir_test) |nothrow unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.topology : iota; | | auto sl = iota([0, 0], 1); | | assert(sl.empty!0); | assert(sl.empty!1); | | auto gcsl1 = sl.slice; | auto gcsl2 = slice!double(0, 0); | | import std.experimental.allocator; | import std.experimental.allocator.mallocator; | | auto sl2 = makeSlice!double(Mallocator.instance, 0, 0); | | Mallocator.instance.dispose(sl2.field); |} | |/++ |Allocates an uninitialized array using `core.stdc.stdlib.malloc` and creates an n-dimensional slice over it. |Params: | lengths = list of lengths for each dimension |Returns: | contiguous uninitialized n-dimensional slice |See_also: | $(LREF stdcSlice), $(LREF stdcFreeSlice) |+/ |Slice!(T*, N) stdcUninitSlice(T, size_t N)(size_t[N] lengths...) |{ | import core.stdc.stdlib: malloc; | immutable len = lengths.lengthsProduct; | auto ptr = len ? cast(T*) malloc(len * T.sizeof) : null; | return ptr.sliced(lengths); |} | |/++ |Allocates a copy of a slice using `core.stdc.stdlib.malloc`. 
|Params: | slice = n-dimensional slice |Returns: | contiguous n-dimensional slice |See_also: | $(LREF stdcUninitSlice), $(LREF stdcFreeSlice) |+/ |auto stdcSlice(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) |{ | alias E = slice.DeepElement; | alias T = Unqual!E; | static assert (!hasElaborateAssign!T, "stdcSlice is not miplemented for slices that have elaborate assign"); | auto ret = stdcUninitSlice!T(slice.shape); | | import mir.conv: emplaceRef; | import mir.algorithm.iteration: each; | each!(emplaceRef!E)(ret, slice); | return ret; |} | |/++ |Frees memory using `core.stdc.stdlib.free`. |Params: | slice = n-dimensional slice |See_also: | $(LREF stdcSlice), $(LREF stdcUninitSlice) |+/ |void stdcFreeSlice(T, size_t N)(Slice!(T*, N) slice) |{ | import core.stdc.stdlib: free; | slice._iterator.free; |} | |/// |version(mir_test) |unittest |{ | import mir.ndslice.topology: iota; | | auto i = iota(3, 4); | auto s = i.stdcSlice; | auto t = s.shape.stdcUninitSlice!size_t; | | t[] = s; | | assert(t == i); | | s.stdcFreeSlice; | t.stdcFreeSlice; |} | |/++ |Allocates an uninitialized aligned array using `core.stdc.stdlib.malloc` and creates an n-dimensional slice over it. |Params: | lengths = list of lengths for each dimension | alignment = memory alignment (bytes) |Returns: | contiguous uninitialized n-dimensional slice |+/ |auto stdcUninitAlignedSlice(T, size_t N)(size_t[N] lengths, uint alignment) @system |{ | immutable len = lengths.lengthsProduct; | import mir.internal.memory: alignedAllocate; | auto arr = (cast(T*)alignedAllocate(len * T.sizeof, alignment))[0 .. len]; | return arr.sliced(lengths); |} | |/// |version(mir_test) |@system pure nothrow unittest |{ | auto tensor = stdcUninitAlignedSlice!double([5, 6, 7], 64); | assert(tensor.length == 5); | assert(tensor.elementCount == 5 * 6 * 7); | assert(cast(size_t)(tensor.ptr) % 64 == 0); | static assert(is(typeof(tensor) == Slice!(double*, 3))); | stdcFreeAlignedSlice(tensor); |} | |/++ |Frees aligned memory allocaged by CRuntime. |Params: | slice = n-dimensional slice |See_also: | $(LREF stdcSlice), $(LREF stdcUninitSlice) |+/ |void stdcFreeAlignedSlice(T, size_t N)(Slice!(T*, N) slice) |{ | import mir.internal.memory: alignedFree; | slice._iterator.alignedFree; |} ../../../.dub/packages/mir-algorithm-3.4.17/mir-algorithm/source/mir/ndslice/allocation.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.4.17-mir-algorithm-source-mir-ndslice-field.lst |/++ |This is a submodule of $(MREF mir,ndslice). | |Field is a type with `opIndex()(ptrdiff_t index)` primitive. |An iterator can be created on top of a field using $(SUBREF iterator, FieldIterator). |An ndslice can be created on top of a field using $(SUBREF slice, slicedField). | |$(BOOKTABLE $(H2 Fields), |$(TR $(TH Field Name) $(TH Used By)) |$(T2 BitField, $(SUBREF topology, bitwise)) |$(T2 BitpackField, $(SUBREF topology, bitpack)) |$(T2 CycleField, $(SUBREF topology, cycle) (2 kinds)) |$(T2 LinspaceField, $(SUBREF topology, linspace)) |$(T2 MagicField, $(SUBREF topology, magic)) |$(T2 MapField, $(SUBREF topology, map) and $(SUBREF topology, mapField)) |$(T2 ndIotaField, $(SUBREF topology, ndiota)) |$(T2 OrthogonalReduceField, $(SUBREF topology, orthogonalReduceField)) |$(T2 RepeatField, $(SUBREF topology, repeat)) |$(T2 SparseField, Used for mutable DOK sparse matrixes ) |) | | | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). 
|Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.ndslice.field; | |import mir.internal.utility: Iota; |import mir.math.common: optmath; |import mir.ndslice.internal; |import mir.qualifier; | |@optmath: | |package template ZeroShiftField(T) |{ | static if (hasZeroShiftFieldMember!T) | alias ZeroShiftField = typeof(T.init.assumeFieldsHaveZeroShift()); | else | alias ZeroShiftField = T; |} | |package enum hasZeroShiftFieldMember(T) = __traits(hasMember, T, "assumeFieldsHaveZeroShift"); | |package auto applyAssumeZeroShift(Types...)() |{ | import mir.ndslice.topology; | string str; | foreach(i, T; Types) | static if (hasZeroShiftFieldMember!T) | str ~= "_fields[" ~ i.stringof ~ "].assumeFieldsHaveZeroShift, "; | else | str ~= "_fields[" ~ i.stringof ~ "], "; | return str; |} | |auto MapField__map(Field, alias fun, alias fun1)(ref MapField!(Field, fun) f) |{ | import mir.functional: pipe; | return MapField!(Field, pipe!(fun, fun1))(f._field); |} | | |/++ |`MapField` is used by $(SUBREF topology, map). |+/ |struct MapField(Field, alias _fun) |{ |@optmath: | /// | Field _field; | | /// | auto lightConst()() const @property | { | return MapField!(LightConstOf!Field, _fun)(.lightConst(_field)); | } | | /// | auto lightImmutable()() immutable @property | { | return MapField!(LightImmutableOf!Field, _fun)(.lightImmutable(_field)); | } | | /++ | User defined constructor used by $(LREF mapField). | +/ | static alias __map(alias fun1) = MapField__map!(Field, _fun, fun1); | | auto ref opIndex(T...)(auto ref T index) | { | import mir.functional: RefTuple, unref; | static if (is(typeof(_field[index]) : RefTuple!K, K...)) | { | auto t = _field[index]; | return mixin("_fun(" ~ _iotaArgs!(K.length, "t.expand[", "].unref, ") ~ ")"); | } | else | return _fun(_field[index]); | } | | static if (__traits(hasMember, Field, "length")) | auto length() const @property | { | return _field.length; | } | | static if (__traits(hasMember, Field, "shape")) | auto shape() const @property | { | return _field.shape; | } | | static if (__traits(hasMember, Field, "elementCount")) | auto elementCount() const @property | { | return _field.elementCount; | } | | static if (hasZeroShiftFieldMember!Field) | /// Defined if `Field` has member `assumeFieldsHaveZeroShift`. | auto assumeFieldsHaveZeroShift() @property | { | return _mapField!_fun(_field.assumeFieldsHaveZeroShift); | } |} | |/++ |`VmapField` is used by $(SUBREF topology, map). 
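|
|Like `MapField` above, it stores the wrapped field (here together with a runtime
|callable) and applies the function lazily inside `opIndex`, so no elements are
|computed until they are indexed. A minimal sketch of that lazy behaviour through
|the public $(SUBREF topology, map) with a string lambda (shapes and values below
|are only for illustration):
|
|    import mir.ndslice.topology: iota, map;
|    auto doubled = iota(2, 3).map!"a * 2"; // lazy view; no allocation happens
|    assert(doubled[1, 2] == 10);           // element 5 of iota is mapped on access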
|+/ |struct VmapField(Field, Fun) |{ |@optmath: | /// | Field _field; | /// | Fun _fun; | | /// | auto lightConst()() const @property | { | return VmapField!(LightConstOf!Field, _fun)(.lightConst(_field)); | } | | /// | auto lightImmutable()() immutable @property | { | return VmapField!(LightImmutableOf!Field, _fun)(.lightImmutable(_field)); | } | | auto ref opIndex(T...)(auto ref T index) | { | import mir.functional: RefTuple, unref; | static if (is(typeof(_field[index]) : RefTuple!K, K...)) | { | auto t = _field[index]; | return mixin("_fun(" ~ _iotaArgs!(K.length, "t.expand[", "].unref, ") ~ ")"); | } | else | return _fun(_field[index]); | } | | static if (__traits(hasMember, Field, "length")) | auto length() const @property | { | return _field.length; | } | | static if (__traits(hasMember, Field, "shape")) | auto shape() const @property | { | return _field.shape; | } | | static if (__traits(hasMember, Field, "elementCount")) | auto elementCount()const @property | { | return _field.elementCount; | } | | static if (hasZeroShiftFieldMember!Field) | /// Defined if `Field` has member `assumeFieldsHaveZeroShift`. | auto assumeFieldsHaveZeroShift() @property | { | return _vmapField(_field.assumeFieldsHaveZeroShift, _fun); | } |} | |/+ |Creates a mapped field. Uses `__map` if possible. |+/ |auto _mapField(alias fun, Field)(Field field) |{ | import mir.functional: naryFun; | static if (( | __traits(isSame, fun, naryFun!"a|b") || | __traits(isSame, fun, naryFun!"a^b") || | __traits(isSame, fun, naryFun!"a&b") || | __traits(isSame, fun, naryFun!"a | b") || | __traits(isSame, fun, naryFun!"a ^ b") || | __traits(isSame, fun, naryFun!"a & b")) && | is(Field : ZipField!(BitField!(LeftField, I), BitField!(RightField, I)), LeftField, RightField, I)) | { | import mir.ndslice.topology: bitwiseField; | auto f = ZipField!(LeftField, RightField)(field._fields[0]._field, field._fields[1]._field)._mapField!fun; | return f.bitwiseField!(typeof(f), I); | } | else | static if (__traits(hasMember, Field, "__map")) | return Field.__map!fun(field); | else | return MapField!(Field, fun)(field); |} | |/+ |Creates a mapped field. Uses `__vmap` if possible. |+/ |auto _vmapField(Field, Fun)(Field field, Fun fun) |{ | static if (__traits(hasMember, Field, "__vmap")) | return Field.__vmap(field, fun); | else | return VmapField!(Field, Fun)(field, fun); |} | |/++ |Iterates multiple fields in lockstep. | |`ZipField` is used by $(SUBREF topology, zipFields). |+/ |struct ZipField(Fields...) 
| if (Fields.length > 1) |{ |@optmath: | import mir.functional: RefTuple, Ref, _ref; | import std.meta: anySatisfy; | | /// | Fields _fields; | | /// | auto lightConst()() const @property | { | import std.format; | import mir.ndslice.topology: iota; | import std.meta: staticMap; | return mixin("ZipField!(staticMap!(LightConstOf, Fields))(%(_fields[%s].lightConst,%)].lightConst)".format(_fields.length.iota)); | } | | /// | auto lightImmutable()() immutable @property | { | import std.format; | import mir.ndslice.topology: iota; | import std.meta: staticMap; | return mixin("ZipField!(staticMap!(LightImmutableOf, Fields))(%(_fields[%s].lightImmutable,%)].lightImmutable)".format(_fields.length.iota)); | } | | auto opIndex()(ptrdiff_t index) | { | alias Iterators = Fields; | alias _iterators = _fields; | import mir.ndslice.iterator: _zip_types, _zip_index; | return mixin("RefTuple!(_zip_types!Fields)(" ~ _zip_index!Fields ~ ")"); | } | | auto opIndexAssign(Types...)(RefTuple!(Types) value, ptrdiff_t index) | if (Types.length == Fields.length) | { | foreach(i, ref val; value.expand) | { | _fields[i][index] = val; | } | return opIndex(index); | } | | static if (anySatisfy!(hasZeroShiftFieldMember, Fields)) | /// Defined if at least one of `Fields` has member `assumeFieldsHaveZeroShift`. | auto assumeFieldsHaveZeroShift() @property | { | import std.meta: staticMap; | return mixin("ZipField!(staticMap!(ZeroShiftField, Fields))(" ~ applyAssumeZeroShift!Fields ~ ")"); | } |} | |/++ |`RepeatField` is used by $(SUBREF topology, repeat). |+/ |struct RepeatField(T) |{ | import std.traits: Unqual; | |@optmath: | alias UT = Unqual!T; | | /// | UT _value; | | /// | auto lightConst()() const @property @trusted | { | return RepeatField!(const T)(cast(UT) _value); | } | | /// | auto lightImmutable()() immutable @property @trusted | { | return RepeatField!(immutable T)(cast(UT) _value); | } | | auto ref T opIndex()(ptrdiff_t) @trusted | { return cast(T) _value; } |} | |/++ |`BitField` is used by $(SUBREF topology, bitwise). 
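|
|It exposes each bit of an unsigned-integer field as a `bool` element; reads and
|writes go through `mir.bitop.bt`/`bta` as shown in `opIndex` below. A minimal
|sketch through the public $(SUBREF topology, bitwise) (storage type and index
|chosen only for illustration):
|
|    import mir.ndslice.slice: sliced;
|    import mir.ndslice.topology: bitwise;
|    ushort[2] data;
|    auto bits = data[].sliced.bitwise; // 32 one-bit elements over two ushorts
|    bits[17] = true;                   // word 17 >> 4 == 1, bit 17 & 15 == 1
|    assert(data[1] == 2);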
|+/ |struct BitField(Field, I = typeof(cast()Field.init[size_t.init])) | if (__traits(isUnsigned, I)) |{ |@optmath: | import mir.bitop: ctlz; | package(mir) alias E = I; | package(mir) enum shift = ctlz(I.sizeof) + 3; | | /// | Field _field; | | /// optimization for bitwise operations | auto __vmap(Fun : LeftOp!(op, bool), string op)(Fun fun) | if (op == "|" || op == "&" || op == "^") | { | import mir.ndslice.topology: bitwiseField; | return _vmapField(_field, RightOp!(op, I)(I(0) - fun.value)).bitwiseField; | } | | /// ditto | auto __vmap(Fun : RightOp!(op, bool), string op)(Fun fun) | if (op == "|" || op == "&" || op == "^") | { | import mir.ndslice.topology: bitwiseField; | return _vmapField(_field, RightOp!(op, I)(I(0) - fun.value)).bitwiseField; | } | | /// ditto | auto __vmap(Fun)(Fun fun) | { | return VmapField!(typeof(this), Fun)(this, fun); | } | | /// ditto | alias __map(alias fun) = BitField__map!(Field, I, fun); | | /// | auto lightConst()() const @property | { | return BitField!(LightConstOf!Field, I)(mir.qualifier.lightConst(_field)); | } | | /// | auto lightImmutable()() immutable @property | { | return BitField!(LightImmutableOf!Field, I)(mir.qualifier.lightImmutable(_field)); | } | | bool opIndex()(size_t index) | { | import mir.bitop: bt; | return bt!(Field, I)(_field, index) != 0; | } | | bool opIndexAssign()(bool value, size_t index) | { | import mir.bitop: bta; | bta!(Field, I)(_field, index, value); | return value; | } | | static if (hasZeroShiftFieldMember!Field) | /// Defined if `Field` has member `assumeFieldsHaveZeroShift`. | auto assumeFieldsHaveZeroShift() @property | { | return BitField!(ZeroShiftField!Field, I)(_field.assumeFieldsHaveZeroShift); | } |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.iterator: FieldIterator; | ushort[10] data; | auto f = FieldIterator!(BitField!(ushort*))(0, BitField!(ushort*)(data.ptr)); | f[123] = true; | f++; | assert(f[122]); |} | |auto BitField__map(Field, I, alias fun)(BitField!(Field, I) field) |{ | import mir.functional: naryFun; | static if (__traits(isSame, fun, naryFun!"~a")) | { | import mir.ndslice.topology: bitwiseField; | auto f = _mapField!fun(field._field); | return f.bitwiseField!(typeof(f), I); | } | else | { | return field; | } |} | |/++ |`BitpackField` is used by $(SUBREF topology, bitpack). 
|+/ |struct BitpackField(Field, uint pack, I = typeof(cast()Field.init[size_t.init])) | if (__traits(isUnsigned, I)) |{ | //static assert(); |@optmath: | package(mir) alias E = I; | package(mir) enum mask = (I(1) << pack) - 1; | package(mir) enum bits = I.sizeof * 8; | | /// | Field _field; | | /// | auto lightConst()() const @property | { | return BitpackField!(LightConstOf!Field, pack)(.lightConst(_field)); | } | | /// | auto lightImmutable()() immutable @property | { | return BitpackField!(LightImmutableOf!Field, pack)(.lightImmutable(_field)); | } | | I opIndex()(size_t index) | { | index *= pack; | size_t start = index % bits; | index /= bits; | auto ret = (_field[index] >>> start) & mask; | static if (bits % pack) | { | sizediff_t end = start - (bits - pack); | if (end > 0) | ret ^= cast(I)(_field[index + 1] << (bits - end)) >>> (bits - pack); | } | return cast(I) ret; | } | | I opIndexAssign()(I value, size_t index) | { | import std.traits: Unsigned; | assert(cast(Unsigned!I)value <= mask); | index *= pack; | size_t start = index % bits; | index /= bits; | _field[index] = cast(I)((_field[index] & ~(mask << start)) ^ (value << start)); | static if (bits % pack) | { | sizediff_t end = start - (bits - pack); | if (end > 0) | _field[index + 1] = cast(I)((_field[index + 1] & ~((I(1) << end) - 1)) ^ (value >>> (pack - end))); | } | return value; | } | | static if (hasZeroShiftFieldMember!Field) | /// Defined if `Field` has member `assumeFieldsHaveZeroShift`. | auto assumeFieldsHaveZeroShift() @property | { | return BitpackField!(ZeroShiftField!Field, pack, I)(_field.assumeFieldsHaveZeroShift); | } |} | |/// |unittest |{ | import mir.ndslice.iterator: FieldIterator; | ushort[10] data; | auto f = FieldIterator!(BitpackField!(ushort*, 6))(0, BitpackField!(ushort*, 6)(data.ptr)); | f[0] = cast(ushort) 31; | f[1] = cast(ushort) 13; | f[2] = cast(ushort) 8; | f[3] = cast(ushort) 43; | f[4] = cast(ushort) 28; | f[5] = cast(ushort) 63; | f[6] = cast(ushort) 39; | f[7] = cast(ushort) 23; | f[8] = cast(ushort) 44; | | assert(f[0] == 31); | assert(f[1] == 13); | assert(f[2] == 8); | assert(f[3] == 43); | assert(f[4] == 28); | assert(f[5] == 63); | assert(f[6] == 39); | assert(f[7] == 23); | assert(f[8] == 44); | assert(f[9] == 0); | assert(f[10] == 0); | assert(f[11] == 0); |} | |unittest |{ | import mir.ndslice.slice; | import mir.ndslice.topology; | import mir.ndslice.sorting; | uint[2] data; | auto packed = data[].sliced.bitpack!18; | assert(packed.length == 3); | packed[0] = 5; | packed[1] = 3; | packed[2] = 2; | packed.sort; | assert(packed[0] == 2); | assert(packed[1] == 3); | assert(packed[2] == 5); |} | |/// |struct OrthogonalReduceField(FieldsIterator, alias fun, T) |{ | import mir.ndslice.slice: Slice; | |@optmath: | /// non empty slice | | Slice!FieldsIterator _fields; | | /// | T _initialValue; | | /// | auto lightConst()() const @property | { | auto fields = _fields.lightConst; | return OrthogonalReduceField!(fields.Iterator, fun, T)(fields, _initialValue); | } | | /// | auto lightImmutable()() immutable @property | { | auto fields = _fields.lightImmutable; | return OrthogonalReduceField!(fields.Iterator, fun, T)(fields, _initialValue); | } | | /// `r = fun(r, fields[i][index]);` reduction by `i` | auto opIndex()(size_t index) | { | import std.traits: Unqual; | auto fields = _fields; | T r = _initialValue; | if (!fields.empty) do | { | r = cast(T) fun(r, fields.front[index]); | fields.popFront; | } | while(!fields.empty); | return r; | } |} | |/// |struct CycleField(Field) |{ | import 
mir.ndslice.slice: Slice; | |@optmath: | /// Cycle length | size_t _length; | /// | Field _field; | | /// | auto lightConst()() const @property | { | auto field = .lightConst(_field); | return CycleField!(typeof(field))(_length, field); | } | | /// | auto lightImmutable()() immutable @property | { | auto field = .lightImmutable(_field); | return CycleField!(typeof(field))(_length, field); | } | | /// | auto ref opIndex()(size_t index) | { | return _field[index % _length]; | } | | /// | static if (!__traits(compiles, &opIndex(size_t.init))) | { | auto ref opIndexAssign(T)(auto ref T value, size_t index) | { | return _field[index % _length] = value; | } | } | | static if (hasZeroShiftFieldMember!Field) | /// Defined if `Field` has member `assumeFieldsHaveZeroShift`. | auto assumeFieldsHaveZeroShift() @property | { | return CycleField!(ZeroShiftField!Field)(_length, _field.assumeFieldsHaveZeroShift); | } |} | |/// |struct CycleField(Field, size_t length) |{ | import mir.ndslice.slice: Slice; | |@optmath: | /// Cycle length | enum _length = length; | /// | Field _field; | | /// | auto lightConst()() const @property | { | auto field = .lightConst(_field); | return CycleField!(typeof(field), _length)(field); | } | | /// | auto lightImmutable()() immutable @property | { | auto field = .lightImmutable(_field); | return CycleField!(typeof(field), _length)(field); | } | | /// | auto ref opIndex()(size_t index) | { | return _field[index % _length]; | } | | /// | static if (!__traits(compiles, &opIndex(size_t.init))) | { | auto ref opIndexAssign(T)(auto ref T value, size_t index) | { | return _field[index % _length] = value; | } | } | | static if (hasZeroShiftFieldMember!Field) | /// Defined if `Field` has member `assumeFieldsHaveZeroShift`. | auto assumeFieldsHaveZeroShift() @property | { | return CycleField!(ZeroShiftField!Field, _length)(_field.assumeFieldsHaveZeroShift); | } |} | |/++ |`ndIotaField` is used by $(SUBREF topology, ndiota). |+/ |struct ndIotaField(size_t N) | if (N) |{ |@optmath: | /// | size_t[N - 1] _lengths; | | /// | auto lightConst()() const @property | { | return ndIotaField!N(_lengths); | } | | /// | auto lightImmutable()() const @property | { | return ndIotaField!N(_lengths); | } | | /// | size_t[N] opIndex()(size_t index) const | { | size_t[N] indexes; | foreach_reverse (i; Iota!(N - 1)) | { | indexes[i + 1] = index % _lengths[i]; | index /= _lengths[i]; | } | indexes[0] = index; | return indexes; | } |} | |/++ |`LinspaceField` is used by $(SUBREF topology, linspace). |+/ |struct LinspaceField(T) |{ | /// | size_t _length; | | /// | T _start = cast(T) 0, _stop = cast(T) 0; | | /// | auto lightConst()() scope const @property | { 0000000| return LinspaceField!T(_length, _start, _stop); | } | | /// | auto lightImmutable()() scope const @property | { | return LinspaceField!T(_length, _start, _stop); | } | | // no fastmath | /// | T opIndex()(sizediff_t index) scope const | { 0000000| sizediff_t d = _length - 1; 0000000| auto v = typeof(T.init.re)(d - index); 0000000| auto w = typeof(T.init.re)(index); 0000000| v /= d; 0000000| w /= d; 0000000| auto a = v * _start; 0000000| auto b = w * _stop; 0000000| return a + b; | } | |@optmath: | | /// | size_t length(size_t dimension = 0)() scope const @property | if (dimension == 0) | { 0000000| return _length; | } | | /// | size_t[1] shape()() scope const @property @nogc | { | return [_length]; | } |} | |/++ |Magic square field. |+/ |struct MagicField |{ |@optmath: |@safe pure nothrow @nogc: | | /++ | Magic Square size. 
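The `MagicField` members continue below; first, a small sketch of the `LinspaceField` indexing formula defined above, constructing the field directly with illustrative values:
----
auto f = LinspaceField!double(5, 0.0, 1.0); // 5 evenly spaced points from 0 to 1
assert(f.length == 5);
assert(f[0] == 0.0);
assert(f[2] == 0.5); // v = (4 - 2) / 4, w = 2 / 4, result = v * 0.0 + w * 1.0
assert(f[4] == 1.0);
----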
| +/ | size_t _n; | |scope const: | | /// | MagicField lightConst()() @property | { 0000000| return this; | } | | /// | MagicField lightImmutable()() @property | { | return this; | } | | /// | size_t length(size_t dimension = 0)() @property | if(dimension <= 2) | { 0000000| return _n * _n; | } | | /// | size_t[1] shape() @property | { 0000000| return [_n * _n]; | } | | /// | size_t opIndex(size_t index) | { | pragma(inline, false); 0000000| auto d = index / _n; 0000000| auto m = index % _n; 0000000| if (_n & 1) | { | //d = _n - 1 - d; // MATLAB synchronization | //index = d * _n + m; // ditto 0000000| auto r = (index + 1 - d + (_n - 3) / 2) % _n; 0000000| auto c = (_n * _n - index + 2 * d) % _n; 0000000| return r * _n + c + 1; | } | else 0000000| if ((_n & 2) == 0) | { 0000000| auto a = (d + 1) & 2; 0000000| auto b = (m + 1) & 2; 0000000| return a != b ? index + 1: _n * _n - index; | } | else | { 0000000| auto n = _n / 2 ; 0000000| size_t shift; 0000000| ptrdiff_t q; 0000000| ptrdiff_t p = m - n; 0000000| if (p >= 0) | { 0000000| m = p; 0000000| shift = n * n; 0000000| auto mul = m <= n / 2 + 1; 0000000| q = d - n; 0000000| if (q >= 0) | { 0000000| d = q; 0000000| mul = !mul; | } 0000000| if (mul) | { 0000000| shift *= 2; | } | } | else | { 0000000| auto mul = m < n / 2; 0000000| q = d - n; 0000000| if (q >= 0) | { 0000000| d = q; 0000000| mul = !mul; | } 0000000| if (d == n / 2 && (m == 0 || m == n / 2)) | { 0000000| mul = !mul; | } 0000000| if (mul) | { 0000000| shift = n * n * 3; | } | } 0000000| index = d * n + m; 0000000| auto r = (index + 1 - d + (n - 3) / 2) % n; 0000000| auto c = (n * n - index + 2 * d) % n; 0000000| return r * n + c + 1 + shift; | } | } |} | |/++ |`SparseField` is used to represent Sparse ndarrays in mutable DOK format. |+/ |struct SparseField(T) |{ | /// | T[size_t] _table; | | /// | auto lightConst()() const @trusted | { | return SparseField!(const T)(cast(const(T)[size_t])_table); | } | | /// | auto lightImmutable()() immutable @trusted | { | return SparseField!(immutable T)(cast(immutable(T)[size_t])_table); | } | | /// | T opIndex()(size_t index) | { | import std.traits: isScalarType; | static if (isScalarType!T) | return _table.get(index, cast(T)0); | else | return _table.get(index, null); | } | | /// | T opIndexAssign()(T value, size_t index) | { | import std.traits: isScalarType; | static if (isScalarType!T) | { | if (value != 0) | _table[index] = value; | else | _table.remove(index); | } | else | { | if (value !is null) | _table[index] = value; | else | _table.remove(index); | } | return value; | } | | /// | T opIndexUnary(string op)(size_t index) | if (op == `++` || op == `--`) | { | import std.traits: isScalarType; | mixin (`auto value = ` ~ op ~ `_table[index];`); | static if (isScalarType!T) | { | if (value == 0) | _table.remove(index); | } | else | { | if (value is null) | _table.remove(index); | } | return value; | } | | /// | T opIndexOpAssign(string op)(T value, size_t index) | if (op == `+` || op == `-`) | { | import std.traits: isScalarType; | mixin (`value = _table[index] ` ~ op ~ `= value;`); // this works | static if (isScalarType!T) | { | if (value == 0) | _table.remove(index); | } | else | { | if (value is null) | _table.remove(index); | } | return value; | } |} ../../../.dub/packages/mir-algorithm-3.4.17/mir-algorithm/source/mir/ndslice/field.d is 0% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.4.17-mir-algorithm-source-mir-ndslice-iterator.lst |/++ |This is a submodule of $(MREF mir,ndslice). 
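Before the iterator-module listing continues, a brief sketch of the `SparseField` defined at the end of field.d above; it stores nonzero elements in a built-in associative array (DOK format):
----
SparseField!double sf;   // empty DOK table
sf[7] = 3.5;             // inserts an entry
assert(sf[7] == 3.5);
assert(sf[0] == 0);      // missing keys read as zero
sf[7] = 0;               // assigning zero removes the entry
assert(7 !in sf._table);
----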
| |Iterator is a type with a pointer like behavior. |An ndslice can be created on top of an iterator using $(SUBREF slice, sliced). | |$(BOOKTABLE $(H2 Iterators), |$(TR $(TH Iterator Name) $(TH Used By)) |$(T2 BytegroupIterator, $(SUBREF topology, bytegroup).) |$(T2 CachedIterator, $(SUBREF topology, cached), $(SUBREF topology, cachedGC).) |$(T2 ChopIterator, $(SUBREF topology, chopped)) |$(T2 FieldIterator, $(SUBREF slice, slicedField), $(SUBREF topology, bitwise), $(SUBREF topology, ndiota), and others.) |$(T2 FlattenedIterator, $(SUBREF topology, flattened)) |$(T2 IndexIterator, $(SUBREF topology, indexed)) |$(T2 IotaIterator, $(SUBREF topology, iota)) |$(T2 MapIterator, $(SUBREF topology, map)) |$(T2 MemberIterator, $(SUBREF topology, member)) |$(T2 RetroIterator, $(SUBREF topology, retro)) |$(T2 SliceIterator, $(SUBREF topology, map) in composition with $(LREF MapIterator) for packed slices.) |$(T2 SlideIterator, $(SUBREF topology, diff), $(SUBREF topology, pairwise), and $(SUBREF topology, slide).) |$(T2 StairsIterator, $(SUBREF topology, stairs)) |$(T2 StrideIterator, $(SUBREF topology, stride)) |$(T2 SubSliceIterator, $(SUBREF topology, subSlices)) |$(T2 TripletIterator, $(SUBREF topology, triplets)) |$(T2 ZipIterator, $(SUBREF topology, zip)) |) | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.ndslice.iterator; | |import mir.internal.utility: Iota; |import mir.math.common: optmath; |import mir.ndslice.field; |import mir.ndslice.internal; |import mir.ndslice.slice: SliceKind, Slice, Universal, Canonical, Contiguous, isSlice; |import mir.qualifier; |import mir.conv; |import std.traits; | |private static immutable assumeZeroShiftExceptionMsg = "*.assumeFieldsHaveZeroShift: shift is not zero!"; |version(D_Exceptions) | private static immutable assumeZeroShiftException = new Exception(assumeZeroShiftExceptionMsg); | |@optmath: | |enum std_ops = q{ | void opUnary(string op)() scope | if (op == "--" || op == "++") | { mixin(op ~ "_iterator;"); } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == "-" || op == "+") | { mixin("_iterator " ~ op ~ "= index;"); } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { return this._iterator - right._iterator; } | | bool opEquals()(scope ref const typeof(this) right) scope const | { return this._iterator == right._iterator; } | | ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const | { | static if (isPointer!Iterator) | return this._iterator - right._iterator; | else | return this._iterator.opCmp(right._iterator); | } |}; | |/++ |Step counter. | |`IotaIterator` is used by $(SUBREF topology, iota). 
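A minimal sketch of the corresponding `iota` topology listed in the table above (signature assumed from its use elsewhere in this package):
----
import mir.ndslice.topology: iota;

auto m = iota(2, 3);     // 2x3 matrix of sequential indexes
assert(m[0, 0] == 0);
assert(m[1, 2] == 5);
assert(m == [[0, 1, 2], [3, 4, 5]]);
----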
|+/ |struct IotaIterator(I) | if (isIntegral!I || isPointer!I) |{ |@optmath: | | /// | I _index; | | static if (isPointer!I) | /// | auto lightConst()() const @property | { | static if (isIntegral!I) | return IotaIterator!I(_index); | else | return IotaIterator!(LightConstOf!I)(_index); | } | | static if (isPointer!I) | /// | auto lightImmutable()() immutable @property | { | static if (isIntegral!I) | return IotaIterator!I(_index); | else | return IotaIterator!(LightImmutableOf!I)(_index); | } | | I opUnary(string op : "*")() 0000000| { return _index; } | | void opUnary(string op)() scope | if (op == "--" || op == "++") | { mixin(op ~ `_index;`); } | | I opIndex()(ptrdiff_t index) scope const 0000000| { return cast(I)(_index + index); } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == `+` || op == `-`) | { mixin(`_index ` ~ op ~ `= index;`); } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(const typeof(this) right) scope const | { return cast(ptrdiff_t)(this._index - right._index); } | | bool opEquals()(const typeof(this) right) scope const 0000000| { return this._index == right._index; } | | auto opCmp()(const typeof(this) right) scope const 0000000| { return this._index - right._index; } |} | |/// |@safe pure nothrow @nogc version(mir_test) unittest |{ | IotaIterator!int iota; | assert(*iota == 0); | | // iteration | ++iota; | assert(*iota == 1); | | assert(iota[2] == 3); | assert(iota[-1] == 0); | | --iota; | assert(*iota == 0); | | // opBinary | assert(*(iota + 2) == 2); | assert(*(iota - 3) == -3); | assert((iota - 3) - iota == -3); | | // construction | assert(*IotaIterator!int(3) == 3); | assert(iota - 1 < iota); |} | |/// |pure nothrow @nogc version(mir_test) unittest |{ | int[32] data; | auto iota = IotaIterator!(int*)(data.ptr); | assert(*iota == data.ptr); | | // iteration | ++iota; | assert(*iota == 1 + data.ptr); | | assert(iota[2] == 3 + data.ptr); | assert(iota[-1] == 0 + data.ptr); | | --iota; | assert(*iota == 0 + data.ptr); | | // opBinary | assert(*(iota + 2) == 2 + data.ptr); | assert(*(iota - 3) == -3 + data.ptr); | assert((iota - 3) - iota == -3); | | // construction | assert(*IotaIterator!(int*)(data.ptr) == data.ptr); | assert(iota - 1 < iota); |} | |auto RetroIterator__map(Iterator, alias fun)(ref RetroIterator!Iterator it) |{ | auto iterator = it._iterator._mapIterator!fun; | return RetroIterator!(typeof(iterator))(iterator); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology; | import mir.ndslice.allocation; | auto v = iota(9).retro.map!(a => a).slice; | uint r; | auto w = iota(9).retro.map!(a => a).map!(a => a * r).slice; |} | |/++ |Reverse directions for an iterator. | |`RetroIterator` is used by $(SUBREF topology, retro). 
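A short sketch of the `retro` topology built on this iterator, assuming `iota` and `retro` from mir.ndslice.topology:
----
import mir.ndslice.topology: iota, retro;

auto r = iota(4).retro;  // view the elements in reverse order
assert(r == [3, 2, 1, 0]);
assert(r[0] == 3);
----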
|+/ |struct RetroIterator(Iterator) |{ |@optmath: | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return RetroIterator!(LightConstOf!Iterator)(.lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return RetroIterator!(LightImmutableOf!Iterator)(.lightImmutable(_iterator)); | } | | /// | static alias __map(alias fun) = RetroIterator__map!(Iterator, fun); | | auto ref opUnary(string op : "*")() | { return *_iterator; } | | void opUnary(string op : "--")() | { ++_iterator; } | | void opUnary(string op : "++")() | { --_iterator; } | | auto ref opIndex()(ptrdiff_t index) | { return _iterator[-index]; } | | void opOpAssign(string op : "-")(ptrdiff_t index) scope | { _iterator += index; } | | void opOpAssign(string op : "+")(ptrdiff_t index) scope | { _iterator -= index; } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { return right._iterator - this._iterator; } | | bool opEquals()(scope ref const typeof(this) right) scope const | { return right._iterator == this._iterator; } | | ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const | { | static if (isPointer!Iterator) | return right._iterator - this._iterator; | else | return right._iterator.opCmp(this._iterator); | } |} | |/// |@safe pure nothrow @nogc version(mir_test) unittest |{ | IotaIterator!int iota; | RetroIterator!(IotaIterator!int) retro; | | ++iota; | --retro; | assert(*retro == *iota); | | --iota; | ++retro; | assert(*retro == *iota); | | assert(retro[-7] == iota[7]); | | iota += 100; | retro -= 100; | assert(*retro == *iota); | | iota -= 100; | retro += 100; | assert(*retro == *iota); | | assert(*(retro + 10) == *(iota - 10)); | | assert(retro - 1 < retro); | | assert((retro - 5) - retro == -5); | | iota = IotaIterator!int(3); | retro = RetroIterator!(IotaIterator!int)(iota); | assert(*retro == *iota); |} | |auto StrideIterator__map(Iterator, alias fun)(StrideIterator!Iterator it) |{ | auto iterator = it._iterator._mapIterator!fun; | return StrideIterator!(typeof(iterator))(it._stride, iterator); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology; | import mir.ndslice.allocation; | auto v = iota([3], 0, 3).map!(a => a).slice; | uint r; | auto w = iota([3], 0, 3).map!(a => a).map!(a => a * r).slice; |} | |/++ |Iterates an iterator with a fixed strides. | |`StrideIterator` is used by $(SUBREF topology, stride). 
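A minimal sketch of the `stride` topology this iterator supports, assuming `iota` and `stride` from mir.ndslice.topology:
----
import mir.ndslice.topology: iota, stride;

auto s = iota(7).stride(2); // every second element
assert(s == [0, 2, 4, 6]);
----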
|+/ |struct StrideIterator(Iterator) |{ |@optmath: | /// | ptrdiff_t _stride; | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return StrideIterator!(LightConstOf!Iterator)(_stride, .lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return StrideIterator!(LightImmutableOf!Iterator)(_stride, .lightImmutable(_iterator)); | } | | /// | static alias __map(alias fun) = StrideIterator__map!(Iterator, fun); | | auto ref opUnary(string op : "*")() | { return *_iterator; } | | void opUnary(string op)() scope | if (op == "--" || op == "++") | { mixin("_iterator " ~ op[0] ~ "= _stride;"); } | | auto ref opIndex()(ptrdiff_t index) | { return _iterator[index * _stride]; } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == "-" || op == "+") | { mixin("_iterator " ~ op ~ "= index * _stride;"); } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { return (this._iterator - right._iterator) / _stride; } | | bool opEquals()(scope ref const typeof(this) right) scope const | { return this._iterator == right._iterator; } | | ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const | { | static if (isPointer!Iterator) | ptrdiff_t ret = this._iterator - right._iterator; | else | ptrdiff_t ret = this._iterator.opCmp(right._iterator); | return _stride >= 0 ? ret : -ret; | } |} | |/// |@safe pure nothrow @nogc version(mir_test) unittest |{ | IotaIterator!int iota; | StrideIterator!(IotaIterator!int) stride; | stride._stride = -3; | | iota -= stride._stride; | --stride; | assert(*stride == *iota); | | iota += stride._stride; | ++stride; | assert(*stride == *iota); | | assert(stride[7] == iota[7 * stride._stride]); | | iota -= 100 * stride._stride; | stride -= 100; | assert(*stride == *iota); | | iota += 100 * stride._stride; | stride += 100; | assert(*stride == *iota); | | assert(*(stride + 10) == *(iota + 10 * stride._stride)); | | assert(stride - 1 < stride); | | assert((stride - 5) - stride == -5); | | iota = IotaIterator!int(3); | stride = StrideIterator!(IotaIterator!int)(3, iota); | assert(*stride == *iota); |} | |package template _zip_types(Iterators...) |{ | alias AliasSeq(T...) = T; | static if (Iterators.length) | { | enum i = Iterators.length - 1; | alias T = typeof(Iterators[i].init[sizediff_t.init]); | static if (__traits(compiles, &Iterators[i].init[sizediff_t.init])) | { | import mir.functional: Ref; | alias _zip_types = AliasSeq!(_zip_types!(Iterators[0 .. i]), Ref!T); | } | else | alias _zip_types = AliasSeq!(_zip_types!(Iterators[0 .. i]), T); | } | else | alias _zip_types = AliasSeq!(); |} | |package template _zip_fronts(Iterators...) |{ | static if (Iterators.length) | { | enum i = Iterators.length - 1; | static if (__traits(compiles, &Iterators[i].init[sizediff_t.init])) | enum _zip_fronts = _zip_fronts!(Iterators[0 .. i]) ~ "_ref(*_iterators[" ~ i.stringof ~ "]), "; | else | enum _zip_fronts = _zip_fronts!(Iterators[0 .. i]) ~ "*_iterators[" ~ i.stringof ~ "], "; | } | else | enum _zip_fronts = ""; |} | |package template _zip_index(Iterators...) |{ | static if (Iterators.length) | { | enum i = Iterators.length - 1; | static if (__traits(compiles, &Iterators[i].init[sizediff_t.init])) | enum _zip_index = _zip_index!(Iterators[0 .. 
i]) ~ "_ref(_iterators[" ~ i.stringof ~ "][index]), "; | else | enum _zip_index = _zip_index!(Iterators[0 .. i]) ~ "_iterators[" ~ i.stringof ~ "][index], "; | } | else | enum _zip_index = ""; |} | |/++ |Iterates multiple iterators in lockstep. | |`ZipIterator` is used by $(SUBREF topology, zip). |+/ |struct ZipIterator(Iterators...) | if (Iterators.length > 1) |{ |@optmath: | import std.traits: ConstOf, ImmutableOf; | import std.meta: staticMap; | import mir.functional: RefTuple, Ref, _ref; | /// | Iterators _iterators; | | /// | auto lightConst()() const @property | { | import std.format; | import mir.ndslice.topology: iota; | import std.meta: staticMap; | alias Ret = ZipIterator!(staticMap!(LightConstOf, Iterators)); | enum ret = "Ret(%(.lightConst(_iterators[%s]),%)]))".format(_iterators.length.iota); | return mixin(ret); | } | | /// | auto lightImmutable()() immutable @property | { | import std.format; | import mir.ndslice.topology: iota; | import std.meta: staticMap; | alias Ret = ZipIterator!(staticMap!(LightImmutableOf, Iterators)); | enum ret = "Ret(%(.lightImmutable(_iterators[%s]),%)]))".format(_iterators.length.iota); | return mixin(ret); | } | | auto opUnary(string op : "*")() | { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_fronts!Iterators ~ ")"); } | | | auto opUnary(string op : "*")() const | { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_fronts!Iterators ~ ")"); } | | auto opUnary(string op : "*")() immutable | { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_fronts!Iterators ~ ")"); } | | void opUnary(string op)() scope | if (op == "++" || op == "--") | { | foreach (ref _iterator; _iterators) | mixin(op ~ `_iterator;`); | } | | auto opIndex()(ptrdiff_t index) | { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_index!Iterators ~ ")"); } | | auto opIndexAssign(Types...)(RefTuple!(Types) value, ptrdiff_t index) | if (Types.length == Iterators.length) | { | foreach(i, ref val; value.expand) | { | _iterators[i][index] = val; | } | return opIndex(index); | } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == "+" || op == "-") | { | foreach (ref _iterator; _iterators) | mixin(`_iterator ` ~ op ~ `= index;`); | } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { return this._iterators[0] - right._iterators[0]; } | | bool opEquals()(scope ref const typeof(this) right) scope const | { return this._iterators[0] == right._iterators[0]; } | | ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const | { | static if (isPointer!(Iterators[0])) | return this._iterators[0] - right._iterators[0]; | else | return this._iterators[0].opCmp(right._iterators[0]); | } | | import std.meta: anySatisfy; | static if (anySatisfy!(hasZeroShiftFieldMember, Iterators)) | /// Defined if at least one of `Iterators` has member `assumeFieldsHaveZeroShift`. 
| auto assumeFieldsHaveZeroShift() @property | { | import std.meta: staticMap; | alias _fields = _iterators; | return mixin("ZipField!(staticMap!(ZeroShiftField, Iterators))(" ~ applyAssumeZeroShift!Iterators ~ ")"); | } |} | |/// |pure nothrow @nogc version(mir_test) unittest |{ | import mir.ndslice.traits: isIterator; | | double[10] data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; | alias ItA = IotaIterator!int; | alias ItB = double*; | alias ItZ = ZipIterator!(ItA, ItB); | auto zip = ItZ(ItA(3), data.ptr); | assert((*zip).a == 3); | assert((*zip).b == 1); | | // iteration | ++zip; | assert((*zip).a == 3 + 1); | assert((*zip).b == 1 + 1); | assert(&(*zip).b() == data.ptr + 1); | | assert(zip[4].a == 3 + 5); | assert(zip[4].b == 1 + 5); | assert(&zip[4].b() == data.ptr + 5); | | --zip; | assert((*zip).a == 3); | assert((*zip).b == 1); | | assert((*(zip + 2)).a == 3 + 2); | assert((*(zip - 3)).a == 3 + -3); | assert((*(zip + 2)).b == 1 + 2); | assert((*(zip + 3 - 3)).b == 1); | assert((zip - 3).opBinary!"-"(zip) == -3); | | assert(zip == zip); | assert(zip - 1 < zip); | | static assert(isIterator!(ZipIterator!(double*, int*))); | static assert(isIterator!(ZipIterator!(immutable(double)*, immutable(int)*))); |} | |/// |struct CachedIterator(Iterator, CacheIterator, FlagIterator) |{ | /// | Iterator _iterator; | /// | CacheIterator _caches; | /// | FlagIterator _flags; | |@optmath: | | /// | auto lightScope()() scope @property | { | return CachedIterator!(LightScopeOf!Iterator, LightScopeOf!CacheIterator, LightScopeOf!FlagIterator)( | .lightScope(_iterator), | .lightScope(_caches), | .lightScope(_flags), | ); | } | | /// | auto lightScope()() scope const @property | { | return lightConst.lightScope; | } | | /// | auto lightScope()() scope immutable @property | { | return lightImmutable.lightScope; | } | | /// | auto lightConst()() const @property | { | return CachedIterator!(LightConstOf!Iterator, CacheIterator, FlagIterator)( | .lightConst(_iterator), | *cast(CacheIterator*)&_caches, | *cast(FlagIterator*)&_flags, | ); | } | | /// | auto lightImmutable()() immutable @property @trusted | { | return CachedIterator!(LightImmutableOf!Iterator, CacheIterator, FlagIterator)( | .lightImmutable(_iterator), | *cast(CacheIterator*)&_caches, | *cast(FlagIterator*)&_flags, | ); | } | | private alias T = typeof(Iterator.init[0]); | private alias UT = Unqual!T; | | auto opUnary(string op : "*")() | { | if (_expect(!*_flags, false)) | { | _flags[0] = true; | emplaceRef!T(*cast(UT*)&*_caches, *_iterator); | } | return *_caches; | } | | auto opIndex()(ptrdiff_t index) | { | if (_expect(!_flags[index], false)) | { | _flags[index] = true; | emplaceRef!T(*cast(UT*)&(_caches[index]), _iterator[index]); | } | return _caches[index]; | } | | auto ref opIndexAssign(T)(auto ref T val, ptrdiff_t index) | { | _flags[index] = true; | return _caches[index] = val; | } | | void opUnary(string op)() scope | if (op == "--" || op == "++") | { | mixin(op ~ "_iterator;"); | mixin(op ~ "_caches;"); | mixin(op ~ "_flags;"); | } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == "-" || op == "+") | { | mixin("_iterator" ~ op ~ "= index;"); | mixin("_caches" ~ op ~ "= index;"); | mixin("_flags" ~ op ~ "= index;"); | } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { return this._iterator - right._iterator; } | | bool opEquals()(scope 
ref const typeof(this) right) scope const | { return this._iterator == right._iterator; } | | ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const | { | static if (isPointer!Iterator) | return this._iterator - right._iterator; | else | return this._iterator.opCmp(right._iterator); | } |} | |private enum map_primitives = q{ | | import mir.functional: RefTuple, unref; | | auto ref opUnary(string op : "*")() | { | static if (is(typeof(*_iterator) : RefTuple!T, T...)) | { | auto t = *_iterator; | return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")"); | } | else | return _fun(*_iterator); | } | | auto ref opIndex(ptrdiff_t index) scope | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")"); | } | else | return _fun(_iterator[index]); | } | | static if (!__traits(compiles, &opIndex(ptrdiff_t.init))) | { | auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ") = value"); | } | else | return _fun(_iterator[index]) = value; | } | | auto ref opIndexUnary(string op)(ptrdiff_t index) | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin(op ~ "_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")"); | } | else | return mixin(op ~ "_fun(_iterator[index])"); | } | | auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index) | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")" ~ op ~ "= value"); | } | else | return mixin("_fun(_iterator[index])" ~ op ~ "= value"); | } | } |}; | |/++ |`VmapIterator` is used by $(SUBREF topology, map). |+/ |struct VmapIterator(Iterator, Fun) |{ |@optmath: | | /// | Iterator _iterator; | /// | Fun _fun; | | /// | auto lightConst()() const @property | { | return VmapIterator!(LightConstOf!Iterator, LightConstOf!Fun)(.lightConst(_iterator), .lightConst(_fun)); | } | | /// | auto lightImmutable()() immutable @property | { | return VmapIterator!(LightImmutableOf!Iterator, LightImmutableOf!Fun)(.lightImmutable(_iterator), .lightImmutable(_fun)); | } | | mixin(map_primitives); | mixin(std_ops); | | static if (hasZeroShiftFieldMember!Iterator) | /// | auto assumeFieldsHaveZeroShift() @property | { | return _vmapField(_iterator.assumeFieldsHaveZeroShift, _fun); | } |} | |auto MapIterator__map(Iterator, alias fun0, alias fun)(ref MapIterator!(Iterator, fun0) it) |{ | return MapIterator!(Iterator, fun)(it._iterator); |} | |/++ |`MapIterator` is used by $(SUBREF topology, map). 
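A minimal sketch of the `map` topology this iterator powers, assuming `iota` and `map` from mir.ndslice.topology:
----
import mir.ndslice.topology: iota, map;

auto sq = iota(4).map!(a => a * a); // lazy element-wise transform
assert(sq == [0, 1, 4, 9]);
----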
|+/ |struct MapIterator(Iterator, alias _fun) |{ |@optmath: | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return MapIterator!(LightConstOf!Iterator, _fun)(.lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return MapIterator!(LightImmutableOf!Iterator, _fun)(.lightImmutable(_iterator)); | } | | import mir.functional: pipe; | /// | static alias __map(alias fun1) = MapIterator__map!(Iterator, _fun, pipe!(_fun, fun1)); | | mixin(map_primitives); | mixin(std_ops); | | static if (hasZeroShiftFieldMember!Iterator) | /// | auto assumeFieldsHaveZeroShift() @property | { | return _mapField!_fun(_iterator.assumeFieldsHaveZeroShift); | } |} | |/+ |Creates a mapped iterator. Uses `__map` if possible. |+/ |auto _mapIterator(alias fun, Iterator)(Iterator iterator) |{ | static if (__traits(hasMember, Iterator, "__map")) | { | static if (is(Iterator : MapIterator!(Iter0, fun0), Iter0, alias fun0) | && !__traits(compiles, Iterator.__map!fun(iterator))) | { | // https://github.com/libmir/mir-algorithm/issues/111 | debug(mir) pragma(msg, __FUNCTION__~" not coalescing chained map calls into a single lambda, possibly because of multiple embedded context pointers"); | return MapIterator!(Iterator, fun)(iterator); | } | else | return Iterator.__map!fun(iterator); | } | else | return MapIterator!(Iterator, fun)(iterator); |} | | |/+ |Creates a mapped iterator. Uses `__vmap` if possible. |+/ |auto _vmapIterator(Iterator, Fun)(Iterator iterator, Fun fun) |{ | static if (__traits(hasMember, Iterator, "__vmap")) | return Iterator.__vmap(iterator, fun); | else | return MapIterator!(Iterator, fun)(iterator); |} | |@safe pure nothrow @nogc version(mir_test) unittest |{ | // https://github.com/libmir/mir-algorithm/issues/111 | import mir.ndslice.topology : iota, map; | import mir.functional : pipe; | | static auto foo(T)(T x) | { | return x.map!(a => a + 1); | } | | static auto bar(T)(T x) | { | return foo(x).map!(a => a + 2); | } | | auto data = iota(5); | auto result = iota([5], 3); | | auto x = data.map!(a => a + 1).map!(a => a + 2); | assert(x == result); | | auto y = bar(data); | assert(y == result); |} | |/++ |`MemberIterator` is used by $(SUBREF topology, member). |+/ |struct MemberIterator(Iterator, string member) |{ |@optmath: | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return MemberIterator!(LightConstOf!Iterator, member)(.lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return MemberIterator!(LightImmutableOf!Iterator, member)(.lightImmutable(_iterator)); | } | | auto ref opUnary(string op : "*")() | { | return __traits(getMember, *_iterator, member); | } | | auto ref opIndex()(ptrdiff_t index) | { | return __traits(getMember, _iterator[index], member); | } | | static if (!__traits(compiles, &opIndex(ptrdiff_t.init))) | { | auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope | { | return __traits(getMember, _iterator[index], member) = value; | } | | auto ref opIndexUnary(string op)(ptrdiff_t index) | { | return mixin(op ~ "__traits(getMember, _iterator[index], member)"); | } | | auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index) | { | return mixin("__traits(getMember, _iterator[index], member)" ~ op ~ "= value"); | } | } | | mixin(std_ops); |} | |/++ |`BytegroupIterator` is used by $(SUBREF topology, Bytegroup) and $(SUBREF topology, bytegroup). 
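Before the `BytegroupIterator` definition below, a small sketch of the `MemberIterator` defined above, constructed directly over a pointer; the element type and data are illustrative:
----
struct Point { double x, y; }
Point[3] data = [Point(1, 2), Point(3, 4), Point(5, 6)];

auto xs = MemberIterator!(Point*, "x")(data.ptr);
assert(*xs == 1);   // x of the first element
assert(xs[2] == 5); // x of the third element
----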
|+/ |struct BytegroupIterator(Iterator, size_t count, DestinationType) | if (count) |{ |@optmath: | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return BytegroupIterator!(LightConstOf!Iterator, count, DestinationType)(.lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return BytegroupIterator!(LightImmutableOf!Iterator, count, DestinationType)(.lightImmutable(_iterator)); | } | | package(mir) alias Byte = Unqual!(typeof(_iterator[0])); | | version(LittleEndian) | private enum BE = false; | else | private enum BE = true; | | private union U | { | DestinationType value; | static if (DestinationType.sizeof > Byte[count].sizeof && BE && isScalarType!DestinationType) | { | struct | { | ubyte[DestinationType.sizeof - Byte[count].sizeof] shiftPayload; | Byte[count] bytes; | } | } | else | { | Byte[count] bytes; | } | } | | DestinationType opUnary(string op : "*")() | { | U ret = { value: DestinationType.init }; | foreach (i; Iota!count) | ret.bytes[i] = _iterator[i]; | return ret.value; | } | | DestinationType opIndex()(ptrdiff_t index) | { | return *(this + index); | } | | DestinationType opIndexAssign(T)(T val, ptrdiff_t index) scope | { | auto it = this + index; | U ret = { value: val }; | foreach (i; Iota!count) | it._iterator[i] = ret.bytes[i]; | return ret.value; | } | | void opUnary(string op)() scope | if (op == "--" || op == "++") | { mixin("_iterator " ~ op[0] ~ "= count;"); } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == "-" || op == "+") | { mixin("_iterator " ~ op ~ "= index * count;"); } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { return (this._iterator - right._iterator) / count; } | | bool opEquals()(scope ref const typeof(this) right) scope const | { return this._iterator == right._iterator; } | | ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const | { | static if (isPointer!Iterator) | return this._iterator - right._iterator; | else | return this._iterator.opCmp(right._iterator); | } |} | |auto SlideIterator__map(Iterator, size_t params, alias fun0, alias fun)(SlideIterator!(Iterator, params, fun0) it) |{ | return SlideIterator!(Iterator, params, fun)(it._iterator); |} | |/++ |`SlideIterator` is used by $(SUBREF topology, diff) and $(SUBREF topology, slide). 
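Sketches of the `diff` and `slide` topologies built on this iterator, assuming their signatures from mir.ndslice.topology:
----
import mir.ndslice.topology: iota, diff, slide;

assert(iota(5).diff == [1, 1, 1, 1]);               // pairwise differences
assert(iota(5).slide!(2, "a + b") == [1, 3, 5, 7]); // sliding-window sums
----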
|+/ |struct SlideIterator(Iterator, size_t params, alias fun) | if (params > 1) |{ |@optmath: | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return SlideIterator!(LightConstOf!Iterator, params, fun)(.lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return SlideIterator!(LightImmutableOf!Iterator, params, fun)(.lightImmutable(_iterator)); | } | | import mir.functional: pipe; | /// | static alias __map(alias fun1) = SlideIterator__map!(Iterator, params, fun, pipe!(fun, fun1)); | | auto ref opUnary(string op : "*")() | { | return mixin("fun(" ~ _iotaArgs!(params, "_iterator[", "], ") ~ ")"); | } | | auto ref opIndex()(ptrdiff_t index) | { | return mixin("fun(" ~ _iotaArgs!(params, "_iterator[index + ", "], ") ~ ")"); | } | | mixin(std_ops); |} | |/// |version(mir_test) unittest |{ | import mir.functional: naryFun; | auto data = [1, 3, 8, 18]; | auto diff = SlideIterator!(int*, 2, naryFun!"b - a")(data.ptr); | assert(*diff == 2); | assert(diff[1] == 5); | assert(diff[2] == 10); |} | |auto IndexIterator__map(Iterator, Field, alias fun)(ref IndexIterator!(Iterator, Field) it) |{ | auto field = it._field._mapField!fun; | return IndexIterator!(Iterator, typeof(field))(it._iterator, field); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology; | import mir.ndslice.allocation; | import mir.ndslice.slice; | auto indices = [4, 3, 1, 2, 0, 4].sliced; | auto v = iota(5).indexed(indices).map!(a => a).slice; | uint r; | auto w = iota(5).indexed(indices).map!(a => a).map!(a => a * r).slice; |} | |/++ |Iterates a field using an iterator. | |`IndexIterator` is used by $(SUBREF topology, indexed). |+/ |struct IndexIterator(Iterator, Field) |{ | import mir.functional: RefTuple, unref; | |@optmath: | /// | Iterator _iterator; | /// | Field _field; | | /// | auto lightConst()() const @property | { | return IndexIterator!(LightConstOf!Iterator, LightConstOf!Field)(.lightConst(_iterator), .lightConst(_field)); | } | | /// | auto lightImmutable()() immutable @property | { | return IndexIterator!(LightImmutableOf!Iterator, LightImmutableOf!Field)(.lightImmutable(_iterator), _field.lightImmutable); | } | | /// | static alias __map(alias fun) = IndexIterator__map!(Iterator, Field, fun); | | auto ref opUnary(string op : "*")() | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = *_iterator; | return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]"); | } | else | return _field[*_iterator]; | } | | auto ref opIndex()(ptrdiff_t index) | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]"); | } | else | return _field[_iterator[index]]; | } | | static if (!__traits(compiles, &opIndex(ptrdiff_t.init))) | { | auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "] = value"); | } | else | return _field[_iterator[index]] = value; | } | | auto ref opIndexUnary(string op)(ptrdiff_t index) | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin(op ~ "_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]"); | } | else | return mixin(op ~ "_field[_iterator[index]]"); | } | | auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index) | { 
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]" ~ op ~ "= value"); | } | else | return mixin("_field[_iterator[index]]" ~ op ~ "= value"); | } | } | | mixin(std_ops); |} | |/++ |Iterates chunks in a sliceable using an iterator composed of indexes. | |Definition: |---- |auto index = iterator[i]; |auto elem = sliceable[index[0] .. index[1]]; |---- |+/ |struct SubSliceIterator(Iterator, Sliceable) |{ |@optmath: | /// | Iterator _iterator; | /// | Sliceable _sliceable; | | /// | auto lightConst()() const @property | { | return SubSliceIterator!(LightConstOf!Iterator, LightConstOf!Sliceable)(.lightConst(_iterator), _sliceable.lightConst); | } | | /// | auto lightImmutable()() immutable @property | { | return SubSliceIterator!(LightImmutableOf!Iterator, LightImmutableOf!Sliceable)(.lightImmutable(_iterator), _sliceable.lightImmutable); | } | | auto ref opUnary(string op : "*")() | { | auto i = *_iterator; | return _sliceable[i[0] .. i[1]]; | } | | auto ref opIndex()(ptrdiff_t index) | { | auto i = _iterator[index]; | return _sliceable[i[0] .. i[1]]; | } | | mixin(std_ops); |} | |/++ |Iterates chunks in a sliceable using an iterator composed of indexes stored consequently. | |Definition: |---- |auto elem = _sliceable[_iterator[index] .. _iterator[index + 1]]; |---- |+/ |struct ChopIterator(Iterator, Sliceable) |{ |@optmath: | /// | Iterator _iterator; | /// | Sliceable _sliceable; | | /// | auto lightConst()() const @property | { | return ChopIterator!(LightConstOf!Iterator, LightConstOf!Sliceable)(.lightConst(_iterator), _sliceable.lightConst); | } | | /// | auto lightImmutable()() immutable @property | { | return ChopIterator!(LightImmutableOf!Iterator, LightImmutableOf!Sliceable)(.lightImmutable(_iterator), _sliceable.lightImmutable); | } | | auto ref opUnary(string op : "*")() | { | return _sliceable[*_iterator .. _iterator[1]]; | } | | auto ref opIndex()(ptrdiff_t index) | { | return _sliceable[_iterator[index] .. _iterator[index + 1]]; | } | | mixin(std_ops); |} | |/++ |Iterates on top of another iterator and returns a slice |as a multidimensional window at the current position. | |`SliceIterator` is used by $(SUBREF topology, map) for packed slices. |+/ |struct SliceIterator(Iterator, size_t N = 1, SliceKind kind = Contiguous) |{ |@optmath: | /// | alias Element = Slice!(Iterator, N, kind); | /// | Element._Structure _structure; | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return SliceIterator!(LightConstOf!Iterator, N, kind)(_structure, .lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return SliceIterator!(LightImmutableOf!Iterator, N, kind)(_structure, .lightImmutable(_iterator)); | } | | auto opUnary(string op : "*")() | { | return Element(_structure, _iterator); | } | | auto opIndex()(ptrdiff_t index) | { | return Element(_structure, _iterator + index); | } | | mixin(std_ops); |} | |public auto FieldIterator__map(Field, alias fun)(FieldIterator!(Field) it) |{ | import mir.ndslice.field: _mapField; | auto field = it._field._mapField!fun; | return FieldIterator!(typeof(field))(it._index, field); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology; | import mir.ndslice.allocation; | auto v = ndiota(3, 3).map!(a => a).slice; | uint r; | auto w = ndiota(3, 3).map!(a => a).map!(a => a[0] * r).slice; |} | |/++ |Creates an iterator on top of a field. 
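Before the `FieldIterator` listing continues, a direct sketch of the `ChopIterator` defined above; the bounds and data arrays are illustrative:
----
auto bounds = [0, 2, 5, 6];             // consecutive slice boundaries
auto data = [10, 20, 30, 40, 50, 60];

auto it = ChopIterator!(int*, int[])(bounds.ptr, data);
assert(*it == [10, 20]);                // data[bounds[0] .. bounds[1]]
assert(it[1] == [30, 40, 50]);          // data[bounds[1] .. bounds[2]]
----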
| |`FieldIterator` is used by $(SUBREF slice, slicedField), $(SUBREF topology, bitwise), $(SUBREF topology, ndiota), and others. |+/ |struct FieldIterator(Field) |{ |@optmath: | /// | ptrdiff_t _index; | /// | Field _field; | | /// | auto lightConst()() const @property | { 0000000| return FieldIterator!(LightConstOf!Field)(_index, .lightConst(_field)); | } | | /// | auto lightImmutable()() immutable @property | { | return FieldIterator!(LightImmutableOf!Field)(_index, .lightImmutable(_field)); | } | | /// | static alias __map(alias fun) = FieldIterator__map!(Field, fun); | | /// | Slice!(IotaIterator!size_t) opSlice(size_t dimension)(size_t i, size_t j) scope const | { | assert(i <= j); | return typeof(return)(j - i, typeof(return).Iterator(i)); | } | | /++ | Returns: | `_field[_index + sl.i .. _index + sl.j]`. | +/ | auto opIndex()(Slice!(IotaIterator!size_t) sl) | { | auto idx = _index + sl._iterator._index; | return _field[idx .. idx + sl.length]; | } | | auto ref opUnary(string op : "*")() 0000000| { return _field[_index]; } | | void opUnary(string op)() scope | if (op == "++" || op == "--") | { mixin(op ~ `_index;`); } | | auto ref opIndex()(ptrdiff_t index) 0000000| { return _field[_index + index]; } | | static if (!__traits(compiles, &_field[_index])) | { | auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) | { return _field[_index + index] = value; } | | auto ref opIndexUnary(string op)(ptrdiff_t index) | { mixin (`return ` ~ op ~ `_field[_index + index];`); } | | auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index) | { mixin (`return _field[_index + index] ` ~ op ~ `= value;`); } | } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == "+" || op == "-") | { mixin(`_index ` ~ op ~ `= index;`); } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { return this._index - right._index; } | | bool opEquals()(scope ref const typeof(this) right) scope const 0000000| { return this._index == right._index; } | | ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const 0000000| { return this._index - right._index; } | | /// | auto assumeFieldsHaveZeroShift() @property | { 0000000| if (_expect(_index != 0, false)) | { | version (D_Exceptions) 0000000| throw assumeZeroShiftException; | else | assert(0, assumeZeroShiftExceptionMsg); | } | static if (hasZeroShiftFieldMember!Field) | return _field.assumeFieldsHaveZeroShift; | else 0000000| return _field; | } |} | |auto FlattenedIterator__map(Iterator, size_t N, SliceKind kind, alias fun)(FlattenedIterator!(Iterator, N, kind) it) |{ | import mir.ndslice.topology: map; | auto slice = it._slice.map!fun; | return FlattenedIterator!(TemplateArgsOf!(typeof(slice)))(it._indexes, slice); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology; | import mir.ndslice.allocation; | auto v = iota(3, 3).universal.flattened.map!(a => a).slice; | uint r; | auto w = iota(3, 3).universal.flattened.map!(a => a).map!(a => a * r).slice; |} | |/++ |Creates an iterator on top of all elements in a slice. | |`FieldIterator` is used by $(SUBREF topology, bitwise), $(SUBREF topology, ndiota), and others. 
|+/ |struct FlattenedIterator(Iterator, size_t N, SliceKind kind) | if (N > 1 && (kind == Universal || kind == Canonical)) |{ |@optmath: | /// | ptrdiff_t[N] _indexes; | /// | Slice!(Iterator, N, kind) _slice; | | /// | auto lightConst()() const @property | { | return FlattenedIterator!(LightConstOf!Iterator, N, kind)(_indexes, _slice.lightConst); | } | | /// | auto lightImmutable()() immutable @property | { | return FlattenedIterator!(LightImmutableOf!Iterator, N, kind)(_indexes, _slice.lightImmutable); | } | | /// | static alias __map(alias fun) = FlattenedIterator__map!(Iterator, N, kind, fun); | | private ptrdiff_t getShift()(ptrdiff_t n) | { | ptrdiff_t _shift; | n += _indexes[$ - 1]; | foreach_reverse (i; Iota!(1, N)) | { | immutable v = n / ptrdiff_t(_slice._lengths[i]); | n %= ptrdiff_t(_slice._lengths[i]); | static if (i == _slice.S) | _shift += (n - _indexes[i]); | else | _shift += (n - _indexes[i]) * _slice._strides[i]; | n = _indexes[i - 1] + v; | } | _shift += (n - _indexes[0]) * _slice._strides[0]; | return _shift; | } | | auto ref opUnary(string op : "*")() | { | return *_slice._iterator; | } | | void opUnary(string op)() scope | if (op == "--" || op == "++") | { | foreach_reverse (i; Iota!N) | { | static if (i == _slice.S) | mixin(op ~ `_slice._iterator;`); | else | mixin(`_slice._iterator ` ~ op[0] ~ `= _slice._strides[i];`); | mixin (op ~ `_indexes[i];`); | static if (i) | { | static if (op == "++") | { | if (_indexes[i] < _slice._lengths[i]) | return; | static if (i == _slice.S) | _slice._iterator -= _slice._lengths[i]; | else | _slice._iterator -= _slice._lengths[i] * _slice._strides[i]; | _indexes[i] = 0; | } | else | { | if (_indexes[i] >= 0) | return; | static if (i == _slice.S) | _slice._iterator += _slice._lengths[i]; | else | _slice._iterator += _slice._lengths[i] * _slice._strides[i]; | _indexes[i] = _slice._lengths[i] - 1; | } | } | } | } | | auto ref opIndex()(ptrdiff_t index) | { | return _slice._iterator[getShift(index)]; | } | | static if (isMutable!(_slice.DeepElement) && !_slice.hasAccessByRef) | /// | auto ref opIndexAssign(E)(scope ref E elem, size_t index) scope return | { | return _slice._iterator[getShift(index)] = elem; | } | | void opOpAssign(string op : "+")(ptrdiff_t n) scope | { | ptrdiff_t _shift; | n += _indexes[$ - 1]; | foreach_reverse (i; Iota!(1, N)) | { | immutable v = n / ptrdiff_t(_slice._lengths[i]); | n %= ptrdiff_t(_slice._lengths[i]); | static if (i == _slice.S) | _shift += (n - _indexes[i]); | else | _shift += (n - _indexes[i]) * _slice._strides[i]; | _indexes[i] = n; | n = _indexes[i - 1] + v; | } | _shift += (n - _indexes[0]) * _slice._strides[0]; | _indexes[0] = n; | foreach_reverse (i; Iota!(1, N)) | { | if (_indexes[i] >= 0) | break; | _indexes[i] += _slice._lengths[i]; | _indexes[i - 1]--; | } | _slice._iterator += _shift; | } | | void opOpAssign(string op : "-")(ptrdiff_t n) scope | { this += -n; } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { | ptrdiff_t ret = this._indexes[0] - right._indexes[0]; | foreach (i; Iota!(1, N)) | { | ret *= _slice._lengths[i]; | ret += this._indexes[i] - right._indexes[i]; | } | return ret; | } | | bool opEquals()(scope ref const typeof(this) right) scope const | { | foreach_reverse (i; Iota!N) | if (this._indexes[i] != right._indexes[i]) | return false; | return true; | } | | ptrdiff_t opCmp()(scope 
ref const typeof(this) right) scope const | { | foreach (i; Iota!(N - 1)) | if (auto ret = this._indexes[i] - right._indexes[i]) | return ret; | return this._indexes[$ - 1] - right._indexes[$ - 1]; | } |} | |version(mir_test) unittest |{ | import mir.ndslice.topology; | import mir.ndslice.slice; | | auto it0 = iota(3, 4).universal.flattened._iterator; | auto it1 = it0; | assert(it0 == it1); | it0 += 5; | assert(it0 > it1); | it0 -= 5; | assert(*it0 == *it1); | assert(it0 == it1); | it0 += 5; | it0 += 7; | it0 -= 9; | assert(it0 > it1); | it1 += 3; | assert(*it0 == *it1); | assert(it0 == it1); | assert(it0 <= it1); | assert(it0 >= it1); | | ++it0; | ++it0; | ++it0; | ++it0; | ++it0; | ++it0; | ++it0; | ++it0; | ++it0; | | assert(it0 - it1 == 9); | assert(it1 - it0 == -9); | | ++it0; | | assert(it0 - it1 == 10); | assert(it1 - it0 == -10); | | --it0; | | assert(it0 - it1 == 9); | assert(it1 - it0 == -9); | assert(it0[-9] == *it1); | assert(*it0 == it1[9]); | | --it0; | --it0; | --it0; | --it0; | --it0; | --it0; | --it0; | --it0; | --it0; | assert(*it0 == *it1); | assert(it0 == it1); | assert(it0 <= it1); | assert(it0 >= it1); |} | |/++ |`StairsIterator` is used by $(SUBREF topology, stairs). |+/ |struct StairsIterator(Iterator, string direction) | if (direction == "+" || direction == "-") |{ | /// | size_t _length; | | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return StairsIterator!(LightConstOf!Iterator, direction)(_length, .lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return StairsIterator!(LightImmutableOf!Iterator, direction)(_length, .lightImmutable(_iterator)); | } | |@optmath: | | /// | Slice!Iterator opUnary(string op : "*")() | { | import mir.ndslice.slice: sliced; | return _iterator.sliced(_length); | } | | /// | Slice!Iterator opIndex()(ptrdiff_t index) | { | import mir.ndslice.slice: sliced; | static if (direction == "+") | { | auto newLength = _length + index; | auto shift = ptrdiff_t(_length + newLength - 1) * index / 2; | } | else | { | auto newLength = _length - index; | auto shift = ptrdiff_t(_length + newLength + 1) * index / 2; | } | assert(ptrdiff_t(newLength) >= 0); | return (_iterator + shift).sliced(newLength); | } | | void opUnary(string op)() scope | if (op == "--" || op == "++") | { | static if (op == "++") | { | _iterator += _length; | static if (direction == "+") | ++_length; | else | --_length; | } | else | { | assert(_length); | static if (direction == "+") | --_length; | else | ++_length; | _iterator -= _length; | } | } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == "-" || op == "+") | { | static if (op == direction) | auto newLength = _length + index; | else | auto newLength = _length - index; | static if (direction == "+") | auto shift = ptrdiff_t(_length + newLength - 1) * index / 2; | else | auto shift = ptrdiff_t(_length + newLength + 1) * index / 2; | assert(ptrdiff_t(newLength) >= 0); | _length = newLength; | static if (op == "+") | _iterator += shift; | else | _iterator -= shift; | } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { | static if (direction == "+") | return this._length - right._length; | else | return right._length - this._length; | } | | bool opEquals()(scope ref const typeof(this) right) scope const | { return this._length == right._length; } | | 
ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const | { return this - right; } |} | |/// |version(mir_test) unittest |{ | // 0 | // 1 2 | // 3 4 5 | // 6 7 8 9 | // 10 11 12 13 14 | auto it = StairsIterator!(IotaIterator!size_t, "+")(1, IotaIterator!size_t()); | assert(*it == [0]); | assert(it[4] == [10, 11, 12, 13, 14]); | assert(*(it + 4) == [10, 11, 12, 13, 14]); | ++it; | assert(*it == [1, 2]); | it += 3; | assert(*it == [10, 11, 12, 13, 14]); | assert(it[-3] == [1, 2]); | assert(*(it - 3) == [1, 2]); | assert(it + 1 > it); | assert(it + 1 - 1 == it); | assert(it - 3 - it == -3); | --it; | assert(*it == [6, 7, 8, 9]); |} | |/// |version(mir_test) unittest |{ | // [0, 1, 2, 3, 4], | // [5, 6, 7, 8], | // [9, 10, 11], | // [12, 13], | // [14]]); | | auto it = StairsIterator!(IotaIterator!size_t, "-")(5, IotaIterator!size_t()); | assert(*it == [0, 1, 2, 3, 4]); | assert(it[4] == [14]); | assert(*(it + 4) == [14]); | ++it; | assert(*it == [5, 6, 7, 8]); | it += 3; | assert(*it == [14]); | assert(it[-3] == [5, 6, 7, 8]); | assert(*(it - 3) == [5, 6, 7, 8]); | assert(it + 1 > it); | assert(it + 1 - 1 == it); | assert(it - 3 - it == -3); | --it; | assert(*it == [12, 13]); |} | |/++ |Element type of $(LREF TripletIterator). |+/ |struct Triplet(Iterator, SliceKind kind = Contiguous) |{ |@optmath: | /// | size_t _iterator; | /// | Slice!(Iterator, 1, kind) _slice; | | /// | auto lightConst()() const @property | { | return Triplet!(LightConstOf!Iterator, kind)(_iterator, slice.lightConst); | } | | /// | auto lightImmutable()() immutable @property | { | return Triplet!(LightImmutableOf!Iterator, kind)(_iterator, slice.lightImmutable); | } | | @property | { | /// | auto ref center() | { | assert(_iterator < _slice.length); | return _slice[_iterator]; | } | | /// | Slice!(Iterator, 1, kind) left() | { | assert(_iterator < _slice.length); | return _slice[0 .. _iterator]; | } | | /// | Slice!(Iterator, 1, kind) right() | { | assert(_iterator < _slice.length); | return _slice[_iterator + 1 .. $]; | } | } |} | |/++ |Iterates triplets position in a slice. | |`TripletIterator` is used by $(SUBREF topology, triplets). |+/ |struct TripletIterator(Iterator, SliceKind kind = Contiguous) |{ |@optmath: | | /// | size_t _iterator; | /// | Slice!(Iterator, 1, kind) _slice; | | /// | auto lightConst()() const @property | { | return TripletIterator!(LightConstOf!Iterator, kind)(_iterator, _slice.lightConst); | } | | /// | auto lightImmutable()() immutable @property | { | return TripletIterator!(LightImmutableOf!Iterator, kind)(_iterator, _slice.lightImmutable); | } | | /// | Triplet!(Iterator, kind) opUnary(string op : "*")() | { | return typeof(return)(_iterator, _slice); | } | | /// | Triplet!(Iterator, kind) opIndex()(ptrdiff_t index) | { | return typeof(return)(_iterator + index, _slice); | } | | mixin(std_ops); |} ../../../.dub/packages/mir-algorithm-3.4.17/mir-algorithm/source/mir/ndslice/iterator.d is 0% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.4.17-mir-algorithm-source-mir-ndslice-slice.lst |/++ |This is a submodule of $(MREF mir, ndslice). | |Safety_note: | User-defined iterators $(RED must) care about their safety except bounds checks. | Bounds are checked in ndslice code. | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |$(BOOKTABLE $(H2 Definitions), |$(TR $(TH Name) $(TH Description)) |$(T2 Slice, N-dimensional slice.) 
|$(T2 SliceKind, SliceKind of $(LREF Slice) enumeration.) |$(T2 Universal, Alias for $(LREF .SliceKind.universal).) |$(T2 Canonical, Alias for $(LREF .SliceKind.canonical).) |$(T2 Contiguous, Alias for $(LREF .SliceKind.contiguous).) |$(T2 sliced, Creates a slice on top of an iterator, a pointer, or an array's pointer.) |$(T2 slicedField, Creates a slice on top of a field, a random access range, or an array.) |$(T2 slicedNdField, Creates a slice on top of an ndField.) |$(T2 kindOf, Extracts $(LREF SliceKind).) |$(T2 isSlice, Extracts dimension count from a type. Extracts `null` if the template argument is not a `Slice`.) |$(T2 Structure, A tuple of lengths and strides.) |) | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |T4=$(TR $(TDNW $(LREF $1)) $(TD $2) $(TD $3) $(TD $4)) |STD = $(TD $(SMALL $0)) |+/ |module mir.ndslice.slice; | |import mir.internal.utility : Iota; |import mir.math.common : optmath; |import mir.ndslice.concatenation; |import mir.ndslice.field; |import mir.ndslice.internal; |import mir.ndslice.iterator; |import mir.ndslice.traits: isIterator; |import mir.primitives; |import mir.qualifier; |import mir.utility; |import std.meta; |import std.traits; | |public import mir.primitives: DeepElementType; | |@optmath: | |/++ |Checks if type T has asSlice property and its returns a slices. |Aliases itself to a dimension count |+/ |template hasAsSlice(T) |{ | static if (__traits(hasMember, T, "asSlice")) | enum size_t hasAsSlice = typeof(T.init.asSlice).N; | else | enum size_t hasAsSlice = 0; |} | |/// |version(mir_test) unittest |{ | import mir.series; | static assert(!hasAsSlice!(int[])); | static assert(hasAsSlice!(SeriesMap!(int, string)) == 1); |} | |/++ |Check if $(LREF toConst) function can be called with type T. |+/ |enum isConvertibleToSlice(T) = isSlice!T || isDynamicArray!T || hasAsSlice!T; | |/// |version(mir_test) unittest |{ | import mir.series: SeriesMap; | static assert(isConvertibleToSlice!(immutable int[])); | static assert(isConvertibleToSlice!(string[])); | static assert(isConvertibleToSlice!(SeriesMap!(string, int))); | static assert(isConvertibleToSlice!(Slice!(int*))); |} | |/++ |Reurns: | Ndslice view in the same data. |See_also: $(LREF isConvertibleToSlice). |+/ |auto toSlice(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) val) |{ | return val; |} | |/// ditto |auto toSlice(Iterator, size_t N, SliceKind kind)(const Slice!(Iterator, N, kind) val) |{ | return val[]; |} | |/// ditto |auto toSlice(Iterator, size_t N, SliceKind kind)(immutable Slice!(Iterator, N, kind) val) |{ | return val[]; |} | |/// ditto |auto toSlice(T)(T[] val) |{ | return val.sliced; |} | |/// ditto |auto toSlice(T)(T val) | if (hasAsSlice!T) |{ | return val.asSlice; |} | |/// |template toSlices(args...) |{ | static if (args.length) | { | alias arg = args[0]; | @optmath @property auto ref slc()() | { | return toSlice(arg); | } | alias toSlices = AliasSeq!(slc, toSlices!(args[1..$])); | } | else | alias toSlices = AliasSeq!(); |} | |/// |template isSlice(T) |{ | static if (is(T : Slice!(Iterator, N, kind), Iterator, size_t N, SliceKind kind)) | enum bool isSlice = true; | else | enum bool isSlice = false; |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | alias A = uint[]; | alias S = Slice!(int*); | | static assert(isSlice!S); | static assert(!isSlice!A); |} | |/++ |SliceKind of $(LREF Slice). 
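|
|A minimal sketch of how each kind typically arises (illustrative only):
|-------
|import mir.ndslice.topology: canonical, iota, universal;
|auto c = iota(3, 4);   // contiguous: no strides are stored
|auto k = c.canonical;  // canonical: strides for all dimensions but the row one
|auto u = c.universal;  // universal: strides for all dimensions
|static assert(kindOf!(typeof(c)) == Contiguous);
|static assert(kindOf!(typeof(k)) == Canonical);
|static assert(kindOf!(typeof(u)) == Universal);
|-------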
|See_also: | $(SUBREF topology, universal), | $(SUBREF topology, canonical), | $(SUBREF topology, assumeCanonical), | $(SUBREF topology, assumeContiguous). |+/ |alias SliceKind = mir_slice_kind; |/// ditto |enum mir_slice_kind |{ | /// A slice has strides for all dimensions. | universal, | /// A slice has >=2 dimensions and row dimension is contiguous. | canonical, | /// A slice is a flat contiguous data without strides. | contiguous, |} | |/++ |Alias for $(LREF .SliceKind.universal). | |See_also: | Internal Binary Representation section in $(LREF Slice). |+/ |alias Universal = SliceKind.universal; |/++ |Alias for $(LREF .SliceKind.canonical). | |See_also: | Internal Binary Representation section in $(LREF Slice). |+/ |alias Canonical = SliceKind.canonical; |/++ |Alias for $(LREF .SliceKind.contiguous). | |See_also: | Internal Binary Representation section in $(LREF Slice). |+/ |alias Contiguous = SliceKind.contiguous; | |/// Extracts $(LREF SliceKind). |enum kindOf(T : Slice!(Iterator, N, kind), Iterator, size_t N, SliceKind kind) = kind; | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | static assert(kindOf!(Slice!(int*, 1, Universal)) == Universal); |} | |/// Extracts iterator type from a $(LREF Slice). |alias IteratorOf(T : Slice!(Iterator, N, kind), Iterator, size_t N, SliceKind kind) = Iterator; | |private template SkipDimension(size_t dimension, size_t index) |{ | static if (index < dimension) | enum SkipDimension = index; | else | static if (index == dimension) | static assert (0, "SkipInex: wrong index"); | else | enum SkipDimension = index - 1; |} | |/++ |Creates an n-dimensional slice-shell over an iterator. |Params: | iterator = An iterator, a pointer, or an array. | lengths = A list of lengths for each dimension |Returns: | n-dimensional slice |+/ |auto sliced(size_t N, Iterator)(Iterator iterator, size_t[N] lengths...) | if (!isStaticArray!Iterator && N | && !is(Iterator : Slice!(_Iterator, _N, kind), _Iterator, size_t _N, SliceKind kind)) |{ | alias C = ImplicitlyUnqual!(typeof(iterator)); 0000000| size_t[N] _lengths; | foreach (i; Iota!N) 0000000| _lengths[i] = lengths[i]; 0000000| ptrdiff_t[1] _strides = 0; | static if (isDynamicArray!Iterator) | { | assert(lengthsProduct(_lengths) <= iterator.length, | "array length should be greater or equal to the product of constructed ndslice lengths"); | auto ptr = iterator.length ? &iterator[0] : null; | return Slice!(typeof(C.init[0])*, N)(_lengths, ptr); | } | else | { | // break safety 0000000| if (false) | { 0000000| ++iterator; 0000000| --iterator; 0000000| iterator += 34; 0000000| iterator -= 34; | } 0000000| return Slice!(C, N)(_lengths, iterator); | } |} | |/// $(LINK2 https://en.wikipedia.org/wiki/Vandermonde_matrix, Vandermonde matrix) |@safe pure nothrow version(mir_test) unittest |{ | auto vandermondeMatrix(Slice!(double*) x) | @safe nothrow pure | { | import mir.ndslice.allocation: slice; | auto ret = slice!double(x.length, x.length); | foreach (i; 0 .. x.length) | foreach (j; 0 .. 
x.length) | ret[i][j] = x[i] ^^ j; | return ret; | } | | import mir.ndslice.topology: universal; | auto x = [1.0, 2, 3, 4, 5].sliced; | auto v = vandermondeMatrix(x); | assert(v == | [[ 1.0, 1, 1, 1, 1], | [ 1.0, 2, 4, 8, 16], | [ 1.0, 3, 9, 27, 81], | [ 1.0, 4, 16, 64, 256], | [ 1.0, 5, 25, 125, 625]]); |} | |/// Random access range primitives for slices over user defined types |@safe pure nothrow @nogc version(mir_test) unittest |{ | struct MyIota | { | //`[index]` operator overloading | auto opIndex(size_t index) @safe nothrow | { | return index; | } | | auto lightConst()() const @property { return MyIota(); } | auto lightImmutable()() immutable @property { return MyIota(); } | } | import mir.ndslice.iterator: FieldIterator; | alias Iterator = FieldIterator!MyIota; | alias S = Slice!(Iterator, 2); | import std.range.primitives; | static assert(hasLength!S); | static assert(hasSlicing!S); | static assert(isRandomAccessRange!S); | | auto slice = Iterator().sliced(20, 10); | assert(slice[1, 2] == 12); | auto sCopy = slice.save; | assert(slice[1, 2] == 12); |} | |/++ |Creates an 1-dimensional slice-shell over an array. |Params: | array = An array. |Returns: | 1-dimensional slice |+/ |Slice!(T*) sliced(T)(T[] array) @trusted |{ | version(LDC) pragma(inline, true); | return Slice!(T*)([array.length], array.ptr); |} | |/// Creates a slice from an array. |@safe pure nothrow version(mir_test) unittest |{ | auto slice = new int[10].sliced; | assert(slice.length == 10); | static assert(is(typeof(slice) == Slice!(int*))); |} | |/++ |Creates an n-dimensional slice-shell over the 1-dimensional input slice. |Params: | slice = slice | lengths = A list of lengths for each dimension. |Returns: | n-dimensional slice |+/ |Slice!(Iterator, N, kind) | sliced | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, 1, kind) slice, size_t[N] lengths...) | if (N) |{ | auto structure = typeof(return)._Structure.init; | structure[0] = lengths; | static if (kind != Contiguous) | { | import mir.ndslice.topology: iota; | structure[1] = structure[0].iota.strides; | } | return typeof(return)(structure, slice._iterator); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | auto data = new int[24]; | foreach (i, ref e; data) | e = cast(int)i; | auto a = data[0..10].sliced(10)[0..6].sliced(2, 3); | auto b = iota!int(10)[0..6].sliced(2, 3); | assert(a == b); | a[] += b; | foreach (i, e; data[0..6]) | assert(e == 2*i); | foreach (i, e; data[6..$]) | assert(e == i+6); |} | |/++ |Creates an n-dimensional slice-shell over a field. |Params: | field = A field. The length of the | array should be equal to or less then the product of | lengths. | lengths = A list of lengths for each dimension. |Returns: | n-dimensional slice |+/ |Slice!(FieldIterator!Field, N) |slicedField(Field, size_t N)(Field field, size_t[N] lengths...) | if (N) |{ | static if (hasLength!Field) 0000000| assert(lengths.lengthsProduct <= field.length, "Length product should be less or equal to the field length."); 0000000| return FieldIterator!Field(0, field).sliced(lengths); |} | |///ditto |auto slicedField(Field)(Field field) | if(hasLength!Field) |{ 0000000| return .slicedField(field, field.length); |} | |/// Creates an 1-dimensional slice over a field, array, or random access range. |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | auto slice = 10.iota.slicedField; | assert(slice.length == 10); |} | |/++ |Creates an n-dimensional slice-shell over an ndField. 
|Params: | field = A ndField. Lengths should fit into field's shape. | lengths = A list of lengths for each dimension. |Returns: | n-dimensional slice |See_also: $(SUBREF concatenation, concatenation) examples. |+/ |Slice!(IndexIterator!(FieldIterator!(ndIotaField!N), ndField), N) |slicedNdField(ndField, size_t N)(ndField field, size_t[N] lengths...) | if (N) |{ | static if(hasShape!ndField) | { | auto shape = field.shape; | foreach (i; 0 .. N) | assert(lengths[i] <= shape[i], "Lengths should fit into ndfield's shape."); | } | import mir.ndslice.topology: indexed, ndiota; | return indexed(field, ndiota(lengths)); |} | |///ditto |auto slicedNdField(ndField)(ndField field) | if(hasShape!ndField) |{ | return .slicedNdField(field, field.shape); |} | |/++ |Combination of coordinate(s) and value. |+/ |struct CoordinateValue(T, size_t N = 1) |{ | /// | size_t[N] index; | | /// | T value; | | /// | sizediff_t opCmp()(scope auto ref const typeof(this) rht) const | { | return cmpCoo(this.index, rht.index); | } |} | |private sizediff_t cmpCoo(size_t N)(scope const auto ref size_t[N] a, scope const auto ref size_t[N] b) |{ | foreach (i; Iota!(0, N)) | if (auto d = a[i] - b[i]) | return d; | return 0; |} | |/++ |Presents $(LREF .Slice.structure). |+/ |struct Structure(size_t N) |{ | /// | size_t[N] lengths; | /// | sizediff_t[N] strides; |} | |/++ |Presents an n-dimensional view over a range. | |$(H3 Definitions) | |In order to change data in a slice using |overloaded operators such as `=`, `+=`, `++`, |a syntactic structure of type |`[]` must be used. |It is worth noting that just like for regular arrays, operations `a = b` |and `a[] = b` have different meanings. |In the first case, after the operation is carried out, `a` simply points at the same data as `b` |does, and the data which `a` previously pointed at remains unmodified. |Here, `а` and `b` must be of the same type. |In the second case, `a` points at the same data as before, |but the data itself will be changed. In this instance, the number of dimensions of `b` |may be less than the number of dimensions of `а`; and `b` can be a Slice, |a regular multidimensional array, or simply a value (e.g. a number). | |In the following table you will find the definitions you might come across |in comments on operator overloading. | |$(BOOKTABLE |$(TR $(TH Operator Overloading) $(TH Examples at `N == 3`)) |$(TR $(TD An $(B interval) is a part of a sequence of type `i .. j`.) | $(STD `2..$-3`, `0..4`)) |$(TR $(TD An $(B index) is a part of a sequence of type `i`.) | $(STD `3`, `$-1`)) |$(TR $(TD A $(B partially defined slice) is a sequence composed of | $(B intervals) and $(B indexes) with an overall length strictly less than `N`.) | $(STD `[3]`, `[0..$]`, `[3, 3]`, `[0..$,0..3]`, `[0..$,2]`)) |$(TR $(TD A $(B fully defined index) is a sequence | composed only of $(B indexes) with an overall length equal to `N`.) | $(STD `[2,3,1]`)) |$(TR $(TD A $(B fully defined slice) is an empty sequence | or a sequence composed of $(B indexes) and at least one | $(B interval) with an overall length equal to `N`.) | $(STD `[]`, `[3..$,0..3,0..$-1]`, `[2,0..$,1]`)) |$(TR $(TD An $(B indexed slice) is syntax sugar for $(SUBREF topology, indexed) and $(SUBREF topology, cartesian).) | $(STD `[anNdslice]`, `[$.iota, anNdsliceForCartesian1, $.iota]`)) |) | |See_also: | $(SUBREF topology, iota). | |$(H3 Internal Binary Representation) | |Multidimensional Slice is a structure that consists of lengths, strides, and a iterator (pointer). 
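|
|For a universal slice an element access is plain pointer arithmetic over these three fields;
|a minimal sketch (illustrative; `s` is assumed to be a `Slice!(double*, 3, Universal)`):
|
|-------
|// s[i, j, k] dereferences the iterator shifted by the dot product of strides and indexes:
|// &s[i, j, k] is s._iterator + i * s._strides[0] + j * s._strides[1] + k * s._strides[2]
|-------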
| |$(SUBREF topology, FieldIterator) shell is used to wrap fields and random access ranges. |FieldIterator contains a shift of the current initial element of a multidimensional slice |and the field itself. | |With the exception of $(MREF mir,ndslice,allocation) module, no functions in this |package move or copy data. The operations are only carried out on lengths, strides, |and pointers. If a slice is defined over a range, only the shift of the initial element |changes instead of the range. | |$(H4 Internal Representation for Universal Slices) | |Type definition | |------- |Slice!(Iterator, N, Universal) |------- | |Schema | |------- |Slice!(Iterator, N, Universal) | size_t[N] _lengths | sizediff_t[N] _strides | Iterator _iterator |------- | |$(H5 Example) | |Definitions | |------- |import mir.ndslice; |auto a = new double[24]; |Slice!(double*, 3, Universal) s = a.sliced(2, 3, 4).universal; |Slice!(double*, 3, Universal) t = s.transposed!(1, 2, 0); |Slice!(double*, 3, Universal) r = t.reversed!1; |------- | |Representation | |------- |s________________________ | lengths[0] ::= 2 | lengths[1] ::= 3 | lengths[2] ::= 4 | | strides[0] ::= 12 | strides[1] ::= 4 | strides[2] ::= 1 | | iterator ::= &a[0] | |t____transposed!(1, 2, 0) | lengths[0] ::= 3 | lengths[1] ::= 4 | lengths[2] ::= 2 | | strides[0] ::= 4 | strides[1] ::= 1 | strides[2] ::= 12 | | iterator ::= &a[0] | |r______________reversed!1 | lengths[0] ::= 2 | lengths[1] ::= 3 | lengths[2] ::= 4 | | strides[0] ::= 12 | strides[1] ::= -4 | strides[2] ::= 1 | | iterator ::= &a[8] // (old_strides[1] * (lengths[1] - 1)) = 8 |------- | |$(H4 Internal Representation for Canonical Slices) | |Type definition | |------- |Slice!(Iterator, N, Canonical) |------- | |Schema | |------- |Slice!(Iterator, N, Canonical) | size_t[N] _lengths | sizediff_t[N-1] _strides | Iterator _iterator |------- | |$(H4 Internal Representation for Contiguous Slices) | |Type definition | |------- |Slice!(Iterator, N) |------- | |Schema | |------- |Slice!(Iterator, N, Contiguous) | size_t[N] _lengths | sizediff_t[0] _strides | Iterator _iterator |------- |+/ |struct mir_slice(Iterator_, size_t N_ = 1, SliceKind kind_ = Contiguous, Labels_...) | if (0 < N_ && N_ < 255 && !(kind_ == Canonical && N_ == 1) && Labels_.length <= N_ && isIterator!Iterator_) |{ |@optmath: | | /// | enum SliceKind kind = kind_; | | /// | enum size_t N = N_; | | /// | enum size_t S = kind == Universal ? N : kind == Canonical ? N - 1 : 0; | | /// | enum size_t L = Labels_.length; | | /// | alias Iterator = Iterator_; | | /// | alias This = Slice!(Iterator, N, kind); | | /// | alias DeepElement = typeof(Iterator.init[size_t.init]); | | /// | alias Labels = Labels_; | | /// | template Element(size_t dimension) | if (dimension < N) | { | static if (N == 1) | alias Element = DeepElement; | else | { | static if (kind == Universal || dimension == N - 1) | alias Element = mir_slice!(Iterator, N - 1, Universal); | else | static if (N == 2 || kind == Contiguous && dimension == 0) | alias Element = mir_slice!(Iterator, N - 1); | else | alias Element = mir_slice!(Iterator, N - 1, Canonical); | } | } | | package(mir): | | enum doUnittest = is(Iterator == int*) && N == 1 && kind == Contiguous; | | enum hasAccessByRef = __traits(compiles, &_iterator[0]); | | enum PureIndexLength(Slices...) = Filter!(isIndex, Slices).length; | | enum isPureSlice(Slices...) 
= | Slices.length == 0 | || Slices.length <= N | && PureIndexLength!Slices < N | && Filter!(isIndex, Slices).length < Slices.length | && allSatisfy!(templateOr!(isIndex, is_Slice), Slices); | | | enum isFullPureSlice(Slices...) = | Slices.length == 0 | || Slices.length == N | && PureIndexLength!Slices < N | && allSatisfy!(templateOr!(isIndex, is_Slice), Slices); | | enum isIndexedSlice(Slices...) = | Slices.length | && Slices.length <= N | && allSatisfy!(isSlice, Slices) | && anySatisfy!(templateNot!is_Slice, Slices); | | static if (S) | { | /// | public alias _Structure = AliasSeq!(size_t[N], ptrdiff_t[S]); | /// | _Structure _structure; | /// | public alias _lengths = _structure[0]; | /// | public alias _strides = _structure[1]; | } | else | { | /// | public alias _Structure = AliasSeq!(size_t[N]); | /// | _Structure _structure; | /// | public alias _lengths = _structure[0]; | /// | public enum ptrdiff_t[S] _strides = ptrdiff_t[S].init; | } | | /// | public Iterator _iterator; | /// | public Labels _labels; | | sizediff_t backIndex(size_t dimension = 0)() @safe @property scope const | if (dimension < N) | { | return _stride!dimension * (_lengths[dimension] - 1); | } | | size_t indexStride(size_t I)(size_t[I] _indexes) @safe scope const | { | static if (_indexes.length) | { | static if (kind == Contiguous) | { | enum E = I - 1; | assert(_indexes[E] < _lengths[E], indexError!(E, N)); | ptrdiff_t ball = this._stride!E; | ptrdiff_t stride = _indexes[E] * ball; | foreach_reverse (i; Iota!E) //static | { | ball *= _lengths[i + 1]; | assert(_indexes[i] < _lengths[i], indexError!(i, N)); | stride += ball * _indexes[i]; | } | } | else | static if (kind == Canonical) | { | enum E = I - 1; | assert(_indexes[E] < _lengths[E], indexError!(E, N)); | static if (I == N) | size_t stride = _indexes[E]; | else | size_t stride = _strides[E] * _indexes[E]; | foreach_reverse (i; Iota!E) //static | { | assert(_indexes[i] < _lengths[i], indexError!(i, N)); | stride += _strides[i] * _indexes[i]; | } | } | else | { | enum E = I - 1; | assert(_indexes[E] < _lengths[E], indexError!(E, N)); | size_t stride = _strides[E] * _indexes[E]; | foreach_reverse (i; Iota!E) //static | { | assert(_indexes[i] < _lengths[i], indexError!(i, N)); | stride += _strides[i] * _indexes[i]; | } | } | return stride; | } | else | { | return 0; | } | } | |public: | | // static if (S == 0) | // { | /// Defined for Contiguous Slice only | // this()(size_t[N] lengths, in ptrdiff_t[] empty, Iterator iterator, Labels labels) | // { | // version(LDC) pragma(inline, true); | // assert(empty.length == 0); | // this._lengths = lengths; | // this._iterator = iterator; | // } | | // /// ditto | // this()(size_t[N] lengths, Iterator iterator, Labels labels) | // { | // version(LDC) pragma(inline, true); | // this._lengths = lengths; | // this._iterator = iterator; | // } | | // /// ditto | // this()(size_t[N] lengths, in ptrdiff_t[] empty, Iterator iterator, Labels labels) | // { | // version(LDC) pragma(inline, true); | // assert(empty.length == 0); | // this._lengths = lengths; | // this._iterator = iterator; | // } | | // /// ditto | // this()(size_t[N] lengths, Iterator iterator, Labels labels) | // { | // version(LDC) pragma(inline, true); | // this._lengths = lengths; | // this._iterator = iterator; | // } | // } | | // version(LDC) | // private enum classicConstructor = true; | // else | // private enum classicConstructor = S > 0; | | // static if (classicConstructor) | // { | /// Defined for Canonical and Universal Slices (DMD, GDC, LDC) and 
for Contiguous Slices (LDC) | // this()(size_t[N] lengths, ptrdiff_t[S] strides, Iterator iterator, Labels labels) | // { | // version(LDC) pragma(inline, true); | // this._lengths = lengths; | // this._strides = strides; | // this._iterator = iterator; | // this._labels = labels; | // } | | // /// ditto | // this()(size_t[N] lengths, ptrdiff_t[S] strides, ref Iterator iterator, Labels labels) | // { | // version(LDC) pragma(inline, true); | // this._lengths = lengths; | // this._strides = strides; | // this._iterator = iterator; | // this._labels = labels; | // } | // } | | // /// Construct from null | // this()(typeof(null)) | // { | // version(LDC) pragma(inline, true); | // } | | // static if (doUnittest) | // /// | // @safe pure version(mir_test) unittest | // { | // import mir.ndslice.slice; | // alias Array = Slice!(double*); | // Array a = null; | // auto b = Array(null); | // assert(a.empty); | // assert(b.empty); | | // auto fun(Array a = null) | // { | | // } | // } | | static if (doUnittest) | /// Creates a 2-dimentional slice with custom strides. | nothrow pure | version(mir_test) unittest | { | uint[8] array = [1, 2, 3, 4, 5, 6, 7, 8]; | auto slice = Slice!(uint*, 2, Universal)([2, 2], [4, 1], array.ptr); | | assert(&slice[0, 0] == &array[0]); | assert(&slice[0, 1] == &array[1]); | assert(&slice[1, 0] == &array[4]); | assert(&slice[1, 1] == &array[5]); | assert(slice == [[1, 2], [5, 6]]); | | array[2] = 42; | assert(slice == [[1, 2], [5, 6]]); | | array[1] = 99; | assert(slice == [[1, 99], [5, 6]]); | } | | /// | auto lightScope()() scope return @property | { | return Slice!(LightScopeOf!Iterator, N, kind)(_structure, .lightScope(_iterator)); | } | | /// ditto | auto lightScope()() scope const return @property | { 0000000| return Slice!(LightConstOf!(LightScopeOf!Iterator), N, kind)(_structure, .lightScope(_iterator)); | } | | /// ditto | auto lightScope()() scope immutable return @property | { | return Slice!(LightImmutableOf!(LightScopeOf!Iterator), N, kind)(_structure, .lightScope(_iterator)); | } | | /// | Slice!(LightImmutableOf!Iterator, N, kind) lightImmutable()() scope return immutable @property | { | return typeof(return)(_structure, .lightImmutable(_iterator)); | } | | /// ditto | Slice!(LightConstOf!Iterator, N, kind) lightConst()() scope return const @property @trusted | { | return typeof(return)(_structure, .lightConst(_iterator)); | } | | /// ditto | auto lightConst()() scope return immutable @property | { | return this.lightImmutable; | } | | /// ditto | auto ref opIndex(Indexes...)(Indexes indexes) const @trusted | if (isPureSlice!Indexes || isIndexedSlice!Indexes) | { | return lightConst.opIndex(indexes); | } | | /// ditto | auto ref opIndex(Indexes...)(Indexes indexes) scope return immutable @trusted | if (isPureSlice!Indexes || isIndexedSlice!Indexes) | { | return lightImmutable.opIndex(indexes); | } | | static if (isPointer!Iterator) | { | private alias ConstThis = Slice!(const(Unqual!(PointerTarget!Iterator))*, N, kind); | private alias ImmutableThis = Slice!(immutable(Unqual!(PointerTarget!Iterator))*, N, kind); | | /++ | Cast to const and immutable slices in case of underlying range is a pointer. 
| +/ | auto toImmutable()() scope return immutable @trusted pure nothrow @nogc | { | alias It = immutable(Unqual!(PointerTarget!Iterator))*; | return Slice!(It, N, kind)(_structure, _iterator); | } | | /// ditto | auto toConst()() scope return const @trusted pure nothrow @nogc | { | version(LDC) pragma(inline, true); | alias It = const(Unqual!(PointerTarget!Iterator))*; | return Slice!(It, N, kind)(_structure, _iterator); | } | | static if (!is(Slice!(const(Unqual!(PointerTarget!Iterator))*, N, kind) == This)) | /// ditto | alias toConst this; | | static if (doUnittest) | /// | version(mir_test) unittest | { | static struct Foo | { | Slice!(int*) bar; | | int get(size_t i) immutable | { | return bar[i]; | } | | int get(size_t i) const | { | return bar[i]; | } | | int get(size_t i) inout | { | return bar[i]; | } | } | } | | static if (doUnittest) | /// | version(mir_test) unittest | { | Slice!(double*, 2, Universal) nn; | Slice!(immutable(double)*, 2, Universal) ni; | Slice!(const(double)*, 2, Universal) nc; | | const Slice!(double*, 2, Universal) cn; | const Slice!(immutable(double)*, 2, Universal) ci; | const Slice!(const(double)*, 2, Universal) cc; | | immutable Slice!(double*, 2, Universal) in_; | immutable Slice!(immutable(double)*, 2, Universal) ii; | immutable Slice!(const(double)*, 2, Universal) ic; | | nc = nc; nc = cn; nc = in_; | nc = nc; nc = cc; nc = ic; | nc = ni; nc = ci; nc = ii; | | void fun(T, size_t N)(Slice!(const(T)*, N, Universal) sl) | { | //... | } | | fun(nn); fun(cn); fun(in_); | fun(nc); fun(cc); fun(ic); | fun(ni); fun(ci); fun(ii); | | static assert(is(typeof(cn[]) == typeof(nc))); | static assert(is(typeof(ci[]) == typeof(ni))); | static assert(is(typeof(cc[]) == typeof(nc))); | | static assert(is(typeof(in_[]) == typeof(ni))); | static assert(is(typeof(ii[]) == typeof(ni))); | static assert(is(typeof(ic[]) == typeof(ni))); | | ni = ci[]; | ni = in_[]; | ni = ii[]; | ni = ic[]; | } | } | | /++ | Iterator | Returns: | Iterator (pointer) to the $(LREF Slice.first) element. | +/ | auto iterator()() inout scope return @property | { | return _iterator; | } | | /++ | +/ | auto label(size_t dimension)() scope return @trusted inout @property | if (dimension <= L) | { | return label.sliced(_lengths[dimension]); | } | | static if (kind == Contiguous && isPointer!Iterator) | /++ | `ptr` alias is available only if the slice kind is $(LREF Contiguous) contiguous and the $(LREF Slice.iterator) is a pointers. | +/ | alias ptr = iterator; | else | { | import mir.rc.array: mir_rci; | static if (kind == Contiguous && is(Iterator : mir_rci!ET, ET)) | auto ptr() scope return inout @property | { | return _iterator._iterator; | } | } | | /++ | Field (array) data. | Returns: | Raw data slice. | Constraints: | Field is defined only for contiguous slices. | +/ | auto field()() scope return @trusted @property | { | static assert(kind == Contiguous, "Slice.field is defined only for contiguous slices. Slice kind is " ~ kind.stringof); | static if (is(typeof(_iterator[size_t(0) .. elementCount]))) | { | return _iterator[size_t(0) .. 
elementCount]; | } | else | { | import mir.ndslice.topology: flattened; | return this.flattened; | } | } | | /// ditto | auto field()() scope const return @trusted @property | { | return this.lightConst.field; | } | | /// ditto | auto field()() scope immutable return @trusted @property | { | return this.lightImmutable.field; | } | | static if (doUnittest) | /// | @safe version(mir_test) unittest | { | auto arr = [1, 2, 3, 4]; | auto sl0 = arr.sliced; | auto sl1 = arr.slicedField; | | assert(sl0.field is arr); | assert(sl1.field is arr); | | arr = arr[1 .. $]; | sl0 = sl0[1 .. $]; | sl1 = sl1[1 .. $]; | | assert(sl0.field is arr); | assert(sl1.field is arr); | assert((cast(const)sl1).field is arr); | ()@trusted{ assert((cast(immutable)sl1).field is arr); }(); | } | | /++ | Returns: static array of lengths | See_also: $(LREF .Slice.structure) | +/ | size_t[N] shape()() @trusted @property scope const | { 0000000| return _lengths[0 .. N]; | } | | static if (doUnittest) | /// Regular slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | assert(iota(3, 4, 5).shape == cast(size_t[3])[3, 4, 5]); | } | | static if (doUnittest) | /// Packed slice | @safe @nogc pure nothrow | version(mir_test) unittest | { | import mir.ndslice.topology : pack, iota; | size_t[3] s = [3, 4, 5]; | assert(iota(3, 4, 5, 6, 7).pack!2.shape == s); | } | | /++ | Returns: static array of lengths | See_also: $(LREF .Slice.structure) | +/ | ptrdiff_t[N] strides()() @trusted @property scope const | { | static if (N <= S) | return _strides[0 .. N]; | else | { | typeof(return) ret; | static if (kind == Canonical) | { | foreach (i; Iota!S) | ret[i] = _strides[i]; | ret[$-1] = 1; | } | else | { | ret[$ - 1] = _stride!(N - 1); | foreach_reverse (i; Iota!(N - 1)) | ret[i] = ret[i + 1] * _lengths[i + 1]; | } | return ret; | } | } | | static if (doUnittest) | /// Regular slice | @safe @nogc pure nothrow | version(mir_test) unittest | { | import mir.ndslice.topology : iota; | size_t[3] s = [20, 5, 1]; | assert(iota(3, 4, 5).strides == s); | } | | static if (doUnittest) | /// Modified regular slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : pack, iota, universal; | import mir.ndslice.dynamic : reversed, strided, transposed; | assert(iota(3, 4, 50) | .universal | .reversed!2 //makes stride negative | .strided!2(6) //multiplies stride by 6 and changes corresponding length | .transposed!2 //brings dimension `2` to the first position | .strides == cast(ptrdiff_t[3])[-6, 200, 50]); | } | | static if (doUnittest) | /// Packed slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : pack, iota; | size_t[3] s = [20 * 42, 5 * 42, 1 * 42]; | assert(iota(3, 4, 5, 6, 7) | .pack!2 | .strides == s); | } | | /++ | Returns: static array of lengths and static array of strides | See_also: $(LREF .Slice.shape) | +/ | Structure!N structure()() @safe @property scope const | { | return typeof(return)(_lengths, strides); | } | | static if (doUnittest) | /// Regular slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | assert(iota(3, 4, 5) | .structure == Structure!3([3, 4, 5], [20, 5, 1])); | } | | static if (doUnittest) | /// Modified regular slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : pack, iota, universal; | import mir.ndslice.dynamic : reversed, strided, transposed; | assert(iota(3, 4, 50) | .universal | .reversed!2 //makes 
stride negative | .strided!2(6) //multiplies stride by 6 and changes corresponding length | .transposed!2 //brings dimension `2` to the first position | .structure == Structure!3([9, 3, 4], [-6, 200, 50])); | } | | static if (doUnittest) | /// Packed slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : pack, iota; | assert(iota(3, 4, 5, 6, 7) | .pack!2 | .structure == Structure!3([3, 4, 5], [20 * 42, 5 * 42, 1 * 42])); | } | | /++ | Save primitive. | +/ | auto save()() scope return inout @property | { | return this; | } | | static if (doUnittest) | /// Save range | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | auto slice = iota(2, 3).save; | } | | static if (doUnittest) | /// Pointer type. | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | //sl type is `Slice!(2, int*)` | auto sl = slice!int(2, 3).save; | } | | /++ | Multidimensional `length` property. | Returns: length of the corresponding dimension | See_also: $(LREF .Slice.shape), $(LREF .Slice.structure) | +/ | size_t length(size_t dimension = 0)() @safe @property scope const | if (dimension < N) | { | return _lengths[dimension]; | } | | static if (doUnittest) | /// | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | auto slice = iota(3, 4, 5); | assert(slice.length == 3); | assert(slice.length!0 == 3); | assert(slice.length!1 == 4); | assert(slice.length!2 == 5); | } | | alias opDollar = length; | | /++ | Multidimensional `stride` property. | Returns: stride of the corresponding dimension | See_also: $(LREF .Slice.structure) | +/ | sizediff_t _stride(size_t dimension = 0)() @safe @property scope const | if (dimension < N) | { | static if (dimension < S) | { | return _strides[dimension]; | } | else | static if (dimension + 1 == N) | { | return 1; | } | else | { | size_t ball = _lengths[$ - 1]; | foreach_reverse(i; Iota!(dimension + 1, N - 1)) | ball *= _lengths[i]; | return ball; | } | | } | | static if (doUnittest) | /// Regular slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | auto slice = iota(3, 4, 5); | assert(slice._stride == 20); | assert(slice._stride!0 == 20); | assert(slice._stride!1 == 5); | assert(slice._stride!2 == 1); | } | | static if (doUnittest) | /// Modified regular slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.dynamic : reversed, strided, swapped; | import mir.ndslice.topology : universal, iota; | assert(iota(3, 4, 50) | .universal | .reversed!2 //makes stride negative | .strided!2(6) //multiplies stride by 6 and changes the corresponding length | .swapped!(1, 2) //swaps dimensions `1` and `2` | ._stride!1 == -6); | } | | /++ | Multidimensional input range primitive. | +/ | bool empty(size_t dimension = 0)() @safe @property scope const | if (dimension < N) | { 0000000| return _lengths[dimension] == 0; | } | | ///ditto | static if (N == 1) | auto ref front(size_t dimension = 0)() scope return @trusted @property | if (dimension == 0) | { 0000000| assert(!empty!dimension); 0000000| return *_iterator; | } | else | Element!dimension front(size_t dimension = 0)() scope return @property | if (dimension < N) | { | typeof(return)._Structure structure_ = typeof(return)._Structure.init; | | foreach (i; Iota!(typeof(return).N)) | { | enum j = i >= dimension ? 
i + 1 : i; | structure_[0][i] = _lengths[j]; | } | | static if (!typeof(return).S || typeof(return).S + 1 == S) | alias s = _strides; | else | auto s = strides; | | foreach (i; Iota!(typeof(return).S)) | { | enum j = i >= dimension ? i + 1 : i; | structure_[1][i] = s[j]; | } | | return typeof(return)(structure_, _iterator); | } | | static if (N == 1 && isMutable!DeepElement && !hasAccessByRef) | { | ///ditto | auto ref front(size_t dimension = 0, T)(T value) scope return @trusted @property | if (dimension == 0) | { | // check assign safety | static auto ref fun(ref DeepElement t, ref T v) @safe | { | return t = v; | } | assert(!empty!dimension); | static if (__traits(compiles, *_iterator = value)) | return *_iterator = value; | else | return _iterator[0] = value; | } | } | | ///ditto | static if (N == 1) | auto ref Element!dimension | back(size_t dimension = 0)() scope return @trusted @property | if (dimension < N) | { | assert(!empty!dimension); | return _iterator[backIndex]; | } | else | auto ref Element!dimension | back(size_t dimension = 0)() scope return @trusted @property | if (dimension < N) | { | assert(!empty!dimension); | auto structure_ = typeof(return)._Structure.init; | | foreach (i; Iota!(typeof(return).N)) | { | enum j = i >= dimension ? i + 1 : i; | structure_[0][i] = _lengths[j]; | } | | static if (!typeof(return).S || typeof(return).S + 1 == S) | alias s =_strides; | else | auto s = strides; | | foreach (i; Iota!(typeof(return).S)) | { | enum j = i >= dimension ? i + 1 : i; | structure_[1][i] = s[j]; | } | | return typeof(return)(structure_, _iterator + backIndex!dimension); | } | | static if (N == 1 && isMutable!DeepElement && !hasAccessByRef) | { | ///ditto | auto ref back(size_t dimension = 0, T)(T value) scope return @trusted @property | if (dimension == 0) | { | // check assign safety | static auto ref fun(ref DeepElement t, ref T v) @safe | { | return t = v; | } | assert(!empty!dimension); | return _iterator[backIndex] = value; | } | } | | ///ditto | void popFront(size_t dimension = 0)() @trusted scope | if (dimension < N && (dimension == 0 || kind != Contiguous)) | { 0000000| assert(_lengths[dimension], __FUNCTION__ ~ ": length!" ~ dimension.stringof ~ " should be greater than 0."); 0000000| _lengths[dimension]--; | static if ((kind == Contiguous || kind == Canonical) && dimension + 1 == N) 0000000| ++_iterator; | else | static if (kind == Canonical || kind == Universal) | _iterator += _strides[dimension]; | else | _iterator += _stride!dimension; | } | | ///ditto | void popBack(size_t dimension = 0)() @safe scope | if (dimension < N && (dimension == 0 || kind != Contiguous)) | { | assert(_lengths[dimension], __FUNCTION__ ~ ": length!" ~ dimension.stringof ~ " should be greater than 0."); | --_lengths[dimension]; | } | | ///ditto | void popFrontExactly(size_t dimension = 0)(size_t n) @trusted scope | if (dimension < N && (dimension == 0 || kind != Contiguous)) | { | assert(n <= _lengths[dimension], | __FUNCTION__ ~ ": n should be less than or equal to length!" ~ dimension.stringof); | _lengths[dimension] -= n; | _iterator += _stride!dimension * n; | } | | ///ditto | void popBackExactly(size_t dimension = 0)(size_t n) @safe scope | if (dimension < N && (dimension == 0 || kind != Contiguous)) | { | assert(n <= _lengths[dimension], | __FUNCTION__ ~ ": n should be less than or equal to length!" 
~ dimension.stringof); | _lengths[dimension] -= n; | } | | ///ditto | void popFrontN(size_t dimension = 0)(size_t n) @trusted scope | if (dimension < N && (dimension == 0 || kind != Contiguous)) | { | popFrontExactly!dimension(min(n, _lengths[dimension])); | } | | ///ditto | void popBackN(size_t dimension = 0)(size_t n) @safe scope | if (dimension < N && (dimension == 0 || kind != Contiguous)) | { | popBackExactly!dimension(min(n, _lengths[dimension])); | } | | static if (doUnittest) | /// | @safe @nogc pure nothrow version(mir_test) unittest | { | import std.range.primitives; | import mir.ndslice.topology : iota, canonical; | auto slice = iota(10, 20, 30).canonical; | | static assert(isRandomAccessRange!(typeof(slice))); | static assert(hasSlicing!(typeof(slice))); | static assert(hasLength!(typeof(slice))); | | assert(slice.shape == cast(size_t[3])[10, 20, 30]); | slice.popFront; | slice.popFront!1; | slice.popBackExactly!2(4); | assert(slice.shape == cast(size_t[3])[9, 19, 26]); | | auto matrix = slice.front!1; | assert(matrix.shape == cast(size_t[2])[9, 26]); | | auto column = matrix.back!1; | assert(column.shape == cast(size_t[1])[9]); | | slice.popFrontExactly!1(slice.length!1); | assert(slice.empty == false); | assert(slice.empty!1 == true); | assert(slice.empty!2 == false); | assert(slice.shape == cast(size_t[3])[9, 0, 26]); | | assert(slice.back.front!1.empty); | | slice.popFrontN!0(40); | slice.popFrontN!2(40); | assert(slice.shape == cast(size_t[3])[0, 0, 0]); | } | | package(mir) ptrdiff_t lastIndex()() @safe @property scope const | { | static if (kind == Contiguous) | { | return elementCount - 1; | } | else | { | auto strides = strides; | ptrdiff_t shift = 0; | foreach(i; Iota!N) | shift += strides[i] * (_lengths[i] - 1); | return shift; | } | } | | static if (N > 1) | { | /// Accesses the first deep element of the slice. | auto ref first()() scope return @trusted @property | { | assert(!anyEmpty); | return *_iterator; | } | | static if (isMutable!DeepElement && !hasAccessByRef) | ///ditto | auto ref first(T)(T value) scope return @trusted @property | { | assert(!anyEmpty); | static if (__traits(compiles, *_iterator = value)) | return *_iterator = value; | else | return _iterator[0] = value; | } | | static if (doUnittest) | /// | @safe pure nothrow @nogc version(mir_test) unittest | { | import mir.ndslice.topology: iota, universal, canonical; | auto f = 5; | assert([2, 3].iota(f).first == f); | } | | /// Accesses the last deep element of the slice. | auto ref last()() @trusted scope return @property | { | assert(!anyEmpty); | return _iterator[lastIndex]; | } | | static if (isMutable!DeepElement && !hasAccessByRef) | ///ditto | auto ref last(T)(T value) @trusted scope return @property | { | assert(!anyEmpty); | return _iterator[lastIndex] = value; | } | | static if (doUnittest) | /// | @safe pure nothrow @nogc version(mir_test) unittest | { | import mir.ndslice.topology: iota; | auto f = 5; | assert([2, 3].iota(f).last == f + 2 * 3 - 1); | } | } | else | { | alias first = front; | alias last = back; | } | | /+ | Returns: `true` if for any dimension of completely unpacked slice the length equals to `0`, and `false` otherwise. | +/ | private bool anyRUEmpty()() @trusted @property scope const | { | static if (isInstanceOf!(SliceIterator, Iterator)) | { | import mir.ndslice.topology: unpack; | return this.lightScope.unpack.anyRUEmpty; | } | else | return _lengths[0 .. 
N].anyEmptyShape; | } | | | /++ | Returns: `true` if for any dimension the length equals to `0`, and `false` otherwise. | +/ | bool anyEmpty()() @trusted @property scope const | { 0000000| return _lengths[0 .. N].anyEmptyShape; | } | | static if (doUnittest) | /// | @safe pure nothrow @nogc version(mir_test) unittest | { | import mir.ndslice.topology : iota, canonical; | auto s = iota(2, 3).canonical; | assert(!s.anyEmpty); | s.popFrontExactly!1(3); | assert(s.anyEmpty); | } | | /++ | Convenience function for backward indexing. | | Returns: `this[$-index[0], $-index[1], ..., $-index[N-1]]` | +/ | auto ref backward()(size_t[N] index) scope return | { | foreach (i; Iota!N) | index[i] = _lengths[i] - index[i]; | return this[index]; | } | | /// ditto | auto ref backward()(size_t[N] index) scope return const | { | return this.lightConst.backward(index); | } | | /// ditto | auto ref backward()(size_t[N] index) scope return const | { | return this.lightConst.backward(index); | } | | static if (doUnittest) | /// | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | auto s = iota(2, 3); | assert(s[$ - 1, $ - 2] == s.backward([1, 2])); | } | | /++ | Returns: Total number of elements in a slice | +/ | size_t elementCount()() @safe @property scope const | { 0000000| size_t len = 1; | foreach (i; Iota!N) 0000000| len *= _lengths[i]; 0000000| return len; | } | | deprecated("use elementCount instead") | alias elementsCount = elementCount; | | static if (doUnittest) | /// Regular slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | assert(iota(3, 4, 5).elementCount == 60); | } | | | static if (doUnittest) | /// Packed slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : pack, evertPack, iota; | auto slice = iota(3, 4, 5, 6, 7, 8); | auto p = slice.pack!2; | assert(p.elementCount == 360); | assert(p[0, 0, 0, 0].elementCount == 56); | assert(p.evertPack.elementCount == 56); | } | | /++ | Slice selected dimension. | Params: | begin = initial index of the sub-slice (inclusive) | end = final index of the sub-slice (noninclusive) | Returns: ndslice with `length!dimension` equal to `end - begin`. | +/ | auto select(size_t dimension)(size_t begin, size_t end) scope return | { | static if (kind == Contiguous && dimension) | { | import mir.ndslice.topology: canonical; | auto ret = this.canonical; | } | else | { | auto ret = this; | } | auto len = end - begin; | assert(len <= ret._lengths[dimension]); | ret._lengths[dimension] = len; | ret._iterator += ret._stride!dimension * begin; | return ret; | } | | static if (doUnittest) | /// | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | auto sl = iota(3, 4); | assert(sl.select!1(1, 3) == sl[0 .. $, 1 .. 3]); | } | | /++ | Select the first n elements for the dimension. | Params: | dimension = Dimension to slice. | n = count of elements for the dimension | Returns: ndslice with `length!dimension` equal to `n`. 
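|    Together with `select` and `selectBack`, this gives three ways to take a sub-range along one
|    dimension; a minimal sketch (illustrative, mirroring the unittests):
|    -------
|    import mir.ndslice.topology : iota;
|    auto m = iota(3, 4);
|    assert(m.select!1(1, 3)   == m[0 .. $, 1 .. 3]);
|    assert(m.selectFront!1(2) == m[0 .. $, 0 .. 2]);
|    assert(m.selectBack!1(2)  == m[0 .. $, $ - 2 .. $]);
|    -------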
| +/ | auto selectFront(size_t dimension)(size_t n) scope return | { | static if (kind == Contiguous && dimension) | { | import mir.ndslice.topology: canonical; | auto ret = this.canonical; | } | else | { | auto ret = this; | } | assert(n <= ret._lengths[dimension]); | ret._lengths[dimension] = n; | return ret; | } | | static if (doUnittest) | /// | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | auto sl = iota(3, 4); | assert(sl.selectFront!1(2) == sl[0 .. $, 0 .. 2]); | } | | /++ | Select the last n elements for the dimension. | Params: | dimension = Dimension to slice. | n = count of elements for the dimension | Returns: ndslice with `length!dimension` equal to `n`. | +/ | auto selectBack(size_t dimension)(size_t n) scope return | { | static if (kind == Contiguous && dimension) | { | import mir.ndslice.topology: canonical; | auto ret = this.canonical; | } | else | { | auto ret = this; | } | assert(n <= ret._lengths[dimension]); | ret._iterator += ret._stride!dimension * (ret._lengths[dimension] - n); | ret._lengths[dimension] = n; | return ret; | } | | static if (doUnittest) | /// | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | auto sl = iota(3, 4); | assert(sl.selectBack!1(2) == sl[0 .. $, $ - 2 .. $]); | } | | /++ | Overloading `==` and `!=` | +/ | bool opEquals(scope const ref typeof(this) rslice) @trusted scope const | { | static if (!hasReference!(typeof(this))) | { 0000000| if (this._lengths != rslice._lengths) 0000000| return false; 0000000| if (this._iterator == rslice._iterator) 0000000| return true; | } | import mir.algorithm.iteration : equal; | static if (__traits(compiles, this.lightScope)) 0000000| return equal(this.lightScope, rslice.lightScope); | else | return equal(*cast(This*)&this, *cast(This*)&rslice); | } | | ///ditto | bool opEquals(IteratorR, SliceKind rkind)(auto ref const Slice!(IteratorR, N, rkind) rslice) @trusted scope const | { | static if ( | !hasReference!(typeof(this)) | && !hasReference!(typeof(rslice)) | && __traits(compiles, this._iterator == rslice._iterator) | ) | { | if (this._lengths != rslice._lengths) | return false; | if (this._iterator == rslice._iterator) | return true; | } | import mir.algorithm.iteration : equal; | return equal(this.lightScope, rslice.lightScope); | } | | /// ditto | bool opEquals(T)(scope const(T)[] arr) @trusted scope const | { | auto slice = this.lightConst; | if (slice.length != arr.length) | return false; | if (arr.length) do | { | if (slice.front != arr[0]) | return false; | slice.popFront; | arr = arr[1 .. $]; | } | while (arr.length); | return true; | } | | static if (doUnittest) | /// | @safe pure nothrow | version(mir_test) unittest | { | auto a = [1, 2, 3, 4].sliced(2, 2); | | assert(a != [1, 2, 3, 4, 5, 6].sliced(2, 3)); | assert(a != [[1, 2, 3], [4, 5, 6]]); | | assert(a == [1, 2, 3, 4].sliced(2, 2)); | assert(a == [[1, 2], [3, 4]]); | | assert(a != [9, 2, 3, 4].sliced(2, 2)); | assert(a != [[9, 2], [3, 4]]); | } | | static if (doUnittest) | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation: slice; | import mir.ndslice.topology : iota; | assert(iota(2, 3).slice[0 .. $ - 2] == iota([4, 3], 2)[0 .. $ - 4]); | } | | /++ | `Slice!(IotaIterator!size_t)` is the basic type for `[a .. b]` syntax for all ndslice based code. | +/ | Slice!(IotaIterator!size_t) opSlice(size_t dimension)(size_t i, size_t j) @safe scope const | if (dimension < N) | in | { | assert(i <= j, | "Slice.opSlice!" 
~ dimension.stringof ~ ": the left opSlice boundary must be less than or equal to the right bound."); | enum errorMsg = ": right opSlice boundary must equal to the length of the given dimension."; | assert(j <= _lengths[dimension], | "Slice.opSlice!" ~ dimension.stringof ~ errorMsg); | } | body | { | return typeof(return)(j - i, typeof(return).Iterator(i)); | } | | /++ | $(BOLD Fully defined index) | +/ | auto ref opIndex()(size_t[N] _indexes...) scope return @trusted | { | return _iterator[indexStride(_indexes)]; | } | | /// ditto | auto ref opIndex()(size_t[N] _indexes...) scope return const @trusted | { | static if (is(typeof(_iterator[indexStride(_indexes)]))) | return _iterator[indexStride(_indexes)]; | else | return .lightConst(.lightScope(_iterator))[indexStride(_indexes)]; | } | | /// ditto | auto ref opIndex()(size_t[N] _indexes...) scope return immutable @trusted | { | static if (is(typeof(_iterator[indexStride(_indexes)]))) | return _iterator[indexStride(_indexes)]; | else | return .lightImmutable(.lightScope(_iterator))[indexStride(_indexes)]; | } | | /++ | $(BOLD Partially defined index) | +/ | auto opIndex(size_t I)(size_t[I] _indexes...) scope return @trusted | if (I && I < N) | { | enum size_t diff = N - I; | alias Ret = Slice!(Iterator, diff, diff == 1 && kind == Canonical ? Contiguous : kind); | static if (I < S) | return Ret(_lengths[I .. N], _strides[I .. S], _iterator + indexStride(_indexes)); | else | return Ret(_lengths[I .. N], _iterator + indexStride(_indexes)); | } | | /// ditto | auto opIndex(size_t I)(size_t[I] _indexes...) scope return const | if (I && I < N) | { | return this.lightConst.opIndex(_indexes); | } | | /// ditto | auto opIndex(size_t I)(size_t[I] _indexes...) scope return immutable | if (I && I < N) | { | return this.lightImmutable.opIndex(_indexes); | } | | /++ | $(BOLD Partially or fully defined slice.) | +/ | auto opIndex(Slices...)(Slices slices) scope return @trusted | if (isPureSlice!Slices) | { | static if (Slices.length) | { | enum size_t j(size_t n) = n - Filter!(isIndex, Slices[0 .. n]).length; | enum size_t F = PureIndexLength!Slices; | enum size_t S = Slices.length; | static assert(N - F > 0); | size_t stride; | static if (Slices.length == 1) | enum K = kind; | else | static if (kind == Universal || Slices.length == N && isIndex!(Slices[$-1])) | enum K = Universal; | else | static if (Filter!(isIndex, Slices[0 .. $-1]).length == Slices.length - 1 || N - F == 1) | enum K = Contiguous; | else | enum K = Canonical; | alias Ret = Slice!(Iterator, N - F, K); | auto structure_ = Ret._Structure.init; | | enum bool shrink = kind == Canonical && slices.length == N; | static if (shrink) | { | { | enum i = Slices.length - 1; | auto slice = slices[i]; | static if (isIndex!(Slices[i])) | { | assert(slice < _lengths[i], "Slice.opIndex: index must be less than length"); | stride += slice; | } | else | { | stride += slice._iterator._index; | structure_[0][j!i] = slice._lengths[0]; | } | } | } | static if (kind == Universal || kind == Canonical) | { | foreach_reverse (i, slice; slices[0 .. 
$ - shrink]) //static | { | static if (isIndex!(Slices[i])) | { | assert(slice < _lengths[i], "Slice.opIndex: index must be less than length"); | stride += _strides[i] * slice; | } | else | { | stride += _strides[i] * slice._iterator._index; | structure_[0][j!i] = slice._lengths[0]; | structure_[1][j!i] = _strides[i]; | } | } | } | else | { | ptrdiff_t ball = this._stride!(slices.length - 1); | foreach_reverse (i, slice; slices) //static | { | static if (isIndex!(Slices[i])) | { | assert(slice < _lengths[i], "Slice.opIndex: index must be less than length"); | stride += ball * slice; | } | else | { | stride += ball * slice._iterator._index; | structure_[0][j!i] = slice._lengths[0]; | static if (j!i < Ret.S) | structure_[1][j!i] = ball; | } | static if (i) | ball *= _lengths[i]; | } | } | foreach (i; Iota!(Slices.length, N)) | structure_[0][i - F] = _lengths[i]; | foreach (i; Iota!(Slices.length, N)) | static if (Ret.S > i - F) | structure_[1][i - F] = _strides[i]; | | return Ret(structure_, _iterator + stride); | } | else | { | return this; | } | } | | static if (doUnittest) | /// | pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto slice = slice!int(5, 3); | | /// Fully defined slice | assert(slice[] == slice); | auto sublice = slice[0..$-2, 1..$]; | | /// Partially defined slice | auto row = slice[3]; | auto col = slice[0..$, 1]; | } | | /++ | $(BOLD Indexed slice.) | +/ | auto opIndex(Slices...)(scope return Slices slices) scope return | if (isIndexedSlice!Slices) | { | import mir.ndslice.topology: indexed, cartesian; | static if (Slices.length == 1) | alias index = slices[0]; | else | auto index = slices.cartesian; | return this.indexed(index); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation: slice; | auto sli = slice!int(4, 3); | auto idx = slice!(size_t[2])(3); | idx[] = [ | cast(size_t[2])[0, 2], | cast(size_t[2])[3, 1], | cast(size_t[2])[2, 0]]; | | // equivalent to: | // import mir.ndslice.topology: indexed; | // sli.indexed(indx)[] = 1; | sli[idx][] = 1; | | assert(sli == [ | [0, 0, 1], | [0, 0, 0], | [1, 0, 0], | [0, 1, 0], | ]); | | foreach (row; sli[[1, 3].sliced]) | row[] += 2; | | assert(sli == [ | [0, 0, 1], | [2, 2, 2], // <-- += 2 | [1, 0, 0], | [2, 3, 2], // <-- += 2 | ]); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology: iota; | import mir.ndslice.allocation: slice; | auto sli = slice!int(5, 6); | | // equivalent to | // import mir.ndslice.topology: indexed, cartesian; | // auto a = [0, sli.length!0 / 2, sli.length!0 - 1].sliced; | // auto b = [0, sli.length!1 / 2, sli.length!1 - 1].sliced; | // auto c = cartesian(a, b); | // auto minor = sli.indexed(c); | auto minor = sli[[0, $ / 2, $ - 1].sliced, [0, $ / 2, $ - 1].sliced]; | | minor[] = iota!int([3, 3], 1); | | assert(sli == [ | // ↓ ↓ ↓︎ | [1, 0, 0, 2, 0, 3], // <--- | [0, 0, 0, 0, 0, 0], | [4, 0, 0, 5, 0, 6], // <--- | [0, 0, 0, 0, 0, 0], | [7, 0, 0, 8, 0, 9], // <--- | ]); | } | | /++ | Element-wise binary operator overloading. | Returns: | lazy slice of the same kind and the same structure | Note: | Does not allocate neither new slice nor a closure. 
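|    A minimal sketch (illustrative):
|    -------
|    import mir.ndslice.topology : iota, map;
|    auto s = iota([4], 1);     // 1 2 3 4
|    assert(-s == s.map!"-a");  // lazy element-wise negation; nothing is allocated
|    -------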
| +/ | auto opUnary(string op)() scope return | if (op == "*" || op == "~" || op == "-" || op == "+") | { | import mir.ndslice.topology: map; | static if (op == "+") | return this; | else | return this.map!(op ~ "a"); | } | | static if (doUnittest) | /// | version(mir_test) unittest | { | import mir.ndslice.topology; | | auto payload = [1, 2, 3, 4]; | auto s = iota([payload.length], payload.ptr); // slice of references; | assert(s[1] == payload.ptr + 1); | | auto c = *s; // the same as s.map!"*a" | assert(c[1] == *s[1]); | | *s[1] = 3; | assert(c[1] == *s[1]); | } | | /++ | Element-wise operator overloading for scalars. | Params: | value = a scalar | Returns: | lazy slice of the same kind and the same structure | Note: | Does not allocate neither new slice nor a closure. | +/ | auto opBinary(string op, T)(scope return T value) scope return | if(!isSlice!T) | { | import mir.ndslice.topology: vmap; | return this.vmap(LeftOp!(op, ImplicitlyUnqual!T)(value)); | } | | /// ditto | auto opBinaryRight(string op, T)(scope return T value) scope return | if(!isSlice!T) | { | import mir.ndslice.topology: vmap; | return this.vmap(RightOp!(op, ImplicitlyUnqual!T)(value)); | } | | static if (doUnittest) | /// | @safe pure nothrow @nogc version(mir_test) unittest | { | import mir.ndslice.topology; | | // 0 1 2 3 | auto s = iota([4]); | // 0 1 2 0 | assert(s % 3 == iota([4]).map!"a % 3"); | // 0 2 4 6 | assert(2 * s == iota([4], 0, 2)); | } | | static if (doUnittest) | /// | @safe pure nothrow @nogc version(mir_test) unittest | { | import mir.ndslice.topology; | | // 0 1 2 3 | auto s = iota([4]); | // 0 1 4 9 | assert(s ^^ 2.0 == iota([4]).map!"a ^^ 2.0"); | } | | /++ | Element-wise operator overloading for slices. | Params: | rhs = a slice of the same shape. | Returns: | lazy slice the same shape that has $(LREF Contiguous) kind | Note: | Binary operator overloading is allowed if both slices are contiguous or one-dimensional. | $(BR) | Does not allocate neither new slice nor a closure. | +/ | auto opBinary(string op, RIterator, size_t RN, SliceKind rkind) | (scope return Slice!(RIterator, RN, rkind) rhs) scope return | if(N == RN && (kind == Contiguous && rkind == Contiguous || N == 1) && op != "~") | { | import mir.ndslice.topology: zip, map; | return zip(this, rhs).map!("a " ~ op ~ " b"); | } | | static if (doUnittest) | /// | @safe pure nothrow @nogc version(mir_test) unittest | { | import mir.ndslice.topology: iota, map, zip; | | auto s = iota([2, 3]); | auto c = iota([2, 3], 5, 8); | assert(s * s + c == s.map!"a * a".zip(c).map!"a + b"); | } | | /++ | Duplicates slice. | Returns: GC-allocated Contiguous mutable slice. 
| See_also: $(LREF Slice.idup) | +/ | Slice!(Unqual!DeepElement*, N) | dup()() scope @property | { | if (__ctfe) | { | import mir.ndslice.topology: flattened; | import mir.array.allocation: array; | return this.flattened.array.dup.sliced(this.shape); | } | else | { | import mir.ndslice.allocation: uninitSlice; | import mir.conv: emplaceRef; | alias E = this.DeepElement; | | auto result = (() @trusted => this.shape.uninitSlice!(Unqual!E))(); | | import mir.algorithm.iteration: each; | each!(emplaceRef!(Unqual!E))(result, this); | | return result; | } | } | | /// ditto | Slice!(immutable(DeepElement)*, N) | dup()() scope const @property | { | this.lightScope.dup; | } | | /// ditto | Slice!(immutable(DeepElement)*, N) | dup()() scope immutable @property | { | this.lightScope.dup; | } | | static if (doUnittest) | /// | @safe pure version(mir_test) unittest | { | import mir.ndslice; | auto x = 3.iota!int; | Slice!(immutable(int)*) imm = x.idup; | Slice!(int*) mut = imm.dup; | assert(imm == x); | assert(mut == x); | } | | /++ | Duplicates slice. | Returns: GC-allocated Contiguous immutable slice. | See_also: $(LREF Slice.dup) | +/ | Slice!(immutable(DeepElement)*, N) | idup()() scope @property | { | if (__ctfe) | { | import mir.ndslice.topology: flattened; | import mir.array.allocation: array; | return this.flattened.array.idup.sliced(this.shape); | } | else | { | import mir.ndslice.allocation: uninitSlice; | import mir.conv: emplaceRef; | alias E = this.DeepElement; | | auto result = (() @trusted => this.shape.uninitSlice!(Unqual!E))(); | | import mir.algorithm.iteration: each; | each!(emplaceRef!(immutable E))(result, this); | alias R = typeof(return); | return (() @trusted => cast(R) result)(); | } | } | | /// ditto | Slice!(immutable(DeepElement)*, N) | idup()() scope const @property | { | this.lightScope.idup; | } | | /// ditto | Slice!(immutable(DeepElement)*, N) | idup()() scope immutable @property | { | this.lightScope.idup; | } | | static if (doUnittest) | /// | @safe pure version(mir_test) unittest | { | import mir.ndslice; | auto x = 3.iota!int; | Slice!(int*) mut = x.dup; | Slice!(immutable(int)*) imm = mut.idup; | assert(imm == x); | assert(mut == x); | } | | static if (isMutable!DeepElement) | { | private void opIndexOpAssignImplSlice(string op, RIterator, size_t RN, SliceKind rkind) | (scope Slice!(RIterator, RN, rkind) value) scope | { | static if (N > 1 && RN == N && kind == Contiguous && rkind == Contiguous) | { | import mir.ndslice.topology : flattened; | this.flattened.opIndexOpAssignImplSlice!op(value.flattened); | } | else | { | auto ls = this; | do | { | static if (N > RN) | { | ls.front.opIndexOpAssignImplSlice!op(value); | } | else | { | static if (ls.N == 1) | { | static if (isInstanceOf!(SliceIterator, Iterator)) | { | static if (isSlice!(typeof(value.front))) | ls.front.opIndexOpAssignImplSlice!op(value.front); | else | static if (isDynamicArray!(typeof(value.front))) | ls.front.opIndexOpAssignImplSlice!op(value.front); | else | ls.front.opIndexOpAssignImplValue!op(value.front); | } | else | static if (op == "^^" && isFloatingPoint!(typeof(ls.front)) && isFloatingPoint!(typeof(value.front))) | { | import mir.math.common: pow; | ls.front = pow(ls.front, value.front); | } | else | mixin("ls.front " ~ op ~ "= value.front;"); | } | else | static if (RN == 1) | ls.front.opIndexOpAssignImplValue!op(value.front); | else | ls.front.opIndexOpAssignImplSlice!op(value.front); | value.popFront; | } | ls.popFront; | } | while (ls._lengths[0]); | } | } | | /++ | Assignment of a 
value of `Slice` type to a $(B fully defined slice). | +/ | void opIndexAssign(RIterator, size_t RN, SliceKind rkind, Slices...) | (scope Slice!(RIterator, RN, rkind) value, Slices slices) scope return | if (isFullPureSlice!Slices || isIndexedSlice!Slices) | { | auto sl = this.lightScope.opIndex(slices); | assert(_checkAssignLengths(sl, value)); | if(!sl.anyRUEmpty) | sl.opIndexOpAssignImplSlice!""(value); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | auto b = [1, 2, 3, 4].sliced(2, 2); | | a[0..$, 0..$-1] = b; | assert(a == [[1, 2, 0], [3, 4, 0]]); | | // fills both rows with b[0] | a[0..$, 0..$-1] = b[0]; | assert(a == [[1, 2, 0], [1, 2, 0]]); | | a[1, 0..$-1] = b[1]; | assert(a[1] == [3, 4, 0]); | | a[1, 0..$-1][] = b[0]; | assert(a[1] == [1, 2, 0]); | } | | static if (doUnittest) | /// Left slice is packed | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : blocks, iota; | import mir.ndslice.allocation : slice; | auto a = slice!int(4, 4); | a.blocks(2, 2)[] = iota!int(2, 2); | | assert(a == | [[0, 0, 1, 1], | [0, 0, 1, 1], | [2, 2, 3, 3], | [2, 2, 3, 3]]); | } | | static if (doUnittest) | /// Both slices are packed | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : blocks, iota, pack; | import mir.ndslice.allocation : slice; | auto a = slice!int(4, 4); | a.blocks(2, 2)[] = iota!int(2, 2, 2).pack!1; | | assert(a == | [[0, 1, 2, 3], | [0, 1, 2, 3], | [4, 5, 6, 7], | [4, 5, 6, 7]]); | } | | void opIndexOpAssignImplArray(string op, T, Slices...)(T[] value) scope | { | auto ls = this; | assert(ls.length == value.length, __FUNCTION__ ~ ": argument must have the same length."); | static if (N == 1) | { | do | { | static if (ls.N == 1) | { | static if (isInstanceOf!(SliceIterator, Iterator)) | { | static if (isSlice!(typeof(value[0]))) | ls.front.opIndexOpAssignImplSlice!op(value[0]); | else | static if (isDynamicArray!(typeof(value[0]))) | ls.front.opIndexOpAssignImplSlice!op(value[0]); | else | ls.front.opIndexOpAssignImplValue!op(value[0]); | } | else | static if (op == "^^" && isFloatingPoint!(typeof(ls.front)) && isFloatingPoint!(typeof(value[0]))) | { | import mir.math.common: pow; | ls.front = pow(ls.front, value[0]); | } | else | mixin("ls.front " ~ op ~ "= value[0];"); | } | else | mixin("ls.front[] " ~ op ~ "= value[0];"); | value = value[1 .. $]; | ls.popFront; | } | while (ls.length); | } | else | static if (N == DynamicArrayDimensionsCount!(T[])) | { | do | { | ls.front.opIndexOpAssignImplArray!op(value[0]); | value = value[1 .. $]; | ls.popFront; | } | while (ls.length); | } | else | { | do | { | ls.front.opIndexOpAssignImplArray!op(value); | ls.popFront; | } | while (ls.length); | } | } | | /++ | Assignment of a regular multidimensional array to a $(B fully defined slice). 
| +/ | void opIndexAssign(T, Slices...)(T[] value, Slices slices) scope return | if ((isFullPureSlice!Slices || isIndexedSlice!Slices) | && !isDynamicArray!DeepElement | && DynamicArrayDimensionsCount!(T[]) <= typeof(this.opIndex(slices)).N) | { | auto sl = this.lightScope.opIndex(slices); | sl.opIndexOpAssignImplArray!""(value); | } | | static if (doUnittest) | /// | pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | auto b = [[1, 2], [3, 4]]; | | a[] = [[1, 2, 3], [4, 5, 6]]; | assert(a == [[1, 2, 3], [4, 5, 6]]); | | a[0..$, 0..$-1] = [[1, 2], [3, 4]]; | assert(a == [[1, 2, 3], [3, 4, 6]]); | | a[0..$, 0..$-1] = [1, 2]; | assert(a == [[1, 2, 3], [1, 2, 6]]); | | a[1, 0..$-1] = [3, 4]; | assert(a[1] == [3, 4, 6]); | | a[1, 0..$-1][] = [3, 4]; | assert(a[1] == [3, 4, 6]); | } | | static if (doUnittest) | /// Packed slices | pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : blocks; | auto a = slice!int(4, 4); | a.blocks(2, 2)[] = [[0, 1], [2, 3]]; | | assert(a == | [[0, 0, 1, 1], | [0, 0, 1, 1], | [2, 2, 3, 3], | [2, 2, 3, 3]]); | } | | | private void opIndexOpAssignImplConcatenation(string op, T)(T value) scope | { | auto sl = this; | static if (concatenationDimension!T) | { | if (!sl.empty) do | { | static if (op == "") | sl.front.opIndexAssign(value.front); | else | sl.front.opIndexOpAssign!op(value.front); | value.popFront; | sl.popFront; | } | while(!sl.empty); | } | else | { | foreach (ref slice; value._slices) | { | static if (op == "") | sl[0 .. slice.length].opIndexAssign(slice); | else | sl[0 .. slice.length].opIndexOpAssign!op(slice); | | sl = sl[slice.length .. $]; | } | assert(sl.empty); | } | } | | /// | void opIndexAssign(T, Slices...)(T concatenation, Slices slices) scope return | if ((isFullPureSlice!Slices || isIndexedSlice!Slices) && isConcatenation!T) | { | auto sl = this.lightScope.opIndex(slices); | static assert(typeof(sl).N == T.N, "incompatible dimension count"); | sl.opIndexOpAssignImplConcatenation!""(concatenation); | } | | /++ | Assignment of a value (e.g. a number) to a $(B fully defined slice). | +/ | void opIndexAssign(T, Slices...)(T value, Slices slices) scope return | if ((isFullPureSlice!Slices || isIndexedSlice!Slices) | && (!isDynamicArray!T || isDynamicArray!DeepElement) | && !isSlice!T | && !isConcatenation!T) | { | auto sl = this.lightScope.opIndex(slices); | if(!sl.anyRUEmpty) | sl.opIndexOpAssignImplValue!""(value); | } | | static if (doUnittest) | /// | @safe pure nothrow | version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | | a[] = 9; | assert(a == [[9, 9, 9], [9, 9, 9]]); | | a[0..$, 0..$-1] = 1; | assert(a == [[1, 1, 9], [1, 1, 9]]); | | a[0..$, 0..$-1] = 2; | assert(a == [[2, 2, 9], [2, 2, 9]]); | | a[1, 0..$-1] = 3; | //assert(a[1] == [3, 3, 9]); | | a[1, 0..$-1] = 4; | //assert(a[1] == [4, 4, 9]); | | a[1, 0..$-1][] = 5; | | assert(a[1] == [5, 5, 9]); | } | | static if (doUnittest) | /// Packed slices have the same behavior. | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | import mir.ndslice.topology : pack; | auto a = slice!int(2, 3).pack!1; | | a[] = 9; | //assert(a == [[9, 9, 9], [9, 9, 9]]); | } | | /++ | Assignment of a value (e.g. a number) to a $(B fully defined index). | +/ | auto ref opIndexAssign(T)(T value, size_t[N] _indexes...) 
scope return @trusted | { | // check assign safety | static auto ref fun(ref DeepElement t, ref T v) @safe | { | return t = v; | } | return _iterator[indexStride(_indexes)] = value; | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | | a[1, 2] = 3; | assert(a[1, 2] == 3); | } | | static if (doUnittest) | @safe pure nothrow version(mir_test) unittest | { | auto a = new int[6].sliced(2, 3); | | a[[1, 2]] = 3; | assert(a[[1, 2]] == 3); | } | | /++ | Op Assignment `op=` of a value (e.g. a number) to a $(B fully defined index). | +/ | auto ref opIndexOpAssign(string op, T)(T value, size_t[N] _indexes...) scope return @trusted | { | // check op safety | static auto ref fun(ref DeepElement t, ref T v) @safe | { | return mixin(`t` ~ op ~ `= v`); | } | auto str = indexStride(_indexes); | static if (op == "^^" && isFloatingPoint!DeepElement && isFloatingPoint!(typeof(value))) | { | import mir.math.common: pow; | _iterator[str] = pow(_iterator[str], value); | } | else | return mixin (`_iterator[str] ` ~ op ~ `= value`); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | | a[1, 2] += 3; | assert(a[1, 2] == 3); | } | | static if (doUnittest) | @safe pure nothrow version(mir_test) unittest | { | auto a = new int[6].sliced(2, 3); | | a[[1, 2]] += 3; | assert(a[[1, 2]] == 3); | } | | /++ | Op Assignment `op=` of a value of `Slice` type to a $(B fully defined slice). | +/ | void opIndexOpAssign(string op, RIterator, SliceKind rkind, size_t RN, Slices...) | (Slice!(RIterator, RN, rkind) value, Slices slices) scope return | if (isFullPureSlice!Slices || isIndexedSlice!Slices) | { | auto sl = this.lightScope.opIndex(slices); | assert(_checkAssignLengths(sl, value)); | if(!sl.anyRUEmpty) | sl.opIndexOpAssignImplSlice!op(value); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | auto b = [1, 2, 3, 4].sliced(2, 2); | | a[0..$, 0..$-1] += b; | assert(a == [[1, 2, 0], [3, 4, 0]]); | | a[0..$, 0..$-1] += b[0]; | assert(a == [[2, 4, 0], [4, 6, 0]]); | | a[1, 0..$-1] += b[1]; | assert(a[1] == [7, 10, 0]); | | a[1, 0..$-1][] += b[0]; | assert(a[1] == [8, 12, 0]); | } | | static if (doUnittest) | /// Left slice is packed | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : blocks, iota; | auto a = slice!size_t(4, 4); | a.blocks(2, 2)[] += iota(2, 2); | | assert(a == | [[0, 0, 1, 1], | [0, 0, 1, 1], | [2, 2, 3, 3], | [2, 2, 3, 3]]); | } | | static if (doUnittest) | /// Both slices are packed | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : blocks, iota, pack; | auto a = slice!size_t(4, 4); | a.blocks(2, 2)[] += iota(2, 2, 2).pack!1; | | assert(a == | [[0, 1, 2, 3], | [0, 1, 2, 3], | [4, 5, 6, 7], | [4, 5, 6, 7]]); | } | | /++ | Op Assignment `op=` of a regular multidimensional array to a $(B fully defined slice). 
| +/ | void opIndexOpAssign(string op, T, Slices...)(T[] value, Slices slices) scope return | if (isFullPureSlice!Slices | && !isDynamicArray!DeepElement | && DynamicArrayDimensionsCount!(T[]) <= typeof(this.opIndex(slices)).N) | { | auto sl = this.lightScope.opIndex(slices); | sl.opIndexOpAssignImplArray!op(value); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation : slice; | auto a = slice!int(2, 3); | | a[0..$, 0..$-1] += [[1, 2], [3, 4]]; | assert(a == [[1, 2, 0], [3, 4, 0]]); | | a[0..$, 0..$-1] += [1, 2]; | assert(a == [[2, 4, 0], [4, 6, 0]]); | | a[1, 0..$-1] += [3, 4]; | assert(a[1] == [7, 10, 0]); | | a[1, 0..$-1][] += [1, 2]; | assert(a[1] == [8, 12, 0]); | } | | static if (doUnittest) | /// Packed slices | @safe pure nothrow | version(mir_test) unittest | { | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : blocks; | auto a = slice!int(4, 4); | a.blocks(2, 2)[].opIndexOpAssign!"+"([[0, 1], [2, 3]]); | | assert(a == | [[0, 0, 1, 1], | [0, 0, 1, 1], | [2, 2, 3, 3], | [2, 2, 3, 3]]); | } | | private void opIndexOpAssignImplValue(string op, T)(T value) scope return | { | static if (N > 1 && kind == Contiguous) | { | import mir.ndslice.topology : flattened; | this.flattened.opIndexOpAssignImplValue!op(value); | } | else | { | auto ls = this; | do | { | static if (N == 1) | { | static if (isInstanceOf!(SliceIterator, Iterator)) | ls.front.opIndexOpAssignImplValue!op(value); | else | mixin (`ls.front ` ~ op ~ `= value;`); | } | else | ls.front.opIndexOpAssignImplValue!op(value); | ls.popFront; | } | while(ls._lengths[0]); | } | } | | /++ | Op Assignment `op=` of a value (e.g. a number) to a $(B fully defined slice). | +/ | void opIndexOpAssign(string op, T, Slices...)(T value, Slices slices) scope return | if ((isFullPureSlice!Slices || isIndexedSlice!Slices) | && (!isDynamicArray!T || isDynamicArray!DeepElement) | && !isSlice!T | && !isConcatenation!T) | { | auto sl = this.lightScope.opIndex(slices); | if(!sl.anyRUEmpty) | sl.opIndexOpAssignImplValue!op(value); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | | a[] += 1; | assert(a == [[1, 1, 1], [1, 1, 1]]); | | a[0..$, 0..$-1] += 2; | assert(a == [[3, 3, 1], [3, 3, 1]]); | | a[1, 0..$-1] += 3; | assert(a[1] == [6, 6, 1]); | } | | /// | void opIndexOpAssign(string op,T, Slices...)(T concatenation, Slices slices) scope return | if ((isFullPureSlice!Slices || isIndexedSlice!Slices) && isConcatenation!T) | { | auto sl = this.lightScope.opIndex(slices); | static assert(typeof(sl).N == concatenation.N); | sl.opIndexOpAssignImplConcatenation!op(concatenation); | } | | static if (doUnittest) | /// Packed slices have the same behavior. | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | import mir.ndslice.topology : pack; | auto a = slice!int(2, 3).pack!1; | | a[] += 9; | assert(a == [[9, 9, 9], [9, 9, 9]]); | } | | | /++ | Increment `++` and Decrement `--` operators for a $(B fully defined index). | +/ | auto ref opIndexUnary(string op)(size_t[N] _indexes...) 
scope return | @trusted | // @@@workaround@@@ for Issue 16473 | //if (op == `++` || op == `--`) | { | // check op safety | static auto ref fun(DeepElement t) @safe | { | return mixin(op ~ `t`); | } | return mixin (op ~ `_iterator[indexStride(_indexes)]`); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | | ++a[1, 2]; | assert(a[1, 2] == 1); | } | | // Issue 16473 | static if (doUnittest) | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto sl = slice!double(2, 5); | auto d = -sl[0, 1]; | } | | static if (doUnittest) | @safe pure nothrow version(mir_test) unittest | { | auto a = new int[6].sliced(2, 3); | | ++a[[1, 2]]; | assert(a[[1, 2]] == 1); | } | | private void opIndexUnaryImpl(string op, Slices...)(Slices slices) scope | { | auto ls = this; | do | { | static if (N == 1) | { | static if (isInstanceOf!(SliceIterator, Iterator)) | ls.front.opIndexUnaryImpl!op; | else | mixin (op ~ `ls.front;`); | } | else | ls.front.opIndexUnaryImpl!op; | ls.popFront; | } | while(ls._lengths[0]); | } | | /++ | Increment `++` and Decrement `--` operators for a $(B fully defined slice). | +/ | void opIndexUnary(string op, Slices...)(Slices slices) scope return | if (isFullPureSlice!Slices && (op == `++` || op == `--`)) | { | auto sl = this.lightScope.opIndex(slices); | if (!sl.anyRUEmpty) | sl.opIndexUnaryImpl!op; | } | | static if (doUnittest) | /// | @safe pure nothrow | version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | | ++a[]; | assert(a == [[1, 1, 1], [1, 1, 1]]); | | --a[1, 0..$-1]; | | assert(a[1] == [0, 0, 1]); | } | } |} | |/// ditto |alias Slice = mir_slice; | |/++ |Slicing, indexing, and arithmetic operations. |+/ |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.dynamic : transposed; | import mir.ndslice.topology : iota, universal; | auto tensor = iota(3, 4, 5).slice; | | assert(tensor[1, 2] == tensor[1][2]); | assert(tensor[1, 2, 3] == tensor[1][2][3]); | | assert( tensor[0..$, 0..$, 4] == tensor.universal.transposed!2[4]); | assert(&tensor[0..$, 0..$, 4][1, 2] is &tensor[1, 2, 4]); | | tensor[1, 2, 3]++; //`opIndex` returns value by reference. | --tensor[1, 2, 3]; //`opUnary` | | ++tensor[]; | tensor[] -= 1; | | // `opIndexAssing` accepts only fully defined indexes and slices. | // Use an additional empty slice `[]`. | static assert(!__traits(compiles, tensor[0 .. 2] *= 2)); | | tensor[0 .. 2][] *= 2; //OK, empty slice | tensor[0 .. 2, 3, 0..$] /= 2; //OK, 3 index or slice positions are defined. | | //fully defined index may be replaced by a static array | size_t[3] index = [1, 2, 3]; | assert(tensor[index] == tensor[1, 2, 3]); |} | |/++ |Operations with rvalue slices. 
|+/ |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.topology: universal; | import mir.ndslice.dynamic: transposed, everted; | | auto tensor = slice!int(3, 4, 5).universal; | auto matrix = slice!int(3, 4).universal; | auto vector = slice!int(3); | | foreach (i; 0..3) | vector[i] = i; | | // fills matrix columns | matrix.transposed[] = vector; | | // fills tensor with vector | // transposed tensor shape is (4, 5, 3) | // vector shape is ( 3) | tensor.transposed!(1, 2)[] = vector; | | // transposed tensor shape is (5, 3, 4) | // matrix shape is ( 3, 4) | tensor.transposed!2[] += matrix; | | // transposed tensor shape is (5, 4, 3) | // transposed matrix shape is ( 4, 3) | tensor.everted[] ^= matrix.transposed; // XOR |} | |/++ |Creating a slice from text. |See also $(MREF std, format). |+/ |version(mir_test) unittest |{ | import mir.ndslice.allocation; | import std.algorithm, std.conv, std.exception, std.format, | std.functional, std.string, std.range; | | Slice!(int*, 2) toMatrix(string str) | { | string[][] data = str.lineSplitter.filter!(not!empty).map!split.array; | | size_t rows = data .length.enforce("empty input"); | size_t columns = data[0].length.enforce("empty first row"); | | data.each!(a => enforce(a.length == columns, "rows have different lengths")); | auto slice = slice!int(rows, columns); | foreach (i, line; data) | foreach (j, num; line) | slice[i, j] = num.to!int; | return slice; | } | | auto input = "\r1 2 3\r\n 4 5 6\n"; | | auto matrix = toMatrix(input); | assert(matrix == [[1, 2, 3], [4, 5, 6]]); | | // back to text | auto text2 = format("%(%(%s %)\n%)\n", matrix); | assert(text2 == "1 2 3\n4 5 6\n"); |} | |// Slicing |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | auto a = iota(10, 20, 30, 40); | auto b = a[0..$, 10, 4 .. 27, 4]; | auto c = b[2 .. 9, 5 .. 10]; | auto d = b[3..$, $-2]; | assert(b[4, 17] == a[4, 10, 21, 4]); | assert(c[1, 2] == a[3, 10, 11, 4]); | assert(d[3] == a[6, 10, 25, 4]); |} | |// Operator overloading. # 1 |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.topology : iota; | | auto fun(ref sizediff_t x) { x *= 3; } | | auto tensor = iota(8, 9, 10).slice; | | ++tensor[]; | fun(tensor[0, 0, 0]); | | assert(tensor[0, 0, 0] == 3); | | tensor[0, 0, 0] *= 4; | tensor[0, 0, 0]--; | assert(tensor[0, 0, 0] == 11); |} | |// Operator overloading. # 2 |pure nothrow version(mir_test) unittest |{ | import std.algorithm.iteration : map; | import mir.array.allocation : array; | //import std.bigint; | import std.range : iota; | | auto matrix = 72 | .iota | //.map!(i => BigInt(i)) | .array | .sliced(8, 9); | | matrix[3 .. 6, 2] += 100; | foreach (i; 0 .. 8) | foreach (j; 0 .. 9) | if (i >= 3 && i < 6 && j == 2) | assert(matrix[i, j] >= 100); | else | assert(matrix[i, j] < 100); |} | |// Operator overloading. 
# 3 |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.topology : iota; | | auto matrix = iota(8, 9).slice; | matrix[] = matrix; | matrix[] += matrix; | assert(matrix[2, 3] == (2 * 9 + 3) * 2); | | auto vec = iota([9], 100); | matrix[] = vec; | foreach (v; matrix) | assert(v == vec); | | matrix[] += vec; | foreach (vector; matrix) | foreach (elem; vector) | assert(elem >= 200); |} | |// Type deduction |version(mir_test) unittest |{ | // Arrays | foreach (T; AliasSeq!(int, const int, immutable int)) | static assert(is(typeof((T[]).init.sliced(3, 4)) == Slice!(T*, 2))); | | // Container Array | import std.container.array; | Array!int ar; | ar.length = 12; | auto arSl = ar[].slicedField(3, 4); |} | |// Test for map #1 |version(mir_test) unittest |{ | import std.algorithm.iteration : map; | import std.range.primitives; | auto slice = [1, 2, 3, 4].sliced(2, 2); | | auto r = slice.map!(a => a.map!(a => a * 6)); | assert(r.front.front == 6); | assert(r.front.back == 12); | assert(r.back.front == 18); | assert(r.back.back == 24); | assert(r[0][0] == 6); | assert(r[0][1] == 12); | assert(r[1][0] == 18); | assert(r[1][1] == 24); | static assert(hasSlicing!(typeof(r))); | static assert(isForwardRange!(typeof(r))); | static assert(isRandomAccessRange!(typeof(r))); |} | |// Test for map #2 |version(mir_test) unittest |{ | import std.algorithm.iteration : map; | import std.range.primitives; | auto data = [1, 2, 3, 4] | //.map!(a => a * 2) | ; | static assert(hasSlicing!(typeof(data))); | static assert(isForwardRange!(typeof(data))); | static assert(isRandomAccessRange!(typeof(data))); | auto slice = data.sliced(2, 2); | static assert(hasSlicing!(typeof(slice))); | static assert(isForwardRange!(typeof(slice))); | static assert(isRandomAccessRange!(typeof(slice))); | auto r = slice.map!(a => a.map!(a => a * 6)); | static assert(hasSlicing!(typeof(r))); | static assert(isForwardRange!(typeof(r))); | static assert(isRandomAccessRange!(typeof(r))); | assert(r.front.front == 6); | assert(r.front.back == 12); | assert(r.back.front == 18); | assert(r.back.back == 24); | assert(r[0][0] == 6); | assert(r[0][1] == 12); | assert(r[1][0] == 18); | assert(r[1][1] == 24); |} | |private enum bool isType(alias T) = false; | |private enum bool isType(T) = true; | |private enum isStringValue(alias T) = is(typeof(T) : string); | | |private bool _checkAssignLengths( | LIterator, RIterator, | size_t LN, size_t RN, | SliceKind lkind, SliceKind rkind, | ) | (scope Slice!(LIterator, LN, lkind) ls, | scope Slice!(RIterator, RN, rkind) rs) |{ | static if (isInstanceOf!(SliceIterator, LIterator)) | { | import mir.ndslice.topology: unpack; | return _checkAssignLengths(ls.unpack, rs); | } | else | static if (isInstanceOf!(SliceIterator, RIterator)) | { | import mir.ndslice.topology: unpack; | return _checkAssignLengths(ls, rs.unpack); | } | else | { | foreach (i; Iota!(0, RN)) | if (ls._lengths[i + LN - RN] != rs._lengths[i]) | return false; | return true; | } |} | |@safe pure nothrow @nogc version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | assert(_checkAssignLengths(iota(2, 2), iota(2, 2))); | assert(!_checkAssignLengths(iota(2, 2), iota(2, 3))); | assert(!_checkAssignLengths(iota(2, 2), iota(3, 2))); | assert(!_checkAssignLengths(iota(2, 2), iota(3, 3))); |} | |pure nothrow version(mir_test) unittest |{ | auto slice = new int[15].slicedField(5, 3); | | /// Fully defined slice | assert(slice[] == slice); | auto sublice = slice[0..$-2, 1..$]; | | /// Partially 
defined slice | auto row = slice[3]; | auto col = slice[0..$, 1]; |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | auto b = [1, 2, 3, 4].sliced(2, 2); | | a[0..$, 0..$-1] = b; | assert(a == [[1, 2, 0], [3, 4, 0]]); | | a[0..$, 0..$-1] = b[0]; | assert(a == [[1, 2, 0], [1, 2, 0]]); | | a[1, 0..$-1] = b[1]; | assert(a[1] == [3, 4, 0]); | | a[1, 0..$-1][] = b[0]; | assert(a[1] == [1, 2, 0]); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | auto b = [[1, 2], [3, 4]]; | | a[] = [[1, 2, 3], [4, 5, 6]]; | assert(a == [[1, 2, 3], [4, 5, 6]]); | | a[0..$, 0..$-1] = [[1, 2], [3, 4]]; | assert(a == [[1, 2, 3], [3, 4, 6]]); | | a[0..$, 0..$-1] = [1, 2]; | assert(a == [[1, 2, 3], [1, 2, 6]]); | | a[1, 0..$-1] = [3, 4]; | assert(a[1] == [3, 4, 6]); | | a[1, 0..$-1][] = [3, 4]; | assert(a[1] == [3, 4, 6]); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | a[] = 9; | //assert(a == [[9, 9, 9], [9, 9, 9]]); | | a[0..$, 0..$-1] = 1; | //assert(a == [[1, 1, 9], [1, 1, 9]]); | | a[0..$, 0..$-1] = 2; | //assert(a == [[2, 2, 9], [2, 2, 9]]); | | a[1, 0..$-1] = 3; | //assert(a[1] == [3, 3, 9]); | | a[1, 0..$-1] = 4; | //assert(a[1] == [4, 4, 9]); | | a[1, 0..$-1][] = 5; | //assert(a[1] == [5, 5, 9]); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | a[1, 2] = 3; | assert(a[1, 2] == 3); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | a[[1, 2]] = 3; | assert(a[[1, 2]] == 3); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | a[1, 2] += 3; | assert(a[1, 2] == 3); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | a[[1, 2]] += 3; | assert(a[[1, 2]] == 3); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | auto b = [1, 2, 3, 4].sliced(2, 2); | | a[0..$, 0..$-1] += b; | assert(a == [[1, 2, 0], [3, 4, 0]]); | | a[0..$, 0..$-1] += b[0]; | assert(a == [[2, 4, 0], [4, 6, 0]]); | | a[1, 0..$-1] += b[1]; | assert(a[1] == [7, 10, 0]); | | a[1, 0..$-1][] += b[0]; | assert(a[1] == [8, 12, 0]); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | a[0..$, 0..$-1] += [[1, 2], [3, 4]]; | assert(a == [[1, 2, 0], [3, 4, 0]]); | | a[0..$, 0..$-1] += [1, 2]; | assert(a == [[2, 4, 0], [4, 6, 0]]); | | a[1, 0..$-1] += [3, 4]; | assert(a[1] == [7, 10, 0]); | | a[1, 0..$-1][] += [1, 2]; | assert(a[1] == [8, 12, 0]); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | a[] += 1; | assert(a == [[1, 1, 1], [1, 1, 1]]); | | a[0..$, 0..$-1] += 2; | assert(a == [[3, 3, 1], [3, 3, 1]]); | | a[1, 0..$-1] += 3; | assert(a[1] == [6, 6, 1]); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | ++a[1, 2]; | assert(a[1, 2] == 1); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | ++a[[1, 2]]; | assert(a[[1, 2]] == 1); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | ++a[]; | assert(a == [[1, 1, 1], [1, 1, 1]]); | | --a[1, 0..$-1]; | assert(a[1] == [0, 0, 1]); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology: iota, universal; | | auto sl = iota(3, 4).universal; | assert(sl[0 .. 
$] == sl); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology: canonical, iota; | static assert(kindOf!(typeof(iota([1, 2]).canonical[1])) == Contiguous); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology: iota; | auto s = iota(2, 3); | assert(s.front!1 == [0, 3]); | assert(s.back!1 == [2, 5]); |} | |/++ |Assignment utility for generic code that works both with scalars and with ndslices. |Params: | op = assign operation (generic, optional) | lside = left side | rside = right side |Returns: | expression value |+/ |auto ndassign(string op = "", L, R)(ref L lside, auto ref R rside) @property | if (!isSlice!L && (op.length == 0 || op[$-1] != '=')) |{ | return mixin(`lside ` ~ op ~ `= rside`); |} | |/// ditto |auto ndassign(string op = "", L, R)(L lside, auto ref R rside) @property | if (isSlice!L && (op.length == 0 || op[$-1] != '=')) |{ | static if (op == "") | return lside.opIndexAssign(rside); | else | return lside.opIndexOpAssign!op(rside); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.topology: iota; | import mir.ndslice.allocation: slice; | auto scalar = 3; | auto vector = 3.iota.slice; // [0, 1, 2] | | // scalar = 5; | scalar.ndassign = 5; | assert(scalar == 5); | | // vector[] = vector * 2; | vector.ndassign = vector * 2; | assert(vector == [0, 2, 4]); | | // vector[] += scalar; | vector.ndassign!"+"= scalar; | assert(vector == [5, 7, 9]); |} ../../../.dub/packages/mir-algorithm-3.4.17/mir-algorithm/source/mir/ndslice/slice.d is 0% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.4.17-mir-algorithm-source-mir-ndslice-topology.lst |/++ |This is a submodule of $(MREF mir,ndslice). | |Selectors create new views and iteration patterns over the same data, without copying. | |$(BOOKTABLE $(H2 SliceKind Selectors), |$(TR $(TH Function Name) $(TH Description)) | |$(T2 universal, Converts a slice to universal $(SUBREF slice, SliceKind).) |$(T2 canonical, Converts a slice to canonical $(SUBREF slice, SliceKind).) |$(T2 assumeCanonical, Converts a slice to canonical $(SUBREF slice, SliceKind) (unsafe).) |$(T2 assumeContiguous, Converts a slice to contiguous $(SUBREF slice, SliceKind) (unsafe).) | |) | |$(BOOKTABLE $(H2 Sequence Selectors), |$(TR $(TH Function Name) $(TH Description)) | |$(T2 cycle, Cycle repeates 1-dimensional field/range/array/slice in a fixed length 1-dimensional slice) |$(T2 iota, Contiguous Slice with initial flattened (contiguous) index.) |$(T2 linspace, Evenly spaced numbers over a specified interval.) |$(T2 magic, Magic square.) |$(T2 ndiota, Contiguous Slice with initial multidimensional index.) |$(T2 repeat, Slice with identical values) |) | |. | |$(BOOKTABLE $(H2 Products), |$(TR $(TH Function Name) $(TH Description)) | |$(T2 cartesian, Cartesian product.) |$(T2 kronecker, Kronecker product.) | |) | |$(BOOKTABLE $(H2 Representation Selectors), |$(TR $(TH Function Name) $(TH Description)) | |$(T2 as, Convenience function that creates a lazy view, |where each element of the original slice is converted to a type `T`.) |$(T2 bitpack, Bitpack slice over an unsigned integral slice.) |$(T2 bitwise, Bitwise slice over an unsigned integral slice.) |$(T2 bytegroup, Groups existing slice into fixed length chunks and uses them as data store for destination type.) |$(T2 cached, Random access cache. It is usefull in combiation with $(LREF map) and $(LREF vmap).) |$(T2 cachedGC, Random access cache auto-allocated in GC heap. It is usefull in combiation with $(LREF map) and $(LREF vmap).) 
|$(T2 diff, Differences between vector elements.) |$(T2 flattened, Contiguous 1-dimensional slice of all elements of a slice.) |$(T2 map, Multidimensional functional map.) |$(T2 member, Field (element's member) projection.) |$(T2 orthogonalReduceField, Functional deep-element wise reduce of a slice composed of fields or iterators.) |$(T2 pairwise, Pairwise map for vectors.) |$(T2 pairwiseMapSubSlices, Maps pairwise indexes pairs to subslices.) |$(T2 retro, Reverses order of iteration for all dimensions.) |$(T2 slide, Sliding map for vectors.) |$(T2 stairs, Two functions to pack, unpack, and iterate triangular and symmetric matrix storage.) |$(T2 stride, Strides 1-dimensional slice.) |$(T2 subSlices, Maps indexes pairs to subslices.) |$(T2 triplets, Constructs a lazy view of triplets with `left`, `center`, and `right` members. The topology is usefull for Math and Physics.) |$(T2 unzip, Selects a slice from a zipped slice.) |$(T2 zip, Zips slices into a slice of refTuples.) |) | | |$(BOOKTABLE $(H2 Shape Selectors), |$(TR $(TH Function Name) $(TH Description)) | |$(T2 blocks, n-dimensional slice composed of n-dimensional non-overlapping blocks. If the slice has two dimensions, it is a block matrix.) |$(T2 diagonal, 1-dimensional slice composed of diagonal elements) |$(T2 reshape, New slice with changed dimensions for the same data) |$(T2 windows, n-dimensional slice of n-dimensional overlapping windows. If the slice has two dimensions, it is a sliding window.) | |) | |$(BOOKTABLE $(H2 Subspace Selectors), |$(TR $(TH Function Name) $(TH Description)) | |$(T2 pack , Returns slice of slices.) |$(T2 ipack , Returns slice of slices.) |$(T2 unpack , Merges two hight dimension packs. See also $(SUBREF fuse, fuse).) |$(T2 evertPack, Reverses dimension packs.) |$(T2 byDim , Returns a slice that can be iterated by dimension. Transposes dimensions on top and then packs them.) | |) | |Subspace selectors serve to generalize and combine other selectors easily. |For a slice of `Slice!(Iterator, N, kind)` type `slice.pack!K` creates a slice of |slices of `Slice!(kind, [N - K, K], Iterator)` type by packing |the last `K` dimensions of the top dimension pack, |and the type of element of $(LREF flattened) is `Slice!(Iterator, K)`. |Another way to use $(LREF pack) is transposition of dimension packs using |$(LREF evertPack). |Examples of use of subspace selectors are available for selectors, |$(SUBREF slice, Slice.shape), and $(SUBREF slice, Slice.elementCount). | | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Sponsors: Part of this work has been sponsored by $(LINK2 http://symmetryinvestments.com, Symmetry Investments) and Kaleidic Associates. | | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |T4=$(TR $(TDNW $(LREF $1)) $(TD $2) $(TD $3) $(TD $4)) |+/ |module mir.ndslice.topology; | |import std.meta; | |import mir.internal.utility; |import mir.math.common: optmath; |import mir.ndslice.field; |import mir.ndslice.internal; |import mir.ndslice.iterator; |import mir.ndslice.ndfield; |import mir.ndslice.slice; |import mir.primitives; |import mir.qualifier; |import mir.utility: min; | |private immutable choppedExceptionMsg = "bounds passed to chopped are out of sliceable bounds."; |version (D_Exceptions) private immutable choppedException = new Exception(choppedExceptionMsg); | |@optmath: | |/++ |Converts a slice to universal kind. 
| |Params: | slice = a slice |Returns: | universal slice |See_also: | $(LREF canonical), | $(LREF assumeCanonical), | $(LREF assumeContiguous). |+/ |auto universal(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) |{ | static if (kind == Universal) | { | return slice; | } | else | static if (is(Iterator : RetroIterator!It, It)) | { | return slice.retro.universal.retro; | } | else | { | alias Ret = Slice!(Iterator, N, Universal); | size_t[Ret.N] lengths; | auto strides = sizediff_t[Ret.S].init; | foreach (i; Iota!(slice.N)) | lengths[i] = slice._lengths[i]; | static if (kind == Canonical) | { | foreach (i; Iota!(slice.S)) | strides[i] = slice._strides[i]; | strides[$-1] = 1; | } | else | { | ptrdiff_t ball = 1; | foreach_reverse (i; Iota!(Ret.S)) | { | strides[i] = ball; | static if (i) | ball *= slice._lengths[i]; | } | } | return Ret(lengths, strides, slice._iterator); | } |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | auto slice = iota(2, 3).universal; | assert(slice == [[0, 1, 2], [3, 4, 5]]); | assert(slice._lengths == [2, 3]); | assert(slice._strides == [3, 1]); |} | |@safe pure nothrow |version(mir_test) unittest |{ | auto slice = iota(2, 3).canonical.universal; | assert(slice == [[0, 1, 2], [3, 4, 5]]); | assert(slice._lengths == [2, 3]); | assert(slice._strides == [3, 1]); |} | |/++ |Converts a slice to canonical kind. | |Params: | slice = contiguous or canonical slice |Returns: | canonical slice |See_also: | $(LREF universal), | $(LREF assumeCanonical), | $(LREF assumeContiguous). |+/ |Slice!(Iterator, N, N == 1 ? Contiguous : Canonical) | canonical | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | if (kind == Contiguous || kind == Canonical) |{ | static if (kind == Canonical || N == 1) | return slice; | else | { | alias Ret = typeof(return); | size_t[Ret.N] lengths; | auto strides = sizediff_t[Ret.S].init; | foreach (i; Iota!(slice.N)) | lengths[i] = slice._lengths[i]; | ptrdiff_t ball = 1; | foreach_reverse (i; Iota!(Ret.S)) | { | ball *= slice._lengths[i + 1]; | strides[i] = ball; | } | return Ret(lengths, strides, slice._iterator); | } |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | auto slice = iota(2, 3).canonical; | assert(slice == [[0, 1, 2], [3, 4, 5]]); | assert(slice._lengths == [2, 3]); | assert(slice._strides == [3]); |} | |/++ |Converts a slice to canonical kind (unsafe). | |Params: | slice = a slice |Returns: | canonical slice |See_also: | $(LREF universal), | $(LREF canonical), | $(LREF assumeContiguous). |+/ |Slice!(Iterator, N, Canonical) | assumeCanonical | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) |{ | static if (kind == Contiguous) | return slice.canonical; | else | static if (kind == Canonical) | return slice; | else | { | alias Ret = typeof(return); | size_t[Ret.N] lengths; | auto strides = sizediff_t[Ret.S].init; | foreach (i; Iota!(slice.N)) | lengths[i] = slice._lengths[i]; | foreach (i; Iota!(Ret.S)) | strides[i] = slice._strides[i]; | return Ret(lengths, strides, slice._iterator); | } |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | auto slice = iota(2, 3).universal.assumeCanonical; | assert(slice == [[0, 1, 2], [3, 4, 5]]); | assert(slice._lengths == [2, 3]); | assert(slice._strides == [3]); |} | | |/++ |Converts a slice to contiguous kind (unsafe). | |Params: | slice = a slice |Returns: | canonical slice |See_also: | $(LREF universal), | $(LREF canonical), | $(LREF assumeCanonical). 
|+/ |Slice!(Iterator, N) | assumeContiguous | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) |{ | static if (kind == Contiguous) | return slice; | else | { | return typeof(return)(slice._lengths, slice._iterator); | } |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | auto slice = iota(2, 3).universal.assumeContiguous; | assert(slice == [[0, 1, 2], [3, 4, 5]]); | assert(slice._lengths == [2, 3]); | static assert(slice._strides.length == 0); |} | |/++ |+/ |auto assumeFieldsHaveZeroShift(Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | if (__traits(hasMember, Iterator, "assumeFieldsHaveZeroShift")) |{ | return slice._iterator.assumeFieldsHaveZeroShift.slicedField(slice._lengths); |} | |/++ |Creates a packed slice, i.e. slice of slices. |Packs the last `P` dimensions. |The function does not allocate any data. | |Params: | P = size of dimension pack | slice = a slice to pack |Returns: | `slice.pack!p` returns `Slice!(kind, [N - p, p], Iterator)` |See_also: $(LREF ipack) |+/ |Slice!(SliceIterator!(Iterator, P, P == 1 && kind == Canonical ? Contiguous : kind), N - P, Universal) |pack(size_t P, Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | if (P && P < N) |{ | return slice.ipack!(N - P); |} | |/// |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice : sliced, Slice; | | auto a = iota(3, 4, 5, 6); | auto b = a.pack!2; | | static immutable res1 = [3, 4]; | static immutable res2 = [5, 6]; | assert(b.shape == res1); | assert(b[0, 0].shape == res2); | assert(a == b.unpack); | assert(a.pack!2 == b); | static assert(is(typeof(b) == typeof(a.pack!2))); |} | |/++ |Creates a packed slice, i.e. slice of slices. |Packs the last `N - P` dimensions. |The function does not allocate any data. | |Params: | + = size of dimension pack | slice = a slice to pack |See_also: $(LREF pack) |+/ |Slice!(SliceIterator!(Iterator, N - P, N - P == 1 && kind == Canonical ? Contiguous : kind), P, Universal) |ipack(size_t P, Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | if (P && P < N) |{ | alias Ret = typeof(return); | alias It = Ret.Iterator; | alias EN = It.Element.N; | alias ES = It.Element.S; | auto sl = slice.universal; | static if (It.Element.kind == Contiguous) | return Ret( | cast( size_t[P]) sl._lengths[0 .. P], | cast(ptrdiff_t[P]) sl._strides[0 .. P], | It( | cast(size_t[EN]) sl._lengths[P .. $], | sl._iterator)); | else | return Ret( | cast( size_t[P]) sl._lengths[0 .. P], | cast(ptrdiff_t[P]) sl._strides[0 .. P], | It( | cast( size_t[EN]) sl._lengths[P .. $], | cast(ptrdiff_t[ES]) sl._strides[P .. $ - (It.Element.kind == Canonical)], | sl._iterator)); |} | |/// |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice : sliced, Slice; | | auto a = iota(3, 4, 5, 6); | auto b = a.ipack!2; | | static immutable res1 = [3, 4]; | static immutable res2 = [5, 6]; | assert(b.shape == res1); | assert(b[0, 0].shape == res2); | assert(a.ipack!2 == b); | static assert(is(typeof(b) == typeof(a.ipack!2))); |} | |/++ |Unpacks a packed slice. | |The functions does not allocate any data. | |Params: | slice = packed slice |Returns: | unpacked slice, that is a view on the same data. 
| |See_also: $(LREF pack), $(LREF evertPack) |+/ |Slice!(Iterator, N + M, min(innerKind, Canonical)) | unpack(Iterator, size_t M, SliceKind innerKind, size_t N, SliceKind outerKind) | (Slice!(SliceIterator!(Iterator, M, innerKind), N, outerKind) slice) |{ | alias Ret = typeof(return); | size_t[N + M] lengths; | auto strides = sizediff_t[Ret.S].init; | auto outerStrides = slice.strides; | auto innerStrides = Slice!(Iterator, M, innerKind)( | slice._iterator._structure, | slice._iterator._iterator, | ).strides; | foreach(i; Iota!N) | lengths[i] = slice._lengths[i]; | foreach(i; Iota!N) | strides[i] = outerStrides[i]; | foreach(i; Iota!M) | lengths[N + i] = slice._iterator._structure[0][i]; | foreach(i; Iota!(Ret.S - N)) | strides[N + i] = innerStrides[i]; | return Ret(lengths, strides, slice._iterator._iterator); |} | |/++ |Reverses the order of dimension packs. |This function is used in a functional pipeline with other selectors. | |Params: | slice = packed slice |Returns: | packed slice | |See_also: $(LREF pack), $(LREF unpack) |+/ |Slice!(SliceIterator!(Iterator, N, outerKind), M, innerKind) |evertPack(Iterator, size_t M, SliceKind innerKind, size_t N, SliceKind outerKind) | (Slice!(SliceIterator!(Iterator, M, innerKind), N, outerKind) slice) |{ | return typeof(return)( | slice._iterator._structure, | typeof(return).Iterator( | slice._structure, | slice._iterator._iterator)); |} | |/// |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.dynamic : transposed; | auto slice = iota(3, 4, 5, 6, 7, 8, 9, 10, 11).universal; | assert(slice | .pack!2 | .evertPack | .unpack | == slice.transposed!( | slice.shape.length-2, | slice.shape.length-1)); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice: sliced; | import mir.ndslice.allocation: slice; | static assert(is(typeof( | slice!int(6) | .sliced(1,2,3) | .pack!1 | .evertPack() | ) | == Slice!(SliceIterator!(int*, 2, Universal), 1))); |} | | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | auto a = iota(3, 4, 5, 6, 7, 8, 9, 10, 11); | auto b = a.pack!2.unpack; | static assert(is(typeof(a.canonical) == typeof(b))); | assert(a == b); |} | |/++ |Returns a slice, the elements of which are equal to the initial flattened index value. | |Params: | N = dimension count | lengths = list of dimension lengths | start = value of the first element in a slice (optional for integer `I`) | stride = value of the stride between elements (optional) |Returns: | n-dimensional slice composed of indexes |See_also: $(LREF ndiota) |+/ |Slice!(IotaIterator!I, N) |iota | (I = sizediff_t, size_t N)(size_t[N] lengths...) 
| if (__traits(isIntegral, I)) |{ | import mir.ndslice.slice : sliced; | return IotaIterator!I(I.init).sliced(lengths); |} | |///ditto |Slice!(IotaIterator!sizediff_t, N) |iota | (size_t N)(size_t[N] lengths, sizediff_t start) |{ | import mir.ndslice.slice : sliced; | return IotaIterator!sizediff_t(start).sliced(lengths); |} | |///ditto |Slice!(StrideIterator!(IotaIterator!sizediff_t), N) |iota | (size_t N)(size_t[N] lengths, sizediff_t start, size_t stride) |{ | import mir.ndslice.slice : sliced; | return StrideIterator!(IotaIterator!sizediff_t)(stride, IotaIterator!sizediff_t(start)).sliced(lengths); |} | |///ditto |template iota(I) | if (__traits(isIntegral, I)) |{ | /// | Slice!(IotaIterator!I, N) | iota | (size_t N)(size_t[N] lengths, I start) | if (__traits(isIntegral, I)) | { | import mir.ndslice.slice : sliced; | return IotaIterator!I(start).sliced(lengths); | } | | ///ditto | Slice!(StrideIterator!(IotaIterator!I), N) | iota | (size_t N)(size_t[N] lengths, I start, size_t stride) | if (__traits(isIntegral, I)) | { | import mir.ndslice.slice : sliced; | return StrideIterator!(IotaIterator!I)(stride, IotaIterator!I(start)).sliced(lengths); | } |} | |///ditto |Slice!(IotaIterator!I, N) |iota | (I, size_t N)(size_t[N] lengths, I start) | if (is(I P : P*)) |{ | import mir.ndslice.slice : sliced; | return IotaIterator!I(start).sliced(lengths); |} | |///ditto |Slice!(StrideIterator!(IotaIterator!I), N) |iota | (I, size_t N)(size_t[N] lengths, I start, size_t stride) | if (is(I P : P*)) |{ | import mir.ndslice.slice : sliced; | return StrideIterator!(IotaIterator!I)(stride, IotaIterator!I(start)).sliced(lengths); |} | |/// |@safe pure nothrow @nogc version(mir_test) unittest |{ | auto slice = iota(2, 3); | static immutable array = | [[0, 1, 2], | [3, 4, 5]]; | | assert(slice == array); | | static assert(is(DeepElementType!(typeof(slice)) == sizediff_t)); |} | |/// |pure nothrow @nogc |version(mir_test) unittest |{ | int[6] data; | auto slice = iota([2, 3], data.ptr); | assert(slice[0, 0] == data.ptr); | assert(slice[0, 1] == data.ptr + 1); | assert(slice[1, 0] == data.ptr + 3); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | auto im = iota([10, 5], 100); | assert(im[2, 1] == 111); // 100 + 2 * 5 + 1 | | //slicing works correctly | auto cm = im[1 .. $, 3 .. $]; | assert(cm[2, 1] == 119); // 119 = 100 + (1 + 2) * 5 + (3 + 1) |} | |/// `iota` with step |@safe pure nothrow version(mir_test) unittest |{ | auto sl = iota([2, 3], 10, 10); | | assert(sl == [[10, 20, 30], | [40, 50, 60]]); |} | |/++ |Returns a 1-dimensional slice over the main diagonal of an n-dimensional slice. |`diagonal` can be generalized with other selectors such as |$(LREF blocks) (diagonal blocks) and $(LREF windows) (multi-diagonal slice). | |Params: | slice = input slice |Returns: | 1-dimensional slice composed of diagonal elements |See_also: $(LREF antidiagonal) |+/ |Slice!(Iterator, 1, N == 1 ? 
kind : Universal) | diagonal | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) |{ | static if (N == 1) | { | return slice; | } | else | { | alias Ret = typeof(return); | size_t[Ret.N] lengths; | auto strides = sizediff_t[Ret.S].init; | lengths[0] = slice._lengths[0]; | foreach (i; Iota!(1, N)) | if (lengths[0] > slice._lengths[i]) | lengths[0] = slice._lengths[i]; | foreach (i; Iota!(1, Ret.N)) | lengths[i] = slice._lengths[i + N - 1]; | auto rstrides = slice.strides; | strides[0] = rstrides[0]; | foreach (i; Iota!(1, N)) | strides[0] += rstrides[i]; | foreach (i; Iota!(1, Ret.S)) | strides[i] = rstrides[i + N - 1]; | return Ret(lengths, strides, slice._iterator); | } |} | |/// Matrix, main diagonal |@safe @nogc pure nothrow version(mir_test) unittest |{ | // ------- | // | 0 1 2 | | // | 3 4 5 | | // ------- | //-> | // | 0 4 | | static immutable d = [0, 4]; | assert(iota(2, 3).diagonal == d); |} | |/// Non-square matrix |@safe pure nothrow version(mir_test) unittest |{ | // ------- | // | 0 1 | | // | 2 3 | | // | 4 5 | | // ------- | //-> | // | 0 3 | | | assert(iota(3, 2).diagonal == iota([2], 0, 3)); |} | |/// Loop through diagonal |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.allocation; | | auto slice = slice!int(3, 3); | int i; | foreach (ref e; slice.diagonal) | e = ++i; | assert(slice == [ | [1, 0, 0], | [0, 2, 0], | [0, 0, 3]]); |} | |/// Matrix, subdiagonal |@safe @nogc pure nothrow |version(mir_test) unittest |{ | // ------- | // | 0 1 2 | | // | 3 4 5 | | // ------- | //-> | // | 1 5 | | static immutable d = [1, 5]; | auto a = iota(2, 3).canonical; | a.popFront!1; | assert(a.diagonal == d); |} | |/// 3D, main diagonal |@safe @nogc pure nothrow version(mir_test) unittest |{ | // ----------- | // | 0 1 2 | | // | 3 4 5 | | // - - - - - - | // | 6 7 8 | | // | 9 10 11 | | // ----------- | //-> | // | 0 10 | | static immutable d = [0, 10]; | assert(iota(2, 2, 3).diagonal == d); |} | |/// 3D, subdiagonal |@safe @nogc pure nothrow version(mir_test) unittest |{ | // ----------- | // | 0 1 2 | | // | 3 4 5 | | // - - - - - - | // | 6 7 8 | | // | 9 10 11 | | // ----------- | //-> | // | 1 11 | | static immutable d = [1, 11]; | auto a = iota(2, 2, 3).canonical; | a.popFront!2; | assert(a.diagonal == d); |} | |/// 3D, diagonal plain |@nogc @safe pure nothrow |version(mir_test) unittest |{ | // ----------- | // | 0 1 2 | | // | 3 4 5 | | // | 6 7 8 | | // - - - - - - | // | 9 10 11 | | // | 12 13 14 | | // | 15 16 17 | | // - - - - - - | // | 18 20 21 | | // | 22 23 24 | | // | 24 25 26 | | // ----------- | //-> | // ----------- | // | 0 4 8 | | // | 9 13 17 | | // | 18 23 26 | | // ----------- | | static immutable d = | [[ 0, 4, 8], | [ 9, 13, 17], | [18, 22, 26]]; | | auto slice = iota(3, 3, 3) | .pack!2 | .evertPack | .diagonal | .evertPack; | | assert(slice == d); |} | |/++ |Returns a 1-dimensional slice over the main antidiagonal of an 2D-dimensional slice. |`antidiagonal` can be generalized with other selectors such as |$(LREF blocks) (diagonal blocks) and $(LREF windows) (multi-diagonal slice). | |It runs from the top right corner to the bottom left corner. | |Pseudo_code: |------ |auto antidiagonal = slice.dropToHypercube.reversed!1.diagonal; |------ | |Params: | slice = input slice |Returns: | 1-dimensional slice composed of antidiagonal elements. 
|See_also: $(LREF diagonal) |+/ |Slice!(Iterator, 1, Universal) | antidiagonal | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | if (N == 2) |{ | import mir.ndslice.dynamic : dropToHypercube, reversed; | return slice.dropToHypercube.reversed!1.diagonal; |} | |/// |@safe @nogc pure nothrow version(mir_test) unittest |{ | // ----- | // | 0 1 | | // | 2 3 | | // ----- | //-> | // | 1 2 | | static immutable c = [1, 2]; | import std.stdio; | assert(iota(2, 2).antidiagonal == c); |} | |/// |@safe @nogc pure nothrow version(mir_test) unittest |{ | // ------- | // | 0 1 2 | | // | 3 4 5 | | // ------- | //-> | // | 1 3 | | static immutable d = [1, 3]; | assert(iota(2, 3).antidiagonal == d); |} | |/++ |Returns an n-dimensional slice of n-dimensional non-overlapping blocks. |`blocks` can be generalized with other selectors. |For example, `blocks` in combination with $(LREF diagonal) can be used to get a slice of diagonal blocks. |For overlapped blocks, combine $(LREF windows) with $(SUBREF dynamic, strided). | |Params: | N = dimension count | slice = slice to be split into blocks | rlengths_ = dimensions of block, residual blocks are ignored |Returns: | packed `N`-dimensional slice composed of `N`-dimensional slices | |See_also: $(SUBREF chunks, ._chunks) |+/ |Slice!(SliceIterator!(Iterator, N, N == 1 ? Universal : min(kind, Canonical)), N, Universal) | blocks | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice, size_t[N] rlengths_...) |in |{ | foreach (i, length; rlengths_) | assert(length > 0, "length of dimension = " ~ i.stringof ~ " must be positive" | ~ tailErrorMessage!()); |} |body |{ | size_t[N] lengths; | size_t[N] rlengths = rlengths_; | sizediff_t[N] strides; | foreach (dimension; Iota!N) | lengths[dimension] = slice._lengths[dimension] / rlengths[dimension]; | auto rstrides = slice.strides; | foreach (i; Iota!N) | { | strides[i] = rstrides[i]; | if (lengths[i]) //do not remove `if (...)` | strides[i] *= rlengths[i]; | } | return typeof(return)( | lengths, | strides, | typeof(return).Iterator( | rlengths, | rstrides[0 .. 
typeof(return).DeepElement.S], | slice._iterator)); |} | |/// |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.allocation; | auto slice = slice!int(5, 8); | auto blocks = slice.blocks(2, 3); | int i; | foreach (blocksRaw; blocks) | foreach (block; blocksRaw) | block[] = ++i; | | assert(blocks == | [[[[1, 1, 1], [1, 1, 1]], | [[2, 2, 2], [2, 2, 2]]], | [[[3, 3, 3], [3, 3, 3]], | [[4, 4, 4], [4, 4, 4]]]]); | | assert( slice == | [[1, 1, 1, 2, 2, 2, 0, 0], | [1, 1, 1, 2, 2, 2, 0, 0], | | [3, 3, 3, 4, 4, 4, 0, 0], | [3, 3, 3, 4, 4, 4, 0, 0], | | [0, 0, 0, 0, 0, 0, 0, 0]]); |} | |/// Diagonal blocks |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.allocation; | auto slice = slice!int(5, 8); | auto blocks = slice.blocks(2, 3); | auto diagonalBlocks = blocks.diagonal.unpack; | | diagonalBlocks[0][] = 1; | diagonalBlocks[1][] = 2; | | assert(diagonalBlocks == | [[[1, 1, 1], [1, 1, 1]], | [[2, 2, 2], [2, 2, 2]]]); | | assert(blocks == | [[[[1, 1, 1], [1, 1, 1]], | [[0, 0, 0], [0, 0, 0]]], | [[[0, 0, 0], [0, 0, 0]], | [[2, 2, 2], [2, 2, 2]]]]); | | assert(slice == | [[1, 1, 1, 0, 0, 0, 0, 0], | [1, 1, 1, 0, 0, 0, 0, 0], | | [0, 0, 0, 2, 2, 2, 0, 0], | [0, 0, 0, 2, 2, 2, 0, 0], | | [0, 0, 0, 0, 0, 0, 0, 0]]); |} | |/// Matrix divided into vertical blocks |@safe pure version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.slice; | auto slice = slice!int(5, 13); | auto blocks = slice | .pack!1 | .evertPack | .blocks(3) | .unpack; | | int i; | foreach (block; blocks) | block[] = ++i; | | assert(slice == | [[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0], | [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0], | [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0], | [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0], | [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0]]); |} | |/++ |Returns an n-dimensional slice of n-dimensional overlapping windows. |`windows` can be generalized with other selectors. |For example, `windows` in combination with $(LREF diagonal) can be used to get a multi-diagonal slice. | |Params: | N = dimension count | slice = slice to be iterated | rlengths = dimensions of windows |Returns: | packed `N`-dimensional slice composed of `N`-dimensional slices |+/ |Slice!(SliceIterator!(Iterator, N, N == 1 ? kind : min(kind, Canonical)), N, Universal) | windows | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice, size_t[N] rlengths...) |in |{ | foreach (i, length; rlengths) | assert(length > 0, "length of dimension = " ~ i.stringof ~ " must be positive" | ~ tailErrorMessage!()); |} |body |{ | size_t[N] rls = rlengths; | size_t[N] lengths; | foreach (dimension; Iota!N) | lengths[dimension] = slice._lengths[dimension] >= rls[dimension] ? | slice._lengths[dimension] - rls[dimension] + 1 : 0; | auto rstrides = slice.strides; | static if (typeof(return).DeepElement.S) | return typeof(return)( | lengths, | rstrides, | typeof(return).Iterator( | rls, | rstrides[0 .. 
typeof(return).DeepElement.S], | slice._iterator)); | else | return typeof(return)( | lengths, | rstrides, | typeof(return).Iterator( | rls, | slice._iterator)); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.slice; | auto slice = slice!int(5, 8); | auto windows = slice.windows(2, 3); | | int i; | foreach (windowsRaw; windows) | foreach (window; windowsRaw) | ++window[]; | | assert(slice == | [[1, 2, 3, 3, 3, 3, 2, 1], | | [2, 4, 6, 6, 6, 6, 4, 2], | [2, 4, 6, 6, 6, 6, 4, 2], | [2, 4, 6, 6, 6, 6, 4, 2], | | [1, 2, 3, 3, 3, 3, 2, 1]]); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.slice; | auto slice = slice!int(5, 8); | auto windows = slice.windows(2, 3); | windows[1, 2][] = 1; | windows[1, 2][0, 1] += 1; | windows.unpack[1, 2, 0, 1] += 1; | | assert(slice == | [[0, 0, 0, 0, 0, 0, 0, 0], | | [0, 0, 1, 3, 1, 0, 0, 0], | [0, 0, 1, 1, 1, 0, 0, 0], | | [0, 0, 0, 0, 0, 0, 0, 0], | [0, 0, 0, 0, 0, 0, 0, 0]]); |} | |/// Multi-diagonal matrix |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.slice; | auto slice = slice!int(8, 8); | auto windows = slice.windows(3, 3); | | auto multidiagonal = windows | .diagonal | .unpack; | foreach (window; multidiagonal) | window[] += 1; | | assert(slice == | [[ 1, 1, 1, 0, 0, 0, 0, 0], | [ 1, 2, 2, 1, 0, 0, 0, 0], | [ 1, 2, 3, 2, 1, 0, 0, 0], | [0, 1, 2, 3, 2, 1, 0, 0], | [0, 0, 1, 2, 3, 2, 1, 0], | [0, 0, 0, 1, 2, 3, 2, 1], | [0, 0, 0, 0, 1, 2, 2, 1], | [0, 0, 0, 0, 0, 1, 1, 1]]); |} | |/// Sliding window over matrix columns |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.slice; | auto slice = slice!int(5, 8); | auto windows = slice | .pack!1 | .evertPack | .windows(3) | .unpack; | | foreach (window; windows) | window[] += 1; | | assert(slice == | [[1, 2, 3, 3, 3, 3, 2, 1], | [1, 2, 3, 3, 3, 3, 2, 1], | [1, 2, 3, 3, 3, 3, 2, 1], | [1, 2, 3, 3, 3, 3, 2, 1], | [1, 2, 3, 3, 3, 3, 2, 1]]); |} | |/// Overlapping blocks using windows |@safe pure nothrow version(mir_test) unittest |{ | // ---------------- | // | 0 1 2 3 4 | | // | 5 6 7 8 9 | | // | 10 11 12 13 14 | | // | 15 16 17 18 19 | | // | 20 21 22 23 24 | | // ---------------- | //-> | // --------------------- | // | 0 1 2 | 2 3 4 | | // | 5 6 7 | 7 8 9 | | // | 10 11 12 | 12 13 14 | | // | - - - - - - - - - - | | // | 10 11 13 | 12 13 14 | | // | 15 16 17 | 17 18 19 | | // | 20 21 22 | 22 23 24 | | // --------------------- | | import mir.ndslice.slice; | import mir.ndslice.dynamic : strided; | | auto overlappingBlocks = iota(5, 5) | .windows(3, 3) | .universal | .strided!(0, 1)(2, 2); | | assert(overlappingBlocks == | [[[[ 0, 1, 2], [ 5, 6, 7], [10, 11, 12]], | [[ 2, 3, 4], [ 7, 8, 9], [12, 13, 14]]], | [[[10, 11, 12], [15, 16, 17], [20, 21, 22]], | [[12, 13, 14], [17, 18, 19], [22, 23, 24]]]]); |} | |version(mir_test) unittest |{ | auto w = iota(9, 9).windows(3, 3); | assert(w.front == w[0]); |} | |/++ |Error codes for $(LREF reshape). |+/ |enum ReshapeError |{ | /// No error | none, | /// Slice should be not empty | empty, | /// Total element count should be the same | total, | /// Structure is incompatible with new shape | incompatible, |} | |/++ |Returns a new slice for the same data with different dimensions. | |Params: | slice = slice to be reshaped | rlengths = list of new dimensions. One of the lengths can be set to `-1`. 
| In this case, the corresponding dimension is inferable. | err = $(LREF ReshapeError) code |Returns: | reshaped slice |+/ |Slice!(Iterator, M, kind) reshape | (Iterator, size_t N, SliceKind kind, size_t M) | (Slice!(Iterator, N, kind) slice, ptrdiff_t[M] rlengths, ref int err) |{ | static if (kind == Canonical) | { | auto r = slice.universal.reshape(rlengths, err); | assert(err || r._strides[$-1] == 1); | r._strides[$-1] = 1; | return r.assumeCanonical; | } | else | { | alias Ret = typeof(return); | auto structure = Ret._Structure.init; | alias lengths = structure[0]; | foreach (i; Iota!M) | lengths[i] = rlengths[i]; | | /// Code size optimization | immutable size_t eco = slice.elementCount; | size_t ecn = lengths[0 .. rlengths.length].iota.elementCount; | if (eco == 0) | { | err = ReshapeError.empty; | goto R; | } | foreach (i; Iota!M) | if (lengths[i] == -1) | { | ecn = -ecn; | lengths[i] = eco / ecn; | ecn *= lengths[i]; | break; | } | if (eco != ecn) | { | err = ReshapeError.total; | goto R; | } | static if (kind == Universal) | { | for (size_t oi, ni, oj, nj; oi < N && ni < M; oi = oj, ni = nj) | { | size_t op = slice._lengths[oj++]; | size_t np = lengths[nj++]; | | for (;;) | { | if (op < np) | op *= slice._lengths[oj++]; | if (op > np) | np *= lengths[nj++]; | if (op == np) | break; | } | while (oj < N && slice._lengths[oj] == 1) oj++; | while (nj < M && lengths[nj] == 1) nj++; | | for (size_t l = oi, r = oi + 1; r < oj; r++) | if (slice._lengths[r] != 1) | { | if (slice._strides[l] != slice._lengths[r] * slice._strides[r]) | { | err = ReshapeError.incompatible; | goto R; | } | l = r; | } | assert((oi == N) == (ni == M)); | | structure[1][nj - 1] = slice._strides[oj - 1]; | foreach_reverse (i; ni .. nj - 1) | structure[1][i] = lengths[i + 1] * structure[1][i + 1]; | } | } | foreach (i; Iota!(M, Ret.N)) | lengths[i] = slice._lengths[i + N - M]; | static if (M < Ret.S) | foreach (i; Iota!(M, Ret.S)) | structure[1][i] = slice._strides[i + N - M]; | err = 0; | return Ret(structure, slice._iterator); | R: | return Ret(structure, slice._iterator.init); | } |} | |/// |@safe nothrow pure |version(mir_test) unittest |{ | import mir.ndslice.dynamic : allReversed; | int err; | auto slice = iota(3, 4) | .universal | .allReversed | .reshape([-1, 3], err); | assert(err == 0); | assert(slice == | [[11, 10, 9], | [ 8, 7, 6], | [ 5, 4, 3], | [ 2, 1, 0]]); |} | |/// Reshaping with memory allocation |@safe pure version(mir_test) unittest |{ | import mir.ndslice.slice: sliced; | import mir.ndslice.allocation: slice; | import mir.ndslice.dynamic : reversed; | | auto reshape2(S, size_t M)(S sl, ptrdiff_t[M] lengths) | { | int err; | // Tries to reshape without allocation | auto ret = sl.reshape(lengths, err); | if (!err) | return ret; | if (err == ReshapeError.incompatible) | // allocates, flattens, reshapes with `sliced`, converts to universal kind | return sl.slice.flattened.sliced(cast(size_t[M])lengths).universal; | throw new Exception("total elements count is different or equals to zero"); | } | | auto sl = iota!int(3, 4) | .slice | .universal | .reversed!0; | | assert(reshape2(sl, [4, 3]) == | [[ 8, 9, 10], | [11, 4, 5], | [ 6, 7, 0], | [ 1, 2, 3]]); |} | |nothrow @safe pure version(mir_test) unittest |{ | import mir.ndslice.dynamic : allReversed; | auto slice = iota(1, 1, 3, 2, 1, 2, 1).universal.allReversed; | int err; | assert(slice.reshape([1, -1, 1, 1, 3, 1], err) == | [[[[[[11], [10], [9]]]], | [[[[ 8], [ 7], [6]]]], | [[[[ 5], [ 4], [3]]]], | [[[[ 2], [ 1], [0]]]]]]); | assert(err == 0); |} 
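|
|// Illustrative sketch (added example, using `iota` and `reshape` from this module):
|// on a plain contiguous slice `reshape` succeeds in place, and a single `-1`
|// length is inferred from the total element count.
|@safe pure nothrow @nogc version(mir_test) unittest
|{
|    int err;
|    auto m = iota(2, 6).reshape([3, -1], err);
|    assert(err == 0);
|    assert(m.shape == cast(size_t[2])[3, 4]);
|    assert(m == iota(3, 4));
|}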
| |// Issue 15919 |nothrow @nogc @safe pure |version(mir_test) unittest |{ | int err; | assert(iota(3, 4, 5, 6, 7).pack!2.reshape([4, 3, 5], err)[0, 0, 0].shape == cast(size_t[2])[6, 7]); | assert(err == 0); |} | |nothrow @nogc @safe pure version(mir_test) unittest |{ | import mir.ndslice.slice; | | int err; | auto e = iota(1); | // resize to the wrong dimension | auto s = e.reshape([2], err); | assert(err == ReshapeError.total); | e.popFront; | // test with an empty slice | e.reshape([1], err); | assert(err == ReshapeError.empty); |} | |nothrow @nogc @safe pure |version(mir_test) unittest |{ | auto pElements = iota(3, 4, 5, 6, 7) | .pack!2 | .flattened; | assert(pElements[0][0] == iota(7)); | assert(pElements[$-1][$-1] == iota([7], 2513)); |} | |/++ |A contiguous 1-dimensional slice of all elements of a slice. |`flattened` iterates existing data. |The order of elements is preserved. | |`flattened` can be generalized with other selectors. | |Params: | slice = slice to be iterated |Returns: | contiguous 1-dimensional slice of elements of the `slice` |+/ |Slice!(FlattenedIterator!(Iterator, N, kind)) | flattened | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | if (N != 1 && kind != Contiguous) |{ | size_t[typeof(return).N] lengths; | sizediff_t[typeof(return)._iterator._indexes.length] indexes; | lengths[0] = slice.elementCount; | return typeof(return)(lengths, FlattenedIterator!(Iterator, N, kind)(indexes, slice)); |} | |/// ditto |Slice!Iterator | flattened | (Iterator, size_t N) | (Slice!(Iterator, N) slice) |{ | static if (N == 1) | { | return slice; | } | else | { 0000000| size_t[typeof(return).N] lengths; 0000000| lengths[0] = slice.elementCount; 0000000| return typeof(return)(lengths, slice._iterator); | } |} | |/// ditto |Slice!(StrideIterator!Iterator) | flattened | (Iterator) | (Slice!(Iterator, 1, Universal) slice) |{ | return slice.hideStride; |} | |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | auto sl1 = iota(2, 3).slice.universal.pack!1.flattened; | auto sl2 = iota(2, 3).slice.canonical.pack!1.flattened; | auto sl3 = iota(2, 3).slice.pack!1.flattened; |} | |/// Regular slice |@safe @nogc pure nothrow version(mir_test) unittest |{ | assert(iota(4, 5).flattened == iota(20)); | assert(iota(4, 5).canonical.flattened == iota(20)); | assert(iota(4, 5).universal.flattened == iota(20)); |} | |@safe @nogc pure nothrow version(mir_test) unittest |{ | assert(iota(4).flattened == iota(4)); | assert(iota(4).canonical.flattened == iota(4)); | assert(iota(4).universal.flattened == iota(4)); |} | |/// Packed slice |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.dynamic; | assert(iota(3, 4, 5, 6, 7).pack!2.flattened[1] == iota([6, 7], 6 * 7)); |} | |/// Properties |@safe pure nothrow version(mir_test) unittest |{ | auto elems = iota(3, 4).universal.flattened; | | elems.popFrontExactly(2); | assert(elems.front == 2); | /// `_index` is available only for canonical and universal ndslices. 
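|    // after popFrontExactly(2), the flattened view of the 3x4 `iota` points at row 0, column 2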
| assert(elems._iterator._indexes == [0, 2]); | | elems.popBackExactly(2); | assert(elems.back == 9); | assert(elems.length == 8); |} | |/// Index property |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | auto slice = new long[20].sliced(5, 4); | | for (auto elems = slice.universal.flattened; !elems.empty; elems.popFront) | { | ptrdiff_t[2] index = elems._iterator._indexes; | elems.front = index[0] * 10 + index[1] * 3; | } | assert(slice == | [[ 0, 3, 6, 9], | [10, 13, 16, 19], | [20, 23, 26, 29], | [30, 33, 36, 39], | [40, 43, 46, 49]]); |} | |@safe pure nothrow version(mir_test) unittest |{ | auto elems = iota(3, 4).universal.flattened; | assert(elems.front == 0); | assert(elems.save[1] == 1); |} | |/++ |Random access and slicing |+/ |nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.slice : sliced; | | auto elems = iota(4, 5).slice.flattened; | | elems = elems[11 .. $ - 2]; | | assert(elems.length == 7); | assert(elems.front == 11); | assert(elems.back == 17); | | foreach (i; 0 .. 7) | assert(elems[i] == i + 11); | | // assign an element | elems[2 .. 6] = -1; | assert(elems[2 .. 6] == repeat(-1, 4)); | | // assign an array | static ar = [-1, -2, -3, -4]; | elems[2 .. 6] = ar; | assert(elems[2 .. 6] == ar); | | // assign a slice | ar[] *= 2; | auto sl = ar.sliced(ar.length); | elems[2 .. 6] = sl; | assert(elems[2 .. 6] == sl); |} | |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.dynamic : allReversed; | | auto slice = iota(3, 4, 5); | | foreach (ref e; slice.universal.flattened.retro) | { | //... | } | | foreach_reverse (ref e; slice.universal.flattened) | { | //... | } | | foreach (ref e; slice.universal.allReversed.flattened) | { | //... | } |} | |@safe @nogc pure nothrow version(mir_test) unittest |{ | import std.range.primitives : isRandomAccessRange, hasSlicing; | auto elems = iota(4, 5).flattened; | static assert(isRandomAccessRange!(typeof(elems))); | static assert(hasSlicing!(typeof(elems))); |} | |// Checks strides |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.dynamic; | import std.range.primitives : isRandomAccessRange; | auto elems = iota(4, 5).universal.everted.flattened; | static assert(isRandomAccessRange!(typeof(elems))); | | elems = elems[11 .. $ - 2]; | auto elems2 = elems; | foreach (i; 0 .. 7) | { | assert(elems[i] == elems2.front); | elems2.popFront; | } |} | |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.dynamic; | import std.range.primitives : isRandomAccessRange, hasLength; | | auto range = (3 * 4 * 5 * 6 * 7).iota; | auto slice0 = range.sliced(3, 4, 5, 6, 7).universal; | auto slice1 = slice0.transposed!(2, 1).pack!2; | auto elems0 = slice0.flattened; | auto elems1 = slice1.flattened; | | foreach (S; AliasSeq!(typeof(elems0), typeof(elems1))) | { | static assert(isRandomAccessRange!S); | static assert(hasLength!S); | } | | assert(elems0.length == slice0.elementCount); | assert(elems1.length == 5 * 4 * 3); | | auto elems2 = elems1; | foreach (q; slice1) | foreach (w; q) | foreach (e; w) | { | assert(!elems2.empty); | assert(e == elems2.front); | elems2.popFront; | } | assert(elems2.empty); | | elems0.popFront(); | elems0.popFrontExactly(slice0.elementCount - 14); | assert(elems0.length == 13); | assert(elems0 == range[slice0.elementCount - 13 .. 
slice0.elementCount]); | | foreach (elem; elems0) {} |} | |// Issue 15549 |version(mir_test) unittest |{ | import std.range.primitives; | import mir.ndslice.allocation; | alias A = typeof(iota(1, 2, 3, 4).pack!1); | static assert(isRandomAccessRange!A); | static assert(hasLength!A); | static assert(hasSlicing!A); | alias B = typeof(slice!int(1, 2, 3, 4).pack!3); | static assert(isRandomAccessRange!B); | static assert(hasLength!B); | static assert(hasSlicing!B); |} | |// Issue 16010 |version(mir_test) unittest |{ | auto s = iota(3, 4).flattened; | foreach (_; 0 .. s.length) | s = s[1 .. $]; |} | |/++ |Returns a slice, the elements of which are equal to the initial multidimensional index value. |For a flattened (contiguous) index, see $(LREF iota). | |Params: | N = dimension count | lengths = list of dimension lengths |Returns: | `N`-dimensional slice composed of indexes |See_also: $(LREF iota) |+/ |Slice!(FieldIterator!(ndIotaField!N), N) | ndiota | (size_t N) | (size_t[N] lengths...) | if (N) |{ | return FieldIterator!(ndIotaField!N)(0, ndIotaField!N(lengths[1 .. $])).sliced(lengths); |} | |/// |@safe pure nothrow @nogc version(mir_test) unittest |{ | auto slice = ndiota(2, 3); | static immutable array = | [[[0, 0], [0, 1], [0, 2]], | [[1, 0], [1, 1], [1, 2]]]; | | assert(slice == array); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto im = ndiota(7, 9); | | assert(im[2, 1] == [2, 1]); | | //slicing works correctly | auto cm = im[1 .. $, 4 .. $]; | assert(cm[2, 1] == [3, 5]); |} | |version(mir_test) unittest |{ | auto r = ndiota(1); | auto d = r.front; | r.popFront; | import std.range.primitives; | static assert(isRandomAccessRange!(typeof(r))); |} | |/++ |Evenly spaced numbers over a specified interval. | |Params: | T = floating point or complex numbers type | lengths = list of dimension lengths. Each length must be greater then 1. | intervals = list of [start, end] pairs. |Returns: | `n`-dimensional grid of evenly spaced numbers over specified intervals. |See_also: $(LREF) |+/ |auto linspace(T, size_t N)(size_t[N] lengths, T[2][N] intervals...) 
| if (N && (isFloatingPoint!T || isComplex!T)) |{ 0000000| Repeat!(N, LinspaceField!T) fields; | foreach(i; Iota!N) | { 0000000| assert(lengths[i] > 1, "linspace: all lengths must be greater then 1."); 0000000| fields[i] = LinspaceField!T(lengths[i], intervals[i][0], intervals[i][1]); | } | static if (N == 1) 0000000| return slicedField(fields); | else | return cartesian(fields); |} | |// example from readme |version(mir_test) unittest |{ | import mir.ndslice; | // import std.stdio: writefln; | | enum fmt = "%(%(%.2f %)\n%)\n"; | | auto a = magic(5).as!float; | // writefln(fmt, a); | | auto b = linspace!float([5, 5], [1f, 2f], [0f, 1f]).map!"a * a + b"; | // writefln(fmt, b); | | auto c = slice!float(5, 5); | c[] = transposed(a + b / 2); |} | |/// 1D |@safe pure nothrow |version(mir_test) unittest |{ | auto s = linspace!double([5], [1.0, 2.0]); | assert(s == [1.0, 1.25, 1.5, 1.75, 2.0]); | | // reverse order | assert(linspace!double([5], [2.0, 1.0]) == s.retro); | | // remove endpoint | s.popBack; | assert(s == [1.0, 1.25, 1.5, 1.75]); |} | |/// 2D |@safe pure nothrow |version(mir_test) unittest |{ | import mir.functional: refTuple; | | auto s = linspace!double([5, 3], [1.0, 2.0], [0.0, 1.0]); | | assert(s == [ | [refTuple(1.00, 0.00), refTuple(1.00, 0.5), refTuple(1.00, 1.0)], | [refTuple(1.25, 0.00), refTuple(1.25, 0.5), refTuple(1.25, 1.0)], | [refTuple(1.50, 0.00), refTuple(1.50, 0.5), refTuple(1.50, 1.0)], | [refTuple(1.75, 0.00), refTuple(1.75, 0.5), refTuple(1.75, 1.0)], | [refTuple(2.00, 0.00), refTuple(2.00, 0.5), refTuple(2.00, 1.0)], | ]); | | assert(s.map!"a * b" == [ | [0.0, 0.500, 1.00], | [0.0, 0.625, 1.25], | [0.0, 0.750, 1.50], | [0.0, 0.875, 1.75], | [0.0, 1.000, 2.00], | ]); |} | |/// Complex numbers |@safe pure nothrow |version(mir_test) unittest |{ | auto s = linspace!cdouble([3], [1.0 + 0i, 2.0 + 4i]); | assert(s == [1.0 + 0i, 1.5 + 2i, 2.0 + 4i]); |} | |/++ |Returns a slice with identical elements. |`RepeatSlice` stores only single value. |Params: | lengths = list of dimension lengths |Returns: | `n`-dimensional slice composed of identical values, where `n` is dimension count. |+/ |Slice!(FieldIterator!(RepeatField!T), M, Universal) | repeat(T, size_t M)(T value, size_t[M] lengths...) @trusted | if (M && !isSlice!T) |{ | size_t[M] ls = lengths; | return typeof(return)( | ls, | sizediff_t[M].init, | typeof(return).Iterator(0, RepeatField!T(cast(RepeatField!T.UT) value))); |} | |/// ditto |Slice!(SliceIterator!(Iterator, N, kind), M, Universal) | repeat | (SliceKind kind, size_t N, Iterator, size_t M) | (Slice!(Iterator, N, kind) slice, size_t[M] lengths...) 
| if (M) |{ | size_t[M] ls = lengths; | return typeof(return)( | ls, | sizediff_t[M].init, | typeof(return).Iterator( | slice._structure, | slice._iterator)); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | auto sl = iota(3).repeat(4); | assert(sl == [[0, 1, 2], | [0, 1, 2], | [0, 1, 2], | [0, 1, 2]]); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.dynamic : transposed; | | auto sl = iota(3) | .repeat(4) | .unpack | .universal | .transposed; | | assert(sl == [[0, 0, 0, 0], | [1, 1, 1, 1], | [2, 2, 2, 2]]); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | | auto sl = iota([3], 6).slice; | auto slC = sl.repeat(2, 3); | sl[1] = 4; | assert(slC == [[[6, 4, 8], | [6, 4, 8], | [6, 4, 8]], | [[6, 4, 8], | [6, 4, 8], | [6, 4, 8]]]); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto sl = repeat(4.0, 2, 3); | assert(sl == [[4.0, 4.0, 4.0], | [4.0, 4.0, 4.0]]); | | static assert(is(DeepElementType!(typeof(sl)) == double)); | | sl[1, 1] = 3; | assert(sl == [[3.0, 3.0, 3.0], | [3.0, 3.0, 3.0]]); |} | |/++ |Cycle repeates 1-dimensional field/range/array/slice in a fixed length 1-dimensional slice. |+/ |auto cycle(Field)(Field field, size_t loopLength, size_t length) | if (!isSlice!Field && !is(Field : T[], T)) |{ | return CycleField!Field(loopLength, field).slicedField(length); |} | |/// ditto |auto cycle(size_t loopLength, Field)(Field field, size_t length) | if (!isSlice!Field && !is(Field : T[], T)) |{ | static assert(loopLength); | return CycleField!(Field, loopLength)(field).slicedField(length); |} | |/// ditto |auto cycle(Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) slice, size_t length) |{ | assert(slice.length); | static if (kind == Universal) | return slice.hideStride.cycle(length); | else | return CycleField!Iterator(slice._lengths[0], slice._iterator).slicedField(length); |} | |/// ditto |auto cycle(size_t loopLength, Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) slice, size_t length) |{ | static assert(loopLength); | assert(loopLength <= slice.length); | static if (kind == Universal) | return slice.hideStride.cycle!loopLength(length); | else | return CycleField!(Iterator, loopLength)(slice._iterator).slicedField(length); |} | |/// ditto |auto cycle(T)(T[] array, size_t length) |{ | return cycle(array.sliced, length); |} | |/// ditto |auto cycle(size_t loopLength, T)(T[] array, size_t length) |{ | return cycle!loopLength(array.sliced, length); |} | | |/// ditto |auto cycle(size_t loopLength, T)(T withAsSlice, size_t length) | if (hasAsSlice!T) |{ | return cycle!loopLength(withAsSlice.asSlice, length); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto slice = iota(3); | assert(slice.cycle(7) == [0, 1, 2, 0, 1, 2, 0]); | assert(slice.cycle!2(7) == [0, 1, 0, 1, 0, 1, 0]); | assert([0, 1, 2].cycle(7) == [0, 1, 2, 0, 1, 2, 0]); | assert([4, 3, 2, 1].cycle!4(7) == [4, 3, 2, 1, 4, 3, 2]); |} | |/++ |Strides 1-dimensional slice. |Params: | slice = 1-dimensional unpacked slice. | factor = positive stride size. |Returns: | Contiguous slice with strided iterator. 
|See_also: $(SUBREF dynamic, strided) |+/ |auto stride | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice, ptrdiff_t factor) | if (N == 1) |in |{ | assert (factor > 0, "factor must be positive."); |} |body |{ | static if (kind == Contiguous) | return slice.universal.stride(factor); | else | { | import mir.ndslice.dynamic: strided; | return slice.strided!0(factor).hideStride; | } |} | |/// ditto |auto stride(T)(T[] array, ptrdiff_t factor) |{ | return stride(array.sliced, factor); |} | |/// ditto |auto stride(T)(T withAsSlice, ptrdiff_t factor) | if (hasAsSlice!T) |{ | return stride(withAsSlice.asSlice, factor); |} | |/// |@safe pure nothrow @nogc version(mir_test) unittest |{ | auto slice = iota(6); | static immutable str = [0, 2, 4]; | assert(slice.stride(2) == str); | assert(slice.universal.stride(2) == str); |} | |/++ |Reverses order of iteration for all dimensions. |Params: | slice = Unpacked slice. |Returns: | Slice with reversed order of iteration for all dimensions. |See_also: $(SUBREF dynamic, reversed), $(SUBREF dynamic, allReversed). |+/ |auto retro | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | @trusted |{ | static if (kind == Contiguous || kind == Canonical) | { | size_t[slice.N] lengths; | foreach (i; Iota!(slice.N)) | lengths[i] = slice._lengths[i]; | static if (slice.S) | { | sizediff_t[slice.S] strides; | foreach (i; Iota!(slice.S)) | strides[i] = slice._strides[i]; | alias structure = AliasSeq!(lengths, strides); | } | else | { | alias structure = lengths; | } | static if (is(Iterator : RetroIterator!It, It)) | { | alias Ret = Slice!(It, N, kind); | return Ret(structure, slice._iterator._iterator - slice.lastIndex); | } | else | { | alias Ret = Slice!(RetroIterator!Iterator, N, kind); | return Ret(structure, RetroIterator!Iterator(slice._iterator + slice.lastIndex)); | } | } | else | { | import mir.ndslice.dynamic: allReversed; | return slice.allReversed; | } |} | |/// ditto |auto retro(T)(T[] array) |{ | return retro(array.sliced); |} | |/// ditto |auto retro(T)(T withAsSlice) | if (hasAsSlice!T) |{ | return retro(withAsSlice.asSlice); |} | |/// |@safe pure nothrow @nogc version(mir_test) unittest |{ | auto slice = iota(2, 3); | static immutable reversed = [[5, 4, 3], [2, 1, 0]]; | assert(slice.retro == reversed); | assert(slice.canonical.retro == reversed); | assert(slice.universal.retro == reversed); | | static assert(is(typeof(slice.retro.retro) == typeof(slice))); | static assert(is(typeof(slice.canonical.retro.retro) == typeof(slice.canonical))); | static assert(is(typeof(slice.universal.retro) == typeof(slice.universal))); |} | |/++ |Bitwise slice over an integral slice. |Params: | slice = a contiguous or canonical slice on top of integral iterator. |Returns: A bitwise slice. 
|+/ |auto bitwise | (Iterator, size_t N, SliceKind kind, I = typeof(Iterator.init[size_t.init])) | (Slice!(Iterator, N, kind) slice) | if (__traits(isIntegral, I) && (kind == Contiguous || kind == Canonical)) |{ | static if (is(Iterator : FieldIterator!Field, Field)) | { | enum simplified = true; | alias It = FieldIterator!(BitField!Field); | } | else | { | enum simplified = false; | alias It = FieldIterator!(BitField!Iterator); | } | alias Ret = Slice!(It, N, kind); | auto structure_ = Ret._Structure.init; | foreach(i; Iota!(Ret.N)) | structure_[0][i] = slice._lengths[i]; | structure_[0][$ - 1] *= I.sizeof * 8; | foreach(i; Iota!(Ret.S)) | structure_[1][i] = slice._strides[i]; | static if (simplified) | return Ret(structure_, It(slice._iterator._index * I.sizeof * 8, BitField!Field(slice._iterator._field))); | else | return Ret(structure_, It(0, BitField!Iterator(slice._iterator))); |} | |/// ditto |auto bitwise(T)(T[] array) |{ | return bitwise(array.sliced); |} | |/// ditto |auto bitwise(T)(T withAsSlice) | if (hasAsSlice!T) |{ | return bitwise(withAsSlice.asSlice); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | size_t[10] data; | auto bits = data[].bitwise; | assert(bits.length == data.length * size_t.sizeof * 8); | bits[111] = true; | assert(bits[111]); | | bits.popFront; | assert(bits[110]); | bits[] = true; | bits[110] = false; | bits = bits[10 .. $]; | assert(bits[100] == false); |} | |@safe pure nothrow @nogc |version(mir_test) unittest |{ | size_t[10] data; | auto slice = FieldIterator!(size_t[])(0, data[]).sliced(10); | slice.popFrontExactly(2); | auto bits_normal = data[].sliced.bitwise; | auto bits = slice.bitwise; | assert(bits.length == (data.length - 2) * size_t.sizeof * 8); | bits[111] = true; | assert(bits[111]); | assert(bits_normal[111 + size_t.sizeof * 2 * 8]); | | bits.popFront; | assert(bits[110]); | bits[] = true; | bits[110] = false; | bits = bits[10 .. $]; | assert(bits[100] == false); |} | |/++ |Bitwise field over an integral field. |Params: | field = an integral field. |Returns: A bitwise field. |+/ |auto bitwiseField(Field, I = typeof(Field.init[size_t.init]))(Field field) | if (__traits(isUnsigned, I)) |{ | return BitField!(Field, I)(field); |} | |/++ |Bitpack slice over an integral slice. | |Bitpack is used to represent unsigned integer slice with fewer number of bits in integer binary representation. | |Params: | pack = counts of bits in the integer. | slice = a contiguous or canonical slice on top of integral iterator. |Returns: A bitpack slice. 
|+/ |auto bitpack | (size_t pack, Iterator, size_t N, SliceKind kind, I = typeof(Iterator.init[size_t.init])) | (Slice!(Iterator, N, kind) slice) | if (__traits(isIntegral, I) && (kind == Contiguous || kind == Canonical) && pack > 1) |{ | static if (is(Iterator : FieldIterator!Field, Field) && I.sizeof * 8 % pack == 0) | { | enum simplified = true; | alias It = FieldIterator!(BitpackField!(Field, pack)); | } | else | { | enum simplified = false; | alias It = FieldIterator!(BitpackField!(Iterator, pack)); | } | alias Ret = Slice!(It, N, kind); | auto structure = Ret._Structure.init; | foreach(i; Iota!(Ret.N)) | structure[0][i] = slice._lengths[i]; | structure[0][$ - 1] *= I.sizeof * 8; | structure[0][$ - 1] /= pack; | foreach(i; Iota!(Ret.S)) | structure[1][i] = slice._strides[i]; | static if (simplified) | return Ret(structure, It(slice._iterator._index * I.sizeof * 8 / pack, BitpackField!(Field, pack)(slice._iterator._field))); | else | return Ret(structure, It(0, BitpackField!(Iterator, pack)(slice._iterator))); |} | |/// ditto |auto bitpack(size_t pack, T)(T[] array) |{ | return bitpack!pack(array.sliced); |} | |/// ditto |auto bitpack(size_t pack, T)(T withAsSlice) | if (hasAsSlice!T) |{ | return bitpack!pack(withAsSlice.asSlice); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | size_t[10] data; | // creates a packed unsigned integer slice with max allowed value equal to `2^^6 - 1 == 63`. | auto packs = data[].bitpack!6; | assert(packs.length == data.length * size_t.sizeof * 8 / 6); | packs[$ - 1] = 24; | assert(packs[$ - 1] == 24); | | packs.popFront; | assert(packs[$ - 1] == 24); |} | |/++ |Bytegroup slice over an integral slice. | |Groups existing slice into fixed length chunks and uses them as data store for destination type. | |Correctly handles scalar types on both little-endian and big-endian platforms. | |Params: | group = count of iterator items used to store the destination type. | DestinationType = deep element type of the result slice. | slice = a contiguous or canonical slice. |Returns: A bytegroup slice. |+/ |Slice!(BytegroupIterator!(Iterator, group, DestinationType), N, kind) |bytegroup | (size_t group, DestinationType, Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | if ((kind == Contiguous || kind == Canonical) && group) |{ | auto structure = slice._structure; | structure[0][$ - 1] /= group; | return typeof(return)(structure, BytegroupIterator!(Iterator, group, DestinationType)(slice._iterator)); |} | | |/// ditto |auto bytegroup(size_t pack, DestinationType, T)(T[] array) |{ | return bytegroup!(pack, DestinationType)(array.sliced); |} | |/// ditto |auto bytegroup(size_t pack, DestinationType, T)(T withAsSlice) | if (hasAsSlice!T) |{ | return bytegroup!(pack, DestinationType)(withAsSlice.asSlice); |} | |/// 24 bit integers |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.slice : DeepElementType, sliced; | | ubyte[20] data; | // creates a packed unsigned integer slice with max allowed value equal to `2^^6 - 1 == 63`. 
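|    // each group of 3 consecutive `ubyte`s backs a single `int` element (24 bits of storage per element)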
| auto int24ar = data[].bytegroup!(3, int); // 24 bit integers | assert(int24ar.length == data.length / 3); | | enum checkInt = ((1 << 20) - 1); | | int24ar[3] = checkInt; | assert(int24ar[3] == checkInt); | | int24ar.popFront; | assert(int24ar[2] == checkInt); | | static assert(is(DeepElementType!(typeof(int24ar)) == int)); |} | |/// 48 bit integers |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.slice : DeepElementType, sliced; | ushort[20] data; | // creates a packed unsigned integer slice with max allowed value equal to `2^^6 - 1 == 63`. | auto int48ar = data[].sliced.bytegroup!(3, long); // 48 bit integers | assert(int48ar.length == data.length / 3); | | enum checkInt = ((1L << 44) - 1); | | int48ar[3] = checkInt; | assert(int48ar[3] == checkInt); | | int48ar.popFront; | assert(int48ar[2] == checkInt); | | static assert(is(DeepElementType!(typeof(int48ar)) == long)); |} | |/++ |Implements the homonym function (also known as `transform`) present |in many languages of functional flavor. The call `map!(fun)(slice)` |returns a slice of which elements are obtained by applying `fun` |for all elements in `slice`. The original slices are |not changed. Evaluation is done lazily. | |Note: | $(SUBREF dynamic, transposed) and | $(SUBREF topology, pack) can be used to specify dimensions. |Params: | fun = One or more functions. |See_Also: | $(LREF cached), $(LREF vmap), $(LREF indexed), | $(LREF pairwise), $(LREF subSlices), $(LREF slide), $(LREF zip), | $(HTTP en.wikipedia.org/wiki/Map_(higher-order_function), Map (higher-order function)) |+/ |template map(fun...) | if (fun.length) |{ | import mir.functional: adjoin, naryFun, pipe; | static if (fun.length == 1) | { | static if (__traits(isSame, naryFun!(fun[0]), fun[0]) && !__traits(isSame, naryFun!"a", fun[0])) | { | alias f = fun[0]; | @optmath: | /++ | Params: | slice = An input slice. | Returns: | a slice with each fun applied to all the elements. If there is more than one | fun, the element type will be `Tuple` containing one element for each fun. | +/ | auto map(Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | { | alias Iterator = typeof(_mapIterator!f(slice._iterator)); | import mir.ndslice.traits: isIterator; | static assert(isIterator!Iterator, "mir.ndslice.map: probably the lambda function contains a compile time bug."); | return Slice!(Iterator, N, kind)(slice._structure, _mapIterator!f(slice._iterator)); | } | | /// ditto | auto map(T)(T[] array) | { | return map(array.sliced); | } | | /// ditto | auto map(T)(T withAsSlice) | if (hasAsSlice!T) | { | return map(withAsSlice.asSlice); | } | } | else | static if (__traits(isSame, naryFun!"a", fun[0])) | { | /// | @optmath auto map(Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | { | return slice; | } | | /// ditto | auto map(T)(T[] array) | { | return array.sliced; | } | | /// ditto | auto map(T)(T withAsSlice) | if (hasAsSlice!T) | { | return withAsSlice.asSlice; | } | } | else alias map = .map!(naryFun!fun); | } | else alias map = .map!(adjoin!fun); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | auto s = iota(2, 3).map!(a => a * 3); | assert(s == [[ 0, 3, 6], | [ 9, 12, 15]]); |} | |/// String lambdas |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | assert(iota(2, 3).map!"a * 2" == [[0, 2, 4], [6, 8, 10]]); |} | |/// Packed tensors. 
|@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, windows; | import mir.math.sum: sum; | | // iota windows map sums ( reduce!"a + b" ) | // -------------- | // ------- | --- --- | ------ | // | 0 1 2 | => || 0 1 || 1 2 || => | 8 12 | | // | 3 4 5 | || 3 4 || 4 5 || ------ | // ------- | --- --- | | // -------------- | auto s = iota(2, 3) | .windows(2, 2) | .map!sum; | | assert(s == [[8, 12]]); |} | |/// Zipped tensors |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, zip; | | // 0 1 2 | // 3 4 5 | auto sl1 = iota(2, 3); | // 1 2 3 | // 4 5 6 | auto sl2 = iota([2, 3], 1); | | auto z = zip(sl1, sl2); | | assert(zip(sl1, sl2).map!"a + b" == sl1 + sl2); |} | |/++ |Multiple functions can be passed to `map`. |In that case, the element type of `map` is a refTuple containing |one element for each function. |+/ |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | auto sl = iota(2, 3); | auto s = sl.map!("a + a", "a * a"); | | auto sums = [[0, 2, 4], [6, 8, 10]]; | auto products = [[0, 1, 4], [9, 16, 25]]; | | assert(s.map!"a[0]" == sl + sl); | assert(s.map!"a[1]" == sl * sl); |} | |/++ |`map` can be aliased to a symbol and be used separately: |+/ |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | alias halfs = map!"double(a) / 2"; | assert(halfs(iota(2, 3)) == [[0.0, 0.5, 1], [1.5, 2, 2.5]]); |} | |/++ |Type normalization |+/ |version(mir_test) unittest |{ | import mir.functional : pipe; | import mir.ndslice.topology : iota; | auto a = iota(2, 3).map!"a + 10".map!(pipe!("a * 2", "a + 1")); | auto b = iota(2, 3).map!(pipe!("a + 10", "a * 2", "a + 1")); | assert(a == b); | static assert(is(typeof(a) == typeof(b))); |} | |/// |pure version(mir_test) unittest |{ | import std.algorithm.iteration : sum, reduce; | import mir.utility : max; | import mir.ndslice.dynamic : transposed; | /// Returns maximal column average. | auto maxAvg(S)(S matrix) { | return reduce!max(matrix.universal.transposed.pack!1.map!sum) | / double(matrix.length); | } | // 1 2 | // 3 4 | auto matrix = iota([2, 2], 1); | assert(maxAvg(matrix) == 3); |} | | |/++ |Implements the homonym function (also known as `transform`) present |in many languages of functional flavor. The call `slice.vmap(fun)` |returns a slice of which elements are obtained by applying `fun` |for all elements in `slice`. The original slices are |not changed. Evaluation is done lazily. | |Note: | $(SUBREF dynamic, transposed) and | $(SUBREF topology, pack) can be used to specify dimensions. |Params: | slice = ndslice | callable = callable object, structure, delegate, or function pointer. 
|See_Also: | $(LREF cached), $(LREF map), $(LREF indexed), | $(LREF pairwise), $(LREF subSlices), $(LREF slide), $(LREF zip), | $(HTTP en.wikipedia.org/wiki/Map_(higher-order_function), Map (higher-order function)) |+/ |@optmath auto vmap(Iterator, size_t N, SliceKind kind, Callable) | ( | scope return Slice!(Iterator, N, kind) slice, | scope return Callable callable, | ) |{ | alias It = VmapIterator!(Iterator, Callable); | return Slice!(It, N, kind)(slice._structure, It(slice._iterator, callable)); |} | |/// ditto |auto vmap(T, Callable)(scope return T[] array, scope return Callable callable) |{ | return vmap(array.sliced, callable); |} | |/// ditto |auto vmap(T, Callable)(scope return T withAsSlice, scope return Callable callable) | if (hasAsSlice!T) |{ | return vmap(withAsSlice.asSlice, callable); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | static struct Mul { | double factor; this(double f) { factor = f; } | auto opCall(long x) const {return x * factor; } | auto lightConst()() const @property { return Mul(factor); } | } | | auto callable = Mul(3); | auto s = iota(2, 3).vmap(callable); | | assert(s == [[ 0, 3, 6], | [ 9, 12, 15]]); |} | |/// Packed tensors. |@safe pure nothrow |version(mir_test) unittest |{ | import mir.math.sum: sum; | import mir.ndslice.topology : iota, windows; | | // iota windows vmap scaled sums | // -------------- | // ------- | --- --- | ----- | // | 0 1 2 | => || 0 1 || 1 2 || => | 4 6 | | // | 3 4 5 | || 3 4 || 4 5 || ----- | // ------- | --- --- | | // -------------- | | struct Callable | { | double factor; | this(double f) {factor = f;} | auto opCall(S)(S x) { return x.sum * factor; } | | auto lightConst()() const @property { return Callable(factor); } | auto lightImmutable()() immutable @property { return Callable(factor); } | } | | auto callable = Callable(0.5); | | auto s = iota(2, 3) | .windows(2, 2) | .vmap(callable); | | assert(s == [[4, 6]]); |} | |/// Zipped tensors |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, zip; | | struct Callable | { | double factor; | this(double f) {factor = f;} | auto opCall(S, T)(S x, T y) { return x + y * factor; } | | auto lightConst()() const { return Callable(factor); } | auto lightImmutable()() immutable { return Callable(factor); } | } | | auto callable = Callable(10); | | // 0 1 2 | // 3 4 5 | auto sl1 = iota(2, 3); | // 1 2 3 | // 4 5 6 | auto sl2 = iota([2, 3], 1); | | auto z = zip(sl1, sl2); | | assert(zip(sl1, sl2).vmap(callable) == | [[10, 21, 32], | [43, 54, 65]]); |} | |// TODO |/+ |Multiple functions can be passed to `vmap`. |In that case, the element type of `vmap` is a refTuple containing |one element for each function. 
|+/ |@safe pure nothrow |version(none) version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | auto s = iota(2, 3).vmap!("a + a", "a * a"); | | auto sums = [[0, 2, 4], [6, 8, 10]]; | auto products = [[0, 1, 4], [9, 16, 25]]; | | foreach (i; 0..s.length!0) | foreach (j; 0..s.length!1) | { | auto values = s[i, j]; | assert(values.a == sums[i][j]); | assert(values.b == products[i][j]); | } |} | |private auto hideStride | (Iterator, SliceKind kind) | (Slice!(Iterator, 1, kind) slice) |{ | static if (kind == Universal) | return Slice!(StrideIterator!Iterator)( | slice._lengths, | StrideIterator!Iterator(slice._strides[0], slice._iterator)); | else | return slice; |} | |private auto unhideStride | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) |{ | static if (is(Iterator : StrideIterator!It, It)) | { | static if (kind == Universal) | { | alias Ret = SliceKind!(It, N, Universal); | auto strides = slice._strides; | foreach(i; Iota!(Ret.S)) | strides[i] = slice._strides[i] * slice._iterator._stride; | return Slice!(It, N, Universal)(slice._lengths, strides, slice._iterator._iterator); | } | else | return slice.universal.unhideStride; | } | else | return slice; |} | |/++ |Creates a random access cache for lazyly computed elements. |Params: | original = original ndslice | caches = cached values | flags = array composed of flags that indicates if values are already computed |Returns: | ndslice, which is internally composed of three ndslices: `original`, allocated cache and allocated bit-ndslice. |See_also: $(LREF cachedGC), $(LREF map), $(LREF vmap), $(LREF indexed) |+/ |Slice!(CachedIterator!(Iterator, CacheIterator, FlagIterator), N, kind) | cached(Iterator, SliceKind kind, size_t N, CacheIterator, FlagIterator)( | Slice!(Iterator, N, kind) original, | Slice!(CacheIterator, N, kind) caches, | Slice!(FlagIterator, N, kind) flags, | ) |{ | assert(original.shape == caches.shape, "caches.shape should be equal to original.shape"); | assert(original.shape == flags.shape, "flags.shape should be equal to original.shape"); | return typeof(return)( | original._structure, | IteratorOf!(typeof(return))( | original._iterator, | caches._iterator, | flags._iterator, | )); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology: cached, iota, map; | import mir.ndslice.allocation: bitSlice, uninitSlice; | | int[] funCalls; | | auto v = 5.iota!int | .map!((i) { | funCalls ~= i; | return 2 ^^ i; | }); | auto flags = v.length.bitSlice; | auto cache = v.length.uninitSlice!int; | // cached lazy slice: 1 2 4 8 16 | auto sl = v.cached(cache, flags); | | assert(funCalls == []); | assert(sl[1] == 2); // remember result | assert(funCalls == [1]); | assert(sl[1] == 2); // reuse result | assert(funCalls == [1]); | | assert(sl[0] == 1); | assert(funCalls == [1, 0]); | funCalls = []; | | // set values directly | sl[1 .. 
3] = 5; | assert(sl[1] == 5); | assert(sl[2] == 5); | // no function calls | assert(funCalls == []); |} | |/// Cache of immutable elements |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.slice: DeepElementType; | import mir.ndslice.topology: cached, iota, map, as; | import mir.ndslice.allocation: bitSlice, uninitSlice; | | int[] funCalls; | | auto v = 5.iota!int | .map!((i) { | funCalls ~= i; | return 2 ^^ i; | }) | .as!(immutable int); | auto flags = v.length.bitSlice; | auto cache = v.length.uninitSlice!(immutable int); | | // cached lazy slice: 1 2 4 8 16 | auto sl = v.cached(cache, flags); | | static assert(is(DeepElementType!(typeof(sl)) == immutable int)); | | assert(funCalls == []); | assert(sl[1] == 2); // remember result | assert(funCalls == [1]); | assert(sl[1] == 2); // reuse result | assert(funCalls == [1]); | | assert(sl[0] == 1); | assert(funCalls == [1, 0]); |} | |/++ |Creates a random access cache for lazyly computed elements. |Params: | original = ND Contiguous or 1D Universal ndslice. |Returns: | ndslice, which is internally composed of three ndslices: `original`, allocated cache and allocated bit-ndslice. |See_also: $(LREF cached), $(LREF map), $(LREF vmap), $(LREF indexed) |+/ |Slice!(CachedIterator!(Iterator, typeof(Iterator.init[0])*, FieldIterator!(BitField!(size_t*))), N) | cachedGC(Iterator, size_t N)(Slice!(Iterator, N) original) @trusted |{ | import std.traits: hasElaborateAssign, Unqual; | import mir.ndslice.allocation: bitSlice, slice, uninitSlice; | alias C = typeof(Iterator.init[0]); | alias UC = Unqual!C; | static if (hasElaborateAssign!UC) | alias newSlice = slice; | else | alias newSlice = uninitSlice; | return typeof(return)( | original._structure, | IteratorOf!(typeof(return))( | original._iterator, | newSlice!C(original._lengths)._iterator, | original._lengths.bitSlice._iterator, | )); |} | |/// ditto |auto cachedGC(Iterator)(Slice!(Iterator, 1, Universal) from) |{ | return from.flattened.cachedGC; |} | |/// ditto |auto cachedGC(T)(T withAsSlice) | if (hasAsSlice!T) |{ | return cachedGC(withAsSlice.asSlice); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology: cachedGC, iota, map; | | int[] funCalls; | | // cached lazy slice: 1 2 4 8 16 | auto sl = 5.iota!int | .map!((i) { | funCalls ~= i; | return 2 ^^ i; | }) | .cachedGC; | | assert(funCalls == []); | assert(sl[1] == 2); // remember result | assert(funCalls == [1]); | assert(sl[1] == 2); // reuse result | assert(funCalls == [1]); | | assert(sl[0] == 1); | assert(funCalls == [1, 0]); | funCalls = []; | | // set values directly | sl[1 .. 3] = 5; | assert(sl[1] == 5); | assert(sl[2] == 5); | // no function calls | assert(funCalls == []); |} | |/// Cache of immutable elements |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.slice: DeepElementType; | import mir.ndslice.topology: cachedGC, iota, map, as; | | int[] funCalls; | | // cached lazy slice: 1 2 4 8 16 | auto sl = 5.iota!int | .map!((i) { | funCalls ~= i; | return 2 ^^ i; | }) | .as!(immutable int) | .cachedGC; | | static assert(is(DeepElementType!(typeof(sl)) == immutable int)); | | assert(funCalls == []); | assert(sl[1] == 2); // remember result | assert(funCalls == [1]); | assert(sl[1] == 2); // reuse result | assert(funCalls == [1]); | | assert(sl[0] == 1); | assert(funCalls == [1, 0]); |} | |/++ |Convenience function that creates a lazy view, |where each element of the original slice is converted to the type `T`. 
|It uses $(LREF map) and $(REF_ALTTEXT $(TT to), to, mir,conv)$(NBSP) |composition under the hood. |Params: | slice = a slice to create a view on. |Returns: | A lazy slice with elements converted to the type `T`. |See_also: $(LREF map), $(LREF vmap) |+/ |template as(T) |{ | /// | @optmath auto as(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | { | static if (is(slice.DeepElement == T)) | return slice; | else | static if (is(Iterator : T*)) | return slice.toConst; | else | { | import mir.conv: to; | return map!(to!T)(slice); | } | } | | /// ditto | auto as(S)(S[] array) | { | return as(array.sliced); | } | | /// ditto | auto as(S)(S withAsSlice) | if (hasAsSlice!S) | { | return as(withAsSlice.asSlice); | } |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : diagonal, as; | | auto matrix = slice!double([2, 2], 0); | auto stringMatrixView = matrix.as!int; | assert(stringMatrixView == | [[0, 0], | [0, 0]]); | | matrix.diagonal[] = 1; | assert(stringMatrixView == | [[1, 0], | [0, 1]]); | | /// allocate new slice composed of strings | Slice!(int*, 2) stringMatrix = stringMatrixView.slice; |} | |/// Special behavior for pointers to a constant data. |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.slice : Contiguous, Slice; | | Slice!(double*, 2) matrix = slice!double([2, 2], 0); | Slice!(const(double)*, 2) const_matrix = matrix.as!(const double); |} | |/++ |Takes a field `source` and a slice `indexes`, and creates a view of source as if its elements were reordered according to indexes. |`indexes` may include only a subset of the elements of `source` and may also repeat elements. | |Params: | source = a filed, source of data. `source` must be an array or a pointer, or have `opIndex` primitive. Full random access range API is not required. | indexes = a slice, source of indexes. |Returns: | n-dimensional slice with the same kind, shape and strides. | |See_also: `indexed` is similar to $(LREF, vmap), but a field (`[]`) is used instead of a function (`()`), and order of arguments is reversed. |+/ |Slice!(IndexIterator!(Iterator, Field), N, kind) | indexed(Field, Iterator, size_t N, SliceKind kind) | (Field source, Slice!(Iterator, N, kind) indexes) |{ | return typeof(return)( | indexes._structure, | IndexIterator!(Iterator, Field)( | indexes._iterator, | source)); |} | |/// ditto |auto indexed(Field, S)(Field source, S[] indexes) |{ | return indexed(source, indexes.sliced); |} | |/// ditto |auto indexed(Field, S)(Field source, S indexes) | if (hasAsSlice!S) |{ | return indexed(source, indexes.asSlice); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto source = [1, 2, 3, 4, 5]; | auto indexes = [4, 3, 1, 2, 0, 4]; | auto ind = source.indexed(indexes); | assert(ind == [5, 4, 2, 3, 1, 5]); | | assert(ind.retro == source.indexed(indexes.retro)); | | ind[3] += 10; // for index 2 | // 0 1 2 3 4 | assert(source == [1, 2, 13, 4, 5]); |} | |/++ |Maps indexes pairs to subslices. |Params: | sliceable = pointer, array, ndslice, series, or something sliceable with `[a .. b]`. | slices = ndslice composed of indexes pairs. |Returns: | ndslice composed of subslices. |See_also: $(LREF chopped), $(LREF pairwise). 
|+/ |Slice!(SubSliceIterator!(Iterator, Sliceable), N, kind) | subSlices(Iterator, size_t N, SliceKind kind, Sliceable)( | Sliceable sliceable, | Slice!(Iterator, N, kind) slices, | ) |{ | return typeof(return)( | slices._structure, | SubSliceIterator!(Iterator, Sliceable)(slices._iterator, sliceable) | ); |} | |/// ditto |auto subSlices(S, Sliceable)(Sliceable sliceable, S[] slices) |{ | return subSlices(sliceable, slices.sliced); |} | |/// ditto |auto subSlices(S, Sliceable)(Sliceable sliceable, S slices) | if (hasAsSlice!S) |{ | return subSlices(sliceable, slices.asSlice); |} | |/// |@safe pure version(mir_test) unittest |{ | import mir.functional: staticArray; | auto subs =[ | staticArray(2, 4), | staticArray(2, 10), | ]; | auto sliceable = 10.iota; | | auto r = sliceable.subSlices(subs); | assert(r == [ | iota([4 - 2], 2), | iota([10 - 2], 2), | ]); |} | |/++ |Maps indexes pairs to subslices. |Params: | bounds = ndslice composed of consequent (`a_i <= a_(i+1)`) pairwise index bounds. | sliceable = pointer, array, ndslice, series, or something sliceable with `[a_i .. a_(i+1)]`. |Returns: | ndslice composed of subslices. |See_also: $(LREF pairwise), $(LREF subSlices). |+/ |Slice!(ChopIterator!(Iterator, Sliceable)) chopped(Iterator, Sliceable)( | Sliceable sliceable, | Slice!Iterator bounds, | ) |in |{ | debug(mir) | foreach(b; bounds.pairwise!"a <= b") | assert(b); |} |do { | | sizediff_t length = bounds._lengths[0] <= 1 ? 0 : bounds._lengths[0] - 1; | static if (hasLength!Sliceable) | { | if (length && bounds[length - 1] > sliceable.length) | { | version (D_Exceptions) | throw choppedException; | else | assert(0, choppedExceptionMsg); | } | } | | return typeof(return)([size_t(length)], ChopIterator!(Iterator, Sliceable)(bounds._iterator, sliceable)); |} | |/// ditto |auto chopped(S, Sliceable)(Sliceable sliceable, S[] bounds) |{ | return chopped(sliceable, bounds.sliced); |} | |/// ditto |auto chopped(S, Sliceable)(Sliceable sliceable, S bounds) | if (hasAsSlice!S) |{ | return chopped(sliceable, bounds.asSlice); |} | |/// |@safe pure version(mir_test) unittest |{ | import mir.functional: staticArray; | import mir.ndslice.slice : sliced; | auto pairwiseIndexes = [2, 4, 10].sliced; | auto sliceable = 10.iota; | | auto r = sliceable.chopped(pairwiseIndexes); | assert(r == [ | iota([4 - 2], 2), | iota([10 - 4], 4), | ]); |} | |/++ |Groups slices into a slice of refTuples. The slices must have identical strides or be 1-dimensional. |Params: | sameStrides = if `true` assumes that all slices has the same strides. | slices = list of slices |Returns: | n-dimensional slice of elements refTuple |See_also: $(SUBREF slice, Slice.strides). |+/ |auto zip | (bool sameStrides = false, Slices...)(Slices slices) | if (Slices.length > 1 && allSatisfy!(isConvertibleToSlice, Slices)) |{ | static if (allSatisfy!(isSlice, Slices)) | { | enum N = Slices[0].N; | foreach(i, S; Slices[1 .. 
$]) | { | static assert(S.N == N, "zip: all Slices must have the same dimension count"); | assert(slices[i + 1]._lengths == slices[0]._lengths, "zip: all slices must have the same lengths"); | static if (sameStrides) | assert(slices[i + 1].strides == slices[0].strides, "zip: all slices must have the same strides when unpacked"); | } | static if (!sameStrides && minElem(staticMap!(kindOf, Slices)) != Contiguous) | { | static assert(N == 1, "zip: cannot zip canonical and universal multidimensional slices if `sameStrides` is false"); | mixin(`return .zip(` ~ _iotaArgs!(Slices.length, "slices[", "].hideStride, ") ~`);`); | } | else | { | enum kind = maxElem(staticMap!(kindOf, Slices)); | alias Iterator = ZipIterator!(staticMap!(_IteratorOf, Slices)); | alias Ret = Slice!(Iterator, N, kind); | auto structure = Ret._Structure.init; | structure[0] = slices[0]._lengths; | foreach (i; Iota!(Ret.S)) | structure[1][i] = slices[0]._strides[i]; | return Ret(structure, mixin("Iterator(" ~ _iotaArgs!(Slices.length, "slices[", "]._iterator, ") ~ ")")); | } | } | else | { | return .zip(toSlices!slices); | } |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : flattened, iota; | | auto alpha = iota!int(4, 3); | auto beta = slice!int(4, 3).universal; | | auto m = zip!true(alpha, beta); | foreach (r; m) | foreach (e; r) | e.b = e.a; | assert(alpha == beta); | | beta[] = 0; | foreach (e; m.flattened) | e.b = cast(int)e.a; | assert(alpha == beta); |} | |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : flattened, iota; | | auto alpha = iota!int(4).universal; | auto beta = new int[4]; | | auto m = zip(alpha, beta); | foreach (e; m) | e.b = e.a; | assert(alpha == beta); |} | |/++ |Selects a slice from a zipped slice. |Params: | name = name of a slice to unzip. | slice = zipped slice |Returns: | unzipped slice |+/ |auto unzip | (char name, size_t N, SliceKind kind, Iterators...) | (Slice!(ZipIterator!Iterators, N, kind) slice) |{ | enum size_t i = name - 'a'; | static assert(i < Iterators.length, `unzip: constraint: size_t(name - 'a') < Iterators.length`); | return Slice!(Iterators[i], N, kind)(slice._structure, slice._iterator._iterators[i]).unhideStride; |} | |/// |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : iota; | | auto alpha = iota!int(4, 3); | auto beta = iota!int([4, 3], 1).slice; | | auto m = zip(alpha, beta); | | static assert(is(typeof(unzip!'a'(m)) == typeof(alpha))); | static assert(is(typeof(unzip!'b'(m)) == typeof(beta))); | | assert(m.unzip!'a' == alpha); | assert(m.unzip!'b' == beta); |} | |private enum TotalDim(NdFields...) = [staticMap!(DimensionCount, NdFields)].sum; | |/++ |Sliding map for vectors. |Works with packed slices. | |Suitable for simple convolution algorithms. | |Params: | params = windows length. | fun = map functions with `params` arity. |See_also: $(LREF pairwise), $(LREF diff). |+/ |template slide(size_t params, alias fun) | if (params <= 'z' - 'a' + 1) |{ | import mir.functional: naryFun; | | static if (params > 1 && __traits(isSame, naryFun!fun, fun)) | { | @optmath: | /++ | Params: | slice = An input slice with first dimension pack equals to one (e.g. 1-dimensional for not packed slices). | Returns: | 1d-slice composed of `fun(slice[i], ..., slice[i + params - 1])`. 
| +/ | auto slide(Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | if (N == 1) | { | auto s = slice.map!"a".flattened; | if (cast(sizediff_t)s._lengths[0] < sizediff_t(params - 1)) | s._lengths[0] = 0; | else | s._lengths[0] -= params - 1; | | alias I = SlideIterator!(_IteratorOf!(typeof(s)), params, fun); | return Slice!(I)( | s._structure, | I(s._iterator)); | } | | /// ditto | auto slide(S)(S[] slice) | { | return slide(slice.sliced); | } | | /// ditto | auto slide(S)(S slice) | if (hasAsSlice!S) | { | return slide(slice.asSlice); | } | } | else | static if (params == 1) | alias slide = .map!(naryFun!fun); | else alias slide = .slide!(params, naryFun!fun); |} | |/// |version(mir_test) unittest |{ | auto data = 10.iota; | auto sw = data.slide!(3, "a + 2 * b + c"); | | import mir.utility: max; | assert(sw.length == max(0, cast(ptrdiff_t)data.length - 3 + 1)); | assert(sw == sw.length.iota.map!"(a + 1) * 4"); | assert(sw == [4, 8, 12, 16, 20, 24, 28, 32]); |} | |/++ |Pairwise map for vectors. |Works with packed slices. | |Params: | fun = function to accumulate | lag = an integer indicating which lag to use |Returns: lazy ndslice composed of `fun(a_n, a_n+1)` values. | |See_also: $(LREF slide), $(LREF subSlices). |+/ |alias pairwise(alias fun, size_t lag = 1) = slide!(lag + 1, fun); | |/// |version(mir_test) unittest |{ | assert([2, 4, 3, -1].sliced.pairwise!"a + b" == [6, 7, 2]); |} | |/++ |Differences between vector elements. |Works with packed slices. | |Params: | lag = an integer indicating which lag to use |Returns: lazy differences. | |See_also: $(LREF slide), $(LREF slide). |+/ |alias diff(size_t lag = 1) = pairwise!(('a' + lag) ~ " - a", lag); | |/// |version(mir_test) unittest |{ | assert([2, 4, 3, -1].sliced.diff == [2, -1, -4]); |} | |/// packed slices |version(mir_test) unittest |{ | // 0 1 2 3 | // 4 5 6 7 | // 8 9 10 11 | auto s = iota(3, 4); | import std.stdio; | assert(iota(3, 4).byDim!0.diff == [ | [4, 4, 4, 4], | [4, 4, 4, 4]]); | assert(iota(3, 4).byDim!1.diff == [ | [1, 1, 1], | [1, 1, 1], | [1, 1, 1]]); |} | | |/++ |Cartesian product. | |Constructs lazy cartesian product $(SUBREF slice, Slice) without memory allocation. | |Params: | fields = list of fields with lengths or ndFields with shapes |Returns: $(SUBREF ndfield, Cartesian)`!NdFields(fields).`$(SUBREF slice, slicedNdField)`;` |+/ |auto cartesian(NdFields...)(NdFields fields) | if (NdFields.length > 1 && allSatisfy!(templateOr!(hasShape, hasLength), NdFields)) |{ | return Cartesian!NdFields(fields).slicedNdField; |} | |/// 1D x 1D |version(mir_test) unittest |{ | auto a = [10, 20, 30]; | auto b = [ 1, 2, 3]; | | auto c = cartesian(a, b) | .map!"a + b"; | | assert(c == [ | [11, 12, 13], | [21, 22, 23], | [31, 32, 33]]); |} | |/// 1D x 2D |version(mir_test) unittest |{ | auto a = [10, 20, 30]; | auto b = iota([2, 3], 1); | | auto c = cartesian(a, b) | .map!"a + b"; | | assert(c.shape == [3, 2, 3]); | | assert(c == [ | [ | [11, 12, 13], | [14, 15, 16], | ], | [ | [21, 22, 23], | [24, 25, 26], | ], | [ | [31, 32, 33], | [34, 35, 36], | ]]); |} | |/// 1D x 1D x 1D |version(mir_test) unittest |{ | auto u = [100, 200]; | auto v = [10, 20, 30]; | auto w = [1, 2]; | | auto c = cartesian(u, v, w) | .map!"a + b + c"; | | assert(c.shape == [2, 3, 2]); | | assert(c == [ | [ | [111, 112], | [121, 122], | [131, 132], | ], | [ | [211, 212], | [221, 222], | [231, 232], | ]]); |} | | | |/++ |$(LINK2 https://en.wikipedia.org/wiki/Kronecker_product, Kronecker product). 
| |Constructs lazy kronecker product $(SUBREF slice, Slice) without memory allocation. |+/ |template kronecker(alias fun = product) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!fun, fun)) | | /++ | Params: | fields = list of either fields with lengths or ndFields with shapes. | All ndFields must have the same dimension count. | Returns: | $(SUBREF ndfield, Kronecker)`!(fun, NdFields)(fields).`$(SUBREF slice, slicedNdField) | +/ | @optmath auto kronecker(NdFields...)(NdFields fields) | if (allSatisfy!(hasShape, NdFields) || allSatisfy!(hasLength, NdFields)) | { | return Kronecker!(fun, NdFields)(fields).slicedNdField; | } | else | alias kronecker = .kronecker!(naryFun!fun); |} | |/// 2D |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.slice : sliced; | | // eye | auto a = slice!double([4, 4], 0); | a.diagonal[] = 1; | | auto b = [ 1, -1, | -1, 1].sliced(2, 2); | | auto c = kronecker(a, b); | | assert(c == [ | [ 1, -1, 0, 0, 0, 0, 0, 0], | [-1, 1, 0, 0, 0, 0, 0, 0], | [ 0, 0, 1, -1, 0, 0, 0, 0], | [ 0, 0, -1, 1, 0, 0, 0, 0], | [ 0, 0, 0, 0, 1, -1, 0, 0], | [ 0, 0, 0, 0, -1, 1, 0, 0], | [ 0, 0, 0, 0, 0, 0, 1, -1], | [ 0, 0, 0, 0, 0, 0, -1, 1]]); |} | |/// 1D |version(mir_test) unittest |{ | auto a = iota([3], 1); | | auto b = [ 1, -1]; | | auto c = kronecker(a, b); | | assert(c == [1, -1, 2, -2, 3, -3]); |} | |/// 2D with 3 arguments |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.slice : sliced; | | auto a = [ 1, 2, | 3, 4].sliced(2, 2); | | auto b = [ 1, 0, | 0, 1].sliced(2, 2); | | auto c = [ 1, -1, | -1, 1].sliced(2, 2); | | auto d = kronecker(a, b, c); | | assert(d == [ | [ 1, -1, 0, 0, 2, -2, 0, 0], | [-1, 1, 0, 0, -2, 2, 0, 0], | [ 0, 0, 1, -1, 0, 0, 2, -2], | [ 0, 0, -1, 1, 0, 0, -2, 2], | [ 3, -3, 0, 0, 4, -4, 0, 0], | [-3, 3, 0, 0, -4, 4, 0, 0], | [ 0, 0, 3, -3, 0, 0, 4, -4], | [ 0, 0, -3, 3, 0, 0, -4, 4]]); |} | |/++ |$(HTTPS en.wikipedia.org/wiki/Magic_square, Magic square). |Params: | length = square matrix length. |Returns: | Lazy magic matrix. |+/ |auto magic(size_t length) |{ 0000000| assert(length > 0); | static if (is(size_t == ulong)) 0000000| assert(length <= uint.max); | else | assert(length <= ushort.max); | import mir.ndslice.field: MagicField; 0000000| return MagicField(length).slicedField(length, length); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.math.sum; | import mir.ndslice: slice, magic, byDim, map, as, repeat, diagonal, antidiagonal; | | bool isMagic(S)(S matrix) | { | auto n = matrix.length; | auto c = n * (n * n + 1) / 2; // magic number | return // check shape | matrix.length!0 > 0 && matrix.length!0 == matrix.length!1 | && // each row sum should equal magic number | matrix.byDim!0.map!sum == c.repeat(n) | && // each columns sum should equal magic number | matrix.byDim!1.map!sum == c.repeat(n) | && // diagonal sum should equal magic number | matrix.diagonal.sum == c | && // antidiagonal sum should equal magic number | matrix.antidiagonal.sum == c; | } | | assert(isMagic(magic(1))); | assert(!isMagic(magic(2))); // 2x2 magic square does not exist | foreach(n; 3 .. 24) | assert(isMagic(magic(n))); | assert(isMagic(magic(3).as!double.slice)); |} | |/++ |Chops 1D input slice into n chunks with ascending or descending lengths. | |`stairs` can be used to pack and unpack symmetric and triangular matrix storage. | |Note: `stairs` is defined for 1D (packet) input and 2D (general) input. 
| This part of documentation is for 1D input. | |Params: | type = $(UL | $(LI `"-"` for stairs with lengths `n, n-1, ..., 1`.) | $(LI `"+"` for stairs with lengths `1, 2, ..., n`;) | ) | slice = input slice with length equal to `n * (n + 1) / 2` | n = stairs count |Returns: | 1D contiguous slice composed of 1D contiguous slices. | |See_also: $(LREF triplets) $(LREF ._stairs.2) |+/ |Slice!(StairsIterator!(Iterator, type)) stairs(string type, Iterator)(Slice!Iterator slice, size_t n) | if (type == "+" || type == "-") |{ | assert(slice.length == (n + 1) * n / 2, "stairs: slice length must be equal to n * (n + 1) / 2, where n is stairs count."); | static if (type == "+") | size_t length = 1; | else | size_t length = n; | return StairsIterator!(Iterator, type)(length, slice._iterator).sliced(n); |} | |/// ditto |Slice!(StairsIterator!(S*, type)) stairs(string type, S)(S[] slice, size_t n) | if (type == "+" || type == "-") |{ | return stairs!type(slice.sliced, n); |} | |/// ditto |auto stairs(string type, S)(S slice, size_t n) | if (hasAsSlice!S && (type == "+" || type == "-")) |{ | return stairs!type(slice.asSlice, n); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.topology: iota, stairs; | | auto pck = 15.iota; | auto inc = pck.stairs!"+"(5); | auto dec = pck.stairs!"-"(5); | | assert(inc == [ | [0], | [1, 2], | [3, 4, 5], | [6, 7, 8, 9], | [10, 11, 12, 13, 14]]); | assert(inc[1 .. $][2] == [6, 7, 8, 9]); | | assert(dec == [ | [0, 1, 2, 3, 4], | [5, 6, 7, 8], | [9, 10, 11], | [12, 13], | [14]]); | assert(dec[1 .. $][2] == [12, 13]); | | static assert(is(typeof(inc.front) == typeof(pck))); | static assert(is(typeof(dec.front) == typeof(pck))); |} | |/++ |Slice composed of rows of lower or upper triangular matrix. | |`stairs` can be used to pack and unpack symmetric and triangular matrix storage. | |Note: `stairs` is defined for 1D (packet) input and 2D (general) input. | This part of documentation is for 2D input. | |Params: | type = $(UL | $(LI `"+"` for stairs with lengths `1, 2, ..., n`, lower matrix;) | $(LI `"-"` for stairs with lengths `n, n-1, ..., 1`, upper matrix.) | ) | slice = input slice with length equal to `n * (n + 1) / 2` |Returns: | 1D slice composed of 1D contiguous slices. | |See_also: $(LREF _stairs) $(SUBREF dynamic, transposed), $(LREF universal) |+/ |auto stairs(string type, Iterator, SliceKind kind)(Slice!(Iterator, 2, kind) slice) | if (type == "+" || type == "-") |{ | assert(slice.length!0 == slice.length!1, "stairs: input slice must be a square matrix."); | static if (type == "+") | { | return slice | .pack!1 | .map!"a" | .zip([slice.length].iota!size_t(1)) | .map!"a[0 .. b]"; | } | else | { | return slice | .pack!1 | .map!"a" | .zip([slice.length].iota!size_t) | .map!"a[b .. 
$]"; | } |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.topology: iota, as, stairs; | | auto gen = [3, 3].iota.as!double; | auto inc = gen.stairs!"+"; | auto dec = gen.stairs!"-"; | | assert(inc == [ | [0], | [3, 4], | [6, 7, 8]]); | | assert(dec == [ | [0, 1, 2], | [4, 5], | [8]]); | | static assert(is(typeof(inc.front) == typeof(gen.front))); | static assert(is(typeof(dec.front) == typeof(gen.front))); | | ///////////////////////////////////////// | // Pack lower and upper matrix parts | auto n = gen.length; | auto m = n * (n + 1) / 2; | // allocate memory | import mir.ndslice.allocation: uninitSlice; | auto lowerData = m.uninitSlice!double; | auto upperData = m.uninitSlice!double; | // construct packed stairs | auto lower = lowerData.stairs!"+"(n); | auto upper = upperData.stairs!"-"(n); | // copy data | import mir.algorithm.iteration: each; | each!"a[] = b"(lower, inc); | each!"a[] = b"(upper, dec); | | assert(&lower[0][0] is &lowerData[0]); | assert(&upper[0][0] is &upperData[0]); | | assert(lowerData == [0, 3, 4, 6, 7, 8]); | assert(upperData == [0, 1, 2, 4, 5, 8]); |} | |/++ |Returns a slice that can be iterated by dimension. Transposes dimensions on top and then packs them. | |Combines $(LREF transposed) and $(LREF ipack). | |Params: | Dimensions = dimensions to perform iteration on |Returns: | n-dimensional slice ipacked to allow iteration by dimension |See_also: | $(LREF slice), | $(LREF ipack), | $(LREF transposed). |+/ |template byDim(Dimensions...) | if (Dimensions.length > 0) |{ | import mir.ndslice.internal : isSize_t; | import std.meta : allSatisfy; | | static if (!allSatisfy!(isSize_t, Dimensions)) | { | import std.meta : staticMap; | import mir.ndslice.internal : toSize_t; | | alias byDim = .byDim!(staticMap!(toSize_t, Dimensions)); | } | else | { | import mir.ndslice.slice : Slice, SliceKind; | /++ | Params: | slice = input slice (may not be 1-dimensional slice) | Returns: | n-dimensional slice ipacked to allow iteration by dimension | +/ | @optmath auto byDim(Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | { | import mir.ndslice.topology : ipack; | import mir.ndslice.internal : DimensionsCountCTError; | | mixin DimensionsCountCTError; | | static if (N == 1) | { | return slice; | } | else | { | import mir.ndslice.dynamic: transposed; | return slice | .transposed!Dimensions | .ipack!(Dimensions.length); | } | } | } |} | |/// 2-dimensional slice support |@safe @nogc pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | // ------------ | // | 0 1 2 3 | | // | 4 5 6 7 | | // | 8 9 10 11 | | // ------------ | auto slice = iota(3, 4); | //-> | // | 3 | | //-> | // | 4 | | size_t[1] shape3 = [3]; | size_t[1] shape4 = [4]; | | // ------------ | // | 0 1 2 3 | | // | 4 5 6 7 | | // | 8 9 10 11 | | // ------------ | auto x = slice.byDim!0; | assert(x.shape == shape3); | assert(x.front.shape == shape4); | assert(x.front == iota(4)); | x.popFront; | assert(x.front == iota([4], 4)); | | // --------- | // | 0 4 8 | | // | 1 5 9 | | // | 2 6 10 | | // | 3 7 11 | | // --------- | auto y = slice.byDim!1; | assert(y.shape == shape4); | assert(y.front.shape == shape3); | assert(y.front == iota([3], 0, 4)); | y.popFront; | assert(y.front == iota([3], 1, 4)); |} | |/// 3-dimensional slice support, N-dimensional also supported |@safe @nogc pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, universal, flattened, reshape; | import mir.ndslice.dynamic : strided, transposed; | // ---------------- 
| // | 0 1 2 3 4 | | // | 5 6 7 8 9 | | // | 10 11 12 13 14 | | // | 15 16 17 18 19 | | // - - - - - - - - | // | 20 21 22 23 24 | | // | 25 26 27 28 29 | | // | 30 31 32 33 34 | | // | 35 36 37 38 39 | | // - - - - - - - - | // | 40 41 42 43 44 | | // | 45 46 47 48 49 | | // | 50 51 52 53 54 | | // | 55 56 57 58 59 | | // ---------------- | auto slice = iota(3, 4, 5); | //-> | // | 4 5 | | //-> | // | 3 5 | | //-> | // | 3 4 | | //-> | // | 5 4 | | //-> | // | 3 | | //-> | // | 4 | | //-> | // | 5 | | size_t[2] shape45 = [4, 5]; | size_t[2] shape35 = [3, 5]; | size_t[2] shape34 = [3, 4]; | size_t[2] shape54 = [5, 4]; | size_t[1] shape3 = [3]; | size_t[1] shape4 = [4]; | size_t[1] shape5 = [5]; | | // ---------------- | // | 0 1 2 3 4 | | // | 5 6 7 8 9 | | // | 10 11 12 13 14 | | // | 15 16 17 18 19 | | // - - - - - - - - | // | 20 21 22 23 24 | | // | 25 26 27 28 29 | | // | 30 31 32 33 34 | | // | 35 36 37 38 39 | | // - - - - - - - - | // | 40 41 42 43 44 | | // | 45 46 47 48 49 | | // | 50 51 52 53 54 | | // | 55 56 57 58 59 | | // ---------------- | auto x = slice.byDim!0; | assert(x.shape == shape3); | assert(x.front.shape == shape45); | assert(x.front == iota([4, 5])); | x.popFront; | assert(x.front == iota([4, 5], (4 * 5))); | | // ---------------- | // | 0 1 2 3 4 | | // | 20 21 22 23 24 | | // | 40 41 42 43 44 | | // - - - - - - - - | // | 5 6 7 8 9 | | // | 25 26 27 28 29 | | // | 45 46 47 48 49 | | // - - - - - - - - | // | 10 11 12 13 14 | | // | 30 31 32 33 34 | | // | 50 51 52 53 54 | | // - - - - - - - - | // | 15 16 17 18 19 | | // | 35 36 37 38 39 | | // | 55 56 57 58 59 | | // ---------------- | auto y = slice.byDim!1; | assert(y.shape == shape4); | assert(y.front.shape == shape35); | int err; | assert(y.front == slice.universal.strided!1(4).reshape([3, -1], err)); | y.popFront; | assert(y.front.front == iota([5], 5)); | | // ------------- | // | 0 5 10 15 | | // | 20 25 30 35 | | // | 40 45 50 55 | | // - - - - - - - | // | 1 6 11 16 | | // | 21 26 31 36 | | // | 41 46 51 56 | | // - - - - - - - | // | 2 7 12 17 | | // | 22 27 32 37 | | // | 42 47 52 57 | | // - - - - - - - | // | 3 8 13 18 | | // | 23 28 33 38 | | // | 43 48 53 58 | | // - - - - - - - | // | 4 9 14 19 | | // | 24 29 34 39 | | // | 44 49 54 59 | | // ------------- | auto z = slice.byDim!2; | assert(z.shape == shape5); | assert(z.front.shape == shape34); | assert(z.front == iota([3, 4], 0, 5)); | z.popFront; | assert(z.front.front == iota([4], 1, 5)); | | // ---------- | // | 0 20 40 | | // | 5 25 45 | | // | 10 30 50 | | // | 15 35 55 | | // - - - - - | // | 1 21 41 | | // | 6 26 46 | | // | 11 31 51 | | // | 16 36 56 | | // - - - - - | // | 2 22 42 | | // | 7 27 47 | | // | 12 32 52 | | // | 17 37 57 | | // - - - - - | // | 3 23 43 | | // | 8 28 48 | | // | 13 33 53 | | // | 18 38 58 | | // - - - - - | // | 4 24 44 | | // | 9 29 49 | | // | 14 34 54 | | // | 19 39 59 | | // ---------- | auto a = slice.byDim!(2, 1); | assert(a.shape == shape54); | assert(a.front.shape == shape4); | assert(a.front.unpack == iota([3, 4], 0, 5).universal.transposed!1); | a.popFront; | assert(a.front.front == iota([3], 1, 20)); |} | |// Ensure works on canonical |@safe @nogc pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, canonical; | // ------------ | // | 0 1 2 3 | | // | 4 5 6 7 | | // | 8 9 10 11 | | // ------------ | auto slice = iota(3, 4).canonical; | //-> | // | 3 | | //-> | // | 4 | | size_t[1] shape3 = [3]; | size_t[1] shape4 = [4]; | | // ------------ | // | 0 1 2 3 | | // | 4 5 
6 7 | | // | 8 9 10 11 | | // ------------ | auto x = slice.byDim!0; | assert(x.shape == shape3); | assert(x.front.shape == shape4); | assert(x.front == iota(4)); | x.popFront; | assert(x.front == iota([4], 4)); | | // --------- | // | 0 4 8 | | // | 1 5 9 | | // | 2 6 10 | | // | 3 7 11 | | // --------- | auto y = slice.byDim!1; | assert(y.shape == shape4); | assert(y.front.shape == shape3); | assert(y.front == iota([3], 0, 4)); | y.popFront; | assert(y.front == iota([3], 1, 4)); |} | |// Ensure works on universal |@safe @nogc pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, universal; | // ------------ | // | 0 1 2 3 | | // | 4 5 6 7 | | // | 8 9 10 11 | | // ------------ | auto slice = iota(3, 4).universal; | //-> | // | 3 | | //-> | // | 4 | | size_t[1] shape3 = [3]; | size_t[1] shape4 = [4]; | | // ------------ | // | 0 1 2 3 | | // | 4 5 6 7 | | // | 8 9 10 11 | | // ------------ | auto x = slice.byDim!0; | assert(x.shape == shape3); | assert(x.front.shape == shape4); | assert(x.front == iota(4)); | x.popFront; | assert(x.front == iota([4], 4)); | | // --------- | // | 0 4 8 | | // | 1 5 9 | | // | 2 6 10 | | // | 3 7 11 | | // --------- | auto y = slice.byDim!1; | assert(y.shape == shape4); | assert(y.front.shape == shape3); | assert(y.front == iota([3], 0, 4)); | y.popFront; | assert(y.front == iota([3], 1, 4)); |} | |// 1-dimensional slice support |@safe @nogc pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | // ------- | // | 0 1 2 | | // ------- | auto slice = iota(3); | auto x = slice.byDim!0; | assert(x == slice); |} | |/++ |Field (element's member) projection. | |Params: | name = element's member name |Returns: | lazy n-dimensional slice of the same shape |See_also: | $(LREF map) |+/ | |template member(string name) | if (name.length) |{ | /++ | Params: | slice = n-dimensional slice composed of structs, classes or unions | Returns: | lazy n-dimensional slice of the same shape | +/ | Slice!(MemberIterator!(Iterator, name), N, kind) member(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | { | return typeof(return)(slice._structure, MemberIterator!(Iterator, name)(slice._iterator)); | } | | /// ditto | Slice!(MemberIterator!(T*, name)) member(T)(T[] array) | { | return member(array.sliced); | } | | /// ditto | auto member(T)(T withAsSlice) | if (hasAsSlice!T) | { | return member(withAsSlice.asSlice); | } |} | |/// |version(mir_test) |@safe pure unittest |{ | // struct, union or class | struct S | { | // Property support | // Getter always must be defined. | double _x; | double x() @property | { | return x; | } | void x(double x) @property | { | _x = x; | } | | /// Field support | double y; | | /// Zero argument function support | double f() | { | return _x * 2; | } | } | | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto matrix = slice!S(2, 3); | matrix.member!"x"[] = [2, 3].iota; | matrix.member!"y"[] = matrix.member!"f"; | assert(matrix.member!"y" == [2, 3].iota * 2); |} | |/++ |Functional deep-element wise reduce of a slice composed of fields or iterators. |+/ |template orthogonalReduceField(alias fun) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!fun, fun)) | { | @optmath: | /++ | Params: | slice = Non empty input slice composed of fields or iterators. | Returns: | a lazy field with each element of which is reduced value of element of the same index of all iterators. 
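|        For example, reducing the two arrays `[1, 2]` and `[10, 20]` with
|        `fun = "a + b"` and initial value `0` gives a field with elements
|        `0 + 1 + 10 == 11` and `0 + 2 + 20 == 22`: each element folds the
|        initial value with the elements at the same index of every input.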
| +/ | OrthogonalReduceField!(Iterator, fun, I) orthogonalReduceField(I, Iterator)(I initialValue, Slice!Iterator slice) | { | return typeof(return)(slice, initialValue); | } | | /// ditto | OrthogonalReduceField!(T*, fun, I) orthogonalReduceField(I, T)(I initialValue, T[] array) | { | return orthogonalReduceField(initialValue, array.sliced); | } | | /// ditto | auto orthogonalReduceField(I, T)(I initialValue, T withAsSlice) | if (hasAsSlice!T) | { | return orthogonalReduceField(initialValue, withAsSlice.asSlice); | } | } | else alias orthogonalReduceField = .orthogonalReduceField!(naryFun!fun); |} | |/// bit array operations |version(mir_test) |unittest |{ | import mir.ndslice.slice: slicedField; | import mir.ndslice.allocation: bitSlice; | import mir.ndslice.dynamic: strided; | import mir.ndslice.topology: iota, orthogonalReduceField; | auto len = 100; | auto a = len.bitSlice; | auto b = len.bitSlice; | auto c = len.bitSlice; | a[len.iota.strided!0(7)][] = true; | b[len.iota.strided!0(11)][] = true; | c[len.iota.strided!0(13)][] = true; | | // this is valid since bitslices above are oroginal slices of allocated memory. | auto and = | orthogonalReduceField!"a & b"(size_t.max, [ | a.iterator._field._field, // get raw data pointers | b.iterator._field._field, | c.iterator._field._field, | ]) // operation on size_t | .bitwiseField | .slicedField(len); | | assert(and == (a & b & c)); |} | |/++ |Constructs a lazy view of triplets with `left`, `center`, and `right` members. | |Returns: Slice of the same length composed of $(SUBREF iterator, Triplet) triplets. |The `center` member is type of a slice element. |The `left` and `right` members has the same type as slice. | |The module contains special function $(LREF collapse) to handle |left and right side of triplets in one expression. | |Params: | slice = a slice or an array to iterate over | |Example: |------ |triplets(eeeeee) => | |||c|lllll| ||r|c|llll| ||rr|c|lll| ||rrr|c|ll| ||rrrr|c|l| ||rrrrr|c|| |------ | |See_also: $(LREF stairs). |+/ |Slice!(TripletIterator!(Iterator, kind)) triplets(Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) slice) |{ | return typeof(return)(slice.length, typeof(return).Iterator(0, slice)); |} | |/// ditto |Slice!(TripletIterator!(T*)) triplets(T)(scope return T[] slice) |{ | return .triplets(slice.sliced); |} | |/// ditto |auto triplets(string type, S)(S slice, size_t n) | if (hasAsSlice!S) |{ | return .triplets(slice.asSlice); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.topology: triplets, member, iota; | | auto a = [4, 5, 2, 8]; | auto h = a.triplets; | | assert(h[1].center == 5); | assert(h[1].left == [4]); | assert(h[1].right == [2, 8]); | | h[1].center = 9; | assert(a[1] == 9); | | assert(h.member!"center" == a); | | // `triplets` topology can be used with iota to index a slice | auto s = a.sliced; | auto w = s.length.iota.triplets[1]; | | assert(&s[w.center] == &a[1]); | assert(s[w.left].field is a[0 .. 1]); | assert(s[w.right].field is a[2 .. $]); |} ../../../.dub/packages/mir-algorithm-3.4.17/mir-algorithm/source/mir/ndslice/topology.d is 0% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.4.17-mir-algorithm-source-mir-series.lst |/++ |$(H1 Index-series) | |The module contains $(LREF Series) data structure with special iteration and indexing methods. |It is aimed to construct index or time-series using Mir and Phobos algorithms. | |Public_imports: $(MREF mir,ndslice,slice). 
| |Copyright: Copyright © 2017, Kaleidic Associates Advisory Limited |Authors: Ilya Yaroshenko | |Macros: |NDSLICE = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.series; | |public import mir.ndslice.slice; |public import mir.ndslice.sorting: sort; |import mir.ndslice.iterator: IotaIterator; |import mir.ndslice.sorting: transitionIndex; |import mir.qualifier; |import std.traits; | |/++ |See_also: $(LREF unionSeries), $(LREF troykaSeries), $(LREF troykaGalop). |+/ |@safe version(mir_test) unittest |{ | import mir.ndslice; | import mir.series; | | import mir.array.allocation: array; | import mir.algorithm.setops: multiwayUnion; | | import std.datetime: Date; | static if (__VERSION__ >= 2085) import core.lifetime: move; else import std.algorithm.mutation: move; | import std.exception: collectExceptionMsg; | | ////////////////////////////////////// | // Constructs two time-series. | ////////////////////////////////////// | auto index0 = [ | Date(2017, 01, 01), | Date(2017, 03, 01), | Date(2017, 04, 01)]; | | auto data0 = [1.0, 3, 4]; | auto series0 = index0.series(data0); | | auto index1 = [ | Date(2017, 01, 01), | Date(2017, 02, 01), | Date(2017, 05, 01)]; | | auto data1 = [10.0, 20, 50]; | auto series1 = index1.series(data1); | | ////////////////////////////////////// | // asSlice method | ////////////////////////////////////// | assert(series0 | .asSlice | // ref qualifier is optional | .map!((ref key, ref value) => key.month == value) | .all); | | ////////////////////////////////////// | // get* methods | ////////////////////////////////////// | | auto refDate = Date(2017, 03, 01); | auto missingDate = Date(2016, 03, 01); | | // default value | double defaultValue = 100; | assert(series0.get(refDate, defaultValue) == 3); | assert(series0.get(missingDate, defaultValue) == defaultValue); | | // Exceptions handlers | assert(series0.get(refDate) == 3); | assert(series0.get(refDate, new Exception("My exception msg")) == 3); | assert(series0.getVerbose(refDate) == 3); | assert(series0.getExtraVerbose(refDate, "My exception msg") == 3); | | assert(collectExceptionMsg!Exception( | series0.get(missingDate) | ) == "Series double[Date]: Missing required key"); | | assert(collectExceptionMsg!Exception( | series0.get(missingDate, new Exception("My exception msg")) | ) == "My exception msg"); | | assert(collectExceptionMsg!Exception( | series0.getVerbose(missingDate) | ) == "Series double[Date]: Missing 2016-Mar-01 key"); | | assert(collectExceptionMsg!Exception( | series0.getExtraVerbose(missingDate, "My exception msg") | ) == "My exception msg. Series double[Date]: Missing 2016-Mar-01 key"); | | // assign with get* | series0.get(refDate) = 100; | assert(series0.get(refDate) == 100); | series0.get(refDate) = 3; | | // tryGet | double val; | assert(series0.tryGet(refDate, val)); | assert(val == 3); | assert(!series0.tryGet(missingDate, val)); | assert(val == 3); // val was not changed | | ////////////////////////////////////// | // Merges multiple series into one. | // Allocates using GC. M | // Makes exactly two allocations per merge: | // one for index/time and one for data. 
| ////////////////////////////////////// | auto m0 = unionSeries(series0, series1); | auto m1 = unionSeries(series1, series0); // order is matter | | assert(m0.index == [ | Date(2017, 01, 01), | Date(2017, 02, 01), | Date(2017, 03, 01), | Date(2017, 04, 01), | Date(2017, 05, 01)]); | | assert(m0.index == m1.index); | assert(m0.data == [ 1, 20, 3, 4, 50]); | assert(m1.data == [10, 20, 3, 4, 50]); | | ////////////////////////////////////// | // Joins two time-series into a one with two columns. | ////////////////////////////////////// | auto u = [index0, index1].multiwayUnion; | auto index = u.move.array; | auto data = slice!double([index.length, 2], 0); // initialized to 0 value | auto series = index.series(data); | | series[0 .. $, 0][].opIndexAssign(series0); // fill first column | series[0 .. $, 1][] = series1; // fill second column | | assert(data == [ | [1, 10], | [0, 20], | [3, 0], | [4, 0], | [0, 50]]); |} | |/// |unittest{ | | import mir.series; | | double[int] map; | map[1] = 4.0; | map[2] = 5.0; | map[4] = 6.0; | map[5] = 10.0; | map[10] = 11.0; | | const s = series(map); | | double value; | int key; | assert(s.tryGet(2, value) && value == 5.0); | assert(!s.tryGet(8, value)); | | assert(s.tryGetNext(2, value) && value == 5.0); | assert(s.tryGetPrev(2, value) && value == 5.0); | assert(s.tryGetNext(8, value) && value == 11.0); | assert(s.tryGetPrev(8, value) && value == 10.0); | assert(!s.tryGetFirst(8, 9, value)); | assert(s.tryGetFirst(2, 10, value) && value == 5.0); | assert(s.tryGetLast(2, 10, value) && value == 11.0); | assert(s.tryGetLast(2, 8, value) && value == 10.0); | | key = 2; assert(s.tryGetNextUpdateKey(key, value) && key == 2 && value == 5.0); | key = 2; assert(s.tryGetPrevUpdateKey(key, value) && key == 2 && value == 5.0); | key = 8; assert(s.tryGetNextUpdateKey(key, value) && key == 10 && value == 11.0); | key = 8; assert(s.tryGetPrevUpdateKey(key, value) && key == 5 && value == 10.0); | key = 2; assert(s.tryGetFirstUpdateLower(key, 10, value) && key == 2 && value == 5.0); | key = 10; assert(s.tryGetLastUpdateKey(2, key, value) && key == 10 && value == 11.0); | key = 8; assert(s.tryGetLastUpdateKey(2, key, value) && key == 5 && value == 10.0); |} | |import mir.ndslice.slice; |import mir.ndslice.internal: is_Slice, isIndex; |import mir.math.common: optmath; | |import std.meta; | |@optmath: | |/++ |Plain index/time observation data structure. |Observation are used as return tuple for for indexing $(LREF Series). |+/ |struct mir_observation(Index, Data) |{ | /// Date, date-time, time, or index. | Index index; | /// An alias for time-series index. | alias time = index; | /// An alias for key-value representation. | alias key = index; | /// Value or ndslice. | Data data; | /// An alias for key-value representation. | alias value = data; |} | |/// ditto |alias Observation = mir_observation; | |/// Convenient function for $(LREF Observation) construction. |auto observation(Index, Data)(Index index, Data data) |{ | return mir_observation!(Index, Data)(index, data); |} | |/++ |Convinient alias for 1D Contiguous $(LREF Series). 
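|
|`SeriesMap!(K, V)` behaves much like a sorted, array-backed counterpart of the
|built-in associative array `V[K]`: keys are kept in ascending order and lookups
|(`get`, `tryGet`, the `in` operator) perform a binary search over the index.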
|+/ |alias SeriesMap(K, V) = mir_series!(K*, V*); | |/// |version(mir_test) unittest |{ | import std.traits; | import mir.series; | | static assert (is(SeriesMap!(string, double) == Series!(string*, double*))); | | /// LHS, RHS | static assert (isAssignable!(SeriesMap!(string, double), SeriesMap!(string, double))); | static assert (isAssignable!(SeriesMap!(string, double), typeof(null))); | | static assert (isAssignable!(SeriesMap!(const string, double), SeriesMap!(string, double))); | static assert (isAssignable!(SeriesMap!(string, const double), SeriesMap!(string, double))); | static assert (isAssignable!(SeriesMap!(const string, const double), SeriesMap!(string, double))); | | static assert (isAssignable!(SeriesMap!(immutable string, double), SeriesMap!(immutable string, double))); | static assert (isAssignable!(SeriesMap!(immutable string, const double), SeriesMap!(immutable string, double))); | static assert (isAssignable!(SeriesMap!(const string, const double), SeriesMap!(immutable string, double))); | static assert (isAssignable!(SeriesMap!(string, immutable double), SeriesMap!(string, immutable double))); | static assert (isAssignable!(SeriesMap!(const string, immutable double), SeriesMap!(string, immutable double))); | static assert (isAssignable!(SeriesMap!(const string, const double), SeriesMap!(string, immutable double))); | // etc |} | |/++ |Plain index series data structure. | |`*.index[i]`/`*.key[i]`/`*.time` corresponds to `*.data[i]`/`*.value`. | |Index is assumed to be sorted. |$(LREF sort) can be used to normalise a series. |+/ |struct mir_series(IndexIterator_, Iterator_, size_t N_ = 1, SliceKind kind_ = Contiguous) |{ | private enum doUnittest = is(typeof(this) == Series!(int*, double*)); | | /// | alias IndexIterator = IndexIterator_; | | /// | alias Iterator = Iterator_; | | /// | enum size_t N = N_; | | /// | enum SliceKind kind = kind_; | | /// | Slice!(Iterator, N, kind) _data; | | /// | IndexIterator _index; | | /// Index / Key / Time type aliases | alias Index = typeof(this.front.index); | /// ditto | alias Key = Index; | /// ditto | alias Time = Index; | /// Data / Value type aliases | alias Data = typeof(this.front.data); | /// ditto | alias Value = Data; | | /// An alias for time-series index. | alias time = index; | /// An alias for key-value representation. | alias key = index; | /// An alias for key-value representation. | alias value = data; | | private enum defaultMsg() = "Series " ~ Unqual!(this.Data).stringof ~ "[" ~ Unqual!(this.Index).stringof ~ "]: Missing"; | private static immutable defaultExc() = new Exception(defaultMsg!() ~ " required key"); | |@optmath: | | /// | this()(Slice!IndexIterator index, Slice!(Iterator, N, kind) data) | { | assert(index.length == data.length, "Series constructor: index and data lengths must be equal."); | _data = data; | _index = index._iterator; | } | | | /// Construct from null | this(typeof(null)) | { | _data = _data.init; | _index = _index.init; | } | | /// | bool opEquals(RIndexIterator, RIterator, size_t RN, SliceKind rkind, )(Series!(RIndexIterator, RIterator, RN, rkind) rhs) const | { | return this.lightScopeIndex == rhs.lightScopeIndex && this._data.lightScope == rhs._data.lightScope; | } | | /++ | Index series is assumed to be sorted. | | `IndexIterator` is an iterator on top of date, date-time, time, or numbers or user defined types with defined `opCmp`. | For example, `Date*`, `DateTime*`, `immutable(long)*`, `mir.ndslice.iterator.IotaIterator`. 
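|    The property returns a 1D slice over the first `length` index elements;
|    no copy of the index is made.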
| +/ | auto index()() @property @trusted | { | return _index.sliced(_data._lengths[0]); | } | | /// ditto | auto index()() @property @trusted const | { | return _index.lightConst.sliced(_data._lengths[0]); | } | | /// ditto | auto index()() @property @trusted immutable | { | return _index.lightImmutable.sliced(_data._lengths[0]); | } | | private auto lightScopeIndex()() @property @trusted | { | return .lightScope(_index).sliced(_data._lengths[0]); | } | | private auto lightScopeIndex()() @property @trusted const | { | return .lightScope(_index).sliced(_data._lengths[0]); | } | | private auto lightScopeIndex()() @property @trusted immutable | { | return .lightScope(_index).sliced(_data._lengths[0]); | } | | /++ | Data is any ndslice with only one constraints, | `data` and `index` lengths should be equal. | +/ | auto data()() @property @trusted | { | return _data; | } | | /// ditto | auto data()() @property @trusted const | { | return _data[]; | } | | /// ditto | auto data()() @property @trusted immutable | { | return _data[]; | } | | /// | typeof(this) opBinary(string op : "~")(typeof(this) rhs) | { | return unionSeries(this.lightScope, rhs.lightScope); | } | | /// ditto | auto opBinary(string op : "~")(const typeof(this) rhs) const | { | return unionSeries(this.lightScope, rhs.lightScope); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import std.datetime: Date; | | ////////////////////////////////////// | // Constructs two time-series. | ////////////////////////////////////// | auto index0 = [1,3,4]; | auto data0 = [1.0, 3, 4]; | auto series0 = index0.series(data0); | | auto index1 = [1,2,5]; | auto data1 = [10.0, 20, 50]; | auto series1 = index1.series(data1); | | ////////////////////////////////////// | // Merges multiple series into one. | ////////////////////////////////////// | // Order is matter. | // The first slice has higher priority. | auto m0 = series0 ~ series1; | auto m1 = series1 ~ series0; | | assert(m0.index == m1.index); | assert(m0.data == [ 1, 20, 3, 4, 50]); | assert(m1.data == [10, 20, 3, 4, 50]); | } | | static if (doUnittest) | @safe pure nothrow version(mir_test) unittest | { | import std.datetime: Date; | | ////////////////////////////////////// | // Constructs two time-series. | ////////////////////////////////////// | auto index0 = [1,3,4]; | auto data0 = [1.0, 3, 4]; | auto series0 = index0.series(data0); | | auto index1 = [1,2,5]; | auto data1 = [10.0, 20, 50]; | const series1 = index1.series(data1); | | ////////////////////////////////////// | // Merges multiple series into one. | ////////////////////////////////////// | // Order is matter. | // The first slice has higher priority. | auto m0 = series0 ~ series1; | auto m1 = series1 ~ series0; | | assert(m0.index == m1.index); | assert(m0.data == [ 1, 20, 3, 4, 50]); | assert(m1.data == [10, 20, 3, 4, 50]); | } | | /++ | Special `[] =` index-assign operator for index-series. | Assigns data from `r` with index intersection. | If a index index in `r` is not in the index index for this series, then no op-assign will take place. | This and r series are assumed to be sorted. 
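|    For example (mirroring the unittest below), assigning a series with keys
|    `[0, 2, 4, 5]` into a series with keys `[1, 2, 3, 4]` updates only the
|    entries at keys `2` and `4`; all other entries are left untouched.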
| | Params: | r = rvalue index-series | +/ | void opIndexAssign(IndexIterator_, Iterator_, size_t N_, SliceKind kind_) | (Series!(IndexIterator_, Iterator_, N_, kind_) r) | { | opIndexOpAssign!("", IndexIterator_, Iterator_, N_, kind_)(r); | } | | static if (doUnittest) | /// | version(mir_test) unittest | { | auto index = [1, 2, 3, 4]; | auto data = [10.0, 10, 10, 10]; | auto series = index.series(data); | | auto rindex = [0, 2, 4, 5]; | auto rdata = [1.0, 2, 3, 4]; | auto rseries = rindex.series(rdata); | | // series[] = rseries; | series[] = rseries; | assert(series.data == [10, 2, 10, 3]); | } | | /++ | Special `[] op=` index-op-assign operator for index-series. | Op-assigns data from `r` with index intersection. | If a index index in `r` is not in the index index for this series, then no op-assign will take place. | This and r series are assumed to be sorted. | | Params: | rSeries = rvalue index-series | +/ | void opIndexOpAssign(string op, IndexIterator_, Iterator_, size_t N_, SliceKind kind_) | (auto ref Series!(IndexIterator_, Iterator_, N_, kind_) rSeries) | { | auto l = this.lightScope; | auto r = rSeries.lightScope; | if (r.empty) | return; | if (l.empty) | return; | Unqual!(typeof(*r._index)) rf = *r._index; | Unqual!(typeof(*l._index)) lf = *l._index; | goto Begin; | R: | r.popFront; | if (r.empty) | goto End; | rf = *r._index; | Begin: | if (lf > rf) | goto R; | if (lf < rf) | goto L; | E: | static if (N != 1) | mixin("l.data.front[] " ~ op ~ "= r.data.front;"); | else | mixin("l.data.front " ~ op ~ "= r.data.front;"); | | r.popFront; | if (r.empty) | goto End; | rf = *r._index; | L: | l.popFront; | if (l.empty) | goto End; | lf = *l._index; | | if (lf < rf) | goto L; | if (lf == rf) | goto E; | goto R; | End: | } | | static if (doUnittest) | /// | version(mir_test) unittest | { | auto index = [1, 2, 3, 4]; | auto data = [10.0, 10, 10, 10]; | auto series = index.series(data); | | auto rindex = [0, 2, 4, 5]; | auto rdata = [1.0, 2, 3, 4]; | auto rseries = rindex.series(rdata); | | series[] += rseries; | assert(series.data == [10, 12, 10, 13]); | } | | /++ | This function uses a search with policy sp to find the largest left subrange on which | `t < key` is true for all `t`. | The search schedule and its complexity are documented in `std.range.SearchPolicy`. | +/ | auto lowerBound(Index)(auto ref scope const Index key) | { | return opIndex(opSlice(0, lightScopeIndex.transitionIndex(key))); | } | | /// ditto | auto lowerBound(Index)(auto ref scope const Index key) const | { | return opIndex(opSlice(0, lightScopeIndex.transitionIndex(key))); | } | | | /++ | This function uses a search with policy sp to find the largest left subrange on which | `t > key` is true for all `t`. | The search schedule and its complexity are documented in `std.range.SearchPolicy`. | +/ | auto upperBound(Index)(auto ref scope const Index key) | { | return opIndex(opSlice(lightScopeIndex.transitionIndex!"a <= b"(key), length)); | } | | /// ditto | auto upperBound(Index)(auto ref scope const Index key) const | { | return opIndex(opSlice(lightScopeIndex.transitionIndex!"a <= b"(key), length)); | } | | /** | Gets data for the index. | Params: | key = index | _default = default value is returned if the series does not contains the index. | Returns: | data that corresponds to the index or default value. 
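|        Complexity: the key is located with `transitionIndex`, a binary search
|            over the sorted index, i.e. `O(log length)` comparisons per lookup.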
| */ | ref get(Index, Value)(auto ref scope const Index key, return ref Value _default) @trusted | if (!is(Value : const(Exception))) | { | size_t idx = lightScopeIndex.transitionIndex(key); | return idx < _data._lengths[0] && _index[idx] == key ? _data[idx] : _default; | } | | /// ditto | ref get(Index, Value)(auto ref scope const Index key, return ref Value _default) const | if (!is(Value : const(Exception))) | { | return this.lightScope.get(key, _default); | } | | /// ditto | ref get(Index, Value)(auto ref scope const Index key, return ref Value _default) immutable | if (!is(Value : const(Exception))) | { | return this.lightScope.get(key, _default); | } | | auto get(Index, Value)(auto ref scope const Index key, Value _default) @trusted | if (!is(Value : const(Exception))) | { | size_t idx = lightScopeIndex.transitionIndex(key); | return idx < _data._lengths[0] && _index[idx] == key ? _data[idx] : _default; | } | | /// ditto | auto get(Index, Value)(auto ref scope const Index key, Value _default) const | if (!is(Value : const(Exception))) | { | import mir.functional: forward; | return this.lightScope.get(key, forward!_default); | } | | /// ditto | auto get(Index, Value)(auto ref scope const Index key, Value _default) immutable | if (!is(Value : const(Exception))) | { | import mir.functional: forward; | return this.lightScope.get(key, forward!_default); | } | | /** | Gets data for the index. | Params: | key = index | exc = (lazy, optional) exception to throw if the series does not contains the index. | Returns: data that corresponds to the index. | Throws: | Exception if the series does not contains the index. | See_also: $(LREF Series.getVerbose), $(LREF Series.tryGet) | */ | auto ref get(Index)(auto ref scope const Index key) @trusted | { | size_t idx = lightScopeIndex.transitionIndex(key); | if (idx < _data._lengths[0] && _index[idx] == key) | { | return _data[idx]; | } | throw defaultExc!(); | } | | /// ditto | auto ref get(Index)(auto ref scope const Index key, lazy const Exception exc) @trusted | { | size_t idx = lightScopeIndex.transitionIndex(key); | if (idx < _data._lengths[0] && _index[idx] == key) | { | return _data[idx]; | } | throw exc; | } | | /// ditto | auto ref get(Index)(auto ref scope const Index key) const | { | return this.lightScope.get(key); | } | | /// ditto | auto ref get(Index)(auto ref scope const Index key, lazy const Exception exc) const | { | return this.lightScope.get(key, exc); | } | | | /// ditto | auto ref get(Index)(auto ref scope const Index key) immutable | { | return this.lightScope.get(key); | } | | /// ditto | auto ref get(Index)(auto ref scope const Index key, lazy const Exception exc) immutable | { | return this.lightScope.get(key, exc); | } | | /** | Gets data for the index (verbose exception). | Params: | key = index | Returns: data that corresponds to the index. | Throws: | Detailed exception if the series does not contains the index. 
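|        The exception message embeds the missing key via `std.format`, e.g.
|        `"Series double[Date]: Missing 2016-Mar-01 key"` in the module-level
|        example above.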
| See_also: $(LREF Series.get), $(LREF Series.tryGet) | */ | auto ref getVerbose(Index)(auto ref scope const Index key, string file = __FILE__, int line = __LINE__) | { | import std.format: format; | return this.get(key, new Exception(format("%s %s key", defaultMsg!(), key), file, line)); | } | | /// ditto | auto ref getVerbose(Index)(auto ref scope const Index key, string file = __FILE__, int line = __LINE__) const | { | return this.lightScope.getVerbose(key, file, line); | } | | /// ditto | auto ref getVerbose(Index)(auto ref scope const Index key, string file = __FILE__, int line = __LINE__) immutable | { | return this.lightScope.getVerbose(key, file, line); | } | | /** | Gets data for the index (extra verbose exception). | Params: | key = index | Returns: data that corresponds to the index. | Throws: | Detailed exception if the series does not contains the index. | See_also: $(LREF Series.get), $(LREF Series.tryGet) | */ | auto ref getExtraVerbose(Index)(auto ref scope const Index key, string exceptionInto, string file = __FILE__, int line = __LINE__) | { | import std.format: format; | return this.get(key, new Exception(format("%s. %s %s key", exceptionInto, defaultMsg!(), key), file, line)); | } | | /// ditto | auto ref getExtraVerbose(Index)(auto ref scope const Index key, string exceptionInto, string file = __FILE__, int line = __LINE__) const | { | return this.lightScope.getExtraVerbose(key, exceptionInto, file, line); | } | | /// ditto | auto ref getExtraVerbose(Index)(auto ref scope const Index key, string exceptionInto, string file = __FILE__, int line = __LINE__) immutable | { | return this.lightScope.getExtraVerbose(key, exceptionInto, file, line); | } | | /// | bool contains(Index)(auto ref scope const Index key) const @trusted | { | size_t idx = lightScopeIndex.transitionIndex(key); | return idx < _data._lengths[0] && _index[idx] == key; | } | | /// | auto opBinaryRight(string op : "in", Index)(auto ref scope const Index key) @trusted | { | size_t idx = lightScopeIndex.transitionIndex(key); | bool cond = idx < _data._lengths[0] && _index[idx] == key; | static if (__traits(compiles, &_data[size_t.init])) | { | if (cond) | return &_data[idx]; | return null; | } | else | { | return bool(cond); | } | } | | /// ditto | auto opBinaryRight(string op : "in", Index)(auto ref scope const Index key) const | { | return key in this.lightScope; | } | | /// ditto | auto opBinaryRight(string op : "in", Index)(auto ref scope const Index key) immutable | { | return key in this.lightScope; | } | | /++ | Tries to get the first value, such that `key_i == key`. | | Returns: `true` on success. | +/ | bool tryGet(Index, Value)(Index key, scope ref Value val) @trusted | { | size_t idx = lightScopeIndex.transitionIndex(key); | auto cond = idx < _data._lengths[0] && _index[idx] == key; | if (cond) | val = _data[idx]; | return cond; | } | | /// ditto | bool tryGet(Index, Value)(Index key, scope ref Value val) const | { | return this.lightScope.tryGet(key, val); | } | | /// ditto | bool tryGet(Index, Value)(Index key, scope ref Value val) immutable | { | return this.lightScope.tryGet(key, val); | } | | /++ | Tries to get the first value, such that `key_i >= key`. | | Returns: `true` on success. 
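|
|    If no key satisfies `key_i >= key`, `val` is left unchanged.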
| +/ | bool tryGetNext(Index, Value)(auto ref scope const Index key, scope ref Value val) | { | size_t idx = lightScopeIndex.transitionIndex(key); | auto cond = idx < _data._lengths[0]; | if (cond) | val = _data[idx]; | return cond; | } | | /// ditto | bool tryGetNext(Index, Value)(auto ref scope const Index key, scope ref Value val) const | { | return this.lightScope.tryGetNext(key, val); | } | | /// ditto | bool tryGetNext(Index, Value)(auto ref scope const Index key, scope ref Value val) immutable | { | return this.lightScope.tryGetNext(key, val); | } | | /++ | Tries to get the first value, such that `key_i >= key`. | Updates `key` with `key_i`. | | Returns: `true` on success. | +/ | bool tryGetNextUpdateKey(Index, Value)(scope ref Index key, scope ref Value val) @trusted | { | size_t idx = lightScopeIndex.transitionIndex(key); | auto cond = idx < _data._lengths[0]; | if (cond) | { | key = _index[idx]; | val = _data[idx]; | } | return cond; | } | | /// ditto | bool tryGetNextUpdateKey(Index, Value)(scope ref Index key, scope ref Value val) const | { | return this.lightScope.tryGetNextUpdateKey(key, val); | } | | /// ditto | bool tryGetNextUpdateKey(Index, Value)(scope ref Index key, scope ref Value val) immutable | { | return this.lightScope.tryGetNextUpdateKey(key, val); | } | | /++ | Tries to get the last value, such that `key_i <= key`. | | Returns: `true` on success. | +/ | bool tryGetPrev(Index, Value)(auto ref scope const Index key, scope ref Value val) | { | size_t idx = lightScopeIndex.transitionIndex!"a <= b"(key) - 1; | auto cond = 0 <= sizediff_t(idx); | if (cond) | val = _data[idx]; | return cond; | } | | /// ditto | bool tryGetPrev(Index, Value)(auto ref scope const Index key, scope ref Value val) const | { | return this.lightScope.tryGetPrev(key, val); | } | | /// ditto | bool tryGetPrev(Index, Value)(auto ref scope const Index key, scope ref Value val) immutable | { | return this.lightScope.tryGetPrev(key, val); | } | | /++ | Tries to get the last value, such that `key_i <= key`. | Updates `key` with `key_i`. | | Returns: `true` on success. | +/ | bool tryGetPrevUpdateKey(Index, Value)(scope ref Index key, scope ref Value val) @trusted | { | size_t idx = lightScopeIndex.transitionIndex!"a <= b"(key) - 1; | auto cond = 0 <= sizediff_t(idx); | if (cond) | { | key = _index[idx]; | val = _data[idx]; | } | return cond; | } | | /// ditto | bool tryGetPrevUpdateKey(Index, Value)(scope ref Index key, scope ref Value val) const | { | return this.lightScope.tryGetPrevUpdateKey(key, val); | } | | /// ditto | bool tryGetPrevUpdateKey(Index, Value)(scope ref Index key, scope ref Value val) immutable | { | return this.lightScope.tryGetPrevUpdateKey(key, val); | } | | /++ | Tries to get the first value, such that `lowerBound <= key_i <= upperBound`. | | Returns: `true` on success. 
| +/ | bool tryGetFirst(Index, Value)(auto ref scope const Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) @trusted | { | size_t idx = lightScopeIndex.transitionIndex(lowerBound); | auto cond = idx < _data._lengths[0] && _index[idx] <= upperBound; | if (cond) | val = _data[idx]; | return cond; | } | | /// ditto | bool tryGetFirst(Index, Value)(Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) const | { | return this.lightScope.tryGetFirst(lowerBound, upperBound, val); | } | | /// ditto | bool tryGetFirst(Index, Value)(Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) immutable | { | return this.lightScope.tryGetFirst(lowerBound, upperBound, val); | } | | /++ | Tries to get the first value, such that `lowerBound <= key_i <= upperBound`. | Updates `lowerBound` with `key_i`. | | Returns: `true` on success. | +/ | bool tryGetFirstUpdateLower(Index, Value)(ref Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) @trusted | { | size_t idx = lightScopeIndex.transitionIndex(lowerBound); | auto cond = idx < _data._lengths[0] && _index[idx] <= upperBound; | if (cond) | { | lowerBound = _index[idx]; | val = _data[idx]; | } | return cond; | } | | /// ditto | bool tryGetFirstUpdateLower(Index, Value)(ref Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) const | { | return this.lightScope.tryGetFirstUpdateLower(lowerBound, upperBound, val); | } | | /// ditto | bool tryGetFirstUpdateLower(Index, Value)(ref Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) immutable | { | return this.lightScope.tryGetFirstUpdateLower(lowerBound, upperBound, val); | } | | /++ | Tries to get the last value, such that `lowerBound <= key_i <= upperBound`. | | Returns: `true` on success. | +/ | bool tryGetLast(Index, Value)(Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) @trusted | { | size_t idx = lightScopeIndex.transitionIndex!"a <= b"(upperBound) - 1; | auto cond = 0 <= sizediff_t(idx) && _index[idx] >= lowerBound; | if (cond) | val = _data[idx]; | return cond; | } | | /// ditto | bool tryGetLast(Index, Value)(Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) const | { | return this.lightScope.tryGetLast(lowerBound, upperBound, val); | } | | /// ditto | bool tryGetLast(Index, Value)(Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) immutable | { | return this.lightScope.tryGetLast(lowerBound, upperBound, val); | } | | /++ | Tries to get the last value, such that `lowerBound <= key_i <= upperBound`. | Updates `upperBound` with `key_i`. | | Returns: `true` on success. 
| +/ | bool tryGetLastUpdateKey(Index, Value)(Index lowerBound, ref Index upperBound, scope ref Value val) @trusted | { | size_t idx = lightScopeIndex.transitionIndex!"a <= b"(upperBound) - 1; | auto cond = 0 <= sizediff_t(idx) && _index[idx] >= lowerBound; | if (cond) | { | upperBound = _index[idx]; | val = _data[idx]; | } | return cond; | } | | /// ditto | bool tryGetLastUpdateKey(Index, Value)(Index lowerBound, ref Index upperBound, scope ref Value val) const | { | return this.lightScope.tryGetLastUpdateKey(lowerBound, upperBound, val); | } | | /// ditto | bool tryGetLastUpdateKey(Index, Value)(Index lowerBound, ref Index upperBound, scope ref Value val) immutable | { | return this.lightScope.tryGetLastUpdateKey(lowerBound, upperBound, val); | } | | /++ | Returns: | 1D Slice with creared with $(NDSLICE topology, zip) ([0] - key, [1] - value). | See_also: | $(NDSLICE topology, map) uses multiargument lambdas to handle zipped slices. | +/ | auto asSlice()() @property | { | import mir.ndslice.topology: zip, map, ipack; | static if (N == 1) | return index.zip(data); | else | return index.zip(data.ipack!1.map!"a"); | } | | /// ditto | auto asSlice()() const @property | { | return opIndex.asSlice; | } | | /// ditto | auto asSlice()() immutable @property | { | return opIndex.asSlice; | } | | /// ndslice-like primitives | bool empty(size_t dimension = 0)() const @property | if (dimension < N) | { | return !length!dimension; | } | | /// ditto | size_t length(size_t dimension = 0)() const @property | if (dimension < N) | { | return _data.length!dimension; | } | | /// ditto | auto front(size_t dimension = 0)() @property | if (dimension < N) | { | assert(!empty!dimension); | static if (dimension) | { | return index.series(data.front!dimension); | } | else | { | return index.front.observation(data.front); | } | } | | /// ditto | auto back(size_t dimension = 0)() @property | if (dimension < N) | { | assert(!empty!dimension); | static if (dimension) | { | return index.series(_data.back!dimension); | } | else | { | return index.back.observation(_data.back); | } | } | | /// ditto | void popFront(size_t dimension = 0)() @trusted | if (dimension < N) | { | assert(!empty!dimension); | static if (dimension == 0) | _index++; | _data.popFront!dimension; | } | | /// ditto | void popBack(size_t dimension = 0)() | if (dimension < N) | { | assert(!empty!dimension); | _data.popBack!dimension; | } | | /// ditto | void popFrontExactly(size_t dimension = 0)(size_t n) @trusted | if (dimension < N) | { | assert(length!dimension >= n); | static if (dimension == 0) | _index += n; | _data.popFrontExactly!dimension(n); | } | | /// ditto | void popBackExactly(size_t dimension = 0)(size_t n) | if (dimension < N) | { | assert(length!dimension >= n); | _data.popBackExactly!dimension(n); | } | | /// ditto | void popFrontN(size_t dimension = 0)(size_t n) | if (dimension < N) | { | auto len = length!dimension; | n = n <= len ? n : len; | popFrontExactly!dimension(n); | } | | /// ditto | void popBackN(size_t dimension = 0)(size_t n) | if (dimension < N) | { | auto len = length!dimension; | n = n <= len ? n : len; | popBackExactly!dimension(n); | } | | /// ditto | Slice!(IotaIterator!size_t) opSlice(size_t dimension = 0)(size_t i, size_t j) const | if (dimension < N) | in | { | assert(i <= j, | "Series.opSlice!" 
~ dimension.stringof ~ ": the left opSlice boundary must be less than or equal to the right bound."); | enum errorMsg = ": difference between the right and the left bounds" | ~ " must be less than or equal to the length of the given dimension."; | assert(j - i <= _data._lengths[dimension], | "Series.opSlice!" ~ dimension.stringof ~ errorMsg); | } | body | { | return typeof(return)(j - i, typeof(return).Iterator(i)); | } | | /// ditto | size_t opDollar(size_t dimension = 0)() const | { | return _data.opDollar!dimension; | } | | /// ditto | auto opIndex(Slices...)(Slices slices) | if (allSatisfy!(templateOr!(is_Slice, isIndex), Slices)) | { | static if (Slices.length == 0) | { | return this; | } | else | static if (is_Slice!(Slices[0])) | { | return index[slices[0]].series(data[slices]); | } | else | { | return index[slices[0]].observation(data[slices]); | } | } | | /// ditto | auto opIndex(Slices...)(Slices slices) const | if (allSatisfy!(templateOr!(is_Slice, isIndex), Slices)) | { | return lightConst.opIndex(slices); | } | | /// ditto | auto opIndex(Slices...)(Slices slices) immutable | if (allSatisfy!(templateOr!(is_Slice, isIndex), Slices)) | { | return lightImmutable.opIndex(slices); | } | | /// | ref opAssign(typeof(this) rvalue) return @trusted | { | import mir.utility: swap; | this._data._structure = rvalue._data._structure; | swap(this._data._iterator, rvalue._data._iterator); | swap(this._index, rvalue._index); | return this; | } | | /// ditto | ref opAssign(RIndexIterator, RIterator)(Series!(RIndexIterator, RIterator, N, kind) rvalue) return | if (isAssignable!(IndexIterator, RIndexIterator) && isAssignable!(Iterator, RIterator)) | { | static if (__VERSION__ >= 2085) import core.lifetime: move; else import std.algorithm.mutation: move; | this._data._structure = rvalue._data._structure; | this._data._iterator = rvalue._data._iterator.move; | this._index = rvalue._index.move; | return this; | } | | /// ditto | ref opAssign(RIndexIterator, RIterator)(auto ref const Series!(RIndexIterator, RIterator, N, kind) rvalue) return | if (isAssignable!(IndexIterator, LightConstOf!RIndexIterator) && isAssignable!(Iterator, LightConstOf!RIterator)) | { | return this = rvalue.opIndex; | } | | /// ditto | ref opAssign(RIndexIterator, RIterator)(auto ref immutable Series!(RIndexIterator, RIterator, N, kind) rvalue) return | if (isAssignable!(IndexIterator, LightImmutableOf!RIndexIterator) && isAssignable!(Iterator, LightImmutableOf!RIterator)) | { | return this = rvalue.opIndex; | } | | /// ditto | ref opAssign(typeof(null)) return | { | return this = this.init; | } | | /// ditto | auto save()() @property | { | return this; | } | | /// | Series!(LightScopeOf!IndexIterator, LightScopeOf!Iterator, N, kind) lightScope()() @trusted scope return @property | { | return typeof(return)(lightScopeIndex, _data.lightScope); | } | | /// ditto | Series!(LightConstOf!(LightScopeOf!IndexIterator), LightConstOf!(LightScopeOf!Iterator), N, kind) lightScope()() @trusted scope return const @property | { | return typeof(return)(lightScopeIndex, _data.lightScope); | } | | /// ditto | Series!(LightConstOf!(LightScopeOf!IndexIterator), LightConstOf!(LightScopeOf!Iterator), N, kind) lightScope()() @trusted scope return immutable @property | { | return typeof(return)(lightScopeIndex, _data.lightScope); | } | | /// | Series!(LightConstOf!IndexIterator, LightConstOf!Iterator, N, kind) lightConst()() scope return const @property @trusted | { | return index.series(data); | } | | /// | Series!(LightImmutableOf!IndexIterator, 
LightImmutableOf!Iterator, N, kind) lightImmutable()() scope return immutable @property @trusted | { | return index.series(data); | } | | /// | auto toConst()() const @property | { | return index.toConst.series(data.toConst); | } | | /// | void toString(Writer, Spec)(auto ref Writer w, const ref Spec f) const | { | import std.format: formatValue, formatElement; | import std.range: put; | | if (f.spec != 's' && f.spec != '(') | throw new Exception("incompatible format character for Mir Series argument: %" ~ f.spec); | | enum defSpec = "%s" ~ f.keySeparator ~ "%s" ~ f.seqSeparator; | auto fmtSpec = f.spec == '(' ? f.nested : defSpec; | | if (f.spec == 's') | put(w, f.seqBefore); | if (length) for (size_t i = 0;;) | { | auto fmt = Spec(fmtSpec); | fmt.writeUpToNextSpec(w); | if (f.flDash) | { | formatValue(w, index[i], fmt); | fmt.writeUpToNextSpec(w); | formatValue(w, data[i], fmt); | } | else | { | formatElement(w, index[i], fmt); | fmt.writeUpToNextSpec(w); | formatElement(w, data[i], fmt); | } | if (f.sep !is null) | { | fmt.writeUpToNextSpec(w); | if (++i != length) | put(w, f.sep); | else | break; | } | else | { | if (++i != length) | fmt.writeUpToNextSpec(w); | else | break; | } | } | if (f.spec == 's') | put(w, f.seqAfter); | } | | version(mir_test) | /// | unittest | { | import mir.series: series, sort; | auto s = ["b", "a"].series([9, 8]).sort; | | import std.conv : to; | assert(s.to!string == `["a":8, "b":9]`); | | import std.format : format; | assert("%s".format(s) == `["a":8, "b":9]`); | assert("%(%s %s | %)".format(s) == `"a" 8 | "b" 9`); | assert("%-(%s,%s\n%)\n".format(s) == "a,8\nb,9\n"); | } |} | |/// ditto |alias Series = mir_series; | |/// 1-dimensional data |@safe pure version(mir_test) unittest |{ | auto index = [1, 2, 3, 4]; | auto data = [2.1, 3.4, 5.6, 7.8]; | auto series = index.series(data); | const cseries = series; | | assert(series.contains(2)); | assert( ()@trusted{ return (2 in series) is &data[1]; }() ); | | assert(!series.contains(5)); | assert( ()@trusted{ return (5 in series) is null; }() ); | | assert(series.lowerBound(2) == series[0 .. 1]); | assert(series.upperBound(2) == series[2 .. $]); | | assert(cseries.lowerBound(2) == cseries[0 .. 1]); | assert(cseries.upperBound(2) == cseries[2 .. $]); | | // slicing type deduction for const / immutable series | static assert(is(typeof(series[]) == | Series!(int*, double*))); | static assert(is(typeof(cseries[]) == | Series!(const(int)*, const(double)*))); | static assert(is(typeof((cast(immutable) series)[]) == | Series!(immutable(int)*, immutable(double)*))); | | /// slicing | auto seriesSlice = series[1 .. $ - 1]; | assert(seriesSlice.index == index[1 .. $ - 1]); | assert(seriesSlice.data == data[1 .. 
$ - 1]); | static assert(is(typeof(series) == typeof(seriesSlice))); | | /// indexing | assert(series[1] == observation(2, 3.4)); | | /// range primitives | assert(series.length == 4); | assert(series.front == observation(1, 2.1)); | | series.popFront; | assert(series.front == observation(2, 3.4)); | | series.popBackN(10); | assert(series.empty); |} | |/// 2-dimensional data |@safe pure version(mir_test) unittest |{ | import std.datetime: Date; | import mir.ndslice.topology: canonical, iota; | | size_t row_length = 5; | | auto index = [ | Date(2017, 01, 01), | Date(2017, 02, 01), | Date(2017, 03, 01), | Date(2017, 04, 01)]; | | // 1, 2, 3, 4, 5 | // 6, 7, 8, 9, 10 | // 11, 12, 13, 14, 15 | // 16, 17, 18, 19, 20 | auto data = iota!int([index.length, row_length], 1); | | // canonical and universal ndslices are more flexible than contiguous | auto series = index.series(data.canonical); | | /// slicing | auto seriesSlice = series[1 .. $ - 1, 2 .. 4]; | assert(seriesSlice.index == index[1 .. $ - 1]); | assert(seriesSlice.data == data[1 .. $ - 1, 2 .. 4]); | | static if (kindOf!(typeof(series.data)) != Contiguous) | static assert(is(typeof(series) == typeof(seriesSlice))); | | /// indexing | assert(series[1, 4] == observation(Date(2017, 02, 01), 10)); | assert(series[2] == observation(Date(2017, 03, 01), iota!int([row_length], 11))); | | /// range primitives | assert(series.length == 4); | assert(series.length!1 == 5); | | series.popFront!1; | assert(series.length!1 == 4); |} | |/// Construct from null |@safe pure nothrow @nogc version(mir_test) unittest |{ | import mir.series; | alias Map = Series!(string*, double*); | Map a = null; | auto b = Map(null); | assert(a.empty); | assert(b.empty); | | auto fun(Map a = null) | { | | } |} | |/++ |Convenience function for $(LREF Series) construction. |See_also: $(LREF assocArray) |Attention: | These overloads do not sort the data. | Call $(LREF sort) directly if the index is not sorted. |+/ |auto series(IndexIterator, Iterator, size_t N, SliceKind kind) | ( | Slice!IndexIterator index, | Slice!(Iterator, N, kind) data, | ) |{ | assert(index.length == data.length); | return Series!(IndexIterator, Iterator, N, kind)(index, data); |} | |/// ditto |auto series(Index, Data)(Index[] index, Data[] data) |{ | assert(index.length == data.length); | return .series(index.sliced, data.sliced); |} | |/// ditto |auto series(IndexIterator, Data)(Slice!IndexIterator index, Data[] data) |{ | assert(index.length == data.length); | return .series(index, data.sliced); |} | |/// ditto |auto series(Index, Iterator, size_t N, SliceKind kind)(Index[] index, Slice!(Iterator, N, kind) data) |{ | assert(index.length == data.length); | return .series(index.sliced, data); |} | |/** |Constructs a GC-allocated series from an associative array. |Performs exactly two allocations. | |Params: | aa = associative array or a pointer to associative array |Returns: | sorted GC-allocated series.
|See_also: $(LREF assocArray) |*/ |Series!(K*, V*) series(RK, RV, K = RK, V = RV)(RV[RK] aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | import mir.conv: to; | const size_t length = aa.length; | alias R = typeof(return); | if (__ctfe) | { | K[] keys; | V[] values; | foreach(ref kv; aa.byKeyValue) | { | keys ~= kv.key.to!K; | values ~= kv.value.to!V; | } | auto ret = series(keys, values); | .sort((()@trusted=>cast(Series!(Unqual!K*, Unqual!V*))ret)()); | static if (is(typeof(ret) == typeof(return))) | return ret; | else | return ()@trusted{ return cast(R) ret; }(); | } | import mir.ndslice.allocation: uninitSlice; | Series!(Unqual!K*, Unqual!V*) ret = series(length.uninitSlice!(Unqual!K), length.uninitSlice!(Unqual!V)); | auto it = ret; | foreach(ref kv; aa.byKeyValue) | { | import mir.conv: emplaceRef; | emplaceRef!K(it.index.front, kv.key.to!K); | emplaceRef!V(it._data.front, kv.value.to!V); | it.popFront; | } | .sort(ret); | static if (is(typeof(ret) == typeof(return))) | return ret; | else | return ()@trusted{ return cast(R) ret; }(); |} | |/// ditto |Series!(RK*, RV*) series(K, V, RK = const K, RV = const V)(const V[K] aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | return .series!(K, V, RK, RV)((()@trusted => cast(V[K]) aa)()); |} | |/// ditto |Series!(RK*, RV*) series( K, V, RK = immutable K, RV = immutable V)(immutable V[K] aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | return .series!(K, V, RK, RV)((()@trusted => cast(V[K]) aa)()); |} | |/// ditto |auto series(K, V)(V[K]* aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | return series(*aa); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto s = [1: 1.5, 3: 3.3, 2: 20.9].series; | assert(s.index == [1, 2, 3]); | assert(s.data == [1.5, 20.9, 3.3]); | assert(s.data[s.findIndex(2)] == 20.9); |} | |pure nothrow version(mir_test) unittest |{ | immutable aa = [1: 1.5, 3: 3.3, 2: 2.9]; | auto s = aa.series; | s = cast() s; | s = cast(const) s; | s = cast(immutable) s; | s = s; | assert(s.index == [1, 2, 3]); | assert(s.data == [1.5, 2.9, 3.3]); | assert(s.data[s.findIndex(2)] == 2.9); |} | | |/** |Constructs an RC-allocated series from an associative array. |Performs exactly two allocations. | |Params: | aa = associative array or a pointer to associative array |Returns: | sorted RC-allocated series.
|See_also: $(LREF assocArray) |*/ |auto rcseries(RK, RV, K = RK, V = RV)(RV[RK] aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | import mir.rc.array; | import mir.conv: to; | alias R = Series!(RCI!K, RCI!V); | const size_t length = aa.length; | auto ret = series(length.mininitRcarray!(Unqual!K).asSlice, length.mininitRcarray!(Unqual!V).asSlice); | auto it = ret.lightScope; | foreach(ref kv; aa.byKeyValue) | { | import mir.conv: emplaceRef; | emplaceRef!K(it.lightScopeIndex.front, kv.key.to!K); | emplaceRef!V(it._data.front, kv.value.to!V); | it.popFront; | } | static if (__VERSION__ >= 2085) import core.lifetime: move; else import std.algorithm.mutation: move; | .sort(ret.lightScope); | static if (is(typeof(ret) == R)) | return ret; | else | return ()@trusted{ return (*cast(R*) &ret); }(); |} | |/// ditto |auto rcseries(K, V, RK = const K, RV = const V)(const V[K] aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | return .rcseries!(K, V, RK, RV)((()@trusted => cast(V[K]) aa)()); |} | |/// ditto |auto rcseries( K, V, RK = immutable K, RV = immutable V)(immutable V[K] aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | return .rcseries!(K, V, RK, RV)((()@trusted => cast(V[K]) aa)()); |} | |/// ditto |auto rcseries(K, V)(V[K]* aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | return rcseries(*aa); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto s = [1: 1.5, 3: 3.3, 2: 20.9].rcseries; | assert(s.index == [1, 2, 3]); | assert(s.data == [1.5, 20.9, 3.3]); | assert(s.data[s.findIndex(2)] == 20.9); |} | |// pure nothrow |version(mir_test) unittest |{ | import mir.rc.array; | immutable aa = [1: 1.5, 3: 3.3, 2: 2.9]; | auto s = aa.rcseries; | Series!(RCI!(const int), RCI!(const double)) c; | s = cast() s; | c = s; | s = cast(const) s; | s = cast(immutable) s; | s = s; | assert(s.index == [1, 2, 3]); | assert(s.data == [1.5, 2.9, 3.3]); | assert(s.data[s.findIndex(2)] == 2.9); |} | |/++ |Constructs a manually allocated series from an associative array. |Performs exactly two allocations. | |Params: | aa = associative array or a pointer to associative array |Returns: | sorted manually allocated series.
|+/ |Series!(K*, V*) makeSeries(Allocator, K, V)(auto ref Allocator allocator, V[K] aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | import mir.ndslice.allocation: makeUninitSlice; | import mir.conv: emplaceRef; | | immutable size_t length = aa.length; | | auto ret = series( | allocator.makeUninitSlice!(Unqual!K)(length), | allocator.makeUninitSlice!(Unqual!V)(length)); | | auto it = ret; | foreach(ref kv; aa.byKeyValue) | { | it.index.front.emplaceRef!K(kv.key); | it.data.front.emplaceRef!V(kv.value); | it.popFront; | } | | ret.sort; | static if (is(typeof(ret) == typeof(return))) | return ret; | else | return ()@trusted{ return cast(typeof(return)) ret; }(); |} | |/// ditto |Series!(K*, V*) makeSeries(Allocator, K, V)(auto ref Allocator allocator, V[K]* aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | return makeSeries(allocator, *aa); |} | |/// |pure nothrow version(mir_test) unittest |{ | import std.experimental.allocator; | import std.experimental.allocator.building_blocks.region; | | InSituRegion!(1024) allocator; | auto aa = [1: 1.5, 3: 3.3, 2: 2.9]; | | auto s = (double[int] aa) @nogc @trusted pure nothrow { | return allocator.makeSeries(aa); | }(aa); | | auto indexArray = s.index.field; | auto dataArray = s.data.field; | | assert(s.index == [1, 2, 3]); | assert(s.data == [1.5, 2.9, 3.3]); | assert(s.data[s.findIndex(2)] == 2.9); | | allocator.dispose(indexArray); | allocator.dispose(dataArray); |} | |/++ |Returns a newly allocated associative array from a range of key/value tuples. | |Params: | series = index / time $(LREF Series); does not have to be sorted | |Returns: A newly allocated associative array out of elements of the input |_series. Returns a null associative |array reference when given an empty _series. | |Duplicates: Associative arrays have unique keys. If the input series contains duplicate keys, |then the result will contain the value of the last pair for that key. |+/ |auto assocArray(IndexIterator, Iterator, size_t N, SliceKind kind) | (Series!(IndexIterator, Iterator, N, kind) series) |{ | alias SK = series.Key; | alias SV = series.Value; | alias UK = Unqual!SK; | alias UV = Unqual!SV; | static if (isImplicitlyConvertible!(SK, UK)) | alias K = UK; | else | alias K = SK; | static if (isImplicitlyConvertible!(SV, UV)) | alias V = UV; | else | alias V = SV; | static assert(isMutable!V, "mir.series.assocArray: value type ( " ~ V.stringof ~ " ) must be mutable"); | | V[K] aa; | aa.insertOrAssign = series; | return aa; |} | |/// |@safe pure version(mir_test) unittest |{ | import mir.ndslice; //iota and etc | import mir.series; | | auto s = ["c", "a", "b"].series(3.iota!int); | assert(s.assocArray == [ | "c": 0, | "a": 1, | "b": 2, | ]); |} | |/// Returns: true if `U` is a $(LREF Series). |enum isSeries(U) = is(U : Series!(IndexIterator, Iterator, N, kind), IndexIterator, Iterator, size_t N, SliceKind kind); | |/++ |Finds an index such that `series.index[index] == key`. | |Params: | series = series | key = index to find in the series |Returns: | `size_t.max` if the series does not contain the key and appropriate index otherwise.
|+/ |size_t findIndex(IndexIterator, Iterator, size_t N, SliceKind kind, Index)(Series!(IndexIterator, Iterator, N, kind) series, auto ref scope const Index key) |{ | auto idx = series.lightScopeIndex.transitionIndex(key); | if (idx < series._data._lengths[0] && series.index[idx] == key) | { | return idx; | } | return size_t.max; |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto index = [1, 2, 3, 4].sliced; | auto data = [2.1, 3.4, 5.6, 7.8].sliced; | auto series = index.series(data); | | assert(series.data[series.findIndex(3)] == 5.6); | assert(series.findIndex(0) == size_t.max); |} | |/++ |Finds a backward index such that `series.index[$ - backward_index] == key`. | |Params: | series = series | key = index key to find in the series |Returns: | `0` if the series does not contain the key and appropriate backward index otherwise. |+/ |size_t find(IndexIterator, Iterator, size_t N, SliceKind kind, Index)(Series!(IndexIterator, Iterator, N, kind) series, auto ref scope const Index key) |{ | auto idx = series.lightScopeIndex.transitionIndex(key); | auto bidx = series._data._lengths[0] - idx; | if (bidx && series.index[idx] == key) | { | return bidx; | } | return 0; |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto index = [1, 2, 3, 4].sliced; | auto data = [2.1, 3.4, 5.6, 7.8].sliced; | auto series = index.series(data); | | if (auto bi = series.find(3)) | { | assert(series.data[$ - bi] == 5.6); | } | else | { | assert(0); | } | | assert(series.find(0) == 0); |} | |/++ |Iterates union using three functions to handle each intersection case separately. |Params: | lfun = binary function that accepts left side key (and left side value) | cfun = trinary function that accepts left side key, (left side value,) and right side value | rfun = binary function that accepts right side key (and right side value) |+/ |template troykaGalop(alias lfun, alias cfun, alias rfun) |{ | import std.range.primitives: isInputRange; | | /++ | Params: | lhs = left hand series | rhs = right hand series | +/ | pragma(inline, false) | void troykaGalop( | IndexIterL, IterL, size_t LN, SliceKind lkind, | IndexIterR, IterR, size_t RN, SliceKind rkind, | )( | Series!(IndexIterL, IterL, LN, lkind) lhs, | Series!(IndexIterR, IterR, RN, rkind) rhs, | ) | { | if (lhs.empty) | goto R0; | if (rhs.empty) | goto L1; | for(;;) | { | if (lhs.index.front < rhs.index.front) | { | lfun(lhs.index.front, lhs.data.front); | lhs.popFront; | if (lhs.empty) | goto R1; | continue; | } | else | if (lhs.index.front > rhs.index.front) | { | rfun(rhs.index.front, rhs.data.front); | rhs.popFront; | if (rhs.empty) | goto L1; | continue; | } | else | { | cfun(lhs.index.front, lhs.data.front, rhs.data.front); | lhs.popFront; | rhs.popFront; | if (rhs.empty) | goto L0; | if (lhs.empty) | goto R1; | continue; | } | } | | L0: | if (lhs.empty) | return; | L1: | do | { | lfun(lhs.index.front, lhs.data.front); | lhs.popFront; | } while(!lhs.empty); | return; | | R0: | if (rhs.empty) | return; | R1: | do | { | rfun(rhs.index.front, rhs.data.front); | rhs.popFront; | } while(!rhs.empty); | return; | } | | /++ | Params: | lhs = left hand input range | rhs = right hand input range | +/ | pragma(inline, false) | void troykaGalop (LeftRange, RightRange)(LeftRange lhs, RightRange rhs) | if (isInputRange!LeftRange && isInputRange!RightRange && !isSeries!LeftRange && !isSeries!RightRange) | { | if (lhs.empty) | goto R0; | if (rhs.empty) | goto L1; | for(;;) | { | if (lhs.front < rhs.front) | { | lfun(lhs.front); | lhs.popFront; | if 
(lhs.empty) | goto R1; | continue; | } | else | if (lhs.front > rhs.front) | { | rfun(rhs.front); | rhs.popFront; | if (rhs.empty) | goto L1; | continue; | } | else | { | cfun(lhs.front, rhs.front); | lhs.popFront; | rhs.popFront; | if (rhs.empty) | goto L0; | if (lhs.empty) | goto R1; | continue; | } | } | | L0: | if (lhs.empty) | return; | L1: | do | { | lfun(lhs.front); | lhs.popFront; | } while(!lhs.empty); | return; | | R0: | if (rhs.empty) | return; | R1: | do | { | rfun(rhs.front); | rhs.popFront; | } while(!rhs.empty); | return; | } |} | |/++ |Constructs union using three functions to handle each intersection case separately. |Params: | lfun = binary function that accepts left side key and left side value | cfun = trinary function that accepts left side key, left side value, and right side value | rfun = binary function that accepts right side key and right side value |+/ |template troykaSeries(alias lfun, alias cfun, alias rfun) |{ | /++ | Params: | lhs = left hand series | rhs = right hand series | Returns: | GC-allocated union series with length equal to $(LREF troykaLength) | +/ | auto troykaSeries | ( | IndexIterL, IterL, size_t LN, SliceKind lkind, | IndexIterR, IterR, size_t RN, SliceKind rkind, | )( | Series!(IndexIterL, IterL, LN, lkind) lhs, | Series!(IndexIterR, IterR, RN, rkind) rhs, | ) | { | alias I = CommonType!(typeof(lhs.index.front), typeof(rhs.index.front)); | alias E = CommonType!( | typeof(lfun(lhs.index.front, lhs.data.front)), | typeof(cfun(lhs.index.front, lhs.data.front, rhs.data.front)), | typeof(rfun(rhs.index.front, rhs.data.front)), | ); | alias R = Series!(I*, E*); | alias UI = Unqual!I; | alias UE = Unqual!E; | const length = troykaLength(lhs.index, rhs.index); | import mir.ndslice.allocation: uninitSlice; | auto index = length.uninitSlice!UI; | auto data = length.uninitSlice!UE; | auto ret = index.series(data); | alias algo = troykaSeriesImpl!(lfun, cfun, rfun); | algo!(I, E)(lhs.lightScope, rhs.lightScope, ret); | return (()@trusted => cast(R) ret)(); | } |} | |/// |version(mir_test) unittest |{ | import mir.ndslice; | auto a = [1, 2, 3, 9].sliced.series(iota!int([4], 1)); | auto b = [0, 2, 4, 9].sliced.series(iota!int([4], 1) * 10.0); | alias unionAlgorithm = troykaSeries!( | (key, left) => left, | (key, left, right) => left + right, | (key, right) => -right, | ); | auto c = unionAlgorithm(a, b); | assert(c.index == [0, 1, 2, 3, 4, 9]); | assert(c.data == [-10, 1, 22, 3, -30, 44]); |} | |/++ |Constructs union using three functions to handle each intersection case separately. 
|Params: | lfun = binary function that accepts left side key and left side value | cfun = trinary function that accepts left side key, left side value, and right side value | rfun = binary function that accepts right side key and right side value |+/ |template rcTroykaSeries(alias lfun, alias cfun, alias rfun) |{ | /++ | Params: | lhs = left hand series | rhs = right hand series | Returns: | RC-allocated union series with length equal to $(LREF troykaLength) | +/ | auto rcTroykaSeries | ( | IndexIterL, IterL, size_t LN, SliceKind lkind, | IndexIterR, IterR, size_t RN, SliceKind rkind, | )( | auto ref Series!(IndexIterL, IterL, LN, lkind) lhs, | auto ref Series!(IndexIterR, IterR, RN, rkind) rhs, | ) | { | import mir.rc.array; | alias I = CommonType!(typeof(lhs.index.front), typeof(rhs.index.front)); | alias E = CommonType!( | typeof(lfun(lhs.index.front, lhs.data.front)), | typeof(cfun(lhs.index.front, lhs.data.front, rhs.data.front)), | typeof(rfun(rhs.index.front, rhs.data.front)), | ); | alias R = Series!(RCI!I, RCI!E); | alias UI = Unqual!I; | alias UE = Unqual!E; | const length = troykaLength(lhs.index, rhs.index); | import mir.ndslice.allocation: uninitSlice; | auto ret = length.mininitRcarray!UI.asSlice.series(length.mininitRcarray!UE.asSlice); | alias algo = troykaSeriesImpl!(lfun, cfun, rfun); | algo!(I, E)(lhs.lightScope, rhs.lightScope, ret.lightScope); | return (()@trusted => *cast(R*) &ret)(); | } |} | |/// |version(mir_test) unittest |{ | import mir.ndslice; | auto a = [1, 2, 3, 9].sliced.series(iota!int([4], 1)); | auto b = [0, 2, 4, 9].sliced.series(iota!int([4], 1) * 10.0); | alias unionAlgorithm = rcTroykaSeries!( | (key, left) => left, | (key, left, right) => left + right, | (key, right) => -right, | ); | auto c = unionAlgorithm(a, b); | assert(c.index == [0, 1, 2, 3, 4, 9]); | assert(c.data == [-10, 1, 22, 3, -30, 44]); |} | | |/++ |Length for Troyka union handlers. |Params: | lhs = left hand side series/range | rhs = right hand side series/range |Returns: Total count of lambda function calls in $(LREF troykaGalop) union handler. 
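| |A short sketch of the range overload (the values are illustrative only; the union of the two index sets below has six elements): |--- |import mir.ndslice.slice: sliced; |assert(troykaLength([0, 2, 4, 9].sliced, [1, 2, 3, 9].sliced) == 6); // union: 0, 1, 2, 3, 4, 9 |---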
|+/ |size_t troykaLength( | IndexIterL, IterL, size_t LN, SliceKind lkind, | IndexIterR, IterR, size_t RN, SliceKind rkind, |)( | Series!(IndexIterL, IterL, LN, lkind) lhs, | Series!(IndexIterR, IterR, RN, rkind) rhs, |) |{ | return troykaLength(lhs.index, rhs.index); |} | |/// ditto |size_t troykaLength(LeftRange, RightRange)(LeftRange lhs, RightRange rhs) | if (!isSeries!LeftRange && !isSeries!RightRange) |{ | size_t length; | alias counter = (scope auto ref _) => ++length; | alias ccounter = (scope auto ref _l, scope auto ref _r) => ++length; | troykaGalop!(counter, ccounter, counter)(lhs, rhs); | return length; |} | |/// |template troykaSeriesImpl(alias lfun, alias cfun, alias rfun) |{ | /// | void troykaSeriesImpl | ( | I, E, | IndexIterL, IterL, size_t LN, SliceKind lkind, | IndexIterR, IterR, size_t RN, SliceKind rkind, | UI, UE, | )( | Series!(IndexIterL, IterL, LN, lkind) lhs, | Series!(IndexIterR, IterR, RN, rkind) rhs, | Series!(UI*, UE*) uninitSlice, | ) | { | import mir.conv: emplaceRef; | troykaGalop!( | (auto ref key, auto ref value) { | uninitSlice.index.front.emplaceRef!I(key); | uninitSlice.data.front.emplaceRef!E(lfun(key, value)); | uninitSlice.popFront; | }, | (auto ref key, auto ref lvalue, auto ref rvalue) { | uninitSlice.index.front.emplaceRef!I(key); | uninitSlice.data.front.emplaceRef!E(cfun(key, lvalue, rvalue)); | uninitSlice.popFront; | }, | (auto ref key, auto ref value) { | uninitSlice.index.front.emplaceRef!I(key); | uninitSlice.data.front.emplaceRef!E(rfun(key, value)); | uninitSlice.popFront; | }, | )(lhs, rhs); | assert(uninitSlice.length == 0); | } |} | |/** |Merges multiple (time) series into one. |Makes exactly one memory allocation for two series union |and two memory allocation for three and more series union. | |Params: | seriesTuple = variadic static array of composed of series, each series must be sorted. |Returns: sorted GC-allocated series. |See_also $(LREF Series.opBinary) $(LREF makeUnionSeries) |*/ |auto unionSeries(IndexIterator, Iterator, size_t N, SliceKind kind, size_t C)(Series!(IndexIterator, Iterator, N, kind)[C] seriesTuple...) | if (C > 1) |{ | return unionSeriesImplPrivate!false(seriesTuple); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import std.datetime: Date; | | ////////////////////////////////////// | // Constructs two time-series. | ////////////////////////////////////// | auto index0 = [1,3,4]; | auto data0 = [1.0, 3, 4]; | auto series0 = index0.series(data0); | | auto index1 = [1,2,5]; | auto data1 = [10.0, 20, 50]; | auto series1 = index1.series(data1); | | ////////////////////////////////////// | // Merges multiple series into one. | ////////////////////////////////////// | // Order is matter. | // The first slice has higher priority. | auto m0 = unionSeries(series0, series1); | auto m1 = unionSeries(series1, series0); | | assert(m0.index == m1.index); | assert(m0.data == [ 1, 20, 3, 4, 50]); | assert(m1.data == [10, 20, 3, 4, 50]); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import std.datetime: Date; | | ////////////////////////////////////// | // Constructs three time-series. | ////////////////////////////////////// | auto index0 = [1,3,4]; | auto data0 = [1.0, 3, 4]; | auto series0 = index0.series(data0); | | auto index1 = [1,2,5]; | auto data1 = [10.0, 20, 50]; | auto series1 = index1.series(data1); | | auto index2 = [1, 6]; | auto data2 = [100.0, 600]; | auto series2 = index2.series(data2); | | ////////////////////////////////////// | // Merges multiple series into one. 
| ////////////////////////////////////// | // Order is matter. | // The first slice has higher priority. | auto m0 = unionSeries(series0, series1, series2); | auto m1 = unionSeries(series1, series0, series2); | auto m2 = unionSeries(series2, series0, series1); | | assert(m0.index == m1.index); | assert(m0.index == m2.index); | assert(m0.data == [ 1, 20, 3, 4, 50, 600]); | assert(m1.data == [ 10, 20, 3, 4, 50, 600]); | assert(m2.data == [100, 20, 3, 4, 50, 600]); |} | |/** |Merges multiple (time) series into one. | |Params: | allocator = memory allocator | seriesTuple = variadic static array of composed of series. |Returns: sorted manually allocated series. |See_also $(LREF unionSeries) |*/ |auto makeUnionSeries(IndexIterator, Iterator, size_t N, SliceKind kind, size_t C, Allocator)(auto ref Allocator allocator, Series!(IndexIterator, Iterator, N, kind)[C] seriesTuple...) | if (C > 1) |{ | return unionSeriesImplPrivate!false(seriesTuple, allocator); |} | |/// |@system pure nothrow version(mir_test) unittest |{ | import std.experimental.allocator; | import std.experimental.allocator.building_blocks.region; | | ////////////////////////////////////// | // Constructs two time-series. | ////////////////////////////////////// | auto index0 = [1,3,4]; | | auto data0 = [1.0, 3, 4]; | auto series0 = index0.series(data0); | | auto index1 = [1,2,5]; | | auto data1 = [10.0, 20, 50]; | auto series1 = index1.series(data1); | | ////////////////////////////////////// | // Merges multiple series into one. | ////////////////////////////////////// | | InSituRegion!(1024) allocator; | | auto m0 = allocator.makeUnionSeries(series0, series1); | auto m1 = allocator.makeUnionSeries(series1, series0); // order is matter | | assert(m0.index == m1.index); | assert(m0.data == [ 1, 20, 3, 4, 50]); | assert(m1.data == [10, 20, 3, 4, 50]); | | /// series should have the same sizes as after allocation | allocator.dispose(m0.index.field); | allocator.dispose(m0.data.field); | allocator.dispose(m1.index.field); | allocator.dispose(m1.data.field); |} | |/** |Merges multiple (time) series into one. | |Params: | seriesTuple = variadic static array of composed of series. |Returns: sorted manually allocated series. |See_also $(LREF unionSeries) |*/ |auto rcUnionSeries(IndexIterator, Iterator, size_t N, SliceKind kind, size_t C)(Series!(IndexIterator, Iterator, N, kind)[C] seriesTuple...) | if (C > 1) |{ | return unionSeriesImplPrivate!true(seriesTuple); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.rc.array; | | ////////////////////////////////////// | // Constructs two time-series. | ////////////////////////////////////// | auto index0 = [1,3,4]; | | auto data0 = [1.0, 3, 4]; | auto series0 = index0.series(data0); | | auto index1 = [1,2,5]; | | auto data1 = [10.0, 20, 50]; | auto series1 = index1.series(data1); | | ////////////////////////////////////// | // Merges multiple series into one. | ////////////////////////////////////// | | Series!(RCI!int, RCI!double) m0 = rcUnionSeries(series0, series1); | Series!(RCI!int, RCI!double) m1 = rcUnionSeries(series1, series0); // order is matter | | assert(m0.index == m1.index); | assert(m0.data == [ 1, 20, 3, 4, 50]); | assert(m1.data == [10, 20, 3, 4, 50]); |} | |/** |Initialize preallocated series using union of multiple (time) series. |Doesn't make any allocations. | |Params: | seriesTuple = dynamic array composed of series. | uninitSeries = uninitialized series with exactly required length. 
|*/ |pragma(inline, false) |auto unionSeriesImpl(I, E, | IndexIterator, Iterator, size_t N, SliceKind kind, UI, UE)( | Series!(IndexIterator, Iterator, N, kind)[] seriesTuple, | Series!(UI*, UE*, N) uninitSeries, | ) |{ | import mir.conv: emplaceRef; | import mir.algorithm.setops: multiwayUnion; | | enum N = N; | alias I = DeepElementType!(typeof(seriesTuple[0].index)); | alias E = DeepElementType!(typeof(seriesTuple[0]._data)); | | if(uninitSeries.length) | { | auto u = seriesTuple.multiwayUnion!"a.index < b.index"; | do | { | auto obs = u.front; | emplaceRef!I(uninitSeries.index.front, obs.index); | static if (N == 1) | emplaceRef!E(uninitSeries._data.front, obs.data); | else | each!(emplaceRef!E)(uninitSeries._data.front, obs.data); | u.popFront; | uninitSeries.popFront; | } | while(uninitSeries.length); | } |} | |private auto unionSeriesImplPrivate(bool rc, IndexIterator, Iterator, size_t N, SliceKind kind, size_t C, Allocator...)(ref Series!(IndexIterator, Iterator, N, kind)[C] seriesTuple, ref Allocator allocator) | if (C > 1 && Allocator.length <= 1) |{ | import mir.algorithm.setops: unionLength; | import mir.ndslice.topology: iota; | import mir.internal.utility: Iota; | import mir.ndslice.allocation: uninitSlice, makeUninitSlice; | static if (rc) | import mir.rc.array; | | Slice!IndexIterator[C] indeces; | foreach (i; Iota!C) | indeces[i] = seriesTuple[i].index; | | immutable len = indeces[].unionLength; | | alias I = typeof(seriesTuple[0].index.front); | alias E = typeof(seriesTuple[0].data.front); | static if (rc) | alias R = Series!(RCI!I, RCI!E, N); | else | alias R = Series!(I*, E*, N); | alias UI = Unqual!I; | alias UE = Unqual!E; | | static if (N > 1) | { | auto shape = seriesTuple[0]._data._lengths; | shape[0] = len; | | foreach (ref sl; seriesTuple[1 .. $]) | foreach (i; Iota!(1, N)) | if (seriesTuple._data[0]._lengths[i] != sl._data._lengths[i]) | assert(0, "shapes mismatch"); | } | else | { | alias shape = len; | } | | static if (rc == false) | { | static if (Allocator.length) | auto ret = (()@trusted => allocator[0].makeUninitSlice!UI(len).series(allocator[0].makeUninitSlice!UE(shape)))(); | else | auto ret = (()@trusted => len.uninitSlice!UI.series(shape.uninitSlice!UE))(); | } | else | { | static if (Allocator.length) | static assert(0, "rcUnionSeries with allocators is not implemented."); | else | auto ret = (()@trusted => | len | .mininitRcarray!UI | .asSlice | .series( | shape | .iota | .elementCount | .mininitRcarray!UE | .asSlice | .sliced(shape)))(); | } | | static if (N == 2) // fast path | { | alias algo = troykaSeriesImpl!( | (auto scope ref key, auto scope return ref left) => left, | (auto scope ref key, auto scope return ref left, auto scope return ref right) => left, | (auto scope ref key, auto scope return ref right) => right, | ); | algo!(I, E)(seriesTuple[0], seriesTuple[1], ret.lightScope); | } | else | { | unionSeriesImpl!(I, E)(seriesTuple, ret.lightScope); | } | | return () @trusted {return cast(R) ret; }(); |} | |/** |Inserts or assigns a series to the associative array `aa`. |Params: | aa = associative array | series = series |Returns: | associative array |*/ |ref V[K] insertOrAssign(V, K, IndexIterator, Iterator, size_t N, SliceKind kind)(return ref V[K] aa, auto ref Series!(IndexIterator, Iterator, N, kind) series) @property |{ | auto s = series.lightScope; | foreach (i; 0 .. 
s.length) | { | aa[s.index[i]] = s.data[i]; | } | return aa; |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto a = [1: 3.0, 4: 2.0]; | auto s = series([1, 2, 3], [10, 20, 30]); | a.insertOrAssign = s; | assert(a.series == series([1, 2, 3, 4], [10.0, 20, 30, 2])); |} | |/** |Inserts a series to the associative array `aa`. |Params: | aa = associative array | series = series |Returns: | associative array |*/ |ref V[K] insert(V, K, IndexIterator, Iterator, size_t N, SliceKind kind)(return ref V[K] aa, auto ref Series!(IndexIterator, Iterator, N, kind) series) @property |{ | auto s = series.lightScope; | foreach (i; 0 .. s.length) | { | if (s.index[i] in aa) | continue; | aa[s.index[i]] = s.data[i]; | } | return aa; |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto a = [1: 3.0, 4: 2.0]; | auto s = series([1, 2, 3], [10, 20, 30]); | a.insert = s; | assert(a.series == series([1, 2, 3, 4], [3.0, 20, 30, 2])); |} | | |static if (__VERSION__ < 2078) |//////////////////// OBJECT.d |{ | |private: | |extern (C) |{ | // from druntime/src/rt/aaA.d | | // size_t _aaLen(in void* p) pure nothrow @nogc; | private void* _aaGetY(void** paa, const TypeInfo_AssociativeArray ti, in size_t valuesize, in void* pkey) pure nothrow; | // inout(void)* _aaGetRvalueX(inout void* p, in TypeInfo keyti, in size_t valuesize, in void* pkey); | inout(void)[] _aaValues(inout void* p, in size_t keysize, in size_t valuesize, const TypeInfo tiValArray) pure nothrow; | inout(void)[] _aaKeys(inout void* p, in size_t keysize, const TypeInfo tiKeyArray) pure nothrow; | void* _aaRehash(void** pp, in TypeInfo keyti) pure nothrow; | void _aaClear(void* p) pure nothrow; | | // alias _dg_t = extern(D) int delegate(void*); | // int _aaApply(void* aa, size_t keysize, _dg_t dg); | | // alias _dg2_t = extern(D) int delegate(void*, void*); | // int _aaApply2(void* aa, size_t keysize, _dg2_t dg); | | // private struct AARange { void* impl; size_t idx; } | alias AARange = ReturnType!(object._aaRange); | AARange _aaRange(void* aa) pure nothrow @nogc @safe; | bool _aaRangeEmpty(AARange r) pure nothrow @nogc @safe; | void* _aaRangeFrontKey(AARange r) pure nothrow @nogc @safe; | void* _aaRangeFrontValue(AARange r) pure nothrow @nogc @safe; | void _aaRangePopFront(ref AARange r) pure nothrow @nogc @safe; | |} | |auto byKeyValue(T : V[K], K, V)(T aa) pure nothrow @nogc @safe |{ | import core.internal.traits : substInout; | | static struct Result | { | AARange r; | | pure nothrow @nogc: | @property bool empty() @safe { return _aaRangeEmpty(r); } | @property auto front() | { | static struct Pair | { | // We save the pointers here so that the Pair we return | // won't mutate when Result.popFront is called afterwards. | private void* keyp; | private void* valp; | | @property ref key() inout | { | auto p = (() @trusted => cast(substInout!K*) keyp) (); | return *p; | }; | @property ref value() inout | { | auto p = (() @trusted => cast(substInout!V*) valp) (); | return *p; | }; | } | return Pair(_aaRangeFrontKey(r), | _aaRangeFrontValue(r)); | } | void popFront() @safe { return _aaRangePopFront(r); } | @property Result save() { return this; } | } | | return Result(_aaToRange(aa)); |} | |auto byKeyValue(T : V[K], K, V)(T* aa) pure nothrow @nogc |{ | return (*aa).byKeyValue(); |} | |// this should never be made public. |private AARange _aaToRange(T: V[K], K, V)(ref T aa) pure nothrow @nogc @safe |{ | // ensure we are dealing with a genuine AA. 
| static if (is(const(V[K]) == const(T))) | alias realAA = aa; | else | const(V[K]) realAA = aa; | return _aaRange(() @trusted { return cast(void*)realAA; } ()); |} | |} ../../../.dub/packages/mir-algorithm-3.4.17/mir-algorithm/source/mir/series.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-random-2.2.4-mir-random-source-mir-random-engine-mersenne_twister.lst |/++ |The Mersenne Twister generator. | |Copyright: Copyright Andrei Alexandrescu 2008 - 2009, Ilya Yaroshenko 2016-. |License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0). |Authors: $(HTTP erdani.org, Andrei Alexandrescu) Ilya Yaroshenko (rework) |+/ |module mir.random.engine.mersenne_twister; | |import std.traits; | |/++ |The $(LUCKY Mersenne Twister) generator. |+/ |struct MersenneTwisterEngine(UIntType, size_t w, size_t n, size_t m, size_t r, | UIntType a, size_t u, UIntType d, size_t s, | UIntType b, size_t t, | UIntType c, size_t l, UIntType f) | if (isUnsigned!UIntType) |{ | /// | enum isRandomEngine = true; | | static assert(0 < w && w <= UIntType.sizeof * 8); | static assert(1 <= m && m <= n); | static assert(0 <= r && 0 <= u && 0 <= s && 0 <= t && 0 <= l); | static assert(r <= w && u <= w && s <= w && t <= w && l <= w); | static assert(0 <= a && 0 <= b && 0 <= c); | | @disable this(); | @disable this(this); | | /// Largest generated value. | enum UIntType max = UIntType.max >> (UIntType.sizeof * 8u - w); | static assert(a <= max && b <= max && c <= max && f <= max); | | private enum UIntType lowerMask = (cast(UIntType) 1u << r) - 1; | private enum UIntType upperMask = ~lowerMask & max; | | /** | Parameters for the generator. | */ | enum size_t wordSize = w; | enum size_t stateSize = n; /// ditto | enum size_t shiftSize = m; /// ditto | enum size_t maskBits = r; /// ditto | enum UIntType xorMask = a; /// ditto | enum size_t temperingU = u; /// ditto | enum UIntType temperingD = d; /// ditto | enum size_t temperingS = s; /// ditto | enum UIntType temperingB = b; /// ditto | enum size_t temperingT = t; /// ditto | enum UIntType temperingC = c; /// ditto | enum size_t temperingL = l; /// ditto | enum UIntType initializationMultiplier = f; /// ditto | | | /// The default seed value. | enum UIntType defaultSeed = 5489; | | /++ | Current reversed payload index with initial value equals to `n-1` | +/ | size_t index = void; | | private UIntType _z = void; | | /++ | Reversed(!) payload. | +/ | UIntType[n] data = void; | | /* | * Marker indicating it's safe to construct from void | * (i.e. the constructor doesn't depend on the struct | * being in an initially valid state). | * Non-public because we don't want to commit to this | * design. | */ | package enum bool _isVoidInitOkay = true; | | /++ | Constructs a MersenneTwisterEngine object. | +/ 0000000| this(UIntType value) @safe pure nothrow @nogc | { | static if (max == UIntType.max) 0000000| data[$-1] = value; | else | data[$-1] = value & max; 0000000| foreach_reverse (size_t i, ref e; data[0 .. $-1]) | { 0000000| e = f * (data[i + 1] ^ (data[i + 1] >> (w - 2))) + cast(UIntType)(n - (i + 1)); | static if (max != UIntType.max) | e &= max; | } 0000000| index = n-1; 0000000| opCall(); | } | | /++ | Constructs a MersenneTwisterEngine object. | | Note that `MersenneTwisterEngine([123])` will not result in | the same initial state as `MersenneTwisterEngine(123)`. 
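| | For example (a short sketch; $(LREF Mt19937) is the alias defined later in this module), both calls below are valid but produce different initial states: | --- | immutable uint[4] customSeed = [0x123u, 0x234u, 0x345u, 0x456u]; | auto a = Mt19937(customSeed); // seeded from the whole array | auto b = Mt19937(0x123u);     // seeded from a single value; not the same state | ---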
| +/ | this()(scope const(UIntType)[] array) @safe pure nothrow @nogc | { | static if (is(UIntType == uint)) | { | enum UIntType f2 = 1664525u; | enum UIntType f3 = 1566083941u; | } | else static if (is(UIntType == ulong)) | { | enum UIntType f2 = 3935559000370003845uL; | enum UIntType f3 = 2862933555777941757uL; | } | else | static assert(0, "init by slice only supported if UIntType is uint or ulong!"); | | data[$-1] = cast(UIntType) (19650218u & max); | foreach_reverse (size_t i, ref e; data[0 .. $-1]) | { | e = f * (data[i + 1] ^ (data[i + 1] >> (w - 2))) + cast(UIntType)(n - (i + 1)); | static if (max != UIntType.max) | e &= max; | } | index = n-1; | if (array.length == 0) | { | opCall(); | return; | } | | size_t final_mix_index = void; | | if (array.length >= n) | { | size_t j = 0; | //Handle all but tail. | while (array.length - j >= n - 1) | { | foreach_reverse (i, ref e; data[0 .. $-1]) | { | e = (e ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2)) | + array[j] + cast(UIntType) j; | static if (max != UIntType.max) | e &= max; | ++j; | } | data[$ - 1] = data[0]; | } | //Handle tail. | size_t i = n - 2; | while (j < array.length) | { | data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2)) | + array[j] + cast(UIntType) j; | static if (max != UIntType.max) | data[i] &= max; | ++j; | --i; | } | //Set the index for use by the next pass. | final_mix_index = i; | } | else | { | size_t i = n - 2; | //Handle all but tail. | while (i >= array.length) | { | foreach (j; 0 .. array.length) | { | data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2)) | + array[j] + cast(UIntType) j; | static if (max != UIntType.max) | data[i] &= max; | --i; | } | } | //Handle tail. | size_t j = 0; | while (i != cast(size_t) -1) | { | data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2)) | + array[j] + cast(UIntType) j; | static if (max != UIntType.max) | data[i] &= max; | ++j; | --i; | } | data[$ - 1] = data[0]; | i = n - 2; | data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2)) | + array[j] + cast(UIntType) j; | static if (max != UIntType.max) | data[i] &= max; | //Set the index for use by the next pass. | final_mix_index = n - 2; | } | | foreach_reverse (i, ref e; data[0 .. final_mix_index]) | { | e = (e ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f3)) | - cast(UIntType)(n - (i + 1)); | static if (max != UIntType.max) | e &= max; | } | foreach_reverse (i, ref e; data[final_mix_index .. n-1]) | { | e = (e ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f3)) | - cast(UIntType)(n - (i + 1)); | static if (max != UIntType.max) | e &= max; | } | data[$-1] = (cast(UIntType)1) << ((UIntType.sizeof * 8) - 1); /* MSB is 1; assuring non-zero initial array */ | opCall(); | } | | /++ | Advances the generator. | +/ | UIntType opCall() @safe pure nothrow @nogc | { | // This function blends two nominally independent | // processes: (i) calculation of the next random | // variate from the cached previous `data` entry | // `_z`, and (ii) updating `data[index]` and `_z` | // and advancing the `index` value to the next in | // sequence. | // | // By interweaving the steps involved in these | // procedures, rather than performing each of | // them separately in sequence, the variables | // are kept 'hot' in CPU registers, allowing | // for significantly faster performance. 
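| // | // In the code below, `z` is the cached previous word being | // tempered (the u/d, s/b, t/c and l parameter pairs), while `x` | // is the "twisted" mix of the current word's upper bits and the | // next word's lower bits; xor-ed with data[conj] it forms the new | // state word written back to `data` and cached in `_z`.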
0000000| sizediff_t index = this.index; 0000000| sizediff_t next = index - 1; 0000000| if(next < 0) 0000000| next = n - 1; 0000000| auto z = _z; 0000000| sizediff_t conj = index - m; 0000000| if(conj < 0) 0000000| conj = index - m + n; | static if (d == UIntType.max) 0000000| z ^= (z >> u); | else 0000000| z ^= (z >> u) & d; 0000000| auto q = data[index] & upperMask; 0000000| auto p = data[next] & lowerMask; 0000000| z ^= (z << s) & b; 0000000| auto y = q | p; 0000000| auto x = y >> 1; 0000000| z ^= (z << t) & c; 0000000| if (y & 1) 0000000| x ^= a; 0000000| auto e = data[conj] ^ x; 0000000| z ^= (z >> l); 0000000| _z = data[index] = e; 0000000| this.index = next; 0000000| return z; | } |} | |/++ |A $(D MersenneTwisterEngine) instantiated with the parameters of the |original engine $(HTTP en.wikipedia.org/wiki/Mersenne_Twister, |MT19937), generating uniformly-distributed 32-bit numbers with a |period of 2 to the power of 19937. | |This is recommended for random number generation on 32-bit systems |unless memory is severely restricted, in which case a |$(REF_ALTTEXT Xorshift, Xorshift, mir, random, engine, xorshift) |would be the generator of choice. |+/ |alias Mt19937 = MersenneTwisterEngine!(uint, 32, 624, 397, 31, | 0x9908b0df, 11, 0xffffffff, 7, | 0x9d2c5680, 15, | 0xefc60000, 18, 1812433253); | |/// |@safe version(mir_random_test) unittest |{ | import mir.random.engine; | | // bit-masking by generator maximum is necessary | // to handle 64-bit `unpredictableSeed` | auto gen = Mt19937(unpredictableSeed & Mt19937.max); | auto n = gen(); | | import std.traits; | static assert(is(ReturnType!gen == uint)); |} | |/++ |A $(D MersenneTwisterEngine) instantiated with the parameters of the |original engine $(HTTP en.wikipedia.org/wiki/Mersenne_Twister, |MT19937), generating uniformly-distributed 64-bit numbers with a |period of 2 to the power of 19937. | |This is recommended for random number generation on 64-bit systems |unless memory is severely restricted, in which case a |$(REF_ALTTEXT Xorshift, Xorshift, mir, random, engine, xorshift) |would be the generator of choice. |+/ |alias Mt19937_64 = MersenneTwisterEngine!(ulong, 64, 312, 156, 31, | 0xb5026f5aa96619e9, 29, 0x5555555555555555, 17, | 0x71d67fffeda60000, 37, | 0xfff7eee000000000, 43, 6364136223846793005); | |/// |@safe version(mir_random_test) unittest |{ | import mir.random.engine; | | auto gen = Mt19937_64(unpredictableSeed); | auto n = gen(); | | import std.traits; | static assert(is(ReturnType!gen == ulong)); |} | |@safe nothrow version(mir_random_test) unittest |{ | import mir.random.engine; | | static assert(isSaturatedRandomEngine!Mt19937); | static assert(isSaturatedRandomEngine!Mt19937_64); | auto gen = Mt19937(Mt19937.defaultSeed); | foreach(_; 0 .. 9999) | gen(); | assert(gen() == 4123659995); | | auto gen64 = Mt19937_64(Mt19937_64.defaultSeed); | foreach(_; 0 .. 
9999) | gen64(); | assert(gen64() == 9981545732273789042uL); |} | |version(mir_random_test) unittest |{ | enum val = [1341017984, 62051482162767]; | alias MT(UIntType, uint w) = MersenneTwisterEngine!(UIntType, w, 624, 397, 31, | 0x9908b0df, 11, 0xffffffff, 7, | 0x9d2c5680, 15, | 0xefc60000, 18, 1812433253); | | import std.meta: AliasSeq; | foreach (i, R; AliasSeq!(MT!(ulong, 32), MT!(ulong, 48))) | { | static if (R.wordSize == 48) static assert(R.max == 0xFFFFFFFFFFFF); | auto a = R(R.defaultSeed); | foreach(_; 0..999) | a(); | assert(val[i] == a()); | } |} | |@safe nothrow @nogc version(mir_random_test) unittest |{ | //Verify that seeding with an array gives the same result as the reference | //implementation. | | //32-bit: www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/mt19937ar.tgz | immutable uint[4] seed32 = [0x123u, 0x234u, 0x345u, 0x456u]; | auto gen32 = Mt19937(seed32); | foreach(_; 0..999) | gen32(); | assert(3460025646u == gen32()); | | //64-bit: www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/mt19937-64.tgz | immutable ulong[4] seed64 = [0x12345uL, 0x23456uL, 0x34567uL, 0x45678uL]; | auto gen64 = Mt19937_64(seed64); | foreach(_; 0..999) | gen64(); | assert(994412663058993407uL == gen64()); |} ../../../.dub/packages/mir-random-2.2.4/mir-random/source/mir/random/engine/mersenne_twister.d is 0% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-random-2.2.4-mir-random-source-mir-random-engine-package.lst |/++ |$(SCRIPT inhibitQuickIndex = 1;) |Uniform random engines. | |$(B Sections:) | $(LINK2 #Convenience, Convenience) |• $(LINK2 #Entropy, Entropy) |• $(LINK2 #ThreadLocal, Thread-Local) |• $(LINK2 #Traits, Traits) |• $(LINK2 #CInterface, C Interface) | |$(BOOKTABLE | |$(LEADINGROW Convenience) |$(TR | $(RROW Random, Default random number _engine)) | $(RROW rne, Per-thread uniquely-seeded instance of default `Random`. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS).) | |$(LEADINGROW Entropy) |$(TR | $(RROW unpredictableSeed, Seed of `size_t` using system entropy. May use `unpredictableSeed!UIntType` for unsigned integers of different sizes.) | $(RROW genRandomNonBlocking, Fills a buffer with system entropy, returning number of bytes copied or negative number on error) | $(RROW genRandomBlocking, Fills a buffer with system entropy, possibly waiting if the system believes it has insufficient entropy. Returns 0 on success.)) | |$(LEADINGROW Thread-Local (when $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS) enabled)) |$(TR | $(TR $(TDNW $(LREF threadLocal)`!(Engine)`) $(TD Per-thread uniquely-seeded instance of any specified `Engine`. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS).)) | $(TR $(TDNW $(LREF threadLocalPtr)`!(Engine)`) $(TD `@safe` pointer to `threadLocal!Engine`. Always initializes before return. $(I Warning: do not share between threads!))) | $(TR $(TDNW $(LREF threadLocalInitialized)`!(Engine)`) $(TD Explicitly manipulate "is seeded" flag for thread-local instance. 
Not needed by most library users.)) | $(TR $(TDNW $(LREF setThreadLocalSeed)`!(Engine, A...)`) $(TD Initialize thread-local `Engine` with a known seed rather than a random seed.)) | ) | |$(LEADINGROW Traits) |$(TR | $(RROW EngineReturnType, Get return type of random number _engine's `opCall()`) | $(RROW isRandomEngine, Check if is random number _engine) | $(RROW isSaturatedRandomEngine, Check if random number _engine `G` such that `G.max == EngineReturnType!(G).max`) | $(RROW preferHighBits, Are the high bits of the _engine's output known to have better statistical properties than the low bits?)) | |$(LEADINGROW C Interface) | $(RROW mir_random_engine_ctor, Perform any necessary setup. Automatically called by DRuntime.) | $(RROW mir_random_engine_dtor, Release any resources. Automatically called by DRuntime.) | $(RROW mir_random_genRandomNonBlocking, External name for $(LREF genRandomNonBlocking)) | $(RROW mir_random_genRandomBlocking, External name for $(LREF genRandomBlocking)) |) | |Copyright: Ilya Yaroshenko 2016-. |License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0). |Authors: Ilya Yaroshenko | |Macros: | T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) | RROW = $(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.random.engine; | |version (OSX) | version = Darwin; |else version (iOS) | version = Darwin; |else version (TVOS) | version = Darwin; |else version (WatchOS) | version = Darwin; | |// A secure arc4random implementation that uses some modern algorithm rather |// than ARC4 may be used synonymously with non-blocking system entropy. |version (CRuntime_Bionic) | version = SecureARC4Random; // ChaCha20 |version (Darwin) | version = SecureARC4Random; // AES |version (OpenBSD) | version = SecureARC4Random; // ChaCha20 |version (NetBSD) | version = SecureARC4Random; // ChaCha20 | |// A legacy arc4random should not be used when cryptographic security |// is required but may used for `unpredictableSeed`. |version (CRuntime_UClibc) | version = LegacyARC4Random; // ARC4 |version (FreeBSD) | version = LegacyARC4Random; // ARC4 |version (DragonFlyBSD) | version = LegacyARC4Random; // ARC4 |version (BSD) | version = LegacyARC4Random; // Unknown implementation | |version (SecureARC4Random) | version = AnyARC4Random; |version (LegacyARC4Random) | version = AnyARC4Random; | |version (D_betterC) | private enum bool THREAD_LOCAL_STORAGE_AVAILABLE = false; |else | private enum bool THREAD_LOCAL_STORAGE_AVAILABLE = __traits(compiles, { static size_t x = 0; }); | |import std.traits; | |import mir.random.engine.mersenne_twister; | |/++ |Like `std.traits.ReturnType!T` but it works even if |T.opCall is a function template. |+/ |template EngineReturnType(T) |{ | import std.traits : ReturnType; | static if (is(ReturnType!T)) | alias EngineReturnType = ReturnType!T; | else | alias EngineReturnType = typeof(T.init()); |} | |/++ |Test if T is a random engine. |A type should define `enum isRandomEngine = true;` to be a random engine. |+/ |template isRandomEngine(T) |{ | static if (is(typeof(T.isRandomEngine) : bool) && is(typeof(T.init()))) | { | private alias R = typeof(T.init()); | static if (T.isRandomEngine && isUnsigned!R) | enum isRandomEngine = is(typeof({ | enum max = T.max; | static assert(is(typeof(T.max) == R)); | })); | else enum isRandomEngine = false; | } | else enum isRandomEngine = false; |} | |/++ |Test if T is a saturated random-bit generator. |A random number generator is saturated if `T.max == ReturnType!T.max`. |A type should define `enum isRandomEngine = true;` to be a random engine. 
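| |A minimal sketch of a type that satisfies both traits (a hypothetical counter, not a real generator; for illustration only): |--- |struct CountingEngine |{ | enum isRandomEngine = true; | enum uint max = uint.max; // saturated: `max` equals the return type's maximum | uint state; | uint opCall() { return ++state; } |} |static assert(isSaturatedRandomEngine!CountingEngine); |---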
|+/ |template isSaturatedRandomEngine(T) |{ | static if (isRandomEngine!T) | enum isSaturatedRandomEngine = T.max == EngineReturnType!T.max; | else | enum isSaturatedRandomEngine = false; |} | |/++ |Are the high bits of the engine's output known to have |better statistical properties than the low bits of the |output? This property is set by checking the value of |an optional enum named `preferHighBits`. If the property |is missing it is treated as false. | |This should be specified as true for: | |+/ |template preferHighBits(G) | if (isSaturatedRandomEngine!G) |{ | static if (__traits(compiles, { enum bool e = G.preferHighBits; })) | private enum bool preferHighBits = G.preferHighBits; | else | private enum bool preferHighBits = false; |} | |/* | * Marker indicating it's safe to construct from void | * (i.e. the constructor doesn't depend on the struct | * being in an initially valid state). | * Either checks an explicit flag `_isVoidInitOkay` | * or tests to make sure that the structure contains | * nothing that looks like a pointer or an index into | * an array. Also ensures that there is not an elaborate | * destructor since it could be called when the struct | * is in an invalid state. | * Non-public because we don't want to commit to this | * design. | */ |package template _isVoidInitOkay(G) if (isRandomEngine!G && is(G == struct)) |{ | static if (is(typeof(G._isVoidInitOkay) : bool)) | enum bool _isVoidInitOkay = G._isVoidInitOkay; | else static if (!hasNested!G && !hasElaborateDestructor!G) | { | import std.meta : allSatisfy; | static if (allSatisfy!(isScalarType, FieldTypeTuple!G)) | //All members are scalars. | enum bool _isVoidInitOkay = true; | else static if (FieldTypeTuple!(G).length == 1 && isStaticArray!(FieldTypeTuple!(G)[0])) | //Only has one member which is a static array of scalars. | enum bool _isVoidInitOkay = isScalarType!(typeof(FieldTypeTuple!(G)[0].init[0])); | else | enum bool _isVoidInitOkay = false; | } | else | enum bool _isVoidInitOkay = false; |} |@nogc nothrow pure @safe version(mir_random_test) |{ | import mir.random.engine.mersenne_twister: Mt19937, Mt19937_64; | //Ensure that this property is set for the Mersenne Twister, | //whose internal state is huge enough for this to potentially | //matter: | static assert(_isVoidInitOkay!Mt19937); | static assert(_isVoidInitOkay!Mt19937_64); | //Check that the property is set for a moderately-sized PRNG. | import mir.random.engine.xorshift: Xorshift1024StarPhi; | static assert(_isVoidInitOkay!Xorshift1024StarPhi); | //Check that PRNGs not explicitly marked as void-init safe | //can be inferred as such if they only have scalar fields. | import mir.random.engine.pcg: pcg32, pcg32_oneseq; | import mir.random.engine.splitmix: SplitMix64; | static assert(_isVoidInitOkay!pcg32); | static assert(_isVoidInitOkay!pcg32_oneseq); | static assert(_isVoidInitOkay!SplitMix64); | //Check that PRNGs not explicitly marked as void-init safe | //can be inferred as such if their only field is a static | //array of scalars. | import mir.random.engine.xorshift: Xorshift128, Xoroshiro128Plus; | static assert(_isVoidInitOkay!Xorshift128); | static assert(_isVoidInitOkay!Xoroshiro128Plus); |} | |version (D_Ddoc) |{ | /++ | A "good" seed for initializing random number engines. Initializing | with $(D_PARAM unpredictableSeed) makes engines generate different | random number sequences every run. 
| | Returns: | A single unsigned integer seed value, different on each successive call | +/ | pragma(inline, true) | @property size_t unpredictableSeed() @trusted nothrow @nogc | { | return unpredictableSeed!size_t; | } |} | |/// ditto |pragma(inline, true) |@property T unpredictableSeed(T = size_t)() @trusted nothrow @nogc | if (isUnsigned!T) |{ | import mir.utility: _expect; 0000000| T seed = void; | version (AnyARC4Random) | { | // If we just need 32 bits it's faster to call arc4random() | // than arc4random_buf(&seed, seed.sizeof). | static if (T.sizeof <= uint.sizeof) | seed = cast(T) arc4random(); | else 0000000| arc4random_buf(&seed, seed.sizeof); | } | else if (_expect(genRandomNonBlocking(&seed, seed.sizeof) != T.sizeof, false)) | { | // fallback to old time/thread-based implementation in case of errors | seed = cast(T) fallbackSeed(); | } 0000000| return seed; |} | |// Old name of `unpredictableSeedOf!T`. Undocumented but |// defined so existing code using mir.random won't break. |deprecated("Use unpredictableSeed!T instead of unpredictableSeedOf!T") |public alias unpredictableSeedOf(T) = unpredictableSeed!T; | |version (mir_random_test) @nogc nothrow @safe unittest |{ | // Check unpredictableSeed syntax works with or without parentheses. | auto a = unpredictableSeed; | auto b = unpredictableSeed!uint; | auto c = unpredictableSeed!ulong; | static assert(is(typeof(a) == size_t)); | static assert(is(typeof(b) == uint)); | static assert(is(typeof(c) == ulong)); | | auto d = unpredictableSeed(); | auto f = unpredictableSeed!uint(); | auto g = unpredictableSeed!ulong(); | static assert(is(typeof(d) == size_t)); | static assert(is(typeof(f) == uint)); | static assert(is(typeof(g) == ulong)); |} | |// Is llvm_readcyclecounter supported on this platform? |// We need to whitelist platforms where it is known to work because if it |// isn't supported it will compile but always return 0. |// https://llvm.org/docs/LangRef.html#llvm-readcyclecounter-intrinsic |version(LDC) |{ | // The only architectures the documentation says are supported are | // x86 and Alpha. x86 uses RDTSC and Alpha uses RPCC. | version(X86_64) version = LLVMReadCycleCounter; | // Do *not* support 32-bit x86 because some x86 processors don't | // support `rdtsc` and because on x86 (but not x86-64) Linux | // `prctl` can disable a process's ability to use `rdtsc`. | else version(Alpha) version = LLVMReadCycleCounter; |} | | |pragma(inline, false) |private ulong fallbackSeed()() |{ | // fallback to old time/thread-based implementation in case of errors | version(LLVMReadCycleCounter) | { | import ldc.intrinsics : llvm_readcyclecounter; | ulong ticks = llvm_readcyclecounter(); | } | else version(D_InlineAsm_X86_64) | { | // RDTSC takes around 22 clock cycles. | ulong ticks = void; | asm @nogc nothrow | { | rdtsc; | shl RDX, 32; | xor RDX, RAX; | mov ticks, RDX; | } | } | //else version(D_InlineAsm_X86) | //{ | // // We don't use `rdtsc` with version(D_InlineAsm_X86) because | // // some x86 processors don't support `rdtsc` and because on | // // x86 (but not x86-64) Linux `prctl` can disable a process's | // // ability to use `rdtsc`. 
| // static assert(0); | //} | else version(Windows) | { | import core.sys.windows.winbase : QueryPerformanceCounter; | ulong ticks = void; | QueryPerformanceCounter(cast(long*)&ticks); | } | else version(Darwin) | { | import core.time : mach_absolute_time; | ulong ticks = mach_absolute_time(); | } | else version(Posix) | { | import core.sys.posix.time : clock_gettime, CLOCK_MONOTONIC, timespec; | timespec ts = void; | const tserr = clock_gettime(CLOCK_MONOTONIC, &ts); | // Should never fail. Only allowed arror codes are | // EINVAL if the 1st argument is an invalid clock ID and | // EFAULT if the 2nd argument is an invalid address. | assert(tserr == 0, "Call to clock_gettime failed."); | ulong ticks = (cast(ulong) ts.tv_sec << 32) ^ ts.tv_nsec; | } | version(Posix) | { | import core.sys.posix.unistd : getpid; | import core.sys.posix.pthread : pthread_self; | auto pid = cast(uint) getpid; | auto tid = cast(uint) pthread_self(); | } | else | version(Windows) | { | import core.sys.windows.winbase : GetCurrentProcessId, GetCurrentThreadId; | auto pid = cast(uint) GetCurrentProcessId; | auto tid = cast(uint) GetCurrentThreadId; | } | ulong k = ((cast(ulong)pid << 32) ^ tid) + ticks; | k ^= k >> 33; | k *= 0xff51afd7ed558ccd; | k ^= k >> 33; | k *= 0xc4ceb9fe1a85ec53; | k ^= k >> 33; | return k; |} | |/// |@safe version(mir_random_test) unittest |{ | auto rnd = Random(unpredictableSeed); | auto n = rnd(); | static assert(is(typeof(n) == size_t)); |} | |/++ |The "default", "favorite", "suggested" random number generator type on |the current platform. It is an alias for one of the |generators. You may want to use it if (1) you need to generate some |nice random numbers, and (2) you don't care for the minutiae of the |method being used. |+/ |static if (is(size_t == uint)) | alias Random = Mt19937; |else | alias Random = Mt19937_64; | |/// |version(mir_random_test) unittest |{ | import std.traits; | static assert(isSaturatedRandomEngine!Random); | static assert(is(EngineReturnType!Random == size_t)); |} | |static if (THREAD_LOCAL_STORAGE_AVAILABLE) |{ | /++ | Thread-local instance of the default $(LREF Random) allocated and seeded independently | for each thread. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS). | +/ | alias rne = threadLocal!Random; | /// | @nogc nothrow @safe version(mir_random_test) unittest | { | import mir.random; | import std.complex; | | auto c = complex(rne.rand!real, rne.rand!real); | | int[10] array; | foreach (ref e; array) | e = rne.rand!int; | auto picked = array[rne.randIndex(array.length)]; | } | | private static struct TL(Engine) | if (isSaturatedRandomEngine!Engine && is(Engine == struct)) | { | static bool initialized; | static if (_isVoidInitOkay!Engine) | static Engine engine = void; | else static if (__traits(compiles, { Engine defaultConstructed; })) | static Engine engine; | else | static Engine engine = Engine.init; | | static if (is(ucent) && is(typeof((ucent t) => Engine(t)))) | alias seed_t = ucent; | else static if (is(typeof((ulong t) => Engine(t)))) | alias seed_t = ulong; | else static if (is(typeof((uint t) => Engine(t)))) | alias seed_t = uint; | else | alias seed_t = EngineReturnType!Engine; | | pragma(inline, false) // Usually called only once per thread. 
| private static void reseed() | { 0000000| engine.__ctor(unpredictableSeed!(seed_t)); 0000000| initialized = true; | } | } | /++ | `threadLocal!Engine` returns a reference to a thread-local instance of | the specified random number generator allocated and seeded uniquely | for each thread. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS). | | `threadLocalPtr!Engine` is a pointer to the area of thread-local | storage used by `threadLocal!Engine`. This function is provided because | the compiler can infer it is `@safe`, unlike `&(threadLocal!Engine)`. | Like `threadLocal!Engine` this function will auto-initialize the engine. | $(I Do not share pointers returned by threadLocalPtr between | threads!) | | `threadLocalInitialized!Engine` is a low-level way to explicitly change | the "initialized" flag used by `threadLocal!Engine` to determine whether | the Engine needs to be seeded. Setting this to `false` gives a way of | forcing the next call to `threadLocal!Engine` to reseed. In general this | is unnecessary but there are some specialized use cases where users have | requested this ability. | +/ | @property ref Engine threadLocal(Engine)() | if (isSaturatedRandomEngine!Engine && is(Engine == struct)) | { | version (DigitalMars) | pragma(inline);//DMD may fail to inline this. | else | pragma(inline, true); | import mir.utility: _expect; 0000000| if (_expect(!TL!Engine.initialized, false)) | { 0000000| TL!Engine.reseed(); | } 0000000| return TL!Engine.engine; | } | /// ditto | @property Engine* threadLocalPtr(Engine)() | if (isSaturatedRandomEngine!Engine && is(Engine == struct)) | { | version (DigitalMars) | pragma(inline);//DMD may fail to inline this. | else | pragma(inline, true); | import mir.utility: _expect; | if (_expect(!TL!Engine.initialized, false)) | { | TL!Engine.reseed(); | } | return &TL!Engine.engine; | } | /// ditto | @property ref bool threadLocalInitialized(Engine)() | if (isSaturatedRandomEngine!Engine && is(Engine == struct)) | { | version (DigitalMars) | pragma(inline);//DMD may fail to inline this. | else | pragma(inline, true); | return TL!Engine.initialized; | } | /// | @nogc nothrow @safe version(mir_random_test) unittest | { | import mir.random; | import mir.random.engine.xorshift; | | alias gen = threadLocal!Xorshift1024StarPhi; | double x = gen.rand!double; | size_t i = gen.randIndex(100u); | ulong a = gen.rand!ulong; | } | /// | @nogc nothrow @safe version(mir_random_test) unittest | { | import mir.random; | //If you need a pointer to the engine, getting it like this is @safe: | Random* ptr = threadLocalPtr!Random; | } | /// | @nogc nothrow @safe version(mir_random_test) unittest | { | import mir.random; | import mir.random.engine.xorshift; | //If you need to mark the engine as uninitialized to force a reseed, | //you can do it like this: | threadLocalInitialized!Xorshift1024StarPhi = false; | } | /// | @nogc nothrow @safe version(mir_random_test) unittest | { | import mir.random; | import mir.random.engine.mersenne_twister; | //You can mark the engine as already initialized to skip | //automatic seeding then initialize it yourself, for instance | //if you want to use a known seed rather than a random one. 
| threadLocalInitialized!Mt19937 = true; | immutable uint[4] customSeed = [0x123, 0x234, 0x345, 0x456]; | threadLocal!Mt19937.__ctor(customSeed); | foreach(_; 0..999) | threadLocal!Mt19937.rand!uint; | assert(3460025646u == threadLocal!Mt19937.rand!uint); | } | /// | @nogc nothrow @safe version(mir_random_test) unittest | { | import mir.random; | import mir.random.engine.xorshift; | | alias gen = threadLocal!Xorshift1024StarPhi; | | //If you want to you can call the generator's opCall instead of using | //rand!T but it is somewhat clunky because of the ambiguity of | //@property syntax: () looks like optional function parentheses. | static assert(!__traits(compiles, {ulong x0 = gen();}));//<-- Won't work | static assert(is(typeof(gen()) == Xorshift1024StarPhi));//<-- because the type is this. | ulong x1 = gen.opCall();//<-- This works though. | ulong x2 = gen()();//<-- This also works. | | //But instead of any of those you should really just use gen.rand!T. | ulong x3 = gen.rand!ulong; | } |// /// |// @nogc nothrow pure @safe version(mir_random_test) unittest |// { |// //If you want something like Phobos std.random.rndGen and |// //don't care about the specific algorithm you can do this: |// alias rndGen = threadLocal!Random; |// } | | @nogc nothrow @system version(mir_random_test) unittest | { | //Verify Returns same instance every time per thread. | import mir.random; | import mir.random.engine.xorshift; | | Xorshift1024StarPhi* addr = &(threadLocal!Xorshift1024StarPhi()); | Xorshift1024StarPhi* sameAddr = &(threadLocal!Xorshift1024StarPhi()); | assert(addr is sameAddr); | assert(sameAddr is threadLocalPtr!Xorshift1024StarPhi); | } | | /++ | Sets or resets the _seed of `threadLocal!Engine` using the given arguments. | It is not necessary to call this except if you wish to ensure the | PRNG uses a known _seed. | +/ | void setThreadLocalSeed(Engine, A...)(auto ref A seed) | if (isSaturatedRandomEngine!Engine && is(Engine == struct) | && A.length >= 1 && is(typeof((ref A a) => Engine(a)))) | { | TL!Engine.initialized = true; | TL!Engine.engine.__ctor(seed); | } | /// | @nogc nothrow @system version(mir_random_test) unittest | { | import mir.random; | alias rnd = threadLocal!Random; | | setThreadLocalSeed!Random(123); | immutable float x = rnd.rand!float; | | assert(x != rnd.rand!float); | | setThreadLocalSeed!Random(123); | immutable float y = rnd.rand!float; | | assert(x == y); | } |} |else |{ | static assert(!THREAD_LOCAL_STORAGE_AVAILABLE); | | @property ref Random rne()() | { | static assert(0, "Thread-local storage not available!"); | } | | template threadLocal(T) | { | static assert(0, "Thread-local storage not available!"); | } | | template threadLocalPtr(T) | { | static assert(0, "Thread-local storage not available!"); | } | | template threadLocalInitialized(T) | { | static assert(0, "Thread-local storage not available!"); | } | | template setThreadLocalSeed(T, A...) | { | static assert(0, "Thread-local storage not available!"); | } |} | |version(linux) |{ | import mir.linux._asm.unistd; | enum bool LINUX_NR_GETRANDOM = (__traits(compiles, {enum e = NR_getrandom;})); | //If X86_64 or X86 are missing there is a problem with the library. 
| static if (!LINUX_NR_GETRANDOM) | { | version (X86_64) | static assert(0, "Missing linux syscall constants!"); | version (X86) | static assert(0, "Missing linux syscall constants!"); | } |} |else | enum bool LINUX_NR_GETRANDOM = false; | |static if (LINUX_NR_GETRANDOM) |{ | // getrandom was introduced in Linux 3.17 | private __gshared bool getRandomFailedENOSYS = false; | | private extern(C) int syscall(size_t ident, size_t n, size_t arg1, size_t arg2) @nogc nothrow; | | /* | * Flags for getrandom(2) | * | * GRND_NONBLOCK Don't block and return EAGAIN instead | * GRND_RANDOM Use the /dev/random pool instead of /dev/urandom | */ | private enum GRND_NONBLOCK = 0x0001; | private enum GRND_RANDOM = 0x0002; | | private enum GETRANDOM = NR_getrandom; | | /* | http://man7.org/linux/man-pages/man2/getrandom.2.html | If the urandom source has been initialized, reads of up to 256 bytes | will always return as many bytes as requested and will not be | interrupted by signals. No such guarantees apply for larger buffer | sizes. | */ | private ptrdiff_t genRandomImplSysBlocking()(scope void* ptr, size_t len) @nogc nothrow @system | { | while (len > 0) | { | auto res = syscall(GETRANDOM, cast(size_t) ptr, len, 0); | if (res >= 0) | { | len -= res; | ptr += res; | } | else | { | return res; | } | } | return 0; | } | | /* | * If the GRND_NONBLOCK flag is set, then | * getrandom() does not block in these cases, but instead | * immediately returns -1 with errno set to EAGAIN. | */ | private ptrdiff_t genRandomImplSysNonBlocking()(scope void* ptr, size_t len) @nogc nothrow @system | { | return syscall(GETRANDOM, cast(size_t) ptr, len, GRND_NONBLOCK); | } |} | |version(AnyARC4Random) |extern(C) private @nogc nothrow |{ | void arc4random_buf(scope void* buf, size_t nbytes) @system; | uint arc4random() @trusted; |} | |version(Darwin) |{ | //On Darwin /dev/random is identical to /dev/urandom (neither blocks | //when there is low system entropy) so there is no point mucking | //about with file descriptors. Just use arc4random_buf for both. |} |else version(Posix) |{ | import core.stdc.stdio : fclose, feof, ferror, fopen, fread; | alias IOType = typeof(fopen("a", "b")); | private __gshared IOType fdRandom; | version (SecureARC4Random) | { | //Don't need /dev/urandom if we have arc4random_buf. | } | else | private __gshared IOType fdURandom; | | | /* The /dev/random device is a legacy interface which dates back to a | time where the cryptographic primitives used in the implementation of | /dev/urandom were not widely trusted. It will return random bytes | only within the estimated number of bits of fresh noise in the | entropy pool, blocking if necessary. /dev/random is suitable for | applications that need high quality randomness, and can afford | indeterminate delays. | | When the entropy pool is empty, reads from /dev/random will block | until additional environmental noise is gathered. | */ | private ptrdiff_t genRandomImplFileBlocking()(scope void* ptr, size_t len) @nogc nothrow @system | { | if (fdRandom is null) | { | fdRandom = fopen("/dev/random", "r"); | if (fdRandom is null) | return -1; | } | | while (len > 0) | { | auto res = fread(ptr, 1, len, fdRandom); | len -= res; | ptr += res; | // check for possible permanent errors | if (len != 0) | { | if (fdRandom.ferror) | return -1; | | if (fdRandom.feof) | return -1; | } | } | | return 0; | } |} | |version (SecureARC4Random) |{ | //Don't need /dev/urandom if we have arc4random_buf. 
|} |else version(Posix) |{ | /** | When read, the /dev/urandom device returns random bytes using a | pseudorandom number generator seeded from the entropy pool. Reads | from this device do not block (i.e., the CPU is not yielded), but can | incur an appreciable delay when requesting large amounts of data. | When read during early boot time, /dev/urandom may return data prior | to the entropy pool being initialized. | */ | private ptrdiff_t genRandomImplFileNonBlocking()(scope void* ptr, size_t len) @nogc nothrow @system | { | if (fdURandom is null) | { | fdURandom = fopen("/dev/urandom", "r"); | if (fdURandom is null) | return -1; | } | | auto res = fread(ptr, 1, len, fdURandom); | // check for possible errors | if (res != len) | { | if (fdURandom.ferror) | return -1; | | if (fdURandom.feof) | return -1; | } | return res; | } |} | |version(Windows) |{ | // the wincrypt headers in druntime are broken for x64! | private alias ULONG_PTR = size_t; // uint in druntime | private alias BOOL = bool; | private alias DWORD = uint; | private alias LPCWSTR = wchar*; | private alias PBYTE = ubyte*; | private alias HCRYPTPROV = ULONG_PTR; | private alias LPCSTR = const(char)*; | | private extern(Windows) BOOL CryptGenRandom(HCRYPTPROV, DWORD, PBYTE) @nogc @safe nothrow; | private extern(Windows) BOOL CryptAcquireContextA(HCRYPTPROV*, LPCSTR, LPCSTR, DWORD, DWORD) @nogc nothrow; | private extern(Windows) BOOL CryptAcquireContextW(HCRYPTPROV*, LPCWSTR, LPCWSTR, DWORD, DWORD) @nogc nothrow; | private extern(Windows) BOOL CryptReleaseContext(HCRYPTPROV, ULONG_PTR) @nogc nothrow; | | private __gshared ULONG_PTR hProvider; | | private auto initGetRandom()() @nogc @trusted nothrow | { | import core.sys.windows.winbase : GetLastError; | import core.sys.windows.winerror : NTE_BAD_KEYSET; | import core.sys.windows.wincrypt : PROV_RSA_FULL, CRYPT_NEWKEYSET, CRYPT_VERIFYCONTEXT, CRYPT_SILENT; | | // https://msdn.microsoft.com/en-us/library/windows/desktop/aa379886(v=vs.85).aspx | // For performance reasons, we recommend that you set the pszContainer | // parameter to NULL and the dwFlags parameter to CRYPT_VERIFYCONTEXT | // in all situations where you do not require a persisted key. | // CRYPT_SILENT is intended for use with applications for which the UI cannot be displayed by the CSP. | if (!CryptAcquireContextW(&hProvider, null, null, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT | CRYPT_SILENT)) | { | if (GetLastError() == NTE_BAD_KEYSET) | { | // Attempt to create default container | if (!CryptAcquireContextA(&hProvider, null, null, PROV_RSA_FULL, CRYPT_NEWKEYSET | CRYPT_SILENT)) | return 1; | } | else | { | return 1; | } | } | | return 0; | } |} | |/++ |Constructs the mir random seed generators. |This constructor needs to be called once $(I before) |other calls in `mir.random.engine`. | |Automatically called by DRuntime. |+/ |extern(C) void mir_random_engine_ctor() @system nothrow @nogc |{ | version(Windows) | { | if (hProvider == 0) | initGetRandom; | } |} | |/++ |Destructs the mir random seed generators. | |Automatically called by DRuntime. |+/ |extern(C) void mir_random_engine_dtor() @system nothrow @nogc |{ | version(Windows) | { | if (hProvider > 0) | CryptReleaseContext(hProvider, 0); | } | else | version(Darwin) | { | | } | else | version(Posix) | { | if (fdRandom !is null) | fdRandom.fclose; | | version (SecureARC4Random) | { | //Don't need /dev/urandom if we have arc4random_buf. 
| } | else if (fdURandom !is null) | fdURandom.fclose; | } |} | | |version(D_BetterC) |{ | pragma(crt_constructor) | extern(C) void mir_random_engine_ctor_() @system nothrow @nogc | { | mir_random_engine_ctor(); | } | | pragma(crt_destructor) | extern(C) void mir_random_engine_dtor_() @system nothrow @nogc | { | mir_random_engine_dtor(); | } |} |else |{ | /// Automatically calls the extern(C) module constructor | shared static this() | { 1| mir_random_engine_ctor(); | } | | /// Automatically calls the extern(C) module destructor | shared static ~this() | { 0000000| mir_random_engine_dtor(); | } |} | |/++ |Fills a buffer with random data. |If not enough entropy has been gathered, it will block. | |Note that on Mac OS X this method will never block. | |Params: | ptr = pointer to the buffer to fill | len = length of the buffer (in bytes) | |Returns: | A non-zero integer if an error occurred. |+/ |extern(C) ptrdiff_t mir_random_genRandomBlocking(scope void* ptr , size_t len) @nogc nothrow @system |{ | version(Windows) | { | static if (DWORD.max >= size_t.max) | while(!CryptGenRandom(hProvider, len, cast(PBYTE) ptr)) {} | else | while (len != 0) | { | import mir.utility : min; | const n = min(DWORD.max, len); | if (CryptGenRandom(hProvider, cast(DWORD) n, cast(PBYTE) ptr)) | { | len -= n; | } | } | return 0; | } | else version (Darwin) | { 0000000| arc4random_buf(ptr, len); 0000000| return 0; | } | else | { | static if (LINUX_NR_GETRANDOM) | if (!getRandomFailedENOSYS) // harmless data race | { | import core.stdc.errno; | ptrdiff_t result = genRandomImplSysBlocking(ptr, len); | if (result >= 0) | return result; | if (errno != ENOSYS) | return result; | getRandomFailedENOSYS = true; // harmless data race | } | return genRandomImplFileBlocking(ptr, len); | } |} | |/// ditto |alias genRandomBlocking = mir_random_genRandomBlocking; | |/// ditto |ptrdiff_t genRandomBlocking()(scope ubyte[] buffer) @nogc nothrow @trusted |{ | pragma(inline, true); | return mir_random_genRandomBlocking(buffer.ptr, buffer.length); |} | |/// |@safe nothrow version(mir_random_test) unittest |{ | ubyte[] buf = new ubyte[10]; | genRandomBlocking(buf); | | int sum; | foreach (b; buf) | sum += b; | | assert(sum > 0, "Only zero points generated"); |} | |@nogc nothrow @safe version(mir_random_test) unittest |{ | ubyte[10] buf; | genRandomBlocking(buf); | | int sum; | foreach (b; buf) | sum += b; | | assert(sum > 0, "Only zero points generated"); |} | |/++ |Fills a buffer with random data. |If not enough entropy has been gathered, it won't block. |Hence the error code should be inspected. | |On Linux >= 3.17 genRandomNonBlocking is guaranteed to succeed for 256 bytes and |fewer. | |On Mac OS X, OpenBSD, and NetBSD genRandomNonBlocking is guaranteed to |succeed for any number of bytes. 
| |Params: | buffer = the buffer to fill | len = length of the buffer (in bytes) | |Returns: | The number of bytes filled - a negative number if an error occurred |+/ |extern(C) size_t mir_random_genRandomNonBlocking(scope void* ptr, size_t len) @nogc nothrow @system |{ | version(Windows) | { | static if (DWORD.max < size_t.max) | if (len > DWORD.max) | len = DWORD.max; | if (!CryptGenRandom(hProvider, cast(DWORD) len, cast(PBYTE) ptr)) | return -1; | return len; | } | else version(SecureARC4Random) | { 0000000| arc4random_buf(ptr, len); 0000000| return len; | } | else | { | static if (LINUX_NR_GETRANDOM) | if (!getRandomFailedENOSYS) // harmless data race | { | import core.stdc.errno; | ptrdiff_t result = genRandomImplSysNonBlocking(ptr, len); | if (result >= 0) | return result; | if (errno != ENOSYS) | return result; | getRandomFailedENOSYS = true; // harmless data race | } | return genRandomImplFileNonBlocking(ptr, len); | } |} |/// ditto |alias genRandomNonBlocking = mir_random_genRandomNonBlocking; |/// ditto |size_t genRandomNonBlocking()(scope ubyte[] buffer) @nogc nothrow @trusted |{ | pragma(inline, true); | return mir_random_genRandomNonBlocking(buffer.ptr, buffer.length); |} | |/// |@safe nothrow version(mir_random_test) unittest |{ | ubyte[] buf = new ubyte[10]; | genRandomNonBlocking(buf); | | int sum; | foreach (b; buf) | sum += b; | | assert(sum > 0, "Only zero points generated"); |} | |@nogc nothrow @safe |version(mir_random_test) unittest |{ | ubyte[10] buf; | genRandomNonBlocking(buf); | | int sum; | foreach (b; buf) | sum += b; | | assert(sum > 0, "Only zero points generated"); |} ../../../.dub/packages/mir-random-2.2.4/mir-random/source/mir/random/engine/package.d is 7% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-random-2.2.4-mir-random-source-mir-random-package.lst |/++ |$(SCRIPT inhibitQuickIndex = 1;) | |Basic API to construct non-uniform random number generators and stochastic algorithms. |Non-uniform and uniform random variable can be found at `mir.random.variable`. | |$(TABLE $(H2 Generation functions), |$(TR $(TH Function Name) $(TH Description)) |$(T2 rand, Generates real, integral, boolean, and enumerated uniformly distributed values.) |$(T2 randIndex, Generates uniformly distributed index.) |$(T2 randGeometric, Generates geometric distribution with `p = 1/2`.) |$(T2 randExponential2, Generates scaled Exponential distribution.) |) | |$(TABLE $(H2 Phobos Compatibility), |$(TR $(TH Template Name) $(TH Description)) |$(T2 PhobosRandom, Extends a Mir random number engine to meet Phobos `std.random` interface) |$(T2 isPhobosUniformRNG, Tests if type is a Phobos-style uniform RNG) |) | |Publicly includes `mir.random.engine`. | |Authors: Ilya Yaroshenko, Nathan Sashihara |Copyright: Copyright, Ilya Yaroshenko 2016-. |License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0). |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, random, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) | |+/ |module mir.random; | |import std.traits; |import mir.bitop: cttz; |import mir.math.common: log2; | |public import mir.random.engine; | |version (LDC) |{ | import ldc.intrinsics: llvm_expect; | // LDC 1.8.0 supports llvm_expect in CTFE. 
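|    // (Editor's note, added comment:) _ctfeExpect is a mixin helper: it yields
|    // "llvm_expect(expr, expected)" when the intrinsic is usable at compile time,
|    // otherwise it falls back to the plain expression string.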
| private template _ctfeExpect(string expr, string expected) | { | static if (__traits(compiles, { enum a = llvm_expect(123, 456); static assert(a == 123); })) | private enum _ctfeExpect = "llvm_expect("~expr~","~expected~")"; | else | private enum _ctfeExpect = expr; | } |} |else version (GNU) |{ | import gcc.builtins: __builtin_expect; | private enum _ctfeExpect(string expr, string expected) = `__builtin_expect(`~expr~`,`~expected~`)`; |} |else |{ | private enum _ctfeExpect(string expr, string expected) = expr; |} | |/++ |Params: | gen = saturated random number generator |Returns: | Uniformly distributed integer for interval `[T.min .. T.max]`. |+/ |T rand(T, G)(scope ref G gen) | if (isSaturatedRandomEngine!G && isIntegral!T && !is(T == enum)) |{ | alias R = EngineReturnType!G; | enum P = T.sizeof / R.sizeof; | static if (P > 1) | { | _Uab!(R[P],T) u = void; | version(LittleEndian) | foreach (ref e; u.asArray) | e = gen(); | else | foreach_reverse (ref e; u.asArray) | e = gen(); | return u.asInteger; | } | else static if (preferHighBits!G && P == 0) | { | version(LDC) pragma(inline, true); | return cast(T) (gen() >>> ((R.sizeof - T.sizeof) * 8)); | } | else | { | version(LDC) pragma(inline, true); | return cast(T) gen(); | } |} | |/// ditto |T rand(T, G)(scope G* gen) | if (isSaturatedRandomEngine!G && isIntegral!T && !is(T == enum)) |{ | return rand!(T, G)(*gen); |} | |/// ditto |T rand(T)() | if (isIntegral!T && !is(T == enum)) |{ | return rand!T(rne); |} | |/// |@nogc nothrow @safe version(mir_random_test) unittest |{ | auto s = rand!short; | auto n = rand!ulong; |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | auto s = gen.rand!short; | auto n = gen.rand!ulong; |} | |/++ |Params: | gen = saturated random number generator |Returns: | Uniformly distributed boolean. |+/ |bool rand(T : bool, G)(scope ref G gen) | if (isSaturatedRandomEngine!G) |{ | import std.traits : Signed; | return 0 > cast(Signed!(EngineReturnType!G)) gen(); |} | |/// ditto |bool rand(T : bool, G)(scope G* gen) | if (isSaturatedRandomEngine!G) |{ | return rand!(T, G)(*gen); |} | |/// ditto |bool rand(T : bool)() |{ | return rand!T(rne); |} | |/// |@nogc nothrow @safe version(mir_random_test) unittest |{ | auto s = rand!bool; |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | auto s = gen.rand!bool; |} | |@nogc nothrow @safe version(mir_random_test) unittest |{ | //Coverage. Impure because uses thread-local. | Random* gen = threadLocalPtr!Random; | auto s = gen.rand!bool; |} | |private alias Iota(size_t j) = Iota!(0, j); | |private template Iota(size_t i, size_t j) |{ | import std.meta; | static assert(i <= j, "Iota: i should be less than or equal to j"); | static if (i == j) | alias Iota = AliasSeq!(); | else | alias Iota = AliasSeq!(i, Iota!(i + 1, j)); |} | |/+ |Returns pseudo-random integer with the low `bitsWanted` bits set to |random values and the remaining high bits all 0. 
|+/ |private T _randBits(T, uint bitsWanted, G)(scope ref G gen) |if (bitsWanted >= 0 && bitsWanted <= T.sizeof * 8 | && (is(T == uint) || is(T == ulong) || is(T == size_t))) |{ | static if (EngineReturnType!G.sizeof >= T.sizeof) | auto bits = gen(); | else | auto bits = gen.rand!T; | static if (preferHighBits!G) | { | enum rshift = (typeof(bits).sizeof * 8) - bitsWanted; | return cast(T) (bits >>> rshift); | } | else | { | enum mask = (typeof(bits)(1) << bitsWanted) - 1; | return cast(T) (bits & typeof(bits)(mask)); | } |} | |/++ |Params: | gen = saturated random number generator |Returns: | Uniformly distributed enumeration. |+/ |T rand(T, G)(scope ref G gen) | if (isSaturatedRandomEngine!G && is(T == enum)) |{ | static if (is(T : long)) | enum tiny = [EnumMembers!T] == [Iota!(EnumMembers!T.length)]; | else | enum tiny = false; | enum n = [EnumMembers!T].length; | // If `gen` produces 32 bits or fewer at a time and we have fewer | // than 2^^32 elements, use a `uint` index. | static if (n <= uint.max && EngineReturnType!G.max <= uint.max) | alias IndexType = uint; | else | alias IndexType = size_t; | | static if ((n & (n - 1)) == 0) | { | // Optimized case: power of 2. | import core.bitop : bsr; | enum bitsWanted = bsr(n); | IndexType index = _randBits!(IndexType, bitsWanted)(gen); | } | else | { | // General case. | IndexType index = gen.randIndex!IndexType(n); | } | | static if (tiny) | { | return cast(T) index; | } | else | { | static immutable T[EnumMembers!T.length] members = [EnumMembers!T]; | return members[index]; | } |} | |/// ditto |T rand(T, G)(scope G* gen) | if (isSaturatedRandomEngine!G && is(T == enum)) |{ | return rand!(T, G)(*gen); |} | |/// ditto |T rand(T)() | if (is(T == enum)) |{ | return .rand!T(rne); |} | |/// |@nogc nothrow @safe version(mir_random_test) unittest |{ | enum A { a, b, c } | auto e = rand!A; |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | enum A { a, b, c } | auto e = gen.rand!A; |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | enum A : dchar { a, b, c } | auto e = gen.rand!A; |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | enum A : string { a = "a", b = "b", c = "c" } | auto e = gen.rand!A; |} | |@nogc nothrow @safe version(mir_random_test) unittest |{ | //Coverage. Impure because uses thread-local. | Random* gen = threadLocalPtr!Random; | enum A : dchar { a, b, c, d } | auto e = gen.rand!A; |} | |private static union _U |{ | real r; | struct | { | version(LittleEndian) | { | ulong m; | ushort e; | } | else | { | ushort e; | align(2) | ulong m; | } | } |} | |private static union _Uab(A,B) if (A.sizeof == B.sizeof && !is(Unqual!A == Unqual!B)) |{ | A a; | B b; | | private import std.traits: isArray, isIntegral, isFloatingPoint; | | static if (isArray!A && !isArray!B) | alias asArray = a; | static if (isArray!B && !isArray!A) | alias asArray = b; | | static if (isIntegral!A && !isIntegral!B) | alias asInteger = a; | static if (isIntegral!B && !isIntegral!A) | alias asInteger = b; | | static if (isFloatingPoint!A && !isFloatingPoint!B) | alias asFloatingPoint = a; | static if (isFloatingPoint!B && !isFloatingPoint!A) | alias asFloatingPoint = b; |} | |/++ |Params: | gen = saturated random number generator | boundExp = bound exponent (optional). 
`boundExp` must be less or equal to `T.max_exp`. |Returns: | Uniformly distributed real for interval `(-2^^boundExp , 2^^boundExp)`. |Note: `fabs` can be used to get a value from positive interval `[0, 2^^boundExp$(RPAREN)`. |+/ |T rand(T, G)(scope ref G gen, sizediff_t boundExp = 0) | if (isSaturatedRandomEngine!G && isFloatingPoint!T) |{ | assert(boundExp <= T.max_exp); | static if (T.mant_dig == float.mant_dig) | { | enum W = T.sizeof * 8 - T.mant_dig;//8 | _Uab!(int,float) u = void; | u.asInteger = gen.rand!uint; | enum uint EXPMASK = 0x7F80_0000; | boundExp -= T.min_exp - 1; | size_t exp = EXPMASK & u.asInteger; | exp = boundExp - (exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W); | u.asInteger &= ~EXPMASK; | if(cast(sizediff_t)exp < 0) | { | exp = -cast(sizediff_t)exp; | uint m = u.asInteger & int.max; | if(exp >= T.mant_dig) | m = 0; | else | m >>= cast(uint)exp; | u.asInteger = (u.asInteger & ~int.max) ^ m; | exp = 0; | } | u.asInteger = cast(uint)(exp << (T.mant_dig - 1)) ^ u.asInteger; | return u.asFloatingPoint; | } | else | static if (T.mant_dig == double.mant_dig) | { | enum W = T.sizeof * 8 - T.mant_dig; //11 | _Uab!(long,double) u = void; | u.asInteger = gen.rand!ulong; | enum ulong EXPMASK = 0x7FF0_0000_0000_0000; | boundExp -= T.min_exp - 1; | ulong exp = EXPMASK & u.asInteger; | exp = ulong(boundExp) - (exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W); | u.asInteger &= ~EXPMASK; | if(cast(long)exp < 0) | { | exp = -cast(sizediff_t)exp; | ulong m = u.asInteger & long.max; | if(exp >= T.mant_dig) | m = 0; | else | m >>= cast(uint)exp; | u.asInteger = (u.asInteger & ~long.max) ^ m; | exp = 0; | } | u.asInteger = (exp << (T.mant_dig - 1)) ^ u.asInteger; | return u.asFloatingPoint; | } | else | static if (T.mant_dig == 64) | { | enum W = 15; | auto d = gen.rand!uint; | auto m = gen.rand!ulong; | enum uint EXPMASK = 0x7FFF; | boundExp -= T.min_exp - 1; | size_t exp = EXPMASK & d; | exp = boundExp - (exp ? 
cttz(exp) : gen.randGeometric + W); | if (cast(sizediff_t)exp > 0) | m |= ~long.max; | else | { | m &= long.max; | exp = -cast(sizediff_t)exp; | if(exp >= T.mant_dig) | m = 0; | else | m >>= cast(uint)exp; | exp = 0; | } | d = cast(uint) exp ^ (d & ~EXPMASK); | _U ret = void; | ret.e = cast(ushort)d; | ret.m = m; | return ret.r; | } | /// TODO: quadruple | else static assert(0); |} | |/// ditto |T rand(T, G)(scope G* gen, sizediff_t boundExp = 0) | if (isSaturatedRandomEngine!G && isFloatingPoint!T) |{ | return rand!(T, G)(*gen, boundExp); |} | |/// ditto |T rand(T)(sizediff_t boundExp = 0) | if (isFloatingPoint!T) |{ | return rand!T(rne, boundExp); |} | | |/// |@nogc nothrow @safe version(mir_random_test) unittest |{ | import mir.math.common: fabs; | | auto a = rand!float; | assert(-1 < a && a < +1); | | auto b = rand!double(4); | assert(-16 < b && b < +16); | | auto c = rand!double(-2); | assert(-0.25 < c && c < +0.25); | | auto d = rand!real.fabs; | assert(0.0L <= d && d < 1.0L); |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.math.common: fabs; | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | | auto a = gen.rand!float; | assert(-1 < a && a < +1); | | auto b = gen.rand!double(4); | assert(-16 < b && b < +16); | | auto c = gen.rand!double(-2); | assert(-0.25 < c && c < +0.25); | | auto d = gen.rand!real.fabs; | assert(0.0L <= d && d < 1.0L); |} | |/// Subnormal numbers |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | auto x = gen.rand!double(double.min_exp-1); | assert(-double.min_normal < x && x < double.min_normal); |} | |@nogc nothrow @safe version(mir_random_test) unittest |{ | //Coverage. Impure because uses thread-local. | import mir.math.common: fabs; | import std.meta: AliasSeq; | | auto a = rne.rand!float; | assert(-1 < a && a < +1); | | auto b = rne.rand!double(4); | assert(-16 < b && b < +16); | | auto c = rne.rand!double(-2); | assert(-0.25 < c && c < +0.25); | | auto d = rne.rand!real.fabs; | assert(0.0L <= d && d < 1.0L); | | foreach(T; AliasSeq!(float, double, real)) | { | auto f = rne.rand!T(T.min_exp-1); | assert(f.fabs < T.min_normal, T.stringof); | } |} | |/++ |Params: | gen = uniform random number generator | m = positive module |Returns: | Uniformly distributed integer for interval `[0 .. m$(RPAREN)`. 
|+/ |T randIndex(T, G)(scope ref G gen, T _m) | if(isSaturatedRandomEngine!G && isUnsigned!T) |{ | immutable m = _m + 0u; | static if (EngineReturnType!G.sizeof >= T.sizeof * 2) | alias MaybeR = EngineReturnType!G; | else static if (uint.sizeof >= T.sizeof * 2) | alias MaybeR = uint; | else static if (ulong.sizeof >= T.sizeof * 2) | alias MaybeR = ulong; | else static if (is(ucent) && __traits(compiles, {static assert(ucent.sizeof >= T.sizeof * 2);})) | mixin ("alias MaybeR = ucent;"); | else | alias MaybeR = void; | | static if (!is(MaybeR == void)) | { | alias R = MaybeR; | static assert(R.sizeof >= T.sizeof * 2); | //Use Daniel Lemire's fast alternative to modulo reduction: | //https://lemire.me/blog/2016/06/30/fast-random-shuffling/ | R randombits = cast(R) gen.rand!T; | R multiresult = randombits * m; | T leftover = cast(T) multiresult; | if (mixin(_ctfeExpect!(`leftover < m`, `false`))) | { | immutable threshold = -m % m ; | while (leftover < threshold) | { | randombits = cast(R) gen.rand!T; | multiresult = randombits * m; | leftover = cast(T) multiresult; | } | } | enum finalshift = T.sizeof * 8; | return cast(T) (multiresult >>> finalshift); | } | else | { | import mir.utility : extMul; | //Use Daniel Lemire's fast alternative to modulo reduction: | //https://lemire.me/blog/2016/06/30/fast-random-shuffling/ | auto u = extMul!T(gen.rand!T, m); | if (mixin(_ctfeExpect!(`u.low < m`, `false`))) | { | immutable T threshold = -m % m; | while (u.low < threshold) | { | u = extMul!T(gen.rand!T, m); | } | } | return u.high; | } |} | |/// ditto |T randIndex(T, G)(scope G* gen, T m) | if(isSaturatedRandomEngine!G && isUnsigned!T) |{ | return randIndex!(T, G)(*gen, m); |} | |/// ditto |T randIndex(T)(T m) | if(isUnsigned!T) |{ | return randIndex!T(rne, m); |} | |/// |@nogc nothrow @safe version(mir_random_test) unittest |{ | auto s = randIndex(100u); | auto n = randIndex!ulong(-100); |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | auto s = gen.randIndex!uint(100); | auto n = gen.randIndex!ulong(-100); |} | |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | //CTFE check. | import std.meta : AliasSeq; | import mir.random.engine.xoshiro : Xoroshiro128Plus; | foreach (IntType; AliasSeq!(ubyte,ushort,uint,ulong)) | { | enum IntType e = (){auto g = Xoroshiro128Plus(1); return g.randIndex!IntType(100);}(); | auto gen = Xoroshiro128Plus(1); | assert(e == gen.randIndex!IntType(100)); | } |} | |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | //Test production of ulong from ulong generator. | import mir.random.engine.xoshiro; | auto gen = Xoroshiro128Plus(1); | enum ulong limit = 10; | enum count = 10; | ulong[limit] buckets; | foreach (_; 0 .. count) | { | ulong x = gen.randIndex!ulong(limit); | assert(x < limit); | buckets[cast(size_t) x] += 1; | } | foreach (i, x; buckets) | assert(x != count, "All values were the same!"); |} | |@nogc nothrow @safe version(mir_random_test) unittest |{ | //Coverage. Impure because uses thread-local. | Random* gen = threadLocalPtr!Random; | auto s = gen.randIndex!uint(100); | auto n = gen.randIndex!ulong(-100); |} | |/++ | Returns: `n >= 0` such that `P(n) := 1 / (2^^(n + 1))`. 
|+/ |size_t randGeometric(G)(scope ref G gen) | if(isSaturatedRandomEngine!G) |{ | alias R = EngineReturnType!G; | static if (R.sizeof >= size_t.sizeof) | alias T = size_t; | else | alias T = R; | for(size_t count = 0;; count += T.sizeof * 8) | if(auto val = gen.rand!T()) | return count + cttz(val); |} | |/// ditto |size_t randGeometric(G)(scope G* gen) | if(isSaturatedRandomEngine!G) |{ | return randGeometric!(G)(*gen); |} | |/// ditto |size_t randGeometric()() |{ | return randGeometric(rne); |} | |/// |@nogc nothrow @safe version(mir_random_test) unittest |{ | size_t s = randGeometric; |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xoshiro; | auto gen = Xoroshiro128Plus(1); | | size_t s = gen.randGeometric; |} | |/++ |Params: | gen = saturated random number generator |Returns: | `X ~ Exp(1) / log(2)`. |Note: `fabs` can be used to get a value from positive interval `[0, 2^^boundExp$(RPAREN)`. |+/ |T randExponential2(T, G)(scope ref G gen) | if (isSaturatedRandomEngine!G && isFloatingPoint!T) |{ | enum W = T.sizeof * 8 - T.mant_dig - 1 - bool(T.mant_dig == 64); | static if (is(T == float)) | { | _Uab!(uint,float) u = void; | u.asInteger = gen.rand!uint; | enum uint EXPMASK = 0xFF80_0000; | auto exp = EXPMASK & u.asInteger; | u.asInteger &= ~EXPMASK; | u.asInteger ^= 0x3F000000; // 0.5 | auto y = exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W; | auto x = u.asFloatingPoint; | } | else | static if (is(T == double)) | { | _Uab!(ulong,double) u = void; | u.asInteger = gen.rand!ulong; | enum ulong EXPMASK = 0xFFF0_0000_0000_0000; | auto exp = EXPMASK & u.asInteger; | u.asInteger &= ~EXPMASK; | u.asInteger ^= 0x3FE0000000000000; // 0.5 | auto y = exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W; | auto x = u.asFloatingPoint; | } | else | static if (T.mant_dig == 64) | { | _U ret = void; | ret.e = 0x3FFE; | ret.m = gen.rand!ulong | ~long.max; | auto y = gen.randGeometric; | auto x = ret.r; | } | /// TODO: quadruple | else static assert(0); | | if (x == 0.5f) | return y; | else | return -log2(x) + y; |} | |/// ditto |T randExponential2(T, G)(scope G* gen) | if (isSaturatedRandomEngine!G && isFloatingPoint!T) |{ | return randExponential2!(T, G)(*gen); |} | |/// ditto |T randExponential2(T)() | if (isFloatingPoint!T) |{ | return randExponential2!T(rne); |} | |/// |@nogc nothrow @safe version(mir_random_test) unittest |{ | auto v = randExponential2!double; |} | |/// |@nogc nothrow @safe pure version(mir_random_test) unittest |{ | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | auto v = gen.randExponential2!double(); |} | |/++ |$(LINK2 https://dlang.org/phobos/std_random.html#.isUniformRNG, |Tests if T is a Phobos-style uniform RNG.) |+/ |template isPhobosUniformRNG(T) |{ | import std.random: isUniformRNG; | enum bool isPhobosUniformRNG = isUniformRNG!T; |} | |/++ |Extends a Mir-style random number generator to also be a Phobos-style |uniform RNG. If `Engine` is already a Phobos-style uniform RNG, |`PhobosRandom` is just an alias for `Engine`. |+/ |struct PhobosRandom(Engine) if (isRandomEngine!Engine && !isPhobosUniformRNG!Engine)//Doesn't need to be saturated. |{ | alias Uint = EngineReturnType!Engine; | private Engine _engine; | private Uint _front; | | /// Default constructor and copy constructor are disabled. | @disable this(); | /// ditto | @disable this(this); | | /// Forward constructor arguments to `Engine`. 
| this(A...)(auto ref A args) | if (is(typeof(Engine(args)))) | { | _engine = Engine(args); | _front = _engine.opCall(); | } | | /// Phobos-style random interface. | enum bool isUniformRandom = true; | /// ditto | enum Uint min = Uint.min;//Always normalized. | /// ditto | enum Uint max = Engine.max;//Might not be saturated. | /// ditto | enum bool empty = false; | /// ditto | @property Uint front()() const { return _front; } | /// ditto | void popFront()() { _front = _engine.opCall(); } | /// ditto | void seed(A...)(auto ref A args) if (is(typeof(Engine(args)))) | { | _engine.__ctor(args); | _front = _engine.opCall(); | } | | /// Retain support for Mir-style random interface. | enum bool isRandomEngine = true; | /// ditto | enum bool preferHighBits = .preferHighBits!Engine; | /// ditto | Uint opCall()() | { | Uint result = _front; | _front = _engine.opCall(); | return result; | } | | /// | @property ref inout(Engine) engine()() inout @nogc nothrow pure @safe | { | return _engine; | } |} | |/// ditto |template PhobosRandom(Engine) if (isRandomEngine!Engine && isPhobosUniformRNG!Engine) |{ | alias PhobosRandom = Engine; |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xorshift: Xorshift1024StarPhi; | import std.random: isSeedable, isPhobosUniformRNG = isUniformRNG; | | alias RNG = PhobosRandom!Xorshift1024StarPhi; | | //Phobos interface | static assert(isPhobosUniformRNG!(RNG, ulong)); | static assert(isSeedable!(RNG, ulong)); | //Mir interface | static assert(isSaturatedRandomEngine!RNG); | static assert(is(EngineReturnType!RNG == ulong)); | | auto gen = Xorshift1024StarPhi(1); | auto rng = RNG(1); | assert(gen() == rng.front); | rng.popFront(); | assert(gen() == rng.front); | rng.popFront(); | assert(gen() == rng()); | | gen.__ctor(1); | rng.seed(1); | assert(gen() == rng()); |} ../../../.dub/packages/mir-random-2.2.4/mir-random/source/mir/random/package.d has no code <<<<<< EOF # path=doc/dlang.org/code_coverage.dd Ddoc $(D_S Code Coverage Analysis, $(P A major part of the engineering of a professional software project is creating a test suite for it. Without some sort of test suite, it is impossible to know if the software works at all. The D language has many features to aid in the creation of test suites, such as $(LINK2 spec/unittest.html, unit tests) and $(LINK2 spec/contracts.html, contract programming). But there's the issue of how thoroughly the test suite tests the code. The $(LINK2 http://www.digitalmars.com/ctg/trace.html, profiler) can give valuable information on which functions were called, and by whom. But to look inside a function, and determine which statements were executed and which were not, requires a code coverage analyzer. ) $(P A code coverage analyzer will help in these ways:) $(OL $(LI Expose code that is not exercised by the test suite. Add test cases that will exercise it.) $(LI Identify code that is unreachable. Unreachable code is often the leftover result of program design changes. Unreachable code should be removed, as it can be very confusing to the maintenance programmer.) $(LI It can be used to track down why a particular section of code exists, as the test case that causes it to execute will illuminate why.) $(LI Since execution counts are given for each line, it is possible to use the coverage analysis to reorder the basic blocks in a function to minimize jmps in the most used path, thus optimizing it.) 
) $(P Experience with code coverage analyzers show that they dramatically reduce the number of bugs in shipping code. But it isn't a panacea, a code coverage analyzer won't help with:) $(OL $(LI Identifying race conditions.) $(LI Memory consumption problems.) $(LI Pointer bugs.) $(LI Verifying that the program got the correct result.) ) $(P Code coverage analysers are available for many popular languages, but they are often third party products that integrate poorly with the compiler, and are often very expensive. A big problem with third party products is, in order to instrument the source code, they must include what is essentially a full blown compiler front end for the same language. Not only is this an expensive proposition, it often winds up out of step with the various compiler vendors as their implementations change and as they evolve various extensions. ($(LINK2 http://gcc.gnu.org/onlinedocs/gcc-3.0/gcc_8.html, gcov), the Gnu coverage analyzer, is an exception as it is both free and is integrated into gcc.) ) $(P The D code coverage analyser is built in as part of the D compiler. Therefore, it is always in perfect synchronization with the language implementation. It's implemented by establishing a counter for each line in each module compiled with the $(DDSUBLINK dmd-windows,switch-cov, $(B -cov)) switch. Code is inserted at the beginning of each statement to increment the corresponding counter. When the program finishes, the runtime collects all the counters, merges it with the source files, and writes the reports out to listing (.lst) files.) $(P For example, consider the Sieve program:) ---------------------- /* Eratosthenes Sieve prime number calculation. */ import std.stdio; bool flags[8191]; int main() { int i, prime, k, count, iter; writeln("10 iterations"); for (iter = 1; iter <= 10; iter++) { count = 0; flags[] = true; for (i = 0; i < flags.length; i++) { if (flags[i]) { prime = i + i + 3; k = i + prime; while (k < flags.length) { flags[k] = false; k += prime; } count += 1; } } } writefln("%d primes", count); return 0; } ---------------------- $(P Compile and run it with:) $(CONSOLE dmd sieve -cov sieve ) $(P The output file will be created called $(D sieve.lst), the contents of which are:) $(CONSOLE |/* Eratosthenes Sieve prime number calculation. */ | |import std.stdio; | |bool flags[8191]; | |int main() |{ 5| int i, prime, k, count, iter; | 1| writeln("10 iterations"); 22| for (iter = 1; iter <= 10; iter++) | { 10| count = 0; 10| flags[] = true; 163840| for (i = 0; i < flags.length; i++) | { 81910| if (flags[i]) | { 18990| prime = i + i + 3; 18990| k = i + prime; 168980| while (k < flags.length) | { 149990| flags[k] = false; 149990| k += prime; | } 18990| count += 1; | } | } | } 1| writefln("%d primes", count); 1| return 0; |} sieve.d is 100% covered ) $(P The numbers to the left of the $(B |) are the execution counts for that line. Lines that have no executable code are left blank. Lines that have executable code, but were not executed, have a "0000000" as the execution count. At the end of the .lst file, the percent coverage is given. ) $(P There are 3 lines with an exection count of 1, these were each executed once. The declaration line for $(D i, prime), etc., has 5 because there are 5 declarations, and the initialization of each declaration counts as one statement.) $(P The first $(D for) loop shows 22. This is the sum of the 3 parts of the for header. 
If the for header is broken up into 3 lines, the data is similarly divided:) $(CONSOLE 1| for (iter = 1; 11| iter <= 10; 10| iter++) ) $(P which adds up to 22.) $(P $(D e1&&e2) and $(D e1||e2) expressions conditionally execute the right-hand operand $(D e2). Therefore, the right-hand operand is treated as a separate statement with its own counter:) $(CONSOLE |void foo(int a, int b) |{ 5| bar(a); 8| if (a && b) 1| bar(b); |} ) $(P By putting the right-hand operand on a separate line, this illuminates things:) $(CONSOLE |void foo(int a, int b) |{ 5| bar(a); 5| if (a && 3| b) 1| bar(b); |} ) $(P Similarly, for the $(D e?e1:e2) expressions, $(D e1) and $(D e2) are treated as separate statements.) $(H3 Controlling the Coverage Analyser) $(COMMENT The behavior of the coverage analyser can be controlled through the $(DRUNTIMESRC rt/cover.d) module.) $(P When the $(DDSUBLINK dmd-windows,switch-cov, $(B -cov)) switch is thrown, the $(DDSUBLINK spec/version, PredefinedVersions, version identifier) $(B D_Coverage) is defined.) $(H3 References) $(LINK2 https://en.wikipedia.org/wiki/Code_coverage, Wikipedia) ) Macros: TITLE=Code Coverage Analysis SUBNAV=$(SUBNAV_ARTICLES) <<<<<< EOF # path=source-mir-glas-l1.lst |/++ |$(H2 Level 1) | |$(SCRIPT inhibitQuickIndex = 1;) | |This is a submodule of $(MREF mir,glas). | |The Level 1 GLAS perform vector and vector-vector operations. | |$(BOOKTABLE $(H2 Vector-vector operations), |$(T2 rot, apply Givens rotation) |$(T2 axpy, constant times a vector plus a vector) |$(T2 dot, dot product) |$(T2 dotc, dot product, conjugating the first vector) |) | |$(BOOKTABLE $(H2 Vector operations), |$(TR $(TH Function Name) $(TH Description)) |$(T2 nrm2, Euclidean norm) |$(T2 sqnrm2, square of Euclidean norm) |$(T2 asum, sum of absolute values) |$(T2 iamax, index of max abs value) |$(T2 amax, max abs value) |) | |All functions except $(LREF iamax) work with multidimensional tensors. | |GLAS does not provide `swap`, `scal`, and `copy` functions. |This functionality is part of $(MREF_ALTTEXT ndslice, mir, ndslice) package. Examples can be found below. | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). 
|Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Macros: |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |SUBMODULE = $(MREF_ALTTEXT $1, mir, glas, $1) |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, glas, $1)$(NBSP) |NDSLICEREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |+/ |module mir.glas.l1; | |/// SWAP |unittest |{ | import std.algorithm.mutation: swap; | import mir.ndslice.allocation: slice; | import mir.algorithm.iteration: each; | import std.typecons: Yes; 1| auto x = slice!double(4); 1| auto y = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| y[] = [4, 5, 6, 7]; 1| each!(swap)(x, y); 1| assert(x == [4, 5, 6, 7]); 1| assert(y == [0, 1, 2, 3]); |} | |/// SCAL |unittest |{ | import mir.ndslice.allocation: slice; | import std.typecons: Yes; 1| auto x = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| x[] *= 2.0; 1| assert(x == [0, 2, 4, 6]); |} | |/// COPY |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(4); 1| auto y = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| y[] = x; 1| assert(y == [0, 1, 2, 3]); |} | |import mir.math.common; |import mir.internal.utility; |import mir.ndslice.slice; |import mir.algorithm.iteration : reduce, each; |import mir.math.common: fastmath; | |import std.traits: Unqual, isPointer; |import std.meta: allSatisfy; | |@fastmath: | |template _rot(alias c, alias s) |{ | @fastmath | void _rot(X, Y)(ref X xr, ref Y yr) | { 4| auto x = xr; 4| auto y = yr; 4| auto t1 = c * x + s * y; | static if (isComplex!(typeof(c))) | { | auto t2 = (c.re - c.im * 1fi) * y; | } | else 4| auto t2 = c * y; | static if (isComplex!(typeof(s))) | { | t2 -= (s.re - s.im * 1fi) * x; | } | else 4| t2 -= s * x; 4| xr = t1; 4| yr = t2; | } |} | |template _axpy(alias a) |{ | @fastmath | void _axpy(X, Y)(ref X x, ref Y y) | { 15| y += a * x; | } |} | |A _fmuladd(A, B, C)(A a, in B b, in C c) |{ 42| return a + b * c; |} | |A _fmuladdc(A, B, C)(A a, in B b, in C c) |{ | static if (isComplex!B) | { | return a + (b.re - b.im * 1fi) * c; | } | else | return a + b * c; |} | |A _nrm2(A, B)(A a, in B b) |{ | static if (isComplex!B) 4| return a + b.re * b.re + b.im * b.im; | else 8| return a + b * b; |} | |A _asum(A, B)(A a, in B b) |{ | static if (isComplex!B) | { 2| return a + (b.re.fabs + b.im.fabs); | } | else | static if (isFloatingPoint!B) | { 4| return a + b.fabs; | } | else | { | static if (isUnsigned!B) | return a + b; | else | return a + (b >= 0 ? b : -b); | } |} | |A _amax(A, B)(A a, in B b) |{ | static if (isComplex!B) | { 4| return a.fmax(b.re.fabs + b.im.fabs); | } | else | static if (isFloatingPoint!B) | { 6| return a.fmax(b.fabs); | } | else | { | static if (!isUnsigned!B) | b = (b >= 0 ? b : -b); | return a >= b ? a : b; | } |} | |private enum _shouldBeCastedToUnqual(T) = isPointer!T && !is(Unqual!T == T); | |/++ |Applies a plane rotation, where the `c` (cos) and `s` (sin) are scalars. |Uses unrolled loops for strides equal to one. 
|Params: | c = cos scalar | s = sin scalar | x = first n-dimensional tensor | y = second n-dimensional tensor |BLAS: SROT, DROT, CROT, ZROT, CSROT, ZDROTF |+/ |void rot(C, S, SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(in C c, in S s, Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y) |{ 1| assert(x.shape == y.shape, "constraints: x and y must have equal shapes"); | pragma(inline, false); 1| each!(_rot!(c, s))(x, y); |} | |/// |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(4); 1| auto y = slice!double(4); 1| auto a = slice!double(4); 1| auto b = slice!double(4); 1| double cos = 3.0 / 5; 1| double sin = 4.0 / 5; 1| x[] = [0, 1, 2, 3]; 1| y[] = [4, 5, 6, 7]; 15| foreach (i; 0 .. 4) | { 4| a[i] = cos * x[i] + sin * y[i]; 4| b[i] = cos * y[i] - sin * x[i]; | } 1| rot(cos, sin, x, y); 1| assert(x == a); 1| assert(y == b); |} | |/++ |Constant times a vector plus a vector. |Uses unrolled loops for strides equal to one. |Params: | a = scale parameter | x = first n-dimensional tensor | y = second n-dimensional tensor |BLAS: SAXPY, DAXPY, CAXPY, ZAXPY |+/ |void axpy(A, SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(in A a, Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y) |{ | static if (_shouldBeCastedToUnqual!Iterator2) | { | .axpy(a, cast(Slice!(N, Unqual!Iterator1))x, cast(Slice!(N, Unqual!Iterator2))y); | } | else | { 5| assert(x.shape == y.shape, "constraints: x and y must have equal shapes"); | pragma(inline, false); 5| each!(_axpy!a)(x, y); | } |} | |/// SAXPY, DAXPY |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(4); 1| auto y = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| y[] = [4, 5, 6, 7]; 1| axpy(2.0, x, y); 1| assert(y == [4, 7, 10, 13]); |} | |/// SAXPY, DAXPY |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto a = 3 + 4i; 1| auto x = slice!cdouble(2); 1| auto y = slice!cdouble(2); 1| x[] = [0 + 1i, 2 + 3i]; 1| y[] = [4 + 5i, 6 + 7i]; 1| axpy(a, x, y); 1| assert(y == [a * (0 + 1i) + (4 + 5i), a * (2 + 3i) + (6 + 7i)]); |} | |/++ |Forms the dot product of two vectors. |Uses unrolled loops for strides equal to one. 
|Returns: dot product `conj(xᐪ) × y` |Params: | F = type for summation (optional template parameter) | x = first n-dimensional tensor | y = second n-dimensional tensor |BLAS: SDOT, DDOT, SDSDOT, DSDOT, CDOTC, ZDOTC |+/ |F dot(F, SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y) |{ | static if (allSatisfy!(_shouldBeCastedToUnqual, Iterator1, Iterator2)) | { | return .dot!F(cast(Slice!(Unqual!Iterator1, N, kind1))x, cast(Slice!(Unqual!Iterator2, N, kind2))y); | } | else | { 9| assert(x.shape == y.shape, "constraints: x and y must have equal shapes"); | pragma(inline, false); 9| return reduce!(_fmuladd)(cast(F)(0), x, y); | } |} | |/// SDOT, DDOT |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(4); 1| auto y = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| y[] = [4, 5, 6, 7]; 1| assert(dot(x, y) == 5 + 12 + 21); |} | |/// ditto |auto dot(SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y) |{ 8| return .dot!(Unqual!(typeof(x[0] * y[0])))(x, y); |} | |/// SDOT, DDOT |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(4); 1| auto y = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| y[] = [4, 5, 6, 7]; 1| assert(dot(x, y) == 5 + 12 + 21); |} | |/// SDSDOT, DSDOT |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!float(4); 1| auto y = slice!float(4); 1| x[] = [0, 1, 2, 3]; 1| y[] = [4, 5, 6, 7]; 1| assert(dot!real(x, y) == 5 + 12 + 21); // 80-bit FP for x86 CPUs |} | |/// CDOTU, ZDOTU |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto x = slice!cdouble(2); 1| auto y = slice!cdouble(2); 1| x[] = [0 + 1i, 2 + 3i]; 1| y[] = [4 + 5i, 6 + 7i]; | version(LDC) // DMD Internal error: backend/cgxmm.c 628 | assert(dot(x, y) == (0 + 1i) * (4 + 5i) + (2 + 3i) * (6 + 7i)); |} | |/++ |Forms the dot product of two complex vectors. |Uses unrolled loops for strides equal to one. |Returns: dot product `xᐪ × y` |Params: | F = type for summation (optional template parameter) | x = first n-dimensional tensor | y = second n-dimensional tensor |BLAS: CDOTU, ZDOTU |+/ |F dotc(F, SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y) | if (isComplex!(DeepElementType!(typeof(x))) && isComplex!(DeepElementType!(typeof(y)))) |{ | static if (allSatisfy!(_shouldBeCastedToUnqual, Iterator1, Iterator2)) | { | return .dotc!F(cast(Slice!(N, Unqual!Iterator1))x, cast(Slice!(N, Unqual!Iterator2))y); | } | else | { | assert(x.shape == y.shape, "constraints: x and y must have equal shapes"); | pragma(inline, false); | return reduce!(_fmuladdc)(cast(F)(0), x, y); | } |} | |/// ditto |auto dotc(SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y) |{ | return .dotc!(Unqual!(typeof(x[x.shape.init] * y[y.shape.init])))(x, y); |} | |/// CDOTC, ZDOTC |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto x = slice!cdouble(2); 1| auto y = slice!cdouble(2); 1| x[] = [0 + 1i, 2 + 3i]; 1| y[] = [4 + 5i, 6 + 7i]; | version(LDC) // DMD Internal error: backend/cgxmm.c 628 | assert(dotc(x, y) == (0 + -1i) * (4 + 5i) + (2 + -3i) * (6 + 7i)); |} | |/++ |Returns the euclidean norm of a vector. |Uses unrolled loops for stride equal to one. 
|Returns: euclidean norm `sqrt(conj(xᐪ) × x)` |Params: | F = type for summation (optional template parameter) | x = n-dimensional tensor |BLAS: SNRM2, DNRM2, SCNRM2, DZNRM2 |+/ |F nrm2(F, SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x) |{ | static if (_shouldBeCastedToUnqual!Iterator) | return .sqnrm2!F(cast(Slice!(N, Unqual!R))x).sqrt; | else 2| return .sqnrm2!F(x).sqrt; |} | |/// ditto |auto nrm2(SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x) |{ 2| return .nrm2!(realType!(typeof(x[x.shape.init] * x[x.shape.init])))(x); |} | |/// SNRM2, DNRM2 |unittest |{ | import mir.ndslice.allocation: slice; | import std.math: sqrt, approxEqual; 1| auto x = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| assert(nrm2(x).approxEqual(sqrt(1.0 + 4 + 9))); |} | |/// SCNRM2, DZNRM2 |unittest |{ | import mir.ndslice.allocation: slice; | import std.math: sqrt, approxEqual; | 1| auto x = slice!cdouble(2); 1| x[] = [0 + 1i, 2 + 3i]; | 1| assert(nrm2(x).approxEqual(sqrt(1.0 + 4 + 9))); |} | |/++ |Forms the square of the euclidean norm. |Uses unrolled loops for stride equal to one. |Returns: `conj(xᐪ) × x` |Params: | F = type for summation (optional template parameter) | x = n-dimensional tensor |+/ |F sqnrm2(F, SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x) |{ | static if (_shouldBeCastedToUnqual!Iterator) | { | return .sqnrm2!F(cast(Slice!(N, Unqual!R))x); | } | else | { | pragma(inline, false); 4| return reduce!(_nrm2)(F(0), x); | } |} | |/// ditto |auto sqnrm2(SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x) |{ 2| return .sqnrm2!(realType!(typeof(x[x.shape.init] * x[x.shape.init])))(x); |} | |/// |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| assert(sqnrm2(x) == 1.0 + 4 + 9); |} | |/// |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto x = slice!cdouble(2); 1| x[] = [0 + 1i, 2 + 3i]; | 1| assert(sqnrm2(x) == 1.0 + 4 + 9); |} | |/++ |Takes the sum of the `|Re(.)| + |Im(.)|`'s of a vector and | returns a single precision result. |Returns: sum of the `|Re(.)| + |Im(.)|`'s |Params: | F = type for summation (optional template parameter) | x = n-dimensional tensor |BLAS: SASUM, DASUM, SCASUM, DZASUM |+/ |F asum(F, SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x) |{ | static if (_shouldBeCastedToUnqual!Iterator) | { | return .asum!F(cast(Slice!(N, Unqual!R))x); | } | else | { | pragma(inline, false); 2| return reduce!(_asum)(F(0), x); | } |} | |/// ditto |auto asum(SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x) |{ | alias T = DeepElementType!(typeof(x)); 2| return .asum!(realType!T)(x); |} | |/// SASUM, DASUM |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(4); 1| x[] = [0, -1, -2, 3]; 1| assert(asum(x) == 1 + 2 + 3); |} | |/// SCASUM, DZASUM |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto x = slice!cdouble(2); 1| x[] = [0 - 1i, -2 + 3i]; | 1| assert(asum(x) == 1 + 2 + 3); |} | |/++ |Finds the index of the first element having maximum `|Re(.)| + |Im(.)|`. 
|Return: index of the first element having maximum `|Re(.)| + |Im(.)|` |Params: x = 1-dimensional tensor |BLAS: ISAMAX, IDAMAX, ICAMAX, IZAMAX |+/ |sizediff_t iamax(Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) x) |{ | static if (_shouldBeCastedToUnqual!Iterator) | { | return .iamax(cast(Slice!(1, Unqual!R))x); | } | else | { | pragma(inline, false); 4| if (x.length == 0) 2| return -1; 2| if (x._stride == 0) 0000000| return 0; | alias T = Unqual!(DeepElementType!(typeof(x))); | alias F = realType!T; | static if (isFloatingPoint!F) 2| auto m = -double.infinity; | else | auto m = F.min; 2| sizediff_t l = x.length; 2| sizediff_t r = x.length; | do | { 10| auto f = x.front; | static if (isComplex!T) | { 4| auto e = f.re.fabs + f.im.fabs; | } | else | static if (isFloatingPoint!T) | { 6| auto e = f.fabs; | } | else | { | static if (isUnsigned!T) | auto e = f; | else | auto e = (f >= 0 ? f : -f); | } | 10| if (e > m) | { 6| m = e; 6| r = x.length; | } 10| x.popFront; | } 10| while (x.length); 2| return l - r; | } |} | |/// ISAMAX, IDAMAX |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(6); | // 0 1 2 3 4 5 1| x[] = [0, -1, -2, -3, 3, 2]; 1| assert(iamax(x) == 3); | // -1 for empty vectors 1| assert(iamax(x[0 .. 0]) == -1); |} | |/// ICAMAX, IZAMAX |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto x = slice!cdouble(4); | // 0 1 2 3 1| x[] = [0 + -1i, -2 + 3i, 2 + 3i, 2 + 2i]; | 1| assert(iamax(x) == 1); | // -1 for empty vectors 1| assert(iamax(x[$ .. $]) == -1); |} | |/++ |Takes the sum of the `|Re(.)| + |Im(.)|`'s of a vector and | returns a single precision result. |Returns: sum of the `|Re(.)| + |Im(.)|`'s |Params: | x = n-dimensional tensor |BLAS: SASUM, DASUM, SCASUM, DZASUM |+/ |auto amax(SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x) |{ | static if (_shouldBeCastedToUnqual!Iterator) | { | return .amax(cast(Slice!(N, Unqual!R))x); | } | else | { | pragma(inline, false); | alias T = DeepElementType!(typeof(x)); | alias F = realType!T; 4| return reduce!(_amax)(F(0), x); | } |} | |/// |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(6); 1| x[] = [0, -1, -2, -7, 6, 2]; 1| assert(amax(x) == 7); | // 0 for empty vectors 1| assert(amax(x[0 .. 0]) == 0); |} | |/// |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto x = slice!cdouble(4); 1| x[] = [0 + -1i, -7 + 3i, 2 + 3i, 2 + 2i]; | 1| assert(amax(x) == 10); | // 0 for empty vectors 1| assert(amax(x[$ .. $]) == 0); |} source/mir/glas/l1.d is 99% covered <<<<<< EOF # path=source-mir-glas-l2.lst |/++ |$(H2 Level 2) | |$(SCRIPT inhibitQuickIndex = 1;) | |This is a submodule of $(MREF mir,glas). | |The Level 2 BLAS perform matrix-vector operations. | |Note: GLAS is singe thread for now. | |$(BOOKTABLE $(H2 Matrix-vector operations), | |$(TR $(TH Function Name) $(TH Description)) |$(T2 gemv, general matrix-vector multiplication, $(RED partially optimized)) |) | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). 
|Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Macros: |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |SUBMODULE = $(MREF_ALTTEXT $1, mir, glas, $1) |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, glas, $1)$(NBSP) |NDSLICEREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |+/ |module mir.glas.l2; | |import std.traits; |import std.meta; | |import mir.math.common; |import mir.internal.utility; |import mir.ndslice.slice; | |import mir.glas.l1; | |import mir.math.common: fastmath; | |@fastmath: | |/++ |$(RED DRAFT) |Performs general matrix-vector multiplication. | |Pseudo_code: `y := alpha A × x + beta y`. | |Params: | alpha = scalar | asl = `m ⨉ n` matrix | xsl = `n ⨉ 1` vector | beta = scalar. When `beta` is supplied as zero then the vector `ysl` need not be set on input. | ysl = `m ⨉ 1` vector | |Note: | GLAS does not require transposition parameters. | Use $(NDSLICEREF iteration, transposed) | to perform zero cost `Slice` transposition. | |BLAS: SGEMV, DGEMV, (CGEMV, ZGEMV are not implemented for now) |+/ |nothrow @nogc @system |void gemv(A, B, C, | SliceKind kindA, | SliceKind kindB, | SliceKind kindC, | ) |( | C alpha, | Slice!(const(A)*, 2, kindA) asl, | Slice!(const(B)*, 1, kindB) xsl, | C beta, | Slice!(C*, 1, kindC) ysl, |) | if (allSatisfy!(isNumeric, A, B, C)) |in |{ 1| assert(asl.length!0 == ysl.length, "constraint: asl.length!0 == ysl.length"); 1| assert(asl.length!1 == xsl.length, "constraint: asl.length!1 == xsl.length"); |} |body |{ | import mir.ndslice.dynamic: reversed; | static assert(is(Unqual!C == C), msgWrongType); 1| if (ysl.empty) 0000000| return; 1| if (beta == 0) | { 1| ysl[] = 0; | } | else 0000000| if (beta == 1) | { 0000000| ysl[] *= beta; | } 1| if (xsl.empty) 0000000| return; | do | { 3| ysl.front += alpha * dot(asl.front, xsl); 3| asl.popFront; 3| ysl.popFront; | } 3| while (ysl.length); |} | |/// |unittest |{ | import mir.ndslice; | 1| auto a = slice!double(3, 5); 1| a[] = | [[-5, 1, 7, 7, -4], | [-1, -5, 6, 3, -3], | [-5, -2, -3, 6, 0]]; | 1| auto b = slice!double(5); 1| b[] = | [-5.0, | 4.0, | -4.0, | -1.0, | 9.0]; | 1| auto c = slice!double(3); | 1| gemv!(double, double, double)(1.0, a, b, 0.0, c); | 1| assert(c == | [-42.0, | -69.0, | 23.0]); |} source/mir/glas/l2.d is 80% covered <<<<<< EOF # path=source-mir-glas-package.lst |/++ | |$(H1 GLAS (Generic Linear Algebra Subprograms)) | |The GLAS are generic routines that provide standard building blocks for performing vector and matrix operations. |The Level 1 GLAS perform scalar, vector and vector-vector operations, |the Level 2 GLAS perform matrix-vector operations, and the Level 3 GLAS perform matrix-matrix operations. | |$(H2 Implemented Routines) | |The list of already implemented features. 
| |$(BOOKTABLE , | $(TR | $(TH Modules) | $(TH Description) | ) | $(TR | $(TDNW $(SUBMODULE l1)) | $(TD vector operations 100% done, partially optimized for now) | ) | $(TR | $(TDNW $(SUBMODULE l2)) | $(TD matrix-vector operations %3 done, partially optimized for now) | ) | $(TR | $(TDNW l3 was moved to $(HTTP github.com/libmir/mir-glas, mir-glas)) | $(TD matrix-matrix operations 50% done) | ) |) | |GLAS is generalization of $(LINK2 http://www.netlib.org/blas/, BLAS) (Basic Linear Algebra Subprograms) |Because the BLAS are efficient, portable, and widely available, they are commonly used in the development of |high quality linear algebra or related software, such as |$(LINK2 http://www.netlib.org/lapack/, LAPACK), |$(LINK2 http://www.numpy.org/, NumPy), or $(LINK2 http://julialang.org/, The Julia language). | |Efficient Level 3 BLAS implementation requires |$(LINK2 https://en.wikipedia.org/wiki/CPU_cache, cache)-friendly matrix blocking. |In additional, $(LINK2 https://en.wikipedia.org/wiki/SIMD, SIMD) instructions should be used for all levels on modern architectures. | |$(H2 Why GLAS) | |GLAS is ... | | |$(H2 Optimization notes) | |GLAS requires recent $(LINK2 https://github.com/ldc-developers/ldc, LDC) >= 1.1.0-beta2. | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Macros: |SUBMODULE = $(MREF_ALTTEXT $1, mir, glas, $1) |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, glas, $1)$(NBSP) |+/ |module mir.glas; | |public import mir.glas.l1; |public import mir.glas.l2; source/mir/glas/package.d has no code <<<<<< EOF # path=source-mir-model-lda-hoffman.lst |/** | |$(H3 Online variational Bayes for latent Dirichlet allocation) | |References: | Hoffman, Matthew D., Blei, David M. and Bach, Francis R.. | "Online Learning for Latent Dirichlet Allocation.." | Paper presented at the meeting of the NIPS, 2010. | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko |*/ |module mir.model.lda.hoffman; | |import std.traits; | |/++ |Batch variational Bayes for LDA with mini-batches. |+/ |struct LdaHoffman(F) | if (isFloatingPoint!F) |{ | import std.parallelism; | import mir.ndslice.iterator: FieldIterator; | import mir.ndslice.topology: iota; | | import mir.ndslice.slice; | import mir.ndslice.allocation: slice; | | import mir.math.common; | import mir.sparse; | | private alias Vector = Slice!(F*); | private alias Matrix = Slice!(F*, 2); | | private size_t D; | private F alpha; | private F eta; | private F kappa; | private F _tau; | private F eps; | | private Matrix _lambda; // [k, w] | private Matrix _beta; // [k, w] | | private TaskPool tp; | | private F[][] _lambdaTemp; | | @disable this(); | @disable this(this); | | /++ | Params: | K = theme count | W = dictionary size | D = approximate total number of documents in a collection. | alpha = Dirichlet document-topic prior (0.1) | eta = Dirichlet word-topic prior (0.1) | tau0 = tau0 ≧ 0 slows down the early iterations of the algorithm. | kappa = `kappa belongs to $(LPAREN)0.5, 1]`, controls the rate at which old values of lambda are forgotten. | `lambda = (1 - rho(tau)) lambda + rho lambda', rho(tau) = (tau0 + tau)^(-kappa)`. Use `kappa = 0` for Batch variational Bayes LDA. | eps = Stop iterations if `||lambda - lambda'||_l1 < s * eps`, where `s` is a documents count in a batch. 
| tp = task pool | +/ 0000000| this(size_t K, size_t W, size_t D, F alpha, F eta, F tau0, F kappa, F eps = 1e-5, TaskPool tp = taskPool()) | { | import mir.random; | 0000000| this.D = D; 0000000| this.alpha = alpha; 0000000| this.eta = eta; 0000000| this._tau = tau0; 0000000| this.kappa = kappa; 0000000| this.eps = eps; 0000000| this.tp = tp; | 0000000| _lambda = slice!F(K, W); 0000000| _beta = slice!F(K, W); 0000000| _lambdaTemp = new F[][](tp.size + 1, W); | | import std.math: fabs; 0000000| auto gen = Random(unpredictableSeed); 0000000| foreach (r; _lambda) 0000000| foreach (ref e; r) 0000000| e = (gen.rand!F.fabs + 0.9) / 1.901; | 0000000| updateBeta(); | } | | /// | void updateBeta() | { 0000000| foreach (i; tp.parallel(lambda.length.iota)) 0000000| unparameterize(lambda[i], beta[i]); | } | | /++ | Posterior over the topics | +/ | Slice!(F*, 2) beta() @property | { 0000000| return _beta; | } | | /++ | Parameterized posterior over the topics. | +/ | Slice!(F*, 2) lambda() @property | { 0000000| return _lambda; | } | | /++ | Count of already seen documents. | Slows down the iterations of the algorithm. | +/ | F tau() const @property | { 0000000| return _tau; | } | | /// ditto | void tau(F v) @property | { 0000000| _tau = v; | } | | /++ | Accepts mini-batch and performs multiple E-step iterations for each document and single M-step. | | This implementation is optimized for sparse documents, | which contain much less unique words than a dictionary. | | Params: | n = mini-batch, a collection of compressed documents. | maxIterations = maximal number of iterations for s This implementation is optimized for sparse documents, |ingle document in a batch for E-step. | +/ | size_t putBatch(SliceKind kind, C, I, J)(Slice!(ChopIterator!(J*, Series!(I*, C*)), 1, kind) n, size_t maxIterations) | { | return putBatchImpl(n.recompress!F, maxIterations); | } | | private size_t putBatchImpl(Slice!(ChopIterator!(size_t*, Series!(uint*, F*))) n, size_t maxIterations) | { | import std.math: isFinite; | import mir.sparse.blas.dot; | import mir.sparse.blas.gemv; | import mir.ndslice.dynamic: transposed; | import mir.ndslice.topology: universal; | import mir.internal.utility; | 0000000| immutable S = n.length; 0000000| immutable K = _lambda.length!0; 0000000| immutable W = _lambda.length!1; 0000000| _tau += S; 0000000| auto theta = slice!F(S, K); 0000000| auto nsave = saveN(n); | 0000000| immutable rho = pow!F(F(tau), -kappa); 0000000| auto thetat = theta.universal.transposed; 0000000| auto _gamma = slice!F(tp.size + 1, K); 0000000| shared size_t ret; | // E step 0000000| foreach (d; tp.parallel(S.iota)) | { 0000000| auto gamma = _gamma[tp.workerIndex]; 0000000| gamma[] = 1; 0000000| auto nd = n[d]; 0000000| auto thetad = theta[d]; 0000000| for (size_t c; ;c++) | { 0000000| unparameterize(gamma, thetad); | 0000000| selectiveGemv!"/"(_beta.universal.transposed, thetad, nd); 0000000| F sum = 0; | { 0000000| auto beta = _beta; 0000000| auto th = thetad; 0000000| foreach (ref g; gamma) | { 0000000| if (!th.front.isFinite) 0000000| th.front = F.max; 0000000| auto value = dot(nd, beta.front) * th.front + alpha; 0000000| sum += fabs(value - g); 0000000| g = value; 0000000| beta.popFront; 0000000| th.popFront; | } | } 0000000| if (c < maxIterations && sum > eps * K) | { 0000000| nd.value[] = nsave[d].value; 0000000| continue; | } | import core.atomic; 0000000| ret.atomicOp!"+="(c); 0000000| break; | } | } | // M step 0000000| foreach (k; tp.parallel(K.iota)) | { 0000000| auto lambdaTemp = _lambdaTemp[tp.workerIndex]; 
0000000| gemtv!F(F(1), n, thetat[k], F(0), lambdaTemp.sliced); | import mir.algorithm.iteration: each; 0000000| each!((ref l, bk, lt) {l = (1 - rho) * l + | rho * (eta + (F(D) / F(S)) * bk * lt);})(_lambda[k], _beta[k],lambdaTemp.sliced); 0000000| unparameterize(_lambda[k], _beta[k]); | } 0000000| return ret; | } | | private auto saveN(Slice!(ChopIterator!(size_t*, Series!(uint*, F*))) n) | { | import mir.series: series; | import mir.ndslice.topology: chopped, universal; 0000000| return n.iterator._sliceable.index | .series(n.iterator._sliceable.value.dup) | .chopped(n.iterator._iterator.sliced(n.length + 1)); | } | | private static void unparameterize(Vector param, Vector posterior) | { 0000000| assert(param.structure == posterior.structure); | import mir.ndslice.topology: zip; | import mir.math.func.expdigamma; | import mir.math.sum: sum; 0000000| immutable c = 1 / expDigamma(sum(param)); 0000000| foreach (e; zip(param, posterior)) 0000000| e.b = c * expDigamma(e.a); | } |} | |unittest |{ | alias ff = LdaHoffman!double; |} source/mir/model/lda/hoffman.d is 0% covered <<<<<< EOF # path=source-mir-sparse-blas-axpy.lst |/** |License: $(LINK2 http://boost.org/LICENSE_1_0.txt, Boost License 1.0). | |Authors: Ilya Yaroshenko |*/ |module mir.sparse.blas.axpy; | |import std.traits; |import mir.ndslice.slice; |import mir.sparse; |import mir.series; | |/++ |Constant times a vector plus a vector. | |Params: | x = sparse vector | y = dense vector | alpha = scalar |Returns: | `y = alpha * x + y` |+/ |void axpy( | CR, | V1 : Series!(I1, T1), | I1, T1, V2) |(in CR alpha, V1 x, V2 y) | if (isDynamicArray!V2 || isSlice!V2) |in |{ 28| if (x.index.length) 27| assert(x.index[$-1] < y.length); |} |body |{ | import mir.internal.utility; | 321| foreach (size_t i; 0 .. x.index.length) | { 79| auto j = x.index[i]; 79| y[j] = alpha * x.value[i] + y[j]; | } |} | |/// |unittest |{ 1| auto x = series([0, 3, 5, 9, 10], [1.0, 3, 4, 9, 13]); 1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; 1| axpy(2.0, x, y); 1| assert(y == [2.0, 1.0, 2, 9, 4, 13, 6, 7, 8, 27, 36, 11, 12]); |} | |unittest |{ 1| auto x = series([0, 3, 5, 9, 10], [1.0, 3, 4, 9, 13]); 1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; 1| axpy(2.0, x, y.sliced); 1| assert(y == [2.0, 1.0, 2, 9, 4, 13, 6, 7, 8, 27, 36, 11, 12]); |} | |unittest |{ 1| auto x = series([0, 3, 5, 9, 10], [1.0, 3, 4, 9, 13]); 1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; 1| axpy(2.0, x, y.slicedField); 1| assert(y == [2.0, 1.0, 2, 9, 4, 13, 6, 7, 8, 27, 36, 11, 12]); |} source/mir/sparse/blas/axpy.d is 100% covered <<<<<< EOF # path=source-mir-sparse-blas-dot.lst |/** |License: $(LINK2 http://boost.org/LICENSE_1_0.txt, Boost License 1.0). 
| |Authors: Ilya Yaroshenko |*/ |module mir.sparse.blas.dot; | |import std.traits; |import mir.ndslice.slice; |import mir.sparse; |import mir.series; | |/++ |Dot product of two vectors | |Params: | x = sparse vector | y = sparse vector |Returns: | scalar `xᵀ × y` |+/ |Unqual!(CommonType!(T1, T2)) dot( | V1 : Series!(I1*, T1*), | V2 : Series!(I2*, T2*), | T1, T2, I1, I2) |(V1 x, V2 y) |{ 1| return dot!(typeof(return))(x, y); |} | |/// ditto |D dot( | D, | V1 : Series!(I1*, T1*), | V2 : Series!(I2*, T2*), | T1, T2, I1, I2) |(V1 x, V2 y) |{ | 2| typeof(return) s = 0; | 2| uint done = 2; 2| Unqual!I1 ai0 = void; 2| Unqual!I2 bi0 = void; | 4| if (x.length && y.length) for (;;) | { 8| bi0 = y.index[0]; 8| if (x.index[0] < bi0) | { | do | { 4| x.popFront; 4| if (x.length == 0) | { 0000000| break; | } | } 4| while (x.index[0] < bi0); 4| done = 2; | } 8| if (--done == 0) | { 2| goto L; | } 6| ai0 = x.index[0]; 6| if (y.index[0] < ai0) | { | do | { 4| y.popFront; 4| if (y.length == 0) | { 0000000| break; | } | } 4| while (y.index[0] < ai0); 4| done = 2; | } 6| if (--done == 0) | { 2| goto L; | } 4| continue; | L: 4| s = x.value[0] * y.value[0] + s; 4| x.popFront; 4| if (x.length == 0) | { 0000000| break; | } 4| y.popFront; 4| if (y.length == 0) | { 2| break; | } | } | 2| return s; |} | |/// |unittest |{ 1| auto x = series([0u, 3, 5, 9, 100], [1, 3, 4, 9, 10]); 1| auto y = series([1u, 3, 4, 9], [1, 10, 100, 1000]); | // x = [1, 0, 0, 3, 0, 4, 0, 0, 0, 9, ... ,10] | // y = [0, 1, 0, 10, 0, 0, 0, 0, 0, 1000] 1| assert(dot(x, y) == 9030); 1| assert(dot!double(x, y) == 9030); |} | |/++ |Dot product of two vectors. |Params: | x = sparse vector | y = dense vector |Returns: | scalar `x × y` |+/ |Unqual!(CommonType!(T1, ForeachType!V2)) dot( | V1 : Series!(I1*, T1*), | T1, I1, V2) |(V1 x, V2 y) | if (isDynamicArray!V2 || isSlice!V2) |{ 21| return dot!(typeof(return))(x, y); |} | |///ditto |D dot( | D, | V1 : Series!(I1*, T1*), | T1, I1, V2) |(V1 x, V2 y) | if (isDynamicArray!V2 || isSlice!V2) |in |{ 21| if (x.length) 21| assert(x.index[$-1] < y.length); |} |body |{ | | import mir.internal.utility; | | alias T2 = ForeachType!V2; | | alias F = Unqual!(CommonType!(T1, T2)); 21| F s = 0; 324| foreach (size_t i; 0 .. x.index.length) | { 87| s = y[x.index[i]] * x.value[i] + s; | } | 21| return s; |} | |/// |unittest |{ | import std.typecons: No; 1| auto x = [0u, 3, 5, 9, 10].series([1.0, 3, 4, 9, 13]); 1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; | // x: [1, 0, 0, 3, 0, 4, 0, 0, 0, 9, 13, 0, 0, 0] | // y: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] 1| auto r = 0 + 3 * 3 + 4 * 5 + 9 * 9 + 13 * 10; 1| assert(dot(x, y) == r); 1| assert(dot(x, y.sliced) == r); 1| assert(dot(x, y.slicedField) == r); |} source/mir/sparse/blas/dot.d is 94% covered <<<<<< EOF # path=source-mir-sparse-blas-gemm.lst |/++ |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko |+/ |module mir.sparse.blas.gemm; | |import std.traits; |import mir.ndslice.slice; |import mir.ndslice.iterator; |import mir.ndslice.allocation: slice; |import mir.sparse; |import mir.series; | |/++ |General matrix-matrix multiplication. | |Params: | alpha = scalar | a = sparse matrix (CSR format) | b = dense matrix | beta = scalar | c = dense matrix |Returns: | `c = alpha * a × b + beta * c` if beta does not equal null and `c = alpha * a × b` otherwise. 
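A minimal sketch of what the "sparse matrix (CSR format)" argument above looks like in practice, built only from the mir.sparse API (`sparse` and `compress`) that appears later in this report; the concrete matrix is illustrative, not taken from the covered sources.

unittest
{
    import mir.sparse;               // sparse (DOK builder) and compress (CSR view)

    auto dok = sparse!double(2, 3);  // dictionary-of-keys accumulator
    dok[0, 1] = 2;
    dok[1, 2] = 5;

    auto csr = dok.compress;         // one Series (column indices + values) per row
    assert(csr.length == 2);
    assert(csr[0].index == [1] && csr[0].value == [2]);
    assert(csr[1].index == [2] && csr[1].value == [5]);
}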
|+/ |void gemm( | CR, | CL, | SliceKind kind1, T1, I1, J1, SliceKind kind2, Iterator2, SliceKind kind3, Iterator3) |( | in CR alpha, | Slice!(ChopIterator!(J1*, Series!(I1*, T1*)), 1, kind1) a, | Slice!(Iterator2, 2, kind2) b, | in CL beta, | Slice!(Iterator3, 2, kind3) c) |in |{ 1| assert(a.length!0 == c.length!0); 1| assert(b.length!1 == c.length!1); |} |body |{ | import mir.ndslice.topology: universal; | import mir.ndslice.dynamic: transposed; 1| auto ct = c.universal.transposed; 14| foreach (x; b.universal.transposed) | { | import mir.sparse.blas.gemv: gemv; 4| gemv(alpha, a, x, beta, ct.front); 4| ct.popFront; | } |} | |/// |unittest |{ 1| auto sp = sparse!int(3, 5); 1| sp[] = | [[-5, 1, 7, 7, -4], | [-1, -5, 6, 3, -3], | [-5, -2, -3, 6, 0]]; | 1| auto a = sp.compress; | 1| auto b = slice!double(5, 4); 1| b[] = | [[-5.0, -3, 3, 1], | [4.0, 3, 6, 4], | [-4.0, -2, -2, 2], | [-1.0, 9, 4, 8], | [9.0, 8, 3, -2]]; | 1| auto c = slice!double(3, 4); | 1| gemm(1.0, a, b, 0, c); | 1| assert(c == | [[-42.0, 35, -7, 77], | [-69.0, -21, -42, 21], | [23.0, 69, 3, 29]]); |} | | |/++ |General matrix-matrix multiplication with transformation. | |Params: | alpha = scalar | a = sparse matrix (CSR format) | b = dense matrix | beta = scalar | c = dense matrix |Returns: | `c = alpha * aᵀ × b + beta * c` if beta does not equal null and `c = alpha * aᵀ × b` otherwise. |+/ |void gemtm( | CR, | CL, | SliceKind kind1, T1, I1, J1, SliceKind kind2, Iterator2, SliceKind kind3, Iterator3) |( | in CR alpha, | Slice!(ChopIterator!(J1*, Series!(I1*, T1*)), 1, kind1) a, | Slice!(Iterator2, 2, kind2) b, | in CL beta, | Slice!(Iterator3, 2, kind3) c) |in |{ 1| assert(a.length!0 == b.length!0); 1| assert(b.length!1 == c.length!1); |} |body |{ | import mir.ndslice.topology: universal; | import mir.ndslice.dynamic: transposed; 1| auto ct = c.universal.transposed; 14| foreach (x; b.universal.transposed) | { | import mir.sparse.blas.gemv: gemtv; 4| gemtv(alpha, a, x, beta, ct.front); 4| ct.popFront; | } |} | | |/// |unittest |{ 1| auto sp = sparse!int(5, 3); 1| sp[] = | [[-5, -1, -5], | [1, -5, -2], | [7, 6, -3], | [7, 3, 6], | [-4, -3, 0]]; | 1| auto a = sp.compress; | 1| auto b = slice!double(5, 4); 1| b[] = | [[-5.0, -3, 3, 1], | [4.0, 3, 6, 4], | [-4.0, -2, -2, 2], | [-1.0, 9, 4, 8], | [9.0, 8, 3, -2]]; | 1| auto c = slice!double(3, 4); | 1| gemtm(1.0, a, b, 0, c); | 1| assert(c == | [[-42.0, 35, -7, 77], | [-69.0, -21, -42, 21], | [23.0, 69, 3, 29]]); |} | |/++ |Selective general matrix multiplication with selector sparse matrix. |Params: | a = dense matrix | b = dense matrix | c = sparse matrix (CSR format) |Returns: | `c[available indexes] = (a × b)[available indexes]`. 
|+/ |void selectiveGemm(string op = "", SliceKind kind1, SliceKind kind2, SliceKind kind3, T, T3, I3, J3) |(Slice!(T*, 2, kind1) a, Slice!(T*, 2, kind2) b, Slice!(ChopIterator!(J3*, Series!(I3*, T3*)), 1, kind3) c) |in |{ 1| assert(a.length!1 == b.length!0); 1| assert(c.length!0 == a.length!0); 11| foreach (r; c) 3| if (r.index.length) 2| assert(r.index[$-1] < b.length!1); |} |body |{ | import mir.ndslice.topology: universal; | import mir.ndslice.dynamic: transposed; | import mir.sparse.blas.gemv: selectiveGemv; | 1| auto bt = b.universal.transposed; 11| foreach (r; c) | { 3| selectiveGemv!op(bt, a.front, r); 3| a.popFront; | } |} | |/// |unittest |{ 1| auto a = slice!double(3, 5); 1| a[] = | [[-5, 1, 7, 7, -4], | [-1, -5, 6, 3, -3], | [-5, -2, -3, 6, 0]]; | 1| auto b = slice!double(5, 4); 1| b[] = | [[-5.0, -3, 3, 1], | [4.0, 3, 6, 4], | [-4.0, -2, -2, 2], | [-1.0, 9, 4, 8], | [9.0, 8, 3, -2]]; | | // a * b == | // [[-42.0, 35, -7, 77], | // [-69.0, -21, -42, 21], | // [23.0, 69, 3, 29]]); | 1| auto cs = sparse!double(3, 4); 1| cs[0, 2] = 1; 1| cs[0, 1] = 3; 1| cs[2, 3] = 2; | 1| auto c = cs.compress; | 1| selectiveGemm!"*"(a, b, c); 1| assert(c.length == 3); 1| assert(c[0].index == [1, 2]); 1| assert(c[0].value == [105, -7]); 1| assert(c[1].empty); 1| assert(c[2].index == [3]); 1| assert(c[2].value == [58]); |} source/mir/sparse/blas/gemm.d is 100% covered <<<<<< EOF # path=source-mir-sparse-blas-gemv.lst |/++ |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko |+/ |module mir.sparse.blas.gemv; | | |import std.traits; |import mir.ndslice.slice; |import mir.ndslice.iterator; |import mir.internal.utility; |import mir.sparse; |import mir.series; | |/++ |General matrix-vector multiplication. | |Params: | alpha = scalar | a = sparse matrix (CSR format) | x = dense vector | beta = scalar | y = dense vector |Returns: | `y = alpha * a × x + beta * y` if beta does not equal null and `y = alpha * a × x` otherwise. |+/ |void gemv( | CR, | CL, | SliceKind kind1, T1, I1, J1, SliceKind kind2, Iterator2, SliceKind kind3, Iterator3) |( | in CR alpha, | Slice!(ChopIterator!(J1*, Series!(I1*, T1*)), 1, kind1) a, | Slice!(Iterator2, 1, kind2) x, | in CL beta, | Slice!(Iterator3, 1, kind3) y) |in |{ 6| assert(a.length == y.length); |} |body |{ 6| if (beta) | { 22| foreach (ref e; y) | { | import mir.sparse.blas.dot; 6| e = alpha * dot(a.front, x) + beta * e; 6| a.popFront; | } | } | else | { 44| foreach (ref e; y) | { | import mir.sparse.blas.dot; 12| e = alpha * dot(a.front, x); 12| a.popFront; | } | } |} | |/// |unittest |{ 1| auto slice = sparse!double(3, 5); 1| slice[] = | [[ 0.0, 2.0, 3.0, 0.0, 0.0], | [ 6.0, 0.0, 30.0, 8.0, 0.0], | [ 6.0, 0.0, 30.0, 8.0, 0.0]]; 1| auto alpha = 3.0; 1| auto a = slice.compress; 1| auto x = [ 17.0, 19, 31, 3, 5].sliced; 1| auto beta = 2.0; 1| auto y = [1.0, 2, 3].sliced; 1| auto t = [131.0, 1056.0, 1056.0].sliced; 1| t[] *= alpha; | import mir.glas.l1: axpy; 1| axpy(beta, y, t); 1| gemv(alpha, a, x, beta, y); 1| assert(t == y); |} | |/++ |General matrix-vector multiplication with transposition. | |Params: | alpha = scalar | a = sparse matrix (CSR format) | x = dense vector | beta = scalar | y = dense vector |Returns: | `y = alpha * aᵀ × x + beta * y` if beta does not equal null and `y = alpha * aᵀ × x` otherwise. 
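A small consistency sketch for the transposed product described above: `gemtv` on a CSR matrix should agree with `gemv` applied to a CSR copy of the transpose. It uses only functions defined in these listings (`sparse`, `compress`, `gemv`, `gemtv`); the data is illustrative.

unittest
{
    import mir.ndslice.slice: sliced;
    import mir.sparse;
    import mir.sparse.blas.gemv: gemv, gemtv;

    auto m = sparse!double(2, 3);
    m[] = [[1.0, 0, 2],
           [0.0, 3, 0]];
    auto a = m.compress;            // 2 x 3, CSR

    auto mt = sparse!double(3, 2);
    mt[] = [[1.0, 0],
            [0.0, 3],
            [2.0, 0]];
    auto at = mt.compress;          // 3 x 2, CSR copy of the transpose

    auto x  = [5.0, 7].sliced;
    auto y1 = [0.0, 0, 0].sliced;
    auto y2 = [0.0, 0, 0].sliced;

    gemtv(1.0, a, x, 0.0, y1);      // y1 = aᵀ × x
    gemv (1.0, at, x, 0.0, y2);     // y2 = (aᵀ stored as CSR) × x
    assert(y1 == y2);               // both equal [5, 21, 10]
}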
|+/ |void gemtv( | CR, | CL, | SliceKind kind1, T1, I1, J1, SliceKind kind2, Iterator2, SliceKind kind3, Iterator3) |( | in CR alpha, | Slice!(ChopIterator!(J1*, Series!(I1*, T1*)), 1, kind1) a, | Slice!(Iterator2, 1, kind2) x, | in CL beta, | Slice!(Iterator3, 1, kind3) y) |in |{ 5| assert(a.length == x.length); |} |body |{ | alias T3 = Unqual!(DeepElementType!(Slice!(Iterator3, 1, kind3))); | 5| if (beta == 0) | { 4| y[] = 0; | } 5| if (beta == 1) | { | } | else | { 5| y[] *= T3(beta); | } 85| foreach (ref t; x) | { | import mir.sparse.blas.axpy; 25| axpy(alpha * t, a.front, y); 25| a.popFront; | } |} | |/// |unittest |{ 1| auto slice = sparse!double(5, 3); 1| slice[] = | [[0.0, 6.0, 6.0], | [2.0, 0.0, 0.0], | [3.0, 30.0, 30.0], | [0.0, 8.0, 8.0], | [0.0, 0.0, 0.0]]; 1| auto alpha = 3.0; 1| auto a = slice.compress; 1| auto x = [ 17.0, 19, 31, 3, 5].sliced; 1| auto beta = 2.0; 1| auto y = [1.0, 2, 3].sliced; 1| auto t = [131.0, 1056.0, 1056.0].sliced; 1| t[] *= alpha; | import mir.glas.l1: axpy; 1| axpy(beta, y, t); 1| gemtv(alpha, a, x, beta, y); 1| assert(t == y); |} | |/++ |General matrix-vector multiplication for sparse vectors. | |Params: | alpha = scalar | a = dense matrix | x = sparse vector | beta = scalar | y = dense vector |Returns: | `y = alpha * a × x + beta * y` if beta does not equal null and `y = alpha * a × x` otherwise. |+/ |void gemv( | CR, | CL, | SliceKind kind1, Iterator1, | T2, I2, | SliceKind kind3, Iterator3, | ) |(in CR alpha, Slice!(Iterator1, 2, kind1) a, Series!(I2*, T2*) x, in CL beta, Slice!(Iterator3, 1, kind3) y) |in |{ | assert(a.length == y.length); |} |body |{ | if (beta) | { | foreach (ref e; y) | { | import mir.sparse.blas.dot; | e = alpha * dot(x, a.front) + beta * e; | a.popFront; | } | } | else | { | foreach (ref e; y) | { | import mir.sparse.blas.dot; | e = alpha * dot(x, a.front); | a.popFront; | } | } |} | |/// |unittest |{ 1| auto slice = sparse!double(3, 5); 1| slice[] = | [[ 0.0, 2.0, 3.0, 0.0, 0.0], | [ 6.0, 0.0, 30.0, 8.0, 0.0], | [ 6.0, 0.0, 30.0, 8.0, 0.0]]; 1| auto alpha = 3.0; 1| auto a = slice.compress; 1| auto x = [ 17.0, 19, 31, 3, 5].sliced; 1| auto beta = 2.0; 1| auto y = [1.0, 2, 3].sliced; 1| auto t = [131.0, 1056.0, 1056.0].sliced; 1| t[] *= alpha; | import mir.glas.l1: axpy; 1| axpy(beta, y, t); 1| gemv(alpha, a, x, beta, y); 1| assert(t == y); |} | |/++ |Selective general matrix-vector multiplication with a selector sparse vector. | |Params: | a = dense matrix | x = dense vector | y = sparse vector (compressed) |Returns: | `y[available indexes] = (alpha * a × x)[available indexes]`. |+/ |void selectiveGemv(string op = "", SliceKind kind1, SliceKind kind2, T, T3, I3) |(Slice!(T*, 2, kind1) a, Slice!(T*, 1, kind2) x, Series!(I3*, T3*) y) |in |{ 3| assert(a.length!1 == x.length); 3| if (y.index.length) 2| assert(y.index[$-1] < a.length); |} |body |{ | import mir.ndslice.dynamic: transposed; | 21| foreach (i, j; y.index.field) | { | import mir.glas.l1 : dot; 3| auto d = dot(a[j], x); | mixin(`y.value[i] ` ~ op ~ `= d;`); | } |} source/mir/sparse/blas/gemv.d is 100% covered <<<<<< EOF # path=source-mir-sparse-blas-package.lst |/** |License: $(LINK2 http://boost.org/LICENSE_1_0.txt, Boost License 1.0). 
| |Authors: Ilya Yaroshenko |*/ |module mir.sparse.blas; | |public import mir.sparse.blas.dot; |public import mir.sparse.blas.axpy; |public import mir.sparse.blas.gemv; |public import mir.sparse.blas.gemm; source/mir/sparse/blas/package.d has no code <<<<<< EOF # path=source-mir-sparse-package.lst |/++ |$(H2 Sparse Tensors) | |License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0). | |Authors: Ilya Yaroshenko |+/ |module mir.sparse; | |import std.traits; |import std.meta; | |import mir.ndslice.slice; |public import mir.ndslice.field: SparseField; |public import mir.ndslice.iterator: ChopIterator, FieldIterator; |public import mir.series: isSeries, Series, mir_series, series; |public import mir.ndslice.slice: CoordinateValue, Slice, mir_slice; |public import mir.ndslice.topology: chopped; | |//TODO: replace with `static foreach` |private template Iota(size_t i, size_t j) |{ | static assert(i <= j, "Iota: i should be less than or equal to j"); | static if (i == j) | alias Iota = AliasSeq!(); | else | alias Iota = AliasSeq!(i, Iota!(i + 1, j)); |} | |/++ |Sparse tensors represented in Dictionary of Keys (DOK) format. | |Params: | N = dimension count | lengths = list of dimension lengths |Returns: | `N`-dimensional slice composed of indeces |See_also: $(LREF Sparse) |+/ |Sparse!(T, N) sparse(T, size_t N)(size_t[N] lengths...) |{ 12| T[size_t] table; 12| table[0] = 0; 12| table.remove(0); 12| assert(table !is null); 12| with (typeof(return)) return FieldIterator!(SparseField!T)(0, SparseField!T(table)).sliced(lengths); |} | |/// |pure unittest |{ 1| auto slice = sparse!double(2, 3); 1| slice[0][] = 1; 1| slice[0, 1] = 2; 1| --slice[0, 0]; 1| slice[1, 2] += 4; | 1| assert(slice == [[0, 2, 1], [0, 0, 4]]); | | import std.range.primitives: isRandomAccessRange; | static assert(isRandomAccessRange!(Sparse!(double, 2))); | | import mir.ndslice.slice: Slice, DeepElementType; | static assert(is(Sparse!(double, 2) : Slice!(FieldIterator!(SparseField!double), 2))); | static assert(is(DeepElementType!(Sparse!(double, 2)) == double)); |} | |/++ |Returns unsorted forward range of (coordinate, value) pairs. | |Params: | slice = sparse slice with pure structure. Any operations on structure of a slice are not allowed. 
|+/ |auto byCoordinateValue(size_t N, T)(Slice!(FieldIterator!(SparseField!T), N) slice) |{ | struct ByCoordinateValue | { | private sizediff_t[N-1] _strides; | mixin _sparse_range_methods!(typeof(slice._iterator._field._table.byKeyValue())); | | auto front() @property | {S: 5| assert(!_range.empty); 5| auto iv = _range.front; 5| size_t index = iv.key; 10| if (!(_l <= index && index < _r)) | { 0000000| _range.popFront; 0000000| goto S; | } 5| CoordinateValue!(T, N) ret; | foreach (i; Iota!(0, N - 1)) | { 5| ret.index[i] = index / _strides[i]; 5| index %= _strides[i]; | } 5| ret.index[N - 1] = index; 5| ret.value = iv.value; 5| return ret; | } | } 1| size_t l = slice._iterator._index; 1| size_t r = l + slice.elementCount; 1| size_t length = slice._iterator._field._table.byKey.countInInterval(l, r); 1| return ByCoordinateValue(slice.strides[0..N-1], length, l, r, slice._iterator._field._table.byKeyValue); |} | |/// |pure unittest |{ | import mir.array.allocation: array; | import mir.ndslice.sorting: sort; | alias CV = CoordinateValue!(double, 2); | 1| auto slice = sparse!double(3, 3); 1| slice[] = [[0, 2, 1], [0, 0, 4], [6, 7, 0]]; 1| assert(slice.byCoordinateValue.array.sort() == [ | CV([0, 1], 2), | CV([0, 2], 1), | CV([1, 2], 4), | CV([2, 0], 6), | CV([2, 1], 7)]); |} | |/++ |Returns unsorted forward range of coordinates. |Params: | slice = sparse slice with pure structure. Any operations on structure of a slice are not allowed. |+/ |auto byCoordinate(T, size_t N)(Slice!(FieldIterator!(SparseField!T), N) slice) |{ | struct ByCoordinate | { | private sizediff_t[N-1] _strides; | mixin _sparse_range_methods!(typeof(slice._iterator._field._table.byKey())); | | auto front() @property | {S: 5| assert(!_range.empty); 5| size_t index = _range.front; 10| if (!(_l <= index && index < _r)) | { 0000000| _range.popFront; 0000000| goto S; | } 5| size_t[N] ret; | foreach (i; Iota!(0, N - 1)) | { 5| ret[i] = index / _strides[i]; 5| index %= _strides[i]; | } 5| ret[N - 1] = index; 5| return ret; | } | } 1| size_t l = slice._iterator._index; 1| size_t r = l + slice.elementCount; 1| size_t length = slice._iterator._field._table.byKey.countInInterval(l, r); 1| return ByCoordinate(slice.strides[0 .. N - 1], length, l, r, slice._iterator._field._table.byKey); |} | |/// |pure unittest |{ | import mir.array.allocation: array; | import mir.ndslice.sorting: sort; | 1| auto slice = sparse!double(3, 3); 1| slice[] = [[0, 2, 1], [0, 0, 4], [6, 7, 0]]; 1| assert(slice.byCoordinate.array.sort() == [ | [0, 1], | [0, 2], | [1, 2], | [2, 0], | [2, 1]]); |} | |/++ |Returns unsorted forward range of values. |Params: | slice = sparse slice with pure structure. Any operations on structure of a slice are not allowed. 
|+/ |auto onlyByValue(T, size_t N)(Slice!(FieldIterator!(SparseField!T), N) slice) |{ | struct ByValue | { | mixin _sparse_range_methods!(typeof(slice._iterator._field._table.byKeyValue())); | | auto front() @property | {S: 5| assert(!_range.empty); 5| auto iv = _range.front; 5| size_t index = iv.key; 10| if (!(_l <= index && index < _r)) | { 0000000| _range.popFront; 0000000| goto S; | } 5| return iv.value; | } | } 1| size_t l = slice._iterator._index; 1| size_t r = l + slice.elementCount; 1| size_t length = slice._iterator._field._table.byKey.countInInterval(l, r); 1| return ByValue(length, l, r, slice._iterator._field._table.byKeyValue); |} | |/// |pure unittest |{ | import mir.array.allocation: array; | import mir.ndslice.sorting: sort; | 1| auto slice = sparse!double(3, 3); 1| slice[] = [[0, 2, 1], [0, 0, 4], [6, 7, 0]]; 1| assert(slice.onlyByValue.array.sort() == [1, 2, 4, 6, 7]); |} | |pragma(inline, false) |private size_t countInInterval(Range)(Range range, size_t l, size_t r) |{ 3| size_t count; 51| foreach(ref i; range) 30| if (l <= i && i < r) 15| count++; 3| return count; |} | |private mixin template _sparse_range_methods(Range) |{ | private size_t _length, _l, _r; | private Range _range; | | void popFront() | { 15| assert(!_range.empty); 15| _range.popFront; 15| _length--; | } | | bool empty() const @property | { 0000000| return _length == 0; | } | | auto save() @property | { 0000000| auto ret = this; 0000000| ret._range = ret._range.save; 0000000| return ret; | } | | size_t length() const @property | { 3| return _length; | } |} | |/++ |Returns compressed tensor. |Note: allocates using GC. |+/ |auto compress(I = uint, J = size_t, SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) slice) | if (N > 1) |{ 8| return compressWithType!(DeepElementType!(Slice!(Iterator, N, kind)), I, J)(slice); |} | |/// Sparse tensor compression |unittest |{ 1| auto sparse = sparse!double(5, 3); 1| sparse[] = | [[0, 2, 1], | [0, 0, 4], | [0, 0, 0], | [6, 0, 9], | [0, 0, 5]]; | 1| auto crs = sparse.compressWithType!double; | // assert(crs.iterator._field == CompressedField!(double, uint, uint)( | // 3, | // [2, 1, 4, 6, 9, 5], | // [1, 2, 2, 0, 2, 2], | // [0, 2, 3, 3, 5, 6])); |} | |/// Sparse tensor compression |unittest |{ 1| auto sparse = sparse!double(5, 8); 1| sparse[] = | [[0, 2, 0, 0, 0, 0, 0, 1], | [0, 0, 0, 0, 0, 0, 0, 4], | [0, 0, 0, 0, 0, 0, 0, 0], | [6, 0, 0, 0, 0, 0, 0, 9], | [0, 0, 0, 0, 0, 0, 0, 5]]; | 1| auto crs = sparse.compressWithType!double; | // assert(crs.iterator._field == CompressedField!(double, uint, uint)( | // 8, | // [2, 1, 4, 6, 9, 5], | // [1, 7, 7, 0, 7, 7], | // [0, 2, 3, 3, 5, 6])); |} | |/// Dense tensor compression |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto sl = slice!double(5, 3); 1| sl[] = | [[0, 2, 1], | [0, 0, 4], | [0, 0, 0], | [6, 0, 9], | [0, 0, 5]]; | 1| auto crs = sl.compressWithType!double; | | // assert(crs.iterator._field == CompressedField!(double, uint, uint)( | // 3, | // [2, 1, 4, 6, 9, 5], | // [1, 2, 2, 0, 2, 2], | // [0, 2, 3, 3, 5, 6])); |} | |/// Dense tensor compression |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto sl = slice!double(5, 8); 1| sl[] = | [[0, 2, 0, 0, 0, 0, 0, 1], | [0, 0, 0, 0, 0, 0, 0, 4], | [0, 0, 0, 0, 0, 0, 0, 0], | [6, 0, 0, 0, 0, 0, 0, 9], | [0, 0, 0, 0, 0, 0, 0, 5]]; | 1| auto crs = sl.compress; | // assert(crs.iterator._field == CompressedField!(double, uint, uint)( | // 8, | // [2, 1, 4, 6, 9, 5], | // [1, 7, 7, 0, 7, 7], | // [0, 2, 3, 3, 5, 6])); |} | |/++ |Returns 
compressed tensor with different element type. |Note: allocates using GC. |+/ |Slice!(ChopIterator!(J*, Series!(I*, V*)), N - 1) | compressWithType(V, I = uint, J = size_t, T, size_t N) | (Slice!(FieldIterator!(SparseField!T), N) slice) | if (is(T : V) && N > 1 && isUnsigned!I) |{ | import mir.array.allocation: array; | import mir.ndslice.sorting: sort; | import mir.ndslice.topology: iota; 8| auto compressedData = slice | .iterator | ._field | ._table | .series!(size_t, T, I, V); 8| auto pointers = new J[slice.shape[0 .. N - 1].iota.elementCount + 1]; 16| size_t k = 1, shift; 8| pointers[0] = 0; 8| pointers[1] = 0; 8| const rowLength = slice.length!(N - 1); 233| if(rowLength) foreach (ref index; compressedData.index.field) | { | for(;;) | { 90| sizediff_t newIndex = index - shift; 90| if (newIndex >= rowLength) | { 23| pointers[k + 1] = pointers[k]; 23| shift += rowLength; 23| k++; 23| continue; | } 67| index = cast(I)newIndex; 67| pointers[k] = cast(J) (pointers[k] + 1); 67| break; | } | | } 8| pointers[k + 1 .. $] = pointers[k]; 8| return compressedData.chopped(pointers); |} | | |/// ditto |Slice!(ChopIterator!(J*, Series!(I*, V*)), N - 1) | compressWithType(V, I = uint, J = size_t, Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | if (!is(Iterator : FieldIterator!(SparseField!ST), ST) && is(DeepElementType!(Slice!(Iterator, N, kind)) : V) && N > 1 && isUnsigned!I) |{ | import std.array: appender; | import mir.ndslice.topology: pack, flattened; 4| auto vapp = appender!(V[]); 4| auto iapp = appender!(I[]); 4| auto psl = slice.pack!1; 4| auto count = psl.elementCount; 4| auto pointers = new J[count + 1]; | 4| pointers[0] = 0; 4| auto elems = psl.flattened; 4| size_t j = 0; 72| foreach (ref pointer; pointers[1 .. $]) | { 20| auto row = elems.front; 20| elems.popFront; 20| size_t i; 445| foreach (e; row) | { 135| if (e) | { 24| vapp.put(e); 24| iapp.put(cast(I)i); 24| j++; | } 135| i++; | } 20| pointer = cast(J)j; | } 4| return iapp.data.series(vapp.data).chopped(pointers); |} | | |/++ |Re-compresses a compressed tensor. Makes all values, indeces and pointers consequent in memory. | |Sparse slice is iterated twice. The first tine it is iterated to get length of each sparse row, the second time - to copy the data. | |Note: allocates using GC. |+/ |Slice!(ChopIterator!(J*, Series!(I*, V*)), N) | recompress | (V, I = uint, J = size_t, Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) sparseSlice) | if (isSeries!(DeepElementType!(Slice!(Iterator, N, kind)))) |{ | import mir.algorithm.iteration: each; | import mir.conv: to, emplaceRef; | import mir.ndslice.allocation: uninitSlice; | import mir.ndslice.topology: pack, flattened, as, member, zip; | 1| size_t count = sparseSlice.elementCount; 1| size_t length; 1| auto pointers = uninitSlice!J(count + 1); 1| pointers.front = 0; 1| sparseSlice | .member!"data" | .member!"elementCount" 5| .each!((len, ref ptr) {ptr = length += len;})(pointers[1 .. 
$]); | 1| auto i = uninitSlice!I(length); 1| auto v = uninitSlice!V(length); | 1| auto ret = i.series(v).chopped(pointers); | 1| sparseSlice | .each!((a, b) { 5| b.index[] = a.index.as!I; 5| b.value.each!(emplaceRef!V)(a.value.as!V); | })(ret); | 1| return ret; |} | |/// |unittest |{ | import mir.ndslice.topology: universal; | import mir.ndslice.allocation: slice; | 1| auto sl = slice!double(5, 8); 1| sl[] = | [[0, 2, 0, 0, 0, 0, 0, 1], | [0, 0, 0, 0, 0, 0, 0, 4], | [0, 0, 0, 0, 0, 0, 0, 0], | [6, 0, 0, 0, 0, 0, 0, 9], | [0, 0, 0, 0, 0, 0, 0, 5]]; | 1| auto crs = sl.compress; | // assert(crs.iterator._field == CompressedField!(double, uint, uint)( | // 8, | // [2, 1, 4, 6, 9, 5], | // [1, 7, 7, 0, 7, 7], | // [0, 2, 3, 3, 5, 6])); | | import mir.ndslice.dynamic: reversed; 1| auto rec = crs.reversed.recompress!real; 1| auto rev = sl.universal.reversed.compressWithType!real; 1| assert(rev.structure == rec.structure); | // assert(rev.iterator._field.values == rec.iterator._field.values); | // assert(rev.iterator._field.indeces == rec.iterator._field.indeces); | // assert(rev.iterator._field.pointers == rec.iterator._field.pointers); |} | |/++ |Sparse Slice in Dictionary of Keys (DOK) format. |+/ |alias Sparse(T, size_t N = 1) = Slice!(FieldIterator!(SparseField!T), N); | |/// |alias CompressedVector(T, I = uint) = Series!(T*, I*); | |/// |alias CompressedMatrix(T, I = uint) = Slice!(ChopIterator!(J*, Series!(T*, I*))); | |/// |alias CompressedTensor(T, size_t N, I = uint, J = size_t) = Slice!(ChopIterator!(J*, Series!(T*, I*)), N - 1); | |///ditto |alias CompressedTensor(T, size_t N : 1, I = uint) = Series!(I*, T*); source/mir/sparse/package.d is 93% covered <<<<<< EOF
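A short end-to-end sketch of the sparse workflow exercised by the listings above, using only APIs they define (`sparse`, `onlyByValue`, `compress`, and the sparse `gemv`); the concrete values are illustrative.

unittest
{
    import mir.array.allocation: array;
    import mir.ndslice.slice: sliced;
    import mir.ndslice.sorting: sort;
    import mir.sparse;
    import mir.sparse.blas.gemv: gemv;

    // Dictionary-of-keys builder
    auto dok = sparse!double(3, 4);
    dok[0, 1] = 2;
    dok[1, 3] = 5;
    dok[2, 0] = 7;

    // Unsorted traversal of the stored entries
    assert(dok.onlyByValue.array.sort() == [2, 5, 7]);

    // CSR view: one Series (column indices + values) per row
    auto csr = dok.compress;
    assert(csr.length == 3);

    // y = 1.0 * csr × x + 0.0 * y
    auto x = [1.0, 10, 100, 1000].sliced;
    auto y = [0.0, 0, 0].sliced;
    gemv(1.0, csr, x, 0.0, y);
    assert(y == [20.0, 5000, 7]);
}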