TRAVIS_OS_NAME=linux
<<<<<< ENV
benchmarks/ndslice/binarization.d
benchmarks/ndslice/convolution.d
benchmarks/ndslice/dot_product.d
benchmarks/ndslice/euclidean_distance.d
dub.json
examples/data/stop_words
examples/data/trndocs.dat
examples/data/words
examples/lda_hoffman_sparse.d
examples/means_of_columns.d
examples/median_filter.d
index.d
meson.build
source/mir/glas/l1.d
source/mir/glas/l2.d
source/mir/glas/package.d
source/mir/model/lda/hoffman.d
source/mir/sparse/blas/axpy.d
source/mir/sparse/blas/dot.d
source/mir/sparse/blas/gemm.d
source/mir/sparse/blas/gemv.d
source/mir/sparse/blas/package.d
source/mir/sparse/package.d
subprojects/mir-algorithm.wrap
subprojects/mir-core.wrap
subprojects/mir-linux-kernel.wrap
subprojects/mir-random.wrap
<<<<<< network
# path=./source-mir-sparse-blas-axpy.lst
|/**
|License: $(LINK2 http://boost.org/LICENSE_1_0.txt, Boost License 1.0).
|
|Authors: Ilya Yaroshenko
|*/
|module mir.sparse.blas.axpy;
|
|import std.traits;
|import mir.ndslice.slice;
|import mir.sparse;
|import mir.series;
|
|/++
|Constant times a vector plus a vector.
|
|Params:
| x = sparse vector
| y = dense vector
| alpha = scalar
|Returns:
| `y = alpha * x + y`
|+/
|void axpy(
| CR,
| V1 : Series!(I1, T1),
| I1, T1, V2)
|(in CR alpha, V1 x, V2 y)
| if (isDynamicArray!V2 || isSlice!V2)
|in
|{
28| if (x.index.length)
27| assert(x.index[$-1] < y.length);
|}
|body
|{
| import mir.internal.utility;
|
321| foreach (size_t i; 0 .. x.index.length)
| {
79| auto j = x.index[i];
79| y[j] = alpha * x.value[i] + y[j];
| }
|}
|
|///
|unittest
|{
| import mir.series;
1| auto x = series([0, 3, 5, 9, 10], [1.0, 3, 4, 9, 13]);
1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
1| axpy(2.0, x, y);
1| assert(y == [2.0, 1.0, 2, 9, 4, 13, 6, 7, 8, 27, 36, 11, 12]);
|}
|
|unittest
|{
| import mir.series;
1| auto x = series([0, 3, 5, 9, 10], [1.0, 3, 4, 9, 13]);
1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
1| axpy(2.0, x, y.sliced);
1| assert(y == [2.0, 1.0, 2, 9, 4, 13, 6, 7, 8, 27, 36, 11, 12]);
|}
|
|unittest
|{
1| auto x = series([0, 3, 5, 9, 10], [1.0, 3, 4, 9, 13]);
1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
1| axpy(2.0, x, y.slicedField);
1| assert(y == [2.0, 1.0, 2, 9, 4, 13, 6, 7, 8, 27, 36, 11, 12]);
|}
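|
|// Illustrative addition (not part of the original file): `alpha` may have a
|// scalar type different from the vector element types; here an integer
|// scalar scales a sparse vector of doubles.
|unittest
|{
|    import mir.series;
|    auto x = series([1, 4], [2.0, 3.0]);
|    auto y = [0.0, 0, 0, 0, 0];
|    axpy(3, x, y);
|    assert(y == [0.0, 6, 0, 0, 9]);
|}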
source/mir/sparse/blas/axpy.d is 100% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-random-2.2.15-mir-random-source-mir-random-engine-mersenne_twister.lst
|/++
|The Mersenne Twister generator.
|
|Copyright: Copyright Andrei Alexandrescu 2008 - 2009, Ilya Yaroshenko 2016-.
|License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
|Authors: $(HTTP erdani.org, Andrei Alexandrescu) Ilya Yaroshenko (rework)
|+/
|module mir.random.engine.mersenne_twister;
|
|import std.traits;
|
|/++
|The $(LUCKY Mersenne Twister) generator.
|+/
|struct MersenneTwisterEngine(UIntType, size_t w, size_t n, size_t m, size_t r,
| UIntType a, size_t u, UIntType d, size_t s,
| UIntType b, size_t t,
| UIntType c, size_t l, UIntType f)
| if (isUnsigned!UIntType)
|{
| ///
| enum isRandomEngine = true;
|
| static assert(0 < w && w <= UIntType.sizeof * 8);
| static assert(1 <= m && m <= n);
| static assert(0 <= r && 0 <= u && 0 <= s && 0 <= t && 0 <= l);
| static assert(r <= w && u <= w && s <= w && t <= w && l <= w);
| static assert(0 <= a && 0 <= b && 0 <= c);
|
| @disable this();
| @disable this(this);
|
| /// Largest generated value.
| enum UIntType max = UIntType.max >> (UIntType.sizeof * 8u - w);
| static assert(a <= max && b <= max && c <= max && f <= max);
|
| private enum UIntType lowerMask = (cast(UIntType) 1u << r) - 1;
| private enum UIntType upperMask = ~lowerMask & max;
|
| /**
| Parameters for the generator.
| */
| enum size_t wordSize = w;
| enum size_t stateSize = n; /// ditto
| enum size_t shiftSize = m; /// ditto
| enum size_t maskBits = r; /// ditto
| enum UIntType xorMask = a; /// ditto
| enum size_t temperingU = u; /// ditto
| enum UIntType temperingD = d; /// ditto
| enum size_t temperingS = s; /// ditto
| enum UIntType temperingB = b; /// ditto
| enum size_t temperingT = t; /// ditto
| enum UIntType temperingC = c; /// ditto
| enum size_t temperingL = l; /// ditto
| enum UIntType initializationMultiplier = f; /// ditto
|
|
| /// The default seed value.
| enum UIntType defaultSeed = 5489;
|
| /++
| Current reversed payload index, with initial value equal to `n-1`.
| +/
| size_t index = void;
|
| private UIntType _z = void;
|
| /++
| Reversed(!) payload.
| +/
| UIntType[n] data = void;
|
| /*
| * Marker indicating it's safe to construct from void
| * (i.e. the constructor doesn't depend on the struct
| * being in an initially valid state).
| * Non-public because we don't want to commit to this
| * design.
| */
| package enum bool _isVoidInitOkay = true;
|
| /++
| Constructs a MersenneTwisterEngine object.
| +/
0000000| this(UIntType value) @safe pure nothrow @nogc
| {
| static if (max == UIntType.max)
0000000| data[$-1] = value;
| else
| data[$-1] = value & max;
0000000| foreach_reverse (size_t i, ref e; data[0 .. $-1])
| {
0000000| e = f * (data[i + 1] ^ (data[i + 1] >> (w - 2))) + cast(UIntType)(n - (i + 1));
| static if (max != UIntType.max)
| e &= max;
| }
0000000| index = n-1;
0000000| opCall();
| }
|
| /++
| Constructs a MersenneTwisterEngine object.
|
| Note that `MersenneTwisterEngine([123])` will not result in
| the same initial state as `MersenneTwisterEngine(123)`.
| +/
| this()(scope const(UIntType)[] array) @safe pure nothrow @nogc
| {
| static if (is(UIntType == uint))
| {
| enum UIntType f2 = 1664525u;
| enum UIntType f3 = 1566083941u;
| }
| else static if (is(UIntType == ulong))
| {
| enum UIntType f2 = 3935559000370003845uL;
| enum UIntType f3 = 2862933555777941757uL;
| }
| else
| static assert(0, "init by slice only supported if UIntType is uint or ulong!");
|
| data[$-1] = cast(UIntType) (19650218u & max);
| foreach_reverse (size_t i, ref e; data[0 .. $-1])
| {
| e = f * (data[i + 1] ^ (data[i + 1] >> (w - 2))) + cast(UIntType)(n - (i + 1));
| static if (max != UIntType.max)
| e &= max;
| }
| index = n-1;
| if (array.length == 0)
| {
| opCall();
| return;
| }
|
| size_t final_mix_index = void;
|
| if (array.length >= n)
| {
| size_t j = 0;
| //Handle all but tail.
| while (array.length - j >= n - 1)
| {
| foreach_reverse (i, ref e; data[0 .. $-1])
| {
| e = (e ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2))
| + array[j] + cast(UIntType) j;
| static if (max != UIntType.max)
| e &= max;
| ++j;
| }
| data[$ - 1] = data[0];
| }
| //Handle tail.
| size_t i = n - 2;
| while (j < array.length)
| {
| data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2))
| + array[j] + cast(UIntType) j;
| static if (max != UIntType.max)
| data[i] &= max;
| ++j;
| --i;
| }
| //Set the index for use by the next pass.
| final_mix_index = i;
| }
| else
| {
| size_t i = n - 2;
| //Handle all but tail.
| while (i >= array.length)
| {
| foreach (j; 0 .. array.length)
| {
| data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2))
| + array[j] + cast(UIntType) j;
| static if (max != UIntType.max)
| data[i] &= max;
| --i;
| }
| }
| //Handle tail.
| size_t j = 0;
| while (i != cast(size_t) -1)
| {
| data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2))
| + array[j] + cast(UIntType) j;
| static if (max != UIntType.max)
| data[i] &= max;
| ++j;
| --i;
| }
| data[$ - 1] = data[0];
| i = n - 2;
| data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2))
| + array[j] + cast(UIntType) j;
| static if (max != UIntType.max)
| data[i] &= max;
| //Set the index for use by the next pass.
| final_mix_index = n - 2;
| }
|
| foreach_reverse (i, ref e; data[0 .. final_mix_index])
| {
| e = (e ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f3))
| - cast(UIntType)(n - (i + 1));
| static if (max != UIntType.max)
| e &= max;
| }
| foreach_reverse (i, ref e; data[final_mix_index .. n-1])
| {
| e = (e ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f3))
| - cast(UIntType)(n - (i + 1));
| static if (max != UIntType.max)
| e &= max;
| }
| data[$-1] = (cast(UIntType)1) << ((UIntType.sizeof * 8) - 1); /* MSB is 1; assuring non-zero initial array */
| opCall();
| }
|
| /++
| Advances the generator.
| +/
| UIntType opCall() @safe pure nothrow @nogc
| {
| // This function blends two nominally independent
| // processes: (i) calculation of the next random
| // variate from the cached previous `data` entry
| // `_z`, and (ii) updating `data[index]` and `_z`
| // and advancing the `index` value to the next in
| // sequence.
| //
| // By interweaving the steps involved in these
| // procedures, rather than performing each of
| // them separately in sequence, the variables
| // are kept 'hot' in CPU registers, allowing
| // for significantly faster performance.
0000000| sizediff_t index = this.index;
0000000| sizediff_t next = index - 1;
0000000| if(next < 0)
0000000| next = n - 1;
0000000| auto z = _z;
0000000| sizediff_t conj = index - m;
0000000| if(conj < 0)
0000000| conj = index - m + n;
| static if (d == UIntType.max)
0000000| z ^= (z >> u);
| else
0000000| z ^= (z >> u) & d;
0000000| auto q = data[index] & upperMask;
0000000| auto p = data[next] & lowerMask;
0000000| z ^= (z << s) & b;
0000000| auto y = q | p;
0000000| auto x = y >> 1;
0000000| z ^= (z << t) & c;
0000000| if (y & 1)
0000000| x ^= a;
0000000| auto e = data[conj] ^ x;
0000000| z ^= (z >> l);
0000000| _z = data[index] = e;
0000000| this.index = next;
0000000| return z;
| }
|}
|
|/++
|A $(D MersenneTwisterEngine) instantiated with the parameters of the
|original engine $(HTTP en.wikipedia.org/wiki/Mersenne_Twister,
|MT19937), generating uniformly-distributed 32-bit numbers with a
|period of 2^19937 - 1.
|
|This is recommended for random number generation on 32-bit systems
|unless memory is severely restricted, in which case a
|$(REF_ALTTEXT Xorshift, Xorshift, mir, random, engine, xorshift)
|would be the generator of choice.
|+/
|alias Mt19937 = MersenneTwisterEngine!(uint, 32, 624, 397, 31,
| 0x9908b0df, 11, 0xffffffff, 7,
| 0x9d2c5680, 15,
| 0xefc60000, 18, 1812433253);
|
|///
|@safe version(mir_random_test) unittest
|{
| import mir.random.engine;
|
| // bit-masking by generator maximum is necessary
| // to handle 64-bit `unpredictableSeed`
| auto gen = Mt19937(unpredictableSeed & Mt19937.max);
| auto n = gen();
|
| import std.traits;
| static assert(is(ReturnType!gen == uint));
|}
|
|/++
|A $(D MersenneTwisterEngine) instantiated with the parameters of the
|64-bit variant of the original engine, $(HTTP en.wikipedia.org/wiki/Mersenne_Twister,
|MT19937-64), generating uniformly-distributed 64-bit numbers with a
|period of 2^19937 - 1.
|
|This is recommended for random number generation on 64-bit systems
|unless memory is severely restricted, in which case a
|$(REF_ALTTEXT Xorshift, Xorshift, mir, random, engine, xorshift)
|would be the generator of choice.
|+/
|alias Mt19937_64 = MersenneTwisterEngine!(ulong, 64, 312, 156, 31,
| 0xb5026f5aa96619e9, 29, 0x5555555555555555, 17,
| 0x71d67fffeda60000, 37,
| 0xfff7eee000000000, 43, 6364136223846793005);
|
|///
|@safe version(mir_random_test) unittest
|{
| import mir.random.engine;
|
| auto gen = Mt19937_64(unpredictableSeed);
| auto n = gen();
|
| import std.traits;
| static assert(is(ReturnType!gen == ulong));
|}
|
|@safe nothrow version(mir_random_test) unittest
|{
| import mir.random.engine;
|
| static assert(isSaturatedRandomEngine!Mt19937);
| static assert(isSaturatedRandomEngine!Mt19937_64);
| auto gen = Mt19937(Mt19937.defaultSeed);
| foreach(_; 0 .. 9999)
| gen();
| assert(gen() == 4123659995);
|
| auto gen64 = Mt19937_64(Mt19937_64.defaultSeed);
| foreach(_; 0 .. 9999)
| gen64();
| assert(gen64() == 9981545732273789042uL);
|}
|
|version(mir_random_test) unittest
|{
| enum val = [1341017984, 62051482162767];
| alias MT(UIntType, uint w) = MersenneTwisterEngine!(UIntType, w, 624, 397, 31,
| 0x9908b0df, 11, 0xffffffff, 7,
| 0x9d2c5680, 15,
| 0xefc60000, 18, 1812433253);
|
| import std.meta: AliasSeq;
| foreach (i, R; AliasSeq!(MT!(ulong, 32), MT!(ulong, 48)))
| {
| static if (R.wordSize == 48) static assert(R.max == 0xFFFFFFFFFFFF);
| auto a = R(R.defaultSeed);
| foreach(_; 0..999)
| a();
| assert(val[i] == a());
| }
|}
|
|@safe nothrow @nogc version(mir_random_test) unittest
|{
| //Verify that seeding with an array gives the same result as the reference
| //implementation.
|
| //32-bit: www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/mt19937ar.tgz
| immutable uint[4] seed32 = [0x123u, 0x234u, 0x345u, 0x456u];
| auto gen32 = Mt19937(seed32);
| foreach(_; 0..999)
| gen32();
| assert(3460025646u == gen32());
|
| //64-bit: www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/mt19937-64.tgz
| immutable ulong[4] seed64 = [0x12345uL, 0x23456uL, 0x34567uL, 0x45678uL];
| auto gen64 = Mt19937_64(seed64);
| foreach(_; 0..999)
| gen64();
| assert(994412663058993407uL == gen64());
|}
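|
|// Illustrative addition (not part of the original file). It demonstrates the
|// note on the array constructor above: seeding from a one-element array uses
|// the array-seeding algorithm, so it does not reproduce the state produced by
|// integer seeding with the same number.
|@safe nothrow @nogc version(mir_random_test) unittest
|{
|    immutable uint[1] seed = [123u];
|    auto a = Mt19937(seed[]); // array seeding
|    auto b = Mt19937(123u);   // integer seeding
|    // The two engines start from different internal states.
|    assert(a.data != b.data);
|}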
../../../.dub/packages/mir-random-2.2.15/mir-random/source/mir/random/engine/mersenne_twister.d is 0% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-random-2.2.15-mir-random-source-mir-random-engine-package.lst
|/++
|$(SCRIPT inhibitQuickIndex = 1;)
|Uniform random engines.
|
|$(B Sections:)
| $(LINK2 #Convenience, Convenience)
|• $(LINK2 #Entropy, Entropy)
|• $(LINK2 #ThreadLocal, Thread-Local)
|• $(LINK2 #Traits, Traits)
|• $(LINK2 #CInterface, C Interface)
|
|$(BOOKTABLE
|
|$(LEADINGROW Convenience)
|$(TR
| $(RROW Random, Default random number _engine))
| $(RROW rne, Per-thread uniquely-seeded instance of default `Random`. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS).)
|
|$(LEADINGROW Entropy)
|$(TR
| $(RROW unpredictableSeed, A `size_t` seed generated using system entropy. You may use `unpredictableSeed!UIntType` for unsigned integers of different sizes.)
| $(RROW genRandomNonBlocking, Fills a buffer with system entropy, returning the number of bytes copied, or a negative number on error)
| $(RROW genRandomBlocking, Fills a buffer with system entropy, possibly waiting if the system believes it has insufficient entropy. Returns 0 on success.))
|
|$(LEADINGROW Thread-Local (when $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS) enabled))
|$(TR
| $(TR $(TDNW $(LREF threadLocal)`!(Engine)`) $(TD Per-thread uniquely-seeded instance of any specified `Engine`. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS).))
| $(TR $(TDNW $(LREF threadLocalPtr)`!(Engine)`) $(TD `@safe` pointer to `threadLocal!Engine`. Always initializes before return. $(I Warning: do not share between threads!)))
| $(TR $(TDNW $(LREF threadLocalInitialized)`!(Engine)`) $(TD Explicitly manipulate "is seeded" flag for thread-local instance. Not needed by most library users.))
| $(TR $(TDNW $(LREF setThreadLocalSeed)`!(Engine, A...)`) $(TD Initialize thread-local `Engine` with a known seed rather than a random seed.))
| )
|
|$(LEADINGROW Traits)
|$(TR
| $(RROW EngineReturnType, Get return type of random number _engine's `opCall()`)
| $(RROW isRandomEngine, Check if a type is a random number _engine)
| $(RROW isSaturatedRandomEngine, Check if a random number _engine `G` satisfies `G.max == EngineReturnType!(G).max`)
| $(RROW preferHighBits, Are the high bits of the _engine's output known to have better statistical properties than the low bits?))
|
|$(LEADINGROW C Interface)
| $(RROW mir_random_engine_ctor, Perform any necessary setup. Automatically called by DRuntime.)
| $(RROW mir_random_engine_dtor, Release any resources. Automatically called by DRuntime.)
| $(RROW mir_random_genRandomNonBlocking, External name for $(LREF genRandomNonBlocking))
| $(RROW mir_random_genRandomBlocking, External name for $(LREF genRandomBlocking))
|)
|
|Copyright: Ilya Yaroshenko 2016-.
|License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
|Authors: Ilya Yaroshenko
|
|Macros:
| T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
| RROW = $(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|module mir.random.engine;
|
|version (OSX)
| version = Darwin;
|else version (iOS)
| version = Darwin;
|else version (TVOS)
| version = Darwin;
|else version (WatchOS)
| version = Darwin;
|
|// A secure arc4random implementation that uses some modern algorithm rather
|// than ARC4 may be used synonymously with non-blocking system entropy.
|version (CRuntime_Bionic)
| version = SecureARC4Random; // ChaCha20
|version (Darwin)
| version = SecureARC4Random; // AES
|version (OpenBSD)
| version = SecureARC4Random; // ChaCha20
|version (NetBSD)
| version = SecureARC4Random; // ChaCha20
|
|// A legacy arc4random should not be used when cryptographic security
|// is required but may be used for `unpredictableSeed`.
|version (CRuntime_UClibc)
| version = LegacyARC4Random; // ARC4
|version (FreeBSD)
| version = LegacyARC4Random; // ARC4
|version (DragonFlyBSD)
| version = LegacyARC4Random; // ARC4
|version (BSD)
| version = LegacyARC4Random; // Unknown implementation
|
|version (SecureARC4Random)
| version = AnyARC4Random;
|version (LegacyARC4Random)
| version = AnyARC4Random;
|
|version (D_betterC)
| private enum bool THREAD_LOCAL_STORAGE_AVAILABLE = false;
|else
| private enum bool THREAD_LOCAL_STORAGE_AVAILABLE = __traits(compiles, { static size_t x = 0; });
|
|import std.traits;
|
|import mir.random.engine.mersenne_twister;
|
|/++
|Like `std.traits.ReturnType!T` but it works even if
|T.opCall is a function template.
|+/
|template EngineReturnType(T)
|{
| import std.traits : ReturnType;
| static if (is(ReturnType!T))
| alias EngineReturnType = ReturnType!T;
| else
| alias EngineReturnType = typeof(T.init());
|}
|
|/++
|Test if T is a random engine.
|A type should define `enum isRandomEngine = true;` to be a random engine.
|+/
|template isRandomEngine(T)
|{
| static if (is(typeof(T.isRandomEngine) : bool) && is(typeof(T.init())))
| {
| private alias R = typeof(T.init());
| static if (T.isRandomEngine && isUnsigned!R)
| enum isRandomEngine = is(typeof({
| enum max = T.max;
| static assert(is(typeof(T.max) == R));
| }));
| else enum isRandomEngine = false;
| }
| else enum isRandomEngine = false;
|}
|
|/++
|Test if T is a saturated random-bit generator.
|A random number generator is saturated if `T.max == ReturnType!T.max`.
|A type should define `enum isRandomEngine = true;` to be a random engine.
|+/
|template isSaturatedRandomEngine(T)
|{
| static if (isRandomEngine!T)
| enum isSaturatedRandomEngine = T.max == EngineReturnType!T.max;
| else
| enum isSaturatedRandomEngine = false;
|}
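|
|// Illustrative sketch (not part of the original file): a minimal type that
|// satisfies these traits. `ToyEngine` is invented purely for this example.
|version(mir_random_test) unittest
|{
|    static struct ToyEngine
|    {
|        enum isRandomEngine = true; // opt-in marker checked by isRandomEngine
|        enum uint max = uint.max;   // saturated: covers the full uint range
|        uint state = 1;
|        uint opCall()()             // a templated opCall is also supported
|        {
|            return state = state * 747796405u + 2891336453u;
|        }
|    }
|    static assert(isRandomEngine!ToyEngine);
|    static assert(isSaturatedRandomEngine!ToyEngine);
|    static assert(is(EngineReturnType!ToyEngine == uint));
|}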
|
|/++
|Are the high bits of the engine's output known to have
|better statistical properties than the low bits of the
|output? This property is set by checking the value of
|an optional enum named `preferHighBits`. If the property
|is missing it is treated as false.
|
|This should be specified as true for:
|
|- linear congruential generators with power-of-2 modulus
|- xorshift+ family
|- xorshift* family
|- in principle any generator whose final operation is something like
|multiplication or addition in which the high bits depend on the low bits
|but the low bits are unaffected by the high bits.
|
|+/
|template preferHighBits(G)
| if (isSaturatedRandomEngine!G)
|{
| static if (__traits(compiles, { enum bool e = G.preferHighBits; }))
| private enum bool preferHighBits = G.preferHighBits;
| else
| private enum bool preferHighBits = false;
|}
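|
|// Illustrative sketch (not part of the original file): choosing which bit of
|// an engine's output to use for a coin flip based on `preferHighBits`.
|// Mt19937 defines no `preferHighBits` member, so the trait is false for it
|// and the low-bit branch is compiled.
|@nogc nothrow @safe version(mir_random_test) unittest
|{
|    auto gen = Mt19937(Mt19937.defaultSeed);
|    auto x = gen();
|    static if (preferHighBits!Mt19937)
|        bool coin = ((x >> 31) & 1) != 0; // use the high bit
|    else
|        bool coin = (x & 1) != 0;         // the low bit is acceptable here
|}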
|
|/*
| * Marker indicating it's safe to construct from void
| * (i.e. the constructor doesn't depend on the struct
| * being in an initially valid state).
| * Either checks an explicit flag `_isVoidInitOkay`
| * or tests to make sure that the structure contains
| * nothing that looks like a pointer or an index into
| * an array. Also ensures that there is not an elaborate
| * destructor since it could be called when the struct
| * is in an invalid state.
| * Non-public because we don't want to commit to this
| * design.
| */
|package template _isVoidInitOkay(G) if (isRandomEngine!G && is(G == struct))
|{
| static if (is(typeof(G._isVoidInitOkay) : bool))
| enum bool _isVoidInitOkay = G._isVoidInitOkay;
| else static if (!hasNested!G && !hasElaborateDestructor!G)
| {
| import std.meta : allSatisfy;
| static if (allSatisfy!(isScalarType, FieldTypeTuple!G))
| //All members are scalars.
| enum bool _isVoidInitOkay = true;
| else static if (FieldTypeTuple!(G).length == 1 && isStaticArray!(FieldTypeTuple!(G)[0]))
| //Only has one member which is a static array of scalars.
| enum bool _isVoidInitOkay = isScalarType!(typeof(FieldTypeTuple!(G)[0].init[0]));
| else
| enum bool _isVoidInitOkay = false;
| }
| else
| enum bool _isVoidInitOkay = false;
|}
|@nogc nothrow pure @safe version(mir_random_test)
|{
| import mir.random.engine.mersenne_twister: Mt19937, Mt19937_64;
| //Ensure that this property is set for the Mersenne Twister,
| //whose internal state is huge enough for this to potentially
| //matter:
| static assert(_isVoidInitOkay!Mt19937);
| static assert(_isVoidInitOkay!Mt19937_64);
| //Check that the property is set for a moderately-sized PRNG.
| import mir.random.engine.xorshift: Xorshift1024StarPhi;
| static assert(_isVoidInitOkay!Xorshift1024StarPhi);
| //Check that PRNGs not explicitly marked as void-init safe
| //can be inferred as such if they only have scalar fields.
| import mir.random.engine.pcg: pcg32, pcg32_oneseq;
| import mir.random.engine.splitmix: SplitMix64;
| static assert(_isVoidInitOkay!pcg32);
| static assert(_isVoidInitOkay!pcg32_oneseq);
| static assert(_isVoidInitOkay!SplitMix64);
| //Check that PRNGs not explicitly marked as void-init safe
| //can be inferred as such if their only field is a static
| //array of scalars.
| import mir.random.engine.xorshift: Xorshift128, Xoroshiro128Plus;
| static assert(_isVoidInitOkay!Xorshift128);
| static assert(_isVoidInitOkay!Xoroshiro128Plus);
|}
|
|version (D_Ddoc)
|{
| /++
| A "good" seed for initializing random number engines. Initializing
| with $(D_PARAM unpredictableSeed) makes engines generate different
| random number sequences every run.
|
| Returns:
| A single unsigned integer seed value, different on each successive call
| +/
| pragma(inline, true)
| @property size_t unpredictableSeed() @trusted nothrow @nogc
| {
| return unpredictableSeed!size_t;
| }
|}
|
|/// ditto
|pragma(inline, true)
|@property T unpredictableSeed(T = size_t)() @trusted nothrow @nogc
| if (isUnsigned!T)
|{
| import mir.utility: _expect;
0000000| T seed = void;
| version (AnyARC4Random)
| {
| // If we just need 32 bits it's faster to call arc4random()
| // than arc4random_buf(&seed, seed.sizeof).
| static if (T.sizeof <= uint.sizeof)
| seed = cast(T) arc4random();
| else
| arc4random_buf(&seed, seed.sizeof);
| }
0000000| else if (_expect(genRandomNonBlocking(&seed, seed.sizeof) != T.sizeof, false))
| {
| // fallback to old time/thread-based implementation in case of errors
0000000| seed = cast(T) fallbackSeed();
| }
0000000| return seed;
|}
|
|// Old name of `unpredictableSeedOf!T`. Undocumented but
|// defined so existing code using mir.random won't break.
|deprecated("Use unpredictableSeed!T instead of unpredictableSeedOf!T")
|public alias unpredictableSeedOf(T) = unpredictableSeed!T;
|
|version (mir_random_test) @nogc nothrow @safe unittest
|{
| // Check unpredictableSeed syntax works with or without parentheses.
| auto a = unpredictableSeed;
| auto b = unpredictableSeed!uint;
| auto c = unpredictableSeed!ulong;
| static assert(is(typeof(a) == size_t));
| static assert(is(typeof(b) == uint));
| static assert(is(typeof(c) == ulong));
|
| auto d = unpredictableSeed();
| auto f = unpredictableSeed!uint();
| auto g = unpredictableSeed!ulong();
| static assert(is(typeof(d) == size_t));
| static assert(is(typeof(f) == uint));
| static assert(is(typeof(g) == ulong));
|}
|
|// Is llvm_readcyclecounter supported on this platform?
|// We need to whitelist platforms where it is known to work because if it
|// isn't supported it will compile but always return 0.
|// https://llvm.org/docs/LangRef.html#llvm-readcyclecounter-intrinsic
|version(LDC)
|{
| // The only architectures the documentation says are supported are
| // x86 and Alpha. x86 uses RDTSC and Alpha uses RPCC.
| version(X86_64) version = LLVMReadCycleCounter;
| // Do *not* support 32-bit x86 because some x86 processors don't
| // support `rdtsc` and because on x86 (but not x86-64) Linux
| // `prctl` can disable a process's ability to use `rdtsc`.
| else version(Alpha) version = LLVMReadCycleCounter;
|}
|
|
|pragma(inline, false)
|private ulong fallbackSeed()()
|{
| // fallback to old time/thread-based implementation in case of errors
| version(LLVMReadCycleCounter)
| {
| import ldc.intrinsics : llvm_readcyclecounter;
| ulong ticks = llvm_readcyclecounter();
| }
| else version(D_InlineAsm_X86_64)
| {
| // RDTSC takes around 22 clock cycles.
| ulong ticks = void;
| asm @nogc nothrow
| {
| rdtsc;
| shl RDX, 32;
| xor RDX, RAX;
| mov ticks, RDX;
| }
| }
| //else version(D_InlineAsm_X86)
| //{
| // // We don't use `rdtsc` with version(D_InlineAsm_X86) because
| // // some x86 processors don't support `rdtsc` and because on
| // // x86 (but not x86-64) Linux `prctl` can disable a process's
| // // ability to use `rdtsc`.
| // static assert(0);
| //}
| else version(Windows)
| {
| import core.sys.windows.winbase : QueryPerformanceCounter;
| ulong ticks = void;
| QueryPerformanceCounter(cast(long*)&ticks);
| }
| else version(Darwin)
| {
| import core.time : mach_absolute_time;
| ulong ticks = mach_absolute_time();
| }
| else version(Posix)
| {
| import core.sys.posix.time : clock_gettime, CLOCK_MONOTONIC, timespec;
0000000| timespec ts = void;
0000000| const tserr = clock_gettime(CLOCK_MONOTONIC, &ts);
| // Should never fail. The only allowed error codes are
| // EINVAL if the 1st argument is an invalid clock ID and
| // EFAULT if the 2nd argument is an invalid address.
0000000| assert(tserr == 0, "Call to clock_gettime failed.");
0000000| ulong ticks = (cast(ulong) ts.tv_sec << 32) ^ ts.tv_nsec;
| }
| version(Posix)
| {
| import core.sys.posix.unistd : getpid;
| import core.sys.posix.pthread : pthread_self;
0000000| auto pid = cast(uint) getpid;
0000000| auto tid = cast(uint) pthread_self();
| }
| else
| version(Windows)
| {
| import core.sys.windows.winbase : GetCurrentProcessId, GetCurrentThreadId;
| auto pid = cast(uint) GetCurrentProcessId;
| auto tid = cast(uint) GetCurrentThreadId;
| }
0000000| ulong k = ((cast(ulong)pid << 32) ^ tid) + ticks;
0000000| k ^= k >> 33;
0000000| k *= 0xff51afd7ed558ccd;
0000000| k ^= k >> 33;
0000000| k *= 0xc4ceb9fe1a85ec53;
0000000| k ^= k >> 33;
0000000| return k;
|}
|
|///
|@safe version(mir_random_test) unittest
|{
| auto rnd = Random(unpredictableSeed);
| auto n = rnd();
| static assert(is(typeof(n) == size_t));
|}
|
|/++
|The "default", "favorite", "suggested" random number generator type on
|the current platform. It is an alias for one of the
|generators. You may want to use it if (1) you need to generate some
|nice random numbers, and (2) you don't care for the minutiae of the
|method being used.
|+/
|static if (is(size_t == uint))
| alias Random = Mt19937;
|else
| alias Random = Mt19937_64;
|
|///
|version(mir_random_test) unittest
|{
| import std.traits;
| static assert(isSaturatedRandomEngine!Random);
| static assert(is(EngineReturnType!Random == size_t));
|}
|
|static if (THREAD_LOCAL_STORAGE_AVAILABLE)
|{
| /++
| Thread-local instance of the default $(LREF Random) allocated and seeded independently
| for each thread. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS).
| +/
| alias rne = threadLocal!Random;
| ///
| @nogc nothrow @safe version(mir_random_test) unittest
| {
| import mir.random;
| import std.complex;
|
| auto c = complex(rne.rand!real, rne.rand!real);
|
| int[10] array;
| foreach (ref e; array)
| e = rne.rand!int;
| auto picked = array[rne.randIndex(array.length)];
| }
|
| private static struct TL(Engine)
| if (isSaturatedRandomEngine!Engine && is(Engine == struct))
| {
| static bool initialized;
| static if (_isVoidInitOkay!Engine)
| static Engine engine = void;
| else static if (__traits(compiles, { Engine defaultConstructed; }))
| static Engine engine;
| else
| static Engine engine = Engine.init;
|
| static if (is(ucent) && is(typeof((ucent t) => Engine(t))))
| alias seed_t = ucent;
| else static if (is(typeof((ulong t) => Engine(t))))
| alias seed_t = ulong;
| else static if (is(typeof((uint t) => Engine(t))))
| alias seed_t = uint;
| else
| alias seed_t = EngineReturnType!Engine;
|
| pragma(inline, false) // Usually called only once per thread.
| private static void reseed()
| {
0000000| engine.__ctor(unpredictableSeed!(seed_t));
0000000| initialized = true;
| }
| }
| /++
| `threadLocal!Engine` returns a reference to a thread-local instance of
| the specified random number generator allocated and seeded uniquely
| for each thread. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS).
|
| `threadLocalPtr!Engine` is a pointer to the area of thread-local
| storage used by `threadLocal!Engine`. This function is provided because
| the compiler can infer it is `@safe`, unlike `&(threadLocal!Engine)`.
| Like `threadLocal!Engine` this function will auto-initialize the engine.
| $(I Do not share pointers returned by threadLocalPtr between
| threads!)
|
| `threadLocalInitialized!Engine` is a low-level way to explicitly change
| the "initialized" flag used by `threadLocal!Engine` to determine whether
| the Engine needs to be seeded. Setting this to `false` gives a way of
| forcing the next call to `threadLocal!Engine` to reseed. In general this
| is unnecessary but there are some specialized use cases where users have
| requested this ability.
| +/
| @property ref Engine threadLocal(Engine)()
| if (isSaturatedRandomEngine!Engine && is(Engine == struct))
| {
| version (DigitalMars)
| pragma(inline);//DMD may fail to inline this.
| else
| pragma(inline, true);
| import mir.utility: _expect;
0000000| if (_expect(!TL!Engine.initialized, false))
| {
0000000| TL!Engine.reseed();
| }
0000000| return TL!Engine.engine;
| }
| /// ditto
| @property Engine* threadLocalPtr(Engine)()
| if (isSaturatedRandomEngine!Engine && is(Engine == struct))
| {
| version (DigitalMars)
| pragma(inline);//DMD may fail to inline this.
| else
| pragma(inline, true);
| import mir.utility: _expect;
| if (_expect(!TL!Engine.initialized, false))
| {
| TL!Engine.reseed();
| }
| return &TL!Engine.engine;
| }
| /// ditto
| @property ref bool threadLocalInitialized(Engine)()
| if (isSaturatedRandomEngine!Engine && is(Engine == struct))
| {
| version (DigitalMars)
| pragma(inline);//DMD may fail to inline this.
| else
| pragma(inline, true);
| return TL!Engine.initialized;
| }
| ///
| @nogc nothrow @safe version(mir_random_test) unittest
| {
| import mir.random;
| import mir.random.engine.xorshift;
|
| alias gen = threadLocal!Xorshift1024StarPhi;
| double x = gen.rand!double;
| size_t i = gen.randIndex(100u);
| ulong a = gen.rand!ulong;
| }
| ///
| @nogc nothrow @safe version(mir_random_test) unittest
| {
| import mir.random;
| //If you need a pointer to the engine, getting it like this is @safe:
| Random* ptr = threadLocalPtr!Random;
| }
| ///
| @nogc nothrow @safe version(mir_random_test) unittest
| {
| import mir.random;
| import mir.random.engine.xorshift;
| //If you need to mark the engine as uninitialized to force a reseed,
| //you can do it like this:
| threadLocalInitialized!Xorshift1024StarPhi = false;
| }
| ///
| @nogc nothrow @safe version(mir_random_test) unittest
| {
| import mir.random;
| import mir.random.engine.mersenne_twister;
| //You can mark the engine as already initialized to skip
| //automatic seeding then initialize it yourself, for instance
| //if you want to use a known seed rather than a random one.
| threadLocalInitialized!Mt19937 = true;
| immutable uint[4] customSeed = [0x123, 0x234, 0x345, 0x456];
| threadLocal!Mt19937.__ctor(customSeed);
| foreach(_; 0..999)
| threadLocal!Mt19937.rand!uint;
| assert(3460025646u == threadLocal!Mt19937.rand!uint);
| }
| ///
| @nogc nothrow @safe version(mir_random_test) unittest
| {
| import mir.random;
| import mir.random.engine.xorshift;
|
| alias gen = threadLocal!Xorshift1024StarPhi;
|
| //If you want to you can call the generator's opCall instead of using
| //rand!T but it is somewhat clunky because of the ambiguity of
| //@property syntax: () looks like optional function parentheses.
| static assert(!__traits(compiles, {ulong x0 = gen();}));//<-- Won't work
| static assert(is(typeof(gen()) == Xorshift1024StarPhi));//<-- because the type is this.
| ulong x1 = gen.opCall();//<-- This works though.
| ulong x2 = gen()();//<-- This also works.
|
| //But instead of any of those you should really just use gen.rand!T.
| ulong x3 = gen.rand!ulong;
| }
|// ///
|// @nogc nothrow pure @safe version(mir_random_test) unittest
|// {
|// //If you want something like Phobos std.random.rndGen and
|// //don't care about the specific algorithm you can do this:
|// alias rndGen = threadLocal!Random;
|// }
|
| @nogc nothrow @system version(mir_random_test) unittest
| {
| //Verify that the same instance is returned every time per thread.
| import mir.random;
| import mir.random.engine.xorshift;
|
| Xorshift1024StarPhi* addr = &(threadLocal!Xorshift1024StarPhi());
| Xorshift1024StarPhi* sameAddr = &(threadLocal!Xorshift1024StarPhi());
| assert(addr is sameAddr);
| assert(sameAddr is threadLocalPtr!Xorshift1024StarPhi);
| }
|
| /++
| Sets or resets the _seed of `threadLocal!Engine` using the given arguments.
| It is not necessary to call this except if you wish to ensure the
| PRNG uses a known _seed.
| +/
| void setThreadLocalSeed(Engine, A...)(auto ref A seed)
| if (isSaturatedRandomEngine!Engine && is(Engine == struct)
| && A.length >= 1 && is(typeof((ref A a) => Engine(a))))
| {
| TL!Engine.initialized = true;
| TL!Engine.engine.__ctor(seed);
| }
| ///
| @nogc nothrow @system version(mir_random_test) unittest
| {
| import mir.random;
|
| alias rnd = threadLocal!Random;
|
| setThreadLocalSeed!Random(123);
| immutable float x = rnd.rand!float;
|
| assert(x != rnd.rand!float);
|
| setThreadLocalSeed!Random(123);
| immutable float y = rnd.rand!float;
|
| assert(x == y);
| }
|}
|else
|{
| static assert(!THREAD_LOCAL_STORAGE_AVAILABLE);
|
| @property ref Random rne()()
| {
| static assert(0, "Thread-local storage not available!");
| }
|
| template threadLocal(T)
| {
| static assert(0, "Thread-local storage not available!");
| }
|
| template threadLocalPtr(T)
| {
| static assert(0, "Thread-local storage not available!");
| }
|
| template threadLocalInitialized(T)
| {
| static assert(0, "Thread-local storage not available!");
| }
|
| template setThreadLocalSeed(T, A...)
| {
| static assert(0, "Thread-local storage not available!");
| }
|}
|
|version(linux)
|{
| import mir.linux._asm.unistd;
| enum bool LINUX_NR_GETRANDOM = (__traits(compiles, {enum e = NR_getrandom;}));
| //If X86_64 or X86 are missing there is a problem with the library.
| static if (!LINUX_NR_GETRANDOM)
| {
| version (X86_64)
| static assert(0, "Missing linux syscall constants!");
| version (X86)
| static assert(0, "Missing linux syscall constants!");
| }
|}
|else
| enum bool LINUX_NR_GETRANDOM = false;
|
|static if (LINUX_NR_GETRANDOM)
|{
| // getrandom was introduced in Linux 3.17
| private __gshared bool getRandomFailedENOSYS = false;
|
| private extern(C) int syscall(size_t ident, size_t n, size_t arg1, size_t arg2) @nogc nothrow;
|
| /*
| * Flags for getrandom(2)
| *
| * GRND_NONBLOCK Don't block and return EAGAIN instead
| * GRND_RANDOM Use the /dev/random pool instead of /dev/urandom
| */
| private enum GRND_NONBLOCK = 0x0001;
| private enum GRND_RANDOM = 0x0002;
|
| private enum GETRANDOM = NR_getrandom;
|
| /*
| http://man7.org/linux/man-pages/man2/getrandom.2.html
| If the urandom source has been initialized, reads of up to 256 bytes
| will always return as many bytes as requested and will not be
| interrupted by signals. No such guarantees apply for larger buffer
| sizes.
| */
| private ptrdiff_t genRandomImplSysBlocking()(scope void* ptr, size_t len) @nogc nothrow @system
| {
0000000| while (len > 0)
| {
0000000| auto res = syscall(GETRANDOM, cast(size_t) ptr, len, 0);
0000000| if (res >= 0)
| {
0000000| len -= res;
0000000| ptr += res;
| }
| else
| {
0000000| return res;
| }
| }
0000000| return 0;
| }
|
| /*
| * If the GRND_NONBLOCK flag is set, then
| * getrandom() does not block in these cases, but instead
| * immediately returns -1 with errno set to EAGAIN.
| */
| private ptrdiff_t genRandomImplSysNonBlocking()(scope void* ptr, size_t len) @nogc nothrow @system
| {
0000000| return syscall(GETRANDOM, cast(size_t) ptr, len, GRND_NONBLOCK);
| }
|}
|
|version(AnyARC4Random)
|extern(C) private @nogc nothrow
|{
| void arc4random_buf(scope void* buf, size_t nbytes) @system;
| uint arc4random() @trusted;
|}
|
|version(Darwin)
|{
| //On Darwin /dev/random is identical to /dev/urandom (neither blocks
| //when there is low system entropy) so there is no point mucking
| //about with file descriptors. Just use arc4random_buf for both.
|}
|else version(Posix)
|{
| import core.stdc.stdio : fclose, feof, ferror, fopen, fread;
| alias IOType = typeof(fopen("a", "b"));
| private __gshared IOType fdRandom;
| version (SecureARC4Random)
| {
| //Don't need /dev/urandom if we have arc4random_buf.
| }
| else
| private __gshared IOType fdURandom;
|
|
| /* The /dev/random device is a legacy interface which dates back to a
| time where the cryptographic primitives used in the implementation of
| /dev/urandom were not widely trusted. It will return random bytes
| only within the estimated number of bits of fresh noise in the
| entropy pool, blocking if necessary. /dev/random is suitable for
| applications that need high quality randomness, and can afford
| indeterminate delays.
|
| When the entropy pool is empty, reads from /dev/random will block
| until additional environmental noise is gathered.
| */
| private ptrdiff_t genRandomImplFileBlocking()(scope void* ptr, size_t len) @nogc nothrow @system
| {
0000000| if (fdRandom is null)
| {
0000000| fdRandom = fopen("/dev/random", "r");
0000000| if (fdRandom is null)
0000000| return -1;
| }
|
0000000| while (len > 0)
| {
0000000| auto res = fread(ptr, 1, len, fdRandom);
0000000| len -= res;
0000000| ptr += res;
| // check for possible permanent errors
0000000| if (len != 0)
| {
0000000| if (fdRandom.ferror)
0000000| return -1;
|
0000000| if (fdRandom.feof)
0000000| return -1;
| }
| }
|
0000000| return 0;
| }
|}
|
|version (SecureARC4Random)
|{
| //Don't need /dev/urandom if we have arc4random_buf.
|}
|else version(Posix)
|{
| /**
| When read, the /dev/urandom device returns random bytes using a
| pseudorandom number generator seeded from the entropy pool. Reads
| from this device do not block (i.e., the CPU is not yielded), but can
| incur an appreciable delay when requesting large amounts of data.
| When read during early boot time, /dev/urandom may return data prior
| to the entropy pool being initialized.
| */
| private ptrdiff_t genRandomImplFileNonBlocking()(scope void* ptr, size_t len) @nogc nothrow @system
| {
0000000| if (fdURandom is null)
| {
0000000| fdURandom = fopen("/dev/urandom", "r");
0000000| if (fdURandom is null)
0000000| return -1;
| }
|
0000000| auto res = fread(ptr, 1, len, fdURandom);
| // check for possible errors
0000000| if (res != len)
| {
0000000| if (fdURandom.ferror)
0000000| return -1;
|
0000000| if (fdURandom.feof)
0000000| return -1;
| }
0000000| return res;
| }
|}
|
|version(Windows)
|{
| // the wincrypt headers in druntime are broken for x64!
| private alias ULONG_PTR = size_t; // uint in druntime
| private alias BOOL = bool;
| private alias DWORD = uint;
| private alias LPCWSTR = wchar*;
| private alias PBYTE = ubyte*;
| private alias HCRYPTPROV = ULONG_PTR;
| private alias LPCSTR = const(char)*;
|
| private extern(Windows) BOOL CryptGenRandom(HCRYPTPROV, DWORD, PBYTE) @nogc @safe nothrow;
| private extern(Windows) BOOL CryptAcquireContextA(HCRYPTPROV*, LPCSTR, LPCSTR, DWORD, DWORD) @nogc nothrow;
| private extern(Windows) BOOL CryptAcquireContextW(HCRYPTPROV*, LPCWSTR, LPCWSTR, DWORD, DWORD) @nogc nothrow;
| private extern(Windows) BOOL CryptReleaseContext(HCRYPTPROV, ULONG_PTR) @nogc nothrow;
|
| private __gshared ULONG_PTR hProvider;
|
| private auto initGetRandom()() @nogc @trusted nothrow
| {
| import core.sys.windows.winbase : GetLastError;
| import core.sys.windows.winerror : NTE_BAD_KEYSET;
| import core.sys.windows.wincrypt : PROV_RSA_FULL, CRYPT_NEWKEYSET, CRYPT_VERIFYCONTEXT, CRYPT_SILENT;
|
| // https://msdn.microsoft.com/en-us/library/windows/desktop/aa379886(v=vs.85).aspx
| // For performance reasons, we recommend that you set the pszContainer
| // parameter to NULL and the dwFlags parameter to CRYPT_VERIFYCONTEXT
| // in all situations where you do not require a persisted key.
| // CRYPT_SILENT is intended for use with applications for which the UI cannot be displayed by the CSP.
| if (!CryptAcquireContextW(&hProvider, null, null, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT | CRYPT_SILENT))
| {
| if (GetLastError() == NTE_BAD_KEYSET)
| {
| // Attempt to create default container
| if (!CryptAcquireContextA(&hProvider, null, null, PROV_RSA_FULL, CRYPT_NEWKEYSET | CRYPT_SILENT))
| return 1;
| }
| else
| {
| return 1;
| }
| }
|
| return 0;
| }
|}
|
|/++
|Constructs the mir random seed generators.
|This constructor needs to be called once $(I before)
|other calls in `mir.random.engine`.
|
|Automatically called by DRuntime.
|+/
|extern(C) void mir_random_engine_ctor() @system nothrow @nogc
|{
| version(Windows)
| {
| if (hProvider == 0)
| initGetRandom;
| }
|}
|
|/++
|Destructs the mir random seed generators.
|
|Automatically called by DRuntime.
|+/
|extern(C) void mir_random_engine_dtor() @system nothrow @nogc
|{
| version(Windows)
| {
| if (hProvider > 0)
| CryptReleaseContext(hProvider, 0);
| }
| else
| version(Darwin)
| {
|
| }
| else
| version(Posix)
| {
0000000| if (fdRandom !is null)
0000000| fdRandom.fclose;
|
| version (SecureARC4Random)
| {
| //Don't need /dev/urandom if we have arc4random_buf.
| }
0000000| else if (fdURandom !is null)
0000000| fdURandom.fclose;
| }
|}
|
|
|version(D_BetterC)
|{
| pragma(crt_constructor)
| extern(C) void mir_random_engine_ctor_() @system nothrow @nogc
| {
| mir_random_engine_ctor();
| }
|
| pragma(crt_destructor)
| extern(C) void mir_random_engine_dtor_() @system nothrow @nogc
| {
| mir_random_engine_dtor();
| }
|}
|else
|{
| /// Automatically calls the extern(C) module constructor
| shared static this()
| {
1| mir_random_engine_ctor();
| }
|
| /// Automatically calls the extern(C) module destructor
| shared static ~this()
| {
0000000| mir_random_engine_dtor();
| }
|}
|
|/++
|Fills a buffer with random data.
|If not enough entropy has been gathered, it will block.
|
|Note that on Mac OS X this method will never block.
|
|Params:
| ptr = pointer to the buffer to fill
| len = length of the buffer (in bytes)
|
|Returns:
| A non-zero integer if an error occurred.
|+/
|extern(C) ptrdiff_t mir_random_genRandomBlocking(scope void* ptr , size_t len) @nogc nothrow @system
|{
| version(Windows)
| {
| static if (DWORD.max >= size_t.max)
| while(!CryptGenRandom(hProvider, len, cast(PBYTE) ptr)) {}
| else
| while (len != 0)
| {
| import mir.utility : min;
| const n = min(DWORD.max, len);
| if (CryptGenRandom(hProvider, cast(DWORD) n, cast(PBYTE) ptr))
| {
| len -= n;
| }
| }
| return 0;
| }
| else version (Darwin)
| {
| arc4random_buf(ptr, len);
| return 0;
| }
| else
| {
| static if (LINUX_NR_GETRANDOM)
0000000| if (!getRandomFailedENOSYS) // harmless data race
| {
| import core.stdc.errno;
0000000| ptrdiff_t result = genRandomImplSysBlocking(ptr, len);
0000000| if (result >= 0)
0000000| return result;
0000000| if (errno != ENOSYS)
0000000| return result;
0000000| getRandomFailedENOSYS = true; // harmless data race
| }
0000000| return genRandomImplFileBlocking(ptr, len);
| }
|}
|
|/// ditto
|alias genRandomBlocking = mir_random_genRandomBlocking;
|
|/// ditto
|ptrdiff_t genRandomBlocking()(scope ubyte[] buffer) @nogc nothrow @trusted
|{
| pragma(inline, true);
| return mir_random_genRandomBlocking(buffer.ptr, buffer.length);
|}
|
|///
|@safe nothrow version(mir_random_test) unittest
|{
| ubyte[] buf = new ubyte[10];
| genRandomBlocking(buf);
|
| int sum;
| foreach (b; buf)
| sum += b;
|
| assert(sum > 0, "Only zero points generated");
|}
|
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| ubyte[10] buf;
| genRandomBlocking(buf);
|
| int sum;
| foreach (b; buf)
| sum += b;
|
| assert(sum > 0, "Only zero points generated");
|}
|
|/++
|Fills a buffer with random data.
|If not enough entropy has been gathered, it won't block.
|Hence the return value should be inspected.
|
|On Linux >= 3.17 genRandomNonBlocking is guaranteed to succeed for 256 bytes and
|fewer.
|
|On Mac OS X, OpenBSD, and NetBSD genRandomNonBlocking is guaranteed to
|succeed for any number of bytes.
|
|Params:
| ptr = pointer to the buffer to fill
| len = length of the buffer (in bytes)
|
|Returns:
| The number of bytes filled, or a negative number if an error occurred
|+/
|extern(C) size_t mir_random_genRandomNonBlocking(scope void* ptr, size_t len) @nogc nothrow @system
|{
| version(Windows)
| {
| static if (DWORD.max < size_t.max)
| if (len > DWORD.max)
| len = DWORD.max;
| if (!CryptGenRandom(hProvider, cast(DWORD) len, cast(PBYTE) ptr))
| return -1;
| return len;
| }
| else version(SecureARC4Random)
| {
| arc4random_buf(ptr, len);
| return len;
| }
| else
| {
| static if (LINUX_NR_GETRANDOM)
0000000| if (!getRandomFailedENOSYS) // harmless data race
| {
| import core.stdc.errno;
0000000| ptrdiff_t result = genRandomImplSysNonBlocking(ptr, len);
0000000| if (result >= 0)
0000000| return result;
0000000| if (errno != ENOSYS)
0000000| return result;
0000000| getRandomFailedENOSYS = true; // harmless data race
| }
0000000| return genRandomImplFileNonBlocking(ptr, len);
| }
|}
|/// ditto
|alias genRandomNonBlocking = mir_random_genRandomNonBlocking;
|/// ditto
|size_t genRandomNonBlocking()(scope ubyte[] buffer) @nogc nothrow @trusted
|{
| pragma(inline, true);
| return mir_random_genRandomNonBlocking(buffer.ptr, buffer.length);
|}
|
|///
|@safe nothrow version(mir_random_test) unittest
|{
| ubyte[] buf = new ubyte[10];
| genRandomNonBlocking(buf);
|
| int sum;
| foreach (b; buf)
| sum += b;
|
| assert(sum > 0, "Only zero points generated");
|}
|
|@nogc nothrow @safe
|version(mir_random_test) unittest
|{
| ubyte[10] buf;
| genRandomNonBlocking(buf);
|
| int sum;
| foreach (b; buf)
| sum += b;
|
| assert(sum > 0, "Only zero points generated");
|}
../../../.dub/packages/mir-random-2.2.15/mir-random/source/mir/random/engine/package.d is 1% covered
<<<<<< EOF
# path=./source-mir-sparse-blas-package.lst
|/**
|License: $(LINK2 http://boost.org/LICENSE_1_0.txt, Boost License 1.0).
|
|Authors: Ilya Yaroshenko
|*/
|module mir.sparse.blas;
|
|public import mir.sparse.blas.dot;
|public import mir.sparse.blas.axpy;
|public import mir.sparse.blas.gemv;
|public import mir.sparse.blas.gemm;
source/mir/sparse/blas/package.d has no code
<<<<<< EOF
# path=./source-mir-sparse-package.lst
|/++
|$(H2 Sparse Tensors)
|
|License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
|
|Authors: Ilya Yaroshenko
|+/
|module mir.sparse;
|
|import std.traits;
|import std.meta;
|
|import mir.ndslice.slice;
|public import mir.ndslice.field: SparseField;
|public import mir.ndslice.iterator: ChopIterator, FieldIterator;
|public import mir.series: isSeries, Series, mir_series, series;
|public import mir.ndslice.slice: CoordinateValue, Slice, mir_slice;
|public import mir.ndslice.topology: chopped;
|
|//TODO: replace with `static foreach`
|private template Iota(size_t i, size_t j)
|{
| static assert(i <= j, "Iota: i should be less than or equal to j");
| static if (i == j)
| alias Iota = AliasSeq!();
| else
| alias Iota = AliasSeq!(i, Iota!(i + 1, j));
|}
|
|/++
|Sparse tensors represented in Dictionary of Keys (DOK) format.
|
|Params:
| N = dimension count
| lengths = list of dimension lengths
|Returns:
| `N`-dimensional slice composed of indices
|See_also: $(LREF Sparse)
|+/
|Sparse!(T, N) sparse(T, size_t N)(size_t[N] lengths...)
|{
12| T[size_t] table;
12| table[0] = 0;
12| table.remove(0);
12| assert(table !is null);
12| with (typeof(return)) return FieldIterator!(SparseField!T)(0, SparseField!T(table)).sliced(lengths);
|}
|
|///
|pure unittest
|{
1| auto slice = sparse!double(2, 3);
1| slice[0][] = 1;
1| slice[0, 1] = 2;
1| --slice[0, 0];
1| slice[1, 2] += 4;
|
1| assert(slice == [[0, 2, 1], [0, 0, 4]]);
|
| import std.range.primitives: isRandomAccessRange;
| static assert(isRandomAccessRange!(Sparse!(double, 2)));
|
| import mir.ndslice.slice: Slice, DeepElementType;
| static assert(is(Sparse!(double, 2) : Slice!(FieldIterator!(SparseField!double), 2)));
| static assert(is(DeepElementType!(Sparse!(double, 2)) == double));
|}
|
|/++
|Returns unsorted forward range of (coordinate, value) pairs.
|
|Params:
| slice = sparse slice with pure structure. Any operations on the structure of the slice are not allowed.
|+/
|auto byCoordinateValue(size_t N, T)(Slice!(FieldIterator!(SparseField!T), N) slice)
|{
| struct ByCoordinateValue
| {
| private sizediff_t[N-1] _strides;
| mixin _sparse_range_methods!(typeof(slice._iterator._field._table.byKeyValue()));
|
| auto front() @property
| {S:
5| assert(!_range.empty);
5| auto iv = _range.front;
5| size_t index = iv.key;
10| if (!(_l <= index && index < _r))
| {
0000000| _range.popFront;
0000000| goto S;
| }
5| CoordinateValue!(T, N) ret;
| foreach (i; Iota!(0, N - 1))
| {
5| ret.index[i] = index / _strides[i];
5| index %= _strides[i];
| }
5| ret.index[N - 1] = index;
5| ret.value = iv.value;
5| return ret;
| }
| }
1| size_t l = slice._iterator._index;
1| size_t r = l + slice.elementCount;
1| size_t length = slice._iterator._field._table.byKey.countInInterval(l, r);
1| return ByCoordinateValue(slice.strides[0..N-1], length, l, r, slice._iterator._field._table.byKeyValue);
|}
|
|///
|pure unittest
|{
| import mir.array.allocation: array;
| import mir.ndslice.sorting: sort;
| alias CV = CoordinateValue!(double, 2);
|
1| auto slice = sparse!double(3, 3);
1| slice[] = [[0, 2, 1], [0, 0, 4], [6, 7, 0]];
1| assert(slice.byCoordinateValue.array.sort() == [
| CV([0, 1], 2),
| CV([0, 2], 1),
| CV([1, 2], 4),
| CV([2, 0], 6),
| CV([2, 1], 7)]);
|}
|
|/++
|Returns unsorted forward range of coordinates.
|Params:
| slice = sparse slice with pure structure. Any operations on the structure of the slice are not allowed.
|+/
|auto byCoordinate(T, size_t N)(Slice!(FieldIterator!(SparseField!T), N) slice)
|{
| struct ByCoordinate
| {
| private sizediff_t[N-1] _strides;
| mixin _sparse_range_methods!(typeof(slice._iterator._field._table.byKey()));
|
| auto front() @property
| {S:
5| assert(!_range.empty);
5| size_t index = _range.front;
10| if (!(_l <= index && index < _r))
| {
0000000| _range.popFront;
0000000| goto S;
| }
5| size_t[N] ret;
| foreach (i; Iota!(0, N - 1))
| {
5| ret[i] = index / _strides[i];
5| index %= _strides[i];
| }
5| ret[N - 1] = index;
5| return ret;
| }
| }
1| size_t l = slice._iterator._index;
1| size_t r = l + slice.elementCount;
1| size_t length = slice._iterator._field._table.byKey.countInInterval(l, r);
1| return ByCoordinate(slice.strides[0 .. N - 1], length, l, r, slice._iterator._field._table.byKey);
|}
|
|///
|pure unittest
|{
| import mir.array.allocation: array;
| import mir.ndslice.sorting: sort;
|
1| auto slice = sparse!double(3, 3);
1| slice[] = [[0, 2, 1], [0, 0, 4], [6, 7, 0]];
1| assert(slice.byCoordinate.array.sort() == [
| [0, 1],
| [0, 2],
| [1, 2],
| [2, 0],
| [2, 1]]);
|}
|
|/++
|Returns unsorted forward range of values.
|Params:
| slice = sparse slice with pure structure. Any operations on the structure of the slice are not allowed.
|+/
|auto onlyByValue(T, size_t N)(Slice!(FieldIterator!(SparseField!T), N) slice)
|{
| struct ByValue
| {
| mixin _sparse_range_methods!(typeof(slice._iterator._field._table.byKeyValue()));
|
| auto front() @property
| {S:
5| assert(!_range.empty);
5| auto iv = _range.front;
5| size_t index = iv.key;
10| if (!(_l <= index && index < _r))
| {
0000000| _range.popFront;
0000000| goto S;
| }
5| return iv.value;
| }
| }
1| size_t l = slice._iterator._index;
1| size_t r = l + slice.elementCount;
1| size_t length = slice._iterator._field._table.byKey.countInInterval(l, r);
1| return ByValue(length, l, r, slice._iterator._field._table.byKeyValue);
|}
|
|///
|pure unittest
|{
| import mir.array.allocation: array;
| import mir.ndslice.sorting: sort;
|
1| auto slice = sparse!double(3, 3);
1| slice[] = [[0, 2, 1], [0, 0, 4], [6, 7, 0]];
1| assert(slice.onlyByValue.array.sort() == [1, 2, 4, 6, 7]);
|}
|
|pragma(inline, false)
|private size_t countInInterval(Range)(Range range, size_t l, size_t r)
|{
3| size_t count;
51| foreach(ref i; range)
30| if (l <= i && i < r)
15| count++;
3| return count;
|}
|
|private mixin template _sparse_range_methods(Range)
|{
| private size_t _length, _l, _r;
| private Range _range;
|
| void popFront()
| {
15| assert(!_range.empty);
15| _range.popFront;
15| _length--;
| }
|
| bool empty() const @property
| {
0000000| return _length == 0;
| }
|
| auto save() @property
| {
0000000| auto ret = this;
0000000| ret._range = ret._range.save;
0000000| return ret;
| }
|
| size_t length() const @property
| {
3| return _length;
| }
|}
|
|/++
|Returns compressed tensor.
|Note: allocates using GC.
|+/
|auto compress(I = uint, J = size_t, SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) slice)
| if (N > 1)
|{
8| return compressWithType!(DeepElementType!(Slice!(Iterator, N, kind)), I, J)(slice);
|}
|
|/// Sparse tensor compression
|unittest
|{
1| auto sparse = sparse!double(5, 3);
1| sparse[] =
| [[0, 2, 1],
| [0, 0, 4],
| [0, 0, 0],
| [6, 0, 9],
| [0, 0, 5]];
|
1| auto crs = sparse.compressWithType!double;
| // assert(crs.iterator._field == CompressedField!(double, uint, uint)(
| // 3,
| // [2, 1, 4, 6, 9, 5],
| // [1, 2, 2, 0, 2, 2],
| // [0, 2, 3, 3, 5, 6]));
|}
|
|/// Sparse tensor compression
|unittest
|{
1| auto sparse = sparse!double(5, 8);
1| sparse[] =
| [[0, 2, 0, 0, 0, 0, 0, 1],
| [0, 0, 0, 0, 0, 0, 0, 4],
| [0, 0, 0, 0, 0, 0, 0, 0],
| [6, 0, 0, 0, 0, 0, 0, 9],
| [0, 0, 0, 0, 0, 0, 0, 5]];
|
1| auto crs = sparse.compressWithType!double;
| // assert(crs.iterator._field == CompressedField!(double, uint, uint)(
| // 8,
| // [2, 1, 4, 6, 9, 5],
| // [1, 7, 7, 0, 7, 7],
| // [0, 2, 3, 3, 5, 6]));
|}
|
|/// Dense tensor compression
|unittest
|{
| import mir.ndslice.allocation: slice;
|
1| auto sl = slice!double(5, 3);
1| sl[] =
| [[0, 2, 1],
| [0, 0, 4],
| [0, 0, 0],
| [6, 0, 9],
| [0, 0, 5]];
|
1| auto crs = sl.compressWithType!double;
|
| // assert(crs.iterator._field == CompressedField!(double, uint, uint)(
| // 3,
| // [2, 1, 4, 6, 9, 5],
| // [1, 2, 2, 0, 2, 2],
| // [0, 2, 3, 3, 5, 6]));
|}
|
|/// Dense tensor compression
|unittest
|{
| import mir.ndslice.allocation: slice;
|
1| auto sl = slice!double(5, 8);
1| sl[] =
| [[0, 2, 0, 0, 0, 0, 0, 1],
| [0, 0, 0, 0, 0, 0, 0, 4],
| [0, 0, 0, 0, 0, 0, 0, 0],
| [6, 0, 0, 0, 0, 0, 0, 9],
| [0, 0, 0, 0, 0, 0, 0, 5]];
|
1| auto crs = sl.compress;
| // assert(crs.iterator._field == CompressedField!(double, uint, uint)(
| // 8,
| // [2, 1, 4, 6, 9, 5],
| // [1, 7, 7, 0, 7, 7],
| // [0, 2, 3, 3, 5, 6]));
|}
|
|/++
|Returns compressed tensor with different element type.
|Note: allocates using GC.
|+/
|Slice!(ChopIterator!(J*, Series!(I*, V*)), N - 1)
| compressWithType(V, I = uint, J = size_t, T, size_t N)
| (Slice!(FieldIterator!(SparseField!T), N) slice)
| if (is(T : V) && N > 1 && isUnsigned!I)
|{
| import mir.array.allocation: array;
| import mir.ndslice.sorting: sort;
| import mir.ndslice.topology: iota;
8| auto compressedData = slice
| .iterator
| ._field
| ._table
| .series!(size_t, T, I, V);
8| auto pointers = new J[slice.shape[0 .. N - 1].iota.elementCount + 1];
16| size_t k = 1, shift;
8| pointers[0] = 0;
8| pointers[1] = 0;
8| const rowLength = slice.length!(N - 1);
233| if(rowLength) foreach (ref index; compressedData.index.field)
| {
| for(;;)
| {
90| sizediff_t newIndex = index - shift;
90| if (newIndex >= rowLength)
| {
23| pointers[k + 1] = pointers[k];
23| shift += rowLength;
23| k++;
23| continue;
| }
67| index = cast(I)newIndex;
67| pointers[k] = cast(J) (pointers[k] + 1);
67| break;
| }
|
| }
8| pointers[k + 1 .. $] = pointers[k];
8| return compressedData.chopped(pointers);
|}
|
|
|/// ditto
|Slice!(ChopIterator!(J*, Series!(I*, V*)), N - 1)
| compressWithType(V, I = uint, J = size_t, Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if (!is(Iterator : FieldIterator!(SparseField!ST), ST) && is(DeepElementType!(Slice!(Iterator, N, kind)) : V) && N > 1 && isUnsigned!I)
|{
| import std.array: appender;
| import mir.ndslice.topology: pack, flattened;
4| auto vapp = appender!(V[]);
4| auto iapp = appender!(I[]);
4| auto psl = slice.pack!1;
4| auto count = psl.elementCount;
4| auto pointers = new J[count + 1];
|
4| pointers[0] = 0;
4| auto elems = psl.flattened;
4| size_t j = 0;
72| foreach (ref pointer; pointers[1 .. $])
| {
20| auto row = elems.front;
20| elems.popFront;
20| size_t i;
445| foreach (e; row)
| {
135| if (e)
| {
24| vapp.put(e);
24| iapp.put(cast(I)i);
24| j++;
| }
135| i++;
| }
20| pointer = cast(J)j;
| }
4| return iapp.data.series(vapp.data).chopped(pointers);
|}
|
|
|/++
|Re-compresses a compressed tensor. Makes all values, indices, and pointers contiguous in memory.
|
|The sparse slice is iterated twice: the first time to get the length of each sparse row, and the second time to copy the data.
|
|Note: allocates using GC.
|+/
|Slice!(ChopIterator!(J*, Series!(I*, V*)), N)
| recompress
| (V, I = uint, J = size_t, Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) sparseSlice)
| if (isSeries!(DeepElementType!(Slice!(Iterator, N, kind))))
|{
| import mir.algorithm.iteration: each;
| import mir.conv: to, emplaceRef;
| import mir.ndslice.allocation: uninitSlice;
| import mir.ndslice.topology: pack, flattened, as, member, zip;
|
1| size_t count = sparseSlice.elementCount;
1| size_t length;
1| auto pointers = uninitSlice!J(count + 1);
1| pointers.front = 0;
1| sparseSlice
| .member!"data"
| .member!"elementCount"
5| .each!((len, ref ptr) {ptr = length += len;})(pointers[1 .. $]);
|
1| auto i = uninitSlice!I(length);
1| auto v = uninitSlice!V(length);
|
1| auto ret = i.series(v).chopped(pointers);
|
1| sparseSlice
| .each!((a, b) {
5| b.index[] = a.index.as!I;
5| b.value.each!(emplaceRef!V)(a.value.as!V);
| })(ret);
|
1| return ret;
|}
|
|///
|unittest
|{
| import mir.ndslice.topology: universal;
| import mir.ndslice.allocation: slice;
|
1| auto sl = slice!double(5, 8);
1| sl[] =
| [[0, 2, 0, 0, 0, 0, 0, 1],
| [0, 0, 0, 0, 0, 0, 0, 4],
| [0, 0, 0, 0, 0, 0, 0, 0],
| [6, 0, 0, 0, 0, 0, 0, 9],
| [0, 0, 0, 0, 0, 0, 0, 5]];
|
1| auto crs = sl.compress;
| // assert(crs.iterator._field == CompressedField!(double, uint, uint)(
| // 8,
| // [2, 1, 4, 6, 9, 5],
| // [1, 7, 7, 0, 7, 7],
| // [0, 2, 3, 3, 5, 6]));
|
| import mir.ndslice.dynamic: reversed;
1| auto rec = crs.reversed.recompress!real;
1| auto rev = sl.universal.reversed.compressWithType!real;
1| assert(rev.structure == rec.structure);
| // assert(rev.iterator._field.values == rec.iterator._field.values);
| // assert(rev.iterator._field.indeces == rec.iterator._field.indeces);
| // assert(rev.iterator._field.pointers == rec.iterator._field.pointers);
|}
|
|/++
|Sparse Slice in Dictionary of Keys (DOK) format.
|+/
|alias Sparse(T, size_t N = 1) = Slice!(FieldIterator!(SparseField!T), N);
|
|///
|alias CompressedVector(T, I = uint) = Series!(T*, I*);
|
|///
|alias CompressedMatrix(T, I = uint, J = size_t) = Slice!(ChopIterator!(J*, Series!(T*, I*)));
|
|///
|alias CompressedTensor(T, size_t N, I = uint, J = size_t) = Slice!(ChopIterator!(J*, Series!(T*, I*)), N - 1);
|
|///ditto
|alias CompressedTensor(T, size_t N : 1, I = uint) = Series!(I*, T*);
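|
|// A minimal DOK-to-CRS sketch, assuming only the `sparse` and `compress` helpers
|// shown above; the concrete sizes and values are illustrative.
|unittest
|{
|    auto dok = sparse!double(2, 3);   // Sparse!(double, 2), dictionary-of-keys storage
|    dok[0, 1] = 2;
|    dok[1, 2] = 5;
|    auto crs = dok.compress;          // one compressed row per matrix row
|    assert(crs.length == 2);
|}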
source/mir/sparse/package.d is 93% covered
<<<<<< EOF
# path=./source-mir-model-lda-hoffman.lst
|/**
|
|$(H3 Online variational Bayes for latent Dirichlet allocation)
|
|References:
| Hoffman, Matthew D., Blei, David M. and Bach, Francis R..
| "Online Learning for Latent Dirichlet Allocation.."
| Paper presented at the meeting of the NIPS, 2010.
|
|License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0).
|Copyright: 2016-, Ilya Yaroshenko
|Authors: Ilya Yaroshenko
|*/
|module mir.model.lda.hoffman;
|
|import std.traits;
|
|/++
|Online variational Bayes for LDA with mini-batches; use `kappa = 0` for batch variational Bayes.
|+/
|struct LdaHoffman(F)
| if (isFloatingPoint!F)
|{
| import std.parallelism;
| import mir.ndslice.iterator: FieldIterator;
| import mir.ndslice.topology: iota;
|
| import mir.ndslice.slice;
| import mir.ndslice.allocation: slice;
|
| import mir.math.common;
| import mir.sparse;
|
| private alias Vector = Slice!(F*);
| private alias Matrix = Slice!(F*, 2);
|
| private size_t D;
| private F alpha;
| private F eta;
| private F kappa;
| private F _tau;
| private F eps;
|
| private Matrix _lambda; // [k, w]
| private Matrix _beta; // [k, w]
|
| private TaskPool tp;
|
| private F[][] _lambdaTemp;
|
| @disable this();
| @disable this(this);
|
| /++
| Params:
| K = theme count
| W = dictionary size
| D = approximate total number of documents in a collection.
| alpha = Dirichlet document-topic prior (0.1)
| eta = Dirichlet word-topic prior (0.1)
| tau0 = `tau0 ≧ 0` slows down the early iterations of the algorithm.
| kappa = `kappa` belongs to $(LPAREN)0.5, 1] and controls the rate at which old values of lambda are forgotten.
| `lambda = (1 - rho(tau)) lambda + rho lambda', rho(tau) = (tau0 + tau)^(-kappa)`. Use `kappa = 0` for Batch variational Bayes LDA.
| eps = Stop iterations if `||lambda - lambda'||_l1 < s * eps`, where `s` is the document count in the batch.
| tp = task pool
| +/
0000000| this(size_t K, size_t W, size_t D, F alpha, F eta, F tau0, F kappa, F eps = 1e-5, TaskPool tp = taskPool())
| {
| import mir.random;
|
0000000| this.D = D;
0000000| this.alpha = alpha;
0000000| this.eta = eta;
0000000| this._tau = tau0;
0000000| this.kappa = kappa;
0000000| this.eps = eps;
0000000| this.tp = tp;
|
0000000| _lambda = slice!F(K, W);
0000000| _beta = slice!F(K, W);
0000000| _lambdaTemp = new F[][](tp.size + 1, W);
|
| import std.math: fabs;
0000000| auto gen = Random(unpredictableSeed);
0000000| foreach (r; _lambda)
0000000| foreach (ref e; r)
0000000| e = (gen.rand!F.fabs + 0.9) / 1.901;
|
0000000| updateBeta();
| }
|
| ///
| void updateBeta()
| {
0000000| foreach (i; tp.parallel(lambda.length.iota))
0000000| unparameterize(lambda[i], beta[i]);
| }
|
| /++
| Posterior over the topics
| +/
| Slice!(F*, 2) beta() @property
| {
0000000| return _beta;
| }
|
| /++
| Parameterized posterior over the topics.
| +/
| Slice!(F*, 2) lambda() @property
| {
0000000| return _lambda;
| }
|
| /++
| Count of already seen documents.
| Slows down the iterations of the algorithm.
| +/
| F tau() const @property
| {
0000000| return _tau;
| }
|
| /// ditto
| void tau(F v) @property
| {
0000000| _tau = v;
| }
|
| /++
| Accepts a mini-batch and performs multiple E-step iterations for each document and a single M-step.
|
| This implementation is optimized for sparse documents,
| which contain far fewer unique words than the dictionary.
|
| Params:
| n = mini-batch, a collection of compressed documents.
| maxIterations = maximal number of E-step iterations for a single document in the batch.
| +/
| size_t putBatch(SliceKind kind, C, I, J)(Slice!(ChopIterator!(J*, Series!(I*, C*)), 1, kind) n, size_t maxIterations)
| {
| return putBatchImpl(n.recompress!F, maxIterations);
| }
|
| private size_t putBatchImpl(Slice!(ChopIterator!(size_t*, Series!(uint*, F*))) n, size_t maxIterations)
| {
| import std.math: isFinite;
| import mir.sparse.blas.dot;
| import mir.sparse.blas.gemv;
| import mir.ndslice.dynamic: transposed;
| import mir.ndslice.topology: universal;
| import mir.internal.utility;
|
0000000| immutable S = n.length;
0000000| immutable K = _lambda.length!0;
0000000| immutable W = _lambda.length!1;
0000000| _tau += S;
0000000| auto theta = slice!F(S, K);
0000000| auto nsave = saveN(n);
|
0000000| immutable rho = pow!F(F(tau), -kappa);
0000000| auto thetat = theta.universal.transposed;
0000000| auto _gamma = slice!F(tp.size + 1, K);
0000000| shared size_t ret;
| // E step
0000000| foreach (d; tp.parallel(S.iota))
| {
0000000| auto gamma = _gamma[tp.workerIndex];
0000000| gamma[] = 1;
0000000| auto nd = n[d];
0000000| auto thetad = theta[d];
0000000| for (size_t c; ;c++)
| {
0000000| unparameterize(gamma, thetad);
|
0000000| selectiveGemv!"/"(_beta.universal.transposed, thetad, nd);
0000000| F sum = 0;
| {
0000000| auto beta = _beta;
0000000| auto th = thetad;
0000000| foreach (ref g; gamma)
| {
0000000| if (!th.front.isFinite)
0000000| th.front = F.max;
0000000| auto value = dot(nd, beta.front) * th.front + alpha;
0000000| sum += fabs(value - g);
0000000| g = value;
0000000| beta.popFront;
0000000| th.popFront;
| }
| }
0000000| if (c < maxIterations && sum > eps * K)
| {
0000000| nd.value[] = nsave[d].value;
0000000| continue;
| }
| import core.atomic;
0000000| ret.atomicOp!"+="(c);
0000000| break;
| }
| }
| // M step
0000000| foreach (k; tp.parallel(K.iota))
| {
0000000| auto lambdaTemp = _lambdaTemp[tp.workerIndex];
0000000| gemtv!F(F(1), n, thetat[k], F(0), lambdaTemp.sliced);
| import mir.algorithm.iteration: each;
0000000| each!((ref l, bk, lt) {l = (1 - rho) * l +
| rho * (eta + (F(D) / F(S)) * bk * lt);})(_lambda[k], _beta[k],lambdaTemp.sliced);
0000000| unparameterize(_lambda[k], _beta[k]);
| }
0000000| return ret;
| }
|
| private auto saveN(Slice!(ChopIterator!(size_t*, Series!(uint*, F*))) n)
| {
| import mir.series: series;
| import mir.ndslice.topology: chopped, universal;
0000000| return n.iterator._sliceable.index
| .series(n.iterator._sliceable.value.dup)
| .chopped(n.iterator._iterator.sliced(n.length + 1));
| }
|
| private static void unparameterize(Vector param, Vector posterior)
| {
0000000| assert(param.structure == posterior.structure);
| import mir.ndslice.topology: zip;
| import mir.math.func.expdigamma;
| import mir.math.sum: sum;
0000000| immutable c = 1 / expDigamma(sum(param));
0000000| foreach (e; zip(param, posterior))
0000000| e.b = c * expDigamma(e.a);
| }
|}
|
|unittest
|{
| alias ff = LdaHoffman!double;
|}
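|
|// A minimal construction sketch, assuming the parameter order documented on the
|// constructor above; the sizes and priors below are illustrative placeholders.
|unittest
|{
|    auto lda = LdaHoffman!double(2, 16, 1000, 0.1, 0.1, 1, 0.7);
|    assert(lda.beta.length!0 == 2);
|    assert(lda.beta.length!1 == 16);
|}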
source/mir/model/lda/hoffman.d is 0% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-iterator.lst
|/++
|This is a submodule of $(MREF mir,ndslice).
|
|Iterator is a type with a pointer like behavior.
|An ndslice can be created on top of an iterator using $(SUBREF slice, sliced).
|
|$(BOOKTABLE $(H2 Iterators),
|$(TR $(TH Iterator Name) $(TH Used By))
|$(T2 BytegroupIterator, $(SUBREF topology, bytegroup).)
|$(T2 CachedIterator, $(SUBREF topology, cached), $(SUBREF topology, cachedGC).)
|$(T2 ChopIterator, $(SUBREF topology, chopped))
|$(T2 FieldIterator, $(SUBREF slice, slicedField), $(SUBREF topology, bitwise), $(SUBREF topology, ndiota), and others.)
|$(T2 FlattenedIterator, $(SUBREF topology, flattened))
|$(T2 IndexIterator, $(SUBREF topology, indexed))
|$(T2 IotaIterator, $(SUBREF topology, iota))
|$(T2 MapIterator, $(SUBREF topology, map))
|$(T2 MemberIterator, $(SUBREF topology, member))
|$(T2 NeighboursIterator, $(SUBREF topology, withNeighboursSum))
|$(T2 RetroIterator, $(SUBREF topology, retro))
|$(T2 SliceIterator, $(SUBREF topology, map) in composition with $(LREF MapIterator) for packed slices.)
|$(T2 SlideIterator, $(SUBREF topology, diff), $(SUBREF topology, pairwise), and $(SUBREF topology, slide).)
|$(T2 StairsIterator, $(SUBREF topology, stairs))
|$(T2 StrideIterator, $(SUBREF topology, stride))
|$(T2 SubSliceIterator, $(SUBREF topology, subSlices))
|$(T2 TripletIterator, $(SUBREF topology, triplets))
|$(T2 ZipIterator, $(SUBREF topology, zip))
|)
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko
|
|Macros:
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|module mir.ndslice.iterator;
|
|import mir.internal.utility: Iota;
|import mir.math.common: optmath;
|import mir.ndslice.field;
|import mir.ndslice.internal;
|import mir.ndslice.slice: SliceKind, Slice, Universal, Canonical, Contiguous, isSlice;
|import mir.qualifier;
|import mir.conv;
|import std.traits;
|
|private static immutable assumeZeroShiftExceptionMsg = "*.assumeFieldsHaveZeroShift: shift is not zero!";
|version(D_Exceptions)
| private static immutable assumeZeroShiftException = new Exception(assumeZeroShiftExceptionMsg);
|
|@optmath:
|
|enum std_ops = q{
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| { mixin(op ~ "_iterator;"); }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "-" || op == "+")
| { mixin("_iterator " ~ op ~ "= index;"); }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return this._iterator - right._iterator; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._iterator == right._iterator; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!Iterator)
| return this._iterator - right._iterator;
| else
| return this._iterator.opCmp(right._iterator);
| }
|};
|
|/++
|Step counter.
|
|`IotaIterator` is used by $(SUBREF topology, iota).
|+/
|struct IotaIterator(I)
| if (isIntegral!I || isPointer!I)
|{
|@optmath:
|
| ///
| I _index;
|
| static if (isPointer!I)
| ///
| auto lightConst()() const @property
| {
| static if (isIntegral!I)
| return IotaIterator!I(_index);
| else
| return IotaIterator!(LightConstOf!I)(_index);
| }
|
| static if (isPointer!I)
| ///
| auto lightImmutable()() immutable @property
| {
| static if (isIntegral!I)
| return IotaIterator!I(_index);
| else
| return IotaIterator!(LightImmutableOf!I)(_index);
| }
|
|pure:
|
| I opUnary(string op : "*")()
0000000| { return _index; }
|
| void opUnary(string op)()
| if (op == "--" || op == "++")
| { mixin(op ~ `_index;`); }
|
| I opIndex()(ptrdiff_t index) const
0000000| { return cast(I)(_index + index); }
|
| void opOpAssign(string op)(ptrdiff_t index)
| if (op == `+` || op == `-`)
| { mixin(`_index ` ~ op ~ `= index;`); }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(const typeof(this) right) const
| { return cast(ptrdiff_t)(this._index - right._index); }
|
| bool opEquals()(const typeof(this) right) const
0000000| { return this._index == right._index; }
|
| auto opCmp()(const typeof(this) right) const
0000000| { return this._index - right._index; }
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| IotaIterator!int iota;
| assert(*iota == 0);
|
| // iteration
| ++iota;
| assert(*iota == 1);
|
| assert(iota[2] == 3);
| assert(iota[-1] == 0);
|
| --iota;
| assert(*iota == 0);
|
| // opBinary
| assert(*(iota + 2) == 2);
| assert(*(iota - 3) == -3);
| assert((iota - 3) - iota == -3);
|
| // construction
| assert(*IotaIterator!int(3) == 3);
| assert(iota - 1 < iota);
|}
|
|///
|pure nothrow @nogc version(mir_test) unittest
|{
| int[32] data;
| auto iota = IotaIterator!(int*)(data.ptr);
| assert(*iota == data.ptr);
|
| // iteration
| ++iota;
| assert(*iota == 1 + data.ptr);
|
| assert(iota[2] == 3 + data.ptr);
| assert(iota[-1] == 0 + data.ptr);
|
| --iota;
| assert(*iota == 0 + data.ptr);
|
| // opBinary
| assert(*(iota + 2) == 2 + data.ptr);
| assert(*(iota - 3) == -3 + data.ptr);
| assert((iota - 3) - iota == -3);
|
| // construction
| assert(*IotaIterator!(int*)(data.ptr) == data.ptr);
| assert(iota - 1 < iota);
|}
|
|auto RetroIterator__map(Iterator, alias fun)(ref RetroIterator!Iterator it)
|{
| auto iterator = it._iterator._mapIterator!fun;
| return RetroIterator!(typeof(iterator))(iterator);
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.topology;
| import mir.ndslice.allocation;
| auto v = iota(9).retro.map!(a => a).slice;
| uint r;
| auto w = iota(9).retro.map!(a => a).map!(a => a * r).slice;
|}
|
|/++
|Reverses the direction of an iterator.
|
|`RetroIterator` is used by $(SUBREF topology, retro).
|+/
|struct RetroIterator(Iterator)
|{
|@optmath:
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return RetroIterator!(LightConstOf!Iterator)(.lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return RetroIterator!(LightImmutableOf!Iterator)(.lightImmutable(_iterator));
| }
|
| ///
| static alias __map(alias fun) = RetroIterator__map!(Iterator, fun);
|
| auto ref opUnary(string op : "*")()
| { return *_iterator; }
|
| void opUnary(string op : "--")()
| { ++_iterator; }
|
| void opUnary(string op : "++")() pure
| { --_iterator; }
|
| auto ref opIndex()(ptrdiff_t index)
| { return _iterator[-index]; }
|
| void opOpAssign(string op : "-")(ptrdiff_t index) scope
| { _iterator += index; }
|
| void opOpAssign(string op : "+")(ptrdiff_t index) scope
| { _iterator -= index; }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return right._iterator - this._iterator; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return right._iterator == this._iterator; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!Iterator)
| return right._iterator - this._iterator;
| else
| return right._iterator.opCmp(this._iterator);
| }
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| IotaIterator!int iota;
| RetroIterator!(IotaIterator!int) retro;
|
| ++iota;
| --retro;
| assert(*retro == *iota);
|
| --iota;
| ++retro;
| assert(*retro == *iota);
|
| assert(retro[-7] == iota[7]);
|
| iota += 100;
| retro -= 100;
| assert(*retro == *iota);
|
| iota -= 100;
| retro += 100;
| assert(*retro == *iota);
|
| assert(*(retro + 10) == *(iota - 10));
|
| assert(retro - 1 < retro);
|
| assert((retro - 5) - retro == -5);
|
| iota = IotaIterator!int(3);
| retro = RetroIterator!(IotaIterator!int)(iota);
| assert(*retro == *iota);
|}
|
|auto StrideIterator__map(Iterator, alias fun)(StrideIterator!Iterator it)
|{
| auto iterator = it._iterator._mapIterator!fun;
| return StrideIterator!(typeof(iterator))(it._stride, iterator);
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.topology;
| import mir.ndslice.allocation;
| auto v = iota([3], 0, 3).map!(a => a).slice;
| uint r;
| auto w = iota([3], 0, 3).map!(a => a).map!(a => a * r).slice;
|}
|
|/++
|Iterates an iterator with a fixed stride.
|
|`StrideIterator` is used by $(SUBREF topology, stride).
|+/
|struct StrideIterator(Iterator)
|{
|@optmath:
| ///
| ptrdiff_t _stride;
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return StrideIterator!(LightConstOf!Iterator)(_stride, .lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return StrideIterator!(LightImmutableOf!Iterator)(_stride, .lightImmutable(_iterator));
| }
|
| ///
| static alias __map(alias fun) = StrideIterator__map!(Iterator, fun);
|
| auto ref opUnary(string op : "*")()
| { return *_iterator; }
|
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| { mixin("_iterator " ~ op[0] ~ "= _stride;"); }
|
| auto ref opIndex()(ptrdiff_t index)
| { return _iterator[index * _stride]; }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "-" || op == "+")
| { mixin("_iterator " ~ op ~ "= index * _stride;"); }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return (this._iterator - right._iterator) / _stride; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._iterator == right._iterator; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!Iterator)
| ptrdiff_t ret = this._iterator - right._iterator;
| else
| ptrdiff_t ret = this._iterator.opCmp(right._iterator);
| return _stride >= 0 ? ret : -ret;
| }
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| IotaIterator!int iota;
| StrideIterator!(IotaIterator!int) stride;
| stride._stride = -3;
|
| iota -= stride._stride;
| --stride;
| assert(*stride == *iota);
|
| iota += stride._stride;
| ++stride;
| assert(*stride == *iota);
|
| assert(stride[7] == iota[7 * stride._stride]);
|
| iota -= 100 * stride._stride;
| stride -= 100;
| assert(*stride == *iota);
|
| iota += 100 * stride._stride;
| stride += 100;
| assert(*stride == *iota);
|
| assert(*(stride + 10) == *(iota + 10 * stride._stride));
|
| assert(stride - 1 < stride);
|
| assert((stride - 5) - stride == -5);
|
| iota = IotaIterator!int(3);
| stride = StrideIterator!(IotaIterator!int)(3, iota);
| assert(*stride == *iota);
|}
|
|auto StrideIterator__map(Iterator, size_t factor, alias fun)(StrideIterator!(Iterator, factor) it)
|{
| auto iterator = it._iterator._mapIterator!fun;
| return StrideIterator!(typeof(iterator), factor)(iterator);
|}
|
|/++
|Iterates an iterator with a fixed compile-time stride.
|
|`StrideIterator` is used by $(SUBREF topology, stride).
|+/
|struct StrideIterator(Iterator, ptrdiff_t factor)
|{
|@optmath:
| ///
| enum _stride = factor;
|
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return StrideIterator!(LightConstOf!Iterator, _stride)(.lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return StrideIterator!(LightImmutableOf!Iterator, _stride)(.lightImmutable(_iterator));
| }
|
| ///
| static alias __map(alias fun) = StrideIterator__map!(Iterator, _stride, fun);
|
| auto ref opUnary(string op : "*")()
| { return *_iterator; }
|
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| { mixin("_iterator " ~ op[0] ~ "= _stride;"); }
|
| auto ref opIndex()(ptrdiff_t index)
| { return _iterator[index * _stride]; }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "-" || op == "+")
| { mixin("_iterator " ~ op ~ "= index * _stride;"); }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return (this._iterator - right._iterator) / _stride; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._iterator == right._iterator; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!Iterator)
| ptrdiff_t ret = this._iterator - right._iterator;
| else
| ptrdiff_t ret = this._iterator.opCmp(right._iterator);
| return _stride >= 0 ? ret : -ret;
| }
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| IotaIterator!int iota;
| StrideIterator!(IotaIterator!int, -3) stride;
|
| iota -= stride._stride;
| --stride;
| assert(*stride == *iota);
|
| iota += stride._stride;
| ++stride;
| assert(*stride == *iota);
|
| assert(stride[7] == iota[7 * stride._stride]);
|
| iota -= 100 * stride._stride;
| stride -= 100;
| assert(*stride == *iota);
|
| iota += 100 * stride._stride;
| stride += 100;
| assert(*stride == *iota);
|
| assert(*(stride + 10) == *(iota + 10 * stride._stride));
|
| assert(stride - 1 < stride);
|
| assert((stride - 5) - stride == -5);
|}
|
|package template _zip_types(Iterators...)
|{
| alias AliasSeq(T...) = T;
| static if (Iterators.length)
| {
| enum i = Iterators.length - 1;
| alias T = typeof(Iterators[i].init[sizediff_t.init]);
| static if (__traits(compiles, &Iterators[i].init[sizediff_t.init]))
| {
| import mir.functional: Ref;
| alias _zip_types = AliasSeq!(_zip_types!(Iterators[0 .. i]), Ref!T);
| }
| else
| alias _zip_types = AliasSeq!(_zip_types!(Iterators[0 .. i]), T);
| }
| else
| alias _zip_types = AliasSeq!();
|}
|
|package template _zip_fronts(Iterators...)
|{
| static if (Iterators.length)
| {
| enum i = Iterators.length - 1;
| static if (__traits(compiles, &Iterators[i].init[sizediff_t.init]))
| enum _zip_fronts = _zip_fronts!(Iterators[0 .. i]) ~ "_ref(*_iterators[" ~ i.stringof ~ "]), ";
| else
| enum _zip_fronts = _zip_fronts!(Iterators[0 .. i]) ~ "*_iterators[" ~ i.stringof ~ "], ";
| }
| else
| enum _zip_fronts = "";
|}
|
|package template _zip_index(Iterators...)
|{
| static if (Iterators.length)
| {
| enum i = Iterators.length - 1;
| static if (__traits(compiles, &Iterators[i].init[sizediff_t.init]))
| enum _zip_index = _zip_index!(Iterators[0 .. i]) ~ "_ref(_iterators[" ~ i.stringof ~ "][index]), ";
| else
| enum _zip_index = _zip_index!(Iterators[0 .. i]) ~ "_iterators[" ~ i.stringof ~ "][index], ";
| }
| else
| enum _zip_index = "";
|}
|
|/++
|Iterates multiple iterators in lockstep.
|
|`ZipIterator` is used by $(SUBREF topology, zip).
|+/
|struct ZipIterator(Iterators...)
| if (Iterators.length > 1)
|{
|@optmath:
| import std.traits: ConstOf, ImmutableOf;
| import std.meta: staticMap;
| import mir.functional: RefTuple, Ref, _ref;
| ///
| Iterators _iterators;
|
| ///
| auto lightConst()() const @property
| {
| import std.format;
| import mir.ndslice.topology: iota;
| import std.meta: staticMap;
| alias Ret = ZipIterator!(staticMap!(LightConstOf, Iterators));
| enum ret = "Ret(%(.lightConst(_iterators[%s]),%)]))".format(_iterators.length.iota);
| return mixin(ret);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| import std.format;
| import mir.ndslice.topology: iota;
| import std.meta: staticMap;
| alias Ret = ZipIterator!(staticMap!(LightImmutableOf, Iterators));
| enum ret = "Ret(%(.lightImmutable(_iterators[%s]),%)]))".format(_iterators.length.iota);
| return mixin(ret);
| }
|
| auto opUnary(string op : "*")()
| { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_fronts!Iterators ~ ")"); }
|
|
| auto opUnary(string op : "*")() const
| { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_fronts!Iterators ~ ")"); }
|
| auto opUnary(string op : "*")() immutable
| { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_fronts!Iterators ~ ")"); }
|
| void opUnary(string op)() scope
| if (op == "++" || op == "--")
| {
| foreach (ref _iterator; _iterators)
| mixin(op ~ `_iterator;`);
| }
|
| auto opIndex()(ptrdiff_t index)
| { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_index!Iterators ~ ")"); }
|
| auto opIndexAssign(Types...)(RefTuple!(Types) value, ptrdiff_t index)
| if (Types.length == Iterators.length)
| {
| foreach(i, ref val; value.expand)
| {
| import mir.functional: unref;
| _iterators[i][index] = unref(val);
| }
| return opIndex(index);
| }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "+" || op == "-")
| {
| foreach (ref _iterator; _iterators)
| mixin(`_iterator ` ~ op ~ `= index;`);
| }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return this._iterators[0] - right._iterators[0]; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._iterators[0] == right._iterators[0]; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!(Iterators[0]))
| return this._iterators[0] - right._iterators[0];
| else
| return this._iterators[0].opCmp(right._iterators[0]);
| }
|
| import std.meta: anySatisfy;
| static if (anySatisfy!(hasZeroShiftFieldMember, Iterators))
| /// Defined if at least one of `Iterators` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| import std.meta: staticMap;
| alias _fields = _iterators;
| return mixin("ZipField!(staticMap!(ZeroShiftField, Iterators))(" ~ applyAssumeZeroShift!Iterators ~ ")");
| }
|}
|
|///
|pure nothrow @nogc version(mir_test) unittest
|{
| import mir.ndslice.traits: isIterator;
|
| double[10] data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
| alias ItA = IotaIterator!int;
| alias ItB = double*;
| alias ItZ = ZipIterator!(ItA, ItB);
| auto zip = ItZ(ItA(3), data.ptr);
| assert((*zip).a == 3);
| assert((*zip).b == 1);
|
| // iteration
| ++zip;
| assert((*zip).a == 3 + 1);
| assert((*zip).b == 1 + 1);
| assert(&(*zip).b() == data.ptr + 1);
|
| assert(zip[4].a == 3 + 5);
| assert(zip[4].b == 1 + 5);
| assert(&zip[4].b() == data.ptr + 5);
|
| --zip;
| assert((*zip).a == 3);
| assert((*zip).b == 1);
|
| assert((*(zip + 2)).a == 3 + 2);
| assert((*(zip - 3)).a == 3 + -3);
| assert((*(zip + 2)).b == 1 + 2);
| assert((*(zip + 3 - 3)).b == 1);
| assert((zip - 3).opBinary!"-"(zip) == -3);
|
| assert(zip == zip);
| assert(zip - 1 < zip);
|
| static assert(isIterator!(ZipIterator!(double*, int*)));
| static assert(isIterator!(ZipIterator!(immutable(double)*, immutable(int)*)));
|}
|
|///
|struct CachedIterator(Iterator, CacheIterator, FlagIterator)
|{
| ///
| Iterator _iterator;
| ///
| CacheIterator _caches;
| ///
| FlagIterator _flags;
|
|@optmath:
|
| ///
| auto lightScope()() scope @property
| {
| return CachedIterator!(LightScopeOf!Iterator, LightScopeOf!CacheIterator, LightScopeOf!FlagIterator)(
| .lightScope(_iterator),
| .lightScope(_caches),
| .lightScope(_flags),
| );
| }
|
| ///
| auto lightScope()() scope const @property
| {
| return lightConst.lightScope;
| }
|
| ///
| auto lightScope()() scope immutable @property
| {
| return lightImmutable.lightScope;
| }
|
| ///
| auto lightConst()() const @property
| {
| return CachedIterator!(LightConstOf!Iterator, CacheIterator, FlagIterator)(
| .lightConst(_iterator),
| *cast(CacheIterator*)&_caches,
| *cast(FlagIterator*)&_flags,
| );
| }
|
| ///
| auto lightImmutable()() immutable @property @trusted
| {
| return CachedIterator!(LightImmutableOf!Iterator, CacheIterator, FlagIterator)(
| .lightImmutable(_iterator),
| *cast(CacheIterator*)&_caches,
| *cast(FlagIterator*)&_flags,
| );
| }
|
| private alias T = typeof(Iterator.init[0]);
| private alias UT = Unqual!T;
|
| auto opUnary(string op : "*")()
| {
| if (_expect(!*_flags, false))
| {
| _flags[0] = true;
| emplaceRef!T(*cast(UT*)&*_caches, *_iterator);
| }
| return *_caches;
| }
|
| auto opIndex()(ptrdiff_t index)
| {
| if (_expect(!_flags[index], false))
| {
| _flags[index] = true;
| emplaceRef!T(*cast(UT*)&(_caches[index]), _iterator[index]);
| }
| return _caches[index];
| }
|
| auto ref opIndexAssign(T)(auto ref T val, ptrdiff_t index)
| {
| _flags[index] = true;
| return _caches[index] = val;
| }
|
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| {
| mixin(op ~ "_iterator;");
| mixin(op ~ "_caches;");
| mixin(op ~ "_flags;");
| }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "-" || op == "+")
| {
| mixin("_iterator" ~ op ~ "= index;");
| mixin("_caches" ~ op ~ "= index;");
| mixin("_flags" ~ op ~ "= index;");
| }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return this._iterator - right._iterator; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._iterator == right._iterator; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!Iterator)
| return this._iterator - right._iterator;
| else
| return this._iterator.opCmp(right._iterator);
| }
|}
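|
|// A hedged sketch of a GC-cached lazy computation, assuming $(SUBREF topology, cachedGC)
|// wraps a lazy slice so that each element is evaluated at most once.
|version(mir_test) unittest
|{
|    import mir.ndslice.topology: iota, map, cachedGC;
|
|    size_t evaluations;
|    auto lazySlice = iota(4).map!((i) { ++evaluations; return i * 10; });
|    auto cachedSlice = lazySlice.cachedGC;
|
|    assert(cachedSlice[2] == 20);
|    assert(cachedSlice[2] == 20);   // served from the cache
|    assert(evaluations == 1);
|}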
|
|private enum map_primitives = q{
|
| import mir.functional: RefTuple, unref;
|
| auto ref opUnary(string op : "*")()
| {
| static if (is(typeof(*_iterator) : RefTuple!T, T...))
| {
| auto t = *_iterator;
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(*_iterator);
| }
|
| auto ref opIndex(ptrdiff_t index) scope
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(_iterator[index]);
| }
|
| static if (!__traits(compiles, &opIndex(ptrdiff_t.init)))
| {
| auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ") = value");
| }
| else
| return _fun(_iterator[index]) = value;
| }
|
| auto ref opIndexUnary(string op)(ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin(op ~ "_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return mixin(op ~ "_fun(_iterator[index])");
| }
|
| auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")" ~ op ~ "= value");
| }
| else
| return mixin("_fun(_iterator[index])" ~ op ~ "= value");
| }
| }
|};
|
|/++
|`VmapIterator` is used by $(SUBREF topology, map).
|+/
|struct VmapIterator(Iterator, Fun)
|{
|@optmath:
|
| ///
| Iterator _iterator;
| ///
| Fun _fun;
|
| ///
| auto lightConst()() const @property
| {
| return VmapIterator!(LightConstOf!Iterator, LightConstOf!Fun)(.lightConst(_iterator), .lightConst(_fun));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return VmapIterator!(LightImmutableOf!Iterator, LightImmutableOf!Fun)(.lightImmutable(_iterator), .lightImmutable(_fun));
| }
|
| import mir.functional: RefTuple, unref;
|
| auto ref opUnary(string op : "*")()
| {
| static if (is(typeof(*_iterator) : RefTuple!T, T...))
| {
| auto t = *_iterator;
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(*_iterator);
| }
|
| auto ref opIndex(ptrdiff_t index) scope
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(_iterator[index]);
| }
|
| static if (!__traits(compiles, &opIndex(ptrdiff_t.init)))
| {
| auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ") = value");
| }
| else
| return _fun(_iterator[index]) = value;
| }
|
| auto ref opIndexUnary(string op)(ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin(op ~ "_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return mixin(op ~ "_fun(_iterator[index])");
| }
|
| auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")" ~ op ~ "= value");
| }
| else
| return mixin("_fun(_iterator[index])" ~ op ~ "= value");
| }
| }
|
| mixin(std_ops);
|
| static if (hasZeroShiftFieldMember!Iterator)
| ///
| auto assumeFieldsHaveZeroShift() @property
| {
| return _vmapField(_iterator.assumeFieldsHaveZeroShift, _fun);
| }
|}
|
|auto MapIterator__map(Iterator, alias fun0, alias fun)(ref MapIterator!(Iterator, fun0) it)
|{
| return MapIterator!(Iterator, fun)(it._iterator);
|}
|
|/++
|`MapIterator` is used by $(SUBREF topology, map).
|+/
|struct MapIterator(Iterator, alias _fun)
|{
|@optmath:
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return MapIterator!(LightConstOf!Iterator, _fun)(.lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return MapIterator!(LightImmutableOf!Iterator, _fun)(.lightImmutable(_iterator));
| }
|
| import mir.functional: pipe;
| ///
| static alias __map(alias fun1) = MapIterator__map!(Iterator, _fun, pipe!(_fun, fun1));
|
| import mir.functional: RefTuple, unref;
|
| auto ref opUnary(string op : "*")()
| {
| static if (is(typeof(*_iterator) : RefTuple!T, T...))
| {
| auto t = *_iterator;
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(*_iterator);
| }
|
| auto ref opIndex(ptrdiff_t index) scope
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(_iterator[index]);
| }
|
| static if (!__traits(compiles, &opIndex(ptrdiff_t.init)))
| {
| auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ") = value");
| }
| else
| return _fun(_iterator[index]) = value;
| }
|
| auto ref opIndexUnary(string op)(ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin(op ~ "_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return mixin(op ~ "_fun(_iterator[index])");
| }
|
| auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")" ~ op ~ "= value");
| }
| else
| return mixin("_fun(_iterator[index])" ~ op ~ "= value");
| }
| }
|
| mixin(std_ops);
|
| static if (hasZeroShiftFieldMember!Iterator)
| ///
| auto assumeFieldsHaveZeroShift() @property
| {
| return _mapField!_fun(_iterator.assumeFieldsHaveZeroShift);
| }
|}
|
|/+
|Creates a mapped iterator. Uses `__map` if possible.
|+/
|auto _mapIterator(alias fun, Iterator)(Iterator iterator)
|{
| import core.lifetime: move;
| static if (__traits(hasMember, Iterator, "__map"))
| {
| static if (is(Iterator : MapIterator!(Iter0, fun0), Iter0, alias fun0)
| && !__traits(compiles, Iterator.__map!fun(iterator)))
| {
| // https://github.com/libmir/mir-algorithm/issues/111
| debug(mir) pragma(msg, __FUNCTION__~" not coalescing chained map calls into a single lambda, possibly because of multiple embedded context pointers");
| return MapIterator!(Iterator, fun)(move(iterator));
| }
| else
| return Iterator.__map!fun(iterator);
| }
| else
| return MapIterator!(Iterator, fun)(move(iterator));
|}
|
|
|/+
|Creates a mapped iterator. Uses `__vmap` if possible.
|+/
|auto _vmapIterator(Iterator, Fun)(Iterator iterator, Fun fun)
|{
| static if (__traits(hasMember, Iterator, "__vmap"))
| return Iterator.__vmap(iterator, fun);
| else
| return MapIterator!(Iterator, fun)(iterator);
|}
|
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| // https://github.com/libmir/mir-algorithm/issues/111
| import mir.ndslice.topology : iota, map;
| import mir.functional : pipe;
|
| static auto foo(T)(T x)
| {
| return x.map!(a => a + 1);
| }
|
| static auto bar(T)(T x)
| {
| return foo(x).map!(a => a + 2);
| }
|
| auto data = iota(5);
| auto result = iota([5], 3);
|
| auto x = data.map!(a => a + 1).map!(a => a + 2);
| assert(x == result);
|
| auto y = bar(data);
| assert(y == result);
|}
|
|/++
|`NeighboursIterator` is used by $(SUBREF topology, map).
|+/
|struct NeighboursIterator(Iterator, size_t N, alias _fun, bool around)
|{
| import std.meta: AliasSeq;
|@optmath:
| ///
| Iterator _iterator;
| static if (N)
| Iterator[2][N] _neighbours;
| else alias _neighbours = AliasSeq!();
|
| ///
| auto lightConst()() const @property
| {
| LightConstOf!Iterator[2][N] neighbours;
| foreach (i; 0 .. N)
| {
| neighbours[i][0] = .lightConst(_neighbours[i][0]);
| neighbours[i][1] = .lightConst(_neighbours[i][1]);
| }
| return NeighboursIterator!(LightConstOf!Iterator, N, _fun, around)(.lightConst(_iterator), neighbours);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| LightImmutableOf!Iterator[2][N] neighbours;
| foreach (i; 0 .. N)
| {
| neighbours[i][0] = .lightImmutable(_neighbours[i][0]);
| neighbours[i][1] = .lightImmutable(_neighbours[i][1]);
| }
| return NeighboursIterator!(LightImmutableOf!Iterator, N, _fun, around)(.lightImmutable(_iterator), neighbours);
| }
|
| import mir.functional: RefTuple, _ref;
|
| private alias RA = Unqual!(typeof(_fun(_iterator[-1], _iterator[+1])));
| private alias Result = RefTuple!(_zip_types!Iterator, RA);
|
| auto ref opUnary(string op : "*")()
| {
| return opIndex(0);
| }
|
| auto ref opIndex(ptrdiff_t index) scope
| {
| static if (around)
| RA result = _fun(_iterator[index - 1], _iterator[index + 1]);
|
| foreach (i; Iota!N)
| {
| static if (i == 0 && !around)
| RA result = _fun(_neighbours[i][0][index], _neighbours[i][1][index]);
| else
| result = _fun(result, _fun(_neighbours[i][0][index], _neighbours[i][1][index]));
| }
| static if (__traits(compiles, &_iterator[index]))
| return Result(_ref(_iterator[index]), result);
| else
| return Result(_iterator[index], result);
| }
|
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| {
| mixin(op ~ "_iterator;");
| foreach (i; Iota!N)
| {
| mixin(op ~ "_neighbours[i][0];");
| mixin(op ~ "_neighbours[i][1];");
| }
| }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "-" || op == "+")
| {
|
| mixin("_iterator " ~ op ~ "= index;");
| foreach (i; Iota!N)
| {
| mixin("_neighbours[i][0] " ~ op ~ "= index;");
| mixin("_neighbours[i][1] " ~ op ~ "= index;");
| }
| }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return this._iterator - right._iterator; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._iterator == right._iterator; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!Iterator)
| return this._iterator - right._iterator;
| else
| return this._iterator.opCmp(right._iterator);
| }
|}
|
|/++
|`MemberIterator` is used by $(SUBREF topology, member).
|+/
|struct MemberIterator(Iterator, string member)
|{
|@optmath:
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return MemberIterator!(LightConstOf!Iterator, member)(.lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return MemberIterator!(LightImmutableOf!Iterator, member)(.lightImmutable(_iterator));
| }
|
| auto ref opUnary(string op : "*")()
| {
| return __traits(getMember, *_iterator, member);
| }
|
| auto ref opIndex()(ptrdiff_t index)
| {
| return __traits(getMember, _iterator[index], member);
| }
|
| static if (!__traits(compiles, &opIndex(ptrdiff_t.init)))
| {
| auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope
| {
| return __traits(getMember, _iterator[index], member) = value;
| }
|
| auto ref opIndexUnary(string op)(ptrdiff_t index)
| {
| return mixin(op ~ "__traits(getMember, _iterator[index], member)");
| }
|
| auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index)
| {
| return mixin("__traits(getMember, _iterator[index], member)" ~ op ~ "= value");
| }
| }
|
| mixin(std_ops);
|}
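|
|// A brief illustration of iterating a struct member over a slice, assuming
|// $(SUBREF topology, member); the `Point` struct here is a hypothetical example.
|version(mir_test) unittest
|{
|    import mir.ndslice.slice: sliced;
|    import mir.ndslice.topology: member;
|
|    static struct Point { double x, y; }
|    auto points = [Point(1, 2), Point(3, 4)].sliced;
|    assert(points.member!"x" == [1.0, 3.0]);
|    assert(points.member!"y" == [2.0, 4.0]);
|}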
|
|/++
|`BytegroupIterator` is used by $(SUBREF topology, Bytegroup) and $(SUBREF topology, bytegroup).
|+/
|struct BytegroupIterator(Iterator, size_t count, DestinationType)
| if (count)
|{
|@optmath:
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return BytegroupIterator!(LightConstOf!Iterator, count, DestinationType)(.lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return BytegroupIterator!(LightImmutableOf!Iterator, count, DestinationType)(.lightImmutable(_iterator));
| }
|
| package(mir) alias Byte = Unqual!(typeof(_iterator[0]));
|
| version(LittleEndian)
| private enum BE = false;
| else
| private enum BE = true;
|
| private union U
| {
| DestinationType value;
| static if (DestinationType.sizeof > Byte[count].sizeof && BE && isScalarType!DestinationType)
| {
| struct
| {
| ubyte[DestinationType.sizeof - Byte[count].sizeof] shiftPayload;
| Byte[count] bytes;
| }
| }
| else
| {
| Byte[count] bytes;
| }
| }
|
| DestinationType opUnary(string op : "*")()
| {
| U ret = { value: DestinationType.init };
| foreach (i; Iota!count)
| ret.bytes[i] = _iterator[i];
| return ret.value;
| }
|
| DestinationType opIndex()(ptrdiff_t index)
| {
| return *(this + index);
| }
|
| DestinationType opIndexAssign(T)(T val, ptrdiff_t index) scope
| {
| auto it = this + index;
| U ret = { value: val };
| foreach (i; Iota!count)
| it._iterator[i] = ret.bytes[i];
| return ret.value;
| }
|
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| { mixin("_iterator " ~ op[0] ~ "= count;"); }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "-" || op == "+")
| { mixin("_iterator " ~ op ~ "= index * count;"); }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return (this._iterator - right._iterator) / count; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._iterator == right._iterator; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| static if (isPointer!Iterator)
| return this._iterator - right._iterator;
| else
| return this._iterator.opCmp(right._iterator);
| }
|}
|
|auto SlideIterator__map(Iterator, size_t params, alias fun0, alias fun)(SlideIterator!(Iterator, params, fun0) it)
|{
| return SlideIterator!(Iterator, params, fun)(it._iterator);
|}
|
|/++
|`SlideIterator` is used by $(SUBREF topology, diff) and $(SUBREF topology, slide).
|+/
|struct SlideIterator(Iterator, size_t params, alias fun)
| if (params > 1)
|{
|@optmath:
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return SlideIterator!(LightConstOf!Iterator, params, fun)(.lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return SlideIterator!(LightImmutableOf!Iterator, params, fun)(.lightImmutable(_iterator));
| }
|
| import mir.functional: pipe;
| ///
| static alias __map(alias fun1) = SlideIterator__map!(Iterator, params, fun, pipe!(fun, fun1));
|
| auto ref opUnary(string op : "*")()
| {
| return mixin("fun(" ~ _iotaArgs!(params, "_iterator[", "], ") ~ ")");
| }
|
| auto ref opIndex()(ptrdiff_t index)
| {
| return mixin("fun(" ~ _iotaArgs!(params, "_iterator[index + ", "], ") ~ ")");
| }
|
| mixin(std_ops);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.functional: naryFun;
| auto data = [1, 3, 8, 18];
| auto diff = SlideIterator!(int*, 2, naryFun!"b - a")(data.ptr);
| assert(*diff == 2);
| assert(diff[1] == 5);
| assert(diff[2] == 10);
|}
|
|auto IndexIterator__map(Iterator, Field, alias fun)(ref IndexIterator!(Iterator, Field) it)
|{
| auto field = it._field._mapField!fun;
| return IndexIterator!(Iterator, typeof(field))(it._iterator, field);
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.topology;
| import mir.ndslice.allocation;
| import mir.ndslice.slice;
| auto indices = [4, 3, 1, 2, 0, 4].sliced;
| auto v = iota(5).indexed(indices).map!(a => a).slice;
| uint r;
| auto w = iota(5).indexed(indices).map!(a => a).map!(a => a * r).slice;
|}
|
|/++
|Iterates a field using an iterator.
|
|`IndexIterator` is used by $(SUBREF topology, indexed).
|+/
|struct IndexIterator(Iterator, Field)
|{
| import mir.functional: RefTuple, unref;
|
|@optmath:
| ///
| Iterator _iterator;
| ///
| Field _field;
|
| ///
| auto lightConst()() const @property
| {
| return IndexIterator!(LightConstOf!Iterator, LightConstOf!Field)(.lightConst(_iterator), .lightConst(_field));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return IndexIterator!(LightImmutableOf!Iterator, LightImmutableOf!Field)(.lightImmutable(_iterator), _field.lightImmutable);
| }
|
| ///
| static alias __map(alias fun) = IndexIterator__map!(Iterator, Field, fun);
|
| auto ref opUnary(string op : "*")()
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = *_iterator;
| return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]");
| }
| else
| return _field[*_iterator];
| }
|
| auto ref opIndex()(ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]");
| }
| else
| return _field[_iterator[index]];
| }
|
| static if (!__traits(compiles, &opIndex(ptrdiff_t.init)))
| {
| auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "] = value");
| }
| else
| return _field[_iterator[index]] = value;
| }
|
| auto ref opIndexUnary(string op)(ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin(op ~ "_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]");
| }
| else
| return mixin(op ~ "_field[_iterator[index]]");
| }
|
| auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index)
| {
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...))
| {
| auto t = _iterator[index];
| return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]" ~ op ~ "= value");
| }
| else
| return mixin("_field[_iterator[index]]" ~ op ~ "= value");
| }
| }
|
| mixin(std_ops);
|}
|
|/++
|Iterates chunks in a sliceable using an iterator composed of indices.
|
|Definition:
|----
|auto index = iterator[i];
|auto elem = sliceable[index[0] .. index[1]];
|----
|+/
|struct SubSliceIterator(Iterator, Sliceable)
|{
|@optmath:
| ///
| Iterator _iterator;
| ///
| Sliceable _sliceable;
|
| ///
| auto lightConst()() const @property
| {
| return SubSliceIterator!(LightConstOf!Iterator, LightConstOf!Sliceable)(.lightConst(_iterator), _sliceable.lightConst);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return SubSliceIterator!(LightImmutableOf!Iterator, LightImmutableOf!Sliceable)(.lightImmutable(_iterator), _sliceable.lightImmutable);
| }
|
| auto ref opUnary(string op : "*")()
| {
| auto i = *_iterator;
| return _sliceable[i[0] .. i[1]];
| }
|
| auto ref opIndex()(ptrdiff_t index)
| {
| auto i = _iterator[index];
| return _sliceable[i[0] .. i[1]];
| }
|
| mixin(std_ops);
|}
|
|/++
|Iterates chunks in a sliceable using an iterator composed of indices stored consecutively.
|
|Definition:
|----
|auto elem = _sliceable[_iterator[index] .. _iterator[index + 1]];
|----
|+/
|struct ChopIterator(Iterator, Sliceable)
|{
|@optmath:
| ///
| Iterator _iterator;
| ///
| Sliceable _sliceable;
|
| ///
| auto lightConst()() const @property
| {
| return ChopIterator!(LightConstOf!Iterator, LightConstOf!Sliceable)(.lightConst(_iterator), _sliceable.lightConst);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return ChopIterator!(LightImmutableOf!Iterator, LightImmutableOf!Sliceable)(.lightImmutable(_iterator), _sliceable.lightImmutable);
| }
|
| auto ref opUnary(string op : "*")()
| {
| return _sliceable[*_iterator .. _iterator[1]];
| }
|
| auto ref opIndex()(ptrdiff_t index)
| {
| return _sliceable[_iterator[index] .. _iterator[index + 1]];
| }
|
| mixin(std_ops);
|}
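|
|// A short illustrative example, assuming `chopped` from $(SUBREF topology, chopped)
|// accepts a plain array of cut points, as it is used in mir.sparse above.
|version(mir_test) unittest
|{
|    import mir.ndslice.slice: sliced;
|    import mir.ndslice.topology: chopped;
|
|    auto data = [1, 2, 3, 4, 5, 6, 7].sliced;
|    auto parts = data.chopped([0, 2, 5, 7]);
|    assert(parts[0] == [1, 2]);
|    assert(parts[1] == [3, 4, 5]);
|    assert(parts[2] == [6, 7]);
|}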
|
|/++
|Iterates on top of another iterator and returns a slice
|as a multidimensional window at the current position.
|
|`SliceIterator` is used by $(SUBREF topology, map) for packed slices.
|+/
|struct SliceIterator(Iterator, size_t N = 1, SliceKind kind = Contiguous)
|{
|@optmath:
| ///
| alias Element = Slice!(Iterator, N, kind);
| ///
| Element._Structure _structure;
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return SliceIterator!(LightConstOf!Iterator, N, kind)(_structure, .lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return SliceIterator!(LightImmutableOf!Iterator, N, kind)(_structure, .lightImmutable(_iterator));
| }
|
| auto opUnary(string op : "*")()
| {
| return Element(_structure, _iterator);
| }
|
| auto opIndex()(ptrdiff_t index)
| {
| return Element(_structure, _iterator + index);
| }
|
| mixin(std_ops);
|}
|
|public auto FieldIterator__map(Field, alias fun)(FieldIterator!(Field) it)
|{
| import mir.ndslice.field: _mapField;
| auto field = it._field._mapField!fun;
| return FieldIterator!(typeof(field))(it._index, field);
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.topology;
| import mir.ndslice.allocation;
| auto v = ndiota(3, 3).map!(a => a).slice;
| uint r;
| auto w = ndiota(3, 3).map!(a => a).map!(a => a[0] * r).slice;
|}
|
|/++
|Creates an iterator on top of a field.
|
|`FieldIterator` is used by $(SUBREF slice, slicedField), $(SUBREF topology, bitwise), $(SUBREF topology, ndiota), and others.
|+/
|struct FieldIterator(Field)
|{
|@optmath:
| ///
| ptrdiff_t _index;
| ///
| Field _field;
|
| ///
| auto lightConst()() const @property
| {
0000000| return FieldIterator!(LightConstOf!Field)(_index, .lightConst(_field));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return FieldIterator!(LightImmutableOf!Field)(_index, .lightImmutable(_field));
| }
|
| ///
| static alias __map(alias fun) = FieldIterator__map!(Field, fun);
|
| ///
| Slice!(IotaIterator!size_t) opSlice(size_t dimension)(size_t i, size_t j) scope const
| {
| assert(i <= j);
| return typeof(return)(j - i, typeof(return).Iterator(i));
| }
|
| /++
| Returns:
| `_field[_index + sl.i .. _index + sl.j]`.
| +/
| auto opIndex()(Slice!(IotaIterator!size_t) sl)
| {
| auto idx = _index + sl._iterator._index;
| return _field[idx .. idx + sl.length];
| }
|
| auto ref opUnary(string op : "*")()
0000000| { return _field[_index]; }
|
| void opUnary(string op)() scope
| if (op == "++" || op == "--")
| { mixin(op ~ `_index;`); }
|
| auto ref opIndex()(ptrdiff_t index)
0000000| { return _field[_index + index]; }
|
| static if (!__traits(compiles, &_field[_index]))
| {
| auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index)
| { return _field[_index + index] = value; }
|
| auto ref opIndexUnary(string op)(ptrdiff_t index)
| { mixin (`return ` ~ op ~ `_field[_index + index];`); }
|
| auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index)
| { mixin (`return _field[_index + index] ` ~ op ~ `= value;`); }
| }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "+" || op == "-")
| { mixin(`_index ` ~ op ~ `= index;`); }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| { return this._index - right._index; }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
0000000| { return this._index == right._index; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
0000000| { return this._index - right._index; }
|
| ///
| auto assumeFieldsHaveZeroShift() @property
| {
0000000| if (_expect(_index != 0, false))
| {
| version (D_Exceptions)
0000000| throw assumeZeroShiftException;
| else
| assert(0, assumeZeroShiftExceptionMsg);
| }
| static if (hasZeroShiftFieldMember!Field)
| return _field.assumeFieldsHaveZeroShift;
| else
0000000| return _field;
| }
|}
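|
|// A small example of a field-backed slice, assuming $(SUBREF topology, ndiota),
|// which is built on `FieldIterator` as noted above.
|version(mir_test) unittest
|{
|    import mir.ndslice.topology: ndiota;
|
|    auto im = ndiota(2, 3);        // lazy field of multidimensional indices
|    assert(im[0, 0] == [0, 0]);
|    assert(im[1, 2] == [1, 2]);
|}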
|
|auto FlattenedIterator__map(Iterator, size_t N, SliceKind kind, alias fun)(FlattenedIterator!(Iterator, N, kind) it)
|{
| import mir.ndslice.topology: map;
| auto slice = it._slice.map!fun;
| return FlattenedIterator!(TemplateArgsOf!(typeof(slice)))(it._indices, slice);
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.topology;
| import mir.ndslice.allocation;
| auto v = iota(3, 3).universal.flattened.map!(a => a).slice;
| uint r;
| auto w = iota(3, 3).universal.flattened.map!(a => a).map!(a => a * r).slice;
|}
|
|/++
|Creates an iterator on top of all elements in a slice.
|
|`FlattenedIterator` is used by $(SUBREF topology, flattened).
|+/
|struct FlattenedIterator(Iterator, size_t N, SliceKind kind)
| if (N > 1 && (kind == Universal || kind == Canonical))
|{
|@optmath:
| ///
| ptrdiff_t[N] _indices;
| ///
| Slice!(Iterator, N, kind) _slice;
|
| ///
| auto lightConst()() const @property
| {
| return FlattenedIterator!(LightConstOf!Iterator, N, kind)(_indices, _slice.lightConst);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return FlattenedIterator!(LightImmutableOf!Iterator, N, kind)(_indices, _slice.lightImmutable);
| }
|
| ///
| static alias __map(alias fun) = FlattenedIterator__map!(Iterator, N, kind, fun);
|
| private ptrdiff_t getShift()(ptrdiff_t n)
| {
| ptrdiff_t _shift;
| n += _indices[$ - 1];
| foreach_reverse (i; Iota!(1, N))
| {
| immutable v = n / ptrdiff_t(_slice._lengths[i]);
| n %= ptrdiff_t(_slice._lengths[i]);
| static if (i == _slice.S)
| _shift += (n - _indices[i]);
| else
| _shift += (n - _indices[i]) * _slice._strides[i];
| n = _indices[i - 1] + v;
| }
| _shift += (n - _indices[0]) * _slice._strides[0];
| return _shift;
| }
|
| auto ref opUnary(string op : "*")()
| {
| return *_slice._iterator;
| }
|
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| {
| foreach_reverse (i; Iota!N)
| {
| static if (i == _slice.S)
| mixin(op ~ `_slice._iterator;`);
| else
| mixin(`_slice._iterator ` ~ op[0] ~ `= _slice._strides[i];`);
| mixin (op ~ `_indices[i];`);
| static if (i)
| {
| static if (op == "++")
| {
| if (_indices[i] < _slice._lengths[i])
| return;
| static if (i == _slice.S)
| _slice._iterator -= _slice._lengths[i];
| else
| _slice._iterator -= _slice._lengths[i] * _slice._strides[i];
| _indices[i] = 0;
| }
| else
| {
| if (_indices[i] >= 0)
| return;
| static if (i == _slice.S)
| _slice._iterator += _slice._lengths[i];
| else
| _slice._iterator += _slice._lengths[i] * _slice._strides[i];
| _indices[i] = _slice._lengths[i] - 1;
| }
| }
| }
| }
|
| auto ref opIndex()(ptrdiff_t index)
| {
| return _slice._iterator[getShift(index)];
| }
|
| static if (isMutable!(_slice.DeepElement) && !_slice.hasAccessByRef)
| ///
| auto ref opIndexAssign(E)(scope ref E elem, size_t index) scope return
| {
| return _slice._iterator[getShift(index)] = elem;
| }
|
| void opOpAssign(string op : "+")(ptrdiff_t n) scope
| {
| ptrdiff_t _shift;
| n += _indices[$ - 1];
| foreach_reverse (i; Iota!(1, N))
| {
| immutable v = n / ptrdiff_t(_slice._lengths[i]);
| n %= ptrdiff_t(_slice._lengths[i]);
| static if (i == _slice.S)
| _shift += (n - _indices[i]);
| else
| _shift += (n - _indices[i]) * _slice._strides[i];
| _indices[i] = n;
| n = _indices[i - 1] + v;
| }
| _shift += (n - _indices[0]) * _slice._strides[0];
| _indices[0] = n;
| foreach_reverse (i; Iota!(1, N))
| {
| if (_indices[i] >= 0)
| break;
| _indices[i] += _slice._lengths[i];
| _indices[i - 1]--;
| }
| _slice._iterator += _shift;
| }
|
| void opOpAssign(string op : "-")(ptrdiff_t n) scope
| { this += -n; }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| {
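|        // The distance in flattened (row-major) order is the mixed-radix value of the
|        // per-dimension index differences, with the dimension lengths as radices.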
| ptrdiff_t ret = this._indices[0] - right._indices[0];
| foreach (i; Iota!(1, N))
| {
| ret *= _slice._lengths[i];
| ret += this._indices[i] - right._indices[i];
| }
| return ret;
| }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| {
| foreach_reverse (i; Iota!N)
| if (this._indices[i] != right._indices[i])
| return false;
| return true;
| }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| {
| foreach (i; Iota!(N - 1))
| if (auto ret = this._indices[i] - right._indices[i])
| return ret;
| return this._indices[$ - 1] - right._indices[$ - 1];
| }
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.topology;
| import mir.ndslice.slice;
|
| auto it0 = iota(3, 4).universal.flattened._iterator;
| auto it1 = it0;
| assert(it0 == it1);
| it0 += 5;
| assert(it0 > it1);
| it0 -= 5;
| assert(*it0 == *it1);
| assert(it0 == it1);
| it0 += 5;
| it0 += 7;
| it0 -= 9;
| assert(it0 > it1);
| it1 += 3;
| assert(*it0 == *it1);
| assert(it0 == it1);
| assert(it0 <= it1);
| assert(it0 >= it1);
|
| ++it0;
| ++it0;
| ++it0;
| ++it0;
| ++it0;
| ++it0;
| ++it0;
| ++it0;
| ++it0;
|
| assert(it0 - it1 == 9);
| assert(it1 - it0 == -9);
|
| ++it0;
|
| assert(it0 - it1 == 10);
| assert(it1 - it0 == -10);
|
| --it0;
|
| assert(it0 - it1 == 9);
| assert(it1 - it0 == -9);
| assert(it0[-9] == *it1);
| assert(*it0 == it1[9]);
|
| --it0;
| --it0;
| --it0;
| --it0;
| --it0;
| --it0;
| --it0;
| --it0;
| --it0;
| assert(*it0 == *it1);
| assert(it0 == it1);
| assert(it0 <= it1);
| assert(it0 >= it1);
|}
|
|/++
|`StairsIterator` is used by $(SUBREF topology, stairs).
|+/
|struct StairsIterator(Iterator, string direction)
| if (direction == "+" || direction == "-")
|{
| ///
| size_t _length;
|
| ///
| Iterator _iterator;
|
| ///
| auto lightConst()() const @property
| {
| return StairsIterator!(LightConstOf!Iterator, direction)(_length, .lightConst(_iterator));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return StairsIterator!(LightImmutableOf!Iterator, direction)(_length, .lightImmutable(_iterator));
| }
|
|@optmath:
|
| ///
| Slice!Iterator opUnary(string op : "*")()
| {
| import mir.ndslice.slice: sliced;
| return _iterator.sliced(_length);
| }
|
| ///
| Slice!Iterator opIndex()(ptrdiff_t index)
| {
| import mir.ndslice.slice: sliced;
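|        // The stair `index` steps away begins after the sum of the intervening row
|        // lengths, an arithmetic series (first + last) * count / 2; this gives the
|        // `(_length + newLength - 1) * index / 2` shift for growing stairs and
|        // `(_length + newLength + 1) * index / 2` for shrinking ones.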
| static if (direction == "+")
| {
| auto newLength = _length + index;
| auto shift = ptrdiff_t(_length + newLength - 1) * index / 2;
| }
| else
| {
| auto newLength = _length - index;
| auto shift = ptrdiff_t(_length + newLength + 1) * index / 2;
| }
| assert(ptrdiff_t(newLength) >= 0);
| return (_iterator + shift).sliced(newLength);
| }
|
| void opUnary(string op)() scope
| if (op == "--" || op == "++")
| {
| static if (op == "++")
| {
| _iterator += _length;
| static if (direction == "+")
| ++_length;
| else
| --_length;
| }
| else
| {
| assert(_length);
| static if (direction == "+")
| --_length;
| else
| ++_length;
| _iterator -= _length;
| }
| }
|
| void opOpAssign(string op)(ptrdiff_t index) scope
| if (op == "-" || op == "+")
| {
| static if (op == direction)
| auto newLength = _length + index;
| else
| auto newLength = _length - index;
| static if (direction == "+")
| auto shift = ptrdiff_t(_length + newLength - 1) * index / 2;
| else
| auto shift = ptrdiff_t(_length + newLength + 1) * index / 2;
| assert(ptrdiff_t(newLength) >= 0);
| _length = newLength;
| static if (op == "+")
| _iterator += shift;
| else
| _iterator -= shift;
| }
|
| auto opBinary(string op)(ptrdiff_t index)
| if (op == "+" || op == "-")
| {
| auto ret = this;
| mixin(`ret ` ~ op ~ `= index;`);
| return ret;
| }
|
| ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const
| {
| static if (direction == "+")
| return this._length - right._length;
| else
| return right._length - this._length;
| }
|
| bool opEquals()(scope ref const typeof(this) right) scope const
| { return this._length == right._length; }
|
| ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const
| { return this - right; }
|}
|
|///
|version(mir_test) unittest
|{
| // 0
| // 1 2
| // 3 4 5
| // 6 7 8 9
| // 10 11 12 13 14
| auto it = StairsIterator!(IotaIterator!size_t, "+")(1, IotaIterator!size_t());
| assert(*it == [0]);
| assert(it[4] == [10, 11, 12, 13, 14]);
| assert(*(it + 4) == [10, 11, 12, 13, 14]);
| ++it;
| assert(*it == [1, 2]);
| it += 3;
| assert(*it == [10, 11, 12, 13, 14]);
| assert(it[-3] == [1, 2]);
| assert(*(it - 3) == [1, 2]);
| assert(it + 1 > it);
| assert(it + 1 - 1 == it);
| assert(it - 3 - it == -3);
| --it;
| assert(*it == [6, 7, 8, 9]);
|}
|
|///
|version(mir_test) unittest
|{
| // [0, 1, 2, 3, 4],
| // [5, 6, 7, 8],
| // [9, 10, 11],
| // [12, 13],
| // [14]]);
|
| auto it = StairsIterator!(IotaIterator!size_t, "-")(5, IotaIterator!size_t());
| assert(*it == [0, 1, 2, 3, 4]);
| assert(it[4] == [14]);
| assert(*(it + 4) == [14]);
| ++it;
| assert(*it == [5, 6, 7, 8]);
| it += 3;
| assert(*it == [14]);
| assert(it[-3] == [5, 6, 7, 8]);
| assert(*(it - 3) == [5, 6, 7, 8]);
| assert(it + 1 > it);
| assert(it + 1 - 1 == it);
| assert(it - 3 - it == -3);
| --it;
| assert(*it == [12, 13]);
|}
|
|/++
|Element type of $(LREF TripletIterator).
|+/
|struct Triplet(Iterator, SliceKind kind = Contiguous)
|{
|@optmath:
| ///
| size_t _iterator;
| ///
| Slice!(Iterator, 1, kind) _slice;
|
| ///
| auto lightConst()() const @property
| {
|        return Triplet!(LightConstOf!Iterator, kind)(_iterator, _slice.lightConst);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
|        return Triplet!(LightImmutableOf!Iterator, kind)(_iterator, _slice.lightImmutable);
| }
|
| @property
| {
| ///
| auto ref center()
| {
| assert(_iterator < _slice.length);
| return _slice[_iterator];
| }
|
| ///
| Slice!(Iterator, 1, kind) left()
| {
| assert(_iterator < _slice.length);
| return _slice[0 .. _iterator];
| }
|
| ///
| Slice!(Iterator, 1, kind) right()
| {
| assert(_iterator < _slice.length);
| return _slice[_iterator + 1 .. $];
| }
| }
|}
|
|/++
|Iterates over triplet positions in a slice.
|
|`TripletIterator` is used by $(SUBREF topology, triplets).
|+/
|struct TripletIterator(Iterator, SliceKind kind = Contiguous)
|{
|@optmath:
|
| ///
| size_t _iterator;
| ///
| Slice!(Iterator, 1, kind) _slice;
|
| ///
| auto lightConst()() const @property
| {
| return TripletIterator!(LightConstOf!Iterator, kind)(_iterator, _slice.lightConst);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return TripletIterator!(LightImmutableOf!Iterator, kind)(_iterator, _slice.lightImmutable);
| }
|
| ///
| Triplet!(Iterator, kind) opUnary(string op : "*")()
| {
| return typeof(return)(_iterator, _slice);
| }
|
| ///
| Triplet!(Iterator, kind) opIndex()(ptrdiff_t index)
| {
| return typeof(return)(_iterator + index, _slice);
| }
|
| mixin(std_ops);
|}
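|
|// A minimal usage sketch: it assumes `triplets` from mir.ndslice.topology builds a
|// slice of `Triplet` values over a 1-dimensional slice, so that element `i` exposes
|// `left == v[0 .. i]`, `center == v[i]`, and `right == v[i + 1 .. $]` as documented
|// for `Triplet` above.
|version(mir_test) unittest
|{
|    import mir.ndslice.slice: sliced;
|    import mir.ndslice.topology: triplets;
|
|    auto v = [1, 2, 3, 4].sliced;
|    auto t = v.triplets;
|    assert(t[1].left == [1]);
|    assert(t[1].center == 2);
|    assert(t[1].right == [3, 4]);
|}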
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/iterator.d is 0% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-topology.lst
|/++
|This is a submodule of $(MREF mir,ndslice).
|
|Selectors create new views and iteration patterns over the same data, without copying.
|
|$(BOOKTABLE $(H2 Sequence Selectors),
|$(TR $(TH Function Name) $(TH Description))
|
|$(T2 cycle, Cycle repeats a 1-dimensional field/range/array/slice in a fixed-length 1-dimensional slice)
|$(T2 iota, Contiguous Slice with initial flattened (contiguous) index.)
|$(T2 linspace, Evenly spaced numbers over a specified interval.)
|$(T2 magic, Magic square.)
|$(T2 ndiota, Contiguous Slice with initial multidimensional index.)
|$(T2 repeat, Slice with identical values)
|)
|
|$(BOOKTABLE $(H2 Shape Selectors),
|$(TR $(TH Function Name) $(TH Description))
|
|$(T2 blocks, n-dimensional slice composed of n-dimensional non-overlapping blocks. If the slice has two dimensions, it is a block matrix.)
|$(T2 diagonal, 1-dimensional slice composed of diagonal elements)
|$(T2 dropBorders, Drops borders for all dimensions.)
|$(T2 reshape, New slice view with changed dimensions)
|$(T2 squeeze, New slice view of an n-dimensional slice with a dimension removed)
|$(T2 unsqueeze, New slice view of an n-dimensional slice with a dimension added)
|$(T2 windows, n-dimensional slice of n-dimensional overlapping windows. If the slice has two dimensions, it is a sliding window.)
|
|)
|
|
|$(BOOKTABLE $(H2 Subspace Selectors),
|$(TR $(TH Function Name) $(TH Description))
|
|$(T2 alongDim , Returns a slice that can be iterated along a dimension.)
|$(T2 byDim , Returns a slice that can be iterated by dimension.)
|$(T2 pack , Returns slice of slices.)
|$(T2 ipack , Returns slice of slices.)
|$(T2 unpack , Merges two high dimension packs. See also $(SUBREF fuse, fuse).)
|$(T2 evertPack, Reverses dimension packs.)
|
|)
|
|$(BOOKTABLE $(H2 SliceKind Selectors),
|$(TR $(TH Function Name) $(TH Description))
|
|$(T2 universal, Converts a slice to universal $(SUBREF slice, SliceKind).)
|$(T2 canonical, Converts a slice to canonical $(SUBREF slice, SliceKind).)
|$(T2 assumeCanonical, Converts a slice to canonical $(SUBREF slice, SliceKind). Performs only `assert` checks.)
|$(T2 assumeContiguous, Converts a slice to contiguous $(SUBREF slice, SliceKind). Performs only `assert` checks.)
|$(T2 assumeHypercube, Helps the compiler to use optimisations related to the shape form. Performs only `assert` checks.)
|$(T2 assumeSameShape, Helps the compiler to use optimisations related to the shape form. Performs only `assert` checks.)
|
|)
|
|$(BOOKTABLE $(H2 Products),
|$(TR $(TH Function Name) $(TH Description))
|
|$(T2 cartesian, Cartesian product.)
|$(T2 kronecker, Kronecker product.)
|
|)
|
|$(BOOKTABLE $(H2 Representation Selectors),
|$(TR $(TH Function Name) $(TH Description))
|
|$(T2 as, Convenience function that creates a lazy view,
|where each element of the original slice is converted to a type `T`.)
|$(T2 bitpack, Bitpack slice over an unsigned integral slice.)
|$(T2 bitwise, Bitwise slice over an unsigned integral slice.)
|$(T2 bytegroup, Groups existing slice into fixed length chunks and uses them as data store for destination type.)
|$(T2 cached, Random access cache. It is useful in combination with $(LREF map) and $(LREF vmap).)
|$(T2 cachedGC, Random access cache auto-allocated in GC heap. It is useful in combination with $(LREF map) and $(LREF vmap).)
|$(T2 diff, Differences between vector elements.)
|$(T2 flattened, Contiguous 1-dimensional slice of all elements of a slice.)
|$(T2 map, Multidimensional functional map.)
|$(T2 member, Field (element's member) projection.)
|$(T2 orthogonalReduceField, Functional deep-element wise reduce of a slice composed of fields or iterators.)
|$(T2 pairwise, Pairwise map for vectors.)
|$(T2 pairwiseMapSubSlices, Maps pairwise index pairs to subslices.)
|$(T2 retro, Reverses order of iteration for all dimensions.)
|$(T2 slide, Lazy convolution for tensors.)
|$(T2 slideAlong, Lazy convolution for tensors.)
|$(T2 stairs, Two functions to pack, unpack, and iterate triangular and symmetric matrix storage.)
|$(T2 stride, Strides 1-dimensional slice.)
|$(T2 subSlices, Maps index pairs to subslices.)
|$(T2 triplets, Constructs a lazy view of triplets with `left`, `center`, and `right` members. The topology is useful for math and physics.)
|$(T2 unzip, Selects a slice from a zipped slice.)
|$(T2 withNeighboursSum, Zip view of elements packed with sum of their neighbours.)
|$(T2 zip, Zips slices into a slice of refTuples.)
|)
|
|Subspace selectors serve to generalize and combine other selectors easily.
|For a slice of `Slice!(Iterator, N, kind)` type `slice.pack!K` creates a slice of
|slices of `Slice!(kind, [N - K, K], Iterator)` type by packing
|the last `K` dimensions of the top dimension pack,
|and the type of element of $(LREF flattened) is `Slice!(Iterator, K)`.
|Another way to use $(LREF pack) is transposition of dimension packs using
|$(LREF evertPack).
|Examples of use of subspace selectors are available for selectors,
|$(SUBREF slice, Slice.shape), and $(SUBREF slice, Slice.elementCount).
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko, John Michael Hall, Shigeki Karita (original numir code)
|
|Sponsors: Part of this work has been sponsored by $(LINK2 http://symmetryinvestments.com, Symmetry Investments) and Kaleidic Associates.
|
|Macros:
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|T4=$(TR $(TDNW $(LREF $1)) $(TD $2) $(TD $3) $(TD $4))
|+/
|module mir.ndslice.topology;
|
|import mir.internal.utility;
|import mir.math.common: optmath;
|import mir.ndslice.field;
|import mir.ndslice.internal;
|import mir.ndslice.iterator;
|import mir.ndslice.ndfield;
|import mir.ndslice.slice;
|import mir.primitives;
|import mir.qualifier;
|import mir.utility: min;
|import std.meta: AliasSeq, allSatisfy, staticMap, templateOr, Repeat;
|
|private immutable choppedExceptionMsg = "bounds passed to chopped are out of sliceable bounds.";
|version (D_Exceptions) private immutable choppedException = new Exception(choppedExceptionMsg);
|
|@optmath:
|
|/++
|Converts a slice to universal kind.
|
|Params:
| slice = a slice
|Returns:
| universal slice
|See_also:
| $(LREF canonical),
| $(LREF assumeCanonical),
| $(LREF assumeContiguous).
|+/
|auto universal(Iterator, size_t N, SliceKind kind, Labels...)(Slice!(Iterator, N, kind, Labels) slice)
|{
| import core.lifetime: move;
|
| static if (kind == Universal)
| {
| return slice;
| }
| else
| static if (is(Iterator : RetroIterator!It, It))
| {
| return slice.move.retro.universal.retro;
| }
| else
| {
| alias Ret = Slice!(Iterator, N, Universal, Labels);
| size_t[Ret.N] lengths;
| auto strides = sizediff_t[Ret.S].init;
| foreach (i; Iota!(slice.N))
| lengths[i] = slice._lengths[i];
| static if (kind == Canonical)
| {
| foreach (i; Iota!(slice.S))
| strides[i] = slice._strides[i];
| strides[$-1] = 1;
| }
| else
| {
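|            // Contiguous source: rebuild row-major strides as a running product of the
|            // lengths, starting from 1 at the innermost dimension.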
| ptrdiff_t ball = 1;
| foreach_reverse (i; Iota!(Ret.S))
| {
| strides[i] = ball;
| static if (i)
| ball *= slice._lengths[i];
| }
| }
| return Ret(lengths, strides, slice._iterator.move, slice._labels);
| }
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto slice = iota(2, 3).universal;
| assert(slice == [[0, 1, 2], [3, 4, 5]]);
| assert(slice._lengths == [2, 3]);
| assert(slice._strides == [3, 1]);
|}
|
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto slice = iota(2, 3).canonical.universal;
| assert(slice == [[0, 1, 2], [3, 4, 5]]);
| assert(slice._lengths == [2, 3]);
| assert(slice._strides == [3, 1]);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.allocation: slice;
|
| auto dataframe = slice!(double, int, string)(2, 3);
| dataframe.label[] = [1, 2];
| dataframe.label!1[] = ["Label1", "Label2", "Label3"];
|
| auto universaldf = dataframe.universal;
| assert(universaldf._lengths == [2, 3]);
| assert(universaldf._strides == [3, 1]);
|
| assert(is(typeof(universaldf) ==
| Slice!(double*, 2, Universal, int*, string*)));
| assert(universaldf.label!0[0] == 1);
| assert(universaldf.label!1[1] == "Label2");
|}
|
|/++
|Converts a slice to canonical kind.
|
|Params:
| slice = contiguous or canonical slice
|Returns:
| canonical slice
|See_also:
| $(LREF universal),
| $(LREF assumeCanonical),
| $(LREF assumeContiguous).
|+/
|Slice!(Iterator, N, N == 1 ? Contiguous : Canonical, Labels)
| canonical
| (Iterator, size_t N, SliceKind kind, Labels...)
| (Slice!(Iterator, N, kind, Labels) slice)
| if (kind == Contiguous || kind == Canonical)
|{
| import core.lifetime: move;
|
| static if (kind == Canonical || N == 1)
| return slice;
| else
| {
| alias Ret = typeof(return);
| size_t[Ret.N] lengths;
| auto strides = sizediff_t[Ret.S].init;
| foreach (i; Iota!(slice.N))
| lengths[i] = slice._lengths[i];
| ptrdiff_t ball = 1;
| foreach_reverse (i; Iota!(Ret.S))
| {
| ball *= slice._lengths[i + 1];
| strides[i] = ball;
| }
| return Ret(lengths, strides, slice._iterator.move, slice._labels);
| }
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto slice = iota(2, 3).canonical;
| assert(slice == [[0, 1, 2], [3, 4, 5]]);
| assert(slice._lengths == [2, 3]);
| assert(slice._strides == [3]);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.allocation: slice;
|
| auto dataframe = slice!(double, int, string)(2, 3);
| dataframe.label[] = [1, 2];
| dataframe.label!1[] = ["Label1", "Label2", "Label3"];
|
| auto canonicaldf = dataframe.canonical;
| assert(canonicaldf._lengths == [2, 3]);
| assert(canonicaldf._strides == [3]);
|
| assert(is(typeof(canonicaldf) ==
| Slice!(double*, 2, Canonical, int*, string*)));
| assert(canonicaldf.label!0[0] == 1);
| assert(canonicaldf.label!1[1] == "Label2");
|}
|
|/++
|Converts a slice to canonical kind (unsafe).
|
|Params:
| slice = a slice
|Returns:
| canonical slice
|See_also:
| $(LREF universal),
| $(LREF canonical),
| $(LREF assumeContiguous).
|+/
|Slice!(Iterator, N, Canonical, Labels)
| assumeCanonical
| (Iterator, size_t N, SliceKind kind, Labels...)
| (Slice!(Iterator, N, kind, Labels) slice)
|{
| static if (kind == Contiguous)
| return slice.canonical;
| else
| static if (kind == Canonical)
| return slice;
| else
| {
| import mir.utility: swap;
| assert(slice._lengths[N - 1] <= 1 || slice._strides[N - 1] == 1);
| typeof(return) ret;
| ret._lengths = slice._lengths;
| ret._strides = slice._strides[0 .. $ - 1];
| swap(ret._iterator, slice._iterator);
| foreach(i, _; Labels)
| swap(ret._labels[i], slice._labels[i]);
| return ret;
| }
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto slice = iota(2, 3).universal.assumeCanonical;
| assert(slice == [[0, 1, 2], [3, 4, 5]]);
| assert(slice._lengths == [2, 3]);
| assert(slice._strides == [3]);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.allocation: slice;
|
| auto dataframe = slice!(double, int, string)(2, 3);
| dataframe.label[] = [1, 2];
| dataframe.label!1[] = ["Label1", "Label2", "Label3"];
|
| auto assmcanonicaldf = dataframe.assumeCanonical;
| assert(assmcanonicaldf._lengths == [2, 3]);
| assert(assmcanonicaldf._strides == [3]);
|
| assert(is(typeof(assmcanonicaldf) ==
| Slice!(double*, 2, Canonical, int*, string*)));
| assert(assmcanonicaldf.label!0[0] == 1);
| assert(assmcanonicaldf.label!1[1] == "Label2");
|}
|
|/++
|Converts a slice to contiguous kind (unsafe).
|
|Params:
| slice = a slice
|Returns:
|    contiguous slice
|See_also:
| $(LREF universal),
| $(LREF canonical),
| $(LREF assumeCanonical).
|+/
|Slice!(Iterator, N, Contiguous, Labels)
| assumeContiguous
| (Iterator, size_t N, SliceKind kind, Labels...)
| (Slice!(Iterator, N, kind, Labels) slice)
|{
| static if (kind == Contiguous)
| return slice;
| else
| {
| import mir.utility: swap;
| typeof(return) ret;
| ret._lengths = slice._lengths;
| swap(ret._iterator, slice._iterator);
| foreach(i, _; Labels)
| swap(ret._labels[i], slice._labels[i]);
| return ret;
| }
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto slice = iota(2, 3).universal.assumeContiguous;
| assert(slice == [[0, 1, 2], [3, 4, 5]]);
| assert(slice._lengths == [2, 3]);
| static assert(slice._strides.length == 0);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.allocation: slice;
|
| auto dataframe = slice!(double, int, string)(2, 3);
| dataframe.label[] = [1, 2];
| dataframe.label!1[] = ["Label1", "Label2", "Label3"];
|
| auto assmcontdf = dataframe.canonical.assumeContiguous;
| assert(assmcontdf._lengths == [2, 3]);
| static assert(assmcontdf._strides.length == 0);
|
| assert(is(typeof(assmcontdf) ==
| Slice!(double*, 2, Contiguous, int*, string*)));
| assert(assmcontdf.label!0[0] == 1);
| assert(assmcontdf.label!1[1] == "Label2");
|}
|
|/++
|Helps the compiler to use optimisations related to the shape form
|+/
|void assumeHypercube
| (Iterator, size_t N, SliceKind kind, Labels...)
| (ref scope Slice!(Iterator, N, kind, Labels) slice)
|{
| foreach (i; Iota!(1, N))
| {
| assert(slice._lengths[i] == slice._lengths[0]);
| slice._lengths[i] = slice._lengths[0];
| }
|}
|
|///
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| auto b = iota(5, 5);
|
| assumeHypercube(b);
|
| assert(b == iota(5, 5));
|}
|
|/++
|Helps the compiler to use optimisations related to the shape form
|+/
|void assumeSameShape(T...)
| (ref scope T slices)
| if (allSatisfy!(isSlice, T))
|{
| foreach (i; Iota!(1, T.length))
| {
| assert(slices[i]._lengths == slices[0]._lengths);
| slices[i]._lengths = slices[0]._lengths;
| }
|}
|
|///
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| auto a = iota(5, 5);
| auto b = iota(5, 5);
|
| assumeHypercube(a); // first use this one, if applicable
| assumeSameShape(a, b); //
|
| assert(a == iota(5, 5));
| assert(b == iota(5, 5));
|}
|
|/++
|Returns a slice view built directly over the underlying field of a field-based slice,
|dropping the iterator shift. The shift must be zero; otherwise an exception is thrown
|(or the program asserts when D exceptions are disabled).
|+/
|auto assumeFieldsHaveZeroShift(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if (__traits(hasMember, Iterator, "assumeFieldsHaveZeroShift"))
|{
| return slice._iterator.assumeFieldsHaveZeroShift.slicedField(slice._lengths);
|}
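|
|// A minimal sketch, assuming a `FieldIterator`-based slice such as the one produced by
|// $(LREF ndiota); the call is only valid while the iterator shift is still zero, so a
|// freshly created slice round-trips unchanged.
|version(mir_test) unittest
|{
|    auto s = ndiota(2, 3);
|    auto z = s.assumeFieldsHaveZeroShift;
|    assert(z == s);
|}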
|
|/++
|Creates a packed slice, i.e. slice of slices.
|Packs the last `P` dimensions.
|The function does not allocate any data.
|
|Params:
| P = size of dimension pack
| slice = a slice to pack
|Returns:
|    `slice.pack!P` returns an `N - P`-dimensional slice of `P`-dimensional slices; see the declared return type above
|See_also: $(LREF ipack)
|+/
|Slice!(SliceIterator!(Iterator, P, P == 1 && kind == Canonical ? Contiguous : kind), N - P, Universal)
|pack(size_t P, Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| if (P && P < N)
|{
| import core.lifetime: move;
| return slice.move.ipack!(N - P);
|}
|
|///
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice : sliced, Slice;
|
| auto a = iota(3, 4, 5, 6);
| auto b = a.pack!2;
|
| static immutable res1 = [3, 4];
| static immutable res2 = [5, 6];
| assert(b.shape == res1);
| assert(b[0, 0].shape == res2);
| assert(a == b.unpack);
| assert(a.pack!2 == b);
| static assert(is(typeof(b) == typeof(a.pack!2)));
|}
|
|/++
|Creates a packed slice, i.e. slice of slices.
|Packs the last `N - P` dimensions.
|The function does not allocate any data.
|
|Params:
|    P = size of dimension pack
| slice = a slice to pack
|See_also: $(LREF pack)
|+/
|Slice!(SliceIterator!(Iterator, N - P, N - P == 1 && kind == Canonical ? Contiguous : kind), P, Universal)
|ipack(size_t P, Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| if (P && P < N)
|{
| import core.lifetime: move;
| alias Ret = typeof(return);
| alias It = Ret.Iterator;
| alias EN = It.Element.N;
| alias ES = It.Element.S;
| auto sl = slice.move.universal;
| static if (It.Element.kind == Contiguous)
| return Ret(
| cast( size_t[P]) sl._lengths[0 .. P],
| cast(ptrdiff_t[P]) sl._strides[0 .. P],
| It(
| cast(size_t[EN]) sl._lengths[P .. $],
| sl._iterator.move));
| else
| return Ret(
| cast( size_t[P]) sl._lengths[0 .. P],
| cast(ptrdiff_t[P]) sl._strides[0 .. P],
| It(
| cast( size_t[EN]) sl._lengths[P .. $],
| cast(ptrdiff_t[ES]) sl._strides[P .. $ - (It.Element.kind == Canonical)],
| sl._iterator.move));
|}
|
|///
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice : sliced, Slice;
|
| auto a = iota(3, 4, 5, 6);
| auto b = a.ipack!2;
|
| static immutable res1 = [3, 4];
| static immutable res2 = [5, 6];
| assert(b.shape == res1);
| assert(b[0, 0].shape == res2);
| assert(a.ipack!2 == b);
| static assert(is(typeof(b) == typeof(a.ipack!2)));
|}
|
|/++
|Unpacks a packed slice.
|
|The function does not allocate any data.
|
|Params:
| slice = packed slice
|Returns:
| unpacked slice, that is a view on the same data.
|
|See_also: $(LREF pack), $(LREF evertPack)
|+/
|Slice!(Iterator, N + M, min(innerKind, Canonical))
| unpack(Iterator, size_t M, SliceKind innerKind, size_t N, SliceKind outerKind)
| (Slice!(SliceIterator!(Iterator, M, innerKind), N, outerKind) slice)
|{
| alias Ret = typeof(return);
| size_t[N + M] lengths;
| auto strides = sizediff_t[Ret.S].init;
| auto outerStrides = slice.strides;
| auto innerStrides = Slice!(Iterator, M, innerKind)(
| slice._iterator._structure,
| slice._iterator._iterator,
| ).strides;
| foreach(i; Iota!N)
| lengths[i] = slice._lengths[i];
| foreach(i; Iota!N)
| strides[i] = outerStrides[i];
| foreach(i; Iota!M)
| lengths[N + i] = slice._iterator._structure[0][i];
| foreach(i; Iota!(Ret.S - N))
| strides[N + i] = innerStrides[i];
| return Ret(lengths, strides, slice._iterator._iterator);
|}
|
|/++
|Reverses the order of dimension packs.
|This function is used in a functional pipeline with other selectors.
|
|Params:
| slice = packed slice
|Returns:
| packed slice
|
|See_also: $(LREF pack), $(LREF unpack)
|+/
|Slice!(SliceIterator!(Iterator, N, outerKind), M, innerKind)
|evertPack(Iterator, size_t M, SliceKind innerKind, size_t N, SliceKind outerKind)
| (Slice!(SliceIterator!(Iterator, M, innerKind), N, outerKind) slice)
|{
| import core.lifetime: move;
| return typeof(return)(
| slice._iterator._structure,
| typeof(return).Iterator(
| slice._structure,
| slice._iterator._iterator.move));
|}
|
|///
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.dynamic : transposed;
| auto slice = iota(3, 4, 5, 6, 7, 8, 9, 10, 11).universal;
| assert(slice
| .pack!2
| .evertPack
| .unpack
| == slice.transposed!(
| slice.shape.length-2,
| slice.shape.length-1));
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice: sliced;
| import mir.ndslice.allocation: slice;
| static assert(is(typeof(
| slice!int(6)
| .sliced(1,2,3)
| .pack!1
| .evertPack
| )
| == Slice!(SliceIterator!(int*, 2, Universal), 1)));
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| auto a = iota(3, 4, 5, 6, 7, 8, 9, 10, 11);
| auto b = a.pack!2.unpack;
| static assert(is(typeof(a.canonical) == typeof(b)));
| assert(a == b);
|}
|
|/++
|Returns a slice, the elements of which are equal to the initial flattened index value.
|
|Params:
| N = dimension count
| lengths = list of dimension lengths
| start = value of the first element in a slice (optional for integer `I`)
| stride = value of the stride between elements (optional)
|Returns:
| n-dimensional slice composed of indices
|See_also: $(LREF ndiota)
|+/
|Slice!(IotaIterator!I, N)
|iota
| (I = sizediff_t, size_t N)(size_t[N] lengths...)
| if (__traits(isIntegral, I))
|{
| import mir.ndslice.slice : sliced;
| return IotaIterator!I(I.init).sliced(lengths);
|}
|
|///ditto
|Slice!(IotaIterator!sizediff_t, N)
|iota
| (size_t N)(size_t[N] lengths, sizediff_t start)
|{
| import mir.ndslice.slice : sliced;
| return IotaIterator!sizediff_t(start).sliced(lengths);
|}
|
|///ditto
|Slice!(StrideIterator!(IotaIterator!sizediff_t), N)
|iota
| (size_t N)(size_t[N] lengths, sizediff_t start, size_t stride)
|{
| import mir.ndslice.slice : sliced;
| return StrideIterator!(IotaIterator!sizediff_t)(stride, IotaIterator!sizediff_t(start)).sliced(lengths);
|}
|
|///ditto
|template iota(I)
| if (__traits(isIntegral, I))
|{
| ///
| Slice!(IotaIterator!I, N)
| iota
| (size_t N)(size_t[N] lengths, I start)
| if (__traits(isIntegral, I))
| {
| import mir.ndslice.slice : sliced;
| return IotaIterator!I(start).sliced(lengths);
| }
|
| ///ditto
| Slice!(StrideIterator!(IotaIterator!I), N)
| iota
| (size_t N)(size_t[N] lengths, I start, size_t stride)
| if (__traits(isIntegral, I))
| {
| import mir.ndslice.slice : sliced;
| return StrideIterator!(IotaIterator!I)(stride, IotaIterator!I(start)).sliced(lengths);
| }
|}
|
|///ditto
|Slice!(IotaIterator!I, N)
|iota
| (I, size_t N)(size_t[N] lengths, I start)
| if (is(I P : P*))
|{
| import mir.ndslice.slice : sliced;
| return IotaIterator!I(start).sliced(lengths);
|}
|
|///ditto
|Slice!(StrideIterator!(IotaIterator!I), N)
|iota
| (I, size_t N)(size_t[N] lengths, I start, size_t stride)
| if (is(I P : P*))
|{
| import mir.ndslice.slice : sliced;
| return StrideIterator!(IotaIterator!I)(stride, IotaIterator!I(start)).sliced(lengths);
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| auto slice = iota(2, 3);
| static immutable array =
| [[0, 1, 2],
| [3, 4, 5]];
|
| assert(slice == array);
|
| static assert(is(DeepElementType!(typeof(slice)) == sizediff_t));
|}
|
|///
|pure nothrow @nogc
|version(mir_test) unittest
|{
| int[6] data;
| auto slice = iota([2, 3], data.ptr);
| assert(slice[0, 0] == data.ptr);
| assert(slice[0, 1] == data.ptr + 1);
| assert(slice[1, 0] == data.ptr + 3);
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| auto im = iota([10, 5], 100);
| assert(im[2, 1] == 111); // 100 + 2 * 5 + 1
|
| //slicing works correctly
| auto cm = im[1 .. $, 3 .. $];
| assert(cm[2, 1] == 119); // 119 = 100 + (1 + 2) * 5 + (3 + 1)
|}
|
|/// `iota` with step
|@safe pure nothrow version(mir_test) unittest
|{
| auto sl = iota([2, 3], 10, 10);
|
| assert(sl == [[10, 20, 30],
| [40, 50, 60]]);
|}
|
|/++
|Returns a 1-dimensional slice over the main diagonal of an n-dimensional slice.
|`diagonal` can be generalized with other selectors such as
|$(LREF blocks) (diagonal blocks) and $(LREF windows) (multi-diagonal slice).
|
|Params:
| slice = input slice
|Returns:
| 1-dimensional slice composed of diagonal elements
|See_also: $(LREF antidiagonal)
|+/
|Slice!(Iterator, 1, N == 1 ? kind : Universal)
| diagonal
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
|{
| static if (N == 1)
| {
| return slice;
| }
| else
| {
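|        // The main diagonal advances one step along every dimension at once, so its
|        // stride is the sum of the per-dimension strides and its length is the
|        // minimum of the dimension lengths.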
| alias Ret = typeof(return);
| size_t[Ret.N] lengths;
| auto strides = sizediff_t[Ret.S].init;
| lengths[0] = slice._lengths[0];
| foreach (i; Iota!(1, N))
| if (lengths[0] > slice._lengths[i])
| lengths[0] = slice._lengths[i];
| foreach (i; Iota!(1, Ret.N))
| lengths[i] = slice._lengths[i + N - 1];
| auto rstrides = slice.strides;
| strides[0] = rstrides[0];
| foreach (i; Iota!(1, N))
| strides[0] += rstrides[i];
| foreach (i; Iota!(1, Ret.S))
| strides[i] = rstrides[i + N - 1];
| return Ret(lengths, strides, slice._iterator);
| }
|}
|
|/// Matrix, main diagonal
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| // -------
| // | 0 1 2 |
| // | 3 4 5 |
| // -------
| //->
| // | 0 4 |
| static immutable d = [0, 4];
| assert(iota(2, 3).diagonal == d);
|}
|
|/// Non-square matrix
|@safe pure nothrow version(mir_test) unittest
|{
| // -------
| // | 0 1 |
| // | 2 3 |
| // | 4 5 |
| // -------
| //->
| // | 0 3 |
|
| assert(iota(3, 2).diagonal == iota([2], 0, 3));
|}
|
|/// Loop through diagonal
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.allocation;
|
| auto slice = slice!int(3, 3);
| int i;
| foreach (ref e; slice.diagonal)
| e = ++i;
| assert(slice == [
| [1, 0, 0],
| [0, 2, 0],
| [0, 0, 3]]);
|}
|
|/// Matrix, subdiagonal
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| // -------
| // | 0 1 2 |
| // | 3 4 5 |
| // -------
| //->
| // | 1 5 |
| static immutable d = [1, 5];
| auto a = iota(2, 3).canonical;
| a.popFront!1;
| assert(a.diagonal == d);
|}
|
|/// 3D, main diagonal
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| // -----------
| // | 0 1 2 |
| // | 3 4 5 |
| // - - - - - -
| // | 6 7 8 |
| // | 9 10 11 |
| // -----------
| //->
| // | 0 10 |
| static immutable d = [0, 10];
| assert(iota(2, 2, 3).diagonal == d);
|}
|
|/// 3D, subdiagonal
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| // -----------
| // | 0 1 2 |
| // | 3 4 5 |
| // - - - - - -
| // | 6 7 8 |
| // | 9 10 11 |
| // -----------
| //->
| // | 1 11 |
| static immutable d = [1, 11];
| auto a = iota(2, 2, 3).canonical;
| a.popFront!2;
| assert(a.diagonal == d);
|}
|
|/// 3D, diagonal plain
|@nogc @safe pure nothrow
|version(mir_test) unittest
|{
| // -----------
| // | 0 1 2 |
| // | 3 4 5 |
| // | 6 7 8 |
| // - - - - - -
| // | 9 10 11 |
| // | 12 13 14 |
| // | 15 16 17 |
| // - - - - - -
|    // | 18 19 20 |
|    // | 21 22 23 |
| // | 24 25 26 |
| // -----------
| //->
| // -----------
| // | 0 4 8 |
| // | 9 13 17 |
|    // | 18 22 26 |
| // -----------
|
| static immutable d =
| [[ 0, 4, 8],
| [ 9, 13, 17],
| [18, 22, 26]];
|
| auto slice = iota(3, 3, 3)
| .pack!2
| .evertPack
| .diagonal
| .evertPack;
|
| assert(slice == d);
|}
|
|/++
|Returns a 1-dimensional slice over the main antidiagonal of a 2-dimensional slice.
|`antidiagonal` can be generalized with other selectors such as
|$(LREF blocks) (diagonal blocks) and $(LREF windows) (multi-diagonal slice).
|
|It runs from the top right corner to the bottom left corner.
|
|Pseudo_code:
|------
|auto antidiagonal = slice.dropToHypercube.reversed!1.diagonal;
|------
|
|Params:
| slice = input slice
|Returns:
| 1-dimensional slice composed of antidiagonal elements.
|See_also: $(LREF diagonal)
|+/
|Slice!(Iterator, 1, Universal)
| antidiagonal
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if (N == 2)
|{
| import mir.ndslice.dynamic : dropToHypercube, reversed;
| return slice.dropToHypercube.reversed!1.diagonal;
|}
|
|///
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| // -----
| // | 0 1 |
| // | 2 3 |
| // -----
| //->
| // | 1 2 |
| static immutable c = [1, 2];
| import std.stdio;
| assert(iota(2, 2).antidiagonal == c);
|}
|
|///
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| // -------
| // | 0 1 2 |
| // | 3 4 5 |
| // -------
| //->
| // | 1 3 |
| static immutable d = [1, 3];
| assert(iota(2, 3).antidiagonal == d);
|}
|
|/++
|Returns an n-dimensional slice of n-dimensional non-overlapping blocks.
|`blocks` can be generalized with other selectors.
|For example, `blocks` in combination with $(LREF diagonal) can be used to get a slice of diagonal blocks.
|For overlapped blocks, combine $(LREF windows) with $(SUBREF dynamic, strided).
|
|Params:
| N = dimension count
| slice = slice to be split into blocks
| rlengths_ = dimensions of block, residual blocks are ignored
|Returns:
| packed `N`-dimensional slice composed of `N`-dimensional slices
|
|See_also: $(SUBREF chunks, ._chunks)
|+/
|Slice!(SliceIterator!(Iterator, N, N == 1 ? Universal : min(kind, Canonical)), N, Universal)
| blocks
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice, size_t[N] rlengths_...)
|in
|{
| foreach (i, length; rlengths_)
| assert(length > 0, "length of dimension = " ~ i.stringof ~ " must be positive"
| ~ tailErrorMessage!());
|}
|do
|{
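|    // The outer (block) slice steps over whole blocks, so each element stride is
|    // scaled by the block length along that dimension, while the inner packed slice
|    // keeps the original element strides.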
| size_t[N] lengths;
| size_t[N] rlengths = rlengths_;
| sizediff_t[N] strides;
| foreach (dimension; Iota!N)
| lengths[dimension] = slice._lengths[dimension] / rlengths[dimension];
| auto rstrides = slice.strides;
| foreach (i; Iota!N)
| {
| strides[i] = rstrides[i];
| if (lengths[i]) //do not remove `if (...)`
| strides[i] *= rlengths[i];
| }
| return typeof(return)(
| lengths,
| strides,
| typeof(return).Iterator(
| rlengths,
| rstrides[0 .. typeof(return).DeepElement.S],
| slice._iterator));
|}
|
|///
|pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.allocation;
| auto slice = slice!int(5, 8);
| auto blocks = slice.blocks(2, 3);
| int i;
| foreach (blocksRaw; blocks)
| foreach (block; blocksRaw)
| block[] = ++i;
|
| assert(blocks ==
| [[[[1, 1, 1], [1, 1, 1]],
| [[2, 2, 2], [2, 2, 2]]],
| [[[3, 3, 3], [3, 3, 3]],
| [[4, 4, 4], [4, 4, 4]]]]);
|
| assert( slice ==
| [[1, 1, 1, 2, 2, 2, 0, 0],
| [1, 1, 1, 2, 2, 2, 0, 0],
|
| [3, 3, 3, 4, 4, 4, 0, 0],
| [3, 3, 3, 4, 4, 4, 0, 0],
|
| [0, 0, 0, 0, 0, 0, 0, 0]]);
|}
|
|/// Diagonal blocks
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.allocation;
| auto slice = slice!int(5, 8);
| auto blocks = slice.blocks(2, 3);
| auto diagonalBlocks = blocks.diagonal.unpack;
|
| diagonalBlocks[0][] = 1;
| diagonalBlocks[1][] = 2;
|
| assert(diagonalBlocks ==
| [[[1, 1, 1], [1, 1, 1]],
| [[2, 2, 2], [2, 2, 2]]]);
|
| assert(blocks ==
| [[[[1, 1, 1], [1, 1, 1]],
| [[0, 0, 0], [0, 0, 0]]],
| [[[0, 0, 0], [0, 0, 0]],
| [[2, 2, 2], [2, 2, 2]]]]);
|
| assert(slice ==
| [[1, 1, 1, 0, 0, 0, 0, 0],
| [1, 1, 1, 0, 0, 0, 0, 0],
|
| [0, 0, 0, 2, 2, 2, 0, 0],
| [0, 0, 0, 2, 2, 2, 0, 0],
|
| [0, 0, 0, 0, 0, 0, 0, 0]]);
|}
|
|/// Matrix divided into vertical blocks
|@safe pure version(mir_test) unittest
|{
| import mir.ndslice.allocation;
| import mir.ndslice.slice;
| auto slice = slice!int(5, 13);
| auto blocks = slice
| .pack!1
| .evertPack
| .blocks(3)
| .unpack;
|
| int i;
| foreach (block; blocks)
| block[] = ++i;
|
| assert(slice ==
| [[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0],
| [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0],
| [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0],
| [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0],
| [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0]]);
|}
|
|/++
|Returns an n-dimensional slice of n-dimensional overlapping windows.
|`windows` can be generalized with other selectors.
|For example, `windows` in combination with $(LREF diagonal) can be used to get a multi-diagonal slice.
|
|Params:
| N = dimension count
| slice = slice to be iterated
| rlengths = dimensions of windows
|Returns:
| packed `N`-dimensional slice composed of `N`-dimensional slices
|+/
|Slice!(SliceIterator!(Iterator, N, N == 1 ? kind : min(kind, Canonical)), N, Universal)
| windows
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice, size_t[N] rlengths...)
|in
|{
| foreach (i, length; rlengths)
| assert(length > 0, "length of dimension = " ~ i.stringof ~ " must be positive"
| ~ tailErrorMessage!());
|}
|do
|{
| size_t[N] rls = rlengths;
| size_t[N] lengths;
| foreach (dimension; Iota!N)
| lengths[dimension] = slice._lengths[dimension] >= rls[dimension] ?
| slice._lengths[dimension] - rls[dimension] + 1 : 0;
| auto rstrides = slice.strides;
| static if (typeof(return).DeepElement.S)
| return typeof(return)(
| lengths,
| rstrides,
| typeof(return).Iterator(
| rls,
| rstrides[0 .. typeof(return).DeepElement.S],
| slice._iterator));
| else
| return typeof(return)(
| lengths,
| rstrides,
| typeof(return).Iterator(
| rls,
| slice._iterator));
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation;
| import mir.ndslice.slice;
| auto slice = slice!int(5, 8);
| auto windows = slice.windows(2, 3);
|
| int i;
| foreach (windowsRaw; windows)
| foreach (window; windowsRaw)
| ++window[];
|
| assert(slice ==
| [[1, 2, 3, 3, 3, 3, 2, 1],
|
| [2, 4, 6, 6, 6, 6, 4, 2],
| [2, 4, 6, 6, 6, 6, 4, 2],
| [2, 4, 6, 6, 6, 6, 4, 2],
|
| [1, 2, 3, 3, 3, 3, 2, 1]]);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation;
| import mir.ndslice.slice;
| auto slice = slice!int(5, 8);
| auto windows = slice.windows(2, 3);
| windows[1, 2][] = 1;
| windows[1, 2][0, 1] += 1;
| windows.unpack[1, 2, 0, 1] += 1;
|
| assert(slice ==
| [[0, 0, 0, 0, 0, 0, 0, 0],
|
| [0, 0, 1, 3, 1, 0, 0, 0],
| [0, 0, 1, 1, 1, 0, 0, 0],
|
| [0, 0, 0, 0, 0, 0, 0, 0],
| [0, 0, 0, 0, 0, 0, 0, 0]]);
|}
|
|/// Multi-diagonal matrix
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation;
| import mir.ndslice.slice;
| auto slice = slice!int(8, 8);
| auto windows = slice.windows(3, 3);
|
| auto multidiagonal = windows
| .diagonal
| .unpack;
| foreach (window; multidiagonal)
| window[] += 1;
|
| assert(slice ==
| [[ 1, 1, 1, 0, 0, 0, 0, 0],
| [ 1, 2, 2, 1, 0, 0, 0, 0],
| [ 1, 2, 3, 2, 1, 0, 0, 0],
| [0, 1, 2, 3, 2, 1, 0, 0],
| [0, 0, 1, 2, 3, 2, 1, 0],
| [0, 0, 0, 1, 2, 3, 2, 1],
| [0, 0, 0, 0, 1, 2, 2, 1],
| [0, 0, 0, 0, 0, 1, 1, 1]]);
|}
|
|/// Sliding window over matrix columns
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation;
| import mir.ndslice.slice;
| auto slice = slice!int(5, 8);
| auto windows = slice
| .pack!1
| .evertPack
| .windows(3)
| .unpack;
|
| foreach (window; windows)
| window[] += 1;
|
| assert(slice ==
| [[1, 2, 3, 3, 3, 3, 2, 1],
| [1, 2, 3, 3, 3, 3, 2, 1],
| [1, 2, 3, 3, 3, 3, 2, 1],
| [1, 2, 3, 3, 3, 3, 2, 1],
| [1, 2, 3, 3, 3, 3, 2, 1]]);
|}
|
|/// Overlapping blocks using windows
|@safe pure nothrow version(mir_test) unittest
|{
| // ----------------
| // | 0 1 2 3 4 |
| // | 5 6 7 8 9 |
| // | 10 11 12 13 14 |
| // | 15 16 17 18 19 |
| // | 20 21 22 23 24 |
| // ----------------
| //->
| // ---------------------
| // | 0 1 2 | 2 3 4 |
| // | 5 6 7 | 7 8 9 |
| // | 10 11 12 | 12 13 14 |
| // | - - - - - - - - - - |
|    // | 10 11 12 | 12 13 14 |
| // | 15 16 17 | 17 18 19 |
| // | 20 21 22 | 22 23 24 |
| // ---------------------
|
| import mir.ndslice.slice;
| import mir.ndslice.dynamic : strided;
|
| auto overlappingBlocks = iota(5, 5)
| .windows(3, 3)
| .universal
| .strided!(0, 1)(2, 2);
|
| assert(overlappingBlocks ==
| [[[[ 0, 1, 2], [ 5, 6, 7], [10, 11, 12]],
| [[ 2, 3, 4], [ 7, 8, 9], [12, 13, 14]]],
| [[[10, 11, 12], [15, 16, 17], [20, 21, 22]],
| [[12, 13, 14], [17, 18, 19], [22, 23, 24]]]]);
|}
|
|version(mir_test) unittest
|{
| auto w = iota(9, 9).windows(3, 3);
| assert(w.front == w[0]);
|}
|
|/++
|Error codes for $(LREF reshape).
|+/
|enum ReshapeError
|{
| /// No error
| none,
|    /// Slice should not be empty
| empty,
| /// Total element count should be the same
| total,
| /// Structure is incompatible with new shape
| incompatible,
|}
|
|/++
|Returns a new slice for the same data with different dimensions.
|
|Params:
| slice = slice to be reshaped
| rlengths = list of new dimensions. One of the lengths can be set to `-1`.
|        In this case, the corresponding dimension is inferred from the total element count.
| err = $(LREF ReshapeError) code
|Returns:
| reshaped slice
|+/
|Slice!(Iterator, M, kind) reshape
| (Iterator, size_t N, SliceKind kind, size_t M)
| (Slice!(Iterator, N, kind) slice, ptrdiff_t[M] rlengths, ref int err)
|{
| static if (kind == Canonical)
| {
| auto r = slice.universal.reshape(rlengths, err);
| assert(err || r._strides[$-1] == 1);
| r._strides[$-1] = 1;
| return r.assumeCanonical;
| }
| else
| {
| alias Ret = typeof(return);
| auto structure = Ret._Structure.init;
| alias lengths = structure[0];
| foreach (i; Iota!M)
| lengths[i] = rlengths[i];
|
| /// Code size optimization
| immutable size_t eco = slice.elementCount;
| size_t ecn = lengths[0 .. rlengths.length].iota.elementCount;
| if (eco == 0)
| {
| err = ReshapeError.empty;
| goto R;
| }
| foreach (i; Iota!M)
| if (lengths[i] == -1)
| {
| ecn = -ecn;
| lengths[i] = eco / ecn;
| ecn *= lengths[i];
| break;
| }
| if (eco != ecn)
| {
| err = ReshapeError.total;
| goto R;
| }
| static if (kind == Universal)
| {
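|            // Match the old and new shapes group by group: extend each group until the
|            // element-count products agree, check that the old strides are contiguous
|            // within the group (stride[l] == length[r] * stride[r]), and then rebuild
|            // the new strides right-to-left inside the group. Any mismatch reports
|            // ReshapeError.incompatible.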
| for (size_t oi, ni, oj, nj; oi < N && ni < M; oi = oj, ni = nj)
| {
| size_t op = slice._lengths[oj++];
| size_t np = lengths[nj++];
|
| for (;;)
| {
| if (op < np)
| op *= slice._lengths[oj++];
| if (op > np)
| np *= lengths[nj++];
| if (op == np)
| break;
| }
| while (oj < N && slice._lengths[oj] == 1) oj++;
| while (nj < M && lengths[nj] == 1) nj++;
|
| for (size_t l = oi, r = oi + 1; r < oj; r++)
| if (slice._lengths[r] != 1)
| {
| if (slice._strides[l] != slice._lengths[r] * slice._strides[r])
| {
| err = ReshapeError.incompatible;
| goto R;
| }
| l = r;
| }
| assert((oi == N) == (ni == M));
|
| structure[1][nj - 1] = slice._strides[oj - 1];
| foreach_reverse (i; ni .. nj - 1)
| structure[1][i] = lengths[i + 1] * structure[1][i + 1];
| }
| }
| foreach (i; Iota!(M, Ret.N))
| lengths[i] = slice._lengths[i + N - M];
| static if (M < Ret.S)
| foreach (i; Iota!(M, Ret.S))
| structure[1][i] = slice._strides[i + N - M];
| err = 0;
| return Ret(structure, slice._iterator);
| R:
| return Ret(structure, slice._iterator.init);
| }
|}
|
|///
|@safe nothrow pure
|version(mir_test) unittest
|{
| import mir.ndslice.dynamic : allReversed;
| int err;
| auto slice = iota(3, 4)
| .universal
| .allReversed
| .reshape([-1, 3], err);
| assert(err == 0);
| assert(slice ==
| [[11, 10, 9],
| [ 8, 7, 6],
| [ 5, 4, 3],
| [ 2, 1, 0]]);
|}
|
|/// Reshaping with memory allocation
|@safe pure version(mir_test) unittest
|{
| import mir.ndslice.slice: sliced;
| import mir.ndslice.allocation: slice;
| import mir.ndslice.dynamic : reversed;
|
| auto reshape2(S, size_t M)(S sl, ptrdiff_t[M] lengths)
| {
| int err;
| // Tries to reshape without allocation
| auto ret = sl.reshape(lengths, err);
| if (!err)
| return ret;
| if (err == ReshapeError.incompatible)
| // allocates, flattens, reshapes with `sliced`, converts to universal kind
| return sl.slice.flattened.sliced(cast(size_t[M])lengths).universal;
|        throw new Exception("total element count is different or equal to zero");
| }
|
| auto sl = iota!int(3, 4)
| .slice
| .universal
| .reversed!0;
|
| assert(reshape2(sl, [4, 3]) ==
| [[ 8, 9, 10],
| [11, 4, 5],
| [ 6, 7, 0],
| [ 1, 2, 3]]);
|}
|
|nothrow @safe pure version(mir_test) unittest
|{
| import mir.ndslice.dynamic : allReversed;
| auto slice = iota(1, 1, 3, 2, 1, 2, 1).universal.allReversed;
| int err;
| assert(slice.reshape([1, -1, 1, 1, 3, 1], err) ==
| [[[[[[11], [10], [9]]]],
| [[[[ 8], [ 7], [6]]]],
| [[[[ 5], [ 4], [3]]]],
| [[[[ 2], [ 1], [0]]]]]]);
| assert(err == 0);
|}
|
|// Issue 15919
|nothrow @nogc @safe pure
|version(mir_test) unittest
|{
| int err;
| assert(iota(3, 4, 5, 6, 7).pack!2.reshape([4, 3, 5], err)[0, 0, 0].shape == cast(size_t[2])[6, 7]);
| assert(err == 0);
|}
|
|nothrow @nogc @safe pure version(mir_test) unittest
|{
| import mir.ndslice.slice;
|
| int err;
| auto e = iota(1);
| // resize to the wrong dimension
| auto s = e.reshape([2], err);
| assert(err == ReshapeError.total);
| e.popFront;
| // test with an empty slice
| e.reshape([1], err);
| assert(err == ReshapeError.empty);
|}
|
|nothrow @nogc @safe pure
|version(mir_test) unittest
|{
| auto pElements = iota(3, 4, 5, 6, 7)
| .pack!2
| .flattened;
| assert(pElements[0][0] == iota(7));
| assert(pElements[$-1][$-1] == iota([7], 2513));
|}
|
|/++
|A contiguous 1-dimensional slice of all elements of a slice.
|`flattened` iterates existing data.
|The order of elements is preserved.
|
|`flattened` can be generalized with other selectors.
|
|Params:
| slice = slice to be iterated
|Returns:
| contiguous 1-dimensional slice of elements of the `slice`
|+/
|Slice!(FlattenedIterator!(Iterator, N, kind))
| flattened
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if (N != 1 && kind != Contiguous)
|{
| import core.lifetime: move;
| size_t[typeof(return).N] lengths;
| sizediff_t[typeof(return)._iterator._indices.length] indices;
| lengths[0] = slice.elementCount;
| return typeof(return)(lengths, FlattenedIterator!(Iterator, N, kind)(indices, slice.move));
|}
|
|/// ditto
|Slice!Iterator
| flattened
| (Iterator, size_t N)
| (Slice!(Iterator, N) slice)
|{
| static if (N == 1)
| {
| return slice;
| }
| else
| {
| import core.lifetime: move;
0000000| size_t[typeof(return).N] lengths;
0000000| lengths[0] = slice.elementCount;
0000000| return typeof(return)(lengths, slice._iterator.move);
| }
|}
|
|/// ditto
|Slice!(StrideIterator!Iterator)
| flattened
| (Iterator)
| (Slice!(Iterator, 1, Universal) slice)
|{
| import core.lifetime: move;
| return slice.move.hideStride;
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| auto sl1 = iota(2, 3).slice.universal.pack!1.flattened;
| auto sl2 = iota(2, 3).slice.canonical.pack!1.flattened;
| auto sl3 = iota(2, 3).slice.pack!1.flattened;
|}
|
|/// Regular slice
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| assert(iota(4, 5).flattened == iota(20));
| assert(iota(4, 5).canonical.flattened == iota(20));
| assert(iota(4, 5).universal.flattened == iota(20));
|}
|
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| assert(iota(4).flattened == iota(4));
| assert(iota(4).canonical.flattened == iota(4));
| assert(iota(4).universal.flattened == iota(4));
|}
|
|/// Packed slice
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.dynamic;
| assert(iota(3, 4, 5, 6, 7).pack!2.flattened[1] == iota([6, 7], 6 * 7));
|}
|
|/// Properties
|@safe pure nothrow version(mir_test) unittest
|{
| auto elems = iota(3, 4).universal.flattened;
|
| elems.popFrontExactly(2);
| assert(elems.front == 2);
|    /// `_indices` is available only for canonical and universal ndslices.
| assert(elems._iterator._indices == [0, 2]);
|
| elems.popBackExactly(2);
| assert(elems.back == 9);
| assert(elems.length == 8);
|}
|
|/// Index property
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice;
| auto slice = new long[20].sliced(5, 4);
|
| for (auto elems = slice.universal.flattened; !elems.empty; elems.popFront)
| {
| ptrdiff_t[2] index = elems._iterator._indices;
| elems.front = index[0] * 10 + index[1] * 3;
| }
| assert(slice ==
| [[ 0, 3, 6, 9],
| [10, 13, 16, 19],
| [20, 23, 26, 29],
| [30, 33, 36, 39],
| [40, 43, 46, 49]]);
|}
|
|@safe pure nothrow version(mir_test) unittest
|{
| auto elems = iota(3, 4).universal.flattened;
| assert(elems.front == 0);
| assert(elems.save[1] == 1);
|}
|
|/++
|Random access and slicing
|+/
|nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.slice : sliced;
|
| auto elems = iota(4, 5).slice.flattened;
|
| elems = elems[11 .. $ - 2];
|
| assert(elems.length == 7);
| assert(elems.front == 11);
| assert(elems.back == 17);
|
| foreach (i; 0 .. 7)
| assert(elems[i] == i + 11);
|
| // assign an element
| elems[2 .. 6] = -1;
| assert(elems[2 .. 6] == repeat(-1, 4));
|
| // assign an array
| static ar = [-1, -2, -3, -4];
| elems[2 .. 6] = ar;
| assert(elems[2 .. 6] == ar);
|
| // assign a slice
| ar[] *= 2;
| auto sl = ar.sliced(ar.length);
| elems[2 .. 6] = sl;
| assert(elems[2 .. 6] == sl);
|}
|
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.dynamic : allReversed;
|
| auto slice = iota(3, 4, 5);
|
| foreach (ref e; slice.universal.flattened.retro)
| {
| //...
| }
|
| foreach_reverse (ref e; slice.universal.flattened)
| {
| //...
| }
|
| foreach (ref e; slice.universal.allReversed.flattened)
| {
| //...
| }
|}
|
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import std.range.primitives : isRandomAccessRange, hasSlicing;
| auto elems = iota(4, 5).flattened;
| static assert(isRandomAccessRange!(typeof(elems)));
| static assert(hasSlicing!(typeof(elems)));
|}
|
|// Checks strides
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.dynamic;
| import std.range.primitives : isRandomAccessRange;
| auto elems = iota(4, 5).universal.everted.flattened;
| static assert(isRandomAccessRange!(typeof(elems)));
|
| elems = elems[11 .. $ - 2];
| auto elems2 = elems;
| foreach (i; 0 .. 7)
| {
| assert(elems[i] == elems2.front);
| elems2.popFront;
| }
|}
|
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.dynamic;
| import std.range.primitives : isRandomAccessRange, hasLength;
|
| auto range = (3 * 4 * 5 * 6 * 7).iota;
| auto slice0 = range.sliced(3, 4, 5, 6, 7).universal;
| auto slice1 = slice0.transposed!(2, 1).pack!2;
| auto elems0 = slice0.flattened;
| auto elems1 = slice1.flattened;
|
| foreach (S; AliasSeq!(typeof(elems0), typeof(elems1)))
| {
| static assert(isRandomAccessRange!S);
| static assert(hasLength!S);
| }
|
| assert(elems0.length == slice0.elementCount);
| assert(elems1.length == 5 * 4 * 3);
|
| auto elems2 = elems1;
| foreach (q; slice1)
| foreach (w; q)
| foreach (e; w)
| {
| assert(!elems2.empty);
| assert(e == elems2.front);
| elems2.popFront;
| }
| assert(elems2.empty);
|
| elems0.popFront();
| elems0.popFrontExactly(slice0.elementCount - 14);
| assert(elems0.length == 13);
| assert(elems0 == range[slice0.elementCount - 13 .. slice0.elementCount]);
|
| foreach (elem; elems0) {}
|}
|
|// Issue 15549
|version(mir_test) unittest
|{
| import std.range.primitives;
| import mir.ndslice.allocation;
| alias A = typeof(iota(1, 2, 3, 4).pack!1);
| static assert(isRandomAccessRange!A);
| static assert(hasLength!A);
| static assert(hasSlicing!A);
| alias B = typeof(slice!int(1, 2, 3, 4).pack!3);
| static assert(isRandomAccessRange!B);
| static assert(hasLength!B);
| static assert(hasSlicing!B);
|}
|
|// Issue 16010
|version(mir_test) unittest
|{
| auto s = iota(3, 4).flattened;
| foreach (_; 0 .. s.length)
| s = s[1 .. $];
|}
|
|/++
|Returns a slice, the elements of which are equal to the initial multidimensional index value.
|For a flattened (contiguous) index, see $(LREF iota).
|
|Params:
| N = dimension count
| lengths = list of dimension lengths
|Returns:
| `N`-dimensional slice composed of indices
|See_also: $(LREF iota)
|+/
|Slice!(FieldIterator!(ndIotaField!N), N)
| ndiota
| (size_t N)
| (size_t[N] lengths...)
| if (N)
|{
| return FieldIterator!(ndIotaField!N)(0, ndIotaField!N(lengths[1 .. $])).sliced(lengths);
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| auto slice = ndiota(2, 3);
| static immutable array =
| [[[0, 0], [0, 1], [0, 2]],
| [[1, 0], [1, 1], [1, 2]]];
|
| assert(slice == array);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| auto im = ndiota(7, 9);
|
| assert(im[2, 1] == [2, 1]);
|
| //slicing works correctly
| auto cm = im[1 .. $, 4 .. $];
| assert(cm[2, 1] == [3, 5]);
|}
|
|version(mir_test) unittest
|{
| auto r = ndiota(1);
| auto d = r.front;
| r.popFront;
| import std.range.primitives;
| static assert(isRandomAccessRange!(typeof(r)));
|}
|
|/++
|Evenly spaced numbers over a specified interval.
|
|Params:
| T = floating point or complex numbers type
|    lengths = list of dimension lengths. Each length must be greater than 1.
| intervals = list of [start, end] pairs.
|Returns:
| `n`-dimensional grid of evenly spaced numbers over specified intervals.
|See_also: $(LREF)
|+/
|auto linspace(T, size_t N)(size_t[N] lengths, T[2][N] intervals...)
| if (N && (isFloatingPoint!T || isComplex!T))
|{
0000000| Repeat!(N, LinspaceField!T) fields;
| foreach(i; Iota!N)
| {
0000000|        assert(lengths[i] > 1, "linspace: all lengths must be greater than 1.");
0000000| fields[i] = LinspaceField!T(lengths[i], intervals[i][0], intervals[i][1]);
| }
| static if (N == 1)
0000000| return slicedField(fields);
| else
| return cartesian(fields);
|}
|
|// example from readme
|version(mir_test) unittest
|{
| import mir.ndslice;
| // import std.stdio: writefln;
|
| enum fmt = "%(%(%.2f %)\n%)\n";
|
| auto a = magic(5).as!float;
| // writefln(fmt, a);
|
| auto b = linspace!float([5, 5], [1f, 2f], [0f, 1f]).map!"a * a + b";
| // writefln(fmt, b);
|
| auto c = slice!float(5, 5);
| c[] = transposed(a + b / 2);
|}
|
|/// 1D
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto s = linspace!double([5], [1.0, 2.0]);
| assert(s == [1.0, 1.25, 1.5, 1.75, 2.0]);
|
| // reverse order
| assert(linspace!double([5], [2.0, 1.0]) == s.retro);
|
| // remove endpoint
| s.popBack;
| assert(s == [1.0, 1.25, 1.5, 1.75]);
|}
|
|/// 2D
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.functional: refTuple;
|
| auto s = linspace!double([5, 3], [1.0, 2.0], [0.0, 1.0]);
|
| assert(s == [
| [refTuple(1.00, 0.00), refTuple(1.00, 0.5), refTuple(1.00, 1.0)],
| [refTuple(1.25, 0.00), refTuple(1.25, 0.5), refTuple(1.25, 1.0)],
| [refTuple(1.50, 0.00), refTuple(1.50, 0.5), refTuple(1.50, 1.0)],
| [refTuple(1.75, 0.00), refTuple(1.75, 0.5), refTuple(1.75, 1.0)],
| [refTuple(2.00, 0.00), refTuple(2.00, 0.5), refTuple(2.00, 1.0)],
| ]);
|
| assert(s.map!"a * b" == [
| [0.0, 0.500, 1.00],
| [0.0, 0.625, 1.25],
| [0.0, 0.750, 1.50],
| [0.0, 0.875, 1.75],
| [0.0, 1.000, 2.00],
| ]);
|}
|
|/// Complex numbers
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto s = linspace!cdouble([3], [1.0 + 0i, 2.0 + 4i]);
| assert(s == [1.0 + 0i, 1.5 + 2i, 2.0 + 4i]);
|}
|
|/++
|Returns a slice with identical elements.
|`RepeatSlice` stores only a single value.
|Params:
| lengths = list of dimension lengths
|Returns:
| `n`-dimensional slice composed of identical values, where `n` is dimension count.
|+/
|Slice!(FieldIterator!(RepeatField!T), M, Universal)
| repeat(T, size_t M)(T value, size_t[M] lengths...) @trusted
| if (M && !isSlice!T)
|{
| size_t[M] ls = lengths;
| return typeof(return)(
| ls,
| sizediff_t[M].init,
| typeof(return).Iterator(0, RepeatField!T(cast(RepeatField!T.UT) value)));
|}
|
|/// ditto
|Slice!(SliceIterator!(Iterator, N, kind), M, Universal)
| repeat
| (SliceKind kind, size_t N, Iterator, size_t M)
| (Slice!(Iterator, N, kind) slice, size_t[M] lengths...)
| if (M)
|{
| import core.lifetime: move;
| size_t[M] ls = lengths;
| return typeof(return)(
| ls,
| sizediff_t[M].init,
| typeof(return).Iterator(
| slice._structure,
| move(slice._iterator)));
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| auto sl = iota(3).repeat(4);
| assert(sl == [[0, 1, 2],
| [0, 1, 2],
| [0, 1, 2],
| [0, 1, 2]]);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.dynamic : transposed;
|
| auto sl = iota(3)
| .repeat(4)
| .unpack
| .universal
| .transposed;
|
| assert(sl == [[0, 0, 0, 0],
| [1, 1, 1, 1],
| [2, 2, 2, 2]]);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation;
|
| auto sl = iota([3], 6).slice;
| auto slC = sl.repeat(2, 3);
| sl[1] = 4;
| assert(slC == [[[6, 4, 8],
| [6, 4, 8],
| [6, 4, 8]],
| [[6, 4, 8],
| [6, 4, 8],
| [6, 4, 8]]]);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| auto sl = repeat(4.0, 2, 3);
| assert(sl == [[4.0, 4.0, 4.0],
| [4.0, 4.0, 4.0]]);
|
| static assert(is(DeepElementType!(typeof(sl)) == double));
|
| sl[1, 1] = 3;
| assert(sl == [[3.0, 3.0, 3.0],
| [3.0, 3.0, 3.0]]);
|}
|
|/++
|Cycle repeats a 1-dimensional field/range/array/slice in a fixed-length 1-dimensional slice.
|+/
|auto cycle(Field)(Field field, size_t loopLength, size_t length)
| if (!isSlice!Field && !is(Field : T[], T))
|{
| return CycleField!Field(loopLength, field).slicedField(length);
|}
|
|/// ditto
|auto cycle(size_t loopLength, Field)(Field field, size_t length)
| if (!isSlice!Field && !is(Field : T[], T))
|{
| static assert(loopLength);
| return CycleField!(Field, loopLength)(field).slicedField(length);
|}
|
|/// ditto
|auto cycle(Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) slice, size_t length)
|{
| assert(slice.length);
| static if (kind == Universal)
| return slice.hideStride.cycle(length);
| else
| return CycleField!Iterator(slice._lengths[0], slice._iterator).slicedField(length);
|}
|
|/// ditto
|auto cycle(size_t loopLength, Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) slice, size_t length)
|{
| static assert(loopLength);
| assert(loopLength <= slice.length);
| static if (kind == Universal)
| return slice.hideStride.cycle!loopLength(length);
| else
| return CycleField!(Iterator, loopLength)(slice._iterator).slicedField(length);
|}
|
|/// ditto
|auto cycle(T)(T[] array, size_t length)
|{
| return cycle(array.sliced, length);
|}
|
|/// ditto
|auto cycle(size_t loopLength, T)(T[] array, size_t length)
|{
| return cycle!loopLength(array.sliced, length);
|}
|
|/// ditto
|auto cycle(size_t loopLength, T)(T withAsSlice, size_t length)
| if (hasAsSlice!T)
|{
| return cycle!loopLength(withAsSlice.asSlice, length);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| auto slice = iota(3);
| assert(slice.cycle(7) == [0, 1, 2, 0, 1, 2, 0]);
| assert(slice.cycle!2(7) == [0, 1, 0, 1, 0, 1, 0]);
| assert([0, 1, 2].cycle(7) == [0, 1, 2, 0, 1, 2, 0]);
| assert([4, 3, 2, 1].cycle!4(7) == [4, 3, 2, 1, 4, 3, 2]);
|}
|
|/++
|Strides a 1-dimensional slice.
|Params:
| slice = 1-dimensional unpacked slice.
| factor = positive stride size.
|Returns:
| Contiguous slice with strided iterator.
|See_also: $(SUBREF dynamic, strided)
|+/
|auto stride
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice, ptrdiff_t factor)
| if (N == 1)
|in
|{
| assert (factor > 0, "factor must be positive.");
|}
|do
|{
| static if (kind == Contiguous)
| return slice.universal.stride(factor);
| else
| {
| import mir.ndslice.dynamic: strided;
| return slice.strided!0(factor).hideStride;
| }
|}
|
|///ditto
|template stride(size_t factor = 2)
| if (factor > 1)
|{
| auto stride
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| {
| import core.lifetime: move;
| static if (N > 1)
| {
| return stride(slice.move.ipack!1.map!(.stride!factor));
| }
| else
| static if (kind == Contiguous)
| {
| immutable rem = slice._lengths[0] % factor;
| slice._lengths[0] /= factor;
| if (rem)
| slice._lengths[0]++;
| return Slice!(StrideIterator!(Iterator, factor), 1, kind)(slice._structure, StrideIterator!(Iterator, factor)(move(slice._iterator)));
| }
| else
| {
| return .stride(slice.move, factor);
| }
| }
|
| /// ditto
| auto stride(T)(T[] array)
| {
| return stride(array.sliced);
| }
|
| /// ditto
| auto stride(T)(T withAsSlice)
| if (hasAsSlice!T)
| {
| return stride(withAsSlice.asSlice);
| }
|}
|
|/// ditto
|auto stride(T)(T[] array, ptrdiff_t factor)
|{
| return stride(array.sliced, factor);
|}
|
|/// ditto
|auto stride(T)(T withAsSlice, ptrdiff_t factor)
| if (hasAsSlice!T)
|{
| return stride(withAsSlice.asSlice, factor);
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| auto slice = iota(6);
| static immutable str = [0, 2, 4];
| assert(slice.stride(2) == str); // runtime factor
| assert(slice.stride!2 == str); // compile time factor
| assert(slice.stride == str); // default compile time factor is 2
| assert(slice.universal.stride(2) == str);
|}
|
|/// ND-compile time
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| auto slice = iota(4, 6);
| static immutable str = [[0, 2, 4], [12, 14, 16]];
| assert(slice.stride!2 == str); // compile time factor
| assert(slice.stride == str); // default compile time factor is 2
|}
|
|/++
|Reverses order of iteration for all dimensions.
|Params:
| slice = slice, range, or array.
|Returns:
| Slice/range with reversed order of iteration for all dimensions.
|See_also: $(SUBREF dynamic, reversed), $(SUBREF dynamic, allReversed).
|+/
|auto retro
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| @trusted
|{
| import core.lifetime: move;
| static if (kind == Contiguous || kind == Canonical)
| {
| size_t[slice.N] lengths;
| foreach (i; Iota!(slice.N))
| lengths[i] = slice._lengths[i];
| static if (slice.S)
| {
| sizediff_t[slice.S] strides;
| foreach (i; Iota!(slice.S))
| strides[i] = slice._strides[i];
| alias structure = AliasSeq!(lengths, strides);
| }
| else
| {
| alias structure = lengths;
| }
| static if (is(Iterator : RetroIterator!It, It))
| {
| alias Ret = Slice!(It, N, kind);
| slice._iterator._iterator -= slice.lastIndex;
| return Ret(structure, slice._iterator._iterator.move);
| }
| else
| {
| alias Ret = Slice!(RetroIterator!Iterator, N, kind);
| slice._iterator += slice.lastIndex;
| return Ret(structure, RetroIterator!Iterator(slice._iterator.move));
| }
| }
| else
| {
| import mir.ndslice.dynamic: allReversed;
| return slice.move.allReversed;
| }
|}
|
|/// ditto
|auto retro(T)(T[] array)
|{
| return retro(array.sliced);
|}
|
|/// ditto
|auto retro(T)(T withAsSlice)
| if (hasAsSlice!T)
|{
| return retro(withAsSlice.asSlice);
|}
|
|/// ditto
|auto retro(Range)(Range r)
| if (!hasAsSlice!Range && !isSlice!Range && !is(Range : T[], T))
|{
| import std.traits: Unqual;
|
| static if (is(Unqual!Range == Range))
| {
| import core.lifetime: move;
| static if (is(Range : RetroRange!R, R))
| {
| return move(r._source);
| }
| else
| {
| return RetroRange!Range(move(r));
| }
| }
| else
| {
| return .retro!(Unqual!Range)(r);
| }
|}
|
|/// ditto
|struct RetroRange(Range)
|{
| import mir.primitives: hasLength;
|
| ///
| Range _source;
|
| private enum hasAccessByRef = __traits(compiles, &_source.front);
|
| @property
| {
| bool empty()() const { return _source.empty; }
| static if (hasLength!Range)
| auto length()() const { return _source.length; }
| auto ref front()() { return _source.back; }
| auto ref back()() { return _source.front; }
| static if (__traits(hasMember, Range, "save"))
| auto save()() { return RetroRange(_source.save); }
| alias opDollar = length;
|
| static if (!hasAccessByRef)
| {
| import std.traits: ForeachType;
|
|            void front()(ForeachType!Range val)
| {
| import mir.functional: forward;
| _source.back = forward!val;
| }
|
|            void back()(ForeachType!Range val)
| {
| import mir.functional: forward;
| _source.front = forward!val;
| }
| }
| }
|
| void popFront()() { _source.popBack(); }
| void popBack()() { _source.popFront(); }
|
| static if (is(typeof(_source.moveBack())))
| auto moveFront()() { return _source.moveBack(); }
|
| static if (is(typeof(_source.moveFront())))
| auto moveBack()() { return _source.moveFront(); }
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| auto slice = iota(2, 3);
| static immutable reversed = [[5, 4, 3], [2, 1, 0]];
| assert(slice.retro == reversed);
| assert(slice.canonical.retro == reversed);
| assert(slice.universal.retro == reversed);
|
| static assert(is(typeof(slice.retro.retro) == typeof(slice)));
| static assert(is(typeof(slice.canonical.retro.retro) == typeof(slice.canonical)));
| static assert(is(typeof(slice.universal.retro) == typeof(slice.universal)));
|}
|
|/// Ranges
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| import mir.algorithm.iteration: equal;
| import std.range: std_iota = iota;
|
| assert(std_iota(4).retro.equal(iota(4).retro));
| static assert(is(typeof(std_iota(4).retro.retro) == typeof(std_iota(4))));
|}
|
|/++
|Bitwise slice over an integral slice.
|Params:
|    slice = a contiguous or canonical slice on top of an integral iterator.
|Returns: A bitwise slice.
|+/
|auto bitwise
| (Iterator, size_t N, SliceKind kind, I = typeof(Iterator.init[size_t.init]))
| (Slice!(Iterator, N, kind) slice)
| if (__traits(isIntegral, I) && (kind != Universal || N == 1))
|{
| import core.lifetime: move;
| static if (kind == Universal)
| {
| return slice.move.flattened.bitwise;
| }
| else
| {
| static if (is(Iterator : FieldIterator!Field, Field))
| {
| enum simplified = true;
| alias It = FieldIterator!(BitField!Field);
| }
| else
| {
| enum simplified = false;
| alias It = FieldIterator!(BitField!Iterator);
| }
| alias Ret = Slice!(It, N, kind);
| auto structure_ = Ret._Structure.init;
| foreach(i; Iota!(Ret.N))
| structure_[0][i] = slice._lengths[i];
| structure_[0][$ - 1] *= I.sizeof * 8;
| foreach(i; Iota!(Ret.S))
| structure_[1][i] = slice._strides[i];
| static if (simplified)
| return Ret(structure_, It(slice._iterator._index * I.sizeof * 8, BitField!Field(slice._iterator._field.move)));
| else
| return Ret(structure_, It(0, BitField!Iterator(slice._iterator.move)));
| }
|}
|
|/// ditto
|auto bitwise(T)(T[] array)
|{
| return bitwise(array.sliced);
|}
|
|/// ditto
|auto bitwise(T)(T withAsSlice)
| if (hasAsSlice!T)
|{
| return bitwise(withAsSlice.asSlice);
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| size_t[10] data;
| auto bits = data[].bitwise;
| assert(bits.length == data.length * size_t.sizeof * 8);
| bits[111] = true;
| assert(bits[111]);
|
| bits.popFront;
| assert(bits[110]);
| bits[] = true;
| bits[110] = false;
| bits = bits[10 .. $];
| assert(bits[100] == false);
|}
|
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| size_t[10] data;
| auto slice = FieldIterator!(size_t[])(0, data[]).sliced(10);
| slice.popFrontExactly(2);
| auto bits_normal = data[].sliced.bitwise;
| auto bits = slice.bitwise;
| assert(bits.length == (data.length - 2) * size_t.sizeof * 8);
| bits[111] = true;
| assert(bits[111]);
| assert(bits_normal[111 + size_t.sizeof * 2 * 8]);
| auto ubits = slice.universal.bitwise;
| assert(bits.map!"~a" == bits.map!"!a");
| static assert (is(typeof(bits.map!"~a") == typeof(bits.map!"!a")));
| assert(bits.map!"~a" == bits.map!"!!!a");
| static assert (!is(typeof(bits.map!"~a") == typeof(bits.map!"!!!a")));
| assert(bits == ubits);
|
| bits.popFront;
| assert(bits[110]);
| bits[] = true;
| bits[110] = false;
| bits = bits[10 .. $];
| assert(bits[100] == false);
|}
|
|/++
|Bitwise field over an integral field.
|Params:
| field = an integral field.
|Returns: A bitwise field.
|+/
|auto bitwiseField(Field, I = typeof(Field.init[size_t.init]))(Field field)
| if (__traits(isUnsigned, I))
|{
| import core.lifetime: move;
| return BitField!(Field, I)(field.move);
|}
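|
|// A minimal usage sketch for `bitwiseField`; it assumes the returned `BitField`
|// composes with `slicedField` and shares the bit layout of `bitwise` over the same memory.
|version(mir_test) unittest
|{
|    size_t[2] data;
|    // wrap the array as a bit field and give it a 1-dimensional shape
|    auto bits = data[].bitwiseField.slicedField(data.length * size_t.sizeof * 8);
|    bits[3] = true;
|    assert(bits[3]);
|    // the `bitwise` view over the same memory observes the same bit
|    assert(data[].bitwise[3]);
|}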
|
|/++
|Bitpack slice over an integral slice.
|
|Bitpack is used to represent an unsigned integer slice with a smaller number of bits per integer than its native binary representation.
|
|Params:
|    pack = count of bits per packed integer.
|    slice = a contiguous or canonical slice on top of an integral iterator.
|Returns: A bitpack slice.
|+/
|auto bitpack
| (size_t pack, Iterator, size_t N, SliceKind kind, I = typeof(Iterator.init[size_t.init]))
| (Slice!(Iterator, N, kind) slice)
| if (__traits(isIntegral, I) && (kind == Contiguous || kind == Canonical) && pack > 1)
|{
| import core.lifetime: move;
| static if (is(Iterator : FieldIterator!Field, Field) && I.sizeof * 8 % pack == 0)
| {
| enum simplified = true;
| alias It = FieldIterator!(BitpackField!(Field, pack));
| }
| else
| {
| enum simplified = false;
| alias It = FieldIterator!(BitpackField!(Iterator, pack));
| }
| alias Ret = Slice!(It, N, kind);
| auto structure = Ret._Structure.init;
| foreach(i; Iota!(Ret.N))
| structure[0][i] = slice._lengths[i];
| structure[0][$ - 1] *= I.sizeof * 8;
| structure[0][$ - 1] /= pack;
| foreach(i; Iota!(Ret.S))
| structure[1][i] = slice._strides[i];
| static if (simplified)
| return Ret(structure, It(slice._iterator._index * I.sizeof * 8 / pack, BitpackField!(Field, pack)(slice._iterator._field.move)));
| else
| return Ret(structure, It(0, BitpackField!(Iterator, pack)(slice._iterator.move)));
|}
|
|/// ditto
|auto bitpack(size_t pack, T)(T[] array)
|{
| return bitpack!pack(array.sliced);
|}
|
|/// ditto
|auto bitpack(size_t pack, T)(T withAsSlice)
| if (hasAsSlice!T)
|{
| return bitpack!pack(withAsSlice.asSlice);
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| size_t[10] data;
| // creates a packed unsigned integer slice with max allowed value equal to `2^^6 - 1 == 63`.
| auto packs = data[].bitpack!6;
| assert(packs.length == data.length * size_t.sizeof * 8 / 6);
| packs[$ - 1] = 24;
| assert(packs[$ - 1] == 24);
|
| packs.popFront;
| assert(packs[$ - 1] == 24);
|}
|
|/++
|Bytegroup slice over an integral slice.
|
|Groups existing slice into fixed length chunks and uses them as data store for destination type.
|
|Correctly handles scalar types on both little-endian and big-endian platforms.
|
|Params:
| group = count of iterator items used to store the destination type.
| DestinationType = deep element type of the result slice.
| slice = a contiguous or canonical slice.
|Returns: A bytegroup slice.
|+/
|Slice!(BytegroupIterator!(Iterator, group, DestinationType), N, kind)
|bytegroup
| (size_t group, DestinationType, Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if ((kind == Contiguous || kind == Canonical) && group)
|{
| import core.lifetime: move;
| auto structure = slice._structure;
| structure[0][$ - 1] /= group;
| return typeof(return)(structure, BytegroupIterator!(Iterator, group, DestinationType)(slice._iterator.move));
|}
|
|/// ditto
|auto bytegroup(size_t pack, DestinationType, T)(T[] array)
|{
| return bytegroup!(pack, DestinationType)(array.sliced);
|}
|
|/// ditto
|auto bytegroup(size_t pack, DestinationType, T)(T withAsSlice)
| if (hasAsSlice!T)
|{
| return bytegroup!(pack, DestinationType)(withAsSlice.asSlice);
|}
|
|/// 24 bit integers
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.slice : DeepElementType, sliced;
|
| ubyte[20] data;
|    // groups every 3 bytes into a single `int` holding a 24-bit value
| auto int24ar = data[].bytegroup!(3, int); // 24 bit integers
| assert(int24ar.length == data.length / 3);
|
| enum checkInt = ((1 << 20) - 1);
|
| int24ar[3] = checkInt;
| assert(int24ar[3] == checkInt);
|
| int24ar.popFront;
| assert(int24ar[2] == checkInt);
|
| static assert(is(DeepElementType!(typeof(int24ar)) == int));
|}
|
|/// 48 bit integers
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.slice : DeepElementType, sliced;
| ushort[20] data;
|    // groups every 3 ushorts into a single `long` holding a 48-bit value
| auto int48ar = data[].sliced.bytegroup!(3, long); // 48 bit integers
| assert(int48ar.length == data.length / 3);
|
| enum checkInt = ((1L << 44) - 1);
|
| int48ar[3] = checkInt;
| assert(int48ar[3] == checkInt);
|
| int48ar.popFront;
| assert(int48ar[2] == checkInt);
|
| static assert(is(DeepElementType!(typeof(int48ar)) == long));
|}
|
|/++
|Implements the homonym function (also known as `transform`) present
|in many languages of functional flavor. The call `map!(fun)(slice)`
|returns a slice whose elements are obtained by applying `fun`
|to all elements of `slice`. The original slices are
|not changed. Evaluation is done lazily.
|
|Note:
| $(SUBREF dynamic, transposed) and
| $(SUBREF topology, pack) can be used to specify dimensions.
|Params:
| fun = One or more functions.
|See_Also:
| $(LREF cached), $(LREF vmap), $(LREF indexed),
| $(LREF pairwise), $(LREF subSlices), $(LREF slide), $(LREF zip),
| $(HTTP en.wikipedia.org/wiki/Map_(higher-order_function), Map (higher-order function))
|+/
|template map(fun...)
| if (fun.length)
|{
| import mir.functional: adjoin, naryFun, pipe;
| static if (fun.length == 1)
| {
| static if (__traits(isSame, naryFun!(fun[0]), fun[0]))
| {
| alias f = fun[0];
| @optmath:
| /++
| Params:
| slice = An ndslice, array, or an input range.
| Returns:
| ndslice or an input range with each fun applied to all the elements. If there is more than one
| fun, the element type will be `Tuple` containing one element for each fun.
| +/
| auto map(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| {
| import core.lifetime: move;
| alias MIterator = typeof(_mapIterator!f(slice._iterator));
| import mir.ndslice.traits: isIterator;
| alias testIter = typeof(MIterator.init[0]);
| static assert(isIterator!MIterator, "mir.ndslice.map: probably the lambda function contains a compile time bug.");
| return Slice!(MIterator, N, kind)(slice._structure, _mapIterator!f(slice._iterator.move));
| }
|
| /// ditto
| auto map(T)(T[] array)
| {
| return map(array.sliced);
| }
|
| /// ditto
| auto map(T)(T withAsSlice)
| if (hasAsSlice!T)
| {
| return map(withAsSlice.asSlice);
| }
|
| /// ditto
| auto map(Range)(Range r)
| if (!hasAsSlice!Range && !isSlice!Range && !is(Range : T[], T))
| {
| import core.lifetime: forward;
| import std.range.primitives: isInputRange;
| static assert (isInputRange!Range, "map can work with ndslice, array, or an input range.");
| return MapRange!(f, ImplicitlyUnqual!Range)(forward!r);
| }
| }
| else alias map = .map!(staticMap!(naryFun, fun));
| }
| else alias map = .map!(adjoin!fun);
|}
|
|/// ditto
|struct MapRange(alias fun, Range)
|{
| import std.range.primitives;
|
| Range _input;
|
| static if (isInfinite!Range)
| {
| enum bool empty = false;
| }
| else
| {
| bool empty() @property
| {
| return _input.empty;
| }
| }
|
| void popFront()
| {
| assert(!empty, "Attempting to popFront an empty map.");
| _input.popFront();
| }
|
| auto ref front() @property
| {
| assert(!empty, "Attempting to fetch the front of an empty map.");
| return fun(_input.front);
| }
|
| static if (isBidirectionalRange!Range)
| auto ref back()() @property
| {
| assert(!empty, "Attempting to fetch the back of an empty map.");
| return fun(_input.back);
| }
|
| static if (isBidirectionalRange!Range)
| void popBack()()
| {
| assert(!empty, "Attempting to popBack an empty map.");
| _input.popBack();
| }
|
| static if (hasLength!Range)
| auto length() @property
| {
| return _input.length;
| }
|
| static if (isForwardRange!Range)
| auto save()() @property
| {
| return typeof(this)(_input.save);
| }
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| auto s = iota(2, 3).map!(a => a * 3);
| assert(s == [[ 0, 3, 6],
| [ 9, 12, 15]]);
|}
|
|/// String lambdas
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| assert(iota(2, 3).map!"a * 2" == [[0, 2, 4], [6, 8, 10]]);
|}
|
|/// Input ranges
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.algorithm.iteration: filter, equal;
| assert (6.iota.filter!"a % 2".map!"a * 10".equal([10, 30, 50]));
|}
|
|/// Packed tensors
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, windows;
| import mir.math.sum: sum;
|
| // iota windows map sums ( reduce!"a + b" )
| // --------------
| // ------- | --- --- | ------
| // | 0 1 2 | => || 0 1 || 1 2 || => | 8 12 |
| // | 3 4 5 | || 3 4 || 4 5 || ------
| // ------- | --- --- |
| // --------------
| auto s = iota(2, 3)
| .windows(2, 2)
| .map!sum;
|
| assert(s == [[8, 12]]);
|}
|
|/// Zipped tensors
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, zip;
|
| // 0 1 2
| // 3 4 5
| auto sl1 = iota(2, 3);
| // 1 2 3
| // 4 5 6
| auto sl2 = iota([2, 3], 1);
|
| auto z = zip(sl1, sl2);
|
| assert(zip(sl1, sl2).map!"a + b" == sl1 + sl2);
| assert(zip(sl1, sl2).map!((a, b) => a + b) == sl1 + sl2);
|}
|
|/++
|Multiple functions can be passed to `map`.
|In that case, the element type of `map` is a refTuple containing
|one element for each function.
|+/
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| auto sl = iota(2, 3);
| auto s = sl.map!("a + a", "a * a");
|
| auto sums = [[0, 2, 4], [6, 8, 10]];
| auto products = [[0, 1, 4], [9, 16, 25]];
|
| assert(s.map!"a[0]" == sl + sl);
| assert(s.map!"a[1]" == sl * sl);
|}
|
|/++
|`map` can be aliased to a symbol and be used separately:
|+/
|pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| alias halfs = map!"double(a) / 2";
| assert(halfs(iota(2, 3)) == [[0.0, 0.5, 1], [1.5, 2, 2.5]]);
|}
|
|/++
|Type normalization
|+/
|version(mir_test) unittest
|{
| import mir.functional : pipe;
| import mir.ndslice.topology : iota;
| auto a = iota(2, 3).map!"a + 10".map!(pipe!("a * 2", "a + 1"));
| auto b = iota(2, 3).map!(pipe!("a + 10", "a * 2", "a + 1"));
| assert(a == b);
| static assert(is(typeof(a) == typeof(b)));
|}
|
|/// Use map with byDim/alongDim to apply functions to each dimension
|version(mir_test)
|@safe pure
|unittest
|{
| import mir.ndslice.topology: byDim, alongDim;
| import mir.ndslice.fuse: fuse;
| import mir.math.stat: mean;
| import mir.algorithm.iteration: all;
| import mir.math.common: approxEqual;
|
| auto x = [
| [0.0, 1.0, 1.5, 2.0, 3.5, 4.25],
| [2.0, 7.5, 5.0, 1.0, 1.5, 0.0]
| ].fuse;
|
| // Use byDim/alongDim with map to compute mean of row/column.
| assert(x.byDim!0.map!mean.all!approxEqual([12.25 / 6, 17.0 / 6]));
| assert(x.byDim!1.map!mean.all!approxEqual([1, 4.25, 3.25, 1.5, 2.5, 2.125]));
| assert(x.alongDim!1.map!mean.all!approxEqual([12.25 / 6, 17.0 / 6]));
| assert(x.alongDim!0.map!mean.all!approxEqual([1, 4.25, 3.25, 1.5, 2.5, 2.125]));
|}
|
|/++
|Use map with a lambda and with byDim/alongDim; the result may need to be allocated.
|This example uses fuse, which allocates. Note that fuse!1 transposes the result.
|+/
|version(mir_test)
|@safe pure
|unittest {
| import mir.ndslice.topology: iota, byDim, alongDim, map;
| import mir.ndslice.fuse: fuse;
| import mir.ndslice.slice: sliced;
|
| auto x = [1, 2, 3].sliced;
| auto y = [1, 2].sliced;
|
| auto s1 = iota(2, 3).byDim!0.map!(a => a * x).fuse;
| assert(s1 == [[ 0, 2, 6],
| [ 3, 8, 15]]);
| auto s2 = iota(2, 3).byDim!1.map!(a => a * y).fuse!1;
| assert(s2 == [[ 0, 1, 2],
| [ 6, 8, 10]]);
| auto s3 = iota(2, 3).alongDim!1.map!(a => a * x).fuse;
| assert(s1 == [[ 0, 2, 6],
| [ 3, 8, 15]]);
| auto s4 = iota(2, 3).alongDim!0.map!(a => a * y).fuse!1;
| assert(s2 == [[ 0, 1, 2],
| [ 6, 8, 10]]);
|}
|
|///
|pure version(mir_test) unittest
|{
| import mir.algorithm.iteration: reduce;
| import mir.math.common: fmax;
| import mir.math.stat: mean;
| import mir.math.sum;
| /// Returns maximal column average.
| auto maxAvg(S)(S matrix) {
| return reduce!fmax(0.0, matrix.alongDim!1.map!mean);
| }
| // 1 2
| // 3 4
| auto matrix = iota([2, 2], 1);
| assert(maxAvg(matrix) == 3.5);
|}
|
|/++
|Implements the homonym function (also known as `transform`) present
|in many languages of functional flavor. The call `slice.vmap(fun)`
|returns a slice whose elements are obtained by applying `fun`
|to all elements of `slice`. The original slices are
|not changed. Evaluation is done lazily.
|
|Note:
| $(SUBREF dynamic, transposed) and
| $(SUBREF topology, pack) can be used to specify dimensions.
|Params:
| slice = ndslice
| callable = callable object, structure, delegate, or function pointer.
|See_Also:
| $(LREF cached), $(LREF map), $(LREF indexed),
| $(LREF pairwise), $(LREF subSlices), $(LREF slide), $(LREF zip),
| $(HTTP en.wikipedia.org/wiki/Map_(higher-order_function), Map (higher-order function))
|+/
|@optmath auto vmap(Iterator, size_t N, SliceKind kind, Callable)
| (
| Slice!(Iterator, N, kind) slice,
| Callable callable,
| )
|{
| import core.lifetime: move;
| alias It = VmapIterator!(Iterator, Callable);
| return Slice!(It, N, kind)(slice._structure, It(slice._iterator.move, callable.move));
|}
|
|/// ditto
|auto vmap(T, Callable)(T[] array, Callable callable)
|{
| import core.lifetime: move;
| return vmap(array.sliced, callable.move);
|}
|
|/// ditto
|auto vmap(T, Callable)(T withAsSlice, Callable callable)
| if (hasAsSlice!T)
|{
| import core.lifetime: move;
| return vmap(withAsSlice.asSlice, callable.move);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| static struct Mul {
| double factor; this(double f) { factor = f; }
| auto opCall(long x) const {return x * factor; }
| auto lightConst()() const @property { return Mul(factor); }
| }
|
| auto callable = Mul(3);
| auto s = iota(2, 3).vmap(callable);
|
| assert(s == [[ 0, 3, 6],
| [ 9, 12, 15]]);
|}
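|
|// A shorter sketch that passes a plain function pointer as the callable; the Params above
|// state that delegates and function pointers are accepted, so a context-free lambda is assumed
|// to be sufficient here.
|version(mir_test) unittest
|{
|    import mir.ndslice.topology : iota;
|
|    auto s = iota(2, 3).vmap((long x) => x * 3);
|
|    assert(s == [[ 0,  3,  6],
|                 [ 9, 12, 15]]);
|}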
|
|/// Packed tensors.
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.math.sum: sum;
| import mir.ndslice.topology : iota, windows;
|
| // iota windows vmap scaled sums
| // --------------
| // ------- | --- --- | -----
| // | 0 1 2 | => || 0 1 || 1 2 || => | 4 6 |
| // | 3 4 5 | || 3 4 || 4 5 || -----
| // ------- | --- --- |
| // --------------
|
| struct Callable
| {
| double factor;
| this(double f) {factor = f;}
| auto opCall(S)(S x) { return x.sum * factor; }
|
| auto lightConst()() const @property { return Callable(factor); }
| auto lightImmutable()() immutable @property { return Callable(factor); }
| }
|
| auto callable = Callable(0.5);
|
| auto s = iota(2, 3)
| .windows(2, 2)
| .vmap(callable);
|
| assert(s == [[4, 6]]);
|}
|
|/// Zipped tensors
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, zip;
|
| struct Callable
| {
| double factor;
| this(double f) {factor = f;}
| auto opCall(S, T)(S x, T y) { return x + y * factor; }
|
| auto lightConst()() const { return Callable(factor); }
| auto lightImmutable()() immutable { return Callable(factor); }
| }
|
| auto callable = Callable(10);
|
| // 0 1 2
| // 3 4 5
| auto sl1 = iota(2, 3);
| // 1 2 3
| // 4 5 6
| auto sl2 = iota([2, 3], 1);
|
| auto z = zip(sl1, sl2);
|
| assert(zip(sl1, sl2).vmap(callable) ==
| [[10, 21, 32],
| [43, 54, 65]]);
|}
|
|// TODO
|/+
|Multiple functions can be passed to `vmap`.
|In that case, the element type of `vmap` is a refTuple containing
|one element for each function.
|+/
|@safe pure nothrow
|version(none) version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| auto s = iota(2, 3).vmap!("a + a", "a * a");
|
| auto sums = [[0, 2, 4], [6, 8, 10]];
| auto products = [[0, 1, 4], [9, 16, 25]];
|
| foreach (i; 0..s.length!0)
| foreach (j; 0..s.length!1)
| {
| auto values = s[i, j];
| assert(values.a == sums[i][j]);
| assert(values.b == products[i][j]);
| }
|}
|
|/// Use vmap with byDim/alongDim to apply a callable to each dimension
|version(mir_test)
|@safe pure
|unittest
|{
| import mir.ndslice.fuse: fuse;
| import mir.math.stat: mean;
| import mir.algorithm.iteration: all;
| import mir.math.common: approxEqual;
|
| auto x = [
| [0.0, 1.0, 1.5, 2.0, 3.5, 4.25],
| [2.0, 7.5, 5.0, 1.0, 1.5, 0.0]
| ].fuse;
|
| static struct Callable
| {
| double factor;
| this(double f) {factor = f;}
| auto opCall(U)(U x) const {return x.mean + factor; }
| auto lightConst()() const @property { return Callable(factor); }
| }
|
| auto callable = Callable(0.0);
|
| // Use byDim/alongDim with map to compute callable of row/column.
| assert(x.byDim!0.vmap(callable).all!approxEqual([12.25 / 6, 17.0 / 6]));
| assert(x.byDim!1.vmap(callable).all!approxEqual([1, 4.25, 3.25, 1.5, 2.5, 2.125]));
| assert(x.alongDim!1.vmap(callable).all!approxEqual([12.25 / 6, 17.0 / 6]));
| assert(x.alongDim!0.vmap(callable).all!approxEqual([1, 4.25, 3.25, 1.5, 2.5, 2.125]));
|}
|
|/++
|Use vmap with a callable and with byDim/alongDim; the result may need to be allocated.
|This example uses fuse, which allocates. Note that fuse!1 transposes the result.
|+/
|version(mir_test)
|@safe pure
|unittest {
| import mir.ndslice.topology: iota, alongDim, map;
| import mir.ndslice.fuse: fuse;
| import mir.ndslice.slice: sliced;
|
| static struct Mul(T)
| {
| T factor;
| this(T f) { factor = f; }
| auto opCall(U)(U x) {return x * factor; }
| auto lightConst()() const @property { return Mul!(typeof(factor.lightConst))(factor.lightConst); }
| }
|
| auto a = [1, 2, 3].sliced;
| auto b = [1, 2].sliced;
| auto A = Mul!(typeof(a))(a);
| auto B = Mul!(typeof(b))(b);
|
| auto x = [
| [0, 1, 2],
| [3, 4, 5]
| ].fuse;
|
| auto s1 = x.byDim!0.vmap(A).fuse;
| assert(s1 == [[ 0, 2, 6],
| [ 3, 8, 15]]);
| auto s2 = x.byDim!1.vmap(B).fuse!1;
| assert(s2 == [[ 0, 1, 2],
| [ 6, 8, 10]]);
| auto s3 = x.alongDim!1.vmap(A).fuse;
| assert(s1 == [[ 0, 2, 6],
| [ 3, 8, 15]]);
| auto s4 = x.alongDim!0.vmap(B).fuse!1;
| assert(s2 == [[ 0, 1, 2],
| [ 6, 8, 10]]);
|}
|
|private auto hideStride
| (Iterator, SliceKind kind)
| (Slice!(Iterator, 1, kind) slice)
|{
| import core.lifetime: move;
| static if (kind == Universal)
| return Slice!(StrideIterator!Iterator)(
| slice._lengths,
| StrideIterator!Iterator(slice._strides[0], move(slice._iterator)));
| else
| return slice;
|}
|
|private auto unhideStride
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
|{
| static if (is(Iterator : StrideIterator!It, It))
| {
| import core.lifetime: move;
| static if (kind == Universal)
| {
|            alias Ret = Slice!(It, N, Universal);
| auto strides = slice._strides;
| foreach(i; Iota!(Ret.S))
| strides[i] = slice._strides[i] * slice._iterator._stride;
| return Slice!(It, N, Universal)(slice._lengths, strides, slice._iterator._iterator.move);
| }
| else
| return slice.move.universal.unhideStride;
| }
| else
| return slice;
|}
|
|/++
|Creates a random access cache for lazily computed elements.
|Params:
| original = original ndslice
| caches = cached values
|    flags = array composed of flags that indicate whether values are already computed
|Returns:
|    ndslice, which is internally composed of three ndslices: `original`, `caches`, and `flags`.
|See_also: $(LREF cachedGC), $(LREF map), $(LREF vmap), $(LREF indexed)
|+/
|Slice!(CachedIterator!(Iterator, CacheIterator, FlagIterator), N, kind)
| cached(Iterator, SliceKind kind, size_t N, CacheIterator, FlagIterator)(
| Slice!(Iterator, N, kind) original,
| Slice!(CacheIterator, N, kind) caches,
| Slice!(FlagIterator, N, kind) flags,
| )
|{
| assert(original.shape == caches.shape, "caches.shape should be equal to original.shape");
| assert(original.shape == flags.shape, "flags.shape should be equal to original.shape");
| return typeof(return)(
| original._structure,
| IteratorOf!(typeof(return))(
| original._iterator,
| caches._iterator,
| flags._iterator,
| ));
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology: cached, iota, map;
| import mir.ndslice.allocation: bitSlice, uninitSlice;
|
| int[] funCalls;
|
| auto v = 5.iota!int
| .map!((i) {
| funCalls ~= i;
| return 2 ^^ i;
| });
| auto flags = v.length.bitSlice;
| auto cache = v.length.uninitSlice!int;
| // cached lazy slice: 1 2 4 8 16
| auto sl = v.cached(cache, flags);
|
| assert(funCalls == []);
| assert(sl[1] == 2); // remember result
| assert(funCalls == [1]);
| assert(sl[1] == 2); // reuse result
| assert(funCalls == [1]);
|
| assert(sl[0] == 1);
| assert(funCalls == [1, 0]);
| funCalls = [];
|
| // set values directly
| sl[1 .. 3] = 5;
| assert(sl[1] == 5);
| assert(sl[2] == 5);
| // no function calls
| assert(funCalls == []);
|}
|
|/// Cache of immutable elements
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.slice: DeepElementType;
| import mir.ndslice.topology: cached, iota, map, as;
| import mir.ndslice.allocation: bitSlice, uninitSlice;
|
| int[] funCalls;
|
| auto v = 5.iota!int
| .map!((i) {
| funCalls ~= i;
| return 2 ^^ i;
| })
| .as!(immutable int);
| auto flags = v.length.bitSlice;
| auto cache = v.length.uninitSlice!(immutable int);
|
| // cached lazy slice: 1 2 4 8 16
| auto sl = v.cached(cache, flags);
|
| static assert(is(DeepElementType!(typeof(sl)) == immutable int));
|
| assert(funCalls == []);
| assert(sl[1] == 2); // remember result
| assert(funCalls == [1]);
| assert(sl[1] == 2); // reuse result
| assert(funCalls == [1]);
|
| assert(sl[0] == 1);
| assert(funCalls == [1, 0]);
|}
|
|/++
|Creates a random access cache for lazily computed elements.
|Params:
| original = ND Contiguous or 1D Universal ndslice.
|Returns:
| ndslice, which is internally composed of three ndslices: `original`, allocated cache and allocated bit-ndslice.
|See_also: $(LREF cached), $(LREF map), $(LREF vmap), $(LREF indexed)
|+/
|Slice!(CachedIterator!(Iterator, typeof(Iterator.init[0])*, FieldIterator!(BitField!(size_t*))), N)
| cachedGC(Iterator, size_t N)(Slice!(Iterator, N) original) @trusted
|{
| import std.traits: hasElaborateAssign, Unqual;
| import mir.ndslice.allocation: bitSlice, slice, uninitSlice;
| alias C = typeof(Iterator.init[0]);
| alias UC = Unqual!C;
| static if (hasElaborateAssign!UC)
| alias newSlice = slice;
| else
| alias newSlice = uninitSlice;
| return typeof(return)(
| original._structure,
| IteratorOf!(typeof(return))(
| original._iterator,
| newSlice!C(original._lengths)._iterator,
| original._lengths.bitSlice._iterator,
| ));
|}
|
|/// ditto
|auto cachedGC(Iterator)(Slice!(Iterator, 1, Universal) from)
|{
| return from.flattened.cachedGC;
|}
|
|/// ditto
|auto cachedGC(T)(T withAsSlice)
| if (hasAsSlice!T)
|{
| return cachedGC(withAsSlice.asSlice);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology: cachedGC, iota, map;
|
| int[] funCalls;
|
| // cached lazy slice: 1 2 4 8 16
| auto sl = 5.iota!int
| .map!((i) {
| funCalls ~= i;
| return 2 ^^ i;
| })
| .cachedGC;
|
| assert(funCalls == []);
| assert(sl[1] == 2); // remember result
| assert(funCalls == [1]);
| assert(sl[1] == 2); // reuse result
| assert(funCalls == [1]);
|
| assert(sl[0] == 1);
| assert(funCalls == [1, 0]);
| funCalls = [];
|
| // set values directly
| sl[1 .. 3] = 5;
| assert(sl[1] == 5);
| assert(sl[2] == 5);
| // no function calls
| assert(funCalls == []);
|}
|
|/// Cache of immutable elements
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.slice: DeepElementType;
| import mir.ndslice.topology: cachedGC, iota, map, as;
|
| int[] funCalls;
|
| // cached lazy slice: 1 2 4 8 16
| auto sl = 5.iota!int
| .map!((i) {
| funCalls ~= i;
| return 2 ^^ i;
| })
| .as!(immutable int)
| .cachedGC;
|
| static assert(is(DeepElementType!(typeof(sl)) == immutable int));
|
| assert(funCalls == []);
| assert(sl[1] == 2); // remember result
| assert(funCalls == [1]);
| assert(sl[1] == 2); // reuse result
| assert(funCalls == [1]);
|
| assert(sl[0] == 1);
| assert(funCalls == [1, 0]);
|}
|
|/++
|Convenience function that creates a lazy view,
|where each element of the original slice is converted to the type `T`.
|It uses $(LREF map) and $(REF_ALTTEXT $(TT to), to, mir,conv)$(NBSP)
|composition under the hood.
|Params:
| slice = a slice to create a view on.
|Returns:
| A lazy slice with elements converted to the type `T`.
|See_also: $(LREF map), $(LREF vmap)
|+/
|template as(T)
|{
| ///
| @optmath auto as(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| static if (is(slice.DeepElement == T))
| return slice;
| else
| static if (is(Iterator : T*))
| return slice.toConst;
| else
| {
| import core.lifetime: move;
| import mir.conv: to;
| return map!(to!T)(slice.move);
| }
| }
|
| /// ditto
| auto as(S)(S[] array)
| {
| return as(array.sliced);
| }
|
| /// ditto
| auto as(S)(S withAsSlice)
| if (hasAsSlice!S)
| {
| return as(withAsSlice.asSlice);
| }
|
| /// ditto
| auto as(Range)(Range r)
| if (!hasAsSlice!Range && !isSlice!Range && !is(Range : T[], T))
| {
| static if (is(ForeachType!Range == T))
| return r;
| else
| {
| import core.lifetime: move;
| import mir.conv: to;
| return map!(to!T)(r.move);
| }
| }
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : diagonal, as;
|
| auto matrix = slice!double([2, 2], 0);
|    auto intMatrixView = matrix.as!int;
|    assert(intMatrixView ==
|            [[0, 0],
|             [0, 0]]);
|
|    matrix.diagonal[] = 1;
|    assert(intMatrixView ==
|            [[1, 0],
|             [0, 1]]);
|
|    /// allocate a new slice composed of ints
|    Slice!(int*, 2) intMatrix = intMatrixView.slice;
|}
|
|/// Special behavior for pointers to a constant data.
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.slice : Contiguous, Slice;
|
| Slice!(double*, 2) matrix = slice!double([2, 2], 0);
| Slice!(const(double)*, 2) const_matrix = matrix.as!(const double);
|}
|
|/// Ranges
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.algorithm.iteration: filter, equal;
| assert(5.iota.filter!"a % 2".as!double.map!"a / 2".equal([0.5, 1.5]));
|}
|
|/++
|Takes a field `source` and a slice `indices`, and creates a view of source as if its elements were reordered according to indices.
|`indices` may include only a subset of the elements of `source` and may also repeat elements.
|
|Params:
|    source = a field, the source of data. `source` must be an array or a pointer, or have the `opIndex` primitive. The full random access range API is not required.
| indices = a slice, source of indices.
|Returns:
| n-dimensional slice with the same kind, shape and strides.
|
|See_also: `indexed` is similar to $(LREF vmap), but a field (`[]`) is used instead of a function (`()`), and order of arguments is reversed.
|+/
|Slice!(IndexIterator!(Iterator, Field), N, kind)
| indexed(Field, Iterator, size_t N, SliceKind kind)
| (Field source, Slice!(Iterator, N, kind) indices)
|{
| import core.lifetime: move;
| return typeof(return)(
| indices._structure,
| IndexIterator!(Iterator, Field)(
| indices._iterator.move,
| source));
|}
|
|/// ditto
|auto indexed(Field, S)(Field source, S[] indices)
|{
| return indexed(source, indices.sliced);
|}
|
|/// ditto
|auto indexed(Field, S)(Field source, S indices)
| if (hasAsSlice!S)
|{
| return indexed(source, indices.asSlice);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| auto source = [1, 2, 3, 4, 5];
| auto indices = [4, 3, 1, 2, 0, 4];
| auto ind = source.indexed(indices);
| assert(ind == [5, 4, 2, 3, 1, 5]);
|
| assert(ind.retro == source.indexed(indices.retro));
|
| ind[3] += 10; // for index 2
| // 0 1 2 3 4
| assert(source == [1, 2, 13, 4, 5]);
|}
|
|/++
|Maps index pairs to subslices.
|Params:
| sliceable = pointer, array, ndslice, series, or something sliceable with `[a .. b]`.
| slices = ndslice composed of index pairs.
|Returns:
| ndslice composed of subslices.
|See_also: $(LREF chopped), $(LREF pairwise).
|+/
|Slice!(SubSliceIterator!(Iterator, Sliceable), N, kind)
| subSlices(Iterator, size_t N, SliceKind kind, Sliceable)(
| Sliceable sliceable,
| Slice!(Iterator, N, kind) slices,
| )
|{
| import core.lifetime: move;
| return typeof(return)(
| slices._structure,
| SubSliceIterator!(Iterator, Sliceable)(slices._iterator.move, sliceable.move)
| );
|}
|
|/// ditto
|auto subSlices(S, Sliceable)(Sliceable sliceable, S[] slices)
|{
| return subSlices(sliceable, slices.sliced);
|}
|
|/// ditto
|auto subSlices(S, Sliceable)(Sliceable sliceable, S slices)
| if (hasAsSlice!S)
|{
| return subSlices(sliceable, slices.asSlice);
|}
|
|///
|@safe pure version(mir_test) unittest
|{
| import mir.functional: staticArray;
| auto subs =[
| staticArray(2, 4),
| staticArray(2, 10),
| ];
| auto sliceable = 10.iota;
|
| auto r = sliceable.subSlices(subs);
| assert(r == [
| iota([4 - 2], 2),
| iota([10 - 2], 2),
| ]);
|}
|
|/++
|Maps index pairs to subslices.
|Params:
|    bounds = ndslice composed of consecutive (`a_i <= a_(i+1)`) pairwise index bounds.
| sliceable = pointer, array, ndslice, series, or something sliceable with `[a_i .. a_(i+1)]`.
|Returns:
| ndslice composed of subslices.
|See_also: $(LREF pairwise), $(LREF subSlices).
|+/
|Slice!(ChopIterator!(Iterator, Sliceable)) chopped(Iterator, Sliceable)(
| Sliceable sliceable,
| Slice!Iterator bounds,
| )
|in
|{
| debug(mir)
| foreach(b; bounds.pairwise!"a <= b")
| assert(b);
|}
|do {
| import core.lifetime: move;
| sizediff_t length = bounds._lengths[0] <= 1 ? 0 : bounds._lengths[0] - 1;
| static if (hasLength!Sliceable)
| {
| if (length && bounds[length - 1] > sliceable.length)
| {
| version (D_Exceptions)
| throw choppedException;
| else
| assert(0, choppedExceptionMsg);
| }
| }
|
| return typeof(return)([size_t(length)], ChopIterator!(Iterator, Sliceable)(bounds._iterator.move, sliceable.move));
|}
|
|/// ditto
|auto chopped(S, Sliceable)(Sliceable sliceable, S[] bounds)
|{
| return chopped(sliceable, bounds.sliced);
|}
|
|/// ditto
|auto chopped(S, Sliceable)(Sliceable sliceable, S bounds)
| if (hasAsSlice!S)
|{
| return chopped(sliceable, bounds.asSlice);
|}
|
|///
|@safe pure version(mir_test) unittest
|{
| import mir.functional: staticArray;
| import mir.ndslice.slice : sliced;
| auto pairwiseIndexes = [2, 4, 10].sliced;
| auto sliceable = 10.iota;
|
| auto r = sliceable.chopped(pairwiseIndexes);
| assert(r == [
| iota([4 - 2], 2),
| iota([10 - 4], 4),
| ]);
|}
|
|/++
|Groups slices into a slice of refTuples. The slices must have identical strides or be 1-dimensional.
|Params:
|    sameStrides = if `true`, assumes that all slices have the same strides.
| slices = list of slices
|Returns:
| n-dimensional slice of elements refTuple
|See_also: $(SUBREF slice, Slice.strides).
|+/
|template zip(bool sameStrides = false)
|{
| /++
| Groups slices into a slice of refTuples. The slices must have identical strides or be 1-dimensional.
| Params:
| slices = list of slices
| Returns:
| n-dimensional slice of elements refTuple
| See_also: $(SUBREF slice, Slice.strides).
| +/
| @optmath
| auto zip(Slices...)(Slices slices)
| if (Slices.length > 1 && allSatisfy!(isConvertibleToSlice, Slices))
| {
| static if (allSatisfy!(isSlice, Slices))
| {
| enum N = Slices[0].N;
| foreach(i, S; Slices[1 .. $])
| {
| static assert(S.N == N, "zip: all Slices must have the same dimension count");
| assert(slices[i + 1]._lengths == slices[0]._lengths, "zip: all slices must have the same lengths");
| static if (sameStrides)
| assert(slices[i + 1].strides == slices[0].strides, "zip: all slices must have the same strides when unpacked");
| }
| static if (!sameStrides && minElem(staticMap!(kindOf, Slices)) != Contiguous)
| {
| static assert(N == 1, "zip: cannot zip canonical and universal multidimensional slices if `sameStrides` is false");
| mixin(`return .zip(` ~ _iotaArgs!(Slices.length, "slices[", "].hideStride, ") ~`);`);
| }
| else
| {
| enum kind = maxElem(staticMap!(kindOf, Slices));
| alias Iterator = ZipIterator!(staticMap!(_IteratorOf, Slices));
| alias Ret = Slice!(Iterator, N, kind);
| auto structure = Ret._Structure.init;
| structure[0] = slices[0]._lengths;
| foreach (i; Iota!(Ret.S))
| structure[1][i] = slices[0]._strides[i];
| return Ret(structure, mixin("Iterator(" ~ _iotaArgs!(Slices.length, "slices[", "]._iterator, ") ~ ")"));
| }
| }
| else
| {
| return .zip(toSlices!slices);
| }
| }
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : flattened, iota;
|
| auto alpha = iota!int(4, 3);
| auto beta = slice!int(4, 3).universal;
|
| auto m = zip!true(alpha, beta);
| foreach (r; m)
| foreach (e; r)
| e.b = e.a;
| assert(alpha == beta);
|
| beta[] = 0;
| foreach (e; m.flattened)
| e.b = cast(int)e.a;
| assert(alpha == beta);
|}
|
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : flattened, iota;
|
| auto alpha = iota!int(4).universal;
| auto beta = new int[4];
|
| auto m = zip(alpha, beta);
| foreach (e; m)
| e.b = e.a;
| assert(alpha == beta);
|}
|
|/++
|Selects a slice from a zipped slice.
|Params:
| name = name of a slice to unzip.
| slice = zipped slice
|Returns:
| unzipped slice
|+/
|auto unzip
| (char name, size_t N, SliceKind kind, Iterators...)
| (Slice!(ZipIterator!Iterators, N, kind) slice)
|{
| import core.lifetime: move;
| enum size_t i = name - 'a';
| static assert(i < Iterators.length, `unzip: constraint: size_t(name - 'a') < Iterators.length`);
| return Slice!(Iterators[i], N, kind)(slice._structure, slice._iterator._iterators[i].move).unhideStride;
|}
|
|/// ditto
|auto unzip
| (char name, size_t N, SliceKind kind, Iterators...)
| (ref Slice!(ZipIterator!Iterators, N, kind) slice)
|{
| enum size_t i = name - 'a';
| static assert(i < Iterators.length, `unzip: constraint: size_t(name - 'a') < Iterators.length`);
| return Slice!(Iterators[i], N, kind)(slice._structure, slice._iterator._iterators[i]).unhideStride;
|}
|
|///
|pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : iota;
|
| auto alpha = iota!int(4, 3);
| auto beta = iota!int([4, 3], 1).slice;
|
| auto m = zip(alpha, beta);
|
| static assert(is(typeof(unzip!'a'(m)) == typeof(alpha)));
| static assert(is(typeof(unzip!'b'(m)) == typeof(beta)));
|
| assert(m.unzip!'a' == alpha);
| assert(m.unzip!'b' == beta);
|}
|
|private enum TotalDim(NdFields...) = [staticMap!(DimensionCount, NdFields)].sum;
|
|private template applyInner(alias fun, size_t N)
|{
| static if (N == 0)
| alias applyInner = fun;
| else
| {
| import mir.functional: pipe;
| alias applyInner = pipe!(zip!true, map!(.applyInner!(fun, N - 1)));
| }
|}
|
|/++
|Lazy convolution for tensors.
|
|Suitable for advanced convolution algorithms.
|
|Params:
|    params = convolution window length.
|    fun = one-dimensional convolution function with `params` arity.
| SDimensions = dimensions to perform lazy convolution along. Negative dimensions are supported.
|See_also: $(LREF slide), $(LREF pairwise), $(LREF diff).
|+/
|template slideAlong(size_t params, alias fun, SDimensions...)
| if (params <= 'z' - 'a' + 1 && SDimensions.length > 0)
|{
| import mir.functional: naryFun;
|
| static if (allSatisfy!(isSizediff_t, SDimensions) && params > 1 && __traits(isSame, naryFun!fun, fun))
| {
| @optmath:
| /++
| Params: slice = ndslice or array
| Returns: lazy convolution result
| +/
| auto slideAlong(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| {
| import core.lifetime: move;
| static if (N > 1 && kind == Contiguous)
| {
| return slideAlong(slice.move.canonical);
| }
| else
| static if (N == 1 && kind == Universal)
| {
| return slideAlong(slice.move.flattened);
| }
| else
| {
| alias Dimensions = staticMap!(ShiftNegativeWith!N, SDimensions);
| enum dimension = Dimensions[$ - 1];
| size_t len = slice._lengths[dimension] - (params - 1);
|            if (sizediff_t(len) <= 0) // overflow
| len = 0;
| slice._lengths[dimension] = len;
| static if (dimension + 1 == N || kind == Universal)
| {
| alias I = SlideIterator!(Iterator, params, fun);
| auto ret = Slice!(I, N, kind)(slice._structure, I(move(slice._iterator)));
| }
| else
| {
| alias Z = ZipIterator!(Repeat!(params, Iterator));
| Z z;
| foreach_reverse (p; Iota!(1, params))
| z._iterators[p] = slice._iterator + slice._strides[dimension] * p;
| z._iterators[0] = move(slice._iterator);
| alias M = MapIterator!(Z, fun);
| auto ret = Slice!(M, N, kind)(slice._structure, M(move(z)));
| }
| static if (Dimensions.length == 1)
| {
| return ret;
| }
| else
| {
| return .slideAlong!(params, fun, Dimensions[0 .. $ - 1])(ret);
| }
| }
| }
|
| /// ditto
| auto slideAlong(S)(S[] slice)
| {
| return slideAlong(slice.sliced);
| }
|
| /// ditto
| auto slideAlong(S)(S slice)
| if (hasAsSlice!S)
| {
| return slideAlong(slice.asSlice);
| }
| }
| else
| static if (params == 1)
| alias slideAlong = .map!(naryFun!fun);
| else alias slideAlong = .slideAlong!(params, naryFun!fun, staticMap!(toSizediff_t, SDimensions));
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| auto data = [4, 5].iota;
|
| alias scaled = a => a * 0.25;
|
| auto v = data.slideAlong!(3, "a + 2 * b + c", 0).map!scaled;
| auto h = data.slideAlong!(3, "a + 2 * b + c", 1).map!scaled;
|
| assert(v == [4, 5].iota[1 .. $ - 1, 0 .. $]);
| assert(h == [4, 5].iota[0 .. $, 1 .. $ - 1]);
|}
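|
|// A sketch of the negative-dimension form mentioned above: `-1` should address the last
|// dimension, so for a 2-dimensional slice it matches the explicit dimension `1`.
|version(mir_test) unittest
|{
|    auto data = [4, 5].iota;
|    assert(data.slideAlong!(3, "a + 2 * b + c", -1) ==
|           data.slideAlong!(3, "a + 2 * b + c",  1));
|}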
|
|/++
|Lazy convolution for tensors.
|
|Suitable for simple convolution algorithms.
|
|Params:
|    params = window length.
|    fun = one-dimensional convolution function with `params` arity.
|See_also: $(LREF slideAlong), $(LREF withNeighboursSum), $(LREF pairwise), $(LREF diff).
|+/
|template slide(size_t params, alias fun)
| if (params <= 'z' - 'a' + 1)
|{
| import mir.functional: naryFun;
|
| static if (params > 1 && __traits(isSame, naryFun!fun, fun))
| {
| @optmath:
| /++
| Params: slice = ndslice or array
| Returns: lazy convolution result
| +/
| auto slide(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| {
| import core.lifetime: move;
| return slice.move.slideAlong!(params, fun, Iota!N);
| }
|
| /// ditto
| auto slide(S)(S[] slice)
| {
| return slide(slice.sliced);
| }
|
| /// ditto
| auto slide(S)(S slice)
| if (hasAsSlice!S)
| {
| return slide(slice.asSlice);
| }
| }
| else
| static if (params == 1)
| alias slide = .map!(naryFun!fun);
| else alias slide = .slide!(params, naryFun!fun);
|}
|
|///
|version(mir_test) unittest
|{
| auto data = 10.iota;
| auto sw = data.slide!(3, "a + 2 * b + c");
|
| import mir.utility: max;
| assert(sw.length == max(0, cast(ptrdiff_t)data.length - 3 + 1));
| assert(sw == sw.length.iota.map!"(a + 1) * 4");
| assert(sw == [4, 8, 12, 16, 20, 24, 28, 32]);
|}
|
|/++
|ND-use case
|+/
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| auto data = [4, 5].iota;
|
| enum factor = 1.0 / 4 ^^ data.shape.length;
| alias scaled = a => a * factor;
|
| auto sw = data.slide!(3, "a + 2 * b + c").map!scaled;
|
| assert(sw == [4, 5].iota[1 .. $ - 1, 1 .. $ - 1]);
|}
|
|/++
|Pairwise map for tensors.
|
|The computation is performed on request, when the element is accessed.
|
|Params:
| fun = function to accumulate
| lag = an integer indicating which lag to use
|Returns: lazy ndslice composed of `fun(a_n, a_n+1)` values.
|
|See_also: $(LREF slide), $(LREF slideAlong), $(LREF subSlices).
|+/
|alias pairwise(alias fun, size_t lag = 1) = slide!(lag + 1, fun);
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| assert([2, 4, 3, -1].sliced.pairwise!"a + b" == [6, 7, 2]);
|}
|
|/// N-dimensional
|@safe pure nothrow
|version(mir_test) unittest
|{
| // performs pairwise along each dimension
| // 0 1 2 3
| // 4 5 6 7
| // 8 9 10 11
| assert([3, 4].iota.pairwise!"a + b" == [[10, 14, 18], [26, 30, 34]]);
|}
|
|/++
|Differences between tensor elements.
|
|The computation is performed on request, when the element is accessed.
|
|Params:
| lag = an integer indicating which lag to use
|Returns: lazy differences.
|
|See_also: $(LREF slide), $(LREF pairwise).
|+/
|alias diff(size_t lag = 1) = pairwise!(('a' + lag) ~ " - a", lag);
|
|///
|version(mir_test) unittest
|{
| assert([2, 4, 3, -1].sliced.diff == [2, -1, -4]);
|}
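|
|// A sketch of the `lag` parameter: with `lag = 2` each difference is assumed to be taken
|// between elements two positions apart, i.e. `a[n + 2] - a[n]`.
|version(mir_test) unittest
|{
|    assert([2, 4, 3, -1].sliced.diff!2 == [1, -5]);
|}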
|
|/// N-dimensional
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| // 0 1 2 3
| // 4 5 6 7 =>
| // 8 9 10 11
|
| // 1 1 1
| // 1 1 1 =>
| // 1 1 1
|
| // 0 0 0
| // 0 0 0
|
| assert([3, 4].iota.diff == repeat(0, [2, 3]));
|}
|
|/// packed slices
|version(mir_test) unittest
|{
| // 0 1 2 3
| // 4 5 6 7
| // 8 9 10 11
|    auto s = iota(3, 4);
|    assert(s.byDim!0.diff == [
|        [4, 4, 4, 4],
|        [4, 4, 4, 4]]);
|    assert(s.byDim!1.diff == [
|        [1, 1, 1],
|        [1, 1, 1],
|        [1, 1, 1]]);
|}
|
|/++
|Drops borders for all dimensions.
|
|Params:
| slice = ndslice
|Returns:
|    Tensor with stripped borders
|See_also:
| $(LREF universal),
| $(LREF assumeCanonical),
| $(LREF assumeContiguous).
|+/
|Slice!(Iterator, N, N > 1 && kind == Contiguous ? Canonical : kind, Labels)
| dropBorders
| (Iterator, size_t N, SliceKind kind, Labels...)
| (Slice!(Iterator, N, kind, Labels) slice)
|{
| static if (N > 1 && kind == Contiguous)
| {
| import core.lifetime: move;
| auto ret = slice.move.canonical;
| }
| else
| {
| alias ret = slice;
| }
| ret.popFrontAll;
| ret.popBackAll;
| return ret;
|}
|
|///
|version(mir_test) unittest
|{
| assert([4, 5].iota.dropBorders == [[6, 7, 8], [11, 12, 13]]);
|}
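|
|// Equivalently, dropping borders should match cutting one element from both ends of
|// every dimension (a sketch of the same call as above):
|version(mir_test) unittest
|{
|    assert([4, 5].iota.dropBorders == [4, 5].iota[1 .. $ - 1, 1 .. $ - 1]);
|}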
|
|/++
|Lazy zip view of elements packed with the sum of their neighbours.
|
|Params:
| fun = neighbours accumulation function.
|See_also: $(LREF slide), $(LREF slideAlong).
|+/
|template withNeighboursSum(alias fun = "a + b")
|{
| import mir.functional: naryFun;
|
| static if (__traits(isSame, naryFun!fun, fun))
| {
| @optmath:
| /++
| Params:
| slice = ndslice or array
| Returns:
|        Lazy zip view of elements packed with the sum of their neighbours.
| +/
| auto withNeighboursSum(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| {
| import core.lifetime: move;
| static if (N > 1 && kind == Contiguous)
| {
| return withNeighboursSum(slice.move.canonical);
| }
| else
| static if (N == 1 && kind == Universal)
| {
| return withNeighboursSum(slice.move.flattened);
| }
| else
| {
| enum around = kind != Universal;
| alias Z = NeighboursIterator!(Iterator, N - around, fun, around);
|
| size_t shift;
| foreach (dimension; Iota!N)
| {
| slice._lengths[dimension] -= 2;
|                if (sizediff_t(slice._lengths[dimension]) <= 0) // overflow
| slice._lengths[dimension] = 0;
| shift += slice._stride!dimension;
| }
|
| Z z;
| z._iterator = move(slice._iterator);
| z._iterator += shift;
| foreach (dimension; Iota!(N - around))
| {
| z._neighbours[dimension][0] = z._iterator - slice._strides[dimension];
| z._neighbours[dimension][1] = z._iterator + slice._strides[dimension];
| }
| return Slice!(Z, N, kind)(slice._structure, move(z));
| }
| }
|
| /// ditto
| auto withNeighboursSum(S)(S[] slice)
| {
| return withNeighboursSum(slice.sliced);
| }
|
| /// ditto
| auto withNeighboursSum(S)(S slice)
| if (hasAsSlice!S)
| {
| return withNeighboursSum(slice.asSlice);
| }
| }
| else alias withNeighboursSum = .withNeighboursSum!(naryFun!fun);
|}
|
|///
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.algorithm.iteration: all;
|
| auto wn = [4, 5].iota.withNeighboursSum;
| assert(wn.all!"a[0] == a[1] * 0.25");
| assert(wn.map!"a" == wn.map!"b * 0.25");
|}
|
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.algorithm.iteration: all;
|
| auto wn = [4, 5].iota.withNeighboursSum.universal;
| assert(wn.all!"a[0] == a[1] * 0.25");
| assert(wn.map!"a" == wn.map!"b * 0.25");
|}
|
|/++
|Cartesian product.
|
|Constructs a lazy Cartesian product $(SUBREF slice, Slice) without memory allocation.
|
|Params:
| fields = list of fields with lengths or ndFields with shapes
|Returns: $(SUBREF ndfield, Cartesian)`!NdFields(fields).`$(SUBREF slice, slicedNdField)`;`
|+/
|auto cartesian(NdFields...)(NdFields fields)
| if (NdFields.length > 1 && allSatisfy!(templateOr!(hasShape, hasLength), NdFields))
|{
| return Cartesian!NdFields(fields).slicedNdField;
|}
|
|/// 1D x 1D
|version(mir_test) unittest
|{
| auto a = [10, 20, 30];
| auto b = [ 1, 2, 3];
|
| auto c = cartesian(a, b)
| .map!"a + b";
|
| assert(c == [
| [11, 12, 13],
| [21, 22, 23],
| [31, 32, 33]]);
|}
|
|/// 1D x 2D
|version(mir_test) unittest
|{
| auto a = [10, 20, 30];
| auto b = iota([2, 3], 1);
|
| auto c = cartesian(a, b)
| .map!"a + b";
|
| assert(c.shape == [3, 2, 3]);
|
| assert(c == [
| [
| [11, 12, 13],
| [14, 15, 16],
| ],
| [
| [21, 22, 23],
| [24, 25, 26],
| ],
| [
| [31, 32, 33],
| [34, 35, 36],
| ]]);
|}
|
|/// 1D x 1D x 1D
|version(mir_test) unittest
|{
| auto u = [100, 200];
| auto v = [10, 20, 30];
| auto w = [1, 2];
|
| auto c = cartesian(u, v, w)
| .map!"a + b + c";
|
| assert(c.shape == [2, 3, 2]);
|
| assert(c == [
| [
| [111, 112],
| [121, 122],
| [131, 132],
| ],
| [
| [211, 212],
| [221, 222],
| [231, 232],
| ]]);
|}
|
|/++
|$(LINK2 https://en.wikipedia.org/wiki/Kronecker_product, Kronecker product).
|
|Constructs a lazy Kronecker product $(SUBREF slice, Slice) without memory allocation.
|+/
|template kronecker(alias fun = product)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun))
|
| /++
| Params:
| fields = list of either fields with lengths or ndFields with shapes.
| All ndFields must have the same dimension count.
| Returns:
| $(SUBREF ndfield, Kronecker)`!(fun, NdFields)(fields).`$(SUBREF slice, slicedNdField)
| +/
| @optmath auto kronecker(NdFields...)(NdFields fields)
| if (allSatisfy!(hasShape, NdFields) || allSatisfy!(hasLength, NdFields))
| {
| return Kronecker!(fun, NdFields)(fields).slicedNdField;
| }
| else
| alias kronecker = .kronecker!(naryFun!fun);
|}
|
|/// 2D
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.slice : sliced;
|
| // eye
| auto a = slice!double([4, 4], 0);
| a.diagonal[] = 1;
|
| auto b = [ 1, -1,
| -1, 1].sliced(2, 2);
|
| auto c = kronecker(a, b);
|
| assert(c == [
| [ 1, -1, 0, 0, 0, 0, 0, 0],
| [-1, 1, 0, 0, 0, 0, 0, 0],
| [ 0, 0, 1, -1, 0, 0, 0, 0],
| [ 0, 0, -1, 1, 0, 0, 0, 0],
| [ 0, 0, 0, 0, 1, -1, 0, 0],
| [ 0, 0, 0, 0, -1, 1, 0, 0],
| [ 0, 0, 0, 0, 0, 0, 1, -1],
| [ 0, 0, 0, 0, 0, 0, -1, 1]]);
|}
|
|/// 1D
|version(mir_test) unittest
|{
| auto a = iota([3], 1);
|
| auto b = [ 1, -1];
|
| auto c = kronecker(a, b);
|
| assert(c == [1, -1, 2, -2, 3, -3]);
|}
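|
|/+
|Editor's addition (not from the original listing): a hedged sketch of the `fun`
|template parameter; assuming the lambda replaces the default `product` in the
|usual Kronecker layout, `"a + b"` adds the paired elements instead of
|multiplying them.
|+/
|version(mir_test) unittest
|{
| auto a = [0, 1];
| auto b = [0, 10];
|
| auto c = kronecker!"a + b"(a, b);
|
| assert(c == [0, 10, 1, 11]);
|}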
|
|/// 2D with 3 arguments
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.slice : sliced;
|
| auto a = [ 1, 2,
| 3, 4].sliced(2, 2);
|
| auto b = [ 1, 0,
| 0, 1].sliced(2, 2);
|
| auto c = [ 1, -1,
| -1, 1].sliced(2, 2);
|
| auto d = kronecker(a, b, c);
|
| assert(d == [
| [ 1, -1, 0, 0, 2, -2, 0, 0],
| [-1, 1, 0, 0, -2, 2, 0, 0],
| [ 0, 0, 1, -1, 0, 0, 2, -2],
| [ 0, 0, -1, 1, 0, 0, -2, 2],
| [ 3, -3, 0, 0, 4, -4, 0, 0],
| [-3, 3, 0, 0, -4, 4, 0, 0],
| [ 0, 0, 3, -3, 0, 0, 4, -4],
| [ 0, 0, -3, 3, 0, 0, -4, 4]]);
|}
|
|/++
|$(HTTPS en.wikipedia.org/wiki/Magic_square, Magic square).
|Params:
| length = square matrix length.
|Returns:
| Lazy magic matrix.
|+/
|auto magic(size_t length)
|{
0000000| assert(length > 0);
| static if (is(size_t == ulong))
| assert(length <= uint.max);
| else
0000000| assert(length <= ushort.max);
| import mir.ndslice.field: MagicField;
0000000| return MagicField(length).slicedField(length, length);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.math.sum;
| import mir.ndslice: slice, magic, byDim, map, as, repeat, diagonal, antidiagonal;
|
| bool isMagic(S)(S matrix)
| {
| auto n = matrix.length;
| auto c = n * (n * n + 1) / 2; // magic number
| return // check shape
| matrix.length!0 > 0 && matrix.length!0 == matrix.length!1
| && // each row sum should equal magic number
| matrix.byDim!0.map!sum == c.repeat(n)
| && // each column sum should equal magic number
| matrix.byDim!1.map!sum == c.repeat(n)
| && // diagonal sum should equal magic number
| matrix.diagonal.sum == c
| && // antidiagonal sum should equal magic number
| matrix.antidiagonal.sum == c;
| }
|
| assert(isMagic(magic(1)));
| assert(!isMagic(magic(2))); // 2x2 magic square does not exist
| foreach(n; 3 .. 24)
| assert(isMagic(magic(n)));
| assert(isMagic(magic(3).as!double.slice));
|}
|
|/++
|Chops 1D input slice into n chunks with ascending or descending lengths.
|
|`stairs` can be used to pack and unpack symmetric and triangular matrix storage.
|
|Note: `stairs` is defined for 1D (packed) input and 2D (general) input.
| This part of the documentation is for 1D input.
|
|Params:
| type = $(UL
| $(LI `"-"` for stairs with lengths `n, n-1, ..., 1`.)
| $(LI `"+"` for stairs with lengths `1, 2, ..., n`;)
| )
| slice = input slice with length equal to `n * (n + 1) / 2`
| n = stairs count
|Returns:
| 1D contiguous slice composed of 1D contiguous slices.
|
|See_also: $(LREF triplets) $(LREF ._stairs.2)
|+/
|Slice!(StairsIterator!(Iterator, type)) stairs(string type, Iterator)(Slice!Iterator slice, size_t n)
| if (type == "+" || type == "-")
|{
| assert(slice.length == (n + 1) * n / 2, "stairs: slice length must be equal to n * (n + 1) / 2, where n is stairs count.");
| static if (type == "+")
| size_t length = 1;
| else
| size_t length = n;
| return StairsIterator!(Iterator, type)(length, slice._iterator).sliced(n);
|}
|
|/// ditto
|Slice!(StairsIterator!(S*, type)) stairs(string type, S)(S[] slice, size_t n)
| if (type == "+" || type == "-")
|{
| return stairs!type(slice.sliced, n);
|}
|
|/// ditto
|auto stairs(string type, S)(S slice, size_t n)
| if (hasAsSlice!S && (type == "+" || type == "-"))
|{
| return stairs!type(slice.asSlice, n);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.topology: iota, stairs;
|
| auto pck = 15.iota;
| auto inc = pck.stairs!"+"(5);
| auto dec = pck.stairs!"-"(5);
|
| assert(inc == [
| [0],
| [1, 2],
| [3, 4, 5],
| [6, 7, 8, 9],
| [10, 11, 12, 13, 14]]);
| assert(inc[1 .. $][2] == [6, 7, 8, 9]);
|
| assert(dec == [
| [0, 1, 2, 3, 4],
| [5, 6, 7, 8],
| [9, 10, 11],
| [12, 13],
| [14]]);
| assert(dec[1 .. $][2] == [12, 13]);
|
| static assert(is(typeof(inc.front) == typeof(pck)));
| static assert(is(typeof(dec.front) == typeof(pck)));
|}
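|
|/+
|Editor's addition (not from the original listing): a minimal sketch of the
|packed-length relation required by `stairs`: `n` stairs need exactly
|`n * (n + 1) / 2` elements.
|+/
|version(mir_test) unittest
|{
| import mir.ndslice.topology: iota, stairs;
|
| enum n = 4;
| auto st = (n * (n + 1) / 2).iota.stairs!"+"(n);
| assert(st.length == n);
| assert(st[0].length == 1);
| assert(st[n - 1].length == n);
|}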
|
|/++
|Slice composed of rows of lower or upper triangular matrix.
|
|`stairs` can be used to pack and unpack symmetric and triangular matrix storage.
|
|Note: `stairs` is defined for 1D (packed) input and 2D (general) input.
| This part of the documentation is for 2D input.
|
|Params:
| type = $(UL
| $(LI `"+"` for stairs with lengths `1, 2, ..., n`, lower matrix;)
| $(LI `"-"` for stairs with lengths `n, n-1, ..., 1`, upper matrix.)
| )
| slice = input slice with length equal to `n * (n + 1) / 2`
|Returns:
| 1D slice composed of 1D contiguous slices.
|
|See_also: $(LREF _stairs) $(SUBREF dynamic, transposed), $(LREF universal)
|+/
|auto stairs(string type, Iterator, SliceKind kind)(Slice!(Iterator, 2, kind) slice)
| if (type == "+" || type == "-")
|{
| assert(slice.length!0 == slice.length!1, "stairs: input slice must be a square matrix.");
| static if (type == "+")
| {
| return slice
| .pack!1
| .map!"a"
| .zip([slice.length].iota!size_t(1))
| .map!"a[0 .. b]";
| }
| else
| {
| return slice
| .pack!1
| .map!"a"
| .zip([slice.length].iota!size_t)
| .map!"a[b .. $]";
| }
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.topology: iota, as, stairs;
|
| auto gen = [3, 3].iota.as!double;
| auto inc = gen.stairs!"+";
| auto dec = gen.stairs!"-";
|
| assert(inc == [
| [0],
| [3, 4],
| [6, 7, 8]]);
|
| assert(dec == [
| [0, 1, 2],
| [4, 5],
| [8]]);
|
| static assert(is(typeof(inc.front) == typeof(gen.front)));
| static assert(is(typeof(dec.front) == typeof(gen.front)));
|
| /////////////////////////////////////////
| // Pack lower and upper matrix parts
| auto n = gen.length;
| auto m = n * (n + 1) / 2;
| // allocate memory
| import mir.ndslice.allocation: uninitSlice;
| auto lowerData = m.uninitSlice!double;
| auto upperData = m.uninitSlice!double;
| // construct packed stairs
| auto lower = lowerData.stairs!"+"(n);
| auto upper = upperData.stairs!"-"(n);
| // copy data
| import mir.algorithm.iteration: each;
| each!"a[] = b"(lower, inc);
| each!"a[] = b"(upper, dec);
|
| assert(&lower[0][0] is &lowerData[0]);
| assert(&upper[0][0] is &upperData[0]);
|
| assert(lowerData == [0, 3, 4, 6, 7, 8]);
| assert(upperData == [0, 1, 2, 4, 5, 8]);
|}
|
|/++
|Returns a slice that can be iterated along the selected dimensions. Transposes the other dimensions on top and then packs them.
|
|Combines $(LREF byDim) and $(LREF evertPack).
|
|Params:
| SDimensions = dimensions to iterate along; a list of length `d`, `1 <= d < n`. Negative dimensions are supported.
|Returns:
| `(n-d)`-dimensional slice composed of d-dimensional slices
|See_also:
| $(LREF byDim),
| $(LREF iota),
| $(SUBREF allocation, slice),
| $(LREF ipack),
| $(SUBREF dynamic, transposed).
|+/
|template alongDim(SDimensions...)
| if (SDimensions.length > 0)
|{
| static if (allSatisfy!(isSizediff_t, SDimensions))
| {
| /++
| Params:
| slice = input n-dimensional slice, n > d
| Returns:
| `(n-d)`-dimensional slice composed of d-dimensional slices
| +/
| @optmath auto alongDim(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if (N > SDimensions.length)
| {
| import core.lifetime: move;
| return slice.move.byDim!SDimensions.evertPack;
| }
| }
| else
| {
| alias alongDim = .alongDim!(staticMap!(toSizediff_t, SDimensions));
| }
|}
|
|/// 2-dimensional slice support
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice;
|
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto slice = iota(3, 4);
| //->
| // | 3 |
| //->
| // | 4 |
| size_t[1] shape3 = [3];
| size_t[1] shape4 = [4];
|
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto x = slice.alongDim!(-1); // -1 is the last dimension index, the same as 1 for this case.
| static assert(is(typeof(x) == Slice!(SliceIterator!(IotaIterator!sizediff_t), 1, Universal)));
|
| assert(x.shape == shape3);
| assert(x.front.shape == shape4);
| assert(x.front == iota(4));
| x.popFront;
| assert(x.front == iota([4], 4));
|
| // ---------
| // | 0 4 8 |
| // | 1 5 9 |
| // | 2 6 10 |
| // | 3 7 11 |
| // ---------
| auto y = slice.alongDim!0; // alongDim!(-2) is the same for matrices.
| static assert(is(typeof(y) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 1, Universal))));
|
| assert(y.shape == shape4);
| assert(y.front.shape == shape3);
| assert(y.front == iota([3], 0, 4));
| y.popFront;
| assert(y.front == iota([3], 1, 4));
|}
|
|/// 3-dimensional slice support, N-dimensional also supported
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice;
|
| // ----------------
| // | 0 1 2 3 4 |
| // | 5 6 7 8 9 |
| // | 10 11 12 13 14 |
| // | 15 16 17 18 19 |
| // - - - - - - - -
| // | 20 21 22 23 24 |
| // | 25 26 27 28 29 |
| // | 30 31 32 33 34 |
| // | 35 36 37 38 39 |
| // - - - - - - - -
| // | 40 41 42 43 44 |
| // | 45 46 47 48 49 |
| // | 50 51 52 53 54 |
| // | 55 56 57 58 59 |
| // ----------------
| auto slice = iota(3, 4, 5);
|
| size_t[2] shape45 = [4, 5];
| size_t[2] shape35 = [3, 5];
| size_t[2] shape34 = [3, 4];
| size_t[2] shape54 = [5, 4];
| size_t[1] shape3 = [3];
| size_t[1] shape4 = [4];
| size_t[1] shape5 = [5];
|
| // ----------
| // | 0 20 40 |
| // | 5 25 45 |
| // | 10 30 50 |
| // | 15 35 55 |
| // - - - - -
| // | 1 21 41 |
| // | 6 26 46 |
| // | 11 31 51 |
| // | 16 36 56 |
| // - - - - -
| // | 2 22 42 |
| // | 7 27 47 |
| // | 12 32 52 |
| // | 17 37 57 |
| // - - - - -
| // | 3 23 43 |
| // | 8 28 48 |
| // | 13 33 53 |
| // | 18 38 58 |
| // - - - - -
| // | 4 24 44 |
| // | 9 29 49 |
| // | 14 34 54 |
| // | 19 39 59 |
| // ----------
| auto a = slice.alongDim!0.transposed;
| static assert(is(typeof(a) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 1, Universal), 2, Universal)));
|
| assert(a.shape == shape54);
| assert(a.front.shape == shape4);
| assert(a.front.unpack == iota([3, 4], 0, 5).universal.transposed);
| a.popFront;
| assert(a.front.front == iota([3], 1, 20));
|
| // ----------------
| // | 0 1 2 3 4 |
| // | 5 6 7 8 9 |
| // | 10 11 12 13 14 |
| // | 15 16 17 18 19 |
| // - - - - - - - -
| // | 20 21 22 23 24 |
| // | 25 26 27 28 29 |
| // | 30 31 32 33 34 |
| // | 35 36 37 38 39 |
| // - - - - - - - -
| // | 40 41 42 43 44 |
| // | 45 46 47 48 49 |
| // | 50 51 52 53 54 |
| // | 55 56 57 58 59 |
| // ----------------
| auto x = slice.alongDim!(1, 2);
| static assert(is(typeof(x) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 2), 1, Universal)));
|
| assert(x.shape == shape3);
| assert(x.front.shape == shape45);
| assert(x.front == iota([4, 5]));
| x.popFront;
| assert(x.front == iota([4, 5], (4 * 5)));
|
| // ----------------
| // | 0 1 2 3 4 |
| // | 20 21 22 23 24 |
| // | 40 41 42 43 44 |
| // - - - - - - - -
| // | 5 6 7 8 9 |
| // | 25 26 27 28 29 |
| // | 45 46 47 48 49 |
| // - - - - - - - -
| // | 10 11 12 13 14 |
| // | 30 31 32 33 34 |
| // | 50 51 52 53 54 |
| // - - - - - - - -
| // | 15 16 17 18 19 |
| // | 35 36 37 38 39 |
| // | 55 56 57 58 59 |
| // ----------------
| auto y = slice.alongDim!(0, 2);
| static assert(is(typeof(y) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 2, Canonical), 1, Universal)));
|
| assert(y.shape == shape4);
| assert(y.front.shape == shape35);
| int err;
| assert(y.front == slice.universal.strided!1(4).reshape([3, -1], err));
| y.popFront;
| assert(y.front.front == iota([5], 5));
|
| // -------------
| // | 0 5 10 15 |
| // | 20 25 30 35 |
| // | 40 45 50 55 |
| // - - - - - - -
| // | 1 6 11 16 |
| // | 21 26 31 36 |
| // | 41 46 51 56 |
| // - - - - - - -
| // | 2 7 12 17 |
| // | 22 27 32 37 |
| // | 42 47 52 57 |
| // - - - - - - -
| // | 3 8 13 18 |
| // | 23 28 33 38 |
| // | 43 48 53 58 |
| // - - - - - - -
| // | 4 9 14 19 |
| // | 24 29 34 39 |
| // | 44 49 54 59 |
| // -------------
| auto z = slice.alongDim!(0, 1);
| static assert(is(typeof(z) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 2, Universal))));
|
| assert(z.shape == shape5);
| assert(z.front.shape == shape34);
| assert(z.front == iota([3, 4], 0, 5));
| z.popFront;
| assert(z.front.front == iota([4], 1, 5));
|}
|
|/// Use alongDim to calculate column mean/row mean of 2-dimensional slice
|version(mir_test)
|@safe pure
|unittest
|{
| import mir.ndslice.topology: alongDim;
| import mir.ndslice.fuse: fuse;
| import mir.math.stat: mean;
| import mir.algorithm.iteration: all;
| import mir.math.common: approxEqual;
|
| auto x = [
| [0.0, 1.0, 1.5, 2.0, 3.5, 4.25],
| [2.0, 7.5, 5.0, 1.0, 1.5, 0.0]
| ].fuse;
|
| // Use alongDim with map to compute mean of row/column.
| assert(x.alongDim!1.map!mean.all!approxEqual([12.25 / 6, 17.0 / 6]));
| assert(x.alongDim!0.map!mean.all!approxEqual([1, 4.25, 3.25, 1.5, 2.5, 2.125]));
|
| // FIXME
| // Without using map, computes the mean of the whole slice
| // assert(x.alongDim!1.mean == x.sliced.mean);
| // assert(x.alongDim!0.mean == x.sliced.mean);
|}
|
|/++
|Use alongDim and map with a lambda; the result may need to be allocated. This example
|uses fuse, which allocates. Note that fuse!1 transposes the result.
|+/
|version(mir_test)
|@safe pure
|unittest {
| import mir.ndslice.topology: iota, alongDim, map;
| import mir.ndslice.fuse: fuse;
| import mir.ndslice.slice: sliced;
|
| auto x = [1, 2, 3].sliced;
| auto y = [1, 2].sliced;
|
| auto s1 = iota(2, 3).alongDim!1.map!(a => a * x).fuse;
| assert(s1 == [[ 0, 2, 6],
| [ 3, 8, 15]]);
| auto s2 = iota(2, 3).alongDim!0.map!(a => a * y).fuse!1;
| assert(s2 == [[ 0, 1, 2],
| [ 6, 8, 10]]);
|}
|
|/++
|Returns a slice that can be iterated by dimension. Transposes dimensions on top and then packs them.
|
|Combines $(SUBREF dynamic, transposed), $(LREF ipack), and SliceKind Selectors.
|
|Params:
| SDimensions = dimensions to perform iteration on; a list of length `d`, `1 <= d <= n`. Negative dimensions are supported.
|Returns:
| d-dimensional slice composed of `(n-d)`-dimensional slices
|See_also:
| $(LREF alongDim),
| $(SUBREF allocation, slice),
| $(LREF ipack),
| $(SUBREF dynamic, transposed).
|+/
|template byDim(SDimensions...)
| if (SDimensions.length > 0)
|{
| static if (allSatisfy!(isSizediff_t, SDimensions))
| {
| /++
| Params:
| slice = input n-dimensional slice, n >= d
| Returns:
| d-dimensional slice composed of `(n-d)`-dimensional slices
| +/
| @optmath auto byDim(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if (N >= SDimensions.length)
| {
|
| alias Dimensions = staticMap!(ShiftNegativeWith!N, SDimensions);
|
| mixin DimensionsCountCTError;
|
| static if (N == 1)
| {
| return slice;
| }
| else
| {
| import core.lifetime: move;
| import mir.ndslice.dynamic: transposed;
| import mir.algorithm.iteration: all;
|
| auto trans = slice
| .move
| .transposed!Dimensions;
| static if (Dimensions.length == N)
| {
| return trans;
| }
| else
| {
| auto ret = trans.move.ipack!(Dimensions.length);
| static if ((kind == Contiguous || kind == Canonical && N - Dimensions.length == 1) && [Dimensions].all!(a => a < Dimensions.length))
| {
| return ret
| .move
| .evertPack
| .assumeContiguous
| .evertPack;
| }
| else
| static if (kind == Canonical && [Dimensions].all!(a => a < N - 1))
| {
| return ret
| .move
| .evertPack
| .assumeCanonical
| .evertPack;
| }
| else
| static if ((kind == Contiguous || kind == Canonical && Dimensions.length == 1) && [Dimensions] == [Iota!(N - Dimensions.length, N)])
| {
| return ret.assumeContiguous;
| }
| else
| static if ((kind == Contiguous || kind == Canonical) && Dimensions[$-1] == N - 1)
| {
| return ret.assumeCanonical;
| }
| else
| {
| return ret;
| }
| }
| }
| }
| }
| else
| {
| alias byDim = .byDim!(staticMap!(toSizediff_t, SDimensions));
| }
|}
|
|/// 2-dimensional slice support
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice;
|
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto slice = iota(3, 4);
| //->
| // | 3 |
| //->
| // | 4 |
| size_t[1] shape3 = [3];
| size_t[1] shape4 = [4];
|
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto x = slice.byDim!0; // byDim!(-2) is the same for matrices.
| static assert(is(typeof(x) == Slice!(SliceIterator!(IotaIterator!sizediff_t), 1, Universal)));
|
| assert(x.shape == shape3);
| assert(x.front.shape == shape4);
| assert(x.front == iota(4));
| x.popFront;
| assert(x.front == iota([4], 4));
|
| // ---------
| // | 0 4 8 |
| // | 1 5 9 |
| // | 2 6 10 |
| // | 3 7 11 |
| // ---------
| auto y = slice.byDim!(-1); // -1 is the last dimension index, the same as 1 for this case.
| static assert(is(typeof(y) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 1, Universal))));
|
| assert(y.shape == shape4);
| assert(y.front.shape == shape3);
| assert(y.front == iota([3], 0, 4));
| y.popFront;
| assert(y.front == iota([3], 1, 4));
|}
|
|/// 3-dimensional slice support, N-dimensional also supported
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice;
|
| // ----------------
| // | 0 1 2 3 4 |
| // | 5 6 7 8 9 |
| // | 10 11 12 13 14 |
| // | 15 16 17 18 19 |
| // - - - - - - - -
| // | 20 21 22 23 24 |
| // | 25 26 27 28 29 |
| // | 30 31 32 33 34 |
| // | 35 36 37 38 39 |
| // - - - - - - - -
| // | 40 41 42 43 44 |
| // | 45 46 47 48 49 |
| // | 50 51 52 53 54 |
| // | 55 56 57 58 59 |
| // ----------------
| auto slice = iota(3, 4, 5);
|
| size_t[2] shape45 = [4, 5];
| size_t[2] shape35 = [3, 5];
| size_t[2] shape34 = [3, 4];
| size_t[2] shape54 = [5, 4];
| size_t[1] shape3 = [3];
| size_t[1] shape4 = [4];
| size_t[1] shape5 = [5];
|
| // ----------------
| // | 0 1 2 3 4 |
| // | 5 6 7 8 9 |
| // | 10 11 12 13 14 |
| // | 15 16 17 18 19 |
| // - - - - - - - -
| // | 20 21 22 23 24 |
| // | 25 26 27 28 29 |
| // | 30 31 32 33 34 |
| // | 35 36 37 38 39 |
| // - - - - - - - -
| // | 40 41 42 43 44 |
| // | 45 46 47 48 49 |
| // | 50 51 52 53 54 |
| // | 55 56 57 58 59 |
| // ----------------
| auto x = slice.byDim!0;
| static assert(is(typeof(x) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 2), 1, Universal)));
|
| assert(x.shape == shape3);
| assert(x.front.shape == shape45);
| assert(x.front == iota([4, 5]));
| x.popFront;
| assert(x.front == iota([4, 5], (4 * 5)));
|
| // ----------------
| // | 0 1 2 3 4 |
| // | 20 21 22 23 24 |
| // | 40 41 42 43 44 |
| // - - - - - - - -
| // | 5 6 7 8 9 |
| // | 25 26 27 28 29 |
| // | 45 46 47 48 49 |
| // - - - - - - - -
| // | 10 11 12 13 14 |
| // | 30 31 32 33 34 |
| // | 50 51 52 53 54 |
| // - - - - - - - -
| // | 15 16 17 18 19 |
| // | 35 36 37 38 39 |
| // | 55 56 57 58 59 |
| // ----------------
| auto y = slice.byDim!1;
| static assert(is(typeof(y) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 2, Canonical), 1, Universal)));
|
| assert(y.shape == shape4);
| assert(y.front.shape == shape35);
| int err;
| assert(y.front == slice.universal.strided!1(4).reshape([3, -1], err));
| y.popFront;
| assert(y.front.front == iota([5], 5));
|
| // -------------
| // | 0 5 10 15 |
| // | 20 25 30 35 |
| // | 40 45 50 55 |
| // - - - - - - -
| // | 1 6 11 16 |
| // | 21 26 31 36 |
| // | 41 46 51 56 |
| // - - - - - - -
| // | 2 7 12 17 |
| // | 22 27 32 37 |
| // | 42 47 52 57 |
| // - - - - - - -
| // | 3 8 13 18 |
| // | 23 28 33 38 |
| // | 43 48 53 58 |
| // - - - - - - -
| // | 4 9 14 19 |
| // | 24 29 34 39 |
| // | 44 49 54 59 |
| // -------------
| auto z = slice.byDim!2;
| static assert(is(typeof(z) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 2, Universal))));
|
| assert(z.shape == shape5);
| assert(z.front.shape == shape34);
| assert(z.front == iota([3, 4], 0, 5));
| z.popFront;
| assert(z.front.front == iota([4], 1, 5));
|
| // ----------
| // | 0 20 40 |
| // | 5 25 45 |
| // | 10 30 50 |
| // | 15 35 55 |
| // - - - - -
| // | 1 21 41 |
| // | 6 26 46 |
| // | 11 31 51 |
| // | 16 36 56 |
| // - - - - -
| // | 2 22 42 |
| // | 7 27 47 |
| // | 12 32 52 |
| // | 17 37 57 |
| // - - - - -
| // | 3 23 43 |
| // | 8 28 48 |
| // | 13 33 53 |
| // | 18 38 58 |
| // - - - - -
| // | 4 24 44 |
| // | 9 29 49 |
| // | 14 34 54 |
| // | 19 39 59 |
| // ----------
| auto a = slice.byDim!(2, 1);
| static assert(is(typeof(a) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 1, Universal), 2, Universal)));
|
| assert(a.shape == shape54);
| assert(a.front.shape == shape4);
| assert(a.front.unpack == iota([3, 4], 0, 5).universal.transposed);
| a.popFront;
| assert(a.front.front == iota([3], 1, 20));
|}
|
|/// Use byDim to calculate column mean/row mean of 2-dimensional slice
|version(mir_test)
|@safe pure
|unittest
|{
| import mir.ndslice.topology: byDim;
| import mir.ndslice.fuse: fuse;
| import mir.math.stat: mean;
| import mir.algorithm.iteration: all;
| import mir.math.common: approxEqual;
|
| auto x = [
| [0.0, 1.0, 1.5, 2.0, 3.5, 4.25],
| [2.0, 7.5, 5.0, 1.0, 1.5, 0.0]
| ].fuse;
|
| // Use byDim with map to compute mean of row/column.
| assert(x.byDim!0.map!mean.all!approxEqual([12.25 / 6, 17.0 / 6]));
| assert(x.byDim!1.map!mean.all!approxEqual([1, 4.25, 3.25, 1.5, 2.5, 2.125]));
|
| // FIXME
| // Without using map, computes the mean of the whole slice
| // assert(x.byDim!0.mean == x.sliced.mean);
| // assert(x.byDim!1.mean == x.sliced.mean);
|}
|
|/++
|Use byDim and map with a lambda; the result may need to be allocated. This example
|uses fuse, which allocates. Note that fuse!1 transposes the result.
|+/
|version(mir_test)
|@safe pure
|unittest {
| import mir.ndslice.topology: iota, byDim, map;
| import mir.ndslice.fuse: fuse;
| import mir.ndslice.slice: sliced;
|
| auto x = [1, 2, 3].sliced;
| auto y = [1, 2].sliced;
|
| auto s1 = iota(2, 3).byDim!0.map!(a => a * x).fuse;
| assert(s1 == [[ 0, 2, 6],
| [ 3, 8, 15]]);
| auto s2 = iota(2, 3).byDim!1.map!(a => a * y).fuse!1;
| assert(s2 == [[ 0, 1, 2],
| [ 6, 8, 10]]);
|}
|
|// Ensure works on canonical
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, canonical;
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto slice = iota(3, 4).canonical;
| //->
| // | 3 |
| //->
| // | 4 |
| size_t[1] shape3 = [3];
| size_t[1] shape4 = [4];
|
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto x = slice.byDim!0;
| static assert(is(typeof(x) == Slice!(SliceIterator!(IotaIterator!sizediff_t), 1, Universal)));
|
| assert(x.shape == shape3);
| assert(x.front.shape == shape4);
| assert(x.front == iota(4));
| x.popFront;
| assert(x.front == iota([4], 4));
|
| // ---------
| // | 0 4 8 |
| // | 1 5 9 |
| // | 2 6 10 |
| // | 3 7 11 |
| // ---------
| auto y = slice.byDim!1;
| static assert(is(typeof(y) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 1, Universal))));
|
| assert(y.shape == shape4);
| assert(y.front.shape == shape3);
| assert(y.front == iota([3], 0, 4));
| y.popFront;
| assert(y.front == iota([3], 1, 4));
|}
|
|// Ensure works on universal
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, universal;
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto slice = iota(3, 4).universal;
| //->
| // | 3 |
| //->
| // | 4 |
| size_t[1] shape3 = [3];
| size_t[1] shape4 = [4];
|
| // ------------
| // | 0 1 2 3 |
| // | 4 5 6 7 |
| // | 8 9 10 11 |
| // ------------
| auto x = slice.byDim!0;
| static assert(is(typeof(x) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 1, Universal), 1, Universal)));
|
| assert(x.shape == shape3);
| assert(x.front.shape == shape4);
| assert(x.front == iota(4));
| x.popFront;
| assert(x.front == iota([4], 4));
|
| // ---------
| // | 0 4 8 |
| // | 1 5 9 |
| // | 2 6 10 |
| // | 3 7 11 |
| // ---------
| auto y = slice.byDim!1;
| static assert(is(typeof(y) == Slice!(SliceIterator!(IotaIterator!sizediff_t, 1, Universal), 1, Universal)));
|
| assert(y.shape == shape4);
| assert(y.front.shape == shape3);
| assert(y.front == iota([3], 0, 4));
| y.popFront;
| assert(y.front == iota([3], 1, 4));
|}
|
|// 1-dimensional slice support
|@safe @nogc pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| // -------
| // | 0 1 2 |
| // -------
| auto slice = iota(3);
| auto x = slice.byDim!0;
| static assert (is(typeof(x) == typeof(slice)));
| assert(x == slice);
|}
|
|/++
|Constructs a new view of an n-dimensional slice with dimension `axis` removed.
|
|Throws:
| `AssertError` if the length of the corresponding dimension doesn't equal 1.
|Params:
| axis = dimension to remove; its length must equal 1
| slice = n-dimensional slice
|Returns:
| new view of a slice with dimension removed
|See_also: $(LREF unsqueeze), $(LREF iota).
|+/
|template squeeze(sizediff_t axis = 0)
|{
| Slice!(Iterator, N - 1, kind != Canonical ? kind : ((axis == N - 1 || axis == -1) ? Universal : (N == 2 ? Contiguous : kind)))
| squeeze(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| if (-sizediff_t(N) <= axis && axis < sizediff_t(N) && N > 1)
| in {
| assert(slice._lengths[axis < 0 ? N + axis : axis] == 1);
| }
| do {
| import mir.utility: swap;
| enum sizediff_t a = axis < 0 ? N + axis : axis;
| typeof(return) ret;
| foreach (i; Iota!(0, a))
| ret._lengths[i] = slice._lengths[i];
| foreach (i; Iota!(a + 1, N))
| ret._lengths[i - 1] = slice._lengths[i];
| static if (kind == Universal)
| {
| foreach (i; Iota!(0, a))
| ret._strides[i] = slice._strides[i];
| foreach (i; Iota!(a + 1, N))
| ret._strides[i - 1] = slice._strides[i];
| }
| else
| static if (kind == Canonical)
| {
| static if (a == N - 1)
| {
| foreach (i; Iota!(0, N - 1))
| ret._strides[i] = slice._strides[i];
| }
| else
| {
| foreach (i; Iota!(0, a))
| ret._strides[i] = slice._strides[i];
| foreach (i; Iota!(a + 1, N - 1))
| ret._strides[i - 1] = slice._strides[i];
| }
| }
| swap(ret._iterator, slice._iterator);
| return ret;
| }
|}
|
|///
|unittest
|{
| import mir.ndslice.topology : iota;
| import mir.ndslice.allocation : slice;
|
| // [[0, 1, 2]] -> [0, 1, 2]
| assert([1, 3].iota.squeeze == [3].iota);
| // [[0], [1], [2]] -> [0, 1, 2]
| assert([3, 1].iota.squeeze!1 == [3].iota);
| assert([3, 1].iota.squeeze!(-1) == [3].iota);
|
| assert([1, 3].iota.canonical.squeeze == [3].iota);
| assert([3, 1].iota.canonical.squeeze!1 == [3].iota);
| assert([3, 1].iota.canonical.squeeze!(-1) == [3].iota);
|
| assert([1, 3].iota.universal.squeeze == [3].iota);
| assert([3, 1].iota.universal.squeeze!1 == [3].iota);
| assert([3, 1].iota.universal.squeeze!(-1) == [3].iota);
|
| assert([1, 3, 4].iota.squeeze == [3, 4].iota);
| assert([3, 1, 4].iota.squeeze!1 == [3, 4].iota);
| assert([3, 4, 1].iota.squeeze!(-1) == [3, 4].iota);
|
| assert([1, 3, 4].iota.canonical.squeeze == [3, 4].iota);
| assert([3, 1, 4].iota.canonical.squeeze!1 == [3, 4].iota);
| assert([3, 4, 1].iota.canonical.squeeze!(-1) == [3, 4].iota);
|
| assert([1, 3, 4].iota.universal.squeeze == [3, 4].iota);
| assert([3, 1, 4].iota.universal.squeeze!1 == [3, 4].iota);
| assert([3, 4, 1].iota.universal.squeeze!(-1) == [3, 4].iota);
|}
|
|/++
|Constructs a view of an n-dimensional slice with a dimension added at `axis`. Used
|to unsqueeze a squeezed slice.
|
|Params:
| slice = n-dimensional slice
| axis = dimension to be unsqueezed (the added dimension); the default value is 0, the first dimension
|Returns:
| unsqueezed (n+1)-dimensional slice of the same slice kind
|See_also: $(LREF squeeze), $(LREF iota).
|+/
|Slice!(Iterator, N + 1, kind) unsqueeze(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice, sizediff_t axis)
|in {
| assert(-sizediff_t(N + 1) <= axis && axis <= sizediff_t(N));
|}
|do {
| import mir.utility: swap;
| typeof(return) ret;
| auto a = axis < 0 ? axis + N + 1 : axis;
| foreach (i; 0 .. a)
| ret._lengths[i] = slice._lengths[i];
| ret._lengths[a] = 1;
| foreach (i; a .. N)
| ret._lengths[i + 1] = slice._lengths[i];
| static if (kind == Universal)
| {
| foreach (i; 0 .. a)
| ret._strides[i] = slice._strides[i];
| foreach (i; a .. N)
| ret._strides[i + 1] = slice._strides[i];
| }
| else
| static if (kind == Canonical)
| {
| if (a == N)
| {
| foreach (i; Iota!(0, N - 1))
| ret._strides[i] = slice._strides[i];
| ret._strides[N - 1] = 1;
| }
| else
| {
| foreach (i; 0 .. a)
| ret._strides[i] = slice._strides[i];
| foreach (i; a .. N - 1)
| ret._strides[i + 1] = slice._strides[i];
| }
| }
| swap(ret._iterator, slice._iterator);
| return ret;
|}
|
|/// ditto
|template unsqueeze(sizediff_t axis = 0)
|{
| Slice!(Iterator, N + 1, kind) unsqueeze(Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, N, kind) slice)
| in {
| assert(-sizediff_t(N + 1) <= axis && axis <= sizediff_t(N));
| }
| do {
| import mir.utility: swap;
| typeof(return) ret;
| enum a = axis < 0 ? axis + N + 1 : axis;
| foreach (i; Iota!a)
| ret._lengths[i] = slice._lengths[i];
| ret._lengths[a] = 1;
| foreach (i; Iota!(a, N))
| ret._lengths[i + 1] = slice._lengths[i];
| static if (kind == Universal)
| {
| foreach (i; Iota!a)
| ret._strides[i] = slice._strides[i];
| foreach (i; Iota!(a, N))
| ret._strides[i + 1] = slice._strides[i];
| }
| else
| static if (kind == Canonical)
| {
| static if (a == N)
| {
| foreach (i; Iota!(0, N - 1))
| ret._strides[i] = slice._strides[i];
| ret._strides[N - 1] = 1;
| }
| else
| {
| foreach (i; Iota!(0, a))
| ret._strides[i] = slice._strides[i];
| foreach (i; Iota!(a, N - 1))
| ret._strides[i + 1] = slice._strides[i];
| }
| }
| swap(ret._iterator, slice._iterator);
| return ret;
| }
|}
|
|///
|version (mir_test)
|@safe pure nothrow @nogc
|unittest
|{
| // [0, 1, 2] -> [[0, 1, 2]]
| assert([3].iota.unsqueeze == [1, 3].iota);
|
| assert([3].iota.universal.unsqueeze == [1, 3].iota);
| assert([3, 4].iota.unsqueeze == [1, 3, 4].iota);
| assert([3, 4].iota.canonical.unsqueeze == [1, 3, 4].iota);
| assert([3, 4].iota.universal.unsqueeze == [1, 3, 4].iota);
|
| // [0, 1, 2] -> [[0], [1], [2]]
| assert([3].iota.unsqueeze(-1) == [3, 1].iota);
| assert([3].iota.unsqueeze!(-1) == [3, 1].iota);
|
| assert([3].iota.universal.unsqueeze(-1) == [3, 1].iota);
| assert([3].iota.universal.unsqueeze!(-1) == [3, 1].iota);
| assert([3, 4].iota.unsqueeze(-1) == [3, 4, 1].iota);
| assert([3, 4].iota.unsqueeze!(-1) == [3, 4, 1].iota);
| assert([3, 4].iota.canonical.unsqueeze(-1) == [3, 4, 1].iota);
| assert([3, 4].iota.canonical.unsqueeze!(-1) == [3, 4, 1].iota);
| assert([3, 4].iota.universal.unsqueeze(-1) == [3, 4, 1].iota);
| assert([3, 4].iota.universal.unsqueeze!(-1) == [3, 4, 1].iota);
|}
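|
|/+
|Editor's addition (not from the original listing): a minimal round-trip sketch;
|`unsqueeze` inserts a unit dimension that `squeeze` removes again.
|+/
|version (mir_test)
|@safe pure nothrow @nogc
|unittest
|{
| assert([3, 4].iota.unsqueeze!1 == [3, 1, 4].iota);
| assert([3, 4].iota.unsqueeze!1.squeeze!1 == [3, 4].iota);
|}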
|
|/++
|Field (element's member) projection.
|
|Params:
| name = element's member name
|Returns:
| lazy n-dimensional slice of the same shape
|See_also:
| $(LREF map)
|+/
|
|template member(string name)
| if (name.length)
|{
| /++
| Params:
| slice = n-dimensional slice composed of structs, classes or unions
| Returns:
| lazy n-dimensional slice of the same shape
| +/
| Slice!(MemberIterator!(Iterator, name), N, kind) member(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| return typeof(return)(slice._structure, MemberIterator!(Iterator, name)(slice._iterator));
| }
|
| /// ditto
| Slice!(MemberIterator!(T*, name)) member(T)(T[] array)
| {
| return member(array.sliced);
| }
|
| /// ditto
| auto member(T)(T withAsSlice)
| if (hasAsSlice!T)
| {
| return member(withAsSlice.asSlice);
| }
|}
|
|///
|version(mir_test)
|@safe pure unittest
|{
| // struct, union or class
| struct S
| {
| // Property support.
| // The getter must always be defined.
| double _x;
| double x() @property
| {
| return _x;
| }
| void x(double x) @property
| {
| _x = x;
| }
|
| /// Field support
| double y;
|
| /// Zero argument function support
| double f()
| {
| return _x * 2;
| }
| }
|
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| auto matrix = slice!S(2, 3);
| matrix.member!"x"[] = [2, 3].iota;
| matrix.member!"y"[] = matrix.member!"f";
| assert(matrix.member!"y" == [2, 3].iota * 2);
|}
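|
|/+
|Editor's addition (not from the original listing): a minimal sketch of the
|dynamic-array overload; `member` works directly on a D array of structs.
|+/
|version(mir_test)
|@safe pure unittest
|{
| static struct P { double x, y; }
|
| auto points = [P(1, 2), P(3, 4)];
| assert(points.member!"x" == [1.0, 3.0]);
| assert(points.member!"y" == [2.0, 4.0]);
|}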
|
|/++
|Functional deep-element wise reduce of a slice composed of fields or iterators.
|+/
|template orthogonalReduceField(alias fun)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun))
| {
| @optmath:
| /++
| Params:
| slice = non-empty input slice composed of fields or iterators.
| Returns:
| a lazy field, each element of which is the reduced value of the elements at the same index across all fields or iterators.
| +/
| OrthogonalReduceField!(Iterator, fun, I) orthogonalReduceField(I, Iterator)(I initialValue, Slice!Iterator slice)
| {
| return typeof(return)(slice, initialValue);
| }
|
| /// ditto
| OrthogonalReduceField!(T*, fun, I) orthogonalReduceField(I, T)(I initialValue, T[] array)
| {
| return orthogonalReduceField(initialValue, array.sliced);
| }
|
| /// ditto
| auto orthogonalReduceField(I, T)(I initialValue, T withAsSlice)
| if (hasAsSlice!T)
| {
| return orthogonalReduceField(initialValue, withAsSlice.asSlice);
| }
| }
| else alias orthogonalReduceField = .orthogonalReduceField!(naryFun!fun);
|}
|
|/// bit array operations
|version(mir_test)
|unittest
|{
| import mir.ndslice.slice: slicedField;
| import mir.ndslice.allocation: bitSlice;
| import mir.ndslice.dynamic: strided;
| import mir.ndslice.topology: iota, orthogonalReduceField;
| auto len = 100;
| auto a = len.bitSlice;
| auto b = len.bitSlice;
| auto c = len.bitSlice;
| a[len.iota.strided!0(7)][] = true;
| b[len.iota.strided!0(11)][] = true;
| c[len.iota.strided!0(13)][] = true;
|
| // this is valid since the bitslices above are original slices over the allocated memory.
| auto and =
| orthogonalReduceField!"a & b"(size_t.max, [
| a.iterator._field._field, // get raw data pointers
| b.iterator._field._field,
| c.iterator._field._field,
| ]) // operation on size_t
| .bitwiseField
| .slicedField(len);
|
| assert(and == (a & b & c));
|}
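|
|/+
|Editor's addition (not from the original listing): a minimal sketch with plain
|dynamic arrays, assuming built-in arrays qualify as fields here; element `i` of
|the result is `fun` folded over `fields[j][i]`, starting from the initial value.
|+/
|version(mir_test)
|unittest
|{
| import mir.ndslice.slice: slicedField;
| import mir.ndslice.topology: orthogonalReduceField;
|
| auto columnSums = orthogonalReduceField!"a + b"(0, [[1, 2, 3], [10, 20, 30]])
| .slicedField(3);
|
| assert(columnSums == [11, 22, 33]);
|}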
|
|/++
|Constructs a lazy view of triplets with `left`, `center`, and `right` members.
|
|Returns: Slice of the same length composed of $(SUBREF iterator, Triplet) triplets.
|The `center` member has the type of a slice element.
|The `left` and `right` members have the same type as the slice.
|
|The module contains the special function $(LREF collapse) to handle
|the left and right sides of triplets in one expression.
|
|Params:
| slice = a slice or an array to iterate over
|
|Example:
|------
|triplets(eeeeee) =>
|
|||c|lllll|
||r|c|llll|
||rr|c|lll|
||rrr|c|ll|
||rrrr|c|l|
||rrrrr|c||
|------
|
|See_also: $(LREF stairs).
|+/
|Slice!(TripletIterator!(Iterator, kind)) triplets(Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) slice)
|{
| return typeof(return)(slice.length, typeof(return).Iterator(0, slice));
|}
|
|/// ditto
|Slice!(TripletIterator!(T*)) triplets(T)(scope return T[] slice)
|{
| return .triplets(slice.sliced);
|}
|
|/// ditto
|auto triplets(S)(S slice)
| if (hasAsSlice!S)
|{
| return .triplets(slice.asSlice);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.topology: triplets, member, iota;
|
| auto a = [4, 5, 2, 8];
| auto h = a.triplets;
|
| assert(h[1].center == 5);
| assert(h[1].left == [4]);
| assert(h[1].right == [2, 8]);
|
| h[1].center = 9;
| assert(a[1] == 9);
|
| assert(h.member!"center" == a);
|
| // `triplets` topology can be used with iota to index a slice
| auto s = a.sliced;
| auto w = s.length.iota.triplets[1];
|
| assert(&s[w.center] == &a[1]);
| assert(s[w.left].field is a[0 .. 1]);
| assert(s[w.right].field is a[2 .. $]);
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/topology.d is 0% covered
<<<<<< EOF
# path=./source-mir-sparse-blas-gemm.lst
|/++
|License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0).
|Copyright: Copyright © 2016-, Ilya Yaroshenko
|Authors: Ilya Yaroshenko
|+/
|module mir.sparse.blas.gemm;
|
|import std.traits;
|import mir.ndslice.slice;
|import mir.ndslice.iterator;
|import mir.ndslice.allocation: slice;
|import mir.sparse;
|import mir.series;
|
|/++
|General matrix-matrix multiplication.
|
|Params:
| alpha = scalar
| a = sparse matrix (CSR format)
| b = dense matrix
| beta = scalar
| c = dense matrix
|Returns:
| `c = alpha * a × b + beta * c` if `beta` is not zero and `c = alpha * a × b` otherwise.
|+/
|void gemm(
| CR,
| CL,
| SliceKind kind1, T1, I1, J1, SliceKind kind2, Iterator2, SliceKind kind3, Iterator3)
|(
| in CR alpha,
| Slice!(ChopIterator!(J1*, Series!(I1*, T1*)), 1, kind1) a,
| Slice!(Iterator2, 2, kind2) b,
| in CL beta,
| Slice!(Iterator3, 2, kind3) c)
|in
|{
1| assert(a.length!0 == c.length!0);
1| assert(b.length!1 == c.length!1);
|}
|body
|{
| import mir.ndslice.topology: universal;
| import mir.ndslice.dynamic: transposed;
1| auto ct = c.universal.transposed;
14| foreach (x; b.universal.transposed)
| {
| import mir.sparse.blas.gemv: gemv;
4| gemv(alpha, a, x, beta, ct.front);
4| ct.popFront;
| }
|}
|
|///
|unittest
|{
| import mir.ndslice;
| import mir.sparse;
|
1| auto sp = sparse!int(3, 5);
1| sp[] =
| [[-5, 1, 7, 7, -4],
| [-1, -5, 6, 3, -3],
| [-5, -2, -3, 6, 0]];
|
1| auto a = sp.compress;
|
1| auto b = slice!double(5, 4);
1| b[] =
| [[-5.0, -3, 3, 1],
| [4.0, 3, 6, 4],
| [-4.0, -2, -2, 2],
| [-1.0, 9, 4, 8],
| [9.0, 8, 3, -2]];
|
1| auto c = slice!double(3, 4);
|
1| gemm(1.0, a, b, 0, c);
|
1| assert(c ==
| [[-42.0, 35, -7, 77],
| [-69.0, -21, -42, 21],
| [23.0, 69, 3, 29]]);
|}
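|
|/+
|Editor's addition (not from the original listing): a minimal sketch of the
|accumulation path, assuming a non-zero `beta` scales the existing `c` as the
|documentation above states.
|+/
|unittest
|{
| import mir.ndslice;
| import mir.sparse;
|
| auto sp = sparse!int(1, 2);
| sp[] = [[1, 2]];
| auto a = sp.compress;
|
| auto b = slice!double(2, 1);
| b[] = [[3.0], [4.0]];
|
| auto c = slice!double(1, 1);
| c[] = [[100.0]];
|
| // c = 1 * (a × b) + 2 * c
| gemm(1.0, a, b, 2.0, c);
| assert(c == [[211.0]]);
|}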
|
|
|/++
|General matrix-matrix multiplication with transformation.
|
|Params:
| alpha = scalar
| a = sparse matrix (CSR format)
| b = dense matrix
| beta = scalar
| c = dense matrix
|Returns:
| `c = alpha * aᵀ × b + beta * c` if `beta` is not zero and `c = alpha * aᵀ × b` otherwise.
|+/
|void gemtm(
| CR,
| CL,
| SliceKind kind1, T1, I1, J1, SliceKind kind2, Iterator2, SliceKind kind3, Iterator3)
|(
| in CR alpha,
| Slice!(ChopIterator!(J1*, Series!(I1*, T1*)), 1, kind1) a,
| Slice!(Iterator2, 2, kind2) b,
| in CL beta,
| Slice!(Iterator3, 2, kind3) c)
|in
|{
1| assert(a.length!0 == b.length!0);
1| assert(b.length!1 == c.length!1);
|}
|body
|{
| import mir.ndslice.topology: universal;
| import mir.ndslice.dynamic: transposed;
1| auto ct = c.universal.transposed;
14| foreach (x; b.universal.transposed)
| {
| import mir.sparse.blas.gemv: gemtv;
4| gemtv(alpha, a, x, beta, ct.front);
4| ct.popFront;
| }
|}
|
|
|///
|unittest
|{
| import mir.ndslice;
| import mir.sparse;
|
1| auto sp = sparse!int(5, 3);
1| sp[] =
| [[-5, -1, -5],
| [1, -5, -2],
| [7, 6, -3],
| [7, 3, 6],
| [-4, -3, 0]];
|
1| auto a = sp.compress;
|
1| auto b = slice!double(5, 4);
1| b[] =
| [[-5.0, -3, 3, 1],
| [4.0, 3, 6, 4],
| [-4.0, -2, -2, 2],
| [-1.0, 9, 4, 8],
| [9.0, 8, 3, -2]];
|
1| auto c = slice!double(3, 4);
|
1| gemtm(1.0, a, b, 0, c);
|
1| assert(c ==
| [[-42.0, 35, -7, 77],
| [-69.0, -21, -42, 21],
| [23.0, 69, 3, 29]]);
|}
|
|/++
|Selective general matrix multiplication with selector sparse matrix.
|Params:
| a = dense matrix
| b = dense matrix
| c = sparse matrix (CSR format)
|Returns:
| `c[available indexes] = (a × b)[available indexes]`.
|+/
|void selectiveGemm(string op = "", SliceKind kind1, SliceKind kind2, SliceKind kind3, T, T3, I3, J3)
|(Slice!(T*, 2, kind1) a, Slice!(T*, 2, kind2) b, Slice!(ChopIterator!(J3*, Series!(I3*, T3*)), 1, kind3) c)
|in
|{
1| assert(a.length!1 == b.length!0);
1| assert(c.length!0 == a.length!0);
11| foreach (r; c)
3| if (r.index.length)
2| assert(r.index[$-1] < b.length!1);
|}
|body
|{
| import mir.ndslice.topology: universal;
| import mir.ndslice.dynamic: transposed;
| import mir.sparse.blas.gemv: selectiveGemv;
|
1| auto bt = b.universal.transposed;
11| foreach (r; c)
| {
3| selectiveGemv!op(bt, a.front, r);
3| a.popFront;
| }
|}
|
|///
|unittest
|{
| import mir.ndslice;
| import mir.sparse;
|
1| auto a = slice!double(3, 5);
1| a[] =
| [[-5, 1, 7, 7, -4],
| [-1, -5, 6, 3, -3],
| [-5, -2, -3, 6, 0]];
|
1| auto b = slice!double(5, 4);
1| b[] =
| [[-5.0, -3, 3, 1],
| [4.0, 3, 6, 4],
| [-4.0, -2, -2, 2],
| [-1.0, 9, 4, 8],
| [9.0, 8, 3, -2]];
|
| // a * b ==
| // [[-42.0, 35, -7, 77],
| // [-69.0, -21, -42, 21],
| // [23.0, 69, 3, 29]]);
|
1| auto cs = sparse!double(3, 4);
1| cs[0, 2] = 1;
1| cs[0, 1] = 3;
1| cs[2, 3] = 2;
|
1| auto c = cs.compress;
|
1| selectiveGemm!"*"(a, b, c);
1| assert(c.length == 3);
1| assert(c[0].index == [1, 2]);
1| assert(c[0].value == [105, -7]);
1| assert(c[1].empty);
1| assert(c[2].index == [3]);
1| assert(c[2].value == [58]);
|}
source/mir/sparse/blas/gemm.d is 100% covered
<<<<<< EOF
# path=./source-mir-sparse-blas-dot.lst
|/**
|License: $(LINK2 http://boost.org/LICENSE_1_0.txt, Boost License 1.0).
|
|Authors: Ilya Yaroshenko
|*/
|module mir.sparse.blas.dot;
|
|import std.traits;
|import mir.ndslice.slice;
|import mir.sparse;
|import mir.series;
|
|/++
|Dot product of two vectors
|
|Params:
| x = sparse vector
| y = sparse vector
|Returns:
| scalar `xᵀ × y`
|+/
|Unqual!(CommonType!(T1, T2)) dot(
| V1 : Series!(I1*, T1*),
| V2 : Series!(I2*, T2*),
| T1, T2, I1, I2)
|(V1 x, V2 y)
|{
1| return dot!(typeof(return))(x, y);
|}
|
|/// ditto
|D dot(
| D,
| V1 : Series!(I1*, T1*),
| V2 : Series!(I2*, T2*),
| T1, T2, I1, I2)
|(V1 x, V2 y)
|{
|
2| typeof(return) s = 0;
|
2| uint done = 2;
2| Unqual!I1 ai0 = void;
2| Unqual!I2 bi0 = void;
|
4| if (x.length && y.length) for (;;)
| {
8| bi0 = y.index[0];
8| if (x.index[0] < bi0)
| {
| do
| {
4| x.popFront;
4| if (x.length == 0)
| {
0000000| break;
| }
| }
4| while (x.index[0] < bi0);
4| done = 2;
| }
8| if (--done == 0)
| {
2| goto L;
| }
6| ai0 = x.index[0];
6| if (y.index[0] < ai0)
| {
| do
| {
4| y.popFront;
4| if (y.length == 0)
| {
0000000| break;
| }
| }
4| while (y.index[0] < ai0);
4| done = 2;
| }
6| if (--done == 0)
| {
2| goto L;
| }
4| continue;
| L:
4| s = x.value[0] * y.value[0] + s;
4| x.popFront;
4| if (x.length == 0)
| {
0000000| break;
| }
4| y.popFront;
4| if (y.length == 0)
| {
2| break;
| }
| }
|
2| return s;
|}
|
|///
|unittest
|{
| import mir.series;
|
1| auto x = series([0u, 3, 5, 9, 100], [1, 3, 4, 9, 10]);
1| auto y = series([1u, 3, 4, 9], [1, 10, 100, 1000]);
| // x = [1, 0, 0, 3, 0, 4, 0, 0, 0, 9, ..., 10]
| // y = [0, 1, 0, 10, 100, 0, 0, 0, 0, 1000]
1| assert(dot(x, y) == 9030);
1| assert(dot!double(x, y) == 9030);
|}
|
|/++
|Dot product of two vectors.
|Params:
| x = sparse vector
| y = dense vector
|Returns:
| scalar `x × y`
|+/
|Unqual!(CommonType!(T1, ForeachType!V2)) dot(
| V1 : Series!(I1*, T1*),
| T1, I1, V2)
|(V1 x, V2 y)
| if (isDynamicArray!V2 || isSlice!V2)
|{
21| return dot!(typeof(return))(x, y);
|}
|
|///ditto
|D dot(
| D,
| V1 : Series!(I1*, T1*),
| T1, I1, V2)
|(V1 x, V2 y)
| if (isDynamicArray!V2 || isSlice!V2)
|in
|{
21| if (x.length)
21| assert(x.index[$-1] < y.length);
|}
|body
|{
|
| import mir.internal.utility;
|
| alias T2 = ForeachType!V2;
|
| alias F = Unqual!(CommonType!(T1, T2));
21| F s = 0;
324| foreach (size_t i; 0 .. x.index.length)
| {
87| s = y[x.index[i]] * x.value[i] + s;
| }
|
21| return s;
|}
|
|///
|unittest
|{
| import mir.series;
|
1| auto x = [0u, 3, 5, 9, 10].series([1.0, 3, 4, 9, 13]);
1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
| // x: [1, 0, 0, 3, 0, 4, 0, 0, 0, 9, 13, 0, 0]
| // y: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
1| auto r = 0 + 3 * 3 + 4 * 5 + 9 * 9 + 13 * 10;
1| assert(dot(x, y) == r);
1| assert(dot(x, y.sliced) == r);
1| assert(dot(x, y.slicedField) == r);
|}
source/mir/sparse/blas/dot.d is 94% covered
<<<<<< EOF
# path=./source-mir-glas-l2.lst
|/++
|$(H2 Level 2)
|
|$(SCRIPT inhibitQuickIndex = 1;)
|
|This is a submodule of $(MREF mir,glas).
|
|The Level 2 BLAS perform matrix-vector operations.
|
|Note: GLAS is single-threaded for now.
|
|$(BOOKTABLE $(H2 Matrix-vector operations),
|
|$(TR $(TH Function Name) $(TH Description))
|$(T2 gemv, general matrix-vector multiplication, $(RED partially optimized))
|)
|
|License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0).
|Copyright: Copyright © 2016-, Ilya Yaroshenko
|Authors: Ilya Yaroshenko
|
|Macros:
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|SUBMODULE = $(MREF_ALTTEXT $1, mir, glas, $1)
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, glas, $1)$(NBSP)
|NDSLICEREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|+/
|module mir.glas.l2;
|
|import std.traits;
|import std.meta;
|
|import mir.math.common;
|import mir.internal.utility;
|import mir.ndslice.slice;
|
|import mir.glas.l1;
|
|import mir.math.common: fastmath;
|
|@fastmath:
|
|/++
|$(RED DRAFT)
|Performs general matrix-vector multiplication.
|
|Pseudo_code: `y := alpha A × x + beta y`.
|
|Params:
| alpha = scalar
| asl = `m ⨉ n` matrix
| xsl = `n ⨉ 1` vector
| beta = scalar. When `beta` is zero, the vector `ysl` need not be set on input.
| ysl = `m ⨉ 1` vector
|
|Note:
| GLAS does not require transposition parameters.
| Use $(NDSLICEREF iteration, transposed)
| to perform zero cost `Slice` transposition.
|
|BLAS: SGEMV, DGEMV (CGEMV and ZGEMV are not implemented yet)
|+/
|nothrow @nogc @system
|void gemv(A, B, C,
| SliceKind kindA,
| SliceKind kindB,
| SliceKind kindC,
| )
|(
| C alpha,
| Slice!(const(A)*, 2, kindA) asl,
| Slice!(const(B)*, 1, kindB) xsl,
| C beta,
| Slice!(C*, 1, kindC) ysl,
|)
| if (allSatisfy!(isNumeric, A, B, C))
|in
|{
1| assert(asl.length!0 == ysl.length, "constraint: asl.length!0 == ysl.length");
1| assert(asl.length!1 == xsl.length, "constraint: asl.length!1 == xsl.length");
|}
|body
|{
| import mir.ndslice.dynamic: reversed;
| static assert(is(Unqual!C == C), msgWrongType);
1| if (ysl.empty)
0000000| return;
1| if (beta == 0)
| {
1| ysl[] = 0;
| }
| else
0000000| if (beta != 1)
| {
0000000| ysl[] *= beta;
| }
1| if (xsl.empty)
0000000| return;
| do
| {
3| ysl.front += alpha * dot(asl.front, xsl);
3| asl.popFront;
3| ysl.popFront;
| }
3| while (ysl.length);
|}
|
|///
|unittest
|{
| import mir.ndslice;
|
1| auto a = slice!double(3, 5);
1| a[] =
| [[-5, 1, 7, 7, -4],
| [-1, -5, 6, 3, -3],
| [-5, -2, -3, 6, 0]];
|
1| auto b = slice!double(5);
1| b[] =
| [-5.0,
| 4.0,
| -4.0,
| -1.0,
| 9.0];
|
1| auto c = slice!double(3);
|
1| gemv!(double, double, double)(1.0, a, b, 0.0, c);
|
1| assert(c ==
| [-42.0,
| -69.0,
| 23.0]);
|}
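|
|/+
|Editor's addition (not from the original listing): a minimal sketch of the
|accumulation path with `beta = 1`, i.e. `y := A × x + y`.
|+/
|unittest
|{
| import mir.ndslice;
|
| auto a = slice!double(2, 2);
| a[] =
| [[1.0, 2],
| [3.0, 4]];
|
| auto x = slice!double(2);
| x[] = [1.0, 1.0];
|
| auto y = slice!double(2);
| y[] = [10.0, 20.0];
|
| gemv!(double, double, double)(1.0, a, x, 1.0, y);
|
| assert(y == [13.0, 27.0]);
|}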
source/mir/glas/l2.d is 80% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-algorithm-iteration.lst
|// Written in the D programming language.
|/**
|This module contains generic _iteration algorithms.
|$(SCRIPT inhibitQuickIndex = 1;)
|
|$(BOOKTABLE $(H2 Function),
|$(TR $(TH Function Name) $(TH Description))
|$(T2 all, Checks if all elements satisfy a predicate.)
|$(T2 any, Checks if at least one element satisfies a predicate.)
|$(T2 cmp, Compares two slices.)
|$(T2 count, Counts elements in a slice according to a predicate.)
|$(T2 each, Iterates elements.)
|$(T2 eachLower, Iterates lower triangle of matrix.)
|$(T2 eachOnBorder, Iterates elements on tensor borders and corners.)
|$(T2 eachUploPair, Iterates upper and lower pairs of elements in a square matrix.)
|$(T2 eachUpper, Iterates upper triangle of matrix.)
|$(T2 equal, Compares two slices for equality.)
|$(T2 filter, Filters elements in a range or an ndslice.)
|$(T2 find, Finds backward index.)
|$(T2 findIndex, Finds index.)
|$(T2 fold, Accumulates all elements (different parameter order than `reduce`).)
|$(T2 isSymmetric, Checks if the matrix is symmetric.)
|$(T2 maxIndex, Finds index of the maximum.)
|$(T2 maxPos, Finds backward index of the maximum.)
|$(T2 minIndex, Finds index of the minimum.)
|$(T2 minmaxIndex, Finds indices of the minimum and the maximum.)
|$(T2 minmaxPos, Finds backward indices of the minimum and the maximum.)
|$(T2 minPos, Finds backward index of the minimum.)
|$(T2 nBitsToCount, Count bits until the set bit count is reached.)
|$(T2 reduce, Accumulates all elements.)
|$(T2 Chequer, Chequer color selector to work with $(LREF each).)
|$(T2 uniq, Iterates over the unique elements in a range or an ndslice, which is assumed sorted.)
|)
|
|Transform function is represented by $(NDSLICEREF topology, map).
|
|All operators can modify slices through `ref` argument qualification in a function declaration.
|Note that string lambdas in Mir are `auto ref` functions.
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko (Mir & BetterC rework), John Michael Hall, Andrei Alexandrescu (original Phobos code)
|Source: $(PHOBOSSRC std/algorithm/_iteration.d)
|Macros:
| NDSLICEREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
| T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
| */
|module mir.algorithm.iteration;
|
|import mir.functional: naryFun;
|import mir.internal.utility;
|import mir.math.common: optmath;
|import mir.ndslice.field: BitField;
|import mir.ndslice.internal;
|import mir.ndslice.iterator: FieldIterator, RetroIterator;
|import mir.ndslice.slice;
|import mir.primitives;
|import mir.qualifier;
|import std.meta;
|import std.range.primitives: isInputRange, isBidirectionalRange, isInfinite, isForwardRange, ElementType;
|import std.traits;
|
|/++
|Chequer color selector to work with $(LREF each)
|+/
|enum Chequer : bool
|{
| /// Main diagonal color
| black,
| /// First sub-diagonal color
| red,
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| auto s = [5, 4].slice!int;
|
| Chequer.black.each!"a = 1"(s);
| assert(s == [
| [1, 0, 1, 0],
| [0, 1, 0, 1],
| [1, 0, 1, 0],
| [0, 1, 0, 1],
| [1, 0, 1, 0],
| ]);
|
| Chequer.red.each!((ref b) => b = 2)(s);
| assert(s == [
| [1, 2, 1, 2],
| [2, 1, 2, 1],
| [1, 2, 1, 2],
| [2, 1, 2, 1],
| [1, 2, 1, 2],
| ]);
|
|}
|
|@optmath:
|
|/+
|Bitslice representation for accelerated bitwise algorithms.
|A 1-dimensional contiguous bitslice can be split into three chunks: head bits, body chunks, and tail bits.
|
|A bitslice can have head bits because, due to slicing, its zero bit may not be aligned with the start of a body chunk.
|+/
|private struct BitSliceAccelerator(Field, I = typeof(Field.init[size_t.init]))
| if (__traits(isUnsigned, I))
|{
| import mir.bitop;
| import mir.qualifier: lightConst;
| import mir.ndslice.traits: isIterator;
| import mir.ndslice.iterator: FieldIterator;
| import mir.ndslice.field: BitField;
|
| ///
| alias U = typeof(I + 1u);
| /// body bits chunks
| static if (isIterator!Field)
| Slice!Field bodyChunks;
| else
| Slice!(FieldIterator!Field) bodyChunks;
| /// head length
| int headLength;
| /// tail length
| int tailLength;
|
|@optmath:
|
| this(Slice!(FieldIterator!(BitField!(Field, I))) slice)
| {
| enum mask = bitShiftMask!I;
| enum shift = bitElemShift!I;
| size_t length = slice.length;
| size_t index = slice._iterator._index;
| if (auto hlen = index & mask)
| {
| auto l = I.sizeof * 8 - hlen;
| if (l > length)
| {
| // central problem
| headLength = -cast(int) length;
| tailLength = cast(int) hlen;
| goto F;
| }
| else
| {
| headLength = cast(uint) l;
| length -= l;
| index += l;
| }
| }
| tailLength = cast(int) (length & mask);
| F:
| length >>= shift;
| index >>= shift;
| bodyChunks._lengths[0] = length;
| static if (isIterator!Field)
| {
| bodyChunks._iterator = slice._iterator._field._field;
| bodyChunks._iterator += index;
| }
| else
| {
| bodyChunks._iterator._index = index;
| bodyChunks._iterator._field = slice._iterator._field._field;
| }
| }
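|
| // Editor's note (not from the original source), illustrating the split
| // computed above: with I = ubyte (8-bit chunks), a slice starting at bit 3
| // with 20 bits gets headLength = 5 (bits 3 .. 8), one body chunk
| // (bits 8 .. 16), and tailLength = 7 (bits 16 .. 23); if the whole slice
| // fits inside a single chunk, the "central problem" branch is taken instead.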
|
|scope const:
|
| bool isCentralProblem()
| {
| return headLength < 0;
| }
|
| U centralBits()
| {
| assert(isCentralProblem);
| return *bodyChunks._iterator.lightConst >>> tailLength;
| }
|
| uint centralLength()
| {
| assert(isCentralProblem);
| return -headLength;
| }
|
| /// head bits (last `headLength` bits are valid).
| U headBits()
| {
| assert(!isCentralProblem);
| if (headLength == 0)
| return U.init;
| static if (isIterator!Field)
| return bodyChunks._iterator.lightConst[-1];
| else
| return bodyChunks._iterator._field.lightConst[bodyChunks._iterator._index - 1];
| }
|
| /// tail bits (first `tailLength` bits are valid).
| U tailBits()
| {
| assert(!isCentralProblem);
| if (tailLength == 0)
| return U.init;
| static if (isIterator!Field)
| return bodyChunks._iterator.lightConst[bodyChunks.length];
| else
| return bodyChunks._iterator._field.lightConst[bodyChunks._iterator._index + bodyChunks.length];
| }
|
| U negCentralMask()
| {
| return U.max << centralLength;
| }
|
| U negHeadMask()
| {
| return U.max << headLength;
| }
|
| U negTailMask()
| {
| return U.max << tailLength;
| }
|
| U negCentralMaskS()
| {
| return U.max >> centralLength;
| }
|
| U negHeadMaskS()
| {
| return U.max >> headLength;
| }
|
| U negTailMaskS()
| {
| return U.max >> tailLength;
| }
|
| U centralBitsWithRemainingZeros()
| {
| return centralBits & ~negCentralMask;
| }
|
| U centralBitsWithRemainingZerosS()
| {
| return centralBits << (U.sizeof * 8 - centralLength);
| }
|
| U headBitsWithRemainingZeros()
| {
| return headBits >>> (I.sizeof * 8 - headLength);
| }
|
| U headBitsWithRemainingZerosS()
| {
| static if (U.sizeof > I.sizeof)
| return (headBits << (U.sizeof - I.sizeof) * 8) & ~negTailMaskS;
| else
| return headBits & ~negTailMaskS;
| }
|
| U tailBitsWithRemainingZeros()
| {
| return tailBits & ~negTailMask;
| }
|
| U tailBitsWithRemainingZerosS()
| {
| return tailBits << (U.sizeof * 8 - tailLength);
| }
|
| U centralBitsWithRemainingOnes()
| {
| return centralBits | negCentralMask;
| }
|
| U centralBitsWithRemainingOnesS()
| {
| return centralBitsWithRemainingZerosS | negCentralMaskS;
| }
|
| U headBitsWithRemainingOnes()
| {
| return headBitsWithRemainingZeros | negHeadMask;
| }
|
| U headBitsWithRemainingOnesS()
| {
| return headBitsWithRemainingZerosS | negHeadMaskS;
| }
|
| U tailBitsWithRemainingOnes()
| {
| return tailBits | negTailMask;
| }
|
| U tailBitsWithRemainingOnesS()
| {
| return tailBitsWithRemainingZerosS | negTailMaskS;
| }
|
| size_t ctpop()
| {
| import mir.bitop: ctpop;
| if (isCentralProblem)
| return centralBitsWithRemainingZeros.ctpop;
| size_t ret;
| if (headLength)
| ret = cast(size_t) headBitsWithRemainingZeros.ctpop;
| if (bodyChunks.length)
| {
| auto bc = bodyChunks.lightConst;
| do
| {
| ret += cast(size_t) bc.front.ctpop;
| bc.popFront;
| }
| while(bc.length);
| }
| if (tailBits)
| ret += cast(size_t) tailBitsWithRemainingZeros.ctpop;
| return ret;
| }
|
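| /// `true` if at least one bit in the view is set.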
| bool any()
| {
| if (isCentralProblem)
| return centralBitsWithRemainingZeros != 0;
| if (headBitsWithRemainingZeros != 0)
| return true;
| if (bodyChunks.length)
| {
| auto bc = bodyChunks.lightConst;
| do
| {
| if (bc.front != 0)
| return true;
| bc.popFront;
| }
| while(bc.length);
| }
| if (tailBitsWithRemainingZeros != 0)
| return true;
| return false;
| }
|
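| /// `true` if every bit in the view is set.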
| bool all()
| {
| if (isCentralProblem)
| return centralBitsWithRemainingOnes == U.max;
| size_t ret;
| if (headBitsWithRemainingOnes != U.max)
| return false;
| if (bodyChunks.length)
| {
| auto bc = bodyChunks.lightConst;
| do
| {
| if (bc.front != I.max)
| return false;
| bc.popFront;
| }
| while(bc.length);
| }
| if (tailBitsWithRemainingOnes != U.max)
| return false;
| return true;
| }
|
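| /// Count of zero bits from the beginning of the view up to the first set bit; equals the view length if no bit is set.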
| size_t cttz()
| {
| U v;
| size_t ret;
| if (isCentralProblem)
| {
| v = centralBitsWithRemainingOnes;
| if (v)
| goto R;
| ret = centralLength;
| goto L;
| }
| v = headBitsWithRemainingOnes;
| if (v)
| goto R;
| ret = headLength;
| if (bodyChunks.length)
| {
| auto bc = bodyChunks.lightConst;
| do
| {
| v = bc.front;
| if (v)
| goto R;
| ret += I.sizeof * 8;
| bc.popFront;
| }
| while(bc.length);
| }
| v = tailBitsWithRemainingOnes;
| if (v)
| goto R;
| ret += tailLength;
| goto L;
| R:
| ret += v.cttz;
| L:
| return ret;
| }
|
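| /// Count of zero bits from the end of the view down to the last set bit; equals the view length if no bit is set.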
| size_t ctlz()
| {
| U v;
| size_t ret;
| if (isCentralProblem)
| {
| v = centralBitsWithRemainingOnes;
| if (v)
| goto R;
| ret = centralLength;
| goto L;
| }
| v = tailBitsWithRemainingOnesS;
| if (v)
| goto R;
| ret = tailLength;
| if (bodyChunks.length)
| {
| auto bc = bodyChunks.lightConst;
| do
| {
| v = bc.back;
| if (v)
| goto R;
| ret += I.sizeof * 8;
| bc.popBack;
| }
| while(bc.length);
| }
| v = headBitsWithRemainingOnesS;
| if (v)
| goto R;
| ret += headLength;
| goto L;
| R:
| ret += v.ctlz;
| L:
| return ret;
| }
|
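| /// Number of bits that must be scanned from the beginning of the view to see `count` set bits; `-1` if the view has fewer set bits.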
| sizediff_t nBitsToCount(size_t count)
| {
| size_t ret;
| if (count == 0)
| return count;
| U v, cnt;
| if (isCentralProblem)
| {
| v = centralBitsWithRemainingZeros;
| goto E;
| }
| v = headBitsWithRemainingZeros;
| cnt = v.ctpop;
| if (cnt >= count)
| goto R;
| ret += headLength;
| count -= cast(size_t) cnt;
| if (bodyChunks.length)
| {
| auto bc = bodyChunks.lightConst;
| do
| {
| v = bc.front;
| cnt = v.ctpop;
| if (cnt >= count)
| goto R;
| ret += I.sizeof * 8;
| count -= cast(size_t) cnt;
| bc.popFront;
| }
| while(bc.length);
| }
| v = tailBitsWithRemainingZeros;
| E:
| cnt = v.ctpop;
| if (cnt >= count)
| goto R;
| return -1;
| R:
| return ret + v.nTrailingBitsToCount(count);
| }
|
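| /// Same as `nBitsToCount`, but scans from the end of the view.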
| sizediff_t retroNBitsToCount(size_t count)
| {
| if (count == 0)
| return count;
| size_t ret;
| U v, cnt;
| if (isCentralProblem)
| {
| v = centralBitsWithRemainingZerosS;
| goto E;
| }
| v = tailBitsWithRemainingZerosS;
| cnt = v.ctpop;
| if (cnt >= count)
| goto R;
| ret += tailLength;
| count -= cast(size_t) cnt;
| if (bodyChunks.length)
| {
| auto bc = bodyChunks.lightConst;
| do
| {
| v = bc.back;
| cnt = v.ctpop;
| if (cnt >= count)
| goto R;
| ret += I.sizeof * 8;
| count -= cast(size_t) cnt;
| bc.popBack;
| }
| while(bc.length);
| }
| v = headBitsWithRemainingZerosS;
| E:
| cnt = v.ctpop;
| if (cnt >= count)
| goto R;
| return -1;
| R:
| return ret + v.nLeadingBitsToCount(count);
| }
|}
|
|/++
|Count bits until the requested number of set bits is reached. Works with ndslices created with $(REF bitwise, mir,ndslice,topology) and $(REF bitSlice, mir,ndslice,allocation).
|Returns: the bit count if the requested number of set bits is reached, or `-1` otherwise.
|+/
|sizediff_t nBitsToCount(Field, I)(Slice!(FieldIterator!(BitField!(Field, I))) bitSlice, size_t count)
|{
| return BitSliceAccelerator!(Field, I)(bitSlice).nBitsToCount(count);
|}
|
|///ditto
|sizediff_t nBitsToCount(Field, I)(Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))) bitSlice, size_t count)
|{
| import mir.ndslice.topology: retro;
| return BitSliceAccelerator!(Field, I)(bitSlice.retro).retroNBitsToCount(count);
|}
|
|///
|pure unittest
|{
| import mir.ndslice.allocation: bitSlice;
| import mir.ndslice.topology: retro;
| auto s = bitSlice(1000);
| s[50] = true;
| s[100] = true;
| s[200] = true;
| s[300] = true;
| s[400] = true;
| assert(s.nBitsToCount(4) == 301);
| assert(s.retro.nBitsToCount(4) == 900);
|}
|
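|// Asserts that all `slices` share the same shape; arguments whose shapes have extra dimensions are checked against the leading dimensions via `fuseShape`.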
|private void checkShapesMatch(
| string fun = __FUNCTION__,
| string pfun = __PRETTY_FUNCTION__,
| Slices...)
| (scope ref const Slices slices)
| if (Slices.length > 1)
|{
| enum msgShape = "all slices must have the same shape" ~ tailErrorMessage!(fun, pfun);
| enum N = slices[0].shape.length;
| foreach (i, Slice; Slices)
| {
| static if (i == 0)
0000000| continue;
| else
| static if (slices[i].shape.length == N)
0000000| assert(slices[i].shape == slices[0].shape, msgShape);
| else
| {
| import mir.ndslice.fuse: fuseShape;
| static assert(slices[i].fuseShape.length >= N);
| assert(cast(size_t[N])slices[i].fuseShape[0 .. N] == slices[0].shape, msgShape);
| }
| }
|}
|
|
|package(mir) template allFlattened(args...)
|{
| static if (args.length)
| {
| alias arg = args[0];
| @optmath @property ls()()
| {
| import mir.ndslice.topology: flattened;
0000000| return flattened(arg);
| }
| alias allFlattened = AliasSeq!(ls, allFlattened!(args[1..$]));
| }
| else
| alias allFlattened = AliasSeq!();
|}
|
|private template areAllContiguousSlices(Slices...)
|{
| import mir.ndslice.traits: isContiguousSlice;
| static if (allSatisfy!(isContiguousSlice, Slices))
| enum areAllContiguousSlices = Slices[0].N > 1 && areAllContiguousSlicesImpl!(Slices[0].N, Slices[1 .. $]);
| else
| enum areAllContiguousSlices = false;
|}
|
|private template areAllContiguousSlicesImpl(size_t N, Slices...)
|{
| static if (Slices.length == 0)
| enum areAllContiguousSlicesImpl = true;
| else
| enum areAllContiguousSlicesImpl = Slices[0].N == N && areAllContiguousSlicesImpl!(N, Slices[1 .. $]);
|}
|
|version(LDC) {}
|else version(GNU) {}
|else version (Windows) {}
|else version (X86_64)
|{
| //Compiling with DMD for x86-64 for Linux & OS X with optimizations enabled,
| //"Tensor mutation on-the-fly" unittest was failing. Disabling inlining
| //caused it to succeed.
| //TODO: Rework so this is unnecessary!
| version = Mir_disable_inlining_in_reduce;
|}
|
|version(Mir_disable_inlining_in_reduce)
|{
| private enum Mir_disable_inlining_in_reduce = true;
|
| private template _naryAliases(size_t n)
| {
| static if (n == 0)
| enum _naryAliases = "";
| else
| {
| enum i = n - 1;
| enum _naryAliases = _naryAliases!i ~ "alias " ~ cast(char)('a' + i) ~ " = args[" ~ i.stringof ~ "];\n";
| }
| }
|
| private template nonInlinedNaryFun(alias fun)
| {
| import mir.math.common : optmath;
| static if (is(typeof(fun) : string))
| {
| /// Specialization for string lambdas
| @optmath auto ref nonInlinedNaryFun(Args...)(auto ref Args args)
| if (args.length <= 26)
| {
| pragma(inline,false);
| mixin(_naryAliases!(Args.length));
| return mixin(fun);
| }
| }
| else static if (is(typeof(fun.opCall) == function))
| {
| @optmath auto ref nonInlinedNaryFun(Args...)(auto ref Args args)
| if (is(typeof(fun.opCall(args))))
| {
| pragma(inline,false);
| return fun.opCall(args);
| }
| }
| else
| {
| @optmath auto ref nonInlinedNaryFun(Args...)(auto ref Args args)
| if (is(typeof(fun(args))))
| {
| pragma(inline,false);
| return fun(args);
| }
| }
| }
|}
|else
|{
| private enum Mir_disable_inlining_in_reduce = false;
|}
|
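|// Recursive kernel for `reduce`: folds `fun` over the fronts of all slices in lockstep, one dimension at a time.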
|S reduceImpl(alias fun, S, Slices...)(S seed, scope Slices slices)
|{
| do
| {
| static if (DimensionCount!(Slices[0]) == 1)
| seed = fun(seed, frontOf!slices);
| else
| seed = .reduceImpl!fun(seed, frontOf!slices);
| foreach_reverse(ref slice; slices)
| slice.popFront;
| }
| while(!slices[0].empty);
| return seed;
|}
|
|/++
|Implements the homonym function (also known as `accumulate`,
|`compress`, `inject`, or `fold`) present in various programming
|languages of functional flavor. The call `reduce!(fun)(seed, slice1, ..., sliceN)`
|first assigns `seed` to an internal variable `result`,
|also called the accumulator. Then, for each set of elements `x1, ..., xN` in
|`slice1, ..., sliceN`, `result = fun(result, x1, ..., xN)` gets evaluated. Finally,
|`result` is returned.
|
|`reduce` allows iterating multiple slices in lockstep.
|
|Note:
| $(NDSLICEREF topology, pack) can be used to specify dimensions.
|Params:
| fun = A function.
|See_Also:
| $(HTTP llvm.org/docs/LangRef.html#fast-math-flags, LLVM IR: Fast Math Flags)
|
| $(HTTP en.wikipedia.org/wiki/Fold_(higher-order_function), Fold (higher-order function))
|+/
|template reduce(alias fun)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun)
| && !Mir_disable_inlining_in_reduce)
| /++
| Params:
| seed = An initial accumulation value.
| slices = One or more slices, ranges, or arrays.
| Returns:
| the accumulated `result`
| +/
| @optmath auto reduce(S, Slices...)(S seed, scope Slices slices)
| if (Slices.length)
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| static if (areAllContiguousSlices!Slices)
| {
| import mir.ndslice.topology: flattened;
| return .reduce!fun(seed, allFlattened!(allLightScope!slices));
| }
| else
| {
| if (slices[0].anyEmpty)
| return cast(Unqual!S) seed;
| static if (is(S : Unqual!S))
| alias UT = Unqual!S;
| else
| alias UT = S;
| return reduceImpl!(fun, UT, Slices)(seed, allLightScope!slices);
| }
| }
| else version(Mir_disable_inlining_in_reduce)
| //As above, but with inlining disabled.
| @optmath auto reduce(S, Slices...)(S seed, scope Slices slices)
| if (Slices.length)
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| static if (areAllContiguousSlices!Slices)
| {
| import mir.ndslice.topology: flattened;
| return .reduce!fun(seed, allFlattened!(allLightScope!slices));
| }
| else
| {
| if (slices[0].anyEmpty)
| return cast(Unqual!S) seed;
| static if (is(S : Unqual!S))
| alias UT = Unqual!S;
| else
| alias UT = S;
| return reduceImpl!(nonInlinedNaryFun!fun, UT, Slices)(seed, allLightScope!slices);
| }
| }
| else
| alias reduce = .reduce!(naryFun!fun);
|}
|
|/// Ranges and arrays
|version(mir_test)
|unittest
|{
| auto ar = [1, 2, 3];
| auto s = 0.reduce!"a + b"(ar);
| assert (s == 6);
|}
|
|/// Single slice
|version(mir_test)
|unittest
|{
| import mir.ndslice.topology : iota;
|
| //| 0 1 2 | => 3 |
| //| 3 4 5 | => 12 | => 15
| auto sl = iota(2, 3);
|
| // sum of all element in the slice
| auto res = size_t(0).reduce!"a + b"(sl);
|
| assert(res == 15);
|}
|
|/// Multiple slices, dot product
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| auto a = iota([2, 3], 0).as!double.slice;
| //| 1 2 3 |
| //| 4 5 6 |
| auto b = iota([2, 3], 1).as!double.slice;
|
| alias dot = reduce!"a + b * c";
| auto res = dot(0.0, a, b);
|
| // check the result:
| import mir.ndslice.topology : flattened;
| import std.numeric : dotProduct;
| assert(res == dotProduct(a.flattened, b.flattened));
|}
|
|/// Zipped slices, dot product
|pure
|version(mir_test) unittest
|{
| import std.typecons : Yes;
| import std.numeric : dotProduct;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota, zip, universal;
| import mir.math.common : optmath;
|
| static @optmath T fmuladd(T, Z)(const T a, Z z)
| {
| return a + z.a * z.b;
| }
|
| // 0 1 2
| // 3 4 5
| auto sl1 = iota(2, 3).as!double.slice.universal;
| // 1 2 3
| // 4 5 6
| auto sl2 = iota([2, 3], 1).as!double.slice;
|
| // slices must have the same strides for `zip!true`.
| assert(sl1.strides == sl2.strides);
|
| auto z = zip!true(sl1, sl2);
|
| auto dot = reduce!fmuladd(0.0, z);
|
| assert(dot == dotProduct(iota(6), iota([6], 1)));
|}
|
|/// Tensor mutation on-the-fly
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
| import mir.math.common : optmath;
|
| static @optmath T fun(T)(const T a, ref T b)
| {
| return a + b++;
| }
|
| //| 0 1 2 |
| //| 3 4 5 |
| auto sl = iota(2, 3).as!double.slice;
|
| auto res = reduce!fun(double(0), sl);
|
| assert(res == 15);
|
| //| 1 2 3 |
| //| 4 5 6 |
| assert(sl == iota([2, 3], 1));
|}
|
|/++
|Packed slices.
|
|Computes minimum value of maximum values for each row.
|+/
|version(mir_test)
|unittest
|{
| import mir.math.common;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.dynamic : transposed;
| import mir.ndslice.topology : as, iota, pack, map, universal;
|
| alias maxVal = (a) => reduce!fmax(-double.infinity, a);
| alias minVal = (a) => reduce!fmin(double.infinity, a);
| alias minimaxVal = (a) => minVal(a.pack!1.map!maxVal);
|
| auto sl = iota(2, 3).as!double.slice;
|
| // Vectorized computation: row stride equals 1.
| //| 0 1 2 | => | 2 |
| //| 3 4 5 | => | 5 | => 2
| auto res = minimaxVal(sl);
| assert(res == 2);
|
| // Common computation: row stride does not equal 1.
| //| 0 1 2 | | 0 3 | => | 3 |
| //| 3 4 5 | => | 1 4 | => | 4 |
| // | 2 5 | => | 5 | => 3
| auto resT = minimaxVal(sl.universal.transposed);
| assert(resT == 3);
|}
|
|/// Dlang Range API support.
|version(mir_test)
|unittest
|{
| import mir.algorithm.iteration: each;
| import std.range: phobos_iota = iota;
|
| int s;
| // 0 1 2 3
| 4.phobos_iota.each!(i => s += i);
| assert(s == 6);
|}
|
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| auto a = reduce!"a + b"(size_t(7), iota([0, 1], 1));
| assert(a == 7);
|}
|
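|// Recursive kernel for `each`: applies `fun` to the fronts of all slices in lockstep, one dimension at a time.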
|void eachImpl(alias fun, Slices...)(scope Slices slices)
|{
| foreach(ref slice; slices)
| assert(!slice.empty);
| do
| {
| static if (DimensionCount!(Slices[0]) == 1)
| fun(frontOf!slices);
| else
| .eachImpl!fun(frontOf!slices);
| foreach_reverse(i; Iota!(Slices.length))
| slices[i].popFront;
| }
| while(!slices[0].empty);
|}
|
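|// Kernel for the Chequer-colored `each` overload: the starting color flips on every row, and within a row only the elements of the selected color are visited.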
|void chequerEachImpl(alias fun, Slices...)(Chequer color, scope Slices slices)
|{
| foreach(ref slice; slices)
| assert(!slice.empty);
| static if (DimensionCount!(Slices[0]) == 1)
| {
| if (color)
| {
| foreach_reverse(i; Iota!(Slices.length))
| slices[i].popFront;
| if (slices[0].empty)
| return;
| }
| eachImpl!fun(strideOf!slices);
| }
| else
| {
| do
| {
| .chequerEachImpl!fun(color, frontOf!slices);
| color = cast(Chequer)!color;
| foreach_reverse(i; Iota!(Slices.length))
| slices[i].popFront;
| }
| while(!slices[0].empty);
| }
|}
|
|/++
|The call `eachOnBorder!(fun)(slice1, ..., sliceN)`
|evaluates `fun` for each set of elements `x1, ..., xN` on
|the borders of `slice1, ..., sliceN` respectively.
|
|`eachOnBorder` allows iterating multiple slices in lockstep.
|
|Params:
| fun = A function.
|Note:
| $(NDSLICEREF dynamic, transposed) and
| $(NDSLICEREF topology, pack) can be used to specify dimensions.
|+/
|template eachOnBorder(alias fun)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun))
| /++
| Params:
| slices = One or more slices.
| +/
| @optmath void eachOnBorder(Slices...)(Slices slices)
| if (allSatisfy!(isSlice, Slices))
| {
| import mir.ndslice.traits: isContiguousSlice;
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| if (!slices[0].anyEmpty)
| {
| alias N = DimensionCount!(Slices[0]);
| static if (N == 1)
| {
| fun(frontOf!slices);
| if (slices[0].length > 1)
| fun(backOf!slices);
| }
| else
| static if (anySatisfy!(isContiguousSlice, Slices))
| {
| import mir.ndslice.topology: canonical;
| template f(size_t i)
| {
| static if (isContiguousSlice!(Slices[i]))
| auto f () { return canonical(slices[i]); }
| else
| alias f = slices[i];
| }
| eachOnBorder(staticMap!(f, Iota!(Slices.length)));
| }
| else
| {
| foreach (dimension; Iota!N)
| {
| eachImpl!fun(frontOfD!(dimension, slices));
| foreach_reverse(ref slice; slices)
| slice.popFront!dimension;
| if (slices[0].empty!dimension)
| return;
| eachImpl!fun(backOfD!(dimension, slices));
| foreach_reverse(ref slice; slices)
| slice.popBack!dimension;
| if (slices[0].empty!dimension)
| return;
| }
| }
| }
| }
| else
| alias eachOnBorder = .eachOnBorder!(naryFun!fun);
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : repeat, iota;
|
| auto sl = [3, 4].iota.slice;
| auto zeros = repeat(0, [3, 4]);
|
| sl.eachOnBorder!"a = b"(zeros);
|
| assert(sl ==
| [[0, 0, 0 ,0],
| [0, 5, 6, 0],
| [0, 0, 0 ,0]]);
|
| sl.eachOnBorder!"a = 1";
| sl[0].eachOnBorder!"a = 2";
|
| assert(sl ==
| [[2, 1, 1, 2],
| [1, 5, 6, 1],
| [1, 1, 1 ,1]]);
|}
|
|/++
|The call `each!(fun)(slice1, ..., sliceN)`
|evaluates `fun` for each set of elements `x1, ..., xN` in
|`slice1, ..., sliceN` respectively.
|
|`each` allows iterating multiple slices in lockstep.
|Params:
| fun = A function.
|Note:
| $(NDSLICEREF dynamic, transposed) and
| $(NDSLICEREF topology, pack) can be used to specify dimensions.
|See_Also:
| This is functionally similar to $(LREF reduce) but has no seed.
|+/
|template each(alias fun)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun))
| {
| /++
| Params:
| slices = One or more slices, ranges, and arrays.
| +/
| @optmath auto each(Slices...)(scope Slices slices)
| if (Slices.length && !is(Slices[0] : Chequer))
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| static if (areAllContiguousSlices!Slices)
| {
| import mir.ndslice.topology: flattened;
| .each!fun(allFlattened!(allLightScope!slices));
| }
| else
| {
| if (slices[0].anyEmpty)
| return;
| eachImpl!fun(allLightScope!slices);
| }
| }
|
| /++
| Iterates elements of selected $(LREF Chequer) color.
| Params:
| color = $(LREF Chequer).
| slices = One or more slices.
| +/
| @optmath auto each(Slices...)(Chequer color, scope Slices slices)
| if (Slices.length && allSatisfy!(isSlice, Slices))
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| if (slices[0].anyEmpty)
| return;
| chequerEachImpl!fun(color, allLightScope!slices);
| }
| }
| else
| alias each = .each!(naryFun!fun);
|}
|
|/// Ranges and arrays
|version(mir_test)
|unittest
|{
| auto ar = [1, 2, 3];
| ar.each!"a *= 2";
| assert (ar == [2, 4, 6]);
|}
|
|/// Single slice, multiply-add
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| auto sl = iota(2, 3).as!double.slice;
|
| sl.each!((ref a) { a = a * 10 + 5; });
|
| assert(sl ==
| [[ 5, 15, 25],
| [35, 45, 55]]);
|}
|
|/// Swap two slices
|version(mir_test)
|unittest
|{
| import mir.utility : swap;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| auto a = iota([2, 3], 0).as!double.slice;
| //| 10 11 12 |
| //| 13 14 15 |
| auto b = iota([2, 3], 10).as!double.slice;
|
| each!swap(a, b);
|
| assert(a == iota([2, 3], 10));
| assert(b == iota([2, 3], 0));
|}
|
|/// Swap two zipped slices
|version(mir_test)
|unittest
|{
| import mir.utility : swap;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, zip, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| auto a = iota([2, 3], 0).as!double.slice;
| //| 10 11 12 |
| //| 13 14 15 |
| auto b = iota([2, 3], 10).as!double.slice;
|
| auto z = zip(a, b);
|
| z.each!(z => swap(z.a, z.b));
|
| assert(a == iota([2, 3], 10));
| assert(b == iota([2, 3], 0));
|}
|
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| size_t i;
| iota(0, 2).each!((a){i++;});
| assert(i == 0);
|}
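|
|// Chequer-colored iteration. This example is an added sketch (not part of the
|// original sources); it assumes the `Chequer` enum exposes `black` (the color
|// of the zero index) and `red` members.
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation : slice;
|
| // zero-initialized 3x3 matrix
| auto m = slice!int(3, 3);
|
| // assign 1 to every "black" cell, i.e. where the index sum is even
| each!"a = 1"(Chequer.black, m);
|
| assert(m == [
| [1, 0, 1],
| [0, 1, 0],
| [1, 0, 1]]);
|}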
|
|/++
|The call `eachUploPair!(fun)(matrix)`
|evaluates `fun` for each pair (`matrix[j, i]`, `matrix[i, j]`),
|for i < j (default) or i <= j (if `includeDiagonal` is true).
|
|Params:
| fun = A function.
| includeDiagonal = `true` to also apply the function to the diagonal elements,
| `false` (default) otherwise.
|+/
|template eachUploPair(alias fun, bool includeDiagonal = false)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun))
| {
| /++
| Params:
| matrix = Square matrix.
| +/
| auto eachUploPair(Iterator, SliceKind kind)(Slice!(Iterator, 2, kind) matrix)
| in
| {
| assert(matrix.length!0 == matrix.length!1, "matrix must be square.");
| }
| do
| {
| static if (kind == Contiguous)
| {
| import mir.ndslice.topology: canonical;
| .eachUploPair!(fun, includeDiagonal)(matrix.canonical);
| }
| else
| {
| static if (includeDiagonal == true)
| {
| if (matrix.length) do
| {
| eachImpl!fun(matrix.lightScope.front!0, matrix.lightScope.front!1);
| matrix.popFront!1;
| matrix.popFront!0;
| // hint for optimizer
| matrix._lengths[1] = matrix._lengths[0];
| }
| while (matrix.length);
| }
| else
| {
| if (matrix.length) for(;;)
| {
| assert(!matrix.empty!0);
| assert(!matrix.empty!1);
| auto l = matrix.lightScope.front!1;
| auto u = matrix.lightScope.front!0;
| matrix.popFront!1;
| matrix.popFront!0;
| l.popFront;
| u.popFront;
| // hint for optimizer
| matrix._lengths[1] = matrix._lengths[0] = l._lengths[0] = u._lengths[0];
| if (u.length == 0)
| break;
| eachImpl!fun(u, l);
| }
| }
| }
| }
| }
| else
| {
| alias eachUploPair = .eachUploPair!(naryFun!fun, includeDiagonal);
| }
|}
|
|/// Transpose matrix in place.
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota, universal;
| import mir.ndslice.dynamic: transposed;
| import mir.utility: swap;
|
| auto m = iota(4, 4).slice;
|
| m.eachUploPair!swap;
|
| assert(m == iota(4, 4).universal.transposed);
|}
|
|/// Reflect Upper matrix part to lower part.
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota, universal;
| import mir.ndslice.dynamic: transposed;
| import mir.utility: swap;
|
| // 0 1 2
| // 3 4 5
| // 6 7 8
| auto m = iota(3, 3).slice;
|
| m.eachUploPair!((u, ref l) { l = u; });
|
| assert(m == [
| [0, 1, 2],
| [1, 4, 5],
| [2, 5, 8]]);
|}
|
|/// Fill lower triangle and diagonal with zeroes.
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| // 1 2 3
| // 4 5 6
| // 7 8 9
| auto m = iota([3, 3], 1).slice;
|
| m.eachUploPair!((u, ref l) { l = 0; }, true);
|
| assert(m == [
| [0, 2, 3],
| [0, 0, 6],
| [0, 0, 0]]);
|}
|
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| // 0 1 2
| // 3 4 5
| // 6 7 8
| auto m = iota(3, 3).slice;
| m.eachUploPair!((u, ref l) { l = l + 1; }, true);
| assert(m == [
| [1, 1, 2],
| [4, 5, 5],
| [7, 8, 9]]);
|}
|
|version(mir_test)
|unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| // 0 1 2
| // 3 4 5
| // 6 7 8
| auto m = iota(3, 3).slice;
| m.eachUploPair!((u, ref l) { l = l + 1; }, false);
|
| assert(m == [
| [0, 1, 2],
| [4, 4, 5],
| [7, 8, 8]]);
|}
|
|/++
|Checks if the matrix is symmetric.
|+/
|template isSymmetric(alias fun = "a == b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun))
| /++
| Params:
| matrix = 2D ndslice.
| +/
| bool isSymmetric(Iterator, SliceKind kind)(Slice!(Iterator, 2, kind) matrix)
| {
| static if (kind == Contiguous)
| {
| import mir.ndslice.topology: canonical;
| return .isSymmetric!fun(matrix.canonical);
| }
| else
| {
| if (matrix.length!0 != matrix.length!1)
| return false;
| if (matrix.length) do
| {
| if (!allImpl!fun(matrix.lightScope.front!0, matrix.lightScope.front!1))
| {
| return false;
| }
| matrix.popFront!1;
| matrix.popFront!0;
| matrix._lengths[1] = matrix._lengths[0];
| }
| while (matrix.length);
| return true;
| }
| }
| else
| alias isSymmetric = .isSymmetric!(naryFun!fun);
|}
|
|///
|version(mir_test)
|unittest
|{
| import mir.ndslice.topology: iota;
| assert(iota(2, 2).isSymmetric == false);
|
| assert(
| [1, 2,
| 2, 3].sliced(2, 2).isSymmetric == true);
|}
|
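|// Kernel for `minPos`/`minIndex`: recursively tracks the backward index and the iterator of the current extreme element.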
|bool minPosImpl(alias fun, Iterator, size_t N, SliceKind kind)(scope ref size_t[N] backwardIndex, scope ref Iterator iterator, Slice!(Iterator, N, kind) slice)
|{
| bool found;
| do
| {
| static if (slice.shape.length == 1)
| {
| if (fun(*slice._iterator, *iterator))
| {
| backwardIndex[0] = slice.length;
| iterator = slice._iterator;
| found = true;
| }
| }
| else
| {
| if (minPosImpl!(fun, LightScopeOf!Iterator, N - 1, kind)(backwardIndex[1 .. $], iterator, lightScope(slice).front))
| {
| backwardIndex[0] = slice.length;
| found = true;
| }
| }
| slice.popFront;
| }
| while(!slice.empty);
| return found;
|}
|
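|// Kernel for `minmaxPos`/`minmaxIndex`: like `minPosImpl`, but tracks the minimal and maximal elements simultaneously.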
|bool[2] minmaxPosImpl(alias fun, Iterator, size_t N, SliceKind kind)(scope ref size_t[2][N] backwardIndex, scope ref Iterator[2] iterator, Slice!(Iterator, N, kind) slice)
|{
| bool[2] found;
| do
| {
| static if (slice.shape.length == 1)
| {
| if (fun(*slice._iterator, *iterator[0]))
| {
| backwardIndex[0][0] = slice.length;
| iterator[0] = slice._iterator;
| found[0] = true;
| }
| else
| if (fun(*iterator[1], *slice._iterator))
| {
| backwardIndex[0][1] = slice.length;
| iterator[1] = slice._iterator;
| found[1] = true;
| }
| }
| else
| {
| auto r = minmaxPosImpl!(fun, LightScopeOf!Iterator, N - 1, kind)(backwardIndex[1 .. $], iterator, lightScope(slice).front);
| if (r[0])
| {
| backwardIndex[0][0] = slice.length;
| }
| if (r[1])
| {
| backwardIndex[0][1] = slice.length;
| }
| }
| slice.popFront;
| }
| while(!slice.empty);
| return found;
|}
|
|/++
|Finds positions (ndslices) such that
|`position[0].first` is the minimal and `position[1].first` is the maximal element in the slice.
|
|Each position is a sub-ndslice of the same dimensionality spanning from the found element to the bottom-right corner of the slice.
|
|Params:
| pred = A predicate.
|
|See_also:
| $(LREF minmaxIndex),
| $(LREF minPos),
| $(LREF maxPos),
| $(NDSLICEREF slice, Slice.backward).
|+/
|template minmaxPos(alias pred = "a < b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slice = ndslice.
| Returns:
| 2 subslices with minimal and maximal `first` elements.
| +/
| @optmath Slice!(Iterator, N, kind == Contiguous && N > 1 ? Canonical : kind)[2]
| minmaxPos(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| typeof(return) pret;
| if (!slice.anyEmpty)
| {
| size_t[2][N] ret;
| auto scopeSlice = lightScope(slice);
| auto it = scopeSlice._iterator;
| LightScopeOf!Iterator[2] iterator = [it, it];
| minmaxPosImpl!(pred, LightScopeOf!Iterator, N, kind)(ret, iterator, scopeSlice);
| foreach (i; Iota!N)
| {
| pret[0]._lengths[i] = ret[i][0];
| pret[1]._lengths[i] = ret[i][1];
| }
| pret[0]._iterator = slice._iterator + (iterator[0] - scopeSlice._iterator);
| pret[1]._iterator = slice._iterator + (iterator[1] - scopeSlice._iterator);
| }
| auto strides = slice.strides;
| foreach(i; Iota!(0, pret[0].S))
| {
| pret[0]._strides[i] = strides[i];
| pret[1]._strides[i] = strides[i];
| }
| return pret;
| }
| else
| alias minmaxPos = .minmaxPos!(naryFun!pred);
|}
|
|///
|version(mir_test)
|unittest
|{
| auto s = [
| 2, 6, 4, -3,
| 0, -4, -3, 3,
| -3, -2, 7, 2,
| ].sliced(3, 4);
|
| auto pos = s.minmaxPos;
|
| assert(pos[0] == s[$ - 2 .. $, $ - 3 .. $]);
| assert(pos[1] == s[$ - 1 .. $, $ - 2 .. $]);
|
| assert(pos[0].first == -4);
| assert(s.backward(pos[0].shape) == -4);
| assert(pos[1].first == 7);
| assert(s.backward(pos[1].shape) == 7);
|}
|
|/++
|Finds indices such that
|`slice[indices[0]]` is the minimal and `slice[indices[1]]` is the maximal element in the slice.
|
|Params:
| pred = A predicate.
|
|See_also:
| $(LREF minmaxIndex),
| $(LREF minPos),
| $(LREF maxPos),
| $(REF Slice.backward, mir,ndslice,slice).
|+/
|template minmaxIndex(alias pred = "a < b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slice = ndslice.
| Returns:
| Two multidimensional indices: the first of a minimal element, the second of a maximal element.
| Index elements equal `size_t.max` if the slice is empty.
| +/
| @optmath size_t[N][2] minmaxIndex(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| typeof(return) pret = size_t.max;
| if (!slice.anyEmpty)
| {
| auto shape = slice.shape;
| size_t[2][N] ret;
| foreach (i; Iota!N)
| {
| ret[i][1] = ret[i][0] = shape[i];
| }
| auto scopeSlice = lightScope(slice);
| auto it = scopeSlice._iterator;
| LightScopeOf!Iterator[2] iterator = [it, it];
| minmaxPosImpl!(pred, LightScopeOf!Iterator, N, kind)(ret, iterator, scopeSlice);
| foreach (i; Iota!N)
| {
| pret[0][i] = slice._lengths[i] - ret[i][0];
| pret[1][i] = slice._lengths[i] - ret[i][1];
| }
| }
| return pret;
| }
| else
| alias minmaxIndex = .minmaxIndex!(naryFun!pred);
|}
|
|///
|version(mir_test)
|unittest
|{
| auto s = [
| 2, 6, 4, -3,
| 0, -4, -3, 3,
| -3, -2, 7, 8,
| ].sliced(3, 4);
|
| auto indices = s.minmaxIndex;
|
| assert(indices == [[1, 1], [2, 3]]);
| assert(s[indices[0]] == -4);
| assert(s[indices[1]] == 8);
|}
|
|/++
|Finds a backward index such that
|`slice.backward(index)` is minimal (maximal).
|
|Params:
| pred = A predicate.
|
|See_also:
| $(LREF minIndex),
| $(LREF maxPos),
| $(LREF maxIndex),
| $(REF Slice.backward, mir,ndslice,slice).
|+/
|template minPos(alias pred = "a < b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slice = ndslice.
| Returns:
| Position (sub-ndslice) of the minimal (maximal) element; its shape is the corresponding backward index.
| The position's lengths equal zeros if the slice is empty.
| +/
| @optmath Slice!(Iterator, N, kind == Contiguous && N > 1 ? Canonical : kind)
| minPos(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| typeof(return) ret;
| auto iterator = slice.lightScope._iterator;
| if (!slice.anyEmpty)
| {
| minPosImpl!(pred, LightScopeOf!Iterator, N, kind)(ret._lengths, iterator, lightScope(slice));
| ret._iterator = slice._iterator + (iterator - slice.lightScope._iterator);
| }
| auto strides = slice.strides;
| foreach(i; Iota!(0, ret.S))
| {
| ret._strides[i] = strides[i];
| }
| return ret;
| }
| else
| alias minPos = .minPos!(naryFun!pred);
|}
|
|/// ditto
|template maxPos(alias pred = "a < b")
|{
| import mir.functional: naryFun, reverseArgs;
| alias maxPos = minPos!(reverseArgs!(naryFun!pred));
|}
|
|///
|version(mir_test)
|unittest
|{
| auto s = [
| 2, 6, 4, -3,
| 0, -4, -3, 3,
| -3, -2, 7, 2,
| ].sliced(3, 4);
|
| auto pos = s.minPos;
|
| assert(pos == s[$ - 2 .. $, $ - 3 .. $]);
| assert(pos.first == -4);
| assert(s.backward(pos.shape) == -4);
|
| pos = s.maxPos;
|
| assert(pos == s[$ - 1 .. $, $ - 2 .. $]);
| assert(pos.first == 7);
| assert(s.backward(pos.shape) == 7);
|}
|
|/++
|Finds an index such that
|`slice[index]` is minimal (maximal).
|
|Params:
| pred = A predicate.
|
|See_also:
| $(LREF minPos),
| $(LREF maxPos),
| $(LREF maxIndex).
|+/
|template minIndex(alias pred = "a < b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slice = ndslice.
| Returns:
| Multidimensional index such that the element is minimal (maximal).
| Index elements equal `size_t.max` if the slice is empty.
| +/
| @optmath size_t[N] minIndex(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| size_t[N] ret = size_t.max;
| if (!slice.anyEmpty)
| {
| ret = slice.shape;
| auto scopeSlice = lightScope(slice);
| auto iterator = scopeSlice._iterator;
| minPosImpl!(pred, LightScopeOf!Iterator, N, kind)(ret, iterator, scopeSlice);
| foreach (i; Iota!N)
| ret[i] = slice._lengths[i] - ret[i];
| }
| return ret;
| }
| else
| alias minIndex = .minIndex!(naryFun!pred);
|}
|
|/// ditto
|template maxIndex(alias pred = "a < b")
|{
| import mir.functional: naryFun, reverseArgs;
| alias maxIndex = minIndex!(reverseArgs!(naryFun!pred));
|}
|
|///
|version(mir_test)
|unittest
|{
| auto s = [
| 2, 6, 4, -3,
| 0, -4, -3, 3,
| -3, -2, 7, 8,
| ].sliced(3, 4);
|
| auto index = s.minIndex;
|
| assert(index == [1, 1]);
| assert(s[index] == -4);
|
| index = s.maxIndex;
|
| assert(index == [2, 3]);
| assert(s[index] == 8);
|}
|
|///
|version(mir_test)
|unittest
|{
| auto s = [
| -8, 6, 4, -3,
| 0, -4, -3, 3,
| -3, -2, 7, 8,
| ].sliced(3, 4);
|
| auto index = s.minIndex;
|
| assert(index == [0, 0]);
| assert(s[index] == -8);
|}
|
|version(mir_test)
|unittest
|{
| auto s = [
| 0, 1, 2, 3,
| 4, 5, 6, 7,
| 8, 9, 10, 11
| ].sliced(3, 4);
|
| auto index = s.minIndex;
| assert(index == [0, 0]);
| assert(s[index] == 0);
|
| index = s.maxIndex;
| assert(index == [2, 3]);
| assert(s[index] == 11);
|}
|
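|// Kernel for `find` and `findIndex`; the first two `static if` branches are intended as accelerated paths for bitwise and retro-bitwise slices.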
|bool findImpl(alias fun, size_t N, Slices...)(scope ref size_t[N] backwardIndex, Slices slices)
| if (Slices.length)
|{
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I))
| {
| auto cnt = BitSliceAccelerator!(Field, I)(slices[0]).cttz;
| if (cnt == -1)
| return false;
| backwardIndex[0] = slices[0].length - cnt;
| return true;
| }
| else
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I))
| {
| import mir.ndslice.topology: retro;
| auto cnt = BitSliceAccelerator!(Field, I)(slices[0].retro).ctlz;
| if (cnt == -1)
| return false;
| backwardIndex[0] = slices[0].length - cnt;
| return true;
| }
| else
| {
| do
| {
| static if (DimensionCount!(Slices[0]) == 1)
| {
| if (fun(frontOf!slices))
| {
| backwardIndex[0] = slices[0].length;
| return true;
| }
| }
| else
| {
| if (findImpl!fun(backwardIndex[1 .. $], frontOf!slices))
| {
| backwardIndex[0] = slices[0].length;
| return true;
| }
| }
| foreach_reverse(ref slice; slices)
| slice.popFront;
| }
| while(!slices[0].empty);
| return false;
| }
|}
|
|/++
|Finds an index such that
|`pred(slices[0][index], ..., slices[$-1][index])` is `true`.
|
|Params:
| pred = A predicate.
|
|See_also:
| $(LREF find),
| $(LREF any).
|Optimization:
| `findIndex!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation).
|+/
|template findIndex(alias pred)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slices = One or more slices.
| Returns:
| Multidimensional index such that the predicate is true.
| Index equals `size_t.max` if the predicate evaluates to `false` for all indices.
| Constraints:
| All slices must have the same shape.
| +/
| @optmath Select!(DimensionCount!(Slices[0]) > 1, size_t[DimensionCount!(Slices[0])], size_t) findIndex(Slices...)(Slices slices)
| if (Slices.length)
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| size_t[DimensionCount!(Slices[0])] ret = -1;
| auto lengths = slices[0].shape;
| if (!slices[0].anyEmpty && findImpl!pred(ret, allLightScope!slices))
| foreach (i; Iota!(DimensionCount!(Slices[0])))
| ret[i] = lengths[i] - ret[i];
| static if (DimensionCount!(Slices[0]) > 1)
| return ret;
| else
| return ret[0];
| }
| else
| alias findIndex = .findIndex!(naryFun!pred);
|}
|
|/// Ranges and arrays
|version(mir_test)
|unittest
|{
| import std.range : iota;
| // 0 1 2 3 4 5
| auto sl = iota(5);
| size_t index = sl.findIndex!"a == 3";
|
| assert(index == 3);
| assert(sl[index] == 3);
|
| assert(sl.findIndex!(a => a == 8) == size_t.max);
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3);
| size_t[2] index = sl.findIndex!(a => a == 3);
|
| assert(sl[index] == 3);
|
| index = sl.findIndex!"a == 6";
| assert(index[0] == size_t.max);
| assert(index[1] == size_t.max);
|}
|
|/++
|Finds a backward index such that
|`pred(slices[0].backward(index), ..., slices[$-1].backward(index))` is `true`.
|
|Params:
| pred = A predicate.
|
|Optimization:
| To check if any element was found
| use the last dimension (row index).
| This will slightly optimize the code.
|--------
|if (backwardIndex)
|{
| auto elem1 = slice1.backward(backwardIndex);
| //...
| auto elemK = sliceK.backward(backwardIndex);
|}
|else
|{
| // not found
|}
|--------
|
|See_also:
| $(LREF findIndex),
| $(LREF any),
| $(REF Slice.backward, mir,ndslice,slice).
|
|Optimization:
| `find!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation).
|+/
|template find(alias pred)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slices = One or more slices.
| Returns:
| Multidimensional backward index such that the predicate is true.
| Backward index equals zeros if the predicate evaluates to `false` for all indices.
| Constraints:
| All slices must have the same shape.
| +/
| @optmath Select!(DimensionCount!(Slices[0]) > 1, size_t[DimensionCount!(Slices[0])], size_t) find(Slices...)(auto ref Slices slices)
| if (Slices.length && allSatisfy!(hasShape, Slices))
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| size_t[DimensionCount!(Slices[0])] ret;
| if (!slices[0].anyEmpty)
| findImpl!pred(ret, allLightScope!slices);
| static if (DimensionCount!(Slices[0]) > 1)
| return ret;
| else
| return ret[0];
| }
| else
| alias find = .find!(naryFun!pred);
|}
|
|/// Ranges and arrays
|version(mir_test)
|unittest
|{
| import std.range : iota;
|
| auto sl = iota(10);
| size_t index = sl.find!"a == 3";
|
| assert(sl[$ - index] == 3);
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3);
| size_t[2] bi = sl.find!"a == 3";
| assert(sl.backward(bi) == 3);
| assert(sl[$ - bi[0], $ - bi[1]] == 3);
|
| bi = sl.find!"a == 6";
| assert(bi[0] == 0);
| assert(bi[1] == 0);
|}
|
|/// Multiple slices
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| // 0 1 2
| // 3 4 5
| auto a = iota(2, 3);
| // 10 11 12
| // 13 14 15
| auto b = iota([2, 3], 10);
|
| size_t[2] bi = find!((a, b) => a * b == 39)(a, b);
| assert(a.backward(bi) == 3);
| assert(b.backward(bi) == 13);
|}
|
|/// Zipped slices
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, zip;
|
| // 0 1 2
| // 3 4 5
| auto a = iota(2, 3);
| // 10 11 12
| // 13 14 15
| auto b = iota([2, 3], 10);
|
| size_t[2] bi = zip!true(a, b).find!"a.a * a.b == 39";
|
| assert(a.backward(bi) == 3);
| assert(b.backward(bi) == 13);
|}
|
|/// Mutation on-the-fly
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3).as!double.slice;
|
| static bool pred(T)(ref T a)
| {
| if (a == 5)
| return true;
| a = 8;
| return false;
| }
|
| size_t[2] bi = sl.find!pred;
|
| assert(bi == [1, 1]);
| assert(sl.backward(bi) == 5);
|
| // sl was changed
| assert(sl == [[8, 8, 8],
| [8, 8, 5]]);
|}
|
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| size_t i;
| size_t[2] bi = iota(2, 0).find!((elem){i++; return true;});
| assert(i == 0);
| assert(bi == [0, 0]);
|}
|
|size_t anyImpl(alias fun, Slices...)(scope Slices slices)
| if (Slices.length)
|{
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I))
| {
| return BitSliceAccelerator!(Field, I)(slices[0]).any;
| }
| else
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I))
| {
| // pragma(msg, S);
| import mir.ndslice.topology: retro;
| return .anyImpl!fun(lightScope(slices[0]).retro);
| }
| else
| {
| do
| {
| static if (DimensionCount!(Slices[0]) == 1)
| {
| if (fun(frontOf!slices))
| return true;
| }
| else
| {
| if (anyImpl!fun(frontOf!slices))
| return true;
| }
| foreach_reverse(ref slice; slices)
| slice.popFront;
| }
| while(!slices[0].empty);
| return false;
| }
|}
|
|/++
|Like $(LREF find), but only returns whether or not the search was successful.
|
|Params:
| pred = The predicate.
|Optimization:
| `any!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation).
|+/
|template any(alias pred = "a")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slices = One or more slices, ranges, and arrays.
| Returns:
| `true` if the search was successful and `false` otherwise.
| Constraints:
| All slices must have the same shape.
| +/
| @optmath bool any(Slices...)(scope Slices slices)
| if ((Slices.length == 1 || !__traits(isSame, pred, "a")) && Slices.length)
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| static if (areAllContiguousSlices!Slices)
| {
| import mir.ndslice.topology: flattened;
| return .any!pred(allFlattened!(allLightScope!slices));
| }
| else
| {
| return !slices[0].anyEmpty && anyImpl!pred(allLightScope!slices);
| }
| }
| else
| alias any = .any!(naryFun!pred);
|}
|
|/// Ranges and arrays
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import std.range : iota;
| // 0 1 2 3 4 5
| auto r = iota(6);
|
| assert(r.any!"a == 3");
| assert(!r.any!"a == 6");
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3);
|
| assert(sl.any!"a == 3");
| assert(!sl.any!"a == 6");
|}
|
|/// Multiple slices
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| // 0 1 2
| // 3 4 5
| auto a = iota(2, 3);
| // 10 11 12
| // 13 14 15
| auto b = iota([2, 3], 10);
|
| assert(any!((a, b) => a * b == 39)(a, b));
|}
|
|/// Zipped slices
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, zip;
|
| // 0 1 2
| // 3 4 5
| auto a = iota(2, 3);
| // 10 11 12
| // 13 14 15
| auto b = iota([2, 3], 10);
|
| // slices must have the same strides
|
| assert(zip!true(a, b).any!"a.a * a.b == 39");
|}
|
|/// Mutation on-the-fly
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3).as!double.slice;
|
| static bool pred(T)(ref T a)
| {
| if (a == 5)
| return true;
| a = 8;
| return false;
| }
|
| assert(sl.any!pred);
|
| // sl was changed
| assert(sl == [[8, 8, 8],
| [8, 8, 5]]);
|}
|
|size_t allImpl(alias fun, Slices...)(scope Slices slices)
| if (Slices.length)
|{
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I))
| {
| return BitSliceAccelerator!(LightScopeOf!Field, I)(lightScope(slices[0])).all;
| }
| else
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I))
| {
| // pragma(msg, S);
| import mir.ndslice.topology: retro;
| return .allImpl!fun(lightScope(slices[0]).retro);
| }
| else
| {
| do
| {
| static if (DimensionCount!(Slices[0]) == 1)
| {
0000000| if (!fun(frontOf!slices))
0000000| return false;
| }
| else
| {
| if (!allImpl!fun(frontOf!slices))
| return false;
| }
0000000| foreach_reverse(ref slice; slices)
0000000| slice.popFront;
| }
0000000| while(!slices[0].empty);
0000000| return true;
| }
|}
|
|/++
|Checks if all of the elements verify `pred`.
|
|Params:
| pred = The predicate.
|Optimization:
| `all!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation).
|+/
|template all(alias pred = "a")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| slices = One or more slices.
| Returns:
| `true` if all of the elements verify `pred` and `false` otherwise.
| Constraints:
| All slices must have the same shape.
| +/
| @optmath bool all(Slices...)(scope Slices slices)
| if ((Slices.length == 1 || !__traits(isSame, pred, "a")) && Slices.length)
| {
| static if (Slices.length > 1)
0000000| slices.checkShapesMatch;
| static if (areAllContiguousSlices!Slices)
| {
| import mir.ndslice.topology: flattened;
0000000| return .all!pred(allFlattened!(allLightScope!slices));
| }
| else
| {
0000000| return slices[0].anyEmpty || allImpl!pred(allLightScope!slices);
| }
| }
| else
| alias all = .all!(naryFun!pred);
|}
|
|/// Ranges and arrays
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import std.range : iota;
| // 0 1 2 3 4 5
| auto r = iota(6);
|
| assert(r.all!"a < 6");
| assert(!r.all!"a < 5");
|}
|
|///
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3);
|
| assert(sl.all!"a < 6");
| assert(!sl.all!"a < 5");
|}
|
|/// Multiple slices
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3);
|
| assert(all!"a - b == 0"(sl, sl));
|}
|
|/// Zipped slices
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota, zip;
|
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3);
|
|
| assert(zip!true(sl, sl).all!"a.a - a.b == 0");
|}
|
|/// Mutation on-the-fly
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| // 0 1 2
| // 3 4 5
| auto sl = iota(2, 3).as!double.slice;
|
| static bool pred(T)(ref T a)
| {
| if (a < 4)
| {
| a = 8;
| return true;
| }
| return false;
| }
|
| assert(!sl.all!pred);
|
| // sl was changed
| assert(sl == [[8, 8, 8],
| [8, 4, 5]]);
|}
|
|@safe pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| size_t i;
| assert(iota(2, 0).all!((elem){i++; return true;}));
| assert(i == 0);
|}
|
|/++
|Counts the elements in the slices that satisfy `fun`.
|Params:
| fun = A predicate.
|
|Optimization:
| `count!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation).
|+/
|template count(alias fun)
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!fun, fun))
| /++
| Params:
| slices = One or more slices, ranges, and arrays.
|
| Returns: The number of elements that satisfy `fun`.
|
| Constraints:
| All slices must have the same shape.
| +/
| @optmath size_t count(Slices...)(scope Slices slices)
| if (Slices.length)
| {
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| static if (__traits(isSame, fun, naryFun!"true"))
| {
| return slices[0].elementCount;
| }
| else
| static if (areAllContiguousSlices!Slices)
| {
| import mir.ndslice.topology: flattened;
| return .count!fun(allFlattened!(allLightScope!slices));
| }
| else
| {
| if (slices[0].anyEmpty)
| return 0;
| return countImpl!(fun)(allLightScope!slices);
| }
| }
| else
| alias count = .count!(naryFun!fun);
|
|}
|
|/// Ranges and arrays
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import std.range : iota;
| // 0 1 2 3 4 5
| auto r = iota(6);
|
| assert(r.count!"true" == 6);
| assert(r.count!"a" == 5);
| assert(r.count!"a % 2" == 3);
|}
|
|/// Single slice
|version(mir_test)
|unittest
|{
| import mir.ndslice.topology : iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| auto sl = iota(2, 3);
|
| assert(sl.count!"true" == 6);
| assert(sl.count!"a" == 5);
| assert(sl.count!"a % 2" == 3);
|}
|
|/// Accelerated set bit count
|version(mir_test)
|unittest
|{
| import mir.ndslice.topology: retro, iota, bitwise;
| import mir.ndslice.allocation: slice;
|
| //| 0 1 2 |
| //| 3 4 5 |
| auto sl = iota!size_t(2, 3).bitwise;
|
| assert(sl.count!"true" == 6 * size_t.sizeof * 8);
|
| assert(sl.slice.count!"a" == 7);
|
| // accelerated
| assert(sl.count!"a" == 7);
| assert(sl.retro.count!"a" == 7);
|
| auto sl2 = iota!ubyte([6], 128).bitwise;
| // accelerated
| assert(sl2.count!"a" == 13);
| assert(sl2[4 .. $].count!"a" == 13);
| assert(sl2[4 .. $ - 1].count!"a" == 12);
| assert(sl2[4 .. $ - 1].count!"a" == 12);
| assert(sl2[41 .. $ - 1].count!"a" == 1);
|}
|
|unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: bitwise, assumeFieldsHaveZeroShift;
| auto sl = slice!uint([6]).bitwise;
| auto slb = slice!ubyte([6]).bitwise;
| slb[4] = true;
| auto d = slb[4];
| auto c = assumeFieldsHaveZeroShift(slb & ~slb);
| // pragma(msg, typeof(c));
| assert(!sl.any);
| assert((~sl).all);
| // pragma(msg, typeof(~slb));
| // pragma(msg, typeof(~slb));
| // assert(sl.findIndex);
|}
|
|/++
|Compares two or more slices for equality, as defined by predicate `pred`.
|
|See_also: $(NDSLICEREF slice, Slice.opEquals)
|
|Params:
| pred = The predicate.
|+/
|template equal(alias pred = "a == b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| {
| /++
| Params:
| slices = Two or more ndslices, ranges, or arrays.
|
| Returns:
| `true` if all corresponding elements verify `pred` and `false` otherwise.
| +/
| bool equal(Slices...)(scope Slices slices)
| if (Slices.length >= 2)
| {
| import mir.internal.utility;
| static if (allSatisfy!(hasShape, Slices))
| {
0000000| auto shape0 = slices[0].shape;
| enum N = DimensionCount!(Slices[0]);
0000000| foreach (ref slice; slices[1 .. $])
| {
0000000| if (slice.shape != shape0)
0000000| goto False;
| }
0000000| return all!pred(allLightScope!slices);
| }
| else
| {
| for(;;)
| {
| auto empty = slices[0].empty;
| foreach (ref slice; slices[1 .. $])
| {
| if (slice.empty != empty)
| goto False;
| }
| if (empty)
| return true;
| if (!pred(frontOf!slices))
| goto False;
| foreach (ref slice; slices)
| slice.popFront;
| }
| }
0000000| False: return false;
| }
| }
| else
| alias equal = .equal!(naryFun!pred);
|}
|
|/// Ranges and arrays
|@safe pure nothrow
|version(mir_test) unittest
|{
| import std.range : iota;
| auto r = iota(6);
| assert(r.equal([0, 1, 2, 3, 4, 5]));
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : iota;
|
| // 0 1 2
| // 3 4 5
| auto sl1 = iota(2, 3);
| // 1 2 3
| // 4 5 6
| auto sl2 = iota([2, 3], 1);
|
| assert(equal(sl1, sl1));
| assert(sl1 == sl1); //can also use opEquals for two Slices
| assert(equal!"2 * a == b + c"(sl1, sl1, sl1));
|
| assert(equal!"a < b"(sl1, sl2));
|
| assert(!equal(sl1[0 .. $ - 1], sl1));
| assert(!equal(sl1[0 .. $, 0 .. $ - 1], sl1));
|}
|
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.math.common: approxEqual;
| import mir.ndslice.allocation: rcslice;
| import mir.ndslice.topology: as, iota;
|
| auto x = 5.iota.as!double.rcslice;
| auto y = x.rcslice;
|
| assert(equal(x, y));
| assert(equal!approxEqual(x, y));
|}
|
|ptrdiff_t cmpImpl(alias pred, A, B)
| (scope A sl1, scope B sl2)
| if (DimensionCount!A == DimensionCount!B)
|{
| for (;;)
| {
| static if (DimensionCount!A == 1)
| {
| import mir.functional : naryFun;
| if (naryFun!pred(sl1.front, sl2.front))
| return -1;
| if (naryFun!pred(sl2.front, sl1.front))
| return 1;
| }
| else
| {
| if (auto res = .cmpImpl!pred(sl1.front, sl2.front))
| return res;
| }
| sl1.popFront;
| if (sl1.empty)
| return -cast(ptrdiff_t)(sl2.length > 1);
| sl2.popFront;
| if (sl2.empty)
| return 1;
| }
|}
|
|/++
|Performs three-way recursive lexicographical comparison on two slices according to predicate `pred`.
|Iterating `sl1` and `sl2` in lockstep, `cmp` compares each `N-1` dimensional element `e1` of `sl1`
|with the corresponding element `e2` in `sl2` recursively.
|If one of the slices is exhausted first, `cmp` returns a negative value if `sl1` has fewer elements than `sl2`,
|a positive value if `sl1` has more elements than `sl2`,
|and `0` if the ranges have the same number of elements.
|
|Params:
| pred = The predicate.
|+/
|template cmp(alias pred = "a < b")
|{
| import mir.functional: naryFun;
| static if (__traits(isSame, naryFun!pred, pred))
| /++
| Params:
| sl1 = First slice, range, or array.
| sl2 = Second slice, range, or array.
|
| Returns:
| `0` if both ranges compare equal.
| Negative value if the first differing element of `sl1` is less than the corresponding
| element of `sl2` according to `pred`.
| Positive value if the first differing element of `sl2` is less than the corresponding
| element of `sl1` according to `pred`.
| +/
| auto cmp(A, B)
| (scope A sl1, scope B sl2)
| if (DimensionCount!A == DimensionCount!B)
| {
| auto b = sl2.anyEmpty;
| if (sl1.anyEmpty)
| {
| if (!b)
| return -1;
| auto sh1 = sl1.shape;
| auto sh2 = sl2.shape;
| foreach (i; Iota!(DimensionCount!A))
| if (sh1[i] != sh2[i])
| return sh1[i] > sh2[i] ? 1 : -1;
| return 0;
| }
| if (b)
| return 1;
| return cmpImpl!pred(lightScope(sl1), lightScope(sl2));
| }
| else
| alias cmp = .cmp!(naryFun!pred);
|}
|
|/// Ranges and arrays
|@safe pure nothrow
|version(mir_test) unittest
|{
| import std.range : iota;
|
| // 0 1 2 3 4 5
| auto r1 = iota(0, 6);
| // 1 2 3 4 5 6
| auto r2 = iota(1, 7);
|
| assert(cmp(r1, r1) == 0);
| assert(cmp(r1, r2) < 0);
| assert(cmp!"a >= b"(r1, r2) > 0);
|}
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| // 0 1 2
| // 3 4 5
| auto sl1 = iota(2, 3);
| // 1 2 3
| // 4 5 6
| auto sl2 = iota([2, 3], 1);
|
| assert(cmp(sl1, sl1) == 0);
| assert(cmp(sl1, sl2) < 0);
| assert(cmp!"a >= b"(sl1, sl2) > 0);
|}
|
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| auto sl1 = iota(2, 3);
| auto sl2 = iota([2, 3], 1);
|
| assert(cmp(sl1[0 .. $ - 1], sl1) < 0);
| assert(cmp(sl1, sl1[0 .. $, 0 .. $ - 1]) > 0);
|
| assert(cmp(sl1[0 .. $ - 2], sl1) < 0);
| assert(cmp(sl1, sl1[0 .. $, 0 .. $ - 3]) > 0);
| assert(cmp(sl1[0 .. $, 0 .. $ - 3], sl1[0 .. $, 0 .. $ - 3]) == 0);
| assert(cmp(sl1[0 .. $, 0 .. $ - 3], sl1[0 .. $ - 1, 0 .. $ - 3]) > 0);
| assert(cmp(sl1[0 .. $ - 1, 0 .. $ - 3], sl1[0 .. $, 0 .. $ - 3]) < 0);
|}
|
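|// Kernel for `count`, with accelerated popcount paths for bitwise and retro-bitwise slices.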
|size_t countImpl(alias fun, Slices...)(scope Slices slices)
|{
| size_t ret;
| alias S = Slices[0];
| import mir.functional: naryFun;
| import mir.ndslice.iterator: FieldIterator, RetroIterator;
| import mir.ndslice.field: BitField;
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I))
| {
| ret = BitSliceAccelerator!(Field, I)(slices[0]).ctpop;
| }
| else
| static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I))
| {
| // pragma(msg, S);
| import mir.ndslice.topology: retro;
| ret = .countImpl!fun(lightScope(slices[0]).retro);
| }
| else
| do
| {
| static if (DimensionCount!(Slices[0]) == 1)
| {
| if(fun(frontOf!slices))
| ret++;
| }
| else
| ret += .countImpl!fun(frontOf!slices);
| foreach_reverse(ref slice; slices)
| slice.popFront;
| }
| while(!slices[0].empty);
| return ret;
|}
|
|/++
|Returns: the maximum length across all dimensions.
|+/
|size_t maxLength(S)(auto ref scope S s)
| if (hasShape!S)
|{
| auto shape = s.shape;
| size_t length = 0;
| foreach(i; Iota!(shape.length))
| if (shape[i] > length)
| length = shape[i];
| return length;
|}
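|
|// Added usage sketch (not part of the original sources): the largest
|// dimension of a 2x3 slice is 3.
|version(mir_test)
|unittest
|{
| import mir.ndslice.topology : iota;
| assert(iota(2, 3).maxLength == 3);
|}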
|
|/++
|The call `eachLower!(fun)(slice1, ..., sliceN)` evaluates `fun` on the lower
|triangle in `slice1, ..., sliceN` respectively.
|
|`eachLower` allows iterating multiple slices in lockstep.
|
|Params:
| fun = A function
|See_Also:
| This is functionally similar to $(LREF each).
|+/
|template eachLower(alias fun)
|{
| import mir.functional : naryFun;
|
| static if (__traits(isSame, naryFun!fun, fun))
| {
| /++
| Params:
| inputs = One or more two-dimensional slices and an optional
| integer, `k`.
|
| The value `k` determines which diagonals will have the function
| applied:
| For k = 0, the function is also applied to the main diagonal.
| For k = 1 (default), only the non-main diagonals below the main
| diagonal will have the function applied.
| For k > 1, fewer diagonals below the main diagonal will have the
| function applied.
| For k < 0, more diagonals above the main diagonal will have the
| function applied.
| +/
| void eachLower(Inputs...)(scope Inputs inputs)
| if (((Inputs.length > 1) &&
| (isIntegral!(Inputs[$ - 1]))) ||
| (Inputs.length))
| {
| import mir.ndslice.traits : isMatrix;
|
| size_t val;
|
| static if ((Inputs.length > 1) && (isIntegral!(Inputs[$ - 1])))
| {
| immutable(sizediff_t) k = inputs[$ - 1];
| alias Slices = Inputs[0..($ - 1)];
| alias slices = inputs[0..($ - 1)];
| }
| else
| {
| enum sizediff_t k = 1;
| alias Slices = Inputs;
| alias slices = inputs;
| }
|
| static assert (allSatisfy!(isMatrix, Slices),
| "eachLower: Every slice input must be a two-dimensional slice");
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| if (slices[0].anyEmpty)
| return;
|
| foreach(ref slice; slices)
| assert(!slice.empty);
|
| immutable(size_t) m = slices[0].length!0;
| immutable(size_t) n = slices[0].length!1;
|
| if ((n + k) < m)
| {
| val = m - (n + k);
| .eachImpl!fun(selectBackOf!(val, slices));
| }
|
| size_t i;
|
| if (k > 0)
| {
| foreach(ref slice; slices)
| slice.popFrontExactly!0(k);
| i = k;
| }
|
| do
| {
| val = i - k + 1;
| .eachImpl!fun(frontSelectFrontOf!(val, slices));
|
| foreach(ref slice; slices)
| slice.popFront!0;
| i++;
| } while ((i < (n + k)) && (i < m));
| }
| }
| else
| {
| alias eachLower = .eachLower!(naryFun!fun);
| }
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota, canonical, universal;
| alias AliasSeq(T...) = T;
|
| pure nothrow
| void test(alias func)()
| {
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = func(iota([3, 3], 1).slice);
| m.eachLower!"a = 0"(0);
| assert(m == [
| [0, 2, 3],
| [0, 0, 6],
| [0, 0, 0]]);
| }
|
| @safe pure nothrow @nogc
| T identity(T)(T x)
| {
| return x;
| }
|
| alias kinds = AliasSeq!(identity, canonical, universal);
| test!(kinds[0]);
| test!(kinds[1]);
| test!(kinds[2]);
|}
|
|///
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachLower!"a = 0";
| assert(m == [
| [1, 2, 3],
| [0, 5, 6],
| [0, 0, 9]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachLower!"a = 0"(-1);
| assert(m == [
| [0, 0, 3],
| [0, 0, 0],
| [0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachLower!"a = 0"(2);
| assert(m == [
| [1, 2, 3],
| [4, 5, 6],
| [0, 8, 9]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachLower!"a = 0"(-2);
| assert(m == [
| [0, 0, 0],
| [0, 0, 0],
| [0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachLower!"a = 0"(0);
| assert(m == [
| [0, 2, 3, 4],
| [0, 0, 7, 8],
| [0, 0, 0, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachLower!"a = 0";
| assert(m == [
| [1, 2, 3, 4],
| [0, 6, 7, 8],
| [0, 0, 11, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachLower!"a = 0"(-1);
| assert(m == [
| [0, 0, 3, 4],
| [0, 0, 0, 8],
| [0, 0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachLower!"a = 0"(2);
| assert(m == [
| [1, 2, 3, 4],
| [5, 6, 7, 8],
| [0, 10, 11, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachLower!"a = 0"(-2);
| assert(m == [
| [0, 0, 0, 4],
| [0, 0, 0, 0],
| [0, 0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachLower!"a = 0"(0);
| assert(m == [
| [0, 2, 3],
| [0, 0, 6],
| [0, 0, 0],
| [0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachLower!"a = 0";
| assert(m == [
| [1, 2, 3],
| [0, 5, 6],
| [0, 0, 9],
| [0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachLower!"a = 0"(-1);
| assert(m == [
| [0, 0, 3],
| [0, 0, 0],
| [0, 0, 0],
| [0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachLower!"a = 0"(2);
| assert(m == [
| [1, 2, 3],
| [4, 5, 6],
| [0, 8, 9],
| [0, 0, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachLower!"a = 0"(-2);
| assert(m == [
| [0, 0, 0],
| [0, 0, 0],
| [0, 0, 0],
| [0, 0, 0]]);
|}
|
|/// Swap two slices
|pure nothrow
|version(mir_test) unittest
|{
| import mir.utility : swap;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| //| 6 7 8 |
| auto a = iota([3, 3]).as!double.slice;
| //| 10 11 12 |
| //| 13 14 15 |
| //| 16 17 18 |
| auto b = iota([3, 3], 10).as!double.slice;
|
| eachLower!swap(a, b);
|
| assert(a == [
| [ 0, 1, 2],
| [13, 4, 5],
| [16, 17, 8]]);
| assert(b == [
| [10, 11, 12],
| [ 3, 14, 15],
| [ 6, 7, 18]]);
|}
|
|/// Swap two zipped slices
|pure nothrow
|version(mir_test) unittest
|{
| import mir.utility : swap;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, zip, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| //| 6 7 8 |
| auto a = iota([3, 3]).as!double.slice;
| //| 10 11 12 |
| //| 13 14 15 |
| //| 16 17 18 |
| auto b = iota([3, 3], 10).as!double.slice;
|
| auto z = zip(a, b);
|
| z.eachLower!(z => swap(z.a, z.b));
|
| assert(a == [
| [ 0, 1, 2],
| [13, 4, 5],
| [16, 17, 8]]);
| assert(b == [
| [10, 11, 12],
| [ 3, 14, 15],
| [ 6, 7, 18]]);
|}
|
|/++
|The call `eachUpper!(fun)(slice1, ..., sliceN)` evaluates `fun` on the upper
|triangle in `slice1, ..., sliceN`, respectively.
|
|`eachUpper` allows iterating multiple slices in lockstep.
|
|Params:
| fun = A function
|See_Also:
| This is functionally similar to $(LREF each).
|+/
|template eachUpper(alias fun)
|{
| import mir.functional: naryFun;
|
| static if (__traits(isSame, naryFun!fun, fun))
| {
| /++
| Params:
| inputs = One or more two-dimensional slices and an optional
| integer, `k`.
|
| The value `k` determines which diagonals will have the function
| applied:
|        For k = 0, the function is also applied to the main diagonal.
|        For k = 1 (default), only the non-main diagonals above the main
|        diagonal will have the function applied.
|        For k > 1, fewer diagonals above the main diagonal will have the
|        function applied.
|        For k < 0, more diagonals below the main diagonal will have the
|        function applied.
| +/
| void eachUpper(Inputs...)(scope Inputs inputs)
| if (((Inputs.length > 1) &&
| (isIntegral!(Inputs[$ - 1]))) ||
| (Inputs.length))
| {
| import mir.ndslice.traits : isMatrix;
|
| size_t val;
|
| static if ((Inputs.length > 1) && (isIntegral!(Inputs[$ - 1])))
| {
| immutable(sizediff_t) k = inputs[$ - 1];
| alias Slices = Inputs[0..($ - 1)];
| alias slices = inputs[0..($ - 1)];
| }
| else
| {
| enum sizediff_t k = 1;
| alias Slices = Inputs;
| alias slices = inputs;
| }
|
| static assert (allSatisfy!(isMatrix, Slices),
| "eachUpper: Every slice input must be a two-dimensional slice");
| static if (Slices.length > 1)
| slices.checkShapesMatch;
| if (slices[0].anyEmpty)
| return;
|
| foreach(ref slice; slices)
| assert(!slice.empty);
|
| immutable(size_t) m = slices[0].length!0;
| immutable(size_t) n = slices[0].length!1;
|
| size_t i;
|
| if (k < 0)
| {
| val = -k;
| .eachImpl!fun(selectFrontOf!(val, slices));
|
| foreach(ref slice; slices)
| slice.popFrontExactly!0(-k);
| i = -k;
| }
|
| do
| {
| val = (n - k) - i;
| .eachImpl!fun(frontSelectBackOf!(val, slices));
|
| foreach(ref slice; slices)
| slice.popFront;
| i++;
| } while ((i < (n - k)) && (i < m));
| }
| }
| else
| {
| alias eachUpper = .eachUpper!(naryFun!fun);
| }
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota, canonical, universal;
|
| pure nothrow
| void test(alias func)()
| {
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = func(iota([3, 3], 1).slice);
| m.eachUpper!"a = 0"(0);
| assert(m == [
| [0, 0, 0],
| [4, 0, 0],
| [7, 8, 0]]);
| }
|
| @safe pure nothrow @nogc
| T identity(T)(T x)
| {
| return x;
| }
|
| alias kinds = AliasSeq!(identity, canonical, universal);
| test!(kinds[0]);
| test!(kinds[1]);
| test!(kinds[2]);
|}
|
|///
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachUpper!"a = 0";
| assert(m == [
| [1, 0, 0],
| [4, 5, 0],
| [7, 8, 9]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachUpper!"a = 0"(-1);
| assert(m == [
| [0, 0, 0],
| [0, 0, 0],
| [7, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachUpper!"a = 0"(2);
| assert(m == [
| [1, 2, 0],
| [4, 5, 6],
| [7, 8, 9]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| auto m = iota([3, 3], 1).slice;
| m.eachUpper!"a = 0"(-2);
| assert(m == [
| [0, 0, 0],
| [0, 0, 0],
| [0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachUpper!"a = 0"(0);
| assert(m == [
| [0, 0, 0, 0],
| [5, 0, 0, 0],
| [9, 10, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachUpper!"a = 0";
| assert(m == [
| [1, 0, 0, 0],
| [5, 6, 0, 0],
| [9, 10, 11, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachUpper!"a = 0"(-1);
| assert(m == [
| [0, 0, 0, 0],
| [0, 0, 0, 0],
| [9, 0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachUpper!"a = 0"(2);
| assert(m == [
| [1, 2, 0, 0],
| [5, 6, 7, 0],
| [9, 10, 11, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 4 |
| //| 5 6 7 8 |
| //| 9 10 11 12 |
| auto m = iota([3, 4], 1).slice;
| m.eachUpper!"a = 0"(-2);
| assert(m == [
| [0, 0, 0, 0],
| [0, 0, 0, 0],
| [0, 0, 0, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachUpper!"a = 0"(0);
| assert(m == [
| [0, 0, 0],
| [4, 0, 0],
| [7, 8, 0],
| [10, 11, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachUpper!"a = 0";
| assert(m == [
| [1, 0, 0],
| [4, 5, 0],
| [7, 8, 9],
| [10, 11, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachUpper!"a = 0"(-1);
| assert(m == [
| [0, 0, 0],
| [0, 0, 0],
| [7, 0, 0],
| [10, 11, 0]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachUpper!"a = 0"(2);
| assert(m == [
| [1, 2, 0],
| [4, 5, 6],
| [7, 8, 9],
| [10, 11, 12]]);
|}
|
|pure nothrow
|version(mir_test) unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: iota;
|
| //| 1 2 3 |
| //| 4 5 6 |
| //| 7 8 9 |
| //| 10 11 12 |
| auto m = iota([4, 3], 1).slice;
| m.eachUpper!"a = 0"(-2);
| assert(m == [
| [0, 0, 0],
| [0, 0, 0],
| [0, 0, 0],
| [10, 0, 0]]);
|}
|
|/// Swap two slices
|pure nothrow
|version(mir_test) unittest
|{
| import mir.utility : swap;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| //| 6 7 8 |
| auto a = iota([3, 3]).as!double.slice;
| //| 10 11 12 |
| //| 13 14 15 |
| //| 16 17 18 |
| auto b = iota([3, 3], 10).as!double.slice;
|
| eachUpper!swap(a, b);
|
| assert(a == [
| [0, 11, 12],
| [3, 4, 15],
| [6, 7, 8]]);
| assert(b == [
| [10, 1, 2],
| [13, 14, 5],
| [16, 17, 18]]);
|}
|
|/// Swap two zipped slices
|pure nothrow
|version(mir_test) unittest
|{
| import mir.utility : swap;
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : as, zip, iota;
|
| //| 0 1 2 |
| //| 3 4 5 |
| //| 6 7 8 |
| auto a = iota([3, 3]).as!double.slice;
| //| 10 11 12 |
| //| 13 14 15 |
| //| 16 17 18 |
| auto b = iota([3, 3], 10).as!double.slice;
|
| auto z = zip(a, b);
|
| z.eachUpper!(z => swap(z.a, z.b));
|
| assert(a == [
| [0, 11, 12],
| [3, 4, 15],
| [6, 7, 8]]);
| assert(b == [
| [10, 1, 2],
| [13, 14, 5],
| [16, 17, 18]]);
|}
|
|// uniq
|/**
|Lazily iterates unique consecutive elements of the given range (functionality
|akin to the $(HTTP wikipedia.org/wiki/_Uniq, _uniq) system
|utility). Equivalence of elements is assessed by using the predicate
|$(D pred), by default $(D "a == b"). The predicate is passed to
|$(REF nary, mir,functional), and can either accept a string, or any callable
|that can be executed via $(D pred(element, element)). If the given range is
|bidirectional, $(D uniq) also yields a bidirectional range.
|Params:
| pred = Predicate for determining equivalence between range elements.
|*/
|template uniq(alias pred = "a == b")
|{
| static if (__traits(isSame, naryFun!pred, pred))
| {
| /++
| Params:
| r = An input range of elements to filter.
| Returns:
| An input range of
| consecutively unique elements in the original range. If `r` is also a
| forward range or bidirectional range, the returned range will be likewise.
| +/
| Uniq!(naryFun!pred, Range) uniq(Range)(Range r)
| if (isInputRange!Range && !isSlice!Range)
| {
| import core.lifetime: move;
| return typeof(return)(r.move);
| }
|
| /// ditto
| auto uniq(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| import mir.ndslice.topology: flattened;
| import core.lifetime: move;
| auto r = slice.move.flattened;
| return Uniq!(pred, typeof(r))(move(r));
| }
| }
| else
| alias uniq = .uniq!(naryFun!pred);
|}
|
|///
|@safe version(mir_test) unittest
|{
| int[] arr = [ 1, 2, 2, 2, 2, 3, 4, 4, 4, 5 ];
| assert(equal(uniq(arr), [ 1, 2, 3, 4, 5 ]));
|
| import std.algorithm.mutation : copy;
| // Filter duplicates in-place using copy
| arr.length -= arr.uniq.copy(arr).length;
| assert(arr == [ 1, 2, 3, 4, 5 ]);
|
| // Note that uniqueness is only determined consecutively; duplicated
| // elements separated by an intervening different element will not be
| // eliminated:
| assert(equal(uniq([ 1, 1, 2, 1, 1, 3, 1]), [1, 2, 1, 3, 1]));
|}
|
|/// N-dimensional case
|version(mir_test)
|@safe pure unittest
|{
| import mir.ndslice.fuse;
| import mir.ndslice.topology: byDim, map, iota;
|
| auto matrix = [ [1, 2, 2], [2, 2, 3], [4, 4, 4] ].fuse;
|
| assert(matrix.uniq.equal([ 1, 2, 3, 4 ]));
|
| // unique elements for each row
| assert(matrix.byDim!0.map!uniq.equal!equal([ [1, 2], [2, 3], [4] ]));
|}
|
|/++
|Authors: $(HTTP erdani.com, Andrei Alexandrescu) (original Phobos code), Ilya Yaroshenko (betterC rework)
|+/
|struct Uniq(alias pred, Range)
|{
| Range _input;
|
| ref opSlice() inout
| {
| return this;
| }
|
| void popFront() scope
| {
| assert(!empty, "Attempting to popFront an empty uniq.");
| auto last = _input.front;
| do
| {
| _input.popFront();
| }
| while (!_input.empty && pred(last, _input.front));
| }
|
| auto ref front() @property
| {
| assert(!empty, "Attempting to fetch the front of an empty uniq.");
| return _input.front;
| }
|
| static if (isBidirectionalRange!Range)
| {
| void popBack() scope
| {
| assert(!empty, "Attempting to popBack an empty uniq.");
| auto last = _input.back;
| do
| {
| _input.popBack();
| }
| while (!_input.empty && pred(last, _input.back));
| }
|
| auto ref back() scope return @property
| {
| assert(!empty, "Attempting to fetch the back of an empty uniq.");
| return _input.back;
| }
| }
|
| static if (isInfinite!Range)
| {
| enum bool empty = false; // Propagate infiniteness.
| }
| else
| {
| @property bool empty() const { return _input.empty; }
| }
|
| static if (isForwardRange!Range)
| {
| @property typeof(this) save() scope return
| {
| return typeof(this)(_input.save);
| }
| }
|}
|
|version(none)
|@safe version(mir_test) unittest
|{
| import std.internal.test.dummyrange;
| import std.range;
|
| int[] arr = [ 1, 2, 2, 2, 2, 3, 4, 4, 4, 5 ];
| auto r = uniq(arr);
| static assert(isForwardRange!(typeof(r)));
|
| assert(equal(r, [ 1, 2, 3, 4, 5 ][]));
| assert(equal(retro(r), retro([ 1, 2, 3, 4, 5 ][])));
|
| foreach (DummyType; AllDummyRanges)
| {
| DummyType d;
| auto u = uniq(d);
| assert(equal(u, [1,2,3,4,5,6,7,8,9,10]));
|
| static assert(d.rt == RangeType.Input || isForwardRange!(typeof(u)));
|
| static if (d.rt >= RangeType.Bidirectional)
| {
| assert(equal(retro(u), [10,9,8,7,6,5,4,3,2,1]));
| }
| }
|}
|
|@safe version(mir_test) unittest // https://issues.dlang.org/show_bug.cgi?id=17264
|{
| const(int)[] var = [0, 1, 1, 2];
| assert(var.uniq.equal([0, 1, 2]));
|}
|
|@safe version(mir_test) unittest {
| import mir.ndslice.allocation;
| import mir.math.common: approxEqual;
| auto x = rcslice!double(2);
| auto y = rcslice!double(2);
| x[] = [2, 3];
| y[] = [2, 3];
| assert(equal!approxEqual(x,y));
|}
|
|/++
|Implements the higher order filter function. The predicate is passed to
|`mir.functional.naryFun`, and can either accept a string, or any callable
|that can be executed via `pred(element)`.
|Params:
| pred = Function to apply to each element of range
|Returns:
| `filter!(pred)(range)` returns a new range containing only elements `x` in `range` for
| which `pred(x)` returns `true`.
|See_Also:
| $(HTTP en.wikipedia.org/wiki/Filter_(higher-order_function), Filter (higher-order function))
|Note:
|    $(RED User and library code MUST call the `empty` method before each call to `front` and/or `popFront`.)
|+/
|template filter(alias pred = "a")
|{
| static if (__traits(isSame, naryFun!pred, pred))
| {
| /++
| Params:
| r = An input range of elements to filter.
| Returns:
| A new range containing only elements `x` in `range` for which `predicate(x)` returns `true`.
| +/
| Filter!(naryFun!pred, Range) filter(Range)(Range r)
| if (isInputRange!Range && !isSlice!Range)
| {
| import core.lifetime: move;
| return typeof(return)(r.move);
| }
|
| /// ditto
| auto filter(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice)
| {
| import mir.ndslice.topology: flattened;
| import core.lifetime: move;
| auto r = slice.move.flattened;
| return Filter!(pred, typeof(r))(move(r));
| }
| }
| else
| alias filter = .filter!(naryFun!pred);
|}
|
|/// ditto
|struct Filter(alias pred, Range)
|{
| Range _input;
| version(assert) bool _freshEmpty;
|
| ref opSlice() inout
| {
| return this;
| }
|
| void popFront() scope
| {
| assert(!_input.empty, "Attempting to popFront an empty Filter.");
| version(assert) assert(_freshEmpty, "Attempting to pop the front of a Filter without calling '.empty' method ahead.");
| version(assert) _freshEmpty = false;
| _input.popFront;
| }
|
| auto ref front() @property
| {
| assert(!_input.empty, "Attempting to fetch the front of an empty Filter.");
| version(assert) assert(_freshEmpty, "Attempting to fetch the front of a Filter without calling '.empty' method ahead.");
| return _input.front;
| }
|
| bool empty() @property
| {
| version(assert) _freshEmpty = true;
| for (;;)
| {
| if (auto r = _input.empty)
| return true;
| if (pred(_input.front))
| return false;
| _input.popFront;
| }
| }
|
| static if (isForwardRange!Range)
| {
| @property typeof(this) save() scope return
| {
| return typeof(this)(_input.save);
| }
| }
|}
|
|///
|version(mir_test)
|@safe pure nothrow unittest
|{
| int[] arr = [ 0, 1, 2, 3, 4, 5 ];
|
| // Filter below 3
| auto small = filter!(a => a < 3)(arr);
| assert(equal(small, [ 0, 1, 2 ]));
|
| // Filter again, but with Uniform Function Call Syntax (UFCS)
| auto sum = arr.filter!(a => a < 3);
| assert(equal(sum, [ 0, 1, 2 ]));
|
| // Filter with the default predicate
| auto nonZeros = arr.filter;
| assert(equal(nonZeros, [ 1, 2, 3, 4, 5 ]));
|
| // In combination with concatenation() to span multiple ranges
| import mir.ndslice.concatenation;
|
| int[] a = [ 3, -2, 400 ];
| int[] b = [ 100, -101, 102 ];
| auto r = concatenation(a, b).filter!(a => a > 0);
| assert(equal(r, [ 3, 400, 100, 102 ]));
|
| // Mixing convertible types is fair game, too
| double[] c = [ 2.5, 3.0 ];
| auto r1 = concatenation(c, a, b).filter!(a => cast(int) a != a);
| assert(equal(r1, [ 2.5 ]));
|}
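|
|// Illustrative sketch of the contract from the Note above (not part of the
|// upstream tests): `empty` must be checked before each `front`/`popFront`
|// pair, because `empty` is what advances past non-matching elements.
|version(mir_test) unittest
|{
|    auto r = [0, 1, 0, 2].filter!"a != 0";
|    int[] collected;
|    while (!r.empty)     // required before touching front/popFront
|    {
|        collected ~= r.front;
|        r.popFront;
|    }
|    assert(collected == [1, 2]);
|}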
|
|/// N-dimensional filtering
|version(mir_test)
|@safe pure unittest
|{
| import mir.ndslice.fuse;
| import mir.ndslice.topology: byDim, map;
|
| auto matrix =
| [[ 3, -2, 400 ],
| [ 100, -101, 102 ]].fuse;
|
| alias filterPositive = filter!"a > 0";
|
| // filter all elements in the matrix
| auto r = filterPositive(matrix);
| assert(equal(r, [ 3, 400, 100, 102 ]));
|
| // filter all elements for each row
| auto rr = matrix.byDim!0.map!filterPositive;
| assert(equal!equal(rr, [ [3, 400], [100, 102] ]));
|
| // filter all elements for each column
| auto rc = matrix.byDim!1.map!filterPositive;
| assert(equal!equal(rc, [ [3, 100], [], [400, 102] ]));
|}
|
|/++
|Implements the homonym function (also known as `accumulate`, $(D
|compress), `inject`, or `foldl`) present in various programming
|languages of functional flavor. The call `fold!(fun)(slice, seed)`
|first assigns `seed` to an internal variable `result`,
|also called the accumulator. Then, for each element `x` in $(D
|slice), `result = fun(result, x)` gets evaluated. Finally, $(D
|result) is returned.
|
|Params:
| fun = the predicate function to apply to the elements
|
|See_Also:
| $(HTTP en.wikipedia.org/wiki/Fold_(higher-order_function), Fold (higher-order function))
|    $(LREF sum) is similar to `fold!((a, b) => a + b)`, but it offers
|    precise summing of floating point numbers.
| This is functionally equivalent to $(LREF reduce) with the argument order
| reversed.
|+/
|template fold(alias fun)
|{
| /++
| Params:
|        slice = A slice, range, or array.
| seed = An initial accumulation value.
| Returns:
| the accumulated result
| +/
| @optmath auto fold(Slice, S)(scope Slice slice, S seed)
| {
| import core.lifetime: move;
| return reduce!fun(seed, slice.move);
| }
|}
|
|///
|version(mir_test)
|@safe pure nothrow
|unittest
|{
| import mir.ndslice.slice: sliced;
| import mir.ndslice.topology: map;
|
| auto arr = [1, 2, 3, 4, 5].sliced;
|
| // Sum all elements
| assert(arr.fold!((a, b) => a + b)(0) == 15);
| assert(arr.fold!((a, b) => a + b)(6) == 21);
|
| // Can be used in a UFCS chain
| assert(arr.map!(a => a + 1).fold!((a, b) => a + b)(0) == 20);
|
| // Return the last element of any range
| assert(arr.fold!((a, b) => b)(0) == 5);
|}
|
|/// Works for matrices
|version(mir_test)
|@safe pure
|unittest
|{
| import mir.ndslice.fuse: fuse;
|
| auto arr = [
| [1, 2, 3],
| [4, 5, 6]
| ].fuse;
|
| assert(arr.fold!((a, b) => a + b)(0) == 21);
|}
|
|version(mir_test)
|@safe pure nothrow
|unittest
|{
| import mir.ndslice.topology: map;
|
| int[] arr = [1, 2, 3, 4, 5];
|
| // Sum all elements
| assert(arr.fold!((a, b) => a + b)(0) == 15);
| assert(arr.fold!((a, b) => a + b)(6) == 21);
|
| // Can be used in a UFCS chain
| assert(arr.map!(a => a + 1).fold!((a, b) => a + b)(0) == 20);
|
| // Return the last element of any range
| assert(arr.fold!((a, b) => b)(0) == 5);
|}
|
|version(mir_test)
|@safe pure nothrow
|unittest
|{
| int[] arr = [1];
| static assert(!is(typeof(arr.fold!()(0))));
| static assert(!is(typeof(arr.fold!(a => a)(0))));
| static assert(is(typeof(arr.fold!((a, b) => a)(0))));
| assert(arr.length == 1);
|}
|
|unittest
|{
| import mir.rc.array: RCArray;
| import mir.algorithm.iteration: minmaxPos, minPos, maxPos, minmaxIndex, minIndex, maxIndex;
|
| static immutable a = [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
|
| auto x = RCArray!double(12);
| foreach(i, ref e; x)
| e = a[i];
| auto y = x.asSlice;
| auto z0 = y.minmaxPos;
| auto z1 = y.minPos;
| auto z2 = y.maxPos;
| auto z3 = y.minmaxIndex;
| auto z4 = y.minIndex;
| auto z5 = y.maxIndex;
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/algorithm/iteration.d is 0% covered
<<<<<< EOF
# path=./-tmp-dub_test_root_a66906a3_e20b_435d_97a0_65fd9f267119.lst
|module dub_test_root;
|import std.typetuple;
|static import mir.glas.l1;
|static import mir.glas.l2;
|static import mir.model.lda.hoffman;
|static import mir.sparse.blas.axpy;
|static import mir.sparse.blas.dot;
|static import mir.sparse.blas.gemm;
|static import mir.sparse.blas.gemv;
|alias allModules = TypeTuple!(mir.glas.l1, mir.glas.l2, mir.model.lda.hoffman, mir.sparse.blas.axpy, mir.sparse.blas.dot, mir.sparse.blas.gemm, mir.sparse.blas.gemv);
|
| import std.stdio;
| import core.runtime;
|
0000000| void main() { writeln("All unit tests have been run successfully."); }
| shared static this() {
| version (Have_tested) {
| import tested;
| import core.runtime;
| import std.exception;
| Runtime.moduleUnitTester = () => true;
| enforce(runUnitTests!allModules(new ConsoleTestResultWriter), "Unit tests failed.");
| }
| }
|
/tmp/dub_test_root_a66906a3_e20b_435d_97a0_65fd9f267119.d is 0% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-field.lst
|/++
|This is a submodule of $(MREF mir,ndslice).
|
|Field is a type with `opIndex()(ptrdiff_t index)` primitive.
|An iterator can be created on top of a field using $(SUBREF iterator, FieldIterator).
|An ndslice can be created on top of a field using $(SUBREF slice, slicedField).
|
|$(BOOKTABLE $(H2 Fields),
|$(TR $(TH Field Name) $(TH Used By))
|$(T2 BitField, $(SUBREF topology, bitwise))
|$(T2 BitpackField, $(SUBREF topology, bitpack))
|$(T2 CycleField, $(SUBREF topology, cycle) (2 kinds))
|$(T2 LinspaceField, $(SUBREF topology, linspace))
|$(T2 MagicField, $(SUBREF topology, magic))
|$(T2 MapField, $(SUBREF topology, map) and $(SUBREF topology, mapField))
|$(T2 ndIotaField, $(SUBREF topology, ndiota))
|$(T2 OrthogonalReduceField, $(SUBREF topology, orthogonalReduceField))
|$(T2 RepeatField, $(SUBREF topology, repeat))
|$(T2 SparseField, Used for mutable DOK sparse matrices)
|)
|
|
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko
|
|Macros:
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|+/
|module mir.ndslice.field;
|
|import mir.internal.utility: Iota;
|import mir.math.common: optmath;
|import mir.ndslice.internal;
|import mir.qualifier;
|
|@optmath:
|
|package template ZeroShiftField(T)
|{
| static if (hasZeroShiftFieldMember!T)
| alias ZeroShiftField = typeof(T.init.assumeFieldsHaveZeroShift());
| else
| alias ZeroShiftField = T;
|}
|
|package enum hasZeroShiftFieldMember(T) = __traits(hasMember, T, "assumeFieldsHaveZeroShift");
|
|package auto applyAssumeZeroShift(Types...)()
|{
| import mir.ndslice.topology;
| string str;
| foreach(i, T; Types)
| static if (hasZeroShiftFieldMember!T)
| str ~= "_fields[" ~ i.stringof ~ "].assumeFieldsHaveZeroShift, ";
| else
| str ~= "_fields[" ~ i.stringof ~ "], ";
| return str;
|}
|
|auto MapField__map(Field, alias fun, alias fun1)(ref MapField!(Field, fun) f)
|{
| import core.lifetime: move;
| import mir.functional: pipe;
| return MapField!(Field, pipe!(fun, fun1))(move(f._field));
|}
|
|
|/++
|`MapField` is used by $(SUBREF topology, map).
|+/
|struct MapField(Field, alias _fun)
|{
|@optmath:
| ///
| Field _field;
|
| ///
| auto lightConst()() const @property
| {
| return MapField!(LightConstOf!Field, _fun)(.lightConst(_field));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return MapField!(LightImmutableOf!Field, _fun)(.lightImmutable(_field));
| }
|
| /++
| User defined constructor used by $(LREF mapField).
| +/
| static alias __map(alias fun1) = MapField__map!(Field, _fun, fun1);
|
| auto ref opIndex(T...)(auto ref T index)
| {
| import mir.functional: RefTuple, unref;
| static if (is(typeof(_field[index]) : RefTuple!K, K...))
| {
| auto t = _field[index];
| return mixin("_fun(" ~ _iotaArgs!(K.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(_field[index]);
| }
|
| static if (__traits(hasMember, Field, "length"))
| auto length() const @property
| {
| return _field.length;
| }
|
| static if (__traits(hasMember, Field, "shape"))
| auto shape() const @property
| {
| return _field.shape;
| }
|
| static if (__traits(hasMember, Field, "elementCount"))
| auto elementCount() const @property
| {
| return _field.elementCount;
| }
|
| static if (hasZeroShiftFieldMember!Field)
| /// Defined if `Field` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| return _mapField!_fun(_field.assumeFieldsHaveZeroShift);
| }
|}
|
|/++
|`VmapField` is used by $(SUBREF topology, map).
|+/
|struct VmapField(Field, Fun)
|{
|@optmath:
| ///
| Field _field;
| ///
| Fun _fun;
|
| ///
| auto lightConst()() const @property
| {
| return VmapField!(LightConstOf!Field, _fun)(.lightConst(_field));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return VmapField!(LightImmutableOf!Field, _fun)(.lightImmutable(_field));
| }
|
| auto ref opIndex(T...)(auto ref T index)
| {
| import mir.functional: RefTuple, unref;
| static if (is(typeof(_field[index]) : RefTuple!K, K...))
| {
| auto t = _field[index];
| return mixin("_fun(" ~ _iotaArgs!(K.length, "t.expand[", "].unref, ") ~ ")");
| }
| else
| return _fun(_field[index]);
| }
|
| static if (__traits(hasMember, Field, "length"))
| auto length() const @property
| {
| return _field.length;
| }
|
| static if (__traits(hasMember, Field, "shape"))
| auto shape() const @property
| {
| return _field.shape;
| }
|
| static if (__traits(hasMember, Field, "elementCount"))
| auto elementCount()const @property
| {
| return _field.elementCount;
| }
|
| static if (hasZeroShiftFieldMember!Field)
| /// Defined if `Field` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| return _vmapField(_field.assumeFieldsHaveZeroShift, _fun);
| }
|}
|
|/+
|Creates a mapped field. Uses `__map` if possible.
|+/
|auto _mapField(alias fun, Field)(Field field)
|{
| import mir.functional: naryFun;
| static if ((
| __traits(isSame, fun, naryFun!"a|b") ||
| __traits(isSame, fun, naryFun!"a^b") ||
| __traits(isSame, fun, naryFun!"a&b") ||
| __traits(isSame, fun, naryFun!"a | b") ||
| __traits(isSame, fun, naryFun!"a ^ b") ||
| __traits(isSame, fun, naryFun!"a & b")) &&
| is(Field : ZipField!(BitField!(LeftField, I), BitField!(RightField, I)), LeftField, RightField, I))
| {
| import mir.ndslice.topology: bitwiseField;
| auto f = ZipField!(LeftField, RightField)(field._fields[0]._field, field._fields[1]._field)._mapField!fun;
| return f.bitwiseField!(typeof(f), I);
| }
| else
| static if (__traits(hasMember, Field, "__map"))
| return Field.__map!fun(field);
| else
| return MapField!(Field, fun)(field);
|}
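|
|// Illustrative sketch (not part of the upstream tests): mapping a raw pointer
|// field applies the function lazily on every index access.
|version(mir_test) unittest
|{
|    import mir.functional: naryFun;
|    auto data = [1, 2, 3];
|    auto f = _mapField!(naryFun!"a * 2")(data.ptr);
|    assert(f[0] == 2);
|    assert(f[2] == 6);
|}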
|
|/+
|Creates a mapped field. Uses `__vmap` if possible.
|+/
|auto _vmapField(Field, Fun)(Field field, Fun fun)
|{
| static if (__traits(hasMember, Field, "__vmap"))
| return Field.__vmap(field, fun);
| else
| return VmapField!(Field, Fun)(field, fun);
|}
|
|/++
|Iterates multiple fields in lockstep.
|
|`ZipField` is used by $(SUBREF topology, zipFields).
|+/
|struct ZipField(Fields...)
| if (Fields.length > 1)
|{
|@optmath:
| import mir.functional: RefTuple, Ref, _ref;
| import std.meta: anySatisfy;
|
| ///
| Fields _fields;
|
| ///
| auto lightConst()() const @property
| {
| import std.format;
| import mir.ndslice.topology: iota;
| import std.meta: staticMap;
| return mixin("ZipField!(staticMap!(LightConstOf, Fields))(%(_fields[%s].lightConst,%)].lightConst)".format(_fields.length.iota));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| import std.format;
| import mir.ndslice.topology: iota;
| import std.meta: staticMap;
| return mixin("ZipField!(staticMap!(LightImmutableOf, Fields))(%(_fields[%s].lightImmutable,%)].lightImmutable)".format(_fields.length.iota));
| }
|
| auto opIndex()(ptrdiff_t index)
| {
| alias Iterators = Fields;
| alias _iterators = _fields;
| import mir.ndslice.iterator: _zip_types, _zip_index;
| return mixin("RefTuple!(_zip_types!Fields)(" ~ _zip_index!Fields ~ ")");
| }
|
| auto opIndexAssign(Types...)(RefTuple!(Types) value, ptrdiff_t index)
| if (Types.length == Fields.length)
| {
| foreach(i, ref val; value.expand)
| {
| _fields[i][index] = val;
| }
| return opIndex(index);
| }
|
| static if (anySatisfy!(hasZeroShiftFieldMember, Fields))
| /// Defined if at least one of `Fields` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| import std.meta: staticMap;
| return mixin("ZipField!(staticMap!(ZeroShiftField, Fields))(" ~ applyAssumeZeroShift!Fields ~ ")");
| }
|}
|
|/++
|`RepeatField` is used by $(SUBREF topology, repeat).
|+/
|struct RepeatField(T)
|{
| import std.traits: Unqual;
|
|@optmath:
| alias UT = Unqual!T;
|
| ///
| UT _value;
|
| ///
| auto lightConst()() const @property @trusted
| {
| return RepeatField!(const T)(cast(UT) _value);
| }
|
| ///
| auto lightImmutable()() immutable @property @trusted
| {
| return RepeatField!(immutable T)(cast(UT) _value);
| }
|
| auto ref T opIndex()(ptrdiff_t) @trusted
| { return cast(T) _value; }
|}
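|
|// Illustrative sketch (not part of the upstream tests): every index yields
|// the same stored value.
|version(mir_test) unittest
|{
|    auto f = RepeatField!int(7);
|    assert(f[0] == 7);
|    assert(f[100] == 7);
|}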
|
|/++
|`BitField` is used by $(SUBREF topology, bitwise).
|+/
|struct BitField(Field, I = typeof(cast()Field.init[size_t.init]))
| if (__traits(isUnsigned, I))
|{
|@optmath:
| import mir.bitop: ctlz;
| package(mir) alias E = I;
| package(mir) enum shift = ctlz(I.sizeof) + 3;
|
| ///
| Field _field;
|
| /// optimization for bitwise operations
| auto __vmap(Fun : LeftOp!(op, bool), string op)(Fun fun)
| if (op == "|" || op == "&" || op == "^")
| {
| import mir.ndslice.topology: bitwiseField;
| return _vmapField(_field, RightOp!(op, I)(I(0) - fun.value)).bitwiseField;
| }
|
| /// ditto
| auto __vmap(Fun : RightOp!(op, bool), string op)(Fun fun)
| if (op == "|" || op == "&" || op == "^")
| {
| import mir.ndslice.topology: bitwiseField;
| return _vmapField(_field, RightOp!(op, I)(I(0) - fun.value)).bitwiseField;
| }
|
| /// ditto
| auto __vmap(Fun)(Fun fun)
| {
| return VmapField!(typeof(this), Fun)(this, fun);
| }
|
| /// ditto
| alias __map(alias fun) = BitField__map!(Field, I, fun);
|
| ///
| auto lightConst()() const @property
| {
| return BitField!(LightConstOf!Field, I)(mir.qualifier.lightConst(_field));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return BitField!(LightImmutableOf!Field, I)(mir.qualifier.lightImmutable(_field));
| }
|
| bool opIndex()(size_t index)
| {
| import mir.bitop: bt;
| return bt!(Field, I)(_field, index) != 0;
| }
|
| bool opIndexAssign()(bool value, size_t index)
| {
| import mir.bitop: bta;
| bta!(Field, I)(_field, index, value);
| return value;
| }
|
| static if (hasZeroShiftFieldMember!Field)
| /// Defined if `Field` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| return BitField!(ZeroShiftField!Field, I)(_field.assumeFieldsHaveZeroShift);
| }
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.iterator: FieldIterator;
| ushort[10] data;
| auto f = FieldIterator!(BitField!(ushort*))(0, BitField!(ushort*)(data.ptr));
| f[123] = true;
| f++;
| assert(f[122]);
|}
|
|auto BitField__map(Field, I, alias fun)(BitField!(Field, I) field)
|{
| import core.lifetime: move;
| import mir.functional: naryFun;
| static if (__traits(isSame, fun, naryFun!"~a") || __traits(isSame, fun, naryFun!"!a"))
| {
| import mir.ndslice.topology: bitwiseField;
| auto f = _mapField!(naryFun!"~a")(move(field._field));
| return f.bitwiseField!(typeof(f), I);
| }
| else
| {
| return MapField!(BitField!(Field, I), fun)(move(field));
| }
|}
|
|/++
|`BitpackField` is used by $(SUBREF topology, bitpack).
|+/
|struct BitpackField(Field, uint pack, I = typeof(cast()Field.init[size_t.init]))
| if (__traits(isUnsigned, I))
|{
| //static assert();
|@optmath:
| package(mir) alias E = I;
| package(mir) enum mask = (I(1) << pack) - 1;
| package(mir) enum bits = I.sizeof * 8;
|
| ///
| Field _field;
|
| ///
| auto lightConst()() const @property
| {
| return BitpackField!(LightConstOf!Field, pack)(.lightConst(_field));
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| return BitpackField!(LightImmutableOf!Field, pack)(.lightImmutable(_field));
| }
|
| I opIndex()(size_t index)
| {
| index *= pack;
| size_t start = index % bits;
| index /= bits;
| auto ret = (_field[index] >>> start) & mask;
| static if (bits % pack)
| {
| sizediff_t end = start - (bits - pack);
| if (end > 0)
| ret ^= cast(I)(_field[index + 1] << (bits - end)) >>> (bits - pack);
| }
| return cast(I) ret;
| }
|
| I opIndexAssign()(I value, size_t index)
| {
| import std.traits: Unsigned;
| assert(cast(Unsigned!I)value <= mask);
| index *= pack;
| size_t start = index % bits;
| index /= bits;
| _field[index] = cast(I)((_field[index] & ~(mask << start)) ^ (value << start));
| static if (bits % pack)
| {
| sizediff_t end = start - (bits - pack);
| if (end > 0)
| _field[index + 1] = cast(I)((_field[index + 1] & ~((I(1) << end) - 1)) ^ (value >>> (pack - end)));
| }
| return value;
| }
|
| static if (hasZeroShiftFieldMember!Field)
| /// Defined if `Field` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| return BitpackField!(ZeroShiftField!Field, pack, I)(_field.assumeFieldsHaveZeroShift);
| }
|}
|
|///
|unittest
|{
| import mir.ndslice.iterator: FieldIterator;
| ushort[10] data;
| auto f = FieldIterator!(BitpackField!(ushort*, 6))(0, BitpackField!(ushort*, 6)(data.ptr));
| f[0] = cast(ushort) 31;
| f[1] = cast(ushort) 13;
| f[2] = cast(ushort) 8;
| f[3] = cast(ushort) 43;
| f[4] = cast(ushort) 28;
| f[5] = cast(ushort) 63;
| f[6] = cast(ushort) 39;
| f[7] = cast(ushort) 23;
| f[8] = cast(ushort) 44;
|
| assert(f[0] == 31);
| assert(f[1] == 13);
| assert(f[2] == 8);
| assert(f[3] == 43);
| assert(f[4] == 28);
| assert(f[5] == 63);
| assert(f[6] == 39);
| assert(f[7] == 23);
| assert(f[8] == 44);
| assert(f[9] == 0);
| assert(f[10] == 0);
| assert(f[11] == 0);
|}
|
|unittest
|{
| import mir.ndslice.slice;
| import mir.ndslice.topology;
| import mir.ndslice.sorting;
| uint[2] data;
| auto packed = data[].sliced.bitpack!18;
| assert(packed.length == 3);
| packed[0] = 5;
| packed[1] = 3;
| packed[2] = 2;
| packed.sort;
| assert(packed[0] == 2);
| assert(packed[1] == 3);
| assert(packed[2] == 5);
|}
|
|///
|struct OrthogonalReduceField(FieldsIterator, alias fun, T)
|{
| import mir.ndslice.slice: Slice;
|
|@optmath:
|    /// non-empty slice of fields
|    Slice!FieldsIterator _fields;
|
| ///
| T _initialValue;
|
| ///
| auto lightConst()() const @property
| {
| auto fields = _fields.lightConst;
| return OrthogonalReduceField!(fields.Iterator, fun, T)(fields, _initialValue);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| auto fields = _fields.lightImmutable;
| return OrthogonalReduceField!(fields.Iterator, fun, T)(fields, _initialValue);
| }
|
| /// `r = fun(r, fields[i][index]);` reduction by `i`
| auto opIndex()(size_t index)
| {
| import std.traits: Unqual;
| auto fields = _fields;
| T r = _initialValue;
| if (!fields.empty) do
| {
| r = cast(T) fun(r, fields.front[index]);
| fields.popFront;
| }
| while(!fields.empty);
| return r;
| }
|}
|
|///
|struct CycleField(Field)
|{
| import mir.ndslice.slice: Slice;
|
|@optmath:
| /// Cycle length
| size_t _length;
| ///
| Field _field;
|
| ///
| auto lightConst()() const @property
| {
| auto field = .lightConst(_field);
| return CycleField!(typeof(field))(_length, field);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| auto field = .lightImmutable(_field);
| return CycleField!(typeof(field))(_length, field);
| }
|
| ///
| auto ref opIndex()(size_t index)
| {
| return _field[index % _length];
| }
|
| ///
| static if (!__traits(compiles, &opIndex(size_t.init)))
| {
| auto ref opIndexAssign(T)(auto ref T value, size_t index)
| {
| return _field[index % _length] = value;
| }
| }
|
| static if (hasZeroShiftFieldMember!Field)
| /// Defined if `Field` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| return CycleField!(ZeroShiftField!Field)(_length, _field.assumeFieldsHaveZeroShift);
| }
|}
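|
|// Illustrative sketch (not part of the upstream tests): indexing wraps around
|// the runtime cycle length, so element i aliases `_field[i % _length]`.
|version(mir_test) unittest
|{
|    auto data = [10, 20, 30];
|    auto c = CycleField!(int*)(3, data.ptr);
|    assert(c[0] == 10);
|    assert(c[4] == 20); // 4 % 3 == 1
|    c[5] = 99;          // writes through to data[2]
|    assert(data[2] == 99);
|}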
|
|///
|struct CycleField(Field, size_t length)
|{
| import mir.ndslice.slice: Slice;
|
|@optmath:
| /// Cycle length
| enum _length = length;
| ///
| Field _field;
|
| ///
| auto lightConst()() const @property
| {
| auto field = .lightConst(_field);
| return CycleField!(typeof(field), _length)(field);
| }
|
| ///
| auto lightImmutable()() immutable @property
| {
| auto field = .lightImmutable(_field);
| return CycleField!(typeof(field), _length)(field);
| }
|
| ///
| auto ref opIndex()(size_t index)
| {
| return _field[index % _length];
| }
|
| ///
| static if (!__traits(compiles, &opIndex(size_t.init)))
| {
| auto ref opIndexAssign(T)(auto ref T value, size_t index)
| {
| return _field[index % _length] = value;
| }
| }
|
| static if (hasZeroShiftFieldMember!Field)
| /// Defined if `Field` has member `assumeFieldsHaveZeroShift`.
| auto assumeFieldsHaveZeroShift() @property
| {
| return CycleField!(ZeroShiftField!Field, _length)(_field.assumeFieldsHaveZeroShift);
| }
|}
|
|/++
|`ndIotaField` is used by $(SUBREF topology, ndiota).
|+/
|struct ndIotaField(size_t N)
| if (N)
|{
|@optmath:
| ///
| size_t[N - 1] _lengths;
|
| ///
| auto lightConst()() const @property
| {
| return ndIotaField!N(_lengths);
| }
|
| ///
| auto lightImmutable()() const @property
| {
| return ndIotaField!N(_lengths);
| }
|
| ///
| size_t[N] opIndex()(size_t index) const
| {
| size_t[N] indices;
| foreach_reverse (i; Iota!(N - 1))
| {
| indices[i + 1] = index % _lengths[i];
| index /= _lengths[i];
| }
| indices[0] = index;
| return indices;
| }
|}
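|
|// Illustrative sketch (not part of the upstream tests): the field unflattens a
|// linear index into an N-dimensional index using the stored trailing lengths.
|version(mir_test) unittest
|{
|    size_t[1] lengths = [3];
|    auto f = ndIotaField!2(lengths);
|    assert(f[0] == [0, 0]);
|    assert(f[4] == [1, 1]); // 4 == 1 * 3 + 1
|    assert(f[5] == [1, 2]);
|}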
|
|/++
|`LinspaceField` is used by $(SUBREF topology, linspace).
|+/
|struct LinspaceField(T)
|{
| ///
| size_t _length;
|
| ///
| T _start = cast(T) 0, _stop = cast(T) 0;
|
| ///
| auto lightConst()() scope const @property
| {
0000000| return LinspaceField!T(_length, _start, _stop);
| }
|
| ///
| auto lightImmutable()() scope const @property
| {
| return LinspaceField!T(_length, _start, _stop);
| }
|
| // no fastmath
| ///
| T opIndex()(sizediff_t index) scope const
| {
0000000| sizediff_t d = _length - 1;
0000000| auto v = typeof(T.init.re)(d - index);
0000000| auto w = typeof(T.init.re)(index);
0000000| v /= d;
0000000| w /= d;
0000000| auto a = v * _start;
0000000| auto b = w * _stop;
0000000| return a + b;
| }
|
|@optmath:
|
| ///
| size_t length(size_t dimension = 0)() scope const @property
| if (dimension == 0)
| {
0000000| return _length;
| }
|
| ///
| size_t[1] shape()() scope const @property @nogc
| {
| return [_length];
| }
|}
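|
|// Illustrative sketch (not part of the upstream tests): element i is the
|// affine interpolation between `_start` and `_stop` over `_length - 1` steps.
|version(mir_test) unittest
|{
|    auto f = LinspaceField!double(5, 0.0, 1.0);
|    assert(f[0] == 0.0);
|    assert(f[2] == 0.5);
|    assert(f[4] == 1.0);
|    assert(f.length == 5);
|}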
|
|/++
|Magic square field.
|+/
|struct MagicField
|{
|@optmath:
|@safe pure nothrow @nogc:
|
| /++
| Magic Square size.
| +/
| size_t _n;
|
|scope const:
|
| ///
| MagicField lightConst()() @property
| {
0000000| return this;
| }
|
| ///
| MagicField lightImmutable()() @property
| {
| return this;
| }
|
| ///
| size_t length(size_t dimension = 0)() @property
| if(dimension <= 2)
| {
0000000| return _n * _n;
| }
|
| ///
| size_t[1] shape() @property
| {
0000000| return [_n * _n];
| }
|
| ///
| size_t opIndex(size_t index)
| {
| pragma(inline, false);
0000000| auto d = index / _n;
0000000| auto m = index % _n;
0000000| if (_n & 1)
| {
| //d = _n - 1 - d; // MATLAB synchronization
| //index = d * _n + m; // ditto
0000000| auto r = (index + 1 - d + (_n - 3) / 2) % _n;
0000000| auto c = (_n * _n - index + 2 * d) % _n;
0000000| return r * _n + c + 1;
| }
| else
0000000| if ((_n & 2) == 0)
| {
0000000| auto a = (d + 1) & 2;
0000000| auto b = (m + 1) & 2;
0000000| return a != b ? index + 1: _n * _n - index;
| }
| else
| {
0000000| auto n = _n / 2 ;
0000000| size_t shift;
0000000| ptrdiff_t q;
0000000| ptrdiff_t p = m - n;
0000000| if (p >= 0)
| {
0000000| m = p;
0000000| shift = n * n;
0000000| auto mul = m <= n / 2 + 1;
0000000| q = d - n;
0000000| if (q >= 0)
| {
0000000| d = q;
0000000| mul = !mul;
| }
0000000| if (mul)
| {
0000000| shift *= 2;
| }
| }
| else
| {
0000000| auto mul = m < n / 2;
0000000| q = d - n;
0000000| if (q >= 0)
| {
0000000| d = q;
0000000| mul = !mul;
| }
0000000| if (d == n / 2 && (m == 0 || m == n / 2))
| {
0000000| mul = !mul;
| }
0000000| if (mul)
| {
0000000| shift = n * n * 3;
| }
| }
0000000| index = d * n + m;
0000000| auto r = (index + 1 - d + (n - 3) / 2) % n;
0000000| auto c = (n * n - index + 2 * d) % n;
0000000| return r * n + c + 1 + shift;
| }
| }
|}
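|
|// Illustrative sketch (not part of the upstream tests): for n = 3 the field
|// enumerates the classic 3x3 magic square row by row, so every row sums to 15.
|version(mir_test) unittest
|{
|    auto f = MagicField(3);
|    size_t[3] rowSums;
|    foreach (i; 0 .. 9)
|        rowSums[i / 3] += f[i];
|    assert(rowSums == [15, 15, 15]);
|}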
|
|/++
|`SparseField` is used to represent sparse ndarrays in mutable DOK format.
|+/
|struct SparseField(T)
|{
| ///
| T[size_t] _table;
|
| ///
| auto lightConst()() const @trusted
| {
| return SparseField!(const T)(cast(const(T)[size_t])_table);
| }
|
| ///
| auto lightImmutable()() immutable @trusted
| {
| return SparseField!(immutable T)(cast(immutable(T)[size_t])_table);
| }
|
| ///
| T opIndex()(size_t index)
| {
| import std.traits: isScalarType;
| static if (isScalarType!T)
| return _table.get(index, cast(T)0);
| else
| return _table.get(index, null);
| }
|
| ///
| T opIndexAssign()(T value, size_t index)
| {
| import std.traits: isScalarType;
| static if (isScalarType!T)
| {
| if (value != 0)
| _table[index] = value;
| else
| _table.remove(index);
| }
| else
| {
| if (value !is null)
| _table[index] = value;
| else
| _table.remove(index);
| }
| return value;
| }
|
| ///
| T opIndexUnary(string op)(size_t index)
| if (op == `++` || op == `--`)
| {
| import std.traits: isScalarType;
| mixin (`auto value = ` ~ op ~ `_table[index];`);
| static if (isScalarType!T)
| {
| if (value == 0)
| _table.remove(index);
| }
| else
| {
| if (value is null)
| _table.remove(index);
| }
| return value;
| }
|
| ///
| T opIndexOpAssign(string op)(T value, size_t index)
| if (op == `+` || op == `-`)
| {
| import std.traits: isScalarType;
| mixin (`value = _table[index] ` ~ op ~ `= value;`); // this works
| static if (isScalarType!T)
| {
| if (value == 0)
| _table.remove(index);
| }
| else
| {
| if (value is null)
| _table.remove(index);
| }
| return value;
| }
|}
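|
|// Illustrative sketch (not part of the upstream tests): only non-zero values
|// occupy the hash table; absent keys read as zero and assigning zero removes
|// the entry again.
|version(mir_test) unittest
|{
|    SparseField!double f;
|    f[3] = 2.5;
|    assert(f[3] == 2.5);
|    assert(f[7] == 0);
|    f[3] = 0;
|    assert(f._table.length == 0);
|}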
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/field.d is 0% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-random-2.2.15-mir-random-source-mir-random-package.lst
|/++
|$(SCRIPT inhibitQuickIndex = 1;)
|
|Basic API to construct non-uniform random number generators and stochastic algorithms.
|Non-uniform and uniform random variables can be found in `mir.random.variable`.
|
|$(TABLE $(H2 Generation functions),
|$(TR $(TH Function Name) $(TH Description))
|$(T2 rand, Generates real, integral, boolean, and enumerated uniformly distributed values.)
|$(T2 randIndex, Generates uniformly distributed index.)
|$(T2 randGeometric, Generates a geometric distribution with `p = 1/2`.)
|$(T2 randExponential2, Generates a scaled Exponential distribution.)
|)
|
|$(TABLE $(H2 Phobos Compatibility),
|$(TR $(TH Template Name) $(TH Description))
|$(T2 PhobosRandom, Extends a Mir random number engine to meet Phobos `std.random` interface)
|$(T2 isPhobosUniformRNG, Tests if type is a Phobos-style uniform RNG)
|)
|
|Publicly includes `mir.random.engine`.
|
|Authors: Ilya Yaroshenko, Nathan Sashihara
|Copyright: Copyright, Ilya Yaroshenko 2016-.
|License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
|Macros:
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, random, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|
|+/
|module mir.random;
|
|import std.traits;
|import mir.bitop: cttz;
|import mir.math.common: log2;
|
|public import mir.random.engine;
|
|version (LDC)
|{
| import ldc.intrinsics: llvm_expect;
| // LDC 1.8.0 supports llvm_expect in CTFE.
| private template _ctfeExpect(string expr, string expected)
| {
| static if (__traits(compiles, { enum a = llvm_expect(123, 456); static assert(a == 123); }))
| private enum _ctfeExpect = "llvm_expect("~expr~","~expected~")";
| else
| private enum _ctfeExpect = expr;
| }
|}
|else version (GNU)
|{
| import gcc.builtins: __builtin_expect;
| private enum _ctfeExpect(string expr, string expected) = `__builtin_expect(`~expr~`,`~expected~`)`;
|}
|else
|{
| private enum _ctfeExpect(string expr, string expected) = expr;
|}
|
|/++
|Params:
| gen = saturated random number generator
|Returns:
| Uniformly distributed integer for interval `[T.min .. T.max]`.
|+/
|T rand(T, G)(scope ref G gen)
| if (isSaturatedRandomEngine!G && isIntegral!T && !is(T == enum))
|{
| alias R = EngineReturnType!G;
| enum P = T.sizeof / R.sizeof;
| static if (P > 1)
| {
| _Uab!(R[P],T) u = void;
| version(LittleEndian)
| foreach (ref e; u.asArray)
| e = gen();
| else
| foreach_reverse (ref e; u.asArray)
| e = gen();
| return u.asInteger;
| }
| else static if (preferHighBits!G && P == 0)
| {
| version(LDC) pragma(inline, true);
| return cast(T) (gen() >>> ((R.sizeof - T.sizeof) * 8));
| }
| else
| {
| version(LDC) pragma(inline, true);
| return cast(T) gen();
| }
|}
|
|/// ditto
|T rand(T, G)(scope G* gen)
| if (isSaturatedRandomEngine!G && isIntegral!T && !is(T == enum))
|{
| return rand!(T, G)(*gen);
|}
|
|/// ditto
|T rand(T)()
| if (isIntegral!T && !is(T == enum))
|{
| return rand!T(rne);
|}
|
|///
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| auto s = rand!short;
| auto n = rand!ulong;
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| auto s = gen.rand!short;
| auto n = gen.rand!ulong;
|}
|
|/++
|Params:
| gen = saturated random number generator
|Returns:
| Uniformly distributed boolean.
|+/
|bool rand(T : bool, G)(scope ref G gen)
| if (isSaturatedRandomEngine!G)
|{
| import std.traits : Signed;
| return 0 > cast(Signed!(EngineReturnType!G)) gen();
|}
|
|/// ditto
|bool rand(T : bool, G)(scope G* gen)
| if (isSaturatedRandomEngine!G)
|{
| return rand!(T, G)(*gen);
|}
|
|/// ditto
|bool rand(T : bool)()
|{
| return rand!T(rne);
|}
|
|///
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| auto s = rand!bool;
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| auto s = gen.rand!bool;
|}
|
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| //Coverage. Impure because uses thread-local.
| Random* gen = threadLocalPtr!Random;
| auto s = gen.rand!bool;
|}
|
|private alias Iota(size_t j) = Iota!(0, j);
|
|private template Iota(size_t i, size_t j)
|{
| import std.meta;
| static assert(i <= j, "Iota: i should be less than or equal to j");
| static if (i == j)
| alias Iota = AliasSeq!();
| else
| alias Iota = AliasSeq!(i, Iota!(i + 1, j));
|}
|
|/+
|Returns pseudo-random integer with the low `bitsWanted` bits set to
|random values and the remaining high bits all 0.
|+/
|private T _randBits(T, uint bitsWanted, G)(scope ref G gen)
|if (bitsWanted >= 0 && bitsWanted <= T.sizeof * 8
| && (is(T == uint) || is(T == ulong) || is(T == size_t)))
|{
| static if (EngineReturnType!G.sizeof >= T.sizeof)
| auto bits = gen();
| else
| auto bits = gen.rand!T;
| static if (preferHighBits!G)
| {
| enum rshift = (typeof(bits).sizeof * 8) - bitsWanted;
| return cast(T) (bits >>> rshift);
| }
| else
| {
| enum mask = (typeof(bits)(1) << bitsWanted) - 1;
| return cast(T) (bits & typeof(bits)(mask));
| }
|}
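|
|// Illustrative sketch (not part of the upstream tests): only the requested
|// number of low bits can be set, so the result is always below 2 ^^ bitsWanted.
|version(mir_random_test) unittest
|{
|    import mir.random.engine.xorshift;
|    auto gen = Xorshift(42);
|    foreach (_; 0 .. 100)
|        assert(_randBits!(uint, 4)(gen) < 16);
|}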
|
|/++
|Params:
| gen = saturated random number generator
|Returns:
| Uniformly distributed enumeration.
|+/
|T rand(T, G)(scope ref G gen)
| if (isSaturatedRandomEngine!G && is(T == enum))
|{
| static if (is(T : long))
| enum tiny = [EnumMembers!T] == [Iota!(EnumMembers!T.length)];
| else
| enum tiny = false;
| enum n = [EnumMembers!T].length;
| // If `gen` produces 32 bits or fewer at a time and we have fewer
| // than 2^^32 elements, use a `uint` index.
| static if (n <= uint.max && EngineReturnType!G.max <= uint.max)
| alias IndexType = uint;
| else
| alias IndexType = size_t;
|
| static if ((n & (n - 1)) == 0)
| {
| // Optimized case: power of 2.
| import core.bitop : bsr;
| enum bitsWanted = bsr(n);
| IndexType index = _randBits!(IndexType, bitsWanted)(gen);
| }
| else
| {
| // General case.
| IndexType index = gen.randIndex!IndexType(n);
| }
|
| static if (tiny)
| {
| return cast(T) index;
| }
| else
| {
| static immutable T[EnumMembers!T.length] members = [EnumMembers!T];
| return members[index];
| }
|}
|
|/// ditto
|T rand(T, G)(scope G* gen)
| if (isSaturatedRandomEngine!G && is(T == enum))
|{
| return rand!(T, G)(*gen);
|}
|
|/// ditto
|T rand(T)()
| if (is(T == enum))
|{
| return .rand!T(rne);
|}
|
|///
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| enum A { a, b, c }
| auto e = rand!A;
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| enum A { a, b, c }
| auto e = gen.rand!A;
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| enum A : dchar { a, b, c }
| auto e = gen.rand!A;
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| enum A : string { a = "a", b = "b", c = "c" }
| auto e = gen.rand!A;
|}
|
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| //Coverage. Impure because uses thread-local.
| Random* gen = threadLocalPtr!Random;
| enum A : dchar { a, b, c, d }
| auto e = gen.rand!A;
|}
|
|private static union _U
|{
| real r;
| struct
| {
| version(LittleEndian)
| {
| ulong m;
| ushort e;
| }
| else
| {
| ushort e;
| align(2)
| ulong m;
| }
| }
|}
|
|private static union _Uab(A,B) if (A.sizeof == B.sizeof && !is(Unqual!A == Unqual!B))
|{
| A a;
| B b;
|
| private import std.traits: isArray, isIntegral, isFloatingPoint;
|
| static if (isArray!A && !isArray!B)
| alias asArray = a;
| static if (isArray!B && !isArray!A)
| alias asArray = b;
|
| static if (isIntegral!A && !isIntegral!B)
| alias asInteger = a;
| static if (isIntegral!B && !isIntegral!A)
| alias asInteger = b;
|
| static if (isFloatingPoint!A && !isFloatingPoint!B)
| alias asFloatingPoint = a;
| static if (isFloatingPoint!B && !isFloatingPoint!A)
| alias asFloatingPoint = b;
|}
|
|/++
|Params:
| gen = saturated random number generator
| boundExp = bound exponent (optional). `boundExp` must be less or equal to `T.max_exp`.
|Returns:
|    Uniformly distributed real for interval `(-2^^boundExp, 2^^boundExp)`.
|Note: `fabs` can be used to get a value from positive interval `[0, 2^^boundExp$(RPAREN)`.
|+/
|T rand(T, G)(scope ref G gen, sizediff_t boundExp = 0)
| if (isSaturatedRandomEngine!G && isFloatingPoint!T)
|{
| assert(boundExp <= T.max_exp);
| static if (T.mant_dig == float.mant_dig)
| {
| enum W = T.sizeof * 8 - T.mant_dig;//8
| _Uab!(int,float) u = void;
| u.asInteger = gen.rand!uint;
| enum uint EXPMASK = 0x7F80_0000;
| boundExp -= T.min_exp - 1;
| size_t exp = EXPMASK & u.asInteger;
| exp = boundExp - (exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W);
| u.asInteger &= ~EXPMASK;
| if(cast(sizediff_t)exp < 0)
| {
| exp = -cast(sizediff_t)exp;
| uint m = u.asInteger & int.max;
| if(exp >= T.mant_dig)
| m = 0;
| else
| m >>= cast(uint)exp;
| u.asInteger = (u.asInteger & ~int.max) ^ m;
| exp = 0;
| }
| u.asInteger = cast(uint)(exp << (T.mant_dig - 1)) ^ u.asInteger;
| return u.asFloatingPoint;
| }
| else
| static if (T.mant_dig == double.mant_dig)
| {
| enum W = T.sizeof * 8 - T.mant_dig; //11
| _Uab!(long,double) u = void;
| u.asInteger = gen.rand!ulong;
| enum ulong EXPMASK = 0x7FF0_0000_0000_0000;
| boundExp -= T.min_exp - 1;
| ulong exp = EXPMASK & u.asInteger;
| exp = ulong(boundExp) - (exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W);
| u.asInteger &= ~EXPMASK;
| if(cast(long)exp < 0)
| {
| exp = -cast(sizediff_t)exp;
| ulong m = u.asInteger & long.max;
| if(exp >= T.mant_dig)
| m = 0;
| else
| m >>= cast(uint)exp;
| u.asInteger = (u.asInteger & ~long.max) ^ m;
| exp = 0;
| }
| u.asInteger = (exp << (T.mant_dig - 1)) ^ u.asInteger;
| return u.asFloatingPoint;
| }
| else
| static if (T.mant_dig == 64)
| {
| enum W = 15;
| auto d = gen.rand!uint;
| auto m = gen.rand!ulong;
| enum uint EXPMASK = 0x7FFF;
| boundExp -= T.min_exp - 1;
| size_t exp = EXPMASK & d;
| exp = boundExp - (exp ? cttz(exp) : gen.randGeometric + W);
| if (cast(sizediff_t)exp > 0)
| m |= ~long.max;
| else
| {
| m &= long.max;
| exp = -cast(sizediff_t)exp;
| if(exp >= T.mant_dig)
| m = 0;
| else
| m >>= cast(uint)exp;
| exp = 0;
| }
| d = cast(uint) exp ^ (d & ~EXPMASK);
| _U ret = void;
| ret.e = cast(ushort)d;
| ret.m = m;
| return ret.r;
| }
| /// TODO: quadruple
| else static assert(0);
|}
|
|/// ditto
|T rand(T, G)(scope G* gen, sizediff_t boundExp = 0)
| if (isSaturatedRandomEngine!G && isFloatingPoint!T)
|{
| return rand!(T, G)(*gen, boundExp);
|}
|
|/// ditto
|T rand(T)(sizediff_t boundExp = 0)
| if (isFloatingPoint!T)
|{
| return rand!T(rne, boundExp);
|}
|
|
|///
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| import mir.math.common: fabs;
|
| auto a = rand!float;
| assert(-1 < a && a < +1);
|
| auto b = rand!double(4);
| assert(-16 < b && b < +16);
|
| auto c = rand!double(-2);
| assert(-0.25 < c && c < +0.25);
|
| auto d = rand!real.fabs;
| assert(0.0L <= d && d < 1.0L);
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.math.common: fabs;
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
|
| auto a = gen.rand!float;
| assert(-1 < a && a < +1);
|
| auto b = gen.rand!double(4);
| assert(-16 < b && b < +16);
|
| auto c = gen.rand!double(-2);
| assert(-0.25 < c && c < +0.25);
|
| auto d = gen.rand!real.fabs;
| assert(0.0L <= d && d < 1.0L);
|}
|
|/// Subnormal numbers
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| auto x = gen.rand!double(double.min_exp-1);
| assert(-double.min_normal < x && x < double.min_normal);
|}
|
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| //Coverage. Impure because uses thread-local.
| import mir.math.common: fabs;
| import std.meta: AliasSeq;
|
| auto a = rne.rand!float;
| assert(-1 < a && a < +1);
|
| auto b = rne.rand!double(4);
| assert(-16 < b && b < +16);
|
| auto c = rne.rand!double(-2);
| assert(-0.25 < c && c < +0.25);
|
| auto d = rne.rand!real.fabs;
| assert(0.0L <= d && d < 1.0L);
|
| foreach(T; AliasSeq!(float, double, real))
| {
| auto f = rne.rand!T(T.min_exp-1);
| assert(f.fabs < T.min_normal, T.stringof);
| }
|}
|
|/++
|Params:
| gen = uniform random number generator
| m = positive modulus
|Returns:
| Uniformly distributed integer for interval `[0 .. m$(RPAREN)`.
|+/
|T randIndex(T, G)(scope ref G gen, T _m)
| if(isSaturatedRandomEngine!G && isUnsigned!T)
|{
| immutable m = _m + 0u;
| static if (EngineReturnType!G.sizeof >= T.sizeof * 2)
| alias MaybeR = EngineReturnType!G;
| else static if (uint.sizeof >= T.sizeof * 2)
| alias MaybeR = uint;
| else static if (ulong.sizeof >= T.sizeof * 2)
| alias MaybeR = ulong;
| else static if (is(ucent) && __traits(compiles, {static assert(ucent.sizeof >= T.sizeof * 2);}))
| mixin ("alias MaybeR = ucent;");
| else
| alias MaybeR = void;
|
| static if (!is(MaybeR == void))
| {
| alias R = MaybeR;
| static assert(R.sizeof >= T.sizeof * 2);
| //Use Daniel Lemire's fast alternative to modulo reduction:
| //https://lemire.me/blog/2016/06/30/fast-random-shuffling/
| R randombits = cast(R) gen.rand!T;
| R multiresult = randombits * m;
| T leftover = cast(T) multiresult;
| if (mixin(_ctfeExpect!(`leftover < m`, `false`)))
| {
| immutable threshold = -m % m ;
| while (leftover < threshold)
| {
| randombits = cast(R) gen.rand!T;
| multiresult = randombits * m;
| leftover = cast(T) multiresult;
| }
| }
| enum finalshift = T.sizeof * 8;
| return cast(T) (multiresult >>> finalshift);
| }
| else
| {
| import mir.utility : extMul;
| //Use Daniel Lemire's fast alternative to modulo reduction:
| //https://lemire.me/blog/2016/06/30/fast-random-shuffling/
| auto u = extMul!T(gen.rand!T, m);
| if (mixin(_ctfeExpect!(`u.low < m`, `false`)))
| {
| immutable T threshold = -m % m;
| while (u.low < threshold)
| {
| u = extMul!T(gen.rand!T, m);
| }
| }
| return u.high;
| }
|}
|
|/// ditto
|T randIndex(T, G)(scope G* gen, T m)
| if(isSaturatedRandomEngine!G && isUnsigned!T)
|{
| return randIndex!(T, G)(*gen, m);
|}
|
|/// ditto
|T randIndex(T)(T m)
| if(isUnsigned!T)
|{
| return randIndex!T(rne, m);
|}
|
|///
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| auto s = randIndex(100u);
| auto n = randIndex!ulong(-100);
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random;
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| auto s = gen.randIndex!uint(100);
| auto n = gen.randIndex!ulong(-100);
|}
|
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| //CTFE check.
| import std.meta : AliasSeq;
| import mir.random.engine.xoshiro : Xoroshiro128Plus;
| foreach (IntType; AliasSeq!(ubyte,ushort,uint,ulong))
| {
| enum IntType e = (){auto g = Xoroshiro128Plus(1); return g.randIndex!IntType(100);}();
| auto gen = Xoroshiro128Plus(1);
| assert(e == gen.randIndex!IntType(100));
| }
|}
|
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| //Test production of ulong from ulong generator.
| import mir.random.engine.xoshiro;
| auto gen = Xoroshiro128Plus(1);
| enum ulong limit = 10;
| enum count = 10;
| ulong[limit] buckets;
| foreach (_; 0 .. count)
| {
| ulong x = gen.randIndex!ulong(limit);
| assert(x < limit);
| buckets[cast(size_t) x] += 1;
| }
| foreach (i, x; buckets)
| assert(x != count, "All values were the same!");
|}
|
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| //Coverage. Impure because uses thread-local.
| Random* gen = threadLocalPtr!Random;
| auto s = gen.randIndex!uint(100);
| auto n = gen.randIndex!ulong(-100);
|}
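|
|/*
|Editor's note: a minimal standalone sketch of the Lemire multiply-shift
|reduction referenced in the comments of `randIndex` above. `nextUint` is a
|hypothetical 32-bit uniform source and is not part of mir-random; the sketch
|is illustrative only and is kept out of compilation with `version(none)`.
|*/
|version(none)
|uint boundedLemire(scope uint delegate() nextUint, uint m)
|{
|    // Widen, multiply, and keep the high 32 bits: floor(x * m / 2^^32).
|    ulong multiresult = ulong(nextUint()) * m;
|    uint leftover = cast(uint) multiresult;
|    if (leftover < m) // only then can the mapping be biased
|    {
|        // 2^^32 mod m: reject the lowest leftovers to restore uniformity.
|        immutable uint threshold = -m % m;
|        while (leftover < threshold)
|        {
|            multiresult = ulong(nextUint()) * m;
|            leftover = cast(uint) multiresult;
|        }
|    }
|    return cast(uint) (multiresult >>> 32); // uniform in [0 .. m)
|}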
|
|/++
| Returns: `n >= 0` such that `P(n) := 1 / (2^^(n + 1))`.
|+/
|size_t randGeometric(G)(scope ref G gen)
| if(isSaturatedRandomEngine!G)
|{
| alias R = EngineReturnType!G;
| static if (R.sizeof >= size_t.sizeof)
| alias T = size_t;
| else
| alias T = R;
| for(size_t count = 0;; count += T.sizeof * 8)
| if(auto val = gen.rand!T())
| return count + cttz(val);
|}
|
|/// ditto
|size_t randGeometric(G)(scope G* gen)
| if(isSaturatedRandomEngine!G)
|{
| return randGeometric!(G)(*gen);
|}
|
|/// ditto
|size_t randGeometric()()
|{
| return randGeometric(rne);
|}
|
|///
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| size_t s = randGeometric;
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xoshiro;
| auto gen = Xoroshiro128Plus(1);
|
| size_t s = gen.randGeometric;
|}
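|
|/*
|Editor's note: an illustrative check (not part of the original tests) of the
|documented distribution `P(n) = 1 / 2^^(n + 1)`: about half of the draws
|should be `0`. The bounds below are deliberately loose.
|*/
|version(mir_random_test) unittest
|{
|    import mir.random.engine.xorshift;
|    auto gen = Xorshift(1);
|    size_t zeros;
|    foreach (_; 0 .. 1000)
|        if (gen.randGeometric == 0)
|            ++zeros;
|    assert(zeros > 350 && zeros < 650);
|}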
|
|/++
|Params:
| gen = saturated random number generator
|Returns:
| `X ~ Exp(1) / log(2)`.
|Note: the result is always non-negative.
|+/
|T randExponential2(T, G)(scope ref G gen)
| if (isSaturatedRandomEngine!G && isFloatingPoint!T)
|{
| enum W = T.sizeof * 8 - T.mant_dig - 1 - bool(T.mant_dig == 64);
| static if (is(T == float))
| {
| _Uab!(uint,float) u = void;
| u.asInteger = gen.rand!uint;
| enum uint EXPMASK = 0xFF80_0000;
| auto exp = EXPMASK & u.asInteger;
| u.asInteger &= ~EXPMASK;
| u.asInteger ^= 0x3F000000; // 0.5
| auto y = exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W;
| auto x = u.asFloatingPoint;
| }
| else
| static if (is(T == double))
| {
| _Uab!(ulong,double) u = void;
| u.asInteger = gen.rand!ulong;
| enum ulong EXPMASK = 0xFFF0_0000_0000_0000;
| auto exp = EXPMASK & u.asInteger;
| u.asInteger &= ~EXPMASK;
| u.asInteger ^= 0x3FE0000000000000; // 0.5
| auto y = exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W;
| auto x = u.asFloatingPoint;
| }
| else
| static if (T.mant_dig == 64)
| {
| _U ret = void;
| ret.e = 0x3FFE;
| ret.m = gen.rand!ulong | ~long.max;
| auto y = gen.randGeometric;
| auto x = ret.r;
| }
| /// TODO: quadruple
| else static assert(0);
|
| if (x == 0.5f)
| return y;
| else
| return -log2(x) + y;
|}
|
|/// ditto
|T randExponential2(T, G)(scope G* gen)
| if (isSaturatedRandomEngine!G && isFloatingPoint!T)
|{
| return randExponential2!(T, G)(*gen);
|}
|
|/// ditto
|T randExponential2(T)()
| if (isFloatingPoint!T)
|{
| return randExponential2!T(rne);
|}
|
|///
|@nogc nothrow @safe version(mir_random_test) unittest
|{
| auto v = randExponential2!double;
|}
|
|///
|@nogc nothrow @safe pure version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift;
| auto gen = Xorshift(1);
| auto v = gen.randExponential2!double();
|}
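|
|/*
|Editor's note: a minimal sketch (not part of the original tests) of
|recovering a standard Exp(1) variate from `randExponential2`, whose result
|is scaled by `1 / log(2)`; multiplying by `LN2` undoes the scaling.
|*/
|version(mir_random_test) unittest
|{
|    import mir.random.engine.xorshift;
|    import std.math: LN2;
|    auto gen = Xorshift(1);
|    auto e = gen.randExponential2!double * LN2; // e ~ Exp(1)
|    assert(e >= 0);
|}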
|
|/++
|$(LINK2 https://dlang.org/phobos/std_random.html#.isUniformRNG,
|Tests if T is a Phobos-style uniform RNG.)
|+/
|template isPhobosUniformRNG(T)
|{
| import std.random: isUniformRNG;
| enum bool isPhobosUniformRNG = isUniformRNG!T;
|}
|
|/++
|Extends a Mir-style random number generator to also be a Phobos-style
|uniform RNG. If `Engine` is already a Phobos-style uniform RNG,
|`PhobosRandom` is just an alias for `Engine`.
|+/
|struct PhobosRandom(Engine) if (isRandomEngine!Engine && !isPhobosUniformRNG!Engine)//Doesn't need to be saturated.
|{
| alias Uint = EngineReturnType!Engine;
| private Engine _engine;
| private Uint _front;
|
| /// Default constructor and copy constructor are disabled.
| @disable this();
| /// ditto
| @disable this(this);
|
| /// Forward constructor arguments to `Engine`.
| this(A...)(auto ref A args)
| if (is(typeof(Engine(args))))
| {
| _engine = Engine(args);
| _front = _engine.opCall();
| }
|
| /// Phobos-style random interface.
| enum bool isUniformRandom = true;
| /// ditto
| enum Uint min = Uint.min;//Always normalized.
| /// ditto
| enum Uint max = Engine.max;//Might not be saturated.
| /// ditto
| enum bool empty = false;
| /// ditto
| @property Uint front()() const { return _front; }
| /// ditto
| void popFront()() { _front = _engine.opCall(); }
| /// ditto
| void seed(A...)(auto ref A args) if (is(typeof(Engine(args))))
| {
| _engine.__ctor(args);
| _front = _engine.opCall();
| }
|
| /// Retain support for Mir-style random interface.
| enum bool isRandomEngine = true;
| /// ditto
| enum bool preferHighBits = .preferHighBits!Engine;
| /// ditto
| Uint opCall()()
| {
| Uint result = _front;
| _front = _engine.opCall();
| return result;
| }
|
| ///
| @property ref inout(Engine) engine()() inout @nogc nothrow pure @safe
| {
| return _engine;
| }
|}
|
|/// ditto
|template PhobosRandom(Engine) if (isRandomEngine!Engine && isPhobosUniformRNG!Engine)
|{
| alias PhobosRandom = Engine;
|}
|
|///
|@nogc nothrow pure @safe version(mir_random_test) unittest
|{
| import mir.random.engine.xorshift: Xorshift1024StarPhi;
| import std.random: isSeedable, isPhobosUniformRNG = isUniformRNG;
|
| alias RNG = PhobosRandom!Xorshift1024StarPhi;
|
| //Phobos interface
| static assert(isPhobosUniformRNG!(RNG, ulong));
| static assert(isSeedable!(RNG, ulong));
| //Mir interface
| static assert(isSaturatedRandomEngine!RNG);
| static assert(is(EngineReturnType!RNG == ulong));
|
| auto gen = Xorshift1024StarPhi(1);
| auto rng = RNG(1);
| assert(gen() == rng.front);
| rng.popFront();
| assert(gen() == rng.front);
| rng.popFront();
| assert(gen() == rng());
|
| gen.__ctor(1);
| rng.seed(1);
| assert(gen() == rng());
|}
../../../.dub/packages/mir-random-2.2.15/mir-random/source/mir/random/package.d has no code
<<<<<< EOF
# path=./source-mir-glas-package.lst
|/++
|
|$(H1 GLAS (Generic Linear Algebra Subprograms))
|
|The GLAS are generic routines that provide standard building blocks for performing vector and matrix operations.
|The Level 1 GLAS perform scalar, vector and vector-vector operations,
|the Level 2 GLAS perform matrix-vector operations, and the Level 3 GLAS perform matrix-matrix operations.
|
|$(H2 Implemented Routines)
|
|The list of already implemented features.
|
|$(BOOKTABLE ,
| $(TR
| $(TH Modules)
| $(TH Description)
| )
| $(TR
| $(TDNW $(SUBMODULE l1))
| $(TD vector operations 100% done, partially optimized for now)
| )
| $(TR
| $(TDNW $(SUBMODULE l2))
| $(TD matrix-vector operations 3% done, partially optimized for now)
| )
| $(TR
| $(TDNW l3 was moved to $(HTTP github.com/libmir/mir-glas, mir-glas))
| $(TD matrix-matrix operations 50% done)
| )
|)
|
|GLAS is a generalization of $(LINK2 http://www.netlib.org/blas/, BLAS) (Basic Linear Algebra Subprograms).
|Because the BLAS are efficient, portable, and widely available, they are commonly used in the development of
|high quality linear algebra or related software, such as
|$(LINK2 http://www.netlib.org/lapack/, LAPACK),
|$(LINK2 http://www.numpy.org/, NumPy), or $(LINK2 http://julialang.org/, The Julia language).
|
|An efficient Level 3 BLAS implementation requires
|$(LINK2 https://en.wikipedia.org/wiki/CPU_cache, cache)-friendly matrix blocking.
|In addition, $(LINK2 https://en.wikipedia.org/wiki/SIMD, SIMD) instructions should be used for all levels on modern architectures.
|
|$(H2 Why GLAS)
|
|GLAS is ...
|
|- fast to execute.
|- fast to compile.
|- fast to extend using $(MREF_ALTTEXT ndslices, mir, ndslice).
|- fast to add new instruction set targets.
|
|
|$(H2 Optimization notes)
|
|GLAS requires recent $(LINK2 https://github.com/ldc-developers/ldc, LDC) >= 1.1.0-beta2.
|
|License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0).
|Copyright: Copyright © 2016-, Ilya Yaroshenko
|Authors: Ilya Yaroshenko
|
|Macros:
|SUBMODULE = $(MREF_ALTTEXT $1, mir, glas, $1)
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, glas, $1)$(NBSP)
|+/
|module mir.glas;
|
|public import mir.glas.l1;
|public import mir.glas.l2;
source/mir/glas/package.d has no code
<<<<<< EOF
# path=./source-mir-glas-l1.lst
|/++
|$(H2 Level 1)
|
|$(SCRIPT inhibitQuickIndex = 1;)
|
|This is a submodule of $(MREF mir,glas).
|
|The Level 1 GLAS perform vector and vector-vector operations.
|
|$(BOOKTABLE $(H2 Vector-vector operations),
|$(T2 rot, apply Givens rotation)
|$(T2 axpy, constant times a vector plus a vector)
|$(T2 dot, dot product)
|$(T2 dotc, dot product, conjugating the first vector)
|)
|
|$(BOOKTABLE $(H2 Vector operations),
|$(TR $(TH Function Name) $(TH Description))
|$(T2 nrm2, Euclidean norm)
|$(T2 sqnrm2, square of Euclidean norm)
|$(T2 asum, sum of absolute values)
|$(T2 iamax, index of max abs value)
|$(T2 amax, max abs value)
|)
|
|All functions except $(LREF iamax) work with multidimensional tensors.
|
|GLAS does not provide `swap`, `scal`, and `copy` functions.
|This functionality is part of $(MREF_ALTTEXT ndslice, mir, ndslice) package. Examples can be found below.
|
|License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0).
|Copyright: Copyright © 2016-, Ilya Yaroshenko
|Authors: Ilya Yaroshenko
|
|Macros:
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|SUBMODULE = $(MREF_ALTTEXT $1, mir, glas, $1)
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, glas, $1)$(NBSP)
|NDSLICEREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|+/
|module mir.glas.l1;
|
|/// SWAP
|unittest
|{
| import std.algorithm.mutation: swap;
| import mir.ndslice.allocation: slice;
| import mir.algorithm.iteration: each;
| import std.typecons: Yes;
1| auto x = slice!double(4);
1| auto y = slice!double(4);
1| x[] = [0, 1, 2, 3];
1| y[] = [4, 5, 6, 7];
1| each!(swap)(x, y);
1| assert(x == [4, 5, 6, 7]);
1| assert(y == [0, 1, 2, 3]);
|}
|
|/// SCAL
|unittest
|{
| import mir.ndslice.allocation: slice;
| import std.typecons: Yes;
1| auto x = slice!double(4);
1| x[] = [0, 1, 2, 3];
1| x[] *= 2.0;
1| assert(x == [0, 2, 4, 6]);
|}
|
|/// COPY
|unittest
|{
| import mir.ndslice.allocation: slice;
1| auto x = slice!double(4);
1| auto y = slice!double(4);
1| x[] = [0, 1, 2, 3];
1| y[] = x;
1| assert(y == [0, 1, 2, 3]);
|}
|
|import mir.math.common;
|import mir.internal.utility;
|import mir.ndslice.slice;
|import mir.algorithm.iteration : reduce, each;
|import mir.math.common: fastmath;
|
|import std.traits: Unqual, isPointer;
|import std.meta: allSatisfy;
|
|@fastmath:
|
|template _rot(alias c, alias s)
|{
| @fastmath
| void _rot(X, Y)(ref X xr, ref Y yr)
| {
4| auto x = xr;
4| auto y = yr;
4| auto t1 = c * x + s * y;
| static if (isComplex!(typeof(c)))
| {
| auto t2 = (c.re - c.im * 1fi) * y;
| }
| else
4| auto t2 = c * y;
| static if (isComplex!(typeof(s)))
| {
| t2 -= (s.re - s.im * 1fi) * x;
| }
| else
4| t2 -= s * x;
4| xr = t1;
4| yr = t2;
| }
|}
|
|template _axpy(alias a)
|{
| @fastmath
| void _axpy(X, Y)(ref X x, ref Y y)
| {
15| y += a * x;
| }
|}
|
|A _fmuladd(A, B, C)(A a, in B b, in C c)
|{
44| return a + b * c;
|}
|
|A _fmuladdc(A, B, C)(A a, in B b, in C c)
|{
| static if (isComplex!B)
| {
2| return a + (b.re - b.im * 1fi) * c;
| }
| else
| return a + b * c;
|}
|
|A _nrm2(A, B)(A a, in B b)
|{
| static if (isComplex!B)
4| return a + b.re * b.re + b.im * b.im;
| else
8| return a + b * b;
|}
|
|A _asum(A, B)(A a, in B b)
|{
| static if (isComplex!B)
| {
2| return a + (b.re.fabs + b.im.fabs);
| }
| else
| static if (isFloatingPoint!B)
| {
4| return a + b.fabs;
| }
| else
| {
| static if (isUnsigned!B)
| return a + b;
| else
| return a + (b >= 0 ? b : -b);
| }
|}
|
|A _amax(A, B)(A a, in B b)
|{
| static if (isComplex!B)
| {
4| return a.fmax(b.re.fabs + b.im.fabs);
| }
| else
| static if (isFloatingPoint!B)
| {
6| return a.fmax(b.fabs);
| }
| else
| {
| static if (!isUnsigned!B)
| b = (b >= 0 ? b : -b);
| return a >= b ? a : b;
| }
|}
|
|private enum _shouldBeCastedToUnqual(T) = isPointer!T && !is(Unqual!T == T);
|
|/++
|Applies a plane rotation, where the `c` (cos) and `s` (sin) are scalars.
|Uses unrolled loops for strides equal to one.
|Params:
| c = cos scalar
| s = sin scalar
| x = first n-dimensional tensor
| y = second n-dimensional tensor
|BLAS: SROT, DROT, CROT, ZROT, CSROT, ZDROT
|+/
|void rot(C, S, SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(in C c, in S s, Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y)
|{
1| assert(x.shape == y.shape, "constraints: x and y must have equal shapes");
| pragma(inline, false);
1| each!(_rot!(c, s))(x, y);
|}
|
|///
|unittest
|{
| import mir.ndslice.allocation: slice;
1| auto x = slice!double(4);
1| auto y = slice!double(4);
1| auto a = slice!double(4);
1| auto b = slice!double(4);
1| double cos = 3.0 / 5;
1| double sin = 4.0 / 5;
1| x[] = [0, 1, 2, 3];
1| y[] = [4, 5, 6, 7];
15| foreach (i; 0 .. 4)
| {
4| a[i] = cos * x[i] + sin * y[i];
4| b[i] = cos * y[i] - sin * x[i];
| }
1| rot(cos, sin, x, y);
1| assert(x == a);
1| assert(y == b);
|}
|
|/++
|Constant times a vector plus a vector.
|Uses unrolled loops for strides equal to one.
|Params:
| a = scale parameter
| x = first n-dimensional tensor
| y = second n-dimensional tensor
|BLAS: SAXPY, DAXPY, CAXPY, ZAXPY
|+/
|void axpy(A, SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(in A a, Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y)
|{
| static if (_shouldBeCastedToUnqual!Iterator2)
| {
| .axpy(a, cast(Slice!(N, Unqual!Iterator1))x, cast(Slice!(N, Unqual!Iterator2))y);
| }
| else
| {
5| assert(x.shape == y.shape, "constraints: x and y must have equal shapes");
| pragma(inline, false);
5| each!(_axpy!a)(x, y);
| }
|}
|
|/// SAXPY, DAXPY
|unittest
|{
| import mir.ndslice.allocation: slice;
1| auto x = slice!double(4);
1| auto y = slice!double(4);
1| x[] = [0, 1, 2, 3];
1| y[] = [4, 5, 6, 7];
1| axpy(2.0, x, y);
1| assert(y == [4, 7, 10, 13]);
|}
|
|/// SAXPY, DAXPY
|unittest
|{
| import mir.ndslice.allocation: slice;
|
1| auto a = 3 + 4i;
1| auto x = slice!cdouble(2);
1| auto y = slice!cdouble(2);
1| x[] = [0 + 1i, 2 + 3i];
1| y[] = [4 + 5i, 6 + 7i];
1| axpy(a, x, y);
1| assert(y == [a * (0 + 1i) + (4 + 5i), a * (2 + 3i) + (6 + 7i)]);
|}
|
|/++
|Forms the dot product of two vectors.
|Uses unrolled loops for strides equal to one.
|Returns: dot product `xᐪ × y`
|Params:
| F = type for summation (optional template parameter)
| x = first n-dimensional tensor
| y = second n-dimensional tensor
|BLAS: SDOT, DDOT, SDSDOT, DSDOT, CDOTU, ZDOTU
|+/
|F dot(F, SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y)
|{
| static if (allSatisfy!(_shouldBeCastedToUnqual, Iterator1, Iterator2))
| {
| return .dot!F(cast(Slice!(Unqual!Iterator1, N, kind1))x, cast(Slice!(Unqual!Iterator2, N, kind2))y);
| }
| else
| {
10| assert(x.shape == y.shape, "constraints: x and y must have equal shapes");
| pragma(inline, false);
10| return reduce!(_fmuladd)(cast(F)(0), x, y);
| }
|}
|
|/// SDOT, DDOT
|unittest
|{
| import mir.ndslice.allocation: slice;
1| auto x = slice!double(4);
1| auto y = slice!double(4);
1| x[] = [0, 1, 2, 3];
1| y[] = [4, 5, 6, 7];
1| assert(dot(x, y) == 5 + 12 + 21);
|}
|
|/// ditto
|auto dot(SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y)
|{
9| return .dot!(Unqual!(typeof(x[0] * y[0])))(x, y);
|}
|
|/// SDOT, DDOT
|unittest
|{
| import mir.ndslice.allocation: slice;
1| auto x = slice!double(4);
1| auto y = slice!double(4);
1| x[] = [0, 1, 2, 3];
1| y[] = [4, 5, 6, 7];
1| assert(dot(x, y) == 5 + 12 + 21);
|}
|
|/// SDSDOT, DSDOT
|unittest
|{
| import mir.ndslice.allocation: slice;
1| auto x = slice!float(4);
1| auto y = slice!float(4);
1| x[] = [0, 1, 2, 3];
1| y[] = [4, 5, 6, 7];
1| assert(dot!real(x, y) == 5 + 12 + 21); // 80-bit FP for x86 CPUs
|}
|
|/// CDOTU, ZDOTU
|unittest
|{
| import mir.ndslice.allocation: slice;
|
1| auto x = slice!cdouble(2);
1| auto y = slice!cdouble(2);
1| x[] = [0 + 1i, 2 + 3i];
1| y[] = [4 + 5i, 6 + 7i];
| version(LDC) // DMD Internal error: backend/cgxmm.c 628
1| assert(dot(x, y) == (0 + 1i) * (4 + 5i) + (2 + 3i) * (6 + 7i));
|}
|
|/++
|Forms the dot product of two complex vectors, conjugating the first vector.
|Uses unrolled loops for strides equal to one.
|Returns: dot product `conj(xᐪ) × y`
|Params:
| F = type for summation (optional template parameter)
| x = first n-dimensional tensor
| y = second n-dimensional tensor
|BLAS: CDOTC, ZDOTC
|+/
|F dotc(F, SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y)
| if (isComplex!(DeepElementType!(typeof(x))) && isComplex!(DeepElementType!(typeof(y))))
|{
| static if (allSatisfy!(_shouldBeCastedToUnqual, Iterator1, Iterator2))
| {
| return .dotc!F(cast(Slice!(N, Unqual!Iterator1))x, cast(Slice!(N, Unqual!Iterator2))y);
| }
| else
| {
1| assert(x.shape == y.shape, "constraints: x and y must have equal shapes");
| pragma(inline, false);
1| return reduce!(_fmuladdc)(cast(F)(0), x, y);
| }
|}
|
|/// ditto
|auto dotc(SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y)
|{
1| return .dotc!(Unqual!(typeof(x[x.shape.init] * y[y.shape.init])))(x, y);
|}
|
|/// CDOTC, ZDOTC
|unittest
|{
| import mir.ndslice.allocation: slice;
|
1| auto x = slice!cdouble(2);
1| auto y = slice!cdouble(2);
1| x[] = [0 + 1i, 2 + 3i];
1| y[] = [4 + 5i, 6 + 7i];
| version(LDC) // DMD Internal error: backend/cgxmm.c 628
1| assert(dotc(x, y) == (0 + -1i) * (4 + 5i) + (2 + -3i) * (6 + 7i));
|}
|
|/++
|Returns the euclidean norm of a vector.
|Uses unrolled loops for stride equal to one.
|Returns: euclidean norm `sqrt(conj(xᐪ) × x)`
|Params:
| F = type for summation (optional template parameter)
| x = n-dimensional tensor
|BLAS: SNRM2, DNRM2, SCNRM2, DZNRM2
|+/
|F nrm2(F, SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x)
|{
| static if (_shouldBeCastedToUnqual!Iterator)
| return .sqnrm2!F(cast(Slice!(N, Unqual!R))x).sqrt;
| else
2| return .sqnrm2!F(x).sqrt;
|}
|
|/// ditto
|auto nrm2(SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x)
|{
2| return .nrm2!(realType!(typeof(x[x.shape.init] * x[x.shape.init])))(x);
|}
|
|/// SNRM2, DNRM2
|unittest
|{
| import mir.ndslice.allocation: slice;
| import std.math: sqrt, approxEqual;
1| auto x = slice!double(4);
1| x[] = [0, 1, 2, 3];
1| assert(nrm2(x).approxEqual(sqrt(1.0 + 4 + 9)));
|}
|
|/// SCNRM2, DZNRM2
|unittest
|{
| import mir.ndslice.allocation: slice;
| import std.math: sqrt, approxEqual;
|
1| auto x = slice!cdouble(2);
1| x[] = [0 + 1i, 2 + 3i];
|
1| assert(nrm2(x).approxEqual(sqrt(1.0 + 4 + 9)));
|}
|
|/++
|Forms the square of the euclidean norm.
|Uses unrolled loops for stride equal to one.
|Returns: `conj(xᐪ) × x`
|Params:
| F = type for summation (optional template parameter)
| x = n-dimensional tensor
|+/
|F sqnrm2(F, SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x)
|{
| static if (_shouldBeCastedToUnqual!Iterator)
| {
| return .sqnrm2!F(cast(Slice!(N, Unqual!R))x);
| }
| else
| {
| pragma(inline, false);
4| return reduce!(_nrm2)(F(0), x);
| }
|}
|
|/// ditto
|auto sqnrm2(SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x)
|{
2| return .sqnrm2!(realType!(typeof(x[x.shape.init] * x[x.shape.init])))(x);
|}
|
|///
|unittest
|{
| import mir.ndslice.allocation: slice;
1| auto x = slice!double(4);
1| x[] = [0, 1, 2, 3];
1| assert(sqnrm2(x) == 1.0 + 4 + 9);
|}
|
|///
|unittest
|{
| import mir.ndslice.allocation: slice;
|
1| auto x = slice!cdouble(2);
1| x[] = [0 + 1i, 2 + 3i];
|
1| assert(sqnrm2(x) == 1.0 + 4 + 9);
|}
|
|/++
|Takes the sum of the `|Re(.)| + |Im(.)|`'s of the elements of a vector.
|Returns: sum of the `|Re(.)| + |Im(.)|`'s
|Params:
| F = type for summation (optional template parameter)
| x = n-dimensional tensor
|BLAS: SASUM, DASUM, SCASUM, DZASUM
|+/
|F asum(F, SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x)
|{
| static if (_shouldBeCastedToUnqual!Iterator)
| {
| return .asum!F(cast(Slice!(N, Unqual!R))x);
| }
| else
| {
| pragma(inline, false);
2| return reduce!(_asum)(F(0), x);
| }
|}
|
|/// ditto
|auto asum(SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x)
|{
| alias T = DeepElementType!(typeof(x));
2| return .asum!(realType!T)(x);
|}
|
|/// SASUM, DASUM
|unittest
|{
| import mir.ndslice.allocation: slice;
1| auto x = slice!double(4);
1| x[] = [0, -1, -2, 3];
1| assert(asum(x) == 1 + 2 + 3);
|}
|
|/// SCASUM, DZASUM
|unittest
|{
| import mir.ndslice.allocation: slice;
|
1| auto x = slice!cdouble(2);
1| x[] = [0 - 1i, -2 + 3i];
|
1| assert(asum(x) == 1 + 2 + 3);
|}
|
|/++
|Finds the index of the first element having maximum `|Re(.)| + |Im(.)|`.
|Returns: index of the first element having maximum `|Re(.)| + |Im(.)|`
|Params: x = 1-dimensional tensor
|BLAS: ISAMAX, IDAMAX, ICAMAX, IZAMAX
|+/
|sizediff_t iamax(Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) x)
|{
| static if (_shouldBeCastedToUnqual!Iterator)
| {
| return .iamax(cast(Slice!(1, Unqual!R))x);
| }
| else
| {
| pragma(inline, false);
4| if (x.length == 0)
2| return -1;
2| if (x._stride == 0)
0000000| return 0;
| alias T = Unqual!(DeepElementType!(typeof(x)));
| alias F = realType!T;
| static if (isFloatingPoint!F)
2| auto m = -double.infinity;
| else
| auto m = F.min;
2| sizediff_t l = x.length;
2| sizediff_t r = x.length;
| do
| {
10| auto f = x.front;
| static if (isComplex!T)
| {
4| auto e = f.re.fabs + f.im.fabs;
| }
| else
| static if (isFloatingPoint!T)
| {
6| auto e = f.fabs;
| }
| else
| {
| static if (isUnsigned!T)
| auto e = f;
| else
| auto e = (f >= 0 ? f : -f);
| }
|
10| if (e > m)
| {
6| m = e;
6| r = x.length;
| }
10| x.popFront;
| }
10| while (x.length);
2| return l - r;
| }
|}
|
|/// ISAMAX, IDAMAX
|unittest
|{
| import mir.ndslice.allocation: slice;
1| auto x = slice!double(6);
| // 0 1 2 3 4 5
1| x[] = [0, -1, -2, -3, 3, 2];
1| assert(iamax(x) == 3);
| // -1 for empty vectors
1| assert(iamax(x[0 .. 0]) == -1);
|}
|
|/// ICAMAX, IZAMAX
|unittest
|{
| import mir.ndslice.allocation: slice;
|
1| auto x = slice!cdouble(4);
| // 0 1 2 3
1| x[] = [0 + -1i, -2 + 3i, 2 + 3i, 2 + 2i];
|
1| assert(iamax(x) == 1);
| // -1 for empty vectors
1| assert(iamax(x[$ .. $]) == -1);
|}
|
|/++
|Finds the maximum `|Re(.)| + |Im(.)|` of the elements of a vector
|(the absolute value for real vectors).
|Returns: maximum `|Re(.)| + |Im(.)|`; `0` for empty tensors
|Params:
| x = n-dimensional tensor
|+/
|auto amax(SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x)
|{
| static if (_shouldBeCastedToUnqual!Iterator)
| {
| return .amax(cast(Slice!(N, Unqual!R))x);
| }
| else
| {
| pragma(inline, false);
| alias T = DeepElementType!(typeof(x));
| alias F = realType!T;
4| return reduce!(_amax)(F(0), x);
| }
|}
|
|///
|unittest
|{
| import mir.ndslice.allocation: slice;
1| auto x = slice!double(6);
1| x[] = [0, -1, -2, -7, 6, 2];
1| assert(amax(x) == 7);
| // 0 for empty vectors
1| assert(amax(x[0 .. 0]) == 0);
|}
|
|///
|unittest
|{
| import mir.ndslice.allocation: slice;
|
1| auto x = slice!cdouble(4);
1| x[] = [0 + -1i, -7 + 3i, 2 + 3i, 2 + 2i];
|
1| assert(amax(x) == 10);
| // 0 for empty vectors
1| assert(amax(x[$ .. $]) == 0);
|}
source/mir/glas/l1.d is 99% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-algorithm-3.10.12-mir-algorithm-source-mir-ndslice-slice.lst
|/++
|This is a submodule of $(MREF mir, ndslice).
|
|Safety_note:
| User-defined iterators are responsible for their own safety, except for bounds checks.
| Bounds are checked in ndslice code.
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Copyright: 2020 Ilya Yaroshenko, Kaleidic Associates Advisory Limited, Symmetry Investments
|Authors: Ilya Yaroshenko
|
|$(BOOKTABLE $(H2 Definitions),
|$(TR $(TH Name) $(TH Description))
|$(T2 Slice, N-dimensional slice.)
|$(T2 SliceKind, SliceKind of $(LREF Slice) enumeration.)
|$(T2 Universal, Alias for $(LREF .SliceKind.universal).)
|$(T2 Canonical, Alias for $(LREF .SliceKind.canonical).)
|$(T2 Contiguous, Alias for $(LREF .SliceKind.contiguous).)
|$(T2 sliced, Creates a slice on top of an iterator, a pointer, or an array's pointer.)
|$(T2 slicedField, Creates a slice on top of a field, a random access range, or an array.)
|$(T2 slicedNdField, Creates a slice on top of an ndField.)
|$(T2 kindOf, Extracts $(LREF SliceKind).)
|$(T2 isSlice, Checks if the type is `Slice` instance.)
|$(T2 Structure, A tuple of lengths and strides.)
|)
|
|Macros:
|SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP)
|T2=$(TR $(TDNW $(LREF $1)) $(TD $+))
|T4=$(TR $(TDNW $(LREF $1)) $(TD $2) $(TD $3) $(TD $4))
|STD = $(TD $(SMALL $0))
|+/
|module mir.ndslice.slice;
|
|import mir.internal.utility : Iota;
|import mir.math.common : optmath;
|import mir.ndslice.concatenation;
|import mir.ndslice.field;
|import mir.ndslice.internal;
|import mir.ndslice.iterator;
|import mir.ndslice.traits: isIterator;
|import mir.primitives;
|import mir.qualifier;
|import mir.utility;
|import std.meta;
|import std.traits;
|
|public import mir.primitives: DeepElementType;
|
|/++
|Checks if type `T` has an `asSlice` property that returns a slice.
|If so, aliases itself to the dimension count of that slice; otherwise to `0`.
|+/
|template hasAsSlice(T)
|{
| static if (__traits(hasMember, T, "asSlice"))
| enum size_t hasAsSlice = typeof(T.init.asSlice).N;
| else
| enum size_t hasAsSlice = 0;
|}
|
|///
|version(mir_test) unittest
|{
| import mir.series;
| static assert(!hasAsSlice!(int[]));
| static assert(hasAsSlice!(SeriesMap!(int, string)) == 1);
|}
|
|/++
|Checks if the $(LREF toSlice) function can be called with type `T`.
|+/
|enum isConvertibleToSlice(T) = isSlice!T || isDynamicArray!T || hasAsSlice!T;
|
|///
|version(mir_test) unittest
|{
| import mir.series: SeriesMap;
| static assert(isConvertibleToSlice!(immutable int[]));
| static assert(isConvertibleToSlice!(string[]));
| static assert(isConvertibleToSlice!(SeriesMap!(string, int)));
| static assert(isConvertibleToSlice!(Slice!(int*)));
|}
|
|/++
|Returns:
| Ndslice view over the same data.
|See_also: $(LREF isConvertibleToSlice).
|+/
|auto toSlice(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) val)
|{
| import core.lifetime: move;
| return val.move;
|}
|
|/// ditto
|auto toSlice(Iterator, size_t N, SliceKind kind)(const Slice!(Iterator, N, kind) val)
|{
| return val[];
|}
|
|/// ditto
|auto toSlice(Iterator, size_t N, SliceKind kind)(immutable Slice!(Iterator, N, kind) val)
|{
| return val[];
|}
|
|/// ditto
|auto toSlice(T)(T[] val)
|{
| return val.sliced;
|}
|
|/// ditto
|auto toSlice(T)(T val)
| if (hasAsSlice!T || __traits(hasMember, T, "moveToSlice"))
|{
| static if (__traits(hasMember, T, "moveToSlice"))
| return val.moveToSlice;
| else
| return val.asSlice;
|}
|
|/// ditto
|auto toSlice(T)(ref T val)
| if (hasAsSlice!T)
|{
| return val.asSlice;
|}
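|
|/*
|Editor's note: a small usage sketch for `toSlice` (not part of the original
|module tests); both dynamic arrays and existing slices are accepted.
|*/
|version(mir_test) unittest
|{
|    auto a = [1, 2, 3].toSlice;
|    static assert(is(typeof(a) == Slice!(int*)));
|    auto s = a.toSlice; // already a slice: the view is simply moved
|    assert(s == [1, 2, 3]);
|}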
|
|///
|template toSlices(args...)
|{
| static if (args.length)
| {
| alias arg = args[0];
| alias Arg = typeof(arg);
| static if (isMutable!Arg && isSlice!Arg)
| alias slc = arg;
| else
| @optmath @property auto ref slc()()
| {
| return toSlice(arg);
| }
| alias toSlices = AliasSeq!(slc, toSlices!(args[1..$]));
| }
| else
| alias toSlices = AliasSeq!();
|}
|
|/++
|Checks if the type is `Slice` instance.
|+/
|enum isSlice(T) = is(T : Slice!(Iterator, N, kind), Iterator, size_t N, SliceKind kind);
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| alias A = uint[];
| alias S = Slice!(int*);
|
| static assert(isSlice!S);
| static assert(!isSlice!A);
|}
|
|/++
|SliceKind of $(LREF Slice).
|See_also:
| $(SUBREF topology, universal),
| $(SUBREF topology, canonical),
| $(SUBREF topology, assumeCanonical),
| $(SUBREF topology, assumeContiguous).
|+/
|enum mir_slice_kind
|{
| /// A slice has strides for all dimensions.
| universal,
| /// A slice has >= 2 dimensions, and its row dimension is contiguous.
| canonical,
| /// A slice is flat contiguous data without strides.
| contiguous,
|}
|/// ditto
|alias SliceKind = mir_slice_kind;
|
|/++
|Alias for $(LREF .SliceKind.universal).
|
|See_also:
| Internal Binary Representation section in $(LREF Slice).
|+/
|alias Universal = SliceKind.universal;
|/++
|Alias for $(LREF .SliceKind.canonical).
|
|See_also:
| Internal Binary Representation section in $(LREF Slice).
|+/
|alias Canonical = SliceKind.canonical;
|/++
|Alias for $(LREF .SliceKind.contiguous).
|
|See_also:
| Internal Binary Representation section in $(LREF Slice).
|+/
|alias Contiguous = SliceKind.contiguous;
|
|/// Extracts $(LREF SliceKind).
|enum kindOf(T : Slice!(Iterator, N, kind), Iterator, size_t N, SliceKind kind) = kind;
|
|///
|@safe pure nothrow @nogc
|version(mir_test) unittest
|{
| static assert(kindOf!(Slice!(int*, 1, Universal)) == Universal);
|}
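|
|/*
|Editor's note: an illustrative sketch (not part of the original tests) of how
|each kind is typically obtained with `mir.ndslice.topology.canonical` and
|`mir.ndslice.topology.universal`.
|*/
|version(mir_test) unittest
|{
|    import mir.ndslice.topology: iota, canonical, universal;
|    static assert(kindOf!(typeof(iota(2, 3))) == Contiguous);
|    static assert(kindOf!(typeof(iota(2, 3).canonical)) == Canonical);
|    static assert(kindOf!(typeof(iota(2, 3).universal)) == Universal);
|}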
|
|/// Extracts iterator type from a $(LREF Slice).
|alias IteratorOf(T : Slice!(Iterator, N, kind), Iterator, size_t N, SliceKind kind) = Iterator;
|
|private template SkipDimension(size_t dimension, size_t index)
|{
| static if (index < dimension)
| enum SkipDimension = index;
| else
| static if (index == dimension)
| static assert (0, "SkipInex: wrong index");
| else
| enum SkipDimension = index - 1;
|}
|
|/++
|Creates an n-dimensional slice-shell over an iterator.
|Params:
| iterator = An iterator, a pointer, or an array.
| lengths = A list of lengths for each dimension
|Returns:
| n-dimensional slice
|+/
|auto sliced(size_t N, Iterator)(Iterator iterator, size_t[N] lengths...)
| if (!isStaticArray!Iterator && N
| && !is(Iterator : Slice!(_Iterator, _N, kind), _Iterator, size_t _N, SliceKind kind))
|{
| alias C = ImplicitlyUnqual!(typeof(iterator));
0000000| size_t[N] _lengths;
| foreach (i; Iota!N)
0000000| _lengths[i] = lengths[i];
0000000| ptrdiff_t[1] _strides = 0;
| static if (isDynamicArray!Iterator)
| {
| assert(lengthsProduct(_lengths) <= iterator.length,
| "array length should be greater or equal to the product of constructed ndslice lengths");
| auto ptr = iterator.length ? &iterator[0] : null;
| return Slice!(typeof(C.init[0])*, N)(_lengths, ptr);
| }
| else
| {
| // break safety
0000000| if (false)
| {
| ++iterator;
| --iterator;
| iterator += 34;
| iterator -= 34;
| }
| import core.lifetime: move;
0000000| return Slice!(C, N)(_lengths, iterator.move);
| }
|}
|
|/// Random access range primitives for slices over user defined types
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| struct MyIota
| {
| //`[index]` operator overloading
| auto opIndex(size_t index) @safe nothrow
| {
| return index;
| }
|
| auto lightConst()() const @property { return MyIota(); }
| auto lightImmutable()() immutable @property { return MyIota(); }
| }
|
| import mir.ndslice.iterator: FieldIterator;
| alias Iterator = FieldIterator!MyIota;
| alias S = Slice!(Iterator, 2);
| import std.range.primitives;
| static assert(hasLength!S);
| static assert(hasSlicing!S);
| static assert(isRandomAccessRange!S);
|
| auto slice = Iterator().sliced(20, 10);
| assert(slice[1, 2] == 12);
| auto sCopy = slice.save;
| assert(slice[1, 2] == 12);
|}
|
|/++
|Creates a 1-dimensional slice-shell over an array.
|Params:
| array = An array.
|Returns:
| 1-dimensional slice
|+/
|Slice!(T*) sliced(T)(T[] array) @trusted
|{
| version(LDC) pragma(inline, true);
| return Slice!(T*)([array.length], array.ptr);
|}
|
|/// Creates a slice from an array.
|@safe pure nothrow version(mir_test) unittest
|{
| auto slice = new int[10].sliced;
| assert(slice.length == 10);
| static assert(is(typeof(slice) == Slice!(int*)));
|}
|
|/++
|Creates an n-dimensional slice-shell over the 1-dimensional input slice.
|Params:
| slice = slice
| lengths = A list of lengths for each dimension.
|Returns:
| n-dimensional slice
|+/
|Slice!(Iterator, N, kind)
| sliced
| (Iterator, size_t N, SliceKind kind)
| (Slice!(Iterator, 1, kind) slice, size_t[N] lengths...)
| if (N)
|{
| auto structure = typeof(return)._Structure.init;
| structure[0] = lengths;
| static if (kind != Contiguous)
| {
| import mir.ndslice.topology: iota;
| structure[1] = structure[0].iota.strides;
| }
| import core.lifetime: move;
| return typeof(return)(structure, slice._iterator.move);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| auto data = new int[24];
| foreach (i, ref e; data)
| e = cast(int)i;
| auto a = data[0..10].sliced(10)[0..6].sliced(2, 3);
| auto b = iota!int(10)[0..6].sliced(2, 3);
| assert(a == b);
| a[] += b;
| foreach (i, e; data[0..6])
| assert(e == 2*i);
| foreach (i, e; data[6..$])
| assert(e == i+6);
|}
|
|/++
|Creates an n-dimensional slice-shell over a field.
|Params:
| field = A field. The length of the field must be greater
| than or equal to the product of the lengths.
| lengths = A list of lengths for each dimension.
|Returns:
| n-dimensional slice
|+/
|Slice!(FieldIterator!Field, N)
|slicedField(Field, size_t N)(Field field, size_t[N] lengths...)
| if (N)
|{
| static if (hasLength!Field)
0000000| assert(lengths.lengthsProduct <= field.length, "Length product should be less or equal to the field length.");
0000000| return FieldIterator!Field(0, field).sliced(lengths);
|}
|
|///ditto
|auto slicedField(Field)(Field field)
| if(hasLength!Field)
|{
0000000| return .slicedField(field, field.length);
|}
|
|/// Creates a 1-dimensional slice over a field, array, or random access range.
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| auto slice = 10.iota.slicedField;
| assert(slice.length == 10);
|}
|
|/++
|Creates an n-dimensional slice-shell over an ndField.
|Params:
| field = An ndField. Lengths must fit into the field's shape.
| lengths = A list of lengths for each dimension.
|Returns:
| n-dimensional slice
|See_also: $(SUBREF concatenation, concatenation) examples.
|+/
|Slice!(IndexIterator!(FieldIterator!(ndIotaField!N), ndField), N)
|slicedNdField(ndField, size_t N)(ndField field, size_t[N] lengths...)
| if (N)
|{
| static if(hasShape!ndField)
| {
| auto shape = field.shape;
| foreach (i; 0 .. N)
| assert(lengths[i] <= shape[i], "Lengths should fit into ndfield's shape.");
| }
| import mir.ndslice.topology: indexed, ndiota;
| return indexed(field, ndiota(lengths));
|}
|
|///ditto
|auto slicedNdField(ndField)(ndField field)
| if(hasShape!ndField)
|{
| return .slicedNdField(field, field.shape);
|}
|
|/++
|Combination of coordinate(s) and value.
|+/
|struct CoordinateValue(T, size_t N = 1)
|{
| ///
| size_t[N] index;
|
| ///
| T value;
|
| ///
| int opCmp()(scope auto ref const typeof(this) rht) const
| {
| return cmpCoo(this.index, rht.index);
| }
|}
|
|private int cmpCoo(size_t N)(scope const auto ref size_t[N] a, scope const auto ref size_t[N] b)
|{
| foreach (i; Iota!(0, N))
| if (a[i] != b[i])
| return a[i] > b[i] ? 1 : -1;
| return 0;
|}
|
|/++
|Presents $(LREF .Slice.structure).
|+/
|struct Structure(size_t N)
|{
| ///
| size_t[N] lengths;
| ///
| sizediff_t[N] strides;
|}
|
|package(mir) alias LightConstOfLightScopeOf(Iterator) = LightConstOf!(LightScopeOf!Iterator);
|package(mir) alias LightImmutableOfLightConstOf(Iterator) = LightImmutableOf!(LightScopeOf!Iterator);
|package(mir) alias ImmutableOfUnqualOfPointerTarget(Iterator) = immutable(Unqual!(PointerTarget!Iterator))*;
|package(mir) alias ConstOfUnqualOfPointerTarget(Iterator) = const(Unqual!(PointerTarget!Iterator))*;
|
|package(mir) template allLightScope(args...)
|{
| static if (args.length)
| {
| alias arg = args[0];
| alias Arg = typeof(arg);
| static if(!isDynamicArray!Arg)
| {
| static if(!is(LightScopeOf!Arg == Arg))
| @optmath @property ls()()
| {
| import mir.qualifier: lightScope;
| return lightScope(arg);
| }
| else alias ls = arg;
| }
| else alias ls = arg;
| alias allLightScope = AliasSeq!(ls, allLightScope!(args[1..$]));
| }
| else
| alias allLightScope = AliasSeq!();
|}
|
|/++
|Presents an n-dimensional view over a range.
|
|$(H3 Definitions)
|
|In order to change data in a slice using
|overloaded operators such as `=`, `+=`, `++`,
|a syntactic structure of type
|`[]` must be used.
|It is worth noting that just like for regular arrays, operations `a = b`
|and `a[] = b` have different meanings.
|In the first case, after the operation is carried out, `a` simply points at the same data as `b`
|does, and the data which `a` previously pointed at remains unmodified.
|Here, `a` and `b` must be of the same type.
|In the second case, `a` points at the same data as before,
|but the data itself will be changed. In this instance, the number of dimensions of `b`
|may be less than the number of dimensions of `a`; and `b` can be a Slice,
|a regular multidimensional array, or simply a value (e.g. a number).
|
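|A short sketch of the distinction (schematically, `a` and `b` are slices of compatible shape):
|
|-------
|a = b;    // `a` now refers to `b`'s data; the data `a` used to refer to is untouched
|a[] = b;  // copies the elements of `b` into the data `a` already refers to
|a[] += b; // element-wise operations use the same `[]` syntax
|-------
|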
|In the following table you will find the definitions you might come across
|in comments on operator overloading.
|
|$(BOOKTABLE
|$(TR $(TH Operator Overloading) $(TH Examples at `N == 3`))
|$(TR $(TD An $(B interval) is a part of a sequence of type `i .. j`.)
| $(STD `2..$-3`, `0..4`))
|$(TR $(TD An $(B index) is a part of a sequence of type `i`.)
| $(STD `3`, `$-1`))
|$(TR $(TD A $(B partially defined slice) is a sequence composed of
| $(B intervals) and $(B indices) with an overall length strictly less than `N`.)
| $(STD `[3]`, `[0..$]`, `[3, 3]`, `[0..$,0..3]`, `[0..$,2]`))
|$(TR $(TD A $(B fully defined index) is a sequence
| composed only of $(B indices) with an overall length equal to `N`.)
| $(STD `[2,3,1]`))
|$(TR $(TD A $(B fully defined slice) is an empty sequence
| or a sequence composed of $(B indices) and at least one
| $(B interval) with an overall length equal to `N`.)
| $(STD `[]`, `[3..$,0..3,0..$-1]`, `[2,0..$,1]`))
|$(TR $(TD An $(B indexed slice) is syntax sugar for $(SUBREF topology, indexed) and $(SUBREF topology, cartesian).)
| $(STD `[anNdslice]`, `[$.iota, anNdsliceForCartesian1, $.iota]`))
|)
|
|See_also:
| $(SUBREF topology, iota).
|
|$(H3 Internal Binary Representation)
|
|Multidimensional Slice is a structure that consists of lengths, strides, and an iterator (pointer).
|
|$(SUBREF topology, FieldIterator) shell is used to wrap fields and random access ranges.
|FieldIterator contains a shift of the current initial element of a multidimensional slice
|and the field itself.
|
|With the exception of $(MREF mir,ndslice,allocation) module, no functions in this
|package move or copy data. The operations are only carried out on lengths, strides,
|and pointers. If a slice is defined over a range, only the shift of the initial element
|changes instead of the range.
|
|Mir n-dimensional Slices can be one of the three kinds.
|
|$(H4 Contiguous slice)
|
|A tensor that is contiguous in memory (or in a user-defined iterator's field), row-major, and that doesn't store strides because they can be computed on the fly from the lengths.
|The row stride always equals 1.
|
|$(H4 Canonical slice)
|
|A canonical slice has contiguous-in-memory (or in a user-defined iterator's field) rows of a row-major tensor; it doesn't store the stride for the row dimension because that stride always equals 1.
|BLAS/LAPACK matrices are Canonical but originally have column-major order.
|At the same time, you can use 2D Canonical slices with LAPACK by treating rows as columns and columns as rows.
|
|$(H4 Universal slice)
|
|A row-major tensor that stores the strides for all dimensions.
|NumPy strides are Universal.
|
|$(H4 Internal Representation for Universal Slices)
|
|Type definition
|
|-------
|Slice!(Iterator, N, Universal)
|-------
|
|Schema
|
|-------
|Slice!(Iterator, N, Universal)
| size_t[N] _lengths
| sizediff_t[N] _strides
| Iterator _iterator
|-------
|
|$(H5 Example)
|
|Definitions
|
|-------
|import mir.ndslice;
|auto a = new double[24];
|Slice!(double*, 3, Universal) s = a.sliced(2, 3, 4).universal;
|Slice!(double*, 3, Universal) t = s.transposed!(1, 2, 0);
|Slice!(double*, 3, Universal) r = t.reversed!1;
|-------
|
|Representation
|
|-------
|s________________________
| lengths[0] ::= 2
| lengths[1] ::= 3
| lengths[2] ::= 4
|
| strides[0] ::= 12
| strides[1] ::= 4
| strides[2] ::= 1
|
| iterator ::= &a[0]
|
|t____transposed!(1, 2, 0)
| lengths[0] ::= 3
| lengths[1] ::= 4
| lengths[2] ::= 2
|
| strides[0] ::= 4
| strides[1] ::= 1
| strides[2] ::= 12
|
| iterator ::= &a[0]
|
|r______________reversed!1
| lengths[0] ::= 2
| lengths[1] ::= 3
| lengths[2] ::= 4
|
| strides[0] ::= 12
| strides[1] ::= -4
| strides[2] ::= 1
|
| iterator ::= &a[8] // (old_strides[1] * (lengths[1] - 1)) = 8
|-------
|
|$(H4 Internal Representation for Canonical Slices)
|
|Type definition
|
|-------
|Slice!(Iterator, N, Canonical)
|-------
|
|Schema
|
|-------
|Slice!(Iterator, N, Canonical)
| size_t[N] _lengths
| sizediff_t[N-1] _strides
| Iterator _iterator
|-------
|
|$(H4 Internal Representation for Contiguous Slices)
|
|Type definition
|
|-------
|Slice!(Iterator, N)
|-------
|
|Schema
|
|-------
|Slice!(Iterator, N, Contiguous)
| size_t[N] _lengths
| sizediff_t[0] _strides
| Iterator _iterator
|-------
|+/
|struct mir_slice(Iterator_, size_t N_ = 1, SliceKind kind_ = Contiguous, Labels_...)
| if (0 < N_ && N_ < 255 && !(kind_ == Canonical && N_ == 1) && Labels_.length <= N_ && isIterator!Iterator_)
|{
|@optmath:
|
| /// $(LREF SliceKind)
| enum SliceKind kind = kind_;
|
| /// Dimensions count
| enum size_t N = N_;
|
| /// Strides count
| enum size_t S = kind == Universal ? N : kind == Canonical ? N - 1 : 0;
|
| /// Labels count.
| enum size_t L = Labels_.length;
|
| /// Data iterator type
| alias Iterator = Iterator_;
|
| /// This type
| alias This = Slice!(Iterator, N, kind);
|
| /// Data element type
| alias DeepElement = typeof(Iterator.init[size_t.init]);
|
| ///
| alias serdeKeysProxy = DeepElementType;
|
| /// Label Iterators types
| alias Labels = Labels_;
|
| ///
| template Element(size_t dimension)
| if (dimension < N)
| {
| static if (N == 1)
| alias Element = DeepElement;
| else
| {
| static if (kind == Universal || dimension == N - 1)
| alias Element = mir_slice!(Iterator, N - 1, Universal);
| else
| static if (N == 2 || kind == Contiguous && dimension == 0)
| alias Element = mir_slice!(Iterator, N - 1);
| else
| alias Element = mir_slice!(Iterator, N - 1, Canonical);
| }
| }
|
|package(mir):
|
| enum doUnittest = is(Iterator == int*) && (N == 1 || N == 2) && kind == Contiguous;
|
| enum hasAccessByRef = __traits(compiles, &_iterator[0]);
|
| enum PureIndexLength(Slices...) = Filter!(isIndex, Slices).length;
|
| enum isPureSlice(Slices...) =
| Slices.length == 0
| || Slices.length <= N
| && PureIndexLength!Slices < N
| && Filter!(isIndex, Slices).length < Slices.length
| && allSatisfy!(templateOr!(isIndex, is_Slice), Slices);
|
|
| enum isFullPureSlice(Slices...) =
| Slices.length == 0
| || Slices.length == N
| && PureIndexLength!Slices < N
| && allSatisfy!(templateOr!(isIndex, is_Slice), Slices);
|
| enum isIndexedSlice(Slices...) =
| Slices.length
| && Slices.length <= N
| && allSatisfy!(isSlice, Slices)
| && anySatisfy!(templateNot!is_Slice, Slices);
|
| static if (S)
| {
| ///
| public alias _Structure = AliasSeq!(size_t[N], ptrdiff_t[S]);
| ///
| _Structure _structure;
| ///
| public alias _lengths = _structure[0];
| ///
| public alias _strides = _structure[1];
| }
| else
| {
| ///
| public alias _Structure = AliasSeq!(size_t[N]);
| ///
| _Structure _structure;
| ///
| public alias _lengths = _structure[0];
| ///
| public enum ptrdiff_t[S] _strides = ptrdiff_t[S].init;
| }
|
| /// Data Iterator
| public Iterator _iterator;
| /// Labels iterators
| public Labels _labels;
|
| sizediff_t backIndex(size_t dimension = 0)() @safe @property scope const
| if (dimension < N)
| {
| return _stride!dimension * (_lengths[dimension] - 1);
| }
|
| size_t indexStride(size_t I)(size_t[I] _indices) @safe scope const
| {
| static if (_indices.length)
| {
| static if (kind == Contiguous)
| {
| enum E = I - 1;
| assert(_indices[E] < _lengths[E], indexError!(E, N));
| ptrdiff_t ball = this._stride!E;
| ptrdiff_t stride = _indices[E] * ball;
| foreach_reverse (i; Iota!E) //static
| {
| ball *= _lengths[i + 1];
| assert(_indices[i] < _lengths[i], indexError!(i, N));
| stride += ball * _indices[i];
| }
| }
| else
| static if (kind == Canonical)
| {
| enum E = I - 1;
| assert(_indices[E] < _lengths[E], indexError!(E, N));
| static if (I == N)
| size_t stride = _indices[E];
| else
| size_t stride = _strides[E] * _indices[E];
| foreach_reverse (i; Iota!E) //static
| {
| assert(_indices[i] < _lengths[i], indexError!(i, N));
| stride += _strides[i] * _indices[i];
| }
| }
| else
| {
| enum E = I - 1;
| assert(_indices[E] < _lengths[E], indexError!(E, N));
| size_t stride = _strides[E] * _indices[E];
| foreach_reverse (i; Iota!E) //static
| {
| assert(_indices[i] < _lengths[i], indexError!(i, N));
| stride += _strides[i] * _indices[i];
| }
| }
| return stride;
| }
| else
| {
| return 0;
| }
| }
|
|public:
|
| // static if (S == 0)
| // {
| /// Defined for Contiguous Slice only
| // this()(size_t[N] lengths, in ptrdiff_t[] empty, Iterator iterator, Labels labels)
| // {
| // version(LDC) pragma(inline, true);
| // assert(empty.length == 0);
| // this._lengths = lengths;
| // this._iterator = iterator;
| // }
|
| // /// ditto
| // this()(size_t[N] lengths, Iterator iterator, Labels labels)
| // {
| // version(LDC) pragma(inline, true);
| // this._lengths = lengths;
| // this._iterator = iterator;
| // }
|
| // /// ditto
| // this()(size_t[N] lengths, in ptrdiff_t[] empty, Iterator iterator, Labels labels)
| // {
| // version(LDC) pragma(inline, true);
| // assert(empty.length == 0);
| // this._lengths = lengths;
| // this._iterator = iterator;
| // }
|
| // /// ditto
| // this()(size_t[N] lengths, Iterator iterator, Labels labels)
| // {
| // version(LDC) pragma(inline, true);
| // this._lengths = lengths;
| // this._iterator = iterator;
| // }
| // }
|
| // version(LDC)
| // private enum classicConstructor = true;
| // else
| // private enum classicConstructor = S > 0;
|
| // static if (classicConstructor)
| // {
| /// Defined for Canonical and Universal Slices (DMD, GDC, LDC) and for Contiguous Slices (LDC)
| // this()(size_t[N] lengths, ptrdiff_t[S] strides, Iterator iterator, Labels labels)
| // {
| // version(LDC) pragma(inline, true);
| // this._lengths = lengths;
| // this._strides = strides;
| // this._iterator = iterator;
| // this._labels = labels;
| // }
|
| // /// ditto
| // this()(size_t[N] lengths, ptrdiff_t[S] strides, ref Iterator iterator, Labels labels)
| // {
| // version(LDC) pragma(inline, true);
| // this._lengths = lengths;
| // this._strides = strides;
| // this._iterator = iterator;
| // this._labels = labels;
| // }
| // }
|
| // /// Construct from null
| // this()(typeof(null))
| // {
| // version(LDC) pragma(inline, true);
| // }
|
| // static if (doUnittest)
| // ///
| // @safe pure version(mir_test) unittest
| // {
| // import mir.ndslice.slice;
| // alias Array = Slice!(double*);
| // Array a = null;
| // auto b = Array(null);
| // assert(a.empty);
| // assert(b.empty);
|
| // auto fun(Array a = null)
| // {
|
| // }
| // }
|
| static if (doUnittest)
| /// Creates a 2-dimensional slice with custom strides.
| nothrow pure
| version(mir_test) unittest
| {
| uint[8] array = [1, 2, 3, 4, 5, 6, 7, 8];
| auto slice = Slice!(uint*, 2, Universal)([2, 2], [4, 1], array.ptr);
|
| assert(&slice[0, 0] == &array[0]);
| assert(&slice[0, 1] == &array[1]);
| assert(&slice[1, 0] == &array[4]);
| assert(&slice[1, 1] == &array[5]);
| assert(slice == [[1, 2], [5, 6]]);
|
| array[2] = 42;
| assert(slice == [[1, 2], [5, 6]]);
|
| array[1] = 99;
| assert(slice == [[1, 99], [5, 6]]);
| }
|
| /++
| Returns: View with stripped out reference counted context.
| The lifetime of the result mustn't be longer than the lifetime of the original slice.
| +/
| auto lightScope()() scope return @property
| {
| auto ret = Slice!(LightScopeOf!Iterator, N, kind, staticMap!(LightScopeOf, Labels))
| (_structure, .lightScope(_iterator));
| foreach(i; Iota!L)
| ret._labels[i] = .lightScope(_labels[i]);
| return ret;
| }
|
| /// ditto
| auto lightScope()() scope const return @property
| {
0000000| auto ret = Slice!(LightConstOf!(LightScopeOf!Iterator), N, kind, staticMap!(LightConstOfLightScopeOf, Labels))
| (_structure, .lightScope(_iterator));
| foreach(i; Iota!L)
| ret._labels[i] = .lightScope(_labels[i]);
0000000| return ret;
| }
|
| /// ditto
| auto lightScope()() scope immutable return @property
| {
| auto ret = Slice!(LightImmutableOf!(LightScopeOf!Iterator), N, kind, staticMap!(LightImmutableOfLightConstOf(Labels)))
| (_structure, .lightScope(_iterator));
| foreach(i; Iota!L)
| ret._labels[i] = .lightScope(_labels[i]);
| return ret;
| }
|
| /// Returns: Mutable slice over immutable data.
| Slice!(LightImmutableOf!Iterator, N, kind, staticMap!(LightImmutableOf, Labels)) lightImmutable()() scope return immutable @property
| {
| auto ret = typeof(return)(_structure, .lightImmutable(_iterator));
| foreach(i; Iota!L)
| ret._labels[i] = .lightImmutable(_labels[i]);
| return ret;
| }
|
| /// Returns: Mutable slice over const data.
| Slice!(LightConstOf!Iterator, N, kind, staticMap!(LightConstOf, Labels)) lightConst()() scope return const @property @trusted
| {
| auto ret = typeof(return)(_structure, .lightConst(_iterator));
| foreach(i; Iota!L)
| ret._labels[i] = .lightConst(_labels[i]);
| return ret;
| }
|
| /// ditto
| Slice!(LightImmutableOf!Iterator, N, kind, staticMap!(LightImmutableOf, Labels)) lightConst()() scope return immutable @property
| {
| return this.lightImmutable;
| }
|
| /// Label for the dimension `d`. By default returns the row label.
| Slice!(Labels[d])
| label(size_t d = 0)() @property
| if (d <= L)
| {
| return typeof(return)(_lengths[d], _labels[d]);
| }
|
| /// ditto
| void label(size_t d = 0)(Slice!(Labels[d]) rhs) @property
| if (d <= L)
| {
| import core.lifetime: move;
| assert(rhs.length == _lengths[d], "ndslice: labels dimension mismatch");
| _labels[d] = rhs._iterator.move;
| }
|
| /// ditto
| Slice!(LightConstOf!(Labels[d]))
| label(size_t d = 0)() @property const
| if (d <= L)
| {
| return typeof(return)(_lengths[d].lightConst, _labels[d]);
| }
|
| /// ditto
| Slice!(LightImmutableOf!(Labels[d]))
| label(size_t d = 0)() @property immutable
| if (d <= L)
| {
| return typeof(return)(_lengths[d].lightImmutable, _labels[d]);
| }
|
| /// Strips label off the DataFrame
| auto values()() @property
| {
| return Slice!(Iterator, N, kind)(_structure, _iterator);
| }
|
| /// ditto
| auto values()() @property const
| {
| return Slice!(LightConstOf!Iterator, N, kind)(_structure, .lightConst(_iterator));
| }
|
| /// ditto
| auto values()() @property immutable
| {
| return Slice!(LightImmutableOf!Iterator, N, kind)(_structure, .lightImmutable(_iterator));
| }
|
| /// `opIndex` overload for const slice
| auto ref opIndex(Indexes...)(Indexes indices) const @trusted
| if (isPureSlice!Indexes || isIndexedSlice!Indexes)
| {
| return lightConst.opIndex(indices);
| }
| /// `opIndex` overload for immutable slice
| auto ref opIndex(Indexes...)(Indexes indices) immutable @trusted
| if (isPureSlice!Indexes || isIndexedSlice!Indexes)
| {
| return lightImmutable.opIndex(indices);
| }
|
| static if (allSatisfy!(isPointer, Iterator, Labels))
| {
| private alias ConstThis = Slice!(const(Unqual!(PointerTarget!Iterator))*, N, kind);
| private alias ImmutableThis = Slice!(immutable(Unqual!(PointerTarget!Iterator))*, N, kind);
|
| /++
|        Cast to const and immutable slices in case the underlying range is a pointer.
| +/
| auto toImmutable()() scope return immutable @trusted pure nothrow @nogc
| {
| return Slice!(ImmutableOfUnqualOfPointerTarget!Iterator, N, kind, staticMap!(ImmutableOfUnqualOfPointerTarget, Labels))
| (_structure, _iterator, _labels);
| }
|
| /// ditto
| auto toConst()() scope return const @trusted pure nothrow @nogc
| {
| version(LDC) pragma(inline, true);
| return Slice!(ConstOfUnqualOfPointerTarget!Iterator, N, kind, staticMap!(ConstOfUnqualOfPointerTarget, Labels))
| (_structure, _iterator, _labels);
| }
|
| static if (!is(Slice!(const(Unqual!(PointerTarget!Iterator))*, N, kind) == This))
| /// ditto
| alias toConst this;
|
| static if (doUnittest)
| ///
| version(mir_test) unittest
| {
| static struct Foo
| {
| Slice!(int*) bar;
|
| int get(size_t i) immutable
| {
| return bar[i];
| }
|
| int get(size_t i) const
| {
| return bar[i];
| }
|
| int get(size_t i) inout
| {
| return bar[i];
| }
| }
| }
|
| static if (doUnittest)
| ///
| version(mir_test) unittest
| {
| Slice!(double*, 2, Universal) nn;
| Slice!(immutable(double)*, 2, Universal) ni;
| Slice!(const(double)*, 2, Universal) nc;
|
| const Slice!(double*, 2, Universal) cn;
| const Slice!(immutable(double)*, 2, Universal) ci;
| const Slice!(const(double)*, 2, Universal) cc;
|
| immutable Slice!(double*, 2, Universal) in_;
| immutable Slice!(immutable(double)*, 2, Universal) ii;
| immutable Slice!(const(double)*, 2, Universal) ic;
|
| nc = nc; nc = cn; nc = in_;
| nc = nc; nc = cc; nc = ic;
| nc = ni; nc = ci; nc = ii;
|
| void fun(T, size_t N)(Slice!(const(T)*, N, Universal) sl)
| {
| //...
| }
|
| fun(nn); fun(cn); fun(in_);
| fun(nc); fun(cc); fun(ic);
| fun(ni); fun(ci); fun(ii);
|
| static assert(is(typeof(cn[]) == typeof(nc)));
| static assert(is(typeof(ci[]) == typeof(ni)));
| static assert(is(typeof(cc[]) == typeof(nc)));
|
| static assert(is(typeof(in_[]) == typeof(ni)));
| static assert(is(typeof(ii[]) == typeof(ni)));
| static assert(is(typeof(ic[]) == typeof(ni)));
|
| ni = ci[];
| ni = in_[];
| ni = ii[];
| ni = ic[];
| }
| }
|
| /++
| Iterator
| Returns:
| Iterator (pointer) to the $(LREF .Slice.first) element.
| +/
| auto iterator()() inout scope return @property
| {
| return _iterator;
| }
|
| static if (kind == Contiguous && isPointer!Iterator)
| /++
|    The `ptr` alias is available only if the slice kind is $(LREF Contiguous) and $(LREF .Slice.iterator) is a pointer.
| +/
| alias ptr = iterator;
| else
| {
| import mir.rc.array: mir_rci;
| static if (kind == Contiguous && is(Iterator : mir_rci!ET, ET))
| auto ptr() scope return inout @property
| {
| return _iterator._iterator;
| }
| }
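|
|    // A minimal usage sketch (illustrative): for a contiguous pointer-based slice,
|    // `ptr` is an alias of `iterator` and both point at the first element.
|    static if (doUnittest)
|    pure nothrow version(mir_test) unittest
|    {
|        import mir.ndslice.allocation: slice;
|        auto sl = slice!int(2, 3);
|        assert(sl.ptr is sl.iterator);
|        assert(*sl.iterator == 0);
|    }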
|
| /++
| Field (array) data.
| Returns:
| Raw data slice.
| Constraints:
| Field is defined only for contiguous slices.
| +/
| auto field()() scope return @trusted @property
| {
| static assert(kind == Contiguous, "Slice.field is defined only for contiguous slices. Slice kind is " ~ kind.stringof);
| static if (is(typeof(_iterator[size_t(0) .. elementCount])))
| {
| return _iterator[size_t(0) .. elementCount];
| }
| else
| {
| import mir.ndslice.topology: flattened;
| return this.flattened;
| }
| }
|
| /// ditto
| auto field()() scope const return @trusted @property
| {
| return this.lightConst.field;
| }
|
| /// ditto
| auto field()() scope immutable return @trusted @property
| {
| return this.lightImmutable.field;
| }
|
| static if (doUnittest)
| ///
| @safe version(mir_test) unittest
| {
| auto arr = [1, 2, 3, 4];
| auto sl0 = arr.sliced;
| auto sl1 = arr.slicedField;
|
| assert(sl0.field is arr);
| assert(sl1.field is arr);
|
| arr = arr[1 .. $];
| sl0 = sl0[1 .. $];
| sl1 = sl1[1 .. $];
|
| assert(sl0.field is arr);
| assert(sl1.field is arr);
| assert((cast(const)sl1).field is arr);
| ()@trusted{ assert((cast(immutable)sl1).field is arr); }();
| }
|
| /++
| Returns: static array of lengths
| See_also: $(LREF .Slice.structure)
| +/
| size_t[N] shape()() @trusted @property scope const
| {
0000000| return _lengths[0 .. N];
| }
|
| static if (doUnittest)
| /// Regular slice
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : iota;
| assert(iota(3, 4, 5).shape == cast(size_t[3])[3, 4, 5]);
| }
|
| static if (doUnittest)
| /// Packed slice
| @safe @nogc pure nothrow
| version(mir_test) unittest
| {
| import mir.ndslice.topology : pack, iota;
| size_t[3] s = [3, 4, 5];
| assert(iota(3, 4, 5, 6, 7).pack!2.shape == s);
| }
|
| /++
|    Returns: static array of strides
| See_also: $(LREF .Slice.structure)
| +/
| ptrdiff_t[N] strides()() @trusted @property scope const
| {
| static if (N <= S)
| return _strides[0 .. N];
| else
| {
0000000| typeof(return) ret;
| static if (kind == Canonical)
| {
| foreach (i; Iota!S)
| ret[i] = _strides[i];
| ret[$-1] = 1;
| }
| else
| {
0000000| ret[$ - 1] = _stride!(N - 1);
| foreach_reverse (i; Iota!(N - 1))
0000000| ret[i] = ret[i + 1] * _lengths[i + 1];
| }
0000000| return ret;
| }
| }
|
| static if (doUnittest)
| /// Regular slice
| @safe @nogc pure nothrow
| version(mir_test) unittest
| {
| import mir.ndslice.topology : iota;
| size_t[3] s = [20, 5, 1];
| assert(iota(3, 4, 5).strides == s);
| }
|
| static if (doUnittest)
| /// Modified regular slice
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : pack, iota, universal;
| import mir.ndslice.dynamic : reversed, strided, transposed;
| assert(iota(3, 4, 50)
| .universal
| .reversed!2 //makes stride negative
| .strided!2(6) //multiplies stride by 6 and changes corresponding length
| .transposed!2 //brings dimension `2` to the first position
| .strides == cast(ptrdiff_t[3])[-6, 200, 50]);
| }
|
| static if (doUnittest)
| /// Packed slice
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : pack, iota;
| size_t[3] s = [20 * 42, 5 * 42, 1 * 42];
| assert(iota(3, 4, 5, 6, 7)
| .pack!2
| .strides == s);
| }
|
| /++
| Returns: static array of lengths and static array of strides
| See_also: $(LREF .Slice.shape)
| +/
| Structure!N structure()() @safe @property scope const
| {
| return typeof(return)(_lengths, strides);
| }
|
| static if (doUnittest)
| /// Regular slice
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : iota;
| assert(iota(3, 4, 5)
| .structure == Structure!3([3, 4, 5], [20, 5, 1]));
| }
|
| static if (doUnittest)
| /// Modified regular slice
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : pack, iota, universal;
| import mir.ndslice.dynamic : reversed, strided, transposed;
| assert(iota(3, 4, 50)
| .universal
| .reversed!2 //makes stride negative
| .strided!2(6) //multiplies stride by 6 and changes corresponding length
| .transposed!2 //brings dimension `2` to the first position
| .structure == Structure!3([9, 3, 4], [-6, 200, 50]));
| }
|
| static if (doUnittest)
| /// Packed slice
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : pack, iota;
| assert(iota(3, 4, 5, 6, 7)
| .pack!2
| .structure == Structure!3([3, 4, 5], [20 * 42, 5 * 42, 1 * 42]));
| }
|
| /++
| Save primitive.
| +/
| auto save()() scope return inout @property
| {
| return this;
| }
|
| static if (doUnittest)
| /// Save range
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : iota;
| auto slice = iota(2, 3).save;
| }
|
| static if (doUnittest)
| /// Pointer type.
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation;
|        //sl type is `Slice!(int*, 2)`
| auto sl = slice!int(2, 3).save;
| }
|
| /++
| Multidimensional `length` property.
| Returns: length of the corresponding dimension
| See_also: $(LREF .Slice.shape), $(LREF .Slice.structure)
| +/
| size_t length(size_t dimension = 0)() @safe @property scope const
| if (dimension < N)
| {
| return _lengths[dimension];
| }
|
| static if (doUnittest)
| ///
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : iota;
| auto slice = iota(3, 4, 5);
| assert(slice.length == 3);
| assert(slice.length!0 == 3);
| assert(slice.length!1 == 4);
| assert(slice.length!2 == 5);
| }
|
| alias opDollar = length;
|
| /++
| Multidimensional `stride` property.
| Returns: stride of the corresponding dimension
| See_also: $(LREF .Slice.structure)
| +/
| sizediff_t _stride(size_t dimension = 0)() @safe @property scope const
| if (dimension < N)
| {
| static if (dimension < S)
| {
| return _strides[dimension];
| }
| else
| static if (dimension + 1 == N)
| {
0000000| return 1;
| }
| else
| {
| size_t ball = _lengths[$ - 1];
| foreach_reverse(i; Iota!(dimension + 1, N - 1))
| ball *= _lengths[i];
| return ball;
| }
|
| }
|
| static if (doUnittest)
| /// Regular slice
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : iota;
| auto slice = iota(3, 4, 5);
| assert(slice._stride == 20);
| assert(slice._stride!0 == 20);
| assert(slice._stride!1 == 5);
| assert(slice._stride!2 == 1);
| }
|
| static if (doUnittest)
| /// Modified regular slice
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.dynamic : reversed, strided, swapped;
| import mir.ndslice.topology : universal, iota;
| assert(iota(3, 4, 50)
| .universal
| .reversed!2 //makes stride negative
| .strided!2(6) //multiplies stride by 6 and changes the corresponding length
| .swapped!(1, 2) //swaps dimensions `1` and `2`
| ._stride!1 == -6);
| }
|
| /++
| Multidimensional input range primitive.
| +/
| bool empty(size_t dimension = 0)() @safe @property scope const
| if (dimension < N)
| {
0000000| return _lengths[dimension] == 0;
| }
|
| static if (N == 1)
| {
| ///ditto
| auto ref front(size_t dimension = 0)() scope return @trusted @property
| if (dimension == 0)
| {
0000000| assert(!empty!dimension);
0000000| return *_iterator;
| }
|
| ///ditto
| auto ref front(size_t dimension = 0)() scope return @trusted @property const
| if (dimension == 0)
| {
| assert(!empty!dimension);
| return *_iterator.lightScope;
| }
|
| ///ditto
| auto ref front(size_t dimension = 0)() scope return @trusted @property immutable
| if (dimension == 0)
| {
| assert(!empty!dimension);
| return *_iterator.lightScope;
| }
| }
| else
| {
| /// ditto
| Element!dimension front(size_t dimension = 0)() scope return @property
| if (dimension < N)
| {
| typeof(return)._Structure structure_ = typeof(return)._Structure.init;
|
| foreach (i; Iota!(typeof(return).N))
| {
| enum j = i >= dimension ? i + 1 : i;
| structure_[0][i] = _lengths[j];
| }
|
| static if (!typeof(return).S || typeof(return).S + 1 == S)
| alias s = _strides;
| else
| auto s = strides;
|
| foreach (i; Iota!(typeof(return).S))
| {
| enum j = i >= dimension ? i + 1 : i;
| structure_[1][i] = s[j];
| }
|
| return typeof(return)(structure_, _iterator);
| }
|
| ///ditto
| auto front(size_t dimension = 0)() scope return @trusted @property const
| if (dimension < N)
| {
| assert(!empty!dimension);
| return this.lightConst.front!dimension;
| }
|
| ///ditto
| auto front(size_t dimension = 0)() scope return @trusted @property immutable
| if (dimension < N)
| {
| assert(!empty!dimension);
| return this.lightImmutable.front!dimension;
| }
| }
|
| static if (N == 1 && isMutable!DeepElement && !hasAccessByRef)
| {
| ///ditto
| auto ref front(size_t dimension = 0, T)(T value) scope return @trusted @property
| if (dimension == 0)
| {
| // check assign safety
| static auto ref fun(ref DeepElement t, ref T v) @safe
| {
| return t = v;
| }
| assert(!empty!dimension);
| static if (__traits(compiles, *_iterator = value))
| return *_iterator = value;
| else
| return _iterator[0] = value;
| }
| }
|
| ///ditto
| static if (N == 1)
| auto ref Element!dimension
| back(size_t dimension = 0)() scope return @trusted @property
| if (dimension < N)
| {
| assert(!empty!dimension);
| return _iterator[backIndex];
| }
| else
| auto ref Element!dimension
| back(size_t dimension = 0)() scope return @trusted @property
| if (dimension < N)
| {
| assert(!empty!dimension);
| auto structure_ = typeof(return)._Structure.init;
|
| foreach (i; Iota!(typeof(return).N))
| {
| enum j = i >= dimension ? i + 1 : i;
| structure_[0][i] = _lengths[j];
| }
|
| static if (!typeof(return).S || typeof(return).S + 1 == S)
| alias s =_strides;
| else
| auto s = strides;
|
| foreach (i; Iota!(typeof(return).S))
| {
| enum j = i >= dimension ? i + 1 : i;
| structure_[1][i] = s[j];
| }
|
| return typeof(return)(structure_, _iterator + backIndex!dimension);
| }
|
| static if (N == 1 && isMutable!DeepElement && !hasAccessByRef)
| {
| ///ditto
| auto ref back(size_t dimension = 0, T)(T value) scope return @trusted @property
| if (dimension == 0)
| {
| // check assign safety
| static auto ref fun(ref DeepElement t, ref T v) @safe
| {
| return t = v;
| }
| assert(!empty!dimension);
| return _iterator[backIndex] = value;
| }
| }
|
| ///ditto
| void popFront(size_t dimension = 0)() @trusted
| if (dimension < N && (dimension == 0 || kind != Contiguous))
| {
0000000| assert(_lengths[dimension], __FUNCTION__ ~ ": length!" ~ dimension.stringof ~ " should be greater than 0.");
0000000| _lengths[dimension]--;
| static if ((kind == Contiguous || kind == Canonical) && dimension + 1 == N)
0000000| ++_iterator;
| else
| static if (kind == Canonical || kind == Universal)
| _iterator += _strides[dimension];
| else
| _iterator += _stride!dimension;
| }
|
| ///ditto
| void popBack(size_t dimension = 0)() @safe scope
| if (dimension < N && (dimension == 0 || kind != Contiguous))
| {
| assert(_lengths[dimension], __FUNCTION__ ~ ": length!" ~ dimension.stringof ~ " should be greater than 0.");
| --_lengths[dimension];
| }
|
| ///ditto
| void popFrontExactly(size_t dimension = 0)(size_t n) @trusted scope
| if (dimension < N && (dimension == 0 || kind != Contiguous))
| {
| assert(n <= _lengths[dimension],
| __FUNCTION__ ~ ": n should be less than or equal to length!" ~ dimension.stringof);
| _lengths[dimension] -= n;
| _iterator += _stride!dimension * n;
| }
|
| ///ditto
| void popBackExactly(size_t dimension = 0)(size_t n) @safe scope
| if (dimension < N && (dimension == 0 || kind != Contiguous))
| {
| assert(n <= _lengths[dimension],
| __FUNCTION__ ~ ": n should be less than or equal to length!" ~ dimension.stringof);
| _lengths[dimension] -= n;
| }
|
| ///ditto
| void popFrontN(size_t dimension = 0)(size_t n) @trusted scope
| if (dimension < N && (dimension == 0 || kind != Contiguous))
| {
| popFrontExactly!dimension(min(n, _lengths[dimension]));
| }
|
| ///ditto
| void popBackN(size_t dimension = 0)(size_t n) @safe scope
| if (dimension < N && (dimension == 0 || kind != Contiguous))
| {
| popBackExactly!dimension(min(n, _lengths[dimension]));
| }
|
| static if (doUnittest)
| ///
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import std.range.primitives;
| import mir.ndslice.topology : iota, canonical;
| auto slice = iota(10, 20, 30).canonical;
|
| static assert(isRandomAccessRange!(typeof(slice)));
| static assert(hasSlicing!(typeof(slice)));
| static assert(hasLength!(typeof(slice)));
|
| assert(slice.shape == cast(size_t[3])[10, 20, 30]);
| slice.popFront;
| slice.popFront!1;
| slice.popBackExactly!2(4);
| assert(slice.shape == cast(size_t[3])[9, 19, 26]);
|
| auto matrix = slice.front!1;
| assert(matrix.shape == cast(size_t[2])[9, 26]);
|
| auto column = matrix.back!1;
| assert(column.shape == cast(size_t[1])[9]);
|
| slice.popFrontExactly!1(slice.length!1);
| assert(slice.empty == false);
| assert(slice.empty!1 == true);
| assert(slice.empty!2 == false);
| assert(slice.shape == cast(size_t[3])[9, 0, 26]);
|
| assert(slice.back.front!1.empty);
|
| slice.popFrontN!0(40);
| slice.popFrontN!2(40);
| assert(slice.shape == cast(size_t[3])[0, 0, 0]);
| }
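|
|    // A minimal 1-D sketch (illustrative): `front` and `back` of a mutable
|    // one-dimensional slice can be assigned directly.
|    static if (doUnittest)
|    @safe pure nothrow version(mir_test) unittest
|    {
|        import mir.ndslice.allocation: slice;
|        auto sl = slice!int(3);
|        sl.front = 5;
|        sl.back = 7;
|        assert(sl == [5, 0, 7]);
|    }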
|
| package(mir) ptrdiff_t lastIndex()() @safe @property scope const
| {
| static if (kind == Contiguous)
| {
| return elementCount - 1;
| }
| else
| {
| auto strides = strides;
| ptrdiff_t shift = 0;
| foreach(i; Iota!N)
| shift += strides[i] * (_lengths[i] - 1);
| return shift;
| }
| }
|
| static if (N > 1)
| {
| /// Accesses the first deep element of the slice.
| auto ref first()() scope return @trusted @property
| {
| assert(!anyEmpty);
| return *_iterator;
| }
|
| static if (isMutable!DeepElement && !hasAccessByRef)
| ///ditto
| auto ref first(T)(T value) scope return @trusted @property
| {
| assert(!anyEmpty);
| static if (__traits(compiles, *_iterator = value))
| return *_iterator = value;
| else
| return _iterator[0] = value;
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow @nogc version(mir_test) unittest
| {
| import mir.ndslice.topology: iota, universal, canonical;
| auto f = 5;
| assert([2, 3].iota(f).first == f);
| }
|
| /// Accesses the last deep element of the slice.
| auto ref last()() @trusted scope return @property
| {
| assert(!anyEmpty);
| return _iterator[lastIndex];
| }
|
| static if (isMutable!DeepElement && !hasAccessByRef)
| ///ditto
| auto ref last(T)(T value) @trusted scope return @property
| {
| assert(!anyEmpty);
| return _iterator[lastIndex] = value;
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow @nogc version(mir_test) unittest
| {
| import mir.ndslice.topology: iota;
| auto f = 5;
| assert([2, 3].iota(f).last == f + 2 * 3 - 1);
| }
|
| static if (kind_ != SliceKind.contiguous)
|        /// Performs `popFront` for all dimensions
| void popFrontAll()
| {
| assert(!anyEmpty);
| foreach(d; Iota!N_)
| popFront!d;
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology: iota, canonical;
| auto v = [2, 3].iota.canonical;
| v.popFrontAll;
| assert(v == [[4, 5]]);
| }
|
| static if (kind_ != SliceKind.contiguous)
|        /// Performs `popBack` for all dimensions
| void popBackAll()
| {
| assert(!anyEmpty);
| foreach(d; Iota!N_)
| popBack!d;
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology: iota, canonical;
| auto v = [2, 3].iota.canonical;
| v.popBackAll;
| assert(v == [[0, 1]]);
| }
| }
| else
| {
| alias first = front;
| alias last = back;
| alias popFrontAll = popFront;
| alias popBackAll = popBack;
| }
|
| /+
|    Returns: `true` if the length of any dimension of the completely unpacked slice equals `0`, and `false` otherwise.
| +/
| private bool anyRUEmpty()() @trusted @property scope const
| {
| static if (isInstanceOf!(SliceIterator, Iterator))
| {
| import mir.ndslice.topology: unpack;
| return this.lightScope.unpack.anyRUEmpty;
| }
| else
| return _lengths[0 .. N].anyEmptyShape;
| }
|
|
| /++
|    Returns: `true` if the length of any dimension equals `0`, and `false` otherwise.
| +/
| bool anyEmpty()() @trusted @property scope const
| {
0000000| return _lengths[0 .. N].anyEmptyShape;
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow @nogc version(mir_test) unittest
| {
| import mir.ndslice.topology : iota, canonical;
| auto s = iota(2, 3).canonical;
| assert(!s.anyEmpty);
| s.popFrontExactly!1(3);
| assert(s.anyEmpty);
| }
|
| /++
| Convenience function for backward indexing.
|
| Returns: `this[$-index[0], $-index[1], ..., $-index[N-1]]`
| +/
| auto ref backward()(size_t[N] index) scope return
| {
| foreach (i; Iota!N)
| index[i] = _lengths[i] - index[i];
| return this[index];
| }
|
| /// ditto
| auto ref backward()(size_t[N] index) scope return const
| {
| return this.lightConst.backward(index);
| }
|
| static if (doUnittest)
| ///
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : iota;
| auto s = iota(2, 3);
| assert(s[$ - 1, $ - 2] == s.backward([1, 2]));
| }
|
| /++
| Returns: Total number of elements in a slice
| +/
| size_t elementCount()() @safe @property scope const
| {
0000000| size_t len = 1;
| foreach (i; Iota!N)
0000000| len *= _lengths[i];
0000000| return len;
| }
|
| deprecated("use elementCount instead")
| alias elementsCount = elementCount;
|
| static if (doUnittest)
| /// Regular slice
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : iota;
| assert(iota(3, 4, 5).elementCount == 60);
| }
|
|
| static if (doUnittest)
| /// Packed slice
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : pack, evertPack, iota;
| auto slice = iota(3, 4, 5, 6, 7, 8);
| auto p = slice.pack!2;
| assert(p.elementCount == 360);
| assert(p[0, 0, 0, 0].elementCount == 56);
| assert(p.evertPack.elementCount == 56);
| }
|
| /++
| Slice selected dimension.
| Params:
| begin = initial index of the sub-slice (inclusive)
| end = final index of the sub-slice (noninclusive)
| Returns: ndslice with `length!dimension` equal to `end - begin`.
| +/
| auto select(size_t dimension)(size_t begin, size_t end) scope return
| {
| static if (kind == Contiguous && dimension)
| {
| import mir.ndslice.topology: canonical;
| auto ret = this.canonical;
| }
| else
| {
| auto ret = this;
| }
| auto len = end - begin;
| assert(len <= ret._lengths[dimension]);
| ret._lengths[dimension] = len;
| ret._iterator += ret._stride!dimension * begin;
| return ret;
| }
|
| static if (doUnittest)
| ///
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : iota;
| auto sl = iota(3, 4);
| assert(sl.select!1(1, 3) == sl[0 .. $, 1 .. 3]);
| }
|
| /++
| Select the first n elements for the dimension.
| Params:
| dimension = Dimension to slice.
| n = count of elements for the dimension
| Returns: ndslice with `length!dimension` equal to `n`.
| +/
| auto selectFront(size_t dimension)(size_t n) scope return
| {
| static if (kind == Contiguous && dimension)
| {
| import mir.ndslice.topology: canonical;
| auto ret = this.canonical;
| }
| else
| {
| auto ret = this;
| }
| assert(n <= ret._lengths[dimension]);
| ret._lengths[dimension] = n;
| return ret;
| }
|
| static if (doUnittest)
| ///
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : iota;
| auto sl = iota(3, 4);
| assert(sl.selectFront!1(2) == sl[0 .. $, 0 .. 2]);
| }
|
| /++
| Select the last n elements for the dimension.
| Params:
| dimension = Dimension to slice.
| n = count of elements for the dimension
| Returns: ndslice with `length!dimension` equal to `n`.
| +/
| auto selectBack(size_t dimension)(size_t n) scope return
| {
| static if (kind == Contiguous && dimension)
| {
| import mir.ndslice.topology: canonical;
| auto ret = this.canonical;
| }
| else
| {
| auto ret = this;
| }
| assert(n <= ret._lengths[dimension]);
| ret._iterator += ret._stride!dimension * (ret._lengths[dimension] - n);
| ret._lengths[dimension] = n;
| return ret;
| }
|
| static if (doUnittest)
| ///
| @safe @nogc pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : iota;
| auto sl = iota(3, 4);
| assert(sl.selectBack!1(2) == sl[0 .. $, $ - 2 .. $]);
| }
|
|    /++
|    Overloading `==` and `!=`: element-wise comparison with another slice.
|    +/
| bool opEquals(IteratorR, SliceKind rkind)(auto ref const Slice!(IteratorR, N, rkind) rslice) @trusted scope const
| {
| static if (
| __traits(isPOD, typeof(this))
| && __traits(isPOD, typeof(rslice))
| && __traits(compiles, this._iterator == rslice._iterator)
| )
| {
0000000| if (this._lengths != rslice._lengths)
0000000| return false;
0000000| if (this.strides == rslice.strides && this._iterator == rslice._iterator)
0000000| return true;
| }
| import mir.algorithm.iteration : equal;
0000000| return equal(this.lightScope, rslice.lightScope);
| }
|
| /// ditto
| bool opEquals(T)(scope const(T)[] arr) @trusted scope const
| {
| auto slice = this.lightConst;
| if (slice.length != arr.length)
| return false;
| if (arr.length) do
| {
| if (slice.front != arr[0])
| return false;
| slice.popFront;
| arr = arr[1 .. $];
| }
| while (arr.length);
| return true;
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow
| version(mir_test) unittest
| {
| auto a = [1, 2, 3, 4].sliced(2, 2);
|
| assert(a != [1, 2, 3, 4, 5, 6].sliced(2, 3));
| assert(a != [[1, 2, 3], [4, 5, 6]]);
|
| assert(a == [1, 2, 3, 4].sliced(2, 2));
| assert(a == [[1, 2], [3, 4]]);
|
| assert(a != [9, 2, 3, 4].sliced(2, 2));
| assert(a != [[9, 2], [3, 4]]);
| }
|
| static if (doUnittest)
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology : iota;
| assert(iota(2, 3).slice[0 .. $ - 2] == iota([4, 3], 2)[0 .. $ - 4]);
| }
|
| /++
| `Slice!(IotaIterator!size_t)` is the basic type for `[a .. b]` syntax for all ndslice based code.
| +/
| Slice!(IotaIterator!size_t) opSlice(size_t dimension)(size_t i, size_t j) @safe scope const
| if (dimension < N)
| in
| {
| assert(i <= j,
| "Slice.opSlice!" ~ dimension.stringof ~ ": the left opSlice boundary must be less than or equal to the right bound.");
| enum errorMsg = ": right opSlice boundary must be less than or equal to the length of the given dimension.";
| assert(j <= _lengths[dimension],
| "Slice.opSlice!" ~ dimension.stringof ~ errorMsg);
| }
| do
| {
| return typeof(return)(j - i, typeof(return).Iterator(i));
| }
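|
|    // A minimal sketch (illustrative): the `[i .. j]` expressions passed to `opIndex`
|    // produce the same view as `select`.
|    static if (doUnittest)
|    @safe @nogc pure nothrow version(mir_test) unittest
|    {
|        import mir.ndslice.topology : iota;
|        auto sl = iota(3, 4);
|        assert(sl[1 .. 3, 0 .. $] == sl.select!0(1, 3));
|    }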
|
| /++
| $(BOLD Fully defined index)
| +/
| auto ref opIndex()(size_t[N] _indices...) scope return @trusted
| {
| return _iterator[indexStride(_indices)];
| }
|
| /// ditto
| auto ref opIndex()(size_t[N] _indices...) scope return const @trusted
| {
| static if (is(typeof(_iterator[indexStride(_indices)])))
| return _iterator[indexStride(_indices)];
| else
| return .lightConst(.lightScope(_iterator))[indexStride(_indices)];
| }
|
| /// ditto
| auto ref opIndex()(size_t[N] _indices...) scope return immutable @trusted
| {
| static if (is(typeof(_iterator[indexStride(_indices)])))
| return _iterator[indexStride(_indices)];
| else
| return .lightImmutable(.lightScope(_iterator))[indexStride(_indices)];
| }
|
| /++
| $(BOLD Partially defined index)
| +/
| auto opIndex(size_t I)(size_t[I] _indices...) scope return @trusted
| if (I && I < N)
| {
| enum size_t diff = N - I;
| alias Ret = Slice!(Iterator, diff, diff == 1 && kind == Canonical ? Contiguous : kind);
| static if (I < S)
| return Ret(_lengths[I .. N], _strides[I .. S], _iterator + indexStride(_indices));
| else
| return Ret(_lengths[I .. N], _iterator + indexStride(_indices));
| }
|
| /// ditto
| auto opIndex(size_t I)(size_t[I] _indices...) scope return const
| if (I && I < N)
| {
| return this.lightConst.opIndex(_indices);
| }
|
| /// ditto
| auto opIndex(size_t I)(size_t[I] _indices...) scope return immutable
| if (I && I < N)
| {
| return this.lightImmutable.opIndex(_indices);
| }
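|
|    // A minimal sketch (illustrative): a partially defined index yields a
|    // lower-dimensional slice, so `sl[1]` is the second row of a matrix.
|    static if (doUnittest)
|    @safe @nogc pure nothrow version(mir_test) unittest
|    {
|        import mir.ndslice.topology : iota;
|        auto sl = iota(3, 4);
|        assert(sl[1] == sl[1, 0 .. $]);
|        assert(sl[1, 2] == sl[1][2]);
|    }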
|
| /++
| $(BOLD Partially or fully defined slice.)
| +/
| auto opIndex(Slices...)(Slices slices) scope return @trusted
| if (isPureSlice!Slices)
| {
| static if (Slices.length)
| {
| enum size_t j(size_t n) = n - Filter!(isIndex, Slices[0 .. n]).length;
| enum size_t F = PureIndexLength!Slices;
| enum size_t S = Slices.length;
| static assert(N - F > 0);
| size_t stride;
| static if (Slices.length == 1)
| enum K = kind;
| else
| static if (kind == Universal || Slices.length == N && isIndex!(Slices[$-1]))
| enum K = Universal;
| else
| static if (Filter!(isIndex, Slices[0 .. $-1]).length == Slices.length - 1 || N - F == 1)
| enum K = Contiguous;
| else
| enum K = Canonical;
| alias Ret = Slice!(Iterator, N - F, K);
| auto structure_ = Ret._Structure.init;
|
| enum bool shrink = kind == Canonical && slices.length == N;
| static if (shrink)
| {
| {
| enum i = Slices.length - 1;
| auto slice = slices[i];
| static if (isIndex!(Slices[i]))
| {
| assert(slice < _lengths[i], "Slice.opIndex: index must be less than length");
| stride += slice;
| }
| else
| {
| stride += slice._iterator._index;
| structure_[0][j!i] = slice._lengths[0];
| }
| }
| }
| static if (kind == Universal || kind == Canonical)
| {
| foreach_reverse (i, slice; slices[0 .. $ - shrink]) //static
| {
| static if (isIndex!(Slices[i]))
| {
| assert(slice < _lengths[i], "Slice.opIndex: index must be less than length");
| stride += _strides[i] * slice;
| }
| else
| {
| stride += _strides[i] * slice._iterator._index;
| structure_[0][j!i] = slice._lengths[0];
| structure_[1][j!i] = _strides[i];
| }
| }
| }
| else
| {
| ptrdiff_t ball = this._stride!(slices.length - 1);
| foreach_reverse (i, slice; slices) //static
| {
| static if (isIndex!(Slices[i]))
| {
| assert(slice < _lengths[i], "Slice.opIndex: index must be less than length");
| stride += ball * slice;
| }
| else
| {
| stride += ball * slice._iterator._index;
| structure_[0][j!i] = slice._lengths[0];
| static if (j!i < Ret.S)
| structure_[1][j!i] = ball;
| }
| static if (i)
| ball *= _lengths[i];
| }
| }
| foreach (i; Iota!(Slices.length, N))
| structure_[0][i - F] = _lengths[i];
| foreach (i; Iota!(Slices.length, N))
| static if (Ret.S > i - F)
| structure_[1][i - F] = _strides[i];
|
| return Ret(structure_, _iterator + stride);
| }
| else
| {
| return this;
| }
| }
|
| static if (doUnittest)
| ///
| pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation;
| auto slice = slice!int(5, 3);
|
| /// Fully defined slice
| assert(slice[] == slice);
| auto sublice = slice[0..$-2, 1..$];
|
| /// Partially defined slice
| auto row = slice[3];
| auto col = slice[0..$, 1];
| }
|
| /++
| $(BOLD Indexed slice.)
| +/
| auto opIndex(Slices...)(scope return Slices slices) scope return
| if (isIndexedSlice!Slices)
| {
| import mir.ndslice.topology: indexed, cartesian;
| static if (Slices.length == 1)
| alias index = slices[0];
| else
| auto index = slices.cartesian;
| return this.indexed(index);
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation: slice;
| auto sli = slice!int(4, 3);
| auto idx = slice!(size_t[2])(3);
| idx[] = [
| cast(size_t[2])[0, 2],
| cast(size_t[2])[3, 1],
| cast(size_t[2])[2, 0]];
|
| // equivalent to:
| // import mir.ndslice.topology: indexed;
| // sli.indexed(indx)[] = 1;
| sli[idx] = 1;
|
| assert(sli == [
| [0, 0, 1],
| [0, 0, 0],
| [1, 0, 0],
| [0, 1, 0],
| ]);
|
| foreach (row; sli[[1, 3].sliced])
| row[] += 2;
|
| assert(sli == [
| [0, 0, 1],
| [2, 2, 2], // <-- += 2
| [1, 0, 0],
| [2, 3, 2], // <-- += 2
| ]);
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology: iota;
| import mir.ndslice.allocation: slice;
| auto sli = slice!int(5, 6);
|
| // equivalent to
| // import mir.ndslice.topology: indexed, cartesian;
| // auto a = [0, sli.length!0 / 2, sli.length!0 - 1].sliced;
| // auto b = [0, sli.length!1 / 2, sli.length!1 - 1].sliced;
| // auto c = cartesian(a, b);
| // auto minor = sli.indexed(c);
| auto minor = sli[[0, $ / 2, $ - 1].sliced, [0, $ / 2, $ - 1].sliced];
|
| minor[] = iota!int([3, 3], 1);
|
| assert(sli == [
| // ↓ ↓ ↓︎
| [1, 0, 0, 2, 0, 3], // <---
| [0, 0, 0, 0, 0, 0],
| [4, 0, 0, 5, 0, 6], // <---
| [0, 0, 0, 0, 0, 0],
| [7, 0, 0, 8, 0, 9], // <---
| ]);
| }
|
| /++
|    Element-wise unary operator overloading.
| Returns:
| lazy slice of the same kind and the same structure
| Note:
|        Does not allocate a new slice or a closure.
| +/
| auto opUnary(string op)() scope return
| if (op == "*" || op == "~" || op == "-" || op == "+")
| {
| import mir.ndslice.topology: map;
| static if (op == "+")
| return this;
| else
| return this.map!(op ~ "a");
| }
|
| static if (doUnittest)
| ///
| version(mir_test) unittest
| {
| import mir.ndslice.topology;
|
| auto payload = [1, 2, 3, 4];
| auto s = iota([payload.length], payload.ptr); // slice of references;
| assert(s[1] == payload.ptr + 1);
|
| auto c = *s; // the same as s.map!"*a"
| assert(c[1] == *s[1]);
|
| *s[1] = 3;
| assert(c[1] == *s[1]);
| }
|
| /++
| Element-wise operator overloading for scalars.
| Params:
| value = a scalar
| Returns:
| lazy slice of the same kind and the same structure
| Note:
|        Does not allocate a new slice or a closure.
| +/
| auto opBinary(string op, T)(scope return T value) scope return
| if(!isSlice!T)
| {
| import mir.ndslice.topology: vmap;
| return this.vmap(LeftOp!(op, ImplicitlyUnqual!T)(value));
| }
|
| /// ditto
| auto opBinaryRight(string op, T)(scope return T value) scope return
| if(!isSlice!T)
| {
| import mir.ndslice.topology: vmap;
| return this.vmap(RightOp!(op, ImplicitlyUnqual!T)(value));
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow @nogc version(mir_test) unittest
| {
| import mir.ndslice.topology;
|
| // 0 1 2 3
| auto s = iota([4]);
| // 0 1 2 0
| assert(s % 3 == iota([4]).map!"a % 3");
| // 0 2 4 6
| assert(2 * s == iota([4], 0, 2));
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow @nogc version(mir_test) unittest
| {
| import mir.ndslice.topology;
|
| // 0 1 2 3
| auto s = iota([4]);
| // 0 1 4 9
| assert(s ^^ 2.0 == iota([4]).map!"a ^^ 2.0");
| }
|
| /++
| Element-wise operator overloading for slices.
| Params:
| rhs = a slice of the same shape.
| Returns:
|        lazy slice of the same shape that has $(LREF Contiguous) kind
| Note:
| Binary operator overloading is allowed if both slices are contiguous or one-dimensional.
| $(BR)
|        Does not allocate a new slice or a closure.
| +/
| auto opBinary(string op, RIterator, size_t RN, SliceKind rkind)
| (scope return Slice!(RIterator, RN, rkind) rhs) scope return
| if(N == RN && (kind == Contiguous && rkind == Contiguous || N == 1) && op != "~")
| {
| import mir.ndslice.topology: zip, map;
| return zip(this, rhs).map!("a " ~ op ~ " b");
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow @nogc version(mir_test) unittest
| {
| import mir.ndslice.topology: iota, map, zip;
|
| auto s = iota([2, 3]);
| auto c = iota([2, 3], 5, 8);
| assert(s * s + c == s.map!"a * a".zip(c).map!"a + b");
| }
|
| /++
| Duplicates slice.
| Returns: GC-allocated Contiguous mutable slice.
| See_also: $(LREF .Slice.idup)
| +/
| Slice!(Unqual!DeepElement*, N)
| dup()() scope @property
| {
| if (__ctfe)
| {
| import mir.ndslice.topology: flattened;
| import mir.array.allocation: array;
| return this.flattened.array.dup.sliced(this.shape);
| }
| else
| {
| import mir.ndslice.allocation: uninitSlice;
| import mir.conv: emplaceRef;
| alias E = this.DeepElement;
|
| auto result = (() @trusted => this.shape.uninitSlice!(Unqual!E))();
|
| import mir.algorithm.iteration: each;
| each!(emplaceRef!(Unqual!E))(result, this);
|
| return result;
| }
| }
|
| /// ditto
|    Slice!(Unqual!DeepElement*, N)
|    dup()() scope const @property
|    {
|        return this.lightScope.dup;
| }
|
| /// ditto
|    Slice!(Unqual!DeepElement*, N)
|    dup()() scope immutable @property
|    {
|        return this.lightScope.dup;
| }
|
| static if (doUnittest)
| ///
| @safe pure version(mir_test) unittest
| {
| import mir.ndslice;
| auto x = 3.iota!int;
| Slice!(immutable(int)*) imm = x.idup;
| Slice!(int*) mut = imm.dup;
| assert(imm == x);
| assert(mut == x);
| }
|
| /++
| Duplicates slice.
| Returns: GC-allocated Contiguous immutable slice.
| See_also: $(LREF .Slice.dup)
| +/
| Slice!(immutable(DeepElement)*, N)
| idup()() scope @property
| {
| if (__ctfe)
| {
| import mir.ndslice.topology: flattened;
| import mir.array.allocation: array;
| return this.flattened.array.idup.sliced(this.shape);
| }
| else
| {
| import mir.ndslice.allocation: uninitSlice;
| import mir.conv: emplaceRef;
| alias E = this.DeepElement;
|
| auto result = (() @trusted => this.shape.uninitSlice!(Unqual!E))();
|
| import mir.algorithm.iteration: each;
| each!(emplaceRef!(immutable E))(result, this);
| alias R = typeof(return);
| return (() @trusted => cast(R) result)();
| }
| }
|
| /// ditto
| Slice!(immutable(DeepElement)*, N)
| idup()() scope const @property
| {
|        return this.lightScope.idup;
| }
|
| /// ditto
| Slice!(immutable(DeepElement)*, N)
| idup()() scope immutable @property
| {
|        return this.lightScope.idup;
| }
|
| static if (doUnittest)
| ///
| @safe pure version(mir_test) unittest
| {
| import mir.ndslice;
| auto x = 3.iota!int;
| Slice!(int*) mut = x.dup;
| Slice!(immutable(int)*) imm = mut.idup;
| assert(imm == x);
| assert(mut == x);
| }
|
| /++
| Provides the index location of a slice, taking into account
| `Slice._strides`. Similar to `Slice.indexStride`, except the slice is
| indexed by a value. Called by `Slice.accessFlat`.
|
| Params:
| n = location in slice
| Returns:
| location in slice, adjusted for `Slice._strides`
| See_also:
| $(SUBREF topology, flattened),
| $(LREF .Slice.indexStride),
| $(LREF .Slice.accessFlat)
| +/
| private
| ptrdiff_t indexStrideValue(ptrdiff_t n) @safe scope const
| {
0000000| assert(n < elementCount, "indexStrideValue: n must be less than elementCount");
0000000| assert(n >= 0, "indexStrideValue: n must be greater than or equal to zero");
|
| static if (kind == Contiguous) {
0000000| return n;
| } else {
| ptrdiff_t _shift;
| foreach_reverse (i; Iota!(1, N))
| {
| immutable v = n / ptrdiff_t(_lengths[i]);
| n %= ptrdiff_t(_lengths[i]);
| static if (i == S)
| _shift += n;
| else
| _shift += n * _strides[i];
| n = v;
| }
| _shift += n * _strides[0];
| return _shift;
| }
| }
|
| /++
| Provides access to a slice as if it were `flattened`.
|
| Params:
| index = location in slice
| Returns:
| value of flattened slice at `index`
| See_also: $(SUBREF topology, flattened)
| +/
| auto ref accessFlat(size_t index) scope return @trusted
| {
0000000| return _iterator[indexStrideValue(index)];
| }
|
| ///
| version(mir_test)
| @safe pure @nogc nothrow
| unittest
| {
| import mir.ndslice.topology: iota, flattened;
|
| auto x = iota(2, 3, 4);
| assert(x.accessFlat(9) == x.flattened[9]);
| }
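|
|    // An illustrative arithmetic check of the mapping described above: for shape
|    // (2, 3, 4), flat index 9 corresponds to [0, 2, 1], since 9 = 0 * 12 + 2 * 4 + 1.
|    version(mir_test)
|    @safe pure @nogc nothrow
|    unittest
|    {
|        import mir.ndslice.topology: iota;
|        auto x = iota(2, 3, 4);
|        assert(x.accessFlat(9) == x[0, 2, 1]);
|    }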
|
| static if (isMutable!DeepElement)
| {
| private void opIndexOpAssignImplSlice(string op, RIterator, size_t RN, SliceKind rkind)
| (Slice!(RIterator, RN, rkind) value) scope
| {
| static if (N > 1 && RN == N && kind == Contiguous && rkind == Contiguous)
| {
| import mir.ndslice.topology : flattened;
| this.flattened.opIndexOpAssignImplSlice!op(value.flattened);
| }
| else
| {
| auto ls = this;
| do
| {
| static if (N > RN)
| {
| ls.front.opIndexOpAssignImplSlice!op(value);
| }
| else
| {
| static if (ls.N == 1)
| {
| static if (isInstanceOf!(SliceIterator, Iterator))
| {
| static if (isSlice!(typeof(value.front)))
| ls.front.opIndexOpAssignImplSlice!op(value.front);
| else
| static if (isDynamicArray!(typeof(value.front)))
| ls.front.opIndexOpAssignImplSlice!op(value.front);
| else
| ls.front.opIndexOpAssignImplValue!op(value.front);
| }
| else
| static if (op == "^^" && isFloatingPoint!(typeof(ls.front)) && isFloatingPoint!(typeof(value.front)))
| {
| import mir.math.common: pow;
| ls.front = pow(ls.front, value.front);
| }
| else
| mixin("ls.front " ~ op ~ "= value.front;");
| }
| else
| static if (RN == 1)
| ls.front.opIndexOpAssignImplValue!op(value.front);
| else
| ls.front.opIndexOpAssignImplSlice!op(value.front);
| value.popFront;
| }
| ls.popFront;
| }
| while (ls._lengths[0]);
| }
| }
|
| /++
| Assignment of a value of `Slice` type to a $(B fully defined slice).
| +/
| void opIndexAssign(RIterator, size_t RN, SliceKind rkind, Slices...)
| (Slice!(RIterator, RN, rkind) value, Slices slices) scope return
| if (isFullPureSlice!Slices || isIndexedSlice!Slices)
| {
| auto sl = this.lightScope.opIndex(slices);
| assert(_checkAssignLengths(sl, value));
| if(!sl.anyRUEmpty)
| sl.opIndexOpAssignImplSlice!""(value);
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation;
| auto a = slice!int(2, 3);
| auto b = [1, 2, 3, 4].sliced(2, 2);
|
| a[0..$, 0..$-1] = b;
| assert(a == [[1, 2, 0], [3, 4, 0]]);
|
| // fills both rows with b[0]
| a[0..$, 0..$-1] = b[0];
| assert(a == [[1, 2, 0], [1, 2, 0]]);
|
| a[1, 0..$-1] = b[1];
| assert(a[1] == [3, 4, 0]);
|
| a[1, 0..$-1][] = b[0];
| assert(a[1] == [1, 2, 0]);
| }
|
| static if (doUnittest)
| /// Left slice is packed
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : blocks, iota;
| import mir.ndslice.allocation : slice;
| auto a = slice!int(4, 4);
| a.blocks(2, 2)[] = iota!int(2, 2);
|
| assert(a ==
| [[0, 0, 1, 1],
| [0, 0, 1, 1],
| [2, 2, 3, 3],
| [2, 2, 3, 3]]);
| }
|
| static if (doUnittest)
| /// Both slices are packed
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.topology : blocks, iota, pack;
| import mir.ndslice.allocation : slice;
| auto a = slice!int(4, 4);
| a.blocks(2, 2)[] = iota!int(2, 2, 2).pack!1;
|
| assert(a ==
| [[0, 1, 2, 3],
| [0, 1, 2, 3],
| [4, 5, 6, 7],
| [4, 5, 6, 7]]);
| }
|
| void opIndexOpAssignImplArray(string op, T, Slices...)(T[] value) scope
| {
| auto ls = this;
| assert(ls.length == value.length, __FUNCTION__ ~ ": argument must have the same length.");
| static if (N == 1)
| {
| do
| {
| static if (ls.N == 1)
| {
| static if (isInstanceOf!(SliceIterator, Iterator))
| {
| static if (isSlice!(typeof(value[0])))
| ls.front.opIndexOpAssignImplSlice!op(value[0]);
| else
| static if (isDynamicArray!(typeof(value[0])))
| ls.front.opIndexOpAssignImplSlice!op(value[0]);
| else
| ls.front.opIndexOpAssignImplValue!op(value[0]);
| }
| else
| static if (op == "^^" && isFloatingPoint!(typeof(ls.front)) && isFloatingPoint!(typeof(value[0])))
| {
| import mir.math.common: pow;
| ls.front = pow(ls.front, value[0]);
| }
| else
| mixin("ls.front " ~ op ~ "= value[0];");
| }
| else
| mixin("ls.front[] " ~ op ~ "= value[0];");
| value = value[1 .. $];
| ls.popFront;
| }
| while (ls.length);
| }
| else
| static if (N == DynamicArrayDimensionsCount!(T[]))
| {
| do
| {
| ls.front.opIndexOpAssignImplArray!op(value[0]);
| value = value[1 .. $];
| ls.popFront;
| }
| while (ls.length);
| }
| else
| {
| do
| {
| ls.front.opIndexOpAssignImplArray!op(value);
| ls.popFront;
| }
| while (ls.length);
| }
| }
|
| /++
| Assignment of a regular multidimensional array to a $(B fully defined slice).
| +/
| void opIndexAssign(T, Slices...)(T[] value, Slices slices) scope return
| if ((isFullPureSlice!Slices || isIndexedSlice!Slices)
| && (!isDynamicArray!DeepElement || isDynamicArray!T)
| && DynamicArrayDimensionsCount!(T[]) - DynamicArrayDimensionsCount!DeepElement <= typeof(this.opIndex(slices)).N)
| {
| auto sl = this.lightScope.opIndex(slices);
| sl.opIndexOpAssignImplArray!""(value);
| }
|
| static if (doUnittest)
| ///
| pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation;
| auto a = slice!int(2, 3);
| auto b = [[1, 2], [3, 4]];
|
| a[] = [[1, 2, 3], [4, 5, 6]];
| assert(a == [[1, 2, 3], [4, 5, 6]]);
|
| a[0..$, 0..$-1] = [[1, 2], [3, 4]];
| assert(a == [[1, 2, 3], [3, 4, 6]]);
|
| a[0..$, 0..$-1] = [1, 2];
| assert(a == [[1, 2, 3], [1, 2, 6]]);
|
| a[1, 0..$-1] = [3, 4];
| assert(a[1] == [3, 4, 6]);
|
| a[1, 0..$-1][] = [3, 4];
| assert(a[1] == [3, 4, 6]);
| }
|
| static if (doUnittest)
| /// Packed slices
| pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : blocks;
| auto a = slice!int(4, 4);
| a.blocks(2, 2)[] = [[0, 1], [2, 3]];
|
| assert(a ==
| [[0, 0, 1, 1],
| [0, 0, 1, 1],
| [2, 2, 3, 3],
| [2, 2, 3, 3]]);
| }
|
|
| private void opIndexOpAssignImplConcatenation(string op, T)(T value) scope
| {
| auto sl = this;
| static if (concatenationDimension!T)
| {
| if (!sl.empty) do
| {
| static if (op == "")
| sl.front.opIndexAssign(value.front);
| else
| sl.front.opIndexOpAssign!op(value.front);
| value.popFront;
| sl.popFront;
| }
| while(!sl.empty);
| }
| else
| {
| foreach (ref slice; value._slices)
| {
| static if (op == "")
| sl[0 .. slice.length].opIndexAssign(slice);
| else
| sl[0 .. slice.length].opIndexOpAssign!op(slice);
|
| sl = sl[slice.length .. $];
| }
| assert(sl.empty);
| }
| }
|
| ///
| void opIndexAssign(T, Slices...)(T concatenation, Slices slices) scope return
| if ((isFullPureSlice!Slices || isIndexedSlice!Slices) && isConcatenation!T)
| {
| auto sl = this.lightScope.opIndex(slices);
| static assert(typeof(sl).N == T.N, "incompatible dimension count");
| sl.opIndexOpAssignImplConcatenation!""(concatenation);
| }
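|
|        // A minimal sketch (illustrative); it assumes the lazy `concatenation`
|        // helper from `mir.ndslice.concatenation` to build the right-hand side.
|        static if (doUnittest)
|        version(mir_test) unittest
|        {
|            import mir.ndslice.allocation: slice;
|            import mir.ndslice.concatenation: concatenation;
|            import mir.ndslice.topology: iota;
|            auto a = slice!int(3, 2);
|            a[] = concatenation(iota!int(1, 2), iota!int([2, 2], 2));
|            assert(a == [[0, 1], [2, 3], [4, 5]]);
|        }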
|
| /++
| Assignment of a value (e.g. a number) to a $(B fully defined slice).
| +/
| void opIndexAssign(T, Slices...)(T value, Slices slices) scope return
| if ((isFullPureSlice!Slices || isIndexedSlice!Slices)
| && (!isDynamicArray!T || isDynamicArray!DeepElement)
| && DynamicArrayDimensionsCount!T == DynamicArrayDimensionsCount!DeepElement
| && !isSlice!T
| && !isConcatenation!T)
| {
| auto sl = this.lightScope.opIndex(slices);
| if(!sl.anyRUEmpty)
| sl.opIndexOpAssignImplValue!""(value);
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow
| version(mir_test) unittest
| {
| import mir.ndslice.allocation;
| auto a = slice!int(2, 3);
|
| a[] = 9;
| assert(a == [[9, 9, 9], [9, 9, 9]]);
|
| a[0..$, 0..$-1] = 1;
| assert(a == [[1, 1, 9], [1, 1, 9]]);
|
| a[0..$, 0..$-1] = 2;
| assert(a == [[2, 2, 9], [2, 2, 9]]);
|
| a[1, 0..$-1] = 3;
| //assert(a[1] == [3, 3, 9]);
|
| a[1, 0..$-1] = 4;
| //assert(a[1] == [4, 4, 9]);
|
| a[1, 0..$-1][] = 5;
|
| assert(a[1] == [5, 5, 9]);
| }
|
| static if (doUnittest)
| /// Packed slices have the same behavior.
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation;
| import mir.ndslice.topology : pack;
| auto a = slice!int(2, 3).pack!1;
|
| a[] = 9;
| //assert(a == [[9, 9, 9], [9, 9, 9]]);
| }
|
| /++
| Assignment of a value (e.g. a number) to a $(B fully defined index).
| +/
| auto ref opIndexAssign(T)(T value, size_t[N] _indices...) scope return @trusted
| {
| // check assign safety
| static auto ref fun(ref DeepElement t, ref T v) @safe
| {
| return t = v;
| }
| return _iterator[indexStride(_indices)] = value;
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation;
| auto a = slice!int(2, 3);
|
| a[1, 2] = 3;
| assert(a[1, 2] == 3);
| }
|
| static if (doUnittest)
| @safe pure nothrow version(mir_test) unittest
| {
| auto a = new int[6].sliced(2, 3);
|
| a[[1, 2]] = 3;
| assert(a[[1, 2]] == 3);
| }
|
| /++
| Op Assignment `op=` of a value (e.g. a number) to a $(B fully defined index).
| +/
| auto ref opIndexOpAssign(string op, T)(T value, size_t[N] _indices...) scope return @trusted
| {
| // check op safety
| static auto ref fun(ref DeepElement t, ref T v) @safe
| {
| return mixin(`t` ~ op ~ `= v`);
| }
| auto str = indexStride(_indices);
| static if (op == "^^" && isFloatingPoint!DeepElement && isFloatingPoint!(typeof(value)))
| {
| import mir.math.common: pow;
| _iterator[str] = pow(_iterator[str], value);
| }
| else
| return mixin (`_iterator[str] ` ~ op ~ `= value`);
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation;
| auto a = slice!int(2, 3);
|
| a[1, 2] += 3;
| assert(a[1, 2] == 3);
| }
|
| static if (doUnittest)
| @safe pure nothrow version(mir_test) unittest
| {
| auto a = new int[6].sliced(2, 3);
|
| a[[1, 2]] += 3;
| assert(a[[1, 2]] == 3);
| }
|
| /++
| Op Assignment `op=` of a value of `Slice` type to a $(B fully defined slice).
| +/
| void opIndexOpAssign(string op, RIterator, SliceKind rkind, size_t RN, Slices...)
| (Slice!(RIterator, RN, rkind) value, Slices slices) scope return
| if (isFullPureSlice!Slices || isIndexedSlice!Slices)
| {
| auto sl = this.lightScope.opIndex(slices);
| assert(_checkAssignLengths(sl, value));
| if(!sl.anyRUEmpty)
| sl.opIndexOpAssignImplSlice!op(value);
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation;
| auto a = slice!int(2, 3);
| auto b = [1, 2, 3, 4].sliced(2, 2);
|
| a[0..$, 0..$-1] += b;
| assert(a == [[1, 2, 0], [3, 4, 0]]);
|
| a[0..$, 0..$-1] += b[0];
| assert(a == [[2, 4, 0], [4, 6, 0]]);
|
| a[1, 0..$-1] += b[1];
| assert(a[1] == [7, 10, 0]);
|
| a[1, 0..$-1][] += b[0];
| assert(a[1] == [8, 12, 0]);
| }
|
| static if (doUnittest)
| /// Left slice is packed
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : blocks, iota;
| auto a = slice!size_t(4, 4);
| a.blocks(2, 2)[] += iota(2, 2);
|
| assert(a ==
| [[0, 0, 1, 1],
| [0, 0, 1, 1],
| [2, 2, 3, 3],
| [2, 2, 3, 3]]);
| }
|
| static if (doUnittest)
| /// Both slices are packed
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : blocks, iota, pack;
| auto a = slice!size_t(4, 4);
| a.blocks(2, 2)[] += iota(2, 2, 2).pack!1;
|
| assert(a ==
| [[0, 1, 2, 3],
| [0, 1, 2, 3],
| [4, 5, 6, 7],
| [4, 5, 6, 7]]);
| }
|
| /++
| Op Assignment `op=` of a regular multidimensional array to a $(B fully defined slice).
| +/
| void opIndexOpAssign(string op, T, Slices...)(T[] value, Slices slices) scope return
| if (isFullPureSlice!Slices
| && (!isDynamicArray!DeepElement || isDynamicArray!T)
| && DynamicArrayDimensionsCount!(T[]) - DynamicArrayDimensionsCount!DeepElement <= typeof(this.opIndex(slices)).N)
| {
| auto sl = this.lightScope.opIndex(slices);
| sl.opIndexOpAssignImplArray!op(value);
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation : slice;
| auto a = slice!int(2, 3);
|
| a[0..$, 0..$-1] += [[1, 2], [3, 4]];
| assert(a == [[1, 2, 0], [3, 4, 0]]);
|
| a[0..$, 0..$-1] += [1, 2];
| assert(a == [[2, 4, 0], [4, 6, 0]]);
|
| a[1, 0..$-1] += [3, 4];
| assert(a[1] == [7, 10, 0]);
|
| a[1, 0..$-1][] += [1, 2];
| assert(a[1] == [8, 12, 0]);
| }
|
| static if (doUnittest)
| /// Packed slices
| @safe pure nothrow
| version(mir_test) unittest
| {
| import mir.ndslice.allocation : slice;
| import mir.ndslice.topology : blocks;
| auto a = slice!int(4, 4);
| a.blocks(2, 2)[] += [[0, 1], [2, 3]];
|
| assert(a ==
| [[0, 0, 1, 1],
| [0, 0, 1, 1],
| [2, 2, 3, 3],
| [2, 2, 3, 3]]);
| }
|
| private void opIndexOpAssignImplValue(string op, T)(T value) scope return
| {
| static if (N > 1 && kind == Contiguous)
| {
| import mir.ndslice.topology : flattened;
| this.flattened.opIndexOpAssignImplValue!op(value);
| }
| else
| {
| auto ls = this;
| do
| {
| static if (N == 1)
| {
| static if (isInstanceOf!(SliceIterator, Iterator))
| ls.front.opIndexOpAssignImplValue!op(value);
| else
| mixin (`ls.front ` ~ op ~ `= value;`);
| }
| else
| ls.front.opIndexOpAssignImplValue!op(value);
| ls.popFront;
| }
| while(ls._lengths[0]);
| }
| }
|
| /++
| Op Assignment `op=` of a value (e.g. a number) to a $(B fully defined slice).
| +/
| void opIndexOpAssign(string op, T, Slices...)(T value, Slices slices) scope return
| if ((isFullPureSlice!Slices || isIndexedSlice!Slices)
| && (!isDynamicArray!T || isDynamicArray!DeepElement)
| && DynamicArrayDimensionsCount!T == DynamicArrayDimensionsCount!DeepElement
| && !isSlice!T
| && !isConcatenation!T)
| {
| auto sl = this.lightScope.opIndex(slices);
| if(!sl.anyRUEmpty)
| sl.opIndexOpAssignImplValue!op(value);
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation;
| auto a = slice!int(2, 3);
|
| a[] += 1;
| assert(a == [[1, 1, 1], [1, 1, 1]]);
|
| a[0..$, 0..$-1] += 2;
| assert(a == [[3, 3, 1], [3, 3, 1]]);
|
| a[1, 0..$-1] += 3;
| assert(a[1] == [6, 6, 1]);
| }
|
| ///
| void opIndexOpAssign(string op,T, Slices...)(T concatenation, Slices slices) scope return
| if ((isFullPureSlice!Slices || isIndexedSlice!Slices) && isConcatenation!T)
| {
| auto sl = this.lightScope.opIndex(slices);
| static assert(typeof(sl).N == concatenation.N);
| sl.opIndexOpAssignImplConcatenation!op(concatenation);
| }
|
| static if (doUnittest)
| /// Packed slices have the same behavior.
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation;
| import mir.ndslice.topology : pack;
| auto a = slice!int(2, 3).pack!1;
|
| a[] += 9;
| assert(a == [[9, 9, 9], [9, 9, 9]]);
| }
|
|
| /++
| Increment `++` and Decrement `--` operators for a $(B fully defined index).
| +/
| auto ref opIndexUnary(string op)(size_t[N] _indices...) scope return
| @trusted
| // @@@workaround@@@ for Issue 16473
| //if (op == `++` || op == `--`)
| {
| // check op safety
| static auto ref fun(DeepElement t) @safe
| {
| return mixin(op ~ `t`);
| }
| return mixin (op ~ `_iterator[indexStride(_indices)]`);
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation;
| auto a = slice!int(2, 3);
|
| ++a[1, 2];
| assert(a[1, 2] == 1);
| }
|
| // Issue 16473
| static if (doUnittest)
| @safe pure nothrow version(mir_test) unittest
| {
| import mir.ndslice.allocation;
| auto sl = slice!double(2, 5);
| auto d = -sl[0, 1];
| }
|
| static if (doUnittest)
| @safe pure nothrow version(mir_test) unittest
| {
| auto a = new int[6].sliced(2, 3);
|
| ++a[[1, 2]];
| assert(a[[1, 2]] == 1);
| }
|
| private void opIndexUnaryImpl(string op, Slices...)(Slices slices) scope
| {
| auto ls = this;
| do
| {
| static if (N == 1)
| {
| static if (isInstanceOf!(SliceIterator, Iterator))
| ls.front.opIndexUnaryImpl!op;
| else
| mixin (op ~ `ls.front;`);
| }
| else
| ls.front.opIndexUnaryImpl!op;
| ls.popFront;
| }
| while(ls._lengths[0]);
| }
|
| /++
| Increment `++` and Decrement `--` operators for a $(B fully defined slice).
| +/
| void opIndexUnary(string op, Slices...)(Slices slices) scope return
| if (isFullPureSlice!Slices && (op == `++` || op == `--`))
| {
| auto sl = this.lightScope.opIndex(slices);
| if (!sl.anyRUEmpty)
| sl.opIndexUnaryImpl!op;
| }
|
| static if (doUnittest)
| ///
| @safe pure nothrow
| version(mir_test) unittest
| {
| import mir.ndslice.allocation;
| auto a = slice!int(2, 3);
|
| ++a[];
| assert(a == [[1, 1, 1], [1, 1, 1]]);
|
| --a[1, 0..$-1];
|
| assert(a[1] == [0, 0, 1]);
| }
| }
|}
|
|/// ditto
|alias Slice = mir_slice;
|
|/++
|Slicing, indexing, and arithmetic operations.
|+/
|pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation;
| import mir.ndslice.dynamic : transposed;
| import mir.ndslice.topology : iota, universal;
| auto tensor = iota(3, 4, 5).slice;
|
| assert(tensor[1, 2] == tensor[1][2]);
| assert(tensor[1, 2, 3] == tensor[1][2][3]);
|
| assert( tensor[0..$, 0..$, 4] == tensor.universal.transposed!2[4]);
| assert(&tensor[0..$, 0..$, 4][1, 2] is &tensor[1, 2, 4]);
|
| tensor[1, 2, 3]++; //`opIndex` returns value by reference.
| --tensor[1, 2, 3]; //`opUnary`
|
| ++tensor[];
| tensor[] -= 1;
|
|    // `opIndexAssign` accepts only fully defined indices and slices.
| // Use an additional empty slice `[]`.
| static assert(!__traits(compiles, tensor[0 .. 2] *= 2));
|
| tensor[0 .. 2][] *= 2; //OK, empty slice
| tensor[0 .. 2, 3, 0..$] /= 2; //OK, 3 index or slice positions are defined.
|
| //fully defined index may be replaced by a static array
| size_t[3] index = [1, 2, 3];
| assert(tensor[index] == tensor[1, 2, 3]);
|}
|
|/++
|Operations with rvalue slices.
|+/
|pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation;
| import mir.ndslice.topology: universal;
| import mir.ndslice.dynamic: transposed, everted;
|
| auto tensor = slice!int(3, 4, 5).universal;
| auto matrix = slice!int(3, 4).universal;
| auto vector = slice!int(3);
|
| foreach (i; 0..3)
| vector[i] = i;
|
| // fills matrix columns
| matrix.transposed[] = vector;
|
| // fills tensor with vector
| // transposed tensor shape is (4, 5, 3)
| // vector shape is ( 3)
| tensor.transposed!(1, 2)[] = vector;
|
| // transposed tensor shape is (5, 3, 4)
| // matrix shape is ( 3, 4)
| tensor.transposed!2[] += matrix;
|
| // transposed tensor shape is (5, 4, 3)
| // transposed matrix shape is ( 4, 3)
| tensor.everted[] ^= matrix.transposed; // XOR
|}
|
|/++
|Creating a slice from text.
|See also $(MREF std, format).
|+/
|version(mir_test) unittest
|{
| import mir.algorithm.iteration: filter, all;
| import mir.array.allocation;
| import mir.exception;
| import mir.functional: not;
| import mir.ndslice.allocation;
| import mir.parse;
| import mir.primitives: empty;
|
| import std.algorithm: map;
| import std.string: lineSplitter, split;
|
| // std.functional, std.string, std.range;
|
| Slice!(int*, 2) toMatrix(string str)
| {
| string[][] data = str.lineSplitter.filter!(not!empty).map!split.array;
|
| size_t rows = data .length.enforce!"empty input";
| size_t columns = data[0].length.enforce!"empty first row";
|
| data.all!(a => a.length == columns).enforce!"rows have different lengths";
| auto slice = slice!int(rows, columns);
| foreach (i, line; data)
| foreach (j, num; line)
| slice[i, j] = num.fromString!int;
| return slice;
| }
|
| auto input = "\r1 2 3\r\n 4 5 6\n";
|
| auto matrix = toMatrix(input);
| assert(matrix == [[1, 2, 3], [4, 5, 6]]);
|
| // back to text
| import std.format;
| auto text2 = format("%(%(%s %)\n%)\n", matrix);
| assert(text2 == "1 2 3\n4 5 6\n");
|}
|
|// Slicing
|@safe @nogc pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
| auto a = iota(10, 20, 30, 40);
| auto b = a[0..$, 10, 4 .. 27, 4];
| auto c = b[2 .. 9, 5 .. 10];
| auto d = b[3..$, $-2];
| assert(b[4, 17] == a[4, 10, 21, 4]);
| assert(c[1, 2] == a[3, 10, 11, 4]);
| assert(d[3] == a[6, 10, 25, 4]);
|}
|
|// Operator overloading. # 1
|pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation;
| import mir.ndslice.topology : iota;
|
| auto fun(ref sizediff_t x) { x *= 3; }
|
| auto tensor = iota(8, 9, 10).slice;
|
| ++tensor[];
| fun(tensor[0, 0, 0]);
|
| assert(tensor[0, 0, 0] == 3);
|
| tensor[0, 0, 0] *= 4;
| tensor[0, 0, 0]--;
| assert(tensor[0, 0, 0] == 11);
|}
|
|// Operator overloading. # 2
|pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.topology: map, iota;
| import mir.array.allocation : array;
| //import std.bigint;
|
| auto matrix = 72
| .iota
| //.map!(i => BigInt(i))
| .array
| .sliced(8, 9);
|
| matrix[3 .. 6, 2] += 100;
| foreach (i; 0 .. 8)
| foreach (j; 0 .. 9)
| if (i >= 3 && i < 6 && j == 2)
| assert(matrix[i, j] >= 100);
| else
| assert(matrix[i, j] < 100);
|}
|
|// Operator overloading. # 3
|pure nothrow version(mir_test) unittest
|{
| import mir.ndslice.allocation;
| import mir.ndslice.topology : iota;
|
| auto matrix = iota(8, 9).slice;
| matrix[] = matrix;
| matrix[] += matrix;
| assert(matrix[2, 3] == (2 * 9 + 3) * 2);
|
| auto vec = iota([9], 100);
| matrix[] = vec;
| foreach (v; matrix)
| assert(v == vec);
|
| matrix[] += vec;
| foreach (vector; matrix)
| foreach (elem; vector)
| assert(elem >= 200);
|}
|
|// Type deduction
|version(mir_test) unittest
|{
| // Arrays
| foreach (T; AliasSeq!(int, const int, immutable int))
| static assert(is(typeof((T[]).init.sliced(3, 4)) == Slice!(T*, 2)));
|
| // Container Array
| import std.container.array;
| Array!int ar;
| ar.length = 12;
| auto arSl = ar[].slicedField(3, 4);
|}
|
|// Test for map #1
|version(mir_test) unittest
|{
| import mir.ndslice.topology: map, byDim;
| auto slice = [1, 2, 3, 4].sliced(2, 2);
|
| auto r = slice.byDim!0.map!(a => a.map!(a => a * 6));
| assert(r.front.front == 6);
| assert(r.front.back == 12);
| assert(r.back.front == 18);
| assert(r.back.back == 24);
| assert(r[0][0] == 6);
| assert(r[0][1] == 12);
| assert(r[1][0] == 18);
| assert(r[1][1] == 24);
|
| import std.range.primitives;
| static assert(hasSlicing!(typeof(r)));
| static assert(isForwardRange!(typeof(r)));
| static assert(isRandomAccessRange!(typeof(r)));
|}
|
|// Test for map #2
|version(mir_test) unittest
|{
| import mir.ndslice.topology: map, byDim;
| import std.range.primitives;
| auto data = [1, 2, 3, 4];
| static assert(hasSlicing!(typeof(data)));
| static assert(isForwardRange!(typeof(data)));
| static assert(isRandomAccessRange!(typeof(data)));
| auto slice = data.sliced(2, 2);
| static assert(hasSlicing!(typeof(slice)));
| static assert(isForwardRange!(typeof(slice)));
| static assert(isRandomAccessRange!(typeof(slice)));
| auto r = slice.byDim!0.map!(a => a.map!(a => a * 6));
| static assert(hasSlicing!(typeof(r)));
| static assert(isForwardRange!(typeof(r)));
| static assert(isRandomAccessRange!(typeof(r)));
| assert(r.front.front == 6);
| assert(r.front.back == 12);
| assert(r.back.front == 18);
| assert(r.back.back == 24);
| assert(r[0][0] == 6);
| assert(r[0][1] == 12);
| assert(r[1][0] == 18);
| assert(r[1][1] == 24);
|}
|
|private enum bool isType(alias T) = false;
|
|private enum bool isType(T) = true;
|
|private enum isStringValue(alias T) = is(typeof(T) : string);
|
|
|private bool _checkAssignLengths(
| LIterator, RIterator,
| size_t LN, size_t RN,
| SliceKind lkind, SliceKind rkind,
| )
| (Slice!(LIterator, LN, lkind) ls,
| Slice!(RIterator, RN, rkind) rs)
|{
| static if (isInstanceOf!(SliceIterator, LIterator))
| {
| import mir.ndslice.topology: unpack;
| return _checkAssignLengths(ls.unpack, rs);
| }
| else
| static if (isInstanceOf!(SliceIterator, RIterator))
| {
| import mir.ndslice.topology: unpack;
| return _checkAssignLengths(ls, rs.unpack);
| }
| else
| {
| foreach (i; Iota!(0, RN))
| if (ls._lengths[i + LN - RN] != rs._lengths[i])
| return false;
| return true;
| }
|}
|
|@safe pure nothrow @nogc version(mir_test) unittest
|{
| import mir.ndslice.topology : iota;
|
| assert(_checkAssignLengths(iota(2, 2), iota(2, 2)));
| assert(!_checkAssignLengths(iota(2, 2), iota(2, 3)));
| assert(!_checkAssignLengths(iota(2, 2), iota(3, 2)));
| assert(!_checkAssignLengths(iota(2, 2), iota(3, 3)));
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto slice = new int[15].slicedField(5, 3);
|
| /// Fully defined slice
| assert(slice[] == slice);
| auto subslice = slice[0..$-2, 1..$];
|
| /// Partially defined slice
| auto row = slice[3];
| auto col = slice[0..$, 1];
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto a = new int[6].slicedField(2, 3);
| auto b = [1, 2, 3, 4].sliced(2, 2);
|
| a[0..$, 0..$-1] = b;
| assert(a == [[1, 2, 0], [3, 4, 0]]);
|
| a[0..$, 0..$-1] = b[0];
| assert(a == [[1, 2, 0], [1, 2, 0]]);
|
| a[1, 0..$-1] = b[1];
| assert(a[1] == [3, 4, 0]);
|
| a[1, 0..$-1][] = b[0];
| assert(a[1] == [1, 2, 0]);
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto a = new int[6].slicedField(2, 3);
| auto b = [[1, 2], [3, 4]];
|
| a[] = [[1, 2, 3], [4, 5, 6]];
| assert(a == [[1, 2, 3], [4, 5, 6]]);
|
| a[0..$, 0..$-1] = [[1, 2], [3, 4]];
| assert(a == [[1, 2, 3], [3, 4, 6]]);
|
| a[0..$, 0..$-1] = [1, 2];
| assert(a == [[1, 2, 3], [1, 2, 6]]);
|
| a[1, 0..$-1] = [3, 4];
| assert(a[1] == [3, 4, 6]);
|
| a[1, 0..$-1][] = [3, 4];
| assert(a[1] == [3, 4, 6]);
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto a = new int[6].slicedField(2, 3);
|
| a[] = 9;
| //assert(a == [[9, 9, 9], [9, 9, 9]]);
|
| a[0..$, 0..$-1] = 1;
| //assert(a == [[1, 1, 9], [1, 1, 9]]);
|
| a[0..$, 0..$-1] = 2;
| //assert(a == [[2, 2, 9], [2, 2, 9]]);
|
| a[1, 0..$-1] = 3;
| //assert(a[1] == [3, 3, 9]);
|
| a[1, 0..$-1] = 4;
| //assert(a[1] == [4, 4, 9]);
|
| a[1, 0..$-1][] = 5;
| //assert(a[1] == [5, 5, 9]);
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto a = new int[6].slicedField(2, 3);
|
| a[1, 2] = 3;
| assert(a[1, 2] == 3);
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto a = new int[6].slicedField(2, 3);
|
| a[[1, 2]] = 3;
| assert(a[[1, 2]] == 3);
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto a = new int[6].slicedField(2, 3);
|
| a[1, 2] += 3;
| assert(a[1, 2] == 3);
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto a = new int[6].slicedField(2, 3);
|
| a[[1, 2]] += 3;
| assert(a[[1, 2]] == 3);
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto a = new int[6].slicedField(2, 3);
| auto b = [1, 2, 3, 4].sliced(2, 2);
|
| a[0..$, 0..$-1] += b;
| assert(a == [[1, 2, 0], [3, 4, 0]]);
|
| a[0..$, 0..$-1] += b[0];
| assert(a == [[2, 4, 0], [4, 6, 0]]);
|
| a[1, 0..$-1] += b[1];
| assert(a[1] == [7, 10, 0]);
|
| a[1, 0..$-1][] += b[0];
| assert(a[1] == [8, 12, 0]);
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto a = new int[6].slicedField(2, 3);
|
| a[0..$, 0..$-1] += [[1, 2], [3, 4]];
| assert(a == [[1, 2, 0], [3, 4, 0]]);
|
| a[0..$, 0..$-1] += [1, 2];
| assert(a == [[2, 4, 0], [4, 6, 0]]);
|
| a[1, 0..$-1] += [3, 4];
| assert(a[1] == [7, 10, 0]);
|
| a[1, 0..$-1][] += [1, 2];
| assert(a[1] == [8, 12, 0]);
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto a = new int[6].slicedField(2, 3);
|
| a[] += 1;
| assert(a == [[1, 1, 1], [1, 1, 1]]);
|
| a[0..$, 0..$-1] += 2;
| assert(a == [[3, 3, 1], [3, 3, 1]]);
|
| a[1, 0..$-1] += 3;
| assert(a[1] == [6, 6, 1]);
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto a = new int[6].slicedField(2, 3);
|
| ++a[1, 2];
| assert(a[1, 2] == 1);
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto a = new int[6].slicedField(2, 3);
|
| ++a[[1, 2]];
| assert(a[[1, 2]] == 1);
|}
|
|pure nothrow version(mir_test) unittest
|{
| auto a = new int[6].slicedField(2, 3);
|
| ++a[];
| assert(a == [[1, 1, 1], [1, 1, 1]]);
|
| --a[1, 0..$-1];
| assert(a[1] == [0, 0, 1]);
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.topology: iota, universal;
|
| auto sl = iota(3, 4).universal;
| assert(sl[0 .. $] == sl);
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.topology: canonical, iota;
| static assert(kindOf!(typeof(iota([1, 2]).canonical[1])) == Contiguous);
|}
|
|version(mir_test) unittest
|{
| import mir.ndslice.topology: iota;
| auto s = iota(2, 3);
| assert(s.front!1 == [0, 3]);
| assert(s.back!1 == [2, 5]);
|}
|
|/++
|Assignment utility for generic code that works both with scalars and with ndslices.
|Params:
| op = assign operation (generic, optional)
| lside = left side
| rside = right side
|Returns:
| expression value
|+/
|auto ndassign(string op = "", L, R)(ref L lside, auto ref R rside) @property
| if (!isSlice!L && (op.length == 0 || op[$-1] != '='))
|{
| return mixin(`lside ` ~ op ~ `= rside`);
|}
|
|/// ditto
|auto ndassign(string op = "", L, R)(L lside, auto ref R rside) @property
| if (isSlice!L && (op.length == 0 || op[$-1] != '='))
|{
| static if (op == "")
| return lside.opIndexAssign(rside);
| else
| return lside.opIndexOpAssign!op(rside);
|}
|
|///
|version(mir_test) unittest
|{
| import mir.ndslice.topology: iota;
| import mir.ndslice.allocation: slice;
| auto scalar = 3;
| auto vector = 3.iota.slice; // [0, 1, 2]
|
| // scalar = 5;
| scalar.ndassign = 5;
| assert(scalar == 5);
|
| // vector[] = vector * 2;
| vector.ndassign = vector * 2;
| assert(vector == [0, 2, 4]);
|
| // vector[] += scalar;
| vector.ndassign!"+"= scalar;
| assert(vector == [5, 7, 9]);
|}
|
|version(mir_test) pure nothrow unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: universal;
|
| auto df = slice!(double, int, int)(2, 3).universal;
| df.label[] = [1, 2];
| df.label!1[] = [1, 2, 3];
| auto lsdf = df.lightScope;
| assert(lsdf.label!0[0] == 1);
| assert(lsdf.label!1[1] == 2);
|
| auto immdf = (cast(immutable)df).lightImmutable;
| assert(immdf.label!0[0] == 1);
| assert(immdf.label!1[1] == 2);
|
| auto constdf = df.lightConst;
| assert(constdf.label!0[0] == 1);
| assert(constdf.label!1[1] == 2);
|
| auto constdf2 = df.toConst;
| assert(constdf2.label!0[0] == 1);
| assert(constdf2.label!1[1] == 2);
|
| auto immdf2 = (cast(immutable)df).toImmutable;
| assert(immdf2.label!0[0] == 1);
| assert(immdf2.label!1[1] == 2);
|}
|
|version(mir_test) pure nothrow unittest
|{
| import mir.ndslice.allocation: slice;
| import mir.ndslice.topology: universal;
|
| auto df = slice!(double, int, int)(2, 3).universal;
| df[] = 5;
|
| Slice!(double*, 2, Universal) values = df.values;
| assert(values[0][0] == 5);
| Slice!(LightConstOf!(double*), 2, Universal) constvalues = df.values;
| assert(constvalues[0][0] == 5);
| Slice!(LightImmutableOf!(double*), 2, Universal) immvalues = (cast(immutable)df).values;
| assert(immvalues[0][0] == 5);
|}
|
|version(mir_test) @safe unittest
|{
| import mir.ndslice.allocation;
| auto a = rcslice!double([2, 3], 0);
| auto b = rcslice!double([2, 3], 0);
| a[1, 2] = 3;
| b[] = a;
| assert(a == b);
|}
|
|version(mir_test)
|@safe pure @nogc nothrow
|unittest
|{
| import mir.ndslice.topology: iota, flattened;
|
| auto m = iota(2, 3, 4); // Contiguous Matrix
| auto mFlat = m.flattened;
|
| for (size_t i = 0; i < m.elementCount; i++) {
| assert(m.accessFlat(i) == mFlat[i]);
| }
|}
|
|version(mir_test)
|@safe pure @nogc nothrow
|unittest
|{
| import mir.ndslice.topology: iota, flattened;
|
| auto m = iota(3, 4); // Contiguous Matrix
| auto x = m.front; // Contiguous Vector
|
| for (size_t i = 0; i < x.elementCount; i++) {
| assert(x.accessFlat(i) == m[0, i]);
| }
|}
|
|version(mir_test)
|@safe pure @nogc nothrow
|unittest
|{
| import mir.ndslice.topology: iota, flattened;
|
| auto m = iota(3, 4); // Contiguous Matrix
| auto x = m[0 .. $, 0 .. $ - 1]; // Canonical Matrix
| auto xFlat = x.flattened;
|
| for (size_t i = 0; i < x.elementCount; i++) {
| assert(x.accessFlat(i) == xFlat[i]);
| }
|}
|
|
|version(mir_test)
|@safe pure @nogc nothrow
|unittest
|{
| import mir.ndslice.topology: iota, flattened;
|
| auto m = iota(2, 3, 4); // Contiguous Matrix
| auto x = m[0 .. $, 0 .. $, 0 .. $ - 1]; // Canonical Matrix
| auto xFlat = x.flattened;
|
| for (size_t i = 0; i < x.elementCount; i++) {
| assert(x.accessFlat(i) == xFlat[i]);
| }
|}
|
|
|version(mir_test)
|@safe pure @nogc nothrow
|unittest
|{
| import mir.ndslice.topology: iota, flattened;
| import mir.ndslice.dynamic: transposed;
|
| auto m = iota(2, 3, 4); // Contiguous Matrix
| auto x = m.transposed!(2, 1, 0); // Universal Matrix
| auto xFlat = x.flattened;
|
| for (size_t i = 0; i < x.elementCount; i++) {
| assert(x.accessFlat(i) == xFlat[i]);
| }
|}
|
|version(mir_test)
|@safe pure @nogc nothrow
|unittest
|{
| import mir.ndslice.topology: iota, flattened;
| import mir.ndslice.dynamic: transposed;
|
| auto m = iota(3, 4); // Contiguous Matrix
| auto x = m.transposed; // Universal Matrix
| auto xFlat = x.flattened;
|
| for (size_t i = 0; i < x.elementCount; i++) {
| assert(x.accessFlat(i) == xFlat[i]);
| }
|}
|
|version(mir_test)
|@safe pure @nogc nothrow
|unittest
|{
| import mir.ndslice.topology: iota, flattened, diagonal;
|
| auto m = iota(3, 4); // Contiguous Matrix
| auto x = m.diagonal; // Universal Vector
|
| for (size_t i = 0; i < x.elementCount; i++) {
| assert(x.accessFlat(i) == m[i, i]);
| }
|}
|
|version(mir_test)
|@safe pure @nogc nothrow
|unittest
|{
| import mir.ndslice.topology: iota, flattened;
|
| auto m = iota(3, 4); // Contiguous Matrix
| auto x = m.front!1; // Universal Vector
|
| for (size_t i = 0; i < x.elementCount; i++) {
| assert(x.accessFlat(i) == m[i, 0]);
| }
|}
../../../.dub/packages/mir-algorithm-3.10.12/mir-algorithm/source/mir/ndslice/slice.d is 0% covered
<<<<<< EOF
# path=./..-..-..-.dub-packages-mir-core-1.1.51-mir-core-source-mir-primitives.lst
|/++
|Templates used to check primitives and
|range primitives for arrays with multi-dimensional like API support.
|
|Note:
|UTF strings behave like common arrays in Mir.
|`std.uni.byCodePoint` can be used to create a range of characters.
|
|License: $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache-2.0)
|Authors: Ilya Yaroshenko
|+/
|module mir.primitives;
|
|import mir.internal.utility;
|import mir.math.common: optmath;
|import std.traits;
|
|@optmath:
|
|/++
|Returns: `true` if `R` has a `length` member that returns an
|integral type implicitly convertible to `size_t`.
|
|`R` does not have to be a range.
|+/
|enum bool hasLength(R) = is(typeof(
|(const R r, inout int = 0)
|{
| size_t l = r.length;
|}));
|
|///
|@safe version(mir_core_test) unittest
|{
| static assert(hasLength!(char[]));
| static assert(hasLength!(int[]));
| static assert(hasLength!(inout(int)[]));
|
| struct B { size_t length() const { return 0; } }
| struct C { @property size_t length() const { return 0; } }
| static assert(hasLength!(B));
| static assert(hasLength!(C));
|}
|
|/++
|Returns: `true` if `R` has a `shape` member that returns a static array of type `size_t[N]`.
|+/
|enum bool hasShape(R) = is(typeof(
|(const R r, inout int = 0)
|{
| auto l = r.shape;
| alias F = typeof(l);
| import std.traits;
| static assert(isStaticArray!F);
| static assert(is(ForeachType!F == size_t));
|}));
|
|///
|@safe version(mir_core_test) unittest
|{
| static assert(hasShape!(char[]));
| static assert(hasShape!(int[]));
| static assert(hasShape!(inout(int)[]));
|
| struct B { size_t length() const { return 0; } }
| struct C { @property size_t length() const { return 0; } }
| static assert(hasShape!(B));
| static assert(hasShape!(C));
|}
|
|///
|auto shape(Range)(scope const auto ref Range range) @property
| if (hasLength!Range || hasShape!Range)
|{
| static if (__traits(hasMember, Range, "shape"))
| {
| return range.shape;
| }
| else
| {
| size_t[1] ret;
| ret[0] = range.length;
| return ret;
| }
|}
|
|///
|version(mir_core_test) unittest
|{
| static assert([2, 2, 2].shape == [3]);
|}
|
|///
|template DimensionCount(T)
|{
| import mir.ndslice.slice: Slice, SliceKind;
| /// Extracts dimension count from a $(LREF Slice). Alias for $(LREF isSlice).
| static if(is(T : Slice!(Iterator, N, kind), Iterator, size_t N, SliceKind kind))
| enum size_t DimensionCount = N;
| else
| static if (hasShape!T)
| enum size_t DimensionCount = typeof(T.init.shape).length;
| else
| enum size_t DimensionCount = 1;
|}
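|
|/// Hedged usage sketch added for this coverage report (not in the upstream
|/// source): shows how `DimensionCount` resolves for a mir `Slice`, a plain
|/// array, and a type without a `shape` member. `sliced` is assumed to be
|/// available from `mir.ndslice.slice`, as in the listings above.
|version(mir_core_test) unittest
|{
| import mir.ndslice.slice: sliced;
| static assert(DimensionCount!(typeof([1, 2, 3, 4].sliced(2, 2))) == 2); // Slice branch
| static assert(DimensionCount!(int[]) == 1); // arrays have shape size_t[1]
| static assert(DimensionCount!int == 1); // fallback for shapeless types
|}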
|
|package(mir) bool anyEmptyShape(size_t N)(scope const auto ref size_t[N] shape) @property
|{
| foreach (i; Iota!N)
| if (shape[i] == 0)
| return true;
| return false;
|}
|
|///
|bool anyEmpty(Range)(scope const auto ref Range range) @property
| if (hasShape!Range || __traits(hasMember, Range, "anyEmpty"))
|{
| static if (__traits(hasMember, Range, "anyEmpty"))
| {
| return range.anyEmpty;
| }
| else
| static if (__traits(hasMember, Range, "shape"))
| {
| return anyEmptyShape(range.shape);
| }
| else
| {
| return range.empty;
| }
|}
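|
|/// Hedged example added for this coverage report (not in the upstream source):
|/// for plain arrays `anyEmpty` falls through to the array `empty` primitive
|/// defined later in this module.
|version(mir_core_test) unittest
|{
| assert((int[]).init.anyEmpty);
| assert(![1, 2].anyEmpty);
|}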
|
|///
|size_t elementCount(Range)(scope const auto ref Range range) @property
| if (hasShape!Range || __traits(hasMember, Range, "elementCount"))
|{
| static if (__traits(hasMember, Range, "elementCount"))
| {
| return range.elementCount;
| }
| else
| {
| auto sh = range.shape;
| size_t ret = sh[0];
| foreach(i; Iota!(1, sh.length))
| {
| ret *= sh[i];
| }
| return ret;
| }
|}
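|
|/// Hedged example added for this coverage report (not in the upstream source):
|/// for a type with a multi-dimensional `shape`, `elementCount` is the product
|/// of the extents. `Grid` is a hypothetical illustration type.
|version(mir_core_test) unittest
|{
| static struct Grid { size_t[2] shape() const { return [2, 3]; } }
| assert(Grid.init.elementCount == 6);
|}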
|
|deprecated("use elementCount instead")
|alias elementsCount = elementCount;
|
|
|/++
|Returns the element type of a struct with a `.DeepElement` inner alias, or the element type of a common array.
|Falls back to `ForeachType` if the struct does not have a `.DeepElement` member.
|+/
|template DeepElementType(S)
| if (is(S == struct) || is(S == class) || is(S == interface))
|{
| static if (__traits(hasMember, S, "DeepElement"))
| alias DeepElementType = S.DeepElement;
| else
| alias DeepElementType = ForeachType!S;
|}
|
|/// ditto
|alias DeepElementType(S : T[], T) = T;
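|
|/// Hedged example added for this coverage report (not in the upstream source):
|/// the template resolves to a struct's `.DeepElement` alias when present and
|/// to the array element type otherwise. `S` is a hypothetical illustration type.
|version(mir_core_test) unittest
|{
| static assert(is(DeepElementType!(int[]) == int));
| static struct S { alias DeepElement = double; }
| static assert(is(DeepElementType!S == double));
|}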
|
|/+ ARRAY PRIMITIVES +/
|pragma(inline, true):
|
|///
|bool empty(size_t dim = 0, T)(scope const T[] ar)
| if (!dim)
|{
| return !ar.length;
|}
|
|///
|version(mir_core_test) unittest
|{
| assert((int[]).init.empty);
| assert(![1].empty!0); // Slice-like API
|}
|
|///
|ref inout(T) front(size_t dim = 0, T)(scope return inout(T)[] ar)
| if (!dim && !is(Unqual!T[] == void[]))
|{
| assert(ar.length, "Accessing front of an empty array.");
| return ar[0];
|}
|
|///
|version(mir_core_test) unittest
|{
| assert(*&[3, 4].front == 3); // access by ref
| assert([3, 4].front!0 == 3); // Slice-like API
|}
|
|
|///
|ref inout(T) back(size_t dim = 0, T)(scope return inout(T)[] ar)
| if (!dim && !is(Unqual!T[] == void[]))
|{
| assert(ar.length, "Accessing back of an empty array.");
| return ar[$ - 1];
|}
|
|///
|version(mir_core_test) unittest
|{
| assert(*&[3, 4].back == 4); // access by ref
| assert([3, 4].back!0 == 4); // Slice-like API
|}
|
|///
|void popFront(size_t dim = 0, T)(scope ref inout(T)[] ar)
| if (!dim && !is(Unqual!T[] == void[]))
|{
| assert(ar.length, "Evaluating popFront() on an empty array.");
| ar = ar[1 .. $];
|}
|
|///
|version(mir_core_test) unittest
|{
| auto ar = [3, 4];
| ar.popFront;
| assert(ar == [4]);
| ar.popFront!0; // Slice-like API
| assert(ar == []);
|}
|
|///
|void popBack(size_t dim = 0, T)(scope ref inout(T)[] ar)
| if (!dim && !is(Unqual!T[] == void[]))
|{
| assert(ar.length, "Evaluating popBack() on an empty array.");
| ar = ar[0 .. $ - 1];
|}
|
|///
|version(mir_core_test) unittest
|{
| auto ar = [3, 4];
| ar.popBack;
| assert(ar == [3]);
| ar.popBack!0; // Slice-like API
| assert(ar == []);
|}
|
|///
|size_t popFrontN(size_t dim = 0, T)(scope ref inout(T)[] ar, size_t n)
| if (!dim && !is(Unqual!T[] == void[]))
|{
| n = ar.length < n ? ar.length : n;
| ar = ar[n .. $];
| return n;
|}
|
|///
|version(mir_core_test) unittest
|{
| auto ar = [3, 4];
| ar.popFrontN(1);
| assert(ar == [4]);
| ar.popFrontN!0(10); // Slice-like API
| assert(ar == []);
|}
|
|///
|size_t popBackN(size_t dim = 0, T)(scope ref inout(T)[] ar, size_t n)
| if (!dim && !is(Unqual!T[] == void[]))
|{
| n = ar.length < n ? ar.length : n;
| ar = ar[0 .. $ - n];
| return n;
|}
|
|///
|version(mir_core_test) unittest
|{
| auto ar = [3, 4];
| ar.popBackN(1);
| assert(ar == [3]);
| ar.popBackN!0(10); // Slice-like API
| assert(ar == []);
|}
|
|///
|void popFrontExactly(size_t dim = 0, T)(scope ref inout(T)[] ar, size_t n)
| if (!dim && !is(Unqual!T[] == void[]))
|{
| assert(ar.length >= n, "Evaluating *.popFrontExactly(n) on an array with length less then n.");
| ar = ar[n .. $];
|}
|
|///
|version(mir_core_test) unittest
|{
| auto ar = [3, 4, 5];
| ar.popFrontExactly(2);
| assert(ar == [5]);
| ar.popFrontExactly!0(1); // Slice-like API
| assert(ar == []);
|}
|
|///
|void popBackExactly(size_t dim = 0, T)(scope ref inout(T)[] ar, size_t n)
| if (!dim && !is(Unqual!T[] == void[]))
|{
| assert(ar.length >= n, "Evaluating *.popBackExactly(n) on an array with length less then n.");
| ar = ar[0 .. $ - n];
|}
|
|///
|version(mir_core_test) unittest
|{
| auto ar = [3, 4, 5];
| ar.popBackExactly(2);
| assert(ar == [3]);
| ar.popBackExactly!0(1); // Slice-like API
| assert(ar == []);
|}
|
|///
|size_t length(size_t d : 0, T)(in T[] array)
| if (d == 0)
|{
| return array.length;
|}
|
|///
|version(mir_core_test) unittest
|{
| assert([1, 2].length!0 == 2);
| assert([1, 2].elementCount == 2);
|}
|
|///
|inout(T)[] save(T)(scope return inout(T)[] array)
|{
| return array;
|}
|
|///
|version(mir_core_test) unittest
|{
| auto a = [1, 2];
| assert(a is a.save);
|}
../../../.dub/packages/mir-core-1.1.51/mir-core/source/mir/primitives.d has no code
<<<<<< EOF
# path=./source-mir-sparse-blas-gemv.lst
|/++
|License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0).
|Copyright: Copyright © 2016-, Ilya Yaroshenko
|Authors: Ilya Yaroshenko
|+/
|module mir.sparse.blas.gemv;
|
|
|import std.traits;
|import mir.ndslice.slice;
|import mir.ndslice.iterator;
|import mir.internal.utility;
|import mir.sparse;
|import mir.series;
|
|/++
|General matrix-vector multiplication.
|
|Params:
| alpha = scalar
| a = sparse matrix (CSR format)
| x = dense vector
| beta = scalar
| y = dense vector
|Returns:
| `y = alpha * a × x + beta * y` if `beta` is not zero, and `y = alpha * a × x` otherwise.
|+/
|void gemv(
| CR,
| CL,
| SliceKind kind1, T1, I1, J1, SliceKind kind2, Iterator2, SliceKind kind3, Iterator3)
|(
| in CR alpha,
| Slice!(ChopIterator!(J1*, Series!(I1*, T1*)), 1, kind1) a,
| Slice!(Iterator2, 1, kind2) x,
| in CL beta,
| Slice!(Iterator3, 1, kind3) y)
|in
|{
6| assert(a.length == y.length);
|}
|body
|{
6| if (beta)
| {
22| foreach (ref e; y)
| {
| import mir.sparse.blas.dot;
6| e = alpha * dot(a.front, x) + beta * e;
6| a.popFront;
| }
| }
| else
| {
44| foreach (ref e; y)
| {
| import mir.sparse.blas.dot;
12| e = alpha * dot(a.front, x);
12| a.popFront;
| }
| }
|}
|
|///
|unittest
|{
| import mir.ndslice;
| import mir.sparse;
|
1| auto slice = sparse!double(3, 5);
1| slice[] =
| [[ 0.0, 2.0, 3.0, 0.0, 0.0],
| [ 6.0, 0.0, 30.0, 8.0, 0.0],
| [ 6.0, 0.0, 30.0, 8.0, 0.0]];
1| auto alpha = 3.0;
1| auto a = slice.compress;
1| auto x = [ 17.0, 19, 31, 3, 5].sliced;
1| auto beta = 2.0;
1| auto y = [1.0, 2, 3].sliced;
1| auto t = [131.0, 1056.0, 1056.0].sliced;
1| t[] *= alpha;
| import mir.glas.l1: axpy;
1| axpy(beta, y, t);
1| gemv(alpha, a, x, beta, y);
1| assert(t == y);
|}
|
|/++
|General matrix-vector multiplication with transposition.
|
|Params:
| alpha = scalar
| a = sparse matrix (CSR format)
| x = dense vector
| beta = scalar
| y = dense vector
|Returns:
| `y = alpha * aᵀ × x + beta * y` if `beta` is not zero, and `y = alpha * aᵀ × x` otherwise.
|+/
|void gemtv(
| CR,
| CL,
| SliceKind kind1, T1, I1, J1, SliceKind kind2, Iterator2, SliceKind kind3, Iterator3)
|(
| in CR alpha,
| Slice!(ChopIterator!(J1*, Series!(I1*, T1*)), 1, kind1) a,
| Slice!(Iterator2, 1, kind2) x,
| in CL beta,
| Slice!(Iterator3, 1, kind3) y)
|in
|{
5| assert(a.length == x.length);
|}
|body
|{
| alias T3 = Unqual!(DeepElementType!(Slice!(Iterator3, 1, kind3)));
|
5| if (beta == 0)
| {
4| y[] = 0;
| }
5| if (beta == 1)
| {
| }
| else
| {
5| y[] *= T3(beta);
| }
85| foreach (ref t; x)
| {
| import mir.sparse.blas.axpy;
25| axpy(alpha * t, a.front, y);
25| a.popFront;
| }
|}
|
|///
|unittest
|{
| import mir.ndslice;
| import mir.sparse;
|
1| auto slice = sparse!double(5, 3);
1| slice[] =
| [[0.0, 6.0, 6.0],
| [2.0, 0.0, 0.0],
| [3.0, 30.0, 30.0],
| [0.0, 8.0, 8.0],
| [0.0, 0.0, 0.0]];
1| auto alpha = 3.0;
1| auto a = slice.compress;
1| auto x = [ 17.0, 19, 31, 3, 5].sliced;
1| auto beta = 2.0;
1| auto y = [1.0, 2, 3].sliced;
1| auto t = [131.0, 1056.0, 1056.0].sliced;
1| t[] *= alpha;
| import mir.glas.l1: axpy;
1| axpy(beta, y, t);
1| gemtv(alpha, a, x, beta, y);
1| assert(t == y);
|}
|
|/++
|General matrix-vector multiplication for sparse vectors.
|
|Params:
| alpha = scalar
| a = dense matrix
| x = sparse vector
| beta = scalar
| y = dense vector
|Returns:
| `y = alpha * a × x + beta * y` if `beta` is not zero, and `y = alpha * a × x` otherwise.
|+/
|void gemv(
| CR,
| CL,
| SliceKind kind1, Iterator1,
| T2, I2,
| SliceKind kind3, Iterator3,
| )
|(in CR alpha, Slice!(Iterator1, 2, kind1) a, Series!(I2*, T2*) x, in CL beta, Slice!(Iterator3, 1, kind3) y)
|in
|{
| assert(a.length == y.length);
|}
|body
|{
| if (beta)
| {
| foreach (ref e; y)
| {
| import mir.sparse.blas.dot;
| e = alpha * dot(x, a.front) + beta * e;
| a.popFront;
| }
| }
| else
| {
| foreach (ref e; y)
| {
| import mir.sparse.blas.dot;
| e = alpha * dot(x, a.front);
| a.popFront;
| }
| }
|}
|
|///
|unittest
|{
| import mir.ndslice;
| import mir.sparse;
|
1| auto slice = sparse!double(3, 5);
1| slice[] =
| [[ 0.0, 2.0, 3.0, 0.0, 0.0],
| [ 6.0, 0.0, 30.0, 8.0, 0.0],
| [ 6.0, 0.0, 30.0, 8.0, 0.0]];
1| auto alpha = 3.0;
1| auto a = slice.compress;
1| auto x = [ 17.0, 19, 31, 3, 5].sliced;
1| auto beta = 2.0;
1| auto y = [1.0, 2, 3].sliced;
1| auto t = [131.0, 1056.0, 1056.0].sliced;
1| t[] *= alpha;
| import mir.glas.l1: axpy;
1| axpy(beta, y, t);
1| gemv(alpha, a, x, beta, y);
1| assert(t == y);
|}
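|
|/// Hedged sketch added for this coverage report (not in the upstream source):
|/// drives the dense-matrix / sparse-vector overload documented above, since the
|/// test before this one exercises the CSR overload. Expected values are
|/// computed by hand: y = 2 * (a × x) + 3 * y.
|unittest
|{
| import mir.ndslice;
| import mir.series;
|
| auto a = [1.0, 2, 3, 4, 5, 6, 7, 8].sliced(2, 4); // dense 2x4 matrix
| auto x = series([1, 3], [10.0, 100.0]); // sparse vector: x[1] = 10, x[3] = 100
| auto y = [1.0, 2].sliced;
| gemv(2.0, a, x, 3.0, y);
| assert(y == [843.0, 1726.0]); // 2 * [420, 860] + 3 * [1, 2]
|}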
|
|/++
|Selective general matrix-vector multiplication with a selector sparse vector.
|
|Params:
| a = dense matrix
| x = dense vector
| y = sparse vector (compressed)
|Returns:
| `y[available indexes] = (a × x)[available indexes]`; a non-empty `op` turns the assignment into `op=`.
|+/
|void selectiveGemv(string op = "", SliceKind kind1, SliceKind kind2, T, T3, I3)
|(Slice!(T*, 2, kind1) a, Slice!(T*, 1, kind2) x, Series!(I3*, T3*) y)
|in
|{
3| assert(a.length!1 == x.length);
3| if (y.index.length)
2| assert(y.index[$-1] < a.length);
|}
|body
|{
| import mir.ndslice.dynamic: transposed;
|
21| foreach (i, j; y.index.field)
| {
| import mir.glas.l1 : dot;
3| auto d = dot(a[j], x);
| mixin(`y.value[i] ` ~ op ~ `= d;`);
| }
|}
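|
|/// Hedged usage sketch added for this coverage report (not in the upstream
|/// source): only the rows listed in the selector's index are written.
|/// Expected values are computed by hand (rows of a × x at indexes 0 and 2).
|unittest
|{
| import mir.ndslice;
| import mir.series;
|
| auto a = [1.0, 2, 3, 4, 5, 6].sliced(3, 2); // dense 3x2 matrix
| auto x = [1.0, 1].sliced; // dense vector
| auto y = series([0, 2], [0.0, 0.0]); // selector: rows 0 and 2
| selectiveGemv(a, x, y);
| assert(y.value == [3.0, 11.0]); // (a × x)[0] = 3, (a × x)[2] = 11
|}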
source/mir/sparse/blas/gemv.d is 100% covered
<<<<<< EOF