TRAVIS_OS_NAME=linux <<<<<< ENV benchmarks/ndslice/binarization.d benchmarks/ndslice/convolution.d benchmarks/ndslice/dot_product.d benchmarks/ndslice/euclidean_distance.d dub.json examples/data/stop_words examples/data/trndocs.dat examples/data/words examples/lda_hoffman_sparse.d examples/means_of_columns.d examples/median_filter.d index.d meson.build source/mir/glas/l1.d source/mir/glas/l2.d source/mir/glas/package.d source/mir/model/lda/hoffman.d source/mir/sparse/blas/axpy.d source/mir/sparse/blas/dot.d source/mir/sparse/blas/gemm.d source/mir/sparse/blas/gemv.d source/mir/sparse/blas/package.d source/mir/sparse/package.d subprojects/mir-algorithm.wrap subprojects/mir-core.wrap subprojects/mir-linux-kernel.wrap subprojects/mir-random.wrap <<<<<< network # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-fuse.lst |/++ |This is a submodule of $(MREF mir,ndslice). | |Allocation routines that construct ndslices from ndranges. | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2018-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |See_also: $(SUBMODULE concatenation) submodule. | |Macros: |SUBMODULE = $(MREF_ALTTEXT $1, mir, ndslice, $1) |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.ndslice.fuse; | |import mir.internal.utility; |import mir.ndslice.slice; |import mir.primitives; |import mir.qualifier; |import std.meta; |import std.traits; | |import mir.math.common: optmath; | |@optmath: | |/++ |Fuses ndrange `r` into GC-allocated (`fuse`) or RC-allocated (`rcfuse`) ndslice. Can be used to join rows or columns into a matrix. | |Params: | Dimensions = (optional) indexes of dimensions to be brought to the first position |Returns: | ndslice |+/ |/// |alias fuse(Dimensions...) = fuseImpl!(false, Dimensions); |/// |alias rcfuse(Dimensions...) = fuseImpl!(true, Dimensions); |/// ditto |template fuseImpl(bool RC, Dimensions...) |{ | import mir.ndslice.internal: isSize_t, toSize_t; | static if (!allSatisfy!(isSize_t, Dimensions)) | alias fuseImpl = .fuseImpl!(RC, staticMap!(toSize_t, Dimensions)); | else | /++ | Params: | r = parallelotope (ndrange) with length/shape and input range primitives. 
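|
|    A minimal sketch, assuming only $(SUBREF topology, iota) and $(SUBREF topology, map);
|    it fuses a lazily computed range of rows into a matrix:
|    ---
|    import mir.ndslice.fuse;
|    import mir.ndslice.topology: iota, map;
|    // each element of the 3-element outer range is itself a lazy 4-element row
|    auto lazyRows = 3.iota.map!(i => [4].iota(i * 4));
|    auto matrix = lazyRows.fuse; // GC-allocated 3x4 slice
|    assert(matrix == [3, 4].iota);
|    ---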
| +/ | @optmath auto fuseImpl(NDRange)(NDRange r) | if (hasShape!NDRange) | { | import mir.conv: emplaceRef; | import mir.algorithm.iteration: each; | import mir.ndslice.allocation; | auto shape = fuseShape(r); | alias T = FuseElementType!NDRange; | alias UT = Unqual!T; | static if (RC) | { | import mir.rc.array: RCI; | alias R = Slice!(RCI!T, fuseDimensionCount!NDRange); | Slice!(RCI!UT, fuseDimensionCount!NDRange) ret; | } | else | { | alias R = Slice!(T*, fuseDimensionCount!NDRange); | Slice!(UT*, fuseDimensionCount!NDRange) ret; | } | static if (Dimensions.length) | { | import mir.ndslice.topology: iota; | import mir.ndslice.dynamic: transposed, completeTranspose; | enum perm = completeTranspose!(shape.length)([Dimensions]); | size_t[shape.length] shapep; | foreach(i; Iota!(shape.length)) | shapep[i] = shape[perm[i]]; | // enum iperm = perm.length.iota[completeTranspose!(shape.length)([Dimensions])[].sliced].slice; | alias InverseDimensions = aliasSeqOf!( | (size_t[] perm){ | auto ar = new size_t[perm.length]; | ar.sliced[perm.sliced] = perm.length.iota; | return ar; | }(perm) | ); | static if (RC) | { | ret = shapep.uninitRcslice!UT; | ret.lightScope.transposed!InverseDimensions.each!(emplaceRef!T)(r); | } | else | { | if (__ctfe) | { | ret = shapep.slice!UT; | ret.transposed!InverseDimensions.each!"a = b"(r); | } | else | { | ret = shapep.uninitSlice!UT; | ret.transposed!InverseDimensions.each!(emplaceRef!T)(r); | } | | } | } | else | { | static if (RC) | { | ret = shape.uninitRCslice!UT; | ret.lightScope.each!(emplaceRef!T)(r); | } | else | { | if (__ctfe) | { | ret = shape.slice!UT; | ret.each!"a = b"(r); | } | else | { | ret = shape.uninitSlice!UT; | ret.each!(emplaceRef!T)(r); | } | } | } | static if (RC) | { | import core.lifetime: move; | return move(*(() @trusted => cast(R*)&ret)()); | } | else | { | return *(() @trusted => cast(R*)&ret)(); | } | } |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.fuse; | import mir.ndslice.slice : Contiguous, Slice; | import mir.ndslice.topology: iota; | import mir.rc.array: RCI; | | enum ror = [ | [0, 1, 2, 3], | [4, 5, 6, 7], | [8, 9,10,11]]; | | // 0 1 2 3 | // 4 5 6 7 | // 8 9 10 11 | auto matrix = ror.fuse; | | auto rcmatrix = ror.rcfuse; // nogc version | | assert(matrix == [3, 4].iota); | assert(rcmatrix == [3, 4].iota); | static assert(ror.fuse == [3, 4].iota); // CTFE-able | | // matrix is contiguos | static assert(is(typeof(matrix) == Slice!(int*, 2))); | static assert(is(typeof(rcmatrix) == Slice!(RCI!int, 2))); |} | |/// Transposed |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.fuse; | import mir.ndslice.topology: iota; | import mir.ndslice.dynamic: transposed; | import mir.ndslice.slice : Contiguous, Slice; | | enum ror = [ | [0, 1, 2, 3], | [4, 5, 6, 7], | [8, 9,10,11]]; | | // 0 4 8 | // 1 5 9 | // 2 6 10 | // 3 7 11 | | // `!1` brings dimensions under index 1 to the front (0 index). 
| auto matrix = ror.fuse!1; | | assert(matrix == [3, 4].iota.transposed!1); | // TODO: CTFE | // static assert(ror.fuse!1 == [3, 4].iota.transposed!1); // CTFE-able | // matrix is contiguos | static assert(is(typeof(matrix) == Slice!(int*, 2))); |} | | |/// 3D |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.fuse; | import mir.ndslice.topology: iota; | import mir.ndslice.dynamic: transposed; | | auto ror = | [[[ 0, 1, 2, 3], | [ 4, 5, 6, 7]], | [[ 8, 9,10,11], | [12,13,14,15]]]; | | auto nd = [2, 2, 4].iota; | | assert(ror.fuse == nd); | assert(ror.fuse!2 == nd.transposed!2); | assert(ror.fuse!(1, 2) == nd.transposed!(1, 2)); | assert(ror.fuse!(2, 1) == nd.transposed!(2, 1)); |} | |/// Work with RC Arrays of RC Arrays |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.fuse; | import mir.ndslice.slice; | import mir.ndslice.topology: map; | import mir.rc.array; | | Slice!(const(double)*, 2) conv(RCArray!(const RCArray!(const double)) a) | { | return a[].map!"a[]".fuse; | } |} | |private template fuseDimensionCount(R) |{ | static if (is(typeof(R.init.shape) : size_t[N], size_t N) && (isDynamicArray!R || __traits(hasMember, R, "front"))) | { | import mir.ndslice.topology: repeat; | enum size_t fuseDimensionCount = N + fuseDimensionCount!(DeepElementType!R); | } | else | enum size_t fuseDimensionCount = 0; |} | |/+ |TODO docs |+/ |size_t[fuseDimensionCount!Range] fuseShape(Range)(Range r) | if (hasShape!Range) |{ | // auto outerShape = r.shape; | enum N = r.shape.length; | static if (N == typeof(return).length) | { | return r.shape; | } | else | { | import mir.ndslice.topology: repeat; | typeof(return) ret; | ret[0 .. N] = r.shape; | if (!ret[0 .. N].anyEmptyShape) | ret[N .. $] = fuseShape(mixin("r" ~ ".front".repeat(N).fuseCells.field)); | return ret; | } |} | |private template FuseElementType(NDRange) |{ | import mir.ndslice.topology: repeat; | alias FuseElementType = typeof(mixin("NDRange.init" ~ ".front".repeat(fuseDimensionCount!NDRange).fuseCells.field)); |} | |/++ |Fuses `cells` into GC-allocated ndslice. | |Params: | cells = ndrange of ndcells, ndrange and ndcell should have `shape` and multidimensional input range primivies (`front!d`, `empty!d`, `popFront!d`). |Returns: ndslice composed of fused cells. 
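|
|Note: a minimal sketch, assuming the $(SUBREF topology, blocks) view splits a matrix into
|cells the same way $(SUBREF chunks, chunks) does, so that fusing the cells restores the
|original slice:
|---
|import mir.ndslice.allocation: slice;
|import mir.ndslice.topology: blocks, iota;
|auto sl = [4, 6].iota!int.slice;
|// a 2x3 grid of 2x2 cells
|assert(sl.blocks(2, 2).fuseCells == sl);
|---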
|See_also: $(SUBREF chunks, chunks) |+/ |auto fuseCells(S)(S cells) |{ | alias T = DeepElementType!(DeepElementType!S); | alias UT = Unqual!T; | if (__ctfe) | { | import mir.ndslice.allocation: slice; | auto ret = cells.fuseCellsShape.slice!UT; | ret.fuseCellsAssign!"a = b" = cells; | static if (is(T == immutable)) | return (() @trusted => cast(immutable) ret)()[]; | else | static if (is(T == const)) | return (() @trusted => cast(const) ret)()[]; | else | return ret; | } | else | { | import mir.ndslice.allocation: uninitSlice; | import mir.conv; | auto ret = cells.fuseCellsShape.uninitSlice!UT; | ret.fuseCellsAssign!(emplaceRef!T) = cells; | alias R = Slice!(T*, ret.N); | return R(ret._structure, (() @trusted => cast(T*)ret._iterator)()); | } |} | |/// 1D |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology: iota; | enum ar = [[0, 1], [], [2, 3, 4, 5], [6], [7, 8, 9]]; | static assert ([[0, 1], [], [2, 3, 4, 5], [6], [7, 8, 9]].fuseCells == 10.iota); | assert (ar.fuseCells == 10.iota); |} | |/// 2D |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology: iota; | import mir.ndslice.chunks; | | auto sl = iota(11, 17); | assert(sl.chunks!(0, 1)(3, 4).fuseCells == sl); |} | |/+ |TODO docs |+/ |auto fuseCellsAssign(alias fun = "a = b", Iterator, size_t N, SliceKind kind, S)(Slice!(Iterator, N, kind) to, S cells) |{ | assert(to.shape == cells.fuseCellsShape, "'cells.fuseCellsShape' should be equal to 'to.shape'"); | | if (cells.anyEmpty) | goto R; | | import mir.functional: naryFun; | import mir.ndslice.topology: canonical; | static if (kind == Contiguous) | fuseCellsEmplaceImpl!(naryFun!fun, 0, N)(to.canonical, cells); | else | fuseCellsEmplaceImpl!(naryFun!fun, 0, N)(to, cells); | R: return to; |} | |/+ |TODO docs |+/ |size_t[S.init.shape.length] fuseCellsShape(S)(S cells) @property |{ | typeof(return) ret; | enum N = ret.length; | static if (N == 1) | { | foreach (ref e; cells) | ret[0] += e.length; | } | else | { | import mir.ndslice.topology: repeat; | enum expr = "e" ~ ".front".repeat(N).fuseCells.field; | foreach (i; Iota!N) | for (auto e = cells.save; !e.empty!i; e.popFront!i) | ret[i] += mixin(expr).length!i; | } | return ret; |} | |private auto fuseCellsEmplaceImpl(alias fun, size_t i, size_t M, Iterator, size_t N, SliceKind kind, S)(Slice!(Iterator, N, kind) to, S cells) |{ | do | { | auto from = cells.front; | static if (M == 1) | { | auto n = from.length!i; | } | else | { | import mir.ndslice.topology: repeat; | enum expr = "from" ~ ".front".repeat(N - 1 - i).fuseCells.field; | auto n = mixin(expr).length!i; | } | assert (to.length!i >= n); | static if (i + 1 == M) | { | import mir.algorithm.iteration: each; | each!fun(to.selectFront!i(n), from); | } | else | { | .fuseCellsEmplaceImpl!(fun, i + 1, N)(to.selectFront!i(n), from); | } | to.popFrontExactly!i(n); | cells.popFront; | } | while(!cells.empty); | return to; |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/fuse.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-series.lst |/++ |$(H1 Index-series) | |The module contains $(LREF Series) data structure with special iteration and indexing methods. |It is aimed to construct index or time-series using Mir and Phobos algorithms. | |Public_imports: $(MREF mir,ndslice,slice). 
| |Copyright: Copyright © 2017, Kaleidic Associates Advisory Limited |Authors: Ilya Yaroshenko | |Macros: |NDSLICE = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.series; | |public import mir.ndslice.slice; |public import mir.ndslice.sorting: sort; |import mir.ndslice.iterator: IotaIterator; |import mir.ndslice.sorting: transitionIndex; |import mir.qualifier; |import std.traits; | |/++ |See_also: $(LREF unionSeries), $(LREF troykaSeries), $(LREF troykaGalop). |+/ |@safe version(mir_test) unittest |{ | import mir.ndslice; | import mir.series; | | import mir.array.allocation: array; | import mir.algorithm.setops: multiwayUnion; | | import std.datetime: Date; | static if (__VERSION__ >= 2085) import core.lifetime: move; else import std.algorithm.mutation: move; | import std.exception: collectExceptionMsg; | | ////////////////////////////////////// | // Constructs two time-series. | ////////////////////////////////////// | auto index0 = [ | Date(2017, 01, 01), | Date(2017, 03, 01), | Date(2017, 04, 01)]; | | auto data0 = [1.0, 3, 4]; | auto series0 = index0.series(data0); | | auto index1 = [ | Date(2017, 01, 01), | Date(2017, 02, 01), | Date(2017, 05, 01)]; | | auto data1 = [10.0, 20, 50]; | auto series1 = index1.series(data1); | | ////////////////////////////////////// | // asSlice method | ////////////////////////////////////// | assert(series0 | .asSlice | // ref qualifier is optional | .map!((ref key, ref value) => key.month == value) | .all); | | ////////////////////////////////////// | // get* methods | ////////////////////////////////////// | | auto refDate = Date(2017, 03, 01); | auto missingDate = Date(2016, 03, 01); | | // default value | double defaultValue = 100; | assert(series0.get(refDate, defaultValue) == 3); | assert(series0.get(missingDate, defaultValue) == defaultValue); | | // Exceptions handlers | assert(series0.get(refDate) == 3); | assert(series0.get(refDate, new Exception("My exception msg")) == 3); | assert(series0.getVerbose(refDate) == 3); | assert(series0.getExtraVerbose(refDate, "My exception msg") == 3); | | assert(collectExceptionMsg!Exception( | series0.get(missingDate) | ) == "Series double[Date]: Missing required key"); | | assert(collectExceptionMsg!Exception( | series0.get(missingDate, new Exception("My exception msg")) | ) == "My exception msg"); | | assert(collectExceptionMsg!Exception( | series0.getVerbose(missingDate) | ) == "Series double[Date]: Missing 2016-Mar-01 key"); | | assert(collectExceptionMsg!Exception( | series0.getExtraVerbose(missingDate, "My exception msg") | ) == "My exception msg. Series double[Date]: Missing 2016-Mar-01 key"); | | // assign with get* | series0.get(refDate) = 100; | assert(series0.get(refDate) == 100); | series0.get(refDate) = 3; | | // tryGet | double val; | assert(series0.tryGet(refDate, val)); | assert(val == 3); | assert(!series0.tryGet(missingDate, val)); | assert(val == 3); // val was not changed | | ////////////////////////////////////// | // Merges multiple series into one. | // Allocates using GC. M | // Makes exactly two allocations per merge: | // one for index/time and one for data. 
| ////////////////////////////////////// | auto m0 = unionSeries(series0, series1); | auto m1 = unionSeries(series1, series0); // order matters | | assert(m0.index == [ | Date(2017, 01, 01), | Date(2017, 02, 01), | Date(2017, 03, 01), | Date(2017, 04, 01), | Date(2017, 05, 01)]); | | assert(m0.index == m1.index); | assert(m0.data == [ 1, 20, 3, 4, 50]); | assert(m1.data == [10, 20, 3, 4, 50]); | | ////////////////////////////////////// | // Joins two time-series into one with two columns. | ////////////////////////////////////// | auto u = [index0, index1].multiwayUnion; | auto index = u.move.array; | auto data = slice!double([index.length, 2], 0); // initialized to 0 value | auto series = index.series(data); | | series[0 .. $, 0][] = series0; // fill first column | series[0 .. $, 1][] = series1; // fill second column | | assert(data == [ | [1, 10], | [0, 20], | [3, 0], | [4, 0], | [0, 50]]); |} | |/// |unittest{ | | import mir.series; | | double[int] map; | map[1] = 4.0; | map[2] = 5.0; | map[4] = 6.0; | map[5] = 10.0; | map[10] = 11.0; | | const s = series(map); | | double value; | int key; | assert(s.tryGet(2, value) && value == 5.0); | assert(!s.tryGet(8, value)); | | assert(s.tryGetNext(2, value) && value == 5.0); | assert(s.tryGetPrev(2, value) && value == 5.0); | assert(s.tryGetNext(8, value) && value == 11.0); | assert(s.tryGetPrev(8, value) && value == 10.0); | assert(!s.tryGetFirst(8, 9, value)); | assert(s.tryGetFirst(2, 10, value) && value == 5.0); | assert(s.tryGetLast(2, 10, value) && value == 11.0); | assert(s.tryGetLast(2, 8, value) && value == 10.0); | | key = 2; assert(s.tryGetNextUpdateKey(key, value) && key == 2 && value == 5.0); | key = 2; assert(s.tryGetPrevUpdateKey(key, value) && key == 2 && value == 5.0); | key = 8; assert(s.tryGetNextUpdateKey(key, value) && key == 10 && value == 11.0); | key = 8; assert(s.tryGetPrevUpdateKey(key, value) && key == 5 && value == 10.0); | key = 2; assert(s.tryGetFirstUpdateLower(key, 10, value) && key == 2 && value == 5.0); | key = 10; assert(s.tryGetLastUpdateKey(2, key, value) && key == 10 && value == 11.0); | key = 8; assert(s.tryGetLastUpdateKey(2, key, value) && key == 5 && value == 10.0); |} | |import mir.ndslice.slice; |import mir.ndslice.internal: is_Slice, isIndex; |import mir.math.common: optmath; | |import std.meta; | |@optmath: | |/++ |Plain index/time observation data structure. |Observations are used as the return type for indexing $(LREF Series). |+/ |struct mir_observation(Index, Data) |{ | /// Date, date-time, time, or index. | Index index; | /// An alias for time-series index. | alias time = index; | /// An alias for key-value representation. | alias key = index; | /// Value or ndslice. | Data data; | /// An alias for key-value representation. | alias value = data; |} | |/// ditto |alias Observation = mir_observation; | |/// Convenient function for $(LREF Observation) construction. |auto observation(Index, Data)(Index index, Data data) |{ | return mir_observation!(Index, Data)(index, data); |} | |/++ |Convenient alias for 1D Contiguous $(LREF Series).
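|
|A minimal usage sketch, assuming nothing beyond `series` and `sort` from this module:
|---
|import mir.series;
|// `series` + `sort` normalises the key order required by Series
|SeriesMap!(string, double) prices = ["b", "a"].series([2.0, 1.0]).sort;
|assert(prices.get("a") == 1.0);
|assert("b" in prices);
|---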
|+/ |alias SeriesMap(K, V) = mir_series!(K*, V*); | |/// |version(mir_test) unittest |{ | import std.traits; | import mir.series; | | static assert (is(SeriesMap!(string, double) == Series!(string*, double*))); | | /// LHS, RHS | static assert (isAssignable!(SeriesMap!(string, double), SeriesMap!(string, double))); | static assert (isAssignable!(SeriesMap!(string, double), typeof(null))); | | static assert (isAssignable!(SeriesMap!(const string, double), SeriesMap!(string, double))); | static assert (isAssignable!(SeriesMap!(string, const double), SeriesMap!(string, double))); | static assert (isAssignable!(SeriesMap!(const string, const double), SeriesMap!(string, double))); | | static assert (isAssignable!(SeriesMap!(immutable string, double), SeriesMap!(immutable string, double))); | static assert (isAssignable!(SeriesMap!(immutable string, const double), SeriesMap!(immutable string, double))); | static assert (isAssignable!(SeriesMap!(const string, const double), SeriesMap!(immutable string, double))); | static assert (isAssignable!(SeriesMap!(string, immutable double), SeriesMap!(string, immutable double))); | static assert (isAssignable!(SeriesMap!(const string, immutable double), SeriesMap!(string, immutable double))); | static assert (isAssignable!(SeriesMap!(const string, const double), SeriesMap!(string, immutable double))); | // etc |} | |/++ |Plain index series data structure. | |`*.index[i]`/`*.key[i]`/`*.time` corresponds to `*.data[i]`/`*.value`. | |Index is assumed to be sorted. |$(LREF sort) can be used to normalise a series. |+/ |struct mir_series(IndexIterator_, Iterator_, size_t N_ = 1, SliceKind kind_ = Contiguous) |{ | private enum doUnittest = is(typeof(this) == Series!(int*, double*)); | | /// | alias IndexIterator = IndexIterator_; | | /// | alias Iterator = Iterator_; | | /// | enum size_t N = N_; | | /// | enum SliceKind kind = kind_; | | /// | Slice!(Iterator, N, kind) _data; | | /// | IndexIterator _index; | | /// Index / Key / Time type aliases | alias Index = typeof(this.front.index); | /// ditto | alias Key = Index; | /// ditto | alias Time = Index; | /// Data / Value type aliases | alias Data = typeof(this.front.data); | /// ditto | alias Value = Data; | | /// An alias for time-series index. | alias time = index; | /// An alias for key-value representation. | alias key = index; | /// An alias for key-value representation. | alias value = data; | | private enum defaultMsg() = "Series " ~ Unqual!(this.Data).stringof ~ "[" ~ Unqual!(this.Index).stringof ~ "]: Missing"; | private static immutable defaultExc() = new Exception(defaultMsg!() ~ " required key"); | |@optmath: | | /// | this()(Slice!IndexIterator index, Slice!(Iterator, N, kind) data) | { | assert(index.length == data.length, "Series constructor: index and data lengths must be equal."); | _data = data; | _index = index._iterator; | } | | | /// Construct from null | this(typeof(null)) | { | _data = _data.init; | _index = _index.init; | } | | /// | bool opEquals(RIndexIterator, RIterator, size_t RN, SliceKind rkind, )(Series!(RIndexIterator, RIterator, RN, rkind) rhs) const | { | return this.lightScopeIndex == rhs.lightScopeIndex && this._data.lightScope == rhs._data.lightScope; | } | | /++ | Index series is assumed to be sorted. | | `IndexIterator` is an iterator on top of date, date-time, time, or numbers or user defined types with defined `opCmp`. | For example, `Date*`, `DateTime*`, `immutable(long)*`, `mir.ndslice.iterator.IotaIterator`. 
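|
|    A small sketch (illustrative only): the getter returns a 1D slice over the keys, and
|    `time`/`key` are aliases for it.
|    ---
|    import mir.series;
|    auto s = [1, 2, 3].series([10.0, 20, 30]);
|    assert(s.index == [1, 2, 3]);
|    assert(s.key == s.index && s.time == s.index);
|    ---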
| +/ | auto index()() @property @trusted | { | return _index.sliced(_data._lengths[0]); | } | | /// ditto | auto index()() @property @trusted const | { | return _index.lightConst.sliced(_data._lengths[0]); | } | | /// ditto | auto index()() @property @trusted immutable | { | return _index.lightImmutable.sliced(_data._lengths[0]); | } | | private auto lightScopeIndex()() @property @trusted | { | return .lightScope(_index).sliced(_data._lengths[0]); | } | | private auto lightScopeIndex()() @property @trusted const | { | return .lightScope(_index).sliced(_data._lengths[0]); | } | | private auto lightScopeIndex()() @property @trusted immutable | { | return .lightScope(_index).sliced(_data._lengths[0]); | } | | /++ | Data is any ndslice with only one constraint: | `data` and `index` lengths should be equal. | +/ | auto data()() @property @trusted | { | return _data; | } | | /// ditto | auto data()() @property @trusted const | { | return _data[]; | } | | /// ditto | auto data()() @property @trusted immutable | { | return _data[]; | } | | /// | typeof(this) opBinary(string op : "~")(typeof(this) rhs) | { | return unionSeries(this.lightScope, rhs.lightScope); | } | | /// ditto | auto opBinary(string op : "~")(const typeof(this) rhs) const | { | return unionSeries(this.lightScope, rhs.lightScope); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import std.datetime: Date; | | ////////////////////////////////////// | // Constructs two time-series. | ////////////////////////////////////// | auto index0 = [1,3,4]; | auto data0 = [1.0, 3, 4]; | auto series0 = index0.series(data0); | | auto index1 = [1,2,5]; | auto data1 = [10.0, 20, 50]; | auto series1 = index1.series(data1); | | ////////////////////////////////////// | // Merges multiple series into one. | ////////////////////////////////////// | // Order matters. | // The first slice has higher priority. | auto m0 = series0 ~ series1; | auto m1 = series1 ~ series0; | | assert(m0.index == m1.index); | assert(m0.data == [ 1, 20, 3, 4, 50]); | assert(m1.data == [10, 20, 3, 4, 50]); | } | | static if (doUnittest) | @safe pure nothrow version(mir_test) unittest | { | import std.datetime: Date; | | ////////////////////////////////////// | // Constructs two time-series. | ////////////////////////////////////// | auto index0 = [1,3,4]; | auto data0 = [1.0, 3, 4]; | auto series0 = index0.series(data0); | | auto index1 = [1,2,5]; | auto data1 = [10.0, 20, 50]; | const series1 = index1.series(data1); | | ////////////////////////////////////// | // Merges multiple series into one. | ////////////////////////////////////// | // Order matters. | // The first slice has higher priority. | auto m0 = series0 ~ series1; | auto m1 = series1 ~ series0; | | assert(m0.index == m1.index); | assert(m0.data == [ 1, 20, 3, 4, 50]); | assert(m1.data == [10, 20, 3, 4, 50]); | } | | /++ | Special `[] =` index-assign operator for index-series. | Assigns data from `r` with index intersection. | If an index in `r` is not in the index for this series, then no assignment will take place. | This series and `r` are assumed to be sorted.
| | Params: | r = rvalue index-series | +/ | void opIndexAssign(IndexIterator_, Iterator_, size_t N_, SliceKind kind_) | (Series!(IndexIterator_, Iterator_, N_, kind_) r) | { | opIndexOpAssign!("", IndexIterator_, Iterator_, N_, kind_)(r); | } | | static if (doUnittest) | /// | version(mir_test) unittest | { | auto index = [1, 2, 3, 4]; | auto data = [10.0, 10, 10, 10]; | auto series = index.series(data); | | auto rindex = [0, 2, 4, 5]; | auto rdata = [1.0, 2, 3, 4]; | auto rseries = rindex.series(rdata); | | // series[] = rseries; | series[] = rseries; | assert(series.data == [10, 2, 10, 3]); | } | | /++ | Special `[] op=` index-op-assign operator for index-series. | Op-assigns data from `r` with index intersection. | If a index index in `r` is not in the index index for this series, then no op-assign will take place. | This and r series are assumed to be sorted. | | Params: | rSeries = rvalue index-series | +/ | void opIndexOpAssign(string op, IndexIterator_, Iterator_, size_t N_, SliceKind kind_) | (auto ref Series!(IndexIterator_, Iterator_, N_, kind_) rSeries) | { | auto l = this.lightScope; | auto r = rSeries.lightScope; | if (r.empty) | return; | if (l.empty) | return; | Unqual!(typeof(*r._index)) rf = *r._index; | Unqual!(typeof(*l._index)) lf = *l._index; | goto Begin; | R: | r.popFront; | if (r.empty) | goto End; | rf = *r._index; | Begin: | if (lf > rf) | goto R; | if (lf < rf) | goto L; | E: | static if (N != 1) | mixin("l.data.front[] " ~ op ~ "= r.data.front;"); | else | mixin("l.data.front " ~ op ~ "= r.data.front;"); | | r.popFront; | if (r.empty) | goto End; | rf = *r._index; | L: | l.popFront; | if (l.empty) | goto End; | lf = *l._index; | | if (lf < rf) | goto L; | if (lf == rf) | goto E; | goto R; | End: | } | | static if (doUnittest) | /// | version(mir_test) unittest | { | auto index = [1, 2, 3, 4]; | auto data = [10.0, 10, 10, 10]; | auto series = index.series(data); | | auto rindex = [0, 2, 4, 5]; | auto rdata = [1.0, 2, 3, 4]; | auto rseries = rindex.series(rdata); | | series[] += rseries; | assert(series.data == [10, 12, 10, 13]); | } | | /++ | This function uses a search with policy sp to find the largest left subrange on which | `t < key` is true for all `t`. | The search schedule and its complexity are documented in `std.range.SearchPolicy`. | +/ | auto lowerBound(Index)(auto ref scope const Index key) | { | return opIndex(opSlice(0, lightScopeIndex.transitionIndex(key))); | } | | /// ditto | auto lowerBound(Index)(auto ref scope const Index key) const | { | return opIndex(opSlice(0, lightScopeIndex.transitionIndex(key))); | } | | | /++ | This function uses a search with policy sp to find the largest right subrange on which | `t > key` is true for all `t`. | The search schedule and its complexity are documented in `std.range.SearchPolicy`. | +/ | auto upperBound(Index)(auto ref scope const Index key) | { | return opIndex(opSlice(lightScopeIndex.transitionIndex!"a <= b"(key), length)); | } | | /// ditto | auto upperBound(Index)(auto ref scope const Index key) const | { | return opIndex(opSlice(lightScopeIndex.transitionIndex!"a <= b"(key), length)); | } | | /** | Gets data for the index. | Params: | key = index | _default = default value is returned if the series does not contains the index. | Returns: | data that corresponds to the index or default value. 
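|
|    A short sketch (illustrative only):
|    ---
|    import mir.series;
|    auto s = [1, 2, 3].series([10.0, 20, 30]);
|    double fallback = -1;
|    assert(s.get(2, fallback) == 20); // key found
|    assert(s.get(5, fallback) == -1); // key missing, the default is returned
|    ---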
| */ | ref get(Index, Value)(auto ref scope const Index key, return ref Value _default) @trusted | if (!is(Value : const(Exception))) | { | size_t idx = lightScopeIndex.transitionIndex(key); | return idx < _data._lengths[0] && _index[idx] == key ? _data[idx] : _default; | } | | /// ditto | ref get(Index, Value)(auto ref scope const Index key, return ref Value _default) const | if (!is(Value : const(Exception))) | { | return this.lightScope.get(key, _default); | } | | /// ditto | ref get(Index, Value)(auto ref scope const Index key, return ref Value _default) immutable | if (!is(Value : const(Exception))) | { | return this.lightScope.get(key, _default); | } | | auto get(Index, Value)(auto ref scope const Index key, Value _default) @trusted | if (!is(Value : const(Exception))) | { | size_t idx = lightScopeIndex.transitionIndex(key); | return idx < _data._lengths[0] && _index[idx] == key ? _data[idx] : _default; | } | | /// ditto | auto get(Index, Value)(auto ref scope const Index key, Value _default) const | if (!is(Value : const(Exception))) | { | import core.lifetime: forward; | return this.lightScope.get(key, forward!_default); | } | | /// ditto | auto get(Index, Value)(auto ref scope const Index key, Value _default) immutable | if (!is(Value : const(Exception))) | { | import core.lifetime: forward; | return this.lightScope.get(key, forward!_default); | } | | /** | Gets data for the index. | Params: | key = index | exc = (lazy, optional) exception to throw if the series does not contains the index. | Returns: data that corresponds to the index. | Throws: | Exception if the series does not contains the index. | See_also: $(LREF Series.getVerbose), $(LREF Series.tryGet) | */ | auto ref get(Index)(auto ref scope const Index key) @trusted | { | size_t idx = lightScopeIndex.transitionIndex(key); | if (idx < _data._lengths[0] && _index[idx] == key) | { | return _data[idx]; | } | throw defaultExc!(); | } | | /// ditto | auto ref get(Index)(auto ref scope const Index key, lazy const Exception exc) @trusted | { | size_t idx = lightScopeIndex.transitionIndex(key); | if (idx < _data._lengths[0] && _index[idx] == key) | { | return _data[idx]; | } | throw exc; | } | | /// ditto | auto ref get(Index)(auto ref scope const Index key) const | { | return this.lightScope.get(key); | } | | /// ditto | auto ref get(Index)(auto ref scope const Index key, lazy const Exception exc) const | { | return this.lightScope.get(key, exc); | } | | | /// ditto | auto ref get(Index)(auto ref scope const Index key) immutable | { | return this.lightScope.get(key); | } | | /// ditto | auto ref get(Index)(auto ref scope const Index key, lazy const Exception exc) immutable | { | return this.lightScope.get(key, exc); | } | | /** | Gets data for the index (verbose exception). | Params: | key = index | Returns: data that corresponds to the index. | Throws: | Detailed exception if the series does not contains the index. 
| See_also: $(LREF Series.get), $(LREF Series.tryGet) | */ | auto ref getVerbose(Index)(auto ref scope const Index key, string file = __FILE__, int line = __LINE__) | { | import std.format: format; | return this.get(key, new Exception(format("%s %s key", defaultMsg!(), key), file, line)); | } | | /// ditto | auto ref getVerbose(Index)(auto ref scope const Index key, string file = __FILE__, int line = __LINE__) const | { | return this.lightScope.getVerbose(key, file, line); | } | | /// ditto | auto ref getVerbose(Index)(auto ref scope const Index key, string file = __FILE__, int line = __LINE__) immutable | { | return this.lightScope.getVerbose(key, file, line); | } | | /** | Gets data for the index (extra verbose exception). | Params: | key = index | Returns: data that corresponds to the index. | Throws: | Detailed exception if the series does not contains the index. | See_also: $(LREF Series.get), $(LREF Series.tryGet) | */ | auto ref getExtraVerbose(Index)(auto ref scope const Index key, string exceptionInto, string file = __FILE__, int line = __LINE__) | { | import std.format: format; | return this.get(key, new Exception(format("%s. %s %s key", exceptionInto, defaultMsg!(), key), file, line)); | } | | /// ditto | auto ref getExtraVerbose(Index)(auto ref scope const Index key, string exceptionInto, string file = __FILE__, int line = __LINE__) const | { | return this.lightScope.getExtraVerbose(key, exceptionInto, file, line); | } | | /// ditto | auto ref getExtraVerbose(Index)(auto ref scope const Index key, string exceptionInto, string file = __FILE__, int line = __LINE__) immutable | { | return this.lightScope.getExtraVerbose(key, exceptionInto, file, line); | } | | /// | bool contains(Index)(auto ref scope const Index key) const @trusted | { | size_t idx = lightScopeIndex.transitionIndex(key); | return idx < _data._lengths[0] && _index[idx] == key; | } | | /// | auto opBinaryRight(string op : "in", Index)(auto ref scope const Index key) @trusted | { | size_t idx = lightScopeIndex.transitionIndex(key); | bool cond = idx < _data._lengths[0] && _index[idx] == key; | static if (__traits(compiles, &_data[size_t.init])) | { | if (cond) | return &_data[idx]; | return null; | } | else | { | return bool(cond); | } | } | | /// ditto | auto opBinaryRight(string op : "in", Index)(auto ref scope const Index key) const | { | return key in this.lightScope; | } | | /// ditto | auto opBinaryRight(string op : "in", Index)(auto ref scope const Index key) immutable | { | return key in this.lightScope; | } | | /++ | Tries to get the first value, such that `key_i == key`. | | Returns: `true` on success. | +/ | bool tryGet(Index, Value)(Index key, scope ref Value val) @trusted | { | size_t idx = lightScopeIndex.transitionIndex(key); | auto cond = idx < _data._lengths[0] && _index[idx] == key; | if (cond) | val = _data[idx]; | return cond; | } | | /// ditto | bool tryGet(Index, Value)(Index key, scope ref Value val) const | { | return this.lightScope.tryGet(key, val); | } | | /// ditto | bool tryGet(Index, Value)(Index key, scope ref Value val) immutable | { | return this.lightScope.tryGet(key, val); | } | | /++ | Tries to get the first value, such that `key_i >= key`. | | Returns: `true` on success. 
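|
|    A short sketch (illustrative only): `tryGetNext` picks the first key that is not less
|    than the request, `tryGetPrev` the last key that is not greater.
|    ---
|    import mir.series;
|    auto s = [2, 4, 6].series([20.0, 40, 60]);
|    double v;
|    assert(s.tryGetNext(5, v) && v == 60);
|    assert(s.tryGetPrev(5, v) && v == 40);
|    assert(!s.tryGetNext(7, v)); // no key >= 7
|    ---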
| +/ | bool tryGetNext(Index, Value)(auto ref scope const Index key, scope ref Value val) | { | size_t idx = lightScopeIndex.transitionIndex(key); | auto cond = idx < _data._lengths[0]; | if (cond) | val = _data[idx]; | return cond; | } | | /// ditto | bool tryGetNext(Index, Value)(auto ref scope const Index key, scope ref Value val) const | { | return this.lightScope.tryGetNext(key, val); | } | | /// ditto | bool tryGetNext(Index, Value)(auto ref scope const Index key, scope ref Value val) immutable | { | return this.lightScope.tryGetNext(key, val); | } | | /++ | Tries to get the first value, such that `key_i >= key`. | Updates `key` with `key_i`. | | Returns: `true` on success. | +/ | bool tryGetNextUpdateKey(Index, Value)(scope ref Index key, scope ref Value val) @trusted | { | size_t idx = lightScopeIndex.transitionIndex(key); | auto cond = idx < _data._lengths[0]; | if (cond) | { | key = _index[idx]; | val = _data[idx]; | } | return cond; | } | | /// ditto | bool tryGetNextUpdateKey(Index, Value)(scope ref Index key, scope ref Value val) const | { | return this.lightScope.tryGetNextUpdateKey(key, val); | } | | /// ditto | bool tryGetNextUpdateKey(Index, Value)(scope ref Index key, scope ref Value val) immutable | { | return this.lightScope.tryGetNextUpdateKey(key, val); | } | | /++ | Tries to get the last value, such that `key_i <= key`. | | Returns: `true` on success. | +/ | bool tryGetPrev(Index, Value)(auto ref scope const Index key, scope ref Value val) | { | size_t idx = lightScopeIndex.transitionIndex!"a <= b"(key) - 1; | auto cond = 0 <= sizediff_t(idx); | if (cond) | val = _data[idx]; | return cond; | } | | /// ditto | bool tryGetPrev(Index, Value)(auto ref scope const Index key, scope ref Value val) const | { | return this.lightScope.tryGetPrev(key, val); | } | | /// ditto | bool tryGetPrev(Index, Value)(auto ref scope const Index key, scope ref Value val) immutable | { | return this.lightScope.tryGetPrev(key, val); | } | | /++ | Tries to get the last value, such that `key_i <= key`. | Updates `key` with `key_i`. | | Returns: `true` on success. | +/ | bool tryGetPrevUpdateKey(Index, Value)(scope ref Index key, scope ref Value val) @trusted | { | size_t idx = lightScopeIndex.transitionIndex!"a <= b"(key) - 1; | auto cond = 0 <= sizediff_t(idx); | if (cond) | { | key = _index[idx]; | val = _data[idx]; | } | return cond; | } | | /// ditto | bool tryGetPrevUpdateKey(Index, Value)(scope ref Index key, scope ref Value val) const | { | return this.lightScope.tryGetPrevUpdateKey(key, val); | } | | /// ditto | bool tryGetPrevUpdateKey(Index, Value)(scope ref Index key, scope ref Value val) immutable | { | return this.lightScope.tryGetPrevUpdateKey(key, val); | } | | /++ | Tries to get the first value, such that `lowerBound <= key_i <= upperBound`. | | Returns: `true` on success. 
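|
|    A short sketch (illustrative only):
|    ---
|    import mir.series;
|    auto s = [2, 4, 6].series([20.0, 40, 60]);
|    double v;
|    assert(s.tryGetFirst(3, 6, v) && v == 40); // first key in [3, 6]
|    assert(!s.tryGetFirst(7, 9, v));           // no key in [7, 9]
|    ---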
| +/ | bool tryGetFirst(Index, Value)(auto ref scope const Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) @trusted | { | size_t idx = lightScopeIndex.transitionIndex(lowerBound); | auto cond = idx < _data._lengths[0] && _index[idx] <= upperBound; | if (cond) | val = _data[idx]; | return cond; | } | | /// ditto | bool tryGetFirst(Index, Value)(Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) const | { | return this.lightScope.tryGetFirst(lowerBound, upperBound, val); | } | | /// ditto | bool tryGetFirst(Index, Value)(Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) immutable | { | return this.lightScope.tryGetFirst(lowerBound, upperBound, val); | } | | /++ | Tries to get the first value, such that `lowerBound <= key_i <= upperBound`. | Updates `lowerBound` with `key_i`. | | Returns: `true` on success. | +/ | bool tryGetFirstUpdateLower(Index, Value)(ref Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) @trusted | { | size_t idx = lightScopeIndex.transitionIndex(lowerBound); | auto cond = idx < _data._lengths[0] && _index[idx] <= upperBound; | if (cond) | { | lowerBound = _index[idx]; | val = _data[idx]; | } | return cond; | } | | /// ditto | bool tryGetFirstUpdateLower(Index, Value)(ref Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) const | { | return this.lightScope.tryGetFirstUpdateLower(lowerBound, upperBound, val); | } | | /// ditto | bool tryGetFirstUpdateLower(Index, Value)(ref Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) immutable | { | return this.lightScope.tryGetFirstUpdateLower(lowerBound, upperBound, val); | } | | /++ | Tries to get the last value, such that `lowerBound <= key_i <= upperBound`. | | Returns: `true` on success. | +/ | bool tryGetLast(Index, Value)(Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) @trusted | { | size_t idx = lightScopeIndex.transitionIndex!"a <= b"(upperBound) - 1; | auto cond = 0 <= sizediff_t(idx) && _index[idx] >= lowerBound; | if (cond) | val = _data[idx]; | return cond; | } | | /// ditto | bool tryGetLast(Index, Value)(Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) const | { | return this.lightScope.tryGetLast(lowerBound, upperBound, val); | } | | /// ditto | bool tryGetLast(Index, Value)(Index lowerBound, auto ref scope const Index upperBound, scope ref Value val) immutable | { | return this.lightScope.tryGetLast(lowerBound, upperBound, val); | } | | /++ | Tries to get the last value, such that `lowerBound <= key_i <= upperBound`. | Updates `upperBound` with `key_i`. | | Returns: `true` on success. 
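|
|    A short sketch (illustrative only):
|    ---
|    import mir.series;
|    auto s = [2, 4, 6].series([20.0, 40, 60]);
|    double v;
|    int upper = 5;
|    assert(s.tryGetLastUpdateKey(2, upper, v) && upper == 4 && v == 40);
|    ---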
| +/ | bool tryGetLastUpdateKey(Index, Value)(Index lowerBound, ref Index upperBound, scope ref Value val) @trusted | { | size_t idx = lightScopeIndex.transitionIndex!"a <= b"(upperBound) - 1; | auto cond = 0 <= sizediff_t(idx) && _index[idx] >= lowerBound; | if (cond) | { | upperBound = _index[idx]; | val = _data[idx]; | } | return cond; | } | | /// ditto | bool tryGetLastUpdateKey(Index, Value)(Index lowerBound, ref Index upperBound, scope ref Value val) const | { | return this.lightScope.tryGetLastUpdateKey(lowerBound, upperBound, val); | } | | /// ditto | bool tryGetLastUpdateKey(Index, Value)(Index lowerBound, ref Index upperBound, scope ref Value val) immutable | { | return this.lightScope.tryGetLastUpdateKey(lowerBound, upperBound, val); | } | | /++ | Returns: | 1D Slice with creared with $(NDSLICE topology, zip) ([0] - key, [1] - value). | See_also: | $(NDSLICE topology, map) uses multiargument lambdas to handle zipped slices. | +/ | auto asSlice()() @property | { | import mir.ndslice.topology: zip, map, ipack; | static if (N == 1) | return index.zip(data); | else | return index.zip(data.ipack!1.map!"a"); | } | | /// ditto | auto asSlice()() const @property | { | return opIndex.asSlice; | } | | /// ditto | auto asSlice()() immutable @property | { | return opIndex.asSlice; | } | | /// ndslice-like primitives | bool empty(size_t dimension = 0)() const @property | if (dimension < N) | { | return !length!dimension; | } | | /// ditto | size_t length(size_t dimension = 0)() const @property | if (dimension < N) | { | return _data.length!dimension; | } | | /// ditto | auto front(size_t dimension = 0)() @property | if (dimension < N) | { | assert(!empty!dimension); | static if (dimension) | { | return index.series(data.front!dimension); | } | else | { | return index.front.observation(data.front); | } | } | | /// ditto | auto back(size_t dimension = 0)() @property | if (dimension < N) | { | assert(!empty!dimension); | static if (dimension) | { | return index.series(_data.back!dimension); | } | else | { | return index.back.observation(_data.back); | } | } | | /// ditto | void popFront(size_t dimension = 0)() @trusted | if (dimension < N) | { | assert(!empty!dimension); | static if (dimension == 0) | _index++; | _data.popFront!dimension; | } | | /// ditto | void popBack(size_t dimension = 0)() | if (dimension < N) | { | assert(!empty!dimension); | _data.popBack!dimension; | } | | /// ditto | void popFrontExactly(size_t dimension = 0)(size_t n) @trusted | if (dimension < N) | { | assert(length!dimension >= n); | static if (dimension == 0) | _index += n; | _data.popFrontExactly!dimension(n); | } | | /// ditto | void popBackExactly(size_t dimension = 0)(size_t n) | if (dimension < N) | { | assert(length!dimension >= n); | _data.popBackExactly!dimension(n); | } | | /// ditto | void popFrontN(size_t dimension = 0)(size_t n) | if (dimension < N) | { | auto len = length!dimension; | n = n <= len ? n : len; | popFrontExactly!dimension(n); | } | | /// ditto | void popBackN(size_t dimension = 0)(size_t n) | if (dimension < N) | { | auto len = length!dimension; | n = n <= len ? n : len; | popBackExactly!dimension(n); | } | | /// ditto | Slice!(IotaIterator!size_t) opSlice(size_t dimension = 0)(size_t i, size_t j) const | if (dimension < N) | in | { | assert(i <= j, | "Series.opSlice!" 
~ dimension.stringof ~ ": the left opSlice boundary must be less than or equal to the right bound."); | enum errorMsg = ": difference between the right and the left bounds" | ~ " must be less than or equal to the length of the given dimension."; | assert(j - i <= _data._lengths[dimension], | "Series.opSlice!" ~ dimension.stringof ~ errorMsg); | } | do | { | return typeof(return)(j - i, typeof(return).Iterator(i)); | } | | /// ditto | size_t opDollar(size_t dimension = 0)() const | { | return _data.opDollar!dimension; | } | | /// ditto | auto opIndex(Slices...)(Slices slices) | if (allSatisfy!(templateOr!(is_Slice, isIndex), Slices)) | { | static if (Slices.length == 0) | { | return this; | } | else | static if (is_Slice!(Slices[0])) | { | return index[slices[0]].series(data[slices]); | } | else | { | return index[slices[0]].observation(data[slices]); | } | } | | /// ditto | auto opIndex(Slices...)(Slices slices) const | if (allSatisfy!(templateOr!(is_Slice, isIndex), Slices)) | { | return lightConst.opIndex(slices); | } | | /// ditto | auto opIndex(Slices...)(Slices slices) immutable | if (allSatisfy!(templateOr!(is_Slice, isIndex), Slices)) | { | return lightImmutable.opIndex(slices); | } | | /// | ref opAssign(typeof(this) rvalue) return @trusted | { | import mir.utility: swap; | this._data._structure = rvalue._data._structure; | swap(this._data._iterator, rvalue._data._iterator); | swap(this._index, rvalue._index); | return this; | } | | /// ditto | ref opAssign(RIndexIterator, RIterator)(Series!(RIndexIterator, RIterator, N, kind) rvalue) return | if (isAssignable!(IndexIterator, RIndexIterator) && isAssignable!(Iterator, RIterator)) | { | static if (__VERSION__ >= 2085) import core.lifetime: move; else import std.algorithm.mutation: move; | this._data._structure = rvalue._data._structure; | this._data._iterator = rvalue._data._iterator.move; | this._index = rvalue._index.move; | return this; | } | | /// ditto | ref opAssign(RIndexIterator, RIterator)(auto ref const Series!(RIndexIterator, RIterator, N, kind) rvalue) return | if (isAssignable!(IndexIterator, LightConstOf!RIndexIterator) && isAssignable!(Iterator, LightConstOf!RIterator)) | { | return this = rvalue.opIndex; | } | | /// ditto | ref opAssign(RIndexIterator, RIterator)(auto ref immutable Series!(RIndexIterator, RIterator, N, kind) rvalue) return | if (isAssignable!(IndexIterator, LightImmutableOf!RIndexIterator) && isAssignable!(Iterator, LightImmutableOf!RIterator)) | { | return this = rvalue.opIndex; | } | | /// ditto | ref opAssign(typeof(null)) return | { | return this = this.init; | } | | /// ditto | auto save()() @property | { | return this; | } | | /// | Series!(LightScopeOf!IndexIterator, LightScopeOf!Iterator, N, kind) lightScope()() @trusted scope return @property | { | return typeof(return)(lightScopeIndex, _data.lightScope); | } | | /// ditto | Series!(LightConstOf!(LightScopeOf!IndexIterator), LightConstOf!(LightScopeOf!Iterator), N, kind) lightScope()() @trusted scope return const @property | { | return typeof(return)(lightScopeIndex, _data.lightScope); | } | | /// ditto | Series!(LightConstOf!(LightScopeOf!IndexIterator), LightConstOf!(LightScopeOf!Iterator), N, kind) lightScope()() @trusted scope return immutable @property | { | return typeof(return)(lightScopeIndex, _data.lightScope); | } | | /// | Series!(LightConstOf!IndexIterator, LightConstOf!Iterator, N, kind) lightConst()() scope return const @property @trusted | { | return index.series(data); | } | | /// | Series!(LightImmutableOf!IndexIterator, 
LightImmutableOf!Iterator, N, kind) lightImmutable()() scope return immutable @property @trusted | { | return index.series(data); | } | | /// | auto toConst()() const @property | { | return index.toConst.series(data.toConst); | } | | /// | void toString(Writer, Spec)(auto ref Writer w, const ref Spec f) const | { | import std.format: formatValue, formatElement; | import std.range: put; | | if (f.spec != 's' && f.spec != '(') | throw new Exception("incompatible format character for Mir Series argument: %" ~ f.spec); | | enum defSpec = "%s" ~ f.keySeparator ~ "%s" ~ f.seqSeparator; | auto fmtSpec = f.spec == '(' ? f.nested : defSpec; | | if (f.spec == 's') | put(w, f.seqBefore); | if (length) for (size_t i = 0;;) | { | auto fmt = Spec(fmtSpec); | fmt.writeUpToNextSpec(w); | if (f.flDash) | { | formatValue(w, index[i], fmt); | fmt.writeUpToNextSpec(w); | formatValue(w, data[i], fmt); | } | else | { | formatElement(w, index[i], fmt); | fmt.writeUpToNextSpec(w); | formatElement(w, data[i], fmt); | } | if (f.sep !is null) | { | fmt.writeUpToNextSpec(w); | if (++i != length) | put(w, f.sep); | else | break; | } | else | { | if (++i != length) | fmt.writeUpToNextSpec(w); | else | break; | } | } | if (f.spec == 's') | put(w, f.seqAfter); | } | | version(mir_test) | /// | unittest | { | import mir.series: series, sort; | auto s = ["b", "a"].series([9, 8]).sort; | | import std.conv : to; | assert(s.to!string == `["a":8, "b":9]`); | | import std.format : format; | assert("%s".format(s) == `["a":8, "b":9]`); | assert("%(%s %s | %)".format(s) == `"a" 8 | "b" 9`); | assert("%-(%s,%s\n%)\n".format(s) == "a,8\nb,9\n"); | } |} | |/// ditto |alias Series = mir_series; | |/// 1-dimensional data |@safe pure version(mir_test) unittest |{ | auto index = [1, 2, 3, 4]; | auto data = [2.1, 3.4, 5.6, 7.8]; | auto series = index.series(data); | const cseries = series; | | assert(series.contains(2)); | assert( ()@trusted{ return (2 in series) is &data[1]; }() ); | | assert(!series.contains(5)); | assert( ()@trusted{ return (5 in series) is null; }() ); | | assert(series.lowerBound(2) == series[0 .. 1]); | assert(series.upperBound(2) == series[2 .. $]); | | assert(cseries.lowerBound(2) == cseries[0 .. 1]); | assert(cseries.upperBound(2) == cseries[2 .. $]); | | // slicing type deduction for const / immutable series | static assert(is(typeof(series[]) == | Series!(int*, double*))); | static assert(is(typeof(cseries[]) == | Series!(const(int)*, const(double)*))); | static assert(is(typeof((cast(immutable) series)[]) == | Series!(immutable(int)*, immutable(double)*))); | | /// slicing | auto seriesSlice = series[1 .. $ - 1]; | assert(seriesSlice.index == index[1 .. $ - 1]); | assert(seriesSlice.data == data[1 .. 
$ - 1]); | static assert(is(typeof(series) == typeof(seriesSlice))); | | /// indexing | assert(series[1] == observation(2, 3.4)); | | /// range primitives | assert(series.length == 4); | assert(series.front == observation(1, 2.1)); | | series.popFront; | assert(series.front == observation(2, 3.4)); | | series.popBackN(10); | assert(series.empty); |} | |/// 2-dimensional data |@safe pure version(mir_test) unittest |{ | import std.datetime: Date; | import mir.ndslice.topology: canonical, iota; | | size_t row_length = 5; | | auto index = [ | Date(2017, 01, 01), | Date(2017, 02, 01), | Date(2017, 03, 01), | Date(2017, 04, 01)]; | | // 1, 2, 3, 4, 5 | // 6, 7, 8, 9, 10 | // 11, 12, 13, 14, 15 | // 16, 17, 18, 19, 20 | auto data = iota!int([index.length, row_length], 1); | | // canonical and universal ndslices are more flexible then contiguous | auto series = index.series(data.canonical); | | /// slicing | auto seriesSlice = series[1 .. $ - 1, 2 .. 4]; | assert(seriesSlice.index == index[1 .. $ - 1]); | assert(seriesSlice.data == data[1 .. $ - 1, 2 .. 4]); | | static if (kindOf!(typeof(series.data)) != Contiguous) | static assert(is(typeof(series) == typeof(seriesSlice))); | | /// indexing | assert(series[1, 4] == observation(Date(2017, 02, 01), 10)); | assert(series[2] == observation(Date(2017, 03, 01), iota!int([row_length], 11))); | | /// range primitives | assert(series.length == 4); | assert(series.length!1 == 5); | | series.popFront!1; | assert(series.length!1 == 4); |} | |/// Construct from null |@safe pure nothrow @nogc version(mir_test) unittest |{ | import mir.series; | alias Map = Series!(string*, double*); | Map a = null; | auto b = Map(null); | assert(a.empty); | assert(b.empty); | | auto fun(Map a = null) | { | | } |} | |/++ |Convenient function for $(LREF Series) construction. |See_also: $(LREF assocArray) |Attention: | This overloads do not sort the data. | User should call $(LREF directly) if index was not sorted. |+/ |auto series(IndexIterator, Iterator, size_t N, SliceKind kind) | ( | Slice!IndexIterator index, | Slice!(Iterator, N, kind) data, | ) |{ | assert(index.length == data.length); | return Series!(IndexIterator, Iterator, N, kind)(index, data); |} | |/// ditto |auto series(Index, Data)(Index[] index, Data[] data) |{ | assert(index.length == data.length); | return .series(index.sliced, data.sliced); |} | |/// ditto |auto series(IndexIterator, Data)(Slice!IndexIterator index, Data[] data) |{ | assert(index.length == data.length); | return .series(index, data.sliced); |} | |/// ditto |auto series(Index, Iterator, size_t N, SliceKind kind)(Index[] index, Slice!(Iterator, N, kind) data) |{ | assert(index.length == data.length); | return .series(index.sliced, data); |} | |/** |Constructs a GC-allocated series from an associative array. |Performs exactly two allocations. | |Params: | aa = associative array or a pointer to associative array |Returns: | sorted GC-allocated series. 
|See_also: $(LREF assocArray) |*/ |Series!(K*, V*) series(RK, RV, K = RK, V = RV)(RV[RK] aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | import mir.conv: to; | const size_t length = aa.length; | alias R = typeof(return); | if (__ctfe) | { | K[] keys; | V[] values; | foreach(ref kv; aa.byKeyValue) | { | keys ~= kv.key.to!K; | values ~= kv.value.to!V; | } | auto ret = series(keys, values); | .sort((()@trusted=>cast(Series!(Unqual!K*, Unqual!V*))ret)()); | static if (is(typeof(ret) == typeof(return))) | return ret; | else | return ()@trusted{ return *cast(R*) &ret; }(); | } | import mir.ndslice.allocation: uninitSlice; | Series!(Unqual!K*, Unqual!V*) ret = series(length.uninitSlice!(Unqual!K), length.uninitSlice!(Unqual!V)); | auto it = ret; | foreach(ref kv; aa.byKeyValue) | { | import mir.conv: emplaceRef; | emplaceRef!K(it.index.front, kv.key.to!K); | emplaceRef!V(it._data.front, kv.value.to!V); | it.popFront; | } | .sort(ret); | static if (is(typeof(ret) == typeof(return))) | return ret; | else | return ()@trusted{ return *cast(R*) &ret; }(); |} | |/// ditto |Series!(RK*, RV*) series(K, V, RK = const K, RV = const V)(const V[K] aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | return .series!(K, V, RK, RV)((()@trusted => cast(V[K]) aa)()); |} | |/// ditto |Series!(RK*, RV*) series( K, V, RK = immutable K, RV = immutable V)(immutable V[K] aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | return .series!(K, V, RK, RV)((()@trusted => cast(V[K]) aa)()); |} | |/// ditto |auto series(K, V)(V[K]* aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | return series(*a); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto s = [1: 1.5, 3: 3.3, 2: 20.9].series; | assert(s.index == [1, 2, 3]); | assert(s.data == [1.5, 20.9, 3.3]); | assert(s.data[s.findIndex(2)] == 20.9); |} | |pure nothrow version(mir_test) unittest |{ | immutable aa = [1: 1.5, 3: 3.3, 2: 2.9]; | auto s = aa.series; | s = cast() s; | s = cast(const) s; | s = cast(immutable) s; | s = s; | assert(s.index == [1, 2, 3]); | assert(s.data == [1.5, 2.9, 3.3]); | assert(s.data[s.findIndex(2)] == 2.9); |} | | |/** |Constructs a RC-allocated series from an associative array. |Performs exactly two allocations. | |Params: | aa = associative array or a pointer to associative array |Returns: | sorted RC-allocated series. 
|See_also: $(LREF assocArray) |*/ |auto rcseries(RK, RV, K = RK, V = RV)(RV[RK] aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | import mir.rc.array; | import mir.conv: to; | alias R = Series!(RCI!K, RCI!V); | const size_t length = aa.length; | auto ret = series(length.mininitRcarray!(Unqual!K).asSlice, length.mininitRcarray!(Unqual!V).asSlice); | auto it = ret.lightScope; | foreach(ref kv; aa.byKeyValue) | { | import mir.conv: emplaceRef; | emplaceRef!K(it.lightScopeIndex.front, kv.key.to!K); | emplaceRef!V(it._data.front, kv.value.to!V); | it.popFront; | } | static if (__VERSION__ >= 2085) import core.lifetime: move; else import std.algorithm.mutation: move; | .sort(ret.lightScope); | static if (is(typeof(ret) == R)) | return ret; | else | return ()@trusted{ return (*cast(R*) &ret); }(); |} | |/// ditto |auto rcseries(K, V, RK = const K, RV = const V)(const V[K] aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | return .rcseries!(K, V, RK, RV)((()@trusted => cast(V[K]) aa)()); |} | |/// ditto |auto rcseries( K, V, RK = immutable K, RV = immutable V)(immutable V[K] aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | return .rcseries!(K, V, RK, RV)((()@trusted => cast(V[K]) aa)()); |} | |/// ditto |auto rcseries(K, V)(V[K]* aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | return rcseries(*a); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto s = [1: 1.5, 3: 3.3, 2: 20.9].rcseries; | assert(s.index == [1, 2, 3]); | assert(s.data == [1.5, 20.9, 3.3]); | assert(s.data[s.findIndex(2)] == 20.9); |} | |// pure nothrow |version(mir_test) unittest |{ | import mir.rc.array; | immutable aa = [1: 1.5, 3: 3.3, 2: 2.9]; | auto s = aa.rcseries; | Series!(RCI!(const int), RCI!(const double)) c; | s = cast() s; | c = s; | s = cast(const) s; | s = cast(immutable) s; | s = s; | assert(s.index == [1, 2, 3]); | assert(s.data == [1.5, 2.9, 3.3]); | assert(s.data[s.findIndex(2)] == 2.9); |} | |/++ |Constructs a manually allocated series from an associative array. |Performs exactly two allocations. | |Params: | aa == associative array or a pointer to associative array |Returns: | sorted manually allocated series. 
|+/ |Series!(K*, V*) makeSeries(Allocator, K, V)(auto ref Allocator allocator, V[K] aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | import mir.ndslice.allocation: makeUninitSlice; | import mir.conv: emplaceRef; | | immutable size_t length = aa.length; | | auto ret = series( | allocator.makeUninitSlice!(Unqual!K)(length), | allocator.makeUninitSlice!(Unqual!V)(length)); | | auto it = ret; | foreach(ref kv; aa.byKeyValue) | { | it.index.front.emplaceRef!K(kv.key); | it.data.front.emplaceRef!V(kv.value); | it.popFront; | } | | ret.sort; | static if (is(typeof(ret) == typeof(return))) | return ret; | else | return ()@trusted{ return cast(typeof(return)) ret; }(); |} | |/// ditto |Series!(K*, V*) makeSeries(Allocator, K, V)(auto ref Allocator allocator, V[K]* aa) | if (is(typeof(K.init < K.init)) && is(typeof(Unqual!K.init < Unqual!K.init))) |{ | return makeSeries(allocator, *a); |} | |/// |pure nothrow version(mir_test) unittest |{ | import std.experimental.allocator; | import std.experimental.allocator.building_blocks.region; | | InSituRegion!(1024) allocator; | auto aa = [1: 1.5, 3: 3.3, 2: 2.9]; | | auto s = (double[int] aa) @nogc @trusted pure nothrow { | return allocator.makeSeries(aa); | }(aa); | | auto indexArray = s.index.field; | auto dataArray = s.data.field; | | assert(s.index == [1, 2, 3]); | assert(s.data == [1.5, 2.9, 3.3]); | assert(s.data[s.findIndex(2)] == 2.9); | | allocator.dispose(indexArray); | allocator.dispose(dataArray); |} | |/++ |Returns a newly allocated associative array from a range of key/value tuples. | |Params: | series = index / time $(LREF Series), may not be sorted | |Returns: A newly allocated associative array out of elements of the input |_series. Returns a null associative |array reference when given an empty _series. | |Duplicates: Associative arrays have unique keys. If r contains duplicate keys, |then the result will contain the value of the last pair for that key in r. |+/ |auto assocArray(IndexIterator, Iterator, size_t N, SliceKind kind) | (Series!(IndexIterator, Iterator, N, kind) series) |{ | alias SK = series.Key; | alias SV = series.Value; | alias UK = Unqual!SK; | alias UV = Unqual!SV; | static if (isImplicitlyConvertible!(SK, UK)) | alias K = UK; | else | alias K = SK; | static if (isImplicitlyConvertible!(SV, UV)) | alias V = UV; | else | alias V = SV; | static assert(isMutable!V, "mir.series.assocArray: value type ( " ~ V.stringof ~ " ) must be mutable"); | | V[K] aa; | aa.insertOrAssign = series; | return aa; |} | |/// |@safe pure version(mir_test) unittest |{ | import mir.ndslice; //iota and etc | import mir.series; | | auto s = ["c", "a", "b"].series(3.iota!int); | assert(s.assocArray == [ | "c": 0, | "a": 1, | "b": 2, | ]); |} | |/// Returns: true if `U` is a $(LREF Series); |enum isSeries(U) = is(U : Series!(IndexIterator, Iterator, N, kind), IndexIterator, Iterator, size_t N, SliceKind kind); | |/++ |Finds an index such that `series.index[index] == key`. | |Params: | series = series | key = index to find in the series |Returns: | `size_t.max` if the series does not contain the key and appropriate index otherwise. 
|+/ |size_t findIndex(IndexIterator, Iterator, size_t N, SliceKind kind, Index)(Series!(IndexIterator, Iterator, N, kind) series, auto ref scope const Index key) |{ | auto idx = series.lightScopeIndex.transitionIndex(key); | if (idx < series._data._lengths[0] && series.index[idx] == key) | { | return idx; | } | return size_t.max; |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto index = [1, 2, 3, 4].sliced; | auto data = [2.1, 3.4, 5.6, 7.8].sliced; | auto series = index.series(data); | | assert(series.data[series.findIndex(3)] == 5.6); | assert(series.findIndex(0) == size_t.max); |} | |/++ |Finds a backward index such that `series.index[$ - backward_index] == key`. | |Params: | series = series | key = index key to find in the series |Returns: | `0` if the series does not contain the key and appropriate backward index otherwise. |+/ |size_t find(IndexIterator, Iterator, size_t N, SliceKind kind, Index)(Series!(IndexIterator, Iterator, N, kind) series, auto ref scope const Index key) |{ | auto idx = series.lightScopeIndex.transitionIndex(key); | auto bidx = series._data._lengths[0] - idx; | if (bidx && series.index[idx] == key) | { | return bidx; | } | return 0; |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto index = [1, 2, 3, 4].sliced; | auto data = [2.1, 3.4, 5.6, 7.8].sliced; | auto series = index.series(data); | | if (auto bi = series.find(3)) | { | assert(series.data[$ - bi] == 5.6); | } | else | { | assert(0); | } | | assert(series.find(0) == 0); |} | |/++ |Iterates union using three functions to handle each intersection case separately. |Params: | lfun = binary function that accepts left side key (and left side value) | cfun = trinary function that accepts left side key, (left side value,) and right side value | rfun = binary function that accepts right side key (and right side value) |+/ |template troykaGalop(alias lfun, alias cfun, alias rfun) |{ | import std.range.primitives: isInputRange; | | /++ | Params: | lhs = left hand series | rhs = right hand series | +/ | pragma(inline, false) | void troykaGalop( | IndexIterL, IterL, size_t LN, SliceKind lkind, | IndexIterR, IterR, size_t RN, SliceKind rkind, | )( | Series!(IndexIterL, IterL, LN, lkind) lhs, | Series!(IndexIterR, IterR, RN, rkind) rhs, | ) | { | if (lhs.empty) | goto R0; | if (rhs.empty) | goto L1; | for(;;) | { | if (lhs.index.front < rhs.index.front) | { | lfun(lhs.index.front, lhs.data.front); | lhs.popFront; | if (lhs.empty) | goto R1; | continue; | } | else | if (lhs.index.front > rhs.index.front) | { | rfun(rhs.index.front, rhs.data.front); | rhs.popFront; | if (rhs.empty) | goto L1; | continue; | } | else | { | cfun(lhs.index.front, lhs.data.front, rhs.data.front); | lhs.popFront; | rhs.popFront; | if (rhs.empty) | goto L0; | if (lhs.empty) | goto R1; | continue; | } | } | | L0: | if (lhs.empty) | return; | L1: | do | { | lfun(lhs.index.front, lhs.data.front); | lhs.popFront; | } while(!lhs.empty); | return; | | R0: | if (rhs.empty) | return; | R1: | do | { | rfun(rhs.index.front, rhs.data.front); | rhs.popFront; | } while(!rhs.empty); | return; | } | | /++ | Params: | lhs = left hand input range | rhs = right hand input range | +/ | pragma(inline, false) | void troykaGalop (LeftRange, RightRange)(LeftRange lhs, RightRange rhs) | if (isInputRange!LeftRange && isInputRange!RightRange && !isSeries!LeftRange && !isSeries!RightRange) | { | if (lhs.empty) | goto R0; | if (rhs.empty) | goto L1; | for(;;) | { | if (lhs.front < rhs.front) | { | lfun(lhs.front); | lhs.popFront; | if 
(lhs.empty) | goto R1; | continue; | } | else | if (lhs.front > rhs.front) | { | rfun(rhs.front); | rhs.popFront; | if (rhs.empty) | goto L1; | continue; | } | else | { | cfun(lhs.front, rhs.front); | lhs.popFront; | rhs.popFront; | if (rhs.empty) | goto L0; | if (lhs.empty) | goto R1; | continue; | } | } | | L0: | if (lhs.empty) | return; | L1: | do | { | lfun(lhs.front); | lhs.popFront; | } while(!lhs.empty); | return; | | R0: | if (rhs.empty) | return; | R1: | do | { | rfun(rhs.front); | rhs.popFront; | } while(!rhs.empty); | return; | } |} | |/++ |Constructs union using three functions to handle each intersection case separately. |Params: | lfun = binary function that accepts left side key and left side value | cfun = trinary function that accepts left side key, left side value, and right side value | rfun = binary function that accepts right side key and right side value |+/ |template troykaSeries(alias lfun, alias cfun, alias rfun) |{ | /++ | Params: | lhs = left hand series | rhs = right hand series | Returns: | GC-allocated union series with length equal to $(LREF troykaLength) | +/ | auto troykaSeries | ( | IndexIterL, IterL, size_t LN, SliceKind lkind, | IndexIterR, IterR, size_t RN, SliceKind rkind, | )( | Series!(IndexIterL, IterL, LN, lkind) lhs, | Series!(IndexIterR, IterR, RN, rkind) rhs, | ) | { | alias I = CommonType!(typeof(lhs.index.front), typeof(rhs.index.front)); | alias E = CommonType!( | typeof(lfun(lhs.index.front, lhs.data.front)), | typeof(cfun(lhs.index.front, lhs.data.front, rhs.data.front)), | typeof(rfun(rhs.index.front, rhs.data.front)), | ); | alias R = Series!(I*, E*); | alias UI = Unqual!I; | alias UE = Unqual!E; | const length = troykaLength(lhs.index, rhs.index); | import mir.ndslice.allocation: uninitSlice; | auto index = length.uninitSlice!UI; | auto data = length.uninitSlice!UE; | auto ret = index.series(data); | alias algo = troykaSeriesImpl!(lfun, cfun, rfun); | algo!(I, E)(lhs.lightScope, rhs.lightScope, ret); | return (()@trusted => cast(R) ret)(); | } |} | |/// |version(mir_test) unittest |{ | import mir.ndslice; | auto a = [1, 2, 3, 9].sliced.series(iota!int([4], 1)); | auto b = [0, 2, 4, 9].sliced.series(iota!int([4], 1) * 10.0); | alias unionAlgorithm = troykaSeries!( | (key, left) => left, | (key, left, right) => left + right, | (key, right) => -right, | ); | auto c = unionAlgorithm(a, b); | assert(c.index == [0, 1, 2, 3, 4, 9]); | assert(c.data == [-10, 1, 22, 3, -30, 44]); |} | |/++ |Constructs union using three functions to handle each intersection case separately. 
|Params: | lfun = binary function that accepts left side key and left side value | cfun = trinary function that accepts left side key, left side value, and right side value | rfun = binary function that accepts right side key and right side value |+/ |template rcTroykaSeries(alias lfun, alias cfun, alias rfun) |{ | /++ | Params: | lhs = left hand series | rhs = right hand series | Returns: | RC-allocated union series with length equal to $(LREF troykaLength) | +/ | auto rcTroykaSeries | ( | IndexIterL, IterL, size_t LN, SliceKind lkind, | IndexIterR, IterR, size_t RN, SliceKind rkind, | )( | auto ref Series!(IndexIterL, IterL, LN, lkind) lhs, | auto ref Series!(IndexIterR, IterR, RN, rkind) rhs, | ) | { | import mir.rc.array; | alias I = CommonType!(typeof(lhs.index.front), typeof(rhs.index.front)); | alias E = CommonType!( | typeof(lfun(lhs.index.front, lhs.data.front)), | typeof(cfun(lhs.index.front, lhs.data.front, rhs.data.front)), | typeof(rfun(rhs.index.front, rhs.data.front)), | ); | alias R = Series!(RCI!I, RCI!E); | alias UI = Unqual!I; | alias UE = Unqual!E; | const length = troykaLength(lhs.index, rhs.index); | import mir.ndslice.allocation: uninitSlice; | auto ret = length.mininitRcarray!UI.asSlice.series(length.mininitRcarray!UE.asSlice); | alias algo = troykaSeriesImpl!(lfun, cfun, rfun); | algo!(I, E)(lhs.lightScope, rhs.lightScope, ret.lightScope); | return (()@trusted => *cast(R*) &ret)(); | } |} | |/// |version(mir_test) unittest |{ | import mir.ndslice; | auto a = [1, 2, 3, 9].sliced.series(iota!int([4], 1)); | auto b = [0, 2, 4, 9].sliced.series(iota!int([4], 1) * 10.0); | alias unionAlgorithm = rcTroykaSeries!( | (key, left) => left, | (key, left, right) => left + right, | (key, right) => -right, | ); | auto c = unionAlgorithm(a, b); | assert(c.index == [0, 1, 2, 3, 4, 9]); | assert(c.data == [-10, 1, 22, 3, -30, 44]); |} | | |/++ |Length for Troyka union handlers. |Params: | lhs = left hand side series/range | rhs = right hand side series/range |Returns: Total count of lambda function calls in $(LREF troykaGalop) union handler. 
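|
|A minimal sketch, assuming two sorted integer slices (the count equals the length of their sorted union):
|----
|assert(troykaLength([1, 2, 3].sliced, [2, 3, 4].sliced) == 4); // union is {1, 2, 3, 4}
|----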
|+/
|size_t troykaLength(
|    IndexIterL, IterL, size_t LN, SliceKind lkind,
|    IndexIterR, IterR, size_t RN, SliceKind rkind,
|)(
|    Series!(IndexIterL, IterL, LN, lkind) lhs,
|    Series!(IndexIterR, IterR, RN, rkind) rhs,
|)
|{
|    return troykaLength(lhs.index, rhs.index);
|}
|
|/// ditto
|size_t troykaLength(LeftRange, RightRange)(LeftRange lhs, RightRange rhs)
|    if (!isSeries!LeftRange && !isSeries!RightRange)
|{
|    size_t length;
|    alias counter = (scope auto ref _) => ++length;
|    alias ccounter = (scope auto ref _l, scope auto ref _r) => ++length;
|    troykaGalop!(counter, ccounter, counter)(lhs, rhs);
|    return length;
|}
|
|///
|template troykaSeriesImpl(alias lfun, alias cfun, alias rfun)
|{
|    ///
|    void troykaSeriesImpl
|    (
|        I, E,
|        IndexIterL, IterL, size_t LN, SliceKind lkind,
|        IndexIterR, IterR, size_t RN, SliceKind rkind,
|        UI, UE,
|    )(
|        Series!(IndexIterL, IterL, LN, lkind) lhs,
|        Series!(IndexIterR, IterR, RN, rkind) rhs,
|        Series!(UI*, UE*) uninitSlice,
|    )
|    {
|        import mir.conv: emplaceRef;
|        troykaGalop!(
|            (auto ref key, auto ref value) {
|                uninitSlice.index.front.emplaceRef!I(key);
|                uninitSlice.data.front.emplaceRef!E(lfun(key, value));
|                uninitSlice.popFront;
|            },
|            (auto ref key, auto ref lvalue, auto ref rvalue) {
|                uninitSlice.index.front.emplaceRef!I(key);
|                uninitSlice.data.front.emplaceRef!E(cfun(key, lvalue, rvalue));
|                uninitSlice.popFront;
|            },
|            (auto ref key, auto ref value) {
|                uninitSlice.index.front.emplaceRef!I(key);
|                uninitSlice.data.front.emplaceRef!E(rfun(key, value));
|                uninitSlice.popFront;
|            },
|        )(lhs, rhs);
|        assert(uninitSlice.length == 0);
|    }
|}
|
|/**
|Merges multiple (time) series into one.
|Makes exactly one memory allocation for a union of two series
|and two memory allocations for a union of three or more series.
|
|Params:
|    seriesTuple = variadic static array composed of series; each series must be sorted.
|Returns: sorted GC-allocated series.
|See_also: $(LREF Series.opBinary), $(LREF makeUnionSeries)
|*/
|auto unionSeries(IndexIterator, Iterator, size_t N, SliceKind kind, size_t C)(Series!(IndexIterator, Iterator, N, kind)[C] seriesTuple...)
|    if (C > 1)
|{
|    return unionSeriesImplPrivate!false(seriesTuple);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
|    import std.datetime: Date;
|
|    //////////////////////////////////////
|    // Constructs two time-series.
|    //////////////////////////////////////
|    auto index0 = [1,3,4];
|    auto data0 = [1.0, 3, 4];
|    auto series0 = index0.series(data0);
|
|    auto index1 = [1,2,5];
|    auto data1 = [10.0, 20, 50];
|    auto series1 = index1.series(data1);
|
|    //////////////////////////////////////
|    // Merges multiple series into one.
|    //////////////////////////////////////
|    // Order matters.
|    // The first series has higher priority.
|    auto m0 = unionSeries(series0, series1);
|    auto m1 = unionSeries(series1, series0);
|
|    assert(m0.index == m1.index);
|    assert(m0.data == [ 1, 20, 3, 4, 50]);
|    assert(m1.data == [10, 20, 3, 4, 50]);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
|    import std.datetime: Date;
|
|    //////////////////////////////////////
|    // Constructs three time-series.
|    //////////////////////////////////////
|    auto index0 = [1,3,4];
|    auto data0 = [1.0, 3, 4];
|    auto series0 = index0.series(data0);
|
|    auto index1 = [1,2,5];
|    auto data1 = [10.0, 20, 50];
|    auto series1 = index1.series(data1);
|
|    auto index2 = [1, 6];
|    auto data2 = [100.0, 600];
|    auto series2 = index2.series(data2);
|
|    //////////////////////////////////////
|    // Merges multiple series into one.
|    //////////////////////////////////////
|    // Order matters.
|    // The first series has higher priority.
|    auto m0 = unionSeries(series0, series1, series2);
|    auto m1 = unionSeries(series1, series0, series2);
|    auto m2 = unionSeries(series2, series0, series1);
|
|    assert(m0.index == m1.index);
|    assert(m0.index == m2.index);
|    assert(m0.data == [  1, 20, 3, 4, 50, 600]);
|    assert(m1.data == [ 10, 20, 3, 4, 50, 600]);
|    assert(m2.data == [100, 20, 3, 4, 50, 600]);
|}
|
|/**
|Merges multiple (time) series into one.
|
|Params:
|    allocator = memory allocator
|    seriesTuple = variadic static array composed of series.
|Returns: sorted manually allocated series.
|See_also: $(LREF unionSeries)
|*/
|auto makeUnionSeries(IndexIterator, Iterator, size_t N, SliceKind kind, size_t C, Allocator)(auto ref Allocator allocator, Series!(IndexIterator, Iterator, N, kind)[C] seriesTuple...)
|    if (C > 1)
|{
|    return unionSeriesImplPrivate!false(seriesTuple, allocator);
|}
|
|///
|@system pure nothrow version(mir_test) unittest
|{
|    import std.experimental.allocator;
|    import std.experimental.allocator.building_blocks.region;
|
|    //////////////////////////////////////
|    // Constructs two time-series.
|    //////////////////////////////////////
|    auto index0 = [1,3,4];
|
|    auto data0 = [1.0, 3, 4];
|    auto series0 = index0.series(data0);
|
|    auto index1 = [1,2,5];
|
|    auto data1 = [10.0, 20, 50];
|    auto series1 = index1.series(data1);
|
|    //////////////////////////////////////
|    // Merges multiple series into one.
|    //////////////////////////////////////
|
|    InSituRegion!(1024) allocator;
|
|    auto m0 = allocator.makeUnionSeries(series0, series1);
|    auto m1 = allocator.makeUnionSeries(series1, series0); // order matters
|
|    assert(m0.index == m1.index);
|    assert(m0.data == [ 1, 20, 3, 4, 50]);
|    assert(m1.data == [10, 20, 3, 4, 50]);
|
|    // the underlying arrays still have the same sizes as at allocation time, so they can be disposed
|    allocator.dispose(m0.index.field);
|    allocator.dispose(m0.data.field);
|    allocator.dispose(m1.index.field);
|    allocator.dispose(m1.data.field);
|}
|
|/**
|Merges multiple (time) series into one.
|
|Params:
|    seriesTuple = variadic static array composed of series.
|Returns: sorted RC-allocated series.
|See_also: $(LREF unionSeries)
|*/
|auto rcUnionSeries(IndexIterator, Iterator, size_t N, SliceKind kind, size_t C)(Series!(IndexIterator, Iterator, N, kind)[C] seriesTuple...)
|    if (C > 1)
|{
|    return unionSeriesImplPrivate!true(seriesTuple);
|}
|
|///
|@safe pure nothrow version(mir_test) unittest
|{
|    import mir.rc.array;
|
|    //////////////////////////////////////
|    // Constructs two time-series.
|    //////////////////////////////////////
|    auto index0 = [1,3,4];
|
|    auto data0 = [1.0, 3, 4];
|    auto series0 = index0.series(data0);
|
|    auto index1 = [1,2,5];
|
|    auto data1 = [10.0, 20, 50];
|    auto series1 = index1.series(data1);
|
|    //////////////////////////////////////
|    // Merges multiple series into one.
|    //////////////////////////////////////
|
|    Series!(RCI!int, RCI!double) m0 = rcUnionSeries(series0, series1);
|    Series!(RCI!int, RCI!double) m1 = rcUnionSeries(series1, series0); // order matters
|
|    assert(m0.index == m1.index);
|    assert(m0.data == [ 1, 20, 3, 4, 50]);
|    assert(m1.data == [10, 20, 3, 4, 50]);
|}
|
|/**
|Initializes a preallocated series using the union of multiple (time) series.
|Does not make any allocations.
|
|Params:
|    seriesTuple = dynamic array composed of series.
|    uninitSeries = uninitialized series with exactly the required length.
|*/ |pragma(inline, false) |auto unionSeriesImpl(I, E, | IndexIterator, Iterator, size_t N, SliceKind kind, UI, UE)( | Series!(IndexIterator, Iterator, N, kind)[] seriesTuple, | Series!(UI*, UE*, N) uninitSeries, | ) |{ | import mir.conv: emplaceRef; | import mir.algorithm.setops: multiwayUnion; | | enum N = N; | alias I = DeepElementType!(typeof(seriesTuple[0].index)); | alias E = DeepElementType!(typeof(seriesTuple[0]._data)); | | if(uninitSeries.length) | { | auto u = seriesTuple.multiwayUnion!"a.index < b.index"; | do | { | auto obs = u.front; | emplaceRef!I(uninitSeries.index.front, obs.index); | static if (N == 1) | emplaceRef!E(uninitSeries._data.front, obs.data); | else | each!(emplaceRef!E)(uninitSeries._data.front, obs.data); | u.popFront; | uninitSeries.popFront; | } | while(uninitSeries.length); | } |} | |private auto unionSeriesImplPrivate(bool rc, IndexIterator, Iterator, size_t N, SliceKind kind, size_t C, Allocator...)(ref Series!(IndexIterator, Iterator, N, kind)[C] seriesTuple, ref Allocator allocator) | if (C > 1 && Allocator.length <= 1) |{ | import mir.algorithm.setops: unionLength; | import mir.ndslice.topology: iota; | import mir.internal.utility: Iota; | import mir.ndslice.allocation: uninitSlice, makeUninitSlice; | static if (rc) | import mir.rc.array; | | Slice!IndexIterator[C] indeces; | foreach (i; Iota!C) | indeces[i] = seriesTuple[i].index; | | immutable len = indeces[].unionLength; | | alias I = typeof(seriesTuple[0].index.front); | alias E = typeof(seriesTuple[0].data.front); | static if (rc) | alias R = Series!(RCI!I, RCI!E, N); | else | alias R = Series!(I*, E*, N); | alias UI = Unqual!I; | alias UE = Unqual!E; | | static if (N > 1) | { | auto shape = seriesTuple[0]._data._lengths; | shape[0] = len; | | foreach (ref sl; seriesTuple[1 .. $]) | foreach (i; Iota!(1, N)) | if (seriesTuple._data[0]._lengths[i] != sl._data._lengths[i]) | assert(0, "shapes mismatch"); | } | else | { | alias shape = len; | } | | static if (rc == false) | { | static if (Allocator.length) | auto ret = (()@trusted => allocator[0].makeUninitSlice!UI(len).series(allocator[0].makeUninitSlice!UE(shape)))(); | else | auto ret = (()@trusted => len.uninitSlice!UI.series(shape.uninitSlice!UE))(); | } | else | { | static if (Allocator.length) | static assert(0, "rcUnionSeries with allocators is not implemented."); | else | auto ret = (()@trusted => | len | .mininitRcarray!UI | .asSlice | .series( | shape | .iota | .elementCount | .mininitRcarray!UE | .asSlice | .sliced(shape)))(); | } | | static if (C == 2) // fast path | { | alias algo = troykaSeriesImpl!( | ref (scope ref key, scope return ref left) => left, | ref (scope ref key, scope return ref left, scope return ref right) => left, | ref (scope ref key, scope return ref right) => right, | ); | algo!(I, E)(seriesTuple[0], seriesTuple[1], ret.lightScope); | } | else | { | unionSeriesImpl!(I, E)(seriesTuple, ret.lightScope); | } | | return () @trusted {return *cast(R*) &ret; }(); |} | |/** |Inserts or assigns a series to the associative array `aa`. |Params: | aa = associative array | series = series |Returns: | associative array |*/ |ref V[K] insertOrAssign(V, K, IndexIterator, Iterator, size_t N, SliceKind kind)(return ref V[K] aa, auto ref Series!(IndexIterator, Iterator, N, kind) series) @property |{ | auto s = series.lightScope; | foreach (i; 0 .. 
s.length) | { | aa[s.index[i]] = s.data[i]; | } | return aa; |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto a = [1: 3.0, 4: 2.0]; | auto s = series([1, 2, 3], [10, 20, 30]); | a.insertOrAssign = s; | assert(a.series == series([1, 2, 3, 4], [10.0, 20, 30, 2])); |} | |/** |Inserts a series to the associative array `aa`. |Params: | aa = associative array | series = series |Returns: | associative array |*/ |ref V[K] insert(V, K, IndexIterator, Iterator, size_t N, SliceKind kind)(return ref V[K] aa, auto ref Series!(IndexIterator, Iterator, N, kind) series) @property |{ | auto s = series.lightScope; | foreach (i; 0 .. s.length) | { | if (s.index[i] in aa) | continue; | aa[s.index[i]] = s.data[i]; | } | return aa; |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto a = [1: 3.0, 4: 2.0]; | auto s = series([1, 2, 3], [10, 20, 30]); | a.insert = s; | assert(a.series == series([1, 2, 3, 4], [3.0, 20, 30, 2])); |} | | |static if (__VERSION__ < 2078) |//////////////////// OBJECT.d |{ | |private: | |extern (C) |{ | // from druntime/src/rt/aaA.d | | // size_t _aaLen(in void* p) pure nothrow @nogc; | private void* _aaGetY(void** paa, const TypeInfo_AssociativeArray ti, in size_t valuesize, in void* pkey) pure nothrow; | // inout(void)* _aaGetRvalueX(inout void* p, in TypeInfo keyti, in size_t valuesize, in void* pkey); | inout(void)[] _aaValues(inout void* p, in size_t keysize, in size_t valuesize, const TypeInfo tiValArray) pure nothrow; | inout(void)[] _aaKeys(inout void* p, in size_t keysize, const TypeInfo tiKeyArray) pure nothrow; | void* _aaRehash(void** pp, in TypeInfo keyti) pure nothrow; | void _aaClear(void* p) pure nothrow; | | // alias _dg_t = extern(D) int delegate(void*); | // int _aaApply(void* aa, size_t keysize, _dg_t dg); | | // alias _dg2_t = extern(D) int delegate(void*, void*); | // int _aaApply2(void* aa, size_t keysize, _dg2_t dg); | | // private struct AARange { void* impl; size_t idx; } | alias AARange = ReturnType!(object._aaRange); | AARange _aaRange(void* aa) pure nothrow @nogc @safe; | bool _aaRangeEmpty(AARange r) pure nothrow @nogc @safe; | void* _aaRangeFrontKey(AARange r) pure nothrow @nogc @safe; | void* _aaRangeFrontValue(AARange r) pure nothrow @nogc @safe; | void _aaRangePopFront(ref AARange r) pure nothrow @nogc @safe; | |} | |auto byKeyValue(T : V[K], K, V)(T aa) pure nothrow @nogc @safe |{ | import core.internal.traits : substInout; | | static struct Result | { | AARange r; | | pure nothrow @nogc: | @property bool empty() @safe { return _aaRangeEmpty(r); } | @property auto front() | { | static struct Pair | { | // We save the pointers here so that the Pair we return | // won't mutate when Result.popFront is called afterwards. | private void* keyp; | private void* valp; | | @property ref key() inout | { | auto p = (() @trusted => cast(substInout!K*) keyp) (); | return *p; | }; | @property ref value() inout | { | auto p = (() @trusted => cast(substInout!V*) valp) (); | return *p; | }; | } | return Pair(_aaRangeFrontKey(r), | _aaRangeFrontValue(r)); | } | void popFront() @safe { return _aaRangePopFront(r); } | @property Result save() { return this; } | } | | return Result(_aaToRange(aa)); |} | |auto byKeyValue(T : V[K], K, V)(T* aa) pure nothrow @nogc |{ | return (*aa).byKeyValue(); |} | |// this should never be made public. |private AARange _aaToRange(T: V[K], K, V)(ref T aa) pure nothrow @nogc @safe |{ | // ensure we are dealing with a genuine AA. 
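|    // If T is already V[K] (up to const), bind it directly; otherwise convert to const(V[K]) first.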
| static if (is(const(V[K]) == const(T))) | alias realAA = aa; | else | const(V[K]) realAA = aa; | return _aaRange(() @trusted { return cast(void*)realAA; } ()); |} | |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/series.d has no code <<<<<< EOF # path=source-mir-sparse-blas-axpy.lst |/** |License: $(LINK2 http://boost.org/LICENSE_1_0.txt, Boost License 1.0). | |Authors: Ilya Yaroshenko |*/ |module mir.sparse.blas.axpy; | |import std.traits; |import mir.ndslice.slice; |import mir.sparse; |import mir.series; | |/++ |Constant times a vector plus a vector. | |Params: | x = sparse vector | y = dense vector | alpha = scalar |Returns: | `y = alpha * x + y` |+/ |void axpy( | CR, | V1 : Series!(I1, T1), | I1, T1, V2) |(in CR alpha, V1 x, V2 y) | if (isDynamicArray!V2 || isSlice!V2) |in |{ 28| if (x.index.length) 27| assert(x.index[$-1] < y.length); |} |body |{ | import mir.internal.utility; | 321| foreach (size_t i; 0 .. x.index.length) | { 79| auto j = x.index[i]; 79| y[j] = alpha * x.value[i] + y[j]; | } |} | |/// |unittest |{ | import mir.series; 1| auto x = series([0, 3, 5, 9, 10], [1.0, 3, 4, 9, 13]); 1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; 1| axpy(2.0, x, y); 1| assert(y == [2.0, 1.0, 2, 9, 4, 13, 6, 7, 8, 27, 36, 11, 12]); |} | |unittest |{ | import mir.series; 1| auto x = series([0, 3, 5, 9, 10], [1.0, 3, 4, 9, 13]); 1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; 1| axpy(2.0, x, y.sliced); 1| assert(y == [2.0, 1.0, 2, 9, 4, 13, 6, 7, 8, 27, 36, 11, 12]); |} | |unittest |{ 1| auto x = series([0, 3, 5, 9, 10], [1.0, 3, 4, 9, 13]); 1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; 1| axpy(2.0, x, y.slicedField); 1| assert(y == [2.0, 1.0, 2, 9, 4, 13, 6, 7, 8, 27, 36, 11, 12]); |} source/mir/sparse/blas/axpy.d is 100% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-iterator.lst |/++ |This is a submodule of $(MREF mir,ndslice). | |Iterator is a type with a pointer like behavior. |An ndslice can be created on top of an iterator using $(SUBREF slice, sliced). | |$(BOOKTABLE $(H2 Iterators), |$(TR $(TH Iterator Name) $(TH Used By)) |$(T2 BytegroupIterator, $(SUBREF topology, bytegroup).) |$(T2 CachedIterator, $(SUBREF topology, cached), $(SUBREF topology, cachedGC).) |$(T2 ChopIterator, $(SUBREF topology, chopped)) |$(T2 FieldIterator, $(SUBREF slice, slicedField), $(SUBREF topology, bitwise), $(SUBREF topology, ndiota), and others.) |$(T2 FlattenedIterator, $(SUBREF topology, flattened)) |$(T2 IndexIterator, $(SUBREF topology, indexed)) |$(T2 IotaIterator, $(SUBREF topology, iota)) |$(T2 MapIterator, $(SUBREF topology, map)) |$(T2 MemberIterator, $(SUBREF topology, member)) |$(T2 RetroIterator, $(SUBREF topology, retro)) |$(T2 SliceIterator, $(SUBREF topology, map) in composition with $(LREF MapIterator) for packed slices.) |$(T2 SlideIterator, $(SUBREF topology, diff), $(SUBREF topology, pairwise), and $(SUBREF topology, slide).) |$(T2 StairsIterator, $(SUBREF topology, stairs)) |$(T2 StrideIterator, $(SUBREF topology, stride)) |$(T2 SubSliceIterator, $(SUBREF topology, subSlices)) |$(T2 TripletIterator, $(SUBREF topology, triplets)) |$(T2 ZipIterator, $(SUBREF topology, zip)) |) | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). 
|Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.ndslice.iterator; | |import mir.internal.utility: Iota; |import mir.math.common: optmath; |import mir.ndslice.field; |import mir.ndslice.internal; |import mir.ndslice.slice: SliceKind, Slice, Universal, Canonical, Contiguous, isSlice; |import mir.qualifier; |import mir.conv; |import std.traits; | |private static immutable assumeZeroShiftExceptionMsg = "*.assumeFieldsHaveZeroShift: shift is not zero!"; |version(D_Exceptions) | private static immutable assumeZeroShiftException = new Exception(assumeZeroShiftExceptionMsg); | |@optmath: | |enum std_ops = q{ | void opUnary(string op)() scope | if (op == "--" || op == "++") | { mixin(op ~ "_iterator;"); } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == "-" || op == "+") | { mixin("_iterator " ~ op ~ "= index;"); } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { return this._iterator - right._iterator; } | | bool opEquals()(scope ref const typeof(this) right) scope const | { return this._iterator == right._iterator; } | | ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const | { | static if (isPointer!Iterator) | return this._iterator - right._iterator; | else | return this._iterator.opCmp(right._iterator); | } |}; | |/++ |Step counter. | |`IotaIterator` is used by $(SUBREF topology, iota). |+/ |struct IotaIterator(I) | if (isIntegral!I || isPointer!I) |{ |@optmath: | | /// | I _index; | | static if (isPointer!I) | /// | auto lightConst()() const @property | { | static if (isIntegral!I) | return IotaIterator!I(_index); | else | return IotaIterator!(LightConstOf!I)(_index); | } | | static if (isPointer!I) | /// | auto lightImmutable()() immutable @property | { | static if (isIntegral!I) | return IotaIterator!I(_index); | else | return IotaIterator!(LightImmutableOf!I)(_index); | } | | I opUnary(string op : "*")() 0000000| { return _index; } | | void opUnary(string op)() scope | if (op == "--" || op == "++") | { mixin(op ~ `_index;`); } | | I opIndex()(ptrdiff_t index) scope const 0000000| { return cast(I)(_index + index); } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == `+` || op == `-`) | { mixin(`_index ` ~ op ~ `= index;`); } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(const typeof(this) right) scope const | { return cast(ptrdiff_t)(this._index - right._index); } | | bool opEquals()(const typeof(this) right) scope const 0000000| { return this._index == right._index; } | | auto opCmp()(const typeof(this) right) scope const 0000000| { return this._index - right._index; } |} | |/// |@safe pure nothrow @nogc version(mir_test) unittest |{ | IotaIterator!int iota; | assert(*iota == 0); | | // iteration | ++iota; | assert(*iota == 1); | | assert(iota[2] == 3); | assert(iota[-1] == 0); | | --iota; | assert(*iota == 0); | | // opBinary | assert(*(iota + 2) == 2); | assert(*(iota - 3) == -3); | assert((iota - 3) - iota == -3); | | // construction | assert(*IotaIterator!int(3) == 3); | assert(iota - 1 < iota); |} | |/// |pure nothrow @nogc version(mir_test) unittest |{ | 
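// With a pointer as the counter, dereferencing yields the current (shifted) pointer value.
|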
int[32] data; | auto iota = IotaIterator!(int*)(data.ptr); | assert(*iota == data.ptr); | | // iteration | ++iota; | assert(*iota == 1 + data.ptr); | | assert(iota[2] == 3 + data.ptr); | assert(iota[-1] == 0 + data.ptr); | | --iota; | assert(*iota == 0 + data.ptr); | | // opBinary | assert(*(iota + 2) == 2 + data.ptr); | assert(*(iota - 3) == -3 + data.ptr); | assert((iota - 3) - iota == -3); | | // construction | assert(*IotaIterator!(int*)(data.ptr) == data.ptr); | assert(iota - 1 < iota); |} | |auto RetroIterator__map(Iterator, alias fun)(ref RetroIterator!Iterator it) |{ | auto iterator = it._iterator._mapIterator!fun; | return RetroIterator!(typeof(iterator))(iterator); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology; | import mir.ndslice.allocation; | auto v = iota(9).retro.map!(a => a).slice; | uint r; | auto w = iota(9).retro.map!(a => a).map!(a => a * r).slice; |} | |/++ |Reverse directions for an iterator. | |`RetroIterator` is used by $(SUBREF topology, retro). |+/ |struct RetroIterator(Iterator) |{ |@optmath: | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return RetroIterator!(LightConstOf!Iterator)(.lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return RetroIterator!(LightImmutableOf!Iterator)(.lightImmutable(_iterator)); | } | | /// | static alias __map(alias fun) = RetroIterator__map!(Iterator, fun); | | auto ref opUnary(string op : "*")() | { return *_iterator; } | | void opUnary(string op : "--")() | { ++_iterator; } | | void opUnary(string op : "++")() | { --_iterator; } | | auto ref opIndex()(ptrdiff_t index) | { return _iterator[-index]; } | | void opOpAssign(string op : "-")(ptrdiff_t index) scope | { _iterator += index; } | | void opOpAssign(string op : "+")(ptrdiff_t index) scope | { _iterator -= index; } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { return right._iterator - this._iterator; } | | bool opEquals()(scope ref const typeof(this) right) scope const | { return right._iterator == this._iterator; } | | ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const | { | static if (isPointer!Iterator) | return right._iterator - this._iterator; | else | return right._iterator.opCmp(this._iterator); | } |} | |/// |@safe pure nothrow @nogc version(mir_test) unittest |{ | IotaIterator!int iota; | RetroIterator!(IotaIterator!int) retro; | | ++iota; | --retro; | assert(*retro == *iota); | | --iota; | ++retro; | assert(*retro == *iota); | | assert(retro[-7] == iota[7]); | | iota += 100; | retro -= 100; | assert(*retro == *iota); | | iota -= 100; | retro += 100; | assert(*retro == *iota); | | assert(*(retro + 10) == *(iota - 10)); | | assert(retro - 1 < retro); | | assert((retro - 5) - retro == -5); | | iota = IotaIterator!int(3); | retro = RetroIterator!(IotaIterator!int)(iota); | assert(*retro == *iota); |} | |auto StrideIterator__map(Iterator, alias fun)(StrideIterator!Iterator it) |{ | auto iterator = it._iterator._mapIterator!fun; | return StrideIterator!(typeof(iterator))(it._stride, iterator); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology; | import mir.ndslice.allocation; | auto v = iota([3], 0, 3).map!(a => a).slice; | uint r; | auto w = iota([3], 0, 3).map!(a => a).map!(a => a * r).slice; |} | |/++ |Iterates an iterator with a fixed strides. 
| |`StrideIterator` is used by $(SUBREF topology, stride). |+/ |struct StrideIterator(Iterator) |{ |@optmath: | /// | ptrdiff_t _stride; | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return StrideIterator!(LightConstOf!Iterator)(_stride, .lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return StrideIterator!(LightImmutableOf!Iterator)(_stride, .lightImmutable(_iterator)); | } | | /// | static alias __map(alias fun) = StrideIterator__map!(Iterator, fun); | | auto ref opUnary(string op : "*")() | { return *_iterator; } | | void opUnary(string op)() scope | if (op == "--" || op == "++") | { mixin("_iterator " ~ op[0] ~ "= _stride;"); } | | auto ref opIndex()(ptrdiff_t index) | { return _iterator[index * _stride]; } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == "-" || op == "+") | { mixin("_iterator " ~ op ~ "= index * _stride;"); } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { return (this._iterator - right._iterator) / _stride; } | | bool opEquals()(scope ref const typeof(this) right) scope const | { return this._iterator == right._iterator; } | | ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const | { | static if (isPointer!Iterator) | ptrdiff_t ret = this._iterator - right._iterator; | else | ptrdiff_t ret = this._iterator.opCmp(right._iterator); | return _stride >= 0 ? ret : -ret; | } |} | |/// |@safe pure nothrow @nogc version(mir_test) unittest |{ | IotaIterator!int iota; | StrideIterator!(IotaIterator!int) stride; | stride._stride = -3; | | iota -= stride._stride; | --stride; | assert(*stride == *iota); | | iota += stride._stride; | ++stride; | assert(*stride == *iota); | | assert(stride[7] == iota[7 * stride._stride]); | | iota -= 100 * stride._stride; | stride -= 100; | assert(*stride == *iota); | | iota += 100 * stride._stride; | stride += 100; | assert(*stride == *iota); | | assert(*(stride + 10) == *(iota + 10 * stride._stride)); | | assert(stride - 1 < stride); | | assert((stride - 5) - stride == -5); | | iota = IotaIterator!int(3); | stride = StrideIterator!(IotaIterator!int)(3, iota); | assert(*stride == *iota); |} | |package template _zip_types(Iterators...) |{ | alias AliasSeq(T...) = T; | static if (Iterators.length) | { | enum i = Iterators.length - 1; | alias T = typeof(Iterators[i].init[sizediff_t.init]); | static if (__traits(compiles, &Iterators[i].init[sizediff_t.init])) | { | import mir.functional: Ref; | alias _zip_types = AliasSeq!(_zip_types!(Iterators[0 .. i]), Ref!T); | } | else | alias _zip_types = AliasSeq!(_zip_types!(Iterators[0 .. i]), T); | } | else | alias _zip_types = AliasSeq!(); |} | |package template _zip_fronts(Iterators...) |{ | static if (Iterators.length) | { | enum i = Iterators.length - 1; | static if (__traits(compiles, &Iterators[i].init[sizediff_t.init])) | enum _zip_fronts = _zip_fronts!(Iterators[0 .. i]) ~ "_ref(*_iterators[" ~ i.stringof ~ "]), "; | else | enum _zip_fronts = _zip_fronts!(Iterators[0 .. i]) ~ "*_iterators[" ~ i.stringof ~ "], "; | } | else | enum _zip_fronts = ""; |} | |package template _zip_index(Iterators...) |{ | static if (Iterators.length) | { | enum i = Iterators.length - 1; | static if (__traits(compiles, &Iterators[i].init[sizediff_t.init])) | enum _zip_index = _zip_index!(Iterators[0 .. 
i]) ~ "_ref(_iterators[" ~ i.stringof ~ "][index]), "; | else | enum _zip_index = _zip_index!(Iterators[0 .. i]) ~ "_iterators[" ~ i.stringof ~ "][index], "; | } | else | enum _zip_index = ""; |} | |/++ |Iterates multiple iterators in lockstep. | |`ZipIterator` is used by $(SUBREF topology, zip). |+/ |struct ZipIterator(Iterators...) | if (Iterators.length > 1) |{ |@optmath: | import std.traits: ConstOf, ImmutableOf; | import std.meta: staticMap; | import mir.functional: RefTuple, Ref, _ref; | /// | Iterators _iterators; | | /// | auto lightConst()() const @property | { | import std.format; | import mir.ndslice.topology: iota; | import std.meta: staticMap; | alias Ret = ZipIterator!(staticMap!(LightConstOf, Iterators)); | enum ret = "Ret(%(.lightConst(_iterators[%s]),%)]))".format(_iterators.length.iota); | return mixin(ret); | } | | /// | auto lightImmutable()() immutable @property | { | import std.format; | import mir.ndslice.topology: iota; | import std.meta: staticMap; | alias Ret = ZipIterator!(staticMap!(LightImmutableOf, Iterators)); | enum ret = "Ret(%(.lightImmutable(_iterators[%s]),%)]))".format(_iterators.length.iota); | return mixin(ret); | } | | auto opUnary(string op : "*")() | { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_fronts!Iterators ~ ")"); } | | | auto opUnary(string op : "*")() const | { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_fronts!Iterators ~ ")"); } | | auto opUnary(string op : "*")() immutable | { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_fronts!Iterators ~ ")"); } | | void opUnary(string op)() scope | if (op == "++" || op == "--") | { | foreach (ref _iterator; _iterators) | mixin(op ~ `_iterator;`); | } | | auto opIndex()(ptrdiff_t index) | { return mixin("RefTuple!(_zip_types!Iterators)(" ~ _zip_index!Iterators ~ ")"); } | | auto opIndexAssign(Types...)(RefTuple!(Types) value, ptrdiff_t index) | if (Types.length == Iterators.length) | { | foreach(i, ref val; value.expand) | { | _iterators[i][index] = val; | } | return opIndex(index); | } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == "+" || op == "-") | { | foreach (ref _iterator; _iterators) | mixin(`_iterator ` ~ op ~ `= index;`); | } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { return this._iterators[0] - right._iterators[0]; } | | bool opEquals()(scope ref const typeof(this) right) scope const | { return this._iterators[0] == right._iterators[0]; } | | ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const | { | static if (isPointer!(Iterators[0])) | return this._iterators[0] - right._iterators[0]; | else | return this._iterators[0].opCmp(right._iterators[0]); | } | | import std.meta: anySatisfy; | static if (anySatisfy!(hasZeroShiftFieldMember, Iterators)) | /// Defined if at least one of `Iterators` has member `assumeFieldsHaveZeroShift`. 
| auto assumeFieldsHaveZeroShift() @property | { | import std.meta: staticMap; | alias _fields = _iterators; | return mixin("ZipField!(staticMap!(ZeroShiftField, Iterators))(" ~ applyAssumeZeroShift!Iterators ~ ")"); | } |} | |/// |pure nothrow @nogc version(mir_test) unittest |{ | import mir.ndslice.traits: isIterator; | | double[10] data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; | alias ItA = IotaIterator!int; | alias ItB = double*; | alias ItZ = ZipIterator!(ItA, ItB); | auto zip = ItZ(ItA(3), data.ptr); | assert((*zip).a == 3); | assert((*zip).b == 1); | | // iteration | ++zip; | assert((*zip).a == 3 + 1); | assert((*zip).b == 1 + 1); | assert(&(*zip).b() == data.ptr + 1); | | assert(zip[4].a == 3 + 5); | assert(zip[4].b == 1 + 5); | assert(&zip[4].b() == data.ptr + 5); | | --zip; | assert((*zip).a == 3); | assert((*zip).b == 1); | | assert((*(zip + 2)).a == 3 + 2); | assert((*(zip - 3)).a == 3 + -3); | assert((*(zip + 2)).b == 1 + 2); | assert((*(zip + 3 - 3)).b == 1); | assert((zip - 3).opBinary!"-"(zip) == -3); | | assert(zip == zip); | assert(zip - 1 < zip); | | static assert(isIterator!(ZipIterator!(double*, int*))); | static assert(isIterator!(ZipIterator!(immutable(double)*, immutable(int)*))); |} | |/// |struct CachedIterator(Iterator, CacheIterator, FlagIterator) |{ | /// | Iterator _iterator; | /// | CacheIterator _caches; | /// | FlagIterator _flags; | |@optmath: | | /// | auto lightScope()() scope @property | { | return CachedIterator!(LightScopeOf!Iterator, LightScopeOf!CacheIterator, LightScopeOf!FlagIterator)( | .lightScope(_iterator), | .lightScope(_caches), | .lightScope(_flags), | ); | } | | /// | auto lightScope()() scope const @property | { | return lightConst.lightScope; | } | | /// | auto lightScope()() scope immutable @property | { | return lightImmutable.lightScope; | } | | /// | auto lightConst()() const @property | { | return CachedIterator!(LightConstOf!Iterator, CacheIterator, FlagIterator)( | .lightConst(_iterator), | *cast(CacheIterator*)&_caches, | *cast(FlagIterator*)&_flags, | ); | } | | /// | auto lightImmutable()() immutable @property @trusted | { | return CachedIterator!(LightImmutableOf!Iterator, CacheIterator, FlagIterator)( | .lightImmutable(_iterator), | *cast(CacheIterator*)&_caches, | *cast(FlagIterator*)&_flags, | ); | } | | private alias T = typeof(Iterator.init[0]); | private alias UT = Unqual!T; | | auto opUnary(string op : "*")() | { | if (_expect(!*_flags, false)) | { | _flags[0] = true; | emplaceRef!T(*cast(UT*)&*_caches, *_iterator); | } | return *_caches; | } | | auto opIndex()(ptrdiff_t index) | { | if (_expect(!_flags[index], false)) | { | _flags[index] = true; | emplaceRef!T(*cast(UT*)&(_caches[index]), _iterator[index]); | } | return _caches[index]; | } | | auto ref opIndexAssign(T)(auto ref T val, ptrdiff_t index) | { | _flags[index] = true; | return _caches[index] = val; | } | | void opUnary(string op)() scope | if (op == "--" || op == "++") | { | mixin(op ~ "_iterator;"); | mixin(op ~ "_caches;"); | mixin(op ~ "_flags;"); | } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == "-" || op == "+") | { | mixin("_iterator" ~ op ~ "= index;"); | mixin("_caches" ~ op ~ "= index;"); | mixin("_flags" ~ op ~ "= index;"); | } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { return this._iterator - right._iterator; } | | bool opEquals()(scope 
ref const typeof(this) right) scope const | { return this._iterator == right._iterator; } | | ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const | { | static if (isPointer!Iterator) | return this._iterator - right._iterator; | else | return this._iterator.opCmp(right._iterator); | } |} | |private enum map_primitives = q{ | | import mir.functional: RefTuple, unref; | | auto ref opUnary(string op : "*")() | { | static if (is(typeof(*_iterator) : RefTuple!T, T...)) | { | auto t = *_iterator; | return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")"); | } | else | return _fun(*_iterator); | } | | auto ref opIndex(ptrdiff_t index) scope | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")"); | } | else | return _fun(_iterator[index]); | } | | static if (!__traits(compiles, &opIndex(ptrdiff_t.init))) | { | auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ") = value"); | } | else | return _fun(_iterator[index]) = value; | } | | auto ref opIndexUnary(string op)(ptrdiff_t index) | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin(op ~ "_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")"); | } | else | return mixin(op ~ "_fun(_iterator[index])"); | } | | auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index) | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin("_fun(" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ ")" ~ op ~ "= value"); | } | else | return mixin("_fun(_iterator[index])" ~ op ~ "= value"); | } | } |}; | |/++ |`VmapIterator` is used by $(SUBREF topology, map). |+/ |struct VmapIterator(Iterator, Fun) |{ |@optmath: | | /// | Iterator _iterator; | /// | Fun _fun; | | /// | auto lightConst()() const @property | { | return VmapIterator!(LightConstOf!Iterator, LightConstOf!Fun)(.lightConst(_iterator), .lightConst(_fun)); | } | | /// | auto lightImmutable()() immutable @property | { | return VmapIterator!(LightImmutableOf!Iterator, LightImmutableOf!Fun)(.lightImmutable(_iterator), .lightImmutable(_fun)); | } | | mixin(map_primitives); | mixin(std_ops); | | static if (hasZeroShiftFieldMember!Iterator) | /// | auto assumeFieldsHaveZeroShift() @property | { | return _vmapField(_iterator.assumeFieldsHaveZeroShift, _fun); | } |} | |auto MapIterator__map(Iterator, alias fun0, alias fun)(ref MapIterator!(Iterator, fun0) it) |{ | return MapIterator!(Iterator, fun)(it._iterator); |} | |/++ |`MapIterator` is used by $(SUBREF topology, map). 
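|
|A minimal sketch, assuming a raw `int*` as the base iterator:
|----
|import mir.functional: naryFun;
|auto data = [1, 2, 3];
|auto it = MapIterator!(int*, naryFun!"a * 10")(data.ptr);
|assert(*it == 10);    // applies the lambda on dereference
|assert(it[2] == 30);  // and on indexing
|----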
|+/ |struct MapIterator(Iterator, alias _fun) |{ |@optmath: | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return MapIterator!(LightConstOf!Iterator, _fun)(.lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return MapIterator!(LightImmutableOf!Iterator, _fun)(.lightImmutable(_iterator)); | } | | import mir.functional: pipe; | /// | static alias __map(alias fun1) = MapIterator__map!(Iterator, _fun, pipe!(_fun, fun1)); | | mixin(map_primitives); | mixin(std_ops); | | static if (hasZeroShiftFieldMember!Iterator) | /// | auto assumeFieldsHaveZeroShift() @property | { | return _mapField!_fun(_iterator.assumeFieldsHaveZeroShift); | } |} | |/+ |Creates a mapped iterator. Uses `__map` if possible. |+/ |auto _mapIterator(alias fun, Iterator)(Iterator iterator) |{ | static if (__traits(hasMember, Iterator, "__map")) | { | static if (is(Iterator : MapIterator!(Iter0, fun0), Iter0, alias fun0) | && !__traits(compiles, Iterator.__map!fun(iterator))) | { | // https://github.com/libmir/mir-algorithm/issues/111 | debug(mir) pragma(msg, __FUNCTION__~" not coalescing chained map calls into a single lambda, possibly because of multiple embedded context pointers"); | return MapIterator!(Iterator, fun)(iterator); | } | else | return Iterator.__map!fun(iterator); | } | else | return MapIterator!(Iterator, fun)(iterator); |} | | |/+ |Creates a mapped iterator. Uses `__vmap` if possible. |+/ |auto _vmapIterator(Iterator, Fun)(Iterator iterator, Fun fun) |{ | static if (__traits(hasMember, Iterator, "__vmap")) | return Iterator.__vmap(iterator, fun); | else | return MapIterator!(Iterator, fun)(iterator); |} | |@safe pure nothrow @nogc version(mir_test) unittest |{ | // https://github.com/libmir/mir-algorithm/issues/111 | import mir.ndslice.topology : iota, map; | import mir.functional : pipe; | | static auto foo(T)(T x) | { | return x.map!(a => a + 1); | } | | static auto bar(T)(T x) | { | return foo(x).map!(a => a + 2); | } | | auto data = iota(5); | auto result = iota([5], 3); | | auto x = data.map!(a => a + 1).map!(a => a + 2); | assert(x == result); | | auto y = bar(data); | assert(y == result); |} | |/++ |`MemberIterator` is used by $(SUBREF topology, member). |+/ |struct MemberIterator(Iterator, string member) |{ |@optmath: | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return MemberIterator!(LightConstOf!Iterator, member)(.lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return MemberIterator!(LightImmutableOf!Iterator, member)(.lightImmutable(_iterator)); | } | | auto ref opUnary(string op : "*")() | { | return __traits(getMember, *_iterator, member); | } | | auto ref opIndex()(ptrdiff_t index) | { | return __traits(getMember, _iterator[index], member); | } | | static if (!__traits(compiles, &opIndex(ptrdiff_t.init))) | { | auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope | { | return __traits(getMember, _iterator[index], member) = value; | } | | auto ref opIndexUnary(string op)(ptrdiff_t index) | { | return mixin(op ~ "__traits(getMember, _iterator[index], member)"); | } | | auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index) | { | return mixin("__traits(getMember, _iterator[index], member)" ~ op ~ "= value"); | } | } | | mixin(std_ops); |} | |/++ |`BytegroupIterator` is used by $(SUBREF topology, Bytegroup) and $(SUBREF topology, bytegroup). 
|+/ |struct BytegroupIterator(Iterator, size_t count, DestinationType) | if (count) |{ |@optmath: | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return BytegroupIterator!(LightConstOf!Iterator, count, DestinationType)(.lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return BytegroupIterator!(LightImmutableOf!Iterator, count, DestinationType)(.lightImmutable(_iterator)); | } | | package(mir) alias Byte = Unqual!(typeof(_iterator[0])); | | version(LittleEndian) | private enum BE = false; | else | private enum BE = true; | | private union U | { | DestinationType value; | static if (DestinationType.sizeof > Byte[count].sizeof && BE && isScalarType!DestinationType) | { | struct | { | ubyte[DestinationType.sizeof - Byte[count].sizeof] shiftPayload; | Byte[count] bytes; | } | } | else | { | Byte[count] bytes; | } | } | | DestinationType opUnary(string op : "*")() | { | U ret = { value: DestinationType.init }; | foreach (i; Iota!count) | ret.bytes[i] = _iterator[i]; | return ret.value; | } | | DestinationType opIndex()(ptrdiff_t index) | { | return *(this + index); | } | | DestinationType opIndexAssign(T)(T val, ptrdiff_t index) scope | { | auto it = this + index; | U ret = { value: val }; | foreach (i; Iota!count) | it._iterator[i] = ret.bytes[i]; | return ret.value; | } | | void opUnary(string op)() scope | if (op == "--" || op == "++") | { mixin("_iterator " ~ op[0] ~ "= count;"); } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == "-" || op == "+") | { mixin("_iterator " ~ op ~ "= index * count;"); } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { return (this._iterator - right._iterator) / count; } | | bool opEquals()(scope ref const typeof(this) right) scope const | { return this._iterator == right._iterator; } | | ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const | { | static if (isPointer!Iterator) | return this._iterator - right._iterator; | else | return this._iterator.opCmp(right._iterator); | } |} | |auto SlideIterator__map(Iterator, size_t params, alias fun0, alias fun)(SlideIterator!(Iterator, params, fun0) it) |{ | return SlideIterator!(Iterator, params, fun)(it._iterator); |} | |/++ |`SlideIterator` is used by $(SUBREF topology, diff) and $(SUBREF topology, slide). 
|+/ |struct SlideIterator(Iterator, size_t params, alias fun) | if (params > 1) |{ |@optmath: | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return SlideIterator!(LightConstOf!Iterator, params, fun)(.lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return SlideIterator!(LightImmutableOf!Iterator, params, fun)(.lightImmutable(_iterator)); | } | | import mir.functional: pipe; | /// | static alias __map(alias fun1) = SlideIterator__map!(Iterator, params, fun, pipe!(fun, fun1)); | | auto ref opUnary(string op : "*")() | { | return mixin("fun(" ~ _iotaArgs!(params, "_iterator[", "], ") ~ ")"); | } | | auto ref opIndex()(ptrdiff_t index) | { | return mixin("fun(" ~ _iotaArgs!(params, "_iterator[index + ", "], ") ~ ")"); | } | | mixin(std_ops); |} | |/// |version(mir_test) unittest |{ | import mir.functional: naryFun; | auto data = [1, 3, 8, 18]; | auto diff = SlideIterator!(int*, 2, naryFun!"b - a")(data.ptr); | assert(*diff == 2); | assert(diff[1] == 5); | assert(diff[2] == 10); |} | |auto IndexIterator__map(Iterator, Field, alias fun)(ref IndexIterator!(Iterator, Field) it) |{ | auto field = it._field._mapField!fun; | return IndexIterator!(Iterator, typeof(field))(it._iterator, field); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology; | import mir.ndslice.allocation; | import mir.ndslice.slice; | auto indices = [4, 3, 1, 2, 0, 4].sliced; | auto v = iota(5).indexed(indices).map!(a => a).slice; | uint r; | auto w = iota(5).indexed(indices).map!(a => a).map!(a => a * r).slice; |} | |/++ |Iterates a field using an iterator. | |`IndexIterator` is used by $(SUBREF topology, indexed). |+/ |struct IndexIterator(Iterator, Field) |{ | import mir.functional: RefTuple, unref; | |@optmath: | /// | Iterator _iterator; | /// | Field _field; | | /// | auto lightConst()() const @property | { | return IndexIterator!(LightConstOf!Iterator, LightConstOf!Field)(.lightConst(_iterator), .lightConst(_field)); | } | | /// | auto lightImmutable()() immutable @property | { | return IndexIterator!(LightImmutableOf!Iterator, LightImmutableOf!Field)(.lightImmutable(_iterator), _field.lightImmutable); | } | | /// | static alias __map(alias fun) = IndexIterator__map!(Iterator, Field, fun); | | auto ref opUnary(string op : "*")() | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = *_iterator; | return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]"); | } | else | return _field[*_iterator]; | } | | auto ref opIndex()(ptrdiff_t index) | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]"); | } | else | return _field[_iterator[index]]; | } | | static if (!__traits(compiles, &opIndex(ptrdiff_t.init))) | { | auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) scope | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "] = value"); | } | else | return _field[_iterator[index]] = value; | } | | auto ref opIndexUnary(string op)(ptrdiff_t index) | { | static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin(op ~ "_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]"); | } | else | return mixin(op ~ "_field[_iterator[index]]"); | } | | auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index) | { 
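|            // Apply `op=` to the `_field` element addressed by the index (or index tuple) stored at `index`.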
| static if (is(typeof(_iterator[0]) : RefTuple!T, T...)) | { | auto t = _iterator[index]; | return mixin("_field[" ~ _iotaArgs!(T.length, "t.expand[", "].unref, ") ~ "]" ~ op ~ "= value"); | } | else | return mixin("_field[_iterator[index]]" ~ op ~ "= value"); | } | } | | mixin(std_ops); |} | |/++ |Iterates chunks in a sliceable using an iterator composed of indexes. | |Definition: |---- |auto index = iterator[i]; |auto elem = sliceable[index[0] .. index[1]]; |---- |+/ |struct SubSliceIterator(Iterator, Sliceable) |{ |@optmath: | /// | Iterator _iterator; | /// | Sliceable _sliceable; | | /// | auto lightConst()() const @property | { | return SubSliceIterator!(LightConstOf!Iterator, LightConstOf!Sliceable)(.lightConst(_iterator), _sliceable.lightConst); | } | | /// | auto lightImmutable()() immutable @property | { | return SubSliceIterator!(LightImmutableOf!Iterator, LightImmutableOf!Sliceable)(.lightImmutable(_iterator), _sliceable.lightImmutable); | } | | auto ref opUnary(string op : "*")() | { | auto i = *_iterator; | return _sliceable[i[0] .. i[1]]; | } | | auto ref opIndex()(ptrdiff_t index) | { | auto i = _iterator[index]; | return _sliceable[i[0] .. i[1]]; | } | | mixin(std_ops); |} | |/++ |Iterates chunks in a sliceable using an iterator composed of indexes stored consequently. | |Definition: |---- |auto elem = _sliceable[_iterator[index] .. _iterator[index + 1]]; |---- |+/ |struct ChopIterator(Iterator, Sliceable) |{ |@optmath: | /// | Iterator _iterator; | /// | Sliceable _sliceable; | | /// | auto lightConst()() const @property | { | return ChopIterator!(LightConstOf!Iterator, LightConstOf!Sliceable)(.lightConst(_iterator), _sliceable.lightConst); | } | | /// | auto lightImmutable()() immutable @property | { | return ChopIterator!(LightImmutableOf!Iterator, LightImmutableOf!Sliceable)(.lightImmutable(_iterator), _sliceable.lightImmutable); | } | | auto ref opUnary(string op : "*")() | { | return _sliceable[*_iterator .. _iterator[1]]; | } | | auto ref opIndex()(ptrdiff_t index) | { | return _sliceable[_iterator[index] .. _iterator[index + 1]]; | } | | mixin(std_ops); |} | |/++ |Iterates on top of another iterator and returns a slice |as a multidimensional window at the current position. | |`SliceIterator` is used by $(SUBREF topology, map) for packed slices. |+/ |struct SliceIterator(Iterator, size_t N = 1, SliceKind kind = Contiguous) |{ |@optmath: | /// | alias Element = Slice!(Iterator, N, kind); | /// | Element._Structure _structure; | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return SliceIterator!(LightConstOf!Iterator, N, kind)(_structure, .lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return SliceIterator!(LightImmutableOf!Iterator, N, kind)(_structure, .lightImmutable(_iterator)); | } | | auto opUnary(string op : "*")() | { | return Element(_structure, _iterator); | } | | auto opIndex()(ptrdiff_t index) | { | return Element(_structure, _iterator + index); | } | | mixin(std_ops); |} | |public auto FieldIterator__map(Field, alias fun)(FieldIterator!(Field) it) |{ | import mir.ndslice.field: _mapField; | auto field = it._field._mapField!fun; | return FieldIterator!(typeof(field))(it._index, field); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology; | import mir.ndslice.allocation; | auto v = ndiota(3, 3).map!(a => a).slice; | uint r; | auto w = ndiota(3, 3).map!(a => a).map!(a => a[0] * r).slice; |} | |/++ |Creates an iterator on top of a field. 
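|
|A minimal sketch, assuming a plain `int[]` used as the field:
|----
|auto it = FieldIterator!(int[])(0, [10, 20, 30]);
|assert(*it == 10);
|++it;
|assert(*it == 20);
|assert(it[1] == 30);
|----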
| |`FieldIterator` is used by $(SUBREF slice, slicedField), $(SUBREF topology, bitwise), $(SUBREF topology, ndiota), and others. |+/ |struct FieldIterator(Field) |{ |@optmath: | /// | ptrdiff_t _index; | /// | Field _field; | | /// | auto lightConst()() const @property | { 0000000| return FieldIterator!(LightConstOf!Field)(_index, .lightConst(_field)); | } | | /// | auto lightImmutable()() immutable @property | { | return FieldIterator!(LightImmutableOf!Field)(_index, .lightImmutable(_field)); | } | | /// | static alias __map(alias fun) = FieldIterator__map!(Field, fun); | | /// | Slice!(IotaIterator!size_t) opSlice(size_t dimension)(size_t i, size_t j) scope const | { | assert(i <= j); | return typeof(return)(j - i, typeof(return).Iterator(i)); | } | | /++ | Returns: | `_field[_index + sl.i .. _index + sl.j]`. | +/ | auto opIndex()(Slice!(IotaIterator!size_t) sl) | { | auto idx = _index + sl._iterator._index; | return _field[idx .. idx + sl.length]; | } | | auto ref opUnary(string op : "*")() 0000000| { return _field[_index]; } | | void opUnary(string op)() scope | if (op == "++" || op == "--") | { mixin(op ~ `_index;`); } | | auto ref opIndex()(ptrdiff_t index) 0000000| { return _field[_index + index]; } | | static if (!__traits(compiles, &_field[_index])) | { | auto ref opIndexAssign(T)(auto ref T value, ptrdiff_t index) | { return _field[_index + index] = value; } | | auto ref opIndexUnary(string op)(ptrdiff_t index) | { mixin (`return ` ~ op ~ `_field[_index + index];`); } | | auto ref opIndexOpAssign(string op, T)(T value, ptrdiff_t index) | { mixin (`return _field[_index + index] ` ~ op ~ `= value;`); } | } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == "+" || op == "-") | { mixin(`_index ` ~ op ~ `= index;`); } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { return this._index - right._index; } | | bool opEquals()(scope ref const typeof(this) right) scope const 0000000| { return this._index == right._index; } | | ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const 0000000| { return this._index - right._index; } | | /// | auto assumeFieldsHaveZeroShift() @property | { 0000000| if (_expect(_index != 0, false)) | { | version (D_Exceptions) 0000000| throw assumeZeroShiftException; | else | assert(0, assumeZeroShiftExceptionMsg); | } | static if (hasZeroShiftFieldMember!Field) | return _field.assumeFieldsHaveZeroShift; | else 0000000| return _field; | } |} | |auto FlattenedIterator__map(Iterator, size_t N, SliceKind kind, alias fun)(FlattenedIterator!(Iterator, N, kind) it) |{ | import mir.ndslice.topology: map; | auto slice = it._slice.map!fun; | return FlattenedIterator!(TemplateArgsOf!(typeof(slice)))(it._indexes, slice); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology; | import mir.ndslice.allocation; | auto v = iota(3, 3).universal.flattened.map!(a => a).slice; | uint r; | auto w = iota(3, 3).universal.flattened.map!(a => a).map!(a => a * r).slice; |} | |/++ |Creates an iterator on top of all elements in a slice. | |`FieldIterator` is used by $(SUBREF topology, bitwise), $(SUBREF topology, ndiota), and others. 
|+/ |struct FlattenedIterator(Iterator, size_t N, SliceKind kind) | if (N > 1 && (kind == Universal || kind == Canonical)) |{ |@optmath: | /// | ptrdiff_t[N] _indexes; | /// | Slice!(Iterator, N, kind) _slice; | | /// | auto lightConst()() const @property | { | return FlattenedIterator!(LightConstOf!Iterator, N, kind)(_indexes, _slice.lightConst); | } | | /// | auto lightImmutable()() immutable @property | { | return FlattenedIterator!(LightImmutableOf!Iterator, N, kind)(_indexes, _slice.lightImmutable); | } | | /// | static alias __map(alias fun) = FlattenedIterator__map!(Iterator, N, kind, fun); | | private ptrdiff_t getShift()(ptrdiff_t n) | { | ptrdiff_t _shift; | n += _indexes[$ - 1]; | foreach_reverse (i; Iota!(1, N)) | { | immutable v = n / ptrdiff_t(_slice._lengths[i]); | n %= ptrdiff_t(_slice._lengths[i]); | static if (i == _slice.S) | _shift += (n - _indexes[i]); | else | _shift += (n - _indexes[i]) * _slice._strides[i]; | n = _indexes[i - 1] + v; | } | _shift += (n - _indexes[0]) * _slice._strides[0]; | return _shift; | } | | auto ref opUnary(string op : "*")() | { | return *_slice._iterator; | } | | void opUnary(string op)() scope | if (op == "--" || op == "++") | { | foreach_reverse (i; Iota!N) | { | static if (i == _slice.S) | mixin(op ~ `_slice._iterator;`); | else | mixin(`_slice._iterator ` ~ op[0] ~ `= _slice._strides[i];`); | mixin (op ~ `_indexes[i];`); | static if (i) | { | static if (op == "++") | { | if (_indexes[i] < _slice._lengths[i]) | return; | static if (i == _slice.S) | _slice._iterator -= _slice._lengths[i]; | else | _slice._iterator -= _slice._lengths[i] * _slice._strides[i]; | _indexes[i] = 0; | } | else | { | if (_indexes[i] >= 0) | return; | static if (i == _slice.S) | _slice._iterator += _slice._lengths[i]; | else | _slice._iterator += _slice._lengths[i] * _slice._strides[i]; | _indexes[i] = _slice._lengths[i] - 1; | } | } | } | } | | auto ref opIndex()(ptrdiff_t index) | { | return _slice._iterator[getShift(index)]; | } | | static if (isMutable!(_slice.DeepElement) && !_slice.hasAccessByRef) | /// | auto ref opIndexAssign(E)(scope ref E elem, size_t index) scope return | { | return _slice._iterator[getShift(index)] = elem; | } | | void opOpAssign(string op : "+")(ptrdiff_t n) scope | { | ptrdiff_t _shift; | n += _indexes[$ - 1]; | foreach_reverse (i; Iota!(1, N)) | { | immutable v = n / ptrdiff_t(_slice._lengths[i]); | n %= ptrdiff_t(_slice._lengths[i]); | static if (i == _slice.S) | _shift += (n - _indexes[i]); | else | _shift += (n - _indexes[i]) * _slice._strides[i]; | _indexes[i] = n; | n = _indexes[i - 1] + v; | } | _shift += (n - _indexes[0]) * _slice._strides[0]; | _indexes[0] = n; | foreach_reverse (i; Iota!(1, N)) | { | if (_indexes[i] >= 0) | break; | _indexes[i] += _slice._lengths[i]; | _indexes[i - 1]--; | } | _slice._iterator += _shift; | } | | void opOpAssign(string op : "-")(ptrdiff_t n) scope | { this += -n; } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { | ptrdiff_t ret = this._indexes[0] - right._indexes[0]; | foreach (i; Iota!(1, N)) | { | ret *= _slice._lengths[i]; | ret += this._indexes[i] - right._indexes[i]; | } | return ret; | } | | bool opEquals()(scope ref const typeof(this) right) scope const | { | foreach_reverse (i; Iota!N) | if (this._indexes[i] != right._indexes[i]) | return false; | return true; | } | | ptrdiff_t opCmp()(scope 
ref const typeof(this) right) scope const | { | foreach (i; Iota!(N - 1)) | if (auto ret = this._indexes[i] - right._indexes[i]) | return ret; | return this._indexes[$ - 1] - right._indexes[$ - 1]; | } |} | |version(mir_test) unittest |{ | import mir.ndslice.topology; | import mir.ndslice.slice; | | auto it0 = iota(3, 4).universal.flattened._iterator; | auto it1 = it0; | assert(it0 == it1); | it0 += 5; | assert(it0 > it1); | it0 -= 5; | assert(*it0 == *it1); | assert(it0 == it1); | it0 += 5; | it0 += 7; | it0 -= 9; | assert(it0 > it1); | it1 += 3; | assert(*it0 == *it1); | assert(it0 == it1); | assert(it0 <= it1); | assert(it0 >= it1); | | ++it0; | ++it0; | ++it0; | ++it0; | ++it0; | ++it0; | ++it0; | ++it0; | ++it0; | | assert(it0 - it1 == 9); | assert(it1 - it0 == -9); | | ++it0; | | assert(it0 - it1 == 10); | assert(it1 - it0 == -10); | | --it0; | | assert(it0 - it1 == 9); | assert(it1 - it0 == -9); | assert(it0[-9] == *it1); | assert(*it0 == it1[9]); | | --it0; | --it0; | --it0; | --it0; | --it0; | --it0; | --it0; | --it0; | --it0; | assert(*it0 == *it1); | assert(it0 == it1); | assert(it0 <= it1); | assert(it0 >= it1); |} | |/++ |`StairsIterator` is used by $(SUBREF topology, stairs). |+/ |struct StairsIterator(Iterator, string direction) | if (direction == "+" || direction == "-") |{ | /// | size_t _length; | | /// | Iterator _iterator; | | /// | auto lightConst()() const @property | { | return StairsIterator!(LightConstOf!Iterator, direction)(_length, .lightConst(_iterator)); | } | | /// | auto lightImmutable()() immutable @property | { | return StairsIterator!(LightImmutableOf!Iterator, direction)(_length, .lightImmutable(_iterator)); | } | |@optmath: | | /// | Slice!Iterator opUnary(string op : "*")() | { | import mir.ndslice.slice: sliced; | return _iterator.sliced(_length); | } | | /// | Slice!Iterator opIndex()(ptrdiff_t index) | { | import mir.ndslice.slice: sliced; | static if (direction == "+") | { | auto newLength = _length + index; | auto shift = ptrdiff_t(_length + newLength - 1) * index / 2; | } | else | { | auto newLength = _length - index; | auto shift = ptrdiff_t(_length + newLength + 1) * index / 2; | } | assert(ptrdiff_t(newLength) >= 0); | return (_iterator + shift).sliced(newLength); | } | | void opUnary(string op)() scope | if (op == "--" || op == "++") | { | static if (op == "++") | { | _iterator += _length; | static if (direction == "+") | ++_length; | else | --_length; | } | else | { | assert(_length); | static if (direction == "+") | --_length; | else | ++_length; | _iterator -= _length; | } | } | | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == "-" || op == "+") | { | static if (op == direction) | auto newLength = _length + index; | else | auto newLength = _length - index; | static if (direction == "+") | auto shift = ptrdiff_t(_length + newLength - 1) * index / 2; | else | auto shift = ptrdiff_t(_length + newLength + 1) * index / 2; | assert(ptrdiff_t(newLength) >= 0); | _length = newLength; | static if (op == "+") | _iterator += shift; | else | _iterator -= shift; | } | | auto opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { | auto ret = this; | mixin(`ret ` ~ op ~ `= index;`); | return ret; | } | | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { | static if (direction == "+") | return this._length - right._length; | else | return right._length - this._length; | } | | bool opEquals()(scope ref const typeof(this) right) scope const | { return this._length == right._length; } | | 
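|    // Note on the shift arithmetic used by opIndex and opOpAssign above: moving by
|    // `index` rows skips an arithmetic series of row lengths, so the iterator shift
|    // is (_length + newLength - 1) * index / 2 for direction "+" (rows grow by one)
|    // and (_length + newLength + 1) * index / 2 for direction "-" (rows shrink).
|    // Since each position has a distinct row length, opEquals above and opCmp below
|    // can compare iterators through _length alone.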
ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const | { return this - right; } |} | |/// |version(mir_test) unittest |{ | // 0 | // 1 2 | // 3 4 5 | // 6 7 8 9 | // 10 11 12 13 14 | auto it = StairsIterator!(IotaIterator!size_t, "+")(1, IotaIterator!size_t()); | assert(*it == [0]); | assert(it[4] == [10, 11, 12, 13, 14]); | assert(*(it + 4) == [10, 11, 12, 13, 14]); | ++it; | assert(*it == [1, 2]); | it += 3; | assert(*it == [10, 11, 12, 13, 14]); | assert(it[-3] == [1, 2]); | assert(*(it - 3) == [1, 2]); | assert(it + 1 > it); | assert(it + 1 - 1 == it); | assert(it - 3 - it == -3); | --it; | assert(*it == [6, 7, 8, 9]); |} | |/// |version(mir_test) unittest |{ | // [0, 1, 2, 3, 4], | // [5, 6, 7, 8], | // [9, 10, 11], | // [12, 13], | // [14]]); | | auto it = StairsIterator!(IotaIterator!size_t, "-")(5, IotaIterator!size_t()); | assert(*it == [0, 1, 2, 3, 4]); | assert(it[4] == [14]); | assert(*(it + 4) == [14]); | ++it; | assert(*it == [5, 6, 7, 8]); | it += 3; | assert(*it == [14]); | assert(it[-3] == [5, 6, 7, 8]); | assert(*(it - 3) == [5, 6, 7, 8]); | assert(it + 1 > it); | assert(it + 1 - 1 == it); | assert(it - 3 - it == -3); | --it; | assert(*it == [12, 13]); |} | |/++ |Element type of $(LREF TripletIterator). |+/ |struct Triplet(Iterator, SliceKind kind = Contiguous) |{ |@optmath: | /// | size_t _iterator; | /// | Slice!(Iterator, 1, kind) _slice; | | /// | auto lightConst()() const @property | { | return Triplet!(LightConstOf!Iterator, kind)(_iterator, slice.lightConst); | } | | /// | auto lightImmutable()() immutable @property | { | return Triplet!(LightImmutableOf!Iterator, kind)(_iterator, slice.lightImmutable); | } | | @property | { | /// | auto ref center() | { | assert(_iterator < _slice.length); | return _slice[_iterator]; | } | | /// | Slice!(Iterator, 1, kind) left() | { | assert(_iterator < _slice.length); | return _slice[0 .. _iterator]; | } | | /// | Slice!(Iterator, 1, kind) right() | { | assert(_iterator < _slice.length); | return _slice[_iterator + 1 .. $]; | } | } |} | |/++ |Iterates triplets position in a slice. | |`TripletIterator` is used by $(SUBREF topology, triplets). |+/ |struct TripletIterator(Iterator, SliceKind kind = Contiguous) |{ |@optmath: | | /// | size_t _iterator; | /// | Slice!(Iterator, 1, kind) _slice; | | /// | auto lightConst()() const @property | { | return TripletIterator!(LightConstOf!Iterator, kind)(_iterator, _slice.lightConst); | } | | /// | auto lightImmutable()() immutable @property | { | return TripletIterator!(LightImmutableOf!Iterator, kind)(_iterator, _slice.lightImmutable); | } | | /// | Triplet!(Iterator, kind) opUnary(string op : "*")() | { | return typeof(return)(_iterator, _slice); | } | | /// | Triplet!(Iterator, kind) opIndex()(ptrdiff_t index) | { | return typeof(return)(_iterator + index, _slice); | } | | mixin(std_ops); |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/iterator.d is 0% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-dynamic.lst |/++ |$(SCRIPT inhibitQuickIndex = 1;) | |This is a submodule of $(MREF mir, ndslice). | |Operators only change strides and lengths of a slice. |The range of a slice remains unmodified. |All operators return slice as the type of the argument, maybe except slice kind. | |$(BOOKTABLE $(H2 Transpose operators), | |$(TR $(TH Function Name) $(TH Description)) |$(T2 transposed, Permutes dimensions. $(BR) | `iota(3, 4, 5, 6, 7).transposed!(4, 0, 1).shape` returns `[7, 3, 4, 5, 6]`.) 
|$(T2 swapped, Swaps dimensions $(BR) | `iota(3, 4, 5).swapped!(1, 2).shape` returns `[3, 5, 4]`.) |$(T2 everted, Reverses the order of dimensions $(BR) | `iota(3, 4, 5).everted.shape` returns `[5, 4, 3]`.) |) |See also $(SUBREF topology, evertPack). | |$(BOOKTABLE $(H2 Iteration operators), | |$(TR $(TH Function Name) $(TH Description)) |$(T2 strided, Multiplies the stride of a selected dimension by a factor.$(BR) | `iota(13, 40).strided!(0, 1)(2, 5).shape` equals to `[7, 8]`.) |$(T2 reversed, Reverses the direction of iteration for selected dimensions. $(BR) | `slice.reversed!0` returns the slice with reversed direction of iteration for top level dimension.) |$(T2 allReversed, Reverses the direction of iteration for all dimensions. $(BR) | `iota(4, 5).allReversed` equals to `20.iota.retro.sliced(4, 5)`.) |) | |$(BOOKTABLE $(H2 Other operators), |$(TR $(TH Function Name) $(TH Description)) | |$(T2 rotated, Rotates two selected dimensions by `k*90` degrees. $(BR) | `iota(2, 3).rotated` equals to `[[2, 5], [1, 4], [0, 3]]`.) |$(T2 dropToHypercube, Returns maximal multidimensional cube of a slice.) |$(T2 normalizeStructure, Reverses iteration order for dimensions with negative strides, they become not negative; |and sorts dimensions according to the strides, dimensions with larger strides are going first.) |) | |$(H2 Bifacial operators) | |Some operators are bifacial, |i.e. they have two versions: one with template parameters, and another one |with function parameters. Versions with template parameters are preferable |because they allow compile time checks and can be optimized better. | |$(BOOKTABLE , | |$(TR $(TH Function Name) $(TH Variadic) $(TH Template) $(TH Function)) |$(T4 swapped, No, `slice.swapped!(2, 3)`, `slice.swapped(2, 3)`) |$(T4 rotated, No, `slice.rotated!(2, 3)(-1)`, `slice.rotated(2, 3, -1)`) |$(T4 strided, Yes/No, `slice.strided!(1, 2)(20, 40)`, `slice.strided(1, 20).strided(2, 40)`) |$(T4 transposed, Yes, `slice.transposed!(1, 4, 3)`, `slice.transposed(1, 4, 3)`) |$(T4 reversed, Yes, `slice.reversed!(0, 2)`, `slice.reversed(0, 2)`) |) | |Bifacial interface of $(LREF drop), $(LREF dropBack) |$(LREF dropExactly), and $(LREF dropBackExactly) |is identical to that of $(LREF strided). | |Bifacial interface of $(LREF dropOne) and $(LREF dropBackOne) |is identical to that of $(LREF reversed). | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). | |Copyright: Copyright © 2016, Ilya Yaroshenko | |Authors: Ilya Yaroshenko | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |T4=$(TR $(TDNW $(LREF $1)) $(TD $2) $(TD $3) $(TD $4)) |+/ |module mir.ndslice.dynamic; | | |import std.traits; |import std.meta; | |import mir.math.common: optmath; |import mir.internal.utility: Iota; |import mir.ndslice.internal; |import mir.ndslice.slice; |import mir.utility; | |@optmath: | |/++ |Reverses iteration order for dimensions with negative strides, they become not negative; |and sorts dimensions according to the strides, dimensions with larger strides are going first. | |Params: | slice = a slice to normalize dimension |Returns: | `true` if the slice can be safely casted to $(SUBREF slice, Contiguous) kind using $(SUBREF topology, assumeContiguous) and false otherwise. 
|+/ |bool normalizeStructure(Iterator, size_t N, SliceKind kind)(ref Slice!(Iterator, N, kind) slice) |{ | static if (kind == Contiguous) | { | return true; | } | else | { | import mir.utility: min; | enum Y = min(slice.S, N); | foreach(i; Iota!Y) | if (slice._stride!i < 0) | slice = slice.reversed!i; | static if (N == 1) | return slice._stride!0 == 1; | else | static if (N == 2 && kind == Canonical) | { | return slice._stride!0 == slice.length!1; | } | else | { | import mir.series: series, sort; | import mir.ndslice.topology: zip, iota; | auto l = slice._lengths[0 .. Y]; | auto s = slice._strides[0 .. Y]; | s.series(l).sort!"a > b"; | return slice.shape.iota.strides == slice.strides; | } | } |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.topology: iota; | | auto g = iota(2, 3); //contiguous | auto c = g.reversed!0; //canonical | auto u = g.transposed.allReversed; //universal | | assert(g.normalizeStructure); | assert(c.normalizeStructure); | assert(u.normalizeStructure); | | assert(c == g); | assert(u == g); | | c.popFront!1; | u.popFront!1; | | assert(!c.normalizeStructure); | assert(!u.normalizeStructure); |} | |private enum _swappedCode = q{ | with (slice) | { | auto tl = _lengths[dimensionA]; | auto ts = _strides[dimensionA]; | _lengths[dimensionA] = _lengths[dimensionB]; | _strides[dimensionA] = _strides[dimensionB]; | _lengths[dimensionB] = tl; | _strides[dimensionB] = ts; | } | return slice; |}; | |/++ |Swaps two dimensions. | |Params: | slice = input slice | dimensionA = first dimension | dimensionB = second dimension |Returns: | n-dimensional slice |See_also: $(LREF everted), $(LREF transposed) |+/ |template swapped(size_t dimensionA, size_t dimensionB) |{ | /// | @optmath auto swapped(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) _slice) | { | static if (kind == Universal || kind == Canonical && dimensionA + 1 < N && dimensionB + 1 < N) | { | alias slice = _slice; | } | else static if (dimensionA + 1 < N && dimensionB + 1 < N) | { | import mir.ndslice.topology: canonical; | auto slice = _slice.canonical; | } | else | { | import mir.ndslice.topology: universal; | auto slice = _slice.universal; | } | { | enum i = 0; | alias dimension = dimensionA; | mixin DimensionCTError; | } | { | enum i = 1; | alias dimension = dimensionB; | mixin DimensionCTError; | } | mixin (_swappedCode); | } |} | |/// ditto |auto swapped(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) _slice, size_t dimensionA, size_t dimensionB) |{ | import mir.ndslice.topology: universal; | auto slice = _slice.universal; | { | alias dimension = dimensionA; | mixin (DimensionRTError); | } | { | alias dimension = dimensionB; | mixin (DimensionRTError); | } | mixin (_swappedCode); |} | |/// ditto |Slice!(Iterator, 2, Universal) swapped(Iterator, SliceKind kind)(Slice!(Iterator, 2, kind) slice) |{ | return slice.swapped!(0, 1); |} | |/// Template |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.topology: iota; | | assert(iota(3, 4, 5, 6) | .swapped!(2, 1) | .shape == cast(size_t[4])[3, 5, 4, 6]); | | assert(iota(3, 4, 5, 6) | .swapped!(3, 1) | .shape == cast(size_t[4])[3, 6, 5, 4]); |} | |/// Function |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.topology: iota; | | assert(iota(3, 4, 5, 6) | .swapped(1, 2) | .shape == cast(size_t[4])[3, 5, 4, 6]); | | assert(iota(3, 4, 5, 6) | .swapped(1, 3) | .shape == cast(size_t[4])[3, 6, 5, 4]); |} | |/// 2D |@safe @nogc pure 
nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.topology: iota; | assert(iota(3, 4) | .swapped | .shape == cast(size_t[2])[4, 3]); |} | |private enum _rotatedCode = q{ | k &= 0b11; | if (k == 0) | return slice; | if (k == 2) | return slice.allReversed; | static if (__traits(compiles, { enum _enum = dimensionA + dimensionB; })) | { | slice = slice.swapped!(dimensionA, dimensionB); | if (k == 1) | return slice.reversed!dimensionA; | else | return slice.reversed!dimensionB; | } | else | { | slice = slice.swapped (dimensionA, dimensionB); | if (k == 1) | return slice.reversed(dimensionA); | else | return slice.reversed(dimensionB); | } |}; | |/++ |Rotates two selected dimensions by `k*90` degrees. |The order of dimensions is important. |If the slice has two dimensions, the default direction is counterclockwise. | |Params: | slice = input slice | dimensionA = first dimension | dimensionB = second dimension | k = rotation counter, can be negative |Returns: | n-dimensional slice |+/ |template rotated(size_t dimensionA, size_t dimensionB) |{ | /// | @optmath auto rotated(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) _slice, sizediff_t k = 1) | { | static if (kind == Universal || kind == Canonical && dimensionA + 1 < N && dimensionB + 1 < N) | { | alias slice = _slice; | } | else static if (dimensionA + 1 < N && dimensionB + 1 < N) | { | import mir.ndslice.topology: canonical; | auto slice = _slice.canonical; | } | else | { | import mir.ndslice.topology: universal; | auto slice = _slice.universal; | } | { | enum i = 0; | alias dimension = dimensionA; | mixin DimensionCTError; | } | { | enum i = 1; | alias dimension = dimensionB; | mixin DimensionCTError; | } | mixin (_rotatedCode); | } |} | |/// ditto |auto rotated(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) _slice, size_t dimensionA, size_t dimensionB, sizediff_t k = 1) |{ | import mir.ndslice.topology: universal; | auto slice = _slice.universal; | { | alias dimension = dimensionA; | mixin (DimensionRTError); | } | { | alias dimension = dimensionB; | mixin (DimensionRTError); | } | mixin (_rotatedCode); |} | |/// ditto |Slice!(Iterator, 2, Universal) rotated(Iterator, SliceKind kind)(Slice!(Iterator, 2, kind) slice, sizediff_t k = 1) |{ | return .rotated!(0, 1)(slice, k); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.topology: iota; | auto slice = iota(2, 3); | | auto a = [[0, 1, 2], | [3, 4, 5]]; | | auto b = [[2, 5], | [1, 4], | [0, 3]]; | | auto c = [[5, 4, 3], | [2, 1, 0]]; | | auto d = [[3, 0], | [4, 1], | [5, 2]]; | | assert(slice.rotated ( 4) == a); | assert(slice.rotated!(0, 1)(-4) == a); | assert(slice.rotated (1, 0, 8) == a); | | assert(slice.rotated == b); | assert(slice.rotated!(0, 1)(-3) == b); | assert(slice.rotated (1, 0, 3) == b); | | assert(slice.rotated ( 6) == c); | assert(slice.rotated!(0, 1)( 2) == c); | assert(slice.rotated (0, 1, -2) == c); | | assert(slice.rotated ( 7) == d); | assert(slice.rotated!(0, 1)( 3) == d); | assert(slice.rotated (1, 0, ) == d); |} | |/++ |Reverses the order of dimensions. 
| |Params: | _slice = input slice |Returns: | n-dimensional slice |See_also: $(LREF swapped), $(LREF transposed) |+/ |auto everted(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) _slice) |{ | static if (kind == Universal) | { | alias slice = _slice; | } | else | { | import mir.ndslice.topology: universal; | auto slice = _slice.universal; | } | with(slice) foreach (i; Iota!(N / 2)) | { | swap(_lengths[i], _lengths[N - i - 1]); | swap(_strides[i], _strides[N - i - 1]); | } | return slice; |} | |/// |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.topology: iota; | assert(iota(3, 4, 5) | .everted | .shape == cast(size_t[3])[5, 4, 3]); |} | |private enum _transposedCode = q{ | size_t[typeof(slice).N] lengths_; | ptrdiff_t[max(typeof(slice).S, size_t(1))] strides_; | with(slice) foreach (i; Iota!N) | { | lengths_[i] = _lengths[perm[i]]; | static if (i < typeof(slice).S) | strides_[i] = _strides[perm[i]]; | } | with(slice) foreach (i; Iota!(N, slice.N)) | { | lengths_[i] = _lengths[i]; | static if (i < typeof(slice).S) | strides_[i] = _strides[i]; | } | return typeof(slice)(lengths_, strides_[0 .. typeof(slice).S], slice._iterator); |}; | |package size_t[N] completeTranspose(size_t N)(size_t[] dimensions) |{ | assert(dimensions.length <= N); | size_t[N] ctr; | uint[N] mask; | foreach (i, ref dimension; dimensions) | { | mask[dimension] = true; | ctr[i] = dimension; | } | size_t j = dimensions.length; | foreach (i, e; mask) | if (e == false) | ctr[j++] = i; | return ctr; |} | |/++ |N-dimensional transpose operator. |Brings selected dimensions to the first position. |Params: | slice = input slice | Dimensions = indexes of dimensions to be brought to the first position | dimensions = indexes of dimensions to be brought to the first position |Returns: | n-dimensional slice |See_also: $(LREF swapped), $(LREF everted) |+/ |template transposed(Dimensions...) | if (Dimensions.length) |{ | static if (!allSatisfy!(isSize_t, Dimensions)) | alias transposed = .transposed!(staticMap!(toSize_t, Dimensions)); | else | /// | @optmath auto transposed(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) _slice) | { | import mir.algorithm.iteration: any; | enum s = N; | enum hasRowStride = [Dimensions].sliced.any!(a => a + 1 == s); | static if (kind == Universal || kind == Canonical && !hasRowStride) | { | alias slice = _slice; | } | else | static if (hasRowStride) | { | import mir.ndslice.topology: universal; | auto slice = _slice.universal; | } | else | { | import mir.ndslice.topology: canonical; | auto slice = _slice.canonical; | } | mixin DimensionsCountCTError; | foreach (i, dimension; Dimensions) | mixin DimensionCTError; | static assert(isValidPartialPermutation!(N)([Dimensions]), | "Failed to complete permutation of dimensions " ~ Dimensions.stringof | ~ tailErrorMessage!()); | enum perm = completeTranspose!(N)([Dimensions]); | static assert(perm.isPermutation, __PRETTY_FUNCTION__ ~ ": internal error."); | mixin (_transposedCode); | } |} | |///ditto |auto transposed(Iterator, size_t N, SliceKind kind, size_t M)(Slice!(Iterator, N, kind) _slice, size_t[M] dimensions...) |{ | import mir.ndslice.topology: universal; | auto slice = _slice.universal; | | mixin (DimensionsCountRTError); | foreach (dimension; dimensions) | mixin (DimensionRTError); | assert(dimensions.isValidPartialPermutation!(N), | "Failed to complete permutation of dimensions." 
| ~ tailErrorMessage!()); | immutable perm = completeTranspose!(N)(dimensions); | assert(perm.isPermutation, __PRETTY_FUNCTION__ ~ ": internal error."); | mixin (_transposedCode); |} | |///ditto |Slice!(Iterator, 2, Universal) transposed(Iterator, SliceKind kind)(Slice!(Iterator, 2, kind) slice) |{ | return .transposed!(1, 0)(slice); |} | |/// Template |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.topology: iota; | | assert(iota(3, 4, 5, 6, 7) | .transposed!(3, 1, 0) | .shape == cast(size_t[5])[6, 4, 3, 5, 7]); | | assert(iota(3, 4, 5, 6, 7) | .transposed!(4, 1, 0) | .shape == cast(size_t[5])[7, 4, 3, 5, 6]); |} | |/// Function |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.topology: iota; | | assert(iota(3, 4, 5, 6, 7) | .transposed(3, 1, 0) | .shape == cast(size_t[5])[6, 4, 3, 5, 7]); | | assert(iota(3, 4, 5, 6, 7) | .transposed(4, 1, 0) | .shape == cast(size_t[5])[7, 4, 3, 5, 6]); |} | |/// Single-argument function |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.topology: iota; | | assert(iota(3, 4, 5, 6, 7) | .transposed(3) | .shape == cast(size_t[5])[6, 3, 4, 5, 7]); | | assert(iota(3, 4, 5, 6, 7) | .transposed(4) | .shape == cast(size_t[5])[7, 3, 4, 5, 6]); |} | |/// _2-dimensional transpose |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.topology: iota; | assert(iota(3, 4) | .transposed | .shape == cast(size_t[2])[4, 3]); |} | |private enum _reversedCode = q{ | with (slice) | { | if (_lengths[dimension]) | _iterator += _strides[dimension] * (_lengths[dimension] - 1); | _strides[dimension] = -_strides[dimension]; | } |}; | |/++ |Reverses the direction of iteration for all dimensions. |Params: | _slice = input slice |Returns: | n-dimensional slice |+/ |Slice!(Iterator, N, Universal) allReversed(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) _slice) |{ | import mir.ndslice.topology: universal; | auto slice = _slice.universal; | foreach (dimension; Iota!N) | { | mixin (_reversedCode); | } | return slice; |} | |/// |@safe @nogc pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.topology: iota, retro; | assert(iota(4, 5).allReversed == iota(4, 5).retro); |} | |/++ |Reverses the direction of iteration for selected dimensions. | |Params: | _slice = input slice | Dimensions = indexes of dimensions to reverse order of iteration | dimensions = indexes of dimensions to reverse order of iteration |Returns: | n-dimensional slice |+/ |template reversed(Dimensions...) 
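|// Note: _reversedCode above reverses one dimension in place. It moves the iterator
|// to the last element along that dimension (stride * (length - 1), guarded against
|// empty dimensions) and negates the stride, so the same data is walked backwards.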
| if (Dimensions.length) |{ | static if (!allSatisfy!(isSize_t, Dimensions)) | alias reversed = .reversed!(staticMap!(toSize_t, Dimensions)); | else | /// | @optmath auto reversed(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) _slice) @trusted | { | import mir.algorithm.iteration: any; | enum s = N; | enum hasRowStride = [Dimensions].sliced.any!(a => a + 1 == s); | static if (kind == Universal || kind == Canonical && !hasRowStride) | { | alias slice = _slice; | } | else | static if (hasRowStride) | { | import mir.ndslice.topology: universal; | auto slice = _slice.universal; | } | else | { | import mir.ndslice.topology: canonical; | auto slice = _slice.canonical; | } | foreach (i, dimension; Dimensions) | { | mixin DimensionCTError; | mixin (_reversedCode); | } | return slice; | } |} | |///ditto |Slice!(Iterator, N, Universal) reversed(Iterator, size_t N, SliceKind kind, size_t M)(Slice!(Iterator, N, kind) _slice, size_t[M] dimensions...) | @trusted | if (M) |{ | import mir.ndslice.topology: universal; | auto slice = _slice.universal; | foreach (dimension; dimensions) | mixin (DimensionRTError); | foreach (i; Iota!(0, M)) | { | auto dimension = dimensions[i]; | mixin (_reversedCode); | } | return slice; |} | |/// ditto |auto reversed(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) |{ | return .reversed!0(slice); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology: iota; | | auto slice = iota([2, 2], 1); | assert(slice == [[1, 2], [3, 4]]); | | // Default | assert(slice.reversed == [[3, 4], [1, 2]]); | | // Template | assert(slice.reversed! 0 == [[3, 4], [1, 2]]); | assert(slice.reversed! 1 == [[2, 1], [4, 3]]); | assert(slice.reversed!(0, 1) == [[4, 3], [2, 1]]); | assert(slice.reversed!(1, 0) == [[4, 3], [2, 1]]); | assert(slice.reversed!(1, 1) == [[1, 2], [3, 4]]); | assert(slice.reversed!(0, 0, 0) == [[3, 4], [1, 2]]); | | // Function | assert(slice.reversed (0) == [[3, 4], [1, 2]]); | assert(slice.reversed (1) == [[2, 1], [4, 3]]); | assert(slice.reversed (0, 1) == [[4, 3], [2, 1]]); | assert(slice.reversed (1, 0) == [[4, 3], [2, 1]]); | assert(slice.reversed (1, 1) == [[1, 2], [3, 4]]); | assert(slice.reversed (0, 0, 0) == [[3, 4], [1, 2]]); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology: iota, canonical; | auto slice = iota([2, 2], 1).canonical; | assert(slice == [[1, 2], [3, 4]]); | | // Template | assert(slice.reversed! 
0 == [[3, 4], [1, 2]]); | assert(slice.reversed!(0, 0, 0) == [[3, 4], [1, 2]]); | | // Function | assert(slice.reversed (0) == [[3, 4], [1, 2]]); | assert(slice.reversed (0, 0, 0) == [[3, 4], [1, 2]]); |} | |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.topology; | import std.algorithm.comparison : equal; | import std.range : chain; | auto i0 = iota([4], 0); auto r0 = i0.retro; | auto i1 = iota([4], 4); auto r1 = i1.retro; | auto i2 = iota([4], 8); auto r2 = i2.retro; | auto slice = iota(3, 4).universal; | assert(slice .flattened.equal(chain(i0, i1, i2))); | // Template | assert(slice.reversed!(0) .flattened.equal(chain(i2, i1, i0))); | assert(slice.reversed!(1) .flattened.equal(chain(r0, r1, r2))); | assert(slice.reversed!(0, 1) .flattened.equal(chain(r2, r1, r0))); | assert(slice.reversed!(1, 0) .flattened.equal(chain(r2, r1, r0))); | assert(slice.reversed!(1, 1) .flattened.equal(chain(i0, i1, i2))); | assert(slice.reversed!(0, 0, 0).flattened.equal(chain(i2, i1, i0))); | // Function | assert(slice.reversed (0) .flattened.equal(chain(i2, i1, i0))); | assert(slice.reversed (1) .flattened.equal(chain(r0, r1, r2))); | assert(slice.reversed (0, 1) .flattened.equal(chain(r2, r1, r0))); | assert(slice.reversed (1, 0) .flattened.equal(chain(r2, r1, r0))); | assert(slice.reversed (1, 1) .flattened.equal(chain(i0, i1, i2))); | assert(slice.reversed (0, 0, 0).flattened.equal(chain(i2, i1, i0))); |} | |private enum _stridedCode = q{ | assert(factor > 0, "factor must be positive" | ~ tailErrorMessage!()); | immutable rem = slice._lengths[dimension] % factor; | slice._lengths[dimension] /= factor; | if (slice._lengths[dimension]) //do not remove `if (...)` | slice._strides[dimension] *= factor; | if (rem) | slice._lengths[dimension]++; |}; | |/++ |Multiplies the stride of the selected dimension by a factor. | |Params: | Dimensions = indexes of dimensions to be strided | dimension = indexe of a dimension to be strided | factor = step extension factors |Returns: | n-dimensional slice |+/ |template strided(Dimensions...) 
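|// Note: _stridedCode above keeps the remainder, divides the length, scales the
|// stride by the factor (only when the resulting length is non-zero), and adds one
|// back when the division truncated; in other words, the new length is
|// ceil(length / factor).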
| if (Dimensions.length) |{ | static if (!allSatisfy!(isSize_t, Dimensions)) | alias strided = .strided!(staticMap!(toSize_t, Dimensions)); | else | /++ | Params: | slice = input slice | factors = list of step extension factors | +/ | @optmath auto strided(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) _slice, Repeat!(Dimensions.length, ptrdiff_t) factors) | { | import mir.algorithm.iteration: any; | enum s = N; | enum hasRowStride = [Dimensions].sliced.any!(a => a + 1 == s); | static if (kind == Universal || kind == Canonical && !hasRowStride) | { | alias slice = _slice; | } | else | static if (hasRowStride) | { | import mir.ndslice.topology: universal; | auto slice = _slice.universal; | } | else | { | import mir.ndslice.topology: canonical; | auto slice = _slice.canonical; | } | foreach (i, dimension; Dimensions) | { | mixin DimensionCTError; | immutable factor = factors[i]; | mixin (_stridedCode); | } | return slice; | } |} | |///ditto |Slice!(Iterator, N, Universal) strided(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) _slice, size_t dimension, ptrdiff_t factor) |{ | import mir.ndslice.topology: universal; | auto slice = _slice.universal; | mixin (DimensionRTError); | mixin (_stridedCode); | return slice; |} | |/// |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology: iota; | auto slice = iota(3, 4); | | assert(slice | == [[0,1,2,3], [4,5,6,7], [8,9,10,11]]); | | // Template | assert(slice.strided!0(2) | == [[0,1,2,3], [8,9,10,11]]); | | assert(slice.strided!1(3) | == [[0, 3], [4, 7], [8, 11]]); | | assert(slice.strided!(0, 1)(2, 3) | == [[0, 3], [8, 11]]); | | // Function | assert(slice.strided(0, 2) | == [[0,1,2,3], [8,9,10,11]]); | | assert(slice.strided(1, 3) | == [[0, 3], [4, 7], [8, 11]]); | | assert(slice.strided(0, 2).strided(1, 3) | == [[0, 3], [8, 11]]); |} | |/// |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology: iota, universal; | static assert(iota(13, 40).universal.strided!(0, 1)(2, 5).shape == [7, 8]); | static assert(iota(93).universal.strided!(0, 0)(7, 3).shape == [5]); |} | |/// |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology: iota, canonical; | auto slice = iota(3, 4).canonical; | | assert(slice | == [[0,1,2,3], [4,5,6,7], [8,9,10,11]]); | | // Template | assert(slice.strided!0(2) | == [[0,1,2,3], [8,9,10,11]]); | | // Function | assert(slice.strided(0, 2) | == [[0,1,2,3], [8,9,10,11]]); |} | |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.topology; | import std.algorithm.comparison : equal; | import std.range : chain; | auto i0 = iota([4], 0); auto s0 = stride(i0, 3); | auto i1 = iota([4], 4); auto s1 = stride(i1, 3); | auto i2 = iota([4], 8); auto s2 = stride(i2, 3); | auto slice = iota(3, 4).universal; | assert(slice .flattened.equal(chain(i0, i1, i2))); | // Template | assert(slice.strided!0(2) .flattened.equal(chain(i0, i2))); | assert(slice.strided!1(3) .flattened.equal(chain(s0, s1, s2))); | assert(slice.strided!(0, 1)(2, 3).flattened.equal(chain(s0, s2))); | // Function | assert(slice.strided(0, 2).flattened.equal(chain(i0, i2))); | assert(slice.strided(1, 3).flattened.equal(chain(s0, s1, s2))); | assert(slice.strided(0, 2).strided(1, 3).flattened.equal(chain(s0, s2))); |} | |/++ |Returns maximal multidimensional cube. 
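|That is, every dimension is trimmed to the length of the shortest one, so the result is an N-dimensional cube.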
| |Params: | slice = input slice |Returns: | n-dimensional slice |+/ |Slice!(Iterator, N, kind) dropToHypercube(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | if (kind == Canonical || kind == Universal) |do |{ | size_t length = slice._lengths[0]; | foreach (i; Iota!(1, N)) | if (length > slice._lengths[i]) | length = slice._lengths[i]; | foreach (i; Iota!N) | slice._lengths[i] = length; | return slice; |} | |/// ditto |Slice!(Iterator, N, Canonical) dropToHypercube(Iterator, size_t N)(Slice!(Iterator, N) slice) |{ | import mir.ndslice.topology: canonical; | return slice.canonical.dropToHypercube; |} | |/// |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology: iota, canonical, universal; | | assert(iota(5, 3, 6, 7) | .dropToHypercube | .shape == cast(size_t[4])[3, 3, 3, 3]); | | assert(iota(5, 3, 6, 7) | .universal | .dropToHypercube | .shape == cast(size_t[4])[3, 3, 3, 3]); |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/dynamic.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-core-1.1.2-mir-core-source-mir-primitives.lst |/++ |Templates used to check primitives and |range primitives for arrays with multi-dimensional like API support. | |Note: |UTF strings behaves like common arrays in Mir. |`std.uni.byCodePoint` can be used to create a range of characters. | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2017-, Ilya Yaroshenko |Authors: Ilya Yaroshenko |+/ |module mir.primitives; | |import mir.internal.utility; |import mir.math.common: optmath; |import std.traits; | |@optmath: | |/++ |Returns: `true` if `R` has a `length` member that returns an |integral type implicitly convertible to `size_t`. | |`R` does not have to be a range. |+/ |enum bool hasLength(R) = is(typeof( |(const R r, inout int = 0) |{ | size_t l = r.length; |})); | |/// |@safe version(mir_test) unittest |{ | static assert(hasLength!(char[])); | static assert(hasLength!(int[])); | static assert(hasLength!(inout(int)[])); | | struct B { size_t length() const { return 0; } } | struct C { @property size_t length() const { return 0; } } | static assert(hasLength!(B)); | static assert(hasLength!(C)); |} | |/++ |Returns: `true` if `R` has a `shape` member that returns an static array type of size_t[N]. |+/ |enum bool hasShape(R) = is(typeof( |(const R r, inout int = 0) |{ | auto l = r.shape; | alias F = typeof(l); | import std.traits; | static assert(isStaticArray!F); | static assert(is(ForeachType!F == size_t)); |})); | |/// |@safe version(mir_test) unittest |{ | static assert(hasShape!(char[])); | static assert(hasShape!(int[])); | static assert(hasShape!(inout(int)[])); | | struct B { size_t length() const { return 0; } } | struct C { @property size_t length() const { return 0; } } | static assert(hasShape!(B)); | static assert(hasShape!(C)); |} | |/// |auto shape(Range)(scope const auto ref Range range) @property | if (hasLength!Range || hasShape!Range) |{ | static if (__traits(hasMember, Range, "shape")) | { | return range.shape; | } | else | { | size_t[1] ret; | ret[0] = range.length; | return ret; | } |} | |/// |version(mir_test) unittest |{ | static assert([2, 2, 2].shape == [3]); |} | |/// |template DimensionCount(T) |{ | import mir.ndslice.slice: Slice, SliceKind; | /// Extracts dimension count from a $(LREF Slice). Alias for $(LREF isSlice). 
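|    // Resolution order: a Slice reports its static dimension count N; any other
|    // type with a `shape` primitive reports the length of its static shape array;
|    // everything else is treated as one-dimensional.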
| static if(is(T : Slice!(Iterator, N, kind), Iterator, size_t N, SliceKind kind)) | enum size_t DimensionCount = N; | else | static if (hasShape!T) | enum size_t DimensionCount = typeof(T.init.shape).length; | else | enum size_t DimensionCount = 1; |} | |package(mir) bool anyEmptyShape(size_t N)(scope const auto ref size_t[N] shape) @property |{ | foreach (i; Iota!N) | if (shape[i] == 0) | return true; | return false; |} | |/// |bool anyEmpty(Range)(scope const auto ref Range range) @property | if (hasShape!Range || __traits(hasMember, Range, "anyEmpty")) |{ | static if (__traits(hasMember, Range, "anyEmpty")) | { | return range.anyEmpty; | } | else | static if (__traits(hasMember, Range, "shape")) | { | return anyEmptyShape(range.shape); | } | else | { | return range.empty; | } |} | |/// |size_t elementCount(Range)(scope const auto ref Range range) @property | if (hasShape!Range || __traits(hasMember, Range, "elementCount")) |{ | static if (__traits(hasMember, Range, "elementCount")) | { | return range; | } | else | { | auto sh = range.shape; | size_t ret = sh[0]; | foreach(i; Iota!(1, sh.length)) | { | ret *= sh[i]; | } | return ret; | } |} | |deprecated("use elementCount instead") |alias elementsCount = elementCount; | | |/++ |Returns the element type of a struct with `.DeepElement` inner alias or a type of common array. |Returns `ForeachType` if struct does not have `.DeepElement` member. |+/ |template DeepElementType(S) | if (is(S == struct) || is(S == class) || is(S == interface)) |{ | static if (__traits(hasMember, S, "DeepElement")) | alias DeepElementType = S.DeepElement; | else | alias DeepElementType = ForeachType!S; |} | |/// ditto |alias DeepElementType(S : T[], T) = T; | |/+ ARRAY PRIMITIVES +/ |pragma(inline, true): | |/// |bool empty(size_t dim = 0, T)(scope const T[] ar) | if (!dim) |{ | return !ar.length; |} | |/// |version(mir_test) |unittest |{ | assert((int[]).init.empty); | assert(![1].empty!0); // Slice-like API |} | |/// |ref inout(T) front(size_t dim = 0, T)(scope return inout(T)[] ar) | if (!dim && !is(Unqual!T[] == void[])) |{ | assert(ar.length, "Accessing front of an empty array."); | return ar[0]; |} | |/// |version(mir_test) |unittest |{ | assert(*&[3, 4].front == 3); // access be ref | assert([3, 4].front!0 == 3); // Slice-like API |} | | |/// |ref inout(T) back(size_t dim = 0, T)(scope return inout(T)[] ar) | if (!dim && !is(Unqual!T[] == void[])) |{ | assert(ar.length, "Accessing back of an empty array."); | return ar[$ - 1]; |} | |/// |version(mir_test) |unittest |{ | assert(*&[3, 4].back == 4); // access be ref | assert([3, 4].back!0 == 4); // Slice-like API |} | |/// |void popFront(size_t dim = 0, T)(scope ref inout(T)[] ar) | if (!dim && !is(Unqual!T[] == void[])) |{ | assert(ar.length, "Evaluating popFront() on an empty array."); | ar = ar[1 .. $]; |} | |/// |version(mir_test) |unittest |{ | auto ar = [3, 4]; | ar.popFront; | assert(ar == [4]); | ar.popFront!0; // Slice-like API | assert(ar == []); |} | |/// |void popBack(size_t dim = 0, T)(scope ref inout(T)[] ar) | if (!dim && !is(Unqual!T[] == void[])) |{ | assert(ar.length, "Evaluating popBack() on an empty array."); | ar = ar[0 .. $ - 1]; |} | |/// |version(mir_test) |unittest |{ | auto ar = [3, 4]; | ar.popBack; | assert(ar == [3]); | ar.popBack!0; // Slice-like API | assert(ar == []); |} | |/// |size_t popFrontN(size_t dim = 0, T)(scope ref inout(T)[] ar, size_t n) | if (!dim && !is(Unqual!T[] == void[])) |{ | n = ar.length < n ? ar.length : n; | ar = ar[n .. 
$]; | return n; |} | |/// |version(mir_test) |unittest |{ | auto ar = [3, 4]; | ar.popFrontN(1); | assert(ar == [4]); | ar.popFrontN!0(10); // Slice-like API | assert(ar == []); |} | |/// |size_t popBackN(size_t dim = 0, T)(scope ref inout(T)[] ar, size_t n) | if (!dim && !is(Unqual!T[] == void[])) |{ | n = ar.length < n ? ar.length : n; | ar = ar[0 .. $ - n]; | return n; |} | |/// |version(mir_test) |unittest |{ | auto ar = [3, 4]; | ar.popBackN(1); | assert(ar == [3]); | ar.popBackN!0(10); // Slice-like API | assert(ar == []); |} | |/// |void popFrontExactly(size_t dim = 0, T)(scope ref inout(T)[] ar, size_t n) | if (!dim && !is(Unqual!T[] == void[])) |{ | assert(ar.length >= n, "Evaluating *.popFrontExactly(n) on an array with length less then n."); | ar = ar[n .. $]; |} | |/// |version(mir_test) |unittest |{ | auto ar = [3, 4, 5]; | ar.popFrontExactly(2); | assert(ar == [5]); | ar.popFrontExactly!0(1); // Slice-like API | assert(ar == []); |} | |/// |void popBackExactly(size_t dim = 0, T)(scope ref inout(T)[] ar, size_t n) | if (!dim && !is(Unqual!T[] == void[])) |{ | assert(ar.length >= n, "Evaluating *.popBackExactly(n) on an array with length less then n."); | ar = ar[0 .. $ - n]; |} | |/// |version(mir_test) |unittest |{ | auto ar = [3, 4, 5]; | ar.popBackExactly(2); | assert(ar == [3]); | ar.popBackExactly!0(1); // Slice-like API | assert(ar == []); |} | |/// |size_t length(size_t d : 0, T)(in T[] array) | if (d == 0) |{ | return array.length; |} | |/// |version(mir_test) |unittest |{ | assert([1, 2].length!0 == 2); | assert([1, 2].elementCount == 2); |} | |/// |inout(T)[] save(T)(scope return inout(T)[] array) |{ | return array; |} | |/// |version(mir_test) |unittest |{ | auto a = [1, 2]; | assert(a is a.save); |} ../../../.dub/packages/mir-core-1.1.2/mir-core/source/mir/primitives.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-random-2.2.13-mir-random-source-mir-random-package.lst |/++ |$(SCRIPT inhibitQuickIndex = 1;) | |Basic API to construct non-uniform random number generators and stochastic algorithms. |Non-uniform and uniform random variable can be found at `mir.random.variable`. | |$(TABLE $(H2 Generation functions), |$(TR $(TH Function Name) $(TH Description)) |$(T2 rand, Generates real, integral, boolean, and enumerated uniformly distributed values.) |$(T2 randIndex, Generates uniformly distributed index.) |$(T2 randGeometric, Generates geometric distribution with `p = 1/2`.) |$(T2 randExponential2, Generates scaled Exponential distribution.) |) | |$(TABLE $(H2 Phobos Compatibility), |$(TR $(TH Template Name) $(TH Description)) |$(T2 PhobosRandom, Extends a Mir random number engine to meet Phobos `std.random` interface) |$(T2 isPhobosUniformRNG, Tests if type is a Phobos-style uniform RNG) |) | |Publicly includes `mir.random.engine`. | |Authors: Ilya Yaroshenko, Nathan Sashihara |Copyright: Copyright, Ilya Yaroshenko 2016-. |License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0). |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, random, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) | |+/ |module mir.random; | |import std.traits; |import mir.bitop: cttz; |import mir.math.common: log2; | |public import mir.random.engine; | |version (LDC) |{ | import ldc.intrinsics: llvm_expect; | // LDC 1.8.0 supports llvm_expect in CTFE. 
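|    // _ctfeExpect yields a string mixin that wraps a condition in
|    // llvm_expect(expr, expected) when the intrinsic is also usable in CTFE,
|    // and falls back to the bare expression otherwise. randIndex below mixes it
|    // in to mark the rejection branch of the reduction as unlikely without
|    // breaking compile-time evaluation.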
| private template _ctfeExpect(string expr, string expected) | { | static if (__traits(compiles, { enum a = llvm_expect(123, 456); static assert(a == 123); })) | private enum _ctfeExpect = "llvm_expect("~expr~","~expected~")"; | else | private enum _ctfeExpect = expr; | } |} |else version (GNU) |{ | import gcc.builtins: __builtin_expect; | private enum _ctfeExpect(string expr, string expected) = `__builtin_expect(`~expr~`,`~expected~`)`; |} |else |{ | private enum _ctfeExpect(string expr, string expected) = expr; |} | |/++ |Params: | gen = saturated random number generator |Returns: | Uniformly distributed integer for interval `[T.min .. T.max]`. |+/ |T rand(T, G)(scope ref G gen) | if (isSaturatedRandomEngine!G && isIntegral!T && !is(T == enum)) |{ | alias R = EngineReturnType!G; | enum P = T.sizeof / R.sizeof; | static if (P > 1) | { | _Uab!(R[P],T) u = void; | version(LittleEndian) | foreach (ref e; u.asArray) | e = gen(); | else | foreach_reverse (ref e; u.asArray) | e = gen(); | return u.asInteger; | } | else static if (preferHighBits!G && P == 0) | { | version(LDC) pragma(inline, true); | return cast(T) (gen() >>> ((R.sizeof - T.sizeof) * 8)); | } | else | { | version(LDC) pragma(inline, true); | return cast(T) gen(); | } |} | |/// ditto |T rand(T, G)(scope G* gen) | if (isSaturatedRandomEngine!G && isIntegral!T && !is(T == enum)) |{ | return rand!(T, G)(*gen); |} | |/// ditto |T rand(T)() | if (isIntegral!T && !is(T == enum)) |{ | return rand!T(rne); |} | |/// |@nogc nothrow @safe version(mir_random_test) unittest |{ | auto s = rand!short; | auto n = rand!ulong; |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | auto s = gen.rand!short; | auto n = gen.rand!ulong; |} | |/++ |Params: | gen = saturated random number generator |Returns: | Uniformly distributed boolean. |+/ |bool rand(T : bool, G)(scope ref G gen) | if (isSaturatedRandomEngine!G) |{ | import std.traits : Signed; | return 0 > cast(Signed!(EngineReturnType!G)) gen(); |} | |/// ditto |bool rand(T : bool, G)(scope G* gen) | if (isSaturatedRandomEngine!G) |{ | return rand!(T, G)(*gen); |} | |/// ditto |bool rand(T : bool)() |{ | return rand!T(rne); |} | |/// |@nogc nothrow @safe version(mir_random_test) unittest |{ | auto s = rand!bool; |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | auto s = gen.rand!bool; |} | |@nogc nothrow @safe version(mir_random_test) unittest |{ | //Coverage. Impure because uses thread-local. | Random* gen = threadLocalPtr!Random; | auto s = gen.rand!bool; |} | |private alias Iota(size_t j) = Iota!(0, j); | |private template Iota(size_t i, size_t j) |{ | import std.meta; | static assert(i <= j, "Iota: i should be less than or equal to j"); | static if (i == j) | alias Iota = AliasSeq!(); | else | alias Iota = AliasSeq!(i, Iota!(i + 1, j)); |} | |/+ |Returns pseudo-random integer with the low `bitsWanted` bits set to |random values and the remaining high bits all 0. 
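|When the engine prefers its high bits (preferHighBits!G), the wanted bits are taken
|by shifting the word right; otherwise the low bits are masked off.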
|+/ |private T _randBits(T, uint bitsWanted, G)(scope ref G gen) |if (bitsWanted >= 0 && bitsWanted <= T.sizeof * 8 | && (is(T == uint) || is(T == ulong) || is(T == size_t))) |{ | static if (EngineReturnType!G.sizeof >= T.sizeof) | auto bits = gen(); | else | auto bits = gen.rand!T; | static if (preferHighBits!G) | { | enum rshift = (typeof(bits).sizeof * 8) - bitsWanted; | return cast(T) (bits >>> rshift); | } | else | { | enum mask = (typeof(bits)(1) << bitsWanted) - 1; | return cast(T) (bits & typeof(bits)(mask)); | } |} | |/++ |Params: | gen = saturated random number generator |Returns: | Uniformly distributed enumeration. |+/ |T rand(T, G)(scope ref G gen) | if (isSaturatedRandomEngine!G && is(T == enum)) |{ | static if (is(T : long)) | enum tiny = [EnumMembers!T] == [Iota!(EnumMembers!T.length)]; | else | enum tiny = false; | enum n = [EnumMembers!T].length; | // If `gen` produces 32 bits or fewer at a time and we have fewer | // than 2^^32 elements, use a `uint` index. | static if (n <= uint.max && EngineReturnType!G.max <= uint.max) | alias IndexType = uint; | else | alias IndexType = size_t; | | static if ((n & (n - 1)) == 0) | { | // Optimized case: power of 2. | import core.bitop : bsr; | enum bitsWanted = bsr(n); | IndexType index = _randBits!(IndexType, bitsWanted)(gen); | } | else | { | // General case. | IndexType index = gen.randIndex!IndexType(n); | } | | static if (tiny) | { | return cast(T) index; | } | else | { | static immutable T[EnumMembers!T.length] members = [EnumMembers!T]; | return members[index]; | } |} | |/// ditto |T rand(T, G)(scope G* gen) | if (isSaturatedRandomEngine!G && is(T == enum)) |{ | return rand!(T, G)(*gen); |} | |/// ditto |T rand(T)() | if (is(T == enum)) |{ | return .rand!T(rne); |} | |/// |@nogc nothrow @safe version(mir_random_test) unittest |{ | enum A { a, b, c } | auto e = rand!A; |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | enum A { a, b, c } | auto e = gen.rand!A; |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | enum A : dchar { a, b, c } | auto e = gen.rand!A; |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | enum A : string { a = "a", b = "b", c = "c" } | auto e = gen.rand!A; |} | |@nogc nothrow @safe version(mir_random_test) unittest |{ | //Coverage. Impure because uses thread-local. | Random* gen = threadLocalPtr!Random; | enum A : dchar { a, b, c, d } | auto e = gen.rand!A; |} | |private static union _U |{ | real r; | struct | { | version(LittleEndian) | { | ulong m; | ushort e; | } | else | { | ushort e; | align(2) | ulong m; | } | } |} | |private static union _Uab(A,B) if (A.sizeof == B.sizeof && !is(Unqual!A == Unqual!B)) |{ | A a; | B b; | | private import std.traits: isArray, isIntegral, isFloatingPoint; | | static if (isArray!A && !isArray!B) | alias asArray = a; | static if (isArray!B && !isArray!A) | alias asArray = b; | | static if (isIntegral!A && !isIntegral!B) | alias asInteger = a; | static if (isIntegral!B && !isIntegral!A) | alias asInteger = b; | | static if (isFloatingPoint!A && !isFloatingPoint!B) | alias asFloatingPoint = a; | static if (isFloatingPoint!B && !isFloatingPoint!A) | alias asFloatingPoint = b; |} | |/++ |Params: | gen = saturated random number generator | boundExp = bound exponent (optional). 
`boundExp` must be less or equal to `T.max_exp`. |Returns: | Uniformly distributed real for interval `(-2^^boundExp , 2^^boundExp)`. |Note: `fabs` can be used to get a value from positive interval `[0, 2^^boundExp$(RPAREN)`. |+/ |T rand(T, G)(scope ref G gen, sizediff_t boundExp = 0) | if (isSaturatedRandomEngine!G && isFloatingPoint!T) |{ | assert(boundExp <= T.max_exp); | static if (T.mant_dig == float.mant_dig) | { | enum W = T.sizeof * 8 - T.mant_dig;//8 | _Uab!(int,float) u = void; | u.asInteger = gen.rand!uint; | enum uint EXPMASK = 0x7F80_0000; | boundExp -= T.min_exp - 1; | size_t exp = EXPMASK & u.asInteger; | exp = boundExp - (exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W); | u.asInteger &= ~EXPMASK; | if(cast(sizediff_t)exp < 0) | { | exp = -cast(sizediff_t)exp; | uint m = u.asInteger & int.max; | if(exp >= T.mant_dig) | m = 0; | else | m >>= cast(uint)exp; | u.asInteger = (u.asInteger & ~int.max) ^ m; | exp = 0; | } | u.asInteger = cast(uint)(exp << (T.mant_dig - 1)) ^ u.asInteger; | return u.asFloatingPoint; | } | else | static if (T.mant_dig == double.mant_dig) | { | enum W = T.sizeof * 8 - T.mant_dig; //11 | _Uab!(long,double) u = void; | u.asInteger = gen.rand!ulong; | enum ulong EXPMASK = 0x7FF0_0000_0000_0000; | boundExp -= T.min_exp - 1; | ulong exp = EXPMASK & u.asInteger; | exp = ulong(boundExp) - (exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W); | u.asInteger &= ~EXPMASK; | if(cast(long)exp < 0) | { | exp = -cast(sizediff_t)exp; | ulong m = u.asInteger & long.max; | if(exp >= T.mant_dig) | m = 0; | else | m >>= cast(uint)exp; | u.asInteger = (u.asInteger & ~long.max) ^ m; | exp = 0; | } | u.asInteger = (exp << (T.mant_dig - 1)) ^ u.asInteger; | return u.asFloatingPoint; | } | else | static if (T.mant_dig == 64) | { | enum W = 15; | auto d = gen.rand!uint; | auto m = gen.rand!ulong; | enum uint EXPMASK = 0x7FFF; | boundExp -= T.min_exp - 1; | size_t exp = EXPMASK & d; | exp = boundExp - (exp ? 
cttz(exp) : gen.randGeometric + W); | if (cast(sizediff_t)exp > 0) | m |= ~long.max; | else | { | m &= long.max; | exp = -cast(sizediff_t)exp; | if(exp >= T.mant_dig) | m = 0; | else | m >>= cast(uint)exp; | exp = 0; | } | d = cast(uint) exp ^ (d & ~EXPMASK); | _U ret = void; | ret.e = cast(ushort)d; | ret.m = m; | return ret.r; | } | /// TODO: quadruple | else static assert(0); |} | |/// ditto |T rand(T, G)(scope G* gen, sizediff_t boundExp = 0) | if (isSaturatedRandomEngine!G && isFloatingPoint!T) |{ | return rand!(T, G)(*gen, boundExp); |} | |/// ditto |T rand(T)(sizediff_t boundExp = 0) | if (isFloatingPoint!T) |{ | return rand!T(rne, boundExp); |} | | |/// |@nogc nothrow @safe version(mir_random_test) unittest |{ | import mir.math.common: fabs; | | auto a = rand!float; | assert(-1 < a && a < +1); | | auto b = rand!double(4); | assert(-16 < b && b < +16); | | auto c = rand!double(-2); | assert(-0.25 < c && c < +0.25); | | auto d = rand!real.fabs; | assert(0.0L <= d && d < 1.0L); |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.math.common: fabs; | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | | auto a = gen.rand!float; | assert(-1 < a && a < +1); | | auto b = gen.rand!double(4); | assert(-16 < b && b < +16); | | auto c = gen.rand!double(-2); | assert(-0.25 < c && c < +0.25); | | auto d = gen.rand!real.fabs; | assert(0.0L <= d && d < 1.0L); |} | |/// Subnormal numbers |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | auto x = gen.rand!double(double.min_exp-1); | assert(-double.min_normal < x && x < double.min_normal); |} | |@nogc nothrow @safe version(mir_random_test) unittest |{ | //Coverage. Impure because uses thread-local. | import mir.math.common: fabs; | import std.meta: AliasSeq; | | auto a = rne.rand!float; | assert(-1 < a && a < +1); | | auto b = rne.rand!double(4); | assert(-16 < b && b < +16); | | auto c = rne.rand!double(-2); | assert(-0.25 < c && c < +0.25); | | auto d = rne.rand!real.fabs; | assert(0.0L <= d && d < 1.0L); | | foreach(T; AliasSeq!(float, double, real)) | { | auto f = rne.rand!T(T.min_exp-1); | assert(f.fabs < T.min_normal, T.stringof); | } |} | |/++ |Params: | gen = uniform random number generator | m = positive module |Returns: | Uniformly distributed integer for interval `[0 .. m$(RPAREN)`. 
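|
|The implementation below replaces a plain `gen.rand!T % m` with Daniel Lemire's
|multiply-shift reduction, which avoids the division and stays unbiased via a
|cheap rejection test. A standalone sketch for 32-bit words (`nextWord` stands
|for any source of uniform `uint`s; the names are illustrative, not part of this
|module):
|---
|uint reduce(alias nextWord)(uint bound)
|{
|    ulong product = ulong(nextWord()) * bound;
|    uint leftover = cast(uint) product;            // low half
|    if (leftover < bound)                          // rare: possibly biased region
|    {
|        immutable uint threshold = -bound % bound; // 2^^32 % bound
|        while (leftover < threshold)
|        {
|            product = ulong(nextWord()) * bound;
|            leftover = cast(uint) product;
|        }
|    }
|    return cast(uint) (product >> 32);             // high half, uniform in [0 .. bound)
|}
|---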
|+/ |T randIndex(T, G)(scope ref G gen, T _m) | if(isSaturatedRandomEngine!G && isUnsigned!T) |{ | immutable m = _m + 0u; | static if (EngineReturnType!G.sizeof >= T.sizeof * 2) | alias MaybeR = EngineReturnType!G; | else static if (uint.sizeof >= T.sizeof * 2) | alias MaybeR = uint; | else static if (ulong.sizeof >= T.sizeof * 2) | alias MaybeR = ulong; | else static if (is(ucent) && __traits(compiles, {static assert(ucent.sizeof >= T.sizeof * 2);})) | mixin ("alias MaybeR = ucent;"); | else | alias MaybeR = void; | | static if (!is(MaybeR == void)) | { | alias R = MaybeR; | static assert(R.sizeof >= T.sizeof * 2); | //Use Daniel Lemire's fast alternative to modulo reduction: | //https://lemire.me/blog/2016/06/30/fast-random-shuffling/ | R randombits = cast(R) gen.rand!T; | R multiresult = randombits * m; | T leftover = cast(T) multiresult; | if (mixin(_ctfeExpect!(`leftover < m`, `false`))) | { | immutable threshold = -m % m ; | while (leftover < threshold) | { | randombits = cast(R) gen.rand!T; | multiresult = randombits * m; | leftover = cast(T) multiresult; | } | } | enum finalshift = T.sizeof * 8; | return cast(T) (multiresult >>> finalshift); | } | else | { | import mir.utility : extMul; | //Use Daniel Lemire's fast alternative to modulo reduction: | //https://lemire.me/blog/2016/06/30/fast-random-shuffling/ | auto u = extMul!T(gen.rand!T, m); | if (mixin(_ctfeExpect!(`u.low < m`, `false`))) | { | immutable T threshold = -m % m; | while (u.low < threshold) | { | u = extMul!T(gen.rand!T, m); | } | } | return u.high; | } |} | |/// ditto |T randIndex(T, G)(scope G* gen, T m) | if(isSaturatedRandomEngine!G && isUnsigned!T) |{ | return randIndex!(T, G)(*gen, m); |} | |/// ditto |T randIndex(T)(T m) | if(isUnsigned!T) |{ | return randIndex!T(rne, m); |} | |/// |@nogc nothrow @safe version(mir_random_test) unittest |{ | auto s = randIndex(100u); | auto n = randIndex!ulong(-100); |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random; | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | auto s = gen.randIndex!uint(100); | auto n = gen.randIndex!ulong(-100); |} | |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | //CTFE check. | import std.meta : AliasSeq; | import mir.random.engine.xoshiro : Xoroshiro128Plus; | foreach (IntType; AliasSeq!(ubyte,ushort,uint,ulong)) | { | enum IntType e = (){auto g = Xoroshiro128Plus(1); return g.randIndex!IntType(100);}(); | auto gen = Xoroshiro128Plus(1); | assert(e == gen.randIndex!IntType(100)); | } |} | |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | //Test production of ulong from ulong generator. | import mir.random.engine.xoshiro; | auto gen = Xoroshiro128Plus(1); | enum ulong limit = 10; | enum count = 10; | ulong[limit] buckets; | foreach (_; 0 .. count) | { | ulong x = gen.randIndex!ulong(limit); | assert(x < limit); | buckets[cast(size_t) x] += 1; | } | foreach (i, x; buckets) | assert(x != count, "All values were the same!"); |} | |@nogc nothrow @safe version(mir_random_test) unittest |{ | //Coverage. Impure because uses thread-local. | Random* gen = threadLocalPtr!Random; | auto s = gen.randIndex!uint(100); | auto n = gen.randIndex!ulong(-100); |} | |/++ | Returns: `n >= 0` such that `P(n) := 1 / (2^^(n + 1))`. 
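|
| This follows from treating each bit of a uniform word as an independent fair
| coin: the lowest set bit sits at position `n` with probability
| `(1/2)^^n * (1/2) = 1 / (2^^(n + 1))`, and the implementation returns exactly
| that position via `cttz`, drawing further words while a word comes up all zero.
| A rough empirical check (seed and bucket count are arbitrary):
|---
|import mir.random.engine.xorshift: Xorshift;
|auto gen = Xorshift(42);
|size_t[4] hist;
|foreach (_; 0 .. 1 << 16)
|{
|    auto n = gen.randGeometric;
|    if (n < hist.length)
|        ++hist[n];
|}
|// Expect roughly 1/2, 1/4, 1/8 and 1/16 of the draws in buckets 0 .. 3.
|assert(hist[0] > hist[1] && hist[1] > hist[2] && hist[2] > hist[3]);
|---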
|+/ |size_t randGeometric(G)(scope ref G gen) | if(isSaturatedRandomEngine!G) |{ | alias R = EngineReturnType!G; | static if (R.sizeof >= size_t.sizeof) | alias T = size_t; | else | alias T = R; | for(size_t count = 0;; count += T.sizeof * 8) | if(auto val = gen.rand!T()) | return count + cttz(val); |} | |/// ditto |size_t randGeometric(G)(scope G* gen) | if(isSaturatedRandomEngine!G) |{ | return randGeometric!(G)(*gen); |} | |/// ditto |size_t randGeometric()() |{ | return randGeometric(rne); |} | |/// |@nogc nothrow @safe version(mir_random_test) unittest |{ | size_t s = randGeometric; |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xoshiro; | auto gen = Xoroshiro128Plus(1); | | size_t s = gen.randGeometric; |} | |/++ |Params: | gen = saturated random number generator |Returns: | `X ~ Exp(1) / log(2)`. |Note: `fabs` can be used to get a value from positive interval `[0, 2^^boundExp$(RPAREN)`. |+/ |T randExponential2(T, G)(scope ref G gen) | if (isSaturatedRandomEngine!G && isFloatingPoint!T) |{ | enum W = T.sizeof * 8 - T.mant_dig - 1 - bool(T.mant_dig == 64); | static if (is(T == float)) | { | _Uab!(uint,float) u = void; | u.asInteger = gen.rand!uint; | enum uint EXPMASK = 0xFF80_0000; | auto exp = EXPMASK & u.asInteger; | u.asInteger &= ~EXPMASK; | u.asInteger ^= 0x3F000000; // 0.5 | auto y = exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W; | auto x = u.asFloatingPoint; | } | else | static if (is(T == double)) | { | _Uab!(ulong,double) u = void; | u.asInteger = gen.rand!ulong; | enum ulong EXPMASK = 0xFFF0_0000_0000_0000; | auto exp = EXPMASK & u.asInteger; | u.asInteger &= ~EXPMASK; | u.asInteger ^= 0x3FE0000000000000; // 0.5 | auto y = exp ? cttz(exp) - (T.mant_dig - 1) : gen.randGeometric + W; | auto x = u.asFloatingPoint; | } | else | static if (T.mant_dig == 64) | { | _U ret = void; | ret.e = 0x3FFE; | ret.m = gen.rand!ulong | ~long.max; | auto y = gen.randGeometric; | auto x = ret.r; | } | /// TODO: quadruple | else static assert(0); | | if (x == 0.5f) | return y; | else | return -log2(x) + y; |} | |/// ditto |T randExponential2(T, G)(scope G* gen) | if (isSaturatedRandomEngine!G && isFloatingPoint!T) |{ | return randExponential2!(T, G)(*gen); |} | |/// ditto |T randExponential2(T)() | if (isFloatingPoint!T) |{ | return randExponential2!T(rne); |} | |/// |@nogc nothrow @safe version(mir_random_test) unittest |{ | auto v = randExponential2!double; |} | |/// |@nogc nothrow @safe pure version(mir_random_test) unittest |{ | import mir.random.engine.xorshift; | auto gen = Xorshift(1); | auto v = gen.randExponential2!double(); |} | |/++ |$(LINK2 https://dlang.org/phobos/std_random.html#.isUniformRNG, |Tests if T is a Phobos-style uniform RNG.) |+/ |template isPhobosUniformRNG(T) |{ | import std.random: isUniformRNG; | enum bool isPhobosUniformRNG = isUniformRNG!T; |} | |/++ |Extends a Mir-style random number generator to also be a Phobos-style |uniform RNG. If `Engine` is already a Phobos-style uniform RNG, |`PhobosRandom` is just an alias for `Engine`. |+/ |struct PhobosRandom(Engine) if (isRandomEngine!Engine && !isPhobosUniformRNG!Engine)//Doesn't need to be saturated. |{ | alias Uint = EngineReturnType!Engine; | private Engine _engine; | private Uint _front; | | /// Default constructor and copy constructor are disabled. | @disable this(); | /// ditto | @disable this(this); | | /// Forward constructor arguments to `Engine`. 
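|    // Because the adapter reports `isUniformRandom`, a constructed instance can
|    // drive Phobos algorithms directly. A sketch (engine choice and seed are
|    // arbitrary):
|    //
|    //     import std.random: randomShuffle, uniform;
|    //     import mir.random.engine.xorshift: Xorshift1024StarPhi;
|    //     auto rng = PhobosRandom!Xorshift1024StarPhi(unpredictableSeed);
|    //     auto roll = uniform(0, 6, rng);   // Phobos-style bounded draw
|    //     int[5] deck = [1, 2, 3, 4, 5];
|    //     randomShuffle(deck[], rng);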
| this(A...)(auto ref A args) | if (is(typeof(Engine(args)))) | { | _engine = Engine(args); | _front = _engine.opCall(); | } | | /// Phobos-style random interface. | enum bool isUniformRandom = true; | /// ditto | enum Uint min = Uint.min;//Always normalized. | /// ditto | enum Uint max = Engine.max;//Might not be saturated. | /// ditto | enum bool empty = false; | /// ditto | @property Uint front()() const { return _front; } | /// ditto | void popFront()() { _front = _engine.opCall(); } | /// ditto | void seed(A...)(auto ref A args) if (is(typeof(Engine(args)))) | { | _engine.__ctor(args); | _front = _engine.opCall(); | } | | /// Retain support for Mir-style random interface. | enum bool isRandomEngine = true; | /// ditto | enum bool preferHighBits = .preferHighBits!Engine; | /// ditto | Uint opCall()() | { | Uint result = _front; | _front = _engine.opCall(); | return result; | } | | /// | @property ref inout(Engine) engine()() inout @nogc nothrow pure @safe | { | return _engine; | } |} | |/// ditto |template PhobosRandom(Engine) if (isRandomEngine!Engine && isPhobosUniformRNG!Engine) |{ | alias PhobosRandom = Engine; |} | |/// |@nogc nothrow pure @safe version(mir_random_test) unittest |{ | import mir.random.engine.xorshift: Xorshift1024StarPhi; | import std.random: isSeedable, isPhobosUniformRNG = isUniformRNG; | | alias RNG = PhobosRandom!Xorshift1024StarPhi; | | //Phobos interface | static assert(isPhobosUniformRNG!(RNG, ulong)); | static assert(isSeedable!(RNG, ulong)); | //Mir interface | static assert(isSaturatedRandomEngine!RNG); | static assert(is(EngineReturnType!RNG == ulong)); | | auto gen = Xorshift1024StarPhi(1); | auto rng = RNG(1); | assert(gen() == rng.front); | rng.popFront(); | assert(gen() == rng.front); | rng.popFront(); | assert(gen() == rng()); | | gen.__ctor(1); | rng.seed(1); | assert(gen() == rng()); |} ../../../.dub/packages/mir-random-2.2.13/mir-random/source/mir/random/package.d has no code <<<<<< EOF # path=source-mir-sparse-blas-package.lst |/** |License: $(LINK2 http://boost.org/LICENSE_1_0.txt, Boost License 1.0). | |Authors: Ilya Yaroshenko |*/ |module mir.sparse.blas; | |public import mir.sparse.blas.dot; |public import mir.sparse.blas.axpy; |public import mir.sparse.blas.gemv; |public import mir.sparse.blas.gemm; source/mir/sparse/blas/package.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-random-2.2.13-mir-random-source-mir-random-engine-package.lst |/++ |$(SCRIPT inhibitQuickIndex = 1;) |Uniform random engines. | |$(B Sections:) | $(LINK2 #Convenience, Convenience) |• $(LINK2 #Entropy, Entropy) |• $(LINK2 #ThreadLocal, Thread-Local) |• $(LINK2 #Traits, Traits) |• $(LINK2 #CInterface, C Interface) | |$(BOOKTABLE | |$(LEADINGROW Convenience) |$(TR | $(RROW Random, Default random number _engine)) | $(RROW rne, Per-thread uniquely-seeded instance of default `Random`. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS).) | |$(LEADINGROW Entropy) |$(TR | $(RROW unpredictableSeed, Seed of `size_t` using system entropy. May use `unpredictableSeed!UIntType` for unsigned integers of different sizes.) | $(RROW genRandomNonBlocking, Fills a buffer with system entropy, returning number of bytes copied or negative number on error) | $(RROW genRandomBlocking, Fills a buffer with system entropy, possibly waiting if the system believes it has insufficient entropy. 
Returns 0 on success.)) | |$(LEADINGROW Thread-Local (when $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS) enabled)) |$(TR | $(TR $(TDNW $(LREF threadLocal)`!(Engine)`) $(TD Per-thread uniquely-seeded instance of any specified `Engine`. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS).)) | $(TR $(TDNW $(LREF threadLocalPtr)`!(Engine)`) $(TD `@safe` pointer to `threadLocal!Engine`. Always initializes before return. $(I Warning: do not share between threads!))) | $(TR $(TDNW $(LREF threadLocalInitialized)`!(Engine)`) $(TD Explicitly manipulate "is seeded" flag for thread-local instance. Not needed by most library users.)) | $(TR $(TDNW $(LREF setThreadLocalSeed)`!(Engine, A...)`) $(TD Initialize thread-local `Engine` with a known seed rather than a random seed.)) | ) | |$(LEADINGROW Traits) |$(TR | $(RROW EngineReturnType, Get return type of random number _engine's `opCall()`) | $(RROW isRandomEngine, Check if is random number _engine) | $(RROW isSaturatedRandomEngine, Check if random number _engine `G` such that `G.max == EngineReturnType!(G).max`) | $(RROW preferHighBits, Are the high bits of the _engine's output known to have better statistical properties than the low bits?)) | |$(LEADINGROW C Interface) | $(RROW mir_random_engine_ctor, Perform any necessary setup. Automatically called by DRuntime.) | $(RROW mir_random_engine_dtor, Release any resources. Automatically called by DRuntime.) | $(RROW mir_random_genRandomNonBlocking, External name for $(LREF genRandomNonBlocking)) | $(RROW mir_random_genRandomBlocking, External name for $(LREF genRandomBlocking)) |) | |Copyright: Ilya Yaroshenko 2016-. |License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0). |Authors: Ilya Yaroshenko | |Macros: | T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) | RROW = $(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.random.engine; | |version (OSX) | version = Darwin; |else version (iOS) | version = Darwin; |else version (TVOS) | version = Darwin; |else version (WatchOS) | version = Darwin; | |// A secure arc4random implementation that uses some modern algorithm rather |// than ARC4 may be used synonymously with non-blocking system entropy. |version (CRuntime_Bionic) | version = SecureARC4Random; // ChaCha20 |version (Darwin) | version = SecureARC4Random; // AES |version (OpenBSD) | version = SecureARC4Random; // ChaCha20 |version (NetBSD) | version = SecureARC4Random; // ChaCha20 | |// A legacy arc4random should not be used when cryptographic security |// is required but may used for `unpredictableSeed`. |version (CRuntime_UClibc) | version = LegacyARC4Random; // ARC4 |version (FreeBSD) | version = LegacyARC4Random; // ARC4 |version (DragonFlyBSD) | version = LegacyARC4Random; // ARC4 |version (BSD) | version = LegacyARC4Random; // Unknown implementation | |version (SecureARC4Random) | version = AnyARC4Random; |version (LegacyARC4Random) | version = AnyARC4Random; | |version (D_betterC) | private enum bool THREAD_LOCAL_STORAGE_AVAILABLE = false; |else | private enum bool THREAD_LOCAL_STORAGE_AVAILABLE = __traits(compiles, { static size_t x = 0; }); | |import std.traits; | |import mir.random.engine.mersenne_twister; | |/++ |Like `std.traits.ReturnType!T` but it works even if |T.opCall is a function template. |+/ |template EngineReturnType(T) |{ | import std.traits : ReturnType; | static if (is(ReturnType!T)) | alias EngineReturnType = ReturnType!T; | else | alias EngineReturnType = typeof(T.init()); |} | |/++ |Test if T is a random engine. 
|A type should define `enum isRandomEngine = true;` to be a random engine. |+/ |template isRandomEngine(T) |{ | static if (is(typeof(T.isRandomEngine) : bool) && is(typeof(T.init()))) | { | private alias R = typeof(T.init()); | static if (T.isRandomEngine && isUnsigned!R) | enum isRandomEngine = is(typeof({ | enum max = T.max; | static assert(is(typeof(T.max) == R)); | })); | else enum isRandomEngine = false; | } | else enum isRandomEngine = false; |} | |/++ |Test if T is a saturated random-bit generator. |A random number generator is saturated if `T.max == ReturnType!T.max`. |A type should define `enum isRandomEngine = true;` to be a random engine. |+/ |template isSaturatedRandomEngine(T) |{ | static if (isRandomEngine!T) | enum isSaturatedRandomEngine = T.max == EngineReturnType!T.max; | else | enum isSaturatedRandomEngine = false; |} | |/++ |Are the high bits of the engine's output known to have |better statistical properties than the low bits of the |output? This property is set by checking the value of |an optional enum named `preferHighBits`. If the property |is missing it is treated as false. | |This should be specified as true for: | |+/ |template preferHighBits(G) | if (isSaturatedRandomEngine!G) |{ | static if (__traits(compiles, { enum bool e = G.preferHighBits; })) | private enum bool preferHighBits = G.preferHighBits; | else | private enum bool preferHighBits = false; |} | |/* | * Marker indicating it's safe to construct from void | * (i.e. the constructor doesn't depend on the struct | * being in an initially valid state). | * Either checks an explicit flag `_isVoidInitOkay` | * or tests to make sure that the structure contains | * nothing that looks like a pointer or an index into | * an array. Also ensures that there is not an elaborate | * destructor since it could be called when the struct | * is in an invalid state. | * Non-public because we don't want to commit to this | * design. | */ |package template _isVoidInitOkay(G) if (isRandomEngine!G && is(G == struct)) |{ | static if (is(typeof(G._isVoidInitOkay) : bool)) | enum bool _isVoidInitOkay = G._isVoidInitOkay; | else static if (!hasNested!G && !hasElaborateDestructor!G) | { | import std.meta : allSatisfy; | static if (allSatisfy!(isScalarType, FieldTypeTuple!G)) | //All members are scalars. | enum bool _isVoidInitOkay = true; | else static if (FieldTypeTuple!(G).length == 1 && isStaticArray!(FieldTypeTuple!(G)[0])) | //Only has one member which is a static array of scalars. | enum bool _isVoidInitOkay = isScalarType!(typeof(FieldTypeTuple!(G)[0].init[0])); | else | enum bool _isVoidInitOkay = false; | } | else | enum bool _isVoidInitOkay = false; |} |@nogc nothrow pure @safe version(mir_random_test) |{ | import mir.random.engine.mersenne_twister: Mt19937, Mt19937_64; | //Ensure that this property is set for the Mersenne Twister, | //whose internal state is huge enough for this to potentially | //matter: | static assert(_isVoidInitOkay!Mt19937); | static assert(_isVoidInitOkay!Mt19937_64); | //Check that the property is set for a moderately-sized PRNG. | import mir.random.engine.xorshift: Xorshift1024StarPhi; | static assert(_isVoidInitOkay!Xorshift1024StarPhi); | //Check that PRNGs not explicitly marked as void-init safe | //can be inferred as such if they only have scalar fields. 
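|    //A minimal hand-written engine that satisfies the contract described above
|    //(an illustrative sketch, not part of this package; the mixing steps are
|    //SplitMix64-style, chosen only to return something deterministic):
|    struct MinimalEngine
|    {
|        enum bool isRandomEngine = true;
|        enum ulong max = ulong.max; // saturated: `max` equals `ReturnType.max`
|        ulong state;
|        ulong opCall()
|        {
|            ulong z = (state += 0x9E3779B97F4A7C15UL);
|            z = (z ^ (z >>> 30)) * 0xBF58476D1CE4E5B9UL;
|            z = (z ^ (z >>> 27)) * 0x94D049BB133111EBUL;
|            return z ^ (z >>> 31);
|        }
|    }
|    static assert(isSaturatedRandomEngine!MinimalEngine);
|    //A single scalar field, so void-init is inferred as safe:
|    static assert(_isVoidInitOkay!MinimalEngine);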
| import mir.random.engine.pcg: pcg32, pcg32_oneseq; | import mir.random.engine.splitmix: SplitMix64; | static assert(_isVoidInitOkay!pcg32); | static assert(_isVoidInitOkay!pcg32_oneseq); | static assert(_isVoidInitOkay!SplitMix64); | //Check that PRNGs not explicitly marked as void-init safe | //can be inferred as such if their only field is a static | //array of scalars. | import mir.random.engine.xorshift: Xorshift128, Xoroshiro128Plus; | static assert(_isVoidInitOkay!Xorshift128); | static assert(_isVoidInitOkay!Xoroshiro128Plus); |} | |version (D_Ddoc) |{ | /++ | A "good" seed for initializing random number engines. Initializing | with $(D_PARAM unpredictableSeed) makes engines generate different | random number sequences every run. | | Returns: | A single unsigned integer seed value, different on each successive call | +/ | pragma(inline, true) | @property size_t unpredictableSeed() @trusted nothrow @nogc | { | return unpredictableSeed!size_t; | } |} | |/// ditto |pragma(inline, true) |@property T unpredictableSeed(T = size_t)() @trusted nothrow @nogc | if (isUnsigned!T) |{ | import mir.utility: _expect; 0000000| T seed = void; | version (AnyARC4Random) | { | // If we just need 32 bits it's faster to call arc4random() | // than arc4random_buf(&seed, seed.sizeof). | static if (T.sizeof <= uint.sizeof) | seed = cast(T) arc4random(); | else | arc4random_buf(&seed, seed.sizeof); | } 0000000| else if (_expect(genRandomNonBlocking(&seed, seed.sizeof) != T.sizeof, false)) | { | // fallback to old time/thread-based implementation in case of errors 0000000| seed = cast(T) fallbackSeed(); | } 0000000| return seed; |} | |// Old name of `unpredictableSeedOf!T`. Undocumented but |// defined so existing code using mir.random won't break. |deprecated("Use unpredictableSeed!T instead of unpredictableSeedOf!T") |public alias unpredictableSeedOf(T) = unpredictableSeed!T; | |version (mir_random_test) @nogc nothrow @safe unittest |{ | // Check unpredictableSeed syntax works with or without parentheses. | auto a = unpredictableSeed; | auto b = unpredictableSeed!uint; | auto c = unpredictableSeed!ulong; | static assert(is(typeof(a) == size_t)); | static assert(is(typeof(b) == uint)); | static assert(is(typeof(c) == ulong)); | | auto d = unpredictableSeed(); | auto f = unpredictableSeed!uint(); | auto g = unpredictableSeed!ulong(); | static assert(is(typeof(d) == size_t)); | static assert(is(typeof(f) == uint)); | static assert(is(typeof(g) == ulong)); |} | |// Is llvm_readcyclecounter supported on this platform? |// We need to whitelist platforms where it is known to work because if it |// isn't supported it will compile but always return 0. |// https://llvm.org/docs/LangRef.html#llvm-readcyclecounter-intrinsic |version(LDC) |{ | // The only architectures the documentation says are supported are | // x86 and Alpha. x86 uses RDTSC and Alpha uses RPCC. | version(X86_64) version = LLVMReadCycleCounter; | // Do *not* support 32-bit x86 because some x86 processors don't | // support `rdtsc` and because on x86 (but not x86-64) Linux | // `prctl` can disable a process's ability to use `rdtsc`. | else version(Alpha) version = LLVMReadCycleCounter; |} | | |pragma(inline, false) |private ulong fallbackSeed()() |{ | // fallback to old time/thread-based implementation in case of errors | version(LLVMReadCycleCounter) | { | import ldc.intrinsics : llvm_readcyclecounter; | ulong ticks = llvm_readcyclecounter(); | } | else version(D_InlineAsm_X86_64) | { | // RDTSC takes around 22 clock cycles. 
| ulong ticks = void; | asm @nogc nothrow | { | rdtsc; | shl RDX, 32; | xor RDX, RAX; | mov ticks, RDX; | } | } | //else version(D_InlineAsm_X86) | //{ | // // We don't use `rdtsc` with version(D_InlineAsm_X86) because | // // some x86 processors don't support `rdtsc` and because on | // // x86 (but not x86-64) Linux `prctl` can disable a process's | // // ability to use `rdtsc`. | // static assert(0); | //} | else version(Windows) | { | import core.sys.windows.winbase : QueryPerformanceCounter; | ulong ticks = void; | QueryPerformanceCounter(cast(long*)&ticks); | } | else version(Darwin) | { | import core.time : mach_absolute_time; | ulong ticks = mach_absolute_time(); | } | else version(Posix) | { | import core.sys.posix.time : clock_gettime, CLOCK_MONOTONIC, timespec; 0000000| timespec ts = void; 0000000| const tserr = clock_gettime(CLOCK_MONOTONIC, &ts); | // Should never fail. Only allowed arror codes are | // EINVAL if the 1st argument is an invalid clock ID and | // EFAULT if the 2nd argument is an invalid address. 0000000| assert(tserr == 0, "Call to clock_gettime failed."); 0000000| ulong ticks = (cast(ulong) ts.tv_sec << 32) ^ ts.tv_nsec; | } | version(Posix) | { | import core.sys.posix.unistd : getpid; | import core.sys.posix.pthread : pthread_self; 0000000| auto pid = cast(uint) getpid; 0000000| auto tid = cast(uint) pthread_self(); | } | else | version(Windows) | { | import core.sys.windows.winbase : GetCurrentProcessId, GetCurrentThreadId; | auto pid = cast(uint) GetCurrentProcessId; | auto tid = cast(uint) GetCurrentThreadId; | } 0000000| ulong k = ((cast(ulong)pid << 32) ^ tid) + ticks; 0000000| k ^= k >> 33; 0000000| k *= 0xff51afd7ed558ccd; 0000000| k ^= k >> 33; 0000000| k *= 0xc4ceb9fe1a85ec53; 0000000| k ^= k >> 33; 0000000| return k; |} | |/// |@safe version(mir_random_test) unittest |{ | auto rnd = Random(unpredictableSeed); | auto n = rnd(); | static assert(is(typeof(n) == size_t)); |} | |/++ |The "default", "favorite", "suggested" random number generator type on |the current platform. It is an alias for one of the |generators. You may want to use it if (1) you need to generate some |nice random numbers, and (2) you don't care for the minutiae of the |method being used. |+/ |static if (is(size_t == uint)) | alias Random = Mt19937; |else | alias Random = Mt19937_64; | |/// |version(mir_random_test) unittest |{ | import std.traits; | static assert(isSaturatedRandomEngine!Random); | static assert(is(EngineReturnType!Random == size_t)); |} | |static if (THREAD_LOCAL_STORAGE_AVAILABLE) |{ | /++ | Thread-local instance of the default $(LREF Random) allocated and seeded independently | for each thread. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS). 
| +/ | alias rne = threadLocal!Random; | /// | @nogc nothrow @safe version(mir_random_test) unittest | { | import mir.random; | import std.complex; | | auto c = complex(rne.rand!real, rne.rand!real); | | int[10] array; | foreach (ref e; array) | e = rne.rand!int; | auto picked = array[rne.randIndex(array.length)]; | } | | private static struct TL(Engine) | if (isSaturatedRandomEngine!Engine && is(Engine == struct)) | { | static bool initialized; | static if (_isVoidInitOkay!Engine) | static Engine engine = void; | else static if (__traits(compiles, { Engine defaultConstructed; })) | static Engine engine; | else | static Engine engine = Engine.init; | | static if (is(ucent) && is(typeof((ucent t) => Engine(t)))) | alias seed_t = ucent; | else static if (is(typeof((ulong t) => Engine(t)))) | alias seed_t = ulong; | else static if (is(typeof((uint t) => Engine(t)))) | alias seed_t = uint; | else | alias seed_t = EngineReturnType!Engine; | | pragma(inline, false) // Usually called only once per thread. | private static void reseed() | { 0000000| engine.__ctor(unpredictableSeed!(seed_t)); 0000000| initialized = true; | } | } | /++ | `threadLocal!Engine` returns a reference to a thread-local instance of | the specified random number generator allocated and seeded uniquely | for each thread. Requires $(LINK2 https://en.wikipedia.org/wiki/Thread-local_storage, TLS). | | `threadLocalPtr!Engine` is a pointer to the area of thread-local | storage used by `threadLocal!Engine`. This function is provided because | the compiler can infer it is `@safe`, unlike `&(threadLocal!Engine)`. | Like `threadLocal!Engine` this function will auto-initialize the engine. | $(I Do not share pointers returned by threadLocalPtr between | threads!) | | `threadLocalInitialized!Engine` is a low-level way to explicitly change | the "initialized" flag used by `threadLocal!Engine` to determine whether | the Engine needs to be seeded. Setting this to `false` gives a way of | forcing the next call to `threadLocal!Engine` to reseed. In general this | is unnecessary but there are some specialized use cases where users have | requested this ability. | +/ | @property ref Engine threadLocal(Engine)() | if (isSaturatedRandomEngine!Engine && is(Engine == struct)) | { | version (DigitalMars) | pragma(inline);//DMD may fail to inline this. | else | pragma(inline, true); | import mir.utility: _expect; 0000000| if (_expect(!TL!Engine.initialized, false)) | { 0000000| TL!Engine.reseed(); | } 0000000| return TL!Engine.engine; | } | /// ditto | @property Engine* threadLocalPtr(Engine)() | if (isSaturatedRandomEngine!Engine && is(Engine == struct)) | { | version (DigitalMars) | pragma(inline);//DMD may fail to inline this. | else | pragma(inline, true); | import mir.utility: _expect; | if (_expect(!TL!Engine.initialized, false)) | { | TL!Engine.reseed(); | } | return &TL!Engine.engine; | } | /// ditto | @property ref bool threadLocalInitialized(Engine)() | if (isSaturatedRandomEngine!Engine && is(Engine == struct)) | { | version (DigitalMars) | pragma(inline);//DMD may fail to inline this. 
| else | pragma(inline, true); | return TL!Engine.initialized; | } | /// | @nogc nothrow @safe version(mir_random_test) unittest | { | import mir.random; | import mir.random.engine.xorshift; | | alias gen = threadLocal!Xorshift1024StarPhi; | double x = gen.rand!double; | size_t i = gen.randIndex(100u); | ulong a = gen.rand!ulong; | } | /// | @nogc nothrow @safe version(mir_random_test) unittest | { | import mir.random; | //If you need a pointer to the engine, getting it like this is @safe: | Random* ptr = threadLocalPtr!Random; | } | /// | @nogc nothrow @safe version(mir_random_test) unittest | { | import mir.random; | import mir.random.engine.xorshift; | //If you need to mark the engine as uninitialized to force a reseed, | //you can do it like this: | threadLocalInitialized!Xorshift1024StarPhi = false; | } | /// | @nogc nothrow @safe version(mir_random_test) unittest | { | import mir.random; | import mir.random.engine.mersenne_twister; | //You can mark the engine as already initialized to skip | //automatic seeding then initialize it yourself, for instance | //if you want to use a known seed rather than a random one. | threadLocalInitialized!Mt19937 = true; | immutable uint[4] customSeed = [0x123, 0x234, 0x345, 0x456]; | threadLocal!Mt19937.__ctor(customSeed); | foreach(_; 0..999) | threadLocal!Mt19937.rand!uint; | assert(3460025646u == threadLocal!Mt19937.rand!uint); | } | /// | @nogc nothrow @safe version(mir_random_test) unittest | { | import mir.random; | import mir.random.engine.xorshift; | | alias gen = threadLocal!Xorshift1024StarPhi; | | //If you want to you can call the generator's opCall instead of using | //rand!T but it is somewhat clunky because of the ambiguity of | //@property syntax: () looks like optional function parentheses. | static assert(!__traits(compiles, {ulong x0 = gen();}));//<-- Won't work | static assert(is(typeof(gen()) == Xorshift1024StarPhi));//<-- because the type is this. | ulong x1 = gen.opCall();//<-- This works though. | ulong x2 = gen()();//<-- This also works. | | //But instead of any of those you should really just use gen.rand!T. | ulong x3 = gen.rand!ulong; | } |// /// |// @nogc nothrow pure @safe version(mir_random_test) unittest |// { |// //If you want something like Phobos std.random.rndGen and |// //don't care about the specific algorithm you can do this: |// alias rndGen = threadLocal!Random; |// } | | @nogc nothrow @system version(mir_random_test) unittest | { | //Verify Returns same instance every time per thread. | import mir.random; | import mir.random.engine.xorshift; | | Xorshift1024StarPhi* addr = &(threadLocal!Xorshift1024StarPhi()); | Xorshift1024StarPhi* sameAddr = &(threadLocal!Xorshift1024StarPhi()); | assert(addr is sameAddr); | assert(sameAddr is threadLocalPtr!Xorshift1024StarPhi); | } | | /++ | Sets or resets the _seed of `threadLocal!Engine` using the given arguments. | It is not necessary to call this except if you wish to ensure the | PRNG uses a known _seed. 
| +/ | void setThreadLocalSeed(Engine, A...)(auto ref A seed) | if (isSaturatedRandomEngine!Engine && is(Engine == struct) | && A.length >= 1 && is(typeof((ref A a) => Engine(a)))) | { | TL!Engine.initialized = true; | TL!Engine.engine.__ctor(seed); | } | /// | @nogc nothrow @system version(mir_random_test) unittest | { | import mir.random; | | alias rnd = threadLocal!Random; | | setThreadLocalSeed!Random(123); | immutable float x = rnd.rand!float; | | assert(x != rnd.rand!float); | | setThreadLocalSeed!Random(123); | immutable float y = rnd.rand!float; | | assert(x == y); | } |} |else |{ | static assert(!THREAD_LOCAL_STORAGE_AVAILABLE); | | @property ref Random rne()() | { | static assert(0, "Thread-local storage not available!"); | } | | template threadLocal(T) | { | static assert(0, "Thread-local storage not available!"); | } | | template threadLocalPtr(T) | { | static assert(0, "Thread-local storage not available!"); | } | | template threadLocalInitialized(T) | { | static assert(0, "Thread-local storage not available!"); | } | | template setThreadLocalSeed(T, A...) | { | static assert(0, "Thread-local storage not available!"); | } |} | |version(linux) |{ | import mir.linux._asm.unistd; | enum bool LINUX_NR_GETRANDOM = (__traits(compiles, {enum e = NR_getrandom;})); | //If X86_64 or X86 are missing there is a problem with the library. | static if (!LINUX_NR_GETRANDOM) | { | version (X86_64) | static assert(0, "Missing linux syscall constants!"); | version (X86) | static assert(0, "Missing linux syscall constants!"); | } |} |else | enum bool LINUX_NR_GETRANDOM = false; | |static if (LINUX_NR_GETRANDOM) |{ | // getrandom was introduced in Linux 3.17 | private __gshared bool getRandomFailedENOSYS = false; | | private extern(C) int syscall(size_t ident, size_t n, size_t arg1, size_t arg2) @nogc nothrow; | | /* | * Flags for getrandom(2) | * | * GRND_NONBLOCK Don't block and return EAGAIN instead | * GRND_RANDOM Use the /dev/random pool instead of /dev/urandom | */ | private enum GRND_NONBLOCK = 0x0001; | private enum GRND_RANDOM = 0x0002; | | private enum GETRANDOM = NR_getrandom; | | /* | http://man7.org/linux/man-pages/man2/getrandom.2.html | If the urandom source has been initialized, reads of up to 256 bytes | will always return as many bytes as requested and will not be | interrupted by signals. No such guarantees apply for larger buffer | sizes. | */ | private ptrdiff_t genRandomImplSysBlocking()(scope void* ptr, size_t len) @nogc nothrow @system | { 0000000| while (len > 0) | { 0000000| auto res = syscall(GETRANDOM, cast(size_t) ptr, len, 0); 0000000| if (res >= 0) | { 0000000| len -= res; 0000000| ptr += res; | } | else | { 0000000| return res; | } | } 0000000| return 0; | } | | /* | * If the GRND_NONBLOCK flag is set, then | * getrandom() does not block in these cases, but instead | * immediately returns -1 with errno set to EAGAIN. | */ | private ptrdiff_t genRandomImplSysNonBlocking()(scope void* ptr, size_t len) @nogc nothrow @system | { 0000000| return syscall(GETRANDOM, cast(size_t) ptr, len, GRND_NONBLOCK); | } |} | |version(AnyARC4Random) |extern(C) private @nogc nothrow |{ | void arc4random_buf(scope void* buf, size_t nbytes) @system; | uint arc4random() @trusted; |} | |version(Darwin) |{ | //On Darwin /dev/random is identical to /dev/urandom (neither blocks | //when there is low system entropy) so there is no point mucking | //about with file descriptors. Just use arc4random_buf for both. 
|} |else version(Posix) |{ | import core.stdc.stdio : fclose, feof, ferror, fopen, fread; | alias IOType = typeof(fopen("a", "b")); | private __gshared IOType fdRandom; | version (SecureARC4Random) | { | //Don't need /dev/urandom if we have arc4random_buf. | } | else | private __gshared IOType fdURandom; | | | /* The /dev/random device is a legacy interface which dates back to a | time where the cryptographic primitives used in the implementation of | /dev/urandom were not widely trusted. It will return random bytes | only within the estimated number of bits of fresh noise in the | entropy pool, blocking if necessary. /dev/random is suitable for | applications that need high quality randomness, and can afford | indeterminate delays. | | When the entropy pool is empty, reads from /dev/random will block | until additional environmental noise is gathered. | */ | private ptrdiff_t genRandomImplFileBlocking()(scope void* ptr, size_t len) @nogc nothrow @system | { 0000000| if (fdRandom is null) | { 0000000| fdRandom = fopen("/dev/random", "r"); 0000000| if (fdRandom is null) 0000000| return -1; | } | 0000000| while (len > 0) | { 0000000| auto res = fread(ptr, 1, len, fdRandom); 0000000| len -= res; 0000000| ptr += res; | // check for possible permanent errors 0000000| if (len != 0) | { 0000000| if (fdRandom.ferror) 0000000| return -1; | 0000000| if (fdRandom.feof) 0000000| return -1; | } | } | 0000000| return 0; | } |} | |version (SecureARC4Random) |{ | //Don't need /dev/urandom if we have arc4random_buf. |} |else version(Posix) |{ | /** | When read, the /dev/urandom device returns random bytes using a | pseudorandom number generator seeded from the entropy pool. Reads | from this device do not block (i.e., the CPU is not yielded), but can | incur an appreciable delay when requesting large amounts of data. | When read during early boot time, /dev/urandom may return data prior | to the entropy pool being initialized. | */ | private ptrdiff_t genRandomImplFileNonBlocking()(scope void* ptr, size_t len) @nogc nothrow @system | { 0000000| if (fdURandom is null) | { 0000000| fdURandom = fopen("/dev/urandom", "r"); 0000000| if (fdURandom is null) 0000000| return -1; | } | 0000000| auto res = fread(ptr, 1, len, fdURandom); | // check for possible errors 0000000| if (res != len) | { 0000000| if (fdURandom.ferror) 0000000| return -1; | 0000000| if (fdURandom.feof) 0000000| return -1; | } 0000000| return res; | } |} | |version(Windows) |{ | // the wincrypt headers in druntime are broken for x64! 
| private alias ULONG_PTR = size_t; // uint in druntime | private alias BOOL = bool; | private alias DWORD = uint; | private alias LPCWSTR = wchar*; | private alias PBYTE = ubyte*; | private alias HCRYPTPROV = ULONG_PTR; | private alias LPCSTR = const(char)*; | | private extern(Windows) BOOL CryptGenRandom(HCRYPTPROV, DWORD, PBYTE) @nogc @safe nothrow; | private extern(Windows) BOOL CryptAcquireContextA(HCRYPTPROV*, LPCSTR, LPCSTR, DWORD, DWORD) @nogc nothrow; | private extern(Windows) BOOL CryptAcquireContextW(HCRYPTPROV*, LPCWSTR, LPCWSTR, DWORD, DWORD) @nogc nothrow; | private extern(Windows) BOOL CryptReleaseContext(HCRYPTPROV, ULONG_PTR) @nogc nothrow; | | private __gshared ULONG_PTR hProvider; | | private auto initGetRandom()() @nogc @trusted nothrow | { | import core.sys.windows.winbase : GetLastError; | import core.sys.windows.winerror : NTE_BAD_KEYSET; | import core.sys.windows.wincrypt : PROV_RSA_FULL, CRYPT_NEWKEYSET, CRYPT_VERIFYCONTEXT, CRYPT_SILENT; | | // https://msdn.microsoft.com/en-us/library/windows/desktop/aa379886(v=vs.85).aspx | // For performance reasons, we recommend that you set the pszContainer | // parameter to NULL and the dwFlags parameter to CRYPT_VERIFYCONTEXT | // in all situations where you do not require a persisted key. | // CRYPT_SILENT is intended for use with applications for which the UI cannot be displayed by the CSP. | if (!CryptAcquireContextW(&hProvider, null, null, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT | CRYPT_SILENT)) | { | if (GetLastError() == NTE_BAD_KEYSET) | { | // Attempt to create default container | if (!CryptAcquireContextA(&hProvider, null, null, PROV_RSA_FULL, CRYPT_NEWKEYSET | CRYPT_SILENT)) | return 1; | } | else | { | return 1; | } | } | | return 0; | } |} | |/++ |Constructs the mir random seed generators. |This constructor needs to be called once $(I before) |other calls in `mir.random.engine`. | |Automatically called by DRuntime. |+/ |extern(C) void mir_random_engine_ctor() @system nothrow @nogc |{ | version(Windows) | { | if (hProvider == 0) | initGetRandom; | } |} | |/++ |Destructs the mir random seed generators. | |Automatically called by DRuntime. |+/ |extern(C) void mir_random_engine_dtor() @system nothrow @nogc |{ | version(Windows) | { | if (hProvider > 0) | CryptReleaseContext(hProvider, 0); | } | else | version(Darwin) | { | | } | else | version(Posix) | { 0000000| if (fdRandom !is null) 0000000| fdRandom.fclose; | | version (SecureARC4Random) | { | //Don't need /dev/urandom if we have arc4random_buf. | } 0000000| else if (fdURandom !is null) 0000000| fdURandom.fclose; | } |} | | |version(D_BetterC) |{ | pragma(crt_constructor) | extern(C) void mir_random_engine_ctor_() @system nothrow @nogc | { | mir_random_engine_ctor(); | } | | pragma(crt_destructor) | extern(C) void mir_random_engine_dtor_() @system nothrow @nogc | { | mir_random_engine_dtor(); | } |} |else |{ | /// Automatically calls the extern(C) module constructor | shared static this() | { 1| mir_random_engine_ctor(); | } | | /// Automatically calls the extern(C) module destructor | shared static ~this() | { 0000000| mir_random_engine_dtor(); | } |} | |/++ |Fills a buffer with random data. |If not enough entropy has been gathered, it will block. | |Note that on Mac OS X this method will never block. | |Params: | ptr = pointer to the buffer to fill | len = length of the buffer (in bytes) | |Returns: | A non-zero integer if an error occurred. 
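|
|A typical use is gathering seed material once at startup: fill a small
|fixed-size buffer and check the return code (the 32-byte size below is an
|arbitrary choice for illustration):
|---
|ubyte[32] seedMaterial;
|if (genRandomBlocking(seedMaterial[]) != 0)
|    assert(0, "system entropy source failed");
|---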
|+/ |extern(C) ptrdiff_t mir_random_genRandomBlocking(scope void* ptr , size_t len) @nogc nothrow @system |{ | version(Windows) | { | static if (DWORD.max >= size_t.max) | while(!CryptGenRandom(hProvider, len, cast(PBYTE) ptr)) {} | else | while (len != 0) | { | import mir.utility : min; | const n = min(DWORD.max, len); | if (CryptGenRandom(hProvider, cast(DWORD) n, cast(PBYTE) ptr)) | { | len -= n; | } | } | return 0; | } | else version (Darwin) | { | arc4random_buf(ptr, len); | return 0; | } | else | { | static if (LINUX_NR_GETRANDOM) 0000000| if (!getRandomFailedENOSYS) // harmless data race | { | import core.stdc.errno; 0000000| ptrdiff_t result = genRandomImplSysBlocking(ptr, len); 0000000| if (result >= 0) 0000000| return result; 0000000| if (errno != ENOSYS) 0000000| return result; 0000000| getRandomFailedENOSYS = true; // harmless data race | } 0000000| return genRandomImplFileBlocking(ptr, len); | } |} | |/// ditto |alias genRandomBlocking = mir_random_genRandomBlocking; | |/// ditto |ptrdiff_t genRandomBlocking()(scope ubyte[] buffer) @nogc nothrow @trusted |{ | pragma(inline, true); | return mir_random_genRandomBlocking(buffer.ptr, buffer.length); |} | |/// |@safe nothrow version(mir_random_test) unittest |{ | ubyte[] buf = new ubyte[10]; | genRandomBlocking(buf); | | int sum; | foreach (b; buf) | sum += b; | | assert(sum > 0, "Only zero points generated"); |} | |@nogc nothrow @safe version(mir_random_test) unittest |{ | ubyte[10] buf; | genRandomBlocking(buf); | | int sum; | foreach (b; buf) | sum += b; | | assert(sum > 0, "Only zero points generated"); |} | |/++ |Fills a buffer with random data. |If not enough entropy has been gathered, it won't block. |Hence the error code should be inspected. | |On Linux >= 3.17 genRandomNonBlocking is guaranteed to succeed for 256 bytes and |fewer. | |On Mac OS X, OpenBSD, and NetBSD genRandomNonBlocking is guaranteed to |succeed for any number of bytes. 
| |Params: | buffer = the buffer to fill | len = length of the buffer (in bytes) | |Returns: | The number of bytes filled - a negative number if an error occurred |+/ |extern(C) size_t mir_random_genRandomNonBlocking(scope void* ptr, size_t len) @nogc nothrow @system |{ | version(Windows) | { | static if (DWORD.max < size_t.max) | if (len > DWORD.max) | len = DWORD.max; | if (!CryptGenRandom(hProvider, cast(DWORD) len, cast(PBYTE) ptr)) | return -1; | return len; | } | else version(SecureARC4Random) | { | arc4random_buf(ptr, len); | return len; | } | else | { | static if (LINUX_NR_GETRANDOM) 0000000| if (!getRandomFailedENOSYS) // harmless data race | { | import core.stdc.errno; 0000000| ptrdiff_t result = genRandomImplSysNonBlocking(ptr, len); 0000000| if (result >= 0) 0000000| return result; 0000000| if (errno != ENOSYS) 0000000| return result; 0000000| getRandomFailedENOSYS = true; // harmless data race | } 0000000| return genRandomImplFileNonBlocking(ptr, len); | } |} |/// ditto |alias genRandomNonBlocking = mir_random_genRandomNonBlocking; |/// ditto |size_t genRandomNonBlocking()(scope ubyte[] buffer) @nogc nothrow @trusted |{ | pragma(inline, true); | return mir_random_genRandomNonBlocking(buffer.ptr, buffer.length); |} | |/// |@safe nothrow version(mir_random_test) unittest |{ | ubyte[] buf = new ubyte[10]; | genRandomNonBlocking(buf); | | int sum; | foreach (b; buf) | sum += b; | | assert(sum > 0, "Only zero points generated"); |} | |@nogc nothrow @safe |version(mir_random_test) unittest |{ | ubyte[10] buf; | genRandomNonBlocking(buf); | | int sum; | foreach (b; buf) | sum += b; | | assert(sum > 0, "Only zero points generated"); |} ../../../.dub/packages/mir-random-2.2.13/mir-random/source/mir/random/engine/package.d is 1% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-math-sum.lst |/++ |This module contains summation algorithms. | |License: $(LINK2 http://boost.org/LICENSE_1_0.txt, Boost License 1.0). | |Authors: Ilya Yaroshenko | |Copyright: Copyright © 2015-, Ilya Yaroshenko |+/ |module mir.math.sum; | |/// |version(mir_test) |unittest |{ | import mir.ndslice.slice: sliced; | import mir.ndslice.topology: map; | auto ar = [1, 1e100, 1, -1e100].sliced.map!"a * 10_000"; | const r = 20_000; | assert(r == ar.sum!"kbn"); | assert(r == ar.sum!"kb2"); | assert(r == ar.sum!"precise"); |} | |/// |version(mir_test) |unittest |{ | import mir.ndslice.slice: sliced, slicedField; | import mir.ndslice.topology: map, iota, retro; | import mir.ndslice.concatenation: concatenation; | import mir.math.common; | auto ar = 1000 | .iota | .map!(n => 1.7L.pow(n+1) - 1.7L.pow(n)) | ; | real d = 1.7L.pow(1000); | assert(sum!"precise"(concatenation(ar, [-d].sliced).slicedField) == -1); | assert(sum!"precise"(ar.retro, -d) == -1); |} | |/++ |`Naive`, `Pairwise` and `Kahan` algorithms can be used for user defined types. 
|+/ |version(mir_test) |unittest |{ | import std.traits : isFloatingPoint; | static struct Quaternion(F) | if (isFloatingPoint!F) | { | F[4] rijk; | | /// + and - operator overloading | Quaternion opBinary(string op)(auto ref const Quaternion rhs) const | if (op == "+" || op == "-") | { | Quaternion ret ; | foreach (i, ref e; ret.rijk) | mixin("e = rijk[i] "~op~" rhs.rijk[i];"); | return ret; | } | | /// += and -= operator overloading | Quaternion opOpAssign(string op)(auto ref const Quaternion rhs) | if (op == "+" || op == "-") | { | foreach (i, ref e; rijk) | mixin("e "~op~"= rhs.rijk[i];"); | return this; | } | | ///constructor with single FP argument | this(F f) | { | rijk[] = f; | } | | ///assigment with single FP argument | void opAssign(F f) | { | rijk[] = f; | } | } | | Quaternion!double q, p, r; | q.rijk = [0, 1, 2, 4]; | p.rijk = [3, 4, 5, 9]; | r.rijk = [3, 5, 7, 13]; | | assert(r == [p, q].sum!"naive"); | assert(r == [p, q].sum!"pairwise"); | assert(r == [p, q].sum!"kahan"); |} | |/++ |All summation algorithms available for complex numbers. |+/ |version(mir_test) |unittest |{ | cdouble[] ar = [1.0 + 2i, 2 + 3i, 3 + 4i, 4 + 5i]; | cdouble r = 10 + 14i; | assert(r == ar.sum!"fast"); | assert(r == ar.sum!"naive"); | assert(r == ar.sum!"pairwise"); | assert(r == ar.sum!"kahan"); | version(LDC) // DMD Internal error: backend/cgxmm.c 628 | { | assert(r == ar.sum!"kbn"); | assert(r == ar.sum!"kb2"); | } | assert(r == ar.sum!"precise"); |} | |/// |version(mir_test) |@safe pure nothrow unittest |{ | import mir.ndslice.topology: repeat, iota; | | //simple integral summation | assert(sum([ 1, 2, 3, 4]) == 10); | | //with initial value | assert(sum([ 1, 2, 3, 4], 5) == 15); | | //with integral promotion | assert(sum([false, true, true, false, true]) == 3); | assert(sum(ubyte.max.repeat(100)) == 25_500); | | //The result may overflow | assert(uint.max.repeat(3).sum() == 4_294_967_293U ); | //But a seed can be used to change the summation primitive | assert(uint.max.repeat(3).sum(ulong.init) == 12_884_901_885UL); | | //Floating point summation | assert(sum([1.0, 2.0, 3.0, 4.0]) == 10); | | //Type overriding | static assert(is(typeof(sum!double([1F, 2F, 3F, 4F])) == double)); | static assert(is(typeof(sum!double([1F, 2F, 3F, 4F], 5F)) == double)); | assert(sum([1F, 2, 3, 4]) == 10); | assert(sum([1F, 2, 3, 4], 5F) == 15); | | //Force pair-wise floating point summation on large integers | import mir.math : approxEqual; | assert(iota!long([4096], uint.max / 2).sum(0.0) | .approxEqual((uint.max / 2) * 4096.0 + 4096.0 * 4096.0 / 2)); |} | |/// Precise summation |version(mir_test) |nothrow @nogc unittest |{ | import mir.ndslice.topology: iota, map; | import core.stdc.tgmath: pow; | assert(iota(1000).map!(n => 1.7L.pow(real(n)+1) - 1.7L.pow(real(n))) | .sum!"precise" == -1 + 1.7L.pow(1000.0L)); |} | |/// Precise summation with output range |version(mir_test) |nothrow @nogc unittest |{ | import mir.ndslice.topology: iota, map; | import mir.math.common; | auto r = iota(1000).map!(n => 1.7L.pow(n+1) - 1.7L.pow(n)); | Summator!(real, Summation.precise) s = 0.0; | s.put(r); | s -= 1.7L.pow(1000); | assert(s.sum() == -1); |} | |/// Precise summation with output range |version(mir_test) |nothrow @nogc unittest |{ | import mir.math.common; | float M = 2.0f ^^ (float.max_exp-1); | double N = 2.0 ^^ (float.max_exp-1); | auto s = Summator!(float, Summation.precise)(0); | s += M; | s += M; | assert(float.infinity == s.sum()); //infinity | auto e = cast(Summator!(double, Summation.precise)) s; | assert(e.sum() 
< double.infinity); | assert(N+N == e.sum()); //finite number |} | |/// Moving mean |version(mir_test) |unittest |{ | import mir.ndslice.topology: linspace; | import mir.math.sum; | import mir.array.allocation: array; | | class MovingAverage | { | Summator!(double, Summation.precise) summator; | double[] circularBuffer; | size_t frontIndex; | | double avg() @property const | { | return summator.sum() / circularBuffer.length; | } | | this(double[] buffer) | { | assert(buffer.length); | circularBuffer = buffer; | summator = 0; | summator.put(buffer); | } | | ///operation without rounding | void put(double x) | { | import mir.utility: swap; | summator += x; | swap(circularBuffer[frontIndex++], x); | summator -= x; | frontIndex %= circularBuffer.length; | } | } | | /// ma always keeps precise average of last 1000 elements | auto ma = new MovingAverage(linspace!double([1000], [0.0, 999]).array); | assert(ma.avg == (1000 * 999 / 2) / 1000.0); | /// move by 10 elements | foreach(x; linspace!double([10], [1000.0, 1009.0])) | ma.put(x); | assert(ma.avg == (1010 * 1009 / 2 - 10 * 9 / 2) / 1000.0); |} | |version(X86) | version = X86_Any; |version(X86_64) | version = X86_Any; | |/++ |SIMD Vectors |Bugs: ICE 1662 (dmd only) |+/ |version(LDC) |version(X86_Any) |version(mir_test) |unittest |{ | import core.simd; | import std.meta : AliasSeq; | double2 a = 1, b = 2, c = 3, d = 6; | with(Summation) | { | foreach (algo; AliasSeq!(naive, fast, pairwise, kahan)) | { | assert([a, b, c].sum!algo.array == d.array); | assert([a, b].sum!algo(c).array == d.array); | } | } |} | |import std.traits; |private alias AliasSeq(T...) = T; |import mir.internal.utility: Iota, isComplex; |import mir.math.common: fabs; | |private alias isNaN = x => x != x; |private alias isFinite = x => x.fabs < x.infinity; |private alias isInfinity = x => x.fabs == x.infinity; | | |private template chainSeq(size_t n) |{ | static if (n) | alias chainSeq = AliasSeq!(n, chainSeq!(n / 2)); | else | alias chainSeq = AliasSeq!(); |} | |/++ |Summation algorithms. |+/ |enum Summation |{ | /++ | Performs `pairwise` summation for floating point based types and `fast` summation for integral based types. | +/ | appropriate, | | /++ | $(WEB en.wikipedia.org/wiki/Pairwise_summation, Pairwise summation) algorithm. | +/ | pairwise, | | /++ | Precise summation algorithm. | The value of the sum is rounded to the nearest representable | floating-point number using the $(LUCKY round-half-to-even rule). | The result can differ from the exact value on `X86`, `nextDown(proir) <= result && result <= nextUp(proir)`. | The current implementation re-establish special value semantics across iterations (i.e. handling ±inf). | | References: $(LINK2 http://www.cs.cmu.edu/afs/cs/project/quake/public/papers/robust-arithmetic.ps, | "Adaptive Precision Floating-Point Arithmetic and Fast Robust Geometric Predicates", Jonathan Richard Shewchuk), | $(LINK2 http://bugs.python.org/file10357/msum4.py, Mark Dickinson's post at bugs.python.org). | +/ | | /+ | Precise summation function as msum() by Raymond Hettinger in | , | enhanced with the exact partials sum and roundoff from Mark | Dickinson's post at . | See those links for more details, proofs and other references. | IEEE 754R floating point semantics are assumed. | +/ | precise, | | /++ | $(WEB en.wikipedia.org/wiki/Kahan_summation, Kahan summation) algorithm. 
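|
|    A concrete case where the compensation term matters (illustrative; the
|    exact figures are not asserted):
|    ---
|    float direct = 0;
|    foreach (_; 0 .. 10_000)
|        direct += 0.1f;             // accumulates one rounding error per step
|    auto s = Summator!(float, Summation.kahan)(0);
|    foreach (_; 0 .. 10_000)
|        s.put(0.1f);
|    // s.sum() lands far closer to the exact total of 10_000 copies of
|    // float(0.1) than `direct` does.
|    ---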
| +/ | /+ | --------------------- | s := x[1] | c := 0 | FOR k := 2 TO n DO | y := x[k] - c | t := s + y | c := (t - s) - y | s := t | END DO | --------------------- | +/ | kahan, | | /++ | $(LUCKY Kahan-Babuška-Neumaier summation algorithm). `KBN` gives more accurate results then `Kahan`. | +/ | /+ | --------------------- | s := x[1] | c := 0 | FOR i := 2 TO n DO | t := s + x[i] | IF ABS(s) >= ABS(x[i]) THEN | c := c + ((s-t)+x[i]) | ELSE | c := c + ((x[i]-t)+s) | END IF | s := t | END DO | s := s + c | --------------------- | +/ | kbn, | | /++ | $(LUCKY Generalized Kahan-Babuška summation algorithm), order 2. `KB2` gives more accurate results then `Kahan` and `KBN`. | +/ | /+ | --------------------- | s := 0 ; cs := 0 ; ccs := 0 | FOR j := 1 TO n DO | t := s + x[i] | IF ABS(s) >= ABS(x[i]) THEN | c := (s-t) + x[i] | ELSE | c := (x[i]-t) + s | END IF | s := t | t := cs + c | IF ABS(cs) >= ABS(c) THEN | cc := (cs-t) + c | ELSE | cc := (c-t) + cs | END IF | cs := t | ccs := ccs + cc | END FOR | RETURN s+cs+ccs | --------------------- | +/ | kb2, | | /++ | Naive algorithm (one by one). | +/ | naive, | | /++ | SIMD optimized summation algorithm. | +/ | fast, |} | |/++ |Output range for summation. |+/ |struct Summator(T, Summation summation) | if (isMutable!T) |{ | static if (is(T == class) || is(T == interface) || hasElaborateAssign!T) | static assert (summation == Summation.naive, | "Classes, interfaces, and structures with " | ~ "elaborate constructor support only naive summation."); | | static if (summation == Summation.fast) | { | version (LDC) | { | import ldc.attributes: fastmath; | alias attr = fastmath; | } | else | { | alias attr = AliasSeq!(); | } | } | else | { | alias attr = AliasSeq!(); | } | | @attr: | | static if (summation != Summation.pairwise) | @disable this(); | | static if (summation == Summation.pairwise) | private enum bool fastPairwise = | is(F == float) || | is(F == double) || | is(F == cfloat) || | is(F == cdouble) || | is(F : __vector(W[N]), W, size_t N); | //false; | | alias F = T; | | static if (summation == Summation.precise) | { | import std.internal.scopebuffer; | import mir.math.ieee: signbit; | private: | enum F M = (cast(F)(2)) ^^ (T.max_exp - 1); | F[16] scopeBufferArray = 0; | ScopeBuffer!F partials; | //sum for NaN and infinity. | F s; | //Overflow Degree. Count of 2^^F.max_exp minus count of -(2^^F.max_exp) | sizediff_t o; | | | /++ | Compute the sum of a list of nonoverlapping floats. | On input, partials is a list of nonzero, nonspecial, | nonoverlapping floats, strictly increasing in magnitude, but | possibly not all having the same sign. | On output, the sum of partials gives the error in the returned | result, which is correctly rounded (using the round-half-to-even | rule). | Two floating point values x and y are non-overlapping if the least significant nonzero | bit of x is more significant than the most significant nonzero bit of y, or vice-versa. | +/ | static F partialsReduce(F s, in F[] partials) | in | { | debug(numeric) assert(!partials.length || .isFinite(s)); | } | do | { | bool _break; | foreach_reverse (i, y; partials) | { | s = partialsReducePred(s, y, i ? 
partials[i-1] : 0, _break); | if (_break) | break; | debug(numeric) assert(.isFinite(s)); | } | return s; | } | | static F partialsReducePred(F s, F y, F z, out bool _break) | out(result) | { | debug(numeric) assert(.isFinite(result)); | } | do | { | F x = s; | s = x + y; | F d = s - x; | F l = y - d; | debug(numeric) | { | assert(.isFinite(x)); | assert(.isFinite(y)); | assert(.isFinite(s)); | assert(fabs(y) < fabs(x)); | } | if (l) | { | //Make half-even rounding work across multiple partials. | //Needed so that sum([1e-16, 1, 1e16]) will round-up the last | //digit to two instead of down to zero (the 1e-16 makes the 1 | //slightly closer to two). Can guarantee commutativity. | if (z && !signbit(l * z)) | { | l *= 2; | x = s + l; | F t = x - s; | if (l == t) | s = x; | } | _break = true; | } | return s; | } | | //Returns corresponding infinity if is overflow and 0 otherwise. | F overflow()() const | { | if (o == 0) | return 0; | if (partials.length && (o == -1 || o == 1) && signbit(o * partials[$-1])) | { | // problem case: decide whether result is representable | F x = o * M; | F y = partials[$-1] / 2; | F h = x + y; | F d = h - x; | F l = (y - d) * 2; | y = h * 2; | d = h + l; | F t = d - h; | version(X86) | { | if (!.isInfinity(cast(T)y) || !.isInfinity(sum())) | return 0; | } | else | { | if (!.isInfinity(cast(T)y) || | ((partials.length > 1 && !signbit(l * partials[$-2])) && t == l)) | return 0; | } | } | return F.infinity * o; | } | } | else | static if (summation == Summation.kb2) | { | F s; | F cs; | F ccs; | } | else | static if (summation == Summation.kbn) | { | F s; | F c; | } | else | static if (summation == Summation.kahan) | { | F s; | F c; | F y; // do not declare in the loop/put (algo can be used for matrixes and etc) | F t; // ditto | } | else | static if (summation == Summation.pairwise) | { | package size_t counter; | size_t index; | static if (fastPairwise) | { | enum registersCount= 16; | F[size_t.sizeof * 8] partials; | } | else | { | F[size_t.sizeof * 8] partials; | } | } | else | static if (summation == Summation.naive) | { | F s; | } | else | static if (summation == Summation.fast) | { | F s; | } | else | static assert(0, "Unsupported summation type for std.numeric.Summator."); | | |public: | | /// | this()(T n) | { | static if (summation == Summation.precise) | { | partials = scopeBuffer(scopeBufferArray); | s = 0.0; | o = 0; | if (n) put(n); | } | else | static if (summation == Summation.kb2) | { | s = n; | static if (isComplex!T) | { | cs = 0 + 0fi; | ccs = 0 + 0fi; | } | else | { | cs = 0.0; | ccs = 0.0; | } | } | else | static if (summation == Summation.kbn) | { | s = n; | static if (isComplex!T) | c = 0 + 0fi; | else | c = 0.0; | } | else | static if (summation == Summation.kahan) | { | s = n; | static if (isComplex!T) | c = 0 + 0fi; | else | c = 0.0; | } | else | static if (summation == Summation.pairwise) | { | counter = index = 1; | partials[0] = n; | } | else | static if (summation == Summation.naive) | { | s = n; | } | else | static if (summation == Summation.fast) | { | s = n; | } | else | static assert(0); | } | | // free ScopeBuffer | static if (summation == Summation.precise) | ~this() | { | version(LDC) pragma(inline, true); | partials.free; | } | | // copy ScopeBuffer if necessary | static if (summation == Summation.precise) | this(this) | { | auto a = partials[]; | if (scopeBufferArray.ptr !is a.ptr) | { | partials = scopeBuffer(scopeBufferArray); | partials.put(a); | } | } | | ///Adds `n` to the internal partial sums. 
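|    //For the pairwise path, the number of trailing zero bits of the element
|    //counter (`cttz(counter)`) tells how many completed power-of-two blocks can
|    //be merged, exactly like carry propagation in a binary counter: when the
|    //8th element arrives (counter == 0b1000, three trailing zeros) three merges
|    //collapse the partials for blocks of sizes 1, 2 and 4 into a single partial
|    //covering all 8 elements.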
| void put(N)(N n) | if (__traits(compiles, {T a = n; a = n; a += n;})) | { | static if (isCompesatorAlgorithm!summation) | F x = n; | static if (summation == Summation.precise) | { | if (.isFinite(x)) | { | size_t i; | foreach (y; partials[]) | { | F h = x + y; | if (.isInfinity(cast(T)h)) | { | if (fabs(x) < fabs(y)) | { | F t = x; x = y; y = t; | } | //h == -F.infinity | if (signbit(h)) | { | x += M; | x += M; | o--; | } | //h == +F.infinity | else | { | x -= M; | x -= M; | o++; | } | debug(numeric) assert(x.isFinite); | h = x + y; | } | debug(numeric) assert(h.isFinite); | F l; | if (fabs(x) < fabs(y)) | { | F t = h - y; | l = x - t; | } | else | { | F t = h - x; | l = y - t; | } | debug(numeric) assert(l.isFinite); | if (l) | { | partials[i++] = l; | } | x = h; | } | partials.length = i; | if (x) | { | partials.put(x); | } | } | else | { | s += x; | } | } | else | static if (summation == Summation.kb2) | { | static if (isFloatingPoint!F) | { | F t = s + x; | F c = 0; | if (fabs(s) >= fabs(x)) | { | F d = s - t; | c = d + x; | } | else | { | F d = x - t; | c = d + s; | } | s = t; | t = cs + c; | if (fabs(cs) >= fabs(c)) | { | F d = cs - t; | d += c; | ccs += d; | } | else | { | F d = c - t; | d += cs; | ccs += d; | } | cs = t; | } | else | { | F t = s + x; | if (fabs(s.re) < fabs(x.re)) | { | auto s_re = s.re; | auto x_re = x.re; | s = x_re + s.im * 1fi; | x = s_re + x.im * 1fi; | } | if (fabs(s.im) < fabs(x.im)) | { | auto s_im = s.im; | auto x_im = x.im; | s = s.re + x_im * 1fi; | x = x.re + s_im * 1fi; | } | F c = (s-t)+x; | s = t; | if (fabs(cs.re) < fabs(c.re)) | { | auto c_re = c.re; | auto cs_re = cs.re; | c = cs_re + c.im * 1fi; | cs = c_re + cs.im * 1fi; | } | if (fabs(cs.im) < fabs(c.im)) | { | auto c_im = c.im; | auto cs_im = cs.im; | c = c.re + cs_im * 1fi; | cs = cs.re + c_im * 1fi; | } | F d = cs - t; | d += c; | ccs += d; | cs = t; | } | } | else | static if (summation == Summation.kbn) | { | static if (isFloatingPoint!F) | { | F t = s + x; | if (fabs(s) >= fabs(x)) | { | F d = s - t; | d += x; | c += d; | } | else | { | F d = x - t; | d += s; | c += d; | } | s = t; | } | else | { | F t = s + x; | if (fabs(s.re) < fabs(x.re)) | { | auto s_re = s.re; | auto x_re = x.re; | s = x_re + s.im * 1fi; | x = s_re + x.im * 1fi; | } | if (fabs(s.im) < fabs(x.im)) | { | auto s_im = s.im; | auto x_im = x.im; | s = s.re + x_im * 1fi; | x = x.re + s_im * 1fi; | } | F d = s - t; | d += x; | c += d; | s = t; | } | } | else | static if (summation == Summation.kahan) | { | y = x - c; | t = s + y; | c = t - s; | c -= y; | s = t; | } | else | static if (summation == Summation.pairwise) | { | import mir.bitop: cttz; | ++counter; | partials[index] = n; | foreach (_; 0 .. cttz(counter)) | { | immutable newIndex = index - 1; | partials[newIndex] += partials[index]; | index = newIndex; | } | ++index; | } | else | static if (summation == Summation.naive) | { | s += n; | } | else | static if (summation == Summation.fast) | { | s += n; | } | else | static assert(0); | } | | ///ditto | void put(Range)(Range r) | if (isIterable!Range) | { | static if (summation == Summation.pairwise && fastPairwise && isDynamicArray!Range) | { | F[registersCount] v; | foreach (i, n; chainSeq!registersCount) | { | if (r.length >= n * 2) do | { | foreach (j; Iota!n) | v[j] = cast(F) r[j]; | foreach (j; Iota!n) | v[j] += cast(F) r[n + j]; | foreach (m; chainSeq!(n / 2)) | foreach (j; Iota!m) | v[j] += v[m + j]; | put(v[0]); | r = r[n * 2 .. 
$]; | } | while (!i && r.length >= n * 2); | } | if (r.length) | { | put(cast(F) r[0]); | r = r[1 .. $]; | } | assert(r.length == 0); | } | else | static if (summation == Summation.fast) | { | static if (isComplex!T) | F s0 = 0 + 0fi; | else | F s0 = 0; | foreach (ref elem; r) | s0 += elem; | s += s0; | } | else | { | foreach (ref elem; r) | put(elem); | } | } | | import mir.ndslice.slice; | | /// ditto | void put(Range: Slice!(Iterator, N, kind), Iterator, size_t N, SliceKind kind)(Range r) | { | static if (N > 1 && kind == Contiguous) | { | import mir.ndslice.topology: flattened; | this.put(r.flattened); | } | else | static if (isPointer!Iterator && kind == Contiguous) | { | this.put(r.field); | } | else | static if (summation == Summation.fast && N == 1) | { | static if (isComplex!T) | F s0 = 0 + 0fi; | else | F s0 = 0; | import mir.algorithm.iteration: reduce; | s0 = s0.reduce!"a + b"(r); | s += s0; | } | else | { | foreach(elem; r) | this.put(elem); | } | } | | /+ | Adds `x` to the internal partial sums. | This operation doesn't re-establish special | value semantics across iterations (i.e. handling ±inf). | Preconditions: `isFinite(x)`. | +/ | version(none) | static if (summation == Summation.precise) | package void unsafePut()(F x) | in { | assert(.isFinite(x)); | } | do { | size_t i; | foreach (y; partials[]) | { | F h = x + y; | debug(numeric) assert(.isFinite(h)); | F l; | if (fabs(x) < fabs(y)) | { | F t = h - y; | l = x - t; | } | else | { | F t = h - x; | l = y - t; | } | debug(numeric) assert(.isFinite(l)); | if (l) | { | partials[i++] = l; | } | x = h; | } | partials.length = i; | if (x) | { | partials.put(x); | } | } | | ///Returns the value of the sum. | T sum()() scope const | { | /++ | Returns the value of the sum, rounded to the nearest representable | floating-point number using the round-half-to-even rule. | The result can differ from the exact value on `X86`, `nextDown`proir) <= result && result <= nextUp(proir)). | +/ | static if (summation == Summation.precise) | { | debug(mir_sum) | { | foreach (y; partials[]) | { | assert(y); | assert(y.isFinite); | } | //TODO: Add Non-Overlapping check to std.math | import mir.ndslice.slice: sliced; | import mir.ndslice.sorting: isSorted; | import mir.ndslice.topology: map; | assert(partials[].sliced.map!fabs.isSorted); | } | | if (s) | return s; | auto parts = partials[]; | F y = 0.0; | //pick last | if (parts.length) | { | y = parts[$-1]; | parts = parts[0..$-1]; | } | if (o) | { | immutable F of = o; | if (y && (o == -1 || o == 1) && signbit(of * y)) | { | // problem case: decide whether result is representable | y /= 2; | F x = of * M; | immutable F h = x + y; | F t = h - x; | F l = (y - t) * 2; | y = h * 2; | if (.isInfinity(cast(T)y)) | { | // overflow, except in edge case... | x = h + l; | t = x - h; | y = parts.length && t == l && !signbit(l*parts[$-1]) ? | x * 2 : | F.infinity * of; | parts = null; | } | else if (l) | { | bool _break; | y = partialsReducePred(y, l, parts.length ? 
parts[$-1] : 0, _break); | if (_break) | parts = null; | } | } | else | { | y = F.infinity * of; | parts = null; | } | } | return partialsReduce(y, parts); | } | else | static if (summation == Summation.kb2) | { | return s + (cs + ccs); | } | else | static if (summation == Summation.kbn) | { | return s + c; | } | else | static if (summation == Summation.kahan) | { | return s; | } | else | static if (summation == Summation.pairwise) | { | F s = summationInitValue!T; | assert((counter == 0) == (index == 0)); | foreach_reverse (ref e; partials[0 .. index]) | { | static if (is(F : __vector(W[N]), W, size_t N)) | s += cast(Unqual!F) e; //DMD bug workaround | else | s += e; | } | return s; | } | else | static if (summation == Summation.naive) | { | return s; | } | else | static if (summation == Summation.fast) | { | return s; | } | else | static assert(0); | } | | version(none) | static if (summation == Summation.precise) | F partialsSum()() const | { | debug(numeric) partialsDebug; | auto parts = partials[]; | F y = 0.0; | //pick last | if (parts.length) | { | y = parts[$-1]; | parts = parts[0..$-1]; | } | return partialsReduce(y, parts); | } | | ///Returns `Summator` with extended internal partial sums. | C opCast(C : Summator!(P, _summation), P, Summation _summation)() const | if ( | _summation == summation && | isMutable!C && | P.max_exp >= T.max_exp && | P.mant_dig >= T.mant_dig | ) | { | static if (is(P == T)) | return this; | else | static if (summation == Summation.precise) | { | auto ret = typeof(return).init; | ret.s = s; | ret.o = o; | ret.partials = scopeBuffer(ret.scopeBufferArray); | foreach (p; partials[]) | { | ret.partials.put(p); | } | enum exp_diff = P.max_exp / T.max_exp; | static if (exp_diff) | { | if (ret.o) | { | immutable f = ret.o / exp_diff; | immutable t = cast(int)(ret.o % exp_diff); | ret.o = f; | ret.put((P(2) ^^ T.max_exp) * t); | } | } | return ret; | } | else | static if (summation == Summation.kb2) | { | auto ret = typeof(return).init; | ret.s = s; | ret.cs = cs; | ret.ccs = ccs; | return ret; | } | else | static if (summation == Summation.kbn) | { | auto ret = typeof(return).init; | ret.s = s; | ret.c = c; | return ret; | } | else | static if (summation == Summation.kahan) | { | auto ret = typeof(return).init; | ret.s = s; | ret.c = c; | return ret; | } | else | static if (summation == Summation.pairwise) | { | auto ret = typeof(return).init; | ret.counter = counter; | ret.index = index; | foreach (i; 0 .. index) | ret.partials[i] = partials[i]; | return ret; | } | else | static if (summation == Summation.naive) | { | auto ret = typeof(return).init; | ret.s = s; | return ret; | } | else | static if (summation == Summation.fast) | { | auto ret = typeof(return).init; | ret.s = s; | return ret; | } | else | static assert(0); | } | | /++ | `cast(C)` operator overloading. Returns `cast(C)sum()`. | See also: `cast` | +/ | C opCast(C)() const if (is(Unqual!C == T)) | { | return cast(C)sum(); | } | | ///Operator overloading. | // opAssign should initialize partials. 
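|    // Added illustration (not part of the original source): together with
|    // `opAssign` and `opOpAssign` below, the summator can be used like a
|    // plain accumulator, e.g. assuming `double`:
|    //
|    //     Summator!(double, Summation.kbn) acc = 1;
|    //     acc += 3;   // forwards to put(3)
|    //     acc -= 10;  // compensated subtraction
|    //     assert(acc.sum == -6);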
| void opAssign(T rhs) | { | static if (summation == Summation.precise) | { | partials.free; | partials = scopeBuffer(scopeBufferArray); | s = 0.0; | o = 0; | if (rhs) put(rhs); | } | else | static if (summation == Summation.kb2) | { | s = rhs; | static if (isComplex!T) | { | cs = 0 + 0fi; | ccs = 0 + 0fi; | } | else | { | cs = 0.0; | ccs = 0.0; | } | } | else | static if (summation == Summation.kbn) | { | s = rhs; | static if (isComplex!T) | c = 0 + 0fi; | else | c = 0.0; | } | else | static if (summation == Summation.kahan) | { | s = rhs; | static if (isComplex!T) | c = 0 + 0fi; | else | c = 0.0; | } | else | static if (summation == Summation.pairwise) | { | counter = 1; | index = 1; | partials[0] = rhs; | } | else | static if (summation == Summation.naive) | { | s = rhs; | } | else | static if (summation == Summation.fast) | { | s = rhs; | } | else | static assert(0); | } | | ///ditto | void opOpAssign(string op : "+")(T rhs) | { | put(rhs); | } | | ///ditto | void opOpAssign(string op : "+")(ref const Summator rhs) | { | static if (summation == Summation.precise) | { | s += rhs.s; | o += rhs.o; | foreach (f; rhs.partials[]) | put(f); | } | else | static if (summation == Summation.kb2) | { | put(rhs.ccs); | put(rhs.cs); | put(rhs.s); | } | else | static if (summation == Summation.kbn) | { | put(rhs.c); | put(rhs.s); | } | else | static if (summation == Summation.kahan) | { | put(rhs.s); | } | else | static if (summation == Summation.pairwise) | { | foreach_reverse (e; rhs.partials[0 .. rhs.index]) | put(e); | counter -= rhs.index; | counter += rhs.counter; | } | else | static if (summation == Summation.naive) | { | put(rhs.s); | } | else | static if (summation == Summation.fast) | { | put(rhs.s); | } | else | static assert(0); | } | | ///ditto | void opOpAssign(string op : "-")(T rhs) | { | static if (summation == Summation.precise) | { | put(-rhs); | } | else | static if (summation == Summation.kb2) | { | put(-rhs); | } | else | static if (summation == Summation.kbn) | { | put(-rhs); | } | else | static if (summation == Summation.kahan) | { | y = 0.0; | y -= rhs; | y -= c; | t = s + y; | c = t - s; | c -= y; | s = t; | } | else | static if (summation == Summation.pairwise) | { | put(-rhs); | } | else | static if (summation == Summation.naive) | { | s -= rhs; | } | else | static if (summation == Summation.fast) | { | s -= rhs; | } | else | static assert(0); | } | | ///ditto | void opOpAssign(string op : "-")(ref const Summator rhs) | { | static if (summation == Summation.precise) | { | s -= rhs.s; | o -= rhs.o; | foreach (f; rhs.partials[]) | put(-f); | } | else | static if (summation == Summation.kb2) | { | put(-rhs.ccs); | put(-rhs.cs); | put(-rhs.s); | } | else | static if (summation == Summation.kbn) | { | put(-rhs.c); | put(-rhs.s); | } | else | static if (summation == Summation.kahan) | { | this -= rhs.s; | } | else | static if (summation == Summation.pairwise) | { | foreach_reverse (e; rhs.partials[0 .. 
rhs.index]) | put(-e); | counter -= rhs.index; | counter += rhs.counter; | } | else | static if (summation == Summation.naive) | { | s -= rhs.s; | } | else | static if (summation == Summation.fast) | { | s -= rhs.s; | } | else | static assert(0); | } | | /// | | version(mir_test) | @nogc nothrow unittest | { | import mir.math.common; | import mir.ndslice.topology: iota, map; | auto r1 = iota(500).map!(a => 1.7L.pow(a+1) - 1.7L.pow(a)); | auto r2 = iota([500], 500).map!(a => 1.7L.pow(a+1) - 1.7L.pow(a)); | Summator!(real, Summation.precise) s1 = 0, s2 = 0.0; | foreach (e; r1) s1 += e; | foreach (e; r2) s2 -= e; | s1 -= s2; | s1 -= 1.7L.pow(1000); | assert(s1.sum() == -1); | } | | | version(mir_test) | @nogc nothrow unittest | { | with(Summation) | foreach (summation; AliasSeq!(kahan, kbn, kb2, precise, pairwise)) | foreach (T; AliasSeq!(float, double, real)) | { | Summator!(T, summation) sum = 1; | sum += 3; | assert(sum.sum == 4); | sum -= 10; | assert(sum.sum == -6); | Summator!(T, summation) sum2 = 3; | sum -= sum2; | assert(sum.sum == -9); | sum2 = 100; | sum += 100; | assert(sum.sum == 91); | auto sum3 = cast(Summator!(real, summation))sum; | assert(sum3.sum == 91); | sum = sum2; | } | } | | | version(mir_test) | @nogc nothrow unittest | { | import mir.math.common: approxEqual; | with(Summation) | foreach (summation; AliasSeq!(naive, fast)) | foreach (T; AliasSeq!(float, double, real)) | { | Summator!(T, summation) sum = 1; | sum += 3.5; | assert(sum.sum.approxEqual(4.5)); | sum = 2; | assert(sum.sum == 2); | sum -= 4; | assert(sum.sum.approxEqual(-2)); | } | } | | static if (summation == Summation.precise) | { | ///Returns `true` if current sum is a NaN. | bool isNaN()() const | { | return .isNaN(s); | } | | ///Returns `true` if current sum is finite (not infinite or NaN). | bool isFinite()() const | { | if (s) | return false; | return !overflow; | } | | ///Returns `true` if current sum is ±∞. | bool isInfinity()() const | { | return .isInfinity(s) || overflow(); | } | } | else static if (isFloatingPoint!F) | { | ///Returns `true` if current sum is a NaN. | bool isNaN()() const | { | return .isNaN(sum()); | } | | ///Returns `true` if current sum is finite (not infinite or NaN). | bool isFinite()() const | { | return .isFinite(sum()); | } | | ///Returns `true` if current sum is ±∞. 
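|    // Added note: in this floating-point (non-precise) branch the predicates
|    // below are computed from the rounded value returned by `sum()` rather
|    // than from dedicated overflow bookkeeping as in the precise branch above.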
| bool isInfinity()() const | { | return .isInfinity(sum()); | } | } | else | { | //User defined types | } |} | |version(mir_test) |unittest |{ | import mir.functional: RefTuple, refTuple; | import mir.ndslice.topology: map, iota, retro; | import mir.array.allocation: array; | import std.math: isInfinity, isFinite, isNaN; | | Summator!(double, Summation.precise) summator = 0.0; | | enum double M = (cast(double)2) ^^ (double.max_exp - 1); | RefTuple!(double[], double)[] tests = [ | refTuple(new double[0], 0.0), | refTuple([0.0], 0.0), | refTuple([1e100, 1.0, -1e100, 1e-100, 1e50, -1, -1e50], 1e-100), | refTuple([1e308, 1e308, -1e308], 1e308), | refTuple([-1e308, 1e308, 1e308], 1e308), | refTuple([1e308, -1e308, 1e308], 1e308), | refTuple([M, M, -2.0^^1000], 1.7976930277114552e+308), | refTuple([M, M, M, M, -M, -M, -M], 8.9884656743115795e+307), | refTuple([2.0^^53, -0.5, -2.0^^-54], 2.0^^53-1.0), | refTuple([2.0^^53, 1.0, 2.0^^-100], 2.0^^53+2.0), | refTuple([2.0^^53+10.0, 1.0, 2.0^^-100], 2.0^^53+12.0), | refTuple([2.0^^53-4.0, 0.5, 2.0^^-54], 2.0^^53-3.0), | refTuple([M-2.0^^970, -1, M], 1.7976931348623157e+308), | refTuple([double.max, double.max*2.^^-54], double.max), | refTuple([double.max, double.max*2.^^-53], double.infinity), | refTuple(iota([1000], 1).map!(a => 1.0/a).array , 7.4854708605503451), | refTuple(iota([1000], 1).map!(a => (-1.0)^^a/a).array, -0.69264743055982025), //0.693147180559945309417232121458176568075500134360255254120680... | refTuple(iota([1000], 1).map!(a => 1.0/a).retro.array , 7.4854708605503451), | refTuple(iota([1000], 1).map!(a => (-1.0)^^a/a).retro.array, -0.69264743055982025), | refTuple([double.infinity, -double.infinity, double.nan], double.nan), | refTuple([double.nan, double.infinity, -double.infinity], double.nan), | refTuple([double.infinity, double.nan, double.infinity], double.nan), | refTuple([double.infinity, double.infinity], double.infinity), | refTuple([double.infinity, -double.infinity], double.nan), | refTuple([-double.infinity, 1e308, 1e308, -double.infinity], -double.infinity), | refTuple([M-2.0^^970, 0.0, M], double.infinity), | refTuple([M-2.0^^970, 1.0, M], double.infinity), | refTuple([M, M], double.infinity), | refTuple([M, M, -1], double.infinity), | refTuple([M, M, M, M, -M, -M], double.infinity), | refTuple([M, M, M, M, -M, M], double.infinity), | refTuple([-M, -M, -M, -M], -double.infinity), | refTuple([M, M, -2.^^971], double.max), | refTuple([M, M, -2.^^970], double.infinity), | refTuple([-2.^^970, M, M, -0X0.0000000000001P-0 * 2.^^-1022], double.max), | refTuple([M, M, -2.^^970, 0X0.0000000000001P-0 * 2.^^-1022], double.infinity), | refTuple([-M, 2.^^971, -M], -double.max), | refTuple([-M, -M, 2.^^970], -double.infinity), | refTuple([-M, -M, 2.^^970, 0X0.0000000000001P-0 * 2.^^-1022], -double.max), | refTuple([-0X0.0000000000001P-0 * 2.^^-1022, -M, -M, 2.^^970], -double.infinity), | refTuple([2.^^930, -2.^^980, M, M, M, -M], 1.7976931348622137e+308), | refTuple([M, M, -1e307], 1.6976931348623159e+308), | refTuple([1e16, 1., 1e-16], 10_000_000_000_000_002.0), | ]; | foreach (i, test; tests) | { | summator = 0.0; | foreach (t; test.a) summator.put(t); | auto r = test.b; | auto s = summator.sum; | assert(summator.isNaN() == r.isNaN()); | assert(summator.isFinite() == r.isFinite()); | assert(summator.isInfinity() == r.isInfinity()); | assert(s == r || s.isNaN && r.isNaN); | } |} | |/++ |Sums elements of `r`, which must be a finite |iterable. | |A seed may be passed to `sum`. 
Not only will this seed be used as an initial |value, but its type will be used if it is not specified. | |Note that these specialized summing algorithms execute more primitive operations |than vanilla summation. Therefore, if in certain cases maximum speed is required |at expense of precision, one can use $(LREF, Summation.fast). | |Returns: | The sum of all the elements in the range r. |+/ |template sum(F, Summation summation = Summation.appropriate) | if (isFloatingPoint!F && isMutable!F) |{ | template sum(Range) | { | F sum(Range r) | { | return SummationAlgo!(summation, Range, F)(r); | } | | F sum(Range r, F seed) | { | return SummationAlgo!(summation, Range, F)(r, seed); | } | } |} | |///ditto |template sum(Summation summation = Summation.appropriate) |{ | auto sum(Range)(Range r) | { | return SummationAlgo!(summation, Range, sumType!Range)(r); | } | | F sum(Range, F)(Range r, F seed) | { | return SummationAlgo!(summation, Range, F)(r, seed); | } |} | |///ditto |template sum(F, string summation) | if (isFloatingPoint!F && isMutable!F) |{ | mixin("alias sum = .sum!(F, Summation." ~ summation ~ ");"); |} | |///ditto |template sum(string summation) |{ | mixin("alias sum = .sum!(Summation." ~ summation ~ ");"); |} | | | |version(mir_test) |@safe pure nothrow unittest |{ | static assert(is(typeof(sum([cast( byte)1])) == int)); | static assert(is(typeof(sum([cast(ubyte)1])) == int)); | static assert(is(typeof(sum([ 1, 2, 3, 4])) == int)); | static assert(is(typeof(sum([ 1U, 2U, 3U, 4U])) == uint)); | static assert(is(typeof(sum([ 1L, 2L, 3L, 4L])) == long)); | static assert(is(typeof(sum([1UL, 2UL, 3UL, 4UL])) == ulong)); | | int[] empty; | assert(sum(empty) == 0); | assert(sum([42]) == 42); | assert(sum([42, 43]) == 42 + 43); | assert(sum([42, 43, 44]) == 42 + 43 + 44); | assert(sum([42, 43, 44, 45]) == 42 + 43 + 44 + 45); |} | | |version(mir_test) |@safe pure nothrow unittest |{ | static assert(is(typeof(sum([1.0, 2.0, 3.0, 4.0])) == double)); | static assert(is(typeof(sum!double([ 1F, 2F, 3F, 4F])) == double)); | const(float[]) a = [1F, 2F, 3F, 4F]; | static assert(is(typeof(sum!double(a)) == double)); | const(float)[] b = [1F, 2F, 3F, 4F]; | static assert(is(typeof(sum!double(a)) == double)); | | double[] empty; | assert(sum(empty) == 0); | assert(sum([42.]) == 42); | assert(sum([42., 43.]) == 42 + 43); | assert(sum([42., 43., 44.]) == 42 + 43 + 44); | assert(sum([42., 43., 44., 45.5]) == 42 + 43 + 44 + 45.5); |} | |version(mir_test) |@safe pure nothrow unittest |{ | import mir.ndslice.topology: iota; | assert(iota(2, 3).sum == 15); |} | |version(mir_test) |@safe pure nothrow unittest |{ | import std.container; | static assert(is(typeof(sum!double(SList!float()[])) == double)); | static assert(is(typeof(sum(SList!double()[])) == double)); | static assert(is(typeof(sum(SList!real()[])) == real)); | | assert(sum(SList!double()[]) == 0); | assert(sum(SList!double(1)[]) == 1); | assert(sum(SList!double(1, 2)[]) == 1 + 2); | assert(sum(SList!double(1, 2, 3)[]) == 1 + 2 + 3); | assert(sum(SList!double(1, 2, 3, 4)[]) == 10); |} | | |version(mir_test) |pure nothrow unittest // 12434 |{ | import mir.ndslice.slice: sliced; | import mir.ndslice.topology: map; | immutable a = [10, 20]; | auto s = a.sliced; | auto s1 = sum(a); // Error | auto s2 = s.map!(x => x).sum; // Error |} | |version(mir_test) |unittest |{ | import std.bigint; | import mir.ndslice.topology: repeat; | | auto a = BigInt("1_000_000_000_000_000_000").repeat(10); | auto b = (ulong.max/2).repeat(10); | auto sa = a.sum(); | auto sb 
= b.sum(BigInt(0)); //reduce ulongs into bigint | assert(sa == BigInt("10_000_000_000_000_000_000")); | assert(sb == (BigInt(ulong.max/2) * 10)); |} | |version(mir_test) |unittest |{ | with(Summation) | foreach (F; AliasSeq!(float, double, real)) | { | F[] ar = [1, 2, 3, 4]; | F r = 10; | assert(r == ar.sum!fast()); | assert(r == ar.sum!pairwise()); | assert(r == ar.sum!kahan()); | assert(r == ar.sum!kbn()); | assert(r == ar.sum!kb2()); | } |} | |version(LDC) |version(X86_Any) |version(mir_test) |unittest |{ | import core.simd; | static if (__traits(compiles, double2.init + double2.init)) | { | | alias S = Summation; | alias sums = AliasSeq!(S.kahan, S.pairwise, S.naive, S.fast); | | double2[] ar = [double2([1.0, 2]), double2([2, 3]), double2([3, 4]), double2([4, 6])]; | double2 c = double2([10, 15]); | | foreach (sumType; sums) | { | double2 s = ar.sum!(sumType); | assert(s.array == c.array); | } | } |} | |version(LDC) |version(X86_Any) |version(mir_test) |unittest |{ | import core.simd; | import mir.ndslice.topology: iota, as; | | alias S = Summation; | alias sums = AliasSeq!(S.kahan, S.pairwise, S.naive, S.fast, S.precise, | S.kbn, S.kb2); | | int[2] ns = [9, 101]; | | foreach (n; ns) | { | foreach (sumType; sums) | { | auto ar = iota(n).as!double; | double c = n * (n - 1) / 2; // gauss for n=100 | double s = ar.sum!(sumType); | assert(s == c); | } | } |} | |/++ |Precise summation. |+/ |private F sumPrecise(Range, F)(Range r, F seed = summationInitValue!F) | if (isFloatingPoint!F || isComplex!F) |{ | static if (isFloatingPoint!F) | { | auto sum = Summator!(F, Summation.precise)(seed); | sum.put(r); | return sum.sum; | } | else | { | alias T = typeof(F.init.re); | auto sumRe = Summator!(T, Summation.precise)(seed.re); | auto sumIm = Summator!(T, Summation.precise)(seed.im); | import mir.ndslice.slice: isSlice; | static if (isSlice!Range) | { | import mir.algorithm.iteration: each; | r.each!((auto ref elem) | { | sumRe.put(elem.re); | sumIm.put(elem.im); | }); | } | else | { | foreach (ref elem; r) | { | sumRe.put(elem.re); | sumIm.put(elem.im); | } | } | return sumRe.sum + sumIm.sum * 1fi; | } |} | |private template SummationAlgo(Summation summation, Range, F) |{ | static if (summation == Summation.precise) | alias SummationAlgo = sumPrecise!(Range, F); | else | static if (summation == Summation.appropriate) | { | static if (isSummable!(Range, F)) | alias SummationAlgo = SummationAlgo!(Summation.pairwise, Range, F); | else | static if (is(F == class) || is(F == struct) || is(F == interface)) | alias SummationAlgo = SummationAlgo!(Summation.naive, Range, F); | else | alias SummationAlgo = SummationAlgo!(Summation.fast, Range, F); | } | else | { | F SummationAlgo(Range r) | { | static if (__traits(compiles, {Summator!(F, summation) sum;})) | Summator!(F, summation) sum; | else | auto sum = Summator!(F, summation)(summationInitValue!F); | sum.put(r); | return sum.sum; | } | | F SummationAlgo(Range r, F s) | { | auto sum = Summator!(F, summation)(s); | sum.put(r); | return sum.sum; | } | } |} | |private T summationInitValue(T)() |{ | static if (__traits(compiles, {T a = 0.0;})) | { | T a = 0.0; | return a; | } | else | static if (__traits(compiles, {T a = 0;})) | { | T a = 0; | return a; | } | else | static if (__traits(compiles, {T a = 0 + 0fi;})) | { | T a = 0 + 0fi; | return a; | } | else | { | return T.init; | } |} | |package template sumType(Range) |{ | import mir.ndslice.slice: isSlice, DeepElementType; | static if (isSlice!Range) | alias T = Unqual!(DeepElementType!(Range.This)); | 
else | alias T = Unqual!(ForeachType!Range); | alias sumType = typeof(T.init + T.init); |} | |/++ |+/ |template fillCollapseSums(Summation summation, alias combineParts, combineElements...) |{ | import mir.ndslice.slice: Slice, SliceKind; | /++ | +/ | auto ref fillCollapseSums(Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) data) @property | { | import mir.algorithm.iteration; | import mir.functional: naryFun; | import mir.ndslice.topology: iota, triplets; | foreach (triplet; data.length.iota.triplets) with(triplet) | { | auto ref ce(size_t i)() | { | static if (summation == Summation.fast) | { | return | sum!summation(naryFun!(combineElements[i])(center, left )) + | sum!summation(naryFun!(combineElements[i])(center, right)); | } | else | { | Summator!summation summator = 0; | summator.put(naryFun!(combineElements[i])(center, left)); | summator.put(naryFun!(combineElements[i])(center, right)); | return summator.sum; | } | } | alias sums = staticMap!(ce, Iota!(combineElements.length)); | data[center] = naryFun!combineParts(center, sums); | } | } |} | |package: | |template isSummable(F) |{ | enum bool isSummable = | __traits(compiles, | { | F a = 0.1, b, c; | b = 2.3; | c = a + b; | c = a - b; | a += b; | a -= b; | }); |} | |template isSummable(Range, F) |{ | enum bool isSummable = | isIterable!Range && | isImplicitlyConvertible!(sumType!Range, F) && | isSummable!F; |} | |version(mir_test) |unittest |{ | import mir.ndslice.topology: iota; | static assert(isSummable!(typeof(iota([size_t.init])), double)); |} | |private enum bool isCompesatorAlgorithm(Summation summation) = | summation == Summation.precise | || summation == Summation.kb2 | || summation == Summation.kbn | || summation == Summation.kahan; | | |version(mir_test) |unittest |{ | import mir.ndslice; | | auto p = iota([2, 3, 4, 5]); | auto a = p.as!double; | auto b = a.flattened; | auto c = a.slice; | auto d = c.flattened; | auto s = p.flattened.sum; | | assert(a.sum == s); | assert(b.sum == s); | assert(c.sum == s); | assert(d.sum == s); | | assert(a.canonical.sum == s); | assert(b.canonical.sum == s); | assert(c.canonical.sum == s); | assert(d.canonical.sum == s); | | assert(a.universal.transposed!3.sum == s); | assert(b.universal.sum == s); | assert(c.universal.transposed!3.sum == s); | assert(d.universal.sum == s); | | assert(a.sum!"fast" == s); | assert(b.sum!"fast" == s); | assert(c.sum!(float, "fast") == s); | assert(d.sum!"fast" == s); | | assert(a.canonical.sum!"fast" == s); | assert(b.canonical.sum!"fast" == s); | assert(c.canonical.sum!"fast" == s); | assert(d.canonical.sum!"fast" == s); | | assert(a.universal.transposed!3.sum!"fast" == s); | assert(b.universal.sum!"fast" == s); | assert(c.universal.transposed!3.sum!"fast" == s); | assert(d.universal.sum!"fast" == s); | |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/math/sum.d has no code <<<<<< EOF # path=source-mir-sparse-package.lst |/++ |$(H2 Sparse Tensors) | |License: $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0). 
| |Authors: Ilya Yaroshenko |+/ |module mir.sparse; | |import std.traits; |import std.meta; | |import mir.ndslice.slice; |public import mir.ndslice.field: SparseField; |public import mir.ndslice.iterator: ChopIterator, FieldIterator; |public import mir.series: isSeries, Series, mir_series, series; |public import mir.ndslice.slice: CoordinateValue, Slice, mir_slice; |public import mir.ndslice.topology: chopped; | |//TODO: replace with `static foreach` |private template Iota(size_t i, size_t j) |{ | static assert(i <= j, "Iota: i should be less than or equal to j"); | static if (i == j) | alias Iota = AliasSeq!(); | else | alias Iota = AliasSeq!(i, Iota!(i + 1, j)); |} | |/++ |Sparse tensors represented in Dictionary of Keys (DOK) format. | |Params: | N = dimension count | lengths = list of dimension lengths |Returns: | `N`-dimensional slice composed of indeces |See_also: $(LREF Sparse) |+/ |Sparse!(T, N) sparse(T, size_t N)(size_t[N] lengths...) |{ 12| T[size_t] table; 12| table[0] = 0; 12| table.remove(0); 12| assert(table !is null); 12| with (typeof(return)) return FieldIterator!(SparseField!T)(0, SparseField!T(table)).sliced(lengths); |} | |/// |pure unittest |{ 1| auto slice = sparse!double(2, 3); 1| slice[0][] = 1; 1| slice[0, 1] = 2; 1| --slice[0, 0]; 1| slice[1, 2] += 4; | 1| assert(slice == [[0, 2, 1], [0, 0, 4]]); | | import std.range.primitives: isRandomAccessRange; | static assert(isRandomAccessRange!(Sparse!(double, 2))); | | import mir.ndslice.slice: Slice, DeepElementType; | static assert(is(Sparse!(double, 2) : Slice!(FieldIterator!(SparseField!double), 2))); | static assert(is(DeepElementType!(Sparse!(double, 2)) == double)); |} | |/++ |Returns unsorted forward range of (coordinate, value) pairs. | |Params: | slice = sparse slice with pure structure. Any operations on structure of a slice are not allowed. |+/ |auto byCoordinateValue(size_t N, T)(Slice!(FieldIterator!(SparseField!T), N) slice) |{ | struct ByCoordinateValue | { | private sizediff_t[N-1] _strides; | mixin _sparse_range_methods!(typeof(slice._iterator._field._table.byKeyValue())); | | auto front() @property | {S: 5| assert(!_range.empty); 5| auto iv = _range.front; 5| size_t index = iv.key; 10| if (!(_l <= index && index < _r)) | { 0000000| _range.popFront; 0000000| goto S; | } 5| CoordinateValue!(T, N) ret; | foreach (i; Iota!(0, N - 1)) | { 5| ret.index[i] = index / _strides[i]; 5| index %= _strides[i]; | } 5| ret.index[N - 1] = index; 5| ret.value = iv.value; 5| return ret; | } | } 1| size_t l = slice._iterator._index; 1| size_t r = l + slice.elementCount; 1| size_t length = slice._iterator._field._table.byKey.countInInterval(l, r); 1| return ByCoordinateValue(slice.strides[0..N-1], length, l, r, slice._iterator._field._table.byKeyValue); |} | |/// |pure unittest |{ | import mir.array.allocation: array; | import mir.ndslice.sorting: sort; | alias CV = CoordinateValue!(double, 2); | 1| auto slice = sparse!double(3, 3); 1| slice[] = [[0, 2, 1], [0, 0, 4], [6, 7, 0]]; 1| assert(slice.byCoordinateValue.array.sort() == [ | CV([0, 1], 2), | CV([0, 2], 1), | CV([1, 2], 4), | CV([2, 0], 6), | CV([2, 1], 7)]); |} | |/++ |Returns unsorted forward range of coordinates. |Params: | slice = sparse slice with pure structure. Any operations on structure of a slice are not allowed. 
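|
|The iteration order is unspecified: it follows the order of the underlying
|associative array, which is why the example below sorts the result before
|comparing it.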
|+/ |auto byCoordinate(T, size_t N)(Slice!(FieldIterator!(SparseField!T), N) slice) |{ | struct ByCoordinate | { | private sizediff_t[N-1] _strides; | mixin _sparse_range_methods!(typeof(slice._iterator._field._table.byKey())); | | auto front() @property | {S: 5| assert(!_range.empty); 5| size_t index = _range.front; 10| if (!(_l <= index && index < _r)) | { 0000000| _range.popFront; 0000000| goto S; | } 5| size_t[N] ret; | foreach (i; Iota!(0, N - 1)) | { 5| ret[i] = index / _strides[i]; 5| index %= _strides[i]; | } 5| ret[N - 1] = index; 5| return ret; | } | } 1| size_t l = slice._iterator._index; 1| size_t r = l + slice.elementCount; 1| size_t length = slice._iterator._field._table.byKey.countInInterval(l, r); 1| return ByCoordinate(slice.strides[0 .. N - 1], length, l, r, slice._iterator._field._table.byKey); |} | |/// |pure unittest |{ | import mir.array.allocation: array; | import mir.ndslice.sorting: sort; | 1| auto slice = sparse!double(3, 3); 1| slice[] = [[0, 2, 1], [0, 0, 4], [6, 7, 0]]; 1| assert(slice.byCoordinate.array.sort() == [ | [0, 1], | [0, 2], | [1, 2], | [2, 0], | [2, 1]]); |} | |/++ |Returns unsorted forward range of values. |Params: | slice = sparse slice with pure structure. Any operations on structure of a slice are not allowed. |+/ |auto onlyByValue(T, size_t N)(Slice!(FieldIterator!(SparseField!T), N) slice) |{ | struct ByValue | { | mixin _sparse_range_methods!(typeof(slice._iterator._field._table.byKeyValue())); | | auto front() @property | {S: 5| assert(!_range.empty); 5| auto iv = _range.front; 5| size_t index = iv.key; 10| if (!(_l <= index && index < _r)) | { 0000000| _range.popFront; 0000000| goto S; | } 5| return iv.value; | } | } 1| size_t l = slice._iterator._index; 1| size_t r = l + slice.elementCount; 1| size_t length = slice._iterator._field._table.byKey.countInInterval(l, r); 1| return ByValue(length, l, r, slice._iterator._field._table.byKeyValue); |} | |/// |pure unittest |{ | import mir.array.allocation: array; | import mir.ndslice.sorting: sort; | 1| auto slice = sparse!double(3, 3); 1| slice[] = [[0, 2, 1], [0, 0, 4], [6, 7, 0]]; 1| assert(slice.onlyByValue.array.sort() == [1, 2, 4, 6, 7]); |} | |pragma(inline, false) |private size_t countInInterval(Range)(Range range, size_t l, size_t r) |{ 3| size_t count; 51| foreach(ref i; range) 30| if (l <= i && i < r) 15| count++; 3| return count; |} | |private mixin template _sparse_range_methods(Range) |{ | private size_t _length, _l, _r; | private Range _range; | | void popFront() | { 15| assert(!_range.empty); 15| _range.popFront; 15| _length--; | } | | bool empty() const @property | { 0000000| return _length == 0; | } | | auto save() @property | { 0000000| auto ret = this; 0000000| ret._range = ret._range.save; 0000000| return ret; | } | | size_t length() const @property | { 3| return _length; | } |} | |/++ |Returns compressed tensor. |Note: allocates using GC. 
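|
|The compressed layout is CRS-style: for every index of the leading N - 1
|dimensions a series of (index, value) pairs along the last dimension is kept,
|and the per-row series are glued together by a pointer array of integral type
|`J` via `chopped`.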
|+/ |auto compress(I = uint, J = size_t, SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) slice) | if (N > 1) |{ 8| return compressWithType!(DeepElementType!(Slice!(Iterator, N, kind)), I, J)(slice); |} | |/// Sparse tensor compression |unittest |{ 1| auto sparse = sparse!double(5, 3); 1| sparse[] = | [[0, 2, 1], | [0, 0, 4], | [0, 0, 0], | [6, 0, 9], | [0, 0, 5]]; | 1| auto crs = sparse.compressWithType!double; | // assert(crs.iterator._field == CompressedField!(double, uint, uint)( | // 3, | // [2, 1, 4, 6, 9, 5], | // [1, 2, 2, 0, 2, 2], | // [0, 2, 3, 3, 5, 6])); |} | |/// Sparse tensor compression |unittest |{ 1| auto sparse = sparse!double(5, 8); 1| sparse[] = | [[0, 2, 0, 0, 0, 0, 0, 1], | [0, 0, 0, 0, 0, 0, 0, 4], | [0, 0, 0, 0, 0, 0, 0, 0], | [6, 0, 0, 0, 0, 0, 0, 9], | [0, 0, 0, 0, 0, 0, 0, 5]]; | 1| auto crs = sparse.compressWithType!double; | // assert(crs.iterator._field == CompressedField!(double, uint, uint)( | // 8, | // [2, 1, 4, 6, 9, 5], | // [1, 7, 7, 0, 7, 7], | // [0, 2, 3, 3, 5, 6])); |} | |/// Dense tensor compression |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto sl = slice!double(5, 3); 1| sl[] = | [[0, 2, 1], | [0, 0, 4], | [0, 0, 0], | [6, 0, 9], | [0, 0, 5]]; | 1| auto crs = sl.compressWithType!double; | | // assert(crs.iterator._field == CompressedField!(double, uint, uint)( | // 3, | // [2, 1, 4, 6, 9, 5], | // [1, 2, 2, 0, 2, 2], | // [0, 2, 3, 3, 5, 6])); |} | |/// Dense tensor compression |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto sl = slice!double(5, 8); 1| sl[] = | [[0, 2, 0, 0, 0, 0, 0, 1], | [0, 0, 0, 0, 0, 0, 0, 4], | [0, 0, 0, 0, 0, 0, 0, 0], | [6, 0, 0, 0, 0, 0, 0, 9], | [0, 0, 0, 0, 0, 0, 0, 5]]; | 1| auto crs = sl.compress; | // assert(crs.iterator._field == CompressedField!(double, uint, uint)( | // 8, | // [2, 1, 4, 6, 9, 5], | // [1, 7, 7, 0, 7, 7], | // [0, 2, 3, 3, 5, 6])); |} | |/++ |Returns compressed tensor with different element type. |Note: allocates using GC. |+/ |Slice!(ChopIterator!(J*, Series!(I*, V*)), N - 1) | compressWithType(V, I = uint, J = size_t, T, size_t N) | (Slice!(FieldIterator!(SparseField!T), N) slice) | if (is(T : V) && N > 1 && isUnsigned!I) |{ | import mir.array.allocation: array; | import mir.ndslice.sorting: sort; | import mir.ndslice.topology: iota; 8| auto compressedData = slice | .iterator | ._field | ._table | .series!(size_t, T, I, V); 8| auto pointers = new J[slice.shape[0 .. N - 1].iota.elementCount + 1]; 16| size_t k = 1, shift; 8| pointers[0] = 0; 8| pointers[1] = 0; 8| const rowLength = slice.length!(N - 1); 233| if(rowLength) foreach (ref index; compressedData.index.field) | { | for(;;) | { 90| sizediff_t newIndex = index - shift; 90| if (newIndex >= rowLength) | { 23| pointers[k + 1] = pointers[k]; 23| shift += rowLength; 23| k++; 23| continue; | } 67| index = cast(I)newIndex; 67| pointers[k] = cast(J) (pointers[k] + 1); 67| break; | } | | } 8| pointers[k + 1 .. 
$] = pointers[k]; 8| return compressedData.chopped(pointers); |} | | |/// ditto |Slice!(ChopIterator!(J*, Series!(I*, V*)), N - 1) | compressWithType(V, I = uint, J = size_t, Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | if (!is(Iterator : FieldIterator!(SparseField!ST), ST) && is(DeepElementType!(Slice!(Iterator, N, kind)) : V) && N > 1 && isUnsigned!I) |{ | import std.array: appender; | import mir.ndslice.topology: pack, flattened; 4| auto vapp = appender!(V[]); 4| auto iapp = appender!(I[]); 4| auto psl = slice.pack!1; 4| auto count = psl.elementCount; 4| auto pointers = new J[count + 1]; | 4| pointers[0] = 0; 4| auto elems = psl.flattened; 4| size_t j = 0; 72| foreach (ref pointer; pointers[1 .. $]) | { 20| auto row = elems.front; 20| elems.popFront; 20| size_t i; 445| foreach (e; row) | { 135| if (e) | { 24| vapp.put(e); 24| iapp.put(cast(I)i); 24| j++; | } 135| i++; | } 20| pointer = cast(J)j; | } 4| return iapp.data.series(vapp.data).chopped(pointers); |} | | |/++ |Re-compresses a compressed tensor. Makes all values, indeces and pointers consequent in memory. | |Sparse slice is iterated twice. The first tine it is iterated to get length of each sparse row, the second time - to copy the data. | |Note: allocates using GC. |+/ |Slice!(ChopIterator!(J*, Series!(I*, V*)), N) | recompress | (V, I = uint, J = size_t, Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) sparseSlice) | if (isSeries!(DeepElementType!(Slice!(Iterator, N, kind)))) |{ | import mir.algorithm.iteration: each; | import mir.conv: to, emplaceRef; | import mir.ndslice.allocation: uninitSlice; | import mir.ndslice.topology: pack, flattened, as, member, zip; | | size_t count = sparseSlice.elementCount; | size_t length; | auto pointers = uninitSlice!J(count + 1); | pointers.front = 0; | sparseSlice | .member!"data" | .member!"elementCount" | .each!((len, ref ptr) {ptr = length += len;})(pointers[1 .. $]); | | auto i = uninitSlice!I(length); | auto v = uninitSlice!V(length); | | auto ret = i.series(v).chopped(pointers); | | sparseSlice | .each!((a, b) { | b.index[] = a.index.as!I; | b.value.each!(emplaceRef!V)(a.value.as!V); | })(ret); | | return ret; |} | |/// |unittest |{ | import mir.ndslice.topology: universal; | import mir.ndslice.allocation: slice; | 1| auto sl = slice!double(5, 8); 1| sl[] = | [[0, 2, 0, 0, 0, 0, 0, 1], | [0, 0, 0, 0, 0, 0, 0, 4], | [0, 0, 0, 0, 0, 0, 0, 0], | [6, 0, 0, 0, 0, 0, 0, 9], | [0, 0, 0, 0, 0, 0, 0, 5]]; | 1| auto crs = sl.compress; | // assert(crs.iterator._field == CompressedField!(double, uint, uint)( | // 8, | // [2, 1, 4, 6, 9, 5], | // [1, 7, 7, 0, 7, 7], | // [0, 2, 3, 3, 5, 6])); | | import mir.ndslice.dynamic: reversed; 1| auto rec = crs.reversed.recompress!real; 1| auto rev = sl.universal.reversed.compressWithType!real; 1| assert(rev.structure == rec.structure); | // assert(rev.iterator._field.values == rec.iterator._field.values); | // assert(rev.iterator._field.indeces == rec.iterator._field.indeces); | // assert(rev.iterator._field.pointers == rec.iterator._field.pointers); |} | |/++ |Sparse Slice in Dictionary of Keys (DOK) format. 
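|
|A minimal usage sketch (mirrors the `sparse` example above):
|-------
|Sparse!(double, 2) m = sparse!double(2, 3);
|m[0, 1] = 2;
|m[1, 2] += 4;
|assert(m == [[0, 2, 0], [0, 0, 4]]);
|-------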
|+/ |alias Sparse(T, size_t N = 1) = Slice!(FieldIterator!(SparseField!T), N); | |/// |alias CompressedVector(T, I = uint) = Series!(T*, I*); | |/// |alias CompressedMatrix(T, I = uint) = Slice!(ChopIterator!(J*, Series!(T*, I*))); | |/// |alias CompressedTensor(T, size_t N, I = uint, J = size_t) = Slice!(ChopIterator!(J*, Series!(T*, I*)), N - 1); | |///ditto |alias CompressedTensor(T, size_t N : 1, I = uint) = Series!(I*, T*); source/mir/sparse/package.d is 92% covered <<<<<< EOF # path=source-mir-model-lda-hoffman.lst |/** | |$(H3 Online variational Bayes for latent Dirichlet allocation) | |References: | Hoffman, Matthew D., Blei, David M. and Bach, Francis R.. | "Online Learning for Latent Dirichlet Allocation.." | Paper presented at the meeting of the NIPS, 2010. | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko |*/ |module mir.model.lda.hoffman; | |import std.traits; | |/++ |Batch variational Bayes for LDA with mini-batches. |+/ |struct LdaHoffman(F) | if (isFloatingPoint!F) |{ | import std.parallelism; | import mir.ndslice.iterator: FieldIterator; | import mir.ndslice.topology: iota; | | import mir.ndslice.slice; | import mir.ndslice.allocation: slice; | | import mir.math.common; | import mir.sparse; | | private alias Vector = Slice!(F*); | private alias Matrix = Slice!(F*, 2); | | private size_t D; | private F alpha; | private F eta; | private F kappa; | private F _tau; | private F eps; | | private Matrix _lambda; // [k, w] | private Matrix _beta; // [k, w] | | private TaskPool tp; | | private F[][] _lambdaTemp; | | @disable this(); | @disable this(this); | | /++ | Params: | K = theme count | W = dictionary size | D = approximate total number of documents in a collection. | alpha = Dirichlet document-topic prior (0.1) | eta = Dirichlet word-topic prior (0.1) | tau0 = tau0 ≧ 0 slows down the early iterations of the algorithm. | kappa = `kappa belongs to $(LPAREN)0.5, 1]`, controls the rate at which old values of lambda are forgotten. | `lambda = (1 - rho(tau)) lambda + rho lambda', rho(tau) = (tau0 + tau)^(-kappa)`. Use `kappa = 0` for Batch variational Bayes LDA. | eps = Stop iterations if `||lambda - lambda'||_l1 < s * eps`, where `s` is a documents count in a batch. | tp = task pool | +/ 0000000| this(size_t K, size_t W, size_t D, F alpha, F eta, F tau0, F kappa, F eps = 1e-5, TaskPool tp = taskPool()) | { | import mir.random; | 0000000| this.D = D; 0000000| this.alpha = alpha; 0000000| this.eta = eta; 0000000| this._tau = tau0; 0000000| this.kappa = kappa; 0000000| this.eps = eps; 0000000| this.tp = tp; | 0000000| _lambda = slice!F(K, W); 0000000| _beta = slice!F(K, W); 0000000| _lambdaTemp = new F[][](tp.size + 1, W); | | import std.math: fabs; 0000000| auto gen = Random(unpredictableSeed); 0000000| foreach (r; _lambda) 0000000| foreach (ref e; r) 0000000| e = (gen.rand!F.fabs + 0.9) / 1.901; | 0000000| updateBeta(); | } | | /// | void updateBeta() | { 0000000| foreach (i; tp.parallel(lambda.length.iota)) 0000000| unparameterize(lambda[i], beta[i]); | } | | /++ | Posterior over the topics | +/ | Slice!(F*, 2) beta() @property | { 0000000| return _beta; | } | | /++ | Parameterized posterior over the topics. | +/ | Slice!(F*, 2) lambda() @property | { 0000000| return _lambda; | } | | /++ | Count of already seen documents. | Slows down the iterations of the algorithm. 
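|    Used as the base of the learning rate `rho = tau ^^ (-kappa)` in
|    `putBatch`; it is increased by the batch size on every call.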
| +/ | F tau() const @property | { 0000000| return _tau; | } | | /// ditto | void tau(F v) @property | { 0000000| _tau = v; | } | | /++ | Accepts mini-batch and performs multiple E-step iterations for each document and single M-step. | | This implementation is optimized for sparse documents, | which contain much less unique words than a dictionary. | | Params: | n = mini-batch, a collection of compressed documents. | maxIterations = maximal number of iterations for s This implementation is optimized for sparse documents, |ingle document in a batch for E-step. | +/ | size_t putBatch(SliceKind kind, C, I, J)(Slice!(ChopIterator!(J*, Series!(I*, C*)), 1, kind) n, size_t maxIterations) | { | return putBatchImpl(n.recompress!F, maxIterations); | } | | private size_t putBatchImpl(Slice!(ChopIterator!(size_t*, Series!(uint*, F*))) n, size_t maxIterations) | { | import std.math: isFinite; | import mir.sparse.blas.dot; | import mir.sparse.blas.gemv; | import mir.ndslice.dynamic: transposed; | import mir.ndslice.topology: universal; | import mir.internal.utility; | | immutable S = n.length; | immutable K = _lambda.length!0; | immutable W = _lambda.length!1; | _tau += S; | auto theta = slice!F(S, K); | auto nsave = saveN(n); | | immutable rho = pow!F(F(tau), -kappa); | auto thetat = theta.universal.transposed; | auto _gamma = slice!F(tp.size + 1, K); | shared size_t ret; | // E step | foreach (d; tp.parallel(S.iota)) | { | auto gamma = _gamma[tp.workerIndex]; | gamma[] = 1; | auto nd = n[d]; | auto thetad = theta[d]; | for (size_t c; ;c++) | { | unparameterize(gamma, thetad); | | selectiveGemv!"/"(_beta.universal.transposed, thetad, nd); | F sum = 0; | { | auto beta = _beta; | auto th = thetad; | foreach (ref g; gamma) | { | if (!th.front.isFinite) | th.front = F.max; | auto value = dot(nd, beta.front) * th.front + alpha; | sum += fabs(value - g); | g = value; | beta.popFront; | th.popFront; | } | } | if (c < maxIterations && sum > eps * K) | { | nd.value[] = nsave[d].value; | continue; | } | import core.atomic; | ret.atomicOp!"+="(c); | break; | } | } | // M step | foreach (k; tp.parallel(K.iota)) | { | auto lambdaTemp = _lambdaTemp[tp.workerIndex]; | gemtv!F(F(1), n, thetat[k], F(0), lambdaTemp.sliced); | import mir.algorithm.iteration: each; | each!((ref l, bk, lt) {l = (1 - rho) * l + | rho * (eta + (F(D) / F(S)) * bk * lt);})(_lambda[k], _beta[k],lambdaTemp.sliced); | unparameterize(_lambda[k], _beta[k]); | } | return ret; | } | | private auto saveN(Slice!(ChopIterator!(size_t*, Series!(uint*, F*))) n) | { | import mir.series: series; | import mir.ndslice.topology: chopped, universal; 0000000| return n.iterator._sliceable.index | .series(n.iterator._sliceable.value.dup) | .chopped(n.iterator._iterator.sliced(n.length + 1)); | } | | private static void unparameterize(Vector param, Vector posterior) | { 0000000| assert(param.structure == posterior.structure); | import mir.ndslice.topology: zip; | import mir.math.func.expdigamma; | import mir.math.sum: sum; 0000000| immutable c = 1 / expDigamma(sum(param)); 0000000| foreach (e; zip(param, posterior)) 0000000| e.b = c * expDigamma(e.a); | } |} | |unittest |{ | alias ff = LdaHoffman!double; |} source/mir/model/lda/hoffman.d is 0% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-ndfield.lst |/++ |This is a submodule of $(MREF mir,ndslice). | |NdField is a type with `opIndex(size_t[N] index...)` primitive. |An ndslice can be created on top of a ndField using $(SUBREF slice, slicedNdField). 
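|
|A minimal custom ndfield might look like this (illustrative sketch; the
|`Checkerboard` type below is not part of this module):
|-------
|struct Checkerboard
|{
|    size_t opIndex(size_t[2] index...) const { return (index[0] + index[1]) % 2; }
|}
|// A 3x4 lazy view could then be created with Checkerboard().slicedNdField(3, 4).
|-------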
| |$(BOOKTABLE $(H2 NdFields), |$(TR $(TH NdField Name) $(TH Used By)) |$(T2 Cartesian, $(SUBREF topology, cartesian)) |$(T2 Kronecker, $(SUBREF topology, kronecker)) |) | |See_also: $(SUBREF concatenation, concatenation). | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.ndslice.ndfield; | |import mir.qualifier; |import mir.internal.utility; |import mir.ndslice.internal; |import mir.ndslice.slice; |import mir.primitives; |import std.meta; | |private template _indexes(NdFields...) |{ | static if (NdFields.length == 0) | enum _indexes = ""; | else | { | alias Next = NdFields[0 .. $ - 1]; | enum i = Next.length; | enum _indexes = ._indexes!Next ~ | "_fields[" ~ i.stringof ~ "][" ~ _indexes_range!([staticMap!(DimensionCount, Next)].sum, DimensionCount!(NdFields[$ - 1])) ~ "], "; | } |} | |private template _indexes_range(size_t begin, size_t count) |{ | static if (count == 0) | enum _indexes_range = ""; | else | { | enum next = count - 1; | enum elem = begin + next; | enum _indexes_range = ._indexes_range!(begin, next) ~ "indexes[" ~ elem.stringof ~ "], "; | } |} | |/// |struct Cartesian(NdFields...) | if (NdFields.length > 1) |{ | /// | NdFields _fields; | | package(mir) enum size_t M(size_t f) = [staticMap!(DimensionCount, NdFields[0..f])].sum; | package(mir) enum size_t N = M!(NdFields.length); | | /// | auto lightConst()() const @property | { | import std.format; | import mir.ndslice.topology: iota; | return mixin("Cartesian!(staticMap!(LightConstOf, NdFields))(%(_fields[%s].lightConst,%)].lightConst)".format(_fields.length.iota)); | } | | /// | auto lightImmutable()() immutable @property | { | import std.format; | import mir.ndslice.topology: iota; | return mixin("Cartesian!(staticMap!(LightImmutableOf, NdFields))(%(_fields[%s].lightImmutable,%)].lightImmutable)".format(_fields.length.iota)); | } | | /// | size_t length(size_t d = 0)() @safe scope const @property | { | foreach(f, NdField; NdFields) | static if (M!f <= d && M!(f + 1) > d) | { | enum d = d - M!f; | static if (d) | return _fields[f].length!(d - M!f); | else | return _fields[f].length; | } | } | | /// | size_t[N] shape()() @safe scope const @property | { | typeof(return) ret; | foreach(f, NdField; NdFields) | { | static if (hasShape!NdField) | { | auto s = _fields[f].shape; | foreach(j; Iota!(s.length)) | ret[M!f + j] = s[j]; | } | else | { | ret[M!f] = _fields[f].length; | } | } | return ret; | } | | /// | size_t elementCount()() @safe scope const @property | { | size_t ret = 1; | foreach (f, NdField; NdFields) | ret *= _fields[f].elementCount; | return ret; | } | | /// | auto opIndex(size_t[N] indexes...) | { | import mir.functional : refTuple; | return mixin("refTuple(" ~ _indexes!(NdFields) ~ ")"); | } |} | |private template _kr_indexes(size_t n) |{ | static if (n == 0) | enum _kr_indexes = ""; | else | { | enum i = n - 1; | enum _kr_indexes = ._kr_indexes!i ~ "_fields[" ~ i.stringof ~ "][ind[" ~ i.stringof ~ "]], "; | } |} | |/// |struct Kronecker(alias fun, NdFields...) | if (NdFields.length > 1 && allSatisfy!(templateOr!(hasShape, hasLength), NdFields[1 .. 
$])) |{ | /// | NdFields _fields; | | /// | auto lightConst()() const @property | { | import std.format; | import mir.ndslice.topology: iota; | return mixin("Kronecker!(fun, staticMap!(LightConstOf, NdFields))(%(_fields[%s].lightConst,%)].lightConst)".format(_fields.length.iota)); | } | | /// | auto lightImmutable()() immutable @property | { | import std.format; | import mir.ndslice.topology: iota; | return mixin("Kronecker!(fun, staticMap!(LightImmutableOf, NdFields))(%(_fields[%s].lightImmutable,%)].lightImmutable)".format(_fields.length.iota)); | } | | private enum N = DimensionCount!(NdFields[$-1]); | | /// | size_t length(size_t d = 0)() scope const @property | { | static if (d == 0) | { | size_t ret = 1; | foreach (f, NdField; NdFields) | ret *= _fields[f].length; | } | else | { | size_t ret = 1; | foreach (f, NdField; NdFields) | ret *= _fields[f].length!d; | } | return ret; | } | | | /// | size_t[N] shape()() scope const @property | { | static if (N > 1) | { | size_t[N] ret = 1; | foreach (f, NdField; NdFields) | { | auto s = _fields[f].shape; | foreach(i; Iota!N) | ret[i] *= s[i]; | } | return ret; | } | else | { | size_t[1] ret = 1; | foreach (f, NdField; NdFields) | ret[0] *= _fields[f].length; | return ret; | } | } | | /// | size_t elementCount()() scope const @property | { | size_t ret = 1; | foreach (f, NdField; NdFields) | ret *= _fields[f].elementCount; | ret; | } | | /// | auto ref opIndex()(size_t[N] indexes...) | { | static if (N > 1) | size_t[N][NdFields.length] ind; | else | size_t[NdFields.length] ind; | foreach_reverse (f, NdField; NdFields) | { | static if (f) | { | static if (hasShape!(NdFields[f])) | { | auto s = _fields[f].shape; | } | else | { | size_t[1] s; | s[0] = _fields[f].length; | } | static if (N > 1) | { | foreach(i; Iota!N) | { | ind[f][i] = indexes[i] % s[i]; | indexes[i] /= s[i]; | } | } | else | { | ind[f] = indexes[0] % s[0]; | indexes[0] /= s[0]; | } | } | else | { | static if (N > 1) | { | foreach(i; Iota!N) | ind[f][i] = indexes[i]; | } | else | { | ind[f] = indexes[0]; | } | } | } | return mixin("fun(" ~ _kr_indexes!(ind.length) ~ ")"); | } |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/ndfield.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-internal.lst |module mir.ndslice.internal; | |import mir.internal.utility : isFloatingPoint, Iota; |import mir.math.common: optmath; |import mir.ndslice.iterator: IotaIterator; |import mir.ndslice.slice; |import mir.primitives; |import std.meta; |import std.traits; | |@optmath: | |template ConstIfPointer(T) |{ | static if (isPointer!T) | alias ConstIfPointer = const(PointerTarget!T)*; | else | alias ConstIfPointer = T; |} | |public import mir.utility: _expect; | |struct RightOp(string op, T) |{ | T value; | | auto lightConst()() const @property | { | import mir.qualifier; | return RightOp!(op, LightConstOf!T)(value.lightConst); | } | | auto lightImmutable()() immutable @property | { | import mir.qualifier; | return RightOp!(op, LightImmutableOf!T)(value.lightImmutable); | } | | this()(ref T v) { value = v; } | this()(T v) { value = v; } | auto ref opCall(F)(auto ref F right) | { | static if (op == "^^" && isNumeric!T && isFloatingPoint!F) | { | import mir.math.common: pow; | return pow(value, right); | } | else | { | return mixin("value " ~ op ~ " right"); | } | } |} | |struct LeftOp(string op, T) |{ | T value; | | auto lightConst()() const @property | { | import mir.qualifier; | return LeftOp!(op, 
LightConstOf!T)(value.lightConst); | } | | auto lightImmutable()() immutable @property | { | import mir.qualifier; | return LeftOp!(op, LightImmutableOf!T)(value.lightImmutable); | } | | this()(ref T v) { value = v; } | this()(T v) { value = v; } | auto ref opCall(F)(auto ref F left) | { | static if (op == "^^" && isFloatingPoint!T && isNumeric!F) | { | import mir.math.common: pow; | return pow(left, value); | } | else | { | return mixin("left " ~ op ~ " value"); | } | } |} | |private template _prod(size_t len) | if (len) |{ | static if (len == 1) | enum _prod = "elems[0]"; | else | { | enum i = len - 1; | enum _prod = ._prod!i ~ " * elems[" ~ i.stringof ~ "]"; | } |} | |auto product(Elems...)(auto ref Elems elems) |{ | return mixin(_prod!(Elems.length)); |} | | |template _iotaArgs(size_t length, string prefix, string suffix) |{ | static if (length) | { | enum i = length - 1; | enum _iotaArgs = _iotaArgs!(i, prefix, suffix) ~ prefix ~ i.stringof ~ suffix; | } | else | enum _iotaArgs = ""; |} | |alias _IteratorOf(T : Slice!(Iterator, N, kind), Iterator, size_t N, SliceKind kind) = Iterator; | |E maxElem(E)(E[] arr...) |{ | auto ret = Unqual!E.min; | foreach(e; arr) | if (e > ret) | ret = e; | return ret; |} | |E minElem(E)(E[] arr...) |{ | auto ret = Unqual!E.max; | foreach(e; arr) | if (e < ret) | ret = e; | return ret; |} | |size_t sum()(size_t[] packs) |{ | size_t s; | foreach(pack; packs) | s += pack; | return s; |} | | |size_t[] reverse()(size_t[] ar) |{ | foreach(i, e; ar[0..$/2]) | { | ar[i] = ar[$ - i - 1]; | ar[$ - i - 1] = e; | } | return ar; |} | |enum indexError(size_t pos, size_t N) = | "index at position " ~ pos.stringof | ~ " from the range [0 .." ~ N.stringof ~ ")" | ~ " must be less than corresponding length."; | |enum string tailErrorMessage( | string fun = __FUNCTION__, | string pfun = __PRETTY_FUNCTION__) = |" |- - - |Error in function |" ~ fun ~ " |- - - |Function prototype |" ~ pfun ~ " |_____"; | |mixin template DimensionsCountCTError() |{ | static assert(Dimensions.length <= N, | "Dimensions list length = " ~ Dimensions.length.stringof | ~ " should be less than or equal to N = " ~ N.stringof | ~ tailErrorMessage!()); |} | |enum DimensionsCountRTError = q{ | assert(dimensions.length <= N, | "Dimensions list length should be less than or equal to N = " ~ N.stringof | ~ tailErrorMessage!()); |}; | |mixin template DimensionCTError() |{ | static assert(dimension >= 0, | "dimension = " ~ dimension.stringof ~ " at position " | ~ i.stringof ~ " should be greater than or equal to 0" | ~ tailErrorMessage!()); | static assert(dimension < N, | "dimension = " ~ dimension.stringof ~ " at position " | ~ i.stringof ~ " should be less than N = " ~ N.stringof | ~ tailErrorMessage!()); | static assert(dimension < slice.S, | "dimension = " ~ dimension.stringof ~ " at position " | ~ i.stringof ~ " should be less than " ~ (slice.S).stringof ~ ". " | ~ "`universal` and `canonical` from `mir.ndslice.topology` can be used to relax slice kind." | ~ tailErrorMessage!()); |} | |enum DimensionRTError = q{ | static if (isSigned!(typeof(dimension))) | assert(dimension >= 0, "dimension should be greater than or equal to 0" | ~ tailErrorMessage!()); | assert(dimension < N, "dimension should be less than N = " ~ N.stringof | ~ tailErrorMessage!()); | assert(dimension < slice.S, | "dimension should be less than " ~ slice.S.stringof ~ ". " | ~ "`universal` and `canonical` from `mir.ndslice.topology` can be used to relax slice kind." | ~ tailErrorMessage!()); |}; | |private alias IncFront(Seq...) 
= AliasSeq!(Seq[0] + 1, Seq[1 .. $]); | |private alias DecFront(Seq...) = AliasSeq!(Seq[0] - 1, Seq[1 .. $]); | |private enum bool isNotZero(alias t) = t != 0; | |alias NSeqEvert(Seq...) = Filter!(isNotZero, DecFront!(Reverse!(IncFront!Seq))); | |//alias Parts(Seq...) = DecAll!(IncFront!Seq); | |alias Snowball(Seq...) = AliasSeq!(size_t.init, SnowballImpl!(size_t.init, Seq)); | |private template SnowballImpl(size_t val, Seq...) |{ | static if (Seq.length == 0) | alias SnowballImpl = AliasSeq!(); | else | alias SnowballImpl = AliasSeq!(Seq[0] + val, SnowballImpl!(Seq[0] + val, Seq[1 .. $])); |} | |private template DecAll(Seq...) |{ | static if (Seq.length == 0) | alias DecAll = AliasSeq!(); | else | alias DecAll = AliasSeq!(Seq[0] - 1, DecAll!(Seq[1 .. $])); |} | |//template SliceFromSeq(Range, Seq...) |//{ |// static if (Seq.length == 0) |// alias SliceFromSeq = Range; |// else |// { |// import mir.ndslice.slice : Slice; |// alias SliceFromSeq = SliceFromSeq!(Slice!(Seq[$ - 1], Range), Seq[0 .. $ - 1]); |// } |//} | |template DynamicArrayDimensionsCount(T) |{ | static if (isDynamicArray!T) | enum size_t DynamicArrayDimensionsCount = 1 + DynamicArrayDimensionsCount!(typeof(T.init[0])); | else | enum size_t DynamicArrayDimensionsCount = 0; |} | |bool isPermutation(size_t N)(auto ref in size_t[N] perm) |{ | int[N] mask; | return isValidPartialPermutationImpl(perm, mask); |} | |version(mir_test) unittest |{ | assert(isPermutation([0, 1])); | // all numbers 0..N-1 need to be part of the permutation | assert(!isPermutation([1, 2])); | assert(!isPermutation([0, 2])); | // duplicates are not allowed | assert(!isPermutation([0, 1, 1])); | | size_t[0] emptyArr; | // empty permutations are not allowed either | assert(!isPermutation(emptyArr)); |} | |bool isValidPartialPermutation(size_t N)(in size_t[] perm) |{ | int[N] mask; | return isValidPartialPermutationImpl(perm, mask); |} | |private bool isValidPartialPermutationImpl(size_t N)(in size_t[] perm, ref int[N] mask) |{ | if (perm.length == 0) | return false; | foreach (j; perm) | { | if (j >= N) | return false; | if (mask[j]) //duplicate | return false; | mask[j] = true; | } | return true; |} | |enum toSize_t(size_t i) = i; |enum isSize_t(alias i) = is(typeof(i) == size_t); |enum isIndex(I) = is(I : size_t); |template is_Slice(S) |{ | static if (is(S : Slice!(IotaIterator!I), I)) | enum is_Slice = __traits(isIntegral, I); | else | enum is_Slice = false; |} | |alias Repeat(size_t N : 0, T...) = AliasSeq!(); | |private enum isReference(P) = | hasIndirections!P | || isFunctionPointer!P | || is(P == interface); | |enum hasReference(T) = anySatisfy!(isReference, RepresentationTypeTuple!T); | |alias ImplicitlyUnqual(T) = Select!(isImplicitlyConvertible!(T, Unqual!T), Unqual!T, T); |alias ImplicitlyUnqual(T : T*) = T*; | |size_t lengthsProduct(size_t N)(auto ref in size_t[N] lengths) |{ 0000000| size_t length = lengths[0]; | foreach (i; Iota!(1, N)) 0000000| length *= lengths[i]; 0000000| return length; |} | |pure nothrow version(mir_test) unittest |{ | const size_t[3] lengths = [3, 4, 5]; | assert(lengthsProduct(lengths) == 60); | assert(lengthsProduct([3, 4, 5]) == 60); |} | | |package(mir) template frontOf(args...) |{ | static if (args.length == 0) | enum frontOf = args; | else | { | alias arg = args[0]; | @optmath @property auto ref ls()() | { | return arg.front; | } | alias frontOf = AliasSeq!(ls, frontOf!(args[1..$])); | } |} | |package(mir) template frontOfDim(size_t dim, args...) 
|{ | static if (args.length == 0) | enum frontOfDim = args; | else | { | alias arg = args[0]; | @optmath @property auto ref ls() | { | return arg.front!dim; | } | alias frontOfDim = AliasSeq!(ls, frontOfDim!(dim, args[1..$])); | } |} | |package(mir) template selectFrontOf(alias input, args...) |{ | static if (args.length == 0) | enum selectFrontOf = args; | else | { | alias arg = args[0]; | @optmath @property auto ref ls()() | { | return arg.lightScope.selectFront!0(input); | } | alias selectFrontOf = AliasSeq!(ls, selectFrontOf!(input, args[1..$])); | } |} | |package(mir) template selectBackOf(alias input, args...) |{ | static if (args.length == 0) | enum selectBackOf = args; | else | { | alias arg = args[0]; | @optmath @property auto ref ls()() | { | return arg.selectBack!0(input); | } | alias selectBackOf = AliasSeq!(ls, selectBackOf!(input, args[1..$])); | } |} | |package(mir) template frontSelectFrontOf(alias input, args...) |{ | static if (args.length == 0) | enum frontSelectFrontOf = args; | else | { | alias arg = args[0]; | @optmath @property auto ref ls()() | { | return arg.lightScope.front.selectFront!0(input); | } | alias frontSelectFrontOf = AliasSeq!(ls, frontSelectFrontOf!(input, args[1..$])); | } |} | |package(mir) template frontSelectBackOf(alias input, args...) |{ | static if (args.length == 0) | enum frontSelectBackOf = args; | else | { | alias arg = args[0]; | @optmath @property auto ref ls()() | { | return arg.lightScope.front.selectBack!0(input); | } | alias frontSelectBackOf = AliasSeq!(ls, frontSelectBackOf!(input, args[1..$])); | } |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/internal.d is 0% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-topology.lst |/++ |This is a submodule of $(MREF mir,ndslice). | |Selectors create new views and iteration patterns over the same data, without copying. | |$(BOOKTABLE $(H2 SliceKind Selectors), |$(TR $(TH Function Name) $(TH Description)) | |$(T2 universal, Converts a slice to universal $(SUBREF slice, SliceKind).) |$(T2 canonical, Converts a slice to canonical $(SUBREF slice, SliceKind).) |$(T2 assumeCanonical, Converts a slice to canonical $(SUBREF slice, SliceKind) (unsafe).) |$(T2 assumeContiguous, Converts a slice to contiguous $(SUBREF slice, SliceKind) (unsafe).) | |) | |$(BOOKTABLE $(H2 Sequence Selectors), |$(TR $(TH Function Name) $(TH Description)) | |$(T2 cycle, Cycle repeates 1-dimensional field/range/array/slice in a fixed length 1-dimensional slice) |$(T2 iota, Contiguous Slice with initial flattened (contiguous) index.) |$(T2 linspace, Evenly spaced numbers over a specified interval.) |$(T2 magic, Magic square.) |$(T2 ndiota, Contiguous Slice with initial multidimensional index.) |$(T2 repeat, Slice with identical values) |) | |. | |$(BOOKTABLE $(H2 Products), |$(TR $(TH Function Name) $(TH Description)) | |$(T2 cartesian, Cartesian product.) |$(T2 kronecker, Kronecker product.) | |) | |$(BOOKTABLE $(H2 Representation Selectors), |$(TR $(TH Function Name) $(TH Description)) | |$(T2 as, Convenience function that creates a lazy view, |where each element of the original slice is converted to a type `T`.) |$(T2 bitpack, Bitpack slice over an unsigned integral slice.) |$(T2 bitwise, Bitwise slice over an unsigned integral slice.) |$(T2 bytegroup, Groups existing slice into fixed length chunks and uses them as data store for destination type.) |$(T2 cached, Random access cache. 
It is usefull in combiation with $(LREF map) and $(LREF vmap).) |$(T2 cachedGC, Random access cache auto-allocated in GC heap. It is usefull in combiation with $(LREF map) and $(LREF vmap).) |$(T2 diff, Differences between vector elements.) |$(T2 flattened, Contiguous 1-dimensional slice of all elements of a slice.) |$(T2 map, Multidimensional functional map.) |$(T2 member, Field (element's member) projection.) |$(T2 orthogonalReduceField, Functional deep-element wise reduce of a slice composed of fields or iterators.) |$(T2 pairwise, Pairwise map for vectors.) |$(T2 pairwiseMapSubSlices, Maps pairwise indexes pairs to subslices.) |$(T2 retro, Reverses order of iteration for all dimensions.) |$(T2 slide, Sliding map for vectors.) |$(T2 stairs, Two functions to pack, unpack, and iterate triangular and symmetric matrix storage.) |$(T2 stride, Strides 1-dimensional slice.) |$(T2 subSlices, Maps indexes pairs to subslices.) |$(T2 triplets, Constructs a lazy view of triplets with `left`, `center`, and `right` members. The topology is usefull for Math and Physics.) |$(T2 unzip, Selects a slice from a zipped slice.) |$(T2 zip, Zips slices into a slice of refTuples.) |) | | |$(BOOKTABLE $(H2 Shape Selectors), |$(TR $(TH Function Name) $(TH Description)) | |$(T2 blocks, n-dimensional slice composed of n-dimensional non-overlapping blocks. If the slice has two dimensions, it is a block matrix.) |$(T2 diagonal, 1-dimensional slice composed of diagonal elements) |$(T2 reshape, New slice with changed dimensions for the same data) |$(T2 windows, n-dimensional slice of n-dimensional overlapping windows. If the slice has two dimensions, it is a sliding window.) | |) | |$(BOOKTABLE $(H2 Subspace Selectors), |$(TR $(TH Function Name) $(TH Description)) | |$(T2 pack , Returns slice of slices.) |$(T2 ipack , Returns slice of slices.) |$(T2 unpack , Merges two hight dimension packs. See also $(SUBREF fuse, fuse).) |$(T2 evertPack, Reverses dimension packs.) |$(T2 byDim , Returns a slice that can be iterated by dimension. Transposes dimensions on top and then packs them.) | |) | |Subspace selectors serve to generalize and combine other selectors easily. |For a slice of `Slice!(Iterator, N, kind)` type `slice.pack!K` creates a slice of |slices of `Slice!(kind, [N - K, K], Iterator)` type by packing |the last `K` dimensions of the top dimension pack, |and the type of element of $(LREF flattened) is `Slice!(Iterator, K)`. |Another way to use $(LREF pack) is transposition of dimension packs using |$(LREF evertPack). |Examples of use of subspace selectors are available for selectors, |$(SUBREF slice, Slice.shape), and $(SUBREF slice, Slice.elementCount). | | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Sponsors: Part of this work has been sponsored by $(LINK2 http://symmetryinvestments.com, Symmetry Investments) and Kaleidic Associates. 
| | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |T4=$(TR $(TDNW $(LREF $1)) $(TD $2) $(TD $3) $(TD $4)) |+/ |module mir.ndslice.topology; | |import std.meta; | |import mir.internal.utility; |import mir.math.common: optmath; |import mir.ndslice.field; |import mir.ndslice.internal; |import mir.ndslice.iterator; |import mir.ndslice.ndfield; |import mir.ndslice.slice; |import mir.primitives; |import mir.qualifier; |import mir.utility: min; | |private immutable choppedExceptionMsg = "bounds passed to chopped are out of sliceable bounds."; |version (D_Exceptions) private immutable choppedException = new Exception(choppedExceptionMsg); | |@optmath: | |/++ |Converts a slice to universal kind. | |Params: | slice = a slice |Returns: | universal slice |See_also: | $(LREF canonical), | $(LREF assumeCanonical), | $(LREF assumeContiguous). |+/ |auto universal(Iterator, size_t N, SliceKind kind, Labels...)(Slice!(Iterator, N, kind, Labels) slice) |{ | static if (kind == Universal) | { | return slice; | } | else | static if (is(Iterator : RetroIterator!It, It)) | { | return slice.retro.universal.retro; | } | else | { | alias Ret = Slice!(Iterator, N, Universal, Labels); | size_t[Ret.N] lengths; | auto strides = sizediff_t[Ret.S].init; | foreach (i; Iota!(slice.N)) | lengths[i] = slice._lengths[i]; | static if (kind == Canonical) | { | foreach (i; Iota!(slice.S)) | strides[i] = slice._strides[i]; | strides[$-1] = 1; | } | else | { | ptrdiff_t ball = 1; | foreach_reverse (i; Iota!(Ret.S)) | { | strides[i] = ball; | static if (i) | ball *= slice._lengths[i]; | } | } | return Ret(lengths, strides, slice._iterator, slice._labels); | } |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | auto slice = iota(2, 3).universal; | assert(slice == [[0, 1, 2], [3, 4, 5]]); | assert(slice._lengths == [2, 3]); | assert(slice._strides == [3, 1]); |} | |@safe pure nothrow |version(mir_test) unittest |{ | auto slice = iota(2, 3).canonical.universal; | assert(slice == [[0, 1, 2], [3, 4, 5]]); | assert(slice._lengths == [2, 3]); | assert(slice._strides == [3, 1]); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.allocation: slice; | | auto dataframe = slice!(double, int, string)(2, 3); | dataframe.label[] = [1, 2]; | dataframe.label!1[] = ["Label1", "Label2", "Label3"]; | | auto universaldf = dataframe.universal; | assert(universaldf._lengths == [2, 3]); | assert(universaldf._strides == [3, 1]); | | assert(is(typeof(universaldf) == | Slice!(double*, 2, Universal, int*, string*))); | assert(universaldf.label!0[0] == 1); | assert(universaldf.label!1[1] == "Label2"); |} | |/++ |Converts a slice to canonical kind. | |Params: | slice = contiguous or canonical slice |Returns: | canonical slice |See_also: | $(LREF universal), | $(LREF assumeCanonical), | $(LREF assumeContiguous). |+/ |Slice!(Iterator, N, N == 1 ? Contiguous : Canonical, Labels) | canonical | (Iterator, size_t N, SliceKind kind, Labels...) 
| (Slice!(Iterator, N, kind, Labels) slice) | if (kind == Contiguous || kind == Canonical) |{ | static if (kind == Canonical || N == 1) | return slice; | else | { | alias Ret = typeof(return); | size_t[Ret.N] lengths; | auto strides = sizediff_t[Ret.S].init; | foreach (i; Iota!(slice.N)) | lengths[i] = slice._lengths[i]; | ptrdiff_t ball = 1; | foreach_reverse (i; Iota!(Ret.S)) | { | ball *= slice._lengths[i + 1]; | strides[i] = ball; | } | return Ret(lengths, strides, slice._iterator, slice._labels); | } |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | auto slice = iota(2, 3).canonical; | assert(slice == [[0, 1, 2], [3, 4, 5]]); | assert(slice._lengths == [2, 3]); | assert(slice._strides == [3]); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.allocation: slice; | | auto dataframe = slice!(double, int, string)(2, 3); | dataframe.label[] = [1, 2]; | dataframe.label!1[] = ["Label1", "Label2", "Label3"]; | | auto canonicaldf = dataframe.canonical; | assert(canonicaldf._lengths == [2, 3]); | assert(canonicaldf._strides == [3]); | | assert(is(typeof(canonicaldf) == | Slice!(double*, 2, Canonical, int*, string*))); | assert(canonicaldf.label!0[0] == 1); | assert(canonicaldf.label!1[1] == "Label2"); |} | |/++ |Converts a slice to canonical kind (unsafe). | |Params: | slice = a slice |Returns: | canonical slice |See_also: | $(LREF universal), | $(LREF canonical), | $(LREF assumeContiguous). |+/ |Slice!(Iterator, N, Canonical, Labels) | assumeCanonical | (Iterator, size_t N, SliceKind kind, Labels...) | (Slice!(Iterator, N, kind, Labels) slice) |{ | static if (kind == Contiguous) | return slice.canonical; | else | static if (kind == Canonical) | return slice; | else | { | alias Ret = typeof(return); | size_t[Ret.N] lengths; | auto strides = sizediff_t[Ret.S].init; | foreach (i; Iota!(slice.N)) | lengths[i] = slice._lengths[i]; | foreach (i; Iota!(Ret.S)) | strides[i] = slice._strides[i]; | return Ret(lengths, strides, slice._iterator, slice._labels); | } |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | auto slice = iota(2, 3).universal.assumeCanonical; | assert(slice == [[0, 1, 2], [3, 4, 5]]); | assert(slice._lengths == [2, 3]); | assert(slice._strides == [3]); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.allocation: slice; | | auto dataframe = slice!(double, int, string)(2, 3); | dataframe.label[] = [1, 2]; | dataframe.label!1[] = ["Label1", "Label2", "Label3"]; | | auto assmcanonicaldf = dataframe.assumeCanonical; | assert(assmcanonicaldf._lengths == [2, 3]); | assert(assmcanonicaldf._strides == [3]); | | assert(is(typeof(assmcanonicaldf) == | Slice!(double*, 2, Canonical, int*, string*))); | assert(assmcanonicaldf.label!0[0] == 1); | assert(assmcanonicaldf.label!1[1] == "Label2"); |} | |/++ |Converts a slice to contiguous kind (unsafe). | |Params: | slice = a slice |Returns: | canonical slice |See_also: | $(LREF universal), | $(LREF canonical), | $(LREF assumeCanonical). |+/ |Slice!(Iterator, N, Contiguous, Labels) | assumeContiguous | (Iterator, size_t N, SliceKind kind, Labels...) 
| (Slice!(Iterator, N, kind, Labels) slice) |{ | static if (kind == Contiguous) | return slice; | else | { | return typeof(return)(slice._lengths, slice._iterator, slice._labels); | } |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | auto slice = iota(2, 3).universal.assumeContiguous; | assert(slice == [[0, 1, 2], [3, 4, 5]]); | assert(slice._lengths == [2, 3]); | static assert(slice._strides.length == 0); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.allocation: slice; | | auto dataframe = slice!(double, int, string)(2, 3); | dataframe.label[] = [1, 2]; | dataframe.label!1[] = ["Label1", "Label2", "Label3"]; | | auto assmcontdf = dataframe.canonical.assumeContiguous; | assert(assmcontdf._lengths == [2, 3]); | static assert(assmcontdf._strides.length == 0); | | assert(is(typeof(assmcontdf) == | Slice!(double*, 2, Contiguous, int*, string*))); | assert(assmcontdf.label!0[0] == 1); | assert(assmcontdf.label!1[1] == "Label2"); |} | |/++ |+/ |auto assumeFieldsHaveZeroShift(Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | if (__traits(hasMember, Iterator, "assumeFieldsHaveZeroShift")) |{ | return slice._iterator.assumeFieldsHaveZeroShift.slicedField(slice._lengths); |} | |/++ |Creates a packed slice, i.e. slice of slices. |Packs the last `P` dimensions. |The function does not allocate any data. | |Params: | P = size of dimension pack | slice = a slice to pack |Returns: | `slice.pack!p` returns `Slice!(kind, [N - p, p], Iterator)` |See_also: $(LREF ipack) |+/ |Slice!(SliceIterator!(Iterator, P, P == 1 && kind == Canonical ? Contiguous : kind), N - P, Universal) |pack(size_t P, Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | if (P && P < N) |{ | return slice.ipack!(N - P); |} | |/// |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice : sliced, Slice; | | auto a = iota(3, 4, 5, 6); | auto b = a.pack!2; | | static immutable res1 = [3, 4]; | static immutable res2 = [5, 6]; | assert(b.shape == res1); | assert(b[0, 0].shape == res2); | assert(a == b.unpack); | assert(a.pack!2 == b); | static assert(is(typeof(b) == typeof(a.pack!2))); |} | |/++ |Creates a packed slice, i.e. slice of slices. |Packs the last `N - P` dimensions. |The function does not allocate any data. | |Params: | + = size of dimension pack | slice = a slice to pack |See_also: $(LREF pack) |+/ |Slice!(SliceIterator!(Iterator, N - P, N - P == 1 && kind == Canonical ? Contiguous : kind), P, Universal) |ipack(size_t P, Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | if (P && P < N) |{ | alias Ret = typeof(return); | alias It = Ret.Iterator; | alias EN = It.Element.N; | alias ES = It.Element.S; | auto sl = slice.universal; | static if (It.Element.kind == Contiguous) | return Ret( | cast( size_t[P]) sl._lengths[0 .. P], | cast(ptrdiff_t[P]) sl._strides[0 .. P], | It( | cast(size_t[EN]) sl._lengths[P .. $], | sl._iterator)); | else | return Ret( | cast( size_t[P]) sl._lengths[0 .. P], | cast(ptrdiff_t[P]) sl._strides[0 .. P], | It( | cast( size_t[EN]) sl._lengths[P .. $], | cast(ptrdiff_t[ES]) sl._strides[P .. 
$ - (It.Element.kind == Canonical)], | sl._iterator)); |} | |/// |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice : sliced, Slice; | | auto a = iota(3, 4, 5, 6); | auto b = a.ipack!2; | | static immutable res1 = [3, 4]; | static immutable res2 = [5, 6]; | assert(b.shape == res1); | assert(b[0, 0].shape == res2); | assert(a.ipack!2 == b); | static assert(is(typeof(b) == typeof(a.ipack!2))); |} | |/++ |Unpacks a packed slice. | |The functions does not allocate any data. | |Params: | slice = packed slice |Returns: | unpacked slice, that is a view on the same data. | |See_also: $(LREF pack), $(LREF evertPack) |+/ |Slice!(Iterator, N + M, min(innerKind, Canonical)) | unpack(Iterator, size_t M, SliceKind innerKind, size_t N, SliceKind outerKind) | (Slice!(SliceIterator!(Iterator, M, innerKind), N, outerKind) slice) |{ | alias Ret = typeof(return); | size_t[N + M] lengths; | auto strides = sizediff_t[Ret.S].init; | auto outerStrides = slice.strides; | auto innerStrides = Slice!(Iterator, M, innerKind)( | slice._iterator._structure, | slice._iterator._iterator, | ).strides; | foreach(i; Iota!N) | lengths[i] = slice._lengths[i]; | foreach(i; Iota!N) | strides[i] = outerStrides[i]; | foreach(i; Iota!M) | lengths[N + i] = slice._iterator._structure[0][i]; | foreach(i; Iota!(Ret.S - N)) | strides[N + i] = innerStrides[i]; | return Ret(lengths, strides, slice._iterator._iterator); |} | |/++ |Reverses the order of dimension packs. |This function is used in a functional pipeline with other selectors. | |Params: | slice = packed slice |Returns: | packed slice | |See_also: $(LREF pack), $(LREF unpack) |+/ |Slice!(SliceIterator!(Iterator, N, outerKind), M, innerKind) |evertPack(Iterator, size_t M, SliceKind innerKind, size_t N, SliceKind outerKind) | (Slice!(SliceIterator!(Iterator, M, innerKind), N, outerKind) slice) |{ | return typeof(return)( | slice._iterator._structure, | typeof(return).Iterator( | slice._structure, | slice._iterator._iterator)); |} | |/// |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.dynamic : transposed; | auto slice = iota(3, 4, 5, 6, 7, 8, 9, 10, 11).universal; | assert(slice | .pack!2 | .evertPack | .unpack | == slice.transposed!( | slice.shape.length-2, | slice.shape.length-1)); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice: sliced; | import mir.ndslice.allocation: slice; | static assert(is(typeof( | slice!int(6) | .sliced(1,2,3) | .pack!1 | .evertPack() | ) | == Slice!(SliceIterator!(int*, 2, Universal), 1))); |} | | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | auto a = iota(3, 4, 5, 6, 7, 8, 9, 10, 11); | auto b = a.pack!2.unpack; | static assert(is(typeof(a.canonical) == typeof(b))); | assert(a == b); |} | |/++ |Returns a slice, the elements of which are equal to the initial flattened index value. | |Params: | N = dimension count | lengths = list of dimension lengths | start = value of the first element in a slice (optional for integer `I`) | stride = value of the stride between elements (optional) |Returns: | n-dimensional slice composed of indexes |See_also: $(LREF ndiota) |+/ |Slice!(IotaIterator!I, N) |iota | (I = sizediff_t, size_t N)(size_t[N] lengths...) 
| if (__traits(isIntegral, I)) |{ | import mir.ndslice.slice : sliced; | return IotaIterator!I(I.init).sliced(lengths); |} | |///ditto |Slice!(IotaIterator!sizediff_t, N) |iota | (size_t N)(size_t[N] lengths, sizediff_t start) |{ | import mir.ndslice.slice : sliced; | return IotaIterator!sizediff_t(start).sliced(lengths); |} | |///ditto |Slice!(StrideIterator!(IotaIterator!sizediff_t), N) |iota | (size_t N)(size_t[N] lengths, sizediff_t start, size_t stride) |{ | import mir.ndslice.slice : sliced; | return StrideIterator!(IotaIterator!sizediff_t)(stride, IotaIterator!sizediff_t(start)).sliced(lengths); |} | |///ditto |template iota(I) | if (__traits(isIntegral, I)) |{ | /// | Slice!(IotaIterator!I, N) | iota | (size_t N)(size_t[N] lengths, I start) | if (__traits(isIntegral, I)) | { | import mir.ndslice.slice : sliced; | return IotaIterator!I(start).sliced(lengths); | } | | ///ditto | Slice!(StrideIterator!(IotaIterator!I), N) | iota | (size_t N)(size_t[N] lengths, I start, size_t stride) | if (__traits(isIntegral, I)) | { | import mir.ndslice.slice : sliced; | return StrideIterator!(IotaIterator!I)(stride, IotaIterator!I(start)).sliced(lengths); | } |} | |///ditto |Slice!(IotaIterator!I, N) |iota | (I, size_t N)(size_t[N] lengths, I start) | if (is(I P : P*)) |{ | import mir.ndslice.slice : sliced; | return IotaIterator!I(start).sliced(lengths); |} | |///ditto |Slice!(StrideIterator!(IotaIterator!I), N) |iota | (I, size_t N)(size_t[N] lengths, I start, size_t stride) | if (is(I P : P*)) |{ | import mir.ndslice.slice : sliced; | return StrideIterator!(IotaIterator!I)(stride, IotaIterator!I(start)).sliced(lengths); |} | |/// |@safe pure nothrow @nogc version(mir_test) unittest |{ | auto slice = iota(2, 3); | static immutable array = | [[0, 1, 2], | [3, 4, 5]]; | | assert(slice == array); | | static assert(is(DeepElementType!(typeof(slice)) == sizediff_t)); |} | |/// |pure nothrow @nogc |version(mir_test) unittest |{ | int[6] data; | auto slice = iota([2, 3], data.ptr); | assert(slice[0, 0] == data.ptr); | assert(slice[0, 1] == data.ptr + 1); | assert(slice[1, 0] == data.ptr + 3); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | auto im = iota([10, 5], 100); | assert(im[2, 1] == 111); // 100 + 2 * 5 + 1 | | //slicing works correctly | auto cm = im[1 .. $, 3 .. $]; | assert(cm[2, 1] == 119); // 119 = 100 + (1 + 2) * 5 + (3 + 1) |} | |/// `iota` with step |@safe pure nothrow version(mir_test) unittest |{ | auto sl = iota([2, 3], 10, 10); | | assert(sl == [[10, 20, 30], | [40, 50, 60]]); |} | |/++ |Returns a 1-dimensional slice over the main diagonal of an n-dimensional slice. |`diagonal` can be generalized with other selectors such as |$(LREF blocks) (diagonal blocks) and $(LREF windows) (multi-diagonal slice). | |Params: | slice = input slice |Returns: | 1-dimensional slice composed of diagonal elements |See_also: $(LREF antidiagonal) |+/ |Slice!(Iterator, 1, N == 1 ? 
kind : Universal) | diagonal | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) |{ | static if (N == 1) | { | return slice; | } | else | { | alias Ret = typeof(return); | size_t[Ret.N] lengths; | auto strides = sizediff_t[Ret.S].init; | lengths[0] = slice._lengths[0]; | foreach (i; Iota!(1, N)) | if (lengths[0] > slice._lengths[i]) | lengths[0] = slice._lengths[i]; | foreach (i; Iota!(1, Ret.N)) | lengths[i] = slice._lengths[i + N - 1]; | auto rstrides = slice.strides; | strides[0] = rstrides[0]; | foreach (i; Iota!(1, N)) | strides[0] += rstrides[i]; | foreach (i; Iota!(1, Ret.S)) | strides[i] = rstrides[i + N - 1]; | return Ret(lengths, strides, slice._iterator); | } |} | |/// Matrix, main diagonal |@safe @nogc pure nothrow version(mir_test) unittest |{ | // ------- | // | 0 1 2 | | // | 3 4 5 | | // ------- | //-> | // | 0 4 | | static immutable d = [0, 4]; | assert(iota(2, 3).diagonal == d); |} | |/// Non-square matrix |@safe pure nothrow version(mir_test) unittest |{ | // ------- | // | 0 1 | | // | 2 3 | | // | 4 5 | | // ------- | //-> | // | 0 3 | | | assert(iota(3, 2).diagonal == iota([2], 0, 3)); |} | |/// Loop through diagonal |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.allocation; | | auto slice = slice!int(3, 3); | int i; | foreach (ref e; slice.diagonal) | e = ++i; | assert(slice == [ | [1, 0, 0], | [0, 2, 0], | [0, 0, 3]]); |} | |/// Matrix, subdiagonal |@safe @nogc pure nothrow |version(mir_test) unittest |{ | // ------- | // | 0 1 2 | | // | 3 4 5 | | // ------- | //-> | // | 1 5 | | static immutable d = [1, 5]; | auto a = iota(2, 3).canonical; | a.popFront!1; | assert(a.diagonal == d); |} | |/// 3D, main diagonal |@safe @nogc pure nothrow version(mir_test) unittest |{ | // ----------- | // | 0 1 2 | | // | 3 4 5 | | // - - - - - - | // | 6 7 8 | | // | 9 10 11 | | // ----------- | //-> | // | 0 10 | | static immutable d = [0, 10]; | assert(iota(2, 2, 3).diagonal == d); |} | |/// 3D, subdiagonal |@safe @nogc pure nothrow version(mir_test) unittest |{ | // ----------- | // | 0 1 2 | | // | 3 4 5 | | // - - - - - - | // | 6 7 8 | | // | 9 10 11 | | // ----------- | //-> | // | 1 11 | | static immutable d = [1, 11]; | auto a = iota(2, 2, 3).canonical; | a.popFront!2; | assert(a.diagonal == d); |} | |/// 3D, diagonal plain |@nogc @safe pure nothrow |version(mir_test) unittest |{ | // ----------- | // | 0 1 2 | | // | 3 4 5 | | // | 6 7 8 | | // - - - - - - | // | 9 10 11 | | // | 12 13 14 | | // | 15 16 17 | | // - - - - - - | // | 18 20 21 | | // | 22 23 24 | | // | 24 25 26 | | // ----------- | //-> | // ----------- | // | 0 4 8 | | // | 9 13 17 | | // | 18 23 26 | | // ----------- | | static immutable d = | [[ 0, 4, 8], | [ 9, 13, 17], | [18, 22, 26]]; | | auto slice = iota(3, 3, 3) | .pack!2 | .evertPack | .diagonal | .evertPack; | | assert(slice == d); |} | |/++ |Returns a 1-dimensional slice over the main antidiagonal of an 2D-dimensional slice. |`antidiagonal` can be generalized with other selectors such as |$(LREF blocks) (diagonal blocks) and $(LREF windows) (multi-diagonal slice). | |It runs from the top right corner to the bottom left corner. | |Pseudo_code: |------ |auto antidiagonal = slice.dropToHypercube.reversed!1.diagonal; |------ | |Params: | slice = input slice |Returns: | 1-dimensional slice composed of antidiagonal elements. 
|See_also: $(LREF diagonal) |+/ |Slice!(Iterator, 1, Universal) | antidiagonal | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | if (N == 2) |{ | import mir.ndslice.dynamic : dropToHypercube, reversed; | return slice.dropToHypercube.reversed!1.diagonal; |} | |/// |@safe @nogc pure nothrow version(mir_test) unittest |{ | // ----- | // | 0 1 | | // | 2 3 | | // ----- | //-> | // | 1 2 | | static immutable c = [1, 2]; | import std.stdio; | assert(iota(2, 2).antidiagonal == c); |} | |/// |@safe @nogc pure nothrow version(mir_test) unittest |{ | // ------- | // | 0 1 2 | | // | 3 4 5 | | // ------- | //-> | // | 1 3 | | static immutable d = [1, 3]; | assert(iota(2, 3).antidiagonal == d); |} | |/++ |Returns an n-dimensional slice of n-dimensional non-overlapping blocks. |`blocks` can be generalized with other selectors. |For example, `blocks` in combination with $(LREF diagonal) can be used to get a slice of diagonal blocks. |For overlapped blocks, combine $(LREF windows) with $(SUBREF dynamic, strided). | |Params: | N = dimension count | slice = slice to be split into blocks | rlengths_ = dimensions of block, residual blocks are ignored |Returns: | packed `N`-dimensional slice composed of `N`-dimensional slices | |See_also: $(SUBREF chunks, ._chunks) |+/ |Slice!(SliceIterator!(Iterator, N, N == 1 ? Universal : min(kind, Canonical)), N, Universal) | blocks | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice, size_t[N] rlengths_...) |in |{ | foreach (i, length; rlengths_) | assert(length > 0, "length of dimension = " ~ i.stringof ~ " must be positive" | ~ tailErrorMessage!()); |} |do |{ | size_t[N] lengths; | size_t[N] rlengths = rlengths_; | sizediff_t[N] strides; | foreach (dimension; Iota!N) | lengths[dimension] = slice._lengths[dimension] / rlengths[dimension]; | auto rstrides = slice.strides; | foreach (i; Iota!N) | { | strides[i] = rstrides[i]; | if (lengths[i]) //do not remove `if (...)` | strides[i] *= rlengths[i]; | } | return typeof(return)( | lengths, | strides, | typeof(return).Iterator( | rlengths, | rstrides[0 .. 
typeof(return).DeepElement.S], | slice._iterator)); |} | |/// |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.allocation; | auto slice = slice!int(5, 8); | auto blocks = slice.blocks(2, 3); | int i; | foreach (blocksRaw; blocks) | foreach (block; blocksRaw) | block[] = ++i; | | assert(blocks == | [[[[1, 1, 1], [1, 1, 1]], | [[2, 2, 2], [2, 2, 2]]], | [[[3, 3, 3], [3, 3, 3]], | [[4, 4, 4], [4, 4, 4]]]]); | | assert( slice == | [[1, 1, 1, 2, 2, 2, 0, 0], | [1, 1, 1, 2, 2, 2, 0, 0], | | [3, 3, 3, 4, 4, 4, 0, 0], | [3, 3, 3, 4, 4, 4, 0, 0], | | [0, 0, 0, 0, 0, 0, 0, 0]]); |} | |/// Diagonal blocks |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.allocation; | auto slice = slice!int(5, 8); | auto blocks = slice.blocks(2, 3); | auto diagonalBlocks = blocks.diagonal.unpack; | | diagonalBlocks[0][] = 1; | diagonalBlocks[1][] = 2; | | assert(diagonalBlocks == | [[[1, 1, 1], [1, 1, 1]], | [[2, 2, 2], [2, 2, 2]]]); | | assert(blocks == | [[[[1, 1, 1], [1, 1, 1]], | [[0, 0, 0], [0, 0, 0]]], | [[[0, 0, 0], [0, 0, 0]], | [[2, 2, 2], [2, 2, 2]]]]); | | assert(slice == | [[1, 1, 1, 0, 0, 0, 0, 0], | [1, 1, 1, 0, 0, 0, 0, 0], | | [0, 0, 0, 2, 2, 2, 0, 0], | [0, 0, 0, 2, 2, 2, 0, 0], | | [0, 0, 0, 0, 0, 0, 0, 0]]); |} | |/// Matrix divided into vertical blocks |@safe pure version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.slice; | auto slice = slice!int(5, 13); | auto blocks = slice | .pack!1 | .evertPack | .blocks(3) | .unpack; | | int i; | foreach (block; blocks) | block[] = ++i; | | assert(slice == | [[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0], | [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0], | [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0], | [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0], | [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 0]]); |} | |/++ |Returns an n-dimensional slice of n-dimensional overlapping windows. |`windows` can be generalized with other selectors. |For example, `windows` in combination with $(LREF diagonal) can be used to get a multi-diagonal slice. | |Params: | N = dimension count | slice = slice to be iterated | rlengths = dimensions of windows |Returns: | packed `N`-dimensional slice composed of `N`-dimensional slices |+/ |Slice!(SliceIterator!(Iterator, N, N == 1 ? kind : min(kind, Canonical)), N, Universal) | windows | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice, size_t[N] rlengths...) |in |{ | foreach (i, length; rlengths) | assert(length > 0, "length of dimension = " ~ i.stringof ~ " must be positive" | ~ tailErrorMessage!()); |} |do |{ | size_t[N] rls = rlengths; | size_t[N] lengths; | foreach (dimension; Iota!N) | lengths[dimension] = slice._lengths[dimension] >= rls[dimension] ? | slice._lengths[dimension] - rls[dimension] + 1 : 0; | auto rstrides = slice.strides; | static if (typeof(return).DeepElement.S) | return typeof(return)( | lengths, | rstrides, | typeof(return).Iterator( | rls, | rstrides[0 .. 
typeof(return).DeepElement.S], | slice._iterator)); | else | return typeof(return)( | lengths, | rstrides, | typeof(return).Iterator( | rls, | slice._iterator)); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.slice; | auto slice = slice!int(5, 8); | auto windows = slice.windows(2, 3); | | int i; | foreach (windowsRaw; windows) | foreach (window; windowsRaw) | ++window[]; | | assert(slice == | [[1, 2, 3, 3, 3, 3, 2, 1], | | [2, 4, 6, 6, 6, 6, 4, 2], | [2, 4, 6, 6, 6, 6, 4, 2], | [2, 4, 6, 6, 6, 6, 4, 2], | | [1, 2, 3, 3, 3, 3, 2, 1]]); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.slice; | auto slice = slice!int(5, 8); | auto windows = slice.windows(2, 3); | windows[1, 2][] = 1; | windows[1, 2][0, 1] += 1; | windows.unpack[1, 2, 0, 1] += 1; | | assert(slice == | [[0, 0, 0, 0, 0, 0, 0, 0], | | [0, 0, 1, 3, 1, 0, 0, 0], | [0, 0, 1, 1, 1, 0, 0, 0], | | [0, 0, 0, 0, 0, 0, 0, 0], | [0, 0, 0, 0, 0, 0, 0, 0]]); |} | |/// Multi-diagonal matrix |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.slice; | auto slice = slice!int(8, 8); | auto windows = slice.windows(3, 3); | | auto multidiagonal = windows | .diagonal | .unpack; | foreach (window; multidiagonal) | window[] += 1; | | assert(slice == | [[ 1, 1, 1, 0, 0, 0, 0, 0], | [ 1, 2, 2, 1, 0, 0, 0, 0], | [ 1, 2, 3, 2, 1, 0, 0, 0], | [0, 1, 2, 3, 2, 1, 0, 0], | [0, 0, 1, 2, 3, 2, 1, 0], | [0, 0, 0, 1, 2, 3, 2, 1], | [0, 0, 0, 0, 1, 2, 2, 1], | [0, 0, 0, 0, 0, 1, 1, 1]]); |} | |/// Sliding window over matrix columns |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.slice; | auto slice = slice!int(5, 8); | auto windows = slice | .pack!1 | .evertPack | .windows(3) | .unpack; | | foreach (window; windows) | window[] += 1; | | assert(slice == | [[1, 2, 3, 3, 3, 3, 2, 1], | [1, 2, 3, 3, 3, 3, 2, 1], | [1, 2, 3, 3, 3, 3, 2, 1], | [1, 2, 3, 3, 3, 3, 2, 1], | [1, 2, 3, 3, 3, 3, 2, 1]]); |} | |/// Overlapping blocks using windows |@safe pure nothrow version(mir_test) unittest |{ | // ---------------- | // | 0 1 2 3 4 | | // | 5 6 7 8 9 | | // | 10 11 12 13 14 | | // | 15 16 17 18 19 | | // | 20 21 22 23 24 | | // ---------------- | //-> | // --------------------- | // | 0 1 2 | 2 3 4 | | // | 5 6 7 | 7 8 9 | | // | 10 11 12 | 12 13 14 | | // | - - - - - - - - - - | | // | 10 11 13 | 12 13 14 | | // | 15 16 17 | 17 18 19 | | // | 20 21 22 | 22 23 24 | | // --------------------- | | import mir.ndslice.slice; | import mir.ndslice.dynamic : strided; | | auto overlappingBlocks = iota(5, 5) | .windows(3, 3) | .universal | .strided!(0, 1)(2, 2); | | assert(overlappingBlocks == | [[[[ 0, 1, 2], [ 5, 6, 7], [10, 11, 12]], | [[ 2, 3, 4], [ 7, 8, 9], [12, 13, 14]]], | [[[10, 11, 12], [15, 16, 17], [20, 21, 22]], | [[12, 13, 14], [17, 18, 19], [22, 23, 24]]]]); |} | |version(mir_test) unittest |{ | auto w = iota(9, 9).windows(3, 3); | assert(w.front == w[0]); |} | |/++ |Error codes for $(LREF reshape). |+/ |enum ReshapeError |{ | /// No error | none, | /// Slice should be not empty | empty, | /// Total element count should be the same | total, | /// Structure is incompatible with new shape | incompatible, |} | |/++ |Returns a new slice for the same data with different dimensions. | |Params: | slice = slice to be reshaped | rlengths = list of new dimensions. One of the lengths can be set to `-1`. 
| In this case, the corresponding dimension is inferable. | err = $(LREF ReshapeError) code |Returns: | reshaped slice |+/ |Slice!(Iterator, M, kind) reshape | (Iterator, size_t N, SliceKind kind, size_t M) | (Slice!(Iterator, N, kind) slice, ptrdiff_t[M] rlengths, ref int err) |{ | static if (kind == Canonical) | { | auto r = slice.universal.reshape(rlengths, err); | assert(err || r._strides[$-1] == 1); | r._strides[$-1] = 1; | return r.assumeCanonical; | } | else | { | alias Ret = typeof(return); | auto structure = Ret._Structure.init; | alias lengths = structure[0]; | foreach (i; Iota!M) | lengths[i] = rlengths[i]; | | /// Code size optimization | immutable size_t eco = slice.elementCount; | size_t ecn = lengths[0 .. rlengths.length].iota.elementCount; | if (eco == 0) | { | err = ReshapeError.empty; | goto R; | } | foreach (i; Iota!M) | if (lengths[i] == -1) | { | ecn = -ecn; | lengths[i] = eco / ecn; | ecn *= lengths[i]; | break; | } | if (eco != ecn) | { | err = ReshapeError.total; | goto R; | } | static if (kind == Universal) | { | for (size_t oi, ni, oj, nj; oi < N && ni < M; oi = oj, ni = nj) | { | size_t op = slice._lengths[oj++]; | size_t np = lengths[nj++]; | | for (;;) | { | if (op < np) | op *= slice._lengths[oj++]; | if (op > np) | np *= lengths[nj++]; | if (op == np) | break; | } | while (oj < N && slice._lengths[oj] == 1) oj++; | while (nj < M && lengths[nj] == 1) nj++; | | for (size_t l = oi, r = oi + 1; r < oj; r++) | if (slice._lengths[r] != 1) | { | if (slice._strides[l] != slice._lengths[r] * slice._strides[r]) | { | err = ReshapeError.incompatible; | goto R; | } | l = r; | } | assert((oi == N) == (ni == M)); | | structure[1][nj - 1] = slice._strides[oj - 1]; | foreach_reverse (i; ni .. nj - 1) | structure[1][i] = lengths[i + 1] * structure[1][i + 1]; | } | } | foreach (i; Iota!(M, Ret.N)) | lengths[i] = slice._lengths[i + N - M]; | static if (M < Ret.S) | foreach (i; Iota!(M, Ret.S)) | structure[1][i] = slice._strides[i + N - M]; | err = 0; | return Ret(structure, slice._iterator); | R: | return Ret(structure, slice._iterator.init); | } |} | |/// |@safe nothrow pure |version(mir_test) unittest |{ | import mir.ndslice.dynamic : allReversed; | int err; | auto slice = iota(3, 4) | .universal | .allReversed | .reshape([-1, 3], err); | assert(err == 0); | assert(slice == | [[11, 10, 9], | [ 8, 7, 6], | [ 5, 4, 3], | [ 2, 1, 0]]); |} | |/// Reshaping with memory allocation |@safe pure version(mir_test) unittest |{ | import mir.ndslice.slice: sliced; | import mir.ndslice.allocation: slice; | import mir.ndslice.dynamic : reversed; | | auto reshape2(S, size_t M)(S sl, ptrdiff_t[M] lengths) | { | int err; | // Tries to reshape without allocation | auto ret = sl.reshape(lengths, err); | if (!err) | return ret; | if (err == ReshapeError.incompatible) | // allocates, flattens, reshapes with `sliced`, converts to universal kind | return sl.slice.flattened.sliced(cast(size_t[M])lengths).universal; | throw new Exception("total elements count is different or equals to zero"); | } | | auto sl = iota!int(3, 4) | .slice | .universal | .reversed!0; | | assert(reshape2(sl, [4, 3]) == | [[ 8, 9, 10], | [11, 4, 5], | [ 6, 7, 0], | [ 1, 2, 3]]); |} | |nothrow @safe pure version(mir_test) unittest |{ | import mir.ndslice.dynamic : allReversed; | auto slice = iota(1, 1, 3, 2, 1, 2, 1).universal.allReversed; | int err; | assert(slice.reshape([1, -1, 1, 1, 3, 1], err) == | [[[[[[11], [10], [9]]]], | [[[[ 8], [ 7], [6]]]], | [[[[ 5], [ 4], [3]]]], | [[[[ 2], [ 1], [0]]]]]]); | assert(err == 0); |} 
| |// Issue 15919 |nothrow @nogc @safe pure |version(mir_test) unittest |{ | int err; | assert(iota(3, 4, 5, 6, 7).pack!2.reshape([4, 3, 5], err)[0, 0, 0].shape == cast(size_t[2])[6, 7]); | assert(err == 0); |} | |nothrow @nogc @safe pure version(mir_test) unittest |{ | import mir.ndslice.slice; | | int err; | auto e = iota(1); | // resize to the wrong dimension | auto s = e.reshape([2], err); | assert(err == ReshapeError.total); | e.popFront; | // test with an empty slice | e.reshape([1], err); | assert(err == ReshapeError.empty); |} | |nothrow @nogc @safe pure |version(mir_test) unittest |{ | auto pElements = iota(3, 4, 5, 6, 7) | .pack!2 | .flattened; | assert(pElements[0][0] == iota(7)); | assert(pElements[$-1][$-1] == iota([7], 2513)); |} | |/++ |A contiguous 1-dimensional slice of all elements of a slice. |`flattened` iterates existing data. |The order of elements is preserved. | |`flattened` can be generalized with other selectors. | |Params: | slice = slice to be iterated |Returns: | contiguous 1-dimensional slice of elements of the `slice` |+/ |Slice!(FlattenedIterator!(Iterator, N, kind)) | flattened | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | if (N != 1 && kind != Contiguous) |{ | size_t[typeof(return).N] lengths; | sizediff_t[typeof(return)._iterator._indexes.length] indexes; | lengths[0] = slice.elementCount; | return typeof(return)(lengths, FlattenedIterator!(Iterator, N, kind)(indexes, slice)); |} | |/// ditto |Slice!Iterator | flattened | (Iterator, size_t N) | (Slice!(Iterator, N) slice) |{ | static if (N == 1) | { | return slice; | } | else | { | import core.lifetime: move; 0000000| size_t[typeof(return).N] lengths; 0000000| lengths[0] = slice.elementCount; 0000000| return typeof(return)(lengths, slice._iterator.move); | } |} | |/// ditto |Slice!(StrideIterator!Iterator) | flattened | (Iterator) | (Slice!(Iterator, 1, Universal) slice) |{ | return slice.hideStride; |} | |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | auto sl1 = iota(2, 3).slice.universal.pack!1.flattened; | auto sl2 = iota(2, 3).slice.canonical.pack!1.flattened; | auto sl3 = iota(2, 3).slice.pack!1.flattened; |} | |/// Regular slice |@safe @nogc pure nothrow version(mir_test) unittest |{ | assert(iota(4, 5).flattened == iota(20)); | assert(iota(4, 5).canonical.flattened == iota(20)); | assert(iota(4, 5).universal.flattened == iota(20)); |} | |@safe @nogc pure nothrow version(mir_test) unittest |{ | assert(iota(4).flattened == iota(4)); | assert(iota(4).canonical.flattened == iota(4)); | assert(iota(4).universal.flattened == iota(4)); |} | |/// Packed slice |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.dynamic; | assert(iota(3, 4, 5, 6, 7).pack!2.flattened[1] == iota([6, 7], 6 * 7)); |} | |/// Properties |@safe pure nothrow version(mir_test) unittest |{ | auto elems = iota(3, 4).universal.flattened; | | elems.popFrontExactly(2); | assert(elems.front == 2); | /// `_index` is available only for canonical and universal ndslices. 
| assert(elems._iterator._indexes == [0, 2]); | | elems.popBackExactly(2); | assert(elems.back == 9); | assert(elems.length == 8); |} | |/// Index property |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | auto slice = new long[20].sliced(5, 4); | | for (auto elems = slice.universal.flattened; !elems.empty; elems.popFront) | { | ptrdiff_t[2] index = elems._iterator._indexes; | elems.front = index[0] * 10 + index[1] * 3; | } | assert(slice == | [[ 0, 3, 6, 9], | [10, 13, 16, 19], | [20, 23, 26, 29], | [30, 33, 36, 39], | [40, 43, 46, 49]]); |} | |@safe pure nothrow version(mir_test) unittest |{ | auto elems = iota(3, 4).universal.flattened; | assert(elems.front == 0); | assert(elems.save[1] == 1); |} | |/++ |Random access and slicing |+/ |nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.slice : sliced; | | auto elems = iota(4, 5).slice.flattened; | | elems = elems[11 .. $ - 2]; | | assert(elems.length == 7); | assert(elems.front == 11); | assert(elems.back == 17); | | foreach (i; 0 .. 7) | assert(elems[i] == i + 11); | | // assign an element | elems[2 .. 6] = -1; | assert(elems[2 .. 6] == repeat(-1, 4)); | | // assign an array | static ar = [-1, -2, -3, -4]; | elems[2 .. 6] = ar; | assert(elems[2 .. 6] == ar); | | // assign a slice | ar[] *= 2; | auto sl = ar.sliced(ar.length); | elems[2 .. 6] = sl; | assert(elems[2 .. 6] == sl); |} | |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.dynamic : allReversed; | | auto slice = iota(3, 4, 5); | | foreach (ref e; slice.universal.flattened.retro) | { | //... | } | | foreach_reverse (ref e; slice.universal.flattened) | { | //... | } | | foreach (ref e; slice.universal.allReversed.flattened) | { | //... | } |} | |@safe @nogc pure nothrow version(mir_test) unittest |{ | import std.range.primitives : isRandomAccessRange, hasSlicing; | auto elems = iota(4, 5).flattened; | static assert(isRandomAccessRange!(typeof(elems))); | static assert(hasSlicing!(typeof(elems))); |} | |// Checks strides |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.dynamic; | import std.range.primitives : isRandomAccessRange; | auto elems = iota(4, 5).universal.everted.flattened; | static assert(isRandomAccessRange!(typeof(elems))); | | elems = elems[11 .. $ - 2]; | auto elems2 = elems; | foreach (i; 0 .. 7) | { | assert(elems[i] == elems2.front); | elems2.popFront; | } |} | |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.slice; | import mir.ndslice.dynamic; | import std.range.primitives : isRandomAccessRange, hasLength; | | auto range = (3 * 4 * 5 * 6 * 7).iota; | auto slice0 = range.sliced(3, 4, 5, 6, 7).universal; | auto slice1 = slice0.transposed!(2, 1).pack!2; | auto elems0 = slice0.flattened; | auto elems1 = slice1.flattened; | | foreach (S; AliasSeq!(typeof(elems0), typeof(elems1))) | { | static assert(isRandomAccessRange!S); | static assert(hasLength!S); | } | | assert(elems0.length == slice0.elementCount); | assert(elems1.length == 5 * 4 * 3); | | auto elems2 = elems1; | foreach (q; slice1) | foreach (w; q) | foreach (e; w) | { | assert(!elems2.empty); | assert(e == elems2.front); | elems2.popFront; | } | assert(elems2.empty); | | elems0.popFront(); | elems0.popFrontExactly(slice0.elementCount - 14); | assert(elems0.length == 13); | assert(elems0 == range[slice0.elementCount - 13 .. 
slice0.elementCount]); | | foreach (elem; elems0) {} |} | |// Issue 15549 |version(mir_test) unittest |{ | import std.range.primitives; | import mir.ndslice.allocation; | alias A = typeof(iota(1, 2, 3, 4).pack!1); | static assert(isRandomAccessRange!A); | static assert(hasLength!A); | static assert(hasSlicing!A); | alias B = typeof(slice!int(1, 2, 3, 4).pack!3); | static assert(isRandomAccessRange!B); | static assert(hasLength!B); | static assert(hasSlicing!B); |} | |// Issue 16010 |version(mir_test) unittest |{ | auto s = iota(3, 4).flattened; | foreach (_; 0 .. s.length) | s = s[1 .. $]; |} | |/++ |Returns a slice, the elements of which are equal to the initial multidimensional index value. |For a flattened (contiguous) index, see $(LREF iota). | |Params: | N = dimension count | lengths = list of dimension lengths |Returns: | `N`-dimensional slice composed of indexes |See_also: $(LREF iota) |+/ |Slice!(FieldIterator!(ndIotaField!N), N) | ndiota | (size_t N) | (size_t[N] lengths...) | if (N) |{ | return FieldIterator!(ndIotaField!N)(0, ndIotaField!N(lengths[1 .. $])).sliced(lengths); |} | |/// |@safe pure nothrow @nogc version(mir_test) unittest |{ | auto slice = ndiota(2, 3); | static immutable array = | [[[0, 0], [0, 1], [0, 2]], | [[1, 0], [1, 1], [1, 2]]]; | | assert(slice == array); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto im = ndiota(7, 9); | | assert(im[2, 1] == [2, 1]); | | //slicing works correctly | auto cm = im[1 .. $, 4 .. $]; | assert(cm[2, 1] == [3, 5]); |} | |version(mir_test) unittest |{ | auto r = ndiota(1); | auto d = r.front; | r.popFront; | import std.range.primitives; | static assert(isRandomAccessRange!(typeof(r))); |} | |/++ |Evenly spaced numbers over a specified interval. | |Params: | T = floating point or complex numbers type | lengths = list of dimension lengths. Each length must be greater then 1. | intervals = list of [start, end] pairs. |Returns: | `n`-dimensional grid of evenly spaced numbers over specified intervals. |See_also: $(LREF) |+/ |auto linspace(T, size_t N)(size_t[N] lengths, T[2][N] intervals...) 
| if (N && (isFloatingPoint!T || isComplex!T)) |{ 0000000| Repeat!(N, LinspaceField!T) fields; | foreach(i; Iota!N) | { 0000000| assert(lengths[i] > 1, "linspace: all lengths must be greater then 1."); 0000000| fields[i] = LinspaceField!T(lengths[i], intervals[i][0], intervals[i][1]); | } | static if (N == 1) 0000000| return slicedField(fields); | else | return cartesian(fields); |} | |// example from readme |version(mir_test) unittest |{ | import mir.ndslice; | // import std.stdio: writefln; | | enum fmt = "%(%(%.2f %)\n%)\n"; | | auto a = magic(5).as!float; | // writefln(fmt, a); | | auto b = linspace!float([5, 5], [1f, 2f], [0f, 1f]).map!"a * a + b"; | // writefln(fmt, b); | | auto c = slice!float(5, 5); | c[] = transposed(a + b / 2); |} | |/// 1D |@safe pure nothrow |version(mir_test) unittest |{ | auto s = linspace!double([5], [1.0, 2.0]); | assert(s == [1.0, 1.25, 1.5, 1.75, 2.0]); | | // reverse order | assert(linspace!double([5], [2.0, 1.0]) == s.retro); | | // remove endpoint | s.popBack; | assert(s == [1.0, 1.25, 1.5, 1.75]); |} | |/// 2D |@safe pure nothrow |version(mir_test) unittest |{ | import mir.functional: refTuple; | | auto s = linspace!double([5, 3], [1.0, 2.0], [0.0, 1.0]); | | assert(s == [ | [refTuple(1.00, 0.00), refTuple(1.00, 0.5), refTuple(1.00, 1.0)], | [refTuple(1.25, 0.00), refTuple(1.25, 0.5), refTuple(1.25, 1.0)], | [refTuple(1.50, 0.00), refTuple(1.50, 0.5), refTuple(1.50, 1.0)], | [refTuple(1.75, 0.00), refTuple(1.75, 0.5), refTuple(1.75, 1.0)], | [refTuple(2.00, 0.00), refTuple(2.00, 0.5), refTuple(2.00, 1.0)], | ]); | | assert(s.map!"a * b" == [ | [0.0, 0.500, 1.00], | [0.0, 0.625, 1.25], | [0.0, 0.750, 1.50], | [0.0, 0.875, 1.75], | [0.0, 1.000, 2.00], | ]); |} | |/// Complex numbers |@safe pure nothrow |version(mir_test) unittest |{ | auto s = linspace!cdouble([3], [1.0 + 0i, 2.0 + 4i]); | assert(s == [1.0 + 0i, 1.5 + 2i, 2.0 + 4i]); |} | |/++ |Returns a slice with identical elements. |`RepeatSlice` stores only single value. |Params: | lengths = list of dimension lengths |Returns: | `n`-dimensional slice composed of identical values, where `n` is dimension count. |+/ |Slice!(FieldIterator!(RepeatField!T), M, Universal) | repeat(T, size_t M)(T value, size_t[M] lengths...) @trusted | if (M && !isSlice!T) |{ | size_t[M] ls = lengths; | return typeof(return)( | ls, | sizediff_t[M].init, | typeof(return).Iterator(0, RepeatField!T(cast(RepeatField!T.UT) value))); |} | |/// ditto |Slice!(SliceIterator!(Iterator, N, kind), M, Universal) | repeat | (SliceKind kind, size_t N, Iterator, size_t M) | (Slice!(Iterator, N, kind) slice, size_t[M] lengths...) 
| if (M) |{ | size_t[M] ls = lengths; | return typeof(return)( | ls, | sizediff_t[M].init, | typeof(return).Iterator( | slice._structure, | slice._iterator)); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | auto sl = iota(3).repeat(4); | assert(sl == [[0, 1, 2], | [0, 1, 2], | [0, 1, 2], | [0, 1, 2]]); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.dynamic : transposed; | | auto sl = iota(3) | .repeat(4) | .unpack | .universal | .transposed; | | assert(sl == [[0, 0, 0, 0], | [1, 1, 1, 1], | [2, 2, 2, 2]]); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | | auto sl = iota([3], 6).slice; | auto slC = sl.repeat(2, 3); | sl[1] = 4; | assert(slC == [[[6, 4, 8], | [6, 4, 8], | [6, 4, 8]], | [[6, 4, 8], | [6, 4, 8], | [6, 4, 8]]]); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto sl = repeat(4.0, 2, 3); | assert(sl == [[4.0, 4.0, 4.0], | [4.0, 4.0, 4.0]]); | | static assert(is(DeepElementType!(typeof(sl)) == double)); | | sl[1, 1] = 3; | assert(sl == [[3.0, 3.0, 3.0], | [3.0, 3.0, 3.0]]); |} | |/++ |Cycle repeates 1-dimensional field/range/array/slice in a fixed length 1-dimensional slice. |+/ |auto cycle(Field)(Field field, size_t loopLength, size_t length) | if (!isSlice!Field && !is(Field : T[], T)) |{ | return CycleField!Field(loopLength, field).slicedField(length); |} | |/// ditto |auto cycle(size_t loopLength, Field)(Field field, size_t length) | if (!isSlice!Field && !is(Field : T[], T)) |{ | static assert(loopLength); | return CycleField!(Field, loopLength)(field).slicedField(length); |} | |/// ditto |auto cycle(Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) slice, size_t length) |{ | assert(slice.length); | static if (kind == Universal) | return slice.hideStride.cycle(length); | else | return CycleField!Iterator(slice._lengths[0], slice._iterator).slicedField(length); |} | |/// ditto |auto cycle(size_t loopLength, Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) slice, size_t length) |{ | static assert(loopLength); | assert(loopLength <= slice.length); | static if (kind == Universal) | return slice.hideStride.cycle!loopLength(length); | else | return CycleField!(Iterator, loopLength)(slice._iterator).slicedField(length); |} | |/// ditto |auto cycle(T)(T[] array, size_t length) |{ | return cycle(array.sliced, length); |} | |/// ditto |auto cycle(size_t loopLength, T)(T[] array, size_t length) |{ | return cycle!loopLength(array.sliced, length); |} | | |/// ditto |auto cycle(size_t loopLength, T)(T withAsSlice, size_t length) | if (hasAsSlice!T) |{ | return cycle!loopLength(withAsSlice.asSlice, length); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto slice = iota(3); | assert(slice.cycle(7) == [0, 1, 2, 0, 1, 2, 0]); | assert(slice.cycle!2(7) == [0, 1, 0, 1, 0, 1, 0]); | assert([0, 1, 2].cycle(7) == [0, 1, 2, 0, 1, 2, 0]); | assert([4, 3, 2, 1].cycle!4(7) == [4, 3, 2, 1, 4, 3, 2]); |} | |/++ |Strides 1-dimensional slice. |Params: | slice = 1-dimensional unpacked slice. | factor = positive stride size. |Returns: | Contiguous slice with strided iterator. 
|See_also: $(SUBREF dynamic, strided) |+/ |auto stride | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice, ptrdiff_t factor) | if (N == 1) |in |{ | assert (factor > 0, "factor must be positive."); |} |do |{ | static if (kind == Contiguous) | return slice.universal.stride(factor); | else | { | import mir.ndslice.dynamic: strided; | return slice.strided!0(factor).hideStride; | } |} | |/// ditto |auto stride(T)(T[] array, ptrdiff_t factor) |{ | return stride(array.sliced, factor); |} | |/// ditto |auto stride(T)(T withAsSlice, ptrdiff_t factor) | if (hasAsSlice!T) |{ | return stride(withAsSlice.asSlice, factor); |} | |/// |@safe pure nothrow @nogc version(mir_test) unittest |{ | auto slice = iota(6); | static immutable str = [0, 2, 4]; | assert(slice.stride(2) == str); | assert(slice.universal.stride(2) == str); |} | |/++ |Reverses order of iteration for all dimensions. |Params: | slice = Unpacked slice. |Returns: | Slice with reversed order of iteration for all dimensions. |See_also: $(SUBREF dynamic, reversed), $(SUBREF dynamic, allReversed). |+/ |auto retro | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | @trusted |{ | static if (kind == Contiguous || kind == Canonical) | { | size_t[slice.N] lengths; | foreach (i; Iota!(slice.N)) | lengths[i] = slice._lengths[i]; | static if (slice.S) | { | sizediff_t[slice.S] strides; | foreach (i; Iota!(slice.S)) | strides[i] = slice._strides[i]; | alias structure = AliasSeq!(lengths, strides); | } | else | { | alias structure = lengths; | } | static if (is(Iterator : RetroIterator!It, It)) | { | alias Ret = Slice!(It, N, kind); | return Ret(structure, slice._iterator._iterator - slice.lastIndex); | } | else | { | alias Ret = Slice!(RetroIterator!Iterator, N, kind); | return Ret(structure, RetroIterator!Iterator(slice._iterator + slice.lastIndex)); | } | } | else | { | import mir.ndslice.dynamic: allReversed; | return slice.allReversed; | } |} | |/// ditto |auto retro(T)(T[] array) |{ | return retro(array.sliced); |} | |/// ditto |auto retro(T)(T withAsSlice) | if (hasAsSlice!T) |{ | return retro(withAsSlice.asSlice); |} | |/// |@safe pure nothrow @nogc version(mir_test) unittest |{ | auto slice = iota(2, 3); | static immutable reversed = [[5, 4, 3], [2, 1, 0]]; | assert(slice.retro == reversed); | assert(slice.canonical.retro == reversed); | assert(slice.universal.retro == reversed); | | static assert(is(typeof(slice.retro.retro) == typeof(slice))); | static assert(is(typeof(slice.canonical.retro.retro) == typeof(slice.canonical))); | static assert(is(typeof(slice.universal.retro) == typeof(slice.universal))); |} | |/++ |Bitwise slice over an integral slice. |Params: | slice = a contiguous or canonical slice on top of integral iterator. |Returns: A bitwise slice. 
|+/ |auto bitwise | (Iterator, size_t N, SliceKind kind, I = typeof(Iterator.init[size_t.init])) | (Slice!(Iterator, N, kind) slice) | if (__traits(isIntegral, I) && (kind == Contiguous || kind == Canonical)) |{ | static if (is(Iterator : FieldIterator!Field, Field)) | { | enum simplified = true; | alias It = FieldIterator!(BitField!Field); | } | else | { | enum simplified = false; | alias It = FieldIterator!(BitField!Iterator); | } | alias Ret = Slice!(It, N, kind); | auto structure_ = Ret._Structure.init; | foreach(i; Iota!(Ret.N)) | structure_[0][i] = slice._lengths[i]; | structure_[0][$ - 1] *= I.sizeof * 8; | foreach(i; Iota!(Ret.S)) | structure_[1][i] = slice._strides[i]; | static if (simplified) | return Ret(structure_, It(slice._iterator._index * I.sizeof * 8, BitField!Field(slice._iterator._field))); | else | return Ret(structure_, It(0, BitField!Iterator(slice._iterator))); |} | |/// ditto |auto bitwise(T)(T[] array) |{ | return bitwise(array.sliced); |} | |/// ditto |auto bitwise(T)(T withAsSlice) | if (hasAsSlice!T) |{ | return bitwise(withAsSlice.asSlice); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | size_t[10] data; | auto bits = data[].bitwise; | assert(bits.length == data.length * size_t.sizeof * 8); | bits[111] = true; | assert(bits[111]); | | bits.popFront; | assert(bits[110]); | bits[] = true; | bits[110] = false; | bits = bits[10 .. $]; | assert(bits[100] == false); |} | |@safe pure nothrow @nogc |version(mir_test) unittest |{ | size_t[10] data; | auto slice = FieldIterator!(size_t[])(0, data[]).sliced(10); | slice.popFrontExactly(2); | auto bits_normal = data[].sliced.bitwise; | auto bits = slice.bitwise; | assert(bits.length == (data.length - 2) * size_t.sizeof * 8); | bits[111] = true; | assert(bits[111]); | assert(bits_normal[111 + size_t.sizeof * 2 * 8]); | | bits.popFront; | assert(bits[110]); | bits[] = true; | bits[110] = false; | bits = bits[10 .. $]; | assert(bits[100] == false); |} | |/++ |Bitwise field over an integral field. |Params: | field = an integral field. |Returns: A bitwise field. |+/ |auto bitwiseField(Field, I = typeof(Field.init[size_t.init]))(Field field) | if (__traits(isUnsigned, I)) |{ | return BitField!(Field, I)(field); |} | |/++ |Bitpack slice over an integral slice. | |Bitpack is used to represent unsigned integer slice with fewer number of bits in integer binary representation. | |Params: | pack = counts of bits in the integer. | slice = a contiguous or canonical slice on top of integral iterator. |Returns: A bitpack slice. 
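|
|A small sketch (4-bit packs over `ushort` storage; the numbers are illustrative only):
|------
|ushort[2] data;
|auto nibbles = data[].bitpack!4;                             // 4-bit unsigned integers
|assert(nibbles.length == data.length * ushort.sizeof * 8 / 4); // 8 packs
|nibbles[0] = 15;                                             // max value for 4 bits
|assert(nibbles[0] == 15);
|------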
|+/ |auto bitpack | (size_t pack, Iterator, size_t N, SliceKind kind, I = typeof(Iterator.init[size_t.init])) | (Slice!(Iterator, N, kind) slice) | if (__traits(isIntegral, I) && (kind == Contiguous || kind == Canonical) && pack > 1) |{ | static if (is(Iterator : FieldIterator!Field, Field) && I.sizeof * 8 % pack == 0) | { | enum simplified = true; | alias It = FieldIterator!(BitpackField!(Field, pack)); | } | else | { | enum simplified = false; | alias It = FieldIterator!(BitpackField!(Iterator, pack)); | } | alias Ret = Slice!(It, N, kind); | auto structure = Ret._Structure.init; | foreach(i; Iota!(Ret.N)) | structure[0][i] = slice._lengths[i]; | structure[0][$ - 1] *= I.sizeof * 8; | structure[0][$ - 1] /= pack; | foreach(i; Iota!(Ret.S)) | structure[1][i] = slice._strides[i]; | static if (simplified) | return Ret(structure, It(slice._iterator._index * I.sizeof * 8 / pack, BitpackField!(Field, pack)(slice._iterator._field))); | else | return Ret(structure, It(0, BitpackField!(Iterator, pack)(slice._iterator))); |} | |/// ditto |auto bitpack(size_t pack, T)(T[] array) |{ | return bitpack!pack(array.sliced); |} | |/// ditto |auto bitpack(size_t pack, T)(T withAsSlice) | if (hasAsSlice!T) |{ | return bitpack!pack(withAsSlice.asSlice); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | size_t[10] data; | // creates a packed unsigned integer slice with max allowed value equal to `2^^6 - 1 == 63`. | auto packs = data[].bitpack!6; | assert(packs.length == data.length * size_t.sizeof * 8 / 6); | packs[$ - 1] = 24; | assert(packs[$ - 1] == 24); | | packs.popFront; | assert(packs[$ - 1] == 24); |} | |/++ |Bytegroup slice over an integral slice. | |Groups existing slice into fixed length chunks and uses them as data store for destination type. | |Correctly handles scalar types on both little-endian and big-endian platforms. | |Params: | group = count of iterator items used to store the destination type. | DestinationType = deep element type of the result slice. | slice = a contiguous or canonical slice. |Returns: A bytegroup slice. |+/ |Slice!(BytegroupIterator!(Iterator, group, DestinationType), N, kind) |bytegroup | (size_t group, DestinationType, Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | if ((kind == Contiguous || kind == Canonical) && group) |{ | auto structure = slice._structure; | structure[0][$ - 1] /= group; | return typeof(return)(structure, BytegroupIterator!(Iterator, group, DestinationType)(slice._iterator)); |} | | |/// ditto |auto bytegroup(size_t pack, DestinationType, T)(T[] array) |{ | return bytegroup!(pack, DestinationType)(array.sliced); |} | |/// ditto |auto bytegroup(size_t pack, DestinationType, T)(T withAsSlice) | if (hasAsSlice!T) |{ | return bytegroup!(pack, DestinationType)(withAsSlice.asSlice); |} | |/// 24 bit integers |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.slice : DeepElementType, sliced; | | ubyte[20] data; | // creates a packed unsigned integer slice with max allowed value equal to `2^^6 - 1 == 63`. 
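|    // each group of 3 ubytes is reinterpreted as a single `int` element (24 significant bits)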
| auto int24ar = data[].bytegroup!(3, int); // 24 bit integers | assert(int24ar.length == data.length / 3); | | enum checkInt = ((1 << 20) - 1); | | int24ar[3] = checkInt; | assert(int24ar[3] == checkInt); | | int24ar.popFront; | assert(int24ar[2] == checkInt); | | static assert(is(DeepElementType!(typeof(int24ar)) == int)); |} | |/// 48 bit integers |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.slice : DeepElementType, sliced; | ushort[20] data; | // creates a packed unsigned integer slice with max allowed value equal to `2^^6 - 1 == 63`. | auto int48ar = data[].sliced.bytegroup!(3, long); // 48 bit integers | assert(int48ar.length == data.length / 3); | | enum checkInt = ((1L << 44) - 1); | | int48ar[3] = checkInt; | assert(int48ar[3] == checkInt); | | int48ar.popFront; | assert(int48ar[2] == checkInt); | | static assert(is(DeepElementType!(typeof(int48ar)) == long)); |} | |/++ |Implements the homonym function (also known as `transform`) present |in many languages of functional flavor. The call `map!(fun)(slice)` |returns a slice of which elements are obtained by applying `fun` |for all elements in `slice`. The original slices are |not changed. Evaluation is done lazily. | |Note: | $(SUBREF dynamic, transposed) and | $(SUBREF topology, pack) can be used to specify dimensions. |Params: | fun = One or more functions. |See_Also: | $(LREF cached), $(LREF vmap), $(LREF indexed), | $(LREF pairwise), $(LREF subSlices), $(LREF slide), $(LREF zip), | $(HTTP en.wikipedia.org/wiki/Map_(higher-order_function), Map (higher-order function)) |+/ |template map(fun...) | if (fun.length) |{ | import mir.functional: adjoin, naryFun, pipe; | static if (fun.length == 1) | { | static if (__traits(isSame, naryFun!(fun[0]), fun[0]) && !__traits(isSame, naryFun!"a", fun[0])) | { | alias f = fun[0]; | @optmath: | /++ | Params: | slice = An input slice. | Returns: | a slice with each fun applied to all the elements. If there is more than one | fun, the element type will be `Tuple` containing one element for each fun. | +/ | auto map(Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | { | alias Iterator = typeof(_mapIterator!f(slice._iterator)); | import mir.ndslice.traits: isIterator; | static assert(isIterator!Iterator, "mir.ndslice.map: probably the lambda function contains a compile time bug."); | return Slice!(Iterator, N, kind)(slice._structure, _mapIterator!f(slice._iterator)); | } | | /// ditto | auto map(T)(T[] array) | { | return map(array.sliced); | } | | /// ditto | auto map(T)(T withAsSlice) | if (hasAsSlice!T) | { | return map(withAsSlice.asSlice); | } | } | else | static if (__traits(isSame, naryFun!"a", fun[0])) | { | /// | @optmath auto map(Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | { | return slice; | } | | /// ditto | auto map(T)(T[] array) | { | return array.sliced; | } | | /// ditto | auto map(T)(T withAsSlice) | if (hasAsSlice!T) | { | return withAsSlice.asSlice; | } | } | else alias map = .map!(naryFun!fun); | } | else alias map = .map!(adjoin!fun); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | auto s = iota(2, 3).map!(a => a * 3); | assert(s == [[ 0, 3, 6], | [ 9, 12, 15]]); |} | |/// String lambdas |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | assert(iota(2, 3).map!"a * 2" == [[0, 2, 4], [6, 8, 10]]); |} | |/// Packed tensors. 
|@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, windows; | import mir.math.sum: sum; | | // iota windows map sums ( reduce!"a + b" ) | // -------------- | // ------- | --- --- | ------ | // | 0 1 2 | => || 0 1 || 1 2 || => | 8 12 | | // | 3 4 5 | || 3 4 || 4 5 || ------ | // ------- | --- --- | | // -------------- | auto s = iota(2, 3) | .windows(2, 2) | .map!sum; | | assert(s == [[8, 12]]); |} | |/// Zipped tensors |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, zip; | | // 0 1 2 | // 3 4 5 | auto sl1 = iota(2, 3); | // 1 2 3 | // 4 5 6 | auto sl2 = iota([2, 3], 1); | | auto z = zip(sl1, sl2); | | assert(zip(sl1, sl2).map!"a + b" == sl1 + sl2); |} | |/++ |Multiple functions can be passed to `map`. |In that case, the element type of `map` is a refTuple containing |one element for each function. |+/ |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | auto sl = iota(2, 3); | auto s = sl.map!("a + a", "a * a"); | | auto sums = [[0, 2, 4], [6, 8, 10]]; | auto products = [[0, 1, 4], [9, 16, 25]]; | | assert(s.map!"a[0]" == sl + sl); | assert(s.map!"a[1]" == sl * sl); |} | |/++ |`map` can be aliased to a symbol and be used separately: |+/ |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | alias halfs = map!"double(a) / 2"; | assert(halfs(iota(2, 3)) == [[0.0, 0.5, 1], [1.5, 2, 2.5]]); |} | |/++ |Type normalization |+/ |version(mir_test) unittest |{ | import mir.functional : pipe; | import mir.ndslice.topology : iota; | auto a = iota(2, 3).map!"a + 10".map!(pipe!("a * 2", "a + 1")); | auto b = iota(2, 3).map!(pipe!("a + 10", "a * 2", "a + 1")); | assert(a == b); | static assert(is(typeof(a) == typeof(b))); |} | |/// |pure version(mir_test) unittest |{ | import std.algorithm.iteration : sum, reduce; | import mir.utility : max; | import mir.ndslice.dynamic : transposed; | /// Returns maximal column average. | auto maxAvg(S)(S matrix) { | return reduce!max(matrix.universal.transposed.pack!1.map!sum) | / double(matrix.length); | } | // 1 2 | // 3 4 | auto matrix = iota([2, 2], 1); | assert(maxAvg(matrix) == 3); |} | | |/++ |Implements the homonym function (also known as `transform`) present |in many languages of functional flavor. The call `slice.vmap(fun)` |returns a slice of which elements are obtained by applying `fun` |for all elements in `slice`. The original slices are |not changed. Evaluation is done lazily. | |Note: | $(SUBREF dynamic, transposed) and | $(SUBREF topology, pack) can be used to specify dimensions. |Params: | slice = ndslice | callable = callable object, structure, delegate, or function pointer. 
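|
|Note (a convention taken from the examples below, not a formal requirement of the signature):
|callable `struct`s usually also define `lightConst`/`lightImmutable` properties so that
|const/immutable views of the resulting slice can still invoke them. A minimal sketch with an
|illustrative `Scale` type:
|------
|static struct Scale
|{
|    double factor;
|    auto opCall(long x) const { return x * factor; }
|    auto lightConst()() const @property { return Scale(factor); }
|}
|auto doubled = iota(2, 3).vmap(Scale(2)); // lazy: each element multiplied by 2
|------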
|See_Also: | $(LREF cached), $(LREF map), $(LREF indexed), | $(LREF pairwise), $(LREF subSlices), $(LREF slide), $(LREF zip), | $(HTTP en.wikipedia.org/wiki/Map_(higher-order_function), Map (higher-order function)) |+/ |@optmath auto vmap(Iterator, size_t N, SliceKind kind, Callable) | ( | Slice!(Iterator, N, kind) slice, | Callable callable, | ) |{ | alias It = VmapIterator!(Iterator, Callable); | return Slice!(It, N, kind)(slice._structure, It(slice._iterator, callable)); |} | |/// ditto |auto vmap(T, Callable)(T[] array, Callable callable) |{ | return vmap(array.sliced, callable); |} | |/// ditto |auto vmap(T, Callable)(T withAsSlice, Callable callable) | if (hasAsSlice!T) |{ | return vmap(withAsSlice.asSlice, callable); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | static struct Mul { | double factor; this(double f) { factor = f; } | auto opCall(long x) const {return x * factor; } | auto lightConst()() const @property { return Mul(factor); } | } | | auto callable = Mul(3); | auto s = iota(2, 3).vmap(callable); | | assert(s == [[ 0, 3, 6], | [ 9, 12, 15]]); |} | |/// Packed tensors. |@safe pure nothrow |version(mir_test) unittest |{ | import mir.math.sum: sum; | import mir.ndslice.topology : iota, windows; | | // iota windows vmap scaled sums | // -------------- | // ------- | --- --- | ----- | // | 0 1 2 | => || 0 1 || 1 2 || => | 4 6 | | // | 3 4 5 | || 3 4 || 4 5 || ----- | // ------- | --- --- | | // -------------- | | struct Callable | { | double factor; | this(double f) {factor = f;} | auto opCall(S)(S x) { return x.sum * factor; } | | auto lightConst()() const @property { return Callable(factor); } | auto lightImmutable()() immutable @property { return Callable(factor); } | } | | auto callable = Callable(0.5); | | auto s = iota(2, 3) | .windows(2, 2) | .vmap(callable); | | assert(s == [[4, 6]]); |} | |/// Zipped tensors |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, zip; | | struct Callable | { | double factor; | this(double f) {factor = f;} | auto opCall(S, T)(S x, T y) { return x + y * factor; } | | auto lightConst()() const { return Callable(factor); } | auto lightImmutable()() immutable { return Callable(factor); } | } | | auto callable = Callable(10); | | // 0 1 2 | // 3 4 5 | auto sl1 = iota(2, 3); | // 1 2 3 | // 4 5 6 | auto sl2 = iota([2, 3], 1); | | auto z = zip(sl1, sl2); | | assert(zip(sl1, sl2).vmap(callable) == | [[10, 21, 32], | [43, 54, 65]]); |} | |// TODO |/+ |Multiple functions can be passed to `vmap`. |In that case, the element type of `vmap` is a refTuple containing |one element for each function. 
|+/ |@safe pure nothrow |version(none) version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | auto s = iota(2, 3).vmap!("a + a", "a * a"); | | auto sums = [[0, 2, 4], [6, 8, 10]]; | auto products = [[0, 1, 4], [9, 16, 25]]; | | foreach (i; 0..s.length!0) | foreach (j; 0..s.length!1) | { | auto values = s[i, j]; | assert(values.a == sums[i][j]); | assert(values.b == products[i][j]); | } |} | |private auto hideStride | (Iterator, SliceKind kind) | (Slice!(Iterator, 1, kind) slice) |{ | static if (kind == Universal) | return Slice!(StrideIterator!Iterator)( | slice._lengths, | StrideIterator!Iterator(slice._strides[0], slice._iterator)); | else | return slice; |} | |private auto unhideStride | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) |{ | static if (is(Iterator : StrideIterator!It, It)) | { | static if (kind == Universal) | { | alias Ret = SliceKind!(It, N, Universal); | auto strides = slice._strides; | foreach(i; Iota!(Ret.S)) | strides[i] = slice._strides[i] * slice._iterator._stride; | return Slice!(It, N, Universal)(slice._lengths, strides, slice._iterator._iterator); | } | else | return slice.universal.unhideStride; | } | else | return slice; |} | |/++ |Creates a random access cache for lazyly computed elements. |Params: | original = original ndslice | caches = cached values | flags = array composed of flags that indicates if values are already computed |Returns: | ndslice, which is internally composed of three ndslices: `original`, allocated cache and allocated bit-ndslice. |See_also: $(LREF cachedGC), $(LREF map), $(LREF vmap), $(LREF indexed) |+/ |Slice!(CachedIterator!(Iterator, CacheIterator, FlagIterator), N, kind) | cached(Iterator, SliceKind kind, size_t N, CacheIterator, FlagIterator)( | Slice!(Iterator, N, kind) original, | Slice!(CacheIterator, N, kind) caches, | Slice!(FlagIterator, N, kind) flags, | ) |{ | assert(original.shape == caches.shape, "caches.shape should be equal to original.shape"); | assert(original.shape == flags.shape, "flags.shape should be equal to original.shape"); | return typeof(return)( | original._structure, | IteratorOf!(typeof(return))( | original._iterator, | caches._iterator, | flags._iterator, | )); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology: cached, iota, map; | import mir.ndslice.allocation: bitSlice, uninitSlice; | | int[] funCalls; | | auto v = 5.iota!int | .map!((i) { | funCalls ~= i; | return 2 ^^ i; | }); | auto flags = v.length.bitSlice; | auto cache = v.length.uninitSlice!int; | // cached lazy slice: 1 2 4 8 16 | auto sl = v.cached(cache, flags); | | assert(funCalls == []); | assert(sl[1] == 2); // remember result | assert(funCalls == [1]); | assert(sl[1] == 2); // reuse result | assert(funCalls == [1]); | | assert(sl[0] == 1); | assert(funCalls == [1, 0]); | funCalls = []; | | // set values directly | sl[1 .. 
3] = 5; | assert(sl[1] == 5); | assert(sl[2] == 5); | // no function calls | assert(funCalls == []); |} | |/// Cache of immutable elements |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.slice: DeepElementType; | import mir.ndslice.topology: cached, iota, map, as; | import mir.ndslice.allocation: bitSlice, uninitSlice; | | int[] funCalls; | | auto v = 5.iota!int | .map!((i) { | funCalls ~= i; | return 2 ^^ i; | }) | .as!(immutable int); | auto flags = v.length.bitSlice; | auto cache = v.length.uninitSlice!(immutable int); | | // cached lazy slice: 1 2 4 8 16 | auto sl = v.cached(cache, flags); | | static assert(is(DeepElementType!(typeof(sl)) == immutable int)); | | assert(funCalls == []); | assert(sl[1] == 2); // remember result | assert(funCalls == [1]); | assert(sl[1] == 2); // reuse result | assert(funCalls == [1]); | | assert(sl[0] == 1); | assert(funCalls == [1, 0]); |} | |/++ |Creates a random access cache for lazyly computed elements. |Params: | original = ND Contiguous or 1D Universal ndslice. |Returns: | ndslice, which is internally composed of three ndslices: `original`, allocated cache and allocated bit-ndslice. |See_also: $(LREF cached), $(LREF map), $(LREF vmap), $(LREF indexed) |+/ |Slice!(CachedIterator!(Iterator, typeof(Iterator.init[0])*, FieldIterator!(BitField!(size_t*))), N) | cachedGC(Iterator, size_t N)(Slice!(Iterator, N) original) @trusted |{ | import std.traits: hasElaborateAssign, Unqual; | import mir.ndslice.allocation: bitSlice, slice, uninitSlice; | alias C = typeof(Iterator.init[0]); | alias UC = Unqual!C; | static if (hasElaborateAssign!UC) | alias newSlice = slice; | else | alias newSlice = uninitSlice; | return typeof(return)( | original._structure, | IteratorOf!(typeof(return))( | original._iterator, | newSlice!C(original._lengths)._iterator, | original._lengths.bitSlice._iterator, | )); |} | |/// ditto |auto cachedGC(Iterator)(Slice!(Iterator, 1, Universal) from) |{ | return from.flattened.cachedGC; |} | |/// ditto |auto cachedGC(T)(T withAsSlice) | if (hasAsSlice!T) |{ | return cachedGC(withAsSlice.asSlice); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology: cachedGC, iota, map; | | int[] funCalls; | | // cached lazy slice: 1 2 4 8 16 | auto sl = 5.iota!int | .map!((i) { | funCalls ~= i; | return 2 ^^ i; | }) | .cachedGC; | | assert(funCalls == []); | assert(sl[1] == 2); // remember result | assert(funCalls == [1]); | assert(sl[1] == 2); // reuse result | assert(funCalls == [1]); | | assert(sl[0] == 1); | assert(funCalls == [1, 0]); | funCalls = []; | | // set values directly | sl[1 .. 3] = 5; | assert(sl[1] == 5); | assert(sl[2] == 5); | // no function calls | assert(funCalls == []); |} | |/// Cache of immutable elements |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.slice: DeepElementType; | import mir.ndslice.topology: cachedGC, iota, map, as; | | int[] funCalls; | | // cached lazy slice: 1 2 4 8 16 | auto sl = 5.iota!int | .map!((i) { | funCalls ~= i; | return 2 ^^ i; | }) | .as!(immutable int) | .cachedGC; | | static assert(is(DeepElementType!(typeof(sl)) == immutable int)); | | assert(funCalls == []); | assert(sl[1] == 2); // remember result | assert(funCalls == [1]); | assert(sl[1] == 2); // reuse result | assert(funCalls == [1]); | | assert(sl[0] == 1); | assert(funCalls == [1, 0]); |} | |/++ |Convenience function that creates a lazy view, |where each element of the original slice is converted to the type `T`. 
|It uses $(LREF map) and $(REF_ALTTEXT $(TT to), to, mir,conv)$(NBSP) |composition under the hood. |Params: | slice = a slice to create a view on. |Returns: | A lazy slice with elements converted to the type `T`. |See_also: $(LREF map), $(LREF vmap) |+/ |template as(T) |{ | /// | @optmath auto as(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | { | static if (is(slice.DeepElement == T)) | return slice; | else | static if (is(Iterator : T*)) | return slice.toConst; | else | { | import mir.conv: to; | return map!(to!T)(slice); | } | } | | /// ditto | auto as(S)(S[] array) | { | return as(array.sliced); | } | | /// ditto | auto as(S)(S withAsSlice) | if (hasAsSlice!S) | { | return as(withAsSlice.asSlice); | } |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : diagonal, as; | | auto matrix = slice!double([2, 2], 0); | auto stringMatrixView = matrix.as!int; | assert(stringMatrixView == | [[0, 0], | [0, 0]]); | | matrix.diagonal[] = 1; | assert(stringMatrixView == | [[1, 0], | [0, 1]]); | | /// allocate new slice composed of strings | Slice!(int*, 2) stringMatrix = stringMatrixView.slice; |} | |/// Special behavior for pointers to a constant data. |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.slice : Contiguous, Slice; | | Slice!(double*, 2) matrix = slice!double([2, 2], 0); | Slice!(const(double)*, 2) const_matrix = matrix.as!(const double); |} | |/++ |Takes a field `source` and a slice `indexes`, and creates a view of source as if its elements were reordered according to indexes. |`indexes` may include only a subset of the elements of `source` and may also repeat elements. | |Params: | source = a filed, source of data. `source` must be an array or a pointer, or have `opIndex` primitive. Full random access range API is not required. | indexes = a slice, source of indexes. |Returns: | n-dimensional slice with the same kind, shape and strides. | |See_also: `indexed` is similar to $(LREF, vmap), but a field (`[]`) is used instead of a function (`()`), and order of arguments is reversed. |+/ |Slice!(IndexIterator!(Iterator, Field), N, kind) | indexed(Field, Iterator, size_t N, SliceKind kind) | (Field source, Slice!(Iterator, N, kind) indexes) |{ | return typeof(return)( | indexes._structure, | IndexIterator!(Iterator, Field)( | indexes._iterator, | source)); |} | |/// ditto |auto indexed(Field, S)(Field source, S[] indexes) |{ | return indexed(source, indexes.sliced); |} | |/// ditto |auto indexed(Field, S)(Field source, S indexes) | if (hasAsSlice!S) |{ | return indexed(source, indexes.asSlice); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto source = [1, 2, 3, 4, 5]; | auto indexes = [4, 3, 1, 2, 0, 4]; | auto ind = source.indexed(indexes); | assert(ind == [5, 4, 2, 3, 1, 5]); | | assert(ind.retro == source.indexed(indexes.retro)); | | ind[3] += 10; // for index 2 | // 0 1 2 3 4 | assert(source == [1, 2, 13, 4, 5]); |} | |/++ |Maps indexes pairs to subslices. |Params: | sliceable = pointer, array, ndslice, series, or something sliceable with `[a .. b]`. | slices = ndslice composed of indexes pairs. |Returns: | ndslice composed of subslices. |See_also: $(LREF chopped), $(LREF pairwise). 
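|
|A minimal sketch with string data (the index pairs follow the same `staticArray` convention as
|the example below):
|------
|import mir.functional: staticArray;
|auto words = "hello world".subSlices([staticArray(0, 5), staticArray(6, 11)]);
|// words[0] is "hello", words[1] is "world"
|------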
|+/ |Slice!(SubSliceIterator!(Iterator, Sliceable), N, kind) | subSlices(Iterator, size_t N, SliceKind kind, Sliceable)( | Sliceable sliceable, | Slice!(Iterator, N, kind) slices, | ) |{ | return typeof(return)( | slices._structure, | SubSliceIterator!(Iterator, Sliceable)(slices._iterator, sliceable) | ); |} | |/// ditto |auto subSlices(S, Sliceable)(Sliceable sliceable, S[] slices) |{ | return subSlices(sliceable, slices.sliced); |} | |/// ditto |auto subSlices(S, Sliceable)(Sliceable sliceable, S slices) | if (hasAsSlice!S) |{ | return subSlices(sliceable, slices.asSlice); |} | |/// |@safe pure version(mir_test) unittest |{ | import mir.functional: staticArray; | auto subs =[ | staticArray(2, 4), | staticArray(2, 10), | ]; | auto sliceable = 10.iota; | | auto r = sliceable.subSlices(subs); | assert(r == [ | iota([4 - 2], 2), | iota([10 - 2], 2), | ]); |} | |/++ |Maps indexes pairs to subslices. |Params: | bounds = ndslice composed of consequent (`a_i <= a_(i+1)`) pairwise index bounds. | sliceable = pointer, array, ndslice, series, or something sliceable with `[a_i .. a_(i+1)]`. |Returns: | ndslice composed of subslices. |See_also: $(LREF pairwise), $(LREF subSlices). |+/ |Slice!(ChopIterator!(Iterator, Sliceable)) chopped(Iterator, Sliceable)( | Sliceable sliceable, | Slice!Iterator bounds, | ) |in |{ | debug(mir) | foreach(b; bounds.pairwise!"a <= b") | assert(b); |} |do { | import core.lifetime: move; | sizediff_t length = bounds._lengths[0] <= 1 ? 0 : bounds._lengths[0] - 1; | static if (hasLength!Sliceable) | { | if (length && bounds[length - 1] > sliceable.length) | { | version (D_Exceptions) | throw choppedException; | else | assert(0, choppedExceptionMsg); | } | } | | return typeof(return)([size_t(length)], ChopIterator!(Iterator, Sliceable)(bounds._iterator.move, sliceable.move)); |} | |/// ditto |auto chopped(S, Sliceable)(Sliceable sliceable, S[] bounds) |{ | return chopped(sliceable, bounds.sliced); |} | |/// ditto |auto chopped(S, Sliceable)(Sliceable sliceable, S bounds) | if (hasAsSlice!S) |{ | return chopped(sliceable, bounds.asSlice); |} | |/// |@safe pure version(mir_test) unittest |{ | import mir.functional: staticArray; | import mir.ndslice.slice : sliced; | auto pairwiseIndexes = [2, 4, 10].sliced; | auto sliceable = 10.iota; | | auto r = sliceable.chopped(pairwiseIndexes); | assert(r == [ | iota([4 - 2], 2), | iota([10 - 4], 4), | ]); |} | |/++ |Groups slices into a slice of refTuples. The slices must have identical strides or be 1-dimensional. |Params: | sameStrides = if `true` assumes that all slices has the same strides. | slices = list of slices |Returns: | n-dimensional slice of elements refTuple |See_also: $(SUBREF slice, Slice.strides). |+/ |auto zip | (bool sameStrides = false, Slices...)(Slices slices) | if (Slices.length > 1 && allSatisfy!(isConvertibleToSlice, Slices)) |{ | static if (allSatisfy!(isSlice, Slices)) | { | enum N = Slices[0].N; | foreach(i, S; Slices[1 .. 
$]) | { | static assert(S.N == N, "zip: all Slices must have the same dimension count"); | assert(slices[i + 1]._lengths == slices[0]._lengths, "zip: all slices must have the same lengths"); | static if (sameStrides) | assert(slices[i + 1].strides == slices[0].strides, "zip: all slices must have the same strides when unpacked"); | } | static if (!sameStrides && minElem(staticMap!(kindOf, Slices)) != Contiguous) | { | static assert(N == 1, "zip: cannot zip canonical and universal multidimensional slices if `sameStrides` is false"); | mixin(`return .zip(` ~ _iotaArgs!(Slices.length, "slices[", "].hideStride, ") ~`);`); | } | else | { | enum kind = maxElem(staticMap!(kindOf, Slices)); | alias Iterator = ZipIterator!(staticMap!(_IteratorOf, Slices)); | alias Ret = Slice!(Iterator, N, kind); | auto structure = Ret._Structure.init; | structure[0] = slices[0]._lengths; | foreach (i; Iota!(Ret.S)) | structure[1][i] = slices[0]._strides[i]; | return Ret(structure, mixin("Iterator(" ~ _iotaArgs!(Slices.length, "slices[", "]._iterator, ") ~ ")")); | } | } | else | { | return .zip(toSlices!slices); | } |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : flattened, iota; | | auto alpha = iota!int(4, 3); | auto beta = slice!int(4, 3).universal; | | auto m = zip!true(alpha, beta); | foreach (r; m) | foreach (e; r) | e.b = e.a; | assert(alpha == beta); | | beta[] = 0; | foreach (e; m.flattened) | e.b = cast(int)e.a; | assert(alpha == beta); |} | |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : flattened, iota; | | auto alpha = iota!int(4).universal; | auto beta = new int[4]; | | auto m = zip(alpha, beta); | foreach (e; m) | e.b = e.a; | assert(alpha == beta); |} | |/++ |Selects a slice from a zipped slice. |Params: | name = name of a slice to unzip. | slice = zipped slice |Returns: | unzipped slice |+/ |auto unzip | (char name, size_t N, SliceKind kind, Iterators...) | (Slice!(ZipIterator!Iterators, N, kind) slice) |{ | enum size_t i = name - 'a'; | static assert(i < Iterators.length, `unzip: constraint: size_t(name - 'a') < Iterators.length`); | return Slice!(Iterators[i], N, kind)(slice._structure, slice._iterator._iterators[i]).unhideStride; |} | |/// |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : iota; | | auto alpha = iota!int(4, 3); | auto beta = iota!int([4, 3], 1).slice; | | auto m = zip(alpha, beta); | | static assert(is(typeof(unzip!'a'(m)) == typeof(alpha))); | static assert(is(typeof(unzip!'b'(m)) == typeof(beta))); | | assert(m.unzip!'a' == alpha); | assert(m.unzip!'b' == beta); |} | |private enum TotalDim(NdFields...) = [staticMap!(DimensionCount, NdFields)].sum; | |/++ |Sliding map for vectors. |Works with packed slices. | |Suitable for simple convolution algorithms. | |Params: | params = windows length. | fun = map functions with `params` arity. |See_also: $(LREF pairwise), $(LREF diff). |+/ |template slide(size_t params, alias fun) | if (params <= 'z' - 'a' + 1) |{ | import mir.functional: naryFun; | | static if (params > 1 && __traits(isSame, naryFun!fun, fun)) | { | @optmath: | /++ | Params: | slice = An input slice with first dimension pack equals to one (e.g. 1-dimensional for not packed slices). | Returns: | 1d-slice composed of `fun(slice[i], ..., slice[i + params - 1])`. 
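|
|    For instance (sketch), `slide!(2, "a + b")` is a 2-point moving sum, which is exactly what
|    $(LREF pairwise) with the default lag produces:
|    ------
|    assert([1, 2, 3, 4].sliced.slide!(2, "a + b") == [3, 5, 7]);
|    ------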
| +/ | auto slide(Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | if (N == 1) | { | auto s = slice.map!"a".flattened; | if (cast(sizediff_t)s._lengths[0] < sizediff_t(params - 1)) | s._lengths[0] = 0; | else | s._lengths[0] -= params - 1; | | alias I = SlideIterator!(_IteratorOf!(typeof(s)), params, fun); | return Slice!(I)( | s._structure, | I(s._iterator)); | } | | /// ditto | auto slide(S)(S[] slice) | { | return slide(slice.sliced); | } | | /// ditto | auto slide(S)(S slice) | if (hasAsSlice!S) | { | return slide(slice.asSlice); | } | } | else | static if (params == 1) | alias slide = .map!(naryFun!fun); | else alias slide = .slide!(params, naryFun!fun); |} | |/// |version(mir_test) unittest |{ | auto data = 10.iota; | auto sw = data.slide!(3, "a + 2 * b + c"); | | import mir.utility: max; | assert(sw.length == max(0, cast(ptrdiff_t)data.length - 3 + 1)); | assert(sw == sw.length.iota.map!"(a + 1) * 4"); | assert(sw == [4, 8, 12, 16, 20, 24, 28, 32]); |} | |/++ |Pairwise map for vectors. |Works with packed slices. | |Params: | fun = function to accumulate | lag = an integer indicating which lag to use |Returns: lazy ndslice composed of `fun(a_n, a_n+1)` values. | |See_also: $(LREF slide), $(LREF subSlices). |+/ |alias pairwise(alias fun, size_t lag = 1) = slide!(lag + 1, fun); | |/// |version(mir_test) unittest |{ | assert([2, 4, 3, -1].sliced.pairwise!"a + b" == [6, 7, 2]); |} | |/++ |Differences between vector elements. |Works with packed slices. | |Params: | lag = an integer indicating which lag to use |Returns: lazy differences. | |See_also: $(LREF slide), $(LREF slide). |+/ |alias diff(size_t lag = 1) = pairwise!(('a' + lag) ~ " - a", lag); | |/// |version(mir_test) unittest |{ | assert([2, 4, 3, -1].sliced.diff == [2, -1, -4]); |} | |/// packed slices |version(mir_test) unittest |{ | // 0 1 2 3 | // 4 5 6 7 | // 8 9 10 11 | auto s = iota(3, 4); | import std.stdio; | assert(iota(3, 4).byDim!0.diff == [ | [4, 4, 4, 4], | [4, 4, 4, 4]]); | assert(iota(3, 4).byDim!1.diff == [ | [1, 1, 1], | [1, 1, 1], | [1, 1, 1]]); |} | | |/++ |Cartesian product. | |Constructs lazy cartesian product $(SUBREF slice, Slice) without memory allocation. | |Params: | fields = list of fields with lengths or ndFields with shapes |Returns: $(SUBREF ndfield, Cartesian)`!NdFields(fields).`$(SUBREF slice, slicedNdField)`;` |+/ |auto cartesian(NdFields...)(NdFields fields) | if (NdFields.length > 1 && allSatisfy!(templateOr!(hasShape, hasLength), NdFields)) |{ | return Cartesian!NdFields(fields).slicedNdField; |} | |/// 1D x 1D |version(mir_test) unittest |{ | auto a = [10, 20, 30]; | auto b = [ 1, 2, 3]; | | auto c = cartesian(a, b) | .map!"a + b"; | | assert(c == [ | [11, 12, 13], | [21, 22, 23], | [31, 32, 33]]); |} | |/// 1D x 2D |version(mir_test) unittest |{ | auto a = [10, 20, 30]; | auto b = iota([2, 3], 1); | | auto c = cartesian(a, b) | .map!"a + b"; | | assert(c.shape == [3, 2, 3]); | | assert(c == [ | [ | [11, 12, 13], | [14, 15, 16], | ], | [ | [21, 22, 23], | [24, 25, 26], | ], | [ | [31, 32, 33], | [34, 35, 36], | ]]); |} | |/// 1D x 1D x 1D |version(mir_test) unittest |{ | auto u = [100, 200]; | auto v = [10, 20, 30]; | auto w = [1, 2]; | | auto c = cartesian(u, v, w) | .map!"a + b + c"; | | assert(c.shape == [2, 3, 2]); | | assert(c == [ | [ | [111, 112], | [121, 122], | [131, 132], | ], | [ | [211, 212], | [221, 222], | [231, 232], | ]]); |} | | | |/++ |$(LINK2 https://en.wikipedia.org/wiki/Kronecker_product, Kronecker product). 
| |Constructs lazy kronecker product $(SUBREF slice, Slice) without memory allocation. |+/ |template kronecker(alias fun = product) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!fun, fun)) | | /++ | Params: | fields = list of either fields with lengths or ndFields with shapes. | All ndFields must have the same dimension count. | Returns: | $(SUBREF ndfield, Kronecker)`!(fun, NdFields)(fields).`$(SUBREF slice, slicedNdField) | +/ | @optmath auto kronecker(NdFields...)(NdFields fields) | if (allSatisfy!(hasShape, NdFields) || allSatisfy!(hasLength, NdFields)) | { | return Kronecker!(fun, NdFields)(fields).slicedNdField; | } | else | alias kronecker = .kronecker!(naryFun!fun); |} | |/// 2D |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.slice : sliced; | | // eye | auto a = slice!double([4, 4], 0); | a.diagonal[] = 1; | | auto b = [ 1, -1, | -1, 1].sliced(2, 2); | | auto c = kronecker(a, b); | | assert(c == [ | [ 1, -1, 0, 0, 0, 0, 0, 0], | [-1, 1, 0, 0, 0, 0, 0, 0], | [ 0, 0, 1, -1, 0, 0, 0, 0], | [ 0, 0, -1, 1, 0, 0, 0, 0], | [ 0, 0, 0, 0, 1, -1, 0, 0], | [ 0, 0, 0, 0, -1, 1, 0, 0], | [ 0, 0, 0, 0, 0, 0, 1, -1], | [ 0, 0, 0, 0, 0, 0, -1, 1]]); |} | |/// 1D |version(mir_test) unittest |{ | auto a = iota([3], 1); | | auto b = [ 1, -1]; | | auto c = kronecker(a, b); | | assert(c == [1, -1, 2, -2, 3, -3]); |} | |/// 2D with 3 arguments |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.slice : sliced; | | auto a = [ 1, 2, | 3, 4].sliced(2, 2); | | auto b = [ 1, 0, | 0, 1].sliced(2, 2); | | auto c = [ 1, -1, | -1, 1].sliced(2, 2); | | auto d = kronecker(a, b, c); | | assert(d == [ | [ 1, -1, 0, 0, 2, -2, 0, 0], | [-1, 1, 0, 0, -2, 2, 0, 0], | [ 0, 0, 1, -1, 0, 0, 2, -2], | [ 0, 0, -1, 1, 0, 0, -2, 2], | [ 3, -3, 0, 0, 4, -4, 0, 0], | [-3, 3, 0, 0, -4, 4, 0, 0], | [ 0, 0, 3, -3, 0, 0, 4, -4], | [ 0, 0, -3, 3, 0, 0, -4, 4]]); |} | |/++ |$(HTTPS en.wikipedia.org/wiki/Magic_square, Magic square). |Params: | length = square matrix length. |Returns: | Lazy magic matrix. |+/ |auto magic(size_t length) |{ 0000000| assert(length > 0); | static if (is(size_t == ulong)) | assert(length <= uint.max); | else 0000000| assert(length <= ushort.max); | import mir.ndslice.field: MagicField; 0000000| return MagicField(length).slicedField(length, length); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.math.sum; | import mir.ndslice: slice, magic, byDim, map, as, repeat, diagonal, antidiagonal; | | bool isMagic(S)(S matrix) | { | auto n = matrix.length; | auto c = n * (n * n + 1) / 2; // magic number | return // check shape | matrix.length!0 > 0 && matrix.length!0 == matrix.length!1 | && // each row sum should equal magic number | matrix.byDim!0.map!sum == c.repeat(n) | && // each columns sum should equal magic number | matrix.byDim!1.map!sum == c.repeat(n) | && // diagonal sum should equal magic number | matrix.diagonal.sum == c | && // antidiagonal sum should equal magic number | matrix.antidiagonal.sum == c; | } | | assert(isMagic(magic(1))); | assert(!isMagic(magic(2))); // 2x2 magic square does not exist | foreach(n; 3 .. 24) | assert(isMagic(magic(n))); | assert(isMagic(magic(3).as!double.slice)); |} | |/++ |Chops 1D input slice into n chunks with ascending or descending lengths. | |`stairs` can be used to pack and unpack symmetric and triangular matrix storage. | |Note: `stairs` is defined for 1D (packet) input and 2D (general) input. 
| This part of documentation is for 1D input. | |Params: | type = $(UL | $(LI `"-"` for stairs with lengths `n, n-1, ..., 1`.) | $(LI `"+"` for stairs with lengths `1, 2, ..., n`;) | ) | slice = input slice with length equal to `n * (n + 1) / 2` | n = stairs count |Returns: | 1D contiguous slice composed of 1D contiguous slices. | |See_also: $(LREF triplets) $(LREF ._stairs.2) |+/ |Slice!(StairsIterator!(Iterator, type)) stairs(string type, Iterator)(Slice!Iterator slice, size_t n) | if (type == "+" || type == "-") |{ | assert(slice.length == (n + 1) * n / 2, "stairs: slice length must be equal to n * (n + 1) / 2, where n is stairs count."); | static if (type == "+") | size_t length = 1; | else | size_t length = n; | return StairsIterator!(Iterator, type)(length, slice._iterator).sliced(n); |} | |/// ditto |Slice!(StairsIterator!(S*, type)) stairs(string type, S)(S[] slice, size_t n) | if (type == "+" || type == "-") |{ | return stairs!type(slice.sliced, n); |} | |/// ditto |auto stairs(string type, S)(S slice, size_t n) | if (hasAsSlice!S && (type == "+" || type == "-")) |{ | return stairs!type(slice.asSlice, n); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.topology: iota, stairs; | | auto pck = 15.iota; | auto inc = pck.stairs!"+"(5); | auto dec = pck.stairs!"-"(5); | | assert(inc == [ | [0], | [1, 2], | [3, 4, 5], | [6, 7, 8, 9], | [10, 11, 12, 13, 14]]); | assert(inc[1 .. $][2] == [6, 7, 8, 9]); | | assert(dec == [ | [0, 1, 2, 3, 4], | [5, 6, 7, 8], | [9, 10, 11], | [12, 13], | [14]]); | assert(dec[1 .. $][2] == [12, 13]); | | static assert(is(typeof(inc.front) == typeof(pck))); | static assert(is(typeof(dec.front) == typeof(pck))); |} | |/++ |Slice composed of rows of lower or upper triangular matrix. | |`stairs` can be used to pack and unpack symmetric and triangular matrix storage. | |Note: `stairs` is defined for 1D (packet) input and 2D (general) input. | This part of documentation is for 2D input. | |Params: | type = $(UL | $(LI `"+"` for stairs with lengths `1, 2, ..., n`, lower matrix;) | $(LI `"-"` for stairs with lengths `n, n-1, ..., 1`, upper matrix.) | ) | slice = input slice with length equal to `n * (n + 1) / 2` |Returns: | 1D slice composed of 1D contiguous slices. | |See_also: $(LREF _stairs) $(SUBREF dynamic, transposed), $(LREF universal) |+/ |auto stairs(string type, Iterator, SliceKind kind)(Slice!(Iterator, 2, kind) slice) | if (type == "+" || type == "-") |{ | assert(slice.length!0 == slice.length!1, "stairs: input slice must be a square matrix."); | static if (type == "+") | { | return slice | .pack!1 | .map!"a" | .zip([slice.length].iota!size_t(1)) | .map!"a[0 .. b]"; | } | else | { | return slice | .pack!1 | .map!"a" | .zip([slice.length].iota!size_t) | .map!"a[b .. 
$]"; | } |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.topology: iota, as, stairs; | | auto gen = [3, 3].iota.as!double; | auto inc = gen.stairs!"+"; | auto dec = gen.stairs!"-"; | | assert(inc == [ | [0], | [3, 4], | [6, 7, 8]]); | | assert(dec == [ | [0, 1, 2], | [4, 5], | [8]]); | | static assert(is(typeof(inc.front) == typeof(gen.front))); | static assert(is(typeof(dec.front) == typeof(gen.front))); | | ///////////////////////////////////////// | // Pack lower and upper matrix parts | auto n = gen.length; | auto m = n * (n + 1) / 2; | // allocate memory | import mir.ndslice.allocation: uninitSlice; | auto lowerData = m.uninitSlice!double; | auto upperData = m.uninitSlice!double; | // construct packed stairs | auto lower = lowerData.stairs!"+"(n); | auto upper = upperData.stairs!"-"(n); | // copy data | import mir.algorithm.iteration: each; | each!"a[] = b"(lower, inc); | each!"a[] = b"(upper, dec); | | assert(&lower[0][0] is &lowerData[0]); | assert(&upper[0][0] is &upperData[0]); | | assert(lowerData == [0, 3, 4, 6, 7, 8]); | assert(upperData == [0, 1, 2, 4, 5, 8]); |} | |/++ |Returns a slice that can be iterated by dimension. Transposes dimensions on top and then packs them. | |Combines $(LREF transposed) and $(LREF ipack). | |Params: | Dimensions = dimensions to perform iteration on |Returns: | n-dimensional slice ipacked to allow iteration by dimension |See_also: | $(LREF slice), | $(LREF ipack), | $(LREF transposed). |+/ |template byDim(Dimensions...) | if (Dimensions.length > 0) |{ | import mir.ndslice.internal : isSize_t; | import std.meta : allSatisfy; | | static if (!allSatisfy!(isSize_t, Dimensions)) | { | import std.meta : staticMap; | import mir.ndslice.internal : toSize_t; | | alias byDim = .byDim!(staticMap!(toSize_t, Dimensions)); | } | else | { | import mir.ndslice.slice : Slice, SliceKind; | /++ | Params: | slice = input slice (may not be 1-dimensional slice) | Returns: | n-dimensional slice ipacked to allow iteration by dimension | +/ | @optmath auto byDim(Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | { | import mir.ndslice.topology : ipack; | import mir.ndslice.internal : DimensionsCountCTError; | | mixin DimensionsCountCTError; | | static if (N == 1) | { | return slice; | } | else | { | import mir.ndslice.dynamic: transposed; | return slice | .transposed!Dimensions | .ipack!(Dimensions.length); | } | } | } |} | |/// 2-dimensional slice support |@safe @nogc pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | // ------------ | // | 0 1 2 3 | | // | 4 5 6 7 | | // | 8 9 10 11 | | // ------------ | auto slice = iota(3, 4); | //-> | // | 3 | | //-> | // | 4 | | size_t[1] shape3 = [3]; | size_t[1] shape4 = [4]; | | // ------------ | // | 0 1 2 3 | | // | 4 5 6 7 | | // | 8 9 10 11 | | // ------------ | auto x = slice.byDim!0; | assert(x.shape == shape3); | assert(x.front.shape == shape4); | assert(x.front == iota(4)); | x.popFront; | assert(x.front == iota([4], 4)); | | // --------- | // | 0 4 8 | | // | 1 5 9 | | // | 2 6 10 | | // | 3 7 11 | | // --------- | auto y = slice.byDim!1; | assert(y.shape == shape4); | assert(y.front.shape == shape3); | assert(y.front == iota([3], 0, 4)); | y.popFront; | assert(y.front == iota([3], 1, 4)); |} | |/// 3-dimensional slice support, N-dimensional also supported |@safe @nogc pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, universal, flattened, reshape; | import mir.ndslice.dynamic : strided, transposed; | // ---------------- 
| // | 0 1 2 3 4 | | // | 5 6 7 8 9 | | // | 10 11 12 13 14 | | // | 15 16 17 18 19 | | // - - - - - - - - | // | 20 21 22 23 24 | | // | 25 26 27 28 29 | | // | 30 31 32 33 34 | | // | 35 36 37 38 39 | | // - - - - - - - - | // | 40 41 42 43 44 | | // | 45 46 47 48 49 | | // | 50 51 52 53 54 | | // | 55 56 57 58 59 | | // ---------------- | auto slice = iota(3, 4, 5); | //-> | // | 4 5 | | //-> | // | 3 5 | | //-> | // | 3 4 | | //-> | // | 5 4 | | //-> | // | 3 | | //-> | // | 4 | | //-> | // | 5 | | size_t[2] shape45 = [4, 5]; | size_t[2] shape35 = [3, 5]; | size_t[2] shape34 = [3, 4]; | size_t[2] shape54 = [5, 4]; | size_t[1] shape3 = [3]; | size_t[1] shape4 = [4]; | size_t[1] shape5 = [5]; | | // ---------------- | // | 0 1 2 3 4 | | // | 5 6 7 8 9 | | // | 10 11 12 13 14 | | // | 15 16 17 18 19 | | // - - - - - - - - | // | 20 21 22 23 24 | | // | 25 26 27 28 29 | | // | 30 31 32 33 34 | | // | 35 36 37 38 39 | | // - - - - - - - - | // | 40 41 42 43 44 | | // | 45 46 47 48 49 | | // | 50 51 52 53 54 | | // | 55 56 57 58 59 | | // ---------------- | auto x = slice.byDim!0; | assert(x.shape == shape3); | assert(x.front.shape == shape45); | assert(x.front == iota([4, 5])); | x.popFront; | assert(x.front == iota([4, 5], (4 * 5))); | | // ---------------- | // | 0 1 2 3 4 | | // | 20 21 22 23 24 | | // | 40 41 42 43 44 | | // - - - - - - - - | // | 5 6 7 8 9 | | // | 25 26 27 28 29 | | // | 45 46 47 48 49 | | // - - - - - - - - | // | 10 11 12 13 14 | | // | 30 31 32 33 34 | | // | 50 51 52 53 54 | | // - - - - - - - - | // | 15 16 17 18 19 | | // | 35 36 37 38 39 | | // | 55 56 57 58 59 | | // ---------------- | auto y = slice.byDim!1; | assert(y.shape == shape4); | assert(y.front.shape == shape35); | int err; | assert(y.front == slice.universal.strided!1(4).reshape([3, -1], err)); | y.popFront; | assert(y.front.front == iota([5], 5)); | | // ------------- | // | 0 5 10 15 | | // | 20 25 30 35 | | // | 40 45 50 55 | | // - - - - - - - | // | 1 6 11 16 | | // | 21 26 31 36 | | // | 41 46 51 56 | | // - - - - - - - | // | 2 7 12 17 | | // | 22 27 32 37 | | // | 42 47 52 57 | | // - - - - - - - | // | 3 8 13 18 | | // | 23 28 33 38 | | // | 43 48 53 58 | | // - - - - - - - | // | 4 9 14 19 | | // | 24 29 34 39 | | // | 44 49 54 59 | | // ------------- | auto z = slice.byDim!2; | assert(z.shape == shape5); | assert(z.front.shape == shape34); | assert(z.front == iota([3, 4], 0, 5)); | z.popFront; | assert(z.front.front == iota([4], 1, 5)); | | // ---------- | // | 0 20 40 | | // | 5 25 45 | | // | 10 30 50 | | // | 15 35 55 | | // - - - - - | // | 1 21 41 | | // | 6 26 46 | | // | 11 31 51 | | // | 16 36 56 | | // - - - - - | // | 2 22 42 | | // | 7 27 47 | | // | 12 32 52 | | // | 17 37 57 | | // - - - - - | // | 3 23 43 | | // | 8 28 48 | | // | 13 33 53 | | // | 18 38 58 | | // - - - - - | // | 4 24 44 | | // | 9 29 49 | | // | 14 34 54 | | // | 19 39 59 | | // ---------- | auto a = slice.byDim!(2, 1); | assert(a.shape == shape54); | assert(a.front.shape == shape4); | assert(a.front.unpack == iota([3, 4], 0, 5).universal.transposed!1); | a.popFront; | assert(a.front.front == iota([3], 1, 20)); |} | |// Ensure works on canonical |@safe @nogc pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, canonical; | // ------------ | // | 0 1 2 3 | | // | 4 5 6 7 | | // | 8 9 10 11 | | // ------------ | auto slice = iota(3, 4).canonical; | //-> | // | 3 | | //-> | // | 4 | | size_t[1] shape3 = [3]; | size_t[1] shape4 = [4]; | | // ------------ | // | 0 1 2 3 | | // | 4 5 
6 7 | | // | 8 9 10 11 | | // ------------ | auto x = slice.byDim!0; | assert(x.shape == shape3); | assert(x.front.shape == shape4); | assert(x.front == iota(4)); | x.popFront; | assert(x.front == iota([4], 4)); | | // --------- | // | 0 4 8 | | // | 1 5 9 | | // | 2 6 10 | | // | 3 7 11 | | // --------- | auto y = slice.byDim!1; | assert(y.shape == shape4); | assert(y.front.shape == shape3); | assert(y.front == iota([3], 0, 4)); | y.popFront; | assert(y.front == iota([3], 1, 4)); |} | |// Ensure works on universal |@safe @nogc pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, universal; | // ------------ | // | 0 1 2 3 | | // | 4 5 6 7 | | // | 8 9 10 11 | | // ------------ | auto slice = iota(3, 4).universal; | //-> | // | 3 | | //-> | // | 4 | | size_t[1] shape3 = [3]; | size_t[1] shape4 = [4]; | | // ------------ | // | 0 1 2 3 | | // | 4 5 6 7 | | // | 8 9 10 11 | | // ------------ | auto x = slice.byDim!0; | assert(x.shape == shape3); | assert(x.front.shape == shape4); | assert(x.front == iota(4)); | x.popFront; | assert(x.front == iota([4], 4)); | | // --------- | // | 0 4 8 | | // | 1 5 9 | | // | 2 6 10 | | // | 3 7 11 | | // --------- | auto y = slice.byDim!1; | assert(y.shape == shape4); | assert(y.front.shape == shape3); | assert(y.front == iota([3], 0, 4)); | y.popFront; | assert(y.front == iota([3], 1, 4)); |} | |// 1-dimensional slice support |@safe @nogc pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | // ------- | // | 0 1 2 | | // ------- | auto slice = iota(3); | auto x = slice.byDim!0; | assert(x == slice); |} | |/++ |Field (element's member) projection. | |Params: | name = element's member name |Returns: | lazy n-dimensional slice of the same shape |See_also: | $(LREF map) |+/ | |template member(string name) | if (name.length) |{ | /++ | Params: | slice = n-dimensional slice composed of structs, classes or unions | Returns: | lazy n-dimensional slice of the same shape | +/ | Slice!(MemberIterator!(Iterator, name), N, kind) member(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | { | return typeof(return)(slice._structure, MemberIterator!(Iterator, name)(slice._iterator)); | } | | /// ditto | Slice!(MemberIterator!(T*, name)) member(T)(T[] array) | { | return member(array.sliced); | } | | /// ditto | auto member(T)(T withAsSlice) | if (hasAsSlice!T) | { | return member(withAsSlice.asSlice); | } |} | |/// |version(mir_test) |@safe pure unittest |{ | // struct, union or class | struct S | { | // Property support | // Getter always must be defined. | double _x; | double x() @property | { | return x; | } | void x(double x) @property | { | _x = x; | } | | /// Field support | double y; | | /// Zero argument function support | double f() | { | return _x * 2; | } | } | | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto matrix = slice!S(2, 3); | matrix.member!"x"[] = [2, 3].iota; | matrix.member!"y"[] = matrix.member!"f"; | assert(matrix.member!"y" == [2, 3].iota * 2); |} | |/++ |Functional deep-element wise reduce of a slice composed of fields or iterators. |+/ |template orthogonalReduceField(alias fun) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!fun, fun)) | { | @optmath: | /++ | Params: | slice = Non empty input slice composed of fields or iterators. | Returns: | a lazy field with each element of which is reduced value of element of the same index of all iterators. 
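|
|    In other words (sketch), for fields `x`, `y`, `z` element `i` of the result is the value of
|    `x[i]`, `y[i]`, `z[i]` reduced by `fun`, seeded with `initialValue`:
|    ------
|    auto rows = [[1, 2, 3], [10, 20, 30]];
|    auto sums = orthogonalReduceField!"a + b"(0, rows);
|    // sums[0] == 11, sums[1] == 22, sums[2] == 33
|    ------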
| +/ | OrthogonalReduceField!(Iterator, fun, I) orthogonalReduceField(I, Iterator)(I initialValue, Slice!Iterator slice) | { | return typeof(return)(slice, initialValue); | } | | /// ditto | OrthogonalReduceField!(T*, fun, I) orthogonalReduceField(I, T)(I initialValue, T[] array) | { | return orthogonalReduceField(initialValue, array.sliced); | } | | /// ditto | auto orthogonalReduceField(I, T)(I initialValue, T withAsSlice) | if (hasAsSlice!T) | { | return orthogonalReduceField(initialValue, withAsSlice.asSlice); | } | } | else alias orthogonalReduceField = .orthogonalReduceField!(naryFun!fun); |} | |/// bit array operations |version(mir_test) |unittest |{ | import mir.ndslice.slice: slicedField; | import mir.ndslice.allocation: bitSlice; | import mir.ndslice.dynamic: strided; | import mir.ndslice.topology: iota, orthogonalReduceField; | auto len = 100; | auto a = len.bitSlice; | auto b = len.bitSlice; | auto c = len.bitSlice; | a[len.iota.strided!0(7)][] = true; | b[len.iota.strided!0(11)][] = true; | c[len.iota.strided!0(13)][] = true; | | // this is valid since bitslices above are oroginal slices of allocated memory. | auto and = | orthogonalReduceField!"a & b"(size_t.max, [ | a.iterator._field._field, // get raw data pointers | b.iterator._field._field, | c.iterator._field._field, | ]) // operation on size_t | .bitwiseField | .slicedField(len); | | assert(and == (a & b & c)); |} | |/++ |Constructs a lazy view of triplets with `left`, `center`, and `right` members. | |Returns: Slice of the same length composed of $(SUBREF iterator, Triplet) triplets. |The `center` member is type of a slice element. |The `left` and `right` members has the same type as slice. | |The module contains special function $(LREF collapse) to handle |left and right side of triplets in one expression. | |Params: | slice = a slice or an array to iterate over | |Example: |------ |triplets(eeeeee) => | |||c|lllll| ||r|c|llll| ||rr|c|lll| ||rrr|c|ll| ||rrrr|c|l| ||rrrrr|c|| |------ | |See_also: $(LREF stairs). |+/ |Slice!(TripletIterator!(Iterator, kind)) triplets(Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) slice) |{ | return typeof(return)(slice.length, typeof(return).Iterator(0, slice)); |} | |/// ditto |Slice!(TripletIterator!(T*)) triplets(T)(scope return T[] slice) |{ | return .triplets(slice.sliced); |} | |/// ditto |auto triplets(string type, S)(S slice, size_t n) | if (hasAsSlice!S) |{ | return .triplets(slice.asSlice); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.topology: triplets, member, iota; | | auto a = [4, 5, 2, 8]; | auto h = a.triplets; | | assert(h[1].center == 5); | assert(h[1].left == [4]); | assert(h[1].right == [2, 8]); | | h[1].center = 9; | assert(a[1] == 9); | | assert(h.member!"center" == a); | | // `triplets` topology can be used with iota to index a slice | auto s = a.sliced; | auto w = s.length.iota.triplets[1]; | | assert(&s[w.center] == &a[1]); | assert(s[w.left].field is a[0 .. 1]); | assert(s[w.right].field is a[2 .. $]); |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/topology.d is 0% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-rc-array.lst |/++ |$(H1 Thread-safe reference-counted arrays and iterators). 
|+/ |module mir.rc.array; | |import mir.primitives: hasLength; |import mir.qualifier; |import mir.rc.context; |import mir.type_info; |import std.traits; | |package static immutable allocationExcMsg = "mir_rcarray: out of memory error."; | |version (D_Exceptions) |{ | import core.exception: OutOfMemoryError; | package static immutable allocationError = new OutOfMemoryError(allocationExcMsg); |} | |/++ |Thread safe reference counting array. | |`__xdtor` if any is used to destruct objects. | |The implementation never adds roots into the GC. |+/ |struct mir_rcarray(T) |{ | /// | package T* _payload; | package ref inout(mir_rc_context) context() inout scope return pure nothrow @nogc @trusted @property | { | assert(_payload); | return (cast(inout(mir_rc_context)*)_payload)[-1]; | } | package void _reset() { _payload = null; } | | package alias ThisTemplate = .mir_rcarray; | package alias _thisPtr = _payload; | | /// | void proxySwap(ref typeof(this) rhs) pure nothrow @nogc @safe | { | auto t = this._payload; | this._payload = rhs._payload; | rhs._payload = t; | } | | /// | mixin CommonRCImpl; | | /// | ~this() nothrow | { | static if (hasDestructor!T) | { | if (false) // break @safe and pure attributes | { | Unqual!T* object; | (*object).__xdtor(); | } | } | if (this) | { | (() @trusted { mir_rc_decrease_counter(context); })(); | debug _reset; | } | } | | /// | this(this) scope @trusted pure nothrow @nogc | { | if (this) | { | mir_rc_increase_counter(context); | } | } | | /// | size_t length() @trusted scope pure nothrow @nogc const @property | { | return _payload !is null ? context.length : 0; | } | | /// | inout(T)* ptr() @system scope inout | { | return _payload; | } | | /// | ref opIndex(size_t i) @trusted scope inout | { | assert(_payload); | assert(i < context.length); | return _payload[i]; | } | | /// | inout(T)[] opIndex() @trusted scope inout | { | return _payload !is null ? _payload[0 .. context.length] : null; | } | | /// | size_t opDollar(size_t pos : 0)() @trusted scope pure nothrow @nogc const | { | return length; | } | | /// | auto asSlice() @property | { | import mir.ndslice.slice: mir_slice; | alias It = mir_rci!T; | return mir_slice!It([length], It(this)); | } | | /// | auto asSlice() const @property | { | import mir.ndslice.slice: mir_slice; | alias It = mir_rci!(const T); | return mir_slice!It([length], It(this.lightConst)); | } | | /// | auto asSlice() immutable @property | { | import mir.ndslice.slice: mir_slice; | alias It = mir_rci!(immutable T); | return mir_slice!It([length], It(this.lightImmutable)); | } | | /// | auto moveToSlice() @property | { | import core.lifetime: move; | import mir.ndslice.slice: mir_slice; | alias It = mir_rci!T; | return mir_slice!It([length], It(move(this))); | } | | /++ | Params: | length = array length | initialize = Flag, don't initialize memory with default value if `false`. | deallocate = Flag, never deallocates memory if `false`. | +/ | this(size_t length, bool initialize = true, bool deallocate = true) @trusted @nogc | { | if (length == 0) | return; | Unqual!T[] ar; | () @trusted { | static if (is(T == class) || is(T == interface)) | auto ctx = mir_rc_create(mir_get_type_info!T, length, mir_get_payload_ptr!T, initialize, deallocate); | else | auto ctx = mir_rc_create(mir_get_type_info!T, length, mir_get_payload_ptr!T, initialize, deallocate); | if (!ctx) | { | version(D_Exceptions) | throw allocationError; | else | assert(0, allocationExcMsg); | } | _payload = cast(T*)(ctx + 1); | ar = cast(Unqual!T[])_payload[0 .. 
length]; | } (); | if (initialize || hasElaborateAssign!(Unqual!T)) | { | import mir.conv: uninitializedFillDefault; | uninitializedFillDefault(ar); | } | } | | static if (isImplicitlyConvertible!(const T, T)) | static if (isImplicitlyConvertible!(const Unqual!T, T)) | package alias V = const Unqual!T; | else | package alias V = const T; | else | package alias V = T; | |} | |/// ditto |alias RCArray = mir_rcarray; | |/// |version(mir_test) |@safe pure @nogc nothrow |unittest |{ | auto a = RCArray!double(10); | foreach(i, ref e; a) | e = i; | auto b = a; | assert(b[$ - 1] == 9); | foreach(i, ref e; b) | assert(e == i); | b[4] = 100; | assert(a[4] == 100); | | import mir.ndslice.slice; | | auto s = a.asSlice; // as RC random access range (ndslice) | static assert(is(typeof(s) == Slice!(RCI!double))); | static assert(is(typeof(s) == mir_slice!(mir_rci!double))); | | auto r = a[]; // scope array | static assert(is(typeof(r) == double[])); | | auto fs = r.sliced; // scope fast random access range (ndslice) | static assert(is(typeof(fs) == Slice!(double*))); |} | |package template LikeArray(Range) |{ | static if (__traits(identifier, Range) == "mir_slice") | { | import mir.ndslice.slice; | enum LikeArray = is(Range : Slice!(T*, N, kind), T, size_t N, SliceKind kind); | } | else | { | enum LikeArray = false; | } |} | |/// |auto rcarray(T = void, Range)(ref Range range) | if (is(T == void) && !is(Range == LightScopeOf!Range)) |{ | return .rcarray(range.lightScope); |} | |/// ditto |auto rcarray(T = void, Range)(Range range) | if (is(T == void) && isIterable!Range && is(Range == LightScopeOf!Range) && !isArray!Range) |{ | static if (LikeArray!Range) | { | return .rcarray(range.field); | } | else | { | return .rcarray!(ForeachType!Range)(range); | } |} | |/// ditto |RCArray!V rcarray(T = void, V)(V[] values...) | if (is(T == void) && hasIndirections!V) |{ | return .rcarray(values, true); |} | |/// ditto |RCArray!V rcarray(T = void, V)(scope V[] values...) 
| if (is(T == void) && !hasIndirections!V) |{ | return .rcarray(values, true); |} | |/// ditto |RCArray!V rcarray(T = void, V)(V[] values, bool deallocate) | if (is(T == void) && hasIndirections!V) |{ | return .rcarray!V(values, deallocate); |} | |/// ditto |RCArray!V rcarray(T = void, V)(scope V[] values, bool deallocate) | if (is(T == void) && !hasIndirections!V) |{ | return .rcarray!V(values, deallocate); |} | |/// ditto |template rcarray(T) | if(!is(T == E[], E) && !is(T == void)) |{ | import std.range.primitives: isInputRange, isInfinite; | | /// | auto rcarray(Range)(ref Range range) | if (!is(Range == LightScopeOf!Range)) | { | return .rcarray!T(range.lightScope); | } | | /// ditto | auto rcarray(Range)(Range range) | if ((isInputRange!Range || isIterable!Range) && !isInfinite!Range && !isArray!Range || isPointer!Range && (isInputRange!(PointerTarget!Range) || isIterable!(PointerTarget!Range))) | { | static if (LikeArray!Range) | { | return .rcarray!T(range.field); | } | else static if (hasLength!Range) | { | import mir.conv: emplaceRef; | auto ret = RCArray!T(range.length, false); | size_t i; | static if (isInputRange!Range) | for (; !range.empty; range.popFront) | ret[i++].emplaceRef!T(range.front); | else | static if (isPointer!Range) | foreach (e; *range) | ret[i++].emplaceRef!T(e); | else | foreach (e; range) | ret[i++].emplaceRef!T(e); | return ret; | } | else | { | import mir.appender: ScopedBuffer; | import mir.conv: emplaceRef; | ScopedBuffer!T a; | static if (isInputRange!Range) | for (; !range.empty; range.popFront) | a.put(range.front); | else | static if (isPointer!Range) | foreach (e; *range) | a.put(e); | else | foreach (e; range) | a.put(e); | scope values = a.data; | auto ret = RCArray!T(values.length, false); | ()@trusted { | a.moveDataAndEmplaceTo(ret[]); | }(); | return ret; | } | } | | /// ditto | RCArray!T rcarray(V)(V[] values...) | if (hasIndirections!V) | { | return .rcarray!T(values, true); | } | | /// ditto | RCArray!T rcarray(V)(scope V[] values...) 
| if (!hasIndirections!V) | { | return .rcarray!T(values, true); | } | | /// ditto | RCArray!T rcarray(V)(V[] values, bool deallocate) | if (hasIndirections!V) | { | auto ret = mir_rcarray!T(values.length, false, deallocate); | static if (!hasElaborateAssign!(Unqual!T) && is(Unqual!V == Unqual!T)) | { | ()@trusted { | import core.stdc.string: memcpy; | memcpy(cast(void*)ret.ptr, cast(const void*)values.ptr, values.length * T.sizeof); | }(); | } | else | { | import mir.conv: emplaceRef; | auto lhs = ret[]; | foreach (i, ref e; values) | lhs[i].emplaceRef!T(e); | } | return ret; | } | | /// ditto | RCArray!T rcarray(V)(scope V[] values, bool deallocate) | if (!hasIndirections!V) | { | auto ret = mir_rcarray!T(values.length, false); | static if (!hasElaborateAssign!(Unqual!T) && is(Unqual!V == Unqual!T)) | { | ()@trusted { | import core.stdc.string: memcpy; | memcpy(cast(void*)ret.ptr, cast(const void*)values.ptr, values.length * T.sizeof); | }(); | } | else | { | import mir.conv: emplaceRef; | auto lhs = ret[]; | foreach (i, ref e; values) | lhs[i].emplaceRef!T(e); | } | return ret; | } |} | |/// |version(mir_test) |@safe pure @nogc nothrow |unittest |{ | RCArray!double a = rcarray!double(1.0, 2, 5, 3); | assert(a[0] == 1); | assert(a[$ - 1] == 3); | | auto s = rcarray!char("hello!"); | assert(s[0] == 'h'); | assert(s[$ - 1] == '!'); | | alias rcstring = rcarray!(immutable char); | auto r = rcstring("string"); | assert(r[0] == 's'); | assert(r[$ - 1] == 'g'); |} | |/// With Input Ranges |version(mir_test) |@safe pure @nogc nothrow |unittest |{ | import std.algorithm.iteration: filter; | static immutable numbers = [3, 2, 5, 2, 3, 7, 3]; | static immutable filtered = [5.0, 7]; | auto result = numbers.filter!(a => a > 3).rcarray!(immutable double); | static assert(is(typeof(result) == RCArray!(immutable double))); | assert (result[] == filtered); |} | |/++ |Params: | length = array length | deallocate = Flag, never deallocates memory if `false`. |Returns: minimally initialized rcarray. |+/ |RCArray!T mininitRcarray(T)(size_t length, bool deallocate = true) |{ | return RCArray!T(length, false, deallocate); |} | |/// |@safe pure nothrow @nogc unittest |{ | auto a = mininitRcarray!double(5); | assert(a.length == 5); | assert(a._counter == 1); | a[][] = 0; // a.opIndex()[] = 0; |} | |/++ |Thread safe reference counting iterator. 
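Before the iterator type below, one more usage note for the allocation helpers above: `rcarray` can collect a lazy, GC-free range, and the result can be reshaped through `asSlice`. The following sketch is illustrative only; it assumes `iota` from `mir.ndslice.topology` and the reshaping `sliced` overload used in the examples later in this file.

    version(mir_test)
    @safe @nogc unittest
    {
        import mir.ndslice.slice;
        import mir.ndslice.topology: iota;
        import mir.rc.array;

        // collect a lazy range into reference-counted storage, no GC involved
        auto a = 6.iota.rcarray!double;
        assert(a.length == 6);
        assert(a[0] == 0 && a[$ - 1] == 5);

        // view the same memory as a 2 x 3 matrix without copying
        auto m = a.asSlice.sliced(2, 3);
        static assert(is(typeof(m) == Slice!(RCI!double, 2)));
        assert(m[1, 2] == 5);
    }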
|+/ |struct mir_rci(T) |{ | import mir.ndslice.slice: Slice; | import mir.ndslice.iterator: IotaIterator; | | /// | T* _iterator; | | /// | RCArray!T _array; | | /// | this(RCArray!T array) | { | this._iterator = (()@trusted => array.ptr)(); | this._array.proxySwap(array); | } | | /// | this(T* _iterator, RCArray!T array) | { | this._iterator = _iterator; | this._array.proxySwap(array); | } | | /// | inout(T)* lightScope()() scope return inout @property @trusted | { | debug | { | assert(_array._payload <= _iterator); | assert(_iterator is null || _iterator <= _array._payload + _array.length); | } | return _iterator; | } | | /// | ref opAssign(typeof(null)) scope return nothrow | { | pragma(inline, true); | _iterator = null; | _array = null; | return this; | } | | /// | ref opAssign(return typeof(this) rhs) scope return @trusted | { | _iterator = rhs._iterator; | _array.proxySwap(rhs._array); | return this; | } | | /// | ref opAssign(Q)(return mir_rci!Q rhs) scope return nothrow | if (isImplicitlyConvertible!(Q*, T*)) | { | static if (__VERSION__ >= 2085) import core.lifetime: move; else import std.algorithm.mutation: move; | _iterator = rhs._iterator; | _array = move(rhs._array); | return this; | } | | /// | mir_rci!(const T) lightConst()() scope return const nothrow @property | { return typeof(return)(_iterator, _array.lightConst); } | | /// | mir_rci!(immutable T) lightImmutable()() scope return immutable nothrow @property | { return typeof(return)(_iterator, _array.lightImmutable); } | | /// | ref inout(T) opUnary(string op : "*")() inout scope return | { | debug | { | assert(_iterator); | assert(_array._payload); | assert(_array._payload <= _iterator); | assert(_iterator <= _array._payload + _array.length); | } | return *_iterator; | } | | /// | ref inout(T) opIndex(ptrdiff_t index) inout scope return @trusted | { | debug | { | assert(_iterator); | assert(_array._payload); | assert(_array._payload <= _iterator + index); | assert(_iterator + index <= _array._payload + _array.length); | } | return _iterator[index]; | } | | /// Returns: slice type of `Slice!(IotaIterator!size_t)` | Slice!(IotaIterator!size_t) opSlice(size_t dimension)(size_t i, size_t j) @safe scope const | if (dimension == 0) | in | { | assert(i <= j, "RCI!T.opSlice!0: the left opSlice boundary must be less than or equal to the right bound."); | } | do | { | return typeof(return)(j - i, typeof(return).Iterator(i)); | } | | /// Returns: ndslice on top of the refcounted iterator | auto opIndex(Slice!(IotaIterator!size_t) slice) | { | import core.lifetime: move; | auto it = this; | it += slice._iterator._index; | return Slice!(RCI!T)(slice.length, it.move); | } | | /// ditto | auto opIndex(Slice!(IotaIterator!size_t) slice) const | { | import core.lifetime: move; | auto it = lightConst; | it += slice._iterator._index; | return Slice!(RCI!(const T))(slice.length, it.move); | } | | /// | void opUnary(string op)() scope | if (op == "--" || op == "++") | { mixin(op ~ "_iterator;"); } | | /// | void opOpAssign(string op)(ptrdiff_t index) scope | if (op == "-" || op == "+") | { mixin("_iterator " ~ op ~ "= index;"); } | | /// | mir_rci!T opBinary(string op)(ptrdiff_t index) | if (op == "+" || op == "-") | { return mir_rci!T(_iterator + index, _array); } | | /// | mir_rci!(const T) opBinary(string op)(ptrdiff_t index) const | if (op == "+" || op == "-") | { return mir_rci!T(_iterator + index, _array); } | | /// | mir_rci!(immutable T) opBinary(string op)(ptrdiff_t index) immutable | if (op == "+" || op == "-") | { return 
mir_rci!T(_iterator + index, _array); } | | /// | ptrdiff_t opBinary(string op : "-")(scope ref const typeof(this) right) scope const | { return this._iterator - right._iterator; } | | /// | bool opEquals()(scope ref const typeof(this) right) scope const | { return this._iterator == right._iterator; } | | /// | ptrdiff_t opCmp()(scope ref const typeof(this) right) scope const | { return this._iterator - right._iterator; } |} | |/// ditto |alias RCI = mir_rci; | |/// |version(mir_test) |@safe @nogc unittest |{ | | import mir.ndslice.traits: isIterator; | import mir.ndslice.slice; | import mir.rc.array; | auto slice = mir_rcarray!double(10).asSlice; | static assert(isIterator!(RCI!double)); | static assert(is(typeof(slice) == Slice!(RCI!double))); | auto matrix = slice.sliced(2, 5); | static assert(is(typeof(matrix) == Slice!(RCI!double, 2))); | slice[7] = 44; | assert(matrix[1, 2] == 44); |} | |/// |version(mir_test) |@safe @nogc unittest |{ | import mir.ndslice.slice; | import mir.rc.array; | | alias rcvec = Slice!(RCI!double); | | RCI!double a, b; | a = b; | | RCI!(const double) ca, cb; | ca = cb; | ca = cast(const) cb; | | void foo(scope ref rcvec x, scope ref rcvec y) | { | x[] = y[]; | x[1] = y[1]; | x[1 .. $] += y[1 .. $]; | x = x.save; | } |} | |version(mir_test) |@safe @nogc unittest |{ | import mir.ndslice; | import mir.rc.array; | import mir.series; | | @safe void bar(ref const mir_rcarray!(const double) a, ref mir_rcarray!(const double) b) | { | b = a; | } | | @safe void bari(ref immutable mir_rcarray!(immutable double) a, ref mir_rcarray!(immutable double) b) | { | b = a; | } | | @safe void foo(ref const RCI!(const double) a, ref RCI!(const double) b) | { | b = a; | } | | @safe void fooi(ref immutable RCI!(immutable double) a, ref RCI!(immutable double) b) | { | b = a; | } | | struct S | { | uint i; | @safe pure: | ~this() {} | } | | @safe void goo(ref const Series!(RCI!(const double), RCI!(const S)) a, ref Series!(RCI!(const double), RCI!(const S)) b) | { | b = a; | } | | @safe void gooi(ref immutable Series!(RCI!(immutable double), RCI!(const S)) a, ref Series!(RCI!(immutable double), RCI!(const S)) b) | { | b = a; | } | | struct C | { | Series!(RCI!(const S), RCI!(const S)) a; | Series!(RCI!(const S), RCI!(const S)) b; | } | | C a, b; | a = b; | a = cast(const) b; |} | |version(mir_test) |unittest |{ | import mir.ndslice.slice: Slice; | static RCArray!int foo() @safe | { | auto ret = RCArray!int(10); | return ret; | } | | | static Slice!(RCI!int) bat() @safe | { | auto ret = RCArray!int(10); | return ret.asSlice; | } | | static Slice!(RCI!int) bar() @safe | { | auto ret = RCArray!int(10); | auto d = ret.asSlice; | return d; | } |} | |version(mir_test) |@safe unittest |{ | import core.stdc.stdio; | | struct S | { | uint s; | this(this) @nogc nothrow @safe | { | // () @trusted { | // puts("this(this)\n"); | // } (); | } | | ~this() nothrow @nogc @safe | { | // () @trusted { | // if (s) | // puts("~this()\n"); | // else | // puts("~this() - zero\n"); | // } (); | } | } | | struct C | { | S s; | } | | S[1] d = [S(1)]; | auto r = rcarray(d); |} | |version(mir_test) |unittest |{ | import mir.small_string; | alias S = SmallString!32u; | auto ars = [S("123"), S("422")]; | alias R = mir_rcarray!S; | auto rc = ars.rcarray!S; |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/rc/array.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-mutation.lst |/++ |$(H2 Multidimensional mutation algorithms) | |This is a 
submodule of $(MREF mir,ndslice). | |$(BOOKTABLE $(H2 Function), |$(TR $(TH Function Name) $(TH Description)) |) | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2020-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.ndslice.mutation; | |import mir.ndslice.slice: Slice, SliceKind; | |/++ |Copies n-dimensional minor. |+/ |void copyMinor(size_t N, IteratorFrom, SliceKind KindFrom, IteratorTo, SliceKind KindTo, IndexIterator)( | Slice!(IteratorFrom, N, KindFrom) from, | Slice!(IteratorTo, N, KindTo) to, | Slice!IndexIterator[N] indexes... |) |in { | import mir.internal.utility: Iota; | static foreach (i; Iota!N) | assert(indexes[i].length == to.length!i); |} |do { | static if (N == 1) | to[] = from[indexes[0]]; | else | foreach (i; 0 .. indexes[0].length) | { | copyMinor!(N - 1)(from[indexes[0][i]], to[i], indexes[1 .. N]); | } |} | |/// |version(mir_test) |@safe pure nothrow |unittest |{ | import mir.ndslice; | // 0 1 2 3 | // 4 5 6 7 | // 8 9 10 11 | auto a = iota!int(3, 4); | auto b = slice!int(2, 2); | copyMinor(a, b, [2, 1].sliced, [0, 3].sliced); | assert(b == [[8, 11], [4, 7]]); |} | |/++ |Reverses data in the 1D slice. |+/ |void reverseInPlace(Iterator)(Slice!Iterator slice) |{ | import mir.utility : swap; | foreach (i; 0 .. slice.length / 2) | swap(slice[i], slice[$ - (i + 1)]); |} | |/// |version(mir_test) |@safe pure nothrow |unittest |{ | import mir.ndslice; | auto s = 5.iota.slice; | s.reverseInPlace; | assert([4, 3, 2, 1, 0]); |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/mutation.d has no code <<<<<< EOF # path=source-mir-sparse-blas-gemm.lst |/++ |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko |+/ |module mir.sparse.blas.gemm; | |import std.traits; |import mir.ndslice.slice; |import mir.ndslice.iterator; |import mir.ndslice.allocation: slice; |import mir.sparse; |import mir.series; | |/++ |General matrix-matrix multiplication. | |Params: | alpha = scalar | a = sparse matrix (CSR format) | b = dense matrix | beta = scalar | c = dense matrix |Returns: | `c = alpha * a × b + beta * c` if beta does not equal null and `c = alpha * a × b` otherwise. 
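Before the implementation, a small hand-checkable sketch of the contract stated above with a non-zero `beta`; the library's own example further below uses `beta = 0`. This is an illustration, not part of the package's test suite.

    unittest
    {
        import mir.ndslice;
        import mir.sparse;
        import mir.sparse.blas.gemm: gemm;

        auto sp = sparse!double(2, 2);
        sp[0, 0] = 1;
        sp[1, 1] = 2;
        auto a = sp.compress;            // CSR storage of [[1, 0], [0, 2]]

        auto b = slice!double(2, 2);
        b[] = [[3.0, 4], [5.0, 6]];

        auto c = slice!double(2, 2);
        c[] = [[100.0, 100], [100.0, 100]];

        // beta == 1: keep the previous contents of c and accumulate alpha * a × b
        gemm(0.5, a, b, 1.0, c);
        assert(c == [[101.5, 102.0], [105.0, 106.0]]);
    }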
|+/ |void gemm( | CR, | CL, | SliceKind kind1, T1, I1, J1, SliceKind kind2, Iterator2, SliceKind kind3, Iterator3) |( | in CR alpha, | Slice!(ChopIterator!(J1*, Series!(I1*, T1*)), 1, kind1) a, | Slice!(Iterator2, 2, kind2) b, | in CL beta, | Slice!(Iterator3, 2, kind3) c) |in |{ 1| assert(a.length!0 == c.length!0); 1| assert(b.length!1 == c.length!1); |} |body |{ | import mir.ndslice.topology: universal; | import mir.ndslice.dynamic: transposed; 1| auto ct = c.universal.transposed; 14| foreach (x; b.universal.transposed) | { | import mir.sparse.blas.gemv: gemv; 4| gemv(alpha, a, x, beta, ct.front); 4| ct.popFront; | } |} | |/// |unittest |{ | import mir.ndslice; | import mir.sparse; | 1| auto sp = sparse!int(3, 5); 1| sp[] = | [[-5, 1, 7, 7, -4], | [-1, -5, 6, 3, -3], | [-5, -2, -3, 6, 0]]; | 1| auto a = sp.compress; | 1| auto b = slice!double(5, 4); 1| b[] = | [[-5.0, -3, 3, 1], | [4.0, 3, 6, 4], | [-4.0, -2, -2, 2], | [-1.0, 9, 4, 8], | [9.0, 8, 3, -2]]; | 1| auto c = slice!double(3, 4); | 1| gemm(1.0, a, b, 0, c); | 1| assert(c == | [[-42.0, 35, -7, 77], | [-69.0, -21, -42, 21], | [23.0, 69, 3, 29]]); |} | | |/++ |General matrix-matrix multiplication with transformation. | |Params: | alpha = scalar | a = sparse matrix (CSR format) | b = dense matrix | beta = scalar | c = dense matrix |Returns: | `c = alpha * aᵀ × b + beta * c` if beta does not equal null and `c = alpha * aᵀ × b` otherwise. |+/ |void gemtm( | CR, | CL, | SliceKind kind1, T1, I1, J1, SliceKind kind2, Iterator2, SliceKind kind3, Iterator3) |( | in CR alpha, | Slice!(ChopIterator!(J1*, Series!(I1*, T1*)), 1, kind1) a, | Slice!(Iterator2, 2, kind2) b, | in CL beta, | Slice!(Iterator3, 2, kind3) c) |in |{ 1| assert(a.length!0 == b.length!0); 1| assert(b.length!1 == c.length!1); |} |body |{ | import mir.ndslice.topology: universal; | import mir.ndslice.dynamic: transposed; 1| auto ct = c.universal.transposed; 14| foreach (x; b.universal.transposed) | { | import mir.sparse.blas.gemv: gemtv; 4| gemtv(alpha, a, x, beta, ct.front); 4| ct.popFront; | } |} | | |/// |unittest |{ | import mir.ndslice; | import mir.sparse; | 1| auto sp = sparse!int(5, 3); 1| sp[] = | [[-5, -1, -5], | [1, -5, -2], | [7, 6, -3], | [7, 3, 6], | [-4, -3, 0]]; | 1| auto a = sp.compress; | 1| auto b = slice!double(5, 4); 1| b[] = | [[-5.0, -3, 3, 1], | [4.0, 3, 6, 4], | [-4.0, -2, -2, 2], | [-1.0, 9, 4, 8], | [9.0, 8, 3, -2]]; | 1| auto c = slice!double(3, 4); | 1| gemtm(1.0, a, b, 0, c); | 1| assert(c == | [[-42.0, 35, -7, 77], | [-69.0, -21, -42, 21], | [23.0, 69, 3, 29]]); |} | |/++ |Selective general matrix multiplication with selector sparse matrix. |Params: | a = dense matrix | b = dense matrix | c = sparse matrix (CSR format) |Returns: | `c[available indexes] = (a × b)[available indexes]`. 
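One more illustrative sketch, not from the package tests, for `gemtm` above: because the transpose is applied to the stored CSR matrix, the matrix can stay in its natural row-wise layout while `aᵀ × b` is computed. Small values are chosen so the result can be checked by hand.

    unittest
    {
        import mir.ndslice;
        import mir.sparse;
        import mir.sparse.blas.gemm: gemtm;

        auto sp = sparse!double(2, 2);
        sp[] =
            [[1.0, 2],
             [3.0, 4]];
        auto a = sp.compress;

        auto b = slice!double(2, 2);
        b[] =
            [[5.0, 6],
             [7.0, 8]];

        auto c = slice!double(2, 2);
        gemtm(1.0, a, b, 0, c);

        // aᵀ == [[1, 3], [2, 4]], hence aᵀ × b:
        assert(c == [[26.0, 30.0], [38.0, 44.0]]);
    }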
|+/ |void selectiveGemm(string op = "", SliceKind kind1, SliceKind kind2, SliceKind kind3, T, T3, I3, J3) |(Slice!(T*, 2, kind1) a, Slice!(T*, 2, kind2) b, Slice!(ChopIterator!(J3*, Series!(I3*, T3*)), 1, kind3) c) |in |{ 1| assert(a.length!1 == b.length!0); 1| assert(c.length!0 == a.length!0); 11| foreach (r; c) 3| if (r.index.length) 2| assert(r.index[$-1] < b.length!1); |} |body |{ | import mir.ndslice.topology: universal; | import mir.ndslice.dynamic: transposed; | import mir.sparse.blas.gemv: selectiveGemv; | 1| auto bt = b.universal.transposed; 11| foreach (r; c) | { 3| selectiveGemv!op(bt, a.front, r); 3| a.popFront; | } |} | |/// |unittest |{ | import mir.ndslice; | import mir.sparse; | 1| auto a = slice!double(3, 5); 1| a[] = | [[-5, 1, 7, 7, -4], | [-1, -5, 6, 3, -3], | [-5, -2, -3, 6, 0]]; | 1| auto b = slice!double(5, 4); 1| b[] = | [[-5.0, -3, 3, 1], | [4.0, 3, 6, 4], | [-4.0, -2, -2, 2], | [-1.0, 9, 4, 8], | [9.0, 8, 3, -2]]; | | // a * b == | // [[-42.0, 35, -7, 77], | // [-69.0, -21, -42, 21], | // [23.0, 69, 3, 29]]); | 1| auto cs = sparse!double(3, 4); 1| cs[0, 2] = 1; 1| cs[0, 1] = 3; 1| cs[2, 3] = 2; | 1| auto c = cs.compress; | 1| selectiveGemm!"*"(a, b, c); 1| assert(c.length == 3); 1| assert(c[0].index == [1, 2]); 1| assert(c[0].value == [105, -7]); 1| assert(c[1].empty); 1| assert(c[2].index == [3]); 1| assert(c[2].value == [58]); |} source/mir/sparse/blas/gemm.d is 100% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-traits.lst |/++ |$(H2 Multidimensional traits) | |This is a submodule of $(MREF mir,ndslice). | |$(BOOKTABLE $(H2 Function), |$(TR $(TH Function Name) $(TH Description)) | |$(T2 isVector, Test if type is a one-dimensional slice.) |$(T2 isMatrix, Test if type is a two-dimensional slice.) |$(T2 isContiguousSlice, Test if type is a contiguous slice.) |$(T2 isCanonicalSlice, Test if type is a canonical slice.) |$(T2 isUniversalSlice, Test if type is a universal slice.) |$(T2 isContiguousVector, Test if type is a contiguous one-dimensional slice.) |$(T2 isUniversalVector, Test if type is a universal one-dimensional slice.) |$(T2 isContiguousMatrix, Test if type is a contiguous two-dimensional slice.) |$(T2 isCanonicalMatrix, Test if type is a canonical two-dimensional slice.) |$(T2 isUniversalMatrix, Test if type is a universal two-dimensional slice.) |$(T2 isIterator, Test if type is a random access iterator.) |) | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright $(COPYRIGHT) 2016-, Ilya Yaroshenko, John Hall |Authors: John Hall | | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ | |module mir.ndslice.traits; | |import mir.ndslice.slice : Slice, SliceKind, Contiguous, Universal, Canonical; | |/// Test if type is a one-dimensional slice. |enum bool isVector(T) = is(T : Slice!(Iterator, 1, kind), SliceKind kind, Iterator); | |/// Test if type is a two-dimensional slice. |enum bool isMatrix(T) = is(T : Slice!(Iterator, 2, kind), SliceKind kind, Iterator); | |/// Test if type is a contiguous slice. |enum bool isContiguousSlice(T) = is(T : Slice!(Iterator, N, Contiguous), Iterator, size_t N); | |/// Test if type is a canonical slice. |enum bool isCanonicalSlice(T) = is(T : Slice!(Iterator, N, Canonical), Iterator, size_t N); | |/// Test if type is a universal slice. 
|enum bool isUniversalSlice(T) = is(T : Slice!(Iterator, N, Universal), Iterator, size_t N); | |/// Test if type is a contiguous one-dimensional slice. |enum bool isContiguousVector(T) = is(T : Slice!(Iterator, 1, Contiguous), Iterator); | |/// Test if type is a universal one-dimensional slice. |enum bool isUniversalVector(T) = is(T : Slice!(Iterator, 1, Universal), Iterator); | |/// Test if type is a contiguous two-dimensional slice. |enum bool isContiguousMatrix(T) = is(T : Slice!(Iterator, 2, Contiguous), Iterator); | |/// Test if type is a canonical two-dimensional slice. |enum bool isCanonicalMatrix(T) = is(T : Slice!(Iterator, 2, Canonical), Iterator); | |/// Test if type is a universal two-dimensional slice. |enum bool isUniversalMatrix(T) = is(T : Slice!(Iterator, 2, Universal), Iterator); | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.slice : Slice; | | alias S1 = Slice!(int*); | static assert(isContiguousVector!S1); | static assert(!isUniversalVector!S1); | | static assert(!isContiguousMatrix!S1); | static assert(!isCanonicalMatrix!S1); | static assert(!isUniversalMatrix!S1); | | static assert(isVector!S1); | static assert(!isMatrix!S1); | | static assert(isContiguousSlice!S1); | static assert(!isCanonicalSlice!S1); | static assert(!isUniversalSlice!S1); |} | |@safe pure nothrow @nogc |version(mir_test) unittest |{ | alias S2 = Slice!(float*, 1, Universal); | static assert(!isContiguousVector!S2); | static assert(isUniversalVector!S2); | | static assert(!isContiguousMatrix!S2); | static assert(!isCanonicalMatrix!S2); | static assert(!isUniversalMatrix!S2); | | static assert(isVector!S2); | static assert(!isMatrix!S2); | | static assert(!isContiguousSlice!S2); | static assert(!isCanonicalSlice!S2); | static assert(isUniversalSlice!S2); |} | |@safe pure nothrow @nogc |version(mir_test) unittest |{ | alias S3 = Slice!(byte*, 2); | static assert(!isContiguousVector!S3); | static assert(!isUniversalVector!S3); | | static assert(isContiguousMatrix!S3); | static assert(!isCanonicalMatrix!S3); | static assert(!isUniversalMatrix!S3); | | static assert(!isVector!S3); | static assert(isMatrix!S3); | | static assert(isContiguousSlice!S3); | static assert(!isCanonicalSlice!S3); | static assert(!isUniversalSlice!S3); |} | |@safe pure nothrow @nogc |version(mir_test) unittest |{ | alias S4 = Slice!(int*, 2, Canonical); | static assert(!isContiguousVector!S4); | static assert(!isUniversalVector!S4); | | static assert(!isContiguousMatrix!S4); | static assert(isCanonicalMatrix!S4); | static assert(!isUniversalMatrix!S4); | | static assert(!isVector!S4); | static assert(isMatrix!S4); | | static assert(!isContiguousSlice!S4); | static assert(isCanonicalSlice!S4); | static assert(!isUniversalSlice!S4); |} | |@safe pure nothrow @nogc |version(mir_test) unittest |{ | alias S5 = Slice!(int*, 2, Universal); | static assert(!isContiguousVector!S5); | static assert(!isUniversalVector!S5); | | static assert(!isContiguousMatrix!S5); | static assert(!isCanonicalMatrix!S5); | static assert(isUniversalMatrix!S5); | | static assert(!isVector!S5); | static assert(isMatrix!S5); | | static assert(!isContiguousSlice!S5); | static assert(!isCanonicalSlice!S5); | static assert(isUniversalSlice!S5); |} | |@safe pure nothrow @nogc |version(mir_test) unittest |{ | alias S6 = Slice!(int*, 3); | | static assert(!isContiguousVector!S6); | static assert(!isUniversalVector!S6); | | static assert(!isContiguousMatrix!S6); | static assert(!isCanonicalMatrix!S6); | static 
assert(!isUniversalMatrix!S6); | | static assert(!isVector!S6); | static assert(!isMatrix!S6); | | static assert(isContiguousSlice!S6); | static assert(!isCanonicalSlice!S6); | static assert(!isUniversalSlice!S6); |} | |@safe pure nothrow @nogc |version(mir_test) unittest |{ | alias S7 = Slice!(int*, 3, Canonical); | | static assert(!isContiguousVector!S7); | static assert(!isUniversalVector!S7); | | static assert(!isContiguousMatrix!S7); | static assert(!isCanonicalMatrix!S7); | static assert(!isUniversalMatrix!S7); | | static assert(!isVector!S7); | static assert(!isMatrix!S7); | | static assert(!isContiguousSlice!S7); | static assert(isCanonicalSlice!S7); | static assert(!isUniversalSlice!S7); |} | |@safe pure nothrow @nogc |version(mir_test) unittest |{ | alias S8 = Slice!(int*, 3, Universal); | | static assert(!isContiguousVector!S8); | static assert(!isUniversalVector!S8); | | static assert(!isContiguousMatrix!S8); | static assert(!isCanonicalMatrix!S8); | static assert(!isUniversalMatrix!S8); | | static assert(!isVector!S8); | static assert(!isMatrix!S8); | | static assert(!isContiguousSlice!S8); | static assert(!isCanonicalSlice!S8); | static assert(isUniversalSlice!S8); |} | |/// |template isIterator(T) |{ | enum isIterator = __traits(compiles, (T a, T b) | { | sizediff_t diff = a - b; | ++a; | ++b; | --a; | --b; | void foo(V)(auto ref V v) | { | | } | foo(a[sizediff_t(3)]); | auto c = a + sizediff_t(3); | auto d = a - sizediff_t(3); | a += sizediff_t(3); | a -= sizediff_t(3); | foo(*a); | }); |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/traits.d has no code <<<<<< EOF # path=source-mir-sparse-blas-dot.lst |/** |License: $(LINK2 http://boost.org/LICENSE_1_0.txt, Boost License 1.0). | |Authors: Ilya Yaroshenko |*/ |module mir.sparse.blas.dot; | |import std.traits; |import mir.ndslice.slice; |import mir.sparse; |import mir.series; | |/++ |Dot product of two vectors | |Params: | x = sparse vector | y = sparse vector |Returns: | scalar `xᵀ × y` |+/ |Unqual!(CommonType!(T1, T2)) dot( | V1 : Series!(I1*, T1*), | V2 : Series!(I2*, T2*), | T1, T2, I1, I2) |(V1 x, V2 y) |{ 1| return dot!(typeof(return))(x, y); |} | |/// ditto |D dot( | D, | V1 : Series!(I1*, T1*), | V2 : Series!(I2*, T2*), | T1, T2, I1, I2) |(V1 x, V2 y) |{ | 2| typeof(return) s = 0; | 2| uint done = 2; 2| Unqual!I1 ai0 = void; 2| Unqual!I2 bi0 = void; | 4| if (x.length && y.length) for (;;) | { 8| bi0 = y.index[0]; 8| if (x.index[0] < bi0) | { | do | { 4| x.popFront; 4| if (x.length == 0) | { 0000000| break; | } | } 4| while (x.index[0] < bi0); 4| done = 2; | } 8| if (--done == 0) | { 2| goto L; | } 6| ai0 = x.index[0]; 6| if (y.index[0] < ai0) | { | do | { 4| y.popFront; 4| if (y.length == 0) | { 0000000| break; | } | } 4| while (y.index[0] < ai0); 4| done = 2; | } 6| if (--done == 0) | { 2| goto L; | } 4| continue; | L: 4| s = x.value[0] * y.value[0] + s; 4| x.popFront; 4| if (x.length == 0) | { 0000000| break; | } 4| y.popFront; 4| if (y.length == 0) | { 2| break; | } | } | 2| return s; |} | |/// |unittest |{ | import mir.series; | 1| auto x = series([0u, 3, 5, 9, 100], [1, 3, 4, 9, 10]); 1| auto y = series([1u, 3, 4, 9], [1, 10, 100, 1000]); | // x = [1, 0, 0, 3, 0, 4, 0, 0, 0, 9, ... ,10] | // y = [0, 1, 0, 10, 0, 0, 0, 0, 0, 1000] 1| assert(dot(x, y) == 9030); 1| assert(dot!double(x, y) == 9030); |} | |/++ |Dot product of two vectors. 
|Params: | x = sparse vector | y = dense vector |Returns: | scalar `x × y` |+/ |Unqual!(CommonType!(T1, ForeachType!V2)) dot( | V1 : Series!(I1*, T1*), | T1, I1, V2) |(V1 x, V2 y) | if (isDynamicArray!V2 || isSlice!V2) |{ 21| return dot!(typeof(return))(x, y); |} | |///ditto |D dot( | D, | V1 : Series!(I1*, T1*), | T1, I1, V2) |(V1 x, V2 y) | if (isDynamicArray!V2 || isSlice!V2) |in |{ 21| if (x.length) 21| assert(x.index[$-1] < y.length); |} |body |{ | | import mir.internal.utility; | | alias T2 = ForeachType!V2; | | alias F = Unqual!(CommonType!(T1, T2)); 21| F s = 0; 324| foreach (size_t i; 0 .. x.index.length) | { 87| s = y[x.index[i]] * x.value[i] + s; | } | 21| return s; |} | |/// |unittest |{ | import mir.series; | 1| auto x = [0u, 3, 5, 9, 10].series([1.0, 3, 4, 9, 13]); 1| auto y = [0.0, 1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; | // x: [1, 0, 0, 3, 0, 4, 0, 0, 0, 9, 13, 0, 0, 0] | // y: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] 1| auto r = 0 + 3 * 3 + 4 * 5 + 9 * 9 + 13 * 10; 1| assert(dot(x, y) == r); 1| assert(dot(x, y.sliced) == r); 1| assert(dot(x, y.slicedField) == r); |} source/mir/sparse/blas/dot.d is 94% covered <<<<<< EOF # path=source-mir-glas-l2.lst |/++ |$(H2 Level 2) | |$(SCRIPT inhibitQuickIndex = 1;) | |This is a submodule of $(MREF mir,glas). | |The Level 2 BLAS perform matrix-vector operations. | |Note: GLAS is singe thread for now. | |$(BOOKTABLE $(H2 Matrix-vector operations), | |$(TR $(TH Function Name) $(TH Description)) |$(T2 gemv, general matrix-vector multiplication, $(RED partially optimized)) |) | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Macros: |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |SUBMODULE = $(MREF_ALTTEXT $1, mir, glas, $1) |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, glas, $1)$(NBSP) |NDSLICEREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |+/ |module mir.glas.l2; | |import std.traits; |import std.meta; | |import mir.math.common; |import mir.internal.utility; |import mir.ndslice.slice; | |import mir.glas.l1; | |import mir.math.common: fastmath; | |@fastmath: | |/++ |$(RED DRAFT) |Performs general matrix-vector multiplication. | |Pseudo_code: `y := alpha A × x + beta y`. | |Params: | alpha = scalar | asl = `m ⨉ n` matrix | xsl = `n ⨉ 1` vector | beta = scalar. When `beta` is supplied as zero then the vector `ysl` need not be set on input. | ysl = `m ⨉ 1` vector | |Note: | GLAS does not require transposition parameters. | Use $(NDSLICEREF iteration, transposed) | to perform zero cost `Slice` transposition. 
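A minimal sketch of the Note just above (illustrative, not part of the library tests): passing a transposed view of `asl` computes `y := alpha * Aᵀ × x + beta * y` with no data movement, only a change of strides. The explicit template arguments mirror the call style of the example further below.

    unittest
    {
        import mir.glas.l2: gemv;
        import mir.ndslice.allocation: slice;
        import mir.ndslice.dynamic: transposed;
        import mir.ndslice.topology: universal;

        auto a = slice!double(2, 3);
        a[] =
            [[1.0, 2, 3],
             [4.0, 5, 6]];

        auto x = slice!double(2);
        x[] = [1.0, 1];

        auto y = slice!double(3);

        // zero-cost transposition: a.universal.transposed is a 3 x 2 view of the same memory
        gemv!(double, double, double)(1.0, a.universal.transposed, x, 0.0, y);

        assert(y == [5.0, 7, 9]);
    }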
| |BLAS: SGEMV, DGEMV, (CGEMV, ZGEMV are not implemented for now) |+/ |nothrow @nogc @system |void gemv(A, B, C, | SliceKind kindA, | SliceKind kindB, | SliceKind kindC, | ) |( | C alpha, | Slice!(const(A)*, 2, kindA) asl, | Slice!(const(B)*, 1, kindB) xsl, | C beta, | Slice!(C*, 1, kindC) ysl, |) | if (allSatisfy!(isNumeric, A, B, C)) |in |{ 1| assert(asl.length!0 == ysl.length, "constraint: asl.length!0 == ysl.length"); 1| assert(asl.length!1 == xsl.length, "constraint: asl.length!1 == xsl.length"); |} |body |{ | import mir.ndslice.dynamic: reversed; | static assert(is(Unqual!C == C), msgWrongType); 1| if (ysl.empty) 0000000| return; 1| if (beta == 0) | { 1| ysl[] = 0; | } | else 0000000| if (beta == 1) | { 0000000| ysl[] *= beta; | } 1| if (xsl.empty) 0000000| return; | do | { 3| ysl.front += alpha * dot(asl.front, xsl); 3| asl.popFront; 3| ysl.popFront; | } 3| while (ysl.length); |} | |/// |unittest |{ | import mir.ndslice; | 1| auto a = slice!double(3, 5); 1| a[] = | [[-5, 1, 7, 7, -4], | [-1, -5, 6, 3, -3], | [-5, -2, -3, 6, 0]]; | 1| auto b = slice!double(5); 1| b[] = | [-5.0, | 4.0, | -4.0, | -1.0, | 9.0]; | 1| auto c = slice!double(3); | 1| gemv!(double, double, double)(1.0, a, b, 0.0, c); | 1| assert(c == | [-42.0, | -69.0, | 23.0]); |} source/mir/glas/l2.d is 80% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-filling.lst |/++ |This is a submodule of $(MREF mir,ndslice). | |Initialisation routines. | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: 2019 Symmetry Investments Group and Kaleidic Associates Advisory Limited. |Authors: Ilya Yaroshenko | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.ndslice.filling; | |import mir.ndslice.slice: Slice, SliceKind; | |/++ |Fills a matrix with the terms of a geometric progression in each row. |Params: | matrix = `m × n` matrix to fill | vec = vector of progression coefficients length of `m` |See_also: $(LINK2 https://en.wikipedia.org/wiki/Vandermonde_matrix, Vandermonde matrix) |+/ |void fillVandermonde(F, SliceKind matrixKind, SliceKind kind)(Slice!(F*, 2, matrixKind) matrix, Slice!(const(F)*, 1, kind) vec) |in { | assert(matrix.length == vec.length); |} |do { | import mir.conv: to; | | foreach (v; matrix) | { | F a = vec.front; | vec.popFront; | F x = to!F(1); | foreach (ref e; v) | { | e = x; | x *= a; | } | } |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/filling.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-array-allocation.lst |/** |Functions and types that manipulate built-in arrays and associative arrays. | |This module provides all kinds of functions to create, manipulate or convert arrays: | |$(SCRIPT inhibitQuickIndex = 1;) |$(BOOKTABLE , |$(TR $(TH Function Name) $(TH Description) |) | $(TR $(TD $(LREF _array)) | $(TD Returns a copy of the input in a newly allocated dynamic _array. | )) |) | |Copyright: Copyright Andrei Alexandrescu 2008-, Jonathan M Davis 2011-, and Ilya Yaroshenko (Mir rework) 2018- | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). 
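Stepping back to `fillVandermonde` above, which has no inline example in this listing: a minimal sketch of the row-wise geometric progressions it produces. The coefficient vector is declared `const` so the argument types match the signature exactly; this is an illustration, not a library test.

    version(mir_test) unittest
    {
        import mir.ndslice.allocation: slice;
        import mir.ndslice.filling: fillVandermonde;
        import mir.ndslice.slice: sliced;

        const(double)[] coefficients = [1.0, 2, 3];
        auto m = slice!double(3, 4);
        m.fillVandermonde(coefficients.sliced);

        // each row is 1, a, a^2, a^3 for its coefficient a
        assert(m == [[1.0, 1, 1, 1],
                     [1.0, 2, 4, 8],
                     [1.0, 3, 9, 27]]);
    }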
| |Authors: $(HTTP erdani.org, Andrei Alexandrescu) and Jonathan M Davis | |Source: $(PHOBOSSRC std/_array.d) |*/ |module mir.array.allocation; | |import mir.functional; |import mir.primitives; | |import std.traits; |import std.range.primitives: isInfinite, isInputRange, ElementType; | |/** | * Allocates an array and initializes it with copies of the elements | * of range $(D r). | * | * Narrow strings are handled as a special case in an overload. | * | * Params: | * r = range (or aggregate with $(D opApply) function) whose elements are copied into the allocated array | * Returns: | * allocated and initialized array | */ |auto array(Range)(Range r) |if ((isInputRange!Range || isIterable!Range) && !isInfinite!Range && !isStaticArray!Range || isPointer!Range && (isInputRange!(PointerTarget!Range) || isIterable!(PointerTarget!Range))) |{ | static if (isIterable!Range) | alias E = ForeachType!Range; | else | static if (isPointer!Range && isIterable!(PointerTarget!Range)) | alias E = ForeachType!(PointerTarget!Range); | else | alias E = ElementType!Range; | | if (__ctfe) | { | // Compile-time version to avoid memcpy calls. | // Also used to infer attributes of array(). | E[] result; | static if (isInputRange!Range) | for (; !r.empty; r.popFront) | result ~= r.front; | else | static if (isPointer!Range) | foreach (e; *r) | result ~= e; | else | foreach (e; r) | result ~= e; | return result; | } | | import mir.primitives: hasLength; | | static if (hasLength!Range) | { | auto length = r.length; | if (length == 0) | return null; | | import mir.conv : emplaceRef; | import std.array: uninitializedArray; | | auto result = (() @trusted => uninitializedArray!(Unqual!E[])(length))(); | | static if (isInputRange!Range) | { | foreach(ref e; result) | { | emplaceRef!E(e, r.front); | r.popFront; | } | } | else | static if (isPointer!Range) | { | auto it = result; | foreach(ref f; *r) | { | emplaceRef!E(it[0], f); | it = it[1 .. $]; | } | } | else | { | auto it = result; | foreach(ref f; r) | { | emplaceRef!E(it[0], f); | it = it[1 .. 
$]; | } | } | | return (() @trusted => cast(E[]) result)(); | } | else | { | import mir.appender: ScopedBuffer; | ScopedBuffer!E a; | static if (isInputRange!Range) | for (; !r.empty; r.popFront) | a.put(r.front); | else | static if (isPointer!Range) | foreach (e; *r) | a.put(e); | else | foreach (e; r) | a.put(e); | return .array(a.data); | } |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | auto a = array([1, 2, 3, 4, 5][]); | assert(a == [ 1, 2, 3, 4, 5 ]); |} | |@safe pure nothrow version(mir_test) unittest |{ | import mir.algorithm.iteration : equal; | struct Foo | { | int a; | } | auto a = array([Foo(1), Foo(2), Foo(3), Foo(4), Foo(5)][]); | assert(equal(a, [Foo(1), Foo(2), Foo(3), Foo(4), Foo(5)])); |} | |@safe pure nothrow version(mir_test) unittest |{ | struct MyRange | { | enum front = 123; | enum empty = true; | void popFront() {} | } | | auto arr = (new MyRange).array; | assert(arr.empty); |} | |@system pure nothrow version(mir_test) unittest |{ | immutable int[] a = [1, 2, 3, 4]; | auto b = (&a).array; | assert(b == a); |} | |@system version(mir_test) unittest |{ | import mir.algorithm.iteration : equal; | struct Foo | { | int a; | void opAssign(Foo) | { | assert(0); | } | auto opEquals(Foo foo) | { | return a == foo.a; | } | } | auto a = array([Foo(1), Foo(2), Foo(3), Foo(4), Foo(5)][]); | assert(equal(a, [Foo(1), Foo(2), Foo(3), Foo(4), Foo(5)])); |} | |@safe version(mir_test) unittest |{ | // Issue 12315 | static struct Bug12315 { immutable int i; } | enum bug12315 = [Bug12315(123456789)].array(); | static assert(bug12315[0].i == 123456789); |} | |@safe version(mir_test) unittest |{ | import mir.ndslice.topology: repeat; | static struct S{int* p;} | auto a = array(immutable(S).init.repeat(5)); | assert(a.length == 5); |} | |/// |@safe version(mir_test) unittest |{ | assert("Hello D".array == "Hello D"); | assert("Hello D"w.array == "Hello D"w); | assert("Hello D"d.array == "Hello D"d); |} | |@system version(mir_test) unittest |{ | // @system due to array!string | import std.conv : to; | | static struct TestArray { int x; string toString() @safe { return to!string(x); } } | | static struct OpAssign | { | uint num; | this(uint num) { this.num = num; } | | // Templating opAssign to make sure the bugs with opAssign being | // templated are fixed. | void opAssign(T)(T rhs) { this.num = rhs.num; } | } | | static struct OpApply | { | int opApply(scope int delegate(ref int) dg) | { | int res; | foreach (i; 0 .. 10) | { | res = dg(i); | if (res) break; | } | | return res; | } | } | | auto a = array([1, 2, 3, 4, 5][]); | assert(a == [ 1, 2, 3, 4, 5 ]); | | auto b = array([TestArray(1), TestArray(2)][]); | assert(b == [TestArray(1), TestArray(2)]); | | class C | { | int x; | this(int y) { x = y; } | override string toString() const @safe { return to!string(x); } | } | auto c = array([new C(1), new C(2)][]); | assert(c[0].x == 1); | assert(c[1].x == 2); | | auto d = array([1.0, 2.2, 3][]); | assert(is(typeof(d) == double[])); | assert(d == [1.0, 2.2, 3]); | | auto e = [OpAssign(1), OpAssign(2)]; | auto f = array(e); | assert(e == f); | | assert(array(OpApply.init) == [0,1,2,3,4,5,6,7,8,9]); | assert(array("ABC") == "ABC"); | assert(array("ABC".dup) == "ABC"); |} | |//Bug# 8233 |@safe version(mir_test) unittest |{ | assert(array("hello world"d) == "hello world"d); | immutable a = [1, 2, 3, 4, 5]; | assert(array(a) == a); | const b = a; | assert(array(b) == a); | | //To verify that the opAssign branch doesn't get screwed up by using Unqual. 
| //EDIT: array no longer calls opAssign. | struct S | { | ref S opAssign(S)(const ref S rhs) | { | assert(0); | } | | int i; | } | | alias AliasSeq(T...) = T; | foreach (T; AliasSeq!(S, const S, immutable S)) | { | auto arr = [T(1), T(2), T(3), T(4)]; | assert(array(arr) == arr); | } |} | |@safe version(mir_test) unittest |{ | //9824 | static struct S | { | @disable void opAssign(S); | int i; | } | auto arr = [S(0), S(1), S(2)]; | arr.array; |} | |// Bugzilla 10220 |@safe version(mir_test) unittest |{ | import mir.algorithm.iteration : equal; | import std.exception; | import mir.ndslice.topology: repeat; | | static struct S | { | int val; | | @disable this(); | this(int v) { val = v; } | } | static immutable r = S(1).repeat(2).array(); | assert(equal(r, [S(1), S(1)])); |} | |@safe version(mir_test) unittest |{ | //Turn down infinity: | static assert(!is(typeof( | repeat(1).array() | ))); |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/array/allocation.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-chunks.lst |/++ |This is a submodule of $(MREF mir,ndslice). | |The module contains $(LREF _chunks) routine. |$(LREF Chunks) structure is multidimensional random access range with slicing. | |$(SUBREF slice, slicedField), $(SUBREF slice, slicedNdField) can be used to construct ndslice view on top of $(LREF Chunks). | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.ndslice.chunks; | |import mir.internal.utility; |import mir.math.common: optmath; |import mir.ndslice.internal; |import mir.ndslice.iterator: IotaIterator; |import mir.ndslice.slice; | |import std.meta; |import std.traits; | |/++ |Creates $(LREF Chunks). | |Params: | Dimensions = compile time list of dimensions to chunk | |See_also: $(SUBREF topology, blocks) $(SUBREF fuse, fuseCells) |+/ |template chunks(Dimensions...) | if (Dimensions.length) |{ | static if (allSatisfy!(isSize_t, Dimensions)) | /++ | Params: | slice = Slice to chunk. | chunkLengths = Chunk shape. It must not have a zero length. | Returns: $(LREF Chunks). | +/ | Chunks!([Dimensions], Iterator, N, kind == Contiguous && [Dimensions] != [0] ? Canonical : kind) | chunks(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice, size_t[Dimensions.length] chunkLengths...) 
| { | static if (kindOf!(typeof(typeof(return).init._slice)) != kind) | { | import mir.ndslice.topology: canonical; | auto p = slice.canonical; | } | else | { | alias p = slice; | } | auto ret = typeof(return)(chunkLengths, p); | foreach (i; Iota!(Dimensions.length)) | ret._norm!i; | return ret; | } | else | alias chunks = .chunks!(staticMap!(toSize_t, Dimensions)); |} | |/// ditto |Chunks!([0], Iterator, N, kind) chunks(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice, size_t chunkLength) |{ | return .chunks!0(slice, chunkLength); |} | | |/// 1Dx1D |@safe pure nothrow @nogc version(mir_test) unittest |{ | import mir.ndslice.chunks: chunks, isChunks; | import mir.ndslice.topology: iota; | | // 0 1 2 3 4 5 6 7 8 9 10 | auto sl = iota(11); | // 0 1 2 | 3 4 5 | 6 7 8 | 9 10 | auto ch = sl.chunks(3); | | static assert(isChunks!(typeof(ch)) == [0]); // isChunks returns dimension indexes | | assert(ch.length == 4); | assert(ch.shape == cast(size_t[1])[4]); | | // 0 1 2 | assert(ch.front == iota([3], 0)); | ch.popFront; | | // 3 4 5 | assert(ch.front == iota([3], 3)); | assert(ch.length == 3); | | // 9 10 | assert(ch[$ - 1] == ch.back); | assert(ch.back == iota([2], 9)); | | ch.popBack; | assert(ch.back == iota([3], 6)); | | assert(ch[$ - 1 .. $].length == 1); | assert(ch[$ .. $].length == 0); | assert(ch[0 .. 0].empty); | | import std.range.primitives: isRandomAccessRange; | static assert(isRandomAccessRange!(typeof(ch))); |} | |/// 2Dx2D |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.chunks: chunks, isChunks; | import mir.ndslice.topology: iota; | | // 0 1 2 3 4 5 6 7 8 9 | // 10 11 12 13 14 15 16 17 18 19 | // 20 21 22 23 24 25 26 27 28 29 | // 30 31 32 33 34 35 36 37 38 39 | // 40 41 42 43 44 45 46 47 48 49 | // 50 51 52 53 54 55 56 57 58 59 | // 60 61 62 63 64 65 66 67 68 69 | // 70 71 72 73 74 75 76 77 78 79 | // 80 81 82 83 84 85 86 87 88 89 | // 90 91 92 93 94 95 96 97 98 99 | // 100 101 102 103 104 105 106 107 108 109 | auto sl = iota(11, 10); // [0, 1, .. 10] | | // ---------------- ---------------- -------- | // | 0 1 2 3 | | 4 5 6 7 | | 8 9 | | // | 10 11 12 13 | | 14 15 16 17 | | 18 19 | | // | 20 21 22 23 | | 24 25 26 27 | | 28 29 | | // |----------------| |----------------| |--------| | // | 30 31 32 33 | | 34 35 36 37 | | 38 39 | | // | 40 41 42 43 | | 44 45 46 47 | | 48 49 | | // | 50 51 52 53 | | 54 55 56 57 | | 58 59 | | // |----------------| |----------------| |--------| | // | 60 61 62 63 | | 64 65 66 67 | | 68 69 | | // | 70 71 72 73 | | 74 75 76 77 | | 78 79 | | // | 80 81 82 83 | | 84 85 86 87 | | 88 89 | | // |----------------| |----------------| |--------| | // | 90 91 92 93 | | 94 95 96 97 | | 98 99 | | // |100 101 102 103 | |104 105 106 107 | |108 109 | | // ---------------- ---------------- -------- | // Chunk columns first, then blocks rows. | auto ch = sl.chunks!(1, 0)(4, 3); | | assert(ch.shape == [3, 4]); | assert(ch.slice == sl); | assert(ch.front.slice == sl[0 .. $, 0 .. 4]); | | assert(ch.front.front == sl[0 .. 3, 0 .. 4]); | | assert(ch.front!0[1] == sl[3 .. 6, 0 .. 4]); | assert(ch.front!1[1] == sl[0 .. 3, 4 .. 8]); | | assert (ch[$ - 1, $ - 1] == [[98, 99], [108, 109]]); | | static assert(isChunks!(typeof(ch)) == [1, 0]); // isChunks returns dimension indexes | | assert(ch.length == 3); | assert(ch.length!1 == 4); | | ch.popFront; | assert(ch.front.front == sl[0 .. 3, 4 .. 8]); | ch.popFront!1; | assert(ch.front.front == sl[3 .. 6, 4 .. 8]); | | assert(ch.back.slice == sl[3 .. $, 8 .. 
$]); | ch.popBack; | assert(ch.back.slice == sl[3 .. $, 4 .. 8]); | | import std.range.primitives: isRandomAccessRange; | static assert(isRandomAccessRange!(typeof(ch))); |} | |/// 1Dx2D |version(mir_test) unittest |{ | import mir.ndslice.chunks: chunks, isChunks; | import mir.ndslice.topology: iota; | | // 0 1 2 3 4 5 6 7 8 9 | // 10 11 12 13 14 15 16 17 18 19 | // 20 21 22 23 24 25 26 27 28 29 | // 30 31 32 33 34 35 36 37 38 39 | auto sl = iota(4, 10); // [0, 1, .. 10] | | // ---------------- ---------------- -------- | // | 0 1 2 3 | | 4 5 6 7 | | 8 9 | | // | 10 11 12 13 | | 14 15 16 17 | | 18 19 | | // | 20 21 22 23 | | 24 25 26 27 | | 28 29 | | // | 30 31 32 33 | | 34 35 36 37 | | 38 39 | | // ---------------- ---------------- -------- | // Chunk columns | auto ch = sl.chunks!1(4); | | assert(ch.slice == sl); | assert(ch.front == sl[0 .. $, 0 .. 4]); | | assert(ch.back == sl[0 .. $, 8 .. $]); | | import std.range.primitives: isRandomAccessRange; | static assert(isRandomAccessRange!(typeof(ch))); |} | |// conversion to ndslice |version(mir_test) unittest |{ | import mir.ndslice.slice : slicedField; | import mir.ndslice.chunks: chunks; | import mir.ndslice.topology: iota, map; | import mir.math.sum: sum; | | // 0 1 2 3 4 5 6 7 8 9 10 | auto sl = iota(11); | // 0 1 2 | 3 4 5 | 6 7 8 | 9 10 | auto ch = sl.chunks(3); | // 3 | 12 | 21 | 19 | auto s = ch.slicedField.map!sum; | assert(s == [3, 12, 21, 19]); |} | |/++ |+/ |struct Chunks(size_t[] dimensions, Iterator, size_t N = 1, SliceKind kind = Contiguous) |{ |@optmath: | | /++ | Chunk shape. | +/ | size_t[dimensions.length] chunkLengths()() @property { return _chunkLengths; } | /// ditto | size_t[dimensions.length] _chunkLengths; | | /// | auto lightConst()() const @property | { | import mir.qualifier; | return Chunks!(dimensions, LightConstOf!Iterator, N, kind)(_chunkLengths, _slice.lightConst); | } | | /// | auto lightImmutable()() immutable @property | { | import mir.qualifier; | return Chunks!(dimensions, LightImmutableOf!Iterator, N, kind)(_chunkLengths, _slice.lightImmutable); | } | | alias DeepElement = Slice!(Iterator, N, kind); | | /++ | Underlying ndslice. | It always correspond to current chunks state. | Its shape equal to the concatenation of the all chunks. | +/ | Slice!(Iterator, N, kind) slice()() @property { return _slice; } | /// | Slice!(Iterator, N, kind) _slice; | | private auto _norm(size_t dimensionIndex = 0)() @property | { | assert(_chunkLengths[dimensionIndex]); | enum dimension = dimensions[dimensionIndex]; | if (_expect(_slice._lengths[dimension] < _chunkLengths[dimensionIndex], false) && _slice._lengths[dimension]) | _chunkLengths[dimensionIndex] = _slice._lengths[dimension]; | } | | private auto _wrap(size_t dimensionIndex, S)(ref S ret) | { | static if (dimensions.length == 1) | { | return ret; | } | else | { | size_t[dimensions.length - 1] rcl; | foreach (i, j; AliasSeq!(Iota!dimensionIndex, Iota!(dimensionIndex + 1, dimensions.length))) | rcl[i] = _chunkLengths[j]; | enum newDims = dimensions[0 .. dimensionIndex] ~ dimensions[dimensionIndex + 1 .. 
$]; | return .Chunks!(newDims, Iterator, N, typeof(ret).kind)(rcl, ret); | } | } | | private ref size_t sliceLength(size_t dimensionIndex)() @property | { | enum dimension = dimensions[dimensionIndex]; | return _slice._lengths[dimension]; | } | | /// ndslice-like primitives | bool empty(size_t dimensionIndex = 0)() const @property | if (dimensionIndex < dimensions.length) | { | enum dimension = dimensions[dimensionIndex]; | return _slice.empty!(dimension); | } | | /// | size_t[dimensions.length] shape()() const @property | { | typeof(return) ret; | foreach(dimensionIndex; Iota!(ret.length)) | { | enum dimension = dimensions[dimensionIndex]; | auto l = _slice._lengths[dimension]; | auto n = _chunkLengths[dimensionIndex]; | ret[dimensionIndex] = l / n + (l % n != 0); | } | return ret; | } | | /// ditto | size_t length(size_t dimensionIndex = 0)() const @property | if (dimensionIndex < dimensions.length) | { | enum dimension = dimensions[dimensionIndex]; | auto l = _slice._lengths[dimension]; | auto n = _chunkLengths[dimensionIndex]; | return l / n + (l % n != 0); | } | | /// ditto | auto front(size_t dimensionIndex = 0)() @property | if (dimensionIndex < dimensions.length) | { | enum dimension = dimensions[dimensionIndex]; | assert(_chunkLengths[dimensionIndex] <= _slice._lengths[dimension]); | auto ret = _slice.selectFront!dimension(_chunkLengths[dimensionIndex]); | return _wrap!dimensionIndex(ret); | } | | /// | auto back(size_t dimensionIndex = 0)() @property | if (dimensionIndex < dimensions.length) | { | assert(!empty!dimensionIndex); | enum dimension = dimensions[dimensionIndex]; | auto l = _slice._lengths[dimension]; | auto n = _chunkLengths[dimensionIndex]; | auto rshift = l % n; | rshift = !rshift ? n : rshift; | auto len = _slice._lengths[dimension]; | auto ret = _slice.select!dimension(len - rshift, len); | return _wrap!dimensionIndex(ret); | } | | /// ditto | void popFront(size_t dimensionIndex = 0)() | if (dimensionIndex < dimensions.length) | { | enum dimension = dimensions[dimensionIndex]; | assert(!empty!dimensionIndex); | _slice.popFrontExactly!dimension(_chunkLengths[dimensionIndex]); | _norm!dimensionIndex; | } | | /// ditto | void popBack(size_t dimensionIndex = 0)() | if (dimensionIndex < dimensions.length) | { | assert(!empty!dimensionIndex); | enum dimension = dimensions[dimensionIndex]; | auto l = _slice._lengths[dimension]; | auto n = _chunkLengths[dimensionIndex]; | auto rshift = l % n; | rshift = !rshift ? n : rshift; | _slice.popBackExactly!dimension(rshift); | _norm!dimensionIndex; | } | | /// ditto | Slice!(IotaIterator!size_t) opSlice(size_t dimensionIndex)(size_t i, size_t j) const | if (dimensionIndex < dimensions.length) | in | { | assert(i <= j, | "Chunks.opSlice!" ~ dimensionIndex.stringof ~ ": the left opSlice boundary must be less than or equal to the right bound."); | enum errorMsg = ": the right opSlice boundary must be less than or equal to the length of the given dimensionIndex."; | assert(j <= length!dimensionIndex, | "Chunks.opSlice!" ~ dimensionIndex.stringof ~ errorMsg); | } | do | { | return typeof(return)(j - i, typeof(return).Iterator(i)); | } | | /// ditto | ChunksSlice!() opSlice(size_t dimensionIndex)(size_t i, ChunksDollar!() j) const | if (dimensionIndex < dimensions.length) | in | { | assert(i <= j, | "Chunks.opSlice!" 
~ dimensionIndex.stringof ~ ": the left opSlice boundary must be less than or equal to the right bound."); | enum errorMsg = ": the right opSlice boundary must be less than or equal to the length of the given dimensionIndex."; | assert(j <= length!dimensionIndex, | "Chunks.opSlice!" ~ dimensionIndex.stringof ~ errorMsg); | } | do | { | return typeof(return)(i, j); | } | | /// ditto | ChunksDollar!() opDollar(size_t dimensionIndex)() @property | { | enum dimension = dimensions[dimensionIndex]; | return ChunksDollar!()(_slice._lengths[dimension], _chunkLengths[dimensionIndex]); | } | | /// ditto | auto opIndex(Slices...)(Slices slices) | if (Slices.length <= dimensions.length) | { | static if (slices.length == 0) | { | return this; | } | else | { | alias slice = slices[0]; | alias S = Slices[0]; | static if (isIndex!S) | { | auto next = this.select!0(slice); | } | else | static if (is_Slice!S) | { | auto i = slice._iterator._index; | auto j = i + slice._lengths[0]; | auto next = this.select!0(i, j); | } | else | { | auto next = this.select!0(slice.i, slice.j); | } | static if (slices.length > 1) | { | return next[slices[1 .. $]]; | } | else | { | return next; | } | } | } | | /// ditto | auto opIndex()(size_t[dimensions.length] index) | { | auto next = this.select!0(index[0]); | static if (dimensions.length == 1) | { | return next; | } | else | { | return next[index[1 .. $]]; | } | } | | /// ditto | auto save()() @property | { | return this; | } | | /// | auto select(size_t dimensionIndex = 0)(size_t index) @property | if (dimensionIndex < dimensions.length) | { | enum dimension = dimensions[dimensionIndex]; | auto chl = _chunkLengths[dimensionIndex]; | auto shiftL = chl * index; | assert(shiftL <= _slice._lengths[dimension]); | auto shiftR = shiftL + chl; | if (_expect(shiftR > _slice._lengths[dimension], false)) | { | shiftR = _slice._lengths[dimension]; | } | auto ret = _slice.select!dimension(shiftL, shiftR); | return _wrap!dimensionIndex(ret); | } | | /// ditto | auto select(size_t dimensionIndex = 0)(size_t i, size_t j) @property | if (dimensionIndex < dimensions.length) | { | assert(i <= j); | enum dimension = dimensions[dimensionIndex]; | auto chl = _chunkLengths[dimensionIndex]; | auto shiftL = chl * i; | auto shiftR = chl * j; | assert(shiftL <= _slice._lengths[dimension]); | assert(shiftR <= _slice._lengths[dimension]); | if (_expect(shiftR > _slice._lengths[dimension], false)) | { | shiftR = _slice._lengths[dimension]; | if (_expect(shiftL > _slice._lengths[dimension], false)) | { | shiftL = _slice._lengths[dimension]; | } | } | auto ret = _slice.select!dimension(shiftL, shiftR); | import std.meta: aliasSeqOf; | return ret.chunks!(aliasSeqOf!dimensions)(_chunkLengths); | } | | // undocumented | auto select(size_t dimensionIndex = 0)(ChunksSlice!() sl) @property | if (dimensionIndex < dimensions.length) | { | assert(sl.i <= _slice._lengths[dimension]); | assert(sl.chunkLength == _chunkLengths[dimensionIndex]); | assert(sl.length == _slice._lengths[dimension]); | | enum dimension = dimensions[dimensionIndex]; | auto chl = _chunkLengths[dimensionIndex]; | auto len = sl.i * chl; | assert(len <= _slice._lengths[dimension]); | if (_expect(len > _slice._lengths[dimension], false)) | len = _slice._lengths[dimension]; | auto ret = _slice.selectBack!dimension(len); | import std.meta: aliasSeqOf; | return ret.chunks!(aliasSeqOf!dimensions)(_chunkLengths); | } |} | |// undocumented |struct ChunksSlice() |{ | size_t i; | ChunksDollar!() j; |} | |// undocumented |struct ChunksDollar() |{ | 
size_t length; | size_t chunkLength; | size_t value()() @property | { | return length / chunkLength + (length % chunkLength != 0); | } | alias value this; |} | |/++ |Checks if T is $(LREF Chunks) type. |Returns: | array of dimension indexes. |+/ |template isChunks(T) |{ | static if (is(T : Chunks!(dimensions, Iterator, N, kind), size_t[] dimensions, Iterator, size_t N, SliceKind kind)) | enum isChunks = dimensions; | else | enum isChunks = size_t[].init; |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.chunks: chunks, isChunks; | import mir.ndslice.topology: iota; | | static assert(isChunks!int == null); | static assert(isChunks!(typeof(iota(20, 30).chunks!(1, 0)(3, 7))) == [1, 0]); |} | |/++ |Evaluates `popFront!dimmensionIndex` for multiple $(LREF Chunks) structures at once. |All chunks structures must have for the appropriate dimension the same chunk lengths and the same underlying slice lengths. | |Params: | dimmensionIndex = dimensionIndex | master = the fist chunks structure | followers = following chunks structures |+/ |void popFrontTuple(size_t dimmensionIndex = 0, Master, Followers...)(ref Master master, ref Followers followers) | if (isChunks!Master && allSatisfy!(isChunks, Followers)) |in |{ | foreach (ref follower; followers) | { | assert(follower.sliceLength!dimmensionIndex == master.sliceLength!dimmensionIndex); | assert(follower._chunkLengths[dimmensionIndex] == master._chunkLengths[dimmensionIndex]); | } |} |do |{ | master._slice.popFrontExactly!(isChunks!Master[dimmensionIndex])(master._chunkLengths[dimmensionIndex]); | foreach (i, ref follower; followers) | { | follower._slice.popFrontExactly!(isChunks!(Followers[i])[dimmensionIndex])(master._chunkLengths[dimmensionIndex]); | // hint for optimizer | follower.sliceLength!dimmensionIndex = master.sliceLength!dimmensionIndex; | } | if (_expect(master.sliceLength!dimmensionIndex < master._chunkLengths[dimmensionIndex], false) && master.sliceLength!dimmensionIndex) | { | master._chunkLengths[dimmensionIndex] = master.sliceLength!dimmensionIndex; | foreach(ref follower; followers) | { | follower._chunkLengths[dimmensionIndex] = master._chunkLengths[dimmensionIndex]; | } | } |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.chunks: chunks; | import mir.ndslice.topology: iota; | | auto a = iota(10, 20).chunks!(0, 1)(3, 7); | auto b = iota(20, 10).chunks!(1, 0)(3, 7); | | auto as = a; | auto bs = b; | | as.popFront; | bs.popFront; | | popFrontTuple(a, b); | | assert(as.slice == a.slice); | assert(bs.slice == b.slice); | | assert(as.chunkLengths == a.chunkLengths); | assert(bs.chunkLengths == b.chunkLengths); |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/chunks.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-math-func-expdigamma.lst |/** |License: $(LINK2 http://boost.org/LICENSE_1_0.txt, Boost License 1.0). | |Authors: Ilya Yaroshenko |*/ |module mir.math.func.expdigamma; | |/++ |Optimized and more precise analog of `y = exp(digamma(x))`. 
| |Returns: | `exp(digamma(x))` |+/ |F expDigamma(F)(in F x) |{ | import mir.math.common; | | static immutable F[7] c = [ | F(1.0 / 24), | F(1.0L / 48), | F(23.0L / 5760), | F(17.0L / 3840), | F(10_099.0L / 2_903_040), | F(2501.0L / 1_161_216), | F(795_697.0L / 199_065_600), | ]; | | if (!(x >= 0)) | return F.nan; | F s = x; | F w = 0; | while ( s < F(10) ) | { | w += 1 / s; | s += 1; | } | F y = F(-0.5); | F t = 1; | import mir.internal.utility; | foreach (i; Iota!(0, c.length)) | { | t *= s; | y += c[i] / t; | } | y += s; | y /= exp(w); | return y; |} | |version(mir_test) |unittest |{ | import std.meta; | import std.mathspecial; | assert(approxEqual(expDigamma(0.001), exp(digamma(0.001)))); | assert(approxEqual(expDigamma(0.1), exp(digamma(0.1)))); | assert(approxEqual(expDigamma(1.0), exp(digamma(1.0)))); | assert(approxEqual(expDigamma(2.3), exp(digamma(2.3)))); | assert(approxEqual(expDigamma(20.0), exp(digamma(20.0)))); | assert(approxEqual(expDigamma(40.0), exp(digamma(40.0)))); | foreach (F; AliasSeq!(float, double, real)) | { | assert(expDigamma!F(0.0) == 0); | assert(expDigamma!F(0.0.nextUp) >= 0); | assert(expDigamma!F(0.0.min_normal) >= 0); | assert(expDigamma!F(0.5.nextUp) >= 0); | assert(expDigamma!F(0.5.nextDown) >= 0); | foreach (i; 1 .. 10) | { | assert(expDigamma(F(i)) >= expDigamma(F(i).nextDown)); | assert(expDigamma(F(i)) <= expDigamma(F(i).nextUp)); | } | } |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/math/func/expdigamma.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-sorting.lst |/++ |This is a submodule of $(MREF mir,ndslice). | |Note: | The combination of | $(SUBREF topology, pairwise) with lambda `"a <= b"` (`"a < b"`) and $(SUBREF algorithm, all) can be used | to check if an ndslice is sorted (strictly monotonic). | $(SUBREF topology iota) can be used to make an index. | $(SUBREF topology map) and $(SUBREF topology zip) can be used to create Schwartzian transform. | See also the examples in the module. | | |See_also: $(SUBREF topology, flattened) | |`isSorted` and `isStrictlyMonotonic` | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Andrei Alexandrescu 2008-2016, Ilya Yaroshenko 2016-, |Authors: Andrei Alexandrescu (Phobos), Ilya Yaroshenko (API, rework, Mir adoptation) | |Macros: | SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |+/ |module mir.ndslice.sorting; | |/// Check if ndslice is sorted, or strictly monotonic. 
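/+
A minimal sketch, not part of mir itself: the pairwise/all combination described in
the module note above, wrapped into the two helper predicates the note alludes to
("isSorted" and "isStrictlyMonotonic"). Intended for one-dimensional slices.
+/
bool isSortedSketch(S)(S slice)
{
    import mir.algorithm.iteration: all;
    import mir.ndslice.topology: pairwise;
    return slice.pairwise!"a <= b".all;
}

bool isStrictlyMonotonicSketch(S)(S slice)
{
    import mir.algorithm.iteration: all;
    import mir.ndslice.topology: pairwise;
    return slice.pairwise!"a < b".all;
}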
|@safe pure version(mir_test) unittest |{ | import mir.algorithm.iteration: all; | import mir.ndslice.slice: sliced; | import mir.ndslice.sorting: sort; | import mir.ndslice.topology: pairwise; | | auto arr = [1, 1, 2].sliced; | | assert(arr.pairwise!"a <= b".all); | assert(!arr.pairwise!"a < b".all); | | arr = [4, 3, 2, 1].sliced; | | assert(!arr.pairwise!"a <= b".all); | assert(!arr.pairwise!"a < b".all); | | sort(arr); | | assert(arr.pairwise!"a <= b".all); | assert(arr.pairwise!"a < b".all); |} | |/// Create index |version(mir_test) unittest |{ | import mir.algorithm.iteration: all; | import mir.ndslice.allocation: slice; | import mir.ndslice.slice: sliced; | import mir.ndslice.sorting: sort; | import mir.ndslice.topology: iota, pairwise; | | auto arr = [4, 2, 3, 1].sliced; | | auto index = arr.length.iota.slice; | index.sort!((a, b) => arr[a] < arr[b]); | | assert(arr[index].pairwise!"a <= b".all); |} | |/// Schwartzian transform |version(mir_test) unittest |{ | import mir.algorithm.iteration: all; | import mir.ndslice.allocation: slice; | import mir.ndslice.slice: sliced; | import mir.ndslice.sorting: sort; | import mir.ndslice.topology: zip, map, pairwise; | | alias transform = (a) => (a - 3) ^^ 2; | | auto arr = [4, 2, 3, 1].sliced; | | arr.map!transform.slice.zip(arr).sort!((l, r) => l.a < r.a); | | assert(arr.map!transform.pairwise!"a <= b".all); |} | |import mir.ndslice.slice; |import mir.math.common: optmath; | |@optmath: | |@safe pure version(mir_test) unittest |{ | import mir.algorithm.iteration: all; | import mir.ndslice.topology: pairwise; | | auto a = [1, 2, 3].sliced; | assert(a[0 .. 0].pairwise!"a <= b".all); | assert(a[0 .. 1].pairwise!"a <= b".all); | assert(a.pairwise!"a <= b".all); | auto b = [1, 3, 2].sliced; | assert(!b.pairwise!"a <= b".all); | | // ignores duplicates | auto c = [1, 1, 2].sliced; | assert(c.pairwise!"a <= b".all); |} | |@safe pure version(mir_test) unittest |{ | import mir.algorithm.iteration: all; | import mir.ndslice.topology: pairwise; | | assert([1, 2, 3][0 .. 0].sliced.pairwise!"a < b".all); | assert([1, 2, 3][0 .. 1].sliced.pairwise!"a < b".all); | assert([1, 2, 3].sliced.pairwise!"a < b".all); | assert(![1, 3, 2].sliced.pairwise!"a < b".all); | assert(![1, 1, 2].sliced.pairwise!"a < b".all); |} | | |/++ |Sorts ndslice, array, or series. | |See_also: $(SUBREF topology, flattened). |+/ |template sort(alias less = "a < b") |{ | import mir.functional: naryFun; | import mir.series: Series; | static if (__traits(isSame, naryFun!less, less)) | { |@optmath: | /++ | Sort one-dimensional series. | +/ | Slice!(Iterator, N, kind) sort(Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, N, kind) slice) | { | if (false) // break safety | { | import mir.utility : swapStars; | auto elem = typeof(*slice._iterator).init; | elem = elem; | auto l = less(elem, elem); | } | import mir.ndslice.topology: flattened; | if (slice.anyEmpty) | return slice; | .quickSortImpl!less(slice.flattened); | return slice; | } | | /++ | Sort for arrays | +/ | T[] sort(T)(T[] ar) | { | return .sort!less(ar.sliced).field; | } | | /++ | Sort for one-dimensional Series. | +/ | Series!(IndexIterator, Iterator, N, kind) | sort(IndexIterator, Iterator, size_t N, SliceKind kind) | (Series!(IndexIterator, Iterator, N, kind) series) | if (N == 1) | { | import mir.ndslice.sorting: sort; | import mir.ndslice.topology: zip; | with(series) | index.zip(data).sort!((a, b) => less(a.a, b.a)); | return series; | } | | /++ | Sort for n-dimensional Series. 
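/+
A minimal sketch of the array overload documented above, with a descending
string-lambda predicate; the values are arbitrary.
+/
version(mir_test) unittest
{
    import mir.ndslice.sorting: sort;

    auto a = [3, 1, 2];
    a.sort!"a > b";          // sorts the array in place through its slice view
    assert(a == [3, 2, 1]);
}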
| +/ | Series!(IndexIterator, Iterator, N, kind) | sort( | IndexIterator, | Iterator, | size_t N, | SliceKind kind, | SortIndexIterator, | DataIterator, | ) | ( | Series!(IndexIterator, Iterator, N, kind) series, | Slice!SortIndexIterator indexBuffer, | Slice!DataIterator dataBuffer, | ) | { | import mir.algorithm.iteration: each; | import mir.ndslice.sorting: sort; | import mir.ndslice.topology: iota, zip, ipack, evertPack; | | assert(indexBuffer.length == series.length); | assert(dataBuffer.length == series.length); | indexBuffer[] = indexBuffer.length.iota!(typeof(indexBuffer.front)); | series.index.zip(indexBuffer).sort!((a, b) => less(a.a, b.a)); | series.data.ipack!1.evertPack.each!((sl){ | { | assert(sl.shape == dataBuffer.shape); | dataBuffer[] = sl[indexBuffer]; | sl[] = dataBuffer; | }}); | return series; | } | } | else | alias sort = .sort!(naryFun!less); |} | |/// |@safe pure version(mir_test) unittest |{ | import mir.algorithm.iteration: all; | import mir.ndslice.slice; | import mir.ndslice.sorting: sort; | import mir.ndslice.topology: pairwise; | | int[10] arr = [7,1,3,2,9,0,5,4,8,6]; | | auto data = arr[].sliced(arr.length); | data.sort(); | assert(data.pairwise!"a <= b".all); |} | |/// one-dimensional series |pure version(mir_test) unittest |{ | import mir.series; | | auto index = [4, 2, 1, 3, 0].sliced; | auto data = [5.6, 3.4, 2.1, 7.8, 0.1].sliced; | auto series = index.series(data); | series.sort; | assert(series.index == [0, 1, 2, 3, 4]); | assert(series.data == [0.1, 2.1, 3.4, 7.8, 5.6]); | /// initial index and data are the same | assert(index.iterator is series.index.iterator); | assert(data.iterator is series.data.iterator); | | foreach(obs; series) | { | static assert(is(typeof(obs) == Observation!(int, double))); | } |} | |/// two-dimensional series |pure version(mir_test) unittest |{ | import mir.series; | import mir.ndslice.allocation: uninitSlice; | | auto index = [4, 2, 3, 1].sliced; | auto data = | [2.1, 3.4, | 5.6, 7.8, | 3.9, 9.0, | 4.0, 2.0].sliced(4, 2); | auto series = index.series(data); | | series.sort( | uninitSlice!size_t(series.length), // index buffer | uninitSlice!double(series.length), // data buffer | ); | | assert(series.index == [1, 2, 3, 4]); | assert(series.data == | [[4.0, 2.0], | [5.6, 7.8], | [3.9, 9.0], | [2.1, 3.4]]); | /// initial index and data are the same | assert(index.iterator is series.index.iterator); | assert(data.iterator is series.data.iterator); |} | |void quickSortImpl(alias less, Iterator)(Slice!Iterator slice) @trusted |{ | import mir.utility : swap, swapStars; | | enum max_depth = 64; | enum naive_est = 1024 / slice.Element!0.sizeof; | enum size_t naive = 32 > naive_est ? 
32 : naive_est; | //enum size_t naive = 1; | static assert(naive >= 1); | | for(;;) | { | auto l = slice._iterator; | auto r = l; | r += slice.length; | | static if (naive > 1) | { | if (slice.length <= naive) | { | auto p = r; | --p; | while(p != l) | { | --p; | //static if (is(typeof(() nothrow | // { | // auto t = slice[0]; if (less(t, slice[0])) slice[0] = slice[0]; | // }))) | //{ | auto d = p; | import mir.functional: unref; | auto temp = unref(*d); | auto c = d; | ++c; | if (less(*c, temp)) | { | do | { | d[0] = *c; | ++d; | ++c; | } | while (c != r && less(*c, temp)); | d[0] = temp; | } | //} | //else | //{ | // auto d = p; | // auto c = d; | // ++c; | // while (less(*c, *d)) | // { | // swap(*d, *c); | // d = c; | // ++c; | // if (c == maxJ) break; | // } | //} | } | return; | } | } | else | { | if(slice.length <= 1) | return; | } | | // partition | auto lessI = l; | --r; | auto pivotIdx = l + slice.length / 2; | setPivot!less(slice.length, l, pivotIdx, r); | import mir.functional: unref; | auto pivot = unref(*pivotIdx); | --lessI; | auto greaterI = r; | swapStars(pivotIdx, greaterI); | | outer: for (;;) | { | do ++lessI; | while (less(*lessI, pivot)); | assert(lessI <= greaterI, "sort: invalid comparison function."); | for (;;) | { | if (greaterI == lessI) | break outer; | --greaterI; | if (!less(pivot, *greaterI)) | break; | } | assert(lessI <= greaterI, "sort: invalid comparison function."); | if (lessI == greaterI) | break; | swapStars(lessI, greaterI); | } | | swapStars(r, lessI); | | ptrdiff_t len = lessI - l; | auto tail = slice[len + 1 .. $]; | slice = slice[0 .. len]; | if (tail.length > slice.length) | swap(slice, tail); | quickSortImpl!less(tail); | } |} | |void setPivot(alias less, Iterator)(size_t length, ref Iterator l, ref Iterator mid, ref Iterator r) @trusted |{ | if (length < 512) | { | if (length >= 32) | medianOf!less(l, mid, r); | return; | } | auto quarter = length >> 2; | auto b = mid - quarter; | auto e = mid + quarter; | medianOf!less(l, e, mid, b, r); |} | |void medianOf(alias less, Iterator) | (ref Iterator a, ref Iterator b, ref Iterator c) @trusted |{ | import mir.utility : swapStars; | if (less(*c, *a)) // c < a | { | if (less(*a, *b)) // c < a < b | { | swapStars(a, b); | swapStars(a, c); | } | else // c < a, b <= a | { | swapStars(a, c); | if (less(*b, *a)) swapStars(a, b); | } | } | else // a <= c | { | if (less(*b, *a)) // b < a <= c | { | swapStars(a, b); | } | else // a <= c, a <= b | { | if (less(*c, *b)) swapStars(b, c); | } | } | assert(!less(*b, *a)); | assert(!less(*c, *b)); |} | |void medianOf(alias less, Iterator) | (ref Iterator a, ref Iterator b, ref Iterator c, ref Iterator d, ref Iterator e) @trusted |{ | import mir.utility : swapStars; // Credit: Teppo Niinimäki | version(unittest) scope(success) | { | assert(!less(*c, *a)); | assert(!less(*c, *b)); | assert(!less(*d, *c)); | assert(!less(*e, *c)); | } | | if (less(*c, *a)) swapStars(a, c); | if (less(*d, *b)) swapStars(b, d); | if (less(*d, *c)) | { | swapStars(c, d); | swapStars(a, b); | } | if (less(*e, *b)) swapStars(b, e); | if (less(*e, *c)) | { | swapStars(c, e); | if (less(*c, *a)) swapStars(a, c); | } | else | { | if (less(*c, *b)) swapStars(b, c); | } |} | |/++ |Returns: `true` if a sorted array contains the value. | |Params: | test = strict ordering symmetric predicate | |For non-symmetric predicates please use a structure with two `opCall`s or an alias of two global functions, |that correponds to `(array[i], value)` and `(value, array[i])` cases. 
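/+
An illustrative sketch of the "structure with two opCalls" recommended above for
non-symmetric predicates; KeyLessSketch is a hypothetical type covering both the
(array[i], value) and (value, array[i]) argument orders.
+/
struct KeyLessSketch
{
    // (array[i], value) case
    static bool opCall(int element, double key) { return element < key; }
    // (value, array[i]) case
    static bool opCall(double key, int element) { return key < element; }
}
// intended use, per the note above: pass it as the predicate alias,
// e.g. transitionIndex!KeyLessSketch or assumeSortedContains!KeyLessSketch.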
| |See_also: $(LREF transitionIndex). |+/ |template assumeSortedContains(alias test = "a < b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!test, test)) | { |@optmath: | /++ | Params: | slice = sorted one-dimensional slice or array. | v = value to test with. It is passed to second argument. | +/ | bool assumeSortedContains(Iterator, SliceKind kind, V) | (auto ref Slice!(Iterator, 1, kind) slice, auto ref scope const V v) | { | auto ti = transitionIndex!test(slice, v); | return ti < slice.length && !test(v, slice[ti]); | } | | /// ditto | bool assumeSortedContains(T, V)(scope T[] ar, auto ref scope const V v) | { | return .assumeSortedContains!test(ar.sliced, v); | } | } | else | alias assumeSortedContains = .assumeSortedContains!(naryFun!test); |} | |/++ |Returns: the smallest index of a sorted array such | that the index corresponds to the arrays element at the index according to the predicate | and `-1` if the array doesn't contain corresponding element. | |Params: | test = strict ordering symmetric predicate. | |For non-symmetric predicates please use a structure with two `opCall`s or an alias of two global functions, |that correponds to `(array[i], value)` and `(value, array[i])` cases. | |See_also: $(LREF transitionIndex). |+/ |template assumeSortedEqualIndex(alias test = "a < b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!test, test)) | { |@optmath: | /++ | Params: | slice = sorted one-dimensional slice or array. | v = value to test with. It is passed to second argument. | +/ | sizediff_t assumeSortedEqualIndex(Iterator, SliceKind kind, V) | (auto ref Slice!(Iterator, 1, kind) slice, auto ref scope const V v) | { | auto ti = transitionIndex!test(slice, v); | return ti < slice.length && !test(v, slice[ti]) ? ti : -1; | } | | /// ditto | sizediff_t assumeSortedEqualIndex(T, V)(scope T[] ar, auto ref scope const V v) | { | return .assumeSortedEqualIndex!test(ar.sliced, v); | } | } | else | alias assumeSortedEqualIndex = .assumeSortedEqualIndex!(naryFun!test); |} | |/// |version(mir_test) |@safe pure unittest |{ | // sorted: a < b | auto a = [0, 1, 2, 3, 4, 6]; | | assert(a.assumeSortedEqualIndex(2) == 2); | assert(a.assumeSortedEqualIndex(5) == -1); | | // <= non strict predicates doesn't work | assert(a.assumeSortedEqualIndex!"a <= b"(2) == -1); |} | |/++ |Computes transition index using binary search. |It is low-level API for lower and upper bounds of a sorted array. | |Params: | test = ordering predicate for (`(array[i], value)`) pairs. | |See_also: $(SUBREF topology, assumeSortedEqualIndex). |+/ |template transitionIndex(alias test = "a < b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!test, test)) | { |@optmath: | /++ | Params: | slice = sorted one-dimensional slice or array. | v = value to test with. It is passed to second argument. 
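/+
A companion sketch for assumeSortedContains, paralleling the assumeSortedEqualIndex
unittest above: same sorted input, default "a < b" predicate.
+/
version(mir_test) unittest
{
    import mir.ndslice.sorting: assumeSortedContains;

    // sorted: a < b
    auto a = [0, 1, 2, 3, 4, 6];

    assert(a.assumeSortedContains(3));
    assert(!a.assumeSortedContains(5));
}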
| +/ | size_t transitionIndex(Iterator, SliceKind kind, V) | (auto ref Slice!(Iterator, 1, kind) slice, auto ref scope const V v) | { | size_t first = 0, count = slice.length; | while (count > 0) | { | immutable step = count / 2, it = first + step; | if (test(slice[it], v)) | { | first = it + 1; | count -= step + 1; | } | else | { | count = step; | } | } | return first; | } | | /// ditto | size_t transitionIndex(T, V)(scope T[] ar, auto ref scope const V v) | { | return .transitionIndex!test(ar.sliced, v); | } | | } | else | alias transitionIndex = .transitionIndex!(naryFun!test); |} | |/// |version(mir_test) |@safe pure unittest |{ | // sorted: a < b | auto a = [0, 1, 2, 3, 4, 6]; | | auto i = a.transitionIndex(2); | assert(i == 2); | auto lowerBound = a[0 .. i]; | | auto j = a.transitionIndex!"a <= b"(2); | assert(j == 3); | auto upperBound = a[j .. $]; | | assert(a.transitionIndex(a[$-1]) == a.length - 1); | assert(a.transitionIndex!"a <= b"(a[$-1]) == a.length); |} | |/++ |Computes an index for `r` based on the comparison `less`. The |index is a sorted array of indices into the original |range. This technique is similar to sorting, but it is more flexible |because (1) it allows "sorting" of immutable collections, (2) allows |binary search even if the original collection does not offer random |access, (3) allows multiple indexes, each on a different predicate, |and (4) may be faster when dealing with large objects. However, using |an index may also be slower under certain circumstances due to the |extra indirection, and is always larger than a sorting-based solution |because it needs space for the index in addition to the original |collection. The complexity is the same as `sort`'s. |Params: | less = The comparison to use. | r = The slice/array to index. |Returns: Index slice/array. |+/ |Slice!(I*) makeIndex(I = size_t, alias less = "a < b", Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) r) |{ | import mir.functional: naryFun; | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | return r | .length | .iota!I | .slice | .sort!((a, b) => naryFun!less(r[a], r[b])); |} | |/// |I[] makeIndex(I = size_t, alias less = "a < b", T)(scope T[] r) |{ | return .makeIndex!(I, less)(r.sliced).field; |} | |/// |version(mir_test) |@system unittest |{ | import mir.algorithm.iteration: all; | import mir.ndslice.topology: indexed, pairwise; | | immutable arr = [ 2, 3, 1, 5, 0 ]; | auto index = arr.makeIndex; | | assert(arr.indexed(index).pairwise!"a < b".all); |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/sorting.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-random-2.2.13-mir-random-source-mir-random-engine-mersenne_twister.lst |/++ |The Mersenne Twister generator. | |Copyright: Copyright Andrei Alexandrescu 2008 - 2009, Ilya Yaroshenko 2016-. |License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0). |Authors: $(HTTP erdani.org, Andrei Alexandrescu) Ilya Yaroshenko (rework) |+/ |module mir.random.engine.mersenne_twister; | |import std.traits; | |/++ |The $(LUCKY Mersenne Twister) generator. 
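/+
A minimal sketch reading transitionIndex as a sorted-insert position, following the
lower/upper bound unittest above.
+/
version(mir_test) unittest
{
    import mir.ndslice.sorting: transitionIndex;

    // sorted: a < b
    auto a = [0, 1, 2, 3, 4, 6];

    // first index whose element is not < 5, i.e. where 5 could be inserted
    assert(a.transitionIndex(5) == 5);
}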
|+/ |struct MersenneTwisterEngine(UIntType, size_t w, size_t n, size_t m, size_t r, | UIntType a, size_t u, UIntType d, size_t s, | UIntType b, size_t t, | UIntType c, size_t l, UIntType f) | if (isUnsigned!UIntType) |{ | /// | enum isRandomEngine = true; | | static assert(0 < w && w <= UIntType.sizeof * 8); | static assert(1 <= m && m <= n); | static assert(0 <= r && 0 <= u && 0 <= s && 0 <= t && 0 <= l); | static assert(r <= w && u <= w && s <= w && t <= w && l <= w); | static assert(0 <= a && 0 <= b && 0 <= c); | | @disable this(); | @disable this(this); | | /// Largest generated value. | enum UIntType max = UIntType.max >> (UIntType.sizeof * 8u - w); | static assert(a <= max && b <= max && c <= max && f <= max); | | private enum UIntType lowerMask = (cast(UIntType) 1u << r) - 1; | private enum UIntType upperMask = ~lowerMask & max; | | /** | Parameters for the generator. | */ | enum size_t wordSize = w; | enum size_t stateSize = n; /// ditto | enum size_t shiftSize = m; /// ditto | enum size_t maskBits = r; /// ditto | enum UIntType xorMask = a; /// ditto | enum size_t temperingU = u; /// ditto | enum UIntType temperingD = d; /// ditto | enum size_t temperingS = s; /// ditto | enum UIntType temperingB = b; /// ditto | enum size_t temperingT = t; /// ditto | enum UIntType temperingC = c; /// ditto | enum size_t temperingL = l; /// ditto | enum UIntType initializationMultiplier = f; /// ditto | | | /// The default seed value. | enum UIntType defaultSeed = 5489; | | /++ | Current reversed payload index with initial value equals to `n-1` | +/ | size_t index = void; | | private UIntType _z = void; | | /++ | Reversed(!) payload. | +/ | UIntType[n] data = void; | | /* | * Marker indicating it's safe to construct from void | * (i.e. the constructor doesn't depend on the struct | * being in an initially valid state). | * Non-public because we don't want to commit to this | * design. | */ | package enum bool _isVoidInitOkay = true; | | /++ | Constructs a MersenneTwisterEngine object. | +/ 0000000| this(UIntType value) @safe pure nothrow @nogc | { | static if (max == UIntType.max) 0000000| data[$-1] = value; | else | data[$-1] = value & max; 0000000| foreach_reverse (size_t i, ref e; data[0 .. $-1]) | { 0000000| e = f * (data[i + 1] ^ (data[i + 1] >> (w - 2))) + cast(UIntType)(n - (i + 1)); | static if (max != UIntType.max) | e &= max; | } 0000000| index = n-1; 0000000| opCall(); | } | | /++ | Constructs a MersenneTwisterEngine object. | | Note that `MersenneTwisterEngine([123])` will not result in | the same initial state as `MersenneTwisterEngine(123)`. | +/ | this()(scope const(UIntType)[] array) @safe pure nothrow @nogc | { | static if (is(UIntType == uint)) | { | enum UIntType f2 = 1664525u; | enum UIntType f3 = 1566083941u; | } | else static if (is(UIntType == ulong)) | { | enum UIntType f2 = 3935559000370003845uL; | enum UIntType f3 = 2862933555777941757uL; | } | else | static assert(0, "init by slice only supported if UIntType is uint or ulong!"); | | data[$-1] = cast(UIntType) (19650218u & max); | foreach_reverse (size_t i, ref e; data[0 .. $-1]) | { | e = f * (data[i + 1] ^ (data[i + 1] >> (w - 2))) + cast(UIntType)(n - (i + 1)); | static if (max != UIntType.max) | e &= max; | } | index = n-1; | if (array.length == 0) | { | opCall(); | return; | } | | size_t final_mix_index = void; | | if (array.length >= n) | { | size_t j = 0; | //Handle all but tail. | while (array.length - j >= n - 1) | { | foreach_reverse (i, ref e; data[0 .. 
$-1]) | { | e = (e ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2)) | + array[j] + cast(UIntType) j; | static if (max != UIntType.max) | e &= max; | ++j; | } | data[$ - 1] = data[0]; | } | //Handle tail. | size_t i = n - 2; | while (j < array.length) | { | data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2)) | + array[j] + cast(UIntType) j; | static if (max != UIntType.max) | data[i] &= max; | ++j; | --i; | } | //Set the index for use by the next pass. | final_mix_index = i; | } | else | { | size_t i = n - 2; | //Handle all but tail. | while (i >= array.length) | { | foreach (j; 0 .. array.length) | { | data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2)) | + array[j] + cast(UIntType) j; | static if (max != UIntType.max) | data[i] &= max; | --i; | } | } | //Handle tail. | size_t j = 0; | while (i != cast(size_t) -1) | { | data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2)) | + array[j] + cast(UIntType) j; | static if (max != UIntType.max) | data[i] &= max; | ++j; | --i; | } | data[$ - 1] = data[0]; | i = n - 2; | data[i] = (data[i] ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f2)) | + array[j] + cast(UIntType) j; | static if (max != UIntType.max) | data[i] &= max; | //Set the index for use by the next pass. | final_mix_index = n - 2; | } | | foreach_reverse (i, ref e; data[0 .. final_mix_index]) | { | e = (e ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f3)) | - cast(UIntType)(n - (i + 1)); | static if (max != UIntType.max) | e &= max; | } | foreach_reverse (i, ref e; data[final_mix_index .. n-1]) | { | e = (e ^ ((data[i+1] ^ (data[i+1] >> (w - 2))) * f3)) | - cast(UIntType)(n - (i + 1)); | static if (max != UIntType.max) | e &= max; | } | data[$-1] = (cast(UIntType)1) << ((UIntType.sizeof * 8) - 1); /* MSB is 1; assuring non-zero initial array */ | opCall(); | } | | /++ | Advances the generator. | +/ | UIntType opCall() @safe pure nothrow @nogc | { | // This function blends two nominally independent | // processes: (i) calculation of the next random | // variate from the cached previous `data` entry | // `_z`, and (ii) updating `data[index]` and `_z` | // and advancing the `index` value to the next in | // sequence. | // | // By interweaving the steps involved in these | // procedures, rather than performing each of | // them separately in sequence, the variables | // are kept 'hot' in CPU registers, allowing | // for significantly faster performance. 0000000| sizediff_t index = this.index; 0000000| sizediff_t next = index - 1; 0000000| if(next < 0) 0000000| next = n - 1; 0000000| auto z = _z; 0000000| sizediff_t conj = index - m; 0000000| if(conj < 0) 0000000| conj = index - m + n; | static if (d == UIntType.max) 0000000| z ^= (z >> u); | else 0000000| z ^= (z >> u) & d; 0000000| auto q = data[index] & upperMask; 0000000| auto p = data[next] & lowerMask; 0000000| z ^= (z << s) & b; 0000000| auto y = q | p; 0000000| auto x = y >> 1; 0000000| z ^= (z << t) & c; 0000000| if (y & 1) 0000000| x ^= a; 0000000| auto e = data[conj] ^ x; 0000000| z ^= (z >> l); 0000000| _z = data[index] = e; 0000000| this.index = next; 0000000| return z; | } |} | |/++ |A $(D MersenneTwisterEngine) instantiated with the parameters of the |original engine $(HTTP en.wikipedia.org/wiki/Mersenne_Twister, |MT19937), generating uniformly-distributed 32-bit numbers with a |period of 2 to the power of 19937. 
| |This is recommended for random number generation on 32-bit systems |unless memory is severely restricted, in which case a |$(REF_ALTTEXT Xorshift, Xorshift, mir, random, engine, xorshift) |would be the generator of choice. |+/ |alias Mt19937 = MersenneTwisterEngine!(uint, 32, 624, 397, 31, | 0x9908b0df, 11, 0xffffffff, 7, | 0x9d2c5680, 15, | 0xefc60000, 18, 1812433253); | |/// |@safe version(mir_random_test) unittest |{ | import mir.random.engine; | | // bit-masking by generator maximum is necessary | // to handle 64-bit `unpredictableSeed` | auto gen = Mt19937(unpredictableSeed & Mt19937.max); | auto n = gen(); | | import std.traits; | static assert(is(ReturnType!gen == uint)); |} | |/++ |A $(D MersenneTwisterEngine) instantiated with the parameters of the |original engine $(HTTP en.wikipedia.org/wiki/Mersenne_Twister, |MT19937), generating uniformly-distributed 64-bit numbers with a |period of 2 to the power of 19937. | |This is recommended for random number generation on 64-bit systems |unless memory is severely restricted, in which case a |$(REF_ALTTEXT Xorshift, Xorshift, mir, random, engine, xorshift) |would be the generator of choice. |+/ |alias Mt19937_64 = MersenneTwisterEngine!(ulong, 64, 312, 156, 31, | 0xb5026f5aa96619e9, 29, 0x5555555555555555, 17, | 0x71d67fffeda60000, 37, | 0xfff7eee000000000, 43, 6364136223846793005); | |/// |@safe version(mir_random_test) unittest |{ | import mir.random.engine; | | auto gen = Mt19937_64(unpredictableSeed); | auto n = gen(); | | import std.traits; | static assert(is(ReturnType!gen == ulong)); |} | |@safe nothrow version(mir_random_test) unittest |{ | import mir.random.engine; | | static assert(isSaturatedRandomEngine!Mt19937); | static assert(isSaturatedRandomEngine!Mt19937_64); | auto gen = Mt19937(Mt19937.defaultSeed); | foreach(_; 0 .. 9999) | gen(); | assert(gen() == 4123659995); | | auto gen64 = Mt19937_64(Mt19937_64.defaultSeed); | foreach(_; 0 .. 9999) | gen64(); | assert(gen64() == 9981545732273789042uL); |} | |version(mir_random_test) unittest |{ | enum val = [1341017984, 62051482162767]; | alias MT(UIntType, uint w) = MersenneTwisterEngine!(UIntType, w, 624, 397, 31, | 0x9908b0df, 11, 0xffffffff, 7, | 0x9d2c5680, 15, | 0xefc60000, 18, 1812433253); | | import std.meta: AliasSeq; | foreach (i, R; AliasSeq!(MT!(ulong, 32), MT!(ulong, 48))) | { | static if (R.wordSize == 48) static assert(R.max == 0xFFFFFFFFFFFF); | auto a = R(R.defaultSeed); | foreach(_; 0..999) | a(); | assert(val[i] == a()); | } |} | |@safe nothrow @nogc version(mir_random_test) unittest |{ | //Verify that seeding with an array gives the same result as the reference | //implementation. | | //32-bit: www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/mt19937ar.tgz | immutable uint[4] seed32 = [0x123u, 0x234u, 0x345u, 0x456u]; | auto gen32 = Mt19937(seed32); | foreach(_; 0..999) | gen32(); | assert(3460025646u == gen32()); | | //64-bit: www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/mt19937-64.tgz | immutable ulong[4] seed64 = [0x12345uL, 0x23456uL, 0x34567uL, 0x45678uL]; | auto gen64 = Mt19937_64(seed64); | foreach(_; 0..999) | gen64(); | assert(994412663058993407uL == gen64()); |} ../../../.dub/packages/mir-random-2.2.13/mir-random/source/mir/random/engine/mersenne_twister.d is 0% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-field.lst |/++ |This is a submodule of $(MREF mir,ndslice). | |Field is a type with `opIndex()(ptrdiff_t index)` primitive. 
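/+
A minimal sketch of the single-primitive field contract stated above. SquareFieldSketch
is a hypothetical type; slicedField is assumed from mir.ndslice.slice, as referenced by
this module's documentation.
+/
struct SquareFieldSketch
{
    // the only primitive a field must provide
    ptrdiff_t opIndex()(ptrdiff_t index)
    {
        return index * index;
    }
}

version(mir_test) unittest
{
    import mir.ndslice.slice: slicedField;

    auto s = SquareFieldSketch().slicedField(4);
    assert(s == [0, 1, 4, 9]);
}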
|An iterator can be created on top of a field using $(SUBREF iterator, FieldIterator). |An ndslice can be created on top of a field using $(SUBREF slice, slicedField). | |$(BOOKTABLE $(H2 Fields), |$(TR $(TH Field Name) $(TH Used By)) |$(T2 BitField, $(SUBREF topology, bitwise)) |$(T2 BitpackField, $(SUBREF topology, bitpack)) |$(T2 CycleField, $(SUBREF topology, cycle) (2 kinds)) |$(T2 LinspaceField, $(SUBREF topology, linspace)) |$(T2 MagicField, $(SUBREF topology, magic)) |$(T2 MapField, $(SUBREF topology, map) and $(SUBREF topology, mapField)) |$(T2 ndIotaField, $(SUBREF topology, ndiota)) |$(T2 OrthogonalReduceField, $(SUBREF topology, orthogonalReduceField)) |$(T2 RepeatField, $(SUBREF topology, repeat)) |$(T2 SparseField, Used for mutable DOK sparse matrixes ) |) | | | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.ndslice.field; | |import mir.internal.utility: Iota; |import mir.math.common: optmath; |import mir.ndslice.internal; |import mir.qualifier; | |@optmath: | |package template ZeroShiftField(T) |{ | static if (hasZeroShiftFieldMember!T) | alias ZeroShiftField = typeof(T.init.assumeFieldsHaveZeroShift()); | else | alias ZeroShiftField = T; |} | |package enum hasZeroShiftFieldMember(T) = __traits(hasMember, T, "assumeFieldsHaveZeroShift"); | |package auto applyAssumeZeroShift(Types...)() |{ | import mir.ndslice.topology; | string str; | foreach(i, T; Types) | static if (hasZeroShiftFieldMember!T) | str ~= "_fields[" ~ i.stringof ~ "].assumeFieldsHaveZeroShift, "; | else | str ~= "_fields[" ~ i.stringof ~ "], "; | return str; |} | |auto MapField__map(Field, alias fun, alias fun1)(ref MapField!(Field, fun) f) |{ | import mir.functional: pipe; | return MapField!(Field, pipe!(fun, fun1))(f._field); |} | | |/++ |`MapField` is used by $(SUBREF topology, map). |+/ |struct MapField(Field, alias _fun) |{ |@optmath: | /// | Field _field; | | /// | auto lightConst()() const @property | { | return MapField!(LightConstOf!Field, _fun)(.lightConst(_field)); | } | | /// | auto lightImmutable()() immutable @property | { | return MapField!(LightImmutableOf!Field, _fun)(.lightImmutable(_field)); | } | | /++ | User defined constructor used by $(LREF mapField). | +/ | static alias __map(alias fun1) = MapField__map!(Field, _fun, fun1); | | auto ref opIndex(T...)(auto ref T index) | { | import mir.functional: RefTuple, unref; | static if (is(typeof(_field[index]) : RefTuple!K, K...)) | { | auto t = _field[index]; | return mixin("_fun(" ~ _iotaArgs!(K.length, "t.expand[", "].unref, ") ~ ")"); | } | else | return _fun(_field[index]); | } | | static if (__traits(hasMember, Field, "length")) | auto length() const @property | { | return _field.length; | } | | static if (__traits(hasMember, Field, "shape")) | auto shape() const @property | { | return _field.shape; | } | | static if (__traits(hasMember, Field, "elementCount")) | auto elementCount() const @property | { | return _field.elementCount; | } | | static if (hasZeroShiftFieldMember!Field) | /// Defined if `Field` has member `assumeFieldsHaveZeroShift`. | auto assumeFieldsHaveZeroShift() @property | { | return _mapField!_fun(_field.assumeFieldsHaveZeroShift); | } |} | |/++ |`VmapField` is used by $(SUBREF topology, map). 
|+/ |struct VmapField(Field, Fun) |{ |@optmath: | /// | Field _field; | /// | Fun _fun; | | /// | auto lightConst()() const @property | { | return VmapField!(LightConstOf!Field, _fun)(.lightConst(_field)); | } | | /// | auto lightImmutable()() immutable @property | { | return VmapField!(LightImmutableOf!Field, _fun)(.lightImmutable(_field)); | } | | auto ref opIndex(T...)(auto ref T index) | { | import mir.functional: RefTuple, unref; | static if (is(typeof(_field[index]) : RefTuple!K, K...)) | { | auto t = _field[index]; | return mixin("_fun(" ~ _iotaArgs!(K.length, "t.expand[", "].unref, ") ~ ")"); | } | else | return _fun(_field[index]); | } | | static if (__traits(hasMember, Field, "length")) | auto length() const @property | { | return _field.length; | } | | static if (__traits(hasMember, Field, "shape")) | auto shape() const @property | { | return _field.shape; | } | | static if (__traits(hasMember, Field, "elementCount")) | auto elementCount()const @property | { | return _field.elementCount; | } | | static if (hasZeroShiftFieldMember!Field) | /// Defined if `Field` has member `assumeFieldsHaveZeroShift`. | auto assumeFieldsHaveZeroShift() @property | { | return _vmapField(_field.assumeFieldsHaveZeroShift, _fun); | } |} | |/+ |Creates a mapped field. Uses `__map` if possible. |+/ |auto _mapField(alias fun, Field)(Field field) |{ | import mir.functional: naryFun; | static if (( | __traits(isSame, fun, naryFun!"a|b") || | __traits(isSame, fun, naryFun!"a^b") || | __traits(isSame, fun, naryFun!"a&b") || | __traits(isSame, fun, naryFun!"a | b") || | __traits(isSame, fun, naryFun!"a ^ b") || | __traits(isSame, fun, naryFun!"a & b")) && | is(Field : ZipField!(BitField!(LeftField, I), BitField!(RightField, I)), LeftField, RightField, I)) | { | import mir.ndslice.topology: bitwiseField; | auto f = ZipField!(LeftField, RightField)(field._fields[0]._field, field._fields[1]._field)._mapField!fun; | return f.bitwiseField!(typeof(f), I); | } | else | static if (__traits(hasMember, Field, "__map")) | return Field.__map!fun(field); | else | return MapField!(Field, fun)(field); |} | |/+ |Creates a mapped field. Uses `__vmap` if possible. |+/ |auto _vmapField(Field, Fun)(Field field, Fun fun) |{ | static if (__traits(hasMember, Field, "__vmap")) | return Field.__vmap(field, fun); | else | return VmapField!(Field, Fun)(field, fun); |} | |/++ |Iterates multiple fields in lockstep. | |`ZipField` is used by $(SUBREF topology, zipFields). |+/ |struct ZipField(Fields...) 
| if (Fields.length > 1) |{ |@optmath: | import mir.functional: RefTuple, Ref, _ref; | import std.meta: anySatisfy; | | /// | Fields _fields; | | /// | auto lightConst()() const @property | { | import std.format; | import mir.ndslice.topology: iota; | import std.meta: staticMap; | return mixin("ZipField!(staticMap!(LightConstOf, Fields))(%(_fields[%s].lightConst,%)].lightConst)".format(_fields.length.iota)); | } | | /// | auto lightImmutable()() immutable @property | { | import std.format; | import mir.ndslice.topology: iota; | import std.meta: staticMap; | return mixin("ZipField!(staticMap!(LightImmutableOf, Fields))(%(_fields[%s].lightImmutable,%)].lightImmutable)".format(_fields.length.iota)); | } | | auto opIndex()(ptrdiff_t index) | { | alias Iterators = Fields; | alias _iterators = _fields; | import mir.ndslice.iterator: _zip_types, _zip_index; | return mixin("RefTuple!(_zip_types!Fields)(" ~ _zip_index!Fields ~ ")"); | } | | auto opIndexAssign(Types...)(RefTuple!(Types) value, ptrdiff_t index) | if (Types.length == Fields.length) | { | foreach(i, ref val; value.expand) | { | _fields[i][index] = val; | } | return opIndex(index); | } | | static if (anySatisfy!(hasZeroShiftFieldMember, Fields)) | /// Defined if at least one of `Fields` has member `assumeFieldsHaveZeroShift`. | auto assumeFieldsHaveZeroShift() @property | { | import std.meta: staticMap; | return mixin("ZipField!(staticMap!(ZeroShiftField, Fields))(" ~ applyAssumeZeroShift!Fields ~ ")"); | } |} | |/++ |`RepeatField` is used by $(SUBREF topology, repeat). |+/ |struct RepeatField(T) |{ | import std.traits: Unqual; | |@optmath: | alias UT = Unqual!T; | | /// | UT _value; | | /// | auto lightConst()() const @property @trusted | { | return RepeatField!(const T)(cast(UT) _value); | } | | /// | auto lightImmutable()() immutable @property @trusted | { | return RepeatField!(immutable T)(cast(UT) _value); | } | | auto ref T opIndex()(ptrdiff_t) @trusted | { return cast(T) _value; } |} | |/++ |`BitField` is used by $(SUBREF topology, bitwise). 
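/+
A minimal usage sketch for the repeat/RepeatField pairing documented above, assuming
mir.ndslice.topology.repeat: a repeated scalar reads back as a slice filled with
that value.
+/
version(mir_test) unittest
{
    import mir.ndslice.topology: repeat;

    auto r = 7.repeat(3);
    assert(r == [7, 7, 7]);
}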
|+/ |struct BitField(Field, I = typeof(cast()Field.init[size_t.init])) | if (__traits(isUnsigned, I)) |{ |@optmath: | import mir.bitop: ctlz; | package(mir) alias E = I; | package(mir) enum shift = ctlz(I.sizeof) + 3; | | /// | Field _field; | | /// optimization for bitwise operations | auto __vmap(Fun : LeftOp!(op, bool), string op)(Fun fun) | if (op == "|" || op == "&" || op == "^") | { | import mir.ndslice.topology: bitwiseField; | return _vmapField(_field, RightOp!(op, I)(I(0) - fun.value)).bitwiseField; | } | | /// ditto | auto __vmap(Fun : RightOp!(op, bool), string op)(Fun fun) | if (op == "|" || op == "&" || op == "^") | { | import mir.ndslice.topology: bitwiseField; | return _vmapField(_field, RightOp!(op, I)(I(0) - fun.value)).bitwiseField; | } | | /// ditto | auto __vmap(Fun)(Fun fun) | { | return VmapField!(typeof(this), Fun)(this, fun); | } | | /// ditto | alias __map(alias fun) = BitField__map!(Field, I, fun); | | /// | auto lightConst()() const @property | { | return BitField!(LightConstOf!Field, I)(mir.qualifier.lightConst(_field)); | } | | /// | auto lightImmutable()() immutable @property | { | return BitField!(LightImmutableOf!Field, I)(mir.qualifier.lightImmutable(_field)); | } | | bool opIndex()(size_t index) | { | import mir.bitop: bt; | return bt!(Field, I)(_field, index) != 0; | } | | bool opIndexAssign()(bool value, size_t index) | { | import mir.bitop: bta; | bta!(Field, I)(_field, index, value); | return value; | } | | static if (hasZeroShiftFieldMember!Field) | /// Defined if `Field` has member `assumeFieldsHaveZeroShift`. | auto assumeFieldsHaveZeroShift() @property | { | return BitField!(ZeroShiftField!Field, I)(_field.assumeFieldsHaveZeroShift); | } |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.iterator: FieldIterator; | ushort[10] data; | auto f = FieldIterator!(BitField!(ushort*))(0, BitField!(ushort*)(data.ptr)); | f[123] = true; | f++; | assert(f[122]); |} | |auto BitField__map(Field, I, alias fun)(BitField!(Field, I) field) |{ | import mir.functional: naryFun; | static if (__traits(isSame, fun, naryFun!"~a")) | { | import mir.ndslice.topology: bitwiseField; | auto f = _mapField!fun(field._field); | return f.bitwiseField!(typeof(f), I); | } | else | { | return field; | } |} | |/++ |`BitpackField` is used by $(SUBREF topology, bitpack). 
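/+
A minimal usage sketch for the bitwise/BitField pairing documented above, assuming
mir.ndslice.topology.bitwise; the assertions rely only on set/get behaviour of
individual bits.
+/
version(mir_test) unittest
{
    import mir.ndslice.slice: sliced;
    import mir.ndslice.topology: bitwise;

    size_t[1] data;
    auto bits = data[].sliced.bitwise;
    bits[5] = true;
    assert(bits[5]);
    assert(!bits[6]);
    assert(data[0] != 0);   // the set bit lands in the backing word
}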
|+/ |struct BitpackField(Field, uint pack, I = typeof(cast()Field.init[size_t.init])) | if (__traits(isUnsigned, I)) |{ | //static assert(); |@optmath: | package(mir) alias E = I; | package(mir) enum mask = (I(1) << pack) - 1; | package(mir) enum bits = I.sizeof * 8; | | /// | Field _field; | | /// | auto lightConst()() const @property | { | return BitpackField!(LightConstOf!Field, pack)(.lightConst(_field)); | } | | /// | auto lightImmutable()() immutable @property | { | return BitpackField!(LightImmutableOf!Field, pack)(.lightImmutable(_field)); | } | | I opIndex()(size_t index) | { | index *= pack; | size_t start = index % bits; | index /= bits; | auto ret = (_field[index] >>> start) & mask; | static if (bits % pack) | { | sizediff_t end = start - (bits - pack); | if (end > 0) | ret ^= cast(I)(_field[index + 1] << (bits - end)) >>> (bits - pack); | } | return cast(I) ret; | } | | I opIndexAssign()(I value, size_t index) | { | import std.traits: Unsigned; | assert(cast(Unsigned!I)value <= mask); | index *= pack; | size_t start = index % bits; | index /= bits; | _field[index] = cast(I)((_field[index] & ~(mask << start)) ^ (value << start)); | static if (bits % pack) | { | sizediff_t end = start - (bits - pack); | if (end > 0) | _field[index + 1] = cast(I)((_field[index + 1] & ~((I(1) << end) - 1)) ^ (value >>> (pack - end))); | } | return value; | } | | static if (hasZeroShiftFieldMember!Field) | /// Defined if `Field` has member `assumeFieldsHaveZeroShift`. | auto assumeFieldsHaveZeroShift() @property | { | return BitpackField!(ZeroShiftField!Field, pack, I)(_field.assumeFieldsHaveZeroShift); | } |} | |/// |unittest |{ | import mir.ndslice.iterator: FieldIterator; | ushort[10] data; | auto f = FieldIterator!(BitpackField!(ushort*, 6))(0, BitpackField!(ushort*, 6)(data.ptr)); | f[0] = cast(ushort) 31; | f[1] = cast(ushort) 13; | f[2] = cast(ushort) 8; | f[3] = cast(ushort) 43; | f[4] = cast(ushort) 28; | f[5] = cast(ushort) 63; | f[6] = cast(ushort) 39; | f[7] = cast(ushort) 23; | f[8] = cast(ushort) 44; | | assert(f[0] == 31); | assert(f[1] == 13); | assert(f[2] == 8); | assert(f[3] == 43); | assert(f[4] == 28); | assert(f[5] == 63); | assert(f[6] == 39); | assert(f[7] == 23); | assert(f[8] == 44); | assert(f[9] == 0); | assert(f[10] == 0); | assert(f[11] == 0); |} | |unittest |{ | import mir.ndslice.slice; | import mir.ndslice.topology; | import mir.ndslice.sorting; | uint[2] data; | auto packed = data[].sliced.bitpack!18; | assert(packed.length == 3); | packed[0] = 5; | packed[1] = 3; | packed[2] = 2; | packed.sort; | assert(packed[0] == 2); | assert(packed[1] == 3); | assert(packed[2] == 5); |} | |/// |struct OrthogonalReduceField(FieldsIterator, alias fun, T) |{ | import mir.ndslice.slice: Slice; | |@optmath: | /// non empty slice | | Slice!FieldsIterator _fields; | | /// | T _initialValue; | | /// | auto lightConst()() const @property | { | auto fields = _fields.lightConst; | return OrthogonalReduceField!(fields.Iterator, fun, T)(fields, _initialValue); | } | | /// | auto lightImmutable()() immutable @property | { | auto fields = _fields.lightImmutable; | return OrthogonalReduceField!(fields.Iterator, fun, T)(fields, _initialValue); | } | | /// `r = fun(r, fields[i][index]);` reduction by `i` | auto opIndex()(size_t index) | { | import std.traits: Unqual; | auto fields = _fields; | T r = _initialValue; | if (!fields.empty) do | { | r = cast(T) fun(r, fields.front[index]); | fields.popFront; | } | while(!fields.empty); | return r; | } |} | |/// |struct CycleField(Field) |{ | import 
mir.ndslice.slice: Slice; | |@optmath: | /// Cycle length | size_t _length; | /// | Field _field; | | /// | auto lightConst()() const @property | { | auto field = .lightConst(_field); | return CycleField!(typeof(field))(_length, field); | } | | /// | auto lightImmutable()() immutable @property | { | auto field = .lightImmutable(_field); | return CycleField!(typeof(field))(_length, field); | } | | /// | auto ref opIndex()(size_t index) | { | return _field[index % _length]; | } | | /// | static if (!__traits(compiles, &opIndex(size_t.init))) | { | auto ref opIndexAssign(T)(auto ref T value, size_t index) | { | return _field[index % _length] = value; | } | } | | static if (hasZeroShiftFieldMember!Field) | /// Defined if `Field` has member `assumeFieldsHaveZeroShift`. | auto assumeFieldsHaveZeroShift() @property | { | return CycleField!(ZeroShiftField!Field)(_length, _field.assumeFieldsHaveZeroShift); | } |} | |/// |struct CycleField(Field, size_t length) |{ | import mir.ndslice.slice: Slice; | |@optmath: | /// Cycle length | enum _length = length; | /// | Field _field; | | /// | auto lightConst()() const @property | { | auto field = .lightConst(_field); | return CycleField!(typeof(field), _length)(field); | } | | /// | auto lightImmutable()() immutable @property | { | auto field = .lightImmutable(_field); | return CycleField!(typeof(field), _length)(field); | } | | /// | auto ref opIndex()(size_t index) | { | return _field[index % _length]; | } | | /// | static if (!__traits(compiles, &opIndex(size_t.init))) | { | auto ref opIndexAssign(T)(auto ref T value, size_t index) | { | return _field[index % _length] = value; | } | } | | static if (hasZeroShiftFieldMember!Field) | /// Defined if `Field` has member `assumeFieldsHaveZeroShift`. | auto assumeFieldsHaveZeroShift() @property | { | return CycleField!(ZeroShiftField!Field, _length)(_field.assumeFieldsHaveZeroShift); | } |} | |/++ |`ndIotaField` is used by $(SUBREF topology, ndiota). |+/ |struct ndIotaField(size_t N) | if (N) |{ |@optmath: | /// | size_t[N - 1] _lengths; | | /// | auto lightConst()() const @property | { | return ndIotaField!N(_lengths); | } | | /// | auto lightImmutable()() const @property | { | return ndIotaField!N(_lengths); | } | | /// | size_t[N] opIndex()(size_t index) const | { | size_t[N] indexes; | foreach_reverse (i; Iota!(N - 1)) | { | indexes[i + 1] = index % _lengths[i]; | index /= _lengths[i]; | } | indexes[0] = index; | return indexes; | } |} | |/++ |`LinspaceField` is used by $(SUBREF topology, linspace). |+/ |struct LinspaceField(T) |{ | /// | size_t _length; | | /// | T _start = cast(T) 0, _stop = cast(T) 0; | | /// | auto lightConst()() scope const @property | { 0000000| return LinspaceField!T(_length, _start, _stop); | } | | /// | auto lightImmutable()() scope const @property | { | return LinspaceField!T(_length, _start, _stop); | } | | // no fastmath | /// | T opIndex()(sizediff_t index) scope const | { 0000000| sizediff_t d = _length - 1; 0000000| auto v = typeof(T.init.re)(d - index); 0000000| auto w = typeof(T.init.re)(index); 0000000| v /= d; 0000000| w /= d; 0000000| auto a = v * _start; 0000000| auto b = w * _stop; 0000000| return a + b; | } | |@optmath: | | /// | size_t length(size_t dimension = 0)() scope const @property | if (dimension == 0) | { 0000000| return _length; | } | | /// | size_t[1] shape()() scope const @property @nogc | { | return [_length]; | } |} | |/++ |Magic square field. |+/ |struct MagicField |{ |@optmath: |@safe pure nothrow @nogc: | | /++ | Magic Square size. 
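/+
A minimal usage sketch for the ndiota/ndIotaField pairing documented above, assuming
mir.ndslice.topology.ndiota: each element is the index vector of its own position.
+/
version(mir_test) unittest
{
    import mir.ndslice.topology: ndiota;

    auto nd = ndiota(2, 3);
    assert(nd[1, 2][0] == 1);
    assert(nd[1, 2][1] == 2);
}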
| +/ | size_t _n; | |scope const: | | /// | MagicField lightConst()() @property | { 0000000| return this; | } | | /// | MagicField lightImmutable()() @property | { | return this; | } | | /// | size_t length(size_t dimension = 0)() @property | if(dimension <= 2) | { 0000000| return _n * _n; | } | | /// | size_t[1] shape() @property | { 0000000| return [_n * _n]; | } | | /// | size_t opIndex(size_t index) | { | pragma(inline, false); 0000000| auto d = index / _n; 0000000| auto m = index % _n; 0000000| if (_n & 1) | { | //d = _n - 1 - d; // MATLAB synchronization | //index = d * _n + m; // ditto 0000000| auto r = (index + 1 - d + (_n - 3) / 2) % _n; 0000000| auto c = (_n * _n - index + 2 * d) % _n; 0000000| return r * _n + c + 1; | } | else 0000000| if ((_n & 2) == 0) | { 0000000| auto a = (d + 1) & 2; 0000000| auto b = (m + 1) & 2; 0000000| return a != b ? index + 1: _n * _n - index; | } | else | { 0000000| auto n = _n / 2 ; 0000000| size_t shift; 0000000| ptrdiff_t q; 0000000| ptrdiff_t p = m - n; 0000000| if (p >= 0) | { 0000000| m = p; 0000000| shift = n * n; 0000000| auto mul = m <= n / 2 + 1; 0000000| q = d - n; 0000000| if (q >= 0) | { 0000000| d = q; 0000000| mul = !mul; | } 0000000| if (mul) | { 0000000| shift *= 2; | } | } | else | { 0000000| auto mul = m < n / 2; 0000000| q = d - n; 0000000| if (q >= 0) | { 0000000| d = q; 0000000| mul = !mul; | } 0000000| if (d == n / 2 && (m == 0 || m == n / 2)) | { 0000000| mul = !mul; | } 0000000| if (mul) | { 0000000| shift = n * n * 3; | } | } 0000000| index = d * n + m; 0000000| auto r = (index + 1 - d + (n - 3) / 2) % n; 0000000| auto c = (n * n - index + 2 * d) % n; 0000000| return r * n + c + 1 + shift; | } | } |} | |/++ |`SparseField` is used to represent Sparse ndarrays in mutable DOK format. |+/ |struct SparseField(T) |{ | /// | T[size_t] _table; | | /// | auto lightConst()() const @trusted | { | return SparseField!(const T)(cast(const(T)[size_t])_table); | } | | /// | auto lightImmutable()() immutable @trusted | { | return SparseField!(immutable T)(cast(immutable(T)[size_t])_table); | } | | /// | T opIndex()(size_t index) | { | import std.traits: isScalarType; | static if (isScalarType!T) | return _table.get(index, cast(T)0); | else | return _table.get(index, null); | } | | /// | T opIndexAssign()(T value, size_t index) | { | import std.traits: isScalarType; | static if (isScalarType!T) | { | if (value != 0) | _table[index] = value; | else | _table.remove(index); | } | else | { | if (value !is null) | _table[index] = value; | else | _table.remove(index); | } | return value; | } | | /// | T opIndexUnary(string op)(size_t index) | if (op == `++` || op == `--`) | { | import std.traits: isScalarType; | mixin (`auto value = ` ~ op ~ `_table[index];`); | static if (isScalarType!T) | { | if (value == 0) | _table.remove(index); | } | else | { | if (value is null) | _table.remove(index); | } | return value; | } | | /// | T opIndexOpAssign(string op)(T value, size_t index) | if (op == `+` || op == `-`) | { | import std.traits: isScalarType; | mixin (`value = _table[index] ` ~ op ~ `= value;`); // this works | static if (isScalarType!T) | { | if (value == 0) | _table.remove(index); | } | else | { | if (value is null) | _table.remove(index); | } | return value; | } |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/field.d is 0% covered <<<<<< EOF # path=-tmp-dub_test_root_4bf7bff3_362e_4c06_a544_2a25e4773300.lst |module dub_test_root; |import std.typetuple; |static import mir.glas.l1; |static import mir.glas.l2; 
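/+
A minimal sketch of the SparseField DOK semantics shown above: missing keys read as
zero, and assigning zero removes the backing associative-array entry.
+/
version(mir_test) unittest
{
    import mir.ndslice.field: SparseField;

    SparseField!double f;
    f[3] = 2.5;
    assert(f[3] == 2.5);
    assert(f[7] == 0);            // absent keys read as zero
    f[3] = 0;                     // assigning zero erases the entry
    assert(f._table.length == 0);
}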
|static import mir.model.lda.hoffman; |static import mir.sparse.blas.axpy; |static import mir.sparse.blas.dot; |static import mir.sparse.blas.gemm; |static import mir.sparse.blas.gemv; |alias allModules = TypeTuple!(mir.glas.l1, mir.glas.l2, mir.model.lda.hoffman, mir.sparse.blas.axpy, mir.sparse.blas.dot, mir.sparse.blas.gemm, mir.sparse.blas.gemv); | | import std.stdio; | import core.runtime; | 0000000| void main() { writeln("All unit tests have been run successfully."); } | shared static this() { | version (Have_tested) { | import tested; | import core.runtime; | import std.exception; | Runtime.moduleUnitTester = () => true; | //runUnitTests!app(new JsonTestResultWriter("results.json")); | enforce(runUnitTests!allModules(new ConsoleTestResultWriter), "Unit tests failed."); | } | } | /tmp/dub_test_root_4bf7bff3_362e_4c06_a544_2a25e4773300.d is 0% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-algorithm-iteration.lst |// Written in the D programming language. |/** |This module contains generic _iteration algorithms. |$(SCRIPT inhibitQuickIndex = 1;) | |$(BOOKTABLE $(H2 Function), |$(TR $(TH Function Name) $(TH Description)) |$(T2 all, Checks if all elements satisfy to a predicate.) |$(T2 any, Checks if at least one element satisfy to a predicate.) |$(T2 cmp, Compares two slices.) |$(T2 count, Counts elements in a slices according to a predicate.) |$(T2 each, Iterates all elements.) |$(T2 eachLower, Iterates lower triangle of matrix.) |$(T2 eachUploPair, Iterates upper and lower pairs of elements in square matrix.) |$(T2 eachUpper, Iterates upper triangle of matrix.) |$(T2 equal, Compares two slices for equality.) |$(T2 find, Finds backward index.) |$(T2 findIndex, Finds index.) |$(T2 isSymmetric, Checks if the matrix is symmetric.) |$(T2 maxIndex, Finds index of the maximum.) |$(T2 maxPos, Finds backward index of the maximum.) |$(T2 minIndex, Finds index of the minimum.) |$(T2 minmaxIndex, Finds indexes of the minimum and the maximum.) |$(T2 minmaxPos, Finds backward indexes of the minimum and the maximum.) |$(T2 minPos, Finds backward index of the minimum.) |$(T2 nBitsToCount, Сount bits until set bit count is reached.) |$(T2 reduce, Accumulates all elements.) |$(T2 uniq, Iterates over the unique elements in a range, which is assumed sorted.) |) | |All operators are suitable to change slices using `ref` argument qualification in a function declaration. |Note, that string lambdas in Mir are `auto ref` functions. | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-2018, Ilya Yaroshenko, 2018-, Mir community |Authors: Ilya Yaroshenko, John Michael Hall, Andrei Alexandrescu (original Phobos code) | |Copyright: Andrei Alexandrescu 2008-. Ilya Yaroshenko 2017- |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Authors: , Ilya Yaroshenko (Mir & BetterC rework). 
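/+
A minimal sketch exercising a few functions from the table above (all, count, each);
the in-place mutation relies on the note that string lambdas in Mir are `auto ref`.
Values are arbitrary.
+/
version(mir_test) unittest
{
    import mir.algorithm.iteration: all, count, each;
    import mir.ndslice.slice: sliced;

    auto s = [1, 2, 3, 4].sliced;
    assert(s.all!"a > 0");
    assert(s.count!"a % 2 == 0" == 2);
    s.each!"a = a * 10";
    assert(s == [10, 20, 30, 40]);
}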
|Source: $(PHOBOSSRC std/algorithm/_iteration.d) |Macros: | NDSLICEREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) | T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) | */ |module mir.algorithm.iteration; | |import mir.functional: naryFun; |import mir.internal.utility; |import mir.math.common: optmath; |import mir.ndslice.field: BitField; |import mir.ndslice.internal; |import mir.ndslice.iterator: FieldIterator, RetroIterator; |import mir.ndslice.slice; |import mir.primitives; |import mir.qualifier; |import std.meta; |import std.range.primitives: isInputRange, isBidirectionalRange, isInfinite, isForwardRange, ElementType; |import std.traits; | |@optmath: | | |/+ |Bitslice representation for accelerated bitwise algorithm. |1-dimensional contiguousitslice can be split into three chunks: head bits, body chunks, and tail bits. | |Bitslice can have head bits because it has slicing and the zero bit may not be aligned to the zero of a body chunk. |+/ |private struct BitSliceAccelerator(Field, I = typeof(Field.init[size_t.init])) | if (__traits(isUnsigned, I)) |{ | import mir.bitop; | import mir.qualifier: lightConst; | import mir.ndslice.traits: isIterator; | import mir.ndslice.iterator: FieldIterator; | import mir.ndslice.field: BitField; | | /// | alias U = typeof(I + 1u); | /// body bits chunks | static if (isIterator!Field) | Slice!Field bodyChunks; | else | Slice!(FieldIterator!Field) bodyChunks; | /// head length | int headLength; | /// tail length | int tailLength; | |@optmath: | | this(Slice!(FieldIterator!(BitField!(Field, I))) slice) | { | enum mask = bitShiftMask!I; | enum shift = bitElemShift!I; | size_t length = slice.length; | size_t index = slice._iterator._index; | if (auto hlen = index & mask) | { | auto l = I.sizeof * 8 - hlen; | if (l > length) | { | // central problem | headLength = -cast(int) length; | tailLength = cast(int) hlen; | goto F; | } | else | { | headLength = cast(uint) l; | length -= l; | index += l; | } | } | tailLength = cast(int) (length & mask); | F: | length >>= shift; | index >>= shift; | bodyChunks._lengths[0] = length; | static if (isIterator!Field) | { | bodyChunks._iterator = slice._iterator._field._field; | bodyChunks._iterator += index; | } | else | { | bodyChunks._iterator._index = index; | bodyChunks._iterator._field = slice._iterator._field._field; | } | } | |scope const: | | bool isCentralProblem() | { | return headLength < 0; | } | | U centralBits() | { | assert(isCentralProblem); | return *bodyChunks._iterator.lightConst >>> tailLength; | } | | uint centralLength() | { | assert(isCentralProblem); | return -headLength; | } | | /// head bits (last `headLength` bits are valid). | U headBits() | { | assert(!isCentralProblem); | if (headLength == 0) | return U.init; | static if (isIterator!Field) | return bodyChunks._iterator.lightConst[-1]; | else | return bodyChunks._iterator._field.lightConst[bodyChunks._iterator._index - 1]; | } | | /// tail bits (first `tailLength` bits are valid). 
| U tailBits() | { | assert(!isCentralProblem); | if (tailLength == 0) | return U.init; | static if (isIterator!Field) | return bodyChunks._iterator.lightConst[bodyChunks.length]; | else | return bodyChunks._iterator._field.lightConst[bodyChunks._iterator._index + bodyChunks.length]; | } | | U negCentralMask() | { | return U.max << centralLength; | } | | U negHeadMask() | { | return U.max << headLength; | } | | U negTailMask() | { | return U.max << tailLength; | } | | U negCentralMaskS() | { | return U.max >> centralLength; | } | | U negHeadMaskS() | { | return U.max >> headLength; | } | | U negTailMaskS() | { | return U.max >> tailLength; | } | | U centralBitsWithRemainingZeros() | { | return centralBits & ~negCentralMask; | } | | U centralBitsWithRemainingZerosS() | { | return centralBits << (U.sizeof * 8 - centralLength); | } | | U headBitsWithRemainingZeros() | { | return headBits >>> (I.sizeof * 8 - headLength); | } | | U headBitsWithRemainingZerosS() | { | static if (U.sizeof > I.sizeof) | return (headBits << (U.sizeof - I.sizeof) * 8) & ~negTailMaskS; | else | return headBits & ~negTailMaskS; | } | | U tailBitsWithRemainingZeros() | { | return tailBits & ~negTailMask; | } | | U tailBitsWithRemainingZerosS() | { | return tailBits << (U.sizeof * 8 - tailLength); | } | | U centralBitsWithRemainingOnes() | { | return centralBits | negCentralMask; | } | | U centralBitsWithRemainingOnesS() | { | return centralBitsWithRemainingZerosS | negCentralMaskS; | } | | U headBitsWithRemainingOnes() | { | return headBitsWithRemainingZeros | negHeadMask; | } | | U headBitsWithRemainingOnesS() | { | return headBitsWithRemainingZerosS | negHeadMaskS; | } | | U tailBitsWithRemainingOnes() | { | return tailBits | negTailMask; | } | | U tailBitsWithRemainingOnesS() | { | return tailBitsWithRemainingZerosS | negTailMaskS; | } | | size_t ctpop() | { | import mir.bitop: ctpop; | if (isCentralProblem) | return centralBitsWithRemainingZeros.ctpop; | size_t ret; | if (headLength) | ret = cast(size_t) headBitsWithRemainingZeros.ctpop; | if (bodyChunks.length) | { | auto bc = bodyChunks.lightConst; | do | { | ret += cast(size_t) bc.front.ctpop; | bc.popFront; | } | while(bc.length); | } | if (tailBits) | ret += cast(size_t) tailBitsWithRemainingZeros.ctpop; | return ret; | } | | bool any() | { | if (isCentralProblem) | return centralBitsWithRemainingZeros != 0; | if (headBitsWithRemainingZeros != 0) | return true; | if (bodyChunks.length) | { | auto bc = bodyChunks.lightConst; | do | { | if (bc.front != 0) | return true; | bc.popFront; | } | while(bc.length); | } | if (tailBitsWithRemainingZeros != 0) | return true; | return false; | } | | bool all() | { | if (isCentralProblem) | return centralBitsWithRemainingOnes != U.max; | size_t ret; | if (headBitsWithRemainingOnes != U.max) | return false; | if (bodyChunks.length) | { | auto bc = bodyChunks.lightConst; | do | { | if (bc.front != I.max) | return false; | bc.popFront; | } | while(bc.length); | } | if (tailBitsWithRemainingOnes != U.max) | return false; | return true; | } | | size_t cttz() | { | U v; | size_t ret; | if (isCentralProblem) | { | v = centralBitsWithRemainingOnes; | if (v) | goto R; | ret = centralLength; | goto L; | } | v = headBitsWithRemainingOnes; | if (v) | goto R; | ret = headLength; | if (bodyChunks.length) | { | auto bc = bodyChunks.lightConst; | do | { | v = bc.front; | if (v) | goto R; | ret += I.sizeof * 8; | bc.popFront; | } | while(bc.length); | } | v = tailBitsWithRemainingOnes; | if (v) | goto R; | ret += tailLength; | goto L; | R: 
| ret += v.cttz; | L: | return ret; | } | | size_t ctlz() | { | U v; | size_t ret; | if (isCentralProblem) | { | v = centralBitsWithRemainingOnes; | if (v) | goto R; | ret = centralLength; | goto L; | } | v = tailBitsWithRemainingOnesS; | if (v) | goto R; | ret = tailLength; | if (bodyChunks.length) | { | auto bc = bodyChunks.lightConst; | do | { | v = bc.back; | if (v) | goto R; | ret += I.sizeof * 8; | bc.popBack; | } | while(bc.length); | } | v = headBitsWithRemainingOnesS; | if (v) | goto R; | ret += headLength; | goto L; | R: | ret += v.ctlz; | L: | return ret; | } | | sizediff_t nBitsToCount(size_t count) | { | size_t ret; | if (count == 0) | return count; | U v, cnt; | if (isCentralProblem) | { | v = centralBitsWithRemainingZeros; | goto E; | } | v = headBitsWithRemainingZeros; | cnt = v.ctpop; | if (cnt >= count) | goto R; | ret += headLength; | count -= cast(size_t) cnt; | if (bodyChunks.length) | { | auto bc = bodyChunks.lightConst; | do | { | v = bc.front; | cnt = v.ctpop; | if (cnt >= count) | goto R; | ret += I.sizeof * 8; | count -= cast(size_t) cnt; | bc.popFront; | } | while(bc.length); | } | v = tailBitsWithRemainingZeros; | E: | cnt = v.ctpop; | if (cnt >= count) | goto R; | return -1; | R: | return ret + v.nTrailingBitsToCount(count); | } | | sizediff_t retroNBitsToCount(size_t count) | { | if (count == 0) | return count; | size_t ret; | U v, cnt; | if (isCentralProblem) | { | v = centralBitsWithRemainingZerosS; | goto E; | } | v = tailBitsWithRemainingZerosS; | cnt = v.ctpop; | if (cnt >= count) | goto R; | ret += tailLength; | count -= cast(size_t) cnt; | if (bodyChunks.length) | { | auto bc = bodyChunks.lightConst; | do | { | v = bc.back; | cnt = v.ctpop; | if (cnt >= count) | goto R; | ret += I.sizeof * 8; | count -= cast(size_t) cnt; | bc.popBack; | } | while(bc.length); | } | v = headBitsWithRemainingZerosS; | E: | cnt = v.ctpop; | if (cnt >= count) | goto R; | return -1; | R: | return ret + v.nLeadingBitsToCount(count); | } |} | |/++ |Сount bits until set bit count is reached. Works with ndslices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation). |Returns: bit count if set bit count is reached or `-1` otherwise. |+/ |sizediff_t nBitsToCount(Field, I)(Slice!(FieldIterator!(BitField!(Field, I))) bitSlice, size_t count) |{ | return BitSliceAccelerator!(Field, I)(bitSlice).nBitsToCount(count); |} | |///ditto |sizediff_t nBitsToCount(Field, I)(Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))) bitSlice, size_t count) |{ | import mir.ndslice.topology: retro; | return BitSliceAccelerator!(Field, I)(bitSlice.retro).retroNBitsToCount(count); |} | |/// |pure unittest |{ | import mir.ndslice.allocation: bitSlice; | import mir.ndslice.topology: retro; | auto s = bitSlice(1000); | s[50] = true; | s[100] = true; | s[200] = true; | s[300] = true; | s[400] = true; | assert(s.nBitsToCount(4) == 301); | assert(s.retro.nBitsToCount(4) == 900); |} | |private void checkShapesMatch( | string fun = __FUNCTION__, | string pfun = __PRETTY_FUNCTION__, | Slices...) 
| (scope ref const Slices slices) | if (Slices.length > 1) |{ | enum msg = "all arguments must be slices" ~ tailErrorMessage!(fun, pfun); | enum msgShape = "all slices must have the same shape" ~ tailErrorMessage!(fun, pfun); | enum N = slices[0].shape.length; | foreach (i, Slice; Slices) | { | static if (i == 0) 0000000| continue; | else | static if (slices[i].shape.length == N) 0000000| assert(slices[i].shape == slices[0].shape, msgShape); | else | { | import mir.ndslice.fuse: fuseShape; | static assert(slices[i].fuseShape.length >= N); | assert(cast(size_t[N])slices[i].fuseShape[0 .. N] == slices[0].shape, msgShape); | } | } |} | | |package(mir) template allFlattened(args...) |{ | static if (args.length) | { | alias arg = args[0]; | @optmath @property ls()() | { | import mir.ndslice.topology: flattened; 0000000| return flattened(arg); | } | alias allFlattened = AliasSeq!(ls, allFlattened!(args[1..$])); | } | else | alias allFlattened = AliasSeq!(); |} | |private template areAllContiguousSlices(Slices...) |{ | import mir.ndslice.traits: isContiguousSlice; | static if (allSatisfy!(isContiguousSlice, Slices)) | enum areAllContiguousSlices = Slices[0].N > 1 && areAllContiguousSlicesImpl!(Slices[0].N, Slices[1 .. $]); | else | enum areAllContiguousSlices = false; |} | |private template areAllContiguousSlicesImpl(size_t N, Slices...) |{ | static if (Slices.length == 0) | enum areAllContiguousSlicesImpl = true; | else | enum areAllContiguousSlicesImpl = Slices[0].N == N && areAllContiguousSlicesImpl!(N, Slices[1 .. $]); |} | |version(LDC) {} |else version(GNU) {} |else version (Windows) {} |else version (X86_64) |{ | //Compiling with DMD for x86-64 for Linux & OS X with optimizations enabled, | //"Tensor mutation on-the-fly" unittest was failing. Disabling inlining | //caused it to succeed. | //TODO: Rework so this is unnecessary! | version = Mir_disable_inlining_in_reduce; |} | |version(Mir_disable_inlining_in_reduce) |{ | private enum Mir_disable_inlining_in_reduce = true; | | private template _naryAliases(size_t n) | { | static if (n == 0) | enum _naryAliases = ""; | else | { | enum i = n - 1; | enum _naryAliases = _naryAliases!i ~ "alias " ~ cast(char)('a' + i) ~ " = args[" ~ i.stringof ~ "];\n"; | } | } | | private template nonInlinedNaryFun(alias fun) | { | import mir.math.common : optmath; | static if (is(typeof(fun) : string)) | { | /// Specialization for string lambdas | @optmath auto ref nonInlinedNaryFun(Args...)(auto ref Args args) | if (args.length <= 26) | { | pragma(inline,false); | mixin(_naryAliases!(Args.length)); | return mixin(fun); | } | } | else static if (is(typeof(fun.opCall) == function)) | { | @optmath auto ref nonInlinedNaryFun(Args...)(auto ref Args args) | if (is(typeof(fun.opCall(args)))) | { | pragma(inline,false); | return fun.opCall(args); | } | } | else | { | @optmath auto ref nonInlinedNaryFun(Args...)(auto ref Args args) | if (is(typeof(fun(args)))) | { | pragma(inline,false); | return fun(args); | } | } | } |} |else |{ | private enum Mir_disable_inlining_in_reduce = false; |} | |S reduceImpl(alias fun, S, Slices...)(S seed, scope Slices slices) |{ | do | { | static if (DimensionCount!(Slices[0]) == 1) | seed = fun(seed, frontOf!slices); | else | seed = .reduceImpl!fun(seed, frontOf!slices); | foreach_reverse(ref slice; slices) | slice.popFront; | } | while(!slices[0].empty); | return seed; |} | |/++ |Implements the homonym function (also known as `accumulate`, |`compress`, `inject`, or `fold`) present in various programming |languages of functional flavor. 
The call `reduce!(fun)(seed, slice1, ..., sliceN)` |first assigns `seed` to an internal variable `result`, |also called the accumulator. Then, for each set of element `x1, ..., xN` in |`slice1, ..., sliceN`, `result = fun(result, x1, ..., xN)` gets evaluated. Finally, |`result` is returned. | |`reduce` allows to iterate multiple slices in the lockstep. | |Note: | $(NDSLICEREF topology, pack) can be used to specify dimensions. |Params: | fun = A function. |See_Also: | $(HTTP llvm.org/docs/LangRef.html#fast-math-flags, LLVM IR: Fast Math Flags) | | $(HTTP en.wikipedia.org/wiki/Fold_(higher-order_function), Fold (higher-order function)) |+/ |template reduce(alias fun) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!fun, fun) | && !Mir_disable_inlining_in_reduce) | /++ | Params: | seed = An initial accumulation value. | slices = One or more slices, range, and arrays. | Returns: | the accumulated `result` | +/ | @optmath auto reduce(S, Slices...)(S seed, scope Slices slices) | if (Slices.length) | { | static if (Slices.length > 1) | slices.checkShapesMatch; | static if (areAllContiguousSlices!Slices) | { | import mir.ndslice.topology: flattened; | return .reduce!fun(seed, allFlattened!(allLightScope!slices)); | } | else | { | if (slices[0].anyEmpty) | return cast(Unqual!S) seed; | static if (is(S : Unqual!S)) | alias UT = Unqual!S; | else | alias UT = S; | return reduceImpl!(fun, UT, Slices)(seed, allLightScope!slices); | } | } | else version(Mir_disable_inlining_in_reduce) | //As above, but with inlining disabled. | @optmath auto reduce(S, Slices...)(S seed, scope Slices slices) | if (Slices.length) | { | static if (Slices.length > 1) | slices.checkShapesMatch; | static if (areAllContiguousSlices!Slices) | { | import mir.ndslice.topology: flattened; | return .reduce!fun(seed, allFlattened!(allLightScope!slices)); | } | else | { | if (slices[0].anyEmpty) | return cast(Unqual!S) seed; | static if (is(S : Unqual!S)) | alias UT = Unqual!S; | else | alias UT = S; | return reduceImpl!(nonInlinedNaryFun!fun, UT, Slices)(seed, allLightScope!slices); | } | } | else | alias reduce = .reduce!(naryFun!fun); |} | |/// Ranges and arrays |version(mir_test) |unittest |{ | auto ar = [1, 2, 3]; | auto s = 0.reduce!"a + b"(ar); | assert (s == 6); |} | |/// Single slice |version(mir_test) |unittest |{ | import mir.ndslice.topology : iota; | | //| 0 1 2 | => 3 | | //| 3 4 5 | => 12 | => 15 | auto sl = iota(2, 3); | | // sum of all element in the slice | auto res = size_t(0).reduce!"a + b"(sl); | | assert(res == 15); |} | |/// Multiple slices, dot product |version(mir_test) |unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | //| 0 1 2 | | //| 3 4 5 | | auto a = iota([2, 3], 0).as!double.slice; | //| 1 2 3 | | //| 4 5 6 | | auto b = iota([2, 3], 1).as!double.slice; | | alias dot = reduce!"a + b * c"; | auto res = dot(0.0, a, b); | | // check the result: | import mir.ndslice.topology : flattened; | import std.numeric : dotProduct; | assert(res == dotProduct(a.flattened, b.flattened)); |} | |/// Zipped slices, dot product |pure |version(mir_test) unittest |{ | import std.typecons : Yes; | import std.numeric : dotProduct; | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota, zip, universal; | import mir.math.common : optmath; | | static @optmath T fmuladd(T, Z)(const T a, Z z) | { | return a + z.a * z.b; | } | | // 0 1 2 | // 3 4 5 | auto sl1 = iota(2, 3).as!double.slice.universal; | // 1 2 3 | // 4 5 6 | auto sl2 = 
iota([2, 3], 1).as!double.slice; | | // slices must have the same strides for `zip!true`. | assert(sl1.strides == sl2.strides); | | auto z = zip!true(sl1, sl2); | | auto dot = reduce!fmuladd(0.0, z); | | assert(dot == dotProduct(iota(6), iota([6], 1))); |} | |/// Tensor mutation on-the-fly |version(mir_test) |unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | import mir.math.common : optmath; | | static @optmath T fun(T)(const T a, ref T b) | { | return a + b++; | } | | //| 0 1 2 | | //| 3 4 5 | | auto sl = iota(2, 3).as!double.slice; | | auto res = reduce!fun(double(0), sl); | | assert(res == 15); | | //| 1 2 3 | | //| 4 5 6 | | assert(sl == iota([2, 3], 1)); |} | |/++ |Packed slices. | |Computes minimum value of maximum values for each row. |+/ |version(mir_test) |unittest |{ | import mir.math.common; | import mir.ndslice.allocation : slice; | import mir.ndslice.dynamic : transposed; | import mir.ndslice.topology : as, iota, pack, map, universal; | | alias maxVal = (a) => reduce!fmax(-double.infinity, a); | alias minVal = (a) => reduce!fmin(double.infinity, a); | alias minimaxVal = (a) => minVal(a.pack!1.map!maxVal); | | auto sl = iota(2, 3).as!double.slice; | | // Vectorized computation: row stride equals 1. | //| 0 1 2 | => | 2 | | //| 3 4 5 | => | 5 | => 2 | auto res = minimaxVal(sl); | assert(res == 2); | | // Common computation: row stride does not equal 1. | //| 0 1 2 | | 0 3 | => | 3 | | //| 3 4 5 | => | 1 4 | => | 4 | | // | 2 5 | => | 5 | => 3 | auto resT = minimaxVal(sl.universal.transposed); | assert(resT == 3); |} | |/// Dlang Range API support. |version(mir_test) |unittest |{ | import mir.algorithm.iteration: each; | import std.range: phobos_iota = iota; | | int s; | // 0 1 2 3 | 4.phobos_iota.each!(i => s += i); | assert(s == 6); |} | |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | auto a = reduce!"a + b"(size_t(7), iota([0, 1], 1)); | assert(a == 7); |} | |void eachImpl(alias fun, Slices...)(scope Slices slices) |{ | foreach(ref slice; slices) | assert(!slice.empty); | do | { | static if (DimensionCount!(Slices[0]) == 1) | fun(frontOf!slices); | else | .eachImpl!fun(frontOf!slices); | foreach_reverse(i; Iota!(Slices.length)) | slices[i].popFront; | } | while(!slices[0].empty); |} | |/++ |The call `each!(fun)(slice1, ..., sliceN)` |evaluates `fun` for each set of elements `x1, ..., xN` in |`slice1, ..., sliceN` respectively. | |`each` allows to iterate multiple slices in the lockstep. |Params: | fun = A function. |Note: | $(NDSLICEREF dynamic, transposed) and | $(NDSLICEREF topology, pack) can be used to specify dimensions. |See_Also: | This is functionally similar to $(LREF reduce) but has not seed. |+/ |template each(alias fun) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!fun, fun)) | /++ | Params: | slices = One or more slices, ranges, and arrays. 
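|    A minimal lockstep sketch; the names `a` and `b` are illustrative only:
|    -------
|    import mir.ndslice.topology: iota;
|    import mir.ndslice.allocation: slice;
|    auto a = iota([2, 3], 0);        //  0  1  2 /  3  4  5
|    auto b = iota([2, 3], 10).slice; // 10 11 12 / 13 14 15
|    each!((x, ref y) { y += x; })(a, b); // add `a` into `b` element-wise
|    assert(b == [[10, 12, 14], [16, 18, 20]]);
|    -------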
| +/ | @optmath auto each(Slices...)(scope Slices slices) | if (Slices.length) | { | static if (Slices.length > 1) | slices.checkShapesMatch; | static if (areAllContiguousSlices!Slices) | { | import mir.ndslice.topology: flattened; | .each!fun(allFlattened!(allLightScope!slices)); | } | else | { | if (slices[0].anyEmpty) | return; | eachImpl!fun(allLightScope!slices); | } | } | else | alias each = .each!(naryFun!fun); |} | |/// Ranges and arrays |version(mir_test) |unittest |{ | auto ar = [1, 2, 3]; | ar.each!"a *= 2"; | assert (ar == [2, 4, 6]); |} | |/// Single slice, multiply-add |version(mir_test) |unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | //| 0 1 2 | | //| 3 4 5 | | auto sl = iota(2, 3).as!double.slice; | | sl.each!((ref a) { a = a * 10 + 5; }); | | assert(sl == | [[ 5, 15, 25], | [35, 45, 55]]); |} | |/// Swap two slices |version(mir_test) |unittest |{ | import mir.utility : swap; | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | //| 0 1 2 | | //| 3 4 5 | | auto a = iota([2, 3], 0).as!double.slice; | //| 10 11 12 | | //| 13 14 15 | | auto b = iota([2, 3], 10).as!double.slice; | | each!swap(a, b); | | assert(a == iota([2, 3], 10)); | assert(b == iota([2, 3], 0)); |} | |/// Swap two zipped slices |version(mir_test) |unittest |{ | import mir.utility : swap; | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, zip, iota; | | //| 0 1 2 | | //| 3 4 5 | | auto a = iota([2, 3], 0).as!double.slice; | //| 10 11 12 | | //| 13 14 15 | | auto b = iota([2, 3], 10).as!double.slice; | | auto z = zip(a, b); | | z.each!(z => swap(z.a, z.b)); | | assert(a == iota([2, 3], 10)); | assert(b == iota([2, 3], 0)); |} | |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | size_t i; | iota(0, 2).each!((a){i++;}); | assert(i == 0); |} | |/++ |The call `eachUploPair!(fun)(matrix)` |evaluates `fun` for each pair (`matrix[j, i]`, `matrix[i, j]`), |for i <= j (default) or i < j (if includeDiagonal is false). | |Params: | fun = A function. | includeDiagonal = true if applying function to diagonal, | false (default) otherwise. |+/ |template eachUploPair(alias fun, bool includeDiagonal = false) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!fun, fun)) | { | /++ | Params: | matrix = Square matrix. | +/ | auto eachUploPair(Iterator, SliceKind kind)(Slice!(Iterator, 2, kind) matrix) | in | { | assert(matrix.length!0 == matrix.length!1, "matrix must be square."); | } | do | { | static if (kind == Contiguous) | { | import mir.ndslice.topology: canonical; | .eachUploPair!(fun, includeDiagonal)(matrix.canonical); | } | else | { | static if (includeDiagonal == true) | { | if (matrix.length) do | { | eachImpl!fun(matrix.lightScope.front!0, matrix.lightScope.front!1); | matrix.popFront!1; | matrix.popFront!0; | // hint for optimizer | matrix._lengths[1] = matrix._lengths[0]; | } | while (matrix.length); | } | else | { | if (matrix.length) for(;;) | { | assert(!matrix.empty!0); | assert(!matrix.empty!1); | auto l = matrix.lightScope.front!1; | auto u = matrix.lightScope.front!0; | matrix.popFront!1; | matrix.popFront!0; | l.popFront; | u.popFront; | // hint for optimizer | matrix._lengths[1] = matrix._lengths[0] = l._lengths[0] = u._lengths[0]; | if (u.length == 0) | break; | eachImpl!fun(u, l); | } | } | } | } | } | else | { | alias eachUploPair = .eachUploPair!(naryFun!fun, includeDiagonal); | } |} | |/// Transpose matrix in place. 
|version(mir_test) |unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota, universal; | import mir.ndslice.dynamic: transposed; | import mir.utility: swap; | | auto m = iota(4, 4).slice; | | m.eachUploPair!swap; | | assert(m == iota(4, 4).universal.transposed); |} | |/// Reflect Upper matrix part to lower part. |version(mir_test) |unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota, universal; | import mir.ndslice.dynamic: transposed; | import mir.utility: swap; | | // 0 1 2 | // 3 4 5 | // 6 7 8 | auto m = iota(3, 3).slice; | | m.eachUploPair!((u, ref l) { l = u; }); | | assert(m == [ | [0, 1, 2], | [1, 4, 5], | [2, 5, 8]]); |} | |/// Fill lower triangle and diagonal with zeroes. |version(mir_test) |unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | // 1 2 3 | // 4 5 6 | // 7 8 9 | auto m = iota([3, 3], 1).slice; | | m.eachUploPair!((u, ref l) { l = 0; }, true); | | assert(m == [ | [0, 2, 3], | [0, 0, 6], | [0, 0, 0]]); |} | |version(mir_test) |unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | // 0 1 2 | // 3 4 5 | // 6 7 8 | auto m = iota(3, 3).slice; | m.eachUploPair!((u, ref l) { l = l + 1; }, true); | assert(m == [ | [1, 1, 2], | [4, 5, 5], | [7, 8, 9]]); |} | |version(mir_test) |unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | // 0 1 2 | // 3 4 5 | // 6 7 8 | auto m = iota(3, 3).slice; | m.eachUploPair!((u, ref l) { l = l + 1; }, false); | | assert(m == [ | [0, 1, 2], | [4, 4, 5], | [7, 8, 8]]); |} | |/++ |Checks if the matrix is symmetric. |+/ |template isSymmetric(alias fun = "a == b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!fun, fun)) | /++ | Params: | matrix = 2D ndslice. | +/ | bool isSymmetric(Iterator, SliceKind kind)(Slice!(Iterator, 2, kind) matrix) | { | static if (kind == Contiguous) | { | import mir.ndslice.topology: canonical; | return .isSymmetric!fun(matrix.canonical); | } | else | { | if (matrix.length!0 != matrix.length!1) | return false; | if (matrix.length) do | { | if (!allImpl!fun(matrix.lightScope.front!0, matrix.lightScope.front!1)) | { | return false; | } | matrix.popFront!1; | matrix.popFront!0; | matrix._lengths[1] = matrix._lengths[0]; | } | while (matrix.length); | return true; | } | } | else | alias isSymmetric = .isSymmetric!(naryFun!fun); |} | |/// |version(mir_test) |unittest |{ | import mir.ndslice.topology: iota; | assert(iota(2, 2).isSymmetric == false); | | assert( | [1, 2, | 2, 3].sliced(2, 2).isSymmetric == true); |} | |bool minPosImpl(alias fun, Iterator, size_t N, SliceKind kind)(scope ref size_t[N] backwardIndex, scope ref Iterator iterator, Slice!(Iterator, N, kind) slice) |{ | bool found; | do | { | static if (slice.shape.length == 1) | { | if (fun(*slice._iterator, *iterator)) | { | backwardIndex[0] = slice.length; | iterator = slice._iterator; | found = true; | } | } | else | { | if (minPosImpl!(fun, LightScopeOf!Iterator, N - 1, kind)(backwardIndex[1 .. 
$], iterator, lightScope(slice).front)) | { | backwardIndex[0] = slice.length; | found = true; | } | } | slice.popFront; | } | while(!slice.empty); | return found; |} | |bool[2] minmaxPosImpl(alias fun, Iterator, size_t N, SliceKind kind)(scope ref size_t[2][N] backwardIndex, scope ref Iterator[2] iterator, Slice!(Iterator, N, kind) slice) |{ | bool[2] found; | do | { | static if (slice.shape.length == 1) | { | if (fun(*slice._iterator, *iterator[0])) | { | backwardIndex[0][0] = slice.length; | iterator[0] = slice._iterator; | found[0] = true; | } | else | if (fun(*iterator[1], *slice._iterator)) | { | backwardIndex[0][1] = slice.length; | iterator[1] = slice._iterator; | found[1] = true; | } | } | else | { | auto r = minmaxPosImpl!(fun, LightScopeOf!Iterator, N - 1, kind)(backwardIndex[1 .. $], iterator, lightScope(slice).front); | if (r[0]) | { | backwardIndex[0][0] = slice.length; | } | if (r[1]) | { | backwardIndex[0][1] = slice.length; | } | } | slice.popFront; | } | while(!slice.empty); | return found; |} | |/++ |Finds a positions (ndslices) such that |`position[0].first` is minimal and `position[1].first` is maximal elements in the slice. | |Position is sub-ndslice of the same dimension in the right-$(RPAREN)down-$(RPAREN)etc$(LPAREN)$(LPAREN) corner. | |Params: | pred = A predicate. | |See_also: | $(LREF minmaxIndex), | $(LREF minPos), | $(LREF maxPos), | $(NDSLICEREF slice, Slice.backward). |+/ |template minmaxPos(alias pred = "a < b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slice = ndslice. | Returns: | 2 subslices with minimal and maximal `first` elements. | +/ | @optmath Slice!(Iterator, N, kind == Contiguous && N > 1 ? Canonical : kind)[2] | minmaxPos(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | { | typeof(return) pret; | if (!slice.anyEmpty) | { | size_t[2][N] ret; | auto it = slice._iterator; | Iterator[2] iterator = [it, it]; | minmaxPosImpl!(pred, Iterator, N, kind)(ret, iterator, lightScope(slice)); | foreach (i; Iota!N) | { | pret[0]._lengths[i] = ret[i][0]; | pret[1]._lengths[i] = ret[i][1]; | } | pret[0]._iterator = iterator[0]; | pret[1]._iterator = iterator[1]; | } | auto strides = slice.strides; | foreach(i; Iota!(0, pret[0].S)) | { | pret[0]._strides[i] = strides[i]; | pret[1]._strides[i] = strides[i]; | } | return pret; | } | else | alias minmaxPos = .minmaxPos!(naryFun!pred); |} | |/// |version(mir_test) |unittest |{ | auto s = [ | 2, 6, 4, -3, | 0, -4, -3, 3, | -3, -2, 7, 2, | ].sliced(3, 4); | | auto pos = s.minmaxPos; | | assert(pos[0] == s[$ - 2 .. $, $ - 3 .. $]); | assert(pos[1] == s[$ - 1 .. $, $ - 2 .. $]); | | assert(pos[0].first == -4); | assert(s.backward(pos[0].shape) == -4); | assert(pos[1].first == 7); | assert(s.backward(pos[1].shape) == 7); |} | |/++ |Finds a backward indexes such that |`slice[indexes[0]]` is minimal and `slice[indexes[1]]` is maximal elements in the slice. | |Params: | pred = A predicate. | |See_also: | $(LREF minmaxIndex), | $(LREF minPos), | $(LREF maxPos), | $(REF Slice.backward, mir,ndslice,slice). |+/ |template minmaxIndex(alias pred = "a < b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slice = ndslice. | Returns: | Subslice with minimal (maximal) `first` element. 
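|    A tiny sketch of the result layout; the values are chosen only for illustration:
|    -------
|    import mir.ndslice.slice: sliced;
|    auto m = [3, 1, 2, 0].sliced(2, 2);
|    auto ix = m.minmaxIndex;   // ix[0] indexes the minimum, ix[1] the maximum
|    assert(ix[0] == [1, 1]);   // m[1, 1] == 0
|    assert(ix[1] == [0, 0]);   // m[0, 0] == 3
|    -------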
| +/ | @optmath size_t[N][2] minmaxIndex(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | { | typeof(return) pret = size_t.max; | if (!slice.anyEmpty) | { | auto shape = slice.shape; | size_t[2][N] ret; | foreach (i; Iota!N) | { | ret[i][1] = ret[i][0] = shape[i]; | } | auto it = slice._iterator; | Iterator[2] iterator = [it, it]; | minmaxPosImpl!(pred, LightScopeOf!Iterator, N, kind)(ret, iterator, lightScope(slice)); | foreach (i; Iota!N) | { | pret[0][i] = slice._lengths[i] - ret[i][0]; | pret[1][i] = slice._lengths[i] - ret[i][1]; | } | } | return pret; | } | else | alias minmaxIndex = .minmaxIndex!(naryFun!pred); |} | |/// |version(mir_test) |unittest |{ | auto s = [ | 2, 6, 4, -3, | 0, -4, -3, 3, | -3, -2, 7, 8, | ].sliced(3, 4); | | auto indexes = s.minmaxIndex; | | assert(indexes == [[1, 1], [2, 3]]); | assert(s[indexes[0]] == -4); | assert(s[indexes[1]] == 8); |} | |/++ |Finds a backward index such that |`slice.backward(index)` is minimal(maximal). | |Params: | pred = A predicate. | |See_also: | $(LREF minIndex), | $(LREF maxPos), | $(LREF maxIndex), | $(REF Slice.backward, mir,ndslice,slice). |+/ |template minPos(alias pred = "a < b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slice = ndslice. | Returns: | Multidimensional backward index such that element is minimal(maximal). | Backward index equals zeros, if slice is empty. | +/ | @optmath Slice!(Iterator, N, kind == Contiguous && N > 1 ? Canonical : kind) | minPos(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | { | typeof(return) ret = { _iterator : slice._iterator }; | if (!slice.anyEmpty) | { | minPosImpl!(pred, LightScopeOf!Iterator, N, kind)(ret._lengths, ret._iterator, lightScope(slice)); | } | auto strides = slice.strides; | foreach(i; Iota!(0, ret.S)) | { | ret._strides[i] = strides[i]; | } | return ret; | } | else | alias minPos = .minPos!(naryFun!pred); |} | |/// ditto |template maxPos(alias pred = "a < b") |{ | import mir.functional: naryFun, reverseArgs; | alias maxPos = minPos!(reverseArgs!(naryFun!pred)); |} | |/// |version(mir_test) |unittest |{ | auto s = [ | 2, 6, 4, -3, | 0, -4, -3, 3, | -3, -2, 7, 2, | ].sliced(3, 4); | | auto pos = s.minPos; | | assert(pos == s[$ - 2 .. $, $ - 3 .. $]); | assert(pos.first == -4); | assert(s.backward(pos.shape) == -4); | | pos = s.maxPos; | | assert(pos == s[$ - 1 .. $, $ - 2 .. $]); | assert(pos.first == 7); | assert(s.backward(pos.shape) == 7); |} | |/++ |Finds an index such that |`slice[index]` is minimal(maximal). | |Params: | pred = A predicate. | |See_also: | $(LREF minIndex), | $(LREF maxPos), | $(LREF maxIndex). |+/ |template minIndex(alias pred = "a < b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slice = ndslice. | Returns: | Multidimensional index such that element is minimal(maximal). | Index elements equal to `size_t.max`, if slice is empty. 
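|    A short 1D sketch with illustrative values:
|    -------
|    import mir.ndslice.slice: sliced;
|    auto v = [4, 2, 9, 1].sliced;
|    assert(v.minIndex == [3]); // v[3] == 1 is the smallest element
|    assert(v.maxIndex == [2]); // v[2] == 9 is the largest element
|    -------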
| +/ | @optmath size_t[N] minIndex(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) | { | size_t[N] ret = size_t.max; | if (!slice.anyEmpty) | { | ret = slice.shape; | minPosImpl!(pred, LightScopeOf!Iterator, N, kind)(ret, slice._iterator, lightScope(slice)); | foreach (i; Iota!N) | ret[i] = slice._lengths[i] - ret[i]; | } | return ret; | } | else | alias minIndex = .minIndex!(naryFun!pred); |} | |/// ditto |template maxIndex(alias pred = "a < b") |{ | import mir.functional: naryFun, reverseArgs; | alias maxIndex = minIndex!(reverseArgs!(naryFun!pred)); |} | |/// |version(mir_test) |unittest |{ | auto s = [ | 2, 6, 4, -3, | 0, -4, -3, 3, | -3, -2, 7, 8, | ].sliced(3, 4); | | auto index = s.minIndex; | | assert(index == [1, 1]); | assert(s[index] == -4); | | index = s.maxIndex; | | assert(index == [2, 3]); | assert(s[index] == 8); |} | |/// |version(mir_test) |unittest |{ | auto s = [ | -8, 6, 4, -3, | 0, -4, -3, 3, | -3, -2, 7, 8, | ].sliced(3, 4); | | auto index = s.minIndex; | | assert(index == [0, 0]); | assert(s[index] == -8); |} | |version(mir_test) |unittest |{ | auto s = [ | 0, 1, 2, 3, | 4, 5, 6, 7, | 8, 9, 10, 11 | ].sliced(3, 4); | | auto index = s.minIndex; | assert(index == [0, 0]); | assert(s[index] == 0); | | index = s.maxIndex; | assert(index == [2, 3]); | assert(s[index] == 11); |} | |bool findImpl(alias fun, size_t N, Slices...)(scope ref size_t[N] backwardIndex, Slices slices) | if (Slices.length) |{ | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I)) | { | auto cnt = BitSliceAccelerator!(Field, I)(slices[0]).cttz; | if (cnt = -1) | return false; | backwardIndex[0] = slices[0].length - cnt; | } | else | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I)) | { | import mir.ndslice.topology: retro; | auto cnt = BitSliceAccelerator!(Field, I)(slices[0].retro).ctlz; | if (cnt = -1) | return false; | backwardIndex[0] = slices[0].length - cnt; | } | else | { | do | { | static if (DimensionCount!(Slices[0]) == 1) | { | if (fun(frontOf!slices)) | { | backwardIndex[0] = slices[0].length; | return true; | } | } | else | { | if (findImpl!fun(backwardIndex[1 .. $], frontOf!slices)) | { | backwardIndex[0] = slices[0].length; | return true; | } | } | foreach_reverse(ref slice; slices) | slice.popFront; | } | while(!slices[0].empty); | return false; | } |} | |/++ |Finds an index such that |`pred(slices[0][index], ..., slices[$-1][index])` is `true`. | |Params: | pred = A predicate. | |See_also: | $(LREF find), | $(LREF any). |Optimization: | `findIndex!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation). |+/ |template findIndex(alias pred) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slices = One or more slices. | Returns: | Multidimensional index such that the predicate is true. | Index equals `size_t.max`, if the predicate evaluates `false` for all indexes. | Constraints: | All slices must have the same shape. 
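|    A small lockstep sketch over two slices; the predicate and values are illustrative:
|    -------
|    import mir.ndslice.topology: iota;
|    // 0 1 2     10 11 12
|    // 3 4 5     13 14 15
|    auto a = iota(2, 3);
|    auto b = iota([2, 3], 10);
|    size_t[2] i = findIndex!((x, y) => x + y == 18)(a, b);
|    assert(i == [1, 1]); // a[1, 1] + b[1, 1] == 4 + 14
|    -------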
| +/ | @optmath Select!(DimensionCount!(Slices[0]) > 1, size_t[DimensionCount!(Slices[0])], size_t) findIndex(Slices...)(Slices slices) | if (Slices.length) | { | static if (Slices.length > 1) | slices.checkShapesMatch; | size_t[DimensionCount!(Slices[0])] ret = -1; | auto lengths = slices[0].shape; | if (!slices[0].anyEmpty && findImpl!pred(ret, allLightScope!slices)) | foreach (i; Iota!(DimensionCount!(Slices[0]))) | ret[i] = lengths[i] - ret[i]; | static if (DimensionCount!(Slices[0]) > 1) | return ret; | else | return ret[0]; | } | else | alias findIndex = .findIndex!(naryFun!pred); |} | |/// Ranges and arrays |version(mir_test) |unittest |{ | import std.range : iota; | // 0 1 2 3 4 5 | auto sl = iota(5); | size_t index = sl.findIndex!"a == 3"; | | assert(index == 3); | assert(sl[index] == 3); | | assert(sl.findIndex!(a => a == 8) == size_t.max); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3); | size_t[2] index = sl.findIndex!(a => a == 3); | | assert(sl[index] == 3); | | index = sl.findIndex!"a == 6"; | assert(index[0] == size_t.max); | assert(index[1] == size_t.max); |} | |/++ |Finds a backward index such that |`pred(slices[0].backward(index), ..., slices[$-1].backward(index))` is `true`. | |Params: | pred = A predicate. | |Optimization: | To check if any element was found | use the last dimension (row index). | This will slightly optimize the code. |-------- |if (backwardIndex) |{ | auto elem1 = slice1.backward(backwardIndex); | //... | auto elemK = sliceK.backward(backwardIndex); |} |else |{ | // not found |} |-------- | |See_also: | $(LREF findIndex), | $(LREF any), | $(REF Slice.backward, mir,ndslice,slice). | |Optimization: | `find!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation). |+/ |template find(alias pred) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slices = One or more slices. | Returns: | Multidimensional backward index such that the predicate is true. | Backward index equals zeros, if the predicate evaluates `false` for all indexes. | Constraints: | All slices must have the same shape. 
| +/ | @optmath Select!(DimensionCount!(Slices[0]) > 1, size_t[DimensionCount!(Slices[0])], size_t) find(Slices...)(auto ref Slices slices) | if (Slices.length && allSatisfy!(hasShape, Slices)) | { | static if (Slices.length > 1) | slices.checkShapesMatch; | size_t[DimensionCount!(Slices[0])] ret; | if (!slices[0].anyEmpty) | findImpl!pred(ret, allLightScope!slices); | static if (DimensionCount!(Slices[0]) > 1) | return ret; | else | return ret[0]; | } | else | alias find = .find!(naryFun!pred); |} | |/// Ranges and arrays |version(mir_test) |unittest |{ | import std.range : iota; | | auto sl = iota(10); | size_t index = sl.find!"a == 3"; | | assert(sl[$ - index] == 3); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3); | size_t[2] bi = sl.find!"a == 3"; | assert(sl.backward(bi) == 3); | assert(sl[$ - bi[0], $ - bi[1]] == 3); | | bi = sl.find!"a == 6"; | assert(bi[0] == 0); | assert(bi[1] == 0); |} | |/// Multiple slices |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | // 0 1 2 | // 3 4 5 | auto a = iota(2, 3); | // 10 11 12 | // 13 14 15 | auto b = iota([2, 3], 10); | | size_t[2] bi = find!((a, b) => a * b == 39)(a, b); | assert(a.backward(bi) == 3); | assert(b.backward(bi) == 13); |} | |/// Zipped slices |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, zip; | | // 0 1 2 | // 3 4 5 | auto a = iota(2, 3); | // 10 11 12 | // 13 14 15 | auto b = iota([2, 3], 10); | | size_t[2] bi = zip!true(a, b).find!"a.a * a.b == 39"; | | assert(a.backward(bi) == 3); | assert(b.backward(bi) == 13); |} | |/// Mutation on-the-fly |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3).as!double.slice; | | static bool pred(T)(ref T a) | { | if (a == 5) | return true; | a = 8; | return false; | } | | size_t[2] bi = sl.find!pred; | | assert(bi == [1, 1]); | assert(sl.backward(bi) == 5); | | // sl was changed | assert(sl == [[8, 8, 8], | [8, 8, 5]]); |} | |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | size_t i; | size_t[2] bi = iota(2, 0).find!((elem){i++; return true;}); | assert(i == 0); | assert(bi == [0, 0]); |} | |size_t anyImpl(alias fun, Slices...)(scope Slices slices) | if (Slices.length) |{ | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I)) | { | return BitSliceAccelerator!(Field, I)(slices[0]).any; | } | else | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I)) | { | // pragma(msg, S); | import mir.ndslice.topology: retro; | return .anyImpl!fun(lightScope(slices[0]).retro); | } | else | { | do | { | static if (DimensionCount!(Slices[0]) == 1) | { | if (fun(frontOf!slices)) | return true; | } | else | { | if (anyImpl!fun(frontOf!slices)) | return true; | } | foreach_reverse(ref slice; slices) | slice.popFront; | } | while(!slices[0].empty); | return false; | } |} | |/++ |Like $(LREF find), but only returns whether or not the search was successful. | |Params: | pred = The predicate. |Optimization: | `any!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation). 
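|A brief sketch on a packed bit slice (the length 100 is arbitrary):
|-------
|import mir.ndslice.allocation: bitSlice;
|auto bits = bitSlice(100); // all bits start as false
|assert(!bits.any);
|bits[42] = true;
|assert(bits.any);
|-------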
|+/ |template any(alias pred = "a") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slices = One or more slices, ranges, and arrays. | Returns: | `true` if the search was successful and `false` otherwise. | Constraints: | All slices must have the same shape. | +/ | @optmath bool any(Slices...)(scope Slices slices) | if ((Slices.length == 1 || !__traits(isSame, pred, "a")) && Slices.length) | { | static if (Slices.length > 1) | slices.checkShapesMatch; | static if (areAllContiguousSlices!Slices) | { | import mir.ndslice.topology: flattened; | return .any!pred(allFlattened!(allLightScope!slices)); | } | else | { | return !slices[0].anyEmpty && anyImpl!pred(allLightScope!slices); | } | } | else | alias any = .any!(naryFun!pred); |} | |/// Ranges and arrays |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import std.range : iota; | // 0 1 2 3 4 5 | auto r = iota(6); | | assert(r.any!"a == 3"); | assert(!r.any!"a == 6"); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3); | | assert(sl.any!"a == 3"); | assert(!sl.any!"a == 6"); |} | |/// Multiple slices |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | // 0 1 2 | // 3 4 5 | auto a = iota(2, 3); | // 10 11 12 | // 13 14 15 | auto b = iota([2, 3], 10); | | assert(any!((a, b) => a * b == 39)(a, b)); |} | |/// Zipped slices |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, zip; | | // 0 1 2 | // 3 4 5 | auto a = iota(2, 3); | // 10 11 12 | // 13 14 15 | auto b = iota([2, 3], 10); | | // slices must have the same strides | | assert(zip!true(a, b).any!"a.a * a.b == 39"); |} | |/// Mutation on-the-fly |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3).as!double.slice; | | static bool pred(T)(ref T a) | { | if (a == 5) | return true; | a = 8; | return false; | } | | assert(sl.any!pred); | | // sl was changed | assert(sl == [[8, 8, 8], | [8, 8, 5]]); |} | |size_t allImpl(alias fun, Slices...)(scope Slices slices) | if (Slices.length) |{ | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I)) | { | return BitSliceAccelerator!(LightScopeOf!Field, I)(lightScope(slices[0])).all; | } | else | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I)) | { | // pragma(msg, S); | import mir.ndslice.topology: retro; | return .allImpl!fun(lightScope(slices[0]).retro); | } | else | { | do | { | static if (DimensionCount!(Slices[0]) == 1) | { 0000000| if (!fun(frontOf!slices)) 0000000| return false; | } | else | { | if (!allImpl!fun(frontOf!slices)) | return false; | } 0000000| foreach_reverse(ref slice; slices) 0000000| slice.popFront; | } 0000000| while(!slices[0].empty); 0000000| return true; | } |} | |/++ |Checks if all of the elements verify `pred`. | |Params: | pred = The predicate. |Optimization: | `all!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation). |+/ |template all(alias pred = "a") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slices = One or more slices. 
| Returns: | `true` all of the elements verify `pred` and `false` otherwise. | Constraints: | All slices must have the same shape. | +/ | @optmath bool all(Slices...)(scope Slices slices) | if ((Slices.length == 1 || !__traits(isSame, pred, "a")) && Slices.length) | { | static if (Slices.length > 1) 0000000| slices.checkShapesMatch; | static if (areAllContiguousSlices!Slices) | { | import mir.ndslice.topology: flattened; 0000000| return .all!pred(allFlattened!(allLightScope!slices)); | } | else | { 0000000| return slices[0].anyEmpty || allImpl!pred(allLightScope!slices); | } | } | else | alias all = .all!(naryFun!pred); |} | |/// Ranges and arrays |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import std.range : iota; | // 0 1 2 3 4 5 | auto r = iota(6); | | assert(r.all!"a < 6"); | assert(!r.all!"a < 5"); |} | |/// |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3); | | assert(sl.all!"a < 6"); | assert(!sl.all!"a < 5"); |} | |/// Multiple slices |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3); | | assert(all!"a - b == 0"(sl, sl)); |} | |/// Zipped slices |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota, zip; | | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3); | | | assert(zip!true(sl, sl).all!"a.a - a.b == 0"); |} | |/// Mutation on-the-fly |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | // 0 1 2 | // 3 4 5 | auto sl = iota(2, 3).as!double.slice; | | static bool pred(T)(ref T a) | { | if (a < 4) | { | a = 8; | return true; | } | return false; | } | | assert(!sl.all!pred); | | // sl was changed | assert(sl == [[8, 8, 8], | [8, 4, 5]]); |} | |@safe pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | size_t i; | assert(iota(2, 0).all!((elem){i++; return true;})); | assert(i == 0); |} | |/++ |Counts elements in slices according to the `fun`. |Params: | fun = A predicate. | |Optimization: | `count!"a"` has accelerated specialization for slices created with $(REF bitwise, mir,ndslice,topology), $(REF bitSlice, mir,ndslice,allocation). |+/ |template count(alias fun) |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!fun, fun)) | /++ | Params: | slices = One or more slices, ranges, and arrays. | | Returns: The number elements according to the `fun`. | | Constraints: | All slices must have the same shape. 
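|    A lockstep sketch over two slices; the predicates are illustrative:
|    -------
|    import mir.ndslice.topology: iota;
|    auto a = iota(2, 3);      // 0 1 2 / 3 4 5
|    auto b = iota([2, 3], 1); // 1 2 3 / 4 5 6
|    assert(count!"b == a + 1"(a, b) == 6); // holds at every position
|    assert(count!"a + b > 5"(a, b) == 3);  // holds at the last three positions
|    -------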
| +/ | @optmath size_t count(Slices...)(scope Slices slices) | if (Slices.length) | { | static if (Slices.length > 1) | slices.checkShapesMatch; | static if (__traits(isSame, fun, naryFun!"true")) | { | return slices[0].elementCount; | } | else | static if (areAllContiguousSlices!Slices) | { | import mir.ndslice.topology: flattened; | return .count!fun(allFlattened!(allLightScope!slices)); | } | else | { | if (slices[0].anyEmpty) | return 0; | return countImpl!(fun)(allLightScope!slices); | } | } | else | alias count = .count!(naryFun!fun); | |} | |/// Ranges and arrays |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import std.range : iota; | // 0 1 2 3 4 5 | auto r = iota(6); | | assert(r.count!"true" == 6); | assert(r.count!"a" == 5); | assert(r.count!"a % 2" == 3); |} | |/// Single slice |version(mir_test) |unittest |{ | import mir.ndslice.topology : iota; | | //| 0 1 2 | | //| 3 4 5 | | auto sl = iota(2, 3); | | assert(sl.count!"true" == 6); | assert(sl.count!"a" == 5); | assert(sl.count!"a % 2" == 3); |} | |/// Accelerated set bit count |version(mir_test) |unittest |{ | import mir.ndslice.topology: retro, iota, bitwise; | import mir.ndslice.allocation: slice; | | //| 0 1 2 | | //| 3 4 5 | | auto sl = iota!size_t(2, 3).bitwise; | | assert(sl.count!"true" == 6 * size_t.sizeof * 8); | | assert(sl.slice.count!"a" == 7); | | // accelerated | assert(sl.count!"a" == 7); | assert(sl.retro.count!"a" == 7); | | auto sl2 = iota!ubyte([6], 128).bitwise; | // accelerated | assert(sl2.count!"a" == 13); | assert(sl2[4 .. $].count!"a" == 13); | assert(sl2[4 .. $ - 1].count!"a" == 12); | assert(sl2[4 .. $ - 1].count!"a" == 12); | assert(sl2[41 .. $ - 1].count!"a" == 1); |} | |unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: bitwise, assumeFieldsHaveZeroShift; | auto sl = slice!uint([6]).bitwise; | auto slb = slice!ubyte([6]).bitwise; | slb[4] = true; | auto d = slb[4]; | auto c = assumeFieldsHaveZeroShift(slb & ~slb); | // pragma(msg, typeof(c)); | assert(!sl.any); | assert((~sl).all); | // pragma(msg, typeof(~slb)); | // pragma(msg, typeof(~slb)); | // assert(sl.findIndex); |} | |/++ |Compares two or more slices for equality, as defined by predicate `pred`. | |See_also: $(NDSLICEREF slice, Slice.opEquals) | |Params: | pred = The predicate. |+/ |template equal(alias pred = "a == b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | slices = Two or more slices, slices, ranges, and arrays. | | Returns: | `true` any of the elements verify `pred` and `false` otherwise. 
| +/ | bool equal(Slices...)(scope Slices slices) | if (Slices.length >= 2) | { | enum msg = "all arguments must be slices" ~ tailErrorMessage!(); | enum msgShape = "all slices must have the same dimension count" ~ tailErrorMessage!(); | import mir.internal.utility; | foreach (i, Slice; Slices) | { | // static assert (isSlice!Slice, msg); | static if (i) | { | static assert (DimensionCount!(Slices[i]) == DimensionCount!(Slices[0])); | foreach (j; Iota!(DimensionCount!(Slices[0]))) 0000000| if (slices[i].shape[j] != slices[0].shape[j]) 0000000| goto False; | } | } 0000000| return all!pred(allLightScope!slices); 0000000| False: return false; | } | else | alias equal = .equal!(naryFun!pred); |} | |/// Ranges and arrays |@safe pure nothrow |version(mir_test) unittest |{ | import std.range : iota; | auto r = iota(6); | assert(r.equal([0, 1, 2, 3, 4, 5])); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : iota; | | // 0 1 2 | // 3 4 5 | auto sl1 = iota(2, 3); | // 1 2 3 | // 4 5 6 | auto sl2 = iota([2, 3], 1); | | assert(equal(sl1, sl1)); | assert(sl1 == sl1); //can also use opEquals for two Slices | assert(equal!"2 * a == b + c"(sl1, sl1, sl1)); | | assert(equal!"a < b"(sl1, sl2)); | | assert(!equal(sl1[0 .. $ - 1], sl1)); | assert(!equal(sl1[0 .. $, 0 .. $ - 1], sl1)); |} | |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.algorithm.iteration: equal; | import mir.math.common: approxEqual; | import mir.ndslice.allocation: rcslice; | import mir.ndslice.topology: as, iota; | | auto x = 5.iota.as!double.rcslice; | auto y = x.rcslice; | | assert(equal(x, y)); | assert(equal!approxEqual(x, y)); |} | |ptrdiff_t cmpImpl(alias pred, A, B) | (scope A sl1, scope B sl2) | if (DimensionCount!A == DimensionCount!B) |{ | for (;;) | { | static if (DimensionCount!A == 1) | { | import mir.functional : naryFun; | if (naryFun!pred(sl1.front, sl2.front)) | return -1; | if (naryFun!pred(sl2.front, sl1.front)) | return 1; | } | else | { | if (auto res = .cmpImpl!pred(sl1.front, sl2.front)) | return res; | } | sl1.popFront; | if (sl1.empty) | return -cast(ptrdiff_t)(sl2.length > 1); | sl2.popFront; | if (sl2.empty) | return 1; | } |} | |/++ |Performs three-way recursive lexicographical comparison on two slices according to predicate `pred`. |Iterating `sl1` and `sl2` in lockstep, `cmp` compares each `N-1` dimensional element `e1` of `sl1` |with the corresponding element `e2` in `sl2` recursively. |If one of the slices has been finished,`cmp` returns a negative value if `sl1` has fewer elements than `sl2`, |a positive value if `sl1` has more elements than `sl2`, |and `0` if the ranges have the same number of elements. | |Params: | pred = The predicate. |+/ |template cmp(alias pred = "a < b") |{ | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | /++ | Params: | sl1 = First slice, range, or array. | sl2 = Second slice, range, or array. | | Returns: | `0` if both ranges compare equal. | Negative value if the first differing element of `sl1` is less than the corresponding | element of `sl2` according to `pred`. | Positive value if the first differing element of `sl2` is less than the corresponding | element of `sl1` according to `pred`. 
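|    A short sketch of the exhaustion rule on 1D slices:
|    -------
|    import mir.ndslice.topology: iota;
|    // equal prefixes, so only the lengths differ
|    assert(cmp(iota(3), iota(4)) < 0);
|    assert(cmp(iota(4), iota(3)) > 0);
|    -------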
| +/ | ptrdiff_t cmp(A, B) | (scope A sl1, scope B sl2) | if (DimensionCount!A == DimensionCount!B) | { | auto b = sl2.anyEmpty; | if (sl1.anyEmpty) | { | if (!b) | return -1; | auto sh1 = sl1.shape; | auto sh2 = sl2.shape; | foreach (i; Iota!(DimensionCount!A)) | if (ptrdiff_t ret = sh1[i] - sh2[i]) | return ret; | return 0; | } | if (b) | return 1; | return cmpImpl!pred(lightScope(sl1), lightScope(sl2)); | } | else | alias cmp = .cmp!(naryFun!pred); |} | |/// Ranges and arrays |@safe pure nothrow |version(mir_test) unittest |{ | import std.range : iota; | | // 0 1 2 3 4 5 | auto r1 = iota(0, 6); | // 1 2 3 4 5 6 | auto r2 = iota(1, 7); | | assert(cmp(r1, r1) == 0); | assert(cmp(r1, r2) < 0); | assert(cmp!"a >= b"(r1, r2) > 0); |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | // 0 1 2 | // 3 4 5 | auto sl1 = iota(2, 3); | // 1 2 3 | // 4 5 6 | auto sl2 = iota([2, 3], 1); | | assert(cmp(sl1, sl1) == 0); | assert(cmp(sl1, sl2) < 0); | assert(cmp!"a >= b"(sl1, sl2) > 0); |} | |@safe pure nothrow @nogc |version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | auto sl1 = iota(2, 3); | auto sl2 = iota([2, 3], 1); | | assert(cmp(sl1[0 .. $ - 1], sl1) < 0); | assert(cmp(sl1, sl1[0 .. $, 0 .. $ - 1]) > 0); | | assert(cmp(sl1[0 .. $ - 2], sl1) < 0); | assert(cmp(sl1, sl1[0 .. $, 0 .. $ - 3]) > 0); | assert(cmp(sl1[0 .. $, 0 .. $ - 3], sl1[0 .. $, 0 .. $ - 3]) == 0); | assert(cmp(sl1[0 .. $, 0 .. $ - 3], sl1[0 .. $ - 1, 0 .. $ - 3]) > 0); | assert(cmp(sl1[0 .. $ - 1, 0 .. $ - 3], sl1[0 .. $, 0 .. $ - 3]) < 0); |} | |size_t countImpl(alias fun, Slices...)(scope Slices slices) |{ | size_t ret; | alias S = Slices[0]; | import mir.functional: naryFun; | import mir.ndslice.iterator: FieldIterator, RetroIterator; | import mir.ndslice.field: BitField; | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(FieldIterator!(BitField!(Field, I))), Field, I)) | { | ret = BitSliceAccelerator!(Field, I)(slices[0]).ctpop; | } | else | static if (__traits(isSame, fun, naryFun!"a") && is(S : Slice!(RetroIterator!(FieldIterator!(BitField!(Field, I)))), Field, I)) | { | // pragma(msg, S); | import mir.ndslice.topology: retro; | ret = .countImpl!fun(lightScope(slices[0]).retro); | } | else | do | { | static if (DimensionCount!(Slices[0]) == 1) | { | if(fun(frontOf!slices)) | ret++; | } | else | ret += .countImpl!fun(frontOf!slices); | foreach_reverse(ref slice; slices) | slice.popFront; | } | while(!slices[0].empty); | return ret; |} | |/++ |Returns: max length across all dimensions. |+/ |size_t maxLength(S)(auto ref scope S s) | if (hasShape!S) |{ | auto shape = s.shape; | size_t length = 0; | foreach(i; Iota!(shape.length)) | if (shape[i] > length) | length = shape[i]; | return length; |} | |/++ |The call `eachLower!(fun)(slice1, ..., sliceN)` evaluates `fun` on the lower |triangle in `slice1, ..., sliceN` respectively. | |`eachLower` allows iterating multiple slices in the lockstep. | |Params: | fun = A function |See_Also: | This is functionally similar to $(LREF each). |+/ |template eachLower(alias fun) |{ | import mir.functional : naryFun; | | static if (__traits(isSame, naryFun!fun, fun)) | { | /++ | Params: | inputs = One or more two-dimensional slices and an optional | integer, `k`. | | The value `k` determines which diagonals will have the function | applied: | For k = 0, the function is also applied to the main diagonal | For k = 1 (default), only the non-main diagonals below the main | diagonal will have the function applied. 
| For k > 1, fewer diagonals below the main diagonal will have the | function applied. | For k < 0, more diagonals above the main diagonal will have the | function applied. | +/ | void eachLower(Inputs...)(scope Inputs inputs) | if (((Inputs.length > 1) && | (isIntegral!(Inputs[$ - 1]))) || | (Inputs.length)) | { | import mir.ndslice.traits : isMatrix; | | size_t val; | | static if ((Inputs.length > 1) && (isIntegral!(Inputs[$ - 1]))) | { | immutable(sizediff_t) k = inputs[$ - 1]; | alias Slices = Inputs[0..($ - 1)]; | alias slices = inputs[0..($ - 1)]; | } | else | { | enum sizediff_t k = 1; | alias Slices = Inputs; | alias slices = inputs; | } | | static assert (allSatisfy!(isMatrix, Slices), | "eachLower: Every slice input must be a two-dimensional slice"); | static if (Slices.length > 1) | slices.checkShapesMatch; | if (slices[0].anyEmpty) | return; | | foreach(ref slice; slices) | assert(!slice.empty); | | immutable(size_t) m = slices[0].length!0; | immutable(size_t) n = slices[0].length!1; | | if ((n + k) < m) | { | val = m - (n + k); | .eachImpl!fun(selectBackOf!(val, slices)); | } | | size_t i; | | if (k > 0) | { | foreach(ref slice; slices) | slice.popFrontExactly!0(k); | i = k; | } | | do | { | val = i - k + 1; | .eachImpl!fun(frontSelectFrontOf!(val, slices)); | | foreach(ref slice; slices) | slice.popFront!0; | i++; | } while ((i < (n + k)) && (i < m)); | } | } | else | { | alias eachLower = .eachLower!(naryFun!fun); | } |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota, canonical, universal; | alias AliasSeq(T...) = T; | | pure nothrow | void test(alias func)() | { | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = func(iota([3, 3], 1).slice); | m.eachLower!"a = 0"(0); | assert(m == [ | [0, 2, 3], | [0, 0, 6], | [0, 0, 0]]); | } | | @safe pure nothrow @nogc | T identity(T)(T x) | { | return x; | } | | alias kinds = AliasSeq!(identity, canonical, universal); | test!(kinds[0]); | test!(kinds[1]); | test!(kinds[2]); |} | |/// |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | m.eachLower!"a = 0"; | assert(m == [ | [1, 2, 3], | [0, 5, 6], | [0, 0, 9]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | m.eachLower!"a = 0"(-1); | assert(m == [ | [0, 0, 3], | [0, 0, 0], | [0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | m.eachLower!"a = 0"(2); | assert(m == [ | [1, 2, 3], | [4, 5, 6], | [0, 8, 9]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | m.eachLower!"a = 0"(-2); | assert(m == [ | [0, 0, 0], | [0, 0, 0], | [0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachLower!"a = 0"(0); | assert(m == [ | [0, 2, 3, 4], | [0, 0, 7, 8], | [0, 0, 0, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | 
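|    // default k = 1: only elements strictly below the main diagonal are affected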
import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachLower!"a = 0"; | assert(m == [ | [1, 2, 3, 4], | [0, 6, 7, 8], | [0, 0, 11, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachLower!"a = 0"(-1); | assert(m == [ | [0, 0, 3, 4], | [0, 0, 0, 8], | [0, 0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachLower!"a = 0"(2); | assert(m == [ | [1, 2, 3, 4], | [5, 6, 7, 8], | [0, 10, 11, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachLower!"a = 0"(-2); | assert(m == [ | [0, 0, 0, 4], | [0, 0, 0, 0], | [0, 0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachLower!"a = 0"(0); | assert(m == [ | [0, 2, 3], | [0, 0, 6], | [0, 0, 0], | [0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachLower!"a = 0"; | assert(m == [ | [1, 2, 3], | [0, 5, 6], | [0, 0, 9], | [0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachLower!"a = 0"(-1); | assert(m == [ | [0, 0, 3], | [0, 0, 0], | [0, 0, 0], | [0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachLower!"a = 0"(2); | assert(m == [ | [1, 2, 3], | [4, 5, 6], | [0, 8, 9], | [0, 0, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachLower!"a = 0"(-2); | assert(m == [ | [0, 0, 0], | [0, 0, 0], | [0, 0, 0], | [0, 0, 0]]); |} | |/// Swap two slices |pure nothrow |version(mir_test) unittest |{ | import mir.utility : swap; | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | //| 0 1 2 | | //| 3 4 5 | | //| 6 7 8 | | auto a = iota([3, 3]).as!double.slice; | //| 10 11 12 | | //| 13 14 15 | | //| 16 17 18 | | auto b = iota([3, 3], 10).as!double.slice; | | eachLower!swap(a, b); | | assert(a == [ | [ 0, 1, 2], | [13, 4, 5], | [16, 17, 8]]); | assert(b == [ | [10, 11, 12], | [ 3, 14, 15], | [ 6, 7, 18]]); |} | |/// Swap two zipped slices |pure nothrow |version(mir_test) unittest |{ | import mir.utility : swap; | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, zip, iota; | | //| 0 1 2 | | //| 3 4 5 | | //| 6 7 8 | | auto a = iota([3, 
3]).as!double.slice; | //| 10 11 12 | | //| 13 14 15 | | //| 16 17 18 | | auto b = iota([3, 3], 10).as!double.slice; | | auto z = zip(a, b); | | z.eachLower!(z => swap(z.a, z.b)); | | assert(a == [ | [ 0, 1, 2], | [13, 4, 5], | [16, 17, 8]]); | assert(b == [ | [10, 11, 12], | [ 3, 14, 15], | [ 6, 7, 18]]); |} | |/++ |The call `eachUpper!(fun)(slice1, ..., sliceN)` evaluates `fun` on the upper |triangle in `slice1, ..., sliceN`, respectively. | |`eachUpper` allows iterating multiple slices in the lockstep. | |Params: | fun = A function |See_Also: | This is functionally similar to $(LREF each). |+/ |template eachUpper(alias fun) |{ | import mir.functional: naryFun; | | static if (__traits(isSame, naryFun!fun, fun)) | { | /++ | Params: | inputs = One or more two-dimensional slices and an optional | integer, `k`. | | The value `k` determines which diagonals will have the function | applied: | For k = 0, the function is also applied to the main diagonal | For k = 1 (default), only the non-main diagonals above the main | diagonal will have the function applied. | For k > 1, fewer diagonals below the main diagonal will have the | function applied. | For k < 0, more diagonals above the main diagonal will have the | function applied. | +/ | void eachUpper(Inputs...)(scope Inputs inputs) | if (((Inputs.length > 1) && | (isIntegral!(Inputs[$ - 1]))) || | (Inputs.length)) | { | import mir.ndslice.traits : isMatrix; | | size_t val; | | static if ((Inputs.length > 1) && (isIntegral!(Inputs[$ - 1]))) | { | immutable(sizediff_t) k = inputs[$ - 1]; | alias Slices = Inputs[0..($ - 1)]; | alias slices = inputs[0..($ - 1)]; | } | else | { | enum sizediff_t k = 1; | alias Slices = Inputs; | alias slices = inputs; | } | | static assert (allSatisfy!(isMatrix, Slices), | "eachUpper: Every slice input must be a two-dimensional slice"); | static if (Slices.length > 1) | slices.checkShapesMatch; | if (slices[0].anyEmpty) | return; | | foreach(ref slice; slices) | assert(!slice.empty); | | immutable(size_t) m = slices[0].length!0; | immutable(size_t) n = slices[0].length!1; | | size_t i; | | if (k < 0) | { | val = -k; | .eachImpl!fun(selectFrontOf!(val, slices)); | | foreach(ref slice; slices) | slice.popFrontExactly!0(-k); | i = -k; | } | | do | { | val = (n - k) - i; | .eachImpl!fun(frontSelectBackOf!(val, slices)); | | foreach(ref slice; slices) | slice.popFront; | i++; | } while ((i < (n - k)) && (i < m)); | } | } | else | { | alias eachUpper = .eachUpper!(naryFun!fun); | } |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota, canonical, universal; | | pure nothrow | void test(alias func)() | { | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = func(iota([3, 3], 1).slice); | m.eachUpper!"a = 0"(0); | assert(m == [ | [0, 0, 0], | [4, 0, 0], | [7, 8, 0]]); | } | | @safe pure nothrow @nogc | T identity(T)(T x) | { | return x; | } | | alias kinds = AliasSeq!(identity, canonical, universal); | test!(kinds[0]); | test!(kinds[1]); | test!(kinds[2]); |} | |/// |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | m.eachUpper!"a = 0"; | assert(m == [ | [1, 0, 0], | [4, 5, 0], | [7, 8, 9]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | 
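|    // With a negative offset the applied region grows downward: k = -1 covers the
|    // main diagonal, everything above it, and the first diagonal below it
|    // (elements [i, j] with j >= i + k), as the assertion below shows.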
m.eachUpper!"a = 0"(-1); | assert(m == [ | [0, 0, 0], | [0, 0, 0], | [7, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | m.eachUpper!"a = 0"(2); | assert(m == [ | [1, 2, 0], | [4, 5, 6], | [7, 8, 9]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | auto m = iota([3, 3], 1).slice; | m.eachUpper!"a = 0"(-2); | assert(m == [ | [0, 0, 0], | [0, 0, 0], | [0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachUpper!"a = 0"(0); | assert(m == [ | [0, 0, 0, 0], | [5, 0, 0, 0], | [9, 10, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachUpper!"a = 0"; | assert(m == [ | [1, 0, 0, 0], | [5, 6, 0, 0], | [9, 10, 11, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachUpper!"a = 0"(-1); | assert(m == [ | [0, 0, 0, 0], | [0, 0, 0, 0], | [9, 0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachUpper!"a = 0"(2); | assert(m == [ | [1, 2, 0, 0], | [5, 6, 7, 0], | [9, 10, 11, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 4 | | //| 5 6 7 8 | | //| 9 10 11 12 | | auto m = iota([3, 4], 1).slice; | m.eachUpper!"a = 0"(-2); | assert(m == [ | [0, 0, 0, 0], | [0, 0, 0, 0], | [0, 0, 0, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachUpper!"a = 0"(0); | assert(m == [ | [0, 0, 0], | [4, 0, 0], | [7, 8, 0], | [10, 11, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachUpper!"a = 0"; | assert(m == [ | [1, 0, 0], | [4, 5, 0], | [7, 8, 9], | [10, 11, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachUpper!"a = 0"(-1); | assert(m == [ | [0, 0, 0], | [0, 0, 0], | [7, 0, 0], | [10, 11, 0]]); |} | |pure nothrow |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachUpper!"a = 0"(2); | assert(m == [ | [1, 2, 0], | [4, 5, 6], | [7, 8, 9], | [10, 11, 12]]); |} | |pure nothrow |version(mir_test) unittest |{ | import 
mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | //| 1 2 3 | | //| 4 5 6 | | //| 7 8 9 | | //| 10 11 12 | | auto m = iota([4, 3], 1).slice; | m.eachUpper!"a = 0"(-2); | assert(m == [ | [0, 0, 0], | [0, 0, 0], | [0, 0, 0], | [10, 0, 0]]); |} | |/// Swap two slices |pure nothrow |version(mir_test) unittest |{ | import mir.utility : swap; | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, iota; | | //| 0 1 2 | | //| 3 4 5 | | //| 6 7 8 | | auto a = iota([3, 3]).as!double.slice; | //| 10 11 12 | | //| 13 14 15 | | //| 16 17 18 | | auto b = iota([3, 3], 10).as!double.slice; | | eachUpper!swap(a, b); | | assert(a == [ | [0, 11, 12], | [3, 4, 15], | [6, 7, 8]]); | assert(b == [ | [10, 1, 2], | [13, 14, 5], | [16, 17, 18]]); |} | |/// Swap two zipped slices |pure nothrow |version(mir_test) unittest |{ | import mir.utility : swap; | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : as, zip, iota; | | //| 0 1 2 | | //| 3 4 5 | | //| 6 7 8 | | auto a = iota([3, 3]).as!double.slice; | //| 10 11 12 | | //| 13 14 15 | | //| 16 17 18 | | auto b = iota([3, 3], 10).as!double.slice; | | auto z = zip(a, b); | | z.eachUpper!(z => swap(z.a, z.b)); | | assert(a == [ | [0, 11, 12], | [3, 4, 15], | [6, 7, 8]]); | assert(b == [ | [10, 1, 2], | [13, 14, 5], | [16, 17, 18]]); |} | |// uniq |/** |Lazily iterates unique consecutive elements of the given range (functionality |akin to the $(HTTP wikipedia.org/wiki/_Uniq, _uniq) system |utility). Equivalence of elements is assessed by using the predicate |$(D pred), by default $(D "a == b"). The predicate is passed to |$(REF nary, mir,functional), and can either accept a string, or any callable |that can be executed via $(D pred(element, element)). If the given range is |bidirectional, $(D uniq) also yields a |`std,range,primitives`. |Params: | pred = Predicate for determining equivalence between range elements. | r = An input range of elements to filter. |Returns: | An input range of | consecutively unique elements in the original range. If `r` is also a | forward range or bidirectional range, the returned range will be likewise. |*/ |Uniq!(naryFun!pred, Range) uniq(alias pred = "a == b", Range)(auto ref Range r) |if (isInputRange!Range && is(typeof(naryFun!pred(r.front, r.front)) == bool)) |{ | return typeof(return)(r); |} | |/// |@safe version(mir_test) unittest |{ | import std.algorithm.comparison : equal; | import std.algorithm.mutation : copy; | | int[] arr = [ 1, 2, 2, 2, 2, 3, 4, 4, 4, 5 ]; | assert(equal(uniq(arr), [ 1, 2, 3, 4, 5 ][])); | | // Filter duplicates in-place using copy | arr.length -= arr.uniq().copy(arr).length; | assert(arr == [ 1, 2, 3, 4, 5 ]); | | // Note that uniqueness is only determined consecutively; duplicated | // elements separated by an intervening different element will not be | // eliminated: | assert(equal(uniq([ 1, 1, 2, 1, 1, 3, 1]), [1, 2, 1, 3, 1])); |} | |/++ |Authros: $(HTTP erdani.com, Andrei Alexandrescu) (original Phobos code), Ilya Yaroshenko (betterC rework) |+/ |struct Uniq(alias pred, Range) |{ | Range _input; | | // this()(auto ref Range input) | // { | // alias AliasSeq(T...) 
= T; | // import core.lifetime: forward; | // AliasSeq!_input = forward!input; | // } | | ref opSlice() inout | { | return this; | } | | void popFront() scope | { | assert(!empty, "Attempting to popFront an empty uniq."); | auto last = _input.front; | do | { | _input.popFront(); | } | while (!_input.empty && pred(last, _input.front)); | } | | @property ElementType!Range front() | { | assert(!empty, "Attempting to fetch the front of an empty uniq."); | return _input.front; | } | | static if (isBidirectionalRange!Range) | { | void popBack() scope | { | assert(!empty, "Attempting to popBack an empty uniq."); | auto last = _input.back; | do | { | _input.popBack(); | } | while (!_input.empty && pred(last, _input.back)); | } | | @property ElementType!Range back() scope return | { | assert(!empty, "Attempting to fetch the back of an empty uniq."); | return _input.back; | } | } | | static if (isInfinite!Range) | { | enum bool empty = false; // Propagate infiniteness. | } | else | { | @property bool empty() const { return _input.empty; } | } | | static if (isForwardRange!Range) | { | @property typeof(this) save() scope return { | return typeof(this)(_input.save); | } | } |} | |version(none) |@safe version(mir_test) unittest |{ | import std.algorithm.comparison : equal; | import std.internal.test.dummyrange; | import std.range; | | int[] arr = [ 1, 2, 2, 2, 2, 3, 4, 4, 4, 5 ]; | auto r = uniq(arr); | static assert(isForwardRange!(typeof(r))); | | assert(equal(r, [ 1, 2, 3, 4, 5 ][])); | assert(equal(retro(r), retro([ 1, 2, 3, 4, 5 ][]))); | | foreach (DummyType; AllDummyRanges) | { | DummyType d; | auto u = uniq(d); | assert(equal(u, [1,2,3,4,5,6,7,8,9,10])); | | static assert(d.rt == RangeType.Input || isForwardRange!(typeof(u))); | | static if (d.rt >= RangeType.Bidirectional) | { | assert(equal(retro(u), [10,9,8,7,6,5,4,3,2,1])); | } | } |} | |@safe version(mir_test) unittest // https://issues.dlang.org/show_bug.cgi?id=17264 |{ | import std.algorithm.comparison : equal; | | const(int)[] var = [0, 1, 1, 2]; | assert(var.uniq.equal([0, 1, 2])); |} | |@safe version(mir_test) unittest { | import mir.ndslice.allocation; | import mir.math.common: approxEqual; | auto x = rcslice!double(2); | auto y = rcslice!double(2); | x[] = [2, 3]; | y[] = [2, 3]; | assert(equal!approxEqual(x,y)); |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/algorithm/iteration.d is 0% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-core-1.1.2-mir-core-source-mir-math-common.lst |/++ |Common floating point math functions. | |This module has generic LLVM-oriented API compatible with all D compilers. | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko, Phobos Team |+/ |module mir.math.common; | |import mir.internal.utility: isComplex, isFloatingPoint; | |version(LDC) |{ | static import ldc.attributes; | | private alias AliasSeq(T...) = T; | | /++ | Functions attribute, an alias for `AliasSeq!(llvmFastMathFlag("contract"));`. | | $(UL | $(LI 1. Allow floating-point contraction (e.g. fusing a multiply followed by an addition into a fused multiply-and-add). ) | ) | | Note: Can be used with all compilers. | +/ | alias fmamath = AliasSeq!(ldc.attributes.llvmFastMathFlag("contract")); | | /++ | Functions attribute, an alias for `AliasSeq!(llvmFastMathFlag("fast"))`. | | It is similar to $(LREF fastmath), but does not allow unsafe-fp-math. 
| This flag does NOT force LDC to use the reciprocal of an argument rather than perform division. | | This flag is default for string lambdas. | | Note: Can be used with all compilers. | +/ | alias optmath = AliasSeq!(ldc.attributes.llvmFastMathFlag("fast")); | | /++ | Functions attribute, an alias for `ldc.attributes.fastmath` . | | $(UL | | $(LI 1. Enable optimizations that make unsafe assumptions about IEEE math (e.g. that addition is associative) or may not work for all input ranges. | These optimizations allow the code generator to make use of some instructions which would otherwise not be usable (such as fsin on X86). ) | | $(LI 2. Allow optimizations to assume the arguments and result are not NaN. | Such optimizations are required to retain defined behavior over NaNs, | but the value of the result is undefined. ) | | $(LI 3. Allow optimizations to assume the arguments and result are not +$(BACKTICK)-inf. | Such optimizations are required to retain defined behavior over +$(BACKTICK)-Inf, | but the value of the result is undefined. ) | | $(LI 4. Allow optimizations to treat the sign of a zero argument or result as insignificant. ) | | $(LI 5. Allow optimizations to use the reciprocal of an argument rather than perform division. ) | | $(LI 6. Allow floating-point contraction (e.g. fusing a multiply followed by an addition into a fused multiply-and-add). ) | | $(LI 7. Allow algebraically equivalent transformations that may dramatically change results in floating point (e.g. reassociate). ) | ) | | Note: Can be used with all compilers. | +/ | alias fastmath = ldc.attributes.fastmath; |} |else |enum |{ | /++ | Functions attribute, an alias for `AliasSeq!(llvmFastMathFlag("contract"));`. | | $(UL | $(LI Allow floating-point contraction (e.g. fusing a multiply followed by an addition into a fused multiply-and-add). ) | ) | | Note: Can be used with all compilers. | +/ | fmamath, | | /++ | Functions attribute, an alias for `AliasSeq!(llvmAttr("unsafe-fp-math", "false"), llvmFastMathFlag("fast"))`. | | It is similar to $(LREF fastmath), but does not allow unsafe-fp-math. | This flag does NOT force LDC to use the reciprocal of an argument rather than perform division. | | This flag is default for string lambdas. | | Note: Can be used with all compilers. | +/ | optmath, | | /++ | Functions attribute, an alias for `ldc.attributes.fastmath = AliasSeq!(llvmAttr("unsafe-fp-math", "true"), llvmFastMathFlag("fast"))` . | | $(UL | | $(LI Enable optimizations that make unsafe assumptions about IEEE math (e.g. that addition is associative) or may not work for all input ranges. | These optimizations allow the code generator to make use of some instructions which would otherwise not be usable (such as fsin on X86). ) | | $(LI Allow optimizations to assume the arguments and result are not NaN. | Such optimizations are required to retain defined behavior over NaNs, | but the value of the result is undefined. ) | | $(LI Allow optimizations to assume the arguments and result are not +$(BACKTICK)-inf. | Such optimizations are required to retain defined behavior over +$(BACKTICK)-Inf, | but the value of the result is undefined. ) | | $(LI Allow optimizations to treat the sign of a zero argument or result as insignificant. ) | | $(LI Allow optimizations to use the reciprocal of an argument rather than perform division. ) | | $(LI Allow floating-point contraction (e.g. fusing a multiply followed by an addition into a fused multiply-and-add). 
) | | $(LI Allow algebraically equivalent transformations that may dramatically change results in floating point (e.g. reassociate). ) | ) | | Note: Can be used with all compilers. | +/ | fastmath |} | |version(LDC) |{ | nothrow @nogc pure @safe: | | pragma(LDC_intrinsic, "llvm.sqrt.f#") | /// | T sqrt(T)(in T val) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.sin.f#") | /// | T sin(T)(in T val) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.cos.f#") | /// | T cos(T)(in T val) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.powi.f#") | /// | T powi(T)(in T val, int power) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.pow.f#") | /// | T pow(T)(in T val, in T power) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.exp.f#") | /// | T exp(T)(in T val) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.log.f#") | /// | T log(T)(in T val) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.fma.f#") | /// | T fma(T)(T vala, T valb, T valc) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.fabs.f#") | /// | T fabs(T)(in T val) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.floor.f#") | /// | T floor(T)(in T val) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.exp2.f#") | /// | T exp2(T)(in T val) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.log10.f#") | /// | T log10(T)(in T val) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.log2.f#") | /// | T log2(T)(in T val) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.ceil.f#") | /// | T ceil(T)(in T val) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.trunc.f#") | /// | T trunc(T)(in T val) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.rint.f#") | /// | T rint(T)(in T val) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.nearbyint.f#") | /// | T nearbyint(T)(in T val) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.copysign.f#") | /// | T copysign(T)(in T mag, in T sgn) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.round.f#") | /// | T round(T)(in T val) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.fmuladd.f#") | /// | T fmuladd(T)(in T vala, in T valb, in T valc) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.minnum.f#") | /// | T fmin(T)(in T vala, in T valb) if (isFloatingPoint!T); | | pragma(LDC_intrinsic, "llvm.maxnum.f#") | /// | T fmax(T)(in T vala, in T valb) if (isFloatingPoint!T); |} |else version(GNU) |{ | static import gcc.builtins; | | // Calls GCC builtin for either float (suffix "f"), double (no suffix), or real (suffix "l"). | private enum mixinGCCBuiltin(string fun) = | `static if (T.mant_dig == float.mant_dig) return gcc.builtins.__builtin_`~fun~`f(x);`~ | ` else static if (T.mant_dig == double.mant_dig) return gcc.builtins.__builtin_`~fun~`(x);`~ | ` else static if (T.mant_dig == real.mant_dig) return gcc.builtins.__builtin_`~fun~`l(x);`~ | ` else static assert(0);`; | | // As above but for two-argument function. 
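|    // Sketch of the two-argument expansion (assuming T == double): mixinGCCBuiltin2!`pow`
|    // reduces to `return gcc.builtins.__builtin_pow(x, y);`, while float selects
|    // `__builtin_powf` and real selects `__builtin_powl`.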
| private enum mixinGCCBuiltin2(string fun) = | `static if (T.mant_dig == float.mant_dig) return gcc.builtins.__builtin_`~fun~`f(x, y);`~ | ` else static if (T.mant_dig == double.mant_dig) return gcc.builtins.__builtin_`~fun~`(x, y);`~ | ` else static if (T.mant_dig == real.mant_dig) return gcc.builtins.__builtin_`~fun~`l(x, y);`~ | ` else static assert(0);`; | | /// | T sqrt(T)(in T x) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin!`sqrt`); } | /// | T sin(T)(in T x) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin!`sin`); } | /// | T cos(T)(in T x) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin!`cos`); } | /// | T pow(T)(in T x, in T power) if (isFloatingPoint!T) { alias y = power; mixin(mixinGCCBuiltin2!`pow`); } | /// | T powi(T)(in T x, int power) if (isFloatingPoint!T) { alias y = power; mixin(mixinGCCBuiltin2!`powi`); } | /// | T exp(T)(in T x) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin!`exp`); } | /// | T log(T)(in T x) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin!`log`); } | /// | T fabs(T)(in T x) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin!`fabs`); } | /// | T floor(T)(in T x) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin!`floor`); } | /// | T exp2(T)(in T x) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin!`exp2`); } | /// | T log10(T)(in T x) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin!`log10`); } | /// | T log2(T)(in T x) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin!`log2`); } | /// | T ceil(T)(in T x) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin!`ceil`); } | /// | T trunc(T)(in T x) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin!`trunc`); } | /// | T rint(T)(in T x) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin!`rint`); } | /// | T nearbyint(T)(in T x) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin!`nearbyint`); } | /// | T copysign(T)(in T mag, in T sgn) if (isFloatingPoint!T) { alias y = sgn; mixin(mixinGCCBuiltin2!`copysign`); } | /// | T round(T)(in T x) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin!`round`); } | /// | T fmuladd(T)(in T a, in T b, in T c) if (isFloatingPoint!T) | { | static if (T.mant_dig == float.mant_dig) | return gcc.builtins.__builtin_fmaf(a, b, c); | else static if (T.mant_dig == double.mant_dig) | return gcc.builtins.__builtin_fma(a, b, c); | else static if (T.mant_dig == real.mant_dig) | return gcc.builtins.__builtin_fmal(a, b, c); | else | static assert(0); | } | version(mir_test) | unittest { assert(fmuladd!double(2, 3, 4) == 2 * 3 + 4); } | /// | T fmin(T)(in T x, in T y) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin2!`fmin`); } | /// | T fmax(T)(in T x, in T y) if (isFloatingPoint!T) { mixin(mixinGCCBuiltin2!`fmax`); } |} |else static if (__VERSION__ >= 2082) // DMD 2.082 onward. |{ | static import std.math; | static import core.stdc.math; | | // Calls either std.math or cmath function for either float (suffix "f") | // or double (no suffix). std.math will always be used during CTFE or for | // arguments with greater than double precision or if the cmath function | // is impure. | private enum mixinCMath(string fun) = | `pragma(inline, true); | static if (!is(typeof(std.math.`~fun~`(0.5f)) == float) | && is(typeof(() pure => core.stdc.math.`~fun~`f(0.5f)))) | if (!__ctfe) | { | static if (T.mant_dig == float.mant_dig) return core.stdc.math.`~fun~`f(x); | else static if (T.mant_dig == double.mant_dig) return core.stdc.math.`~fun~`(x); | } | return std.math.`~fun~`(x);`; | | // As above but for two-argument function (both arguments must be floating point). 
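|    // For example (a sketch, T == float): mixinCMath2!`fmin` calls core.stdc.math.fminf(x, y)
|    // at run time when std.math's overload would not keep float precision and the C function
|    // is callable from pure code; during CTFE it always falls back to std.math.fmin.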
| private enum mixinCMath2(string fun) = | `pragma(inline, true); | static if (!is(typeof(std.math.`~fun~`(0.5f, 0.5f)) == float) | && is(typeof(() pure => core.stdc.math.`~fun~`f(0.5f, 0.5f)))) | if (!__ctfe) | { | static if (T.mant_dig == float.mant_dig) return core.stdc.math.`~fun~`f(x, y); | else static if (T.mant_dig == double.mant_dig) return core.stdc.math.`~fun~`(x, y); | } | return std.math.`~fun~`(x, y);`; | | // Some std.math functions have appropriate return types (float, | // double, real) without need for a wrapper. We can alias them | // directly but we leave the templates afterwards for documentation | // purposes and so explicit template instantiation still works. | // The aliases will always match before the templates. | // Note that you cannot put any "static if" around the aliases or | // compilation will fail due to conflict with the templates! | alias sqrt = std.math.sqrt; | alias sin = std.math.sin; | alias cos = std.math.cos; | alias exp = std.math.exp; | //alias fabs = std.math.fabs; | alias floor = std.math.floor; | alias exp2 = std.math.exp2; | alias ceil = std.math.ceil; | alias rint = std.math.rint; | | /// | T sqrt(T)(in T x) if (isFloatingPoint!T) { return std.math.sqrt(x); } | /// | T sin(T)(in T x) if (isFloatingPoint!T) { return std.math.sin(x); } | /// | T cos(T)(in T x) if (isFloatingPoint!T) { return std.math.cos(x); } | /// | T pow(T)(in T x, in T power) if (isFloatingPoint!T) { alias y = power; mixin(mixinCMath2!`pow`); } | /// | T powi(T)(in T x, int power) if (isFloatingPoint!T) { alias y = power; mixin(mixinCMath2!`pow`); } | /// | T exp(T)(in T x) if (isFloatingPoint!T) { return std.math.exp(x); } | /// | T log(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`log`); } | /// | T fabs(T)(in T x) if (isFloatingPoint!T) { return std.math.fabs(x); } | /// | T floor(T)(in T x) if (isFloatingPoint!T) { return std.math.floor(x); } | /// | T exp2(T)(in T x) if (isFloatingPoint!T) { return std.math.exp2(x); } | /// | T log10(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`log10`); } | /// | T log2(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`log2`); } | /// | T ceil(T)(in T x) if (isFloatingPoint!T) { return std.math.ceil(x); } | /// | T trunc(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`trunc`); } | /// | T rint(T)(in T x) if (isFloatingPoint!T) { return std.math.rint(x); } | /// | T nearbyint(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`nearbyint`); } | /// | T copysign(T)(in T mag, in T sgn) if (isFloatingPoint!T) | { | alias x = mag; | alias y = sgn; | mixin(mixinCMath2!`copysign`); | } | /// | T round(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`round`); } | /// | T fmuladd(T)(in T a, in T b, in T c) if (isFloatingPoint!T) { return a * b + c; } | version(mir_test) | unittest { assert(fmuladd!double(2, 3, 4) == 2 * 3 + 4); } | /// | T fmin(T)(in T x, in T y) if (isFloatingPoint!T) | { | version (Windows) // https://issues.dlang.org/show_bug.cgi?id=19798 | { | version (CRuntime_Microsoft) | mixin(mixinCMath2!`fmin`); | else | return std.math.fmin(x, y); | } | else | mixin(mixinCMath2!`fmin`); | } | /// | T fmax(T)(in T x, in T y) if (isFloatingPoint!T) | { | version (Windows) // https://issues.dlang.org/show_bug.cgi?id=19798 | { | version (CRuntime_Microsoft) | mixin(mixinCMath2!`fmax`); | else | return std.math.fmax(x, y); | } | else | mixin(mixinCMath2!`fmax`); | } | | version (mir_test) @nogc nothrow pure @safe unittest | { | // Check the aliases are correct. 
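|        // i.e. a float argument must keep float precision instead of being promoted
|        // to double or real, which the static asserts below verify.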
| static assert(is(typeof(sqrt(1.0f)) == float)); | static assert(is(typeof(sin(1.0f)) == float)); | static assert(is(typeof(cos(1.0f)) == float)); | static assert(is(typeof(exp(1.0f)) == float)); | static assert(is(typeof(fabs(1.0f)) == float)); | static assert(is(typeof(floor(1.0f)) == float)); | static assert(is(typeof(exp2(1.0f)) == float)); | static assert(is(typeof(ceil(1.0f)) == float)); | static assert(is(typeof(rint(1.0f)) == float)); | | auto x = sqrt!float(2.0f); // Explicit template instantiation still works. | auto fp = &sqrt!float; // Can still take function address. | | // Test for DMD linker problem with fmin on Windows. | static assert(is(typeof(fmin!float(1.0f, 1.0f)))); | static assert(is(typeof(fmax!float(1.0f, 1.0f)))); | } |} |else // DMD version prior to 2.082 |{ | static import std.math; | static import core.stdc.math; | | // Calls either std.math or cmath function for either float (suffix "f") | // or double (no suffix). std.math will always be used during CTFE or for | // arguments with greater than double precision or if the cmath function | // is impure. | private enum mixinCMath(string fun) = | `pragma(inline, true); | static if (!is(typeof(std.math.`~fun~`(0.5f)) == float) | && is(typeof(() pure => core.stdc.math.`~fun~`f(0.5f)))) | if (!__ctfe) | { | static if (T.mant_dig == float.mant_dig) return core.stdc.math.`~fun~`f(x); | else static if (T.mant_dig == double.mant_dig) return core.stdc.math.`~fun~`(x); | } | return std.math.`~fun~`(x);`; | | // As above but for two-argument function (both arguments must be floating point). | private enum mixinCMath2(string fun) = | `pragma(inline, true); | static if (!is(typeof(std.math.`~fun~`(0.5f, 0.5f)) == float) | && is(typeof(() pure => core.stdc.math.`~fun~`f(0.5f, 0.5f)))) | if (!__ctfe) | { | static if (T.mant_dig == float.mant_dig) return core.stdc.math.`~fun~`f(x, y); | else static if (T.mant_dig == double.mant_dig) return core.stdc.math.`~fun~`(x, y); | } | return std.math.`~fun~`(x, y);`; | | // Some std.math functions have appropriate return types (float, | // double, real) without need for a wrapper. 
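|    // Here only std.math.sqrt is aliased directly (see below); the remaining functions
|    // presumably lack float-precision overloads in this older std.math and therefore go
|    // through the mixinCMath / mixinCMath2 wrappers instead.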
| alias sqrt = std.math.sqrt; | | /// | T sqrt(T)(in T x) if (isFloatingPoint!T) { return std.math.sqrt(x); } | /// | T sin(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`sin`); } | /// | T cos(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`cos`); } | /// | T pow(T)(in T x, in T power) if (isFloatingPoint!T) { alias y = power; mixin(mixinCMath2!`pow`); } | /// | T powi(T)(in T x, int power) if (isFloatingPoint!T) { alias y = power; mixin(mixinCMath2!`pow`); } | /// | T exp(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`exp`); } | /// | T log(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`log`); } | /// | T fabs(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`fabs`); } | /// | T floor(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`floor`); } | /// | T exp2(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`exp2`); } | /// | T log10(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`log10`); } | /// | T log2(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`log2`); } | /// | T ceil(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`ceil`); } | /// | T trunc(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`trunc`); } | /// | T rint(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`rint`); } | /// | T nearbyint(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`nearbyint`); } | /// | T copysign(T)(in T mag, in T sgn) if (isFloatingPoint!T) | { | alias x = mag; | alias y = sgn; | mixin(mixinCMath2!`copysign`); | } | /// | T round(T)(in T x) if (isFloatingPoint!T) { mixin(mixinCMath!`round`); } | /// | T fmuladd(T)(in T a, in T b, in T c) if (isFloatingPoint!T) { return a * b + c; } | version(mir_test) | unittest { assert(fmuladd!double(2, 3, 4) == 2 * 3 + 4); } | /// | T fmin(T)(in T x, in T y) if (isFloatingPoint!T) | { | version (Windows) // https://issues.dlang.org/show_bug.cgi?id=19798 | { | version (CRuntime_Microsoft) | mixin(mixinCMath2!`fmin`); | else | return std.math.fmin(x, y); | } | else | mixin(mixinCMath2!`fmin`); | } | /// | T fmax(T)(in T x, in T y) if (isFloatingPoint!T) | { | version (Windows) // https://issues.dlang.org/show_bug.cgi?id=19798 | { | version (CRuntime_Microsoft) | mixin(mixinCMath2!`fmax`); | else | return std.math.fmax(x, y); | } | else | mixin(mixinCMath2!`fmax`); | } | | version (mir_test) @nogc nothrow pure @safe unittest | { | // Check the aliases are correct. | static assert(is(typeof(sqrt(1.0f)) == float)); | auto x = sqrt!float(2.0f); // Explicit template instantiation still works. | auto fp = &sqrt!float; // Can still take function address. | | // Test for DMD linker problem with fmin on Windows. | static assert(is(typeof(fmin!float(1.0f, 1.0f)))); | static assert(is(typeof(fmax!float(1.0f, 1.0f)))); | } |} | |version (mir_test) |@nogc nothrow pure @safe unittest |{ | import mir.math: PI, feqrel; | assert(feqrel(pow(2.0L, -0.5L), cos(PI / 4)) >= real.mant_dig - 1); |} | |/// Overload for cdouble, cfloat and creal |@optmath auto fabs(T)(in T x) | if (isComplex!T) |{ | return x.re * x.re + x.im * x.im; |} | |/// |unittest |{ | assert(fabs(3 + 4i) == 25); |} | |/++ |Computes whether two values are approximately equal, admitting a maximum |relative difference, and a maximum absolute difference. |Params: | lhs = First item to compare. | rhs = Second item to compare. | maxRelDiff = Maximum allowable difference relative to `rhs`. Defaults to `0.5 ^^ 20`. | maxAbsDiff = Maximum absolute difference. Defaults to `0.5 ^^ 20`. 
| |Returns: | `true` if the two items are equal or approximately equal under either criterium. |+/ |bool approxEqual(T)(const T lhs, const T rhs, const T maxRelDiff = T(0x1p-20f), const T maxAbsDiff = T(0x1p-20f)) |{ | if (rhs == lhs) // infs | return true; | auto diff = fabs(lhs - rhs); | if (diff <= maxAbsDiff) | return true; | diff /= fabs(rhs); | return diff <= maxRelDiff; |} | |/// |@safe pure nothrow @nogc unittest |{ | assert(approxEqual(1.0, 1.0000001)); | assert(approxEqual(1.0f, 1.0000001f)); | assert(approxEqual(1.0L, 1.0000001L)); | | assert(approxEqual(10000000.0, 10000001)); | assert(approxEqual(10000000f, 10000001f)); | assert(!approxEqual(100000.0L, 100001L)); |} | |/// ditto |bool approxEqual(T : cfloat)(const T lhs, const T rhs, float maxRelDiff = 0x1p-20f, float maxAbsDiff = 0x1p-20f) |{ | return approxEqual(lhs.re, rhs.re, maxRelDiff, maxAbsDiff) | && approxEqual(lhs.im, rhs.im, maxRelDiff, maxAbsDiff); |} | |/// ditto |bool approxEqual(T : cdouble)(const T lhs, const T rhs, double maxRelDiff = 0x1p-20f, double maxAbsDiff = 0x1p-20f) |{ | return approxEqual(lhs.re, rhs.re, maxRelDiff, maxAbsDiff) | && approxEqual(lhs.im, rhs.im, maxRelDiff, maxAbsDiff); |} | |/// ditto |bool approxEqual(T : creal)(const T lhs, const T rhs, real maxRelDiff = 0x1p-20f, real maxAbsDiff = 0x1p-20f) |{ | return approxEqual(lhs.re, rhs.re, maxRelDiff, maxAbsDiff) | && approxEqual(lhs.im, rhs.im, maxRelDiff, maxAbsDiff); |} | |/// Complex types works as `approxEqual(l.re, r.re) && approxEqual(l.im, r.im)` |@safe pure nothrow @nogc unittest |{ | assert(approxEqual(1.0 + 1i, 1.0000001 + 1.0000001i)); | assert(!approxEqual(100000.0L + 0i, 100001L + 0i)); |} ../../../.dub/packages/mir-core-1.1.2/mir-core/source/mir/math/common.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-package.lst |/+ |## Guide for Slice/BLAS contributors | |1. Make sure functions are | a. inlined(!), | b. `@nogc`, | c. `nothrow`, | d. `pure`. | For this reason, it is preferable to use _simple_ `assert`s with messages | that can be computed at compile time. | The goals are: | 1. to reduce executable size for _any_ compilation mode | 2. to reduce template bloat in object files | 3. to reduce compilation time | 4. to allow users to write extern C bindings for code libraries on `Slice` type. | |2. `std.format`, `std.string`, and `std.conv` should not be used in error | message formatting.`"Use" ~ Concatenation.stringof`. | |3. `mixin template`s may be used for pretty error message formatting. | |4. `Exception`s/`enforce`s should no be used to check indexes and lengths. | Exceptions are only allowed for algorithms where validation of input data is | too complicated for the user. `reshape` function is a good example of a case | where Exceptions are required. | If a function might throw an exception, an example with exception handing should be added. | |5. For simple checks like matrix transposition, compile time flags should not be used. | It is much better to opt for runtime matrix transposition. | Furthermore, Slice type provides runtime matrix transposition out of the box. | |6. _Fortran_VS_C_ flags should not be used. They are about notation, | but not about the algorithm itself. For math world users, | a corresponding code example might be included in the documentation. | `transposed` / `everted` can be used in cache-friendly codes. | |7. Compile time evaluation should not be used to produce dummy types like `IdentityMatrix`. | |8. 
Memory allocation and algorithm logic should be separated whenever possible. | |9. CTFE version(mir_test) unittests should be added to new functions. |+/ | |/** |$(H1 Multidimensional Random Access Ranges) | |The package provides a multidimensional array implementation. |It would be well suited to creating machine learning and image |processing algorithms, but should also be general enough for use anywhere with |homogeneously-typed multidimensional data. |In addition, it includes various functions for iteration, accessing, and manipulation. | |Quick_Start: |$(SUBREF slice, sliced) is a function designed to create |a multidimensional view over a range. |Multidimensional view is presented by $(SUBREF slice, Slice) type. | |------ |import mir.ndslice; | |auto matrix = slice!double(3, 4); |matrix[] = 0; |matrix.diagonal[] = 1; | |auto row = matrix[2]; |row[3] = 6; |assert(matrix[2, 3] == 6); // D & C index order |------ | |Note: |In many examples $(REF iota, mir,_ndslice,topology) is used |instead of a regular array, which makes it |possible to carry out tests without memory allocation. | |$(SCRIPT inhibitQuickIndex = 1;) | |$(DIVC quickindex, |$(BOOKTABLE, | |$(TR $(TH Submodule) $(TH Declarations)) | |$(TR $(TDNW $(SUBMODULE slice) $(BR) | $(SMALL $(SUBREF slice, Slice) structure | $(BR) Basic constructors)) | $(TD | $(SUBREF slice, Canonical) | $(SUBREF slice, Contiguous) | $(SUBREF slice, DeepElementType) | $(SUBREF slice, isSlice) | $(SUBREF slice, kindOf) | $(SUBREF slice, Slice) | $(SUBREF slice, sliced) | $(SUBREF slice, slicedField) | $(SUBREF slice, slicedNdField) | $(SUBREF slice, SliceKind) | $(SUBREF slice, Structure) | $(SUBREF slice, Universal) | ) |) | |$(TR $(TDNW $(SUBMODULE allocation) $(BR) | $(SMALL Allocation utilities)) | $(TD | $(SUBREF allocation, bitRcslice) | $(SUBREF allocation, bitSlice) | $(SUBREF allocation, makeNdarray) | $(SUBREF allocation, makeSlice) | $(SUBREF allocation, makeUninitSlice) | $(SUBREF allocation, mininitRcslice) | $(SUBREF allocation, ndarray) | $(SUBREF allocation, rcslice) | $(SUBREF allocation, shape) | $(SUBREF allocation, slice) | $(SUBREF allocation, stdcFreeAlignedSlice) | $(SUBREF allocation, stdcFreeSlice) | $(SUBREF allocation, stdcSlice) | $(SUBREF allocation, stdcUninitAlignedSlice) | $(SUBREF allocation, stdcUninitSlice) | $(SUBREF allocation, uninitAlignedSlice) | $(SUBREF allocation, uninitSlice) | ) |) | |$(TR $(TDNW $(SUBMODULE topology) $(BR) | $(SMALL Subspace manipulations | $(BR) Advanced constructors | $(BR) SliceKind conversion utilities)) | $(TD | $(SUBREF topology, as) | $(SUBREF topology, assumeCanonical) | $(SUBREF topology, assumeContiguous) | $(SUBREF topology, bitpack) | $(SUBREF topology, bitwise) | $(SUBREF topology, blocks) | $(SUBREF topology, byDim) | $(SUBREF topology, bytegroup) | $(SUBREF topology, cached) | $(SUBREF topology, cachedGC) | $(SUBREF topology, canonical) | $(SUBREF topology, cartesian) | $(SUBREF topology, chopped) | $(SUBREF topology, cycle) | $(SUBREF topology, diagonal) | $(SUBREF topology, diff) | $(SUBREF topology, evertPack) | $(SUBREF topology, flattened) | $(SUBREF topology, indexed) | $(SUBREF topology, iota) | $(SUBREF topology, ipack) | $(SUBREF topology, kronecker) | $(SUBREF topology, linspace) | $(SUBREF topology, magic) | $(SUBREF topology, map) | $(SUBREF topology, member) | $(SUBREF topology, ndiota) | $(SUBREF topology, orthogonalReduceField) | $(SUBREF topology, pack) | $(SUBREF topology, pairwise) | $(SUBREF topology, repeat) | $(SUBREF topology, reshape) | $(SUBREF 
topology, ReshapeError) | $(SUBREF topology, retro) | $(SUBREF topology, slide) | $(SUBREF topology, stairs) | $(SUBREF topology, stride) | $(SUBREF topology, subSlices) | $(SUBREF topology, triplets) | $(SUBREF topology, universal) | $(SUBREF topology, unzip) | $(SUBREF topology, windows) | $(SUBREF topology, zip) | ) |) | |$(TR $(TDNW $(SUBMODULE filling) $(BR) | $(SMALL Specialized initialisation routines)) | $(TD | $(SUBREF filling, fillVandermonde) | ) |) | |$(TR $(TDNW $(SUBMODULE fuse) $(BR) | $(SMALL Data fusing (stacking) | $(BR) See also $(SUBMODULE concatenation) submodule. | )) | $(TD | $(SUBREF fuse, fuse) | $(SUBREF fuse, fuseCells) | ) |) | |$(TR $(TDNW $(SUBMODULE concatenation) $(BR) | $(SMALL Concatenation, padding, and algorithms | $(BR) See also $(SUBMODULE fuse) submodule. | )) | $(TD | $(SUBREF concatenation, forEachFragment) | $(SUBREF concatenation, isConcatenation) | $(SUBREF concatenation, pad) | $(SUBREF concatenation, padEdge) | $(SUBREF concatenation, padWrap) | $(SUBREF concatenation, padSymmetric) | $(SUBREF concatenation, concatenation) | $(SUBREF concatenation, Concatenation) | $(SUBREF concatenation, concatenationDimension) | $(SUBREF concatenation, until) | ) |) | |$(TR $(TDNW $(SUBMODULE dynamic) | $(BR) $(SMALL Dynamic dimension manipulators)) | $(TD | $(SUBREF dynamic, allReversed) | $(SUBREF dynamic, dropToHypercube) | $(SUBREF dynamic, everted) | $(SUBREF dynamic, normalizeStructure) | $(SUBREF dynamic, reversed) | $(SUBREF dynamic, rotated) | $(SUBREF dynamic, strided) | $(SUBREF dynamic, swapped) | $(SUBREF dynamic, transposed) | ) |) | |$(TR $(TDNW $(SUBMODULE sorting) | $(BR) $(SMALL Sorting utilities)) | $(TD | $(SUBREF sorting, sort) | Examples for `isSorted`, `isStrictlyMonotonic`, `makeIndex`, and `schwartzSort`. 
| ) |) | |$(TR $(TDNW $(SUBMODULE mutation) | $(BR) $(SMALL Mutation utilities)) | $(TD | $(SUBREF mutation, copyMinor) | $(SUBREF mutation, reverseInPlace) | ) |) | |$(TR $(TDNW $(SUBMODULE iterator) | $(BR) $(SMALL Declarations)) | $(TD | $(SUBREF iterator, BytegroupIterator) | $(SUBREF iterator, CachedIterator) | $(SUBREF iterator, ChopIterator) | $(SUBREF iterator, FieldIterator) | $(SUBREF iterator, FlattenedIterator) | $(SUBREF iterator, IndexIterator) | $(SUBREF iterator, IotaIterator) | $(SUBREF iterator, MapIterator) | $(SUBREF iterator, MemberIterator) | $(SUBREF iterator, RetroIterator) | $(SUBREF iterator, SliceIterator) | $(SUBREF iterator, SlideIterator) | $(SUBREF iterator, StairsIterator) | $(SUBREF iterator, StrideIterator) | $(SUBREF iterator, SubSliceIterator) | $(SUBREF iterator, Triplet) | $(SUBREF iterator, TripletIterator) | $(SUBREF iterator, ZipIterator) | ) |) | |$(TR $(TDNW $(SUBMODULE field) | $(BR) $(SMALL Declarations)) | $(TD | $(SUBREF field, BitField) | $(SUBREF field, BitpackField) | $(SUBREF field, CycleField) | $(SUBREF field, LinspaceField) | $(SUBREF field, MagicField) | $(SUBREF field, MapField) | $(SUBREF field, ndIotaField) | $(SUBREF field, OrthogonalReduceField) | $(SUBREF field, RepeatField) | ) |) | |$(TR $(TDNW $(SUBMODULE ndfield) | $(BR) $(SMALL Declarations)) | $(TD | $(SUBREF ndfield, Cartesian) | $(SUBREF ndfield, Kronecker) | ) |) | |$(TR $(TDNW $(SUBMODULE chunks) | $(BR) $(SMALL Declarations)) | $(TD | $(SUBREF field, chunks) | $(SUBREF field, Chunks) | $(SUBREF field, isChunks) | $(SUBREF field, popFrontTuple) | ) |) | |$(TR $(TDNW $(SUBMODULE traits) | $(BR) $(SMALL Declarations)) | $(TD | $(SUBREF traits, isIterator) | $(SUBREF traits, isVector) | $(SUBREF traits, isMatrix) | $(SUBREF traits, isContiguousSlice) | $(SUBREF traits, isCanonicalSlice) | $(SUBREF traits, isUniversalSlice) | $(SUBREF traits, isContiguousVector) | $(SUBREF traits, isUniversalVector) | $(SUBREF traits, isContiguousMatrix) | $(SUBREF traits, isCanonicalMatrix) | $(SUBREF traits, isUniversalMatrix) | ) |) | |)) | |$(H2 Example: Image Processing) | |A median filter is implemented as an example. The function |`movingWindowByChannel` can also be used with other filters that use a sliding |window as the argument, in particular with convolution matrices such as the |$(LINK2 https://en.wikipedia.org/wiki/Sobel_operator, Sobel operator). | |`movingWindowByChannel` iterates over an image in sliding window mode. |Each window is transferred to a `filter`, which calculates the value of the |pixel that corresponds to the given window. | |This function does not calculate border cases in which a window overlaps |the image partially. However, the function can still be used to carry out such |calculations. That can be done by creating an amplified image, with the edges |reflected from the original image, and then applying the given function to the |new file. | |Note: You can find the example at |$(LINK2 https://github.com/libmir/mir/blob/master/examples/median_filter.d, GitHub). | |------- |/++ |Params: | filter = unary function. Dimension window 2D is the argument. | image = image dimensions `(h, w, c)`, | where с is the number of channels in the image | nr = number of rows in the window | nс = number of columns in the window | |Returns: | image dimensions `(h - nr + 1, w - nc + 1, c)`, | where с is the number of channels in the image. | Dense data layout is guaranteed. 
|+/ |Slice!(ubyte*, 3) movingWindowByChannel |(Slice!(Universal, [3], ubyte*) image, size_t nr, size_t nc, ubyte delegate(Slice!(Universal, [2], ubyte*)) filter) |{ | // 0. 3D | // The last dimension represents the color channel. | return image | // 1. 2D composed of 1D | // Packs the last dimension. | .pack!1 | // 2. 2D composed of 2D composed of 1D | // Splits image into overlapping windows. | .windows(nr, nc) | // 3. 5D | // Unpacks the windows. | .unpack | .transposed!(0, 1, 4) | // 4. 5D | // Brings the color channel dimension to the third position. | .pack!2 | // 2D to pixel lazy conversion. | .map!filter | // Creates the new image. The only memory allocation in this function. | .slice; |} |------- | |A function that calculates the value of iterator median is also necessary. | |------- |/++ | |Params: | r = input range | buf = buffer with length no less than the number of elements in `r` |Returns: | median value over the range `r` |+/ |T median(Range, T)(Slice!(Universal, [2], Range) sl, T[] buf) |{ | import std.algorithm.sorting : topN; | // copy sl to the buffer | auto retPtr = reduce!( | (ptr, elem) { *ptr = elem; return ptr + 1;} )(buf.ptr, sl); | auto n = retPtr - buf.ptr; | buf[0 .. n].topN(n / 2); | return buf[n / 2]; |} |------- | |The `main` function: | |------- |void main(string[] args) |{ | import std.conv : to; | import std.getopt : getopt, defaultGetoptPrinter; | import std.path : stripExtension; | | uint nr, nc, def = 3; | auto helpInformation = args.getopt( | "nr", "number of rows in window, default value is " ~ def.to!string, &nr, | "nc", "number of columns in window, default value is equal to nr", &nc); | if (helpInformation.helpWanted) | { | defaultGetoptPrinter( | "Usage: median-filter [] []\noptions:", | helpInformation.options); | return; | } | if (!nr) nr = def; | if (!nc) nc = nr; | | auto buf = new ubyte[nr * nc]; | | foreach (name; args[1 .. $]) | { | import imageformats; // can be found at code.dlang.org | | IFImage image = read_image(name); | | auto ret = image.pixels | .sliced(cast(size_t)image.h, cast(size_t)image.w, cast(size_t)image.c) | .movingWindowByChannel | !(window => median(window, buf)) | (nr, nc); | | write_image( | name.stripExtension ~ "_filtered.png", | ret.length!1, | ret.length!0, | (&ret[0, 0, 0])[0 .. ret.elementCount]); | } |} |------- | |This program works both with color and grayscale images. | |------- |$ median-filter --help |Usage: median-filter [] [] |options: | --nr number of rows in window, default value is 3 | --nc number of columns in window default value equals to nr |-h --help This help information. |------- | |$(H2 Compared with `numpy.ndarray`) | |numpy is undoubtedly one of the most effective software packages that has |facilitated the work of many engineers and scientists. However, due to the |specifics of implementation of Python, a programmer who wishes to use the |functions not represented in numpy may find that the built-in functions |implemented specifically for numpy are not enough, and their Python |implementations work at a very low speed. Extending numpy can be done, but |is somewhat laborious as even the most basic numpy functions that refer |directly to `ndarray` data must be implemented in C for reasonable performance. | |At the same time, while working with `ndslice`, an engineer has access to the |whole set of standard D library, so the functions he creates will be as |efficient as if they were written in C. | | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). 
|Copyright: Copyright © 2016, Ilya Yaroshenko |Authors: Ilya Yaroshenko |Acknowledgements: John Loughran Colvin | |Macros: |SUBMODULE = $(MREF_ALTTEXT $1, mir, ndslice, $1) |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |TDNW2 = $0 |*/ |module mir.ndslice; | |public import mir.algorithm.iteration; |public import mir.ndslice.allocation; |public import mir.ndslice.chunks; |public import mir.ndslice.concatenation; |public import mir.ndslice.dynamic; |public import mir.ndslice.field; |public import mir.ndslice.filling; |public import mir.ndslice.fuse; |public import mir.ndslice.iterator; |public import mir.ndslice.mutation; |public import mir.ndslice.ndfield; |public import mir.ndslice.slice; |public import mir.ndslice.topology; |public import mir.ndslice.traits; | | |version(mir_test) unittest |{ | auto matrix = new double[12].sliced(3, 4); | matrix[] = 0; | matrix.diagonal[] = 1; | | auto row = matrix[2]; | row[3] = 6; | assert(matrix[2, 3] == 6); // D & C index order | //assert(matrix(3, 2) == 6); // Math & Fortran index order |} | |// relaxed example |version(mir_test) unittest |{ | import mir.qualifier; | | static Slice!(ubyte*, 3) movingWindowByChannel | (Slice!(ubyte*, 3, Universal) image, size_t nr, size_t nc, ubyte delegate(LightConstOf!(Slice!(ubyte*, 2, Universal))) filter) | { | return image | .pack!1 | .windows(nr, nc) | .unpack | .unpack | .transposed!(0, 1, 4) | .pack!2 | .map!filter | .slice; | } | | static T median(Iterator, T)(Slice!(Iterator, 2, Universal) sl, T[] buf) | { | import std.algorithm.sorting : topN; | // copy sl to the buffer | auto retPtr = reduce!( | (ptr, elem) { | *ptr = elem; | return ptr + 1; | } )(buf.ptr, sl); | auto n = retPtr - buf.ptr; | buf[0 .. n].topN(n / 2); | return buf[n / 2]; | } | | import std.conv : to; | import std.getopt : getopt, defaultGetoptPrinter; | import std.path : stripExtension; | | auto args = ["std"]; | uint nr, nc, def = 3; | auto helpInformation = args.getopt( | "nr", "number of rows in window, default value is " ~ def.to!string, &nr, | "nc", "number of columns in window default value equals to nr", &nc); | if (helpInformation.helpWanted) | { | defaultGetoptPrinter( | "Usage: median-filter [] []\noptions:", | helpInformation.options); | return; | } | if (!nr) nr = def; | if (!nc) nc = nr; | | auto buf = new ubyte[nr * nc]; | | foreach (name; args[1 .. $]) | { | auto ret = | movingWindowByChannel | (new ubyte[300].sliced(10, 10, 3).universal, nr, nc, window => median(window, buf)); | } |} | |@safe @nogc pure nothrow version(mir_test) unittest |{ | immutable r = 1000.iota; | | auto t0 = r.sliced(1000); | assert(t0.front == 0); | assert(t0.back == 999); | assert(t0[9] == 9); | | auto t1 = t0[10 .. 20]; | assert(t1.front == 10); | assert(t1.back == 19); | assert(t1[9] == 19); | | t1.popFront(); | assert(t1.front == 11); | t1.popFront(); | assert(t1.front == 12); | | t1.popBack(); | assert(t1.back == 18); | t1.popBack(); | assert(t1.back == 17); | | assert(t1 == iota([6], 12)); |} | |pure nothrow version(mir_test) unittest |{ | import std.algorithm.comparison : equal; | import std.range : iota; | import mir.array.allocation : array; | auto r = 1000.iota.array; | | auto t0 = r.sliced(1000); | assert(t0.length == 1000); | assert(t0.front == 0); | assert(t0.back == 999); | assert(t0[9] == 9); | | auto t1 = t0[10 .. 
20]; | assert(t1.front == 10); | assert(t1.back == 19); | assert(t1[9] == 19); | | t1.popFront(); | assert(t1.front == 11); | t1.popFront(); | assert(t1.front == 12); | | t1.popBack(); | assert(t1.back == 18); | t1.popBack(); | assert(t1.back == 17); | | assert(t1.equal(iota(12, 18))); | | t1.front = 13; | assert(t1.front == 13); | t1.front++; | assert(t1.front == 14); | t1.front += 2; | assert(t1.front == 16); | t1.front = 12; | assert((t1.front = 12) == 12); | | t1.back = 13; | assert(t1.back == 13); | t1.back++; | assert(t1.back == 14); | t1.back += 2; | assert(t1.back == 16); | t1.back = 12; | assert((t1.back = 12) == 12); | | t1[3] = 13; | assert(t1[3] == 13); | t1[3]++; | assert(t1[3] == 14); | t1[3] += 2; | assert(t1[3] == 16); | t1[3] = 12; | assert((t1[3] = 12) == 12); | | t1[3 .. 5] = 100; | assert(t1[2] != 100); | assert(t1[3] == 100); | assert(t1[4] == 100); | assert(t1[5] != 100); | | t1[3 .. 5] += 100; | assert(t1[2] < 100); | assert(t1[3] == 200); | assert(t1[4] == 200); | assert(t1[5] < 100); | | --t1[3 .. 5]; | | assert(t1[2] < 100); | assert(t1[3] == 199); | assert(t1[4] == 199); | assert(t1[5] < 100); | | --t1[]; | assert(t1[3] == 198); | assert(t1[4] == 198); | | t1[] += 2; | assert(t1[3] == 200); | assert(t1[4] == 200); | | t1[].opIndexOpAssign!"*"(t1); | assert(t1[3] == 40000); | assert(t1[4] == 40000); | | | assert(&t1[$ - 1] is &(t1.back())); |} | |@safe @nogc pure nothrow version(mir_test) unittest |{ | import std.range : iota; | auto r = (10_000L * 2 * 3 * 4).iota; | | auto t0 = r.slicedField(10, 20, 30, 40); | assert(t0.length == 10); | assert(t0.length!0 == 10); | assert(t0.length!1 == 20); | assert(t0.length!2 == 30); | assert(t0.length!3 == 40); |} | |pure nothrow version(mir_test) unittest |{ | auto tensor = new int[3 * 4 * 8].sliced(3, 4, 8); | assert(&(tensor.back.back.back()) is &tensor[2, 3, 7]); | assert(&(tensor.front.front.front()) is &tensor[0, 0, 0]); |} | |pure nothrow version(mir_test) unittest |{ | auto slice = new int[24].sliced(2, 3, 4); | auto r0 = slice.pack!1[1, 2]; | slice.pack!1[1, 2][] = 4; | auto r1 = slice[1, 2]; | assert(slice[1, 2, 3] == 4); |} | |pure nothrow version(mir_test) unittest |{ | auto ar = new int[3 * 8 * 9]; | | auto tensor = ar.sliced(3, 8, 9); | tensor[0, 1, 2] = 4; | tensor[0, 1, 2]++; | assert(tensor[0, 1, 2] == 5); | tensor[0, 1, 2]--; | assert(tensor[0, 1, 2] == 4); | tensor[0, 1, 2] += 2; | assert(tensor[0, 1, 2] == 6); | | auto matrix = tensor[0 .. $, 1, 0 .. $]; | matrix[] = 10; | assert(tensor[0, 1, 2] == 10); | assert(matrix[0, 2] == tensor[0, 1, 2]); | assert(&matrix[0, 2] is &tensor[0, 1, 2]); |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/package.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-core-1.1.2-mir-core-source-mir-bitop.lst |/++ |This module contains a collection of bit-level operations. | |Authors: Ilya Yaroshenko, Phobos & LDC Authors (original Phobos unittests, docs, conventions). |+/ |module mir.bitop; | |version(LDC) | import ldc.intrinsics; |version(GNU) | import gcc.builtins; | |import mir.math.common: fastmath; | |/// Right shift vallue for bit index to get element's index (5 for `uint`). 
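|// Worked example (a sketch): in a `uint`-based bit array, bit number 37 lives in word
|// 37 >> bitElemShift!uint == 1 at bit position 37 & bitShiftMask!uint == 5.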
|enum uint bitElemShift(T : ubyte) = 3; |/// ditto |enum uint bitElemShift(T : byte) = 3; |/// ditto |enum uint bitElemShift(T : ushort) = 4; |/// ditto |enum uint bitElemShift(T : short) = 4; |/// ditto |enum uint bitElemShift(T : uint) = 5; |/// ditto |enum uint bitElemShift(T : int) = 5; |/// ditto |enum uint bitElemShift(T : ulong) = 6; |/// ditto |enum uint bitElemShift(T : long) = 6; |static if (is(ucent)) |/// ditto |enum uint bitElemShift(T : ucent) = 7; |/// ditto |static if (is(cent)) |enum uint bitElemShift(T : cent) = 7; | |/// Bit mask for bit index to get element's bit shift (31 for uint). |enum uint bitShiftMask(T : ubyte) = 7; |/// ditto |enum uint bitShiftMask(T : byte) = 7; |/// ditto |enum uint bitShiftMask(T : ushort) = 15; |/// ditto |enum uint bitShiftMask(T : short) = 15; |/// ditto |enum uint bitShiftMask(T : uint) = 31; |/// ditto |enum uint bitShiftMask(T : int) = 31; |/// ditto |enum uint bitShiftMask(T : ulong) = 63; |/// ditto |enum uint bitShiftMask(T : long) = 63; |static if (is(ucent)) |/// ditto |enum uint bitShiftMask(T : ucent) = 127; |static if (is(cent)) |/// ditto |enum uint bitShiftMask(T : cent) = 127; | |// no effect on this function, but better for optimization of other @fastmath code that uses this |@fastmath: | | |/++ |+/ |T nTrailingBitsToCount(T)(in T value, in T popcnt) | if (__traits(isUnsigned, T)) |{ | import std.traits; | import mir.internal.utility: Iota; | alias S = Signed!(CommonType!(int, T)); | S mask = S(-1) << T.sizeof * 4; | foreach_reverse (s; Iota!(bitElemShift!T - 1)) | {{ | enum shift = 1 << s; | if (S(popcnt) > S(ctpop(cast(T)(value & ~mask)))) | mask <<= shift; | else | mask >>= shift; | }} | return cttz(cast(T)mask) + (S(popcnt) != ctpop(cast(T)(value & ~mask))); |} | |/// |unittest |{ | assert(nTrailingBitsToCount(0xF0u, 3u) == 7); | assert(nTrailingBitsToCount(0xE00u, 3u) == 12); | | foreach(uint i; 1 .. 32) | assert(nTrailingBitsToCount(uint.max, i) == i); |} | |/++ |+/ |T nLeadingBitsToCount(T)(in T value, in T popcnt) | if (__traits(isUnsigned, T)) |{ | import std.traits; | import mir.internal.utility: Iota; | alias S = Signed!(CommonType!(int, T)); | S mask = S(-1) << T.sizeof * 4; | foreach_reverse (s; Iota!(bitElemShift!T - 1)) | {{ | enum shift = 1 << s; | if (S(popcnt) > S(ctpop(cast(T)(value & mask)))) | mask >>= shift; | else | mask <<= shift; | }} | return ctlz(cast(T)~mask) + (S(popcnt) != ctpop(cast(T)(value & mask))); |} | |/// |unittest |{ | assert(nLeadingBitsToCount(0xF0u, 3u) == 32 - 5); | assert(nLeadingBitsToCount(0x700u, 3u) == 32 - 8); | | foreach(uint i; 1 .. 32) | assert(nLeadingBitsToCount(uint.max, i) == i); |} | |/++ |Tests the bit. |Returns: | A non-zero value if the bit was set, and a zero | if it was clear. |+/ |auto bt(Field, T = typeof(Field.init[size_t.init]))(auto ref Field p, size_t bitnum) | if (__traits(isUnsigned, T)) |{ | auto index = bitnum >> bitElemShift!T; | auto mask = T(1) << (bitnum & bitShiftMask!T); | return p[index] & mask; |} | |/// |@system pure unittest |{ | size_t[2] array; | | array[0] = 2; | array[1] = 0x100; | | assert(bt(array.ptr, 1)); | assert(array[0] == 2); | assert(array[1] == 0x100); |} | |/++ |Tests and assign the bit. |Returns: | A non-zero value if the bit was set, and a zero if it was clear. 
|+/ |auto bta(Field, T = typeof(Field.init[size_t.init]))(auto ref Field p, size_t bitnum, bool value) | if (__traits(isUnsigned, T)) |{ | auto index = bitnum >> bitElemShift!T; | auto shift = bitnum & bitShiftMask!T; | auto mask = T(1) << shift; | static if (__traits(compiles, &p[size_t.init])) | { | auto qp = &p[index]; | auto q = *qp; | auto ret = q & mask; | *qp = cast(T)((q & ~mask) ^ (T(value) << shift)); | } | else | { | auto q = p[index]; | auto ret = q & mask; | p[index] = cast(T)((q & ~mask) ^ (T(value) << shift)); | } | return ret; |} | |/++ |Tests and complements the bit. |Returns: | A non-zero value if the bit was set, and a zero if it was clear. |+/ |auto btc(Field, T = typeof(Field.init[size_t.init]))(auto ref Field p, size_t bitnum) | if (__traits(isUnsigned, T)) |{ | auto index = bitnum >> bitElemShift!T; | auto mask = T(1) << (bitnum & bitShiftMask!T); | static if (__traits(compiles, &p[size_t.init])) | { | auto qp = &p[index]; | auto q = *qp; | auto ret = q & mask; | *qp = cast(T)(q ^ mask); | } | else | { | auto q = p[index]; | auto ret = q & mask; | p[index] = cast(T)(q ^ mask); | } | return ret; |} | |/++ |Tests and resets (sets to 0) the bit. |Returns: | A non-zero value if the bit was set, and a zero if it was clear. |+/ |auto btr(Field, T = typeof(Field.init[size_t.init]))(auto ref Field p, size_t bitnum) | if (__traits(isUnsigned, T)) |{ | auto index = bitnum >> bitElemShift!T; | auto mask = T(1) << (bitnum & bitShiftMask!T); | static if (__traits(compiles, &p[size_t.init])) | { | auto qp = &p[index]; | auto q = *qp; | auto ret = q & mask; | *qp = cast(T)(q & ~mask); | } | else | { | auto q = p[index]; | auto ret = q & mask; | p[index] = cast(T)(q & ~mask); | } | return ret; |} | |/++ |Tests and sets the bit. |Params: |p = a non-NULL field / pointer to an array of unsigned integers. |bitnum = a bit number, starting with bit 0 of p[0], |and progressing. It addresses bits like the expression: |--- |p[index / (T.sizeof*8)] & (1 << (index & ((T.sizeof*8) - 1))) |--- |Returns: | A non-zero value if the bit was set, and a zero if it was clear. |+/ |auto bts(Field, T = typeof(Field.init[size_t.init]))(auto ref Field p, size_t bitnum) | if (__traits(isUnsigned, T)) |{ | auto index = bitnum >> bitElemShift!T; | auto mask = T(1) << (bitnum & bitShiftMask!T); | static if (__traits(compiles, &p[size_t.init])) | { | auto qp = &p[index]; | auto q = *qp; | auto ret = q & mask; | *qp = cast(T)(q | mask); | } | else | { | auto q = p[index]; | auto ret = q & mask; | p[index] = cast(T)(q | mask); | } | return ret; |} | |/// |@system pure unittest |{ | size_t[2] array; | | array[0] = 2; | array[1] = 0x100; | | assert(btc(array.ptr, 35) == 0); | if (size_t.sizeof == 8) | { | assert(array[0] == 0x8_0000_0002); | assert(array[1] == 0x100); | } | else | { | assert(array[0] == 2); | assert(array[1] == 0x108); | } | | assert(btc(array.ptr, 35)); | assert(array[0] == 2); | assert(array[1] == 0x100); | | assert(bts(array.ptr, 35) == 0); | if (size_t.sizeof == 8) | { | assert(array[0] == 0x8_0000_0002); | assert(array[1] == 0x100); | } | else | { | assert(array[0] == 2); | assert(array[1] == 0x108); | } | | assert(btr(array.ptr, 35)); | assert(array[0] == 2); | assert(array[1] == 0x100); |} | |/// The 'ctpop' family of intrinsics counts the number of bits set in a value. 
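|/// For instance (illustrative): `ctpop(0b1011u) == 3`.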
|T ctpop(T)(in T src) | if (__traits(isUnsigned, T)) |{ | version(LDC) if (!__ctfe) | return llvm_ctpop(src); | version(GNU) if (!__ctfe) | { | static if (T.sizeof < __builtin_clong.sizeof) | return cast(T) __builtin_popcount(src); | else static if (T.sizeof <= __builtin_clong.sizeof) | return cast(T) __builtin_popcountl(src); | else | return cast(T) __builtin_popcountll(src); | } | import core.bitop: popcnt; | return cast(T) popcnt(src); |} | |/++ |The 'ctlz' family of intrinsic functions counts the number of leading zeros in a variable. |Result is undefined if the argument is zero. |+/ |T ctlz(T)(in T src) | if (__traits(isUnsigned, T)) |{ | version(LDC) if (!__ctfe) | return llvm_ctlz(src, true); | version(GNU) if (!__ctfe) | { | // Do not zero-extend when counting leading zeroes. | static if (T.sizeof < __builtin_clong.sizeof && T.sizeof >= uint.sizeof) | return cast(T) __builtin_clz(src); | else static if (T.sizeof == __builtin_clong.sizeof) | return cast(T) __builtin_clzl(src); | else static if (T.sizeof > __builtin_clong.sizeof) | return cast(T) __builtin_clzll(src); | } | import core.bitop: bsr; | return cast(T)(T.sizeof * 8 - 1 - bsr(src)); |} | | |version (mir_test) @nogc nothrow pure @safe unittest |{ | assert(ctlz(cast(ubyte) 0b0011_1111) == 2); | assert(ctlz(cast(ushort) 0b0000_0001_1111_1111) == 7); |} | | |/++ |The 'cttz' family of intrinsic functions counts the number of trailing zeros. |Result is undefined if the argument is zero. |+/ |T cttz(T)(in T src) | if (__traits(isUnsigned, T)) |{ | version(LDC) if (!__ctfe) | return llvm_cttz(src, true); | version(GNU) if (!__ctfe) | { | static if (T.sizeof <__builtin_clong.sizeof) | return cast(T) __builtin_ctz(src); | else static if (T.sizeof <=__builtin_clong.sizeof) | return cast(T) __builtin_ctzl(src); | else | return cast(T) __builtin_ctzll(src); | } | import core.bitop: bsf; | return cast(T) bsf(src); |} ../../../.dub/packages/mir-core-1.1.2/mir-core/source/mir/bitop.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-core-1.1.2-mir-core-source-mir-utility.lst |/++ |Generic utilities. | |$(BOOKTABLE Cheat Sheet, |$(TR $(TH Function Name) $(TH Description)) |$(T2 swap, Swaps two values.) |$(T2 extMul, Extended unsigned multiplications.) |$(T2 min, Minimum value.) |$(T2 max, Maximum value.) |) | |Copyright: Andrei Alexandrescu 2008-2016, Ilya Yaroshenko 2016-. |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). 
|Authors: Ilya Yaroshenko, $(HTTP erdani.com, Andrei Alexandrescu) (original std.* modules), |Macros: |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.utility; | |import std.traits; | |import mir.math.common: optmath; | |version(LDC) |pragma(LDC_inline_ir) R inlineIR(string s, R, P...)(P) @safe pure nothrow @nogc; | |@optmath: | |version(LDC) |{ | /// | public import ldc.intrinsics: _expect = llvm_expect; |} |else version(GNU) |{ | import gcc.builtins: __builtin_expect, __builtin_clong; | | /// | T _expect(T)(in T val, in T expected_val) if (__traits(isIntegral, T)) | { | static if (T.sizeof <= __builtin_clong.sizeof) | return cast(T) __builtin_expect(val, expected_val); | else | return val; | } |} |else |{ | /// | T _expect(T)(in T val, in T expected_val) if (__traits(isIntegral, T)) | { | return val; | } |} | |public import std.algorithm.mutation: swap; | |void swapStars(I1, I2)(auto ref I1 i1, auto ref I2 i2) |{ | static if (__traits(compiles, swap(*i1, *i2))) | { | swap(*i1, *i2); | } | else | { | import mir.functional: unref; | auto e = unref(*i1); | i1[0] = *i2; | i2[0] = e; | } |} | |/++ |Iterates the passed arguments and returns the minimum value. |Params: args = The values to select the minimum from. At least two arguments | must be passed, and they must be comparable with `<`. |Returns: The minimum of the passed-in values. |+/ |auto min(T...)(T args) | if (T.length >= 2) |{ | //Get "a" | static if (T.length <= 2) | alias a = args[0]; | else | auto a = min(args[0 .. ($+1)/2]); | alias T0 = typeof(a); | | //Get "b" | static if (T.length <= 3) | alias b = args[$-1]; | else | auto b = min(args[($+1)/2 .. $]); | alias T1 = typeof(b); | | static assert (is(typeof(a < b)), "Invalid arguments: Cannot compare types " ~ T0.stringof ~ " and " ~ T1.stringof ~ "."); | | static if ((isFloatingPoint!T0 && isNumeric!T1) || (isFloatingPoint!T1 && isNumeric!T0)) | { | import mir.math.common: fmin; | return fmin(a, b); | } | else | { | static if (isIntegral!T0 && isIntegral!T1) | static assert(isSigned!T0 == isSigned!T1, | "mir.utility.min is not defined for signed + unsigned pairs because of security reasons." | ~ "Please unify type or use a Phobos analog."); | //Do the "min" proper with a and b | return a < b ? a : b; | } |} | |@safe version(mir_test) unittest |{ | int a = 5; | short b = 6; | double c = 2; | auto d = min(a, b); | static assert(is(typeof(d) == int)); | assert(d == 5); | auto e = min(a, b, c); | static assert(is(typeof(e) == double)); | assert(e == 2); |} | |/++ |`min` is not defined for arguments of mixed signedness because of security reasons. |Please unify type or use a Phobos analog. |+/ |version(mir_test) unittest |{ | int a = -10; | uint b = 10; | static assert(!is(typeof(min(a, b)))); |} | | |/++ |Iterates the passed arguments and returns the minimum value. |Params: args = The values to select the minimum from. At least two arguments | must be passed, and they must be comparable with `<`. |Returns: The minimum of the passed-in values. |+/ |auto max(T...)(T args) | if (T.length >= 2) |{ | //Get "a" | static if (T.length <= 2) | alias a = args[0]; | else | auto a = max(args[0 .. ($+1)/2]); | alias T0 = typeof(a); | | //Get "b" | static if (T.length <= 3) | alias b = args[$-1]; | else | auto b = max(args[($+1)/2 .. 
$]); | alias T1 = typeof(b); | | static assert (is(typeof(a < b)), "Invalid arguments: Cannot compare types " ~ T0.stringof ~ " and " ~ T1.stringof ~ "."); | | static if ((isFloatingPoint!T0 && isNumeric!T1) || (isFloatingPoint!T1 && isNumeric!T0)) | { | import mir.math.common: fmax; | return fmax(a, b); | } | else | { | static if (isIntegral!T0 && isIntegral!T1) | static assert(isSigned!T0 == isSigned!T1, | "mir.utility.max is not defined for signed + unsigned pairs because of security reasons." | ~ "Please unify type or use a Phobos analog."); | //Do the "max" proper with a and b | return a > b ? a : b; | } |} | |/// |@safe version(mir_test) unittest |{ | int a = 5; | short b = 6; | double c = 2; | auto d = max(a, b); | static assert(is(typeof(d) == int)); | assert(d == 6); | auto e = min(a, b, c); | static assert(is(typeof(e) == double)); | assert(e == 2); |} | |/++ |`max` is not defined for arguments of mixed signedness because of security reasons. |Please unify type or use a Phobos analog. |+/ |version(mir_test) unittest |{ | int a = -10; | uint b = 10; | static assert(!is(typeof(max(a, b)))); |} | |/++ |Return type for $(LREF extMul); |+/ |struct ExtMulResult(I) | if (isIntegral!I) |{ | /// Lower I.sizeof * 8 bits | I low; | /// Higher I.sizeof * 8 bits | I high; |} | |/++ |Extended unsigned multiplications. |Performs U x U multiplication and returns $(LREF ExtMulResult)!U that contains extended result. |Params: | a = unsigned integer | b = unsigned integer |Returns: | 128bit result if U is ulong or 256bit result if U is ucent. |Optimization: | Algorithm is optimized for LDC (LLVM IR, any target) and for DMD (X86_64). |+/ |ExtMulResult!U extMul(U)(in U a, in U b) @nogc nothrow pure @safe | if(isUnsigned!U && U.sizeof >= ulong.sizeof) |{ | static if (is(U == ulong)) | alias H = uint; | else | alias H = ulong; | | enum hbc = H.sizeof * 8; | | static if (is(U == ulong) && __traits(compiles, ucent.init)) | { | auto ret = ucent(a) * b; | return typeof(return)(cast(ulong) ret, cast(ulong)(ret >>> 64)); | } | else | { | if (!__ctfe) | { | static if (size_t.sizeof == 4) | { | // https://github.com/ldc-developers/ldc/issues/2391 | } | else | version(LDC) | { | // LLVM IR by n8sh | pragma(inline, true); | static if (is(U == ulong)) | { | auto r = inlineIR!(` | %a = zext i64 %0 to i128 | %b = zext i64 %1 to i128 | %m = mul i128 %a, %b | %n = lshr i128 %m, 64 | %h = trunc i128 %n to i64 | %l = trunc i128 %m to i64 | %agg1 = insertvalue [2 x i64] undef, i64 %l, 0 | %agg2 = insertvalue [2 x i64] %agg1, i64 %h, 1 | ret [2 x i64] %agg2`, ulong[2])(a, b); | return ExtMulResult!U(r[0], r[1]); | } | else | static if (false) | { | auto r = inlineIR!(` | %a = zext i128 %0 to i256 | %b = zext i128 %1 to i256 | %m = mul i256 %a, %b | %n = lshr i256 %m, 128 | %h = trunc i256 %n to i128 | %l = trunc i256 %m to i128 | %agg1 = insertvalue [2 x i128] undef, i128 %l, 0 | %agg2 = insertvalue [2 x i128] %agg1, i128 %h, 1 | ret [2 x i128] %agg2`, ucent[2])(a, b); | return ExtMulResult!U(r[0], r[1]); | } | } | else | version(D_InlineAsm_X86_64) | { | static if (is(U == ulong)) | { | version(Windows) | { | ulong[2] r = extMul_X86_64(a, b); | return ExtMulResult!ulong(r[0], r[1]); | } | else | { | return extMul_X86_64(a, b); | } | } | } | } | | U al = cast(H)a; | U ah = a >>> hbc; | U bl = cast(H)b; | U bh = b >>> hbc; | | U p0 = al * bl; | U p1 = al * bh; | U p2 = ah * bl; | U p3 = ah * bh; | | H cy = cast(H)(((p0 >>> hbc) + cast(H)p1 + cast(H)p2) >>> hbc); | U lo = p0 + (p1 << hbc) + (p2 << hbc); | U hi = p3 + (p1 
>>> hbc) + (p2 >>> hbc) + cy; | | return typeof(return)(lo, hi); | } |} | |/// |unittest |{ | immutable a = 0x93_8d_28_00_0f_50_a5_56; | immutable b = 0x54_c3_2f_e8_cc_a5_97_10; | enum c = extMul(a, b); // Compile time algorithm | assert(extMul(a, b) == c); // Fast runtime algorithm | static assert(c.high == 0x30_da_d1_42_95_4a_50_78); | static assert(c.low == 0x27_9b_4b_b4_9e_fe_0f_60); |} | |version(D_InlineAsm_X86_64) |{ | version(Windows) | private ulong[2] extMul_X86_64()(ulong a, ulong b) | { | asm @safe pure nothrow @nogc | { | naked; | mov RAX, RCX; | mul RDX; | ret; | } | } | else | private ExtMulResult!ulong extMul_X86_64()(ulong a, ulong b) | { | asm @safe pure nothrow @nogc | { | naked; | mov RAX, RDI; | mul RSI; | ret; | } | } |} | |version(LDC) {} else version(D_InlineAsm_X86_64) |@nogc nothrow pure @safe version(mir_test) unittest |{ | immutable a = 0x93_8d_28_00_0f_50_a5_56; | immutable b = 0x54_c3_2f_e8_cc_a5_97_10; | | version(Windows) | { | immutable ulong[2] r = extMul_X86_64(a, b); | immutable ExtMulResult!ulong c = ExtMulResult!ulong(r[0], r[1]); | } | else | { | immutable ExtMulResult!ulong c = extMul_X86_64(a, b); | } | | assert(c.high == 0x30_da_d1_42_95_4a_50_78); | assert(c.low == 0x27_9b_4b_b4_9e_fe_0f_60); |} ../../../.dub/packages/mir-core-1.1.2/mir-core/source/mir/utility.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-core-1.1.2-mir-core-source-mir-functional.lst |/++ |Functions that manipulate other functions. |This module provides functions for compile time function composition. These |functions are helpful when constructing predicates for the algorithms in |$(MREF mir, ndslice). |$(BOOKTABLE $(H2 Functions), |$(TR $(TH Function Name) $(TH Description)) | $(TR $(TD $(LREF naryFun)) | $(TD Create a unary, binary or N-nary function from a string. Most often | used when defining algorithms on ranges and slices. | )) | $(TR $(TD $(LREF pipe)) | $(TD Join a couple of functions into one that executes the original | functions one after the other, using one function's result for the next | function's argument. | )) | $(TR $(TD $(LREF not)) | $(TD Creates a function that negates another. | )) | $(TR $(TD $(LREF reverseArgs)) | $(TD Predicate that reverses the order of its arguments. | )) | $(TR $(TD $(LREF forward)) | $(TD Forwards function arguments with saving ref-ness. | )) | $(TR $(TD $(LREF refTuple)) | $(TD Removes $(LREF Ref) shell. | )) | $(TR $(TD $(LREF unref)) | $(TD Creates a $(LREF RefTuple) structure. | )) | $(TR $(TD $(LREF __ref)) | $(TD Creates a $(LREF Ref) structure. | )) |) |Copyright: Andrei Alexandrescu 2008 - 2009, Ilya Yaroshenko 2016-. |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Authors: Ilya Yaroshenko, $(HTTP erdani.org, Andrei Alexandrescu (some original code from std.functional)) | |Macros: |NDSLICE = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |+/ |module mir.functional; | |import std.meta; |import std.traits; | |private enum isRef(T) = is(T : Ref!T0, T0); | |import mir.math.common: optmath; | |public import core.lifetime : forward; | |@optmath: | |/++ |Constructs static array. |+/ |T[N] staticArray(T, size_t N)(T[N] a...) |{ | return a; |} | |/++ |Simple wrapper that holds a pointer. |It is used for as workaround to return multiple auto ref values. 
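|
|A minimal sketch:
|---
|int x = 5;
|auto r = _ref(x);   // `Ref!int` storing a pointer to `x`
|r.__value = 7;      // writes through the stored pointer
|assert(x == 7);
|---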
|+/ |struct Ref(T) | if (!isRef!T) |{ | @optmath: | | @disable this(); | /// | this(ref T value) @trusted | { | __ptr = &value; | } | /// | T* __ptr; | /// | ref inout(T) __value() inout @property { return *__ptr; } | /// | alias __value this; | /// | bool opEquals(scope Ref!T rhs) const scope | { | return __value == rhs.__value; | } | | static if (__traits(hasMember, T, "toHash") || __traits(isScalar, T)) | /// | size_t toHash() const | { | return hashOf(__value); | } |} | |/// Creates $(LREF Ref) wrapper. |Ref!T _ref(T)(ref T value) |{ | return Ref!T(value); |} | |private mixin template _RefTupleMixin(T...) | if (T.length <= 26) |{ | static if (T.length) | { | enum i = T.length - 1; | static if (isRef!(T[i])) | mixin(`@optmath @property ref ` ~ cast(char)('a' + i) ~ `() { return *expand[` ~ i.stringof ~ `].__ptr; }` ); | else | mixin(`alias ` ~ cast(char)('a' + i) ~ ` = expand[` ~ i.stringof ~ `];`); | mixin ._RefTupleMixin!(T[0 .. $-1]); | } |} | |/++ |Simplified tuple structure. Some fields may be type of $(LREF Ref). |Ref stores a pointer to a values. |+/ |struct RefTuple(T...) |{ | @optmath: | T expand; | alias expand this; | mixin _RefTupleMixin!T; |} | |/// Removes $(LREF Ref) shell. |alias Unref(V : Ref!T, T) = T; |/// ditto |alias Unref(V : RefTuple!T, T...) = RefTuple!(staticMap!(.Unref, T)); |/// ditto |alias Unref(V) = V; | |/++ |Returns: a $(LREF RefTuple) structure. |+/ |RefTuple!Args refTuple(Args...)(auto ref Args args) |{ | return RefTuple!Args(args); |} | |/// Removes $(LREF Ref) shell. |ref T unref(V : Ref!T, T)(scope return V value) |{ | return *value.__ptr; |} | |/// ditto |Unref!(RefTuple!T) unref(V : RefTuple!T, T...)(V value) |{ | typeof(return) ret; | foreach(i, ref elem; ret.expand) | elem = unref(value.expand[i]); | return ret; |} | |/// ditto |ref V unref(V)(scope return ref V value) |{ | return value; |} | |/// ditto |V unref(V)(V value) |{ | return value; |} | |private string joinStrings()(string[] strs) |{ | if (strs.length) | { | auto ret = strs[0]; | foreach(s; strs[1 .. $]) | ret ~= s; | return ret; | } | return null; |} | |/++ |Takes multiple functions and adjoins them together. The result is a |$(LREF RefTuple) with one element per passed-in function. Upon |invocation, the returned tuple is the adjoined results of all |functions. |Note: In the special case where only a single function is provided |(`F.length == 1`), adjoin simply aliases to the single passed function |(`F[0]`). |+/ |template adjoin(fun...) 
if (fun.length && fun.length <= 26) |{ | static if (fun.length != 1) | { | static if (Filter!(_needNary, fun).length == 0) | { | /// | @optmath auto adjoin(Args...)(auto ref Args args) | { | template _adjoin(size_t i) | { | static if (__traits(compiles, &fun[i](forward!args))) | enum _adjoin = "Ref!(typeof(fun[" ~ i.stringof ~ "](forward!args)))(fun[" ~ i.stringof ~ "](forward!args)), "; | else | enum _adjoin = "fun[" ~ i.stringof ~ "](forward!args), "; | } | | import mir.internal.utility; | mixin("return refTuple(" ~ [staticMap!(_adjoin, Iota!(fun.length))].joinStrings ~ ");"); | } | } | else alias adjoin = .adjoin!(staticMap!(naryFun, fun)); | } | else alias adjoin = naryFun!(fun[0]); |} | |/// |@safe version(mir_test) unittest |{ | static bool f1(int a) { return a != 0; } | static int f2(int a) { return a / 2; } | auto x = adjoin!(f1, f2)(5); | assert(is(typeof(x) == RefTuple!(bool, int))); | assert(x.a == true && x.b == 2); |} | |@safe version(mir_test) unittest |{ | static bool F1(int a) { return a != 0; } | auto x1 = adjoin!(F1)(5); | static int F2(int a) { return a / 2; } | auto x2 = adjoin!(F1, F2)(5); | assert(is(typeof(x2) == RefTuple!(bool, int))); | assert(x2.a && x2.b == 2); | auto x3 = adjoin!(F1, F2, F2)(5); | assert(is(typeof(x3) == RefTuple!(bool, int, int))); | assert(x3.a && x3.b == 2 && x3.c == 2); | | bool F4(int a) { return a != x1; } | alias eff4 = adjoin!(F4); | static struct S | { | bool delegate(int) @safe store; | int fun() { return 42 + store(5); } | } | S s; | s.store = (int a) { return eff4(a); }; | auto x4 = s.fun(); | assert(x4 == 43); |} | |//@safe |version(mir_test) unittest |{ | alias funs = staticMap!(naryFun, "a", "a * 2", "a * 3", "a * a", "-a"); | alias afun = adjoin!funs; | int a = 5, b = 5; | assert(afun(a) == refTuple(Ref!int(a), 10, 15, 25, -5)); | assert(afun(a) == refTuple(Ref!int(b), 10, 15, 25, -5)); | | static class C{} | alias IC = immutable(C); | IC foo(){return typeof(return).init;} | RefTuple!(IC, IC, IC, IC) ret1 = adjoin!(foo, foo, foo, foo)(); | | static struct S{int* p;} | alias IS = immutable(S); | IS bar(){return typeof(return).init;} | enum RefTuple!(IS, IS, IS, IS) ret2 = adjoin!(bar, bar, bar, bar)(); |} | |private template needOpCallAlias(alias fun) |{ | /* Determine whether or not naryFun need to alias to fun or | * fun.opCall. Basically, fun is a function object if fun(...) compiles. We | * want is(naryFun!fun) (resp., is(naryFun!fun)) to be true if fun is | * any function object. There are 4 possible cases: | * | * 1) fun is the type of a function object with static opCall; | * 2) fun is an instance of a function object with static opCall; | * 3) fun is the type of a function object with non-static opCall; | * 4) fun is an instance of a function object with non-static opCall. | * | * In case (1), is(naryFun!fun) should compile, but does not if naryFun | * aliases itself to fun, because typeof(fun) is an error when fun itself | * is a type. So it must be aliased to fun.opCall instead. All other cases | * should be aliased to fun directly. 
| */ | static if (is(typeof(fun.opCall) == function)) | { | enum needOpCallAlias = !is(typeof(fun)) && __traits(compiles, () { | return fun(Parameters!fun.init); | }); | } | else | enum needOpCallAlias = false; |} | |private template _naryAliases(size_t n) | if (n <= 26) |{ | static if (n == 0) | enum _naryAliases = ""; | else | { | enum i = n - 1; | enum _naryAliases = _naryAliases!i ~ "alias " ~ cast(char)('a' + i) ~ " = args[" ~ i.stringof ~ "];\n"; | } |} | |/++ |Transforms a string representing an expression into a binary function. The |string must use symbol names `a`, `b`, ..., `z` as the parameters. |If `fun` is not a string, `naryFun` aliases itself away to `fun`. |+/ |template naryFun(alias fun) |{ | static if (is(typeof(fun) : string)) | { | import mir.math.common; | /// Specialization for string lambdas | @optmath auto ref naryFun(Args...)(auto ref Args args) | if (args.length <= 26) | { | mixin(_naryAliases!(Args.length)); | return mixin(fun); | } | } | else static if (needOpCallAlias!fun) | alias naryFun = fun.opCall; | else | alias naryFun = fun; |} | |/// |@safe version(mir_test) unittest |{ | // Strings are compiled into functions: | alias isEven = naryFun!("(a & 1) == 0"); | assert(isEven(2) && !isEven(1)); |} | |/// |@safe version(mir_test) unittest |{ | alias less = naryFun!("a < b"); | assert(less(1, 2) && !less(2, 1)); | alias greater = naryFun!("a > b"); | assert(!greater("1", "2") && greater("2", "1")); |} | |/// `naryFun` accepts up to 26 arguments. |@safe version(mir_test) unittest |{ | assert(naryFun!("a * b + c")(2, 3, 4) == 10); |} | |/// `naryFun` can return by reference. |version(mir_test) unittest |{ | int a; | assert(&naryFun!("a")(a) == &a); |} | |/// `args` parameter tuple |version(mir_test) unittest |{ | assert(naryFun!("args[0] + args[1]")(2, 3) == 5); |} | |@safe version(mir_test) unittest |{ | static int f1(int a) { return a + 1; } | static assert(is(typeof(naryFun!(f1)(1)) == int)); | assert(naryFun!(f1)(41) == 42); | int f2(int a) { return a + 1; } | static assert(is(typeof(naryFun!(f2)(1)) == int)); | assert(naryFun!(f2)(41) == 42); | assert(naryFun!("a + 1")(41) == 42); | | int num = 41; | assert(naryFun!"a + 1"(num) == 42); | | // Issue 9906 | struct Seen | { | static bool opCall(int n) { return true; } | } | static assert(needOpCallAlias!Seen); | static assert(is(typeof(naryFun!Seen(1)))); | assert(naryFun!Seen(1)); | | Seen s; | static assert(!needOpCallAlias!s); | static assert(is(typeof(naryFun!s(1)))); | assert(naryFun!s(1)); | | struct FuncObj | { | bool opCall(int n) { return true; } | } | FuncObj fo; | static assert(!needOpCallAlias!fo); | static assert(is(typeof(naryFun!fo))); | assert(naryFun!fo(1)); | | // Function object with non-static opCall can only be called with an | // instance, not with merely the type. 
| static assert(!is(typeof(naryFun!FuncObj))); |} | |@safe version(mir_test) unittest |{ | static int f1(int a, string b) { return a + 1; } | static assert(is(typeof(naryFun!(f1)(1, "2")) == int)); | assert(naryFun!(f1)(41, "a") == 42); | string f2(int a, string b) { return b ~ "2"; } | static assert(is(typeof(naryFun!(f2)(1, "1")) == string)); | assert(naryFun!(f2)(1, "4") == "42"); | assert(naryFun!("a + b")(41, 1) == 42); | //@@BUG | //assert(naryFun!("return a + b;")(41, 1) == 42); | | // Issue 9906 | struct Seen | { | static bool opCall(int x, int y) { return true; } | } | static assert(is(typeof(naryFun!Seen))); | assert(naryFun!Seen(1,1)); | | struct FuncObj | { | bool opCall(int x, int y) { return true; } | } | FuncObj fo; | static assert(!needOpCallAlias!fo); | static assert(is(typeof(naryFun!fo))); | assert(naryFun!fo(1,1)); | | // Function object with non-static opCall can only be called with an | // instance, not with merely the type. | static assert(!is(typeof(naryFun!FuncObj))); |} | |/++ |N-ary predicate that reverses the order of arguments, e.g., given |`pred(a, b, c)`, returns `pred(c, b, a)`. |+/ |template reverseArgs(alias fun) |{ | /// | @optmath auto ref reverseArgs(Args...)(auto ref Args args) | if (is(typeof(fun(Reverse!args)))) | { | return fun(Reverse!args); | } | |} | |/// |@safe version(mir_test) unittest |{ | int abc(int a, int b, int c) { return a * b + c; } | alias cba = reverseArgs!abc; | assert(abc(91, 17, 32) == cba(32, 17, 91)); |} | |@safe version(mir_test) unittest |{ | int a(int a) { return a * 2; } | alias _a = reverseArgs!a; | assert(a(2) == _a(2)); |} | |@safe version(mir_test) unittest |{ | int b() { return 4; } | alias _b = reverseArgs!b; | assert(b() == _b()); |} | |@safe version(mir_test) unittest |{ | alias gt = reverseArgs!(naryFun!("a < b")); | assert(gt(2, 1) && !gt(1, 1)); | int x = 42; | bool xyz(int a, int b) { return a * x < b / x; } | auto foo = &xyz; | foo(4, 5); | alias zyx = reverseArgs!(foo); | assert(zyx(5, 4) == foo(4, 5)); |} | |/++ |Negates predicate `pred`. |+/ |template not(alias pred) |{ | static if (!is(typeof(pred) : string) && !needOpCallAlias!pred) | /// | @optmath bool not(T...)(auto ref T args) | { | return !pred(args); | } | else | alias not = .not!(naryFun!pred); |} | |/// |@safe version(mir_test) unittest |{ | import std.algorithm.searching : find; | import std.uni : isWhite; | string a = " Hello, world!"; | assert(find!(not!isWhite)(a) == "Hello, world!"); |} | |@safe version(mir_test) unittest |{ | assert(not!"a != 5"(5)); | assert(not!"a != b"(5, 5)); | | assert(not!(() => false)()); | assert(not!(a => a != 5)(5)); | assert(not!((a, b) => a != b)(5, 5)); | assert(not!((a, b, c) => a * b * c != 125 )(5, 5, 5)); |} | |private template _pipe(size_t n) |{ | static if (n) | { | enum i = n - 1; | enum _pipe = "f[" ~ i.stringof ~ "](" ~ ._pipe!i ~ ")"; | } | else | enum _pipe = "args"; |} | |private template _unpipe(alias fun) |{ | static if (__traits(compiles, TemplateOf!fun)) | static if (__traits(isSame, TemplateOf!fun, .pipe)) | alias _unpipe = TemplateArgsOf!fun; | else | alias _unpipe = fun; | else | alias _unpipe = fun; | |} | |private enum _needNary(alias fun) = is(typeof(fun) : string) || needOpCallAlias!fun; | |/++ |Composes passed-in functions `fun[0], fun[1], ...` returning a |function `f(x)` that in turn returns |`...(fun[1](fun[0](x)))...`. Each function can be a regular |functions, a delegate, a lambda, or a string. |+/ |template pipe(fun...) 
|{ | static if (fun.length != 1) | { | alias f = staticMap!(_unpipe, fun); | static if (f.length == fun.length && Filter!(_needNary, f).length == 0) | { | /// | @optmath auto ref pipe(Args...)(auto ref Args args) | { | return mixin (_pipe!(fun.length)); | } | } | else alias pipe = .pipe!(staticMap!(naryFun, f)); | } | else alias pipe = naryFun!(fun[0]); |} | |/// |@safe version(mir_test) unittest |{ | assert(pipe!("a + b", a => a * 10)(2, 3) == 50); |} | |/// `pipe` can return by reference. |version(mir_test) unittest |{ | int a; | assert(&pipe!("a", "a")(a) == &a); |} | |/// Template bloat reduction |version(mir_test) unittest |{ | enum a = "a * 2"; | alias b = e => e + 2; | | alias p0 = pipe!(pipe!(a, b), pipe!(b, a)); | alias p1 = pipe!(a, b, b, a); | | static assert(__traits(isSame, p0, p1)); |} | |@safe version(mir_test) unittest |{ | import std.algorithm.comparison : equal; | import std.algorithm.iteration : map; | import std.array : split; | import std.conv : to; | | // First split a string in whitespace-separated tokens and then | // convert each token into an integer | assert(pipe!(split, map!(to!(int)))("1 2 3").equal([1, 2, 3])); |} | | |struct AliasCall(T, string methodName, TemplateArgs...) |{ | T __this; | alias __this this; | | /// | auto lightConst()() const @property | { | import mir.qualifier; | return AliasCall!(LightConstOf!T, methodName, TemplateArgs)(__this.lightConst); | } | | /// | auto lightImmutable()() immutable @property | { | import mir.qualifier; | return AliasCall!(LightImmutableOf!T, methodName, TemplateArgs)(__this.lightImmutable); | } | | this()(auto ref T value) | { | __this = value; | } | auto ref opCall(Args...)(auto ref Args args) | { | mixin("return __this." ~ methodName ~ (TemplateArgs.length ? "!TemplateArgs" : "") ~ "(forward!args);"); | } |} | |/++ |Replaces call operator (`opCall`) for the value using its method. |The funciton is designed to use with $(NDSLICE, topology, vmap) or $(NDSLICE, topology, map). |Params: | methodName = name of the methods to use for opCall and opIndex | TemplateArgs = template arguments |+/ |template aliasCall(string methodName, TemplateArgs...) |{ | /++ | Params: | value = the value to wrap | Returns: | wrapped value with implemented opCall and opIndex methods | +/ | AliasCall!(T, methodName, TemplateArgs) aliasCall(T)(T value) @property | { | return typeof(return)(value); | } | | /// ditto | ref AliasCall!(T, methodName, TemplateArgs) aliasCall(T)(return ref T value) @property @trusted | { | return *cast(typeof(return)*) &value; | } |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | static struct S | { | auto lightConst()() const @property { return S(); } | | auto fun(size_t ct_param = 1)(size_t rt_param) const | { | return rt_param + ct_param; | } | } | | S s; | | auto sfun = aliasCall!"fun"(s); | assert(sfun(3) == 4); | | auto sfun10 = aliasCall!("fun", 10)(s); // uses fun!10 | assert(sfun10(3) == 13); |} ../../../.dub/packages/mir-core-1.1.2/mir-core/source/mir/functional.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-allocation.lst |/++ |This is a submodule of $(MREF mir,ndslice). | |It contains allocation utilities. | | |$(BOOKTABLE $(H2 Common utilities), |$(T2 shape, Returns a shape of a common n-dimensional array. ) |) | |$(BOOKTABLE $(H2 GC Allocation utilities), |$(TR $(TH Function Name) $(TH Description)) |$(T2 slice, Allocates a slice using GC.) |$(T2 bitSlice, GC-Allocates a bitwise packed n-dimensional boolean slice.) 
|$(T2 ndarray, Allocates a common n-dimensional array from a slice. ) |$(T2 uninitSlice, Allocates an uninitialized slice using GC. ) |) | |$(BOOKTABLE $(H2 Ref counted allocation utilities), |$(T2 rcslice, Allocates an n-dimensional reference-counted (thread-safe) slice.) |$(T2 bitRcslice, Allocates a bitwise packed n-dimensional reference-counted (thread-safe) boolean slice.) |$(T2 mininitRcslice, Allocates a minimally initialized n-dimensional reference-counted (thread-safe) slice.) |) | |$(BOOKTABLE $(H2 Custom allocation utilities), |$(TR $(TH Function Name) $(TH Description)) |$(T2 makeNdarray, Allocates a common n-dimensional array from a slice using an allocator. ) |$(T2 makeSlice, Allocates a slice using an allocator. ) |$(T2 makeUninitSlice, Allocates an uninitialized slice using an allocator. ) |) | |$(BOOKTABLE $(H2 CRuntime allocation utilities), |$(TR $(TH Function Name) $(TH Description)) |$(T2 stdcSlice, Allocates a slice copy using `core.stdc.stdlib.malloc`) |$(T2 stdcUninitSlice, Allocates an uninitialized slice using `core.stdc.stdlib.malloc`.) |$(T2 stdcFreeSlice, Frees memory using `core.stdc.stdlib.free`) |) | |$(BOOKTABLE $(H2 Aligned allocation utilities), |$(TR $(TH Function Name) $(TH Description)) |$(T2 uninitAlignedSlice, Allocates an uninitialized aligned slice using GC. ) |$(T2 stdcUninitAlignedSlice, Allocates an uninitialized aligned slice using CRuntime.) |$(T2 stdcFreeAlignedSlice, Frees memory using CRuntime) |) | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.ndslice.allocation; | |import mir.math.common: optmath; |import mir.ndslice.concatenation; |import mir.ndslice.field: BitField; |import mir.ndslice.internal; |import mir.ndslice.iterator: FieldIterator; |import mir.ndslice.slice; |import mir.rc.array; |import std.traits; |import std.meta: staticMap; | |@optmath: | |/++ |Allocates an n-dimensional reference-counted (thread-safe) slice. |Params: | lengths = List of lengths for each dimension. | init = Value to initialize with (optional). | slice = Slice to copy shape and data from (optional). |Returns: | n-dimensional slice |+/ |Slice!(RCI!T, N) | rcslice(T, size_t N)(size_t[N] lengths...) 
|{ | immutable len = lengths.lengthsProduct; | auto _lengths = lengths; | return typeof(return)(_lengths, RCI!T(RCArray!T(len))); |} | |/// ditto |Slice!(RCI!T, N) | rcslice(T, size_t N)(size_t[N] lengths, T init) |{ | auto ret = (()@trusted => mininitRcslice!T(lengths))(); | ret.lightScope.field[] = init; | return ret; |} | |/// ditto |auto rcslice(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) |{ | import mir.conv: emplaceRef; | alias E = slice.DeepElement; | | auto result = (() @trusted => slice.shape.mininitRcslice!(Unqual!E))(); | | import mir.algorithm.iteration: each; | each!(emplaceRef!E)(result.lightScope, slice.lightScope); | | return *(() @trusted => cast(Slice!(RCI!E, N)*) &result)(); |} | |/// ditto |auto rcslice(T)(T[] array) |{ | return rcslice(array.sliced); |} | |/// ditto |auto rcslice(T, I)(I[] array) | if (!isImplicitlyConvertible!(I[], T[])) |{ | import mir.ndslice.topology: as; | return rcslice(array.sliced.as!T); |} | |/// |version(mir_test) |@safe pure nothrow @nogc unittest |{ | import mir.ndslice.slice: Slice; | import mir.rc.array: RCI; | auto tensor = rcslice!int(5, 6, 7); | assert(tensor.length == 5); | assert(tensor.elementCount == 5 * 6 * 7); | static assert(is(typeof(tensor) == Slice!(RCI!int, 3))); | | // creates duplicate using `rcslice` | auto dup = tensor.rcslice; | assert(dup == tensor); |} | |/// |version(mir_test) |@safe pure nothrow @nogc unittest |{ | import mir.ndslice.slice: Slice; | import mir.rc.array: RCI; | auto tensor = rcslice([2, 3], 5); | assert(tensor.elementCount == 2 * 3); | assert(tensor[1, 1] == 5); | | import mir.rc.array; | static assert(is(typeof(tensor) == Slice!(RCI!int, 2))); |} | |/// ditto |auto rcslice(size_t dim, Slices...)(Concatenation!(dim, Slices) concatenation) |{ | alias T = Unqual!(concatenation.DeepElement); | auto ret = (()@trusted => mininitRcslice!T(concatenation.shape))(); | ret.lightScope.opIndexAssign(concatenation); | return ret; |} | |/// |version(mir_test) |@safe pure nothrow @nogc unittest |{ | import mir.ndslice.slice: Slice; | import mir.ndslice.topology : iota; | import mir.ndslice.concatenation; | auto tensor = concatenation([2, 3].iota, [3].iota(6)).rcslice; | assert(tensor == [3, 3].iota); | | static assert(is(typeof(tensor) == Slice!(RCI!ptrdiff_t, 2))); |} | |/++ |Allocates an n-dimensional reference-counted (thread-safe) slice without memory initialisation. |Params: | lengths = List of lengths for each dimension. |Returns: | n-dimensional slice |+/ |Slice!(RCI!T, N) | uninitRCslice(T, size_t N)(size_t[N] lengths...) |{ | immutable len = lengths.lengthsProduct; | auto _lengths = lengths; | return typeof(return)(_lengths, RCI!T(RCArray!T(len, false))); |} | |/// |version(mir_test) |@safe pure nothrow @nogc unittest |{ | import mir.ndslice.slice: Slice; | import mir.rc.array: RCI; | auto tensor = uninitRCslice!int(5, 6, 7); | tensor[] = 1; | assert(tensor.length == 5); | assert(tensor.elementCount == 5 * 6 * 7); | static assert(is(typeof(tensor) == Slice!(RCI!int, 3))); |} | |/++ |Allocates a bitwise packed n-dimensional reference-counted (thread-safe) boolean slice. |Params: | lengths = List of lengths for each dimension. |Returns: | n-dimensional bitwise rcslice |See_also: $(SUBREF topology, bitwise). |+/ |Slice!(FieldIterator!(BitField!(RCI!size_t)), N) bitRcslice(size_t N)(size_t[N] lengths...) 
|{ | import mir.ndslice.topology: bitwise; | enum elen = size_t.sizeof * 8; | immutable len = lengths.lengthsProduct; | immutable dlen = (len / elen + (len % elen != 0)); | return RCArray!size_t(dlen).asSlice.bitwise[0 .. len].sliced(lengths); |} | |/// 1D |@safe pure nothrow @nogc |version(mir_test) unittest |{ | auto bitarray = 100.bitRcslice; // allocates 16 bytes total (plus RC context) | assert(bitarray.shape == cast(size_t[1])[100]); | assert(bitarray[72] == false); | bitarray[72] = true; | assert(bitarray[72] == true); |} | |/// 2D |@safe pure nothrow @nogc |version(mir_test) unittest |{ | auto bitmatrix = bitRcslice(20, 6); // allocates 16 bytes total (plus RC context) | assert(bitmatrix.shape == cast(size_t[2])[20, 6]); | assert(bitmatrix[3, 4] == false); | bitmatrix[3, 4] = true; | assert(bitmatrix[3, 4] == true); |} | |/++ |Allocates a minimally initialized n-dimensional reference-counted (thread-safe) slice. |Params: | lengths = list of lengths for each dimension |Returns: | contiguous minimally initialized n-dimensional reference-counted (thread-safe) slice |+/ |Slice!(RCI!T, N) mininitRcslice(T, size_t N)(size_t[N] lengths...) |{ | immutable len = lengths.lengthsProduct; | auto _lengths = lengths; | return Slice!(RCI!T, N)(_lengths, RCI!T(mininitRcarray!T(len))); |} | |/// |version(mir_test) |pure nothrow @nogc unittest |{ | import mir.ndslice.slice: Slice; | import mir.rc.array: RCI; | auto tensor = mininitRcslice!int(5, 6, 7); | assert(tensor.length == 5); | assert(tensor.elementCount == 5 * 6 * 7); | static assert(is(typeof(tensor) == Slice!(RCI!int, 3))); |} | |private alias Pointer(T) = T*; |private alias Pointers(Args...) = staticMap!(Pointer, Args); | |/++ |GC-Allocates an n-dimensional slice. |+/ |template slice(Args...) | if (Args.length) |{ | /// | alias LabelTypes = Args[1 .. $]; | /// | alias T = Args[0]; | | /++ | Params: | lengths = List of lengths for each dimension. | init = Value to initialize with (optional). | Returns: | initialzed n-dimensional slice | +/ | Slice!(T*, N, Contiguous, Pointers!LabelTypes) | slice(size_t N)(size_t[N] lengths...) | if (N >= LabelTypes.length) | { | auto shape = lengths; // DMD variadic bug workaround | immutable len = shape.lengthsProduct; | auto ret = typeof(return)(shape, len == 0 ? null : (()@trusted=>new T[len].ptr)()); | foreach (i, L; LabelTypes) // static | ret._labels[i] = (()@trusted=>new L[shape[i]].ptr)(); | return ret; | } | | /// ditto | Slice!(T*, N, Contiguous, Pointers!LabelTypes) | slice(size_t N)(size_t[N] lengths, T init) | if (N >= LabelTypes.length) | { | import mir.conv: emplaceRef; | import std.array : uninitializedArray; | immutable len = lengths.lengthsProduct; | auto arr = uninitializedArray!(Unqual!T[])(len); | foreach (ref e; arr) | emplaceRef(e, init); | auto ret = typeof(return)(lengths, len == 0 ? 
null : (()@trusted=>cast(T*)arr.ptr)()); | foreach (i, L; LabelTypes) // static | ret._labels[i] = (()@trusted=>new L[shape[i]].ptr)(); | return ret; | } |} | |/// |version(mir_test) |@safe pure nothrow unittest |{ | import mir.ndslice.slice: Slice; | auto tensor = slice!int(5, 6, 7); | assert(tensor.length == 5); | assert(tensor.length!1 == 6); | assert(tensor.elementCount == 5 * 6 * 7); | static assert(is(typeof(tensor) == Slice!(int*, 3))); |} | |/// 2D DataFrame example |version(mir_test) |@safe pure unittest |{ | import mir.ndslice.slice; | import mir.ndslice.allocation: slice; | | import std.datetime.date; | | auto dataframe = slice!(double, Date, string)(4, 3); | assert(dataframe.length == 4); | assert(dataframe.length!1 == 3); | assert(dataframe.elementCount == 4 * 3); | | static assert(is(typeof(dataframe) == | Slice!(double*, 2, Contiguous, Date*, string*))); | | // Dataframe labels are contiguous 1-dimensional slices. | | // Fill row labels | dataframe.label[] = [ | Date(2019, 1, 24), | Date(2019, 2, 2), | Date(2019, 2, 4), | Date(2019, 2, 5), | ]; | | assert(dataframe.label!0[2] == Date(2019, 2, 4)); | | // Fill column labels | dataframe.label!1[] = ["income", "outcome", "balance"]; | | assert(dataframe.label!1[2] == "balance"); | | // Change label element | dataframe.label!1[2] = "total"; | assert(dataframe.label!1[2] == "total"); | | // Attach a newly allocated label | dataframe.label!1 = ["Income", "Outcome", "Balance"].sliced; | | assert(dataframe.label!1[2] == "Balance"); |} | |/++ |GC-Allocates an n-dimensional slice. |Params: | lengths = List of lengths for each dimension. | init = Value to initialize with (optional). |Returns: | initialzed n-dimensional slice |+/ |Slice!(T*, N) | slice(size_t N, T)(size_t[N] lengths, T init) |{ | return .slice!T(lengths, init); |} | |// TODO: make it a dataframe compatible. This function performs copy. |/// ditto |auto slice(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) |{ | if (__ctfe) | { | import mir.ndslice.topology: flattened; | import mir.array.allocation: array; | return slice.flattened.array.sliced(slice.shape); | } | else | { | import mir.conv: emplaceRef; | alias E = slice.DeepElement; | | auto result = (() @trusted => slice.shape.uninitSlice!(Unqual!E))(); | | import mir.algorithm.iteration: each; | each!(emplaceRef!E)(result, slice); | | return (() @trusted => cast(Slice!(E*, N)) result)(); | } |} | |/// |version(mir_test) |@safe pure nothrow unittest |{ | auto tensor = slice([2, 3], 5); | assert(tensor.elementCount == 2 * 3); | assert(tensor[1, 1] == 5); | | // creates duplicate using `slice` | auto dup = tensor.slice; | assert(dup == tensor); |} | |/// ditto |auto slice(size_t dim, Slices...)(Concatenation!(dim, Slices) concatenation) |{ | alias T = Unqual!(concatenation.DeepElement); | static if (hasElaborateAssign!T) | alias fun = .slice; | else | alias fun = .uninitSlice; | auto ret = (()@trusted => fun!T(concatenation.shape))(); | ret.opIndexAssign(concatenation); | return ret; |} | |/// |version(mir_test) |@safe pure nothrow unittest |{ | import mir.ndslice.slice: Slice; | import mir.ndslice.topology : iota; | import mir.ndslice.concatenation; | auto tensor = concatenation([2, 3].iota, [3].iota(6)).slice; | assert(tensor == [3, 3].iota); | | static assert(is(typeof(tensor) == Slice!(ptrdiff_t*, 2))); |} | |/++ |GC-Allocates a bitwise packed n-dimensional boolean slice. |Params: | lengths = List of lengths for each dimension. 
|Returns: | n-dimensional bitwise slice |See_also: $(SUBREF topology, bitwise). |+/ |Slice!(FieldIterator!(BitField!(size_t*)), N) bitSlice(size_t N)(size_t[N] lengths...) |{ | import mir.ndslice.topology: bitwise; | enum elen = size_t.sizeof * 8; | immutable len = lengths.lengthsProduct; | immutable dlen = (len / elen + (len % elen != 0)); | return new size_t[dlen].sliced.bitwise[0 .. len].sliced(lengths); |} | |/// 1D |@safe pure version(mir_test) unittest |{ | auto bitarray = bitSlice(100); // allocates 16 bytes total | assert(bitarray.shape == [100]); | assert(bitarray[72] == false); | bitarray[72] = true; | assert(bitarray[72] == true); |} | |/// 2D |@safe pure version(mir_test) unittest |{ | auto bitmatrix = bitSlice(20, 6); // allocates 16 bytes total | assert(bitmatrix.shape == [20, 6]); | assert(bitmatrix[3, 4] == false); | bitmatrix[3, 4] = true; | assert(bitmatrix[3, 4] == true); |} | |/++ |GC-Allocates an uninitialized n-dimensional slice. |Params: | lengths = list of lengths for each dimension |Returns: | contiguous uninitialized n-dimensional slice |+/ |auto uninitSlice(T, size_t N)(size_t[N] lengths...) |{ | immutable len = lengths.lengthsProduct; | import std.array : uninitializedArray; | auto arr = uninitializedArray!(T[])(len); | return arr.sliced(lengths); |} | |/// |version(mir_test) |@safe pure nothrow unittest |{ | import mir.ndslice.slice: Slice; | auto tensor = uninitSlice!int(5, 6, 7); | assert(tensor.length == 5); | assert(tensor.elementCount == 5 * 6 * 7); | static assert(is(typeof(tensor) == Slice!(int*, 3))); |} | |/++ |GC-Allocates an uninitialized aligned an n-dimensional slice. |Params: | lengths = list of lengths for each dimension | alignment = memory alignment (bytes) |Returns: | contiguous uninitialized n-dimensional slice |+/ |auto uninitAlignedSlice(T, size_t N)(size_t[N] lengths, uint alignment) @system |{ | immutable len = lengths.lengthsProduct; | import std.array : uninitializedArray; | assert((alignment != 0) && ((alignment & (alignment - 1)) == 0), "'alignment' must be a power of two"); | size_t offset = alignment <= 16 ? 0 : alignment - 1; | void* basePtr = uninitializedArray!(byte[])(len * T.sizeof + offset).ptr; | T* alignedPtr = cast(T*)((cast(size_t)(basePtr) + offset) & ~offset); | return alignedPtr.sliced(lengths); |} | |/// |version(mir_test) |@system pure nothrow unittest |{ | import mir.ndslice.slice: Slice; | auto tensor = uninitAlignedSlice!double([5, 6, 7], 64); | tensor[] = 0; | assert(tensor.length == 5); | assert(tensor.elementCount == 5 * 6 * 7); | assert(cast(size_t)(tensor.ptr) % 64 == 0); | static assert(is(typeof(tensor) == Slice!(double*, 3))); |} | |/++ |Allocates an array through a specified allocator and creates an n-dimensional slice over it. |See also $(MREF std, experimental, allocator). |Params: | alloc = allocator | lengths = list of lengths for each dimension | init = default value for array initialization | slice = slice to copy shape and data from |Returns: | a structure with fields `array` and `slice` |Note: | `makeSlice` always returns slice with mutable elements |+/ |auto makeSlice(Allocator, size_t N, Iterator)(auto ref Allocator alloc, Slice!(N, Iterator) slice) |{ | alias T = Unqual!(slice.DeepElement); | return makeSlice!(T)(alloc, slice); |} | |/// ditto |Slice!(T*, N) |makeSlice(T, Allocator, size_t N)(auto ref Allocator alloc, size_t[N] lengths...) 
|{ | import std.experimental.allocator : makeArray; | return alloc.makeArray!T(lengths.lengthsProduct).sliced(lengths); |} | |/// ditto |Slice!(T*, N) |makeSlice(T, Allocator, size_t N)(auto ref Allocator alloc, size_t[N] lengths, T init) |{ | import std.experimental.allocator : makeArray; | immutable len = lengths.lengthsProduct; | auto array = alloc.makeArray!T(len, init); | return array.sliced(lengths); |} | |/// ditto |auto makeSlice(Allocator, Iterator, size_t N, SliceKind kind) | (auto ref Allocator allocator, Slice!(Iterator, N, kind) slice) |{ | import mir.conv: emplaceRef; | alias E = slice.DeepElement; | | auto result = allocator.makeUninitSlice!(Unqual!E)(slice.shape); | | import mir.algorithm.iteration: each; | each!(emplaceRef!E)(result, slice); | | return cast(Slice!(E*, N)) result; |} | |/// Initialization with default value |version(mir_test) |@nogc unittest |{ | import std.experimental.allocator; | import std.experimental.allocator.mallocator; | import mir.algorithm.iteration: all; | import mir.ndslice.topology: map; | | auto sl = Mallocator.instance.makeSlice([2, 3, 4], 10); | auto ar = sl.field; | assert(sl.all!"a == 10"); | | auto sl2 = Mallocator.instance.makeSlice(sl.map!"a * 2"); | auto ar2 = sl2.field; | assert(sl2.all!"a == 20"); | | Mallocator.instance.dispose(ar); | Mallocator.instance.dispose(ar2); |} | |version(mir_test) |@nogc unittest |{ | import std.experimental.allocator; | import std.experimental.allocator.mallocator; | | // cast to your own type | auto sl = makeSlice!double(Mallocator.instance, [2, 3, 4], 10); | auto ar = sl.field; | assert(sl[1, 1, 1] == 10.0); | Mallocator.instance.dispose(ar); |} | |/++ |Allocates an uninitialized array through a specified allocator and creates an n-dimensional slice over it. |See also $(MREF std, experimental, allocator). |Params: | alloc = allocator | lengths = list of lengths for each dimension |Returns: | a structure with fields `array` and `slice` |+/ |Slice!(T*, N) |makeUninitSlice(T, Allocator, size_t N)(auto ref Allocator alloc, size_t[N] lengths...) | if (N) |{ | if (immutable len = lengths.lengthsProduct) | { | auto mem = alloc.allocate(len * T.sizeof); | if (mem.length == 0) assert(0); | auto array = () @trusted { return cast(T[]) mem; }(); | return array.sliced(lengths); | } | else | { | return T[].init.sliced(lengths); | } |} | |/// |version(mir_test) |@system @nogc unittest |{ | import std.experimental.allocator; | import std.experimental.allocator.mallocator; | | auto sl = makeUninitSlice!int(Mallocator.instance, 2, 3, 4); | auto ar = sl.field; | assert(ar.ptr is sl.iterator); | assert(ar.length == 24); | assert(sl.elementCount == 24); | | Mallocator.instance.dispose(ar); |} | |/++ |Allocates a common n-dimensional array from a slice. |Params: | slice = slice |Returns: | multidimensional D array |+/ |auto ndarray(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) |{ | import mir.array.allocation : array; | static if (slice.N == 1) | { | return array(slice); | } | else | { | import mir.ndslice.topology: ipack, map; | return array(slice.ipack!1.map!(a => .ndarray(a))); | } |} | |/// |version(mir_test) |@safe pure nothrow unittest |{ | import mir.ndslice.topology : iota; | auto slice = iota(3, 4); | auto m = slice.ndarray; | static assert(is(typeof(m) == sizediff_t[][])); // sizediff_t is long for 64 bit platforms | assert(m == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]); |} | |/++ |Allocates a common n-dimensional array using data from a slice. 
|Params: | alloc = allocator (optional) | slice = slice |Returns: | multidimensional D array |+/ |auto makeNdarray(T, Allocator, Iterator, size_t N, SliceKind kind)(auto ref Allocator alloc, Slice!(Iterator, N, kind) slice) |{ | import std.experimental.allocator : makeArray; | static if (slice.N == 1) | { | return makeArray!T(alloc, slice); | } | else | { | alias E = typeof(makeNdarray!T(alloc, slice[0])); | auto ret = makeArray!E(alloc, slice.length); | foreach (i, ref e; ret) | e = .makeNdarray!T(alloc, slice[i]); | return ret; | } |} | |/// |version(mir_test) |@nogc unittest |{ | import std.experimental.allocator; | import std.experimental.allocator.mallocator; | import mir.ndslice.topology : iota; | | auto slice = iota(3, 4); | auto m = Mallocator.instance.makeNdarray!long(slice); | | static assert(is(typeof(m) == long[][])); | | static immutable ar = [[0L, 1, 2, 3], [4L, 5, 6, 7], [8L, 9, 10, 11]]; | assert(m == ar); | | foreach (ref row; m) | Mallocator.instance.dispose(row); | Mallocator.instance.dispose(m); |} | |/++ |Shape of a common n-dimensional array. |Params: | array = common n-dimensional array | err = error flag passed by reference |Returns: | static array of dimensions type of `size_t[n]` |+/ |auto shape(T)(T[] array, ref int err) |{ | static if (isDynamicArray!T) | { | size_t[1 + typeof(shape(T.init, err)).length] ret; | | if (array.length) | { | ret[0] = array.length; | ret[1..$] = shape(array[0], err); | if (err) | goto L; | foreach (ar; array) | { | if (shape(ar, err) != ret[1..$]) | err = 1; | if (err) | goto L; | } | } | } | else | { | size_t[1] ret; | ret[0] = array.length; | } | err = 0; |L: | return ret; |} | |/// |version(mir_test) |@safe pure unittest |{ | int err; | size_t[2] shape = [[1, 2, 3], [4, 5, 6]].shape(err); | assert(err == 0); | assert(shape == [2, 3]); | | [[1, 2], [4, 5, 6]].shape(err); | assert(err == 1); |} | |/// Slice from ndarray |version(mir_test) |unittest |{ | import mir.ndslice.allocation: slice, shape; | int err; | auto array = [[1, 2, 3], [4, 5, 6]]; | auto s = array.shape(err).slice!int; | s[] = [[1, 2, 3], [4, 5, 6]]; | assert(s == array); |} | |version(mir_test) |@safe pure unittest |{ | int err; | size_t[2] shape = (int[][]).init.shape(err); | assert(shape[0] == 0); | assert(shape[1] == 0); |} | |version(mir_test) |nothrow unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.topology : iota; | | auto sl = iota([0, 0], 1); | | assert(sl.empty!0); | assert(sl.empty!1); | | auto gcsl1 = sl.slice; | auto gcsl2 = slice!double(0, 0); | | import std.experimental.allocator; | import std.experimental.allocator.mallocator; | | auto sl2 = makeSlice!double(Mallocator.instance, 0, 0); | | Mallocator.instance.dispose(sl2.field); |} | |/++ |Allocates an uninitialized array using `core.stdc.stdlib.malloc` and creates an n-dimensional slice over it. |Params: | lengths = list of lengths for each dimension |Returns: | contiguous uninitialized n-dimensional slice |See_also: | $(LREF stdcSlice), $(LREF stdcFreeSlice) |+/ |Slice!(T*, N) stdcUninitSlice(T, size_t N)(size_t[N] lengths...) |{ | import core.stdc.stdlib: malloc; | immutable len = lengths.lengthsProduct; | auto ptr = len ? cast(T*) malloc(len * T.sizeof) : null; | return ptr.sliced(lengths); |} | |/++ |Allocates a copy of a slice using `core.stdc.stdlib.malloc`. 
|Params: | slice = n-dimensional slice |Returns: | contiguous n-dimensional slice |See_also: | $(LREF stdcUninitSlice), $(LREF stdcFreeSlice) |+/ |auto stdcSlice(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) slice) |{ | alias E = slice.DeepElement; | alias T = Unqual!E; | static assert (!hasElaborateAssign!T, "stdcSlice is not miplemented for slices that have elaborate assign"); | auto ret = stdcUninitSlice!T(slice.shape); | | import mir.conv: emplaceRef; | import mir.algorithm.iteration: each; | each!(emplaceRef!E)(ret, slice); | return ret; |} | |/++ |Frees memory using `core.stdc.stdlib.free`. |Params: | slice = n-dimensional slice |See_also: | $(LREF stdcSlice), $(LREF stdcUninitSlice) |+/ |void stdcFreeSlice(T, size_t N)(Slice!(T*, N) slice) |{ | import core.stdc.stdlib: free; | slice._iterator.free; |} | |/// |version(mir_test) |unittest |{ | import mir.ndslice.topology: iota; | | auto i = iota(3, 4); | auto s = i.stdcSlice; | auto t = s.shape.stdcUninitSlice!size_t; | | t[] = s; | | assert(t == i); | | s.stdcFreeSlice; | t.stdcFreeSlice; |} | |/++ |Allocates an uninitialized aligned array using `core.stdc.stdlib.malloc` and creates an n-dimensional slice over it. |Params: | lengths = list of lengths for each dimension | alignment = memory alignment (bytes) |Returns: | contiguous uninitialized n-dimensional slice |+/ |auto stdcUninitAlignedSlice(T, size_t N)(size_t[N] lengths, uint alignment) @system |{ | immutable len = lengths.lengthsProduct; | import mir.internal.memory: alignedAllocate; | auto arr = (cast(T*)alignedAllocate(len * T.sizeof, alignment))[0 .. len]; | return arr.sliced(lengths); |} | |/// |version(mir_test) |@system pure nothrow unittest |{ | auto tensor = stdcUninitAlignedSlice!double([5, 6, 7], 64); | assert(tensor.length == 5); | assert(tensor.elementCount == 5 * 6 * 7); | assert(cast(size_t)(tensor.ptr) % 64 == 0); | static assert(is(typeof(tensor) == Slice!(double*, 3))); | stdcFreeAlignedSlice(tensor); |} | |/++ |Frees aligned memory allocaged by CRuntime. |Params: | slice = n-dimensional slice |See_also: | $(LREF stdcSlice), $(LREF stdcUninitSlice) |+/ |void stdcFreeAlignedSlice(T, size_t N)(Slice!(T*, N) slice) |{ | import mir.internal.memory: alignedFree; | slice._iterator.alignedFree; |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/allocation.d has no code <<<<<< EOF # path=source-mir-glas-package.lst |/++ | |$(H1 GLAS (Generic Linear Algebra Subprograms)) | |The GLAS are generic routines that provide standard building blocks for performing vector and matrix operations. |The Level 1 GLAS perform scalar, vector and vector-vector operations, |the Level 2 GLAS perform matrix-vector operations, and the Level 3 GLAS perform matrix-matrix operations. | |$(H2 Implemented Routines) | |The list of already implemented features. 
| |$(BOOKTABLE , | $(TR | $(TH Modules) | $(TH Description) | ) | $(TR | $(TDNW $(SUBMODULE l1)) | $(TD vector operations 100% done, partially optimized for now) | ) | $(TR | $(TDNW $(SUBMODULE l2)) | $(TD matrix-vector operations %3 done, partially optimized for now) | ) | $(TR | $(TDNW l3 was moved to $(HTTP github.com/libmir/mir-glas, mir-glas)) | $(TD matrix-matrix operations 50% done) | ) |) | |GLAS is generalization of $(LINK2 http://www.netlib.org/blas/, BLAS) (Basic Linear Algebra Subprograms) |Because the BLAS are efficient, portable, and widely available, they are commonly used in the development of |high quality linear algebra or related software, such as |$(LINK2 http://www.netlib.org/lapack/, LAPACK), |$(LINK2 http://www.numpy.org/, NumPy), or $(LINK2 http://julialang.org/, The Julia language). | |Efficient Level 3 BLAS implementation requires |$(LINK2 https://en.wikipedia.org/wiki/CPU_cache, cache)-friendly matrix blocking. |In additional, $(LINK2 https://en.wikipedia.org/wiki/SIMD, SIMD) instructions should be used for all levels on modern architectures. | |$(H2 Why GLAS) | |GLAS is ... |
    |
    | - fast to execute.
    | - fast to compile.
    | - fast to extend using $(MREF_ALTTEXT ndslices, mir, ndslice) (usage sketch below).
    | - fast to add new instruction set targets.
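    |
    |A minimal sketch of calling the Level 1 routines on ndslices, using only `axpy` and `dot`
    |from $(SUBMODULE l1):
    |
    |-------
    |import mir.glas.l1: axpy, dot;
    |import mir.ndslice.allocation: slice;
    |
    |auto x = slice!double(4);
    |auto y = slice!double(4);
    |x[] = [0, 1, 2, 3];
    |y[] = [4, 5, 6, 7];
    |
    |axpy(2.0, x, y);                  // y := 2*x + y
    |assert(y == [4, 7, 10, 13]);
    |assert(dot(x, y) == 7 + 20 + 39); // 0*4 + 1*7 + 2*10 + 3*13
    |-------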
| |$(H2 Optimization notes) | |GLAS requires recent $(LINK2 https://github.com/ldc-developers/ldc, LDC) >= 1.1.0-beta2. | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Macros: |SUBMODULE = $(MREF_ALTTEXT $1, mir, glas, $1) |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, glas, $1)$(NBSP) |+/ |module mir.glas; | |public import mir.glas.l1; |public import mir.glas.l2; source/mir/glas/package.d has no code <<<<<< EOF # path=source-mir-glas-l1.lst |/++ |$(H2 Level 1) | |$(SCRIPT inhibitQuickIndex = 1;) | |This is a submodule of $(MREF mir,glas). | |The Level 1 GLAS perform vector and vector-vector operations. | |$(BOOKTABLE $(H2 Vector-vector operations), |$(T2 rot, apply Givens rotation) |$(T2 axpy, constant times a vector plus a vector) |$(T2 dot, dot product) |$(T2 dotc, dot product, conjugating the first vector) |) | |$(BOOKTABLE $(H2 Vector operations), |$(TR $(TH Function Name) $(TH Description)) |$(T2 nrm2, Euclidean norm) |$(T2 sqnrm2, square of Euclidean norm) |$(T2 asum, sum of absolute values) |$(T2 iamax, index of max abs value) |$(T2 amax, max abs value) |) | |All functions except $(LREF iamax) work with multidimensional tensors. | |GLAS does not provide `swap`, `scal`, and `copy` functions. |This functionality is part of $(MREF_ALTTEXT ndslice, mir, ndslice) package. Examples can be found below. | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |Macros: |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |SUBMODULE = $(MREF_ALTTEXT $1, mir, glas, $1) |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, glas, $1)$(NBSP) |NDSLICEREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |+/ |module mir.glas.l1; | |/// SWAP |unittest |{ | import std.algorithm.mutation: swap; | import mir.ndslice.allocation: slice; | import mir.algorithm.iteration: each; | import std.typecons: Yes; 1| auto x = slice!double(4); 1| auto y = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| y[] = [4, 5, 6, 7]; 1| each!(swap)(x, y); 1| assert(x == [4, 5, 6, 7]); 1| assert(y == [0, 1, 2, 3]); |} | |/// SCAL |unittest |{ | import mir.ndslice.allocation: slice; | import std.typecons: Yes; 1| auto x = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| x[] *= 2.0; 1| assert(x == [0, 2, 4, 6]); |} | |/// COPY |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(4); 1| auto y = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| y[] = x; 1| assert(y == [0, 1, 2, 3]); |} | |import mir.math.common; |import mir.internal.utility; |import mir.ndslice.slice; |import mir.algorithm.iteration : reduce, each; |import mir.math.common: fastmath; | |import std.traits: Unqual, isPointer; |import std.meta: allSatisfy; | |@fastmath: | |template _rot(alias c, alias s) |{ | @fastmath | void _rot(X, Y)(ref X xr, ref Y yr) | { 4| auto x = xr; 4| auto y = yr; 4| auto t1 = c * x + s * y; | static if (isComplex!(typeof(c))) | { | auto t2 = (c.re - c.im * 1fi) * y; | } | else 4| auto t2 = c * y; | static if (isComplex!(typeof(s))) | { | t2 -= (s.re - s.im * 1fi) * x; | } | else 4| t2 -= s * x; 4| xr = t1; 4| yr = t2; | } |} | |template _axpy(alias a) |{ | @fastmath | void _axpy(X, Y)(ref X x, ref Y y) | { 15| y += a * x; | } |} | |A _fmuladd(A, B, C)(A a, in B b, in C c) |{ 42| return a + b * c; |} | |A _fmuladdc(A, B, C)(A a, in B b, in C c) |{ | static if (isComplex!B) | { | return a + (b.re - b.im * 1fi) * c; | } | else | return a + b * c; |} | |A _nrm2(A, B)(A a, in B b) |{ | 
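        | // reduction step used by sqnrm2/nrm2: add the squared magnitude of `b` (re*re + im*im for complex elements)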
static if (isComplex!B) 4| return a + b.re * b.re + b.im * b.im; | else 8| return a + b * b; |} | |A _asum(A, B)(A a, in B b) |{ | static if (isComplex!B) | { 2| return a + (b.re.fabs + b.im.fabs); | } | else | static if (isFloatingPoint!B) | { 4| return a + b.fabs; | } | else | { | static if (isUnsigned!B) | return a + b; | else | return a + (b >= 0 ? b : -b); | } |} | |A _amax(A, B)(A a, in B b) |{ | static if (isComplex!B) | { 4| return a.fmax(b.re.fabs + b.im.fabs); | } | else | static if (isFloatingPoint!B) | { 6| return a.fmax(b.fabs); | } | else | { | static if (!isUnsigned!B) | b = (b >= 0 ? b : -b); | return a >= b ? a : b; | } |} | |private enum _shouldBeCastedToUnqual(T) = isPointer!T && !is(Unqual!T == T); | |/++ |Applies a plane rotation, where the `c` (cos) and `s` (sin) are scalars. |Uses unrolled loops for strides equal to one. |Params: | c = cos scalar | s = sin scalar | x = first n-dimensional tensor | y = second n-dimensional tensor |BLAS: SROT, DROT, CROT, ZROT, CSROT, ZDROTF |+/ |void rot(C, S, SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(in C c, in S s, Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y) |{ 1| assert(x.shape == y.shape, "constraints: x and y must have equal shapes"); | pragma(inline, false); 1| each!(_rot!(c, s))(x, y); |} | |/// |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(4); 1| auto y = slice!double(4); 1| auto a = slice!double(4); 1| auto b = slice!double(4); 1| double cos = 3.0 / 5; 1| double sin = 4.0 / 5; 1| x[] = [0, 1, 2, 3]; 1| y[] = [4, 5, 6, 7]; 15| foreach (i; 0 .. 4) | { 4| a[i] = cos * x[i] + sin * y[i]; 4| b[i] = cos * y[i] - sin * x[i]; | } 1| rot(cos, sin, x, y); 1| assert(x == a); 1| assert(y == b); |} | |/++ |Constant times a vector plus a vector. |Uses unrolled loops for strides equal to one. |Params: | a = scale parameter | x = first n-dimensional tensor | y = second n-dimensional tensor |BLAS: SAXPY, DAXPY, CAXPY, ZAXPY |+/ |void axpy(A, SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(in A a, Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y) |{ | static if (_shouldBeCastedToUnqual!Iterator2) | { | .axpy(a, cast(Slice!(N, Unqual!Iterator1))x, cast(Slice!(N, Unqual!Iterator2))y); | } | else | { 5| assert(x.shape == y.shape, "constraints: x and y must have equal shapes"); | pragma(inline, false); 5| each!(_axpy!a)(x, y); | } |} | |/// SAXPY, DAXPY |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(4); 1| auto y = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| y[] = [4, 5, 6, 7]; 1| axpy(2.0, x, y); 1| assert(y == [4, 7, 10, 13]); |} | |/// SAXPY, DAXPY |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto a = 3 + 4i; 1| auto x = slice!cdouble(2); 1| auto y = slice!cdouble(2); 1| x[] = [0 + 1i, 2 + 3i]; 1| y[] = [4 + 5i, 6 + 7i]; 1| axpy(a, x, y); 1| assert(y == [a * (0 + 1i) + (4 + 5i), a * (2 + 3i) + (6 + 7i)]); |} | |/++ |Forms the dot product of two vectors. |Uses unrolled loops for strides equal to one. 
|Returns: dot product `conj(xᐪ) × y` |Params: | F = type for summation (optional template parameter) | x = first n-dimensional tensor | y = second n-dimensional tensor |BLAS: SDOT, DDOT, SDSDOT, DSDOT, CDOTC, ZDOTC |+/ |F dot(F, SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y) |{ | static if (allSatisfy!(_shouldBeCastedToUnqual, Iterator1, Iterator2)) | { | return .dot!F(cast(Slice!(Unqual!Iterator1, N, kind1))x, cast(Slice!(Unqual!Iterator2, N, kind2))y); | } | else | { 9| assert(x.shape == y.shape, "constraints: x and y must have equal shapes"); | pragma(inline, false); 9| return reduce!(_fmuladd)(cast(F)(0), x, y); | } |} | |/// SDOT, DDOT |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(4); 1| auto y = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| y[] = [4, 5, 6, 7]; 1| assert(dot(x, y) == 5 + 12 + 21); |} | |/// ditto |auto dot(SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y) |{ 8| return .dot!(Unqual!(typeof(x[0] * y[0])))(x, y); |} | |/// SDOT, DDOT |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(4); 1| auto y = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| y[] = [4, 5, 6, 7]; 1| assert(dot(x, y) == 5 + 12 + 21); |} | |/// SDSDOT, DSDOT |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!float(4); 1| auto y = slice!float(4); 1| x[] = [0, 1, 2, 3]; 1| y[] = [4, 5, 6, 7]; 1| assert(dot!real(x, y) == 5 + 12 + 21); // 80-bit FP for x86 CPUs |} | |/// CDOTU, ZDOTU |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto x = slice!cdouble(2); 1| auto y = slice!cdouble(2); 1| x[] = [0 + 1i, 2 + 3i]; 1| y[] = [4 + 5i, 6 + 7i]; | version(LDC) // DMD Internal error: backend/cgxmm.c 628 | assert(dot(x, y) == (0 + 1i) * (4 + 5i) + (2 + 3i) * (6 + 7i)); |} | |/++ |Forms the dot product of two complex vectors. |Uses unrolled loops for strides equal to one. |Returns: dot product `xᐪ × y` |Params: | F = type for summation (optional template parameter) | x = first n-dimensional tensor | y = second n-dimensional tensor |BLAS: CDOTU, ZDOTU |+/ |F dotc(F, SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y) | if (isComplex!(DeepElementType!(typeof(x))) && isComplex!(DeepElementType!(typeof(y)))) |{ | static if (allSatisfy!(_shouldBeCastedToUnqual, Iterator1, Iterator2)) | { | return .dotc!F(cast(Slice!(N, Unqual!Iterator1))x, cast(Slice!(N, Unqual!Iterator2))y); | } | else | { | assert(x.shape == y.shape, "constraints: x and y must have equal shapes"); | pragma(inline, false); | return reduce!(_fmuladdc)(cast(F)(0), x, y); | } |} | |/// ditto |auto dotc(SliceKind kind1, SliceKind kind2, size_t N, Iterator1, Iterator2)(Slice!(Iterator1, N, kind1) x, Slice!(Iterator2, N, kind2) y) |{ | return .dotc!(Unqual!(typeof(x[x.shape.init] * y[y.shape.init])))(x, y); |} | |/// CDOTC, ZDOTC |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto x = slice!cdouble(2); 1| auto y = slice!cdouble(2); 1| x[] = [0 + 1i, 2 + 3i]; 1| y[] = [4 + 5i, 6 + 7i]; | version(LDC) // DMD Internal error: backend/cgxmm.c 628 | assert(dotc(x, y) == (0 + -1i) * (4 + 5i) + (2 + -3i) * (6 + 7i)); |} | |/++ |Returns the euclidean norm of a vector. |Uses unrolled loops for stride equal to one. 
|Returns: euclidean norm `sqrt(conj(xᐪ) × x)` |Params: | F = type for summation (optional template parameter) | x = n-dimensional tensor |BLAS: SNRM2, DNRM2, SCNRM2, DZNRM2 |+/ |F nrm2(F, SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x) |{ | static if (_shouldBeCastedToUnqual!Iterator) | return .sqnrm2!F(cast(Slice!(N, Unqual!R))x).sqrt; | else 2| return .sqnrm2!F(x).sqrt; |} | |/// ditto |auto nrm2(SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x) |{ 2| return .nrm2!(realType!(typeof(x[x.shape.init] * x[x.shape.init])))(x); |} | |/// SNRM2, DNRM2 |unittest |{ | import mir.ndslice.allocation: slice; | import std.math: sqrt, approxEqual; 1| auto x = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| assert(nrm2(x).approxEqual(sqrt(1.0 + 4 + 9))); |} | |/// SCNRM2, DZNRM2 |unittest |{ | import mir.ndslice.allocation: slice; | import std.math: sqrt, approxEqual; | 1| auto x = slice!cdouble(2); 1| x[] = [0 + 1i, 2 + 3i]; | 1| assert(nrm2(x).approxEqual(sqrt(1.0 + 4 + 9))); |} | |/++ |Forms the square of the euclidean norm. |Uses unrolled loops for stride equal to one. |Returns: `conj(xᐪ) × x` |Params: | F = type for summation (optional template parameter) | x = n-dimensional tensor |+/ |F sqnrm2(F, SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x) |{ | static if (_shouldBeCastedToUnqual!Iterator) | { | return .sqnrm2!F(cast(Slice!(N, Unqual!R))x); | } | else | { | pragma(inline, false); 4| return reduce!(_nrm2)(F(0), x); | } |} | |/// ditto |auto sqnrm2(SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x) |{ 2| return .sqnrm2!(realType!(typeof(x[x.shape.init] * x[x.shape.init])))(x); |} | |/// |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(4); 1| x[] = [0, 1, 2, 3]; 1| assert(sqnrm2(x) == 1.0 + 4 + 9); |} | |/// |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto x = slice!cdouble(2); 1| x[] = [0 + 1i, 2 + 3i]; | 1| assert(sqnrm2(x) == 1.0 + 4 + 9); |} | |/++ |Takes the sum of the `|Re(.)| + |Im(.)|`'s of a vector and | returns a single precision result. |Returns: sum of the `|Re(.)| + |Im(.)|`'s |Params: | F = type for summation (optional template parameter) | x = n-dimensional tensor |BLAS: SASUM, DASUM, SCASUM, DZASUM |+/ |F asum(F, SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x) |{ | static if (_shouldBeCastedToUnqual!Iterator) | { | return .asum!F(cast(Slice!(N, Unqual!R))x); | } | else | { | pragma(inline, false); 2| return reduce!(_asum)(F(0), x); | } |} | |/// ditto |auto asum(SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x) |{ | alias T = DeepElementType!(typeof(x)); 2| return .asum!(realType!T)(x); |} | |/// SASUM, DASUM |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(4); 1| x[] = [0, -1, -2, 3]; 1| assert(asum(x) == 1 + 2 + 3); |} | |/// SCASUM, DZASUM |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto x = slice!cdouble(2); 1| x[] = [0 - 1i, -2 + 3i]; | 1| assert(asum(x) == 1 + 2 + 3); |} | |/++ |Finds the index of the first element having maximum `|Re(.)| + |Im(.)|`. 
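|For an empty input the result is `-1`, as shown in the examples below.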
|Return: index of the first element having maximum `|Re(.)| + |Im(.)|` |Params: x = 1-dimensional tensor |BLAS: ISAMAX, IDAMAX, ICAMAX, IZAMAX |+/ |sizediff_t iamax(Iterator, SliceKind kind)(Slice!(Iterator, 1, kind) x) |{ | static if (_shouldBeCastedToUnqual!Iterator) | { | return .iamax(cast(Slice!(1, Unqual!R))x); | } | else | { | pragma(inline, false); 4| if (x.length == 0) 2| return -1; 2| if (x._stride == 0) 0000000| return 0; | alias T = Unqual!(DeepElementType!(typeof(x))); | alias F = realType!T; | static if (isFloatingPoint!F) 2| auto m = -double.infinity; | else | auto m = F.min; 2| sizediff_t l = x.length; 2| sizediff_t r = x.length; | do | { 10| auto f = x.front; | static if (isComplex!T) | { 4| auto e = f.re.fabs + f.im.fabs; | } | else | static if (isFloatingPoint!T) | { 6| auto e = f.fabs; | } | else | { | static if (isUnsigned!T) | auto e = f; | else | auto e = (f >= 0 ? f : -f); | } | 10| if (e > m) | { 6| m = e; 6| r = x.length; | } 10| x.popFront; | } 10| while (x.length); 2| return l - r; | } |} | |/// ISAMAX, IDAMAX |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(6); | // 0 1 2 3 4 5 1| x[] = [0, -1, -2, -3, 3, 2]; 1| assert(iamax(x) == 3); | // -1 for empty vectors 1| assert(iamax(x[0 .. 0]) == -1); |} | |/// ICAMAX, IZAMAX |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto x = slice!cdouble(4); | // 0 1 2 3 1| x[] = [0 + -1i, -2 + 3i, 2 + 3i, 2 + 2i]; | 1| assert(iamax(x) == 1); | // -1 for empty vectors 1| assert(iamax(x[$ .. $]) == -1); |} | |/++ |Takes the sum of the `|Re(.)| + |Im(.)|`'s of a vector and | returns a single precision result. |Returns: sum of the `|Re(.)| + |Im(.)|`'s |Params: | x = n-dimensional tensor |BLAS: SASUM, DASUM, SCASUM, DZASUM |+/ |auto amax(SliceKind kind, size_t N, Iterator)(Slice!(Iterator, N, kind) x) |{ | static if (_shouldBeCastedToUnqual!Iterator) | { | return .amax(cast(Slice!(N, Unqual!R))x); | } | else | { | pragma(inline, false); | alias T = DeepElementType!(typeof(x)); | alias F = realType!T; 4| return reduce!(_amax)(F(0), x); | } |} | |/// |unittest |{ | import mir.ndslice.allocation: slice; 1| auto x = slice!double(6); 1| x[] = [0, -1, -2, -7, 6, 2]; 1| assert(amax(x) == 7); | // 0 for empty vectors 1| assert(amax(x[0 .. 0]) == 0); |} | |/// |unittest |{ | import mir.ndslice.allocation: slice; | 1| auto x = slice!cdouble(4); 1| x[] = [0 + -1i, -7 + 3i, 2 + 3i, 2 + 2i]; | 1| assert(amax(x) == 10); | // 0 for empty vectors 1| assert(amax(x[$ .. $]) == 0); |} source/mir/glas/l1.d is 99% covered <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-concatenation.lst |/++ |This is a submodule of $(MREF mir, ndslice). | |The module contains $(LREF ._concatenation) routine. |It construct $(LREF Concatenation) structure that can be |assigned to an ndslice of the same shape with `[] = ` or `[] op= `. | |$(SUBREF slice, slicedNdField) can be used to construct ndslice view on top of $(LREF Concatenation). | |$(SUBREF allocation, slice) has special overload for $(LREF Concatenation) that can be used to allocate new ndslice. | |$(BOOKTABLE $(H2 Concatenation constructors), |$(TR $(TH Function Name) $(TH Description)) |$(T2 ._concatenation, Creates a $(LREF Concatenation) view of multiple slices.) |$(T2 pad, Pads with a constant value.) |$(T2 padEdge, Pads with the edge values of slice.) |$(T2 padSymmetric, Pads with the reflection of the slice mirrored along the edge of the slice.) |$(T2 padWrap, Pads with the wrap of the slice along the axis. 
The first values are used to pad the end and the end values are used to pad the beginning.) |) | | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2017-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |See_also: $(SUBMODULE fuse) submodule. | |Macros: |SUBMODULE = $(MREF_ALTTEXT $1, mir, ndslice, $1) |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |+/ |module mir.ndslice.concatenation; | |import std.traits; |import std.meta; | |import mir.internal.utility; |import mir.math.common: optmath; |import mir.ndslice.internal; |import mir.ndslice.slice; |import mir.primitives; | |@optmath: | |private template _expose(size_t maxN, size_t dim) |{ | static @optmath auto _expose(S)(S s) | { | static if (s.N == maxN) | { | return s; | } | else | { | static assert(s.shape.length == s.N, "Cannot create concatenation for packed slice of smaller dimension."); | import mir.ndslice.topology: repeat, unpack; | auto r = s.repeat(1).unpack; | static if (dim) | { | import mir.ndslice.dynamic: transposed; | return r.transposed!(Iota!(1, dim + 1)); | } | else | { | return r; | } | } | } |} | |private template _Expose(size_t maxN, size_t dim) |{ | alias _expose = ._expose!(maxN, dim); | alias _Expose(S) = ReturnType!(_expose!S); |} | | |/++ |Creates a $(LREF Concatenation) view of multiple slices. | |Can be used in combination with itself, $(LREF until), $(SUBREF, allocation, slice), |and $(SUBREF slice, Slice) assignment. | |Params: | slices = tuple of slices and/or concatenations. | |Returns: $(LREF Concatenation). |+/ |auto concatenation(size_t dim = 0, Slices...)(Slices slices) |{ | import mir.algorithm.iteration: reduce; | import mir.utility: min, max; | enum NOf(S) = S.N; | enum NArray = [staticMap!(NOf, Slices)]; | enum minN = size_t.max.reduce!min(NArray); | enum maxN = size_t.min.reduce!max(NArray); | static if (minN == maxN) | { | return Concatenation!(dim, Slices)(slices); | } | else | { | static assert(minN + 1 == maxN); | alias S = staticMap!(_Expose!(maxN, dim), Slices); | S s; | foreach (i, ref e; s) | e = _expose!(maxN, dim)(slices[i]); | return Concatenation!(dim, S)(s); | } |} | |/// Concatenation of slices with different dimmensions. 
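|/// The lower-dimensional argument is expanded with a unit dimension so that the shapes agree.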
|version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: repeat, iota; | | // 0 0 0 | auto vector = size_t.init.repeat([3]); | | // 1 2 3 | // 4 5 6 | auto matrix = iota([2, 3], 1); | | assert(concatenation(vector, matrix).slice == [ | [0, 0, 0], | [1, 2, 3], | [4, 5, 6], | ]); | | vector.popFront; | assert(concatenation!1(vector, matrix).slice == [ | [0, 1, 2, 3], | [0, 4, 5, 6], | ]); |} | |/// Multidimensional |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | import mir.ndslice.slice : slicedNdField; | | // 0, 1, 2 | // 3, 4, 5 | auto a = iota(2, 3); | // 0, 1 | // 2, 3 | auto b = iota(2, 2); | // 0, 1, 2, 3, 4 | auto c = iota(1, 5); | | // 0, 1, 2, 0, 1 | // 3, 4, 5, 2, 3 | // | // 0, 1, 2, 3, 4 | // construction phase | auto s = concatenation(concatenation!1(a, b), c); | | // allocation phase | auto d = s.slice; | assert(d == [ | [0, 1, 2, 0, 1], | [3, 4, 5, 2, 3], | [0, 1, 2, 3, 4], | ]); | | // optimal fragmentation for output/writing/buffering | auto testData = [ | [0, 1, 2], [0, 1], | [3, 4, 5], [2, 3], | [0, 1, 2, 3, 4], | ]; | size_t i; | s.forEachFragment!((fragment) { | pragma(inline, false); //reduces template bloat | assert(fragment == testData[i++]); | }); | assert(i == testData.length); | | // lazy ndslice view | assert(s.slicedNdField == d); |} | |/// 1D |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | import mir.ndslice.slice : slicedNdField; | | size_t i; | auto a = 3.iota; | auto b = iota([6], a.length); | auto s = concatenation(a, b); | assert(s.length == a.length + b.length); | // fast iteration with until | s.until!((elem){ assert(elem == i++); return false; }); | // allocation with slice | assert(s.slice == s.length.iota); | // 1D or multidimensional assignment | auto d = slice!double(s.length); | d[] = s; | assert(d == s.length.iota); | d.opIndexOpAssign!"+"(s); | assert(d == iota([s.length], 0, 2)); | | // lazy ndslice view | assert(s.slicedNdField == s.length.iota); |} | |/// |enum bool isConcatenation(T) = is(T : Concatenation!(dim, Slices), size_t dim, Slices...); |/// |enum size_t concatenationDimension(T : Concatenation!(dim, Slices), size_t dim, Slices...) = dim; | |/// |struct Concatenation(size_t dim, Slices...) | if (Slices.length > 1) |{ | @optmath: | | /// | auto lightConst()() const @property | { | import std.format; | import mir.qualifier; | import mir.ndslice.topology: iota; | return mixin("Concatenation!(dim, staticMap!(LightConstOf, Slices))(%(_slices[%s].lightConst,%)].lightConst)".format(_slices.length.iota)); | } | | /// | auto lightImmutable()() immutable @property | { | import std.format; | import mir.ndslice.topology: iota; | import mir.qualifier; | return mixin("Concatenation!(dim, staticMap!(LightImmutableOf, Slices))(%(_slices[%s].lightImmutable,%)].lightImmutable)".format(_slices.length.iota)); | } | | /// Slices and sub-concatenations | Slices _slices; | | package enum N = typeof(Slices[0].shape).length; | | static assert(dim < N); | | alias DeepElement = CommonType!(staticMap!(DeepElementType, Slices)); | | /// Length primitive | size_t length(size_t d = 0)() const @property | { | static if (d == dim) | { | size_t length; | foreach(ref slice; _slices) | length += slice.length!d; | return length; | } | else | { | return _slices[0].length!d; | } | } | | /// Total elements count in the concatenation. 
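    | /// (the product of `length!i` over all `N` dimensions)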
| size_t elementCount()() const @property | { | size_t count = 1; | foreach(i; Iota!N) | count *= length!i; | return count; | } | | deprecated("use elementCount instead") | alias elementsCount = elementCount; | | /// Shape of the concatenation. | size_t[N] shape()() const @property | { | typeof(return) ret; | foreach(i; Iota!N) | ret[i] = length!i; | return ret; | } | | /// Multidimensional input range primitives | bool empty(size_t d = 0)() const @property | { | static if (d == dim) | { | foreach(ref slice; _slices) | if (slice.empty!d) | return true; | return false; | } | else | { | return _slices[0].empty!d; | } | } | | /// ditto | void popFront(size_t d = 0)() | { | static if (d == dim) | { | foreach(i, ref slice; _slices) | { | static if (i != Slices.length - 1) | if (slice.empty!d) | continue; | return slice.popFront!d; | } | } | else | { | foreach_reverse (ref slice; _slices) | slice.popFront!d; | } | } | | /// ditto | auto front(size_t d = 0)() | { | static if (d == dim) | { | foreach(i, ref slice; _slices) | { | static if (i != Slices.length - 1) | if (slice.empty!d) | continue; | return slice.front!d; | } | } | else | { | import mir.ndslice.internal: frontOfDim; | enum elemDim = d < dim ? dim - 1 : dim; | return concatenation!elemDim(frontOfDim!(d, _slices)); | } | } | | /// Simplest multidimensional random access primitive | auto opIndex()(size_t[N] indexes...) | { | foreach(i, ref slice; _slices[0 .. $-1]) | { | ptrdiff_t diff = indexes[dim] - slice.length!dim; | if (diff < 0) | return slice[indexes]; | indexes[dim] = diff; | } | assert(indexes[dim] < _slices[$-1].length!dim); | return _slices[$-1][indexes]; | } |} | | |/++ |Performs `fun(st.front!d)`. | |This functions is useful when `st.front!d` has not a common type and fails to compile. | |Can be used instead of $(LREF .Concatenation.front) |+/ |auto applyFront(size_t d = 0, alias fun, size_t dim, Slices...)(Concatenation!(dim, Slices) st) |{ | static if (d == dim) | { | foreach(i, ref slice; st._slices) | { | static if (i != Slices.length - 1) | if (slice.empty!d) | continue; | return fun(slice.front!d); | } | } | else | { | import mir.ndslice.internal: frontOfDim; | enum elemDim = d < dim ? dim - 1 : dim; | auto slices = st._slices; | return fun(concatenation!elemDim(frontOfDim!(d, slices))); | } |} | |/++ |Pads with a constant value. | |Params: | direction = padding direction. | Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`. | s = $(SUBREF slice, Slice) or ndField | value = initial value for padding | lengths = list of lengths | |Returns: $(LREF Concatenation) | |See_also: $(LREF ._concatenation) examples. |+/ |auto pad(string direction = "both", S, T, size_t N)(S s, T value, size_t[N] lengths...) | if (hasShape!S && N == typeof(S.shape).length) |{ | return .pad!([Iota!N], [Repeat!(N, direction)])(s, value, lengths); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([3], 1) | .pad(0, [2]) | .slice; | | assert(pad == [0, 0, 1, 2, 3, 0, 0]); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([2, 2], 1) | .pad(0, [2, 1]) | .slice; | | assert(pad == [ | [0, 0, 0, 0], | [0, 0, 0, 0], | | [0, 1, 2, 0], | [0, 3, 4, 0], | | [0, 0, 0, 0], | [0, 0, 0, 0]]); |} | |/++ |Pads with a constant value. | |Params: | dimensions = dimensions to pad. | directions = padding directions. 
| Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`. | |Returns: $(LREF Concatenation) | |See_also: $(LREF ._concatenation) examples. |+/ |template pad(size_t[] dimensions, string[] directions) | if (dimensions.length && dimensions.length == directions.length) |{ | @optmath: | | /++ | Params: | s = $(SUBREF slice, Slice) or ndField | value = initial value for padding | lengths = list of lengths | Returns: $(LREF Concatenation) | See_also: $(LREF ._concatenation) examples. | +/ | auto pad(S, T)(S s, T value, size_t[dimensions.length] lengths...) | { | import mir.ndslice.topology: repeat; | | enum d = dimensions[$ - 1]; | enum q = directions[$ - 1]; | enum N = typeof(S.shape).length; | | size_t[N] len; | auto _len = s.shape; | foreach(i; Iota!(len.length)) | static if (i != d) | len[i] = _len[i]; | else | len[i] = lengths[$ - 1]; | | auto p = repeat(value, len); | static if (q == "both") | auto r = concatenation!d(p, s, p); | else | static if (q == "pre") | auto r = concatenation!d(p, s); | else | static if (q == "post") | auto r = concatenation!d(s, p); | else | static assert(0, `allowed directions are "both", "pre", and "post"`); | | static if (dimensions.length == 1) | return r; | else | return .pad!(dimensions[0 .. $ - 1], directions[0 .. $ - 1])(r, value, lengths[0 .. $ -1]); | } |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([2, 2], 1) | .pad!([1], ["pre"])(0, [2]) | .slice; | | assert(pad == [ | [0, 0, 1, 2], | [0, 0, 3, 4]]); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([2, 2], 1) | .pad!([0, 1], ["both", "post"])(0, [2, 1]) | .slice; | | assert(pad == [ | [0, 0, 0], | [0, 0, 0], | | [1, 2, 0], | [3, 4, 0], | | [0, 0, 0], | [0, 0, 0]]); |} | |/++ |Pads with the wrap of the slice along the axis. The first values are used to pad the end and the end values are used to pad the beginning. | |Params: | direction = padding direction. | Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`. | s = $(SUBREF slice, Slice) | lengths = list of lengths for each dimension. Each length must be less or equal to the corresponding slice length. |Returns: $(LREF Concatenation) |See_also: $(LREF ._concatenation) examples. |+/ |auto padWrap(string direction = "both", Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) s, size_t[N] lengths...) |{ | return .padWrap!([Iota!N], [Repeat!(N, direction)])(s, lengths); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([3], 1) | .padWrap([2]) | .slice; | | assert(pad == [2, 3, 1, 2, 3, 1, 2]); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([2, 2], 1) | .padWrap([2, 1]) | .slice; | | assert(pad == [ | [2, 1, 2, 1], | [4, 3, 4, 3], | | [2, 1, 2, 1], | [4, 3, 4, 3], | | [2, 1, 2, 1], | [4, 3, 4, 3]]); |} | |/++ |Pads with the wrap of the slice along the axis. The first values are used to pad the end and the end values are used to pad the beginning. | |Params: | dimensions = dimensions to pad. | directions = padding directions. | Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`. | |Returns: $(LREF Concatenation) | |See_also: $(LREF ._concatenation) examples. 
|+/ |template padWrap(size_t[] dimensions, string[] directions) | if (dimensions.length && dimensions.length == directions.length) |{ | @optmath: | | /++ | Params: | s = $(SUBREF slice, Slice) | lengths = list of lengths for each dimension. Each length must be less or equal to the corresponding slice length. | Returns: $(LREF Concatenation) | See_also: $(LREF ._concatenation) examples. | +/ | auto padWrap(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) s, size_t[dimensions.length] lengths...) | { | enum d = dimensions[$ - 1]; | enum q = directions[$ - 1]; | | static if (d == 0 || kind != Contiguous) | { | alias _s = s; | } | else | { | import mir.ndslice.topology: canonical; | auto _s = s.canonical; | } | | assert(lengths[$ - 1] <= s.length!d); | | static if (dimensions.length != 1) | alias next = .padWrap!(dimensions[0 .. $ - 1], directions[0 .. $ - 1]); | | static if (q == "pre" || q == "both") | { | auto _pre = _s; | _pre.popFrontExactly!d(s.length!d - lengths[$ - 1]); | static if (dimensions.length == 1) | alias pre = _pre; | else | auto pre = next(_pre, lengths[0 .. $ - 1]); | } | | static if (q == "post" || q == "both") | { | auto _post = _s; | _post.popBackExactly!d(s.length!d - lengths[$ - 1]); | static if (dimensions.length == 1) | alias post = _post; | else | auto post = next(_post, lengths[0 .. $ - 1]); | } | | static if (dimensions.length == 1) | alias r = s; | else | auto r = next(s, lengths[0 .. $ - 1]); | | static if (q == "both") | return concatenation!d(pre, r, post); | else | static if (q == "pre") | return concatenation!d(pre, r); | else | static if (q == "post") | return concatenation!d(r, post); | else | static assert(0, `allowed directions are "both", "pre", and "post"`); | } |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([2, 3], 1) | .padWrap!([1], ["pre"])([1]) | .slice; | | assert(pad == [ | [3, 1, 2, 3], | [6, 4, 5, 6]]); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([2, 2], 1) | .padWrap!([0, 1], ["both", "post"])([2, 1]) | .slice; | | assert(pad == [ | [1, 2, 1], | [3, 4, 3], | | [1, 2, 1], | [3, 4, 3], | | [1, 2, 1], | [3, 4, 3]]); |} | |/++ |Pads with the reflection of the slice mirrored along the edge of the slice. | |Params: | direction = padding direction. | Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`. | s = $(SUBREF slice, Slice) | lengths = list of lengths for each dimension. Each length must be less or equal to the corresponding slice length. |Returns: $(LREF Concatenation) |See_also: $(LREF ._concatenation) examples. |+/ |auto padSymmetric(string direction = "both", Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) s, size_t[N] lengths...) |{ | return .padSymmetric!([Iota!N], [Repeat!(N, direction)])(s, lengths); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([3], 1) | .padSymmetric([2]) | .slice; | | assert(pad == [2, 1, 1, 2, 3, 3, 2]); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([2, 2], 1) | .padSymmetric([2, 1]) | .slice; | | assert(pad == [ | [3, 3, 4, 4], | [1, 1, 2, 2], | | [1, 1, 2, 2], | [3, 3, 4, 4], | | [3, 3, 4, 4], | [1, 1, 2, 2]]); |} | |/++ |Pads with the reflection of the slice mirrored along the edge of the slice. 
| |Params: | dimensions = dimensions to pad. | directions = padding directions. | Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`. | |Returns: $(LREF Concatenation) | |See_also: $(LREF ._concatenation) examples. |+/ |template padSymmetric(size_t[] dimensions, string[] directions) | if (dimensions.length && dimensions.length == directions.length) |{ | @optmath: | | /++ | Params: | s = $(SUBREF slice, Slice) | lengths = list of lengths for each dimension. Each length must be less or equal to the corresponding slice length. | Returns: $(LREF Concatenation) | See_also: $(LREF ._concatenation) examples. | +/ | auto padSymmetric(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) s, size_t[dimensions.length] lengths...) | { | enum d = dimensions[$ - 1]; | enum q = directions[$ - 1]; | import mir.ndslice.dynamic: reversed; | | | static if (kind == Contiguous) | { | import mir.ndslice.topology: canonical; | auto __s = s.canonical; | } | else | { | alias __s = s; | } | | static if (kind == Universal || d != N - 1) | { | auto _s = __s.reversed!d; | } | else | static if (N == 1) | { | import mir.ndslice.topology: retro; | auto _s = s.retro; | } | else | { | import mir.ndslice.topology: retro; | auto _s = __s.retro.reversed!(Iota!d, Iota!(d + 1, N)); | } | | assert(lengths[$ - 1] <= s.length!d); | | static if (dimensions.length != 1) | alias next = .padSymmetric!(dimensions[0 .. $ - 1], directions[0 .. $ - 1]); | | static if (q == "pre" || q == "both") | { | auto _pre = _s; | _pre.popFrontExactly!d(s.length!d - lengths[$ - 1]); | static if (dimensions.length == 1) | alias pre = _pre; | else | auto pre = next(_pre, lengths[0 .. $ - 1]); | } | | static if (q == "post" || q == "both") | { | auto _post = _s; | _post.popBackExactly!d(s.length!d - lengths[$ - 1]); | static if (dimensions.length == 1) | alias post = _post; | else | auto post = next(_post, lengths[0 .. $ - 1]); | } | | static if (dimensions.length == 1) | alias r = s; | else | auto r = next(s, lengths[0 .. $ - 1]); | | static if (q == "both") | return concatenation!d(pre, r, post); | else | static if (q == "pre") | return concatenation!d(pre, r); | else | static if (q == "post") | return concatenation!d(r, post); | else | static assert(0, `allowed directions are "both", "pre", and "post"`); | } |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([2, 3], 1) | .padSymmetric!([1], ["pre"])([2]) | .slice; | | assert(pad == [ | [2, 1, 1, 2, 3], | [5, 4, 4, 5, 6]]); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([2, 2], 1) | .padSymmetric!([0, 1], ["both", "post"])([2, 1]) | .slice; | | assert(pad == [ | [3, 4, 4], | [1, 2, 2], | | [1, 2, 2], | [3, 4, 4], | | [3, 4, 4], | [1, 2, 2]]); |} | |/++ |Pads with the edge values of slice. | |Params: | direction = padding direction. | Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`. | s = $(SUBREF slice, Slice) | lengths = list of lengths for each dimension. |Returns: $(LREF Concatenation) |See_also: $(LREF ._concatenation) examples. |+/ |auto padEdge(string direction = "both", Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) s, size_t[N] lengths...) 
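|// forwards to the general `padEdge` template below, applying the same `direction` to every dimension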
|{ | return .padEdge!([Iota!N], [Repeat!(N, direction)])(s, lengths); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([3], 1) | .padEdge([2]) | .slice; | | assert(pad == [1, 1, 1, 2, 3, 3, 3]); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([2, 2], 1) | .padEdge([2, 1]) | .slice; | | assert(pad == [ | [1, 1, 2, 2], | [1, 1, 2, 2], | | [1, 1, 2, 2], | [3, 3, 4, 4], | | [3, 3, 4, 4], | [3, 3, 4, 4]]); |} | |/++ |Pads with the edge values of slice. | |Params: | dimensions = dimensions to pad. | directions = padding directions. | Direction can be one of the following values: `"both"`, `"pre"`, and `"post"`. | |Returns: $(LREF Concatenation) | |See_also: $(LREF ._concatenation) examples. |+/ |template padEdge(size_t[] dimensions, string[] directions) | if (dimensions.length && dimensions.length == directions.length) |{ | @optmath: | | /++ | Params: | s = $(SUBREF slice, Slice) | lengths = list of lengths for each dimension. | Returns: $(LREF Concatenation) | See_also: $(LREF ._concatenation) examples. | +/ | auto padEdge(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) s, size_t[dimensions.length] lengths...) | { | enum d = dimensions[$ - 1]; | enum q = directions[$ - 1]; | | static if (kind == Universal) | { | alias _s = s; | } | else | static if (d != N - 1) | { | import mir.ndslice.topology: canonical; | auto _s = s.canonical; | } | else | { | import mir.ndslice.topology: universal; | auto _s = s.universal; | } | | static if (dimensions.length != 1) | alias next = .padEdge!(dimensions[0 .. $ - 1], directions[0 .. $ - 1]); | | static if (q == "pre" || q == "both") | { | auto _pre = _s; | _pre._strides[d] = 0; | _pre._lengths[d] = lengths[$ - 1]; | static if (dimensions.length == 1) | alias pre = _pre; | else | auto pre = next(_pre, lengths[0 .. $ - 1]); | | } | | static if (q == "post" || q == "both") | { | auto _post = _s; | _post._iterator += _post.backIndex!d; | _post._strides[d] = 0; | _post._lengths[d] = lengths[$ - 1]; | static if (dimensions.length == 1) | alias post = _post; | else | auto post = next(_post, lengths[0 .. $ - 1]); | } | | static if (dimensions.length == 1) | alias r = s; | else | auto r = next( s, lengths[0 .. $ - 1]); | | static if (q == "both") | return concatenation!d(pre, r, post); | else | static if (q == "pre") | return concatenation!d(pre, r); | else | static if (q == "post") | return concatenation!d(r, post); | else | static assert(0, `allowed directions are "both", "pre", and "post"`); | } |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([2, 3], 1) | .padEdge!([0], ["pre"])([2]) | .slice; | | assert(pad == [ | [1, 2, 3], | [1, 2, 3], | | [1, 2, 3], | [4, 5, 6]]); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: iota; | | auto pad = iota([2, 2], 1) | .padEdge!([0, 1], ["both", "post"])([2, 1]) | .slice; | | assert(pad == [ | [1, 2, 2], | [1, 2, 2], | | [1, 2, 2], | [3, 4, 4], | | [3, 4, 4], | [3, 4, 4]]); |} | |/++ |Iterates 1D fragments in $(SUBREF slice, Slice) or $(LREF Concatenation) in optimal for buffering way. | |See_also: $(LREF ._concatenation) examples. 
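|
|A minimal sketch, using only names defined in this module:
|
|-------
|import mir.ndslice.topology: iota;
|
|auto s = concatenation(3.iota, iota([3], 3));
|size_t n;
|s.forEachFragment!((fragment) {
|    n += fragment.length; // each fragment is a contiguous 1D piece
|});
|assert(n == s.elementCount);
|-------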
|+/ |template forEachFragment(alias pred) |{ | @optmath: | | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | { | /++ | Specialization for slices | Params: | sl = $(SUBREF slice, Slice) | +/ | void forEachFragment(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) sl) | { | static if (N == 1) | { | pred(sl); | } | else | static if (kind == Contiguous) | { | import mir.ndslice.topology: flattened; | pred(sl.flattened); | } | else | { | if (!sl.empty) do | { | .forEachFragment!pred(sl.front); | sl.popFront; | } | while(!sl.empty); | } | } | | /++ | Specialization for concatenations | Params: | st = $(LREF Concatenation) | +/ | void forEachFragment(size_t dim, Slices...)(Concatenation!(dim, Slices) st) | { | static if (dim == 0) | { | foreach (i, ref slice; st._slices) | .forEachFragment!pred(slice); | } | else | { | if (!st.empty) do | { | st.applyFront!(0, .forEachFragment!pred); | st.popFront; | } | while(!st.empty); | } | } | } | else | alias forEachFragment = .forEachFragment!(naryFun!pred); |} | |/++ |Iterates elements in $(SUBREF slice, Slice) or $(LREF Concatenation) |until pred returns true. | |Returns: false if pred returned false for all elements and true otherwise. | |See_also: $(LREF ._concatenation) examples. |+/ |template until(alias pred) |{ | @optmath: | | import mir.functional: naryFun; | static if (__traits(isSame, naryFun!pred, pred)) | { | /++ | Specialization for slices | Params: | sl = $(SUBREF slice, Slice) | +/ | bool until(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) sl) | { | static if (N == 1) | { | pragma(inline, false); | alias f = pred; | } | else | alias f = .until!pred; | if (!sl.empty) do | { | if (f(sl.front)) | return true; | sl.popFront; | } | while(!sl.empty); | return false; | } | | /++ | Specialization for concatenations | Params: | st = $(LREF Concatenation) | +/ | bool until(size_t dim, Slices...)(Concatenation!(dim, Slices) st) | { | static if (dim == 0) | { | foreach (i, ref slice; st._slices) | { | if (.until!pred(slice)) | return true; | } | } | else | { | if (!st.empty) do | { | if (st.applyFront!(0, .until!pred)) | return true; | st.popFront; | } | while(!st.empty); | } | return false; | } | } | else | alias until = .until!(naryFun!pred); |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/concatenation.d has no code <<<<<< EOF # path=..-..-..-.dub-packages-mir-algorithm-3.7.25-mir-algorithm-source-mir-ndslice-slice.lst |/++ |This is a submodule of $(MREF mir, ndslice). | |Safety_note: | User-defined iterators should care about their safety except bounds checks. | Bounds are checked in ndslice code. | |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko | |$(BOOKTABLE $(H2 Definitions), |$(TR $(TH Name) $(TH Description)) |$(T2 Slice, N-dimensional slice.) |$(T2 SliceKind, SliceKind of $(LREF Slice) enumeration.) |$(T2 Universal, Alias for $(LREF .SliceKind.universal).) |$(T2 Canonical, Alias for $(LREF .SliceKind.canonical).) |$(T2 Contiguous, Alias for $(LREF .SliceKind.contiguous).) |$(T2 sliced, Creates a slice on top of an iterator, a pointer, or an array's pointer.) |$(T2 slicedField, Creates a slice on top of a field, a random access range, or an array.) |$(T2 slicedNdField, Creates a slice on top of an ndField.) |$(T2 kindOf, Extracts $(LREF SliceKind).) |$(T2 isSlice, Extracts dimension count from a type. Extracts `null` if the template argument is not a `Slice`.) 
|$(T2 Structure, A tuple of lengths and strides.) |) | |Macros: |SUBREF = $(REF_ALTTEXT $(TT $2), $2, mir, ndslice, $1)$(NBSP) |T2=$(TR $(TDNW $(LREF $1)) $(TD $+)) |T4=$(TR $(TDNW $(LREF $1)) $(TD $2) $(TD $3) $(TD $4)) |STD = $(TD $(SMALL $0)) |+/ |module mir.ndslice.slice; | |import mir.internal.utility : Iota; |import mir.math.common : optmath; |import mir.ndslice.concatenation; |import mir.ndslice.field; |import mir.ndslice.internal; |import mir.ndslice.iterator; |import mir.ndslice.traits: isIterator; |import mir.primitives; |import mir.qualifier; |import mir.utility; |import std.meta; |import std.traits; | |public import mir.primitives: DeepElementType; | |/++ |Checks if type T has asSlice property and its returns a slices. |Aliases itself to a dimension count |+/ |template hasAsSlice(T) |{ | static if (__traits(hasMember, T, "asSlice")) | enum size_t hasAsSlice = typeof(T.init.asSlice).N; | else | enum size_t hasAsSlice = 0; |} | |/// |version(mir_test) unittest |{ | import mir.series; | static assert(!hasAsSlice!(int[])); | static assert(hasAsSlice!(SeriesMap!(int, string)) == 1); |} | |/++ |Check if $(LREF toConst) function can be called with type T. |+/ |enum isConvertibleToSlice(T) = isSlice!T || isDynamicArray!T || hasAsSlice!T; | |/// |version(mir_test) unittest |{ | import mir.series: SeriesMap; | static assert(isConvertibleToSlice!(immutable int[])); | static assert(isConvertibleToSlice!(string[])); | static assert(isConvertibleToSlice!(SeriesMap!(string, int))); | static assert(isConvertibleToSlice!(Slice!(int*))); |} | |/++ |Reurns: | Ndslice view in the same data. |See_also: $(LREF isConvertibleToSlice). |+/ |auto toSlice(Iterator, size_t N, SliceKind kind)(Slice!(Iterator, N, kind) val) |{ | return val; |} | |/// ditto |auto toSlice(Iterator, size_t N, SliceKind kind)(const Slice!(Iterator, N, kind) val) |{ | return val[]; |} | |/// ditto |auto toSlice(Iterator, size_t N, SliceKind kind)(immutable Slice!(Iterator, N, kind) val) |{ | return val[]; |} | |/// ditto |auto toSlice(T)(T[] val) |{ | return val.sliced; |} | |/// ditto |auto toSlice(T)(T val) | if (hasAsSlice!T || __traits(hasMember, T, "moveToSlice")) |{ | static if (__traits(hasMember, T, "moveToSlice")) | return val.moveToSlice; | else | return val.asSlice; |} | |/// ditto |auto toSlice(T)(ref T val) | if (hasAsSlice!T) |{ | return val.asSlice; |} | |/// |template toSlices(args...) |{ | static if (args.length) | { | alias arg = args[0]; | @optmath @property auto ref slc()() | { | return toSlice(arg); | } | alias toSlices = AliasSeq!(slc, toSlices!(args[1..$])); | } | else | alias toSlices = AliasSeq!(); |} | |/// |template isSlice(T) |{ | static if (is(T : Slice!(Iterator, N, kind), Iterator, size_t N, SliceKind kind)) | enum bool isSlice = true; | else | enum bool isSlice = false; |} | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | alias A = uint[]; | alias S = Slice!(int*); | | static assert(isSlice!S); | static assert(!isSlice!A); |} | |/++ |SliceKind of $(LREF Slice). |See_also: | $(SUBREF topology, universal), | $(SUBREF topology, canonical), | $(SUBREF topology, assumeCanonical), | $(SUBREF topology, assumeContiguous). |+/ |alias SliceKind = mir_slice_kind; |/// ditto |enum mir_slice_kind |{ | /// A slice has strides for all dimensions. | universal, | /// A slice has >=2 dimensions and row dimension is contiguous. | canonical, | /// A slice is a flat contiguous data without strides. | contiguous, |} | |/++ |Alias for $(LREF .SliceKind.universal). 
| |See_also: | Internal Binary Representation section in $(LREF Slice). |+/ |alias Universal = SliceKind.universal; |/++ |Alias for $(LREF .SliceKind.canonical). | |See_also: | Internal Binary Representation section in $(LREF Slice). |+/ |alias Canonical = SliceKind.canonical; |/++ |Alias for $(LREF .SliceKind.contiguous). | |See_also: | Internal Binary Representation section in $(LREF Slice). |+/ |alias Contiguous = SliceKind.contiguous; | |/// Extracts $(LREF SliceKind). |enum kindOf(T : Slice!(Iterator, N, kind), Iterator, size_t N, SliceKind kind) = kind; | |/// |@safe pure nothrow @nogc |version(mir_test) unittest |{ | static assert(kindOf!(Slice!(int*, 1, Universal)) == Universal); |} | |/// Extracts iterator type from a $(LREF Slice). |alias IteratorOf(T : Slice!(Iterator, N, kind), Iterator, size_t N, SliceKind kind) = Iterator; | |private template SkipDimension(size_t dimension, size_t index) |{ | static if (index < dimension) | enum SkipDimension = index; | else | static if (index == dimension) | static assert (0, "SkipInex: wrong index"); | else | enum SkipDimension = index - 1; |} | |/++ |Creates an n-dimensional slice-shell over an iterator. |Params: | iterator = An iterator, a pointer, or an array. | lengths = A list of lengths for each dimension |Returns: | n-dimensional slice |+/ |auto sliced(size_t N, Iterator)(Iterator iterator, size_t[N] lengths...) | if (!isStaticArray!Iterator && N | && !is(Iterator : Slice!(_Iterator, _N, kind), _Iterator, size_t _N, SliceKind kind)) |{ | alias C = ImplicitlyUnqual!(typeof(iterator)); 0000000| size_t[N] _lengths; | foreach (i; Iota!N) 0000000| _lengths[i] = lengths[i]; 0000000| ptrdiff_t[1] _strides = 0; | static if (isDynamicArray!Iterator) | { | assert(lengthsProduct(_lengths) <= iterator.length, | "array length should be greater or equal to the product of constructed ndslice lengths"); | auto ptr = iterator.length ? &iterator[0] : null; | return Slice!(typeof(C.init[0])*, N)(_lengths, ptr); | } | else | { | // break safety 0000000| if (false) | { 0000000| ++iterator; 0000000| --iterator; 0000000| iterator += 34; 0000000| iterator -= 34; | } | import core.lifetime: move; 0000000| return Slice!(C, N)(_lengths, iterator.move); | } |} | |/// $(LINK2 https://en.wikipedia.org/wiki/Vandermonde_matrix, Vandermonde matrix) |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.filling: fillVandermonde; | import mir.ndslice.allocation: uninitSlice; | auto x = [1.0, 2, 3, 4, 5].sliced; | auto v = uninitSlice!double(x.length, x.length); | v.fillVandermonde(x); | assert(v == | [[ 1.0, 1, 1, 1, 1], | [ 1.0, 2, 4, 8, 16], | [ 1.0, 3, 9, 27, 81], | [ 1.0, 4, 16, 64, 256], | [ 1.0, 5, 25, 125, 625]]); |} | |/// Random access range primitives for slices over user defined types |@safe pure nothrow @nogc version(mir_test) unittest |{ | struct MyIota | { | //`[index]` operator overloading | auto opIndex(size_t index) @safe nothrow | { | return index; | } | | auto lightConst()() const @property { return MyIota(); } | auto lightImmutable()() immutable @property { return MyIota(); } | } | import mir.ndslice.iterator: FieldIterator; | alias Iterator = FieldIterator!MyIota; | alias S = Slice!(Iterator, 2); | import std.range.primitives; | static assert(hasLength!S); | static assert(hasSlicing!S); | static assert(isRandomAccessRange!S); | | auto slice = Iterator().sliced(20, 10); | assert(slice[1, 2] == 12); | auto sCopy = slice.save; | assert(slice[1, 2] == 12); |} | |/++ |Creates an 1-dimensional slice-shell over an array. 
|Params: | array = An array. |Returns: | 1-dimensional slice |+/ |Slice!(T*) sliced(T)(T[] array) @trusted |{ | version(LDC) pragma(inline, true); | return Slice!(T*)([array.length], array.ptr); |} | |/// Creates a slice from an array. |@safe pure nothrow version(mir_test) unittest |{ | auto slice = new int[10].sliced; | assert(slice.length == 10); | static assert(is(typeof(slice) == Slice!(int*))); |} | |/++ |Creates an n-dimensional slice-shell over the 1-dimensional input slice. |Params: | slice = slice | lengths = A list of lengths for each dimension. |Returns: | n-dimensional slice |+/ |Slice!(Iterator, N, kind) | sliced | (Iterator, size_t N, SliceKind kind) | (Slice!(Iterator, 1, kind) slice, size_t[N] lengths...) | if (N) |{ | auto structure = typeof(return)._Structure.init; | structure[0] = lengths; | static if (kind != Contiguous) | { | import mir.ndslice.topology: iota; | structure[1] = structure[0].iota.strides; | } | import core.lifetime: move; | return typeof(return)(structure, slice._iterator.move); |} | |/// |@safe pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | auto data = new int[24]; | foreach (i, ref e; data) | e = cast(int)i; | auto a = data[0..10].sliced(10)[0..6].sliced(2, 3); | auto b = iota!int(10)[0..6].sliced(2, 3); | assert(a == b); | a[] += b; | foreach (i, e; data[0..6]) | assert(e == 2*i); | foreach (i, e; data[6..$]) | assert(e == i+6); |} | |/++ |Creates an n-dimensional slice-shell over a field. |Params: | field = A field. The length of the | array should be equal to or less then the product of | lengths. | lengths = A list of lengths for each dimension. |Returns: | n-dimensional slice |+/ |Slice!(FieldIterator!Field, N) |slicedField(Field, size_t N)(Field field, size_t[N] lengths...) | if (N) |{ | static if (hasLength!Field) 0000000| assert(lengths.lengthsProduct <= field.length, "Length product should be less or equal to the field length."); 0000000| return FieldIterator!Field(0, field).sliced(lengths); |} | |///ditto |auto slicedField(Field)(Field field) | if(hasLength!Field) |{ 0000000| return .slicedField(field, field.length); |} | |/// Creates an 1-dimensional slice over a field, array, or random access range. |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | auto slice = 10.iota.slicedField; | assert(slice.length == 10); |} | |/++ |Creates an n-dimensional slice-shell over an ndField. |Params: | field = A ndField. Lengths should fit into field's shape. | lengths = A list of lengths for each dimension. |Returns: | n-dimensional slice |See_also: $(SUBREF concatenation, concatenation) examples. |+/ |Slice!(IndexIterator!(FieldIterator!(ndIotaField!N), ndField), N) |slicedNdField(ndField, size_t N)(ndField field, size_t[N] lengths...) | if (N) |{ | static if(hasShape!ndField) | { | auto shape = field.shape; | foreach (i; 0 .. N) | assert(lengths[i] <= shape[i], "Lengths should fit into ndfield's shape."); | } | import mir.ndslice.topology: indexed, ndiota; | return indexed(field, ndiota(lengths)); |} | |///ditto |auto slicedNdField(ndField)(ndField field) | if(hasShape!ndField) |{ | return .slicedNdField(field, field.shape); |} | |/++ |Combination of coordinate(s) and value. 
|+/ |struct CoordinateValue(T, size_t N = 1) |{ | /// | size_t[N] index; | | /// | T value; | | /// | sizediff_t opCmp()(scope auto ref const typeof(this) rht) const | { | return cmpCoo(this.index, rht.index); | } |} | |private sizediff_t cmpCoo(size_t N)(scope const auto ref size_t[N] a, scope const auto ref size_t[N] b) |{ | foreach (i; Iota!(0, N)) | if (auto d = a[i] - b[i]) | return d; | return 0; |} | |/++ |Presents $(LREF .Slice.structure). |+/ |struct Structure(size_t N) |{ | /// | size_t[N] lengths; | /// | sizediff_t[N] strides; |} | |package(mir) alias LightConstOfLightScopeOf(Iterator) = LightConstOf!(LightScopeOf!Iterator); |package(mir) alias LightImmutableOfLightConstOf(Iterator) = LightImmutableOf!(LightScopeOf!Iterator); |package(mir) alias ImmutableOfUnqualOfPointerTarget(Iterator) = immutable(Unqual!(PointerTarget!Iterator))*; |package(mir) alias ConstOfUnqualOfPointerTarget(Iterator) = const(Unqual!(PointerTarget!Iterator))*; | |package(mir) template allLightScope(args...) |{ | static if (args.length) | { | alias arg = args[0]; | alias Arg = typeof(arg); | static if(!isDynamicArray!Arg) | { | static if(!is(LightScopeOf!Arg == Arg)) | @optmath @property ls()() | { | import mir.qualifier: lightScope; | return lightScope(arg); | } | else alias ls = arg; | } | else alias ls = arg; | alias allLightScope = AliasSeq!(ls, allLightScope!(args[1..$])); | } | else | alias allLightScope = AliasSeq!(); |} | |/++ |Presents an n-dimensional view over a range. | |$(H3 Definitions) | |In order to change data in a slice using |overloaded operators such as `=`, `+=`, `++`, |a syntactic structure of type |`[]` must be used. |It is worth noting that just like for regular arrays, operations `a = b` |and `a[] = b` have different meanings. |In the first case, after the operation is carried out, `a` simply points at the same data as `b` |does, and the data which `a` previously pointed at remains unmodified. |Here, `а` and `b` must be of the same type. |In the second case, `a` points at the same data as before, |but the data itself will be changed. In this instance, the number of dimensions of `b` |may be less than the number of dimensions of `а`; and `b` can be a Slice, |a regular multidimensional array, or simply a value (e.g. a number). | |In the following table you will find the definitions you might come across |in comments on operator overloading. | |$(BOOKTABLE |$(TR $(TH Operator Overloading) $(TH Examples at `N == 3`)) |$(TR $(TD An $(B interval) is a part of a sequence of type `i .. j`.) | $(STD `2..$-3`, `0..4`)) |$(TR $(TD An $(B index) is a part of a sequence of type `i`.) | $(STD `3`, `$-1`)) |$(TR $(TD A $(B partially defined slice) is a sequence composed of | $(B intervals) and $(B indexes) with an overall length strictly less than `N`.) | $(STD `[3]`, `[0..$]`, `[3, 3]`, `[0..$,0..3]`, `[0..$,2]`)) |$(TR $(TD A $(B fully defined index) is a sequence | composed only of $(B indexes) with an overall length equal to `N`.) | $(STD `[2,3,1]`)) |$(TR $(TD A $(B fully defined slice) is an empty sequence | or a sequence composed of $(B indexes) and at least one | $(B interval) with an overall length equal to `N`.) | $(STD `[]`, `[3..$,0..3,0..$-1]`, `[2,0..$,1]`)) |$(TR $(TD An $(B indexed slice) is syntax sugar for $(SUBREF topology, indexed) and $(SUBREF topology, cartesian).) | $(STD `[anNdslice]`, `[$.iota, anNdsliceForCartesian1, $.iota]`)) |) | |See_also: | $(SUBREF topology, iota). 
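|
|A short sketch of the `a = b` versus `a[] = b` distinction described above:
|
|-------
|import mir.ndslice.allocation: slice;
|
|auto a = slice!double(2, 3);
|auto b = slice!double(2, 3);
|b[] = 1;
|
|a = b;    // rebinds `a`: both slices now view the same data
|a[] = 0;  // fully defined slice: overwrites the shared data
|assert(b == a);
|
|auto row = a[0];  // partially defined slice: a 1D view of the first row
|row[] += 5;       // modifies the data seen by `a` (and `b`)
|assert(a[0, 2] == 5);
|-------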
| |$(H3 Internal Binary Representation) | |Multidimensional Slice is a structure that consists of lengths, strides, and a iterator (pointer). | |$(SUBREF topology, FieldIterator) shell is used to wrap fields and random access ranges. |FieldIterator contains a shift of the current initial element of a multidimensional slice |and the field itself. | |With the exception of $(MREF mir,ndslice,allocation) module, no functions in this |package move or copy data. The operations are only carried out on lengths, strides, |and pointers. If a slice is defined over a range, only the shift of the initial element |changes instead of the range. | |$(H4 Internal Representation for Universal Slices) | |Type definition | |------- |Slice!(Iterator, N, Universal) |------- | |Schema | |------- |Slice!(Iterator, N, Universal) | size_t[N] _lengths | sizediff_t[N] _strides | Iterator _iterator |------- | |$(H5 Example) | |Definitions | |------- |import mir.ndslice; |auto a = new double[24]; |Slice!(double*, 3, Universal) s = a.sliced(2, 3, 4).universal; |Slice!(double*, 3, Universal) t = s.transposed!(1, 2, 0); |Slice!(double*, 3, Universal) r = t.reversed!1; |------- | |Representation | |------- |s________________________ | lengths[0] ::= 2 | lengths[1] ::= 3 | lengths[2] ::= 4 | | strides[0] ::= 12 | strides[1] ::= 4 | strides[2] ::= 1 | | iterator ::= &a[0] | |t____transposed!(1, 2, 0) | lengths[0] ::= 3 | lengths[1] ::= 4 | lengths[2] ::= 2 | | strides[0] ::= 4 | strides[1] ::= 1 | strides[2] ::= 12 | | iterator ::= &a[0] | |r______________reversed!1 | lengths[0] ::= 2 | lengths[1] ::= 3 | lengths[2] ::= 4 | | strides[0] ::= 12 | strides[1] ::= -4 | strides[2] ::= 1 | | iterator ::= &a[8] // (old_strides[1] * (lengths[1] - 1)) = 8 |------- | |$(H4 Internal Representation for Canonical Slices) | |Type definition | |------- |Slice!(Iterator, N, Canonical) |------- | |Schema | |------- |Slice!(Iterator, N, Canonical) | size_t[N] _lengths | sizediff_t[N-1] _strides | Iterator _iterator |------- | |$(H4 Internal Representation for Contiguous Slices) | |Type definition | |------- |Slice!(Iterator, N) |------- | |Schema | |------- |Slice!(Iterator, N, Contiguous) | size_t[N] _lengths | sizediff_t[0] _strides | Iterator _iterator |------- |+/ |struct mir_slice(Iterator_, size_t N_ = 1, SliceKind kind_ = Contiguous, Labels_...) | if (0 < N_ && N_ < 255 && !(kind_ == Canonical && N_ == 1) && Labels_.length <= N_ && isIterator!Iterator_) |{ |@optmath: | | /// $(LREF SliceKind) | enum SliceKind kind = kind_; | | /// Dimensions count | enum size_t N = N_; | | /// Strides count | enum size_t S = kind == Universal ? N : kind == Canonical ? N - 1 : 0; | | /// Labels count. 
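The representation schemas above can be checked at run time; a sketch that reproduces the `s` and `t` numbers and shows that only lengths and strides change:

-------
version(mir_test) unittest
{
    import mir.ndslice.slice: sliced;
    import mir.ndslice.topology: universal;
    import mir.ndslice.dynamic: transposed;

    auto a = new double[24];
    auto s = a.sliced(2, 3, 4).universal;
    auto t = s.transposed!(1, 2, 0);

    assert(s.strides == cast(ptrdiff_t[3])[12, 4, 1]);
    assert(t.strides == cast(ptrdiff_t[3])[4, 1, 12]);
    assert(&t[0, 0, 0] == &s[0, 0, 0]);   // no data is moved or copied
}
-------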
| enum size_t L = Labels_.length; | | /// Data iterator type | alias Iterator = Iterator_; | | /// This type | alias This = Slice!(Iterator, N, kind); | | /// Data element type | alias DeepElement = typeof(Iterator.init[size_t.init]); | | /// Label Iterators types | alias Labels = Labels_; | | /// | template Element(size_t dimension) | if (dimension < N) | { | static if (N == 1) | alias Element = DeepElement; | else | { | static if (kind == Universal || dimension == N - 1) | alias Element = mir_slice!(Iterator, N - 1, Universal); | else | static if (N == 2 || kind == Contiguous && dimension == 0) | alias Element = mir_slice!(Iterator, N - 1); | else | alias Element = mir_slice!(Iterator, N - 1, Canonical); | } | } | |package(mir): | | enum doUnittest = is(Iterator == int*) && N == 1 && kind == Contiguous; | | enum hasAccessByRef = __traits(compiles, &_iterator[0]); | | enum PureIndexLength(Slices...) = Filter!(isIndex, Slices).length; | | enum isPureSlice(Slices...) = | Slices.length == 0 | || Slices.length <= N | && PureIndexLength!Slices < N | && Filter!(isIndex, Slices).length < Slices.length | && allSatisfy!(templateOr!(isIndex, is_Slice), Slices); | | | enum isFullPureSlice(Slices...) = | Slices.length == 0 | || Slices.length == N | && PureIndexLength!Slices < N | && allSatisfy!(templateOr!(isIndex, is_Slice), Slices); | | enum isIndexedSlice(Slices...) = | Slices.length | && Slices.length <= N | && allSatisfy!(isSlice, Slices) | && anySatisfy!(templateNot!is_Slice, Slices); | | static if (S) | { | /// | public alias _Structure = AliasSeq!(size_t[N], ptrdiff_t[S]); | /// | _Structure _structure; | /// | public alias _lengths = _structure[0]; | /// | public alias _strides = _structure[1]; | } | else | { | /// | public alias _Structure = AliasSeq!(size_t[N]); | /// | _Structure _structure; | /// | public alias _lengths = _structure[0]; | /// | public enum ptrdiff_t[S] _strides = ptrdiff_t[S].init; | } | | /// Data Iterator | public Iterator _iterator; | /// Labels iterators | public Labels _labels; | | sizediff_t backIndex(size_t dimension = 0)() @safe @property scope const | if (dimension < N) | { | return _stride!dimension * (_lengths[dimension] - 1); | } | | size_t indexStride(size_t I)(size_t[I] _indexes) @safe scope const | { | static if (_indexes.length) | { | static if (kind == Contiguous) | { | enum E = I - 1; | assert(_indexes[E] < _lengths[E], indexError!(E, N)); | ptrdiff_t ball = this._stride!E; | ptrdiff_t stride = _indexes[E] * ball; | foreach_reverse (i; Iota!E) //static | { | ball *= _lengths[i + 1]; | assert(_indexes[i] < _lengths[i], indexError!(i, N)); | stride += ball * _indexes[i]; | } | } | else | static if (kind == Canonical) | { | enum E = I - 1; | assert(_indexes[E] < _lengths[E], indexError!(E, N)); | static if (I == N) | size_t stride = _indexes[E]; | else | size_t stride = _strides[E] * _indexes[E]; | foreach_reverse (i; Iota!E) //static | { | assert(_indexes[i] < _lengths[i], indexError!(i, N)); | stride += _strides[i] * _indexes[i]; | } | } | else | { | enum E = I - 1; | assert(_indexes[E] < _lengths[E], indexError!(E, N)); | size_t stride = _strides[E] * _indexes[E]; | foreach_reverse (i; Iota!E) //static | { | assert(_indexes[i] < _lengths[i], indexError!(i, N)); | stride += _strides[i] * _indexes[i]; | } | } | return stride; | } | else | { | return 0; | } | } | |public: | | // static if (S == 0) | // { | /// Defined for Contiguous Slice only | // this()(size_t[N] lengths, in ptrdiff_t[] empty, Iterator iterator, Labels labels) | // { | // version(LDC) 
pragma(inline, true); | // assert(empty.length == 0); | // this._lengths = lengths; | // this._iterator = iterator; | // } | | // /// ditto | // this()(size_t[N] lengths, Iterator iterator, Labels labels) | // { | // version(LDC) pragma(inline, true); | // this._lengths = lengths; | // this._iterator = iterator; | // } | | // /// ditto | // this()(size_t[N] lengths, in ptrdiff_t[] empty, Iterator iterator, Labels labels) | // { | // version(LDC) pragma(inline, true); | // assert(empty.length == 0); | // this._lengths = lengths; | // this._iterator = iterator; | // } | | // /// ditto | // this()(size_t[N] lengths, Iterator iterator, Labels labels) | // { | // version(LDC) pragma(inline, true); | // this._lengths = lengths; | // this._iterator = iterator; | // } | // } | | // version(LDC) | // private enum classicConstructor = true; | // else | // private enum classicConstructor = S > 0; | | // static if (classicConstructor) | // { | /// Defined for Canonical and Universal Slices (DMD, GDC, LDC) and for Contiguous Slices (LDC) | // this()(size_t[N] lengths, ptrdiff_t[S] strides, Iterator iterator, Labels labels) | // { | // version(LDC) pragma(inline, true); | // this._lengths = lengths; | // this._strides = strides; | // this._iterator = iterator; | // this._labels = labels; | // } | | // /// ditto | // this()(size_t[N] lengths, ptrdiff_t[S] strides, ref Iterator iterator, Labels labels) | // { | // version(LDC) pragma(inline, true); | // this._lengths = lengths; | // this._strides = strides; | // this._iterator = iterator; | // this._labels = labels; | // } | // } | | // /// Construct from null | // this()(typeof(null)) | // { | // version(LDC) pragma(inline, true); | // } | | // static if (doUnittest) | // /// | // @safe pure version(mir_test) unittest | // { | // import mir.ndslice.slice; | // alias Array = Slice!(double*); | // Array a = null; | // auto b = Array(null); | // assert(a.empty); | // assert(b.empty); | | // auto fun(Array a = null) | // { | | // } | // } | | static if (doUnittest) | /// Creates a 2-dimentional slice with custom strides. | nothrow pure | version(mir_test) unittest | { | uint[8] array = [1, 2, 3, 4, 5, 6, 7, 8]; | auto slice = Slice!(uint*, 2, Universal)([2, 2], [4, 1], array.ptr); | | assert(&slice[0, 0] == &array[0]); | assert(&slice[0, 1] == &array[1]); | assert(&slice[1, 0] == &array[4]); | assert(&slice[1, 1] == &array[5]); | assert(slice == [[1, 2], [5, 6]]); | | array[2] = 42; | assert(slice == [[1, 2], [5, 6]]); | | array[1] = 99; | assert(slice == [[1, 99], [5, 6]]); | } | | /++ | Returns: View with stripped out reference counted context. | The lifetime of the result mustn't be longer then the lifetime of the original slice. 
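A minimal sketch of the intended usage of the scoped view, assuming `mir.ndslice.allocation.rcslice` (the RC-allocated counterpart of `slice`) is available: the reference-counted slice owns the memory, and the scoped view is what gets passed around for computation.

-------
version(mir_test) unittest
{
    // `rcslice` is assumed to come from mir.ndslice.allocation
    import mir.ndslice.allocation: rcslice;
    import mir.ndslice.topology: iota;

    auto rc = rcslice!int(2, 3);    // reference-counted owner
    auto view = rc.lightScope;      // non-owning view over the same data
    view[] = iota!int(2, 3);
    assert(rc == iota!int(2, 3));
}
-------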
| +/ | auto lightScope()() scope return @property | { | auto ret = Slice!(LightScopeOf!Iterator, N, kind, staticMap!(LightScopeOf, Labels)) | (_structure, .lightScope(_iterator)); | foreach(i; Iota!L) | ret._labels[i] = .lightScope(_labels[i]); | return ret; | } | | /// ditto | auto lightScope()() scope const return @property | { 0000000| auto ret = Slice!(LightConstOf!(LightScopeOf!Iterator), N, kind, staticMap!(LightConstOfLightScopeOf, Labels)) | (_structure, .lightScope(_iterator)); | foreach(i; Iota!L) | ret._labels[i] = .lightScope(_labels[i]); 0000000| return ret; | } | | /// ditto | auto lightScope()() scope immutable return @property | { | auto ret = Slice!(LightImmutableOf!(LightScopeOf!Iterator), N, kind, staticMap!(LightImmutableOfLightConstOf(Labels))) | (_structure, .lightScope(_iterator)); | foreach(i; Iota!L) | ret._labels[i] = .lightScope(_labels[i]); | return ret; | } | | /// Returns: Mutable slice over immutable data. | Slice!(LightImmutableOf!Iterator, N, kind, staticMap!(LightImmutableOf, Labels)) lightImmutable()() scope return immutable @property | { | auto ret = typeof(return)(_structure, .lightImmutable(_iterator)); | foreach(i; Iota!L) | ret._labels[i] = .lightImmutable(_labels[i]); | return ret; | } | | /// Returns: Mutable slice over const data. | Slice!(LightConstOf!Iterator, N, kind, staticMap!(LightConstOf, Labels)) lightConst()() scope return const @property @trusted | { | auto ret = typeof(return)(_structure, .lightConst(_iterator)); | foreach(i; Iota!L) | ret._labels[i] = .lightConst(_labels[i]); | return ret; | } | | /// ditto | Slice!(LightImmutableOf!Iterator, N, kind, staticMap!(LightImmutableOf, Labels)) lightConst()() scope return immutable @property | { | return this.lightImmutable; | } | | /// Label for the dimensions 'd'. By default returns the row label. 
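The const and immutable conversions above are what let a mutable slice be used wherever a slice over const data is expected; a short sketch:

-------
version(mir_test) unittest
{
    import mir.ndslice.allocation: slice;
    import mir.ndslice.slice: Slice;

    auto a = slice!int(2, 3);
    a[] = 7;

    Slice!(const(int)*, 2) c = a.lightConst;   // same memory, const element access
    assert(c == a);
    assert(&c[0, 0] == &a[0, 0]);
}
-------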
| Slice!(Labels[d]) | label(size_t d = 0)() @property | if (d <= L) | { | return typeof(return)(_lengths[d], _labels[d]); | } | | /// ditto | void label(size_t d = 0)(Slice!(Labels[d]) rhs) @property | if (d <= L) | { | import core.lifetime: move; | assert(rhs.length == _lengths[d], "ndslice: labels dimension mismatch"); | _labels[d] = rhs._iterator.move; | } | | /// ditto | Slice!(LightConstOf!(Labels[d])) | label(size_t d = 0)() @property const | if (d <= L) | { | return typeof(return)(_lengths[d].lightConst, _labels[d]); | } | | /// ditto | Slice!(LightImmutableOf!(Labels[d])) | label(size_t d = 0)() @property immutable | if (d <= L) | { | return typeof(return)(_lengths[d].lightImmutable, _labels[d]); | } | | /// Strips label off the DataFrame | auto values()() @property | { 0000000| return Slice!(Iterator, N, kind)(_structure, _iterator); | } | | /// ditto | auto values()() @property const | { | return Slice!(LightConstOf!Iterator, N, kind)(_structure, .lightConst(_iterator)); | } | | /// ditto | auto values()() @property immutable | { | return Slice!(LightImmutableOf!Iterator, N, kind)(_structure, .lightImmutable(_iterator)); | } | | /// `opIndex` overload for const slice | auto ref opIndex(Indexes...)(Indexes indexes) const @trusted | if (isPureSlice!Indexes || isIndexedSlice!Indexes) | { | return lightConst.opIndex(indexes); | } | /// `opIndex` overload for immutable slice | auto ref opIndex(Indexes...)(Indexes indexes) immutable @trusted | if (isPureSlice!Indexes || isIndexedSlice!Indexes) | { | return lightImmutable.opIndex(indexes); | } | | static if (allSatisfy!(isPointer, Iterator, Labels)) | { | private alias ConstThis = Slice!(const(Unqual!(PointerTarget!Iterator))*, N, kind); | private alias ImmutableThis = Slice!(immutable(Unqual!(PointerTarget!Iterator))*, N, kind); | | /++ | Cast to const and immutable slices in case of underlying range is a pointer. | +/ | auto toImmutable()() scope return immutable @trusted pure nothrow @nogc | { | return Slice!(ImmutableOfUnqualOfPointerTarget!Iterator, N, kind, staticMap!(ImmutableOfUnqualOfPointerTarget, Labels)) | (_structure, _iterator, _labels); | } | | /// ditto | auto toConst()() scope return const @trusted pure nothrow @nogc | { | version(LDC) pragma(inline, true); | return Slice!(ConstOfUnqualOfPointerTarget!Iterator, N, kind, staticMap!(ConstOfUnqualOfPointerTarget, Labels)) | (_structure, _iterator, _labels); | } | | static if (!is(Slice!(const(Unqual!(PointerTarget!Iterator))*, N, kind) == This)) | /// ditto | alias toConst this; | | static if (doUnittest) | /// | version(mir_test) unittest | { | static struct Foo | { | Slice!(int*) bar; | | int get(size_t i) immutable | { | return bar[i]; | } | | int get(size_t i) const | { | return bar[i]; | } | | int get(size_t i) inout | { | return bar[i]; | } | } | } | | static if (doUnittest) | /// | version(mir_test) unittest | { | Slice!(double*, 2, Universal) nn; | Slice!(immutable(double)*, 2, Universal) ni; | Slice!(const(double)*, 2, Universal) nc; | | const Slice!(double*, 2, Universal) cn; | const Slice!(immutable(double)*, 2, Universal) ci; | const Slice!(const(double)*, 2, Universal) cc; | | immutable Slice!(double*, 2, Universal) in_; | immutable Slice!(immutable(double)*, 2, Universal) ii; | immutable Slice!(const(double)*, 2, Universal) ic; | | nc = nc; nc = cn; nc = in_; | nc = nc; nc = cc; nc = ic; | nc = ni; nc = ci; nc = ii; | | void fun(T, size_t N)(Slice!(const(T)*, N, Universal) sl) | { | //... 
| } | | fun(nn); fun(cn); fun(in_); | fun(nc); fun(cc); fun(ic); | fun(ni); fun(ci); fun(ii); | | static assert(is(typeof(cn[]) == typeof(nc))); | static assert(is(typeof(ci[]) == typeof(ni))); | static assert(is(typeof(cc[]) == typeof(nc))); | | static assert(is(typeof(in_[]) == typeof(ni))); | static assert(is(typeof(ii[]) == typeof(ni))); | static assert(is(typeof(ic[]) == typeof(ni))); | | ni = ci[]; | ni = in_[]; | ni = ii[]; | ni = ic[]; | } | } | | /++ | Iterator | Returns: | Iterator (pointer) to the $(LREF Slice.first) element. | +/ | auto iterator()() inout scope return @property | { | return _iterator; | } | | static if (kind == Contiguous && isPointer!Iterator) | /++ | `ptr` alias is available only if the slice kind is $(LREF Contiguous) contiguous and the $(LREF Slice.iterator) is a pointers. | +/ | alias ptr = iterator; | else | { | import mir.rc.array: mir_rci; | static if (kind == Contiguous && is(Iterator : mir_rci!ET, ET)) | auto ptr() scope return inout @property | { | return _iterator._iterator; | } | } | | /++ | Field (array) data. | Returns: | Raw data slice. | Constraints: | Field is defined only for contiguous slices. | +/ | auto field()() scope return @trusted @property | { | static assert(kind == Contiguous, "Slice.field is defined only for contiguous slices. Slice kind is " ~ kind.stringof); | static if (is(typeof(_iterator[size_t(0) .. elementCount]))) | { | return _iterator[size_t(0) .. elementCount]; | } | else | { | import mir.ndslice.topology: flattened; | return this.flattened; | } | } | | /// ditto | auto field()() scope const return @trusted @property | { | return this.lightConst.field; | } | | /// ditto | auto field()() scope immutable return @trusted @property | { | return this.lightImmutable.field; | } | | static if (doUnittest) | /// | @safe version(mir_test) unittest | { | auto arr = [1, 2, 3, 4]; | auto sl0 = arr.sliced; | auto sl1 = arr.slicedField; | | assert(sl0.field is arr); | assert(sl1.field is arr); | | arr = arr[1 .. $]; | sl0 = sl0[1 .. $]; | sl1 = sl1[1 .. $]; | | assert(sl0.field is arr); | assert(sl1.field is arr); | assert((cast(const)sl1).field is arr); | ()@trusted{ assert((cast(immutable)sl1).field is arr); }(); | } | | /++ | Returns: static array of lengths | See_also: $(LREF .Slice.structure) | +/ | size_t[N] shape()() @trusted @property scope const | { 0000000| return _lengths[0 .. N]; | } | | static if (doUnittest) | /// Regular slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | assert(iota(3, 4, 5).shape == cast(size_t[3])[3, 4, 5]); | } | | static if (doUnittest) | /// Packed slice | @safe @nogc pure nothrow | version(mir_test) unittest | { | import mir.ndslice.topology : pack, iota; | size_t[3] s = [3, 4, 5]; | assert(iota(3, 4, 5, 6, 7).pack!2.shape == s); | } | | /++ | Returns: static array of lengths | See_also: $(LREF .Slice.structure) | +/ | ptrdiff_t[N] strides()() @trusted @property scope const | { | static if (N <= S) | return _strides[0 .. 
N]; | else | { | typeof(return) ret; | static if (kind == Canonical) | { | foreach (i; Iota!S) | ret[i] = _strides[i]; | ret[$-1] = 1; | } | else | { | ret[$ - 1] = _stride!(N - 1); | foreach_reverse (i; Iota!(N - 1)) | ret[i] = ret[i + 1] * _lengths[i + 1]; | } | return ret; | } | } | | static if (doUnittest) | /// Regular slice | @safe @nogc pure nothrow | version(mir_test) unittest | { | import mir.ndslice.topology : iota; | size_t[3] s = [20, 5, 1]; | assert(iota(3, 4, 5).strides == s); | } | | static if (doUnittest) | /// Modified regular slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : pack, iota, universal; | import mir.ndslice.dynamic : reversed, strided, transposed; | assert(iota(3, 4, 50) | .universal | .reversed!2 //makes stride negative | .strided!2(6) //multiplies stride by 6 and changes corresponding length | .transposed!2 //brings dimension `2` to the first position | .strides == cast(ptrdiff_t[3])[-6, 200, 50]); | } | | static if (doUnittest) | /// Packed slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : pack, iota; | size_t[3] s = [20 * 42, 5 * 42, 1 * 42]; | assert(iota(3, 4, 5, 6, 7) | .pack!2 | .strides == s); | } | | /++ | Returns: static array of lengths and static array of strides | See_also: $(LREF .Slice.shape) | +/ | Structure!N structure()() @safe @property scope const | { | return typeof(return)(_lengths, strides); | } | | static if (doUnittest) | /// Regular slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | assert(iota(3, 4, 5) | .structure == Structure!3([3, 4, 5], [20, 5, 1])); | } | | static if (doUnittest) | /// Modified regular slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : pack, iota, universal; | import mir.ndslice.dynamic : reversed, strided, transposed; | assert(iota(3, 4, 50) | .universal | .reversed!2 //makes stride negative | .strided!2(6) //multiplies stride by 6 and changes corresponding length | .transposed!2 //brings dimension `2` to the first position | .structure == Structure!3([9, 3, 4], [-6, 200, 50])); | } | | static if (doUnittest) | /// Packed slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : pack, iota; | assert(iota(3, 4, 5, 6, 7) | .pack!2 | .structure == Structure!3([3, 4, 5], [20 * 42, 5 * 42, 1 * 42])); | } | | /++ | Save primitive. | +/ | auto save()() scope return inout @property | { | return this; | } | | static if (doUnittest) | /// Save range | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | auto slice = iota(2, 3).save; | } | | static if (doUnittest) | /// Pointer type. | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | //sl type is `Slice!(2, int*)` | auto sl = slice!int(2, 3).save; | } | | /++ | Multidimensional `length` property. 
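Since `$` in index expressions resolves to the corresponding dimension's `length` (see `opDollar` below), it is dimension-aware; a short sketch:

-------
version(mir_test) unittest
{
    import mir.ndslice.topology: iota;

    auto t = iota(3, 4, 5);
    assert(t.length!2 == 5);
    // `$` means length!0, length!1, length!2 in the respective positions
    assert(t[$ - 1, $ - 1, $ - 1] == t[2, 3, 4]);
}
-------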
| Returns: length of the corresponding dimension | See_also: $(LREF .Slice.shape), $(LREF .Slice.structure) | +/ | size_t length(size_t dimension = 0)() @safe @property scope const | if (dimension < N) | { | return _lengths[dimension]; | } | | static if (doUnittest) | /// | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | auto slice = iota(3, 4, 5); | assert(slice.length == 3); | assert(slice.length!0 == 3); | assert(slice.length!1 == 4); | assert(slice.length!2 == 5); | } | | alias opDollar = length; | | /++ | Multidimensional `stride` property. | Returns: stride of the corresponding dimension | See_also: $(LREF .Slice.structure) | +/ | sizediff_t _stride(size_t dimension = 0)() @safe @property scope const | if (dimension < N) | { | static if (dimension < S) | { | return _strides[dimension]; | } | else | static if (dimension + 1 == N) | { | return 1; | } | else | { | size_t ball = _lengths[$ - 1]; | foreach_reverse(i; Iota!(dimension + 1, N - 1)) | ball *= _lengths[i]; | return ball; | } | | } | | static if (doUnittest) | /// Regular slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | auto slice = iota(3, 4, 5); | assert(slice._stride == 20); | assert(slice._stride!0 == 20); | assert(slice._stride!1 == 5); | assert(slice._stride!2 == 1); | } | | static if (doUnittest) | /// Modified regular slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.dynamic : reversed, strided, swapped; | import mir.ndslice.topology : universal, iota; | assert(iota(3, 4, 50) | .universal | .reversed!2 //makes stride negative | .strided!2(6) //multiplies stride by 6 and changes the corresponding length | .swapped!(1, 2) //swaps dimensions `1` and `2` | ._stride!1 == -6); | } | | /++ | Multidimensional input range primitive. | +/ | bool empty(size_t dimension = 0)() @safe @property scope const | if (dimension < N) | { 0000000| return _lengths[dimension] == 0; | } | | static if (N == 1) | { | ///ditto | auto ref front(size_t dimension = 0)() scope return @trusted @property | if (dimension == 0) | { 0000000| assert(!empty!dimension); 0000000| return *_iterator; | } | | ///ditto | auto ref front(size_t dimension = 0)() scope return @trusted @property const | if (dimension == 0) | { | assert(!empty!dimension); | return *_iterator.lightScope; | } | | ///ditto | auto ref front(size_t dimension = 0)() scope return @trusted @property immutable | if (dimension == 0) | { | assert(!empty!dimension); | return *_iterator.lightScope; | } | } | else | { | /// ditto | Element!dimension front(size_t dimension = 0)() scope return @property | if (dimension < N) | { | typeof(return)._Structure structure_ = typeof(return)._Structure.init; | | foreach (i; Iota!(typeof(return).N)) | { | enum j = i >= dimension ? i + 1 : i; | structure_[0][i] = _lengths[j]; | } | | static if (!typeof(return).S || typeof(return).S + 1 == S) | alias s = _strides; | else | auto s = strides; | | foreach (i; Iota!(typeof(return).S)) | { | enum j = i >= dimension ? 
i + 1 : i; | structure_[1][i] = s[j]; | } | | return typeof(return)(structure_, _iterator); | } | | ///ditto | auto front(size_t dimension = 0)() scope return @trusted @property const | if (dimension < N) | { | assert(!empty!dimension); | return this.lightConst.front!dimension; | } | | ///ditto | auto front(size_t dimension = 0)() scope return @trusted @property immutable | if (dimension < N) | { | assert(!empty!dimension); | return this.lightImmutable.front!dimension; | } | } | | static if (N == 1 && isMutable!DeepElement && !hasAccessByRef) | { | ///ditto | auto ref front(size_t dimension = 0, T)(T value) scope return @trusted @property | if (dimension == 0) | { | // check assign safety | static auto ref fun(ref DeepElement t, ref T v) @safe | { | return t = v; | } | assert(!empty!dimension); | static if (__traits(compiles, *_iterator = value)) | return *_iterator = value; | else | return _iterator[0] = value; | } | } | | ///ditto | static if (N == 1) | auto ref Element!dimension | back(size_t dimension = 0)() scope return @trusted @property | if (dimension < N) | { | assert(!empty!dimension); | return _iterator[backIndex]; | } | else | auto ref Element!dimension | back(size_t dimension = 0)() scope return @trusted @property | if (dimension < N) | { | assert(!empty!dimension); | auto structure_ = typeof(return)._Structure.init; | | foreach (i; Iota!(typeof(return).N)) | { | enum j = i >= dimension ? i + 1 : i; | structure_[0][i] = _lengths[j]; | } | | static if (!typeof(return).S || typeof(return).S + 1 == S) | alias s =_strides; | else | auto s = strides; | | foreach (i; Iota!(typeof(return).S)) | { | enum j = i >= dimension ? i + 1 : i; | structure_[1][i] = s[j]; | } | | return typeof(return)(structure_, _iterator + backIndex!dimension); | } | | static if (N == 1 && isMutable!DeepElement && !hasAccessByRef) | { | ///ditto | auto ref back(size_t dimension = 0, T)(T value) scope return @trusted @property | if (dimension == 0) | { | // check assign safety | static auto ref fun(ref DeepElement t, ref T v) @safe | { | return t = v; | } | assert(!empty!dimension); | return _iterator[backIndex] = value; | } | } | | ///ditto | void popFront(size_t dimension = 0)() @trusted scope | if (dimension < N && (dimension == 0 || kind != Contiguous)) | { 0000000| assert(_lengths[dimension], __FUNCTION__ ~ ": length!" ~ dimension.stringof ~ " should be greater than 0."); 0000000| _lengths[dimension]--; | static if ((kind == Contiguous || kind == Canonical) && dimension + 1 == N) 0000000| ++_iterator; | else | static if (kind == Canonical || kind == Universal) | _iterator += _strides[dimension]; | else | _iterator += _stride!dimension; | } | | ///ditto | void popBack(size_t dimension = 0)() @safe scope | if (dimension < N && (dimension == 0 || kind != Contiguous)) | { | assert(_lengths[dimension], __FUNCTION__ ~ ": length!" ~ dimension.stringof ~ " should be greater than 0."); | --_lengths[dimension]; | } | | ///ditto | void popFrontExactly(size_t dimension = 0)(size_t n) @trusted scope | if (dimension < N && (dimension == 0 || kind != Contiguous)) | { | assert(n <= _lengths[dimension], | __FUNCTION__ ~ ": n should be less than or equal to length!" ~ dimension.stringof); | _lengths[dimension] -= n; | _iterator += _stride!dimension * n; | } | | ///ditto | void popBackExactly(size_t dimension = 0)(size_t n) @safe scope | if (dimension < N && (dimension == 0 || kind != Contiguous)) | { | assert(n <= _lengths[dimension], | __FUNCTION__ ~ ": n should be less than or equal to length!" 
~ dimension.stringof); | _lengths[dimension] -= n; | } | | ///ditto | void popFrontN(size_t dimension = 0)(size_t n) @trusted scope | if (dimension < N && (dimension == 0 || kind != Contiguous)) | { | popFrontExactly!dimension(min(n, _lengths[dimension])); | } | | ///ditto | void popBackN(size_t dimension = 0)(size_t n) @safe scope | if (dimension < N && (dimension == 0 || kind != Contiguous)) | { | popBackExactly!dimension(min(n, _lengths[dimension])); | } | | static if (doUnittest) | /// | @safe @nogc pure nothrow version(mir_test) unittest | { | import std.range.primitives; | import mir.ndslice.topology : iota, canonical; | auto slice = iota(10, 20, 30).canonical; | | static assert(isRandomAccessRange!(typeof(slice))); | static assert(hasSlicing!(typeof(slice))); | static assert(hasLength!(typeof(slice))); | | assert(slice.shape == cast(size_t[3])[10, 20, 30]); | slice.popFront; | slice.popFront!1; | slice.popBackExactly!2(4); | assert(slice.shape == cast(size_t[3])[9, 19, 26]); | | auto matrix = slice.front!1; | assert(matrix.shape == cast(size_t[2])[9, 26]); | | auto column = matrix.back!1; | assert(column.shape == cast(size_t[1])[9]); | | slice.popFrontExactly!1(slice.length!1); | assert(slice.empty == false); | assert(slice.empty!1 == true); | assert(slice.empty!2 == false); | assert(slice.shape == cast(size_t[3])[9, 0, 26]); | | assert(slice.back.front!1.empty); | | slice.popFrontN!0(40); | slice.popFrontN!2(40); | assert(slice.shape == cast(size_t[3])[0, 0, 0]); | } | | package(mir) ptrdiff_t lastIndex()() @safe @property scope const | { | static if (kind == Contiguous) | { | return elementCount - 1; | } | else | { | auto strides = strides; | ptrdiff_t shift = 0; | foreach(i; Iota!N) | shift += strides[i] * (_lengths[i] - 1); | return shift; | } | } | | static if (N > 1) | { | /// Accesses the first deep element of the slice. | auto ref first()() scope return @trusted @property | { | assert(!anyEmpty); | return *_iterator; | } | | static if (isMutable!DeepElement && !hasAccessByRef) | ///ditto | auto ref first(T)(T value) scope return @trusted @property | { | assert(!anyEmpty); | static if (__traits(compiles, *_iterator = value)) | return *_iterator = value; | else | return _iterator[0] = value; | } | | static if (doUnittest) | /// | @safe pure nothrow @nogc version(mir_test) unittest | { | import mir.ndslice.topology: iota, universal, canonical; | auto f = 5; | assert([2, 3].iota(f).first == f); | } | | /// Accesses the last deep element of the slice. | auto ref last()() @trusted scope return @property | { | assert(!anyEmpty); | return _iterator[lastIndex]; | } | | static if (isMutable!DeepElement && !hasAccessByRef) | ///ditto | auto ref last(T)(T value) @trusted scope return @property | { | assert(!anyEmpty); | return _iterator[lastIndex] = value; | } | | static if (doUnittest) | /// | @safe pure nothrow @nogc version(mir_test) unittest | { | import mir.ndslice.topology: iota; | auto f = 5; | assert([2, 3].iota(f).last == f + 2 * 3 - 1); | } | } | else | { | alias first = front; | alias last = back; | } | | /+ | Returns: `true` if for any dimension of completely unpacked slice the length equals to `0`, and `false` otherwise. | +/ | private bool anyRUEmpty()() @trusted @property scope const | { | static if (isInstanceOf!(SliceIterator, Iterator)) | { | import mir.ndslice.topology: unpack; | return this.lightScope.unpack.anyRUEmpty; | } | else | return _lengths[0 .. 
N].anyEmptyShape; | } | | | /++ | Returns: `true` if for any dimension the length equals to `0`, and `false` otherwise. | +/ | bool anyEmpty()() @trusted @property scope const | { 0000000| return _lengths[0 .. N].anyEmptyShape; | } | | static if (doUnittest) | /// | @safe pure nothrow @nogc version(mir_test) unittest | { | import mir.ndslice.topology : iota, canonical; | auto s = iota(2, 3).canonical; | assert(!s.anyEmpty); | s.popFrontExactly!1(3); | assert(s.anyEmpty); | } | | /++ | Convenience function for backward indexing. | | Returns: `this[$-index[0], $-index[1], ..., $-index[N-1]]` | +/ | auto ref backward()(size_t[N] index) scope return | { | foreach (i; Iota!N) | index[i] = _lengths[i] - index[i]; | return this[index]; | } | | /// ditto | auto ref backward()(size_t[N] index) scope return const | { | return this.lightConst.backward(index); | } | | /// ditto | auto ref backward()(size_t[N] index) scope return const | { | return this.lightConst.backward(index); | } | | static if (doUnittest) | /// | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | auto s = iota(2, 3); | assert(s[$ - 1, $ - 2] == s.backward([1, 2])); | } | | /++ | Returns: Total number of elements in a slice | +/ | size_t elementCount()() @safe @property scope const | { 0000000| size_t len = 1; | foreach (i; Iota!N) 0000000| len *= _lengths[i]; 0000000| return len; | } | | deprecated("use elementCount instead") | alias elementsCount = elementCount; | | static if (doUnittest) | /// Regular slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | assert(iota(3, 4, 5).elementCount == 60); | } | | | static if (doUnittest) | /// Packed slice | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : pack, evertPack, iota; | auto slice = iota(3, 4, 5, 6, 7, 8); | auto p = slice.pack!2; | assert(p.elementCount == 360); | assert(p[0, 0, 0, 0].elementCount == 56); | assert(p.evertPack.elementCount == 56); | } | | /++ | Slice selected dimension. | Params: | begin = initial index of the sub-slice (inclusive) | end = final index of the sub-slice (noninclusive) | Returns: ndslice with `length!dimension` equal to `end - begin`. | +/ | auto select(size_t dimension)(size_t begin, size_t end) scope return | { | static if (kind == Contiguous && dimension) | { | import mir.ndslice.topology: canonical; | auto ret = this.canonical; | } | else | { | auto ret = this; | } | auto len = end - begin; | assert(len <= ret._lengths[dimension]); | ret._lengths[dimension] = len; | ret._iterator += ret._stride!dimension * begin; | return ret; | } | | static if (doUnittest) | /// | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | auto sl = iota(3, 4); | assert(sl.select!1(1, 3) == sl[0 .. $, 1 .. 3]); | } | | /++ | Select the first n elements for the dimension. | Params: | dimension = Dimension to slice. | n = count of elements for the dimension | Returns: ndslice with `length!dimension` equal to `n`. 
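The `select*` primitives compose, which is handy in generic code where the dimension is a template parameter rather than a literal `opSlice` expression; a sketch:

-------
version(mir_test) unittest
{
    import mir.ndslice.topology: iota;

    auto m = iota(4, 5);
    auto window = m.select!0(1, 3).select!1(2, 4);
    assert(window == m[1 .. 3, 2 .. 4]);
    assert(window.shape == cast(size_t[2])[2, 2]);
}
-------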
| +/ | auto selectFront(size_t dimension)(size_t n) scope return | { | static if (kind == Contiguous && dimension) | { | import mir.ndslice.topology: canonical; | auto ret = this.canonical; | } | else | { | auto ret = this; | } | assert(n <= ret._lengths[dimension]); | ret._lengths[dimension] = n; | return ret; | } | | static if (doUnittest) | /// | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | auto sl = iota(3, 4); | assert(sl.selectFront!1(2) == sl[0 .. $, 0 .. 2]); | } | | /++ | Select the last n elements for the dimension. | Params: | dimension = Dimension to slice. | n = count of elements for the dimension | Returns: ndslice with `length!dimension` equal to `n`. | +/ | auto selectBack(size_t dimension)(size_t n) scope return | { | static if (kind == Contiguous && dimension) | { | import mir.ndslice.topology: canonical; | auto ret = this.canonical; | } | else | { | auto ret = this; | } | assert(n <= ret._lengths[dimension]); | ret._iterator += ret._stride!dimension * (ret._lengths[dimension] - n); | ret._lengths[dimension] = n; | return ret; | } | | static if (doUnittest) | /// | @safe @nogc pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : iota; | auto sl = iota(3, 4); | assert(sl.selectBack!1(2) == sl[0 .. $, $ - 2 .. $]); | } | | /++ | Overloading `==` and `!=` | +/ | bool opEquals(scope const ref typeof(this) rslice) @trusted scope const | { | static if (!hasReference!(typeof(this))) | { 0000000| if (this._lengths != rslice._lengths) 0000000| return false; 0000000| if (this._iterator == rslice._iterator) 0000000| return true; | } | | import mir.algorithm.iteration : equal; | static if (__traits(compiles, this.lightScope)) | { 0000000| auto slice1 = this.lightScope; 0000000| auto slice2 = rslice.lightScope; | foreach(i; Iota!(min(slice1.L, slice2.L))) | if(slice1.label!i != slice2.label!i) | return false; 0000000| return equal(slice1.values, slice2.values); | } | else | return equal(*cast(This*)&this, *cast(This*)&rslice); | } | | ///ditto | bool opEquals(IteratorR, SliceKind rkind)(auto ref const Slice!(IteratorR, N, rkind) rslice) @trusted scope const | { | static if ( | !hasReference!(typeof(this)) | && !hasReference!(typeof(rslice)) | && __traits(compiles, this._iterator == rslice._iterator) | ) | { | if (this._lengths != rslice._lengths) | return false; | if (this._iterator == rslice._iterator) | return true; | } | import mir.algorithm.iteration : equal; | return equal(this.lightScope, rslice.lightScope); | } | | /// ditto | bool opEquals(T)(scope const(T)[] arr) @trusted scope const | { | auto slice = this.lightConst; | if (slice.length != arr.length) | return false; | if (arr.length) do | { | if (slice.front != arr[0]) | return false; | slice.popFront; | arr = arr[1 .. $]; | } | while (arr.length); | return true; | } | | static if (doUnittest) | /// | @safe pure nothrow | version(mir_test) unittest | { | auto a = [1, 2, 3, 4].sliced(2, 2); | | assert(a != [1, 2, 3, 4, 5, 6].sliced(2, 3)); | assert(a != [[1, 2, 3], [4, 5, 6]]); | | assert(a == [1, 2, 3, 4].sliced(2, 2)); | assert(a == [[1, 2], [3, 4]]); | | assert(a != [9, 2, 3, 4].sliced(2, 2)); | assert(a != [[9, 2], [3, 4]]); | } | | static if (doUnittest) | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation: slice; | import mir.ndslice.topology : iota; | assert(iota(2, 3).slice[0 .. $ - 2] == iota([4, 3], 2)[0 .. $ - 4]); | } | | /++ | `Slice!(IotaIterator!size_t)` is the basic type for `[a .. 
b]` syntax for all ndslice based code. | +/ | Slice!(IotaIterator!size_t) opSlice(size_t dimension)(size_t i, size_t j) @safe scope const | if (dimension < N) | in | { | assert(i <= j, | "Slice.opSlice!" ~ dimension.stringof ~ ": the left opSlice boundary must be less than or equal to the right bound."); | enum errorMsg = ": right opSlice boundary must be less than or equal to the length of the given dimension."; | assert(j <= _lengths[dimension], | "Slice.opSlice!" ~ dimension.stringof ~ errorMsg); | } | do | { | return typeof(return)(j - i, typeof(return).Iterator(i)); | } | | /++ | $(BOLD Fully defined index) | +/ | auto ref opIndex()(size_t[N] _indexes...) scope return @trusted | { | return _iterator[indexStride(_indexes)]; | } | | /// ditto | auto ref opIndex()(size_t[N] _indexes...) scope return const @trusted | { | static if (is(typeof(_iterator[indexStride(_indexes)]))) | return _iterator[indexStride(_indexes)]; | else | return .lightConst(.lightScope(_iterator))[indexStride(_indexes)]; | } | | /// ditto | auto ref opIndex()(size_t[N] _indexes...) scope return immutable @trusted | { | static if (is(typeof(_iterator[indexStride(_indexes)]))) | return _iterator[indexStride(_indexes)]; | else | return .lightImmutable(.lightScope(_iterator))[indexStride(_indexes)]; | } | | /++ | $(BOLD Partially defined index) | +/ | auto opIndex(size_t I)(size_t[I] _indexes...) scope return @trusted | if (I && I < N) | { | enum size_t diff = N - I; | alias Ret = Slice!(Iterator, diff, diff == 1 && kind == Canonical ? Contiguous : kind); | static if (I < S) | return Ret(_lengths[I .. N], _strides[I .. S], _iterator + indexStride(_indexes)); | else | return Ret(_lengths[I .. N], _iterator + indexStride(_indexes)); | } | | /// ditto | auto opIndex(size_t I)(size_t[I] _indexes...) scope return const | if (I && I < N) | { | return this.lightConst.opIndex(_indexes); | } | | /// ditto | auto opIndex(size_t I)(size_t[I] _indexes...) scope return immutable | if (I && I < N) | { | return this.lightImmutable.opIndex(_indexes); | } | | /++ | $(BOLD Partially or fully defined slice.) | +/ | auto opIndex(Slices...)(Slices slices) scope return @trusted | if (isPureSlice!Slices) | { | static if (Slices.length) | { | enum size_t j(size_t n) = n - Filter!(isIndex, Slices[0 .. n]).length; | enum size_t F = PureIndexLength!Slices; | enum size_t S = Slices.length; | static assert(N - F > 0); | size_t stride; | static if (Slices.length == 1) | enum K = kind; | else | static if (kind == Universal || Slices.length == N && isIndex!(Slices[$-1])) | enum K = Universal; | else | static if (Filter!(isIndex, Slices[0 .. $-1]).length == Slices.length - 1 || N - F == 1) | enum K = Contiguous; | else | enum K = Canonical; | alias Ret = Slice!(Iterator, N - F, K); | auto structure_ = Ret._Structure.init; | | enum bool shrink = kind == Canonical && slices.length == N; | static if (shrink) | { | { | enum i = Slices.length - 1; | auto slice = slices[i]; | static if (isIndex!(Slices[i])) | { | assert(slice < _lengths[i], "Slice.opIndex: index must be less than length"); | stride += slice; | } | else | { | stride += slice._iterator._index; | structure_[0][j!i] = slice._lengths[0]; | } | } | } | static if (kind == Universal || kind == Canonical) | { | foreach_reverse (i, slice; slices[0 .. 
$ - shrink]) //static | { | static if (isIndex!(Slices[i])) | { | assert(slice < _lengths[i], "Slice.opIndex: index must be less than length"); | stride += _strides[i] * slice; | } | else | { | stride += _strides[i] * slice._iterator._index; | structure_[0][j!i] = slice._lengths[0]; | structure_[1][j!i] = _strides[i]; | } | } | } | else | { | ptrdiff_t ball = this._stride!(slices.length - 1); | foreach_reverse (i, slice; slices) //static | { | static if (isIndex!(Slices[i])) | { | assert(slice < _lengths[i], "Slice.opIndex: index must be less than length"); | stride += ball * slice; | } | else | { | stride += ball * slice._iterator._index; | structure_[0][j!i] = slice._lengths[0]; | static if (j!i < Ret.S) | structure_[1][j!i] = ball; | } | static if (i) | ball *= _lengths[i]; | } | } | foreach (i; Iota!(Slices.length, N)) | structure_[0][i - F] = _lengths[i]; | foreach (i; Iota!(Slices.length, N)) | static if (Ret.S > i - F) | structure_[1][i - F] = _strides[i]; | | return Ret(structure_, _iterator + stride); | } | else | { | return this; | } | } | | static if (doUnittest) | /// | pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto slice = slice!int(5, 3); | | /// Fully defined slice | assert(slice[] == slice); | auto sublice = slice[0..$-2, 1..$]; | | /// Partially defined slice | auto row = slice[3]; | auto col = slice[0..$, 1]; | } | | /++ | $(BOLD Indexed slice.) | +/ | auto opIndex(Slices...)(scope return Slices slices) scope return | if (isIndexedSlice!Slices) | { | import mir.ndslice.topology: indexed, cartesian; | static if (Slices.length == 1) | alias index = slices[0]; | else | auto index = slices.cartesian; | return this.indexed(index); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation: slice; | auto sli = slice!int(4, 3); | auto idx = slice!(size_t[2])(3); | idx[] = [ | cast(size_t[2])[0, 2], | cast(size_t[2])[3, 1], | cast(size_t[2])[2, 0]]; | | // equivalent to: | // import mir.ndslice.topology: indexed; | // sli.indexed(indx)[] = 1; | sli[idx] = 1; | | assert(sli == [ | [0, 0, 1], | [0, 0, 0], | [1, 0, 0], | [0, 1, 0], | ]); | | foreach (row; sli[[1, 3].sliced]) | row[] += 2; | | assert(sli == [ | [0, 0, 1], | [2, 2, 2], // <-- += 2 | [1, 0, 0], | [2, 3, 2], // <-- += 2 | ]); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology: iota; | import mir.ndslice.allocation: slice; | auto sli = slice!int(5, 6); | | // equivalent to | // import mir.ndslice.topology: indexed, cartesian; | // auto a = [0, sli.length!0 / 2, sli.length!0 - 1].sliced; | // auto b = [0, sli.length!1 / 2, sli.length!1 - 1].sliced; | // auto c = cartesian(a, b); | // auto minor = sli.indexed(c); | auto minor = sli[[0, $ / 2, $ - 1].sliced, [0, $ / 2, $ - 1].sliced]; | | minor[] = iota!int([3, 3], 1); | | assert(sli == [ | // ↓ ↓ ↓︎ | [1, 0, 0, 2, 0, 3], // <--- | [0, 0, 0, 0, 0, 0], | [4, 0, 0, 5, 0, 6], // <--- | [0, 0, 0, 0, 0, 0], | [7, 0, 0, 8, 0, 9], // <--- | ]); | } | | /++ | Element-wise binary operator overloading. | Returns: | lazy slice of the same kind and the same structure | Note: | Does not allocate neither new slice nor a closure. 
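Returning to the indexing overloads above: a partially defined index or slice returns a view, not a copy, so mutations through the view are visible in the parent. A short sketch:

-------
version(mir_test) unittest
{
    import mir.ndslice.allocation: slice;

    auto m = slice!int(3, 4);
    auto row = m[1];          // 1-D view over the second row
    row[] = 7;
    assert(m[1, 3] == 7);
    assert(m[0, 0] == 0);     // other rows are untouched
}
-------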
| +/ | auto opUnary(string op)() scope return | if (op == "*" || op == "~" || op == "-" || op == "+") | { | import mir.ndslice.topology: map; | static if (op == "+") | return this; | else | return this.map!(op ~ "a"); | } | | static if (doUnittest) | /// | version(mir_test) unittest | { | import mir.ndslice.topology; | | auto payload = [1, 2, 3, 4]; | auto s = iota([payload.length], payload.ptr); // slice of references; | assert(s[1] == payload.ptr + 1); | | auto c = *s; // the same as s.map!"*a" | assert(c[1] == *s[1]); | | *s[1] = 3; | assert(c[1] == *s[1]); | } | | /++ | Element-wise operator overloading for scalars. | Params: | value = a scalar | Returns: | lazy slice of the same kind and the same structure | Note: | Does not allocate neither new slice nor a closure. | +/ | auto opBinary(string op, T)(scope return T value) scope return | if(!isSlice!T) | { | import mir.ndslice.topology: vmap; | return this.vmap(LeftOp!(op, ImplicitlyUnqual!T)(value)); | } | | /// ditto | auto opBinaryRight(string op, T)(scope return T value) scope return | if(!isSlice!T) | { | import mir.ndslice.topology: vmap; | return this.vmap(RightOp!(op, ImplicitlyUnqual!T)(value)); | } | | static if (doUnittest) | /// | @safe pure nothrow @nogc version(mir_test) unittest | { | import mir.ndslice.topology; | | // 0 1 2 3 | auto s = iota([4]); | // 0 1 2 0 | assert(s % 3 == iota([4]).map!"a % 3"); | // 0 2 4 6 | assert(2 * s == iota([4], 0, 2)); | } | | static if (doUnittest) | /// | @safe pure nothrow @nogc version(mir_test) unittest | { | import mir.ndslice.topology; | | // 0 1 2 3 | auto s = iota([4]); | // 0 1 4 9 | assert(s ^^ 2.0 == iota([4]).map!"a ^^ 2.0"); | } | | /++ | Element-wise operator overloading for slices. | Params: | rhs = a slice of the same shape. | Returns: | lazy slice the same shape that has $(LREF Contiguous) kind | Note: | Binary operator overloading is allowed if both slices are contiguous or one-dimensional. | $(BR) | Does not allocate neither new slice nor a closure. | +/ | auto opBinary(string op, RIterator, size_t RN, SliceKind rkind) | (scope return Slice!(RIterator, RN, rkind) rhs) scope return | if(N == RN && (kind == Contiguous && rkind == Contiguous || N == 1) && op != "~") | { | import mir.ndslice.topology: zip, map; | return zip(this, rhs).map!("a " ~ op ~ " b"); | } | | static if (doUnittest) | /// | @safe pure nothrow @nogc version(mir_test) unittest | { | import mir.ndslice.topology: iota, map, zip; | | auto s = iota([2, 3]); | auto c = iota([2, 3], 5, 8); | assert(s * s + c == s.map!"a * a".zip(c).map!"a + b"); | } | | /++ | Duplicates slice. | Returns: GC-allocated Contiguous mutable slice. 
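The element-wise operator overloads above are lazy: nothing is allocated until the result is materialized, for example with `mir.ndslice.allocation.slice` or with the `dup` member documented here. A sketch:

-------
version(mir_test) unittest
{
    import mir.ndslice.allocation: slice;
    import mir.ndslice.topology: iota;

    auto a = iota!int([4], 1);      // 1 2 3 4
    auto expr = a * 10 + 5;         // lazy expression, no allocation
    auto eager = expr.slice;        // GC-allocated copy of the evaluated result
    assert(eager == [15, 25, 35, 45]);
}
-------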
| See_also: $(LREF Slice.idup) | +/ | Slice!(Unqual!DeepElement*, N) | dup()() scope @property | { | if (__ctfe) | { | import mir.ndslice.topology: flattened; | import mir.array.allocation: array; | return this.flattened.array.dup.sliced(this.shape); | } | else | { | import mir.ndslice.allocation: uninitSlice; | import mir.conv: emplaceRef; | alias E = this.DeepElement; | | auto result = (() @trusted => this.shape.uninitSlice!(Unqual!E))(); | | import mir.algorithm.iteration: each; | each!(emplaceRef!(Unqual!E))(result, this); | | return result; | } | } | | /// ditto | Slice!(immutable(DeepElement)*, N) | dup()() scope const @property | { | this.lightScope.dup; | } | | /// ditto | Slice!(immutable(DeepElement)*, N) | dup()() scope immutable @property | { | this.lightScope.dup; | } | | static if (doUnittest) | /// | @safe pure version(mir_test) unittest | { | import mir.ndslice; | auto x = 3.iota!int; | Slice!(immutable(int)*) imm = x.idup; | Slice!(int*) mut = imm.dup; | assert(imm == x); | assert(mut == x); | } | | /++ | Duplicates slice. | Returns: GC-allocated Contiguous immutable slice. | See_also: $(LREF Slice.dup) | +/ | Slice!(immutable(DeepElement)*, N) | idup()() scope @property | { | if (__ctfe) | { | import mir.ndslice.topology: flattened; | import mir.array.allocation: array; | return this.flattened.array.idup.sliced(this.shape); | } | else | { | import mir.ndslice.allocation: uninitSlice; | import mir.conv: emplaceRef; | alias E = this.DeepElement; | | auto result = (() @trusted => this.shape.uninitSlice!(Unqual!E))(); | | import mir.algorithm.iteration: each; | each!(emplaceRef!(immutable E))(result, this); | alias R = typeof(return); | return (() @trusted => cast(R) result)(); | } | } | | /// ditto | Slice!(immutable(DeepElement)*, N) | idup()() scope const @property | { | this.lightScope.idup; | } | | /// ditto | Slice!(immutable(DeepElement)*, N) | idup()() scope immutable @property | { | this.lightScope.idup; | } | | static if (doUnittest) | /// | @safe pure version(mir_test) unittest | { | import mir.ndslice; | auto x = 3.iota!int; | Slice!(int*) mut = x.dup; | Slice!(immutable(int)*) imm = mut.idup; | assert(imm == x); | assert(mut == x); | } | | static if (isMutable!DeepElement) | { | private void opIndexOpAssignImplSlice(string op, RIterator, size_t RN, SliceKind rkind) | (Slice!(RIterator, RN, rkind) value) scope | { | static if (N > 1 && RN == N && kind == Contiguous && rkind == Contiguous) | { | import mir.ndslice.topology : flattened; | this.flattened.opIndexOpAssignImplSlice!op(value.flattened); | } | else | { | auto ls = this; | do | { | static if (N > RN) | { | ls.front.opIndexOpAssignImplSlice!op(value); | } | else | { | static if (ls.N == 1) | { | static if (isInstanceOf!(SliceIterator, Iterator)) | { | static if (isSlice!(typeof(value.front))) | ls.front.opIndexOpAssignImplSlice!op(value.front); | else | static if (isDynamicArray!(typeof(value.front))) | ls.front.opIndexOpAssignImplSlice!op(value.front); | else | ls.front.opIndexOpAssignImplValue!op(value.front); | } | else | static if (op == "^^" && isFloatingPoint!(typeof(ls.front)) && isFloatingPoint!(typeof(value.front))) | { | import mir.math.common: pow; | ls.front = pow(ls.front, value.front); | } | else | mixin("ls.front " ~ op ~ "= value.front;"); | } | else | static if (RN == 1) | ls.front.opIndexOpAssignImplValue!op(value.front); | else | ls.front.opIndexOpAssignImplSlice!op(value.front); | value.popFront; | } | ls.popFront; | } | while (ls._lengths[0]); | } | } | | /++ | Assignment of a value of 
`Slice` type to a $(B fully defined slice). | +/ | void opIndexAssign(RIterator, size_t RN, SliceKind rkind, Slices...) | (Slice!(RIterator, RN, rkind) value, Slices slices) scope return | if (isFullPureSlice!Slices || isIndexedSlice!Slices) | { | auto sl = this.lightScope.opIndex(slices); | assert(_checkAssignLengths(sl, value)); | if(!sl.anyRUEmpty) | sl.opIndexOpAssignImplSlice!""(value); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | auto b = [1, 2, 3, 4].sliced(2, 2); | | a[0..$, 0..$-1] = b; | assert(a == [[1, 2, 0], [3, 4, 0]]); | | // fills both rows with b[0] | a[0..$, 0..$-1] = b[0]; | assert(a == [[1, 2, 0], [1, 2, 0]]); | | a[1, 0..$-1] = b[1]; | assert(a[1] == [3, 4, 0]); | | a[1, 0..$-1][] = b[0]; | assert(a[1] == [1, 2, 0]); | } | | static if (doUnittest) | /// Left slice is packed | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : blocks, iota; | import mir.ndslice.allocation : slice; | auto a = slice!int(4, 4); | a.blocks(2, 2)[] = iota!int(2, 2); | | assert(a == | [[0, 0, 1, 1], | [0, 0, 1, 1], | [2, 2, 3, 3], | [2, 2, 3, 3]]); | } | | static if (doUnittest) | /// Both slices are packed | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.topology : blocks, iota, pack; | import mir.ndslice.allocation : slice; | auto a = slice!int(4, 4); | a.blocks(2, 2)[] = iota!int(2, 2, 2).pack!1; | | assert(a == | [[0, 1, 2, 3], | [0, 1, 2, 3], | [4, 5, 6, 7], | [4, 5, 6, 7]]); | } | | void opIndexOpAssignImplArray(string op, T, Slices...)(T[] value) scope | { | auto ls = this; | assert(ls.length == value.length, __FUNCTION__ ~ ": argument must have the same length."); | static if (N == 1) | { | do | { | static if (ls.N == 1) | { | static if (isInstanceOf!(SliceIterator, Iterator)) | { | static if (isSlice!(typeof(value[0]))) | ls.front.opIndexOpAssignImplSlice!op(value[0]); | else | static if (isDynamicArray!(typeof(value[0]))) | ls.front.opIndexOpAssignImplSlice!op(value[0]); | else | ls.front.opIndexOpAssignImplValue!op(value[0]); | } | else | static if (op == "^^" && isFloatingPoint!(typeof(ls.front)) && isFloatingPoint!(typeof(value[0]))) | { | import mir.math.common: pow; | ls.front = pow(ls.front, value[0]); | } | else | mixin("ls.front " ~ op ~ "= value[0];"); | } | else | mixin("ls.front[] " ~ op ~ "= value[0];"); | value = value[1 .. $]; | ls.popFront; | } | while (ls.length); | } | else | static if (N == DynamicArrayDimensionsCount!(T[])) | { | do | { | ls.front.opIndexOpAssignImplArray!op(value[0]); | value = value[1 .. $]; | ls.popFront; | } | while (ls.length); | } | else | { | do | { | ls.front.opIndexOpAssignImplArray!op(value); | ls.popFront; | } | while (ls.length); | } | } | | /++ | Assignment of a regular multidimensional array to a $(B fully defined slice). 
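Restating the broadcasting rule shown above in isolation: when the right-hand side has fewer dimensions than the addressed region, it is assigned to every subslice along the leading dimensions. A sketch:

-------
version(mir_test) unittest
{
    import mir.ndslice.allocation: slice;
    import mir.ndslice.topology: iota;

    auto a = slice!int(3, 4);
    a[] = iota!int([4], 1);         // 1-D right-hand side, 2-D target
    assert(a == [[1, 2, 3, 4],
                 [1, 2, 3, 4],
                 [1, 2, 3, 4]]);
}
-------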
| +/ | void opIndexAssign(T, Slices...)(T[] value, Slices slices) scope return | if ((isFullPureSlice!Slices || isIndexedSlice!Slices) | && (!isDynamicArray!DeepElement || isDynamicArray!T) | && DynamicArrayDimensionsCount!(T[]) - DynamicArrayDimensionsCount!DeepElement <= typeof(this.opIndex(slices)).N) | { | auto sl = this.lightScope.opIndex(slices); | sl.opIndexOpAssignImplArray!""(value); | } | | static if (doUnittest) | /// | pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | auto b = [[1, 2], [3, 4]]; | | a[] = [[1, 2, 3], [4, 5, 6]]; | assert(a == [[1, 2, 3], [4, 5, 6]]); | | a[0..$, 0..$-1] = [[1, 2], [3, 4]]; | assert(a == [[1, 2, 3], [3, 4, 6]]); | | a[0..$, 0..$-1] = [1, 2]; | assert(a == [[1, 2, 3], [1, 2, 6]]); | | a[1, 0..$-1] = [3, 4]; | assert(a[1] == [3, 4, 6]); | | a[1, 0..$-1][] = [3, 4]; | assert(a[1] == [3, 4, 6]); | } | | static if (doUnittest) | /// Packed slices | pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : blocks; | auto a = slice!int(4, 4); | a.blocks(2, 2)[] = [[0, 1], [2, 3]]; | | assert(a == | [[0, 0, 1, 1], | [0, 0, 1, 1], | [2, 2, 3, 3], | [2, 2, 3, 3]]); | } | | | private void opIndexOpAssignImplConcatenation(string op, T)(T value) scope | { | auto sl = this; | static if (concatenationDimension!T) | { | if (!sl.empty) do | { | static if (op == "") | sl.front.opIndexAssign(value.front); | else | sl.front.opIndexOpAssign!op(value.front); | value.popFront; | sl.popFront; | } | while(!sl.empty); | } | else | { | foreach (ref slice; value._slices) | { | static if (op == "") | sl[0 .. slice.length].opIndexAssign(slice); | else | sl[0 .. slice.length].opIndexOpAssign!op(slice); | | sl = sl[slice.length .. $]; | } | assert(sl.empty); | } | } | | /// | void opIndexAssign(T, Slices...)(T concatenation, Slices slices) scope return | if ((isFullPureSlice!Slices || isIndexedSlice!Slices) && isConcatenation!T) | { | auto sl = this.lightScope.opIndex(slices); | static assert(typeof(sl).N == T.N, "incompatible dimension count"); | sl.opIndexOpAssignImplConcatenation!""(concatenation); | } | | /++ | Assignment of a value (e.g. a number) to a $(B fully defined slice). | +/ | void opIndexAssign(T, Slices...)(T value, Slices slices) scope return | if ((isFullPureSlice!Slices || isIndexedSlice!Slices) | && (!isDynamicArray!T || isDynamicArray!DeepElement) | && DynamicArrayDimensionsCount!T == DynamicArrayDimensionsCount!DeepElement | && !isSlice!T | && !isConcatenation!T) | { | auto sl = this.lightScope.opIndex(slices); | if(!sl.anyRUEmpty) | sl.opIndexOpAssignImplValue!""(value); | } | | static if (doUnittest) | /// | @safe pure nothrow | version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | | a[] = 9; | assert(a == [[9, 9, 9], [9, 9, 9]]); | | a[0..$, 0..$-1] = 1; | assert(a == [[1, 1, 9], [1, 1, 9]]); | | a[0..$, 0..$-1] = 2; | assert(a == [[2, 2, 9], [2, 2, 9]]); | | a[1, 0..$-1] = 3; | //assert(a[1] == [3, 3, 9]); | | a[1, 0..$-1] = 4; | //assert(a[1] == [4, 4, 9]); | | a[1, 0..$-1][] = 5; | | assert(a[1] == [5, 5, 9]); | } | | static if (doUnittest) | /// Packed slices have the same behavior. | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | import mir.ndslice.topology : pack; | auto a = slice!int(2, 3).pack!1; | | a[] = 9; | //assert(a == [[9, 9, 9], [9, 9, 9]]); | } | | /++ | Assignment of a value (e.g. a number) to a $(B fully defined index). 
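The concatenation overload above has no example nearby; a sketch, assuming `mir.ndslice.concatenation.concatenation`, which assigns a lazy concatenation without materializing it first:

-------
version(mir_test) unittest
{
    // `concatenation` is assumed to come from mir.ndslice.concatenation
    import mir.ndslice.allocation: slice;
    import mir.ndslice.concatenation: concatenation;
    import mir.ndslice.topology: iota;

    auto a = slice!int(2, 4);
    a[] = concatenation!1(iota!int(2, 2), iota!int([2, 2], 4));
    assert(a == [[0, 1, 4, 5],
                 [2, 3, 6, 7]]);
}
-------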
| +/ | auto ref opIndexAssign(T)(T value, size_t[N] _indexes...) scope return @trusted | { | // check assign safety | static auto ref fun(ref DeepElement t, ref T v) @safe | { | return t = v; | } | return _iterator[indexStride(_indexes)] = value; | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | | a[1, 2] = 3; | assert(a[1, 2] == 3); | } | | static if (doUnittest) | @safe pure nothrow version(mir_test) unittest | { | auto a = new int[6].sliced(2, 3); | | a[[1, 2]] = 3; | assert(a[[1, 2]] == 3); | } | | /++ | Op Assignment `op=` of a value (e.g. a number) to a $(B fully defined index). | +/ | auto ref opIndexOpAssign(string op, T)(T value, size_t[N] _indexes...) scope return @trusted | { | // check op safety | static auto ref fun(ref DeepElement t, ref T v) @safe | { | return mixin(`t` ~ op ~ `= v`); | } | auto str = indexStride(_indexes); | static if (op == "^^" && isFloatingPoint!DeepElement && isFloatingPoint!(typeof(value))) | { | import mir.math.common: pow; | _iterator[str] = pow(_iterator[str], value); | } | else | return mixin (`_iterator[str] ` ~ op ~ `= value`); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | | a[1, 2] += 3; | assert(a[1, 2] == 3); | } | | static if (doUnittest) | @safe pure nothrow version(mir_test) unittest | { | auto a = new int[6].sliced(2, 3); | | a[[1, 2]] += 3; | assert(a[[1, 2]] == 3); | } | | /++ | Op Assignment `op=` of a value of `Slice` type to a $(B fully defined slice). | +/ | void opIndexOpAssign(string op, RIterator, SliceKind rkind, size_t RN, Slices...) | (Slice!(RIterator, RN, rkind) value, Slices slices) scope return | if (isFullPureSlice!Slices || isIndexedSlice!Slices) | { | auto sl = this.lightScope.opIndex(slices); | assert(_checkAssignLengths(sl, value)); | if(!sl.anyRUEmpty) | sl.opIndexOpAssignImplSlice!op(value); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | auto b = [1, 2, 3, 4].sliced(2, 2); | | a[0..$, 0..$-1] += b; | assert(a == [[1, 2, 0], [3, 4, 0]]); | | a[0..$, 0..$-1] += b[0]; | assert(a == [[2, 4, 0], [4, 6, 0]]); | | a[1, 0..$-1] += b[1]; | assert(a[1] == [7, 10, 0]); | | a[1, 0..$-1][] += b[0]; | assert(a[1] == [8, 12, 0]); | } | | static if (doUnittest) | /// Left slice is packed | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : blocks, iota; | auto a = slice!size_t(4, 4); | a.blocks(2, 2)[] += iota(2, 2); | | assert(a == | [[0, 0, 1, 1], | [0, 0, 1, 1], | [2, 2, 3, 3], | [2, 2, 3, 3]]); | } | | static if (doUnittest) | /// Both slices are packed | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : blocks, iota, pack; | auto a = slice!size_t(4, 4); | a.blocks(2, 2)[] += iota(2, 2, 2).pack!1; | | assert(a == | [[0, 1, 2, 3], | [0, 1, 2, 3], | [4, 5, 6, 7], | [4, 5, 6, 7]]); | } | | /++ | Op Assignment `op=` of a regular multidimensional array to a $(B fully defined slice). 
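Before the array overload below: the slice overload of `op=` shown above also accepts lazy right-hand sides directly, so no temporary is allocated. A sketch:

-------
version(mir_test) unittest
{
    import mir.ndslice.allocation: slice;
    import mir.ndslice.topology: iota;

    auto a = slice!double(2, 3);
    a[] = 1;
    a[] *= iota([2, 3], 1);     // element-wise op-assign with a lazy rhs
    assert(a == [[1, 2, 3], [4, 5, 6]]);
}
-------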
| +/ | void opIndexOpAssign(string op, T, Slices...)(T[] value, Slices slices) scope return | if (isFullPureSlice!Slices | && (!isDynamicArray!DeepElement || isDynamicArray!T) | && DynamicArrayDimensionsCount!(T[]) - DynamicArrayDimensionsCount!DeepElement <= typeof(this.opIndex(slices)).N) | { | auto sl = this.lightScope.opIndex(slices); | sl.opIndexOpAssignImplArray!op(value); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation : slice; | auto a = slice!int(2, 3); | | a[0..$, 0..$-1] += [[1, 2], [3, 4]]; | assert(a == [[1, 2, 0], [3, 4, 0]]); | | a[0..$, 0..$-1] += [1, 2]; | assert(a == [[2, 4, 0], [4, 6, 0]]); | | a[1, 0..$-1] += [3, 4]; | assert(a[1] == [7, 10, 0]); | | a[1, 0..$-1][] += [1, 2]; | assert(a[1] == [8, 12, 0]); | } | | static if (doUnittest) | /// Packed slices | @safe pure nothrow | version(mir_test) unittest | { | import mir.ndslice.allocation : slice; | import mir.ndslice.topology : blocks; | auto a = slice!int(4, 4); | a.blocks(2, 2)[] += [[0, 1], [2, 3]]; | | assert(a == | [[0, 0, 1, 1], | [0, 0, 1, 1], | [2, 2, 3, 3], | [2, 2, 3, 3]]); | } | | private void opIndexOpAssignImplValue(string op, T)(T value) scope return | { | static if (N > 1 && kind == Contiguous) | { | import mir.ndslice.topology : flattened; | this.flattened.opIndexOpAssignImplValue!op(value); | } | else | { | auto ls = this; | do | { | static if (N == 1) | { | static if (isInstanceOf!(SliceIterator, Iterator)) | ls.front.opIndexOpAssignImplValue!op(value); | else | mixin (`ls.front ` ~ op ~ `= value;`); | } | else | ls.front.opIndexOpAssignImplValue!op(value); | ls.popFront; | } | while(ls._lengths[0]); | } | } | | /++ | Op Assignment `op=` of a value (e.g. a number) to a $(B fully defined slice). | +/ | void opIndexOpAssign(string op, T, Slices...)(T value, Slices slices) scope return | if ((isFullPureSlice!Slices || isIndexedSlice!Slices) | && (!isDynamicArray!T || isDynamicArray!DeepElement) | && DynamicArrayDimensionsCount!T == DynamicArrayDimensionsCount!DeepElement | && !isSlice!T | && !isConcatenation!T) | { | auto sl = this.lightScope.opIndex(slices); | if(!sl.anyRUEmpty) | sl.opIndexOpAssignImplValue!op(value); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | | a[] += 1; | assert(a == [[1, 1, 1], [1, 1, 1]]); | | a[0..$, 0..$-1] += 2; | assert(a == [[3, 3, 1], [3, 3, 1]]); | | a[1, 0..$-1] += 3; | assert(a[1] == [6, 6, 1]); | } | | /// | void opIndexOpAssign(string op,T, Slices...)(T concatenation, Slices slices) scope return | if ((isFullPureSlice!Slices || isIndexedSlice!Slices) && isConcatenation!T) | { | auto sl = this.lightScope.opIndex(slices); | static assert(typeof(sl).N == concatenation.N); | sl.opIndexOpAssignImplConcatenation!op(concatenation); | } | | static if (doUnittest) | /// Packed slices have the same behavior. | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | import mir.ndslice.topology : pack; | auto a = slice!int(2, 3).pack!1; | | a[] += 9; | assert(a == [[9, 9, 9], [9, 9, 9]]); | } | | | /++ | Increment `++` and Decrement `--` operators for a $(B fully defined index). | +/ | auto ref opIndexUnary(string op)(size_t[N] _indexes...) 
scope return | @trusted | // @@@workaround@@@ for Issue 16473 | //if (op == `++` || op == `--`) | { | // check op safety | static auto ref fun(DeepElement t) @safe | { | return mixin(op ~ `t`); | } | return mixin (op ~ `_iterator[indexStride(_indexes)]`); | } | | static if (doUnittest) | /// | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | | ++a[1, 2]; | assert(a[1, 2] == 1); | } | | // Issue 16473 | static if (doUnittest) | @safe pure nothrow version(mir_test) unittest | { | import mir.ndslice.allocation; | auto sl = slice!double(2, 5); | auto d = -sl[0, 1]; | } | | static if (doUnittest) | @safe pure nothrow version(mir_test) unittest | { | auto a = new int[6].sliced(2, 3); | | ++a[[1, 2]]; | assert(a[[1, 2]] == 1); | } | | private void opIndexUnaryImpl(string op, Slices...)(Slices slices) scope | { | auto ls = this; | do | { | static if (N == 1) | { | static if (isInstanceOf!(SliceIterator, Iterator)) | ls.front.opIndexUnaryImpl!op; | else | mixin (op ~ `ls.front;`); | } | else | ls.front.opIndexUnaryImpl!op; | ls.popFront; | } | while(ls._lengths[0]); | } | | /++ | Increment `++` and Decrement `--` operators for a $(B fully defined slice). | +/ | void opIndexUnary(string op, Slices...)(Slices slices) scope return | if (isFullPureSlice!Slices && (op == `++` || op == `--`)) | { | auto sl = this.lightScope.opIndex(slices); | if (!sl.anyRUEmpty) | sl.opIndexUnaryImpl!op; | } | | static if (doUnittest) | /// | @safe pure nothrow | version(mir_test) unittest | { | import mir.ndslice.allocation; | auto a = slice!int(2, 3); | | ++a[]; | assert(a == [[1, 1, 1], [1, 1, 1]]); | | --a[1, 0..$-1]; | | assert(a[1] == [0, 0, 1]); | } | } |} | |/// ditto |alias Slice = mir_slice; | |/++ |Slicing, indexing, and arithmetic operations. |+/ |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.dynamic : transposed; | import mir.ndslice.topology : iota, universal; | auto tensor = iota(3, 4, 5).slice; | | assert(tensor[1, 2] == tensor[1][2]); | assert(tensor[1, 2, 3] == tensor[1][2][3]); | | assert( tensor[0..$, 0..$, 4] == tensor.universal.transposed!2[4]); | assert(&tensor[0..$, 0..$, 4][1, 2] is &tensor[1, 2, 4]); | | tensor[1, 2, 3]++; //`opIndex` returns value by reference. | --tensor[1, 2, 3]; //`opUnary` | | ++tensor[]; | tensor[] -= 1; | | // `opIndexAssing` accepts only fully defined indexes and slices. | // Use an additional empty slice `[]`. | static assert(!__traits(compiles, tensor[0 .. 2] *= 2)); | | tensor[0 .. 2][] *= 2; //OK, empty slice | tensor[0 .. 2, 3, 0..$] /= 2; //OK, 3 index or slice positions are defined. | | //fully defined index may be replaced by a static array | size_t[3] index = [1, 2, 3]; | assert(tensor[index] == tensor[1, 2, 3]); |} | |/++ |Operations with rvalue slices. 
|+/ |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.topology: universal; | import mir.ndslice.dynamic: transposed, everted; | | auto tensor = slice!int(3, 4, 5).universal; | auto matrix = slice!int(3, 4).universal; | auto vector = slice!int(3); | | foreach (i; 0..3) | vector[i] = i; | | // fills matrix columns | matrix.transposed[] = vector; | | // fills tensor with vector | // transposed tensor shape is (4, 5, 3) | // vector shape is ( 3) | tensor.transposed!(1, 2)[] = vector; | | // transposed tensor shape is (5, 3, 4) | // matrix shape is ( 3, 4) | tensor.transposed!2[] += matrix; | | // transposed tensor shape is (5, 4, 3) | // transposed matrix shape is ( 4, 3) | tensor.everted[] ^= matrix.transposed; // XOR |} | |/++ |Creating a slice from text. |See also $(MREF std, format). |+/ |version(mir_test) unittest |{ | import mir.ndslice.allocation; | import std.algorithm, std.conv, std.exception, std.format, | std.functional, std.string, std.range; | | Slice!(int*, 2) toMatrix(string str) | { | string[][] data = str.lineSplitter.filter!(not!empty).map!split.array; | | size_t rows = data .length.enforce("empty input"); | size_t columns = data[0].length.enforce("empty first row"); | | data.each!(a => enforce(a.length == columns, "rows have different lengths")); | auto slice = slice!int(rows, columns); | foreach (i, line; data) | foreach (j, num; line) | slice[i, j] = num.to!int; | return slice; | } | | auto input = "\r1 2 3\r\n 4 5 6\n"; | | auto matrix = toMatrix(input); | assert(matrix == [[1, 2, 3], [4, 5, 6]]); | | // back to text | auto text2 = format("%(%(%s %)\n%)\n", matrix); | assert(text2 == "1 2 3\n4 5 6\n"); |} | |// Slicing |@safe @nogc pure nothrow version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | auto a = iota(10, 20, 30, 40); | auto b = a[0..$, 10, 4 .. 27, 4]; | auto c = b[2 .. 9, 5 .. 10]; | auto d = b[3..$, $-2]; | assert(b[4, 17] == a[4, 10, 21, 4]); | assert(c[1, 2] == a[3, 10, 11, 4]); | assert(d[3] == a[6, 10, 25, 4]); |} | |// Operator overloading. # 1 |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.topology : iota; | | auto fun(ref sizediff_t x) { x *= 3; } | | auto tensor = iota(8, 9, 10).slice; | | ++tensor[]; | fun(tensor[0, 0, 0]); | | assert(tensor[0, 0, 0] == 3); | | tensor[0, 0, 0] *= 4; | tensor[0, 0, 0]--; | assert(tensor[0, 0, 0] == 11); |} | |// Operator overloading. # 2 |pure nothrow version(mir_test) unittest |{ | import std.algorithm.iteration : map; | import mir.array.allocation : array; | //import std.bigint; | import std.range : iota; | | auto matrix = 72 | .iota | //.map!(i => BigInt(i)) | .array | .sliced(8, 9); | | matrix[3 .. 6, 2] += 100; | foreach (i; 0 .. 8) | foreach (j; 0 .. 9) | if (i >= 3 && i < 6 && j == 2) | assert(matrix[i, j] >= 100); | else | assert(matrix[i, j] < 100); |} | |// Operator overloading. 
# 3 |pure nothrow version(mir_test) unittest |{ | import mir.ndslice.allocation; | import mir.ndslice.topology : iota; | | auto matrix = iota(8, 9).slice; | matrix[] = matrix; | matrix[] += matrix; | assert(matrix[2, 3] == (2 * 9 + 3) * 2); | | auto vec = iota([9], 100); | matrix[] = vec; | foreach (v; matrix) | assert(v == vec); | | matrix[] += vec; | foreach (vector; matrix) | foreach (elem; vector) | assert(elem >= 200); |} | |// Type deduction |version(mir_test) unittest |{ | // Arrays | foreach (T; AliasSeq!(int, const int, immutable int)) | static assert(is(typeof((T[]).init.sliced(3, 4)) == Slice!(T*, 2))); | | // Container Array | import std.container.array; | Array!int ar; | ar.length = 12; | auto arSl = ar[].slicedField(3, 4); |} | |// Test for map #1 |version(mir_test) unittest |{ | import std.algorithm.iteration : map; | import std.range.primitives; | auto slice = [1, 2, 3, 4].sliced(2, 2); | | auto r = slice.map!(a => a.map!(a => a * 6)); | assert(r.front.front == 6); | assert(r.front.back == 12); | assert(r.back.front == 18); | assert(r.back.back == 24); | assert(r[0][0] == 6); | assert(r[0][1] == 12); | assert(r[1][0] == 18); | assert(r[1][1] == 24); | static assert(hasSlicing!(typeof(r))); | static assert(isForwardRange!(typeof(r))); | static assert(isRandomAccessRange!(typeof(r))); |} | |// Test for map #2 |version(mir_test) unittest |{ | import std.algorithm.iteration : map; | import std.range.primitives; | auto data = [1, 2, 3, 4] | //.map!(a => a * 2) | ; | static assert(hasSlicing!(typeof(data))); | static assert(isForwardRange!(typeof(data))); | static assert(isRandomAccessRange!(typeof(data))); | auto slice = data.sliced(2, 2); | static assert(hasSlicing!(typeof(slice))); | static assert(isForwardRange!(typeof(slice))); | static assert(isRandomAccessRange!(typeof(slice))); | auto r = slice.map!(a => a.map!(a => a * 6)); | static assert(hasSlicing!(typeof(r))); | static assert(isForwardRange!(typeof(r))); | static assert(isRandomAccessRange!(typeof(r))); | assert(r.front.front == 6); | assert(r.front.back == 12); | assert(r.back.front == 18); | assert(r.back.back == 24); | assert(r[0][0] == 6); | assert(r[0][1] == 12); | assert(r[1][0] == 18); | assert(r[1][1] == 24); |} | |private enum bool isType(alias T) = false; | |private enum bool isType(T) = true; | |private enum isStringValue(alias T) = is(typeof(T) : string); | | |private bool _checkAssignLengths( | LIterator, RIterator, | size_t LN, size_t RN, | SliceKind lkind, SliceKind rkind, | ) | (Slice!(LIterator, LN, lkind) ls, | Slice!(RIterator, RN, rkind) rs) |{ | static if (isInstanceOf!(SliceIterator, LIterator)) | { | import mir.ndslice.topology: unpack; | return _checkAssignLengths(ls.unpack, rs); | } | else | static if (isInstanceOf!(SliceIterator, RIterator)) | { | import mir.ndslice.topology: unpack; | return _checkAssignLengths(ls, rs.unpack); | } | else | { | foreach (i; Iota!(0, RN)) | if (ls._lengths[i + LN - RN] != rs._lengths[i]) | return false; | return true; | } |} | |@safe pure nothrow @nogc version(mir_test) unittest |{ | import mir.ndslice.topology : iota; | | assert(_checkAssignLengths(iota(2, 2), iota(2, 2))); | assert(!_checkAssignLengths(iota(2, 2), iota(2, 3))); | assert(!_checkAssignLengths(iota(2, 2), iota(3, 2))); | assert(!_checkAssignLengths(iota(2, 2), iota(3, 3))); |} | |pure nothrow version(mir_test) unittest |{ | auto slice = new int[15].slicedField(5, 3); | | /// Fully defined slice | assert(slice[] == slice); | auto sublice = slice[0..$-2, 1..$]; | | /// Partially defined slice | 
auto row = slice[3]; | auto col = slice[0..$, 1]; |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | auto b = [1, 2, 3, 4].sliced(2, 2); | | a[0..$, 0..$-1] = b; | assert(a == [[1, 2, 0], [3, 4, 0]]); | | a[0..$, 0..$-1] = b[0]; | assert(a == [[1, 2, 0], [1, 2, 0]]); | | a[1, 0..$-1] = b[1]; | assert(a[1] == [3, 4, 0]); | | a[1, 0..$-1][] = b[0]; | assert(a[1] == [1, 2, 0]); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | auto b = [[1, 2], [3, 4]]; | | a[] = [[1, 2, 3], [4, 5, 6]]; | assert(a == [[1, 2, 3], [4, 5, 6]]); | | a[0..$, 0..$-1] = [[1, 2], [3, 4]]; | assert(a == [[1, 2, 3], [3, 4, 6]]); | | a[0..$, 0..$-1] = [1, 2]; | assert(a == [[1, 2, 3], [1, 2, 6]]); | | a[1, 0..$-1] = [3, 4]; | assert(a[1] == [3, 4, 6]); | | a[1, 0..$-1][] = [3, 4]; | assert(a[1] == [3, 4, 6]); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | a[] = 9; | //assert(a == [[9, 9, 9], [9, 9, 9]]); | | a[0..$, 0..$-1] = 1; | //assert(a == [[1, 1, 9], [1, 1, 9]]); | | a[0..$, 0..$-1] = 2; | //assert(a == [[2, 2, 9], [2, 2, 9]]); | | a[1, 0..$-1] = 3; | //assert(a[1] == [3, 3, 9]); | | a[1, 0..$-1] = 4; | //assert(a[1] == [4, 4, 9]); | | a[1, 0..$-1][] = 5; | //assert(a[1] == [5, 5, 9]); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | a[1, 2] = 3; | assert(a[1, 2] == 3); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | a[[1, 2]] = 3; | assert(a[[1, 2]] == 3); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | a[1, 2] += 3; | assert(a[1, 2] == 3); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | a[[1, 2]] += 3; | assert(a[[1, 2]] == 3); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | auto b = [1, 2, 3, 4].sliced(2, 2); | | a[0..$, 0..$-1] += b; | assert(a == [[1, 2, 0], [3, 4, 0]]); | | a[0..$, 0..$-1] += b[0]; | assert(a == [[2, 4, 0], [4, 6, 0]]); | | a[1, 0..$-1] += b[1]; | assert(a[1] == [7, 10, 0]); | | a[1, 0..$-1][] += b[0]; | assert(a[1] == [8, 12, 0]); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | a[0..$, 0..$-1] += [[1, 2], [3, 4]]; | assert(a == [[1, 2, 0], [3, 4, 0]]); | | a[0..$, 0..$-1] += [1, 2]; | assert(a == [[2, 4, 0], [4, 6, 0]]); | | a[1, 0..$-1] += [3, 4]; | assert(a[1] == [7, 10, 0]); | | a[1, 0..$-1][] += [1, 2]; | assert(a[1] == [8, 12, 0]); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | a[] += 1; | assert(a == [[1, 1, 1], [1, 1, 1]]); | | a[0..$, 0..$-1] += 2; | assert(a == [[3, 3, 1], [3, 3, 1]]); | | a[1, 0..$-1] += 3; | assert(a[1] == [6, 6, 1]); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | ++a[1, 2]; | assert(a[1, 2] == 1); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | ++a[[1, 2]]; | assert(a[[1, 2]] == 1); |} | |pure nothrow version(mir_test) unittest |{ | auto a = new int[6].slicedField(2, 3); | | ++a[]; | assert(a == [[1, 1, 1], [1, 1, 1]]); | | --a[1, 0..$-1]; | assert(a[1] == [0, 0, 1]); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology: iota, universal; | | auto sl = iota(3, 4).universal; | assert(sl[0 .. 
$] == sl); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology: canonical, iota; | static assert(kindOf!(typeof(iota([1, 2]).canonical[1])) == Contiguous); |} | |version(mir_test) unittest |{ | import mir.ndslice.topology: iota; | auto s = iota(2, 3); | assert(s.front!1 == [0, 3]); | assert(s.back!1 == [2, 5]); |} | |/++ |Assignment utility for generic code that works both with scalars and with ndslices. |Params: | op = assign operation (generic, optional) | lside = left side | rside = right side |Returns: | expression value |+/ |auto ndassign(string op = "", L, R)(ref L lside, auto ref R rside) @property | if (!isSlice!L && (op.length == 0 || op[$-1] != '=')) |{ | return mixin(`lside ` ~ op ~ `= rside`); |} | |/// ditto |auto ndassign(string op = "", L, R)(L lside, auto ref R rside) @property | if (isSlice!L && (op.length == 0 || op[$-1] != '=')) |{ | static if (op == "") | return lside.opIndexAssign(rside); | else | return lside.opIndexOpAssign!op(rside); |} | |/// |version(mir_test) unittest |{ | import mir.ndslice.topology: iota; | import mir.ndslice.allocation: slice; | auto scalar = 3; | auto vector = 3.iota.slice; // [0, 1, 2] | | // scalar = 5; | scalar.ndassign = 5; | assert(scalar == 5); | | // vector[] = vector * 2; | vector.ndassign = vector * 2; | assert(vector == [0, 2, 4]); | | // vector[] += scalar; | vector.ndassign!"+"= scalar; | assert(vector == [5, 7, 9]); |} | |version(mir_test) pure nothrow unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: universal; | | auto df = slice!(double, int, int)(2, 3).universal; | df.label[] = [1, 2]; | df.label!1[] = [1, 2, 3]; | auto lsdf = df.lightScope; | assert(lsdf.label!0[0] == 1); | assert(lsdf.label!1[1] == 2); | | auto immdf = (cast(immutable)df).lightImmutable; | assert(immdf.label!0[0] == 1); | assert(immdf.label!1[1] == 2); | | auto constdf = df.lightConst; | assert(constdf.label!0[0] == 1); | assert(constdf.label!1[1] == 2); | | auto constdf2 = df.toConst; | assert(constdf2.label!0[0] == 1); | assert(constdf2.label!1[1] == 2); | | auto immdf2 = (cast(immutable)df).toImmutable; | assert(immdf2.label!0[0] == 1); | assert(immdf2.label!1[1] == 2); |} | |version(mir_test) pure nothrow unittest |{ | import mir.ndslice.allocation: slice; | import mir.ndslice.topology: universal; | | auto df = slice!(double, int, int)(2, 3).universal; | df[] = 5; | | Slice!(double*, 2, Universal) values = df.values; | assert(values[0][0] == 5); | Slice!(LightConstOf!(double*), 2, Universal) constvalues = df.values; | assert(constvalues[0][0] == 5); | Slice!(LightImmutableOf!(double*), 2, Universal) immvalues = (cast(immutable)df).values; | assert(immvalues[0][0] == 5); |} | |version(mir_test) @safe unittest |{ | import mir.ndslice.allocation; | auto a = rcslice!double([2, 3], 0); | auto b = rcslice!double([2, 3], 0); | a[1, 2] = 3; | b[] = a; | assert(a == b); |} ../../../.dub/packages/mir-algorithm-3.7.25/mir-algorithm/source/mir/ndslice/slice.d is 0% covered <<<<<< EOF # path=source-mir-sparse-blas-gemv.lst |/++ |License: $(HTTP boost.org/LICENSE_1_0.txt, Boost License 1.0). |Copyright: Copyright © 2016-, Ilya Yaroshenko |Authors: Ilya Yaroshenko |+/ |module mir.sparse.blas.gemv; | | |import std.traits; |import mir.ndslice.slice; |import mir.ndslice.iterator; |import mir.internal.utility; |import mir.sparse; |import mir.series; | |/++ |General matrix-vector multiplication. 
| |Params: | alpha = scalar | a = sparse matrix (CSR format) | x = dense vector | beta = scalar | y = dense vector |Returns: | `y = alpha * a × x + beta * y` if beta does not equal null and `y = alpha * a × x` otherwise. |+/ |void gemv( | CR, | CL, | SliceKind kind1, T1, I1, J1, SliceKind kind2, Iterator2, SliceKind kind3, Iterator3) |( | in CR alpha, | Slice!(ChopIterator!(J1*, Series!(I1*, T1*)), 1, kind1) a, | Slice!(Iterator2, 1, kind2) x, | in CL beta, | Slice!(Iterator3, 1, kind3) y) |in |{ 6| assert(a.length == y.length); |} |body |{ 6| if (beta) | { 22| foreach (ref e; y) | { | import mir.sparse.blas.dot; 6| e = alpha * dot(a.front, x) + beta * e; 6| a.popFront; | } | } | else | { 44| foreach (ref e; y) | { | import mir.sparse.blas.dot; 12| e = alpha * dot(a.front, x); 12| a.popFront; | } | } |} | |/// |unittest |{ | import mir.ndslice; | import mir.sparse; | 1| auto slice = sparse!double(3, 5); 1| slice[] = | [[ 0.0, 2.0, 3.0, 0.0, 0.0], | [ 6.0, 0.0, 30.0, 8.0, 0.0], | [ 6.0, 0.0, 30.0, 8.0, 0.0]]; 1| auto alpha = 3.0; 1| auto a = slice.compress; 1| auto x = [ 17.0, 19, 31, 3, 5].sliced; 1| auto beta = 2.0; 1| auto y = [1.0, 2, 3].sliced; 1| auto t = [131.0, 1056.0, 1056.0].sliced; 1| t[] *= alpha; | import mir.glas.l1: axpy; 1| axpy(beta, y, t); 1| gemv(alpha, a, x, beta, y); 1| assert(t == y); |} | |/++ |General matrix-vector multiplication with transposition. | |Params: | alpha = scalar | a = sparse matrix (CSR format) | x = dense vector | beta = scalar | y = dense vector |Returns: | `y = alpha * aᵀ × x + beta * y` if beta does not equal null and `y = alpha * aᵀ × x` otherwise. |+/ |void gemtv( | CR, | CL, | SliceKind kind1, T1, I1, J1, SliceKind kind2, Iterator2, SliceKind kind3, Iterator3) |( | in CR alpha, | Slice!(ChopIterator!(J1*, Series!(I1*, T1*)), 1, kind1) a, | Slice!(Iterator2, 1, kind2) x, | in CL beta, | Slice!(Iterator3, 1, kind3) y) |in |{ 5| assert(a.length == x.length); |} |body |{ | alias T3 = Unqual!(DeepElementType!(Slice!(Iterator3, 1, kind3))); | 5| if (beta == 0) | { 4| y[] = 0; | } 5| if (beta == 1) | { | } | else | { 5| y[] *= T3(beta); | } 85| foreach (ref t; x) | { | import mir.sparse.blas.axpy; 25| axpy(alpha * t, a.front, y); 25| a.popFront; | } |} | |/// |unittest |{ | import mir.ndslice; | import mir.sparse; | 1| auto slice = sparse!double(5, 3); 1| slice[] = | [[0.0, 6.0, 6.0], | [2.0, 0.0, 0.0], | [3.0, 30.0, 30.0], | [0.0, 8.0, 8.0], | [0.0, 0.0, 0.0]]; 1| auto alpha = 3.0; 1| auto a = slice.compress; 1| auto x = [ 17.0, 19, 31, 3, 5].sliced; 1| auto beta = 2.0; 1| auto y = [1.0, 2, 3].sliced; 1| auto t = [131.0, 1056.0, 1056.0].sliced; 1| t[] *= alpha; | import mir.glas.l1: axpy; 1| axpy(beta, y, t); 1| gemtv(alpha, a, x, beta, y); 1| assert(t == y); |} | |/++ |General matrix-vector multiplication for sparse vectors. | |Params: | alpha = scalar | a = dense matrix | x = sparse vector | beta = scalar | y = dense vector |Returns: | `y = alpha * a × x + beta * y` if beta does not equal null and `y = alpha * a × x` otherwise. 
|+/ |void gemv( | CR, | CL, | SliceKind kind1, Iterator1, | T2, I2, | SliceKind kind3, Iterator3, | ) |(in CR alpha, Slice!(Iterator1, 2, kind1) a, Series!(I2*, T2*) x, in CL beta, Slice!(Iterator3, 1, kind3) y) |in |{ | assert(a.length == y.length); |} |body |{ | if (beta) | { | foreach (ref e; y) | { | import mir.sparse.blas.dot; | e = alpha * dot(x, a.front) + beta * e; | a.popFront; | } | } | else | { | foreach (ref e; y) | { | import mir.sparse.blas.dot; | e = alpha * dot(x, a.front); | a.popFront; | } | } |} | |/// |unittest |{ | import mir.ndslice; | import mir.sparse; | 1| auto slice = sparse!double(3, 5); 1| slice[] = | [[ 0.0, 2.0, 3.0, 0.0, 0.0], | [ 6.0, 0.0, 30.0, 8.0, 0.0], | [ 6.0, 0.0, 30.0, 8.0, 0.0]]; 1| auto alpha = 3.0; 1| auto a = slice.compress; 1| auto x = [ 17.0, 19, 31, 3, 5].sliced; 1| auto beta = 2.0; 1| auto y = [1.0, 2, 3].sliced; 1| auto t = [131.0, 1056.0, 1056.0].sliced; 1| t[] *= alpha; | import mir.glas.l1: axpy; 1| axpy(beta, y, t); 1| gemv(alpha, a, x, beta, y); 1| assert(t == y); |} | |/++ |Selective general matrix-vector multiplication with a selector sparse vector. | |Params: | a = dense matrix | x = dense vector | y = sparse vector (compressed) |Returns: | `y[available indexes] = (alpha * a × x)[available indexes]`. |+/ |void selectiveGemv(string op = "", SliceKind kind1, SliceKind kind2, T, T3, I3) |(Slice!(T*, 2, kind1) a, Slice!(T*, 1, kind2) x, Series!(I3*, T3*) y) |in |{ 3| assert(a.length!1 == x.length); 3| if (y.index.length) 2| assert(y.index[$-1] < a.length); |} |body |{ | import mir.ndslice.dynamic: transposed; | 21| foreach (i, j; y.index.field) | { | import mir.glas.l1 : dot; 3| auto d = dot(a[j], x); | mixin(`y.value[i] ` ~ op ~ `= d;`); | } |} source/mir/sparse/blas/gemv.d is 100% covered <<<<<< EOF
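The gemv and gemtv unittests above check the documented identity `y = alpha * aᵀ × x + beta * y` only against precomputed constants. The short sketch below is editor-added for illustration and is not part of the covered source/mir/sparse/blas/gemv.d listing, so it carries no coverage counters; it verifies the same identity directly by compressing a small matrix and its transpose, with module paths taken from the imports already shown in the file and all values chosen for the example.

unittest
{
    import mir.ndslice;                         // sliced
    import mir.sparse;                          // sparse, compress
    import mir.sparse.blas.gemv: gemv, gemtv;

    // a 2x3 matrix m and its transpose, both stored as CSR
    auto m = sparse!double(2, 3);
    m[] = [[1.0, 0.0, 2.0],
           [0.0, 3.0, 0.0]];
    auto mt = sparse!double(3, 2);
    mt[] = [[1.0, 0.0],
            [0.0, 3.0],
            [2.0, 0.0]];
    auto a  = m.compress;                       // CSR of m
    auto at = mt.compress;                      // CSR of mᵀ

    auto x  = [10.0, 100].sliced;               // length 2 == m.length, as gemtv requires
    auto y1 = [0.0, 0, 0].sliced;
    auto y2 = [0.0, 0, 0].sliced;

    gemtv(1.0, a,  x, 0.0, y1);                 // y1 = mᵀ × x
    gemv (1.0, at, x, 0.0, y2);                 // y2 = (mᵀ) × x

    assert(y1 == [10.0, 300, 20]);
    assert(y1 == y2);
}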
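selectiveGemv is the only routine in this listing without an example unittest in the excerpt. The following sketch is likewise editor-added and outside the coverage counts; it shows one intended use under the assumption that the array-accepting `series` constructor from `mir.series` is available, and all names and values are illustrative.

unittest
{
    import mir.ndslice;                             // sliced
    import mir.series: series;                      // assumed: series(index[], values[]) overload
    import mir.sparse.blas.gemv: selectiveGemv;

    // 3x2 dense matrix and a dense vector of length 2
    auto a = [1.0, 2,
              3.0, 4,
              5.0, 6].sliced(3, 2);
    auto x = [10.0, 100].sliced;

    // selector: request only rows 0 and 2 of a × x
    auto y = series([0u, 2u], [0.0, 0.0]);

    selectiveGemv(a, x, y);                         // default op "": plain assignment

    assert(y.value[0] == 210);                      // dot(a[0], x) = 1*10 + 2*100
    assert(y.value[1] == 560);                      // dot(a[2], x) = 5*10 + 6*100
}

With a non-empty op, for example selectiveGemv!"+"(a, x, y), the selected entries are accumulated into y.value instead of overwritten.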