@@ -97,6 +97,45 @@
 
 @inline (f::TransposedStorage)(v,m,n,k) = f.store(v,n,m,k)
 
+
+function allocatestorage(op::MWDoubleLayerTDIO, testST, basisST,
+	::Type{Val{:bandedstorage}},
+	::Type{LongDelays{:ignore}})
+
+    # tfs = spatialbasis(testST)
+    # bfs = spatialbasis(basisST)
+	X, T = spatialbasis(testST), temporalbasis(testST)
+	Y, U = spatialbasis(basisST), temporalbasis(basisST)
+
+	if CompScienceMeshes.refines(geometry(Y), geometry(X))
+		testST = Y⊗T
+		basisST = X⊗U
+	end
+
+    M = numfunctions(X)
+    N = numfunctions(Y)
+
+    K0 = fill(typemax(Int), M, N)
+    K1 = zeros(Int, M, N)
+
+    function store(v,m,n,k)
+        K0[m,n] = min(K0[m,n],k)
+        K1[m,n] = max(K1[m,n],k)
+    end
+
+    aux = EmptyRP(op.speed_of_light)
+    print("Allocating memory for convolution operator: ")
+    assemble!(aux, testST, basisST, store)
+    println("\nAllocated memory for convolution operator.")
+
+	maxk1 = maximum(K1)
+	bandwidth = maximum(K1 .- K0 .+ 1)
+	data = zeros(eltype(op), bandwidth, M, N)
+	Z = SparseND.Banded3D(K0, data, maxk1)
+    store1(v,m,n,k) = (Z[m,n,k] += v)
+    return Z, store1
+end
+
 function assemble!(dl::MWDoubleLayerTDIO, W::SpaceTimeBasis, V::SpaceTimeBasis, store)
 
 	X, T = spatialbasis(W), temporalbasis(W)
@@ -108,8 +147,6 @@
 		op = MWDoubleLayerTransposedTDIO(dl.speed_of_light, dl.weight, dl.num_diffs)
 		assemble!(op, W, V, store)
 		return
-		# store1 = (v,m,n,k) -> store(v,n,m,k)
-		# store = TransposedStorage(store)
 	end
 
 	P = Threads.nthreads()

@@ -182,6 +182,9 @@
 
     cellids, ncells = vertextocellmap(mesh)
 
+    Cells = cells(mesh)
+    Verts = vertices(mesh)
+
     # create the local shapes
     fns = Vector{Shape{Float64}}[]
     pos = Vector{vertextype(mesh)}()
@@ -196,7 +199,8 @@
         shapes = Vector{Shape{Float64}}(undef,numshapes)
         for s in 1:numshapes
             c = cellids[v,s]
-            cell = mesh.faces[c]
+            # cell = mesh.faces[c]
+            cell = Cells[c]
 
             localid = something(findfirst(isequal(v), cell),0)
             @assert localid != 0
@@ -205,7 +209,7 @@
         end
 
         push!(fns, shapes)
-        push!(pos, mesh.vertices[v])
+        push!(pos, Verts[v])
     end
 
     NF = 3
@@ -258,7 +262,7 @@
 end
 
 
-function lagrangec0d1(mesh, nodes::Mesh{U,1} where {U})
+function lagrangec0d1(mesh, nodes::CompScienceMeshes.AbstractMesh{U,1} where {U})
 
     Conn = connectivity(nodes, mesh, abs)
     rows = rowvals(Conn)

@@ -79,3 +79,50 @@
 (ϕ::NDotTrace)(p) = dot(normal(p), ϕ.field(cartesian(p)))
 integrand(::NDotTrace, g, ϕ) = dot(g.value, ϕ)
 LinearAlgebra.dot(::NormalVector, f) = NDotTrace(f)
+
+
+
+mutable struct CurlGreen{T,U,V}
+  wavenumber::T
+  source::U
+  position::V
+end
+
+function (f::CurlGreen)(x)
+  γ = im * f.wavenumber
+  r = x - f.position
+  R = norm(r)
+  g = exp(-γ*R)/(4π*R)
+  j = f.source
+  return -γ/R * (1 + 1/(γ*R)) * g * (r × j)
+end
+
+
+mutable struct CurlCurlGreen{T,U,V}
+  wavenumber::T
+  source::U
+  position::V
+end
+
+cross(::NormalVector, p::CurlGreen) = CrossTraceMW(p)
+cross(::NormalVector, p::CurlCurlGreen) = CrossTraceMW(p)
+
+function (f::CurlCurlGreen)(x)
+  γ = im * f.wavenumber
+  r = x - f.position
+  R = norm(r)
+  g = exp(-γ*R)/(4π*R)
+  j = f.source
+  return (-γ^2/R^2 * (r × j) × r + (1/R^2 + γ/R)/R^2 * (3r * dot(r,j) - R^2 * j)) * g
+end
+
+curl(f::CurlGreen) = CurlCurlGreen(f.wavenumber, f.source, f.position)
+function curl(f::CurlCurlGreen)
+  κ = f.wavenumber
+  j = κ^2 * f.source
+  x = f.position
+  return CurlGreen(κ, j, x)
+end
+
+Base.:*(a::Number, f::CurlGreen) = CurlGreen(f.wavenumber, a*f.source, f.position)
+Base.:*(a::Number, f::CurlCurlGreen) = CurlCurlGreen(f.wavenumber, a*f.source, f.position)
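
A hedged usage sketch of these helpers (illustration only, not part of the diff; `SVector` is assumed to come from StaticArrays, and `curl` and `*` are the overloads added just above):

    using StaticArrays: SVector
    using LinearAlgebra: norm, dot, ×

    κ  = 1.0                          # wavenumber
    j  = SVector(0.0, 0.0, 1.0)       # dipole polarisation
    x0 = SVector(0.0, 0.0, -10.0)     # source location away from the scatterer

    E = CurlCurlGreen(κ, j, x0)       # callable field: E(x) returns a complex 3-vector
    H = curl(E)                       # a CurlGreen carrying κ^2 * j
    E(SVector(0.0, 0.0, 0.0))         # evaluate the incident field at the origin
    2.0 * E                           # rescale the source via the new * overload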

@@ -25,42 +25,109 @@
     return x
 end
 
+function marchonintime(iZ0, Z::ConvOp, B, Nt)
+
+    T = eltype(iZ0)
+    Ns = size(Z,1)
+    x = zeros(T,Ns,Nt)
+    csx = zeros(T,Ns,Nt)
+    y = zeros(T,Ns)
+
+    todo, done, pct = Nt, 0, 0
+    for i in 1:Nt
+        fill!(y,0)
+        convolve!(y, Z, x, csx, i, 2, Nt)
+        y .*= -1
+        y .+= B[:,i]
+        # @show norm(B[:,i])
+
+        x[:,i] .+= iZ0 * y
+        if i > 1
+            csx[:,i] .= csx[:,i-1] .+ x[:,i]
+        else
+            csx[:,i] .= x[:,i]
+        end
+
+        done += 1
+        new_pct = round(Int, done / todo * 100)
+        new_pct > pct+9 && (println("[$new_pct]"); pct=new_pct)
+    end
+    x
+end
+
 using BlockArrays
 
 function convolve(Z::BlockArray, x, i, j_start)
-    cs = BlockArrays.cumulsizes(Z)
-    bs = [blocksize(Z, (i,1))[1] for i in 1:nblocks(Z,1)]
-    # @show bs
+    # ax1 = axes(Z,1)
+    ax2 = axes(Z,2)
+    T = eltype(eltype(Z))
+    y = PseudoBlockVector{T}(undef,blocklengths(axes(Z,1)))
+    fill!(y,0)
+    for I in blockaxes(Z,1)
+        for J in blockaxes(Z,2)
+            xJ = view(x, ax2[J], :)
+            try
+                ZIJ = Z[I,J].banded
+                y[I] .+= convolve(ZIJ, xJ, i, j_start)
+            catch
+                @info "Skipping unassigned block."
+                continue
+            end
+        end
+    end
+    return y
+end
+
+function convolve!(y, Z::BlockArray, x, csx, i, j_start, j_stop)
+    ax1 = axes(Z,1)
+    ax2 = axes(Z,2)
     T = eltype(eltype(Z))
-    y = PseudoBlockVector{T}(undef,bs)
+    # y = PseudoBlockVector{T}(undef,blocklengths(axes(Z,1)))
     fill!(y,0)
-    for I in 1:nblocks(Z,1)
-        # xI = view(x, cs[1][I] : cs[1][I+1]-1, :)
-        for J in 1:nblocks(Z,2)
-            xJ = view(x, cs[2][J] : cs[2][J+1]-1, :)
-            isassigned(Z.blocks, I, J) || continue
-            ZIJ = Z[Block(I,J)].banded
-            # @show size(xJ) size(ZIJ)
-            # @show size(y[Block(I)])
-            y[Block(I)] .+= convolve(ZIJ, xJ, i, j_start)
+    for I in blockaxes(Z,1)
+        for J in blockaxes(Z,2)
+            xJ = view(x, ax2[J], :)
+            csxJ = view(csx, ax2[J], :)
+            try
+                ZIJ = Z[I,J].banded
+                # y[I] .+= convolve(ZIJ, xJ, i, j_start)
+                yI = view(y, ax1[I])
+                convolve!(yI, ZIJ, xJ, csxJ, i, j_start, j_stop)
+            catch
+                @info "Skipping unassigned block."
+                continue
+            end
         end
     end
    return y
 end
 
 function marchonintime(W0,Z::BlockArray,B,I)
+
     T = eltype(W0)
     M,N = size(W0)
     @assert M == size(B,1)
+
     x = zeros(T,N,I)
+    y = zeros(T,N)
+    csx = zeros(T,N,I)
+
     for i in 1:I
         R = [ B[j][i] for j in 1:N ]
-        S = convolve(Z,x,i,2)
-        # @show size(R)
-        # @show size(S)
-        # b = R - convolve(Z,x,i,2)
-        b = R - S
-        x[:,i] += W0 * b
+        # @show norm(R)
+        k_start = 2
+        k_stop = I
+
+        fill!(y,0)
+        convolve!(y,Z,x,csx,i,k_start,k_stop)
+        b = R - y
+        x[:,i] .+= W0 * b
+        if i > 1
+            csx[:,i] .= csx[:,i-1] .+ x[:,i]
+        else
+            csx[:,i] .= x[:,i]
        end
+
         (i % 10 == 0) && print(i, "[", I, "] - ")
     end
     return x
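
The running sum `csx` threaded through `convolve!` and both `marchonintime` methods is what makes the compressed tail cheap: for delays past the band every kernel sample equals `Z.tail[m,n]`, so that part of the convolution collapses to `Z.tail[m,n] * sum(x[n, 1:j-k1])`, a single lookup into the prefix sum. A minimal sketch of the identity (illustration only):

    x   = rand(3, 8)                  # three spatial dofs, eight time steps
    csx = cumsum(x, dims=2)           # the same running sum marchonintime maintains
    j, k1 = 8, 4
    csx[2, j-k1] ≈ sum(x[2, 1:j-k1])  # true: the tail term only needs csx[n, j-k1]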

@@ -29,7 +29,7 @@
 
     V = eq.trial_space_dict[1]
 
-    bilform = eq.equation.lhs
+    # bilform = eq.equation.lhs
 
     A = td_assemble(eq.equation.lhs, eq.test_space_dict, eq.trial_space_dict)
     S = timeslice(A,1)
@@ -43,15 +43,18 @@
 
 function timeslice(A::BlockArray, k)
 
-    I = [blocksize(A, (1,i))[1] for i in 1:nblocks(A,1)]
-    J = [blocksize(A, (j,1))[2] for j in 1:nblocks(A,2)]
+    # I = [blocksize(A, (1,i))[1] for i in 1:nblocks(A,1)]
+    # J = [blocksize(A, (j,1))[2] for j in 1:nblocks(A,2)]
+
+    I = blocklengths(axes(A,1))
+    J = blocklengths(axes(A,2))
 
     T = eltype(eltype(A))
     S = PseudoBlockArray{T}(undef, I, J)
     fill!(S,0)
 
-    for i in 1:nblocks(A,1)
-        for j in 1:nblocks(A,2)
+    for i in 1:blocksize(A,1)
+        for j in 1:blocksize(A,2)
             isassigned(A.blocks, i, j) || continue
             S[Block(i,j)] = A[Block(i,j)].banded[:,:,k]
         end
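
For reference, the BlockArrays API migration applied here and in the `convolve`/`convolve!` changes above boils down to the following replacements (a hedged summary of what the diff itself does, not an exhaustive upgrade guide):

    # nblocks(A, d)                           ->  blocksize(A, d)
    # for I in 1:nblocks(A, d)                ->  for I in blockaxes(A, d)   # yields Block indices
    # index ranges from cumulsizes(A)         ->  axes(A, d)[J] for J::Block
    # per-block lengths via blocksize(A,(i,1)) -> blocklengths(axes(A, d))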

@@ -154,6 +154,7 @@
 include("quadrature/double_quadrature.jl")
 include("quadrature/singularity_extraction.jl")
 
+include("timedomain/convop.jl")
 include("timedomain/tdintegralop.jl")
 include("timedomain/tdexcitation.jl")
 include("timedomain/motlu.jl")
@@ -161,6 +162,7 @@
 include("timedomain/rkcq.jl")
 include("timedomain/zdomain.jl")
 
+
 # Support for Maxwell equations
 include("maxwell/mwexc.jl")
 include("maxwell/mwops.jl")

@@ -34,11 +34,9 @@
     print("Allocating memory for convolution operator: ")
     assemble!(aux, testST, basisST, store)
     println("\nAllocated memory for convolution operator.")
-    # data = zeros(eltype(op), M, N, maximum(K1.-K0.+1))
 
-    #Z = SparseND.Banded3D(K0,K1,data)
     kmax = maximum(K1);
-    Z = zeros(eltype(op), M, N, kmax+1)
+    Z = zeros(eltype(op), M, N, kmax)
     store1(v,m,n,k) = (Z[m,n,k] += v)
     return MatrixConvolution(Z), store1
 end
@@ -66,24 +64,67 @@
     print("Allocating memory for convolution operator: ")
     assemble!(aux, testST, basisST, store)
     println("\nAllocated memory for convolution operator.")
-    # data = zeros(eltype(op), M, N, maximum(K1.-K0.+1))
 
-	# @show minimum(K0)
-	# @show maximum(K0)
-	# @show minimum(K1)
-	# @show maximum(K1)
-
-    #Z = SparseND.Banded3D(K0,K1,data)
-    # kmax = maximum(K1);
-    # Z = zeros(eltype(op), M, N, kmax+1)
 	maxk1 = maximum(K1)
 	bandwidth = maximum(K1 .- K0 .+ 1)
 	data = zeros(eltype(op), bandwidth, M, N)
-	Z = SparseND.Banded3D(K0, data, maxk1+1)
+	Z = SparseND.Banded3D(K0, data, maxk1)
     store1(v,m,n,k) = (Z[m,n,k] += v)
     return Z, store1
 end
 
+struct Storage{T} end
+
+function allocatestorage(op::RetardedPotential, testST, basisST,
+	::Type{Val{:bandedstorage}},
+    ::Type{LongDelays{:compress}})
+
+    @info "Allocating mem for RP op compressing the static tail..."
+
+	T = eltype(op)
+
+    tfs = spatialbasis(testST)
+    bfs = spatialbasis(basisST)
+
+	Δt = timestep(temporalbasis(basisST))
+	Nt = numfunctions(temporalbasis(basisST))
+
+	tbf = convolve(temporalbasis(testST), temporalbasis(basisST))
+	has_tail = all(tbf.polys[end].data .== 0)
+
+    M = numfunctions(tfs)
+    N = numfunctions(bfs)
+
+    K0 = fill(typemax(Int), M, N)
+    K1 = zeros(Int, M, N)
+
+    function store_alloc(v,m,n,k)
+        K0[m,n] = min(K0[m,n],k)
+        K1[m,n] = max(K1[m,n],k)
+    end
+
+    op_alloc = EmptyRP(op.speed_of_light)
+	tbf_trunc = truncatetail(tbf)
+	δ = timebasisdelta(Δt, Nt)
+    print("Allocating memory for convolution operator: ")
+    assemble!(op_alloc, tfs⊗δ, bfs⊗tbf_trunc, store_alloc)
+    println("\nAllocated memory for convolution operator.")
+
+	bandwidth = maximum(K1 .- K0 .+ 1)
+	data = zeros(T, bandwidth, M, N)
+	tail = zeros(T, M, N)
+	Z = ConvOp(data, K0, K1, tail, Nt)
+
+    function store1(v,m,n,k)
+		if Z.k0[m,n] ≤ k ≤ Z.k1[m,n]
+			Z.data[k - Z.k0[m,n] + 1,m,n] += v
+		elseif k == Z.k1[m,n]+1
+			Z.tail[m,n] += v
+		end
+	end
+
+    return Z, store1
+end
 
 function assemble!(op::LinearCombinationOfOperators, tfs::SpaceTimeBasis, bfs::SpaceTimeBasis, store)
     for (a,A) in zip(op.coeffs, op.ops)
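
For a sense of what the `:compress` policy buys: beyond `K1[m,n]` the retarded-potential kernel no longer changes with the delay index, so only the band plus one static tail value per interaction pair is stored, instead of the dense `M × N × kmax` history allocated by the `MatrixConvolution` fallback above. A hedged back-of-the-envelope with made-up sizes:

    M = N = 1_000                          # spatial test/trial functions (illustrative)
    Nt = 2_000                             # time steps, so kmax is of order Nt (illustrative)
    bandwidth = 30                         # typical K1 - K0 + 1 (illustrative)

    dense  = M * N * Nt                    # 2.0e9 stored entries
    banded = bandwidth * M * N + M * N     # band plus tail: 3.1e7 entries
    dense / banded                         # ≈ 65× fewer entries to keep in memory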
@@ -100,16 +141,16 @@
 	@assert P >= 1
 	splits = [round(Int,s) for s in range(0, stop=numfunctions(Y), length=P+1)]
 
-	@info "Starting assembly with $P threads:"
-	Threads.@threads for i in 1:P
-		lo, hi = splits[i]+1, splits[i+1]
-		lo <= hi || continue
-		Y_p = subset(Y, lo:hi)
-		store1 = (v,m,n,k) -> store(v,lo+m-1,n,k)
-		assemble_chunk!(op, Y_p ⊗ S, trialST, store1)
-	end
+	# @info "Starting assembly with $P threads:"
+	# Threads.@threads for i in 1:P
+	# 	lo, hi = splits[i]+1, splits[i+1]
+	# 	lo <= hi || continue
+	# 	Y_p = subset(Y, lo:hi)
+	# 	store1 = (v,m,n,k) -> store(v,lo+m-1,n,k)
+	# 	assemble_chunk!(op, Y_p ⊗ S, trialST, store1)
+	# end
 
-	# assemble_chunk!(op, testST, trialST, store)
+	assemble_chunk!(op, testST, trialST, store)
 end
 
 function assemble_chunk!(op::RetardedPotential, testST, trialST, store)
@@ -179,7 +220,6 @@
 									abv = b*av
 									tad_rd = timead[r,d]
                                     for (k,c) in tad_rd
-                                        #@assert 1 <= s <= Nt
 										store(c*abv, m, n, k)
 									end # next κ
 		                        end # next ν

@@ -33,36 +33,11 @@
 
 function setindex!(A::Banded3D, v, m, n, k)
     k0 = A.k0[m,n]
-    @assert k0 != 0
+    @assert k0 != 0 "Failed: $v, $m, $n, $k"
     @assert A.k0[m,n] <= k <= A.k0[m,n] + bandwidth(A) - 1
     A.data[k-A.k0[m,n]+1,m,n] = v
 end
 
-# """
-#     convolve{T}(A::Banded3D, x::Array{T,2})
-#
-# Compute the *space-time* convolution of A and x.
-#
-# Returns an array `y` of size `(size(A,1),size(x,2))` such that
-# `y[m,i] = sum([A[]])``
-# """
-# function convolve(A::Banded3D, x::Array{T,2}) where T
-#     V = promote_type(eltype(A),eltype(x))
-#     y = zeros(V,size(A,1),size(x,2))
-#     bw = bandwidth(A)
-#     for i in 1:size(A,1)
-#         for l in 1 : size(y,2)
-#             for j in 1:size(A,2)
-#                 k0 = A.k0[i,j]
-#                 for k in k0 : k0 + bw - 1
-#                     l-k+1 < 1 && continue
-#                     y[i,l] += A.data[k,i,j] * x[j,l-k+1]
-#                 end
-#             end
-#         end
-#     end
-#     return y
-# end
 
 function Base.:+(A::Banded3D{T}, B::Banded3D{T}) where {T}
 
@@ -145,7 +120,7 @@
 
 
 struct MatrixOfConvolutions{T} <: AbstractArray{Vector{T},2}
-    banded::Banded3D{T}
+    banded::AbstractArray{T,3}
 end
 
 function Base.eltype(x::MatrixOfConvolutions{T}) where {T}
@@ -162,7 +137,7 @@
 end
 
 Base.eltype(x::SpaceTimeData{T}) where {T} = Vector{T}
-Base.size(x::SpaceTimeData) = size(x.data)[1]
+Base.size(x::SpaceTimeData) = (size(x.data)[1],)
 Base.getindex(x::SpaceTimeData, i::Int) = x.data[i,:]
 
 end # module

@@ -0,0 +1,82 @@
+struct ConvOp{T} <: AbstractArray{T,3}
+    data::Array{T,3}
+    k0::Array{Int,2}
+    k1::Array{Int,2}
+    tail::Array{T,2}
+    length::Int
+end
+
+function Base.size(obj::ConvOp)
+    return (size(obj.data)[2:3]...,obj.length)
+end
+
+function Base.getindex(obj::ConvOp, m::Int, n::Int, k::Int)
+    if k < obj.k0[m,n]
+        return zero(eltype(obj.data))
+    end
+
+    if k > obj.k1[m,n]
+        return obj.tail[m,n]
+    end
+
+    return obj.data[k-obj.k0[m,n]+1,m,n]
+end
+
+#
+# function convolve!(y, Z::ConvOp, x, j, k_start, k_stop=size(Z,3))
+#     for m in axes(y,1)
+#         for n in axes(x,1)
+#             k0 = Z.k0[m,n]
+#             k1 = Z.k1[m,n]
+#             for k in max(k0,k_start):min(k1,k_stop)
+#                 p = k - k0 + 1
+#                 j-k < 0 && continue
+#                 y[m] += Z.data[p,m,n] * x[n,j-k+1]
+#             end
+#
+#             X = sum(x[n,1:j-k1])
+#             y[m] += Z.tail[m,n] * X
+#         end
+#     end
+# end
+
+
+function convolve!(y, Z::ConvOp, x, X, j, k_start, k_stop=size(Z,3))
+    # @info "The correct convolve!"
+    for n in axes(x,1)
+        for m in axes(y,1)
+            k0 = Z.k0[m,n]
+            k1 = Z.k1[m,n]
+            for k in max(k0,k_start):min(k1,k_stop)
+                p = k - k0 + 1
+                j-k < 0 && continue
+                y[m] += Z.data[p,m,n] * x[n,j-k+1]
+            end
+
+            j-k1 > 0 || continue
+            y[m] += Z.tail[m,n] * X[n,j-k1]
+        end
+    end
+end
+
+
+# function convolve(Z::ConvOp, x, j, k_start)
+#     y = zeros(eltype(Z), size(Z)[1])
+#     convolve!(y, Z, x, j, k_start, size(Z)[3])
+#     return y
+# end
+function polyeig(Z::ConvOp)
+    kmax = maximum(Z.k1)
+    M = size(Z,1)
+    Q = zeros(eltype(Z), 2M, 2M, kmax+1)
+    for k in 1:kmax
+        Q[1:M,1:M,k] .= Z[:,:,k]
+    end
+    Id = Matrix{eltype(Z)}(LinearAlgebra.I,M,M)
+    Q[M+1:2M,1:M,1] .= -Id
+    Q[M+1:2M,M+1:2M,1] .= Id
+    Q[M+1:2M,M+1:2M,2] .= -Id
+    Q[1:M,M+1:2M,kmax+1] .= Z[:,:,kmax+1]
+    return eigvals(companion(Q)), Q
+    # return Q
+end
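
A hedged sketch of the indexing convention the new type implements (illustration only, not part of the diff; the default positional constructor is assumed):

    data = reshape([1.0, 2.0, 3.0], 3, 1, 1)  # band of width 3 for the single (m,n) pair
    k0   = fill(4, 1, 1)                      # band starts at delay index 4
    k1   = fill(6, 1, 1)                      # band ends at delay index 6
    tail = fill(0.5, 1, 1)                    # constant value used for every k > k1
    Z = ConvOp(data, k0, k1, tail, 10)        # length 10 along the time axis

    Z[1,1,2]   # 0.0 : before the band
    Z[1,1,5]   # 2.0 : inside the band, data[5 - 4 + 1, 1, 1]
    Z[1,1,9]   # 0.5 : past the band, the compressed static tail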

@@ -86,7 +86,17 @@
 numfunctions(t::TimeBasisFunction) = t.numfunctions
 refspace(t::TimeBasisFunction{T,N,D1,D}) where {T,N,D1,D} = MonomialBasis{T,D,D1}()
 
-
+function truncatetail(t::TimeBasisFunction{T,N,D1,D}) where {T,N,D1,D}
+    P = Polynomial{D1,T}
+    S = SVector{N,P}
+    R = SVector{D1,T}
+
+    out = deepcopy(t)
+    polys = [p for p in t.polys]
+    polys[end] = P(zero(R))
+    out.polys = S(polys)
+    return out
+end
 
 
 geometry(t::AbstractTimeBasisFunction) = [SegmentedAxis(timestep(t), numfunctions(t)+numintervals(t)-3)]

@@ -100,7 +100,8 @@
     space_operator = op.spatial_factor
     A = assemble(space_operator, spatialbasis(test_functions), spatialbasis(trial_functions))
 
-    K0 = Int.(A .!= 0)
+    # K0 = Int.(A .!= 0)
+    K0 = ones(Int, M, N)   # band-start indices must be integers
     # K0 = ones(Int,M,N)
     bandwidth = numintervals(time_basis_function) - 1
     data = zeros(scalartype(op), bandwidth, M, N)
@@ -110,7 +111,8 @@
 end
 
 
-function allocatestorage(op::TensorOperator, test_functions, trial_functions)
+function allocatestorage(op::TensorOperator, test_functions, trial_functions,
+    ::Type{Val{:bandedstorage}}, ::Type{LongDelays{:compress}})
 
     M = numfunctions(spatialbasis(test_functions))
     N = numfunctions(spatialbasis(trial_functions))
@@ -119,26 +121,57 @@
         temporalbasis(test_functions),
         temporalbasis(trial_functions))
 
-    tbf = time_basis_function
-    has_zero_tail = all(tbf.polys[end].data .== 0)
-    @show has_zero_tail
-
-    if has_zero_tail
-        K = numintervals(time_basis_function)-1
-    else
-        speedoflight = 1.0
-        @warn "Assuming speed of light to be equal to 1!"
-        Δt = timestep(tbf)
-        ct, hs = boundingbox(geometry(spatialbasis(trial_functions)).vertices)
-        diam = 2 * sqrt(3) * hs
-        K = ceil(Int, (numintervals(tbf)-1) + diam/speedoflight/Δt)+1
-    end
-    @assert K > 0
+    space_operator = op.spatial_factor
+    A = assemble(space_operator, spatialbasis(test_functions), spatialbasis(trial_functions))
 
-    Z = zeros(M, N, K)
-    return MatrixConvolution(Z), (v,m,n,k)->(Z[m,n,k] += v)
+    K0 = ones(Int, M, N)
+    bandwidth = numintervals(time_basis_function) - 1
+    K1 = ones(Int,M,N) .+ (bandwidth - 1)
+    T  = scalartype(op)
+    data = zeros(T, bandwidth, M, N)
+    tail = zeros(T, M, N)
+
+    Nt = numfunctions(temporalbasis(trial_functions))
+    Z = ConvOp(data, K0, K1, tail, Nt)
+    function store1(v,m,n,k)
+        if Z.k0[m,n] ≤ k ≤ Z.k1[m,n]
+            Z.data[k - Z.k0[m,n] + 1,m,n] += v
+        elseif k == Z.k1[m,n]+1
+            Z.tail[m,n] += v
+        end
+    end
+    return Z, store1
 end
 
+# function allocatestorage(op::TensorOperator, test_functions, trial_functions)
+
+#     M = numfunctions(spatialbasis(test_functions))
+#     N = numfunctions(spatialbasis(trial_functions))
+
+#     time_basis_function = BEAST.convolve(
+#         temporalbasis(test_functions),
+#         temporalbasis(trial_functions))
+
+#     tbf = time_basis_function
+#     has_zero_tail = all(tbf.polys[end].data .== 0)
+#     @show has_zero_tail
+
+#     if has_zero_tail
+#         K = numintervals(time_basis_function)-1
+#     else
+#         speedoflight = 1.0
+#         @warn "Assuming speed of light to be equal to 1!"
+#         Δt = timestep(tbf)
+#         ct, hs = boundingbox(geometry(spatialbasis(trial_functions)).vertices)
+#         diam = 2 * sqrt(3) * hs
+#         K = ceil(Int, (numintervals(tbf)-1) + diam/speedoflight/Δt)+1
+#     end
+#     @assert K > 0
+
+#     Z = zeros(M, N, K)
+#     return MatrixConvolution(Z), (v,m,n,k)->(Z[m,n,k] += v)
+# end
+
 function assemble!(operator::TensorOperator, testfns, trialfns, store)
 
     space_operator = operator.spatial_factor
@@ -198,17 +231,18 @@
 
 end
 
-struct TemporalIntegration <: Operator
-    operator::Operator
+struct TemporalIntegration <: AbstractOperator
+    operator::AbstractOperator
 end
 
-integrate(op::Operator) = TemporalIntegration(op)
+integrate(op::AbstractOperator) = TemporalIntegration(op)
 scalartype(op::TemporalIntegration) = scalartype(op.operator)
 Base.:*(a::Number, op::TemporalIntegration) = TemporalIntegration(a * op.operator)
 
 function allocatestorage(op::TemporalIntegration, testfns, trialfns,
-	::Type{Val{:bandedstorage}},
-	::Type{LongDelays{:ignore}})
+	storage_trait, longdelays_trait)
+	# ::Type{Val{:bandedstorage}},
+	# ::Type{LongDelays{:ignore}})
 
 	trial_time_fns  = temporalbasis(trialfns)
 	trial_space_fns = spatialbasis(trialfns)
@@ -218,7 +252,7 @@
 		integrate(trial_time_fns)
 	)
 
-	Z, store = allocatestorage(op.operator, testfns, trialfns, Val{:bandedstorage}, LongDelays{:ignore})
+	Z, store = allocatestorage(op.operator, testfns, trialfns, storage_trait, longdelays_trait)
 	@show size(Z)
 	return Z, store
 end

@@ -69,7 +69,7 @@
 
 function assemble(operator::AbstractOperator, test_functions, trial_functions,
     storage_policy = Val{:bandedstorage},
-    long_delays_policy = LongDelays{:ignore})
+    long_delays_policy = LongDelays{:compress})
     # This is a convenience function whose only job is to allocate
     # the storage for the interaction matrix. Further dispatch on
     # operator and space types is handled by the 4-argument version
@@ -100,8 +100,10 @@
 end
 
 function allocatestorage(operator::AbstractOperator, test_functions, trial_functions,
-    ::Type{Val{:bandedstorage}},
-    ::Type{LongDelays{:ignore}})
+    storage_trait,
+    longdelays_trait)
+    # ::Type{Val{:bandedstorage}},
+    # ::Type{LongDelays{:ignore}})
 
     T = promote_type(
         scalartype(operator),