@@ -18,7 +18,6 @@
 #include "FunctorOps.h"
 #include "Global.h"
 #include "interpreter/Context.h"
-#include "interpreter/Generator.h"
 #include "interpreter/Index.h"
 #include "interpreter/Node.h"
 #include "interpreter/Relation.h"
@@ -139,8 +138,19 @@
 constexpr RamDomain RAM_BIT_SHIFT_MASK = RAM_DOMAIN_SIZE - 1;
 }

+Engine::Engine(ram::TranslationUnit& tUnit)
+        : profileEnabled(Global::config().has("profile")), isProvenance(Global::config().has("provenance")),
+          numOfThreads(std::stoi(Global::config().get("jobs"))), tUnit(tUnit),
+          isa(tUnit.getAnalysis<ram::analysis::IndexAnalysis>()) {
+#ifdef _OPENMP
+    if (numOfThreads > 0) {
+        omp_set_num_threads(numOfThreads);
+    }
+#endif
+}
+
 Engine::RelationHandle& Engine::getRelationHandle(const size_t idx) {
-    return generator.getRelationHandle(idx);
+    return *relations[idx];
 }

 void Engine::swapRelation(const size_t ramRel1, const size_t ramRel2) {
@@ -177,7 +187,26 @@
 }

 VecOwn<Engine::RelationHandle>& Engine::getRelationMap() {
-    return generator.getRelations();
+    return relations;
+}
+
+void Engine::createRelation(const ram::Relation& id, const size_t idx) {
+    if (relations.size() < idx + 1) {
+        relations.resize(idx + 1);
+    }
+
+    RelationHandle res;
+    const auto& orderSet = isa->getIndexes(id.getName());
+    if (id.getRepresentation() == RelationRepresentation::EQREL) {
+        res = createEqrelRelation(id, orderSet);
+    } else {
+        if (isProvenance) {
+            res = createProvenanceRelation(id, orderSet);
+        } else {
+            res = createBTreeRelation(id, orderSet);
+        }
+    }
+    relations[idx] = mk<RelationHandle>(std::move(res));
 }

 const std::vector<void*>& Engine::loadDLL() {
@@ -299,6 +328,7 @@

 void Engine::generateIR() {
     const ram::Program& program = tUnit.getProgram();
+    NodeGenerator generator(*this);
     if (subroutine.empty()) {
         for (const auto& sub : program.getSubroutines()) {
             subroutine.push_back(generator.generateTree(*sub.second));

@@ -33,17 +33,6 @@
 class Context {
     using ViewPtr = Own<ViewWrapper>;

-    /** @brief Run-time value */
-    std::vector<const RamDomain*> data;
-    /** @brief Subroutine return value */
-    std::vector<RamDomain>* returnValues = nullptr;
-    /** @brief Subroutine arguments */
-    const std::vector<RamDomain>* args = nullptr;
-    /** @bref Allocated data */
-    VecOwn<RamDomain[]> allocatedDataContainer;
-    /** @brief Views */
-    VecOwn<ViewWrapper> views;
-
 public:
     Context(size_t size = 0) : data(size) {}
@@ -119,6 +108,18 @@
         assert(id < views.size());
         return views[id].get();
     }
+
+private:
+    /** @brief Run-time value */
+    std::vector<const RamDomain*> data;
+    /** @brief Subroutine return value */
+    std::vector<RamDomain>* returnValues = nullptr;
+    /** @brief Subroutine arguments */
+    const std::vector<RamDomain>* args = nullptr;
+    /** @bref Allocated data */
+    VecOwn<RamDomain[]> allocatedDataContainer;
+    /** @brief Views */
+    VecOwn<ViewWrapper> views;
 };

 }  // namespace souffle::interpreter

@@ -0,0 +1,752 @@
/*
 * Souffle - A Datalog Compiler
 * Copyright (c) 2020, The Souffle Developers. All rights reserved.
 * Licensed under the Universal Permissive License v 1.0 as shown at:
 * - https://opensource.org/licenses/UPL
 * - <souffle root>/licenses/SOUFFLE-UPL.txt
 */

/************************************************************************
 *
 * @file Generator.cpp
 *
 * Define the Interpreter Generator class.
 ***********************************************************************/

#include "interpreter/Generator.h"
#include "interpreter/Engine.h"

namespace souffle::interpreter {

using NodePtr = Own<Node>;
using NodePtrVec = std::vector<NodePtr>;
using RelationHandle = Own<RelationWrapper>;

NodeGenerator::NodeGenerator(Engine& engine) : engine(engine) {
    visitDepthFirst(engine.tUnit.getProgram(), [&](const ram::Relation& relation) {
        assert(relationMap.find(relation.getName()) == relationMap.end() && "double-naming of relations");
        relationMap[relation.getName()] = &relation;
    });
}

NodePtr NodeGenerator::generateTree(const ram::Node& root) {
    // Encode all relation, indexPos and viewId.
    visitDepthFirst(root, [&](const ram::Node& node) {
        if (isA<ram::Query>(&node)) {
            newQueryBlock();
        }
        if (const auto* indexSearch = dynamic_cast<const ram::IndexOperation*>(&node)) {
            encodeIndexPos(*indexSearch);
            encodeView(indexSearch);
        } else if (const auto* exists = dynamic_cast<const ram::ExistenceCheck*>(&node)) {
            encodeIndexPos(*exists);
            encodeView(exists);
        } else if (const auto* provExists = dynamic_cast<const ram::ProvenanceExistenceCheck*>(&node)) {
            encodeIndexPos(*provExists);
            encodeView(provExists);
        }
    });
    // Parse program
    return visit(root);
}

NodePtr NodeGenerator::visitConstant(const ram::Constant& num) {
    return mk<Constant>(I_Constant, &num);
}

NodePtr NodeGenerator::visitTupleElement(const ram::TupleElement& access) {
    auto tupleId = access.getTupleId();
    auto elementId = access.getElement();
    auto newElementId = orderingContext.mapOrder(tupleId, elementId);
    return mk<TupleElement>(I_TupleElement, &access, tupleId, newElementId);
}

NodePtr NodeGenerator::visitAutoIncrement(const ram::AutoIncrement& inc) {
    return mk<AutoIncrement>(I_AutoIncrement, &inc);
}

NodePtr NodeGenerator::visitIntrinsicOperator(const ram::IntrinsicOperator& op) {
    NodePtrVec children;
    for (const auto& arg : op.getArguments()) {
        children.push_back(visit(arg));
    }
    return mk<IntrinsicOperator>(I_IntrinsicOperator, &op, std::move(children));
}

NodePtr NodeGenerator::visitUserDefinedOperator(const ram::UserDefinedOperator& op) {
    NodePtrVec children;
    for (const auto& arg : op.getArguments()) {
        children.push_back(visit(arg));
    }
    return mk<UserDefinedOperator>(I_UserDefinedOperator, &op, std::move(children));
}

NodePtr NodeGenerator::visitNestedIntrinsicOperator(const ram::NestedIntrinsicOperator& op) {
    auto arity = op.getArguments().size();
    orderingContext.addNewTuple(op.getTupleId(), arity);
    NodePtrVec children;
    for (auto&& arg : op.getArguments()) {
        children.push_back(visit(arg));
    }
    children.push_back(visitTupleOperation(op));
    return mk<NestedIntrinsicOperator>(I_NestedIntrinsicOperator, &op, std::move(children));
}

NodePtr NodeGenerator::visitPackRecord(const ram::PackRecord& pr) {
    NodePtrVec children;
    for (const auto& arg : pr.getArguments()) {
        children.push_back(visit(arg));
    }
    return mk<PackRecord>(I_PackRecord, &pr, std::move(children));
}

NodePtr NodeGenerator::visitSubroutineArgument(const ram::SubroutineArgument& arg) {
    return mk<SubroutineArgument>(I_SubroutineArgument, &arg);
}

// -- connectors operators --
NodePtr NodeGenerator::visitTrue(const ram::True& ltrue) {
    return mk<True>(I_True, &ltrue);
}

NodePtr NodeGenerator::visitFalse(const ram::False& lfalse) {
    return mk<False>(I_False, &lfalse);
}

NodePtr NodeGenerator::visitConjunction(const ram::Conjunction& conj) {
    return mk<Conjunction>(I_Conjunction, &conj, visit(conj.getLHS()), visit(conj.getRHS()));
}

NodePtr NodeGenerator::visitNegation(const ram::Negation& neg) {
    return mk<Negation>(I_Negation, &neg, visit(neg.getOperand()));
}

NodePtr NodeGenerator::visitEmptinessCheck(const ram::EmptinessCheck& emptiness) {
    size_t relId = encodeRelation(emptiness.getRelation());
    auto rel = getRelationHandle(relId);
    NodeType type = constructNodeType("EmptinessCheck", lookup(emptiness.getRelation()));
    return mk<EmptinessCheck>(type, &emptiness, rel);
}

NodePtr NodeGenerator::visitRelationSize(const ram::RelationSize& size) {
    size_t relId = encodeRelation(size.getRelation());
    auto rel = getRelationHandle(relId);
    NodeType type = constructNodeType("RelationSize", lookup(size.getRelation()));
    return mk<RelationSize>(type, &size, rel);
}

NodePtr NodeGenerator::visitExistenceCheck(const ram::ExistenceCheck& exists) {
    SuperInstruction superOp = getExistenceSuperInstInfo(exists);
    // Check if the search signature is a total signature
    bool isTotal = true;
    for (const auto& cur : exists.getValues()) {
        if (isUndefValue(cur)) {
            isTotal = false;
        }
    }
    auto ramRelation = lookup(exists.getRelation());
    NodeType type = constructNodeType("ExistenceCheck", ramRelation);
    return mk<ExistenceCheck>(type, &exists, isTotal, encodeView(&exists), std::move(superOp),
            ramRelation.isTemp(), ramRelation.getName());
}

NodePtr NodeGenerator::visitProvenanceExistenceCheck(const ram::ProvenanceExistenceCheck& provExists) {
    SuperInstruction superOp = getExistenceSuperInstInfo(provExists);
    NodeType type = constructNodeType("ProvenanceExistenceCheck", lookup(provExists.getRelation()));
    return mk<ProvenanceExistenceCheck>(type, &provExists, visit(provExists.getChildNodes().back()),
            encodeView(&provExists), std::move(superOp));
}

NodePtr NodeGenerator::visitConstraint(const ram::Constraint& relOp) {
    return mk<Constraint>(I_Constraint, &relOp, visit(relOp.getLHS()), visit(relOp.getRHS()));
}

NodePtr NodeGenerator::visitNestedOperation(const ram::NestedOperation& nested) {
    return visit(nested.getOperation());
}

NodePtr NodeGenerator::visitTupleOperation(const ram::TupleOperation& search) {
    if (engine.profileEnabled) {
        return mk<TupleOperation>(I_TupleOperation, &search, visit(search.getOperation()));
    }
    return visit(search.getOperation());
}

NodePtr NodeGenerator::visitScan(const ram::Scan& scan) {
    orderingContext.addTupleWithDefaultOrder(scan.getTupleId(), scan);
    size_t relId = encodeRelation(scan.getRelation());
    auto rel = getRelationHandle(relId);
    NodeType type = constructNodeType("Scan", lookup(scan.getRelation()));
    return mk<Scan>(type, &scan, rel, visitTupleOperation(scan));
}

NodePtr NodeGenerator::visitParallelScan(const ram::ParallelScan& pScan) {
    orderingContext.addTupleWithDefaultOrder(pScan.getTupleId(), pScan);
    size_t relId = encodeRelation(pScan.getRelation());
    auto rel = getRelationHandle(relId);
    NodeType type = constructNodeType("ParallelScan", lookup(pScan.getRelation()));
    auto res = mk<ParallelScan>(type, &pScan, rel, visitTupleOperation(pScan));
    res->setViewContext(parentQueryViewContext);
    return res;
}

NodePtr NodeGenerator::visitIndexScan(const ram::IndexScan& iScan) {
    orderingContext.addTupleWithIndexOrder(iScan.getTupleId(), iScan);
    SuperInstruction indexOperation = getIndexSuperInstInfo(iScan);
    NodeType type = constructNodeType("IndexScan", lookup(iScan.getRelation()));
    return mk<IndexScan>(
            type, &iScan, nullptr, visitTupleOperation(iScan), encodeView(&iScan), std::move(indexOperation));
}

NodePtr NodeGenerator::visitParallelIndexScan(const ram::ParallelIndexScan& piscan) {
    orderingContext.addTupleWithIndexOrder(piscan.getTupleId(), piscan);
    SuperInstruction indexOperation = getIndexSuperInstInfo(piscan);
    size_t relId = encodeRelation(piscan.getRelation());
    auto rel = getRelationHandle(relId);
    NodeType type = constructNodeType("ParallelIndexScan", lookup(piscan.getRelation()));
    auto res = mk<ParallelIndexScan>(type, &piscan, rel, visitTupleOperation(piscan), encodeIndexPos(piscan),
            std::move(indexOperation));
    res->setViewContext(parentQueryViewContext);
    return res;
}

NodePtr NodeGenerator::visitChoice(const ram::Choice& choice) {
    orderingContext.addTupleWithDefaultOrder(choice.getTupleId(), choice);
    size_t relId = encodeRelation(choice.getRelation());
    auto rel = getRelationHandle(relId);
    NodeType type = constructNodeType("Choice", lookup(choice.getRelation()));
    return mk<Choice>(type, &choice, rel, visit(choice.getCondition()), visitTupleOperation(choice));
}

NodePtr NodeGenerator::visitParallelChoice(const ram::ParallelChoice& pChoice) {
    orderingContext.addTupleWithDefaultOrder(pChoice.getTupleId(), pChoice);
    size_t relId = encodeRelation(pChoice.getRelation());
    auto rel = getRelationHandle(relId);
    NodeType type = constructNodeType("ParallelChoice", lookup(pChoice.getRelation()));
    auto res = mk<ParallelChoice>(
            type, &pChoice, rel, visit(pChoice.getCondition()), visitTupleOperation(pChoice));
    res->setViewContext(parentQueryViewContext);
    return res;
}

NodePtr NodeGenerator::visitIndexChoice(const ram::IndexChoice& iChoice) {
    orderingContext.addTupleWithIndexOrder(iChoice.getTupleId(), iChoice);
    SuperInstruction indexOperation = getIndexSuperInstInfo(iChoice);
    NodeType type = constructNodeType("IndexChoice", lookup(iChoice.getRelation()));
    return mk<IndexChoice>(type, &iChoice, nullptr, visit(iChoice.getCondition()),
            visitTupleOperation(iChoice), encodeView(&iChoice), std::move(indexOperation));
}

NodePtr NodeGenerator::visitParallelIndexChoice(const ram::ParallelIndexChoice& piChoice) {
    orderingContext.addTupleWithIndexOrder(piChoice.getTupleId(), piChoice);
    SuperInstruction indexOperation = getIndexSuperInstInfo(piChoice);
    size_t relId = encodeRelation(piChoice.getRelation());
    auto rel = getRelationHandle(relId);
    NodeType type = constructNodeType("ParallelIndexChoice", lookup(piChoice.getRelation()));
    auto res = mk<ParallelIndexChoice>(type, &piChoice, rel, visit(piChoice.getCondition()),
            visit(piChoice.getOperation()), encodeIndexPos(piChoice), std::move(indexOperation));
    res->setViewContext(parentQueryViewContext);
    return res;
}

NodePtr NodeGenerator::visitUnpackRecord(const ram::UnpackRecord& unpack) {  // get reference
    orderingContext.addNewTuple(unpack.getTupleId(), unpack.getArity());
    return mk<UnpackRecord>(
            I_UnpackRecord, &unpack, visit(unpack.getExpression()), visitTupleOperation(unpack));
}

NodePtr NodeGenerator::visitAggregate(const ram::Aggregate& aggregate) {
    // Notice: Aggregate is sensitive to the visiting order of the subexprs in order to make
    // orderCtxt consistent. The order of visiting should be the same as the order of execution during
    // runtime.
    orderingContext.addTupleWithDefaultOrder(aggregate.getTupleId(), aggregate);
    NodePtr expr = visit(aggregate.getExpression());
    NodePtr cond = visit(aggregate.getCondition());
    orderingContext.addNewTuple(aggregate.getTupleId(), 1);
    NodePtr nested = visitTupleOperation(aggregate);
    size_t relId = encodeRelation(aggregate.getRelation());
    auto rel = getRelationHandle(relId);
    NodeType type = constructNodeType("Aggregate", lookup(aggregate.getRelation()));
    return mk<Aggregate>(type, &aggregate, rel, std::move(expr), std::move(cond), std::move(nested));
}

NodePtr NodeGenerator::visitParallelAggregate(const ram::ParallelAggregate& pAggregate) {
    orderingContext.addTupleWithDefaultOrder(pAggregate.getTupleId(), pAggregate);
    NodePtr expr = visit(pAggregate.getExpression());
    NodePtr cond = visit(pAggregate.getCondition());
    orderingContext.addNewTuple(pAggregate.getTupleId(), 1);
    NodePtr nested = visitTupleOperation(pAggregate);
    size_t relId = encodeRelation(pAggregate.getRelation());
    auto rel = getRelationHandle(relId);
    NodeType type = constructNodeType("ParallelAggregate", lookup(pAggregate.getRelation()));
    auto res = mk<ParallelAggregate>(
            type, &pAggregate, rel, std::move(expr), std::move(cond), std::move(nested));
    res->setViewContext(parentQueryViewContext);

    return res;
}

NodePtr NodeGenerator::visitIndexAggregate(const ram::IndexAggregate& iAggregate) {
    orderingContext.addTupleWithIndexOrder(iAggregate.getTupleId(), iAggregate);
    SuperInstruction indexOperation = getIndexSuperInstInfo(iAggregate);
    NodePtr expr = visit(iAggregate.getExpression());
    NodePtr cond = visit(iAggregate.getCondition());
    orderingContext.addNewTuple(iAggregate.getTupleId(), 1);
    NodePtr nested = visitTupleOperation(iAggregate);
    size_t relId = encodeRelation(iAggregate.getRelation());
    auto rel = getRelationHandle(relId);
    NodeType type = constructNodeType("IndexAggregate", lookup(iAggregate.getRelation()));
    return mk<IndexAggregate>(type, &iAggregate, rel, std::move(expr), std::move(cond), std::move(nested),
            encodeView(&iAggregate), std::move(indexOperation));
}

NodePtr NodeGenerator::visitParallelIndexAggregate(const ram::ParallelIndexAggregate& piAggregate) {
    orderingContext.addTupleWithIndexOrder(piAggregate.getTupleId(), piAggregate);
    SuperInstruction indexOperation = getIndexSuperInstInfo(piAggregate);
    NodePtr expr = visit(piAggregate.getExpression());
    NodePtr cond = visit(piAggregate.getCondition());
    orderingContext.addNewTuple(piAggregate.getTupleId(), 1);
    NodePtr nested = visitTupleOperation(piAggregate);
    size_t relId = encodeRelation(piAggregate.getRelation());
    auto rel = getRelationHandle(relId);
    NodeType type = constructNodeType("ParallelIndexAggregate", lookup(piAggregate.getRelation()));
    auto res = mk<ParallelIndexAggregate>(type, &piAggregate, rel, std::move(expr), std::move(cond),
            std::move(nested), encodeView(&piAggregate), std::move(indexOperation));
    res->setViewContext(parentQueryViewContext);
    return res;
}

NodePtr NodeGenerator::visitBreak(const ram::Break& breakOp) {
    return mk<Break>(I_Break, &breakOp, visit(breakOp.getCondition()), visit(breakOp.getOperation()));
}

NodePtr NodeGenerator::visitFilter(const ram::Filter& filter) {
    return mk<Filter>(I_Filter, &filter, visit(filter.getCondition()), visit(filter.getOperation()));
}

NodePtr NodeGenerator::visitProject(const ram::Project& project) {
    SuperInstruction superOp = getProjectSuperInstInfo(project);
    size_t relId = encodeRelation(project.getRelation());
    auto rel = getRelationHandle(relId);
    NodeType type = constructNodeType("Project", lookup(project.getRelation()));
    return mk<Project>(type, &project, rel, std::move(superOp));
}

NodePtr NodeGenerator::visitSubroutineReturn(const ram::SubroutineReturn& ret) {
    NodePtrVec children;
    for (const auto& value : ret.getValues()) {
        children.push_back(visit(value));
    }
    return mk<SubroutineReturn>(I_SubroutineReturn, &ret, std::move(children));
}

NodePtr NodeGenerator::visitSequence(const ram::Sequence& seq) {
    NodePtrVec children;
    for (const auto& value : seq.getStatements()) {
        children.push_back(visit(value));
    }
    return mk<Sequence>(I_Sequence, &seq, std::move(children));
}

NodePtr NodeGenerator::visitParallel(const ram::Parallel& parallel) {
    // Parallel statements are executed in sequence for now.
    NodePtrVec children;
    for (const auto& value : parallel.getStatements()) {
        children.push_back(visit(value));
    }
    return mk<Parallel>(I_Parallel, &parallel, std::move(children));
}

NodePtr NodeGenerator::visitLoop(const ram::Loop& loop) {
    return mk<Loop>(I_Loop, &loop, visit(loop.getBody()));
}

NodePtr NodeGenerator::visitExit(const ram::Exit& exit) {
    return mk<Exit>(I_Exit, &exit, visit(exit.getCondition()));
}

NodePtr NodeGenerator::visitCall(const ram::Call& call) {
    // translate a subroutine name to an index
    // the index is used to identify the subroutine
    // in the interpreter. The index is stored in the
    // data array of the Node as the first
    // entry.
    auto subs = engine.tUnit.getProgram().getSubroutines();
    size_t subroutineId = distance(subs.begin(), subs.find(call.getName()));
    return mk<Call>(I_Call, &call, subroutineId);
}

NodePtr NodeGenerator::visitLogRelationTimer(const ram::LogRelationTimer& timer) {
    size_t relId = encodeRelation(timer.getRelation());
    auto rel = getRelationHandle(relId);
    NodePtrVec children;
    children.push_back(visit(timer.getStatement()));
    return mk<LogRelationTimer>(I_LogRelationTimer, &timer, visit(timer.getStatement()), rel);
}

NodePtr NodeGenerator::visitLogTimer(const ram::LogTimer& timer) {
    NodePtrVec children;
    children.push_back(visit(timer.getStatement()));
    return mk<LogTimer>(I_LogTimer, &timer, visit(timer.getStatement()));
}

NodePtr NodeGenerator::visitDebugInfo(const ram::DebugInfo& dbg) {
    NodePtrVec children;
    children.push_back(visit(dbg.getStatement()));
    return mk<DebugInfo>(I_DebugInfo, &dbg, visit(dbg.getStatement()));
}

NodePtr NodeGenerator::visitClear(const ram::Clear& clear) {
    size_t relId = encodeRelation(clear.getRelation());
    auto rel = getRelationHandle(relId);
    NodeType type = constructNodeType("Clear", lookup(clear.getRelation()));
    return mk<Clear>(type, &clear, rel);
}

NodePtr NodeGenerator::visitLogSize(const ram::LogSize& size) {
    size_t relId = encodeRelation(size.getRelation());
    auto rel = getRelationHandle(relId);
    return mk<LogSize>(I_LogSize, &size, rel);
}

NodePtr NodeGenerator::visitIO(const ram::IO& io) {
    size_t relId = encodeRelation(io.getRelation());
    auto rel = getRelationHandle(relId);
    return mk<IO>(I_IO, &io, rel);
}

NodePtr NodeGenerator::visitQuery(const ram::Query& query) {
    std::shared_ptr<ViewContext> viewContext = std::make_shared<ViewContext>();
    parentQueryViewContext = viewContext;
    // split terms of conditions of outer-most filter operation
    // into terms that require a context and terms that
    // do not require a view
    const ram::Operation* next = &query.getOperation();
    std::vector<const ram::Condition*> freeOfView;
    if (const auto* filter = dynamic_cast<const ram::Filter*>(&query.getOperation())) {
        next = &filter->getOperation();
        // Check terms of outer filter operation whether they can be pushed before
        // the view-generation for speed improvements
        auto conditions = findConjunctiveTerms(&filter->getCondition());
        for (auto const& cur : conditions) {
            bool needView = false;
            visitDepthFirst(*cur, [&](const ram::Node& node) {
                if (requireView(&node)) {
                    needView = true;
                    const auto& rel = getViewRelation(&node);
                    viewContext->addViewInfoForFilter(
                            encodeRelation(rel), indexTable[&node], encodeView(&node));
                }
            });

            if (needView) {
                viewContext->addViewOperationForFilter(visit(*cur));
            } else {
                viewContext->addViewFreeOperationForFilter(visit(*cur));
            }
        }
    }

    visitDepthFirst(*next, [&](const ram::Node& node) {
        if (requireView(&node)) {
            const auto& rel = getViewRelation(&node);
            viewContext->addViewInfoForNested(encodeRelation(rel), indexTable[&node], encodeView(&node));
        };
    });

    visitDepthFirst(*next, [&](const ram::AbstractParallel&) { viewContext->isParallel = true; });

    auto res = mk<Query>(I_Query, &query, visit(*next));
    res->setViewContext(parentQueryViewContext);
    return res;
}

NodePtr NodeGenerator::visitExtend(const ram::Extend& extend) {
    size_t src = encodeRelation(extend.getFirstRelation());
    size_t target = encodeRelation(extend.getSecondRelation());
    return mk<Extend>(I_Extend, &extend, src, target);
}

NodePtr NodeGenerator::visitSwap(const ram::Swap& swap) {
    size_t src = encodeRelation(swap.getFirstRelation());
    size_t target = encodeRelation(swap.getSecondRelation());
    return mk<Swap>(I_Swap, &swap, src, target);
}

NodePtr NodeGenerator::visitUndefValue(const ram::UndefValue&) {
    return nullptr;
}

NodePtr NodeGenerator::visitNode(const ram::Node& node) {
    fatal("unsupported node type: %s", typeid(node).name());
}

void NodeGenerator::newQueryBlock() {
    viewTable.clear();
    viewId = 0;
}

size_t NodeGenerator::getNewRelId() {
    return relId++;
}

size_t NodeGenerator::getNextViewId() {
    return viewId++;
}

template <class RamNode>
size_t NodeGenerator::encodeIndexPos(RamNode& node) {
    const std::string& name = node.getRelation();
    auto& orderSet = engine.isa->getIndexes(name);
    ram::analysis::SearchSignature signature = engine.isa->getSearchSignature(&node);
    // A zero signature is equivalent as a full order signature.
    if (signature.empty()) {
        signature = ram::analysis::SearchSignature::getFullSearchSignature(signature.arity());
    }
    auto i = orderSet.getLexOrderNum(signature);
    indexTable[&node] = i;
    return i;
};

size_t NodeGenerator::encodeView(const ram::Node* node) {
    auto pos = viewTable.find(node);
    if (pos != viewTable.end()) {
        return pos->second;
    }
    size_t id = getNextViewId();
    viewTable[node] = id;
    return id;
}

const ram::Relation& NodeGenerator::lookup(const std::string& relName) {
    auto it = relationMap.find(relName);
    assert(it != relationMap.end() && "relation not found");
    return *it->second;
}

size_t NodeGenerator::getArity(const std::string& relName) {
    auto rel = lookup(relName);
    return rel.getArity();
}

size_t NodeGenerator::encodeRelation(const std::string& relName) {
    auto pos = relTable.find(relName);
    if (pos != relTable.end()) {
        return pos->second;
    }
    size_t id = getNewRelId();
    relTable[relName] = id;
    engine.createRelation(lookup(relName), id);
    return id;
}

RelationHandle* NodeGenerator::getRelationHandle(const size_t idx) {
    return engine.relations[idx].get();
}

bool NodeGenerator::requireView(const ram::Node* node) {
    if (isA<ram::AbstractExistenceCheck>(node)) {
        return true;
    } else if (isA<ram::IndexOperation>(node)) {
        return true;
    }
    return false;
}

const std::string& NodeGenerator::getViewRelation(const ram::Node* node) {
    if (const auto* exist = dynamic_cast<const ram::AbstractExistenceCheck*>(node)) {
        return exist->getRelation();
    } else if (const auto* index = dynamic_cast<const ram::IndexOperation*>(node)) {
        return index->getRelation();
    }

    fatal("The ram::Node does not require a view.");
}

SuperInstruction NodeGenerator::getIndexSuperInstInfo(const ram::IndexOperation& ramIndex) {
    size_t arity = getArity(ramIndex.getRelation());
    auto interpreterRel = encodeRelation(ramIndex.getRelation());
    auto indexId = encodeIndexPos(ramIndex);
    auto order = (*getRelationHandle(interpreterRel))->getIndexOrder(indexId);
    SuperInstruction indexOperation(arity);
    const auto& first = ramIndex.getRangePattern().first;
    for (size_t i = 0; i < arity; ++i) {
        // Note: unlike orderingContext::mapOrder, where we try to decode the order,
        // here we have to encode the order.
        auto& low = first[order[i]];

        // Unbounded
        if (isUndefValue(low)) {
            indexOperation.first[i] = MIN_RAM_SIGNED;
            continue;
        }

        // Constant
        if (isA<ram::Constant>(low)) {
            indexOperation.first[i] = dynamic_cast<ram::Constant*>(low)->getConstant();
            continue;
        }

        // TupleElement
        if (isA<ram::TupleElement>(low)) {
            auto lowTuple = dynamic_cast<ram::TupleElement*>(low);
            size_t tupleId = lowTuple->getTupleId();
            size_t elementId = lowTuple->getElement();
            size_t newElementId = orderingContext.mapOrder(tupleId, elementId);
            indexOperation.tupleFirst.push_back({i, tupleId, newElementId});
            continue;
        }

        // Generic expression
        indexOperation.exprFirst.push_back(std::pair<size_t, Own<Node>>(i, visit(low)));
    }
    const auto& second = ramIndex.getRangePattern().second;
    for (size_t i = 0; i < arity; ++i) {
        auto& hig = second[order[i]];

        // Unbounded
        if (isUndefValue(hig)) {
            indexOperation.second[i] = MAX_RAM_SIGNED;
            continue;
        }

        // Constant
        if (isA<ram::Constant>(hig)) {
            indexOperation.second[i] = dynamic_cast<ram::Constant*>(hig)->getConstant();
            continue;
        }

        // TupleElement
        if (isA<ram::TupleElement>(hig)) {
            auto highTuple = dynamic_cast<ram::TupleElement*>(hig);
            size_t tupleId = highTuple->getTupleId();
            size_t elementId = highTuple->getElement();
            size_t newElementId = orderingContext.mapOrder(tupleId, elementId);
            indexOperation.tupleSecond.push_back({i, tupleId, newElementId});
            continue;
        }

        // Generic expression
        indexOperation.exprSecond.push_back(std::pair<size_t, Own<Node>>(i, visit(hig)));
    }
    return indexOperation;
}

SuperInstruction NodeGenerator::getExistenceSuperInstInfo(const ram::AbstractExistenceCheck& abstractExist) {
    auto interpreterRel = encodeRelation(abstractExist.getRelation());
    size_t indexId = 0;
    if (isA<ram::ExistenceCheck>(&abstractExist)) {
        indexId = encodeIndexPos(*dynamic_cast<const ram::ExistenceCheck*>(&abstractExist));
    } else if (isA<ram::ProvenanceExistenceCheck>(&abstractExist)) {
        indexId = encodeIndexPos(*dynamic_cast<const ram::ProvenanceExistenceCheck*>(&abstractExist));
    } else {
        fatal("Unrecognized ram::AbstractExistenceCheck.");
    }
    auto order = (*getRelationHandle(interpreterRel))->getIndexOrder(indexId);
    size_t arity = getArity(abstractExist.getRelation());
    SuperInstruction superOp(arity);
    const auto& children = abstractExist.getValues();
    for (size_t i = 0; i < arity; ++i) {
        auto& child = children[order[i]];

        // Unbounded
        if (isUndefValue(child)) {
            superOp.first[i] = MIN_RAM_SIGNED;
            superOp.second[i] = MAX_RAM_SIGNED;
            continue;
        }

        // Constant
        if (isA<ram::Constant>(child)) {
            superOp.first[i] = dynamic_cast<ram::Constant*>(child)->getConstant();
            superOp.second[i] = superOp.first[i];
            continue;
        }

        // TupleElement
        if (isA<ram::TupleElement>(child)) {
            auto tuple = dynamic_cast<ram::TupleElement*>(child);
            size_t tupleId = tuple->getTupleId();
            size_t elementId = tuple->getElement();
            size_t newElementId = orderingContext.mapOrder(tupleId, elementId);
            superOp.tupleFirst.push_back({i, tupleId, newElementId});
            continue;
        }

        // Generic expression
        superOp.exprFirst.push_back(std::pair<size_t, Own<Node>>(i, visit(child)));
    }
    return superOp;
}

SuperInstruction NodeGenerator::getProjectSuperInstInfo(const ram::Project& exist) {
    size_t arity = getArity(exist.getRelation());
    SuperInstruction superOp(arity);
    const auto& children = exist.getValues();
    for (size_t i = 0; i < arity; ++i) {
        auto& child = children[i];
        // Constant
        if (isA<ram::Constant>(child)) {
            superOp.first[i] = dynamic_cast<ram::Constant*>(child)->getConstant();
            continue;
        }

        // TupleElement
        if (isA<ram::TupleElement>(child)) {
            auto tuple = dynamic_cast<ram::TupleElement*>(child);
            size_t tupleId = tuple->getTupleId();
            size_t elementId = tuple->getElement();
            size_t newElementId = orderingContext.mapOrder(tupleId, elementId);
            superOp.tupleFirst.push_back({i, tupleId, newElementId});
            continue;
        }

        // Generic expression
        superOp.exprFirst.push_back(std::pair<size_t, Own<Node>>(i, visit(child)));
    }
    return superOp;
}

// -- Definition of OrderingContext --

NodeGenerator::OrderingContext::OrderingContext(NodeGenerator& generator) : generator(generator) {}

void NodeGenerator::OrderingContext::addNewTuple(size_t tupleId, size_t arity) {
    std::vector<uint32_t> order;
    for (size_t i = 0; i < arity; ++i) {
        order.push_back((uint32_t)i);
    }
    insertOrder(tupleId, std::move(order));
}

template <class RamNode>
void NodeGenerator::OrderingContext::addTupleWithDefaultOrder(size_t tupleId, const RamNode& node) {
    auto interpreterRel = generator.encodeRelation(node.getRelation());
    insertOrder(tupleId, (*generator.getRelationHandle(interpreterRel))->getIndexOrder(0));
}

template <class RamNode>
void NodeGenerator::OrderingContext::addTupleWithIndexOrder(size_t tupleId, const RamNode& node) {
    auto interpreterRel = generator.encodeRelation(node.getRelation());
    auto indexId = generator.encodeIndexPos(node);
    auto order = (*generator.getRelationHandle(interpreterRel))->getIndexOrder(indexId);
    insertOrder(tupleId, order);
}

size_t NodeGenerator::OrderingContext::mapOrder(size_t tupleId, size_t elementId) const {
    return tupleOrders[tupleId][elementId];
}

void NodeGenerator::OrderingContext::insertOrder(size_t tupleId, const Order& order) {
    if (tupleId >= tupleOrders.size()) {
        tupleOrders.resize(tupleId + 1);
    }

    std::vector<uint32_t> decodeOrder(order.size());
    for (size_t i = 0; i < order.size(); ++i) {
        decodeOrder[order[i]] = i;
    }

    tupleOrders[tupleId] = std::move(decodeOrder);
}
};  // namespace souffle::interpreter
Files                          Coverage
src                            81.92%
Project Totals (391 files)     81.92%