e2nIEE / pandapipes
Showing 25 of 60 files from the diff.
Other files ignored by Codecov:
- CHANGELOG.rst has changed.
- .travis.yml was deleted.

@@ -15,8 +15,8 @@

def valve_patches(coords, size, **kwargs):
    polys, lines = list(), list()
-    facecolor = kwargs.pop('patch_facecolor')
-    colors = get_color_list(facecolor, len(coords))
+    edgecolor = kwargs.pop('patch_edgecolor')
+    colors = get_color_list(edgecolor, len(coords))
    lw = kwargs.get("linewidths", 2.)
    filled = kwargs.pop("filled", np.full(len(coords), 0, dtype=np.bool))
    filled = get_filled_list(filled, len(coords))

@@ -4,9 +4,12 @@

import copy

+import numpy as np
import pandas as pd
from numpy import dtype
from pandapipes import __version__
+from pandapipes.component_models import Junction, Pipe, ExtGrid
+from pandapipes.component_models.auxiliaries.component_toolbox import add_new_component
from pandapower.auxiliary import ADict

try:
@@ -58,24 +61,27 @@
        return r


-def get_default_pandapipes_structure():
-    """
-
-    :return:
-    :rtype:
-    """
-    default_pandapipes_structure = {
-        # structure data
-        # f8, u4 etc. are probably referencing numba or numpy data types
+def get_basic_net_entries():
+    return {
        "fluid": None,
        "converged": False,
        "name": "",
        "version": __version__,
-        "controller": [('object', dtype(object)),
+        "component_list": []}
+
+
+def get_basic_components():
+    return Junction, Pipe, ExtGrid
+
+
+def add_default_components(net, overwrite=False):
+    for comp in get_basic_components():
+        add_new_component(net, comp, overwrite)
+    if "controller" not in net or overwrite:
+        ctrl_dtypes = [('object', dtype(object)),
                       ('in_service', "bool"),
                       ('order', "float64"),
                       ('level', dtype(object)),
                       ('initial_run', "bool"),
-                       ("recycle", "bool")],
-        "component_list": []}
-    return default_pandapipes_structure
+                       ("recycle", "bool")]
+        net['controller'] = pd.DataFrame(np.zeros(0, dtype=ctrl_dtypes), index=pd.Int64Index([]))
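The three helpers above replace get_default_pandapipes_structure() and are composed by create_empty_network() later in this diff; a minimal usage sketch (not part of the diff, assumes pandapipes 0.4.0):

from pandapipes.pandapipes_net import pandapipesNet, get_basic_net_entries, add_default_components

net = pandapipesNet(get_basic_net_entries())  # dict-like net with "fluid", "version", "component_list", ...
add_default_components(net, overwrite=True)   # registers Junction, Pipe and ExtGrid tables and creates an
                                              # empty net['controller'] DataFrame with the dtypes listed above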

@@ -3,11 +3,12 @@
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.

import numpy as np
+import pandas as pd
from pandapower.plotting.collections import _create_node_collection, \
    _create_node_element_collection, _create_line2d_collection, _create_complex_branch_collection, \
    add_cmap_to_collection, coords_from_node_geodata
from pandapower.plotting.patch_makers import load_patches, ext_grid_patches
-from pandapipes.plotting.patch_makers import valve_patches, source_patches, heat_exchanger_patches, \
+from pandapipes.plotting.patch_makers import valve_patches, source_patches, heat_exchanger_patches,\
    pump_patches
from pandapower.plotting.plotting_toolbox import get_index_array

@@ -34,7 +35,8 @@
    :param size: Patch size
    :type size: int, default 5
    :param patch_type: Patch type, can be \n
-        - "circle" or "ellipse" for an ellipse (cirlces are just ellipses with the same width + height)
+        - "circle" or "ellipse" for an ellipse (cirlces are just ellipses with the same width +\
+            height)
        - "rect" or "rectangle" for a rectangle
        - "poly<n>" for a polygon with n edges
    :type patch_type: str, default "circle"
@@ -100,7 +102,7 @@
    :type pipes: list, default None
    :param pipe_geodata: Coordinates to use for plotting. If None, net["pipe_geodata"] is used.
    :type pipe_geodata: pandas.DataFrame, default None
-    :param junction_geodata: Coordinates to use for plotting in case of use_junction_geodata = True.\
+    :param junction_geodata: Coordinates to use for plotting in case of use_junction_geodata=True.\
        If None, net["junction_geodata"] is used.
    :type junction_geodata: pandas.DataFrame, default None
    :param use_junction_geodata: Defines whether junction or pipe geodata are used.
@@ -158,13 +160,15 @@
    if cmap is not None:
        if z is None:
            z = net.res_pipe.v_mean_m_per_s.loc[pipes_with_geo]
+        elif isinstance(z, pd.Series):
+            z = z.loc[pipes_with_geo]
        add_cmap_to_collection(lc, cmap, norm, z, cbar_title, clim=clim)

    return lc


def create_sink_collection(net, sinks=None, size=1., infofunc=None, picker=False,
-                           orientation=(np.pi*5/6), **kwargs):
+                           orientation=(np.pi*5/6), cmap=None, norm=None, z=None, **kwargs):
    """
    Creates a matplotlib patch collection of pandapipes sinks.

@@ -182,6 +186,13 @@
    :param orientation: Orientation of sink collection. pi is directed downwards, increasing values\
        lead to clockwise direction changes.
    :type orientation: float, default np.pi*(5/6)
+    :param cmap: colormap for the sink colors
+    :type cmap: matplotlib norm object, default None
+    :param norm: matplotlib norm object to normalize the values of z
+    :type norm: matplotlib norm object, default None
+    :param z: Array of sink result magnitudes for colormap. Used in case of given cmap. If None,\
+        net.res_sink.mdot_kg_per_s is used.
+    :type z: array, default None
    :param kwargs: Keyword arguments are passed to the patch function
    :return: sink_pc - patch collection, sink_lc - line collection
    """
@@ -191,14 +202,27 @@
    infos = [infofunc(i) for i in range(len(sinks))] if infofunc is not None else []
    node_coords = net.junction_geodata.loc[
        net.sink.loc[sinks, "junction"].values, ['x', 'y']].values
+
+    colors = kwargs.pop("color", "k")
+    linewidths = kwargs.pop("linewidths", 2.)
+    linewidths = kwargs.pop("linewidth", linewidths)
+    linewidths = kwargs.pop("lw", linewidths)
+    if cmap is not None:
+        if z is None:
+            z = net.res_sink.mdot_kg_per_s
+        colors = [cmap(norm(z.at[idx])) for idx in sinks]
+    patch_edgecolor = kwargs.pop("patch_edgecolor", colors)
+    line_color = kwargs.pop("line_color", colors)
+
    sink_pc, sink_lc = _create_node_element_collection(
        node_coords, load_patches, size=size, infos=infos, orientation=orientation,
-        picker=picker, **kwargs)
+        picker=picker, patch_edgecolor=patch_edgecolor, line_color=line_color,
+        linewidths=linewidths, **kwargs)
    return sink_pc, sink_lc


def create_source_collection(net, sources=None, size=1., infofunc=None, picker=False,
-                             orientation=(np.pi*7/6), **kwargs):
+                             orientation=(np.pi*7/6), cmap=None, norm=None, z=None, **kwargs):
    """
    Creates a matplotlib patch collection of pandapipes sources.

@@ -216,6 +240,13 @@
    :param orientation: Orientation of source collection. pi is directed downwards, increasing\
        values lead to clockwise direction changes.
    :type orientation: float, default np.pi*(7/6)
+    :param cmap: colormap for the source colors
+    :type cmap: matplotlib norm object, default None
+    :param norm: matplotlib norm object to normalize the values of z
+    :type norm: matplotlib norm object, default None
+    :param z: Array of source result magnitudes for colormap. Used in case of given cmap. If None,\
+        net.res_source.mdot_kg_per_s is used.
+    :type z: array, default None
    :param kwargs: Keyword arguments are passed to the patch function
    :return: source_pc - patch collection, source_lc - line collection
    """
@@ -225,9 +256,22 @@
    infos = [infofunc(i) for i in range(len(sources))] if infofunc is not None else []
    node_coords = net.junction_geodata.loc[net.source.loc[sources, "junction"].values,
                                           ["x", "y"]].values
+
+    colors = kwargs.pop("color", "k")
+    linewidths = kwargs.pop("linewidths", 2.)
+    linewidths = kwargs.pop("linewidth", linewidths)
+    linewidths = kwargs.pop("lw", linewidths)
+    if cmap is not None:
+        if z is None:
+            z = net.res_source.mdot_kg_per_s
+        colors = [cmap(norm(z.at[idx])) for idx in sources]
+    patch_edgecolor = kwargs.pop("patch_edgecolor", colors)
+    line_color = kwargs.pop("line_color", colors)
+
    source_pc, source_lc = _create_node_element_collection(
        node_coords, source_patches, size=size, infos=infos, orientation=orientation,
-        picker=picker, repeat_infos=(1, 3), **kwargs)
+        picker=picker, patch_edgecolor=patch_edgecolor, line_color=line_color,
+        linewidths=linewidths, repeat_infos=(1, 3), **kwargs)
    return source_pc, source_lc


@@ -266,14 +310,22 @@
            raise ValueError("Length mismatch between chosen ext_grids and ext_grid_junctions.")
    infos = [infofunc(ext_grid_idx) for ext_grid_idx in ext_grids] if infofunc is not None else []

+    colors = kwargs.pop("color", "k")
+    linewidths = kwargs.pop("linewidths", 2.)
+    linewidths = kwargs.pop("linewidth", linewidths)
+    linewidths = kwargs.pop("lw", linewidths)
+    patch_edgecolor = kwargs.pop("patch_edgecolor", colors)
+    line_color = kwargs.pop("line_color", colors)
+
    node_coords = net.junction_geodata.loc[ext_grid_junctions, ["x", "y"]].values
    ext_grid_pc, ext_grid_lc = _create_node_element_collection(
        node_coords, ext_grid_patches, size=size, infos=infos, orientation=orientation,
-        picker=picker, hatch="XXX", **kwargs)
+        picker=picker, hatch="XXX", patch_edgecolor=patch_edgecolor, line_color=line_color,
+        linewidths=linewidths, **kwargs)
    return ext_grid_pc, ext_grid_lc


-def create_heat_exchanger_collection(net, heat_ex=None, size=5., junction_geodata=None, color='k',
+def create_heat_exchanger_collection(net, heat_ex=None, size=5., junction_geodata=None,
                                     infofunc=None, picker=False, **kwargs):
    """
    Creates a matplotlib patch collection of pandapipes junction-junction heat_exchangers.
@@ -311,24 +363,25 @@
    if len(hex_with_geo) == 0:
        return None

+    colors = kwargs.pop("color", "k")
    linewidths = kwargs.pop("linewidths", 2.)
    linewidths = kwargs.pop("linewidth", linewidths)
    linewidths = kwargs.pop("lw", linewidths)
+    patch_edgecolor = kwargs.pop("patch_edgecolor", colors)
+    line_color = kwargs.pop("line_color", colors)

    infos = list(np.repeat([infofunc(i) for i in range(len(hex_with_geo))], 2)) \
        if infofunc is not None else []

-    lc, pc = _create_complex_branch_collection(coords, heat_exchanger_patches, size, infos,
-                                               picker=picker, linewidths=linewidths,
-                                               patch_facecolor=color, line_color=color,
-                                               **kwargs)
+    lc, pc = _create_complex_branch_collection(
+        coords, heat_exchanger_patches, size, infos, picker=picker, linewidths=linewidths,
+        patch_edgecolor=patch_edgecolor, line_color=line_color, **kwargs)

    return lc, pc


-def create_valve_collection(net, valves=None, size=5., junction_geodata=None, color='k',
-                            infofunc=None, picker=False, fill_closed=True,
-                            respect_valves=False, **kwargs):
+def create_valve_collection(net, valves=None, size=5., junction_geodata=None, infofunc=None,
+                            picker=False, fill_closed=True, respect_valves=False, **kwargs):
    """
    Creates a matplotlib patch collection of pandapipes junction-junction valves. Valves are
    plotted in the center between two junctions with a "helper" line (dashed and thin) being drawn
@@ -343,8 +396,6 @@
    :type size: float, default 5.
    :param junction_geodata: Coordinates to use for plotting. If None, net["junction_geodata"] is used.
    :type junction_geodata: pandas.DataFrame, default None
-    :param colors: Color or list of colors for every valve
-    :type colors: iterable, float, default None
    :param infofunc: infofunction for the patch element
    :type infofunc: function, default None
    :param picker: Picker argument passed to the patch collection
@@ -369,25 +420,27 @@
    if len(valves_with_geo) == 0:
        return None

+    colors = kwargs.pop("color", "k")
    linewidths = kwargs.pop("linewidths", 2.)
    linewidths = kwargs.pop("linewidth", linewidths)
    linewidths = kwargs.pop("lw", linewidths)
+    patch_edgecolor = kwargs.pop("patch_edgecolor", colors)
+    line_color = kwargs.pop("line_color", colors)

    infos = list(np.repeat([infofunc(i) for i in range(len(valves_with_geo))], 2)) \
        if infofunc is not None else []
    filled = valve_table["opened"].values
    if fill_closed:
        filled = ~filled
-    lc, pc = _create_complex_branch_collection(coords, valve_patches, size, infos,
-                                               picker=picker, linewidths=linewidths, filled=filled,
-                                               patch_facecolor=color, line_color=color,
-                                               **kwargs)
+    lc, pc = _create_complex_branch_collection(
+        coords, valve_patches, size, infos, picker=picker, linewidths=linewidths, filled=filled,
+        patch_edgecolor=patch_edgecolor, line_color=line_color, **kwargs)

    return lc, pc


def create_pump_collection(net, pumps=None, table_name='pump', size=5., junction_geodata=None,
-                           color='k', infofunc=None, picker=False, **kwargs):
+                           infofunc=None, picker=False, **kwargs):
    """
    Creates a matplotlib patch collection of pandapipes junction-junction valves. Valves are
    plotted in the center between two junctions with a "helper" line (dashed and thin) being drawn
@@ -395,22 +448,19 @@

    :param net: The pandapipes network
    :type net: pandapipesNet
-    :param valves: The valves for which the collections are created. If None, all valves which have\
+    :param pumps: The pumps for which the collections are created. If None, all pumps which have\
        entries in the respective junction geodata will be plotted.
-    :type valves: list, default None
+    :type pumps: list, default None
+    :param table_name: Name of the pump table from which to get the data.
+    :type table_name: str, default 'pump'
    :param size: Patch size
    :type size: float, default 5.
    :param junction_geodata: Coordinates to use for plotting. If None, net["junction_geodata"] is used.
    :type junction_geodata: pandas.DataFrame, default None
-    :param colors: Color or list of colors for every valve
-    :type colors: iterable, float, default None
    :param infofunc: infofunction for the patch element
    :type infofunc: function, default None
    :param picker: Picker argument passed to the patch collection
    :type picker: bool, default False
-    :param fill_closed: If True, valves with parameter opened == False will be filled and those\
-        with opened == True will have a white facecolor. Vice versa if False.
-    :type fill_closed: bool, default True
    :param kwargs: Keyword arguments are passed to the patch function
    :return: lc - line collection, pc - patch collection

@@ -426,15 +476,17 @@
    if len(pumps_with_geo) == 0:
        return None

+    colors = kwargs.pop("color", "k")
    linewidths = kwargs.pop("linewidths", 2.)
    linewidths = kwargs.pop("linewidth", linewidths)
    linewidths = kwargs.pop("lw", linewidths)
+    patch_edgecolor = kwargs.pop("patch_edgecolor", colors)
+    line_color = kwargs.pop("line_color", colors)

    infos = list(np.repeat([infofunc(i) for i in range(len(pumps_with_geo))], 2)) \
        if infofunc is not None else []
-    lc, pc = _create_complex_branch_collection(coords, pump_patches, size, infos,
-                                               picker=picker, linewidths=linewidths,
-                                               patch_edgecolor=color, line_color=color,
-                                               **kwargs)
+    lc, pc = _create_complex_branch_collection(
+        coords, pump_patches, size, infos, picker=picker, linewidths=linewidths,
+        patch_edgecolor=patch_edgecolor, line_color=line_color, **kwargs)

    return lc, pc
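All node and branch collections now pop color/linewidth keyword arguments themselves, and the sink/source collections accept cmap, norm and z. A short plotting sketch (hedged; it assumes a solved example network with sinks and junction geodata):

import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
import pandapipes as pp
import pandapipes.networks as nw
import pandapipes.plotting as plot
from pandapower.plotting import draw_collections

net = nw.schutterwald()
pp.pipeflow(net)
cmap = plt.get_cmap("viridis")
norm = Normalize(net.res_sink.mdot_kg_per_s.min(), net.res_sink.mdot_kg_per_s.max())
pipe_lc = plot.create_pipe_collection(net, use_junction_geodata=True)
# sinks are colored by net.res_sink.mdot_kg_per_s when z is not given
sink_pc, sink_lc = plot.create_sink_collection(net, cmap=cmap, norm=norm)
draw_collections([pipe_lc, sink_pc, sink_lc])
plt.show()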

@@ -343,9 +343,8 @@
        node_pit = net["_pit"]["node"]

        junction_idx_lookup = get_lookup(net, "node", "index")[Junction.table_name()]
-        from_junction_nodes = junction_idx_lookup[net[cls.table_name]["from_junction"].values]
-        to_junction_nodes = junction_idx_lookup[net[cls.table_name]["to_junction"].values]
-
+        from_junction_nodes = junction_idx_lookup[net[cls.table_name()]["from_junction"].values]
+        to_junction_nodes = junction_idx_lookup[net[cls.table_name()]["to_junction"].values]
        p_values = np.zeros(len(pipe_p_data[0]) + 2)
        p_values[0] = node_pit[from_junction_nodes[pipe], PINIT]
        p_values[1:-1] = pipe_p_data[:]

@@ -0,0 +1,81 @@
+# Copyright (c) 2020-2021 by Fraunhofer Institute for Energy Economics
+# and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+import networkx as nx
+import pandas as pd
+from pandapipes.topology.create_graph import create_nxgraph
+
+
+def calc_distance_to_junction(net, junction, notravjunctions=None,
+                              nogojunctions= None, weight="weight"):
+    """
+    Calculates the shortest distance between a source junction and all junctions connected to it.
+
+     INPUT:
+        **net** (pandapipesNet) - Variable that contains a pandapipes network.
+
+        **junction** (integer) - Index of the source junction.
+
+
+     OPTIONAL:
+        **nogojunctions** (integer/list, None) - nogojunctions are not being considered
+
+        **notravjunctions** (integer/list, None) - lines connected to these junctions are not being
+                                              considered
+        **weight** (string, None) - Edge data key corresponding to the edge weight
+
+     OUTPUT:
+        **dist** - Returns a pandas series with containing all distances to the source junction
+                   in km. If weight=None dist is the topological distance (int).
+
+     EXAMPLE:
+         import pandapipes.topology as top
+
+         dist = top.calc_distance_to_junction(net, 5)
+
+    """
+    g = create_nxgraph(net, nogojunctions=nogojunctions,
+                       notravjunctions=notravjunctions)
+    dist = nx.single_source_dijkstra_path_length(g, junction, weight=weight)
+    return pd.Series(dist)
+
+def calc_minimum_distance_to_junctions(net, junctions, notravjunctions=None,
+                              nogojunctions=None, weight="weight"):
+    """
+    Calculates the shortest distance between multiple source junctions and all junctions connected to it.
+
+     INPUT:
+        **net** (pandapipesNet) - Variable that contains a pandapipes network.
+
+        **junction** (integer) - Index of the source junction.
+
+
+     OPTIONAL:
+        **nogojunctions** (integer/list, None) - nogojunctions are not being considered
+
+        **notravjunctions** (integer/list, None) - lines connected to these junctions are not being
+                                              considered
+        **weight** (string, None) - Edge data key corresponding to the edge weight
+
+     OUTPUT:
+        **dist** - Returns a pandas series with containing all distances to the source junction
+                   in km. If weight=None dist is the topological distance (int).
+
+     EXAMPLE:
+         import pandapipes.topology as top
+
+         dist = top.calc_distance_to_junction(net, 5)
+
+    """
+    mg = create_nxgraph(net, notravjunctions=notravjunctions,
+                        nogojunctions=nogojunctions, weight=weight)
+    junctions = set(junctions)
+    junction = junctions.pop()
+    mg.add_edges_from([(junction, y, {"weight": 0}) for y in junctions])
+    return pd.Series(nx.single_source_dijkstra_path_length(mg, junction))
+
+
+if __name__ == '__main__':
+    import pandapipes.networks as nw
+    net = nw.gas_meshed_delta()
+    dist = calc_minimum_distance_to_junctions(net, net.ext_grid.junction.values)
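A brief usage sketch for the two new topology functions (assuming they are exposed under pandapipes.topology, as their docstrings indicate):

import pandapipes.networks as nw
import pandapipes.topology as top

net = nw.gas_meshed_delta()
# distance from one junction to every reachable junction (in km when weight="weight")
dist = top.calc_distance_to_junction(net, net.junction.index[0])
# for each junction, the distance to the closest external grid connection
dist_min = top.calc_minimum_distance_to_junctions(net, net.ext_grid.junction.values)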

@@ -4,6 +4,7 @@

import numpy as np

+
def _sum_by_group_sorted(indices, *values):
    """Auxiliary function to sum up values by some given indices (both as numpy arrays). Expects the
    indices and values to already be sorted.
@@ -56,24 +57,27 @@

def select_from_pit(table_index_array, input_array, data):
    """
-        Auxiliary function to retrieve values from a table like a pit. Each data entry corresponds to a
-        table_index_array entry. Example: velocities are indexed by the corresponding from_nodes stored in the
-        pipe pit.
-
-        The function inputs another array which consists of some table_index_array entries the user wants to retrieve.
-        The function is used in pandapipes results evaluation. The input array is the list of from_junction entries,
-        corresponding only to the junction elements, not containing additional pipe nodes. The table_index_array
-        is the complete list of from_nodes consisting of junction element entries and additional pipe section nodes.
-        Data corresponds to the gas velocities.
-
-        :param indices:
-        :type indices:
-        :param values:
-        :type values:
+        Auxiliary function to retrieve values from a table like a pit. Each data entry corresponds
+        to a table_index_array entry. Example: velocities are indexed by the corresponding
+        from_nodes stored in the pipe pit.
+
+        The function inputs another array which consists of some table_index_array entries the user
+        wants to retrieve. The function is used in pandapipes results evaluation. The input array is
+        the list of from_junction entries, corresponding only to the junction elements, not
+        containing additional pipe nodes. The table_index_array is the complete list of from_nodes
+        consisting of junction element entries and additional pipe section nodes. Data corresponds
+        to the gas velocities.
+
+        :param table_index_array:
+        :type table_index_array:
+        :param input_array:
+        :type input_array:
+        :param data:
+        :type data:
        :return:
        :rtype:
        """
    sorter = np.argsort(table_index_array)
    indices = sorter[np.searchsorted(table_index_array, input_array, sorter=sorter)]

-    return data[indices]
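To illustrate what select_from_pit computes, a toy sketch with made-up arrays (it mirrors the argsort/searchsorted logic above and is not taken from the diff):

import numpy as np

table_index_array = np.array([7, 3, 9, 3, 5])   # e.g. from_nodes incl. internal pipe section nodes
data = np.array([0.1, 0.2, 0.3, 0.4, 0.5])      # e.g. velocities aligned with table_index_array
input_array = np.array([3, 9])                  # the from_junction entries to retrieve

sorter = np.argsort(table_index_array)
indices = sorter[np.searchsorted(table_index_array, input_array, sorter=sorter)]
print(data[indices])   # [0.2 0.3] - the value of the first match for each requested index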

@@ -7,7 +7,7 @@
from pandapipes.component_models import Junction, Sink, Source, Pump, Pipe, ExtGrid, \
    HeatExchanger, Valve, CirculationPumpPressure, CirculationPumpMass
from pandapipes.component_models.auxiliaries.component_toolbox import add_new_component
-from pandapipes.pandapipes_net import pandapipesNet, get_default_pandapipes_structure
+from pandapipes.pandapipes_net import pandapipesNet, get_basic_net_entries, add_default_components
from pandapipes.properties import call_lib
from pandapipes.properties.fluids import Fluid
from pandapipes.properties.fluids import _add_fluid_to_net
@@ -47,11 +47,8 @@
        >>> net2 = create_empty_network()

    """
-    net = pandapipesNet(get_default_pandapipes_structure())
-    add_new_component(net, Junction, True)
-    add_new_component(net, Pipe, True)
-    add_new_component(net, ExtGrid, True)
-    net['controller'] = pd.DataFrame(np.zeros(0, dtype=net['controller']), index=[])
+    net = pandapipesNet(get_basic_net_entries())
+    add_default_components(net, True)
    net['name'] = name
    if add_stdtypes:
        add_basic_std_types(net)
@@ -272,7 +269,8 @@
    :type to_junction: int
    :param diameter_m: The heat exchanger inner diameter in [m]
    :type diameter_m: float
-    :param qext_w: External heat feed-in through the heat exchanger in [W]
+    :param qext_w: External heat flux in [W]. If positive, heat is derived from the network. If
+            negative, heat is being fed into the network from a heat source.
    :type qext_w: float, default 0.0
    :param loss_coefficient: An additional pressure loss coefficient, introduced by e.g. bends
    :type loss_coefficient: float
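A small sketch of the documented sign convention for qext_w (hypothetical junctions; the standard create API is assumed):

import pandapipes as pp

net = pp.create_empty_network(fluid="water")
j1 = pp.create_junction(net, pn_bar=5, tfluid_k=293.15)
j2 = pp.create_junction(net, pn_bar=5, tfluid_k=293.15)
# positive qext_w: heat is taken out of the network (a consumer) ...
pp.create_heat_exchanger(net, j1, j2, diameter_m=0.08, qext_w=20000)
# ... negative qext_w: heat is fed into the network (a heat source)
pp.create_heat_exchanger(net, j1, j2, diameter_m=0.08, qext_w=-20000)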

@@ -22,22 +22,23 @@

def nets_equal(net1, net2, check_only_results=False, exclude_elms=None, **kwargs):
    """
-    Compares the DataFrames of two networks. The networks are considered equal if they share the
-    same keys and values, except of the 'et' (elapsed time) entry which differs depending on
-    runtime conditions and entries stating with '_'.
+    Compares the DataFrames of two networks.

-    :param net1:
+    The networks are considered equal if they share the same keys and values, except of the 'et'
+    (elapsed time) entry which differs depending on runtime conditions and entries stating with '_'.
+
+    :param net1: first net for comparison
    :type net1: pandapipesNet
-    :param net2:
-    :type net2:pandapipesNet
+    :param net2: second net for comparison
+    :type net2: pandapipesNet
    :param check_only_results:
    :type check_only_results: bool, default False
-    :param exclude_elms:
-    :type exclude_elms: ?, default None
-    :param kwargs:
+    :param exclude_elms: element types that should be skipped in the comparison
+    :type exclude_elms: list of strings, default None
+    :param kwargs: key word arguments
    :type kwargs:
-    :return:
-    :rtype:
+    :return: True, if nets are equal
+    :rtype: Bool
    """

    eq = isinstance(net1, pandapipesNet) and isinstance(net2, pandapipesNet)
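Usage sketch for nets_equal with the clarified parameters (it assumes the function lives in pandapipes.toolbox, which is where these helpers sit in the package):

import copy
import pandapipes as pp
from pandapipes.toolbox import nets_equal

net = pp.create_empty_network(fluid="lgas")
net_copy = copy.deepcopy(net)
nets_equal(net, net_copy)                          # True: same keys and values
nets_equal(net, net_copy, exclude_elms=["sink"])   # skip the sink tables in the comparison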
@@ -85,7 +86,7 @@
    Provides the tuples of elements and corresponding columns for junctions they are connected to

    :param include_node_elements: whether tuples for junction elements e.g. sink, source, are \
-            included
+           included
    :type include_node_elements: bool
    :param include_branch_elements: whether branch elements e.g. pipe, pumps, ... are included
    :type include_branch_elements: bool
@@ -96,6 +97,7 @@
    :return: set of tuples with element names and column names
    :rtype: set
    """
+
    node_elements = []
    if net is not None and include_node_elements:
        node_elements = [comp.table_name() for comp in net.component_list
@@ -147,6 +149,7 @@
    :return: pp_elms - set of table names for the desired element types
    :rtype: set
    """
+
    pp_elms = {"junction"} if junction else set()
    pp_elms |= set([el[0] for el in element_junction_tuples(
        include_node_elements, include_branch_elements, include_res_elements, net)])
@@ -166,6 +169,7 @@
    :return: junction_lookup - the finally reindexed junction lookup (with corrections if necessary)
    :rtype: dict
    """
+
    not_fitting_junction_lookup_keys = set(junction_lookup.keys()) - set(net.junction.index)
    if len(not_fitting_junction_lookup_keys):
        logger.error("These junction indices are unknown to net. Thus, they cannot be reindexed: " +
@@ -185,7 +189,7 @@
    if hasattr(net, "res_junction"):
        net.res_junction.index = get_indices(net.res_junction.index, junction_lookup)

-    for element, value in element_junction_tuples():
+    for element, value in element_junction_tuples(net=net):
        if element in net.keys():
            net[element][value] = get_indices(net[element][value], junction_lookup)
    net["junction_geodata"].set_index(get_indices(net["junction_geodata"].index, junction_lookup),
@@ -208,6 +212,7 @@
    :type old_indices: iterable, default None
    :return: No output.
    """
+
    old_indices = old_indices if old_indices is not None else net[element].index
    if not len(new_indices) or not net[element].shape[0]:
        return
@@ -245,7 +250,7 @@
    :type start: int, default 0
    :param store_old_index: if True, stores the old index in net.junction["old_index"]
    :type store_old_index: bool, default False
-    :return: bus_lookup - mapping of old to new index
+    :return: junction_lookup - mapping of old to new index
    :rtype: dict
    """
    net.junction.sort_index(inplace=True)
@@ -275,7 +280,7 @@
    add_df_to_reindex = set() if add_df_to_reindex is None else set(add_df_to_reindex)
    elements = pp_elements(include_res_elements=True, net=net)

-    # create continuous bus index
+    # create continuous junction index
    create_continuous_junction_index(net, start=start)
    elements -= {"junction", "junction_geodata", "res_junction"}

@@ -400,7 +405,7 @@
    if "res_pipe" in net.keys():
        res_pipes = net.res_pipe.index.intersection(pipes)
        net["res_pipe"].drop(res_pipes, inplace=True)
-    logger.info("dropped %d pipes" % len(pipes))
+    logger.info("dropped %d pipes" % len(list(pipes)))


# TODO: change to pumps??

@@ -0,0 +1,36 @@
+# Copyright (c) 2020-2021 by Fraunhofer Institute for Energy Economics
+# and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+
+import pandapipes.topology as top
+import pandas as pd
+
+def pressure_profile_to_junction_geodata(net):
+    """
+    Calculates pressure profile for a pandapipes network.
+
+     INPUT:
+        **net** (pandapipesNet) - Variable that contains a pandapipes network.
+
+     OUTPUT:
+        **bgd** - Returns a pandas DataFrame containing distance to the closest ext_grid as x coordinate and pressure level as y coordinate for each junction.
+
+     EXAMPLE:
+        import pandapipes.networks as nw
+        import pandapipes.plotting as plotting
+        import pandapipes as pp
+
+        net = nw.schutterwald()
+        pp.pipeflow(net)
+        bgd = plotting.pressure_profile_to_junction_geodata(net)
+
+    """
+    if not "res_junction" in net:
+        raise ValueError("no results in this pandapipes network")
+
+    dist = top.calc_minimum_distance_to_junctions(net, net.ext_grid.junction.values)
+    junctions = net.junction.index.values
+    return pd.DataFrame({"x": dist.loc[junctions].values,
+                        "y": net.res_junction.p_bar.loc[junctions].values},
+                       index=junctions)
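The returned DataFrame can be plotted directly; a short sketch building on the docstring example above (the scatter plot itself is an addition here):

import matplotlib.pyplot as plt
import pandapipes as pp
import pandapipes.networks as nw
import pandapipes.plotting as plotting

net = nw.schutterwald()
pp.pipeflow(net)
bgd = plotting.pressure_profile_to_junction_geodata(net)
plt.scatter(bgd.x, bgd.y, s=10)
plt.xlabel("distance to closest ext_grid [km]")
plt.ylabel("pressure [bar]")
plt.show()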

@@ -56,7 +56,7 @@

        r = "Fluid %s (%s) with properties:" % (self.name, self.fluid_type)
        for key in self.all_properties.keys():
-            r += "\n   - %s (%s)" %(key, self.all_properties[key].__class__.__name__[13:])
+            r += "\n   - %s (%s)" % (key, self.all_properties[key].__class__.__name__[13:])
        return r

    def add_property(self, property_name, prop, overwrite=True, warn_on_duplicates=True):
@@ -74,7 +74,8 @@
        :type warn_on_duplicates: bool

        :Example:
-            >>> fluid.add_property('water_density', pandapipes.FluidPropertyConstant(998.2061), overwrite=True, warn_on_duplicates=False)
+            >>> fluid.add_property('water_density', pandapipes.FluidPropertyConstant(998.2061),
+                                   overwrite=True, warn_on_duplicates=False)

        """
        if property_name in self.all_properties:
@@ -100,7 +101,7 @@
        if property_name not in self.all_properties:
            raise UserWarning("The property %s was not defined for the fluid %s"
                              % (property_name, self.name))
-        return self.all_properties[property_name].get_property(*at_values)
+        return self.all_properties[property_name].get_at_value(*at_values)

    def get_density(self, temperature):
        """
@@ -182,11 +183,11 @@
        """
        super().__init__()

-    def get_property(self, *args):
+    def get_at_value(self, *args):
        """

-        :param arg:
-        :type arg:
+        :param args:
+        :type args:
        :return:
        :rtype:
        """
@@ -216,10 +217,11 @@
        else:
            self.prop_getter = interp1d(x_values, y_values)

-    def get_property(self, arg):
+    def get_at_value(self, arg):
        """

-        :param arg: Name of the property and one or more values (x-values) for which the y-values of the property are to be displayed
+        :param arg: Name of the property and one or more values (x-values) for which the y-values \
+            of the property are to be displayed
        :type arg: str, float or array
        :return: y-value/s
        :rtype: float, array
@@ -266,7 +268,7 @@
    Creates Property with a constant value.
    """

-    def __init__(self, value):
+    def __init__(self, value, warn_dependent_variables=False):
        """

        :param value:
@@ -274,27 +276,29 @@
        """
        super(FluidPropertyConstant, self).__init__()
        self.value = value
+        self.warn_dependent_variables = warn_dependent_variables

-    def get_property(self, *args):
+    def get_at_value(self, *args):
        """

-        :param arg: Name of the property
-        :type arg: str
+        :param args: Name of the property
+        :type args: str
        :return: Value of the property
        :rtype: float

        :Example:
-            >>> heat_capacity = get_fluid(net).get_property("heat_capacity")
+            >>> heat_capacity = get_fluid(net).all_properties["heat_capacity"].get_at_value(293.15)
        """
        if len(args) > 1:
-            raise(UserWarning('Please define either none or an array-like argument'))
+            raise UserWarning('Please define either none or an array-like argument')
        elif len(args) == 1:
-            logger.warning('One constant property has several input variables even though it is '
-                           'independent of these')
+            if self.warn_dependent_variables:
+                logger.warning('Constant property received several input variables, although it is'
+                               'independent of these')
            output = np.array([self.value]) * np.ones(len(args[0]))
        else:
            output = np.array([self.value])
-        return  output
+        return output

    @classmethod
    def from_path(cls, path):
@@ -312,6 +316,13 @@
        value = np.loadtxt(path).item()
        return cls(value)

+    @classmethod
+    def from_dict(cls, d):
+        obj = super().from_dict(d)
+        if "warn_dependent_variables" not in obj.__dict__.keys():
+            obj.__dict__["warn_dependent_variables"] = False
+        return obj
+

class FluidPropertyLinear(FluidProperty):
    """
@@ -331,16 +342,17 @@
        self.slope = slope
        self.offset = offset

-    def get_property(self, arg):
+    def get_at_value(self, arg):
        """

-        :param arg: Name of the property and one or more values (x-values) for which the function of the property should be calculated
+        :param arg: Name of the property and one or more values (x-values) for which the function \
+            of the property should be calculated
        :type arg: str, float or array
        :return: y-value or function values
        :rtype: float, array

        :Example:
-            >>> comp_fact = get_fluid(net).get_property("compressibility", p_bar)
+            >>> comp_fact = get_fluid(net).all_properties["compressibility"].get_at_value(p_bar)

        """
        if isinstance(arg, pd.Series):
@@ -356,8 +368,6 @@

        :param path:
        :type path:
-        :param method:
-        :type method:
        :return:
        :rtype:
        """
@@ -484,6 +494,7 @@
                     heat_capacity=heat_capacity, molar_mass=molar_mass, compressibility=compr,
                     der_compressibility=der_compr)

+
def get_fluid(net):
    """
    This function shows which fluid is used in the net.
@@ -494,7 +505,7 @@
    :rtype: Fluid
    """
    if "fluid" not in net or net["fluid"] is None:
-        raise UserWarning("There is no fluid defined for the given net!")
+        raise AttributeError("There is no fluid defined for the given net!")
    fluid = net["fluid"]
    if not isinstance(fluid, Fluid):
        logger.warning("The fluid in this net is not of the pandapipes Fluid type. This could lead"
@@ -526,6 +537,6 @@
    if isinstance(fluid, str):
        logger.warning("Instead of a pandapipes.Fluid, a string ('%s') was passed to the fluid "
                       "argument. Internally, it will be passed to call_lib(fluid) to get the "
-                       "respective pandapipes.Fluid." %fluid)
+                       "respective pandapipes.Fluid." % fluid)
        fluid = call_lib(fluid)
    net["fluid"] = fluid

@@ -66,8 +66,10 @@

def init_time_series(net, time_steps, continue_on_divergence=False, verbose=True, **kwargs):
    """
-    Initializes the time series calculation. Creates the dict ts_variables, which includes
-    necessary variables for the time series / control function.
+    Initializes the time series calculation.
+
+    Creates the dict ts_variables, which includes necessary variables for the time series /
+    control function.

    :param net: The pandapipes format network
    :type net: pandapipesNet

@@ -21,12 +21,12 @@
    :return: No output
    """
    if ctrl_variables is None:
-        ctrl_variables = prepare_run_ctrl(net, None)
+        ctrl_variables = prepare_run_ctrl(net, None, **kwargs)

    run_control_pandapower(net, ctrl_variables=ctrl_variables, max_iter=max_iter, **kwargs)


-def prepare_run_ctrl(net, ctrl_variables):
+def prepare_run_ctrl(net, ctrl_variables, **kwargs):
    """
    Function that defines default control variables.

@@ -36,7 +36,7 @@
    :rtype: dict
    """
    if ctrl_variables is None:
-        ctrl_variables  = prepare_run_control_pandapower(net, None)
+        ctrl_variables  = prepare_run_control_pandapower(net, None, **kwargs)
        ctrl_variables["run"] = ppipe.pipeflow

    ctrl_variables["errors"] = (PipeflowNotConverged,) # has to be a tuple

@@ -81,8 +81,7 @@

def net_initialization_multinet(multinet, ctrl_variables, **kwargs):
    """
-    This function initilizes each net, i.e. if one controller affecting a net requires an initial_run a loadflow/
-    pipeflow is conducted, otherwise not.
+    If one controller affecting a net requires an initial_run, a loadflow/pipeflow is conducted.

    :param multinet: multinet with multinet controllers, net distinct controllers and several pandapipes/pandapower nets
    :type multinet: pandapipes.Multinet
@@ -106,7 +105,8 @@

def run_control(multinet, ctrl_variables=None, max_iter=30, **kwargs):
    """
-    Main function to call a multnet with controllers
+    Main function to call a multnet with controllers.
+
    Function is running control loops for the controllers specified in net.controller
    Runs controller until each one converged or max_iter is hit.

@@ -114,21 +114,23 @@
    2. Calculate an inital run (if it is enabled, i.e. setting the initial_run veriable to True)
    3. Repeats the following steps in ascending order of controller_order until total convergence of all
       controllers for each level:
-        a) Evaluate individual convergence for all controllers in the level
-        b) Call control_step() for all controllers in the level on diverged controllers
-        c) Fire run function (or optionally another function like runopf or whatever you defined)
+       a) Evaluate individual convergence for all controllers in the level
+       b) Call control_step() for all controllers in the level on diverged controllers
+       c) Fire run function (or optionally another function like runopf or whatever you defined)
    4. Call finalize_control() on each controller

    :param multinet: multinet with multinet controllers, net distinct controllers and several pandapipes/pandapower nets
    :type multinet: pandapipes.Multinet
    :param ctrl_variables: contains all relevant information and boundaries required for a successful control run. To \
-        define ctrl_variables yourself, following entries for each net are required:\n
-        - level (list): gives a list of levels to be investigated \n
-        - controller_order (list): nested list of tuples given the correct order of the different controllers \
-        within one level\
-        - run (funct, e.g. pandapower.runpp, pandapipes.pipeflow): function to be used to conduct a loadflow/pipeflow \n
-        - initial_run (boolean): Is a initial_run for a net required or not\n
-        - continue_on_divergence (boolean): What to do if loadflow/pipeflow is not converging, fires control_repair
+           define ctrl_variables yourself, following entries for each net are required:\n
+           - level (list): gives a list of levels to be investigated \n
+           - controller_order (list): nested list of tuples given the correct order of the
+             different controllers within one level\
+           - run (funct, e.g. pandapower.runpp, pandapipes.pipeflow): function to be used to
+             conduct a loadflow/pipeflow \n
+           - initial_run (boolean): Is a initial_run for a net required or not\n
+           - continue_on_divergence (boolean): What to do if loadflow/pipeflow is not converging,
+         fires control_repair
    :type ctrl_variables: dict, default: None
    :param max_iter: number of iterations for each controller to converge
    :type max_iter: int, default: 30
@@ -156,8 +158,9 @@

def get_controller_order_multinet(multinet):
    """
-    Defining the controller order per level
-    Takes the order and level columns from net.controller
+    Defining the controller order per level.
+
+    Takes the order and level columns from net.controller.
    If levels are specified, the levels and orders are executed in ascending order.

    :param multinet: multinet with multinet controllers, net distinct controllers and several pandapipes/pandapower nets
@@ -193,9 +196,11 @@
        return get_controller_order(net_list, controller_list)


-def prepare_run_ctrl(multinet, ctrl_variables):
+def prepare_run_ctrl(multinet, ctrl_variables, **kwargs):
    """
-    Prepares run control functions. Internal variables needed:
+    Prepares run control functions.
+
+    Internal variables needed:
        - level (list): gives a list of levels to be investigated
        - controller_order (list): nested list of tuples given the correct order of the different controllers
        within one level
@@ -224,12 +229,13 @@
            net_names = c.object.get_all_net_names()
            for net_name in net_names:
                if net_name not in ctrl_variables.keys():
-                    ctrl_variables[net_name] = {'run': None, 'initial_run': None, 'errors': ()}
+                    ctrl_variables[net_name] = {'run': None, 'initial_run': None,
+                                                'continue_on_divergence': None, 'errors': ()}
                net = multinet['nets'][net_name]
                if isinstance(net, ppipes.pandapipesNet):
-                    ctrl_variables_net = prepare_run_ctrl_ppipes(net, None)
+                    ctrl_variables_net = prepare_run_ctrl_ppipes(net, None, **kwargs)
                elif isinstance(net, pp.pandapowerNet):
-                    ctrl_variables_net = prepare_run_ctrl_pp(net, None)
+                    ctrl_variables_net = prepare_run_ctrl_pp(net, None, **kwargs)
                else:
                    raise ValueError('the given nets are neither pandapipes nor pandapower nets')

@@ -239,18 +245,23 @@
                    ctrl_variables[net_name]['initial_run'] is not None else ctrl_variables_net['initial_run']
                ctrl_variables[net_name]['only_v_results'], ctrl_variables[net_name]['recycle'] = \
                    get_recycle(ctrl_variables_net)
+                ctrl_variables[net_name]['continue_on_divergence'] = \
+                    ctrl_variables[net_name]['continue_on_divergence'] if \
+                    ctrl_variables[net_name]['continue_on_divergence'] is not None else \
+                    ctrl_variables_net['continue_on_divergence']
                excl_net += [net_name]

    for net_name in multinet['nets'].keys():
        if net_name in excl_net:
            continue
        if net_name not in ctrl_variables.keys():
-            ctrl_variables[net_name] = {'run': None, 'initial_run': False, 'errors': ()}
+            ctrl_variables[net_name] = {'run': None, 'initial_run': False,
+                                        'continue_on_divergence': None, 'errors': ()}
        net = multinet['nets'][net_name]
        if isinstance(net, ppipes.pandapipesNet):
-            ctrl_variables_net = prepare_run_ctrl_ppipes(net, None)
+            ctrl_variables_net = prepare_run_ctrl_ppipes(net, None, **kwargs)
        elif isinstance(net, pp.pandapowerNet):
-            ctrl_variables_net = prepare_run_ctrl_pp(net, None)
+            ctrl_variables_net = prepare_run_ctrl_pp(net, None, **kwargs)
        else:
            raise ValueError('the given nets are neither pandapipes nor pandapower nets')
        ctrl_variables[net_name]['run'] = ctrl_variables_net['run']
@@ -259,6 +270,16 @@
            ctrl_variables[net_name]['initial_run'] is not None else ctrl_variables_net['initial_run']
        ctrl_variables[net_name]['only_v_results'], ctrl_variables[net_name]['recycle'] = \
            get_recycle(ctrl_variables_net)
+        ctrl_variables[net_name]['continue_on_divergence'] = \
+            ctrl_variables[net_name]['continue_on_divergence'] if \
+            ctrl_variables[net_name]['continue_on_divergence'] is not None else \
+            ctrl_variables_net['continue_on_divergence']
+
+    if ('check_each_level') in kwargs:
+        check = kwargs.pop('check_each_level')
+        ctrl_variables['check_each_level'] = check
+    else:
+        ctrl_variables['check_each_level'] = True

    ctrl_variables['errors'] = (NetCalculationNotConverged,)
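A minimal, hedged sketch of how the new flag reaches prepare_run_ctrl (run_control is the multinet function shown above; the multinet with its nets and controllers is assumed to exist, and the consumer of check_each_level is not part of the shown hunks):

# the flag travels through **kwargs into prepare_run_ctrl, which stores it in
# ctrl_variables['check_each_level'] (defaults to True when not given)
run_control(multinet, max_iter=30, check_each_level=False)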

@@ -24,7 +24,7 @@
    'Programming Language :: Python',
    'Programming Language :: Python :: 3']

-with open('.travis.yml', 'rb') as f:
+with open('.github/workflows/run_tests_master.yml', 'rb') as f:
    lines = f.read().decode('utf-8')
    for version in re.findall('python: 3.[0-9]', lines):
        classifiers.append('Programming Language :: Python :: 3.%s' % version[-1])
@@ -33,7 +33,7 @@

setup(
    name='pandapipes',
-    version='0.3.0',
+    version='0.4.0',
    author='Dennis Cronbach, Daniel Lohmeier, Simon Ruben Drauz, Jolando Marius Kisse',
    author_email='dennis.cronbach@iee.fraunhofer.de, daniel.lohmeier@iee.fraunhofer.de, '
                 'simon.ruben.drauz@iee.fraunhofer.de, jolando.kisse@uni-kassel.de',
@@ -42,7 +42,7 @@
    long_description_content_type='text/x-rst',
    url='http://www.pandapipes.org',
    license='BSD',
-    install_requires=["pandapower>=2.5.0", "matplotlib"],
+    install_requires=["pandapower>=2.6.0", "matplotlib"],
    extras_require={"docs": ["numpydoc", "sphinx", "sphinx_rtd_theme", "sphinxcontrib.bibtex"],
                    "plotting": ["plotly", "python-igraph"],
                    "test": ["pytest", "pytest-xdist"]},

@@ -10,7 +10,7 @@
from pandapipes.multinet.create_multinet import MultiNet, create_empty_multinet
from pandapipes.component_models.abstract_models import Component
from pandapipes.create import create_empty_network
-from pandapipes.pandapipes_net import pandapipesNet
+from pandapipes.pandapipes_net import pandapipesNet, get_basic_net_entries
from pandapower.io_utils import pp_hook
from pandapower.io_utils import with_signature, to_serializable, JSONSerializableClass, \
    isinstance_partial as ppow_isinstance, FromSerializableRegistry, PPJSONDecoder
@@ -61,7 +61,7 @@
            from pandapipes import from_json_string
            return from_json_string(self.obj)
        else:
-            net = create_empty_network()
+            net = pandapipesNet(get_basic_net_entries())
            net.update(self.obj)
            return net


@@ -64,7 +64,7 @@
    time_steps = init_time_steps(multinet, time_steps, **kwargs)
    run = kwargs.get('run', None)

-    ts_variables = prepare_run_ctrl(multinet, None)
+    ts_variables = prepare_run_ctrl(multinet, None, **kwargs)

    for net_name in multinet['nets'].keys():
        net = multinet['nets'][net_name]

@@ -8,6 +8,7 @@
from pandapipes.plotting.geo import *
from pandapower.plotting.collections import add_collections_to_axes, add_cmap_to_collection, \
    add_single_collection
+from pandapipes.plotting.pipeflow_results import *

import types
from matplotlib.backend_bases import GraphicsContextBase, RendererBase

@@ -140,7 +140,8 @@

        height_difference = node_pit[from_nodes, HEIGHT] - node_pit[to_nodes, HEIGHT]
        dummy = length != 0
-        lambda_pipe, re = calc_lambda(v_init, eta, rho, d, k, gas_mode, friction_model, dummy)
+        lambda_pipe, re = calc_lambda(v_init, eta, rho, d, k, gas_mode, friction_model, dummy,
+                                      options)
        der_lambda_pipe = calc_der_lambda(v_init, eta, rho, d, k, friction_model, lambda_pipe)
        branch_component_pit[:, RE] = re
        branch_component_pit[:, LAMBDA] = lambda_pipe

@@ -2,7 +2,7 @@
# and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.

-__version__ = '0.3.0'
+__version__ = '0.4.0'

import pandas as pd
import os

@@ -58,16 +58,17 @@

def add_nets_to_multinet(multinet, overwrite=False, **networks):
    """
-    Add multiple nets to a multinet. 'networks' has to be a dictionary.
+    Add multiple nets to a multinet. 'networks' is one or more keyword arguments with nets.

    :param multinet: multinet to which several pandapipes/pandapower nets are added
    :type multinet: pandapipes.MultiNet
    :param overwrite: whether a net should be overwritten if it has the same net_name
    :type overwrite: bool
-    :param networks: a dictionary with different pandapipes/pandapower nets as values. The keys
-                     will be set in multinet.nets as net names for the different networks.
-    :type networks: dict
-    :return: net is added to multinet
+    :param networks: one or more keyword arguments with pandapipes/pandapower nets as values.
+                     The keyword of each net will be set in multinet.nets as the name for the
+                     network in the respective argument.
+    :type networks: kwarg (name=net)
+    :return: nets are added to multinet
    :rtype: None
    """
    for name, net in networks.items():
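Usage sketch for the keyword-based interface (create_empty_multinet is imported as shown elsewhere in this diff; treating create_multinet as the home of add_nets_to_multinet is an assumption):

import pandapower as ppower
import pandapipes as ppipes
from pandapipes.multinet.create_multinet import create_empty_multinet, add_nets_to_multinet

gas_net = ppipes.create_empty_network(fluid="lgas")
power_net = ppower.create_empty_network()
multinet = create_empty_multinet("coupled")
# the keyword names become the net names in multinet['nets']
add_nets_to_multinet(multinet, gas=gas_net, power=power_net)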

@@ -5,6 +5,7 @@
from packaging import version

from pandapipes import __version__
+from pandapipes.pandapipes_net import add_default_components

try:
    import pplog as logging
@@ -18,6 +19,7 @@
    """
    Converts old nets to new format to ensure consistency. The converted net is returned.
    """
+    add_default_components(net, overwrite=False)
    if isinstance(net.version, str) and version.parse(net.version) >= version.parse(__version__):
        return net
    _rename_columns(net)

@@ -25,7 +25,7 @@
                   "tol_T": 1e-3, "tol_res": 1e-3, "iter": 10, "error_flag": False, "alpha": 1,
                   "nonlinear_method": "constant", "p_scale": 1, "mode": "hydraulics",
                   "ambient_temperature": 293, "check_connectivity": True,
-                   "only_update_hydraulic_matrix": False,
+                   "max_iter_colebrook": 100, "only_update_hydraulic_matrix": False,
                   "reuse_internal_data": False,
                   "quit_on_inconsistency_connectivity": False}


@@ -6,7 +6,7 @@
import numpy as np


-def calc_lambda(v, eta, rho, d, k, gas_mode, friction_model, dummy):
+def calc_lambda(v, eta, rho, d, k, gas_mode, friction_model, dummy, options):
    """
    Function calculates the friction factor of a pipe. Turbulence is calculated based on
    Nikuradse. If v equals 0, a value of 0.001 is used in order to avoid division by zero.
@@ -28,6 +28,8 @@
    :type friction_model:
    :param dummy:
    :type dummy:
+    :param options:
+    :type options:
    :return:
    :rtype:
    """
@@ -43,7 +45,15 @@
        lambda_nikuradse = 1 / (-2 * np.log10(k / (3.71 * d))) ** 2

    if friction_model == "colebrook":
-        lambda_colebrook = colebrook(re, d, k, lambda_nikuradse, dummy)
+        # TODO: move this import to top level if possible
+        from pandapipes.pipeflow import PipeflowNotConverged
+        max_iter = options.get("max_iter_colebrook", 100)
+        converged, lambda_colebrook = colebrook(re, d, k, lambda_nikuradse, dummy, max_iter)
+        if not converged:
+            raise PipeflowNotConverged(
+                "The Colebrook-White algorithm did not converge. There might be model "
+                "inconsistencies. The maximum iterations can be given as 'max_iter_colebrook' "
+                "argument to the pipeflow.")
        return lambda_colebrook, re
    elif friction_model == "swamee-jain":
        lambda_swamee_jain = 0.25 / ((np.log10(k/(3.7*d) + 5.74/(re**0.9)))**2)
@@ -103,7 +113,7 @@
        return lambda_laminar_der


-def colebrook(re, d, k, lambda_nikuradse, dummy):
+def colebrook(re, d, k, lambda_nikuradse, dummy, max_iter):
    """

    :param re:
@@ -116,6 +126,8 @@
    :type lambda_nikuradse:
    :param dummy:
    :type dummy:
+    :param max_iter:
+    :type max_iter:
    :return: lambda_cb
    :rtype:
    """
@@ -125,7 +137,7 @@
    niter = 0

    # Inner Newton-loop for calculation of lambda
-    while not converged:
+    while not converged and niter < max_iter:
        f = lambda_cb ** (-1 / 2) + 2 * np.log10(2.51 / (re * np.sqrt(lambda_cb)) + k / (3.71 * d))

        df_dlambda_cb = (-1 / 2 * lambda_cb ** (-3 / 2)) - (2.51 / re) * lambda_cb ** (-3 / 2) \
@@ -144,4 +156,4 @@

        niter += 1

-    return lambda_cb
+    return converged, lambda_cb
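With the capped Newton loop, a non-converging Colebrook-White calculation now raises PipeflowNotConverged instead of looping indefinitely, and the cap is exposed as a pipeflow option. A short sketch:

import pandapipes as pp
import pandapipes.networks as nw

net = nw.gas_meshed_delta()
# default cap is 100 iterations; it can be raised via the new option
pp.pipeflow(net, friction_model="colebrook", max_iter_colebrook=200)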

@@ -22,17 +22,25 @@

    """

-    def __init__(self, name, type):
+    def __init__(self, name, component):
        """

        :param name: name of the standard type object
        :type name: str
-        :param type: the specific standard type
-        :type type: str
+        :param component: the specific standard type
+        :type component: str
        """
        super(StdType, self).__init__()
        self.name = name
-        self.type = type
+        self.component = component
+
+    @classmethod
+    def from_dict(cls, d):
+        obj = super().from_dict(d)
+        if hasattr(obj, "type") and not hasattr(obj, "component"):
+            setattr(obj, "component", getattr(obj, "type"))
+            delattr(obj, "type")
+        return obj


class PumpStdType(StdType):

@@ -3,4 +3,5 @@
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.

from pandapipes.topology.create_graph import *
-from pandapower.topology.graph_searches import connected_component, connected_components
Files                        Coverage
pandapipes                   91.01%
setup.py                     0.00%
Project Totals (74 files)    90.67%
