e2nIEE / simbench
Showing 15 of 33 files from the diff.
Newly tracked file
setup.py changed.

@@ -1,6 +1,4 @@
Loading
1 -
# -*- coding: utf-8 -*-
2 -
3 -
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
1 +
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
4 2
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
5 3
# contributors (see AUTHORS file for details). All rights reserved.
6 4
@@ -84,7 +82,7 @@
Loading
84 82
                break
85 83
86 84
    if element == "measurement":
87 -
        measurement_buses = Series(index=net.measurement.index)
85 +
        measurement_buses = Series(index=net.measurement.index, dtype=int)
88 86
        # bus
89 87
        bool_ = net.measurement.element_type == "bus"
90 88
        measurement_buses.loc[bool_] = net.measurement.element.loc[bool_]

@@ -1,6 +1,4 @@
Loading
1 -
# -*- coding: utf-8 -*-
2 -
3 -
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
1 +
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
4 2
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
5 3
# contributors (see AUTHORS file for details). All rights reserved.
6 4
@@ -190,16 +188,11 @@
Loading
190 188
    dupl = df.index[df.duplicated(subset=subset)]
191 189
    uniq = df.index[~df.duplicated(subset=subset)]
192 190
    uniq_dupl_dict = {}
193 -
    # nan_str only needed since compare_arrays() using old numpy versions connected to python 3.4
194 -
    # don't detect reliably nans as equal
195 -
    nan_str = "nan"
196 -
    while nan_str in df.values:
197 -
        nan_str += "n"
198 191
199 192
    for uni in uniq:
200 193
        do_dupl_fit = compare_arrays(
201 -
            np.repeat(df.loc[uni, subset].fillna(nan_str).values.reshape(1, -1), len(dupl), axis=0),
202 -
            df.loc[dupl, subset].fillna(nan_str).values).all(axis=1)
194 +
            np.repeat(df.loc[uni, subset].values.reshape(1, -1), len(dupl), axis=0),
195 +
            df.loc[dupl, subset].values).all(axis=1)
203 196
        uniq_dupl_dict[uni] = list(dupl[do_dupl_fit])
204 197
    return uniq_dupl_dict
205 198
@@ -261,7 +254,7 @@
Loading
261 254
    # --- initalizations
262 255
    # ensure only unique values in reserved_strings:
263 256
    reserved_strings = pd.Series(sorted(set(reserved_strings))) if reserved_strings is not None \
264 -
        else pd.Series()
257 +
        else pd.Series(dtype=object)
265 258
    count = counting_start
266 259
267 260
    # --- do first append

@@ -1,6 +1,4 @@
Loading
1 -
# -*- coding: utf-8 -*-
2 -
3 -
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
1 +
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
4 2
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
5 3
# contributors (see AUTHORS file for details). All rights reserved.
6 4

@@ -1,6 +1,4 @@
Loading
1 -
# -*- coding: utf-8 -*-
2 -
3 -
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
1 +
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
4 2
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
5 3
# contributors (see AUTHORS file for details). All rights reserved.
6 4
@@ -25,9 +23,9 @@
Loading
25 23
def get_applied_profiles(net, profile_type):
26 24
    """ Returns a list of unique profiles in element tables, e.g. net.sgen.profile.
27 25
        profile_type must be in ["load", "renewables", "powerplants", "storage"]. """
28 -
    applied_profiles = []
26 +
    applied_profiles = set()
29 27
    if profile_type in ["renewables", "powerplants"]:
30 -
        phys_type = "RES" if profile_type == "renewables" else "PP"
28 +
        phys_type = "RES" if profile_type == "renewables" else "PowerPlant"
31 29
        fitting_elm = {"renewables": "sgen", "powerplants": "gen"}[profile_type]
32 30
        for elm in ['sgen', 'gen', 'ext_grid']:
33 31
            if 'profile' in net[elm].columns:
@@ -35,10 +33,10 @@
Loading
35 33
                    idx = net[elm].index[net[elm].phys_type == phys_type]
36 34
                else:
37 35
                    idx = net[elm].index if elm == fitting_elm else []
38 -
                applied_profiles += list(net[elm].profile[idx].dropna().unique())
36 +
                applied_profiles |= set(net[elm].profile[idx].dropna())
39 37
    else:
40 38
        if 'profile' in net[profile_type].columns:
41 -
            applied_profiles += list(net[profile_type].profile.dropna().unique())
39 +
            applied_profiles |= set(net[profile_type].profile.dropna())
42 40
    return applied_profiles
43 41
44 42
@@ -52,25 +50,40 @@
Loading
52 50
        avail_prof = avail_prof if "time" not in avail_prof else avail_prof.difference(["time"])
53 51
        avail_prof = pd.Series(avail_prof)
54 52
        if p_or_q is None:
55 -
            return avail_prof
53 +
            return set(avail_prof)
56 54
        elif p_or_q == "p":
57 -
            return avail_prof.loc[avail_prof.str.endswith("_pload")].str[:-6]
55 +
            return set(avail_prof.loc[avail_prof.str.endswith("_pload")].str[:-6])
58 56
        elif p_or_q == "q":
59 -
            return avail_prof.loc[avail_prof.str.endswith("_qload")].str[:-6]
57 +
            return set(avail_prof.loc[avail_prof.str.endswith("_qload")].str[:-6])
60 58
        else:
61 59
            raise ValueError(str(p_or_q) + " is unknown as 'p_or_q'.")
62 60
    elif continue_on_missing:
63 61
        logger.warning("%s is not in net['profiles'].keys()" % profile_type)
64 -
        return pd.Series()
62 +
        return set()
65 63
    else:
66 64
        raise ValueError("%s is not in net['profiles'].keys()" % profile_type)
67 65
68 66
69 67
def get_missing_profiles(net, profile_type, p_or_q=None):
70 -
    """ Returns a set of profiles which miss in net.profiles compared to the profile columns in the
71 -
        element tables. """
72 -
    return set(get_applied_profiles(net, profile_type)) - set(get_available_profiles(
73 -
        net, profile_type, p_or_q=p_or_q))
68 +
    """ Returns a set of profiles which miss in net.profiles compared to the profile column of the
69 +
        element table. """
70 +
    return get_applied_profiles(net, profile_type) - get_available_profiles(
71 +
        net, profile_type, p_or_q=p_or_q)
72 +
73 +
74 +
def get_unused_profiles(net, profile_type, p_or_q=None):
75 +
    """ Returns a set of profiles which is in net.profiles but is not used in the profile column
76 +
        of the element table. """
77 +
    if profile_type == "load" and p_or_q is None:
78 +
        applied_ = set()
79 +
        for s in get_applied_profiles(net, profile_type):
80 +
            applied_ |= {"%s_pload" % s, "%s_qload" % s}
81 +
        availp = {"%s_pload" % s for s in get_available_profiles(net, profile_type, "p")}
82 +
        availq = {"%s_qload" % s for s in get_available_profiles(net, profile_type, "q")}
83 +
        return (availp | availq) - applied_
84 +
    else:
85 +
        return get_available_profiles(net, profile_type, p_or_q=p_or_q) - \
86 +
            get_applied_profiles(net, profile_type)
74 87
75 88
76 89
def dismantle_dict_values_to_deep_list(dict_):
@@ -129,6 +142,26 @@
Loading
129 142
        csv_data[prof_tab].drop(unapplied_profiles, axis=1, inplace=True)
130 143
131 144
145 +
def filter_unapplied_profiles_pp(net, named_profiles: bool):
146 +
    """ Filters unapplied profiles from pandapower net. """
147 +
    if "profiles" in net and isinstance(net["profiles"], dict):
148 +
        if named_profiles:
149 +
            for key in net["profiles"].keys():
150 +
                unused = get_unused_profiles(net, key)
151 +
                net.profiles[key].drop(columns=unused, inplace=True)
152 +
        else:
153 +
            for key in net.profiles.keys():
154 +
                if isinstance(key, tuple):
155 +
                    elm = key[0]
156 +
                elif isinstance(key, str):
157 +
                    elm = key.split(".")[0]
158 +
                else:
159 +
                    raise NotImplementedError("The keys of net.profiles are expected as " +
160 +
                                              "tuple(element, column) or as str, e.g. 'gen.vm_pu'.")
161 +
                net.profiles[key].drop(columns=net.profiles[key].columns[~net.profiles[
162 +
                    key].columns.isin(net[elm].index)], inplace=True)
163 +
164 +
132 165
def get_absolute_profiles_from_relative_profiles(
133 166
        net, element, multiplying_column, relative_profiles=None, profile_column="profile",
134 167
        profile_suffix=None, time_as_index=False, **kwargs):
@@ -202,7 +235,7 @@
Loading
202 235
            element, profile_column))
203 236
        missing_col_handling = "missing_col_handling"
204 237
        applied_profiles = pd.Series([missing_col_handling]*net[element].shape[0],
205 -
                                     index=net[element].index)
238 +
                                     index=net[element].index, dtype=object)
206 239
        relative_profiles[missing_col_handling] = 1
207 240
208 241
    # nan profile handling
@@ -250,7 +283,8 @@
Loading
250 283
    OUTPUT:
251 284
        **abs_val** (dict) - absolute values calculated from relative scaling factors and maximum
252 285
        active or reactive powers. The keys of this dict are tuples consisting of element and
253 -
        parameter. The values are DataFrames with absolute power values.
286 +
        column or of strings consisting of the same but splitted by '.'.
287 +
        The values are DataFrames with absolute power values.
254 288
    """
255 289
    abs_val = dict()
256 290
@@ -280,10 +314,10 @@
Loading
280 314
            if isinstance(loadcase_type, list):
281 315
                assert elm_col[0] == "sgen"
282 316
                assert len(loadcase_type) == 3
283 -
                Idx_wind = net.sgen.loc[(net.sgen.type.str.contains("Wind").fillna(False)) |
284 -
                                        (net.sgen.type.str.contains("WP").fillna(False))].index
285 -
                Idx_pv = net.sgen.loc[net.sgen.type.str.contains("PV").fillna(False)].index
286 -
                Idx_sgen = net.sgen.index.difference(Idx_wind | Idx_pv)
317 +
                Idx_wind = net.sgen.index[(net.sgen.type.str.contains("Wind").fillna(False)) |
318 +
                                        (net.sgen.type.str.contains("WP").fillna(False))]
319 +
                Idx_pv = net.sgen.index[net.sgen.type.str.contains("PV").fillna(False)]
320 +
                Idx_sgen = net.sgen.index.difference(Idx_wind.union(Idx_pv))
287 321
                net.sgen["loadcase_type"] = ""
288 322
                net.sgen['loadcase_type'].loc[Idx_wind] = loadcase_type[0]
289 323
                net.sgen['loadcase_type'].loc[Idx_pv] = loadcase_type[1]
@@ -303,7 +337,7 @@
Loading
303 337
    return abs_val
304 338
305 339
306 -
def apply_const_controllers(net, absolute_profiles_values):
340 +
def apply_const_controllers(net, absolute_profiles_values, exclude_elms_dict=None):
307 341
    """
308 342
    Applys ConstControl instances to the net. As a result, one can easily run timeseries with given
309 343
    power values of e.g. loads, sgens, storages or gens.
@@ -315,10 +349,24 @@
Loading
315 349
        keys should be tuples of length 2 (element and parameter), DataFrame size is
316 350
        timesteps x number of elements
317 351
352 +
    OPTIONAL:
353 +
        **exclude_elms_dict** (dict, None) - elements which should not get ConstControllers. The
354 +
        keys should be the element type, such as "sgen", and the values should be the indices.
318 355
    """
319 356
    n_time_steps = dict()
320 -
    for (elm, param), values in absolute_profiles_values.items():
357 +
    for key, values in absolute_profiles_values.items():
358 +
        if isinstance(key, tuple):
359 +
            elm = key[0]
360 +
            col = key[1]
361 +
        elif isinstance(key, str):
362 +
            elm = key.split(".")[0]
363 +
            col = key.split(".")[1]
364 +
        else:
365 +
            raise NotImplementedError("The keys of net.profiles are expected as " +
366 +
                                      "tuple(element, column) or as str, e.g. 'gen.vm_pu'.")
321 367
        if values.shape[1]:
368 +
            to_exclude = pd.Index([]) if not isinstance(exclude_elms_dict, dict) or elm not in \
369 +
                exclude_elms_dict.keys() else pd.Index(exclude_elms_dict[elm])
322 370
323 371
            # check DataFrame shape[0] == time_steps
324 372
            if elm in n_time_steps.keys():
@@ -331,20 +379,20 @@
Loading
331 379
            # check DataFrame shape[1] == net[elm].index
332 380
            unknown_idx = values.columns.difference(net[elm].index)
333 381
            if len(unknown_idx):
334 -
                logger.warning("In absolute_profiles_values[%s][%s], " % (elm, param) +
382 +
                logger.warning("In absolute_profiles_values[%s], " % key +
335 383
                               "there are indices additional & unknown to net[%s].index" % elm +
336 384
                               str(["%i" % i for i in unknown_idx]))
337 -
            missing_idx = net[elm].index.difference(values.columns)
385 +
            missing_idx = net[elm].index.difference(values.columns.union(to_exclude))
338 386
            if len(missing_idx):
339 -
                logger.warning("In absolute_profiles_values[%s][%s], " % (elm, param) +
387 +
                logger.warning("In absolute_profiles_values[%s], " % key +
340 388
                               "these indices are missing compared to net[%s].index" % elm +
341 389
                               str(["%i" % i for i in missing_idx]))
342 390
343 391
            # apply const controllers
344 -
            idx = list(net[elm].index.intersection(values.columns))
345 -
            ConstControl(net, element=elm, variable=param,
392 +
            idx = list(net[elm].index.difference(to_exclude).intersection(values.columns))
393 +
            ConstControl(net, element=elm, variable=col,
346 394
                         element_index=idx, profile_name=idx,
347 -
                         data_source=DFData(absolute_profiles_values[(elm, param)][idx]))
395 +
                         data_source=DFData(absolute_profiles_values[key][idx]))
348 396
349 397
    # compare all DataFrame shape[0] == time_steps
350 398
    if len(set(n_time_steps.values())) > 1:

@@ -1,6 +1,4 @@
Loading
1 -
# -*- coding: utf-8 -*-
2 -
3 -
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
1 +
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
4 2
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
5 3
# contributors (see AUTHORS file for details). All rights reserved.
6 4
@@ -387,7 +385,7 @@
Loading
387 385
            "Transformer"]["id"])].astype(int)
388 386
        idx_line = data["Measurement"].index[data["Measurement"]["element"].isin(data[
389 387
            "Line"]["id"])].astype(int)
390 -
        idx_bus = data["Measurement"].index.difference(idx_line | idx_trafo).astype(int)
388 +
        idx_bus = data["Measurement"].index.difference(idx_line.union(idx_trafo)).astype(int)
391 389
        n_no_element2_info = data["Measurement"]["element"].isnull().sum()
392 390
        if n_no_element2_info != len(idx_bus):
393 391
            logger.warning("%i Measurement have no element2 info, but " % n_no_element2_info +
@@ -465,13 +463,13 @@
Loading
465 463
    # --- splitting Line table into a table with dcline data and line data
466 464
    idx_lines = csv_data["Line"].index[csv_data["Line"].type.isin(csv_data["LineType"].id)]
467 465
    idx_dclines = csv_data["Line"].index[csv_data["Line"].type.isin(csv_data["DCLineType"].id)]
468 -
    missing = csv_data["Line"].index.difference(idx_lines | idx_dclines)
466 +
    missing = csv_data["Line"].index.difference(idx_lines.union(idx_dclines))
469 467
    if len(missing):
470 468
        raise ValueError("In Line table, the types of these line indices misses in LineType and " +
471 469
                         "DCLineType table: " + str(list(missing)))
472 -
    if len(idx_lines & idx_dclines):
470 +
    if len(idx_lines.intersection(idx_dclines)):
473 471
        raise ValueError("In Line table, the types of these line indices occur in LineType and " +
474 -
                         "DCLineType table: " + str(list(idx_lines & idx_dclines)))
472 +
                         "DCLineType table: " + str(list(idx_lines.intersection(idx_dclines))))
475 473
    csv_data["Line*line"] = csv_data["Line"].loc[idx_lines]
476 474
    csv_data["Line*dcline"] = csv_data["Line"].loc[idx_dclines]
477 475
@@ -637,7 +635,7 @@
Loading
637 635
        # trafo3w type
638 636
        table = "Transformer3WType"
639 637
        for vkr, sR, pCu in zip(vkr_3w, sR_3w, pCu_3w):
640 -
            data[table][vkr] = 100 * data[table][pCu] / (data[table][sR] * sb2pp_base())
638 +
            data[table][vkr] = 100 * data[table][pCu] / (data[table][sR]*1e3)
641 639
642 640
643 641
def _convert_elements_and_types(input_data, output_data):
@@ -826,10 +824,10 @@
Loading
826 824
        Type_col_except_std_type = input_data[corr_str_type].columns.difference(["std_type"])
827 825
        if version.parse(pd.__version__) >= version.parse("0.21.0"):
828 826
            input_data[corr_str] = input_data[corr_str].reindex(
829 -
                columns=input_data[corr_str].columns | Type_col_except_std_type)
827 +
                columns=input_data[corr_str].columns.union(Type_col_except_std_type))
830 828
        else:
831 829
            input_data[corr_str] = input_data[corr_str].reindex_axis(
832 -
                input_data[corr_str].columns | Type_col_except_std_type, axis=1)
830 +
                input_data[corr_str].columns.union(Type_col_except_std_type), axis=1)
833 831
        input_data[corr_str].loc[:, Type_col_except_std_type] = input_data[corr_str_type].loc[
834 832
            idx_type, Type_col_except_std_type].values
835 833
        input_data[corr_str_type].drop(input_data[corr_str_type].index, inplace=True)

@@ -1,6 +1,4 @@
Loading
1 -
# -*- coding: utf-8 -*-
2 -
3 -
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
1 +
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
4 2
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
5 3
# contributors (see AUTHORS file for details). All rights reserved.
6 4
@@ -19,7 +17,7 @@
Loading
19 17
20 18
def sb2pp_base(variable="power"):
21 19
    """ converting factor from simbench data structure to pandapower:
22 -
        power: simbench in MVA - pandapower in kVA
20 +
        power: simbench in MVA - pandapower in MVA (former in kVA)
23 21
        current: simbench in A, pandapower in kA
24 22
    """
25 23
    if variable == "power":
@@ -248,11 +246,13 @@
Loading
248 246
#        ("dVaHV", "xxxxxxxx", None), ("dVaMV", "xxxxxxxx", None),
249 247
#        ("dVaLV", "xxxxxxxx", None),
250 248
#        ("tapNeutrMV", "xxxxxxxx", None), ("tapNeutrLV", "xxxxxxxx", None),
251 -
        ("tapMinHV", "tap_min", None), ("tapMaxHV", "tap_max", None)
249 +
        ("tapMinHV", "tap_min", None), ("tapMaxHV", "tap_max", None),
252 250
#        ("tapMinMV", "xxxxxxxx", None), ("tapMinLV", "xxxxxxxx", None),
253 251
#        ("tapMaxMV", "xxxxxxxx", None), ("tapMaxLV", "xxxxxxxx", None)
254 252
        # cosidered by _add_vm_va_setpoints_to_buses() and _add_phys_type_and_vm_va_setpoints_to_generation_element_tables():
255 253
        # ("vmSetp", "vm_pu", None), ("vaSetp", "va:degree", None),
254 +
        # slack_weight
255 +
        ("dspf", "slack_weight", None)
256 256
        ]
257 257
258 258
    # --- add "pLoad", "qLoad" respectively "pPP", "qPP" or others, according to tablename

@@ -1,6 +1,4 @@
Loading
1 -
# -*- coding: utf-8 -*-
2 -
3 -
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
1 +
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
4 2
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
5 3
# contributors (see AUTHORS file for details). All rights reserved.
6 4

@@ -1,13 +1,20 @@
Loading
1 -
# Copyright (c) 2015-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
2 -
# and Energy System Technology (IEE), Kassel. All rights reserved.
1 +
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
2 +
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
3 +
# contributors (see AUTHORS file for details). All rights reserved.
3 4
4 5
from setuptools import setup, find_packages
5 6
import re
6 7
8 +
with open('README.rst', 'rb') as f:
9 +
    install = f.read().decode('utf-8')
10 +
11 +
with open('CHANGELOG.rst', 'rb') as f:
12 +
    changelog = f.read().decode('utf-8')
13 +
7 14
with open('README.rst', 'rb') as f:
8 15
    readme = f.read().decode('utf-8')
9 16
10 -
classifiers=[
17 +
classifiers = [
11 18
        'Development Status :: 5 - Production/Stable',
12 19
        'Environment :: Console',
13 20
        'Intended Audience :: Developers',
@@ -17,23 +24,28 @@
Loading
17 24
        'Operating System :: OS Independent',
18 25
        'Programming Language :: Python',
19 26
        'Programming Language :: Python :: 3']
20 -
with open('.travis.yml', 'rb') as f:
27 +
with open('.github/workflows/github_test_action.yml', 'rb') as f:
21 28
    lines = f.read().decode('utf-8')
22 -
    for version in re.findall('python: 3.[0-9]', lines):
23 -
        classifiers.append('Programming Language :: Python :: 3.%s'%version[-1])
29 +
    versions = set(re.findall('3.[0-9]', lines))
30 +
    for version in versions:
31 +
        classifiers.append('Programming Language :: Python :: 3.%s' % version[-1])
32 +
33 +
long_description = '\n\n'.join((install, changelog))
24 34
25 35
setup(
26 36
    name='simbench',
27 -
    version='1.2.0',
37 +
    version='1.3.0',
28 38
    author='Steffen Meinecke',
29 39
    author_email='steffen.meinecke@uni-kassel.de',
30 40
    description='Electrical Power System Benchmark Models',
31 41
    long_description=readme,
42 +
    long_description_content_type="text/x-rst",
32 43
    url='http://www.simbench.de/en',
33 44
    license='odbl',
34 -
    install_requires=["pandapower>=2.2"],
35 -
    extras_require = {"docs": ["numpydoc", "sphinx", "sphinx_rtd_theme"]},
45 +
    install_requires=["pandapower>=2.5"],
46 +
    extras_require={"docs": ["numpydoc", "sphinx", "sphinx_rtd_theme"],
47 +
                    "all": ["numpydoc", "sphinx", "sphinx_rtd_theme"]},
36 48
    packages=find_packages(),
37 49
    include_package_data=True,
38 -
    classifiers = classifiers
50 +
    classifiers=classifiers
39 51
)

@@ -1,10 +1,8 @@
Loading
1 -
# -*- coding: utf-8 -*-
2 -
3 -
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
1 +
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
4 2
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
5 3
# contributors (see AUTHORS file for details). All rights reserved.
6 4
7 -
__version__ = "1.2.0"
5 +
__version__ = "1.3.0"
8 6
__author__ = "smeinecke"
9 7
10 8
import os

@@ -1,6 +1,4 @@
Loading
1 -
# -*- coding: utf-8 -*-
2 -
3 -
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
1 +
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
4 2
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
5 3
# contributors (see AUTHORS file for details). All rights reserved.
6 4

@@ -1,6 +1,4 @@
Loading
1 -
# -*- coding: utf-8 -*-
2 -
3 -
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
1 +
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
4 2
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
5 3
# contributors (see AUTHORS file for details). All rights reserved.
6 4

@@ -1,6 +1,4 @@
Loading
1 -
# -*- coding: utf-8 -*-
2 -
3 -
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
1 +
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
4 2
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
5 3
# contributors (see AUTHORS file for details). All rights reserved.
6 4

@@ -1,6 +1,4 @@
Loading
1 -
# -*- coding: utf-8 -*-
2 -
3 -
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
1 +
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
4 2
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
5 3
# contributors (see AUTHORS file for details). All rights reserved.
6 4
@@ -42,10 +40,10 @@
Loading
42 40
    min_max = ['min_p_mw', 'max_p_mw', 'min_q_mvar', 'max_q_mvar']
43 41
    add_columns = {x: (min_max+y if x in elms_with_min_max else y) for x, y in add_columns.items()}
44 42
    add_columns["bus"] = ['min_vm_pu', 'max_vm_pu', 'substation'] + add_columns["bus"]
45 -
    add_columns["ext_grid"] = ['dspf', 'p_disp_mw', 'phys_type', 'type', 'profile', 'sn_mva'] + \
46 -
        add_columns["ext_grid"]
47 -
    add_columns["gen"] = ['dspf', 'phys_type', 'profile'] + add_columns["gen"]
48 -
    add_columns["sgen"] = ['dspf', 'phys_type', 'profile'] + add_columns["sgen"]
43 +
    add_columns["ext_grid"] = ['slack_weight', 'p_disp_mw', 'phys_type', 'type', 'profile',
44 +
                               'sn_mva'] + add_columns["ext_grid"]
45 +
    add_columns["gen"] = ['slack_weight', 'phys_type', 'profile'] + add_columns["gen"]
46 +
    add_columns["sgen"] = ['slack_weight', 'phys_type', 'profile'] + add_columns["sgen"]
49 47
    add_columns["load"] = ['profile'] + add_columns["load"]
50 48
    add_columns["storage"] = ['profile', "efficiency_percent", "self-discharge_percent_per_day"] + \
51 49
        add_columns["storage"]
@@ -277,16 +275,18 @@
Loading
277 275
278 276
279 277
def _add_dspf_calc_type_and_phys_type_columns(net):
280 -
    """ Adds 'dspf' and 'calc_type' column to generation elements if missing. """
278 +
    """ Adds 'slack_weight' and 'calc_type' column to generation elements if missing. """
281 279
    gen_tables = ["ext_grid", "gen", "sgen", "ward", "xward"]
282 280
    phys_types = ["ExternalNet", "PowerPlant", "RES", None, None]
283 281
    calc_types = ["vavm", "pvm", "pq", "Ward", "xWard"]
284 282
    for gen_table, phys_type, calc_type in zip(gen_tables, phys_types, calc_types):
285 -
        if "dspf" not in net[gen_table].columns or net[gen_table]["dspf"].isnull().all():
283 +
        if "slack_weight" not in net[gen_table].columns or net[gen_table][
284 +
                "slack_weight"].isnull().all():
286 285
            if gen_table == "ext_grid" and net[gen_table].shape[0]:
287 -
                net[gen_table]["dspf"] = 1/net[gen_table].shape[0]
286 +
                net[gen_table]["slack_weight"] = 1/net[gen_table].shape[0]
288 287
            else:
289 -
                net[gen_table]["dspf"] = 0
288 +
                net[gen_table]["slack_weight"] = 0
289 +
        net[gen_table].rename(columns={"slack_weight": "dspf"}, inplace=True)
290 290
        if phys_type is not None:
291 291
            if "phys_type" not in net[gen_table].columns:
292 292
                net[gen_table]["phys_type"] = phys_type
@@ -302,7 +302,7 @@
Loading
302 302
    non_busbars = buses[no_busbar.values]
303 303
    bb_sw = net.switch.loc[net.switch.et == "b"]
304 304
305 -
    new_buses = pd.Series()
305 +
    new_buses = pd.Series(dtype=int)
306 306
    for X, Y in zip(["element", "bus"], ["bus", "element"]):
307 307
        X_in_nonb = bb_sw[X].isin(non_busbars)
308 308
        Y_is_busbar = net.bus.loc[bb_sw[Y]].type.str.contains("busbar").values
@@ -402,14 +402,14 @@
Loading
402 402
    idx_t_sw = net.switch.index[net.switch.et == "t"]
403 403
    idx_l_sw = net.switch.index[net.switch.et == "l"]
404 404
    n_branch_switches = len(idx_t_sw)+len(idx_l_sw)
405 -
    idx_bus = net.switch.bus[idx_t_sw | idx_l_sw]
405 +
    idx_bus = net.switch.bus[idx_t_sw.union(idx_l_sw)]
406 406
407 407
    # --- create auxiliary nodes
408 408
    names, reserved_aux_node_names = append_str_by_underline_count(
409 409
        net.bus.name[idx_bus], reserved_strings=reserved_aux_node_names)
410 410
    if "subnet" in net.switch.columns:
411 411
        # if replace_branch_switches() is called by pp2csv_data(), "subnet" is available
412 -
        subnets = net.switch.subnet.loc[idx_t_sw | idx_l_sw].values
412 +
        subnets = net.switch.subnet.loc[idx_t_sw.union(idx_l_sw)].values
413 413
    else:
414 414
        # if replace_branch_switches() is called out of pp2csv_data(), this else statement is given
415 415
        subnets = net.bus.zone[idx_bus].values
@@ -433,16 +433,16 @@
Loading
433 433
            idx_b_sw].values
434 434
        # is_first_bus_type == hv_bus resp. from_bus
435 435
        pos_in_aux_buses = idx_in_2nd_array(np.array(idx_b_sw[is_first_bus_type]),
436 -
                                            np.array(idx_t_sw | idx_l_sw))
436 +
                                            np.array(idx_t_sw.union(idx_l_sw)))
437 437
        net[branch][bus_types[0]].loc[idx_elm[is_first_bus_type]] = aux_buses[pos_in_aux_buses]
438 438
        # ~is_first_bus_type == lv_bus resp. to_bus
439 439
        pos_in_aux_buses = idx_in_2nd_array(np.array(idx_b_sw[~is_first_bus_type]),
440 -
                                            np.array(idx_t_sw | idx_l_sw))
440 +
                                            np.array(idx_t_sw.union(idx_l_sw)))
441 441
        net[branch][bus_types[1]].loc[idx_elm[~is_first_bus_type]] = aux_buses[pos_in_aux_buses]
442 442
443 443
    # --- replace switch element by new auxiliary nodes
444 -
    net.switch.element.loc[idx_t_sw | idx_l_sw] = aux_buses
445 -
    net.switch.et.loc[idx_t_sw | idx_l_sw] = "b"
444 +
    net.switch.element.loc[idx_t_sw.union(idx_l_sw)] = aux_buses
445 +
    net.switch.et.loc[idx_t_sw.union(idx_l_sw)] = "b"
446 446
447 447
    return reserved_aux_node_names
448 448

@@ -1,6 +1,4 @@
Loading
1 -
# -*- coding: utf-8 -*-
2 -
3 -
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
1 +
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
4 2
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
5 3
# contributors (see AUTHORS file for details). All rights reserved.
6 4

@@ -1,3 +1,7 @@
Loading
1 +
# Copyright (c) 2019-2021 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
2 +
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
3 +
# contributors (see AUTHORS file for details). All rights reserved.
4 +
1 5
from .simbench_code import *
2 6
from .profiles import *
3 7
from .loadcases import *
Files Coverage
simbench 88.31%
setup.py 0.00%
Project Totals (15 files) 87.52%

No yaml found.

Create your codecov.yml to customize your Codecov experience

Sunburst
The inner-most circle is the entire project; moving away from the center are folders and, finally, a single file. The size and color of each slice represent the number of statements and the coverage, respectively.
Icicle
The top section represents the entire project, proceeding with folders and, finally, individual files. The size and color of each slice represent the number of statements and the coverage, respectively.
Grid
Each block represents a single file in the project. The size and color of each block represent the number of statements and the coverage, respectively.
Loading