@@ -11,7 +11,9 @@
 
 def test_center_mask():
     """ Test that the first mask is centered """
-    cb6 = pmd.load_file(os.path.join(os.path.dirname(__file__), "../data/cb6-but/vac.pdb"))
+    cb6 = pmd.load_file(
+        os.path.join(os.path.dirname(__file__), "../data/cb6-but/vac.pdb")
+    )
     aligned_cb6 = zalign(cb6, ":CB6", ":BUT")
     test_coordinates = check_coordinates(aligned_cb6, ":CB6")
     assert np.allclose(test_coordinates, np.zeros(3))
@@ -19,7 +21,9 @@
 
 def test_alignment_after_offset():
     """ Test that molecule is properly aligned after random offset. """
-    cb6 = pmd.load_file(os.path.join(os.path.dirname(__file__), "../data/cb6-but/vac.pdb"))
+    cb6 = pmd.load_file(
+        os.path.join(os.path.dirname(__file__), "../data/cb6-but/vac.pdb")
+    )
     random_coordinates = np.random.randint(10) * np.random.rand(1, 3)
     cb6_offset = offset_structure(cb6, random_coordinates)
     aligned_cb6 = zalign(cb6_offset, ":CB6", ":BUT")
@@ -29,8 +33,14 @@
 
 def test_theta_after_alignment():
     """ Test that molecule is properly aligned after random offset. """
-    cb6 = pmd.load_file(os.path.join(os.path.dirname(__file__), "../data/cb6-but/vac.pdb"))
+    cb6 = pmd.load_file(
+        os.path.join(os.path.dirname(__file__), "../data/cb6-but/vac.pdb")
+    )
     aligned_cb6 = zalign(cb6, ":CB6", ":BUT")
     assert get_theta(aligned_cb6, ":CB6", ":BUT", axis="z") == 0
-    assert pytest.approx(get_theta(aligned_cb6, ":CB6", ":BUT", axis="x"), 0.001) == 1.5708
-    assert pytest.approx(get_theta(aligned_cb6, ":CB6", ":BUT", axis="y"), 0.001) == 1.5708
+    assert (
+        pytest.approx(get_theta(aligned_cb6, ":CB6", ":BUT", axis="x"), 0.001) == 1.5708
+    )
+    assert (
+        pytest.approx(get_theta(aligned_cb6, ":CB6", ":BUT", axis="y"), 0.001) == 1.5708
+    )
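Note: the 1.5708 target in these assertions is just pi/2 in radians; after z-alignment the host-guest vector should be perpendicular to the x and y axes. A minimal, self-contained illustration of the pytest.approx comparison pattern used above (not part of this changeset):

import numpy as np
import pytest

def test_approx_pattern():
    # pytest.approx wraps the measured value; 0.001 is the relative tolerance.
    measured = np.pi / 2  # stand-in for what get_theta() would return
    assert pytest.approx(measured, 0.001) == 1.5708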

@@ -1,12 +1,13 @@
-import logging as log
-import os
 import base64
 import json
+import logging as log
+import os
+
 import numpy as np
-from paprika.restraints import DAT_restraint
-from parmed.amber import AmberParm
 from parmed import Structure
+from parmed.amber import AmberParm
 
+from paprika.restraints import DAT_restraint
 
 # https://stackoverflow.com/questions/27909658/json-encoder-and-decoder-for-complex-numpy-arrays
 # https://stackoverflow.com/a/24375113/901925
@@ -63,7 +64,6 @@
         elif isinstance(obj, (np.ndarray,)):
             return obj.tolist()
 
-
         # Let the base class default method raise the TypeError
         # return json.JSONEncoder(self, obj)
         return super(NumpyEncoder, self).default(obj)
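For context, NumpyEncoder follows the Stack Overflow recipe linked above: anything json.dumps cannot serialize is routed through default(), where NumPy types are converted to plain Python containers. A minimal sketch of the same round-trip idea (illustrative only, not the full class in this file):

import json
import numpy as np

class MinimalNumpyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()  # arrays become nested lists
        if isinstance(obj, (np.integer, np.floating)):
            return obj.item()  # NumPy scalars become Python scalars
        return super().default(obj)  # let the base class raise TypeError

encoded = json.dumps({"coords": np.zeros((2, 3))}, cls=MinimalNumpyEncoder)
decoded = json.loads(encoded)  # plain lists; re-wrap with np.asarray() if needed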
@@ -104,8 +104,21 @@
         tmp = DAT_restraint()
         tmp.__dict__ = loaded
 
-        properties = ["mask1", "mask2", "mask3", "mask4", "topology", "instances", "custom_restraint_values",
-                      "auto_apr", "continuous_apr", "attach", "pull", "release", "amber_index"]
+        properties = [
+            "mask1",
+            "mask2",
+            "mask3",
+            "mask4",
+            "topology",
+            "instances",
+            "custom_restraint_values",
+            "auto_apr",
+            "continuous_apr",
+            "attach",
+            "pull",
+            "release",
+            "amber_index",
+        ]
         for class_property in properties:
             if f"_{class_property}" in tmp.__dict__.keys():
                 tmp.__dict__[class_property] = tmp.__dict__[f"_{class_property}"]

@@ -5,6 +5,7 @@
 
 logger = logging.getLogger(__name__)
 
+
 def read_yaml(file):
     """
     Read `Taproom <https://github.com/slochower/host-guest-benchmarks>`_ -style YAML-formatted instructions for
@@ -40,7 +41,7 @@
     regex = re.compile("(%s)" % "|".join(map(re.escape, dict.keys())))
 
     # For each match, look-up corresponding value in dictionary
-    return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text)
+    return regex.sub(lambda mo: dict[mo.string[mo.start() : mo.end()]], text)
 
 
 def de_alias(yaml_data):
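The regex being reformatted here implements a one-pass multiple-replace: all dictionary keys are joined into a single alternation, and each match is looked up in the dictionary. A short standalone sketch of the technique (the alias table is hypothetical):

import re

aliases = {"K+": "potassium", "Cl-": "chloride"}  # hypothetical mapping
pattern = re.compile("(%s)" % "|".join(map(re.escape, aliases.keys())))

text = "Add K+ and Cl- ions."
result = pattern.sub(lambda mo: aliases[mo.group(0)], text)  # one pass over text
print(result)  # Add potassium and chloride ions.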

@@ -11,15 +11,17 @@
 
 # Add imports here
 versions = get_versions()
-__version__ = versions['version']
-__git_revision__ = versions['full-revisionid']
+__version__ = versions["version"]
+__git_revision__ = versions["full-revisionid"]
 del get_versions, versions
 
 import logging
+
 logger = logging.getLogger(__name__)
 
 try:
     from simtk import openmm
+
     from paprika.setup import Setup
 
     setup = Setup
@@ -29,9 +31,10 @@
     setup = None
 
 from paprika.analyze import Analyze
+
 analyze = Analyze
 
 if setup is None:
     __all__ = ["setup", "analyze"]
 else:
-    __all__ = ["analyze"]

@@ -7,9 +7,7 @@
 import pytest
 from pytest import approx
 
-from paprika import analysis
-from paprika import log
-from paprika import restraints
+from paprika import analysis, log, restraints
 
 log.config_root_logger(verbose=True)
 logger = logging.getLogger(__name__)

@@ -779,4 +779,4 @@
     lower.initialize()
 
     assert get_bias_potential_type(lower, "attach", 0) == "lower_walls"
-    assert get_bias_potential_type(lower, "attach", 1) == "lower_walls"

@@ -27,7 +27,7 @@
         topology_name: str,
         trajectory_mask: str = "*.dcd",
         bootstrap_cycles: int = 1000,
-        analysis_method: str = "ti-block"
+        analysis_method: str = "ti-block",
     ) -> Dict[str, Any]:
         """Computes the free energy of a specified phase of an APR calculation.
 
@@ -104,7 +104,7 @@
                 and restraint.mask3 is None
                 and restraint.mask4 is None
             ),
-            None
+            None,
         )
 
         theta_restraint = next(
@@ -116,7 +116,7 @@
                 and restraint.mask3 is not None
                 and restraint.mask4 is None
             ),
-            None
+            None,
         )
         beta_restraint = next(
             (
@@ -127,7 +127,7 @@
                 and restraint.mask3 is not None
                 and restraint.mask4 is None
             ),
-            None
+            None,
         )
 
         if not distance_restraint or not theta_restraint or not beta_restraint:
@@ -144,7 +144,11 @@
         return analysis.results["ref_state_work"]
 
     @classmethod
-    def symmetry_correction(cls, n_microstates: int, temperature: float,) -> float:
+    def symmetry_correction(
+        cls,
+        n_microstates: int,
+        temperature: float,
+    ) -> float:
         """Computes the free energy corrections to apply to symmetrical guest
         when the guest is restrained to just one of several possible symmetrical
         configurations (e.g. butane restrained in a cyclic host).
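The body of symmetry_correction is outside this hunk, but the docstring describes the standard symmetry-number correction: restraining a guest to one of n equivalent microstates costs -RT ln(n). A hedged sketch of that relation (assuming kcal/mol units; the actual implementation may differ):

import numpy as np

R = 1.987204e-3  # gas constant in kcal/(mol K)

def symmetry_correction(n_microstates: int, temperature: float) -> float:
    # Free energy of restricting the guest to 1 of n symmetric poses.
    return -R * temperature * np.log(n_microstates)

print(symmetry_correction(2, 298.15))  # about -0.41 kcal/mol for, e.g., two poses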

@@ -263,7 +263,9 @@
 
         for fraction in value:
             if fraction < 0.0 or fraction > 1.0:
-                raise ValueError(f"Unable to calculation fraction of the data: {fraction}.")
+                raise ValueError(
+                    f"Unable to calculation fraction of the data: {fraction}."
+                )
 
         self._fractions = value
 
@@ -403,9 +405,15 @@
         """
 
         orders = {"attach": [], "pull": [], "release": []}
-        active_attach_restraints = list(compress(self.restraint_list, self.changing_restraints["attach"]))
-        active_pull_restraints = list(compress(self.restraint_list, self.changing_restraints["pull"]))
-        active_release_restraints = list(compress(self.restraint_list, self.changing_restraints["release"]))
+        active_attach_restraints = list(
+            compress(self.restraint_list, self.changing_restraints["attach"])
+        )
+        active_pull_restraints = list(
+            compress(self.restraint_list, self.changing_restraints["pull"])
+        )
+        active_release_restraints = list(
+            compress(self.restraint_list, self.changing_restraints["release"])
+        )
 
         attach_orders = []
         pull_orders = []
@@ -497,9 +505,9 @@
         # which should be the same value for all restraints.
         if len(active_attach_restraints) > 0:
             if (
-                    active_attach_restraints[0].continuous_apr
-                    and self.orders["attach"].size > 0
-                    and self.orders["pull"].size > 0
+                active_attach_restraints[0].continuous_apr
+                and self.orders["attach"].size > 0
+                and self.orders["pull"].size > 0
             ):
                 logger.debug(
                     "Replacing {} with {} in {} for `continuous_apr`...".format(
@@ -512,9 +520,9 @@
 
         if len(active_release_restraints) > 0:
             if (
-                    active_release_restraints[0].continuous_apr
-                    and self.orders["release"].size > 0
-                    and self.orders["pull"].size > 0
+                active_release_restraints[0].continuous_apr
+                and self.orders["release"].size > 0
+                and self.orders["pull"].size > 0
             ):
                 logger.debug(
                     "Replacing {} with {} in {} for `continuous_apr`...".format(
@@ -592,9 +600,15 @@
         """
 
         # Unpack the prepared data
-        num_win, data_points, max_data_points, active_rest, force_constants, targets, ordered_values = (
-            prepared_data
-        )
+        (
+            num_win,
+            data_points,
+            max_data_points,
+            active_rest,
+            force_constants,
+            targets,
+            ordered_values,
+        ) = prepared_data
 
         # Number of data points in each restraint value array
         N_k = np.array(data_points)
@@ -621,18 +635,18 @@
                     if rest.mask3 is not None and rest.mask4 is not None:
                         target = targets_T[l, r]
                         # Coords from coord window, k
-                        bool_list = ordered_values[k][r] < target-180.0
+                        bool_list = ordered_values[k][r] < target - 180.0
                         ordered_values[k][r][bool_list] += 360.0
-                        bool_list = ordered_values[k][r] > target+180.0
+                        bool_list = ordered_values[k][r] > target + 180.0
                         ordered_values[k][r][bool_list] -= 360.0
 
                 # Compute the potential ... for each frame, sum the contributions for each restraint
                 # Note, we multiply by beta, and do some extra [l,:,None] to
                 # get the math operation correct.
-                u_kln[k, l, 0: N_k[k]] = np.sum(
+                u_kln[k, l, 0 : N_k[k]] = np.sum(
                     self.beta
                     * force_constants_T[l, :, None]
-                    * (ordered_values[k]-targets_T[l, :, None]) ** 2,
+                    * (ordered_values[k] - targets_T[l, :, None]) ** 2,
                     axis=0,
                 )
 
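This loop fills the MBAR reduced-potential matrix u_kln: samples drawn in window k are re-evaluated under the (harmonic, beta-scaled) restraint of every window l. A toy NumPy construction of the same quantity for a single distance restraint (names are illustrative, not the code under review):

import numpy as np

beta = 1.0 / (1.987204e-3 * 298.15)  # 1/RT in mol/kcal at 298.15 K
force_constants = np.array([0.0, 2.5, 5.0])  # per-window k, kcal/mol/A^2
targets = np.array([6.0, 6.0, 6.0])  # per-window target distance, A
values = np.random.normal(6.0, 0.5, (3, 100))  # per-window samples

num_win, n_frames = values.shape
u_kln = np.zeros((num_win, num_win, n_frames))
for k in range(num_win):  # window the samples came from
    for l in range(num_win):  # window whose potential is evaluated
        u_kln[k, l, :] = beta * force_constants[l] * (values[k] - targets[l]) ** 2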
@@ -648,17 +662,19 @@
                 # If the potential is zero everywhere, we can't estimate the uncertainty, so
                 # check the next *potential* window which probably had non-zero
                 # force constants
-                while not u_kln[k, l, 0: N_k[k]].any():
+                while not u_kln[k, l, 0 : N_k[k]].any():
                     l += 1
                 # Now compute statistical inefficiency: g = N*(SEM**2)/variance
                 nearest_max = get_nearest_max(N_k[k])
                 sem = get_block_sem(u_kln[k, l, 0:nearest_max])
-                variance = np.var(u_kln[k, l, 0: N_k[k]])
+                variance = np.var(u_kln[k, l, 0 : N_k[k]])
                 g_k[k] = N_k[k] * (sem ** 2) / variance
 
         if method == "mbar-autoc":
             for k in range(num_win):
-                [t0, g_k[k], Neff_max] = timeseries.detectEquilibration(N_k[k])  # compute indices of uncorrelated
+                [t0, g_k[k], Neff_max] = timeseries.detectEquilibration(
+                    N_k[k]
+                )  # compute indices of uncorrelated
                 # timeseries
 
         # Create subsampled indices and count their lengths. If g=1, ie no correlation,
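The statistical inefficiency g computed above (g = N * SEM**2 / variance) converts raw frame counts into effective numbers of independent samples; pymbar's timeseries.detectEquilibration provides the autocorrelation-based alternative. A compact sketch of the block-based relation, assuming a fixed block size instead of the get_nearest_max/get_block_sem machinery in this file:

import numpy as np

def statistical_inefficiency(series, block_size=50):
    # g = N * SEM**2 / variance; g is near 1 for uncorrelated data.
    n_blocks = len(series) // block_size
    blocks = series[: n_blocks * block_size].reshape(n_blocks, block_size)
    sem = np.std(blocks.mean(axis=1), ddof=0) / np.sqrt(n_blocks - 1)
    return len(series) * sem ** 2 / np.var(series)

data = np.random.normal(size=5000)  # uncorrelated, so g should be close to 1
n_effective = len(data) / statistical_inefficiency(data)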
@@ -688,8 +704,7 @@
 
             mbar = pymbar.MBAR(u_kln, frac_N_k, verbose=verbose)
             mbar_results = mbar.getFreeEnergyDifferences(
-                compute_uncertainty=True,
-                return_dict=True
+                compute_uncertainty=True, return_dict=True
             )
 
             Deltaf_ij = mbar_results["Delta_f"]
@@ -709,8 +724,8 @@
                 # fraction of subsamples from the original
                 for k in range(num_win):
                     for l in range(num_win):
-                        u_kln_err[k, l, 0: frac_N_ss[k]] = u_kln[
-                            k, l, ss_indices[k][0: frac_N_ss[k]]
+                        u_kln_err[k, l, 0 : frac_N_ss[k]] = u_kln[
+                            k, l, ss_indices[k][0 : frac_N_ss[k]]
                         ]
 
                 # We toss junk_Deltaf_ij, because we got a better estimate for it from above using all data.
@@ -718,8 +733,7 @@
                 # correlation in the data.
                 mbar = pymbar.MBAR(u_kln_err, frac_N_ss, verbose=verbose)
                 mbar_results = mbar.getFreeEnergyDifferences(
-                    compute_uncertainty=True,
-                    return_dict=True
+                    compute_uncertainty=True, return_dict=True
                 )
                 dDeltaf_ij = mbar_results["dDelta_f"]
                 dDeltaf_ij_N_eff = mbar.computeEffectiveSampleNumber()
@@ -729,9 +743,13 @@
             dDeltaf_ij /= self.beta
 
             self.results[phase][method]["fraction_fe_matrix"][fraction] = Deltaf_ij
-            self.results[phase][method]["fraction_fe_Neffective"][fraction] = Deltaf_ij_N_eff
+            self.results[phase][method]["fraction_fe_Neffective"][
+                fraction
+            ] = Deltaf_ij_N_eff
             self.results[phase][method]["fraction_sem_matrix"][fraction] = dDeltaf_ij
-            self.results[phase][method]["fraction_sem_Neffective"][fraction] = dDeltaf_ij_N_eff
+            self.results[phase][method]["fraction_sem_Neffective"][
+                fraction
+            ] = dDeltaf_ij_N_eff
 
     def run_ti(self, phase, prepared_data, method):
         """
@@ -774,9 +792,15 @@
         """
 
         # Unpack the prepared data
-        num_win, data_points, max_data_points, active_rest, force_constants, targets, ordered_values = (
-            prepared_data
-        )
+        (
+            num_win,
+            data_points,
+            max_data_points,
+            active_rest,
+            force_constants,
+            targets,
+            ordered_values,
+        ) = prepared_data
 
         # Number of data points in each restraint value array
         N_k = np.array(data_points)
@@ -822,26 +846,26 @@
 
                 if rest.mask3 is not None and rest.mask4 is not None:
                     target = targets_T[k, r]
-                    bool_list = ordered_values[k][r] < target-180.0
+                    bool_list = ordered_values[k][r] < target - 180.0
                     ordered_values[k][r][bool_list] += 360.0
-                    bool_list = ordered_values[k][r] > target+180.0
+                    bool_list = ordered_values[k][r] > target + 180.0
                     ordered_values[k][r][bool_list] -= 360.0
 
             # Compute forces and store the values of the changing coordinate,
             # either lambda or target
             if phase == "attach" or phase == "release":
-                dU[k, 0: N_k[k]] = np.sum(
+                dU[k, 0 : N_k[k]] = np.sum(
                     max_force_constants[:, None]
-                    * (ordered_values[k]-targets_T[k, :, None]) ** 2,
+                    * (ordered_values[k] - targets_T[k, :, None]) ** 2,
                     axis=0,
                 )
                 # this is lambda. assume the same scaling for all restraints
                 dl_vals[k] = force_constants_T[k, 0] / max_force_constants[0]
             else:
-                dU[k, 0: N_k[k]] = np.sum(
+                dU[k, 0 : N_k[k]] = np.sum(
                     2.0
                     * max_force_constants[:, None]
-                    * (ordered_values[k]-targets_T[k, :, None]),
+                    * (ordered_values[k] - targets_T[k, :, None]),
                     axis=0,
                 )
                 # Currently assuming a single distance restraint
@@ -849,8 +873,8 @@
 
             # Compute standard deviations and SEMs, unless we're going to do
             # exact_sem_each_ti_fraction
-            dU_avgs[k] = np.mean(dU[k, 0: N_k[k]])
-            dU_stdv[k] = np.std(dU[k, 0: N_k[k]])
+            dU_avgs[k] = np.mean(dU[k, 0 : N_k[k]])
+            dU_stdv[k] = np.std(dU[k, 0 : N_k[k]])
             if method == "ti-block":
                 nearest_max = get_nearest_max(N_k[k])
                 dU_sems[k] = get_block_sem(dU[k, 0:nearest_max])
@@ -866,7 +890,7 @@
             if k > 0:
                 dl_intp = np.append(
                     dl_intp,
-                    np.linspace(dl_vals[k-1], dl_vals[k], num=100, endpoint=False),
+                    np.linspace(dl_vals[k - 1], dl_vals[k], num=100, endpoint=False),
                 )
 
         # Tack on the final value to the dl interpolation
@@ -884,7 +908,7 @@
 
             # Compute means for this fraction.
             frac_dU_avgs = np.array(
-                [np.mean(dU[k, 0: int(fraction * n)]) for k, n in enumerate(N_k)]
+                [np.mean(dU[k, 0 : int(fraction * n)]) for k, n in enumerate(N_k)]
             )
 
             # If self.exact_sem_each_ti_fraction, we're gonna recompute the SEM for each fraction
@@ -899,7 +923,7 @@
                 frac_dU_sems = np.zero([k], np.float64)
                 for k in range(num_win):
                     frac_dU_sems[k] = np.std(
-                        dU[k, 0: int(fraction * N_k[k])]
+                        dU[k, 0 : int(fraction * N_k[k])]
                     ) / np.sqrt(int(fraction * N_k[k]))
             else:
                 frac_dU_sems = dU_stdv / np.sqrt(fraction * dU_Nunc)
@@ -909,9 +933,10 @@
             )
 
             # Run bootstraps
-            self.results[phase][method]["fraction_fe_matrix"][fraction], self.results[
-                phase
-            ][method]["fraction_sem_matrix"][fraction] = integrate_bootstraps(
+            (
+                self.results[phase][method]["fraction_fe_matrix"][fraction],
+                self.results[phase][method]["fraction_sem_matrix"][fraction],
+            ) = integrate_bootstraps(
                 dl_vals, dU_samples, x_intp=dl_intp, matrix=self.ti_matrix
             )
 
@@ -921,7 +946,7 @@
                 self.results[phase][method]["fraction_fe_matrix"][fraction] *= -1.0
 
         if self.compute_roi:
-            logger.info(phase+": computing ROI for "+method)
+            logger.info(phase + ": computing ROI for " + method)
             # Do ROI calc
             max_fraction = np.max(self.fractions)
             # If we didn't compute fe/sem for fraction 1.0 already, do it now
@@ -954,8 +979,8 @@
                 # Deriv1----^---^---^        ^---^---^----Deriv2
 
                 # Deriv1:
-                deriv1 = (cnvg_sem_matrix[0, -1]-total_sem_matrix[0, -1]) / (
-                        -0.1 * dU_sems[k]
+                deriv1 = (cnvg_sem_matrix[0, -1] - total_sem_matrix[0, -1]) / (
+                    -0.1 * dU_sems[k]
                 )
 
                 # Deriv2:
@@ -967,7 +992,7 @@
                 # d( n_frames )          2g * (n_frames/g)**3/2
 
                 deriv2 = (
-                        -1.0 * dU_stdv[k] / (2.0 * g[k] * (N_k[k] / g[k]) ** (3.0 / 2.0))
+                    -1.0 * dU_stdv[k] / (2.0 * g[k] * (N_k[k] / g[k]) ** (3.0 / 2.0))
                 )
 
                 # ROI
@@ -989,7 +1014,9 @@
 
         for fraction in self.fractions:
             if fraction <= 0.0 or fraction > 1.0:
-                raise Exception("The fraction of data to analyze must be 0 < fraction ≤ 1.0.")
+                raise Exception(
+                    "The fraction of data to analyze must be 0 < fraction ≤ 1.0."
+                )
 
         for phase in phases:
             self.results[phase] = {}
@@ -1008,14 +1035,18 @@
                 prepared_data = self.prepare_data(phase)
                 self.results[phase][method]["n_frames"] = np.sum(prepared_data[1])
 
-                logger.debug("Running {} analysis on {} phase ...".format(method, phase))
+                logger.debug(
+                    "Running {} analysis on {} phase ...".format(method, phase)
+                )
 
                 if method == "mbar-block" or method == "mbar-autoc":
                     self.run_mbar(phase, prepared_data, method)
                 elif method == "ti-block":
                     self.run_ti(phase, prepared_data, method)
                 else:
-                    raise NotImplementedError(f"Method ({method}) is not implemented yet.")
+                    raise NotImplementedError(
+                        f"Method ({method}) is not implemented yet."
+                    )
 
                 # Store endpoint free energy and SEM for each fraction
                 self.results[phase][method]["fraction_n_frames"] = {}
@@ -1039,27 +1070,39 @@
                 # Set these higher level (total) values, which will be slightly
                 # easier to access
                 max_fraction = np.max(self.fractions)
-                self.results[phase][method]["fe_matrix"] = self.results[phase][method]["fraction_fe_matrix"][max_fraction]
-                self.results[phase][method]["sem_matrix"] = self.results[phase][method]["fraction_sem_matrix"][max_fraction]
-                self.results[phase][method]["fe"] = self.results[phase][method]["fe_matrix"][0, -1]
-                self.results[phase][method]["sem"] = self.results[phase][method]["sem_matrix"][0, -1]
+                self.results[phase][method]["fe_matrix"] = self.results[phase][method][
+                    "fraction_fe_matrix"
+                ][max_fraction]
+                self.results[phase][method]["sem_matrix"] = self.results[phase][method][
+                    "fraction_sem_matrix"
+                ][max_fraction]
+                self.results[phase][method]["fe"] = self.results[phase][method][
+                    "fe_matrix"
+                ][0, -1]
+                self.results[phase][method]["sem"] = self.results[phase][method][
+                    "sem_matrix"
+                ][0, -1]
 
                 if self.compute_largest_neighbor:
                     # Store convergence values, which are helpful for running
                     # simulations
                     windows = len(self.results[phase][method]["sem_matrix"])
-                    self.results[phase][method]["largest_neighbor"] = (np.ones([windows], np.float64) * -1.0)
+                    self.results[phase][method]["largest_neighbor"] = (
+                        np.ones([windows], np.float64) * -1.0
+                    )
                     logger.info(f"{phase}: computing largest_neighbor for {method}...")
                     for i in range(windows):
                         if i == 0:
-                            self.results[phase][method]["largest_neighbor"][i] = \
-                                self.results[phase][method]["sem_matrix"][i][i+1]
-                        elif i == windows-1:
-                            self.results[phase][method]["largest_neighbor"][i] = \
-                                self.results[phase][method]["sem_matrix"][i][i-1]
+                            self.results[phase][method]["largest_neighbor"][
+                                i
+                            ] = self.results[phase][method]["sem_matrix"][i][i + 1]
+                        elif i == windows - 1:
+                            self.results[phase][method]["largest_neighbor"][
+                                i
+                            ] = self.results[phase][method]["sem_matrix"][i][i - 1]
                         else:
-                            left = self.results[phase][method]["sem_matrix"][i][i-1]
-                            right = self.results[phase][method]["sem_matrix"][i][i+1]
+                            left = self.results[phase][method]["sem_matrix"][i][i - 1]
+                            right = self.results[phase][method]["sem_matrix"][i][i + 1]
                             if left > right:
                                 max_val = left
                             elif right > left:
@@ -1086,9 +1129,10 @@
         """
 
         if not restraints or restraints[0] is None:
-            raise ValueError("At minimum, a single distance restraint is necesarry to compute the work of releasing"
-                             " the guest to standard state."
-                             )
+            raise ValueError(
+                "At minimum, a single distance restraint is necesarry to compute the work of releasing"
+                " the guest to standard state."
+            )
 
         fcs = []
         targs = []
@@ -1175,14 +1219,14 @@
     """
     max_factors = 0
     if n % 2 == 0:
-        beg = n-100
+        beg = n - 100
         end = n
     else:
-        beg = n-101
-        end = n-1
+        beg = n - 101
+        end = n - 1
     if beg < 0:
         beg = 0
-    for i in range(beg, end+2, 2):
+    for i in range(beg, end + 2, 2):
         num_factors = len(get_factors(i))
         if num_factors >= max_factors:
             max_factors = num_factors
@@ -1218,23 +1262,23 @@
 
     # Store the SEM for each block size, except the last two size for which
     # there will only be two or one blocks total and thus very noisy.
-    sems = np.zeros([len(block_sizes)-2], np.float64)
+    sems = np.zeros([len(block_sizes) - 2], np.float64)
 
     # Check each block size except the last two.
-    for size_idx in range(len(block_sizes)-2):
+    for size_idx in range(len(block_sizes) - 2):
         # Check each block, the number of which is conveniently found as
         # the other number of the factor pair in block_sizes
-        num_blocks = block_sizes[-size_idx-1]
+        num_blocks = block_sizes[-size_idx - 1]
         for blk_idx in range(num_blocks):
             # Find the index for beg and end of data points for each block
             data_beg_idx = blk_idx * block_sizes[size_idx]
-            data_end_idx = (blk_idx+1) * block_sizes[size_idx]
+            data_end_idx = (blk_idx + 1) * block_sizes[size_idx]
             # Compute the mean of this block and store in array
             block_means[blk_idx] = np.mean(data_array[data_beg_idx:data_end_idx])
         # Compute the standard deviation across all blocks, devide by
         # num_blocks-1 for SEM
         sems[size_idx] = np.std(block_means[0:num_blocks], ddof=0) / np.sqrt(
-            num_blocks-1
+            num_blocks - 1
         )
         # Hmm or should ddof=1? I think 0, see Flyvbjerg -----^
 
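For reference, this is the Flyvbjerg-Petersen block-averaging estimator: the series is cut into blocks, and the SEM is the standard deviation of the block means divided by sqrt(num_blocks - 1); scanning block sizes until the estimate plateaus accounts for correlation in the data. A reduced sketch for a single block size:

import numpy as np

def block_sem(data, block_size):
    # SEM from block averages: std of block means / sqrt(n_blocks - 1).
    n_blocks = len(data) // block_size
    means = data[: n_blocks * block_size].reshape(n_blocks, block_size).mean(axis=1)
    return np.std(means, ddof=0) / np.sqrt(n_blocks - 1)

data = np.cumsum(np.random.normal(size=4000)) * 0.01  # correlated toy series
for size in (1, 10, 100):
    print(size, block_sem(data, size))  # rises with block size, then plateaus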
@@ -1242,7 +1286,7 @@
 
 
 def get_subsampled_indices(N, g, conservative=False):
-    """ Get the indices of independent (subsampled) frames. This is adapted from the implementation in `pymbar`.
+    """Get the indices of independent (subsampled) frames. This is adapted from the implementation in `pymbar`.
 
     Parameters
     ----------
@@ -1310,14 +1354,18 @@
         raise RuntimeError("Trajectory path should be a `str` or `list`.")
     if isinstance(prmtop, str) and not single_prmtop:
         if not os.path.isfile(os.path.join(window, prmtop)):
-            raise FileNotFoundError(f"Cannot find `prmtop` file: {os.path.join(window, prmtop)}")
+            raise FileNotFoundError(
+                f"Cannot find `prmtop` file: {os.path.join(window, prmtop)}"
+            )
         logger.debug(f"Loading {os.path.join(window, prmtop)} and {trajectory_path}")
         try:
             traj = pt.iterload(trajectory_path, os.path.join(window, prmtop))
         except ValueError as e:
             formatted_exception = traceback.format_exception(None, e, e.__traceback__)
-            logger.info(f"Failed trying to load {os.path.join(window, prmtop)} and {trajectory_path}: "
-                        f"{formatted_exception}")
+            logger.info(
+                f"Failed trying to load {os.path.join(window, prmtop)} and {trajectory_path}: "
+                f"{formatted_exception}"
+            )
     elif isinstance(prmtop, str) and single_prmtop:
         traj = pt.iterload(trajectory_path, os.path.join(prmtop))
     else:
@@ -1348,16 +1396,16 @@
     """
 
     if (
-            restraint.mask1
-            and restraint.mask2
-            and not restraint.mask3
-            and not restraint.mask4
+        restraint.mask1
+        and restraint.mask2
+        and not restraint.mask3
+        and not restraint.mask4
     ):
         data = pt.distance(
             traj, " ".join([restraint.mask1, restraint.mask2]), image=True
         )
     elif (
-            restraint.mask1 and restraint.mask2 and restraint.mask3 and not restraint.mask4
+        restraint.mask1 and restraint.mask2 and restraint.mask3 and not restraint.mask4
     ):
         data = pt.angle(
             traj, " ".join([restraint.mask1, restraint.mask2, restraint.mask3])
@@ -1373,19 +1421,19 @@
 
 
 def ref_state_work(
-        temperature,
-        r_fc,
-        r_tg,
-        th_fc,
-        th_tg,
-        ph_fc,
-        ph_tg,
-        a_fc,
-        a_tg,
-        b_fc,
-        b_tg,
-        g_fc,
-        g_tg,
+    temperature,
+    r_fc,
+    r_tg,
+    th_fc,
+    th_tg,
+    ph_fc,
+    ph_tg,
+    a_fc,
+    a_tg,
+    b_fc,
+    b_tg,
+    g_fc,
+    g_tg,
 ):
     """
     Computes the free energy to release a molecule from some restrained translational
@@ -1466,7 +1514,7 @@
     # Distance Integration Function
     def dist_int(RT, fc, targ):
         def potential(arange, RT, fc, targ):
-            return (arange ** 2) * np.exp((-1.0 / RT) * fc * (arange-targ) ** 2)
+            return (arange ** 2) * np.exp((-1.0 / RT) * fc * (arange - targ) ** 2)
 
         arange = np.arange(0.0, 100.0, 0.0001)
         return np.trapz(potential(arange, RT, fc, targ), arange)
        return np.trapz(potential(arange, RT, fc, targ), arange)
@@ -1474,7 +1522,7 @@
     # Angle Integration Function
     def ang_int(RT, fc, targ):
         def potential(arange, RT, fc, targ):
-            return np.sin(arange) * np.exp((-1.0 / RT) * fc * (arange-targ) ** 2)
+            return np.sin(arange) * np.exp((-1.0 / RT) * fc * (arange - targ) ** 2)
 
         arange = np.arange(0.0, np.pi, 0.00005)
         return np.trapz(potential(arange, RT, fc, targ), arange)
        return np.trapz(potential(arange, RT, fc, targ), arange)
@@ -1482,10 +1530,10 @@
     # Torsion Integration Function
     def tors_int(RT, fc, targ):
         def potential(arange, RT, fc, targ):
-            return np.exp((-1.0 / RT) * fc * (arange-targ) ** 2)
+            return np.exp((-1.0 / RT) * fc * (arange - targ) ** 2)
 
         # Note, because of periodicity, I'm gonna wrap +/- pi around target for integration.
-        arange = np.arange(targ-np.pi, targ+np.pi, 0.00005)
+        arange = np.arange(targ - np.pi, targ + np.pi, 0.00005)
         return np.trapz(potential(arange, RT, fc, targ), arange)
 
     # Distance restraint, r
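Each of these helpers evaluates one configurational integral of the restraint's Boltzmann factor, with the appropriate Jacobian (r**2 for the distance, sin(theta) for angles, none for torsions). A standalone version of the distance integral, matching the form used by dist_int above:

import numpy as np

RT = 1.987204e-3 * 298.15  # kcal/mol at 298.15 K

def dist_int(RT, fc, targ):
    # integral of r^2 * exp(-fc * (r - targ)^2 / RT) dr for r in [0, 100) Angstrom
    r = np.arange(0.0, 100.0, 0.0001)
    return np.trapz(r ** 2 * np.exp((-1.0 / RT) * fc * (r - targ) ** 2), r)

print(dist_int(RT, 5.0, 6.0))  # effective volume sampled under the restraint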
@@ -1577,7 +1625,7 @@
         x_intp = np.zeros([0], np.float64)
         for i in range(1, num_x):
             x_intp = np.append(
-                x_intp, np.linspace(x[i-1], x[i], num=100, endpoint=False)
+                x_intp, np.linspace(x[i - 1], x[i], num=100, endpoint=False)
             )
             x_idxs = len(x_intp)
         # Tack on the final value onto the interpolation
@@ -1592,7 +1640,7 @@
         if i != num_x:
             raise Exception(
                 "One or more x values seem to be missing in the x_intp array,"
-                +" or one of the lists is not monotonically increasing!"
+                + " or one of the lists is not monotonically increasing!"
             )
 
     cycles = len(ys)
@@ -1610,14 +1658,14 @@
             #            for i in range(0, num_x):
             #                for j in range(i+1, num_x):
             #                    int_matrix[i, j, cycle] = np.trapz( y_intp, x_intp )
-            int_matrix[0, num_x-1, cycle] = np.trapz(y_intp, x_intp)
+            int_matrix[0, num_x - 1, cycle] = np.trapz(y_intp, x_intp)
     else:
         for cycle in range(cycles):
             intp_func = Akima1DInterpolator(x, ys[cycle])
             y_intp = intp_func(x_intp)
             for i in range(0, num_x):
-                for j in range(i+1, num_x):
-                    if matrix == "diagonal" and i != 0 and j-i > 1:
+                for j in range(i + 1, num_x):
+                    if matrix == "diagonal" and i != 0 and j - i > 1:
                         continue
                     beg = x_idxs[i]
                     end = x_idxs[j]
@@ -1630,12 +1678,12 @@
 
     # Second pass to compute the mean and standard deviation.
     for i in range(0, num_x):
-        for j in range(i+1, num_x):
+        for j in range(i + 1, num_x):
             # If quick_ti_matrix, only populate first row and neighbors in
             # matrix
-            if matrix == "diagonal" and i != 0 and j-i > 1:
+            if matrix == "diagonal" and i != 0 and j - i > 1:
                 continue
-            if matrix == "endpoints" and i != 0 and j != num_x-1:
+            if matrix == "endpoints" and i != 0 and j != num_x - 1:
                 continue
             avg_matrix[i, j] = np.mean(int_matrix[i, j])
             avg_matrix[j, i] = -1.0 * avg_matrix[i, j]
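integrate_bootstraps resamples the per-window means, fits each bootstrap cycle with an Akima spline on the dense x_intp grid, and integrates by the trapezoidal rule; the matrix entries are the mean and standard deviation over cycles. A reduced sketch of that procedure with toy data:

import numpy as np
from scipy.interpolate import Akima1DInterpolator

x = np.linspace(0.0, 1.0, 11)  # lambda windows
y = 2.0 * x  # mean dU/dlambda per window (toy data; exact integral is 1.0)
sem = np.full_like(x, 0.05)  # per-window uncertainty

x_dense = np.linspace(0.0, 1.0, 1001)
cycles = 200
estimates = np.empty(cycles)
for cycle in range(cycles):
    y_sample = np.random.normal(y, sem)  # bootstrap the window means
    estimates[cycle] = np.trapz(Akima1DInterpolator(x, y_sample)(x_dense), x_dense)

print(estimates.mean(), estimates.std())  # free energy estimate and bootstrap SEM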

@@ -1,4 +1,3 @@
 __all__ = ["DAT_restraint", "static_DAT_restraint"]
 
 from paprika.restraints.restraints import DAT_restraint, static_DAT_restraint
-

@@ -1,2 +1,2 @@
 class Simulate(object):
-    pass

@@ -1,9 +1,10 @@
 import os as os
 
 import parmed as pmd
-from paprika import utils
 from parmed.structure import Structure as ParmedStructureClass
 
+from paprika import utils
+
 
 def add_dummy(
     structure,
@@ -224,8 +225,12 @@
     for dummy_atom in resname:
         residue = f":{dummy_atom}"
         dummy_atoms[dummy_atom]["pos"] = structure[residue].coordinates[0]
-        dummy_atoms[dummy_atom]["mass"] = [atom.mass for atom in structure[residue].atoms][0]
-        dummy_atoms[dummy_atom]["idx"] = utils.index_from_mask(structure, residue, amber_index=serial)[0]
+        dummy_atoms[dummy_atom]["mass"] = [
+            atom.mass for atom in structure[residue].atoms
+        ][0]
+        dummy_atoms[dummy_atom]["idx"] = utils.index_from_mask(
+            structure, residue, amber_index=serial
+        )[0]
         dummy_atoms[dummy_atom]["idx_type"] = "serial" if serial else "index"
 
     return dummy_atoms

@@ -296,8 +296,8 @@
     sys.neutralize = False
     sys.build()
     with open(
-            os.path.join(os.path.dirname(__file__), "../data/cb6-but/REF_cb6-but-dum.rst7"),
-            "r",
+        os.path.join(os.path.dirname(__file__), "../data/cb6-but/REF_cb6-but-dum.rst7"),
+        "r",
     ) as f:
         contents = f.read()
         reference = [float(i) for i in contents.split()[2:]]
@@ -331,9 +331,13 @@
     sys.neutralize = False
     sys.build()
 
-    but = pmd.load_file(os.path.join(temporary_directory, sys.output_prefix + ".prmtop"))
+    but = pmd.load_file(
+        os.path.join(temporary_directory, sys.output_prefix + ".prmtop")
+    )
     assert np.allclose(but["@H="].atoms[0].mass, 1.008)
 
     sys.repartition_hydrogen_mass()
-    but = pmd.load_file(os.path.join(temporary_directory, sys.output_prefix + ".prmtop"))
+    but = pmd.load_file(
+        os.path.join(temporary_directory, sys.output_prefix + ".prmtop")
+    )
     assert np.allclose(but["@H="].atoms[0].mass, 3.024)

@@ -1,4 +1,3 @@
-
 # This file helps to compute a version number in source trees obtained from
 # git-archive tarball (such as those provided by githubs download-from-tag
 # feature). Distribution tarballs (built by setup.py sdist) and build
@@ -58,17 +57,18 @@
 
 def register_vcs_handler(vcs, method):  # decorator
     """Decorator to mark a method as the handler for a particular VCS."""
+
     def decorate(f):
         """Store f in HANDLERS[vcs][method]."""
         if vcs not in HANDLERS:
             HANDLERS[vcs] = {}
         HANDLERS[vcs][method] = f
         return f
+
     return decorate
 
 
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
-                env=None):
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
     """Call the given command(s)."""
     assert isinstance(commands, list)
     p = None
@@ -76,10 +76,13 @@
         try:
             dispcmd = str([c] + args)
             # remember shell=False, so use git.cmd on windows, not just git
-            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
-                                 stdout=subprocess.PIPE,
-                                 stderr=(subprocess.PIPE if hide_stderr
-                                         else None))
+            p = subprocess.Popen(
+                [c] + args,
+                cwd=cwd,
+                env=env,
+                stdout=subprocess.PIPE,
+                stderr=(subprocess.PIPE if hide_stderr else None),
+            )
             break
         except EnvironmentError:
             e = sys.exc_info()[1]
@@ -116,16 +119,22 @@
     for i in range(3):
         dirname = os.path.basename(root)
         if dirname.startswith(parentdir_prefix):
-            return {"version": dirname[len(parentdir_prefix):],
-                    "full-revisionid": None,
-                    "dirty": False, "error": None, "date": None}
+            return {
+                "version": dirname[len(parentdir_prefix) :],
+                "full-revisionid": None,
+                "dirty": False,
+                "error": None,
+                "date": None,
+            }
         else:
             rootdirs.append(root)
             root = os.path.dirname(root)  # up a level
 
     if verbose:
-        print("Tried directories %s but none started with prefix %s" %
-              (str(rootdirs), parentdir_prefix))
+        print(
+            "Tried directories %s but none started with prefix %s"
+            % (str(rootdirs), parentdir_prefix)
+        )
     raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
 
 
@@ -181,7 +190,7 @@
     # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
     # just "foo-1.0". If we see a "tag: " prefix, prefer those.
     TAG = "tag: "
-    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
+    tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
     if not tags:
         # Either we're using git < 1.8.3, or there really are no tags. We use
         # a heuristic: assume all version tags have a digit. The old git %d
@@ -190,7 +199,7 @@
         # between branches and tags. By ignoring refnames without digits, we
         # filter out many common branch names like "release" and
         # "stabilization", as well as "HEAD" and "master".
-        tags = set([r for r in refs if re.search(r'\d', r)])
+        tags = set([r for r in refs if re.search(r"\d", r)])
         if verbose:
             print("discarding '%s', no digits" % ",".join(refs - tags))
     if verbose:
@@ -198,19 +207,26 @@
     for ref in sorted(tags):
         # sorting will prefer e.g. "2.0" over "2.0rc1"
         if ref.startswith(tag_prefix):
-            r = ref[len(tag_prefix):]
+            r = ref[len(tag_prefix) :]
             if verbose:
                 print("picking %s" % r)
-            return {"version": r,
-                    "full-revisionid": keywords["full"].strip(),
-                    "dirty": False, "error": None,
-                    "date": date}
+            return {
+                "version": r,
+                "full-revisionid": keywords["full"].strip(),
+                "dirty": False,
+                "error": None,
+                "date": date,
+            }
     # no suitable tags, so version is "0+unknown", but full hex is still there
     if verbose:
         print("no suitable tags, using unknown + full revision id")
-    return {"version": "0+unknown",
-            "full-revisionid": keywords["full"].strip(),
-            "dirty": False, "error": "no suitable tags", "date": None}
+    return {
+        "version": "0+unknown",
+        "full-revisionid": keywords["full"].strip(),
+        "dirty": False,
+        "error": "no suitable tags",
+        "date": None,
+    }
 
 
 @register_vcs_handler("git", "pieces_from_vcs")
@@ -225,8 +241,7 @@
     if sys.platform == "win32":
         GITS = ["git.cmd", "git.exe"]
 
-    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
-                          hide_stderr=True)
+    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
     if rc != 0:
         if verbose:
             print("Directory %s not under git control" % root)
@@ -234,10 +249,19 @@
 
     # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
     # if there isn't one, this yields HEX[-dirty] (no NUM)
-    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
-                                          "--always", "--long",
-                                          "--match", "%s*" % tag_prefix],
-                                   cwd=root)
+    describe_out, rc = run_command(
+        GITS,
+        [
+            "describe",
+            "--tags",
+            "--dirty",
+            "--always",
+            "--long",
+            "--match",
+            "%s*" % tag_prefix,
+        ],
+        cwd=root,
+    )
     # --long was added in git-1.5.5
     if describe_out is None:
         raise NotThisMethod("'git describe' failed")
@@ -260,17 +284,16 @@
     dirty = git_describe.endswith("-dirty")
     pieces["dirty"] = dirty
     if dirty:
-        git_describe = git_describe[:git_describe.rindex("-dirty")]
+        git_describe = git_describe[: git_describe.rindex("-dirty")]
 
     # now we have TAG-NUM-gHEX or HEX
 
     if "-" in git_describe:
         # TAG-NUM-gHEX
-        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
+        mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
         if not mo:
             # unparseable. Maybe git-describe is misbehaving?
-            pieces["error"] = ("unable to parse git-describe output: '%s'"
-                               % describe_out)
+            pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
             return pieces
 
         # tag
@@ -279,10 +302,12 @@
             if verbose:
                 fmt = "tag '%s' doesn't start with prefix '%s'"
                 print(fmt % (full_tag, tag_prefix))
-            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
-                               % (full_tag, tag_prefix))
+            pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
+                full_tag,
+                tag_prefix,
+            )
             return pieces
-        pieces["closest-tag"] = full_tag[len(tag_prefix):]
+        pieces["closest-tag"] = full_tag[len(tag_prefix) :]
 
         # distance: number of commits since tag
         pieces["distance"] = int(mo.group(2))
@@ -293,13 +318,13 @@
     else:
         # HEX: no tags
         pieces["closest-tag"] = None
-        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
-                                    cwd=root)
+        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
         pieces["distance"] = int(count_out)  # total number of commits
 
     # commit date: see ISO-8601 comment in git_versions_from_keywords()
-    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
-                       cwd=root)[0].strip()
+    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
+        0
+    ].strip()
     pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
 
     return pieces
@@ -330,8 +355,7 @@
                 rendered += ".dirty"
     else:
         # exception #1
-        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
-                                          pieces["short"])
+        rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
         if pieces["dirty"]:
             rendered += ".dirty"
     return rendered
@@ -445,11 +469,13 @@
 def render(pieces, style):
     """Render the given version pieces into the requested style."""
     if pieces["error"]:
-        return {"version": "unknown",
-                "full-revisionid": pieces.get("long"),
-                "dirty": None,
-                "error": pieces["error"],
-                "date": None}
+        return {
+            "version": "unknown",
+            "full-revisionid": pieces.get("long"),
+            "dirty": None,
+            "error": pieces["error"],
+            "date": None,
+        }
 
     if not style or style == "default":
         style = "pep440"  # the default
@@ -469,9 +495,13 @@
     else:
         raise ValueError("unknown style '%s'" % style)
 
-    return {"version": rendered, "full-revisionid": pieces["long"],
-            "dirty": pieces["dirty"], "error": None,
-            "date": pieces.get("date")}
+    return {
+        "version": rendered,
+        "full-revisionid": pieces["long"],
+        "dirty": pieces["dirty"],
+        "error": None,
+        "date": pieces.get("date"),
+    }
 
 
 def get_versions():
@@ -485,8 +515,7 @@
     verbose = cfg.verbose
 
     try:
-        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
-                                          verbose)
+        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
     except NotThisMethod:
         pass
 
@@ -495,13 +524,16 @@
         # versionfile_source is the relative path from the top of the source
         # tree (where the .git directory might live) to this file. Invert
         # this to find the root from __file__.
-        for i in cfg.versionfile_source.split('/'):
+        for i in cfg.versionfile_source.split("/"):
             root = os.path.dirname(root)
     except NameError:
-        return {"version": "0+unknown", "full-revisionid": None,
-                "dirty": None,
-                "error": "unable to find root of source tree",
-                "date": None}
+        return {
+            "version": "0+unknown",
+            "full-revisionid": None,
+            "dirty": None,
+            "error": "unable to find root of source tree",
+            "date": None,
+        }
 
     try:
         pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
@@ -515,6 +547,10 @@
     except NotThisMethod:
         pass
 
-    return {"version": "0+unknown", "full-revisionid": None,
-            "dirty": None,
-            "error": "unable to compute version", "date": None}
+    return {
+        "version": "0+unknown",
+        "full-revisionid": None,
+        "dirty": None,
+        "error": "unable to compute version",
+        "date": None,
+    }

@@ -26,7 +26,9 @@
     rest.amber_index = True
     rest.continuous_apr = False
     rest.auto_apr = False
-    rest.topology = os.path.join(os.path.dirname(__file__), "../data/cb6-but/cb6-but-notcentered.pdb")
+    rest.topology = os.path.join(
+        os.path.dirname(__file__), "../data/cb6-but/cb6-but-notcentered.pdb"
+    )
     rest.mask1 = ":CB6@O,O2,O4,O6,O8,O10"
     rest.mask2 = ":BUT@C3"
     rest.attach["target"] = 3.0
@@ -53,7 +55,9 @@
     rest1.amber_index = True
     rest1.continuous_apr = False
     rest1.auto_apr = False
-    rest1.topology = os.path.join(os.path.dirname(__file__), "../data/cb6-but/cb6-but-notcentered.pdb")
+    rest1.topology = os.path.join(
+        os.path.dirname(__file__), "../data/cb6-but/cb6-but-notcentered.pdb"
+    )
     rest1.mask1 = ":CB6@O,O2,O4,O6,O8,O10"
     rest1.mask2 = ":BUT@C3"
     rest1.attach["target"] = 3.0
@@ -74,7 +78,9 @@
     rest2.amber_index = True
     rest2.continuous_apr = False
     rest2.auto_apr = False
-    rest2.topology = os.path.join(os.path.dirname(__file__), "../data/cb6-but/cb6-but-notcentered.pdb")
+    rest2.topology = os.path.join(
+        os.path.dirname(__file__), "../data/cb6-but/cb6-but-notcentered.pdb"
+    )
     rest2.mask1 = ":CB6@O,O2,O4,O6,O8,O10"
     rest2.mask2 = ":BUT@C3"
     rest2.mask3 = ":BUT@C"

@@ -8,9 +8,7 @@
 _PI_ = np.pi
 
 
-def apply_positional_restraints(
-    coordinate_path: str, system, force_group: int = 15
-):
+def apply_positional_restraints(coordinate_path: str, system, force_group: int = 15):
     """A utility function which will add OpenMM harmonic positional restraints to
     any dummy atoms found within a system to restrain them to their initial
     positions.
@@ -101,7 +99,10 @@
             / unit.radian ** 2
         )
         flat_bottom_force.addAngle(
-            restraint.index1[0], restraint.index2[0], restraint.index3[0], [k, theta_0],
+            restraint.index1[0],
+            restraint.index2[0],
+            restraint.index3[0],
+            [k, theta_0],
         )
         system.addForce(flat_bottom_force)
         if force_group:
@@ -123,7 +124,9 @@
             / unit.radian ** 2
         )
         flat_bottom_force.addBond(
-            restraint.index1[0], restraint.index2[0], [k, r_0],
+            restraint.index1[0],
+            restraint.index2[0],
+            [k, r_0],
         )
         system.addForce(flat_bottom_force)
         if force_group:
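The addAngle/addBond calls reformatted above parameterize OpenMM custom forces. For orientation, a hedged sketch of the kind of one-sided flat-bottom distance restraint such per-bond parameters feed (indices and values are illustrative, not taken from this file):

import simtk.openmm as openmm
import simtk.unit as unit

# Zero energy below r_0, harmonic beyond it.
flat_bottom_force = openmm.CustomBondForce("step(r - r_0) * k * (r - r_0)^2")
flat_bottom_force.addPerBondParameter("k")
flat_bottom_force.addPerBondParameter("r_0")

k = 10.0 * unit.kilocalorie_per_mole / unit.angstrom ** 2
r_0 = 3.0 * unit.angstrom
flat_bottom_force.addBond(0, 1, [k, r_0])  # restrain atoms 0 and 1
# system.addForce(flat_bottom_force)  # then attach to an existing openmm.System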

@@ -101,7 +101,7 @@
 
 def get_theta(structure, mask1, mask2, axis):
     """Get the angle (theta) between the vector formed by two masks and an axis.
-    
+
     Parameters
     ----------
     structure : parmed.Structure
@@ -112,7 +112,7 @@
         Selection of second set of atoms
     axis : str
         Axis: x, y, or z
-    
+
     Returns
     -------
     float
@@ -148,14 +148,14 @@
 
 def check_coordinates(structure, mask):
     """Return the coordinates of an atom selection.
-    
+
     Parameters
     ----------
     structure : parmed.Structure
         Molecular structure containing coordinates
     mask : str
         Atom selection
-    
+
     Returns
     -------
     np.array
@@ -172,14 +172,14 @@
 
 def offset_structure(structure, offset):
     """Return a structure whose coordinates have been offset.
-    
+
     Parameters
     ----------
     structure : parmed.Structure
         Molecular structure containing coordinates
     offset : float
         The offset that will be added to *every* atom in the structure
-    
+
     Returns
     -------
     :py:class:`parmed.Structure`

@@ -9,8 +9,7 @@
 from paprika.restraints import amber
 from paprika.restraints.restraints import create_window_list
 from paprika.tests import addons
-from paprika.utils import parse_mden
-from paprika.utils import parse_mdout
+from paprika.utils import parse_mden, parse_mdout
 
 
 @pytest.fixture(scope="function", autouse=True)
@@ -51,7 +50,7 @@
     for window in window_list:
         os.makedirs(os.path.join(windows_directory, window))
         with open(
-                os.path.join(windows_directory, window, "restraints.in"), "a"
+            os.path.join(windows_directory, window, "restraints.in"), "a"
         ) as file:
             string = amber.amber_restraint_line(restraint, window)
             file.write(string)
@@ -83,8 +82,8 @@
                 structure=True,
             )
             target_difference = (
-                    restraint.phase["pull"]["targets"][int(window[1:])]
-                    - restraint.phase["pull"]["targets"][0]
+                restraint.phase["pull"]["targets"][int(window[1:])]
+                - restraint.phase["pull"]["targets"][0]
             )
 
             for atom in structure.atoms:
@@ -137,8 +136,12 @@
     simulation = Simulation()
     simulation.path = os.path.join("tmp")
 
-    shutil.copy(os.path.join(os.path.dirname(__file__), "../data/k-cl/k-cl.prmtop"), "tmp")
-    shutil.copy(os.path.join(os.path.dirname(__file__), "../data/k-cl/k-cl.rst7"), "tmp")
+    shutil.copy(
+        os.path.join(os.path.dirname(__file__), "../data/k-cl/k-cl.prmtop"), "tmp"
+    )
+    shutil.copy(
+        os.path.join(os.path.dirname(__file__), "../data/k-cl/k-cl.rst7"), "tmp"
+    )
 
     simulation.executable = "sander"
     simulation.restraint_file = None

@@ -1,11 +1,11 @@
+import logging as log
 import os as os
 import re as re
 import subprocess as sp
-import logging as log
+
 import numpy as np
 import parmed as pmd
 
-
 N_A = 6.0221409 * 10 ** 23
 ANGSTROM_CUBED_TO_LITERS = 1 * 10 ** -27
 
@@ -341,7 +341,7 @@
         # to the target_waters. This will control when we can start manually deleting
         # waters rather than adjusting the buffer_value.
         if self.manual_switch_thresh is None:
-            self.manual_switch_thresh = int(np.ceil(self.target_waters ** (1. / 3.)))
+            self.manual_switch_thresh = int(np.ceil(self.target_waters ** (1.0 / 3.0)))
             if self.manual_switch_thresh < 12:
                 self.manual_switch_thresh = 12
             log.debug(
@@ -728,13 +728,13 @@
 
     def repartition_hydrogen_mass(self, options=None):
         """
-        Repartitions the masses of Hydrogen atoms in the system by a factor 3 and 
+        Repartitions the masses of Hydrogen atoms in the system by a factor 3 and
         overwrites the prmtop file: "self.output_path/self.output_prefix.prmtop"
 
         Parameters
         ----------
         options : str
-            Optional keyword(s) for the repartitioning and following the 
+            Optional keyword(s) for the repartitioning and following the
             parmed.tools.actions documentation the usage is '[<mass>] [dowater]'.
 
         """
@@ -746,4 +746,3 @@
         pmd.tools.actions.HMassRepartition(structure, arg_list=options).execute()
 
         structure.save(prmtop, overwrite=True)
-
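HMassRepartition shifts mass from heavy atoms onto their bonded hydrogens (to roughly 3 amu by default), which allows longer MD time steps without changing the total system mass. A minimal ParmEd usage sketch mirroring the call above (file names are placeholders):

import parmed as pmd

structure = pmd.load_file("system.prmtop", "system.rst7")
pmd.tools.actions.HMassRepartition(structure, arg_list="3.024").execute()
structure.save("system.hmr.prmtop", overwrite=True)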

@@ -3,6 +3,7 @@
 import numpy as np
 import parmed as pmd
 import pytraj as pt
+
 from paprika import utils
 
 logger = logging.getLogger(__name__)
Files                        Coverage
paprika                      77.20%
setup.py                     16.08%
Project Totals (30 files)    74.02%
# Codecov configuration to make it a bit less noisy
coverage:
  status:
    patch: false
    project:
      default:
        threshold: 50%
comment:
  layout: "header"
  require_changes: false
  branches: null
  behavior: default
  flags: null
  paths: null