@@ -4,13 +4,14 @@
Loading
4 4
5 5
__all__ = ["pagerank", "pagerank_numpy", "pagerank_scipy", "google_matrix"]
6 6
7 +
7 8
class PageRankResult(dict):
    """Mapping of node -> PageRank score with convergence analytics attached.

    Behaves exactly like the plain dict of scores that ``pagerank``
    historically returned, while additionally exposing diagnostics
    gathered during power iteration as attributes.

    Parameters
    ----------
    pagerank_score : dict
        Final node -> PageRank score mapping; populates the dict portion.
    analytics_info : dict
        Must contain the keys ``"x"``, ``"err"``, ``"iterations"`` and
        ``"return_message"`` as built by ``pagerank``.
    """

    def __init__(self, pagerank_score, analytics_info) -> None:
        # The dict portion holds the final score mapping, so existing
        # callers that treat the result as a plain dict keep working.
        super().__init__(pagerank_score)
        # Score vector snapshot per iteration (empty unless analytics=True).
        self.pagerank_iterations = analytics_info["x"]
        # L1 error per iteration (empty unless analytics=True).
        self.convergence = analytics_info["err"]
        # Total number of power iterations performed.
        self.iterations = analytics_info["iterations"]
        # Human-readable convergence/failure summary.
        self.return_message = analytics_info["return_message"]
14 15
15 16
16 17
@not_implemented_for("multigraph")
@@ -23,7 +24,7 @@
Loading
23 24
    nstart=None,
24 25
    weight="weight",
25 26
    dangling=None,
26 -
    analytics=False
27 +
    analytics=False,
27 28
):
28 29
    """Returns the PageRank of the nodes in the graph.
29 30
@@ -153,11 +154,7 @@
Loading
153 154
    dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0]
154 155
155 156
    # power iteration: make up to max_iter iterations
156 -
    analytics_info=dict(
157 -
        x=[],
158 -
        err=[],
159 -
        iterations=0
160 -
    )
157 +
    analytics_info = dict(x=[], err=[], iterations=0)
161 158
    for _ in range(max_iter):
162 159
        xlast = x
163 160
        x = dict.fromkeys(xlast.keys(), 0)
@@ -171,17 +168,22 @@
Loading
171 168
        # check convergence, l1 norm
172 169
        err = sum([abs(x[n] - xlast[n]) for n in x])
173 170
        if analytics:
174 -
            analytics_info['x'].append(x)
175 -
            analytics_info['err'].append(err)
176 -
        analytics_info['iterations']+=1
171 +
            analytics_info["x"].append(x)
172 +
            analytics_info["err"].append(err)
173 +
        analytics_info["iterations"] += 1
177 174
        if err < N * tol:
178 -
            analytics_info['return_message'] = f"iteration converged within {analytics_info['iterations']} iterations"
175 +
            analytics_info[
176 +
                "return_message"
177 +
            ] = f"iteration converged within {analytics_info['iterations']} iterations"
179 178
            return PageRankResult(x, analytics_info)
180 179
    if not analytics:
181 180
        raise nx.PowerIterationFailedConvergence(max_iter)
182 -
    analytics_info['return_message']=f"power iteration failed to converge within {max_iter} iterations"
181 +
    analytics_info[
182 +
        "return_message"
183 +
    ] = f"power iteration failed to converge within {max_iter} iterations"
183 184
    return PageRankResult(dict(), analytics_info)
184 185
186 +
185 187
def google_matrix(
186 188
    G, alpha=0.85, personalization=None, nodelist=None, weight="weight", dangling=None
187 189
):

@@ -4,16 +4,18 @@
Loading
4 4
5 5
__all__ = ["hits", "hits_numpy", "hits_scipy", "authority_matrix", "hub_matrix"]
6 6
7 +
7 8
class HitsResult(tuple):
    """``(hubs, authorities)`` tuple with convergence analytics attached.

    Unpacks exactly like the 2-tuple ``(h, a)`` that ``hits`` historically
    returned, while additionally exposing diagnostics gathered during
    power iteration as attributes.

    Parameters
    ----------
    hub_score : dict
        Final node -> hub score mapping (element 0 of the tuple).
    authority_score : dict
        Final node -> authority score mapping (element 1 of the tuple).
    analytics_info : dict
        Must contain the keys ``"h"``, ``"a"``, ``"err"``, ``"iterations"``
        and ``"return_message"`` as built by ``hits``.
    """

    def __new__(cls, hub_score, authority_score, analytics_info) -> tuple:
        # tuple is immutable, so the (hubs, authorities) payload must be
        # installed here in __new__; analytics_info is consumed in __init__.
        return super().__new__(cls, (hub_score, authority_score))

    def __init__(self, hub_score, authority_score, analytics_info) -> None:
        # Hub score snapshot per iteration (empty unless analytics=True).
        self.hub_iterations = analytics_info["h"]
        # Authority score snapshot per iteration (empty unless analytics=True).
        self.authority_iterations = analytics_info["a"]
        # L1 error per iteration (empty unless analytics=True).
        self.convergence = analytics_info["err"]
        # Total number of power iterations performed.
        self.iterations = analytics_info["iterations"]
        # Human-readable convergence/failure summary.
        self.return_message = analytics_info["return_message"]
18 +
17 19
18 20
def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True, analytics=False):
19 21
    """Returns HITS hubs and authorities values for nodes.
@@ -38,7 +40,7 @@
Loading
38 40
39 41
    normalized : bool (default=True)
40 42
       Normalize results by the sum of all of the values.
41 -
    
43 +
42 44
    analytics : bool (default=False)
43 45
        Store the authority and hub scores and error delta of each Iteration.
44 46
        Iteration values are not normalized.
@@ -96,12 +98,7 @@
Loading
96 98
        s = 1.0 / sum(h.values())
97 99
        for k in h:
98 100
            h[k] *= s
99 -
    analytics_info=dict(
100 -
        h=[],
101 -
        a=[],
102 -
        err=[],
103 -
        iterations=0
104 -
    )
101 +
    analytics_info = dict(h=[], a=[], err=[], iterations=0)
105 102
    for _ in range(max_iter):  # power iteration: make up to max_iter iterations
106 103
        hlast = h
107 104
        h = dict.fromkeys(hlast.keys(), 0)
@@ -122,22 +119,26 @@
Loading
122 119
        # normalize vector
123 120
        s = 1.0 / max(a.values())
124 121
        for n in a:
125 -
            a[n] *= s        
122 +
            a[n] *= s
126 123
        # check convergence, l1 norm
127 124
        err = sum([abs(h[n] - hlast[n]) for n in h])
128 125
        if analytics:
129 -
            analytics_info['a'].append(a)
130 -
            analytics_info['h'].append(h)
131 -
            analytics_info['err'].append(err)
132 -
        analytics_info['iterations']+=1
126 +
            analytics_info["a"].append(a)
127 +
            analytics_info["h"].append(h)
128 +
            analytics_info["err"].append(err)
129 +
        analytics_info["iterations"] += 1
133 130
        if err < tol:
134 -
            analytics_info['return_message']=f"iteration converged within {analytics_info['iterations']} iterations"
131 +
            analytics_info[
132 +
                "return_message"
133 +
            ] = f"iteration converged within {analytics_info['iterations']} iterations"
135 134
            break
136 135
    else:
137 136
        if not analytics:
138 137
            raise nx.PowerIterationFailedConvergence(max_iter)
139 -
        analytics_info['return_message']=f"power iteration failed to converge within {max_iter} iterations"
140 -
        return HitsResult(dict(),dict(),analytics_info)
138 +
        analytics_info[
139 +
            "return_message"
140 +
        ] = f"power iteration failed to converge within {max_iter} iterations"
141 +
        return HitsResult(dict(), dict(), analytics_info)
141 142
    if normalized:
142 143
        s = 1.0 / sum(a.values())
143 144
        for n in a:
@@ -145,7 +146,7 @@
Loading
145 146
        s = 1.0 / sum(h.values())
146 147
        for n in h:
147 148
            h[n] *= s
148 -
    return HitsResult(h,a,analytics_info)
149 +
    return HitsResult(h, a, analytics_info)
149 150
150 151
151 152
def authority_matrix(G, nodelist=None):
Files Coverage
networkx 93.90%
Project Totals (275 files) 93.90%
7085.1
TRAVIS_PYTHON_VERSION=3.6
TRAVIS_OS_NAME=linux
1
# Allow coverage to decrease by 0.05%.
2
coverage:
3
  status:
4
    project:
5
      default:
6
        threshold: 0.05%
7

8
# Don't post a comment on pull requests.
9
comment: off
Sunburst
The innermost circle represents the entire project; moving away from the center are folders and, finally, individual files. The size and color of each slice represent the number of statements and the coverage, respectively.
Icicle
The top section represents the entire project, proceeding outward to folders and finally individual files. The size and color of each slice represent the number of statements and the coverage, respectively.
Grid
Each block represents a single file in the project. The size and color of each block represent the number of statements and the coverage, respectively.
Loading