-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmultilevelmethod.py
More file actions
191 lines (151 loc) · 6.61 KB
/
multilevelmethod.py
File metadata and controls
191 lines (151 loc) · 6.61 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
"""two-level iterative method B = B_{TL} for graph Laplacian matrices. We
want the symmetric B.
Components: Given a graph, construct its graph Laplacian matrix. Then using
Luby's algorithm, construct the P matrix that ensures a prescribed coarsening
factor, e.g., 2, 4, or 8 times smaller number of coarse vertices.
Since the graph Laplacian matrix is singular (it has the constants in its
nullspace), to make it invertible, zero out its last row and column,
but keep the diagonal entry as it was (nonzero). The resulting modified
graph Laplacian matrix A is invertible and s.p.d.
Form the coarse matrix A_c = P^TAP.
To implement symmetric two-level cycle use one of the following M and M^T:
(i) M is forward Gauss-Seidel, M^T - backward Gauss-Seidel (both
corresponding to A)
(ii) M = M^T - the ell_1 smoother.
Compare the performance (convergence properties in terms of number of
iterations) of B w.r.t. just using the smoother M in a stationary iterative
method.
Optionally, you may try to implement the multilevel version of B.
"""
from iterativemethods import *
from aggregation_coarsening import *
from scipy.sparse.linalg import spsolve
def B_TL(graph, b=0, x0=0, smoother=fgs, c_factor=2, max_iter=1000,
         epsilon=1e-6, laplacian=False):
    """
    Two-level method B = B_TL for solving Ax = b.

    Each iteration performs a coarse-grid correction (project the residual
    with P, solve exactly on the coarse level, interpolate back) followed by
    one sweep of the smoother M.

    :param graph: The weighted or unweighted vertex-vertex adjacency matrix
        in COO format (or any scipy sparse format, although testing should be
        done to confirm)
    :param b: known right-hand side of equation Ax = b; if none given, a
        random vector of the correct size will be used
    :param x0: initial guess; if none given, a random vector of the correct
        size will be made and used
    :param smoother: smoother method from iterativemethods.py to use as M
    :param c_factor: coarsening factor to determine when to stop coarsening.
        It is the ratio of the number of original vertices divided by the
        number in the resulting coarse graph.
    :param max_iter: maximum number of iterations
    :param epsilon: tolerance for the relative residual norm reduction
    :param laplacian: set to True to use the modified Laplacian as in hw 4
    :return: x (the approximate solution), curr_iter (number of iterations
        used), delta_0 (norm of initial residual), and delta (norm of final
        residual)
    """
    assert issubclass(type(graph), (csr, coo))
    if laplacian:
        A = Laplacian(graph, modifiedSPD=True)
    else:
        A = graph.copy()
    # If no guess was given, make a random initial guess x0 for Ax = b.
    # Check for a scalar sentinel FIRST: comparing a numpy array to 0 with
    # `==` is elementwise and would raise "truth value of an array is
    # ambiguous" whenever the caller actually passes a vector (this mirrors
    # the guard already used in B_TL_symmetric).
    if isinstance(x0, (int, float)) and x0 == 0:
        x0 = np.random.rand(A.shape[1])
    x = x0
    # Likewise, default the right-hand side b to a random vector.
    if isinstance(b, (int, float)) and b == 0:
        b = np.random.rand(A.shape[0])
    # Compute P, the vertex-aggregate relation matrix, and Ac, the coarsened
    # A such that Ac = P.T @ A @ P.
    P, Ac = P_coarse(A, c_factor, modularity_weights=True)
    # Initial residual and its norm (used for the relative stopping test).
    r = b - A @ x0
    delta_0 = delta = npla.norm(r)
    curr_iter = 0
    while curr_iter < max_iter:
        # Converged when the residual has shrunk by the factor epsilon.
        if delta <= epsilon * delta_0:
            return x, curr_iter, delta_0, delta
        # Project the residual to the coarse space.
        rc = P.T @ r
        # Exact solve on the coarse level: Ac xc = rc.
        xc = spsolve(Ac, rc)
        # Coarse-grid correction, interpolated back to the fine level.
        x1 = x + P @ xc
        # One smoothing sweep with M.
        x = smoother(A, b, x1, max_iter=1)[0]
        r = b - A @ x
        delta = npla.norm(r)
        curr_iter += 1
    return x, curr_iter, delta_0, delta
def B_TL_symmetric(graph, b=0, x0=0, smoother=fgs, c_factor=2, max_iter=1000,
                   epsilon=1e-6, laplacian=False):
    """
    Symmetric two-level method: pre-smooth with M, coarse-grid correct,
    post-smooth with M^T, so the resulting preconditioner B is symmetric.

    :param graph: The weighted or unweighted vertex-vertex adjacency matrix
        in COO format (or any scipy sparse format, although testing should be
        done to confirm)
    :param b: known right-hand side of equation Ax = b; if none given, a
        random vector of the correct size will be used
    :param x0: initial guess; if none given, a random vector of the correct
        size will be made and used
    :param smoother: smoother method from iterativemethods.py to use as M.
        M transpose is determined from the choice.
    :param c_factor: coarsening factor to determine when to stop coarsening.
        It is the ratio of the number of original vertices divided by the
        number in the resulting coarse graph.
    :param max_iter: maximum number of iterations
    :param epsilon: tolerance for the relative residual norm reduction
    :param laplacian: set to True to use the modified Laplacian as in hw 4
    :return: x (the approximate solution), curr_iter (number of iterations
        used), delta_0 (norm of initial residual), and delta (norm of final
        residual)
    """
    assert issubclass(type(graph), (csr, coo))
    assert (smoother == fgs or smoother == L_1), "B_TL_symmetric takes " \
                                                 "smoother types: fgs or " \
                                                 "L_1 and will calculate " \
                                                 "the correct transpose " \
                                                 "smoothers from those"
    # The ell_1 smoother is symmetric (M == M^T); for forward Gauss-Seidel
    # the transpose sweep is backward Gauss-Seidel.
    M = MT = smoother
    if smoother == fgs:
        MT = bgs
    if laplacian:
        A = Laplacian(graph, modifiedSPD=True)
    else:
        A = graph.copy()
    # If no guess was given, make a random initial guess x0 for Ax = b.
    # isinstance with (int, float) accepts both 0 and 0.0 as the "not given"
    # sentinel, and avoids the ambiguous elementwise `== 0` on arrays.
    if isinstance(x0, (int, float)) and x0 == 0:
        x0 = np.random.rand(A.shape[1])
    # Likewise, default the right-hand side b to a random vector.
    if isinstance(b, (int, float)) and b == 0:
        b = np.random.rand(A.shape[0])
    # Compute P, the vertex-aggregate relation matrix, and Ac, the coarsened
    # A such that Ac = P.T @ A @ P.
    P, Ac = P_coarse(A, c_factor, modularity_weights=True)
    x = x0
    # Initial residual and its norm (used for the relative stopping test).
    r = b - A @ x0
    delta_0 = delta = npla.norm(r)
    curr_iter = 0
    while curr_iter < max_iter:
        # Converged when the residual has shrunk by the factor epsilon.
        if delta <= epsilon * delta_0:
            return x, curr_iter, delta_0, delta
        # Pre-smoothing sweep with M.
        x1 = M(A, b, x, max_iter=1)[0]
        # Residual after pre-smoothing.
        r = b - A @ x1
        # Project the residual to the coarse space.
        rc = P.T @ r
        # Exact solve on the coarse level: Ac xc = rc.
        xc = spsolve(Ac, rc)
        # Coarse-grid correction, interpolated back to the fine level.
        x2 = x + P @ xc
        # Post-smoothing sweep with M^T keeps the overall operator symmetric.
        x = MT(A, b, x2, max_iter=1)[0]
        r = b - A @ x
        delta = npla.norm(r)
        curr_iter += 1
    return x, curr_iter, delta_0, delta