[Scipy-svn] r3433 - in trunk/scipy/sandbox/multigrid: . tests

scipy-svn@scip... scipy-svn@scip...
Thu Oct 11 15:54:05 CDT 2007


Author: wnbell
Date: 2007-10-11 15:53:54 -0500 (Thu, 11 Oct 2007)
New Revision: 3433

Removed:
   trunk/scipy/sandbox/multigrid/coarsen.py
Modified:
   trunk/scipy/sandbox/multigrid/adaptive.py
   trunk/scipy/sandbox/multigrid/sa.py
   trunk/scipy/sandbox/multigrid/simple_test.py
   trunk/scipy/sandbox/multigrid/tests/test_sa.py
   trunk/scipy/sandbox/multigrid/utils.py
Log:
added more tests for aggregation operators that exclude nodes
consolidated some SA tests


Modified: trunk/scipy/sandbox/multigrid/adaptive.py
===================================================================
--- trunk/scipy/sandbox/multigrid/adaptive.py	2007-10-10 18:06:00 UTC (rev 3432)
+++ trunk/scipy/sandbox/multigrid/adaptive.py	2007-10-11 20:53:54 UTC (rev 3433)
@@ -5,28 +5,9 @@
 from relaxation import gauss_seidel
 from multilevel import multilevel_solver
 from sa import sa_constant_interpolation,sa_fit_candidates
-from utils import approximate_spectral_radius
+from utils import approximate_spectral_radius,hstack_csr,vstack_csr
 
 
-def hstack_csr(A,B):
-    #OPTIMIZE THIS
-    assert(A.shape[0] == B.shape[0])
-    A = A.tocoo()
-    B = B.tocoo()
-    I = concatenate((A.row,B.row))
-    J = concatenate((A.col,B.col+A.shape[1]))
-    V = concatenate((A.data,B.data))
-    return coo_matrix((V,(I,J)),dims=(A.shape[0],A.shape[1]+B.shape[1])).tocsr()
-
-def vstack_csr(A,B):
-    #OPTIMIZE THIS
-    assert(A.shape[1] == B.shape[1])
-    A = A.tocoo()
-    B = B.tocoo()
-    I = concatenate((A.row,B.row+A.shape[0]))
-    J = concatenate((A.col,B.col))
-    V = concatenate((A.data,B.data))
-    return coo_matrix((V,(I,J)),dims=(A.shape[0]+B.shape[0],A.shape[1])).tocsr()
     
 
 
@@ -34,7 +15,9 @@
     """
      
     """
-    X = csr_matrix((x_l,W_l.indices,W_l.indptr),dims=W_l.shape,check=False)  #candidate prolongator (assumes every value from x is used)
+
+    #candidate prolongator (assumes every value from x is used)
+    X = csr_matrix((x_l,W_l.indices,W_l.indptr),dims=W_l.shape,check=False)  
     
     R = (P_l.T.tocsr() * X)  # R has at most 1 nz per row
 X = X - P_l*R            # orthogonalize X against P_l
@@ -78,6 +61,7 @@
     omega = 4.0/(3.0*approximate_spectral_radius(D_inv_A))
     print "spectral radius",approximate_spectral_radius(D_inv_A)
     D_inv_A *= omega
+
     return P - D_inv_A*P
  
 
@@ -114,16 +98,21 @@
     return csr_matrix((I.data,I.indices,ptr),dims=(N,I.shape[1]),check=False)
 
 class adaptive_sa_solver:
-    def __init__(self,A,options=None,max_levels=10,max_coarse=100,max_candidates=1,mu=5,epsilon=0.1):
+    def __init__(self,A,blocks=None,options=None,max_levels=10,max_coarse=100,\
+                        max_candidates=1,mu=5,epsilon=0.1):
+
         self.A = A
 
         self.Rs = [] 
         
-        #if self.A.shape[0] <= self.opts['coarse: max size']:
-        #    raise ValueError,'small matrices not handled yet'
+        if self.A.shape[0] <= self.opts['coarse: max size']:
+            raise ValueError,'small matrices not handled yet'
+       
+        #first candidate 
+        x,AggOps = self.__initialization_stage(A,blocks=blocks,\
+                            max_levels=max_levels,max_coarse=max_coarse,\
+                            mu=mu,epsilon=epsilon) 
         
-        x,AggOps = self.__initialization_stage(A,max_levels=max_levels,max_coarse=max_coarse,mu=mu,epsilon=epsilon) #first candidate
-        
         Ws = AggOps
 
         self.candidates = [x]
@@ -135,27 +124,72 @@
             x = self.__develop_candidate(A,As,Is,Ps,Ws,AggOps,mu=mu)    
             
             self.candidates.append(x)
-
-            #if i == 0:
-            #    x = arange(50).repeat(50).astype(float)
-            #elif i == 1:
-            #    x = arange(50).repeat(50).astype(float)
-            #    x = numpy.ravel(transpose(x.reshape((50,50))))
            
             #As,Is,Ps,Ws = self.__augment_cycle(A,As,Ps,Ws,AggOps,x)             
             As,Is,Ps = sa_hierarchy(A,AggOps,self.candidates)
-   
-            #random.seed(0)
-            #solver = multilevel_solver(As,Is)
-            #x = solver.solve(zeros(A.shape[0]), x0=rand(A.shape[0]), tol=1e-12, maxiter=30)
-            #self.candidates.append(x)
 
         self.Ps = Ps 
         self.solver = multilevel_solver(As,Is)
         self.AggOps = AggOps
                 
 
+    def __initialization_stage(self,A,max_levels,max_coarse,mu,epsilon):
+        AggOps = []
+        Is     = []
 
+        # aSA parameters 
+        # mu      - number of test relaxation iterations
+        # epsilon - minimum acceptable relaxation convergence factor 
+        
+        #step 1
+        A_l = A
+        x   = scipy.rand(A_l.shape[0])
+        skip_f_to_i = False
+        
+        #step 2
+        b = zeros_like(x)
+        gauss_seidel(A_l,x,b,iterations=mu,sweep='symmetric')
+
+        #step 3
+        #TODO test convergence rate here 
+      
+        As = [A]
+
+        while len(AggOps) + 1 < max_levels  and A_l.shape[0] > max_coarse:
+            W_l   = sa_constant_interpolation(A_l,epsilon=0,blocks=blocks) #step 4b
+            P_l,x = sa_fit_candidates(W_l,[x])                             #step 4c  
+            x = x[0]  #TODO make sa_fit_candidates accept a single x
+            I_l   = smoothed_prolongator(P_l,A_l)                          #step 4d
+            A_l   = I_l.T.tocsr() * A_l * I_l                              #step 4e
+            
+            AggOps.append(W_l)
+            Is.append(I_l)
+            As.append(A_l)
+
+            if A_l.shape <= max_coarse:  break
+
+            if not skip_f_to_i:
+                print "."
+                x_hat = x.copy()                                                   #step 4g
+                gauss_seidel(A_l,x,zeros_like(x),iterations=mu,sweep='symmetric')  #step 4h
+                x_A_x = inner(x,A_l*x) 
+                if (x_A_x/inner(x_hat,A_l*x_hat))**(1.0/mu) < epsilon:             #step 4i
+                    print "sufficient convergence, skipping"
+                    skip_f_to_i = True
+                    if x_A_x == 0:       
+                        x = x_hat  #need to restore x
+        
+        #update fine-level candidate
+        for A_l,I in reversed(zip(As[1:],Is)):
+            gauss_seidel(A_l,x,zeros_like(x),iterations=mu,sweep='symmetric')         #TEST
+            x = I * x
+        gauss_seidel(A,x,b,iterations=mu)         #TEST
+    
+        return x,AggOps  #first candidate,aggregation
+
+
+
+
     def __develop_candidate(self,A,As,Is,Ps,Ws,AggOps,mu):
         #scipy.random.seed(0)  #TEST
         x = scipy.rand(A.shape[0])
@@ -231,65 +265,7 @@
         return new_As,new_Is,new_Ps,new_Ws
 
 
-    def __initialization_stage(self,A,max_levels,max_coarse,mu,epsilon):
-        AggOps = []
-        Is     = []
 
-        # aSA parameters 
-        # mu      - number of test relaxation iterations
-        # epsilon - minimum acceptable relaxation convergence factor 
-        
-        #scipy.random.seed(0) #TEST
-
-        #step 1
-        A_l = A
-        x   = scipy.rand(A_l.shape[0])
-        skip_f_to_i = False
-        
-        #step 2
-        b = zeros_like(x)
-        gauss_seidel(A_l,x,b,iterations=mu,sweep='symmetric')
-        #step 3
-        #test convergence rate here 
-      
-        As = [A]
-
-        while len(AggOps) + 1 < max_levels  and A_l.shape[0] > max_coarse:
-            #W_l   = sa_constant_interpolation(A_l,epsilon=0.08*0.5**(len(AggOps)-1))           #step 4b  #TEST
-            W_l   = sa_constant_interpolation(A_l,epsilon=0)              #step 4b
-            P_l,x = sa_fit_candidates(W_l,[x])                            #step 4c  
-            x = x[0]  #TODO make sa_fit_candidates accept a single x
-            I_l   = smoothed_prolongator(P_l,A_l)                         #step 4d
-            A_l   = I_l.T.tocsr() * A_l * I_l                             #step 4e
-            
-            AggOps.append(W_l)
-            Is.append(I_l)
-            As.append(A_l)
-
-            if A_l.shape <= max_coarse:  break
-
-            if not skip_f_to_i:
-                print "."
-                x_hat = x.copy()                                                   #step 4g
-                gauss_seidel(A_l,x,zeros_like(x),iterations=mu,sweep='symmetric')  #step 4h
-                x_A_x = inner(x,A_l*x) 
-                if (x_A_x/inner(x_hat,A_l*x_hat))**(1.0/mu) < epsilon:             #step 4i
-                    print "sufficient convergence, skipping"
-                    skip_f_to_i = True
-                    if x_A_x == 0:       
-                        x = x_hat  #need to restore x
-        
-        #update fine-level candidate
-        for A_l,I in reversed(zip(As[1:],Is)):
-            gauss_seidel(A_l,x,zeros_like(x),iterations=mu,sweep='symmetric')         #TEST
-            x = I * x
-        gauss_seidel(A,x,b,iterations=mu)         #TEST
-    
-        return x,AggOps  #first candidate,aggregation
-
-
-
-
 from scipy import *
 from utils import diag_sparse
 from multilevel import poisson_problem1D,poisson_problem2D

Deleted: trunk/scipy/sandbox/multigrid/coarsen.py
===================================================================
--- trunk/scipy/sandbox/multigrid/coarsen.py	2007-10-10 18:06:00 UTC (rev 3432)
+++ trunk/scipy/sandbox/multigrid/coarsen.py	2007-10-11 20:53:54 UTC (rev 3433)
@@ -1,163 +0,0 @@
-##import scipy
-##import numpy
-##
-##from numpy import arange,ones,zeros,sqrt,isinf,asarray,empty
-##from scipy.sparse import csr_matrix,isspmatrix_csr
-##
-##from utils import diag_sparse,approximate_spectral_radius
-##import multigridtools
-##
-##
-##def rs_strong_connections(A,theta):
-##    """
-##    Return a strength of connection matrix using the method of Ruge and Stuben
-##
-##        An off-diagonal entry A[i.j] is a strong connection iff
-##
-##                -A[i,j] >= theta * max( -A[i,k] )   where k != i
-##    """
-##    if not isspmatrix_csr(A): raise TypeError('expected csr_matrix')
-##
-##    Sp,Sj,Sx = multigridtools.rs_strong_connections(A.shape[0],theta,A.indptr,A.indices,A.data)
-##    return csr_matrix((Sx,Sj,Sp),A.shape)
-##
-##
-##def rs_interpolation(A,theta=0.25):
-##    if not isspmatrix_csr(A): raise TypeError('expected csr_matrix')
-##    
-##    S = rs_strong_connections(A,theta)
-##
-##    T = S.T.tocsr()  #transpose S for efficient column access
-##
-##    Ip,Ij,Ix = multigridtools.rs_interpolation(A.shape[0],\
-##                                               A.indptr,A.indices,A.data,\
-##                                               S.indptr,S.indices,S.data,\
-##                                               T.indptr,T.indices,T.data)
-##
-##    return csr_matrix((Ix,Ij,Ip))
-##
-##
-##def sa_strong_connections(A,epsilon):
-##    if not isspmatrix_csr(A): raise TypeError('expected csr_matrix')
-##
-##    Sp,Sj,Sx = multigridtools.sa_strong_connections(A.shape[0],epsilon,A.indptr,A.indices,A.data)
-##
-##    return csr_matrix((Sx,Sj,Sp),A.shape)
-##
-##def sa_constant_interpolation(A,epsilon,blocks=None):
-##    if not isspmatrix_csr(A): raise TypeError('expected csr_matrix')
-##    
-##    #handle epsilon = 0 case without creating strength of connection matrix?
-##    
-##    if blocks is not None:
-##        num_dofs   = A.shape[0]
-##        num_blocks = blocks.max()
-##        
-##        if num_dofs != len(blocks):
-##            raise ValueError,'improper block specification'
-##        
-##        # for non-scalar problems, use pre-defined blocks in aggregation
-##        # the strength of connection matrix is based on the Frobenius norms of the blocks
-##        
-##        B  = csr_matrix((ones(num_dofs),blocks,arange(num_dofs + 1)),dims=(num_dofs,num_blocks))
-##        Block_Frob = B.T.tocsr() * csr_matrix((A.data**2,A.indices,A.indptr),dims=A.shape) * B #Frobenius norms of blocks entries of A
-##
-##        S = sa_strong_connections(Block_Frob,epsilon)
-##    
-##        Pj = multigridtools.sa_get_aggregates(S.shape[0],S.indptr,S.indices)
-##        Pj = Pj[blocks] #expand block aggregates into constituent dofs
-##        Pp = B.indptr
-##        Px = B.data
-##    else:
-##        S = sa_strong_connections(A,epsilon)
-##        
-##        Pj = multigridtools.sa_get_aggregates(S.shape[0],S.indptr,S.indices)
-##        Pp = numpy.arange(len(Pj)+1)
-##        Px = numpy.ones(len(Pj))
-##    
-##    return csr_matrix((Px,Pj,Pp))
-##
-##
-##def fit_candidates(AggOp,candidates):
-##    K = len(candidates)
-##
-##    N_fine,N_coarse = AggOp.shape
-##
-##    if K > 1 and len(candidates[0]) == K*N_fine:
-##        #see if fine space has been expanded (all levels except for first)
-##        AggOp = csr_matrix((AggOp.data.repeat(K),AggOp.indices.repeat(K),arange(K*N_fine + 1)),dims=(K*N_fine,N_coarse))
-##        N_fine = K*N_fine
-##
-##    R = zeros((K*N_coarse,K))
-##
-##    candidate_matrices = []
-##    for i,c in enumerate(candidates):
-##        X = csr_matrix((c.copy(),AggOp.indices,AggOp.indptr),dims=AggOp.shape)
-##       
-##        #TODO optimize this  
-##
-##        #orthogonalize X against previous
-##        for j,A in enumerate(candidate_matrices):
-##            D_AtX = csr_matrix((A.data*X.data,X.indices,X.indptr),dims=X.shape).sum(axis=0).A.flatten() #same as diagonal of A.T * X            
-##            R[j::K,i] = D_AtX
-##            X.data -= D_AtX[X.indices] * A.data
-##
-##            #AtX = csr_matrix(A.T.tocsr() * X
-##            #R[j::K,i] = AtX.data
-##            #X = X - A * AtX 
-##    
-##        #normalize X
-##        XtX = X.T.tocsr() * X
-##        col_norms = sqrt(asarray(XtX.sum(axis=0)).flatten())
-##        R[i::K,i] = col_norms
-##        col_norms = 1.0/col_norms
-##        col_norms[isinf(col_norms)] = 0
-##        X.data *= col_norms[X.indices]
-##
-##        candidate_matrices.append(X)
-##
-##
-##    Q_indptr  = K*AggOp.indptr
-##    Q_indices = (K*AggOp.indices).repeat(K)
-##    for i in range(K):
-##        Q_indices[i::K] += i
-##    Q_data = empty(N_fine * K)
-##    for i,X in enumerate(candidate_matrices):
-##        Q_data[i::K] = X.data
-##    Q = csr_matrix((Q_data,Q_indices,Q_indptr),dims=(N_fine,K*N_coarse))
-##
-##    coarse_candidates = [R[:,i] for i in range(K)]
-##
-##    return Q,coarse_candidates
-##    
-####    S = sa_strong_connections(A,epsilon)
-####
-####    #tentative (non-smooth) interpolation operator I
-####    Pj = multigridtools.sa_get_aggregates(S.shape[0],S.indptr,S.indices)
-####    Pp = numpy.arange(len(Pj)+1)
-####    Px = numpy.ones(len(Pj))
-####    
-####    return scipy.sparse.csr_matrix((Px,Pj,Pp))
-##
-####def sa_smoother(A,S,omega):
-####    Bp,Bj,Bx = multigridtools.sa_smoother(A.shape[0],omega,A.indptr,A.indices,A.data,S.indptr,S.indices,S.data)
-####
-####    return csr_matrix((Bx,Bj,Bp),dims=A.shape)
-##    
-##def sa_interpolation(A,candidates,epsilon,omega=4.0/3.0,blocks=None):
-##    if not isspmatrix_csr(A): raise TypeError('expected csr_matrix')
-##   
-##    AggOp  = sa_constant_interpolation(A,epsilon=epsilon,blocks=blocks)
-##    T,coarse_candidates = fit_candidates(AggOp,candidates)
-##
-##    D_inv = diag_sparse(1.0/diag_sparse(A))       
-##    
-##    D_inv_A  = D_inv * A
-##    D_inv_A *= omega/approximate_spectral_radius(D_inv_A)
-##
-##    P = T - (D_inv_A*T)  #same as I=S*P, (faster?)
-##           
-##    return P,coarse_candidates
-##
-##
-##

Modified: trunk/scipy/sandbox/multigrid/sa.py
===================================================================
--- trunk/scipy/sandbox/multigrid/sa.py	2007-10-10 18:06:00 UTC (rev 3432)
+++ trunk/scipy/sandbox/multigrid/sa.py	2007-10-11 20:53:54 UTC (rev 3433)
@@ -1,31 +1,64 @@
 import scipy
 import numpy
-from numpy import array,arange,ones,zeros,sqrt,isinf,asarray,empty
+from numpy import array,arange,ones,zeros,sqrt,isinf,asarray,empty,diff
 from scipy.sparse import csr_matrix,isspmatrix_csr
 
 from utils import diag_sparse,approximate_spectral_radius
 import multigridtools
 
-__all__ = ['sa_strong_connections','sa_constant_interpolation',
+__all__ = ['sa_filtered_matrix','sa_strong_connections','sa_constant_interpolation',
            'sa_interpolation','sa_fit_candidates']
 
 
-def sa_strong_connections(A,epsilon):
+def sa_filtered_matrix(A,epsilon,blocks=None):
     if not isspmatrix_csr(A): raise TypeError('expected csr_matrix')
 
+    if epsilon == 0:
+        A_filtered = A
+
+    else:
+        if blocks is None:
+            Sp,Sj,Sx = multigridtools.sa_strong_connections(A.shape[0],epsilon,A.indptr,A.indices,A.data)
+            A_filtered = csr_matrix((Sx,Sj,Sp),A.shape)
+
+        else:
+            num_dofs   = A.shape[0]
+            num_blocks = blocks.max()
+            
+            if num_dofs != len(blocks):
+                raise ValueError,'improper block specification'
+            
+            # for non-scalar problems, use pre-defined blocks in aggregation
+            # the strength of connection matrix is based on the Frobenius norms of the blocks
+            
+            B  = csr_matrix((ones(num_dofs),blocks,arange(num_dofs + 1)),dims=(num_dofs,num_blocks))
+            Bt = B.T.tocsr()
+            
+            #Frobenius norms of the block entries of A
+            #TODO change to 1-norm ?
+            Block_Frob = Bt * csr_matrix((A.data**2,A.indices,A.indptr),dims=A.shape) * B 
+    
+            S = sa_strong_connections(Block_Frob,epsilon)       
+            S.data[:] = 1
+
+            Mask = B * S * Bt
+
+            A_filtered = A ** Mask
+
+    return A_filtered          
+
+
+def sa_strong_connections(A,epsilon,blocks=None):
+    if not isspmatrix_csr(A): raise TypeError('expected csr_matrix')
+    
     Sp,Sj,Sx = multigridtools.sa_strong_connections(A.shape[0],epsilon,A.indptr,A.indices,A.data)
 
-    #D = diag_sparse(D)
-    #I,J,V = arange(A.shape[0]).repeat(diff(A.indptr)),A.indices,A.data  #COO format for A
-    #diag_mask = (I == J)
-
     return csr_matrix((Sx,Sj,Sp),A.shape)
 
+
 def sa_constant_interpolation(A,epsilon,blocks=None):
     if not isspmatrix_csr(A): raise TypeError('expected csr_matrix')
     
-    #TODO handle epsilon = 0 case without creating strength of connection matrix?
-    
     if blocks is not None:
         num_dofs   = A.shape[0]
         num_blocks = blocks.max()
@@ -35,9 +68,11 @@
         
         # for non-scalar problems, use pre-defined blocks in aggregation
         # the strength of connection matrix is based on the Frobenius norms of the blocks
-        
+       
+        #TODO change this to matrix 1 norm?
         B  = csr_matrix((ones(num_dofs),blocks,arange(num_dofs + 1)),dims=(num_dofs,num_blocks))
-        Block_Frob = B.T.tocsr() * csr_matrix((A.data**2,A.indices,A.indptr),dims=A.shape) * B #Frobenius norms of blocks entries of A
+        #Frobenius norms of the block entries of A
+        Block_Frob = B.T.tocsr() * csr_matrix((A.data**2,A.indices,A.indptr),dims=A.shape) * B 
 
         S = sa_strong_connections(Block_Frob,epsilon)
     
@@ -70,8 +105,8 @@
 
     candidate_matrices = []
     for i,c in enumerate(candidates):
-        #TODO permit incomplete AggOps here (for k-form problems) (other modifications necessary?)
-        X = csr_matrix((c.copy(),AggOp.indices,AggOp.indptr),dims=AggOp.shape)
+        c = c[diff(AggOp.indptr) == 1]  #eliminate DOFs that aggregation misses
+        X = csr_matrix((c,AggOp.indices,AggOp.indptr),dims=AggOp.shape)
        
 
         #orthogonalize X against previous
@@ -79,7 +114,6 @@
             D_AtX = csr_matrix((A.data*X.data,X.indices,X.indptr),dims=X.shape).sum(axis=0).A.flatten() #same as diagonal of A.T * X            
             R[j::K,i] = D_AtX
             X.data -= D_AtX[X.indices] * A.data
-
          
         #normalize X
         D_XtX = csr_matrix((X.data**2,X.indices,X.indptr),dims=X.shape).sum(axis=0).A.flatten() #same as diagonal of X.T * X            
@@ -95,7 +129,7 @@
     Q_indices = (K*AggOp.indices).repeat(K)
     for i in range(K):
         Q_indices[i::K] += i
-    Q_data = empty(N_fine * K)
+    Q_data = empty(AggOp.indptr[-1] * K) #if AggOp includes all nodes, then this is (N_fine * K)
     for i,X in enumerate(candidate_matrices):
         Q_data[i::K] = X.data
     Q = csr_matrix((Q_data,Q_indices,Q_indptr),dims=(N_fine,K*N_coarse))
@@ -107,16 +141,17 @@
     
 def sa_interpolation(A,candidates,epsilon,omega=4.0/3.0,blocks=None):
     if not isspmatrix_csr(A): raise TypeError('expected csr_matrix')
-   
+
     AggOp  = sa_constant_interpolation(A,epsilon=epsilon,blocks=blocks)
-    T,coarse_candidates = sa_fit_candidates(AggOp,candidates)
+    T,coarse_candidates = sa_fit_candidates(AggOp,candidates)  
 
-    #TODO use filtered matrix here for anisotropic problems
-    A_filtered = A 
-    D_inv = diag_sparse(1.0/diag_sparse(A_filtered))       
+    A_filtered = sa_filtered_matrix(A,epsilon,blocks) #use filtered matrix for anisotropic problems
+
+    D_inv    = diag_sparse(1.0/diag_sparse(A_filtered))       
     D_inv_A  = D_inv * A_filtered
     D_inv_A *= omega/approximate_spectral_radius(D_inv_A)
 
+    # smooth tentative prolongator T
     P = T - (D_inv_A*T)
            
     return P,coarse_candidates

Modified: trunk/scipy/sandbox/multigrid/simple_test.py
===================================================================
--- trunk/scipy/sandbox/multigrid/simple_test.py	2007-10-10 18:06:00 UTC (rev 3432)
+++ trunk/scipy/sandbox/multigrid/simple_test.py	2007-10-11 20:53:54 UTC (rev 3433)
@@ -1,12 +1,11 @@
 from multilevel import *
-from multigrid import *
 from scipy import *
 
 A = poisson_problem2D(200)
 rs_solver = ruge_stuben_solver(A)
 b = rand(A.shape[0])
-x,res = rs_solver.solve(b,return_residuals=True)
-print res
+x,residuals = rs_solver.solve(b,return_residuals=True)
+print 'residuals',residuals
 
 
 

Modified: trunk/scipy/sandbox/multigrid/tests/test_sa.py
===================================================================
--- trunk/scipy/sandbox/multigrid/tests/test_sa.py	2007-10-10 18:06:00 UTC (rev 3432)
+++ trunk/scipy/sandbox/multigrid/tests/test_sa.py	2007-10-11 20:53:54 UTC (rev 3433)
@@ -1,7 +1,7 @@
 from numpy.testing import *
 
 from numpy import sqrt,empty,ones,arange,array_split,eye,array, \
-                  zeros,diag,zeros_like
+                  zeros,diag,zeros_like,diff
 from numpy.linalg import norm                  
 from scipy import rand
 from scipy.sparse import spdiags,csr_matrix,lil_matrix, \
@@ -32,175 +32,72 @@
 #
 #    return A
 
-def reference_sa_strong_connections(A,epsilon):
-    A_coo = A.tocoo()
-    S = lil_matrix(A.shape)
+class TestSA(NumpyTestCase):
+    def setUp(self):
+        self.cases = []
 
-    for (i,j,v) in zip(A_coo.row,A_coo.col,A_coo.data):
-        if i == j or abs(v) >= epsilon*sqrt(abs(A[i,i])*abs(A[j,j])):
-            S[i,j] += v
-        else:
-            S[i,i] += v
+        # random matrices
+        numpy.random.seed(0)        
+        for N in [2,3,5]:
+            self.cases.append( csr_matrix(rand(N,N)) ) 
+        
+        # poisson problems in 1D and 2D
+        for N in [2,3,5,7,10,11,19]:
+            self.cases.append( poisson_problem1D(N) )
+        for N in [2,3,5,7,10,11]:
+            self.cases.append( poisson_problem2D(N) )
 
-    return S
 
-class TestSAStrongConnections(NumpyTestCase):
-#    def check_simple(self):
-#        N = 4
-#        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).tocsr()
-#        assert_array_equal(sa_strong_connections(A,0.50).todense(),A.todense())   #all connections are strong
-#        assert_array_equal(sa_strong_connections(A,0.51).todense(),0*A.todense()) #no connections are strong
-#       
-#        N = 100
-#        A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).tocsr()
-#        assert_array_equal(sa_strong_connections(A,0.50).todense(),A.todense())   #all connections are strong
-#        assert_array_equal(sa_strong_connections(A,0.51).todense(),0*A.todense()) #no connections are strong
-
-    def check_random(self):
-        numpy.random.seed(0)
-
-        for N in [2,3,5]:
-            A = csr_matrix(rand(N,N))
+    def check_sa_strong_connections(self):
+        for A in self.cases:
             for epsilon in [0.0,0.1,0.5,1.0,10.0]:
                 S_result = sa_strong_connections(A,epsilon)
                 S_expected = reference_sa_strong_connections(A,epsilon)
                 assert_almost_equal(S_result.todense(),S_expected.todense())
                 #assert_array_equal(sparsity(S_result).todense(),sparsity(S_expected).todense())
 
-    def check_poisson1D(self):
-        for N in [2,3,5,7,10,11,19]:
-            A = poisson_problem1D(N)
+    def check_sa_constant_interpolation(self):
+        for A in self.cases:
             for epsilon in [0.0,0.1,0.5,1.0]:
-                S_result   = sa_strong_connections(A,epsilon)
-                S_expected = reference_sa_strong_connections(A,epsilon)
-                assert_array_equal(S_result.todense(),S_expected.todense())
-                #assert_array_equal(sparsity(S_result).todense(),sparsity(S_expected).todense())
-
-    def check_poisson2D(self):
-        for N in [2,3,5,7,10,11]:
-            A = poisson_problem2D(N)
-            for epsilon in [0.0,0.1,0.5,1.0]:
-                S_result   = sa_strong_connections(A,epsilon)
-                S_expected = reference_sa_strong_connections(A,epsilon)
-                assert_array_equal(S_result.todense(),S_expected.todense())
-                #assert_array_equal(sparsity(S_result).todense(),sparsity(S_expected).todense())
-
-
-
-
-# note that this method only tests the current implementation, not
-# all possible implementations
-def reference_sa_constant_interpolation(A,epsilon):
-    S = sa_strong_connections(A,epsilon)
-    S = array_split(S.indices,S.indptr[1:-1])
-
-    n = A.shape[0]
-
-    R = set(range(n))
-    j = 0
-
-    aggregates = empty(n,dtype=A.indices.dtype)
-    aggregates[:] = -1
-
-    # Pass #1
-    for i,row in enumerate(S):
-        Ni = set(row) | set([i])
-
-        if Ni.issubset(R):
-            R -= Ni
-            for x in Ni:
-                aggregates[x] = j
-            j += 1
-
-    # Pass #2
-    Old_R = R.copy()
-    for i,row in enumerate(S):
-        if i not in R: continue
-        
-        for x in row:
-            if x not in Old_R:
-                aggregates[i] = aggregates[x]
-                R.remove(i)
-                break
-
-    # Pass #3
-    for i,row in enumerate(S):
-        if i not in R: continue
-        Ni = set(row) | set([i])
-
-        for x in Ni:
-            if x in R:
-                aggregates[x] = j
-            j += 1
-
-    assert(len(R) == 0)
-
-    Pj = aggregates
-    Pp = arange(n+1)
-    Px = ones(n)
- 
-    return csr_matrix((Px,Pj,Pp))
-
-class TestSAConstantInterpolation(NumpyTestCase):
-    def check_random(self):
-        numpy.random.seed(0)
-        for N in [2,3,5,10]:
-            A = csr_matrix(rand(N,N))
-            for epsilon in [0.0,0.1,0.5,1.0]:
                 S_result   = sa_constant_interpolation(A,epsilon)
                 S_expected = reference_sa_constant_interpolation(A,epsilon)
                 assert_array_equal(S_result.todense(),S_expected.todense())
 
-    def check_poisson1D(self):
-        for N in [2,3,5,7,10,11,20,21,29,30]:
-            A = poisson_problem1D(N)
-            for epsilon in [0.0,0.1,0.5,1.0]:
-                S_result   = sa_constant_interpolation(A,epsilon)
-                S_expected = reference_sa_constant_interpolation(A,epsilon)
-                assert_array_equal(S_result.todense(),S_expected.todense())
 
-    def check_poisson2D(self):
-        for N in [2,3,5,7,10,11]:
-            A = poisson_problem2D(N)
-            for epsilon in [0.0,0.1,0.5,1.0]:
-                S_result   = sa_constant_interpolation(A,epsilon)
-                S_expected = reference_sa_constant_interpolation(A,epsilon)
-                assert_array_equal(S_result.todense(),S_expected.todense())
-
-##    def check_sample_data(self):
-##        from examples import all_examples,read_matrix
-##
-##        for filename in all_examples:
-##            A = read_matrix(filename)            
-##            for epsilon in [0.0,0.08,0.51,1.0]:
-##                S_result   = sa_constant_interpolation(A,epsilon)
-##                S_expected = reference_sa_constant_interpolation(A,epsilon)
-##                assert_array_equal((S_result - S_expected).nnz,0)
-
 class TestFitCandidates(NumpyTestCase):
     def setUp(self):
-        self.normal_cases = []
+        self.cases = []
 
+        ## tests where AggOp includes all DOFs
         #one candidate
-        self.normal_cases.append((csr_matrix((ones(5),array([0,0,0,1,1]),arange(6)),dims=(5,2)),[ones(5)]))
-        self.normal_cases.append((csr_matrix((ones(5),array([1,1,0,0,0]),arange(6)),dims=(5,2)),[ones(5)]))
-        self.normal_cases.append((csr_matrix((ones(9),array([0,0,0,1,1,1,2,2,2]),arange(10)),dims=(9,3)),[ones(9)]))
-        self.normal_cases.append((csr_matrix((ones(9),array([2,1,0,0,1,2,1,0,2]),arange(10)),dims=(9,3)),[arange(9)]))
-
+        self.cases.append((csr_matrix((ones(5),array([0,0,0,1,1]),arange(6)),dims=(5,2)),[ones(5)]))
+        self.cases.append((csr_matrix((ones(5),array([1,1,0,0,0]),arange(6)),dims=(5,2)),[ones(5)]))
+        self.cases.append((csr_matrix((ones(9),array([0,0,0,1,1,1,2,2,2]),arange(10)),dims=(9,3)),[ones(9)]))
+        self.cases.append((csr_matrix((ones(9),array([2,1,0,0,1,2,1,0,2]),arange(10)),dims=(9,3)),[arange(9)]))
         #two candidates
-        self.normal_cases.append((csr_matrix((ones(4),array([0,0,1,1]),arange(5)),dims=(4,2)),[ones(4),arange(4)]))
-        self.normal_cases.append((csr_matrix((ones(9),array([0,0,0,1,1,1,2,2,2]),arange(10)),dims=(9,3)),[ones(9),arange(9)]))
-        self.normal_cases.append((csr_matrix((ones(9),array([0,0,1,1,2,2,3,3,3]),arange(10)),dims=(9,4)),[ones(9),arange(9)]))
-
+        self.cases.append((csr_matrix((ones(4),array([0,0,1,1]),arange(5)),dims=(4,2)),[ones(4),arange(4)]))
+        self.cases.append((csr_matrix((ones(9),array([0,0,0,1,1,1,2,2,2]),arange(10)),dims=(9,3)),[ones(9),arange(9)]))
+        self.cases.append((csr_matrix((ones(9),array([0,0,1,1,2,2,3,3,3]),arange(10)),dims=(9,4)),[ones(9),arange(9)]))
         #block candidates
-        self.normal_cases.append((csr_matrix((ones(9),array([0,0,0,1,1,1,2,2,2]),arange(10)),dims=(9,3)),[array([1]*9 + [0]*9),arange(2*9)]))
+        self.cases.append((csr_matrix((ones(9),array([0,0,0,1,1,1,2,2,2]),arange(10)),dims=(9,3)),[array([1]*9 + [0]*9),arange(2*9)]))
+        
+        ## tests where AggOp excludes some DOFs
+        self.cases.append((csr_matrix((ones(4),array([0,0,1,1]),array([0,1,2,2,3,4])),dims=(5,2)),[ones(5)]))
+        self.cases.append((csr_matrix((ones(4),array([0,0,1,1]),array([0,1,2,2,3,4])),dims=(5,2)),[ones(5),arange(5)]))
+        self.cases.append((csr_matrix((ones(6),array([1,3,0,2,1,0]),array([0,0,1,2,2,3,4,5,5,6])),dims=(9,4)),[ones(9),arange(9)]))
 
-        #TODO add test case where aggregation operator has holes
-
-    def check_normal(self):
+    def check_all_cases(self):
         """Test case where aggregation includes all fine nodes"""
         
-        for AggOp,fine_candidates in self.normal_cases:
+        def mask_candidate(AggOp,candidates):
+            #mask out all DOFs that are not included in the aggregation
+            for c in candidates:
+                c[diff(AggOp.indptr) == 0] = 0
+
+        for AggOp,fine_candidates in self.cases:
+
+            mask_candidate(AggOp,fine_candidates)
+
             Q,coarse_candidates = sa_fit_candidates(AggOp,fine_candidates)
 
             assert_equal(len(coarse_candidates),len(fine_candidates))
@@ -210,13 +107,14 @@
                 assert_almost_equal(fine,Q*coarse)
                 assert_almost_equal(Q*(Q.T*fine),fine)
 
-
             #aggregate one more level (to a single aggregate)
             K = len(coarse_candidates)
             N = K*AggOp.shape[1]
             AggOp = csr_matrix((ones(N),zeros(N),arange(N + 1)),dims=(N,1)) #aggregate to a single point
             fine_candidates = coarse_candidates
             
+            #mask_candidate(AggOp,fine_candidates)  #not needed
+            
             #now check the coarser problem
             Q,coarse_candidates = sa_fit_candidates(AggOp,fine_candidates)
 
@@ -229,15 +127,16 @@
 
 
 
+
 class TestSASolver(NumpyTestCase):
     def setUp(self):
         self.cases = []
 
-        #self.cases.append((poisson_problem1D(10),None))
-
-        self.cases.append((poisson_problem1D(500),None))
+        self.cases.append((poisson_problem1D(100),None))
         self.cases.append((poisson_problem2D(50),None))
-        
+        # TODO add unstructured tests
+       
+
     def check_basic(self):
         """check that method converges at a reasonable rate"""
 
@@ -256,21 +155,23 @@
             assert(avg_convergence_ratio < 0.5)
 
     def check_DAD(self):
-
         for A,candidates in self.cases:
 
             x = rand(A.shape[0])
-            b = A*rand(A.shape[0]) #zeros_like(x)
+            b = A*rand(A.shape[0]) 
             
-            D = diag_sparse(rand(A.shape[0]))
+            D     = diag_sparse(1.0/sqrt(10**(12*rand(A.shape[0])-6))).tocsr()
             D_inv = diag_sparse(1.0/D.data)
-            DAD = D*A*D
+
+            DAD   = D*A*D
            
             if candidates is None:
                 candidates = [ ones(A.shape[0]) ]
            
             DAD_candidates = [ (D_inv * c) for c in candidates ]
-            
+           
+            #TODO force 2 level method and check that result is the same
+
             #ml = smoothed_aggregation_solver(A,candidates,max_coarse=1,max_levels=2)
 
             ml = smoothed_aggregation_solver(DAD,DAD_candidates,max_coarse=100,max_levels=2)
@@ -280,38 +181,84 @@
             x_sol,residuals = ml.solve(b,x0=x,maxiter=10,tol=1e-12,return_residuals=True)
 
             avg_convergence_ratio = (residuals[-1]/residuals[0])**(1.0/len(residuals))
-            print avg_convergence_ratio
+            #print avg_convergence_ratio
 
             assert(avg_convergence_ratio < 0.5)
+
+
+
+
+################################################
+##   reference implementations for unittests  ##
+################################################
+def reference_sa_strong_connections(A,epsilon):
+    A_coo = A.tocoo()
+    S = lil_matrix(A.shape)
+
+    for (i,j,v) in zip(A_coo.row,A_coo.col,A_coo.data):
+        if i == j or abs(v) >= epsilon*sqrt(abs(A[i,i])*abs(A[j,j])):
+            S[i,j] += v
+        else:
+            S[i,i] += v
+
+    return S
+
+
+# note that this method only tests the current implementation, not
+# all possible implementations
+def reference_sa_constant_interpolation(A,epsilon):
+    S = sa_strong_connections(A,epsilon)
+    S = array_split(S.indices,S.indptr[1:-1])
+
+    n = A.shape[0]
+
+    R = set(range(n))
+    j = 0
+
+    aggregates = empty(n,dtype=A.indices.dtype)
+    aggregates[:] = -1
+
+    # Pass #1
+    for i,row in enumerate(S):
+        Ni = set(row) | set([i])
+
+        if Ni.issubset(R):
+            R -= Ni
+            for x in Ni:
+                aggregates[x] = j
+            j += 1
+
+    # Pass #2
+    Old_R = R.copy()
+    for i,row in enumerate(S):
+        if i not in R: continue
         
-##    def check_DAD(self):
-##        """check that method is invariant to symmetric diagonal scaling (A -> DAD)"""
-##
-##        for A,A_candidates in self.cases:
-##            numpy.random.seed(0) #make tests repeatable
-##            
-##            x = rand(A.shape[0])
-##            b = zeros_like(x)
-##
-##            D = diag_sparse(rand(A.shape[0]))
-##            D_inv = diag_sparse(1.0/D.data)
-##            DAD = D*A*D
-##            
-##
-##            if A_candidates is None:
-##                A_candidates = [ ones(A.shape[0]) ]
-##           
-##            DAD_candidates = [ (D_inv * c) for c in A_candidates ]
-##            
-##            ml_A    = smoothed_aggregation_solver(A,     A_candidates, max_coarse=10, max_levels=10, epsilon=0.0)
-##            x_sol_A = ml_A.solve(b, x0=x, maxiter=1, tol=1e-12)
-##
-##            ml_DAD    = smoothed_aggregation_solver(DAD, DAD_candidates, max_coarse=10, max_levels=10, epsilon=0.0)
-##            x_sol_DAD = ml_DAD.solve(b, x0=D*x, maxiter=1, tol=1e-12)
-##            
-##            assert_almost_equal(x_sol_A, D_inv * x_sol_DAD)
+        for x in row:
+            if x not in Old_R:
+                aggregates[i] = aggregates[x]
+                R.remove(i)
+                break
 
+    # Pass #3
+    for i,row in enumerate(S):
+        if i not in R: continue
+        Ni = set(row) | set([i])
 
+        for x in Ni:
+            if x in R:
+                aggregates[x] = j
+        j += 1
+
+    assert(len(R) == 0)
+
+    Pj = aggregates
+    Pp = arange(n+1)
+    Px = ones(n)
+ 
+    return csr_matrix((Px,Pj,Pp))
+
+
+
 if __name__ == '__main__':
     NumpyTest().run()
 

Modified: trunk/scipy/sandbox/multigrid/utils.py
===================================================================
--- trunk/scipy/sandbox/multigrid/utils.py	2007-10-10 18:06:00 UTC (rev 3432)
+++ trunk/scipy/sandbox/multigrid/utils.py	2007-10-11 20:53:54 UTC (rev 3433)
@@ -1,4 +1,5 @@
-__all__ =['approximate_spectral_radius','infinity_norm','diag_sparse']
+__all__ =['approximate_spectral_radius','infinity_norm','diag_sparse',
+          'hstack_csr','vstack_csr']
 
 import numpy
 import scipy
@@ -45,3 +46,23 @@
         return csr_matrix((A,arange(len(A)),arange(len(A)+1)),(len(A),len(A)))
 
 
+def hstack_csr(A,B):
+    #TODO OPTIMIZE THIS
+    assert(A.shape[0] == B.shape[0])
+    A = A.tocoo()
+    B = B.tocoo()
+    I = concatenate((A.row,B.row))
+    J = concatenate((A.col,B.col+A.shape[1]))
+    V = concatenate((A.data,B.data))
+    return coo_matrix((V,(I,J)),dims=(A.shape[0],A.shape[1]+B.shape[1])).tocsr()
+
+def vstack_csr(A,B):
+    #TODO OPTIMIZE THIS
+    assert(A.shape[1] == B.shape[1])
+    A = A.tocoo()
+    B = B.tocoo()
+    I = concatenate((A.row,B.row+A.shape[0]))
+    J = concatenate((A.col,B.col))
+    V = concatenate((A.data,B.data))
+    return coo_matrix((V,(I,J)),dims=(A.shape[0]+B.shape[0],A.shape[1])).tocsr()
+



More information about the Scipy-svn mailing list