[Scipy-svn] r4154 - in trunk/scipy: cluster cluster/tests fftpack integrate integrate/tests io io/arff io/arff/tests io/matlab io/matlab/tests io/tests lib lib/blas lib/lapack lib/lapack/tests linalg linalg/benchmarks linalg/tests ndimage ndimage/tests optimize sandbox/numexpr sparse sparse/linalg sparse/linalg/dsolve sparse/linalg/dsolve/tests sparse/linalg/dsolve/umfpack/tests sparse/linalg/eigen sparse/linalg/eigen/arpack sparse/linalg/eigen/arpack/tests sparse/linalg/eigen/lobpcg sparse/linalg/eigen/lobpcg/tests sparse/linalg/isolve sparse/linalg/isolve/tests sparse/linalg/tests sparse/sparsetools sparse/tests splinalg stats stats/models stats/models/tests stats/tests testing testing/examples weave weave/tests

Sun Apr 20 07:16:11 CDT 2008


Author: jarrod.millman
Date: 2008-04-20 07:15:19 -0500 (Sun, 20 Apr 2008)
New Revision: 4154

Modified:
   trunk/scipy/cluster/hierarchy.py
   trunk/scipy/cluster/tests/test_hierarchy.py
   trunk/scipy/fftpack/setupscons.py
   trunk/scipy/integrate/ode.py
   trunk/scipy/integrate/tests/test_integrate.py
   trunk/scipy/io/__init__.py
   trunk/scipy/io/arff/arffread.py
   trunk/scipy/io/arff/tests/test_data.py
   trunk/scipy/io/data_store.py
   trunk/scipy/io/matlab/mio4.py
   trunk/scipy/io/matlab/tests/test_mio.py
   trunk/scipy/io/mmio.py
   trunk/scipy/io/npfile.py
   trunk/scipy/io/pickler.py
   trunk/scipy/io/tests/test_mmio.py
   trunk/scipy/io/tests/test_recaster.py
   trunk/scipy/lib/blas/scons_support.py
   trunk/scipy/lib/lapack/scons_support.py
   trunk/scipy/lib/lapack/tests/test_lapack.py
   trunk/scipy/lib/setupscons.py
   trunk/scipy/linalg/basic.py
   trunk/scipy/linalg/benchmarks/bench_decom.py
   trunk/scipy/linalg/blas.py
   trunk/scipy/linalg/decomp.py
   trunk/scipy/linalg/iterative.py
   trunk/scipy/linalg/matfuncs.py
   trunk/scipy/linalg/scons_support.py
   trunk/scipy/linalg/tests/test_decomp.py
   trunk/scipy/ndimage/_registration.py
   trunk/scipy/ndimage/_segmenter.py
   trunk/scipy/ndimage/tests/test_segment.py
   trunk/scipy/optimize/slsqp.py
   trunk/scipy/sandbox/numexpr/__init__.py
   trunk/scipy/sparse/base.py
   trunk/scipy/sparse/bsr.py
   trunk/scipy/sparse/compressed.py
   trunk/scipy/sparse/construct.py
   trunk/scipy/sparse/coo.py
   trunk/scipy/sparse/csc.py
   trunk/scipy/sparse/csr.py
   trunk/scipy/sparse/data.py
   trunk/scipy/sparse/dia.py
   trunk/scipy/sparse/dok.py
   trunk/scipy/sparse/info.py
   trunk/scipy/sparse/lil.py
   trunk/scipy/sparse/linalg/dsolve/linsolve.py
   trunk/scipy/sparse/linalg/dsolve/setupscons.py
   trunk/scipy/sparse/linalg/dsolve/tests/test_linsolve.py
   trunk/scipy/sparse/linalg/dsolve/umfpack/tests/test_umfpack.py
   trunk/scipy/sparse/linalg/eigen/arpack/arpack.py
   trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py
   trunk/scipy/sparse/linalg/eigen/lobpcg/info.py
   trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py
   trunk/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py
   trunk/scipy/sparse/linalg/eigen/setup.py
   trunk/scipy/sparse/linalg/eigen/setupscons.py
   trunk/scipy/sparse/linalg/isolve/__init__.py
   trunk/scipy/sparse/linalg/isolve/iterative.py
   trunk/scipy/sparse/linalg/isolve/minres.py
   trunk/scipy/sparse/linalg/isolve/tests/test_iterative.py
   trunk/scipy/sparse/linalg/isolve/utils.py
   trunk/scipy/sparse/linalg/setup.py
   trunk/scipy/sparse/linalg/setupscons.py
   trunk/scipy/sparse/linalg/tests/test_interface.py
   trunk/scipy/sparse/setupscons.py
   trunk/scipy/sparse/sparsetools/__init__.py
   trunk/scipy/sparse/sparsetools/bsr.py
   trunk/scipy/sparse/sparsetools/coo.py
   trunk/scipy/sparse/sparsetools/csc.py
   trunk/scipy/sparse/sparsetools/csr.py
   trunk/scipy/sparse/sparsetools/dia.py
   trunk/scipy/sparse/sparsetools/setupscons.py
   trunk/scipy/sparse/spfuncs.py
   trunk/scipy/sparse/sputils.py
   trunk/scipy/sparse/tests/bench_sparse.py
   trunk/scipy/sparse/tests/test_base.py
   trunk/scipy/sparse/tests/test_construct.py
   trunk/scipy/sparse/tests/test_spfuncs.py
   trunk/scipy/sparse/tests/test_sputils.py
   trunk/scipy/splinalg/__init__.py
   trunk/scipy/stats/mmorestats.py
   trunk/scipy/stats/models/contrast.py
   trunk/scipy/stats/models/formula.py
   trunk/scipy/stats/models/tests/test_formula.py
   trunk/scipy/stats/mstats.py
   trunk/scipy/stats/stats.py
   trunk/scipy/stats/tests/test_mmorestats.py
   trunk/scipy/stats/tests/test_mstats.py
   trunk/scipy/testing/decorators.py
   trunk/scipy/testing/examples/test_foo.py
   trunk/scipy/testing/nosetester.py
   trunk/scipy/testing/nulltester.py
   trunk/scipy/testing/pkgtester.py
   trunk/scipy/testing/utils.py
   trunk/scipy/weave/size_check.py
   trunk/scipy/weave/tests/test_blitz_tools.py
   trunk/scipy/weave/tests/test_c_spec.py
Log:
ran reindent
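
For context: "reindent" presumably refers to the reindent.py script shipped with Python (Tools/scripts), which normalizes indentation, trims trailing spaces and tabs, and removes blank lines at the ends of files; the hunks below are whitespace-only changes of that kind. A minimal sketch of the trailing-whitespace part of that cleanup (not the actual tool, and not part of this commit):

    import os

    def strip_trailing_whitespace(root):
        # Walk a source tree and rewrite any .py file whose lines carry
        # trailing spaces or tabs, reproducing the visible effect of the
        # hunks below.
        for dirpath, dirnames, filenames in os.walk(root):
            for name in filenames:
                if not name.endswith('.py'):
                    continue
                path = os.path.join(dirpath, name)
                f = open(path, 'r')
                lines = f.readlines()
                f.close()
                cleaned = [line.rstrip() + '\n' for line in lines]
                if cleaned != lines:
                    f = open(path, 'w')
                    f.writelines(cleaned)
                    f.close()

    # e.g. strip_trailing_whitespace('trunk/scipy')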


Modified: trunk/scipy/cluster/hierarchy.py
===================================================================
--- trunk/scipy/cluster/hierarchy.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/cluster/hierarchy.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -106,7 +106,7 @@
      The Wolfram Research, Inc. http://reference.wolfram.com/...
      ...mathematica/HierarchicalClustering/tutorial/...
      HierarchicalClustering.html. Accessed October 1, 2007.
-     
+
  [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
      Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
 
@@ -209,7 +209,7 @@
         return numpy.float64(a)
     else:
         return a
-    
+
 def _copy_arrays_if_base_present(T):
     """
     Accepts a tuple of arrays T. Copies the array T[i] if its base array
@@ -219,8 +219,8 @@
     """
     l = [_copy_array_if_base_present(a) for a in T]
     return l
-            
 
+
 def copying():
     """ Displays the license for this package."""
     print _copyingtxt
@@ -286,7 +286,7 @@
 def centroid(y):
     """
     Z = centroid(y)
-    
+
     Performs centroid/UPGMC linkage on the condensed distance matrix Z.
     See linkage for more information on the return structure and
     algorithm.
@@ -297,7 +297,7 @@
 
     Performs centroid/UPGMC linkage on the observation matrix X using
     Euclidean distance as the distance metric. See linkage for more
-    information on the return structure and algorithm.    
+    information on the return structure and algorithm.
 
     """
     return linkage(y, method='centroid', metric='euclidean')
@@ -314,7 +314,7 @@
 
     Performs median/WPGMC linkage on the observation matrix X using
     Euclidean distance as the distance metric. See linkage for more
-    information on the return structure and algorithm.    
+    information on the return structure and algorithm.
 
       (a condensed alias for linkage)
     """
@@ -323,7 +323,7 @@
 def ward(y):
     """
     Z = ward(y)
-    
+
     Performs Ward's linkage on the condensed distance matrix Z. See
     linkage for more information on the return structure and algorithm.
 
@@ -331,13 +331,13 @@
 
     Performs Ward's linkage on the observation matrix X using Euclidean
     distance as the distance metric. See linkage for more information
-    on the return structure and algorithm.    
+    on the return structure and algorithm.
 
       (a condensed alias for linkage)
     """
     return linkage(y, method='ward', metric='euclidean')
 
-      
+
 def linkage(y, method='single', metric='euclidean'):
     """ Z = linkage(y, method)
 
@@ -367,11 +367,11 @@
           A distance matrix is maintained at each iteration. The
           d[i,j] entry corresponds to the distance between cluster
           i and j in the original forest.
-          
+
           At each iteration, the algorithm must update the distance
           matrix to reflect the distance of the newly formed cluster
           u with the remaining clusters in the forest.
-          
+
           Suppose there are |u| original observations u[0], ..., u[|u|-1]
           in cluster u and |v| original objects v[0], ..., v[|v|-1]
           in cluster v. Recall s and t are combined to form cluster
@@ -380,7 +380,7 @@
 
           The following are methods for calculating the distance between
           the newly formed cluster u and each v.
-        
+
             * method='single' assigns dist(u,v) = MIN(dist(u[i],v[j])
               for all points i in cluster u and j in cluster v.
 
@@ -402,7 +402,7 @@
            * method='weighted' assigns
 
                dist(u,v) = (dist(s,v) + dist(t,v))/2
-               
+
              where cluster u was formed with cluster s and t and v
              is a remaining cluster in the forest. (also called WPGMA)
 
@@ -422,11 +422,11 @@
              the Euclidean distance between the centroid of u and the
              centroid of a remaining cluster v in the forest.
              (also called UPGMC)
- 
+
            * method='median' assigns dist(s,t) as above. When two clusters
              s and t are combined into a new cluster u, the average of
              centroids s and t give the new centroid u. (also called WPGMC)
-           
+
            * method='ward' uses the Ward variance minimization algorithm.
              The new entry dist(u, v) is computed as follows,
 
@@ -452,7 +452,7 @@
 
     if type(y) != _array_type:
         raise TypeError("Argument 'y' must be a numpy array.")
-    
+
     s = y.shape
     if len(s) == 1:
         is_valid_y(y, throw=True, name='y')
@@ -517,7 +517,7 @@
     def getId(self):
         """
         i = nd.getId()
-        
+
         Returns the id number of the node nd. For 0 <= i < n, i
         corresponds to original observation i. For n <= i < 2n - 1,
         i corresponds to non-singleton cluster formed at iteration i-n.
@@ -560,24 +560,24 @@
     def preOrder(self, func=(lambda x: x.id)):
         """
         vlst = preOrder(func)
-    
+
           Performs preorder traversal without recursive function calls.
           When a leaf node is first encountered, func is called with the
           leaf node as its argument, and its result is appended to the
           list vlst.
-    
+
           For example, the statement
-        
+
             ids = root.preOrder(lambda x: x.id)
-    
+
           returns a list of the node ids corresponding to the leaf
           nodes of the tree as they appear from left to right.
         """
-    
+
         # Do a preorder traversal, caching the result. To avoid having to do
         # recursion, we'll store the previous index we've visited in a vector.
         n = self.count
-        
+
         curNode = [None] * (2 * n)
         lvisited = numpy.zeros((2 * n,), dtype='bool')
         rvisited = numpy.zeros((2 * n,), dtype='bool')
@@ -603,7 +603,7 @@
                 # node already, go up in the tree.
                 else:
                     k = k - 1
-            
+
         return preorder
 
 _cnode_bare = cnode(0)
@@ -612,11 +612,11 @@
 def totree(Z, rd=False):
     """
     r = totree(Z)
-    
+
       Converts a hierarchical clustering encoded in the matrix Z
       (by linkage) into an easy-to-use tree object. The reference r
       to the root cnode object is returned.
-    
+
       Each cnode object has a left, right, dist, id, and count
       attribute. The left and right attributes point to cnode
       objects that were combined to generate the cluster. If
@@ -638,7 +638,7 @@
     """
 
     is_valid_linkage(Z, throw=True, name='Z')
-    
+
     # The number of original objects is equal to the number of rows minus
     # 1.
     n = Z.shape[0] + 1
@@ -690,7 +690,7 @@
     ... = squareform(...)
 
     Converts a vector-form distance vector to a square-form distance
-    matrix, and vice-versa. 
+    matrix, and vice-versa.
 
     v = squareform(X)
 
@@ -719,7 +719,7 @@
     ignored any way so they do not disrupt the squareform
     transformation.
     """
-    
+
     if type(X) is not _array_type:
         raise TypeError('The parameter passed must be an array.')
 
@@ -739,7 +739,7 @@
         # Check that v is of valid dimensions.
         if d * (d - 1) / 2 != int(s[0]):
             raise ValueError('Incompatible vector size. It must be a binomial coefficient n choose 2 for some integer n >= 2.')
-        
+
         # Allocate memory for the distance matrix.
         M = numpy.zeros((d, d), 'double')
 
@@ -766,7 +766,7 @@
 
         # One-side of the dimensions is set here.
         d = s[0]
-        
+
         # Create a vector.
         v = numpy.zeros(((d * (d - 1) / 2),), 'double')
 
@@ -785,7 +785,7 @@
 def minkowski(u, v, p):
     """
     d = minkowski(u, v, p)
-    
+
       Returns the Minkowski distance between two vectors u and v,
 
         ||u-v||_p = (\sum {|u_i - v_i|^p})^(1/p).
@@ -797,7 +797,7 @@
 def euclidean(u, v):
     """
     d = euclidean(u, v)
-    
+
       Computes the Euclidean distance between two n-vectors u and v, ||u-v||_2
     """
     q=numpy.matrix(u-v)
@@ -825,7 +825,7 @@
 def correlation(u, v):
     """
     d = correlation(u, v)
-    
+
       Computes the correlation distance between two n-vectors u and v,
 
             1 - (u - n|u|_1)(v - n|v|_1)^T
@@ -846,7 +846,7 @@
 def hamming(u, v):
     """
     d = hamming(u, v)
-    
+
       Computes the Hamming distance between two n-vectors u and v,
       which is simply the proportion of disagreeing components in u
       and v. If u and v are boolean vectors, the hamming distance is
@@ -904,7 +904,7 @@
 def seuclidean(u, v, V):
     """
     d = seuclidean(u, v, V)
-    
+
       Returns the standardized Euclidean distance between two
       n-vectors u and v. V is a m-dimensional vector of component
       variances. It is usually computed among a larger collection vectors.
@@ -925,7 +925,7 @@
 def mahalanobis(u, v, VI):
     """
     d = mahalanobis(u, v, VI)
-    
+
       Computes the Mahalanobis distance between two n-vectors u and v,
         (u-v)VI(u-v)^T
       where VI is the inverse covariance matrix.
@@ -937,7 +937,7 @@
 def chebyshev(u, v):
     """
     d = chebyshev(u, v)
-    
+
       Computes the Chebyshev distance between two n-vectors u and v,
         \max {|u_i-v_i|}.
     """
@@ -946,7 +946,7 @@
 def braycurtis(u, v):
     """
     d = braycurtis(u, v)
-    
+
       Computes the Bray-Curtis distance between two n-vectors u and v,
         \sum{|u_i-v_i|} / \sum{|u_i+v_i|}.
     """
@@ -1018,7 +1018,7 @@
 def dice(u, v):
     """
     d = dice(u, v)
-    
+
       Computes the Dice dissimilarity between two boolean n-vectors
       u and v, which is
 
@@ -1039,7 +1039,7 @@
 def rogerstanimoto(u, v):
     """
     d = rogerstanimoto(u, v)
-    
+
       Computes the Rogers-Tanimoto dissimilarity between two boolean
       n-vectors u and v,
 
@@ -1062,7 +1062,7 @@
 def russellrao(u, v):
     """
     d = russellrao(u, v)
-    
+
       Computes the Russell-Rao dissimilarity between two boolean n-vectors
       u and v, (n - c_{TT}) / n where c_{ij} is the number of occurrences
       of u[k] == i and v[k] == j for k < n.
@@ -1121,7 +1121,7 @@
 
 def pdist(X, metric='euclidean', p=2, V=None, VI=None):
     """ Y = pdist(X, method='euclidean', p=2)
-    
+
            Computes the distance between m original observations in
            n-dimensional space. Returns a condensed distance matrix Y.
            For each i and j (i<j), the metric dist(u=X[i], v=X[j]) is
@@ -1163,7 +1163,7 @@
         6. Y = pdist(X, 'cosine')
 
           Computes the cosine distance between vectors u and v,
-        
+
                1 - uv^T
              -----------
              |u|_2 |v|_2
@@ -1270,7 +1270,7 @@
           boolean vectors. (see sokalsneath function documentation)
 
         21. Y = pdist(X, f)
-        
+
           Computes the distance between all pairs of vectors in X
           using the user supplied 2-arity function f. For example,
           Euclidean distance between the vectors could be computed
@@ -1297,7 +1297,7 @@
 #           using the distance metric Y but with a more succint,
 #           verifiable, but less efficient implementation.
 
-    
+
     if type(X) is not _array_type:
         raise TypeError('The parameter passed must be an array.')
 
@@ -1367,7 +1367,7 @@
             else:
                 raise TypeError('Invalid input array value type %s for jaccard.' % str(X.dtype))
         elif mstr in set(['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']):
-            _hierarchy_wrap.pdist_chebyshev_wrap(X, dm)            
+            _hierarchy_wrap.pdist_chebyshev_wrap(X, dm)
         elif mstr in set(['minkowski', 'mi', 'm']):
             _hierarchy_wrap.pdist_minkowski_wrap(X, dm, p)
         elif mstr in set(['seuclidean', 'se', 's']):
@@ -1511,7 +1511,7 @@
     (c, d) = cophenet(Z, Y, [])
 
       Also returns the cophenetic distance matrix in condensed form.
-      
+
     """
     nargs = len(args)
 
@@ -1535,7 +1535,7 @@
     Y = args[1]
     Ys = Y.shape
     is_valid_y(Y, throw=True, name='Y')
-    
+
     z = zz.mean()
     y = Y.mean()
     Yy = Y - y
@@ -1555,7 +1555,7 @@
 def inconsistent(Z, d=2):
     """
     R = inconsistent(Z, d=2)
-    
+
       Calculates statistics on links up to d levels below each
       non-singleton cluster defined in the (n-1)x4 linkage matrix Z.
 
@@ -1587,17 +1587,17 @@
 
     _hierarchy_wrap.inconsistent_wrap(Z, R, int(n), int(d));
     return R
-    
+
 def from_mlab_linkage(Z):
     """
     Z2 = from_mlab_linkage(Z)
-    
+
     Converts a linkage matrix Z generated by MATLAB(TM) to a new linkage
     matrix Z2 compatible with this module. The conversion does two
     things:
 
      * the indices are converted from 1..N to 0..(N-1) form, and
-    
+
      * a fourth column Z[:,3] is added where Z[i,3] is equal to
        the number of original observations (leaves) in the non-singleton
        cluster i.
@@ -1625,13 +1625,13 @@
     1..N indexing.
     """
     is_valid_linkage(Z, throw=True, name='Z')
-    
+
     return numpy.hstack([Z[:,0:2] + 1, Z[:,2]])
 
 def is_monotonic(Z):
     """
     is_monotonic(Z)
-    
+
       Returns True if the linkage Z is monotonic. The linkage is monotonic
       if for every cluster s and t joined, the distance between them is
       no less than the distance between any previously joined clusters.
@@ -1644,7 +1644,7 @@
 def is_valid_im(R, warning=False, throw=False, name=None):
     """
     is_valid_im(R)
-    
+
       Returns True if the inconsistency matrix passed is valid. It must
       be a n by 4 numpy array of doubles. The standard deviations R[:,1]
       must be nonnegative. The link counts R[:,2] must be positive and
@@ -1805,7 +1805,7 @@
 def is_valid_dm(D, t=0.0):
     """
     is_valid_dm(D)
-    
+
       Returns True if the variable D passed is a valid distance matrix.
       Distance matrices must be 2-dimensional numpy arrays containing
       doubles. They must have a zero-diagonal, and they must be symmetric.
@@ -1889,7 +1889,7 @@
 def numobs_dm(D):
     """
     numobs_dm(D)
-    
+
       Returns the number of original observations that correspond to a
       square, non-condensed distance matrix D.
     """
@@ -1899,7 +1899,7 @@
 def numobs_y(Y):
     """
     numobs_y(Y)
-    
+
       Returns the number of original observations that correspond to a
       condensed distance matrix Y.
     """
@@ -1910,7 +1910,7 @@
 def Z_y_correspond(Z, Y):
     """
     yesno = Z_y_correspond(Z, Y)
-    
+
       Returns True if a linkage matrix Z and condensed distance matrix
       Y could possibly correspond to one another. They must have the same
       number of original observations. This function is useful as a sanity
@@ -1931,7 +1931,7 @@
       original observation i belongs.
 
       The criterion parameter can be any of the following values,
-      
+
         * 'inconsistent': If a cluster node and all its decendents have an
         inconsistent value less than or equal to c then all its leaf
         descendents belong to the same flat cluster. When no non-singleton
@@ -1968,19 +1968,19 @@
         cluster node c when monocrit[i] <= r for all cluster indices i below
         and including c. r is minimized such that no more than t flat clusters
         are formed. monocrit must be monotonic.
-        
+
         For example, to minimize the threshold t on maximum inconsistency
         values so that no more than 3 flat clusters are formed, do:
 
           MI = maxinconsts(Z, R)
           cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
-        
+
     """
     is_valid_linkage(Z, throw=True, name='Z')
 
     n = Z.shape[0] + 1
     T = numpy.zeros((n,), dtype='int32')
-    
+
     # Since the C code does not support striding using strides.
     # The dimensions are used instead.
     [Z] = _copy_arrays_if_base_present([Z])
@@ -2033,12 +2033,12 @@
       specified.
 
       Named parameters are described below.
-      
+
         criterion:  specifies the criterion for forming flat clusters.
                     Valid values are 'inconsistent', 'distance', or
                     'maxclust' cluster formation algorithms. See
                     cluster for descriptions.
-           
+
         method:     the linkage method to use. See linkage for
                     descriptions.
 
@@ -2046,7 +2046,7 @@
                     distances. See pdist for descriptions and
                     linkage to verify compatibility with the linkage
                     method.
-                     
+
         t:          the cut-off threshold for the cluster function or
                     the maximum number of clusters (criterion='maxclust').
 
@@ -2083,15 +2083,15 @@
     _hierarchy_wrap.prelist_wrap(Z, ML, int(n))
     return ML
 
-# Let's do a conditional import. If matplotlib is not available, 
+# Let's do a conditional import. If matplotlib is not available,
 try:
-    
+
     import matplotlib
     import matplotlib.pylab
     import matplotlib.patches
     #import matplotlib.collections
     _mpl = True
-    
+
     # Maps number of leaves to text size.
     #
     # p <= 20, size="12"
@@ -2182,7 +2182,7 @@
             if leaf_font_size:
                 matplotlib.pylab.setp(lbls, 'size', leaf_font_size)
             else:
-                matplotlib.pylab.setp(lbls, 'size', float(_get_tick_text_size(p)))    
+                matplotlib.pylab.setp(lbls, 'size', float(_get_tick_text_size(p)))
             axis.xaxis.set_ticks_position('top')
             # Make the tick marks invisible because they cover up the links
             for line in axis.get_xticklines():
@@ -2272,7 +2272,7 @@
                     e.set_clip_box(axis.bbox)
                     e.set_alpha(0.5)
                     e.set_facecolor('k')
-                    
+
                 #matplotlib.pylab.plot(xs, ys, 'go', markeredgecolor='k', markersize=3)
 
                 #matplotlib.pylab.plot(ys, xs, 'go', markeredgecolor='k', markersize=3)
@@ -2377,7 +2377,7 @@
 
         * 'top': plots the root at the top, and plot descendent
           links going downwards. (default).
-           
+
         * 'bottom': plots the root at the bottom, and plot descendent
           links going upwards.
 
@@ -2396,7 +2396,7 @@
 
         When labels=None, the index of the original observation is used
         used.
-        
+
     R = dendrogram(..., count_sort)
 
         When plotting a cluster node and its directly descendent links,
@@ -2405,7 +2405,7 @@
         of count_sort are:
 
           * False: nothing is done.
-          
+
           * 'ascending'/True: the child with the minimum number of
           original objects in its cluster is plotted first.
 
@@ -2464,7 +2464,7 @@
         When a callable function is passed, leaf_label_func is passed
         cluster index k, and returns a string with the label for the
         leaf.
-        
+
         Indices k < n correspond to original observations while indices
         k >= n correspond to non-singleton clusters.
 
@@ -2618,7 +2618,7 @@
             if show_leaf_counts:
                 ivl.append("(" + str(int(Z[i-n, 3])) + ")")
             else:
-                ivl.append("")   
+                ivl.append("")
 
 def _append_contraction_marks(Z, iv, i, n, contraction_marks):
     _append_contraction_marks_sub(Z, iv, Z[i-n, 0], n, contraction_marks)
@@ -2629,8 +2629,8 @@
         contraction_marks.append((iv, Z[i-n, 2]))
         _append_contraction_marks_sub(Z, iv, Z[i-n, 0], n, contraction_marks)
         _append_contraction_marks_sub(Z, iv, Z[i-n, 1], n, contraction_marks)
-        
 
+
 def _dendrogram_calculate_info(Z, p, truncate_mode, \
                                colorthreshold=scipy.inf, get_leaves=True, \
                                orientation='top', labels=None, \
@@ -2649,7 +2649,7 @@
     variable value to plot the left-most leaf node below the root node i
     (if orientation='top', this would be the left-most x value where the
     plotting of this root node i and its descendents should begin).
-    
+
     ivl is a list to store the labels of the leaf nodes. The leaf_label_func
     is called whenever ivl != None, labels == None, and
     leaf_label_func != None. When ivl != None and labels != None, the
@@ -2668,7 +2668,7 @@
 
       * left is the independent variable coordinate of the center of the
         the U of the subtree
-        
+
       * w is the amount of space used for the subtree (in independent
         variable units)
 
@@ -2676,7 +2676,7 @@
 
       * md is the max(Z[*,2]) for all nodes * below and including
         the target node.
-    
+
     """
     if n == 0:
         raise ValueError("Invalid singleton cluster count n.")
@@ -2712,7 +2712,7 @@
     elif truncate_mode in ('mlab',):
         pass
 
-    
+
     # Otherwise, only truncate if we have a leaf node.
     #
     # If the truncate_mode is mlab, the linkage has been modified
@@ -2900,7 +2900,7 @@
         else:
             d[T1[i]] = T2[i]
     return True
-    
+
 def maxdists(Z):
     """
     MD = maxdists(Z)
@@ -2910,12 +2910,12 @@
       and including the node with index i. More specifically,
       MD[i] = Z[Q(i)-n, 2].max() where Q(i) is the set of all node indices
       below and including node i.
-      
+
       Note that when Z[:,2] is monotonic, Z[:,2] and MD should not differ.
       See linkage for more information on this issue.
     """
     is_valid_linkage(Z, throw=True, name='Z')
-    
+
     n = Z.shape[0] + 1
     MD = numpy.zeros((n-1,))
     [Z] = _copy_arrays_if_base_present([Z])
@@ -2933,7 +2933,7 @@
     """
     is_valid_linkage(Z, throw=True, name='Z')
     is_valid_im(R, throw=True, name='R')
-    
+
     n = Z.shape[0] + 1
     MI = numpy.zeros((n-1,))
     [Z, R] = _copy_arrays_if_base_present([Z, R])
@@ -2989,7 +2989,7 @@
     is_valid_linkage(Z, throw=True, name='Z')
     if len(T) != Z.shape[0] + 1:
         raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
-    
+
     Cl = numpy.unique(T)
     kk = len(Cl)
     L = numpy.zeros((kk,), dtype='int32')

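The reindented docstrings above describe the pdist/linkage workflow in scipy.cluster.hierarchy. A minimal usage sketch based on those signatures, assuming the module layout at this revision (pdist, squareform and totree still live in scipy.cluster.hierarchy here); the random data is made up for illustration:

    import numpy
    from scipy.cluster.hierarchy import pdist, squareform, linkage, totree

    X = numpy.random.rand(10, 3)          # 10 observations in 3 dimensions
    Y = pdist(X, metric='euclidean')      # condensed distance vector, 10*9/2 entries
    D = squareform(Y)                     # redundant 10 x 10 distance matrix
    Z = linkage(Y, method='single')       # (n-1) x 4 linkage matrix
    root = totree(Z)                      # cnode tree; root.count == 10
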
Modified: trunk/scipy/cluster/tests/test_hierarchy.py
===================================================================
--- trunk/scipy/cluster/tests/test_hierarchy.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/cluster/tests/test_hierarchy.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -368,7 +368,7 @@
         Y_test1 = pdist(X, 'minkowski', 3.2)
         #print "minkowski", numpy.abs(Y_test1 - Y_right).max()
         self.failUnless(within_tol(Y_test1, Y_right, eps))
-        
+
     def test_pdist_minkowski_random_nonC(self):
         "Tests pdist(X, 'test_minkowski') [the non-C implementation] on random data."
         eps = 1e-05
@@ -388,7 +388,7 @@
         Y_test1 = pdist(X, 'minkowski', 3.2)
         #print "minkowski-iris-3.2", numpy.abs(Y_test1 - Y_right).max()
         self.failUnless(within_tol(Y_test1, Y_right, eps))
-        
+
     def test_pdist_minkowski_iris_nonC(self):
         "Tests pdist(X, 'test_minkowski') [the non-C implementation] on iris data."
         eps = 1e-07
@@ -408,7 +408,7 @@
         Y_test1 = pdist(X, 'minkowski', 5.8)
         #print "minkowski-iris-5.8", numpy.abs(Y_test1 - Y_right).max()
         self.failUnless(within_tol(Y_test1, Y_right, eps))
-        
+
     def test_pdist_minkowski_iris_nonC(self):
         "Tests pdist(X, 'test_minkowski') [the non-C implementation] on iris data."
         eps = 1e-07

Modified: trunk/scipy/fftpack/setupscons.py
===================================================================
--- trunk/scipy/fftpack/setupscons.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/fftpack/setupscons.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -10,7 +10,7 @@
 
     config.add_sconscript('SConstruct')
     config.add_data_dir('tests')
-    
+
     return config
 
 if __name__ == '__main__':

Modified: trunk/scipy/integrate/ode.py
===================================================================
--- trunk/scipy/integrate/ode.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/integrate/ode.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -8,7 +8,7 @@
     d y(t)[i]
     ---------  = f(t,y(t))[i],
        d t
-    
+
     y(t=0)[i] = y0[i],
 
 where::
@@ -20,7 +20,7 @@
 
 A generic interface class to numeric integrators. It has the following
 methods::
-  
+
     integrator = ode(f,jac=None)
     integrator = integrator.set_integrator(name,**params)
     integrator = integrator.set_initial_value(y0,t0=0.0)
@@ -108,22 +108,22 @@
 #   To wrap cvode to Python, one must write extension module by
 #   hand. Its interface is too much 'advanced C' that using f2py
 #   would be too complicated (or impossible).
-# 
+#
 # How to define a new integrator:
 # ===============================
-# 
+#
 # class myodeint(IntegratorBase):
-# 
+#
 #     runner = <odeint function> or None
-# 
+#
 #     def __init__(self,...):                           # required
 #         <initialize>
-# 
+#
 #     def reset(self,n,has_jac):                        # optional
 #         # n - the size of the problem (number of equations)
 #         # has_jac - whether user has supplied its own routine for Jacobian
 #         <allocate memory,initialize further>
-# 
+#
 #     def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
 #         # this method is called to integrate from t=t0 to t=t1
 #         # with initial condition y0. f and jac are user-supplied functions
@@ -134,11 +134,11 @@
 #         if <calculation was unsuccesful>:
 #             self.success = 0
 #         return t1,y1
-# 
+#
 #     # In addition, one can define step() and run_relax() methods (they
 #     # take the same arguments as run()) if the integrator can support
 #     # these features (see IntegratorBase doc strings).
-# 
+#
 # if myodeint.runner:
 #     IntegratorBase.integrator_classes.append(myodeint)
 
@@ -158,7 +158,7 @@
 A generic interface class to numeric integrators.
 
 See also
---------    
+--------
 odeint : an integrator with a simpler interface based on lsoda from ODEPACK
 quad : for finding the area under a curve
 
@@ -533,7 +533,7 @@
         rwork[5] = self.max_step
         rwork[6] = self.min_step
         self.rwork = rwork
-        
+
         iwork = zeros((liw,), int32)
         if self.ml is not None:
             iwork[0] = self.ml
@@ -543,7 +543,7 @@
         iwork[5] = self.nsteps
         iwork[6] = 2           # mxhnil
         self.iwork = iwork
-        
+
         self.call_args = [self.rtol,self.atol,1,1,
                           self.zwork,self.rwork,self.iwork,mf]
         self.success = 1

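The comments reindented above spell out the generic ode interface (set_integrator, set_initial_value, integrate). A minimal sketch of that calling pattern, with a made-up right-hand side and tolerances, assuming the 'vode' backend available in this module:

    from scipy.integrate import ode

    def f(t, y):
        return [-0.5 * y[0]]              # dy/dt = f(t, y)

    r = ode(f).set_integrator('vode', method='adams', rtol=1e-6)
    r.set_initial_value([1.0], 0.0)       # y(0) = 1
    while r.successful() and r.t < 5.0:
        r.integrate(r.t + 1.0)            # advance the solution one time unit
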
Modified: trunk/scipy/integrate/tests/test_integrate.py
===================================================================
--- trunk/scipy/integrate/tests/test_integrate.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/integrate/tests/test_integrate.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -35,7 +35,7 @@
     Check integrate.ode
     """
     def _do_problem(self, problem, integrator, method='adams'):
-        
+
         # ode has callback arguments in different order than odeint
         f = lambda t, z: problem.f(z, t)
         jac = None

Modified: trunk/scipy/io/__init__.py
===================================================================
--- trunk/scipy/io/__init__.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/io/__init__.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -16,17 +16,17 @@
      convert_objectarray
 
 fread = deprecate_with_doc("""
-scipy.io.fread is can be replaced with raw reading capabilities of NumPy 
-including fromfile as well as memory-mapping capabilities.  
+scipy.io.fread is can be replaced with raw reading capabilities of NumPy
+including fromfile as well as memory-mapping capabilities.
 """)(fread)
 
 fwrite = deprecate_with_doc("""
 scipy.io.fwrite can be replaced with raw writing capabilities of
 NumPy.  Also, remember that files can be directly memory-mapped into NumPy
-arrays which is often a better way of reading especially large files. 
+arrays which is often a better way of reading especially large files.
 
 Look at the tofile methods as well as save and savez for writing arrays into
-easily transported files of data.  
+easily transported files of data.
 """)(fwrite)
 
 bswap = deprecate_with_doc("""
@@ -54,7 +54,7 @@
 unpackbits = deprecate_with_doc("""
 The functionality of scipy.io.unpackbits is now available in numpy.unpackbits
 The calling convention is different however as the 2-d case is no longer
-specialized. 
+specialized.
 
 Thus, the scipy.unpackbits behavior must be simulated using numpy.unpackbits.
 

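The deprecation messages above point to plain NumPy replacements for fread and fwrite. A short sketch of those replacements (numpy.fromfile, ndarray.tofile and numpy.memmap); the file name is made up:

    import numpy

    a = numpy.arange(10, dtype=numpy.float64)
    a.tofile('data.bin')                                         # raw write, replaces fwrite
    b = numpy.fromfile('data.bin', dtype=numpy.float64)          # raw read, replaces fread
    m = numpy.memmap('data.bin', dtype=numpy.float64, mode='r')  # memory-mapped view
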
Modified: trunk/scipy/io/arff/arffread.py
===================================================================
--- trunk/scipy/io/arff/arffread.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/io/arff/arffread.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -12,7 +12,7 @@
 
 __all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError']
 
-# An Arff file is basically two parts: 
+# An Arff file is basically two parts:
 #   - header
 #   - data
 #
@@ -42,7 +42,7 @@
 r_comattrval = re.compile(r"'(..+)'\s+(..+$)")
 # To get attributes name enclosed with '', possibly spread accross multilines
 r_mcomattrval = re.compile(r"'([..\n]+)'\s+(..+$)")
-# To get normal attributes 
+# To get normal attributes
 r_wcomattrval = re.compile(r"(\S+)\s+(..+$)")
 
 #-------------------------
@@ -61,7 +61,7 @@
 # An attribute  is defined as @attribute name value
 def parse_type(attrtype):
     """Given an arff attribute value (meta data), returns its type.
-    
+
     Expect the value to be a name."""
     uattribute = attrtype.lower().strip()
     if uattribute[0] == '{':
@@ -83,7 +83,7 @@
 def get_nominal(attribute):
     """If attribute is nominal, returns a list of the values"""
     return attribute.split(',')
-        
+
 def read_data_list(ofile):
     """Read each line of the iterable and put it in a list."""
     data = [ofile.next()]
@@ -105,9 +105,9 @@
 def maxnomlen(atrv):
     """Given a string contening a nominal type definition, returns the string
     len of the biggest component.
-    
+
     A nominal type is defined as seomthing framed between brace ({}).
-    
+
     Example: maxnomlen("{floup, bouga, fl, ratata}") returns 6 (the size of
     ratata, the longest nominal value)."""
     nomtp = get_nom_val(atrv)
@@ -115,10 +115,10 @@
 
 def get_nom_val(atrv):
     """Given a string contening a nominal type, returns a tuple of the possible
-    values.    
-    
+    values.
+
     A nominal type is defined as something framed between brace ({}).
-    
+
     Example: get_nom_val("{floup, bouga, fl, ratata}") returns ("floup",
     "bouga", "fl", "ratata")."""
     r_nominal = re.compile('{(..+)}')
@@ -130,7 +130,7 @@
 
 def go_data(ofile):
     """Skip header.
-    
+
     the first next() call of the returned iterator will be the @data line"""
     return itertools.dropwhile(lambda x : not r_datameta.match(x), ofile)
 
@@ -139,18 +139,18 @@
 #----------------
 def tokenize_attribute(iterable, attribute):
     """Parse a raw string in header (eg starts by @attribute).
-    
+
     Given a raw string attribute, try to get the name and type of the
     attribute. Constraints:
         - The first line must start with @attribute (case insensitive, and
           space like characters begore @attribute are allowed)
-        - Works also if the attribute is spread on multilines. 
+        - Works also if the attribute is spread on multilines.
         - Works if empty lines or comments are in between
-    
+
     :Parameters:
         attribute : str
-            the attribute string. 
-    
+            the attribute string.
+
     :Returns:
         name : str
             name of the attribute
@@ -205,7 +205,7 @@
     else:
         raise ValueError("Cannot parse attribute names spread over multi "\
                         "lines yet")
-    
+
 def tokenize_single_comma(val):
     # XXX we match twice the same string (here and at the caller level). It is
     # stupid, but it is easier for now...
@@ -299,7 +299,7 @@
 
 class MetaData:
     """Small container to keep useful informations on a ARFF dataset.
-    
+
     Knows about attributes names and types.
 
     :Example:
@@ -318,7 +318,7 @@
         Also maintains the list of attributes in order, i.e. doing for i in
         meta, where meta is an instance of MetaData, will return the different
         attribute names in the order they were defined.
-    
+
     """
     def __init__(self, rel, attr):
         self.name = rel
@@ -343,7 +343,7 @@
                 msg += ", range is %s" % str(self._attributes[i][1])
             msg += '\n'
         return msg
-    
+
     def __iter__(self):
         return iter(self._attrnames)
 
@@ -386,7 +386,7 @@
     """
     ofile = open(filename)
 
-    # Parse the header file 
+    # Parse the header file
     try:
         rel, attr = read_header(ofile)
     except ValueError, e:
@@ -459,9 +459,9 @@
 
     def generator(row_iter, delim = ','):
         # TODO: this is where we are spending times (~80%). I think things
-        # could be made more efficiently: 
+        # could be made more efficiently:
         #   - We could for example "compile" the function, because some values
-        #   do not change here. 
+        #   do not change here.
         #   - The function to convert a line to dtyped values could also be
         #   generated on the fly from a string and be executed instead of
         #   looping.

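loadarff is the public entry point for the parsing helpers reindented above; it returns a record array together with a MetaData container. A minimal sketch, with a placeholder file name:

    from scipy.io.arff import loadarff

    data, meta = loadarff('some_dataset.arff')   # placeholder file name
    names = [name for name in meta]              # MetaData yields attribute names in order
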
Modified: trunk/scipy/io/arff/tests/test_data.py
===================================================================
--- trunk/scipy/io/arff/tests/test_data.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/io/arff/tests/test_data.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -11,8 +11,8 @@
 
 test4 = os.path.join(data_path, 'test4.arff')
 test5 = os.path.join(data_path, 'test5.arff')
-expect4_data = [(0.1, 0.2, 0.3, 0.4, 'class1'), 
-        (-0.1, -0.2, -0.3, -0.4, 'class2'), 
+expect4_data = [(0.1, 0.2, 0.3, 0.4, 'class1'),
+        (-0.1, -0.2, -0.3, -0.4, 'class2'),
         (1, 2, 3, 4, 'class3')]
 
 missing = os.path.join(data_path, 'missing.arff')

Modified: trunk/scipy/io/data_store.py
===================================================================
--- trunk/scipy/io/data_store.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/io/data_store.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -15,7 +15,7 @@
     1
 """
 
-__all__ = ['save_as_module', 
+__all__ = ['save_as_module',
            # The rest of these are all deprecated
            'save', 'create_module',
            'create_shelf', 'load']

Modified: trunk/scipy/io/matlab/mio4.py
===================================================================
--- trunk/scipy/io/matlab/mio4.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/io/matlab/mio4.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -168,13 +168,13 @@
         dims = res[-1,0:2]
         I = N.ascontiguousarray(tmp[:,0],dtype='intc') #fixes byte order also
         J = N.ascontiguousarray(tmp[:,1],dtype='intc')
-        I -= 1  # for 1-based indexing 
+        I -= 1  # for 1-based indexing
         J -= 1
         if res.shape[1] == 3:
             V = N.ascontiguousarray(tmp[:,2],dtype='float')
         else:
             V = N.ascontiguousarray(tmp[:,2],dtype='complex')
-            V.imag = tmp[:,3] 
+            V.imag = tmp[:,3]
         if have_sparse:
             return scipy.sparse.coo_matrix((V,(I,J)), dims)
         return (dims, I, J, V)

Modified: trunk/scipy/io/matlab/tests/test_mio.py
===================================================================
--- trunk/scipy/io/matlab/tests/test_mio.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/io/matlab/tests/test_mio.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -238,5 +238,3 @@
         expected = case['expected']
         format = case in case_table4 and '4' or '5'
         yield _make_rt_check_case, name, expected, format
-
-

Modified: trunk/scipy/io/mmio.py
===================================================================
--- trunk/scipy/io/mmio.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/io/mmio.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -124,7 +124,7 @@
     SYMMETRY_SYMMETRIC      = 'symmetric'
     SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric'
     SYMMETRY_HERMITIAN      = 'hermitian'
-    SYMMETRY_VALUES = ( SYMMETRY_GENERAL,        SYMMETRY_SYMMETRIC, 
+    SYMMETRY_VALUES = ( SYMMETRY_GENERAL,        SYMMETRY_SYMMETRIC,
                         SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN)
 
     @classmethod
@@ -217,7 +217,7 @@
                     stream = bz2.BZ2File(filespec, 'r')
                 else:
                     stream = open(filespec, mode)
-     
+
             # open for writing
             else:
                 if filespec[-4:] != '.mtx':
@@ -257,7 +257,7 @@
     @staticmethod
     def _field_template(field, precision):
         return {
-          MMFile.FIELD_REAL: '%%.%ie\n' % precision, 
+          MMFile.FIELD_REAL: '%%.%ie\n' % precision,
           MMFile.FIELD_INTEGER: '%i\n',
           MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' % (precision,precision)
         }.get(field, None)
@@ -296,7 +296,7 @@
         attrs = self.__class__.__slots__
         public_attrs = [attr[1:] for attr in attrs]
         invalid_keys = set(kwargs.keys()) - set(public_attrs)
-       
+
         if invalid_keys:
             raise ValueError, \
               'found %s invalid keyword arguments, please only use %s' % \
@@ -395,10 +395,10 @@
             except:
                 # fallback - fromfile fails for some file-like objects
                 flat_data = fromstring(stream.read(), sep=' ')
-                
+
                 # TODO use iterator (e.g. xreadlines) to avoid reading
                 # the whole file into memory
-            
+
             if is_pattern:
                 flat_data = flat_data.reshape(-1,2)
                 I = ascontiguousarray(flat_data[:,0], dtype='intc')

Modified: trunk/scipy/io/npfile.py
===================================================================
--- trunk/scipy/io/npfile.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/io/npfile.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -225,8 +225,8 @@
 
 npfile = N.deprecate_with_doc("""
 You can achieve the same effect as using npfile, using ndarray.tofile
-and numpy.fromfile. 
+and numpy.fromfile.
 
-Even better you can use memory-mapped arrays and data-types to map out a 
+Even better you can use memory-mapped arrays and data-types to map out a
 file format for direct manipulation in NumPy.
 """)(npfile)

Modified: trunk/scipy/io/pickler.py
===================================================================
--- trunk/scipy/io/pickler.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/io/pickler.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -25,7 +25,7 @@
     fid.close()
 
 @deprecate_with_doc("""
-Just use cPickle.load or numpy.load. 
+Just use cPickle.load or numpy.load.
 """)
 def objload(file, allglobals):
     """Load a previously pickled dictionary and insert into given dictionary.

Modified: trunk/scipy/io/tests/test_mmio.py
===================================================================
--- trunk/scipy/io/tests/test_mmio.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/io/tests/test_mmio.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -150,37 +150,37 @@
 _skew_example = '''\
 %%MatrixMarket matrix coordinate real skew-symmetric
   5  5  7
-    1     1     1.0     
-    2     2    10.5     
-    4     2   250.5     
-    3     3     1.5e-2  
-    4     4    -2.8e2   
-    5     5    12.      
-    5     4     0        
+    1     1     1.0
+    2     2    10.5
+    4     2   250.5
+    3     3     1.5e-2
+    4     4    -2.8e2
+    5     5    12.
+    5     4     0
 '''
 
 _symmetric_example = '''\
 %%MatrixMarket matrix coordinate real symmetric
   5  5  7
-    1     1     1.0    
-    2     2    10.5    
-    4     2   250.5    
-    3     3     1.5e-2 
-    4     4    -2.8e2  
-    5     5    12.     
-    5     4     8     
+    1     1     1.0
+    2     2    10.5
+    4     2   250.5
+    3     3     1.5e-2
+    4     4    -2.8e2
+    5     5    12.
+    5     4     8
 '''
 
 _symmetric_pattern_example = '''\
 %%MatrixMarket matrix coordinate pattern symmetric
   5  5  7
-    1     1  
-    2     2  
-    4     2  
-    3     3  
-    4     4  
-    5     5  
-    5     4  
+    1     1
+    2     2
+    4     2
+    3     3
+    4     4
+    5     5
+    5     4
 '''
 
 class TestMMIOCoordinate(TestCase):
@@ -213,7 +213,7 @@
              [0,      0,               0,     33.32j,          12]]
         b = mmread(fn).todense()
         assert_array_almost_equal(a,b)
-    
+
     def test_read_skew(self):
         """read a skew-symmetric matrix"""
         fn = mktemp()
@@ -228,7 +228,7 @@
              [0,      0,               0,       0,    12]]
         b = mmread(fn).todense()
         assert_array_almost_equal(a,b)
-    
+
     def test_read_symmetric(self):
         """read a symmetric matrix"""
         fn = mktemp()

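The tests above exercise mmread on small MatrixMarket coordinate files. A self-contained sketch of that round trip in the same style, with a made-up 2x2 matrix and a temporary file:

    from tempfile import mktemp
    from scipy.io import mmread

    example = ('%%MatrixMarket matrix coordinate real general\n'
               '2 2 2\n'
               '1 1 1.0\n'
               '2 2 2.0\n')
    fn = mktemp()
    f = open(fn, 'w')
    f.write(example)
    f.close()
    a = mmread(fn).todense()   # dense 2 x 2 matrix with 1.0 and 2.0 on the diagonal
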
Modified: trunk/scipy/io/tests/test_recaster.py
===================================================================
--- trunk/scipy/io/tests/test_recaster.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/io/tests/test_recaster.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -171,7 +171,6 @@
             dtt = arr.dtype.type
             assert dtt is outp, \
                    'Expected %s from %s, got %s' % (outp, inp, dtt)
-            
+
 if __name__ == "__main__":
     nose.run(argv=['', __file__])
-

Modified: trunk/scipy/lib/blas/scons_support.py
===================================================================
--- trunk/scipy/lib/blas/scons_support.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/lib/blas/scons_support.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -15,7 +15,7 @@
     # XXX handle skip names
     name = splitext(pbasename(target_name))[0]
     #generate_interface(name, source_name, target_name)
-        
+
     f = open(target_name, 'w')
     f.write('python module '+name+'\n')
     f.write('usercode void empty_module(void) {}\n')
@@ -27,4 +27,3 @@
     f.close()
 
     return 0
-

Modified: trunk/scipy/lib/lapack/scons_support.py
===================================================================
--- trunk/scipy/lib/lapack/scons_support.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/lib/lapack/scons_support.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -15,7 +15,7 @@
     # XXX handle skip names
     name = splitext(pbasename(target_name))[0]
     #generate_interface(name, source_name, target_name)
-        
+
     f = open(target_name, 'w')
     f.write('python module '+name+'\n')
     f.write('usercode void empty_module(void) {}\n')
@@ -27,4 +27,3 @@
     f.close()
 
     return 0
-

Modified: trunk/scipy/lib/lapack/tests/test_lapack.py
===================================================================
--- trunk/scipy/lib/lapack/tests/test_lapack.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/lib/lapack/tests/test_lapack.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -11,7 +11,7 @@
 method names.  There are no subclasses of TestCase.  Thus nose will
 pick up nothing but the final test_all_lapack generator function.
 This does the work of collecting the test methods and checking if they
-can be run (see the isrunnable method).  
+can be run (see the isrunnable method).
 '''
 
 import os
@@ -137,4 +137,3 @@
         methods += [getattr(o, n) for n in dir(o) if o.isrunnable(n) is True]
     for method in methods:
         yield (method, )
-        

Modified: trunk/scipy/lib/setupscons.py
===================================================================
--- trunk/scipy/lib/setupscons.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/lib/setupscons.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -3,7 +3,7 @@
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
 
-    config = Configuration('lib',parent_package,top_path, 
+    config = Configuration('lib',parent_package,top_path,
                            setup_name = 'setupscons.py')
     config.add_subpackage('blas')
     config.add_subpackage('lapack')

Modified: trunk/scipy/linalg/basic.py
===================================================================
--- trunk/scipy/linalg/basic.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/linalg/basic.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -26,7 +26,7 @@
 
 def lu_solve((lu, piv), b, trans=0, overwrite_b=0):
     """Solve an equation system, a x = b, given the LU factorization of a
-    
+
     Parameters
     ----------
     (lu, piv)
@@ -52,7 +52,7 @@
     See also
     --------
     lu_factor : LU factorize a matrix
-    
+
     """
     b1 = asarray_chkfinite(b)
     overwrite_b = overwrite_b or (b1 is not b and not hasattr(b,'__array__'))
@@ -83,7 +83,7 @@
     See also
     --------
     cho_factor : Cholesky factorization of a matrix
-    
+
     """
     b1 = asarray_chkfinite(b)
     overwrite_b = overwrite_b or (b1 is not b and not hasattr(b,'__array__'))
@@ -114,14 +114,14 @@
         Allow overwriting data in a (may enhance performance)
     overwrite_b : boolean
         Allow overwriting data in b (may enhance performance)
-    
+
     Returns
     -------
     x : array, shape (M,) or (M, N) depending on b
         Solution to the system a x = b
 
     Raises LinAlgError if a is singular
-    
+
     """
     a1, b1 = map(asarray_chkfinite,(a,b))
     if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
@@ -164,8 +164,8 @@
 
         *    a01  a12  a23  a34  a45
         a00  a11  a22  a33  a44  a55
-        a10  a21  a32  a43  a54   * 
-        a20  a31  a42  a53   *    * 
+        a10  a21  a32  a43  a54   *
+        a20  a31  a42  a53   *    *
 
     Parameters
     ----------
@@ -184,7 +184,7 @@
     -------
     x : array, shape (M,) or (M, K)
         The solution to the system a x = b
-    
+
     """
     a1, b1 = map(asarray_chkfinite,(ab,b))
     overwrite_b = overwrite_b or (b1 is not b and not hasattr(b,'__array__'))
@@ -218,7 +218,7 @@
         *   *   a02 a13 a24 a35
         *   a01 a12 a23 a34 a45
         a00 a11 a22 a33 a44 a55
-        
+
         lower form:
         a00 a11 a22 a33 a44 a55
         a10 a21 a32 a43 a54 *
@@ -245,7 +245,7 @@
         Cholesky factorization of a, in the same banded format as ab
     x : array, shape (M,) or (M, K)
         The solution to the system a x = b
-    
+
     """
     ab, b = map(asarray_chkfinite,(ab,b))
 
@@ -263,25 +263,25 @@
 
 def cholesky_banded(ab, overwrite_ab=0, lower=0):
     """Cholesky decompose a banded Hermitian positive-definite matrix
-    
+
     The matrix a is stored in ab either in lower diagonal or upper
     diagonal ordered form:
-    
+
         ab[u + i - j, j] == a[i,j]        (if upper form; i <= j)
         ab[    i - j, j] == a[i,j]        (if lower form; i >= j)
-    
+
     Example of ab (shape of a is (6,6), u=2)::
-    
+
         upper form:
         *   *   a02 a13 a24 a35
         *   a01 a12 a23 a34 a45
         a00 a11 a22 a33 a44 a55
-        
+
         lower form:
         a00 a11 a22 a33 a44 a55
         a10 a21 a32 a43 a54 *
         a20 a31 a42 a53 *   *
-    
+
     Parameters
     ----------
     ab : array, shape (M, u + 1)
@@ -290,12 +290,12 @@
         Discard data in ab (may enhance performance)
     lower : boolean
         Is the matrix in the lower form. (Default is upper form)
-    
+
     Returns
     -------
     c : array, shape (M, u+1)
         Cholesky factorization of a, in the same banded format as ab
-    
+
     """
     ab = asarray_chkfinite(ab)
 
@@ -315,12 +315,12 @@
 # matrix inversion
 def inv(a, overwrite_a=0):
     """Compute the inverse of a matrix.
-    
+
     Parameters
     ----------
     a : array-like, shape (M, M)
         Matrix to be inverted
-    
+
     Returns
     -------
     ainv : array-like, shape (M, M)
@@ -409,7 +409,7 @@
         -2     smallest singular value       as below
         other  -                             sum(abs(x)**ord)**(1./ord)
         =====  ============================  ==========================
-    
+
     Returns
     -------
     n : float
@@ -420,7 +420,7 @@
     For values ord < 0, the result is, strictly speaking, not a
     mathematical 'norm', but it may still be useful for numerical
     purposes.
-    
+
     """
     x = asarray_chkfinite(x)
     if ord is None: # check the default case first and handle it immediately
@@ -472,7 +472,7 @@
     -------
     det : float or complex
         Determinant of a
-    
+
     Notes
     -----
     The determinant is computed via LU factorization, LAPACK routine z/dgetrf.
@@ -491,9 +491,9 @@
 
 def lstsq(a, b, cond=None, overwrite_a=0, overwrite_b=0):
     """Compute least-squares solution to equation :m:`a x = b`
-    
+
     Compute a vector x such that the 2-norm :m:`|b - a x|` is minimised.
-    
+
     Parameters
     ----------
     a : array, shape (M, N)
@@ -506,7 +506,7 @@
         Discard data in a (may enhance performance)
     overwrite_b : boolean
         Discard data in b (may enhance performance)
-    
+
     Returns
     -------
     x : array, shape (N,) or (N, K) depending on shape of b
@@ -519,9 +519,9 @@
         Effective rank of matrix a
     s : array, shape (min(M,N),)
         Singular values of a. The condition number of a is abs(s[0]/s[-1]).
-    
+
     Raises LinAlgError if computation does not converge
-    
+
     """
     a1, b1 = map(asarray_chkfinite,(a,b))
     if len(a1.shape) != 2:
@@ -562,10 +562,10 @@
 
 def pinv(a, cond=None, rcond=None):
     """Compute the (Moore-Penrose) pseudo-inverse of a matrix.
-    
+
     Calculate a generalized inverse of a matrix using a least-squares
     solver.
-    
+
     Parameters
     ----------
     a : array, shape (M, N)
@@ -574,11 +574,11 @@
         Cutoff for 'small' singular values in the least-squares solver.
         Singular values smaller than rcond*largest_singular_value are
         considered zero.
-    
+
     Returns
     -------
     B : array, shape (N, M)
-    
+
     Raises LinAlgError if computation does not converge
 
     Examples
@@ -590,7 +590,7 @@
     True
     >>> allclose(B, dot(B, dot(a, B)))
     True
-    
+
     """
     a = asarray_chkfinite(a)
     b = numpy.identity(a.shape[0], dtype=a.dtype)
@@ -605,11 +605,11 @@
 _array_precision = {'f': 0, 'd': 1, 'F': 0, 'D': 1}
 def pinv2(a, cond=None, rcond=None):
     """Compute the (Moore-Penrose) pseudo-inverse of a matrix.
-    
+
     Calculate a generalized inverse of a matrix using its
     singular-value decomposition and including all 'large' singular
     values.
-    
+
     Parameters
     ----------
     a : array, shape (M, N)
@@ -620,11 +620,11 @@
         considered zero.
 
         If None or -1, suitable machine precision is used.
-    
+
     Returns
     -------
     B : array, shape (N, M)
-    
+
     Raises LinAlgError if SVD computation does not converge
 
     Examples
@@ -636,7 +636,7 @@
     True
     >>> allclose(B, dot(B, dot(a, B)))
     True
-    
+
     """
     a = asarray_chkfinite(a)
     u, s, vh = decomp.svd(a)
@@ -689,7 +689,7 @@
     array([[0, 0, 0, 0, 0],
            [1, 0, 0, 0, 0],
            [1, 1, 0, 0, 0]])
-    
+
     """
     if M is None: M = N
     if type(M) == type('d'):
@@ -705,7 +705,7 @@
 
 def tril(m, k=0):
     """Construct a copy of a matrix with elements above the k-th diagonal zeroed.
-    
+
     Parameters
     ----------
     m : array
@@ -717,7 +717,7 @@
     Returns
     -------
     A : array, shape m.shape, dtype m.dtype
-    
+
     Examples
     --------
     >>> from scipy.linalg import tril
@@ -726,7 +726,7 @@
            [ 4,  0,  0],
            [ 7,  8,  0],
            [10, 11, 12]])
-    
+
     """
     svsp = getattr(m,'spacesaver',lambda:0)()
     m = asarray(m)
@@ -736,7 +736,7 @@
 
 def triu(m, k=0):
     """Construct a copy of a matrix with elements below the k-th diagonal zeroed.
-    
+
     Parameters
     ----------
     m : array
@@ -748,7 +748,7 @@
     Returns
     -------
     A : array, shape m.shape, dtype m.dtype
-    
+
     Examples
     --------
     >>> from scipy.linalg import triu
@@ -757,7 +757,7 @@
            [ 4,  5,  6],
            [ 0,  8,  9],
            [ 0,  0, 12]])
-    
+
     """
     svsp = getattr(m,'spacesaver',lambda:0)()
     m = asarray(m)
@@ -767,23 +767,23 @@
 
 def toeplitz(c,r=None):
     """Construct a Toeplitz matrix.
-    
+
     The Toeplitz matrix has constant diagonals, c as its first column,
     and r as its first row (if not given, r == c is assumed).
-    
+
     Parameters
     ----------
     c : array
         First column of the matrix
     r : array
         First row of the matrix. If None, r == c is assumed.
-    
+
     Returns
     -------
     A : array, shape (len(c), len(r))
         Constructed Toeplitz matrix.
         dtype is the same as (c[0] + r[0]).dtype
-    
+
     Examples
     --------
     >>> from scipy.linalg import toeplitz
@@ -791,11 +791,11 @@
     array([[1, 4, 5, 6],
            [2, 1, 4, 5],
            [3, 2, 1, 4]])
-    
+
     See also
     --------
     hankel : Hankel matrix
-    
+
     """
     isscalar = numpy.isscalar
     if isscalar(c) or isscalar(r):
@@ -819,23 +819,23 @@
 
 def hankel(c,r=None):
     """Construct a Hankel matrix.
-    
+
     The Hankel matrix has constant anti-diagonals, c as its first column,
     and r as its last row (if not given, r == 0 is assumed).
-    
+
     Parameters
     ----------
     c : array
         First column of the matrix
     r : array
         Last row of the matrix. If None, r == 0 is assumed.
-    
+
     Returns
     -------
     A : array, shape (len(c), len(r))
         Constructed Hankel matrix.
         dtype is the same as (c[0] + r[0]).dtype
-    
+
     Examples
     --------
     >>> from scipy.linalg import hankel
@@ -844,11 +844,11 @@
            [2, 3, 4, 7, 7],
            [3, 4, 7, 7, 8],
            [4, 7, 7, 8, 9]])
-    
+
     See also
     --------
     toeplitz : Toeplitz matrix
-    
+
     """
     isscalar = numpy.isscalar
     if isscalar(c) or isscalar(r):
@@ -889,14 +889,14 @@
     -------
     A : array, shape (M*P, N*Q)
         Kronecker product of a and b
-    
+
     Examples
     --------
     >>> from scipy import kron, array
     >>> kron(array([[1,2],[3,4]]), array([[1,1,1]]))
     array([[1, 1, 1, 2, 2, 2],
            [3, 3, 3, 4, 4, 4]])
-    
+
     """
     if not a.flags['CONTIGUOUS']:
         a = reshape(a, a.shape)

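A quick doctest-style sketch of the lstsq usage documented above (the data here are illustrative, not taken from the test suite):

    >>> from numpy import array, allclose, dot
    >>> from scipy.linalg import lstsq
    >>> a = array([[1., 1.], [1., 2.], [1., 3.]])
    >>> b = array([1., 2., 3.])
    >>> x, resid, rank, s = lstsq(a, b)
    >>> rank
    2
    >>> allclose(dot(a, x), b)
    True
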
Modified: trunk/scipy/linalg/benchmarks/bench_decom.py
===================================================================
--- trunk/scipy/linalg/benchmarks/bench_decom.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/linalg/benchmarks/bench_decom.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -20,16 +20,15 @@
     print '      |    contiguous     '#'|   non-contiguous '
     print '----------------------------------------------'
     print ' size |  scipy  '#'| core |  scipy  | core '
-    
+
     for size,repeat in [(20,150),(100,7),(200,2)]:
         repeat *= 1
         print '%5s' % size,
         sys.stdout.flush()
-        
+
         a = random([size,size])
-        
+
         print '| %6.2f ' % measure('eigvals(a)',repeat),
         sys.stdout.flush()
-        
-        print '   (secs for %s calls)' % (repeat)
 
+        print '   (secs for %s calls)' % (repeat)

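For a standalone reproduction of the eigvals timing measured above, a rough sketch using the standard-library timeit module is given below; the size and call count are arbitrary, and this does not use the benchmark's own measure() helper:

    import timeit

    setup = ("from numpy.random import random\n"
             "from scipy.linalg import eigvals\n"
             "a = random([100, 100])")
    t = timeit.Timer('eigvals(a)', setup)
    # seconds for 7 calls, comparable to the '(secs for N calls)' column above
    print '%6.2f' % t.timeit(number=7)
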
Modified: trunk/scipy/linalg/blas.py
===================================================================
--- trunk/scipy/linalg/blas.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/linalg/blas.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -29,7 +29,7 @@
     """Return available BLAS function objects with names.
     arrays are used to determine the optimal prefix of
     BLAS routines.
-    
+
     """
     ordering = []
     for i in range(len(arrays)):

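The get_blas_funcs docstring above mentions picking the optimal prefix from the array arguments; the underlying convention is the standard s/d/c/z BLAS naming scheme, sketched here with a purely illustrative dict (the name _dtype_to_prefix is hypothetical and not part of scipy.linalg.blas):

    # illustrative only: numpy dtype character -> BLAS routine prefix
    _dtype_to_prefix = {'f': 's',   # float32    -> saxpy, sgemm, ...
                        'd': 'd',   # float64    -> daxpy, dgemm, ...
                        'F': 'c',   # complex64  -> caxpy, cgemm, ...
                        'D': 'z'}   # complex128 -> zaxpy, zgemm, ...

    import numpy
    x = numpy.zeros(3, dtype=numpy.float32)
    print _dtype_to_prefix.get(x.dtype.char, 'd')   # prints 's'
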
Modified: trunk/scipy/linalg/decomp.py
===================================================================
--- trunk/scipy/linalg/decomp.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/linalg/decomp.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -101,12 +101,12 @@
     """Solve an ordinary or generalized eigenvalue problem of a square matrix.
 
     Find eigenvalues w and right or left eigenvectors of a general matrix::
-    
+
         a   vr[:,i] = w[i]        b   vr[:,i]
         a.H vl[:,i] = w[i].conj() b.H vl[:,i]
-    
+
     where .H is the Hermitian conjugation.
-    
+
     Parameters
     ----------
     a : array, shape (M, M)
@@ -119,12 +119,12 @@
         Whether to calculate and return left eigenvectors
     right : boolean
         Whether to calculate and return right eigenvectors
-    
+
     overwrite_a : boolean
         Whether to overwrite data in a (may improve performance)
     overwrite_b : boolean
         Whether to overwrite data in b (may improve performance)
-    
+
     Returns
     -------
     w : double or complex array, shape (M,)
@@ -134,18 +134,18 @@
     vl : double or complex array, shape (M, M)
         The normalized left eigenvector corresponding to the eigenvalue w[i]
         is the column v[:,i].
-    
+
     (if right == True)
     vr : double or complex array, shape (M, M)
         The normalized right eigenvector corresponding to the eigenvalue w[i]
         is the column vr[:,i].
-    
+
     Raises LinAlgError if eigenvalue computation does not converge
 
     See Also
     --------
     eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
-    
+
     """
     a1 = asarray_chkfinite(a)
     if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
@@ -207,10 +207,10 @@
     """Solve the eigenvalue problem for a Hermitian or real symmetric matrix.
 
     Find eigenvalues w and optionally right eigenvectors v of a::
-    
+
         a v[:,i] = w[i] v[:,i]
         v.H v    = identity
-    
+
     Parameters
     ----------
     a : array, shape (M, M)
@@ -224,24 +224,24 @@
         (Default: both are calculated)
     overwrite_a : boolean
         Whether data in a is overwritten (may improve performance).
-    
+
     Returns
     -------
     w : double array, shape (M,)
         The eigenvalues, in ascending order, each repeated according to its
         multiplicity.
-    
+
     (if eigvals_only == False)
     v : double or complex double array, shape (M, M)
         The normalized eigenvector corresponding to the eigenvalue w[i] is
         the column v[:,i].
-    
+
     Raises LinAlgError if eigenvalue computation does not converge
 
     See Also
     --------
     eig : eigenvalues and right eigenvectors for non-symmetric arrays
-    
+
     """
     if eigvals_only or overwrite_a:
         a1 = asarray_chkfinite(a)
@@ -299,10 +299,10 @@
     """Solve real symmetric or complex hermetian band matrix eigenvalue problem.
 
     Find eigenvalues w and optionally right eigenvectors v of a::
-    
+
         a v[:,i] = w[i] v[:,i]
         v.H v    = identity
-    
+
     The matrix a is stored in ab either in lower diagonal or upper
     diagonal ordered form:
 
@@ -315,7 +315,7 @@
         *   *   a02 a13 a24 a35
         *   a01 a12 a23 a34 a45
         a00 a11 a22 a33 a44 a55
-        
+
         lower form:
         a00 a11 a22 a33 a44 a55
         a10 a21 a32 a43 a54 *
@@ -349,7 +349,7 @@
     max_ev : integer
         For select=='v', maximum number of eigenvalues expected.
         For other values of select, has no meaning.
-        
+
         If in doubt, leave this parameter untouched.
 
     Returns
@@ -361,9 +361,9 @@
     v : double or complex double array, shape (M, M)
         The normalized eigenvector corresponding to the eigenvalue w[i] is
         the column v[:,i].
-    
+
     Raises LinAlgError if eigenvalue computation does not converge
-    
+
     """
     if eigvals_only or overwrite_a_band:
         a1 = asarray_chkfinite(a_band)
@@ -446,7 +446,7 @@
     """Compute eigenvalues from an ordinary or generalized eigenvalue problem.
 
     Find eigenvalues of a general matrix::
-    
+
         a   vr[:,i] = w[i]        b   vr[:,i]
 
     Parameters
@@ -465,9 +465,9 @@
     w : double or complex array, shape (M,)
         The eigenvalues, each repeated according to its multiplicity,
         but not in any specific order.
-    
+
     Raises LinAlgError if eigenvalue computation does not converge
-    
+
     See Also
     --------
     eigvalsh : eigenvalues of symmetric or Hermitian arrays
@@ -481,10 +481,10 @@
     """Solve the eigenvalue problem for a Hermitian or real symmetric matrix.
 
     Find eigenvalues w of a::
-    
+
         a v[:,i] = w[i] v[:,i]
         v.H v    = identity
-    
+
     Parameters
     ----------
     a : array, shape (M, M)
@@ -495,13 +495,13 @@
         triangle of a. (Default: lower)
     overwrite_a : boolean
         Whether data in a is overwritten (may improve performance).
-    
+
     Returns
     -------
     w : double array, shape (M,)
         The eigenvalues, in ascending order, each repeated according to its
         multiplicity.
-    
+
     Raises LinAlgError if eigenvalue computation does not converge
 
     See Also
@@ -509,7 +509,7 @@
     eigvals : eigenvalues of general arrays
     eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
     eig : eigenvalues and right eigenvectors for non-symmetric arrays
-    
+
     """
     return eigh(a,lower=lower,eigvals_only=1,overwrite_a=overwrite_a)
 
@@ -518,10 +518,10 @@
     """Solve real symmetric or complex hermetian band matrix eigenvalue problem.
 
     Find eigenvalues w of a::
-    
+
         a v[:,i] = w[i] v[:,i]
         v.H v    = identity
-    
+
     The matrix a is stored in ab either in lower diagonal or upper
     diagonal ordered form:
 
@@ -534,7 +534,7 @@
         *   *   a02 a13 a24 a35
         *   a01 a12 a23 a34 a45
         a00 a11 a22 a33 a44 a55
-        
+
         lower form:
         a00 a11 a22 a33 a44 a55
         a10 a21 a32 a43 a54 *
@@ -570,14 +570,14 @@
         multiplicity.
 
     Raises LinAlgError if eigenvalue computation does not converge
-    
+
     See Also
     --------
     eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian band matrices
     eigvals : eigenvalues of general arrays
     eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
     eig : eigenvalues and right eigenvectors for non-symmetric arrays
-    
+
     """
     return eig_banded(a_band,lower=lower,eigvals_only=1,
                       overwrite_a_band=overwrite_a_band, select=select,
@@ -585,14 +585,14 @@
 
 def lu_factor(a, overwrite_a=0):
     """Compute pivoted LU decomposition of a matrix.
-    
+
     The decomposition is::
 
         A = P L U
 
     where P is a permutation matrix, L lower triangular with unit
     diagonal elements, and U upper triangular.
-    
+
     Parameters
     ----------
     a : array, shape (M, M)
@@ -612,11 +612,11 @@
     See also
     --------
     lu_solve : solve an equation system using the LU factorization of a matrix
-    
+
     Notes
     -----
     This is a wrapper to the *GETRF routines from LAPACK.
-    
+
     """
     a1 = asarray(a)
     if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
@@ -632,7 +632,7 @@
 
 def lu_solve(a_lu_pivots,b):
     """Solve an equation system, a x = b, given the LU factorization of a
-    
+
     Parameters
     ----------
     (lu, piv)
@@ -676,7 +676,7 @@
 
     where P is a permutation matrix, L lower triangular with unit
     diagonal elements, and U upper triangular.
-    
+
     Parameters
     ----------
     a : array, shape (M, N)
@@ -696,18 +696,18 @@
         K = min(M, N)
     u : array, shape (K, N)
         Upper triangular or trapezoidal matrix
-    
+
     (If permute_l == True)
     pl : array, shape (M, K)
         Permuted L matrix.
         K = min(M, N)
     u : array, shape (K, N)
         Upper triangular or trapezoidal matrix
-    
+
     Notes
     -----
     This is an LU factorization routine written for Scipy.
-    
+
     """
     a1 = asarray_chkfinite(a)
     if len(a1.shape) != 2:
@@ -728,7 +728,7 @@
     a 1d-array s of singular values (real, non-negative) such that
     a == U S Vh  if S is a suitably shaped matrix of zeros whose
     main diagonal is s.
-    
+
     Parameters
     ----------
     a : array, shape (M, N)
@@ -740,7 +740,7 @@
         Whether to compute also U, Vh in addition to s (Default: true)
     overwrite_a : boolean
         Whether data in a is overwritten (may improve performance)
-    
+
     Returns
     -------
     U:  array, shape (M,M) or (M,K) depending on full_matrices
@@ -759,14 +759,14 @@
     >>> U, s, Vh = linalg.svd(a)
     >>> U.shape, Vh.shape, s.shape
     ((9, 9), (6, 6), (6,))
-    
+
     >>> U, s, Vh = linalg.svd(a, full_matrices=False)
     >>> U.shape, Vh.shape, s.shape
     ((9, 6), (6, 6), (6,))
     >>> S = linalg.diagsvd(s, 6, 6)
     >>> allclose(a, dot(U, dot(S, Vh)))
     True
-    
+
     >>> s2 = linalg.svd(a, compute_uv=False)
     >>> allclose(s, s2)
     True
@@ -775,7 +775,7 @@
     --------
     svdvals : return singular values of a matrix
     diagsvd : return the Sigma matrix, given the vector s
-    
+
     """
     # A hack until full_matrices == 0 support is fixed here.
     if full_matrices == 0:
@@ -810,7 +810,7 @@
         Matrix to decompose
     overwrite_a : boolean
         Whether data in a is overwritten (may improve performance)
-    
+
     Returns
     -------
     s:  array, shape (K,)
@@ -822,7 +822,7 @@
     --------
     svd : return the full singular value decomposition of a matrix
     diagsvd : return the Sigma matrix, given the vector s
-    
+
     """
     return svd(a,compute_uv=0,overwrite_a=overwrite_a)
 
@@ -841,7 +841,7 @@
     -------
     S : array, shape (M, N)
         The S-matrix in the singular value decomposition
-    
+
     """
     part = diag(s)
     typ = part.dtype.char
@@ -855,10 +855,10 @@
 
 def cholesky(a,lower=0,overwrite_a=0):
     """Compute the Cholesky decomposition of a matrix.
-    
+
     Returns the Cholesky decomposition, :lm:`A = L L^*` or :lm:`A = U^* U`
     of a Hermitian positive-definite matrix :lm:`A`.
-    
+
     Parameters
     ----------
     a : array, shape (M, M)
@@ -868,14 +868,14 @@
         (Default: upper-triangular)
     overwrite_a : boolean
         Whether to overwrite data in a (may improve performance)
-    
+
     Returns
     -------
     B : array, shape (M, M)
         Upper- or lower-triangular Cholesky factor of A
-    
+
     Raises LinAlgError if decomposition fails
-    
+
     Examples
     --------
     >>> from scipy import array, linalg, dot
@@ -887,7 +887,7 @@
     >>> dot(L, L.T.conj())
     array([[ 1.+0.j,  0.-2.j],
            [ 0.+2.j,  5.+0.j]])
-    
+
     """
     a1 = asarray_chkfinite(a)
     if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
@@ -902,12 +902,12 @@
 
 def cho_factor(a, lower=0, overwrite_a=0):
     """Compute the Cholesky decomposition of a matrix, to use in cho_solve
-    
+
     Returns the Cholesky decomposition, :lm:`A = L L^*` or :lm:`A = U^* U`
     of a Hermitian positive-definite matrix :lm:`A`.
 
     The return value can be directly used as the first parameter to cho_solve.
-    
+
     Parameters
     ----------
     a : array, shape (M, M)
@@ -917,16 +917,16 @@
         (Default: upper-triangular)
     overwrite_a : boolean
         Whether to overwrite data in a (may improve performance)
-    
+
     Returns
     -------
     c : array, shape (M, M)
         Upper- or lower-triangular Cholesky factor of A
     lower : boolean
         Flag indicating whether the factor is lower or upper triangular
-    
+
     Raises LinAlgError if decomposition fails
-    
+
     """
     a1 = asarray_chkfinite(a)
     if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
@@ -955,7 +955,7 @@
         The return value from cho_factor can be used.
     b : array
         Right-hand side of the equation system
-    
+
     First input is a tuple (LorU, lower) which is the output to cho_factor.
     Second input is the right-hand side.
 
@@ -963,7 +963,7 @@
     -------
     x : array
         Solution to the equation system
-    
+
     """
     c, lower = clow
     c = asarray_chkfinite(c)
@@ -1000,7 +1000,7 @@
     mode : {'qr', 'r'}
         Determines what information is to be returned: either both Q and R
         or only R.
-    
+
     Returns
     -------
     (if mode == 'qr')
@@ -1029,11 +1029,11 @@
 
     >>> r2 = linalg.qr(a, mode='r')
     >>> allclose(r, r2)
-    
+
     >>> q3, r3 = linalg.qr(a, econ=True)
     >>> q3.shape, r3.shape
     ((9, 6), (6, 6))
-    
+
     """
     a1 = asarray_chkfinite(a)
     if len(a1.shape) != 2:
@@ -1108,7 +1108,7 @@
     lwork : integer
         Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
         is computed.
-    
+
     Returns
     -------
     Q : double or complex array, shape (M, M)
@@ -1116,7 +1116,7 @@
         Size K = min(M, N)
 
     Raises LinAlgError if decomposition fails
-    
+
     """
     a1 = asarray_chkfinite(a)
     if len(a1.shape) != 2:
@@ -1163,15 +1163,15 @@
         Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
         is computed.
     econ : boolean
-    
+
     Returns
     -------
     R : double array, shape (M, N) or (K, N) for econ==True
         Size K = min(M, N)
     Q : double or complex array, shape (M, M) or (M, K) for econ==True
-    
+
     Raises LinAlgError if decomposition fails
-    
+
     """
     # TODO: implement support for non-square and complex arrays
     a1 = asarray_chkfinite(a)
@@ -1211,16 +1211,16 @@
 
 def schur(a,output='real',lwork=None,overwrite_a=0):
     """Compute Schur decomposition of a matrix.
-    
+
     The Schur decomposition is
-    
+
         A = Z T Z^H
-    
+
     where Z is unitary and T is either upper-triangular, or for real
     Schur decomposition (output='real'), quasi-upper triangular.  In
     the quasi-triangular form, 2x2 blocks describing complex-valued
     eigenvalue pairs may extrude from the diagonal.
-    
+
     Parameters
     ----------
     a : array, shape (M, M)
@@ -1243,7 +1243,7 @@
     See also
     --------
     rsf2csf : Convert real Schur form to complex Schur form
-    
+
     """
     if not output in ['real','complex','r','c']:
         raise ValueError, "argument must be 'real', or 'complex'"
@@ -1305,28 +1305,28 @@
 
 def rsf2csf(T, Z):
     """Convert real Schur form to complex Schur form.
-    
+
     Convert a quasi-diagonal real-valued Schur form to the upper triangular
     complex-valued Schur form.
-    
+
     Parameters
     ----------
     T : array, shape (M, M)
         Real Schur form of the original matrix
     Z : array, shape (M, M)
         Schur transformation matrix
-    
+
     Returns
     -------
     T : array, shape (M, M)
         Complex Schur form of the original matrix
     Z : array, shape (M, M)
         Schur transformation matrix corresponding to the complex form
-    
+
     See also
     --------
     schur : Schur decompose a matrix
-    
+
     """
     Z,T = map(asarray_chkfinite, (Z,T))
     if len(Z.shape) !=2 or Z.shape[0] != Z.shape[1]:
@@ -1366,21 +1366,21 @@
 
 def orth(A):
     """Construct an orthonormal basis for the range of A using SVD
-    
+
     Parameters
     ----------
     A : array, shape (M, N)
-    
+
     Returns
     -------
     Q : array, shape (M, K)
         Orthonormal basis for the range of A.
         K = effective rank of A, as determined by automatic cutoff
-    
+
     See also
     --------
     svd : Singular value decomposition of a matrix
-    
+
     """
     u,s,vh = svd(A)
     M,N = A.shape
@@ -1391,14 +1391,14 @@
 
 def hessenberg(a,calc_q=0,overwrite_a=0):
     """Compute Hessenberg form of a matrix.
-    
+
     The Hessenberg decomposition is
-    
+
         A = Q H Q^H
-    
+
     where Q is unitary/orthogonal and H has only zero elements below the first
     subdiagonal.
-    
+
     Parameters
     ----------
     a : array, shape (M,M)

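The cho_factor/cho_solve docstrings earlier in this file describe a two-step solve in which the return value of cho_factor is passed straight to cho_solve; a minimal doctest-style sketch of that flow (the matrix is an arbitrary positive-definite example):

    >>> from numpy import array, allclose, dot
    >>> from scipy.linalg import cho_factor, cho_solve
    >>> a = array([[4., 2.], [2., 3.]])
    >>> b = array([1., 2.])
    >>> c_and_lower = cho_factor(a)      # tuple (Cholesky factor, lower flag)
    >>> x = cho_solve(c_and_lower, b)
    >>> allclose(dot(a, x), b)
    True
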
Modified: trunk/scipy/linalg/iterative.py
===================================================================
--- trunk/scipy/linalg/iterative.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/linalg/iterative.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -7,7 +7,7 @@
 
 for name in __all__:
     oldfn = getattr(isolve, name)
-    oldname='scipy.linalg.' + name 
+    oldname='scipy.linalg.' + name
     newname='scipy.sparse.linalg.' + name
     newfn = deprecate(oldfn, oldname=oldname, newname=newname)
     exec(name + ' = newfn')

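The iterative.py shim above forwards the old scipy.linalg solver names to scipy.sparse.linalg through deprecate(); the wrapping pattern it relies on looks roughly like the self-contained sketch below (deprecate_alias is a hypothetical stand-in, not the helper actually imported there):

    import warnings

    def deprecate_alias(fn, oldname, newname):
        """Return fn wrapped so each call warns that oldname moved to newname."""
        def wrapper(*args, **kwargs):
            warnings.warn('%s is deprecated, use %s instead' % (oldname, newname),
                          DeprecationWarning, stacklevel=2)
            return fn(*args, **kwargs)
        wrapper.__name__ = fn.__name__
        wrapper.__doc__ = fn.__doc__
        return wrapper
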
Modified: trunk/scipy/linalg/matfuncs.py
===================================================================
--- trunk/scipy/linalg/matfuncs.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/linalg/matfuncs.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -104,7 +104,7 @@
         Matrix to be exponentiated
     q : integer
         Order of the Taylor series
-    
+
     Returns
     -------
     expA : array, shape(M,M)
@@ -154,7 +154,7 @@
     Parameters
     ----------
     A : array, shape(M,M)
-    
+
     Returns
     -------
     cosA : array, shape(M,M)
@@ -275,10 +275,10 @@
 
 def funm(A,func,disp=1):
     """Evaluate a matrix function specified by a callable.
-    
+
     Returns the value of matrix-valued function f at A. The function f
     is an extension of the scalar-valued function func to matrices.
-    
+
     Parameters
     ----------
     A : array, shape(M,M)
@@ -350,9 +350,9 @@
 
 def logm(A,disp=1):
     """Compute matrix logarithm.
-    
+
     The matrix logarithm is the inverse of expm: expm(logm(A)) == A
-    
+
     Parameters
     ----------
     A : array, shape(M,M)
@@ -401,9 +401,9 @@
 
 def signm(a,disp=1):
     """Matrix sign function.
-    
+
     Extension of the scalar sign(x) to matrices.
-    
+
     Parameters
     ----------
     A : array, shape(M,M)
@@ -411,7 +411,7 @@
     disp : boolean
         Print warning if error in the result is estimated large
         instead of returning estimated error. (Default: True)
-    
+
     Returns
     -------
     sgnA : array, shape(M,M)
@@ -420,7 +420,7 @@
     (if disp == False)
     errest : float
         1-norm of the estimated error, ||err||_1 / ||A||_1
-    
+
     Examples
     --------
     >>> from scipy.linalg import signm, eigvals
@@ -429,7 +429,7 @@
     array([ 4.12488542+0.j, -0.76155718+0.j,  0.63667176+0.j])
     >>> eigvals(signm(a))
     array([-1.+0.j,  1.+0.j,  1.+0.j])
-        
+
     """
     def rounded_sign(x):
         rx = real(x)
@@ -478,7 +478,7 @@
 
 def sqrtm(A,disp=1):
     """Matrix square root.
-    
+
     Parameters
     ----------
     A : array, shape(M,M)
@@ -486,7 +486,7 @@
     disp : boolean
         Print warning if error in the result is estimated large
         instead of returning estimated error. (Default: True)
-    
+
     Returns
     -------
     sqrtA : array, shape(M,M)
@@ -499,7 +499,7 @@
     Notes
     -----
     Uses algorithm by Nicholas J. Higham
-    
+
     """
     A = asarray(A)
     if len(A.shape)!=2:

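The logm docstring above states that expm(logm(A)) == A; a small doctest-style check of that round trip (the matrix values are arbitrary, chosen to have positive real eigenvalues):

    >>> from numpy import array, allclose
    >>> from scipy.linalg import expm, logm
    >>> a = array([[2.0, 0.5], [0.0, 1.0]])
    >>> allclose(expm(logm(a)), a)
    True
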
Modified: trunk/scipy/linalg/scons_support.py
===================================================================
--- trunk/scipy/linalg/scons_support.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/linalg/scons_support.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -28,7 +28,7 @@
     # XXX handle skip names
     name = splitext(pbasename(target_name))[0]
     generate_interface(name, source_name, target_name)
-        
+
     f = open(target_name, 'w')
     f.write('python module '+name+'\n')
     f.write('usercode void empty_module(void) {}\n')
@@ -40,4 +40,3 @@
     f.close()
 
     return 0
-

Modified: trunk/scipy/linalg/tests/test_decomp.py
===================================================================
--- trunk/scipy/linalg/tests/test_decomp.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/linalg/tests/test_decomp.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -11,7 +11,7 @@
 Run tests if scipy is installed:
   python -c 'import scipy;scipy.linalg.test()'
 Run tests if linalg is not installed:
-  python tests/test_decomp.py 
+  python tests/test_decomp.py
 """
 
 import sys

Modified: trunk/scipy/ndimage/_registration.py
===================================================================
--- trunk/scipy/ndimage/_registration.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/ndimage/_registration.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -30,951 +30,951 @@
 #
 
 def resize_image(imageG, imageF_mat):
-	"""
-	zoom_image = resize_image(source_image, reference_image[mat])
+    """
+    zoom_image = resize_image(source_image, reference_image[mat])
 
-	Fractional resample source_image to reference_imagesize. The
-	resample is implemented with 3D cubic spline. The reference
-	image [mat] is the 4x4 voxel-to-physical conversion matrix.
-	
-	Parameters 
-	----------
+    Fractional resample of source_image to the reference_image size. The
+    resample is implemented with 3D cubic spline. The reference
+    image [mat] is the 4x4 voxel-to-physical conversion matrix.
+    
+    Parameters 
+    ----------
 
-	imageG : {dictionary} 
-	    imageG is the source image to be resized. it is a dictionary with
-	    the data as an ndarray in the ['data'] component.
+    imageG : {dictionary} 
+        imageG is the source image to be resized. it is a dictionary with
+        the data as an ndarray in the ['data'] component.
 
-	reference_image[mat] : {ndarray}
-	    refernce_image is the image whose sampling dimensions the source
-	    image is to be remapped to. [mat] refers to the component
-	    of the image dictionary, reference_image['mat'] that is the
-	    sampling dimensions.
+    reference_image[mat] : {ndarray}
+        reference_image is the image whose sampling dimensions the source
+        image is to be remapped to. [mat] refers to the component
+        of the image dictionary, reference_image['mat'] that is the
+        sampling dimensions.
 
-	Returns 
-	-------
-	zoom_image : {dictionary}
+    Returns 
+    -------
+    zoom_image : {dictionary}
 
-	Examples
-	--------
+    Examples
+    --------
 
-	>>> import _registration as reg
-	>>> measures, imageF_anat, fmri_series = reg.demo_MRI_coregistration()
+    >>> import _registration as reg
+    >>> measures, imageF_anat, fmri_series = reg.demo_MRI_coregistration()
 
-	>>> resampled_fmri = reg.resize_image(fmri_series[10], imageF_anat['mat'])
+    >>> resampled_fmri = reg.resize_image(fmri_series[10], imageF_anat['mat'])
 
-	image 10 in the fmri_series is resampled to imageF_anat coordinates
+    image 10 in the fmri_series is resampled to imageF_anat coordinates
 
-	"""
+    """
 
-	Z = NP.zeros(3, dtype=NP.float64);
-	# get the zoom
-	Z[0] = imageG['mat'][0][0] / imageF_mat[0][0]
-	Z[1] = imageG['mat'][1][1] / imageF_mat[1][1]
-	Z[2] = imageG['mat'][2][2] / imageF_mat[2][2]
+    Z = NP.zeros(3, dtype=NP.float64);
+    # get the zoom
+    Z[0] = imageG['mat'][0][0] / imageF_mat[0][0]
+    Z[1] = imageG['mat'][1][1] / imageF_mat[1][1]
+    Z[2] = imageG['mat'][2][2] / imageF_mat[2][2]
 
-	# new volume dimensions (rounded)
-	D = NP.zeros(3, dtype=NP.int32);
-	D[0] = int(float(imageG['dim'][0])*Z[0]+0.5)
-	D[1] = int(float(imageG['dim'][1])*Z[1]+0.5)
-	D[2] = int(float(imageG['dim'][2])*Z[2]+0.5)
+    # new volume dimensions (rounded)
+    D = NP.zeros(3, dtype=NP.int32);
+    D[0] = int(float(imageG['dim'][0])*Z[0]+0.5)
+    D[1] = int(float(imageG['dim'][1])*Z[1]+0.5)
+    D[2] = int(float(imageG['dim'][2])*Z[2]+0.5)
 
-	M = NP.eye(4, dtype=NP.float64);
-	# for the test data, set the xyz voxel sizes for fMRI volume
-	M[0][0] = imageG['mat'][0][0]/Z[0]
-	M[1][1] = imageG['mat'][1][1]/Z[1]
-	M[2][2] = imageG['mat'][2][2]/Z[2]
+    M = NP.eye(4, dtype=NP.float64);
+    # for the test data, set the xyz voxel sizes for fMRI volume
+    M[0][0] = imageG['mat'][0][0]/Z[0]
+    M[1][1] = imageG['mat'][1][1]/Z[1]
+    M[2][2] = imageG['mat'][2][2]/Z[2]
 
-    	image = NP.zeros(D[2]*D[1]*D[0], dtype=NP.uint8).reshape(D[2], D[0], D[1])
-	mode  = 2
-	scale = 0
-	R.register_volume_resample(imageG['data'], image, Z, scale, mode)
-	F = NP.zeros(3, dtype=NP.float64);
-	zoom_image = {'data' : image, 'mat' : M, 'dim' : D, 'fwhm' : F}
+    image = NP.zeros(D[2]*D[1]*D[0], dtype=NP.uint8).reshape(D[2], D[0], D[1])
+    mode  = 2
+    scale = 0
+    R.register_volume_resample(imageG['data'], image, Z, scale, mode)
+    F = NP.zeros(3, dtype=NP.float64);
+    zoom_image = {'data' : image, 'mat' : M, 'dim' : D, 'fwhm' : F}
 
-	return zoom_image
+    return zoom_image
 
 def remap_image(image, parm_vector, resample='linear'):
-	"""
-	remaped_image = remap_image(image, parm_vector, resample='linear')
+    """
+    remaped_image = remap_image(image, parm_vector, resample='linear')
 
-	rotates and translates image using the 3 angles and 3 translations in the 6-dim
-	parm_vector. The mapping is stored in the 4x4 M_inverse matrix from the get_inverse_mapping
-	method.
+    rotates and translates the image using the 3 angles and 3 translations in the 6-dim
+    parm_vector. The mapping is stored in the 4x4 M_inverse matrix from the get_inverse_mappings
+    method.
 
-	Parameters 
-	----------
-	image : {dictionary} 
-	    image is the source image to be remapped. it is a dictionary with
-	    the data as an ndarray in the ['data'] component.
+    Parameters 
+    ----------
+    image : {dictionary} 
+        image is the source image to be remapped. it is a dictionary with
+        the data as an ndarray in the ['data'] component.
 
-	parm_vector : {ndarray}
-	    parm_vector is the 6-dimensional vector (3 angles, 3 translations)
-	    generated from the registration.
+    parm_vector : {ndarray}
+        parm_vector is the 6-dimensional vector (3 angles, 3 translations)
+        generated from the registration.
 
-	resample : {'linear', 'cubic'}, optional
+    resample : {'linear', 'cubic'}, optional
 
 
-	Returns 
-	-------
-	remaped_image : {dictionary}
+    Returns 
+    -------
+    remaped_image : {dictionary}
 
-	Examples
-	--------
-	    image = fmri_series[i]
-	    x[0:6] = measures[i]['align_rotate'][0:6]
-	    # overwrite the fMRI volume with the aligned volume
-	    fmri_series[i] = remap_image(image, x, resample='cubic')
+    Examples
+    --------
+        image = fmri_series[i]
+        x[0:6] = measures[i]['align_rotate'][0:6]
+        # overwrite the fMRI volume with the aligned volume
+        fmri_series[i] = remap_image(image, x, resample='cubic')
 
-	"""
+    """
 
-	#
-	# remap imageG to coordinates of imageF (creates imageG')
-	# use the 6 dim parm_vector (3 angles, 3 translations) to remap
-	#
-	M_inverse = get_inverse_mappings(parm_vector)
-	(layers, rows, cols) = image['data'].shape
-	# allocate the zero image
-	remaped_image = NP.zeros(layers*rows*cols, dtype=NP.uint8).reshape(layers, rows, cols)
-	remaped_image = {'data' : remaped_image, 'mat' : image['mat'], 
-			 'dim' : image['dim'], 'fwhm' : image['fwhm']}
-	imdata = build_structs()
+    #
+    # remap imageG to coordinates of imageF (creates imageG')
+    # use the 6 dim parm_vector (3 angles, 3 translations) to remap
+    #
+    M_inverse = get_inverse_mappings(parm_vector)
+    (layers, rows, cols) = image['data'].shape
+    # allocate the zero image
+    remaped_image = NP.zeros(layers*rows*cols, dtype=NP.uint8).reshape(layers, rows, cols)
+    remaped_image = {'data' : remaped_image, 'mat' : image['mat'], 
+                     'dim' : image['dim'], 'fwhm' : image['fwhm']}
+    imdata = build_structs()
 
-	if resample == 'linear':
-	    # trilinear interpolation mapping.
-	    R.register_linear_resample(image['data'], remaped_image['data'], M_inverse, imdata['step'])
-	elif resample == 'cubic':
-	    # tricubic convolve interpolation mapping. 
-	    R.register_cubic_resample(image['data'], remaped_image['data'], M_inverse, imdata['step'])
+    if resample == 'linear':
+        # trilinear interpolation mapping.
+        R.register_linear_resample(image['data'], remaped_image['data'], M_inverse, imdata['step'])
+    elif resample == 'cubic':
+        # tricubic convolve interpolation mapping. 
+        R.register_cubic_resample(image['data'], remaped_image['data'], M_inverse, imdata['step'])
 
-	return remaped_image
+    return remaped_image
 
 def get_inverse_mappings(parm_vector):
-	"""
-	M_inverse = get_inverse_mappings(parm_vector)
+    """
+    M_inverse = get_inverse_mappings(parm_vector)
 
-	takes the 6-dimensional rotation/translation vector and builds the inverse
-	4x4 mapping matrix M_inverse that will map imageG to imageF orientation
+    takes the 6-dimensional rotation/translation vector and builds the inverse
+    4x4 mapping matrix M_inverse that will map imageG to imageF orientation
 
-	Parameters 
-	----------
-	parm_vector : {nd_array} 
+    Parameters 
+    ----------
+    parm_vector : {nd_array} 
 
-	Returns 
-	-------
-	M_inverse : {nd_array}
+    Returns 
+    -------
+    M_inverse : {nd_array}
 
-	Examples
-	--------
+    Examples
+    --------
 
-	>>> import numpy as NP
-	>>> import _registration as reg
-	>>> array = NP.zeros(6, dtype=float)
-	>>> M = reg.get_inverse_mappings(array)
-	>>> M 
+    >>> import numpy as NP
+    >>> import _registration as reg
+    >>> array = NP.zeros(6, dtype=float)
+    >>> M = reg.get_inverse_mappings(array)
+    >>> M 
 
-	array([
-	[ 1.,  0.,  0.,  0.],
-       	[ 0.,  1.,  0.,  0.],
-       	[ 0.,  0.,  1.,  0.],
-       	[ 0.,  0.,  0.,  1.]])
+    array([
+    [ 1.,  0.,  0.,  0.],
+    [ 0.,  1.,  0.,  0.],
+    [ 0.,  0.,  1.,  0.],
+    [ 0.,  0.,  0.,  1.]])
 
-	"""
-	# get the inverse mapping to rotate the G matrix to F space following registration
-	imdata = build_structs()
-	# inverse angles and translations
-	imdata['parms'][0] = -parm_vector[0]
-	imdata['parms'][1] = -parm_vector[1]
-	imdata['parms'][2] = -parm_vector[2]
-	imdata['parms'][3] = -parm_vector[3]
-	imdata['parms'][4] = -parm_vector[4]
-	imdata['parms'][5] = -parm_vector[5]
-	M_inverse = build_rotate_matrix(imdata['parms'])
-	return M_inverse
+    """
+    # get the inverse mapping to rotate the G matrix to F space following registration
+    imdata = build_structs()
+    # inverse angles and translations
+    imdata['parms'][0] = -parm_vector[0]
+    imdata['parms'][1] = -parm_vector[1]
+    imdata['parms'][2] = -parm_vector[2]
+    imdata['parms'][3] = -parm_vector[3]
+    imdata['parms'][4] = -parm_vector[4]
+    imdata['parms'][5] = -parm_vector[5]
+    M_inverse = build_rotate_matrix(imdata['parms'])
+    return M_inverse
 
 def python_coreg(image1, image2, imdata, ftype=1, smimage=0, lite=0, smhist=0,
-		 method='nmi', opt_method='powell'):
-	"""
-	parm_vector = python_coreg(image1, image2, imdata, ftype=1, smimage=0, lite=0,
-				   smhist=0, method='nmi', opt_method='powell'):
+                 method='nmi', opt_method='powell'):
+    """
+    parm_vector = python_coreg(image1, image2, imdata, ftype=1, smimage=0, lite=0,
+                               smhist=0, method='nmi', opt_method='powell'):
 
-	takes two images and the image data descriptor (imdata) and determines the optimal 
-	alignment of the two images (measured by mutual information or cross correlation) 
-	using optimization search of 3 angle and 3 translation parameters. The optimization 
-	uses either the Powell or Conjugate Gradient methods in the scipy optimization 
-	package. The optimal parameter is returned.
+    takes two images and the image data descriptor (imdata) and determines the optimal 
+    alignment of the two images (measured by mutual information or cross correlation) 
+    using optimization search of 3 angle and 3 translation parameters. The optimization 
+    uses either the Powell or Conjugate Gradient methods in the scipy optimization 
+    package. The optimal parameter is returned.
 
-	Parameters 
-	----------
-	image1 : {dictionary} 
-	    image1 is the source image to be remapped during the registration. 
-	    it is a dictionary with the data as an ndarray in the ['data'] component.
-	image2 : {dictionary} 
-	    image2 is the reference image that image1 gets mapped to. 
-	imdata : {dictionary} 
-	    image sampling and optimization information.
-	ftype : {0, 1}, optional
-	    flag for type of low pass filter. 0 is Gauss-Spline
-	    1 is pure Gauss. Sigma determined from volume sampling info.
-	smimage : {0, 1}, optional
-	    flag for volume 3D low pass filtering of image 2.
-	    0 for no filter, 1 for do filter.
-	lite : {0, 1}, optional
-	    lite of 1 is to jitter both images during resampling. 0
-	    is to not jitter. jittering is for non-aliased volumes.
-	smhist: {0, 1}, optional
-	    flag for joint histogram low pass filtering. 0 for no filter,
-	    1 for do filter.
-	method: {'nmi', 'mi', 'ncc', 'ecc'}, optional
-	    flag for type of registration metric. nmi is normalized mutual
-	    information; mi is mutual information; ecc is entropy cross
-	    correlation; ncc is normalized cross correlation.
-	opt_method: {'powell', 'hybrid'}, optional
-	    registration is two pass. Pass 1 is low res to get close to alignment
-	    and pass 2 starts at the pass 1 optimal alignment. In powell pass 1 and
-	    2 are powell, in hybrid pass 2 is conjugate gradient.
+    Parameters 
+    ----------
+    image1 : {dictionary} 
+        image1 is the source image to be remapped during the registration. 
+        it is a dictionary with the data as an ndarray in the ['data'] component.
+    image2 : {dictionary} 
+        image2 is the reference image that image1 gets mapped to. 
+    imdata : {dictionary} 
+        image sampling and optimization information.
+    ftype : {0, 1}, optional
+        flag for type of low pass filter. 0 is Gauss-Spline
+        1 is pure Gauss. Sigma determined from volume sampling info.
+    smimage : {0, 1}, optional
+        flag for volume 3D low pass filtering of image 2.
+        0 for no filter, 1 for do filter.
+    lite : {0, 1}, optional
+        lite of 1 is to jitter both images during resampling. 0
+        is to not jitter. jittering is for non-aliased volumes.
+    smhist: {0, 1}, optional
+        flag for joint histogram low pass filtering. 0 for no filter,
+        1 for do filter.
+    method: {'nmi', 'mi', 'ncc', 'ecc'}, optional
+        flag for type of registration metric. nmi is normalized mutual
+        information; mi is mutual information; ecc is entropy cross
+        correlation; ncc is normalized cross correlation.
+    opt_method: {'powell', 'hybrid'}, optional
+        registration is two pass. Pass 1 is low res to get close to alignment
+        and pass 2 starts at the pass 1 optimal alignment. In powell pass 1 and
+        2 are powell, in hybrid pass 2 is conjugate gradient.
 
 
-	Returns 
-	-------
-	parm_vector : {nd_array}
-	    this is the optimal alignment (6-dim) array with 3 angles and
-	    3 translations.
+    Returns 
+    -------
+    parm_vector : {nd_array}
+        this is the optimal alignment (6-dim) array with 3 angles and
+        3 translations.
 
-	Examples
-	--------
+    Examples
+    --------
 
-	>>> import numpy as NP
-	>>> import _registration as reg
+    >>> import numpy as NP
+    >>> import _registration as reg
 
-	>>> image1, image2, imdata = reg.demo_MRI_volume_align()
-	>>> parm_vector = python_coreg(image1, image2, imdata)
+    >>> image1, image2, imdata = reg.demo_MRI_volume_align()
+    >>> parm_vector = python_coreg(image1, image2, imdata)
 
-	"""
-    	start = time.time()
-	# smooth of the images
-	if smimage: 
-	    image_F_xyz2 = filter_image_3D(image2['data'], image2['fwhm'], ftype)
-	    image2['data'] = image_F_xyz2
-	parm_vector = multires_registration(image1, image2, imdata, lite, smhist, method, opt_method)
-    	stop = time.time()
-	print 'Total Optimizer Time is ', (stop-start)
-	return parm_vector
+    """
+    start = time.time()
+    # smooth of the images
+    if smimage: 
+        image_F_xyz2 = filter_image_3D(image2['data'], image2['fwhm'], ftype)
+        image2['data'] = image_F_xyz2
+    parm_vector = multires_registration(image1, image2, imdata, lite, smhist, method, opt_method)
+    stop = time.time()
+    print 'Total Optimizer Time is ', (stop-start)
+    return parm_vector
 
 def multires_registration(image1, image2, imdata, lite, smhist, method, opt_method):
-	"""
-	x = multires_registration(image1, image2, imdata, lite, smhist, method, opt_method)
+    """
+    x = multires_registration(image1, image2, imdata, lite, smhist, method, opt_method)
 
-	to be called by python_coreg() which optionally does 3D image filtering and 
-	provies timing for registration.
+    to be called by python_coreg(), which optionally does 3D image filtering and
+    provides timing for registration.
 
-	Parameters 
-	----------
+    Parameters 
+    ----------
 
-	image1 : {dictionary} 
-	    image1 is the source image to be remapped during the registration. 
-	    it is a dictionary with the data as an ndarray in the ['data'] component.
-	image2 : {dictionary} 
-	    image2 is the reference image that image1 gets mapped to. 
-	imdata : {dictionary} 
-	    image sampling and optimization information.
-	lite : {integer}
-	    lite of 1 is to jitter both images during resampling. 0
-	    is to not jitter. jittering is for non-aliased volumes.
-	smhist: {integer}
-	    flag for joint histogram low pass filtering. 0 for no filter,
-	    1 for do filter.
-	method: {'nmi', 'mi', 'ncc', 'ecc'}
-	    flag for type of registration metric. nmi is normalized mutual
-	    information; mi is mutual information; ecc is entropy cross
-	    correlation; ncc is normalized cross correlation.
-	opt_method: {'powell', 'hybrid'}
-	    registration is two pass. Pass 1 is low res to get close to alignment
-	    and pass 2 starts at the pass 1 optimal alignment. In powell pass 1 and
-	    2 are powell, in hybrid pass 2 is conjugate gradient.
+    image1 : {dictionary} 
+        image1 is the source image to be remapped during the registration. 
+        it is a dictionary with the data as an ndarray in the ['data'] component.
+    image2 : {dictionary} 
+        image2 is the reference image that image1 gets mapped to. 
+    imdata : {dictionary} 
+        image sampling and optimization information.
+    lite : {integer}
+        lite of 1 is to jitter both images during resampling. 0
+        is to not jitter. jittering is for non-aliased volumes.
+    smhist: {integer}
+        flag for joint histogram low pass filtering. 0 for no filter,
+        1 for do filter.
+    method: {'nmi', 'mi', 'ncc', 'ecc'}
+        flag for type of registration metric. nmi is normalized mutual
+        information; mi is mutual information; ecc is entropy cross
+        correlation; ncc is normalized cross correlation.
+    opt_method: {'powell', 'hybrid'}
+        registration is two pass. Pass 1 is low res to get close to alignment
+        and pass 2 starts at the pass 1 optimal alignment. In powell pass 1 and
+        2 are powell, in hybrid pass 2 is conjugate gradient.
 
-	Returns 
-	-------
-	x : {nd_array}
-	    this is the optimal alignment (6-dim) array with 3 angles and
-	    3 translations.
+    Returns 
+    -------
+    x : {nd_array}
+        this is the optimal alignment (6-dim) array with 3 angles and
+        3 translations.
 
-	Examples
-	--------
+    Examples
+    --------
 
-	(calling this from python_coreg which optionally filters image2)
-	>>> import numpy as NP
-	>>> import _registration as reg
-	>>> image1, image2, imdata = reg.demo_MRI_volume_align()
-	>>> parm_vector = python_coreg(image1, image2, imdata)
+    (calling this from python_coreg which optionally filters image2)
+    >>> import numpy as NP
+    >>> import _registration as reg
+    >>> image1, image2, imdata = reg.demo_MRI_volume_align()
+    >>> parm_vector = python_coreg(image1, image2, imdata)
 
-	"""
-	ret_histo=0
-	# zero out the start parameter; but this may be set to large values 
-	# if the head is out of range and well off the optimal alignment skirt
-	imdata['parms'][0:5] = 0.0
-	# make the step a scalar to can put in a multi-res loop
-	loop = range(imdata['sample'].size)
-    	x = imdata['parms']
-	for i in loop:
-	    step = imdata['sample'][i]
-	    imdata['step'][:] = step
-	    optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist,
-			    method, ret_histo)
-	    p_args = (optfunc_args,)
-	    if opt_method=='powell':
-		print 'POWELL multi-res registration step size ', step
-		print 'vector ', x
-    	        x = OPT.fmin_powell(optimize_function, x, args=p_args,
-				    callback=callback_powell) 
-	    elif opt_method=='cg':
-		print 'CG multi-res registration step size ', step
-		print 'vector ', x
-    	        x = OPT.fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) 
-	    elif opt_method=='hybrid':
-		if i==0:
-		    print 'Hybrid POWELL multi-res registration step size ', step
-		    print 'vector ', x
-		    lite = 0
-	    	    optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist,
-				    method, ret_histo)
-	    	    p_args = (optfunc_args,)
-    	            x = OPT.fmin_powell(optimize_function, x, args=p_args, callback=callback_powell) 
-	        elif i==1:
-		    print 'Hybrid CG multi-res registration step size ', step
-		    print 'vector ', x
-		    lite = 1
-	    	    optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, 
-				    smhist, method, ret_histo)
-	    	    p_args = (optfunc_args,)
-    	            x = OPT.fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) 
+    """
+    ret_histo=0
+    # zero out the start parameter; but this may be set to large values 
+    # if the head is out of range and well off the optimal alignment skirt
+    imdata['parms'][0:5] = 0.0
+    # make the step a scalar so it can be used in a multi-res loop
+    loop = range(imdata['sample'].size)
+    x = imdata['parms']
+    for i in loop:
+        step = imdata['sample'][i]
+        imdata['step'][:] = step
+        optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist,
+                        method, ret_histo)
+        p_args = (optfunc_args,)
+        if opt_method=='powell':
+            print 'POWELL multi-res registration step size ', step
+            print 'vector ', x
+            x = OPT.fmin_powell(optimize_function, x, args=p_args,
+                                callback=callback_powell) 
+        elif opt_method=='cg':
+            print 'CG multi-res registration step size ', step
+            print 'vector ', x
+            x = OPT.fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) 
+        elif opt_method=='hybrid':
+            if i==0:
+                print 'Hybrid POWELL multi-res registration step size ', step
+                print 'vector ', x
+                lite = 0
+                optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist,
+                                method, ret_histo)
+                p_args = (optfunc_args,)
+                x = OPT.fmin_powell(optimize_function, x, args=p_args, callback=callback_powell) 
+            elif i==1:
+                print 'Hybrid CG multi-res registration step size ', step
+                print 'vector ', x
+                lite = 1
+                optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, 
+                                smhist, method, ret_histo)
+                p_args = (optfunc_args,)
+                x = OPT.fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) 
 
-	return x
+    return x
 
 
 def callback_powell(x):
-	"""
-	called by optimize.powell only. prints current parameter vector.
-	"""
-	print 'Parameter Vector from Powell: - '
-	print x
-	return
+    """
+    called by optimize.powell only. prints current parameter vector.
+    """
+    print 'Parameter Vector from Powell: - '
+    print x
+    return
 
 def callback_cg(x):
-	"""
-	called by optimize.cg only. prints current parameter vector.
-	"""
-	print 'Parameter Vector from Conjugate Gradient: - '
-	print x
-	return
+    """
+    called by optimize.cg only. prints current parameter vector.
+    """
+    print 'Parameter Vector from Conjugate Gradient: - '
+    print x
+    return
 
 def smooth_kernel(fwhm, x, ktype=1):
-	"""
-	kernel = smooth_kernel(fwhm, x, ktype=1)
+    """
+    kernel = smooth_kernel(fwhm, x, ktype=1)
 
-	smooth_kernel creates filter kernel based on image sampling parameters.
-	provide domain of kernel and sampling parameters. 
+    smooth_kernel creates a filter kernel based on image sampling parameters.
+    provide the domain of the kernel and the sampling parameters.
 
-	Parameters 
-	----------
-	fwhm : {int}
-	    used for kernel width
-	x : {nd_array}
-	    domain of kernel
-	ktype: {1, 2}, optional
-	    kernel type. 1 is Gauss convoled with spline, 2 is Gauss
+    Parameters 
+    ----------
+    fwhm : {int}
+        used for kernel width
+    x : {nd_array}
+        domain of kernel
+    ktype: {1, 2}, optional
+        kernel type. 1 is Gauss convolved with spline, 2 is Gauss
 
 
-	Returns 
-	-------
-	kernel : {nd_array}
+    Returns 
+    -------
+    kernel : {nd_array}
 
-	Examples
-	--------
+    Examples
+    --------
 
-	>>> import numpy as NP
-	>>> import _registration as reg
-	>>> fwhm = 3
-	>>> ftype = 2
-	>>> p = NP.ceil(2*fwhm).astype(int)
-	>>> x = NP.array(range(-p, p+1))
-	>>> kernel = reg.smooth_kernel(fwhm, x, ktype=ftype)
-	>>> kernel
+    >>> import numpy as NP
+    >>> import _registration as reg
+    >>> fwhm = 3
+    >>> ftype = 2
+    >>> p = NP.ceil(2*fwhm).astype(int)
+    >>> x = NP.array(range(-p, p+1))
+    >>> kernel = reg.smooth_kernel(fwhm, x, ktype=ftype)
+    >>> kernel
 
-	array([
-	 4.77853772e-06,   1.41575516e-04,   2.26516955e-03,
-         1.95718875e-02,   9.13238336e-02,   2.30120330e-01,
-         3.13144850e-01,   2.30120330e-01,   9.13238336e-02,
-         1.95718875e-02,   2.26516955e-03,   1.41575516e-04,
-         4.77853772e-06])
+    array([
+     4.77853772e-06,   1.41575516e-04,   2.26516955e-03,
+     1.95718875e-02,   9.13238336e-02,   2.30120330e-01,
+     3.13144850e-01,   2.30120330e-01,   9.13238336e-02,
+     1.95718875e-02,   2.26516955e-03,   1.41575516e-04,
+     4.77853772e-06])
 
-	"""
-	eps = 0.00001
-	s   = NP.square((fwhm/math.sqrt(8.0*math.log(2.0)))) + eps
-	if ktype==1:
-	    # from SPM: Gauss kernel convolved with 1st degree B spline
-	    w1 = 0.5 * math.sqrt(2.0/s)
-	    w2 = -0.5 / s
-	    w3 = math.sqrt((s*math.pi) /2.0)
-	    kernel = 0.5*(SP.erf(w1*(x+1))*(x+1)       + SP.erf(w1*(x-1))*(x-1)    - 2.0*SP.erf(w1*(x))*(x) + 
-	 	          w3*(NP.exp(w2*NP.square(x+1))) + NP.exp(w2*(NP.square(x-1))) - 2.0*NP.exp(w2*NP.square(x)))
-	    kernel[kernel<0] = 0
-	    kernel = kernel / kernel.sum()  
-	else:
-	    # Gauss kernel 
-	    kernel = (1.0/math.sqrt(2.0*math.pi*s)) * NP.exp(-NP.square(x)/(2.0*s)) 
-	    kernel = kernel / kernel.sum()  
+    """
+    eps = 0.00001
+    s   = NP.square((fwhm/math.sqrt(8.0*math.log(2.0)))) + eps
+    if ktype==1:
+        # from SPM: Gauss kernel convolved with 1st degree B spline
+        w1 = 0.5 * math.sqrt(2.0/s)
+        w2 = -0.5 / s
+        w3 = math.sqrt((s*math.pi) /2.0)
+        kernel = 0.5*(SP.erf(w1*(x+1))*(x+1)       + SP.erf(w1*(x-1))*(x-1)    - 2.0*SP.erf(w1*(x))*(x) + 
+                      w3*(NP.exp(w2*NP.square(x+1))) + NP.exp(w2*(NP.square(x-1))) - 2.0*NP.exp(w2*NP.square(x)))
+        kernel[kernel<0] = 0
+        kernel = kernel / kernel.sum()  
+    else:
+        # Gauss kernel 
+        kernel = (1.0/math.sqrt(2.0*math.pi*s)) * NP.exp(-NP.square(x)/(2.0*s)) 
+        kernel = kernel / kernel.sum()  
 
-	return kernel
+    return kernel
 
 def filter_image_3D(imageRaw, fwhm, ftype=2):
-	"""
-	image_F_xyz = filter_image_3D(imageRaw, fwhm, ftype=2):
-	does 3D separable digital filtering using scipy.ndimage.correlate1d
+    """
+    image_F_xyz = filter_image_3D(imageRaw, fwhm, ftype=2):
+    does 3D separable digital filtering using scipy.ndimage.correlate1d
 
-	Parameters 
-	----------
-	imageRaw : {nd_array}
-	    the unfiltered 3D volume image
-	fwhm : {int}
-	    used for kernel width
-	ktype: {1, 2}, optional
-	    kernel type. 1 is Gauss convoled with spline, 2 is Gauss
+    Parameters 
+    ----------
+    imageRaw : {nd_array}
+        the unfiltered 3D volume image
+    fwhm : {int}
+        used for kernel width
+    ktype: {1, 2}, optional
+        kernel type. 1 is Gauss convolved with spline, 2 is Gauss
 
-	Returns 
-	-------
-	image_F_xyz : {nd_array}
-	    3D filtered volume image
+    Returns 
+    -------
+    image_F_xyz : {nd_array}
+        3D filtered volume image
 
-	Examples
-	--------
+    Examples
+    --------
 
-	>>> import _registration as reg
-	>>> image1, image2, imdata = reg.demo_MRI_volume_align()
-	>>> ftype = 1
-	>>> image_Filter_xyz = filter_image_3D(image1['data'], image1['fwhm'], ftype)
-	>>> image1['data'] = image_Filter_xyz
-	"""
+    >>> import _registration as reg
+    >>> image1, image2, imdata = reg.demo_MRI_volume_align()
+    >>> ftype = 1
+    >>> image_Filter_xyz = filter_image_3D(image1['data'], image1['fwhm'], ftype)
+    >>> image1['data'] = image_Filter_xyz
+    """
 
-	p = NP.ceil(2*fwhm[0]).astype(int)
-	x = NP.array(range(-p, p+1))
-	kernel_x = smooth_kernel(fwhm[0], x, ktype=ftype)
-	p = NP.ceil(2*fwhm[1]).astype(int)
-	x = NP.array(range(-p, p+1))
-	kernel_y = smooth_kernel(fwhm[1], x, ktype=ftype)
-	p = NP.ceil(2*fwhm[2]).astype(int)
-	x = NP.array(range(-p, p+1))
-	kernel_z = smooth_kernel(fwhm[2], x, ktype=ftype)
-	output=None
-	# 3D filter in 3 1D separable stages
-	axis = 0
-	image_F_x   = NDI.correlate1d(imageRaw,   kernel_x, axis, output)
-	axis = 1
-	image_F_xy  = NDI.correlate1d(image_F_x,  kernel_y, axis, output)
-	axis = 2
-	image_F_xyz = NDI.correlate1d(image_F_xy, kernel_z, axis, output)
-	return image_F_xyz  
+    p = NP.ceil(2*fwhm[0]).astype(int)
+    x = NP.array(range(-p, p+1))
+    kernel_x = smooth_kernel(fwhm[0], x, ktype=ftype)
+    p = NP.ceil(2*fwhm[1]).astype(int)
+    x = NP.array(range(-p, p+1))
+    kernel_y = smooth_kernel(fwhm[1], x, ktype=ftype)
+    p = NP.ceil(2*fwhm[2]).astype(int)
+    x = NP.array(range(-p, p+1))
+    kernel_z = smooth_kernel(fwhm[2], x, ktype=ftype)
+    output=None
+    # 3D filter in 3 1D separable stages
+    axis = 0
+    image_F_x   = NDI.correlate1d(imageRaw,   kernel_x, axis, output)
+    axis = 1
+    image_F_xy  = NDI.correlate1d(image_F_x,  kernel_y, axis, output)
+    axis = 2
+    image_F_xyz = NDI.correlate1d(image_F_xy, kernel_z, axis, output)
+    return image_F_xyz  
 
 def build_fwhm(M, S):
-	"""
-	fwhm = build_fwhm(M, S)
+    """
+    fwhm = build_fwhm(M, S)
 
-	builds the low pass filter kernel sigma value from the image pixel sampling
+    builds the low pass filter kernel sigma value from the image pixel sampling
 
-	Parameters 
-	----------
-	M : {nd_array}
-	    input 4x4 voxel to physical map matrix (called 'MAT')
+    Parameters 
+    ----------
+    M : {nd_array}
+        input 4x4 voxel to physical map matrix (called 'MAT')
 
-	S : {nd_array}
-	    1x3 sample increment array. should be = (1, 1, 1)
+    S : {nd_array}
+        1x3 sample increment array. should be = (1, 1, 1)
 
-	Returns 
-	-------
-	fwhm : {nd_array}
-	    the 3D Gaussian kernel width
+    Returns 
+    -------
+    fwhm : {nd_array}
+        the 3D Gaussian kernel width
 
-	Examples
-	--------
+    Examples
+    --------
 
-	>>> import numpy as NP
-	>>> import _registration as reg
-	>>> anat_desc = reg.load_anatMRI_desc()
-	>>> image1 = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img')
-	>>> imdata = reg.build_structs()
-	>>> image1['fwhm'] = reg.build_fwhm(image1['mat'], imdata['step'])
+    >>> import numpy as NP
+    >>> import _registration as reg
+    >>> anat_desc = reg.load_anatMRI_desc()
+    >>> image1 = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img')
+    >>> imdata = reg.build_structs()
+    >>> image1['fwhm'] = reg.build_fwhm(image1['mat'], imdata['step'])
 
-	"""
-	view_3x3 = NP.square(M[0:3, 0:3])
-	# sum the elements inn the first row
-	vxg = NP.sqrt(view_3x3.sum(axis=0))
-	# assumes that sampling is the same for xyz
- 	size = NP.array([1,1,1])*S[0]
-	x = NP.square(size) - NP.square(vxg)
-	# clip
-	x[x<0] = 0
-	fwhm = NP.sqrt(x) / vxg
-	# pathology when stepsize = 1 for MAT equal to the identity matrix
-	fwhm[fwhm==0] = 1
-	# return the 3D Gaussian kernel width (xyz)
-	return fwhm 
+    """
+    view_3x3 = NP.square(M[0:3, 0:3])
+    # sum the squared elements of each column
+    vxg = NP.sqrt(view_3x3.sum(axis=0))
+    # assumes that sampling is the same for xyz
+    size = NP.array([1,1,1])*S[0]
+    x = NP.square(size) - NP.square(vxg)
+    # clip
+    x[x<0] = 0
+    fwhm = NP.sqrt(x) / vxg
+    # pathology when stepsize = 1 for MAT equal to the identity matrix
+    fwhm[fwhm==0] = 1
+    # return the 3D Gaussian kernel width (xyz)
+    return fwhm 
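
As a hedged worked example of the computation above, take an assumed MAT with
2 mm isotropic voxels and a unit step (values are illustrative only):

    import numpy as np
    M = np.diag([2.0, 2.0, 2.0, 1.0])                    # assumed voxel-to-physical map
    S = np.array([1, 1, 1])
    vxg = np.sqrt(np.square(M[0:3, 0:3]).sum(axis=0))    # per-axis voxel size -> [2, 2, 2]
    x = np.square(np.ones(3) * S[0]) - np.square(vxg)
    x[x < 0] = 0                                         # clip negatives
    fwhm = np.sqrt(x) / vxg                              # zeros in this case
    fwhm[fwhm == 0] = 1                                  # guard, so fwhm -> [1, 1, 1]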
 
 def optimize_function(x, optfunc_args):
-	"""
-	cost = optimize_function(x, optfunc_args)    --- OR ---
-	cost, joint_histogram = optimize_function(x, optfunc_args)   
+    """
+    cost = optimize_function(x, optfunc_args)    --- OR ---
+    cost, joint_histogram = optimize_function(x, optfunc_args)   
 
- 	computes the alignment between 2 volumes using cross correlation or mutual
-	information metrics. In both the 8 bit joint histogram of the 2 images is
-	computed. The 8 bit scaling is done using an integrated histogram method and
-	is called prior to this.
+    computes the alignment between 2 volumes using cross correlation or mutual
+    information metrics. In both cases the 8 bit joint histogram of the 2 images
+    is computed. The 8 bit scaling is done using an integrated histogram method
+    and is performed prior to this call.
 
-	Parameters 
-	----------
-	x : {nd_array}
-	    this is the current (6-dim) array with 3 angles and 3 translations.
+    Parameters 
+    ----------
+    x : {nd_array}
+        this is the current (6-dim) array with 3 angles and 3 translations.
 
-	optfunc_args : {tuple}
-	    this is a tuple of 8 elements that is formed in the scipy.optimize powell
-	    and cg (conjugate gradient) functions. this is why the elements need to be
-	    a tuple. The elements of optfunc_args are: 
+    optfunc_args : {tuple}
+        this is a tuple of 8 elements that is passed through the scipy.optimize
+        powell and cg (conjugate gradient) functions, which is why the elements
+        need to be packed in a tuple. The elements of optfunc_args are: 
 
-	    image_F       : {dictionary} 
-	        image_F is the source image to be remapped during the registration. 
-	        it is a dictionary with the data as an ndarray in the ['data'] component.
-	    image_G       : {dictionary} 
-	        image_G is the reference image that image_F gets mapped to. 
-	    sample_vector : {nd_array} 
-	        sample in x,y,x. should be (1,1,1)
-	    fwhm          : {nd_array} 
-	        Gaussian sigma
-	    do_lite       : {0, 1} 
-	        lite of 1 is to jitter both images during resampling. 
-	        0 is to not jitter. jittering is for non-aliased volumes.
-	    smooth        : {0, 1} 
-	        flag for joint histogram low pass filtering. 0 for no filter,
-	        1 for do filter.
-	    method        : {'nmi', 'mi', 'ncc', 'ecc', 'mse'}
-	        flag for type of registration metric. nmi is normalized mutual
-	        information; mi is mutual information; ecc is entropy cross
-	        correlation; ncc is normalized cross correlation. mse is mean
-		square error. with mse there is no joint histogram.
-	    ret_histo     : {0, 1} 
-	        if 0 return is: cost 
-	        if 0 return is: cost, joint_histogram  
+        image_F       : {dictionary} 
+            image_F is the source image to be remapped during the registration. 
+            it is a dictionary with the data as an ndarray in the ['data'] component.
+        image_G       : {dictionary} 
+            image_G is the reference image that image_F gets mapped to. 
+        sample_vector : {nd_array} 
+            sample increment in x, y, z. should be (1, 1, 1)
+        fwhm          : {nd_array} 
+            Gaussian sigma
+        do_lite       : {0, 1} 
+            1 jitters both images during resampling; 0 does not jitter.
+            jittering is used for non-aliased volumes.
+        smooth        : {0, 1} 
+            flag for joint histogram low pass filtering. 0 for no filter,
+            1 for do filter.
+        method        : {'nmi', 'mi', 'ncc', 'ecc', 'mse'}
+            flag for type of registration metric. nmi is normalized mutual
+            information; mi is mutual information; ecc is entropy cross
+            correlation; ncc is normalized cross correlation. mse is mean
+            square error. with mse there is no joint histogram.
+        ret_histo     : {0, 1} 
+            if 0 the return is: cost 
+            if 1 the return is: cost, joint_histogram  
 
-	Returns 
-	-------
-	    cost : {float}
-	        the negative of one of the mutual information metrics
-		or negative cross correlation. use negative as the optimization
-		is minimization.
+    Returns 
+    -------
+        cost : {float}
+            the negative of one of the mutual information metrics or the
+            negative cross correlation. the sign is flipped because the
+            optimization is a minimization.
 
-	    --- OR --- (if ret_histo = 1)
+        --- OR --- (if ret_histo = 1)
 
-	    cost : {float}
-	        the negative of one of the mutual information metrics
-		or negative cross correlation. use negative as the optimization
-		is minimization.
+        cost : {float}
+            the negative of one of the mutual information metrics or the
+            negative cross correlation. the sign is flipped because the
+            optimization is a minimization.
 
-	    joint_histogram : {nd_array}
-	        the 2D (256x256) joint histogram of the two volumes
+        joint_histogram : {nd_array}
+            the 2D (256x256) joint histogram of the two volumes
 
 
-	Examples
-	--------
+    Examples
+    --------
 
-	>>> import numpy as NP
-	>>> import _registration as reg
-	>>> anat_desc = reg.load_anatMRI_desc()
-	>>> image1 = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img')
-	>>> image2 = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img')
-	>>> imdata = reg.build_structs()
-	>>> image1['fwhm'] = reg.build_fwhm(image1['mat'], imdata['step'])
-	>>> image2['fwhm'] = reg.build_fwhm(image2['mat'], imdata['step'])
-	>>> method = 'ncc'
-	>>> lite = 1
-	>>> smhist = 0
-	>>> ret_histo = 1
-	>>> optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo)
-	>>> x = NP.zeros(6, dtype=NP.float64)
-	>>> return cost, joint_histogram = reg.optimize_function(x, optfunc_args)
+    >>> import numpy as NP
+    >>> import _registration as reg
+    >>> anat_desc = reg.load_anatMRI_desc()
+    >>> image1 = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img')
+    >>> image2 = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img')
+    >>> imdata = reg.build_structs()
+    >>> image1['fwhm'] = reg.build_fwhm(image1['mat'], imdata['step'])
+    >>> image2['fwhm'] = reg.build_fwhm(image2['mat'], imdata['step'])
+    >>> method = 'ncc'
+    >>> lite = 1
+    >>> smhist = 0
+    >>> ret_histo = 1
+    >>> optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo)
+    >>> x = NP.zeros(6, dtype=NP.float64)
+    >>> cost, joint_histogram = reg.optimize_function(x, optfunc_args)
 
 
-	"""
+    """
 
-	image_F       = optfunc_args[0]
-	image_G       = optfunc_args[1]
-	sample_vector = optfunc_args[2]
-	fwhm          = optfunc_args[3]
-	do_lite       = optfunc_args[4]
-	smooth        = optfunc_args[5]
-	method        = optfunc_args[6]
-	ret_histo     = optfunc_args[7]
+    image_F       = optfunc_args[0]
+    image_G       = optfunc_args[1]
+    sample_vector = optfunc_args[2]
+    fwhm          = optfunc_args[3]
+    do_lite       = optfunc_args[4]
+    smooth        = optfunc_args[5]
+    method        = optfunc_args[6]
+    ret_histo     = optfunc_args[7]
 
-	rot_matrix = build_rotate_matrix(x)
-	cost = 0.0
-	epsilon = 2.2e-16 
-	# image_G is base image
-	# image_F is the to-be-rotated image
-	# rot_matrix is the 4x4 constructed (current angles and translates) transform matrix
-	# sample_vector is the subsample vector for x-y-z
+    rot_matrix = build_rotate_matrix(x)
+    cost = 0.0
+    epsilon = 2.2e-16 
+    # image_G is base image
+    # image_F is the to-be-rotated image
+    # rot_matrix is the 4x4 constructed (current angles and translates) transform matrix
+    # sample_vector is the subsample vector for x-y-z
 
-	F_inv = NP.linalg.inv(image_F['mat'])
-	composite = NP.dot(F_inv, image_G['mat'])
-	composite = NP.dot(composite, rot_matrix)
+    F_inv = NP.linalg.inv(image_F['mat'])
+    composite = NP.dot(F_inv, image_G['mat'])
+    composite = NP.dot(composite, rot_matrix)
 
-	if method == 'mse':
-	    #
-	    # mean squard error method
-	    #
+    if method == 'mse':
+        #
+        # mean squared error method
+        #
 
-	    (layers, rows, cols) = image_F['data'].shape
-	    # allocate the zero image
-	    remap_image_F = NP.zeros(layers*rows*cols, dtype=NP.uint8).reshape(layers, rows, cols)
-	    imdata = build_structs()
-	    # trilinear interpolation mapping.
-	    R.register_linear_resample(image_F['data'], remap_image_F, composite,
-			               imdata['step'])
-	    cost = (NP.square(image_G['data']-remap_image_F)).mean()
+        (layers, rows, cols) = image_F['data'].shape
+        # allocate the zero image
+        remap_image_F = NP.zeros(layers*rows*cols, dtype=NP.uint8).reshape(layers, rows, cols)
+        imdata = build_structs()
+        # trilinear interpolation mapping.
+        R.register_linear_resample(image_F['data'], remap_image_F, composite,
+                                   imdata['step'])
+        cost = (NP.square(image_G['data']-remap_image_F)).mean()
 
-	    return cost
+        return cost
 
-	else:
-	    #
-	    # histogram-based methods (nmi, ncc, mi, ecc)
-	    #
+    else:
+        #
+        # histogram-based methods (nmi, ncc, mi, ecc)
+        #
 
-	    # allocate memory for 2D histogram
-	    joint_histogram = NP.zeros([256, 256], dtype=NP.float64);
+        # allocate memory for 2D histogram
+        joint_histogram = NP.zeros([256, 256], dtype=NP.float64);
 
-	    if do_lite: 
-	        R.register_histogram_lite(image_F['data'], image_G['data'], composite,
-			                  sample_vector, joint_histogram)
-	    else:
-	        R.register_histogram(image_F['data'], image_G['data'], composite,
-			             sample_vector, joint_histogram)
+        if do_lite: 
+            R.register_histogram_lite(image_F['data'], image_G['data'], composite,
+                                      sample_vector, joint_histogram)
+        else:
+            R.register_histogram(image_F['data'], image_G['data'], composite,
+                                 sample_vector, joint_histogram)
 
-	    # smooth the histogram
-	    if smooth: 
-	        p = NP.ceil(2*fwhm[0]).astype(int)
-	        x = NP.array(range(-p, p+1))
-	        kernel1 = smooth_kernel(fwhm[0], x)
-	        p = NP.ceil(2*fwhm[1]).astype(int)
-	        x = NP.array(range(-p, p+1))
-	        kernel2 = smooth_kernel(fwhm[1], x)
-	        output=None
-	        # 2D filter in 1D separable stages
-	        axis = 0
-	        result = NDI.correlate1d(joint_histogram, kernel1, axis, output)
-	        axis = 1
-	        joint_histogram = NDI.correlate1d(result, kernel1, axis, output)
+        # smooth the histogram
+        if smooth: 
+            p = NP.ceil(2*fwhm[0]).astype(int)
+            x = NP.array(range(-p, p+1))
+            kernel1 = smooth_kernel(fwhm[0], x)
+            p = NP.ceil(2*fwhm[1]).astype(int)
+            x = NP.array(range(-p, p+1))
+            kernel2 = smooth_kernel(fwhm[1], x)
+            output=None
+            # 2D filter in 1D separable stages
+            axis = 0
+            result = NDI.correlate1d(joint_histogram, kernel1, axis, output)
+            axis = 1
+            joint_histogram = NDI.correlate1d(result, kernel2, axis, output)
 
-	    joint_histogram += epsilon # prevent log(0) 
-	    # normalize the joint histogram
-	    joint_histogram /= joint_histogram.sum() 
-	    # get the marginals
-	    marginal_col = joint_histogram.sum(axis=0)
-	    marginal_row = joint_histogram.sum(axis=1)
+        joint_histogram += epsilon # prevent log(0) 
+        # normalize the joint histogram
+        joint_histogram /= joint_histogram.sum() 
+        # get the marginals
+        marginal_col = joint_histogram.sum(axis=0)
+        marginal_row = joint_histogram.sum(axis=1)
 
-	    if method == 'mi':
-	        # mutual information
-	        marginal_outer = NP.outer(marginal_col, marginal_row)
-	        H = joint_histogram * NP.log(joint_histogram / marginal_outer)  
-	        mutual_information = H.sum()
-	        cost = -mutual_information
+        if method == 'mi':
+            # mutual information
+            marginal_outer = NP.outer(marginal_col, marginal_row)
+            H = joint_histogram * NP.log(joint_histogram / marginal_outer)  
+            mutual_information = H.sum()
+            cost = -mutual_information
 
-	    elif method == 'ecc':
-	        # entropy correlation coefficient 
-	        marginal_outer = NP.outer(marginal_col, marginal_row)
-	        H = joint_histogram * NP.log(joint_histogram / marginal_outer)  
-	        mutual_information = H.sum()
-	        row_entropy = marginal_row * NP.log(marginal_row)
-	        col_entropy = marginal_col * NP.log(marginal_col)
-	        ecc  = -2.0*mutual_information/(row_entropy.sum() + col_entropy.sum())
-	        cost = -ecc
+        elif method == 'ecc':
+            # entropy correlation coefficient 
+            marginal_outer = NP.outer(marginal_col, marginal_row)
+            H = joint_histogram * NP.log(joint_histogram / marginal_outer)  
+            mutual_information = H.sum()
+            row_entropy = marginal_row * NP.log(marginal_row)
+            col_entropy = marginal_col * NP.log(marginal_col)
+            ecc  = -2.0*mutual_information/(row_entropy.sum() + col_entropy.sum())
+            cost = -ecc
 
-	    elif method == 'nmi':
-	        # normalized mutual information
-	        row_entropy = marginal_row * NP.log(marginal_row)
-	        col_entropy = marginal_col * NP.log(marginal_col)
-	        H = joint_histogram * NP.log(joint_histogram)  
-	        nmi = (row_entropy.sum() + col_entropy.sum()) / (H.sum())
-	        cost = -nmi
+        elif method == 'nmi':
+            # normalized mutual information
+            row_entropy = marginal_row * NP.log(marginal_row)
+            col_entropy = marginal_col * NP.log(marginal_col)
+            H = joint_histogram * NP.log(joint_histogram)  
+            nmi = (row_entropy.sum() + col_entropy.sum()) / (H.sum())
+            cost = -nmi
 
-	    elif method == 'ncc':
-	        # cross correlation from the joint histogram 
-	        r, c = joint_histogram.shape
-	        i = NP.array(range(1,c+1))
-	        j = NP.array(range(1,r+1))
-	        m1 = (marginal_row * i).sum()
-	        m2 = (marginal_col * j).sum()
-	        sig1 = NP.sqrt((marginal_row*(NP.square(i-m1))).sum())
-	        sig2 = NP.sqrt((marginal_col*(NP.square(j-m2))).sum())
-	        [a, b] = NP.mgrid[1:c+1, 1:r+1]
-	        a = a - m1
-	        b = b - m2
-	        # element multiplies in the joint histogram and grids
-	        H = ((joint_histogram * a) * b).sum()
-	        ncc = H / (NP.dot(sig1, sig2)) 
-	        cost = -ncc
+        elif method == 'ncc':
+            # cross correlation from the joint histogram 
+            r, c = joint_histogram.shape
+            i = NP.array(range(1,c+1))
+            j = NP.array(range(1,r+1))
+            m1 = (marginal_row * i).sum()
+            m2 = (marginal_col * j).sum()
+            sig1 = NP.sqrt((marginal_row*(NP.square(i-m1))).sum())
+            sig2 = NP.sqrt((marginal_col*(NP.square(j-m2))).sum())
+            [a, b] = NP.mgrid[1:c+1, 1:r+1]
+            a = a - m1
+            b = b - m2
+            # element multiplies in the joint histogram and grids
+            H = ((joint_histogram * a) * b).sum()
+            ncc = H / (NP.dot(sig1, sig2)) 
+            cost = -ncc
 
-	    if ret_histo:
-	        return cost, joint_histogram 
-    	    else:
-	        return cost
+        if ret_histo:
+            return cost, joint_histogram 
+        else:
+            return cost
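
Since the histogram metrics above are all sums over the normalized joint
histogram, here is a hedged stand-alone sketch of the mutual-information
branch with a random 256x256 histogram standing in for the one produced by
the C registration routines:

    import numpy as np
    eps = 2.2e-16
    jh = np.random.rand(256, 256) + eps          # stand-in joint histogram
    jh /= jh.sum()                               # normalize to a joint probability
    marginal_col = jh.sum(axis=0)
    marginal_row = jh.sum(axis=1)
    mi = (jh * np.log(jh / np.outer(marginal_col, marginal_row))).sum()
    cost = -mi                                   # negated because the optimizer minimizes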
 
 
 def build_structs(step=1):
-	"""
-	img_data = build_structs(step=1)
+    """
+    img_data = build_structs(step=1)
 
-	builds the image data (imdata) dictionary for later use as parameter
-	storage in the co-registration.
+    builds the image data (imdata) dictionary for later use as parameter
+    storage in the co-registration.
 
-	Parameters 
-	----------
-	step : {int} : optional
-	default is 1 and is the sample increment in voxels. This sets the sample
-	for x,y,z and is the same value in all 3 axes. only change the default for debug.
+    Parameters 
+    ----------
+    step : {int} : optional
+        default is 1; this is the sample increment in voxels. It sets the sample
+        step for x, y and z to the same value in all 3 axes. only change the
+        default for debugging.
 
-	Returns 
-	-------
-	img_data : {dictionary}
+    Returns 
+    -------
+    img_data : {dictionary}
 
-	Examples
-	--------
+    Examples
+    --------
 
-	>>> import numpy as NP
-	>>> import _registration as reg
-	>>> imdata = reg.build_structs()
+    >>> import numpy as NP
+    >>> import _registration as reg
+    >>> imdata = reg.build_structs()
 
-	"""
+    """
 
-	# build image data structures here
-	P = NP.zeros(6, dtype=NP.float64);
-	T = NP.zeros(6, dtype=NP.float64);
-	F = NP.zeros(2, dtype=NP.int32);
-	S = NP.ones(3,  dtype=NP.int32);
-	sample = NP.zeros(2, dtype=NP.int32);
-	S[0] = step
-	S[1] = step
-	S[2] = step
-	# image/histogram smoothing
-	F[0] = 3
-	F[1] = 3
-	# subsample for multiresolution registration
-	sample[0] = 4
-	sample[1] = 2
-	# tolerances for angle (0-2) and translation (3-5)
-	T[0] = 0.02 
-	T[1] = 0.02 
-	T[2] = 0.02 
-	T[3] = 0.001 
-	T[4] = 0.001 
-	T[5] = 0.001 
-	# P[0] = alpha <=> pitch. + alpha is moving back in the sagittal plane
-	# P[1] = beta  <=> roll.  + beta  is moving right in the coronal plane
-	# P[2] = gamma <=> yaw.   + gamma is right turn in the transverse plane
-	# P[3] = Tx
-	# P[4] = Ty
-	# P[5] = Tz
-	img_data = {'parms' : P, 'step' : S, 'fwhm' : F, 'tol' : T, 'sample' : sample}
-	return img_data
+    # build image data structures here
+    P = NP.zeros(6, dtype=NP.float64);
+    T = NP.zeros(6, dtype=NP.float64);
+    F = NP.zeros(2, dtype=NP.int32);
+    S = NP.ones(3,  dtype=NP.int32);
+    sample = NP.zeros(2, dtype=NP.int32);
+    S[0] = step
+    S[1] = step
+    S[2] = step
+    # image/histogram smoothing
+    F[0] = 3
+    F[1] = 3
+    # subsample for multiresolution registration
+    sample[0] = 4
+    sample[1] = 2
+    # tolerances for angle (0-2) and translation (3-5)
+    T[0] = 0.02 
+    T[1] = 0.02 
+    T[2] = 0.02 
+    T[3] = 0.001 
+    T[4] = 0.001 
+    T[5] = 0.001 
+    # P[0] = alpha <=> pitch. + alpha is moving back in the sagittal plane
+    # P[1] = beta  <=> roll.  + beta  is moving right in the coronal plane
+    # P[2] = gamma <=> yaw.   + gamma is right turn in the transverse plane
+    # P[3] = Tx
+    # P[4] = Ty
+    # P[5] = Tz
+    img_data = {'parms' : P, 'step' : S, 'fwhm' : F, 'tol' : T, 'sample' : sample}
+    return img_data
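
A short usage note: the dictionary returned above carries the five arrays
built in the body, so (doctest-style)

    >>> imdata = build_structs()
    >>> sorted(imdata.keys())
    ['fwhm', 'parms', 'sample', 'step', 'tol']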
 
 
 def build_rotate_matrix(img_data_parms):
-	"""
-	rot_matrix = reg.build_rotate_matrix(img_data_parms)
+    """
+    rot_matrix = reg.build_rotate_matrix(img_data_parms)
 
-	takes the 6 element vector (3 angles, 3 translations) and build the 4x4 mapping matrix
+    takes the 6 element vector (3 angles, 3 translations) and builds the 4x4 mapping matrix
 
-	Parameters 
-	----------
-	img_data_parms : {nd_array}
-	    this is the current (6-dim) array with 3 angles and 3 translations.
+    Parameters 
+    ----------
+    img_data_parms : {nd_array}
+        this is the current (6-dim) array with 3 angles and 3 translations.
 
-	Returns 
-	-------
-	rot_matrix: {nd_array}
-	    the 4x4 mapping matrix
+    Returns 
+    -------
+    rot_matrix: {nd_array}
+        the 4x4 mapping matrix
 
-	Examples
-	--------
+    Examples
+    --------
 
-	>>> import numpy as NP
-	>>> import _registration as reg
-	>>> imdata = reg.build_structs()
-	>>> x = NP.zeros(6, dtype=NP.float64)
-	>>> M = reg.build_rotate_matrix(x)
-	>>> M 
-	array([[ 1.,  0.,  0.,  0.],
-       	       [ 0.,  1.,  0.,  0.],
-       	       [ 0.,  0.,  1.,  0.],
-       	       [ 0.,  0.,  0.,  1.]])
+    >>> import numpy as NP
+    >>> import _registration as reg
+    >>> imdata = reg.build_structs()
+    >>> x = NP.zeros(6, dtype=NP.float64)
+    >>> M = reg.build_rotate_matrix(x)
+    >>> M 
+    array([[ 1.,  0.,  0.,  0.],
+           [ 0.,  1.,  0.,  0.],
+           [ 0.,  0.,  1.,  0.],
+           [ 0.,  0.,  0.,  1.]])
 
 
-	"""
+    """
 
-	R1 = NP.zeros([4,4], dtype=NP.float64);
-	R2 = NP.zeros([4,4], dtype=NP.float64);
-	R3 = NP.zeros([4,4], dtype=NP.float64);
-	T  = NP.eye(4, dtype=NP.float64);
+    R1 = NP.zeros([4,4], dtype=NP.float64);
+    R2 = NP.zeros([4,4], dtype=NP.float64);
+    R3 = NP.zeros([4,4], dtype=NP.float64);
+    T  = NP.eye(4, dtype=NP.float64);
 
-	alpha = math.radians(img_data_parms[0])
-	beta  = math.radians(img_data_parms[1])
-	gamma = math.radians(img_data_parms[2])
+    alpha = math.radians(img_data_parms[0])
+    beta  = math.radians(img_data_parms[1])
+    gamma = math.radians(img_data_parms[2])
 
-	R1[0][0] = 1.0
-	R1[1][1] = math.cos(alpha)
-	R1[1][2] = math.sin(alpha)
-	R1[2][1] = -math.sin(alpha)
-	R1[2][2] = math.cos(alpha)
-	R1[3][3] = 1.0
+    R1[0][0] = 1.0
+    R1[1][1] = math.cos(alpha)
+    R1[1][2] = math.sin(alpha)
+    R1[2][1] = -math.sin(alpha)
+    R1[2][2] = math.cos(alpha)
+    R1[3][3] = 1.0
 
-	R2[0][0] = math.cos(beta)
-	R2[0][2] = math.sin(beta)
-	R2[1][1] = 1.0
-	R2[2][0] = -math.sin(beta)
-	R2[2][2] = math.cos(beta)
-	R2[3][3] = 1.0
+    R2[0][0] = math.cos(beta)
+    R2[0][2] = math.sin(beta)
+    R2[1][1] = 1.0
+    R2[2][0] = -math.sin(beta)
+    R2[2][2] = math.cos(beta)
+    R2[3][3] = 1.0
 
-	R3[0][0] = math.cos(gamma)
-	R3[0][1] = math.sin(gamma)
-	R3[1][0] = -math.sin(gamma)
-	R3[1][1] = math.cos(gamma)
-	R3[2][2] = 1.0
-	R3[3][3] = 1.0
+    R3[0][0] = math.cos(gamma)
+    R3[0][1] = math.sin(gamma)
+    R3[1][0] = -math.sin(gamma)
+    R3[1][1] = math.cos(gamma)
+    R3[2][2] = 1.0
+    R3[3][3] = 1.0
 
-	T[0][0] = 1.0
-	T[1][1] = 1.0
-	T[2][2] = 1.0
-	T[3][3] = 1.0
-	T[0][3] = img_data_parms[3]
-	T[1][3] = img_data_parms[4]
-	T[2][3] = img_data_parms[5]
+    T[0][0] = 1.0
+    T[1][1] = 1.0
+    T[2][2] = 1.0
+    T[3][3] = 1.0
+    T[0][3] = img_data_parms[3]
+    T[1][3] = img_data_parms[4]
+    T[2][3] = img_data_parms[5]
 
-	rot_matrix = NP.dot(T, R1);
-	rot_matrix = NP.dot(rot_matrix, R2);
-	rot_matrix = NP.dot(rot_matrix, R3);
+    rot_matrix = NP.dot(T, R1);
+    rot_matrix = NP.dot(rot_matrix, R2);
+    rot_matrix = NP.dot(rot_matrix, R3);
 
-	return rot_matrix
+    return rot_matrix
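
For reference, a hedged numpy-only sketch of the same T * R1 * R2 * R3
composition (rot4 is an illustrative helper, not part of the module):

    import numpy as np

    def rot4(parms):
        # angles in degrees (indices 0-2), translations in voxels (indices 3-5)
        a, b, g = np.radians(parms[0]), np.radians(parms[1]), np.radians(parms[2])
        R1 = np.eye(4)
        R1[1, 1] = R1[2, 2] = np.cos(a); R1[1, 2] = np.sin(a); R1[2, 1] = -np.sin(a)
        R2 = np.eye(4)
        R2[0, 0] = R2[2, 2] = np.cos(b); R2[0, 2] = np.sin(b); R2[2, 0] = -np.sin(b)
        R3 = np.eye(4)
        R3[0, 0] = R3[1, 1] = np.cos(g); R3[0, 1] = np.sin(g); R3[1, 0] = -np.sin(g)
        T = np.eye(4)
        T[0:3, 3] = parms[3:6]
        return np.dot(np.dot(np.dot(T, R1), R2), R3)

    M = rot4(np.zeros(6))          # identity for zero angles and translations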
 
 
 def load_volume(imagedesc, imagename=None, threshold=0.999, debug=0):
 
-	"""
-	image = load_volume(imagedesc, imagename=None, threshold=0.999, debug=0)  --- OR ---
-	image, h, ih, index = load_volume(imagedesc, imagename=None, threshold=0.999, debug=0)
+    """
+    image = load_volume(imagedesc, imagename=None, threshold=0.999, debug=0)  --- OR ---
+    image, h, ih, index = load_volume(imagedesc, imagename=None, threshold=0.999, debug=0)
 
-	gets an image descriptor and optional filename and returns a scaled 8 bit volume. The
-	scaling is designed to make full use of the 8 bits (ignoring high amplitude outliers).
-	The current method uses numpy fromfile and will be replaced by neuroimage nifti load.
+    gets an image descriptor and optional filename and returns a scaled 8 bit volume. The
+    scaling is designed to make full use of the 8 bits (ignoring high amplitude outliers).
+    The current method uses numpy fromfile and will be replaced by neuroimage nifti load.
 
-	Parameters 
-	----------
-	imagedesc : {dictionary} 
-	    imagedesc is the descriptor of the image to be read. 
+    Parameters 
+    ----------
+    imagedesc : {dictionary} 
+        imagedesc is the descriptor of the image to be read. 
 
-	imagename : {string} : optional
-	    name of image file. No name creates a blank image that is used for creating
-	    a rotated test image or image rescaling.
+    imagename : {string} : optional
+        name of image file. No name creates a blank image that is used for creating
+        a rotated test image or image rescaling.
 
-	threshold : {float} : optional
-	    this is the threshold for upper cutoff in the 8 bit scaling. The volume histogram
-	    and integrated histogram is computed and the upper amplitude cutoff is where the 
-	    integrated histogram crosses the value set in the threshold. setting threshold to
-	    1.0 means the scaling is done over the min to max amplitude range.
+    threshold : {float} : optional
+        this is the threshold for upper cutoff in the 8 bit scaling. The volume histogram
+        and integrated histogram are computed and the upper amplitude cutoff is where the 
+        integrated histogram crosses the value set in the threshold. setting threshold to
+        1.0 means the scaling is done over the min to max amplitude range.
 
-	debug : {0, 1} : optional
-	    when debug=1 the method returns the volume histogram, integrated histogram and the 
-	    amplitude index where the provided threshold occured.
+    debug : {0, 1} : optional
+        when debug=1 the method returns the volume histogram, integrated histogram and the 
+        amplitude index where the provided threshold occurred.
 
-	Returns 
-	-------
-	image : {dictionary}
-	    the volume data assoicated with the filename or a blank volume of the same
-	    dimensions as specified in imagedesc.
+    Returns 
+    -------
+    image : {dictionary}
+        the volume data associated with the filename or a blank volume of the same
+        dimensions as specified in imagedesc.
 
-	--- OR --- (if debug = 1)
+    --- OR --- (if debug = 1)
 
-	image : {dictionary}
-	    the volume data assoicated with the filename or a blank volume of the same
-	    dimensions as specified in imagedesc.
+    image : {dictionary}
+        the volume data associated with the filename or a blank volume of the same
+        dimensions as specified in imagedesc.
 
-	h : {nd_array}
-	    the volume 1D amplitude histogram
+    h : {nd_array}
+        the volume 1D amplitude histogram
 
-	ih : {nd_array}
-	    the volume 1D amplitude integrated histogram
+    ih : {nd_array}
+        the volume 1D amplitude integrated histogram
 
-	index : {int}
-	    the amplitude (histogram index) where the integrated histogram
-	    crosses the 'threshold' provided.
+    index : {int}
+        the amplitude (histogram index) where the integrated histogram
+        crosses the 'threshold' provided.
 
-	Examples
-	--------
+    Examples
+    --------
 
-	>>> import numpy as NP
-	>>> import _registration as reg
-	>>> anat_desc = reg.load_anatMRI_desc()
-	>>> image_anat, h, ih, index = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img', debug=1)
-	>>> index
-	210
+    >>> import numpy as NP
+    >>> import _registration as reg
+    >>> anat_desc = reg.load_anatMRI_desc()
+    >>> image_anat, h, ih, index = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img', debug=1)
+    >>> index
+    210
 
 
-	"""
+    """
 
-	# load MRI or fMRI volume and return an autoscaled 8 bit image.
-	# autoscale is using integrated histogram to deal with outlier high amplitude voxels
-	if imagename == None:
-	    # imagename of none means to create a blank image
-    	    ImageVolume = NP.zeros(imagedesc['layers']*imagedesc['rows']*imagedesc['cols'],
-			    dtype=NP.uint16).reshape(imagedesc['layers'], imagedesc['rows'], imagedesc['cols'])
-	else:
-    	    ImageVolume = NP.fromfile(imagename,
-			    dtype=NP.uint16).reshape(imagedesc['layers'], imagedesc['rows'], imagedesc['cols']);
+    # load MRI or fMRI volume and return an autoscaled 8 bit image.
+    # autoscale uses the integrated histogram to deal with outlier high-amplitude voxels
+    if imagename == None:
+        # imagename of None means create a blank image
+        ImageVolume = NP.zeros(imagedesc['layers']*imagedesc['rows']*imagedesc['cols'],
+                        dtype=NP.uint16).reshape(imagedesc['layers'], imagedesc['rows'], imagedesc['cols'])
+    else:
+        ImageVolume = NP.fromfile(imagename,
+                        dtype=NP.uint16).reshape(imagedesc['layers'], imagedesc['rows'], imagedesc['cols']);
 
-	# the mat (voxel to physical) matrix
-	M = NP.eye(4, dtype=NP.float64);
-	# for now just the sample size (mm units) in x, y and z
-	M[0][0] = imagedesc['sample_x']
-	M[1][1] = imagedesc['sample_y']
-	M[2][2] = imagedesc['sample_z']
-	# dimensions 
-	D = NP.zeros(3, dtype=NP.int32);
-	# Gaussian kernel - fill in with build_fwhm() 
-	F = NP.zeros(3, dtype=NP.float64);
-	D[0] = imagedesc['rows']
-	D[1] = imagedesc['cols']
-	D[2] = imagedesc['layers']
+    # the mat (voxel to physical) matrix
+    M = NP.eye(4, dtype=NP.float64);
+    # for now just the sample size (mm units) in x, y and z
+    M[0][0] = imagedesc['sample_x']
+    M[1][1] = imagedesc['sample_y']
+    M[2][2] = imagedesc['sample_z']
+    # dimensions 
+    D = NP.zeros(3, dtype=NP.int32);
+    # Gaussian kernel - fill in with build_fwhm() 
+    F = NP.zeros(3, dtype=NP.float64);
+    D[0] = imagedesc['rows']
+    D[1] = imagedesc['cols']
+    D[2] = imagedesc['layers']
 
-	if imagename == None:
-	    # no voxels to scale to 8 bits
-    	    ImageVolume = ImageVolume.astype(NP.uint8)
-	    image = {'data' : ImageVolume, 'mat' : M, 'dim' : D, 'fwhm' : F}
-    	    return image
+    if imagename == None:
+        # no voxels to scale to 8 bits
+        ImageVolume = ImageVolume.astype(NP.uint8)
+        image = {'data' : ImageVolume, 'mat' : M, 'dim' : D, 'fwhm' : F}
+        return image
 
-	# 8 bit scale with threshold clip of the volume integrated histogram
-	max = ImageVolume.max()
-	min = ImageVolume.min()
-	ih  = NP.zeros(max-min+1, dtype=NP.float64);
-	h   = NP.zeros(max-min+1, dtype=NP.float64);
-	if threshold <= 0:
-	    threshold = 0.999
-	elif threshold > 1.0:
-	    threshold = 1.0
-	# get the integrated histogram of the volume and get max from 
-	# the threshold crossing in the integrated histogram 
-	index  = R.register_image_threshold(ImageVolume, h, ih, threshold)
-	scale  = 255.0 / (index-min)
-	# generate the scaled 8 bit image
-	images = (scale*(ImageVolume.astype(NP.float)-min))
-	images[images>255] = 255 
-	image = {'data' : images.astype(NP.uint8), 'mat' : M, 'dim' : D, 'fwhm' : F}
-	if debug == 1:
-    	    return image, h, ih, index
-        else:
-    	    return image
+    # 8 bit scale with threshold clip of the volume integrated histogram
+    max = ImageVolume.max()
+    min = ImageVolume.min()
+    ih  = NP.zeros(max-min+1, dtype=NP.float64);
+    h   = NP.zeros(max-min+1, dtype=NP.float64);
+    if threshold <= 0:
+        threshold = 0.999
+    elif threshold > 1.0:
+        threshold = 1.0
+    # get the integrated histogram of the volume and get max from 
+    # the threshold crossing in the integrated histogram 
+    index  = R.register_image_threshold(ImageVolume, h, ih, threshold)
+    scale  = 255.0 / (index-min)
+    # generate the scaled 8 bit image
+    images = (scale*(ImageVolume.astype(NP.float)-min))
+    images[images>255] = 255 
+    image = {'data' : images.astype(NP.uint8), 'mat' : M, 'dim' : D, 'fwhm' : F}
+    if debug == 1:
+        return image, h, ih, index
+    else:
+        return image
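
The autoscaling above clips at the amplitude where the integrated histogram
crosses the threshold value (computed by the C routine
register_image_threshold). A hedged numpy-only approximation of the same
idea, with np.percentile standing in for the integrated-histogram crossing:

    import numpy as np
    vol = (1000 * np.random.rand(8, 32, 32)).astype(np.uint16)   # stand-in volume
    threshold = 0.999
    lo = float(vol.min())
    hi = float(np.percentile(vol, 100.0 * threshold))            # upper amplitude cutoff
    scaled = 255.0 * (vol.astype(np.float64) - lo) / (hi - lo)
    vol8 = np.clip(scaled, 0, 255).astype(np.uint8)              # final 8 bit volume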
 
 
 
@@ -983,274 +983,274 @@
 #
 
 def load_anatMRI_desc():
-	# this is for demo on the test MRI and fMRI volumes
-	rows   = 256
-	cols   = 256
-	layers = 90
-	xsamp  = 0.9375
-	ysamp  = 0.9375
-	zsamp  = 1.5
-	desc = {'rows' : rows, 'cols' : cols, 'layers' : layers, 
-		'sample_x' : xsamp, 'sample_y' : ysamp, 'sample_z' : zsamp}
-	return desc
+    # this is for demo on the test MRI and fMRI volumes
+    rows   = 256
+    cols   = 256
+    layers = 90
+    xsamp  = 0.9375
+    ysamp  = 0.9375
+    zsamp  = 1.5
+    desc = {'rows' : rows, 'cols' : cols, 'layers' : layers, 
+            'sample_x' : xsamp, 'sample_y' : ysamp, 'sample_z' : zsamp}
+    return desc
 
 def load_fMRI_desc():
-	# this is for demo on the test MRI and fMRI volumes
-	rows   = 64
-	cols   = 64
-	layers = 28
-	xsamp  = 3.75
-	ysamp  = 3.75
-	zsamp  = 5.0
-	desc = {'rows' : rows, 'cols' : cols, 'layers' : layers, 
-		'sample_x' : xsamp, 'sample_y' : ysamp, 'sample_z' : zsamp}
-	return desc
+    # this is for demo on the test MRI and fMRI volumes
+    rows   = 64
+    cols   = 64
+    layers = 28
+    xsamp  = 3.75
+    ysamp  = 3.75
+    zsamp  = 5.0
+    desc = {'rows' : rows, 'cols' : cols, 'layers' : layers, 
+            'sample_x' : xsamp, 'sample_y' : ysamp, 'sample_z' : zsamp}
+    return desc
 
 def read_fMRI_directory(path):
-	files_fMRI = glob.glob(path)
-	return files_fMRI
+    files_fMRI = glob.glob(path)
+    return files_fMRI
 
 
 def check_alignment(image1, image2, imdata, method='ncc', lite=0, smhist=0, 
-		    alpha=0.0, beta=0.0, gamma=0.0, Tx=0, Ty=0, Tz=0, ret_histo=0):
-	            
-	#
-	# to test the cost function and view the joint histogram
-	# for 2 images. used for debug
-	#
-	imdata['parms'][0] = alpha
-	imdata['parms'][1] = beta
-	imdata['parms'][2] = gamma
-	imdata['parms'][3] = Tx
-	imdata['parms'][4] = Ty
-	imdata['parms'][5] = Tz
-	M = build_rotate_matrix(imdata['parms'])
-	optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo)
+                    alpha=0.0, beta=0.0, gamma=0.0, Tx=0, Ty=0, Tz=0, ret_histo=0):
+                    
+    #
+    # to test the cost function and view the joint histogram
+    # for 2 images. used for debug
+    #
+    imdata['parms'][0] = alpha
+    imdata['parms'][1] = beta
+    imdata['parms'][2] = gamma
+    imdata['parms'][3] = Tx
+    imdata['parms'][4] = Ty
+    imdata['parms'][5] = Tz
+    M = build_rotate_matrix(imdata['parms'])
+    optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo)
 
-	if ret_histo:
-	    cost, joint_histogram = optimize_function(imdata['parms'], optfunc_args)
-	    return cost, joint_histogram 
-    	else:
-	    cost = optimize_function(imdata['parms'], optfunc_args)
-	    return cost
+    if ret_histo:
+        cost, joint_histogram = optimize_function(imdata['parms'], optfunc_args)
+        return cost, joint_histogram 
+    else:
+        cost = optimize_function(imdata['parms'], optfunc_args)
+        return cost
 
 def build_scale_image(image, scale):
-	#
-	# rescale the 'mat' (voxel to physical mapping matrix) 
-	#
-	(layers, rows, cols) = image['data'].shape
-	M = image['mat'] * scale
-	# dimensions 
-	D = NP.zeros(3, dtype=NP.int32);
-	# Gaussian kernel - fill in with build_fwhm() 
-	F = NP.zeros(3, dtype=NP.float64);
-	Z = NP.zeros(3, dtype=NP.float64);
-	D[0] = rows/scale
-	D[1] = cols/scale
-	D[2] = layers/scale
-    	image2 = NP.zeros(D[2]*D[1]*D[0], dtype=NP.uint8).reshape(D[2], D[0], D[1]);
-	mode = 1;
-	R.register_volume_resample(image['data'], image2, Z, scale, mode)
-	scaled_image = {'data' : image2, 'mat' : M, 'dim' : D, 'fwhm' : F}
-	return scaled_image
+    #
+    # rescale the 'mat' (voxel to physical mapping matrix) 
+    #
+    (layers, rows, cols) = image['data'].shape
+    M = image['mat'] * scale
+    # dimensions 
+    D = NP.zeros(3, dtype=NP.int32);
+    # Gaussian kernel - fill in with build_fwhm() 
+    F = NP.zeros(3, dtype=NP.float64);
+    Z = NP.zeros(3, dtype=NP.float64);
+    D[0] = rows/scale
+    D[1] = cols/scale
+    D[2] = layers/scale
+    image2 = NP.zeros(D[2]*D[1]*D[0], dtype=NP.uint8).reshape(D[2], D[0], D[1]);
+    mode = 1;
+    R.register_volume_resample(image['data'], image2, Z, scale, mode)
+    scaled_image = {'data' : image2, 'mat' : M, 'dim' : D, 'fwhm' : F}
+    return scaled_image
 
 
 def demo_MRI_volume_align(scale=2, alpha=3.0, beta=4.0, gamma=5.0, Tx = 0.0, Ty = 0.0, Tz = 0.0):
-	"""
-	demo with (must have file ANAT1_V0001.img)
+    """
+    demo with (must have file ANAT1_V0001.img)
 
-	image1, image2, imdata = reg.demo_MRI_volume_align()
-	x = reg.python_coreg(image1, image2, imdata, method='ncc', lite=1) 
-	image2r = reg.remap_image(image2, x, resample='cubic')
-	image2rz = reg.resize_image(image2r, image1['mat'])
+    image1, image2, imdata = reg.demo_MRI_volume_align()
+    x = reg.python_coreg(image1, image2, imdata, method='ncc', lite=1) 
+    image2r = reg.remap_image(image2, x, resample='cubic')
+    image2rz = reg.resize_image(image2r, image1['mat'])
 
 
-	slice1 = image1['data'][45, :, :]
-	slice2 = image2['data'][45/2, :, :]
-	slice2r = image2r['data'][45/2, :, :]
-	slice2rz = image2rz['data'][45, :, :]
+    slice1 = image1['data'][45, :, :]
+    slice2 = image2['data'][45/2, :, :]
+    slice2r = image2r['data'][45/2, :, :]
+    slice2rz = image2rz['data'][45, :, :]
 
-	pylab.figure(1)
-	pylab.bone()
-	pylab.imshow(slice1)
-	pylab.imshow(slice1)
-	pylab.figure(2)
-	pylab.imshow(slice2)
-	pylab.figure(3)
-	pylab.imshow(slice2r)
-	pylab.figure(4)
-	pylab.imshow(slice2rz)
-	pylab.show()
+    pylab.figure(1)
+    pylab.bone()
+    pylab.imshow(slice1)
+    pylab.imshow(slice1)
+    pylab.figure(2)
+    pylab.imshow(slice2)
+    pylab.figure(3)
+    pylab.imshow(slice2r)
+    pylab.figure(4)
+    pylab.imshow(slice2rz)
+    pylab.show()
 
-	"""
-	#
-	# this is for coreg MRI / fMRI scale test. The volume is anatomical MRI.
-	# the image is rotated in 3D. after rotation the image is scaled.  
-	#
+    """
+    #
+    # this is for coreg MRI / fMRI scale test. The volume is anatomical MRI.
+    # the image is rotated in 3D. after rotation the image is scaled.  
+    #
 
-	anat_desc = load_anatMRI_desc()
-	image1 = load_volume(anat_desc, imagename='ANAT1_V0001.img')
-	image2 = load_volume(anat_desc, imagename=None)
-	imdata = build_structs()
-	image1['fwhm'] = build_fwhm(image1['mat'], imdata['step'])
-	image2['fwhm'] = build_fwhm(image2['mat'], imdata['step'])
-	imdata['parms'][0] = alpha
-	imdata['parms'][1] = beta
-	imdata['parms'][2] = gamma
-	imdata['parms'][3] = Tx
-	imdata['parms'][4] = Ty
-	imdata['parms'][5] = Tz
-	M = build_rotate_matrix(imdata['parms'])
-	# rotate volume. linear interpolation means the volume is low pass filtered
-	R.register_linear_resample(image1['data'], image2['data'], M, imdata['step'])
-	# subsample volume
-	image3 = build_scale_image(image2, scale)
-	return image1, image3, imdata
+    anat_desc = load_anatMRI_desc()
+    image1 = load_volume(anat_desc, imagename='ANAT1_V0001.img')
+    image2 = load_volume(anat_desc, imagename=None)
+    imdata = build_structs()
+    image1['fwhm'] = build_fwhm(image1['mat'], imdata['step'])
+    image2['fwhm'] = build_fwhm(image2['mat'], imdata['step'])
+    imdata['parms'][0] = alpha
+    imdata['parms'][1] = beta
+    imdata['parms'][2] = gamma
+    imdata['parms'][3] = Tx
+    imdata['parms'][4] = Ty
+    imdata['parms'][5] = Tz
+    M = build_rotate_matrix(imdata['parms'])
+    # rotate volume. linear interpolation means the volume is low pass filtered
+    R.register_linear_resample(image1['data'], image2['data'], M, imdata['step'])
+    # subsample volume
+    image3 = build_scale_image(image2, scale)
+    return image1, image3, imdata
 
 def demo_rotate_fMRI_volume(fMRIVol, x): 
-	#
-	# return rotated fMRIVol. the fMRIVol is already loaded, and gets rotated
-	#
+    #
+    # return rotated fMRIVol. the fMRIVol is already loaded, and gets rotated
+    #
 
-	desc = load_fMRI_desc()
-	image = load_volume(desc, imagename=None)
-	imdata = build_structs()
-	image['fwhm'] = build_fwhm(image['mat'], imdata['step'])
-	imdata['parms'][0] = x[0]  # alpha
-	imdata['parms'][1] = x[1]  # beta
-	imdata['parms'][2] = x[2]  # gamma
-	imdata['parms'][3] = x[3]  # Tx
-	imdata['parms'][4] = x[4]  # Ty
-	imdata['parms'][5] = x[5]  # Tz
-	M = build_rotate_matrix(imdata['parms'])
-	# rotate volume. cubic spline interpolation means the volume is NOT low pass filtered
-	R.register_cubic_resample(fMRIVol['data'], image['data'], M, imdata['step'])
-	return image
+    desc = load_fMRI_desc()
+    image = load_volume(desc, imagename=None)
+    imdata = build_structs()
+    image['fwhm'] = build_fwhm(image['mat'], imdata['step'])
+    imdata['parms'][0] = x[0]  # alpha
+    imdata['parms'][1] = x[1]  # beta
+    imdata['parms'][2] = x[2]  # gamma
+    imdata['parms'][3] = x[3]  # Tx
+    imdata['parms'][4] = x[4]  # Ty
+    imdata['parms'][5] = x[5]  # Tz
+    M = build_rotate_matrix(imdata['parms'])
+    # rotate volume. cubic spline interpolation means the volume is NOT low pass filtered
+    R.register_cubic_resample(fMRIVol['data'], image['data'], M, imdata['step'])
+    return image
 
 def demo_MRI_coregistration(optimizer_method='powell', histo_method=1, smooth_histo=0, smooth_image=0, ftype=1):
-	"""
-	demo with (must have file ANAT1_V0001.img and fMRI directory fMRIData)
+    """
+    demo with (must have file ANAT1_V0001.img and fMRI directory fMRIData)
 
-	measures, imageF_anat, fmri_series = reg.demo_MRI_coregistration()
+    measures, imageF_anat, fmri_series = reg.demo_MRI_coregistration()
 
-	show results with
+    show results with
 
-	In [59]: measures[25]['cost']
-	Out[59]: -0.48607185
+    In [59]: measures[25]['cost']
+    Out[59]: -0.48607185
 
-	In [60]: measures[25]['align_cost']
-	Out[60]: -0.99514639
+    In [60]: measures[25]['align_cost']
+    Out[60]: -0.99514639
 
-	In [61]: measures[25]['align_rotate']
-	Out[61]:
-	array([ 1.94480181,  5.64703989,  5.35002136, -5.00544405, -2.2712214, -1.42249691], dtype=float32)
+    In [61]: measures[25]['align_rotate']
+    Out[61]:
+    array([ 1.94480181,  5.64703989,  5.35002136, -5.00544405, -2.2712214, -1.42249691], dtype=float32)
 
-	In [62]: measures[25]['rotate']
-	Out[62]:
-	array([ 1.36566341,  4.70644331,  4.68198586, -4.32256889, -2.47607017, -2.39173937], dtype=float32)
+    In [62]: measures[25]['rotate']
+    Out[62]:
+    array([ 1.36566341,  4.70644331,  4.68198586, -4.32256889, -2.47607017, -2.39173937], dtype=float32)
 
 
-	"""
+    """
 
-	# demo of alignment of fMRI series with anatomical MRI
-	# in this demo, each fMRI volume is first perturbed (rotated, translated) 
-	# by a random value. The initial registration is measured, then the optimal
-	# alignment is computed and the registration measure made following the volume remap.
-	# The fMRI registration is done with the first fMRI volume using normalized cross-correlation.
-	# Each fMRI volume is rotated to the fMRI-0 volume and the series is ensemble averaged.
-	# The ensemble averaged is then registered with the anatomical MRI volume using normalized mutual information.
-	# The fMRI series is then rotated with this parameter. The alignments are done with 3D cubic splines.
+    # demo of alignment of fMRI series with anatomical MRI
+    # in this demo, each fMRI volume is first perturbed (rotated, translated) 
+    # by a random value. The initial registration is measured, then the optimal
+    # alignment is computed and the registration measure made following the volume remap.
+    # The fMRI registration is done with the first fMRI volume using normalized cross-correlation.
+    # Each fMRI volume is rotated to the fMRI-0 volume and the series is ensemble averaged.
+    # The ensemble average is then registered with the anatomical MRI volume using normalized mutual information.
+    # The fMRI series is then rotated with this parameter. The alignments are done with 3D cubic splines.
 
-	# read the anatomical MRI volume
-	anat_desc = load_anatMRI_desc()
-	imageF_anat = load_volume(anat_desc, imagename='ANAT1_V0001.img')
-	# the sampling structure
-	imdata = build_structs()
-	# the volume filter
-	imageF_anat['fwhm'] = build_fwhm(imageF_anat['mat'], imdata['step'])
+    # read the anatomical MRI volume
+    anat_desc = load_anatMRI_desc()
+    imageF_anat = load_volume(anat_desc, imagename='ANAT1_V0001.img')
+    # the sampling structure
+    imdata = build_structs()
+    # the volume filter
+    imageF_anat['fwhm'] = build_fwhm(imageF_anat['mat'], imdata['step'])
 
-	# read in the file list of the fMRI data
-	metric_test = NP.dtype([('cost', 'f'),
-                    	       ('align_cost', 'f'),
-                    	       ('rotate', 'f', 6),
-                    	       ('align_rotate', 'f', 6)])
+    # read in the file list of the fMRI data
+    metric_test = NP.dtype([('cost', 'f'),
+                           ('align_cost', 'f'),
+                           ('rotate', 'f', 6),
+                           ('align_rotate', 'f', 6)])
 
-	fMRIdata = read_fMRI_directory('fMRIData\*.img')
-	fmri_desc = load_fMRI_desc()
-	fmri_series = {}
-	ave_fMRI_volume = NP.zeros(fmri_desc['layers']*fmri_desc['rows']*fmri_desc['cols'],
-			  dtype=NP.float64).reshape(fmri_desc['layers'], fmri_desc['rows'], fmri_desc['cols'])
-	count = 0
-	number_volumes = len(fMRIdata)
-	measures = NP.zeros(number_volumes, dtype=metric_test)
-	# load and perturb (rotation, translation) the fMRI volumes
-	for i in fMRIdata:
-	    image = load_volume(fmri_desc, i)
-	    # random perturbation of angle, translation for each volume beyond the first
-	    if count == 0:
-		image['fwhm'] = build_fwhm(image['mat'], imdata['step'])
-	        fmri_series[count] = image
-	        count = count + 1
-	    else:
-	        x = NP.random.random(6) - 0.5
-	        x = 10.0 * x
-	        fmri_series[count] = demo_rotate_fMRI_volume(image, x)
-		measures[count]['rotate'][0:6] = x[0:6]
-	        count = count + 1
+    fMRIdata = read_fMRI_directory('fMRIData\*.img')
+    fmri_desc = load_fMRI_desc()
+    fmri_series = {}
+    ave_fMRI_volume = NP.zeros(fmri_desc['layers']*fmri_desc['rows']*fmri_desc['cols'],
+                      dtype=NP.float64).reshape(fmri_desc['layers'], fmri_desc['rows'], fmri_desc['cols'])
+    count = 0
+    number_volumes = len(fMRIdata)
+    measures = NP.zeros(number_volumes, dtype=metric_test)
+    # load and perturb (rotation, translation) the fMRI volumes
+    for i in fMRIdata:
+        image = load_volume(fmri_desc, i)
+        # random perturbation of angle, translation for each volume beyond the first
+        if count == 0:
+            image['fwhm'] = build_fwhm(image['mat'], imdata['step'])
+            fmri_series[count] = image
+            count = count + 1
+        else:
+            x = NP.random.random(6) - 0.5
+            x = 10.0 * x
+            fmri_series[count] = demo_rotate_fMRI_volume(image, x)
+            measures[count]['rotate'][0:6] = x[0:6]
+            count = count + 1
 
 
-	# load and register the fMRI volumes with volume_0 using normalized cross correlation metric
-	imageF = fmri_series[0]
-	if smooth_image:
-	    image_F_xyz = filter_image_3D(imageF['data'], imageF['fwhm'], ftype)
-	    imageF['data'] = image_F_xyz
-	for i in range(1, number_volumes):
-	    imageG = fmri_series[i]
-	    # the measure prior to alignment 
-	    measures[i]['cost'] = check_alignment(imageF, imageG, imdata, method='ncc',
-			                          lite=histo_method, smhist=smooth_histo)
-	    x = python_coreg(imageF, imageG, imdata, lite=histo_method, method='ncc',
-			     opt_method=optimizer_method, smhist=smooth_histo, smimage=smooth_image)
-	    measures[i]['align_rotate'][0:6] = x[0:6]
-	    measures[i]['align_cost'] = check_alignment(imageF, imageG, imdata, method='ncc', 
-		                             lite=histo_method, smhist=smooth_histo,
-					     alpha=x[0], beta=x[1], gamma=x[2], Tx=x[3], Ty=x[4], Tz=x[5])
+    # load and register the fMRI volumes with volume_0 using normalized cross correlation metric
+    imageF = fmri_series[0]
+    if smooth_image:
+        image_F_xyz = filter_image_3D(imageF['data'], imageF['fwhm'], ftype)
+        imageF['data'] = image_F_xyz
+    for i in range(1, number_volumes):
+        imageG = fmri_series[i]
+        # the measure prior to alignment 
+        measures[i]['cost'] = check_alignment(imageF, imageG, imdata, method='ncc',
+                                              lite=histo_method, smhist=smooth_histo)
+        x = python_coreg(imageF, imageG, imdata, lite=histo_method, method='ncc',
+                         opt_method=optimizer_method, smhist=smooth_histo, smimage=smooth_image)
+        measures[i]['align_rotate'][0:6] = x[0:6]
+        measures[i]['align_cost'] = check_alignment(imageF, imageG, imdata, method='ncc', 
+                                         lite=histo_method, smhist=smooth_histo,
+                                         alpha=x[0], beta=x[1], gamma=x[2], Tx=x[3], Ty=x[4], Tz=x[5])
 
 
-	# align the volumes and average them for co-registration with the anatomical MRI 
-	ave_fMRI_volume = fmri_series[0]['data'].astype(NP.float64)
-	for i in range(1, number_volumes):
-	    image = fmri_series[i]
-	    x[0:6] = measures[i]['align_rotate'][0:6]
-	    # overwrite the fMRI volume with the aligned volume
-	    fmri_series[i] = remap_image(image, x, resample='cubic')
-	    ave_fMRI_volume = ave_fMRI_volume + fmri_series[i]['data'].astype(NP.float64)
+    # align the volumes and average them for co-registration with the anatomical MRI 
+    ave_fMRI_volume = fmri_series[0]['data'].astype(NP.float64)
+    for i in range(1, number_volumes):
+        image = fmri_series[i]
+        x[0:6] = measures[i]['align_rotate'][0:6]
+        # overwrite the fMRI volume with the aligned volume
+        fmri_series[i] = remap_image(image, x, resample='cubic')
+        ave_fMRI_volume = ave_fMRI_volume + fmri_series[i]['data'].astype(NP.float64)
 
-	ave_fMRI_volume = (ave_fMRI_volume / float(number_volumes)).astype(NP.uint8)
-	ave_fMRI_volume = {'data' : ave_fMRI_volume, 'mat' : imageF['mat'], 
-			   'dim' : imageF['dim'], 'fwhm' : imageF['fwhm']}
-	# register (using normalized mutual information) with the anatomical MRI
-	if smooth_image:
-	    image_F_anat_xyz = filter_image_3D(imageF_anat['data'], imageF_anat['fwhm'], ftype)
-	    imageF_anat['data'] = image_F_anat_xyz
-	x = python_coreg(imageF_anat, ave_fMRI_volume, imdata, lite=histo_method,
-			 method='nmi', opt_method=optimizer_method, smhist=smooth_histo, smimage=smooth_image)
-	print 'functional-anatomical align parameters '
-	print x
-	for i in range(number_volumes):
-	    image = fmri_series[i]
-	    # overwrite the fMRI volume with the anatomical-aligned volume
-	    fmri_series[i] = remap_image(image, x, resample='cubic')
+    ave_fMRI_volume = (ave_fMRI_volume / float(number_volumes)).astype(NP.uint8)
+    ave_fMRI_volume = {'data' : ave_fMRI_volume, 'mat' : imageF['mat'], 
+                       'dim' : imageF['dim'], 'fwhm' : imageF['fwhm']}
+    # register (using normalized mutual information) with the anatomical MRI
+    if smooth_image:
+        image_F_anat_xyz = filter_image_3D(imageF_anat['data'], imageF_anat['fwhm'], ftype)
+        imageF_anat['data'] = image_F_anat_xyz
+    x = python_coreg(imageF_anat, ave_fMRI_volume, imdata, lite=histo_method,
+                     method='nmi', opt_method=optimizer_method, smhist=smooth_histo, smimage=smooth_image)
+    print 'functional-anatomical align parameters '
+    print x
+    for i in range(number_volumes):
+        image = fmri_series[i]
+        # overwrite the fMRI volume with the anatomical-aligned volume
+        fmri_series[i] = remap_image(image, x, resample='cubic')
 
-	return measures, imageF_anat, fmri_series
+    return measures, imageF_anat, fmri_series
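
The ensemble-averaging step in the middle of the demo above is a running
float64 sum over the aligned series; a small hedged sketch with stand-in
uint8 volumes:

    import numpy as np
    series = {}
    for i in range(5):
        series[i] = {'data': np.random.randint(0, 255, (4, 8, 8)).astype(np.uint8)}
    ave = np.zeros((4, 8, 8), dtype=np.float64)
    for i in range(len(series)):
        ave += series[i]['data'].astype(np.float64)
    ave_volume = (ave / float(len(series))).astype(np.uint8)     # averaged 8 bit volume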
 
 
 def demo_fMRI_resample(imageF_anat, fmri_series):
-	resampled_fmri_series = {}
-	number_volumes = len(fmri_series)
-	for i in range(number_volumes):
-	    resampled_fmri_series[i] = resize_image(fmri_series[i], imageF_anat['mat'])
+    resampled_fmri_series = {}
+    number_volumes = len(fmri_series)
+    for i in range(number_volumes):
+        resampled_fmri_series[i] = resize_image(fmri_series[i], imageF_anat['mat'])
 
-	return resampled_fmri_series
+    return resampled_fmri_series
 
 

Modified: trunk/scipy/ndimage/_segmenter.py
===================================================================
--- trunk/scipy/ndimage/_segmenter.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/ndimage/_segmenter.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -57,10 +57,10 @@
     return edge_image
 
 def canny_nonmax_supress(horz_DGFilter, vert_DGFilter, img_means, thres=0.5, 
-		         mode=1, canny_l=0.5, canny_h=0.8):
+                         mode=1, canny_l=0.5, canny_h=0.8):
     """
     magnitude, canny_stats = canny_nonmax_supress(horz_DGFilter, vert_DGFilter, img_means,
-		                          thres=0.5, mode=1, canny_l=0.5, canny_h=0.8)
+                                          thres=0.5, mode=1, canny_l=0.5, canny_h=0.8)
 
     non-max suppression stage of the Canny filter
 
@@ -69,11 +69,11 @@
 
     horz_DGFilter : {nd_array}
         the horizontally filtered image using the derivative of Gaussian kernel filter.
-	this is the output of the canny_filter method
+        this is the output of the canny_filter method
 
     vert_DGFilter : {nd_array}
         the vertical filtered image using the derivative of Gaussian kernel filter.
-	this is the output of the canny_filter method
+        this is the output of the canny_filter method
 
     img_means : {dictionary}
         mean X and Y values of edge signals determined from canny_filter
@@ -103,8 +103,8 @@
     [rows, cols] = horz_DGFilter.shape
     magnitude = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
     aveMag, canny_low, canny_high = S.canny_nonmax_supress(horz_DGFilter, vert_DGFilter,
-		                        magnitude, mode, img_means['x-dg']*thres,
-				        img_means['y-dg']*thres, canny_l, canny_h)
+                                        magnitude, mode, img_means['x-dg']*thres,
+                                        img_means['y-dg']*thres, canny_l, canny_h)
 
     canny_stats = {'mean' : aveMag, 'low' : canny_low, 'high' : canny_high} 
 
@@ -144,7 +144,7 @@
     horz_DGFilter = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
     vert_DGFilter = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
     aveX, aveY = S.canny_filter(slice, horz_DGFilter, vert_DGFilter,
-		   dg_kernel['coefficients'], dg_kernel['kernelSize'])
+                   dg_kernel['coefficients'], dg_kernel['kernelSize'])
 
     img_means = {'x-dg' : aveX, 'y-dg' : aveY} 
 
@@ -181,30 +181,30 @@
     number_regions    = ROI.size
     indices           = range(0, number_regions)
     for i in indices:
-	left   = ROI[i]['Left']-2
-	right  = ROI[i]['Right']+2
-	bottom = ROI[i]['Bottom']-2
-	top    = ROI[i]['Top']+2
-	Label  = ROI[i]['Label']
-	if left < 0: 
-	    left = 0
-	if bottom < 0: 
-	    bottom = 0
-	if right > cols-1: 
-	    right = cols-1
-	if top > rows-1: 
-	    top = rows-1
+        left   = ROI[i]['Left']-2
+        right  = ROI[i]['Right']+2
+        bottom = ROI[i]['Bottom']-2
+        top    = ROI[i]['Top']+2
+        Label  = ROI[i]['Label']
+        if left < 0: 
+            left = 0
+        if bottom < 0: 
+            bottom = 0
+        if right > cols-1: 
+            right = cols-1
+        if top > rows-1: 
+            top = rows-1
 
-	roi_rows = top-bottom
-	roi_cols = right-left
+        roi_rows = top-bottom
+        roi_cols = right-left
         label_region  = NP.zeros(roi_rows*roi_cols, dtype=NP.uint16).reshape(roi_rows, roi_cols)
         input = NP.zeros(roi_rows*roi_cols, dtype=NP.uint16).reshape(roi_rows, roi_cols)
-	# load the labeled region 
-	label_region[0:roi_rows, 0:roi_cols][label_image[bottom:top, left:right]==Label] = 1 
-	S.binary_edge(label_region, input)
-	input[0:roi_rows,0:roi_cols][input[0:roi_rows,0:roi_cols]==1] = Label
-	binary_edge_image[bottom:top,left:right] = binary_edge_image[bottom:top,left:right] + \
-	                                                       input[0:roi_rows,0:roi_cols] 
+        # load the labeled region 
+        label_region[0:roi_rows, 0:roi_cols][label_image[bottom:top, left:right]==Label] = 1 
+        S.binary_edge(label_region, input)
+        input[0:roi_rows,0:roi_cols][input[0:roi_rows,0:roi_cols]==1] = Label
+        binary_edge_image[bottom:top,left:right] = binary_edge_image[bottom:top,left:right] + \
+                                                               input[0:roi_rows,0:roi_cols] 
 
     return binary_edge_image
 
@@ -239,7 +239,7 @@
 
     ROI : {dictionary}
         Region of Interest structure that has blob bounding boxes. The largest
-	2D target bounding box is extracted.
+        2D target bounding box is extracted.
 
 
     Returns 
@@ -247,7 +247,7 @@
 
     co_occurence_images : {dictionary}
         contains 4 joint histogram images for each ROI 
-	returned if verbose=1
+        returned if verbose=1
 
     """
 
@@ -268,50 +268,50 @@
         Label  = ROI[i]['Label']
         rows   = top-bottom
         cols   = right-left
-	# copy the mask to section image
+        # copy the mask to section image
         section = NP.zeros(rows*cols, dtype=label_image.dtype).reshape(rows, cols)
         section[0:rows, 0:cols][label_image[bottom:top, left:right]==Label] = 1
         source_region = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
         cocm_block = NP.zeros(num_bits*num_bits, dtype=NP.int32).reshape(num_bits, num_bits)
-	source_region[0:rows, 0:cols] = copy_image[bottom:top, left:right] 
+        source_region[0:rows, 0:cols] = copy_image[bottom:top, left:right] 
         # scale segment to 8 bits. this needs to be smarter (e.g. use integrated histogram method)
         max_value = source_region.max()
         min_value = source_region.min()
         scale = 255.0 / (max_value-min_value)
         image_roi = (scale*(source_region-min_value)).astype(NP.int16)
-	# image_roi is short type
-	S.roi_co_occurence(section, image_roi, cocm_block, distance, orientation)
+        # image_roi is short type
+        S.roi_co_occurence(section, image_roi, cocm_block, distance, orientation)
         co_occurence_image_list[i] = cocm_block
-	# normalize the joint histogram prior to feature extraction
-	joint_histogram  = cocm_block.astype(NP.float64) 
-	joint_histogram  = joint_histogram / joint_histogram.sum()
-	# to prevent log(0)
-	joint_histogram += epsilon
-	# compute the com features
-	energy = joint_histogram.std()
-	H = joint_histogram * NP.log(joint_histogram)
-	entropy = H.sum()
-	r, c = joint_histogram.shape
-	[a, b] = NP.mgrid[1:c+1, 1:r+1]
-	contrast = ((NP.square(a-b))*joint_histogram).sum()
-	d = 1.0 + NP.abs(a-b)
-	homogeneity = (joint_histogram / d).sum()
-	ROI[i]['COM'][0] = distance
-	ROI[i]['COM'][1] = orientation 
-	ROI[i]['COM'][2] = energy
-	ROI[i]['COM'][3] = entropy
-	ROI[i]['COM'][4] = contrast
-	ROI[i]['COM'][5] = homogeneity
+        # normalize the joint histogram prior to feature extraction
+        joint_histogram  = cocm_block.astype(NP.float64) 
+        joint_histogram  = joint_histogram / joint_histogram.sum()
+        # to prevent log(0)
+        joint_histogram += epsilon
+        # compute the com features
+        energy = joint_histogram.std()
+        H = joint_histogram * NP.log(joint_histogram)
+        entropy = H.sum()
+        r, c = joint_histogram.shape
+        [a, b] = NP.mgrid[1:c+1, 1:r+1]
+        contrast = ((NP.square(a-b))*joint_histogram).sum()
+        d = 1.0 + NP.abs(a-b)
+        homogeneity = (joint_histogram / d).sum()
+        ROI[i]['COM'][0] = distance
+        ROI[i]['COM'][1] = orientation 
+        ROI[i]['COM'][2] = energy
+        ROI[i]['COM'][3] = entropy
+        ROI[i]['COM'][4] = contrast
+        ROI[i]['COM'][5] = homogeneity
 
     if verbose == 1:
         return co_occurence_image_list
     else:
-	return
+        return
 
 
 
 def region_grow(label_image, raw_image, ROI, roi_index, roi_inflate,
-		low_thresh=0.5, high_thresh=1.5, N_connectivity=3, debug=0):
+                low_thresh=0.5, high_thresh=1.5, N_connectivity=3, debug=0):
     """
     region_grow(label_image, raw_image, ROI, roi_index, roi_inflate, low_thresh, high_thresh, N_connectivity, debug)
 
@@ -329,33 +329,33 @@
 
     ROI : {dictionary}
         Region of Interest structure that has blob bounding boxes. The largest
-	2D target bounding box is extracted.
+        2D target bounding box is extracted.
 
     roi_index : {int}
         the single ROI element to apply region growing to.
 
     roi_inflate : {list}
         the maximum increase in the ROI bounding box. For 3D the tuple is [layers, rows, cols]
-	and for 2D it is [rows, cols].
+        and for 2D it is [rows, cols].
 
     low_thresh : {float}
         this is the percent of the voxel mean that the growing region must be GREATER than.
-	region growing terminates when the raw_image is BELOW this value.
+        region growing terminates when the raw_image is BELOW this value.
 
     high_thresh : {float}
         this is the percent of the voxel mean that the growing region must be LESS than.
-	region growing terminates when the raw_image is ABOVE this value.
+        region growing terminates when the raw_image is ABOVE this value.
 
     N_connectivity : {int}
         for growing this indicates how connected in a 3x3 or 3x3x3 window the un-labeled
-	sample is. Make less than full connected for growing boundaries
+        sample is. Make it less than fully connected to grow along boundaries.
 
     Returns 
     ----------
 
     label : {nd_array}
         the label image with the selected ROI after region growing. only returned
-	in debug mode.
+        in debug mode.
 
     """
 
@@ -394,25 +394,25 @@
         Label   = ROI[roi_index]['Label']
         lcutoff = low_thresh  * ROI[roi_index]['voxelMean']
         hcutoff = high_thresh * ROI[roi_index]['voxelMean']
-   	if left < 0: 
-           left = 0
-    	if bottom < 0: 
+        if left < 0: 
+            left = 0
+        if bottom < 0: 
             bottom = 0
-    	if right > cols-1: 
+        if right > cols-1: 
             right = cols-1
-    	if top > rows-1: 
+        if top > rows-1: 
             top = rows-1
         expanded_ROI['Left']   = left 
         expanded_ROI['Right']  = right 
         expanded_ROI['Top']    = top 
         expanded_ROI['Bottom'] = bottom 
         expanded_ROI['Label']  = Label 
-	rows    = top-bottom
-	cols    = right-left
+        rows    = top-bottom
+        cols    = right-left
         label   = NP.zeros(rows*cols, dtype=NP.int16).reshape(rows, cols)
         section = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
-	label   = label_image[bottom:top, left:right].copy()
-	section = (raw_image[bottom:top, left:right].astype(NP.float64)).copy()
+        label   = label_image[bottom:top, left:right].copy()
+        section = (raw_image[bottom:top, left:right].astype(NP.float64)).copy()
     elif dimensions == 3:  
         left    = ROI[roi_index]['Left']-x_ext
         right   = ROI[roi_index]['Right']+x_ext
@@ -423,17 +423,17 @@
         Label   = ROI[roi_index]['Label']
         lcutoff = low_thresh  * ROI[roi_index]['voxelMean']
         hcutoff = high_thresh * ROI[roi_index]['voxelMean']
-    	if left < 0: 
+        if left < 0: 
             left = 0
-    	if bottom < 0: 
+        if bottom < 0: 
             bottom = 0
-    	if right > cols-1: 
+        if right > cols-1: 
             right = cols-1
-    	if top > rows-1: 
+        if top > rows-1: 
             top = rows-1
-    	if front < 0: 
+        if front < 0: 
             front = 0
-    	if back > layers-1: 
+        if back > layers-1: 
             back = layers-1
         expanded_ROI['Left']   = left 
         expanded_ROI['Right']  = right 
@@ -442,13 +442,13 @@
         expanded_ROI['Back']   = back 
         expanded_ROI['Front']  = front 
         expanded_ROI['Label']  = Label 
-	rows    = top-bottom
-	cols    = right-left
-	layers  = back-front
+        rows    = top-bottom
+        cols    = right-left
+        layers  = back-front
         label   = NP.zeros(layers*rows*cols, dtype=NP.int16).reshape(layers, rows, cols)
-	label   = label_image[front:back, bottom:top, left:right].copy()
+        label   = label_image[front:back, bottom:top, left:right].copy()
         section = NP.zeros(layers*rows*cols, dtype=NP.float64).reshape(layers, rows, cols)
-	section = (raw_image[front:back, bottom:top, left:right].astype(NP.float64)).copy()
+        section = (raw_image[front:back, bottom:top, left:right].astype(NP.float64)).copy()
 
     #
     # this newgrow_ROI gets filled in and the label image is grown
@@ -458,27 +458,27 @@
     S.region_grow(section, label, expanded_ROI, newgrow_ROI, lcutoff, hcutoff, Label, N_connectivity)
 
     if debug==1:  
-	#
-	# do not update ROI for index and the label_image 
-	#
+        #
+        # do not update ROI for index and the label_image 
+        #
         return label
 
     else:
-	#
-	# update (overwrite) ROI for index and the label_image 
-	#
+        #
+        # update (overwrite) ROI for index and the label_image 
+        #
         if dimensions == 2:  
-	    ROI[roi_index]['Left']   = newgrow_ROI['Left']
-	    ROI[roi_index]['Right']  = newgrow_ROI['Right']
-	    ROI[roi_index]['Top']    = newgrow_ROI['Top']
-	    ROI[roi_index]['Bottom'] = newgrow_ROI['Bottom']
-	    left   = ROI[roi_index]['Left']
-	    right  = ROI[roi_index]['Right']
-	    top    = ROI[roi_index]['Top']
-	    bottom = ROI[roi_index]['Bottom']
-	    rows   = top-bottom
-	    cols   = right-left
-	    label_image[bottom:top,left:right] = label[0:rows,0:cols]
+            ROI[roi_index]['Left']   = newgrow_ROI['Left']
+            ROI[roi_index]['Right']  = newgrow_ROI['Right']
+            ROI[roi_index]['Top']    = newgrow_ROI['Top']
+            ROI[roi_index]['Bottom'] = newgrow_ROI['Bottom']
+            left   = ROI[roi_index]['Left']
+            right  = ROI[roi_index]['Right']
+            top    = ROI[roi_index]['Top']
+            bottom = ROI[roi_index]['Bottom']
+            rows   = top-bottom
+            cols   = right-left
+            label_image[bottom:top,left:right] = label[0:rows,0:cols]
         elif dimensions == 3:  
             ROI[roi_index]['Left']   = newgrow_ROI['Left']
             ROI[roi_index]['Right']  = newgrow_ROI['Right']
@@ -486,17 +486,17 @@
             ROI[roi_index]['Bottom'] = newgrow_ROI['Bottom']
             ROI[roi_index]['Front']  = newgrow_ROI['Front']
             ROI[roi_index]['Back']   = newgrow_ROI['Back']
-	    left   = expanded_ROI['Left']
-	    right  = expanded_ROI['Right']
-	    top    = expanded_ROI['Top']
-	    bottom = expanded_ROI['Bottom']
-	    front  = expanded_ROI['Front']
-	    back   = expanded_ROI['Back']
-	    rows   = top-bottom
-	    cols   = right-left
-	    layers = back-front
-	    label_image[front:back,bottom:top,left:right] = label[0:layers,0:rows,0:cols]
-			     
+            left   = expanded_ROI['Left']
+            right  = expanded_ROI['Right']
+            top    = expanded_ROI['Top']
+            bottom = expanded_ROI['Bottom']
+            front  = expanded_ROI['Front']
+            back   = expanded_ROI['Back']
+            rows   = top-bottom
+            cols   = right-left
+            layers = back-front
+            label_image[front:back,bottom:top,left:right] = label[0:layers,0:rows,0:cols]
+                             
         return 
 
 
@@ -525,8 +525,8 @@
 
     window : {int}
         integer value of moving 2D window. Window slides in 2D over image and is the
-	region-of-interest from which co-occurence texture features are extracted. The
-	window is 2D square so only a single value is entered. Default window is 32x32. 
+        region-of-interest from which co-occurence texture features are extracted. The
+        window is 2D square so only a single value is entered. Default window is 32x32. 
 
     distance : {int}
         integer value of pixel offset in forming joint histogram. default value 2
@@ -539,8 +539,8 @@
 
     cocm_images : {dictionary}
         
-	co_occurence_feature_images. contains 4 normalized feature
-	windows with keys: energy, entropy, contrast and homogeneity.
+        co_occurence_feature_images. contains 4 normalized feature
+        windows with keys: energy, entropy, contrast and homogeneity.
 
     """
 
@@ -571,34 +571,34 @@
         for j in col_indices:
             left  = j - window
             right = j + window 
-	    source_region[0:2*window, 0:2*window] = copy_image[bottom:top, left:right] 
+            source_region[0:2*window, 0:2*window] = copy_image[bottom:top, left:right] 
             # scale segment to 8 bits. this needs to be smarter (e.g. use integrated histogram method)
             max_value = source_region.max()
             min_value = source_region.min()
             scale     = 255.0 / (max_value-min_value)
             image_roi = (scale*(source_region-min_value)).astype(NP.int16)
-	    # image_roi is short type
-	    cocm_block[:] = 0.0
-	    S.roi_co_occurence(section, image_roi, cocm_block, distance, orientation)
-	    # normalize the joint histogram prior to feature extraction
-	    joint_histogram = cocm_block.astype(NP.float64) 
-	    joint_histogram = joint_histogram / joint_histogram.sum()
-	    # to prevent log(0)
-	    joint_histogram += epsilon
-	    # compute the com features
-	    energy      = joint_histogram.std()
-	    H           = joint_histogram * NP.log(joint_histogram)
-	    entropy     = H.sum()
-	    r, c        = joint_histogram.shape
-	    [a, b]      = NP.mgrid[1:c+1, 1:r+1]
-	    contrast    = ((NP.square(a-b))*joint_histogram).sum()
-	    d           = 1.0 + NP.abs(a-b)
-	    homogeneity = (joint_histogram / d).sum()
-	    # store the feature pixel for the 4 images
-	    energy_image[i, j]      = energy
-	    entropy_image[i, j]     = entropy
-	    contrast_image[i, j]    = contrast
-	    homogeneity_image[i, j] = homogeneity
+            # image_roi is short type
+            cocm_block[:] = 0.0
+            S.roi_co_occurence(section, image_roi, cocm_block, distance, orientation)
+            # normalize the joint histogram prior to feature extraction
+            joint_histogram = cocm_block.astype(NP.float64) 
+            joint_histogram = joint_histogram / joint_histogram.sum()
+            # to prevent log(0)
+            joint_histogram += epsilon
+            # compute the com features
+            energy      = joint_histogram.std()
+            H           = joint_histogram * NP.log(joint_histogram)
+            entropy     = H.sum()
+            r, c        = joint_histogram.shape
+            [a, b]      = NP.mgrid[1:c+1, 1:r+1]
+            contrast    = ((NP.square(a-b))*joint_histogram).sum()
+            d           = 1.0 + NP.abs(a-b)
+            homogeneity = (joint_histogram / d).sum()
+            # store the feature pixel for the 4 images
+            energy_image[i, j]      = energy
+            entropy_image[i, j]     = entropy
+            contrast_image[i, j]    = contrast
+            homogeneity_image[i, j] = homogeneity
 
     scale_energy      = 1.0 / max(energy_image.max(), abs(energy_image.min()))
     scale_entropy     = 1.0 / max(entropy_image.max(), abs(entropy_image.min()))
@@ -635,7 +635,7 @@
 
     ROI : {dictionary}
         Region of Interest structure that has blob bounding boxes. The largest
-	2D target bounding box is extracted.
+        2D target bounding box is extracted.
 
     Returns 
     ----------
@@ -686,7 +686,7 @@
          [label_image[bottom:top, left:right]==Label] = 1 
     # thin this region
     S.thin_filter(thin_kernel['jmask'], thin_kernel['kmask'], thin_kernel['number3x3Masks'],
-	          roi_rows, roi_cols, cols, input, cinput, erosion, dialation, hmt, copy)
+                  roi_rows, roi_cols, cols, input, cinput, erosion, dialation, hmt, copy)
 
     # accumulate the images (do not over-write). for overlapping regions
     input[inflate:rgrows+inflate,inflate:rgcols+inflate] \
@@ -724,7 +724,7 @@
     """
     if ROI==None:
         ROIList = NP.zeros(1, dtype=_objstruct)
-	[rows, cols] = label_image.shape
+        [rows, cols] = label_image.shape
         ROIList['Left']   = 2
         ROIList['Right']  = cols-3
         ROIList['Bottom'] = 2
@@ -746,39 +746,39 @@
     indices = range(0, number_regions)
     inflate = 1
     for i in indices:
-	left     = ROI[i]['Left']-1
-	right    = ROI[i]['Right']+1
-	bottom   = ROI[i]['Bottom']-1
-	top      = ROI[i]['Top']+1
-	Label    = ROI[i]['Label']
-	if left < 0: 
-	    left = 0
-	if bottom < 0: 
-	    bottom = 0
-	if right > cols-1: 
-	    right = cols-1
-	if top > rows-1: 
-	    top = rows-1
+        left     = ROI[i]['Left']-1
+        right    = ROI[i]['Right']+1
+        bottom   = ROI[i]['Bottom']-1
+        top      = ROI[i]['Top']+1
+        Label    = ROI[i]['Label']
+        if left < 0: 
+            left = 0
+        if bottom < 0: 
+            bottom = 0
+        if right > cols-1: 
+            right = cols-1
+        if top > rows-1: 
+            top = rows-1
 
-	roi_rows = top-bottom+2*inflate
-	roi_cols = right-left+2*inflate
-	rgrows   = top-bottom
-	rgcols   = right-left
-	# clear the memory
-	input[0:roi_rows, 0:roi_cols] = 0
-	# load the labeled region 
-	input[inflate:inflate+rgrows, inflate:inflate+rgcols] \
-	     [label_image[bottom:top, left:right]==Label] = 1 
-	# thin this region
+        roi_rows = top-bottom+2*inflate
+        roi_cols = right-left+2*inflate
+        rgrows   = top-bottom
+        rgcols   = right-left
+        # clear the memory
+        input[0:roi_rows, 0:roi_cols] = 0
+        # load the labeled region 
+        input[inflate:inflate+rgrows, inflate:inflate+rgcols] \
+             [label_image[bottom:top, left:right]==Label] = 1 
+        # thin this region
         S.thin_filter(thin_kernel['jmask'], thin_kernel['kmask'], thin_kernel['number3x3Masks'],
-		      roi_rows, roi_cols, cols, input, cinput, erosion, dialation, hmt, copy)
+                      roi_rows, roi_cols, cols, input, cinput, erosion, dialation, hmt, copy)
 
-	# accumulate the images (do not over-write). for overlapping regions
-	input[inflate:rgrows+inflate,inflate:rgcols+inflate] \
-	     [input[inflate:rgrows+inflate,inflate:rgcols+inflate]==1] = Label 
-	thin_edge_image[bottom:top,left:right] = thin_edge_image[bottom:top,left:right] + \
-	                                         input[inflate:rgrows+inflate,inflate:rgcols+inflate] 
-	    
+        # accumulate the images (do not over-write). for overlapping regions
+        input[inflate:rgrows+inflate,inflate:rgcols+inflate] \
+             [input[inflate:rgrows+inflate,inflate:rgcols+inflate]==1] = Label 
+        thin_edge_image[bottom:top,left:right] = thin_edge_image[bottom:top,left:right] + \
+                                                 input[inflate:rgrows+inflate,inflate:rgcols+inflate] 
+            
 
     # accumulate overlaps set back to binary at later date
     mat_image[:, :] = thin_edge_image[:, :]
@@ -787,7 +787,7 @@
 
 
 def laws_texture_filter(raw_image, label_image, laws_kernel, ROI=None, dc_thres=1.0,
-		        mean_feature=1, verbose=0):
+                        mean_feature=1, verbose=0):
     """
     texture_images = laws_texture_filter(raw_image, label_image, laws_kernel, ROI=None, verbose=1)
     .
@@ -812,13 +812,13 @@
 
     dc_thres : {float}
         used as a filter. Sets texture feature to 0.0 when the 
-	mean level is above this. Removes the low frequency, high amplitude
-	image regions from the feature list
+        mean level is above this. Removes the low frequency, high amplitude
+        image regions from the feature list
 
     mean_feature : {0, 1}, optional
         when set to 1, the feature is the mean value of the
-	selected Law's texture filter. When 0 the feature is
-	the standard deviation.
+        selected Law's texture filter. When 0 the feature is
+        the standard deviation.
 
     verbose : {0, 1}, optional
         determines if return is to include Law's filter images
@@ -828,13 +828,13 @@
 
     laws_image : {dictionary}
         contains 21 Laws filtered  regions for each ROI 
-	returned if verbose=1
+        returned if verbose=1
         
 
     """
     if ROI==None:
         ROI= NP.zeros(1, dtype=_objstruct)
-	[rows, cols] = label_image.shape
+        [rows, cols] = label_image.shape
         ROI['Left']   = 2
         ROI['Right']  = cols-3
         ROI['Bottom'] = 2
@@ -846,46 +846,46 @@
     indices         = range(0, number_regions)
     filters         = range(0, layers)
     for i in indices:
-	left   = ROI[i]['Left']
-	right  = ROI[i]['Right']
-	bottom = ROI[i]['Bottom']
-	top    = ROI[i]['Top']
-	Label  = ROI[i]['Label']
-	rows   = top-bottom
-	cols   = right-left
+        left   = ROI[i]['Left']
+        right  = ROI[i]['Right']
+        bottom = ROI[i]['Bottom']
+        top    = ROI[i]['Top']
+        Label  = ROI[i]['Label']
+        rows   = top-bottom
+        cols   = right-left
         label_region  = NP.zeros(rows*cols, dtype=NP.uint16).reshape(rows, cols)
         source_region = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
         laws_block    = NP.zeros(layers*rows*cols, dtype=NP.float32).reshape(layers, rows, cols)
-	# load the labeled region 
-	label_region[0:rows,  0:cols][label_image[bottom:top, left:right]==Label] = 1 
-	source_region[0:rows, 0:cols] = raw_image[bottom:top, left:right] 
+        # load the labeled region 
+        label_region[0:rows,  0:cols][label_image[bottom:top, left:right]==Label] = 1 
+        source_region[0:rows, 0:cols] = raw_image[bottom:top, left:right] 
 
-	S.laws_texture_metric(label_region, source_region, laws_block, laws_kernel['numKernels'],
-		              laws_kernel['kernelSize'], laws_kernel['filters'],
-		              laws_kernel['coefficients'][0], laws_kernel['coefficients'][1],
-		              laws_kernel['coefficients'][2], laws_kernel['coefficients'][3],
-		              laws_kernel['coefficients'][4], laws_kernel['coefficients'][5])
+        S.laws_texture_metric(label_region, source_region, laws_block, laws_kernel['numKernels'],
+                              laws_kernel['kernelSize'], laws_kernel['filters'],
+                              laws_kernel['coefficients'][0], laws_kernel['coefficients'][1],
+                              laws_kernel['coefficients'][2], laws_kernel['coefficients'][3],
+                              laws_kernel['coefficients'][4], laws_kernel['coefficients'][5])
 
         for j in filters:
-	    # compute the energy measure for each filter in the ROI
-	    mask_image = laws_block[j, :, :][label_region[:, :]>0]
-	    mean = abs(mask_image.mean())
-	    std  = mask_image.std()
-	    if mean > dc_thres:
-	        mean = 0.0
-	        std = 0.0
-	    if mean_feature == 1:
-	        ROI[i]['TEM'][j] = mean 
+            # compute the energy measure for each filter in the ROI
+            mask_image = laws_block[j, :, :][label_region[:, :]>0]
+            mean = abs(mask_image.mean())
+            std  = mask_image.std()
+            if mean > dc_thres:
+                mean = 0.0
+                std = 0.0
+            if mean_feature == 1:
+                ROI[i]['TEM'][j] = mean 
             else:
-	        ROI[i]['TEM'][j] = std 
+                ROI[i]['TEM'][j] = std 
 
-	ROI[i]['TEM'][:] = ROI[i]['TEM'][:] / ROI[i]['TEM'][:].max() 
+        ROI[i]['TEM'][:] = ROI[i]['TEM'][:] / ROI[i]['TEM'][:].max() 
         # accumulate the 21 Law's filtered ROI's and optional
-	# return as image (3D)
+        # return as image (3D)
         laws_image_list[i] = laws_block
 
     if verbose == 1:
-	return laws_image_list
+        return laws_image_list
     else:
         return 
 
@@ -923,13 +923,13 @@
     if ROI==None:
         ROIList = NP.zeros(1, dtype=_objstruct)
         if dimensions == 2:  
-	    [rows, cols] = label_image.shape
+            [rows, cols] = label_image.shape
             ROIList['Left']   = 1
             ROIList['Right']  = cols-1
             ROIList['Bottom'] = 1
             ROIList['Top']    = rows-1
         elif dimensions == 3:  
-	    [layers, rows, cols] = label_image.shape
+            [layers, rows, cols] = label_image.shape
             ROIList['Left']   = 1
             ROIList['Right']  = cols-1
             ROIList['Bottom'] = 1
@@ -942,34 +942,34 @@
     inflate = 1
     for i in indices:
         if dimensions == 2:  
-	    left   = ROI[i]['Left']
-	    right  = ROI[i]['Right']
-	    bottom = ROI[i]['Bottom']
-	    top    = ROI[i]['Top']
-	    Label  = ROI[i]['Label']
-	    rows   = top-bottom-1
-	    cols   = right-left-1
+            left   = ROI[i]['Left']
+            right  = ROI[i]['Right']
+            bottom = ROI[i]['Bottom']
+            top    = ROI[i]['Top']
+            Label  = ROI[i]['Label']
+            rows   = top-bottom-1
+            cols   = right-left-1
             section= NP.zeros(rows*cols, dtype=raw_image.dtype).reshape(rows, cols)
-	    section = raw_image[bottom:top, left:right] \
-	                       [label_image[bottom:top, left:right]==Label]
+            section = raw_image[bottom:top, left:right] \
+                               [label_image[bottom:top, left:right]==Label]
         elif dimensions == 3:  
-	    left   = ROI[i]['Left']
-	    right  = ROI[i]['Right']
-	    bottom = ROI[i]['Bottom']
-	    top    = ROI[i]['Top']
-	    front  = ROI[i]['Front']
-	    back   = ROI[i]['Back']
-	    Label  = ROI[i]['Label']
-	    rows   = top-bottom-1
-	    cols   = right-left-1
-	    layers = back-front-1
+            left   = ROI[i]['Left']
+            right  = ROI[i]['Right']
+            bottom = ROI[i]['Bottom']
+            top    = ROI[i]['Top']
+            front  = ROI[i]['Front']
+            back   = ROI[i]['Back']
+            Label  = ROI[i]['Label']
+            rows   = top-bottom-1
+            cols   = right-left-1
+            layers = back-front-1
             section= NP.zeros(layers*rows*cols, dtype=raw_image.dtype).reshape(layers, rows, cols)
-	    section = raw_image[front:back, bottom:top, left:right] \
-			       [label_image[front:back, bottom:top, left:right]==Label]
+            section = raw_image[front:back, bottom:top, left:right] \
+                               [label_image[front:back, bottom:top, left:right]==Label]
 
-	mask = section[section>0]
-	ROI[i]['voxelMean'] = mask.mean()
-	ROI[i]['voxelVar']  = mask.std()
+        mask = section[section>0]
+        ROI[i]['voxelMean'] = mask.mean()
+        ROI[i]['voxelVar']  = mask.std()
 
     return 
 
@@ -1021,17 +1021,17 @@
 
     indices = range(0, groups)
     for i in indices:
-	ROIList[i]['Left']   = c_ext_ROI[i]['Left']
-	ROIList[i]['Right']  = c_ext_ROI[i]['Right']
-	ROIList[i]['Bottom'] = c_ext_ROI[i]['Bottom']
-	ROIList[i]['Top']    = c_ext_ROI[i]['Top']
-	ROIList[i]['Front']  = c_ext_ROI[i]['Front']
-	ROIList[i]['Back']   = c_ext_ROI[i]['Back']
-	ROIList[i]['Label']  = c_ext_ROI[i]['Label']
-	ROIList[i]['Mass']   = c_ext_ROI[i]['Mass']
-	ROIList[i]['cX']     = c_ext_ROI[i]['cX']
-	ROIList[i]['cY']     = c_ext_ROI[i]['cY']
-	ROIList[i]['cZ']     = c_ext_ROI[i]['cZ']
+        ROIList[i]['Left']   = c_ext_ROI[i]['Left']
+        ROIList[i]['Right']  = c_ext_ROI[i]['Right']
+        ROIList[i]['Bottom'] = c_ext_ROI[i]['Bottom']
+        ROIList[i]['Top']    = c_ext_ROI[i]['Top']
+        ROIList[i]['Front']  = c_ext_ROI[i]['Front']
+        ROIList[i]['Back']   = c_ext_ROI[i]['Back']
+        ROIList[i]['Label']  = c_ext_ROI[i]['Label']
+        ROIList[i]['Mass']   = c_ext_ROI[i]['Mass']
+        ROIList[i]['cX']     = c_ext_ROI[i]['cX']
+        ROIList[i]['cY']     = c_ext_ROI[i]['cY']
+        ROIList[i]['cZ']     = c_ext_ROI[i]['cZ']
 
     return ROIList[ROIList['Mass']>dust]
 
@@ -1052,8 +1052,8 @@
 
     mask : {int}
         the size of the 2D or 3D connectivity mask. For 2D this is 1, 4 or 8.
-	For 3D this is 1, 6, 14 or 28. Mask = 1 is ANY connection in 3x3
-	or 3x3x3 mask for 2D or 3D, respectively.
+        For 3D this is 1, 6, 14 or 28. Mask = 1 is ANY connection in 3x3
+        or 3x3x3 mask for 2D or 3D, respectively.
 
     Returns 
     ----------
@@ -1069,17 +1069,17 @@
     dimensions = binary_edge_image.ndim
     if dimensions == 2:  
         if mask != 1 and mask != 4 and mask != 8:
-	    mask = 1 
+            mask = 1 
         [rows, cols] = binary_edge_image.shape
         labeled_edge_image_or_vol = NP.zeros(rows*cols, dtype=NP.uint16).reshape(rows, cols)
     elif dimensions == 3:
         if mask != 1 and mask != 6 and mask != 14 and mask != 28:
-	    mask = 1 
+            mask = 1 
         [layers, rows, cols] = binary_edge_image.shape
         labeled_edge_image_or_vol = NP.zeros(layers*rows*cols, dtype=NP.uint16).reshape(layers, rows, cols)
     else:
         labeled_edge_image_or_vol = None
-	groups = 0
+        groups = 0
         return labeled_edge_image_or_vol, groups
 
     groups = S.get_blobs(binary_edge_image, labeled_edge_image_or_vol, mask)
@@ -1182,7 +1182,7 @@
 
     conv_binary : {0, 1}, optional
         flag to convert edge_filter image to binary valued. default 
-	is binary conversion off
+        is binary conversion off
 
     Returns 
     ----------
@@ -1194,14 +1194,14 @@
     # make sure the input is 16 bits. this is input to edge machine
     # so can handle raw and 8 bit scaled inputs
     if high_threshold==0:
-	# default to the maximum value of the image
+        # default to the maximum value of the image
         high_threshold = slice.max()
 
     slice = slice.astype(NP.int16)
     [rows, cols] = slice.shape
     edge_image = NP.zeros(rows*cols, dtype=NP.float64).reshape(rows, cols)
     S.edge_prefilter(low_threshold, high_threshold, filter['kernelSize'], filter['kernel'],
-		     slice, edge_image)
+                     slice, edge_image)
 
     if conv_binary == 1:
         edge_image[edge_image>0] = 1
@@ -1220,7 +1220,7 @@
 
     ROI : {dictionary}
         the ROI is the automatically extracted blob regions of interest
-	and contains the rectangular bounding box of each blob.
+        and contains the rectangular bounding box of each blob.
 
     Returns 
     ----------
@@ -1231,8 +1231,8 @@
     """
     max_index = ROI[:]['Mass'].argmax()
     bounding_box = {'Left' : ROI[max_index]['Left'], 'Right' : ROI[max_index]['Right'],
-		    'Top' : ROI[max_index]['Top'], 'Bottom' : ROI[max_index]['Bottom'],
-		    'Label' : ROI[max_index]['Label']} 
+                    'Top' : ROI[max_index]['Top'], 'Bottom' : ROI[max_index]['Bottom'],
+                    'Label' : ROI[max_index]['Label']} 
 
     return bounding_box 
 
@@ -1247,7 +1247,7 @@
 
     ROI : {dictionary}
         the ROI is the automatically extracted blob regions of interest
-	and contains the rectangular bounding box of each blob.
+        and contains the rectangular bounding box of each blob.
 
     Returns 
     ----------
@@ -1265,10 +1265,10 @@
                              ('bottom', 'i')])
     measures = NP.zeros(number, dtype=_shortstruct)
     for i in indices:
-	measures[i]['left']   = ROI[i]['Left']
-	measures[i]['right']  = ROI[i]['Right']
-	measures[i]['top']    = ROI[i]['Top']
-	measures[i]['bottom'] = ROI[i]['Bottom']
+        measures[i]['left']   = ROI[i]['Left']
+        measures[i]['right']  = ROI[i]['Right']
+        measures[i]['top']    = ROI[i]['Top']
+        measures[i]['bottom'] = ROI[i]['Bottom']
 
     return measures
 
@@ -1338,11 +1338,11 @@
     ----------
     gWdith : {int}, optional
          width of derivative of Gaussian kernel.
-	 default value is 20
+         default value is 20
 
     sigma : {float}, optional
         sigma term of derivative of Gaussian kernel
-	 default value is 1.0
+         default value is 1.0
 
     Returns 
     ----------
@@ -1515,7 +1515,7 @@
     coefficients[5, :] =  (-1.0,  6.0, -15.0, 20.0, -15.0,  6.0, -1.0)
 
     LAWSFilter= {'numKernels' : 6, 'kernelSize' : 7, 'filters' : 21,
-		 'coefficients': coefficients, 'names': names} 
+                 'coefficients': coefficients, 'names': names} 
 
     return LAWSFilter
 
@@ -1561,17 +1561,17 @@
     mask_array = {}
     count = 0
     for i in outer_indices:
-	rowFilter = LAWSFilter['coefficients'][i]
-	colFilter = LAWSFilter['coefficients'][i]
-	matrix = NP.outer(rowFilter, colFilter)
-	mask_array[count] = 2.0*matrix
-	count = count + 1 
+        rowFilter = LAWSFilter['coefficients'][i]
+        colFilter = LAWSFilter['coefficients'][i]
+        matrix = NP.outer(rowFilter, colFilter)
+        mask_array[count] = 2.0*matrix
+        count = count + 1 
         inner_indices = range(i+1, LAWSFilter['numKernels'])
         for j in inner_indices:
-	    colFilter = LAWSFilter['coefficients'][j]
-	    matrix = NP.outer(rowFilter, colFilter) + NP.outer(colFilter, rowFilter)
-	    mask_array[count] = matrix
-	    count = count + 1 
+            colFilter = LAWSFilter['coefficients'][j]
+            matrix = NP.outer(rowFilter, colFilter) + NP.outer(colFilter, rowFilter)
+            mask_array[count] = matrix
+            count = count + 1 
 
     return mask_array
 
@@ -1640,12 +1640,12 @@
     center_y = cols / 4
 
     for i in y_indices:
-	x = math.sqrt(float(radius)**2 - float(i)**2)
-	# different raw mean levels
-	test_image[1*center_y+i, 1*center_x-x:1*center_x+x] = 80
-	test_image[1*center_y+i, 3*center_x-x:3*center_x+x] = 90
-	test_image[3*center_y+i, 1*center_x-x:1*center_x+x] = 100
-	test_image[3*center_y+i, 3*center_x-x:3*center_x+x] = 110
+        x = math.sqrt(float(radius)**2 - float(i)**2)
+        # different raw mean levels
+        test_image[1*center_y+i, 1*center_x-x:1*center_x+x] = 80
+        test_image[1*center_y+i, 3*center_x-x:3*center_x+x] = 90
+        test_image[3*center_y+i, 1*center_x-x:1*center_x+x] = 100
+        test_image[3*center_y+i, 3*center_x-x:3*center_x+x] = 110
 
     return test_image
 
@@ -1676,10 +1676,10 @@
     center_y = cols / 4
 
     for i in y_indices:
-	x = math.sqrt(float(radius)**2 - float(i)**2)
-	# different raw mean levels
-	test_image[1*center_y+i, 1*center_x-x:1*center_x+x] = 100
-	test_image[3*center_y+i, 3*center_x-x:3*center_x+x] = 100
+        x = math.sqrt(float(radius)**2 - float(i)**2)
+        # different raw mean levels
+        test_image[1*center_y+i, 1*center_x-x:1*center_x+x] = 100
+        test_image[3*center_y+i, 3*center_x-x:3*center_x+x] = 100
 
     return test_image
 

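For anyone reading past the whitespace-only re-indentation in _segmenter.py above: the co-occurrence feature math (repeated in the per-ROI and sliding-window texture routines) is self-contained once the joint histogram is filled in. A minimal sketch, assuming the caller supplies the cocm_block produced by S.roi_co_occurence and the module's epsilon constant, neither of which is defined in these hunks:

import numpy as NP

def com_features(cocm_block, epsilon):
    # normalize the joint histogram prior to feature extraction
    joint_histogram = cocm_block.astype(NP.float64)
    joint_histogram = joint_histogram / joint_histogram.sum()
    # small offset to prevent log(0); the histogram is square (num_bits x num_bits)
    joint_histogram += epsilon
    # the same four measures stored in ROI[i]['COM'][2:6] above
    energy = joint_histogram.std()
    entropy = (joint_histogram * NP.log(joint_histogram)).sum()
    r, c = joint_histogram.shape
    a, b = NP.mgrid[1:c+1, 1:r+1]
    contrast = (NP.square(a - b) * joint_histogram).sum()
    homogeneity = (joint_histogram / (1.0 + NP.abs(a - b))).sum()
    return energy, entropy, contrast, homogeneity
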
Modified: trunk/scipy/ndimage/tests/test_segment.py
===================================================================
--- trunk/scipy/ndimage/tests/test_segment.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/ndimage/tests/test_segment.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -37,13 +37,13 @@
     filter      = seg.build_2d_kernel(hiFilterCutoff=60.0)
     img         = seg.build_test_discs()
     disc_mask   = seg.pre_filter(img, filter, low_threshold=50, high_threshold=255,
-		                 conv_binary=1)
+                                 conv_binary=1)
     label_disc_mask, disc_mask_groups = seg.get_blobs(disc_mask)
     disc_ROI    = seg.get_blob_regions(label_disc_mask, disc_mask_groups)
     laws_kernel = seg.build_laws_kernel() 
     impulse     = seg.build_test_impulses()
     calib       = seg.laws_texture_filter(impulse, label_disc_mask, laws_kernel,
-		                          ROI=disc_ROI, verbose=1)
+                                          ROI=disc_ROI, verbose=1)
     kernels = calib[0]
     x = laws_kernel['coefficients'][0]
     m = NP.outer(x, x)
@@ -57,13 +57,13 @@
     img = seg.build_test_unit_discs()
     disc = seg.pre_filter(img, filter, low_threshold=50, high_threshold=255)
     disc_mask = seg.pre_filter(img, filter, low_threshold=50, high_threshold=255,
-		               conv_binary=1)
+                               conv_binary=1)
     label_disc_mask, disc_mask_groups = seg.get_blobs(disc_mask)
     disc_ROI = seg.get_blob_regions(label_disc_mask, disc_mask_groups)
     laws_kernel = seg.build_laws_kernel() 
     texture_img = seg.build_test_texture_discs()
     seg.laws_texture_filter(texture_img, label_disc_mask, laws_kernel, ROI=disc_ROI,
-		            mean_feature=1, verbose=0)
+                            mean_feature=1, verbose=0)
     tem = disc_ROI['TEM']
     return tem 
 
@@ -87,72 +87,72 @@
         truth[3] = (332, 435, 435, 333)
         match = (truth==measures).all()
         assert_equal(match, True)
-    	# load the ground truth for the bounding box test image mean value
-    	voxel_truth = NP.zeros(number, dtype=NP.float64)
-    	voxel_truth = (80.0, 90.0, 100.0, 110.0)
-    	match = (voxel_truth==voxel_means).all()
-    	assert_equal(match, True)
+        # load the ground truth for the bounding box test image mean value
+        voxel_truth = NP.zeros(number, dtype=NP.float64)
+        voxel_truth = (80.0, 90.0, 100.0, 110.0)
+        match = (voxel_truth==voxel_means).all()
+        assert_equal(match, True)
 
         return
 
     def test_canny(self):
         # generate 4 discs, find the bounding boxes and 
         # confirm the bounding boxes are at the true position
-    	measures, voxel_means = run_canny()
-    	number = measures.size
-    	_shortstruct = NP.dtype([('left', 'i'),
-                             	 ('right', 'i'),
-                             	 ('top', 'i'),
-                             	 ('bottom', 'i')])
+        measures, voxel_means = run_canny()
+        number = measures.size
+        _shortstruct = NP.dtype([('left', 'i'),
+                                 ('right', 'i'),
+                                 ('top', 'i'),
+                                 ('bottom', 'i')])
 
-    	assert_equal(number, 4)
-    	# load the ground truth for the bounding box
-    	truth = NP.zeros(number, dtype=_shortstruct)
-    	truth[0] = (78,  177, 177, 79)
-    	truth[1] = (334, 433, 177, 79)
-    	truth[2] = (78,  177, 433, 335)
-    	truth[3] = (334, 433, 433, 335)
-    	match = (truth==measures).all()
-    	assert_equal(match, True)
-    	# load the ground truth for the bounding box test image mean value
-    	voxel_truth = NP.zeros(number, dtype=NP.float64)
-    	voxel_truth = (80.0, 90.0, 100.0, 110.0)
-    	match = (voxel_truth==voxel_means).all()
-    	assert_equal(match, True)
+        assert_equal(number, 4)
+        # load the ground truth for the bounding box
+        truth = NP.zeros(number, dtype=_shortstruct)
+        truth[0] = (78,  177, 177, 79)
+        truth[1] = (334, 433, 177, 79)
+        truth[2] = (78,  177, 433, 335)
+        truth[3] = (334, 433, 433, 335)
+        match = (truth==measures).all()
+        assert_equal(match, True)
+        # load the ground truth for the bounding box test image mean value
+        voxel_truth = NP.zeros(number, dtype=NP.float64)
+        voxel_truth = (80.0, 90.0, 100.0, 110.0)
+        match = (voxel_truth==voxel_means).all()
+        assert_equal(match, True)
 
-    	return
+        return
 
     def test_texture1(self):
-	# [1] texture1 is delta functions and confirm the
-	#     filter result is outer product of the L kernel
-    	M, Laws_LL = run_texture1()
-    	match = (Laws_LL==M).all()
-    	assert_equal(match, True)
-    	return
+        # [1] texture1 uses delta-function impulses; confirm the
+        #     filter result is the outer product of the L kernel
+        M, Laws_LL = run_texture1()
+        match = (Laws_LL==M).all()
+        assert_equal(match, True)
+        return
 
     def test_texture2(self):
-	# [2] texture2 is 2 plane waves and assert the 20-element feature
-	#     vector for each disc is correct
-    	tem = run_texture2()
-	tem0 = tem[0]
-	tem1 = tem[1]
-	truth_tem0 = NP.array(
-			[ 0.        ,  0.        ,  0.        ,  0.        ,  0.        ,
+        # [2] texture2 is two plane waves; assert that the 21-element feature
+        #     vector for each disc is correct
+        tem = run_texture2()
+        tem0 = tem[0]
+        tem1 = tem[1]
+        truth_tem0 = NP.array(
+                        [ 0.        ,  0.        ,  0.        ,  0.        ,  0.        ,
                           0.        ,  0.13306101,  0.08511007,  0.05084148,  0.07550675,
                           0.4334695 ,  0.03715914,  0.00289055,  0.02755581,  0.48142046,
                           0.03137803,  0.00671277,  0.51568902,  0.01795249,  0.49102375,  1.
                         ], dtype=NP.float32)
-	truth_tem1 = NP.array(
+        truth_tem1 = NP.array(
                         [ 0.        ,  0.        ,  0.        ,  0.        ,  0.        ,
                           0.        ,  0.02970393,  0.00164266,  0.00922416,  0.01221788,
                           0.51485199,  0.03298925,  0.02212243,  0.01912871,  0.48350537,
                           0.01125561,  0.00826189,  0.49437219,  0.00526817,  0.49736592,  1.
                         ], dtype=NP.float32)
 
-    	assert_array_almost_equal(tem0, truth_tem0, decimal=6)
-    	assert_array_almost_equal(tem1, truth_tem1, decimal=6)
+        assert_array_almost_equal(tem0, truth_tem0, decimal=6)
+        assert_array_almost_equal(tem1, truth_tem1, decimal=6)
 
-    	return
+        return
 
 if __name__ == "__main__":
     inittest.main()

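The calibration in test_texture1 rests on a separability identity: filtering a unit impulse with a separable kernel, first down the columns and then along the rows, reproduces the outer product of the 1-D kernel with itself, which is why the test compares the Laws LL response against NP.outer(x, x). A throwaway sketch of that identity with an illustrative symmetric kernel (the real test goes through seg.laws_texture_filter and the module's own L coefficients):

import numpy as NP
from scipy import ndimage

L = NP.array([1.0, 4.0, 6.0, 4.0, 1.0])   # illustrative symmetric low-pass kernel
impulse = NP.zeros((11, 11))
impulse[5, 5] = 1.0
# separable filtering: convolve down the columns, then along the rows
response = ndimage.convolve(ndimage.convolve(impulse, L[:, NP.newaxis]),
                            L[NP.newaxis, :])
# the non-zero patch is the outer product of the kernel with itself
assert NP.allclose(response[3:8, 3:8], NP.outer(L, L))
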
Modified: trunk/scipy/optimize/slsqp.py
===================================================================
--- trunk/scipy/optimize/slsqp.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/optimize/slsqp.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -1,5 +1,5 @@
 """This module implements the Sequential Least SQuares Programming optimization
-algorithm (SLSQP), orginally developed by Dieter Kraft. 
+algorithm (SLSQP), originally developed by Dieter Kraft.
 
 See http://www.netlib.org/toms/733
 
@@ -18,37 +18,37 @@
 
 def approx_jacobian(x,func,epsilon,*args):
     """Approximate the Jacobian matrix of callable function func
-    
+
        *Parameters*:
-         x       - The state vector at which the Jacobian matrix is desired        
+         x       - The state vector at which the Jacobian matrix is desired
          func    - A vector-valued function of the form f(x,*args)
          epsilon - The perturbation used to determine the partial derivatives
          *args   - Additional arguments passed to func
-       
+
        *Returns*:
-         An array of dimensions (lenf, lenx) where lenf is the length 
-         of the outputs of func, and lenx is the number of 
-              
+         An array of dimensions (lenf, lenx) where lenf is the length
+         of the outputs of func, and lenx is the number of elements in x.
+
        *Notes*:
          The approximation is done using forward differences
-                
+
     """
     x0 = asfarray(x)
     f0 = func(*((x0,)+args))
     jac = zeros([len(x0),len(f0)])
     dx = zeros(len(x0))
     for i in range(len(x0)):
-       dx[i] = epsilon
-       jac[i] = (func(*((x0+dx,)+args)) - f0)/epsilon
-       dx[i] = 0.0
+        dx[i] = epsilon
+        jac[i] = (func(*((x0+dx,)+args)) - f0)/epsilon
+        dx[i] = 0.0
     return jac.transpose()
 
 
 
 
 def fmin_slsqp( func, x0 , eqcons=[], f_eqcons=None, ieqcons=[], f_ieqcons=None,
-                bounds = [], fprime = None, fprime_eqcons=None, 
-                fprime_ieqcons=None, args = (), iter = 100, acc = 1.0E-6, 
+                bounds = [], fprime = None, fprime_eqcons=None,
+                fprime_ieqcons=None, args = (), iter = 100, acc = 1.0E-6,
                 iprint = 1, full_output = 0, epsilon = _epsilon ):
     """
     Minimize a function using Sequential Least SQuares Programming
@@ -86,12 +86,12 @@
             A function of the form f(x, *args) that returns the m by n
             array of equality constraint normals.  If not provided,
             the normals will be approximated. The array returned by
-            fprime_eqcons should be sized as ( len(eqcons), len(x0) ).  
+            fprime_eqcons should be sized as ( len(eqcons), len(x0) ).
         fprime_ieqcons : callable f(x,*args)
             A function of the form f(x, *args) that returns the m by n
             array of inequality constraint normals.  If not provided,
             the normals will be approximated. The array returned by
-            fprime_ieqcons should be sized as ( len(ieqcons), len(x0) ).  
+            fprime_ieqcons should be sized as ( len(ieqcons), len(x0) ).
         args : sequence
             Additional arguments passed to func and fprime.
         iter : int
@@ -150,9 +150,9 @@
                     7 : "Rank-deficient equality constraint subproblem HFTI",
                     8 : "Positive directional derivative for linesearch",
                     9 : "Iteration limit exceeded" }
-                    
-    # Now do a lot of function wrapping                
 
+    # Now do a lot of function wrapping
+
     # Wrap func
     feval, func = wrap_function(func, args)
     # Wrap fprime, if provided, or approx_fprime if not
@@ -160,7 +160,7 @@
         geval, fprime = wrap_function(fprime,args)
     else:
         geval, fprime = wrap_function(approx_fprime,(func,epsilon))
-        
+
     if f_eqcons:
         # Equality constraints provided via f_eqcons
         ceval, f_eqcons = wrap_function(f_eqcons,args)
@@ -179,8 +179,8 @@
             if eqcons[i]:
                 # Wrap eqcons and eqcons_prime
                 ceval, eqcons[i] = wrap_function(eqcons[i],args)
-                geval, eqcons_prime[i] = wrap_function(approx_fprime, 
-                                                       (eqcons[i],epsilon))                                                           
+                geval, eqcons_prime[i] = wrap_function(approx_fprime,
+                                                       (eqcons[i],epsilon))
 
     if f_ieqcons:
         # Inequality constraints provided via f_ieqcons
@@ -200,29 +200,29 @@
             if ieqcons[i]:
                 # Wrap ieqcons and ieqcons_prime
                 ceval, ieqcons[i] = wrap_function(ieqcons[i],args)
-                geval, ieqcons_prime[i] = wrap_function(approx_fprime, 
-                                                        (ieqcons[i],epsilon))  
-                                                                   
+                geval, ieqcons_prime[i] = wrap_function(approx_fprime,
+                                                        (ieqcons[i],epsilon))
 
+
     # Transform x0 into an array.
     x = asfarray(x0).flatten()
 
     # Set the parameters that SLSQP will need
     # meq = The number of equality constraints
     if f_eqcons:
-       meq = len(f_eqcons(x))
+        meq = len(f_eqcons(x))
     else:
-       meq = len(eqcons)   
+        meq = len(eqcons)
     if f_ieqcons:
-       mieq = len(f_ieqcons(x))   
+        mieq = len(f_ieqcons(x))
     else:
-       mieq = len(ieqcons)
+        mieq = len(ieqcons)
     # m = The total number of constraints
-    m = meq + mieq 
+    m = meq + mieq
     # la = The number of constraints, or 1 if there are no constraints
-    la = array([1,m]).max()  
+    la = array([1,m]).max()
     # n = The number of independent variables
-    n = len(x)               
+    n = len(x)
 
     # Define the workspaces for SLSQP
     n1 = n+1
@@ -232,7 +232,7 @@
     len_jw = mineq
     w = zeros(len_w)
     jw = zeros(len_jw)
- 
+
     # Decompose bounds into xl and xu
     if len(bounds) == 0:
         bounds = [(-1.0E12, 1.0E12) for i in range(n)]
@@ -241,15 +241,15 @@
         'SLSQP Error:  If bounds is specified, len(bounds) == len(x0)'
     else:
         for i in range(len(bounds)):
-           if bounds[i][0] > bounds[i][1]:
-              raise ValueError, \
-              'SLSQP Error: lb > ub in bounds[' + str(i) +']  ' + str(bounds[4])
-                                 
+            if bounds[i][0] > bounds[i][1]:
+                raise ValueError, \
+                'SLSQP Error: lb > ub in bounds[' + str(i) +']  ' + str(bounds[i])
+
     xl = array( [ b[0] for b in bounds ] )
-    xu = array( [ b[1] for b in bounds ] )   
-    
-    
+    xu = array( [ b[1] for b in bounds ] )
 
+
+
     # Initialize the iteration counter and the mode value
     mode = array(0,int)
     acc = array(acc,float)
@@ -261,26 +261,26 @@
         print "%5s %5s %16s %16s" % ("NIT","FC","OBJFUN","GNORM")
 
     while 1:
-    
+
         if mode == 0 or mode == 1: # objective and constraint evaluation required
-        
+
             # Compute objective function
             fx = func(x)
             # Compute the constraints
             if f_eqcons:
-               c_eq = f_eqcons(x)
+                c_eq = f_eqcons(x)
             else:
-               c_eq = array([ eqcons[i](x) for i in range(meq) ])
+                c_eq = array([ eqcons[i](x) for i in range(meq) ])
             if f_ieqcons:
-               c_ieq = f_ieqcons(x)
+                c_ieq = f_ieqcons(x)
             else:
-               c_ieq = array([ ieqcons[i](x) for i in range(len(ieqcons)) ])
-               
+                c_ieq = array([ ieqcons[i](x) for i in range(len(ieqcons)) ])
+
             # Now combine c_eq and c_ieq into a single matrix
             if m == 0:
                 # no constraints
                 c = zeros([la])
-            else: 
+            else:
                 # constraints exist
                 if meq > 0 and mieq == 0:
                     # only equality constraints
@@ -290,29 +290,29 @@
                     c = c_ieq
                 if meq > 0 and mieq > 0:
                     # both equality and inequality constraints exist
-                    c = append(c_eq, c_ieq)         
-            
+                    c = append(c_eq, c_ieq)
+
         if mode == 0 or mode == -1: # gradient evaluation required
-        
+
             # Compute the derivatives of the objective function
             # For some reason SLSQP wants g dimensioned to n+1
             g = append(fprime(x),0.0)
 
-            # Compute the normals of the constraints   
+            # Compute the normals of the constraints
             if fprime_eqcons:
                 a_eq = fprime_eqcons(x)
-            else:         
+            else:
                 a_eq = zeros([meq,n])
                 for i in range(meq):
-                    a_eq[i] = eqcons_prime[i](x)           
-              
+                    a_eq[i] = eqcons_prime[i](x)
+
             if fprime_ieqcons:
                 a_ieq = fprime_ieqcons(x)
             else:
                 a_ieq = zeros([mieq,n])
                 for i in range(mieq):
                     a_ieq[i] = ieqcons_prime[i](x)
-            
+
             # Now combine a_eq and a_ieq into a single a matrix
             if m == 0:
                 # no constraints
@@ -325,9 +325,9 @@
                 a = a_ieq
             elif meq > 0 and mieq > 0:
                 # both equality and inequality constraints exist
-                a = vstack((a_eq,a_ieq))                    
-            a = concatenate((a,zeros([la,1])),1)     
-        
+                a = vstack((a_eq,a_ieq))
+            a = concatenate((a,zeros([la,1])),1)
+
         # Call SLSQP
         slsqp(m, meq, x, xl, xu, fx, c, g, a, acc, majiter, mode, w, jw)
 

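The re-indented approx_jacobian above is a plain forward-difference scheme: perturb one component of x at a time by epsilon and difference against f(x). A standalone sketch with an illustrative test function, not part of the module:

import numpy as NP

def forward_diff_jacobian(x, func, epsilon):
    # one forward difference per component of x, as in approx_jacobian
    x0 = NP.asarray(x, dtype=float)
    f0 = NP.atleast_1d(func(x0))
    jac = NP.zeros((len(f0), len(x0)))
    dx = NP.zeros(len(x0))
    for i in range(len(x0)):
        dx[i] = epsilon
        jac[:, i] = (NP.atleast_1d(func(x0 + dx)) - f0) / epsilon
        dx[i] = 0.0
    return jac   # shape (lenf, lenx)

# f(x) = [x0*x1, x0**2] has exact Jacobian [[x1, x0], [2*x0, 0]]
J = forward_diff_jacobian([1.0, 2.0],
                          lambda x: NP.array([x[0]*x[1], x[0]**2]),
                          1.0e-8)
assert NP.allclose(J, [[2.0, 1.0], [2.0, 0.0]], atol=1.0e-5)
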
Modified: trunk/scipy/sandbox/numexpr/__init__.py
===================================================================
--- trunk/scipy/sandbox/numexpr/__init__.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sandbox/numexpr/__init__.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -1,4 +1,3 @@
 from scipy.sandbox.numexpr.info import __doc__
 from scipy.sandbox.numexpr.expressions import E
 from scipy.sandbox.numexpr.compiler import numexpr, disassemble, evaluate
-

Modified: trunk/scipy/sparse/base.py
===================================================================
--- trunk/scipy/sparse/base.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/base.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -170,10 +170,10 @@
             return '\n'.join( [ ('  %s\t%s' % t) for t in triples] )
 
         if nnz > maxprint:
-            half = maxprint // 2 
+            half = maxprint // 2
             out  = tostr(A.row[:half], A.col[:half], A.data[:half])
             out += "\n  :\t:\n"
-            half = maxprint - maxprint//2 
+            half = maxprint - maxprint//2
             out += tostr(A.row[-half:], A.col[-half:], A.data[-half:])
         else:
             out  = tostr(A.row, A.col, A.data)
@@ -230,7 +230,7 @@
 
     def __rsub__(self, other):  # other - self
         return self.tocsr().__rsub__(other)
-    
+
     def multiply(self, other):
         """Point-wise multiplication by another matrix
         """
@@ -251,7 +251,7 @@
     def __div__(self, other):
         # Always do true division
         return self.__truediv__(other)
-    
+
     def __neg__(self):
         return -self.tocsr()
 
@@ -278,7 +278,7 @@
             other = int(other)
             if other < 0:
                 raise ValueError,'exponent must be >= 0'
-            
+
             if other == 0:
                 from construct import identity
                 return identity( self.shape[0], dtype=self.dtype )
@@ -391,12 +391,12 @@
     def rmatvec(self, other, conjugate=True):
         """Multiplies the vector 'other' by the sparse matrix, returning a
         dense vector as a result.
-        
+
         If 'conjugate' is True:
             - returns A.transpose().conj() * other
         Otherwise:
             - returns A.transpose() * other.
-        
+
         """
         return self.tocsr().rmatvec(other, conjugate=conjugate)
 
@@ -420,7 +420,7 @@
 
     def todok(self):
         return self.tocoo().todok()
-    
+
     def tocoo(self):
         return self.tocsr().tocoo()
 
@@ -429,10 +429,10 @@
 
     def todia(self):
         return self.tocoo().todia()
-    
+
     def tobsr(self,blocksize=None):
         return self.tocsr().tobsr(blocksize=blocksize)
-    
+
     def copy(self):
         return self.__class__(self,copy=True)
 
@@ -472,7 +472,7 @@
             return self.sum(None) * 1.0 / (self.shape[0]*self.shape[1])
         else:
             raise ValueError, "axis out of bounds"
-    
+
     def diagonal(self):
         """Returns the main diagonal of the matrix
         """
@@ -526,4 +526,3 @@
     return _isinstance(x, spmatrix)
 
 issparse = isspmatrix
-

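The rmatvec docstring touched in base.py defines the contract through the transpose: with conjugate=True the result is A.transpose().conj() * other, otherwise A.transpose() * other. A dense NumPy illustration of that equivalence (the matrix values are arbitrary, and this is not the sparse code path itself):

import numpy as NP

A = NP.array([[1.0 + 2.0j, 0.0 ],
              [0.0,        3.0j],
              [4.0,        0.0 ]])        # shape (3, 2)
other = NP.array([1.0, 1.0, 1.0])         # length matches A's row count

conj_result  = NP.dot(A.conj().T, other)  # what rmatvec(other, conjugate=True) documents
plain_result = NP.dot(A.T, other)         # what rmatvec(other, conjugate=False) documents
assert NP.allclose(conj_result,  [5.0 - 2.0j, -3.0j])
assert NP.allclose(plain_result, [5.0 + 2.0j,  3.0j])
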
Modified: trunk/scipy/sparse/bsr.py
===================================================================
--- trunk/scipy/sparse/bsr.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/bsr.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -36,9 +36,9 @@
             where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
 
         bsr_matrix((data, indices, indptr), [shape=(M, N)])
-            is the standard BSR representation where the block column 
-            indices for row i are stored in ``indices[indptr[i]:indices[i+1]]`` 
-            and their corresponding block values are stored in 
+            is the standard BSR representation where the block column
+            indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``
+            and their corresponding block values are stored in
             ``data[ indptr[i]: indptr[i+1] ]``.  If the shape parameter is not
             supplied, the matrix dimensions are inferred from the index arrays.
 
@@ -49,20 +49,20 @@
         - The Block Compressed Row (BSR) format is very similar to the
           Compressed Sparse Row (CSR) format.  BSR is appropriate for
           sparse matrices with dense sub matrices like the last example
-          below.  Block matrices often arise in vector-valued finite 
+          below.  Block matrices often arise in vector-valued finite
           element discretizations.  In such cases, BSR is considerably
           more efficient than CSR and CSC for many sparse arithmetic
           operations.
 
     Blocksize
-        - The blocksize (R,C) must evenly divide the shape of 
+        - The blocksize (R,C) must evenly divide the shape of
           the matrix (M,N).  That is, R and C must satisfy the
           relationship M % R = 0 and N % C = 0.
         - If no blocksize is specified, a simple heuristic is applied
           to determine an appropriate blocksize.
-   
 
 
+
     Examples
     --------
 
@@ -80,7 +80,7 @@
     matrix([[1, 0, 2],
             [0, 0, 3],
             [4, 5, 6]])
-    
+
     >>> indptr  = array([0,2,3,6])
     >>> indices = array([0,2,2,0,1,2])
     >>> data    = array([1,2,3,4,5,6]).repeat(4).reshape(6,2,2)
@@ -91,19 +91,19 @@
             [0, 0, 0, 0, 3, 3],
             [4, 4, 5, 5, 6, 6],
             [4, 4, 5, 5, 6, 6]])
-    
+
     """
     def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None):
         _data_matrix.__init__(self)
 
-        
+
         if isspmatrix(arg1):
             if arg1.format == self.format and copy:
                 arg1 = arg1.copy()
             else:
                 arg1 = getattr(arg1,'to' + self.format)(blocksize=blocksize)
             self._set_self( arg1 )
-            
+
         elif isinstance(arg1,tuple):
             if isshape(arg1):
                 #it's a tuple of matrix dimensions (M,N)
@@ -118,13 +118,13 @@
                     blocksize = tuple(blocksize)
                 self.data   = zeros( (0,) + blocksize, getdtype(dtype, default=float) )
                 self.indices = zeros( 0, dtype=intc )
-                
+
                 R,C = blocksize
                 if (M % R) != 0 or (N % C) != 0:
                     raise ValueError, 'shape must be multiple of blocksize'
 
                 self.indptr  = zeros(M/R + 1, dtype=intc )
-            
+
             elif len(arg1) == 2:
                 # (data,(row,col)) format
                 from coo import coo_matrix
@@ -241,12 +241,12 @@
     def _get_blocksize(self):
         return self.data.shape[1:]
     blocksize = property(fget=_get_blocksize)
-    
+
     def getnnz(self):
         R,C = self.blocksize
         return self.indptr[-1] * R * C
     nnz = property(fget=getnnz)
-    
+
     def __repr__(self):
         nnz = self.getnnz()
         format = self.getformat()
@@ -275,7 +275,7 @@
 
     def __getitem__(self,key):
         raise NotImplementedError
-    
+
     def __setitem__(self,key,val):
         raise NotImplementedError
 
@@ -286,13 +286,13 @@
     def matvec(self, other, output=None):
         """Sparse matrix vector product (self * other)
 
-        'other' may be a rank 1 array of length N or a rank 2 array 
-        or matrix with shape (N,1).  
-        
+        'other' may be a rank 1 array of length N or a rank 2 array
+        or matrix with shape (N,1).
+
         If the optional 'output' parameter is defined, it will
         be used to store the result.  Otherwise, a new vector
         will be allocated.
-             
+
         """
         if isdense(other):
             M,N = self.shape
@@ -300,7 +300,7 @@
 
             if other.shape != (N,) and other.shape != (N,1):
                 raise ValueError, "dimension mismatch"
-    
+
             #output array
             if output is None:
                 y = zeros( self.shape[0], dtype=upcast(self.dtype,other.dtype) )
@@ -314,8 +314,8 @@
                             "dtype=%s is required" % \
                             (output.dtype,upcast(self.dtype,other.dtype))
                 y = output
-            
-            
+
+
             bsr_matvec(M/R, N/C, R, C, \
                 self.indptr, self.indices, ravel(self.data), ravel(other), y)
 
@@ -343,9 +343,9 @@
                 raise ValueError, "shape mismatch error"
 
             indptr = empty_like( self.indptr )
-            
+
             R,n = self.blocksize
-            
+
             #convert to this format
             if isspmatrix_bsr(other):
                 C = other.blocksize[1]
@@ -373,7 +373,7 @@
                     self.indptr,  self.indices,  ravel(self.data), \
                     other.indptr, other.indices, ravel(other.data), \
                     indptr,       indices,       data)
-            
+
             data = data.reshape(-1,R,C)
             #TODO eliminate zeros
 
@@ -386,11 +386,11 @@
         else:
             raise TypeError, "need a dense or sparse matrix"
 
-  
+
     ######################
     # Conversion methods #
     ######################
-    
+
     def tobsr(self,blocksize=None,copy=False):
         if blocksize not in [None, self.blocksize]:
             return self.tocsr().tobsr(blocksize=blocksize)
@@ -412,14 +412,14 @@
         When copy=False the data array will be shared between
         this matrix and the resultant coo_matrix.
         """
-        
+
         M,N = self.shape
         R,C = self.blocksize
 
         row  = (R * arange(M/R)).repeat(diff(self.indptr))
         row  = row.repeat(R*C).reshape(-1,R,C)
         row += tile( arange(R).reshape(-1,1), (1,C) )
-        row  = row.reshape(-1) 
+        row  = row.reshape(-1)
 
         col  = (C * self.indices).repeat(R*C).reshape(-1,R,C)
         col += tile( arange(C), (R,1) )
@@ -435,11 +435,11 @@
 
 
     def transpose(self):
-        
+
         R,C = self.blocksize
         M,N = self.shape
         NBLK = self.nnz/(R*C)
-        
+
         if self.nnz == 0:
             return bsr_matrix((N,M),blocksize=(C,R))
 
@@ -452,18 +452,18 @@
                       indptr,      indices,      data.ravel())
 
         return bsr_matrix( (data,indices,indptr), shape=(N,M) )
-    
-    
-    ############################################################## 
+
+
+    ##############################################################
     # methods that examine or modify the internal data structure #
     ##############################################################
-    
+
     def eliminate_zeros(self):
         R,C = self.blocksize
         M,N = self.shape
 
         mask = (self.data != 0).reshape(-1,R*C).sum(axis=1) #nonzero blocks
-       
+
         nonzero_blocks = mask.nonzero()[0]
 
         if len(nonzero_blocks) == 0:
@@ -476,7 +476,7 @@
         # modifies self.indptr and self.indices *in place*
         proxy = csr_matrix((mask,self.indices,self.indptr),shape=(M/R,N/C))
         proxy.eliminate_zeros()
-       
+
         self.prune()
 
 
@@ -505,17 +505,17 @@
 
         if len(self.indptr) != M/R + 1:
             raise ValueError, "index pointer has invalid length"
-        
+
         bnnz = self.indptr[-1]
 
-        if len(self.indices) < bnnz: 
+        if len(self.indices) < bnnz:
             raise ValueError, "indices array has too few elements"
         if len(self.data) < bnnz:
             raise ValueError, "data array has too few elements"
 
         self.data    = self.data[:bnnz]
         self.indices = self.indices[:bnnz]
-    
+
     # utility functions
     def _binopt(self, other, op, in_shape=None, out_shape=None):
         """apply the binary operation fn to two sparse matrices"""
@@ -525,15 +525,15 @@
             in_shape = self.shape
         if out_shape is None:
             out_shape = self.shape
-        
+
         self.sort_indices()
         other.sort_indices()
 
         # e.g. bsr_plus_bsr, etc.
         fn = getattr(sparsetools, self.format + op + self.format)
-        
-        R,C = self.blocksize 
 
+        R,C = self.blocksize
+
         max_bnnz = len(self.data) + len(other.data)
         indptr  = empty_like(self.indptr)
         indices = empty( max_bnnz, dtype=intc )
@@ -543,7 +543,7 @@
                 self.indptr,  self.indices,  ravel(self.data),
                 other.indptr, other.indices, ravel(other.data),
                 indptr,       indices,       data)
-        
+
         actual_bnnz = indptr[-1]
         indices = indices[:actual_bnnz]
         data    = data[:R*C*actual_bnnz]
@@ -570,7 +570,7 @@
                                    shape=self.shape,dtype=data.dtype)
 
 
-    
+
 #    # these functions are used by the parent class
 #    # to remove redundancy between bsc_matrix and bsr_matrix
 #    def _swap(self,x):
@@ -583,4 +583,3 @@
 
 def isspmatrix_bsr(x):
     return _isinstance(x, bsr_matrix)
-
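
A minimal sketch of the (data, indices, indptr) construction described in the bsr_matrix docstring above, with an explicit blocksize that evenly divides the matrix shape, followed by the matvec method defined in this file; it only uses calls shown in this module:

    from numpy import array, ones
    from scipy.sparse import bsr_matrix

    indptr  = array([0, 2, 3, 6])
    indices = array([0, 2, 2, 0, 1, 2])
    data    = array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)

    # blocksize (2,2) evenly divides the shape (6,6), as the docstring requires
    A = bsr_matrix((data, indices, indptr), shape=(6, 6))

    y = A.matvec(ones(6))    # sparse matrix * vector, same result as A * ones(6)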

Modified: trunk/scipy/sparse/compressed.py
===================================================================
--- trunk/scipy/sparse/compressed.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/compressed.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -1,4 +1,4 @@
-"""Base class for sparse matrix formats using compressed storage  
+"""Base class for sparse matrix formats using compressed storage
 """
 
 __all__ = []
@@ -20,14 +20,14 @@
 
 class _cs_matrix(_data_matrix):
     """base matrix class for compressed row and column oriented matrices"""
-    
+
     def __init__(self, arg1, shape=None, dtype=None, copy=False, dims=None, nzmax=None):
         _data_matrix.__init__(self)
 
         if dims is not None:
             warn("dims= is deprecated, use shape= instead", DeprecationWarning)
             shape=dims
-        
+
         if nzmax is not None:
             warn("nzmax= is deprecated", DeprecationWarning)
 
@@ -93,8 +93,8 @@
     def getnnz(self):
         return self.indptr[-1]
     nnz = property(fget=getnnz)
-    
-    
+
+
     def _set_self(self, other, copy=False):
         """take the member variables of other and assign them to self"""
 
@@ -105,7 +105,7 @@
         self.indices = other.indices
         self.indptr  = other.indptr
         self.shape   = other.shape
-    
+
     def check_format(self, full_check=True):
         """check whether the matrix format is valid
 
@@ -186,7 +186,7 @@
         elif isspmatrix(other):
             if (other.shape != self.shape):
                 raise ValueError, "inconsistent shapes"
-           
+
             return self._binopt(other,'_plus_')
         elif isdense(other):
             # Convert this matrix to a dense matrix and add them
@@ -254,12 +254,12 @@
         elif isspmatrix(other):
             if (other.shape != self.shape):
                 raise ValueError, "inconsistent shapes"
-            
+
             return self._binopt(other,'_eldiv_')
         else:
             raise NotImplementedError
 
-    
+
     def multiply(self, other):
         """Point-wise multiplication by another matrix
         """
@@ -281,15 +281,15 @@
 
             #return self._binopt(other,'mu',in_shape=(M,N),out_shape=(M,N))
 
-            major_axis = self._swap((M,N))[0]        
+            major_axis = self._swap((M,N))[0]
             indptr = empty( major_axis + 1, dtype=intc )
-            
+
             other = self.__class__(other) #convert to this format
             fn = getattr(sparsetools, self.format + '_matmat_pass1')
             fn( M, N, self.indptr, self.indices, \
                       other.indptr, other.indices, \
                       indptr)
-            
+
             nnz = indptr[-1]
             indices = empty( nnz, dtype=intc)
             data    = empty( nnz, dtype=upcast(self.dtype,other.dtype))
@@ -298,7 +298,7 @@
             fn( M, N, self.indptr, self.indices, self.data, \
                       other.indptr, other.indices, other.data, \
                       indptr, indices, data)
-            
+
             return self.__class__((data,indices,indptr),shape=(M,N))
 
 
@@ -313,9 +313,9 @@
     def matvec(self, other, output=None):
         """Sparse matrix vector product (self * other)
 
-        'other' may be a rank 1 array of length N or a rank 2 array 
-        or matrix with shape (N,1).  
-        
+        'other' may be a rank 1 array of length N or a rank 2 array
+        or matrix with shape (N,1).
+
         """
         #If the optional 'output' parameter is defined, it will
         #be used to store the result.  Otherwise, a new vector
@@ -329,7 +329,7 @@
 
             # csrmux, cscmux
             fn = getattr(sparsetools,self.format + '_matvec')
-    
+
             #output array
             y = zeros( self.shape[0], dtype=upcast(self.dtype,other.dtype) )
 
@@ -367,12 +367,12 @@
     def rmatvec(self, other, conjugate=True):
         """Multiplies the vector 'other' by the sparse matrix, returning a
         dense vector as a result.
-        
+
         If 'conjugate' is True:
             - returns A.transpose().conj() * other
         Otherwise:
             - returns A.transpose() * other.
-        
+
         """
         if conjugate:
             return self.transpose().conj().matvec( other )
@@ -382,7 +382,7 @@
     @deprecate
     def getdata(self, ind):
         return self.data[ind]
-    
+
     def diagonal(self):
         """Returns the main diagonal of the matrix
         """
@@ -407,12 +407,12 @@
     #######################
     # Getting and Setting #
     #######################
-    
+
     def __getitem__(self, key):
         if isinstance(key, tuple):
             row = key[0]
             col = key[1]
-           
+
             #TODO implement CSR[ [1,2,3], X ] with sparse matmat
             #TODO make use of sorted indices
 
@@ -434,8 +434,8 @@
             return self[key, :]
         else:
             raise IndexError, "invalid index"
-    
 
+
     def _get_single_element(self,row,col):
         M, N = self.shape
         if (row < 0):
@@ -444,7 +444,7 @@
             col += N
         if not (0<=row<M) or not (0<=col<N):
             raise IndexError, "index out of bounds"
-        
+
         major_index, minor_index = self._swap((row,col))
 
         start = self.indptr[major_index]
@@ -462,7 +462,7 @@
             raise ValueError,'nonzero entry (%d,%d) occurs more than once' % (row,col)
 
     def _get_slice(self, i, start, stop, stride, shape):
-        """Returns a copy of the elements 
+        """Returns a copy of the elements
             [i, start:stop:stride] for row-oriented matrices
             [start:stop:stride, i] for column-oriented matrices
         """
@@ -550,9 +550,9 @@
                 col += N
             if not (0<=row<M) or not (0<=col<N):
                 raise IndexError, "index out of bounds"
-        
+
             major_index, minor_index = self._swap((row,col))
-        
+
             start = self.indptr[major_index]
             end   = self.indptr[major_index+1]
             indxs = where(minor_index == self.indices[start:end])[0]
@@ -566,7 +566,7 @@
                         SparseEfficiencyWarning)
 
                 self.sort_indices()
-   
+
                 newindx = self.indices[start:end].searchsorted(minor_index)
                 newindx += start
 
@@ -595,7 +595,7 @@
 
     def todia(self):
         return self.tocoo(copy=False).todia()
-    
+
     def todok(self):
         return self.tocoo(copy=False).todok()
 
@@ -621,14 +621,14 @@
 
         from coo import coo_matrix
         return coo_matrix((data,(row,col)), self.shape)
-    
+
     def toarray(self):
         A = self.tocoo(copy=False)
         M = zeros(self.shape, dtype=self.dtype)
         M[A.row, A.col] = A.data
         return M
 
-    ############################################################## 
+    ##############################################################
     # methods that examine or modify the internal data structure #
     ##############################################################
 
@@ -663,7 +663,7 @@
         Returns
             - True: if the indices of the matrix are in sorted order
             - False: otherwise
-        
+
         """
 
         #first check to see if result was cached
@@ -692,7 +692,7 @@
     def sort_indices(self):
         """Sort the indices of this matrix *in place*
         """
-       
+
         if not self.has_sorted_indices:
             fn = sparsetools.csr_sort_indices
             fn( len(self.indptr) - 1, self.indptr, self.indices, self.data)
@@ -704,12 +704,12 @@
         warn('ensure_sorted_indices is deprecated, ' \
                 'use sorted_indices() or sort_indices() instead', \
                 DeprecationWarning)
-        
+
         if inplace:
             self.sort_indices()
         else:
             return self.sorted_indices()
-    
+
     def prune(self):
         """ Remove empty space after all non-zero elements.
         """
@@ -717,11 +717,11 @@
 
         if len(self.indptr) != major_dim + 1:
             raise ValueError, "index pointer has invalid length"
-        if len(self.indices) < self.nnz: 
+        if len(self.indices) < self.nnz:
             raise ValueError, "indices array has fewer than nnz elements"
         if len(self.data) < self.nnz:
             raise ValueError, "data array has fewer than nnz elements"
-        
+
         self.data    = self.data[:self.nnz]
         self.indices = self.indices[:self.nnz]
 
@@ -779,5 +779,3 @@
         A = self.__class__((data, indices, indptr), shape=out_shape)
         A.has_sorted_indices = True
         return A
-
-
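
A small sketch of the _cs_matrix interface cleaned up above (matvec/rmatvec, diagonal, and index sorting), exercised through the csr_matrix subclass; it only mirrors behaviour documented in this file:

    from numpy import array, allclose
    from scipy.sparse import csr_matrix

    A = csr_matrix(array([[1, 0, 2],
                          [0, 0, 3],
                          [4, 5, 6]]))
    x = array([1.0, 2.0, 3.0])

    assert allclose(A.matvec(x), A * x)            # self * other
    assert allclose(A.rmatvec(x), A.T.conj() * x)  # A.transpose().conj() * other
    assert (A.diagonal() == array([1, 0, 6])).all()

    A.sort_indices()                               # per-row indices now sorted
    assert A.has_sorted_indices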

Modified: trunk/scipy/sparse/construct.py
===================================================================
--- trunk/scipy/sparse/construct.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/construct.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -3,7 +3,7 @@
 
 __docformat__ = "restructuredtext en"
 
-__all__ = [ 'spdiags', 'eye', 'identity', 'kron', 'kronsum', 
+__all__ = [ 'spdiags', 'eye', 'identity', 'kron', 'kronsum',
             'hstack', 'vstack', 'bmat' ]
 
 
@@ -32,13 +32,13 @@
     Parameters
     ----------
         - data   : matrix whose rows contain the diagonal values
-        - diags  : diagonals to set 
+        - diags  : diagonals to set
             - k = 0 - the main diagonal
             - k > 0 - the k-th upper diagonal
             - k < 0 - the k-th lower diagonal
         - m, n   : dimensions of the result
         - format : format of the result (e.g. "csr")
-            -  By default (format=None) an appropriate sparse matrix 
+            -  By default (format=None) an appropriate sparse matrix
                format is returned.  This choice is subject to change.
 
     See Also
@@ -76,7 +76,7 @@
         return identity( n, dtype=dtype, format='csr').asformat(format)
 
 def eye(m, n, k=0, dtype='d', format=None):
-    """eye(m, n) returns a sparse (m x n) matrix where the k-th diagonal 
+    """eye(m, n) returns a sparse (m x n) matrix where the k-th diagonal
     is all ones and everything else is zeros.
     """
     diags = ones((1, m), dtype=dtype)
@@ -114,21 +114,21 @@
 
     """
     B = coo_matrix(B)
-    
+
     if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
         #B is fairly dense, use BSR
         A = csr_matrix(A,copy=True)
-        
+
         output_shape = (A.shape[0]*B.shape[0],A.shape[1]*B.shape[1])
 
         if A.nnz == 0 or B.nnz == 0:
             # kronecker product is the zero matrix
             return coo_matrix( output_shape )
-        
+
         B = B.toarray()
         data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
         data = data * B
-        
+
         return bsr_matrix((data,A.indices,A.indptr),shape=output_shape)
     else:
         #use COO
@@ -162,9 +162,9 @@
 def kronsum(A, B, format=None):
     """kronecker sum of sparse matrices A and B
 
-    Kronecker sum of two sparse matrices is a sum of two Kronecker 
+    Kronecker sum of two sparse matrices is a sum of two Kronecker
     products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)
-    and B has shape (n,n) and I_m and I_n are identity matrices 
+    and B has shape (n,n) and I_m and I_n are identity matrices
     of shape (m,m) and (n,n) respectively.
 
     Parameters
@@ -179,20 +179,20 @@
     Examples
     ========
 
-    
+
     """
     A = coo_matrix(A)
     B = coo_matrix(B)
 
     if A.shape[0] != A.shape[1]:
         raise ValueError('A is not square')
-    
+
     if B.shape[0] != B.shape[1]:
         raise ValueError('B is not square')
 
     dtype = upcast(A.dtype,B.dtype)
 
-    L = kron(identity(B.shape[0],dtype=dtype), A, format=format) 
+    L = kron(identity(B.shape[0],dtype=dtype), A, format=format)
     R = kron(B, identity(A.shape[0],dtype=dtype), format=format)
 
     return (L+R).asformat(format) #since L + R is not always same format
@@ -204,12 +204,12 @@
     Parameters
     ----------
 
-    blocks 
+    blocks
         sequence of sparse matrices with compatible shapes
     format : sparse format of the result (e.g. "csr")
         by default an appropriate sparse matrix format is returned.
         This choice is subject to change.
-   
+
     Example
     -------
 
@@ -235,7 +235,7 @@
     format : sparse format of the result (e.g. "csr")
         by default an appropriate sparse matrix format is returned.
         This choice is subject to change.
-   
+
     Example
     -------
 
@@ -263,7 +263,7 @@
     format : sparse format of the result (e.g. "csr")
         by default an appropriate sparse matrix format is returned.
         This choice is subject to change.
-   
+
     Example
     -------
 
@@ -275,7 +275,7 @@
     matrix([[1, 2, 5],
             [3, 4, 6],
             [0, 0, 7]])
- 
+
     >>> bmat( [[A,None],[None,C]] ).todense()
     matrix([[1, 2, 0],
             [3, 4, 0],
@@ -283,7 +283,7 @@
 
 
     """
-    
+
     blocks = asarray(blocks, dtype='object')
 
     if rank(blocks) != 2:
@@ -321,8 +321,8 @@
         raise ValueError('blocks[%d,:] is all None' % brow_lengths.argmin() )
     if bcol_lengths.min() == 0:
         raise ValueError('blocks[:,%d] is all None' % bcol_lengths.argmin() )
-    
-    nnz = sum([ A.nnz for A in blocks[block_mask] ]) 
+
+    nnz = sum([ A.nnz for A in blocks[block_mask] ])
     if dtype is None:
         dtype = upcast( *tuple([A.dtype for A in blocks[block_mask]]) )
 
@@ -344,10 +344,10 @@
 
                 row[nnz:nnz + A.nnz] += row_offsets[i]
                 col[nnz:nnz + A.nnz] += col_offsets[j]
-                
+
                 nnz += A.nnz
 
-    shape = (sum(brow_lengths),sum(bcol_lengths)) 
+    shape = (sum(brow_lengths),sum(bcol_lengths))
     return coo_matrix( (data, (row, col)), shape=shape ).asformat(format)
 
 
@@ -371,7 +371,7 @@
 
     Parameters
     ----------
-    
+
     r,c : int
         row and column-dimensions of the output.
     k : int
@@ -434,4 +434,3 @@
             out.rows[c-k].append(c)
             out.data[c-k].append(diag[ix])
     return out
-
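
To accompany the construction helpers documented above, a brief sketch of spdiags and kron using only the documented signatures:

    from numpy import array
    from scipy.sparse import spdiags, kron, identity

    data  = array([[1, 2, 3, 4],
                   [1, 2, 3, 4],
                   [1, 2, 3, 4]])
    diags = array([0, -1, 2])
    A = spdiags(data, diags, 4, 4)     # main, first lower, second upper diagonal

    # kron(identity(2), A) repeats A along the block diagonal
    K = kron(identity(2), A)
    assert K.shape == (8, 8)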

Modified: trunk/scipy/sparse/coo.py
===================================================================
--- trunk/scipy/sparse/coo.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/coo.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -5,7 +5,7 @@
 __all__ = ['coo_matrix', 'isspmatrix_coo']
 
 from itertools import izip
-from warnings import warn 
+from warnings import warn
 
 from numpy import array, asarray, empty, intc, zeros,  \
         unique, searchsorted, atleast_2d, empty_like, rank, \
@@ -39,7 +39,7 @@
                 2. ij[0][:]  the row indices of the matrix entries
                 3. ij[1][:]  the column indices of the matrix entries
 
-            Where ``A[ij[0][k], ij[1][k] = data[k]``.  When shape is 
+            Where ``A[ij[0][k], ij[1][k]] = data[k]``.  When shape is
             not specified, it is inferred from the index arrays
 
 
@@ -50,7 +50,7 @@
         - facilitates fast conversion among sparse formats
         - permits duplicate entries (see example)
         - very fast conversion to and from CSR/CSC formats
-    
+
     Disadvantages of the COO format
         - does not directly support:
             + arithmetic operations
@@ -62,10 +62,10 @@
     --------------
 
         - COO is a fast format for constructing sparse matrices
-        - Once a matrix has been constructed, convert to CSR or 
+        - Once a matrix has been constructed, convert to CSR or
           CSC format for fast arithmetic and matrix vector operations
-        - By default when converting to CSR or CSC format, duplicate (i,j) 
-          entries will be summed together.  This facilitates efficient 
+        - By default when converting to CSR or CSC format, duplicate (i,j)
+          entries will be summed together.  This facilitates efficient
           construction of finite element matrices and the like. (see example)
 
 
@@ -97,7 +97,7 @@
             [0, 2, 0, 0],
             [0, 0, 0, 0],
             [0, 0, 0, 1]])
-    
+
     """
 
     def __init__(self, arg1, shape=None, dtype=None, copy=False, dims=None):
@@ -170,7 +170,7 @@
                     M = atleast_2d(asarray(arg1))
                 except:
                     raise TypeError, "invalid input format"
-    
+
                 if len(M.shape) != 2:
                     raise TypeError, "expected rank <= 2 array or matrix"
                 self.shape = M.shape
@@ -202,7 +202,7 @@
         if self.col.dtype.kind != 'i':
             warn("col index array has non-integer dtype (%s) " \
                     % self.col.dtype.name )
-       
+
         # only support 32-bit ints for now
         self.row  = asarray(self.row,dtype=intc)
         self.col  = asarray(self.col,dtype=intc)
@@ -228,7 +228,7 @@
     @deprecate
     def getdata(self, num):
         return self.data[num]
-    
+
     def transpose(self,copy=False):
         M,N = self.shape
         return coo_matrix((self.data,(self.col,self.row)),(N,M),copy=copy)
@@ -242,7 +242,7 @@
     def tocsc(self,sum_duplicates=True):
         """Return a copy of this matrix in Compressed Sparse Column format
 
-            By default sum_duplicates=True and any duplicate 
+            By default sum_duplicates=True and any duplicate
             matrix entries are added together.
 
         """
@@ -266,7 +266,7 @@
     def tocsr(self,sum_duplicates=True):
         """Return a copy of this matrix in Compressed Sparse Row format
 
-            By default sum_duplicates=True and any duplicate 
+            By default sum_duplicates=True and any duplicate
             matrix entries are added together.
 
         """
@@ -286,8 +286,8 @@
             if sum_duplicates:
                 A.sum_duplicates()
             return A
-    
 
+
     def tocoo(self, copy=False):
         if copy:
             return self.copy()
@@ -297,7 +297,7 @@
     def todia(self):
         from dia import dia_matrix
 
-        ks = self.col - self.row  #the diagonal for each nonzero          
+        ks = self.col - self.row  #the diagonal for each nonzero
         diags = unique(ks)
 
         if len(diags) > 100:
@@ -316,7 +316,7 @@
 
         dok = dok_matrix((self.shape),dtype=self.dtype)
 
-        dok.update( izip(izip(self.row,self.col),self.data) ) 
+        dok.update( izip(izip(self.row,self.col),self.data) )
 
         return dok
 
@@ -339,4 +339,3 @@
 
 def isspmatrix_coo( x ):
     return _isinstance(x, coo_matrix)
-
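
A short sketch of the duplicate-entry behaviour described in the coo_matrix docstring above: duplicate (i, j) entries are summed when converting to CSR/CSC, which is what makes COO convenient for finite element assembly:

    from numpy import array
    from scipy.sparse import coo_matrix

    row  = array([0, 0, 1, 2, 2])
    col  = array([0, 0, 1, 2, 2])
    data = array([1, 2, 3, 4, 5])
    A = coo_matrix((data, (row, col)), shape=(3, 3))

    B = A.tocsr()            # duplicate entries at (0,0) and (2,2) are summed
    assert B[0, 0] == 3 and B[2, 2] == 9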

Modified: trunk/scipy/sparse/csc.py
===================================================================
--- trunk/scipy/sparse/csc.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/csc.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -37,8 +37,8 @@
             where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
 
         csc_matrix((data, indices, indptr), [shape=(M, N)])
-            is the standard CSC representation where the row indices for 
-            column i are stored in ``indices[indptr[i]:indices[i+1]]`` and their 
+            is the standard CSC representation where the row indices for
+            column i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their
             corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``.
             If the shape parameter is not supplied, the matrix dimensions
             are inferred from the index arrays.
@@ -49,7 +49,7 @@
         - efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
         - efficient column slicing
         - fast matrix vector products (CSR, BSR may be faster)
-    
+
     Disadvantages of the CSC format
     -------------------------------
       - slow row slicing operations (consider CSR)
@@ -114,7 +114,7 @@
             return self.copy()
         else:
             return self
-    
+
     def tocsr(self):
         indptr  = empty(self.shape[0] + 1, dtype=intc)
         indices = empty(self.nnz, dtype=intc)
@@ -137,11 +137,11 @@
             col = key[1]
 
             if isintlike(row) or isinstance(row, slice):
-                return self.T[col,row].T                
-            else:    
+                return self.T[col,row].T
+            else:
                 #[[1,2],??] or [[[1],[2]],??]
                 if isintlike(col) or isinstance(col,slice):
-                    return self.T[col,row].T                
+                    return self.T[col,row].T
                 else:
                     row = asarray(row, dtype='intc')
                     col = asarray(col, dtype='intc')
@@ -150,10 +150,10 @@
                     elif len(row.shape) == 2:
                         row = row.reshape(-1)
                         col = col.reshape(-1,1)
-                        return self.T[col,row].T                
+                        return self.T[col,row].T
                     else:
                         raise NotImplementedError('unsupported indexing')
-                        
+
             return self.T[col,row].T
         elif isintlike(key) or isinstance(key,slice):
             return self.T[:,key].T                              #[i] or [1:2]
@@ -173,4 +173,3 @@
 
 def isspmatrix_csc(x):
     return _isinstance(x, csc_matrix)
-
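
A minimal sketch of the (data, indices, indptr) form described in the csc_matrix docstring above, following the column-i convention indices[indptr[i]:indptr[i+1]]:

    from numpy import array
    from scipy.sparse import csc_matrix

    indptr  = array([0, 2, 3, 6])
    indices = array([0, 2, 2, 0, 1, 2])
    data    = array([1, 2, 3, 4, 5, 6])
    A = csc_matrix((data, indices, indptr), shape=(3, 3))
    # column 0 holds rows [0, 2] -> values [1, 2]; A.todense() gives
    # [[1 0 4]
    #  [0 0 5]
    #  [2 3 6]]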

Modified: trunk/scipy/sparse/csr.py
===================================================================
--- trunk/scipy/sparse/csr.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/csr.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -39,8 +39,8 @@
             where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
 
         csr_matrix((data, indices, indptr), [shape=(M, N)])
-            is the standard CSR representation where the column indices for 
-            row i are stored in ``indices[indptr[i]:indices[i+1]]`` and their 
+            is the standard CSR representation where the column indices for
+            row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their
             corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``.
             If the shape parameter is not supplied, the matrix dimensions
             are inferred from the index arrays.
@@ -51,7 +51,7 @@
       - efficient arithmetic operations CSR + CSR, CSR * CSR, etc.
       - efficient row slicing
       - fast matrix vector products
-    
+
     Disadvantages of the CSR format
       - slow column slicing operations (consider CSC)
       - changes to the sparsity structure are expensive (consider LIL or DOK)
@@ -73,7 +73,7 @@
     matrix([[1, 0, 2],
             [0, 0, 3],
             [4, 5, 6]])
-    
+
     >>> indptr = array([0,2,3,6])
     >>> indices = array([0,2,2,0,1,2])
     >>> data = array([1,2,3,4,5,6])
@@ -108,9 +108,9 @@
     def tolil(self):
         from lil import lil_matrix
         lil = lil_matrix(self.shape,dtype=self.dtype)
-     
+
         self.sort_indices() #lil_matrix needs sorted rows
-        
+
         ptr,ind,dat = self.indptr,self.indices,self.data
         rows, data  = lil.rows, lil.data
 
@@ -149,7 +149,7 @@
             from spfuncs import estimate_blocksize
             return self.tobsr(blocksize=estimate_blocksize(self))
         elif blocksize == (1,1):
-            arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr)  
+            arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr)
             return bsr_matrix( arg1, shape=self.shape, copy=copy )
         else:
             R,C = blocksize
@@ -168,7 +168,7 @@
                     indptr, indices, data.ravel() )
 
             return bsr_matrix( (data,indices,indptr), shape=self.shape )
-    
+
     # these functions are used by the parent class (_cs_matrix)
     # to remove redundancy between csc_matrix and csr_matrix
     def _swap(self,x):
@@ -191,7 +191,7 @@
             slicing of the form self[[1,2,3],:]
             """
             indices = asindices(indices)
-            
+
             max_indx = indices.max()
 
             if max_indx >= N:
@@ -200,7 +200,7 @@
             min_indx = indices.min()
             if min_indx < -N:
                 raise IndexError('index (%d) out of range' % (N + min_indx))
-            
+
             if min_indx < 0:
                 indices = indices.copy()
                 indices[indices < 0] += N
@@ -210,8 +210,8 @@
             shape   = (len(indices),N)
 
             return csr_matrix( (data,indices,indptr), shape=shape)
-            
 
+
         if isinstance(key, tuple):
             row = key[0]
             col = key[1]
@@ -225,7 +225,7 @@
                 else:
                     P = extractor(col,self.shape[1]).T        #[i,[1,2]]
                     return self[row,:]*P
-                    
+
             elif isinstance(row, slice):
                 #[1:2,??]
                 if isintlike(col) or isinstance(col, slice):
@@ -233,16 +233,16 @@
                 else:
                     P = extractor(col,self.shape[1]).T        #[1:2,[1,2]]
                     return self[row,:]*P
-                
-            else:    
+
+            else:
                 #[[1,2],??] or [[[1],[2]],??]
                 if isintlike(col) or isinstance(col,slice):
-                    P = extractor(row, self.shape[0])        #[[1,2],j] or [[1,2],1:2]   
-                    return (P*self)[:,col]                   
+                    P = extractor(row, self.shape[0])        #[[1,2],j] or [[1,2],1:2]
+                    return (P*self)[:,col]
 
                 else:
-                    row = asindices(row)                     
-                    col = asindices(col) 
+                    row = asindices(row)
+                    col = asindices(col)
                     if len(row.shape) == 1:
                         if len(row) != len(col):             #[[1,2],[1,2]]
                             raise IndexError('number of row and column indices differ')
@@ -250,23 +250,23 @@
                         for i,j in zip(row,col):
                             val.append(self._get_single_element(i,j))
                         return asmatrix(val)
-                    
+
                     elif len(row.shape) == 2:
                         row = ravel(row)                    #[[[1],[2]],[1,2]]
-                        P = extractor(row, self.shape[0])   
-                        return (P*self)[:,col]               
+                        P = extractor(row, self.shape[0])
+                        return (P*self)[:,col]
 
                     else:
                         raise NotImplementedError('unsupported indexing')
-                        
+
         elif isintlike(key) or isinstance(key,slice):
             return self[key,:]                                #[i] or [1:2]
         else:
             return self[asindices(key),:]                     #[[1,2]]
-    
 
+
     def _get_single_element(self,row,col):
-        """Returns the single element self[row, col] 
+        """Returns the single element self[row, col]
         """
         M, N = self.shape
         if (row < 0):
@@ -275,9 +275,9 @@
             col += N
         if not (0<=row<M) or not (0<=col<N):
             raise IndexError("index out of bounds")
-            
+
         #TODO make use of sorted indices (if present)
-        
+
         start = self.indptr[row]
         end   = self.indptr[row+1]
         indxs = where(col == self.indices[start:end])[0]
@@ -293,13 +293,13 @@
             raise ValueError('nonzero entry (%d,%d) occurs more than once' % (row,col) )
 
     def _get_row_slice(self, i, cslice ):
-        """Returns a copy of row self[i, cslice] 
+        """Returns a copy of row self[i, cslice]
         """
         if i < 0:
             i += self.shape[0]
 
         if i < 0 or i >= self.shape[0]:
-            raise IndexError('index (%d) out of range' % i ) 
+            raise IndexError('index (%d) out of range' % i )
 
         start, stop, stride = cslice.indices(self.shape[1])
 
@@ -375,4 +375,3 @@
 
 def isspmatrix_csr(x):
     return _isinstance(x, csr_matrix)
-
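
To illustrate the row-oriented indexing paths reworked above, a small sketch of csr_matrix slicing and single-element access, limited to operations that __getitem__ above already supports:

    from numpy import array
    from scipy.sparse import csr_matrix

    A = csr_matrix(array([[1, 0, 2],
                          [0, 0, 3],
                          [4, 5, 6]]))

    assert A[2, 1] == 5            # single element lookup
    r = A[1, :]                    # cheap row slice, stays sparse
    s = A[[0, 2], :]               # row selection via the extractor helper
    assert r.shape == (1, 3) and s.shape == (2, 3)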

Modified: trunk/scipy/sparse/data.py
===================================================================
--- trunk/scipy/sparse/data.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/data.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -1,5 +1,5 @@
 """Base class for sparse matrice with a .data attribute
-    
+
     subclasses must provide a _with_data() method that
     creates a new matrix with the same sparsity pattern
     as self but with a different data array
@@ -31,7 +31,7 @@
 
     def _imag(self):
         return self._with_data(self.data.imag)
-    
+
     def __neg__(self):
         return self._with_data(-self.data)
 
@@ -41,7 +41,7 @@
             return self
         else:
             raise NotImplementedError
-    
+
     def __itruediv__(self, other): #self /= other
         if isscalarlike(other):
             recip = 1.0 / other
@@ -55,7 +55,6 @@
 
     def conj(self, copy=False):
         return self._with_data(self.data.conj(),copy=copy)
-    
+
     def copy(self):
         return self._with_data(self.data.copy(),copy=True)
-
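
A tiny sketch of the _with_data contract stated in the module docstring above: unary operations such as negation keep the sparsity pattern and only replace the data array (shown through csr_matrix, one of the subclasses):

    from numpy import array
    from scipy.sparse import csr_matrix

    A = csr_matrix(array([[1.0, 0.0],
                          [0.0, -2.0]]))
    B = -A                       # routed through _data_matrix.__neg__ / _with_data

    assert (B.indptr == A.indptr).all() and (B.indices == A.indices).all()
    assert (B.data == -A.data).all()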

Modified: trunk/scipy/sparse/dia.py
===================================================================
--- trunk/scipy/sparse/dia.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/dia.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -42,7 +42,7 @@
     matrix([[0, 0, 0, 0],
             [0, 0, 0, 0],
             [0, 0, 0, 0]], dtype=int8)
-    
+
     >>> data = array([[1,2,3,4]]).repeat(3,axis=0)
     >>> diags = array([0,-1,2])
     >>> dia_matrix( (data,diags), shape=(4,4)).todense()
@@ -66,7 +66,7 @@
             if isspmatrix_dia(arg1) and copy:
                 A = arg1.copy()
             else:
-                A = arg1.todia() 
+                A = arg1.todia()
             self.data  = A.data
             self.diags = A.diags
             self.shape = A.shape
@@ -114,7 +114,7 @@
             raise ValueError,'number of diagonals (%d) ' \
                     'does not match the number of diags (%d)' \
                     % (self.data.shape[0], len(self.diags))
-        
+
         if len(unique(self.diags)) != len(self.diags):
             raise ValueError,'offset array contains duplicate values'
 
@@ -178,9 +178,9 @@
 
         L = self.data.shape[1]
         M,N = self.shape
-       
+
         dia_matvec(M,N, len(self.diags), L, self.diags, self.data, x.ravel(), y.ravel())
-        
+
         if isinstance(other, matrix):
             y = asmatrix(y)
 
@@ -207,20 +207,20 @@
     def tocoo(self):
         num_data = len(self.data)
         len_data = self.data.shape[1]
-        
+
         row = arange(len_data).reshape(1,-1).repeat(num_data,axis=0)
         col = row.copy()
 
         for i,k in enumerate(self.diags):
             row[i,:] -= k
-        
-        mask  = (row >= 0) 
-        mask &= (row < self.shape[0]) 
+
+        mask  = (row >= 0)
+        mask &= (row < self.shape[0])
         mask &= (col < self.shape[1])
         mask &= self.data != 0
         row,col,data = row[mask],col[mask],self.data[mask]
         row,col,data = row.reshape(-1),col.reshape(-1),data.reshape(-1)
-       
+
         from coo import coo_matrix
         return coo_matrix((data,(row,col)),shape=self.shape)
 
@@ -240,5 +240,3 @@
 
 def isspmatrix_dia(x):
     return _isinstance(x, dia_matrix)
-
-
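
A compact sketch of the (data, diags) construction from the dia_matrix examples above together with the tocoo() conversion touched in this hunk; diagonal entries that fall outside the matrix are masked out during conversion:

    from numpy import array
    from scipy.sparse import dia_matrix

    data  = array([[1, 2, 3, 4]]).repeat(3, axis=0)
    diags = array([0, -1, 2])
    A = dia_matrix((data, diags), shape=(4, 4))

    C = A.tocoo()
    assert (C.todense() == A.todense()).all()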

Modified: trunk/scipy/sparse/dok.py
===================================================================
--- trunk/scipy/sparse/dok.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/dok.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -14,7 +14,7 @@
 
 class dok_matrix(spmatrix, dict):
     """Dictionary Of Keys based matrix.  This is an efficient
-    structure for constructing sparse matrices 
+    structure for constructing sparse matrices
     """
     def __init__(self, A=None, shape=None, dtype=None, copy=False):
         """ Create a new dictionary-of-keys sparse matrix.  An optional
@@ -565,4 +565,3 @@
 
 def isspmatrix_dok(x):
     return _isinstance(x, dok_matrix)
-
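
A brief sketch of the dictionary-of-keys workflow the docstring above refers to: build entry by entry, then convert to a compressed format for arithmetic:

    from scipy.sparse import dok_matrix

    A = dok_matrix((4, 4))
    A[0, 0] = 1.0
    A[2, 3] = 5.0                # cheap incremental updates
    B = A.tocsr()                # convert once construction is finished
    assert B.nnz == 2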

Modified: trunk/scipy/sparse/info.py
===================================================================
--- trunk/scipy/sparse/info.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/info.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -88,8 +88,8 @@
 ---------------
 
 CSR column indices are not necessarily sorted.  Likewise for CSC row
-indices.  Use the .sorted_indices() and .sort_indices() methods when 
-sorted indices are required (e.g. when passing data to other libraries). 
+indices.  Use the .sorted_indices() and .sort_indices() methods when
+sorted indices are required (e.g. when passing data to other libraries).
 
 """
 

Modified: trunk/scipy/sparse/lil.py
===================================================================
--- trunk/scipy/sparse/lil.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/lil.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -1,4 +1,4 @@
-"""LInked List sparse matrix class 
+"""LInked List sparse matrix class
 """
 
 __docformat__ = "restructuredtext en"
@@ -18,7 +18,7 @@
 class lil_matrix(spmatrix):
     """Row-based linked list matrix
 
-    
+
     This can be instantiated in several ways:
         lil_matrix(D)
             with a dense matrix or rank-2 ndarray D
@@ -39,21 +39,21 @@
     Advantages of the LIL format
         - supports flexible slicing
         - changes to the matrix sparsity structure are efficient
-        
+
     Disadvantages of the LIL format
         - arithmetic operations LIL + LIL are slow (consider CSR or CSC)
         - slow column slicing (consider CSC)
         - matrix vector products are slower than CSR/CSC
-        
+
     Intended Usage
-        - LIL is a convenient format for constructing sparse matrices 
-        - once a matrix has been constructed, convert to CSR or 
+        - LIL is a convenient format for constructing sparse matrices
+        - once a matrix has been constructed, convert to CSR or
           CSC format for fast arithmetic and matrix vector operations
         - consider using the COO format when constructing large matrices
-   
+
     Data Structure
         - An array (``self.rows``) of rows, each of which is a sorted
-          list of column indices of non-zero elements. 
+          list of column indices of non-zero elements.
         - The corresponding nonzero values are stored in similar
           fashion in ``self.data``.
 
@@ -80,7 +80,7 @@
             for i in range(M):
                 self.rows[i] = []
                 self.data[i] = []
-        elif isspmatrix(A):                    
+        elif isspmatrix(A):
             if isspmatrix_lil(A) and copy:
                 A = A.copy()
             else:
@@ -111,7 +111,7 @@
             else:
                 from csr import csr_matrix
                 A = csr_matrix(A).tolil()
-                
+
                 self.shape = A.shape
                 self.dtype = A.dtype
                 self.rows  = A.rows
@@ -427,12 +427,12 @@
     def tocsr(self):
         """ Return Compressed Sparse Row format arrays for this matrix.
         """
-        
+
         indptr = asarray([len(x) for x in self.rows], dtype=intc)
         indptr = concatenate( ( array([0],dtype=intc), cumsum(indptr) ) )
-        
+
         nnz = indptr[-1]
-        
+
         indices = []
         for x in self.rows:
             indices.extend(x)
@@ -456,4 +456,3 @@
 
 def isspmatrix_lil( x ):
     return _isinstance(x, lil_matrix)
-
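
Following the intended-usage notes in the docstring above, a minimal sketch of the LIL workflow: incremental construction followed by conversion with the tocsr() method modified here:

    from scipy.sparse import lil_matrix

    A = lil_matrix((4, 4))
    A[0, 0] = 1
    A[1, 1] = 2
    A[1, 2] = 3                  # cheap changes to the sparsity structure
    B = A.tocsr()                # fast arithmetic / matvec from here on
    assert B.nnz == 3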

Modified: trunk/scipy/sparse/linalg/dsolve/linsolve.py
===================================================================
--- trunk/scipy/sparse/linalg/dsolve/linsolve.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/dsolve/linsolve.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -78,7 +78,7 @@
         if A.dtype.char not in 'dD':
             raise ValueError, "convert matrix data to double, please, using"\
                   " .astype(), or set linsolve.useUmfpack = False"
-        
+
         b = asarray(b, dtype=A.dtype).reshape(-1)
 
         family = {'d' : 'di', 'D' : 'zi'}
@@ -116,7 +116,7 @@
 
     A.sort_indices()
     A = A.asfptype()  #upcast to a floating point format
-    
+
     M, N = A.shape
     if (M != N):
         raise ValueError, "can only factor square matrices" #is this true?
@@ -160,4 +160,3 @@
         return solve
     else:
         return splu( A ).solve
-
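
A short sketch of the spsolve/factorized entry points touched above; the imports assume the public scipy.sparse.linalg namespace re-exports them, otherwise scipy.sparse.linalg.dsolve.linsolve can be imported directly:

    from numpy import array, allclose
    from scipy.sparse import csc_matrix
    from scipy.sparse.linalg import spsolve, factorized

    A = csc_matrix(array([[3.0, 0.0, 1.0],
                          [0.0, 4.0, 0.0],
                          [1.0, 0.0, 2.0]]))
    b = array([1.0, 2.0, 3.0])

    x = spsolve(A, b)
    assert allclose(A * x, b)

    solve = factorized(A)        # factor once, reuse for many right-hand sides
    assert allclose(solve(b), x)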

Modified: trunk/scipy/sparse/linalg/dsolve/setupscons.py
===================================================================
--- trunk/scipy/sparse/linalg/dsolve/setupscons.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/dsolve/setupscons.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -6,7 +6,7 @@
     from numpy.distutils.misc_util import Configuration
     from numpy.distutils.system_info import get_info
 
-    config = Configuration('dsolve',parent_package,top_path, 
+    config = Configuration('dsolve',parent_package,top_path,
                            setup_name = 'setupscons.py')
 
     config.add_sconscript('SConstruct')

Modified: trunk/scipy/sparse/linalg/dsolve/tests/test_linsolve.py
===================================================================
--- trunk/scipy/sparse/linalg/dsolve/tests/test_linsolve.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/dsolve/tests/test_linsolve.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -29,16 +29,15 @@
 
         for t in ['f','d','F','D']:
             eps = finfo(t).eps #floating point epsilon
-            b = b.astype(t) 
+            b = b.astype(t)
 
             for format in ['csc','csr']:
                 Asp = A.astype(t).asformat(format)
 
                 x = spsolve(Asp,b)
-                
+
                 assert( norm(b - Asp*x) < 10 * cond_A * eps )
-                
 
+
 if __name__ == "__main__":
     nose.run(argv=['', __file__])
-

Modified: trunk/scipy/sparse/linalg/dsolve/umfpack/tests/test_umfpack.py
===================================================================
--- trunk/scipy/sparse/linalg/dsolve/umfpack/tests/test_umfpack.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/dsolve/umfpack/tests/test_umfpack.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -25,7 +25,7 @@
     _have_umfpack = False
 else:
     _have_umfpack = um.umfpack._um is not None
-    
+
 # Allow disabling of nose tests if umfpack not present
 # See end of file for application
 _umfpack_skip = dec.skipif(not _have_umfpack,

Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py
===================================================================
--- trunk/scipy/sparse/linalg/eigen/arpack/arpack.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/eigen/arpack/arpack.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -9,26 +9,26 @@
 #
 # ARPACK Entry Points
 # -------------------
-# The entry points to ARPACK are 
+# The entry points to ARPACK are
 # - (s,d)seupd : single and double precision symmetric matrix
 # - (s,d,c,z)neupd: single,double,complex,double complex general matrix
-# This wrapper puts the *neupd (general matrix) interfaces in eigen() 
-# and the *seupd (symmetric matrix) in eigen_symmetric().  
+# This wrapper puts the *neupd (general matrix) interfaces in eigen()
+# and the *seupd (symmetric matrix) in eigen_symmetric().
 # There is no Hermitian complex/double complex interface.
 # To find eigenvalues of a Hermitian matrix you
 # must use eigen() and not eigen_symmetric()
 # It might be desirable to handle the Hermitian case differently
-# and, for example, return real eigenvalues. 
+# and, for example, return real eigenvalues.
 
 # Number of eigenvalues returned and complex eigenvalues
 # ------------------------------------------------------
 # The ARPACK nonsymmetric real and double interfaces (s,d)naupd return
-# eigenvalues and eigenvectors in real (float,double) arrays.  
+# eigenvalues and eigenvectors in real (float,double) arrays.
 # Since the eigenvalues and eigenvectors are, in general, complex
 # ARPACK puts the real and imaginary parts in consecutive entries
 # in real-valued arrays.   This wrapper puts the real entries
 # into complex data types and attempts to return the requested eigenvalues
-# and eigenvectors.  
+# and eigenvectors.
 
 
 # Solver modes
@@ -52,7 +52,7 @@
 
 
 def eigen(A, k=6, M=None, sigma=None, which='LM', v0=None,
-          ncv=None, maxiter=None, tol=0, 
+          ncv=None, maxiter=None, tol=0,
           return_eigenvectors=True):
     """Find k eigenvalues and eigenvectors of the square matrix A.
 
@@ -76,7 +76,7 @@
         Array of k eigenvalues
 
     v : array
-       An array of k eigenvectors 
+       An array of k eigenvectors
        v[i] is the eigenvector corresponding to the eigenvalue w[i]
 
     Other Parameters
@@ -92,10 +92,10 @@
         Find eigenvalues near sigma.  Shift spectrum by sigma.
 
     v0 : array
-        Starting vector for iteration.  
+        Starting vector for iteration.
 
     ncv : integer
-        The number of Lanczos vectors generated 
+        The number of Lanczos vectors generated
         ncv must be greater than k; it is recommended that ncv > 2*k
 
     which : string
@@ -107,7 +107,7 @@
          - 'LI' : largest imaginary part
          - 'SI' : smallest imaginary part
 
-    maxiter : integer 
+    maxiter : integer
         Maximum number of Arnoldi update iterations allowed
 
     tol : float
@@ -117,7 +117,7 @@
         Return eigenvectors (True) in addition to eigenvalues
 
     See Also
-    --------    
+    --------
     eigen_symmetric : eigenvalues and eigenvectors for symmetric matrix A
 
     Notes
@@ -149,7 +149,7 @@
     ncv=min(ncv,n)
     if maxiter==None:
         maxiter=n*10
-    # assign starting vector        
+    # assign starting vector
     if v0 is not None:
         resid=v0
         info=1
@@ -244,12 +244,12 @@
                    workd,workl,info)
 
         # The ARPACK nonsymmetric real and double interfaces (s,d)naupd return
-        # eigenvalues and eigenvectors in real (float,double) arrays.  
+        # eigenvalues and eigenvectors in real (float,double) arrays.
 
         # Build complex eigenvalues from real and imaginary parts
         d=dr+1.0j*di
 
-        # Arrange the eigenvectors: complex eigenvectors are stored as 
+        # Arrange the eigenvectors: complex eigenvectors are stored as
         # real,imaginary in consecutive columns
         z=zr.astype(typ.upper())
         eps=np.finfo(typ).eps
@@ -263,7 +263,7 @@
                 z[:,i+1]=z[:,i].conjugate()
                 i+=1
             i+=1
-                      
+
         # Now we have k+1 possible eigenvalues and eigenvectors
         # Return the ones specified by the keyword "which"
         nreturned=iparam[4] # number of good eigenvalues returned
@@ -274,10 +274,10 @@
             # cut at approx precision for sorting
             rd=np.round(d,decimals=_ndigits[typ])
             if which in ['LR','SR']:
-                ind=np.argsort(rd.real) 
+                ind=np.argsort(rd.real)
             elif which in ['LI','SI']:
                 # for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
-                ind=np.argsort(abs(rd.imag)) 
+                ind=np.argsort(abs(rd.imag))
             else:
                 ind=np.argsort(abs(rd))
             if which in ['LR','LM','LI']:
@@ -306,9 +306,9 @@
 
 
 def eigen_symmetric(A, k=6, M=None, sigma=None, which='LM', v0=None,
-                    ncv=None, maxiter=None, tol=0, 
+                    ncv=None, maxiter=None, tol=0,
                     return_eigenvectors=True):
-    """Find k eigenvalues and eigenvectors of the real symmetric 
+    """Find k eigenvalues and eigenvectors of the real symmetric
     square matrix A.
 
     Solves A * x[i] = w[i] * x[i], the standard eigenvalue problem for
@@ -331,7 +331,7 @@
         Array of k eigenvalues
 
     v : array
-       An array of k eigenvectors 
+       An array of k eigenvectors
        v[i] is the eigenvector corresponding to the eigenvalue w[i]
 
     Other Parameters
@@ -345,12 +345,12 @@
     sigma : real
         (Not implemented)
         Find eigenvalues near sigma.  Shift spectrum by sigma.
-    
+
     v0 : array
-        Starting vector for iteration.  
+        Starting vector for iteration.
 
     ncv : integer
-        The number of Lanczos vectors generated 
+        The number of Lanczos vectors generated
         ncv must be greater than k; it is recommended that ncv > 2*k
 
     which : string
@@ -359,10 +359,10 @@
          - 'SA' : Smallest (algebraic) eigenvalues
          - 'LM' : Largest (in magnitude) eigenvalues
          - 'SM' : Smallest (in magnitude) eigenvalues
-         - 'BE' : Half (k/2) from each end of the spectrum  
+         - 'BE' : Half (k/2) from each end of the spectrum
                   When k is odd, return one more (k/2+1) from the high end
 
-    maxiter : integer 
+    maxiter : integer
         Maximum number of Arnoldi update iterations allowed
 
     tol : float
@@ -372,7 +372,7 @@
         Return eigenvectors (True) in addition to eigenvalues
 
     See Also
-    --------    
+    --------
     eigen : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
 
     Notes
@@ -401,7 +401,7 @@
     ncv=min(ncv,n)
     if maxiter==None:
         maxiter=n*10
-    # assign starting vector        
+    # assign starting vector
     if v0 is not None:
         resid=v0
         info=1
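
A minimal sketch of calling the eigen_symmetric wrapper documented above, imported from this module's path; it assumes the operator provides a matvec method, which the sparse matrix types in this package do:

    from numpy import arange, diag, allclose, sort
    from scipy.sparse import csr_matrix
    from scipy.sparse.linalg.eigen.arpack.arpack import eigen_symmetric

    A = csr_matrix(diag(arange(1.0, 21.0)))      # eigenvalues 1, 2, ..., 20
    w, v = eigen_symmetric(A, k=4, which='LM')   # four largest in magnitude
    assert allclose(sort(w), arange(17.0, 21.0))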

Modified: trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py
===================================================================
--- trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -30,7 +30,7 @@
 
 
 
-# precision for tests 
+# precision for tests
 _ndigits = {'f':4, 'd':12, 'F':4, 'D':12}
 
 class TestArpack(TestCase):
@@ -70,7 +70,7 @@
 
         self.nonsymmetric.append(N1)
 
-    
+
 class TestEigenSymmetric(TestArpack):
 
     def get_exact_eval(self,d,typ,k,which):
@@ -82,7 +82,7 @@
         if which=='SM':
             return eval[:k]
         if which=='BE':
-            # one ev from each end - if k is odd, extra ev on high end 
+            # one ev from each end - if k is odd, extra ev on high end
             l=k/2
             h=k/2+k%2
             low=range(len(eval))[:l]
@@ -114,8 +114,8 @@
             n=A.shape[0]
             v0 = random.rand(n).astype(typ)
             self.eval_evec(self.symmetric[0],typ,k,which='LM',v0=v0)
-            
-    
+
+
 class TestEigenComplexSymmetric(TestArpack):
 
     def sort_choose(self,eval,typ,k,which):
@@ -156,17 +156,17 @@
 #                 self.eval_evec(self.symmetric[0],typ,k,which)
 
 
-    
+
 class TestEigenNonSymmetric(TestArpack):
 
 
     def sort_choose(self,eval,typ,k,which):
         reval=round(eval,decimals=_ndigits[typ])
         if which in ['LR','SR']:
-            ind=argsort(reval.real) 
+            ind=argsort(reval.real)
         elif which in ['LI','SI']:
             # for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
-            ind=argsort(abs(reval.imag)) 
+            ind=argsort(abs(reval.imag))
         else:
             ind=argsort(abs(reval))
 
@@ -222,7 +222,7 @@
         eps=finfo(typ).eps
         reval=round(eval,decimals=_ndigits[typ])
         if which in ['LR','SR']:
-            ind=argsort(reval) 
+            ind=argsort(reval)
         elif which in ['LI','SI']:
             ind=argsort(reval.imag)
         else:

Modified: trunk/scipy/sparse/linalg/eigen/lobpcg/info.py
===================================================================
--- trunk/scipy/sparse/linalg/eigen/lobpcg/info.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/eigen/lobpcg/info.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -36,14 +36,14 @@
         y = invA  * x
         if sp.issparse( y ):
             y = y.toarray()
-    
+
         return as2d( y )
 
 
 >>>
 >>> # Alternative way of providing the same preconditioner.
 >>> #precond = spdiags( ivals, 0, n, n )
->>> 
+>>>
 >>> tt = time.clock()
 >>> eigs, vecs = lobpcg( X, operatorA, operatorB, blockVectorY = Y,
 >>>                      operatorT = precond,
@@ -57,20 +57,20 @@
 Notes
 -----
 
-In the following ``n`` denotes the matrix size and ``m`` the number 
+In the following ``n`` denotes the matrix size and ``m`` the number
 of required eigenvalues (smallest or largest).
 
 The LOBPCG code internally solves eigenproblems of the size 3``m`` on every
-iteration by calling the "standard" dense eigensolver, so if ``m`` is not 
-small enough compared to ``n``, it does not make sense to call the LOBPCG 
-code, but rather one should use the "standard" eigensolver, e.g. scipy or symeig 
-function in this case. If one calls the LOBPCG algorithm for 5``m``>``n``, 
+iteration by calling the "standard" dense eigensolver, so if ``m`` is not
+small enough compared to ``n``, it does not make sense to call the LOBPCG
+code, but rather one should use the "standard" eigensolver, e.g. scipy or symeig
+function in this case. If one calls the LOBPCG algorithm for 5``m``>``n``,
 it will most likely break internally, so the code tries to call the standard
 function instead.
 
 It is not that n should be large for the LOBPCG to work, but rather the
 ratio ``n``/``m`` should be large. If you call the LOBPCG code with ``m``=1
-and ``n``=10, it should work, though ``n`` is small. The method is intended 
+and ``n``=10, it should work, though ``n`` is small. The method is intended
 for extremely large ``n``/``m``, see e.g., reference [28] in
 http://arxiv.org/abs/0705.2626
 
@@ -81,9 +81,9 @@
 
 2.  How well conditioned the problem is. This can be changed by using proper
     preconditioning. For example, a rod vibration test problem (under tests
-    directory) is ill-conditioned for large ``n``, so convergence will be 
+    directory) is ill-conditioned for large ``n``, so convergence will be
     slow, unless efficient preconditioning is used. For this specific problem,
-    a good simple preconditioner function would be a linear solve for A, which 
+    a good simple preconditioner function would be a linear solve for A, which
     is easy to code since A is tridiagonal.
 
 

Modified: trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py
===================================================================
--- trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -83,8 +83,8 @@
         return aux
 
 def makeOperator( operatorInput, expectedShape ):
-    """Internal. Takes a dense numpy array or a sparse matrix or 
-    a function and makes an operator performing matrix * blockvector 
+    """Internal. Takes a dense numpy array or a sparse matrix or
+    a function and makes an operator performing matrix * blockvector
     products.
 
     Example
@@ -102,7 +102,7 @@
 
     if operator.shape != expectedShape:
         raise ValueError('operator has invalid shape')
-    
+
     operator.__call__ = operator.matmat
 
     return operator
@@ -146,14 +146,14 @@
             retLambdaHistory = False, retResidualNormsHistory = False ):
     """Solve symmetric partial eigenproblems with optional preconditioning
 
-    This function implements the Locally Optimal Block Preconditioned 
+    This function implements the Locally Optimal Block Preconditioned
     Conjugate Gradient Method (LOBPCG).
 
     TODO write in terms of Ax=lambda B x
 
     Parameters
     ----------
-    blockVectorX : array_like 
+    blockVectorX : array_like
         initial approximation to eigenvectors shape=(n,blockSize)
     A : {dense matrix, sparse matrix, LinearOperator}
         the linear operator of the problem, usually a sparse matrix
@@ -163,7 +163,7 @@
     -------
     (lambda,blockVectorV) : tuple of arrays
         blockVectorX and lambda are computed blockSize eigenpairs, where
-        blockSize=size(blockVectorX,2) for the initial guess blockVectorX 
+        blockSize=size(blockVectorX,2) for the initial guess blockVectorX
         if it is full rank.
 
     Optional Parameters
@@ -177,7 +177,7 @@
         M should approximate the inverse of A
     blockVectorY : array_like
         n-by-sizeY matrix of constraints, sizeY < n
-        The iterations will be performed in the B-orthogonal complement 
+        The iterations will be performed in the B-orthogonal complement
         of the column-space of blockVectorY. blockVectorY must be full rank.
 
     Other Parameters
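
As the makeOperator docstring above states, an operator handled by it may be
given as a dense numpy array, a sparse matrix, or a callable performing the
matrix * blockvector product; a short sketch of the three equivalent forms
(the matrix below is an illustrative assumption):

    import numpy as np
    from scipy.sparse import spdiags

    n, blockSize = 40, 3
    vals = np.ones(n)

    A_sparse = spdiags([-vals, 2.0 * vals, -vals], [-1, 0, 1], n, n)  # sparse form
    A_dense  = np.asarray(A_sparse.todense())                         # dense form

    def A_callable(X):
        # callable form: maps an (n, k) block to the (n, k) block A*X
        return np.asarray(A_sparse * X)

    # Any of the three forms can serve as the operator A; the initial guess
    # blockVectorX has shape (n, blockSize), as documented above.
    blockVectorX = np.random.rand(n, blockSize)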

Modified: trunk/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py
===================================================================
--- trunk/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -7,7 +7,7 @@
 
 from scipy import array, arange, ones, sort, cos, pi, rand, \
      set_printoptions, r_, diag, linalg
-from scipy.linalg import eig     
+from scipy.linalg import eig
 from scipy.sparse.linalg.eigen import lobpcg
 
 
@@ -29,7 +29,7 @@
     return A,B
 
 def MikotaPair(n):
-    # Mikota pair acts as a nice test since the eigenvalues 
+    # Mikota pair acts as a nice test since the eigenvalues
     # are the squares of the integers n, n=1,2,...
     x = arange(1,n+1)
     B = diag(1./x)
@@ -46,10 +46,10 @@
 
     V = rand(n,m)
     X = linalg.orth(V)
-    
+
     eigs,vecs = lobpcg.lobpcg(X,A,B,residualTolerance=1e-5, maxIterations=30)
     eigs.sort()
-    
+
     #w,v = symeig(A,B)
     w,v = eig(A,b=B)
     w.sort()
@@ -65,19 +65,19 @@
     #show()
 
 def test_Small():
-    A,B = ElasticRod(10) 
+    A,B = ElasticRod(10)
     compare_solutions(A,B,10)
-    A,B = MikotaPair(10) 
+    A,B = MikotaPair(10)
     compare_solutions(A,B,10)
 
 def test_ElasticRod():
-    A,B = ElasticRod(100) 
+    A,B = ElasticRod(100)
     compare_solutions(A,B,20)
 
 def test_MikotaPair():
-    A,B = MikotaPair(100) 
+    A,B = MikotaPair(100)
     compare_solutions(A,B,20)
-        
 
+
 if __name__ == "__main__":
     nose.run(argv=['', __file__])

Modified: trunk/scipy/sparse/linalg/eigen/setup.py
===================================================================
--- trunk/scipy/sparse/linalg/eigen/setup.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/eigen/setup.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -2,12 +2,12 @@
 
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
-    
+
     config = Configuration('eigen',parent_package,top_path)
-    
+
     config.add_subpackage(('arpack'))
     config.add_subpackage(('lobpcg'))
-    
+
     return config
 
 if __name__ == '__main__':

Modified: trunk/scipy/sparse/linalg/eigen/setupscons.py
===================================================================
--- trunk/scipy/sparse/linalg/eigen/setupscons.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/eigen/setupscons.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -2,12 +2,12 @@
 
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
-    
+
     config = Configuration('eigen',parent_package,top_path, setup_name = 'setupscons.py')
-    
+
     config.add_subpackage(('arpack'))
     config.add_subpackage(('lobpcg'))
-    
+
     return config
 
 if __name__ == '__main__':

Modified: trunk/scipy/sparse/linalg/isolve/__init__.py
===================================================================
--- trunk/scipy/sparse/linalg/isolve/__init__.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/isolve/__init__.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -8,4 +8,3 @@
 from scipy.testing.pkgtester import Tester
 test = Tester().test
 bench = Tester().bench
-

Modified: trunk/scipy/sparse/linalg/isolve/iterative.py
===================================================================
--- trunk/scipy/sparse/linalg/isolve/iterative.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/isolve/iterative.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -322,7 +322,7 @@
     n = len(b)
     if maxiter is None:
         maxiter = n*10
-    
+
     matvec = A.matvec
     psolve = M.matvec
     ltr = _type_conv[x.dtype.char]

Modified: trunk/scipy/sparse/linalg/isolve/minres.py
===================================================================
--- trunk/scipy/sparse/linalg/isolve/minres.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/isolve/minres.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -7,8 +7,8 @@
 
 def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None, xtype=None,
            M=None, callback=None, show=False, check=False):
-    """Use the Minimum Residual Method (MINRES) to solve Ax=b 
-    
+    """Use the Minimum Residual Method (MINRES) to solve Ax=b
+
     MINRES minimizes norm(A*x - b) for the symmetric matrix A.  Unlike
     the Conjugate Gradient method, A can be indefinite or singular.
 
@@ -21,7 +21,7 @@
 
     References
     ==========
-        
+
         Solution of sparse indefinite systems of linear equations,
             C. C. Paige and M. A. Saunders (1975),
             SIAM J. Numer. Anal. 12(4), pp. 617-629.
@@ -30,7 +30,7 @@
         This file is a translation of the following MATLAB implementation:
             http://www.stanford.edu/group/SOL/software/minres/matlab/
 
-    """ 
+    """
     A,M,x,b,postprocess = make_system(A,M,x0,b,xtype)
 
     matvec = A.matvec
@@ -57,38 +57,38 @@
             ' M  does not define a symmetric matrix             ',   #  8
             ' M  does not define a pos-def preconditioner       ']   #  9
 
-     
+
     if show:
         print first + 'Solution of symmetric Ax = b'
         print first + 'n      =  %3g     shift  =  %23.14e'  % (n,shift)
         print first + 'itnlim =  %3g     rtol   =  %11.2e'   % (maxiter,tol)
         print
-        
+
     istop = 0;   itn   = 0;   Anorm = 0;    Acond = 0;
-    rnorm = 0;   ynorm = 0; 
+    rnorm = 0;   ynorm = 0;
 
     xtype = A.dtype #TODO update
 
-    eps = finfo(xtype).eps 
+    eps = finfo(xtype).eps
 
     x = zeros( n, dtype=xtype )
 
     # Set up y and v for the first Lanczos vector v1.
     # y  =  beta1 P' v1,  where  P = C**(-1).
     # v is really P' v1.
-    
+
     y  = b
     r1 = b
 
     y = psolve(b)
-    
+
     beta1 = inner(b,y)
 
     if beta1 < 0:
         raise ValueError('indefinite preconditioner')
     elif beta1 == 0:
         return (postprocess(x), 0)
-    
+
     beta1 = sqrt( beta1 )
 
     if check:
@@ -112,8 +112,8 @@
         epsa = (s + eps) * eps**(1.0/3.0)
         if z > epsa:
             raise ValueError('non-symmetric preconditioner')
-        
 
+
     # Initialize other quantities
     oldb   = 0;          beta   = beta1;   dbar   = 0;       epsln  = 0;
     qrnorm = beta1;      phibar = beta1;   rhs1   = beta1;
@@ -124,22 +124,22 @@
     r2     = r1
 
     if show:
-      print
-      print
-      print '   Itn     x(1)     Compatible    LS       norm(A)  cond(A) gbar/|A|'
+        print
+        print
+        print '   Itn     x(1)     Compatible    LS       norm(A)  cond(A) gbar/|A|'
 
     while itn < maxiter:
         itn += 1
 
-        s = 1.0/beta 
+        s = 1.0/beta
         v = s*y
-        
+
         y  = matvec(v)
         y  = y - shift * v
 
         if itn >= 2:
             y  = y - (beta/oldb)*r1
-        
+
         alfa   = inner(v,y)
         y      = y - (alfa/beta)*r2
         r1     = r2
@@ -149,11 +149,11 @@
         beta   = inner(r2,y)
         if beta < 0:
             raise ValueError('non-symmetric matrix')
-        beta    = sqrt(beta) 
+        beta    = sqrt(beta)
         tnorm2 += alfa**2 + oldb**2 + beta**2
 
         if itn == 1:
-            if beta/beta1 <= 10*eps: 
+            if beta/beta1 <= 10*eps:
                 istop = -1  # Terminate later
             #tnorm2 = alfa**2 ??
             gmax = abs(alfa)
@@ -172,33 +172,33 @@
         Arnorm = phibar * root
 
         # Compute the next plane rotation Qk
-        
+
         gamma  = norm([gbar, beta])       # gammak
-        gamma  = max(gamma, eps) 
+        gamma  = max(gamma, eps)
         cs     = gbar / gamma             # ck
         sn     = beta / gamma             # sk
         phi    = cs * phibar              # phik
         phibar = sn * phibar              # phibark+1
-        
+
         # Update  x.
-        
+
         denom = 1.0/gamma
         w1    = w2
         w2    = w
         w     = (v - oldeps*w1 - delta*w2) * denom
         x     = x + phi*w
-        
+
         # Go round again.
-        
+
         gmax   = max(gmax, gamma)
         gmin   = min(gmin, gamma)
         z      = rhs1 / gamma
         ynorm2 = z**2  +  ynorm2
         rhs1   = rhs2 -  delta*z
         rhs2   =      -  epsln*z
-        
+
         # Estimate various norms and test for convergence.
-        
+
         Anorm  = sqrt( tnorm2 )
         ynorm  = sqrt( ynorm2 )
         epsa   = Anorm * eps
@@ -207,7 +207,7 @@
         diag   = gbar
 
         if diag == 0: diag = epsa
-        
+
         qrnorm = phibar
         rnorm  = qrnorm
         test1  = rnorm / (Anorm*ynorm)    #  ||r|| / (||A|| ||x||)
@@ -218,18 +218,18 @@
         # factorization of the lower Hessenberg matrix,  Q * H = R,
         # where H is the tridiagonal matrix from Lanczos with one
         # extra row, beta(k+1) e_k^T.
-        
+
         Acond  = gmax/gmin
-        
+
         # See if any of the stopping criteria are satisfied.
         # In rare cases, istop is already -1 from above (Abar = const*I).
-        
+
         if istop == 0:
             t1 = 1 + test1      # These tests work if tol < eps
             t2 = 1 + test2
             if t2    <= 1       : istop = 2
             if t1    <= 1       : istop = 1
-            
+
             if itn   >= maxiter : istop = 6
             if Acond >= 0.1/eps : istop = 4
             if epsx  >= beta1   : istop = 3
@@ -237,9 +237,9 @@
             #if rnorm <= epsr   : istop = 1
             if test2 <= tol     : istop = 2
             if test1 <= tol     : istop = 1
-    
+
         # See if it is time to print something.
-        
+
         prnt = False
         if n        <= 40         : prnt = True
         if itn      <= 10         : prnt = True
@@ -249,25 +249,25 @@
         if qrnorm   <= 10*epsr    : prnt = True
         if Acond    <= 1e-2/eps   : prnt = True
         if istop  !=  0           : prnt = True
-        
+
         if show and prnt:
             str1 = '%6g %12.5e %10.3e'  % (itn, x[0], test1)
             str2 = ' %10.3e'            % (test2,)
             str3 = ' %8.1e %8.1e %8.1e' % (Anorm, Acond, gbar/Anorm)
 
             print str1 + str2 + str3
-        
+
             if itn % 10 == 0: print
 
         if callback is not None:
             callback(x)
 
         if istop != 0: break #TODO check this
-        
 
+
     if show:
         print
-        print last + ' istop   =  %3g               itn   =%5g' % (istop,itn) 
+        print last + ' istop   =  %3g               itn   =%5g' % (istop,itn)
         print last + ' Anorm   =  %12.4e      Acond =  %12.4e'  % (Anorm,Acond)
         print last + ' rnorm   =  %12.4e      ynorm =  %12.4e'  % (rnorm,ynorm)
         print last + ' Arnorm  =  %12.4e'                       %  (Arnorm,)
@@ -297,4 +297,3 @@
     b = 0*ones( A.shape[0] )
     x = minres(A,b,tol=1e-12,maxiter=None,callback=cb)
     #x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0]
-
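
A minimal usage sketch matching this docstring: MINRES on a symmetric
indefinite system, here a mixed-sign diagonal matrix in the spirit of the
RandDiag case in test_iterative.py. The top-level import path and the
specific matrix are assumptions:

    import numpy as np
    from scipy.sparse import spdiags
    from scipy.sparse.linalg import minres   # defined in isolve/minres.py

    n = 100
    d = np.arange(1, n + 1, dtype=float) - n / 2.0 - 0.25   # mixed signs, no zeros
    A = spdiags(d, 0, n, n)      # symmetric and indefinite, so CG would not apply
    b = np.ones(n)

    x, info = minres(A, b, tol=1e-10)
    assert info == 0                            # converged, as asserted in the tests
    assert np.linalg.norm(b - A * x) < 1e-6 * np.linalg.norm(b)   # loose residual check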

Modified: trunk/scipy/sparse/linalg/isolve/tests/test_iterative.py
===================================================================
--- trunk/scipy/sparse/linalg/isolve/tests/test_iterative.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/isolve/tests/test_iterative.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -41,7 +41,7 @@
         self.solvers.append( (gmres,    False, False) )
         self.solvers.append( (qmr,      False, False) )
         self.solvers.append( (minres,   True,  False) )
-        
+
         # list of tuples (A, symmetric, positive_definite )
         self.cases = []
 
@@ -51,7 +51,7 @@
         # Symmetric and Negative Definite
         self.cases.append( (-Poisson1D,True,False) )
 
-        # Symmetric and Indefinite 
+        # Symmetric and Indefinite
         self.cases.append( (RandDiag,True,False) )
 
         # Non-symmetric and Positive Definite
@@ -78,12 +78,12 @@
                 x0 = 0*b
 
                 x, info = solver(A, b, x0=x0, tol=tol)
-                
+
                 assert_array_equal(x0, 0*b) #ensure that x0 is not overwritten
                 assert_equal(info,0)
 
                 assert( norm(b - A*x) < tol*norm(b) )
-    
+
     def test_precond(self):
         """test whether all methods accept a preconditioner"""
 
@@ -107,7 +107,7 @@
                 x0 = 0*b
 
                 x, info = solver(A, b, x0=x0, tol=tol)
-                
+
                 assert_equal(info,0)
                 assert( norm(b - A*x) < tol*norm(b) )
 
@@ -127,7 +127,7 @@
 
         L = spdiags([-dat/2, dat], [-1,0], n, n)
         U = spdiags([4*dat, -dat], [ 0,1], n, n)
-        
+
         L_solver = splu(L)
         U_solver = splu(U)
 
@@ -144,7 +144,7 @@
         M2 = LinearOperator( (n,n), matvec=U_solve, rmatvec=UT_solve )
 
         x,info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2)
-       
+
         assert_equal(info,0)
         assert( norm(b - A*x) < 1e-8*norm(b) )
 

Modified: trunk/scipy/sparse/linalg/isolve/utils.py
===================================================================
--- trunk/scipy/sparse/linalg/isolve/utils.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/isolve/utils.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -23,7 +23,7 @@
 
 def make_system(A, M, x0, b, xtype=None):
     """Make a linear system Ax=b
-    
+
     Parameters:
         A - LinearOperator
             - sparse or dense matrix (or any valid input to aslinearoperator)
@@ -39,7 +39,7 @@
 
     Returns:
         (A, M, x, b, postprocess) where:
-            - A is a LinearOperator 
+            - A is a LinearOperator
             - M is a LinearOperator
             - x is the initial guess (rank 1 array)
             - b is the rhs (rank 1 array)
@@ -54,7 +54,7 @@
         raise ValueError('expected square matrix (shape=%s)' % shape)
 
     N = A.shape[0]
-    
+
     b = asanyarray(b)
 
     if not (b.shape == (N,1) or b.shape == (N,)):
@@ -111,5 +111,3 @@
             raise ValueError('matrix and preconditioner have different shapes')
 
     return A, M, x, b, postprocess
-
-
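
Each solver in this package consumes the returned tuple along the lines of
the minres code above (A, M, x, b, postprocess = make_system(A, M, x0, b,
xtype)). A naive sketch of that calling pattern, with a plain Richardson
update standing in for a real Krylov iteration (the solver itself is purely
illustrative):

    import numpy as np
    from scipy.sparse.linalg.isolve.utils import make_system

    def toy_richardson(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None):
        # Same entry/exit pattern as the real solvers in isolve/.
        A, M, x, b, postprocess = make_system(A, M, x0, b, xtype)
        if maxiter is None:
            maxiter = 10 * len(b)
        matvec, psolve = A.matvec, M.matvec
        info = maxiter
        for it in range(maxiter):
            r = b - matvec(x)
            if np.linalg.norm(r) <= tol * np.linalg.norm(b):
                info = 0
                break
            x = x + psolve(r)       # naive preconditioned Richardson step
        return postprocess(x), info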

Modified: trunk/scipy/sparse/linalg/setup.py
===================================================================
--- trunk/scipy/sparse/linalg/setup.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/setup.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -2,15 +2,15 @@
 
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
-    
+
     config = Configuration('linalg',parent_package,top_path)
-    
+
     config.add_subpackage(('isolve'))
     config.add_subpackage(('dsolve'))
     config.add_subpackage(('eigen'))
-    
+
     config.add_data_dir('tests')
-    
+
     return config
 
 if __name__ == '__main__':

Modified: trunk/scipy/sparse/linalg/setupscons.py
===================================================================
--- trunk/scipy/sparse/linalg/setupscons.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/setupscons.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -2,16 +2,16 @@
 
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
-    
-    config = Configuration('linalg',parent_package,top_path, 
+
+    config = Configuration('linalg',parent_package,top_path,
                            setup_name = 'setupscons.py')
-    
+
     config.add_subpackage(('isolve'))
     config.add_subpackage(('dsolve'))
     config.add_subpackage(('eigen'))
-    
+
     config.add_data_dir('tests')
-    
+
     return config
 
 if __name__ == '__main__':

Modified: trunk/scipy/sparse/linalg/tests/test_interface.py
===================================================================
--- trunk/scipy/sparse/linalg/tests/test_interface.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/linalg/tests/test_interface.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -34,7 +34,7 @@
                 return array([ 1*x[0] + 4*x[1],
                                2*x[0] + 5*x[1],
                                3*x[0] + 6*x[1]])
-               
+
         cases.append( matlike() )
 
 

Modified: trunk/scipy/sparse/setupscons.py
===================================================================
--- trunk/scipy/sparse/setupscons.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/setupscons.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -7,7 +7,7 @@
     import numpy
     from numpy.distutils.misc_util import Configuration
 
-    config = Configuration('sparse',parent_package,top_path, 
+    config = Configuration('sparse',parent_package,top_path,
                            setup_name = 'setupscons.py')
 
     config.add_data_dir('tests')

Modified: trunk/scipy/sparse/sparsetools/__init__.py
===================================================================
--- trunk/scipy/sparse/sparsetools/__init__.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/sparsetools/__init__.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -6,4 +6,3 @@
 from coo import *
 from dia import *
 from bsr import *
-

Modified: trunk/scipy/sparse/sparsetools/bsr.py
===================================================================
--- trunk/scipy/sparse/sparsetools/bsr.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/sparsetools/bsr.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -51,442 +51,441 @@
 
 
 def bsr_diagonal(*args):
-  """
-    bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        signed char Ax, signed char Yx)
-    bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned char Ax, unsigned char Yx)
-    bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        short Ax, short Yx)
-    bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned short Ax, unsigned short Yx)
-    bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        int Ax, int Yx)
-    bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned int Ax, unsigned int Yx)
-    bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        long long Ax, long long Yx)
-    bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned long long Ax, unsigned long long Yx)
-    bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        float Ax, float Yx)
-    bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        double Ax, double Yx)
-    bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        long double Ax, long double Yx)
-    bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_cfloat_wrapper Ax, npy_cfloat_wrapper Yx)
-    bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_cdouble_wrapper Ax, npy_cdouble_wrapper Yx)
-    bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_clongdouble_wrapper Ax, npy_clongdouble_wrapper Yx)
     """
-  return _bsr.bsr_diagonal(*args)
+      bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          signed char Ax, signed char Yx)
+      bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned char Ax, unsigned char Yx)
+      bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          short Ax, short Yx)
+      bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned short Ax, unsigned short Yx)
+      bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          int Ax, int Yx)
+      bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned int Ax, unsigned int Yx)
+      bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          long long Ax, long long Yx)
+      bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned long long Ax, unsigned long long Yx)
+      bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          float Ax, float Yx)
+      bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          double Ax, double Yx)
+      bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          long double Ax, long double Yx)
+      bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_cfloat_wrapper Ax, npy_cfloat_wrapper Yx)
+      bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_cdouble_wrapper Ax, npy_cdouble_wrapper Yx)
+      bsr_diagonal(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_clongdouble_wrapper Ax, npy_clongdouble_wrapper Yx)
+      """
+    return _bsr.bsr_diagonal(*args)
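
These generated wrappers are normally reached through the bsr_matrix class
rather than called directly; a hedged illustration of that high-level entry
point follows (the 4x4 matrix and 2x2 blocksize are assumptions for the
example):

    import numpy as np
    from scipy.sparse import bsr_matrix

    dense = np.array([[1., 0., 2., 0.],
                      [0., 3., 0., 4.],
                      [5., 0., 6., 0.],
                      [0., 7., 0., 8.]])
    A = bsr_matrix(dense, blocksize=(2, 2))

    # A matrix-vector product on a BSR matrix dispatches to the bsr_matvec
    # overload listed further below that matches the data's dtype (double here).
    y = A * np.ones(4)      # expected: array([  3.,   7.,  11.,  15.])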
 
 def bsr_scale_rows(*args):
-  """
-    bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        signed char Ax, signed char Xx)
-    bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned char Ax, unsigned char Xx)
-    bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        short Ax, short Xx)
-    bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned short Ax, unsigned short Xx)
-    bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        int Ax, int Xx)
-    bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned int Ax, unsigned int Xx)
-    bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        long long Ax, long long Xx)
-    bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned long long Ax, unsigned long long Xx)
-    bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        float Ax, float Xx)
-    bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        double Ax, double Xx)
-    bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        long double Ax, long double Xx)
-    bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_cfloat_wrapper Ax, npy_cfloat_wrapper Xx)
-    bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_cdouble_wrapper Ax, npy_cdouble_wrapper Xx)
-    bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_clongdouble_wrapper Ax, npy_clongdouble_wrapper Xx)
     """
-  return _bsr.bsr_scale_rows(*args)
+      bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          signed char Ax, signed char Xx)
+      bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned char Ax, unsigned char Xx)
+      bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          short Ax, short Xx)
+      bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned short Ax, unsigned short Xx)
+      bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          int Ax, int Xx)
+      bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned int Ax, unsigned int Xx)
+      bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          long long Ax, long long Xx)
+      bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned long long Ax, unsigned long long Xx)
+      bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          float Ax, float Xx)
+      bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          double Ax, double Xx)
+      bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          long double Ax, long double Xx)
+      bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_cfloat_wrapper Ax, npy_cfloat_wrapper Xx)
+      bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_cdouble_wrapper Ax, npy_cdouble_wrapper Xx)
+      bsr_scale_rows(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_clongdouble_wrapper Ax, npy_clongdouble_wrapper Xx)
+      """
+    return _bsr.bsr_scale_rows(*args)
 
 def bsr_scale_columns(*args):
-  """
-    bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        signed char Ax, signed char Xx)
-    bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned char Ax, unsigned char Xx)
-    bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        short Ax, short Xx)
-    bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned short Ax, unsigned short Xx)
-    bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        int Ax, int Xx)
-    bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned int Ax, unsigned int Xx)
-    bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        long long Ax, long long Xx)
-    bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned long long Ax, unsigned long long Xx)
-    bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        float Ax, float Xx)
-    bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        double Ax, double Xx)
-    bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        long double Ax, long double Xx)
-    bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_cfloat_wrapper Ax, npy_cfloat_wrapper Xx)
-    bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_cdouble_wrapper Ax, npy_cdouble_wrapper Xx)
-    bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_clongdouble_wrapper Ax, npy_clongdouble_wrapper Xx)
     """
-  return _bsr.bsr_scale_columns(*args)
+      bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          signed char Ax, signed char Xx)
+      bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned char Ax, unsigned char Xx)
+      bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          short Ax, short Xx)
+      bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned short Ax, unsigned short Xx)
+      bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          int Ax, int Xx)
+      bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned int Ax, unsigned int Xx)
+      bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          long long Ax, long long Xx)
+      bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned long long Ax, unsigned long long Xx)
+      bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          float Ax, float Xx)
+      bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          double Ax, double Xx)
+      bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          long double Ax, long double Xx)
+      bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_cfloat_wrapper Ax, npy_cfloat_wrapper Xx)
+      bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_cdouble_wrapper Ax, npy_cdouble_wrapper Xx)
+      bsr_scale_columns(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_clongdouble_wrapper Ax, npy_clongdouble_wrapper Xx)
+      """
+    return _bsr.bsr_scale_columns(*args)
 
 def bsr_transpose(*args):
-  """
-    bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        signed char Ax, int Bp, int Bj, signed char Bx)
-    bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned char Ax, int Bp, int Bj, unsigned char Bx)
-    bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        short Ax, int Bp, int Bj, short Bx)
-    bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned short Ax, int Bp, int Bj, unsigned short Bx)
-    bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        int Ax, int Bp, int Bj, int Bx)
-    bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned int Ax, int Bp, int Bj, unsigned int Bx)
-    bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        long long Ax, int Bp, int Bj, long long Bx)
-    bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned long long Ax, int Bp, int Bj, unsigned long long Bx)
-    bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        float Ax, int Bp, int Bj, float Bx)
-    bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        double Ax, int Bp, int Bj, double Bx)
-    bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        long double Ax, int Bp, int Bj, long double Bx)
-    bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx)
-    bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx)
-    bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_clongdouble_wrapper Ax, int Bp, int Bj, 
-        npy_clongdouble_wrapper Bx)
     """
-  return _bsr.bsr_transpose(*args)
+      bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          signed char Ax, int Bp, int Bj, signed char Bx)
+      bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned char Ax, int Bp, int Bj, unsigned char Bx)
+      bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          short Ax, int Bp, int Bj, short Bx)
+      bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned short Ax, int Bp, int Bj, unsigned short Bx)
+      bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          int Ax, int Bp, int Bj, int Bx)
+      bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned int Ax, int Bp, int Bj, unsigned int Bx)
+      bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          long long Ax, int Bp, int Bj, long long Bx)
+      bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned long long Ax, int Bp, int Bj, unsigned long long Bx)
+      bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          float Ax, int Bp, int Bj, float Bx)
+      bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          double Ax, int Bp, int Bj, double Bx)
+      bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          long double Ax, int Bp, int Bj, long double Bx)
+      bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx)
+      bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx)
+      bsr_transpose(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_clongdouble_wrapper Ax, int Bp, int Bj,
+          npy_clongdouble_wrapper Bx)
+      """
+    return _bsr.bsr_transpose(*args)
 
 def bsr_matmat_pass2(*args):
-  """
-    bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, 
-        int Aj, signed char Ax, int Bp, int Bj, signed char Bx, 
-        int Cp, int Cj, signed char Cx)
-    bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, 
-        int Aj, unsigned char Ax, int Bp, int Bj, unsigned char Bx, 
-        int Cp, int Cj, unsigned char Cx)
-    bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, 
-        int Aj, short Ax, int Bp, int Bj, short Bx, 
-        int Cp, int Cj, short Cx)
-    bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, 
-        int Aj, unsigned short Ax, int Bp, int Bj, 
-        unsigned short Bx, int Cp, int Cj, unsigned short Cx)
-    bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, 
-        int Aj, int Ax, int Bp, int Bj, int Bx, int Cp, 
-        int Cj, int Cx)
-    bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, 
-        int Aj, unsigned int Ax, int Bp, int Bj, unsigned int Bx, 
-        int Cp, int Cj, unsigned int Cx)
-    bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, 
-        int Aj, long long Ax, int Bp, int Bj, long long Bx, 
-        int Cp, int Cj, long long Cx)
-    bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, 
-        int Aj, unsigned long long Ax, int Bp, int Bj, 
-        unsigned long long Bx, int Cp, int Cj, unsigned long long Cx)
-    bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, 
-        int Aj, float Ax, int Bp, int Bj, float Bx, 
-        int Cp, int Cj, float Cx)
-    bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, 
-        int Aj, double Ax, int Bp, int Bj, double Bx, 
-        int Cp, int Cj, double Cx)
-    bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, 
-        int Aj, long double Ax, int Bp, int Bj, long double Bx, 
-        int Cp, int Cj, long double Cx)
-    bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, 
-        int Aj, npy_cfloat_wrapper Ax, int Bp, int Bj, 
-        npy_cfloat_wrapper Bx, int Cp, int Cj, npy_cfloat_wrapper Cx)
-    bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, 
-        int Aj, npy_cdouble_wrapper Ax, int Bp, int Bj, 
-        npy_cdouble_wrapper Bx, int Cp, int Cj, 
-        npy_cdouble_wrapper Cx)
-    bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap, 
-        int Aj, npy_clongdouble_wrapper Ax, int Bp, 
-        int Bj, npy_clongdouble_wrapper Bx, int Cp, 
-        int Cj, npy_clongdouble_wrapper Cx)
     """
-  return _bsr.bsr_matmat_pass2(*args)
+      bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap,
+          int Aj, signed char Ax, int Bp, int Bj, signed char Bx,
+          int Cp, int Cj, signed char Cx)
+      bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap,
+          int Aj, unsigned char Ax, int Bp, int Bj, unsigned char Bx,
+          int Cp, int Cj, unsigned char Cx)
+      bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap,
+          int Aj, short Ax, int Bp, int Bj, short Bx,
+          int Cp, int Cj, short Cx)
+      bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap,
+          int Aj, unsigned short Ax, int Bp, int Bj,
+          unsigned short Bx, int Cp, int Cj, unsigned short Cx)
+      bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap,
+          int Aj, int Ax, int Bp, int Bj, int Bx, int Cp,
+          int Cj, int Cx)
+      bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap,
+          int Aj, unsigned int Ax, int Bp, int Bj, unsigned int Bx,
+          int Cp, int Cj, unsigned int Cx)
+      bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap,
+          int Aj, long long Ax, int Bp, int Bj, long long Bx,
+          int Cp, int Cj, long long Cx)
+      bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap,
+          int Aj, unsigned long long Ax, int Bp, int Bj,
+          unsigned long long Bx, int Cp, int Cj, unsigned long long Cx)
+      bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap,
+          int Aj, float Ax, int Bp, int Bj, float Bx,
+          int Cp, int Cj, float Cx)
+      bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap,
+          int Aj, double Ax, int Bp, int Bj, double Bx,
+          int Cp, int Cj, double Cx)
+      bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap,
+          int Aj, long double Ax, int Bp, int Bj, long double Bx,
+          int Cp, int Cj, long double Cx)
+      bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap,
+          int Aj, npy_cfloat_wrapper Ax, int Bp, int Bj,
+          npy_cfloat_wrapper Bx, int Cp, int Cj, npy_cfloat_wrapper Cx)
+      bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap,
+          int Aj, npy_cdouble_wrapper Ax, int Bp, int Bj,
+          npy_cdouble_wrapper Bx, int Cp, int Cj,
+          npy_cdouble_wrapper Cx)
+      bsr_matmat_pass2(int n_brow, int n_bcol, int R, int C, int N, int Ap,
+          int Aj, npy_clongdouble_wrapper Ax, int Bp,
+          int Bj, npy_clongdouble_wrapper Bx, int Cp,
+          int Cj, npy_clongdouble_wrapper Cx)
+      """
+    return _bsr.bsr_matmat_pass2(*args)
 
 def bsr_matvec(*args):
-  """
-    bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        signed char Ax, signed char Xx, signed char Yx)
-    bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned char Ax, unsigned char Xx, unsigned char Yx)
-    bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        short Ax, short Xx, short Yx)
-    bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned short Ax, unsigned short Xx, unsigned short Yx)
-    bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        int Ax, int Xx, int Yx)
-    bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned int Ax, unsigned int Xx, unsigned int Yx)
-    bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        long long Ax, long long Xx, long long Yx)
-    bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned long long Ax, unsigned long long Xx, 
-        unsigned long long Yx)
-    bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        float Ax, float Xx, float Yx)
-    bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        double Ax, double Xx, double Yx)
-    bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        long double Ax, long double Xx, long double Yx)
-    bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_cfloat_wrapper Ax, npy_cfloat_wrapper Xx, 
-        npy_cfloat_wrapper Yx)
-    bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_cdouble_wrapper Ax, npy_cdouble_wrapper Xx, 
-        npy_cdouble_wrapper Yx)
-    bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_clongdouble_wrapper Ax, npy_clongdouble_wrapper Xx, 
-        npy_clongdouble_wrapper Yx)
     """
-  return _bsr.bsr_matvec(*args)
+      bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          signed char Ax, signed char Xx, signed char Yx)
+      bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned char Ax, unsigned char Xx, unsigned char Yx)
+      bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          short Ax, short Xx, short Yx)
+      bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned short Ax, unsigned short Xx, unsigned short Yx)
+      bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          int Ax, int Xx, int Yx)
+      bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned int Ax, unsigned int Xx, unsigned int Yx)
+      bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          long long Ax, long long Xx, long long Yx)
+      bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned long long Ax, unsigned long long Xx,
+          unsigned long long Yx)
+      bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          float Ax, float Xx, float Yx)
+      bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          double Ax, double Xx, double Yx)
+      bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          long double Ax, long double Xx, long double Yx)
+      bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_cfloat_wrapper Ax, npy_cfloat_wrapper Xx,
+          npy_cfloat_wrapper Yx)
+      bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_cdouble_wrapper Ax, npy_cdouble_wrapper Xx,
+          npy_cdouble_wrapper Yx)
+      bsr_matvec(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_clongdouble_wrapper Ax, npy_clongdouble_wrapper Xx,
+          npy_clongdouble_wrapper Yx)
+      """
+    return _bsr.bsr_matvec(*args)
 
 def bsr_elmul_bsr(*args):
-  """
-    bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        signed char Ax, int Bp, int Bj, signed char Bx, 
-        int Cp, int Cj, signed char Cx)
-    bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned char Ax, int Bp, int Bj, unsigned char Bx, 
-        int Cp, int Cj, unsigned char Cx)
-    bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        short Ax, int Bp, int Bj, short Bx, int Cp, 
-        int Cj, short Cx)
-    bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned short Ax, int Bp, int Bj, unsigned short Bx, 
-        int Cp, int Cj, unsigned short Cx)
-    bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        int Ax, int Bp, int Bj, int Bx, int Cp, int Cj, 
-        int Cx)
-    bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned int Ax, int Bp, int Bj, unsigned int Bx, 
-        int Cp, int Cj, unsigned int Cx)
-    bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        long long Ax, int Bp, int Bj, long long Bx, 
-        int Cp, int Cj, long long Cx)
-    bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned long long Ax, int Bp, int Bj, unsigned long long Bx, 
-        int Cp, int Cj, unsigned long long Cx)
-    bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        float Ax, int Bp, int Bj, float Bx, int Cp, 
-        int Cj, float Cx)
-    bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        double Ax, int Bp, int Bj, double Bx, int Cp, 
-        int Cj, double Cx)
-    bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        long double Ax, int Bp, int Bj, long double Bx, 
-        int Cp, int Cj, long double Cx)
-    bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx, 
-        int Cp, int Cj, npy_cfloat_wrapper Cx)
-    bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx, 
-        int Cp, int Cj, npy_cdouble_wrapper Cx)
-    bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        npy_clongdouble_wrapper Ax, int Bp, int Bj, 
-        npy_clongdouble_wrapper Bx, int Cp, int Cj, npy_clongdouble_wrapper Cx)
     """
-  return _bsr.bsr_elmul_bsr(*args)
+      bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          signed char Ax, int Bp, int Bj, signed char Bx,
+          int Cp, int Cj, signed char Cx)
+      bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned char Ax, int Bp, int Bj, unsigned char Bx,
+          int Cp, int Cj, unsigned char Cx)
+      bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          short Ax, int Bp, int Bj, short Bx, int Cp,
+          int Cj, short Cx)
+      bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned short Ax, int Bp, int Bj, unsigned short Bx,
+          int Cp, int Cj, unsigned short Cx)
+      bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          int Ax, int Bp, int Bj, int Bx, int Cp, int Cj,
+          int Cx)
+      bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned int Ax, int Bp, int Bj, unsigned int Bx,
+          int Cp, int Cj, unsigned int Cx)
+      bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          long long Ax, int Bp, int Bj, long long Bx,
+          int Cp, int Cj, long long Cx)
+      bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned long long Ax, int Bp, int Bj, unsigned long long Bx,
+          int Cp, int Cj, unsigned long long Cx)
+      bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          float Ax, int Bp, int Bj, float Bx, int Cp,
+          int Cj, float Cx)
+      bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          double Ax, int Bp, int Bj, double Bx, int Cp,
+          int Cj, double Cx)
+      bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          long double Ax, int Bp, int Bj, long double Bx,
+          int Cp, int Cj, long double Cx)
+      bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx,
+          int Cp, int Cj, npy_cfloat_wrapper Cx)
+      bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx,
+          int Cp, int Cj, npy_cdouble_wrapper Cx)
+      bsr_elmul_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          npy_clongdouble_wrapper Ax, int Bp, int Bj,
+          npy_clongdouble_wrapper Bx, int Cp, int Cj, npy_clongdouble_wrapper Cx)
+      """
+    return _bsr.bsr_elmul_bsr(*args)
 
 def bsr_eldiv_bsr(*args):
-  """
-    bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        signed char Ax, int Bp, int Bj, signed char Bx, 
-        int Cp, int Cj, signed char Cx)
-    bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned char Ax, int Bp, int Bj, unsigned char Bx, 
-        int Cp, int Cj, unsigned char Cx)
-    bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        short Ax, int Bp, int Bj, short Bx, int Cp, 
-        int Cj, short Cx)
-    bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned short Ax, int Bp, int Bj, unsigned short Bx, 
-        int Cp, int Cj, unsigned short Cx)
-    bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        int Ax, int Bp, int Bj, int Bx, int Cp, int Cj, 
-        int Cx)
-    bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned int Ax, int Bp, int Bj, unsigned int Bx, 
-        int Cp, int Cj, unsigned int Cx)
-    bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        long long Ax, int Bp, int Bj, long long Bx, 
-        int Cp, int Cj, long long Cx)
-    bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned long long Ax, int Bp, int Bj, unsigned long long Bx, 
-        int Cp, int Cj, unsigned long long Cx)
-    bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        float Ax, int Bp, int Bj, float Bx, int Cp, 
-        int Cj, float Cx)
-    bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        double Ax, int Bp, int Bj, double Bx, int Cp, 
-        int Cj, double Cx)
-    bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        long double Ax, int Bp, int Bj, long double Bx, 
-        int Cp, int Cj, long double Cx)
-    bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx, 
-        int Cp, int Cj, npy_cfloat_wrapper Cx)
-    bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx, 
-        int Cp, int Cj, npy_cdouble_wrapper Cx)
-    bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        npy_clongdouble_wrapper Ax, int Bp, int Bj, 
-        npy_clongdouble_wrapper Bx, int Cp, int Cj, npy_clongdouble_wrapper Cx)
     """
-  return _bsr.bsr_eldiv_bsr(*args)
+      bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          signed char Ax, int Bp, int Bj, signed char Bx,
+          int Cp, int Cj, signed char Cx)
+      bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned char Ax, int Bp, int Bj, unsigned char Bx,
+          int Cp, int Cj, unsigned char Cx)
+      bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          short Ax, int Bp, int Bj, short Bx, int Cp,
+          int Cj, short Cx)
+      bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned short Ax, int Bp, int Bj, unsigned short Bx,
+          int Cp, int Cj, unsigned short Cx)
+      bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          int Ax, int Bp, int Bj, int Bx, int Cp, int Cj,
+          int Cx)
+      bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned int Ax, int Bp, int Bj, unsigned int Bx,
+          int Cp, int Cj, unsigned int Cx)
+      bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          long long Ax, int Bp, int Bj, long long Bx,
+          int Cp, int Cj, long long Cx)
+      bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned long long Ax, int Bp, int Bj, unsigned long long Bx,
+          int Cp, int Cj, unsigned long long Cx)
+      bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          float Ax, int Bp, int Bj, float Bx, int Cp,
+          int Cj, float Cx)
+      bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          double Ax, int Bp, int Bj, double Bx, int Cp,
+          int Cj, double Cx)
+      bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          long double Ax, int Bp, int Bj, long double Bx,
+          int Cp, int Cj, long double Cx)
+      bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx,
+          int Cp, int Cj, npy_cfloat_wrapper Cx)
+      bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx,
+          int Cp, int Cj, npy_cdouble_wrapper Cx)
+      bsr_eldiv_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          npy_clongdouble_wrapper Ax, int Bp, int Bj,
+          npy_clongdouble_wrapper Bx, int Cp, int Cj, npy_clongdouble_wrapper Cx)
+      """
+    return _bsr.bsr_eldiv_bsr(*args)
 
 def bsr_plus_bsr(*args):
-  """
-    bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        signed char Ax, int Bp, int Bj, signed char Bx, 
-        int Cp, int Cj, signed char Cx)
-    bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned char Ax, int Bp, int Bj, unsigned char Bx, 
-        int Cp, int Cj, unsigned char Cx)
-    bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        short Ax, int Bp, int Bj, short Bx, int Cp, 
-        int Cj, short Cx)
-    bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned short Ax, int Bp, int Bj, unsigned short Bx, 
-        int Cp, int Cj, unsigned short Cx)
-    bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        int Ax, int Bp, int Bj, int Bx, int Cp, int Cj, 
-        int Cx)
-    bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned int Ax, int Bp, int Bj, unsigned int Bx, 
-        int Cp, int Cj, unsigned int Cx)
-    bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        long long Ax, int Bp, int Bj, long long Bx, 
-        int Cp, int Cj, long long Cx)
-    bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned long long Ax, int Bp, int Bj, unsigned long long Bx, 
-        int Cp, int Cj, unsigned long long Cx)
-    bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        float Ax, int Bp, int Bj, float Bx, int Cp, 
-        int Cj, float Cx)
-    bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        double Ax, int Bp, int Bj, double Bx, int Cp, 
-        int Cj, double Cx)
-    bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        long double Ax, int Bp, int Bj, long double Bx, 
-        int Cp, int Cj, long double Cx)
-    bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx, 
-        int Cp, int Cj, npy_cfloat_wrapper Cx)
-    bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx, 
-        int Cp, int Cj, npy_cdouble_wrapper Cx)
-    bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        npy_clongdouble_wrapper Ax, int Bp, int Bj, 
-        npy_clongdouble_wrapper Bx, int Cp, int Cj, npy_clongdouble_wrapper Cx)
     """
-  return _bsr.bsr_plus_bsr(*args)
+      bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          signed char Ax, int Bp, int Bj, signed char Bx,
+          int Cp, int Cj, signed char Cx)
+      bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned char Ax, int Bp, int Bj, unsigned char Bx,
+          int Cp, int Cj, unsigned char Cx)
+      bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          short Ax, int Bp, int Bj, short Bx, int Cp,
+          int Cj, short Cx)
+      bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned short Ax, int Bp, int Bj, unsigned short Bx,
+          int Cp, int Cj, unsigned short Cx)
+      bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          int Ax, int Bp, int Bj, int Bx, int Cp, int Cj,
+          int Cx)
+      bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned int Ax, int Bp, int Bj, unsigned int Bx,
+          int Cp, int Cj, unsigned int Cx)
+      bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          long long Ax, int Bp, int Bj, long long Bx,
+          int Cp, int Cj, long long Cx)
+      bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned long long Ax, int Bp, int Bj, unsigned long long Bx,
+          int Cp, int Cj, unsigned long long Cx)
+      bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          float Ax, int Bp, int Bj, float Bx, int Cp,
+          int Cj, float Cx)
+      bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          double Ax, int Bp, int Bj, double Bx, int Cp,
+          int Cj, double Cx)
+      bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          long double Ax, int Bp, int Bj, long double Bx,
+          int Cp, int Cj, long double Cx)
+      bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx,
+          int Cp, int Cj, npy_cfloat_wrapper Cx)
+      bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx,
+          int Cp, int Cj, npy_cdouble_wrapper Cx)
+      bsr_plus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          npy_clongdouble_wrapper Ax, int Bp, int Bj,
+          npy_clongdouble_wrapper Bx, int Cp, int Cj, npy_clongdouble_wrapper Cx)
+      """
+    return _bsr.bsr_plus_bsr(*args)
 
 def bsr_minus_bsr(*args):
-  """
-    bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        signed char Ax, int Bp, int Bj, signed char Bx, 
-        int Cp, int Cj, signed char Cx)
-    bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned char Ax, int Bp, int Bj, unsigned char Bx, 
-        int Cp, int Cj, unsigned char Cx)
-    bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        short Ax, int Bp, int Bj, short Bx, int Cp, 
-        int Cj, short Cx)
-    bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned short Ax, int Bp, int Bj, unsigned short Bx, 
-        int Cp, int Cj, unsigned short Cx)
-    bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        int Ax, int Bp, int Bj, int Bx, int Cp, int Cj, 
-        int Cx)
-    bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned int Ax, int Bp, int Bj, unsigned int Bx, 
-        int Cp, int Cj, unsigned int Cx)
-    bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        long long Ax, int Bp, int Bj, long long Bx, 
-        int Cp, int Cj, long long Cx)
-    bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned long long Ax, int Bp, int Bj, unsigned long long Bx, 
-        int Cp, int Cj, unsigned long long Cx)
-    bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        float Ax, int Bp, int Bj, float Bx, int Cp, 
-        int Cj, float Cx)
-    bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        double Ax, int Bp, int Bj, double Bx, int Cp, 
-        int Cj, double Cx)
-    bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        long double Ax, int Bp, int Bj, long double Bx, 
-        int Cp, int Cj, long double Cx)
-    bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx, 
-        int Cp, int Cj, npy_cfloat_wrapper Cx)
-    bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx, 
-        int Cp, int Cj, npy_cdouble_wrapper Cx)
-    bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        npy_clongdouble_wrapper Ax, int Bp, int Bj, 
-        npy_clongdouble_wrapper Bx, int Cp, int Cj, npy_clongdouble_wrapper Cx)
     """
-  return _bsr.bsr_minus_bsr(*args)
+      bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          signed char Ax, int Bp, int Bj, signed char Bx,
+          int Cp, int Cj, signed char Cx)
+      bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned char Ax, int Bp, int Bj, unsigned char Bx,
+          int Cp, int Cj, unsigned char Cx)
+      bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          short Ax, int Bp, int Bj, short Bx, int Cp,
+          int Cj, short Cx)
+      bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned short Ax, int Bp, int Bj, unsigned short Bx,
+          int Cp, int Cj, unsigned short Cx)
+      bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          int Ax, int Bp, int Bj, int Bx, int Cp, int Cj,
+          int Cx)
+      bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned int Ax, int Bp, int Bj, unsigned int Bx,
+          int Cp, int Cj, unsigned int Cx)
+      bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          long long Ax, int Bp, int Bj, long long Bx,
+          int Cp, int Cj, long long Cx)
+      bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned long long Ax, int Bp, int Bj, unsigned long long Bx,
+          int Cp, int Cj, unsigned long long Cx)
+      bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          float Ax, int Bp, int Bj, float Bx, int Cp,
+          int Cj, float Cx)
+      bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          double Ax, int Bp, int Bj, double Bx, int Cp,
+          int Cj, double Cx)
+      bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          long double Ax, int Bp, int Bj, long double Bx,
+          int Cp, int Cj, long double Cx)
+      bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx,
+          int Cp, int Cj, npy_cfloat_wrapper Cx)
+      bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx,
+          int Cp, int Cj, npy_cdouble_wrapper Cx)
+      bsr_minus_bsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          npy_clongdouble_wrapper Ax, int Bp, int Bj,
+          npy_clongdouble_wrapper Bx, int Cp, int Cj, npy_clongdouble_wrapper Cx)
+      """
+    return _bsr.bsr_minus_bsr(*args)
 
 def bsr_sort_indices(*args):
-  """
-    bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        signed char Ax)
-    bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned char Ax)
-    bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        short Ax)
-    bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned short Ax)
-    bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        int Ax)
-    bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned int Ax)
-    bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        long long Ax)
-    bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        unsigned long long Ax)
-    bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        float Ax)
-    bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        double Ax)
-    bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        long double Ax)
-    bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_cfloat_wrapper Ax)
-    bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_cdouble_wrapper Ax)
-    bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj, 
-        npy_clongdouble_wrapper Ax)
     """
-  return _bsr.bsr_sort_indices(*args)
-
+      bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          signed char Ax)
+      bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned char Ax)
+      bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          short Ax)
+      bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned short Ax)
+      bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          int Ax)
+      bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned int Ax)
+      bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          long long Ax)
+      bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          unsigned long long Ax)
+      bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          float Ax)
+      bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          double Ax)
+      bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          long double Ax)
+      bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_cfloat_wrapper Ax)
+      bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_cdouble_wrapper Ax)
+      bsr_sort_indices(int n_brow, int n_bcol, int R, int C, int Ap, int Aj,
+          npy_clongdouble_wrapper Ax)
+      """
+    return _bsr.bsr_sort_indices(*args)
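
The bsr_* routines above are the low-level kernels behind scipy.sparse.bsr_matrix
arithmetic; the sketch below is an assumption about that dispatch path (it is not
part of this changeset) and shows the kind of call that ends up in the
bsr_plus_bsr overload for double data:

    import numpy as np
    from scipy.sparse import bsr_matrix

    # Two 4x4 matrices stored as BSR with 2x2 blocks.  Adding them at the
    # Python level is (presumably) what ultimately reaches bsr_plus_bsr for
    # the double value type listed in the docstring above.
    A = bsr_matrix(np.arange(16, dtype=np.float64).reshape(4, 4), blocksize=(2, 2))
    B = bsr_matrix(np.eye(4, dtype=np.float64), blocksize=(2, 2))
    C = A + B                  # sparse result, still block sparse row
    print(C.toarray())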

Modified: trunk/scipy/sparse/sparsetools/coo.py
===================================================================
--- trunk/scipy/sparse/sparsetools/coo.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/sparsetools/coo.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -51,101 +51,100 @@
 
 
 def coo_tocsr(*args):
-  """
-    coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, signed char Ax, 
-        int Bp, int Bj, signed char Bx)
-    coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned char Ax, 
-        int Bp, int Bj, unsigned char Bx)
-    coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, short Ax, 
-        int Bp, int Bj, short Bx)
-    coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned short Ax, 
-        int Bp, int Bj, unsigned short Bx)
-    coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, int Ax, 
-        int Bp, int Bj, int Bx)
-    coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned int Ax, 
-        int Bp, int Bj, unsigned int Bx)
-    coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, long long Ax, 
-        int Bp, int Bj, long long Bx)
-    coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned long long Ax, 
-        int Bp, int Bj, unsigned long long Bx)
-    coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, float Ax, 
-        int Bp, int Bj, float Bx)
-    coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, double Ax, 
-        int Bp, int Bj, double Bx)
-    coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, long double Ax, 
-        int Bp, int Bj, long double Bx)
-    coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cfloat_wrapper Ax, 
-        int Bp, int Bj, npy_cfloat_wrapper Bx)
-    coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cdouble_wrapper Ax, 
-        int Bp, int Bj, npy_cdouble_wrapper Bx)
-    coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, npy_clongdouble_wrapper Ax, 
-        int Bp, int Bj, npy_clongdouble_wrapper Bx)
     """
-  return _coo.coo_tocsr(*args)
+      coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, signed char Ax,
+          int Bp, int Bj, signed char Bx)
+      coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned char Ax,
+          int Bp, int Bj, unsigned char Bx)
+      coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, short Ax,
+          int Bp, int Bj, short Bx)
+      coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned short Ax,
+          int Bp, int Bj, unsigned short Bx)
+      coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, int Ax,
+          int Bp, int Bj, int Bx)
+      coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned int Ax,
+          int Bp, int Bj, unsigned int Bx)
+      coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, long long Ax,
+          int Bp, int Bj, long long Bx)
+      coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned long long Ax,
+          int Bp, int Bj, unsigned long long Bx)
+      coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, float Ax,
+          int Bp, int Bj, float Bx)
+      coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, double Ax,
+          int Bp, int Bj, double Bx)
+      coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, long double Ax,
+          int Bp, int Bj, long double Bx)
+      coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cfloat_wrapper Ax,
+          int Bp, int Bj, npy_cfloat_wrapper Bx)
+      coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cdouble_wrapper Ax,
+          int Bp, int Bj, npy_cdouble_wrapper Bx)
+      coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, npy_clongdouble_wrapper Ax,
+          int Bp, int Bj, npy_clongdouble_wrapper Bx)
+      """
+    return _coo.coo_tocsr(*args)
 
 def coo_tocsc(*args):
-  """
-    coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, signed char Ax, 
-        int Bp, int Bi, signed char Bx)
-    coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned char Ax, 
-        int Bp, int Bi, unsigned char Bx)
-    coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, short Ax, 
-        int Bp, int Bi, short Bx)
-    coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned short Ax, 
-        int Bp, int Bi, unsigned short Bx)
-    coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, int Ax, 
-        int Bp, int Bi, int Bx)
-    coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned int Ax, 
-        int Bp, int Bi, unsigned int Bx)
-    coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, long long Ax, 
-        int Bp, int Bi, long long Bx)
-    coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned long long Ax, 
-        int Bp, int Bi, unsigned long long Bx)
-    coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, float Ax, 
-        int Bp, int Bi, float Bx)
-    coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, double Ax, 
-        int Bp, int Bi, double Bx)
-    coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, long double Ax, 
-        int Bp, int Bi, long double Bx)
-    coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cfloat_wrapper Ax, 
-        int Bp, int Bi, npy_cfloat_wrapper Bx)
-    coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cdouble_wrapper Ax, 
-        int Bp, int Bi, npy_cdouble_wrapper Bx)
-    coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, npy_clongdouble_wrapper Ax, 
-        int Bp, int Bi, npy_clongdouble_wrapper Bx)
     """
-  return _coo.coo_tocsc(*args)
+      coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, signed char Ax,
+          int Bp, int Bi, signed char Bx)
+      coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned char Ax,
+          int Bp, int Bi, unsigned char Bx)
+      coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, short Ax,
+          int Bp, int Bi, short Bx)
+      coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned short Ax,
+          int Bp, int Bi, unsigned short Bx)
+      coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, int Ax,
+          int Bp, int Bi, int Bx)
+      coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned int Ax,
+          int Bp, int Bi, unsigned int Bx)
+      coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, long long Ax,
+          int Bp, int Bi, long long Bx)
+      coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned long long Ax,
+          int Bp, int Bi, unsigned long long Bx)
+      coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, float Ax,
+          int Bp, int Bi, float Bx)
+      coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, double Ax,
+          int Bp, int Bi, double Bx)
+      coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, long double Ax,
+          int Bp, int Bi, long double Bx)
+      coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cfloat_wrapper Ax,
+          int Bp, int Bi, npy_cfloat_wrapper Bx)
+      coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cdouble_wrapper Ax,
+          int Bp, int Bi, npy_cdouble_wrapper Bx)
+      coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, npy_clongdouble_wrapper Ax,
+          int Bp, int Bi, npy_clongdouble_wrapper Bx)
+      """
+    return _coo.coo_tocsc(*args)
 
 def coo_todense(*args):
-  """
-    coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, signed char Ax, 
-        signed char Bx)
-    coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned char Ax, 
-        unsigned char Bx)
-    coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, short Ax, 
-        short Bx)
-    coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned short Ax, 
-        unsigned short Bx)
-    coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, int Ax, 
-        int Bx)
-    coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned int Ax, 
-        unsigned int Bx)
-    coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, long long Ax, 
-        long long Bx)
-    coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned long long Ax, 
-        unsigned long long Bx)
-    coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, float Ax, 
-        float Bx)
-    coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, double Ax, 
-        double Bx)
-    coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, long double Ax, 
-        long double Bx)
-    coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cfloat_wrapper Ax, 
-        npy_cfloat_wrapper Bx)
-    coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cdouble_wrapper Ax, 
-        npy_cdouble_wrapper Bx)
-    coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, npy_clongdouble_wrapper Ax, 
-        npy_clongdouble_wrapper Bx)
     """
-  return _coo.coo_todense(*args)
-
+      coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, signed char Ax,
+          signed char Bx)
+      coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned char Ax,
+          unsigned char Bx)
+      coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, short Ax,
+          short Bx)
+      coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned short Ax,
+          unsigned short Bx)
+      coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, int Ax,
+          int Bx)
+      coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned int Ax,
+          unsigned int Bx)
+      coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, long long Ax,
+          long long Bx)
+      coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, unsigned long long Ax,
+          unsigned long long Bx)
+      coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, float Ax,
+          float Bx)
+      coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, double Ax,
+          double Bx)
+      coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, long double Ax,
+          long double Bx)
+      coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cfloat_wrapper Ax,
+          npy_cfloat_wrapper Bx)
+      coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, npy_cdouble_wrapper Ax,
+          npy_cdouble_wrapper Bx)
+      coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, npy_clongdouble_wrapper Ax,
+          npy_clongdouble_wrapper Bx)
+      """
+    return _coo.coo_todense(*args)
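
The coo_tocsr/coo_tocsc/coo_todense wrappers above are normally reached through
the coo_matrix conversion methods; a minimal sketch, assuming that dispatch path
(it is not shown in this changeset):

    import numpy as np
    from scipy.sparse import coo_matrix

    row  = np.array([0, 1, 2, 2])
    col  = np.array([1, 2, 0, 2])
    data = np.array([4.0, 5.0, 7.0, 9.0])

    A = coo_matrix((data, (row, col)), shape=(3, 3))
    B = A.tocsr()      # conversion that the coo_tocsr(double ...) overload serves
    C = A.tocsc()      # likewise coo_tocsc
    D = A.toarray()    # dense copy, cf. coo_todense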

Modified: trunk/scipy/sparse/sparsetools/csc.py
===================================================================
--- trunk/scipy/sparse/sparsetools/csc.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/sparsetools/csc.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -50,320 +50,319 @@
 
 
 def csc_matmat_pass1(*args):
-  """
-    csc_matmat_pass1(int n_row, int n_col, int Ap, int Ai, int Bp, int Bi, 
-        int Cp)
     """
-  return _csc.csc_matmat_pass1(*args)
+      csc_matmat_pass1(int n_row, int n_col, int Ap, int Ai, int Bp, int Bi,
+          int Cp)
+      """
+    return _csc.csc_matmat_pass1(*args)
 
 
 def csc_diagonal(*args):
-  """
-    csc_diagonal(int n_row, int n_col, int Ap, int Aj, signed char Ax, 
-        signed char Yx)
-    csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, 
-        unsigned char Yx)
-    csc_diagonal(int n_row, int n_col, int Ap, int Aj, short Ax, short Yx)
-    csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, 
-        unsigned short Yx)
-    csc_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, int Yx)
-    csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, 
-        unsigned int Yx)
-    csc_diagonal(int n_row, int n_col, int Ap, int Aj, long long Ax, 
-        long long Yx)
-    csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, 
-        unsigned long long Yx)
-    csc_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, float Yx)
-    csc_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, double Yx)
-    csc_diagonal(int n_row, int n_col, int Ap, int Aj, long double Ax, 
-        long double Yx)
-    csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, 
-        npy_cfloat_wrapper Yx)
-    csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, 
-        npy_cdouble_wrapper Yx)
-    csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, 
-        npy_clongdouble_wrapper Yx)
     """
-  return _csc.csc_diagonal(*args)
+      csc_diagonal(int n_row, int n_col, int Ap, int Aj, signed char Ax,
+          signed char Yx)
+      csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
+          unsigned char Yx)
+      csc_diagonal(int n_row, int n_col, int Ap, int Aj, short Ax, short Yx)
+      csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
+          unsigned short Yx)
+      csc_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, int Yx)
+      csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
+          unsigned int Yx)
+      csc_diagonal(int n_row, int n_col, int Ap, int Aj, long long Ax,
+          long long Yx)
+      csc_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
+          unsigned long long Yx)
+      csc_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, float Yx)
+      csc_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, double Yx)
+      csc_diagonal(int n_row, int n_col, int Ap, int Aj, long double Ax,
+          long double Yx)
+      csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
+          npy_cfloat_wrapper Yx)
+      csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
+          npy_cdouble_wrapper Yx)
+      csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
+          npy_clongdouble_wrapper Yx)
+      """
+    return _csc.csc_diagonal(*args)
 
 def csc_tocsr(*args):
-  """
-    csc_tocsr(int n_row, int n_col, int Ap, int Ai, signed char Ax, 
-        int Bp, int Bj, signed char Bx)
-    csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, 
-        int Bp, int Bj, unsigned char Bx)
-    csc_tocsr(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, 
-        int Bj, short Bx)
-    csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, 
-        int Bp, int Bj, unsigned short Bx)
-    csc_tocsr(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, 
-        int Bj, int Bx)
-    csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, 
-        int Bp, int Bj, unsigned int Bx)
-    csc_tocsr(int n_row, int n_col, int Ap, int Ai, long long Ax, 
-        int Bp, int Bj, long long Bx)
-    csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, 
-        int Bp, int Bj, unsigned long long Bx)
-    csc_tocsr(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, 
-        int Bj, float Bx)
-    csc_tocsr(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, 
-        int Bj, double Bx)
-    csc_tocsr(int n_row, int n_col, int Ap, int Ai, long double Ax, 
-        int Bp, int Bj, long double Bx)
-    csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, 
-        int Bp, int Bj, npy_cfloat_wrapper Bx)
-    csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, 
-        int Bp, int Bj, npy_cdouble_wrapper Bx)
-    csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, 
-        int Bp, int Bj, npy_clongdouble_wrapper Bx)
     """
-  return _csc.csc_tocsr(*args)
+      csc_tocsr(int n_row, int n_col, int Ap, int Ai, signed char Ax,
+          int Bp, int Bj, signed char Bx)
+      csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
+          int Bp, int Bj, unsigned char Bx)
+      csc_tocsr(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
+          int Bj, short Bx)
+      csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
+          int Bp, int Bj, unsigned short Bx)
+      csc_tocsr(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
+          int Bj, int Bx)
+      csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
+          int Bp, int Bj, unsigned int Bx)
+      csc_tocsr(int n_row, int n_col, int Ap, int Ai, long long Ax,
+          int Bp, int Bj, long long Bx)
+      csc_tocsr(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
+          int Bp, int Bj, unsigned long long Bx)
+      csc_tocsr(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
+          int Bj, float Bx)
+      csc_tocsr(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
+          int Bj, double Bx)
+      csc_tocsr(int n_row, int n_col, int Ap, int Ai, long double Ax,
+          int Bp, int Bj, long double Bx)
+      csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
+          int Bp, int Bj, npy_cfloat_wrapper Bx)
+      csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
+          int Bp, int Bj, npy_cdouble_wrapper Bx)
+      csc_tocsr(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
+          int Bp, int Bj, npy_clongdouble_wrapper Bx)
+      """
+    return _csc.csc_tocsr(*args)
 
 def csc_matmat_pass2(*args):
-  """
-    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, signed char Ax, 
-        int Bp, int Bi, signed char Bx, int Cp, int Ci, 
-        signed char Cx)
-    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, 
-        int Bp, int Bi, unsigned char Bx, int Cp, 
-        int Ci, unsigned char Cx)
-    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, 
-        int Bi, short Bx, int Cp, int Ci, short Cx)
-    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, 
-        int Bp, int Bi, unsigned short Bx, int Cp, 
-        int Ci, unsigned short Cx)
-    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, 
-        int Bi, int Bx, int Cp, int Ci, int Cx)
-    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, 
-        int Bp, int Bi, unsigned int Bx, int Cp, 
-        int Ci, unsigned int Cx)
-    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, long long Ax, 
-        int Bp, int Bi, long long Bx, int Cp, int Ci, 
-        long long Cx)
-    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, 
-        int Bp, int Bi, unsigned long long Bx, 
-        int Cp, int Ci, unsigned long long Cx)
-    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, 
-        int Bi, float Bx, int Cp, int Ci, float Cx)
-    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, 
-        int Bi, double Bx, int Cp, int Ci, double Cx)
-    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, long double Ax, 
-        int Bp, int Bi, long double Bx, int Cp, int Ci, 
-        long double Cx)
-    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, 
-        int Bp, int Bi, npy_cfloat_wrapper Bx, 
-        int Cp, int Ci, npy_cfloat_wrapper Cx)
-    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, 
-        int Bp, int Bi, npy_cdouble_wrapper Bx, 
-        int Cp, int Ci, npy_cdouble_wrapper Cx)
-    csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, 
-        int Bp, int Bi, npy_clongdouble_wrapper Bx, 
-        int Cp, int Ci, npy_clongdouble_wrapper Cx)
     """
-  return _csc.csc_matmat_pass2(*args)
+      csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, signed char Ax,
+          int Bp, int Bi, signed char Bx, int Cp, int Ci,
+          signed char Cx)
+      csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
+          int Bp, int Bi, unsigned char Bx, int Cp,
+          int Ci, unsigned char Cx)
+      csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
+          int Bi, short Bx, int Cp, int Ci, short Cx)
+      csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
+          int Bp, int Bi, unsigned short Bx, int Cp,
+          int Ci, unsigned short Cx)
+      csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
+          int Bi, int Bx, int Cp, int Ci, int Cx)
+      csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
+          int Bp, int Bi, unsigned int Bx, int Cp,
+          int Ci, unsigned int Cx)
+      csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, long long Ax,
+          int Bp, int Bi, long long Bx, int Cp, int Ci,
+          long long Cx)
+      csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
+          int Bp, int Bi, unsigned long long Bx,
+          int Cp, int Ci, unsigned long long Cx)
+      csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
+          int Bi, float Bx, int Cp, int Ci, float Cx)
+      csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
+          int Bi, double Bx, int Cp, int Ci, double Cx)
+      csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, long double Ax,
+          int Bp, int Bi, long double Bx, int Cp, int Ci,
+          long double Cx)
+      csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
+          int Bp, int Bi, npy_cfloat_wrapper Bx,
+          int Cp, int Ci, npy_cfloat_wrapper Cx)
+      csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
+          int Bp, int Bi, npy_cdouble_wrapper Bx,
+          int Cp, int Ci, npy_cdouble_wrapper Cx)
+      csc_matmat_pass2(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
+          int Bp, int Bi, npy_clongdouble_wrapper Bx,
+          int Cp, int Ci, npy_clongdouble_wrapper Cx)
+      """
+    return _csc.csc_matmat_pass2(*args)
 
 def csc_matvec(*args):
-  """
-    csc_matvec(int n_row, int n_col, int Ap, int Ai, signed char Ax, 
-        signed char Xx, signed char Yx)
-    csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, 
-        unsigned char Xx, unsigned char Yx)
-    csc_matvec(int n_row, int n_col, int Ap, int Ai, short Ax, short Xx, 
-        short Yx)
-    csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, 
-        unsigned short Xx, unsigned short Yx)
-    csc_matvec(int n_row, int n_col, int Ap, int Ai, int Ax, int Xx, 
-        int Yx)
-    csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, 
-        unsigned int Xx, unsigned int Yx)
-    csc_matvec(int n_row, int n_col, int Ap, int Ai, long long Ax, 
-        long long Xx, long long Yx)
-    csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, 
-        unsigned long long Xx, unsigned long long Yx)
-    csc_matvec(int n_row, int n_col, int Ap, int Ai, float Ax, float Xx, 
-        float Yx)
-    csc_matvec(int n_row, int n_col, int Ap, int Ai, double Ax, double Xx, 
-        double Yx)
-    csc_matvec(int n_row, int n_col, int Ap, int Ai, long double Ax, 
-        long double Xx, long double Yx)
-    csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, 
-        npy_cfloat_wrapper Xx, npy_cfloat_wrapper Yx)
-    csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, 
-        npy_cdouble_wrapper Xx, npy_cdouble_wrapper Yx)
-    csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, 
-        npy_clongdouble_wrapper Xx, npy_clongdouble_wrapper Yx)
     """
-  return _csc.csc_matvec(*args)
+      csc_matvec(int n_row, int n_col, int Ap, int Ai, signed char Ax,
+          signed char Xx, signed char Yx)
+      csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
+          unsigned char Xx, unsigned char Yx)
+      csc_matvec(int n_row, int n_col, int Ap, int Ai, short Ax, short Xx,
+          short Yx)
+      csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
+          unsigned short Xx, unsigned short Yx)
+      csc_matvec(int n_row, int n_col, int Ap, int Ai, int Ax, int Xx,
+          int Yx)
+      csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
+          unsigned int Xx, unsigned int Yx)
+      csc_matvec(int n_row, int n_col, int Ap, int Ai, long long Ax,
+          long long Xx, long long Yx)
+      csc_matvec(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
+          unsigned long long Xx, unsigned long long Yx)
+      csc_matvec(int n_row, int n_col, int Ap, int Ai, float Ax, float Xx,
+          float Yx)
+      csc_matvec(int n_row, int n_col, int Ap, int Ai, double Ax, double Xx,
+          double Yx)
+      csc_matvec(int n_row, int n_col, int Ap, int Ai, long double Ax,
+          long double Xx, long double Yx)
+      csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
+          npy_cfloat_wrapper Xx, npy_cfloat_wrapper Yx)
+      csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
+          npy_cdouble_wrapper Xx, npy_cdouble_wrapper Yx)
+      csc_matvec(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
+          npy_clongdouble_wrapper Xx, npy_clongdouble_wrapper Yx)
+      """
+    return _csc.csc_matvec(*args)
 
 def csc_elmul_csc(*args):
-  """
-    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax, 
-        int Bp, int Bi, signed char Bx, int Cp, int Ci, 
-        signed char Cx)
-    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, 
-        int Bp, int Bi, unsigned char Bx, int Cp, 
-        int Ci, unsigned char Cx)
-    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, 
-        int Bi, short Bx, int Cp, int Ci, short Cx)
-    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, 
-        int Bp, int Bi, unsigned short Bx, int Cp, 
-        int Ci, unsigned short Cx)
-    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, 
-        int Bi, int Bx, int Cp, int Ci, int Cx)
-    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, 
-        int Bp, int Bi, unsigned int Bx, int Cp, 
-        int Ci, unsigned int Cx)
-    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, long long Ax, 
-        int Bp, int Bi, long long Bx, int Cp, int Ci, 
-        long long Cx)
-    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, 
-        int Bp, int Bi, unsigned long long Bx, 
-        int Cp, int Ci, unsigned long long Cx)
-    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, 
-        int Bi, float Bx, int Cp, int Ci, float Cx)
-    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, 
-        int Bi, double Bx, int Cp, int Ci, double Cx)
-    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, long double Ax, 
-        int Bp, int Bi, long double Bx, int Cp, int Ci, 
-        long double Cx)
-    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, 
-        int Bp, int Bi, npy_cfloat_wrapper Bx, 
-        int Cp, int Ci, npy_cfloat_wrapper Cx)
-    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, 
-        int Bp, int Bi, npy_cdouble_wrapper Bx, 
-        int Cp, int Ci, npy_cdouble_wrapper Cx)
-    csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, 
-        int Bp, int Bi, npy_clongdouble_wrapper Bx, 
-        int Cp, int Ci, npy_clongdouble_wrapper Cx)
     """
-  return _csc.csc_elmul_csc(*args)
+      csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax,
+          int Bp, int Bi, signed char Bx, int Cp, int Ci,
+          signed char Cx)
+      csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
+          int Bp, int Bi, unsigned char Bx, int Cp,
+          int Ci, unsigned char Cx)
+      csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
+          int Bi, short Bx, int Cp, int Ci, short Cx)
+      csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
+          int Bp, int Bi, unsigned short Bx, int Cp,
+          int Ci, unsigned short Cx)
+      csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
+          int Bi, int Bx, int Cp, int Ci, int Cx)
+      csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
+          int Bp, int Bi, unsigned int Bx, int Cp,
+          int Ci, unsigned int Cx)
+      csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, long long Ax,
+          int Bp, int Bi, long long Bx, int Cp, int Ci,
+          long long Cx)
+      csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
+          int Bp, int Bi, unsigned long long Bx,
+          int Cp, int Ci, unsigned long long Cx)
+      csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
+          int Bi, float Bx, int Cp, int Ci, float Cx)
+      csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
+          int Bi, double Bx, int Cp, int Ci, double Cx)
+      csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, long double Ax,
+          int Bp, int Bi, long double Bx, int Cp, int Ci,
+          long double Cx)
+      csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
+          int Bp, int Bi, npy_cfloat_wrapper Bx,
+          int Cp, int Ci, npy_cfloat_wrapper Cx)
+      csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
+          int Bp, int Bi, npy_cdouble_wrapper Bx,
+          int Cp, int Ci, npy_cdouble_wrapper Cx)
+      csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
+          int Bp, int Bi, npy_clongdouble_wrapper Bx,
+          int Cp, int Ci, npy_clongdouble_wrapper Cx)
+      """
+    return _csc.csc_elmul_csc(*args)
 
 def csc_eldiv_csc(*args):
-  """
-    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax, 
-        int Bp, int Bi, signed char Bx, int Cp, int Ci, 
-        signed char Cx)
-    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, 
-        int Bp, int Bi, unsigned char Bx, int Cp, 
-        int Ci, unsigned char Cx)
-    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, 
-        int Bi, short Bx, int Cp, int Ci, short Cx)
-    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, 
-        int Bp, int Bi, unsigned short Bx, int Cp, 
-        int Ci, unsigned short Cx)
-    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, 
-        int Bi, int Bx, int Cp, int Ci, int Cx)
-    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, 
-        int Bp, int Bi, unsigned int Bx, int Cp, 
-        int Ci, unsigned int Cx)
-    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, long long Ax, 
-        int Bp, int Bi, long long Bx, int Cp, int Ci, 
-        long long Cx)
-    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, 
-        int Bp, int Bi, unsigned long long Bx, 
-        int Cp, int Ci, unsigned long long Cx)
-    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, 
-        int Bi, float Bx, int Cp, int Ci, float Cx)
-    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, 
-        int Bi, double Bx, int Cp, int Ci, double Cx)
-    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, long double Ax, 
-        int Bp, int Bi, long double Bx, int Cp, int Ci, 
-        long double Cx)
-    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, 
-        int Bp, int Bi, npy_cfloat_wrapper Bx, 
-        int Cp, int Ci, npy_cfloat_wrapper Cx)
-    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, 
-        int Bp, int Bi, npy_cdouble_wrapper Bx, 
-        int Cp, int Ci, npy_cdouble_wrapper Cx)
-    csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, 
-        int Bp, int Bi, npy_clongdouble_wrapper Bx, 
-        int Cp, int Ci, npy_clongdouble_wrapper Cx)
     """
-  return _csc.csc_eldiv_csc(*args)
+      csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax,
+          int Bp, int Bi, signed char Bx, int Cp, int Ci,
+          signed char Cx)
+      csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
+          int Bp, int Bi, unsigned char Bx, int Cp,
+          int Ci, unsigned char Cx)
+      csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
+          int Bi, short Bx, int Cp, int Ci, short Cx)
+      csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
+          int Bp, int Bi, unsigned short Bx, int Cp,
+          int Ci, unsigned short Cx)
+      csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
+          int Bi, int Bx, int Cp, int Ci, int Cx)
+      csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
+          int Bp, int Bi, unsigned int Bx, int Cp,
+          int Ci, unsigned int Cx)
+      csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, long long Ax,
+          int Bp, int Bi, long long Bx, int Cp, int Ci,
+          long long Cx)
+      csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
+          int Bp, int Bi, unsigned long long Bx,
+          int Cp, int Ci, unsigned long long Cx)
+      csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
+          int Bi, float Bx, int Cp, int Ci, float Cx)
+      csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
+          int Bi, double Bx, int Cp, int Ci, double Cx)
+      csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, long double Ax,
+          int Bp, int Bi, long double Bx, int Cp, int Ci,
+          long double Cx)
+      csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
+          int Bp, int Bi, npy_cfloat_wrapper Bx,
+          int Cp, int Ci, npy_cfloat_wrapper Cx)
+      csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
+          int Bp, int Bi, npy_cdouble_wrapper Bx,
+          int Cp, int Ci, npy_cdouble_wrapper Cx)
+      csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
+          int Bp, int Bi, npy_clongdouble_wrapper Bx,
+          int Cp, int Ci, npy_clongdouble_wrapper Cx)
+      """
+    return _csc.csc_eldiv_csc(*args)
 
 def csc_plus_csc(*args):
-  """
-    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax, 
-        int Bp, int Bi, signed char Bx, int Cp, int Ci, 
-        signed char Cx)
-    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, 
-        int Bp, int Bi, unsigned char Bx, int Cp, 
-        int Ci, unsigned char Cx)
-    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, 
-        int Bi, short Bx, int Cp, int Ci, short Cx)
-    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, 
-        int Bp, int Bi, unsigned short Bx, int Cp, 
-        int Ci, unsigned short Cx)
-    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, 
-        int Bi, int Bx, int Cp, int Ci, int Cx)
-    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, 
-        int Bp, int Bi, unsigned int Bx, int Cp, 
-        int Ci, unsigned int Cx)
-    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, long long Ax, 
-        int Bp, int Bi, long long Bx, int Cp, int Ci, 
-        long long Cx)
-    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, 
-        int Bp, int Bi, unsigned long long Bx, 
-        int Cp, int Ci, unsigned long long Cx)
-    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, 
-        int Bi, float Bx, int Cp, int Ci, float Cx)
-    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, 
-        int Bi, double Bx, int Cp, int Ci, double Cx)
-    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, long double Ax, 
-        int Bp, int Bi, long double Bx, int Cp, int Ci, 
-        long double Cx)
-    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, 
-        int Bp, int Bi, npy_cfloat_wrapper Bx, 
-        int Cp, int Ci, npy_cfloat_wrapper Cx)
-    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, 
-        int Bp, int Bi, npy_cdouble_wrapper Bx, 
-        int Cp, int Ci, npy_cdouble_wrapper Cx)
-    csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, 
-        int Bp, int Bi, npy_clongdouble_wrapper Bx, 
-        int Cp, int Ci, npy_clongdouble_wrapper Cx)
     """
-  return _csc.csc_plus_csc(*args)
+      csc_plus_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax,
+          int Bp, int Bi, signed char Bx, int Cp, int Ci,
+          signed char Cx)
+      csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
+          int Bp, int Bi, unsigned char Bx, int Cp,
+          int Ci, unsigned char Cx)
+      csc_plus_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
+          int Bi, short Bx, int Cp, int Ci, short Cx)
+      csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
+          int Bp, int Bi, unsigned short Bx, int Cp,
+          int Ci, unsigned short Cx)
+      csc_plus_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
+          int Bi, int Bx, int Cp, int Ci, int Cx)
+      csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
+          int Bp, int Bi, unsigned int Bx, int Cp,
+          int Ci, unsigned int Cx)
+      csc_plus_csc(int n_row, int n_col, int Ap, int Ai, long long Ax,
+          int Bp, int Bi, long long Bx, int Cp, int Ci,
+          long long Cx)
+      csc_plus_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
+          int Bp, int Bi, unsigned long long Bx,
+          int Cp, int Ci, unsigned long long Cx)
+      csc_plus_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
+          int Bi, float Bx, int Cp, int Ci, float Cx)
+      csc_plus_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
+          int Bi, double Bx, int Cp, int Ci, double Cx)
+      csc_plus_csc(int n_row, int n_col, int Ap, int Ai, long double Ax,
+          int Bp, int Bi, long double Bx, int Cp, int Ci,
+          long double Cx)
+      csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
+          int Bp, int Bi, npy_cfloat_wrapper Bx,
+          int Cp, int Ci, npy_cfloat_wrapper Cx)
+      csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
+          int Bp, int Bi, npy_cdouble_wrapper Bx,
+          int Cp, int Ci, npy_cdouble_wrapper Cx)
+      csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
+          int Bp, int Bi, npy_clongdouble_wrapper Bx,
+          int Cp, int Ci, npy_clongdouble_wrapper Cx)
+      """
+    return _csc.csc_plus_csc(*args)
 
 def csc_minus_csc(*args):
-  """
-    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax, 
-        int Bp, int Bi, signed char Bx, int Cp, int Ci, 
-        signed char Cx)
-    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax, 
-        int Bp, int Bi, unsigned char Bx, int Cp, 
-        int Ci, unsigned char Cx)
-    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp, 
-        int Bi, short Bx, int Cp, int Ci, short Cx)
-    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax, 
-        int Bp, int Bi, unsigned short Bx, int Cp, 
-        int Ci, unsigned short Cx)
-    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, 
-        int Bi, int Bx, int Cp, int Ci, int Cx)
-    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax, 
-        int Bp, int Bi, unsigned int Bx, int Cp, 
-        int Ci, unsigned int Cx)
-    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, long long Ax, 
-        int Bp, int Bi, long long Bx, int Cp, int Ci, 
-        long long Cx)
-    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax, 
-        int Bp, int Bi, unsigned long long Bx, 
-        int Cp, int Ci, unsigned long long Cx)
-    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, 
-        int Bi, float Bx, int Cp, int Ci, float Cx)
-    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, 
-        int Bi, double Bx, int Cp, int Ci, double Cx)
-    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, long double Ax, 
-        int Bp, int Bi, long double Bx, int Cp, int Ci, 
-        long double Cx)
-    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, 
-        int Bp, int Bi, npy_cfloat_wrapper Bx, 
-        int Cp, int Ci, npy_cfloat_wrapper Cx)
-    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, 
-        int Bp, int Bi, npy_cdouble_wrapper Bx, 
-        int Cp, int Ci, npy_cdouble_wrapper Cx)
-    csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax, 
-        int Bp, int Bi, npy_clongdouble_wrapper Bx, 
-        int Cp, int Ci, npy_clongdouble_wrapper Cx)
     """
-  return _csc.csc_minus_csc(*args)
-
+      csc_minus_csc(int n_row, int n_col, int Ap, int Ai, signed char Ax,
+          int Bp, int Bi, signed char Bx, int Cp, int Ci,
+          signed char Cx)
+      csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned char Ax,
+          int Bp, int Bi, unsigned char Bx, int Cp,
+          int Ci, unsigned char Cx)
+      csc_minus_csc(int n_row, int n_col, int Ap, int Ai, short Ax, int Bp,
+          int Bi, short Bx, int Cp, int Ci, short Cx)
+      csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned short Ax,
+          int Bp, int Bi, unsigned short Bx, int Cp,
+          int Ci, unsigned short Cx)
+      csc_minus_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp,
+          int Bi, int Bx, int Cp, int Ci, int Cx)
+      csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned int Ax,
+          int Bp, int Bi, unsigned int Bx, int Cp,
+          int Ci, unsigned int Cx)
+      csc_minus_csc(int n_row, int n_col, int Ap, int Ai, long long Ax,
+          int Bp, int Bi, long long Bx, int Cp, int Ci,
+          long long Cx)
+      csc_minus_csc(int n_row, int n_col, int Ap, int Ai, unsigned long long Ax,
+          int Bp, int Bi, unsigned long long Bx,
+          int Cp, int Ci, unsigned long long Cx)
+      csc_minus_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp,
+          int Bi, float Bx, int Cp, int Ci, float Cx)
+      csc_minus_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp,
+          int Bi, double Bx, int Cp, int Ci, double Cx)
+      csc_minus_csc(int n_row, int n_col, int Ap, int Ai, long double Ax,
+          int Bp, int Bi, long double Bx, int Cp, int Ci,
+          long double Cx)
+      csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax,
+          int Bp, int Bi, npy_cfloat_wrapper Bx,
+          int Cp, int Ci, npy_cfloat_wrapper Cx)
+      csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax,
+          int Bp, int Bi, npy_cdouble_wrapper Bx,
+          int Cp, int Ci, npy_cdouble_wrapper Cx)
+      csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_clongdouble_wrapper Ax,
+          int Bp, int Bi, npy_clongdouble_wrapper Bx,
+          int Cp, int Ci, npy_clongdouble_wrapper Cx)
+      """
+    return _csc.csc_minus_csc(*args)
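
The csc_* kernels above back the corresponding csc_matrix operations; a short
sketch of typical high-level calls, again assuming the usual dispatch rather
than anything introduced by this commit:

    import numpy as np
    from scipy.sparse import csc_matrix

    A = csc_matrix(np.array([[1.0, 0.0, 2.0],
                             [0.0, 3.0, 0.0],
                             [4.0, 0.0, 5.0]]))
    x = np.array([1.0, 1.0, 1.0])

    y = A.dot(x)        # matrix-vector product, cf. csc_matvec(double ...)
    d = A.diagonal()    # main diagonal, cf. csc_diagonal
    B = A.tocsr()       # format conversion, cf. csc_tocsr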

Modified: trunk/scipy/sparse/sparsetools/csr.py
===================================================================
--- trunk/scipy/sparse/sparsetools/csr.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/sparsetools/csr.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -50,532 +50,531 @@
 
 
 def expandptr(*args):
-  """expandptr(int n_row, int Ap, int Bi)"""
-  return _csr.expandptr(*args)
+    """expandptr(int n_row, int Ap, int Bi)"""
+    return _csr.expandptr(*args)
 
 def csr_count_blocks(*args):
-  """csr_count_blocks(int n_row, int n_col, int R, int C, int Ap, int Aj) -> int"""
-  return _csr.csr_count_blocks(*args)
+    """csr_count_blocks(int n_row, int n_col, int R, int C, int Ap, int Aj) -> int"""
+    return _csr.csr_count_blocks(*args)
 
 def csr_matmat_pass1(*args):
-  """
-    csr_matmat_pass1(int n_row, int n_col, int Ap, int Aj, int Bp, int Bj, 
-        int Cp)
     """
-  return _csr.csr_matmat_pass1(*args)
+      csr_matmat_pass1(int n_row, int n_col, int Ap, int Aj, int Bp, int Bj,
+          int Cp)
+      """
+    return _csr.csr_matmat_pass1(*args)
 
 def csr_has_sorted_indices(*args):
-  """csr_has_sorted_indices(int n_row, int Ap, int Aj) -> bool"""
-  return _csr.csr_has_sorted_indices(*args)
+    """csr_has_sorted_indices(int n_row, int Ap, int Aj) -> bool"""
+    return _csr.csr_has_sorted_indices(*args)
 
 
 def csr_diagonal(*args):
-  """
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, signed char Ax, 
-        signed char Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, 
-        unsigned char Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, short Ax, short Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, 
-        unsigned short Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, int Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, 
-        unsigned int Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, long long Ax, 
-        long long Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, 
-        unsigned long long Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, float Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, double Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, long double Ax, 
-        long double Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, 
-        npy_cfloat_wrapper Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, 
-        npy_cdouble_wrapper Yx)
-    csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, 
-        npy_clongdouble_wrapper Yx)
     """
-  return _csr.csr_diagonal(*args)
+      csr_diagonal(int n_row, int n_col, int Ap, int Aj, signed char Ax,
+          signed char Yx)
+      csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
+          unsigned char Yx)
+      csr_diagonal(int n_row, int n_col, int Ap, int Aj, short Ax, short Yx)
+      csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
+          unsigned short Yx)
+      csr_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, int Yx)
+      csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
+          unsigned int Yx)
+      csr_diagonal(int n_row, int n_col, int Ap, int Aj, long long Ax,
+          long long Yx)
+      csr_diagonal(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
+          unsigned long long Yx)
+      csr_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, float Yx)
+      csr_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, double Yx)
+      csr_diagonal(int n_row, int n_col, int Ap, int Aj, long double Ax,
+          long double Yx)
+      csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
+          npy_cfloat_wrapper Yx)
+      csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
+          npy_cdouble_wrapper Yx)
+      csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
+          npy_clongdouble_wrapper Yx)
+      """
+    return _csr.csr_diagonal(*args)
 
 def csr_scale_rows(*args):
-  """
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, signed char Ax, 
-        signed char Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, 
-        unsigned char Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, short Ax, short Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, 
-        unsigned short Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, int Ax, int Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, 
-        unsigned int Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, long long Ax, 
-        long long Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, 
-        unsigned long long Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, float Ax, float Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, double Ax, double Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, long double Ax, 
-        long double Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, 
-        npy_cfloat_wrapper Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, 
-        npy_cdouble_wrapper Xx)
-    csr_scale_rows(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, 
-        npy_clongdouble_wrapper Xx)
     """
-  return _csr.csr_scale_rows(*args)
+      csr_scale_rows(int n_row, int n_col, int Ap, int Aj, signed char Ax,
+          signed char Xx)
+      csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
+          unsigned char Xx)
+      csr_scale_rows(int n_row, int n_col, int Ap, int Aj, short Ax, short Xx)
+      csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
+          unsigned short Xx)
+      csr_scale_rows(int n_row, int n_col, int Ap, int Aj, int Ax, int Xx)
+      csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
+          unsigned int Xx)
+      csr_scale_rows(int n_row, int n_col, int Ap, int Aj, long long Ax,
+          long long Xx)
+      csr_scale_rows(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
+          unsigned long long Xx)
+      csr_scale_rows(int n_row, int n_col, int Ap, int Aj, float Ax, float Xx)
+      csr_scale_rows(int n_row, int n_col, int Ap, int Aj, double Ax, double Xx)
+      csr_scale_rows(int n_row, int n_col, int Ap, int Aj, long double Ax,
+          long double Xx)
+      csr_scale_rows(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
+          npy_cfloat_wrapper Xx)
+      csr_scale_rows(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
+          npy_cdouble_wrapper Xx)
+      csr_scale_rows(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
+          npy_clongdouble_wrapper Xx)
+      """
+    return _csr.csr_scale_rows(*args)
 
 def csr_scale_columns(*args):
-  """
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, signed char Ax, 
-        signed char Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, 
-        unsigned char Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, short Ax, short Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, 
-        unsigned short Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, int Ax, int Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, 
-        unsigned int Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, long long Ax, 
-        long long Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, 
-        unsigned long long Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, float Ax, float Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, double Ax, double Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, long double Ax, 
-        long double Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, 
-        npy_cfloat_wrapper Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, 
-        npy_cdouble_wrapper Xx)
-    csr_scale_columns(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, 
-        npy_clongdouble_wrapper Xx)
     """
-  return _csr.csr_scale_columns(*args)
+      csr_scale_columns(int n_row, int n_col, int Ap, int Aj, signed char Ax,
+          signed char Xx)
+      csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
+          unsigned char Xx)
+      csr_scale_columns(int n_row, int n_col, int Ap, int Aj, short Ax, short Xx)
+      csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
+          unsigned short Xx)
+      csr_scale_columns(int n_row, int n_col, int Ap, int Aj, int Ax, int Xx)
+      csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
+          unsigned int Xx)
+      csr_scale_columns(int n_row, int n_col, int Ap, int Aj, long long Ax,
+          long long Xx)
+      csr_scale_columns(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
+          unsigned long long Xx)
+      csr_scale_columns(int n_row, int n_col, int Ap, int Aj, float Ax, float Xx)
+      csr_scale_columns(int n_row, int n_col, int Ap, int Aj, double Ax, double Xx)
+      csr_scale_columns(int n_row, int n_col, int Ap, int Aj, long double Ax,
+          long double Xx)
+      csr_scale_columns(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
+          npy_cfloat_wrapper Xx)
+      csr_scale_columns(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
+          npy_cdouble_wrapper Xx)
+      csr_scale_columns(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
+          npy_clongdouble_wrapper Xx)
+      """
+    return _csr.csr_scale_columns(*args)
 
 def csr_tocsc(*args):
-  """
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, signed char Ax, 
-        int Bp, int Bi, signed char Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, 
-        int Bp, int Bi, unsigned char Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp, 
-        int Bi, short Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, 
-        int Bp, int Bi, unsigned short Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, 
-        int Bi, int Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, 
-        int Bp, int Bi, unsigned int Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, long long Ax, 
-        int Bp, int Bi, long long Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, 
-        int Bp, int Bi, unsigned long long Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, 
-        int Bi, float Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, 
-        int Bi, double Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, long double Ax, 
-        int Bp, int Bi, long double Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, 
-        int Bp, int Bi, npy_cfloat_wrapper Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, 
-        int Bp, int Bi, npy_cdouble_wrapper Bx)
-    csr_tocsc(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, 
-        int Bp, int Bi, npy_clongdouble_wrapper Bx)
     """
-  return _csr.csr_tocsc(*args)
+      csr_tocsc(int n_row, int n_col, int Ap, int Aj, signed char Ax,
+          int Bp, int Bi, signed char Bx)
+      csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
+          int Bp, int Bi, unsigned char Bx)
+      csr_tocsc(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp,
+          int Bi, short Bx)
+      csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
+          int Bp, int Bi, unsigned short Bx)
+      csr_tocsc(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp,
+          int Bi, int Bx)
+      csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
+          int Bp, int Bi, unsigned int Bx)
+      csr_tocsc(int n_row, int n_col, int Ap, int Aj, long long Ax,
+          int Bp, int Bi, long long Bx)
+      csr_tocsc(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
+          int Bp, int Bi, unsigned long long Bx)
+      csr_tocsc(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp,
+          int Bi, float Bx)
+      csr_tocsc(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp,
+          int Bi, double Bx)
+      csr_tocsc(int n_row, int n_col, int Ap, int Aj, long double Ax,
+          int Bp, int Bi, long double Bx)
+      csr_tocsc(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
+          int Bp, int Bi, npy_cfloat_wrapper Bx)
+      csr_tocsc(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
+          int Bp, int Bi, npy_cdouble_wrapper Bx)
+      csr_tocsc(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
+          int Bp, int Bi, npy_clongdouble_wrapper Bx)
+      """
+    return _csr.csr_tocsc(*args)
 
 def csr_tobsr(*args):
-  """
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        signed char Ax, int Bp, int Bj, signed char Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned char Ax, int Bp, int Bj, unsigned char Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        short Ax, int Bp, int Bj, short Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned short Ax, int Bp, int Bj, unsigned short Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        int Ax, int Bp, int Bj, int Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned int Ax, int Bp, int Bj, unsigned int Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        long long Ax, int Bp, int Bj, long long Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        unsigned long long Ax, int Bp, int Bj, unsigned long long Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        float Ax, int Bp, int Bj, float Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        double Ax, int Bp, int Bj, double Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        long double Ax, int Bp, int Bj, long double Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx)
-    csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj, 
-        npy_clongdouble_wrapper Ax, int Bp, int Bj, 
-        npy_clongdouble_wrapper Bx)
     """
-  return _csr.csr_tobsr(*args)
+      csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          signed char Ax, int Bp, int Bj, signed char Bx)
+      csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned char Ax, int Bp, int Bj, unsigned char Bx)
+      csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          short Ax, int Bp, int Bj, short Bx)
+      csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned short Ax, int Bp, int Bj, unsigned short Bx)
+      csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          int Ax, int Bp, int Bj, int Bx)
+      csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned int Ax, int Bp, int Bj, unsigned int Bx)
+      csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          long long Ax, int Bp, int Bj, long long Bx)
+      csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          unsigned long long Ax, int Bp, int Bj, unsigned long long Bx)
+      csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          float Ax, int Bp, int Bj, float Bx)
+      csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          double Ax, int Bp, int Bj, double Bx)
+      csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          long double Ax, int Bp, int Bj, long double Bx)
+      csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          npy_cfloat_wrapper Ax, int Bp, int Bj, npy_cfloat_wrapper Bx)
+      csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          npy_cdouble_wrapper Ax, int Bp, int Bj, npy_cdouble_wrapper Bx)
+      csr_tobsr(int n_row, int n_col, int R, int C, int Ap, int Aj,
+          npy_clongdouble_wrapper Ax, int Bp, int Bj,
+          npy_clongdouble_wrapper Bx)
+      """
+    return _csr.csr_tobsr(*args)
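
[The csr_tocsc and csr_tobsr overloads above are normally reached through the high-level conversion methods rather than called directly. A minimal sketch of that path, assuming the csr_matrix API of this revision, in which tobsr() also calls csr_count_blocks to size its output before invoking csr_tobsr:

    import numpy as np
    from scipy import sparse

    # Block-diagonal 4x4 matrix: two dense 2x2 blocks of ones.
    A = sparse.csr_matrix(np.kron(np.eye(2), np.ones((2, 2))))

    B = A.tocsc()                  # column-compressed copy (csr_tocsc kernel)
    C = A.tobsr(blocksize=(2, 2))  # 2x2 block storage (csr_tobsr kernel)

    assert (B.todense() == A.todense()).all()
    assert (C.todense() == A.todense()).all()
]
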
 
 def csr_matmat_pass2(*args):
-  """
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, signed char Ax, 
-        int Bp, int Bj, signed char Bx, int Cp, int Cj, 
-        signed char Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, 
-        int Bp, int Bj, unsigned char Bx, int Cp, 
-        int Cj, unsigned char Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp, 
-        int Bj, short Bx, int Cp, int Cj, short Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, 
-        int Bp, int Bj, unsigned short Bx, int Cp, 
-        int Cj, unsigned short Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, 
-        int Bj, int Bx, int Cp, int Cj, int Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, 
-        int Bp, int Bj, unsigned int Bx, int Cp, 
-        int Cj, unsigned int Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, long long Ax, 
-        int Bp, int Bj, long long Bx, int Cp, int Cj, 
-        long long Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, 
-        int Bp, int Bj, unsigned long long Bx, 
-        int Cp, int Cj, unsigned long long Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, 
-        int Bj, float Bx, int Cp, int Cj, float Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, 
-        int Bj, double Bx, int Cp, int Cj, double Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, long double Ax, 
-        int Bp, int Bj, long double Bx, int Cp, int Cj, 
-        long double Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, 
-        int Bp, int Bj, npy_cfloat_wrapper Bx, 
-        int Cp, int Cj, npy_cfloat_wrapper Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, 
-        int Bp, int Bj, npy_cdouble_wrapper Bx, 
-        int Cp, int Cj, npy_cdouble_wrapper Cx)
-    csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, 
-        int Bp, int Bj, npy_clongdouble_wrapper Bx, 
-        int Cp, int Cj, npy_clongdouble_wrapper Cx)
     """
-  return _csr.csr_matmat_pass2(*args)
+      csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, signed char Ax,
+          int Bp, int Bj, signed char Bx, int Cp, int Cj,
+          signed char Cx)
+      csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
+          int Bp, int Bj, unsigned char Bx, int Cp,
+          int Cj, unsigned char Cx)
+      csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp,
+          int Bj, short Bx, int Cp, int Cj, short Cx)
+      csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
+          int Bp, int Bj, unsigned short Bx, int Cp,
+          int Cj, unsigned short Cx)
+      csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp,
+          int Bj, int Bx, int Cp, int Cj, int Cx)
+      csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
+          int Bp, int Bj, unsigned int Bx, int Cp,
+          int Cj, unsigned int Cx)
+      csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, long long Ax,
+          int Bp, int Bj, long long Bx, int Cp, int Cj,
+          long long Cx)
+      csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
+          int Bp, int Bj, unsigned long long Bx,
+          int Cp, int Cj, unsigned long long Cx)
+      csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp,
+          int Bj, float Bx, int Cp, int Cj, float Cx)
+      csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp,
+          int Bj, double Bx, int Cp, int Cj, double Cx)
+      csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, long double Ax,
+          int Bp, int Bj, long double Bx, int Cp, int Cj,
+          long double Cx)
+      csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
+          int Bp, int Bj, npy_cfloat_wrapper Bx,
+          int Cp, int Cj, npy_cfloat_wrapper Cx)
+      csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
+          int Bp, int Bj, npy_cdouble_wrapper Bx,
+          int Cp, int Cj, npy_cdouble_wrapper Cx)
+      csr_matmat_pass2(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
+          int Bp, int Bj, npy_clongdouble_wrapper Bx,
+          int Cp, int Cj, npy_clongdouble_wrapper Cx)
+      """
+    return _csr.csr_matmat_pass2(*args)
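
[csr_matmat_pass1 (listed earlier in this file) and csr_matmat_pass2 together implement the two-pass CSR matrix product: the first pass only counts the output pattern and fills the row pointer Cp, which lets the caller allocate Cj and Cx exactly before the second pass fills them. A hedged sketch of that calling sequence, assuming the scipy.sparse.sparsetools module layout of this revision and int32 index arrays:

    import numpy as np
    from scipy import sparse
    from scipy.sparse import sparsetools

    A = sparse.csr_matrix(np.array([[1., 0.], [2., 3.]]))
    B = sparse.csr_matrix(np.array([[0., 4.], [5., 0.]]))
    n_row, n_col = A.shape[0], B.shape[1]

    # Pass 1: compute the row pointer of C = A*B (and hence its nnz).
    Cp = np.empty(n_row + 1, dtype=np.intc)
    sparsetools.csr_matmat_pass1(n_row, n_col, A.indptr, A.indices,
                                 B.indptr, B.indices, Cp)

    # Allocate the index/value arrays exactly, then fill them in pass 2.
    nnz = Cp[-1]
    Cj = np.empty(nnz, dtype=np.intc)
    Cx = np.empty(nnz, dtype=np.float64)
    sparsetools.csr_matmat_pass2(n_row, n_col, A.indptr, A.indices, A.data,
                                 B.indptr, B.indices, B.data, Cp, Cj, Cx)

    C = sparse.csr_matrix((Cx, Cj, Cp), shape=(n_row, n_col))
    assert np.allclose(C.todense(), (A * B).todense())
]
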
 
 def csr_matvec(*args):
-  """
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, signed char Ax, 
-        signed char Xx, signed char Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, 
-        unsigned char Xx, unsigned char Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, short Ax, short Xx, 
-        short Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, 
-        unsigned short Xx, unsigned short Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, int Ax, int Xx, 
-        int Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, 
-        unsigned int Xx, unsigned int Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, long long Ax, 
-        long long Xx, long long Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, 
-        unsigned long long Xx, unsigned long long Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, float Ax, float Xx, 
-        float Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, double Ax, double Xx, 
-        double Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, long double Ax, 
-        long double Xx, long double Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, 
-        npy_cfloat_wrapper Xx, npy_cfloat_wrapper Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, 
-        npy_cdouble_wrapper Xx, npy_cdouble_wrapper Yx)
-    csr_matvec(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, 
-        npy_clongdouble_wrapper Xx, npy_clongdouble_wrapper Yx)
     """
-  return _csr.csr_matvec(*args)
+      csr_matvec(int n_row, int n_col, int Ap, int Aj, signed char Ax,
+          signed char Xx, signed char Yx)
+      csr_matvec(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
+          unsigned char Xx, unsigned char Yx)
+      csr_matvec(int n_row, int n_col, int Ap, int Aj, short Ax, short Xx,
+          short Yx)
+      csr_matvec(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
+          unsigned short Xx, unsigned short Yx)
+      csr_matvec(int n_row, int n_col, int Ap, int Aj, int Ax, int Xx,
+          int Yx)
+      csr_matvec(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
+          unsigned int Xx, unsigned int Yx)
+      csr_matvec(int n_row, int n_col, int Ap, int Aj, long long Ax,
+          long long Xx, long long Yx)
+      csr_matvec(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
+          unsigned long long Xx, unsigned long long Yx)
+      csr_matvec(int n_row, int n_col, int Ap, int Aj, float Ax, float Xx,
+          float Yx)
+      csr_matvec(int n_row, int n_col, int Ap, int Aj, double Ax, double Xx,
+          double Yx)
+      csr_matvec(int n_row, int n_col, int Ap, int Aj, long double Ax,
+          long double Xx, long double Yx)
+      csr_matvec(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
+          npy_cfloat_wrapper Xx, npy_cfloat_wrapper Yx)
+      csr_matvec(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
+          npy_cdouble_wrapper Xx, npy_cdouble_wrapper Yx)
+      csr_matvec(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
+          npy_clongdouble_wrapper Xx, npy_clongdouble_wrapper Yx)
+      """
+    return _csr.csr_matvec(*args)
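
[csr_matvec is the kernel behind ordinary matrix-vector products with csr_matrix; the overloads above differ only in the value type, while the index arrays are always int. A minimal sketch of both the high-level and the direct call, assuming the scipy.sparse.sparsetools layout of this revision and a caller-allocated output vector (zero-initialized, so the result is the same whether the kernel overwrites or accumulates):

    import numpy as np
    from scipy import sparse
    from scipy.sparse import sparsetools

    A = sparse.csr_matrix(np.array([[1., 0., 2.],
                                    [0., 3., 0.]]))
    x = np.array([1., 1., 1.])

    # High-level path: dispatches to csr_matvec internally.
    y = A * x

    # Direct call with the raw CSR arrays; the result is written into y2.
    y2 = np.zeros(A.shape[0])
    sparsetools.csr_matvec(A.shape[0], A.shape[1],
                           A.indptr, A.indices, A.data, x, y2)

    assert np.allclose(y, y2)   # both give [3., 3.]
]
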
 
 def csr_elmul_csr(*args):
-  """
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax, 
-        int Bp, int Bj, signed char Bx, int Cp, int Cj, 
-        signed char Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, 
-        int Bp, int Bj, unsigned char Bx, int Cp, 
-        int Cj, unsigned char Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp, 
-        int Bj, short Bx, int Cp, int Cj, short Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, 
-        int Bp, int Bj, unsigned short Bx, int Cp, 
-        int Cj, unsigned short Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, 
-        int Bj, int Bx, int Cp, int Cj, int Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, 
-        int Bp, int Bj, unsigned int Bx, int Cp, 
-        int Cj, unsigned int Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, long long Ax, 
-        int Bp, int Bj, long long Bx, int Cp, int Cj, 
-        long long Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, 
-        int Bp, int Bj, unsigned long long Bx, 
-        int Cp, int Cj, unsigned long long Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, 
-        int Bj, float Bx, int Cp, int Cj, float Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, 
-        int Bj, double Bx, int Cp, int Cj, double Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, long double Ax, 
-        int Bp, int Bj, long double Bx, int Cp, int Cj, 
-        long double Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, 
-        int Bp, int Bj, npy_cfloat_wrapper Bx, 
-        int Cp, int Cj, npy_cfloat_wrapper Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, 
-        int Bp, int Bj, npy_cdouble_wrapper Bx, 
-        int Cp, int Cj, npy_cdouble_wrapper Cx)
-    csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, 
-        int Bp, int Bj, npy_clongdouble_wrapper Bx, 
-        int Cp, int Cj, npy_clongdouble_wrapper Cx)
     """
-  return _csr.csr_elmul_csr(*args)
+      csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax,
+          int Bp, int Bj, signed char Bx, int Cp, int Cj,
+          signed char Cx)
+      csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
+          int Bp, int Bj, unsigned char Bx, int Cp,
+          int Cj, unsigned char Cx)
+      csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp,
+          int Bj, short Bx, int Cp, int Cj, short Cx)
+      csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
+          int Bp, int Bj, unsigned short Bx, int Cp,
+          int Cj, unsigned short Cx)
+      csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp,
+          int Bj, int Bx, int Cp, int Cj, int Cx)
+      csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
+          int Bp, int Bj, unsigned int Bx, int Cp,
+          int Cj, unsigned int Cx)
+      csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, long long Ax,
+          int Bp, int Bj, long long Bx, int Cp, int Cj,
+          long long Cx)
+      csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
+          int Bp, int Bj, unsigned long long Bx,
+          int Cp, int Cj, unsigned long long Cx)
+      csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp,
+          int Bj, float Bx, int Cp, int Cj, float Cx)
+      csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp,
+          int Bj, double Bx, int Cp, int Cj, double Cx)
+      csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, long double Ax,
+          int Bp, int Bj, long double Bx, int Cp, int Cj,
+          long double Cx)
+      csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
+          int Bp, int Bj, npy_cfloat_wrapper Bx,
+          int Cp, int Cj, npy_cfloat_wrapper Cx)
+      csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
+          int Bp, int Bj, npy_cdouble_wrapper Bx,
+          int Cp, int Cj, npy_cdouble_wrapper Cx)
+      csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
+          int Bp, int Bj, npy_clongdouble_wrapper Bx,
+          int Cp, int Cj, npy_clongdouble_wrapper Cx)
+      """
+    return _csr.csr_elmul_csr(*args)
 
 def csr_eldiv_csr(*args):
-  """
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax, 
-        int Bp, int Bj, signed char Bx, int Cp, int Cj, 
-        signed char Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, 
-        int Bp, int Bj, unsigned char Bx, int Cp, 
-        int Cj, unsigned char Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp, 
-        int Bj, short Bx, int Cp, int Cj, short Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, 
-        int Bp, int Bj, unsigned short Bx, int Cp, 
-        int Cj, unsigned short Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, 
-        int Bj, int Bx, int Cp, int Cj, int Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, 
-        int Bp, int Bj, unsigned int Bx, int Cp, 
-        int Cj, unsigned int Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, long long Ax, 
-        int Bp, int Bj, long long Bx, int Cp, int Cj, 
-        long long Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, 
-        int Bp, int Bj, unsigned long long Bx, 
-        int Cp, int Cj, unsigned long long Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, 
-        int Bj, float Bx, int Cp, int Cj, float Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, 
-        int Bj, double Bx, int Cp, int Cj, double Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, long double Ax, 
-        int Bp, int Bj, long double Bx, int Cp, int Cj, 
-        long double Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, 
-        int Bp, int Bj, npy_cfloat_wrapper Bx, 
-        int Cp, int Cj, npy_cfloat_wrapper Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, 
-        int Bp, int Bj, npy_cdouble_wrapper Bx, 
-        int Cp, int Cj, npy_cdouble_wrapper Cx)
-    csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, 
-        int Bp, int Bj, npy_clongdouble_wrapper Bx, 
-        int Cp, int Cj, npy_clongdouble_wrapper Cx)
     """
-  return _csr.csr_eldiv_csr(*args)
+      csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax,
+          int Bp, int Bj, signed char Bx, int Cp, int Cj,
+          signed char Cx)
+      csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
+          int Bp, int Bj, unsigned char Bx, int Cp,
+          int Cj, unsigned char Cx)
+      csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp,
+          int Bj, short Bx, int Cp, int Cj, short Cx)
+      csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
+          int Bp, int Bj, unsigned short Bx, int Cp,
+          int Cj, unsigned short Cx)
+      csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp,
+          int Bj, int Bx, int Cp, int Cj, int Cx)
+      csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
+          int Bp, int Bj, unsigned int Bx, int Cp,
+          int Cj, unsigned int Cx)
+      csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, long long Ax,
+          int Bp, int Bj, long long Bx, int Cp, int Cj,
+          long long Cx)
+      csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
+          int Bp, int Bj, unsigned long long Bx,
+          int Cp, int Cj, unsigned long long Cx)
+      csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp,
+          int Bj, float Bx, int Cp, int Cj, float Cx)
+      csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp,
+          int Bj, double Bx, int Cp, int Cj, double Cx)
+      csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, long double Ax,
+          int Bp, int Bj, long double Bx, int Cp, int Cj,
+          long double Cx)
+      csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
+          int Bp, int Bj, npy_cfloat_wrapper Bx,
+          int Cp, int Cj, npy_cfloat_wrapper Cx)
+      csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
+          int Bp, int Bj, npy_cdouble_wrapper Bx,
+          int Cp, int Cj, npy_cdouble_wrapper Cx)
+      csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
+          int Bp, int Bj, npy_clongdouble_wrapper Bx,
+          int Cp, int Cj, npy_clongdouble_wrapper Cx)
+      """
+    return _csr.csr_eldiv_csr(*args)
 
 def csr_plus_csr(*args):
-  """
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax, 
-        int Bp, int Bj, signed char Bx, int Cp, int Cj, 
-        signed char Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, 
-        int Bp, int Bj, unsigned char Bx, int Cp, 
-        int Cj, unsigned char Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp, 
-        int Bj, short Bx, int Cp, int Cj, short Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, 
-        int Bp, int Bj, unsigned short Bx, int Cp, 
-        int Cj, unsigned short Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, 
-        int Bj, int Bx, int Cp, int Cj, int Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, 
-        int Bp, int Bj, unsigned int Bx, int Cp, 
-        int Cj, unsigned int Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, long long Ax, 
-        int Bp, int Bj, long long Bx, int Cp, int Cj, 
-        long long Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, 
-        int Bp, int Bj, unsigned long long Bx, 
-        int Cp, int Cj, unsigned long long Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, 
-        int Bj, float Bx, int Cp, int Cj, float Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, 
-        int Bj, double Bx, int Cp, int Cj, double Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, long double Ax, 
-        int Bp, int Bj, long double Bx, int Cp, int Cj, 
-        long double Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, 
-        int Bp, int Bj, npy_cfloat_wrapper Bx, 
-        int Cp, int Cj, npy_cfloat_wrapper Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, 
-        int Bp, int Bj, npy_cdouble_wrapper Bx, 
-        int Cp, int Cj, npy_cdouble_wrapper Cx)
-    csr_plus_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, 
-        int Bp, int Bj, npy_clongdouble_wrapper Bx, 
-        int Cp, int Cj, npy_clongdouble_wrapper Cx)
     """
-  return _csr.csr_plus_csr(*args)
+      csr_plus_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax,
+          int Bp, int Bj, signed char Bx, int Cp, int Cj,
+          signed char Cx)
+      csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
+          int Bp, int Bj, unsigned char Bx, int Cp,
+          int Cj, unsigned char Cx)
+      csr_plus_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp,
+          int Bj, short Bx, int Cp, int Cj, short Cx)
+      csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
+          int Bp, int Bj, unsigned short Bx, int Cp,
+          int Cj, unsigned short Cx)
+      csr_plus_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp,
+          int Bj, int Bx, int Cp, int Cj, int Cx)
+      csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
+          int Bp, int Bj, unsigned int Bx, int Cp,
+          int Cj, unsigned int Cx)
+      csr_plus_csr(int n_row, int n_col, int Ap, int Aj, long long Ax,
+          int Bp, int Bj, long long Bx, int Cp, int Cj,
+          long long Cx)
+      csr_plus_csr(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
+          int Bp, int Bj, unsigned long long Bx,
+          int Cp, int Cj, unsigned long long Cx)
+      csr_plus_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp,
+          int Bj, float Bx, int Cp, int Cj, float Cx)
+      csr_plus_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp,
+          int Bj, double Bx, int Cp, int Cj, double Cx)
+      csr_plus_csr(int n_row, int n_col, int Ap, int Aj, long double Ax,
+          int Bp, int Bj, long double Bx, int Cp, int Cj,
+          long double Cx)
+      csr_plus_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
+          int Bp, int Bj, npy_cfloat_wrapper Bx,
+          int Cp, int Cj, npy_cfloat_wrapper Cx)
+      csr_plus_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
+          int Bp, int Bj, npy_cdouble_wrapper Bx,
+          int Cp, int Cj, npy_cdouble_wrapper Cx)
+      csr_plus_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
+          int Bp, int Bj, npy_clongdouble_wrapper Bx,
+          int Cp, int Cj, npy_clongdouble_wrapper Cx)
+      """
+    return _csr.csr_plus_csr(*args)
 
 def csr_minus_csr(*args):
-  """
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax, 
-        int Bp, int Bj, signed char Bx, int Cp, int Cj, 
-        signed char Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, 
-        int Bp, int Bj, unsigned char Bx, int Cp, 
-        int Cj, unsigned char Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp, 
-        int Bj, short Bx, int Cp, int Cj, short Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, 
-        int Bp, int Bj, unsigned short Bx, int Cp, 
-        int Cj, unsigned short Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, 
-        int Bj, int Bx, int Cp, int Cj, int Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, 
-        int Bp, int Bj, unsigned int Bx, int Cp, 
-        int Cj, unsigned int Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, long long Ax, 
-        int Bp, int Bj, long long Bx, int Cp, int Cj, 
-        long long Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, 
-        int Bp, int Bj, unsigned long long Bx, 
-        int Cp, int Cj, unsigned long long Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, 
-        int Bj, float Bx, int Cp, int Cj, float Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, 
-        int Bj, double Bx, int Cp, int Cj, double Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, long double Ax, 
-        int Bp, int Bj, long double Bx, int Cp, int Cj, 
-        long double Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, 
-        int Bp, int Bj, npy_cfloat_wrapper Bx, 
-        int Cp, int Cj, npy_cfloat_wrapper Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, 
-        int Bp, int Bj, npy_cdouble_wrapper Bx, 
-        int Cp, int Cj, npy_cdouble_wrapper Cx)
-    csr_minus_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, 
-        int Bp, int Bj, npy_clongdouble_wrapper Bx, 
-        int Cp, int Cj, npy_clongdouble_wrapper Cx)
     """
-  return _csr.csr_minus_csr(*args)
+      csr_minus_csr(int n_row, int n_col, int Ap, int Aj, signed char Ax,
+          int Bp, int Bj, signed char Bx, int Cp, int Cj,
+          signed char Cx)
+      csr_minus_csr(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
+          int Bp, int Bj, unsigned char Bx, int Cp,
+          int Cj, unsigned char Cx)
+      csr_minus_csr(int n_row, int n_col, int Ap, int Aj, short Ax, int Bp,
+          int Bj, short Bx, int Cp, int Cj, short Cx)
+      csr_minus_csr(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
+          int Bp, int Bj, unsigned short Bx, int Cp,
+          int Cj, unsigned short Cx)
+      csr_minus_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp,
+          int Bj, int Bx, int Cp, int Cj, int Cx)
+      csr_minus_csr(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
+          int Bp, int Bj, unsigned int Bx, int Cp,
+          int Cj, unsigned int Cx)
+      csr_minus_csr(int n_row, int n_col, int Ap, int Aj, long long Ax,
+          int Bp, int Bj, long long Bx, int Cp, int Cj,
+          long long Cx)
+      csr_minus_csr(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
+          int Bp, int Bj, unsigned long long Bx,
+          int Cp, int Cj, unsigned long long Cx)
+      csr_minus_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp,
+          int Bj, float Bx, int Cp, int Cj, float Cx)
+      csr_minus_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp,
+          int Bj, double Bx, int Cp, int Cj, double Cx)
+      csr_minus_csr(int n_row, int n_col, int Ap, int Aj, long double Ax,
+          int Bp, int Bj, long double Bx, int Cp, int Cj,
+          long double Cx)
+      csr_minus_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
+          int Bp, int Bj, npy_cfloat_wrapper Bx,
+          int Cp, int Cj, npy_cfloat_wrapper Cx)
+      csr_minus_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
+          int Bp, int Bj, npy_cdouble_wrapper Bx,
+          int Cp, int Cj, npy_cdouble_wrapper Cx)
+      csr_minus_csr(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
+          int Bp, int Bj, npy_clongdouble_wrapper Bx,
+          int Cp, int Cj, npy_clongdouble_wrapper Cx)
+      """
+    return _csr.csr_minus_csr(*args)
 
 def csr_sort_indices(*args):
-  """
-    csr_sort_indices(int n_row, int Ap, int Aj, signed char Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, unsigned char Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, short Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, unsigned short Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, int Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, unsigned int Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, long long Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, unsigned long long Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, float Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, double Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, long double Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, npy_cfloat_wrapper Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, npy_cdouble_wrapper Ax)
-    csr_sort_indices(int n_row, int Ap, int Aj, npy_clongdouble_wrapper Ax)
     """
-  return _csr.csr_sort_indices(*args)
+      csr_sort_indices(int n_row, int Ap, int Aj, signed char Ax)
+      csr_sort_indices(int n_row, int Ap, int Aj, unsigned char Ax)
+      csr_sort_indices(int n_row, int Ap, int Aj, short Ax)
+      csr_sort_indices(int n_row, int Ap, int Aj, unsigned short Ax)
+      csr_sort_indices(int n_row, int Ap, int Aj, int Ax)
+      csr_sort_indices(int n_row, int Ap, int Aj, unsigned int Ax)
+      csr_sort_indices(int n_row, int Ap, int Aj, long long Ax)
+      csr_sort_indices(int n_row, int Ap, int Aj, unsigned long long Ax)
+      csr_sort_indices(int n_row, int Ap, int Aj, float Ax)
+      csr_sort_indices(int n_row, int Ap, int Aj, double Ax)
+      csr_sort_indices(int n_row, int Ap, int Aj, long double Ax)
+      csr_sort_indices(int n_row, int Ap, int Aj, npy_cfloat_wrapper Ax)
+      csr_sort_indices(int n_row, int Ap, int Aj, npy_cdouble_wrapper Ax)
+      csr_sort_indices(int n_row, int Ap, int Aj, npy_clongdouble_wrapper Ax)
+      """
+    return _csr.csr_sort_indices(*args)
 
 def csr_eliminate_zeros(*args):
-  """
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, signed char Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned char Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, short Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned short Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, int Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned int Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, long long Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, float Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, double Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, long double Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax)
-    csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax)
     """
-  return _csr.csr_eliminate_zeros(*args)
+      csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, signed char Ax)
+      csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned char Ax)
+      csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, short Ax)
+      csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned short Ax)
+      csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, int Ax)
+      csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned int Ax)
+      csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, long long Ax)
+      csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax)
+      csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, float Ax)
+      csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, double Ax)
+      csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, long double Ax)
+      csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax)
+      csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax)
+      csr_eliminate_zeros(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax)
+      """
+    return _csr.csr_eliminate_zeros(*args)
 
 def csr_sum_duplicates(*args):
-  """
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, signed char Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned char Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, short Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned short Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, int Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned int Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, long long Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, float Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, double Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, long double Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax)
-    csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax)
     """
-  return _csr.csr_sum_duplicates(*args)
+      csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, signed char Ax)
+      csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned char Ax)
+      csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, short Ax)
+      csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned short Ax)
+      csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, int Ax)
+      csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned int Ax)
+      csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, long long Ax)
+      csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax)
+      csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, float Ax)
+      csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, double Ax)
+      csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, long double Ax)
+      csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax)
+      csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax)
+      csr_sum_duplicates(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax)
+      """
+    return _csr.csr_sum_duplicates(*args)
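
[csr_sort_indices, csr_eliminate_zeros and csr_sum_duplicates are the in-place canonicalization kernels behind the corresponding csr_matrix methods; sum_duplicates is also what collapses repeated entries during COO-to-CSR conversion. A small sketch using only the public methods, assuming the csr_matrix API of this revision:

    import numpy as np
    from scipy import sparse

    # Row 0 has unsorted column indices; row 1 stores an explicit zero.
    indptr  = np.array([0, 2, 3])
    indices = np.array([1, 0, 1])
    data    = np.array([2., 1., 0.])
    A = sparse.csr_matrix((data, indices, indptr), shape=(2, 2))

    A.sort_indices()      # csr_sort_indices: row 0 becomes columns [0, 1]
    A.eliminate_zeros()   # csr_eliminate_zeros: drops the stored 0.0
    A.sum_duplicates()    # csr_sum_duplicates: no-op here, no repeated entries

    assert A.nnz == 2
]
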
 
 def get_csr_submatrix(*args):
-  """
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, signed char Ax, 
-        int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp, 
-        std::vector<(int)> Bj, std::vector<(signed char)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned char Ax, 
-        int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp, 
-        std::vector<(int)> Bj, std::vector<(unsigned char)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, short Ax, int ir0, 
-        int ir1, int ic0, int ic1, std::vector<(int)> Bp, 
-        std::vector<(int)> Bj, std::vector<(short)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned short Ax, 
-        int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp, 
-        std::vector<(int)> Bj, std::vector<(unsigned short)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, int Ax, int ir0, 
-        int ir1, int ic0, int ic1, std::vector<(int)> Bp, 
-        std::vector<(int)> Bj, std::vector<(int)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned int Ax, 
-        int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp, 
-        std::vector<(int)> Bj, std::vector<(unsigned int)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, long long Ax, 
-        int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp, 
-        std::vector<(int)> Bj, std::vector<(long long)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax, 
-        int ir0, int ir1, int ic0, int ic1, 
-        std::vector<(int)> Bp, std::vector<(int)> Bj, 
-        std::vector<(unsigned long long)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, float Ax, int ir0, 
-        int ir1, int ic0, int ic1, std::vector<(int)> Bp, 
-        std::vector<(int)> Bj, std::vector<(float)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, double Ax, int ir0, 
-        int ir1, int ic0, int ic1, std::vector<(int)> Bp, 
-        std::vector<(int)> Bj, std::vector<(double)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, long double Ax, 
-        int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp, 
-        std::vector<(int)> Bj, std::vector<(long double)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, 
-        int ir0, int ir1, int ic0, int ic1, 
-        std::vector<(int)> Bp, std::vector<(int)> Bj, 
-        std::vector<(npy_cfloat_wrapper)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, 
-        int ir0, int ir1, int ic0, int ic1, 
-        std::vector<(int)> Bp, std::vector<(int)> Bj, 
-        std::vector<(npy_cdouble_wrapper)> Bx)
-    get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax, 
-        int ir0, int ir1, int ic0, int ic1, 
-        std::vector<(int)> Bp, std::vector<(int)> Bj, 
-        std::vector<(npy_clongdouble_wrapper)> Bx)
     """
-  return _csr.get_csr_submatrix(*args)
-
+      get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, signed char Ax,
+          int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp,
+          std::vector<(int)> Bj, std::vector<(signed char)> Bx)
+      get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned char Ax,
+          int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp,
+          std::vector<(int)> Bj, std::vector<(unsigned char)> Bx)
+      get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, short Ax, int ir0,
+          int ir1, int ic0, int ic1, std::vector<(int)> Bp,
+          std::vector<(int)> Bj, std::vector<(short)> Bx)
+      get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned short Ax,
+          int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp,
+          std::vector<(int)> Bj, std::vector<(unsigned short)> Bx)
+      get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, int Ax, int ir0,
+          int ir1, int ic0, int ic1, std::vector<(int)> Bp,
+          std::vector<(int)> Bj, std::vector<(int)> Bx)
+      get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned int Ax,
+          int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp,
+          std::vector<(int)> Bj, std::vector<(unsigned int)> Bx)
+      get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, long long Ax,
+          int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp,
+          std::vector<(int)> Bj, std::vector<(long long)> Bx)
+      get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, unsigned long long Ax,
+          int ir0, int ir1, int ic0, int ic1,
+          std::vector<(int)> Bp, std::vector<(int)> Bj,
+          std::vector<(unsigned long long)> Bx)
+      get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, float Ax, int ir0,
+          int ir1, int ic0, int ic1, std::vector<(int)> Bp,
+          std::vector<(int)> Bj, std::vector<(float)> Bx)
+      get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, double Ax, int ir0,
+          int ir1, int ic0, int ic1, std::vector<(int)> Bp,
+          std::vector<(int)> Bj, std::vector<(double)> Bx)
+      get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, long double Ax,
+          int ir0, int ir1, int ic0, int ic1, std::vector<(int)> Bp,
+          std::vector<(int)> Bj, std::vector<(long double)> Bx)
+      get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax,
+          int ir0, int ir1, int ic0, int ic1,
+          std::vector<(int)> Bp, std::vector<(int)> Bj,
+          std::vector<(npy_cfloat_wrapper)> Bx)
+      get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax,
+          int ir0, int ir1, int ic0, int ic1,
+          std::vector<(int)> Bp, std::vector<(int)> Bj,
+          std::vector<(npy_cdouble_wrapper)> Bx)
+      get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, npy_clongdouble_wrapper Ax,
+          int ir0, int ir1, int ic0, int ic1,
+          std::vector<(int)> Bp, std::vector<(int)> Bj,
+          std::vector<(npy_clongdouble_wrapper)> Bx)
+      """
+    return _csr.get_csr_submatrix(*args)

Modified: trunk/scipy/sparse/sparsetools/dia.py
===================================================================
--- trunk/scipy/sparse/sparsetools/dia.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/sparsetools/dia.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -51,40 +51,39 @@
 
 
 def dia_matvec(*args):
-  """
-    dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, 
-        signed char diags, signed char Xx, signed char Yx)
-    dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, 
-        unsigned char diags, unsigned char Xx, unsigned char Yx)
-    dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, 
-        short diags, short Xx, short Yx)
-    dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, 
-        unsigned short diags, unsigned short Xx, 
-        unsigned short Yx)
-    dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, 
-        int diags, int Xx, int Yx)
-    dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, 
-        unsigned int diags, unsigned int Xx, unsigned int Yx)
-    dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, 
-        long long diags, long long Xx, long long Yx)
-    dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, 
-        unsigned long long diags, unsigned long long Xx, 
-        unsigned long long Yx)
-    dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, 
-        float diags, float Xx, float Yx)
-    dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, 
-        double diags, double Xx, double Yx)
-    dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, 
-        long double diags, long double Xx, long double Yx)
-    dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, 
-        npy_cfloat_wrapper diags, npy_cfloat_wrapper Xx, 
-        npy_cfloat_wrapper Yx)
-    dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, 
-        npy_cdouble_wrapper diags, npy_cdouble_wrapper Xx, 
-        npy_cdouble_wrapper Yx)
-    dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets, 
-        npy_clongdouble_wrapper diags, npy_clongdouble_wrapper Xx, 
-        npy_clongdouble_wrapper Yx)
     """
-  return _dia.dia_matvec(*args)
-
+      dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets,
+          signed char diags, signed char Xx, signed char Yx)
+      dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets,
+          unsigned char diags, unsigned char Xx, unsigned char Yx)
+      dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets,
+          short diags, short Xx, short Yx)
+      dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets,
+          unsigned short diags, unsigned short Xx,
+          unsigned short Yx)
+      dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets,
+          int diags, int Xx, int Yx)
+      dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets,
+          unsigned int diags, unsigned int Xx, unsigned int Yx)
+      dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets,
+          long long diags, long long Xx, long long Yx)
+      dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets,
+          unsigned long long diags, unsigned long long Xx,
+          unsigned long long Yx)
+      dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets,
+          float diags, float Xx, float Yx)
+      dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets,
+          double diags, double Xx, double Yx)
+      dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets,
+          long double diags, long double Xx, long double Yx)
+      dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets,
+          npy_cfloat_wrapper diags, npy_cfloat_wrapper Xx,
+          npy_cfloat_wrapper Yx)
+      dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets,
+          npy_cdouble_wrapper diags, npy_cdouble_wrapper Xx,
+          npy_cdouble_wrapper Yx)
+      dia_matvec(int n_row, int n_col, int n_diags, int L, int offsets,
+          npy_clongdouble_wrapper diags, npy_clongdouble_wrapper Xx,
+          npy_clongdouble_wrapper Yx)
+      """
+    return _dia.dia_matvec(*args)
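
[dia_matvec is the single kernel the DIA format needs: n_diags and L are the shape of the diags array (number of stored diagonals and their padded length), and the product lands in a caller-allocated Yx, zero-initialized here so the sketch is valid whether the kernel overwrites or accumulates. A hedged sketch of the direct call matching the signature above; the high-level equivalent is simply A*x on a dia_matrix:

    import numpy as np
    from scipy import sparse
    from scipy.sparse import sparsetools

    A = sparse.dia_matrix(np.array([[1., 2., 0.],
                                    [0., 3., 4.],
                                    [0., 0., 5.]]))
    x = np.array([1., 1., 1.])
    y = np.zeros(A.shape[0])

    n_diags, L = A.data.shape   # stored diagonals and their padded length
    sparsetools.dia_matvec(A.shape[0], A.shape[1], n_diags, L,
                           A.offsets, A.data, x, y)

    assert np.allclose(y, A * x)   # [3., 7., 5.]
]
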

Modified: trunk/scipy/sparse/sparsetools/setupscons.py
===================================================================
--- trunk/scipy/sparse/sparsetools/setupscons.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/sparsetools/setupscons.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -7,7 +7,7 @@
     import numpy
     from numpy.distutils.misc_util import Configuration
 
-    config = Configuration('sparsetools',parent_package,top_path, 
+    config = Configuration('sparsetools',parent_package,top_path,
                            setup_name = 'setupscons.py')
 
     config.add_sconscript('SConstruct')

Modified: trunk/scipy/sparse/spfuncs.py
===================================================================
--- trunk/scipy/sparse/spfuncs.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/spfuncs.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -82,12 +82,12 @@
         elif e33 > efficiency:
             return (3,3)
         elif e22 > efficiency:
-            return (2,2) 
+            return (2,2)
         else:
             return (1,1)
 
 def count_blocks(A,blocksize):
-    """For a given blocksize=(r,c) count the number of occupied 
+    """For a given blocksize=(r,c) count the number of occupied
     blocks in a sparse matrix A
     """
     r,c = blocksize
@@ -101,4 +101,3 @@
         return count_blocks(A.T,(c,r))
     else:
         return count_blocks(csr_matrix(A),blocksize)
-
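
As a quick illustration of what count_blocks() (from the scipy.sparse.spfuncs module patched here) reports: the brute-force reference used by the test suite is simply the number of distinct (block-row, block-column) pairs hit by the nonzeros. The matrix below is illustrative only.

    import numpy as np
    from scipy.sparse import csr_matrix

    A = csr_matrix(np.array([[1, 0, 2, 0],
                             [0, 0, 0, 0],
                             [0, 0, 4, 5]]))
    R, C = 2, 2
    I, J = A.nonzero()
    occupied = len(set(zip(I // R, J // C)))   # blocks (0,0), (0,1), (1,1)
    assert occupied == 3                       # count_blocks(A, (2,2)) should agree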

Modified: trunk/scipy/sparse/sputils.py
===================================================================
--- trunk/scipy/sparse/sputils.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/sputils.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -16,14 +16,14 @@
 supported_dtypes = [ np.typeDict[x] for x in supported_dtypes]
 
 def upcast(*args):
-    """Returns the nearest supported sparse dtype for the 
+    """Returns the nearest supported sparse dtype for the
     combination of one or more types.
 
     upcast(t0, t1, ..., tn) -> T  where T is a supported dtype
 
     Examples
     --------
-    
+
     >>> upcast('int32')
     <type 'numpy.int32'>
     >>> upcast('bool')
@@ -38,12 +38,12 @@
     for t in args[1:]:
         sample = sample + np.array([0],dtype=t)
 
-    upcast = sample.dtype 
+    upcast = sample.dtype
 
     for t in supported_dtypes:
         if np.can_cast(sample.dtype,t):
             return t
-    
+
     raise TypeError,'no supported conversion for types: %s' % args
 
 
@@ -118,4 +118,3 @@
 
 def isdense(x):
     return _isinstance(x, np.ndarray)
-
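
The upcast() logic shown above can be summarised as: let NumPy's promotion rules pick a common dtype for the arguments, then return the first entry of supported_dtypes that it can be cast to. A small standalone sketch of that logic, using a shortened, hypothetical supported list:

    import numpy as np

    # shortened, hypothetical supported list for illustration only
    supported = [np.int32, np.int64, np.float32, np.float64, np.complex128]

    def upcast_sketch(*dtypes):
        sample = np.array([0], dtype=dtypes[0])
        for t in dtypes[1:]:
            sample = sample + np.array([0], dtype=t)   # let NumPy promote
        for t in supported:
            if np.can_cast(sample.dtype, t):
                return t
        raise TypeError('no supported conversion for types: %s' % (dtypes,))

    assert upcast_sketch('int32') is np.int32
    assert upcast_sketch('bool', 'float32') is np.float32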

Modified: trunk/scipy/sparse/tests/bench_sparse.py
===================================================================
--- trunk/scipy/sparse/tests/bench_sparse.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/tests/bench_sparse.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -50,7 +50,7 @@
         #matrices.append( ('A','Identity', sparse.identity(500**2,format='csr')) )
         matrices.append( ('A','Poisson5pt', poisson2d(500,format='csr'))  )
         matrices.append( ('B','Poisson5pt^2', poisson2d(500,format='csr')**2)  )
-   
+
         print
         print '                 Sparse Matrix Arithmetic'
         print '===================================================================='
@@ -64,7 +64,7 @@
             dtype = mat.dtype.name.center(9)
             print fmt % (var,name,shape,dtype,mat.nnz)
 
-        space = ' ' * 10 
+        space = ' ' * 10
         print
         print space+'              Timings'
         print space+'=========================================='
@@ -91,7 +91,7 @@
                     operation = (X + '.' + op + '(' + Y + ')').center(17)
                     print fmt % (format,operation,msec_per_it)
 
-  
+
     def bench_sort(self):
         """sort CSR column indices"""
         matrices = []
@@ -109,14 +109,14 @@
         fmt = '  %3s | %12s | %20s | %8d |   %6.2f  '
 
         for name,N,K in matrices:
-            N = int(N) 
+            N = int(N)
             A = random_sparse(N,N,K)
-            
+
             start = time.clock()
             iter = 0
             while iter < 5 and time.clock() - start < 1:
                 A.has_sorted_indices = False
-                A.sort_indices() 
+                A.sort_indices()
                 iter += 1
             end = time.clock()
 
@@ -136,7 +136,7 @@
         A = sparse.kron(poisson2d(150),ones((2,2))).tobsr(blocksize=(2,2))
         matrices.append( ('Block2x2', A.tocsr()) )
         matrices.append( ('Block2x2', A) )
-        
+
         A = sparse.kron(poisson2d(100),ones((3,3))).tobsr(blocksize=(3,3))
         matrices.append( ('Block3x3', A.tocsr()) )
         matrices.append( ('Block3x3', A) )
@@ -172,14 +172,14 @@
             MFLOPs = (2*A.nnz*iter/(end-start))/float(1e6)
 
             print fmt % (A.format,name,shape,A.nnz,MFLOPs)
-            
+
     def bench_construction(self):
         """build matrices by inserting single values"""
         matrices = []
         matrices.append( ('Empty',csr_matrix((10000,10000))) )
         matrices.append( ('Identity',sparse.identity(10000)) )
         matrices.append( ('Poisson5pt', poisson2d(100)) )
-        
+
         print
         print '                    Sparse Matrix Construction'
         print '===================================================================='
@@ -189,11 +189,11 @@
 
         for name,A in matrices:
             A = A.tocoo()
-             
-            for format in ['lil','dok']: 
 
+            for format in ['lil','dok']:
+
                 start = time.clock()
-                
+
                 iter = 0
                 while time.clock() < start + 0.5:
                     T = eval(format + '_matrix')(A.shape)
@@ -212,16 +212,16 @@
         A = poisson2d(100)
 
         formats = ['csr','csc','coo','lil','dok']
-       
+
         print
         print '                Sparse Matrix Conversion'
         print '=========================================================='
         print ' format | tocsr() | tocsc() | tocoo() | tolil() | todok() '
         print '----------------------------------------------------------'
-        
+
         for fromfmt in formats:
             base = getattr(A,'to' + fromfmt)()
- 
+
             times = []
 
             for tofmt in formats:
@@ -237,7 +237,7 @@
                         x = fn()
                         iter += 1
                     end = time.clock()
-                    del x 
+                    del x
                     times.append( (end - start)/float(iter))
 
             output = "  %3s   " % fromfmt
@@ -245,7 +245,7 @@
                 if t is None:
                     output += '|    n/a    '
                 else:
-                    output += '| %5.1fms ' % (1000*t) 
+                    output += '| %5.1fms ' % (1000*t)
             print output
 
 
@@ -278,4 +278,3 @@
 
 if __name__ == "__main__":
     nose.run(argv=['', __file__])
-
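
The benchmarks above all follow the same pattern: repeat an operation for a fixed wall-clock budget, then report a rate. A minimal sketch of that pattern for a sparse matrix-vector product; the matrix here is a stand-in (the benchmark itself uses time.clock() and the Poisson test matrices).

    import time
    import numpy as np
    from scipy import sparse

    A = sparse.csr_matrix(np.random.rand(500, 500))   # stand-in matrix
    x = np.ones(A.shape[1])

    start = time.time()
    iters = 0
    while time.time() < start + 0.5:                  # run for ~0.5 seconds
        y = A * x                                     # sparse mat-vec
        iters += 1
    elapsed = time.time() - start
    mflops = (2.0 * A.nnz * iters / elapsed) / 1e6    # 2 flops per stored entry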

Modified: trunk/scipy/sparse/tests/test_base.py
===================================================================
--- trunk/scipy/sparse/tests/test_base.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/tests/test_base.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -43,7 +43,7 @@
     def setUp(self):
         self.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d')
         self.datsp = self.spmatrix(self.dat)
-   
+
     def test_repr(self):
         repr(self.datsp)
 
@@ -79,7 +79,7 @@
         D = matrix([[1 + 3j, 2 - 4j]])
         A = self.spmatrix(D)
         assert_equal(A.real.todense(),D.real)
-    
+
     def test_imag(self):
         D = matrix([[1 + 3j, 2 - 4j]])
         A = self.spmatrix(D)
@@ -103,7 +103,7 @@
 
         for m in mats:
             assert_equal(self.spmatrix(m).diagonal(),diag(m))
-        
+
     def test_sum(self):
         """Does the matrix's .sum(axis=...) method work?
         """
@@ -156,13 +156,13 @@
         assert_array_equal(dense_dot_dense, check2)
 
     def test_asfptype(self):
-        A = self.spmatrix( arange(6,dtype='int32').reshape(2,3) ) 
+        A = self.spmatrix( arange(6,dtype='int32').reshape(2,3) )
 
         assert_equal( A.dtype , 'int32' )
         assert_equal( A.asfptype().dtype, 'float64' )
         assert_equal( A.astype('int16').asfptype().dtype , 'float32' )
         assert_equal( A.astype('complex128').asfptype().dtype , 'complex128' )
-        
+
         B = A.asfptype()
         C = B.asfptype()
         assert( B is C )
@@ -171,7 +171,7 @@
     def test_mul_scalar(self):
         assert_array_equal(self.dat*2,(self.datsp*2).todense())
         assert_array_equal(self.dat*17.3,(self.datsp*17.3).todense())
-    
+
     def test_rmul_scalar(self):
         assert_array_equal(2*self.dat,(2*self.datsp).todense())
         assert_array_equal(17.3*self.dat,(17.3*self.datsp).todense())
@@ -215,13 +215,13 @@
         assert_array_equal(c.todense(),[[1,0,0,4],[9,0,1,0],[0,4,0,0]])
 
     def test_eldiv(self):
-        expected = [[1,0,0,1],[1,0,1,0],[0,1,0,0]] 
+        expected = [[1,0,0,1],[1,0,1,0],[0,1,0,0]]
         assert_array_equal((self.datsp / self.datsp).todense(),expected)
 
         denom = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
         res = matrix([[1,0,0,0.5],[-3,0,numpy.inf,0],[0,0.25,0,0]],'d')
         assert_array_equal((self.datsp / denom).todense(),res)
-    
+
     def test_pow(self):
         A = matrix([[1,0,2,0],[0,3,4,0],[0,5,0,0],[0,6,7,8]])
         B = self.spmatrix( A )
@@ -257,8 +257,8 @@
         #check result type
         assert(isinstance( M * array([1,2,3]), ndarray))
         assert(isinstance( M * matrix([1,2,3]).T, matrix))
-        
 
+
         #ensure exception is raised for improper dimensions
         bad_vecs = [array([1,2]), array([1,2,3,4]), array([[1],[2]]),
                     matrix([1,2,3]), matrix([[1],[2]])]
@@ -363,7 +363,7 @@
             a = A.asformat(format)
             assert_equal(a.format,format)
             assert_array_equal(a.todense(), D)
-            
+
             b = self.spmatrix(D+3j).asformat(format)
             assert_equal(b.format,format)
             assert_array_equal(b.todense(), D+3j)
@@ -373,11 +373,11 @@
             assert_array_equal(c.todense(), D)
 
 
-            
+
     def test_todia(self):
         #TODO, add and test .todia(maxdiags)
         pass
-    
+
     def test_tocompressedblock(self):
         x = array([[1,0,2,0],[0,0,0,0],[0,0,4,5]])
         y = array([[0,1,2],[3,0,5]])
@@ -385,7 +385,7 @@
         Asp = self.spmatrix(A)
         for format in ['bsr']:
             fn = getattr(Asp, 'to' + format )
-            
+
             for X in [ 1, 2, 3, 6 ]:
                 for Y in [ 1, 2, 3, 4, 6, 12]:
                     assert_equal( fn(blocksize=(X,Y)).todense(), A)
@@ -396,7 +396,7 @@
         b = self.dat.transpose()
         assert_array_equal(a.todense(), b)
         assert_array_equal(a.transpose().todense(), self.dat)
-        
+
         assert_array_equal( self.spmatrix((3,4)).T.todense(), zeros((4,3)) )
 
 
@@ -426,7 +426,7 @@
         assert_equal(A.copy().format, A.format)
         assert_equal(A.__class__(A,copy=True).format,  A.format)
         assert_equal(A.__class__(A,copy=False).format, A.format)
-        
+
         assert_equal(A.copy().todense(), A.todense())
         assert_equal(A.__class__(A,copy=True).todense(),  A.todense())
         assert_equal(A.__class__(A,copy=False).todense(), A.todense())
@@ -436,12 +436,12 @@
         assert_equal(toself().format, A.format)
         assert_equal(toself(copy=True).format, A.format)
         assert_equal(toself(copy=False).format, A.format)
-        
+
         assert_equal(toself().todense(), A.todense())
         assert_equal(toself(copy=True).todense(), A.todense())
         assert_equal(toself(copy=False).todense(), A.todense())
-        
 
+
         #TODO how can we check whether the data is copied?
         pass
 
@@ -483,16 +483,16 @@
 
 class _TestMatvecOutput:
     """test using the matvec() output parameter"""
-    def test_matvec_output(self): 
+    def test_matvec_output(self):
         pass  #Currently disabled
 
 #        #flat array
 #        x = array([1.25, -6.5, 0.125, -3.75],dtype='d')
 #        y = zeros(3,dtype='d')
-#        
+#
 #        self.datsp.matvec(x,y)
 #        assert_array_equal(self.datsp*x,y)
-#    
+#
 #        #column vector
 #        x = array([1.25, -6.5, 0.125, -3.75],dtype='d')
 #        x = x.reshape(4,1)
@@ -500,24 +500,24 @@
 #
 #        self.datsp.matvec(x,y)
 #        assert_array_equal(self.datsp*x,y)
-#   
+#
 #        # improper output type
 #        x = array([1.25, -6.5, 0.125, -3.75],dtype='d')
 #        y = zeros(3,dtype='i')
-#        
+#
 #        self.assertRaises( ValueError, self.datsp.matvec, x, y )
-#        
+#
 #        # improper output shape
 #        x = array([1.25, -6.5, 0.125, -3.75],dtype='d')
 #        y = zeros(2,dtype='d')
-#        
+#
 #        self.assertRaises( ValueError, self.datsp.matvec, x, y )
 #
 #        # proper upcast output type
 #        x = array([1.25, -6.5, 0.125, -3.75],dtype='complex64')
 #        x.imag = [1,2,3,4]
 #        y = zeros(3,dtype='complex128')
-#       
+#
 #        self.datsp.matvec(x,y)
 #        assert_array_equal(self.datsp*x,y)
 #        assert_equal((self.datsp*x).dtype,y.dtype)
@@ -638,7 +638,7 @@
         B = asmatrix(arange(50.).reshape(5,10))
         A = self.spmatrix(B)
         assert_array_equal(A[2:5,0:3].todense(), B[2:5,0:3])
-        assert_array_equal(A[1:,:-1].todense(),  B[1:,:-1]) 
+        assert_array_equal(A[1:,:-1].todense(),  B[1:,:-1])
         assert_array_equal(A[:-1,1:].todense(),  B[:-1,1:])
 
         # Now test slicing when a column contains only zeros
@@ -663,7 +663,7 @@
         # [i,1:2]
         assert_equal(A[2,:].todense(),B[2,:])
         assert_equal(A[2,5:-2].todense(),B[2,5:-2])
-       
+
         # [i,[1,2]]
         assert_equal(A[3,[1,3]].todense(),B[3,[1,3]])
         assert_equal(A[-1,[2,-5]].todense(),B[-1,[2,-5]])
@@ -681,15 +681,15 @@
         # [[1,2],j]
         assert_equal(A[[1,3],3].todense(),B[[1,3],3])
         assert_equal(A[[2,-5],-4].todense(),B[[2,-5],-4])
-        
+
         # [[1,2],1:2]
         assert_equal(A[[1,3],:].todense(),B[[1,3],:])
         assert_equal(A[[2,-5],8:-1].todense(),B[[2,-5],8:-1])
-    
+
         # [[1,2],[1,2]]
         assert_equal(A[[1,3],[2,4]],B[[1,3],[2,4]])
         assert_equal(A[[-1,-3],[2,-4]],B[[-1,-3],[2,-4]])
-        
+
         # [[[1],[2]],[1,2]]
         assert_equal(A[[[1],[3]],[2,4]].todense(),B[[[1],[3]],[2,4]])
         assert_equal(A[[[-1],[-3],[-2]],[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]])
@@ -882,8 +882,8 @@
         data    = array([1,2,3,4])
         csr = csr_matrix((data, indices, indptr))
         assert_array_equal(csr.shape,(3,6))
-    
 
+
     def test_sort_indices(self):
         data    = arange( 5 )
         indices = array( [7, 2, 1, 5, 4] )
@@ -956,7 +956,7 @@
         data    = array([1,2,3,4])
         csc = csc_matrix((data, indices, indptr))
         assert_array_equal(csc.shape,(6,3))
-    
+
     def test_eliminate_zeros(self):
         data    = array( [1, 0, 0, 0, 2, 0, 3, 0] )
         indices = array( [1, 2, 3, 4, 5, 6, 7, 8] )
@@ -967,14 +967,14 @@
         assert_array_equal(asp.nnz, 3)
         assert_array_equal(asp.data,[1, 2, 3])
         assert_array_equal(asp.todense(),bsp.todense())
-    
+
     def test_sort_indices(self):
         data = arange( 5 )
         row = array( [7, 2, 1, 5, 4] )
         ptr = [0, 3, 5]
         asp = csc_matrix( (data, row, ptr), shape=(10,2) )
         bsp = asp.copy()
-        asp.sort_indices() 
+        asp.sort_indices()
         assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
         assert_array_equal(asp.todense(),bsp.todense())
 
@@ -1086,7 +1086,7 @@
         assert_equal(caught,5)
 
 
-class TestLIL( _TestCommon, _TestHorizSlicing, _TestVertSlicing, 
+class TestLIL( _TestCommon, _TestHorizSlicing, _TestVertSlicing,
         _TestBothSlicing, _TestGetSet, _TestSolve,
         _TestArithmetic, _TestInplaceArithmetic,
         TestCase):
@@ -1288,15 +1288,15 @@
     def test_constructor1(self):
         pass
         #TODO add test
-    
 
+
 class TestBSR(_TestCommon, _TestArithmetic, _TestInplaceArithmetic,
         _TestMatvecOutput, TestCase):
     spmatrix = bsr_matrix
 
     def test_constructor1(self):
         """check native BSR format constructor"""
-        indptr  = array([0,2,2,4]) 
+        indptr  = array([0,2,2,4])
         indices = array([0,2,2,3])
         data    = zeros((4,2,3))
 
@@ -1312,14 +1312,14 @@
         A = numpy.kron( [[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]] )
         Asp = bsr_matrix((data,indices,indptr),shape=(6,12))
         assert_equal(Asp.todense(),A)
-        
+
         #infer shape from arrays
         Asp = bsr_matrix((data,indices,indptr))
         assert_equal(Asp.todense(),A)
 
     def test_constructor2(self):
         """construct from dense"""
-   
+
         #test zero mats
         for shape in [ (1,1), (5,1), (1,10), (10,4), (3,7), (2,1)]:
             A = zeros(shape)
@@ -1337,10 +1337,10 @@
         assert_equal(bsr_matrix(A,blocksize=(2,12)).todense(),A)
         assert_equal(bsr_matrix(A,blocksize=(3,12)).todense(),A)
         assert_equal(bsr_matrix(A,blocksize=(6,12)).todense(),A)
-        
+
         A = numpy.kron( [[1,0,2,0],[0,1,0,0],[0,0,0,0]], [[0,1,2],[3,0,5]] )
         assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
-        
+
     def test_eliminate_zeros(self):
         data = numpy.kron([1, 0, 0, 0, 2, 0, 3, 0], [[1,1],[1,1]]).T
         data = data.reshape(-1,2,2)
@@ -1352,6 +1352,6 @@
         assert_array_equal(asp.nnz, 3*4)
         assert_array_equal(asp.todense(),bsp.todense())
 
-                
+
 if __name__ == "__main__":
     nose.run(argv=['', __file__])
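
For reference, the native BSR constructor exercised in test_constructor1 above takes one dense r-by-c block per stored entry, with indices giving each block's block-column and indptr delimiting the block-rows. A minimal, self-contained version of that construction (block values chosen for illustration):

    import numpy as np
    from scipy.sparse import bsr_matrix

    indptr  = np.array([0, 2, 2, 4])     # block-row boundaries
    indices = np.array([0, 2, 2, 3])     # block-column of each stored block
    data    = np.ones((4, 2, 3))         # one dense 2x3 block per entry
    A = bsr_matrix((data, indices, indptr), shape=(6, 12))
    assert A.blocksize == (2, 3)
    assert A.shape == (6, 12)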

Modified: trunk/scipy/sparse/tests/test_construct.py
===================================================================
--- trunk/scipy/sparse/tests/test_construct.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/tests/test_construct.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -19,7 +19,7 @@
         diags3 = array( [[ 1, 2, 3, 4, 5],
                          [ 6, 7, 8, 9,10],
                          [11,12,13,14,15]] )
-       
+
         cases = []
         cases.append( (diags1,  0,  1, 1, [[1]]) )
         cases.append( (diags1, [0], 1, 1, [[1]]) )
@@ -58,8 +58,8 @@
 
         for d,o,m,n,result in cases:
             assert_equal( spdiags(d,o,m,n).todense(), result )
-        
-           
+
+
     def test_identity(self):
         a = identity(3)
         b = array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='d')
@@ -98,7 +98,7 @@
         cases.append(array([[5,4,4],[1,0,0],[6,0,8]]))
         cases.append(array([[0,1,0,2,0,5,8]]))
         cases.append(array([[0.5,0.125,0,3.25],[0,2.5,0,0]]))
-        
+
         for a in cases:
             for b in cases:
                 result   = kron(csr_matrix(a),csr_matrix(b)).todense()
@@ -116,7 +116,7 @@
         cases.append(array([[0,2],[5,0]]))
         cases.append(array([[0,2,-6],[8,0,14],[0,3,0]]))
         cases.append(array([[1,0,0],[0,5,-1],[4,-2,8]]))
-        
+
         for a in cases:
             for b in cases:
                 result   = kronsum(csr_matrix(a),csr_matrix(b)).todense()
@@ -133,7 +133,7 @@
                            [3, 4],
                            [5, 6]])
         assert_equal( vstack( [A,B] ).todense(), expected )
-    
+
     def test_hstack(self):
 
         A = coo_matrix([[1,2],[3,4]])
@@ -154,17 +154,17 @@
                            [0, 0, 7]])
         assert_equal( bmat( [[A,B],[None,C]] ).todense(), expected )
 
- 
+
         expected = matrix([[1, 2, 0],
                            [3, 4, 0],
                            [0, 0, 7]])
         assert_equal( bmat( [[A,None],[None,C]] ).todense(), expected )
-    
+
         expected = matrix([[0, 5],
                            [0, 6],
                            [7, 0]])
         assert_equal( bmat( [[None,B],[C,None]] ).todense(), expected )
-    
+
         #TODO test failure cases
 
     def test_lil_diags(self):
@@ -200,4 +200,3 @@
 
 if __name__ == "__main__":
     nose.run(argv=['', __file__])
-
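
As a compact illustration of the bmat() block layout checked above (B and C are inferred from the expected matrices in the test, so treat their values as assumptions):

    import numpy as np
    from scipy.sparse import coo_matrix, bmat

    A = coo_matrix([[1, 2], [3, 4]])
    B = coo_matrix([[5], [6]])
    C = coo_matrix([[7]])
    D = bmat([[A, B], [None, C]]).todense()   # None blocks are filled with zeros
    assert np.all(D == [[1, 2, 5], [3, 4, 6], [0, 0, 7]])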

Modified: trunk/scipy/sparse/tests/test_spfuncs.py
===================================================================
--- trunk/scipy/sparse/tests/test_spfuncs.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/tests/test_spfuncs.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -11,8 +11,8 @@
         D = matrix([[1,0,0,2,3],
                     [0,4,0,5,0],
                     [0,0,6,7,0]])
-        
-        
+
+
         #TODO expose through function
         S = csr_matrix(D)
         v = array([1,2,3])
@@ -35,7 +35,7 @@
         v = array([1,2,3,4,5,6,7,8,9,10])
         bsr_scale_columns(3,5,2,2,S.indptr,S.indices,S.data,v)
         assert_equal(S.todense(), E*diag(v) )
-        
+
         E = kron(D,[[1,2,3],[4,5,6]])
         S = bsr_matrix(E,blocksize=(2,3))
         v = array([1,2,3,4,5,6])
@@ -49,15 +49,15 @@
 
 
 
-            
 
+
     def test_estimate_blocksize(self):
         mats = []
         mats.append( [[0,1],[1,0]] )
         mats.append( [[1,1,0],[0,0,1],[1,0,1]] )
         mats.append( [[0],[0],[1]] )
         mats = [array(x) for x in mats]
-    
+
         blks = []
         blks.append( [[1]] )
         blks.append( [[1,1],[1,1]] )
@@ -77,12 +77,12 @@
             R,C = bs
             I,J = A.nonzero()
             return len( set( zip(I/R,J/C) ) )
-        
+
         mats = []
-        mats.append( [[0]] ) 
-        mats.append( [[1]] ) 
-        mats.append( [[1,0]] ) 
-        mats.append( [[1,1]] ) 
+        mats.append( [[0]] )
+        mats.append( [[1]] )
+        mats.append( [[1,0]] )
+        mats.append( [[1,1]] )
         mats.append( [[0,1],[1,0]] )
         mats.append( [[1,1,0],[0,0,1],[1,0,1]] )
         mats.append( [[0],[0],[1]] )
@@ -94,7 +94,7 @@
                 for R in range(1,6):
                     for C in range(1,6):
                         assert_equal(count_blocks(Y,(R,C)),gold(X,(R,C)))
-        
+
         X = kron([[1,1,0],[0,0,1],[1,0,1]],[[1,1]])
         Y = csc_matrix(X)
         assert_equal(count_blocks(X,(1,2)),gold(X,(1,2)))
@@ -103,4 +103,3 @@
 
 if __name__ == "__main__":
     nose.run(argv=['', __file__])
-
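
The scale_columns test above relies on the identity that scaling column j by v[j] is the same as right-multiplying by diag(v); the sparsetools kernel bsr_scale_columns applies that scaling in place on the BSR arrays. A dense sketch of the identity it is checked against, using the same E and v as the test:

    import numpy as np

    E = np.kron([[1, 0, 0, 2, 3],
                 [0, 4, 0, 5, 0],
                 [0, 0, 6, 7, 0]], [[1, 1], [1, 1]])
    v = np.arange(1, E.shape[1] + 1, dtype=float)
    assert np.allclose(E * v, E.dot(np.diag(v)))   # column j scaled by v[j]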

Modified: trunk/scipy/sparse/tests/test_sputils.py
===================================================================
--- trunk/scipy/sparse/tests/test_sputils.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/sparse/tests/test_sputils.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -58,15 +58,13 @@
         assert_equal(issequence( [1] ),True)
         assert_equal(issequence( [1,2,3] ),True)
         assert_equal(issequence( np.array([1,2,3]) ),True)
-        
+
         assert_equal(issequence( np.array([[1],[2],[3]]) ),False)
         assert_equal(issequence( 3 ),False)
 
     def test_isdense(self):
         assert_equal(isdense( np.array([1]) ),True)
         assert_equal(isdense( np.matrix([1]) ),True)
-                
+
 if __name__ == "__main__":
     nose.run(argv=['', __file__])
-
-

Modified: trunk/scipy/splinalg/__init__.py
===================================================================
--- trunk/scipy/splinalg/__init__.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/splinalg/__init__.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -3,4 +3,3 @@
 warn('scipy.splinalg has moved to scipy.sparse.linalg', DeprecationWarning)
 
 from scipy.sparse.linalg import *
-

Modified: trunk/scipy/stats/mmorestats.py
===================================================================
--- trunk/scipy/stats/mmorestats.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/stats/mmorestats.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -58,7 +58,7 @@
 Notes
 -----
     The function is restricted to 2D arrays.
-    
+
     """
     def _hd_1D(data,prob,var):
         "Computes the HD quantiles for a 1D array. Returns nan for invalid data."
@@ -113,7 +113,7 @@
         Axis along which to compute the quantiles. If None, use a flattened array.
     var : boolean
         Whether to return the variance of the estimate.
-        
+
     """
     result = hdquantiles(data,[0.5], axis=axis, var=var)
     return result.squeeze()
@@ -136,7 +136,7 @@
 Notes
 -----
     The function is restricted to 2D arrays.
-    
+
     """
     def _hdsd_1D(data,prob):
         "Computes the std error for 1D arrays."
@@ -176,7 +176,7 @@
 #---- --- Confidence intervals ---
 #####--------------------------------------------------------------------------
 
-def trimmed_mean_ci(data, limits=(0.2,0.2), inclusive=(True,True), 
+def trimmed_mean_ci(data, limits=(0.2,0.2), inclusive=(True,True),
                     alpha=0.05, axis=None):
     """Returns the selected confidence interval of the trimmed mean along the
 given axis.
@@ -191,13 +191,13 @@
     alpha : float
         Confidence level of the intervals.
     inclusive : tuple of boolean
-        If relative==False, tuple indicating whether values exactly equal to the 
+        If relative==False, tuple indicating whether values exactly equal to the
         absolute limits are allowed.
         If relative==True, tuple indicating whether the number of data being masked
         on each side should be rounded (True) or truncated (False).
     axis : int
         Axis along which to cut. If None, uses a flattened version of the input.
-    
+
     """
     data = ma.array(data, copy=False)
     trimmed = mstats.trimr(data, limits=limits, inclusive=inclusive, axis=axis)
@@ -220,7 +220,7 @@
         Sequence of quantiles to compute.
     axis : int
         Axis along which to compute the quantiles. If None, use a flattened array.
-    
+
     """
     def _mjci_1D(data, p):
         data = np.sort(data.compressed())
@@ -340,7 +340,7 @@
 
 
 def idealfourths(data, axis=None):
-    """Returns an estimate of the lower and upper quartiles of the data along 
+    """Returns an estimate of the lower and upper quartiles of the data along
     the given axis, as computed with the ideal fourths.
     """
     def _idf(data):
@@ -383,8 +383,7 @@
     h = 1.2 * (r[-1]-r[0]) / n**(1./5)
     nhi = (data[:,None] <= points[None,:] + h).sum(0)
     nlo = (data[:,None] < points[None,:] - h).sum(0)
-    return (nhi-nlo) / (2.*n*h)   
+    return (nhi-nlo) / (2.*n*h)
 
 
 ###############################################################################
-
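
A hedged usage sketch of the Harrell-Davis estimators touched above, using the same import the test module uses within this scipy tree; the assumption is that hdmedian() returns a scalar-like estimate which, for well-behaved data, lands near the sample median.

    import numpy as np
    import scipy.stats.mmorestats as mms

    data = np.arange(1., 101.)          # 1, 2, ..., 100
    hd = mms.hdmedian(data)             # Harrell-Davis estimate of the median
    assert abs(float(hd) - np.median(data)) < 1.0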

Modified: trunk/scipy/stats/models/contrast.py
===================================================================
--- trunk/scipy/stats/models/contrast.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/stats/models/contrast.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -127,7 +127,7 @@
 
     L = N.asarray(L)
     D = N.asarray(D)
-    
+
     n, p = D.shape
 
     if L.shape[0] != n and L.shape[1] != p:
@@ -141,12 +141,12 @@
     else:
         C = L
         C = N.dot(pseudo, N.dot(D, C.T)).T
-        
+
     Lp = N.dot(D, C.T)
 
     if len(Lp.shape) == 1:
         Lp.shape = (n, 1)
-        
+
     if utils.rank(Lp) != Lp.shape[1]:
         Lp = utils.fullrank(Lp)
         C = N.dot(pseudo, Lp).T

Modified: trunk/scipy/stats/models/formula.py
===================================================================
--- trunk/scipy/stats/models/formula.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/stats/models/formula.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -27,10 +27,10 @@
     ----------------------------------------------------
 
     By default, the namespace is empty, which means it must be
-    specified before evaluating the design matrix. 
+    specified before evaluating the design matrix.
 
     When it is unambiguous, the namespaces of objects are derived from the
-    context. 
+    context.
 
     Rules:
     ------
@@ -256,12 +256,12 @@
     def main_effect(self, reference=None):
         """
         Return the 'main effect' columns of a factor, choosing
-        an optional reference key. 
+        an optional reference key.
 
-	The reference key can be one of the keys of the Factor,
+        The reference key can be one of the keys of the Factor,
         or an integer, representing which column to remove.
         It defaults to 0.
-        
+
         """
 
         names = self.names()
@@ -295,10 +295,10 @@
     def __getitem__(self, key):
         """
         Retrieve the column corresponding to key in a Formula.
-        
+
         :Parameters:
             key : one of the Factor's keys
-        
+
         :Returns: ndarray corresponding to key, when evaluated in
                   current namespace
         """
@@ -704,7 +704,7 @@
     is an integer, it is changed to range(1,order+1), so
     order=3 is equivalent to order=[1,2,3], generating
     all one, two and three-way interactions.
-    
+
     If any entry of order is greater than len(terms), it is
     effectively treated as len(terms).
 
@@ -731,7 +731,7 @@
         for m in range(I.shape[1]):
 
             # only keep combinations that have unique entries
-            
+
             if (N.unique(I[:,m]).shape == I[:,m].shape and
                 N.alltrue(N.equal(N.sort(I[:,m]), I[:,m]))):
                 ll = [terms[j] for j in I[:,m]]
@@ -742,7 +742,7 @@
 
     key = values.keys()[0]
     value = values[key]; del(values[key])
-    
+
     for v in values.values():
         value += v
     return value

Modified: trunk/scipy/stats/models/tests/test_formula.py
===================================================================
--- trunk/scipy/stats/models/tests/test_formula.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/stats/models/tests/test_formula.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -306,7 +306,7 @@
         f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']])
         ff = f - f['a*b']
         assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'a*c', 'b*c']))
-        
+
         ff = f - f['a*b'] - f['a*c']
         assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'b*c']))
 

Modified: trunk/scipy/stats/mstats.py
===================================================================
--- trunk/scipy/stats/mstats.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/stats/mstats.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -1,5 +1,5 @@
 """
-An extension of scipy.stats.stats to support masked arrays 
+An extension of scipy.stats.stats to support masked arrays
 
 :author: Pierre GF Gerard-Marchant
 :contact: pierregm_at_uga_edu
@@ -56,7 +56,7 @@
 genmissingvaldoc = """
 Notes
 -----
-    Missing values are considered pair-wise: if a value is missing in x, 
+    Missing values are considered pair-wise: if a value is missing in x,
     the corresponding value in y is masked.
 """
 #------------------------------------------------------------------------------
@@ -92,7 +92,7 @@
 def argstoarray(*args):
     """Constructs a 2D array from a sequence of sequences. Sequences are filled
     with missing values to match the length of the longest sequence.
-    
+
     Returns
     -------
         output : MaskedArray
@@ -120,19 +120,19 @@
 def find_repeats(arr):
     """Find repeats in arr and return a tuple (repeats, repeat_count).
     Masked values are discarded.
-    
+
 Parameters
 ----------
     arr : sequence
         Input array. The array is flattened if it is not 1D.
-        
+
 Returns
 -------
     repeats : ndarray
         Array of repeated values.
     counts : ndarray
         Array of counts.
-    
+
     """
     marr = ma.compressed(arr)
     if not marr.size:
@@ -142,22 +142,22 @@
 
 
 def count_tied_groups(x, use_missing=False):
-    """Counts the number of tied values in x, and returns a dictionary 
+    """Counts the number of tied values in x, and returns a dictionary
     (nb of ties: nb of groups).
-    
+
 Parameters
 ----------
     x : sequence
         Sequence of data on which to count the ties
     use_missing : boolean
         Whether to consider missing values as tied.
-    
+
 Example
 -------
     >>>z = [0, 0, 0, 2, 2, 2, 3, 3, 4, 5, 6]
     >>>count_tied_groups(z)
     >>>{2:1, 3:2}
-    >>># The ties were 0 (3x), 2 (3x) and 3 (2x) 
+    >>># The ties were 0 (3x), 2 (3x) and 3 (2x)
     >>>z = ma.array([0, 0, 1, 2, 2, 2, 3, 3, 4, 5, 6])
     >>>count_tied_groups(z)
     >>>{2:2, 3:1}
@@ -181,14 +181,14 @@
         except KeyError:
             nties[nmasked] = 1
     return nties
-        
 
+
 def rankdata(data, axis=None, use_missing=False):
     """Returns the rank (also known as order statistics) of each data point
     along the given axis.
 
     If some values are tied, their rank is averaged.
-    If some values are masked, their rank is set to 0 if use_missing is False, 
+    If some values are masked, their rank is set to 0 if use_missing is False,
     or set to the average rank of the unmasked values if use_missing is True.
 
     Parameters
@@ -196,8 +196,8 @@
         data : sequence
             Input data. The data is transformed to a masked array
         axis : {None,int} optional
-            Axis along which to perform the ranking. 
-            If None, the array is first flattened. An exception is raised if 
+            Axis along which to perform the ranking.
+            If None, the array is first flattened. An exception is raised if
             the axis is specified for arrays with a dimension larger than 2
         use_missing : {boolean} optional
             Whether the masked values have a rank of 0 (False) or equal to the
@@ -325,7 +325,7 @@
     common_mask = ma.mask_or(ma.getmask(x), ma.getmask(y))
     if allow_masked:
         x.unshare_mask()
-        y.unshare_mask() 
+        y.unshare_mask()
         x._mask = y._mask = common_mask
     elif common_mask is not nomask:
         raise ValueError("Cannot process masked data...")
@@ -362,7 +362,7 @@
         If True, then each row is a variable with observations in columns.
         If False, each column is a variable and the observations are in the rows.
     bias : {False, True} optional
-        Whether to use a biased (True) or unbiased (False) estimate of the 
+        Whether to use a biased (True) or unbiased (False) estimate of the
         covariance.
         If True, then the normalization is by N, the number of observations.
         Otherwise, the normalization is by (N-1).
@@ -420,7 +420,7 @@
         return (masked, masked)
     #
     (mx, my) = (x.mean(), y.mean())
-    (xm, ym) = (x-mx, y-my)   
+    (xm, ym) = (x-mx, y-my)
     #
     r_num = n*(ma.add.reduce(xm*ym))
     r_den = n*ma.sqrt(ma.dot(xm,xm)*ma.dot(ym,ym))
@@ -437,8 +437,8 @@
     else:
         prob = betai(0.5*df,0.5,df/(df+t*t))
     return (r,prob)
-    
 
+
 def spearmanr(x, y, use_ties=True):
     """Calculates a Spearman rank-order correlation coefficient and the p-value
     to test for non-correlation.
@@ -451,8 +451,8 @@
     +1 imply an exact linear relationship. Positive correlations imply that
     as x increases, so does y. Negative correlations imply that as x
     increases, y decreases.
-    
-    Missing values are discarded pair-wise: if a value is missing in x, the 
+
+    Missing values are discarded pair-wise: if a value is missing in x, the
     corresponding value in y is masked.
 
     The p-value roughly indicates the probability of an uncorrelated system
@@ -517,9 +517,9 @@
 
 def kendalltau(x, y, use_ties=True, use_missing=False):
     """Computes Kendall's rank correlation tau on two variables *x* and *y*.
-    
+
 Parameters
-----------    
+----------
     xdata: sequence
         First data list (for example, time).
     ydata: sequence
@@ -527,7 +527,7 @@
     use_ties: {True, False} optional
         Whether ties correction should be performed.
     use_missing: {False, True} optional
-        Whether missing data should be allocated a rank of 0 (False) or the 
+        Whether missing data should be allocated a rank of 0 (False) or the
         average rank (True)
     """
     (x, y, n) = _chk_size(x, y)
@@ -542,9 +542,9 @@
     ry = ma.masked_equal(rankdata(y, use_missing=use_missing),0)
     idx = rx.argsort()
     (rx, ry) = (rx[idx], ry[idx])
-    C = np.sum((((ry[i+1:]>ry[i])*(rx[i+1:]>rx[i])).filled(0).sum() 
+    C = np.sum((((ry[i+1:]>ry[i])*(rx[i+1:]>rx[i])).filled(0).sum()
                 for i in range(len(ry)-1)))
-    D = np.sum((((ry[i+1:]<ry[i])*(rx[i+1:]>rx[i])).filled(0).sum() 
+    D = np.sum((((ry[i+1:]<ry[i])*(rx[i+1:]>rx[i])).filled(0).sum()
                 for i in range(len(ry)-1)))
     if use_ties:
         xties = count_tied_groups(x)
@@ -575,12 +575,12 @@
     return (tau,prob)
 
 
-def kendalltau_seasonal(x):    
+def kendalltau_seasonal(x):
     """Computes a multivariate extension Kendall's rank correlation tau, designed
     for seasonal data.
-    
+
 Parameters
-----------    
+----------
     x: 2D array
         Array of seasonal data, with seasons in columns.
     """
@@ -606,7 +606,7 @@
         corr_j = np.sum(v*k*(k-1) for (k,v) in ties_j.iteritems())
         cmb = n_p[j]*(n_p[j]-1)
         for k in range(j,m,1):
-            K[j,k] = np.sum(msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum() 
+            K[j,k] = np.sum(msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum()
                                for i in range(n))
             covmat[j,k] = (K[j,k] +4*(R[:,j]*R[:,k]).sum() - \
                            n*(n_p[j]+1)*(n_p[k]+1))/3.
@@ -693,7 +693,7 @@
         r = 0.0
     else:
         r = Sxy / r_den
-        if (r > 1.0): 
+        if (r > 1.0):
             r = 1.0 # from numerical error
     #z = 0.5*log((1.0+r+TINY)/(1.0-r+TINY))
     df = n-2
@@ -709,7 +709,7 @@
 def theilslopes(y, x=None, alpha=0.05):
     """Computes the Theil slope over the dataset (x,y), as the median of all slopes
     between paired values.
-    
+
     Parameters
     ----------
         y : sequence
@@ -718,7 +718,7 @@
             Independent variable. If None, use arange(len(y)) instead.
         alpha : float
             Confidence degree.
-    
+
     """
     y = ma.asarray(y).flatten()
     y[-1] = masked
@@ -736,7 +736,7 @@
     slopes = ma.hstack([(y[i+1:]-y[i])/(x[i+1:]-x[i]) for i in range(n-1)])
     slopes.sort()
     medslope = ma.median(slopes)
-    medinter = ma.median(y) - medslope*ma.median(x) 
+    medinter = ma.median(y) - medslope*ma.median(x)
     #
     if alpha > 0.5:
         alpha = 1.-alpha
@@ -748,7 +748,7 @@
     sigsq -= np.sum(v*k*(k-1)*(2*k+5) for (k,v) in xties.iteritems())
     sigsq -= np.sum(v*k*(k-1)*(2*k+5) for (k,v) in yties.iteritems())
     sigma = np.sqrt(sigsq)
-    
+
     Ru = np.round((nt - z*sigma)/2. + 1)
     Rl = np.round((nt + z*sigma)/2.)
     delta = slopes[[Rl,Ru]]
@@ -759,7 +759,7 @@
     x = ma.array(x, subok=True, copy=False, ndmin=2)
     (n,_) = x.shape
     # Get list of slopes per season
-    szn_slopes = ma.vstack([(x[i+1:]-x[i])/np.arange(1,n-i)[:,None] 
+    szn_slopes = ma.vstack([(x[i+1:]-x[i])/np.arange(1,n-i)[:,None]
                             for i in range(n)])
     szn_medslopes = ma.median(szn_slopes, axis=0)
     medslope = ma.median(szn_slopes, axis=None)
@@ -830,21 +830,21 @@
 def mannwhitneyu(x,y, use_continuity=True):
     """Computes the Mann-Whitney on samples x and y.
     Missing values in x and/or y are discarded.
-    
+
     Parameters
     ----------
         x : sequence
         y : sequence
         use_continuity : {True, False} optional
             Whether a continuity correction (1/2.) should be taken into account.
-            
+
     Returns
     -------
         u : float
             The Mann-Whitney statistics
         prob : float
             Approximate p-value assuming a normal distribution.
-    
+
     """
     x = ma.asarray(x).compressed().view(ndarray)
     y = ma.asarray(y).compressed().view(ndarray)
@@ -860,7 +860,7 @@
     ties = count_tied_groups(ranks)
     sigsq -= np.sum(v*(k**3-k) for (k,v) in ties.iteritems())/12.
     sigsq *= nx*ny/float(nt*(nt-1))
-    #    
+    #
     if use_continuity:
         z = (U - 1/2. - mu) / ma.sqrt(sigsq)
     else:
@@ -903,11 +903,11 @@
                                        + (n-j) * np.log(1-x-j/float(n))
                                        + (j-1) * np.log(x+j/float(n))))
 
-    
+
 def ks_twosamp(data1, data2, alternative="two_sided"):
-    """Computes the Kolmogorov-Smirnov test on two samples. 
+    """Computes the Kolmogorov-Smirnov test on two samples.
     Missing values are discarded.
-    
+
     Parameters
     ----------
         data1 : sequence
@@ -915,15 +915,15 @@
         data2 : sequence
             Second data set
         alternative : {'two_sided', 'less', 'greater'} optional
-            Indicates the alternative hypothesis. 
-    
+            Indicates the alternative hypothesis.
+
     Returns
     -------
         d : float
             Value of the Kolmogorov Smirnov test
         p : float
             Corresponding p-value.
-    
+
     """
     (data1, data2) = (ma.asarray(data1), ma.asarray(data2))
     (n1, n2) = (data1.count(), data2.count())
@@ -997,23 +997,23 @@
     return a
 
 
-def trima(a, limits=None, inclusive=(True,True)):  
+def trima(a, limits=None, inclusive=(True,True)):
     """Trims an array by masking the data outside some given limits.
     Returns a masked version of the input array.
-    
+
     Parameters
     ----------
     a : sequence
         Input array.
     limits : {None, tuple} optional
-        Tuple of (lower limit, upper limit) in absolute values. 
-        Values of the input array lower (greater) than the lower (upper) limit 
+        Tuple of (lower limit, upper limit) in absolute values.
+        Values of the input array lower (greater) than the lower (upper) limit
         will be masked. A limit set to None indicates an open interval.
     inclusive : {(True,True) tuple} optional
         Tuple of (lower flag, upper flag), indicating whether values exactly
         equal to the lower (upper) limit are allowed.
-        
-    """ 
+
+    """
     a = ma.asarray(a)
     a.unshare_mask()
     if limits is None:
@@ -1033,20 +1033,20 @@
             condition |= (a >= upper_lim)
     a[condition.filled(True)] = masked
     return a
-    
 
+
 def trimr(a, limits=None, inclusive=(True, True), axis=None):
     """Trims an array by masking some proportion of the data on each end.
     Returns a masked version of the input array.
-    
+
     Parameters
     ----------
     a : sequence
         Input array.
     limits : {None, tuple} optional
-        Tuple of the percentages to cut on each side of the array, with respect 
+        Tuple of the percentages to cut on each side of the array, with respect
         to the number of unmasked data, as floats between 0. and 1.
-        Noting n the number of unmasked data before trimming, the (n*limits[0])th 
+        Noting n the number of unmasked data before trimming, the (n*limits[0])th
         smallest data and the (n*limits[1])th largest data are masked, and the
         total number of unmasked data after trimming is n*(1.-sum(limits))
         The value of one limit can be set to None to indicate an open interval.
@@ -1054,9 +1054,9 @@
         Tuple of flags indicating whether the number of data being masked on the
         left (right) end should be truncated (True) or rounded (False) to integers.
     axis : {None,int} optional
-        Axis along which to trim. If None, the whole array is trimmed, but its 
+        Axis along which to trim. If None, the whole array is trimmed, but its
         shape is maintained.
-    
+
     """
     def _trimr1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
         n = a.count()
@@ -1096,26 +1096,26 @@
         return _trimr1D(a.ravel(),lolim,uplim,loinc,upinc).reshape(shp)
     else:
         return ma.apply_along_axis(_trimr1D, axis, a, lolim,uplim,loinc,upinc)
- 
+
 trimdoc = """
     Parameters
     ----------
     a : sequence
         Input array
     limits : {None, tuple} optional
-        If relative == False, tuple (lower limit, upper limit) in absolute values. 
-        Values of the input array lower (greater) than the lower (upper) limit are 
+        If relative == False, tuple (lower limit, upper limit) in absolute values.
+        Values of the input array lower (greater) than the lower (upper) limit are
         masked.
-        If relative == True, tuple (lower percentage, upper percentage) to cut 
-        on each side of the  array, with respect to the number of unmasked data. 
-        Noting n the number of unmasked data before trimming, the (n*limits[0])th 
+        If relative == True, tuple (lower percentage, upper percentage) to cut
+        on each side of the  array, with respect to the number of unmasked data.
+        Noting n the number of unmasked data before trimming, the (n*limits[0])th
         smallest data and the (n*limits[1])th largest data are masked, and the
         total number of unmasked data after trimming is n*(1.-sum(limits))
         In each case, the value of one limit can be set to None to indicate an
         open interval.
         If limits is None, no trimming is performed
     inclusive : {(True, True) tuple} optional
-        If relative==False, tuple indicating whether values exactly equal to the 
+        If relative==False, tuple indicating whether values exactly equal to the
         absolute limits are allowed.
         If relative==True, tuple indicating whether the number of data being masked
         on each side should be rounded (True) or truncated (False).
@@ -1124,14 +1124,14 @@
         to cut (True).
     axis : {None, integer}, optional
         Axis along which to trim.
-"""       
-        
-    
+"""
+
+
 def trim(a, limits=None, inclusive=(True,True), relative=False, axis=None):
     """Trims an array by masking the data outside some given limits.
     Returns a masked version of the input array.
    %s
-        
+
     Examples
     --------
         >>>z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10]
@@ -1139,19 +1139,19 @@
         [--,--, 3, 4, 5, 6, 7, 8,--,--]
         >>>trim(z,(0.1,0.2),relative=True)
         [--, 2, 3, 4, 5, 6, 7, 8,--,--]
-        
-    
+
+
     """
     if relative:
-        return trimr(a, limits=limits, inclusive=inclusive, axis=axis) 
+        return trimr(a, limits=limits, inclusive=inclusive, axis=axis)
     else:
-        return trima(a, limits=limits, inclusive=inclusive) 
+        return trima(a, limits=limits, inclusive=inclusive)
 trim.__doc__ = trim.__doc__ % trimdoc
 
 
 def trimboth(data, proportiontocut=0.2, inclusive=(True,True), axis=None):
-    """Trims the data by masking the int(proportiontocut*n) smallest and 
-    int(proportiontocut*n) largest values of data along the given axis, where n 
+    """Trims the data by masking the int(proportiontocut*n) smallest and
+    int(proportiontocut*n) largest values of data along the given axis, where n
     is the number of unmasked values before trimming.
 
 Parameters
@@ -1159,25 +1159,25 @@
     data : ndarray
         Data to trim.
     proportiontocut : {0.2, float} optional
-        Percentage of trimming (as a float between 0 and 1). 
-        If n is the number of unmasked values before trimming, the number of 
+        Percentage of trimming (as a float between 0 and 1).
+        If n is the number of unmasked values before trimming, the number of
         values after trimming is:
             (1-2*proportiontocut)*n.
     inclusive : {(True, True) tuple} optional
-        Tuple indicating whether the number of data being masked on each side 
+        Tuple indicating whether the number of data being masked on each side
         should be rounded (True) or truncated (False).
     axis : {None, integer}, optional
-        Axis along which to perform the trimming. 
+        Axis along which to perform the trimming.
         If None, the input array is first flattened.
 
     """
-    return trimr(data, limits=(proportiontocut,proportiontocut), 
+    return trimr(data, limits=(proportiontocut,proportiontocut),
                  inclusive=inclusive, axis=axis)
-    
+
 #..............................................................................
-def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True,True), 
+def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True,True),
              axis=None):
-    """Trims the data by masking int(trim*n) values from ONE tail of the 
+    """Trims the data by masking int(trim*n) values from ONE tail of the
     data along the given axis, where n is the number of unmasked values.
 
 Parameters
@@ -1185,17 +1185,17 @@
     data : {ndarray}
         Data to trim.
     proportiontocut : {0.2, float} optional
-        Percentage of trimming. If n is the number of unmasked values 
-        before trimming, the number of values after trimming is 
+        Percentage of trimming. If n is the number of unmasked values
+        before trimming, the number of values after trimming is
         (1-proportiontocut)*n.
     tail : {'left','right'} optional
         If left (right), the ``proportiontocut`` lowest (greatest) values will
-        be masked. 
+        be masked.
     inclusive : {(True, True) tuple} optional
-        Tuple indicating whether the number of data being masked on each side 
+        Tuple indicating whether the number of data being masked on each side
         should be rounded (True) or truncated (False).
     axis : {None, integer}, optional
-        Axis along which to perform the trimming. 
+        Axis along which to perform the trimming.
         If None, the input array is first flattened.
 
     """
@@ -1205,14 +1205,14 @@
     elif tail == 'r':
         limits = (None, proportiontocut)
     else:
-        raise TypeError("The tail argument should be in ('left','right')")   
+        raise TypeError("The tail argument should be in ('left','right')")
     return trimr(data, limits=limits, axis=axis, inclusive=inclusive)
 
 trim1 = trimtail
 
-def trimmed_mean(a, limits=(0.1,0.1), inclusive=(1,1), relative=True, 
+def trimmed_mean(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
                  axis=None):
-    """Returns the trimmed mean of the data along the given axis. 
+    """Returns the trimmed mean of the data along the given axis.
 
     %s
 
@@ -1225,9 +1225,9 @@
         return trima(a,limits=limits,inclusive=inclusive).mean(axis=axis)
 
 
-def trimmed_var(a, limits=(0.1,0.1), inclusive=(1,1), relative=True, 
+def trimmed_var(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
                 axis=None, ddof=0):
-    """Returns the trimmed variance of the data along the given axis. 
+    """Returns the trimmed variance of the data along the given axis.
 
     %s
     ddof : {0,integer}, optional
@@ -1245,9 +1245,9 @@
     return out.var(axis=axis, ddof=ddof)
 
 
-def trimmed_std(a, limits=(0.1,0.1), inclusive=(1,1), relative=True, 
+def trimmed_std(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
                 axis=None, ddof=0):
-    """Returns the trimmed standard deviation of the data along the given axis. 
+    """Returns the trimmed standard deviation of the data along the given axis.
 
     %s
     ddof : {0,integer}, optional
@@ -1273,16 +1273,16 @@
     a : sequence
         Input array
     limits : {(0.1,0.1), tuple of float} optional
-        tuple (lower percentage, upper percentage) to cut  on each side of the  
-        array, with respect to the number of unmasked data. 
-        Noting n the number of unmasked data before trimming, the (n*limits[0])th 
+        tuple (lower percentage, upper percentage) to cut  on each side of the
+        array, with respect to the number of unmasked data.
+        Noting n the number of unmasked data before trimming, the (n*limits[0])th
         smallest data and the (n*limits[1])th largest data are masked, and the
         total number of unmasked data after trimming is n*(1.-sum(limits))
         In each case, the value of one limit can be set to None to indicate an
         open interval.
         If limits is None, no trimming is performed
     inclusive : {(True, True) tuple} optional
-        Tuple indicating whether the number of data being masked on each side 
+        Tuple indicating whether the number of data being masked on each side
         should be rounded (True) or truncated (False).
     axis : {None, integer}, optional
         Axis along which to trim.
@@ -1369,31 +1369,31 @@
 
 def winsorize(a, limits=None, inclusive=(True,True), inplace=False, axis=None):
     """Returns a Winsorized version of the input array.
-    
-    The (limits[0])th lowest values are set to the (limits[0])th percentile, 
-    and the (limits[1])th highest values are set to the (limits[1])th 
+
+    The (limits[0])th lowest values are set to the (limits[0])th percentile,
+    and the (limits[1])th highest values are set to the (limits[1])th
     percentile.
     Masked values are skipped.
-    
-    
+
+
     Parameters
     ----------
     a : sequence
         Input array.
     limits : {None, tuple of float} optional
-        Tuple of the percentages to cut on each side of the array, with respect 
+        Tuple of the percentages to cut on each side of the array, with respect
         to the number of unmasked data, as floats between 0. and 1.
-        Noting n the number of unmasked data before trimming, the (n*limits[0])th 
+        Noting n the number of unmasked data before trimming, the (n*limits[0])th
         smallest data and the (n*limits[1])th largest data are masked, and the
         total number of unmasked data after trimming is n*(1.-sum(limits))
         The value of one limit can be set to None to indicate an open interval.
     inclusive : {(True, True) tuple} optional
-        Tuple indicating whether the number of data being masked on each side 
+        Tuple indicating whether the number of data being masked on each side
         should be rounded (True) or truncated (False).
     inplace : {False, True} optional
         Whether to winsorize in place (True) or to use a copy (False)
     axis : {None, int} optional
-        Axis along which to trim. If None, the whole array is trimmed, but its 
+        Axis along which to trim. If None, the whole array is trimmed, but its
         shape is maintained.
 
     """
@@ -1437,8 +1437,8 @@
         return _winsorize1D(a.ravel(),lolim,uplim,loinc,upinc).reshape(shp)
     else:
         return ma.apply_along_axis(_winsorize1D, axis,a,lolim,uplim,loinc,upinc)
- 
 
+
 #####--------------------------------------------------------------------------
 #---- --- Moments ---
 #####--------------------------------------------------------------------------
@@ -1540,7 +1540,7 @@
         data : ndarray
             Data to trim.
         axis : {None,int} optional
-            Axis along which to perform the trimming. 
+            Axis along which to perform the trimming.
             If None, the input array is first flattened.
 
     """
@@ -1603,7 +1603,7 @@
     term2 = ma.power((1-2.0/A)/denom,1/3.0)
     Z = ( term1 - term2 ) / np.sqrt(2/(9.0*A))
     return Z, (1.0-stats.zprob(Z))*2
-kurtosistest.__doc__ = stats.kurtosistest.__doc__ 
+kurtosistest.__doc__ = stats.kurtosistest.__doc__
 
 
 def normaltest(a, axis=0):
@@ -1658,7 +1658,7 @@
     beta : {0.4, float} optional
         Plotting positions parameter.
     axis : {None, int} optional
-        Axis along which to perform the trimming. 
+        Axis along which to perform the trimming.
         If None, the input array is first flattened.
     limit : tuple
         Tuple of (lower, upper) values. Values of a outside this closed interval
@@ -1679,8 +1679,8 @@
     # Initialization & checks ---------
     data = ma.array(data, copy=False)
     if limit:
-        condition = (limit[0]<data) & (data<limit[1]) 
-        data[condition.filled(True)] = masked 
+        condition = (limit[0]<data) & (data<limit[1])
+        data[condition.filled(True)] = masked
     p = np.array(prob, copy=False, ndmin=1)
     m = alphap + p*(1.-alphap-betap)
     # Computes quantiles along axis (or globally)
@@ -1689,8 +1689,8 @@
     else:
         assert data.ndim <= 2, "Array should be 2D at most !"
         return ma.apply_along_axis(_quantiles1D, axis, data, m, p)
-    
 
+
 def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4):
     """Calculate the score at the given 'per' percentile of the
     sequence a.  For example, the score at per=50 is the median.
@@ -1747,7 +1747,7 @@
     plpos[data.argsort()[:n]] = (np.arange(1,n+1) - alpha)/(n+1-alpha-beta)
     return ma.array(plpos, mask=data._mask)
 
-meppf = plotting_positions    
+meppf = plotting_positions
 
 #####--------------------------------------------------------------------------
 #---- --- Variability ---
@@ -1776,11 +1776,11 @@
     if not ma.allclose(v,data.mean(0)):
         raise ValueError("Lack of convergence in obrientransform.")
     return data
-    
 
+
 def signaltonoise(data, axis=0):
-    """Calculates the signal-to-noise ratio, as the ratio of the mean over 
-    standard deviation along the given axis.  
+    """Calculates the signal-to-noise ratio, as the ratio of the mean over
+    standard deviation along the given axis.
 
     Parameters
     ----------
@@ -1808,11 +1808,11 @@
             Axis along which to compute. If None, the computation is performed
             on a flat version of the array.
     """
-    return ma.asarray(data).var(axis=axis,ddof=0)    
+    return ma.asarray(data).var(axis=axis,ddof=0)
 
 
 def samplestd(data, axis=0):
-    """Returns a biased estimate of the standard deviation of the data, as the 
+    """Returns a biased estimate of the standard deviation of the data, as the
     square root of the average squared deviations from the mean.
 
     Parameters
@@ -1822,13 +1822,13 @@
         axis : {0,int} optional
             Axis along which to compute. If None, the computation is performed
             on a flat version of the array.
-            
+
     Notes
     -----
-        samplestd(a) is equivalent to a.std(ddof=0)  
-            
+        samplestd(a) is equivalent to a.std(ddof=0)
+
     """
-    return ma.asarray(data).std(axis=axis,ddof=0)   
+    return ma.asarray(data).std(axis=axis,ddof=0)
 
 
 def var(a,axis=None):
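
The ddof=0 equivalence stated in the samplestd notes above can be exercised directly; a minimal sketch (function name as it appears in this changeset):

    # Hedged sketch of the biased (ddof=0) estimate documented above.
    import numpy.ma as ma
    from scipy.stats import mstats

    x = ma.array([1.0, 2.0, 3.0, 4.0])
    assert float(mstats.samplestd(x)) == float(x.std(ddof=0))
    # the variance helper shown just above similarly returns x.var(ddof=0),
    # and signaltonoise is the ratio x.mean() / x.std(ddof=0)
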
@@ -1919,12 +1919,12 @@
     return n_um / d_en
 
 
-    
+
 def friedmanchisquare(*args):
     """Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA.
-    This function calculates the Friedman Chi-square test for repeated measures 
-    and returns the result, along with the associated probability value.  
-    
+    This function calculates the Friedman Chi-square test for repeated measures
+    and returns the result, along with the associated probability value.
+
     Each input is considered a given group. Ideally, the number of treatments
     among each group should be equal. If this is not the case, only the first
     n treatments are taken into account, where n is the number of treatments
@@ -1932,9 +1932,9 @@
     If a group has some missing values, the corresponding treatments are masked
     in the other groups.
     The test statistic is corrected for ties.
-    
+
     Masked values in one group are propagated to the other groups.
-    
+
     Returns: chi-square statistic, associated p-value
     """
     data = argstoarray(*args).astype(float)

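A hedged usage sketch of friedmanchisquare as its docstring above describes it (the sample values are illustrative only):

    # Each argument is one group; masked values propagate across groups and
    # the statistic is corrected for ties, as documented above.
    import numpy.ma as ma
    from scipy.stats import mstats

    g1 = ma.array([9.0, 9.5, 5.0, 7.5, 9.5, 7.5, 8.0, 7.0])
    g2 = ma.array([7.0, 6.5, 7.0, 7.5, 5.0, 8.0, 6.0, 6.5])
    g3 = ma.array([6.0, 8.0, 4.0, 6.0, 7.0, 6.5, 6.0, 4.0])
    chi2, p = mstats.friedmanchisquare(g1, g2, g3)   # statistic, p-value
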
Modified: trunk/scipy/stats/stats.py
===================================================================
--- trunk/scipy/stats/stats.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/stats/stats.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -767,8 +767,8 @@
 
     Returns
     -------
-    The kurtosis of values along an axis. If all values are equal, return -3 for Fisher's 
-    definition and 0 for Pearson's definition. 
+    The kurtosis of values along an axis. If all values are equal, return -3 for Fisher's
+    definition and 0 for Pearson's definition.
 
 
     References

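The constant-input behaviour documented in the stats.py hunk above can be illustrated directly; a minimal sketch:

    # Hedged sketch: all-equal input under the two kurtosis definitions.
    import numpy as np
    from scipy import stats

    x = np.ones(10)                         # all values equal
    stats.kurtosis(x, fisher=True)          # -> -3.0 (Fisher's definition)
    stats.kurtosis(x, fisher=False)         # ->  0.0 (Pearson's definition)
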
Modified: trunk/scipy/stats/tests/test_mmorestats.py
===================================================================
--- trunk/scipy/stats/tests/test_mmorestats.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/stats/tests/test_mmorestats.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -12,7 +12,7 @@
 from numpy.ma import masked
 
 import scipy.stats.mstats as ms
-import scipy.stats.mmorestats as mms 
+import scipy.stats.mmorestats as mms
 
 from scipy.testing import *
 
@@ -33,11 +33,11 @@
         data = ma.array([545,555,558,572,575,576,578,580,
                          594,605,635,651,653,661,666])
         assert_almost_equal(ms.trimmed_mean(data,0.2), 596.2, 1)
-        assert_equal(np.round(mms.trimmed_mean_ci(data,(0.2,0.2)),1), 
+        assert_equal(np.round(mms.trimmed_mean_ci(data,(0.2,0.2)),1),
                      [561.8, 630.6])
     #
     def test_idealfourths(self):
-        "Tests ideal-fourths"      
+        "Tests ideal-fourths"
         test = np.arange(100)
         assert_almost_equal(np.asarray(mms.idealfourths(test)),
                             [24.416667,74.583333],6)

Modified: trunk/scipy/stats/tests/test_mstats.py
===================================================================
--- trunk/scipy/stats/tests/test_mstats.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/stats/tests/test_mstats.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -8,7 +8,7 @@
 import numpy.ma as ma
 from numpy.ma import masked, nomask
 
-import scipy.stats.mstats as mstats 
+import scipy.stats.mstats as mstats
 from scipy.testing import *
 from numpy.ma.testutils import assert_equal, assert_almost_equal, \
     assert_array_almost_equal
@@ -97,12 +97,12 @@
         x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])
         assert_almost_equal(mstats.rankdata(x),[[1,3,3,3,5],[6,7,8.5,8.5,10]])
         assert_almost_equal(mstats.rankdata(x,axis=1),[[1,3,3,3,5],[1,2,3.5,3.5,5]])
-        assert_almost_equal(mstats.rankdata(x,axis=0),[[1,1,1,1,1],[2,2,2,2,2,]])        
-        
-        
+        assert_almost_equal(mstats.rankdata(x,axis=0),[[1,1,1,1,1],[2,2,2,2,2,]])
+
+
 class TestCorr(TestCase):
     #
-    def test_pearsonr(self):       
+    def test_pearsonr(self):
         "Tests some computations of Pearson's r"
         x = ma.arange(10)
         assert_almost_equal(mstats.pearsonr(x,x)[0], 1.0)
@@ -121,16 +121,16 @@
         (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
         assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
         #
-        x = [ 2.0, 47.4, 42.0, 10.8, 60.1,  1.7, 64.0, 63.1, 
+        x = [ 2.0, 47.4, 42.0, 10.8, 60.1,  1.7, 64.0, 63.1,
               1.0,  1.4,  7.9,  0.3,  3.9,  0.3,  6.7]
         y = [22.6, 08.3, 44.4, 11.9, 24.6,  0.6,  5.7, 41.6,
-              0.0,  0.6,  6.7,  3.8,  1.0,  1.2,  1.4]       
+              0.0,  0.6,  6.7,  3.8,  1.0,  1.2,  1.4]
         assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
-        x = [ 2.0, 47.4, 42.0, 10.8, 60.1,  1.7, 64.0, 63.1, 
+        x = [ 2.0, 47.4, 42.0, 10.8, 60.1,  1.7, 64.0, 63.1,
               1.0,  1.4,  7.9,  0.3,  3.9,  0.3,  6.7, np.nan]
         y = [22.6, 08.3, 44.4, 11.9, 24.6,  0.6,  5.7, 41.6,
-              0.0,  0.6,  6.7,  3.8,  1.0,  1.2,  1.4, np.nan]      
-        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y)) 
+              0.0,  0.6,  6.7,  3.8,  1.0,  1.2,  1.4, np.nan]
+        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
         assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
     #
     def test_kendalltau(self):
@@ -138,14 +138,14 @@
         x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66,np.nan])
         y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])
         z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])
-        assert_almost_equal(np.asarray(mstats.kendalltau(x,y)), 
+        assert_almost_equal(np.asarray(mstats.kendalltau(x,y)),
                             [+0.3333333,0.4969059])
-        assert_almost_equal(np.asarray(mstats.kendalltau(x,z)), 
+        assert_almost_equal(np.asarray(mstats.kendalltau(x,z)),
                             [-0.5477226,0.2785987])
         #
-        x = ma.fix_invalid([ 0, 0, 0, 0,20,20, 0,60, 0,20, 
+        x = ma.fix_invalid([ 0, 0, 0, 0,20,20, 0,60, 0,20,
                             10,10, 0,40, 0,20, 0, 0, 0, 0, 0, np.nan])
-        y = ma.fix_invalid([ 0,80,80,80,10,33,60, 0,67,27, 
+        y = ma.fix_invalid([ 0,80,80,80,10,33,60, 0,67,27,
                             25,80,80,80,80,80,80, 0,10,45, np.nan, 0])
         result = mstats.kendalltau(x,y)
         assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])
@@ -179,7 +179,7 @@
         c = mstats.cov(x[1])
         assert_equal(c, x[1].var(ddof=1))
         c = mstats.cov(x)
-        assert_equal(c[1,0], (x[0].anom()*x[1].anom()).sum())  
+        assert_equal(c[1,0], (x[0].anom()*x[1].anom()).sum())
         #
         x = [[nan,nan,  4,  2, 16, 26,  5,  1,  5,  1,  2,  3,  1],
              [  4,  3,  5,  3,  2,  7,  3,  1,  1,  2,  3,  5,  3],
@@ -188,15 +188,15 @@
         x = ma.fix_invalid(x).T
         (winter,spring,summer,fall) = x.T
         #
-        assert_almost_equal(mstats.cov(winter,winter,bias=True),  
+        assert_almost_equal(mstats.cov(winter,winter,bias=True),
                             winter.var(ddof=0))
-        assert_almost_equal(mstats.cov(winter,winter,bias=False), 
+        assert_almost_equal(mstats.cov(winter,winter,bias=False),
                             winter.var(ddof=1))
         assert_almost_equal(mstats.cov(winter,spring), 7.7)
         assert_almost_equal(mstats.cov(winter,summer), 19.1111111, 7)
-        assert_almost_equal(mstats.cov(winter,fall), 20)      
-        
+        assert_almost_equal(mstats.cov(winter,fall), 20)
 
+
 class TestTrimming(TestCase):
     #
     def test_trim(self):
@@ -206,7 +206,7 @@
         a = ma.arange(10)
         assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None])
         a = ma.arange(10)
-        assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)), 
+        assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)),
                      [None,None,None,3,4,5,6,7,None,None])
         a = ma.arange(10)
         assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True),
@@ -214,7 +214,7 @@
         #
         a = ma.arange(12)
         a[[0,-1]] = a[5] = masked
-        assert_equal(mstats.trim(a,(2,8)), 
+        assert_equal(mstats.trim(a,(2,8)),
                      [None,None,2,3,4,None,6,7,8,None,None,None])
         #
         x = ma.arange(100).reshape(10,10)
@@ -267,7 +267,7 @@
         "Tests the Winsorization of the data."
         data = ma.array([ 77, 87, 88,114,151,210,219,246,253,262,
                          296,299,306,376,428,515,666,1310,2611])
-        assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1), 
+        assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1),
                             21551.4, 1)
         data[5] = masked
         winsorized = mstats.winsorize(data)
@@ -285,7 +285,7 @@
         Note that both test cases came from here.
     """
     testcase = [1,2,3,4]
-    testmathworks = ma.fix_invalid([1.165 , 0.6268, 0.0751, 0.3516, -0.6965, 
+    testmathworks = ma.fix_invalid([1.165 , 0.6268, 0.0751, 0.3516, -0.6965,
                                     np.nan])
     def test_moment(self):
         """
@@ -315,7 +315,7 @@
         assert_almost_equal(y,-0.437111105023940,10)
         y = mstats.skew(self.testcase)
         assert_almost_equal(y,0.0,10)
-        
+
     def test_kurtosis(self):
         """
             sum((testcase-mean(testcase,axis=0))**4,axis=0)/((sqrt(var(testcase)*3/4))**4)/4
@@ -424,7 +424,7 @@
 ##        assert_approx_equal(y,0.775177399)
         y = mstats.stderr(self.testcase)
         assert_almost_equal(y,0.6454972244)
-        
+
     def test_sem(self):
         """
         this is not in R, so used
@@ -449,22 +449,22 @@
         (testcase[i]-mean(testcase,axis=0))/sqrt(var(testcase)*3/4)
         """
         y = mstats.zs(self.testcase)
-        desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996 , 
+        desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996 ,
                                   0.44721359549996 , 1.3416407864999, np.nan])
         assert_almost_equal(desired,y,decimal=12)
-        
-        
 
+
+
 class TestMisc(TestCase):
     #
     def test_obrientransform(self):
         "Tests Obrien transform"
-        args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2, 
+        args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2,
                 [6]+[7]*2+[8]*4+[9]*9+[10]*16]
         result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538],
                   [10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]]
-        assert_almost_equal(np.round(mstats.obrientransform(*args).T,4), 
-                            result,4) 
+        assert_almost_equal(np.round(mstats.obrientransform(*args).T,4),
+                            result,4)
     #
     def test_kstwosamp(self):
         "Tests the Kolmogorov-Smirnov 2 samples test"
@@ -503,4 +503,4 @@
 
 
 if __name__ == "__main__":
-    nose.run(argv=['', __file__])
\ No newline at end of file
+    nose.run(argv=['', __file__])

Modified: trunk/scipy/testing/decorators.py
===================================================================
--- trunk/scipy/testing/decorators.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/testing/decorators.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -22,7 +22,7 @@
     hardware-dependent, but in general any individual test that requires more
     than a second or two should be labeled as slow (the whole suite consists of
     thousands of tests, so even a second is significant)."""
-    
+
     t.slow = True
     return t
 
@@ -39,7 +39,7 @@
     >>> def func_with_test_in_name(arg1, arg2): pass
     ...
     >>>
-    
+
     This decorator cannot use the nose namespace, because it can be
     called from a non-test module. See also istest and nottest in
     nose.tools

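A short sketch of applying the slow label documented above (the import path matches this changeset; the test name is hypothetical):

    # Hedged sketch: tests marked slow are filtered out by the 'fast' label,
    # which maps to nose's -A 'not slow' as described in nosetester.py below.
    from scipy.testing import decorators as dec

    @dec.slow
    def test_expensive_case():              # hypothetical example test
        # anything needing more than a second or two gets the slow attribute
        pass
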
Modified: trunk/scipy/testing/examples/test_foo.py
===================================================================
--- trunk/scipy/testing/examples/test_foo.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/testing/examples/test_foo.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -40,7 +40,7 @@
     """A regular unittest, with the extra Numpy features."""
     def test_1(self):
         print 'First test'
-        
+
     def test_2(self):
         print 'Second test'
 
@@ -99,8 +99,7 @@
 def test_warn():
     "A simple test that prints a warning."
     warn('Bad things are happening...')
-    
+
 def test_error():
     "A simple test that prints an error message."
     error('Really bad things are happening...')
-    

Modified: trunk/scipy/testing/nosetester.py
===================================================================
--- trunk/scipy/testing/nosetester.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/testing/nosetester.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -13,20 +13,20 @@
     """ Nose test runner.
 
     Usage: NoseTester(<package>).test()
-    
+
     <package> is package path or module. Default for package is None. A
     value of None finds calling module path.
 
     Typical call is from module __init__, and corresponds to this:
-    
+
     >>> test = NoseTester().test
-    
+
     In practice, because nose may not be importable, the __init__
     files actually have:
-    
+
     >>> from scipy.testing.pkgtester import Tester
     >>> test = Tester().test
-    
+
     The pkgtester module checks for the presence of nose on the path,
     returning this class if nose is present, and a null class
     otherwise.
@@ -50,7 +50,7 @@
         elif isinstance(package, type(os)):
             package = os.path.dirname(package.__file__)
         self.package_path = package
-        
+
     def _add_doc(testtype):
         ''' Decorator to add docstring to functions using test labels
 
@@ -73,9 +73,9 @@
                 'not slow'.
             'full' - fast (as above) and slow %(testtype)s as in
                 no -A option to nosetests - same as ''
-            None or '' - run all %(testtype)ss 
+            None or '' - run all %(testtype)ss
             attribute_identifier - string passed directly to
-                nosetests as '-A' 
+                nosetests as '-A'
         verbose : integer
             verbosity value for test outputs, 1-10
         extra_argv : list
@@ -103,11 +103,11 @@
         if extra_argv:
             argv += extra_argv
         return argv
-        
-    @_add_doc('test')        
+
+    @_add_doc('test')
     def test(self, label='fast', verbose=1, extra_argv=None, doctests=False):
         ''' Run tests for module using nose
-        
+
         %(test_header)s
         doctests : boolean
             If True, run doctests in module, default False
@@ -116,7 +116,7 @@
         if doctests:
             argv+=['--with-doctest']
         nose.run(argv=argv)
-        
+
     @_add_doc('benchmark')
     def bench(self, label='fast', verbose=1, extra_argv=None):
         ''' Run benchmarks for module using nose
@@ -125,4 +125,3 @@
         argv = self._test_argv(label, verbose, extra_argv)
         argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
         nose.run(argv=argv)
-        

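The mapping from the label argument to nose options, as the docstring above spells it out, can be summarised in a few lines; a hedged sketch (the helper name is hypothetical and only mirrors the docstring, not the changeset's _test_argv):

    def label_to_nose_args(label):
        if label == 'fast':
            return ['-A', 'not slow']       # default: exclude tests marked slow
        if label in (None, '', 'full'):
            return []                       # run fast and slow tests alike
        return ['-A', str(label)]           # attribute expression passed through

    # e.g. NoseTester(pkg).test(label='fast') roughly amounts to
    #      nosetests <package_path> -A 'not slow'
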
Modified: trunk/scipy/testing/nulltester.py
===================================================================
--- trunk/scipy/testing/nulltester.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/testing/nulltester.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -13,4 +13,3 @@
               'Need nose >=0.10 for tests - see %s' % \
               'http://somethingaboutorange.com/mrl/projects/nose'
     bench = test
-    

Modified: trunk/scipy/testing/pkgtester.py
===================================================================
--- trunk/scipy/testing/pkgtester.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/testing/pkgtester.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -20,7 +20,7 @@
     nose_version = nose.__versioninfo__
     if nose_version[0] < 1 and nose_version[1] < 10:
         fine_nose = False
-            
+
 if fine_nose:
     from scipy.testing.nosetester import NoseTester as Tester
 else:

Modified: trunk/scipy/testing/utils.py
===================================================================
--- trunk/scipy/testing/utils.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/testing/utils.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -95,4 +95,3 @@
         if testmatch.search(funcname) and not funcname.startswith('_'):
             setattr(cls, funcname, decorator(function))
     return
-

Modified: trunk/scipy/weave/size_check.py
===================================================================
--- trunk/scipy/weave/size_check.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/weave/size_check.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -278,4 +278,3 @@
 # so this is gonna take some thought (probably some tree manipulation).
 def take(ary,axis=0): raise NotImplemented
 # and all the rest
-

Modified: trunk/scipy/weave/tests/test_blitz_tools.py
===================================================================
--- trunk/scipy/weave/tests/test_blitz_tools.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/weave/tests/test_blitz_tools.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -142,7 +142,7 @@
         expr = "result[1:-1,1:-1] = (b[1:-1,1:-1] + b[2:,1:-1] + b[:-2,1:-1]" \
                                   "+ b[1:-1,2:] + b[1:-1,:-2]) / 5."
         self.generic_2d(expr,float32)
-        
+
     @dec.slow
     def test_5point_avg_2d_double(self):
         """ result[1:-1,1:-1] = (b[1:-1,1:-1] + b[2:,1:-1] + b[:-2,1:-1]

Modified: trunk/scipy/weave/tests/test_c_spec.py
===================================================================
--- trunk/scipy/weave/tests/test_c_spec.py	2008-04-20 08:27:24 UTC (rev 4153)
+++ trunk/scipy/weave/tests/test_c_spec.py	2008-04-20 12:15:19 UTC (rev 4154)
@@ -62,8 +62,8 @@
 #         print "Probably don't have Compiler: %s"%c
 #     else:
 #         compilers.append(c)
-    
 
+
 class IntConverter(TestCase):
     compiler = ''
     @dec.slow
@@ -650,73 +650,73 @@
 #     compiler = ''
 # class TestGccIntConverter(TestIntConverter):
 #     compiler = 'gcc'
-# 
+#
 # class TestMsvcFloatConverter(TestFloatConverter):
 #     compiler = 'msvc'
-# 
+#
 # class TestMsvcFloatConverter(TestFloatConverter):
 #     compiler = 'msvc'
 # class TestUnixFloatConverter(TestFloatConverter):
 #     compiler = ''
 # class TestGccFloatConverter(TestFloatConverter):
 #     compiler = 'gcc'
-# 
+#
 # class TestMsvcComplexConverter(TestComplexConverter):
 #     compiler = 'msvc'
 # class TestUnixComplexConverter(TestComplexConverter):
 #     compiler = ''
 # class TestGccComplexConverter(TestComplexConverter):
 #     compiler = 'gcc'
-# 
+#
 # class TestMsvcFileConverter(TestFileConverter):
 #     compiler = 'msvc'
 # class TestUnixFileConverter(TestFileConverter):
 #     compiler = ''
 # class TestGccFileConverter(TestFileConverter):
 #     compiler = 'gcc'
-# 
+#
 # class TestMsvcCallableConverter(TestCallableConverter):
 #     compiler = 'msvc'
 # class TestUnixCallableConverter(TestCallableConverter):
 #     compiler = ''
 # class TestGccCallableConverter(TestCallableConverter):
 #     compiler = 'gcc'
-# 
+#
 # class TestMsvcSequenceConverter(TestSequenceConverter):
 #     compiler = 'msvc'
 # class TestUnixSequenceConverter(TestSequenceConverter):
 #     compiler = ''
 # class TestGccSequenceConverter(TestSequenceConverter):
 #     compiler = 'gcc'
-# 
+#
 # class TestMsvcStringConverter(TestStringConverter):
 #     compiler = 'msvc'
 # class TestUnixStringConverter(TestStringConverter):
 #     compiler = ''
 # class TestGccStringConverter(TestStringConverter):
 #     compiler = 'gcc'
-# 
+#
 # class TestMsvcListConverter(TestListConverter):
 #     compiler = 'msvc'
 # class TestUnixListConverter(TestListConverter):
 #     compiler = ''
 # class TestGccListConverter(TestListConverter):
 #     compiler = 'gcc'
-# 
+#
 # class TestMsvcTupleConverter(TestTupleConverter):
 #     compiler = 'msvc'
 # class TestUnixTupleConverter(TestTupleConverter):
 #     compiler = ''
 # class TestGccTupleConverter(TestTupleConverter):
 #     compiler = 'gcc'
-# 
+#
 # class TestMsvcDictConverter(TestDictConverter):
 #     compiler = 'msvc'
 # class TestUnixDictConverter(TestDictConverter):
 #     compiler = ''
 # class TestGccDictConverter(TestDictConverter):
 #     compiler = 'gcc'
-# 
+#
 # class TestMsvcInstanceConverter(TestInstanceConverter):
 #     compiler = 'msvc'
 # class TestUnixInstanceConverter(TestInstanceConverter):
@@ -751,10 +751,10 @@
 # else:
 #     for _n in dir():
 #         if _n[:8]=='TestUnix': exec 'del '+_n
-# 
+#
 # if not (gcc_exists() and msvc_exists() and sys.platform == 'win32'):
 #     for _n in dir():
 #         if _n[:7]=='TestGcc': exec 'del '+_n
-# 
+#
 if __name__ == "__main__":
     nose.run(argv=['', __file__])


